author    Adrian Stratulat <adrian.stratulat@enea.com>  2020-06-16 08:34:41 +0200
committer Adrian Stratulat <adrian.stratulat@enea.com>  2020-07-13 05:12:23 +0200
commit    9159176013f053a8294ce512b6408a8235871f0d (patch)
tree      26edb62ec9a9f0e370da2e74325b94ce54aec443
parent    02812575459b24c14b6d9b9f74c51175f0333385 (diff)
download  meta-nfv-access-bsp-arm-develop.tar.gz

Ampere/aarch64 bring-up changes (feature_arm64, develop)
* Add machine description for emag8180.
* Disable syslinux build: syslinux is not compatible with ARM targets,
  but other recipes force it as a dependency anyway.
* Add recipe for kernel linux-ampere_4.14 (used by emag8180 and
  qemuarm64 targets).
* Upgrade i40e drivers to a newer version due to warnings generated by
  OVS-DPDK.
* Add OVMF support for AARCH64.

Change-Id: I4cbc09ef83d717b39abf0981b80569a4a694cb0d
Signed-off-by: Adrian Stratulat <adrian.stratulat@enea.com>
-rw-r--r--  conf/layer.conf                                                                8
-rw-r--r--  conf/machine/emag8180.conf                                                    13
-rw-r--r--  recipes-core/ovmf/ovmf_git.bbappend                                          112
-rw-r--r--  recipes-devtools/syslinux/syslinux_%.bbappend                                 13
-rw-r--r--  recipes-kernel/linux/linux-ampere-guest_4.14.bb                               43
-rw-r--r--  recipes-kernel/linux/linux-ampere/0001-Upgrade-i40e-drivers-to-2.11.29.patch  63158
-rw-r--r--  recipes-kernel/linux/linux-ampere_4.14.bb                                     80
-rw-r--r--  recipes-kernel/linux/linux-ampere_4.14.inc                                    46
8 files changed, 63469 insertions(+), 4 deletions(-)
diff --git a/conf/layer.conf b/conf/layer.conf
index 2ae610a..50199be 100644
--- a/conf/layer.conf
+++ b/conf/layer.conf
@@ -6,7 +6,7 @@ BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \
           ${LAYERDIR}/packagegroups/* \
           ${LAYERDIR}/recipes-*/*/*.bbappend"

-BBFILE_COLLECTIONS += "enea-bsp-arm"
-BBFILE_PATTERN_enea-bsp-arm = "^${LAYERDIR}/"
-BBFILE_PRIORITY_enea-bsp-arm = "6"
-LAYERDEPENDS_enea-bsp-arm = "enea-bsp-common"
+BBFILE_COLLECTIONS += "nfv-access-bsp-arm"
+BBFILE_PATTERN_nfv-access-bsp-arm = "^${LAYERDIR}/"
+BBFILE_PRIORITY_nfv-access-bsp-arm = "6"
+LAYERDEPENDS_nfv-access-bsp-arm = "nfv-access-bsp-common"
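Note: with the collection renamed from enea-bsp-arm to nfv-access-bsp-arm, any sibling layer that still lists the old name in LAYERDEPENDS will fail at parse time. A quick sanity check, sketched under the assumption of a standard bitbake-layers setup with sibling layers checked out next to this one:

    # Confirm the renamed collection is registered with priority 6
    bitbake-layers show-layers | grep nfv-access-bsp-arm

    # Hunt for stale references to the old collection name
    grep -rn "enea-bsp-arm" ../meta-*/conf/layer.conf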
diff --git a/conf/machine/emag8180.conf b/conf/machine/emag8180.conf
new file mode 100644
index 0000000..bc4b73b
--- /dev/null
+++ b/conf/machine/emag8180.conf
@@ -0,0 +1,13 @@
1#@TYPE: Machine
2#@NAME: Ampere eMAG 8180 64-bit Arm Processor
3#@DESCRIPTION: Ampere eMAG 8180 processor based on ARMv8 architecture
4
5require conf/machine/include/arm/arch-armv8.inc
6
7PREFERRED_PROVIDER_virtual/kernel ?= "linux-ampere"
8
9MACHINE_FEATURES += " efi"
10DPDK_TARGET_MACHINE ?= "armv8a"
11
12SERIAL_CONSOLES = "115200;ttyAMA0"
13KERNEL_IMAGETYPE = "Image"
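Note: a minimal sketch of selecting the new machine for a build; core-image-minimal is only a placeholder image name, not the product image shipped by this layer:

    # One-off build for the Ampere eMAG 8180 target
    MACHINE=emag8180 bitbake core-image-minimal

    # Or pin the machine for the whole build directory
    echo 'MACHINE = "emag8180"' >> conf/local.conf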
diff --git a/recipes-core/ovmf/ovmf_git.bbappend b/recipes-core/ovmf/ovmf_git.bbappend
new file mode 100644
index 0000000..9fc7cb7
--- /dev/null
+++ b/recipes-core/ovmf/ovmf_git.bbappend
@@ -0,0 +1,112 @@
1# Override the "do_compile" step in order to support AARCH64 targets.
2
3COMPATIBLE_HOST='(i.86|x86_64|aarch64).*'
4
5do_compile_class-target() {
6 export LFLAGS="${LDFLAGS}"
7 PARALLEL_JOBS="${@ '${PARALLEL_MAKE}'.replace('-j', '-n ')}"
8 OVMF_ARCH="X64"
9 if [ "${TARGET_ARCH}" != "x86_64" ] ; then
10 OVMF_ARCH="IA32"
11 fi
12 if [ "${TARGET_ARCH}" = "aarch64" ] ; then
13 OVMF_ARCH="AARCH64"
14 fi
15
16 # The build for the target uses BaseTools/Conf/tools_def.template
17 # from ovmf-native to find the compiler, which depends on
18 # exporting HOST_PREFIX.
19 export HOST_PREFIX="${HOST_PREFIX}"
20
21 # BaseTools/Conf gets copied to Conf, but only if that does not
22 # exist yet. To ensure that an updated template gets used during
23 # incremental builds, we need to remove the copy before we start.
24 rm -f `ls ${S}/Conf/*.txt | grep -v ReadMe.txt`
25
26 # ${WORKDIR}/ovmf is a well-known location where do_install and
27 # do_deploy will be able to find the files.
28 rm -rf ${WORKDIR}/ovmf
29 mkdir ${WORKDIR}/ovmf
30 OVMF_DIR_SUFFIX="X64"
31 if [ "${TARGET_ARCH}" != "x86_64" ] ; then
32 OVMF_DIR_SUFFIX="Ia32" # Note the different capitalization
33 fi
34 if [ "${TARGET_ARCH}" = "aarch64" ] ; then
35 OVMF_DIR_SUFFIX="AArch64" # Note the different capitalization
36 fi
37
38 FIXED_GCCVER=$(fixup_target_tools ${GCC_VER})
39 bbnote FIXED_GCCVER is ${FIXED_GCCVER}
40
41 if [ "${TARGET_ARCH}" = "aarch64" ] ; then
42 build_dir="${S}/Build/ArmVirtQemu-AARCH64/RELEASE_${FIXED_GCCVER}"
43 bbnote "Building without Secure Boot."
44 rm -rf ${S}/Build/ArmVirtQemu-AARCH64
45
46 # If the local BaseTools directory is not found, the EDK_TOOLS_PATH
47 # variable is used to determine the location of the toolchain
48 rm -rf ${S}/BaseTools
49 export EDK_TOOLS_PATH=${WORKDIR}/recipe-sysroot-native/usr/bin/edk2_basetools/BaseTools
50 bash -c "(source ${S}/edksetup.sh; GCC5_AARCH64_PREFIX=aarch64-poky-linux- build -a AARCH64 -p ArmVirtPkg/ArmVirtQemu.dsc -t GCC5 -b RELEASE)"
51 else
52 build_dir="${S}/Build/Ovmf$OVMF_DIR_SUFFIX/RELEASE_${FIXED_GCCVER}"
53 bbnote "Building without Secure Boot."
54 rm -rf ${S}/Build/Ovmf$OVMF_DIR_SUFFIX
55
56 ${S}/OvmfPkg/build.sh $PARALLEL_JOBS -a $OVMF_ARCH -b RELEASE -t ${FIXED_GCCVER}
57 ln ${build_dir}/FV/OVMF.fd ${WORKDIR}/ovmf/ovmf.fd
58 ln ${build_dir}/FV/OVMF_CODE.fd ${WORKDIR}/ovmf/ovmf.code.fd
59 ln ${build_dir}/FV/OVMF_VARS.fd ${WORKDIR}/ovmf/ovmf.vars.fd
60 fi
61
62 ln ${build_dir}/${OVMF_ARCH}/Shell.efi ${WORKDIR}/ovmf/
63
64 if ${@bb.utils.contains('PACKAGECONFIG', 'secureboot', 'true', 'false', d)}; then
65 # See CryptoPkg/Library/OpensslLib/Patch-HOWTO.txt and
66 # https://src.fedoraproject.org/cgit/rpms/edk2.git/tree/ for
67 # building with Secure Boot enabled.
68 bbnote "Building with Secure Boot."
69 rm -rf ${S}/Build/Ovmf$OVMF_DIR_SUFFIX
70 ln -sf ${OPENSSL_RELEASE} ${S}/CryptoPkg/Library/OpensslLib/openssl
71 ${S}/OvmfPkg/build.sh $PARALLEL_JOBS -a $OVMF_ARCH -b RELEASE -t ${FIXED_GCCVER} ${OVMF_SECURE_BOOT_FLAGS}
72 ln ${build_dir}/FV/OVMF.fd ${WORKDIR}/ovmf/ovmf.secboot.fd
73 ln ${build_dir}/FV/OVMF_CODE.fd ${WORKDIR}/ovmf/ovmf.secboot.code.fd
74 ln ${build_dir}/${OVMF_ARCH}/EnrollDefaultKeys.efi ${WORKDIR}/ovmf/
75 fi
76}
77
78do_install_class-target() {
79 if [ "${TARGET_ARCH}" = "aarch64" ] ; then
80 FIXED_GCCVER=$(fixup_target_tools ${GCC_VER})
81 build_dir="${S}/Build/ArmVirtQemu-AARCH64/RELEASE_${FIXED_GCCVER}"
82
83 install -d ${D}/usr/share/qemu
84 install -m 766 ${build_dir}/FV/QEMU_EFI.fd ${D}/usr/share/qemu/AAVMF_CODE.fd
85 install -m 766 ${build_dir}/FV/QEMU_VARS.fd ${D}/usr/share/qemu/AAVMF_VARS.fd
86 else
87 # Content for UEFI shell iso. We install the EFI shell as
88 # bootx64/ia32.efi because then it can be started even when the
89 # firmware itself does not contain it.
90 install -d ${D}/efi/boot
91 install ${WORKDIR}/ovmf/Shell.efi ${D}/efi/boot/boot${@ "ia32" if "${TARGET_ARCH}" != "x86_64" else "x64"}.efi
92 if ${@bb.utils.contains('PACKAGECONFIG', 'secureboot', 'true', 'false', d)}; then
93 install ${WORKDIR}/ovmf/EnrollDefaultKeys.efi ${D}
94 fi
95 fi
96}
97
98FILES_${PN}_class-target_aarch64 += "/usr/share/qemu/"
99
100do_deploy_class-target() {
101 if [ "${TARGET_ARCH}" != "aarch64" ] ; then
102 # For use with "runqemu ovmf".
103 for i in \
104 ovmf \
105 ovmf.code \
106 ovmf.vars \
107 ${@bb.utils.contains('PACKAGECONFIG', 'secureboot', 'ovmf.secboot ovmf.secboot.code', '', d)} \
108 ; do
109 qemu-img convert -f raw -O qcow2 ${WORKDIR}/ovmf/$i.fd ${DEPLOYDIR}/$i.qcow2
110 done
111 fi
112}
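Note: on aarch64 this bbappend deploys the ArmVirtQemu firmware as AAVMF_CODE.fd/AAVMF_VARS.fd under /usr/share/qemu, which QEMU consumes as two pflash drives. A hedged usage sketch (machine options, memory size and the guest image path are assumptions, and AAVMF_VARS.fd would normally be copied per guest so it stays writable):

    # Boot an aarch64 KVM guest with the AAVMF firmware built here
    qemu-system-aarch64 -M virt -cpu host -enable-kvm -m 2048 -nographic \
        -drive if=pflash,format=raw,readonly=on,file=/usr/share/qemu/AAVMF_CODE.fd \
        -drive if=pflash,format=raw,file=/usr/share/qemu/AAVMF_VARS.fd \
        -drive if=virtio,format=qcow2,file=guest.qcow2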
diff --git a/recipes-devtools/syslinux/syslinux_%.bbappend b/recipes-devtools/syslinux/syslinux_%.bbappend
new file mode 100644
index 0000000..045830a
--- /dev/null
+++ b/recipes-devtools/syslinux/syslinux_%.bbappend
@@ -0,0 +1,13 @@
1# Syslinux is not available on ARM targets.
2# Some recipes/image-classes still mark it as a dependency even though it is not used.
3
4COMPATIBLE_HOST = '(x86_64|i.86|aarch64).*-(linux|freebsd.*)'
5
6do_fetch[noexec] = "1"
7do_unpack[noexec] = "1"
8do_patch[noexec] = "1"
9do_populate_lic[noexec] = "1"
10do_configure[noexec] = "1"
11do_compile[noexec] = "1"
12do_install[noexec] = "1"
13do_populate_sysroot[noexec] = "1"
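Note: the recipe still parses on ARM builds (the widened COMPATIBLE_HOST now matches aarch64), but every build step is stubbed out, so pulling syslinux in as a stray dependency costs nothing. A quick check, assuming a configured build environment:

    # COMPATIBLE_HOST should now include aarch64
    bitbake -e syslinux | grep '^COMPATIBLE_HOST='

    # The stubbed tasks run as no-ops, so this returns almost immediately
    bitbake syslinux -c populate_sysroot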
diff --git a/recipes-kernel/linux/linux-ampere-guest_4.14.bb b/recipes-kernel/linux/linux-ampere-guest_4.14.bb
new file mode 100644
index 0000000..8782972
--- /dev/null
+++ b/recipes-kernel/linux/linux-ampere-guest_4.14.bb
@@ -0,0 +1,43 @@
1require linux-ampere_4.14.inc
2
3################# meta-enea-virtualization/.../linux-intel-guest_4.14.bbappend ############
4
5# Disable Virtualization (host) support
6KERNEL_FEATURES_append = " features/kvm/host_n.scc"
7
8# KVM Guest
9KERNEL_FEATURES_append = " features/kvm/guest_kvm_y.scc"
10KERNEL_FEATURES_append = " features/kvm/virtio_y.scc"
11# PCI Legacy (required for /dev/vda)
12KERNEL_FEATURES_append = " features/kvm/virtio_pci_legacy_y.scc"
13
14# Full NOHZ - the nohz_full kernel parameter is required
15KERNEL_FEATURES_append = " features/full_nohz/full_nohz-enable.scc"
16
17# Enable HPET, UIO, HUGETLB, PCI_MSI
18KERNEL_FEATURES_append = " features/intel-dpdk/intel-dpdk.scc"
19KERNEL_MODULE_AUTOLOAD += "uio"
20
21# VFIO/IOMMU
22KERNEL_FEATURES_append = " features/vfio/vfio_m.scc"
23
24# Low Latency kernel
25KERNEL_FEATURES_append = " features/lowlatency/lowlatency_y.scc"
26
27# CPU isolation
28KERNEL_FEATURES_append = " features/cgroups/cpusets.scc"
29KERNEL_FEATURES_append = " features/rcu/rcu_nocb_y.scc"
30
31# Enable E1000 and IXGBE drivers as modules
32KERNEL_FEATURES_append = " features/intel-e1xxxx/e1xxxx_m.scc"
33KERNEL_FEATURES_append = " features/ixgbe/ixgbe_m.scc"
34
35# Enable PCI IOV
36KERNEL_FEATURES_append = " features/pci/pci_iov_y.scc"
37
38# Enable printk messages
39KERNEL_FEATURES_append = " features/printk/printk_y.scc"
40
41# Enable CD-ROM support for Cloud-init
42KERNEL_FEATURES_append = " features/isofs/isofs.scc"
43KERNEL_FEATURES_append = " features/cdrom/cdrom_m.scc"
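Note: one way to confirm the KERNEL_FEATURES fragments actually land in the generated configuration, sketched under the assumption that linux-ampere_4.14.inc pulls in the linux-yocto tooling (which provides kernel_configcheck) and a typical WORKDIR layout:

    # Audit the applied configuration fragments against the final .config
    bitbake linux-ampere-guest -c kernel_configcheck -f

    # Spot-check a couple of options the fragments are expected to set
    WORKDIR=$(bitbake -e linux-ampere-guest | sed -n 's/^WORKDIR="\(.*\)"$/\1/p')
    grep -E 'CONFIG_(VIRTIO_PCI|NO_HZ_FULL)=' "$WORKDIR"/linux-*/.config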
diff --git a/recipes-kernel/linux/linux-ampere/0001-Upgrade-i40e-drivers-to-2.11.29.patch b/recipes-kernel/linux/linux-ampere/0001-Upgrade-i40e-drivers-to-2.11.29.patch
new file mode 100644
index 0000000..88c4182
--- /dev/null
+++ b/recipes-kernel/linux/linux-ampere/0001-Upgrade-i40e-drivers-to-2.11.29.patch
@@ -0,0 +1,63158 @@
1From 9164464963a87499b1448d436821bd3f7a7646d5 Mon Sep 17 00:00:00 2001
2From: Adrian Calianu <adrian.calianu@enea.com>
3Date: Thu, 21 May 2020 09:02:54 +0200
4Subject: [PATCH] Upgrade i40e drivers to 2.11.29
5
6Signed-off-by: Adrian Calianu <adrian.calianu@enea.com>
7---
8 drivers/net/ethernet/intel/i40e/Makefile | 209 +-
9 .../net/ethernet/intel/i40e/Module.supported | 1 +
10 drivers/net/ethernet/intel/i40e/common.mk | 305 +
11 drivers/net/ethernet/intel/i40e/i40e.h | 507 +-
12 drivers/net/ethernet/intel/i40e/i40e_adminq.c | 385 +-
13 drivers/net/ethernet/intel/i40e/i40e_adminq.h | 33 +-
14 .../net/ethernet/intel/i40e/i40e_adminq_cmd.h | 575 +-
15 drivers/net/ethernet/intel/i40e/i40e_alloc.h | 27 +-
16 drivers/net/ethernet/intel/i40e/i40e_client.c | 210 +-
17 drivers/net/ethernet/intel/i40e/i40e_client.h | 41 +-
18 drivers/net/ethernet/intel/i40e/i40e_common.c | 2968 +++-
19 drivers/net/ethernet/intel/i40e/i40e_dcb.c | 579 +-
20 drivers/net/ethernet/intel/i40e/i40e_dcb.h | 60 +-
21 drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 46 +-
22 drivers/net/ethernet/intel/i40e/i40e_ddp.c | 478 +
23 .../net/ethernet/intel/i40e/i40e_debugfs.c | 1154 +-
24 drivers/net/ethernet/intel/i40e/i40e_devids.h | 41 +-
25 drivers/net/ethernet/intel/i40e/i40e_diag.c | 70 +-
26 drivers/net/ethernet/intel/i40e/i40e_diag.h | 27 +-
27 .../net/ethernet/intel/i40e/i40e_ethtool.c | 3837 ++++-
28 .../ethernet/intel/i40e/i40e_ethtool_stats.h | 293 +
29 .../net/ethernet/intel/i40e/i40e_filters.c | 40 +
30 .../net/ethernet/intel/i40e/i40e_filters.h | 11 +
31 drivers/net/ethernet/intel/i40e/i40e_helper.h | 128 +
32 drivers/net/ethernet/intel/i40e/i40e_hmc.c | 68 +-
33 drivers/net/ethernet/intel/i40e/i40e_hmc.h | 27 +-
34 .../net/ethernet/intel/i40e/i40e_lan_hmc.c | 135 +-
35 .../net/ethernet/intel/i40e/i40e_lan_hmc.h | 27 +-
36 drivers/net/ethernet/intel/i40e/i40e_main.c | 12375 +++++++++++-----
37 drivers/net/ethernet/intel/i40e/i40e_nvm.c | 553 +-
38 drivers/net/ethernet/intel/i40e/i40e_osdep.h | 99 +-
39 .../net/ethernet/intel/i40e/i40e_prototype.h | 327 +-
40 drivers/net/ethernet/intel/i40e/i40e_ptp.c | 1120 +-
41 .../net/ethernet/intel/i40e/i40e_register.h | 172 +-
42 drivers/net/ethernet/intel/i40e/i40e_status.h | 28 +-
43 drivers/net/ethernet/intel/i40e/i40e_trace.h | 62 +-
44 drivers/net/ethernet/intel/i40e/i40e_txrx.c | 1348 +-
45 drivers/net/ethernet/intel/i40e/i40e_txrx.h | 171 +-
46 drivers/net/ethernet/intel/i40e/i40e_type.h | 270 +-
47 .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 8342 ++++++++---
48 .../ethernet/intel/i40e/i40e_virtchnl_pf.h | 113 +-
49 drivers/net/ethernet/intel/i40e/kcompat.c | 2761 ++++
50 drivers/net/ethernet/intel/i40e/kcompat.h | 6838 +++++++++
51 .../ethernet/intel/i40e/kcompat_overflow.h | 319 +
52 drivers/net/ethernet/intel/i40e/kcompat_vfd.c | 2550 ++++
53 drivers/net/ethernet/intel/i40e/kcompat_vfd.h | 141 +
54 drivers/net/ethernet/intel/i40e/virtchnl.h | 949 ++
55 47 files changed, 41086 insertions(+), 9734 deletions(-)
56 create mode 100644 drivers/net/ethernet/intel/i40e/Module.supported
57 create mode 100644 drivers/net/ethernet/intel/i40e/common.mk
58 create mode 100644 drivers/net/ethernet/intel/i40e/i40e_ddp.c
59 create mode 100644 drivers/net/ethernet/intel/i40e/i40e_ethtool_stats.h
60 create mode 100644 drivers/net/ethernet/intel/i40e/i40e_filters.c
61 create mode 100644 drivers/net/ethernet/intel/i40e/i40e_filters.h
62 create mode 100644 drivers/net/ethernet/intel/i40e/i40e_helper.h
63 create mode 100644 drivers/net/ethernet/intel/i40e/kcompat.c
64 create mode 100644 drivers/net/ethernet/intel/i40e/kcompat.h
65 create mode 100644 drivers/net/ethernet/intel/i40e/kcompat_overflow.h
66 create mode 100644 drivers/net/ethernet/intel/i40e/kcompat_vfd.c
67 create mode 100644 drivers/net/ethernet/intel/i40e/kcompat_vfd.h
68 create mode 100644 drivers/net/ethernet/intel/i40e/virtchnl.h
69
70diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
71index 3da482c3d..81f5ab908 100644
72--- a/drivers/net/ethernet/intel/i40e/Makefile
73+++ b/drivers/net/ethernet/intel/i40e/Makefile
74@@ -1,31 +1,10 @@
75-################################################################################
76-#
77-# Intel Ethernet Controller XL710 Family Linux Driver
78-# Copyright(c) 2013 - 2015 Intel Corporation.
79-#
80-# This program is free software; you can redistribute it and/or modify it
81-# under the terms and conditions of the GNU General Public License,
82-# version 2, as published by the Free Software Foundation.
83-#
84-# This program is distributed in the hope it will be useful, but WITHOUT
85-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
86-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
87-# more details.
88-#
89-# You should have received a copy of the GNU General Public License along
90-# with this program. If not, see <http://www.gnu.org/licenses/>.
91-#
92-# The full GNU General Public License is included in this distribution in
93-# the file called "COPYING".
94-#
95-# Contact Information:
96-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
97-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
98-#
99-################################################################################
100+# SPDX-License-Identifier: GPL-2.0
101+# Copyright(c) 2013 - 2020 Intel Corporation.
102
103+ifneq ($(KERNELRELEASE),)
104+# kbuild part of makefile
105 #
106-# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
107+# Makefile for the Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
108 #
109
110 ccflags-y += -I$(src)
111@@ -33,18 +12,170 @@ subdir-ccflags-y += -I$(src)
112
113 obj-$(CONFIG_I40E) += i40e.o
114
115-i40e-objs := i40e_main.o \
116- i40e_ethtool.o \
117- i40e_adminq.o \
118- i40e_common.o \
119- i40e_hmc.o \
120- i40e_lan_hmc.o \
121- i40e_nvm.o \
122- i40e_debugfs.o \
123- i40e_diag.o \
124- i40e_txrx.o \
125- i40e_ptp.o \
126- i40e_client.o \
127+i40e-objs := i40e_main.o \
128+ i40e_ethtool.o \
129+ i40e_adminq.o \
130+ i40e_common.o \
131+ i40e_hmc.o \
132+ i40e_lan_hmc.o \
133+ i40e_nvm.o \
134+ i40e_debugfs.o \
135+ i40e_diag.o \
136+ i40e_txrx.o \
137+ i40e_ptp.o \
138+ i40e_filters.o \
139+ i40e_ddp.o \
140+ i40e_client.o \
141 i40e_virtchnl_pf.o
142
143-i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
144+i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
145+i40e-y += kcompat.o
146+i40e-y += kcompat_vfd.o
147+
148+else # ifneq($(KERNELRELEASE),)
149+# normal makefile
150+
151+DRIVER := i40e
152+
153+# If the user just wants to print the help output, don't include common.mk or
154+# perform any other checks. This ensures that running "make help" will always
155+# work even if kernel-devel is not installed, or if the common.mk fails under
156+# any other error condition.
157+ifneq ($(MAKECMDGOALS),help)
158+include common.mk
159+
160+# i40e does not support building on kernels older than 2.6.32
161+$(call minimum_kver_check,2,6,32)
162+endif
163+
164+# Command to update initramfs or display a warning message
165+ifeq (${cmd_initrd},)
166+define cmd_initramfs
167+@echo "Unable to update initramfs. You may need to do this manaully."
168+endef
169+else
170+define cmd_initramfs
171+@echo "Updating initramfs..."
172+-@$(call cmd_initrd)
173+endef
174+endif
175+
176+###############
177+# Build rules #
178+###############
179+
180+# Standard compilation, with regular output
181+default:
182+ @+$(call kernelbuild,modules)
183+
184+# Noisy output, for extra debugging
185+noisy:
186+ @+$(call kernelbuild,modules,V=1)
187+
188+# Silence any output generated
189+silent:
190+ @+$(call kernelbuild,modules,>/dev/null)
191+
192+# Enable higher warning level
193+checkwarnings: clean
194+ @+$(call kernelbuild,modules,W=1)
195+
196+# Run sparse static analyzer
197+sparse: clean
198+ @+$(call kernelbuild,modules,C=2 CF="-D__CHECK_ENDIAN__ -Wbitwise -Wcontext")
199+
200+# Run coccicheck static analyzer
201+ccc: clean
202+ @+$(call kernelbuild,modules,coccicheck MODE=report)
203+
204+# Build manfiles
205+manfile:
206+ @gzip -c ../${DRIVER}.${MANSECTION} > ${DRIVER}.${MANSECTION}.gz
207+
208+# Clean the module subdirectories
209+clean:
210+ @+$(call kernelbuild,clean)
211+ @-rm -rf *.${MANSECTION}.gz *.ko
212+
213+mandocs_install: manfile
214+ @echo "Copying manpages..."
215+ @install -D -m 644 ${DRIVER}.${MANSECTION}.gz ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz
216+
217+# Install kernel module files. This target is called by the RPM specfile when
218+# generating binary RPMs, and is not expected to modify files outside of the
219+# build root. Thus, it must not update initramfs, or run depmod.
220+modules_install: default
221+ @echo "Installing modules..."
222+ @+$(call kernelbuild,modules_install)
223+
224+# After installing all the files, perform necessary work to ensure the system
225+# will use the new modules. This includes running depmod to update module
226+# dependencies and updating the initramfs image in case the module is loaded
227+# during early boot.
228+install: modules_install
229+ $(call cmd_depmod)
230+ $(call cmd_initramfs)
231+ $(MAKE) mandocs_install
232+
233+# Target used by rpmbuild spec file
234+rpm: modules_install
235+ $(MAKE) mandocs_install
236+
237+mandocs_uninstall:
238+ if [ -e ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz ] ; then \
239+ rm -f ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz ; \
240+ fi;
241+
242+# Remove installed module files. This target is called by the RPM specfile when
243+# generating binary RPMs, and is not expected to modify files outside of the
244+# build root. Thus, it must not update the initramfs image or run depmod.
245+modules_uninstall:
246+ rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_MOD_DIR}/${DRIVER}.ko;
247+
248+# After uninstalling all the files, perform necessary work to restore the
249+# system back to using the default kernel modules. This includes running depmod
250+# to update module dependencies and updating the initramfs image.
251+uninstall: modules_uninstall mandocs_uninstall
252+ $(call cmd_depmod)
253+ $(call cmd_initramfs)
254+
255+########
256+# Help #
257+########
258+help:
259+ @echo 'Build targets:'
260+ @echo ' default - Build module(s) with standard verbosity'
261+ @echo ' noisy - Build module(s) with V=1 verbosity -- very noisy'
262+ @echo ' silent - Build module(s), squelching all output'
263+ @echo ''
264+ @echo 'Static Analysis:'
265+ @echo ' checkwarnings - Clean, then build module(s) with W=1 warnings enabled'
266+ @echo ' sparse - Clean, then check module(s) using sparse'
267+ @echo ' ccc - Clean, then check module(s) using coccicheck'
268+ @echo ''
269+ @echo 'Cleaning targets:'
270+ @echo ' clean - Clean files generated by kernel module build'
271+ @echo ''
272+ @echo 'Other targets:'
273+ @echo ' manfile - Generate a gzipped manpage'
274+ @echo ' modules_install - install the module(s) only'
275+ @echo ' mandocs_install - install the manpage only'
276+ @echo ' install - Build then install the module(s) and manpage, and update initramfs'
277+ @echo ' modules_uninstall - uninstall the module(s) only'
278+ @echo ' mandocs_uninstall - uninstall the manpage only'
279+ @echo ' uninstall - Uninstall the module(s) and manpage, and update initramfs'
280+ @echo ' help - Display this help message'
281+ @echo ''
282+ @echo 'Variables:'
283+ @echo ' LINUX_VERSION - Debug tool to force kernel LINUX_VERSION_CODE. Use at your own risk.'
284+ @echo ' W=N - Kernel variable for setting warning levels'
285+ @echo ' V=N - Kernel variable for setting output verbosity'
286+ @echo ' INSTALL_MOD_PATH - Add prefix for the module and manpage installation path'
287+ @echo ' INSTALL_MOD_DIR - Use module directory other than updates/drivers/net/ethernet/intel/${DRIVER}'
288+ @echo ' KSRC - Specifies the full path to the kernel tree to build against'
289+ @echo ' Other variables may be available for tuning make process, see'
290+ @echo ' Kernel Kbuild documentation for more information'
291+
292+.PHONY: default noisy clean manfile silent sparse ccc install uninstall help
293+
294+endif # ifneq($(KERNELRELEASE),)
295diff --git a/drivers/net/ethernet/intel/i40e/Module.supported b/drivers/net/ethernet/intel/i40e/Module.supported
296new file mode 100644
297index 000000000..c0e31cf7f
298--- /dev/null
299+++ b/drivers/net/ethernet/intel/i40e/Module.supported
300@@ -0,0 +1 @@
301+i40e.ko external
302diff --git a/drivers/net/ethernet/intel/i40e/common.mk b/drivers/net/ethernet/intel/i40e/common.mk
303new file mode 100644
304index 000000000..1b95715e7
305--- /dev/null
306+++ b/drivers/net/ethernet/intel/i40e/common.mk
307@@ -0,0 +1,305 @@
308+# SPDX-License-Identifier: GPL-2.0
309+# Copyright(c) 2013 - 2020 Intel Corporation.
310+
311+# SPDX-License-Identifier: GPL-2.0-only
312+# Copyright (C) 2015-2019 Intel Corporation
313+#
314+# common Makefile rules useful for out-of-tree Linux driver builds
315+#
316+# Usage: include common.mk
317+#
318+# After including, you probably want to add a minimum_kver_check call
319+#
320+# Required Variables:
321+# DRIVER
322+# -- Set to the lowercase driver name
323+
324+#####################
325+# Helpful functions #
326+#####################
327+
328+readlink = $(shell readlink -f ${1})
329+
330+# helper functions for converting kernel version to version codes
331+get_kver = $(or $(word ${2},$(subst ., ,${1})),0)
332+get_kvercode = $(shell [ "${1}" -ge 0 -a "${1}" -le 255 2>/dev/null ] && \
333+ [ "${2}" -ge 0 -a "${2}" -le 255 2>/dev/null ] && \
334+ [ "${3}" -ge 0 -a "${3}" -le 255 2>/dev/null ] && \
335+ printf %d $$(( ( ${1} << 16 ) + ( ${2} << 8 ) + ( ${3} ) )) )
336+
337+################
338+# depmod Macro #
339+################
340+
341+cmd_depmod = /sbin/depmod $(if ${SYSTEM_MAP_FILE},-e -F ${SYSTEM_MAP_FILE}) \
342+ $(if $(strip ${INSTALL_MOD_PATH}),-b ${INSTALL_MOD_PATH}) \
343+ -a ${KVER}
344+
345+################
346+# dracut Macro #
347+################
348+
349+cmd_initrd := $(shell \
350+ if which dracut > /dev/null 2>&1 ; then \
351+ echo "dracut --force"; \
352+ elif which update-initramfs > /dev/null 2>&1 ; then \
353+ echo "update-initramfs -u"; \
354+ fi )
355+
356+#####################
357+# Environment tests #
358+#####################
359+
360+DRIVER_UPPERCASE := $(shell echo ${DRIVER} | tr "[:lower:]" "[:upper:]")
361+
362+ifeq (,${BUILD_KERNEL})
363+BUILD_KERNEL=$(shell uname -r)
364+endif
365+
366+# Kernel Search Path
367+# All the places we look for kernel source
368+KSP := /lib/modules/${BUILD_KERNEL}/source \
369+ /lib/modules/${BUILD_KERNEL}/build \
370+ /usr/src/linux-${BUILD_KERNEL} \
371+ /usr/src/linux-$(${BUILD_KERNEL} | sed 's/-.*//') \
372+ /usr/src/kernel-headers-${BUILD_KERNEL} \
373+ /usr/src/kernel-source-${BUILD_KERNEL} \
374+ /usr/src/linux-$(${BUILD_KERNEL} | sed 's/\([0-9]*\.[0-9]*\)\..*/\1/') \
375+ /usr/src/linux \
376+ /usr/src/kernels/${BUILD_KERNEL} \
377+ /usr/src/kernels
378+
379+# prune the list down to only values that exist and have an include/linux
380+# sub-directory. We can't use include/config because some older kernels don't
381+# have this.
382+test_dir = $(shell [ -e ${dir}/include/linux ] && echo ${dir})
383+KSP := $(foreach dir, ${KSP}, ${test_dir})
384+
385+# we will use this first valid entry in the search path
386+ifeq (,${KSRC})
387+ KSRC := $(firstword ${KSP})
388+endif
389+
390+ifeq (,${KSRC})
391+ $(warning *** Kernel header files not in any of the expected locations.)
392+ $(warning *** Install the appropriate kernel development package, e.g.)
393+ $(error kernel-devel, for building kernel modules and try again)
394+else
395+ifeq (/lib/modules/${BUILD_KERNEL}/source, ${KSRC})
396+ KOBJ := /lib/modules/${BUILD_KERNEL}/build
397+else
398+ KOBJ := ${KSRC}
399+endif
400+endif
401+
402+# Version file Search Path
403+VSP := ${KOBJ}/include/generated/utsrelease.h \
404+ ${KOBJ}/include/linux/utsrelease.h \
405+ ${KOBJ}/include/linux/version.h \
406+ ${KOBJ}/include/generated/uapi/linux/version.h \
407+ /boot/vmlinuz.version.h
408+
409+# Config file Search Path
410+CSP := ${KOBJ}/include/generated/autoconf.h \
411+ ${KOBJ}/include/linux/autoconf.h \
412+ /boot/vmlinuz.autoconf.h
413+
414+# System.map Search Path (for depmod)
415+MSP := ${KSRC}/System.map \
416+ /boot/System.map-${BUILD_KERNEL}
417+
418+# prune the lists down to only files that exist
419+test_file = $(shell [ -f ${file} ] && echo ${file})
420+VSP := $(foreach file, ${VSP}, ${test_file})
421+CSP := $(foreach file, ${CSP}, ${test_file})
422+MSP := $(foreach file, ${MSP}, ${test_file})
423+
424+
425+# and use the first valid entry in the Search Paths
426+ifeq (,${VERSION_FILE})
427+ VERSION_FILE := $(firstword ${VSP})
428+endif
429+
430+ifeq (,${CONFIG_FILE})
431+ CONFIG_FILE := $(firstword ${CSP})
432+endif
433+
434+ifeq (,${SYSTEM_MAP_FILE})
435+ SYSTEM_MAP_FILE := $(firstword ${MSP})
436+endif
437+
438+ifeq (,$(wildcard ${VERSION_FILE}))
439+ $(error Linux kernel source not configured - missing version header file)
440+endif
441+
442+ifeq (,$(wildcard ${CONFIG_FILE}))
443+ $(error Linux kernel source not configured - missing autoconf.h)
444+endif
445+
446+ifeq (,$(wildcard ${SYSTEM_MAP_FILE}))
447+ $(warning Missing System.map file - depmod will not check for missing symbols)
448+endif
449+
450+ifneq ($(words $(subst :, ,$(CURDIR))), 1)
451+ $(error Sources directory '$(CURDIR)' cannot contain spaces nor colons. Rename directory or move sources to another path)
452+endif
453+
454+#######################
455+# Linux Version Setup #
456+#######################
457+
458+# The following command line parameter is intended for development of KCOMPAT
459+# against upstream kernels such as net-next which have broken or non-updated
460+# version codes in their Makefile. They are intended for debugging and
461+# development purpose only so that we can easily test new KCOMPAT early. If you
462+# don't know what this means, you do not need to set this flag. There is no
463+# arcane magic here.
464+
465+# Convert LINUX_VERSION into LINUX_VERSION_CODE
466+ifneq (${LINUX_VERSION},)
467+ LINUX_VERSION_CODE=$(call get_kvercode,$(call get_kver,${LINUX_VERSION},1),$(call get_kver,${LINUX_VERSION},2),$(call get_kver,${LINUX_VERSION},3))
468+endif
469+
470+# Honor LINUX_VERSION_CODE
471+ifneq (${LINUX_VERSION_CODE},)
472+ $(warning Forcing target kernel to build with LINUX_VERSION_CODE of ${LINUX_VERSION_CODE}$(if ${LINUX_VERSION}, from LINUX_VERSION=${LINUX_VERSION}). Do this at your own risk.)
473+ KVER_CODE := ${LINUX_VERSION_CODE}
474+ EXTRA_CFLAGS += -DLINUX_VERSION_CODE=${LINUX_VERSION_CODE}
475+endif
476+
477+# Determine SLE_LOCALVERSION_CODE for SuSE SLE >= 11 (needed by kcompat)
478+# This assumes SuSE will continue setting CONFIG_LOCALVERSION to the string
479+# appended to the stable kernel version on which their kernel is based with
480+# additional versioning information (up to 3 numbers), a possible abbreviated
481+# git SHA1 commit id and a kernel type, e.g. CONFIG_LOCALVERSION=-1.2.3-default
482+# or CONFIG_LOCALVERSION=-999.gdeadbee-default
483+ifeq (1,$(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
484+ grep -m 1 CONFIG_SUSE_KERNEL | awk '{ print $$3 }'))
485+
486+ifneq (10,$(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
487+ grep -m 1 CONFIG_SLE_VERSION | awk '{ print $$3 }'))
488+
489+ LOCALVERSION := $(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
490+ grep -m 1 CONFIG_LOCALVERSION | awk '{ print $$3 }' |\
491+ cut -d'-' -f2 | sed 's/\.g[[:xdigit:]]\{7\}//')
492+ LOCALVER_A := $(shell echo ${LOCALVERSION} | cut -d'.' -f1)
493+ LOCALVER_B := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f2)
494+ LOCALVER_C := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f3)
495+ SLE_LOCALVERSION_CODE := $(shell expr ${LOCALVER_A} \* 65536 + \
496+ 0${LOCALVER_B} \* 256 + 0${LOCALVER_C})
497+ EXTRA_CFLAGS += -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE}
498+endif
499+endif
500+
501+EXTRA_CFLAGS += ${CFLAGS_EXTRA}
502+
503+# get the kernel version - we use this to find the correct install path
504+KVER := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VERSION_FILE} | grep UTS_RELEASE | \
505+ awk '{ print $$3 }' | sed 's/\"//g')
506+
507+# assume source symlink is the same as build, otherwise adjust KOBJ
508+ifneq (,$(wildcard /lib/modules/${KVER}/build))
509+ ifneq (${KSRC},$(call readlink,/lib/modules/${KVER}/build))
510+ KOBJ=/lib/modules/${KVER}/build
511+ endif
512+endif
513+
514+ifeq (${KVER_CODE},)
515+ KVER_CODE := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VSP} 2> /dev/null |\
516+ grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g')
517+endif
518+
519+# minimum_kver_check
520+#
521+# helper function to provide uniform output for different drivers to abort the
522+# build based on kernel version check. Usage: "$(call minimum_kver_check,2,6,XX)".
523+define _minimum_kver_check
524+ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,${1},${2},${3}) ]; echo "$$?"))
525+ $$(warning *** Aborting the build.)
526+ $$(error This driver is not supported on kernel versions older than ${1}.${2}.${3})
527+endif
528+endef
529+minimum_kver_check = $(eval $(call _minimum_kver_check,${1},${2},${3}))
530+
531+################
532+# Manual Pages #
533+################
534+
535+MANSECTION = 7
536+
537+ifeq (,${MANDIR})
538+ # find the best place to install the man page
539+ MANPATH := $(shell (manpath 2>/dev/null || echo $MANPATH) | sed 's/:/ /g')
540+ ifneq (,${MANPATH})
541+ # test based on inclusion in MANPATH
542+ test_dir = $(findstring ${dir}, ${MANPATH})
543+ else
544+ # no MANPATH, test based on directory existence
545+ test_dir = $(shell [ -e ${dir} ] && echo ${dir})
546+ endif
547+ # our preferred install path
548+ # should /usr/local/man be in here ?
549+ MANDIR := /usr/share/man /usr/man
550+ MANDIR := $(foreach dir, ${MANDIR}, ${test_dir})
551+ MANDIR := $(firstword ${MANDIR})
552+endif
553+ifeq (,${MANDIR})
554+ # fallback to /usr/man
555+ MANDIR := /usr/man
556+endif
557+
558+####################
559+# CCFLAGS variable #
560+####################
561+
562+# set correct CCFLAGS variable for kernels older than 2.6.24
563+ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,2,6,24) ]; echo $$?))
564+CCFLAGS_VAR := EXTRA_CFLAGS
565+else
566+CCFLAGS_VAR := ccflags-y
567+endif
568+
569+#################
570+# KBUILD_OUTPUT #
571+#################
572+
573+# Only set KBUILD_OUTPUT if the real paths of KOBJ and KSRC differ
574+ifneq ($(call readlink,${KSRC}),$(call readlink,${KOBJ}))
575+export KBUILD_OUTPUT ?= ${KOBJ}
576+endif
577+
578+############################
579+# Module Install Directory #
580+############################
581+
582+# Default to using updates/drivers/net/ethernet/intel/ path, since depmod since
583+# v3.1 defaults to checking updates folder first, and only checking kernels/
584+# and extra afterwards. We use updates instead of kernel/* due to desire to
585+# prevent over-writing built-in modules files.
586+export INSTALL_MOD_DIR ?= updates/drivers/net/ethernet/intel/${DRIVER}
587+
588+######################
589+# Kernel Build Macro #
590+######################
591+
592+# kernel build function
593+# ${1} is the kernel build target
594+# ${2} may contain any extra rules to pass directly to the sub-make process
595+#
596+# This function is expected to be executed by
597+# @+$(call kernelbuild,<target>,<extra parameters>)
598+# from within a Makefile recipe.
599+#
600+# The following variables are expected to be defined for its use:
601+# GCC_I_SYS -- if set it will enable use of gcc-i-sys.sh wrapper to use -isystem
602+# CCFLAGS_VAR -- the CCFLAGS variable to set extra CFLAGS
603+# EXTRA_CFLAGS -- a set of extra CFLAGS to pass into the ccflags-y variable
604+# KSRC -- the location of the kernel source tree to build against
605+# DRIVER_UPPERCASE -- the uppercase name of the kernel module, set from DRIVER
606+#
607+kernelbuild = ${MAKE} $(if ${GCC_I_SYS},CC="${GCC_I_SYS}") \
608+ ${CCFLAGS_VAR}="${EXTRA_CFLAGS}" \
609+ -C "${KSRC}" \
610+ CONFIG_${DRIVER_UPPERCASE}=m \
611+ M="${CURDIR}" \
612+ ${2} ${1}
613diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
614index d0c1bf544..776bb99b6 100644
615--- a/drivers/net/ethernet/intel/i40e/i40e.h
616+++ b/drivers/net/ethernet/intel/i40e/i40e.h
617@@ -1,28 +1,5 @@
618-/*******************************************************************************
619- *
620- * Intel Ethernet Controller XL710 Family Linux Driver
621- * Copyright(c) 2013 - 2017 Intel Corporation.
622- *
623- * This program is free software; you can redistribute it and/or modify it
624- * under the terms and conditions of the GNU General Public License,
625- * version 2, as published by the Free Software Foundation.
626- *
627- * This program is distributed in the hope it will be useful, but WITHOUT
628- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
629- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
630- * more details.
631- *
632- * You should have received a copy of the GNU General Public License along
633- * with this program. If not, see <http://www.gnu.org/licenses/>.
634- *
635- * The full GNU General Public License is included in this distribution in
636- * the file called "COPYING".
637- *
638- * Contact Information:
639- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
640- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
641- *
642- ******************************************************************************/
643+/* SPDX-License-Identifier: GPL-2.0 */
644+/* Copyright(c) 2013 - 2020 Intel Corporation. */
645
646 #ifndef _I40E_H_
647 #define _I40E_H_
648@@ -36,10 +13,9 @@
649 #include <linux/aer.h>
650 #include <linux/netdevice.h>
651 #include <linux/ioport.h>
652-#include <linux/iommu.h>
653 #include <linux/slab.h>
654 #include <linux/list.h>
655-#include <linux/hashtable.h>
656+#include <linux/hash.h>
657 #include <linux/string.h>
658 #include <linux/in.h>
659 #include <linux/ip.h>
660@@ -47,36 +23,61 @@
661 #include <linux/pkt_sched.h>
662 #include <linux/ipv6.h>
663 #include <net/checksum.h>
664+#include <net/ipv6.h>
665 #include <net/ip6_checksum.h>
666+#ifdef SIOCETHTOOL
667 #include <linux/ethtool.h>
668+#endif
669 #include <linux/if_vlan.h>
670 #include <linux/if_bridge.h>
671+#include "kcompat.h"
672+#ifdef HAVE_IOMMU_PRESENT
673+#include <linux/iommu.h>
674+#endif
675+#ifdef HAVE_SCTP
676+#include <linux/sctp.h>
677+#endif
678+#ifdef HAVE_PTP_1588_CLOCK
679 #include <linux/clocksource.h>
680 #include <linux/net_tstamp.h>
681 #include <linux/ptp_clock_kernel.h>
682+#endif /* HAVE_PTP_1588_CLOCK */
683+#ifdef __TC_MQPRIO_MODE_MAX
684+#include <net/pkt_cls.h>
685+#endif
686 #include "i40e_type.h"
687 #include "i40e_prototype.h"
688 #include "i40e_client.h"
689-#include <linux/avf/virtchnl.h>
690+#include "virtchnl.h"
691 #include "i40e_virtchnl_pf.h"
692 #include "i40e_txrx.h"
693 #include "i40e_dcb.h"
694
695+#ifdef HAVE_XDP_SUPPORT
696+#include <linux/bpf_trace.h>
697+#endif
698+
699 /* Useful i40e defaults */
700 #define I40E_MAX_VEB 16
701
702 #define I40E_MAX_NUM_DESCRIPTORS 4096
703 #define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
704 #define I40E_DEFAULT_NUM_DESCRIPTORS 512
705+#define L4_MODE_UDP 0
706+#define L4_MODE_TCP 1
707+#define L4_MODE_BOTH 2
708+#define L4_MODE_DISABLED -1
709+bool i40e_is_l4mode_enabled(void);
710 #define I40E_REQ_DESCRIPTOR_MULTIPLE 32
711 #define I40E_MIN_NUM_DESCRIPTORS 64
712 #define I40E_MIN_MSIX 2
713 #define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
714-#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */
715+#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF, 16 VMDQ */
716 /* max 16 qps */
717 #define i40e_default_queues_per_vmdq(pf) \
718 (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1)
719 #define I40E_DEFAULT_QUEUES_PER_VF 4
720+#define I40E_MAX_VF_QUEUES 16
721 #define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
722 #define i40e_pf_get_max_q_per_tc(pf) \
723 (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64)
724@@ -85,6 +86,10 @@
725 #define I40E_MAX_AQ_BUF_SIZE 4096
726 #define I40E_AQ_LEN 256
727 #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
728+/*
729+ * If I40E_MAX_USER_PRIORITY is updated please also update
730+ * I40E_CLIENT_MAX_USER_PRIORITY in i40e_client.h and i40evf_client.h
731+ */
732 #define I40E_MAX_USER_PRIORITY 8
733 #define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
734 #define I40E_DEFAULT_MSG_ENABLE 4
735@@ -114,7 +119,7 @@
736 #define I40E_CURRENT_NVM_VERSION_LO 0x40
737
738 #define I40E_RX_DESC(R, i) \
739- (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
740+ (&(((union i40e_rx_desc *)((R)->desc))[i]))
741 #define I40E_TX_DESC(R, i) \
742 (&(((struct i40e_tx_desc *)((R)->desc))[i]))
743 #define I40E_TX_CTXTDESC(R, i) \
744@@ -125,6 +130,10 @@
745 /* default to trying for four seconds */
746 #define I40E_TRY_LINK_TIMEOUT (4 * HZ)
747
748+/* BW rate limiting */
749+#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
750+#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */
751+
752 /* driver state flags */
753 enum i40e_state_t {
754 __I40E_TESTING,
755@@ -136,6 +145,8 @@ enum i40e_state_t {
756 __I40E_MDD_EVENT_PENDING,
757 __I40E_VFLR_EVENT_PENDING,
758 __I40E_RESET_RECOVERY_PENDING,
759+ __I40E_TIMEOUT_RECOVERY_PENDING,
760+ __I40E_MISC_IRQ_REQUESTED,
761 __I40E_RESET_INTR_RECEIVED,
762 __I40E_REINIT_REQUESTED,
763 __I40E_PF_RESET_REQUESTED,
764@@ -144,17 +155,31 @@ enum i40e_state_t {
765 __I40E_EMP_RESET_REQUESTED,
766 __I40E_EMP_RESET_INTR_RECEIVED,
767 __I40E_SUSPENDED,
768- __I40E_PTP_TX_IN_PROGRESS,
769 __I40E_BAD_EEPROM,
770+ __I40E_DEBUG_MODE,
771 __I40E_DOWN_REQUESTED,
772 __I40E_FD_FLUSH_REQUESTED,
773+ __I40E_FD_ATR_AUTO_DISABLED,
774+ __I40E_FD_SB_AUTO_DISABLED,
775 __I40E_RESET_FAILED,
776 __I40E_PORT_SUSPENDED,
777+ __I40E_PTP_TX_IN_PROGRESS,
778 __I40E_VF_DISABLE,
779+ __I40E_RECOVERY_MODE,
780+ __I40E_MACVLAN_SYNC_PENDING,
781+ __I40E_UDP_FILTER_SYNC_PENDING,
782+ __I40E_TEMP_LINK_POLLING,
783+ __I40E_CLIENT_SERVICE_REQUESTED,
784+ __I40E_CLIENT_L2_CHANGE,
785+ __I40E_CLIENT_RESET,
786+ __I40E_VIRTCHNL_OP_PENDING,
787+ __I40E_VFS_RELEASING,
788 /* This must be last as it determines the size of the BITMAP */
789 __I40E_STATE_SIZE__,
790 };
791
792+#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
793+
794 /* VSI state flags */
795 enum i40e_vsi_state_t {
796 __I40E_VSI_DOWN,
797@@ -163,6 +188,7 @@ enum i40e_vsi_state_t {
798 __I40E_VSI_OVERFLOW_PROMISC,
799 __I40E_VSI_REINIT_REQUESTED,
800 __I40E_VSI_DOWN_REQUESTED,
801+ __I40E_VSI_RELEASING,
802 /* This must be last as it determines the size of the BITMAP */
803 __I40E_VSI_STATE_SIZE__,
804 };
805@@ -183,6 +209,10 @@ struct i40e_lump_tracking {
806
807 #define I40E_DEFAULT_ATR_SAMPLE_RATE 20
808 #define I40E_FDIR_MAX_RAW_PACKET_SIZE 512
809+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
810+#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
811+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
812+#define I40E_IP_DUMMY_PACKET_LEN 34
813 #define I40E_FDIR_BUFFER_FULL_MARGIN 10
814 #define I40E_FDIR_BUFFER_HEAD_ROOM 32
815 #define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
816@@ -205,10 +235,20 @@ enum i40e_fd_stat_idx {
817 #define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
818 (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
819
820+/* get PTP pins for ioctl */
821+#define SIOCGPINS (SIOCDEVPRIVATE + 0)
822+/* set PTP pins for ioctl */
823+#define SIOCSPINS (SIOCDEVPRIVATE + 1)
824+
825 /* The following structure contains the data parsed from the user-defined
826 * field of the ethtool_rx_flow_spec structure.
827 */
828 struct i40e_rx_flow_userdef {
829+ bool cloud_filter;
830+ bool tenant_id_valid;
831+ u32 tenant_id;
832+ bool tunnel_type_valid;
833+ u8 tunnel_type;
834 bool flex_filter;
835 u16 flex_word;
836 u16 flex_offset;
837@@ -216,7 +256,7 @@ struct i40e_rx_flow_userdef {
838
839 struct i40e_fdir_filter {
840 struct hlist_node fdir_node;
841- /* filter ipnut set */
842+ /* filter input set */
843 u8 flow_type;
844 u8 ip4_proto;
845 /* TX packet view of src and dst */
846@@ -233,7 +273,6 @@ struct i40e_fdir_filter {
847
848 /* filter control */
849 u16 q_index;
850- u8 flex_off;
851 u8 pctype;
852 u16 dest_vsi;
853 u8 dest_ctl;
854@@ -242,6 +281,65 @@ struct i40e_fdir_filter {
855 u32 fd_id;
856 };
857
858+#define I40E_CLOUD_FIELD_OMAC BIT(0)
859+#define I40E_CLOUD_FIELD_IMAC BIT(1)
860+#define I40E_CLOUD_FIELD_IVLAN BIT(2)
861+#define I40E_CLOUD_FIELD_TEN_ID BIT(3)
862+#define I40E_CLOUD_FIELD_IIP BIT(4)
863+
864+#define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC
865+#define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC
866+#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN (I40E_CLOUD_FIELD_IMAC | \
867+ I40E_CLOUD_FIELD_IVLAN)
868+#define I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
869+ I40E_CLOUD_FIELD_TEN_ID)
870+#define I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC (I40E_CLOUD_FIELD_OMAC | \
871+ I40E_CLOUD_FIELD_IMAC | \
872+ I40E_CLOUD_FIELD_TEN_ID)
873+#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
874+ I40E_CLOUD_FIELD_IVLAN | \
875+ I40E_CLOUD_FIELD_TEN_ID)
876+#define I40E_CLOUD_FILTER_FLAGS_IIP I40E_CLOUD_FIELD_IIP
877+
878+struct i40e_cloud_filter {
879+ struct hlist_node cloud_node;
880+ unsigned long cookie;
881+ /* cloud filter input set follows */
882+ u8 outer_mac[ETH_ALEN];
883+ u8 inner_mac[ETH_ALEN];
884+ __be16 inner_vlan;
885+ __be32 inner_ip[4];
886+ u16 queue_id;
887+ u32 id;
888+ /* cloud filter input set follows */
889+ u8 dst_mac[ETH_ALEN];
890+ u8 src_mac[ETH_ALEN];
891+ __be16 vlan_id;
892+ u16 seid; /* filter control */
893+ __be16 dst_port;
894+ __be16 src_port;
895+ u32 tenant_id;
896+ union {
897+ struct {
898+ struct in_addr dst_ip;
899+ struct in_addr src_ip;
900+ } v4;
901+ struct {
902+ struct in6_addr dst_ip6;
903+ struct in6_addr src_ip6;
904+ } v6;
905+ } ip;
906+#define dst_ipv6 ip.v6.dst_ip6.s6_addr32
907+#define src_ipv6 ip.v6.src_ip6.s6_addr32
908+#define dst_ipv4 ip.v4.dst_ip.s_addr
909+#define src_ipv4 ip.v4.src_ip.s_addr
910+ u16 n_proto; /* Ethernet Protocol */
911+ u8 ip_proto; /* IPPROTO value */
912+ u8 flags;
913+#define I40E_CLOUD_TNL_TYPE_NONE 0xff
914+ u8 tunnel_type;
915+};
916+
917 #define I40E_ETH_P_LLDP 0x88cc
918
919 #define I40E_DCB_PRIO_TYPE_STRICT 0
920@@ -261,10 +359,34 @@ struct i40e_tc_configuration {
921 struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
922 };
923
924+#define I40E_UDP_PORT_INDEX_UNUSED 255
925 struct i40e_udp_port_config {
926 /* AdminQ command interface expects port number in Host byte order */
927 u16 port;
928 u8 type;
929+ u8 filter_index;
930+};
931+
932+#define I40_DDP_FLASH_REGION 100
933+#define I40E_PROFILE_INFO_SIZE 48
934+#define I40E_MAX_PROFILE_NUM 16
935+#define I40E_PROFILE_LIST_SIZE \
936+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4)
937+#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/"
938+
939+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
940+ bool is_add);
941+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
942+
943+struct i40e_ddp_profile_list {
944+ u32 p_count;
945+ struct i40e_profile_info p_info[0];
946+};
947+
948+struct i40e_ddp_old_profile_list {
949+ struct list_head list;
950+ size_t old_ddp_size;
951+ u8 old_ddp_buf[0];
952 };
953
954 /* macros related to FLX_PIT */
955@@ -330,12 +452,49 @@ struct i40e_udp_port_config {
956 I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \
957 I40E_FLEX_56_MASK | I40E_FLEX_57_MASK)
958
959+#define I40E_QINT_TQCTL_VAL(qp, vector, nextq_type) \
960+ (I40E_QINT_TQCTL_CAUSE_ENA_MASK | \
961+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | \
962+ ((vector) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | \
963+ ((qp) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | \
964+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT))
965+
966+#define I40E_QINT_RQCTL_VAL(qp, vector, nextq_type) \
967+ (I40E_QINT_RQCTL_CAUSE_ENA_MASK | \
968+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | \
969+ ((vector) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | \
970+ ((qp) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | \
971+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT))
972+
973 struct i40e_flex_pit {
974 struct list_head list;
975 u16 src_offset;
976 u8 pit_index;
977 };
978
979+struct i40e_channel {
980+ struct list_head list;
981+ bool initialized;
982+ u8 type;
983+ u16 vsi_number; /* Assigned VSI number from AQ 'Add VSI' response */
984+ u16 stat_counter_idx;
985+ u16 base_queue;
986+ u16 num_queue_pairs; /* Requested by user */
987+ u16 seid;
988+
989+ u8 enabled_tc;
990+ struct i40e_aqc_vsi_properties_data info;
991+
992+ u64 max_tx_rate;
993+
994+ /* track this channel belongs to which VSI */
995+ struct i40e_vsi *parent_vsi;
996+};
997+
998+#ifdef HAVE_PTP_1588_CLOCK
999+struct i40e_ptp_pins_settings;
1000+#endif /* HAVE_PTP_1588_CLOCK */
1001+
1002 /* struct that defines the Ethernet device */
1003 struct i40e_pf {
1004 struct pci_dev *pdev;
1005@@ -390,6 +549,9 @@ struct i40e_pf {
1006 struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
1007 u16 pending_udp_bitmap;
1008
1009+ struct hlist_head cloud_filter_list;
1010+ u16 num_cloud_filters;
1011+
1012 enum i40e_interrupt_policy int_policy;
1013 u16 rx_itr_default;
1014 u16 tx_itr_default;
1015@@ -401,55 +563,93 @@ struct i40e_pf {
1016 struct timer_list service_timer;
1017 struct work_struct service_task;
1018
1019- u64 hw_features;
1020-#define I40E_HW_RSS_AQ_CAPABLE BIT_ULL(0)
1021-#define I40E_HW_128_QP_RSS_CAPABLE BIT_ULL(1)
1022-#define I40E_HW_ATR_EVICT_CAPABLE BIT_ULL(2)
1023-#define I40E_HW_WB_ON_ITR_CAPABLE BIT_ULL(3)
1024-#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(4)
1025-#define I40E_HW_NO_PCI_LINK_CHECK BIT_ULL(5)
1026-#define I40E_HW_100M_SGMII_CAPABLE BIT_ULL(6)
1027-#define I40E_HW_NO_DCB_SUPPORT BIT_ULL(7)
1028-#define I40E_HW_USE_SET_LLDP_MIB BIT_ULL(8)
1029-#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT_ULL(9)
1030-#define I40E_HW_PTP_L4_CAPABLE BIT_ULL(10)
1031-#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(11)
1032-#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT_ULL(12)
1033-#define I40E_HW_HAVE_CRT_RETIMER BIT_ULL(13)
1034-#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT_ULL(14)
1035-#define I40E_HW_PHY_CONTROLS_LEDS BIT_ULL(15)
1036-#define I40E_HW_STOP_FW_LLDP BIT_ULL(16)
1037-#define I40E_HW_PORT_ID_VALID BIT_ULL(17)
1038-#define I40E_HW_RESTART_AUTONEG BIT_ULL(18)
1039+ u32 hw_features;
1040+#define I40E_HW_RSS_AQ_CAPABLE BIT(0)
1041+#define I40E_HW_128_QP_RSS_CAPABLE BIT(1)
1042+#define I40E_HW_ATR_EVICT_CAPABLE BIT(2)
1043+#define I40E_HW_WB_ON_ITR_CAPABLE BIT(3)
1044+#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT(4)
1045+#define I40E_HW_NO_PCI_LINK_CHECK BIT(5)
1046+#define I40E_HW_100M_SGMII_CAPABLE BIT(6)
1047+#define I40E_HW_NO_DCB_SUPPORT BIT(7)
1048+#define I40E_HW_USE_SET_LLDP_MIB BIT(8)
1049+#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT(9)
1050+#define I40E_HW_PTP_L4_CAPABLE BIT(10)
1051+#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT(11)
1052+#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT(12)
1053+#define I40E_HW_HAVE_CRT_RETIMER BIT(13)
1054+#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT(14)
1055+#define I40E_HW_PHY_CONTROLS_LEDS BIT(15)
1056+#define I40E_HW_STOP_FW_LLDP BIT(16)
1057+#define I40E_HW_PORT_ID_VALID BIT(17)
1058+#define I40E_HW_RESTART_AUTONEG BIT(18)
1059
1060 u64 flags;
1061-#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
1062-#define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
1063-#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
1064-#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(4)
1065-#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
1066-#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
1067-#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10)
1068-#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
1069-#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
1070-#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19)
1071-#define I40E_FLAG_DCB_ENABLED BIT_ULL(20)
1072-#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21)
1073-#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22)
1074-#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(23)
1075-#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(24)
1076-#define I40E_FLAG_PTP BIT_ULL(25)
1077-#define I40E_FLAG_MFP_ENABLED BIT_ULL(26)
1078-#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27)
1079-#define I40E_FLAG_DCB_CAPABLE BIT_ULL(29)
1080-#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(37)
1081-#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39)
1082-#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
1083-#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
1084-#define I40E_FLAG_CLIENT_RESET BIT_ULL(54)
1085-#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55)
1086-#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(56)
1087-#define I40E_FLAG_LEGACY_RX BIT_ULL(58)
1088+#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
1089+#define I40E_FLAG_MSI_ENABLED BIT(1)
1090+#define I40E_FLAG_MSIX_ENABLED BIT(2)
1091+#define I40E_FLAG_RSS_ENABLED BIT(3)
1092+#define I40E_FLAG_VMDQ_ENABLED BIT(4)
1093+#define I40E_FLAG_SRIOV_ENABLED BIT(5)
1094+#define I40E_FLAG_DCB_CAPABLE BIT(6)
1095+#define I40E_FLAG_DCB_ENABLED BIT(7)
1096+#define I40E_FLAG_FD_SB_ENABLED BIT(8)
1097+#define I40E_FLAG_FD_ATR_ENABLED BIT(9)
1098+#define I40E_FLAG_MFP_ENABLED BIT(10)
1099+#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(11)
1100+#define I40E_FLAG_VEB_MODE_ENABLED BIT(12)
1101+#define I40E_FLAG_VEB_STATS_ENABLED BIT(13)
1102+#define I40E_FLAG_LINK_POLLING_ENABLED BIT(14)
1103+#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(15)
1104+#define I40E_FLAG_LEGACY_RX BIT(16)
1105+#ifdef HAVE_PTP_1588_CLOCK
1106+#define I40E_FLAG_PTP BIT(17)
1107+#endif /* HAVE_PTP_1588_CLOCK */
1108+#define I40E_FLAG_IWARP_ENABLED BIT(18)
1109+#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(19)
1110+#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(20)
1111+#define I40E_FLAG_TC_MQPRIO BIT(21)
1112+#define I40E_FLAG_FD_SB_INACTIVE BIT(22)
1113+#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23)
1114+#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
1115+#define I40E_FLAG_RS_FEC BIT(25)
1116+#define I40E_FLAG_BASE_R_FEC BIT(26)
1117+#define I40E_FLAG_TOTAL_PORT_SHUTDOWN BIT(27)
1118+
1119+/* GPIO defines used by PTP */
1120+#define I40E_SDP3_2 18
1121+#define I40E_SDP3_3 19
1122+#define I40E_GPIO_4 20
1123+#define I40E_LED2_0 26
1124+#define I40E_LED2_1 27
1125+#define I40E_LED3_0 28
1126+#define I40E_LED3_1 29
1127+#define I40E_GPIO_SET_HIGH BIT(5)
1128+#define I40E_GPIO_SET_LOW 0
1129+#define I40E_DRIVE_SDP_ON BIT(6)
1130+#define I40E_DRIVE_SDP_OFF 0
1131+#define I40E_GPIO_PRT_NUM_0 0
1132+#define I40E_GPIO_PRT_NUM_1 1
1133+#define I40E_GPIO_PRT_NUM_2 2
1134+#define I40E_GPIO_PRT_NUM_3 3
1135+#define I40E_GPIO_RESERVED_2 BIT(2)
1136+#define I40E_GPIO_PIN_DIR_OUT BIT(4)
1137+#define I40E_GPIO_PIN_DIR_IN 0
1138+#define I40E_GPIO_TRI_CTL_OFF BIT(5)
1139+#define I40E_GPIO_CTL_OUT_HIGH BIT(6)
1140+#define I40E_GPIO_TIMESYNC_0 3 << 7
1141+#define I40E_GPIO_TIMESYNC_1 4 << 7
1142+#define I40E_GPIO_PHY_PIN_NA_ME_NO_SDP 0x3F << 20
1143+#define I40E_PORT_0_TIMESYNC_1 0x3F00184
1144+#define I40E_PORT_1_TIMESYNC_1 0x3F00185
1145+#define I40E_PORT_0_OUT_HIGH_TIMESYNC_0 0x3F00274
1146+#define I40E_PORT_1_OUT_HIGH_TIMESYNC_1 0x3F00275
1147+#define I40E_PTP_HALF_SECOND 500000000LL /* nano seconds */
1148+#define I40E_PRTTSYN_CTL0_FFFB_MASK 0xFFFFFFFB
1149+#define I40E_PTP_2_SEC_DELAY 2
1150+
1151+ /* flag to enable/disable vf base mode support */
1152+ bool vf_base_mode_only;
1153
1154 struct i40e_client_instance *cinst;
1155 bool stat_offsets_loaded;
1156@@ -507,22 +707,50 @@ struct i40e_pf {
1157 u16 dcbx_cap;
1158
1159 struct i40e_filter_control_settings filter_settings;
1160+#ifdef HAVE_PTP_1588_CLOCK
1161
1162 struct ptp_clock *ptp_clock;
1163 struct ptp_clock_info ptp_caps;
1164 struct sk_buff *ptp_tx_skb;
1165 unsigned long ptp_tx_start;
1166 struct hwtstamp_config tstamp_config;
1167+ struct timespec64 ptp_prev_hw_time;
1168+ struct work_struct ptp_pps_work;
1169+ struct work_struct ptp_extts0_work;
1170+ struct work_struct ptp_extts1_work;
1171+ ktime_t ptp_reset_start;
1172 struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
1173- u64 ptp_base_adj;
1174+ u32 ptp_adj_mult;
1175 u32 tx_hwtstamp_timeouts;
1176 u32 tx_hwtstamp_skipped;
1177 u32 rx_hwtstamp_cleared;
1178 u32 latch_event_flags;
1179+ u64 ptp_pps_start;
1180+ u32 pps_delay;
1181 spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
1182+ struct ptp_pin_desc ptp_pin[3];
1183 unsigned long latch_events[4];
1184 bool ptp_tx;
1185 bool ptp_rx;
1186+ struct i40e_ptp_pins_settings *ptp_pins;
1187+ struct kobject *ptp_kobj;
1188+#endif /* HAVE_PTP_1588_CLOCK */
1189+#ifdef I40E_ADD_PROBES
1190+ u64 tcp_segs;
1191+ u64 tx_tcp_cso;
1192+ u64 tx_udp_cso;
1193+ u64 tx_sctp_cso;
1194+ u64 tx_ip4_cso;
1195+ u64 rx_tcp_cso;
1196+ u64 rx_udp_cso;
1197+ u64 rx_sctp_cso;
1198+ u64 rx_ip4_cso;
1199+ u64 hw_csum_rx_outer;
1200+ u64 rx_tcp_cso_err;
1201+ u64 rx_udp_cso_err;
1202+ u64 rx_sctp_cso_err;
1203+ u64 rx_ip4_cso_err;
1204+#endif
1205 u16 rss_table_size; /* HW RSS table size */
1206 u32 max_bw;
1207 u32 min_bw;
1208@@ -530,6 +758,18 @@ struct i40e_pf {
1209 u32 ioremap_len;
1210 u32 fd_inv;
1211 u16 phy_led_val;
1212+ u16 last_sw_conf_flags;
1213+ u16 last_sw_conf_valid_flags;
1214+
1215+ u16 override_q_count;
1216+ struct vfd_objects *vfd_obj;
1217+ u16 ingress_rule_id;
1218+ int ingress_vlan;
1219+ u16 egress_rule_id;
1220+ int egress_vlan;
1221+ bool vf_bw_applied;
1222+ /* List to keep previous DDP profiles to be rolled back in the future */
1223+ struct list_head ddp_old_prof;
1224 };
1225
1226 /**
1227@@ -605,7 +845,11 @@ struct i40e_veb {
1228 /* struct that defines a VSI, associated with a dev */
1229 struct i40e_vsi {
1230 struct net_device *netdev;
1231+#ifdef HAVE_VLAN_RX_REGISTER
1232+ struct vlan_group *vlgrp;
1233+#else
1234 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
1235+#endif
1236 bool netdev_registered;
1237 bool stat_offsets_loaded;
1238
1239@@ -613,7 +857,7 @@ struct i40e_vsi {
1240 DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
1241 #define I40E_VSI_FLAG_FILTER_CHANGED BIT(0)
1242 #define I40E_VSI_FLAG_VEB_OWNER BIT(1)
1243- unsigned long flags;
1244+ u64 flags;
1245
1246 /* Per VSI lock to protect elements/hash (MAC filter) */
1247 spinlock_t mac_filter_hash_lock;
1248@@ -622,8 +866,13 @@ struct i40e_vsi {
1249 bool has_vlan_filter;
1250
1251 /* VSI stats */
1252+#ifdef HAVE_NDO_GET_STATS64
1253 struct rtnl_link_stats64 net_stats;
1254 struct rtnl_link_stats64 net_stats_offsets;
1255+#else
1256+ struct net_device_stats net_stats;
1257+ struct net_device_stats net_stats_offsets;
1258+#endif
1259 struct i40e_eth_stats eth_stats;
1260 struct i40e_eth_stats eth_stats_offsets;
1261 u32 tx_restart;
1262@@ -649,7 +898,6 @@ struct i40e_vsi {
1263 u8 *rss_hkey_user; /* User configured hash keys */
1264 u8 *rss_lut_user; /* User configured lookup table entries */
1265
1266-
1267 u16 max_frame;
1268 u16 rx_buf_len;
1269
1270@@ -669,10 +917,13 @@ struct i40e_vsi {
1271 u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
1272 u16 req_queue_pairs; /* User requested queue pairs */
1273 u16 num_queue_pairs; /* Used tx and rx pairs */
1274- u16 num_desc;
1275+ u16 num_tx_desc;
1276+ u16 num_rx_desc;
1277 enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
1278 s16 vf_id; /* Virtual function ID for SRIOV VSIs */
1279-
1280+#ifdef __TC_MQPRIO_MODE_MAX
1281+ struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */
1282+#endif
1283 struct i40e_tc_configuration tc_config;
1284 struct i40e_aqc_vsi_properties_data info;
1285
1286@@ -693,11 +944,25 @@ struct i40e_vsi {
1287 struct kobject *kobj; /* sysfs object */
1288 bool current_isup; /* Sync 'link up' logging */
1289 enum i40e_aq_link_speed current_speed; /* Sync link speed logging */
1290+ /* channel specific fields */
1291+ u16 cnt_q_avail; /* num of queues available for channel usage */
1292+ u16 orig_rss_size;
1293+ u16 current_rss_size;
1294+ bool reconfig_rss;
1295+
1296+ u16 next_base_queue; /* next queue to be used for channel setup */
1297+
1298+ struct list_head ch_list;
1299+ u16 tc_seid_map[I40E_MAX_TRAFFIC_CLASS];
1300
1301 void *priv; /* client driver data reference. */
1302+ bool block_tx_timeout;
1303
1304 /* VSI specific handlers */
1305 irqreturn_t (*irq_handler)(int irq, void *data);
1306+#ifdef ETHTOOL_GRXRINGS
1307+#endif
1308+
1309 } ____cacheline_internodealigned_in_smp;
1310
1311 struct i40e_netdev_priv {
1312@@ -716,16 +981,17 @@ struct i40e_q_vector {
1313 struct i40e_ring_container rx;
1314 struct i40e_ring_container tx;
1315
1316+ u8 itr_countdown; /* when 0 should adjust adaptive ITR */
1317 u8 num_ringpairs; /* total number of ring pairs in vector */
1318
1319+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
1320 cpumask_t affinity_mask;
1321 struct irq_affinity_notify affinity_notify;
1322+#endif
1323
1324 struct rcu_head rcu; /* to avoid race with update stats on free */
1325 char name[I40E_INT_NAME_STR_LEN];
1326 bool arm_wb_state;
1327-#define ITR_COUNTDOWN_START 100
1328- u8 itr_countdown; /* when 0 should adjust ITR */
1329 } ____cacheline_internodealigned_in_smp;
1330
1331 /* lan device */
1332@@ -845,7 +1111,7 @@ static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
1333 /* needed by i40e_ethtool.c */
1334 int i40e_up(struct i40e_vsi *vsi);
1335 void i40e_down(struct i40e_vsi *vsi);
1336-extern const char i40e_driver_name[];
1337+extern char i40e_driver_name[];
1338 extern const char i40e_driver_version_str[];
1339 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
1340 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired);
1341@@ -854,6 +1120,7 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
1342 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
1343 u16 rss_table_size, u16 rss_size);
1344 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
1345+struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_pf *pf, u16 seid);
1346 /**
1347 * i40e_find_vsi_by_type - Find and return Flow Director VSI
1348 * @pf: PF to search for VSI
1349@@ -874,8 +1141,13 @@ i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
1350 return NULL;
1351 }
1352 void i40e_update_stats(struct i40e_vsi *vsi);
1353+void i40e_update_veb_stats(struct i40e_veb *veb);
1354 void i40e_update_eth_stats(struct i40e_vsi *vsi);
1355+#ifdef HAVE_NDO_GET_STATS64
1356 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
1357+#else
1358+struct net_device_stats *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
1359+#endif
1360 int i40e_fetch_switch_configuration(struct i40e_pf *pf,
1361 bool printconfig);
1362
1363@@ -888,6 +1160,8 @@ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
1364 u32 i40e_get_global_fd_count(struct i40e_pf *pf);
1365 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
1366 void i40e_set_ethtool_ops(struct net_device *netdev);
1367+struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1368+ const u8 *macaddr, s16 vlan);
1369 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1370 const u8 *macaddr, s16 vlan);
1371 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
1372@@ -896,14 +1170,26 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
1373 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
1374 u16 uplink, u32 param1);
1375 int i40e_vsi_release(struct i40e_vsi *vsi);
1376+int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type);
1377+int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi);
1378+int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi);
1379+int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc);
1380+int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename);
1381 void i40e_service_event_schedule(struct i40e_pf *pf);
1382 void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
1383 u8 *msg, u16 len);
1384
1385+int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, bool is_xdp,
1386+ bool enable);
1387+int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable);
1388 int i40e_vsi_start_rings(struct i40e_vsi *vsi);
1389 void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
1390 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi);
1391 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi);
1392+void i40e_quiesce_vsi(struct i40e_vsi *vsi);
1393+void i40e_unquiesce_vsi(struct i40e_vsi *vsi);
1394+void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf);
1395+void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf);
1396 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
1397 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
1398 u16 downlink_seid, u8 enabled_tc);
1399@@ -912,6 +1198,7 @@ void i40e_veb_release(struct i40e_veb *veb);
1400 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
1401 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
1402 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
1403+int i40e_get_cloud_filter_type(u8 flags, u16 *type);
1404 void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
1405 void i40e_pf_reset_stats(struct i40e_pf *pf);
1406 #ifdef CONFIG_DEBUG_FS
1407@@ -933,6 +1220,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
1408 void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
1409 void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
1410 void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
1411+void i40e_client_update_msix_info(struct i40e_pf *pf);
1412 int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id);
1413 /**
1414 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
1415@@ -945,9 +1233,6 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
1416 struct i40e_hw *hw = &pf->hw;
1417 u32 val;
1418
1419- /* definitely clear the PBA here, as this function is meant to
1420- * clean out all previous interrupts AND enable the interrupt
1421- */
1422 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1423 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1424 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1425@@ -956,8 +1241,9 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
1426 }
1427
1428 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
1429-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
1430+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
1431 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
1432+int i40e_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
1433 int i40e_open(struct net_device *netdev);
1434 int i40e_close(struct net_device *netdev);
1435 int i40e_vsi_open(struct i40e_vsi *vsi);
1436@@ -970,18 +1256,22 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1437 const u8 *macaddr);
1438 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
1439 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
1440+int i40e_count_filters(struct i40e_vsi *vsi);
1441 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
1442 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
1443-#ifdef CONFIG_I40E_DCB
1444+#ifdef CONFIG_DCB
1445+#ifdef HAVE_DCBNL_IEEE
1446 void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
1447 struct i40e_dcbx_config *old_cfg,
1448 struct i40e_dcbx_config *new_cfg);
1449 void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
1450 void i40e_dcbnl_setup(struct i40e_vsi *vsi);
1451+#endif /* HAVE_DCBNL_IEEE */
1452 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
1453 struct i40e_dcbx_config *old_cfg,
1454 struct i40e_dcbx_config *new_cfg);
1455-#endif /* CONFIG_I40E_DCB */
1456+#endif /* CONFIG_DCB */
1457+#ifdef HAVE_PTP_1588_CLOCK
1458 void i40e_ptp_rx_hang(struct i40e_pf *pf);
1459 void i40e_ptp_tx_hang(struct i40e_pf *pf);
1460 void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
1461@@ -989,16 +1279,45 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
1462 void i40e_ptp_set_increment(struct i40e_pf *pf);
1463 int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
1464 int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
1465+int i40e_ptp_set_pins_ioctl(struct i40e_pf *pf, struct ifreq *ifr);
1466+int i40e_ptp_get_pins(struct i40e_pf *pf, struct ifreq *ifr);
1467+void i40e_ptp_save_hw_time(struct i40e_pf *pf);
1468+void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
1469 void i40e_ptp_init(struct i40e_pf *pf);
1470 void i40e_ptp_stop(struct i40e_pf *pf);
1471+int i40e_ptp_alloc_pins(struct i40e_pf *pf);
1472+#endif /* HAVE_PTP_1588_CLOCK */
1473+u8 i40e_pf_get_num_tc(struct i40e_pf *pf);
1474 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
1475 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
1476 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
1477 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
1478+int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
1479+int i40e_add_del_cloud_filter_ex(struct i40e_pf *pf,
1480+ struct i40e_cloud_filter *filter,
1481+ bool add);
1482+int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
1483+ struct i40e_cloud_filter *filter,
1484+ bool add);
1485+int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
1486+ struct i40e_cloud_filter *filter,
1487+ bool add);
1488 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
1489+int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
1490+int i40e_get_link_speed(struct i40e_vsi *vsi);
1491+
1492+void i40e_set_fec_in_flags(u8 fec_cfg, u64 *flags);
1493+
1494+#ifdef HAVE_XDP_SUPPORT
1495+int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
1496+int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
1497+#endif
1498
1499 static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
1500 {
1501 return !!vsi->xdp_prog;
1502 }
1503+int i40e_restore_ingress_egress_mirror(struct i40e_vsi *src_vsi, int mirror, u16 rule_type,
1504+ u16 *rule_id);
1505+
1506 #endif /* _I40E_H_ */
1507diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
1508index ba04988e0..15b9d9c89 100644
1509--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
1510+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
1511@@ -1,28 +1,5 @@
1512-/*******************************************************************************
1513- *
1514- * Intel Ethernet Controller XL710 Family Linux Driver
1515- * Copyright(c) 2013 - 2016 Intel Corporation.
1516- *
1517- * This program is free software; you can redistribute it and/or modify it
1518- * under the terms and conditions of the GNU General Public License,
1519- * version 2, as published by the Free Software Foundation.
1520- *
1521- * This program is distributed in the hope it will be useful, but WITHOUT
1522- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1523- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1524- * more details.
1525- *
1526- * You should have received a copy of the GNU General Public License along
1527- * with this program. If not, see <http://www.gnu.org/licenses/>.
1528- *
1529- * The full GNU General Public License is included in this distribution in
1530- * the file called "COPYING".
1531- *
1532- * Contact Information:
1533- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
1534- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
1535- *
1536- ******************************************************************************/
1537+// SPDX-License-Identifier: GPL-2.0
1538+/* Copyright(c) 2013 - 2020 Intel Corporation. */
1539
1540 #include "i40e_status.h"
1541 #include "i40e_type.h"
1542@@ -30,8 +7,6 @@
1543 #include "i40e_adminq.h"
1544 #include "i40e_prototype.h"
1545
1546-static void i40e_resume_aq(struct i40e_hw *hw);
1547-
1548 /**
1549 * i40e_adminq_init_regs - Initialize AdminQ registers
1550 * @hw: pointer to the hardware structure
1551@@ -119,6 +94,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
1552 **/
1553 static void i40e_free_adminq_asq(struct i40e_hw *hw)
1554 {
1555+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
1556 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
1557 }
1558
1559@@ -169,21 +145,21 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
1560 /* now configure the descriptors for use */
1561 desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
1562
1563- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
1564+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
1565 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
1566- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
1567+ desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
1568 desc->opcode = 0;
1569 /* This is in accordance with Admin queue design, there is no
1570 * register for buffer size configuration
1571 */
1572- desc->datalen = cpu_to_le16((u16)bi->size);
1573+ desc->datalen = CPU_TO_LE16((u16)bi->size);
1574 desc->retval = 0;
1575 desc->cookie_high = 0;
1576 desc->cookie_low = 0;
1577 desc->params.external.addr_high =
1578- cpu_to_le32(upper_32_bits(bi->pa));
1579+ CPU_TO_LE32(upper_32_bits(bi->pa));
1580 desc->params.external.addr_low =
1581- cpu_to_le32(lower_32_bits(bi->pa));
1582+ CPU_TO_LE32(lower_32_bits(bi->pa));
1583 desc->params.external.param0 = 0;
1584 desc->params.external.param1 = 0;
1585 }
1586@@ -291,7 +267,7 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
1587 **/
1588 static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
1589 {
1590- i40e_status ret_code = 0;
1591+ i40e_status ret_code = I40E_SUCCESS;
1592 u32 reg = 0;
1593
1594 /* Clear Head and Tail */
1595@@ -320,7 +296,7 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
1596 **/
1597 static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
1598 {
1599- i40e_status ret_code = 0;
1600+ i40e_status ret_code = I40E_SUCCESS;
1601 u32 reg = 0;
1602
1603 /* Clear Head and Tail */
1604@@ -359,7 +335,7 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
1605 **/
1606 static i40e_status i40e_init_asq(struct i40e_hw *hw)
1607 {
1608- i40e_status ret_code = 0;
1609+ i40e_status ret_code = I40E_SUCCESS;
1610
1611 if (hw->aq.asq.count > 0) {
1612 /* queue already initialized */
1613@@ -379,18 +355,18 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
1614
1615 /* allocate the ring memory */
1616 ret_code = i40e_alloc_adminq_asq_ring(hw);
1617- if (ret_code)
1618+ if (ret_code != I40E_SUCCESS)
1619 goto init_adminq_exit;
1620
1621 /* allocate buffers in the rings */
1622 ret_code = i40e_alloc_asq_bufs(hw);
1623- if (ret_code)
1624+ if (ret_code != I40E_SUCCESS)
1625 goto init_adminq_free_rings;
1626
1627 /* initialize base registers */
1628 ret_code = i40e_config_asq_regs(hw);
1629- if (ret_code)
1630- goto init_adminq_free_rings;
1631+ if (ret_code != I40E_SUCCESS)
1632+ goto init_config_regs;
1633
1634 /* success! */
1635 hw->aq.asq.count = hw->aq.num_asq_entries;
1636@@ -398,6 +374,10 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
1637
1638 init_adminq_free_rings:
1639 i40e_free_adminq_asq(hw);
1640+ return ret_code;
1641+
1642+init_config_regs:
1643+ i40e_free_asq_bufs(hw);
1644
1645 init_adminq_exit:
1646 return ret_code;
1647@@ -418,7 +398,7 @@ init_adminq_exit:
1648 **/
1649 static i40e_status i40e_init_arq(struct i40e_hw *hw)
1650 {
1651- i40e_status ret_code = 0;
1652+ i40e_status ret_code = I40E_SUCCESS;
1653
1654 if (hw->aq.arq.count > 0) {
1655 /* queue already initialized */
1656@@ -438,17 +418,17 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
1657
1658 /* allocate the ring memory */
1659 ret_code = i40e_alloc_adminq_arq_ring(hw);
1660- if (ret_code)
1661+ if (ret_code != I40E_SUCCESS)
1662 goto init_adminq_exit;
1663
1664 /* allocate buffers in the rings */
1665 ret_code = i40e_alloc_arq_bufs(hw);
1666- if (ret_code)
1667+ if (ret_code != I40E_SUCCESS)
1668 goto init_adminq_free_rings;
1669
1670 /* initialize base registers */
1671 ret_code = i40e_config_arq_regs(hw);
1672- if (ret_code)
1673+ if (ret_code != I40E_SUCCESS)
1674 goto init_adminq_free_rings;
1675
1676 /* success! */
1677@@ -470,9 +450,9 @@ init_adminq_exit:
1678 **/
1679 static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
1680 {
1681- i40e_status ret_code = 0;
1682+ i40e_status ret_code = I40E_SUCCESS;
1683
1684- mutex_lock(&hw->aq.asq_mutex);
1685+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
1686
1687 if (hw->aq.asq.count == 0) {
1688 ret_code = I40E_ERR_NOT_READY;
1689@@ -492,7 +472,7 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
1690 i40e_free_asq_bufs(hw);
1691
1692 shutdown_asq_out:
1693- mutex_unlock(&hw->aq.asq_mutex);
1694+ i40e_release_spinlock(&hw->aq.asq_spinlock);
1695 return ret_code;
1696 }
1697
1698@@ -504,9 +484,9 @@ shutdown_asq_out:
1699 **/
1700 static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
1701 {
1702- i40e_status ret_code = 0;
1703+ i40e_status ret_code = I40E_SUCCESS;
1704
1705- mutex_lock(&hw->aq.arq_mutex);
1706+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
1707
1708 if (hw->aq.arq.count == 0) {
1709 ret_code = I40E_ERR_NOT_READY;
1710@@ -526,10 +506,86 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
1711 i40e_free_arq_bufs(hw);
1712
1713 shutdown_arq_out:
1714- mutex_unlock(&hw->aq.arq_mutex);
1715+ i40e_release_spinlock(&hw->aq.arq_spinlock);
1716 return ret_code;
1717 }
1718
1719+/**
1720+ * i40e_resume_aq - resume AQ processing from 0
1721+ * @hw: pointer to the hardware structure
1722+ **/
1723+static void i40e_resume_aq(struct i40e_hw *hw)
1724+{
1725+ /* Registers are reset after PF reset */
1726+ hw->aq.asq.next_to_use = 0;
1727+ hw->aq.asq.next_to_clean = 0;
1728+
1729+ i40e_config_asq_regs(hw);
1730+
1731+ hw->aq.arq.next_to_use = 0;
1732+ hw->aq.arq.next_to_clean = 0;
1733+
1734+ i40e_config_arq_regs(hw);
1735+}
1736+
1737+/**
1738+ * i40e_set_hw_flags - set HW flags
1739+ * @hw: pointer to the hardware structure
1740+ **/
1741+static void i40e_set_hw_flags(struct i40e_hw *hw)
1742+{
1743+ struct i40e_adminq_info *aq = &hw->aq;
1744+
1745+ hw->flags = 0;
1746+
1747+ switch (hw->mac.type) {
1748+ case I40E_MAC_XL710:
1749+ if (aq->api_maj_ver > 1 ||
1750+ (aq->api_maj_ver == 1 &&
1751+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
1752+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
1753+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
1754+ /* The ability to RX (not drop) 802.1ad frames */
1755+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
1756+ }
1757+ break;
1758+ case I40E_MAC_X722:
1759+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
1760+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
1761+
1762+ if (aq->api_maj_ver > 1 ||
1763+ (aq->api_maj_ver == 1 &&
1764+ aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
1765+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
1766+
1767+ if (aq->api_maj_ver > 1 ||
1768+ (aq->api_maj_ver == 1 &&
1769+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
1770+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
1771+ /* fall through */
1772+ default:
1773+ break;
1774+ }
1775+
1776+ /* Newer versions of firmware require lock when reading the NVM */
1777+ if (aq->api_maj_ver > 1 ||
1778+ (aq->api_maj_ver == 1 &&
1779+ aq->api_min_ver >= 5))
1780+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
1781+
1782+ if (aq->api_maj_ver > 1 ||
1783+ (aq->api_maj_ver == 1 &&
1784+ aq->api_min_ver >= 8)) {
1785+ hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
1786+ hw->flags |= I40E_HW_FLAG_DROP_MODE;
1787+ }
1788+
1789+ if (aq->api_maj_ver > 1 ||
1790+ (aq->api_maj_ver == 1 &&
1791+ aq->api_min_ver >= 9))
1792+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
1793+}
1794+
1795 /**
1796 * i40e_init_adminq - main initialization routine for Admin Queue
1797 * @hw: pointer to the hardware structure
1798@@ -543,19 +599,22 @@ shutdown_arq_out:
1799 **/
1800 i40e_status i40e_init_adminq(struct i40e_hw *hw)
1801 {
1802+ struct i40e_adminq_info *aq = &hw->aq;
1803+ i40e_status ret_code;
1804 u16 cfg_ptr, oem_hi, oem_lo;
1805 u16 eetrack_lo, eetrack_hi;
1806- i40e_status ret_code;
1807 int retry = 0;
1808
1809 /* verify input for valid configuration */
1810- if ((hw->aq.num_arq_entries == 0) ||
1811- (hw->aq.num_asq_entries == 0) ||
1812- (hw->aq.arq_buf_size == 0) ||
1813- (hw->aq.asq_buf_size == 0)) {
1814+ if (aq->num_arq_entries == 0 ||
1815+ aq->num_asq_entries == 0 ||
1816+ aq->arq_buf_size == 0 ||
1817+ aq->asq_buf_size == 0) {
1818 ret_code = I40E_ERR_CONFIG;
1819 goto init_adminq_exit;
1820 }
1821+ i40e_init_spinlock(&aq->asq_spinlock);
1822+ i40e_init_spinlock(&aq->arq_spinlock);
1823
1824 /* Set up register offsets */
1825 i40e_adminq_init_regs(hw);
1826@@ -565,12 +624,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
1827
1828 /* allocate the ASQ */
1829 ret_code = i40e_init_asq(hw);
1830- if (ret_code)
1831- goto init_adminq_destroy_locks;
1832+ if (ret_code != I40E_SUCCESS)
1833+ goto init_adminq_destroy_spinlocks;
1834
1835 /* allocate the ARQ */
1836 ret_code = i40e_init_arq(hw);
1837- if (ret_code)
1838+ if (ret_code != I40E_SUCCESS)
1839 goto init_adminq_free_asq;
1840
1841 /* There are some cases where the firmware may not be quite ready
1842@@ -579,11 +638,11 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
1843 */
1844 do {
1845 ret_code = i40e_aq_get_firmware_version(hw,
1846- &hw->aq.fw_maj_ver,
1847- &hw->aq.fw_min_ver,
1848- &hw->aq.fw_build,
1849- &hw->aq.api_maj_ver,
1850- &hw->aq.api_min_ver,
1851+ &aq->fw_maj_ver,
1852+ &aq->fw_min_ver,
1853+ &aq->fw_build,
1854+ &aq->api_maj_ver,
1855+ &aq->api_min_ver,
1856 NULL);
1857 if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
1858 break;
1859@@ -594,6 +653,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
1860 if (ret_code != I40E_SUCCESS)
1861 goto init_adminq_free_arq;
1862
1863+ /*
1864+ * Some features were introduced in different FW API version
1865+ * for different MAC type.
1866+ */
1867+ i40e_set_hw_flags(hw);
1868+
1869 /* get the NVM version info */
1870 i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
1871 &hw->nvm.version);
1872@@ -601,13 +666,11 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
1873 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
1874 hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
1875 i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
1876- i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
1877- &oem_hi);
1878- i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
1879- &oem_lo);
1880+ i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF), &oem_hi);
1881+ i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)), &oem_lo);
1882 hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
1883
1884- if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
1885+ if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
1886 ret_code = I40E_ERR_FIRMWARE_API_VERSION;
1887 goto init_adminq_free_arq;
1888 }
1889@@ -617,7 +680,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
1890 hw->nvm_release_on_done = false;
1891 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1892
1893- ret_code = 0;
1894+ ret_code = I40E_SUCCESS;
1895
1896 /* success! */
1897 goto init_adminq_exit;
1898@@ -626,7 +689,9 @@ init_adminq_free_arq:
1899 i40e_shutdown_arq(hw);
1900 init_adminq_free_asq:
1901 i40e_shutdown_asq(hw);
1902-init_adminq_destroy_locks:
1903+init_adminq_destroy_spinlocks:
1904+ i40e_destroy_spinlock(&aq->asq_spinlock);
1905+ i40e_destroy_spinlock(&aq->arq_spinlock);
1906
1907 init_adminq_exit:
1908 return ret_code;
1909@@ -638,13 +703,15 @@ init_adminq_exit:
1910 **/
1911 i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
1912 {
1913- i40e_status ret_code = 0;
1914+ i40e_status ret_code = I40E_SUCCESS;
1915
1916 if (i40e_check_asq_alive(hw))
1917 i40e_aq_queue_shutdown(hw, true);
1918
1919 i40e_shutdown_asq(hw);
1920 i40e_shutdown_arq(hw);
1921+ i40e_destroy_spinlock(&hw->aq.asq_spinlock);
1922+ i40e_destroy_spinlock(&hw->aq.arq_spinlock);
1923
1924 if (hw->nvm_buff.va)
1925 i40e_free_virt_mem(hw, &hw->nvm_buff);
1926@@ -669,17 +736,18 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
1927 desc = I40E_ADMINQ_DESC(*asq, ntc);
1928 details = I40E_ADMINQ_DETAILS(*asq, ntc);
1929 while (rd32(hw, hw->aq.asq.head) != ntc) {
1930- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1931+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
1932 "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
1933
1934 if (details->callback) {
1935 I40E_ADMINQ_CALLBACK cb_func =
1936 (I40E_ADMINQ_CALLBACK)details->callback;
1937- desc_cb = *desc;
1938+ i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
1939+ I40E_DMA_TO_DMA);
1940 cb_func(hw, &desc_cb);
1941 }
1942- memset(desc, 0, sizeof(*desc));
1943- memset(details, 0, sizeof(*details));
1944+ i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
1945+ i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
1946 ntc++;
1947 if (ntc == asq->count)
1948 ntc = 0;
1949@@ -709,23 +777,26 @@ static bool i40e_asq_done(struct i40e_hw *hw)
1950 }
1951
1952 /**
1953- * i40e_asq_send_command - send command to Admin Queue
1954+ * i40e_asq_send_command_atomic - send command to Admin Queue
1955 * @hw: pointer to the hw struct
1956 * @desc: prefilled descriptor describing the command (non DMA mem)
1957 * @buff: buffer to use for indirect commands
1958 * @buff_size: size of buffer for indirect commands
1959 * @cmd_details: pointer to command details structure
1960+ * @is_atomic_context: is the function called in an atomic context?
1961 *
1962 * This is the main send command driver routine for the Admin Queue send
1963 * queue. It runs the queue, cleans the queue, etc
1964 **/
1965-i40e_status i40e_asq_send_command(struct i40e_hw *hw,
1966- struct i40e_aq_desc *desc,
1967- void *buff, /* can be NULL */
1968- u16 buff_size,
1969- struct i40e_asq_cmd_details *cmd_details)
1970+enum i40e_status_code
1971+i40e_asq_send_command_atomic(struct i40e_hw *hw,
1972+ struct i40e_aq_desc *desc,
1973+ void *buff, /* can be NULL */
1974+ u16 buff_size,
1975+ struct i40e_asq_cmd_details *cmd_details,
1976+ bool is_atomic_context)
1977 {
1978- i40e_status status = 0;
1979+ i40e_status status = I40E_SUCCESS;
1980 struct i40e_dma_mem *dma_buff = NULL;
1981 struct i40e_asq_cmd_details *details;
1982 struct i40e_aq_desc *desc_on_ring;
1983@@ -733,7 +804,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
1984 u16 retval = 0;
1985 u32 val = 0;
1986
1987- mutex_lock(&hw->aq.asq_mutex);
1988+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
1989+
1990+ hw->aq.asq_last_status = I40E_AQ_RC_OK;
1991
1992 if (hw->aq.asq.count == 0) {
1993 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1994@@ -742,37 +815,40 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
1995 goto asq_send_command_error;
1996 }
1997
1998- hw->aq.asq_last_status = I40E_AQ_RC_OK;
1999-
2000 val = rd32(hw, hw->aq.asq.head);
2001 if (val >= hw->aq.num_asq_entries) {
2002 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
2003 "AQTX: head overrun at %d\n", val);
2004- status = I40E_ERR_QUEUE_EMPTY;
2005+ status = I40E_ERR_ADMIN_QUEUE_FULL;
2006 goto asq_send_command_error;
2007 }
2008
2009 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
2010 if (cmd_details) {
2011- *details = *cmd_details;
2012+ i40e_memcpy(details,
2013+ cmd_details,
2014+ sizeof(struct i40e_asq_cmd_details),
2015+ I40E_NONDMA_TO_NONDMA);
2016
2017 /* If the cmd_details are defined copy the cookie. The
2018- * cpu_to_le32 is not needed here because the data is ignored
2019+ * CPU_TO_LE32 is not needed here because the data is ignored
2020 * by the FW, only used by the driver
2021 */
2022 if (details->cookie) {
2023 desc->cookie_high =
2024- cpu_to_le32(upper_32_bits(details->cookie));
2025+ CPU_TO_LE32(upper_32_bits(details->cookie));
2026 desc->cookie_low =
2027- cpu_to_le32(lower_32_bits(details->cookie));
2028+ CPU_TO_LE32(lower_32_bits(details->cookie));
2029 }
2030 } else {
2031- memset(details, 0, sizeof(struct i40e_asq_cmd_details));
2032+ i40e_memset(details, 0,
2033+ sizeof(struct i40e_asq_cmd_details),
2034+ I40E_NONDMA_MEM);
2035 }
2036
2037 /* clear requested flags and then set additional flags if defined */
2038- desc->flags &= ~cpu_to_le16(details->flags_dis);
2039- desc->flags |= cpu_to_le16(details->flags_ena);
2040+ desc->flags &= ~CPU_TO_LE16(details->flags_dis);
2041+ desc->flags |= CPU_TO_LE16(details->flags_ena);
2042
2043 if (buff_size > hw->aq.asq_buf_size) {
2044 i40e_debug(hw,
2045@@ -810,26 +886,28 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
2046 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
2047
2048 /* if the desc is available copy the temp desc to the right place */
2049- *desc_on_ring = *desc;
2050+ i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
2051+ I40E_NONDMA_TO_DMA);
2052
2053 /* if buff is not NULL assume indirect command */
2054 if (buff != NULL) {
2055 dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
2056 /* copy the user buff into the respective DMA buff */
2057- memcpy(dma_buff->va, buff, buff_size);
2058- desc_on_ring->datalen = cpu_to_le16(buff_size);
2059+ i40e_memcpy(dma_buff->va, buff, buff_size,
2060+ I40E_NONDMA_TO_DMA);
2061+ desc_on_ring->datalen = CPU_TO_LE16(buff_size);
2062
2063 /* Update the address values in the desc with the pa value
2064 * for respective buffer
2065 */
2066 desc_on_ring->params.external.addr_high =
2067- cpu_to_le32(upper_32_bits(dma_buff->pa));
2068+ CPU_TO_LE32(upper_32_bits(dma_buff->pa));
2069 desc_on_ring->params.external.addr_low =
2070- cpu_to_le32(lower_32_bits(dma_buff->pa));
2071+ CPU_TO_LE32(lower_32_bits(dma_buff->pa));
2072 }
2073
2074 /* bump the tail */
2075- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
2076+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
2077 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
2078 buff, buff_size);
2079 (hw->aq.asq.next_to_use)++;
2080@@ -850,17 +928,22 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
2081 */
2082 if (i40e_asq_done(hw))
2083 break;
2084- udelay(50);
2085+ if (is_atomic_context)
2086+ udelay(50);
2087+ else
2088+ usleep_range(40, 60);
2089 total_delay += 50;
2090 } while (total_delay < hw->aq.asq_cmd_timeout);
2091 }
2092
2093 /* if ready, copy the desc back to temp */
2094 if (i40e_asq_done(hw)) {
2095- *desc = *desc_on_ring;
2096+ i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
2097+ I40E_DMA_TO_NONDMA);
2098 if (buff != NULL)
2099- memcpy(buff, dma_buff->va, buff_size);
2100- retval = le16_to_cpu(desc->retval);
2101+ i40e_memcpy(buff, dma_buff->va, buff_size,
2102+ I40E_DMA_TO_NONDMA);
2103+ retval = LE16_TO_CPU(desc->retval);
2104 if (retval != 0) {
2105 i40e_debug(hw,
2106 I40E_DEBUG_AQ_MESSAGE,
2107@@ -872,34 +955,54 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
2108 }
2109 cmd_completed = true;
2110 if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
2111- status = 0;
2112+ status = I40E_SUCCESS;
2113+ else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
2114+ status = I40E_ERR_NOT_READY;
2115 else
2116 status = I40E_ERR_ADMIN_QUEUE_ERROR;
2117 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
2118 }
2119
2120- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
2121+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
2122 "AQTX: desc and buffer writeback:\n");
2123 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
2124
2125 /* save writeback aq if requested */
2126 if (details->wb_desc)
2127- *details->wb_desc = *desc_on_ring;
2128+ i40e_memcpy(details->wb_desc, desc_on_ring,
2129+ sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
2130
2131 /* update the error if time out occurred */
2132 if ((!cmd_completed) &&
2133 (!details->async && !details->postpone)) {
2134- i40e_debug(hw,
2135- I40E_DEBUG_AQ_MESSAGE,
2136- "AQTX: Writeback timeout.\n");
2137- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
2138+ if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
2139+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
2140+ "AQTX: AQ Critical error.\n");
2141+ status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
2142+ } else {
2143+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
2144+ "AQTX: Writeback timeout.\n");
2145+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
2146+ }
2147 }
2148
2149 asq_send_command_error:
2150- mutex_unlock(&hw->aq.asq_mutex);
2151+ i40e_release_spinlock(&hw->aq.asq_spinlock);
2152 return status;
2153 }
2154
2155+// inline function with previous signature to avoid modifying
2156+// all existing calls to i40e_asq_send_command
2157+inline i40e_status i40e_asq_send_command(struct i40e_hw *hw,
2158+ struct i40e_aq_desc *desc,
2159+ void *buff, /* can be NULL */
2160+ u16 buff_size,
2161+ struct i40e_asq_cmd_details *cmd_details)
2162+{
2163+ return i40e_asq_send_command_atomic(hw, desc, buff, buff_size,
2164+ cmd_details, false);
2165+}
2166+
2167 /**
2168 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
2169 * @desc: pointer to the temp descriptor (non DMA mem)
2170@@ -911,9 +1014,10 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
2171 u16 opcode)
2172 {
2173 /* zero out the desc */
2174- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
2175- desc->opcode = cpu_to_le16(opcode);
2176- desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
2177+ i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
2178+ I40E_NONDMA_MEM);
2179+ desc->opcode = CPU_TO_LE16(opcode);
2180+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
2181 }
2182
2183 /**
2184@@ -930,7 +1034,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
2185 struct i40e_arq_event_info *e,
2186 u16 *pending)
2187 {
2188- i40e_status ret_code = 0;
2189+ i40e_status ret_code = I40E_SUCCESS;
2190 u16 ntc = hw->aq.arq.next_to_clean;
2191 struct i40e_aq_desc *desc;
2192 struct i40e_dma_mem *bi;
2193@@ -940,10 +1044,10 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
2194 u16 ntu;
2195
2196 /* pre-clean the event info */
2197- memset(&e->desc, 0, sizeof(e->desc));
2198+ i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);
2199
2200 /* take the lock before we start messing with the ring */
2201- mutex_lock(&hw->aq.arq_mutex);
2202+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
2203
2204 if (hw->aq.arq.count == 0) {
2205 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
2206@@ -953,7 +1057,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
2207 }
2208
2209 /* set next_to_use to head */
2210- ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
2211+ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
2212 if (ntu == ntc) {
2213 /* nothing to do - shouldn't need to update ring's values */
2214 ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
2215@@ -965,8 +1069,8 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
2216 desc_idx = ntc;
2217
2218 hw->aq.arq_last_status =
2219- (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
2220- flags = le16_to_cpu(desc->flags);
2221+ (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
2222+ flags = LE16_TO_CPU(desc->flags);
2223 if (flags & I40E_AQ_FLAG_ERR) {
2224 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
2225 i40e_debug(hw,
2226@@ -975,14 +1079,16 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
2227 hw->aq.arq_last_status);
2228 }
2229
2230- e->desc = *desc;
2231- datalen = le16_to_cpu(desc->datalen);
2232+ i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
2233+ I40E_DMA_TO_NONDMA);
2234+ datalen = LE16_TO_CPU(desc->datalen);
2235 e->msg_len = min(datalen, e->buf_len);
2236 if (e->msg_buf != NULL && (e->msg_len != 0))
2237- memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
2238- e->msg_len);
2239+ i40e_memcpy(e->msg_buf,
2240+ hw->aq.arq.r.arq_bi[desc_idx].va,
2241+ e->msg_len, I40E_DMA_TO_NONDMA);
2242
2243- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
2244+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
2245 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
2246 hw->aq.arq_buf_size);
2247
2248@@ -991,14 +1097,14 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
2249 * size
2250 */
2251 bi = &hw->aq.arq.r.arq_bi[ntc];
2252- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
2253+ i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
2254
2255- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
2256+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
2257 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
2258- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
2259- desc->datalen = cpu_to_le16((u16)bi->size);
2260- desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
2261- desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
2262+ desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
2263+ desc->datalen = CPU_TO_LE16((u16)bi->size);
2264+ desc->params.external.addr_high = CPU_TO_LE32(upper_32_bits(bi->pa));
2265+ desc->params.external.addr_low = CPU_TO_LE32(lower_32_bits(bi->pa));
2266
2267 /* set tail = the last cleaned desc index. */
2268 wr32(hw, hw->aq.arq.tail, ntc);
2269@@ -1009,27 +1115,14 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
2270 hw->aq.arq.next_to_clean = ntc;
2271 hw->aq.arq.next_to_use = ntu;
2272
2273- i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode));
2274+ i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
2275 clean_arq_element_out:
2276 /* Set pending if needed, unlock and return */
2277- if (pending)
2278+ if (pending != NULL)
2279 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
2280 clean_arq_element_err:
2281- mutex_unlock(&hw->aq.arq_mutex);
2282+ i40e_release_spinlock(&hw->aq.arq_spinlock);
2283
2284 return ret_code;
2285 }
2286
2287-static void i40e_resume_aq(struct i40e_hw *hw)
2288-{
2289- /* Registers are reset after PF reset */
2290- hw->aq.asq.next_to_use = 0;
2291- hw->aq.asq.next_to_clean = 0;
2292-
2293- i40e_config_asq_regs(hw);
2294-
2295- hw->aq.arq.next_to_use = 0;
2296- hw->aq.arq.next_to_clean = 0;
2297-
2298- i40e_config_arq_regs(hw);
2299-}
2300diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
2301index 2349fbe04..f0af697c4 100644
2302--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
2303+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
2304@@ -1,28 +1,5 @@
2305-/*******************************************************************************
2306- *
2307- * Intel Ethernet Controller XL710 Family Linux Driver
2308- * Copyright(c) 2013 - 2014 Intel Corporation.
2309- *
2310- * This program is free software; you can redistribute it and/or modify it
2311- * under the terms and conditions of the GNU General Public License,
2312- * version 2, as published by the Free Software Foundation.
2313- *
2314- * This program is distributed in the hope it will be useful, but WITHOUT
2315- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
2316- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
2317- * more details.
2318- *
2319- * You should have received a copy of the GNU General Public License along
2320- * with this program. If not, see <http://www.gnu.org/licenses/>.
2321- *
2322- * The full GNU General Public License is included in this distribution in
2323- * the file called "COPYING".
2324- *
2325- * Contact Information:
2326- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
2327- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
2328- *
2329- ******************************************************************************/
2330+/* SPDX-License-Identifier: GPL-2.0 */
2331+/* Copyright(c) 2013 - 2020 Intel Corporation. */
2332
2333 #ifndef _I40E_ADMINQ_H_
2334 #define _I40E_ADMINQ_H_
2335@@ -98,8 +75,8 @@ struct i40e_adminq_info {
2336 u16 api_maj_ver; /* api major version */
2337 u16 api_min_ver; /* api minor version */
2338
2339- struct mutex asq_mutex; /* Send queue lock */
2340- struct mutex arq_mutex; /* Receive queue lock */
2341+ struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
2342+ struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
2343
2344 /* last status values on send and receive queues */
2345 enum i40e_admin_queue_err asq_last_status;
2346@@ -111,7 +88,7 @@ struct i40e_adminq_info {
2347 * aq_ret: AdminQ handler error code can override aq_rc
2348 * aq_rc: AdminQ firmware error code to convert
2349 **/
2350-static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
2351+static INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
2352 {
2353 int aq_to_posix[] = {
2354 0, /* I40E_AQ_RC_OK */
2355diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
2356index 5d5f422cb..f4940fe53 100644
2357--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
2358+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
2359@@ -1,28 +1,5 @@
2360-/*******************************************************************************
2361- *
2362- * Intel Ethernet Controller XL710 Family Linux Driver
2363- * Copyright(c) 2013 - 2017 Intel Corporation.
2364- *
2365- * This program is free software; you can redistribute it and/or modify it
2366- * under the terms and conditions of the GNU General Public License,
2367- * version 2, as published by the Free Software Foundation.
2368- *
2369- * This program is distributed in the hope it will be useful, but WITHOUT
2370- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
2371- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
2372- * more details.
2373- *
2374- * You should have received a copy of the GNU General Public License along
2375- * with this program. If not, see <http://www.gnu.org/licenses/>.
2376- *
2377- * The full GNU General Public License is included in this distribution in
2378- * the file called "COPYING".
2379- *
2380- * Contact Information:
2381- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
2382- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
2383- *
2384- ******************************************************************************/
2385+/* SPDX-License-Identifier: GPL-2.0 */
2386+/* Copyright(c) 2013 - 2020 Intel Corporation. */
2387
2388 #ifndef _I40E_ADMINQ_CMD_H_
2389 #define _I40E_ADMINQ_CMD_H_
2390@@ -34,7 +11,19 @@
2391 */
2392
2393 #define I40E_FW_API_VERSION_MAJOR 0x0001
2394-#define I40E_FW_API_VERSION_MINOR 0x0005
2395+#define I40E_FW_API_VERSION_MINOR_X722 0x0009
2396+#define I40E_FW_API_VERSION_MINOR_X710 0x000A
2397+
2398+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
2399+ I40E_FW_API_VERSION_MINOR_X710 : \
2400+ I40E_FW_API_VERSION_MINOR_X722)
2401+
2402+/* API version 1.7 implements additional link and PHY-specific APIs */
2403+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
2404+/* API version 1.9 for X722 implements additional link and PHY-specific APIs */
2405+#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
2406+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
2407+#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
2408
2409 struct i40e_aq_desc {
2410 __le16 flags;
2411@@ -78,17 +67,17 @@ struct i40e_aq_desc {
2412 #define I40E_AQ_FLAG_EI_SHIFT 14
2413 #define I40E_AQ_FLAG_FE_SHIFT 15
2414
2415-#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
2416-#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
2417-#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
2418-#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
2419-#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
2420-#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
2421-#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
2422-#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
2423-#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
2424-#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
2425-#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
2426+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
2427+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
2428+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
2429+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
2430+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
2431+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
2432+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
2433+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
2434+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
2435+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
2436+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
2437
2438 /* error codes */
2439 enum i40e_admin_queue_err {
2440@@ -146,6 +135,7 @@ enum i40e_admin_queue_opc {
2441 /* WoL commands */
2442 i40e_aqc_opc_set_wol_filter = 0x0120,
2443 i40e_aqc_opc_get_wake_reason = 0x0121,
2444+ i40e_aqc_opc_clear_all_wol_filters = 0x025E,
2445
2446 /* internal switch commands */
2447 i40e_aqc_opc_get_switch_config = 0x0200,
2448@@ -186,17 +176,19 @@ enum i40e_admin_queue_opc {
2449 i40e_aqc_opc_add_cloud_filters = 0x025C,
2450 i40e_aqc_opc_remove_cloud_filters = 0x025D,
2451 i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
2452+ i40e_aqc_opc_replace_cloud_filters = 0x025F,
2453
2454 i40e_aqc_opc_add_mirror_rule = 0x0260,
2455 i40e_aqc_opc_delete_mirror_rule = 0x0261,
2456
2457- /* Pipeline Personalization Profile */
2458+ /* Dynamic Device Personalization */
2459 i40e_aqc_opc_write_personalization_profile = 0x0270,
2460 i40e_aqc_opc_get_personalization_profile_list = 0x0271,
2461
2462 /* DCB commands */
2463 i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
2464 i40e_aqc_opc_dcb_updated = 0x0302,
2465+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
2466
2467 /* TX scheduler */
2468 i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
2469@@ -221,6 +213,8 @@ enum i40e_admin_queue_opc {
2470 i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
2471 i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
2472
2473+ /* phy commands*/
2474+
2475 /* phy commands*/
2476 i40e_aqc_opc_get_phy_abilities = 0x0600,
2477 i40e_aqc_opc_set_phy_config = 0x0601,
2478@@ -236,6 +230,8 @@ enum i40e_admin_queue_opc {
2479 i40e_aqc_opc_set_phy_debug = 0x0622,
2480 i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
2481 i40e_aqc_opc_run_phy_activity = 0x0626,
2482+ i40e_aqc_opc_set_phy_register = 0x0628,
2483+ i40e_aqc_opc_get_phy_register = 0x0629,
2484
2485 /* NVM commands */
2486 i40e_aqc_opc_nvm_read = 0x0701,
2487@@ -243,6 +239,7 @@ enum i40e_admin_queue_opc {
2488 i40e_aqc_opc_nvm_update = 0x0703,
2489 i40e_aqc_opc_nvm_config_read = 0x0704,
2490 i40e_aqc_opc_nvm_config_write = 0x0705,
2491+ i40e_aqc_opc_nvm_progress = 0x0706,
2492 i40e_aqc_opc_oem_post_update = 0x0720,
2493 i40e_aqc_opc_thermal_sensor = 0x0721,
2494
2495@@ -271,6 +268,7 @@ enum i40e_admin_queue_opc {
2496 i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
2497 i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
2498 i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
2499+ i40e_aqc_opc_lldp_restore = 0x0A0A,
2500
2501 /* Tunnel commands */
2502 i40e_aqc_opc_add_udp_tunnel = 0x0B00,
2503@@ -531,6 +529,7 @@ struct i40e_aqc_mac_address_read {
2504 #define I40E_AQC_PORT_ADDR_VALID 0x40
2505 #define I40E_AQC_WOL_ADDR_VALID 0x80
2506 #define I40E_AQC_MC_MAG_EN_VALID 0x100
2507+#define I40E_AQC_WOL_PRESERVE_STATUS 0x200
2508 #define I40E_AQC_ADDR_VALID_MASK 0x3F0
2509 u8 reserved[6];
2510 __le32 addr_high;
2511@@ -591,6 +590,7 @@ struct i40e_aqc_set_wol_filter {
2512 __le16 cmd_flags;
2513 #define I40E_AQC_SET_WOL_FILTER 0x8000
2514 #define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
2515+#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
2516 #define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
2517 #define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
2518 __le16 valid_flags;
2519@@ -764,8 +764,52 @@ struct i40e_aqc_set_switch_config {
2520 /* flags used for both fields below */
2521 #define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
2522 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
2523+#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004
2524 __le16 valid_flags;
2525- u8 reserved[12];
2526+ /* The ethertype in switch_tag is dropped on ingress and used
2527+ * internally by the switch. Set this to zero for the default
2528+ * of 0x88a8 (802.1ad). Should be zero for firmware API
2529+ * versions lower than 1.7.
2530+ */
2531+ __le16 switch_tag;
2532+ /* The ethertypes in first_tag and second_tag are used to
2533+ * match the outer and inner VLAN tags (respectively) when HW
2534+ * double VLAN tagging is enabled via the set port parameters
2535+ * AQ command. Otherwise these are both ignored. Set them to
2536+ * zero for their defaults of 0x8100 (802.1Q). Should be zero
2537+ * for firmware API versions lower than 1.7.
2538+ */
2539+ __le16 first_tag;
2540+ __le16 second_tag;
2541+ /* Next byte is split into following:
2542+ * Bit 7 : 0 : No action, 1: Switch to mode defined by bits 6:0
2543+ * Bit 6 : 0 : Destination Port, 1: source port
2544+ * Bit 5..4 : L4 type
2545+ * 0: rsvd
2546+ * 1: TCP
2547+ * 2: UDP
2548+ * 3: Both TCP and UDP
2549+ * Bits 3:0 Mode
2550+ * 0: default mode
2551+ * 1: L4 port only mode
2552+ * 2: non-tunneled mode
2553+ * 3: tunneled mode
2554+ */
2555+#define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80
2556+
2557+#define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40
2558+
2559+#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00
2560+#define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10
2561+#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20
2562+#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30
2563+
2564+#define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00
2565+#define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01
2566+#define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02
2567+#define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03
2568+ u8 mode;
2569+ u8 rsvd5[5];
2570 };
2571
2572 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
2573@@ -1318,14 +1362,17 @@ struct i40e_aqc_add_remove_cloud_filters {
2574 #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
2575 #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
2576 I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
2577- u8 reserved2[4];
2578+ u8 big_buffer_flag;
2579+#define I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER 1
2580+#define I40E_AQC_ADD_CLOUD_CMD_BB 1
2581+ u8 reserved2[3];
2582 __le32 addr_high;
2583 __le32 addr_low;
2584 };
2585
2586 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
2587
2588-struct i40e_aqc_add_remove_cloud_filters_element_data {
2589+struct i40e_aqc_cloud_filters_element_data {
2590 u8 outer_mac[6];
2591 u8 inner_mac[6];
2592 __le16 inner_vlan;
2593@@ -1337,13 +1384,16 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
2594 struct {
2595 u8 data[16];
2596 } v6;
2597+ struct {
2598+ __le16 data[8];
2599+ } raw_v6;
2600 } ipaddr;
2601 __le16 flags;
2602 #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
2603 #define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
2604 I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
2605 /* 0x0000 reserved */
2606-#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
2607+/* 0x0001 reserved */
2608 /* 0x0002 reserved */
2609 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
2610 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
2611@@ -1355,6 +1405,13 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
2612 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
2613 #define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
2614 #define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
2615+/* 0x000D reserved */
2616+/* 0x000E reserved */
2617+/* 0x000F reserved */
2618+/* 0x0010 to 0x0017 is for custom filters */
2619+#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */
2620+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
2621+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */
2622
2623 #define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
2624 #define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
2625@@ -1389,6 +1446,88 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
2626 u8 response_reserved[7];
2627 };
2628
2629+/* i40e_aqc_add_rm_cloud_filt_elem_ext is used when
2630+ * I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set.
2631+ */
2632+struct i40e_aqc_add_rm_cloud_filt_elem_ext {
2633+ struct i40e_aqc_cloud_filters_element_data element;
2634+ u16 general_fields[32];
2635+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
2636+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
2637+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
2638+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
2639+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
2640+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
2641+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
2642+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
2643+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
2644+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
2645+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
2646+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
2647+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
2648+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
2649+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
2650+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
2651+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
2652+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
2653+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
2654+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
2655+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
2656+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
2657+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
2658+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
2659+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
2660+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
2661+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
2662+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
2663+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
2664+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
2665+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
2666+};
2667+
2668+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
2669+
2670+/* i40e_aqc_cloud_filters_element_bb is used when
2671+ * I40E_AQC_CLOUD_CMD_BB flag is set.
2672+ */
2673+struct i40e_aqc_cloud_filters_element_bb {
2674+ struct i40e_aqc_cloud_filters_element_data element;
2675+ u16 general_fields[32];
2676+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
2677+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
2678+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
2679+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
2680+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
2681+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
2682+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
2683+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
2684+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
2685+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
2686+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
2687+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
2688+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
2689+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
2690+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
2691+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
2692+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
2693+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
2694+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
2695+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
2696+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
2697+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
2698+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
2699+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
2700+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
2701+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
2702+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
2703+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
2704+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
2705+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
2706+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
2707+};
2708+
2709+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
2710+
2711 struct i40e_aqc_remove_cloud_filters_completion {
2712 __le16 perfect_ovlan_used;
2713 __le16 perfect_ovlan_free;
2714@@ -1400,6 +1539,61 @@ struct i40e_aqc_remove_cloud_filters_completion {
2715
2716 I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
2717
2718+/* Replace filter Command 0x025F
2719+ * uses the i40e_aqc_replace_cloud_filters,
2720+ * and the generic indirect completion structure
2721+ */
2722+struct i40e_filter_data {
2723+ u8 filter_type;
2724+ u8 input[3];
2725+};
2726+
2727+I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
2728+
2729+struct i40e_aqc_replace_cloud_filters_cmd {
2730+ u8 valid_flags;
2731+#define I40E_AQC_REPLACE_L1_FILTER 0x0
2732+#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
2733+#define I40E_AQC_GET_CLOUD_FILTERS 0x2
2734+#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
2735+#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
2736+ u8 old_filter_type;
2737+ u8 new_filter_type;
2738+ u8 tr_bit;
2739+ u8 tr_bit2;
2740+ u8 reserved[3];
2741+ __le32 addr_high;
2742+ __le32 addr_low;
2743+};
2744+
2745+I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
2746+
2747+struct i40e_aqc_replace_cloud_filters_cmd_buf {
2748+ u8 data[32];
2749+/* Filter type INPUT codes*/
2750+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
2751+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED (1 << 7UL)
2752+
2753+/* Field Vector offsets */
2754+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
2755+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
2756+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
2757+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
2758+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
2759+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
2760+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
2761+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
2762+/* big FLU */
2763+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
2764+/* big FLU */
2765+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
2766+
2767+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
2768+ struct i40e_filter_data filters[8];
2769+};
2770+
2771+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
2772+
2773 /* Add Mirror Rule (indirect or direct 0x0260)
2774 * Delete Mirror Rule (indirect or direct 0x0261)
2775 * note: some rule types (4,5) do not use an external buffer.
2776@@ -1435,7 +1629,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
2777
2778 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
2779
2780-/* Pipeline Personalization Profile */
2781+/* Dynamic Device Personalization */
2782 struct i40e_aqc_write_personalization_profile {
2783 u8 flags;
2784 u8 reserved[3];
2785@@ -1446,7 +1640,7 @@ struct i40e_aqc_write_personalization_profile {
2786
2787 I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
2788
2789-struct i40e_aqc_write_ppp_resp {
2790+struct i40e_aqc_write_ddp_resp {
2791 __le32 error_offset;
2792 __le32 error_info;
2793 __le32 addr_high;
2794@@ -1455,8 +1649,8 @@ struct i40e_aqc_write_ppp_resp {
2795
2796 struct i40e_aqc_get_applied_profiles {
2797 u8 flags;
2798-#define I40E_AQC_GET_PPP_GET_CONF 0x1
2799-#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
2800+#define I40E_AQC_GET_DDP_GET_CONF 0x1
2801+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
2802 u8 rsv[3];
2803 __le32 reserved;
2804 __le32 addr_high;
2805@@ -1726,6 +1920,8 @@ enum i40e_aq_phy_type {
2806 I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
2807 I40E_PHY_TYPE_10GBASE_AOC = 0xC,
2808 I40E_PHY_TYPE_40GBASE_AOC = 0xD,
2809+ I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
2810+ I40E_PHY_TYPE_UNSUPPORTED = 0xF,
2811 I40E_PHY_TYPE_100BASE_TX = 0x11,
2812 I40E_PHY_TYPE_1000BASE_T = 0x12,
2813 I40E_PHY_TYPE_10GBASE_T = 0x13,
2814@@ -1744,24 +1940,74 @@ enum i40e_aq_phy_type {
2815 I40E_PHY_TYPE_25GBASE_CR = 0x20,
2816 I40E_PHY_TYPE_25GBASE_SR = 0x21,
2817 I40E_PHY_TYPE_25GBASE_LR = 0x22,
2818- I40E_PHY_TYPE_MAX
2819-};
2820-
2821+ I40E_PHY_TYPE_25GBASE_AOC = 0x23,
2822+ I40E_PHY_TYPE_25GBASE_ACC = 0x24,
2823+ I40E_PHY_TYPE_2_5GBASE_T = 0x30,
2824+ I40E_PHY_TYPE_5GBASE_T = 0x31,
2825+ I40E_PHY_TYPE_MAX,
2826+ I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
2827+ I40E_PHY_TYPE_EMPTY = 0xFE,
2828+ I40E_PHY_TYPE_DEFAULT = 0xFF,
2829+};
2830+
2831+#define I40E_PHY_TYPES_BITMASK (BIT_ULL(I40E_PHY_TYPE_SGMII) | \
2832+ BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) | \
2833+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) | \
2834+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) | \
2835+ BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) | \
2836+ BIT_ULL(I40E_PHY_TYPE_XAUI) | \
2837+ BIT_ULL(I40E_PHY_TYPE_XFI) | \
2838+ BIT_ULL(I40E_PHY_TYPE_SFI) | \
2839+ BIT_ULL(I40E_PHY_TYPE_XLAUI) | \
2840+ BIT_ULL(I40E_PHY_TYPE_XLPPI) | \
2841+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) | \
2842+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) | \
2843+ BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) | \
2844+ BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) | \
2845+ BIT_ULL(I40E_PHY_TYPE_UNRECOGNIZED) | \
2846+ BIT_ULL(I40E_PHY_TYPE_UNSUPPORTED) | \
2847+ BIT_ULL(I40E_PHY_TYPE_100BASE_TX) | \
2848+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T) | \
2849+ BIT_ULL(I40E_PHY_TYPE_10GBASE_T) | \
2850+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) | \
2851+ BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) | \
2852+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) | \
2853+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) | \
2854+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) | \
2855+ BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) | \
2856+ BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) | \
2857+ BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) | \
2858+ BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) | \
2859+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) | \
2860+ BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) | \
2861+ BIT_ULL(I40E_PHY_TYPE_25GBASE_KR) | \
2862+ BIT_ULL(I40E_PHY_TYPE_25GBASE_CR) | \
2863+ BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \
2864+ BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \
2865+ BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \
2866+ BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC) | \
2867+ BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T) | \
2868+ BIT_ULL(I40E_PHY_TYPE_5GBASE_T))
2869+
2870+#define I40E_LINK_SPEED_2_5GB_SHIFT 0x0
2871 #define I40E_LINK_SPEED_100MB_SHIFT 0x1
2872 #define I40E_LINK_SPEED_1000MB_SHIFT 0x2
2873 #define I40E_LINK_SPEED_10GB_SHIFT 0x3
2874 #define I40E_LINK_SPEED_40GB_SHIFT 0x4
2875 #define I40E_LINK_SPEED_20GB_SHIFT 0x5
2876 #define I40E_LINK_SPEED_25GB_SHIFT 0x6
2877+#define I40E_LINK_SPEED_5GB_SHIFT 0x7
2878
2879 enum i40e_aq_link_speed {
2880 I40E_LINK_SPEED_UNKNOWN = 0,
2881- I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
2882- I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
2883- I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
2884- I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
2885- I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
2886- I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
2887+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
2888+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
2889+ I40E_LINK_SPEED_2_5GB = (1 << I40E_LINK_SPEED_2_5GB_SHIFT),
2890+ I40E_LINK_SPEED_5GB = (1 << I40E_LINK_SPEED_5GB_SHIFT),
2891+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
2892+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
2893+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT),
2894+ I40E_LINK_SPEED_25GB = (1 << I40E_LINK_SPEED_25GB_SHIFT),
2895 };
2896
2897 struct i40e_aqc_module_desc {
2898@@ -1787,20 +2033,27 @@ struct i40e_aq_get_phy_abilities_resp {
2899 #define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
2900 #define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
2901 __le16 eee_capability;
2902+#define I40E_AQ_EEE_AUTO 0x0001
2903 #define I40E_AQ_EEE_100BASE_TX 0x0002
2904 #define I40E_AQ_EEE_1000BASE_T 0x0004
2905 #define I40E_AQ_EEE_10GBASE_T 0x0008
2906 #define I40E_AQ_EEE_1000BASE_KX 0x0010
2907 #define I40E_AQ_EEE_10GBASE_KX4 0x0020
2908 #define I40E_AQ_EEE_10GBASE_KR 0x0040
2909+#define I40E_AQ_EEE_2_5GBASE_T 0x0100
2910+#define I40E_AQ_EEE_5GBASE_T 0x0200
2911 __le32 eeer_val;
2912 u8 d3_lpan;
2913 #define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
2914 u8 phy_type_ext;
2915-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
2916-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
2917+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
2918+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
2919 #define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
2920 #define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
2921+#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
2922+#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
2923+#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T 0x40
2924+#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T 0x80
2925 u8 fec_cfg_curr_mod_ext_info;
2926 #define I40E_AQ_ENABLE_FEC_KR 0x01
2927 #define I40E_AQ_ENABLE_FEC_RS 0x02
2928@@ -1834,10 +2087,6 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
2929 __le32 eeer;
2930 u8 low_power_ctrl;
2931 u8 phy_type_ext;
2932-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
2933-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
2934-#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
2935-#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
2936 u8 fec_config;
2937 #define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
2938 #define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
2939@@ -1855,20 +2104,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
2940 struct i40e_aq_set_mac_config {
2941 __le16 max_frame_size;
2942 u8 params;
2943-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
2944-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
2945-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
2946-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
2947-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
2948-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
2949-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
2950-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
2951-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
2952-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
2953-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
2954-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
2955-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
2956-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
2957+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
2958+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
2959+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
2960+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
2961+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
2962+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
2963+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
2964+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
2965+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
2966+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
2967+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
2968+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
2969+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
2970+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
2971+#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80
2972 u8 tx_timer_priority; /* bitmap */
2973 __le16 tx_timer_value;
2974 __le16 fc_refresh_threshold;
2975@@ -1934,19 +2184,31 @@ struct i40e_aqc_get_link_status {
2976 #define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
2977 #define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
2978 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
2979+/* Since firmware API 1.7 loopback field keeps power class info as well */
2980+#define I40E_AQ_LOOPBACK_MASK 0x07
2981+#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
2982+#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
2983 __le16 max_frame_size;
2984 u8 config;
2985 #define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
2986 #define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
2987 #define I40E_AQ_CONFIG_CRC_ENA 0x04
2988 #define I40E_AQ_CONFIG_PACING_MASK 0x78
2989- u8 power_desc;
2990+ union {
2991+ struct {
2992+ u8 power_desc;
2993 #define I40E_AQ_LINK_POWER_CLASS_1 0x00
2994 #define I40E_AQ_LINK_POWER_CLASS_2 0x01
2995 #define I40E_AQ_LINK_POWER_CLASS_3 0x02
2996 #define I40E_AQ_LINK_POWER_CLASS_4 0x03
2997 #define I40E_AQ_PWR_CLASS_MASK 0x03
2998- u8 reserved[4];
2999+ u8 reserved[4];
3000+ };
3001+ struct {
3002+ u8 link_type[4];
3003+ u8 link_type_ext;
3004+ };
3005+ };
3006 };
3007
3008 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
3009@@ -1983,11 +2245,28 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
3010
3011 /* Set Loopback mode (0x0618) */
3012 struct i40e_aqc_set_lb_mode {
3013- __le16 lb_mode;
3014+ u8 lb_level;
3015+#define I40E_AQ_LB_NONE 0
3016+#define I40E_AQ_LB_MAC 1
3017+#define I40E_AQ_LB_SERDES 2
3018+#define I40E_AQ_LB_PHY_INT 3
3019+#define I40E_AQ_LB_PHY_EXT 4
3020+#define I40E_AQ_LB_BASE_T_PCS 5
3021+#define I40E_AQ_LB_BASE_T_EXT 6
3022 #define I40E_AQ_LB_PHY_LOCAL 0x01
3023 #define I40E_AQ_LB_PHY_REMOTE 0x02
3024 #define I40E_AQ_LB_MAC_LOCAL 0x04
3025- u8 reserved[14];
3026+ u8 lb_type;
3027+#define I40E_AQ_LB_LOCAL 0
3028+#define I40E_AQ_LB_FAR 0x01
3029+ u8 speed;
3030+#define I40E_AQ_LB_SPEED_NONE 0
3031+#define I40E_AQ_LB_SPEED_1G 1
3032+#define I40E_AQ_LB_SPEED_10G 2
3033+#define I40E_AQ_LB_SPEED_40G 3
3034+#define I40E_AQ_LB_SPEED_20G 4
3035+ u8 force_speed;
3036+ u8 reserved[12];
3037 };
3038
3039 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
3040@@ -2004,7 +2283,7 @@ struct i40e_aqc_set_phy_debug {
3041 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
3042 /* Disable link manageability on a single port */
3043 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
3044-/* Disable link manageability on all ports */
3045+/* Disable link manageability on all ports needs both bits 4 and 5 */
3046 #define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
3047 u8 reserved[15];
3048 };
3049@@ -2017,26 +2296,71 @@ enum i40e_aq_phy_reg_type {
3050 I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
3051 };
3052
3053+#pragma pack(1)
3054 /* Run PHY Activity (0x0626) */
3055 struct i40e_aqc_run_phy_activity {
3056- __le16 activity_id;
3057- u8 flags;
3058- u8 reserved1;
3059- __le32 control;
3060- __le32 data;
3061- u8 reserved2[4];
3062+ u8 cmd_flags;
3063+ __le16 activity_id;
3064+#define I40E_AQ_RUN_PHY_ACT_ID_USR_DFND 0x10
3065+ u8 reserved;
3066+ union {
3067+ struct {
3068+ __le32 dnl_opcode;
3069+#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT_DUR 0x801a
3070+#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT 0x801b
3071+#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_DUR 0x1801b
3072+ __le32 data;
3073+ u8 reserved2[4];
3074+ } cmd;
3075+ struct {
3076+ __le32 cmd_status;
3077+#define I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC 0x4
3078+#define I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK 0xFFFF
3079+ __le32 data0;
3080+ __le32 data1;
3081+ } resp;
3082+ } params;
3083 };
3084+#pragma pack()
3085
3086 I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
3087
3088+/* Set PHY Register command (0x0628) */
3089+/* Get PHY Register command (0x0629) */
3090+struct i40e_aqc_phy_register_access {
3091+ u8 phy_interface;
3092+#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0
3093+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
3094+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
3095+ u8 dev_addres;
3096+ u8 cmd_flags;
3097+#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 0x01
3098+#define I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER 0x02
3099+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT 2
3100+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK (0x3 << \
3101+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT)
3102+ u8 reserved1;
3103+ __le32 reg_address;
3104+ __le32 reg_value;
3105+ u8 reserved2[4];
3106+};
3107+
3108+I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
3109+
3110 /* NVM Read command (indirect 0x0701)
3111 * NVM Erase commands (direct 0x0702)
3112 * NVM Update commands (indirect 0x0703)
3113 */
3114 struct i40e_aqc_nvm_update {
3115 u8 command_flags;
3116-#define I40E_AQ_NVM_LAST_CMD 0x01
3117-#define I40E_AQ_NVM_FLASH_ONLY 0x80
3118+#define I40E_AQ_NVM_LAST_CMD 0x01
3119+#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
3120+#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
3121+#define I40E_AQ_NVM_FLASH_ONLY 0x80
3122+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
3123+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
3124+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
3125+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
3126 u8 module_pointer;
3127 __le16 length;
3128 __le32 offset;
3129@@ -2049,8 +2373,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
3130 /* NVM Config Read (indirect 0x0704) */
3131 struct i40e_aqc_nvm_config_read {
3132 __le16 cmd_flags;
3133-#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
3134-#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
3135+#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
3136+#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
3137 #define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
3138 __le16 element_count;
3139 __le16 element_id; /* Feature/field ID */
3140@@ -2075,9 +2399,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
3141 /* Used for 0x0704 as well as for 0x0705 commands */
3142 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
3143 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
3144- BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
3145+ (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
3146 #define I40E_AQ_ANVM_FEATURE 0
3147-#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT)
3148+#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
3149 struct i40e_aqc_nvm_config_data_feature {
3150 __le16 feature_id;
3151 #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
3152@@ -2279,23 +2603,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
3153 /* Stop LLDP (direct 0x0A05) */
3154 struct i40e_aqc_lldp_stop {
3155 u8 command;
3156-#define I40E_AQ_LLDP_AGENT_STOP 0x0
3157-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
3158+#define I40E_AQ_LLDP_AGENT_STOP 0x0
3159+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
3160+#define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2
3161 u8 reserved[15];
3162 };
3163
3164 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
3165
3166 /* Start LLDP (direct 0x0A06) */
3167-
3168 struct i40e_aqc_lldp_start {
3169 u8 command;
3170-#define I40E_AQ_LLDP_AGENT_START 0x1
3171+#define I40E_AQ_LLDP_AGENT_START 0x1
3172+#define I40E_AQ_LLDP_AGENT_START_PERSIST 0x2
3173 u8 reserved[15];
3174 };
3175
3176 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
3177
3178+/* Set DCB (direct 0x0303) */
3179+struct i40e_aqc_set_dcb_parameters {
3180+ u8 command;
3181+#define I40E_AQ_DCB_SET_AGENT 0x1
3182+#define I40E_DCB_VALID 0x1
3183+ u8 valid_flags;
3184+ u8 reserved[14];
3185+};
3186+
3187+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
3188+
3189 /* Get CEE DCBX Oper Config (0x0A07)
3190 * uses the generic descriptor struct
3191 * returns below as indirect response
3192@@ -2350,20 +2686,7 @@ struct i40e_aqc_get_cee_dcb_cfg_resp {
3193 u8 oper_tc_bw[8];
3194 u8 oper_pfc_en;
3195 __le16 oper_app_prio;
3196-#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
3197-#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
3198-#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
3199-#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
3200-#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
3201-#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
3202-#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
3203 __le32 tlv_status;
3204-#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
3205-#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
3206-#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
3207-#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
3208-#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
3209-#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
3210 u8 reserved[12];
3211 };
3212
3213@@ -2374,11 +2697,12 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
3214 */
3215 struct i40e_aqc_lldp_set_local_mib {
3216 #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
3217-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
3218+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
3219+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
3220 #define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
3221 #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
3222-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \
3223- BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
3224+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
3225+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
3226 #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
3227 u8 type;
3228 u8 reserved0;
3229@@ -2390,19 +2714,37 @@ struct i40e_aqc_lldp_set_local_mib {
3230
3231 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
3232
3233+struct i40e_aqc_lldp_set_local_mib_resp {
3234+#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01
3235+ u8 status;
3236+ u8 reserved[15];
3237+};
3238+
3239+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_lldp_set_local_mib_resp);
3240+
3241 /* Stop/Start LLDP Agent (direct 0x0A09)
3242 * Used for stopping/starting specific LLDP agent. e.g. DCBx
3243 */
3244 struct i40e_aqc_lldp_stop_start_specific_agent {
3245 #define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
3246 #define I40E_AQC_START_SPECIFIC_AGENT_MASK \
3247- BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
3248+ (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
3249 u8 command;
3250 u8 reserved[15];
3251 };
3252
3253 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
3254
3255+/* Restore LLDP Agent factory settings (direct 0x0A0A) */
3256+struct i40e_aqc_lldp_restore {
3257+ u8 command;
3258+#define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0
3259+#define I40E_AQ_LLDP_AGENT_RESTORE 0x1
3260+ u8 reserved[15];
3261+};
3262+
3263+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_restore);
3264+
3265 /* Add Udp Tunnel command and completion (direct 0x0B00) */
3266 struct i40e_aqc_add_udp_tunnel {
3267 __le16 udp_port;
3268@@ -2449,7 +2791,7 @@ struct i40e_aqc_del_udp_tunnel_completion {
3269 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
3270
3271 struct i40e_aqc_get_set_rss_key {
3272-#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
3273+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
3274 #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
3275 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
3276 I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
3277@@ -2469,13 +2811,14 @@ struct i40e_aqc_get_set_rss_key_data {
3278 I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
3279
3280 struct i40e_aqc_get_set_rss_lut {
3281-#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
3282+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
3283 #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
3284 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
3285 I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
3286 __le16 vsi_id;
3287 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
3288-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
3289+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
3290+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
3291
3292 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
3293 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
3294diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
3295index 926811ad4..d7feb645f 100644
3296--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
3297+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
3298@@ -1,28 +1,5 @@
3299-/*******************************************************************************
3300- *
3301- * Intel Ethernet Controller XL710 Family Linux Driver
3302- * Copyright(c) 2013 - 2014 Intel Corporation.
3303- *
3304- * This program is free software; you can redistribute it and/or modify it
3305- * under the terms and conditions of the GNU General Public License,
3306- * version 2, as published by the Free Software Foundation.
3307- *
3308- * This program is distributed in the hope it will be useful, but WITHOUT
3309- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3310- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3311- * more details.
3312- *
3313- * You should have received a copy of the GNU General Public License along
3314- * with this program. If not, see <http://www.gnu.org/licenses/>.
3315- *
3316- * The full GNU General Public License is included in this distribution in
3317- * the file called "COPYING".
3318- *
3319- * Contact Information:
3320- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
3321- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
3322- *
3323- ******************************************************************************/
3324+/* SPDX-License-Identifier: GPL-2.0 */
3325+/* Copyright(c) 2013 - 2020 Intel Corporation. */
3326
3327 #ifndef _I40E_ALLOC_H_
3328 #define _I40E_ALLOC_H_
3329diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
3330index 1b1e2acbd..2877a1203 100644
3331--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
3332+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
3333@@ -1,28 +1,5 @@
3334-/*******************************************************************************
3335- *
3336- * Intel Ethernet Controller XL710 Family Linux Driver
3337- * Copyright(c) 2013 - 2017 Intel Corporation.
3338- *
3339- * This program is free software; you can redistribute it and/or modify it
3340- * under the terms and conditions of the GNU General Public License,
3341- * version 2, as published by the Free Software Foundation.
3342- *
3343- * This program is distributed in the hope it will be useful, but WITHOUT
3344- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3345- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3346- * more details.
3347- *
3348- * You should have received a copy of the GNU General Public License along
3349- * with this program. If not, see <http://www.gnu.org/licenses/>.
3350- *
3351- * The full GNU General Public License is included in this distribution in
3352- * the file called "COPYING".
3353- *
3354- * Contact Information:
3355- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
3356- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
3357- *
3358- ******************************************************************************/
3359+// SPDX-License-Identifier: GPL-2.0
3360+/* Copyright(c) 2013 - 2020 Intel Corporation. */
3361
3362 #include <linux/list.h>
3363 #include <linux/errno.h>
3364@@ -53,17 +30,23 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
3365 bool is_vf, u32 vf_id,
3366 u32 flag, u32 valid_flag);
3367
3368+static int i40e_client_device_register(struct i40e_info *ldev);
3369+
3370+static void i40e_client_device_unregister(struct i40e_info *ldev);
3371+
3372 static struct i40e_ops i40e_lan_ops = {
3373 .virtchnl_send = i40e_client_virtchnl_send,
3374 .setup_qvlist = i40e_client_setup_qvlist,
3375 .request_reset = i40e_client_request_reset,
3376 .update_vsi_ctxt = i40e_client_update_vsi_ctxt,
3377+ .client_device_register = i40e_client_device_register,
3378+ .client_device_unregister = i40e_client_device_unregister,
3379 };
3380
3381 /**
3382 * i40e_client_get_params - Get the params that can change at runtime
3383 * @vsi: the VSI with the message
3384- * @param: clinet param struct
3385+ * @params: clinet param struct
3386 *
3387 **/
3388 static
3389@@ -94,6 +77,10 @@ int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
3390 return 0;
3391 }
3392
3393+static void i40e_client_device_release(struct device *dev)
3394+{
3395+}
3396+
3397 /**
3398 * i40e_notify_client_of_vf_msg - call the client vf message callback
3399 * @vsi: the VSI with the message
3400@@ -287,21 +274,27 @@ out:
3401 return capable;
3402 }
3403
3404+void i40e_client_update_msix_info(struct i40e_pf *pf)
3405+{
3406+ struct i40e_client_instance *cdev = pf->cinst;
3407+
3408+ if (!cdev || !cdev->client)
3409+ return;
3410+
3411+ cdev->lan_info.msix_count = pf->num_iwarp_msix;
3412+ cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
3413+}
3414+
3415 /**
3416 * i40e_client_add_instance - add a client instance struct to the instance list
3417 * @pf: pointer to the board struct
3418- * @client: pointer to a client struct in the client list.
3419- * @existing: if there was already an existing instance
3420- *
3421 **/
3422 static void i40e_client_add_instance(struct i40e_pf *pf)
3423 {
3424 struct i40e_client_instance *cdev = NULL;
3425 struct netdev_hw_addr *mac = NULL;
3426 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3427-
3428- if (!registered_client || pf->cinst)
3429- return;
3430+ struct platform_device *platform_dev;
3431
3432 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
3433 if (!cdev)
3434@@ -320,6 +313,12 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
3435 cdev->lan_info.fw_maj_ver = pf->hw.aq.fw_maj_ver;
3436 cdev->lan_info.fw_min_ver = pf->hw.aq.fw_min_ver;
3437 cdev->lan_info.fw_build = pf->hw.aq.fw_build;
3438+ platform_dev = &cdev->lan_info.platform_dev;
3439+ platform_dev->name = "i40e_rdma";
3440+ platform_dev->id = PLATFORM_DEVID_AUTO;
3441+ platform_dev->id_auto = true;
3442+ platform_dev->dev.release = i40e_client_device_release;
3443+ platform_dev->dev.parent = &pf->pdev->dev;
3444 set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state);
3445
3446 if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
3447@@ -328,9 +327,6 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
3448 return;
3449 }
3450
3451- cdev->lan_info.msix_count = pf->num_iwarp_msix;
3452- cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
3453-
3454 mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
3455 struct netdev_hw_addr, list);
3456 if (mac)
3457@@ -338,8 +334,12 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
3458 else
3459 dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
3460
3461- cdev->client = registered_client;
3462+ cdev->client = NULL;
3463 pf->cinst = cdev;
3464+
3465+ cdev->lan_info.msix_count = pf->num_iwarp_msix;
3466+ cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
3467+ platform_device_register(platform_dev);
3468 }
3469
3470 /**
3471@@ -360,14 +360,13 @@ void i40e_client_del_instance(struct i40e_pf *pf)
3472 **/
3473 void i40e_client_subtask(struct i40e_pf *pf)
3474 {
3475- struct i40e_client *client = registered_client;
3476+ struct i40e_client *client;
3477 struct i40e_client_instance *cdev;
3478 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3479 int ret = 0;
3480
3481- if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
3482+ if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state))
3483 return;
3484- pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
3485 cdev = pf->cinst;
3486
3487 /* If we're down or resetting, just bail */
3488@@ -375,14 +374,16 @@ void i40e_client_subtask(struct i40e_pf *pf)
3489 test_bit(__I40E_CONFIG_BUSY, pf->state))
3490 return;
3491
3492- if (!client || !cdev)
3493+ if (!cdev || !cdev->client)
3494 return;
3495
3496- /* Here we handle client opens. If the client is down, but
3497- * the netdev is up, then open the client.
3498+ client = cdev->client;
3499+
3500+ /* Here we handle client opens. If the client is down, and
3501+ * the netdev is registered, then open the client.
3502 */
3503 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
3504- if (!test_bit(__I40E_VSI_DOWN, vsi->state) &&
3505+ if (vsi->netdev_registered &&
3506 client->ops && client->ops->open) {
3507 set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
3508 ret = client->ops->open(&cdev->lan_info, client);
3509@@ -393,17 +394,19 @@ void i40e_client_subtask(struct i40e_pf *pf)
3510 i40e_client_del_instance(pf);
3511 }
3512 }
3513- } else {
3514- /* Likewise for client close. If the client is up, but the netdev
3515- * is down, then close the client.
3516- */
3517- if (test_bit(__I40E_VSI_DOWN, vsi->state) &&
3518- client->ops && client->ops->close) {
3519- clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
3520- client->ops->close(&cdev->lan_info, client, false);
3521- i40e_client_release_qvlist(&cdev->lan_info);
3522- }
3523 }
3524+
3525+ /* enable/disable PE TCP_ENA flag based on netdev down/up
3526+ */
3527+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
3528+ i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
3529+ 0, 0, 0,
3530+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
3531+ else
3532+ i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
3533+ 0, 0,
3534+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE,
3535+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
3536 }
3537
3538 /**
3539@@ -436,17 +439,8 @@ int i40e_lan_add_device(struct i40e_pf *pf)
3540 pf->hw.pf_id, pf->hw.bus.bus_id,
3541 pf->hw.bus.device, pf->hw.bus.func);
3542
3543- /* If a client has already been registered, we need to add an instance
3544- * of it to our new LAN device.
3545- */
3546- if (registered_client)
3547- i40e_client_add_instance(pf);
3548-
3549- /* Since in some cases register may have happened before a device gets
3550- * added, we can schedule a subtask to go initiate the clients if
3551- * they can be launched at probe time.
3552- */
3553- pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
3554+ i40e_client_add_instance(pf);
3555+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
3556 i40e_service_event_schedule(pf);
3557
3558 out:
3559@@ -465,6 +459,8 @@ int i40e_lan_del_device(struct i40e_pf *pf)
3560 struct i40e_device *ldev, *tmp;
3561 int ret = -ENODEV;
3562
3563+ platform_device_unregister(&pf->cinst->lan_info.platform_dev);
3564+
3565 /* First, remove any client instance. */
3566 i40e_client_del_instance(pf);
3567
3568@@ -480,6 +476,7 @@ int i40e_lan_del_device(struct i40e_pf *pf)
3569 break;
3570 }
3571 }
3572+
3573 mutex_unlock(&i40e_device_mutex);
3574 return ret;
3575 }
3576@@ -517,10 +514,7 @@ static void i40e_client_release(struct i40e_client *client)
3577 "Client %s instance for PF id %d closed\n",
3578 client->name, pf->hw.pf_id);
3579 }
3580- /* delete the client instance */
3581- i40e_client_del_instance(pf);
3582- dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
3583- client->name);
3584+ cdev->client = NULL;
3585 clear_bit(__I40E_SERVICE_SCHED, pf->state);
3586 }
3587 mutex_unlock(&i40e_device_mutex);
3588@@ -539,9 +533,9 @@ static void i40e_client_prepare(struct i40e_client *client)
3589 mutex_lock(&i40e_device_mutex);
3590 list_for_each_entry(ldev, &i40e_devices, list) {
3591 pf = ldev->pf;
3592- i40e_client_add_instance(pf);
3593+ pf->cinst->client = registered_client;
3594 /* Start the client subtask */
3595- pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
3596+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
3597 i40e_service_event_schedule(pf);
3598 }
3599 mutex_unlock(&i40e_device_mutex);
3600@@ -566,7 +560,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
3601 i40e_status err;
3602
3603 err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP,
3604- 0, msg, len, NULL);
3605+ I40E_SUCCESS, msg, len, NULL);
3606 if (err)
3607 dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
3608 err, hw->aq.asq_last_status);
3609@@ -578,7 +572,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
3610 * i40e_client_setup_qvlist
3611 * @ldev: pointer to L2 context.
3612 * @client: Client pointer.
3613- * @qv_info: queue and vector list
3614+ * @qvlist_info: queue and vector list
3615 *
3616 * Return 0 on success or < 0 on error
3617 **/
3618@@ -653,7 +647,7 @@ err:
3619 * i40e_client_request_reset
3620 * @ldev: pointer to L2 context.
3621 * @client: Client pointer.
3622- * @level: reset level
3623+ * @reset_level: reset level
3624 **/
3625 static void i40e_client_request_reset(struct i40e_info *ldev,
3626 struct i40e_client *client,
3627@@ -717,13 +711,13 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
3628 return -ENOENT;
3629 }
3630
3631- if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
3632- (flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
3633+ if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
3634+ (flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
3635 ctxt.info.valid_sections =
3636 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
3637 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
3638- } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
3639- !(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
3640+ } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
3641+ !(flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
3642 ctxt.info.valid_sections =
3643 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
3644 ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
3645@@ -747,6 +741,66 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
3646 return err;
3647 }
3648
3649+static int i40e_client_device_register(struct i40e_info *ldev)
3650+{
3651+ struct i40e_client *client;
3652+ struct i40e_pf *pf;
3653+
3654+ if (!ldev) {
3655+ pr_err("Failed to reg client dev: ldev ptr NULL\n");
3656+ return -EINVAL;
3657+ }
3658+
3659+ client = ldev->client;
3660+ pf = ldev->pf;
3661+ if (!client) {
3662+ pr_err("Failed to reg client dev: client ptr NULL\n");
3663+ return -EINVAL;
3664+ }
3665+
3666+ if (!ldev->ops || !client->ops) {
3667+ pr_err("Failed to reg client dev: client dev peer_ops/ops NULL\n");
3668+ return -EINVAL;
3669+ }
3670+
3671+ if (client->version.major != I40E_CLIENT_VERSION_MAJOR ||
3672+ client->version.minor != I40E_CLIENT_VERSION_MINOR) {
3673+ pr_err("i40e: Failed to register client %s due to mismatched client interface version\n",
3674+ client->name);
3675+ pr_err("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
3676+ client->version.major, client->version.minor,
3677+ client->version.build,
3678+ i40e_client_interface_version_str);
3679+ return -EINVAL;
3680+ }
3681+
3682+ pf->cinst->client = ldev->client;
3683+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
3684+ i40e_service_event_schedule(pf);
3685+
3686+ return 0;
3687+}
3688+
3689+static void i40e_client_device_unregister(struct i40e_info *ldev)
3690+{
3691+ struct i40e_pf *pf = ldev->pf;
3692+ struct i40e_client_instance *cdev = pf->cinst;
3693+
3694+ while (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
3695+ usleep_range(500, 1000);
3696+
3697+ if (!cdev || !cdev->client || !cdev->client->ops ||
3698+ !cdev->client->ops->close) {
3699+ dev_err(&pf->pdev->dev, "Cannot close client device\n");
3700+ return;
3701+ }
3702+ cdev->client->ops->close(&cdev->lan_info, cdev->client, false);
3703+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
3704+ i40e_client_release_qvlist(&cdev->lan_info);
3705+ pf->cinst->client = NULL;
3706+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
3707+}
3708+
3709 /**
3710 * i40e_register_client - Register a i40e client driver with the L2 driver
3711 * @client: pointer to the i40e_client struct
3712@@ -791,7 +845,7 @@ int i40e_register_client(struct i40e_client *client)
3713
3714 i40e_client_prepare(client);
3715
3716- pr_info("i40e: Registered client %s\n", client->name);
3717+ pr_info("i40e: Registered client %s\n", client->name);
3718 out:
3719 return ret;
3720 }
3721diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
3722index 15b21a531..40041ad26 100644
3723--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
3724+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
3725@@ -1,32 +1,11 @@
3726-/*******************************************************************************
3727- *
3728- * Intel Ethernet Controller XL710 Family Linux Driver
3729- * Copyright(c) 2013 - 2015 Intel Corporation.
3730- *
3731- * This program is free software; you can redistribute it and/or modify it
3732- * under the terms and conditions of the GNU General Public License,
3733- * version 2, as published by the Free Software Foundation.
3734- *
3735- * This program is distributed in the hope it will be useful, but WITHOUT
3736- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3737- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3738- * more details.
3739- *
3740- * You should have received a copy of the GNU General Public License along
3741- * with this program. If not, see <http://www.gnu.org/licenses/>.
3742- *
3743- * The full GNU General Public License is included in this distribution in
3744- * the file called "COPYING".
3745- *
3746- * Contact Information:
3747- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
3748- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
3749- *
3750- ******************************************************************************/
3751+/* SPDX-License-Identifier: GPL-2.0 */
3752+/* Copyright(c) 2013 - 2020 Intel Corporation. */
3753
3754 #ifndef _I40E_CLIENT_H_
3755 #define _I40E_CLIENT_H_
3756
3757+#include <linux/platform_device.h>
3758+
3759 #define I40E_CLIENT_STR_LENGTH 10
3760
3761 /* Client interface version should be updated anytime there is a change in the
3762@@ -128,11 +107,14 @@ struct i40e_info {
3763 u16 fw_maj_ver; /* firmware major version */
3764 u16 fw_min_ver; /* firmware minor version */
3765 u32 fw_build; /* firmware build number */
3766+
3767+ struct platform_device platform_dev;
3768+ struct i40e_client *client;
3769 };
3770
3771 #define I40E_CLIENT_RESET_LEVEL_PF 1
3772 #define I40E_CLIENT_RESET_LEVEL_CORE 2
3773-#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE BIT(1)
3774+#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
3775
3776 struct i40e_ops {
3777 /* setup_q_vector_list enables queues with a particular vector */
3778@@ -155,6 +137,10 @@ struct i40e_ops {
3779 struct i40e_client *client,
3780 bool is_vf, u32 vf_id,
3781 u32 flag, u32 valid_flag);
3782+
3783+ int (*client_device_register)(struct i40e_info *ldev);
3784+
3785+ void (*client_device_unregister)(struct i40e_info *ldev);
3786 };
3787
3788 struct i40e_client_ops {
3789@@ -211,7 +197,8 @@ struct i40e_client {
3790 #define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
3791 u8 type;
3792 #define I40E_CLIENT_IWARP 0
3793- const struct i40e_client_ops *ops; /* client ops provided by the client */
3794+ /* client ops provided by the client */
3795+ const struct i40e_client_ops *ops;
3796 };
3797
3798 static inline bool i40e_client_is_registered(struct i40e_client *client)
3799diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
3800index 111426ba5..69fd8cc2e 100644
3801--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
3802+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
3803@@ -1,33 +1,10 @@
3804-/*******************************************************************************
3805- *
3806- * Intel Ethernet Controller XL710 Family Linux Driver
3807- * Copyright(c) 2013 - 2016 Intel Corporation.
3808- *
3809- * This program is free software; you can redistribute it and/or modify it
3810- * under the terms and conditions of the GNU General Public License,
3811- * version 2, as published by the Free Software Foundation.
3812- *
3813- * This program is distributed in the hope it will be useful, but WITHOUT
3814- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3815- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3816- * more details.
3817- *
3818- * You should have received a copy of the GNU General Public License along
3819- * with this program. If not, see <http://www.gnu.org/licenses/>.
3820- *
3821- * The full GNU General Public License is included in this distribution in
3822- * the file called "COPYING".
3823- *
3824- * Contact Information:
3825- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
3826- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
3827- *
3828- ******************************************************************************/
3829+// SPDX-License-Identifier: GPL-2.0
3830+/* Copyright(c) 2013 - 2020 Intel Corporation. */
3831
3832 #include "i40e_type.h"
3833 #include "i40e_adminq.h"
3834 #include "i40e_prototype.h"
3835-#include <linux/avf/virtchnl.h>
3836+#include "virtchnl.h"
3837
3838 /**
3839 * i40e_set_mac_type - Sets MAC type
3840@@ -36,9 +13,9 @@
3841 * This function sets the mac type of the adapter based on the
3842 * vendor ID and device ID stored in the hw structure.
3843 **/
3844-static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
3845+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
3846 {
3847- i40e_status status = 0;
3848+ i40e_status status = I40E_SUCCESS;
3849
3850 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
3851 switch (hw->device_id) {
3852@@ -51,10 +28,16 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
3853 case I40E_DEV_ID_QSFP_C:
3854 case I40E_DEV_ID_10G_BASE_T:
3855 case I40E_DEV_ID_10G_BASE_T4:
3856+ case I40E_DEV_ID_10G_BASE_T_BC:
3857+ case I40E_DEV_ID_10G_B:
3858+ case I40E_DEV_ID_10G_SFP:
3859+ case I40E_DEV_ID_5G_BASE_T_BC:
3860 case I40E_DEV_ID_20G_KR2:
3861 case I40E_DEV_ID_20G_KR2_A:
3862 case I40E_DEV_ID_25G_B:
3863 case I40E_DEV_ID_25G_SFP28:
3864+ case I40E_DEV_ID_X710_N3000:
3865+ case I40E_DEV_ID_XXV710_N3000:
3866 hw->mac.type = I40E_MAC_XL710;
3867 break;
3868 case I40E_DEV_ID_KX_X722:
3869@@ -146,7 +129,7 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
3870 const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
3871 {
3872 switch (stat_err) {
3873- case 0:
3874+ case I40E_SUCCESS:
3875 return "OK";
3876 case I40E_ERR_NVM:
3877 return "I40E_ERR_NVM";
3878@@ -278,6 +261,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
3879 return "I40E_NOT_SUPPORTED";
3880 case I40E_ERR_FIRMWARE_API_VERSION:
3881 return "I40E_ERR_FIRMWARE_API_VERSION";
3882+ case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
3883+ return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
3884 }
3885
3886 snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
3887@@ -298,47 +283,48 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
3888 void *buffer, u16 buf_len)
3889 {
3890 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
3891- u16 len;
3892+ u32 effective_mask = hw->debug_mask & mask;
3893 u8 *buf = (u8 *)buffer;
3894+ u16 len;
3895+ char prefix[27];
3896
3897- if ((!(mask & hw->debug_mask)) || (desc == NULL))
3898+ if (!effective_mask || !desc)
3899 return;
3900
3901- len = le16_to_cpu(aq_desc->datalen);
3902+ len = LE16_TO_CPU(aq_desc->datalen);
3903
3904- i40e_debug(hw, mask,
3905+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
3906 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
3907- le16_to_cpu(aq_desc->opcode),
3908- le16_to_cpu(aq_desc->flags),
3909- le16_to_cpu(aq_desc->datalen),
3910- le16_to_cpu(aq_desc->retval));
3911- i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
3912- le32_to_cpu(aq_desc->cookie_high),
3913- le32_to_cpu(aq_desc->cookie_low));
3914- i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
3915- le32_to_cpu(aq_desc->params.internal.param0),
3916- le32_to_cpu(aq_desc->params.internal.param1));
3917- i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
3918- le32_to_cpu(aq_desc->params.external.addr_high),
3919- le32_to_cpu(aq_desc->params.external.addr_low));
3920-
3921- if ((buffer != NULL) && (aq_desc->datalen != 0)) {
3922+ LE16_TO_CPU(aq_desc->opcode),
3923+ LE16_TO_CPU(aq_desc->flags),
3924+ LE16_TO_CPU(aq_desc->datalen),
3925+ LE16_TO_CPU(aq_desc->retval));
3926+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
3927+ "\tcookie (h,l) 0x%08X 0x%08X\n",
3928+ LE32_TO_CPU(aq_desc->cookie_high),
3929+ LE32_TO_CPU(aq_desc->cookie_low));
3930+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
3931+ "\tparam (0,1) 0x%08X 0x%08X\n",
3932+ LE32_TO_CPU(aq_desc->params.internal.param0),
3933+ LE32_TO_CPU(aq_desc->params.internal.param1));
3934+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
3935+ "\taddr (h,l) 0x%08X 0x%08X\n",
3936+ LE32_TO_CPU(aq_desc->params.external.addr_high),
3937+ LE32_TO_CPU(aq_desc->params.external.addr_low));
3938+
3939+ if (buffer && (buf_len != 0) && (len != 0) &&
3940+ (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
3941 i40e_debug(hw, mask, "AQ CMD Buffer:\n");
3942 if (buf_len < len)
3943 len = buf_len;
3944- /* write the full 16-byte chunks */
3945- if (hw->debug_mask & mask) {
3946- char prefix[27];
3947-
3948- snprintf(prefix, sizeof(prefix),
3949- "i40e %02x:%02x.%x: \t0x",
3950- hw->bus.bus_id,
3951- hw->bus.device,
3952- hw->bus.func);
3953-
3954- print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
3955- 16, 1, buf, len, false);
3956- }
3957+ snprintf(prefix, sizeof(prefix),
3958+ "i40e %02x:%02x.%x: \t0x",
3959+ hw->bus.bus_id,
3960+ hw->bus.device,
3961+ hw->bus.func);
3962+
3963+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
3964+ 16, 1, buf, len, false);
3965 }
3966 }
3967
3968@@ -352,9 +338,8 @@ bool i40e_check_asq_alive(struct i40e_hw *hw)
3969 {
3970 if (hw->aq.asq.len)
3971 return !!(rd32(hw, hw->aq.asq.len) &
3972- I40E_PF_ATQLEN_ATQENABLE_MASK);
3973- else
3974- return false;
3975+ I40E_PF_ATQLEN_ATQENABLE_MASK);
3976+ return false;
3977 }
3978
3979 /**
3980@@ -377,7 +362,7 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
3981 i40e_aqc_opc_queue_shutdown);
3982
3983 if (unloading)
3984- cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
3985+ cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
3986 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3987
3988 return status;
3989@@ -395,9 +380,9 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
3990 * Internal function to get or set RSS look up table
3991 **/
3992 static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
3993- u16 vsi_id, bool pf_lut,
3994- u8 *lut, u16 lut_size,
3995- bool set)
3996+ u16 vsi_id, bool pf_lut,
3997+ u8 *lut, u16 lut_size,
3998+ bool set)
3999 {
4000 i40e_status status;
4001 struct i40e_aq_desc desc;
4002@@ -412,22 +397,22 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
4003 i40e_aqc_opc_get_rss_lut);
4004
4005 /* Indirect command */
4006- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4007- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4008+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
4009+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
4010
4011 cmd_resp->vsi_id =
4012- cpu_to_le16((u16)((vsi_id <<
4013+ CPU_TO_LE16((u16)((vsi_id <<
4014 I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
4015 I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
4016- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
4017+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
4018
4019 if (pf_lut)
4020- cmd_resp->flags |= cpu_to_le16((u16)
4021+ cmd_resp->flags |= CPU_TO_LE16((u16)
4022 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
4023 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
4024 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
4025 else
4026- cmd_resp->flags |= cpu_to_le16((u16)
4027+ cmd_resp->flags |= CPU_TO_LE16((u16)
4028 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
4029 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
4030 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
4031@@ -448,7 +433,7 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
4032 * get the RSS lookup table, PF or VSI type
4033 **/
4034 i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
4035- bool pf_lut, u8 *lut, u16 lut_size)
4036+ bool pf_lut, u8 *lut, u16 lut_size)
4037 {
4038 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
4039 false);
4040@@ -465,7 +450,7 @@ i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
4041 * set the RSS lookup table, PF or VSI type
4042 **/
4043 i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
4044- bool pf_lut, u8 *lut, u16 lut_size)
4045+ bool pf_lut, u8 *lut, u16 lut_size)
4046 {
4047 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
4048 }
4049@@ -498,14 +483,14 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
4050 i40e_aqc_opc_get_rss_key);
4051
4052 /* Indirect command */
4053- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4054- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4055+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
4056+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
4057
4058 cmd_resp->vsi_id =
4059- cpu_to_le16((u16)((vsi_id <<
4060+ CPU_TO_LE16((u16)((vsi_id <<
4061 I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
4062 I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
4063- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
4064+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
4065
4066 status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
4067
4068@@ -520,8 +505,8 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
4069 *
4070 **/
4071 i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
4072- u16 vsi_id,
4073- struct i40e_aqc_get_set_rss_key_data *key)
4074+ u16 vsi_id,
4075+ struct i40e_aqc_get_set_rss_key_data *key)
4076 {
4077 return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
4078 }
4079@@ -535,8 +520,8 @@ i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
4080 * set the RSS key per VSI
4081 **/
4082 i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
4083- u16 vsi_id,
4084- struct i40e_aqc_get_set_rss_key_data *key)
4085+ u16 vsi_id,
4086+ struct i40e_aqc_get_set_rss_key_data *key)
4087 {
4088 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
4089 }
4090@@ -920,7 +905,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
4091 **/
4092 i40e_status i40e_init_shared_code(struct i40e_hw *hw)
4093 {
4094- i40e_status status = 0;
4095+ i40e_status status = I40E_SUCCESS;
4096 u32 port, ari, func_rid;
4097
4098 i40e_set_mac_type(hw);
4099@@ -947,8 +932,16 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
4100 else
4101 hw->pf_id = (u8)(func_rid & 0x7);
4102
4103- if (hw->mac.type == I40E_MAC_X722)
4104- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
4105+ /* NVMUpdate features structure initialization */
4106+ hw->nvmupd_features.major = I40E_NVMUPD_FEATURES_API_VER_MAJOR;
4107+ hw->nvmupd_features.minor = I40E_NVMUPD_FEATURES_API_VER_MINOR;
4108+ hw->nvmupd_features.size = sizeof(hw->nvmupd_features);
4109+ i40e_memset(hw->nvmupd_features.features, 0x0,
4110+ I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN *
4111+ sizeof(*hw->nvmupd_features.features),
4112+ I40E_NONDMA_MEM);
4113+
4114+ hw->nvmupd_features.features[0] = I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT;
4115
4116 status = i40e_init_nvm(hw);
4117 return status;
4118@@ -972,11 +965,11 @@ static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
4119 i40e_status status;
4120
4121 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
4122- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
4123+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
4124
4125 status = i40e_asq_send_command(hw, &desc, addrs,
4126 sizeof(*addrs), cmd_details);
4127- *flags = le16_to_cpu(cmd_data->command_flags);
4128+ *flags = LE16_TO_CPU(cmd_data->command_flags);
4129
4130 return status;
4131 }
4132@@ -999,9 +992,9 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
4133
4134 i40e_fill_default_direct_cmd_desc(&desc,
4135 i40e_aqc_opc_mac_address_write);
4136- cmd_data->command_flags = cpu_to_le16(flags);
4137- cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
4138- cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
4139+ cmd_data->command_flags = CPU_TO_LE16(flags);
4140+ cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]);
4141+ cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) |
4142 ((u32)mac_addr[3] << 16) |
4143 ((u32)mac_addr[4] << 8) |
4144 mac_addr[5]);
4145@@ -1060,7 +1053,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
4146 /**
4147 * i40e_pre_tx_queue_cfg - pre tx queue configure
4148 * @hw: pointer to the HW structure
4149- * @queue: target PF queue index
4150+ * @queue: target pf queue index
4151 * @enable: state change request
4152 *
4153 * Handles hw requirement to indicate intention to enable
4154@@ -1098,28 +1091,28 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
4155 * Reads the part number string from the EEPROM.
4156 **/
4157 i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
4158- u32 pba_num_size)
4159+ u32 pba_num_size)
4160 {
4161- i40e_status status = 0;
4162+ i40e_status status = I40E_SUCCESS;
4163 u16 pba_word = 0;
4164 u16 pba_size = 0;
4165 u16 pba_ptr = 0;
4166 u16 i = 0;
4167
4168 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
4169- if (status || (pba_word != 0xFAFA)) {
4170+ if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) {
4171 hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
4172 return status;
4173 }
4174
4175 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
4176- if (status) {
4177+ if (status != I40E_SUCCESS) {
4178 hw_dbg(hw, "Failed to read PBA Block pointer.\n");
4179 return status;
4180 }
4181
4182 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
4183- if (status) {
4184+ if (status != I40E_SUCCESS) {
4185 hw_dbg(hw, "Failed to read PBA Block size.\n");
4186 return status;
4187 }
4188@@ -1135,7 +1128,7 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
4189
4190 for (i = 0; i < pba_size; i++) {
4191 status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
4192- if (status) {
4193+ if (status != I40E_SUCCESS) {
4194 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
4195 return status;
4196 }
4197@@ -1169,6 +1162,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
4198 break;
4199 case I40E_PHY_TYPE_100BASE_TX:
4200 case I40E_PHY_TYPE_1000BASE_T:
4201+ case I40E_PHY_TYPE_2_5GBASE_T:
4202+ case I40E_PHY_TYPE_5GBASE_T:
4203 case I40E_PHY_TYPE_10GBASE_T:
4204 media = I40E_MEDIA_TYPE_BASET;
4205 break;
4206@@ -1180,6 +1175,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
4207 case I40E_PHY_TYPE_40GBASE_AOC:
4208 case I40E_PHY_TYPE_10GBASE_AOC:
4209 case I40E_PHY_TYPE_25GBASE_CR:
4210+ case I40E_PHY_TYPE_25GBASE_AOC:
4211+ case I40E_PHY_TYPE_25GBASE_ACC:
4212 media = I40E_MEDIA_TYPE_DA;
4213 break;
4214 case I40E_PHY_TYPE_1000BASE_KX:
4215@@ -1203,7 +1200,29 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
4216 return media;
4217 }
4218
4219-#define I40E_PF_RESET_WAIT_COUNT_A0 200
4220+/**
4221+ * i40e_poll_globr - Poll for Global Reset completion
4222+ * @hw: pointer to the hardware structure
4223+ * @retry_limit: how many times to retry before failure
4224+ **/
4225+static i40e_status i40e_poll_globr(struct i40e_hw *hw,
4226+ u32 retry_limit)
4227+{
4228+ u32 cnt, reg = 0;
4229+
4230+ for (cnt = 0; cnt < retry_limit; cnt++) {
4231+ reg = rd32(hw, I40E_GLGEN_RSTAT);
4232+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
4233+ return I40E_SUCCESS;
4234+ msleep(100);
4235+ }
4236+
4237+ hw_dbg(hw, "Global reset failed.\n");
4238+ hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
4239+
4240+ return I40E_ERR_RESET_FAILED;
4241+}
4242+
4243 #define I40E_PF_RESET_WAIT_COUNT 200
4244 /**
4245 * i40e_pf_reset - Reset the PF
4246@@ -1224,13 +1243,10 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
4247 * couple counts longer to be sure we don't just miss the end.
4248 */
4249 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
4250- I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
4251- I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
4252+ I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
4253+ I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
4254
4255- /* It can take upto 15 secs for GRST steady state.
4256- * Bump it to 16 secs max to be safe.
4257- */
4258- grst_del = grst_del * 20;
4259+ grst_del = min(grst_del * 20, 160U);
4260
4261 for (cnt = 0; cnt < grst_del; cnt++) {
4262 reg = rd32(hw, I40E_GLGEN_RSTAT);
4263@@ -1266,20 +1282,24 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
4264 * we don't need to do the PF Reset
4265 */
4266 if (!cnt) {
4267- if (hw->revision_id == 0)
4268- cnt = I40E_PF_RESET_WAIT_COUNT_A0;
4269- else
4270- cnt = I40E_PF_RESET_WAIT_COUNT;
4271+ u32 reg2 = 0;
4272+
4273 reg = rd32(hw, I40E_PFGEN_CTRL);
4274 wr32(hw, I40E_PFGEN_CTRL,
4275 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
4276- for (; cnt; cnt--) {
4277+ for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
4278 reg = rd32(hw, I40E_PFGEN_CTRL);
4279 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
4280 break;
4281+ reg2 = rd32(hw, I40E_GLGEN_RSTAT);
4282+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
4283+ break;
4284 usleep_range(1000, 2000);
4285 }
4286- if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
4287+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
4288+ if (i40e_poll_globr(hw, grst_del) != I40E_SUCCESS)
4289+ return I40E_ERR_RESET_FAILED;
4290+ } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
4291 hw_dbg(hw, "PF reset polling failed to complete.\n");
4292 return I40E_ERR_RESET_FAILED;
4293 }
4294@@ -1287,7 +1307,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
4295
4296 i40e_clear_pxe_mode(hw);
4297
4298- return 0;
4299+ return I40E_SUCCESS;
4300 }
4301
4302 /**
4303@@ -1308,18 +1328,18 @@ void i40e_clear_hw(struct i40e_hw *hw)
4304 u32 val;
4305 u32 eol = 0x7ff;
4306
4307- /* get number of interrupts, queues, and VFs */
4308+ /* get number of interrupts, queues, and vfs */
4309 val = rd32(hw, I40E_GLPCI_CNF2);
4310 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
4311- I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4312+ I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4313 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
4314- I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4315+ I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4316
4317 val = rd32(hw, I40E_PFLAN_QALLOC);
4318 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
4319- I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4320+ I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4321 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4322- I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4323+ I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4324 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
4325 num_queues = (j - base_queue) + 1;
4326 else
4327@@ -1327,9 +1347,9 @@ void i40e_clear_hw(struct i40e_hw *hw)
4328
4329 val = rd32(hw, I40E_PF_VT_PFALLOC);
4330 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
4331- I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4332+ I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4333 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
4334- I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4335+ I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4336 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
4337 num_vfs = (j - i) + 1;
4338 else
4339@@ -1392,20 +1412,8 @@ void i40e_clear_hw(struct i40e_hw *hw)
4340 **/
4341 void i40e_clear_pxe_mode(struct i40e_hw *hw)
4342 {
4343- u32 reg;
4344-
4345 if (i40e_check_asq_alive(hw))
4346 i40e_aq_clear_pxe_mode(hw, NULL);
4347-
4348- /* Clear single descriptor fetch/write-back mode */
4349- reg = rd32(hw, I40E_GLLAN_RCTL_0);
4350-
4351- if (hw->revision_id == 0) {
4352- /* As a work around clear PXE_MODE instead of setting it */
4353- wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
4354- } else {
4355- wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
4356- }
4357 }
4358
4359 /**
4360@@ -1420,9 +1428,9 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
4361 u32 gpio_val = 0;
4362 u32 port;
4363
4364- if (!hw->func_caps.led[idx])
4365+ if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
4366+ !hw->func_caps.led[idx])
4367 return 0;
4368-
4369 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
4370 port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
4371 I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
4372@@ -1441,8 +1449,15 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
4373 #define I40E_FILTER_ACTIVITY 0xE
4374 #define I40E_LINK_ACTIVITY 0xC
4375 #define I40E_MAC_ACTIVITY 0xD
4376+#define I40E_FW_LED BIT(4)
4377+#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
4378+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
4379+
4380 #define I40E_LED0 22
4381
4382+#define I40E_PIN_FUNC_SDP 0x0
4383+#define I40E_PIN_FUNC_LED 0x1
4384+
4385 /**
4386 * i40e_led_get - return current on/off mode
4387 * @hw: pointer to the hw struct
4388@@ -1468,7 +1483,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
4389 continue;
4390
4391 /* ignore gpio LED src mode entries related to the activity
4392- * LEDs
4393+ * LEDs
4394 */
4395 current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
4396 >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
4397@@ -1476,6 +1491,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
4398 case I40E_COMBINED_ACTIVITY:
4399 case I40E_FILTER_ACTIVITY:
4400 case I40E_MAC_ACTIVITY:
4401+ case I40E_LINK_ACTIVITY:
4402 continue;
4403 default:
4404 break;
4405@@ -1503,8 +1519,10 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
4406 u32 current_mode = 0;
4407 int i;
4408
4409- if (mode & 0xfffffff0)
4410+ if (mode & ~I40E_LED_MODE_VALID) {
4411 hw_dbg(hw, "invalid mode passed in %X\n", mode);
4412+ return;
4413+ }
4414
4415 /* as per the documentation GPIO 22-29 are the LED
4416 * GPIO pins named LED0..LED7
4417@@ -1524,19 +1542,30 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
4418 case I40E_COMBINED_ACTIVITY:
4419 case I40E_FILTER_ACTIVITY:
4420 case I40E_MAC_ACTIVITY:
4421+ case I40E_LINK_ACTIVITY:
4422 continue;
4423 default:
4424 break;
4425 }
4426
4427+ if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
4428+ u32 pin_func = 0;
4429+
4430+ if (mode & I40E_FW_LED)
4431+ pin_func = I40E_PIN_FUNC_SDP;
4432+ else
4433+ pin_func = I40E_PIN_FUNC_LED;
4434+
4435+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
4436+ gpio_val |= ((pin_func <<
4437+ I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
4438+ I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
4439+ }
4440 gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
4441 /* this & is a bit of paranoia, but serves as a range check */
4442 gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
4443 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
4444
4445- if (mode == I40E_LINK_ACTIVITY)
4446- blink = false;
4447-
4448 if (blink)
4449 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
4450 else
4451@@ -1566,35 +1595,61 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
4452 {
4453 struct i40e_aq_desc desc;
4454 i40e_status status;
4455+ u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
4456 u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
4457
4458 if (!abilities)
4459 return I40E_ERR_PARAM;
4460
4461- i40e_fill_default_direct_cmd_desc(&desc,
4462- i40e_aqc_opc_get_phy_abilities);
4463+ do {
4464+ i40e_fill_default_direct_cmd_desc(&desc,
4465+ i40e_aqc_opc_get_phy_abilities);
4466
4467- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4468- if (abilities_size > I40E_AQ_LARGE_BUF)
4469- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4470+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
4471+ if (abilities_size > I40E_AQ_LARGE_BUF)
4472+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
4473
4474- if (qualified_modules)
4475- desc.params.external.param0 |=
4476- cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
4477+ if (qualified_modules)
4478+ desc.params.external.param0 |=
4479+ CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
4480
4481- if (report_init)
4482- desc.params.external.param0 |=
4483- cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
4484+ if (report_init)
4485+ desc.params.external.param0 |=
4486+ CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
4487
4488- status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
4489- cmd_details);
4490+ status = i40e_asq_send_command(hw, &desc, abilities,
4491+ abilities_size, cmd_details);
4492
4493- if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
4494- status = I40E_ERR_UNKNOWN_PHY;
4495+ switch (hw->aq.asq_last_status) {
4496+ case I40E_AQ_RC_EIO:
4497+ status = I40E_ERR_UNKNOWN_PHY;
4498+ break;
4499+ case I40E_AQ_RC_EAGAIN:
4500+ usleep_range(1000, 2000);
4501+ total_delay++;
4502+ status = I40E_ERR_TIMEOUT;
4503+ break;
4504+ /* also covers I40E_AQ_RC_OK */
4505+ default:
4506+ break;
4507+ }
4508+
4509+ } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
4510+ (total_delay < max_delay));
4511+
4512+ if (status != I40E_SUCCESS)
4513+ return status;
4514
4515 if (report_init) {
4516- hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
4517- hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
4518+ if (hw->mac.type == I40E_MAC_XL710 &&
4519+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
4520+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
4521+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
4522+ } else {
4523+ hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
4524+ hw->phy.phy_types |=
4525+ ((u64)abilities->phy_type_ext << 32);
4526+ }
4527 }
4528
4529 return status;
4530@@ -1612,14 +1667,14 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
4531 * of the PHY Config parameters. This status will be indicated by the
4532 * command response.
4533 **/
4534-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
4535+i40e_status i40e_aq_set_phy_config(struct i40e_hw *hw,
4536 struct i40e_aq_set_phy_config *config,
4537 struct i40e_asq_cmd_details *cmd_details)
4538 {
4539 struct i40e_aq_desc desc;
4540 struct i40e_aq_set_phy_config *cmd =
4541- (struct i40e_aq_set_phy_config *)&desc.params.raw;
4542- enum i40e_status_code status;
4543+ (struct i40e_aq_set_phy_config *)&desc.params.raw;
4544+ i40e_status status;
4545
4546 if (!config)
4547 return I40E_ERR_PARAM;
4548@@ -1637,16 +1692,18 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
4549 /**
4550 * i40e_set_fc
4551 * @hw: pointer to the hw struct
4552+ * @aq_failures: buffer to return AdminQ failure information
4553+ * @atomic_restart: whether to enable atomic link restart
4554 *
4555 * Set the requested flow control mode using set_phy_config.
4556 **/
4557-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
4558+i40e_status i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
4559 bool atomic_restart)
4560 {
4561 enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
4562 struct i40e_aq_get_phy_abilities_resp abilities;
4563 struct i40e_aq_set_phy_config config;
4564- enum i40e_status_code status;
4565+ i40e_status status;
4566 u8 pause_mask = 0x0;
4567
4568 *aq_failures = 0x0;
4569@@ -1674,7 +1731,7 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
4570 return status;
4571 }
4572
4573- memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
4574+ memset(&config, 0, sizeof(config));
4575 /* clear the old pause settings */
4576 config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
4577 ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
4578@@ -1723,7 +1780,7 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
4579 * Tell the firmware that the driver is taking over from PXE
4580 **/
4581 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
4582- struct i40e_asq_cmd_details *cmd_details)
4583+ struct i40e_asq_cmd_details *cmd_details)
4584 {
4585 i40e_status status;
4586 struct i40e_aq_desc desc;
4587@@ -1751,8 +1808,7 @@ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
4588 * Sets up the link and restarts the Auto-Negotiation over the link.
4589 **/
4590 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
4591- bool enable_link,
4592- struct i40e_asq_cmd_details *cmd_details)
4593+ bool enable_link, struct i40e_asq_cmd_details *cmd_details)
4594 {
4595 struct i40e_aq_desc desc;
4596 struct i40e_aqc_set_link_restart_an *cmd =
4597@@ -1800,15 +1856,16 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
4598 command_flags = I40E_AQ_LSE_ENABLE;
4599 else
4600 command_flags = I40E_AQ_LSE_DISABLE;
4601- resp->command_flags = cpu_to_le16(command_flags);
4602+ resp->command_flags = CPU_TO_LE16(command_flags);
4603
4604 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4605
4606- if (status)
4607+ if (status != I40E_SUCCESS)
4608 goto aq_get_link_info_exit;
4609
4610 /* save off old link status information */
4611- hw->phy.link_info_old = *hw_link_info;
4612+ i40e_memcpy(&hw->phy.link_info_old, hw_link_info,
4613+ sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA);
4614
4615 /* update link status */
4616 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
4617@@ -1819,8 +1876,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
4618 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
4619 I40E_AQ_CONFIG_FEC_RS_ENA);
4620 hw_link_info->ext_info = resp->ext_info;
4621- hw_link_info->loopback = resp->loopback;
4622- hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
4623+ hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
4624+ hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
4625 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
4626
4627 /* update fc info */
4628@@ -1840,7 +1897,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
4629 else
4630 hw_link_info->crc_enable = false;
4631
4632- if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
4633+ if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED))
4634 hw_link_info->lse_enable = true;
4635 else
4636 hw_link_info->lse_enable = false;
4637@@ -1850,9 +1907,20 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
4638 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
4639 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
4640
4641+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
4642+ hw->mac.type != I40E_MAC_X722) {
4643+ __le32 tmp;
4644+
4645+ i40e_memcpy(&tmp, resp->link_type, sizeof(tmp),
4646+ I40E_NONDMA_TO_NONDMA);
4647+ hw->phy.phy_types = LE32_TO_CPU(tmp);
4648+ hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
4649+ }
4650+
4651 /* save link status information */
4652 if (link)
4653- *link = *hw_link_info;
4654+ i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
4655+ I40E_NONDMA_TO_NONDMA);
4656
4657 /* flag cleared so helper functions don't call AQ again */
4658 hw->phy.get_link_info = false;
4659@@ -1870,8 +1938,8 @@ aq_get_link_info_exit:
4660 * Set link interrupt mask.
4661 **/
4662 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
4663- u16 mask,
4664- struct i40e_asq_cmd_details *cmd_details)
4665+ u16 mask,
4666+ struct i40e_asq_cmd_details *cmd_details)
4667 {
4668 struct i40e_aq_desc desc;
4669 struct i40e_aqc_set_phy_int_mask *cmd =
4670@@ -1881,7 +1949,7 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
4671 i40e_fill_default_direct_cmd_desc(&desc,
4672 i40e_aqc_opc_set_phy_int_mask);
4673
4674- cmd->event_mask = cpu_to_le16(mask);
4675+ cmd->event_mask = CPU_TO_LE16(mask);
4676
4677 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4678
4679@@ -1897,7 +1965,7 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
4680 * Reset the external PHY.
4681 **/
4682 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
4683- struct i40e_asq_cmd_details *cmd_details)
4684+ struct i40e_asq_cmd_details *cmd_details)
4685 {
4686 struct i40e_aq_desc desc;
4687 struct i40e_aqc_set_phy_debug *cmd =
4688@@ -1937,23 +2005,24 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
4689 i40e_fill_default_direct_cmd_desc(&desc,
4690 i40e_aqc_opc_add_vsi);
4691
4692- cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
4693+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid);
4694 cmd->connection_type = vsi_ctx->connection_type;
4695 cmd->vf_id = vsi_ctx->vf_num;
4696- cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
4697+ cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
4698
4699- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
4700+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
4701
4702- status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
4703- sizeof(vsi_ctx->info), cmd_details);
4704+ status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
4705+ sizeof(vsi_ctx->info),
4706+ cmd_details, true);
4707
4708- if (status)
4709+ if (status != I40E_SUCCESS)
4710 goto aq_add_vsi_exit;
4711
4712- vsi_ctx->seid = le16_to_cpu(resp->seid);
4713- vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
4714- vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
4715- vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
4716+ vsi_ctx->seid = LE16_TO_CPU(resp->seid);
4717+ vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
4718+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
4719+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
4720
4721 aq_add_vsi_exit:
4722 return status;
4723@@ -1966,8 +2035,8 @@ aq_add_vsi_exit:
4724 * @cmd_details: pointer to command details structure or NULL
4725 **/
4726 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
4727- u16 seid,
4728- struct i40e_asq_cmd_details *cmd_details)
4729+ u16 seid,
4730+ struct i40e_asq_cmd_details *cmd_details)
4731 {
4732 struct i40e_aq_desc desc;
4733 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
4734@@ -1976,11 +2045,11 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
4735 i40e_status status;
4736
4737 i40e_fill_default_direct_cmd_desc(&desc,
4738- i40e_aqc_opc_set_vsi_promiscuous_modes);
4739+ i40e_aqc_opc_set_vsi_promiscuous_modes);
4740
4741- cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
4742- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
4743- cmd->seid = cpu_to_le16(seid);
4744+ cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
4745+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
4746+ cmd->seid = CPU_TO_LE16(seid);
4747
4748 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4749
4750@@ -1994,8 +2063,8 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
4751 * @cmd_details: pointer to command details structure or NULL
4752 **/
4753 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
4754- u16 seid,
4755- struct i40e_asq_cmd_details *cmd_details)
4756+ u16 seid,
4757+ struct i40e_asq_cmd_details *cmd_details)
4758 {
4759 struct i40e_aq_desc desc;
4760 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
4761@@ -2004,11 +2073,11 @@ i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
4762 i40e_status status;
4763
4764 i40e_fill_default_direct_cmd_desc(&desc,
4765- i40e_aqc_opc_set_vsi_promiscuous_modes);
4766+ i40e_aqc_opc_set_vsi_promiscuous_modes);
4767
4768- cmd->promiscuous_flags = cpu_to_le16(0);
4769- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
4770- cmd->seid = cpu_to_le16(seid);
4771+ cmd->promiscuous_flags = CPU_TO_LE16(0);
4772+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
4773+ cmd->seid = CPU_TO_LE16(seid);
4774
4775 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4776
4777@@ -2045,15 +2114,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
4778 flags |= I40E_AQC_SET_VSI_PROMISC_TX;
4779 }
4780
4781- cmd->promiscuous_flags = cpu_to_le16(flags);
4782+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
4783
4784- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
4785+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
4786 if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
4787- (hw->aq.api_maj_ver > 1))
4788- cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
4789+ (hw->aq.api_maj_ver > 1))
4790+ cmd->valid_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_TX);
4791
4792- cmd->seid = cpu_to_le16(seid);
4793- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4794+ cmd->seid = CPU_TO_LE16(seid);
4795+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
4796+ cmd_details, true);
4797
4798 return status;
4799 }
4800@@ -2080,11 +2150,49 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
4801 if (set)
4802 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
4803
4804- cmd->promiscuous_flags = cpu_to_le16(flags);
4805+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
4806+
4807+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
4808+
4809+ cmd->seid = CPU_TO_LE16(seid);
4810+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
4811+ cmd_details, true);
4812+
4813+ return status;
4814+}
4815+
4816+/**
4817+* i40e_aq_set_vsi_full_promiscuous
4818+* @hw: pointer to the hw struct
4819+* @seid: VSI number
4820+* @set: set promiscuous enable/disable
4821+* @cmd_details: pointer to command details structure or NULL
4822+**/
4823+i40e_status i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
4824+ u16 seid, bool set,
4825+ struct i40e_asq_cmd_details *cmd_details)
4826+{
4827+ struct i40e_aq_desc desc;
4828+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
4829+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
4830+ i40e_status status;
4831+ u16 flags = 0;
4832+
4833+ i40e_fill_default_direct_cmd_desc(&desc,
4834+ i40e_aqc_opc_set_vsi_promiscuous_modes);
4835+
4836+ if (set)
4837+ flags = I40E_AQC_SET_VSI_PROMISC_UNICAST |
4838+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
4839+ I40E_AQC_SET_VSI_PROMISC_BROADCAST;
4840+
4841+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
4842
4843- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
4844+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST |
4845+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
4846+ I40E_AQC_SET_VSI_PROMISC_BROADCAST);
4847
4848- cmd->seid = cpu_to_le16(seid);
4849+ cmd->seid = CPU_TO_LE16(seid);
4850 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4851
4852 return status;
4853@@ -2098,29 +2206,29 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
4854 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
4855 * @cmd_details: pointer to command details structure or NULL
4856 **/
4857-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
4858- u16 seid, bool enable,
4859- u16 vid,
4860+i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
4861+ u16 seid, bool enable, u16 vid,
4862 struct i40e_asq_cmd_details *cmd_details)
4863 {
4864 struct i40e_aq_desc desc;
4865 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
4866 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
4867- enum i40e_status_code status;
4868+ i40e_status status;
4869 u16 flags = 0;
4870
4871 i40e_fill_default_direct_cmd_desc(&desc,
4872- i40e_aqc_opc_set_vsi_promiscuous_modes);
4873+ i40e_aqc_opc_set_vsi_promiscuous_modes);
4874
4875 if (enable)
4876 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
4877
4878- cmd->promiscuous_flags = cpu_to_le16(flags);
4879- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
4880- cmd->seid = cpu_to_le16(seid);
4881- cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
4882+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
4883+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
4884+ cmd->seid = CPU_TO_LE16(seid);
4885+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
4886
4887- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4888+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
4889+ cmd_details, true);
4890
4891 return status;
4892 }
4893@@ -2133,29 +2241,29 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
4894 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
4895 * @cmd_details: pointer to command details structure or NULL
4896 **/
4897-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
4898- u16 seid, bool enable,
4899- u16 vid,
4900+i40e_status i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
4901+ u16 seid, bool enable, u16 vid,
4902 struct i40e_asq_cmd_details *cmd_details)
4903 {
4904 struct i40e_aq_desc desc;
4905 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
4906 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
4907- enum i40e_status_code status;
4908+ i40e_status status;
4909 u16 flags = 0;
4910
4911 i40e_fill_default_direct_cmd_desc(&desc,
4912- i40e_aqc_opc_set_vsi_promiscuous_modes);
4913+ i40e_aqc_opc_set_vsi_promiscuous_modes);
4914
4915 if (enable)
4916 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
4917
4918- cmd->promiscuous_flags = cpu_to_le16(flags);
4919- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
4920- cmd->seid = cpu_to_le16(seid);
4921- cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
4922+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
4923+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
4924+ cmd->seid = CPU_TO_LE16(seid);
4925+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
4926
4927- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4928+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
4929+ cmd_details, true);
4930
4931 return status;
4932 }
4933@@ -2184,10 +2292,10 @@ i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
4934 if (enable)
4935 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
4936
4937- cmd->promiscuous_flags = cpu_to_le16(flags);
4938- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
4939- cmd->seid = cpu_to_le16(seid);
4940- cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
4941+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
4942+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
4943+ cmd->seid = CPU_TO_LE16(seid);
4944+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
4945
4946 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4947
4948@@ -2217,13 +2325,13 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
4949
4950 if (set_filter)
4951 cmd->promiscuous_flags
4952- |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
4953+ |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
4954 else
4955 cmd->promiscuous_flags
4956- &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
4957+ &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
4958
4959- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
4960- cmd->seid = cpu_to_le16(seid);
4961+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
4962+ cmd->seid = CPU_TO_LE16(seid);
4963 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4964
4965 return status;
4966@@ -2237,8 +2345,8 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
4967 * @cmd_details: pointer to command details structure or NULL
4968 **/
4969 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
4970- u16 seid, bool enable,
4971- struct i40e_asq_cmd_details *cmd_details)
4972+ u16 seid, bool enable,
4973+ struct i40e_asq_cmd_details *cmd_details)
4974 {
4975 struct i40e_aq_desc desc;
4976 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
4977@@ -2251,9 +2359,9 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
4978 if (enable)
4979 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
4980
4981- cmd->promiscuous_flags = cpu_to_le16(flags);
4982- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
4983- cmd->seid = cpu_to_le16(seid);
4984+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
4985+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN);
4986+ cmd->seid = CPU_TO_LE16(seid);
4987
4988 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4989
4990@@ -2281,20 +2389,20 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
4991 i40e_fill_default_direct_cmd_desc(&desc,
4992 i40e_aqc_opc_get_vsi_parameters);
4993
4994- cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
4995+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
4996
4997- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4998+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
4999
5000 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
5001 sizeof(vsi_ctx->info), NULL);
5002
5003- if (status)
5004+ if (status != I40E_SUCCESS)
5005 goto aq_get_vsi_params_exit;
5006
5007- vsi_ctx->seid = le16_to_cpu(resp->seid);
5008- vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
5009- vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
5010- vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
5011+ vsi_ctx->seid = LE16_TO_CPU(resp->seid);
5012+ vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
5013+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
5014+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
5015
5016 aq_get_vsi_params_exit:
5017 return status;
5018@@ -2322,15 +2430,16 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
5019
5020 i40e_fill_default_direct_cmd_desc(&desc,
5021 i40e_aqc_opc_update_vsi_parameters);
5022- cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
5023+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
5024
5025- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5026+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5027
5028- status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
5029- sizeof(vsi_ctx->info), cmd_details);
5030+ status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
5031+ sizeof(vsi_ctx->info),
5032+ cmd_details, true);
5033
5034- vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
5035- vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
5036+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
5037+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
5038
5039 return status;
5040 }
5041@@ -2357,13 +2466,13 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
5042
5043 i40e_fill_default_direct_cmd_desc(&desc,
5044 i40e_aqc_opc_get_switch_config);
5045- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5046+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
5047 if (buf_size > I40E_AQ_LARGE_BUF)
5048- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5049- scfg->seid = cpu_to_le16(*start_seid);
5050+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
5051+ scfg->seid = CPU_TO_LE16(*start_seid);
5052
5053 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
5054- *start_seid = le16_to_cpu(scfg->seid);
5055+ *start_seid = LE16_TO_CPU(scfg->seid);
5056
5057 return status;
5058 }
5059@@ -2372,26 +2481,31 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
5060 * i40e_aq_set_switch_config
5061 * @hw: pointer to the hardware structure
5062 * @flags: bit flag values to set
5063+ * @mode: cloud filter mode
5064 * @valid_flags: which bit flags to set
5065 * @cmd_details: pointer to command details structure or NULL
5066 *
5067 * Set switch configuration bits
5068 **/
5069-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
5070- u16 flags,
5071- u16 valid_flags,
5072+i40e_status i40e_aq_set_switch_config(struct i40e_hw *hw,
5073+ u16 flags, u16 valid_flags, u8 mode,
5074 struct i40e_asq_cmd_details *cmd_details)
5075 {
5076 struct i40e_aq_desc desc;
5077 struct i40e_aqc_set_switch_config *scfg =
5078 (struct i40e_aqc_set_switch_config *)&desc.params.raw;
5079- enum i40e_status_code status;
5080+ i40e_status status;
5081
5082 i40e_fill_default_direct_cmd_desc(&desc,
5083 i40e_aqc_opc_set_switch_config);
5084- scfg->flags = cpu_to_le16(flags);
5085- scfg->valid_flags = cpu_to_le16(valid_flags);
5086-
5087+ scfg->flags = CPU_TO_LE16(flags);
5088+ scfg->valid_flags = CPU_TO_LE16(valid_flags);
5089+ scfg->mode = mode;
5090+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
5091+ scfg->switch_tag = CPU_TO_LE16(hw->switch_tag);
5092+ scfg->first_tag = CPU_TO_LE16(hw->first_tag);
5093+ scfg->second_tag = CPU_TO_LE16(hw->second_tag);
5094+ }
5095 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5096
5097 return status;
5098@@ -2424,17 +2538,17 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
5099
5100 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5101
5102- if (!status) {
5103- if (fw_major_version)
5104- *fw_major_version = le16_to_cpu(resp->fw_major);
5105- if (fw_minor_version)
5106- *fw_minor_version = le16_to_cpu(resp->fw_minor);
5107- if (fw_build)
5108- *fw_build = le32_to_cpu(resp->fw_build);
5109- if (api_major_version)
5110- *api_major_version = le16_to_cpu(resp->api_major);
5111- if (api_minor_version)
5112- *api_minor_version = le16_to_cpu(resp->api_minor);
5113+ if (status == I40E_SUCCESS) {
5114+ if (fw_major_version != NULL)
5115+ *fw_major_version = LE16_TO_CPU(resp->fw_major);
5116+ if (fw_minor_version != NULL)
5117+ *fw_minor_version = LE16_TO_CPU(resp->fw_minor);
5118+ if (fw_build != NULL)
5119+ *fw_build = LE32_TO_CPU(resp->fw_build);
5120+ if (api_major_version != NULL)
5121+ *api_major_version = LE16_TO_CPU(resp->api_major);
5122+ if (api_minor_version != NULL)
5123+ *api_minor_version = LE16_TO_CPU(resp->api_minor);
5124 }
5125
5126 return status;
5127@@ -2463,7 +2577,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
5128
5129 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
5130
5131- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5132+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5133 cmd->driver_major_ver = dv->major_version;
5134 cmd->driver_minor_ver = dv->minor_version;
5135 cmd->driver_build_ver = dv->build_version;
5136@@ -2486,18 +2600,18 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
5137 * @link_up: pointer to bool (true/false = linkup/linkdown)
5138 *
5139 * Variable link_up true if link is up, false if link is down.
5140- * The variable link_up is invalid if returned value of status != 0
5141+ * The variable link_up is invalid if returned value of status != I40E_SUCCESS
5142 *
5143 * Side effect: LinkStatusEvent reporting becomes enabled
5144 **/
5145 i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
5146 {
5147- i40e_status status = 0;
5148+ i40e_status status = I40E_SUCCESS;
5149
5150 if (hw->phy.get_link_info) {
5151 status = i40e_update_link_info(hw);
5152
5153- if (status)
5154+ if (status != I40E_SUCCESS)
5155 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
5156 status);
5157 }
5158@@ -2514,7 +2628,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
5159 i40e_status i40e_update_link_info(struct i40e_hw *hw)
5160 {
5161 struct i40e_aq_get_phy_abilities_resp abilities;
5162- i40e_status status = 0;
5163+ i40e_status status = I40E_SUCCESS;
5164
5165 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
5166 if (status)
5167@@ -2529,14 +2643,20 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw)
5168 if (status)
5169 return status;
5170
5171- hw->phy.link_info.req_fec_info =
5172- abilities.fec_cfg_curr_mod_ext_info &
5173- (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
5174+ if (abilities.fec_cfg_curr_mod_ext_info &
5175+ I40E_AQ_ENABLE_FEC_AUTO)
5176+ hw->phy.link_info.req_fec_info =
5177+ (I40E_AQ_REQUEST_FEC_KR |
5178+ I40E_AQ_REQUEST_FEC_RS);
5179+ else
5180+ hw->phy.link_info.req_fec_info =
5181+ abilities.fec_cfg_curr_mod_ext_info &
5182+ (I40E_AQ_REQUEST_FEC_KR |
5183+ I40E_AQ_REQUEST_FEC_RS);
5184
5185- memcpy(hw->phy.link_info.module_type, &abilities.module_type,
5186- sizeof(hw->phy.link_info.module_type));
5187+ i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
5188+ sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
5189 }
5190-
5191 return status;
5192 }
5193
5194@@ -2574,8 +2694,8 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
5195
5196 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
5197
5198- cmd->uplink_seid = cpu_to_le16(uplink_seid);
5199- cmd->downlink_seid = cpu_to_le16(downlink_seid);
5200+ cmd->uplink_seid = CPU_TO_LE16(uplink_seid);
5201+ cmd->downlink_seid = CPU_TO_LE16(downlink_seid);
5202 cmd->enable_tcs = enabled_tc;
5203 if (!uplink_seid)
5204 veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
5205@@ -2588,12 +2708,12 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
5206 if (!enable_stats)
5207 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
5208
5209- cmd->veb_flags = cpu_to_le16(veb_flags);
5210+ cmd->veb_flags = CPU_TO_LE16(veb_flags);
5211
5212 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5213
5214 if (!status && veb_seid)
5215- *veb_seid = le16_to_cpu(resp->veb_seid);
5216+ *veb_seid = LE16_TO_CPU(resp->veb_seid);
5217
5218 return status;
5219 }
5220@@ -2629,22 +2749,22 @@ i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
5221
5222 i40e_fill_default_direct_cmd_desc(&desc,
5223 i40e_aqc_opc_get_veb_parameters);
5224- cmd_resp->seid = cpu_to_le16(veb_seid);
5225+ cmd_resp->seid = CPU_TO_LE16(veb_seid);
5226
5227 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5228 if (status)
5229 goto get_veb_exit;
5230
5231 if (switch_id)
5232- *switch_id = le16_to_cpu(cmd_resp->switch_id);
5233+ *switch_id = LE16_TO_CPU(cmd_resp->switch_id);
5234 if (statistic_index)
5235- *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
5236+ *statistic_index = LE16_TO_CPU(cmd_resp->statistic_index);
5237 if (vebs_used)
5238- *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
5239+ *vebs_used = LE16_TO_CPU(cmd_resp->vebs_used);
5240 if (vebs_free)
5241- *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
5242+ *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free);
5243 if (floating) {
5244- u16 flags = le16_to_cpu(cmd_resp->veb_flags);
5245+ u16 flags = LE16_TO_CPU(cmd_resp->veb_flags);
5246
5247 if (flags & I40E_AQC_ADD_VEB_FLOATING)
5248 *floating = true;
5249@@ -2684,22 +2804,22 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
5250
5251 /* prep the rest of the request */
5252 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
5253- cmd->num_addresses = cpu_to_le16(count);
5254- cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
5255+ cmd->num_addresses = CPU_TO_LE16(count);
5256+ cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
5257 cmd->seid[1] = 0;
5258 cmd->seid[2] = 0;
5259
5260 for (i = 0; i < count; i++)
5261 if (is_multicast_ether_addr(mv_list[i].mac_addr))
5262 mv_list[i].flags |=
5263- cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
5264+ CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
5265
5266- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5267+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5268 if (buf_size > I40E_AQ_LARGE_BUF)
5269- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5270+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
5271
5272- status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
5273- cmd_details);
5274+ status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
5275+ cmd_details, true);
5276
5277 return status;
5278 }
5279@@ -2731,17 +2851,17 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
5280
5281 /* prep the rest of the request */
5282 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
5283- cmd->num_addresses = cpu_to_le16(count);
5284- cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
5285+ cmd->num_addresses = CPU_TO_LE16(count);
5286+ cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
5287 cmd->seid[1] = 0;
5288 cmd->seid[2] = 0;
5289
5290- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5291+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5292 if (buf_size > I40E_AQ_LARGE_BUF)
5293- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5294+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
5295
5296- status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
5297- cmd_details);
5298+ status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
5299+ cmd_details, true);
5300
5301 return status;
5302 }
5303@@ -2757,17 +2877,17 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
5304 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
5305 * @cmd_details: pointer to command details structure or NULL
5306 * @rule_id: Rule ID returned from FW
5307- * @rule_used: Number of rules used in internal switch
5308- * @rule_free: Number of rules free in internal switch
5309+ * @rules_used: Number of rules used in internal switch
5310+ * @rules_free: Number of rules free in internal switch
5311 *
5312 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
5313 * VEBs/VEPA elements only
5314 **/
5315 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
5316- u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
5317- u16 count, __le16 *mr_list,
5318- struct i40e_asq_cmd_details *cmd_details,
5319- u16 *rule_id, u16 *rules_used, u16 *rules_free)
5320+ u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
5321+ u16 count, __le16 *mr_list,
5322+ struct i40e_asq_cmd_details *cmd_details,
5323+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
5324 {
5325 struct i40e_aq_desc desc;
5326 struct i40e_aqc_add_delete_mirror_rule *cmd =
5327@@ -2781,29 +2901,29 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
5328
5329 /* prep the rest of the request */
5330 i40e_fill_default_direct_cmd_desc(&desc, opcode);
5331- cmd->seid = cpu_to_le16(sw_seid);
5332- cmd->rule_type = cpu_to_le16(rule_type &
5333+ cmd->seid = CPU_TO_LE16(sw_seid);
5334+ cmd->rule_type = CPU_TO_LE16(rule_type &
5335 I40E_AQC_MIRROR_RULE_TYPE_MASK);
5336- cmd->num_entries = cpu_to_le16(count);
5337+ cmd->num_entries = CPU_TO_LE16(count);
5338 /* Dest VSI for add, rule_id for delete */
5339- cmd->destination = cpu_to_le16(id);
5340+ cmd->destination = CPU_TO_LE16(id);
5341 if (mr_list) {
5342- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5343+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
5344 I40E_AQ_FLAG_RD));
5345 if (buf_size > I40E_AQ_LARGE_BUF)
5346- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5347+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
5348 }
5349
5350 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
5351 cmd_details);
5352- if (!status ||
5353+ if (status == I40E_SUCCESS ||
5354 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
5355 if (rule_id)
5356- *rule_id = le16_to_cpu(resp->rule_id);
5357+ *rule_id = LE16_TO_CPU(resp->rule_id);
5358 if (rules_used)
5359- *rules_used = le16_to_cpu(resp->mirror_rules_used);
5360+ *rules_used = LE16_TO_CPU(resp->mirror_rules_used);
5361 if (rules_free)
5362- *rules_free = le16_to_cpu(resp->mirror_rules_free);
5363+ *rules_free = LE16_TO_CPU(resp->mirror_rules_free);
5364 }
5365 return status;
5366 }
5367@@ -2818,8 +2938,8 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
5368 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
5369 * @cmd_details: pointer to command details structure or NULL
5370 * @rule_id: Rule ID returned from FW
5371- * @rule_used: Number of rules used in internal switch
5372- * @rule_free: Number of rules free in internal switch
5373+ * @rules_used: Number of rules used in internal switch
5374+ * @rules_free: Number of rules free in internal switch
5375 *
5376 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
5377 **/
5378@@ -2849,8 +2969,8 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
5379 * add_mirrorrule.
5380 * @mr_list: list of mirrored VLAN IDs to be removed
5381 * @cmd_details: pointer to command details structure or NULL
5382- * @rule_used: Number of rules used in internal switch
5383- * @rule_free: Number of rules free in internal switch
5384+ * @rules_used: Number of rules used in internal switch
5385+ * @rules_free: Number of rules free in internal switch
5386 *
5387 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
5388 **/
5389@@ -2877,7 +2997,7 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
5390 /**
5391 * i40e_aq_send_msg_to_vf
5392 * @hw: pointer to the hardware structure
5393- * @vfid: VF id to send msg
5394+ * @vfid: vf id to send msg
5395 * @v_opcode: opcodes for VF-PF communication
5396 * @v_retval: return error code
5397 * @msg: pointer to the msg buffer
5398@@ -2896,16 +3016,16 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
5399 i40e_status status;
5400
5401 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
5402- cmd->id = cpu_to_le32(vfid);
5403- desc.cookie_high = cpu_to_le32(v_opcode);
5404- desc.cookie_low = cpu_to_le32(v_retval);
5405- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
5406+ cmd->id = CPU_TO_LE32(vfid);
5407+ desc.cookie_high = CPU_TO_LE32(v_opcode);
5408+ desc.cookie_low = CPU_TO_LE32(v_retval);
5409+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
5410 if (msglen) {
5411- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5412+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
5413 I40E_AQ_FLAG_RD));
5414 if (msglen > I40E_AQ_LARGE_BUF)
5415- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5416- desc.datalen = cpu_to_le16(msglen);
5417+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
5418+ desc.datalen = CPU_TO_LE16(msglen);
5419 }
5420 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
5421
5422@@ -2935,13 +3055,13 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
5423
5424 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
5425
5426- cmd_resp->address = cpu_to_le32(reg_addr);
5427+ cmd_resp->address = CPU_TO_LE32(reg_addr);
5428
5429 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5430
5431- if (!status) {
5432- *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
5433- (u64)le32_to_cpu(cmd_resp->value_low);
5434+ if (status == I40E_SUCCESS) {
5435+ *reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) |
5436+ (u64)LE32_TO_CPU(cmd_resp->value_low);
5437 }
5438
5439 return status;
5440@@ -2957,8 +3077,8 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
5441 * Write to a register using the admin queue commands
5442 **/
5443 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
5444- u32 reg_addr, u64 reg_val,
5445- struct i40e_asq_cmd_details *cmd_details)
5446+ u32 reg_addr, u64 reg_val,
5447+ struct i40e_asq_cmd_details *cmd_details)
5448 {
5449 struct i40e_aq_desc desc;
5450 struct i40e_aqc_debug_reg_read_write *cmd =
5451@@ -2967,9 +3087,9 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
5452
5453 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
5454
5455- cmd->address = cpu_to_le32(reg_addr);
5456- cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
5457- cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
5458+ cmd->address = CPU_TO_LE32(reg_addr);
5459+ cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32));
5460+ cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF));
5461
5462 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5463
5464@@ -3000,9 +3120,9 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
5465
5466 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
5467
5468- cmd_resp->resource_id = cpu_to_le16(resource);
5469- cmd_resp->access_type = cpu_to_le16(access);
5470- cmd_resp->resource_number = cpu_to_le32(sdp_number);
5471+ cmd_resp->resource_id = CPU_TO_LE16(resource);
5472+ cmd_resp->access_type = CPU_TO_LE16(access);
5473+ cmd_resp->resource_number = CPU_TO_LE32(sdp_number);
5474
5475 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5476 /* The completion specifies the maximum time in ms that the driver
5477@@ -3011,8 +3131,8 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
5478 * busy return value and the timeout field indicates the maximum time
5479 * the current owner of the resource has to free it.
5480 */
5481- if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
5482- *timeout = le32_to_cpu(cmd_resp->timeout);
5483+ if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
5484+ *timeout = LE32_TO_CPU(cmd_resp->timeout);
5485
5486 return status;
5487 }
5488@@ -3038,8 +3158,8 @@ i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
5489
5490 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
5491
5492- cmd->resource_id = cpu_to_le16(resource);
5493- cmd->resource_number = cpu_to_le32(sdp_number);
5494+ cmd->resource_id = CPU_TO_LE16(resource);
5495+ cmd->resource_number = CPU_TO_LE32(sdp_number);
5496
5497 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5498
5499@@ -3080,12 +3200,12 @@ i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
5500 if (last_command)
5501 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
5502 cmd->module_pointer = module_pointer;
5503- cmd->offset = cpu_to_le32(offset);
5504- cmd->length = cpu_to_le16(length);
5505+ cmd->offset = CPU_TO_LE32(offset);
5506+ cmd->length = CPU_TO_LE16(length);
5507
5508- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5509+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
5510 if (length > I40E_AQ_LARGE_BUF)
5511- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5512+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
5513
5514 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
5515
5516@@ -3093,6 +3213,100 @@ i40e_aq_read_nvm_exit:
5517 return status;
5518 }
5519
5520+/**
5521+ * i40e_aq_read_nvm_config - read an nvm config block
5522+ * @hw: pointer to the hw struct
5523+ * @cmd_flags: NVM access admin command bits
5524+ * @field_id: field or feature id
5525+ * @data: buffer for result
5526+ * @buf_size: buffer size
5527+ * @element_count: pointer to count of elements read by FW
5528+ * @cmd_details: pointer to command details structure or NULL
5529+ **/
5530+i40e_status i40e_aq_read_nvm_config(struct i40e_hw *hw,
5531+ u8 cmd_flags, u32 field_id, void *data,
5532+ u16 buf_size, u16 *element_count,
5533+ struct i40e_asq_cmd_details *cmd_details)
5534+{
5535+ struct i40e_aq_desc desc;
5536+ struct i40e_aqc_nvm_config_read *cmd =
5537+ (struct i40e_aqc_nvm_config_read *)&desc.params.raw;
5538+ i40e_status status;
5539+
5540+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read);
5541+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF));
5542+ if (buf_size > I40E_AQ_LARGE_BUF)
5543+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
5544+
5545+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
5546+ cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id));
5547+ if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK)
5548+ cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16));
5549+ else
5550+ cmd->element_id_msw = 0;
5551+
5552+ status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
5553+
5554+ if (!status && element_count)
5555+ *element_count = LE16_TO_CPU(cmd->element_count);
5556+
5557+ return status;
5558+}
5559+
5560+/**
5561+ * i40e_aq_write_nvm_config - write an nvm config block
5562+ * @hw: pointer to the hw struct
5563+ * @cmd_flags: NVM access admin command bits
5564+ * @data: buffer for result
5565+ * @buf_size: buffer size
5566+ * @element_count: count of elements to be written
5567+ * @cmd_details: pointer to command details structure or NULL
5568+ **/
5569+i40e_status i40e_aq_write_nvm_config(struct i40e_hw *hw,
5570+ u8 cmd_flags, void *data, u16 buf_size,
5571+ u16 element_count,
5572+ struct i40e_asq_cmd_details *cmd_details)
5573+{
5574+ struct i40e_aq_desc desc;
5575+ struct i40e_aqc_nvm_config_write *cmd =
5576+ (struct i40e_aqc_nvm_config_write *)&desc.params.raw;
5577+ i40e_status status;
5578+
5579+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write);
5580+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5581+ if (buf_size > I40E_AQ_LARGE_BUF)
5582+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
5583+
5584+ cmd->element_count = CPU_TO_LE16(element_count);
5585+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
5586+ status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
5587+
5588+ return status;
5589+}
5590+
5591+/**
5592+ * i40e_aq_oem_post_update - triggers an OEM specific flow after update
5593+ * @hw: pointer to the hw struct
5594+ * @buff: buffer for result
5595+ * @buff_size: buffer size
5596+ * @cmd_details: pointer to command details structure or NULL
5597+ **/
5598+i40e_status i40e_aq_oem_post_update(struct i40e_hw *hw,
5599+ void *buff, u16 buff_size,
5600+ struct i40e_asq_cmd_details *cmd_details)
5601+{
5602+ struct i40e_aq_desc desc;
5603+ i40e_status status;
5604+
5605+
5606+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update);
5607+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5608+ if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH)
5609+ status = I40E_ERR_NOT_IMPLEMENTED;
5610+
5611+ return status;
5612+}
5613+
5614 /**
5615 * i40e_aq_erase_nvm
5616 * @hw: pointer to the hw struct
5617@@ -3105,8 +3319,8 @@ i40e_aq_read_nvm_exit:
5618 * Erase the NVM sector using the admin queue commands
5619 **/
5620 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
5621- u32 offset, u16 length, bool last_command,
5622- struct i40e_asq_cmd_details *cmd_details)
5623+ u32 offset, u16 length, bool last_command,
5624+ struct i40e_asq_cmd_details *cmd_details)
5625 {
5626 struct i40e_aq_desc desc;
5627 struct i40e_aqc_nvm_update *cmd =
5628@@ -3125,8 +3339,8 @@ i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
5629 if (last_command)
5630 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
5631 cmd->module_pointer = module_pointer;
5632- cmd->offset = cpu_to_le32(offset);
5633- cmd->length = cpu_to_le16(length);
5634+ cmd->offset = CPU_TO_LE32(offset);
5635+ cmd->length = CPU_TO_LE16(length);
5636
5637 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5638
5639@@ -3151,29 +3365,33 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
5640 u32 valid_functions, num_functions;
5641 u32 number, logical_id, phys_id;
5642 struct i40e_hw_capabilities *p;
5643+ i40e_status status;
5644+ u16 id, ocp_cfg_word0;
5645 u8 major_rev;
5646 u32 i = 0;
5647- u16 id;
5648
5649 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
5650
5651 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
5652- p = &hw->dev_caps;
5653+ p = (struct i40e_hw_capabilities *)&hw->dev_caps;
5654 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
5655- p = &hw->func_caps;
5656+ p = (struct i40e_hw_capabilities *)&hw->func_caps;
5657 else
5658 return;
5659
5660 for (i = 0; i < cap_count; i++, cap++) {
5661- id = le16_to_cpu(cap->id);
5662- number = le32_to_cpu(cap->number);
5663- logical_id = le32_to_cpu(cap->logical_id);
5664- phys_id = le32_to_cpu(cap->phys_id);
5665+ id = LE16_TO_CPU(cap->id);
5666+ number = LE32_TO_CPU(cap->number);
5667+ logical_id = LE32_TO_CPU(cap->logical_id);
5668+ phys_id = LE32_TO_CPU(cap->phys_id);
5669 major_rev = cap->major_rev;
5670
5671 switch (id) {
5672 case I40E_AQ_CAP_ID_SWITCH_MODE:
5673 p->switch_mode = number;
5674+ i40e_debug(hw, I40E_DEBUG_INIT,
5675+ "HW Capability: Switch mode = %d\n",
5676+ p->switch_mode);
5677 break;
5678 case I40E_AQ_CAP_ID_MNG_MODE:
5679 p->management_mode = number;
5680@@ -3185,38 +3403,67 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
5681 } else {
5682 p->mng_protocols_over_mctp = 0;
5683 }
5684+ i40e_debug(hw, I40E_DEBUG_INIT,
5685+ "HW Capability: Management Mode = %d\n",
5686+ p->management_mode);
5687 break;
5688 case I40E_AQ_CAP_ID_NPAR_ACTIVE:
5689 p->npar_enable = number;
5690+ i40e_debug(hw, I40E_DEBUG_INIT,
5691+ "HW Capability: NPAR enable = %d\n",
5692+ p->npar_enable);
5693 break;
5694 case I40E_AQ_CAP_ID_OS2BMC_CAP:
5695 p->os2bmc = number;
5696+ i40e_debug(hw, I40E_DEBUG_INIT,
5697+ "HW Capability: OS2BMC = %d\n", p->os2bmc);
5698 break;
5699 case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
5700 p->valid_functions = number;
5701+ i40e_debug(hw, I40E_DEBUG_INIT,
5702+ "HW Capability: Valid Functions = %d\n",
5703+ p->valid_functions);
5704 break;
5705 case I40E_AQ_CAP_ID_SRIOV:
5706 if (number == 1)
5707 p->sr_iov_1_1 = true;
5708+ i40e_debug(hw, I40E_DEBUG_INIT,
5709+ "HW Capability: SR-IOV = %d\n",
5710+ p->sr_iov_1_1);
5711 break;
5712 case I40E_AQ_CAP_ID_VF:
5713 p->num_vfs = number;
5714 p->vf_base_id = logical_id;
5715+ i40e_debug(hw, I40E_DEBUG_INIT,
5716+ "HW Capability: VF count = %d\n",
5717+ p->num_vfs);
5718+ i40e_debug(hw, I40E_DEBUG_INIT,
5719+ "HW Capability: VF base_id = %d\n",
5720+ p->vf_base_id);
5721 break;
5722 case I40E_AQ_CAP_ID_VMDQ:
5723 if (number == 1)
5724 p->vmdq = true;
5725+ i40e_debug(hw, I40E_DEBUG_INIT,
5726+ "HW Capability: VMDQ = %d\n", p->vmdq);
5727 break;
5728 case I40E_AQ_CAP_ID_8021QBG:
5729 if (number == 1)
5730 p->evb_802_1_qbg = true;
5731+ i40e_debug(hw, I40E_DEBUG_INIT,
5732+ "HW Capability: 802.1Qbg = %d\n", number);
5733 break;
5734 case I40E_AQ_CAP_ID_8021QBR:
5735 if (number == 1)
5736 p->evb_802_1_qbh = true;
5737+ i40e_debug(hw, I40E_DEBUG_INIT,
5738+ "HW Capability: 802.1Qbh = %d\n", number);
5739 break;
5740 case I40E_AQ_CAP_ID_VSI:
5741 p->num_vsis = number;
5742+ i40e_debug(hw, I40E_DEBUG_INIT,
5743+ "HW Capability: VSI count = %d\n",
5744+ p->num_vsis);
5745 break;
5746 case I40E_AQ_CAP_ID_DCB:
5747 if (number == 1) {
5748@@ -3224,27 +3471,56 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
5749 p->enabled_tcmap = logical_id;
5750 p->maxtc = phys_id;
5751 }
5752+ i40e_debug(hw, I40E_DEBUG_INIT,
5753+ "HW Capability: DCB = %d\n", p->dcb);
5754+ i40e_debug(hw, I40E_DEBUG_INIT,
5755+ "HW Capability: TC Mapping = %d\n",
5756+ logical_id);
5757+ i40e_debug(hw, I40E_DEBUG_INIT,
5758+ "HW Capability: TC Max = %d\n", p->maxtc);
5759 break;
5760 case I40E_AQ_CAP_ID_FCOE:
5761 if (number == 1)
5762 p->fcoe = true;
5763+ i40e_debug(hw, I40E_DEBUG_INIT,
5764+ "HW Capability: FCOE = %d\n", p->fcoe);
5765 break;
5766 case I40E_AQ_CAP_ID_ISCSI:
5767 if (number == 1)
5768 p->iscsi = true;
5769+ i40e_debug(hw, I40E_DEBUG_INIT,
5770+ "HW Capability: iSCSI = %d\n", p->iscsi);
5771 break;
5772 case I40E_AQ_CAP_ID_RSS:
5773 p->rss = true;
5774 p->rss_table_size = number;
5775 p->rss_table_entry_width = logical_id;
5776+ i40e_debug(hw, I40E_DEBUG_INIT,
5777+ "HW Capability: RSS = %d\n", p->rss);
5778+ i40e_debug(hw, I40E_DEBUG_INIT,
5779+ "HW Capability: RSS table size = %d\n",
5780+ p->rss_table_size);
5781+ i40e_debug(hw, I40E_DEBUG_INIT,
5782+ "HW Capability: RSS table width = %d\n",
5783+ p->rss_table_entry_width);
5784 break;
5785 case I40E_AQ_CAP_ID_RXQ:
5786 p->num_rx_qp = number;
5787 p->base_queue = phys_id;
5788+ i40e_debug(hw, I40E_DEBUG_INIT,
5789+ "HW Capability: Rx QP = %d\n", number);
5790+ i40e_debug(hw, I40E_DEBUG_INIT,
5791+ "HW Capability: base_queue = %d\n",
5792+ p->base_queue);
5793 break;
5794 case I40E_AQ_CAP_ID_TXQ:
5795 p->num_tx_qp = number;
5796 p->base_queue = phys_id;
5797+ i40e_debug(hw, I40E_DEBUG_INIT,
5798+ "HW Capability: Tx QP = %d\n", number);
5799+ i40e_debug(hw, I40E_DEBUG_INIT,
5800+ "HW Capability: base_queue = %d\n",
5801+ p->base_queue);
5802 break;
5803 case I40E_AQ_CAP_ID_MSIX:
5804 p->num_msix_vectors = number;
5805@@ -3254,6 +3530,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
5806 break;
5807 case I40E_AQ_CAP_ID_VF_MSIX:
5808 p->num_msix_vectors_vf = number;
5809+ i40e_debug(hw, I40E_DEBUG_INIT,
5810+ "HW Capability: MSIX VF vector count = %d\n",
5811+ p->num_msix_vectors_vf);
5812 break;
5813 case I40E_AQ_CAP_ID_FLEX10:
5814 if (major_rev == 1) {
5815@@ -3270,41 +3549,72 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
5816 }
5817 p->flex10_mode = logical_id;
5818 p->flex10_status = phys_id;
5819+ i40e_debug(hw, I40E_DEBUG_INIT,
5820+ "HW Capability: Flex10 mode = %d\n",
5821+ p->flex10_mode);
5822+ i40e_debug(hw, I40E_DEBUG_INIT,
5823+ "HW Capability: Flex10 status = %d\n",
5824+ p->flex10_status);
5825 break;
5826 case I40E_AQ_CAP_ID_CEM:
5827 if (number == 1)
5828 p->mgmt_cem = true;
5829+ i40e_debug(hw, I40E_DEBUG_INIT,
5830+ "HW Capability: CEM = %d\n", p->mgmt_cem);
5831 break;
5832 case I40E_AQ_CAP_ID_IWARP:
5833 if (number == 1)
5834 p->iwarp = true;
5835+ i40e_debug(hw, I40E_DEBUG_INIT,
5836+ "HW Capability: iWARP = %d\n", p->iwarp);
5837 break;
5838 case I40E_AQ_CAP_ID_LED:
5839 if (phys_id < I40E_HW_CAP_MAX_GPIO)
5840 p->led[phys_id] = true;
5841+ i40e_debug(hw, I40E_DEBUG_INIT,
5842+ "HW Capability: LED - PIN %d\n", phys_id);
5843 break;
5844 case I40E_AQ_CAP_ID_SDP:
5845 if (phys_id < I40E_HW_CAP_MAX_GPIO)
5846 p->sdp[phys_id] = true;
5847+ i40e_debug(hw, I40E_DEBUG_INIT,
5848+ "HW Capability: SDP - PIN %d\n", phys_id);
5849 break;
5850 case I40E_AQ_CAP_ID_MDIO:
5851 if (number == 1) {
5852 p->mdio_port_num = phys_id;
5853 p->mdio_port_mode = logical_id;
5854 }
5855+ i40e_debug(hw, I40E_DEBUG_INIT,
5856+ "HW Capability: MDIO port number = %d\n",
5857+ p->mdio_port_num);
5858+ i40e_debug(hw, I40E_DEBUG_INIT,
5859+ "HW Capability: MDIO port mode = %d\n",
5860+ p->mdio_port_mode);
5861 break;
5862 case I40E_AQ_CAP_ID_1588:
5863 if (number == 1)
5864 p->ieee_1588 = true;
5865+ i40e_debug(hw, I40E_DEBUG_INIT,
5866+ "HW Capability: IEEE 1588 = %d\n",
5867+ p->ieee_1588);
5868 break;
5869 case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
5870 p->fd = true;
5871 p->fd_filters_guaranteed = number;
5872 p->fd_filters_best_effort = logical_id;
5873+ i40e_debug(hw, I40E_DEBUG_INIT,
5874+ "HW Capability: Flow Director = 1\n");
5875+ i40e_debug(hw, I40E_DEBUG_INIT,
5876+ "HW Capability: Guaranteed FD filters = %d\n",
5877+ p->fd_filters_guaranteed);
5878 break;
5879 case I40E_AQ_CAP_ID_WSR_PROT:
5880 p->wr_csr_prot = (u64)number;
5881 p->wr_csr_prot |= (u64)logical_id << 32;
5882+ i40e_debug(hw, I40E_DEBUG_INIT,
5883+ "HW Capability: wr_csr_prot = 0x%llX\n\n",
5884+ (p->wr_csr_prot & 0xffff));
5885 break;
5886 case I40E_AQ_CAP_ID_NVM_MGMT:
5887 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
5888@@ -3312,6 +3622,19 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
5889 if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
5890 p->update_disabled = true;
5891 break;
5892+ case I40E_AQ_CAP_ID_WOL_AND_PROXY:
5893+ hw->num_wol_proxy_filters = (u16)number;
5894+ hw->wol_proxy_vsi_seid = (u16)logical_id;
5895+ p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK;
5896+ if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK)
5897+ p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK;
5898+ else
5899+ p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
5900+ p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
5901+ i40e_debug(hw, I40E_DEBUG_INIT,
5902+ "HW Capability: WOL proxy filters = %d\n",
5903+ hw->num_wol_proxy_filters);
5904+ break;
5905 default:
5906 break;
5907 }
5908@@ -3320,11 +3643,8 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
5909 if (p->fcoe)
5910 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
5911
5912- /* Software override ensuring FCoE is disabled if npar or mfp
5913- * mode because it is not supported in these modes.
5914- */
5915- if (p->npar_enable || p->flex10_enable)
5916- p->fcoe = false;
5917+ /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
5918+ p->fcoe = false;
5919
5920 /* count the enabled ports (aka the "not disabled" ports) */
5921 hw->num_ports = 0;
5922@@ -3340,6 +3660,26 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
5923 hw->num_ports++;
5924 }
5925
5926+ /* OCP cards case: if a mezz is removed the ethernet port is at
5927+ * disabled state in PRTGEN_CNF register. Additional NVM read is
5928+ * needed in order to check if we are dealing with OCP card.
5929+ * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting
5930+ * physical ports results in wrong partition id calculation and thus
5931+ * not supporting WoL.
5932+ */
5933+ if (hw->mac.type == I40E_MAC_X722) {
5934+ if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) {
5935+ status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
5936+ 2 * I40E_SR_OCP_CFG_WORD0,
5937+ sizeof(ocp_cfg_word0),
5938+ &ocp_cfg_word0, true, NULL);
5939+ if (status == I40E_SUCCESS &&
5940+ (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
5941+ hw->num_ports = 4;
5942+ i40e_release_nvm(hw);
5943+ }
5944+ }
5945+
5946 valid_functions = p->valid_functions;
5947 num_functions = 0;
5948 while (valid_functions) {
5949@@ -3380,7 +3720,7 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
5950 {
5951 struct i40e_aqc_list_capabilites *cmd;
5952 struct i40e_aq_desc desc;
5953- i40e_status status = 0;
5954+ i40e_status status = I40E_SUCCESS;
5955
5956 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
5957
5958@@ -3392,17 +3732,17 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
5959
5960 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
5961
5962- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5963+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
5964 if (buff_size > I40E_AQ_LARGE_BUF)
5965- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5966+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
5967
5968 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5969- *data_size = le16_to_cpu(desc.datalen);
5970+ *data_size = LE16_TO_CPU(desc.datalen);
5971
5972 if (status)
5973 goto exit;
5974
5975- i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
5976+ i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count),
5977 list_type_opc);
5978
5979 exit:
5980@@ -3417,14 +3757,15 @@ exit:
5981 * @length: length of the section to be written (in bytes from the offset)
5982 * @data: command buffer (size [bytes] = length)
5983 * @last_command: tells if this is the last command in a series
5984+ * @preservation_flags: Preservation mode flags
5985 * @cmd_details: pointer to command details structure or NULL
5986 *
5987 * Update the NVM using the admin queue commands
5988 **/
5989 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
5990- u32 offset, u16 length, void *data,
5991- bool last_command,
5992- struct i40e_asq_cmd_details *cmd_details)
5993+ u32 offset, u16 length, void *data,
5994+ bool last_command, u8 preservation_flags,
5995+ struct i40e_asq_cmd_details *cmd_details)
5996 {
5997 struct i40e_aq_desc desc;
5998 struct i40e_aqc_nvm_update *cmd =
5999@@ -3442,13 +3783,23 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
6000 /* If this is the last command in a series, set the proper flag. */
6001 if (last_command)
6002 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
6003+ if (hw->mac.type == I40E_MAC_X722) {
6004+ if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
6005+ cmd->command_flags |=
6006+ (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
6007+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
6008+ else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
6009+ cmd->command_flags |=
6010+ (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
6011+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
6012+ }
6013 cmd->module_pointer = module_pointer;
6014- cmd->offset = cpu_to_le32(offset);
6015- cmd->length = cpu_to_le16(length);
6016+ cmd->offset = CPU_TO_LE32(offset);
6017+ cmd->length = CPU_TO_LE16(length);
6018
6019- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6020+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6021 if (length > I40E_AQ_LARGE_BUF)
6022- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
6023+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
6024
6025 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
6026
6027@@ -3457,58 +3808,152 @@ i40e_aq_update_nvm_exit:
6028 }
6029
6030 /**
6031- * i40e_aq_get_lldp_mib
6032+ * i40e_aq_rearrange_nvm
6033 * @hw: pointer to the hw struct
6034- * @bridge_type: type of bridge requested
6035- * @mib_type: Local, Remote or both Local and Remote MIBs
6036- * @buff: pointer to a user supplied buffer to store the MIB block
6037- * @buff_size: size of the buffer (in bytes)
6038- * @local_len : length of the returned Local LLDP MIB
6039- * @remote_len: length of the returned Remote LLDP MIB
6040+ * @rearrange_nvm: defines direction of rearrangement
6041 * @cmd_details: pointer to command details structure or NULL
6042 *
6043- * Requests the complete LLDP MIB (entire packet).
6044+ * Rearrange NVM structure, available only for transition FW
6045 **/
6046-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
6047- u8 mib_type, void *buff, u16 buff_size,
6048- u16 *local_len, u16 *remote_len,
6049+i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
6050+ u8 rearrange_nvm,
6051 struct i40e_asq_cmd_details *cmd_details)
6052 {
6053- struct i40e_aq_desc desc;
6054- struct i40e_aqc_lldp_get_mib *cmd =
6055- (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
6056- struct i40e_aqc_lldp_get_mib *resp =
6057- (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
6058+ struct i40e_aqc_nvm_update *cmd;
6059 i40e_status status;
6060+ struct i40e_aq_desc desc;
6061
6062- if (buff_size == 0 || !buff)
6063- return I40E_ERR_PARAM;
6064-
6065- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
6066- /* Indirect Command */
6067- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
6068+ cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
6069
6070- cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
6071- cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
6072- I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
6073+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
6074
6075- desc.datalen = cpu_to_le16(buff_size);
6076+ rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
6077+ I40E_AQ_NVM_REARRANGE_TO_STRUCT);
6078
6079- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
6080+ if (!rearrange_nvm) {
6081+ status = I40E_ERR_PARAM;
6082+ goto i40e_aq_rearrange_nvm_exit;
6083+ }
6084+
6085+ cmd->command_flags |= rearrange_nvm;
6086+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
6087+
6088+i40e_aq_rearrange_nvm_exit:
6089+ return status;
6090+}
6091+
6092+/**
6093+ * i40e_aq_nvm_progress
6094+ * @hw: pointer to the hw struct
6095+ * @progress: pointer to progress returned from AQ
6096+ * @cmd_details: pointer to command details structure or NULL
6097+ *
6098+ * Gets progress of flash rearrangement process
6099+ **/
6100+i40e_status i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
6101+ struct i40e_asq_cmd_details *cmd_details)
6102+{
6103+ i40e_status status;
6104+ struct i40e_aq_desc desc;
6105+
6106+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_progress);
6107+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
6108+ *progress = desc.params.raw[0];
6109+ return status;
6110+}
6111+
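To illustrate how the two helpers above pair up, a hypothetical caller (assuming an initialized struct i40e_hw from a probed PF running transition firmware; the helper name, the single progress read and the debug print are illustrative choices, not part of this patch) could look like:

static i40e_status example_rearrange_nvm_to_flat(struct i40e_hw *hw)
{
	i40e_status status;
	u8 progress = 0;

	/* Ask the transition FW to rearrange the NVM into the flat layout. */
	status = i40e_aq_rearrange_nvm(hw, I40E_AQ_NVM_REARRANGE_TO_FLAT, NULL);
	if (status)
		return status;

	/* Read back the FW-reported progress of the rearrangement. */
	status = i40e_aq_nvm_progress(hw, &progress, NULL);
	if (!status)
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "NVM rearrange progress: %u\n", progress);

	return status;
}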
6112+/**
6113+ * i40e_aq_get_lldp_mib
6114+ * @hw: pointer to the hw struct
6115+ * @bridge_type: type of bridge requested
6116+ * @mib_type: Local, Remote or both Local and Remote MIBs
6117+ * @buff: pointer to a user supplied buffer to store the MIB block
6118+ * @buff_size: size of the buffer (in bytes)
6119+ * @local_len : length of the returned Local LLDP MIB
6120+ * @remote_len: length of the returned Remote LLDP MIB
6121+ * @cmd_details: pointer to command details structure or NULL
6122+ *
6123+ * Requests the complete LLDP MIB (entire packet).
6124+ **/
6125+i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
6126+ u8 mib_type, void *buff, u16 buff_size,
6127+ u16 *local_len, u16 *remote_len,
6128+ struct i40e_asq_cmd_details *cmd_details)
6129+{
6130+ struct i40e_aq_desc desc;
6131+ struct i40e_aqc_lldp_get_mib *cmd =
6132+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
6133+ struct i40e_aqc_lldp_get_mib *resp =
6134+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
6135+ i40e_status status;
6136+
6137+ if (buff_size == 0 || !buff)
6138+ return I40E_ERR_PARAM;
6139+
6140+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
6141+ /* Indirect Command */
6142+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
6143+
6144+ cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
6145+ cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
6146+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
6147+
6148+ desc.datalen = CPU_TO_LE16(buff_size);
6149+
6150+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
6151 if (buff_size > I40E_AQ_LARGE_BUF)
6152- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
6153+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
6154
6155 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
6156 if (!status) {
6157 if (local_len != NULL)
6158- *local_len = le16_to_cpu(resp->local_len);
6159+ *local_len = LE16_TO_CPU(resp->local_len);
6160 if (remote_len != NULL)
6161- *remote_len = le16_to_cpu(resp->remote_len);
6162+ *remote_len = LE16_TO_CPU(resp->remote_len);
6163 }
6164
6165 return status;
6166 }
6167
6168+ /**
6169+ * i40e_aq_set_lldp_mib - Set the LLDP MIB
6170+ * @hw: pointer to the hw struct
6171+ * @mib_type: Local, Remote or both Local and Remote MIBs
6172+ * @buff: pointer to a user supplied buffer to store the MIB block
6173+ * @buff_size: size of the buffer (in bytes)
6174+ * @cmd_details: pointer to command details structure or NULL
6175+ *
6176+ * Set the LLDP MIB.
6177+ **/
6178+i40e_status i40e_aq_set_lldp_mib(struct i40e_hw *hw,
6179+ u8 mib_type, void *buff, u16 buff_size,
6180+ struct i40e_asq_cmd_details *cmd_details)
6181+{
6182+ struct i40e_aq_desc desc;
6183+ struct i40e_aqc_lldp_set_local_mib *cmd =
6184+ (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
6185+ i40e_status status;
6186+
6187+ if (buff_size == 0 || !buff)
6188+ return I40E_ERR_PARAM;
6189+
6190+ i40e_fill_default_direct_cmd_desc(&desc,
6191+ i40e_aqc_opc_lldp_set_local_mib);
6192+ /* Indirect Command */
6193+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6194+ if (buff_size > I40E_AQ_LARGE_BUF)
6195+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
6196+ desc.datalen = CPU_TO_LE16(buff_size);
6197+
6198+ cmd->type = mib_type;
6199+ cmd->length = CPU_TO_LE16(buff_size);
6200+ cmd->address_high = CPU_TO_LE32(upper_32_bits((u64)buff));
6201+ cmd->address_low = CPU_TO_LE32(lower_32_bits((u64)buff));
6202+
6203+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
6204+ return status;
6205+}
6206+
6207 /**
6208 * i40e_aq_cfg_lldp_mib_change_event
6209 * @hw: pointer to the hw struct
6210@@ -3537,15 +3982,55 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
6211 return status;
6212 }
6213
6214+/**
6215+ * i40e_aq_restore_lldp
6216+ * @hw: pointer to the hw struct
6217+ * @setting: pointer to factory setting variable or NULL
6218+ * @restore: True if factory settings should be restored
6219+ * @cmd_details: pointer to command details structure or NULL
6220+ *
6221+ * Restore LLDP Agent factory settings if @restore is set to True. Otherwise,
6222+ * only return the factory setting in the AQ response.
6223+ **/
6224+enum i40e_status_code
6225+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
6226+ struct i40e_asq_cmd_details *cmd_details)
6227+{
6228+ struct i40e_aq_desc desc;
6229+ struct i40e_aqc_lldp_restore *cmd =
6230+ (struct i40e_aqc_lldp_restore *)&desc.params.raw;
6231+ i40e_status status;
6232+
6233+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
6234+ i40e_debug(hw, I40E_DEBUG_ALL,
6235+ "Restore LLDP not supported by current FW version.\n");
6236+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
6237+ }
6238+
6239+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
6240+
6241+ if (restore)
6242+ cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
6243+
6244+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
6245+
6246+ if (setting)
6247+ *setting = cmd->command & 1;
6248+
6249+ return status;
6250+}
6251+
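A short sketch of the query-only use described above (restore == false just reports the factory default; the wrapper name and variable names are illustrative assumptions):

static void example_query_factory_lldp(struct i40e_hw *hw)
{
	u8 factory_lldp = 0;
	i40e_status status;

	/* restore == false: only report whether LLDP is enabled in the
	 * factory settings; nothing is changed on the device. */
	status = i40e_aq_restore_lldp(hw, &factory_lldp, false, NULL);
	if (!status)
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "factory LLDP setting: %u\n", factory_lldp);
}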
6252 /**
6253 * i40e_aq_stop_lldp
6254 * @hw: pointer to the hw struct
6255 * @shutdown_agent: True if LLDP Agent needs to be Shutdown
6256+ * @persist: True if stop of LLDP should be persistent across power cycles
6257 * @cmd_details: pointer to command details structure or NULL
6258 *
6259 * Stop or Shutdown the embedded LLDP Agent
6260 **/
6261 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
6262+ bool persist,
6263 struct i40e_asq_cmd_details *cmd_details)
6264 {
6265 struct i40e_aq_desc desc;
6266@@ -3558,6 +4043,14 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
6267 if (shutdown_agent)
6268 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
6269
6270+ if (persist) {
6271+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
6272+ cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
6273+ else
6274+ i40e_debug(hw, I40E_DEBUG_ALL,
6275+ "Persistent Stop LLDP not supported by current FW version.\n");
6276+ }
6277+
6278 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
6279
6280 return status;
6281@@ -3566,11 +4059,13 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
6282 /**
6283 * i40e_aq_start_lldp
6284 * @hw: pointer to the hw struct
6285+ * @persist: True if start of LLDP should be persistent across power cycles
6286 * @cmd_details: pointer to command details structure or NULL
6287 *
6288 * Start the embedded LLDP Agent on all ports.
6289 **/
6290 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
6291+ bool persist,
6292 struct i40e_asq_cmd_details *cmd_details)
6293 {
6294 struct i40e_aq_desc desc;
6295@@ -3582,6 +4077,45 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
6296
6297 cmd->command = I40E_AQ_LLDP_AGENT_START;
6298
6299+ if (persist) {
6300+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
6301+ cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
6302+ else
6303+ i40e_debug(hw, I40E_DEBUG_ALL,
6304+ "Persistent Start LLDP not supported by current FW version.\n");
6305+ }
6306+
6307+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
6308+
6309+ return status;
6310+}
6311+
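To show how the new persist parameter is intended to be used, a hypothetical sequence that disables the firmware LLDP agent across power cycles and later re-enables it (assuming hw comes from a probed PF whose firmware advertises I40E_HW_FLAG_FW_LLDP_PERSISTENT; the wrapper is illustrative only) might be:

static void example_toggle_fw_lldp(struct i40e_hw *hw, bool enable)
{
	i40e_status status;

	if (enable)
		/* Start the embedded agent and keep it enabled across
		 * power cycles. */
		status = i40e_aq_start_lldp(hw, true, NULL);
	else
		/* Stop and shut down the agent, persisting the stop. */
		status = i40e_aq_stop_lldp(hw, true, true, NULL);

	if (status)
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "LLDP %s request failed, status %d\n",
			   enable ? "start" : "stop", status);
}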
6312+/**
6313+ * i40e_aq_set_dcb_parameters
6314+ * @hw: pointer to the hw struct
6315+ * @cmd_details: pointer to command details structure or NULL
6316+ * @dcb_enable: True if DCB configuration needs to be applied
6317+ *
6318+ **/
6319+enum i40e_status_code
6320+i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
6321+ struct i40e_asq_cmd_details *cmd_details)
6322+{
6323+ struct i40e_aq_desc desc;
6324+ struct i40e_aqc_set_dcb_parameters *cmd =
6325+ (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
6326+ i40e_status status;
6327+
6328+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
6329+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
6330+
6331+ i40e_fill_default_direct_cmd_desc(&desc,
6332+ i40e_aqc_opc_set_dcb_parameters);
6333+
6334+ if (dcb_enable) {
6335+ cmd->valid_flags = I40E_DCB_VALID;
6336+ cmd->command = I40E_AQ_DCB_SET_AGENT;
6337+ }
6338 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
6339
6340 return status;
6341@@ -3597,8 +4131,8 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
6342 * Get CEE DCBX mode operational configuration from firmware
6343 **/
6344 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
6345- void *buff, u16 buff_size,
6346- struct i40e_asq_cmd_details *cmd_details)
6347+ void *buff, u16 buff_size,
6348+ struct i40e_asq_cmd_details *cmd_details)
6349 {
6350 struct i40e_aq_desc desc;
6351 i40e_status status;
6352@@ -3608,24 +4142,53 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
6353
6354 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
6355
6356- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
6357+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
6358 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
6359 cmd_details);
6360
6361 return status;
6362 }
6363
6364+/**
6365+ * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW
6366+ * @hw: pointer to the hw struct
6367+ * @start_agent: True if DCBx Agent needs to be Started
6368+ * False if DCBx Agent needs to be Stopped
6369+ * @cmd_details: pointer to command details structure or NULL
6370+ *
6371+ * Start/Stop the embedded dcbx Agent
6372+ **/
6373+i40e_status i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
6374+ bool start_agent,
6375+ struct i40e_asq_cmd_details *cmd_details)
6376+{
6377+ struct i40e_aq_desc desc;
6378+ struct i40e_aqc_lldp_stop_start_specific_agent *cmd =
6379+ (struct i40e_aqc_lldp_stop_start_specific_agent *)
6380+ &desc.params.raw;
6381+ i40e_status status;
6382+
6383+ i40e_fill_default_direct_cmd_desc(&desc,
6384+ i40e_aqc_opc_lldp_stop_start_spec_agent);
6385+
6386+ if (start_agent)
6387+ cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK;
6388+
6389+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
6390+
6391+ return status;
6392+}
6393+
6394 /**
6395 * i40e_aq_add_udp_tunnel
6396 * @hw: pointer to the hw struct
6397 * @udp_port: the UDP port to add in Host byte order
6398- * @header_len: length of the tunneling header length in DWords
6399 * @protocol_index: protocol index type
6400 * @filter_index: pointer to filter index
6401 * @cmd_details: pointer to command details structure or NULL
6402 *
6403 * Note: Firmware expects the udp_port value to be in Little Endian format,
6404- * and this function will call cpu_to_le16 to convert from Host byte order to
6405+ * and this function will call CPU_TO_LE16 to convert from Host byte order to
6406 * Little Endian order.
6407 **/
6408 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
6409@@ -3642,7 +4205,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
6410
6411 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
6412
6413- cmd->udp_port = cpu_to_le16(udp_port);
6414+ cmd->udp_port = CPU_TO_LE16(udp_port);
6415 cmd->protocol_type = protocol_index;
6416
6417 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
6418@@ -3676,6 +4239,45 @@ i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
6419 return status;
6420 }
6421
6422+/**
6423+ * i40e_aq_get_switch_resource_alloc (0x0204)
6424+ * @hw: pointer to the hw struct
6425+ * @num_entries: pointer to u8 to store the number of resource entries returned
6426+ * @buf: pointer to a user supplied buffer. This buffer must be large enough
6427+ * to store the resource information for all resource types. Each
6428+ * resource type is a i40e_aqc_switch_resource_alloc_data structure.
6429+ * @count: number of resource entries the supplied buffer can hold
6430+ * @cmd_details: pointer to command details structure or NULL
6431+ *
6432+ * Query the resources allocated to a function.
6433+ **/
6434+i40e_status i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
6435+ u8 *num_entries,
6436+ struct i40e_aqc_switch_resource_alloc_element_resp *buf,
6437+ u16 count,
6438+ struct i40e_asq_cmd_details *cmd_details)
6439+{
6440+ struct i40e_aq_desc desc;
6441+ struct i40e_aqc_get_switch_resource_alloc *cmd_resp =
6442+ (struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw;
6443+ i40e_status status;
6444+ u16 length = count * sizeof(*buf);
6445+
6446+ i40e_fill_default_direct_cmd_desc(&desc,
6447+ i40e_aqc_opc_get_switch_resource_alloc);
6448+
6449+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
6450+ if (length > I40E_AQ_LARGE_BUF)
6451+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
6452+
6453+ status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
6454+
6455+ if (!status && num_entries)
6456+ *num_entries = cmd_resp->num_entries;
6457+
6458+ return status;
6459+}
6460+
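A caller-side sketch for the query above (the wrapper name and the array size of 16 are arbitrary example choices; note that count is taken as an entry count and scaled by sizeof(*buf) internally):

static void example_dump_switch_resources(struct i40e_hw *hw)
{
	/* 16 entries is an arbitrary example capacity. */
	struct i40e_aqc_switch_resource_alloc_element_resp resources[16];
	u8 num_entries = 0;
	i40e_status status;

	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, resources,
						   ARRAY_SIZE(resources), NULL);
	if (!status)
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "switch resources: %u entries returned\n",
			   num_entries);
}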
6461 /**
6462 * i40e_aq_delete_element - Delete switch element
6463 * @hw: pointer to the hw struct
6464@@ -3697,9 +4299,9 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
6465
6466 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
6467
6468- cmd->seid = cpu_to_le16(seid);
6469-
6470- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
6471+ cmd->seid = CPU_TO_LE16(seid);
6472+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
6473+ cmd_details, true);
6474
6475 return status;
6476 }
6477@@ -3709,6 +4311,14 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
6478 * @hw: pointer to the hw struct
6479 * @cmd_details: pointer to command details structure or NULL
6480 *
6481+ * When LLDP is handled in PF this command is used by the PF
6482+ * to notify EMP that a DCB setting is modified.
6483+ * When LLDP is handled in EMP this command is used by the PF
6484+ * to notify EMP whenever one of the following parameters get
6485+ * modified:
6486+ * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA
6487+ * - PCIRTT in PRTDCB_GENC.PCIRTT
6488+ * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME.
6489 * EMP will return when the shared RPB settings have been
6490 * recomputed and modified. The retval field in the descriptor
6491 * will be set to 0 when RPB is modified.
6492@@ -3772,15 +4382,15 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
6493 i40e_fill_default_direct_cmd_desc(&desc, opcode);
6494
6495 /* Indirect command */
6496- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
6497+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
6498 if (cmd_param_flag)
6499- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
6500+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
6501 if (buff_size > I40E_AQ_LARGE_BUF)
6502- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
6503+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
6504
6505- desc.datalen = cpu_to_le16(buff_size);
6506+ desc.datalen = CPU_TO_LE16(buff_size);
6507
6508- cmd->vsi_seid = cpu_to_le16(seid);
6509+ cmd->vsi_seid = CPU_TO_LE16(seid);
6510
6511 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
6512
6513@@ -3807,8 +4417,8 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
6514 i40e_fill_default_direct_cmd_desc(&desc,
6515 i40e_aqc_opc_configure_vsi_bw_limit);
6516
6517- cmd->vsi_seid = cpu_to_le16(seid);
6518- cmd->credit = cpu_to_le16(credit);
6519+ cmd->vsi_seid = CPU_TO_LE16(seid);
6520+ cmd->credit = CPU_TO_LE16(credit);
6521 cmd->max_credit = max_credit;
6522
6523 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
6524@@ -3838,6 +4448,7 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
6525 * @hw: pointer to the hw struct
6526 * @seid: seid of the switching component connected to Physical Port
6527 * @ets_data: Buffer holding ETS parameters
6528+ * @opcode: Tx scheduler AQ command opcode
6529 * @cmd_details: pointer to command details structure or NULL
6530 **/
6531 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
6532@@ -3961,7 +4572,7 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
6533 * The function checks for the valid filter/context sizes being
6534 * passed for FCoE and PE.
6535 *
6536- * Returns 0 if the values passed are valid and within
6537+ * Returns I40E_SUCCESS if the values passed are valid and within
6538 * range else returns an error.
6539 **/
6540 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
6541@@ -3970,6 +4581,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
6542 u32 fcoe_cntx_size, fcoe_filt_size;
6543 u32 pe_cntx_size, pe_filt_size;
6544 u32 fcoe_fmax;
6545+
6546 u32 val;
6547
6548 /* Validate FCoE settings passed */
6549@@ -4044,7 +4656,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
6550 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
6551 return I40E_ERR_INVALID_SIZE;
6552
6553- return 0;
6554+ return I40E_SUCCESS;
6555 }
6556
6557 /**
6558@@ -4059,7 +4671,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
6559 i40e_status i40e_set_filter_control(struct i40e_hw *hw,
6560 struct i40e_filter_control_settings *settings)
6561 {
6562- i40e_status ret = 0;
6563+ i40e_status ret = I40E_SUCCESS;
6564 u32 hash_lut_size = 0;
6565 u32 val;
6566
6567@@ -4111,7 +4723,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
6568
6569 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
6570
6571- return 0;
6572+ return I40E_SUCCESS;
6573 }
6574
6575 /**
6576@@ -4151,7 +4763,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
6577 if (is_add) {
6578 i40e_fill_default_direct_cmd_desc(&desc,
6579 i40e_aqc_opc_add_control_packet_filter);
6580- cmd->queue = cpu_to_le16(queue);
6581+ cmd->queue = CPU_TO_LE16(queue);
6582 } else {
6583 i40e_fill_default_direct_cmd_desc(&desc,
6584 i40e_aqc_opc_remove_control_packet_filter);
6585@@ -4160,17 +4772,17 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
6586 if (mac_addr)
6587 ether_addr_copy(cmd->mac, mac_addr);
6588
6589- cmd->etype = cpu_to_le16(ethtype);
6590- cmd->flags = cpu_to_le16(flags);
6591- cmd->seid = cpu_to_le16(vsi_seid);
6592+ cmd->etype = CPU_TO_LE16(ethtype);
6593+ cmd->flags = CPU_TO_LE16(flags);
6594+ cmd->seid = CPU_TO_LE16(vsi_seid);
6595
6596 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
6597
6598 if (!status && stats) {
6599- stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
6600- stats->etype_used = le16_to_cpu(resp->etype_used);
6601- stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
6602- stats->etype_free = le16_to_cpu(resp->etype_free);
6603+ stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used);
6604+ stats->etype_used = LE16_TO_CPU(resp->etype_used);
6605+ stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free);
6606+ stats->etype_free = LE16_TO_CPU(resp->etype_free);
6607 }
6608
6609 return status;
6610@@ -4181,10 +4793,10 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
6611 * @hw: pointer to the hw struct
6612 * @seid: VSI seid to add ethertype filter from
6613 **/
6614-#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
6615 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
6616 u16 seid)
6617 {
6618+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
6619 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
6620 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
6621 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
6622@@ -4199,89 +4811,361 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
6623 }
6624
6625 /**
6626- * i40e_aq_alternate_read
6627+ * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue
6628+ * @filters: list of cloud filters
6629+ * @filter_count: length of list
6630+ *
6631+ * There's an issue in the device where the Geneve VNI layout needs
6632+ * to be shifted 1 byte over from the VxLAN VNI
6633+ **/
6634+static void i40e_fix_up_geneve_vni(
6635+ struct i40e_aqc_cloud_filters_element_data *filters,
6636+ u8 filter_count)
6637+{
6638+ struct i40e_aqc_cloud_filters_element_data *f = filters;
6639+ int i;
6640+
6641+ for (i = 0; i < filter_count; i++) {
6642+ u16 tnl_type;
6643+ u32 ti;
6644+
6645+ tnl_type = (LE16_TO_CPU(f[i].flags) &
6646+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6647+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6648+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6649+ ti = LE32_TO_CPU(f[i].tenant_id);
6650+ f[i].tenant_id = CPU_TO_LE32(ti << 8);
6651+ }
6652+ }
6653+}
6654+
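The byte shift performed by the fix-up above can be sanity-checked in isolation with a small host-side program (the VNI value is made up, and plain host-order integers stand in for the LE32_TO_CPU/CPU_TO_LE32 handling the driver applies to descriptor fields):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vni = 0x00abcdef;      /* example 24-bit Geneve VNI */
	uint32_t tenant_id = vni << 8;  /* shifted one byte, as the HW expects */

	printf("VNI 0x%06x -> tenant_id field 0x%08x\n", vni, tenant_id);
	return 0;
}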
6655+/**
6656+ * i40e_aq_add_cloud_filters
6657 * @hw: pointer to the hardware structure
6658- * @reg_addr0: address of first dword to be read
6659- * @reg_val0: pointer for data read from 'reg_addr0'
6660- * @reg_addr1: address of second dword to be read
6661- * @reg_val1: pointer for data read from 'reg_addr1'
6662+ * @seid: VSI seid to add cloud filters from
6663+ * @filters: Buffer which contains the filters to be added
6664+ * @filter_count: number of filters contained in the buffer
6665 *
6666- * Read one or two dwords from alternate structure. Fields are indicated
6667- * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
6668- * is not passed then only register at 'reg_addr0' is read.
6669+ * Set the cloud filters for a given VSI. The contents of the
6670+ * i40e_aqc_cloud_filters_element_data are filled
6671+ * in by the caller of the function.
6672 *
6673 **/
6674-static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
6675- u32 reg_addr0, u32 *reg_val0,
6676- u32 reg_addr1, u32 *reg_val1)
6677+i40e_status i40e_aq_add_cloud_filters(struct i40e_hw *hw,
6678+ u16 seid,
6679+ struct i40e_aqc_cloud_filters_element_data *filters,
6680+ u8 filter_count)
6681 {
6682 struct i40e_aq_desc desc;
6683- struct i40e_aqc_alternate_write *cmd_resp =
6684- (struct i40e_aqc_alternate_write *)&desc.params.raw;
6685+ struct i40e_aqc_add_remove_cloud_filters *cmd =
6686+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6687 i40e_status status;
6688+ u16 buff_len;
6689
6690- if (!reg_val0)
6691- return I40E_ERR_PARAM;
6692+ i40e_fill_default_direct_cmd_desc(&desc,
6693+ i40e_aqc_opc_add_cloud_filters);
6694
6695- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
6696- cmd_resp->address0 = cpu_to_le32(reg_addr0);
6697- cmd_resp->address1 = cpu_to_le32(reg_addr1);
6698+ buff_len = filter_count * sizeof(*filters);
6699+ desc.datalen = CPU_TO_LE16(buff_len);
6700+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6701+ cmd->num_filters = filter_count;
6702+ cmd->seid = CPU_TO_LE16(seid);
6703
6704- status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
6705+ i40e_fix_up_geneve_vni(filters, filter_count);
6706
6707- if (!status) {
6708- *reg_val0 = le32_to_cpu(cmd_resp->data0);
6709+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6710+
6711+ return status;
6712+}
6713+
6714+/**
6715+ * i40e_aq_add_cloud_filters_bb
6716+ * @hw: pointer to the hardware structure
6717+ * @seid: VSI seid to add cloud filters from
6718+ * @filters: Buffer which contains the filters in big buffer to be added
6719+ * @filter_count: number of filters contained in the buffer
6720+ *
6721+ * Set the cloud filters for a given VSI. The contents of the
6722+ * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
6723+ * the function.
6724+ *
6725+ **/
6726+enum i40e_status_code
6727+i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
6728+ struct i40e_aqc_cloud_filters_element_bb *filters,
6729+ u8 filter_count)
6730+{
6731+ struct i40e_aq_desc desc;
6732+ struct i40e_aqc_add_remove_cloud_filters *cmd =
6733+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6734+ i40e_status status;
6735+ u16 buff_len;
6736+ int i;
6737
6738- if (reg_val1)
6739- *reg_val1 = le32_to_cpu(cmd_resp->data1);
6740+ i40e_fill_default_direct_cmd_desc(&desc,
6741+ i40e_aqc_opc_add_cloud_filters);
6742+
6743+ buff_len = filter_count * sizeof(*filters);
6744+ desc.datalen = CPU_TO_LE16(buff_len);
6745+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6746+ cmd->num_filters = filter_count;
6747+ cmd->seid = CPU_TO_LE16(seid);
6748+ cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
6749+
6750+ for (i = 0; i < filter_count; i++) {
6751+ u16 tnl_type;
6752+ u32 ti;
6753+
6754+ tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
6755+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6756+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6757+
6758+ /* Due to hardware eccentricities, the VNI for Geneve is shifted
6759+ * one more byte further than normally used for Tenant ID in
6760+ * other tunnel types.
6761+ */
6762+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6763+ ti = LE32_TO_CPU(filters[i].element.tenant_id);
6764+ filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
6765+ }
6766 }
6767
6768+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6769+
6770 return status;
6771 }
6772
6773 /**
6774- * i40e_aq_resume_port_tx
6775+ * i40e_aq_rem_cloud_filters
6776 * @hw: pointer to the hardware structure
6777- * @cmd_details: pointer to command details structure or NULL
6778+ * @seid: VSI seid to remove cloud filters from
6779+ * @filters: Buffer which contains the filters to be removed
6780+ * @filter_count: number of filters contained in the buffer
6781+ *
6782+ * Remove the cloud filters for a given VSI. The contents of the
6783+ * i40e_aqc_cloud_filters_element_data are filled in by the caller
6784+ * of the function.
6785 *
6786- * Resume port's Tx traffic
6787 **/
6788-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
6789- struct i40e_asq_cmd_details *cmd_details)
6790+enum i40e_status_code
6791+i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
6792+ struct i40e_aqc_cloud_filters_element_data *filters,
6793+ u8 filter_count)
6794 {
6795 struct i40e_aq_desc desc;
6796+ struct i40e_aqc_add_remove_cloud_filters *cmd =
6797+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6798 i40e_status status;
6799+ u16 buff_len;
6800
6801- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
6802+ i40e_fill_default_direct_cmd_desc(&desc,
6803+ i40e_aqc_opc_remove_cloud_filters);
6804
6805- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
6806+ buff_len = filter_count * sizeof(*filters);
6807+ desc.datalen = CPU_TO_LE16(buff_len);
6808+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6809+ cmd->num_filters = filter_count;
6810+ cmd->seid = CPU_TO_LE16(seid);
6811+
6812+ i40e_fix_up_geneve_vni(filters, filter_count);
6813+
6814+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6815
6816 return status;
6817 }
6818
6819 /**
6820- * i40e_set_pci_config_data - store PCI bus info
6821- * @hw: pointer to hardware structure
6822- * @link_status: the link status word from PCI config space
6823+ * i40e_aq_rem_cloud_filters_bb
6824+ * @hw: pointer to the hardware structure
6825+ * @seid: VSI seid to remove cloud filters from
6826+ * @filters: Buffer which contains the filters in big buffer to be removed
6827+ * @filter_count: number of filters contained in the buffer
6828+ *
6829+ * Remove the big buffer cloud filters for a given VSI. The contents of the
6830+ * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
6831+ * function.
6832 *
6833- * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
6834 **/
6835-void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
6836+enum i40e_status_code
6837+i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
6838+ struct i40e_aqc_cloud_filters_element_bb *filters,
6839+ u8 filter_count)
6840 {
6841- hw->bus.type = i40e_bus_type_pci_express;
6842+ struct i40e_aq_desc desc;
6843+ struct i40e_aqc_add_remove_cloud_filters *cmd =
6844+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6845+ i40e_status status;
6846+ u16 buff_len;
6847+ int i;
6848
6849- switch (link_status & PCI_EXP_LNKSTA_NLW) {
6850- case PCI_EXP_LNKSTA_NLW_X1:
6851- hw->bus.width = i40e_bus_width_pcie_x1;
6852- break;
6853- case PCI_EXP_LNKSTA_NLW_X2:
6854- hw->bus.width = i40e_bus_width_pcie_x2;
6855- break;
6856- case PCI_EXP_LNKSTA_NLW_X4:
6857- hw->bus.width = i40e_bus_width_pcie_x4;
6858- break;
6859- case PCI_EXP_LNKSTA_NLW_X8:
6860- hw->bus.width = i40e_bus_width_pcie_x8;
6861+ i40e_fill_default_direct_cmd_desc(&desc,
6862+ i40e_aqc_opc_remove_cloud_filters);
6863+
6864+ buff_len = filter_count * sizeof(*filters);
6865+ desc.datalen = CPU_TO_LE16(buff_len);
6866+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6867+ cmd->num_filters = filter_count;
6868+ cmd->seid = CPU_TO_LE16(seid);
6869+ cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
6870+
6871+ for (i = 0; i < filter_count; i++) {
6872+ u16 tnl_type;
6873+ u32 ti;
6874+
6875+ tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
6876+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6877+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6878+
6879+ /* Due to hardware eccentricities, the VNI for Geneve is shifted
6880+ * one more byte further than normally used for Tenant ID in
6881+ * other tunnel types.
6882+ */
6883+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6884+ ti = LE32_TO_CPU(filters[i].element.tenant_id);
6885+ filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
6886+ }
6887+ }
6888+
6889+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6890+
6891+ return status;
6892+}
6893+
6894+/**
6895+ * i40e_aq_replace_cloud_filters - Replace cloud filter command
6896+ * @hw: pointer to the hw struct
6897+ * @filters: pointer to the i40e_aqc_replace_cloud_filter_cmd struct
6898+ * @cmd_buf: pointer to the i40e_aqc_replace_cloud_filter_cmd_buf struct
6899+ *
6900+ **/
6901+enum
6902+i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
6903+ struct i40e_aqc_replace_cloud_filters_cmd *filters,
6904+ struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf)
6905+{
6906+ struct i40e_aq_desc desc;
6907+ struct i40e_aqc_replace_cloud_filters_cmd *cmd =
6908+ (struct i40e_aqc_replace_cloud_filters_cmd *)&desc.params.raw;
6909+ i40e_status status = I40E_SUCCESS;
6910+ int i = 0;
6911+
6912+ /* X722 doesn't support this command */
6913+ if (hw->mac.type == I40E_MAC_X722)
6914+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
6915+
6916+ /* need FW version greater than 6.00 */
6917+ if (hw->aq.fw_maj_ver < 6)
6918+ return I40E_NOT_SUPPORTED;
6919+
6920+ i40e_fill_default_direct_cmd_desc(&desc,
6921+ i40e_aqc_opc_replace_cloud_filters);
6922+
6923+ desc.datalen = CPU_TO_LE16(32);
6924+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6925+ cmd->old_filter_type = filters->old_filter_type;
6926+ cmd->new_filter_type = filters->new_filter_type;
6927+ cmd->valid_flags = filters->valid_flags;
6928+ cmd->tr_bit = filters->tr_bit;
6929+ cmd->tr_bit2 = filters->tr_bit2;
6930+
6931+ status = i40e_asq_send_command(hw, &desc, cmd_buf,
6932+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf), NULL);
6933+
6934+ /* for get cloud filters command */
6935+ for (i = 0; i < 32; i += 4) {
6936+ cmd_buf->filters[i / 4].filter_type = cmd_buf->data[i];
6937+ cmd_buf->filters[i / 4].input[0] = cmd_buf->data[i + 1];
6938+ cmd_buf->filters[i / 4].input[1] = cmd_buf->data[i + 2];
6939+ cmd_buf->filters[i / 4].input[2] = cmd_buf->data[i + 3];
6940+ }
6941+
6942+ return status;
6943+}
6944+
6945+/**
6946+ * i40e_aq_alternate_read
6947+ * @hw: pointer to the hardware structure
6948+ * @reg_addr0: address of first dword to be read
6949+ * @reg_val0: pointer for data read from 'reg_addr0'
6950+ * @reg_addr1: address of second dword to be read
6951+ * @reg_val1: pointer for data read from 'reg_addr1'
6952+ *
6953+ * Read one or two dwords from alternate structure. Fields are indicated
6954+ * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
6955+ * is not passed then only register at 'reg_addr0' is read.
6956+ *
6957+ **/
6958+i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
6959+ u32 reg_addr0, u32 *reg_val0,
6960+ u32 reg_addr1, u32 *reg_val1)
6961+{
6962+ struct i40e_aq_desc desc;
6963+ struct i40e_aqc_alternate_write *cmd_resp =
6964+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
6965+ i40e_status status;
6966+
6967+ if (reg_val0 == NULL)
6968+ return I40E_ERR_PARAM;
6969+
6970+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
6971+ cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
6972+ cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
6973+
6974+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
6975+
6976+ if (status == I40E_SUCCESS) {
6977+ *reg_val0 = LE32_TO_CPU(cmd_resp->data0);
6978+
6979+ if (reg_val1 != NULL)
6980+ *reg_val1 = LE32_TO_CPU(cmd_resp->data1);
6981+ }
6982+
6983+ return status;
6984+}
6985+
6986+/**
6987+ * i40e_aq_resume_port_tx
6988+ * @hw: pointer to the hardware structure
6989+ * @cmd_details: pointer to command details structure or NULL
6990+ *
6991+ * Resume port's Tx traffic
6992+ **/
6993+i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
6994+ struct i40e_asq_cmd_details *cmd_details)
6995+{
6996+ struct i40e_aq_desc desc;
6997+ i40e_status status;
6998+
6999+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
7000+
7001+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
7002+
7003+ return status;
7004+}
7005+
7006+/**
7007+ * i40e_set_pci_config_data - store PCI bus info
7008+ * @hw: pointer to hardware structure
7009+ * @link_status: the link status word from PCI config space
7010+ *
7011+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
7012+ **/
7013+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
7014+{
7015+ hw->bus.type = i40e_bus_type_pci_express;
7016+
7017+ switch (link_status & PCI_EXP_LNKSTA_NLW) {
7018+ case PCI_EXP_LNKSTA_NLW_X1:
7019+ hw->bus.width = i40e_bus_width_pcie_x1;
7020+ break;
7021+ case PCI_EXP_LNKSTA_NLW_X2:
7022+ hw->bus.width = i40e_bus_width_pcie_x2;
7023+ break;
7024+ case PCI_EXP_LNKSTA_NLW_X4:
7025+ hw->bus.width = i40e_bus_width_pcie_x4;
7026+ break;
7027+ case PCI_EXP_LNKSTA_NLW_X8:
7028+ hw->bus.width = i40e_bus_width_pcie_x8;
7029 break;
7030 default:
7031 hw->bus.width = i40e_bus_width_unknown;
7032@@ -4315,15 +5199,16 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
7033 * @ret_buff_size: actual buffer size returned
7034 * @ret_next_table: next block to read
7035 * @ret_next_index: next index to read
7036+ * @cmd_details: pointer to command details structure or NULL
7037 *
7038 * Dump internal FW/HW data for debug purposes.
7039 *
7040 **/
7041 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
7042- u8 table_id, u32 start_index, u16 buff_size,
7043- void *buff, u16 *ret_buff_size,
7044- u8 *ret_next_table, u32 *ret_next_index,
7045- struct i40e_asq_cmd_details *cmd_details)
7046+ u8 table_id, u32 start_index, u16 buff_size,
7047+ void *buff, u16 *ret_buff_size,
7048+ u8 *ret_next_table, u32 *ret_next_index,
7049+ struct i40e_asq_cmd_details *cmd_details)
7050 {
7051 struct i40e_aq_desc desc;
7052 struct i40e_aqc_debug_dump_internals *cmd =
7053@@ -4338,24 +5223,24 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
7054 i40e_fill_default_direct_cmd_desc(&desc,
7055 i40e_aqc_opc_debug_dump_internals);
7056 /* Indirect Command */
7057- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
7058+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
7059 if (buff_size > I40E_AQ_LARGE_BUF)
7060- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
7061+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
7062
7063 cmd->cluster_id = cluster_id;
7064 cmd->table_id = table_id;
7065- cmd->idx = cpu_to_le32(start_index);
7066+ cmd->idx = CPU_TO_LE32(start_index);
7067
7068- desc.datalen = cpu_to_le16(buff_size);
7069+ desc.datalen = CPU_TO_LE16(buff_size);
7070
7071 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
7072 if (!status) {
7073- if (ret_buff_size)
7074- *ret_buff_size = le16_to_cpu(desc.datalen);
7075- if (ret_next_table)
7076+ if (ret_buff_size != NULL)
7077+ *ret_buff_size = LE16_TO_CPU(desc.datalen);
7078+ if (ret_next_table != NULL)
7079 *ret_next_table = resp->table_id;
7080- if (ret_next_index)
7081- *ret_next_index = le32_to_cpu(resp->idx);
7082+ if (ret_next_index != NULL)
7083+ *ret_next_index = LE32_TO_CPU(resp->idx);
7084 }
7085
7086 return status;
7087@@ -4372,8 +5257,8 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
7088 * Read bw from the alternate ram for the given pf
7089 **/
7090 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
7091- u32 *max_bw, u32 *min_bw,
7092- bool *min_valid, bool *max_valid)
7093+ u32 *max_bw, u32 *min_bw,
7094+ bool *min_valid, bool *max_valid)
7095 {
7096 i40e_status status;
7097 u32 max_bw_addr, min_bw_addr;
7098@@ -4420,19 +5305,15 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
7099 u16 bwd_size = sizeof(*bw_data);
7100
7101 i40e_fill_default_direct_cmd_desc(&desc,
7102- i40e_aqc_opc_configure_partition_bw);
7103+ i40e_aqc_opc_configure_partition_bw);
7104
7105 /* Indirect command */
7106- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
7107- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
7108+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
7109+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
7110
7111- if (bwd_size > I40E_AQ_LARGE_BUF)
7112- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
7113+ desc.datalen = CPU_TO_LE16(bwd_size);
7114
7115- desc.datalen = cpu_to_le16(bwd_size);
7116-
7117- status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
7118- cmd_details);
7119+ status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
7120
7121 return status;
7122 }
7123@@ -4441,13 +5322,13 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
7124 * i40e_read_phy_register_clause22
7125 * @hw: pointer to the HW structure
7126 * @reg: register address in the page
7127- * @phy_adr: PHY address on MDIO interface
7128+ * @phy_addr: PHY address on MDIO interface
7129 * @value: PHY register value
7130 *
7131 * Reads specified PHY register value
7132 **/
7133 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
7134- u16 reg, u8 phy_addr, u16 *value)
7135+ u16 reg, u8 phy_addr, u16 *value)
7136 {
7137 i40e_status status = I40E_ERR_TIMEOUT;
7138 u8 port_num = (u8)hw->func_caps.mdio_port_num;
7139@@ -4463,7 +5344,7 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
7140 do {
7141 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
7142 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
7143- status = 0;
7144+ status = I40E_SUCCESS;
7145 break;
7146 }
7147 udelay(10);
7148@@ -4486,13 +5367,13 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
7149 * i40e_write_phy_register_clause22
7150 * @hw: pointer to the HW structure
7151 * @reg: register address in the page
7152- * @phy_adr: PHY address on MDIO interface
7153+ * @phy_addr: PHY address on MDIO interface
7154 * @value: PHY register value
7155 *
7156 * Writes specified PHY register value
7157 **/
7158 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
7159- u16 reg, u8 phy_addr, u16 value)
7160+ u16 reg, u8 phy_addr, u16 value)
7161 {
7162 i40e_status status = I40E_ERR_TIMEOUT;
7163 u8 port_num = (u8)hw->func_caps.mdio_port_num;
7164@@ -4512,7 +5393,7 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
7165 do {
7166 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
7167 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
7168- status = 0;
7169+ status = I40E_SUCCESS;
7170 break;
7171 }
7172 udelay(10);
7173@@ -4527,7 +5408,7 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
7174 * @hw: pointer to the HW structure
7175 * @page: registers page number
7176 * @reg: register address in the page
7177- * @phy_adr: PHY address on MDIO interface
7178+ * @phy_addr: PHY address on MDIO interface
7179 * @value: PHY register value
7180 *
7181 * Reads specified PHY register value
7182@@ -4536,9 +5417,9 @@ i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
7183 u8 page, u16 reg, u8 phy_addr, u16 *value)
7184 {
7185 i40e_status status = I40E_ERR_TIMEOUT;
7186- u32 command = 0;
7187+ u32 command = 0;
7188 u16 retry = 1000;
7189- u8 port_num = hw->func_caps.mdio_port_num;
7190+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
7191
7192 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
7193 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
7194@@ -4551,10 +5432,10 @@ i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
7195 do {
7196 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
7197 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
7198- status = 0;
7199+ status = I40E_SUCCESS;
7200 break;
7201 }
7202- usleep_range(10, 20);
7203+ udelay(10);
7204 retry--;
7205 } while (retry);
7206
7207@@ -4576,10 +5457,10 @@ i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
7208 do {
7209 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
7210 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
7211- status = 0;
7212+ status = I40E_SUCCESS;
7213 break;
7214 }
7215- usleep_range(10, 20);
7216+ udelay(10);
7217 retry--;
7218 } while (retry);
7219
7220@@ -4601,7 +5482,7 @@ phy_read_end:
7221 * @hw: pointer to the HW structure
7222 * @page: registers page number
7223 * @reg: register address in the page
7224- * @phy_adr: PHY address on MDIO interface
7225+ * @phy_addr: PHY address on MDIO interface
7226 * @value: PHY register value
7227 *
7228 * Writes value to specified PHY register
7229@@ -4610,9 +5491,9 @@ i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
7230 u8 page, u16 reg, u8 phy_addr, u16 value)
7231 {
7232 i40e_status status = I40E_ERR_TIMEOUT;
7233- u32 command = 0;
7234+ u32 command = 0;
7235 u16 retry = 1000;
7236- u8 port_num = hw->func_caps.mdio_port_num;
7237+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
7238
7239 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
7240 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
7241@@ -4625,10 +5506,10 @@ i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
7242 do {
7243 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
7244 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
7245- status = 0;
7246+ status = I40E_SUCCESS;
7247 break;
7248 }
7249- usleep_range(10, 20);
7250+ udelay(10);
7251 retry--;
7252 } while (retry);
7253 if (status) {
7254@@ -4652,10 +5533,10 @@ i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
7255 do {
7256 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
7257 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
7258- status = 0;
7259+ status = I40E_SUCCESS;
7260 break;
7261 }
7262- usleep_range(10, 20);
7263+ udelay(10);
7264 retry--;
7265 } while (retry);
7266
7267@@ -4668,28 +5549,30 @@ phy_write_end:
7268 * @hw: pointer to the HW structure
7269 * @page: registers page number
7270 * @reg: register address in the page
7271- * @phy_adr: PHY address on MDIO interface
7272+ * @phy_addr: PHY address on MDIO interface
7273 * @value: PHY register value
7274 *
7275 * Writes value to specified PHY register
7276 **/
7277 i40e_status i40e_write_phy_register(struct i40e_hw *hw,
7278- u8 page, u16 reg, u8 phy_addr, u16 value)
7279+ u8 page, u16 reg, u8 phy_addr, u16 value)
7280 {
7281 i40e_status status;
7282
7283 switch (hw->device_id) {
7284 case I40E_DEV_ID_1G_BASE_T_X722:
7285- status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
7286- value);
7287+ status = i40e_write_phy_register_clause22(hw,
7288+ reg, phy_addr, value);
7289 break;
7290 case I40E_DEV_ID_10G_BASE_T:
7291 case I40E_DEV_ID_10G_BASE_T4:
7292+ case I40E_DEV_ID_10G_BASE_T_BC:
7293+ case I40E_DEV_ID_5G_BASE_T_BC:
7294 case I40E_DEV_ID_10G_BASE_T_X722:
7295 case I40E_DEV_ID_25G_B:
7296 case I40E_DEV_ID_25G_SFP28:
7297- status = i40e_write_phy_register_clause45(hw, page, reg,
7298- phy_addr, value);
7299+ status = i40e_write_phy_register_clause45(hw,
7300+ page, reg, phy_addr, value);
7301 break;
7302 default:
7303 status = I40E_ERR_UNKNOWN_PHY;
7304@@ -4704,13 +5587,13 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
7305 * @hw: pointer to the HW structure
7306 * @page: registers page number
7307 * @reg: register address in the page
7308- * @phy_adr: PHY address on MDIO interface
7309+ * @phy_addr: PHY address on MDIO interface
7310 * @value: PHY register value
7311 *
7312 * Reads specified PHY register value
7313 **/
7314 i40e_status i40e_read_phy_register(struct i40e_hw *hw,
7315- u8 page, u16 reg, u8 phy_addr, u16 *value)
7316+ u8 page, u16 reg, u8 phy_addr, u16 *value)
7317 {
7318 i40e_status status;
7319
7320@@ -4721,6 +5604,8 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
7321 break;
7322 case I40E_DEV_ID_10G_BASE_T:
7323 case I40E_DEV_ID_10G_BASE_T4:
7324+ case I40E_DEV_ID_10G_BASE_T_BC:
7325+ case I40E_DEV_ID_5G_BASE_T_BC:
7326 case I40E_DEV_ID_10G_BASE_T_X722:
7327 case I40E_DEV_ID_25G_B:
7328 case I40E_DEV_ID_25G_SFP28:
7329@@ -4739,13 +5624,12 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
7330 * i40e_get_phy_address
7331 * @hw: pointer to the HW structure
7332 * @dev_num: PHY port num that address we want
7333- * @phy_addr: Returned PHY address
7334 *
7335 * Gets PHY address for current port
7336 **/
7337 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
7338 {
7339- u8 port_num = hw->func_caps.mdio_port_num;
7340+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
7341 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
7342
7343 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
7344@@ -4760,11 +5644,11 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
7345 * Blinks PHY link LED
7346 **/
7347 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
7348- u32 time, u32 interval)
7349+ u32 time, u32 interval)
7350 {
7351- i40e_status status = 0;
7352+ i40e_status status = I40E_SUCCESS;
7353 u32 i;
7354- u16 led_ctl;
7355+ u16 led_ctl = 0;
7356 u16 gpio_led_port;
7357 u16 led_reg;
7358 u16 led_addr = I40E_PHY_LED_PROV_REG_1;
7359@@ -4825,6 +5709,64 @@ phy_blinking_end:
7360 return status;
7361 }
7362
7363+/**
7364+ * i40e_led_get_reg - read LED register
7365+ * @hw: pointer to the HW structure
7366+ * @led_addr: LED register address
7367+ * @reg_val: read register value
7368+ **/
7369+static i40e_status i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
7370+ u32 *reg_val)
7371+{
7372+ i40e_status status;
7373+ u8 phy_addr = 0;
7374+
7375+ *reg_val = 0;
7376+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
7377+ status = i40e_aq_get_phy_register(hw,
7378+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
7379+ I40E_PHY_COM_REG_PAGE, true,
7380+ I40E_PHY_LED_PROV_REG_1,
7381+ reg_val, NULL);
7382+ } else {
7383+ phy_addr = i40e_get_phy_address(hw, hw->port);
7384+ status = i40e_read_phy_register_clause45(hw,
7385+ I40E_PHY_COM_REG_PAGE,
7386+ led_addr, phy_addr,
7387+ (u16 *)reg_val);
7388+ }
7389+ return status;
7390+}
7391+
7392+/**
7393+ * i40e_led_set_reg - write LED register
7394+ * @hw: pointer to the HW structure
7395+ * @led_addr: LED register address
7396+ * @reg_val: register value to write
7397+ **/
7398+static i40e_status i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
7399+ u32 reg_val)
7400+{
7401+ i40e_status status;
7402+ u8 phy_addr = 0;
7403+
7404+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
7405+ status = i40e_aq_set_phy_register(hw,
7406+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
7407+ I40E_PHY_COM_REG_PAGE, true,
7408+ I40E_PHY_LED_PROV_REG_1,
7409+ reg_val, NULL);
7410+ } else {
7411+ phy_addr = i40e_get_phy_address(hw, hw->port);
7412+ status = i40e_write_phy_register_clause45(hw,
7413+ I40E_PHY_COM_REG_PAGE,
7414+ led_addr, phy_addr,
7415+ (u16)reg_val);
7416+ }
7417+
7418+ return status;
7419+}
7420+
7421 /**
7422 * i40e_led_get_phy - return current on/off mode
7423 * @hw: pointer to the hw struct
7424@@ -4833,21 +5775,27 @@ phy_blinking_end:
7425 *
7426 **/
7427 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
7428- u16 *val)
7429+ u16 *val)
7430 {
7431- i40e_status status = 0;
7432+ i40e_status status = I40E_SUCCESS;
7433 u16 gpio_led_port;
7434+ u32 reg_val_aq;
7435+ u16 temp_addr;
7436 u8 phy_addr = 0;
7437 u16 reg_val;
7438- u16 temp_addr;
7439- u8 port_num;
7440- u32 i;
7441
7442+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
7443+ status = i40e_aq_get_phy_register(hw,
7444+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
7445+ I40E_PHY_COM_REG_PAGE, true,
7446+ I40E_PHY_LED_PROV_REG_1,
7447+ &reg_val_aq, NULL);
7448+ if (status == I40E_SUCCESS)
7449+ *val = (u16)reg_val_aq;
7450+ return status;
7451+ }
7452 temp_addr = I40E_PHY_LED_PROV_REG_1;
7453- i = rd32(hw, I40E_PFGEN_PORTNUM);
7454- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
7455- phy_addr = i40e_get_phy_address(hw, port_num);
7456-
7457+ phy_addr = i40e_get_phy_address(hw, hw->port);
7458 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
7459 temp_addr++) {
7460 status = i40e_read_phy_register_clause45(hw,
7461@@ -4869,62 +5817,240 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
7462 * i40e_led_set_phy
7463 * @hw: pointer to the HW structure
7464 * @on: true or false
7465+ * @led_addr: address of led register to use
7466 * @mode: original val plus bit for set or ignore
7467+ *
7468 * Set led's on or off when controlled by the PHY
7469 *
7470 **/
7471 i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
7472- u16 led_addr, u32 mode)
7473+ u16 led_addr, u32 mode)
7474 {
7475- i40e_status status = 0;
7476- u16 led_ctl = 0;
7477- u16 led_reg = 0;
7478- u8 phy_addr = 0;
7479- u8 port_num;
7480- u32 i;
7481+ i40e_status status = I40E_SUCCESS;
7482+ u32 led_ctl = 0;
7483+ u32 led_reg = 0;
7484
7485- i = rd32(hw, I40E_PFGEN_PORTNUM);
7486- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
7487- phy_addr = i40e_get_phy_address(hw, port_num);
7488- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
7489- led_addr, phy_addr, &led_reg);
7490+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
7491 if (status)
7492 return status;
7493 led_ctl = led_reg;
7494 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
7495 led_reg = 0;
7496- status = i40e_write_phy_register_clause45(hw,
7497- I40E_PHY_COM_REG_PAGE,
7498- led_addr, phy_addr,
7499- led_reg);
7500+ status = i40e_led_set_reg(hw, led_addr, led_reg);
7501 if (status)
7502 return status;
7503 }
7504- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
7505- led_addr, phy_addr, &led_reg);
7506+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
7507 if (status)
7508 goto restore_config;
7509 if (on)
7510 led_reg = I40E_PHY_LED_MANUAL_ON;
7511 else
7512 led_reg = 0;
7513- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
7514- led_addr, phy_addr, led_reg);
7515+ status = i40e_led_set_reg(hw, led_addr, led_reg);
7516 if (status)
7517 goto restore_config;
7518 if (mode & I40E_PHY_LED_MODE_ORIG) {
7519 led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
7520- status = i40e_write_phy_register_clause45(hw,
7521- I40E_PHY_COM_REG_PAGE,
7522- led_addr, phy_addr, led_ctl);
7523+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
7524 }
7525 return status;
7526+
7527 restore_config:
7528- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
7529- led_addr, phy_addr, led_ctl);
7530+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
7531 return status;
7532 }
7533
7534+/**
7535+ * i40e_get_phy_lpi_status - read LPI status from PHY or MAC register
7536+ * @hw: pointer to the hw struct
7537+ * @stat: pointer to structure with status of rx and tx lpi
7538+ *
7539+ * Read LPI state directly from external PHY register or from MAC
7540+ * register, depending on device ID and current link speed.
7541+ */
7542+i40e_status i40e_get_phy_lpi_status(struct i40e_hw *hw,
7543+ struct i40e_hw_port_stats *stat)
7544+{
7545+ i40e_status ret = I40E_SUCCESS;
7546+ u32 val;
7547+
7548+ stat->rx_lpi_status = 0;
7549+ stat->tx_lpi_status = 0;
7550+
7551+ if ((hw->device_id == I40E_DEV_ID_10G_BASE_T_BC ||
7552+ hw->device_id == I40E_DEV_ID_5G_BASE_T_BC) &&
7553+ (hw->phy.link_info.link_speed == I40E_LINK_SPEED_2_5GB ||
7554+ hw->phy.link_info.link_speed == I40E_LINK_SPEED_5GB)) {
7555+ ret = i40e_aq_get_phy_register(hw,
7556+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
7557+ I40E_BCM_PHY_PCS_STATUS1_PAGE,
7558+ true,
7559+ I40E_BCM_PHY_PCS_STATUS1_REG,
7560+ &val, NULL);
7561+
7562+ if (ret != I40E_SUCCESS)
7563+ return ret;
7564+
7565+ stat->rx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_RX_LPI);
7566+ stat->tx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_TX_LPI);
7567+
7568+ return ret;
7569+ }
7570+
7571+ val = rd32(hw, I40E_PRTPM_EEE_STAT);
7572+ stat->rx_lpi_status = (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
7573+ I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
7574+ stat->tx_lpi_status = (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
7575+ I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
7576+
7577+ return ret;
7578+}
7579+
7580+/**
7581+ * i40e_get_lpi_counters - read LPI counters from EEE statistics
7582+ * @hw: pointer to the hw struct
7583+ * @tx_counter: pointer to memory for TX LPI counter
7584+ * @rx_counter: pointer to memory for RX LPI counter
7585+ * @is_clear: returns true if counters are clear after read
7586+ *
7587+ * Read Low Power Idle (LPI) mode counters from Energy Efficient
7588+ * Ethernet (EEE) statistics.
7589+ **/
7590+i40e_status i40e_get_lpi_counters(struct i40e_hw *hw,
7591+ u32 *tx_counter, u32 *rx_counter,
7592+ bool *is_clear)
7593+{
7594+ /* only X710-T*L requires special handling of counters
7595+ * for other devices we just read the MAC registers
7596+ */
7597+ if ((hw->device_id == I40E_DEV_ID_10G_BASE_T_BC ||
7598+ hw->device_id == I40E_DEV_ID_5G_BASE_T_BC) &&
7599+ hw->phy.link_info.link_speed != I40E_LINK_SPEED_1GB) {
7600+ i40e_status retval;
7601+ u32 cmd_status;
7602+
7603+ *is_clear = false;
7604+ retval = i40e_aq_run_phy_activity(hw,
7605+ I40E_AQ_RUN_PHY_ACT_ID_USR_DFND,
7606+ I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT,
7607+ &cmd_status, tx_counter, rx_counter, NULL);
7608+
7609+ if (cmd_status != I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC)
7610+ retval = I40E_ERR_ADMIN_QUEUE_ERROR;
7611+
7612+ return retval;
7613+ }
7614+
7615+ *is_clear = true;
7616+ *tx_counter = rd32(hw, I40E_PRTPM_TLPIC);
7617+ *rx_counter = rd32(hw, I40E_PRTPM_RLPIC);
7618+
7619+ return I40E_SUCCESS;
7620+}
7621+
7622+/**
7623+ * i40e_get_lpi_duration - read LPI time duration from EEE statistics
7624+ * @hw: pointer to the hw struct
7625+ * @stat: pointer to structure with status of rx and tx lpi
7626+ * @tx_duration: pointer to memory for TX LPI time duration
7627+ * @rx_duration: pointer to memory for RX LPI time duration
7628+ *
7629+ * Read Low Power Idle (LPI) mode time duration from Energy Efficient
7630+ * Ethernet (EEE) statistics.
7631+ */
7632+i40e_status i40e_get_lpi_duration(struct i40e_hw *hw,
7633+ struct i40e_hw_port_stats *stat,
7634+ u64 *tx_duration, u64 *rx_duration)
7635+{
7636+ u32 tx_time_dur, rx_time_dur;
7637+ i40e_status retval;
7638+ u32 cmd_status;
7639+
7640+ if (hw->device_id != I40E_DEV_ID_10G_BASE_T_BC &&
7641+ hw->device_id != I40E_DEV_ID_5G_BASE_T_BC)
7642+ return I40E_ERR_NOT_IMPLEMENTED;
7643+
7644+ retval = i40e_aq_run_phy_activity
7645+ (hw, I40E_AQ_RUN_PHY_ACT_ID_USR_DFND,
7646+ I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_DUR,
7647+ &cmd_status, &tx_time_dur, &rx_time_dur, NULL);
7648+
7649+ if (retval)
7650+ return retval;
7651+ if ((cmd_status & I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK) !=
7652+ I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC)
7653+ return I40E_ERR_ADMIN_QUEUE_ERROR;
7654+
7655+ if (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB &&
7656+ !tx_time_dur && !rx_time_dur &&
7657+ stat->tx_lpi_status && stat->rx_lpi_status) {
7658+ retval = i40e_aq_run_phy_activity
7659+ (hw, I40E_AQ_RUN_PHY_ACT_ID_USR_DFND,
7660+ I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT_DUR,
7661+ &cmd_status,
7662+ &tx_time_dur, &rx_time_dur, NULL);
7663+
7664+ if (retval)
7665+ return retval;
7666+ if ((cmd_status & I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK) !=
7667+ I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC)
7668+ return I40E_ERR_ADMIN_QUEUE_ERROR;
7669+ tx_time_dur = 0;
7670+ rx_time_dur = 0;
7671+ }
7672+
7673+ *tx_duration = tx_time_dur;
7674+ *rx_duration = rx_time_dur;
7675+
7676+ return retval;
7677+}
7678+
7679+/**
7680+ * i40e_lpi_stat_update - update LPI counters with values relative to offset
7681+ * @hw: pointer to the hw struct
7682+ * @offset_loaded: flag indicating need of writing current value to offset
7683+ * @tx_offset: pointer to offset of TX LPI counter
7684+ * @tx_stat: pointer to value of TX LPI counter
7685+ * @rx_offset: pointer to offset of RX LPI counter
7686+ * @rx_stat: pointer to value of RX LPI counter
7687+ *
7688+ * Update Low Power Idle (LPI) mode counters while having regard to passed
7689+ * offsets.
7690+ **/
7691+i40e_status i40e_lpi_stat_update(struct i40e_hw *hw,
7692+ bool offset_loaded, u64 *tx_offset,
7693+ u64 *tx_stat, u64 *rx_offset,
7694+ u64 *rx_stat)
7695+{
7696+ i40e_status retval;
7697+ u32 tx_counter, rx_counter;
7698+ bool is_clear;
7699+
7700+ retval = i40e_get_lpi_counters(hw, &tx_counter, &rx_counter, &is_clear);
7701+ if (retval)
7702+ goto err;
7703+
7704+ if (is_clear) {
7705+ *tx_stat += tx_counter;
7706+ *rx_stat += rx_counter;
7707+ } else {
7708+ if (!offset_loaded) {
7709+ *tx_offset = tx_counter;
7710+ *rx_offset = rx_counter;
7711+ }
7712+
7713+ *tx_stat = (tx_counter >= *tx_offset) ?
7714+ (u32)(tx_counter - *tx_offset) :
7715+ (u32)((tx_counter + BIT_ULL(32)) - *tx_offset);
7716+ *rx_stat = (rx_counter >= *rx_offset) ?
7717+ (u32)(rx_counter - *rx_offset) :
7718+ (u32)((rx_counter + BIT_ULL(32)) - *rx_offset);
7719+ }
7720+err:
7721+ return retval;
7722+}
7723+
7724 /**
7725 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
7726 * @hw: pointer to the hw struct
7727@@ -4944,17 +6070,17 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
7728 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
7729 i40e_status status;
7730
7731- if (!reg_val)
7732+ if (reg_val == NULL)
7733 return I40E_ERR_PARAM;
7734
7735 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
7736
7737- cmd_resp->address = cpu_to_le32(reg_addr);
7738+ cmd_resp->address = CPU_TO_LE32(reg_addr);
7739
7740 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
7741
7742- if (status == 0)
7743- *reg_val = le32_to_cpu(cmd_resp->value);
7744+ if (status == I40E_SUCCESS)
7745+ *reg_val = LE32_TO_CPU(cmd_resp->value);
7746
7747 return status;
7748 }
7749@@ -4966,7 +6092,7 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
7750 **/
7751 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
7752 {
7753- i40e_status status = 0;
7754+ i40e_status status = I40E_SUCCESS;
7755 bool use_register;
7756 int retry = 5;
7757 u32 val = 0;
7758@@ -5012,10 +6138,11 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
7759
7760 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
7761
7762- cmd->address = cpu_to_le32(reg_addr);
7763- cmd->value = cpu_to_le32(reg_val);
7764+ cmd->address = CPU_TO_LE32(reg_addr);
7765+ cmd->value = CPU_TO_LE32(reg_val);
7766
7767- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
7768+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
7769+ cmd_details, true);
7770
7771 return status;
7772 }
7773@@ -5028,7 +6155,7 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
7774 **/
7775 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
7776 {
7777- i40e_status status = 0;
7778+ i40e_status status = I40E_SUCCESS;
7779 bool use_register;
7780 int retry = 5;
7781
7782@@ -5052,7 +6179,358 @@ do_retry:
7783 }
7784
7785 /**
7786- * i40e_aq_write_ppp - Write pipeline personalization profile (ppp)
7787+ * i40e_mdio_if_number_selection - MDIO I/F number selection
7788+ * @hw: pointer to the hw struct
7789+ * @set_mdio: use MDIO I/F number specified by mdio_num
7790+ * @mdio_num: MDIO I/F number
7791+ * @cmd: pointer to PHY Register command structure
7792+ **/
7793+static void
7794+i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, u8 mdio_num,
7795+ struct i40e_aqc_phy_register_access *cmd)
7796+{
7797+ if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
7798+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
7799+ cmd->cmd_flags |=
7800+ I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
7801+ ((mdio_num <<
7802+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
7803+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
7804+ else
7805+ i40e_debug(hw, I40E_DEBUG_PHY,
7806+ "MDIO I/F number selection not supported by current FW version.\n");
7807+ }
7808+}
7809+
7810+/**
7811+ * i40e_aq_set_phy_register_ext
7812+ * @hw: pointer to the hw struct
7813+ * @phy_select: select which phy should be accessed
7814+ * @dev_addr: PHY device address
7815+ * @page_change: enable auto page change
7816+ * @set_mdio: use MDIO I/F number specified by mdio_num
7817+ * @mdio_num: MDIO I/F number
7818+ * @reg_addr: PHY register address
7819+ * @reg_val: new register value
7820+ * @cmd_details: pointer to command details structure or NULL
7821+ *
7822+ * Write the external PHY register.
7823+ * NOTE: In common cases MDIO I/F number should not be changed, thats why you
7824+ * may use simple wrapper i40e_aq_set_phy_register.
7825+ **/
7826+enum i40e_status_code
7827+i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
7828+ u8 phy_select, u8 dev_addr, bool page_change,
7829+ bool set_mdio, u8 mdio_num,
7830+ u32 reg_addr, u32 reg_val,
7831+ struct i40e_asq_cmd_details *cmd_details)
7832+{
7833+ struct i40e_aq_desc desc;
7834+ struct i40e_aqc_phy_register_access *cmd =
7835+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
7836+ i40e_status status;
7837+
7838+ i40e_fill_default_direct_cmd_desc(&desc,
7839+ i40e_aqc_opc_set_phy_register);
7840+
7841+ cmd->phy_interface = phy_select;
7842+ cmd->dev_addres = dev_addr;
7843+ cmd->reg_address = CPU_TO_LE32(reg_addr);
7844+ cmd->reg_value = CPU_TO_LE32(reg_val);
7845+
7846+ if (!page_change)
7847+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
7848+
7849+ i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
7850+
7851+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
7852+
7853+ return status;
7854+}
7855+
7856+/**
7857+ * i40e_aq_get_phy_register_ext
7858+ * @hw: pointer to the hw struct
7859+ * @phy_select: select which phy should be accessed
7860+ * @dev_addr: PHY device address
7861+ * @page_change: enable auto page change
7862+ * @set_mdio: use MDIO I/F number specified by mdio_num
7863+ * @mdio_num: MDIO I/F number
7864+ * @reg_addr: PHY register address
7865+ * @reg_val: read register value
7866+ * @cmd_details: pointer to command details structure or NULL
7867+ *
7868+ * Read the external PHY register.
7869+ * NOTE: In common cases MDIO I/F number should not be changed, thats why you
7870+ * may use simple wrapper i40e_aq_get_phy_register.
7871+ **/
7872+enum i40e_status_code
7873+i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
7874+ u8 phy_select, u8 dev_addr, bool page_change,
7875+ bool set_mdio, u8 mdio_num,
7876+ u32 reg_addr, u32 *reg_val,
7877+ struct i40e_asq_cmd_details *cmd_details)
7878+{
7879+ struct i40e_aq_desc desc;
7880+ struct i40e_aqc_phy_register_access *cmd =
7881+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
7882+ i40e_status status;
7883+
7884+ i40e_fill_default_direct_cmd_desc(&desc,
7885+ i40e_aqc_opc_get_phy_register);
7886+
7887+ cmd->phy_interface = phy_select;
7888+ cmd->dev_addres = dev_addr;
7889+ cmd->reg_address = CPU_TO_LE32(reg_addr);
7890+
7891+ if (!page_change)
7892+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
7893+
7894+ i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
7895+
7896+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
7897+ if (!status)
7898+ *reg_val = LE32_TO_CPU(cmd->reg_value);
7899+
7900+ return status;
7901+}
7902+
7903+/**
7904+ * i40e_aq_run_phy_activity
7905+ * @hw: pointer to the hw struct
7906+ * @activity_id: ID of DNL activity to run
7907+ * @dnl_opcode: opcode passed to DNL script
7908+ * @cmd_status: pointer to memory to write return value of DNL script
7909+ * @data0: pointer to memory for first 4 bytes of data returned by DNL script
7910+ * @data1: pointer to memory for last 4 bytes of data returned by DNL script
7911+ * @cmd_details: pointer to command details structure or NULL
7912+ *
7913+ * Run DNL admin command.
7914+ **/
7915+enum i40e_status_code
7916+i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 dnl_opcode,
7917+ u32 *cmd_status, u32 *data0, u32 *data1,
7918+ struct i40e_asq_cmd_details *cmd_details)
7919+{
7920+ struct i40e_aqc_run_phy_activity *cmd;
7921+ i40e_status retval;
7922+ struct i40e_aq_desc desc;
7923+
7924+ cmd = (struct i40e_aqc_run_phy_activity *)&desc.params.raw;
7925+
7926+ if (!cmd_status || !data0 || !data1) {
7927+ retval = I40E_ERR_PARAM;
7928+ goto err;
7929+ }
7930+
7931+ i40e_fill_default_direct_cmd_desc(&desc,
7932+ i40e_aqc_opc_run_phy_activity);
7933+
7934+ cmd->activity_id = CPU_TO_LE16(activity_id);
7935+ cmd->params.cmd.dnl_opcode = CPU_TO_LE32(dnl_opcode);
7936+
7937+ retval = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
7938+ if (retval)
7939+ goto err;
7940+
7941+ *cmd_status = LE32_TO_CPU(cmd->params.resp.cmd_status);
7942+ *data0 = LE32_TO_CPU(cmd->params.resp.data0);
7943+ *data1 = LE32_TO_CPU(cmd->params.resp.data1);
7944+err:
7945+ return retval;
7946+}
7947+
7948+/**
7949+ * i40e_aq_set_arp_proxy_config
7950+ * @hw: pointer to the HW structure
7951+ * @proxy_config: pointer to proxy config command table struct
7952+ * @cmd_details: pointer to command details
7953+ *
7954+ * Set ARP offload parameters from pre-populated
7955+ * i40e_aqc_arp_proxy_data struct
7956+ **/
7957+i40e_status i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
7958+ struct i40e_aqc_arp_proxy_data *proxy_config,
7959+ struct i40e_asq_cmd_details *cmd_details)
7960+{
7961+ struct i40e_aq_desc desc;
7962+ i40e_status status;
7963+
7964+ if (!proxy_config)
7965+ return I40E_ERR_PARAM;
7966+
7967+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
7968+
7969+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
7970+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
7971+ desc.params.external.addr_high =
7972+ CPU_TO_LE32(upper_32_bits((u64)proxy_config));
7973+ desc.params.external.addr_low =
7974+ CPU_TO_LE32(lower_32_bits((u64)proxy_config));
7975+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data));
7976+
7977+ status = i40e_asq_send_command(hw, &desc, proxy_config,
7978+ sizeof(struct i40e_aqc_arp_proxy_data),
7979+ cmd_details);
7980+
7981+ return status;
7982+}
7983+
7984+/**
7985+ * i40e_aq_opc_set_ns_proxy_table_entry
7986+ * @hw: pointer to the HW structure
7987+ * @ns_proxy_table_entry: pointer to NS table entry command struct
7988+ * @cmd_details: pointer to command details
7989+ *
7990+ * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters
7991+ * from pre-populated i40e_aqc_ns_proxy_data struct
7992+ **/
7993+i40e_status i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
7994+ struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
7995+ struct i40e_asq_cmd_details *cmd_details)
7996+{
7997+ struct i40e_aq_desc desc;
7998+ i40e_status status;
7999+
8000+ if (!ns_proxy_table_entry)
8001+ return I40E_ERR_PARAM;
8002+
8003+ i40e_fill_default_direct_cmd_desc(&desc,
8004+ i40e_aqc_opc_set_ns_proxy_table_entry);
8005+
8006+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
8007+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
8008+ desc.params.external.addr_high =
8009+ CPU_TO_LE32(upper_32_bits((u64)ns_proxy_table_entry));
8010+ desc.params.external.addr_low =
8011+ CPU_TO_LE32(lower_32_bits((u64)ns_proxy_table_entry));
8012+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data));
8013+
8014+ status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
8015+ sizeof(struct i40e_aqc_ns_proxy_data),
8016+ cmd_details);
8017+
8018+ return status;
8019+}
8020+
8021+/**
8022+ * i40e_aq_set_clear_wol_filter
8023+ * @hw: pointer to the hw struct
8024+ * @filter_index: index of filter to modify (0-7)
8025+ * @filter: buffer containing filter to be set
8026+ * @set_filter: true to set filter, false to clear filter
8027+ * @no_wol_tco: if true, pass through packets cannot cause wake-up
8028+ * if false, pass through packets may cause wake-up
8029+ * @filter_valid: true if filter action is valid
8030+ * @no_wol_tco_valid: true if no WoL in TCO traffic action valid
8031+ * @cmd_details: pointer to command details structure or NULL
8032+ *
8033+ * Set or clear WoL filter for port attached to the PF
8034+ **/
8035+i40e_status i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
8036+ u8 filter_index,
8037+ struct i40e_aqc_set_wol_filter_data *filter,
8038+ bool set_filter, bool no_wol_tco,
8039+ bool filter_valid, bool no_wol_tco_valid,
8040+ struct i40e_asq_cmd_details *cmd_details)
8041+{
8042+ struct i40e_aq_desc desc;
8043+ struct i40e_aqc_set_wol_filter *cmd =
8044+ (struct i40e_aqc_set_wol_filter *)&desc.params.raw;
8045+ i40e_status status;
8046+ u16 cmd_flags = 0;
8047+ u16 valid_flags = 0;
8048+ u16 buff_len = 0;
8049+
8050+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter);
8051+
8052+ if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS)
8053+ return I40E_ERR_PARAM;
8054+ cmd->filter_index = CPU_TO_LE16(filter_index);
8055+
8056+ if (set_filter) {
8057+ if (!filter)
8058+ return I40E_ERR_PARAM;
8059+
8060+ cmd_flags |= I40E_AQC_SET_WOL_FILTER;
8061+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR;
8062+ }
8063+
8064+ if (no_wol_tco)
8065+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
8066+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
8067+
8068+ if (filter_valid)
8069+ valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID;
8070+ if (no_wol_tco_valid)
8071+ valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
8072+ cmd->valid_flags = CPU_TO_LE16(valid_flags);
8073+
8074+ buff_len = sizeof(*filter);
8075+ desc.datalen = CPU_TO_LE16(buff_len);
8076+
8077+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
8078+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
8079+
8080+ cmd->address_high = CPU_TO_LE32(upper_32_bits((u64)filter));
8081+ cmd->address_low = CPU_TO_LE32(lower_32_bits((u64)filter));
8082+
8083+ status = i40e_asq_send_command(hw, &desc, filter,
8084+ buff_len, cmd_details);
8085+
8086+ return status;
8087+}
8088+
8089+/**
8090+ * i40e_aq_get_wake_event_reason
8091+ * @hw: pointer to the hw struct
8092+ * @wake_reason: return value, index of matching filter
8093+ * @cmd_details: pointer to command details structure or NULL
8094+ *
8095+ * Get information for the reason of a Wake Up event
8096+ **/
8097+i40e_status i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
8098+ u16 *wake_reason,
8099+ struct i40e_asq_cmd_details *cmd_details)
8100+{
8101+ struct i40e_aq_desc desc;
8102+ struct i40e_aqc_get_wake_reason_completion *resp =
8103+ (struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw;
8104+ i40e_status status;
8105+
8106+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason);
8107+
8108+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
8109+
8110+ if (status == I40E_SUCCESS)
8111+ *wake_reason = LE16_TO_CPU(resp->wake_reason);
8112+
8113+ return status;
8114+}
8115+
8116+/**
8117+* i40e_aq_clear_all_wol_filters
8118+* @hw: pointer to the hw struct
8119+* @cmd_details: pointer to command details structure or NULL
8120+*
8121+* Get information for the reason of a Wake Up event
8122+**/
8123+i40e_status i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
8124+ struct i40e_asq_cmd_details *cmd_details)
8125+{
8126+ struct i40e_aq_desc desc;
8127+ i40e_status status;
8128+
8129+ i40e_fill_default_direct_cmd_desc(&desc,
8130+ i40e_aqc_opc_clear_all_wol_filters);
8131+
8132+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
8133+
8134+ return status;
8135+}
8136+
8137+/**
8138+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
8139 * @hw: pointer to the hw struct
8140 * @buff: command buffer (size in bytes = buff_size)
8141 * @buff_size: buffer size in bytes
8142@@ -5062,7 +6540,7 @@ do_retry:
8143 * @cmd_details: pointer to command details structure or NULL
8144 **/
8145 enum
8146-i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
8147+i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
8148 u16 buff_size, u32 track_id,
8149 u32 *error_offset, u32 *error_info,
8150 struct i40e_asq_cmd_details *cmd_details)
8151@@ -5071,41 +6549,42 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
8152 struct i40e_aqc_write_personalization_profile *cmd =
8153 (struct i40e_aqc_write_personalization_profile *)
8154 &desc.params.raw;
8155- struct i40e_aqc_write_ppp_resp *resp;
8156+ struct i40e_aqc_write_ddp_resp *resp;
8157 i40e_status status;
8158
8159 i40e_fill_default_direct_cmd_desc(&desc,
8160- i40e_aqc_opc_write_personalization_profile);
8161+ i40e_aqc_opc_write_personalization_profile);
8162
8163- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
8164+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
8165 if (buff_size > I40E_AQ_LARGE_BUF)
8166- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
8167+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
8168
8169- desc.datalen = cpu_to_le16(buff_size);
8170+ desc.datalen = CPU_TO_LE16(buff_size);
8171
8172- cmd->profile_track_id = cpu_to_le32(track_id);
8173+ cmd->profile_track_id = CPU_TO_LE32(track_id);
8174
8175 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
8176 if (!status) {
8177- resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
8178+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
8179 if (error_offset)
8180- *error_offset = le32_to_cpu(resp->error_offset);
8181+ *error_offset = LE32_TO_CPU(resp->error_offset);
8182 if (error_info)
8183- *error_info = le32_to_cpu(resp->error_info);
8184+ *error_info = LE32_TO_CPU(resp->error_info);
8185 }
8186
8187 return status;
8188 }
8189
8190 /**
8191- * i40e_aq_get_ppp_list - Read pipeline personalization profile (ppp)
8192+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
8193 * @hw: pointer to the hw struct
8194 * @buff: command buffer (size in bytes = buff_size)
8195 * @buff_size: buffer size in bytes
8196+ * @flags: AdminQ command flags
8197 * @cmd_details: pointer to command details structure or NULL
8198 **/
8199 enum
8200-i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
8201+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
8202 u16 buff_size, u8 flags,
8203 struct i40e_asq_cmd_details *cmd_details)
8204 {
8205@@ -5115,12 +6594,12 @@ i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
8206 i40e_status status;
8207
8208 i40e_fill_default_direct_cmd_desc(&desc,
8209- i40e_aqc_opc_get_personalization_profile_list);
8210+ i40e_aqc_opc_get_personalization_profile_list);
8211
8212- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
8213+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
8214 if (buff_size > I40E_AQ_LARGE_BUF)
8215- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
8216- desc.datalen = cpu_to_le16(buff_size);
8217+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
8218+ desc.datalen = CPU_TO_LE16(buff_size);
8219
8220 cmd->flags = flags;
8221
8222@@ -5158,6 +6637,165 @@ i40e_find_segment_in_package(u32 segment_type,
8223 return NULL;
8224 }
8225
8226+/* Get section table in profile */
8227+#define I40E_SECTION_TABLE(profile, sec_tbl) \
8228+ do { \
8229+ struct i40e_profile_segment *p = (profile); \
8230+ u32 count; \
8231+ u32 *nvm; \
8232+ count = p->device_table_count; \
8233+ nvm = (u32 *)&p->device_table[count]; \
8234+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
8235+ } while (0)
8236+
8237+/* Get section header in profile */
8238+#define I40E_SECTION_HEADER(profile, offset) \
8239+ (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
8240+
8241+/**
8242+ * i40e_find_section_in_profile
8243+ * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
8244+ * @profile: pointer to the i40e segment header to be searched
8245+ *
8246+ * This function searches i40e segment for a particular section type. On
8247+ * success it returns a pointer to the section header, otherwise it will
8248+ * return NULL.
8249+ **/
8250+struct i40e_profile_section_header *
8251+i40e_find_section_in_profile(u32 section_type,
8252+ struct i40e_profile_segment *profile)
8253+{
8254+ struct i40e_profile_section_header *sec;
8255+ struct i40e_section_table *sec_tbl;
8256+ u32 sec_off;
8257+ u32 i;
8258+
8259+ if (profile->header.type != SEGMENT_TYPE_I40E)
8260+ return NULL;
8261+
8262+ I40E_SECTION_TABLE(profile, sec_tbl);
8263+
8264+ for (i = 0; i < sec_tbl->section_count; i++) {
8265+ sec_off = sec_tbl->section_offset[i];
8266+ sec = I40E_SECTION_HEADER(profile, sec_off);
8267+ if (sec->section.type == section_type)
8268+ return sec;
8269+ }
8270+
8271+ return NULL;
8272+}
8273+
8274+/**
8275+ * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
8276+ * @hw: pointer to the hw struct
8277+ * @aq: command buffer containing all data to execute AQ
8278+ **/
8279+static enum
8280+i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
8281+ struct i40e_profile_aq_section *aq)
8282+{
8283+ i40e_status status;
8284+ struct i40e_aq_desc desc;
8285+ u8 *msg = NULL;
8286+ u16 msglen;
8287+
8288+ i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
8289+ desc.flags |= CPU_TO_LE16(aq->flags);
8290+ i40e_memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw),
8291+ I40E_NONDMA_TO_NONDMA);
8292+
8293+ msglen = aq->datalen;
8294+ if (msglen) {
8295+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
8296+ I40E_AQ_FLAG_RD));
8297+ if (msglen > I40E_AQ_LARGE_BUF)
8298+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
8299+ desc.datalen = CPU_TO_LE16(msglen);
8300+ msg = &aq->data[0];
8301+ }
8302+
8303+ status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
8304+
8305+ if (status != I40E_SUCCESS) {
8306+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
8307+ "unable to exec DDP AQ opcode %u, error %d\n",
8308+ aq->opcode, status);
8309+ return status;
8310+ }
8311+
8312+ /* copy returned desc to aq_buf */
8313+ i40e_memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw),
8314+ I40E_NONDMA_TO_NONDMA);
8315+
8316+ return I40E_SUCCESS;
8317+}
8318+
8319+/**
8320+ * i40e_validate_profile
8321+ * @hw: pointer to the hardware structure
8322+ * @profile: pointer to the profile segment of the package to be validated
8323+ * @track_id: package tracking id
8324+ * @rollback: flag if the profile is for rollback.
8325+ *
8326+ * Validates supported devices and profile's sections.
8327+ */
8328+static enum i40e_status_code
8329+i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
8330+ u32 track_id, bool rollback)
8331+{
8332+ struct i40e_profile_section_header *sec = NULL;
8333+ i40e_status status = I40E_SUCCESS;
8334+ struct i40e_section_table *sec_tbl;
8335+ u32 vendor_dev_id;
8336+ u32 dev_cnt;
8337+ u32 sec_off;
8338+ u32 i;
8339+
8340+ if (track_id == I40E_DDP_TRACKID_INVALID) {
8341+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
8342+ return I40E_NOT_SUPPORTED;
8343+ }
8344+
8345+ dev_cnt = profile->device_table_count;
8346+ for (i = 0; i < dev_cnt; i++) {
8347+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
8348+ if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
8349+ hw->device_id == (vendor_dev_id & 0xFFFF))
8350+ break;
8351+ }
8352+ if (dev_cnt && (i == dev_cnt)) {
8353+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
8354+ "Device doesn't support DDP\n");
8355+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
8356+ }
8357+
8358+ I40E_SECTION_TABLE(profile, sec_tbl);
8359+
8360+ /* Validate sections types */
8361+ for (i = 0; i < sec_tbl->section_count; i++) {
8362+ sec_off = sec_tbl->section_offset[i];
8363+ sec = I40E_SECTION_HEADER(profile, sec_off);
8364+ if (rollback) {
8365+ if (sec->section.type == SECTION_TYPE_MMIO ||
8366+ sec->section.type == SECTION_TYPE_AQ ||
8367+ sec->section.type == SECTION_TYPE_RB_AQ) {
8368+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
8369+ "Not a roll-back package\n");
8370+ return I40E_NOT_SUPPORTED;
8371+ }
8372+ } else {
8373+ if (sec->section.type == SECTION_TYPE_RB_AQ ||
8374+ sec->section.type == SECTION_TYPE_RB_MMIO) {
8375+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
8376+ "Not an original package\n");
8377+ return I40E_NOT_SUPPORTED;
8378+ }
8379+ }
8380+ }
8381+
8382+ return status;
8383+}
8384+
8385 /**
8386 * i40e_write_profile
8387 * @hw: pointer to the hardware structure
8388@@ -5170,55 +6808,102 @@ enum i40e_status_code
8389 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
8390 u32 track_id)
8391 {
8392- i40e_status status = 0;
8393+ i40e_status status = I40E_SUCCESS;
8394 struct i40e_section_table *sec_tbl;
8395 struct i40e_profile_section_header *sec = NULL;
8396- u32 dev_cnt;
8397- u32 vendor_dev_id;
8398- u32 *nvm;
8399+ struct i40e_profile_aq_section *ddp_aq;
8400 u32 section_size = 0;
8401 u32 offset = 0, info = 0;
8402+ u32 sec_off;
8403 u32 i;
8404
8405- if (!track_id) {
8406- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
8407- return I40E_NOT_SUPPORTED;
8408- }
8409+ status = i40e_validate_profile(hw, profile, track_id, false);
8410+ if (status)
8411+ return status;
8412
8413- dev_cnt = profile->device_table_count;
8414+ I40E_SECTION_TABLE(profile, sec_tbl);
8415
8416- for (i = 0; i < dev_cnt; i++) {
8417- vendor_dev_id = profile->device_table[i].vendor_dev_id;
8418- if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
8419- if (hw->device_id == (vendor_dev_id & 0xFFFF))
8420+ for (i = 0; i < sec_tbl->section_count; i++) {
8421+ sec_off = sec_tbl->section_offset[i];
8422+ sec = I40E_SECTION_HEADER(profile, sec_off);
8423+ /* Process generic admin command */
8424+ if (sec->section.type == SECTION_TYPE_AQ) {
8425+ ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
8426+ status = i40e_ddp_exec_aq_section(hw, ddp_aq);
8427+ if (status) {
8428+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
8429+ "Failed to execute aq: section %d, opcode %u\n",
8430+ i, ddp_aq->opcode);
8431 break;
8432+ }
8433+ sec->section.type = SECTION_TYPE_RB_AQ;
8434+ }
8435+
8436+ /* Skip any non-mmio sections */
8437+ if (sec->section.type != SECTION_TYPE_MMIO)
8438+ continue;
8439+
8440+ section_size = sec->section.size +
8441+ sizeof(struct i40e_profile_section_header);
8442+
8443+ /* Write MMIO section */
8444+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
8445+ track_id, &offset, &info, NULL);
8446+ if (status) {
8447+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
8448+ "Failed to write profile: section %d, offset %d, info %d\n",
8449+ i, offset, info);
8450+ break;
8451+ }
8452 }
8453- if (i == dev_cnt) {
8454- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
8455- return I40E_ERR_DEVICE_NOT_SUPPORTED;
8456- }
8457+ return status;
8458+}
8459
8460- nvm = (u32 *)&profile->device_table[dev_cnt];
8461- sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
8462+/**
8463+ * i40e_rollback_profile
8464+ * @hw: pointer to the hardware structure
8465+ * @profile: pointer to the profile segment of the package to be removed
8466+ * @track_id: package tracking id
8467+ *
8468+ * Rolls back previously loaded package.
8469+ */
8470+enum i40e_status_code
8471+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
8472+ u32 track_id)
8473+{
8474+ struct i40e_profile_section_header *sec = NULL;
8475+ i40e_status status = I40E_SUCCESS;
8476+ struct i40e_section_table *sec_tbl;
8477+ u32 offset = 0, info = 0;
8478+ u32 section_size = 0;
8479+ u32 sec_off;
8480+ int i;
8481
8482- for (i = 0; i < sec_tbl->section_count; i++) {
8483- sec = (struct i40e_profile_section_header *)((u8 *)profile +
8484- sec_tbl->section_offset[i]);
8485+ status = i40e_validate_profile(hw, profile, track_id, true);
8486+ if (status)
8487+ return status;
8488
8489- /* Skip 'AQ', 'note' and 'name' sections */
8490- if (sec->section.type != SECTION_TYPE_MMIO)
8491+ I40E_SECTION_TABLE(profile, sec_tbl);
8492+
8493+ /* For rollback write sections in reverse */
8494+ for (i = sec_tbl->section_count - 1; i >= 0; i--) {
8495+ sec_off = sec_tbl->section_offset[i];
8496+ sec = I40E_SECTION_HEADER(profile, sec_off);
8497+
8498+ /* Skip any non-rollback sections */
8499+ if (sec->section.type != SECTION_TYPE_RB_MMIO)
8500 continue;
8501
8502 section_size = sec->section.size +
8503 sizeof(struct i40e_profile_section_header);
8504
8505- /* Write profile */
8506- status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size,
8507+ /* Write roll-back MMIO section */
8508+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
8509 track_id, &offset, &info, NULL);
8510 if (status) {
8511 i40e_debug(hw, I40E_DEBUG_PACKAGE,
8512- "Failed to write profile: offset %d, info %d",
8513- offset, info);
8514+ "Failed to write profile: section %d, offset %d, info %d\n",
8515+ i, offset, info);
8516 break;
8517 }
8518 }
8519@@ -5239,7 +6924,7 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
8520 struct i40e_profile_segment *profile,
8521 u8 *profile_info_sec, u32 track_id)
8522 {
8523- i40e_status status = 0;
8524+ i40e_status status = I40E_SUCCESS;
8525 struct i40e_profile_section_header *sec = NULL;
8526 struct i40e_profile_info *pinfo;
8527 u32 offset = 0, info = 0;
8528@@ -5255,10 +6940,11 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
8529 sec->section.offset);
8530 pinfo->track_id = track_id;
8531 pinfo->version = profile->version;
8532- pinfo->op = I40E_PPP_ADD_TRACKID;
8533- memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
8534+ pinfo->op = I40E_DDP_ADD_TRACKID;
8535+ i40e_memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE,
8536+ I40E_NONDMA_TO_NONDMA);
8537
8538- status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
8539+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
8540 track_id, &offset, &info, NULL);
8541 return status;
8542 }
8543diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
8544index 55079fe3e..0580a51fe 100644
8545--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
8546+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
8547@@ -1,28 +1,5 @@
8548-/*******************************************************************************
8549- *
8550- * Intel Ethernet Controller XL710 Family Linux Driver
8551- * Copyright(c) 2013 - 2017 Intel Corporation.
8552- *
8553- * This program is free software; you can redistribute it and/or modify it
8554- * under the terms and conditions of the GNU General Public License,
8555- * version 2, as published by the Free Software Foundation.
8556- *
8557- * This program is distributed in the hope it will be useful, but WITHOUT
8558- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8559- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8560- * more details.
8561- *
8562- * You should have received a copy of the GNU General Public License along
8563- * with this program. If not, see <http://www.gnu.org/licenses/>.
8564- *
8565- * The full GNU General Public License is included in this distribution in
8566- * the file called "COPYING".
8567- *
8568- * Contact Information:
8569- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
8570- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
8571- *
8572- ******************************************************************************/
8573+// SPDX-License-Identifier: GPL-2.0
8574+/* Copyright(c) 2013 - 2020 Intel Corporation. */
8575
8576 #include "i40e_adminq.h"
8577 #include "i40e_prototype.h"
8578@@ -46,7 +23,7 @@ i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
8579 *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
8580 I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
8581
8582- return 0;
8583+ return I40E_SUCCESS;
8584 }
8585
8586 /**
8587@@ -387,7 +364,6 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
8588 I40E_LLDP_TLV_LEN_SHIFT);
8589
8590 dcbcfg->numapps = length / sizeof(*app);
8591-
8592 if (!dcbcfg->numapps)
8593 return;
8594 if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
8595@@ -523,7 +499,7 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
8596 i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
8597 struct i40e_dcbx_config *dcbcfg)
8598 {
8599- i40e_status ret = 0;
8600+ i40e_status ret = I40E_SUCCESS;
8601 struct i40e_lldp_org_tlv *tlv;
8602 u16 type;
8603 u16 length;
8604@@ -578,7 +554,7 @@ i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
8605 u8 bridgetype,
8606 struct i40e_dcbx_config *dcbcfg)
8607 {
8608- i40e_status ret = 0;
8609+ i40e_status ret = I40E_SUCCESS;
8610 struct i40e_virt_mem mem;
8611 u8 *lldpmib;
8612
8613@@ -613,8 +589,8 @@ static void i40e_cee_to_dcb_v1_config(
8614 struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg,
8615 struct i40e_dcbx_config *dcbcfg)
8616 {
8617- u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status);
8618- u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
8619+ u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status);
8620+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
8621 u8 i, tc, err;
8622
8623 /* CEE PG data to ETS config */
8624@@ -627,7 +603,7 @@ static void i40e_cee_to_dcb_v1_config(
8625 tc = (u8)((cee_cfg->oper_prio_tc[i] &
8626 I40E_CEE_PGID_PRIO_0_MASK) >>
8627 I40E_CEE_PGID_PRIO_0_SHIFT);
8628- dcbcfg->etscfg.prioritytable[i * 2] = tc;
8629+ dcbcfg->etscfg.prioritytable[i*2] = tc;
8630 tc = (u8)((cee_cfg->oper_prio_tc[i] &
8631 I40E_CEE_PGID_PRIO_1_MASK) >>
8632 I40E_CEE_PGID_PRIO_1_SHIFT);
8633@@ -694,8 +670,8 @@ static void i40e_cee_to_dcb_config(
8634 struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg,
8635 struct i40e_dcbx_config *dcbcfg)
8636 {
8637- u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
8638- u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
8639+ u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status);
8640+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
8641 u8 i, tc, err, sync, oper;
8642
8643 /* CEE PG data to ETS config */
8644@@ -708,11 +684,11 @@ static void i40e_cee_to_dcb_config(
8645 tc = (u8)((cee_cfg->oper_prio_tc[i] &
8646 I40E_CEE_PGID_PRIO_0_MASK) >>
8647 I40E_CEE_PGID_PRIO_0_SHIFT);
8648- dcbcfg->etscfg.prioritytable[i * 2] = tc;
8649+ dcbcfg->etscfg.prioritytable[i*2] = tc;
8650 tc = (u8)((cee_cfg->oper_prio_tc[i] &
8651 I40E_CEE_PGID_PRIO_1_MASK) >>
8652 I40E_CEE_PGID_PRIO_1_SHIFT);
8653- dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
8654+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
8655 }
8656
8657 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
8658@@ -792,7 +768,7 @@ static void i40e_cee_to_dcb_config(
8659 **/
8660 static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
8661 {
8662- i40e_status ret = 0;
8663+ i40e_status ret = I40E_SUCCESS;
8664
8665 /* IEEE mode */
8666 hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
8667@@ -808,7 +784,7 @@ static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
8668 &hw->remote_dcbx_config);
8669 /* Don't treat ENOENT as an error for Remote MIBs */
8670 if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
8671- ret = 0;
8672+ ret = I40E_SUCCESS;
8673
8674 out:
8675 return ret;
8676@@ -822,7 +798,7 @@ out:
8677 **/
8678 i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
8679 {
8680- i40e_status ret = 0;
8681+ i40e_status ret = I40E_SUCCESS;
8682 struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
8683 struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
8684
8685@@ -837,22 +813,22 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
8686 ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
8687 ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
8688 sizeof(cee_v1_cfg), NULL);
8689- if (!ret) {
8690+ if (ret == I40E_SUCCESS) {
8691 /* CEE mode */
8692 hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
8693 hw->local_dcbx_config.tlv_status =
8694- le16_to_cpu(cee_v1_cfg.tlv_status);
8695+ LE16_TO_CPU(cee_v1_cfg.tlv_status);
8696 i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
8697 &hw->local_dcbx_config);
8698 }
8699 } else {
8700 ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg,
8701 sizeof(cee_cfg), NULL);
8702- if (!ret) {
8703+ if (ret == I40E_SUCCESS) {
8704 /* CEE mode */
8705 hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
8706 hw->local_dcbx_config.tlv_status =
8707- le32_to_cpu(cee_cfg.tlv_status);
8708+ LE32_TO_CPU(cee_cfg.tlv_status);
8709 i40e_cee_to_dcb_config(&cee_cfg,
8710 &hw->local_dcbx_config);
8711 }
8712@@ -862,7 +838,7 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
8713 if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
8714 return i40e_get_ieee_dcb_config(hw);
8715
8716- if (ret)
8717+ if (ret != I40E_SUCCESS)
8718 goto out;
8719
8720 /* Get CEE DCB Desired Config */
8721@@ -873,11 +849,11 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
8722
8723 /* Get Remote DCB Config */
8724 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
8725- I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8726- &hw->remote_dcbx_config);
8727+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8728+ &hw->remote_dcbx_config);
8729 /* Don't treat ENOENT as an error for Remote MIBs */
8730 if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
8731- ret = 0;
8732+ ret = I40E_SUCCESS;
8733
8734 out:
8735 return ret;
8736@@ -886,22 +862,41 @@ out:
8737 /**
8738 * i40e_init_dcb
8739 * @hw: pointer to the hw struct
8740+ * @enable_mib_change: enable mib change event
8741 *
8742 * Update DCB configuration from the Firmware
8743 **/
8744-i40e_status i40e_init_dcb(struct i40e_hw *hw)
8745+i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
8746 {
8747- i40e_status ret = 0;
8748+ i40e_status ret = I40E_SUCCESS;
8749 struct i40e_lldp_variables lldp_cfg;
8750 u8 adminstatus = 0;
8751
8752 if (!hw->func_caps.dcb)
8753- return ret;
8754+ return I40E_NOT_SUPPORTED;
8755
8756 /* Read LLDP NVM area */
8757- ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
8758+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
8759+ u8 offset = 0;
8760+
8761+ if (hw->mac.type == I40E_MAC_XL710)
8762+ offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET;
8763+ else if (hw->mac.type == I40E_MAC_X722)
8764+ offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET;
8765+ else
8766+ return I40E_NOT_SUPPORTED;
8767+
8768+ ret = i40e_read_nvm_module_data(hw,
8769+ I40E_SR_EMP_SR_SETTINGS_PTR,
8770+ offset,
8771+ I40E_LLDP_CURRENT_STATUS_OFFSET,
8772+ I40E_LLDP_CURRENT_STATUS_SIZE,
8773+ &lldp_cfg.adminstatus);
8774+ } else {
8775+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
8776+ }
8777 if (ret)
8778- return ret;
8779+ return I40E_ERR_NOT_READY;
8780
8781 /* Get the LLDP AdminStatus for the current port */
8782 adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
8783@@ -910,7 +905,7 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
8784 /* LLDP agent disabled */
8785 if (!adminstatus) {
8786 hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
8787- return ret;
8788+ return I40E_ERR_NOT_READY;
8789 }
8790
8791 /* Get DCBX status */
8792@@ -919,27 +914,454 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
8793 return ret;
8794
8795 /* Check the DCBX Status */
8796- switch (hw->dcbx_status) {
8797- case I40E_DCBX_STATUS_DONE:
8798- case I40E_DCBX_STATUS_IN_PROGRESS:
8799+ if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
8800+ hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
8801 /* Get current DCBX configuration */
8802 ret = i40e_get_dcb_config(hw);
8803 if (ret)
8804 return ret;
8805- break;
8806- case I40E_DCBX_STATUS_DISABLED:
8807+ } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
8808+ return I40E_ERR_NOT_READY;
8809+ }
8810+
8811+ /* Configure the LLDP MIB change event */
8812+ if (enable_mib_change)
8813+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
8814+
8815+ return ret;
8816+}
8817+
8818+/**
8819+ * i40e_get_fw_lldp_status
8820+ * @hw: pointer to the hw struct
8821+ * @lldp_status: pointer to the status enum
8822+ *
8823+ * Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
8824+ * Status of agent is reported via @lldp_status parameter.
8825+ **/
8826+enum i40e_status_code
8827+i40e_get_fw_lldp_status(struct i40e_hw *hw,
8828+ enum i40e_get_fw_lldp_status_resp *lldp_status)
8829+{
8830+ i40e_status ret;
8831+ struct i40e_virt_mem mem;
8832+ u8 *lldpmib;
8833+
8834+ if (!lldp_status)
8835+ return I40E_ERR_PARAM;
8836+
8837+ /* Allocate buffer for the LLDPDU */
8838+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
8839+ if (ret)
8840 return ret;
8841- case I40E_DCBX_STATUS_NOT_STARTED:
8842- case I40E_DCBX_STATUS_MULTIPLE_PEERS:
8843+
8844+ lldpmib = (u8 *)mem.va;
8845+ ret = i40e_aq_get_lldp_mib(hw, 0, 0, (void *)lldpmib,
8846+ I40E_LLDPDU_SIZE, NULL, NULL, NULL);
8847+
8848+ if (ret == I40E_SUCCESS) {
8849+ *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
8850+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
8851+ /* MIB is not available yet but the agent is running */
8852+ *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
8853+ ret = I40E_SUCCESS;
8854+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
8855+ *lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED;
8856+ ret = I40E_SUCCESS;
8857+ }
8858+
8859+ i40e_free_virt_mem(hw, &mem);
8860+ return ret;
8861+}
8862+
8863+/**
8864+ * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
8865+ * @tlv: Fill the ETS config data in IEEE format
8866+ * @dcbcfg: Local store which holds the DCB Config
8867+ *
8868+ * Prepare IEEE 802.1Qaz ETS CFG TLV
8869+ **/
8870+static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv,
8871+ struct i40e_dcbx_config *dcbcfg)
8872+{
8873+ u8 priority0, priority1, maxtcwilling = 0;
8874+ struct i40e_dcb_ets_config *etscfg;
8875+ u16 offset = 0, typelength, i;
8876+ u8 *buf = tlv->tlvinfo;
8877+ u32 ouisubtype;
8878+
8879+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
8880+ I40E_IEEE_ETS_TLV_LENGTH);
8881+ tlv->typelength = htons(typelength);
8882+
8883+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
8884+ I40E_IEEE_SUBTYPE_ETS_CFG);
8885+ tlv->ouisubtype = htonl(ouisubtype);
8886+
8887+ /* First Octet post subtype
8888+ * --------------------------
8889+ * |will-|CBS | Re- | Max |
8890+ * |ing | |served| TCs |
8891+ * --------------------------
8892+ * |1bit | 1bit|3 bits|3bits|
8893+ */
8894+ etscfg = &dcbcfg->etscfg;
8895+ if (etscfg->willing)
8896+ maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
8897+ maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
8898+ buf[offset] = maxtcwilling;
8899+
8900+ /* Move offset to Priority Assignment Table */
8901+ offset++;
8902+
8903+ /* Priority Assignment Table (4 octets)
8904+ * Octets:| 1 | 2 | 3 | 4 |
8905+ * -----------------------------------------
8906+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
8907+ * -----------------------------------------
8908+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
8909+ * -----------------------------------------
8910+ */
8911+ for (i = 0; i < 4; i++) {
8912+ priority0 = etscfg->prioritytable[i * 2] & 0xF;
8913+ priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
8914+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
8915+ priority1;
8916+ offset++;
8917+ }
8918+
8919+ /* TC Bandwidth Table (8 octets)
8920+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
8921+ * ---------------------------------
8922+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
8923+ * ---------------------------------
8924+ */
8925+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
8926+ buf[offset++] = etscfg->tcbwtable[i];
8927+
8928+ /* TSA Assignment Table (8 octets)
8929+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
8930+ * ---------------------------------
8931+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
8932+ * ---------------------------------
8933+ */
8934+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
8935+ buf[offset++] = etscfg->tsatable[i];
8936+}
8937+
8938+/**
8939+ * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
8940+ * @tlv: Fill ETS Recommended TLV in IEEE format
8941+ * @dcbcfg: Local store which holds the DCB Config
8942+ *
8943+ * Prepare IEEE 802.1Qaz ETS REC TLV
8944+ **/
8945+static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
8946+ struct i40e_dcbx_config *dcbcfg)
8947+{
8948+ struct i40e_dcb_ets_config *etsrec;
8949+ u16 offset = 0, typelength, i;
8950+ u8 priority0, priority1;
8951+ u8 *buf = tlv->tlvinfo;
8952+ u32 ouisubtype;
8953+
8954+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
8955+ I40E_IEEE_ETS_TLV_LENGTH);
8956+ tlv->typelength = htons(typelength);
8957+
8958+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
8959+ I40E_IEEE_SUBTYPE_ETS_REC);
8960+ tlv->ouisubtype = htonl(ouisubtype);
8961+
8962+ etsrec = &dcbcfg->etsrec;
8963+ /* First Octet is reserved */
8964+ /* Move offset to Priority Assignment Table */
8965+ offset++;
8966+
8967+ /* Priority Assignment Table (4 octets)
8968+ * Octets:| 1 | 2 | 3 | 4 |
8969+ * -----------------------------------------
8970+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
8971+ * -----------------------------------------
8972+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
8973+ * -----------------------------------------
8974+ */
8975+ for (i = 0; i < 4; i++) {
8976+ priority0 = etsrec->prioritytable[i * 2] & 0xF;
8977+ priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF;
8978+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
8979+ priority1;
8980+ offset++;
8981+ }
8982+
8983+ /* TC Bandwidth Table (8 octets)
8984+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
8985+ * ---------------------------------
8986+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
8987+ * ---------------------------------
8988+ */
8989+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
8990+ buf[offset++] = etsrec->tcbwtable[i];
8991+
8992+ /* TSA Assignment Table (8 octets)
8993+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
8994+ * ---------------------------------
8995+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
8996+ * ---------------------------------
8997+ */
8998+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
8999+ buf[offset++] = etsrec->tsatable[i];
9000+}
9001+
9002+ /**
9003+ * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
9004+ * @tlv: Fill PFC TLV in IEEE format
9005+ * @dcbcfg: Local store to get PFC CFG data
9006+ *
9007+ * Prepare IEEE 802.1Qaz PFC CFG TLV
9008+ **/
9009+static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv,
9010+ struct i40e_dcbx_config *dcbcfg)
9011+{
9012+ u8 *buf = tlv->tlvinfo;
9013+ u32 ouisubtype;
9014+ u16 typelength;
9015+
9016+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
9017+ I40E_IEEE_PFC_TLV_LENGTH);
9018+ tlv->typelength = htons(typelength);
9019+
9020+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
9021+ I40E_IEEE_SUBTYPE_PFC_CFG);
9022+ tlv->ouisubtype = htonl(ouisubtype);
9023+
9024+ /* ----------------------------------------
9025+ * |will-|MBC | Re- | PFC | PFC Enable |
9026+ * |ing | |served| cap | |
9027+ * -----------------------------------------
9028+ * |1bit | 1bit|2 bits|4bits| 1 octet |
9029+ */
9030+ if (dcbcfg->pfc.willing)
9031+ buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT);
9032+
9033+ if (dcbcfg->pfc.mbc)
9034+ buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT);
9035+
9036+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
9037+ buf[1] = dcbcfg->pfc.pfcenable;
9038+}
9039+
9040+/**
9041+ * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
9042+ * @tlv: Fill APP TLV in IEEE format
9043+ * @dcbcfg: Local store to get APP CFG data
9044+ *
9045+ * Prepare IEEE 802.1Qaz APP CFG TLV
9046+ **/
9047+static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
9048+ struct i40e_dcbx_config *dcbcfg)
9049+{
9050+ u16 typelength, length, offset = 0;
9051+ u8 priority, selector, i = 0;
9052+ u8 *buf = tlv->tlvinfo;
9053+ u32 ouisubtype;
9054+
9055+ /* If there are no APP TLVs, just return */
9056+ if (dcbcfg->numapps == 0)
9057+ return;
9058+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
9059+ I40E_IEEE_SUBTYPE_APP_PRI);
9060+ tlv->ouisubtype = htonl(ouisubtype);
9061+
9062+ /* Move offset to App Priority Table */
9063+ offset++;
9064+ /* Application Priority Table (3 octets)
9065+ * Octets:| 1 | 2 | 3 |
9066+ * -----------------------------------------
9067+ * |Priority|Rsrvd| Sel | Protocol ID |
9068+ * -----------------------------------------
9069+ * Bits:|23 21|20 19|18 16|15 0|
9070+ * -----------------------------------------
9071+ */
9072+ while (i < dcbcfg->numapps) {
9073+ priority = dcbcfg->app[i].priority & 0x7;
9074+ selector = dcbcfg->app[i].selector & 0x7;
9075+ buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
9076+ buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
9077+ buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
9078+ /* Move to next app */
9079+ offset += 3;
9080+ i++;
9081+ if (i >= I40E_DCBX_MAX_APPS)
9082+ break;
9083+ }
9084+ /* length includes size of ouisubtype + 1 reserved + 3*numapps */
9085+ length = sizeof(tlv->ouisubtype) + 1 + (i*3);
9086+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
9087+ (length & 0x1FF));
9088+ tlv->typelength = htons(typelength);
9089+}
9090+
9091+ /**
9092+ * i40e_add_dcb_tlv - Add all IEEE TLVs
9093+ * @tlv: pointer to org tlv
9094+ *
9095+ * Add the IEEE TLV identified by tlvid, using data from dcbcfg
9096+ **/
9097+static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
9098+ struct i40e_dcbx_config *dcbcfg,
9099+ u16 tlvid)
9100+{
9101+ switch (tlvid) {
9102+ case I40E_IEEE_TLV_ID_ETS_CFG:
9103+ i40e_add_ieee_ets_tlv(tlv, dcbcfg);
9104+ break;
9105+ case I40E_IEEE_TLV_ID_ETS_REC:
9106+ i40e_add_ieee_etsrec_tlv(tlv, dcbcfg);
9107+ break;
9108+ case I40E_IEEE_TLV_ID_PFC_CFG:
9109+ i40e_add_ieee_pfc_tlv(tlv, dcbcfg);
9110+ break;
9111+ case I40E_IEEE_TLV_ID_APP_PRI:
9112+ i40e_add_ieee_app_pri_tlv(tlv, dcbcfg);
9113+ break;
9114 default:
9115 break;
9116 }
9117+}
9118
9119- /* Configure the LLDP MIB change event */
9120- ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
9121+ /**
9122+ * i40e_set_dcb_config - Set the local LLDP MIB to FW
9123+ * @hw: pointer to the hw struct
9124+ *
9125+ * Set DCB configuration to the Firmware
9126+ **/
9127+i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
9128+{
9129+ i40e_status ret = I40E_SUCCESS;
9130+ struct i40e_dcbx_config *dcbcfg;
9131+ struct i40e_virt_mem mem;
9132+ u8 mib_type, *lldpmib;
9133+ u16 miblen;
9134+
9135+ /* update the hw local config */
9136+ dcbcfg = &hw->local_dcbx_config;
9137+ /* Allocate the LLDPDU */
9138+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
9139 if (ret)
9140 return ret;
9141
9142+ mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB;
9143+ if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) {
9144+ mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS <<
9145+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT;
9146+ }
9147+ lldpmib = (u8 *)mem.va;
9148+ ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg);
9149+ ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL);
9150+
9151+ i40e_free_virt_mem(hw, &mem);
9152+ return ret;
9153+}
9154+
9155+/**
9156+ * i40e_dcb_config_to_lldp - Convert Dcbconfig to MIB format
9157+ * @lldpmib: pointer to mib to be output
9158+ * @miblen: pointer to u16 for length of lldpmib
9159+ * @dcbcfg: DCB configuration store to convert
9160+ *
9161+ * Convert the DCB configuration to LLDP MIB (TLV) format
9162+ **/
9163+i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
9164+ struct i40e_dcbx_config *dcbcfg)
9165+{
9166+ u16 length, offset = 0, tlvid = I40E_TLV_ID_START;
9167+ i40e_status ret = I40E_SUCCESS;
9168+ struct i40e_lldp_org_tlv *tlv;
9169+ u16 typelength;
9170+
9171+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
9172+ while (1) {
9173+ i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
9174+ typelength = ntohs(tlv->typelength);
9175+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
9176+ I40E_LLDP_TLV_LEN_SHIFT);
9177+ if (length)
9178+ offset += length + 2;
9179+ /* END TLV or beyond LLDPDU size */
9180+ if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) ||
9181+ (offset > I40E_LLDPDU_SIZE))
9182+ break;
9183+ /* Move to next TLV */
9184+ if (length)
9185+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
9186+ sizeof(tlv->typelength) + length);
9187+ }
9188+ *miblen = offset;
9189+ return ret;
9190+}
9191+
9192+/**
9193+ * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM
9194+ * @hw: pointer to the HW structure
9195+ * @lldp_cfg: pointer to hold lldp configuration variables
9196+ * @module: address of the module pointer
9197+ * @word_offset: offset of LLDP configuration
9198+ *
9199+ * Reads the LLDP configuration data from NVM using passed addresses
9200+ **/
9201+static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw,
9202+ struct i40e_lldp_variables *lldp_cfg,
9203+ u8 module, u32 word_offset)
9204+{
9205+ u32 address, offset = (2 * word_offset);
9206+ i40e_status ret;
9207+ __le16 raw_mem;
9208+ u16 mem;
9209+
9210+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
9211+ if (ret != I40E_SUCCESS)
9212+ return ret;
9213+
9214+ ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem,
9215+ true, NULL);
9216+ i40e_release_nvm(hw);
9217+ if (ret != I40E_SUCCESS)
9218+ return ret;
9219+
9220+ mem = LE16_TO_CPU(raw_mem);
9221+ /* Check if this pointer needs to be read in word size or 4K sector
9222+ * units.
9223+ */
9224+ if (mem & I40E_PTR_TYPE)
9225+ address = (0x7FFF & mem) * 4096;
9226+ else
9227+ address = (0x7FFF & mem) * 2;
9228+
9229+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
9230+ if (ret != I40E_SUCCESS)
9231+ goto err_lldp_cfg;
9232+
9233+ ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem,
9234+ true, NULL);
9235+ i40e_release_nvm(hw);
9236+ if (ret != I40E_SUCCESS)
9237+ return ret;
9238+
9239+ mem = LE16_TO_CPU(raw_mem);
9240+ offset = mem + word_offset;
9241+ offset *= 2;
9242+
9243+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
9244+ if (ret != I40E_SUCCESS)
9245+ goto err_lldp_cfg;
9246+
9247+ ret = i40e_aq_read_nvm(hw, 0, address + offset,
9248+ sizeof(struct i40e_lldp_variables), lldp_cfg,
9249+ true, NULL);
9250+ i40e_release_nvm(hw);
9251+
9252+err_lldp_cfg:
9253 return ret;
9254 }
9255
9256@@ -951,24 +1373,37 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
9257 * Reads the LLDP configuration data from NVM
9258 **/
9259 i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
9260- struct i40e_lldp_variables *lldp_cfg)
9261+ struct i40e_lldp_variables *lldp_cfg)
9262 {
9263- i40e_status ret = 0;
9264- u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR);
9265+ i40e_status ret = I40E_SUCCESS;
9266+ u32 mem;
9267
9268 if (!lldp_cfg)
9269 return I40E_ERR_PARAM;
9270
9271 ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
9272- if (ret)
9273- goto err_lldp_cfg;
9274+ if (ret != I40E_SUCCESS)
9275+ return ret;
9276
9277- ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset,
9278- sizeof(struct i40e_lldp_variables),
9279- (u8 *)lldp_cfg,
9280- true, NULL);
9281+ ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem),
9282+ &mem, true, NULL);
9283 i40e_release_nvm(hw);
9284+ if (ret != I40E_SUCCESS)
9285+ return ret;
9286+
9287+ /* Read a bit that indicates whether we are running a flat or
9288+ * structured NVM image. A flat image has the LLDP configuration in
9289+ * shadow RAM, so different addresses must be passed in each case.
9290+ */
9291+ if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) {
9292+ /* Flat NVM case */
9293+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR,
9294+ I40E_SR_LLDP_CFG_PTR);
9295+ } else {
9296+ /* Good old structured NVM image */
9297+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR,
9298+ I40E_NVM_LLDP_CFG_PTR);
9299+ }
9300
9301-err_lldp_cfg:
9302 return ret;
9303 }
9304diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
9305index 92d01042c..3cdaeeb15 100644
9306--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
9307+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
9308@@ -1,28 +1,5 @@
9309-/*******************************************************************************
9310- *
9311- * Intel Ethernet Controller XL710 Family Linux Driver
9312- * Copyright(c) 2013 - 2014 Intel Corporation.
9313- *
9314- * This program is free software; you can redistribute it and/or modify it
9315- * under the terms and conditions of the GNU General Public License,
9316- * version 2, as published by the Free Software Foundation.
9317- *
9318- * This program is distributed in the hope it will be useful, but WITHOUT
9319- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9320- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9321- * more details.
9322- *
9323- * You should have received a copy of the GNU General Public License along
9324- * with this program. If not, see <http://www.gnu.org/licenses/>.
9325- *
9326- * The full GNU General Public License is included in this distribution in
9327- * the file called "COPYING".
9328- *
9329- * Contact Information:
9330- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9331- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
9332- *
9333- ******************************************************************************/
9334+/* SPDX-License-Identifier: GPL-2.0 */
9335+/* Copyright(c) 2013 - 2020 Intel Corporation. */
9336
9337 #ifndef _I40E_DCB_H_
9338 #define _I40E_DCB_H_
9339@@ -53,6 +30,11 @@
9340 #define I40E_CEE_SUBTYPE_APP_PRI 4
9341
9342 #define I40E_CEE_MAX_FEAT_TYPE 3
9343+#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B
9344+#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31
9345+#define I40E_LLDP_CURRENT_STATUS_OFFSET 1
9346+#define I40E_LLDP_CURRENT_STATUS_SIZE 1
9347+
9348 /* Defines for LLDP TLV header */
9349 #define I40E_LLDP_TLV_LEN_SHIFT 0
9350 #define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
9351@@ -98,6 +80,20 @@
9352 #define I40E_IEEE_APP_PRIO_SHIFT 5
9353 #define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
9354
9355+/* TLV definitions for preparing MIB */
9356+#define I40E_TLV_ID_CHASSIS_ID 0
9357+#define I40E_TLV_ID_PORT_ID 1
9358+#define I40E_TLV_ID_TIME_TO_LIVE 2
9359+#define I40E_IEEE_TLV_ID_ETS_CFG 3
9360+#define I40E_IEEE_TLV_ID_ETS_REC 4
9361+#define I40E_IEEE_TLV_ID_PFC_CFG 5
9362+#define I40E_IEEE_TLV_ID_APP_PRI 6
9363+#define I40E_TLV_ID_END_OF_LLDPPDU 7
9364+#define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG
9365+
9366+#define I40E_IEEE_ETS_TLV_LENGTH 25
9367+#define I40E_IEEE_PFC_TLV_LENGTH 6
9368+#define I40E_IEEE_APP_TLV_LENGTH 11
9369
9370 #pragma pack(1)
9371
9372@@ -139,6 +135,11 @@ struct i40e_cee_app_prio {
9373 };
9374 #pragma pack()
9375
9376+enum i40e_get_fw_lldp_status_resp {
9377+ I40E_GET_FW_LLDP_STATUS_DISABLED = 0,
9378+ I40E_GET_FW_LLDP_STATUS_ENABLED = 1
9379+};
9380+
9381 i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
9382 u16 *status);
9383 i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
9384@@ -147,5 +148,12 @@ i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
9385 u8 bridgetype,
9386 struct i40e_dcbx_config *dcbcfg);
9387 i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
9388-i40e_status i40e_init_dcb(struct i40e_hw *hw);
9389+i40e_status i40e_init_dcb(struct i40e_hw *hw,
9390+ bool enable_mib_change);
9391+enum i40e_status_code
9392+i40e_get_fw_lldp_status(struct i40e_hw *hw,
9393+ enum i40e_get_fw_lldp_status_resp *lldp_status);
9394+i40e_status i40e_set_dcb_config(struct i40e_hw *hw);
9395+i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
9396+ struct i40e_dcbx_config *dcbcfg);
9397 #endif /* _I40E_DCB_H_ */
9398diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
9399index 886e667f2..1610197b4 100644
9400--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
9401+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
9402@@ -1,33 +1,11 @@
9403-/*******************************************************************************
9404- *
9405- * Intel Ethernet Controller XL710 Family Linux Driver
9406- * Copyright(c) 2013 - 2014 Intel Corporation.
9407- *
9408- * This program is free software; you can redistribute it and/or modify it
9409- * under the terms and conditions of the GNU General Public License,
9410- * version 2, as published by the Free Software Foundation.
9411- *
9412- * This program is distributed in the hope it will be useful, but WITHOUT
9413- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9414- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9415- * more details.
9416- *
9417- * You should have received a copy of the GNU General Public License along
9418- * with this program. If not, see <http://www.gnu.org/licenses/>.
9419- *
9420- * The full GNU General Public License is included in this distribution in
9421- * the file called "COPYING".
9422- *
9423- * Contact Information:
9424- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9425- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
9426- *
9427- ******************************************************************************/
9428+// SPDX-License-Identifier: GPL-2.0
9429+/* Copyright(c) 2013 - 2020 Intel Corporation. */
9430
9431-#ifdef CONFIG_I40E_DCB
9432+#ifdef CONFIG_DCB
9433 #include "i40e.h"
9434 #include <net/dcbnl.h>
9435
9436+#ifdef HAVE_DCBNL_IEEE
9437 /**
9438 * i40e_get_pfc_delay - retrieve PFC Link Delay
9439 * @hw: pointer to hardware struct
9440@@ -46,7 +24,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
9441
9442 /**
9443 * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
9444- * @netdev: the corresponding netdev
9445+ * @dev: the corresponding netdev
9446 * @ets: structure to hold the ETS information
9447 *
9448 * Returns local IEEE ETS configuration
9449@@ -85,8 +63,8 @@ static int i40e_dcbnl_ieee_getets(struct net_device *dev,
9450
9451 /**
9452 * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
9453- * @netdev: the corresponding netdev
9454- * @ets: structure to hold the PFC information
9455+ * @dev: the corresponding netdev
9456+ * @pfc: structure to hold the PFC information
9457 *
9458 * Returns local IEEE PFC configuration
9459 **/
9460@@ -118,7 +96,7 @@ static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
9461
9462 /**
9463 * i40e_dcbnl_getdcbx - retrieve current DCBx capability
9464- * @netdev: the corresponding netdev
9465+ * @dev: the corresponding netdev
9466 *
9467 * Returns DCBx capability features
9468 **/
9469@@ -131,7 +109,8 @@ static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
9470
9471 /**
9472 * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
9473- * @netdev: the corresponding netdev
9474+ * @dev: the corresponding netdev
9475+ * @perm_addr: buffer to store the MAC address
9476 *
9477 * Returns the SAN MAC address used for LLDP exchange
9478 **/
9479@@ -198,8 +177,10 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
9480 }
9481 }
9482
9483+#ifdef HAVE_DCBNL_IEEE_DELAPP
9484 /* Notify user-space of the changes */
9485 dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
9486+#endif
9487 }
9488
9489 /**
9490@@ -317,4 +298,5 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
9491 /* Set initial IEEE DCB settings */
9492 i40e_dcbnl_set_all(vsi);
9493 }
9494-#endif /* CONFIG_I40E_DCB */
9495+#endif /* HAVE_DCBNL_IEEE */
9496+#endif /* CONFIG_DCB */
9497diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
9498new file mode 100644
9499index 000000000..bda29f1a8
9500--- /dev/null
9501+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
9502@@ -0,0 +1,478 @@
9503+// SPDX-License-Identifier: GPL-2.0
9504+/* Copyright(c) 2013 - 2020 Intel Corporation. */
9505+
9506+#include "i40e.h"
9507+
9508+#include <linux/firmware.h>
9509+
9510+/**
9511+ * i40e_ddp_profiles_eq - checks if DDP profiles are equivalent
9512+ * @a: new profile info
9513+ * @b: old profile info
9514+ *
9515+ * checks if DDP profiles are equivalent.
9516+ * Returns true if profiles are the same.
9517+ **/
9518+static bool i40e_ddp_profiles_eq(struct i40e_profile_info *a,
9519+ struct i40e_profile_info *b)
9520+{
9521+ return a->track_id == b->track_id &&
9522+ !memcmp(&a->version, &b->version, sizeof(a->version)) &&
9523+ !memcmp(&a->name, &b->name, I40E_DDP_NAME_SIZE);
9524+}
9525+
9526+/**
9527+ * i40e_ddp_does_profile_exist - checks if DDP profile is already loaded
9528+ * @hw: HW data structure
9529+ * @pinfo: DDP profile information structure
9530+ *
9531+ * checks if a DDP profile is already loaded.
9532+ * Returns >0 if the profile exists.
9533+ * Returns 0 if the profile is absent.
9534+ * Returns <0 on error.
9535+ **/
9536+static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
9537+ struct i40e_profile_info *pinfo)
9538+{
9539+ struct i40e_ddp_profile_list *profile_list;
9540+ u8 buff[I40E_PROFILE_LIST_SIZE];
9541+ i40e_status status;
9542+ int i;
9543+
9544+ status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
9545+ NULL);
9546+ if (status != I40E_SUCCESS)
9547+ return -1;
9548+
9549+ profile_list = (struct i40e_ddp_profile_list *)buff;
9550+ for (i = 0; i < profile_list->p_count; i++) {
9551+ if (i40e_ddp_profiles_eq(pinfo, &profile_list->p_info[i]))
9552+ return 1;
9553+ }
9554+ return 0;
9555+}
9556+
9557+/**
9558+ * i40e_ddp_profiles_overlap - checks if DDP profiles overlap.
9559+ * @new: new profile info
9560+ * @old: old profile info
9561+ *
9562+ * checks if DDP profiles overlap.
9563+ * Returns true if the profiles overlap.
9564+ **/
9565+static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new,
9566+ struct i40e_profile_info *old)
9567+{
9568+ unsigned int group_id_old = (u8)((old->track_id & 0x00FF0000) >> 16);
9569+ unsigned int group_id_new = (u8)((new->track_id & 0x00FF0000) >> 16);
9570+
9571+ /* 0x00 group must be only the first */
9572+ if (group_id_new == 0)
9573+ return true;
9574+ /* 0xFF group is compatible with anything else */
9575+ if (group_id_new == 0xFF || group_id_old == 0xFF)
9576+ return false;
9577+ /* otherwise only profiles from the same group are compatible */
9578+ return group_id_old != group_id_new;
9579+}
9580+
9581+/**
9582+ * i40e_ddp_does_profile_overlap - checks if DDP profile overlaps with an existing one.
9583+ * @hw: HW data structure
9584+ * @pinfo: DDP profile information structure
9585+ *
9586+ * checks if the DDP profile overlaps with an existing one.
9587+ * Returns >0 if the profile overlaps.
9588+ * Returns 0 if the profile is ok.
9589+ * Returns <0 on error.
9590+ **/
9591+static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
9592+ struct i40e_profile_info *pinfo)
9593+{
9594+ struct i40e_ddp_profile_list *profile_list;
9595+ u8 buff[I40E_PROFILE_LIST_SIZE];
9596+ i40e_status status;
9597+ int i;
9598+
9599+ status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
9600+ NULL);
9601+ if (status != I40E_SUCCESS)
9602+ return -EIO;
9603+
9604+ profile_list = (struct i40e_ddp_profile_list *)buff;
9605+ for (i = 0; i < profile_list->p_count; i++) {
9606+ if (i40e_ddp_profiles_overlap(pinfo,
9607+ &profile_list->p_info[i]))
9608+ return 1;
9609+ }
9610+ return 0;
9611+}
9612+
9613+
9614+/**
9615+ * i40e_add_pinfo
9616+ * @hw: pointer to the hardware structure
9617+ * @profile: pointer to the profile segment of the package
9618+ * @profile_info_sec: buffer for information section
9619+ * @track_id: package tracking id
9620+ *
9621+ * Register a profile to the list of loaded profiles.
9622+ */
9623+static enum i40e_status_code
9624+i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
9625+ u8 *profile_info_sec, u32 track_id)
9626+{
9627+ struct i40e_profile_section_header *sec;
9628+ struct i40e_profile_info *pinfo;
9629+ i40e_status status;
9630+ u32 offset = 0, info = 0;
9631+
9632+ sec = (struct i40e_profile_section_header *)profile_info_sec;
9633+ sec->tbl_size = 1;
9634+ sec->data_end = sizeof(struct i40e_profile_section_header) +
9635+ sizeof(struct i40e_profile_info);
9636+ sec->section.type = SECTION_TYPE_INFO;
9637+ sec->section.offset = sizeof(struct i40e_profile_section_header);
9638+ sec->section.size = sizeof(struct i40e_profile_info);
9639+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
9640+ sec->section.offset);
9641+ pinfo->track_id = track_id;
9642+ pinfo->version = profile->version;
9643+ pinfo->op = I40E_DDP_ADD_TRACKID;
9644+
9645+ /* Clear reserved field */
9646+ memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
9647+ i40e_memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE,
9648+ I40E_NONDMA_TO_NONDMA);
9649+
9650+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
9651+ track_id, &offset, &info, NULL);
9652+ return status;
9653+}
9654+
9655+/**
9656+ * i40e_del_pinfo - delete DDP profile info from NIC
9657+ * @hw: HW data structure
9658+ * @profile: DDP profile segment to be deleted
9659+ * @profile_info_sec: DDP profile section header
9660+ * @track_id: track ID of the profile for deletion
9661+ *
9662+ * Removes DDP profile from the NIC.
9663+ **/
9664+static enum i40e_status_code
9665+i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
9666+ u8 *profile_info_sec, u32 track_id)
9667+{
9668+ struct i40e_profile_section_header *sec;
9669+ struct i40e_profile_info *pinfo;
9670+ i40e_status status;
9671+ u32 offset = 0, info = 0;
9672+
9673+ sec = (struct i40e_profile_section_header *)profile_info_sec;
9674+ sec->tbl_size = 1;
9675+ sec->data_end = sizeof(struct i40e_profile_section_header) +
9676+ sizeof(struct i40e_profile_info);
9677+ sec->section.type = SECTION_TYPE_INFO;
9678+ sec->section.offset = sizeof(struct i40e_profile_section_header);
9679+ sec->section.size = sizeof(struct i40e_profile_info);
9680+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
9681+ sec->section.offset);
9682+ pinfo->track_id = track_id;
9683+ pinfo->version = profile->version;
9684+ pinfo->op = I40E_DDP_REMOVE_TRACKID;
9685+
9686+ /* Clear reserved field */
9687+ memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
9688+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
9689+
9690+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
9691+ track_id, &offset, &info, NULL);
9692+ return status;
9693+}
9694+
9695+/**
9696+ * i40e_ddp_is_pkg_hdr_valid - performs basic pkg header integrity checks
9697+ * @netdev: net device structure (for logging purposes)
9698+ * @pkg_hdr: pointer to package header
9699+ * @size_huge: size of the whole DDP profile package in size_t
9700+ *
9701+ * Checks correctness of pkg header: Version, size too big/small, and
9702+ * all segment offset alignments and boundaries. This function helps
9703+ * reject non-DDP profile files loaded by administrator mistake.
9704+ **/
9705+static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev,
9706+ struct i40e_package_header *pkg_hdr,
9707+ size_t size_huge)
9708+{
9709+ u32 size = 0xFFFFFFFFU & size_huge;
9710+ u32 pkg_hdr_size;
9711+ u32 segment;
9712+
9713+ if (!pkg_hdr)
9714+ return false;
9715+
9716+ if (pkg_hdr->version.major > 0) {
9717+ struct i40e_ddp_version ver = pkg_hdr->version;
9718+ netdev_err(netdev, "Unsupported DDP profile version %u.%u.%u.%u",
9719+ ver.major, ver.minor, ver.update, ver.draft);
9720+ return false;
9721+ }
9722+ if (size_huge > size) {
9723+ netdev_err(netdev, "Invalid DDP profile - size is bigger than 4G");
9724+ return false;
9725+ }
9726+ if (size < (sizeof(struct i40e_package_header) +
9727+ sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
9728+ netdev_err(netdev, "Invalid DDP profile - size is too small.");
9729+ return false;
9730+ }
9731+
9732+ pkg_hdr_size = sizeof(u32) * (pkg_hdr->segment_count + 2U);
9733+ if (size < pkg_hdr_size) {
9734+ netdev_err(netdev, "Invalid DDP profile - too many segments");
9735+ return false;
9736+ }
9737+ for (segment = 0; segment < pkg_hdr->segment_count; ++segment) {
9738+ u32 offset = pkg_hdr->segment_offset[segment];
9739+
9740+ if (0xFU & offset) {
9741+ netdev_err(netdev, "Invalid DDP profile %u segment alignment", segment);
9742+ return false;
9743+ }
9744+ if (pkg_hdr_size > offset || offset >= size) {
9745+ netdev_err(netdev, "Invalid DDP profile %u segment offset", segment);
9746+ return false;
9747+ }
9748+ }
9749+
9750+ return true;
9751+}
9752+
9753+/**
9754+ * i40e_ddp_load - performs DDP loading
9755+ * @netdev: net device structure
9756+ * @data: buffer containing recipe file
9757+ * @size: size of the buffer
9758+ * @is_add: true when loading profile, false when rolling back the previous one
9759+ *
9760+ * Checks correctness and loads DDP profile to the NIC. The function is
9761+ * also used for rolling back a previously loaded profile.
9762+ **/
9763+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
9764+ bool is_add)
9765+{
9766+ u8 profile_info_sec[sizeof(struct i40e_profile_section_header) +
9767+ sizeof(struct i40e_profile_info)];
9768+ struct i40e_metadata_segment *metadata_hdr;
9769+ struct i40e_profile_segment *profile_hdr;
9770+ struct i40e_profile_info pinfo;
9771+ struct i40e_package_header *pkg_hdr;
9772+ i40e_status status;
9773+ struct i40e_netdev_priv *np = netdev_priv(netdev);
9774+ struct i40e_vsi *vsi = np->vsi;
9775+ struct i40e_pf *pf = vsi->back;
9776+ u32 track_id;
9777+ int istatus;
9778+
9779+ pkg_hdr = (struct i40e_package_header *)data;
9780+ if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
9781+ return -EINVAL;
9782+
9783+ if (size < (sizeof(struct i40e_package_header) +
9784+ sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
9785+ netdev_err(netdev, "Invalid DDP recipe size.");
9786+ return -EINVAL;
9787+ }
9788+
9789+ /* Find beginning of segment data in buffer */
9790+ metadata_hdr = (struct i40e_metadata_segment *)
9791+ i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, pkg_hdr);
9792+ if (!metadata_hdr) {
9793+ netdev_err(netdev, "Failed to find metadata segment in DDP recipe.");
9794+ return -EINVAL;
9795+ }
9796+
9797+ track_id = metadata_hdr->track_id;
9798+ profile_hdr = (struct i40e_profile_segment *)
9799+ i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
9800+ if (!profile_hdr) {
9801+ netdev_err(netdev, "Failed to find profile segment in DDP recipe.");
9802+ return -EINVAL;
9803+ }
9804+
9805+ pinfo.track_id = track_id;
9806+ pinfo.version = profile_hdr->version;
9807+ if (is_add)
9808+ pinfo.op = I40E_DDP_ADD_TRACKID;
9809+ else
9810+ pinfo.op = I40E_DDP_REMOVE_TRACKID;
9811+
9812+ memcpy(pinfo.name, profile_hdr->name, I40E_DDP_NAME_SIZE);
9813+
9814+ /* Check if profile data already exists*/
9815+ istatus = i40e_ddp_does_profile_exist(&pf->hw, &pinfo);
9816+ if (istatus < 0) {
9817+ netdev_err(netdev, "Failed to fetch loaded profiles.");
9818+ return istatus;
9819+ }
9820+ if (is_add) {
9821+ if (istatus > 0) {
9822+ netdev_err(netdev, "DDP profile already loaded.");
9823+ return -EINVAL;
9824+ }
9825+ istatus = i40e_ddp_does_profile_overlap(&pf->hw, &pinfo);
9826+ if (istatus < 0) {
9827+ netdev_err(netdev, "Failed to fetch loaded profiles.");
9828+ return istatus;
9829+ }
9830+ if (istatus > 0) {
9831+ netdev_err(netdev, "DDP profile overlaps with existing one.");
9832+ return -EINVAL;
9833+ }
9834+ } else {
9835+ if (istatus == 0) {
9836+ netdev_err(netdev,
9837+ "DDP profile for deletion does not exist.");
9838+ return -EINVAL;
9839+ }
9840+ }
9841+
9842+ /* Load profile data */
9843+ if (is_add) {
9844+ status = i40e_write_profile(&pf->hw, profile_hdr, track_id);
9845+ if (status) {
9846+ if (status == I40E_ERR_DEVICE_NOT_SUPPORTED) {
9847+ netdev_err(netdev, "Profile is not supported by the device.");
9848+ return -EPERM;
9849+ }
9850+ netdev_err(netdev, "Failed to write DDP profile.");
9851+ return -EIO;
9852+ }
9853+ } else {
9854+ status = i40e_rollback_profile(&pf->hw, profile_hdr, track_id);
9855+ if (status) {
9856+ netdev_err(netdev, "Failed to remove DDP profile.");
9857+ return -EIO;
9858+ }
9859+ }
9860+
9861+ /* Add/remove profile to/from profile list in FW */
9862+ if (is_add) {
9863+ status = i40e_add_pinfo(&pf->hw, profile_hdr, profile_info_sec,
9864+ track_id);
9865+ if (status) {
9866+ netdev_err(netdev, "Failed to add DDP profile info.");
9867+ return -EIO;
9868+ }
9869+ } else {
9870+ status = i40e_del_pinfo(&pf->hw, profile_hdr, profile_info_sec,
9871+ track_id);
9872+ if (status) {
9873+ netdev_err(netdev, "Failed to restore DDP profile info.");
9874+ return -EIO;
9875+ }
9876+ }
9877+
9878+ return 0;
9879+}
9880+
9881+/**
9882+ * i40e_ddp_restore - restore previously loaded profile and remove from list
9883+ * @pf: PF data struct
9884+ *
9885+ * Restores previously loaded profile stored on the list in driver memory.
9886+ * After rolling back, removes the entry from the list.
9887+ **/
9888+static int i40e_ddp_restore(struct i40e_pf *pf)
9889+{
9890+ struct i40e_ddp_old_profile_list *entry;
9891+ struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
9892+ int status = 0;
9893+
9894+ if (!list_empty(&pf->ddp_old_prof)) {
9895+ entry = list_first_entry(&pf->ddp_old_prof,
9896+ struct i40e_ddp_old_profile_list,
9897+ list);
9898+ status = i40e_ddp_load(netdev, entry->old_ddp_buf,
9899+ entry->old_ddp_size, false);
9900+ list_del(&entry->list);
9901+ kfree(entry);
9902+ }
9903+ return status;
9904+}
9905+
9906+#define I40E_DDP_PROFILE_NAME_MAX \
9907+ (sizeof(I40E_DDP_PROFILE_PATH) + ETHTOOL_FLASH_MAX_FILENAME)
9908+
9909+/**
9910+ * i40e_ddp_flash - callback function for ethtool flash feature
9911+ * @netdev: net device structure
9912+ * @flash: kernel flash structure
9913+ *
9914+ * Ethtool callback function used for loading and unloading DDP profiles.
9915+ **/
9916+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash)
9917+{
9918+ const struct firmware *ddp_config;
9919+ struct i40e_netdev_priv *np = netdev_priv(netdev);
9920+ struct i40e_vsi *vsi = np->vsi;
9921+ struct i40e_pf *pf = vsi->back;
9922+ int status = 0;
9923+
9924+ /* Check for valid region first */
9925+ if (flash->region != I40_DDP_FLASH_REGION) {
9926+ netdev_err(netdev, "Requested firmware region is not recognized by this driver.");
9927+ return -EINVAL;
9928+ }
9929+ if (pf->hw.bus.func != 0) {
9930+ netdev_err(netdev, "Any DDP operation is allowed only on Phy0 NIC interface");
9931+ return -EINVAL;
9932+ }
9933+
9934+ /* If the user supplied "-" instead of file name rollback previously
9935+ * stored profile.
9936+ */
9937+ if (strncmp(flash->data, "-", 2) != 0) {
9938+ struct i40e_ddp_old_profile_list *list_entry;
9939+ char profile_name[I40E_DDP_PROFILE_NAME_MAX];
9940+
9941+ profile_name[sizeof(profile_name) - 1] = 0;
9942+ strncpy(profile_name, I40E_DDP_PROFILE_PATH, sizeof(profile_name)-1);
9943+ strncat(profile_name, flash->data, ETHTOOL_FLASH_MAX_FILENAME);
9944+ /* Load DDP recipe. */
9945+ status = request_firmware(&ddp_config, profile_name,
9946+ &netdev->dev);
9947+ if (status) {
9948+ netdev_err(netdev, "DDP recipe file request failed.");
9949+ return status;
9950+ }
9951+
9952+ status = i40e_ddp_load(netdev, ddp_config->data,
9953+ ddp_config->size, true);
9954+
9955+ if (!status) {
9956+ list_entry = kzalloc(
9957+ sizeof(struct i40e_ddp_old_profile_list) +
9958+ ddp_config->size, GFP_KERNEL);
9959+ if (!list_entry) {
9960+ netdev_info(netdev, "Failed to allocate memory for previous DDP profile data.");
9961+ netdev_info(netdev, "New profile loaded but roll-back will be impossible.");
9962+ } else {
9963+ memcpy(list_entry->old_ddp_buf,
9964+ ddp_config->data, ddp_config->size);
9965+ list_entry->old_ddp_size = ddp_config->size;
9966+ list_add(&list_entry->list, &pf->ddp_old_prof);
9967+ }
9968+ }
9969+
9970+ release_firmware(ddp_config);
9971+ } else {
9972+ if (!list_empty(&pf->ddp_old_prof)) {
9973+ status = i40e_ddp_restore(pf);
9974+ } else {
9975+ netdev_warn(netdev, "There is no DDP profile to restore.");
9976+ status = -ENOENT;
9977+ }
9978+ }
9979+ return status;
9980+}
9981diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
9982index 8f326f87a..928ef5b27 100644
9983--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
9984+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
9985@@ -1,28 +1,5 @@
9986-/*******************************************************************************
9987- *
9988- * Intel Ethernet Controller XL710 Family Linux Driver
9989- * Copyright(c) 2013 - 2016 Intel Corporation.
9990- *
9991- * This program is free software; you can redistribute it and/or modify it
9992- * under the terms and conditions of the GNU General Public License,
9993- * version 2, as published by the Free Software Foundation.
9994- *
9995- * This program is distributed in the hope it will be useful, but WITHOUT
9996- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9997- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9998- * more details.
9999- *
10000- * You should have received a copy of the GNU General Public License along
10001- * with this program. If not, see <http://www.gnu.org/licenses/>.
10002- *
10003- * The full GNU General Public License is included in this distribution in
10004- * the file called "COPYING".
10005- *
10006- * Contact Information:
10007- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
10008- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
10009- *
10010- ******************************************************************************/
10011+// SPDX-License-Identifier: GPL-2.0
10012+/* Copyright(c) 2013 - 2020 Intel Corporation. */
10013
10014 #ifdef CONFIG_DEBUG_FS
10015
10016@@ -35,8 +12,8 @@ static struct dentry *i40e_dbg_root;
10017
10018 /**
10019 * i40e_dbg_find_vsi - searches for the vsi with the given seid
10020- * @pf - the PF structure to search for the vsi
10021- * @seid - seid of the vsi it is searching for
10022+ * @pf: the PF structure to search for the vsi
10023+ * @seid: seid of the vsi it is searching for
10024 **/
10025 static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
10026 {
10027@@ -54,8 +31,8 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
10028
10029 /**
10030 * i40e_dbg_find_veb - searches for the veb with the given seid
10031- * @pf - the PF structure to search for the veb
10032- * @seid - seid of the veb it is searching for
10033+ * @pf: the PF structure to search for the veb
10034+ * @seid: seid of the veb it is searching for
10035 **/
10036 static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
10037 {
10038@@ -88,7 +65,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
10039 {
10040 struct i40e_pf *pf = filp->private_data;
10041 int bytes_not_copied;
10042- int buf_size = 256;
10043+ size_t buf_size = 256;
10044 char *buf;
10045 int len;
10046
10047@@ -124,6 +101,44 @@ static char *i40e_filter_state_string[] = {
10048 "REMOVE",
10049 };
10050
10051+/**
10052+ * i40e_dbg_dump_vsi_filters - handles dump of mac/vlan filters for a VSI
10053+ * @pf: the i40e_pf created in command write
10054+ * @vsi: the vsi to dump
10055+ */
10056+static void i40e_dbg_dump_vsi_filters(struct i40e_pf *pf, struct i40e_vsi *vsi)
10057+{
10058+ struct i40e_mac_filter *f;
10059+ int bkt;
10060+
10061+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
10062+ dev_info(&pf->pdev->dev,
10063+ " mac_filter_hash: %pM vid=%d, state %s\n",
10064+ f->macaddr, f->vlan,
10065+ i40e_filter_state_string[f->state]);
10066+ }
10067+ dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
10068+ vsi->active_filters, vsi->promisc_threshold,
10069+ (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
10070+ "ON" : "OFF"));
10071+}
10072+
10073+/**
10074+ * i40e_dbg_dump_all_vsi_filters - dump mac/vlan filters for all VSI on a PF
10075+ * @pf: the i40e_pf created in command write
10076+ */
10077+static void i40e_dbg_dump_all_vsi_filters(struct i40e_pf *pf)
10078+{
10079+ int i;
10080+
10081+ for (i = 0; i < pf->num_alloc_vsi; i++)
10082+ if (pf->vsi[i]) {
10083+ dev_info(&pf->pdev->dev, "vsi seid %d\n",
10084+ pf->vsi[i]->seid);
10085+ i40e_dbg_dump_vsi_filters(pf, pf->vsi[i]);
10086+ }
10087+}
10088+
10089 /**
10090 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
10091 * @pf: the i40e_pf created in command write
10092@@ -131,10 +146,13 @@ static char *i40e_filter_state_string[] = {
10093 **/
10094 static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
10095 {
10096+#ifdef HAVE_NDO_GET_STATS64
10097 struct rtnl_link_stats64 *nstat;
10098- struct i40e_mac_filter *f;
10099+#else
10100+ struct net_device_stats *nstat;
10101+#endif
10102 struct i40e_vsi *vsi;
10103- int i, bkt;
10104+ int i;
10105
10106 vsi = i40e_dbg_find_vsi(pf, seid);
10107 if (!vsi) {
10108@@ -145,20 +163,38 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
10109 dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
10110 if (vsi->netdev) {
10111 struct net_device *nd = vsi->netdev;
10112+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
10113+ u32 hw_features;
10114+#endif
10115
10116 dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n",
10117 nd->name, nd->state, nd->flags);
10118 dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
10119 (unsigned long int)nd->features);
10120+#ifdef HAVE_NDO_SET_FEATURES
10121+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
10122+ hw_features = get_netdev_hw_features(vsi->netdev);
10123+ dev_info(&pf->pdev->dev, " hw_features = 0x%08x\n",
10124+ hw_features);
10125+#else
10126 dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
10127 (unsigned long int)nd->hw_features);
10128+#endif
10129+#endif
10130+#ifdef HAVE_NETDEV_VLAN_FEATURES
10131 dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
10132 (unsigned long int)nd->vlan_features);
10133+#endif
10134 }
10135+#ifdef HAVE_VLAN_RX_REGISTER
10136+ dev_info(&pf->pdev->dev, " vlgrp is %s\n",
10137+ vsi->vlgrp ? "<valid>" : "<null>");
10138+#else
10139+ dev_info(&pf->pdev->dev, " active_vlans is %s\n",
10140+ vsi->active_vlans ? "<valid>" : "<null>");
10141+#endif /* HAVE_VLAN_RX_REGISTER */
10142 dev_info(&pf->pdev->dev,
10143- " vlgrp: & = %p\n", vsi->active_vlans);
10144- dev_info(&pf->pdev->dev,
10145- " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
10146+ " flags = 0x%016llx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
10147 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
10148 for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
10149 dev_info(&pf->pdev->dev,
10150@@ -169,16 +205,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
10151 pf->hw.mac.addr,
10152 pf->hw.mac.san_addr,
10153 pf->hw.mac.port_addr);
10154- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
10155- dev_info(&pf->pdev->dev,
10156- " mac_filter_hash: %pM vid=%d, state %s\n",
10157- f->macaddr, f->vlan,
10158- i40e_filter_state_string[f->state]);
10159- }
10160- dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
10161- vsi->active_filters, vsi->promisc_threshold,
10162- (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
10163- "ON" : "OFF"));
10164+ i40e_dbg_dump_vsi_filters(pf, vsi);
10165 nstat = i40e_get_vsi_stats_struct(vsi);
10166 dev_info(&pf->pdev->dev,
10167 " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
10168@@ -264,22 +291,14 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
10169 vsi->rx_buf_failed, vsi->rx_page_failed);
10170 rcu_read_lock();
10171 for (i = 0; i < vsi->num_queue_pairs; i++) {
10172- struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
10173+ struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
10174
10175 if (!rx_ring)
10176 continue;
10177
10178 dev_info(&pf->pdev->dev,
10179- " rx_rings[%i]: desc = %p\n",
10180- i, rx_ring->desc);
10181- dev_info(&pf->pdev->dev,
10182- " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
10183- i, rx_ring->dev,
10184- rx_ring->netdev,
10185- rx_ring->rx_bi);
10186- dev_info(&pf->pdev->dev,
10187- " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
10188- i, rx_ring->state,
10189+ " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
10190+ i, *rx_ring->state,
10191 rx_ring->queue_index,
10192 rx_ring->reg_idx);
10193 dev_info(&pf->pdev->dev,
10194@@ -307,35 +326,23 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
10195 rx_ring->rx_stats.realloc_count,
10196 rx_ring->rx_stats.page_reuse_count);
10197 dev_info(&pf->pdev->dev,
10198- " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
10199- i, rx_ring->size,
10200- (unsigned long int)rx_ring->dma);
10201- dev_info(&pf->pdev->dev,
10202- " rx_rings[%i]: vsi = %p, q_vector = %p\n",
10203- i, rx_ring->vsi,
10204- rx_ring->q_vector);
10205+ " rx_rings[%i]: size = %i\n",
10206+ i, rx_ring->size);
10207 dev_info(&pf->pdev->dev,
10208 " rx_rings[%i]: rx_itr_setting = %d (%s)\n",
10209- i, rx_ring->rx_itr_setting,
10210- ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed");
10211+ i, rx_ring->itr_setting,
10212+ ITR_IS_DYNAMIC(rx_ring->itr_setting) ?
10213+ "dynamic" : "fixed");
10214 }
10215 for (i = 0; i < vsi->num_queue_pairs; i++) {
10216- struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
10217+ struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
10218
10219 if (!tx_ring)
10220 continue;
10221
10222 dev_info(&pf->pdev->dev,
10223- " tx_rings[%i]: desc = %p\n",
10224- i, tx_ring->desc);
10225- dev_info(&pf->pdev->dev,
10226- " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
10227- i, tx_ring->dev,
10228- tx_ring->netdev,
10229- tx_ring->tx_bi);
10230- dev_info(&pf->pdev->dev,
10231- " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
10232- i, tx_ring->state,
10233+ " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
10234+ i, *tx_ring->state,
10235 tx_ring->queue_index,
10236 tx_ring->reg_idx);
10237 dev_info(&pf->pdev->dev,
10238@@ -355,20 +362,16 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
10239 tx_ring->tx_stats.tx_busy,
10240 tx_ring->tx_stats.tx_done_old);
10241 dev_info(&pf->pdev->dev,
10242- " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
10243- i, tx_ring->size,
10244- (unsigned long int)tx_ring->dma);
10245- dev_info(&pf->pdev->dev,
10246- " tx_rings[%i]: vsi = %p, q_vector = %p\n",
10247- i, tx_ring->vsi,
10248- tx_ring->q_vector);
10249+ " tx_rings[%i]: size = %i\n",
10250+ i, tx_ring->size);
10251 dev_info(&pf->pdev->dev,
10252 " tx_rings[%i]: DCB tc = %d\n",
10253 i, tx_ring->dcb_tc);
10254 dev_info(&pf->pdev->dev,
10255 " tx_rings[%i]: tx_itr_setting = %d (%s)\n",
10256- i, tx_ring->tx_itr_setting,
10257- ITR_IS_DYNAMIC(tx_ring->tx_itr_setting) ? "dynamic" : "fixed");
10258+ i, tx_ring->itr_setting,
10259+ ITR_IS_DYNAMIC(tx_ring->itr_setting) ?
10260+ "dynamic" : "fixed");
10261 }
10262 rcu_read_unlock();
10263 dev_info(&pf->pdev->dev,
10264@@ -384,8 +387,9 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
10265 " seid = %d, id = %d, uplink_seid = %d\n",
10266 vsi->seid, vsi->id, vsi->uplink_seid);
10267 dev_info(&pf->pdev->dev,
10268- " base_queue = %d, num_queue_pairs = %d, num_desc = %d\n",
10269- vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc);
10270+ " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
10271+ vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
10272+ vsi->num_rx_desc);
10273 dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
10274 if (vsi->type == I40E_VSI_SRIOV)
10275 dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
10276@@ -466,8 +470,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
10277 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
10278 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
10279 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
10280- if (vsi->back)
10281- dev_info(&pf->pdev->dev, " PF = %p\n", vsi->back);
10282 dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
10283 dev_info(&pf->pdev->dev,
10284 " tc_config: numtc = %d, enabled_tc = 0x%x\n",
10285@@ -534,6 +536,15 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
10286 }
10287 }
10288
10289+/* Helper macros for printing upper half of the 32byte descriptor. */
10290+#ifdef I40E_32BYTE_RX
10291+#define RXD_RSVD1(_rxd) ((_rxd)->read.rsvd1)
10292+#define RXD_RSVD2(_rxd) ((_rxd)->read.rsvd2)
10293+#else
10294+#define RXD_RSVD1(_rxd) 0ULL
10295+#define RXD_RSVD2(_rxd) 0ULL
10296+#endif
10297+
10298 /**
10299 * i40e_dbg_dump_desc - handles dump desc write into command datum
10300 * @cnt: number of arguments that the user supplied
10301@@ -548,7 +559,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
10302 {
10303 struct i40e_tx_desc *txd;
10304 union i40e_rx_desc *rxd;
10305- struct i40e_ring *ring;
10306+ struct i40e_ring ring;
10307 struct i40e_vsi *vsi;
10308 int i;
10309
10310@@ -567,58 +578,59 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
10311 vsi_seid);
10312 return;
10313 }
10314-
10315- ring = kmemdup(is_rx_ring
10316- ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id],
10317- sizeof(*ring), GFP_KERNEL);
10318- if (!ring)
10319- return;
10320-
10321+ if (is_rx_ring)
10322+ ring = *vsi->rx_rings[ring_id];
10323+ else
10324+ ring = *vsi->tx_rings[ring_id];
10325 if (cnt == 2) {
10326+ void *head = (struct i40e_tx_desc *)ring.desc + ring.count;
10327+ u32 tx_head = le32_to_cpu(*(volatile __le32 *)head);
10328+
10329 dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
10330 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
10331- for (i = 0; i < ring->count; i++) {
10332+ dev_info(&pf->pdev->dev, "head = %04x tail = %04x\n",
10333+ is_rx_ring ? 0 : tx_head, readl(ring.tail));
10334+ dev_info(&pf->pdev->dev, "ntc = %04x ntu = %04x\n",
10335+ ring.next_to_clean, ring.next_to_use);
10336+ for (i = 0; i < ring.count; i++) {
10337 if (!is_rx_ring) {
10338- txd = I40E_TX_DESC(ring, i);
10339+ txd = I40E_TX_DESC(&ring, i);
10340 dev_info(&pf->pdev->dev,
10341 " d[%03x] = 0x%016llx 0x%016llx\n",
10342 i, txd->buffer_addr,
10343 txd->cmd_type_offset_bsz);
10344 } else {
10345- rxd = I40E_RX_DESC(ring, i);
10346+ rxd = I40E_RX_DESC(&ring, i);
10347 dev_info(&pf->pdev->dev,
10348 " d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
10349 i, rxd->read.pkt_addr,
10350 rxd->read.hdr_addr,
10351- rxd->read.rsvd1, rxd->read.rsvd2);
10352+ RXD_RSVD1(rxd), RXD_RSVD2(rxd));
10353 }
10354 }
10355 } else if (cnt == 3) {
10356- if (desc_n >= ring->count || desc_n < 0) {
10357+ if (desc_n >= ring.count || desc_n < 0) {
10358 dev_info(&pf->pdev->dev,
10359 "descriptor %d not found\n", desc_n);
10360- goto out;
10361+ return;
10362 }
10363 if (!is_rx_ring) {
10364- txd = I40E_TX_DESC(ring, desc_n);
10365+ txd = I40E_TX_DESC(&ring, desc_n);
10366 dev_info(&pf->pdev->dev,
10367 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
10368 vsi_seid, ring_id, desc_n,
10369 txd->buffer_addr, txd->cmd_type_offset_bsz);
10370 } else {
10371- rxd = I40E_RX_DESC(ring, desc_n);
10372+ rxd = I40E_RX_DESC(&ring, desc_n);
10373 dev_info(&pf->pdev->dev,
10374 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
10375 vsi_seid, ring_id, desc_n,
10376 rxd->read.pkt_addr, rxd->read.hdr_addr,
10377- rxd->read.rsvd1, rxd->read.rsvd2);
10378+ RXD_RSVD1(rxd), RXD_RSVD2(rxd));
10379 }
10380 } else {
10381 dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
10382 }
10383-
10384-out:
10385- kfree(ring);
10386 }
10387
10388 /**
10389@@ -636,7 +648,161 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
10390 }
10391
10392 /**
10393- * i40e_dbg_dump_stats - handles dump stats write into command datum
10394+ * i40e_dbg_dump_resources - handles dump resources request
10395+ * @pf: the i40e_pf created in command write
10396+ **/
10397+static void i40e_dbg_dump_resources(struct i40e_pf *pf)
10398+{
10399+ struct i40e_aqc_switch_resource_alloc_element_resp *buf;
10400+ int buf_len;
10401+ u16 count = 32;
10402+ u8 num_entries;
10403+ int ret, i;
10404+
10405+ buf_len = count * sizeof(*buf);
10406+ buf = kzalloc(buf_len, GFP_KERNEL);
10407+ if (!buf) {
10408+ dev_err(&pf->pdev->dev, "Can't get memory\n");
10409+ return;
10410+ }
10411+
10412+ ret = i40e_aq_get_switch_resource_alloc(&pf->hw, &num_entries,
10413+ buf, count, NULL);
10414+ if (ret) {
10415+ dev_err(&pf->pdev->dev,
10416+ "fail to get resources, err %s aq_err %s\n",
10417+ i40e_stat_str(&pf->hw, ret),
10418+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10419+ kfree(buf);
10420+ return;
10421+ }
10422+
10423+ dev_info(&pf->pdev->dev, " resources:\n");
10424+ dev_info(&pf->pdev->dev, " guar total used unalloc name\n");
10425+ for (i = 0; i < num_entries; i++) {
10426+ char *p;
10427+
10428+ switch (buf[i].resource_type) {
10429+ case I40E_AQ_RESOURCE_TYPE_VEB:
10430+ p = "vebs";
10431+ break;
10432+ case I40E_AQ_RESOURCE_TYPE_VSI:
10433+ p = "vsis";
10434+ break;
10435+ case I40E_AQ_RESOURCE_TYPE_MACADDR:
10436+ p = "macaddrs";
10437+ break;
10438+ case I40E_AQ_RESOURCE_TYPE_STAG:
10439+ p = "stags";
10440+ break;
10441+ case I40E_AQ_RESOURCE_TYPE_ETAG:
10442+ p = "etags";
10443+ break;
10444+ case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
10445+ p = "multicast hash";
10446+ break;
10447+ case I40E_AQ_RESOURCE_TYPE_UNICAST_HASH:
10448+ p = "unicast hash";
10449+ break;
10450+ case I40E_AQ_RESOURCE_TYPE_VLAN:
10451+ p = "vlans";
10452+ break;
10453+ case I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY:
10454+ p = "vsi list entries";
10455+ break;
10456+ case I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY:
10457+ p = "etag list entries";
10458+ break;
10459+ case I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL:
10460+ p = "vlan stat pools";
10461+ break;
10462+ case I40E_AQ_RESOURCE_TYPE_MIRROR_RULE:
10463+ p = "mirror rules";
10464+ break;
10465+ case I40E_AQ_RESOURCE_TYPE_QUEUE_SETS:
10466+ p = "queue sets";
10467+ break;
10468+ case I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS:
10469+ p = "vlan filters";
10470+ break;
10471+ case I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS:
10472+ p = "inner mac filters";
10473+ break;
10474+ case I40E_AQ_RESOURCE_TYPE_IP_FILTERS:
10475+ p = "ip filters";
10476+ break;
10477+ case I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS:
10478+ p = "gre vn keys";
10479+ break;
10480+ case I40E_AQ_RESOURCE_TYPE_VN2_KEYS:
10481+ p = "vn2 keys";
10482+ break;
10483+ case I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS:
10484+ p = "tunnel ports";
10485+ break;
10486+ default:
10487+ p = "unknown";
10488+ break;
10489+ }
10490+
10491+ dev_info(&pf->pdev->dev, " %4d %4d %4d %4d %s\n",
10492+ buf[i].guaranteed, buf[i].total, buf[i].used,
10493+ buf[i].total_unalloced, p);
10494+ }
10495+
10496+ kfree(buf);
10497+}
10498+
10499+/**
10500+ * i40e_dbg_dump_capabilities - handles dump capabilities request
10501+ * @pf: the i40e_pf created in command write
10502+ **/
10503+static void i40e_dbg_dump_capabilities(struct i40e_pf *pf)
10504+{
10505+ struct i40e_hw_capabilities *p;
10506+
10507+ p = (struct i40e_hw_capabilities *)&pf->hw.func_caps;
10508+ dev_info(&pf->pdev->dev, " capabilities:\n");
10509+ dev_info(&pf->pdev->dev,
10510+ " switch_mode = %d\tmgmt_mode = %d\tnpar = %d\tos2bmc = %d\n",
10511+ p->switch_mode, p->management_mode, p->npar_enable, p->os2bmc);
10512+ dev_info(&pf->pdev->dev,
10513+ " valid_functions = 0x%04x\tsr_iov_1_1 = %d\tnum_vfs = %d\tvf_base_id = %d\n",
10514+ p->valid_functions, p->sr_iov_1_1, p->num_vfs, p->vf_base_id);
10515+ dev_info(&pf->pdev->dev, " nvm_image_type = %d\n", p->nvm_image_type);
10516+ dev_info(&pf->pdev->dev,
10517+ " num_vsis = %d\tvmdq = %d\tflex10_enable = %d\tflex10_capable = %d\n",
10518+ p->num_vsis, p->vmdq, p->flex10_enable, p->flex10_capable);
10519+ dev_info(&pf->pdev->dev,
10520+ " evb_802_1_qbg = %d\tevb_802_1_qbh = %d\tmgmt_cem = %d\tieee_1588 = %d\n",
10521+ p->evb_802_1_qbg, p->evb_802_1_qbh, p->mgmt_cem, p->ieee_1588);
10522+ dev_info(&pf->pdev->dev,
10523+ " fcoe = %d\tiwarp = %d\tmdio_port_num = %d\tmdio_port_mode = %d\n",
10524+ p->fcoe, p->iwarp, p->mdio_port_num, p->mdio_port_mode);
10525+ dev_info(&pf->pdev->dev,
10526+ " dcb = %d\tenabled_tcmap = %d\tmaxtc = %d\tiscsi = %d\n",
10527+ p->dcb, p->enabled_tcmap, p->maxtc, p->iscsi);
10528+ dev_info(&pf->pdev->dev,
10529+ " fd = %d\tfd_filters_guaranteed = %d\tfd_filters_best_effort = %d\tnum_flow_director_filters = %d\n",
10530+ p->fd, p->fd_filters_guaranteed, p->fd_filters_best_effort,
10531+ p->num_flow_director_filters);
10532+ dev_info(&pf->pdev->dev,
10533+ " rss = %d\trss_table_size = %d\trss_table_entry_width = %d\n",
10534+ p->rss, p->rss_table_size, p->rss_table_entry_width);
10535+ dev_info(&pf->pdev->dev,
10536+ " led[0] = %d\tsdp[0] = %d\tled_pin_num = %d\tsdp_pin_num = %d\n",
10537+ p->led[0], p->sdp[0], p->led_pin_num, p->sdp_pin_num);
10538+ dev_info(&pf->pdev->dev,
10539+ " num_rx_qp = %d\tnum_tx_qp = %d\tbase_queue = %d\n",
10540+ p->num_rx_qp, p->num_tx_qp, p->base_queue);
10541+ dev_info(&pf->pdev->dev,
10542+ " num_msix_vectors = %d\tnum_msix_vectors_vf = %d\trx_buf_chain_len = %d\n",
10543+ p->num_msix_vectors, p->num_msix_vectors_vf,
10544+ p->rx_buf_chain_len);
10545+}
10546+
10547+/**
10548+ * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
10549 * @pf: the i40e_pf created in command write
10550 * @estats: the eth stats structure to be dumped
10551 **/
10552@@ -669,17 +835,35 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
10553 static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
10554 {
10555 struct i40e_veb *veb;
10556+ int i;
10557
10558 veb = i40e_dbg_find_veb(pf, seid);
10559 if (!veb) {
10560 dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
10561 return;
10562 }
10563+#ifdef HAVE_BRIDGE_ATTRIBS
10564 dev_info(&pf->pdev->dev,
10565 "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
10566 veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
10567 veb->uplink_seid,
10568 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
10569+#else
10570+ dev_info(&pf->pdev->dev,
10571+ "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
10572+ veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
10573+ veb->uplink_seid,
10574+ "VEB");
10575+#endif
10576+ dev_info(&pf->pdev->dev,
10577+ "veb bw: enabled_tc=0x%x bw_limit=%d bw_max_quanta=%d is_abs_credits=%d\n",
10578+ veb->enabled_tc, veb->bw_limit, veb->bw_max_quanta,
10579+ veb->is_abs_credits);
10580+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10581+ dev_info(&pf->pdev->dev, "veb bw: tc=%d bw_share=%d bw_limit=%d max_quanta=%d\n",
10582+ i, veb->bw_tc_share_credits[i],
10583+ veb->bw_tc_limit_credits[i], veb->bw_tc_max_quanta[i]);
10584+ }
10585 i40e_dbg_dump_eth_stats(pf, &veb->stats);
10586 }
10587
10588@@ -740,6 +924,100 @@ static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
10589 i40e_dbg_dump_vf(pf, i);
10590 }
10591
10592+/**
10593+ * i40e_dbg_dump_dcb_cfg - Dump DCB config data struct
10594+ * @pf: the corresponding PF
10595+ * @cfg: DCB Config data structure
10596+ * @prefix: Prefix string
10597+ **/
10598+static void i40e_dbg_dump_dcb_cfg(struct i40e_pf *pf,
10599+ struct i40e_dcbx_config *cfg,
10600+ char *prefix)
10601+{
10602+ int i;
10603+
10604+ dev_info(&pf->pdev->dev,
10605+ "%s ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
10606+ prefix, cfg->etscfg.willing, cfg->etscfg.cbs,
10607+ cfg->etscfg.maxtcs);
10608+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10609+ dev_info(&pf->pdev->dev, "%s ets_cfg: up=%d tc=%d\n",
10610+ prefix, i, cfg->etscfg.prioritytable[i]);
10611+ }
10612+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10613+ dev_info(&pf->pdev->dev, "%s ets_cfg: tc=%d tcbw=%d tctsa=%d\n",
10614+ prefix, i, cfg->etscfg.tcbwtable[i],
10615+ cfg->etscfg.tsatable[i]);
10616+ }
10617+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10618+ dev_info(&pf->pdev->dev, "%s ets_rec: up=%d tc=%d\n",
10619+ prefix, i, cfg->etsrec.prioritytable[i]);
10620+ }
10621+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10622+ dev_info(&pf->pdev->dev, "%s ets_rec: tc=%d tcbw=%d tctsa=%d\n",
10623+ prefix, i, cfg->etsrec.tcbwtable[i],
10624+ cfg->etsrec.tsatable[i]);
10625+ }
10626+ dev_info(&pf->pdev->dev,
10627+ "%s pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
10628+ prefix, cfg->pfc.willing, cfg->pfc.mbc,
10629+ cfg->pfc.pfccap, cfg->pfc.pfcenable);
10630+
10631+ dev_info(&pf->pdev->dev,
10632+ "%s app_table: num_apps=%d\n", prefix, (int)cfg->numapps);
10633+ for (i = 0; i < (int)cfg->numapps; i++) {
10634+ dev_info(&pf->pdev->dev, "%s app_table: %d prio=%d selector=%d protocol=0x%x\n",
10635+ prefix, i, cfg->app[i].priority,
10636+ cfg->app[i].selector,
10637+ cfg->app[i].protocolid);
10638+ }
10639+}
10640+
10641+/**
10642+ * i40e_dbg_dump_fdir_filter - Dump out flow director filter contents
10643+ * @pf: the corresponding PF
10644+ * @f: the flow director filter
10645+ **/
10646+static inline void i40e_dbg_dump_fdir_filter(struct i40e_pf *pf,
10647+ struct i40e_fdir_filter *f)
10648+{
10649+ dev_info(&pf->pdev->dev, "fdir filter %d:\n", f->fd_id);
10650+ dev_info(&pf->pdev->dev, " flow_type=%d ip4_proto=%d\n",
10651+ f->flow_type, f->ip4_proto);
10652+ dev_info(&pf->pdev->dev, " dst_ip= %pi4 dst_port=%d\n",
10653+ &f->dst_ip, f->dst_port);
10654+ dev_info(&pf->pdev->dev, " src_ip= %pi4 src_port=%d\n",
10655+ &f->src_ip, f->src_port);
10656+ dev_info(&pf->pdev->dev, " sctp_v_tag=%d q_index=%d\n",
10657+ f->sctp_v_tag, f->q_index);
10658+ if (f->flex_filter)
10659+ dev_info(&pf->pdev->dev, " flex_word=%04x flex_offset=%d\n",
10660+ f->flex_word, f->flex_offset);
10661+ dev_info(&pf->pdev->dev, " pctype=%d dest_vsi=%d dest_ctl=%d\n",
10662+ f->pctype, f->dest_vsi, f->dest_ctl);
10663+ dev_info(&pf->pdev->dev, " fd_status=%d cnt_index=%d\n",
10664+ f->fd_status, f->cnt_index);
10665+}
10666+
10667+/**
10668+ * i40e_dbg_dump_cloud_filter - Dump out cloud filter contents
10669+ * @pf: the corresponding PF
10670+ * @f: the flow director filter
10671+ **/
10672+static inline void i40e_dbg_dump_cloud_filter(struct i40e_pf *pf,
10673+ struct i40e_cloud_filter *f)
10674+{
10675+ dev_info(&pf->pdev->dev, "cloud filter %d:\n", f->id);
10676+ dev_info(&pf->pdev->dev, " outer_mac[]=%pM inner_mac=%pM\n",
10677+ f->outer_mac, f->inner_mac);
10678+ dev_info(&pf->pdev->dev, " inner_vlan %d, inner_ip[0] %pi4\n",
10679+ be16_to_cpu(f->inner_vlan), f->inner_ip);
10680+ dev_info(&pf->pdev->dev, " tenant_id=%d flags=0x%02x, tunnel_type=0x%02x\n",
10681+ f->tenant_id, f->flags, f->tunnel_type);
10682+ dev_info(&pf->pdev->dev, " seid=%d queue_id=%d\n",
10683+ f->seid, f->queue_id);
10684+}
10685+
10686 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
10687 /**
10688 * i40e_dbg_command_write - write into command datum
10689@@ -781,7 +1059,48 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
10690 count = cmd_buf_tmp - cmd_buf + 1;
10691 }
10692
10693- if (strncmp(cmd_buf, "add vsi", 7) == 0) {
10694+ if (strncmp(cmd_buf, "read", 4) == 0) {
10695+ u32 address;
10696+ u32 value;
10697+
10698+ cnt = sscanf(&cmd_buf[4], "%i", &address);
10699+ if (cnt != 1) {
10700+ dev_info(&pf->pdev->dev, "read <reg>\n");
10701+ goto command_write_done;
10702+ }
10703+
10704+ /* check the range on address */
10705+ if (address > (pf->ioremap_len - sizeof(u32))) {
10706+ dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
10707+ address, (pf->ioremap_len - sizeof(u32)));
10708+ goto command_write_done;
10709+ }
10710+
10711+ value = rd32(&pf->hw, address);
10712+ dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
10713+ address, value);
10714+
10715+ } else if (strncmp(cmd_buf, "write", 5) == 0) {
10716+ u32 address, value;
10717+
10718+ cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
10719+ if (cnt != 2) {
10720+ dev_info(&pf->pdev->dev, "write <reg> <value>\n");
10721+ goto command_write_done;
10722+ }
10723+
10724+ /* check the range on address */
10725+ if (address > (pf->ioremap_len - sizeof(u32))) {
10726+ dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
10727+ address, (pf->ioremap_len - sizeof(u32)));
10728+ goto command_write_done;
10729+ }
10730+ wr32(&pf->hw, address, value);
10731+ value = rd32(&pf->hw, address);
10732+ dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
10733+ address, value);
10734+
10735+ } else if (strncmp(cmd_buf, "add vsi", 7) == 0) {
10736 vsi_seid = -1;
10737 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
10738 if (cnt == 0) {
10739@@ -798,8 +1117,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
10740 */
10741 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
10742 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
10743- i40e_do_reset_safe(pf,
10744- BIT_ULL(__I40E_PF_RESET_REQUESTED));
10745+ i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
10746 }
10747
10748 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
10749@@ -871,6 +1189,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
10750
10751 } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
10752 int i;
10753+
10754 cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
10755 if (cnt != 1) {
10756 dev_info(&pf->pdev->dev,
10757@@ -898,9 +1217,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
10758 } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
10759 i40e_status ret;
10760 u16 vid;
10761- unsigned int v;
10762+ int v;
10763
10764- cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
10765+ cnt = sscanf(&cmd_buf[8], "%i %d", &vsi_seid, &v);
10766 if (cnt != 2) {
10767 dev_info(&pf->pdev->dev,
10768 "add pvid: bad command string, cnt=%d\n", cnt);
10769@@ -914,7 +1233,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
10770 goto command_write_done;
10771 }
10772
10773- vid = v;
10774+ vid = (unsigned)v;
10775 ret = i40e_vsi_add_pvid(vsi, vid);
10776 if (!ret)
10777 dev_info(&pf->pdev->dev,
10778@@ -949,6 +1268,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
10779 } else if (strncmp(cmd_buf, "dump", 4) == 0) {
10780 if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
10781 i40e_fetch_switch_configuration(pf, true);
10782+ } else if (strncmp(&cmd_buf[5], "resources", 9) == 0) {
10783+ i40e_dbg_dump_resources(pf);
10784+ } else if (strncmp(&cmd_buf[5], "capabilities", 7) == 0) {
10785+ i40e_dbg_dump_capabilities(pf);
10786 } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
10787 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
10788 if (cnt > 0)
10789@@ -969,6 +1292,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
10790 i40e_dbg_dump_vf_all(pf);
10791 } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
10792 int ring_id, desc_n;
10793+
10794 if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
10795 cnt = sscanf(&cmd_buf[12], "%i %i %i",
10796 &vsi_seid, &ring_id, &desc_n);
10797@@ -1007,6 +1331,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
10798 &pf->hw.local_dcbx_config;
10799 struct i40e_dcbx_config *r_cfg =
10800 &pf->hw.remote_dcbx_config;
10801+ struct i40e_dcbx_config *d_cfg =
10802+ &pf->hw.desired_dcbx_config;
10803 int i, ret;
10804 u16 switch_id;
10805
10806@@ -1049,68 +1375,18 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
10807 kfree(bw_data);
10808 bw_data = NULL;
10809
10810- dev_info(&pf->pdev->dev,
10811- "port dcbx_mode=%d\n", cfg->dcbx_mode);
10812- dev_info(&pf->pdev->dev,
10813- "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
10814- cfg->etscfg.willing, cfg->etscfg.cbs,
10815- cfg->etscfg.maxtcs);
10816- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10817- dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
10818- i, cfg->etscfg.prioritytable[i],
10819- cfg->etscfg.tcbwtable[i],
10820- cfg->etscfg.tsatable[i]);
10821- }
10822- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10823- dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
10824- i, cfg->etsrec.prioritytable[i],
10825- cfg->etsrec.tcbwtable[i],
10826- cfg->etsrec.tsatable[i]);
10827- }
10828- dev_info(&pf->pdev->dev,
10829- "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
10830- cfg->pfc.willing, cfg->pfc.mbc,
10831- cfg->pfc.pfccap, cfg->pfc.pfcenable);
10832- dev_info(&pf->pdev->dev,
10833- "port app_table: num_apps=%d\n", cfg->numapps);
10834- for (i = 0; i < cfg->numapps; i++) {
10835- dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
10836- i, cfg->app[i].priority,
10837- cfg->app[i].selector,
10838- cfg->app[i].protocolid);
10839- }
10840- /* Peer TLV DCBX data */
10841- dev_info(&pf->pdev->dev,
10842- "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
10843- r_cfg->etscfg.willing,
10844- r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
10845- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10846- dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
10847- i, r_cfg->etscfg.prioritytable[i],
10848- r_cfg->etscfg.tcbwtable[i],
10849- r_cfg->etscfg.tsatable[i]);
10850- }
10851- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10852- dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
10853- i, r_cfg->etsrec.prioritytable[i],
10854- r_cfg->etsrec.tcbwtable[i],
10855- r_cfg->etsrec.tsatable[i]);
10856- }
10857- dev_info(&pf->pdev->dev,
10858- "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
10859- r_cfg->pfc.willing,
10860- r_cfg->pfc.mbc,
10861- r_cfg->pfc.pfccap,
10862- r_cfg->pfc.pfcenable);
10863- dev_info(&pf->pdev->dev,
10864- "remote port app_table: num_apps=%d\n",
10865- r_cfg->numapps);
10866- for (i = 0; i < r_cfg->numapps; i++) {
10867- dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
10868- i, r_cfg->app[i].priority,
10869- r_cfg->app[i].selector,
10870- r_cfg->app[i].protocolid);
10871+ if (cfg->dcbx_mode == I40E_DCBX_MODE_CEE) {
10872+ dev_info(&pf->pdev->dev,
10873+ "CEE DCBX mode with Oper TLV Status = 0x%x\n",
10874+ cfg->tlv_status);
10875+ i40e_dbg_dump_dcb_cfg(pf, d_cfg, "DesiredCfg");
10876+ } else {
10877+ dev_info(&pf->pdev->dev, "IEEE DCBX mode\n");
10878 }
10879+
10880+ i40e_dbg_dump_dcb_cfg(pf, cfg, "OperCfg");
10881+ i40e_dbg_dump_dcb_cfg(pf, r_cfg, "PeerCfg");
10882+
10883 } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
10884 int cluster_id, table_id;
10885 int index, ret;
10886@@ -1155,73 +1431,65 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
10887 buff, rlen, true);
10888 kfree(buff);
10889 buff = NULL;
10890+ } else if (strncmp(&cmd_buf[5], "filters", 7) == 0) {
10891+ struct i40e_fdir_filter *f_rule;
10892+ struct i40e_cloud_filter *c_rule;
10893+ struct hlist_node *node2;
10894+
10895+ hlist_for_each_entry_safe(f_rule, node2,
10896+ &pf->fdir_filter_list,
10897+ fdir_node) {
10898+ i40e_dbg_dump_fdir_filter(pf, f_rule);
10899+ }
10900+
10901+ /* find the cloud filter rule ids */
10902+ hlist_for_each_entry_safe(c_rule, node2,
10903+ &pf->cloud_filter_list,
10904+ cloud_node) {
10905+ i40e_dbg_dump_cloud_filter(pf, c_rule);
10906+ }
10907+ i40e_dbg_dump_all_vsi_filters(pf);
10908 } else {
10909 dev_info(&pf->pdev->dev,
10910 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
10911 dev_info(&pf->pdev->dev, "dump switch\n");
10912 dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
10913+ dev_info(&pf->pdev->dev, "dump capabilities\n");
10914+ dev_info(&pf->pdev->dev, "dump resources\n");
10915 dev_info(&pf->pdev->dev, "dump reset stats\n");
10916 dev_info(&pf->pdev->dev, "dump port\n");
10917- dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
10918+ dev_info(&pf->pdev->dev, "dump VF [vf_id]\n");
10919 dev_info(&pf->pdev->dev,
10920 "dump debug fwdata <cluster_id> <table_id> <index>\n");
10921+ dev_info(&pf->pdev->dev, "dump filters\n");
10922 }
10923- } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
10924- dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
10925- i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
10926-
10927- } else if (strncmp(cmd_buf, "corer", 5) == 0) {
10928- dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
10929- i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
10930
10931- } else if (strncmp(cmd_buf, "globr", 5) == 0) {
10932- dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
10933- i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
10934+ } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) {
10935+ u32 level;
10936
10937- } else if (strncmp(cmd_buf, "empr", 4) == 0) {
10938- dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
10939- i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED));
10940-
10941- } else if (strncmp(cmd_buf, "read", 4) == 0) {
10942- u32 address;
10943- u32 value;
10944-
10945- cnt = sscanf(&cmd_buf[4], "%i", &address);
10946- if (cnt != 1) {
10947- dev_info(&pf->pdev->dev, "read <reg>\n");
10948- goto command_write_done;
10949- }
10950-
10951- /* check the range on address */
10952- if (address > (pf->ioremap_len - sizeof(u32))) {
10953- dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
10954- address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
10955- goto command_write_done;
10956- }
10957-
10958- value = rd32(&pf->hw, address);
10959- dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
10960- address, value);
10961-
10962- } else if (strncmp(cmd_buf, "write", 5) == 0) {
10963- u32 address, value;
10964-
10965- cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
10966- if (cnt != 2) {
10967- dev_info(&pf->pdev->dev, "write <reg> <value>\n");
10968- goto command_write_done;
10969- }
10970-
10971- /* check the range on address */
10972- if (address > (pf->ioremap_len - sizeof(u32))) {
10973- dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
10974- address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
10975- goto command_write_done;
10976+ cnt = sscanf(&cmd_buf[10], "%i", &level);
10977+ if (cnt) {
10978+ if (I40E_DEBUG_USER & level) {
10979+ pf->hw.debug_mask = level;
10980+ dev_info(&pf->pdev->dev,
10981+ "set hw.debug_mask = 0x%08x\n",
10982+ pf->hw.debug_mask);
10983+ }
10984+ pf->msg_enable = level;
10985+ dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n",
10986+ pf->msg_enable);
10987+ } else {
10988+ dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n",
10989+ pf->msg_enable);
10990 }
10991- wr32(&pf->hw, address, value);
10992- value = rd32(&pf->hw, address);
10993- dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
10994- address, value);
10995+ } else if (strncmp(cmd_buf, "defport on", 10) == 0) {
10996+ dev_info(&pf->pdev->dev, "debugfs: forcing PFR with defport enabled\n");
10997+ pf->cur_promisc = true;
10998+ i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
10999+ } else if (strncmp(cmd_buf, "defport off", 11) == 0) {
11000+ dev_info(&pf->pdev->dev, "debugfs: forcing PFR with defport disabled\n");
11001+ pf->cur_promisc = false;
11002+ i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
11003 } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
11004 if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
11005 cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
11006@@ -1260,7 +1528,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
11007 struct i40e_aq_desc *desc;
11008 i40e_status ret;
11009
11010- desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
11011+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
11012 if (!desc)
11013 goto command_write_done;
11014 cnt = sscanf(&cmd_buf[11],
11015@@ -1308,7 +1576,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
11016 u16 buffer_len;
11017 u8 *buff;
11018
11019- desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
11020+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
11021 if (!desc)
11022 goto command_write_done;
11023 cnt = sscanf(&cmd_buf[20],
11024@@ -1339,7 +1607,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
11025 desc = NULL;
11026 goto command_write_done;
11027 }
11028- desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
11029+ desc->flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
11030 ret = i40e_asq_send_command(&pf->hw, desc, buff,
11031 buffer_len, NULL);
11032 if (!ret) {
11033@@ -1371,11 +1639,78 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
11034 } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
11035 dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
11036 i40e_get_current_fd_count(pf));
11037+ /* vf base mode on/off hooks needs to be used by validation only to
11038+ * make sure vf base mode driver is not broken
11039+ */
11040+ } else if (strncmp(cmd_buf, "vf base mode on", 15) == 0) {
11041+ if (!pf->num_alloc_vfs) {
11042+ pf->vf_base_mode_only = true;
11043+ dev_info(&pf->pdev->dev, "VF Base mode is enabled\n");
11044+ } else
11045+ dev_info(&pf->pdev->dev,
11046+ "cannot configure VF Base mode when VFs are allocated\n");
11047+ } else if (strncmp(cmd_buf, "vf base mode off", 16) == 0) {
11048+ if (!pf->num_alloc_vfs) {
11049+ pf->vf_base_mode_only = false;
11050+ dev_info(&pf->pdev->dev, "VF Base mode is disabled\n");
11051+ } else
11052+ dev_info(&pf->pdev->dev,
11053+ "cannot configure VF Base mode when VFs are allocated\n");
11054+ } else if ((strncmp(cmd_buf, "add ethtype filter", 18) == 0) ||
11055+ (strncmp(cmd_buf, "rem ethtype filter", 18) == 0)) {
11056+ u16 ethtype;
11057+ u16 queue;
11058+ bool add = false;
11059+ int ret;
11060+
11061+ if (strncmp(cmd_buf, "add", 3) == 0)
11062+ add = true;
11063+
11064+ cnt = sscanf(&cmd_buf[18],
11065+ "%hi %hi",
11066+ &ethtype, &queue);
11067+ if (cnt != 2) {
11068+ dev_info(&pf->pdev->dev,
11069+ "%s ethtype filter: bad command string, cnt=%d\n",
11070+ add ? "add" : "rem",
11071+ cnt);
11072+ goto command_write_done;
11073+ }
11074+ ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
11075+ pf->hw.mac.addr,
11076+ ethtype, 0,
11077+ pf->vsi[pf->lan_vsi]->seid,
11078+ queue, add, NULL, NULL);
11079+ if (ret) {
11080+ dev_info(&pf->pdev->dev,
11081+ "%s: add/rem Control Packet Filter AQ command failed =0x%x\n",
11082+ add ? "add" : "rem",
11083+ pf->hw.aq.asq_last_status);
11084+ goto command_write_done;
11085+ }
11086+
11087+ } else if (strncmp(cmd_buf, "dcb off", 7) == 0) {
11088+ u8 tc = i40e_pf_get_num_tc(pf);
11089+ /* Allow disabling only when in single TC mode */
11090+ if (tc > 1) {
11091+ dev_info(&pf->pdev->dev, "Failed to disable DCB as TC count(%d) is greater than 1.\n",
11092+ tc);
11093+ goto command_write_done;
11094+ }
11095+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
11096+ } else if (strncmp(cmd_buf, "dcb on", 6) == 0) {
11097+ pf->flags |= I40E_FLAG_DCB_ENABLED;
11098 } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
11099 if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
11100 int ret;
11101
11102- ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
11103+ if (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) {
11104+ dev_info(&pf->pdev->dev,
11105+ "Use ethtool to disable LLDP firmware agent:"\
11106+ "\"ethtool --set-priv-flags <interface> disable-fw-lldp on\".\n");
11107+ goto command_write_done;
11108+ }
11109+ ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
11110 if (ret) {
11111 dev_info(&pf->pdev->dev,
11112 "Stop LLDP AQ command failed =0x%x\n",
11113@@ -1389,17 +1724,25 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
11114 0, true, NULL, NULL);
11115 if (ret) {
11116 dev_info(&pf->pdev->dev,
11117- "%s: Add Control Packet Filter AQ command failed =0x%x\n",
11118- __func__, pf->hw.aq.asq_last_status);
11119+ "%s: Add Control Packet Filter AQ command failed =0x%x\n",
11120+ __func__, pf->hw.aq.asq_last_status);
11121 goto command_write_done;
11122 }
11123-#ifdef CONFIG_I40E_DCB
11124+#ifdef CONFIG_DCB
11125+#ifdef HAVE_DCBNL_IEEE
11126 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
11127 DCB_CAP_DCBX_VER_IEEE;
11128-#endif /* CONFIG_I40E_DCB */
11129+#endif /* HAVE_DCBNL_IEEE */
11130+#endif /* CONFIG_DCB */
11131 } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
11132 int ret;
11133
11134+ if (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) {
11135+ dev_info(&pf->pdev->dev,
11136+ "Use ethtool to enable LLDP firmware agent:"\
11137+ "\"ethtool --set-priv-flags <interface> disable-fw-lldp off\".\n");
11138+ goto command_write_done;
11139+ }
11140 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
11141 pf->hw.mac.addr,
11142 I40E_ETH_P_LLDP, 0,
11143@@ -1407,22 +1750,23 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
11144 0, false, NULL, NULL);
11145 if (ret) {
11146 dev_info(&pf->pdev->dev,
11147- "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
11148- __func__, pf->hw.aq.asq_last_status);
11149+ "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
11150+ __func__, pf->hw.aq.asq_last_status);
11151 /* Continue and start FW LLDP anyways */
11152 }
11153-
11154- ret = i40e_aq_start_lldp(&pf->hw, NULL);
11155+ ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
11156 if (ret) {
11157 dev_info(&pf->pdev->dev,
11158 "Start LLDP AQ command failed =0x%x\n",
11159 pf->hw.aq.asq_last_status);
11160 goto command_write_done;
11161 }
11162-#ifdef CONFIG_I40E_DCB
11163+#ifdef CONFIG_DCB
11164+#ifdef HAVE_DCBNL_IEEE
11165 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
11166 DCB_CAP_DCBX_VER_IEEE;
11167-#endif /* CONFIG_I40E_DCB */
11168+#endif /* HAVE_DCBNL_IEEE */
11169+#endif /* CONFIG_DCB */
11170 } else if (strncmp(&cmd_buf[5],
11171 "get local", 9) == 0) {
11172 u16 llen, rlen;
11173@@ -1564,6 +1908,251 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
11174 }
11175 kfree(buff);
11176 buff = NULL;
11177+ } else if (strncmp(cmd_buf, "set rss_size", 12) == 0) {
11178+ int q_count;
11179+
11180+ cnt = sscanf(&cmd_buf[12], "%i", &q_count);
11181+ if (cnt != 1) {
11182+ dev_info(&pf->pdev->dev,
11183+ "set rss_size: bad command string, cnt=%d\n", cnt);
11184+ goto command_write_done;
11185+ }
11186+ if (q_count <= 0) {
11187+ dev_info(&pf->pdev->dev,
11188+ "set rss_size: %d is too small\n",
11189+ q_count);
11190+ goto command_write_done;
11191+ }
11192+ dev_info(&pf->pdev->dev,
11193+ "set rss_size requesting %d queues\n", q_count);
11194+ rtnl_lock();
11195+ i40e_reconfig_rss_queues(pf, q_count);
11196+ rtnl_unlock();
11197+ dev_info(&pf->pdev->dev, "new rss_size %d\n",
11198+ pf->alloc_rss_size);
11199+ } else if (strncmp(cmd_buf, "get bw", 6) == 0) {
11200+ i40e_status status;
11201+ u32 max_bw, min_bw;
11202+ bool min_valid, max_valid;
11203+
11204+ status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
11205+ &min_valid, &max_valid);
11206+
11207+ if (status) {
11208+ dev_info(&pf->pdev->dev, "get bw failed with status %d\n",
11209+ status);
11210+ goto command_write_done;
11211+ }
11212+ if (!min_valid) {
11213+ dev_info(&pf->pdev->dev, "min bw invalid\n");
11214+ } else if (min_bw & I40E_ALT_BW_RELATIVE_MASK) {
11215+ dev_info(&pf->pdev->dev, "relative min bw = %d%%\n",
11216+ min_bw & I40E_ALT_BW_VALUE_MASK);
11217+ } else {
11218+ dev_info(&pf->pdev->dev, "absolute min bw = %dMb/s\n",
11219+ (min_bw & I40E_ALT_BW_VALUE_MASK)*128);
11220+ }
11221+ if (!max_valid) {
11222+ dev_info(&pf->pdev->dev, "max bw invalid\n");
11223+ } else if (max_bw & I40E_ALT_BW_RELATIVE_MASK) {
11224+ dev_info(&pf->pdev->dev, "relative max bw = %d%%\n",
11225+ max_bw & I40E_ALT_BW_VALUE_MASK);
11226+ } else {
11227+ dev_info(&pf->pdev->dev, "absolute max bw = %dMb/s\n",
11228+ (max_bw & I40E_ALT_BW_VALUE_MASK)*128);
11229+ }
11230+ } else if (strncmp(cmd_buf, "set bw", 6) == 0) {
11231+ struct i40e_aqc_configure_partition_bw_data bw_data;
11232+ i40e_status status;
11233+ u32 max_bw, min_bw;
11234+
11235+ /* Set the valid bit for this PF */
11236+ bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
11237+
11238+ /* Get the bw's */
11239+ cnt = sscanf(&cmd_buf[7], "%u %u", &max_bw, &min_bw);
11240+ if (cnt != 2) {
11241+ dev_info(&pf->pdev->dev,"set bw <MAX> <MIN>\n");
11242+ goto command_write_done;
11243+ }
11244+ bw_data.max_bw[pf->hw.pf_id] = max_bw;
11245+ bw_data.min_bw[pf->hw.pf_id] = min_bw;
11246+
11247+ /* Set the new bandwidths */
11248+ status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
11249+ if (status) {
11250+ dev_info(&pf->pdev->dev, "configure partition bw failed with status %d\n",
11251+ status);
11252+ goto command_write_done;
11253+ }
11254+ } else if (strncmp(cmd_buf, "commit bw", 9) == 0) {
11255+ /* Commit temporary BW setting to permanent NVM image */
11256+ enum i40e_admin_queue_err last_aq_status;
11257+ i40e_status aq_status;
11258+ u16 nvm_word;
11259+
11260+ if (pf->hw.partition_id != 1) {
11261+ dev_info(&pf->pdev->dev,
11262+ "Commit BW only works on first partition!\n");
11263+ goto command_write_done;
11264+ }
11265+
11266+ /* Acquire NVM for read access */
11267+ aq_status = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
11268+ if (aq_status) {
11269+ dev_info(&pf->pdev->dev,
11270+ "Error %d: Cannot acquire NVM for Read Access\n",
11271+ aq_status);
11272+ goto command_write_done;
11273+ }
11274+
11275+ /* Read word 0x10 of NVM - SW compatibility word 1 */
11276+ aq_status = i40e_aq_read_nvm(&pf->hw,
11277+ I40E_SR_NVM_CONTROL_WORD,
11278+ 0x10, sizeof(nvm_word), &nvm_word,
11279+ false, NULL);
11280+ /* Save off last admin queue command status before releasing
11281+ * the NVM
11282+ */
11283+ last_aq_status = pf->hw.aq.asq_last_status;
11284+ i40e_release_nvm(&pf->hw);
11285+ if (aq_status) {
11286+ dev_info(&pf->pdev->dev, "NVM read error %d:%d\n",
11287+ aq_status, last_aq_status);
11288+ goto command_write_done;
11289+ }
11290+
11291+ /* Wait a bit for NVM release to complete */
11292+ msleep(100);
11293+
11294+ /* Acquire NVM for write access */
11295+ aq_status = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
11296+ if (aq_status) {
11297+ dev_info(&pf->pdev->dev,
11298+ "Error %d: Cannot acquire NVM for Write Access\n",
11299+ aq_status);
11300+ goto command_write_done;
11301+ }
11302+ /* Write it back out unchanged to initiate update NVM,
11303+ * which will force a write of the shadow (alt) RAM to
11304+ * the NVM - thus storing the bandwidth values permanently.
11305+ */
11306+ aq_status = i40e_aq_update_nvm(&pf->hw,
11307+ I40E_SR_NVM_CONTROL_WORD,
11308+ 0x10, sizeof(nvm_word),
11309+ &nvm_word, true, 0, NULL);
11310+ /* Save off last admin queue command status before releasing
11311+ * the NVM
11312+ */
11313+ last_aq_status = pf->hw.aq.asq_last_status;
11314+ i40e_release_nvm(&pf->hw);
11315+ if (aq_status)
11316+ dev_info(&pf->pdev->dev,
11317+ "BW settings NOT SAVED - error %d:%d updating NVM\n",
11318+ aq_status, last_aq_status);
11319+ } else if (strncmp(cmd_buf, "add switch ingress mirror", 25) == 0) {
11320+ u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
11321+ u16 switch_seid, dst_vsi_seid, rule_id;
11322+ i40e_status aq_status;
11323+
11324+ cnt = sscanf(&cmd_buf[25], "%hu %hu",
11325+ &switch_seid, &dst_vsi_seid);
11326+ if (cnt != 2) {
11327+ dev_info(&pf->pdev->dev,
11328+ "add mirror: bad command string, cnt=%d\n",
11329+ cnt);
11330+ goto command_write_done;
11331+ }
11332+
11333+ aq_status =
11334+ i40e_aq_add_mirrorrule(&pf->hw,
11335+ switch_seid, rule_type,
11336+ dst_vsi_seid, 0, NULL, NULL,
11337+ &rule_id, NULL, NULL);
11338+ if (aq_status)
11339+ dev_info(&pf->pdev->dev,
11340+ "add ingress mirror failed with status %d\n",
11341+ aq_status);
11342+ else
11343+ dev_info(&pf->pdev->dev,
11344+ "Ingress mirror rule %d added\n", rule_id);
11345+ } else if (strncmp(cmd_buf, "add switch egress mirror", 24) == 0) {
11346+ u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
11347+ u16 switch_seid, dst_vsi_seid, rule_id;
11348+ i40e_status aq_status;
11349+
11350+ cnt = sscanf(&cmd_buf[24], "%hu %hu",
11351+ &switch_seid, &dst_vsi_seid);
11352+ if (cnt != 2) {
11353+ dev_info(&pf->pdev->dev,
11354+ "add mirror: bad command string, cnt=%d\n",
11355+ cnt);
11356+ goto command_write_done;
11357+ }
11358+
11359+ aq_status =
11360+ i40e_aq_add_mirrorrule(&pf->hw,
11361+ switch_seid, rule_type,
11362+ dst_vsi_seid, 0, NULL, NULL,
11363+ &rule_id, NULL, NULL);
11364+ if (aq_status)
11365+ dev_info(&pf->pdev->dev,
11366+ "add egress mirror failed with status %d\n",
11367+ aq_status);
11368+ else
11369+ dev_info(&pf->pdev->dev,
11370+ "Egress mirror rule %d added\n", rule_id);
11371+ } else if (strncmp(cmd_buf, "del switch ingress mirror", 25) == 0) {
11372+ u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
11373+ i40e_status aq_status;
11374+ u16 switch_seid, rule_id;
11375+
11376+ cnt = sscanf(&cmd_buf[25], "%hu %hu",
11377+ &switch_seid, &rule_id);
11378+ if (cnt != 2) {
11379+ dev_info(&pf->pdev->dev,
11380+ "del mirror: bad command string, cnt=%d\n",
11381+ cnt);
11382+ goto command_write_done;
11383+ }
11384+
11385+ aq_status =
11386+ i40e_aq_delete_mirrorrule(&pf->hw, switch_seid,
11387+ rule_type, rule_id, 0, NULL,
11388+ NULL, NULL, NULL);
11389+ if (aq_status)
11390+ dev_info(&pf->pdev->dev,
11391+ "mirror rule remove failed with status %d\n",
11392+ aq_status);
11393+ else
11394+ dev_info(&pf->pdev->dev,
11395+ "Mirror rule %d removed\n", rule_id);
11396+ } else if (strncmp(cmd_buf, "del switch egress mirror", 24) == 0) {
11397+ u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
11398+ i40e_status aq_status;
11399+ u16 switch_seid, rule_id;
11400+
11401+ cnt = sscanf(&cmd_buf[24], "%hu %hu",
11402+ &switch_seid, &rule_id);
11403+ if (cnt != 2) {
11404+ dev_info(&pf->pdev->dev,
11405+ "del mirror: bad command string, cnt=%d\n",
11406+ cnt);
11407+ goto command_write_done;
11408+ }
11409+
11410+ aq_status =
11411+ i40e_aq_delete_mirrorrule(&pf->hw, switch_seid,
11412+ rule_type, rule_id, 0, NULL,
11413+ NULL, NULL, NULL);
11414+ if (aq_status)
11415+ dev_info(&pf->pdev->dev,
11416+ "mirror rule remove failed with status %d\n",
11417+ aq_status);
11418+ else
11419+ dev_info(&pf->pdev->dev,
11420+ "Mirror rule %d removed\n", rule_id);
11421+
11422 } else {
11423 dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
11424 dev_info(&pf->pdev->dev, "available commands\n");
11425@@ -1575,21 +2164,27 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
11426 dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
11427 dev_info(&pf->pdev->dev, " dump switch\n");
11428 dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
11429+ dev_info(&pf->pdev->dev, " dump capabilities\n");
11430+ dev_info(&pf->pdev->dev, " dump resources\n");
11431 dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
11432 dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
11433 dev_info(&pf->pdev->dev, " dump desc aq\n");
11434 dev_info(&pf->pdev->dev, " dump reset stats\n");
11435 dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
11436+ dev_info(&pf->pdev->dev, " msg_enable [level]\n");
11437 dev_info(&pf->pdev->dev, " read <reg>\n");
11438 dev_info(&pf->pdev->dev, " write <reg> <value>\n");
11439 dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
11440 dev_info(&pf->pdev->dev, " clear_stats port\n");
11441- dev_info(&pf->pdev->dev, " pfr\n");
11442- dev_info(&pf->pdev->dev, " corer\n");
11443- dev_info(&pf->pdev->dev, " globr\n");
11444+ dev_info(&pf->pdev->dev, " defport on\n");
11445+ dev_info(&pf->pdev->dev, " defport off\n");
11446 dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
11447 dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
11448 dev_info(&pf->pdev->dev, " fd current cnt");
11449+ dev_info(&pf->pdev->dev, " vf base mode on\n");
11450+ dev_info(&pf->pdev->dev, " vf base mode off\n");
11451+ dev_info(&pf->pdev->dev, " add ethtype filter <ethtype> <to_queue>");
11452+ dev_info(&pf->pdev->dev, " rem ethtype filter <ethtype> <to_queue>");
11453 dev_info(&pf->pdev->dev, " lldp start\n");
11454 dev_info(&pf->pdev->dev, " lldp stop\n");
11455 dev_info(&pf->pdev->dev, " lldp get local\n");
11456@@ -1597,6 +2192,16 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
11457 dev_info(&pf->pdev->dev, " lldp event on\n");
11458 dev_info(&pf->pdev->dev, " lldp event off\n");
11459 dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
11460+ dev_info(&pf->pdev->dev, " set rss_size <count>\n");
11461+ dev_info(&pf->pdev->dev, " dcb off\n");
11462+ dev_info(&pf->pdev->dev, " dcb on\n");
11463+ dev_info(&pf->pdev->dev, " get bw\n");
11464+ dev_info(&pf->pdev->dev, " set bw <MAX> <MIN>\n");
11465+ dev_info(&pf->pdev->dev, " commit bw\n");
11466+ dev_info(&pf->pdev->dev, " add switch ingress mirror <sw_seid> <dst_seid>\n");
11467+ dev_info(&pf->pdev->dev, " add switch egress mirror <sw_seid> <dst_seid>\n");
11468+ dev_info(&pf->pdev->dev, " del switch ingress mirror <sw_seid> <rule_id>\n");
11469+ dev_info(&pf->pdev->dev, " del switch egress mirror <sw_seid> <rule_id>\n");
11470 }
11471
11472 command_write_done:
11473@@ -1631,7 +2236,7 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
11474 {
11475 struct i40e_pf *pf = filp->private_data;
11476 int bytes_not_copied;
11477- int buf_size = 256;
11478+ size_t buf_size = 256;
11479 char *buf;
11480 int len;
11481
11482@@ -1696,30 +2301,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
11483 count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
11484 }
11485
11486- if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
11487- cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
11488- if (cnt != 1) {
11489- dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n");
11490- goto netdev_ops_write_done;
11491- }
11492- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
11493- if (!vsi) {
11494- dev_info(&pf->pdev->dev,
11495- "tx_timeout: VSI %d not found\n", vsi_seid);
11496- } else if (!vsi->netdev) {
11497- dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
11498- vsi_seid);
11499- } else if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
11500- dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
11501- vsi_seid);
11502- } else if (rtnl_trylock()) {
11503- vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
11504- rtnl_unlock();
11505- dev_info(&pf->pdev->dev, "tx_timeout called\n");
11506- } else {
11507- dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
11508- }
11509- } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
11510+ if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
11511 int mtu;
11512
11513 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
11514@@ -1736,8 +2318,13 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
11515 dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
11516 vsi_seid);
11517 } else if (rtnl_trylock()) {
11518+#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
11519+ vsi->netdev->netdev_ops->extended.ndo_change_mtu(
11520+ vsi->netdev, mtu);
11521+#else
11522 vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
11523 mtu);
11524+#endif
11525 rtnl_unlock();
11526 dev_info(&pf->pdev->dev, "change_mtu called\n");
11527 } else {
11528@@ -1783,14 +2370,33 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
11529 napi_schedule(&vsi->q_vectors[i]->napi);
11530 dev_info(&pf->pdev->dev, "napi called\n");
11531 }
11532+ } else if (strncmp(i40e_dbg_netdev_ops_buf,
11533+ "toggle_tx_timeout", 17) == 0) {
11534+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[17], "%i", &vsi_seid);
11535+ if (cnt != 1) {
11536+ dev_info(&pf->pdev->dev, "toggle_tx_timeout <vsi_seid>\n");
11537+ goto netdev_ops_write_done;
11538+ }
11539+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
11540+ if (!vsi) {
11541+ dev_info(&pf->pdev->dev, "toggle_tx_timeout: VSI %d not found\n",
11542+ vsi_seid);
11543+ } else {
11544+ if (vsi->block_tx_timeout)
11545+ vsi->block_tx_timeout = false;
11546+ else
11547+ vsi->block_tx_timeout = true;
11548+ dev_info(&pf->pdev->dev, "toggle_tx_timeout: block_tx_timeout = %d\n",
11549+ vsi->block_tx_timeout);
11550+ }
11551 } else {
11552 dev_info(&pf->pdev->dev, "unknown command '%s'\n",
11553 i40e_dbg_netdev_ops_buf);
11554 dev_info(&pf->pdev->dev, "available commands\n");
11555- dev_info(&pf->pdev->dev, " tx_timeout <vsi_seid>\n");
11556 dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
11557 dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
11558 dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
11559+ dev_info(&pf->pdev->dev, " toggle_tx_timeout <vsi_seid>\n");
11560 }
11561 netdev_ops_write_done:
11562 return count;
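
The debugfs command handlers reworked above ("read", "write", "dump resources", "dump capabilities", "msg_enable", "defport on/off", and so on) are all driven by plain-text commands written to the driver's debugfs command file. A minimal user-space sketch that exercises one of the new commands follows; it is not part of the patch, and the debugfs mount point, directory layout and PCI address are assumptions that may differ per system.

/* Hedged sketch: write "dump resources" to the i40e debugfs command file.
 * Output is emitted to the kernel log via dev_info() by
 * i40e_dbg_dump_resources() added in this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path: debugfs mounted at /sys/kernel/debug, device 0000:3b:00.0 */
	const char *path = "/sys/kernel/debug/i40e/0000:3b:00.0/command";
	const char *cmd = "dump resources\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}
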
11563diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
11564index 8e46098ba..55a7c1079 100644
11565--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
11566+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
11567@@ -1,33 +1,11 @@
11568-/*******************************************************************************
11569- *
11570- * Intel Ethernet Controller XL710 Family Linux Driver
11571- * Copyright(c) 2013 - 2015 Intel Corporation.
11572- *
11573- * This program is free software; you can redistribute it and/or modify it
11574- * under the terms and conditions of the GNU General Public License,
11575- * version 2, as published by the Free Software Foundation.
11576- *
11577- * This program is distributed in the hope it will be useful, but WITHOUT
11578- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11579- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11580- * more details.
11581- *
11582- * You should have received a copy of the GNU General Public License along
11583- * with this program. If not, see <http://www.gnu.org/licenses/>.
11584- *
11585- * The full GNU General Public License is included in this distribution in
11586- * the file called "COPYING".
11587- *
11588- * Contact Information:
11589- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
11590- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
11591- *
11592- ******************************************************************************/
11593+/* SPDX-License-Identifier: GPL-2.0 */
11594+/* Copyright(c) 2013 - 2020 Intel Corporation. */
11595
11596-#ifndef _I40E_DEVIDS_H_
11597 #define _I40E_DEVIDS_H_
11598
11599 /* Device IDs */
11600+#define I40E_DEV_ID_X710_N3000 0x0CF8
11601+#define I40E_DEV_ID_XXV710_N3000 0x0D58
11602 #define I40E_DEV_ID_SFP_XL710 0x1572
11603 #define I40E_DEV_ID_QEMU 0x1574
11604 #define I40E_DEV_ID_KX_B 0x1580
11605@@ -41,6 +19,13 @@
11606 #define I40E_DEV_ID_10G_BASE_T4 0x1589
11607 #define I40E_DEV_ID_25G_B 0x158A
11608 #define I40E_DEV_ID_25G_SFP28 0x158B
11609+#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF
11610+#define I40E_DEV_ID_10G_B 0x104F
11611+#define I40E_DEV_ID_10G_SFP 0x104E
11612+#define I40E_DEV_ID_5G_BASE_T_BC 0x101F
11613+#define I40E_IS_X710TL_DEVICE(d) \
11614+ (((d) == I40E_DEV_ID_10G_BASE_T_BC) || \
11615+ ((d) == I40E_DEV_ID_5G_BASE_T_BC))
11616 #define I40E_DEV_ID_KX_X722 0x37CE
11617 #define I40E_DEV_ID_QSFP_X722 0x37CF
11618 #define I40E_DEV_ID_SFP_X722 0x37D0
11619@@ -52,4 +37,6 @@
11620 (d) == I40E_DEV_ID_QSFP_B || \
11621 (d) == I40E_DEV_ID_QSFP_C)
11622
11623-#endif /* _I40E_DEVIDS_H_ */
11624+#define i40e_is_25G_device(d) ((d) == I40E_DEV_ID_25G_B || \
11625+ (d) == I40E_DEV_ID_25G_SFP28)
11626+
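
The device-id helpers added above (I40E_IS_X710TL_DEVICE() and i40e_is_25G_device()) are preprocessor macros keyed on the PCI device id. A short sketch of how driver code would typically consume them follows; the wrapper functions are hypothetical and only the macros and device-id defines come from i40e_devids.h.

/* Hedged sketch, assuming the usual driver context (struct i40e_hw with a
 * device_id field, i40e_devids.h included).  Not code from the patch.
 */
static bool i40e_is_x710tl_part(struct i40e_hw *hw)
{
	/* X710-TL BASE-T parts (10G/5G/2.5G) are matched purely by device id. */
	return I40E_IS_X710TL_DEVICE(hw->device_id);
}

static bool i40e_is_25g_part(struct i40e_hw *hw)
{
	return i40e_is_25G_device(hw->device_id);
}
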
11627diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
11628index f141e78d4..dc2e9d6b4 100644
11629--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
11630+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
11631@@ -1,28 +1,5 @@
11632-/*******************************************************************************
11633- *
11634- * Intel Ethernet Controller XL710 Family Linux Driver
11635- * Copyright(c) 2013 - 2014 Intel Corporation.
11636- *
11637- * This program is free software; you can redistribute it and/or modify it
11638- * under the terms and conditions of the GNU General Public License,
11639- * version 2, as published by the Free Software Foundation.
11640- *
11641- * This program is distributed in the hope it will be useful, but WITHOUT
11642- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11643- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11644- * more details.
11645- *
11646- * You should have received a copy of the GNU General Public License along
11647- * with this program. If not, see <http://www.gnu.org/licenses/>.
11648- *
11649- * The full GNU General Public License is included in this distribution in
11650- * the file called "COPYING".
11651- *
11652- * Contact Information:
11653- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
11654- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
11655- *
11656- ******************************************************************************/
11657+// SPDX-License-Identifier: GPL-2.0
11658+/* Copyright(c) 2013 - 2020 Intel Corporation. */
11659
11660 #include "i40e_diag.h"
11661 #include "i40e_prototype.h"
11662@@ -46,9 +23,11 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
11663 wr32(hw, reg, (pat & mask));
11664 val = rd32(hw, reg);
11665 if ((val & mask) != (pat & mask)) {
11666+#ifdef ETHTOOL_TEST
11667 i40e_debug(hw, I40E_DEBUG_DIAG,
11668 "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
11669 __func__, reg, pat, val);
11670+#endif
11671 return I40E_ERR_DIAG_TEST_FAILED;
11672 }
11673 }
11674@@ -56,35 +35,29 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
11675 wr32(hw, reg, orig_val);
11676 val = rd32(hw, reg);
11677 if (val != orig_val) {
11678+#ifdef ETHTOOL_TEST
11679 i40e_debug(hw, I40E_DEBUG_DIAG,
11680 "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n",
11681 __func__, reg, orig_val, val);
11682+#endif
11683 return I40E_ERR_DIAG_TEST_FAILED;
11684 }
11685
11686- return 0;
11687+ return I40E_SUCCESS;
11688 }
11689
11690 struct i40e_diag_reg_test_info i40e_reg_list[] = {
11691 /* offset mask elements stride */
11692- {I40E_QTX_CTL(0), 0x0000FFBF, 1,
11693- I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
11694- {I40E_PFINT_ITR0(0), 0x00000FFF, 3,
11695- I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
11696- {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1,
11697- I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
11698- {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1,
11699- I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
11700- {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1,
11701- I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
11702+ {I40E_QTX_CTL(0), 0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
11703+ {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
11704+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
11705+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
11706+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
11707 {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
11708 {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
11709- {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1,
11710- I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
11711- {I40E_QINT_TQCTL(0), 0x000000FF, 1,
11712- I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
11713- {I40E_QINT_RQCTL(0), 0x000000FF, 1,
11714- I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
11715+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
11716+ {I40E_QINT_TQCTL(0), 0x000000FF, 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
11717+ {I40E_QINT_RQCTL(0), 0x000000FF, 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
11718 {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
11719 { 0 }
11720 };
11721@@ -97,12 +70,12 @@ struct i40e_diag_reg_test_info i40e_reg_list[] = {
11722 **/
11723 i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
11724 {
11725- i40e_status ret_code = 0;
11726+ i40e_status ret_code = I40E_SUCCESS;
11727 u32 reg, mask;
11728 u32 i, j;
11729
11730 for (i = 0; i40e_reg_list[i].offset != 0 &&
11731- !ret_code; i++) {
11732+ ret_code == I40E_SUCCESS; i++) {
11733
11734 /* set actual reg range for dynamically allocated resources */
11735 if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
11736@@ -119,9 +92,10 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
11737
11738 /* test register access */
11739 mask = i40e_reg_list[i].mask;
11740- for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
11741- reg = i40e_reg_list[i].offset +
11742- (j * i40e_reg_list[i].stride);
11743+ for (j = 0; j < i40e_reg_list[i].elements &&
11744+ ret_code == I40E_SUCCESS; j++) {
11745+ reg = i40e_reg_list[i].offset
11746+ + (j * i40e_reg_list[i].stride);
11747 ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
11748 }
11749 }
11750@@ -142,7 +116,7 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
11751
11752 /* read NVM control word and if NVM valid, validate EEPROM checksum*/
11753 ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
11754- if (!ret_code &&
11755+ if ((ret_code == I40E_SUCCESS) &&
11756 ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
11757 BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
11758 return i40e_validate_nvm_checksum(hw, NULL);
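
i40e_diag_reg_pattern_test() above implements a walk-pattern register test: write known patterns through the register's mask, read each back, then restore and re-check the original value. A condensed sketch of that idea follows, assuming the driver's rd32()/wr32() accessors; the pattern list and function name are illustrative, not taken from the patch.

/* Hedged sketch of the pattern-test idea used by i40e_diag_reg_pattern_test(). */
static int reg_pattern_test_sketch(struct i40e_hw *hw, u32 reg, u32 mask)
{
	static const u32 patterns[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF };
	u32 orig = rd32(hw, reg);
	int i;

	for (i = 0; i < 4; i++) {
		wr32(hw, reg, patterns[i] & mask);
		if ((rd32(hw, reg) & mask) != (patterns[i] & mask))
			return -1;	/* register failed to hold the pattern */
	}
	wr32(hw, reg, orig);		/* restore the original contents */
	return (rd32(hw, reg) == orig) ? 0 : -1;
}
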
11759diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
11760index 0b5911652..697015b4f 100644
11761--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
11762+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
11763@@ -1,28 +1,5 @@
11764-/*******************************************************************************
11765- *
11766- * Intel Ethernet Controller XL710 Family Linux Driver
11767- * Copyright(c) 2013 - 2014 Intel Corporation.
11768- *
11769- * This program is free software; you can redistribute it and/or modify it
11770- * under the terms and conditions of the GNU General Public License,
11771- * version 2, as published by the Free Software Foundation.
11772- *
11773- * This program is distributed in the hope it will be useful, but WITHOUT
11774- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11775- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11776- * more details.
11777- *
11778- * You should have received a copy of the GNU General Public License along
11779- * with this program. If not, see <http://www.gnu.org/licenses/>.
11780- *
11781- * The full GNU General Public License is included in this distribution in
11782- * the file called "COPYING".
11783- *
11784- * Contact Information:
11785- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
11786- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
11787- *
11788- ******************************************************************************/
11789+/* SPDX-License-Identifier: GPL-2.0 */
11790+/* Copyright(c) 2013 - 2020 Intel Corporation. */
11791
11792 #ifndef _I40E_DIAG_H_
11793 #define _I40E_DIAG_H_
11794diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
11795index 05e89864f..7f93f74d4 100644
11796--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
11797+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
11798@@ -1,54 +1,30 @@
11799-/*******************************************************************************
11800- *
11801- * Intel Ethernet Controller XL710 Family Linux Driver
11802- * Copyright(c) 2013 - 2016 Intel Corporation.
11803- *
11804- * This program is free software; you can redistribute it and/or modify it
11805- * under the terms and conditions of the GNU General Public License,
11806- * version 2, as published by the Free Software Foundation.
11807- *
11808- * This program is distributed in the hope it will be useful, but WITHOUT
11809- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11810- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11811- * more details.
11812- *
11813- * You should have received a copy of the GNU General Public License along
11814- * with this program. If not, see <http://www.gnu.org/licenses/>.
11815- *
11816- * The full GNU General Public License is included in this distribution in
11817- * the file called "COPYING".
11818- *
11819- * Contact Information:
11820- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
11821- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
11822- *
11823- ******************************************************************************/
11824+// SPDX-License-Identifier: GPL-2.0
11825+/* Copyright(c) 2013 - 2020 Intel Corporation. */
11826
11827 /* ethtool support for i40e */
11828
11829 #include "i40e.h"
11830 #include "i40e_diag.h"
11831
11832-struct i40e_stats {
11833- char stat_string[ETH_GSTRING_LEN];
11834- int sizeof_stat;
11835- int stat_offset;
11836-};
11837+#ifdef SIOCETHTOOL
11838+#ifndef ETH_GSTRING_LEN
11839+#define ETH_GSTRING_LEN 32
11840
11841-#define I40E_STAT(_type, _name, _stat) { \
11842- .stat_string = _name, \
11843- .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
11844- .stat_offset = offsetof(_type, _stat) \
11845-}
11846+#endif
11847+#ifdef ETHTOOL_GSTATS
11848+
11849+#include "i40e_ethtool_stats.h"
11850
11851-#define I40E_NETDEV_STAT(_net_stat) \
11852- I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
11853 #define I40E_PF_STAT(_name, _stat) \
11854- I40E_STAT(struct i40e_pf, _name, _stat)
11855+ I40E_STAT(struct i40e_pf, _name, _stat)
11856 #define I40E_VSI_STAT(_name, _stat) \
11857- I40E_STAT(struct i40e_vsi, _name, _stat)
11858+ I40E_STAT(struct i40e_vsi, _name, _stat)
11859 #define I40E_VEB_STAT(_name, _stat) \
11860- I40E_STAT(struct i40e_veb, _name, _stat)
11861+ I40E_STAT(struct i40e_veb, _name, _stat)
11862+#define I40E_PFC_STAT(_name, _stat) \
11863+ I40E_STAT(struct i40e_pfc_stats, _name, _stat)
11864+#define I40E_QUEUE_STAT(_name, _stat) \
11865+ I40E_STAT(struct i40e_ring, _name, _stat)
11866
11867 static const struct i40e_stats i40e_gstrings_net_stats[] = {
11868 I40E_NETDEV_STAT(rx_packets),
11869@@ -65,18 +41,25 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
11870 };
11871
11872 static const struct i40e_stats i40e_gstrings_veb_stats[] = {
11873- I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
11874- I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
11875- I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
11876- I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
11877- I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
11878- I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
11879- I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
11880- I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
11881- I40E_VEB_STAT("rx_discards", stats.rx_discards),
11882- I40E_VEB_STAT("tx_discards", stats.tx_discards),
11883- I40E_VEB_STAT("tx_errors", stats.tx_errors),
11884- I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
11885+ I40E_VEB_STAT("veb.rx_bytes", stats.rx_bytes),
11886+ I40E_VEB_STAT("veb.tx_bytes", stats.tx_bytes),
11887+ I40E_VEB_STAT("veb.rx_unicast", stats.rx_unicast),
11888+ I40E_VEB_STAT("veb.tx_unicast", stats.tx_unicast),
11889+ I40E_VEB_STAT("veb.rx_multicast", stats.rx_multicast),
11890+ I40E_VEB_STAT("veb.tx_multicast", stats.tx_multicast),
11891+ I40E_VEB_STAT("veb.rx_broadcast", stats.rx_broadcast),
11892+ I40E_VEB_STAT("veb.tx_broadcast", stats.tx_broadcast),
11893+ I40E_VEB_STAT("veb.rx_discards", stats.rx_discards),
11894+ I40E_VEB_STAT("veb.tx_discards", stats.tx_discards),
11895+ I40E_VEB_STAT("veb.tx_errors", stats.tx_errors),
11896+ I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
11897+};
11898+
11899+static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
11900+ I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
11901+ I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
11902+ I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
11903+ I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
11904 };
11905
11906 static const struct i40e_stats i40e_gstrings_misc_stats[] = {
11907@@ -89,6 +72,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
11908 I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
11909 I40E_VSI_STAT("tx_linearize", tx_linearize),
11910 I40E_VSI_STAT("tx_force_wb", tx_force_wb),
11911+ I40E_VSI_STAT("tx_busy", tx_busy),
11912 I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
11913 I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
11914 };
11915@@ -104,93 +88,171 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
11916 * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
11917 */
11918 static const struct i40e_stats i40e_gstrings_stats[] = {
11919- I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
11920- I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
11921- I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
11922- I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
11923- I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
11924- I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
11925- I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
11926- I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
11927- I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
11928- I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
11929- I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
11930- I40E_PF_STAT("rx_crc_errors", stats.crc_errors),
11931- I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
11932- I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
11933- I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
11934- I40E_PF_STAT("tx_timeout", tx_timeout_count),
11935- I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
11936- I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
11937- I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
11938- I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
11939- I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
11940- I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
11941- I40E_PF_STAT("rx_size_64", stats.rx_size_64),
11942- I40E_PF_STAT("rx_size_127", stats.rx_size_127),
11943- I40E_PF_STAT("rx_size_255", stats.rx_size_255),
11944- I40E_PF_STAT("rx_size_511", stats.rx_size_511),
11945- I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
11946- I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
11947- I40E_PF_STAT("rx_size_big", stats.rx_size_big),
11948- I40E_PF_STAT("tx_size_64", stats.tx_size_64),
11949- I40E_PF_STAT("tx_size_127", stats.tx_size_127),
11950- I40E_PF_STAT("tx_size_255", stats.tx_size_255),
11951- I40E_PF_STAT("tx_size_511", stats.tx_size_511),
11952- I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
11953- I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
11954- I40E_PF_STAT("tx_size_big", stats.tx_size_big),
11955- I40E_PF_STAT("rx_undersize", stats.rx_undersize),
11956- I40E_PF_STAT("rx_fragments", stats.rx_fragments),
11957- I40E_PF_STAT("rx_oversize", stats.rx_oversize),
11958- I40E_PF_STAT("rx_jabber", stats.rx_jabber),
11959- I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
11960- I40E_PF_STAT("arq_overflows", arq_overflows),
11961- I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
11962- I40E_PF_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
11963- I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
11964- I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
11965- I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
11966- I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
11967- I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
11968- I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
11969+ I40E_PF_STAT("port.rx_bytes", stats.eth.rx_bytes),
11970+ I40E_PF_STAT("port.tx_bytes", stats.eth.tx_bytes),
11971+ I40E_PF_STAT("port.rx_unicast", stats.eth.rx_unicast),
11972+ I40E_PF_STAT("port.tx_unicast", stats.eth.tx_unicast),
11973+ I40E_PF_STAT("port.rx_multicast", stats.eth.rx_multicast),
11974+ I40E_PF_STAT("port.tx_multicast", stats.eth.tx_multicast),
11975+ I40E_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast),
11976+ I40E_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast),
11977+ I40E_PF_STAT("port.tx_errors", stats.eth.tx_errors),
11978+ I40E_PF_STAT("port.rx_dropped", stats.eth.rx_discards),
11979+ I40E_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down),
11980+ I40E_PF_STAT("port.rx_crc_errors", stats.crc_errors),
11981+ I40E_PF_STAT("port.illegal_bytes", stats.illegal_bytes),
11982+ I40E_PF_STAT("port.mac_local_faults", stats.mac_local_faults),
11983+ I40E_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults),
11984+ I40E_PF_STAT("port.tx_timeout", tx_timeout_count),
11985+ I40E_PF_STAT("port.rx_csum_bad", hw_csum_rx_error),
11986+ I40E_PF_STAT("port.rx_length_errors", stats.rx_length_errors),
11987+ I40E_PF_STAT("port.link_xon_rx", stats.link_xon_rx),
11988+ I40E_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx),
11989+ I40E_PF_STAT("port.link_xon_tx", stats.link_xon_tx),
11990+ I40E_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx),
11991+ I40E_PF_STAT("port.rx_size_64", stats.rx_size_64),
11992+ I40E_PF_STAT("port.rx_size_127", stats.rx_size_127),
11993+ I40E_PF_STAT("port.rx_size_255", stats.rx_size_255),
11994+ I40E_PF_STAT("port.rx_size_511", stats.rx_size_511),
11995+ I40E_PF_STAT("port.rx_size_1023", stats.rx_size_1023),
11996+ I40E_PF_STAT("port.rx_size_1522", stats.rx_size_1522),
11997+ I40E_PF_STAT("port.rx_size_big", stats.rx_size_big),
11998+ I40E_PF_STAT("port.tx_size_64", stats.tx_size_64),
11999+ I40E_PF_STAT("port.tx_size_127", stats.tx_size_127),
12000+ I40E_PF_STAT("port.tx_size_255", stats.tx_size_255),
12001+ I40E_PF_STAT("port.tx_size_511", stats.tx_size_511),
12002+ I40E_PF_STAT("port.tx_size_1023", stats.tx_size_1023),
12003+ I40E_PF_STAT("port.tx_size_1522", stats.tx_size_1522),
12004+ I40E_PF_STAT("port.tx_size_big", stats.tx_size_big),
12005+ I40E_PF_STAT("port.rx_undersize", stats.rx_undersize),
12006+ I40E_PF_STAT("port.rx_fragments", stats.rx_fragments),
12007+ I40E_PF_STAT("port.rx_oversize", stats.rx_oversize),
12008+ I40E_PF_STAT("port.rx_jabber", stats.rx_jabber),
12009+ I40E_PF_STAT("port.VF_admin_queue_requests", vf_aq_requests),
12010+ I40E_PF_STAT("port.arq_overflows", arq_overflows),
12011+#ifdef HAVE_PTP_1588_CLOCK
12012+ I40E_PF_STAT("port.tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
12013+ I40E_PF_STAT("port.rx_hwtstamp_cleared", rx_hwtstamp_cleared),
12014+ I40E_PF_STAT("port.tx_hwtstamp_skipped", tx_hwtstamp_skipped),
12015+#endif /* HAVE_PTP_1588_CLOCK */
12016+ I40E_PF_STAT("port.fdir_flush_cnt", fd_flush_cnt),
12017+ I40E_PF_STAT("port.fdir_atr_match", stats.fd_atr_match),
12018+ I40E_PF_STAT("port.fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
12019+ I40E_PF_STAT("port.fdir_atr_status", stats.fd_atr_status),
12020+ I40E_PF_STAT("port.fdir_sb_match", stats.fd_sb_match),
12021+ I40E_PF_STAT("port.fdir_sb_status", stats.fd_sb_status),
12022+#ifdef I40E_ADD_PROBES
12023+ I40E_PF_STAT("port.tx_tcp_segments", tcp_segs),
12024+ I40E_PF_STAT("port.tx_tcp_cso", tx_tcp_cso),
12025+ I40E_PF_STAT("port.tx_udp_cso", tx_udp_cso),
12026+ I40E_PF_STAT("port.tx_sctp_cso", tx_sctp_cso),
12027+ I40E_PF_STAT("port.tx_ip4_cso", tx_ip4_cso),
12028+ I40E_PF_STAT("port.rx_tcp_cso", rx_tcp_cso),
12029+ I40E_PF_STAT("port.rx_udp_cso", rx_udp_cso),
12030+ I40E_PF_STAT("port.rx_sctp_cso", rx_sctp_cso),
12031+ I40E_PF_STAT("port.rx_ip4_cso", rx_ip4_cso),
12032+ I40E_PF_STAT("port.rx_csum_offload_outer", hw_csum_rx_outer),
12033+ I40E_PF_STAT("port.rx_tcp_cso_error", rx_tcp_cso_err),
12034+ I40E_PF_STAT("port.rx_udp_cso_error", rx_udp_cso_err),
12035+ I40E_PF_STAT("port.rx_sctp_cso_error", rx_sctp_cso_err),
12036+ I40E_PF_STAT("port.rx_ip4_cso_error", rx_ip4_cso_err),
12037+#endif
12038
12039 /* LPI stats */
12040- I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
12041- I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
12042- I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
12043- I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
12044+ I40E_PF_STAT("port.tx_lpi_status", stats.tx_lpi_status),
12045+ I40E_PF_STAT("port.rx_lpi_status", stats.rx_lpi_status),
12046+ I40E_PF_STAT("port.tx_lpi_count", stats.tx_lpi_count),
12047+ I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count),
12048+ I40E_PF_STAT("port.tx_lpi_duration", stats.tx_lpi_duration),
12049+ I40E_PF_STAT("port.rx_lpi_duration", stats.rx_lpi_duration),
12050 };
12051
12052-#define I40E_QUEUE_STATS_LEN(n) \
12053- (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
12054- * 2 /* Tx and Rx together */ \
12055- * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
12056-#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
12057-#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
12058+struct i40e_pfc_stats {
12059+ u64 priority_xon_rx;
12060+ u64 priority_xoff_rx;
12061+ u64 priority_xon_tx;
12062+ u64 priority_xoff_tx;
12063+ u64 priority_xon_2_xoff;
12064+};
12065+
12066+static const struct i40e_stats i40e_gstrings_pfc_stats[] = {
12067+ I40E_PFC_STAT("port.tx_priority_%u_xon_tx", priority_xon_tx),
12068+ I40E_PFC_STAT("port.tx_priority_%u_xoff_tx", priority_xoff_tx),
12069+ I40E_PFC_STAT("port.rx_priority_%u_xon_rx", priority_xon_rx),
12070+ I40E_PFC_STAT("port.rx_priority_%u_xoff_rx", priority_xoff_rx),
12071+ I40E_PFC_STAT("port.rx_priority_%u_xon_2_xoff", priority_xon_2_xoff),
12072+};
12073+
12074+#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
12075+
12076 #define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
12077-#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
12078- I40E_MISC_STATS_LEN + \
12079- I40E_QUEUE_STATS_LEN((n)))
12080-#define I40E_PFC_STATS_LEN ( \
12081- (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
12082- FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
12083- FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
12084- FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
12085- FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
12086- / sizeof(u64))
12087-#define I40E_VEB_TC_STATS_LEN ( \
12088- (FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \
12089- FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \
12090- FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
12091- FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
12092- / sizeof(u64))
12093-#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats)
12094-#define I40E_VEB_STATS_TOTAL (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
12095-#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
12096+
12097+#define I40E_VSI_STATS_LEN (I40E_NETDEV_STATS_LEN + I40E_MISC_STATS_LEN)
12098+
12099+#define I40E_PFC_STATS_LEN (ARRAY_SIZE(i40e_gstrings_pfc_stats) * \
12100+ I40E_MAX_USER_PRIORITY)
12101+
12102+#define I40E_VEB_STATS_LEN (ARRAY_SIZE(i40e_gstrings_veb_stats) + \
12103+ (ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \
12104+ I40E_MAX_TRAFFIC_CLASS))
12105+
12106+#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
12107+
12108+/* Length (number) of PF core stats only (i.e. without queues / extra stats): */
12109+#define I40E_PF_STATS_LEN (I40E_GLOBAL_STATS_LEN + \
12110 I40E_PFC_STATS_LEN + \
12111- I40E_VSI_STATS_LEN((n)))
12112+ I40E_VEB_STATS_LEN + \
12113+ I40E_VSI_STATS_LEN)
12114+
12115+/* Length of stats for a single queue */
12116+#define I40E_QUEUE_STATS_LEN ARRAY_SIZE(i40e_gstrings_queue_stats)
12117+#ifdef HAVE_XDP_SUPPORT
12118+#define I40E_QUEUE_STATS_XDP_LEN ARRAY_SIZE(i40e_gstrings_rx_queue_xdp_stats)
12119+#endif
12120
12121+#ifndef I40E_PF_EXTRA_STATS_OFF
12122+
12123+#define I40E_STATS_NAME_VFID_EXTRA "vf___."
12124+#define I40E_STATS_NAME_VFID_EXTRA_LEN (sizeof(I40E_STATS_NAME_VFID_EXTRA) - 1)
12125+
12126+static struct i40e_stats i40e_gstrings_eth_stats_extra[] = {
12127+ I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
12128+ "rx_bytes", eth_stats.rx_bytes),
12129+ I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
12130+ "rx_unicast", eth_stats.rx_unicast),
12131+ I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
12132+ "rx_multicast", eth_stats.rx_multicast),
12133+ I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
12134+ "rx_broadcast", eth_stats.rx_broadcast),
12135+ I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
12136+ "rx_discards", eth_stats.rx_discards),
12137+ I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
12138+ "rx_unknown_protocol", eth_stats.rx_unknown_protocol),
12139+ I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
12140+ "tx_bytes", eth_stats.tx_bytes),
12141+ I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
12142+ "tx_unicast", eth_stats.tx_unicast),
12143+ I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
12144+ "tx_multicast", eth_stats.tx_multicast),
12145+ I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
12146+ "tx_broadcast", eth_stats.tx_broadcast),
12147+ I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
12148+ "tx_discards", eth_stats.tx_discards),
12149+ I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
12150+ "tx_errors", eth_stats.tx_errors),
12151+};
12152+
12153+#define I40E_STATS_EXTRA_COUNT 128 /* currently equal to I40E_MAX_VF_COUNT */
12154+/* The following length value does not include the per-queue stats lengths */
12155+#define I40E_STATS_EXTRA_LEN ARRAY_SIZE(i40e_gstrings_eth_stats_extra)
12156+/* Length (number) of PF extra stats only (i.e. without core stats / queues): */
12157+#define I40E_PF_STATS_EXTRA_LEN (I40E_STATS_EXTRA_COUNT * I40E_STATS_EXTRA_LEN)
12158+/* Length (number) of enhanced/all PF stats (i.e. core with extra stats): */
12159+#define I40E_PF_STATS_ENHANCE_LEN (I40E_PF_STATS_LEN + I40E_PF_STATS_EXTRA_LEN)
12160+
12161+#endif /* !I40E_PF_EXTRA_STATS_OFF */
12162+#endif /* ETHTOOL_GSTATS */
12163+#ifdef ETHTOOL_TEST
12164 enum i40e_ethtool_test_id {
12165 I40E_ETH_TEST_REG = 0,
12166 I40E_ETH_TEST_EEPROM,
12167@@ -207,6 +269,9 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
12168
12169 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
12170
12171+#endif /* ETHTOOL_TEST */
12172+
12173+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
12174 struct i40e_priv_flags {
12175 char flag_string[ETH_GSTRING_LEN];
12176 u64 flag;
12177@@ -222,11 +287,21 @@ struct i40e_priv_flags {
12178 static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
12179 /* NOTE: MFP setting cannot be changed */
12180 I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
12181+ I40E_PRIV_FLAG("total-port-shutdown", I40E_FLAG_TOTAL_PORT_SHUTDOWN, 1),
12182 I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
12183 I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
12184 I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
12185 I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
12186+ I40E_PRIV_FLAG("link-down-on-close",
12187+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED, 0),
12188+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
12189 I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
12190+#endif
12191+ I40E_PRIV_FLAG("disable-source-pruning",
12192+ I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
12193+ I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
12194+ I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
12195+ I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
12196 };
12197
12198 #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
12199@@ -239,6 +314,7 @@ static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = {
12200
12201 #define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags)
12202
12203+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
12204 /**
12205 * i40e_partition_setting_complaint - generic complaint for MFP restriction
12206 * @pf: the PF struct
12207@@ -251,431 +327,1001 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
12208
12209 /**
12210 * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
12211- * @phy_types: PHY types to convert
12212- * @supported: pointer to the ethtool supported variable to fill in
12213- * @advertising: pointer to the ethtool advertising variable to fill in
12214+ * @pf: PF struct with phy_types
12215+ * @ks: ethtool link ksettings struct to fill out
12216 *
12217 **/
12218-static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
12219- u32 *advertising)
12220+static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
12221+ struct ethtool_link_ksettings *ks)
12222 {
12223 struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info;
12224 u64 phy_types = pf->hw.phy.phy_types;
12225
12226- *supported = 0x0;
12227- *advertising = 0x0;
12228+ ethtool_link_ksettings_zero_link_mode(ks, supported);
12229+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
12230
12231 if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
12232- *supported |= SUPPORTED_Autoneg |
12233- SUPPORTED_1000baseT_Full;
12234- *advertising |= ADVERTISED_Autoneg;
12235+ ethtool_link_ksettings_add_link_mode(ks, supported,
12236+ 1000baseT_Full);
12237 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
12238- *advertising |= ADVERTISED_1000baseT_Full;
12239+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12240+ 1000baseT_Full);
12241 if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
12242- *supported |= SUPPORTED_100baseT_Full;
12243- *advertising |= ADVERTISED_100baseT_Full;
12244+ ethtool_link_ksettings_add_link_mode(ks, supported,
12245+ 100baseT_Full);
12246+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12247+ 100baseT_Full);
12248 }
12249 }
12250 if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
12251 phy_types & I40E_CAP_PHY_TYPE_XFI ||
12252 phy_types & I40E_CAP_PHY_TYPE_SFI ||
12253 phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
12254- phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
12255- *supported |= SUPPORTED_10000baseT_Full;
12256- if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
12257- phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
12258- phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
12259- phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
12260- phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
12261- *supported |= SUPPORTED_Autoneg |
12262- SUPPORTED_10000baseT_Full;
12263- *advertising |= ADVERTISED_Autoneg;
12264+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) {
12265+ ethtool_link_ksettings_add_link_mode(ks, supported,
12266+ 10000baseT_Full);
12267+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
12268+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12269+ 10000baseT_Full);
12270+ }
12271+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_T) {
12272+ ethtool_link_ksettings_add_link_mode(ks, supported,
12273+ 10000baseT_Full);
12274 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
12275- *advertising |= ADVERTISED_10000baseT_Full;
12276+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12277+ 10000baseT_Full);
12278+ }
12279+#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS
12280+ if (phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T) {
12281+ ethtool_link_ksettings_add_link_mode(ks, supported,
12282+ 2500baseT_Full);
12283+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
12284+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12285+ 2500baseT_Full);
12286 }
12287+#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */
12288+#ifdef HAVE_ETHTOOL_5G_BITS
12289+ if (phy_types & I40E_CAP_PHY_TYPE_5GBASE_T) {
12290+ ethtool_link_ksettings_add_link_mode(ks, supported,
12291+ 5000baseT_Full);
12292+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
12293+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12294+ 5000baseT_Full);
12295+ }
12296+#endif /* HAVE_ETHTOOL_5G_BITS */
12297 if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
12298 phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
12299 phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
12300- *supported |= SUPPORTED_40000baseCR4_Full;
12301+ ethtool_link_ksettings_add_link_mode(ks, supported,
12302+ 40000baseCR4_Full);
12303 if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
12304 phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
12305- *supported |= SUPPORTED_Autoneg |
12306- SUPPORTED_40000baseCR4_Full;
12307- *advertising |= ADVERTISED_Autoneg;
12308+ ethtool_link_ksettings_add_link_mode(ks, supported,
12309+ 40000baseCR4_Full);
12310 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB)
12311- *advertising |= ADVERTISED_40000baseCR4_Full;
12312+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12313+ 40000baseCR4_Full);
12314 }
12315 if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
12316- *supported |= SUPPORTED_Autoneg |
12317- SUPPORTED_100baseT_Full;
12318- *advertising |= ADVERTISED_Autoneg;
12319+ ethtool_link_ksettings_add_link_mode(ks, supported,
12320+ 100baseT_Full);
12321 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
12322- *advertising |= ADVERTISED_100baseT_Full;
12323+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12324+ 100baseT_Full);
12325 }
12326- if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
12327- phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
12328- phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
12329- phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
12330- *supported |= SUPPORTED_Autoneg |
12331- SUPPORTED_1000baseT_Full;
12332- *advertising |= ADVERTISED_Autoneg;
12333+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T) {
12334+ ethtool_link_ksettings_add_link_mode(ks, supported,
12335+ 1000baseT_Full);
12336 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
12337- *advertising |= ADVERTISED_1000baseT_Full;
12338+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12339+ 1000baseT_Full);
12340+ }
12341+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) {
12342+ ethtool_link_ksettings_add_link_mode(ks, supported,
12343+ 40000baseSR4_Full);
12344+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12345+ 40000baseSR4_Full);
12346+ }
12347+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) {
12348+ ethtool_link_ksettings_add_link_mode(ks, supported,
12349+ 40000baseLR4_Full);
12350+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12351+ 40000baseLR4_Full);
12352 }
12353- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
12354- *supported |= SUPPORTED_40000baseSR4_Full;
12355- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
12356- *supported |= SUPPORTED_40000baseLR4_Full;
12357 if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
12358- *supported |= SUPPORTED_40000baseKR4_Full |
12359- SUPPORTED_Autoneg;
12360- *advertising |= ADVERTISED_40000baseKR4_Full |
12361- ADVERTISED_Autoneg;
12362+ ethtool_link_ksettings_add_link_mode(ks, supported,
12363+ 40000baseKR4_Full);
12364+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12365+ 40000baseKR4_Full);
12366 }
12367 if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
12368- *supported |= SUPPORTED_20000baseKR2_Full |
12369- SUPPORTED_Autoneg;
12370- *advertising |= ADVERTISED_Autoneg;
12371+ ethtool_link_ksettings_add_link_mode(ks, supported,
12372+ 20000baseKR2_Full);
12373 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB)
12374- *advertising |= ADVERTISED_20000baseKR2_Full;
12375+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12376+ 20000baseKR2_Full);
12377 }
12378- if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
12379- if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER))
12380- *supported |= SUPPORTED_10000baseKR_Full |
12381- SUPPORTED_Autoneg;
12382- *advertising |= ADVERTISED_Autoneg;
12383+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
12384+ ethtool_link_ksettings_add_link_mode(ks, supported,
12385+ 10000baseKX4_Full);
12386 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
12387- if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER))
12388- *advertising |= ADVERTISED_10000baseKR_Full;
12389+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12390+ 10000baseKX4_Full);
12391 }
12392- if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
12393- *supported |= SUPPORTED_10000baseKX4_Full |
12394- SUPPORTED_Autoneg;
12395- *advertising |= ADVERTISED_Autoneg;
12396+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR &&
12397+ !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
12398+ ethtool_link_ksettings_add_link_mode(ks, supported,
12399+ 10000baseKR_Full);
12400 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
12401- *advertising |= ADVERTISED_10000baseKX4_Full;
12402+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12403+ 10000baseKR_Full);
12404 }
12405- if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
12406- if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER))
12407- *supported |= SUPPORTED_1000baseKX_Full |
12408- SUPPORTED_Autoneg;
12409- *advertising |= ADVERTISED_Autoneg;
12410+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX &&
12411+ !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
12412+ ethtool_link_ksettings_add_link_mode(ks, supported,
12413+ 1000baseKX_Full);
12414 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
12415- if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER))
12416- *advertising |= ADVERTISED_1000baseKX_Full;
12417+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12418+ 1000baseKX_Full);
12419+ }
12420+#ifdef HAVE_ETHTOOL_25G_BITS
12421+ /* need to add 25G PHY types */
12422+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR) {
12423+ ethtool_link_ksettings_add_link_mode(ks, supported,
12424+ 25000baseKR_Full);
12425+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
12426+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12427+ 25000baseKR_Full);
12428+ }
12429+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR) {
12430+ ethtool_link_ksettings_add_link_mode(ks, supported,
12431+ 25000baseCR_Full);
12432+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
12433+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12434+ 25000baseCR_Full);
12435 }
12436+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
12437+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) {
12438+ ethtool_link_ksettings_add_link_mode(ks, supported,
12439+ 25000baseSR_Full);
12440+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
12441+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12442+ 25000baseSR_Full);
12443+ }
12444+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC ||
12445+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) {
12446+ ethtool_link_ksettings_add_link_mode(ks, supported,
12447+ 25000baseCR_Full);
12448+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
12449+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12450+ 25000baseCR_Full);
12451+ }
12452+#ifdef ETHTOOL_GFECPARAM
12453 if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
12454 phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
12455 phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
12456- phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) {
12457- *supported |= SUPPORTED_Autoneg;
12458- *advertising |= ADVERTISED_Autoneg;
12459+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR ||
12460+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC ||
12461+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) {
12462+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
12463+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
12464+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
12465+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) {
12466+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12467+ FEC_NONE);
12468+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12469+ FEC_RS);
12470+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12471+ FEC_BASER);
12472+ }
12473+ }
12474+#endif /* ETHTOOL_GFECPARAM */
12475+#endif /* HAVE_ETHTOOL_25G_BITS */
12476+#ifdef HAVE_ETHTOOL_NEW_10G_BITS
12477+ /* need to add new 10G PHY types */
12478+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
12479+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) {
12480+ ethtool_link_ksettings_add_link_mode(ks, supported,
12481+ 10000baseCR_Full);
12482+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
12483+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12484+ 10000baseCR_Full);
12485+ }
12486+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR) {
12487+ ethtool_link_ksettings_add_link_mode(ks, supported,
12488+ 10000baseSR_Full);
12489+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
12490+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12491+ 10000baseSR_Full);
12492+ }
12493+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
12494+ ethtool_link_ksettings_add_link_mode(ks, supported,
12495+ 10000baseLR_Full);
12496+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
12497+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12498+ 10000baseLR_Full);
12499+ }
12500+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
12501+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
12502+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
12503+ ethtool_link_ksettings_add_link_mode(ks, supported,
12504+ 1000baseX_Full);
12505+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
12506+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12507+ 1000baseX_Full);
12508+ }
12509+#else
12510+ /* need to keep backward compatibility with older kernels */
12511+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
12512+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
12513+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
12514+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
12515+ ethtool_link_ksettings_add_link_mode(ks, supported,
12516+ 10000baseT_Full);
12517+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
12518+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12519+ 10000baseT_Full);
12520+ }
12521+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
12522+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
12523+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
12524+ ethtool_link_ksettings_add_link_mode(ks, supported,
12525+ 1000baseT_Full);
12526+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
12527+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12528+ 1000baseT_Full);
12529+ }
12530+#endif /* HAVE_ETHTOOL_NEW_10G_BITS */
12531+ /* Autoneg PHY types */
12532+ if (phy_types & I40E_CAP_PHY_TYPE_SGMII ||
12533+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4 ||
12534+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
12535+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4 ||
12536+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
12537+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR ||
12538+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
12539+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
12540+ phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2 ||
12541+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
12542+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR ||
12543+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4 ||
12544+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR ||
12545+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
12546+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
12547+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
12548+ phy_types & I40E_CAP_PHY_TYPE_5GBASE_T ||
12549+ phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T ||
12550+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL ||
12551+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
12552+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
12553+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
12554+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX ||
12555+ phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
12556+ ethtool_link_ksettings_add_link_mode(ks, supported,
12557+ Autoneg);
12558+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12559+ Autoneg);
12560 }
12561 }
12562
12563+#ifdef ETHTOOL_GFECPARAM
12564 /**
12565- * i40e_get_settings_link_up - Get the Link settings for when link is up
12566+ * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask
12567+ * @req_fec_info: mask request fec info
12568+ * @ks: ethtool ksettings to fill in
12569+ **/
12570+static void i40e_get_settings_link_up_fec(u8 req_fec_info,
12571+ struct ethtool_link_ksettings *ks)
12572+{
12573+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
12574+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
12575+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
12576+
12577+ if ((I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) &&
12578+ (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info)) {
12579+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12580+ FEC_NONE);
12581+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12582+ FEC_BASER);
12583+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
12584+ } else if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
12585+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
12586+ } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
12587+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12588+ FEC_BASER);
12589+ } else {
12590+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12591+ FEC_NONE);
12592+ }
12593+}
12594+#endif /* ETHTOOL_GFECPARAM */
12595+
12596+/**
12597+ * i40e_get_settings_link_up - Get Link settings for when link is up
12598 * @hw: hw structure
12599- * @ecmd: ethtool command to fill in
12600+ * @ks: ethtool ksettings to fill in
12601 * @netdev: network interface device structure
12602- *
12603+ * @pf: pointer to physical function struct
12604 **/
12605 static void i40e_get_settings_link_up(struct i40e_hw *hw,
12606- struct ethtool_link_ksettings *cmd,
12607+ struct ethtool_link_ksettings *ks,
12608 struct net_device *netdev,
12609 struct i40e_pf *pf)
12610 {
12611 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
12612+ struct ethtool_link_ksettings cap_ksettings;
12613 u32 link_speed = hw_link_info->link_speed;
12614- u32 e_advertising = 0x0;
12615- u32 e_supported = 0x0;
12616- u32 supported, advertising;
12617-
12618- ethtool_convert_link_mode_to_legacy_u32(&supported,
12619- cmd->link_modes.supported);
12620- ethtool_convert_link_mode_to_legacy_u32(&advertising,
12621- cmd->link_modes.advertising);
12622
12623 /* Initialize supported and advertised settings based on phy settings */
12624 switch (hw_link_info->phy_type) {
12625 case I40E_PHY_TYPE_40GBASE_CR4:
12626 case I40E_PHY_TYPE_40GBASE_CR4_CU:
12627- supported = SUPPORTED_Autoneg |
12628- SUPPORTED_40000baseCR4_Full;
12629- advertising = ADVERTISED_Autoneg |
12630- ADVERTISED_40000baseCR4_Full;
12631+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
12632+ ethtool_link_ksettings_add_link_mode(ks, supported,
12633+ 40000baseCR4_Full);
12634+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
12635+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12636+ 40000baseCR4_Full);
12637 break;
12638 case I40E_PHY_TYPE_XLAUI:
12639 case I40E_PHY_TYPE_XLPPI:
12640 case I40E_PHY_TYPE_40GBASE_AOC:
12641- supported = SUPPORTED_40000baseCR4_Full;
12642+ ethtool_link_ksettings_add_link_mode(ks, supported,
12643+ 40000baseCR4_Full);
12644+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12645+ 40000baseCR4_Full);
12646 break;
12647 case I40E_PHY_TYPE_40GBASE_SR4:
12648- supported = SUPPORTED_40000baseSR4_Full;
12649+ ethtool_link_ksettings_add_link_mode(ks, supported,
12650+ 40000baseSR4_Full);
12651+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12652+ 40000baseSR4_Full);
12653 break;
12654 case I40E_PHY_TYPE_40GBASE_LR4:
12655- supported = SUPPORTED_40000baseLR4_Full;
12656+ ethtool_link_ksettings_add_link_mode(ks, supported,
12657+ 40000baseLR4_Full);
12658+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12659+ 40000baseLR4_Full);
12660 break;
12661+ case I40E_PHY_TYPE_25GBASE_SR:
12662+ case I40E_PHY_TYPE_25GBASE_LR:
12663 case I40E_PHY_TYPE_10GBASE_SR:
12664 case I40E_PHY_TYPE_10GBASE_LR:
12665 case I40E_PHY_TYPE_1000BASE_SX:
12666 case I40E_PHY_TYPE_1000BASE_LX:
12667- supported = SUPPORTED_10000baseT_Full;
12668+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
12669+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
12670+#ifdef HAVE_ETHTOOL_25G_BITS
12671+ ethtool_link_ksettings_add_link_mode(ks, supported,
12672+ 25000baseSR_Full);
12673+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12674+ 25000baseSR_Full);
12675+#ifdef ETHTOOL_GFECPARAM
12676+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
12677+#endif /* ETHTOOL_GFECPARAM */
12678+#endif /* HAVE_ETHTOOL_25G_BITS */
12679+#ifdef HAVE_ETHTOOL_NEW_10G_BITS
12680+ ethtool_link_ksettings_add_link_mode(ks, supported,
12681+ 10000baseSR_Full);
12682+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12683+ 10000baseSR_Full);
12684+ ethtool_link_ksettings_add_link_mode(ks, supported,
12685+ 10000baseLR_Full);
12686+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12687+ 10000baseLR_Full);
12688+ ethtool_link_ksettings_add_link_mode(ks, supported,
12689+ 1000baseX_Full);
12690+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12691+ 1000baseX_Full);
12692+#endif /* HAVE_ETHTOOL_NEW_10G_BITS */
12693+ ethtool_link_ksettings_add_link_mode(ks, supported,
12694+ 10000baseT_Full);
12695 if (hw_link_info->module_type[2] &
12696 I40E_MODULE_TYPE_1000BASE_SX ||
12697 hw_link_info->module_type[2] &
12698 I40E_MODULE_TYPE_1000BASE_LX) {
12699- supported |= SUPPORTED_1000baseT_Full;
12700+ ethtool_link_ksettings_add_link_mode(ks, supported,
12701+ 1000baseT_Full);
12702 if (hw_link_info->requested_speeds &
12703 I40E_LINK_SPEED_1GB)
12704- advertising |= ADVERTISED_1000baseT_Full;
12705+ ethtool_link_ksettings_add_link_mode(
12706+ ks, advertising, 1000baseT_Full);
12707 }
12708 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
12709- advertising |= ADVERTISED_10000baseT_Full;
12710+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12711+ 10000baseT_Full);
12712 break;
12713 case I40E_PHY_TYPE_10GBASE_T:
12714+ case I40E_PHY_TYPE_5GBASE_T:
12715+ case I40E_PHY_TYPE_2_5GBASE_T:
12716 case I40E_PHY_TYPE_1000BASE_T:
12717 case I40E_PHY_TYPE_100BASE_TX:
12718- supported = SUPPORTED_Autoneg |
12719- SUPPORTED_10000baseT_Full |
12720- SUPPORTED_1000baseT_Full |
12721- SUPPORTED_100baseT_Full;
12722- advertising = ADVERTISED_Autoneg;
12723+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
12724+ ethtool_link_ksettings_add_link_mode(ks, supported,
12725+ 10000baseT_Full);
12726+#ifdef HAVE_ETHTOOL_5G_BITS
12727+ ethtool_link_ksettings_add_link_mode(ks, supported,
12728+ 5000baseT_Full);
12729+#endif /* HAVE_ETHTOOL_5G_BITS */
12730+#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS
12731+ ethtool_link_ksettings_add_link_mode(ks, supported,
12732+ 2500baseT_Full);
12733+#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */
12734+ ethtool_link_ksettings_add_link_mode(ks, supported,
12735+ 1000baseT_Full);
12736+ ethtool_link_ksettings_add_link_mode(ks, supported,
12737+ 100baseT_Full);
12738+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
12739 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
12740- advertising |= ADVERTISED_10000baseT_Full;
12741+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12742+ 10000baseT_Full);
12743+#ifdef HAVE_ETHTOOL_5G_BITS
12744+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
12745+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12746+ 5000baseT_Full);
12747+#endif /* HAVE_ETHTOOL_5G_BITS */
12748+#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS
12749+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
12750+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12751+ 2500baseT_Full);
12752+#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */
12753 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
12754- advertising |= ADVERTISED_1000baseT_Full;
12755+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12756+ 1000baseT_Full);
12757 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
12758- advertising |= ADVERTISED_100baseT_Full;
12759+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12760+ 100baseT_Full);
12761 break;
12762 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
12763- supported = SUPPORTED_Autoneg |
12764- SUPPORTED_1000baseT_Full;
12765- advertising = ADVERTISED_Autoneg |
12766- ADVERTISED_1000baseT_Full;
12767+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
12768+ ethtool_link_ksettings_add_link_mode(ks, supported,
12769+ 1000baseT_Full);
12770+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
12771+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12772+ 1000baseT_Full);
12773 break;
12774 case I40E_PHY_TYPE_10GBASE_CR1_CU:
12775 case I40E_PHY_TYPE_10GBASE_CR1:
12776- supported = SUPPORTED_Autoneg |
12777- SUPPORTED_10000baseT_Full;
12778- advertising = ADVERTISED_Autoneg |
12779- ADVERTISED_10000baseT_Full;
12780+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
12781+ ethtool_link_ksettings_add_link_mode(ks, supported,
12782+ 10000baseT_Full);
12783+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
12784+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12785+ 10000baseT_Full);
12786 break;
12787 case I40E_PHY_TYPE_XAUI:
12788 case I40E_PHY_TYPE_XFI:
12789 case I40E_PHY_TYPE_SFI:
12790 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
12791 case I40E_PHY_TYPE_10GBASE_AOC:
12792- supported = SUPPORTED_10000baseT_Full;
12793- advertising = SUPPORTED_10000baseT_Full;
12794+ ethtool_link_ksettings_add_link_mode(ks, supported,
12795+ 10000baseT_Full);
12796+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
12797+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12798+ 10000baseT_Full);
12799 break;
12800 case I40E_PHY_TYPE_SGMII:
12801- supported = SUPPORTED_Autoneg |
12802- SUPPORTED_1000baseT_Full;
12803+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
12804+ ethtool_link_ksettings_add_link_mode(ks, supported,
12805+ 1000baseT_Full);
12806 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
12807- advertising |= ADVERTISED_1000baseT_Full;
12808+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12809+ 1000baseT_Full);
12810 if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
12811- supported |= SUPPORTED_100baseT_Full;
12812+ ethtool_link_ksettings_add_link_mode(ks, supported,
12813+ 100baseT_Full);
12814 if (hw_link_info->requested_speeds &
12815 I40E_LINK_SPEED_100MB)
12816- advertising |= ADVERTISED_100baseT_Full;
12817+ ethtool_link_ksettings_add_link_mode(
12818+ ks, advertising, 100baseT_Full);
12819 }
12820 break;
12821 case I40E_PHY_TYPE_40GBASE_KR4:
12822+ case I40E_PHY_TYPE_25GBASE_KR:
12823 case I40E_PHY_TYPE_20GBASE_KR2:
12824 case I40E_PHY_TYPE_10GBASE_KR:
12825 case I40E_PHY_TYPE_10GBASE_KX4:
12826 case I40E_PHY_TYPE_1000BASE_KX:
12827- supported |= SUPPORTED_40000baseKR4_Full |
12828- SUPPORTED_20000baseKR2_Full |
12829- SUPPORTED_10000baseKR_Full |
12830- SUPPORTED_10000baseKX4_Full |
12831- SUPPORTED_1000baseKX_Full |
12832- SUPPORTED_Autoneg;
12833- advertising |= ADVERTISED_40000baseKR4_Full |
12834- ADVERTISED_20000baseKR2_Full |
12835- ADVERTISED_10000baseKR_Full |
12836- ADVERTISED_10000baseKX4_Full |
12837- ADVERTISED_1000baseKX_Full |
12838- ADVERTISED_Autoneg;
12839+ ethtool_link_ksettings_add_link_mode(ks, supported,
12840+ 40000baseKR4_Full);
12841+#ifdef HAVE_ETHTOOL_25G_BITS
12842+ ethtool_link_ksettings_add_link_mode(ks, supported,
12843+ 25000baseKR_Full);
12844+#endif /* HAVE_ETHTOOL_25G_BITS */
12845+ ethtool_link_ksettings_add_link_mode(ks, supported,
12846+ 20000baseKR2_Full);
12847+ ethtool_link_ksettings_add_link_mode(ks, supported,
12848+ 10000baseKR_Full);
12849+ ethtool_link_ksettings_add_link_mode(ks, supported,
12850+ 10000baseKX4_Full);
12851+ ethtool_link_ksettings_add_link_mode(ks, supported,
12852+ 1000baseKX_Full);
12853+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
12854+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12855+ 40000baseKR4_Full);
12856+#ifdef HAVE_ETHTOOL_25G_BITS
12857+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12858+ 25000baseKR_Full);
12859+#ifdef ETHTOOL_GFECPARAM
12860+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
12861+#endif /* ETHTOOL_GFECPARAM */
12862+#endif /* HAVE_ETHTOOL_25G_BITS */
12863+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12864+ 20000baseKR2_Full);
12865+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12866+ 10000baseKR_Full);
12867+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12868+ 10000baseKX4_Full);
12869+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12870+ 1000baseKX_Full);
12871+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
12872 break;
12873- case I40E_PHY_TYPE_25GBASE_KR:
12874 case I40E_PHY_TYPE_25GBASE_CR:
12875- case I40E_PHY_TYPE_25GBASE_SR:
12876- case I40E_PHY_TYPE_25GBASE_LR:
12877- supported = SUPPORTED_Autoneg;
12878- advertising = ADVERTISED_Autoneg;
12879- /* TODO: add speeds when ethtool is ready to support*/
12880+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
12881+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
12882+#ifdef HAVE_ETHTOOL_25G_BITS
12883+ ethtool_link_ksettings_add_link_mode(ks, supported,
12884+ 25000baseCR_Full);
12885+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12886+ 25000baseCR_Full);
12887+#ifdef ETHTOOL_GFECPARAM
12888+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
12889+#endif /* ETHTOOL_GFECPARAM */
12890+
12891+#endif /* HAVE_ETHTOOL_25G_BITS */
12892+ break;
12893+ case I40E_PHY_TYPE_25GBASE_AOC:
12894+ case I40E_PHY_TYPE_25GBASE_ACC:
12895+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
12896+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
12897+#ifdef HAVE_ETHTOOL_25G_BITS
12898+ ethtool_link_ksettings_add_link_mode(ks, supported,
12899+ 25000baseCR_Full);
12900+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12901+ 25000baseCR_Full);
12902+#ifdef ETHTOOL_GFECPARAM
12903+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
12904+#endif /* ETHTOOL_GFECPARAM */
12905+
12906+#endif /* HAVE_ETHTOOL_25G_BITS */
12907+#ifdef HAVE_ETHTOOL_NEW_10G_BITS
12908+ ethtool_link_ksettings_add_link_mode(ks, supported,
12909+ 10000baseCR_Full);
12910+ ethtool_link_ksettings_add_link_mode(ks, advertising,
12911+ 10000baseCR_Full);
12912+#endif /* HAVE_ETHTOOL_NEW_10G_BITS */
12913 break;
12914 default:
12915 /* if we got here and link is up something bad is afoot */
12916- netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
12917+ netdev_info(netdev,
12918+ "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
12919 hw_link_info->phy_type);
12920 }
12921
12922 /* Now that we've worked out everything that could be supported by the
12923- * current PHY type, get what is supported by the NVM and them to
12924- * get what is truly supported
12925+ * current PHY type, get what is supported by the NVM and intersect
12926+ * them to get what is truly supported
12927 */
12928- i40e_phy_type_to_ethtool(pf, &e_supported,
12929- &e_advertising);
12930-
12931- supported = supported & e_supported;
12932- advertising = advertising & e_advertising;
12933+ memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings));
12934+ i40e_phy_type_to_ethtool(pf, &cap_ksettings);
12935+ ethtool_intersect_link_masks(ks, &cap_ksettings);
12936
12937 /* Set speed and duplex */
12938 switch (link_speed) {
12939 case I40E_LINK_SPEED_40GB:
12940- cmd->base.speed = SPEED_40000;
12941+ ks->base.speed = SPEED_40000;
12942 break;
12943 case I40E_LINK_SPEED_25GB:
12944-#ifdef SPEED_25000
12945- cmd->base.speed = SPEED_25000;
12946-#else
12947- netdev_info(netdev,
12948- "Speed is 25G, display not supported by this version of ethtool.\n");
12949-#endif
12950+ ks->base.speed = SPEED_25000;
12951 break;
12952 case I40E_LINK_SPEED_20GB:
12953- cmd->base.speed = SPEED_20000;
12954+ ks->base.speed = SPEED_20000;
12955 break;
12956 case I40E_LINK_SPEED_10GB:
12957- cmd->base.speed = SPEED_10000;
12958+ ks->base.speed = SPEED_10000;
12959+ break;
12960+ case I40E_LINK_SPEED_5GB:
12961+ ks->base.speed = SPEED_5000;
12962+ break;
12963+ case I40E_LINK_SPEED_2_5GB:
12964+ ks->base.speed = SPEED_2500;
12965 break;
12966 case I40E_LINK_SPEED_1GB:
12967- cmd->base.speed = SPEED_1000;
12968+ ks->base.speed = SPEED_1000;
12969 break;
12970 case I40E_LINK_SPEED_100MB:
12971- cmd->base.speed = SPEED_100;
12972+ ks->base.speed = SPEED_100;
12973 break;
12974 default:
12975+ ks->base.speed = SPEED_UNKNOWN;
12976 break;
12977 }
12978- cmd->base.duplex = DUPLEX_FULL;
12979-
12980- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12981- supported);
12982- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12983- advertising);
12984+ ks->base.duplex = DUPLEX_FULL;
12985 }
12986
12987 /**
12988- * i40e_get_settings_link_down - Get the Link settings for when link is down
12989+ * i40e_get_settings_link_down - Get the Link settings when link is down
12990 * @hw: hw structure
12991- * @ecmd: ethtool command to fill in
12992+ * @ks: ethtool ksettings to fill in
12993+ * @pf: pointer to physical function struct
12994 *
12995 * Reports link settings that can be determined when link is down
12996 **/
12997 static void i40e_get_settings_link_down(struct i40e_hw *hw,
12998- struct ethtool_link_ksettings *cmd,
12999+ struct ethtool_link_ksettings *ks,
13000 struct i40e_pf *pf)
13001 {
13002- u32 supported, advertising;
13003-
13004 /* link is down and the driver needs to fall back on
13005 * supported phy types to figure out what info to display
13006 */
13007- i40e_phy_type_to_ethtool(pf, &supported, &advertising);
13008-
13009- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
13010- supported);
13011- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
13012- advertising);
13013-
13014+ i40e_phy_type_to_ethtool(pf, ks);
13015 /* With no link speed and duplex are unknown */
13016- cmd->base.speed = SPEED_UNKNOWN;
13017- cmd->base.duplex = DUPLEX_UNKNOWN;
13018+ ks->base.speed = SPEED_UNKNOWN;
13019+ ks->base.duplex = DUPLEX_UNKNOWN;
13020 }
13021
13022 /**
13023- * i40e_get_settings - Get Link Speed and Duplex settings
13024+ * i40e_get_link_ksettings - Get Link Speed and Duplex settings
13025 * @netdev: network interface device structure
13026- * @ecmd: ethtool command
13027+ * @ks: ethtool ksettings
13028 *
13029 * Reports speed/duplex settings based on media_type
13030 **/
13031 static int i40e_get_link_ksettings(struct net_device *netdev,
13032- struct ethtool_link_ksettings *cmd)
13033+ struct ethtool_link_ksettings *ks)
13034 {
13035 struct i40e_netdev_priv *np = netdev_priv(netdev);
13036 struct i40e_pf *pf = np->vsi->back;
13037 struct i40e_hw *hw = &pf->hw;
13038 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
13039 bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
13040- u32 advertising;
13041+
13042+ ethtool_link_ksettings_zero_link_mode(ks, supported);
13043+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
13044
13045 if (link_up)
13046- i40e_get_settings_link_up(hw, cmd, netdev, pf);
13047+ i40e_get_settings_link_up(hw, ks, netdev, pf);
13048 else
13049- i40e_get_settings_link_down(hw, cmd, pf);
13050+ i40e_get_settings_link_down(hw, ks, pf);
13051
13052 /* Now set the settings that don't rely on link being up/down */
13053 /* Set autoneg settings */
13054- cmd->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
13055- AUTONEG_ENABLE : AUTONEG_DISABLE);
13056+ ks->base.autoneg = (hw_link_info->an_info & I40E_AQ_AN_COMPLETED ?
13057+ AUTONEG_ENABLE : AUTONEG_DISABLE);
13058
13059+ /* Set media type settings */
13060 switch (hw->phy.media_type) {
13061 case I40E_MEDIA_TYPE_BACKPLANE:
13062- ethtool_link_ksettings_add_link_mode(cmd, supported,
13063- Autoneg);
13064- ethtool_link_ksettings_add_link_mode(cmd, supported,
13065+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
13066+ ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
13067+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
13068+ ethtool_link_ksettings_add_link_mode(ks, advertising,
13069 Backplane);
13070- ethtool_link_ksettings_add_link_mode(cmd, advertising,
13071- Autoneg);
13072- ethtool_link_ksettings_add_link_mode(cmd, advertising,
13073- Backplane);
13074- cmd->base.port = PORT_NONE;
13075+ ks->base.port = PORT_NONE;
13076 break;
13077 case I40E_MEDIA_TYPE_BASET:
13078- ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
13079- ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
13080- cmd->base.port = PORT_TP;
13081+ ethtool_link_ksettings_add_link_mode(ks, supported, TP);
13082+ ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
13083+ ks->base.port = PORT_TP;
13084 break;
13085 case I40E_MEDIA_TYPE_DA:
13086 case I40E_MEDIA_TYPE_CX4:
13087- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
13088- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
13089- cmd->base.port = PORT_DA;
13090+ ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
13091+ ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
13092+ ks->base.port = PORT_DA;
13093 break;
13094 case I40E_MEDIA_TYPE_FIBER:
13095- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
13096- cmd->base.port = PORT_FIBRE;
13097+ ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
13098+ ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
13099+ ks->base.port = PORT_FIBRE;
13100 break;
13101 case I40E_MEDIA_TYPE_UNKNOWN:
13102 default:
13103- cmd->base.port = PORT_OTHER;
13104+ ks->base.port = PORT_OTHER;
13105 break;
13106 }
13107
13108 /* Set flow control settings */
13109- ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
13110+ ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
13111
13112 switch (hw->fc.requested_mode) {
13113 case I40E_FC_FULL:
13114- ethtool_link_ksettings_add_link_mode(cmd, advertising,
13115- Pause);
13116+ ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
13117 break;
13118 case I40E_FC_TX_PAUSE:
13119- ethtool_link_ksettings_add_link_mode(cmd, advertising,
13120+ ethtool_link_ksettings_add_link_mode(ks, advertising,
13121 Asym_Pause);
13122 break;
13123 case I40E_FC_RX_PAUSE:
13124- ethtool_link_ksettings_add_link_mode(cmd, advertising,
13125- Pause);
13126- ethtool_link_ksettings_add_link_mode(cmd, advertising,
13127+ ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
13128+ ethtool_link_ksettings_add_link_mode(ks, advertising,
13129 Asym_Pause);
13130 break;
13131 default:
13132- ethtool_convert_link_mode_to_legacy_u32(
13133- &advertising, cmd->link_modes.advertising);
13134+ ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
13135+ ethtool_link_ksettings_del_link_mode(ks, advertising,
13136+ Asym_Pause);
13137+ break;
13138+ }
13139+ return 0;
13140+}
13141
13142- advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
13143+#ifdef ETHTOOL_GLINKSETTINGS
13144+/**
13145+ * i40e_set_link_ksettings - Set Speed and Duplex
13146+ * @netdev: network interface device structure
13147+ * @ks: ethtool ksettings
13148+ *
13149+ * Set speed/duplex per media_types advertised/forced
13150+ **/
13151+static int i40e_set_link_ksettings(struct net_device *netdev,
13152+ const struct ethtool_link_ksettings *ks)
13153+{
13154+ struct i40e_netdev_priv *np = netdev_priv(netdev);
13155+ struct i40e_aq_get_phy_abilities_resp abilities;
13156+ struct ethtool_link_ksettings safe_ks;
13157+ struct ethtool_link_ksettings copy_ks;
13158+ struct i40e_aq_set_phy_config config;
13159+ struct i40e_pf *pf = np->vsi->back;
13160+ struct i40e_vsi *vsi = np->vsi;
13161+ struct i40e_hw *hw = &pf->hw;
13162+ bool autoneg_changed = false;
13163+ i40e_status status = 0;
13164+ int timeout = 50;
13165+ int err = 0;
13166+ u8 autoneg;
13167
13168- ethtool_convert_legacy_u32_to_link_mode(
13169- cmd->link_modes.advertising, advertising);
13170- break;
13171+ /* Changing port settings is not supported if this isn't the
13172+ * port's controlling PF
13173+ */
13174+ if (hw->partition_id != 1) {
13175+ i40e_partition_setting_complaint(pf);
13176+ return -EOPNOTSUPP;
13177+ }
13178+ if (vsi != pf->vsi[pf->lan_vsi])
13179+ return -EOPNOTSUPP;
13180+ if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
13181+ hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
13182+ hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
13183+ hw->phy.media_type != I40E_MEDIA_TYPE_DA &&
13184+ hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
13185+ return -EOPNOTSUPP;
13186+ if (hw->device_id == I40E_DEV_ID_KX_B ||
13187+ hw->device_id == I40E_DEV_ID_KX_C ||
13188+ hw->device_id == I40E_DEV_ID_20G_KR2 ||
13189+ hw->device_id == I40E_DEV_ID_20G_KR2_A ||
13190+ hw->device_id == I40E_DEV_ID_25G_B ||
13191+ hw->device_id == I40E_DEV_ID_KX_X722) {
13192+ netdev_info(netdev, "Changing settings is not supported on backplane.\n");
13193+ return -EOPNOTSUPP;
13194+ }
13195+
13196+ /* copy the ksettings to copy_ks to avoid modifying the origin */
13197+ memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings));
13198+
13199+ /* save autoneg out of ksettings */
13200+ autoneg = copy_ks.base.autoneg;
13201+
13202+ /* get our own copy of the bits to check against */
13203+ memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
13204+ safe_ks.base.cmd = copy_ks.base.cmd;
13205+ safe_ks.base.link_mode_masks_nwords =
13206+ copy_ks.base.link_mode_masks_nwords;
13207+ i40e_get_link_ksettings(netdev, &safe_ks);
13208+
13209+ /* Get link modes supported by hardware and check against modes
13210+ * requested by user. Return an error if unsupported mode was set.
13211+ */
13212+ if (!bitmap_subset(copy_ks.link_modes.advertising,
13213+ safe_ks.link_modes.supported,
13214+ __ETHTOOL_LINK_MODE_MASK_NBITS))
13215+ return -EINVAL;
13216+
13217+ /* set autoneg back to what it currently is */
13218+ copy_ks.base.autoneg = safe_ks.base.autoneg;
13219+
13220+ /* If copy_ks.base and safe_ks.base are not the same now, then they are
13221+ * trying to set something that we do not support.
13222+ */
13223+ if (memcmp(&copy_ks.base, &safe_ks.base,
13224+ sizeof(struct ethtool_link_settings)))
13225+ return -EOPNOTSUPP;
13226+
13227+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
13228+ timeout--;
13229+ if (!timeout)
13230+ return -EBUSY;
13231+ usleep_range(1000, 2000);
13232+ }
13233+
13234+ /* Get the current phy config */
13235+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
13236+ NULL);
13237+ if (status) {
13238+ err = -EAGAIN;
13239+ goto done;
13240+ }
13241+
13242+ /* Copy abilities to config in case autoneg is not
13243+ * set below
13244+ */
13245+ memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
13246+ config.abilities = abilities.abilities;
13247+
13248+ /* Check autoneg */
13249+ if (autoneg == AUTONEG_ENABLE) {
13250+ /* If autoneg was not already enabled */
13251+ if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
13252+ /* If autoneg is not supported, return error */
13253+ if (!ethtool_link_ksettings_test_link_mode(
13254+ &safe_ks, supported, Autoneg)) {
13255+ netdev_info(netdev, "Autoneg not supported on this phy\n");
13256+ err = -EINVAL;
13257+ goto done;
13258+ }
13259+ /* Autoneg is allowed to change */
13260+ config.abilities = abilities.abilities |
13261+ I40E_AQ_PHY_ENABLE_AN;
13262+ autoneg_changed = true;
13263+ }
13264+ } else {
13265+ /* If autoneg is currently enabled */
13266+ if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
13267+ /* If autoneg is supported 10GBASE_T is the only PHY
13268+ * that can disable it, so otherwise return error
13269+ */
13270+ if (ethtool_link_ksettings_test_link_mode(
13271+ &safe_ks, supported, Autoneg) &&
13272+ hw->phy.link_info.phy_type !=
13273+ I40E_PHY_TYPE_10GBASE_T) {
13274+ netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
13275+ err = -EINVAL;
13276+ goto done;
13277+ }
13278+ /* Autoneg is allowed to change */
13279+ config.abilities = abilities.abilities &
13280+ ~I40E_AQ_PHY_ENABLE_AN;
13281+ autoneg_changed = true;
13282+ }
13283+ }
13284+
13285+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
13286+ 100baseT_Full))
13287+ config.link_speed |= I40E_LINK_SPEED_100MB;
13288+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
13289+ 1000baseT_Full) ||
13290+#ifdef HAVE_ETHTOOL_NEW_10G_BITS
13291+ ethtool_link_ksettings_test_link_mode(ks, advertising,
13292+ 1000baseX_Full) ||
13293+#endif
13294+ ethtool_link_ksettings_test_link_mode(ks, advertising,
13295+ 1000baseKX_Full))
13296+ config.link_speed |= I40E_LINK_SPEED_1GB;
13297+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
13298+ 10000baseT_Full) ||
13299+ ethtool_link_ksettings_test_link_mode(ks, advertising,
13300+ 10000baseKX4_Full) ||
13301+ ethtool_link_ksettings_test_link_mode(ks, advertising,
13302+ 10000baseKR_Full) ||
13303+#ifdef HAVE_ETHTOOL_NEW_10G_BITS
13304+ ethtool_link_ksettings_test_link_mode(ks, advertising,
13305+ 10000baseCR_Full) ||
13306+ ethtool_link_ksettings_test_link_mode(ks, advertising,
13307+ 10000baseSR_Full) ||
13308+ ethtool_link_ksettings_test_link_mode(ks, advertising,
13309+ 10000baseLR_Full))
13310+#else
13311+ 0)
13312+#endif /* HAVE_ETHTOOL_NEW_10G_BITS */
13313+ config.link_speed |= I40E_LINK_SPEED_10GB;
13314+#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS
13315+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
13316+ 2500baseT_Full))
13317+ config.link_speed |= I40E_LINK_SPEED_2_5GB;
13318+#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */
13319+#ifdef HAVE_ETHTOOL_5G_BITS
13320+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
13321+ 5000baseT_Full))
13322+ config.link_speed |= I40E_LINK_SPEED_5GB;
13323+#endif /* HAVE_ETHTOOL_5G_BITS */
13324+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
13325+ 20000baseKR2_Full))
13326+ config.link_speed |= I40E_LINK_SPEED_20GB;
13327+#ifdef HAVE_ETHTOOL_25G_BITS
13328+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
13329+ 25000baseCR_Full) ||
13330+ ethtool_link_ksettings_test_link_mode(ks, advertising,
13331+ 25000baseKR_Full) ||
13332+ ethtool_link_ksettings_test_link_mode(ks, advertising,
13333+ 25000baseSR_Full))
13334+ config.link_speed |= I40E_LINK_SPEED_25GB;
13335+#endif /* HAVE_ETHTOOL_25G_BITS */
13336+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
13337+ 40000baseKR4_Full) ||
13338+ ethtool_link_ksettings_test_link_mode(ks, advertising,
13339+ 40000baseCR4_Full) ||
13340+ ethtool_link_ksettings_test_link_mode(ks, advertising,
13341+ 40000baseSR4_Full) ||
13342+ ethtool_link_ksettings_test_link_mode(ks, advertising,
13343+ 40000baseLR4_Full))
13344+ config.link_speed |= I40E_LINK_SPEED_40GB;
13345+
13346+ /* If speed didn't get set, set it to what it currently is.
13347+ * This is needed because if advertise is 0 (as it is when autoneg
13348+ * is disabled) then speed won't get set.
13349+ */
13350+ if (!config.link_speed)
13351+ config.link_speed = abilities.link_speed;
13352+ if (autoneg_changed || (abilities.link_speed != config.link_speed)) {
13353+ /* copy over the rest of the abilities */
13354+ config.phy_type = abilities.phy_type;
13355+ config.phy_type_ext = abilities.phy_type_ext;
13356+ config.eee_capability = abilities.eee_capability;
13357+ config.eeer = abilities.eeer_val;
13358+ config.low_power_ctrl = abilities.d3_lpan;
13359+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
13360+ I40E_AQ_PHY_FEC_CONFIG_MASK;
13361+
13362+ /* save the requested speeds */
13363+ hw->phy.link_info.requested_speeds = config.link_speed;
13364+ /* set link and auto negotiation so changes take effect */
13365+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
13366+ /* If link is up put link down */
13367+ if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
13368+ /* Tell the OS link is going down, the link will go
13369+ * back up when fw says it is ready asynchronously
13370+ */
13371+ i40e_print_link_message(vsi, false);
13372+ netif_carrier_off(netdev);
13373+ netif_tx_stop_all_queues(netdev);
13374+ }
13375+
13376+ /* make the aq call */
13377+ status = i40e_aq_set_phy_config(hw, &config, NULL);
13378+ if (status) {
13379+ netdev_info(netdev,
13380+ "Set phy config failed, err %s aq_err %s\n",
13381+ i40e_stat_str(hw, status),
13382+ i40e_aq_str(hw, hw->aq.asq_last_status));
13383+ err = -EAGAIN;
13384+ goto done;
13385+ }
13386+
13387+ status = i40e_update_link_info(hw);
13388+ if (status)
13389+ netdev_dbg(netdev,
13390+ "Updating link info failed with err %s aq_err %s\n",
13391+ i40e_stat_str(hw, status),
13392+ i40e_aq_str(hw, hw->aq.asq_last_status));
13393+
13394+ } else {
13395+ netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
13396 }
13397
13398+done:
13399+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
13400+
13401+ return err;
13402+}
13403+
13404+#else /* ETHTOOL_GLINKSETTINGS */
13405+/**
13406+ * i40e_get_settings - Get Link Speed and Duplex settings
13407+ * @netdev: network interface device structure
13408+ * @ecmd: ethtool command
13409+ *
13410+ * Reports speed/duplex settings based on media_type. Since we've backported
13411+ * the new API constructs to use in the old API, this ends up just being a
13412+ * wrapper to i40e_get_link_ksettings.
13413+ **/
13414+static int i40e_get_settings(struct net_device *netdev,
13415+ struct ethtool_cmd *ecmd)
13416+{
13417+ struct ethtool_link_ksettings ks;
13418+
13419+ i40e_get_link_ksettings(netdev, &ks);
13420+ _kc_ethtool_ksettings_to_cmd(&ks, ecmd);
13421+ ecmd->transceiver = XCVR_EXTERNAL;
13422 return 0;
13423 }
13424
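The hunk above shows how the driver keeps working on kernels without ETHTOOL_GLINKSETTINGS: the ksettings-based getter is reused and its result is converted back into the legacy struct ethtool_cmd by _kc_ethtool_ksettings_to_cmd, a compat helper presumably provided by the driver's kcompat layer and not visible in this patch. As a rough sketch of what such a conversion typically does — using only upstream helpers from <linux/ethtool.h>, not the driver's actual implementation — it might look like:

/* Illustrative sketch only -- not the driver's _kc_ helper. */
static void example_ksettings_to_cmd(const struct ethtool_link_ksettings *ks,
				     struct ethtool_cmd *ecmd)
{
	/* Collapse the link-mode bitmaps into the legacy u32 masks; modes
	 * without a legacy bit (e.g. 25G/50G) are silently dropped.
	 */
	ethtool_convert_link_mode_to_legacy_u32(&ecmd->supported,
						ks->link_modes.supported);
	ethtool_convert_link_mode_to_legacy_u32(&ecmd->advertising,
						ks->link_modes.advertising);
	ethtool_convert_link_mode_to_legacy_u32(&ecmd->lp_advertising,
						ks->link_modes.lp_advertising);

	/* Scalar fields map one to one. */
	ethtool_cmd_speed_set(ecmd, ks->base.speed);
	ecmd->duplex      = ks->base.duplex;
	ecmd->autoneg     = ks->base.autoneg;
	ecmd->port        = ks->base.port;
	ecmd->phy_address = ks->base.phy_address;
}

The conversion is lossy by design: newer modes such as 25G or 50G have no legacy bit, which is one reason the patch prefers the ksettings path whenever ETHTOOL_GLINKSETTINGS is available.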
13425@@ -686,8 +1332,8 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
13426 *
13427 * Set speed/duplex per media_types advertised/forced
13428 **/
13429-static int i40e_set_link_ksettings(struct net_device *netdev,
13430- const struct ethtool_link_ksettings *cmd)
13431+static int i40e_set_settings(struct net_device *netdev,
13432+ struct ethtool_cmd *ecmd)
13433 {
13434 struct i40e_netdev_priv *np = netdev_priv(netdev);
13435 struct i40e_aq_get_phy_abilities_resp abilities;
13436@@ -695,15 +1341,14 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
13437 struct i40e_pf *pf = np->vsi->back;
13438 struct i40e_vsi *vsi = np->vsi;
13439 struct i40e_hw *hw = &pf->hw;
13440- struct ethtool_link_ksettings safe_cmd;
13441- struct ethtool_link_ksettings copy_cmd;
13442+ struct ethtool_cmd safe_ecmd;
13443 i40e_status status = 0;
13444 bool change = false;
13445 int timeout = 50;
13446 int err = 0;
13447- u32 autoneg;
13448+ u8 autoneg;
13449 u32 advertise;
13450- u32 tmp;
13451+ u32 old_ethtool_advertising = 0;
13452
13453 /* Changing port settings is not supported if this isn't the
13454 * port's controlling PF
13455@@ -731,31 +1376,40 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
13456 return -EOPNOTSUPP;
13457 }
13458
13459- /* copy the cmd to copy_cmd to avoid modifying the origin */
13460- memcpy(&copy_cmd, cmd, sizeof(struct ethtool_link_ksettings));
13461-
13462 /* get our own copy of the bits to check against */
13463- memset(&safe_cmd, 0, sizeof(struct ethtool_link_ksettings));
13464- i40e_get_link_ksettings(netdev, &safe_cmd);
13465+ memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
13466+ i40e_get_settings(netdev, &safe_ecmd);
13467
13468- /* save autoneg and speed out of cmd */
13469- autoneg = cmd->base.autoneg;
13470- ethtool_convert_link_mode_to_legacy_u32(&advertise,
13471- cmd->link_modes.advertising);
13472+ /* save autoneg and speed out of ecmd */
13473+ autoneg = ecmd->autoneg;
13474+ advertise = ecmd->advertising;
13475
13476 /* set autoneg and speed back to what they currently are */
13477- copy_cmd.base.autoneg = safe_cmd.base.autoneg;
13478- ethtool_convert_link_mode_to_legacy_u32(
13479- &tmp, safe_cmd.link_modes.advertising);
13480- ethtool_convert_legacy_u32_to_link_mode(
13481- copy_cmd.link_modes.advertising, tmp);
13482-
13483- copy_cmd.base.cmd = safe_cmd.base.cmd;
13484-
13485- /* If copy_cmd and safe_cmd are not the same now, then they are
13486+ ecmd->autoneg = safe_ecmd.autoneg;
13487+ ecmd->advertising = safe_ecmd.advertising;
13488+
13489+ /* Due to a bug in ethtool versions < 3.6 this check is necessary */
13490+ old_ethtool_advertising = ecmd->supported &
13491+ (ADVERTISED_10baseT_Half |
13492+ ADVERTISED_10baseT_Full |
13493+ ADVERTISED_100baseT_Half |
13494+ ADVERTISED_100baseT_Full |
13495+ ADVERTISED_1000baseT_Half |
13496+ ADVERTISED_1000baseT_Full |
13497+ ADVERTISED_2500baseX_Full |
13498+ ADVERTISED_10000baseT_Full);
13499+ old_ethtool_advertising |= (old_ethtool_advertising |
13500+ ADVERTISED_20000baseMLD2_Full |
13501+ ADVERTISED_20000baseKR2_Full);
13502+
13503+ if (advertise == old_ethtool_advertising)
13504+ netdev_info(netdev, "If you are not setting advertising to %x then you may have an old version of ethtool. Please update.\n",
13505+ advertise);
13506+ ecmd->cmd = safe_ecmd.cmd;
13507+ /* If ecmd and safe_ecmd are not the same now, then they are
13508 * trying to set something that we do not support
13509 */
13510- if (memcmp(&copy_cmd, &safe_cmd, sizeof(struct ethtool_link_ksettings)))
13511+ if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
13512 return -EOPNOTSUPP;
13513
13514 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
13515@@ -784,8 +1438,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
13516 /* If autoneg was not already enabled */
13517 if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
13518 /* If autoneg is not supported, return error */
13519- if (!ethtool_link_ksettings_test_link_mode(
13520- &safe_cmd, supported, Autoneg)) {
13521+ if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
13522 netdev_info(netdev, "Autoneg not supported on this phy\n");
13523 err = -EINVAL;
13524 goto done;
13525@@ -798,11 +1451,10 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
13526 } else {
13527 /* If autoneg is currently enabled */
13528 if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
13529- /* If autoneg is supported 10GBASE_T is the only PHY
13530+ /* If autoneg is supported 10GBASE_T is the only phy
13531 * that can disable it, so otherwise return error
13532 */
13533- if (ethtool_link_ksettings_test_link_mode(
13534- &safe_cmd, supported, Autoneg) &&
13535+ if (safe_ecmd.supported & SUPPORTED_Autoneg &&
13536 hw->phy.link_info.phy_type !=
13537 I40E_PHY_TYPE_10GBASE_T) {
13538 netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
13539@@ -816,9 +1468,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
13540 }
13541 }
13542
13543- ethtool_convert_link_mode_to_legacy_u32(&tmp,
13544- safe_cmd.link_modes.supported);
13545- if (advertise & ~tmp) {
13546+ if (advertise & ~safe_ecmd.supported) {
13547 err = -EINVAL;
13548 goto done;
13549 }
13550@@ -847,7 +1497,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
13551 if (!config.link_speed)
13552 config.link_speed = abilities.link_speed;
13553
13554- if (change || (abilities.link_speed != config.link_speed)) {
13555+ if (change || abilities.link_speed != config.link_speed) {
13556 /* copy over the rest of the abilities */
13557 config.phy_type = abilities.phy_type;
13558 config.phy_type_ext = abilities.phy_type_ext;
13559@@ -897,6 +1547,157 @@ done:
13560 return err;
13561 }
13562
13563+#endif /* ETHTOOL_GLINKSETTINGS */
13564+
13565+static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
13566+{
13567+ struct i40e_netdev_priv *np = netdev_priv(netdev);
13568+ struct i40e_aq_get_phy_abilities_resp abilities;
13569+ struct i40e_pf *pf = np->vsi->back;
13570+ struct i40e_hw *hw = &pf->hw;
13571+ i40e_status status = 0;
13572+ u64 flags = 0;
13573+ int err = 0;
13574+
13575+ flags = READ_ONCE(pf->flags);
13576+ i40e_set_fec_in_flags(fec_cfg, &flags);
13577+
13578+ /* Get the current phy config */
13579+ memset(&abilities, 0, sizeof(abilities));
13580+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
13581+ NULL);
13582+ if (status) {
13583+ err = -EAGAIN;
13584+ goto done;
13585+ }
13586+
13587+ if (abilities.fec_cfg_curr_mod_ext_info != fec_cfg) {
13588+ struct i40e_aq_set_phy_config config;
13589+
13590+ memset(&config, 0, sizeof(config));
13591+ config.phy_type = abilities.phy_type;
13592+ config.abilities = abilities.abilities;
13593+ config.phy_type_ext = abilities.phy_type_ext;
13594+ config.link_speed = abilities.link_speed;
13595+ config.eee_capability = abilities.eee_capability;
13596+ config.eeer = abilities.eeer_val;
13597+ config.low_power_ctrl = abilities.d3_lpan;
13598+ config.fec_config = fec_cfg & I40E_AQ_PHY_FEC_CONFIG_MASK;
13599+ status = i40e_aq_set_phy_config(hw, &config, NULL);
13600+ if (status) {
13601+ netdev_info(netdev,
13602+ "Set phy config failed, err %s aq_err %s\n",
13603+ i40e_stat_str(hw, status),
13604+ i40e_aq_str(hw, hw->aq.asq_last_status));
13605+ err = -EAGAIN;
13606+ goto done;
13607+ }
13608+ pf->flags = flags;
13609+ status = i40e_update_link_info(hw);
13610+ if (status)
13611+ /* debug level message only due to relation to the link
13612+ * itself rather than to the FEC settings
13613+ * (e.g. no physical connection etc.)
13614+ */
13615+ netdev_dbg(netdev,
13616+ "Updating link info failed with err %s aq_err %s\n",
13617+ i40e_stat_str(hw, status),
13618+ i40e_aq_str(hw, hw->aq.asq_last_status));
13619+ }
13620+
13621+done:
13622+ return err;
13623+}
13624+
13625+#ifdef ETHTOOL_GFECPARAM
13626+static int i40e_get_fec_param(struct net_device *netdev,
13627+ struct ethtool_fecparam *fecparam)
13628+{
13629+ struct i40e_netdev_priv *np = netdev_priv(netdev);
13630+ struct i40e_aq_get_phy_abilities_resp abilities;
13631+ struct i40e_pf *pf = np->vsi->back;
13632+ struct i40e_hw *hw = &pf->hw;
13633+ i40e_status status = 0;
13634+ int err = 0;
13635+ u8 fec_cfg;
13636+
13637+ /* Get the current phy config */
13638+ memset(&abilities, 0, sizeof(abilities));
13639+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
13640+ NULL);
13641+ if (status) {
13642+ err = -EAGAIN;
13643+ goto done;
13644+ }
13645+
13646+ fecparam->fec = 0;
13647+ fec_cfg = abilities.fec_cfg_curr_mod_ext_info;
13648+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
13649+ fecparam->fec |= ETHTOOL_FEC_AUTO;
13650+ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_RS |
13651+ I40E_AQ_SET_FEC_ABILITY_RS))
13652+ fecparam->fec |= ETHTOOL_FEC_RS;
13653+ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_KR |
13654+ I40E_AQ_SET_FEC_ABILITY_KR))
13655+ fecparam->fec |= ETHTOOL_FEC_BASER;
13656+ if (fec_cfg == 0)
13657+ fecparam->fec |= ETHTOOL_FEC_OFF;
13658+
13659+ if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
13660+ fecparam->active_fec = ETHTOOL_FEC_BASER;
13661+ else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
13662+ fecparam->active_fec = ETHTOOL_FEC_RS;
13663+ else
13664+ fecparam->active_fec = ETHTOOL_FEC_OFF;
13665+done:
13666+ return err;
13667+}
13668+
13669+static int i40e_set_fec_param(struct net_device *netdev,
13670+ struct ethtool_fecparam *fecparam)
13671+{
13672+ struct i40e_netdev_priv *np = netdev_priv(netdev);
13673+ struct i40e_pf *pf = np->vsi->back;
13674+ struct i40e_hw *hw = &pf->hw;
13675+ u8 fec_cfg = 0;
13676+ int err = 0;
13677+
13678+ if (hw->device_id != I40E_DEV_ID_25G_SFP28 &&
13679+ hw->device_id != I40E_DEV_ID_25G_B) {
13680+ err = -EPERM;
13681+ goto done;
13682+ }
13683+
13684+ switch (fecparam->fec) {
13685+ case ETHTOOL_FEC_AUTO:
13686+ fec_cfg = I40E_AQ_SET_FEC_AUTO;
13687+ break;
13688+ case ETHTOOL_FEC_RS:
13689+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
13690+ I40E_AQ_SET_FEC_ABILITY_RS);
13691+ break;
13692+ case ETHTOOL_FEC_BASER:
13693+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
13694+ I40E_AQ_SET_FEC_ABILITY_KR);
13695+ break;
13696+ case ETHTOOL_FEC_OFF:
13697+ case ETHTOOL_FEC_NONE:
13698+ fec_cfg = 0;
13699+ break;
13700+ default:
13701+ dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d",
13702+ fecparam->fec);
13703+ err = -EINVAL;
13704+ goto done;
13705+ }
13706+
13707+ err = i40e_set_fec_cfg(netdev, fec_cfg);
13708+
13709+done:
13710+ return err;
13711+}
13712+#endif /* ETHTOOL_GFECPARAM */
13713+
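The ETHTOOL_GFECPARAM block above wires the adapter's FEC configuration into the standard get_fec_param/set_fec_param ethtool callbacks. Below is a hedged userspace sketch of how these callbacks are reached through the SIOCETHTOOL ioctl; the interface name and the choice of Reed-Solomon encoding are assumptions for illustration, and the same request can normally be issued with "ethtool --set-fec <dev> encoding rs".

/* Illustrative only: drive ETHTOOL_SFECPARAM from userspace. */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_fec_rs(const char *ifname)	/* e.g. "eth0" (assumed name) */
{
	struct ethtool_fecparam fec = {
		.cmd = ETHTOOL_SFECPARAM,
		.fec = ETHTOOL_FEC_RS,		/* request Reed-Solomon FEC */
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&fec;

	/* The kernel dispatches this to the driver's .set_fec_param
	 * callback, i.e. i40e_set_fec_param() in the hunk above.
	 */
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	return ret;
}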
13714 static int i40e_nway_reset(struct net_device *netdev)
13715 {
13716 /* restart autonegotiation */
13717@@ -919,6 +1720,9 @@ static int i40e_nway_reset(struct net_device *netdev)
13718
13719 /**
13720 * i40e_get_pauseparam - Get Flow Control status
13721+ * @netdev: netdevice structure
13722+ * @pause: buffer to return pause parameters
13723+ *
13724 * Return tx/rx-pause status
13725 **/
13726 static void i40e_get_pauseparam(struct net_device *netdev,
13727@@ -927,12 +1731,9 @@ static void i40e_get_pauseparam(struct net_device *netdev,
13728 struct i40e_netdev_priv *np = netdev_priv(netdev);
13729 struct i40e_pf *pf = np->vsi->back;
13730 struct i40e_hw *hw = &pf->hw;
13731- struct i40e_link_status *hw_link_info = &hw->phy.link_info;
13732 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
13733
13734- pause->autoneg =
13735- ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
13736- AUTONEG_ENABLE : AUTONEG_DISABLE);
13737+ pause->autoneg = hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED;
13738
13739 /* PFC enabled so report LFC as off */
13740 if (dcbx_cfg->pfc.pfcenable) {
13741@@ -969,6 +1770,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
13742 i40e_status status;
13743 u8 aq_failures;
13744 int err = 0;
13745+ u32 is_an;
13746
13747 /* Changing the port's flow control is not supported if this isn't the
13748 * port's controlling PF
13749@@ -981,22 +1783,21 @@ static int i40e_set_pauseparam(struct net_device *netdev,
13750 if (vsi != pf->vsi[pf->lan_vsi])
13751 return -EOPNOTSUPP;
13752
13753- if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
13754- AUTONEG_ENABLE : AUTONEG_DISABLE)) {
13755+ is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED;
13756+ if (pause->autoneg != is_an) {
13757 netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
13758 return -EOPNOTSUPP;
13759 }
13760
13761 /* If we have link and don't have autoneg */
13762- if (!test_bit(__I40E_DOWN, pf->state) &&
13763- !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
13764+ if (!test_bit(__I40E_DOWN, pf->state) && !is_an) {
13765 /* Send message that it might not necessarily work*/
13766 netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
13767 }
13768
13769 if (dcbx_cfg->pfc.pfcenable) {
13770 netdev_info(netdev,
13771- "Priority flow control enabled. Cannot set link flow control.\n");
13772+ "Priority flow control enabled. Cannot set link flow control.\n");
13773 return -EOPNOTSUPP;
13774 }
13775
13776@@ -1009,7 +1810,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
13777 else if (!pause->rx_pause && !pause->tx_pause)
13778 hw->fc.requested_mode = I40E_FC_NONE;
13779 else
13780- return -EINVAL;
13781+ return -EINVAL;
13782
13783 /* Tell the OS link is going down, the link will go back up when fw
13784 * says it is ready asynchronously
13785@@ -1018,46 +1819,165 @@ static int i40e_set_pauseparam(struct net_device *netdev,
13786 netif_carrier_off(netdev);
13787 netif_tx_stop_all_queues(netdev);
13788
13789- /* Set the fc mode and only restart an if link is up*/
13790- status = i40e_set_fc(hw, &aq_failures, link_up);
13791+ /* Set the fc mode and only restart an if link is up*/
13792+ status = i40e_set_fc(hw, &aq_failures, link_up);
13793+
13794+ if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
13795+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
13796+ i40e_stat_str(hw, status),
13797+ i40e_aq_str(hw, hw->aq.asq_last_status));
13798+ err = -EAGAIN;
13799+ }
13800+ if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
13801+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
13802+ i40e_stat_str(hw, status),
13803+ i40e_aq_str(hw, hw->aq.asq_last_status));
13804+ err = -EAGAIN;
13805+ }
13806+ if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
13807+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
13808+ i40e_stat_str(hw, status),
13809+ i40e_aq_str(hw, hw->aq.asq_last_status));
13810+ err = -EAGAIN;
13811+ }
13812+
13813+ if (!test_bit(__I40E_DOWN, pf->state) && is_an) {
13814+ /* Give it a little more time to try to come back */
13815+ msleep(75);
13816+ if (!test_bit(__I40E_DOWN, pf->state))
13817+ return i40e_nway_reset(netdev);
13818+ }
13819+
13820+ return err;
13821+}
13822+
13823+#ifndef HAVE_NDO_SET_FEATURES
13824+static u32 i40e_get_rx_csum(struct net_device *netdev)
13825+{
13826+ struct i40e_netdev_priv *np = netdev_priv(netdev);
13827+ struct i40e_pf *pf = np->vsi->back;
13828+
13829+ return pf->flags & I40E_FLAG_RX_CSUM_ENABLED;
13830+}
13831+
13832+static int i40e_set_rx_csum(struct net_device *netdev, u32 data)
13833+{
13834+ struct i40e_netdev_priv *np = netdev_priv(netdev);
13835+ struct i40e_pf *pf = np->vsi->back;
13836+
13837+ if (data)
13838+ pf->flags |= I40E_FLAG_RX_CSUM_ENABLED;
13839+ else
13840+ pf->flags &= ~I40E_FLAG_RX_CSUM_ENABLED;
13841+
13842+ return 0;
13843+}
13844+
13845+static u32 i40e_get_tx_csum(struct net_device *netdev)
13846+{
13847+ return (netdev->features & NETIF_F_IP_CSUM) != 0;
13848+}
13849+
13850+static int i40e_set_tx_csum(struct net_device *netdev, u32 data)
13851+{
13852+ if (data) {
13853+#ifdef NETIF_F_IPV6_CSUM
13854+ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
13855+#else
13856+ netdev->features |= NETIF_F_IP_CSUM;
13857+#endif
13858+ netdev->features |= NETIF_F_SCTP_CRC;
13859+ } else {
13860+#ifdef NETIF_F_IPV6_CSUM
13861+ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13862+ NETIF_F_SCTP_CRC);
13863+#else
13864+ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC);
13865+#endif
13866+ }
13867+
13868+ return 0;
13869+}
13870
13871- if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
13872- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
13873- i40e_stat_str(hw, status),
13874- i40e_aq_str(hw, hw->aq.asq_last_status));
13875- err = -EAGAIN;
13876- }
13877- if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
13878- netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
13879- i40e_stat_str(hw, status),
13880- i40e_aq_str(hw, hw->aq.asq_last_status));
13881- err = -EAGAIN;
13882- }
13883- if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
13884- netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
13885- i40e_stat_str(hw, status),
13886- i40e_aq_str(hw, hw->aq.asq_last_status));
13887- err = -EAGAIN;
13888+static int i40e_set_tso(struct net_device *netdev, u32 data)
13889+{
13890+ if (data) {
13891+#ifndef HAVE_NDO_FEATURES_CHECK
13892+ if (netdev->mtu >= 576) {
13893+ netdev->features |= NETIF_F_TSO;
13894+ netdev->features |= NETIF_F_TSO6;
13895+ } else {
13896+ netdev_info(netdev, "MTU setting is too low to enable TSO\n");
13897+ }
13898+#else
13899+ netdev->features |= NETIF_F_TSO;
13900+ netdev->features |= NETIF_F_TSO6;
13901+#endif
13902+ } else {
13903+#ifndef HAVE_NETDEV_VLAN_FEATURES
13904+ struct i40e_netdev_priv *np = netdev_priv(netdev);
13905+ /* disable TSO on all VLANs if they're present */
13906+ if (np->vsi->vlgrp) {
13907+ int i;
13908+ struct net_device *v_netdev;
13909+
13910+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
13911+ v_netdev =
13912+ vlan_group_get_device(np->vsi->vlgrp, i);
13913+ if (v_netdev) {
13914+ v_netdev->features &= ~NETIF_F_TSO;
13915+ v_netdev->features &= ~NETIF_F_TSO6;
13916+ vlan_group_set_device(np->vsi->vlgrp, i,
13917+ v_netdev);
13918+ }
13919+ }
13920+ }
13921+#endif /* HAVE_NETDEV_VLAN_FEATURES */
13922+ netdev->features &= ~NETIF_F_TSO;
13923+ netdev->features &= ~NETIF_F_TSO6;
13924 }
13925
13926- if (!test_bit(__I40E_DOWN, pf->state)) {
13927- /* Give it a little more time to try to come back */
13928- msleep(75);
13929- if (!test_bit(__I40E_DOWN, pf->state))
13930- return i40e_nway_reset(netdev);
13931- }
13932+ return 0;
13933+}
13934+#ifdef ETHTOOL_GFLAGS
13935+static int i40e_set_flags(struct net_device *netdev, u32 data)
13936+{
13937+#ifdef ETHTOOL_GRXRINGS
13938+ struct i40e_netdev_priv *np = netdev_priv(netdev);
13939+ struct i40e_pf *pf = np->vsi->back;
13940+#endif
13941+ u32 supported_flags = 0;
13942+ bool need_reset = false;
13943+ int rc;
13944
13945- return err;
13946+#ifdef NETIF_F_RXHASH
13947+ supported_flags |= ETH_FLAG_RXHASH;
13948+
13949+#endif
13950+#ifdef ETHTOOL_GRXRINGS
13951+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
13952+ supported_flags |= ETH_FLAG_NTUPLE;
13953+#endif
13954+ rc = ethtool_op_set_flags(netdev, data, supported_flags);
13955+ if (rc)
13956+ return rc;
13957+
13958+ /* if state changes we need to update pf->flags and maybe reset */
13959+#ifdef ETHTOOL_GRXRINGS
13960+ need_reset = i40e_set_ntuple(pf, netdev->features);
13961+#endif /* ETHTOOL_GRXRINGS */
13962+ if (need_reset)
13963+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
13964+
13965+ return 0;
13966 }
13967+#endif /* ETHTOOL_GFLAGS */
13968
13969+#endif /* HAVE_NDO_SET_FEATURES */
13970 static u32 i40e_get_msglevel(struct net_device *netdev)
13971 {
13972 struct i40e_netdev_priv *np = netdev_priv(netdev);
13973 struct i40e_pf *pf = np->vsi->back;
13974- u32 debug_mask = pf->hw.debug_mask;
13975-
13976- if (debug_mask)
13977- netdev_info(netdev, "i40e debug_mask: 0x%08X\n", debug_mask);
13978
13979 return pf->msg_enable;
13980 }
13981@@ -1069,8 +1989,7 @@ static void i40e_set_msglevel(struct net_device *netdev, u32 data)
13982
13983 if (I40E_DEBUG_USER & data)
13984 pf->hw.debug_mask = data;
13985- else
13986- pf->msg_enable = data;
13987+ pf->msg_enable = data;
13988 }
13989
13990 static int i40e_get_regs_len(struct net_device *netdev)
13991@@ -1277,9 +2196,11 @@ static void i40e_get_drvinfo(struct net_device *netdev,
13992 sizeof(drvinfo->fw_version));
13993 strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
13994 sizeof(drvinfo->bus_info));
13995+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
13996 drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
13997 if (pf->hw.pf_id == 0)
13998 drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
13999+#endif
14000 }
14001
14002 static void i40e_get_ringparam(struct net_device *netdev,
14003@@ -1360,6 +2281,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
14004 if (i40e_enabled_xdp_vsi(vsi))
14005 vsi->xdp_rings[i]->count = new_tx_count;
14006 }
14007+ vsi->num_tx_desc = new_tx_count;
14008+ vsi->num_rx_desc = new_rx_count;
14009 goto done;
14010 }
14011
14012@@ -1373,7 +2296,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
14013 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14014 if (new_tx_count != vsi->tx_rings[0]->count) {
14015 netdev_info(netdev,
14016- "Changing Tx descriptor count from %d to %d.\n",
14017+ "Changing Tx descriptor count from %d to %d\n",
14018 vsi->tx_rings[0]->count, new_tx_count);
14019 tx_rings = kcalloc(tx_alloc_queue_pairs,
14020 sizeof(struct i40e_ring), GFP_KERNEL);
14021@@ -1385,7 +2308,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
14022 for (i = 0; i < tx_alloc_queue_pairs; i++) {
14023 if (!i40e_active_tx_ring_index(vsi, i))
14024 continue;
14025-
14026+ /* clone ring and setup updated count */
14027 tx_rings[i] = *vsi->tx_rings[i];
14028 tx_rings[i].count = new_tx_count;
14029 /* the desc and bi pointers will be reallocated in the
14030@@ -1422,7 +2345,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
14031 }
14032
14033 for (i = 0; i < vsi->num_queue_pairs; i++) {
14034- struct i40e_ring *ring;
14035 u16 unused;
14036
14037 /* clone ring and setup updated count */
14038@@ -1433,6 +2355,11 @@ static int i40e_set_ringparam(struct net_device *netdev,
14039 */
14040 rx_rings[i].desc = NULL;
14041 rx_rings[i].rx_bi = NULL;
14042+#ifdef HAVE_XDP_BUFF_RXQ
14043+ /* Clear cloned XDP RX-queue info before setup call */
14044+ memset(&rx_rings[i].xdp_rxq, 0,
14045+ sizeof(rx_rings[i].xdp_rxq));
14046+#endif
14047 /* this is to allow wr32 to have something to write to
14048 * during early allocation of Rx buffers
14049 */
14050@@ -1444,9 +2371,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
14051 /* now allocate the Rx buffers to make sure the OS
14052 * has enough memory, any failure here means abort
14053 */
14054- ring = &rx_rings[i];
14055- unused = I40E_DESC_UNUSED(ring);
14056- err = i40e_alloc_rx_buffers(ring, unused);
14057+ unused = I40E_DESC_UNUSED(&rx_rings[i]);
14058+ err = i40e_alloc_rx_buffers(&rx_rings[i], unused);
14059 rx_unwind:
14060 if (err) {
14061 do {
14062@@ -1496,6 +2422,8 @@ rx_unwind:
14063 rx_rings = NULL;
14064 }
14065
14066+ vsi->num_tx_desc = new_tx_count;
14067+ vsi->num_rx_desc = new_rx_count;
14068 i40e_up(vsi);
14069
14070 free_tx:
14071@@ -1515,6 +2443,67 @@ done:
14072 return err;
14073 }
14074
14075+/**
14076+ * i40e_get_stats_count - return the stats count for a device
14077+ * @netdev: the netdev to return the count for
14078+ *
14079+ * Returns the total number of statistics for this netdev. Note that even
14080+ * though this is a function, it is required that the count for a specific
14081+ * netdev must never change. Basing the count on static values such as the
14082+ * maximum number of queues or the device type is ok. However, the API for
14083+ * obtaining stats is *not* safe against changes based on non-static
14084+ * values such as the *current* number of queues, or runtime flags.
14085+ *
14086+ * If a statistic is not always enabled, return it as part of the count
14087+ * anyways, always return its string, and report its value as zero.
14088+ **/
14089+static int i40e_get_stats_count(struct net_device *netdev)
14090+{
14091+ struct i40e_netdev_priv *np = netdev_priv(netdev);
14092+ struct i40e_vsi *vsi = np->vsi;
14093+ struct i40e_pf *pf = vsi->back;
14094+ int stats_len;
14095+
14096+ if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
14097+ stats_len = I40E_PF_STATS_LEN;
14098+ else
14099+ stats_len = I40E_VSI_STATS_LEN;
14100+
14101+ /* The number of stats reported for a given net_device must remain
14102+ * constant throughout the life of that device.
14103+ *
14104+ * This is because the API for obtaining the size, strings, and stats
14105+ * is spread out over three separate ethtool ioctls. There is no safe
14106+ * way to lock the number of stats across these calls, so we must
14107+ * assume that they will never change.
14108+ *
14109+ * Due to this, we report the maximum number of queues, even if not
14110+ * every queue is currently configured. Since we always allocate
14111+ * queues in pairs, we'll just use netdev->num_tx_queues * 2. This
14112+ * works because the num_tx_queues is set at device creation and never
14113+ * changes.
14114+ */
14115+#ifndef I40E_PF_EXTRA_STATS_OFF
14116+ /* The same applies to additional stats showing here the network usage
14117+ * counters for VFs. In order to handle it in a safe way, we also
14118+ * report here, similarly as in the queues case described above,
14119+ * the maximum possible, fixed number of these extra stats items.
14120+ */
14121+#endif /* !I40E_PF_EXTRA_STATS_OFF */
14122+ stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->real_num_tx_queues;
14123+#ifdef HAVE_XDP_SUPPORT
14124+ stats_len += I40E_QUEUE_STATS_XDP_LEN * netdev->real_num_tx_queues;
14125+#endif
14126+
14127+#ifndef I40E_PF_EXTRA_STATS_OFF
14128+ if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
14129+ stats_len += I40E_PF_STATS_EXTRA_LEN;
14130+
14131+#endif /* !I40E_PF_EXTRA_STATS_OFF */
14132+ return stats_len;
14133+}
14134+
14135+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
14136 static int i40e_get_sset_count(struct net_device *netdev, int sset)
14137 {
14138 struct i40e_netdev_priv *np = netdev_priv(netdev);
14139@@ -1525,16 +2514,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
14140 case ETH_SS_TEST:
14141 return I40E_TEST_LEN;
14142 case ETH_SS_STATS:
14143- if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
14144- int len = I40E_PF_STATS_LEN(netdev);
14145-
14146- if ((pf->lan_veb != I40E_NO_VEB) &&
14147- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED))
14148- len += I40E_VEB_STATS_TOTAL;
14149- return len;
14150- } else {
14151- return I40E_VSI_STATS_LEN(netdev);
14152- }
14153+ return i40e_get_stats_count(netdev);
14154 case ETH_SS_PRIV_FLAGS:
14155 return I40E_PRIV_FLAGS_STR_LEN +
14156 (pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0);
14157@@ -1542,96 +2522,262 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
14158 return -EOPNOTSUPP;
14159 }
14160 }
14161+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
14162+
14163+/**
14164+ * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
14165+ * @pf: the PF device structure
14166+ * @i: the priority value to copy
14167+ *
14168+ * The PFC stats are found as arrays in pf->stats, which is not easy to pass
14169+ * into i40e_add_ethtool_stats. Produce a formatted i40e_pfc_stats structure
14170+ * of the PFC stats for the given priority.
14171+ **/
14172+static inline struct i40e_pfc_stats
14173+i40e_get_pfc_stats(struct i40e_pf *pf, unsigned int i)
14174+{
14175+#define I40E_GET_PFC_STAT(stat, priority) \
14176+ .stat = pf->stats.stat[priority]
14177+
14178+ struct i40e_pfc_stats pfc = {
14179+ I40E_GET_PFC_STAT(priority_xon_rx, i),
14180+ I40E_GET_PFC_STAT(priority_xoff_rx, i),
14181+ I40E_GET_PFC_STAT(priority_xon_tx, i),
14182+ I40E_GET_PFC_STAT(priority_xoff_tx, i),
14183+ I40E_GET_PFC_STAT(priority_xon_2_xoff, i),
14184+ };
14185+ return pfc;
14186+}
14187
14188+/**
14189+ * i40e_get_ethtool_stats - copy stat values into supplied buffer
14190+ * @netdev: the netdev to collect stats for
14191+ * @stats: ethtool stats command structure
14192+ * @data: ethtool supplied buffer
14193+ *
14194+ * Copy the stats values for this netdev into the buffer. Expects data to be
14195+ * pre-allocated to the size returned by i40e_get_stats_count.. Note that all
14196+ * statistics must be copied in a static order, and the count must not change
14197+ * for a given netdev. See i40e_get_stats_count for more details.
14198+ *
14199+ * If a statistic is not currently valid (such as a disabled queue), this
14200+ * function reports its value as zero.
14201+ **/
14202 static void i40e_get_ethtool_stats(struct net_device *netdev,
14203 struct ethtool_stats *stats, u64 *data)
14204 {
14205 struct i40e_netdev_priv *np = netdev_priv(netdev);
14206- struct i40e_ring *tx_ring, *rx_ring;
14207 struct i40e_vsi *vsi = np->vsi;
14208 struct i40e_pf *pf = vsi->back;
14209- unsigned int j;
14210- int i = 0;
14211- char *p;
14212- struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
14213- unsigned int start;
14214+ struct i40e_veb *veb = NULL;
14215+#ifndef I40E_PF_EXTRA_STATS_OFF
14216+ unsigned int vsi_idx;
14217+ unsigned int vf_idx;
14218+ unsigned int vf_id;
14219+ bool is_vf_valid;
14220+#endif /* !I40E_PF_EXTRA_STATS_OFF */
14221+ unsigned int i;
14222+ bool veb_stats;
14223+ u64 *p = data;
14224
14225 i40e_update_stats(vsi);
14226
14227- for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
14228- p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
14229- data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
14230- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
14231- }
14232- for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
14233- p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
14234- data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
14235- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
14236- }
14237- rcu_read_lock();
14238- for (j = 0; j < vsi->num_queue_pairs; j++) {
14239- tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
14240+ i40e_add_ethtool_stats(&data, i40e_get_vsi_stats_struct(vsi),
14241+ i40e_gstrings_net_stats);
14242
14243- if (!tx_ring)
14244- continue;
14245+ i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats);
14246
14247- /* process Tx ring statistics */
14248- do {
14249- start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
14250- data[i] = tx_ring->stats.packets;
14251- data[i + 1] = tx_ring->stats.bytes;
14252- } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
14253- i += 2;
14254-
14255- /* Rx ring is the 2nd half of the queue pair */
14256- rx_ring = &tx_ring[1];
14257- do {
14258- start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
14259- data[i] = rx_ring->stats.packets;
14260- data[i + 1] = rx_ring->stats.bytes;
14261- } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
14262- i += 2;
14263+ rcu_read_lock();
14264+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
14265+ i40e_add_queue_stats(&data, READ_ONCE(vsi->tx_rings[i]));
14266+ i40e_add_queue_stats(&data, READ_ONCE(vsi->rx_rings[i]));
14267+#ifdef HAVE_XDP_SUPPORT
14268+ i40e_add_rx_queue_xdp_stats(&data, READ_ONCE(vsi->rx_rings[i]));
14269+#endif
14270 }
14271 rcu_read_unlock();
14272+
14273 if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
14274- return;
14275+ goto check_data_pointer;
14276+
14277+ veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
14278+ (pf->lan_veb < I40E_MAX_VEB) &&
14279+ (pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
14280+
14281+ if (veb_stats) {
14282+ veb = pf->veb[pf->lan_veb];
14283+ i40e_update_veb_stats(veb);
14284+ }
14285+
14286+ /* If veb stats aren't enabled, pass NULL instead of the veb so that
14287+ * we initialize stats to zero and update the data pointer
14288+ * intelligently
14289+ */
14290+ i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
14291+ i40e_gstrings_veb_stats);
14292+
14293+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
14294+ i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
14295+ i40e_gstrings_veb_tc_stats);
14296
14297- if ((pf->lan_veb != I40E_NO_VEB) &&
14298- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
14299- struct i40e_veb *veb = pf->veb[pf->lan_veb];
14300+ i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
14301+
14302+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
14303+ struct i40e_pfc_stats pfc = i40e_get_pfc_stats(pf, i);
14304+
14305+ i40e_add_ethtool_stats(&data, &pfc, i40e_gstrings_pfc_stats);
14306+ }
14307+
14308+#ifndef I40E_PF_EXTRA_STATS_OFF
14309+ /* As for now, we only process the SRIOV type VSIs (as extra stats to
14310+ * PF core stats) which are correlated with VF LAN VSI (hence below,
14311+ * in this for-loop instruction block, only VF's LAN VSIs are currently
14312+ * processed).
14313+ */
14314+ for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
14315+ is_vf_valid = true;
14316+ for (vf_idx = 0; vf_idx < pf->num_alloc_vfs; vf_idx++)
14317+ if (pf->vf[vf_idx].vf_id == vf_id)
14318+ break;
14319+ if (vf_idx >= pf->num_alloc_vfs) {
14320+ dev_info(&pf->pdev->dev,
14321+ "In the PF's array, there is no VF instance with VF_ID identifier %d or it is not set/initialized correctly yet\n",
14322+ vf_id);
14323+ is_vf_valid = false;
14324+ goto check_vf;
14325+ }
14326+ vsi_idx = pf->vf[vf_idx].lan_vsi_idx;
14327
14328- for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
14329- p = (char *)veb;
14330- p += i40e_gstrings_veb_stats[j].stat_offset;
14331- data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
14332- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
14333+ vsi = pf->vsi[vsi_idx];
14334+ if (!vsi) {
14335+ /* It means empty field in the PF VSI array... */
14336+ dev_info(&pf->pdev->dev,
14337+ "No LAN VSI instance referenced by VF %d or it is not set/initialized correctly yet\n",
14338+ vf_id);
14339+ is_vf_valid = false;
14340+ goto check_vf;
14341+ }
14342+ if (vsi->vf_id != vf_id) {
14343+ dev_info(&pf->pdev->dev,
14344+ "In the PF's array, there is incorrectly set/initialized LAN VSI or reference to it from VF %d is not set/initialized correctly yet\n",
14345+ vf_id);
14346+ is_vf_valid = false;
14347+ goto check_vf;
14348+ }
14349+ if (vsi->vf_id != pf->vf[vf_idx].vf_id ||
14350+ !i40e_find_vsi_from_id(pf, pf->vf[vsi->vf_id].lan_vsi_id)) {
14351+ /* Disjointed identifiers or broken references VF-VSI */
14352+ dev_warn(&pf->pdev->dev,
14353+ "SRIOV LAN VSI (index %d in PF VSI array) with invalid VF Identifier %d (referenced by VF %d, ordered as %d in VF array)\n",
14354+ vsi_idx, pf->vsi[vsi_idx]->vf_id,
14355+ pf->vf[vf_idx].vf_id, vf_idx);
14356+ is_vf_valid = false;
14357 }
14358- for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
14359- data[i++] = veb->tc_stats.tc_tx_packets[j];
14360- data[i++] = veb->tc_stats.tc_tx_bytes[j];
14361- data[i++] = veb->tc_stats.tc_rx_packets[j];
14362- data[i++] = veb->tc_stats.tc_rx_bytes[j];
14363+check_vf:
14364+ if (!is_vf_valid) {
14365+ i40e_add_ethtool_stats(&data, NULL,
14366+ i40e_gstrings_eth_stats_extra);
14367+ } else {
14368+ i40e_update_eth_stats(vsi);
14369+ i40e_add_ethtool_stats(&data, vsi,
14370+ i40e_gstrings_eth_stats_extra);
14371 }
14372 }
14373- for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
14374- p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
14375- data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
14376- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
14377+ for (; vf_id < I40E_STATS_EXTRA_COUNT; vf_id++)
14378+ i40e_add_ethtool_stats(&data, NULL,
14379+ i40e_gstrings_eth_stats_extra);
14380+
14381+#endif /* !I40E_PF_EXTRA_STATS_OFF */
14382+check_data_pointer:
14383+ WARN_ONCE(data - p != i40e_get_stats_count(netdev),
14384+ "ethtool stats count mismatch!");
14385+}
14386+
14387+#ifndef I40E_PF_EXTRA_STATS_OFF
14388+/**
14389+ * i40e_update_vfid_in_stats - print VF num to stats names
14390+ * @stats_extra: array of stats structs with stats name strings
14391+ * @strings_num: number of stats name strings in array above (length)
14392+ * @vf_id: VF number to update stats name strings with
14393+ *
14394+ * Helper function to i40e_get_stat_strings() in case of extra stats.
14395+ **/
14396+static inline void
14397+i40e_update_vfid_in_stats(struct i40e_stats stats_extra[],
14398+ int strings_num, int vf_id)
14399+{
14400+ int i;
14401+
14402+ for (i = 0; i < strings_num; i++) {
14403+ snprintf(stats_extra[i].stat_string,
14404+ I40E_STATS_NAME_VFID_EXTRA_LEN, "vf%03d", vf_id);
14405+ stats_extra[i].stat_string[I40E_STATS_NAME_VFID_EXTRA_LEN -
14406+ 1] = '.';
14407 }
14408- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
14409- data[i++] = pf->stats.priority_xon_tx[j];
14410- data[i++] = pf->stats.priority_xoff_tx[j];
14411+}
14412+#endif /* !I40E_PF_EXTRA_STATS_OFF */
14413+/**
14414+ * i40e_get_stat_strings - copy stat strings into supplied buffer
14415+ * @netdev: the netdev to collect strings for
14416+ * @data: supplied buffer to copy strings into
14417+ *
14418+ * Copy the strings related to stats for this netdev. Expects data to be
14419+ * pre-allocated with the size reported by i40e_get_stats_count. Note that the
14420+ * strings must be copied in a static order and the total count must not
14421+ * change for a given netdev. See i40e_get_stats_count for more details.
14422+ **/
14423+static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
14424+{
14425+ struct i40e_netdev_priv *np = netdev_priv(netdev);
14426+ struct i40e_vsi *vsi = np->vsi;
14427+ struct i40e_pf *pf = vsi->back;
14428+ unsigned int i;
14429+ u8 *p = data;
14430+
14431+ i40e_add_stat_strings(&data, i40e_gstrings_net_stats);
14432+
14433+ i40e_add_stat_strings(&data, i40e_gstrings_misc_stats);
14434+
14435+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
14436+ i40e_add_stat_strings(&data, i40e_gstrings_queue_stats,
14437+ "tx", i);
14438+ i40e_add_stat_strings(&data, i40e_gstrings_queue_stats,
14439+ "rx", i);
14440+#ifdef HAVE_XDP_SUPPORT
14441+ i40e_add_stat_strings(&data, i40e_gstrings_rx_queue_xdp_stats,
14442+ "rx", i);
14443+#endif
14444 }
14445- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
14446- data[i++] = pf->stats.priority_xon_rx[j];
14447- data[i++] = pf->stats.priority_xoff_rx[j];
14448+
14449+ if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
14450+ goto check_data_pointer;
14451+
14452+ i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
14453+
14454+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
14455+ i40e_add_stat_strings(&data, i40e_gstrings_veb_tc_stats, i);
14456+
14457+ i40e_add_stat_strings(&data, i40e_gstrings_stats);
14458+
14459+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
14460+ i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
14461+
14462+#ifndef I40E_PF_EXTRA_STATS_OFF
14463+ for (i = 0; i < I40E_STATS_EXTRA_COUNT; i++) {
14464+ i40e_update_vfid_in_stats
14465+ (i40e_gstrings_eth_stats_extra,
14466+ ARRAY_SIZE(i40e_gstrings_eth_stats_extra), i);
14467+ i40e_add_stat_strings(&data, i40e_gstrings_eth_stats_extra);
14468 }
14469- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
14470- data[i++] = pf->stats.priority_xon_2_xoff[j];
14471+
14472+#endif /* !I40E_PF_EXTRA_STATS_OFF */
14473+check_data_pointer:
14474+ WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
14475+ "stat strings count mismatch!");
14476 }
14477
14478-static void i40e_get_strings(struct net_device *netdev, u32 stringset,
14479- u8 *data)
14480+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
14481+static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data)
14482 {
14483 struct i40e_netdev_priv *np = netdev_priv(netdev);
14484 struct i40e_vsi *vsi = np->vsi;
14485@@ -1639,107 +2785,47 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
14486 char *p = (char *)data;
14487 unsigned int i;
14488
14489+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
14490+ snprintf(p, ETH_GSTRING_LEN, "%s",
14491+ i40e_gstrings_priv_flags[i].flag_string);
14492+ p += ETH_GSTRING_LEN;
14493+ }
14494+ if (pf->hw.pf_id != 0)
14495+ return;
14496+ for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
14497+ snprintf(p, ETH_GSTRING_LEN, "%s",
14498+ i40e_gl_gstrings_priv_flags[i].flag_string);
14499+ p += ETH_GSTRING_LEN;
14500+ }
14501+}
14502+#endif
14503+
14504+static void i40e_get_strings(struct net_device *netdev, u32 stringset,
14505+ u8 *data)
14506+{
14507 switch (stringset) {
14508 case ETH_SS_TEST:
14509 memcpy(data, i40e_gstrings_test,
14510 I40E_TEST_LEN * ETH_GSTRING_LEN);
14511 break;
14512 case ETH_SS_STATS:
14513- for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
14514- snprintf(p, ETH_GSTRING_LEN, "%s",
14515- i40e_gstrings_net_stats[i].stat_string);
14516- p += ETH_GSTRING_LEN;
14517- }
14518- for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
14519- snprintf(p, ETH_GSTRING_LEN, "%s",
14520- i40e_gstrings_misc_stats[i].stat_string);
14521- p += ETH_GSTRING_LEN;
14522- }
14523- for (i = 0; i < vsi->num_queue_pairs; i++) {
14524- snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i);
14525- p += ETH_GSTRING_LEN;
14526- snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i);
14527- p += ETH_GSTRING_LEN;
14528- snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_packets", i);
14529- p += ETH_GSTRING_LEN;
14530- snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_bytes", i);
14531- p += ETH_GSTRING_LEN;
14532- }
14533- if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
14534- return;
14535-
14536- if ((pf->lan_veb != I40E_NO_VEB) &&
14537- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
14538- for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
14539- snprintf(p, ETH_GSTRING_LEN, "veb.%s",
14540- i40e_gstrings_veb_stats[i].stat_string);
14541- p += ETH_GSTRING_LEN;
14542- }
14543- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
14544- snprintf(p, ETH_GSTRING_LEN,
14545- "veb.tc_%d_tx_packets", i);
14546- p += ETH_GSTRING_LEN;
14547- snprintf(p, ETH_GSTRING_LEN,
14548- "veb.tc_%d_tx_bytes", i);
14549- p += ETH_GSTRING_LEN;
14550- snprintf(p, ETH_GSTRING_LEN,
14551- "veb.tc_%d_rx_packets", i);
14552- p += ETH_GSTRING_LEN;
14553- snprintf(p, ETH_GSTRING_LEN,
14554- "veb.tc_%d_rx_bytes", i);
14555- p += ETH_GSTRING_LEN;
14556- }
14557- }
14558- for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
14559- snprintf(p, ETH_GSTRING_LEN, "port.%s",
14560- i40e_gstrings_stats[i].stat_string);
14561- p += ETH_GSTRING_LEN;
14562- }
14563- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
14564- snprintf(p, ETH_GSTRING_LEN,
14565- "port.tx_priority_%d_xon", i);
14566- p += ETH_GSTRING_LEN;
14567- snprintf(p, ETH_GSTRING_LEN,
14568- "port.tx_priority_%d_xoff", i);
14569- p += ETH_GSTRING_LEN;
14570- }
14571- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
14572- snprintf(p, ETH_GSTRING_LEN,
14573- "port.rx_priority_%d_xon", i);
14574- p += ETH_GSTRING_LEN;
14575- snprintf(p, ETH_GSTRING_LEN,
14576- "port.rx_priority_%d_xoff", i);
14577- p += ETH_GSTRING_LEN;
14578- }
14579- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
14580- snprintf(p, ETH_GSTRING_LEN,
14581- "port.rx_priority_%d_xon_2_xoff", i);
14582- p += ETH_GSTRING_LEN;
14583- }
14584- /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
14585+ i40e_get_stat_strings(netdev, data);
14586 break;
14587+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
14588 case ETH_SS_PRIV_FLAGS:
14589- for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
14590- snprintf(p, ETH_GSTRING_LEN, "%s",
14591- i40e_gstrings_priv_flags[i].flag_string);
14592- p += ETH_GSTRING_LEN;
14593- }
14594- if (pf->hw.pf_id != 0)
14595- break;
14596- for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
14597- snprintf(p, ETH_GSTRING_LEN, "%s",
14598- i40e_gl_gstrings_priv_flags[i].flag_string);
14599- p += ETH_GSTRING_LEN;
14600- }
14601+ i40e_get_priv_flag_strings(netdev, data);
14602 break;
14603+#endif
14604 default:
14605 break;
14606 }
14607 }
14608
14609+#ifdef HAVE_ETHTOOL_GET_TS_INFO
14610 static int i40e_get_ts_info(struct net_device *dev,
14611 struct ethtool_ts_info *info)
14612 {
14613+#ifdef HAVE_PTP_1588_CLOCK
14614 struct i40e_pf *pf = i40e_netdev_to_pf(dev);
14615
14616 /* only report HW timestamping if PTP is enabled */
14617@@ -1776,9 +2862,13 @@ static int i40e_get_ts_info(struct net_device *dev,
14618 BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
14619
14620 return 0;
14621+#else /* HAVE_PTP_1588_CLOCK */
14622+ return ethtool_op_get_ts_info(dev, info);
14623+#endif /* HAVE_PTP_1588_CLOCK */
14624 }
14625
14626-static int i40e_link_test(struct net_device *netdev, u64 *data)
14627+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
14628+static u64 i40e_link_test(struct net_device *netdev, u64 *data)
14629 {
14630 struct i40e_netdev_priv *np = netdev_priv(netdev);
14631 struct i40e_pf *pf = np->vsi->back;
14632@@ -1787,7 +2877,7 @@ static int i40e_link_test(struct net_device *netdev, u64 *data)
14633
14634 netif_info(pf, hw, netdev, "link test\n");
14635 status = i40e_get_link_status(&pf->hw, &link_up);
14636- if (status) {
14637+ if (status != I40E_SUCCESS) {
14638 netif_err(pf, drv, netdev, "link query timed out, please retry test\n");
14639 *data = 1;
14640 return *data;
14641@@ -1801,7 +2891,7 @@ static int i40e_link_test(struct net_device *netdev, u64 *data)
14642 return *data;
14643 }
14644
14645-static int i40e_reg_test(struct net_device *netdev, u64 *data)
14646+static u64 i40e_reg_test(struct net_device *netdev, u64 *data)
14647 {
14648 struct i40e_netdev_priv *np = netdev_priv(netdev);
14649 struct i40e_pf *pf = np->vsi->back;
14650@@ -1812,7 +2902,7 @@ static int i40e_reg_test(struct net_device *netdev, u64 *data)
14651 return *data;
14652 }
14653
14654-static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
14655+static u64 i40e_eeprom_test(struct net_device *netdev, u64 *data)
14656 {
14657 struct i40e_netdev_priv *np = netdev_priv(netdev);
14658 struct i40e_pf *pf = np->vsi->back;
14659@@ -1826,7 +2916,7 @@ static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
14660 return *data;
14661 }
14662
14663-static int i40e_intr_test(struct net_device *netdev, u64 *data)
14664+static u64 i40e_intr_test(struct net_device *netdev, u64 *data)
14665 {
14666 struct i40e_netdev_priv *np = netdev_priv(netdev);
14667 struct i40e_pf *pf = np->vsi->back;
14668@@ -1845,6 +2935,13 @@ static int i40e_intr_test(struct net_device *netdev, u64 *data)
14669 return *data;
14670 }
14671
14672+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
14673+static int i40e_diag_test_count(struct net_device *netdev)
14674+{
14675+ return I40E_TEST_LEN;
14676+}
14677+
14678+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
14679 static inline bool i40e_active_vfs(struct i40e_pf *pf)
14680 {
14681 struct i40e_vf *vfs = pf->vf;
14682@@ -1993,11 +3090,12 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
14683 return 0;
14684 }
14685
14686+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
14687 static int i40e_set_phys_id(struct net_device *netdev,
14688 enum ethtool_phys_id_state state)
14689 {
14690 struct i40e_netdev_priv *np = netdev_priv(netdev);
14691- i40e_status ret = 0;
14692+ i40e_status ret = I40E_SUCCESS;
14693 struct i40e_pf *pf = np->vsi->back;
14694 struct i40e_hw *hw = &pf->hw;
14695 int blink_freq = 2;
14696@@ -2008,7 +3106,9 @@ static int i40e_set_phys_id(struct net_device *netdev,
14697 if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
14698 pf->led_status = i40e_led_get(hw);
14699 } else {
14700- i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
14701+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
14702+ i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL,
14703+ NULL);
14704 ret = i40e_led_get_phy(hw, &temp_status,
14705 &pf->phy_led_val);
14706 pf->led_status = temp_status;
14707@@ -2033,7 +3133,8 @@ static int i40e_set_phys_id(struct net_device *netdev,
14708 ret = i40e_led_set_phy(hw, false, pf->led_status,
14709 (pf->phy_led_val |
14710 I40E_PHY_LED_MODE_ORIG));
14711- i40e_aq_set_phy_debug(hw, 0, NULL);
14712+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
14713+ i40e_aq_set_phy_debug(hw, 0, NULL);
14714 }
14715 break;
14716 default:
14717@@ -2044,6 +3145,53 @@ static int i40e_set_phys_id(struct net_device *netdev,
14718 else
14719 return 0;
14720 }
14721+#else /* HAVE_ETHTOOL_SET_PHYS_ID */
14722+static int i40e_phys_id(struct net_device *netdev, u32 data)
14723+{
14724+ struct i40e_netdev_priv *np = netdev_priv(netdev);
14725+ struct i40e_pf *pf = np->vsi->back;
14726+ struct i40e_hw *hw = &pf->hw;
14727+ i40e_status ret = I40E_SUCCESS;
14728+ u16 temp_status;
14729+ int i;
14730+
14731+ if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
14732+ pf->led_status = i40e_led_get(hw);
14733+ } else {
14734+ ret = i40e_led_get_phy(hw, &temp_status,
14735+ &pf->phy_led_val);
14736+ pf->led_status = temp_status;
14737+ }
14738+
14739+ if (!data || data > 300)
14740+ data = 300;
14741+
14742+	/* 10GBaseT PHY controls LEDs through PHY, not MAC */
14743+ for (i = 0; i < (data * 1000); i += 400) {
14744+ if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
14745+ i40e_led_set(hw, 0xF, false);
14746+ else
14747+ ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
14748+ msleep_interruptible(200);
14749+ if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
14750+ i40e_led_set(hw, 0x0, false);
14751+ else
14752+ ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
14753+ msleep_interruptible(200);
14754+ }
14755+ if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
14756+ i40e_led_set(hw, pf->led_status, false);
14757+ else
14758+ ret = i40e_led_set_phy(hw, false, pf->led_status,
14759+ (pf->led_status |
14760+ I40E_PHY_LED_MODE_ORIG));
14761+
14762+ if (ret)
14763+ return -ENOENT;
14764+ else
14765+ return 0;
14766+}
14767+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
14768
14769 /* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
14770 * Throttle Rate (ITR) ie. ITR(1) = 2us ITR(10) = 20 us, and also
14771@@ -2071,27 +3219,25 @@ static int __i40e_get_coalesce(struct net_device *netdev,
14772 ec->tx_max_coalesced_frames_irq = vsi->work_limit;
14773 ec->rx_max_coalesced_frames_irq = vsi->work_limit;
14774
14775- /* rx and tx usecs has per queue value. If user doesn't specify the queue,
14776- * return queue 0's value to represent.
14777+	/* rx and tx usecs have per-queue values. If the user doesn't specify
14778+	 * the queue, return queue 0's value to represent them.
14779 */
14780- if (queue < 0) {
14781+ if (queue < 0)
14782 queue = 0;
14783- } else if (queue >= vsi->num_queue_pairs) {
14784+ else if (queue >= vsi->num_queue_pairs)
14785 return -EINVAL;
14786- }
14787
14788 rx_ring = vsi->rx_rings[queue];
14789 tx_ring = vsi->tx_rings[queue];
14790
14791- if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
14792+ if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
14793 ec->use_adaptive_rx_coalesce = 1;
14794
14795- if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
14796+ if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
14797 ec->use_adaptive_tx_coalesce = 1;
14798
14799- ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
14800- ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
14801-
14802+ ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
14803+ ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
14804
14805 /* we use the _usecs_high to store/set the interrupt rate limit
14806 * that the hardware supports, that almost but not quite
14807@@ -2120,6 +3266,7 @@ static int i40e_get_coalesce(struct net_device *netdev,
14808 return __i40e_get_coalesce(netdev, ec, -1);
14809 }
14810
14811+#ifdef ETHTOOL_PERQUEUE
14812 /**
14813 * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
14814 * @netdev: netdev structure
14815@@ -2134,6 +3281,7 @@ static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
14816 return __i40e_get_coalesce(netdev, ec, queue);
14817 }
14818
14819+#endif /* ETHTOOL_PERQUEUE */
14820 /**
14821 * i40e_set_itr_per_queue - set ITR values for specific queue
14822 * @vsi: the VSI to set values for
14823@@ -2142,45 +3290,93 @@ static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
14824 *
14825 * Change the ITR settings for a specific queue.
14826 **/
14827-
14828 static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
14829 struct ethtool_coalesce *ec,
14830 int queue)
14831 {
14832+ struct i40e_ring *rx_ring = vsi->rx_rings[queue];
14833+ struct i40e_ring *tx_ring = vsi->tx_rings[queue];
14834 struct i40e_pf *pf = vsi->back;
14835 struct i40e_hw *hw = &pf->hw;
14836 struct i40e_q_vector *q_vector;
14837- u16 vector, intrl;
14838+ u16 intrl;
14839
14840 intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
14841
14842- vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
14843- vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
14844+ rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
14845+ tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
14846
14847 if (ec->use_adaptive_rx_coalesce)
14848- vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC;
14849+ rx_ring->itr_setting |= I40E_ITR_DYNAMIC;
14850 else
14851- vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
14852+ rx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;
14853
14854 if (ec->use_adaptive_tx_coalesce)
14855- vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC;
14856+ tx_ring->itr_setting |= I40E_ITR_DYNAMIC;
14857 else
14858- vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
14859+ tx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;
14860+
14861+ q_vector = rx_ring->q_vector;
14862+ q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
14863
14864- q_vector = vsi->rx_rings[queue]->q_vector;
14865- q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting);
14866- vector = vsi->base_vector + q_vector->v_idx;
14867- wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
14868+ q_vector = tx_ring->q_vector;
14869+ q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
14870
14871- q_vector = vsi->tx_rings[queue]->q_vector;
14872- q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting);
14873- vector = vsi->base_vector + q_vector->v_idx;
14874- wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
14875+ /* The interrupt handler itself will take care of programming
14876+ * the Tx and Rx ITR values based on the values we have entered
14877+ * into the q_vector, no need to write the values now.
14878+ */
14879
14880- wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
14881+ wr32(hw, I40E_PFINT_RATEN(q_vector->reg_idx), intrl);
14882 i40e_flush(hw);
14883 }
14884
14885+/**
14886+ * i40e_is_coalesce_param_invalid - check for unsupported coalesce parameters
14887+ * @netdev: pointer to the netdev associated with this query
14888+ * @ec: ethtool coalesce settings to check
14889+ *
14890+ * Print netdev info and return an error if the driver doesn't support one
14891+ * of the parameters. When any of these parameters is implemented, remove
14892+ * only that parameter from the param array.
14893+ */
14894+static
14895+int i40e_is_coalesce_param_invalid(struct net_device *netdev,
14896+ struct ethtool_coalesce *ec)
14897+{
14898+ struct i40e_ethtool_not_used {
14899+ u32 value;
14900+ const char *name;
14901+ } param[] = {
14902+ {ec->stats_block_coalesce_usecs, "stats-block-usecs"},
14903+ {ec->rate_sample_interval, "sample-interval"},
14904+ {ec->pkt_rate_low, "pkt-rate-low"},
14905+ {ec->pkt_rate_high, "pkt-rate-high"},
14906+ {ec->rx_max_coalesced_frames, "rx-frames"},
14907+ {ec->rx_coalesce_usecs_irq, "rx-usecs-irq"},
14908+ {ec->tx_max_coalesced_frames, "tx-frames"},
14909+ {ec->tx_coalesce_usecs_irq, "tx-usecs-irq"},
14910+ {ec->rx_coalesce_usecs_low, "rx-usecs-low"},
14911+ {ec->rx_max_coalesced_frames_low, "rx-frames-low"},
14912+ {ec->tx_coalesce_usecs_low, "tx-usecs-low"},
14913+ {ec->tx_max_coalesced_frames_low, "tx-frames-low"},
14914+ {ec->rx_max_coalesced_frames_high, "rx-frames-high"},
14915+ {ec->tx_max_coalesced_frames_high, "tx-frames-high"}
14916+ };
14917+ int i;
14918+
14919+ for (i = 0; i < ARRAY_SIZE(param); i++) {
14920+ if (param[i].value) {
14921+ netdev_info(netdev,
14922+ "Setting %s not supported\n",
14923+ param[i].name);
14924+ return -EOPNOTSUPP;
14925+ }
14926+ }
14927+
14928+ return 0;
14929+}
14930+
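Note on the hunk above: i40e_is_coalesce_param_invalid() rejects any ethtool coalesce field the driver does not implement by walking a value/name table. A minimal standalone sketch of the same table-driven check is below; the struct and field names are illustrative only, not part of the driver.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical subset of the ethtool coalesce parameters. */
struct coalesce_req {
	unsigned int rx_usecs;      /* supported */
	unsigned int pkt_rate_low;  /* not supported */
	unsigned int rx_frames;     /* not supported */
};

/* Reject the request if any unsupported field is non-zero. */
static int check_unsupported(const struct coalesce_req *req)
{
	const struct { unsigned int value; const char *name; } param[] = {
		{ req->pkt_rate_low, "pkt-rate-low" },
		{ req->rx_frames,    "rx-frames"    },
	};
	size_t i;

	for (i = 0; i < sizeof(param) / sizeof(param[0]); i++) {
		if (param[i].value) {
			fprintf(stderr, "Setting %s not supported\n",
				param[i].name);
			return -1;	/* the driver returns -EOPNOTSUPP */
		}
	}
	return 0;
}

int main(void)
{
	struct coalesce_req ok  = { .rx_usecs = 50 };
	struct coalesce_req bad = { .rx_usecs = 50, .pkt_rate_low = 10 };

	printf("ok:  %d\n", check_unsupported(&ok));   /* prints 0  */
	printf("bad: %d\n", check_unsupported(&bad));  /* prints -1 */
	return 0;
}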
14931 /**
14932 * __i40e_set_coalesce - set coalesce settings for particular queue
14933 * @netdev: the netdev to change
14934@@ -2199,15 +3395,18 @@ static int __i40e_set_coalesce(struct net_device *netdev,
14935 struct i40e_pf *pf = vsi->back;
14936 int i;
14937
14938+ if (i40e_is_coalesce_param_invalid(netdev, ec))
14939+ return -EOPNOTSUPP;
14940+
14941 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
14942 vsi->work_limit = ec->tx_max_coalesced_frames_irq;
14943
14944 if (queue < 0) {
14945- cur_rx_itr = vsi->rx_rings[0]->rx_itr_setting;
14946- cur_tx_itr = vsi->tx_rings[0]->tx_itr_setting;
14947+ cur_rx_itr = vsi->rx_rings[0]->itr_setting;
14948+ cur_tx_itr = vsi->tx_rings[0]->itr_setting;
14949 } else if (queue < vsi->num_queue_pairs) {
14950- cur_rx_itr = vsi->rx_rings[queue]->rx_itr_setting;
14951- cur_tx_itr = vsi->tx_rings[queue]->tx_itr_setting;
14952+ cur_rx_itr = vsi->rx_rings[queue]->itr_setting;
14953+ cur_tx_itr = vsi->tx_rings[queue]->itr_setting;
14954 } else {
14955 netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
14956 vsi->num_queue_pairs - 1);
14957@@ -2235,7 +3434,7 @@ static int __i40e_set_coalesce(struct net_device *netdev,
14958 return -EINVAL;
14959 }
14960
14961- if (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)) {
14962+ if (ec->rx_coalesce_usecs > I40E_MAX_ITR) {
14963 netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
14964 return -EINVAL;
14965 }
14966@@ -2246,16 +3445,16 @@ static int __i40e_set_coalesce(struct net_device *netdev,
14967 return -EINVAL;
14968 }
14969
14970- if (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)) {
14971+ if (ec->tx_coalesce_usecs > I40E_MAX_ITR) {
14972 netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
14973 return -EINVAL;
14974 }
14975
14976 if (ec->use_adaptive_rx_coalesce && !cur_rx_itr)
14977- ec->rx_coalesce_usecs = I40E_MIN_ITR << 1;
14978+ ec->rx_coalesce_usecs = I40E_MIN_ITR;
14979
14980 if (ec->use_adaptive_tx_coalesce && !cur_tx_itr)
14981- ec->tx_coalesce_usecs = I40E_MIN_ITR << 1;
14982+ ec->tx_coalesce_usecs = I40E_MIN_ITR;
14983
14984 intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high);
14985 vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg);
14986@@ -2264,8 +3463,8 @@ static int __i40e_set_coalesce(struct net_device *netdev,
14987 vsi->int_rate_limit);
14988 }
14989
14990- /* rx and tx usecs has per queue value. If user doesn't specify the queue,
14991- * apply to all queues.
14992+	/* rx and tx usecs have per-queue values. If the user doesn't specify
14993+	 * the queue, apply to all queues.
14994 */
14995 if (queue < 0) {
14996 for (i = 0; i < vsi->num_queue_pairs; i++)
14997@@ -2290,6 +3489,20 @@ static int i40e_set_coalesce(struct net_device *netdev,
14998 return __i40e_set_coalesce(netdev, ec, -1);
14999 }
15000
15001+#ifdef ETHTOOL_SRXNTUPLE
15002+/* We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid
15003+ * a null pointer dereference: it was assumed that if the NETIF_F_NTUPLE
15004+ * flag was defined, this function was present.
15005+ */
15006+static int i40e_set_rx_ntuple(struct net_device *dev,
15007+ struct ethtool_rx_ntuple *cmd)
15008+{
15009+ return -EOPNOTSUPP;
15010+}
15011+
15012+#endif /* ETHTOOL_SRXNTUPLE */
15013+
15014+#ifdef ETHTOOL_PERQUEUE
15015 /**
15016 * i40e_set_per_queue_coalesce - set specific queue's coalesce settings
15017 * @netdev: the netdev to change
15018@@ -2303,7 +3516,9 @@ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
15019 {
15020 return __i40e_set_coalesce(netdev, ec, queue);
15021 }
15022+#endif /* ETHTOOL_PERQUEUE */
15023
15024+#ifdef ETHTOOL_GRXRINGS
15025 /**
15026 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
15027 * @pf: pointer to the physical function struct
15028@@ -2385,7 +3600,7 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
15029 /**
15030 * i40e_check_mask - Check whether a mask field is set
15031 * @mask: the full mask value
15032- * @field; mask of the field to check
15033+ * @field: mask of the field to check
15034 *
15035 * If the given mask is fully set, return positive value. If the mask for the
15036 * field is fully unset, return zero. Otherwise return a negative error code.
15037@@ -2436,18 +3651,69 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
15038 value = be64_to_cpu(*((__be64 *)fsp->h_ext.data));
15039 mask = be64_to_cpu(*((__be64 *)fsp->m_ext.data));
15040
15041-#define I40E_USERDEF_FLEX_WORD GENMASK_ULL(15, 0)
15042-#define I40E_USERDEF_FLEX_OFFSET GENMASK_ULL(31, 16)
15043+#define I40E_USERDEF_CLOUD_FILTER BIT_ULL(63)
15044+
15045+#define I40E_USERDEF_CLOUD_RESERVED GENMASK_ULL(62, 32)
15046+#define I40E_USERDEF_TUNNEL_TYPE GENMASK_ULL(31, 24)
15047+#define I40E_USERDEF_TENANT_ID GENMASK_ULL(23, 0)
15048+
15049+#define I40E_USERDEF_RESERVED GENMASK_ULL(62, 32)
15050 #define I40E_USERDEF_FLEX_FILTER GENMASK_ULL(31, 0)
15051
15052- valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER);
15053- if (valid < 0) {
15054- return -EINVAL;
15055- } else if (valid) {
15056- data->flex_word = value & I40E_USERDEF_FLEX_WORD;
15057- data->flex_offset =
15058- (value & I40E_USERDEF_FLEX_OFFSET) >> 16;
15059- data->flex_filter = true;
15060+#define I40E_USERDEF_FLEX_OFFSET GENMASK_ULL(31, 16)
15061+#define I40E_USERDEF_FLEX_WORD GENMASK_ULL(15, 0)
15062+
15063+ if ((mask & I40E_USERDEF_CLOUD_FILTER) &&
15064+ (value & I40E_USERDEF_CLOUD_FILTER))
15065+ data->cloud_filter = true;
15066+
15067+ if (data->cloud_filter) {
15068+ /* Make sure that the reserved bits are not set */
15069+ valid = i40e_check_mask(mask, I40E_USERDEF_CLOUD_RESERVED);
15070+ if (valid < 0) {
15071+ return -EINVAL;
15072+ } else if (valid) {
15073+ if ((value & I40E_USERDEF_CLOUD_RESERVED) != 0)
15074+ return -EINVAL;
15075+ }
15076+
15077+ /* These fields are only valid if this is a cloud filter */
15078+ valid = i40e_check_mask(mask, I40E_USERDEF_TENANT_ID);
15079+ if (valid < 0) {
15080+ return -EINVAL;
15081+ } else if (valid) {
15082+ data->tenant_id = value & I40E_USERDEF_TENANT_ID;
15083+ data->tenant_id_valid = true;
15084+ }
15085+
15086+ valid = i40e_check_mask(mask, I40E_USERDEF_TUNNEL_TYPE);
15087+ if (valid < 0) {
15088+ return -EINVAL;
15089+ } else if (valid) {
15090+ data->tunnel_type =
15091+ (value & I40E_USERDEF_TUNNEL_TYPE) >> 24;
15092+ data->tunnel_type_valid = true;
15093+ }
15094+ } else {
15095+ /* Make sure that the reserved bits are not set */
15096+ valid = i40e_check_mask(mask, I40E_USERDEF_RESERVED);
15097+ if (valid < 0) {
15098+ return -EINVAL;
15099+ } else if (valid) {
15100+ if ((value & I40E_USERDEF_RESERVED) != 0)
15101+ return -EINVAL;
15102+ }
15103+
15104+ /* These fields are only valid if this isn't a cloud filter */
15105+ valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER);
15106+ if (valid < 0) {
15107+ return -EINVAL;
15108+ } else if (valid) {
15109+ data->flex_word = value & I40E_USERDEF_FLEX_WORD;
15110+ data->flex_offset =
15111+ (value & I40E_USERDEF_FLEX_OFFSET) >> 16;
15112+ data->flex_filter = true;
15113+ }
15114 }
15115
15116 return 0;
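Note on the bit layout introduced above: bit 63 of the ethtool user-def field marks a cloud filter, bits 31:24 carry the tunnel type and bits 23:0 the tenant ID; for non-cloud filters the low 32 bits carry the flex offset/word instead. The standalone sketch below packs such a value/mask pair the way a userspace caller conceptually would; the macro and function names are illustrative, not driver or ethtool API names.

#include <stdint.h>
#include <stdio.h>

#define USERDEF_CLOUD_FILTER (1ULL << 63)
#define USERDEF_TUNNEL_TYPE  (0xffULL << 24)  /* bits 31:24 */
#define USERDEF_TENANT_ID    0xffffffULL      /* bits 23:0  */

/* Pack a cloud-filter user-def value/mask pair. */
static void pack_cloud_userdef(uint8_t tunnel_type, uint32_t tenant_id,
			       uint64_t *value, uint64_t *mask)
{
	*value = USERDEF_CLOUD_FILTER |
		 ((uint64_t)tunnel_type << 24) |
		 (tenant_id & USERDEF_TENANT_ID);
	*mask  = USERDEF_CLOUD_FILTER | USERDEF_TUNNEL_TYPE |
		 USERDEF_TENANT_ID;
}

int main(void)
{
	uint64_t value, mask;

	pack_cloud_userdef(/*tunnel_type=*/1, /*tenant_id=*/0x1234,
			   &value, &mask);
	printf("value=0x%016llx mask=0x%016llx\n",
	       (unsigned long long)value, (unsigned long long)mask);
	return 0;
}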
15117@@ -2456,6 +3722,7 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
15118 /**
15119 * i40e_fill_rx_flow_user_data - Fill in user-defined data field
15120 * @fsp: pointer to rx_flow specification
15121+ * @data: pointer to return userdef data
15122 *
15123 * Reads the userdef data structure and properly fills in the user defined
15124 * fields of the rx_flow_spec.
15125@@ -2465,10 +3732,25 @@ static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
15126 {
15127 u64 value = 0, mask = 0;
15128
15129- if (data->flex_filter) {
15130- value |= data->flex_word;
15131- value |= (u64)data->flex_offset << 16;
15132- mask |= I40E_USERDEF_FLEX_FILTER;
15133+ if (data->cloud_filter) {
15134+ value |= I40E_USERDEF_CLOUD_FILTER;
15135+ mask |= I40E_USERDEF_CLOUD_FILTER;
15136+
15137+ if (data->tenant_id_valid) {
15138+ value |= data->tenant_id;
15139+ mask |= I40E_USERDEF_TENANT_ID;
15140+ }
15141+
15142+ if (data->tunnel_type_valid) {
15143+ value |= (u64)data->tunnel_type << 24;
15144+ mask |= I40E_USERDEF_TUNNEL_TYPE;
15145+ }
15146+ } else {
15147+ if (data->flex_filter) {
15148+ value |= data->flex_word;
15149+ value |= (u64)data->flex_offset << 16;
15150+ mask |= I40E_USERDEF_FLEX_FILTER;
15151+ }
15152 }
15153
15154 if (value || mask)
15155@@ -2479,7 +3761,7 @@ static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
15156 }
15157
15158 /**
15159- * i40e_get_ethtool_fdir_all - Populates the rule count of a command
15160+ * i40e_get_rx_filter_ids - Populates the rule count of a command
15161 * @pf: Pointer to the physical function struct
15162 * @cmd: The command to get or set Rx flow classification rules
15163 * @rule_locs: Array of used rule locations
15164@@ -2489,23 +3771,34 @@ static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
15165 *
15166 * Returns 0 on success or -EMSGSIZE if entry not found
15167 **/
15168-static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
15169- struct ethtool_rxnfc *cmd,
15170- u32 *rule_locs)
15171+static int i40e_get_rx_filter_ids(struct i40e_pf *pf,
15172+ struct ethtool_rxnfc *cmd,
15173+ u32 *rule_locs)
15174 {
15175- struct i40e_fdir_filter *rule;
15176+ struct i40e_fdir_filter *f_rule;
15177+ struct i40e_cloud_filter *c_rule;
15178 struct hlist_node *node2;
15179- int cnt = 0;
15180+ unsigned int cnt = 0;
15181
15182 /* report total rule count */
15183 cmd->data = i40e_get_fd_cnt_all(pf);
15184
15185- hlist_for_each_entry_safe(rule, node2,
15186+ hlist_for_each_entry_safe(f_rule, node2,
15187 &pf->fdir_filter_list, fdir_node) {
15188 if (cnt == cmd->rule_cnt)
15189 return -EMSGSIZE;
15190
15191- rule_locs[cnt] = rule->fd_id;
15192+ rule_locs[cnt] = f_rule->fd_id;
15193+ cnt++;
15194+ }
15195+
15196+ /* find the cloud filter rule ids */
15197+ hlist_for_each_entry_safe(c_rule, node2,
15198+ &pf->cloud_filter_list, cloud_node) {
15199+ if (cnt == cmd->rule_cnt)
15200+ return -EMSGSIZE;
15201+
15202+ rule_locs[cnt] = c_rule->id;
15203 cnt++;
15204 }
15205
15206@@ -2588,16 +3881,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
15207
15208 no_input_set:
15209 if (input_set & I40E_L3_SRC_MASK)
15210- fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFF);
15211+ fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
15212
15213 if (input_set & I40E_L3_DST_MASK)
15214- fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFF);
15215+ fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
15216
15217 if (input_set & I40E_L4_SRC_MASK)
15218- fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFFFFFF);
15219+ fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
15220
15221 if (input_set & I40E_L4_DST_MASK)
15222- fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFFFFFF);
15223+ fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
15224
15225 if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
15226 fsp->ring_cookie = RX_CLS_FLOW_DISC;
15227@@ -2630,15 +3923,114 @@ no_input_set:
15228 return 0;
15229 }
15230
15231+#define VXLAN_PORT 8472
15232+
15233+/**
15234+ * i40e_get_cloud_filter_entry - get a cloud filter by loc
15235+ * @pf: pointer to the physical function struct
15236+ * @cmd: The command to get or set Rx flow classification rules
15237+ *
15238+ * get cloud filter by loc.
15239+ * Returns 0 on success.
15240+ **/
15241+static int i40e_get_cloud_filter_entry(struct i40e_pf *pf,
15242+ struct ethtool_rxnfc *cmd)
15243+{
15244+ struct ethtool_rx_flow_spec *fsp =
15245+ (struct ethtool_rx_flow_spec *)&cmd->fs;
15246+ static const u8 mac_broadcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
15247+ struct i40e_cloud_filter *rule, *filter = NULL;
15248+ struct i40e_rx_flow_userdef userdef = {0};
15249+ struct hlist_node *node2;
15250+
15251+ hlist_for_each_entry_safe(rule, node2,
15252+ &pf->cloud_filter_list, cloud_node) {
15253+ /* filter found */
15254+ if (rule->id == fsp->location)
15255+ filter = rule;
15256+
15257+ /* bail out if we've passed the likely location in the list */
15258+ if (rule->id >= fsp->location)
15259+ break;
15260+
15261+ }
15262+ if (!filter) {
15263+ dev_info(&pf->pdev->dev, "No cloud filter with loc %d\n",
15264+ fsp->location);
15265+ return -ENOENT;
15266+ }
15267+
15268+ userdef.cloud_filter = true;
15269+
15270+ fsp->ring_cookie = filter->queue_id;
15271+ if (filter->seid != pf->vsi[pf->lan_vsi]->seid) {
15272+ struct i40e_vsi *vsi;
15273+
15274+ vsi = i40e_find_vsi_from_seid(pf, filter->seid);
15275+ if (vsi && vsi->type == I40E_VSI_SRIOV) {
15276+ /* VFs are zero-indexed by the driver, but ethtool
15277+ * expects them to be one-indexed, so add one here
15278+ */
15279+ u64 ring_vf = vsi->vf_id + 1;
15280+
15281+ ring_vf <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
15282+ fsp->ring_cookie |= ring_vf;
15283+ }
15284+ }
15285+
15286+ ether_addr_copy(fsp->h_u.ether_spec.h_dest, filter->outer_mac);
15287+ ether_addr_copy(fsp->h_u.ether_spec.h_source, filter->inner_mac);
15288+
15289+ if (filter->flags & I40E_CLOUD_FIELD_OMAC)
15290+ ether_addr_copy(fsp->m_u.ether_spec.h_dest, mac_broadcast);
15291+ if (filter->flags & I40E_CLOUD_FIELD_IMAC)
15292+ ether_addr_copy(fsp->m_u.ether_spec.h_source, mac_broadcast);
15293+ if (filter->flags & I40E_CLOUD_FIELD_IVLAN)
15294+ fsp->h_ext.vlan_tci = filter->inner_vlan;
15295+ if (filter->flags & I40E_CLOUD_FIELD_TEN_ID) {
15296+ userdef.tenant_id_valid = true;
15297+ userdef.tenant_id = filter->tenant_id;
15298+ }
15299+ if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE) {
15300+ userdef.tunnel_type_valid = true;
15301+ userdef.tunnel_type = filter->tunnel_type;
15302+ }
15303+
15304+ if (filter->flags & I40E_CLOUD_FIELD_IIP) {
15305+ if (i40e_is_l4mode_enabled()) {
15306+ fsp->flow_type = UDP_V4_FLOW;
15307+ fsp->h_u.udp_ip4_spec.pdst = filter->dst_port;
15308+ } else {
15309+ fsp->flow_type = IP_USER_FLOW;
15310+ }
15311+
15312+ fsp->h_u.usr_ip4_spec.ip4dst = filter->inner_ip[0];
15313+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
15314+ } else {
15315+ fsp->flow_type = ETHER_FLOW;
15316+ }
15317+
15318+ i40e_fill_rx_flow_user_data(fsp, &userdef);
15319+
15320+ fsp->flow_type |= FLOW_EXT;
15321+
15322+ return 0;
15323+}
15324+
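Note on i40e_get_cloud_filter_entry() above: when the filter targets a VF, the VF index is reported one-based in the upper bits of ring_cookie (shifted by ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF), with the queue index in the low bits. A small standalone sketch of that encoding follows; the local constants mirror the ethtool UAPI values and the helper names are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the ethtool UAPI layout of ring_cookie (linux/ethtool.h). */
#define RING_VF_OFF  32
#define RING_VF_MASK (0xffULL << RING_VF_OFF)

/* Encode queue + (zero-based) VF into a ring_cookie the way the driver
 * reports it: the VF field is stored one-based so that 0 means "no VF". */
static uint64_t encode_ring_cookie(uint32_t queue, uint32_t vf_zero_based)
{
	uint64_t cookie = queue;

	cookie |= ((uint64_t)(vf_zero_based + 1) << RING_VF_OFF) & RING_VF_MASK;
	return cookie;
}

static uint32_t cookie_to_vf(uint64_t cookie)	/* 0 means "no VF" */
{
	return (uint32_t)((cookie & RING_VF_MASK) >> RING_VF_OFF);
}

int main(void)
{
	uint64_t cookie = encode_ring_cookie(/*queue=*/3, /*vf=*/0);

	/* VF 0 in the driver shows up as VF 1 in ethtool output. */
	printf("cookie=0x%llx vf=%u queue=%u\n",
	       (unsigned long long)cookie, cookie_to_vf(cookie),
	       (uint32_t)(cookie & 0xffffffffULL));
	return 0;
}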
15325 /**
15326 * i40e_get_rxnfc - command to get RX flow classification rules
15327 * @netdev: network interface device structure
15328 * @cmd: ethtool rxnfc command
15329+ * @rule_locs: pointer to store rule data
15330 *
15331 * Returns Success if the command is supported.
15332 **/
15333 static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
15334+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
15335+ void *rule_locs)
15336+#else
15337 u32 *rule_locs)
15338+#endif
15339 {
15340 struct i40e_netdev_priv *np = netdev_priv(netdev);
15341 struct i40e_vsi *vsi = np->vsi;
15342@@ -2647,7 +4039,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
15343
15344 switch (cmd->cmd) {
15345 case ETHTOOL_GRXRINGS:
15346- cmd->data = vsi->num_queue_pairs;
15347+ cmd->data = vsi->rss_size;
15348 ret = 0;
15349 break;
15350 case ETHTOOL_GRXFH:
15351@@ -2655,15 +4047,23 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
15352 break;
15353 case ETHTOOL_GRXCLSRLCNT:
15354 cmd->rule_cnt = pf->fdir_pf_active_filters;
15355+ cmd->rule_cnt += pf->num_cloud_filters;
15356 /* report total rule count */
15357 cmd->data = i40e_get_fd_cnt_all(pf);
15358 ret = 0;
15359 break;
15360 case ETHTOOL_GRXCLSRULE:
15361 ret = i40e_get_ethtool_fdir_entry(pf, cmd);
15362+ /* if no such fdir filter then try the cloud list */
15363+ if (ret)
15364+ ret = i40e_get_cloud_filter_entry(pf, cmd);
15365 break;
15366 case ETHTOOL_GRXCLSRLALL:
15367- ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
15368+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
15369+ ret = i40e_get_rx_filter_ids(pf, cmd, (u32 *)rule_locs);
15370+#else
15371+ ret = i40e_get_rx_filter_ids(pf, cmd, rule_locs);
15372+#endif
15373 break;
15374 default:
15375 break;
15376@@ -2675,7 +4075,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
15377 /**
15378 * i40e_get_rss_hash_bits - Read RSS Hash bits from register
15379 * @nfc: pointer to user request
15380- * @i_setc bits currently set
15381+ * @i_setc: bits currently set
15382 *
15383 * Returns value of bits to be set per user request
15384 **/
15385@@ -2720,7 +4120,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
15386 /**
15387 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
15388 * @pf: pointer to the physical function struct
15389- * @cmd: ethtool rxnfc command
15390+ * @nfc: ethtool rxnfc command
15391 *
15392 * Returns Success if the flow input set is supported.
15393 **/
15394@@ -2829,12 +4229,325 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
15395 return 0;
15396 }
15397
15398+ * i40e_cloud_filter_mask2flags - Convert cloud filter details to filter type
15399+ * i40e_cloud_filter_mask2flags- Convert cloud filter details to filter type
15400+ * @pf: pointer to the physical function struct
15401+ * @fsp: RX flow classification rules
15402+ * @userdef: pointer to userdef field data
15403+ * @flags: Resultant combination of all the fields to decide the tuple
15404+ *
15405+ * The general trick in setting these flags is that if the mask field for
15406+ * a value is non-zero, then the field itself was set to something, so we
15407+ * use this to tell us what has been selected.
15408+ *
15409+ * Returns 0 if a valid filter type was identified.
15410+ **/
15411+static int i40e_cloud_filter_mask2flags(struct i40e_pf *pf,
15412+ struct ethtool_rx_flow_spec *fsp,
15413+ struct i40e_rx_flow_userdef *userdef,
15414+ u8 *flags)
15415+{
15416+ u8 i = 0;
15417+
15418+ *flags = 0;
15419+
15420+ switch (fsp->flow_type & ~FLOW_EXT) {
15421+ case ETHER_FLOW:
15422+ /* use is_broadcast and is_zero to check for all 0xf or 0 */
15423+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
15424+ i |= I40E_CLOUD_FIELD_OMAC;
15425+ } else if (is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
15426+ i &= ~I40E_CLOUD_FIELD_OMAC;
15427+ } else {
15428+ dev_info(&pf->pdev->dev, "Bad ether dest mask %pM\n",
15429+ fsp->m_u.ether_spec.h_dest);
15430+ return I40E_ERR_CONFIG;
15431+ }
15432+
15433+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
15434+ i |= I40E_CLOUD_FIELD_IMAC;
15435+ } else if (is_zero_ether_addr(fsp->m_u.ether_spec.h_source)) {
15436+ i &= ~I40E_CLOUD_FIELD_IMAC;
15437+ } else {
15438+ dev_info(&pf->pdev->dev, "Bad ether source mask %pM\n",
15439+ fsp->m_u.ether_spec.h_source);
15440+ return I40E_ERR_CONFIG;
15441+ }
15442+ break;
15443+
15444+ case IP_USER_FLOW:
15445+ if (pf->hw.mac.type == I40E_MAC_X722) {
15446+ dev_info(&pf->pdev->dev, "Failed to set filter. Destination IP filters are not supported for this device.\n");
15447+ return I40E_ERR_CONFIG;
15448+ }
15449+ if (fsp->m_u.usr_ip4_spec.ip4dst == cpu_to_be32(0xffffffff)) {
15450+ i |= I40E_CLOUD_FIELD_IIP;
15451+ } else if (!fsp->m_u.usr_ip4_spec.ip4dst) {
15452+ i &= ~I40E_CLOUD_FIELD_IIP;
15453+ } else {
15454+ dev_info(&pf->pdev->dev, "Bad ip dst mask 0x%08x\n",
15455+ be32_to_cpu(fsp->m_u.usr_ip4_spec.ip4dst));
15456+ return I40E_ERR_CONFIG;
15457+ }
15458+ break;
15459+
15460+ case UDP_V4_FLOW:
15461+ if (fsp->m_u.udp_ip4_spec.pdst == cpu_to_be16(0xffff)) {
15462+ i |= I40E_CLOUD_FIELD_IIP;
15463+ } else {
15464+ dev_info(&pf->pdev->dev, "Bad UDP dst mask 0x%04x\n",
15465+				 be16_to_cpu(fsp->m_u.udp_ip4_spec.pdst));
15466+ return I40E_ERR_CONFIG;
15467+ }
15468+ break;
15469+
15470+ default:
15471+ return I40E_ERR_CONFIG;
15472+ }
15473+
15474+ switch (be16_to_cpu(fsp->m_ext.vlan_tci)) {
15475+ case 0xffff:
15476+ if (fsp->h_ext.vlan_tci & cpu_to_be16(~0x7fff)) {
15477+ dev_info(&pf->pdev->dev, "Bad vlan %u\n",
15478+ be16_to_cpu(fsp->h_ext.vlan_tci));
15479+ return I40E_ERR_CONFIG;
15480+ }
15481+ i |= I40E_CLOUD_FIELD_IVLAN;
15482+ break;
15483+ case 0:
15484+ i &= ~I40E_CLOUD_FIELD_IVLAN;
15485+ break;
15486+ default:
15487+ dev_info(&pf->pdev->dev, "Bad vlan mask %u\n",
15488+ be16_to_cpu(fsp->m_ext.vlan_tci));
15489+ return I40E_ERR_CONFIG;
15490+ }
15491+
15492+ /* We already know that we're a cloud filter, so we don't need to
15493+ * re-check that.
15494+ */
15495+ if (userdef->tenant_id_valid) {
15496+ if (userdef->tenant_id == 0)
15497+ i &= ~I40E_CLOUD_FIELD_TEN_ID;
15498+ else
15499+ i |= I40E_CLOUD_FIELD_TEN_ID;
15500+ }
15501+
15502+ /* Make sure the flags produce a valid type */
15503+ if (i40e_get_cloud_filter_type(i, NULL)) {
15504+ dev_info(&pf->pdev->dev, "Invalid mask config, flags = %d\n",
15505+ i);
15506+ return I40E_ERR_CONFIG;
15507+ }
15508+
15509+ *flags = i;
15510+ return I40E_SUCCESS;
15511+}
15512+
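Note on i40e_cloud_filter_mask2flags() above: a field participates in the cloud filter tuple only when its mask is all-ones, is ignored when the mask is all-zero, and any partial mask is rejected. A tiny standalone sketch of that classification, using a hypothetical 32-bit field, is below.

#include <stdint.h>
#include <stdio.h>

enum field_sel { FIELD_IGNORED, FIELD_SELECTED, FIELD_INVALID };

/* All-ones mask selects the field, all-zero ignores it; partial masks
 * cannot be expressed as a cloud filter tuple and are rejected. */
static enum field_sel classify_mask(uint32_t mask)
{
	if (mask == 0xffffffffu)
		return FIELD_SELECTED;
	if (mask == 0)
		return FIELD_IGNORED;
	return FIELD_INVALID;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_mask(0xffffffffu),   /* FIELD_SELECTED */
	       classify_mask(0),             /* FIELD_IGNORED  */
	       classify_mask(0x00ffff00u));  /* FIELD_INVALID  */
	return 0;
}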
15513+/* i40e_add_cloud_filter_ethtool needs i40e_del_fdir_entry() */
15514+static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
15515+ struct ethtool_rxnfc *cmd);
15516+
15517+/**
15518+ * i40e_add_cloud_filter_ethtool - Add cloud filter
15519+ * @vsi: pointer to the VSI structure
15520+ * @cmd: The command to get or set Rx flow classification rules
15521+ * @userdef: pointer to userdef field data
15522+ *
15523+ * Add cloud filter for a specific flow spec.
15524+ * Returns 0 if the filter was successfully added.
15525+ **/
15526+static int i40e_add_cloud_filter_ethtool(struct i40e_vsi *vsi,
15527+ struct ethtool_rxnfc *cmd,
15528+ struct i40e_rx_flow_userdef *userdef)
15529+{
15530+ struct i40e_cloud_filter *rule, *parent, *filter = NULL;
15531+ struct ethtool_rx_flow_spec *fsp;
15532+ u16 dest_seid = 0, q_index = 0;
15533+ struct i40e_pf *pf = vsi->back;
15534+ struct hlist_node *node2;
15535+ u32 ring, vf;
15536+ u8 flags = 0;
15537+ int ret;
15538+
15539+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
15540+ return -EOPNOTSUPP;
15541+
15542+ if (pf->flags & I40E_FLAG_MFP_ENABLED)
15543+ return -EOPNOTSUPP;
15544+
15545+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
15546+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
15547+ return -EBUSY;
15548+
15549+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
15550+
15551+ /* The ring_cookie is a mask of queue index and VF id we wish to
15552+ * target. This is the same for regular flow director filters.
15553+ */
15554+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
15555+ dev_warn(&pf->pdev->dev, "Cloud filters do not support the drop action.\n");
15556+ return -EOPNOTSUPP;
15557+ }
15558+
15559+ ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
15560+ vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
15561+
15562+ if (!vf) {
15563+ if (ring >= vsi->num_queue_pairs)
15564+ return -EINVAL;
15565+ dest_seid = vsi->seid;
15566+ } else {
15567+ /* VFs are zero-indexed, so we subtract one here */
15568+ vf--;
15569+
15570+ if (vf >= pf->num_alloc_vfs)
15571+ return -EINVAL;
15572+ if (!i40e_is_l4mode_enabled() &&
15573+ ring >= pf->vf[vf].num_queue_pairs)
15574+ return -EINVAL;
15575+ dest_seid = pf->vsi[pf->vf[vf].lan_vsi_idx]->seid;
15576+ }
15577+ q_index = ring;
15578+
15579+ ret = i40e_cloud_filter_mask2flags(pf, fsp, userdef, &flags);
15580+ if (ret)
15581+ return -EINVAL;
15582+
15583+ /* if filter exists with same id, delete the old one */
15584+ parent = NULL;
15585+ hlist_for_each_entry_safe(rule, node2,
15586+ &pf->cloud_filter_list, cloud_node) {
15587+ /* filter exists with the id */
15588+ if (rule->id == fsp->location)
15589+ filter = rule;
15590+
15591+ /* bail out if we've passed the likely location in the list */
15592+ if (rule->id >= fsp->location)
15593+ break;
15594+
15595+ /* track where we left off */
15596+ parent = rule;
15597+ }
15598+ if (filter && (filter->id == fsp->location)) {
15599+ /* found it in the cloud list, so remove it */
15600+ ret = i40e_add_del_cloud_filter_ex(pf, filter, false);
15601+ if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
15602+ return ret;
15603+ hlist_del(&filter->cloud_node);
15604+ kfree(filter);
15605+ pf->num_cloud_filters--;
15606+ } else {
15607+ /* not in the cloud list, so check the PF's fdir list */
15608+ (void)i40e_del_fdir_entry(pf->vsi[pf->lan_vsi], cmd);
15609+ }
15610+
15611+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
15612+ if (!filter)
15613+ return -ENOMEM;
15614+
15615+ switch (fsp->flow_type & ~FLOW_EXT) {
15616+ case ETHER_FLOW:
15617+ ether_addr_copy(filter->outer_mac,
15618+ fsp->h_u.ether_spec.h_dest);
15619+ ether_addr_copy(filter->inner_mac,
15620+ fsp->h_u.ether_spec.h_source);
15621+ break;
15622+
15623+ case IP_USER_FLOW:
15624+ if (flags & I40E_CLOUD_FIELD_TEN_ID) {
15625+ dev_info(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
15626+ kfree(filter);
15627+ return I40E_ERR_CONFIG;
15628+ }
15629+ filter->inner_ip[0] = fsp->h_u.usr_ip4_spec.ip4dst;
15630+ break;
15631+
15632+ case UDP_V4_FLOW:
15633+ filter->dst_port = fsp->h_u.udp_ip4_spec.pdst;
15634+ break;
15635+
15636+ default:
15637+ dev_info(&pf->pdev->dev, "unknown flow type 0x%x\n",
15638+ (fsp->flow_type & ~FLOW_EXT));
15639+ kfree(filter);
15640+ return I40E_ERR_CONFIG;
15641+ }
15642+
15643+ if (userdef->tenant_id_valid)
15644+ filter->tenant_id = userdef->tenant_id;
15645+ else
15646+ filter->tenant_id = 0;
15647+ if (userdef->tunnel_type_valid)
15648+ filter->tunnel_type = userdef->tunnel_type;
15649+ else
15650+ filter->tunnel_type = I40E_CLOUD_TNL_TYPE_NONE;
15651+
15652+ filter->id = fsp->location;
15653+ filter->seid = dest_seid;
15654+ filter->queue_id = q_index;
15655+ filter->flags = flags;
15656+ filter->inner_vlan = fsp->h_ext.vlan_tci;
15657+
15658+ ret = i40e_add_del_cloud_filter_ex(pf, filter, true);
15659+ if (ret) {
15660+ kfree(filter);
15661+ return ret;
15662+ }
15663+
15664+ /* add filter to the ordered list */
15665+ INIT_HLIST_NODE(&filter->cloud_node);
15666+ if (parent)
15667+ hlist_add_behind(&filter->cloud_node, &parent->cloud_node);
15668+ else
15669+ hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
15670+ pf->num_cloud_filters++;
15671+
15672+ return 0;
15673+}
15674+
15675+/**
15676+ * i40e_del_cloud_filter_ethtool - del vxlan filter
15677+ * @pf: pointer to the physical function struct
15678+ * @cmd: RX flow classification rules
15679+ *
15680+ * Delete vxlan filter for a specific flow spec.
15681+ * Returns 0 if the filter was successfully deleted.
15682+ **/
15683+static int i40e_del_cloud_filter_ethtool(struct i40e_pf *pf,
15684+ struct ethtool_rxnfc *cmd)
15685+{
15686+ struct i40e_cloud_filter *rule, *filter = NULL;
15687+ struct ethtool_rx_flow_spec *fsp;
15688+ struct hlist_node *node2;
15689+
15690+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
15691+ hlist_for_each_entry_safe(rule, node2,
15692+ &pf->cloud_filter_list, cloud_node) {
15693+ /* filter found */
15694+ if (rule->id == fsp->location)
15695+ filter = rule;
15696+
15697+ /* bail out if we've passed the likely location in the list */
15698+ if (rule->id >= fsp->location)
15699+ break;
15700+ }
15701+ if (!filter)
15702+ return -ENOENT;
15703+
15704+ /* remove filter from the list even if failed to remove from device */
15705+ (void)i40e_add_del_cloud_filter_ex(pf, filter, false);
15706+ hlist_del(&filter->cloud_node);
15707+ kfree(filter);
15708+ pf->num_cloud_filters--;
15709+
15710+ return 0;
15711+}
15712 /**
15713 * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
15714 * @vsi: Pointer to the targeted VSI
15715 * @input: The filter to update or NULL to indicate deletion
15716 * @sw_idx: Software index to the filter
15717- * @cmd: The command to get or set Rx flow classification rules
15718 *
15719 * This function updates (or deletes) a Flow Director entry from
15720 * the hlist of the corresponding PF
15721@@ -2843,38 +4556,38 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
15722 **/
15723 static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
15724 struct i40e_fdir_filter *input,
15725- u16 sw_idx,
15726- struct ethtool_rxnfc *cmd)
15727+ u16 sw_idx)
15728 {
15729 struct i40e_fdir_filter *rule, *parent;
15730 struct i40e_pf *pf = vsi->back;
15731 struct hlist_node *node2;
15732- int err = -EINVAL;
15733+ int err = -ENOENT;
15734
15735 parent = NULL;
15736 rule = NULL;
15737
15738 hlist_for_each_entry_safe(rule, node2,
15739 &pf->fdir_filter_list, fdir_node) {
15740- /* hash found, or no matching entry */
15741+ /* rule id found, or passed its spot in the list */
15742 if (rule->fd_id >= sw_idx)
15743 break;
15744 parent = rule;
15745 }
15746
15747- /* if there is an old rule occupying our place remove it */
15748+	/* is there an old rule occupying our target filter slot? */
15749 if (rule && (rule->fd_id == sw_idx)) {
15750 /* Remove this rule, since we're either deleting it, or
15751 * replacing it.
15752 */
15753 err = i40e_add_del_fdir(vsi, rule, false);
15754 hlist_del(&rule->fdir_node);
15755- kfree(rule);
15756 pf->fdir_pf_active_filters--;
15757+
15758+ kfree(rule);
15759 }
15760
15761 /* If we weren't given an input, this is a delete, so just return the
15762- * error code indicating if there was an entry at the requested slot
15763+ * error code indicating if there was an entry at the requested slot.
15764 */
15765 if (!input)
15766 return err;
15767@@ -2882,12 +4595,11 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
15768 /* Otherwise, install the new rule as requested */
15769 INIT_HLIST_NODE(&input->fdir_node);
15770
15771- /* add filter to the list */
15772+ /* add filter to the ordered list */
15773 if (parent)
15774 hlist_add_behind(&input->fdir_node, &parent->fdir_node);
15775 else
15776- hlist_add_head(&input->fdir_node,
15777- &pf->fdir_filter_list);
15778+ hlist_add_head(&input->fdir_node, &pf->fdir_filter_list);
15779
15780 /* update counts */
15781 pf->fdir_pf_active_filters++;
15782@@ -2983,7 +4695,7 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
15783 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
15784 return -EBUSY;
15785
15786- ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
15787+ ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location);
15788
15789 i40e_prune_flex_pit_list(pf);
15790
15791@@ -3119,7 +4831,7 @@ static int i40e_add_flex_offset(struct list_head *flex_pit_list,
15792 * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table
15793 * @pf: Pointer to the PF structure
15794 * @flex_pit_list: list of flexible src offsets in use
15795- * #flex_pit_start: index to first entry for this section of the table
15796+ * @flex_pit_start: index to first entry for this section of the table
15797 *
15798 * In order to handle flexible data, the hardware uses a table of values
15799 * called the FLX_PIT table. This table is used to indicate which sections of
15800@@ -3233,7 +4945,7 @@ static void i40e_reprogram_flex_pit(struct i40e_pf *pf)
15801
15802 /**
15803 * i40e_flow_str - Converts a flow_type into a human readable string
15804- * @flow_type: the flow type from a flow specification
15805+ * @fsp: the flow specification
15806 *
15807 * Currently only flow types we support are included here, and the string
15808 * value attempts to match what ethtool would use to configure this flow type.
15809@@ -3648,22 +5360,113 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
15810
15811 i40e_write_fd_input_set(pf, index, new_mask);
15812
15813- /* Add the new offset and update table, if necessary */
15814- if (new_flex_offset) {
15815- err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
15816- pit_index);
15817- if (err)
15818- return err;
15819+ /* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented
15820+ * frames. If we're programming the input set for IPv4/Other, we also
15821+ * need to program the IPv4/Fragmented input set. Since we don't have
15822+ * separate support, we'll always assume and enforce that the two flow
15823+ * types must have matching input sets.
15824+ */
15825+ if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
15826+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
15827+ new_mask);
15828+
15829+ /* Add the new offset and update table, if necessary */
15830+ if (new_flex_offset) {
15831+ err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
15832+ pit_index);
15833+ if (err)
15834+ return err;
15835+
15836+ if (flex_l3) {
15837+ err = i40e_add_flex_offset(&pf->l3_flex_pit_list,
15838+ src_offset,
15839+ pit_index);
15840+ if (err)
15841+ return err;
15842+ }
15843+
15844+ i40e_reprogram_flex_pit(pf);
15845+ }
15846+
15847+ return 0;
15848+}
15849+
15850+/**
15851+ * i40e_match_fdir_filter - Return true if two filters match
15852+ * @a: pointer to filter struct
15853+ * @b: pointer to filter struct
15854+ *
15855+ * Returns true if the two filters match exactly the same criteria. I.e. they
15856+ * match the same flow type and have the same parameters. We don't need to
15857+ * check any input-set since all filters of the same flow type must use the
15858+ * same input set.
15859+ **/
15860+static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a,
15861+ struct i40e_fdir_filter *b)
15862+{
15863+ /* The filters do not match if any of these criteria differ. */
15864+ if (a->dst_ip != b->dst_ip ||
15865+ a->src_ip != b->src_ip ||
15866+ a->dst_port != b->dst_port ||
15867+ a->src_port != b->src_port ||
15868+ a->flow_type != b->flow_type ||
15869+ a->ip4_proto != b->ip4_proto)
15870+ return false;
15871+
15872+ return true;
15873+}
15874+
15875+/**
15876+ * i40e_disallow_matching_filters - Check that new filters differ
15877+ * @vsi: pointer to the targeted VSI
15878+ * @input: new filter to check
15879+ *
15880+ * Due to hardware limitations, it is not possible for two filters that match
15881+ * similar criteria to be programmed at the same time. This is true for a few
15882+ * reasons:
15883+ *
15884+ * (a) all filters matching a particular flow type must use the same input
15885+ * set, that is they must match the same criteria.
15886+ * (b) different flow types will never match the same packet, as the flow type
15887+ * is decided by hardware before checking which rules apply.
15888+ * (c) hardware has no way to distinguish which order filters apply in.
15889+ *
15890+ * Due to this, we can't really support using the location data to order
15891+ * filters in the hardware parsing. It is technically possible for the user to
15892+ * request two filters matching the same criteria but which select different
15893+ * queues. In this case, rather than keep both filters in the list, we reject
15894+ * the 2nd filter when the user requests adding it.
15895+ *
15896+ * This avoids needing to track location for programming the filter to
15897+ * hardware, and ensures that we avoid some strange scenarios involving
15898+ * deleting filters which match the same criteria.
15899+ **/
15900+static int i40e_disallow_matching_filters(struct i40e_vsi *vsi,
15901+ struct i40e_fdir_filter *input)
15902+{
15903+ struct i40e_pf *pf = vsi->back;
15904+ struct i40e_fdir_filter *rule;
15905+ struct hlist_node *node2;
15906+
15907+ /* Loop through every filter, and check that it doesn't match */
15908+ hlist_for_each_entry_safe(rule, node2,
15909+ &pf->fdir_filter_list, fdir_node) {
15910+ /* Don't check the filters match if they share the same fd_id,
15911+ * since the new filter is actually just updating the target
15912+ * of the old filter.
15913+ */
15914+ if (rule->fd_id == input->fd_id)
15915+ continue;
15916
15917- if (flex_l3) {
15918- err = i40e_add_flex_offset(&pf->l3_flex_pit_list,
15919- src_offset,
15920- pit_index);
15921- if (err)
15922- return err;
15923+ /* If any filters match, then print a warning message to the
15924+ * kernel message buffer and bail out.
15925+ */
15926+ if (i40e_match_fdir_filter(rule, input)) {
15927+ dev_warn(&pf->pdev->dev,
15928+ "Existing user defined filter %d already matches this flow.\n",
15929+ rule->fd_id);
15930+ return -EINVAL;
15931 }
15932-
15933- i40e_reprogram_flex_pit(pf);
15934 }
15935
15936 return 0;
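Note on i40e_disallow_matching_filters() above: a new flow director filter is rejected when its match criteria are identical to an existing rule, unless it shares the fd_id of the rule it is replacing. A compact standalone sketch of that check follows; the struct here is a simplified stand-in for struct i40e_fdir_filter.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the driver's flow director filter entry. */
struct fdir_filter {
	int      fd_id;
	uint32_t dst_ip, src_ip;
	uint16_t dst_port, src_port;
	uint8_t  flow_type, ip4_proto;
};

/* Two filters "match" when every classification field is equal. */
static bool filters_match(const struct fdir_filter *a,
			  const struct fdir_filter *b)
{
	return a->dst_ip == b->dst_ip && a->src_ip == b->src_ip &&
	       a->dst_port == b->dst_port && a->src_port == b->src_port &&
	       a->flow_type == b->flow_type && a->ip4_proto == b->ip4_proto;
}

/* Returns -1 (the driver returns -EINVAL) if 'input' duplicates any
 * existing rule other than the one it is replacing. */
static int disallow_matching(const struct fdir_filter *list, int n,
			     const struct fdir_filter *input)
{
	for (int i = 0; i < n; i++) {
		if (list[i].fd_id == input->fd_id)
			continue;	/* updating this rule is allowed */
		if (filters_match(&list[i], input))
			return -1;
	}
	return 0;
}

int main(void)
{
	struct fdir_filter existing[] = {
		{ .fd_id = 1, .dst_ip = 0x0a000001, .dst_port = 80, .flow_type = 1 },
	};
	struct fdir_filter dup = existing[0];

	dup.fd_id = 2;	/* same criteria, different location: rejected */
	printf("%d\n", disallow_matching(existing, 1, &dup));	/* prints -1 */
	return 0;
}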
15937@@ -3695,7 +5498,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
15938 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
15939 return -EOPNOTSUPP;
15940
15941- if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)
15942+ if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
15943 return -ENOSPC;
15944
15945 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
15946@@ -3711,6 +5514,9 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
15947 if (i40e_parse_rx_flow_user_data(fsp, &userdef))
15948 return -EINVAL;
15949
15950+ if (userdef.cloud_filter)
15951+ return i40e_add_cloud_filter_ethtool(vsi, cmd, &userdef);
15952+
15953 /* Extended MAC field is not supported */
15954 if (fsp->flow_type & FLOW_MAC_EXT)
15955 return -EINVAL;
15956@@ -3752,7 +5558,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
15957 }
15958
15959 input = kzalloc(sizeof(*input), GFP_KERNEL);
15960-
15961 if (!input)
15962 return -ENOMEM;
15963
15964@@ -3762,8 +5567,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
15965 input->dest_ctl = dest_ctl;
15966 input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
15967 input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
15968- input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
15969- input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
15970 input->flow_type = fsp->flow_type & ~FLOW_EXT;
15971 input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
15972
15973@@ -3781,19 +5584,29 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
15974 input->flex_offset = userdef.flex_offset;
15975 }
15976
15977- ret = i40e_add_del_fdir(vsi, input, true);
15978+ /* Avoid programming two filters with identical match criteria. */
15979+ ret = i40e_disallow_matching_filters(vsi, input);
15980 if (ret)
15981- goto free_input;
15982+ goto free_filter_memory;
15983
15984- /* Add the input filter to the fdir_input_list, possibly replacing
15985+ /* Add the input filter to the fdir_filter_list, possibly replacing
15986 * a previous filter. Do not free the input structure after adding it
15987- * to the list as this would cause a use-after-free bug.
15988+ * to the list as this would cause a use after free bug.
15989 */
15990- i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
15991+ i40e_update_ethtool_fdir_entry(vsi, input, fsp->location);
15992+
15993+ (void)i40e_del_cloud_filter_ethtool(pf, cmd);
15994+ ret = i40e_add_del_fdir(vsi, input, true);
15995+
15996+ if (ret)
15997+ goto remove_sw_rule;
15998
15999 return 0;
16000
16001-free_input:
16002+remove_sw_rule:
16003+ hlist_del(&input->fdir_node);
16004+ pf->fdir_pf_active_filters--;
16005+free_filter_memory:
16006 kfree(input);
16007 return ret;
16008 }
16009@@ -3816,12 +5629,17 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
16010 case ETHTOOL_SRXFH:
16011 ret = i40e_set_rss_hash_opt(pf, cmd);
16012 break;
16013+
16014 case ETHTOOL_SRXCLSRLINS:
16015 ret = i40e_add_fdir_ethtool(vsi, cmd);
16016 break;
16017+
16018 case ETHTOOL_SRXCLSRLDEL:
16019 ret = i40e_del_fdir_entry(vsi, cmd);
16020+ if (ret == -ENOENT)
16021+ ret = i40e_del_cloud_filter_ethtool(pf, cmd);
16022 break;
16023+
16024 default:
16025 break;
16026 }
16027@@ -3829,6 +5647,8 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
16028 return ret;
16029 }
16030
16031+#endif /* ETHTOOL_GRXRINGS */
16032+#ifdef ETHTOOL_SCHANNELS
16033 /**
16034 * i40e_max_channels - get Max number of combined channels supported
16035 * @vsi: vsi pointer
16036@@ -3841,7 +5661,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
16037
16038 /**
16039 * i40e_get_channels - Get the current channels enabled and max supported etc.
16040- * @netdev: network interface device structure
16041+ * @dev: network interface device structure
16042 * @ch: ethtool channels structure
16043 *
16044 * We don't support separate tx and rx queues as channels. The other count
16045@@ -3850,7 +5670,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
16046 * q_vectors since we support a lot more queue pairs than q_vectors.
16047 **/
16048 static void i40e_get_channels(struct net_device *dev,
16049- struct ethtool_channels *ch)
16050+ struct ethtool_channels *ch)
16051 {
16052 struct i40e_netdev_priv *np = netdev_priv(dev);
16053 struct i40e_vsi *vsi = np->vsi;
16054@@ -3869,14 +5689,14 @@ static void i40e_get_channels(struct net_device *dev,
16055
16056 /**
16057 * i40e_set_channels - Set the new channels count.
16058- * @netdev: network interface device structure
16059+ * @dev: network interface device structure
16060 * @ch: ethtool channels structure
16061 *
16062 * The new channels count may not be the same as requested by the user
16063 * since it gets rounded down to a power of 2 value.
16064 **/
16065 static int i40e_set_channels(struct net_device *dev,
16066- struct ethtool_channels *ch)
16067+ struct ethtool_channels *ch)
16068 {
16069 const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
16070 struct i40e_netdev_priv *np = netdev_priv(dev);
16071@@ -3892,6 +5712,12 @@ static int i40e_set_channels(struct net_device *dev,
16072 if (vsi->type != I40E_VSI_MAIN)
16073 return -EINVAL;
16074
16075+ /* We do not support setting channels via ethtool when TCs are
16076+ * configured through mqprio
16077+ */
16078+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
16079+ return -EINVAL;
16080+
16081 /* verify they are not requesting separate vectors */
16082 if (!count || ch->rx_count || ch->tx_count)
16083 return -EINVAL;
16084@@ -3937,6 +5763,9 @@ static int i40e_set_channels(struct net_device *dev,
16085 return -EINVAL;
16086 }
16087
16088+#endif /* ETHTOOL_SCHANNELS */
16089+
16090+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
16091 /**
16092 * i40e_get_rxfh_key_size - get the RSS hash key size
16093 * @netdev: network interface device structure
16094@@ -3959,8 +5788,22 @@ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
16095 return I40E_HLUT_ARRAY_SIZE;
16096 }
16097
16098+/**
16099+ * i40e_get_rxfh - get the rx flow hash indirection table
16100+ * @netdev: network interface device structure
16101+ * @indir: indirection table
16102+ * @key: hash key
16103+ * @hfunc: hash function
16104+ *
16105+ * Reads the indirection table directly from the hardware. Returns 0 on
16106+ * success.
16107+ **/
16108+#ifdef HAVE_RXFH_HASHFUNC
16109 static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
16110 u8 *hfunc)
16111+#else
16112+static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
16113+#endif
16114 {
16115 struct i40e_netdev_priv *np = netdev_priv(netdev);
16116 struct i40e_vsi *vsi = np->vsi;
16117@@ -3968,9 +5811,11 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
16118 int ret;
16119 u16 i;
16120
16121+#ifdef HAVE_RXFH_HASHFUNC
16122 if (hfunc)
16123 *hfunc = ETH_RSS_HASH_TOP;
16124
16125+#endif
16126 if (!indir)
16127 return 0;
16128
16129@@ -3995,12 +5840,22 @@ out:
16130 * @netdev: network interface device structure
16131 * @indir: indirection table
16132 * @key: hash key
16133+ * @hfunc: hash function to use
16134 *
16135 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
16136 * returns 0 after programming the table.
16137 **/
16138+#ifdef HAVE_RXFH_HASHFUNC
16139 static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
16140 const u8 *key, const u8 hfunc)
16141+#else
16142+#ifdef HAVE_RXFH_NONCONST
16143+static int i40e_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
16144+#else
16145+static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
16146+ const u8 *key)
16147+#endif /* HAVE_RXFH_NONCONST */
16148+#endif /* HAVE_RXFH_HASHFUNC */
16149 {
16150 struct i40e_netdev_priv *np = netdev_priv(netdev);
16151 struct i40e_vsi *vsi = np->vsi;
16152@@ -4008,8 +5863,18 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
16153 u8 *seed = NULL;
16154 u16 i;
16155
16156+#ifdef HAVE_RXFH_HASHFUNC
16157 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
16158 return -EOPNOTSUPP;
16159+#endif
16160+
16161+ /* Verify user input. */
16162+ if (indir) {
16163+ for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) {
16164+ if (indir[i] >= vsi->rss_size)
16165+ return -EINVAL;
16166+ }
16167+ }
16168
16169 if (key) {
16170 if (!vsi->rss_hkey_user) {
16171@@ -4038,7 +5903,9 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
16172 return i40e_config_rss(vsi, seed, vsi->rss_lut_user,
16173 I40E_HLUT_ARRAY_SIZE);
16174 }
16175+#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */
16176
16177+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
16178 /**
16179 * i40e_get_priv_flags - report device private flags
16180 * @dev: network interface device structure
16181@@ -4088,9 +5955,12 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
16182 static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
16183 {
16184 struct i40e_netdev_priv *np = netdev_priv(dev);
16185+ u64 orig_flags, new_flags, changed_flags;
16186+ enum i40e_admin_queue_err adq_err;
16187 struct i40e_vsi *vsi = np->vsi;
16188 struct i40e_pf *pf = vsi->back;
16189- u64 orig_flags, new_flags, changed_flags;
16190+ bool is_reset_needed;
16191+ i40e_status status;
16192 u32 i, j;
16193
16194 orig_flags = READ_ONCE(pf->flags);
16195@@ -4132,6 +6002,12 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
16196 }
16197
16198 flags_complete:
16199+ changed_flags = orig_flags ^ new_flags;
16200+
16201+ is_reset_needed = !!(changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
16202+ I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED |
16203+ I40E_FLAG_DISABLE_FW_LLDP));
16204+
16205 /* Before we finalize any flag changes, we need to perform some
16206 * checks to ensure that the changes are supported and safe.
16207 */
16208@@ -4141,19 +6017,30 @@ flags_complete:
16209 !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
16210 return -EOPNOTSUPP;
16211
16212- /* Compare and exchange the new flags into place. If we failed, that
16213- * is if cmpxchg64 returns anything but the old value, this means that
16214- * something else has modified the flags variable since we copied it
16215- * originally. We'll just punt with an error and log something in the
16216- * message buffer.
16217+ /* If the driver detected FW LLDP was disabled on init, this flag could
16218+ * be set, however we do not support _changing_ the flag:
16219+ * - on XL710 if NPAR is enabled or FW API version < 1.7
16220+ * - on X722 with FW API version < 1.6
16221+ * There are situations where older FW versions/NPAR enabled PFs could
16222+ * disable LLDP, however we _must_ not allow the user to enable/disable
16223+ * LLDP with this flag on unsupported FW versions.
16224 */
16225- if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) {
16226- dev_warn(&pf->pdev->dev,
16227- "Unable to update pf->flags as it was modified by another thread...\n");
16228- return -EAGAIN;
16229+ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
16230+ if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) {
16231+ dev_warn(&pf->pdev->dev,
16232+ "Device does not support changing FW LLDP\n");
16233+ return -EOPNOTSUPP;
16234+ }
16235 }
16236
16237- changed_flags = orig_flags ^ new_flags;
16238+ if (((changed_flags & I40E_FLAG_RS_FEC) ||
16239+ (changed_flags & I40E_FLAG_BASE_R_FEC)) &&
16240+ pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
16241+ pf->hw.device_id != I40E_DEV_ID_25G_B) {
16242+ dev_warn(&pf->pdev->dev,
16243+ "Device does not support changing FEC configuration\n");
16244+ return -EOPNOTSUPP;
16245+ }
16246
16247 /* Process any additional changes needed as a result of flag changes.
16248 * The changed_flags value reflects the list of bits that were
16249@@ -4162,8 +6049,8 @@ flags_complete:
16250
16251 /* Flush current ATR settings if ATR was disabled */
16252 if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
16253- !(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) {
16254- pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
16255+ !(new_flags & I40E_FLAG_FD_ATR_ENABLED)) {
16256+ set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
16257 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
16258 }
16259
16260@@ -4171,11 +6058,11 @@ flags_complete:
16261 u16 sw_flags = 0, valid_flags = 0;
16262 int ret;
16263
16264- if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
16265+ if (!(new_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
16266 sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
16267 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
16268 ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
16269- NULL);
16270+ 0, NULL);
16271 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
16272 dev_info(&pf->pdev->dev,
16273 "couldn't set switch config bits, err %s aq_err %s\n",
16274@@ -4186,17 +6073,399 @@ flags_complete:
16275 }
16276 }
16277
16278+ if ((changed_flags & I40E_FLAG_RS_FEC) ||
16279+ (changed_flags & I40E_FLAG_BASE_R_FEC)) {
16280+ u8 fec_cfg = 0;
16281+
16282+ if (new_flags & I40E_FLAG_RS_FEC &&
16283+ new_flags & I40E_FLAG_BASE_R_FEC) {
16284+ fec_cfg = I40E_AQ_SET_FEC_AUTO;
16285+ } else if (new_flags & I40E_FLAG_RS_FEC) {
16286+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
16287+ I40E_AQ_SET_FEC_ABILITY_RS);
16288+ } else if (new_flags & I40E_FLAG_BASE_R_FEC) {
16289+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
16290+ I40E_AQ_SET_FEC_ABILITY_KR);
16291+ }
16292+ if (i40e_set_fec_cfg(dev, fec_cfg))
16293+ dev_warn(&pf->pdev->dev, "Cannot change FEC config\n");
16294+ }
16295+
16296+ if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
16297+ (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN)) {
16298+ dev_err(&pf->pdev->dev,
16299+ "Setting link-down-on-close not supported on this port\n");
16300+ return -EOPNOTSUPP;
16301+ }
16302+
16303+ if ((changed_flags & new_flags &
16304+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
16305+ (new_flags & I40E_FLAG_MFP_ENABLED))
16306+ dev_warn(&pf->pdev->dev,
16307+ "Turning on link-down-on-close flag may affect other partitions\n");
16308+
16309+ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
16310+ if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
16311+ struct i40e_dcbx_config *dcbcfg;
16312+
16313+ i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
16314+ i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
16315+ /* reset local_dcbx_config to default */
16316+ dcbcfg = &pf->hw.local_dcbx_config;
16317+ dcbcfg->etscfg.willing = 1;
16318+ dcbcfg->etscfg.maxtcs = 0;
16319+ dcbcfg->etscfg.tcbwtable[0] = 100;
16320+ for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++)
16321+ dcbcfg->etscfg.tcbwtable[i] = 0;
16322+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
16323+ dcbcfg->etscfg.prioritytable[i] = 0;
16324+ dcbcfg->etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
16325+ dcbcfg->pfc.willing = 1;
16326+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
16327+ } else {
16328+ status = i40e_aq_start_lldp(&pf->hw, false, NULL);
16329+ if (status != I40E_SUCCESS) {
16330+ adq_err = pf->hw.aq.asq_last_status;
16331+ switch (adq_err) {
16332+ case I40E_AQ_RC_EEXIST:
16333+ dev_warn(&pf->pdev->dev,
16334+ "FW LLDP agent is already running\n");
16335+ is_reset_needed = false;
16336+ break;
16337+ case I40E_AQ_RC_EPERM:
16338+ dev_warn(&pf->pdev->dev,
16339+ "Device configuration forbids SW from starting the LLDP agent.\n");
16340+ return (-EINVAL);
16341+ default:
16342+ dev_warn(&pf->pdev->dev,
16343+ "Starting FW LLDP agent failed: error: %s, %s\n",
16344+ i40e_stat_str(&pf->hw,
16345+ status),
16346+ i40e_aq_str(&pf->hw,
16347+ adq_err));
16348+ return (-EINVAL);
16349+ }
16350+ }
16351+ }
16352+ }
16353+
16354+ /* Now that we've checked to ensure that the new flags are valid, load
16355+ * them into place. Since we only modify flags either (a) during
16356+ * initialization or (b) while holding the RTNL lock, we don't need
16357+ * anything fancy here.
16358+ */
16359+ pf->flags = new_flags;
16360+
16361 /* Issue reset to cause things to take effect, as additional bits
16362 * are added we will need to create a mask of bits requiring reset
16363 */
16364- if ((changed_flags & I40E_FLAG_VEB_STATS_ENABLED) ||
16365- ((changed_flags & I40E_FLAG_LEGACY_RX) && netif_running(dev)))
16366+ if (is_reset_needed)
16367 i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
16368
16369 return 0;
16370 }
16371+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
16372+
16373+#ifdef ETHTOOL_GMODULEINFO
16374+
16375+/**
16376+ * i40e_get_module_info - get (Q)SFP+ module type info
16377+ * @netdev: network interface device structure
16378+ * @modinfo: module EEPROM size and layout information structure
16379+ **/
16380+static int i40e_get_module_info(struct net_device *netdev,
16381+ struct ethtool_modinfo *modinfo)
16382+{
16383+ i40e_status status;
16384+ struct i40e_netdev_priv *np = netdev_priv(netdev);
16385+ struct i40e_vsi *vsi = np->vsi;
16386+ struct i40e_pf *pf = vsi->back;
16387+ struct i40e_hw *hw = &pf->hw;
16388+ u32 sff8472_comp = 0;
16389+ u32 sff8472_swap = 0;
16390+ u32 sff8636_rev = 0;
16391+ u8 type;
16392+
16393+ /* Check if firmware supports reading module EEPROM. */
16394+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
16395+ netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n");
16396+ return -EINVAL;
16397+ }
16398+
16399+ status = i40e_update_link_info(hw);
16400+ if (status)
16401+ return -EIO;
16402+
16403+ if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
16404+ netdev_err(vsi->netdev, "Cannot read module EEPROM memory. No module connected.\n");
16405+ return -EINVAL;
16406+ }
16407+
16408+ type = hw->phy.link_info.module_type[0];
16409+
16410+ switch (type) {
16411+ case I40E_MODULE_TYPE_SFP:
16412+ status = i40e_aq_get_phy_register(hw,
16413+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
16414+ I40E_I2C_EEPROM_DEV_ADDR, true,
16415+ I40E_MODULE_SFF_8472_COMP,
16416+ &sff8472_comp, NULL);
16417+ if (status)
16418+ return -EIO;
16419+
16420+ status = i40e_aq_get_phy_register(hw,
16421+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
16422+ I40E_I2C_EEPROM_DEV_ADDR, true,
16423+ I40E_MODULE_SFF_8472_SWAP,
16424+ &sff8472_swap, NULL);
16425+ if (status)
16426+ return -EIO;
16427+
16428+ /* Check if the module requires address swap to access
16429+ * the other EEPROM memory page.
16430+ */
16431+ if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
16432+ netdev_warn(vsi->netdev, "Module address swap to access page 0xA2 is not supported.\n");
16433+ modinfo->type = ETH_MODULE_SFF_8079;
16434+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
16435+ } else if (sff8472_comp &&
16436+ (sff8472_swap & I40E_MODULE_SFF_DIAG_CAPAB)) {
16437+ modinfo->type = ETH_MODULE_SFF_8472;
16438+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
16439+ } else {
16440+ /* Module is not SFF-8472 compliant */
16441+ modinfo->type = ETH_MODULE_SFF_8079;
16442+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
16443+ }
16444+ break;
16445+ case I40E_MODULE_TYPE_QSFP_PLUS:
16446+ /* Read from memory page 0. */
16447+ status = i40e_aq_get_phy_register(hw,
16448+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
16449+ 0, true,
16450+ I40E_MODULE_REVISION_ADDR,
16451+ &sff8636_rev, NULL);
16452+ if (status)
16453+ return -EIO;
16454+ /* Determine revision compliance byte */
16455+ if (sff8636_rev > 0x02) {
16456+ /* Module is SFF-8636 compliant */
16457+ modinfo->type = ETH_MODULE_SFF_8636;
16458+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
16459+ } else {
16460+ modinfo->type = ETH_MODULE_SFF_8436;
16461+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
16462+ }
16463+ break;
16464+ case I40E_MODULE_TYPE_QSFP28:
16465+ modinfo->type = ETH_MODULE_SFF_8636;
16466+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
16467+ break;
16468+ default:
16469+ netdev_err(vsi->netdev, "Module type unrecognized\n");
16470+ return -EINVAL;
16471+ }
16472+ return 0;
16473+}
16474+
16475+/**
16476+ * i40e_get_module_eeprom - fills buffer with (Q)SFP+ module memory contents
16477+ * @netdev: network interface device structure
16478+ * @ee: EEPROM dump request structure
16479+ * @data: buffer to be filled with EEPROM contents
16480+ **/
16481+static int i40e_get_module_eeprom(struct net_device *netdev,
16482+ struct ethtool_eeprom *ee,
16483+ u8 *data)
16484+{
16485+ i40e_status status;
16486+ struct i40e_netdev_priv *np = netdev_priv(netdev);
16487+ struct i40e_vsi *vsi = np->vsi;
16488+ struct i40e_pf *pf = vsi->back;
16489+ struct i40e_hw *hw = &pf->hw;
16490+ bool is_sfp = false;
16491+ u32 value = 0;
16492+ int i;
16493+
16494+ if (!ee || !ee->len || !data)
16495+ return -EINVAL;
16496+
16497+ if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
16498+ is_sfp = true;
16499+
16500+ for (i = 0; i < ee->len; i++) {
16501+ u32 offset = i + ee->offset;
16502+ u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
16503+
16504+ /* Check if we need to access the other memory page */
16505+ if (is_sfp) {
16506+ if (offset >= ETH_MODULE_SFF_8079_LEN) {
16507+ offset -= ETH_MODULE_SFF_8079_LEN;
16508+ addr = I40E_I2C_EEPROM_DEV_ADDR2;
16509+ }
16510+ } else {
16511+ while (offset >= ETH_MODULE_SFF_8436_LEN) {
16512+ /* Compute memory page number and offset. */
16513+ offset -= ETH_MODULE_SFF_8436_LEN / 2;
16514+ addr++;
16515+ }
16516+ }
16517+
16518+ status = i40e_aq_get_phy_register(hw,
16519+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
16520+ addr, true, offset, &value, NULL);
16521+ if (status)
16522+ return -EIO;
16523+ data[i] = value;
16524+ }
16525+ return 0;
16526+}
16527+#endif /* ETHTOOL_GMODULEINFO */
16528+
16529+#ifdef ETHTOOL_GEEE
16530+static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
16531+{
16532+ struct i40e_netdev_priv *np = netdev_priv(netdev);
16533+ struct i40e_aq_get_phy_abilities_resp phy_cfg;
16534+ i40e_status status = 0;
16535+ struct i40e_vsi *vsi = np->vsi;
16536+ struct i40e_pf *pf = vsi->back;
16537+ struct i40e_hw *hw = &pf->hw;
16538+
16539+ /* Get initial PHY capabilities */
16540+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
16541+ if (status)
16542+ return -EAGAIN;
16543+
16544+ /* Check whether NIC configuration is compatible with Energy Efficient
16545+ * Ethernet (EEE) mode.
16546+ */
16547+ if (phy_cfg.eee_capability == 0)
16548+ return -EOPNOTSUPP;
16549+
16550+ edata->supported = SUPPORTED_Autoneg;
16551+ edata->lp_advertised = edata->supported;
16552+
16553+ /* Get current configuration */
16554+ status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
16555+ if (status)
16556+ return -EAGAIN;
16557+
16558+ edata->advertised = phy_cfg.eee_capability ? SUPPORTED_Autoneg : 0U;
16559+ edata->eee_enabled = !!edata->advertised;
16560+ edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
16561+
16562+ edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status;
16563+
16564+ return 0;
16565+}
16566+#endif /* ETHTOOL_GEEE */
16567+
16568+#ifdef ETHTOOL_SEEE
16569+static int i40e_is_eee_param_supported(struct net_device *netdev,
16570+ struct ethtool_eee *edata)
16571+{
16572+ struct i40e_netdev_priv *np = netdev_priv(netdev);
16573+ struct i40e_vsi *vsi = np->vsi;
16574+ struct i40e_pf *pf = vsi->back;
16575+ struct i40e_ethtool_not_used {
16576+ u32 value;
16577+ const char *name;
16578+ } param[] = {
16579+ {edata->advertised & ~SUPPORTED_Autoneg, "advertise"},
16580+ {edata->tx_lpi_timer, "tx-timer"},
16581+ {edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
16582+ };
16583+ int i;
16584+
16585+ for (i = 0; i < ARRAY_SIZE(param); i++) {
16586+ if (param[i].value) {
16587+ netdev_info(netdev,
16588+ "EEE setting %s not supported\n",
16589+ param[i].name);
16590+ return -EOPNOTSUPP;
16591+ }
16592+ }
16593+
16594+ return 0;
16595+}
16596+
16597+static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
16598+{
16599+ struct i40e_netdev_priv *np = netdev_priv(netdev);
16600+ struct i40e_aq_get_phy_abilities_resp abilities;
16601+ i40e_status status = I40E_SUCCESS;
16602+ struct i40e_aq_set_phy_config config;
16603+ struct i40e_vsi *vsi = np->vsi;
16604+ struct i40e_pf *pf = vsi->back;
16605+ struct i40e_hw *hw = &pf->hw;
16606+ __le16 eee_capability;
16607+
16608+ /* Deny parameters we don't support */
16609+ if (i40e_is_eee_param_supported(netdev, edata))
16610+ return -EOPNOTSUPP;
16611+
16612+ /* Get initial PHY capabilities */
16613+ status = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
16614+ NULL);
16615+ if (status)
16616+ return -EAGAIN;
16617+
16618+ /* Check whether NIC configuration is compatible with Energy Efficient
16619+ * Ethernet (EEE) mode.
16620+ */
16621+ if (abilities.eee_capability == 0)
16622+ return -EOPNOTSUPP;
16623+
16624+ /* Cache initial EEE capability */
16625+ eee_capability = abilities.eee_capability;
16626+
16627+ /* Get current PHY configuration */
16628+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
16629+ NULL);
16630+ if (status)
16631+ return -EAGAIN;
16632+
16633+ /* Cache current PHY configuration */
16634+ config.phy_type = abilities.phy_type;
16635+ config.link_speed = abilities.link_speed;
16636+ config.abilities = abilities.abilities |
16637+ I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
16638+ config.eeer = abilities.eeer_val;
16639+ config.low_power_ctrl = abilities.d3_lpan;
16640+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
16641+ I40E_AQ_PHY_FEC_CONFIG_MASK;
16642+
16643+ /* Set desired EEE state */
16644+ if (edata->eee_enabled) {
16645+ config.eee_capability = eee_capability;
16646+ config.eeer |= I40E_PRTPM_EEER_TX_LPI_EN_MASK;
16647+ } else {
16648+ config.eee_capability = 0;
16649+ config.eeer &= ~I40E_PRTPM_EEER_TX_LPI_EN_MASK;
16650+ }
16651+
16652+ /* Apply modified PHY configuration */
16653+ status = i40e_aq_set_phy_config(hw, &config, NULL);
16654+ if (status)
16655+ return -EAGAIN;
16656+
16657+ return 0;
16658+}
16659+#endif /* ETHTOOL_SEEE */
16660+
16661+static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
16662+ .get_drvinfo = i40e_get_drvinfo,
16663+ .set_eeprom = i40e_set_eeprom,
16664+ .get_eeprom_len = i40e_get_eeprom_len,
16665+ .get_eeprom = i40e_get_eeprom,
16666+};
16667
16668 static const struct ethtool_ops i40e_ethtool_ops = {
16669+#ifndef ETHTOOL_GLINKSETTINGS
16670+ .get_settings = i40e_get_settings,
16671+ .set_settings = i40e_set_settings,
16672+#endif
16673 .get_drvinfo = i40e_get_drvinfo,
16674 .get_regs_len = i40e_get_regs_len,
16675 .get_regs = i40e_get_regs,
16676@@ -4213,31 +6482,141 @@ static const struct ethtool_ops i40e_ethtool_ops = {
16677 .set_pauseparam = i40e_set_pauseparam,
16678 .get_msglevel = i40e_get_msglevel,
16679 .set_msglevel = i40e_set_msglevel,
16680+#ifndef HAVE_NDO_SET_FEATURES
16681+ .get_rx_csum = i40e_get_rx_csum,
16682+ .set_rx_csum = i40e_set_rx_csum,
16683+ .get_tx_csum = i40e_get_tx_csum,
16684+ .set_tx_csum = i40e_set_tx_csum,
16685+ .get_sg = ethtool_op_get_sg,
16686+ .set_sg = ethtool_op_set_sg,
16687+ .get_tso = ethtool_op_get_tso,
16688+ .set_tso = i40e_set_tso,
16689+#ifdef ETHTOOL_GFLAGS
16690+ .get_flags = ethtool_op_get_flags,
16691+ .set_flags = i40e_set_flags,
16692+#endif
16693+#endif /* HAVE_NDO_SET_FEATURES */
16694+#ifdef ETHTOOL_GRXRINGS
16695 .get_rxnfc = i40e_get_rxnfc,
16696 .set_rxnfc = i40e_set_rxnfc,
16697+#ifdef ETHTOOL_SRXNTUPLE
16698+ .set_rx_ntuple = i40e_set_rx_ntuple,
16699+#endif
16700+#endif
16701+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
16702+ .self_test_count = i40e_diag_test_count,
16703+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
16704 .self_test = i40e_diag_test,
16705 .get_strings = i40e_get_strings,
16706+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
16707+#ifdef ETHTOOL_GEEE
16708+ .get_eee = i40e_get_eee,
16709+#endif /* ETHTOOL_GEEE */
16710+#ifdef ETHTOOL_SEEE
16711+ .set_eee = i40e_set_eee,
16712+#endif /* ETHTOOL_SEEE */
16713+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
16714 .set_phys_id = i40e_set_phys_id,
16715+#else
16716+ .phys_id = i40e_phys_id,
16717+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
16718+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
16719+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
16720+ .get_stats_count = i40e_get_stats_count,
16721+#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
16722 .get_sset_count = i40e_get_sset_count,
16723+ .get_priv_flags = i40e_get_priv_flags,
16724+ .set_priv_flags = i40e_set_priv_flags,
16725+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
16726 .get_ethtool_stats = i40e_get_ethtool_stats,
16727+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
16728+ .get_perm_addr = ethtool_op_get_perm_addr,
16729+#endif
16730 .get_coalesce = i40e_get_coalesce,
16731 .set_coalesce = i40e_set_coalesce,
16732+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
16733+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
16734 .get_rxfh_key_size = i40e_get_rxfh_key_size,
16735 .get_rxfh_indir_size = i40e_get_rxfh_indir_size,
16736 .get_rxfh = i40e_get_rxfh,
16737 .set_rxfh = i40e_set_rxfh,
16738+#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */
16739+#ifdef ETHTOOL_SCHANNELS
16740 .get_channels = i40e_get_channels,
16741 .set_channels = i40e_set_channels,
16742+#endif
16743+#ifdef ETHTOOL_GMODULEINFO
16744+ .get_module_info = i40e_get_module_info,
16745+ .get_module_eeprom = i40e_get_module_eeprom,
16746+#endif
16747+#ifdef HAVE_ETHTOOL_GET_TS_INFO
16748 .get_ts_info = i40e_get_ts_info,
16749- .get_priv_flags = i40e_get_priv_flags,
16750- .set_priv_flags = i40e_set_priv_flags,
16751+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
16752+#ifdef ETHTOOL_PERQUEUE
16753 .get_per_queue_coalesce = i40e_get_per_queue_coalesce,
16754 .set_per_queue_coalesce = i40e_set_per_queue_coalesce,
16755- .get_link_ksettings = i40e_get_link_ksettings,
16756- .set_link_ksettings = i40e_set_link_ksettings,
16757+#endif /* ETHTOOL_PERQUEUE */
16758+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
16759+#ifdef ETHTOOL_GLINKSETTINGS
16760+ .get_link_ksettings = i40e_get_link_ksettings,
16761+ .set_link_ksettings = i40e_set_link_ksettings,
16762+#endif /* ETHTOOL_GLINKSETTINGS */
16763+#ifdef ETHTOOL_GFECPARAM
16764+ .get_fecparam = i40e_get_fec_param,
16765+ .set_fecparam = i40e_set_fec_param,
16766+#endif /* ETHTOOL_GFECPARAM */
16767+#ifdef HAVE_DDP_PROFILE_UPLOAD_SUPPORT
16768+ .flash_device = i40e_ddp_flash,
16769+#endif /* DDP_PROFILE_UPLOAD_SUPPORT */
16770+};
16771+
16772+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
16773+static const struct ethtool_ops_ext i40e_ethtool_ops_ext = {
16774+ .size = sizeof(struct ethtool_ops_ext),
16775+ .get_ts_info = i40e_get_ts_info,
16776+ .set_phys_id = i40e_set_phys_id,
16777+ .get_channels = i40e_get_channels,
16778+ .set_channels = i40e_set_channels,
16779+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
16780+ .get_rxfh_key_size = i40e_get_rxfh_key_size,
16781+ .get_rxfh_indir_size = i40e_get_rxfh_indir_size,
16782+ .get_rxfh = i40e_get_rxfh,
16783+ .set_rxfh = i40e_set_rxfh,
16784+#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */
16785+#ifdef ETHTOOL_GEEE
16786+ .get_eee = i40e_get_eee,
16787+#endif /* ETHTOOL_GEEE */
16788+#ifdef ETHTOOL_SEEE
16789+ .set_eee = i40e_set_eee,
16790+#endif /* ETHTOOL_SEEE */
16791+#ifdef ETHTOOL_GMODULEINFO
16792+ .get_module_info = i40e_get_module_info,
16793+ .get_module_eeprom = i40e_get_module_eeprom,
16794+#endif
16795 };
16796
16797 void i40e_set_ethtool_ops(struct net_device *netdev)
16798 {
16799- netdev->ethtool_ops = &i40e_ethtool_ops;
16800+ struct i40e_netdev_priv *np = netdev_priv(netdev);
16801+ struct i40e_pf *pf = np->vsi->back;
16802+
16803+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
16804+ netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops;
16805+ } else {
16806+ netdev->ethtool_ops = &i40e_ethtool_ops;
16807+ set_ethtool_ops_ext(netdev, &i40e_ethtool_ops_ext);
16808+ }
16809+}
16810+#else
16811+void i40e_set_ethtool_ops(struct net_device *netdev)
16812+{
16813+ struct i40e_netdev_priv *np = netdev_priv(netdev);
16814+ struct i40e_pf *pf = np->vsi->back;
16815+
16816+ if (test_bit(__I40E_RECOVERY_MODE, pf->state))
16817+ netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops;
16818+ else
16819+ netdev->ethtool_ops = &i40e_ethtool_ops;
16820 }
16821+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
16822+#endif /* SIOCETHTOOL */
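The rewritten i40e_set_priv_flags() above works by XOR-ing the requested flags against the current ones, validating only the bits that actually changed, committing the new value under RTNL, and scheduling a PF reset only when a reset-worthy bit flipped. A compact, self-contained C sketch of that pattern (toy flag values standing in for the real I40E_FLAG_* bits, which are assumptions here, not the driver's definitions):

#include <stdbool.h>
#include <stdint.h>

#define FLAG_VEB_STATS   (1ULL << 0)   /* stand-ins for I40E_FLAG_* values */
#define FLAG_LEGACY_RX   (1ULL << 1)
#define FLAG_FW_LLDP_OFF (1ULL << 2)

/* Bits whose change requires a PF reset, as in is_reset_needed above. */
#define RESET_MASK (FLAG_VEB_STATS | FLAG_LEGACY_RX | FLAG_FW_LLDP_OFF)

/* Returns true when the caller should issue a reset after committing. */
static bool apply_priv_flags(uint64_t *cur_flags, uint64_t requested)
{
        uint64_t changed = *cur_flags ^ requested;  /* bits the user toggled */

        /* capability checks against 'changed' would go here */

        *cur_flags = requested;        /* committed under RTNL in the driver */
        return (changed & RESET_MASK) != 0;
}

int main(void)
{
        uint64_t flags = FLAG_VEB_STATS;
        return apply_priv_flags(&flags, FLAG_LEGACY_RX);  /* 1: reset needed */
}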
16823diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool_stats.h b/drivers/net/ethernet/intel/i40e/i40e_ethtool_stats.h
16824new file mode 100644
16825index 000000000..dc26d38ee
16826--- /dev/null
16827+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool_stats.h
16828@@ -0,0 +1,293 @@
16829+/* SPDX-License-Identifier: GPL-2.0 */
16830+/* Copyright(c) 2013 - 2020 Intel Corporation. */
16831+
16832+/* ethtool statistics helpers */
16833+
16834+/**
16835+ * struct i40e_stats - definition for an ethtool statistic
16836+ * @stat_string: statistic name to display in ethtool -S output
16837+ * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
16838+ * @stat_offset: offsetof() the stat from a base pointer
16839+ *
16840+ * This structure defines a statistic to be added to the ethtool stats buffer.
16841+ * It defines a statistic as offset from a common base pointer. Stats should
16842+ * be defined in constant arrays using the I40E_STAT macro, with every element
16843+ * of the array using the same _type for calculating the sizeof_stat and
16844+ * stat_offset.
16845+ *
16846+ * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
16847+ * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
16848+ * the i40e_add_ethtool_stat() helper function.
16849+ *
16850+ * The @stat_string is interpreted as a format string, allowing formatted
16851+ * values to be inserted while looping over multiple structures for a given
16852+ * statistics array. Thus, every statistic string in an array should have the
16853+ * same type and number of format specifiers, to be formatted by variadic
16854+ * arguments to the i40e_add_stat_string() helper function.
16855+ **/
16856+struct i40e_stats {
16857+ char stat_string[ETH_GSTRING_LEN];
16858+ int sizeof_stat;
16859+ int stat_offset;
16860+};
16861+
16862+/* Helper macro to define an i40e_stat structure with proper size and type.
16863+ * Use this when defining constant statistics arrays. Note that @_type expects
16864+ * only a type name and is used multiple times.
16865+ */
16866+#define I40E_STAT(_type, _name, _stat) { \
16867+ .stat_string = _name, \
16868+ .sizeof_stat = sizeof_field(_type, _stat), \
16869+ .stat_offset = offsetof(_type, _stat) \
16870+}
16871+
16872+/* Helper macro for defining some statistics directly copied from the netdev
16873+ * stats structure.
16874+ */
16875+#ifdef HAVE_NDO_GET_STATS64
16876+#define I40E_NETDEV_STAT(_net_stat) \
16877+ I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
16878+#else
16879+#define I40E_NETDEV_STAT(_net_stat) \
16880+ I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
16881+#endif
16882+
16883+/* Helper macro for defining some statistics related to queues */
16884+#define I40E_QUEUE_STAT(_name, _stat) \
16885+ I40E_STAT(struct i40e_ring, _name, _stat)
16886+
16887+/* Stats associated with a Tx or Rx ring */
16888+static const struct i40e_stats i40e_gstrings_queue_stats[] = {
16889+ I40E_QUEUE_STAT("%s-%u.packets", stats.packets),
16890+ I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes),
16891+};
16892+
16893+#ifdef HAVE_XDP_SUPPORT
16894+/* Stats associated with Rx ring's XDP prog */
16895+static const struct i40e_stats i40e_gstrings_rx_queue_xdp_stats[] = {
16896+ I40E_QUEUE_STAT("%s-%u.xdp.pass", xdp_stats.xdp_pass),
16897+ I40E_QUEUE_STAT("%s-%u.xdp.drop", xdp_stats.xdp_drop),
16898+ I40E_QUEUE_STAT("%s-%u.xdp.tx", xdp_stats.xdp_tx),
16899+ I40E_QUEUE_STAT("%s-%u.xdp.unknown", xdp_stats.xdp_unknown),
16900+ I40E_QUEUE_STAT("%s-%u.xdp.redirect", xdp_stats.xdp_redirect),
16901+ I40E_QUEUE_STAT("%s-%u.xdp.redirect_fail", xdp_stats.xdp_redirect_fail),
16902+};
16903+#endif
16904+
16905+/**
16906+ * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer
16907+ * @data: location to store the stat value
16908+ * @pointer: basis for where to copy from
16909+ * @stat: the stat definition
16910+ *
16911+ * Copies the stat data defined by the pointer and stat structure pair into
16912+ * the memory supplied as data. Used to implement i40e_add_ethtool_stats and
16913+ * i40e_add_queue_stats. If the pointer is null, data will be zero'd.
16914+ */
16915+static void
16916+i40e_add_one_ethtool_stat(u64 *data, void *pointer,
16917+ const struct i40e_stats *stat)
16918+{
16919+ char *p;
16920+
16921+ if (!pointer) {
16922+ /* ensure that the ethtool data buffer is zero'd for any stats
16923+ * which don't have a valid pointer.
16924+ */
16925+ *data = 0;
16926+ return;
16927+ }
16928+
16929+ p = (char *)pointer + stat->stat_offset;
16930+ switch (stat->sizeof_stat) {
16931+ case sizeof(u64):
16932+ *data = *((u64 *)p);
16933+ break;
16934+ case sizeof(u32):
16935+ *data = *((u32 *)p);
16936+ break;
16937+ case sizeof(u16):
16938+ *data = *((u16 *)p);
16939+ break;
16940+ case sizeof(u8):
16941+ *data = *((u8 *)p);
16942+ break;
16943+ default:
16944+ WARN_ONCE(1, "unexpected stat size for %s",
16945+ stat->stat_string);
16946+ *data = 0;
16947+ }
16948+}
16949+
16950+/**
16951+ * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer
16952+ * @data: ethtool stats buffer
16953+ * @pointer: location to copy stats from
16954+ * @stats: array of stats to copy
16955+ * @size: the size of the stats definition
16956+ *
16957+ * Copy the stats defined by the stats array using the pointer as a base into
16958+ * the data buffer supplied by ethtool. Updates the data pointer to point to
16959+ * the next empty location for successive calls to __i40e_add_ethtool_stats.
16960+ * If pointer is null, set the data values to zero and update the pointer to
16961+ * skip these stats.
16962+ **/
16963+static void
16964+__i40e_add_ethtool_stats(u64 **data, void *pointer,
16965+ const struct i40e_stats stats[],
16966+ const unsigned int size)
16967+{
16968+ unsigned int i;
16969+
16970+ for (i = 0; i < size; i++)
16971+ i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
16972+}
16973+
16974+/**
16975+ * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer
16976+ * @data: ethtool stats buffer
16977+ * @pointer: location where stats are stored
16978+ * @stats: static const array of stat definitions
16979+ *
16980+ * Macro to ease the use of __i40e_add_ethtool_stats by taking a static
16981+ * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
16982+ * ensuring that we pass the size associated with the given stats array.
16983+ *
16984+ * The parameter @stats is evaluated twice, so parameters with side effects
16985+ * should be avoided.
16986+ **/
16987+#define i40e_add_ethtool_stats(data, pointer, stats) \
16988+ __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
16989+
16990+/**
16991+ * i40e_add_queue_stats - copy queue statistics into supplied buffer
16992+ * @data: ethtool stats buffer
16993+ * @ring: the ring to copy
16994+ *
16995+ * Queue statistics must be copied while protected by
16996+ * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats.
16997+ * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the
16998+ * ring pointer is null, zero out the queue stat values and update the data
16999+ * pointer. Otherwise safely copy the stats from the ring into the supplied
17000+ * buffer and update the data pointer when finished.
17001+ *
17002+ * This function expects to be called while under rcu_read_lock().
17003+ **/
17004+static void
17005+i40e_add_queue_stats(u64 **data, struct i40e_ring *ring)
17006+{
17007+ const unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats);
17008+ const struct i40e_stats *stats = i40e_gstrings_queue_stats;
17009+#ifdef HAVE_NDO_GET_STATS64
17010+ unsigned int start;
17011+#endif
17012+ unsigned int i;
17013+
17014+ /* To avoid invalid statistics values, ensure that we keep retrying
17015+ * the copy until we get a consistent value according to
17016+ * u64_stats_fetch_retry_irq. But first, make sure our ring is
17017+ * non-null before attempting to access its syncp.
17018+ */
17019+#ifdef HAVE_NDO_GET_STATS64
17020+ do {
17021+ start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
17022+#endif
17023+ for (i = 0; i < size; i++) {
17024+ i40e_add_one_ethtool_stat(&(*data)[i], ring,
17025+ &stats[i]);
17026+ }
17027+#ifdef HAVE_NDO_GET_STATS64
17028+ } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
17029+#endif
17030+
17031+ /* Once we successfully copy the stats in, update the data pointer */
17032+ *data += size;
17033+}
17034+
17035+#ifdef HAVE_XDP_SUPPORT
17036+/**
17037+ * i40e_add_rx_queue_xdp_stats - copy XDP statistics into supplied buffer
17038+ * @data: ethtool stats buffer
17039+ * @rx_ring: the rx ring to copy
17040+ *
17041+ * RX queue XDP statistics must be copied while protected by
17042+ * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats.
17043+ * Assumes that queue stats are defined in i40e_gstrings_rx_queue_xdp_stats. If
17044+ * the ring pointer is null, zero out the queue stat values and update the data
17045+ * pointer. Otherwise safely copy the stats from the ring into the supplied
17046+ * buffer and update the data pointer when finished.
17047+ *
17048+ * This function expects to be called while under rcu_read_lock().
17049+ **/
17050+static void
17051+i40e_add_rx_queue_xdp_stats(u64 **data, struct i40e_ring *rx_ring)
17052+{
17053+ const unsigned int xdp_size =
17054+ ARRAY_SIZE(i40e_gstrings_rx_queue_xdp_stats);
17055+ const struct i40e_stats *xdp_stats = i40e_gstrings_rx_queue_xdp_stats;
17056+#ifdef HAVE_NDO_GET_STATS64
17057+ unsigned int start;
17058+#endif
17059+ unsigned int i;
17060+
17061+ /* To avoid invalid statistics values, ensure that we keep retrying
17062+ * the copy until we get a consistent value according to
17063+ * u64_stats_fetch_retry_irq. But first, make sure our ring is
17064+ * non-null before attempting to access its syncp.
17065+ */
17066+#ifdef HAVE_NDO_GET_STATS64
17067+ do {
17068+ start = !rx_ring ? 0 :
17069+ u64_stats_fetch_begin_irq(&rx_ring->syncp);
17070+#endif
17071+ for (i = 0; i < xdp_size; i++) {
17072+ i40e_add_one_ethtool_stat(&(*data)[i], rx_ring,
17073+ &xdp_stats[i]);
17074+ }
17075+#ifdef HAVE_NDO_GET_STATS64
17076+ } while (rx_ring && u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
17077+#endif
17078+
17079+ /* Once we successfully copy the stats in, update the data pointer */
17080+ *data += xdp_size;
17081+}
17082+#endif
17083+
17084+/**
17085+ * __i40e_add_stat_strings - copy stat strings into ethtool buffer
17086+ * @p: ethtool supplied buffer
17087+ * @stats: stat definitions array
17088+ * @size: size of the stats array
17089+ *
17090+ * Format and copy the strings described by stats into the buffer pointed at
17091+ * by p.
17092+ **/
17093+static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
17094+ const unsigned int size, ...)
17095+{
17096+ unsigned int i;
17097+
17098+ for (i = 0; i < size; i++) {
17099+ va_list args;
17100+
17101+ va_start(args, size);
17102+ vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
17103+ *p += ETH_GSTRING_LEN;
17104+ va_end(args);
17105+ }
17106+}
17107+
17108+/**
17109+ * i40e_add_stat_strings - copy stat strings into ethtool buffer
17110+ * @p: ethtool supplied buffer
17111+ * @stats: stat definitions array
17112+ *
17113+ * Format and copy the strings described by the const static stats value into
17114+ * the buffer pointed at by p.
17115+ *
17116+ * The parameter @stats is evaluated twice, so parameters with side effects
17117+ * should be avoided. Additionally, stats must be an array such that
17118+ * ARRAY_SIZE can be called on it.
17119+ **/
17120+#define i40e_add_stat_strings(p, stats, ...) \
17121+ __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
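The kernel-doc at the top of this header describes how each statistic is declared once as a (name, size, offset) triple and then copied generically into the ethtool u64 buffer. The same technique in a freestanding form, using a made-up ring_stats struct and plain sizeof/offsetof in place of the kernel's sizeof_field():

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct ring_stats {
        uint64_t packets;
        uint32_t restarts;
};

struct stat_def {
        const char *name;
        size_t size;    /* sizeof() of the member, at most sizeof(uint64_t) */
        size_t offset;  /* offsetof() of the member from the base pointer */
};

#define STAT(_type, _name, _member) \
        { _name, sizeof(((_type *)0)->_member), offsetof(_type, _member) }

static const struct stat_def ring_defs[] = {
        STAT(struct ring_stats, "packets",  packets),
        STAT(struct ring_stats, "restarts", restarts),
};

/* Widen any member size to u64, mirroring i40e_add_one_ethtool_stat(). */
static uint64_t read_stat(const void *base, const struct stat_def *d)
{
        const char *p = (const char *)base + d->offset;

        switch (d->size) {
        case sizeof(uint64_t): { uint64_t v; memcpy(&v, p, sizeof(v)); return v; }
        case sizeof(uint32_t): { uint32_t v; memcpy(&v, p, sizeof(v)); return v; }
        case sizeof(uint16_t): { uint16_t v; memcpy(&v, p, sizeof(v)); return v; }
        default:               { uint8_t  v; memcpy(&v, p, sizeof(v)); return v; }
        }
}

int main(void)
{
        struct ring_stats rs = { .packets = 12345, .restarts = 2 };
        return (int)read_stat(&rs, &ring_defs[1]);   /* returns 2 */
}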
17122diff --git a/drivers/net/ethernet/intel/i40e/i40e_filters.c b/drivers/net/ethernet/intel/i40e/i40e_filters.c
17123new file mode 100644
17124index 000000000..ce301a41e
17125--- /dev/null
17126+++ b/drivers/net/ethernet/intel/i40e/i40e_filters.c
17127@@ -0,0 +1,40 @@
17128+// SPDX-License-Identifier: GPL-2.0
17129+/* Copyright(c) 2013 - 2020 Intel Corporation. */
17130+
17131+#include "i40e_filters.h"
17132+
17133+/**
17134+ * __i40e_del_filter - Remove a specific filter from the VSI
17135+ * @vsi: VSI to remove from
17136+ * @f: the filter to remove from the list
17137+ *
17138+ * This function should be called instead of i40e_del_filter only if you know
17139+ * the exact filter you will remove already, such as via i40e_find_filter or
17140+ * i40e_find_mac.
17141+ *
17142+ * NOTE: This function is expected to be called with mac_filter_hash_lock
17143+ * being held.
17144+ * ANOTHER NOTE: This function MUST be called from within the context of
17145+ * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
17146+ * instead of list_for_each_entry().
17147+ **/
17148+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
17149+{
17150+ if (!f)
17151+ return;
17152+
17153+ /* If the filter was never added to firmware then we can just delete it
17154+ * directly and we don't want to set the status to remove or else an
17155+ * admin queue command will unnecessarily fire.
17156+ */
17157+ if (f->state == I40E_FILTER_FAILED || f->state == I40E_FILTER_NEW) {
17158+ hash_del(&f->hlist);
17159+ kfree(f);
17160+ } else {
17161+ f->state = I40E_FILTER_REMOVE;
17162+ }
17163+
17164+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
17165+ set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
17166+}
17167+
17168diff --git a/drivers/net/ethernet/intel/i40e/i40e_filters.h b/drivers/net/ethernet/intel/i40e/i40e_filters.h
17169new file mode 100644
17170index 000000000..7f618fdab
17171--- /dev/null
17172+++ b/drivers/net/ethernet/intel/i40e/i40e_filters.h
17173@@ -0,0 +1,11 @@
17174+/* SPDX-License-Identifier: GPL-2.0 */
17175+/* Copyright(c) 2013 - 2020 Intel Corporation. */
17176+
17177+#ifndef _I40E_FILTERS_H_
17178+#define _I40E_FILTERS_H_
17179+
17180+#include "i40e.h"
17181+
17182+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
17183+
17184+#endif /* _I40E_FILTERS_H_ */
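The comment block above requires that __i40e_del_filter() run under mac_filter_hash_lock and inside a _safe list iterator, because the helper may hash_del() the entry it is handed. A hedged sketch of the expected calling pattern; the helper name is hypothetical, and the field names (mac_filter_hash, mac_filter_hash_lock, hlist) and hash_for_each_safe()/spin_lock_bh() are assumed to match the existing declarations in i40e.h and linux/hashtable.h:

#include "i40e_filters.h"

/* Illustrative caller, not part of the patch. */
static void example_flush_vsi_filters(struct i40e_vsi *vsi)
{
        struct i40e_mac_filter *f;
        struct hlist_node *h;
        int bkt;

        spin_lock_bh(&vsi->mac_filter_hash_lock);
        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
                __i40e_del_filter(vsi, f);     /* may hash_del(&f->hlist) */
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
}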
17185diff --git a/drivers/net/ethernet/intel/i40e/i40e_helper.h b/drivers/net/ethernet/intel/i40e/i40e_helper.h
17186new file mode 100644
17187index 000000000..4e25e230c
17188--- /dev/null
17189+++ b/drivers/net/ethernet/intel/i40e/i40e_helper.h
17190@@ -0,0 +1,128 @@
17191+/* SPDX-License-Identifier: GPL-2.0 */
17192+/* Copyright(c) 2013 - 2020 Intel Corporation. */
17193+
17194+#ifndef _I40E_HELPER_H_
17195+#define _I40E_HELPER_H_
17196+
17197+#include "i40e_alloc.h"
17198+
17199+/**
17200+ * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
17201+ * @hw: pointer to the HW structure
17202+ * @mem: ptr to mem struct to fill out
17203+ * @size: size of memory requested
17204+ * @alignment: what to align the allocation to
17205+ **/
17206+inline int i40e_allocate_dma_mem_d(struct i40e_hw *hw,
17207+ struct i40e_dma_mem *mem,
17208+ __always_unused enum i40e_memory_type mtype,
17209+ u64 size, u32 alignment)
17210+{
17211+ struct i40e_pf *nf = (struct i40e_pf *)hw->back;
17212+
17213+ mem->size = ALIGN(size, alignment);
17214+#ifdef HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM
17215+ mem->va = dma_alloc_coherent(&nf->pdev->dev, mem->size,
17216+ &mem->pa, GFP_KERNEL);
17217+#else
17218+ mem->va = dma_zalloc_coherent(&nf->pdev->dev, mem->size,
17219+ &mem->pa, GFP_KERNEL);
17220+#endif
17221+ if (!mem->va)
17222+ return -ENOMEM;
17223+
17224+ return 0;
17225+}
17226+
17227+/**
17228+ * i40e_free_dma_mem_d - OS specific memory free for shared code
17229+ * @hw: pointer to the HW structure
17230+ * @mem: ptr to mem struct to free
17231+ **/
17232+inline int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
17233+{
17234+ struct i40e_pf *nf = (struct i40e_pf *)hw->back;
17235+
17236+ dma_free_coherent(&nf->pdev->dev, mem->size, mem->va, mem->pa);
17237+ mem->va = NULL;
17238+ mem->pa = 0;
17239+ mem->size = 0;
17240+
17241+ return 0;
17242+}
17243+
17244+/**
17245+ * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
17246+ * @hw: pointer to the HW structure
17247+ * @mem: ptr to mem struct to fill out
17248+ * @size: size of memory requested
17249+ **/
17250+inline int i40e_allocate_virt_mem_d(struct i40e_hw *hw,
17251+ struct i40e_virt_mem *mem,
17252+ u32 size)
17253+{
17254+ mem->size = size;
17255+ mem->va = kzalloc(size, GFP_KERNEL);
17256+
17257+ if (!mem->va)
17258+ return -ENOMEM;
17259+
17260+ return 0;
17261+}
17262+
17263+/**
17264+ * i40e_free_virt_mem_d - OS specific memory free for shared code
17265+ * @hw: pointer to the HW structure
17266+ * @mem: ptr to mem struct to free
17267+ **/
17268+inline int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
17269+{
17270+ /* it's ok to kfree a NULL pointer */
17271+ kfree(mem->va);
17272+ mem->va = NULL;
17273+ mem->size = 0;
17274+
17275+ return 0;
17276+}
17277+
17278+/* prototype */
17279+inline void i40e_destroy_spinlock_d(struct i40e_spinlock *sp);
17280+inline void i40e_acquire_spinlock_d(struct i40e_spinlock *sp);
17281+inline void i40e_release_spinlock_d(struct i40e_spinlock *sp);
17282+
17283+/**
17284+ * i40e_init_spinlock_d - OS specific spinlock init for shared code
17285+ * @sp: pointer to a spinlock declared in driver space
17286+ **/
17287+static inline void i40e_init_spinlock_d(struct i40e_spinlock *sp)
17288+{
17289+ mutex_init((struct mutex *)sp);
17290+}
17291+
17292+/**
17293+ * i40e_acquire_spinlock_d - OS specific spinlock acquire for shared code
17294+ * @sp: pointer to a spinlock declared in driver space
17295+ **/
17296+inline void i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
17297+{
17298+ mutex_lock((struct mutex *)sp);
17299+}
17300+
17301+/**
17302+ * i40e_release_spinlock_d - OS specific spinlock release for shared code
17303+ * @sp: pointer to a spinlock declared in driver space
17304+ **/
17305+inline void i40e_release_spinlock_d(struct i40e_spinlock *sp)
17306+{
17307+ mutex_unlock((struct mutex *)sp);
17308+}
17309+
17310+/**
17311+ * i40e_destroy_spinlock_d - OS specific spinlock destroy for shared code
17312+ * @sp: pointer to a spinlock declared in driver space
17313+ **/
17314+inline void i40e_destroy_spinlock_d(struct i40e_spinlock *sp)
17315+{
17316+ mutex_destroy((struct mutex *)sp);
17317+}
17318+#endif /* _I40E_HELPER_H_ */
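Because the shared-code "spinlock" wrappers above map onto a struct mutex, a holder may sleep, but the lock must never be taken from atomic context. A minimal usage sketch, assuming struct i40e_spinlock is the mutex-sized wrapper declared in i40e_osdep.h and that the lock variable below is hypothetical:

#include "i40e_helper.h"

static struct i40e_spinlock example_aq_lock;   /* illustrative only */

static void example_locked_section(void)
{
        i40e_acquire_spinlock_d(&example_aq_lock);   /* mutex_lock(): may sleep */
        /* ... issue an admin queue command or touch shared NVM state ... */
        i40e_release_spinlock_d(&example_aq_lock);
}

static void example_setup_teardown(void)
{
        i40e_init_spinlock_d(&example_aq_lock);
        example_locked_section();
        i40e_destroy_spinlock_d(&example_aq_lock);
}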
17319diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
17320index a7c7b1d9b..5fb10c407 100644
17321--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
17322+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
17323@@ -1,35 +1,14 @@
17324-/*******************************************************************************
17325- *
17326- * Intel Ethernet Controller XL710 Family Linux Driver
17327- * Copyright(c) 2013 - 2014 Intel Corporation.
17328- *
17329- * This program is free software; you can redistribute it and/or modify it
17330- * under the terms and conditions of the GNU General Public License,
17331- * version 2, as published by the Free Software Foundation.
17332- *
17333- * This program is distributed in the hope it will be useful, but WITHOUT
17334- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17335- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17336- * more details.
17337- *
17338- * You should have received a copy of the GNU General Public License along
17339- * with this program. If not, see <http://www.gnu.org/licenses/>.
17340- *
17341- * The full GNU General Public License is included in this distribution in
17342- * the file called "COPYING".
17343- *
17344- * Contact Information:
17345- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
17346- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
17347- *
17348- ******************************************************************************/
17349+// SPDX-License-Identifier: GPL-2.0
17350+/* Copyright(c) 2013 - 2020 Intel Corporation. */
17351
17352 #include "i40e_osdep.h"
17353 #include "i40e_register.h"
17354 #include "i40e_status.h"
17355 #include "i40e_alloc.h"
17356 #include "i40e_hmc.h"
17357+#ifndef I40E_NO_TYPE_HEADER
17358 #include "i40e_type.h"
17359+#endif
17360
17361 /**
17362 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
17363@@ -45,11 +24,11 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
17364 enum i40e_sd_entry_type type,
17365 u64 direct_mode_sz)
17366 {
17367- enum i40e_memory_type mem_type __attribute__((unused));
17368+ i40e_status ret_code = I40E_SUCCESS;
17369 struct i40e_hmc_sd_entry *sd_entry;
17370+ enum i40e_memory_type mem_type;
17371 bool dma_mem_alloc_done = false;
17372 struct i40e_dma_mem mem;
17373- i40e_status ret_code = I40E_SUCCESS;
17374 u64 alloc_len;
17375
17376 if (NULL == hmc_info->sd_table.sd_entry) {
17377@@ -89,9 +68,13 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
17378 sd_entry->u.pd_table.pd_entry =
17379 (struct i40e_hmc_pd_entry *)
17380 sd_entry->u.pd_table.pd_entry_virt_mem.va;
17381- sd_entry->u.pd_table.pd_page_addr = mem;
17382+ i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
17383+ &mem, sizeof(struct i40e_dma_mem),
17384+ I40E_NONDMA_TO_NONDMA);
17385 } else {
17386- sd_entry->u.bp.addr = mem;
17387+ i40e_memcpy(&sd_entry->u.bp.addr,
17388+ &mem, sizeof(struct i40e_dma_mem),
17389+ I40E_NONDMA_TO_NONDMA);
17390 sd_entry->u.bp.sd_pd_index = sd_index;
17391 }
17392 /* initialize the sd entry */
17393@@ -104,7 +87,7 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
17394 if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
17395 I40E_INC_BP_REFCNT(&sd_entry->u.bp);
17396 exit:
17397- if (ret_code)
17398+ if (I40E_SUCCESS != ret_code)
17399 if (dma_mem_alloc_done)
17400 i40e_free_dma_mem(hw, &mem);
17401
17402@@ -133,7 +116,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
17403 u32 pd_index,
17404 struct i40e_dma_mem *rsrc_pg)
17405 {
17406- i40e_status ret_code = 0;
17407+ i40e_status ret_code = I40E_SUCCESS;
17408 struct i40e_hmc_pd_table *pd_table;
17409 struct i40e_hmc_pd_entry *pd_entry;
17410 struct i40e_dma_mem mem;
17411@@ -171,7 +154,8 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
17412 pd_entry->rsrc_pg = false;
17413 }
17414
17415- pd_entry->bp.addr = *page;
17416+ i40e_memcpy(&pd_entry->bp.addr, page,
17417+ sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
17418 pd_entry->bp.sd_pd_index = pd_index;
17419 pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
17420 /* Set page address and valid bit */
17421@@ -181,7 +165,8 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
17422 pd_addr += rel_pd_idx;
17423
17424 /* Add the backing page physical address in the pd entry */
17425- memcpy(pd_addr, &page_desc, sizeof(u64));
17426+ i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
17427+ I40E_NONDMA_TO_DMA);
17428
17429 pd_entry->sd_index = sd_idx;
17430 pd_entry->valid = true;
17431@@ -197,7 +182,6 @@ exit:
17432 * @hw: pointer to our HW structure
17433 * @hmc_info: pointer to the HMC configuration information structure
17434 * @idx: the page index
17435- * @is_pf: distinguishes a VF from a PF
17436 *
17437 * This function:
17438 * 1. Marks the entry in pd table (for paged address mode) or in sd table
17439@@ -212,7 +196,7 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
17440 struct i40e_hmc_info *hmc_info,
17441 u32 idx)
17442 {
17443- i40e_status ret_code = 0;
17444+ i40e_status ret_code = I40E_SUCCESS;
17445 struct i40e_hmc_pd_entry *pd_entry;
17446 struct i40e_hmc_pd_table *pd_table;
17447 struct i40e_hmc_sd_entry *sd_entry;
17448@@ -245,13 +229,13 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
17449 I40E_DEC_PD_REFCNT(pd_table);
17450 pd_addr = (u64 *)pd_table->pd_page_addr.va;
17451 pd_addr += rel_pd_idx;
17452- memset(pd_addr, 0, sizeof(u64));
17453+ i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
17454 I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
17455
17456 /* free memory here */
17457 if (!pd_entry->rsrc_pg)
17458- ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
17459- if (ret_code)
17460+ ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
17461+ if (I40E_SUCCESS != ret_code)
17462 goto exit;
17463 if (!pd_table->ref_cnt)
17464 i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
17465@@ -267,7 +251,7 @@ exit:
17466 i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
17467 u32 idx)
17468 {
17469- i40e_status ret_code = 0;
17470+ i40e_status ret_code = I40E_SUCCESS;
17471 struct i40e_hmc_sd_entry *sd_entry;
17472
17473 /* get the entry and decrease its ref counter */
17474@@ -305,7 +289,7 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
17475 sd_entry = &hmc_info->sd_table.sd_entry[idx];
17476 I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
17477
17478- return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
17479+ return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
17480 }
17481
17482 /**
17483@@ -316,7 +300,7 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
17484 i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
17485 u32 idx)
17486 {
17487- i40e_status ret_code = 0;
17488+ i40e_status ret_code = I40E_SUCCESS;
17489 struct i40e_hmc_sd_entry *sd_entry;
17490
17491 sd_entry = &hmc_info->sd_table.sd_entry[idx];
17492@@ -353,5 +337,5 @@ i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
17493 sd_entry = &hmc_info->sd_table.sd_entry[idx];
17494 I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
17495
17496- return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
17497+ return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
17498 }
17499diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
17500index d90669211..ddfef1603 100644
17501--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
17502+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
17503@@ -1,28 +1,5 @@
17504-/*******************************************************************************
17505- *
17506- * Intel Ethernet Controller XL710 Family Linux Driver
17507- * Copyright(c) 2013 - 2014 Intel Corporation.
17508- *
17509- * This program is free software; you can redistribute it and/or modify it
17510- * under the terms and conditions of the GNU General Public License,
17511- * version 2, as published by the Free Software Foundation.
17512- *
17513- * This program is distributed in the hope it will be useful, but WITHOUT
17514- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17515- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17516- * more details.
17517- *
17518- * You should have received a copy of the GNU General Public License along
17519- * with this program. If not, see <http://www.gnu.org/licenses/>.
17520- *
17521- * The full GNU General Public License is included in this distribution in
17522- * the file called "COPYING".
17523- *
17524- * Contact Information:
17525- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
17526- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
17527- *
17528- ******************************************************************************/
17529+/* SPDX-License-Identifier: GPL-2.0 */
17530+/* Copyright(c) 2013 - 2020 Intel Corporation. */
17531
17532 #ifndef _I40E_HMC_H_
17533 #define _I40E_HMC_H_
17534diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
17535index daa920442..4e55a095e 100644
17536--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
17537+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
17538@@ -1,28 +1,5 @@
17539-/*******************************************************************************
17540- *
17541- * Intel Ethernet Controller XL710 Family Linux Driver
17542- * Copyright(c) 2013 - 2014 Intel Corporation.
17543- *
17544- * This program is free software; you can redistribute it and/or modify it
17545- * under the terms and conditions of the GNU General Public License,
17546- * version 2, as published by the Free Software Foundation.
17547- *
17548- * This program is distributed in the hope it will be useful, but WITHOUT
17549- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17550- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17551- * more details.
17552- *
17553- * You should have received a copy of the GNU General Public License along
17554- * with this program. If not, see <http://www.gnu.org/licenses/>.
17555- *
17556- * The full GNU General Public License is included in this distribution in
17557- * the file called "COPYING".
17558- *
17559- * Contact Information:
17560- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
17561- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
17562- *
17563- ******************************************************************************/
17564+// SPDX-License-Identifier: GPL-2.0
17565+/* Copyright(c) 2013 - 2020 Intel Corporation. */
17566
17567 #include "i40e_osdep.h"
17568 #include "i40e_register.h"
17569@@ -101,7 +78,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
17570 u32 fcoe_filt_num)
17571 {
17572 struct i40e_hmc_obj_info *obj, *full_obj;
17573- i40e_status ret_code = 0;
17574+ i40e_status ret_code = I40E_SUCCESS;
17575 u64 l2fpm_size;
17576 u32 size_exp;
17577
17578@@ -136,7 +113,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
17579 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
17580 hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
17581 txq_num, obj->max_cnt, ret_code);
17582- goto init_lan_hmc_out;
17583+ goto free_hmc_out;
17584 }
17585
17586 /* aggregate values into the full LAN object for later */
17587@@ -159,7 +136,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
17588 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
17589 hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
17590 rxq_num, obj->max_cnt, ret_code);
17591- goto init_lan_hmc_out;
17592+ goto free_hmc_out;
17593 }
17594
17595 /* aggregate values into the full LAN object for later */
17596@@ -182,7 +159,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
17597 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
17598 hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
17599 fcoe_cntx_num, obj->max_cnt, ret_code);
17600- goto init_lan_hmc_out;
17601+ goto free_hmc_out;
17602 }
17603
17604 /* aggregate values into the full LAN object for later */
17605@@ -205,7 +182,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
17606 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
17607 hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
17608 fcoe_filt_num, obj->max_cnt, ret_code);
17609- goto init_lan_hmc_out;
17610+ goto free_hmc_out;
17611 }
17612
17613 /* aggregate values into the full LAN object for later */
17614@@ -226,7 +203,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
17615 (sizeof(struct i40e_hmc_sd_entry) *
17616 hw->hmc.sd_table.sd_cnt));
17617 if (ret_code)
17618- goto init_lan_hmc_out;
17619+ goto free_hmc_out;
17620 hw->hmc.sd_table.sd_entry =
17621 (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
17622 }
17623@@ -234,6 +211,11 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
17624 full_obj->size = l2fpm_size;
17625
17626 init_lan_hmc_out:
17627+ return ret_code;
17628+free_hmc_out:
17629+ if (hw->hmc.hmc_obj_virt_mem.va)
17630+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
17631+
17632 return ret_code;
17633 }
17634
17635@@ -255,9 +237,9 @@ static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
17636 struct i40e_hmc_info *hmc_info,
17637 u32 idx)
17638 {
17639- i40e_status ret_code = 0;
17640+ i40e_status ret_code = I40E_SUCCESS;
17641
17642- if (!i40e_prep_remove_pd_page(hmc_info, idx))
17643+ if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
17644 ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
17645
17646 return ret_code;
17647@@ -282,9 +264,9 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
17648 struct i40e_hmc_info *hmc_info,
17649 u32 idx)
17650 {
17651- i40e_status ret_code = 0;
17652+ i40e_status ret_code = I40E_SUCCESS;
17653
17654- if (!i40e_prep_remove_sd_bp(hmc_info, idx))
17655+ if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
17656 ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
17657
17658 return ret_code;
17659@@ -301,7 +283,7 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
17660 static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
17661 struct i40e_hmc_lan_create_obj_info *info)
17662 {
17663- i40e_status ret_code = 0;
17664+ i40e_status ret_code = I40E_SUCCESS;
17665 struct i40e_hmc_sd_entry *sd_entry;
17666 u32 pd_idx1 = 0, pd_lmt1 = 0;
17667 u32 pd_idx = 0, pd_lmt = 0;
17668@@ -371,7 +353,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
17669 ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
17670 info->entry_type,
17671 sd_size);
17672- if (ret_code)
17673+ if (I40E_SUCCESS != ret_code)
17674 goto exit_sd_error;
17675 sd_entry = &info->hmc_info->sd_table.sd_entry[j];
17676 if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
17677@@ -388,7 +370,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
17678 ret_code = i40e_add_pd_table_entry(hw,
17679 info->hmc_info,
17680 i, NULL);
17681- if (ret_code) {
17682+ if (I40E_SUCCESS != ret_code) {
17683 pd_error = true;
17684 break;
17685 }
17686@@ -461,9 +443,9 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
17687 enum i40e_hmc_model model)
17688 {
17689 struct i40e_hmc_lan_create_obj_info info;
17690- i40e_status ret_code = 0;
17691 u8 hmc_fn_id = hw->hmc.hmc_fn_id;
17692 struct i40e_hmc_obj_info *obj;
17693+ i40e_status ret_code = I40E_SUCCESS;
17694
17695 /* Initialize part of the create object info struct */
17696 info.hmc_info = &hw->hmc;
17697@@ -479,9 +461,9 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
17698 /* Make one big object, a single SD */
17699 info.count = 1;
17700 ret_code = i40e_create_lan_hmc_object(hw, &info);
17701- if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
17702+ if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
17703 goto try_type_paged;
17704- else if (ret_code)
17705+ else if (ret_code != I40E_SUCCESS)
17706 goto configure_lan_hmc_out;
17707 /* else clause falls through the break */
17708 break;
17709@@ -491,7 +473,7 @@ try_type_paged:
17710 /* Make one big object in the PD table */
17711 info.count = 1;
17712 ret_code = i40e_create_lan_hmc_object(hw, &info);
17713- if (ret_code)
17714+ if (ret_code != I40E_SUCCESS)
17715 goto configure_lan_hmc_out;
17716 break;
17717 default:
17718@@ -545,7 +527,7 @@ configure_lan_hmc_out:
17719 static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
17720 struct i40e_hmc_lan_delete_obj_info *info)
17721 {
17722- i40e_status ret_code = 0;
17723+ i40e_status ret_code = I40E_SUCCESS;
17724 struct i40e_hmc_pd_table *pd_table;
17725 u32 pd_idx, pd_lmt, rel_pd_idx;
17726 u32 sd_idx, sd_lmt;
17727@@ -610,7 +592,7 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
17728 &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
17729 if (pd_table->pd_entry[rel_pd_idx].valid) {
17730 ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
17731- if (ret_code)
17732+ if (I40E_SUCCESS != ret_code)
17733 goto exit;
17734 }
17735 }
17736@@ -631,12 +613,12 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
17737 switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
17738 case I40E_SD_TYPE_DIRECT:
17739 ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
17740- if (ret_code)
17741+ if (I40E_SUCCESS != ret_code)
17742 goto exit;
17743 break;
17744 case I40E_SD_TYPE_PAGED:
17745 ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
17746- if (ret_code)
17747+ if (I40E_SUCCESS != ret_code)
17748 goto exit;
17749 break;
17750 default:
17751@@ -681,7 +663,7 @@ i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
17752
17753 #define I40E_HMC_STORE(_struct, _ele) \
17754 offsetof(struct _struct, _ele), \
17755- FIELD_SIZEOF(struct _struct, _ele)
17756+ sizeof_field(struct _struct, _ele)
17757
17758 struct i40e_context_ele {
17759 u16 offset;
17760@@ -774,13 +756,13 @@ static void i40e_write_byte(u8 *hmc_bits,
17761 /* get the current bits from the target bit string */
17762 dest = hmc_bits + (ce_info->lsb / 8);
17763
17764- memcpy(&dest_byte, dest, sizeof(dest_byte));
17765+ i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
17766
17767 dest_byte &= ~mask; /* get the bits not changing */
17768 dest_byte |= src_byte; /* add in the new bits */
17769
17770 /* put it all back */
17771- memcpy(dest, &dest_byte, sizeof(dest_byte));
17772+ i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
17773 }
17774
17775 /**
17776@@ -818,13 +800,13 @@ static void i40e_write_word(u8 *hmc_bits,
17777 /* get the current bits from the target bit string */
17778 dest = hmc_bits + (ce_info->lsb / 8);
17779
17780- memcpy(&dest_word, dest, sizeof(dest_word));
17781+ i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
17782
17783- dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
17784- dest_word |= cpu_to_le16(src_word); /* add in the new bits */
17785+ dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
17786+ dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
17787
17788 /* put it all back */
17789- memcpy(dest, &dest_word, sizeof(dest_word));
17790+ i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
17791 }
17792
17793 /**
17794@@ -870,13 +852,13 @@ static void i40e_write_dword(u8 *hmc_bits,
17795 /* get the current bits from the target bit string */
17796 dest = hmc_bits + (ce_info->lsb / 8);
17797
17798- memcpy(&dest_dword, dest, sizeof(dest_dword));
17799+ i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
17800
17801- dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
17802- dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
17803+ dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
17804+ dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
17805
17806 /* put it all back */
17807- memcpy(dest, &dest_dword, sizeof(dest_dword));
17808+ i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
17809 }
17810
17811 /**
17812@@ -922,13 +904,13 @@ static void i40e_write_qword(u8 *hmc_bits,
17813 /* get the current bits from the target bit string */
17814 dest = hmc_bits + (ce_info->lsb / 8);
17815
17816- memcpy(&dest_qword, dest, sizeof(dest_qword));
17817+ i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
17818
17819- dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
17820- dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
17821+ dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
17822+ dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
17823
17824 /* put it all back */
17825- memcpy(dest, &dest_qword, sizeof(dest_qword));
17826+ i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
17827 }
17828
17829 /**
17830@@ -942,9 +924,10 @@ static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
17831 enum i40e_hmc_lan_rsrc_type hmc_type)
17832 {
17833 /* clean the bit array */
17834- memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
17835+ i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
17836+ I40E_DMA_MEM);
17837
17838- return 0;
17839+ return I40E_SUCCESS;
17840 }
17841
17842 /**
17843@@ -981,12 +964,12 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,
17844 }
17845 }
17846
17847- return 0;
17848+ return I40E_SUCCESS;
17849 }
17850
17851 /**
17852 * i40e_hmc_get_object_va - retrieves an object's virtual address
17853- * @hmc_info: pointer to i40e_hmc_info struct
17854+ * @hw: pointer to the hw structure
17855 * @object_base: pointer to u64 to get the va
17856 * @rsrc_type: the hmc resource type
17857 * @obj_idx: hmc object index
17858@@ -995,24 +978,20 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,
17859 * base pointer. This function is used for LAN Queue contexts.
17860 **/
17861 static
17862-i40e_status i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
17863+i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw,
17864 u8 **object_base,
17865 enum i40e_hmc_lan_rsrc_type rsrc_type,
17866 u32 obj_idx)
17867 {
17868 u32 obj_offset_in_sd, obj_offset_in_pd;
17869- i40e_status ret_code = 0;
17870+ struct i40e_hmc_info *hmc_info = &hw->hmc;
17871 struct i40e_hmc_sd_entry *sd_entry;
17872 struct i40e_hmc_pd_entry *pd_entry;
17873 u32 pd_idx, pd_lmt, rel_pd_idx;
17874+ i40e_status ret_code = I40E_SUCCESS;
17875 u64 obj_offset_in_fpm;
17876 u32 sd_idx, sd_lmt;
17877
17878- if (NULL == hmc_info) {
17879- ret_code = I40E_ERR_BAD_PTR;
17880- hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
17881- goto exit;
17882- }
17883 if (NULL == hmc_info->hmc_obj) {
17884 ret_code = I40E_ERR_BAD_PTR;
17885 hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
17886@@ -1070,8 +1049,7 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
17887 i40e_status err;
17888 u8 *context_bytes;
17889
17890- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
17891- I40E_HMC_LAN_TX, queue);
17892+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
17893 if (err < 0)
17894 return err;
17895
17896@@ -1091,8 +1069,7 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
17897 i40e_status err;
17898 u8 *context_bytes;
17899
17900- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
17901- I40E_HMC_LAN_TX, queue);
17902+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
17903 if (err < 0)
17904 return err;
17905
17906@@ -1111,8 +1088,7 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
17907 i40e_status err;
17908 u8 *context_bytes;
17909
17910- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
17911- I40E_HMC_LAN_RX, queue);
17912+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
17913 if (err < 0)
17914 return err;
17915
17916@@ -1132,8 +1108,7 @@ i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
17917 i40e_status err;
17918 u8 *context_bytes;
17919
17920- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
17921- I40E_HMC_LAN_RX, queue);
17922+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
17923 if (err < 0)
17924 return err;
17925
17926diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
17927index e74128db5..00b7d6de4 100644
17928--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
17929+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
17930@@ -1,28 +1,5 @@
17931-/*******************************************************************************
17932- *
17933- * Intel Ethernet Controller XL710 Family Linux Driver
17934- * Copyright(c) 2013 - 2014 Intel Corporation.
17935- *
17936- * This program is free software; you can redistribute it and/or modify it
17937- * under the terms and conditions of the GNU General Public License,
17938- * version 2, as published by the Free Software Foundation.
17939- *
17940- * This program is distributed in the hope it will be useful, but WITHOUT
17941- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17942- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17943- * more details.
17944- *
17945- * You should have received a copy of the GNU General Public License along
17946- * with this program. If not, see <http://www.gnu.org/licenses/>.
17947- *
17948- * The full GNU General Public License is included in this distribution in
17949- * the file called "COPYING".
17950- *
17951- * Contact Information:
17952- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
17953- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
17954- *
17955- ******************************************************************************/
17956+/* SPDX-License-Identifier: GPL-2.0 */
17957+/* Copyright(c) 2013 - 2020 Intel Corporation. */
17958
17959 #ifndef _I40E_LAN_HMC_H_
17960 #define _I40E_LAN_HMC_H_
17961diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
17962index 78198072f..7ad12f421 100644
17963--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
17964+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
17965@@ -1,38 +1,29 @@
17966-/*******************************************************************************
17967- *
17968- * Intel Ethernet Controller XL710 Family Linux Driver
17969- * Copyright(c) 2013 - 2017 Intel Corporation.
17970- *
17971- * This program is free software; you can redistribute it and/or modify it
17972- * under the terms and conditions of the GNU General Public License,
17973- * version 2, as published by the Free Software Foundation.
17974- *
17975- * This program is distributed in the hope it will be useful, but WITHOUT
17976- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17977- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17978- * more details.
17979- *
17980- * You should have received a copy of the GNU General Public License along
17981- * with this program. If not, see <http://www.gnu.org/licenses/>.
17982- *
17983- * The full GNU General Public License is included in this distribution in
17984- * the file called "COPYING".
17985- *
17986- * Contact Information:
17987- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
17988- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
17989- *
17990- ******************************************************************************/
17991+// SPDX-License-Identifier: GPL-2.0
17992+/* Copyright(c) 2013 - 2020 Intel Corporation. */
17993
17994-#include <linux/etherdevice.h>
17995-#include <linux/of_net.h>
17996-#include <linux/pci.h>
17997+#ifdef HAVE_XDP_SUPPORT
17998 #include <linux/bpf.h>
17999-
18000+#endif
18001 /* Local includes */
18002 #include "i40e.h"
18003+#include "i40e_helper.h"
18004 #include "i40e_diag.h"
18005+#ifdef HAVE_VXLAN_RX_OFFLOAD
18006+#if IS_ENABLED(CONFIG_VXLAN)
18007+#include <net/vxlan.h>
18008+#endif
18009+#endif /* HAVE_VXLAN_RX_OFFLOAD */
18010+#ifdef HAVE_GRE_ENCAP_OFFLOAD
18011+#include <net/gre.h>
18012+#endif /* HAVE_GRE_ENCAP_OFFLOAD */
18013+#ifdef HAVE_GENEVE_RX_OFFLOAD
18014+#if IS_ENABLED(CONFIG_GENEVE)
18015+#include <net/geneve.h>
18016+#endif
18017+#endif /* HAVE_GENEVE_RX_OFFLOAD */
18018+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
18019 #include <net/udp_tunnel.h>
18020+#endif
18021 /* All i40e tracepoints are defined by the include below, which
18022 * must be included exactly once across the whole kernel with
18023 * CREATE_TRACE_POINTS defined
18024@@ -40,20 +31,25 @@
18025 #define CREATE_TRACE_POINTS
18026 #include "i40e_trace.h"
18027
18028-const char i40e_driver_name[] = "i40e";
18029+char i40e_driver_name[] = "i40e";
18030 static const char i40e_driver_string[] =
18031- "Intel(R) Ethernet Connection XL710 Network Driver";
18032+ "Intel(R) 40-10 Gigabit Ethernet Connection Network Driver";
18033
18034-#define DRV_KERN "-k"
18035+#ifndef DRV_VERSION_LOCAL
18036+#define DRV_VERSION_LOCAL
18037+#endif /* DRV_VERSION_LOCAL */
18038+
18039+#define DRV_VERSION_DESC ""
18040
18041 #define DRV_VERSION_MAJOR 2
18042-#define DRV_VERSION_MINOR 1
18043-#define DRV_VERSION_BUILD 14
18044+#define DRV_VERSION_MINOR 11
18045+#define DRV_VERSION_BUILD 29
18046 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
18047- __stringify(DRV_VERSION_MINOR) "." \
18048- __stringify(DRV_VERSION_BUILD) DRV_KERN
18049+ __stringify(DRV_VERSION_MINOR) "." \
18050+ __stringify(DRV_VERSION_BUILD) \
18051+ DRV_VERSION_DESC __stringify(DRV_VERSION_LOCAL)
18052 const char i40e_driver_version_str[] = DRV_VERSION;
18053-static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
18054+static const char i40e_copyright[] = "Copyright(c) 2013 - 2020 Intel Corporation.";
18055
18056 /* a bit of forward declarations */
18057 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
18058@@ -64,12 +60,20 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
18059 static int i40e_setup_misc_vector(struct i40e_pf *pf);
18060 static void i40e_determine_queue_usage(struct i40e_pf *pf);
18061 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
18062+static void i40e_clear_rss_config_user(struct i40e_vsi *vsi);
18063 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
18064 static int i40e_reset(struct i40e_pf *pf);
18065 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
18066+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
18067+static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
18068+static bool i40e_check_recovery_mode(struct i40e_pf *pf);
18069+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
18070+static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up);
18071+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
18072 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
18073 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
18074-
18075+static int i40e_get_capabilities(struct i40e_pf *pf,
18076+ enum i40e_admin_queue_opc list_type);
18077 /* i40e_pci_tbl - PCI Device ID Table
18078 *
18079 * Last entry must be all 0s
18080@@ -85,16 +89,22 @@ static const struct pci_device_id i40e_pci_tbl[] = {
18081 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
18082 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
18083 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
18084+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_5G_BASE_T_BC), 0},
18085 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
18086 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
18087+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
18088+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
18089+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
18090+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
18091+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
18092+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
18093+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
18094 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
18095 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
18096 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
18097 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
18098 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
18099 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
18100- {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
18101- {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
18102 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
18103 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
18104 /* required last entry */
18105@@ -103,86 +113,38 @@ static const struct pci_device_id i40e_pci_tbl[] = {
18106 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
18107
18108 #define I40E_MAX_VF_COUNT 128
18109+#define OPTION_UNSET -1
18110+#define I40E_PARAM_INIT { [0 ... I40E_MAX_NIC] = OPTION_UNSET}
18111+#define I40E_MAX_NIC 64
18112+#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE)
18113+#ifdef CONFIG_PCI_IOV
18114+static int max_vfs[I40E_MAX_NIC+1] = I40E_PARAM_INIT;
18115+module_param_array_named(max_vfs, max_vfs, int, NULL, 0);
18116+MODULE_PARM_DESC(max_vfs,
18117+ "Number of Virtual Functions: 0 = disable (default), 1-"
18118+ __stringify(I40E_MAX_VF_COUNT) " = enable "
18119+ "this many VFs");
18120+#endif /* CONFIG_PCI_IOV */
18121+#endif /* HAVE_SRIOV_CONFIGURE */
18122+
18123 static int debug = -1;
18124-module_param(debug, uint, 0);
18125-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
18126+module_param(debug, int, 0);
18127+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
18128+static int l4mode = L4_MODE_DISABLED;
18129+module_param(l4mode, int, 0000);
18130+MODULE_PARM_DESC(l4mode, "L4 cloud filter mode: 0=UDP,1=TCP,2=Both,-1=Disabled(default)");
18131+
18132
18133 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
18134-MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
18135+MODULE_DESCRIPTION("Intel(R) 40-10 Gigabit Ethernet Connection Network Driver");
18136 MODULE_LICENSE("GPL");
18137 MODULE_VERSION(DRV_VERSION);
18138
18139 static struct workqueue_struct *i40e_wq;
18140
18141-/**
18142- * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
18143- * @hw: pointer to the HW structure
18144- * @mem: ptr to mem struct to fill out
18145- * @size: size of memory requested
18146- * @alignment: what to align the allocation to
18147- **/
18148-int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
18149- u64 size, u32 alignment)
18150-{
18151- struct i40e_pf *pf = (struct i40e_pf *)hw->back;
18152-
18153- mem->size = ALIGN(size, alignment);
18154- mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
18155- &mem->pa, GFP_KERNEL);
18156- if (!mem->va)
18157- return -ENOMEM;
18158-
18159- return 0;
18160-}
18161-
18162-/**
18163- * i40e_free_dma_mem_d - OS specific memory free for shared code
18164- * @hw: pointer to the HW structure
18165- * @mem: ptr to mem struct to free
18166- **/
18167-int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
18168-{
18169- struct i40e_pf *pf = (struct i40e_pf *)hw->back;
18170-
18171- dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
18172- mem->va = NULL;
18173- mem->pa = 0;
18174- mem->size = 0;
18175-
18176- return 0;
18177-}
18178-
18179-/**
18180- * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
18181- * @hw: pointer to the HW structure
18182- * @mem: ptr to mem struct to fill out
18183- * @size: size of memory requested
18184- **/
18185-int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
18186- u32 size)
18187-{
18188- mem->size = size;
18189- mem->va = kzalloc(size, GFP_KERNEL);
18190-
18191- if (!mem->va)
18192- return -ENOMEM;
18193-
18194- return 0;
18195-}
18196-
18197-/**
18198- * i40e_free_virt_mem_d - OS specific memory free for shared code
18199- * @hw: pointer to the HW structure
18200- * @mem: ptr to mem struct to free
18201- **/
18202-int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
18203+bool i40e_is_l4mode_enabled(void)
18204 {
18205- /* it's ok to kfree a NULL pointer */
18206- kfree(mem->va);
18207- mem->va = NULL;
18208- mem->size = 0;
18209-
18210- return 0;
18211+ return l4mode > L4_MODE_DISABLED;
18212 }
18213
18214 /**
18215@@ -206,8 +168,8 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
18216
18217 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
18218 dev_info(&pf->pdev->dev,
18219- "param err: pile=%p needed=%d id=0x%04x\n",
18220- pile, needed, id);
18221+ "param err: pile=%s needed=%d id=0x%04x\n",
18222+ pile ? "<valid>" : "<null>", needed, id);
18223 return -EINVAL;
18224 }
18225
18226@@ -274,8 +236,8 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
18227
18228 /**
18229 * i40e_find_vsi_from_id - searches for the vsi with the given id
18230- * @pf - the pf structure to search for the vsi
18231- * @id - id of the vsi it is searching for
18232+ * @pf: the pf structure to search for the vsi
18233+ * @id: id of the vsi it is searching for
18234 **/
18235 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
18236 {
18237@@ -288,6 +250,22 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
18238 return NULL;
18239 }
18240
18241+/**
18242+ * i40e_find_vsi_from_seid - searches for the vsi with the given seid
18243+ * @pf: the pf structure to search for the vsi
18244+ * @seid: seid of the vsi it is searching for
18245+ **/
18246+struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_pf *pf, u16 seid)
18247+{
18248+ int i;
18249+
18250+ for (i = 0; i < pf->num_alloc_vsi; i++)
18251+ if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
18252+ return pf->vsi[i];
18253+
18254+ return NULL;
18255+}
18256+
18257 /**
18258 * i40e_service_event_schedule - Schedule the service task to wake up
18259 * @pf: board private structure
18260@@ -296,20 +274,27 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
18261 **/
18262 void i40e_service_event_schedule(struct i40e_pf *pf)
18263 {
18264- if (!test_bit(__I40E_DOWN, pf->state) &&
18265- !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
18266+ if ((!test_bit(__I40E_DOWN, pf->state) &&
18267+ !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
18268+ test_bit(__I40E_RECOVERY_MODE, pf->state))
18269 queue_work(i40e_wq, &pf->service_task);
18270 }
18271
18272 /**
18273 * i40e_tx_timeout - Respond to a Tx Hang
18274 * @netdev: network interface device structure
18275+ * @txqueue: stuck queue
18276 *
18277 * If any port has noticed a Tx timeout, it is likely that the whole
18278 * device is munged, not just the one netdev port, so go for the full
18279 * reset.
18280 **/
18281+#ifdef HAVE_TX_TIMEOUT_TXQUEUE
18282+static void
18283+i40e_tx_timeout(struct net_device *netdev, __always_unused unsigned int txqueue)
18284+#else
18285 static void i40e_tx_timeout(struct net_device *netdev)
18286+#endif
18287 {
18288 struct i40e_netdev_priv *np = netdev_priv(netdev);
18289 struct i40e_vsi *vsi = np->vsi;
18290@@ -350,12 +335,23 @@ static void i40e_tx_timeout(struct net_device *netdev)
18291 }
18292 }
18293
18294+#ifdef CONFIG_DEBUG_FS
18295+ if (vsi->block_tx_timeout) {
18296+ netdev_info(netdev, "tx_timeout recovery disabled\n");
18297+ return;
18298+ }
18299+#endif
18300+
18301 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
18302 pf->tx_timeout_recovery_level = 1; /* reset after some time */
18303 else if (time_before(jiffies,
18304 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
18305 return; /* don't do any new action before the next timeout */
18306
18307+ /* don't kick off another recovery if one is already pending */
18308+ if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
18309+ return;
18310+
18311 if (tx_ring) {
18312 head = i40e_get_head(tx_ring);
18313 /* Read interrupt register */
18314@@ -387,7 +383,9 @@ static void i40e_tx_timeout(struct net_device *netdev)
18315 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
18316 break;
18317 default:
18318- netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
18319+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
18320+ set_bit(__I40E_DOWN_REQUESTED, pf->state);
18321+ set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
18322 break;
18323 }
18324
18325@@ -402,11 +400,23 @@ static void i40e_tx_timeout(struct net_device *netdev)
18326 * Returns the address of the device statistics structure.
18327 * The statistics are actually updated from the service task.
18328 **/
18329+#ifdef HAVE_NDO_GET_STATS64
18330 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
18331 {
18332 return &vsi->net_stats;
18333 }
18334+#else
18335+struct net_device_stats *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
18336+{
18337+ /* It is possible for a VSIs to not have a netdev */
18338+ if (vsi->netdev)
18339+ return &vsi->netdev->stats;
18340+ else
18341+ return &vsi->net_stats;
18342+}
18343+#endif
18344
18345+#ifdef HAVE_NDO_GET_STATS64
18346 /**
18347 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
18348 * @ring: Tx ring to get statistics from
18349@@ -431,12 +441,19 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
18350 /**
18351 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
18352 * @netdev: network interface device structure
18353+ * @stats: data structure to store statistics
18354 *
18355 * Returns the address of the device statistics structure.
18356 * The statistics are actually updated from the service task.
18357 **/
18358+#ifdef HAVE_VOID_NDO_GET_STATS64
18359 static void i40e_get_netdev_stats_struct(struct net_device *netdev,
18360- struct rtnl_link_stats64 *stats)
18361+ struct rtnl_link_stats64 *stats)
18362+#else
18363+static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
18364+ struct net_device *netdev,
18365+ struct rtnl_link_stats64 *stats)
18366+#endif /* HAVE_VOID_NDO_GET_STATS64 */
18367 {
18368 struct i40e_netdev_priv *np = netdev_priv(netdev);
18369 struct i40e_ring *tx_ring, *rx_ring;
18370@@ -445,21 +462,29 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
18371 int i;
18372
18373 if (test_bit(__I40E_VSI_DOWN, vsi->state))
18374+#ifdef HAVE_VOID_NDO_GET_STATS64
18375 return;
18376+#else
18377+ return stats;
18378+#endif /* HAVE_VOID_NDO_GET_STATS_64 */
18379
18380 if (!vsi->tx_rings)
18381+#ifdef HAVE_VOID_NDO_GET_STATS64
18382 return;
18383+#else
18384+ return stats;
18385+#endif /* HAVE_VOID_NDO_GET_STATS_64 */
18386
18387 rcu_read_lock();
18388 for (i = 0; i < vsi->num_queue_pairs; i++) {
18389 u64 bytes, packets;
18390 unsigned int start;
18391
18392- tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
18393+ tx_ring = READ_ONCE(vsi->tx_rings[i]);
18394 if (!tx_ring)
18395 continue;
18396- i40e_get_netdev_stats_struct_tx(tx_ring, stats);
18397
18398+ i40e_get_netdev_stats_struct_tx(tx_ring, stats);
18399 rx_ring = &tx_ring[1];
18400
18401 do {
18402@@ -484,7 +509,21 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
18403 stats->rx_dropped = vsi_stats->rx_dropped;
18404 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
18405 stats->rx_length_errors = vsi_stats->rx_length_errors;
18406+#ifndef HAVE_VOID_NDO_GET_STATS64
18407+
18408+ return stats;
18409+#endif
18410+}
18411+#else
18412+static struct net_device_stats *i40e_get_netdev_stats_struct(
18413+ struct net_device *netdev)
18414+{
18415+ struct i40e_netdev_priv *np = netdev_priv(netdev);
18416+ struct i40e_vsi *vsi = np->vsi;
18417+
18418+ return i40e_get_vsi_stats_struct(vsi);
18419 }
18420+#endif /* HAVE_NDO_GET_STATS64 */
18421
18422 /**
18423 * i40e_vsi_reset_stats - Resets all stats of the given vsi
18424@@ -492,7 +531,11 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
18425 **/
18426 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
18427 {
18428+#ifdef HAVE_NDO_GET_STATS64
18429 struct rtnl_link_stats64 *ns;
18430+#else
18431+ struct net_device_stats *ns;
18432+#endif
18433 int i;
18434
18435 if (!vsi)
18436@@ -509,6 +552,10 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
18437 sizeof(vsi->rx_rings[i]->stats));
18438 memset(&vsi->rx_rings[i]->rx_stats, 0,
18439 sizeof(vsi->rx_rings[i]->rx_stats));
18440+#ifdef HAVE_XDP_SUPPORT
18441+ memset(&vsi->rx_rings[i]->xdp_stats, 0,
18442+ sizeof(vsi->rx_rings[i]->xdp_stats));
18443+#endif
18444 memset(&vsi->tx_rings[i]->stats, 0,
18445 sizeof(vsi->tx_rings[i]->stats));
18446 memset(&vsi->tx_rings[i]->tx_stats, 0,
18447@@ -536,10 +583,30 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
18448 sizeof(pf->veb[i]->stats));
18449 memset(&pf->veb[i]->stats_offsets, 0,
18450 sizeof(pf->veb[i]->stats_offsets));
18451+ memset(&pf->veb[i]->tc_stats, 0,
18452+ sizeof(pf->veb[i]->tc_stats));
18453+ memset(&pf->veb[i]->tc_stats_offsets, 0,
18454+ sizeof(pf->veb[i]->tc_stats_offsets));
18455 pf->veb[i]->stat_offsets_loaded = false;
18456 }
18457 }
18458 pf->hw_csum_rx_error = 0;
18459+#ifdef I40E_ADD_PROBES
18460+ pf->tcp_segs = 0;
18461+ pf->tx_tcp_cso = 0;
18462+ pf->tx_udp_cso = 0;
18463+ pf->tx_sctp_cso = 0;
18464+ pf->tx_ip4_cso = 0;
18465+ pf->rx_tcp_cso = 0;
18466+ pf->rx_udp_cso = 0;
18467+ pf->rx_sctp_cso = 0;
18468+ pf->rx_ip4_cso = 0;
18469+ pf->rx_tcp_cso_err = 0;
18470+ pf->rx_udp_cso_err = 0;
18471+ pf->rx_sctp_cso_err = 0;
18472+ pf->rx_ip4_cso_err = 0;
18473+ pf->hw_csum_rx_outer = 0;
18474+#endif
18475 }
18476
18477 /**
18478@@ -599,6 +666,20 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
18479 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
18480 }
18481
18482+/**
18483+ * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
18484+ * @hw: ptr to the hardware info
18485+ * @reg: the hw reg to read and clear
18486+ * @stat: ptr to the stat
18487+ **/
18488+static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
18489+{
18490+ u32 new_data = rd32(hw, reg);
18491+
18492+ wr32(hw, reg, 1); /* must write a nonzero value to clear register */
18493+ *stat += new_data;
18494+}
18495+
18496 /**
18497 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
18498 * @vsi: the VSI to be updated
18499@@ -624,9 +705,6 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
18500 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
18501 vsi->stat_offsets_loaded,
18502 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
18503- i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
18504- vsi->stat_offsets_loaded,
18505- &oes->tx_errors, &es->tx_errors);
18506
18507 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
18508 I40E_GLV_GORCL(stat_idx),
18509@@ -668,7 +746,7 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
18510 * i40e_update_veb_stats - Update Switch component statistics
18511 * @veb: the VEB being updated
18512 **/
18513-static void i40e_update_veb_stats(struct i40e_veb *veb)
18514+void i40e_update_veb_stats(struct i40e_veb *veb)
18515 {
18516 struct i40e_pf *pf = veb->pf;
18517 struct i40e_hw *hw = &pf->hw;
18518@@ -688,11 +766,10 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
18519 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
18520 veb->stat_offsets_loaded,
18521 &oes->tx_discards, &es->tx_discards);
18522- if (hw->revision_id > 0)
18523- i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
18524- veb->stat_offsets_loaded,
18525- &oes->rx_unknown_protocol,
18526- &es->rx_unknown_protocol);
18527+ i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
18528+ veb->stat_offsets_loaded,
18529+ &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
18530+
18531 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
18532 veb->stat_offsets_loaded,
18533 &oes->rx_bytes, &es->rx_bytes);
18534@@ -756,15 +833,22 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
18535 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
18536 {
18537 struct i40e_pf *pf = vsi->back;
18538+#ifdef HAVE_NDO_GET_STATS64
18539 struct rtnl_link_stats64 *ons;
18540 struct rtnl_link_stats64 *ns; /* netdev stats */
18541+#else
18542+ struct net_device_stats *ons;
18543+ struct net_device_stats *ns; /* netdev stats */
18544+#endif
18545 struct i40e_eth_stats *oes;
18546 struct i40e_eth_stats *es; /* device's eth stats */
18547 u32 tx_restart, tx_busy;
18548 struct i40e_ring *p;
18549 u32 rx_page, rx_buf;
18550 u64 bytes, packets;
18551+#ifdef HAVE_NDO_GET_STATS64
18552 unsigned int start;
18553+#endif
18554 u64 tx_linearize;
18555 u64 tx_force_wb;
18556 u64 rx_p, rx_b;
18557@@ -791,13 +875,17 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
18558 rcu_read_lock();
18559 for (q = 0; q < vsi->num_queue_pairs; q++) {
18560 /* locate Tx ring */
18561- p = ACCESS_ONCE(vsi->tx_rings[q]);
18562+ p = READ_ONCE(vsi->tx_rings[q]);
18563
18564+#ifdef HAVE_NDO_GET_STATS64
18565 do {
18566 start = u64_stats_fetch_begin_irq(&p->syncp);
18567+#endif
18568 packets = p->stats.packets;
18569 bytes = p->stats.bytes;
18570+#ifdef HAVE_NDO_GET_STATS64
18571 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
18572+#endif
18573 tx_b += bytes;
18574 tx_p += packets;
18575 tx_restart += p->tx_stats.restart_queue;
18576@@ -807,11 +895,15 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
18577
18578 /* Rx queue is part of the same block as Tx queue */
18579 p = &p[1];
18580+#ifdef HAVE_NDO_GET_STATS64
18581 do {
18582 start = u64_stats_fetch_begin_irq(&p->syncp);
18583+#endif
18584 packets = p->stats.packets;
18585 bytes = p->stats.bytes;
18586+#ifdef HAVE_NDO_GET_STATS64
18587 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
18588+#endif
18589 rx_b += bytes;
18590 rx_p += packets;
18591 rx_buf += p->rx_stats.alloc_buff_failed;
18592@@ -858,7 +950,7 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
18593 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
18594 struct i40e_hw_port_stats *nsd = &pf->stats;
18595 struct i40e_hw *hw = &pf->hw;
18596- u32 val;
18597+
18598 int i;
18599
18600 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
18601@@ -1040,41 +1132,31 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
18602 &osd->rx_jabber, &nsd->rx_jabber);
18603
18604 /* FDIR stats */
18605- i40e_stat_update32(hw,
18606- I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
18607- pf->stat_offsets_loaded,
18608- &osd->fd_atr_match, &nsd->fd_atr_match);
18609- i40e_stat_update32(hw,
18610- I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
18611- pf->stat_offsets_loaded,
18612- &osd->fd_sb_match, &nsd->fd_sb_match);
18613- i40e_stat_update32(hw,
18614- I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
18615- pf->stat_offsets_loaded,
18616- &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
18617-
18618- val = rd32(hw, I40E_PRTPM_EEE_STAT);
18619- nsd->tx_lpi_status =
18620- (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
18621- I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
18622- nsd->rx_lpi_status =
18623- (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
18624- I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
18625- i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
18626- pf->stat_offsets_loaded,
18627- &osd->tx_lpi_count, &nsd->tx_lpi_count);
18628- i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
18629- pf->stat_offsets_loaded,
18630- &osd->rx_lpi_count, &nsd->rx_lpi_count);
18631+ i40e_stat_update_and_clear32(hw,
18632+ I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
18633+ &nsd->fd_atr_match);
18634+ i40e_stat_update_and_clear32(hw,
18635+ I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
18636+ &nsd->fd_sb_match);
18637+ i40e_stat_update_and_clear32(hw,
18638+ I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
18639+ &nsd->fd_atr_tunnel_match);
18640+
18641+ i40e_get_phy_lpi_status(hw, nsd);
18642+ i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
18643+ &osd->tx_lpi_count, &nsd->tx_lpi_count,
18644+ &osd->rx_lpi_count, &nsd->rx_lpi_count);
18645+ i40e_get_lpi_duration(hw, nsd,
18646+ &nsd->tx_lpi_duration, &nsd->rx_lpi_duration);
18647
18648 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
18649- !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
18650+ !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
18651 nsd->fd_sb_status = true;
18652 else
18653 nsd->fd_sb_status = false;
18654
18655 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
18656- !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
18657+ !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
18658 nsd->fd_atr_status = true;
18659 else
18660 nsd->fd_atr_status = false;
18661@@ -1098,6 +1180,25 @@ void i40e_update_stats(struct i40e_vsi *vsi)
18662 i40e_update_vsi_stats(vsi);
18663 }
18664
18665+/**
18666+ * i40e_count_filters - counts VSI mac filters
18667+ * @vsi: the VSI to be searched
18668+ *
18669+ * Returns count of mac filters
18670+ **/
18671+int i40e_count_filters(struct i40e_vsi *vsi)
18672+{
18673+ struct i40e_mac_filter *f;
18674+ struct hlist_node *h;
18675+ int bkt;
18676+ int cnt = 0;
18677+
18678+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
18679+ ++cnt;
18680+
18681+ return cnt;
18682+}
18683+
18684 /**
18685 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
18686 * @vsi: the VSI to be searched
18687@@ -1106,8 +1207,8 @@ void i40e_update_stats(struct i40e_vsi *vsi)
18688 *
18689 * Returns ptr to the filter object or NULL
18690 **/
18691-static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
18692- const u8 *macaddr, s16 vlan)
18693+struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
18694+ const u8 *macaddr, s16 vlan)
18695 {
18696 struct i40e_mac_filter *f;
18697 u64 key;
18698@@ -1142,7 +1243,7 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
18699
18700 key = i40e_addr_to_hkey(macaddr);
18701 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
18702- if ((ether_addr_equal(macaddr, f->macaddr)))
18703+ if (ether_addr_equal(macaddr, f->macaddr))
18704 return f;
18705 }
18706 return NULL;
18707@@ -1172,11 +1273,11 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
18708 * i40e_sync_filters_subtask.
18709 *
18710 * Thus, we can simply use a boolean value, has_vlan_filters which we
18711- * will set to true when we add a VLAN filter in i40e_add_filter. Then
18712+ * will set to true when we add a vlan filter in i40e_add_filter. Then
18713 * we have to perform the full search after deleting filters in
18714 * i40e_sync_filters_subtask, but we already have to search
18715 * filters here and can perform the check at the same time. This
18716- * results in avoiding embedding a loop for VLAN mode inside another
18717+ * results in avoiding embedding a loop for vlan mode inside another
18718 * loop over all the filters, and should maintain correctness as noted
18719 * above.
18720 */
18721@@ -1185,7 +1286,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
18722
18723 /**
18724 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
18725- * @vsi: the VSI to configure
18726+ * @vsi: the vsi to configure
18727 * @tmp_add_list: list of filters ready to be added
18728 * @tmp_del_list: list of filters ready to be deleted
18729 * @vlan_filters: the number of active VLAN filters
18730@@ -1250,7 +1351,7 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
18731 /* Update the remaining active filters */
18732 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
18733 /* Combine the checks for whether a filter needs to be changed
18734- * and then determine the new VLAN inside the if block, in
18735+ * and then determine the new vlan inside the if block, in
18736 * order to avoid duplicating code for adding the new filter
18737 * then deleting the old filter.
18738 */
18739@@ -1353,28 +1454,22 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
18740 return NULL;
18741
18742 /* Update the boolean indicating if we need to function in
18743- * VLAN mode.
18744+ * vlan mode.
18745 */
18746 if (vlan >= 0)
18747 vsi->has_vlan_filter = true;
18748
18749 ether_addr_copy(f->macaddr, macaddr);
18750 f->vlan = vlan;
18751- /* If we're in overflow promisc mode, set the state directly
18752- * to failed, so we don't bother to try sending the filter
18753- * to the hardware.
18754- */
18755- if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
18756- f->state = I40E_FILTER_FAILED;
18757- else
18758- f->state = I40E_FILTER_NEW;
18759+ f->state = I40E_FILTER_NEW;
18760+
18761 INIT_HLIST_NODE(&f->hlist);
18762
18763 key = i40e_addr_to_hkey(macaddr);
18764 hash_add(vsi->mac_filter_hash, &f->hlist, key);
18765
18766 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
18767- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
18768+ set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
18769 }
18770
18771 /* If we're asked to add a filter that has been marked for removal, it
18772@@ -1392,46 +1487,10 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
18773 }
18774
18775 /**
18776- * __i40e_del_filter - Remove a specific filter from the VSI
18777- * @vsi: VSI to remove from
18778- * @f: the filter to remove from the list
18779- *
18780- * This function should be called instead of i40e_del_filter only if you know
18781- * the exact filter you will remove already, such as via i40e_find_filter or
18782- * i40e_find_mac.
18783- *
18784- * NOTE: This function is expected to be called with mac_filter_hash_lock
18785- * being held.
18786- * ANOTHER NOTE: This function MUST be called from within the context of
18787- * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
18788- * instead of list_for_each_entry().
18789- **/
18790-void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
18791-{
18792- if (!f)
18793- return;
18794-
18795- /* If the filter was never added to firmware then we can just delete it
18796- * directly and we don't want to set the status to remove or else an
18797- * admin queue command will unnecessarily fire.
18798- */
18799- if ((f->state == I40E_FILTER_FAILED) ||
18800- (f->state == I40E_FILTER_NEW)) {
18801- hash_del(&f->hlist);
18802- kfree(f);
18803- } else {
18804- f->state = I40E_FILTER_REMOVE;
18805- }
18806-
18807- vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
18808- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
18809-}
18810-
18811-/**
18812- * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
18813+ * i40e_del_filter - Remove a mac/vlan filter from the VSI
18814 * @vsi: the VSI to be searched
18815 * @macaddr: the MAC address
18816- * @vlan: the VLAN
18817+ * @vlan: the vlan
18818 *
18819 * NOTE: This function is expected to be called with mac_filter_hash_lock
18820 * being held.
18821@@ -1504,8 +1563,6 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
18822 bool found = false;
18823 int bkt;
18824
18825- WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
18826- "Missing mac_filter_hash_lock\n");
18827 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
18828 if (ether_addr_equal(macaddr, f->macaddr)) {
18829 __i40e_del_filter(vsi, f);
18830@@ -1543,8 +1600,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
18831 return 0;
18832 }
18833
18834- if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
18835- test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
18836+ if (test_bit(__I40E_DOWN, pf->state) ||
18837+ test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
18838 return -EADDRNOTAVAIL;
18839
18840 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
18841@@ -1553,16 +1610,22 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
18842 else
18843 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
18844
18845+ /* Copy the address first, so that we avoid a possible race with
18846+ * .set_rx_mode(). If we copy after changing the address in the filter
18847+ * list, we might open ourselves to a narrow race window where
18848+ * .set_rx_mode could delete our dev_addr filter and prevent traffic
18849+ * from passing.
18850+ */
18851+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
18852+
18853 spin_lock_bh(&vsi->mac_filter_hash_lock);
18854 i40e_del_mac_filter(vsi, netdev->dev_addr);
18855 i40e_add_mac_filter(vsi, addr->sa_data);
18856 spin_unlock_bh(&vsi->mac_filter_hash_lock);
18857- ether_addr_copy(netdev->dev_addr, addr->sa_data);
18858 if (vsi->type == I40E_VSI_MAIN) {
18859 i40e_status ret;
18860
18861- ret = i40e_aq_mac_address_write(&vsi->back->hw,
18862- I40E_AQC_WRITE_TYPE_LAA_WOL,
18863+ ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
18864 addr->sa_data, NULL);
18865 if (ret)
18866 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
18867@@ -1573,77 +1636,250 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
18868 /* schedule our worker thread which will take care of
18869 * applying the new filter changes
18870 */
18871- i40e_service_event_schedule(vsi->back);
18872+ i40e_service_event_schedule(pf);
18873 return 0;
18874 }
18875
18876 /**
18877- * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
18878- * @vsi: the VSI being setup
18879- * @ctxt: VSI context structure
18880- * @enabled_tc: Enabled TCs bitmap
18881- * @is_add: True if called before Add VSI
18882- *
18883- * Setup VSI queue mapping for enabled traffic classes.
18884+ * i40e_config_rss_aq - Prepare for RSS using AQ commands
18885+ * @vsi: vsi structure
18886+ * @seed: RSS hash seed
18887+ * @lut: Buffer to store the lookup table entries
18888+ * @lut_size: Size of buffer to store the lookup table entries
18889 **/
18890-static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
18891- struct i40e_vsi_context *ctxt,
18892- u8 enabled_tc,
18893- bool is_add)
18894+static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
18895+ u8 *lut, u16 lut_size)
18896 {
18897 struct i40e_pf *pf = vsi->back;
18898- u16 sections = 0;
18899- u8 netdev_tc = 0;
18900- u16 numtc = 0;
18901- u16 qcount;
18902- u8 offset;
18903- u16 qmap;
18904- int i;
18905- u16 num_tc_qps = 0;
18906-
18907- sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
18908- offset = 0;
18909+ struct i40e_hw *hw = &pf->hw;
18910+ int ret = 0;
18911
18912- if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
18913- /* Find numtc from enabled TC bitmap */
18914- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
18915- if (enabled_tc & BIT(i)) /* TC is enabled */
18916- numtc++;
18917- }
18918- if (!numtc) {
18919- dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
18920- numtc = 1;
18921+ if (seed) {
18922+ struct i40e_aqc_get_set_rss_key_data *seed_dw =
18923+ (struct i40e_aqc_get_set_rss_key_data *)seed;
18924+ ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
18925+ if (ret) {
18926+ dev_info(&pf->pdev->dev,
18927+ "Cannot set RSS key, err %s aq_err %s\n",
18928+ i40e_stat_str(hw, ret),
18929+ i40e_aq_str(hw, hw->aq.asq_last_status));
18930+ return ret;
18931 }
18932- } else {
18933- /* At least TC0 is enabled in case of non-DCB case */
18934- numtc = 1;
18935 }
18936+ if (lut) {
18937+ bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
18938
18939- vsi->tc_config.numtc = numtc;
18940- vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
18941- /* Number of queues per enabled TC */
18942- qcount = vsi->alloc_queue_pairs;
18943+ ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
18944+ if (ret) {
18945+ dev_info(&pf->pdev->dev,
18946+ "Cannot set RSS lut, err %s aq_err %s\n",
18947+ i40e_stat_str(hw, ret),
18948+ i40e_aq_str(hw, hw->aq.asq_last_status));
18949+ return ret;
18950+ }
18951+ }
18952+ return ret;
18953+}
18954
18955- num_tc_qps = qcount / numtc;
18956- num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
18957+/**
18958+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
18959+ * @vsi: VSI structure
18960+ **/
18961+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
18962+{
18963+ struct i40e_pf *pf = vsi->back;
18964+ u8 seed[I40E_HKEY_ARRAY_SIZE];
18965+ u8 *lut;
18966+ int ret;
18967
18968- /* Setup queue offset/count for all TCs for given VSI */
18969- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
18970- /* See if the given TC is enabled for the given VSI */
18971- if (vsi->tc_config.enabled_tc & BIT(i)) {
18972- /* TC is enabled */
18973- int pow, num_qps;
18974+ if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
18975+ return 0;
18976+ if (!vsi->rss_size)
18977+ vsi->rss_size = min_t(int, pf->alloc_rss_size,
18978+ vsi->num_queue_pairs);
18979+ if (!vsi->rss_size)
18980+ return -EINVAL;
18981+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
18982+ if (!lut)
18983+ return -ENOMEM;
18984
18985- switch (vsi->type) {
18986- case I40E_VSI_MAIN:
18987- qcount = min_t(int, pf->alloc_rss_size,
18988- num_tc_qps);
18989- break;
18990- case I40E_VSI_FDIR:
18991- case I40E_VSI_SRIOV:
18992- case I40E_VSI_VMDQ2:
18993- default:
18994- qcount = num_tc_qps;
18995+ /* Use the user configured hash keys and lookup table if there is one,
18996+ * otherwise use default
18997+ */
18998+ if (vsi->rss_lut_user)
18999+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
19000+ else
19001+ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
19002+ if (vsi->rss_hkey_user)
19003+ memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
19004+ else
19005+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
19006+ ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
19007+ kfree(lut);
19008+ return ret;
19009+}
19010+
19011+#ifdef __TC_MQPRIO_MODE_MAX
19012+/**
19013+ * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
19014+ * @vsi: the VSI being configured,
19015+ * @ctxt: VSI context structure
19016+ * @enabled_tc: number of traffic classes to enable
19017+ *
19018+ * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
19019+ **/
19020+static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
19021+ struct i40e_vsi_context *ctxt,
19022+ u8 enabled_tc)
19023+{
19024+ u16 qcount = 0, max_qcount, qmap, sections = 0;
19025+ int i, override_q, pow, num_qps, ret;
19026+ u8 netdev_tc = 0, offset = 0;
19027+
19028+ if (vsi->type != I40E_VSI_MAIN)
19029+ return -EINVAL;
19030+ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
19031+ sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
19032+ vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
19033+ vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
19034+ num_qps = vsi->mqprio_qopt.qopt.count[0];
19035+
19036+ /* find the next higher power-of-2 of num queue pairs */
19037+ pow = ilog2(num_qps);
19038+ if (!is_power_of_2(num_qps))
19039+ pow++;
19040+ qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
19041+ (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
19042+
19043+ /* Setup queue offset/count for all TCs for given VSI */
19044+ max_qcount = vsi->mqprio_qopt.qopt.count[0];
19045+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
19046+ /* See if the given TC is enabled for the given VSI */
19047+ if (vsi->tc_config.enabled_tc & BIT(i)) {
19048+ offset = vsi->mqprio_qopt.qopt.offset[i];
19049+ qcount = vsi->mqprio_qopt.qopt.count[i];
19050+ if (qcount > max_qcount)
19051+ max_qcount = qcount;
19052+ vsi->tc_config.tc_info[i].qoffset = offset;
19053+ vsi->tc_config.tc_info[i].qcount = qcount;
19054+ vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
19055+ } else {
19056+ /* TC is not enabled so set the offset to
19057+ * default queue and allocate one queue
19058+ * for the given TC.
19059+ */
19060+ vsi->tc_config.tc_info[i].qoffset = 0;
19061+ vsi->tc_config.tc_info[i].qcount = 1;
19062+ vsi->tc_config.tc_info[i].netdev_tc = 0;
19063+ }
19064+ }
19065+
19066+ /* Set actual Tx/Rx queue pairs */
19067+ vsi->num_queue_pairs = offset + qcount;
19068+
19069+ /* Setup queue TC[0].qmap for given VSI context */
19070+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
19071+ ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
19072+ ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
19073+ ctxt->info.valid_sections |= cpu_to_le16(sections);
19074+
19075+ /* Reconfigure RSS for main VSI with max queue count */
19076+ vsi->rss_size = max_qcount;
19077+ ret = i40e_vsi_config_rss(vsi);
19078+ if (ret) {
19079+ dev_info(&vsi->back->pdev->dev,
19080+ "Failed to reconfig rss for num_queues (%u)\n",
19081+ max_qcount);
19082+ return ret;
19083+ }
19084+ vsi->reconfig_rss = true;
19085+ dev_dbg(&vsi->back->pdev->dev,
19086+ "Reconfigured rss with num_queues (%u)\n", max_qcount);
19087+
19088+ /* Find queue count available for channel VSIs and starting offset
19089+ * for channel VSIs
19090+ */
19091+ override_q = vsi->mqprio_qopt.qopt.count[0];
19092+ if (override_q && override_q < vsi->num_queue_pairs) {
19093+ vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
19094+ vsi->next_base_queue = override_q;
19095+ }
19096+ return 0;
19097+}
19098+#endif
19099+
19100+/**
19101+ * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
19102+ * @vsi: the VSI being setup
19103+ * @ctxt: VSI context structure
19104+ * @enabled_tc: Enabled TCs bitmap
19105+ * @is_add: True if called before Add VSI
19106+ *
19107+ * Setup VSI queue mapping for enabled traffic classes.
19108+ **/
19109+static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
19110+ struct i40e_vsi_context *ctxt,
19111+ u8 enabled_tc,
19112+ bool is_add)
19113+{
19114+ struct i40e_pf *pf = vsi->back;
19115+ u16 sections = 0;
19116+ u8 netdev_tc = 0;
19117+ u16 qcount = 0;
19118+ u16 numtc = 1;
19119+ u8 offset;
19120+ u16 qmap;
19121+ int i;
19122+ u16 num_tc_qps = 0;
19123+
19124+ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
19125+ offset = 0;
19126+
19127+ /* Number of queues per enabled TC */
19128+ num_tc_qps = vsi->alloc_queue_pairs;
19129+ if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
19130+ /* Find numtc from enabled TC bitmap */
19131+ for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
19132+ if (enabled_tc & BIT(i)) /* TC is enabled */
19133+ numtc++;
19134+ }
19135+ if (!numtc) {
19136+ dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
19137+ numtc = 1;
19138+ }
19139+ num_tc_qps = num_tc_qps / numtc;
19140+ num_tc_qps = min_t(int, num_tc_qps,
19141+ i40e_pf_get_max_q_per_tc(pf));
19142+ }
19143+
19144+ vsi->tc_config.numtc = numtc;
19145+ vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
19146+
19147+ /* Do not allow use more TC queue pairs than MSI-X vectors exist */
19148+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
19149+ num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
19150+
19151+ /* Setup queue offset/count for all TCs for given VSI */
19152+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
19153+ /* See if the given TC is enabled for the given VSI */
19154+ if (vsi->tc_config.enabled_tc & BIT(i)) {
19155+ int pow, num_qps;
19156+
19157+ switch (vsi->type) {
19158+ case I40E_VSI_MAIN:
19159+ if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
19160+ I40E_FLAG_FD_ATR_ENABLED)) ||
19161+ vsi->tc_config.enabled_tc != 1) {
19162+ qcount = min_t(int, pf->alloc_rss_size,
19163+ num_tc_qps);
19164+ break;
19165+ }
19166+ /* fall through */
19167+ case I40E_VSI_FDIR:
19168+ case I40E_VSI_SRIOV:
19169+ case I40E_VSI_VMDQ2:
19170+ default:
19171+ qcount = num_tc_qps;
19172 WARN_ON(i != 0);
19173 break;
19174 }
19175@@ -1681,6 +1917,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
19176 /* Set actual Tx/Rx queue pairs */
19177 vsi->num_queue_pairs = offset;
19178 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
19179+ /* This code helps add more queue to the VSI if we have
19180+ * more cores than RSS can support, the higher cores will
19181+ * be served by ATR or other filters.
19182+ */
19183 if (vsi->req_queue_pairs > 0)
19184 vsi->num_queue_pairs = vsi->req_queue_pairs;
19185 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
19186@@ -1739,6 +1979,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
19187 struct i40e_netdev_priv *np = netdev_priv(netdev);
19188 struct i40e_vsi *vsi = np->vsi;
19189
19190+ /* Under some circumstances, we might receive a request to delete
19191+ * our own device address from our uc list. Because we store the
19192+ * device address in the VSI's MAC/VLAN filter list, we need to ignore
19193+ * such requests and not delete our device address from this list.
19194+ */
19195+ if (ether_addr_equal(addr, netdev->dev_addr))
19196+ return 0;
19197+
19198 i40e_del_mac_filter(vsi, addr);
19199
19200 return 0;
19201@@ -1763,7 +2011,7 @@ static void i40e_set_rx_mode(struct net_device *netdev)
19202 /* check for other flag changes */
19203 if (vsi->current_netdev_flags != vsi->netdev->flags) {
19204 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
19205- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
19206+ set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
19207 }
19208
19209 /* schedule our worker thread which will take care of
19210@@ -1774,7 +2022,7 @@ static void i40e_set_rx_mode(struct net_device *netdev)
19211
19212 /**
19213 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
19214- * @vsi: Pointer to VSI struct
19215+ * @vsi: Pointer to vsi struct
19216 * @from: Pointer to list which contains MAC filter entries - changes to
19217 * those entries needs to be undone.
19218 *
19219@@ -1840,7 +2088,7 @@ struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
19220 * from firmware
19221 * @count: Number of filters added
19222 * @add_list: return data from fw
19223- * @head: pointer to first filter in current batch
19224+ * @add_head: pointer to first filter in current batch
19225 *
19226 * MAC filter entries from list were slated to be added to device. Returns
19227 * number of successful filters. Note that 0 does NOT mean success!
19228@@ -1917,17 +2165,16 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
19229 * @list: the list of filters to send to firmware
19230 * @add_head: Position in the add hlist
19231 * @num_add: the number of filters to add
19232- * @promisc_change: set to true on exit if promiscuous mode was forced on
19233 *
19234 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
19235- * promisc_changed to true if the firmware has run out of space for more
19236- * filters.
19237+ * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
19238+ * space for more filters.
19239 */
19240 static
19241 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
19242 struct i40e_aqc_add_macvlan_element_data *list,
19243 struct i40e_new_mac_filter *add_head,
19244- int num_add, bool *promisc_changed)
19245+ int num_add)
19246 {
19247 struct i40e_hw *hw = &vsi->back->hw;
19248 int aq_err, fcnt;
19249@@ -1937,18 +2184,29 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
19250 fcnt = i40e_update_filter_state(num_add, list, add_head);
19251
19252 if (fcnt != num_add) {
19253- *promisc_changed = true;
19254- set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
19255- dev_warn(&vsi->back->pdev->dev,
19256- "Error %s adding RX filters on %s, promiscuous mode forced on\n",
19257- i40e_aq_str(hw, aq_err),
19258- vsi_name);
19259+ if (vsi->type == I40E_VSI_MAIN) {
19260+ set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
19261+ dev_warn(&vsi->back->pdev->dev,
19262+ "Error %s adding RX filters on %s, promiscuous mode forced on\n",
19263+ i40e_aq_str(hw, aq_err), vsi_name);
19264+ } else if (vsi->type == I40E_VSI_SRIOV ||
19265+ vsi->type == I40E_VSI_VMDQ1 ||
19266+ vsi->type == I40E_VSI_VMDQ2) {
19267+ dev_warn(&vsi->back->pdev->dev,
19268+ "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
19269+ i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
19270+ } else {
19271+ dev_warn(&vsi->back->pdev->dev,
19272+ "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
19273+ i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
19274+ }
19275 }
19276 }
19277
19278 /**
19279 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
19280 * @vsi: pointer to the VSI
19281+ * @vsi_name: the VSI name
19282 * @f: filter data
19283 *
19284 * This function sets or clears the promiscuous broadcast flags for VLAN
19285@@ -1978,11 +2236,13 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
19286 NULL);
19287 }
19288
19289- if (aq_ret)
19290+ if (aq_ret) {
19291+ set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
19292 dev_warn(&vsi->back->pdev->dev,
19293- "Error %s setting broadcast promiscuous mode on %s\n",
19294+ "Error %s, forcing overflow promiscuous on %s\n",
19295 i40e_aq_str(hw, hw->aq.asq_last_status),
19296 vsi_name);
19297+ }
19298
19299 return aq_ret;
19300 }
19301@@ -1994,7 +2254,7 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
19302 *
19303 * There are different ways of setting promiscuous mode on a PF depending on
19304 * what state/environment we're in. This identifies and sets it appropriately.
19305- * Returns 0 on success.
19306+ * Returns I40E_SUCCESS on success.
19307 **/
19308 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
19309 {
19310@@ -2054,6 +2314,56 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
19311 return aq_ret;
19312 }
19313
19314+ /**
19315+ * i40e_set_switch_mode - sets up switch mode correctly
19316+ * @pf: working PF
19317+ * @l4type: TCP, UDP, or both
19318+ *
19319+ * Sets up switch mode correctly
19320+ **/
19321+static void i40e_set_switch_mode(struct i40e_pf *pf, u8 l4type)
19322+{
19323+ struct i40e_hw *hw;
19324+ u8 mode;
19325+ int ret;
19326+
19327+ if (!pf)
19328+ return;
19329+
19330+ hw = &pf->hw;
19331+
19332+ /* Set Bit 7 to be valid */
19333+ mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
19334+
19335+ /* We only support destination port filters, so don't set the
19336+ * source port bit here.
19337+ */
19338+ if (l4type > I40E_AQ_SET_SWITCH_L4_TYPE_BOTH ||
19339+ l4type < I40E_AQ_SET_SWITCH_L4_TYPE_TCP) {
19340+ dev_warn(&pf->pdev->dev,
19341+ "invalid L4 type 0x%x, unable to set switch mode\n",
19342+ l4type);
19343+ return;
19344+ }
19345+
19346+ mode |= l4type;
19347+
19348+ /* Set cloud filter mode */
19349+ mode |= I40E_AQ_SET_SWITCH_MODE_L4_PORT;
19350+
19351+ dev_dbg(&pf->pdev->dev, "setting switch mode to 0x%x\n", mode);
19352+ /* Prep mode field for set_switch_config */
19353+ ret = i40e_aq_set_switch_config(hw, 0, 0, mode, NULL);
19354+ /* If the driver is reloaded, the AQ call will fail. So don't make a
19355+ * big deal about it.
19356+ */
19357+ if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
19358+ dev_dbg(&pf->pdev->dev,
19359+ "couldn't set switch config bits, err %s aq_err %s\n",
19360+ i40e_stat_str(hw, ret),
19361+ i40e_aq_str(hw, hw->aq.asq_last_status));
19362+}
19363+
19364 /**
19365 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
19366 * @vsi: ptr to the VSI
19367@@ -2068,9 +2378,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
19368 struct i40e_mac_filter *f;
19369 struct i40e_new_mac_filter *new, *add_head = NULL;
19370 struct i40e_hw *hw = &vsi->back->hw;
19371+ bool old_overflow, new_overflow;
19372 unsigned int failed_filters = 0;
19373 unsigned int vlan_filters = 0;
19374- bool promisc_changed = false;
19375 char vsi_name[16] = "PF";
19376 int filter_list_len = 0;
19377 i40e_status aq_ret = 0;
19378@@ -2092,6 +2402,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
19379 usleep_range(1000, 2000);
19380 pf = vsi->back;
19381
19382+ old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
19383+
19384 if (vsi->netdev) {
19385 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
19386 vsi->current_netdev_flags = vsi->netdev->flags;
19387@@ -2182,7 +2494,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
19388 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
19389 } else {
19390 del_list[num_del].vlan_tag =
19391- cpu_to_le16((u16)(f->vlan));
19392+ CPU_TO_LE16((u16)(f->vlan));
19393 }
19394
19395 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
19396@@ -2224,12 +2536,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
19397
19398 num_add = 0;
19399 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
19400- if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
19401- vsi->state)) {
19402- new->state = I40E_FILTER_FAILED;
19403- continue;
19404- }
19405-
19406 /* handle broadcast filters by updating the broadcast
19407 * promiscuous flag instead of adding a MAC filter.
19408 */
19409@@ -2253,27 +2559,26 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
19410 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
19411 } else {
19412 add_list[num_add].vlan_tag =
19413- cpu_to_le16((u16)(new->f->vlan));
19414+ CPU_TO_LE16((u16)(new->f->vlan));
19415 }
19416 add_list[num_add].queue_number = 0;
19417 /* set invalid match method for later detection */
19418 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
19419 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
19420- add_list[num_add].flags = cpu_to_le16(cmd_flags);
19421+ add_list[num_add].flags = CPU_TO_LE16(cmd_flags);
19422 num_add++;
19423
19424 /* flush a full buffer */
19425 if (num_add == filter_list_len) {
19426 i40e_aqc_add_filters(vsi, vsi_name, add_list,
19427- add_head, num_add,
19428- &promisc_changed);
19429+ add_head, num_add);
19430 memset(add_list, 0, list_size);
19431 num_add = 0;
19432 }
19433 }
19434 if (num_add) {
19435 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
19436- num_add, &promisc_changed);
19437+ num_add);
19438 }
19439 /* Now move all of the filters from the temp add list back to
19440 * the VSI's list.
19441@@ -2302,33 +2607,32 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
19442 }
19443 spin_unlock_bh(&vsi->mac_filter_hash_lock);
19444
19445- /* If promiscuous mode has changed, we need to calculate a new
19446- * threshold for when we are safe to exit
19447- */
19448- if (promisc_changed)
19449- vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
19450-
19451 /* Check if we are able to exit overflow promiscuous mode. We can
19452 * safely exit if we didn't just enter, we no longer have any failed
19453 * filters, and we have reduced filters below the threshold value.
19454 */
19455- if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
19456- !promisc_changed && !failed_filters &&
19457- (vsi->active_filters < vsi->promisc_threshold)) {
19458+ if (old_overflow && !failed_filters &&
19459+ vsi->active_filters < vsi->promisc_threshold) {
19460 dev_info(&pf->pdev->dev,
19461 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
19462 vsi_name);
19463 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
19464- promisc_changed = true;
19465 vsi->promisc_threshold = 0;
19466 }
19467-
19468 /* if the VF is not trusted do not do promisc */
19469 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
19470 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
19471 goto out;
19472 }
19473
19474+ new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
19475+
19476+ /* If we are entering overflow promiscuous, we need to calculate a new
19477+ * threshold for when we are safe to exit
19478+ */
19479+ if (!old_overflow && new_overflow)
19480+ vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
19481+
19482 /* check for changes in promiscuous modes */
19483 if (changed_flags & IFF_ALLMULTI) {
19484 bool cur_multipromisc;
19485@@ -2346,15 +2650,18 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
19486 vsi_name,
19487 i40e_stat_str(hw, aq_ret),
19488 i40e_aq_str(hw, hw->aq.asq_last_status));
19489+ } else {
19490+ dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
19491+ vsi->netdev->name,
19492+ cur_multipromisc ? "entering" : "leaving");
19493 }
19494 }
19495
19496- if ((changed_flags & IFF_PROMISC) || promisc_changed) {
19497+ if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
19498 bool cur_promisc;
19499
19500 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
19501- test_bit(__I40E_VSI_OVERFLOW_PROMISC,
19502- vsi->state));
19503+ new_overflow);
19504 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
19505 if (aq_ret) {
19506 retval = i40e_aq_rc_to_posix(aq_ret,
19507@@ -2366,6 +2673,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
19508 i40e_stat_str(hw, aq_ret),
19509 i40e_aq_str(hw, hw->aq.asq_last_status));
19510 }
19511+
19512 }
19513 out:
19514 /* if something went wrong then set the changed flag so we try again */
19515@@ -2396,18 +2704,21 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
19516 {
19517 int v;
19518
19519- if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
19520+ if (!pf)
19521+ return;
19522+
19523+ if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
19524 return;
19525- pf->flags &= ~I40E_FLAG_FILTER_SYNC;
19526
19527 for (v = 0; v < pf->num_alloc_vsi; v++) {
19528 if (pf->vsi[v] &&
19529- (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
19530+ (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
19531+ !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
19532 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
19533-
19534 if (ret) {
19535 /* come back and try again later */
19536- pf->flags |= I40E_FLAG_FILTER_SYNC;
19537+ set_bit(__I40E_MACVLAN_SYNC_PENDING,
19538+ pf->state);
19539 break;
19540 }
19541 }
19542@@ -2436,26 +2747,52 @@ static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
19543 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
19544 {
19545 struct i40e_netdev_priv *np = netdev_priv(netdev);
19546+ int max_frame = new_mtu + I40E_PACKET_HDR_PAD;
19547 struct i40e_vsi *vsi = np->vsi;
19548 struct i40e_pf *pf = vsi->back;
19549
19550- if (i40e_enabled_xdp_vsi(vsi)) {
19551- int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
19552+ /* MTU < 68 is an error and causes problems on some kernels */
19553+ if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
19554+ return -EINVAL;
19555
19556- if (frame_size > i40e_max_xdp_frame_size(vsi))
19557+ if (i40e_enabled_xdp_vsi(vsi)) {
19558+ if (max_frame > i40e_max_xdp_frame_size(vsi))
19559 return -EINVAL;
19560 }
19561
19562+#ifndef HAVE_NDO_FEATURES_CHECK
19563+
19564+ /* MTU < 576 causes problems with TSO */
19565+ if (new_mtu < 576) {
19566+ netdev->features &= ~NETIF_F_TSO;
19567+ netdev->features &= ~NETIF_F_TSO6;
19568+#ifdef HAVE_NDO_SET_FEATURES
19569+ } else {
19570+#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT
19571+ if (netdev->wanted_features & NETIF_F_TSO)
19572+ netdev->features |= NETIF_F_TSO;
19573+ if (netdev->wanted_features & NETIF_F_TSO6)
19574+ netdev->features |= NETIF_F_TSO6;
19575+#else
19576+ if (netdev_extended(netdev)->wanted_features & NETIF_F_TSO)
19577+ netdev->features |= NETIF_F_TSO;
19578+ if (netdev_extended(netdev)->wanted_features & NETIF_F_TSO6)
19579+ netdev->features |= NETIF_F_TSO6;
19580+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
19581+#endif /* HAVE_NDO_SET_FEATURES */
19582+ }
19583+#endif /* ! HAVE_NDO_FEATURES_CHECK */
19584 netdev_info(netdev, "changing MTU from %d to %d\n",
19585 netdev->mtu, new_mtu);
19586 netdev->mtu = new_mtu;
19587 if (netif_running(netdev))
19588 i40e_vsi_reinit_locked(vsi);
19589- pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
19590- I40E_FLAG_CLIENT_L2_CHANGE);
19591+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
19592+ set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
19593 return 0;
19594 }
19595
19596+#if defined(HAVE_PTP_1588_CLOCK) || defined(HAVE_I40E_INTELCIM_IOCTL)
19597 /**
19598 * i40e_ioctl - Access the hwtstamp interface
19599 * @netdev: network interface device structure
19600@@ -2464,19 +2801,34 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
19601 **/
19602 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
19603 {
19604+#ifdef HAVE_PTP_1588_CLOCK
19605 struct i40e_netdev_priv *np = netdev_priv(netdev);
19606 struct i40e_pf *pf = np->vsi->back;
19607
19608+#endif /* HAVE_PTP_1588_CLOCK */
19609 switch (cmd) {
19610+#ifdef HAVE_PTP_1588_CLOCK
19611+#ifdef SIOCGHWTSTAMP
19612 case SIOCGHWTSTAMP:
19613 return i40e_ptp_get_ts_config(pf, ifr);
19614+#endif
19615 case SIOCSHWTSTAMP:
19616+ if (!capable(CAP_SYS_ADMIN))
19617+ return -EACCES;
19618 return i40e_ptp_set_ts_config(pf, ifr);
19619+ case SIOCSPINS:
19620+ if (!capable(CAP_SYS_ADMIN))
19621+ return -EACCES;
19622+ return i40e_ptp_set_pins_ioctl(pf, ifr);
19623+ case SIOCGPINS:
19624+ return i40e_ptp_get_pins(pf, ifr);
19625+#endif /* HAVE_PTP_1588_CLOCK */
19626 default:
19627 return -EOPNOTSUPP;
19628 }
19629 }
19630
19631+#endif
19632 /**
19633 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
19634 * @vsi: the vsi being adjusted
19635@@ -2486,6 +2838,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
19636 struct i40e_vsi_context ctxt;
19637 i40e_status ret;
19638
19639+ /* Don't modify stripping options if a port vlan is active */
19640+ if (vsi->info.pvid)
19641+ return;
19642+
19643 if ((vsi->info.valid_sections &
19644 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
19645 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
19646@@ -2516,6 +2872,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
19647 struct i40e_vsi_context ctxt;
19648 i40e_status ret;
19649
19650+ /* Don't modify stripping options if a port vlan is active */
19651+ if (vsi->info.pvid)
19652+ return;
19653+
19654 if ((vsi->info.valid_sections &
19655 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
19656 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
19657@@ -2534,25 +2894,31 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
19658 "update vlan stripping failed, err %s aq_err %s\n",
19659 i40e_stat_str(&vsi->back->hw, ret),
19660 i40e_aq_str(&vsi->back->hw,
19661- vsi->back->hw.aq.asq_last_status));
19662+ vsi->back->hw.aq.asq_last_status));
19663 }
19664 }
19665
19666+#ifdef HAVE_VLAN_RX_REGISTER
19667 /**
19668 * i40e_vlan_rx_register - Setup or shutdown vlan offload
19669 * @netdev: network interface to be adjusted
19670- * @features: netdev features to test if VLAN offload is enabled or not
19671+ * @grp: new vlan group list, NULL if disabling
19672 **/
19673-static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
19674+static void i40e_vlan_rx_register(struct net_device *netdev,
19675+ struct vlan_group *grp)
19676 {
19677 struct i40e_netdev_priv *np = netdev_priv(netdev);
19678 struct i40e_vsi *vsi = np->vsi;
19679+ bool enable;
19680
19681- if (features & NETIF_F_HW_VLAN_CTAG_RX)
19682+ vsi->vlgrp = grp;
19683+ enable = (grp || (vsi->back->flags & I40E_FLAG_DCB_ENABLED));
19684+ if (enable)
19685 i40e_vlan_stripping_enable(vsi);
19686 else
19687 i40e_vlan_stripping_disable(vsi);
19688 }
19689+#endif /* HAVE_VLAN_RX_REGISTER */
19690
19691 /**
19692 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
19693@@ -2562,7 +2928,7 @@ static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
19694 * This is a helper function for adding a new MAC/VLAN filter with the
19695 * specified VLAN for each existing MAC address already in the hash table.
19696 * This function does *not* perform any accounting to update filters based on
19697- * VLAN mode.
19698+ * vlan mode.
19699 *
19700 * NOTE: this function expects to be called while under the
19701 * mac_filter_hash_lock
19702@@ -2589,17 +2955,14 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
19703 }
19704
19705 /**
19706- * i40e_vsi_add_vlan - Add VSI membership for given VLAN
19707- * @vsi: the VSI being configured
19708- * @vid: VLAN id to be added
19709+ * i40e_vsi_add_vlan - Add vsi membership for given vlan
19710+ * @vsi: the vsi being configured
19711+ * @vid: vlan id to be added
19712 **/
19713 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
19714 {
19715 int err;
19716
19717- if (vsi->info.pvid)
19718- return -EINVAL;
19719-
19720 /* The network stack will attempt to add VID=0, with the intention to
19721 * receive priority tagged packets with a VLAN of 0. Our HW receives
19722 * these packets by default when configured to receive untagged
19723@@ -2651,9 +3014,9 @@ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
19724 }
19725
19726 /**
19727- * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
19728- * @vsi: the VSI being configured
19729- * @vid: VLAN id to be removed
19730+ * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
19731+ * @vsi: the vsi being configured
19732+ * @vid: vlan id to be removed
19733 **/
19734 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
19735 {
19736@@ -2673,36 +3036,126 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
19737 /**
19738 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
19739 * @netdev: network interface to be adjusted
19740+ * @proto: unused protocol value
19741 * @vid: vlan id to be added
19742 *
19743 * net_device_ops implementation for adding vlan ids
19744 **/
19745+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
19746+#ifdef NETIF_F_HW_VLAN_CTAG_RX
19747 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
19748 __always_unused __be16 proto, u16 vid)
19749+#else
19750+static int i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
19751+#endif
19752+#else
19753+static void i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
19754+#endif
19755 {
19756 struct i40e_netdev_priv *np = netdev_priv(netdev);
19757 struct i40e_vsi *vsi = np->vsi;
19758 int ret = 0;
19759
19760 if (vid >= VLAN_N_VID)
19761+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
19762 return -EINVAL;
19763+#else
19764+ return;
19765+#endif
19766
19767 ret = i40e_vsi_add_vlan(vsi, vid);
19768+#ifndef HAVE_VLAN_RX_REGISTER
19769 if (!ret)
19770 set_bit(vid, vsi->active_vlans);
19771+#endif /* !HAVE_VLAN_RX_REGISTER */
19772+#ifndef HAVE_NETDEV_VLAN_FEATURES
19773+
19774+ /* Copy feature flags from netdev to the vlan netdev for this vid.
19775+ * This allows things like TSO to bubble down to our vlan device.
19776+ * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so
19777+ * we will not have a netdev that needs updating.
19778+ */
19779+ if (vsi->vlgrp) {
19780+ struct vlan_group *vlgrp = vsi->vlgrp;
19781+ struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid);
19782+ if (v_netdev) {
19783+ v_netdev->features |= netdev->features;
19784+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
19785+ v_netdev->enc_features |= netdev->enc_features;
19786+#endif
19787+ vlan_group_set_device(vlgrp, vid, v_netdev);
19788+ }
19789+ }
19790+#endif /* HAVE_NETDEV_VLAN_FEATURES */
19791
19792+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
19793 return ret;
19794+#endif
19795+}
19796+
19797+/**
19798+ * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
19799+ * @netdev: network interface to be adjusted
19800+ * @proto: unused protocol value
19801+ * @vid: vlan id to be added
19802+ **/
19803+#ifdef NETIF_F_HW_VLAN_CTAG_RX
19804+static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
19805+ __always_unused __be16 proto, u16 vid)
19806+#else
19807+static void i40e_vlan_rx_add_vid_up(struct net_device *netdev, u16 vid)
19808+#endif
19809+{
19810+#if (!defined(HAVE_NETDEV_VLAN_FEATURES) || !defined(HAVE_VLAN_RX_REGISTER))
19811+ struct i40e_netdev_priv *np = netdev_priv(netdev);
19812+ struct i40e_vsi *vsi = np->vsi;
19813+#endif
19814+
19815+ if (vid >= VLAN_N_VID)
19816+ return;
19817+#ifndef HAVE_VLAN_RX_REGISTER
19818+ set_bit(vid, vsi->active_vlans);
19819+#endif /* !HAVE_VLAN_RX_REGISTER */
19820+#ifndef HAVE_NETDEV_VLAN_FEATURES
19821+
19822+ /* Copy feature flags from netdev to the vlan netdev for this vid.
19823+ * This allows things like TSO to bubble down to our vlan device.
19824+ * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so
19825+ * we will not have a netdev that needs updating.
19826+ */
19827+ if (vsi->vlgrp) {
19828+ struct vlan_group *vlgrp = vsi->vlgrp;
19829+ struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid);
19830+
19831+ if (v_netdev) {
19832+ v_netdev->features |= netdev->features;
19833+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
19834+ v_netdev->enc_features |= netdev->enc_features;
19835+#endif
19836+ vlan_group_set_device(vlgrp, vid, v_netdev);
19837+ }
19838+ }
19839+#endif /* HAVE_NETDEV_VLAN_FEATURES */
19840 }
19841
19842 /**
19843 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
19844 * @netdev: network interface to be adjusted
19845+ * @proto: unused protocol value
19846 * @vid: vlan id to be removed
19847 *
19848 * net_device_ops implementation for removing vlan ids
19849 **/
19850+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
19851+#ifdef NETIF_F_HW_VLAN_CTAG_RX
19852 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
19853 __always_unused __be16 proto, u16 vid)
19854+#else
19855+static int i40e_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
19856+#endif
19857+#else
19858+static void i40e_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
19859+#endif
19860 {
19861 struct i40e_netdev_priv *np = netdev_priv(netdev);
19862 struct i40e_vsi *vsi = np->vsi;
19863@@ -2712,10 +3165,14 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
19864 * already printed from the other function
19865 */
19866 i40e_vsi_kill_vlan(vsi, vid);
19867+#ifndef HAVE_VLAN_RX_REGISTER
19868
19869 clear_bit(vid, vsi->active_vlans);
19870+#endif /* HAVE_VLAN_RX_REGISTER */
19871+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
19872
19873 return 0;
19874+#endif
19875 }
19876
19877 /**
19878@@ -2729,11 +3186,40 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
19879 if (!vsi->netdev)
19880 return;
19881
19882- i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
19883+#ifdef HAVE_VLAN_RX_REGISTER
19884+ i40e_vlan_rx_register(vsi->netdev, vsi->vlgrp);
19885+
19886+ if (vsi->vlgrp) {
19887+ for (vid = 0; vid < VLAN_N_VID; vid++) {
19888+ if (!vlan_group_get_device(vsi->vlgrp, vid))
19889+ continue;
19890+#ifdef NETIF_F_HW_VLAN_CTAG_RX
19891+ i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
19892+ vid);
19893+#else
19894+ i40e_vlan_rx_add_vid_up(vsi->netdev, vid);
19895+#endif
19896+ }
19897+ }
19898+#else /* HAVE_VLAN_RX_REGISTER */
19899+#ifdef NETIF_F_HW_VLAN_CTAG_RX
19900+ if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
19901+ i40e_vlan_stripping_enable(vsi);
19902+#else
19903+ if (vsi->netdev->features & NETIF_F_HW_VLAN_RX)
19904+ i40e_vlan_stripping_enable(vsi);
19905+#endif
19906+ else
19907+ i40e_vlan_stripping_disable(vsi);
19908
19909 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
19910- i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
19911- vid);
19912+#ifdef NETIF_F_HW_VLAN_CTAG_RX
19913+ i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
19914+ vid);
19915+#else
19916+ i40e_vlan_rx_add_vid_up(vsi->netdev, vid);
19917+#endif
19918+#endif
19919 }
19920
19921 /**
19922@@ -2760,7 +3246,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
19923 "add pvid failed, err %s aq_err %s\n",
19924 i40e_stat_str(&vsi->back->hw, ret),
19925 i40e_aq_str(&vsi->back->hw,
19926- vsi->back->hw.aq.asq_last_status));
19927+ vsi->back->hw.aq.asq_last_status));
19928 return -ENOENT;
19929 }
19930
19931@@ -2775,9 +3261,47 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
19932 **/
19933 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
19934 {
19935+ vsi->info.pvid = 0;
19936+
19937 i40e_vlan_stripping_disable(vsi);
19938+}
19939+/**
19940+ * i40e_get_cloud_filter_type - Get cloud filter type
19941+ * @flags: set of enabled fields
19942+ * @type: location to return type
19943+ *
19944+ * Given the set of flags indicating which fields are active, look up the type
19945+ * number for programming the cloud filter in firmware. If the flags are
19946+ * invalid, return I40E_ERR_CONFIG. @type may be NULL, in which case the
19947+ * function may be used to verify that the flags would produce a valid type.
19948+ **/
19949+int i40e_get_cloud_filter_type(u8 flags, u16 *type)
19950+{
19951+ static const u16 table[128] = {
19952+ [I40E_CLOUD_FILTER_FLAGS_OMAC] =
19953+ I40E_AQC_ADD_CLOUD_FILTER_OMAC,
19954+ [I40E_CLOUD_FILTER_FLAGS_IMAC] =
19955+ I40E_AQC_ADD_CLOUD_FILTER_IMAC,
19956+ [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
19957+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
19958+ [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
19959+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
19960+ [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
19961+ I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
19962+ [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
19963+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
19964+ [I40E_CLOUD_FILTER_FLAGS_IIP] =
19965+ I40E_AQC_ADD_CLOUD_FILTER_IIP,
19966+ };
19967+
19968+ if (flags >= ARRAY_SIZE(table) || table[flags] == 0)
19969+ return I40E_ERR_CONFIG;
19970+
19971+ /* Return type if we're given space to do so */
19972+ if (type)
19973+ *type = table[flags];
19974
19975- vsi->info.pvid = 0;
19976+ return 0;
19977 }
19978
19979 /**
19980@@ -2790,7 +3314,7 @@ void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
19981 *
19982 * Return 0 on success, negative on failure
19983 **/
19984-static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
19985+int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
19986 {
19987 int i, err = 0;
19988
19989@@ -2802,7 +3326,6 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
19990
19991 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
19992 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
19993-
19994 return err;
19995 }
19996
19997@@ -2839,7 +3362,7 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
19998 *
19999 * Return 0 on success, negative on failure
20000 **/
20001-static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
20002+int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
20003 {
20004 int i, err = 0;
20005
20006@@ -2875,22 +3398,53 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
20007 **/
20008 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
20009 {
20010+#ifndef HAVE_XPS_QOS_SUPPORT
20011 struct i40e_vsi *vsi = ring->vsi;
20012+#endif
20013+ int cpu;
20014
20015- if (!ring->q_vector || !ring->netdev)
20016+ if (!ring->q_vector || !ring->netdev || ring->ch)
20017 return;
20018
20019- if ((vsi->tc_config.numtc <= 1) &&
20020- !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
20021- netif_set_xps_queue(ring->netdev,
20022- get_cpu_mask(ring->q_vector->v_idx),
20023- ring->queue_index);
20024- }
20025+#ifndef HAVE_XPS_QOS_SUPPORT
20026+ /* Some older kernels do not support XPS with QoS */
20027+ if (vsi->tc_config.numtc > 1) {
20028+#ifndef HAVE_NETDEV_TC_RESETS_XPS
20029+ /* Additionally, some kernels do not properly clear the XPS
20030+ * mapping when the number of traffic classes is changed. In
20031+ * order to support these kernels we work around this by
20032+ * setting the XPS mapping to the empty cpu set.
20033+ */
20034+ cpumask_var_t mask;
20035
20036- /* schedule our worker thread which will take care of
20037- * applying the new filter changes
20038- */
20039- i40e_service_event_schedule(vsi->back);
20040+ /* Only clear the settings if we initialized XPS */
20041+ if (!test_and_clear_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
20042+ return;
20043+
20044+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
20045+ return;
20046+
20047+ netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
20048+ free_cpumask_var(mask);
20049+#endif /* !HAVE_NETDEV_TC_RESETS_XPS */
20050+ return;
20051+ }
20052+
20053+#endif /* !HAVE_XPS_QOS_SUPPORT */
20054+ /* We only initialize XPS once, so as not to overwrite user settings */
20055+ if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
20056+ return;
20057+
20058+ cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
20059+#ifndef HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK
20060+ /* In kernels before 3.12 the second parameter has no const qualifier.
20061+	 * This generates warnings on older kernels.
20062+ */
20063+ netif_set_xps_queue(ring->netdev, (struct cpumask *)get_cpu_mask(cpu),
20064+ ring->queue_index);
20065+#else /* !HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK */
20066+ netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), ring->queue_index);
20067+#endif /* !HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK */
20068 }
20069
20070 /**
20071@@ -2915,7 +3469,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
20072 } else {
20073 ring->atr_sample_rate = 0;
20074 }
20075-
20076 /* configure XPS */
20077 i40e_config_xps_tx_ring(ring);
20078
20079@@ -2927,7 +3480,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
20080 tx_ctx.qlen = ring->count;
20081 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
20082 I40E_FLAG_FD_ATR_ENABLED));
20083+#ifdef HAVE_PTP_1588_CLOCK
20084 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
20085+#endif /* HAVE_PTP_1588_CLOCK */
20086 /* FDIR VSI tx ring can still use RS bit and writebacks */
20087 if (vsi->type != I40E_VSI_FDIR)
20088 tx_ctx.head_wb_ena = 1;
20089@@ -2944,7 +3499,14 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
20090 * initialization. This has to be done regardless of
20091 * DCB as by default everything is mapped to TC0.
20092 */
20093- tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
20094+
20095+ if (ring->ch)
20096+ tx_ctx.rdylist =
20097+ le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
20098+
20099+ else
20100+ tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
20101+
20102 tx_ctx.rdylist_act = 0;
20103
20104 /* clear the context in the HMC */
20105@@ -2966,12 +3528,23 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
20106 }
20107
20108 /* Now associate this queue with this PCI function */
20109- if (vsi->type == I40E_VSI_VMDQ2) {
20110- qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
20111- qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
20112- I40E_QTX_CTL_VFVM_INDX_MASK;
20113+ if (ring->ch) {
20114+ if (ring->ch->type == I40E_VSI_VMDQ2)
20115+ qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
20116+ else
20117+ return -EINVAL;
20118+
20119+ qtx_ctl |= (ring->ch->vsi_number <<
20120+ I40E_QTX_CTL_VFVM_INDX_SHIFT) &
20121+ I40E_QTX_CTL_VFVM_INDX_MASK;
20122 } else {
20123- qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
20124+ if (vsi->type == I40E_VSI_VMDQ2) {
20125+ qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
20126+ qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
20127+ I40E_QTX_CTL_VFVM_INDX_MASK;
20128+ } else {
20129+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
20130+ }
20131 }
20132
20133 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
20134@@ -3000,7 +3573,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
20135 struct i40e_hmc_obj_rxq rx_ctx;
20136 i40e_status err = 0;
20137
20138- ring->state = 0;
20139+ bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
20140
20141 /* clear the context structure first */
20142 memset(&rx_ctx, 0, sizeof(rx_ctx));
20143@@ -3014,7 +3587,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
20144 rx_ctx.qlen = ring->count;
20145
20146 /* use 32 byte descriptors */
20147+#ifdef I40E_32BYTE_RX
20148 rx_ctx.dsize = 1;
20149+#else
20150+ /* use 16 byte descriptors */
20151+ rx_ctx.dsize = 0;
20152+#endif
20153
20154 /* descriptor type is always zero
20155 * rx_ctx.dtype = 0;
20156@@ -3022,10 +3600,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
20157 rx_ctx.hsplit_0 = 0;
20158
20159 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
20160- if (hw->revision_id == 0)
20161- rx_ctx.lrxqthresh = 0;
20162- else
20163- rx_ctx.lrxqthresh = 2;
20164+ rx_ctx.lrxqthresh = 1;
20165 rx_ctx.crcstrip = 1;
20166 rx_ctx.l2tsel = 1;
20167 /* this controls whether VLAN is stripped from inner headers */
20168@@ -3099,6 +3674,17 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
20169 {
20170 int err = 0;
20171 u16 i;
20172+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
20173+ u16 max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
20174+
20175+ if (!vsi->netdev)
20176+ max_frame = I40E_RXBUFFER_2048;
20177+ else if (vsi->netdev->mtu + I40E_PACKET_HDR_PAD > max_frame)
20178+ max_frame = vsi->netdev->mtu + I40E_PACKET_HDR_PAD;
20179+
20180+ vsi->max_frame = max_frame;
20181+ vsi->rx_buf_len = max_frame;
20182+#else /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
20183
20184 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
20185 vsi->max_frame = I40E_MAX_RXBUFFER;
20186@@ -3114,6 +3700,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
20187 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
20188 I40E_RXBUFFER_2048;
20189 }
20190+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
20191
20192 /* set up individual rings */
20193 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
20194@@ -3140,6 +3727,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
20195 rx_ring->dcb_tc = 0;
20196 tx_ring->dcb_tc = 0;
20197 }
20198+ return;
20199 }
20200
20201 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
20202@@ -3165,6 +3753,7 @@ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
20203 {
20204 if (vsi->netdev)
20205 i40e_set_rx_mode(vsi->netdev);
20206+
20207 }
20208
20209 /**
20210@@ -3183,7 +3772,7 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
20211 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
20212 return;
20213
20214- /* Reset FDir counters as we're replaying all existing filters */
20215+ /* reset FDIR counters as we're replaying all existing filters */
20216 pf->fd_tcp4_filter_cnt = 0;
20217 pf->fd_udp4_filter_cnt = 0;
20218 pf->fd_sctp4_filter_cnt = 0;
20219@@ -3195,6 +3784,209 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
20220 }
20221 }
20222
20223+/**
20224+ * i40e_cloud_filter_restore - Restore the switch's cloud filters
20225+ * @pf: Pointer to the PF structure
20226+ *
20227+ * This function replays the cloud filter hlist into the hw switch
20228+ **/
20229+static void i40e_cloud_filter_restore(struct i40e_pf *pf)
20230+{
20231+ struct i40e_cloud_filter *filter;
20232+ struct hlist_node *node;
20233+
20234+ hlist_for_each_entry_safe(filter, node,
20235+ &pf->cloud_filter_list, cloud_node) {
20236+ i40e_add_del_cloud_filter_ex(pf, filter, true);
20237+ }
20238+}
20239+
20240+/**
20241+ * i40e_set_cld_element - sets cloud filter element data
20242+ * @filter: cloud filter rule
20243+ * @cld: ptr to cloud filter element data
20244+ *
20245+ * This is helper function to copy data into cloud filter element
20246+ * This is a helper function to copy data into the cloud filter element
20247+static inline void
20248+i40e_set_cld_element(struct i40e_cloud_filter *filter,
20249+ struct i40e_aqc_cloud_filters_element_data *cld)
20250+{
20251+ int i, j;
20252+ u32 ipa;
20253+
20254+ memset(cld, 0, sizeof(*cld));
20255+ ether_addr_copy(cld->outer_mac, filter->dst_mac);
20256+ ether_addr_copy(cld->inner_mac, filter->src_mac);
20257+
20258+ if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6) {
20259+ /* copy parameters from filter to cloud filters element
20260+ * which are not specific to IP protos
20261+ */
20262+ ether_addr_copy(cld->outer_mac, filter->outer_mac);
20263+ ether_addr_copy(cld->inner_mac, filter->inner_mac);
20264+ cld->inner_vlan = cpu_to_le16(ntohs(filter->inner_vlan));
20265+ cld->tenant_id = cpu_to_le32(filter->tenant_id);
20266+ return;
20267+ }
20268+
20269+ if (filter->n_proto == ETH_P_IPV6) {
20270+#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
20271+ for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
20272+ i++, j += 2) {
20273+ ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
20274+ memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
20275+ }
20276+ } else {
20277+ ipa = be32_to_cpu(filter->dst_ipv4);
20278+ memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
20279+ }
20280+
20281+ cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
20282+
20283+	/* tenant_id is not supported by FW yet; once support is enabled,
20284+	 * fill cld->tenant_id with cpu_to_le32(filter->tenant_id)
20285+ */
20286+ if (filter->tenant_id)
20287+ return;
20288+}
20289+
20290+/* i40e_add_del_cloud_filter_ex - Add/del cloud filter using big_buf
20291+ * @pf: working PF
20292+ * @filter: cloud filter rule
20293+ * @add: if true, add, if false, delete
20294+ *
20295+ * Add or delete a cloud filter for a specific flow spec using big buffer.
20296+ * Returns 0 if the filter was successfully added.
20297+ **/
20298+int i40e_add_del_cloud_filter_ex(struct i40e_pf *pf,
20299+ struct i40e_cloud_filter *filter,
20300+ bool add)
20301+{
20302+ struct i40e_aqc_cloud_filters_element_bb cld_filter;
20303+ int ret;
20304+
20305+ if (!i40e_is_l4mode_enabled()) {
20306+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
20307+
20308+ return i40e_add_del_cloud_filter(vsi, filter, add);
20309+ }
20310+
20311+	/* Make sure a port is specified, otherwise bail out: channel-specific
20312+	 * cloud filters need a non-zero 'L4 port'
20313+ */
20314+ if (!filter->dst_port)
20315+ return -EINVAL;
20316+
20317+ memset(&cld_filter, 0, sizeof(cld_filter));
20318+
20319+ /* Switch is in Mode 1, so this is an L4 port filter */
20320+ cld_filter.element.flags =
20321+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
20322+
20323+ /* Now copy L4 port in Byte 6..7 in general fields */
20324+ cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
20325+ be16_to_cpu(filter->dst_port);
20326+
20327+ if (add)
20328+ ret = i40e_aq_add_cloud_filters_bb(&pf->hw,
20329+ filter->seid,
20330+ &cld_filter, 1);
20331+ else
20332+ ret = i40e_aq_rem_cloud_filters_bb(&pf->hw,
20333+ filter->seid,
20334+ &cld_filter, 1);
20335+
20336+ if (ret)
20337+ dev_err(&pf->pdev->dev,
20338+ "fail to %s cloud filter err %d aq_err %d\n",
20339+ add ? "add" : "delete",
20340+ ret, pf->hw.aq.asq_last_status);
20341+
20342+ dev_info(&pf->pdev->dev,
20343+ "%s cloud filter for VSI: %d, L4 port: %d\n",
20344+ add ? "add" : "delete",
20345+ filter->seid, be16_to_cpu(filter->dst_port));
20346+
20347+ return ret;
20348+}
20349+
20350+/**
20351+ * i40e_add_del_cloud_filter - Add/del cloud filter
20352+ * @vsi: pointer to VSI
20353+ * @filter: cloud filter rule
20354+ * @add: if true, add, if false, delete
20355+ *
20356+ * Add or delete a cloud filter for a specific flow spec.
20357+ * Returns 0 if the filter was successfully added.
20358+ **/
20359+int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
20360+ struct i40e_cloud_filter *filter, bool add)
20361+{
20362+ struct i40e_aqc_cloud_filters_element_data cld_filter;
20363+ struct i40e_pf *pf = vsi->back;
20364+ int ret;
20365+ static const u16 flag_table[128] = {
20366+ [I40E_CLOUD_FILTER_FLAGS_OMAC] =
20367+ I40E_AQC_ADD_CLOUD_FILTER_OMAC,
20368+ [I40E_CLOUD_FILTER_FLAGS_IMAC] =
20369+ I40E_AQC_ADD_CLOUD_FILTER_IMAC,
20370+ [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
20371+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
20372+ [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
20373+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
20374+ [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
20375+ I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
20376+ [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
20377+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
20378+ [I40E_CLOUD_FILTER_FLAGS_IIP] =
20379+ I40E_AQC_ADD_CLOUD_FILTER_IIP,
20380+ };
20381+
20382+ if (filter->flags >= ARRAY_SIZE(flag_table))
20383+ return I40E_ERR_CONFIG;
20384+
20385+ /* copy element needed to add cloud filter from filter */
20386+ i40e_set_cld_element(filter, &cld_filter);
20387+
20388+ if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
20389+ cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
20390+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
20391+
20392+ /* copy queue number from filter to pass to cloud filter engine
20393+ * with flags for targeting traffic to specific queue
20394+ */
20395+ if (filter->flags != I40E_CLOUD_FILTER_FLAGS_OMAC) {
20396+ cld_filter.flags |=
20397+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE);
20398+ cld_filter.queue_number = cpu_to_le16(filter->queue_id);
20399+ }
20400+
20401+ if (filter->n_proto == ETH_P_IPV6)
20402+ cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
20403+ I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
20404+ else
20405+ cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
20406+ I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
20407+
20408+ if (add)
20409+ ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
20410+ &cld_filter, 1);
20411+ else
20412+ ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
20413+ &cld_filter, 1);
20414+ if (ret)
20415+ dev_dbg(&pf->pdev->dev,
20416+ "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
20417+ add ? "add" : "delete", filter->dst_port, ret,
20418+ pf->hw.aq.asq_last_status);
20419+ else
20420+ dev_info(&pf->pdev->dev,
20421+ "%s cloud filter for VSI: %d\n",
20422+ add ? "Added" : "Deleted", filter->seid);
20423+ return ret;
20424+}
20425+
20426 /**
20427 * i40e_vsi_configure - Set up the VSI for action
20428 * @vsi: the VSI being configured
20429@@ -3235,55 +4027,46 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
20430 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
20431 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
20432
20433- q_vector->itr_countdown = ITR_COUNTDOWN_START;
20434- q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
20435- q_vector->rx.latency_range = I40E_LOW_LATENCY;
20436+ q_vector->rx.next_update = jiffies + 1;
20437+ q_vector->rx.target_itr =
20438+ ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
20439 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
20440- q_vector->rx.itr);
20441- q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
20442- q_vector->tx.latency_range = I40E_LOW_LATENCY;
20443+ q_vector->rx.target_itr >> 1);
20444+ q_vector->rx.current_itr = q_vector->rx.target_itr;
20445+
20446+ q_vector->tx.next_update = jiffies + 1;
20447+ q_vector->tx.target_itr =
20448+ ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
20449 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
20450- q_vector->tx.itr);
20451+ q_vector->tx.target_itr >> 1);
20452+ q_vector->tx.current_itr = q_vector->tx.target_itr;
20453+
20454 wr32(hw, I40E_PFINT_RATEN(vector - 1),
20455 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
20456
20457- /* Linked list for the queuepairs assigned to this vector */
20458+ /* begin of linked list for RX queue assigned to this vector */
20459 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
20460 for (q = 0; q < q_vector->num_ringpairs; q++) {
20461 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
20462 u32 val;
20463
20464- val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
20465- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
20466- (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
20467- (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
20468- (I40E_QUEUE_TYPE_TX <<
20469- I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
20470-
20471+ /* RX queue in linked list with next queue set to TX */
20472+ val = I40E_QINT_RQCTL_VAL(nextqp, vector, TX);
20473 wr32(hw, I40E_QINT_RQCTL(qp), val);
20474
20475 if (has_xdp) {
20476- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
20477- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
20478- (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
20479- (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
20480- (I40E_QUEUE_TYPE_TX <<
20481- I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
20482-
20483+ /* TX queue with next queue set to TX */
20484+ val = I40E_QINT_TQCTL_VAL(qp, vector, TX);
20485 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
20486 }
20487
20488- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
20489- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
20490- (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
20491- ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
20492- (I40E_QUEUE_TYPE_RX <<
20493- I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
20494+ /* TX queue with next RX or end of linked list */
20495+ val = I40E_QINT_TQCTL_VAL((qp + 1), vector, RX);
20496
20497 /* Terminate the linked list */
20498 if (q == (q_vector->num_ringpairs - 1))
20499- val |= (I40E_QUEUE_END_OF_LIST <<
20500- I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
20501+ val |= (I40E_QUEUE_END_OF_LIST
20502+ << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
20503
20504 wr32(hw, I40E_QINT_TQCTL(qp), val);
20505 qp++;
20506@@ -3295,7 +4078,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
20507
20508 /**
20509 * i40e_enable_misc_int_causes - enable the non-queue interrupts
20510- * @hw: ptr to the hardware info
20511+ * @pf: pointer to private device data structure
20512 **/
20513 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
20514 {
20515@@ -3314,12 +4097,13 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
20516 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
20517 I40E_PFINT_ICR0_ENA_VFLR_MASK |
20518 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
20519-
20520 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
20521 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
20522+#ifdef HAVE_PTP_1588_CLOCK
20523
20524 if (pf->flags & I40E_FLAG_PTP)
20525 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
20526+#endif /* HAVE_PTP_1588_CLOCK */
20527
20528 wr32(hw, I40E_PFINT_ICR0_ENA, val);
20529
20530@@ -3341,44 +4125,36 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
20531 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
20532 struct i40e_pf *pf = vsi->back;
20533 struct i40e_hw *hw = &pf->hw;
20534- u32 val;
20535
20536 /* set the ITR configuration */
20537- q_vector->itr_countdown = ITR_COUNTDOWN_START;
20538- q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
20539- q_vector->rx.latency_range = I40E_LOW_LATENCY;
20540- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
20541- q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
20542- q_vector->tx.latency_range = I40E_LOW_LATENCY;
20543- wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
20544+ q_vector->rx.next_update = jiffies + 1;
20545+ q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
20546+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
20547+ q_vector->rx.current_itr = q_vector->rx.target_itr;
20548+ q_vector->tx.next_update = jiffies + 1;
20549+ q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
20550+ wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
20551+ q_vector->tx.current_itr = q_vector->tx.target_itr;
20552
20553 i40e_enable_misc_int_causes(pf);
20554
20555 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
20556 wr32(hw, I40E_PFINT_LNKLST0, 0);
20557
20558- /* Associate the queue pair to the vector and enable the queue int */
20559- val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
20560- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
20561- (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
20562- (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
20563-
20564- wr32(hw, I40E_QINT_RQCTL(0), val);
20565-
20566- if (i40e_enabled_xdp_vsi(vsi)) {
20567- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
20568- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
20569- (I40E_QUEUE_TYPE_TX
20570- << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
20571+ /* Associate the queue pair to the vector and enable the queue
20572+ * interrupt RX queue in linked list with next queue set to TX
20573+ */
20574+ wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
20575
20576- wr32(hw, I40E_QINT_TQCTL(nextqp), val);
20577+ if (nextqp) {
20578+ /* TX queue in linked list with next queue set to TX */
20579+ wr32(hw, I40E_QINT_TQCTL(nextqp),
20580+ I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
20581 }
20582
20583- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
20584- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
20585- (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
20586-
20587- wr32(hw, I40E_QINT_TQCTL(0), val);
20588+ /* last TX queue so the next RX queue doesn't matter */
20589+ wr32(hw, I40E_QINT_TQCTL(0),
20590+ I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
20591 i40e_flush(hw);
20592 }
20593
20594@@ -3398,15 +4174,14 @@ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
20595 /**
20596 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
20597 * @pf: board private structure
20598- * @clearpba: true when all pending interrupt events should be cleared
20599 **/
20600-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
20601+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
20602 {
20603 struct i40e_hw *hw = &pf->hw;
20604 u32 val;
20605
20606 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
20607- (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
20608+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
20609 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
20610
20611 wr32(hw, I40E_PFINT_DYN_CTL0, val);
20612@@ -3430,6 +4205,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
20613 return IRQ_HANDLED;
20614 }
20615
20616+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
20617 /**
20618 * i40e_irq_affinity_notify - Callback for affinity changes
20619 * @notify: context as to what irq was changed
20620@@ -3456,6 +4232,7 @@ static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
20621 * receive notifications.
20622 **/
20623 static void i40e_irq_affinity_release(struct kref *ref) {}
20624+#endif /* HAVE_IRQ_AFFINITY_NOTIFY */
20625
20626 /**
20627 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
20628@@ -3464,7 +4241,7 @@ static void i40e_irq_affinity_release(struct kref *ref) {}
20629 *
20630 * Allocates MSI-X vectors and requests interrupts from the kernel.
20631 **/
20632-static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
20633+int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
20634 {
20635 int q_vectors = vsi->num_q_vectors;
20636 struct i40e_pf *pf = vsi->back;
20637@@ -3473,6 +4250,9 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
20638 int tx_int_idx = 0;
20639 int vector, err;
20640 int irq_num;
20641+#ifdef HAVE_IRQ_AFFINITY_HINT
20642+ int cpu;
20643+#endif
20644
20645 for (vector = 0; vector < q_vectors; vector++) {
20646 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
20647@@ -3504,14 +4284,22 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
20648 goto free_queue_irqs;
20649 }
20650
20651+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
20652 /* register for affinity change notifications */
20653 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
20654 q_vector->affinity_notify.release = i40e_irq_affinity_release;
20655 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
20656- /* get_cpu_mask returns a static constant mask with
20657- * a permanent lifetime so it's ok to use here.
20658+#endif
20659+#ifdef HAVE_IRQ_AFFINITY_HINT
20660+ /* Spread affinity hints out across online CPUs.
20661+ *
20662+ * get_cpu_mask returns a static constant mask with
20663+ * a permanent lifetime so it's ok to pass to
20664+ * irq_set_affinity_hint without making a copy.
20665 */
20666- irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
20667+ cpu = cpumask_local_spread(q_vector->v_idx, -1);
20668+ irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
20669+#endif /* HAVE_IRQ_AFFINITY_HINT */
20670 }
20671
20672 vsi->irqs_ready = true;
20673@@ -3521,8 +4309,12 @@ free_queue_irqs:
20674 while (vector) {
20675 vector--;
20676 irq_num = pf->msix_entries[base + vector].vector;
20677+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
20678 irq_set_affinity_notifier(irq_num, NULL);
20679+#endif
20680+#ifdef HAVE_IRQ_AFFINITY_HINT
20681 irq_set_affinity_hint(irq_num, NULL);
20682+#endif
20683 free_irq(irq_num, &vsi->q_vectors[vector]);
20684 }
20685 return err;
20686@@ -3587,7 +4379,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
20687 for (i = 0; i < vsi->num_q_vectors; i++)
20688 i40e_irq_dynamic_enable(vsi, i);
20689 } else {
20690- i40e_irq_dynamic_enable_icr0(pf, true);
20691+ i40e_irq_dynamic_enable_icr0(pf);
20692 }
20693
20694 i40e_flush(&pf->hw);
20695@@ -3595,14 +4387,20 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
20696 }
20697
20698 /**
20699- * i40e_stop_misc_vector - Stop the vector that handles non-queue events
20700+ * i40e_free_misc_vector - Free the vector that handles non-queue events
20701 * @pf: board private structure
20702 **/
20703-static void i40e_stop_misc_vector(struct i40e_pf *pf)
20704+static void i40e_free_misc_vector(struct i40e_pf *pf)
20705 {
20706 /* Disable ICR 0 */
20707 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
20708 i40e_flush(&pf->hw);
20709+
20710+ if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
20711+ synchronize_irq(pf->msix_entries[0].vector);
20712+ free_irq(pf->msix_entries[0].vector, pf);
20713+ clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
20714+ }
20715 }
20716
20717 /**
20718@@ -3633,7 +4431,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
20719 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
20720 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
20721 pf->sw_int_count++;
20722-
20723 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
20724 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
20725 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
20726@@ -3697,15 +4494,20 @@ static irqreturn_t i40e_intr(int irq, void *data)
20727 rd32(hw, I40E_PFHMC_ERRORDATA));
20728 }
20729
20730+#ifdef HAVE_PTP_1588_CLOCK
20731 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
20732 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
20733
20734- if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
20735- icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
20736+ if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
20737+ schedule_work(&pf->ptp_extts0_work);
20738+
20739+ if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
20740 i40e_ptp_tx_hwtstamp(pf);
20741- }
20742+
20743+ icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
20744 }
20745
20746+#endif /* HAVE_PTP_1588_CLOCK */
20747 /* If a critical error is pending we have no choice but to reset the
20748 * device.
20749 * Report and mask out any remaining unexpected interrupts.
20750@@ -3728,9 +4530,10 @@ static irqreturn_t i40e_intr(int irq, void *data)
20751 enable_intr:
20752 /* re-enable interrupt causes */
20753 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
20754- if (!test_bit(__I40E_DOWN, pf->state)) {
20755+ if (!test_bit(__I40E_DOWN, pf->state) ||
20756+ test_bit(__I40E_RECOVERY_MODE, pf->state)) {
20757 i40e_service_event_schedule(pf);
20758- i40e_irq_dynamic_enable_icr0(pf, false);
20759+ i40e_irq_dynamic_enable_icr0(pf);
20760 }
20761
20762 return ret;
20763@@ -3762,7 +4565,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
20764 break;
20765
20766 /* prevent any other reads prior to eop_desc */
20767- smp_rmb();
20768+ read_barrier_depends();
20769
20770 /* if the descriptor isn't done, no work yet to do */
20771 if (!(eop_desc->cmd_type_offset_bsz &
20772@@ -3903,6 +4706,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
20773 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
20774
20775 q_vector->num_ringpairs = num_ringpairs;
20776+ q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
20777
20778 q_vector->rx.count = 0;
20779 q_vector->tx.count = 0;
20780@@ -4053,8 +4857,8 @@ static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
20781 * @is_xdp: true if the queue is used for XDP
20782 * @enable: start or stop the queue
20783 **/
20784-static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
20785- bool is_xdp, bool enable)
20786+int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
20787+ bool is_xdp, bool enable)
20788 {
20789 int ret;
20790
20791@@ -4096,10 +4900,10 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
20792 ret = i40e_control_wait_tx_q(vsi->seid, pf,
20793 pf_q + vsi->alloc_queue_pairs,
20794 true /*is xdp*/, enable);
20795+
20796 if (ret)
20797 break;
20798 }
20799-
20800 return ret;
20801 }
20802
20803@@ -4138,9 +4942,9 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
20804 * @pf_q: the PF queue to configure
20805 * @enable: start or stop the queue
20806 *
20807- * This function enables or disables a single queue. Note that any delay
20808- * required after the operation is expected to be handled by the caller of
20809- * this function.
20810+ * This function enables or disables a single queue. Note that
20811+ * any delay required after the operation is expected to be
20812+ * handled by the caller of this function.
20813 **/
20814 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
20815 {
20816@@ -4169,6 +4973,30 @@ static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
20817 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
20818 }
20819
20820+/**
20821+ * i40e_control_wait_rx_q
20822+ * @pf: the PF structure
20823+ * @pf_q: queue being configured
20824+ * @enable: start or stop the rings
20825+ *
20826+ * This function enables or disables a single queue along with waiting
20827+ * for the change to finish. The caller of this function should handle
20828+ * the delays needed in the case of disabling queues.
20829+ **/
20830+int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
20831+{
20832+ int ret = 0;
20833+
20834+ i40e_control_rx_q(pf, pf_q, enable);
20835+
20836+ /* wait for the change to finish */
20837+ ret = i40e_pf_rxq_wait(pf, pf_q, enable);
20838+ if (ret)
20839+ return ret;
20840+
20841+ return ret;
20842+}
20843+
20844 /**
20845 * i40e_vsi_control_rx - Start or stop a VSI's rings
20846 * @vsi: the VSI being configured
20847@@ -4181,10 +5009,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
20848
20849 pf_q = vsi->base_queue;
20850 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
20851- i40e_control_rx_q(pf, pf_q, enable);
20852-
20853- /* wait for the change to finish */
20854- ret = i40e_pf_rxq_wait(pf, pf_q, enable);
20855+ ret = i40e_control_wait_rx_q(pf, pf_q, enable);
20856 if (ret) {
20857 dev_info(&pf->pdev->dev,
20858 "VSI seid %d Rx ring %d %sable timeout\n",
20859@@ -4193,9 +5018,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
20860 }
20861 }
20862
20863- /* Due to HW errata, on Rx disable only, the register can indicate done
20864- * before it really is. Needs 50ms to be sure
20865- */
20866+	/* HW needs up to 50ms to finish RX queue disable */
20867 if (!enable)
20868 mdelay(50);
20869
20870@@ -4291,10 +5114,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
20871 !vsi->q_vectors[i]->num_ringpairs)
20872 continue;
20873
20874+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
20875 /* clear the affinity notifier in the IRQ descriptor */
20876 irq_set_affinity_notifier(irq_num, NULL);
20877+#endif
20878+#ifdef HAVE_IRQ_AFFINITY_HINT
20879 /* remove our suggested affinity mask for this IRQ */
20880 irq_set_affinity_hint(irq_num, NULL);
20881+#endif
20882 synchronize_irq(irq_num);
20883 free_irq(irq_num, vsi->q_vectors[i]);
20884
20885@@ -4457,11 +5284,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
20886 {
20887 int i;
20888
20889- i40e_stop_misc_vector(pf);
20890- if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
20891- synchronize_irq(pf->msix_entries[0].vector);
20892- free_irq(pf->msix_entries[0].vector, pf);
20893- }
20894+ i40e_free_misc_vector(pf);
20895
20896 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
20897 I40E_IWARP_IRQ_PILE_ID);
20898@@ -4487,7 +5310,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
20899 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
20900 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
20901
20902- if (q_vector->rx.ring || q_vector->tx.ring)
20903+ if (q_vector->tx.ring || q_vector->rx.ring)
20904 napi_enable(&q_vector->napi);
20905 }
20906 }
20907@@ -4506,7 +5329,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
20908 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
20909 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
20910
20911- if (q_vector->rx.ring || q_vector->tx.ring)
20912+ if (q_vector->tx.ring || q_vector->rx.ring)
20913 napi_disable(&q_vector->napi);
20914 }
20915 }
20916@@ -4524,23 +5347,27 @@ static void i40e_vsi_close(struct i40e_vsi *vsi)
20917 i40e_vsi_free_tx_resources(vsi);
20918 i40e_vsi_free_rx_resources(vsi);
20919 vsi->current_netdev_flags = 0;
20920- pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
20921+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
20922 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
20923- pf->flags |= I40E_FLAG_CLIENT_RESET;
20924+ set_bit(__I40E_CLIENT_RESET, pf->state);
20925 }
20926
20927 /**
20928 * i40e_quiesce_vsi - Pause a given VSI
20929 * @vsi: the VSI being paused
20930 **/
20931-static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
20932+void i40e_quiesce_vsi(struct i40e_vsi *vsi)
20933 {
20934 if (test_bit(__I40E_VSI_DOWN, vsi->state))
20935 return;
20936
20937 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
20938 if (vsi->netdev && netif_running(vsi->netdev))
20939+#ifdef HAVE_NET_DEVICE_OPS
20940 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
20941+#else /* HAVE_NET_DEVICE_OPS */
20942+ vsi->netdev->stop(vsi->netdev);
20943+#endif /* HAVE_NET_DEVICE_OPS */
20944 else
20945 i40e_vsi_close(vsi);
20946 }
20947@@ -4549,13 +5376,17 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
20948 * i40e_unquiesce_vsi - Resume a given VSI
20949 * @vsi: the VSI being resumed
20950 **/
20951-static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
20952+void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
20953 {
20954 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
20955 return;
20956
20957 if (vsi->netdev && netif_running(vsi->netdev))
20958+#ifdef HAVE_NET_DEVICE_OPS
20959 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
20960+#else /* HAVE_NET_DEVICE_OPS */
20961+ vsi->netdev->open(vsi->netdev);
20962+#endif /* HAVE_NET_DEVICE_OPS */
20963 else
20964 i40e_vsi_open(vsi); /* this clears the DOWN bit */
20965 }
20966@@ -4564,7 +5395,7 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
20967 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
20968 * @pf: the PF
20969 **/
20970-static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
20971+void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
20972 {
20973 int v;
20974
20975@@ -4578,7 +5409,7 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
20976 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
20977 * @pf: the PF
20978 **/
20979-static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
20980+void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
20981 {
20982 int v;
20983
20984@@ -4593,7 +5424,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
20985 * @vsi: the VSI being configured
20986 *
20987 * Wait until all queues on a given VSI have been disabled.
20988- **/
20989+**/
20990 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
20991 {
20992 struct i40e_pf *pf = vsi->back;
20993@@ -4616,6 +5447,7 @@ int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
20994 /* Check and wait for the XDP Tx queue */
20995 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
20996 false);
20997+
20998 if (ret) {
20999 dev_info(&pf->pdev->dev,
21000 "VSI seid %d XDP Tx ring %d disable timeout\n",
21001@@ -4636,7 +5468,7 @@ wait_rx:
21002 return 0;
21003 }
21004
21005-#ifdef CONFIG_I40E_DCB
21006+#ifdef CONFIG_DCB
21007 /**
21008 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
21009 * @pf: the PF
21010@@ -4658,106 +5490,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
21011
21012 return ret;
21013 }
21014-
21015-#endif
21016-
21017-/**
21018- * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
21019- * @q_idx: TX queue number
21020- * @vsi: Pointer to VSI struct
21021- *
21022- * This function checks specified queue for given VSI. Detects hung condition.
21023- * We proactively detect hung TX queues by checking if interrupts are disabled
21024- * but there are pending descriptors. If it appears hung, attempt to recover
21025- * by triggering a SW interrupt.
21026- **/
21027-static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
21028-{
21029- struct i40e_ring *tx_ring = NULL;
21030- struct i40e_pf *pf;
21031- u32 val, tx_pending;
21032- int i;
21033-
21034- pf = vsi->back;
21035-
21036- /* now that we have an index, find the tx_ring struct */
21037- for (i = 0; i < vsi->num_queue_pairs; i++) {
21038- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
21039- if (q_idx == vsi->tx_rings[i]->queue_index) {
21040- tx_ring = vsi->tx_rings[i];
21041- break;
21042- }
21043- }
21044- }
21045-
21046- if (!tx_ring)
21047- return;
21048-
21049- /* Read interrupt register */
21050- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
21051- val = rd32(&pf->hw,
21052- I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
21053- tx_ring->vsi->base_vector - 1));
21054- else
21055- val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
21056-
21057- tx_pending = i40e_get_tx_pending(tx_ring);
21058-
21059- /* Interrupts are disabled and TX pending is non-zero,
21060- * trigger the SW interrupt (don't wait). Worst case
21061- * there will be one extra interrupt which may result
21062- * into not cleaning any queues because queues are cleaned.
21063- */
21064- if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
21065- i40e_force_wb(vsi, tx_ring->q_vector);
21066-}
21067-
21068-/**
21069- * i40e_detect_recover_hung - Function to detect and recover hung_queues
21070- * @pf: pointer to PF struct
21071- *
21072- * LAN VSI has netdev and netdev has TX queues. This function is to check
21073- * each of those TX queues if they are hung, trigger recovery by issuing
21074- * SW interrupt.
21075- **/
21076-static void i40e_detect_recover_hung(struct i40e_pf *pf)
21077-{
21078- struct net_device *netdev;
21079- struct i40e_vsi *vsi;
21080- unsigned int i;
21081-
21082- /* Only for LAN VSI */
21083- vsi = pf->vsi[pf->lan_vsi];
21084-
21085- if (!vsi)
21086- return;
21087-
21088- /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
21089- if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
21090- test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
21091- return;
21092-
21093- /* Make sure type is MAIN VSI */
21094- if (vsi->type != I40E_VSI_MAIN)
21095- return;
21096-
21097- netdev = vsi->netdev;
21098- if (!netdev)
21099- return;
21100-
21101- /* Bail out if netif_carrier is not OK */
21102- if (!netif_carrier_ok(netdev))
21103- return;
21104-
21105- /* Go thru' TX queues for netdev */
21106- for (i = 0; i < netdev->num_tx_queues; i++) {
21107- struct netdev_queue *q;
21108-
21109- q = netdev_get_tx_queue(netdev, i);
21110- if (q)
21111- i40e_detect_recover_hung_queue(i, vsi);
21112- }
21113-}
21114+#endif /* CONFIG_DCB */
21115
21116 /**
21117 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
21118@@ -4849,20 +5582,45 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
21119 return enabled_tc;
21120 }
21121
21122+#ifdef __TC_MQPRIO_MODE_MAX
21123+/**
21124+ * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
21125+ * @pf: PF being queried
21126+ *
21127+ * Query the current MQPRIO configuration and return the number of
21128+ * traffic classes enabled.
21129+ **/
21130+static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
21131+{
21132+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
21133+ u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
21134+ u8 enabled_tc = 1, i;
21135+
21136+ for (i = 1; i < num_tc; i++)
21137+ enabled_tc |= BIT(i);
21138+ return enabled_tc;
21139+}
21140+#endif
21141+
21142 /**
21143 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
21144 * @pf: PF being queried
21145 *
21146 * Return number of traffic classes enabled for the given PF
21147 **/
21148-static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
21149+u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
21150 {
21151 struct i40e_hw *hw = &pf->hw;
21152- u8 i, enabled_tc = 1;
21153+ u8 i, enabled_tc;
21154 u8 num_tc = 0;
21155 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
21156
21157- /* If DCB is not enabled then always in single TC */
21158+#ifdef __TC_MQPRIO_MODE_MAX
21159+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
21160+ return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
21161+#endif
21162+
21163+ /* If neither MQPRIO nor DCB is enabled, then always use single TC */
21164 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
21165 return 1;
21166
21167@@ -4874,7 +5632,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
21168 if (pf->hw.func_caps.iscsi)
21169 enabled_tc = i40e_get_iscsi_tc_map(pf);
21170 else
21171- return 1; /* Only TC0 */
21172+ return 1;/* Only TC0 */
21173
21174 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
21175 if (enabled_tc & BIT(i))
21176@@ -4891,7 +5649,14 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
21177 **/
21178 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
21179 {
21180- /* If DCB is not enabled for this PF then just return default TC */
21181+#ifdef __TC_MQPRIO_MODE_MAX
21182+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
21183+ return i40e_mqprio_get_enabled_tc(pf);
21184+#endif
21185+
21186+ /* If neither MQPRIO nor DCB is enabled for this PF then just return
21187+ * default TC
21188+ */
21189 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
21190 return I40E_DEFAULT_TRAFFIC_CLASS;
21191
21192@@ -4951,14 +5716,14 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
21193 /* Still continuing */
21194 }
21195
21196- vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
21197+ vsi->bw_limit = LE16_TO_CPU(bw_config.port_bw_limit);
21198 vsi->bw_max_quanta = bw_config.max_bw;
21199- tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
21200- (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
21201+ tc_bw_max = LE16_TO_CPU(bw_ets_config.tc_bw_max[0]) |
21202+ (LE16_TO_CPU(bw_ets_config.tc_bw_max[1]) << 16);
21203 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
21204 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
21205 vsi->bw_ets_limit_credits[i] =
21206- le16_to_cpu(bw_ets_config.credits[i]);
21207+ LE16_TO_CPU(bw_ets_config.credits[i]);
21208 /* 3 bits out of 4 for each TC */
21209 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
21210 }
21211@@ -4970,7 +5735,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
21212 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
21213 * @vsi: the VSI being configured
21214 * @enabled_tc: TC bitmap
21215- * @bw_credits: BW shared credits per TC
21216+ * @bw_share: BW shared credits per TC
21217 *
21218 * Returns 0 on success, negative value on failure
21219 **/
21220@@ -4978,19 +5743,32 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
21221 u8 *bw_share)
21222 {
21223 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
21224+ struct i40e_pf *pf = vsi->back;
21225 i40e_status ret;
21226 int i;
21227
21228+#ifdef __TC_MQPRIO_MODE_MAX
21229+ /* There is no need to reset BW when mqprio mode is on. */
21230+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
21231+ return 0;
21232+ if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
21233+ ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
21234+ if (ret)
21235+ dev_info(&pf->pdev->dev,
21236+ "Failed to reset tx rate for vsi->seid %u\n",
21237+ vsi->seid);
21238+ return ret;
21239+ }
21240+#endif
21241 bw_data.tc_valid_bits = enabled_tc;
21242 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
21243 bw_data.tc_bw_credits[i] = bw_share[i];
21244
21245- ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
21246- NULL);
21247+ ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
21248 if (ret) {
21249- dev_info(&vsi->back->pdev->dev,
21250+ dev_info(&pf->pdev->dev,
21251 "AQ command Config VSI BW allocation per TC failed = %d\n",
21252- vsi->back->hw.aq.asq_last_status);
21253+ pf->hw.aq.asq_last_status);
21254 return -EINVAL;
21255 }
21256
21257@@ -5043,6 +5821,9 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
21258 vsi->tc_config.tc_info[i].qoffset);
21259 }
21260
21261+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
21262+ return;
21263+
21264 /* Assign UP2TC map for the VSI */
21265 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
21266 /* Get the actual TC# for the UP */
21267@@ -5085,16 +5866,23 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
21268 * It is expected that the VSI queues have been quisced before calling
21269 * this function.
21270 **/
21271-static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
21272+int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
21273 {
21274 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
21275+ struct i40e_pf *pf = vsi->back;
21276+ struct i40e_hw *hw = &pf->hw;
21277 struct i40e_vsi_context ctxt;
21278 int ret = 0;
21279 int i;
21280-
21281+#ifdef __TC_MQPRIO_MODE_MAX
21282 /* Check if enabled_tc is same as existing or new TCs */
21283+ if (vsi->tc_config.enabled_tc == enabled_tc &&
21284+ vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
21285+ return ret;
21286+#else
21287 if (vsi->tc_config.enabled_tc == enabled_tc)
21288 return ret;
21289+#endif
21290
21291 /* Enable ETS TCs with equal BW Share for now across all VSIs */
21292 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
21293@@ -5104,19 +5892,74 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
21294
21295 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
21296 if (ret) {
21297- dev_info(&vsi->back->pdev->dev,
21298+ struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
21299+
21300+ dev_info(&pf->pdev->dev,
21301 "Failed configuring TC map %d for VSI %d\n",
21302 enabled_tc, vsi->seid);
21303- goto out;
21304- }
21305+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
21306+ &bw_config, NULL);
21307+ if (ret) {
21308+ dev_info(&pf->pdev->dev,
21309+ "Failed querying vsi bw info, err %s aq_err %s\n",
21310+ i40e_stat_str(hw, ret),
21311+ i40e_aq_str(hw, hw->aq.asq_last_status));
21312+ goto out;
21313+ }
21314+ if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
21315+ u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
21316
21317- /* Update Queue Pairs Mapping for currently enabled UPs */
21318+ if (!valid_tc)
21319+ valid_tc = bw_config.tc_valid_bits;
21320+ /* Always enable TC0, no matter what */
21321+ valid_tc |= 1;
21322+ dev_info(&pf->pdev->dev,
21323+ "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
21324+ enabled_tc, bw_config.tc_valid_bits, valid_tc);
21325+ enabled_tc = valid_tc;
21326+ }
21327+
21328+ ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
21329+ if (ret) {
21330+ dev_err(&pf->pdev->dev,
21331+ "Unable to configure TC map %d for VSI %d\n",
21332+ enabled_tc, vsi->seid);
21333+ goto out;
21334+ }
21335+ }
21336+
21337+ /* Update Queue Pairs Mapping for currently enabled UPs */
21338 ctxt.seid = vsi->seid;
21339 ctxt.pf_num = vsi->back->hw.pf_id;
21340 ctxt.vf_num = 0;
21341 ctxt.uplink_seid = vsi->uplink_seid;
21342 ctxt.info = vsi->info;
21343+
21344+#ifdef __TC_MQPRIO_MODE_MAX
21345+ if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
21346+ ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
21347+ if (ret)
21348+ goto out;
21349+ } else {
21350+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
21351+ }
21352+ /* On destroying the qdisc, reset vsi->rss_size, as number of enabled
21353+ * queues changed.
21354+ */
21355+ if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
21356+ vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
21357+ vsi->num_queue_pairs);
21358+ ret = i40e_vsi_config_rss(vsi);
21359+ if (ret) {
21360+ dev_info(&vsi->back->pdev->dev,
21361+ "Failed to reconfig rss for num_queues\n");
21362+ return ret;
21363+ }
21364+ vsi->reconfig_rss = false;
21365+ }
21366+#else
21367 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
21368+#endif
21369
21370 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
21371 ctxt.info.valid_sections |=
21372@@ -5124,14 +5967,15 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
21373 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
21374 }
21375
21376- /* Update the VSI after updating the VSI queue-mapping information */
21377- ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
21378+ /* Update the VSI after updating the VSI queue-mapping
21379+ * information
21380+ */
21381+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
21382 if (ret) {
21383- dev_info(&vsi->back->pdev->dev,
21384+ dev_info(&pf->pdev->dev,
21385 "Update vsi tc config failed, err %s aq_err %s\n",
21386- i40e_stat_str(&vsi->back->hw, ret),
21387- i40e_aq_str(&vsi->back->hw,
21388- vsi->back->hw.aq.asq_last_status));
21389+ i40e_stat_str(hw, ret),
21390+ i40e_aq_str(hw, hw->aq.asq_last_status));
21391 goto out;
21392 }
21393 /* update the local VSI info with updated queue map */
21394@@ -5141,11 +5985,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
21395 /* Update current VSI BW information */
21396 ret = i40e_vsi_get_bw_info(vsi);
21397 if (ret) {
21398- dev_info(&vsi->back->pdev->dev,
21399+ dev_info(&pf->pdev->dev,
21400 "Failed updating vsi bw info, err %s aq_err %s\n",
21401- i40e_stat_str(&vsi->back->hw, ret),
21402- i40e_aq_str(&vsi->back->hw,
21403- vsi->back->hw.aq.asq_last_status));
21404+ i40e_stat_str(hw, ret),
21405+ i40e_aq_str(hw, hw->aq.asq_last_status));
21406 goto out;
21407 }
21408
21409@@ -5205,7 +6048,7 @@ out:
21410 return ret;
21411 }
21412
21413-#ifdef CONFIG_I40E_DCB
21414+#ifdef CONFIG_DCB
21415 /**
21416 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
21417 * @pf: PF struct
21418@@ -5256,8 +6099,10 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
21419 } else {
21420 /* Re-configure VSI vectors based on updated TC map */
21421 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
21422+#ifdef HAVE_DCBNL_IEEE
21423 if (pf->vsi[v]->netdev)
21424 i40e_dcbnl_set_all(pf->vsi[v]);
21425+#endif /* HAVE_DCBNL_IEEE */
21426 }
21427 }
21428 }
21429@@ -5300,12 +6145,17 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
21430 struct i40e_hw *hw = &pf->hw;
21431 int err = 0;
21432
21433- /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
21434- if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
21435+ /* Do not enable DCB for SW1 and SW2 images even if the FW is capable
21436+ * Also do not enable DCBx if FW LLDP agent is disabled
21437+ */
21438+ if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
21439+ (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) {
21440+ dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n");
21441+ err = I40E_NOT_SUPPORTED;
21442 goto out;
21443+ }
21444
21445- /* Get the initial DCB configuration */
21446- err = i40e_init_dcb(hw);
21447+ err = i40e_init_dcb(hw, true);
21448 if (!err) {
21449 /* Device/Function is not DCBX capable */
21450 if ((!hw->func_caps.dcb) ||
21451@@ -5319,7 +6169,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
21452
21453 pf->flags |= I40E_FLAG_DCB_CAPABLE;
21454 /* Enable DCB tagging only when more than one TC
21455- * or explicitly disable if only one TC
21456+ * or explicity disable if only one TC
21457 */
21458 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
21459 pf->flags |= I40E_FLAG_DCB_ENABLED;
21460@@ -5328,6 +6178,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
21461 dev_dbg(&pf->pdev->dev,
21462 "DCBX offload is supported for this PF.\n");
21463 }
21464+ } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
21465+ dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
21466+ pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
21467 } else {
21468 dev_info(&pf->pdev->dev,
21469 "Query for DCB configuration failed, err %s aq_err %s\n",
21470@@ -5338,28 +6191,36 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
21471 out:
21472 return err;
21473 }
21474-#endif /* CONFIG_I40E_DCB */
21475+
21476+#endif /* CONFIG_DCB */
21477 #define SPEED_SIZE 14
21478 #define FC_SIZE 8
21479 /**
21480 * i40e_print_link_message - print link up or down
21481 * @vsi: the VSI for which link needs a message
21482+ * @isup: true if link is up, false otherwise
21483 */
21484 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
21485 {
21486 enum i40e_aq_link_speed new_speed;
21487- char *speed = "Unknown";
21488+ struct i40e_pf *pf = vsi->back;
21489+ char *speed = "Unknown ";
21490 char *fc = "Unknown";
21491 char *fec = "";
21492 char *req_fec = "";
21493 char *an = "";
21494
21495- new_speed = vsi->back->hw.phy.link_info.link_speed;
21496+ if (isup)
21497+ new_speed = pf->hw.phy.link_info.link_speed;
21498+ else
21499+ new_speed = I40E_LINK_SPEED_UNKNOWN;
21500
21501 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
21502 return;
21503+
21504 vsi->current_isup = isup;
21505 vsi->current_speed = new_speed;
21506+
21507 if (!isup) {
21508 netdev_info(vsi->netdev, "NIC Link is Down\n");
21509 return;
21510@@ -5368,13 +6229,13 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
21511 /* Warn user if link speed on NPAR enabled partition is not at
21512 * least 10GB
21513 */
21514- if (vsi->back->hw.func_caps.npar_enable &&
21515- (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
21516- vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
21517+ if (pf->hw.func_caps.npar_enable &&
21518+ (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
21519+ pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
21520 netdev_warn(vsi->netdev,
21521 "The partition detected link speed that is less than 10Gbps\n");
21522
21523- switch (vsi->back->hw.phy.link_info.link_speed) {
21524+ switch (pf->hw.phy.link_info.link_speed) {
21525 case I40E_LINK_SPEED_40GB:
21526 speed = "40 G";
21527 break;
21528@@ -5387,6 +6248,12 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
21529 case I40E_LINK_SPEED_10GB:
21530 speed = "10 G";
21531 break;
21532+ case I40E_LINK_SPEED_5GB:
21533+ speed = "5 G";
21534+ break;
21535+ case I40E_LINK_SPEED_2_5GB:
21536+ speed = "2.5 G";
21537+ break;
21538 case I40E_LINK_SPEED_1GB:
21539 speed = "1000 M";
21540 break;
21541@@ -5397,7 +6264,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
21542 break;
21543 }
21544
21545- switch (vsi->back->hw.fc.current_mode) {
21546+ switch (pf->hw.fc.current_mode) {
21547 case I40E_FC_FULL:
21548 fc = "RX/TX";
21549 break;
21550@@ -5412,36 +6279,60 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
21551 break;
21552 }
21553
21554- if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
21555- req_fec = ", Requested FEC: None";
21556- fec = ", FEC: None";
21557- an = ", Autoneg: False";
21558+ if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
21559+ req_fec = "None";
21560+ fec = "None";
21561+ an = "False";
21562
21563- if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
21564- an = ", Autoneg: True";
21565+ if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
21566+ an = "True";
21567
21568- if (vsi->back->hw.phy.link_info.fec_info &
21569+ if (pf->hw.phy.link_info.fec_info &
21570 I40E_AQ_CONFIG_FEC_KR_ENA)
21571- fec = ", FEC: CL74 FC-FEC/BASE-R";
21572- else if (vsi->back->hw.phy.link_info.fec_info &
21573+ fec = "CL74 FC-FEC/BASE-R";
21574+ else if (pf->hw.phy.link_info.fec_info &
21575 I40E_AQ_CONFIG_FEC_RS_ENA)
21576- fec = ", FEC: CL108 RS-FEC";
21577+ fec = "CL108 RS-FEC";
21578
21579 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
21580 * both RS and FC are requested
21581 */
21582- if (vsi->back->hw.phy.link_info.req_fec_info &
21583+ if (pf->hw.phy.link_info.req_fec_info &
21584 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
21585- if (vsi->back->hw.phy.link_info.req_fec_info &
21586+ if (pf->hw.phy.link_info.req_fec_info &
21587 I40E_AQ_REQUEST_FEC_RS)
21588- req_fec = ", Requested FEC: CL108 RS-FEC";
21589+ req_fec = "CL108 RS-FEC";
21590 else
21591- req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
21592+ req_fec = "CL74 FC-FEC/BASE-R";
21593 }
21594+ netdev_info(vsi->netdev,
21595+ "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
21596+ speed, req_fec, fec, an, fc);
21597+ } else {
21598+ struct ethtool_eee edata;
21599+
21600+ edata.supported = 0;
21601+ edata.eee_enabled = false;
21602+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
21603+ if (get_ethtool_ops_ext(vsi->netdev)->get_eee)
21604+ get_ethtool_ops_ext(vsi->netdev)
21605+ ->get_eee(vsi->netdev, &edata);
21606+#else
21607+ if (vsi->netdev->ethtool_ops->get_eee)
21608+ vsi->netdev->ethtool_ops->get_eee(vsi->netdev, &edata);
21609+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
21610+
21611+ if (edata.supported)
21612+ netdev_info(vsi->netdev,
21613+ "NIC Link is Up, %sbps Full Duplex, Flow Control: %s, EEE: %s\n",
21614+ speed, fc,
21615+ edata.eee_enabled ? "Enabled" : "Disabled");
21616+ else
21617+ netdev_info(vsi->netdev,
21618+ "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
21619+ speed, fc);
21620 }
21621
21622- netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
21623- speed, req_fec, fec, an, fc);
21624 }
21625
21626 /**
21627@@ -5472,29 +6363,20 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
21628 i40e_print_link_message(vsi, true);
21629 netif_tx_start_all_queues(vsi->netdev);
21630 netif_carrier_on(vsi->netdev);
21631- } else if (vsi->netdev) {
21632- i40e_print_link_message(vsi, false);
21633- /* need to check for qualified module here*/
21634- if ((pf->hw.phy.link_info.link_info &
21635- I40E_AQ_MEDIA_AVAILABLE) &&
21636- (!(pf->hw.phy.link_info.an_info &
21637- I40E_AQ_QUALIFIED_MODULE)))
21638- netdev_err(vsi->netdev,
21639- "the driver failed to link because an unqualified module was detected.");
21640- }
21641-
21642- /* replay FDIR SB filters */
21643+ }
21644+
21645+ /* replay flow filters */
21646 if (vsi->type == I40E_VSI_FDIR) {
21647 /* reset fd counters */
21648- pf->fd_add_err = 0;
21649- pf->fd_atr_cnt = 0;
21650+ pf->fd_add_err = pf->fd_atr_cnt = 0;
21651 i40e_fdir_filter_restore(vsi);
21652+ i40e_cloud_filter_restore(pf);
21653 }
21654
21655 /* On the next run of the service_task, notify any clients of the new
21656 * opened netdev
21657 */
21658- pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
21659+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
21660 i40e_service_event_schedule(pf);
21661
21662 return 0;
21663@@ -5515,7 +6397,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
21664 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
21665 usleep_range(1000, 2000);
21666 i40e_down(vsi);
21667-
21668 i40e_up(vsi);
21669 clear_bit(__I40E_CONFIG_BUSY, pf->state);
21670 }
21671@@ -5528,6 +6409,11 @@ int i40e_up(struct i40e_vsi *vsi)
21672 {
21673 int err;
21674
21675+ if ((vsi->type == I40E_VSI_MAIN &&
21676+ vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) ||
21677+ (vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN))
21678+ i40e_force_link_state(vsi->back, true);
21679+
21680 err = i40e_vsi_configure(vsi);
21681 if (!err)
21682 err = i40e_up_complete(vsi);
21683@@ -5535,6 +6421,111 @@ int i40e_up(struct i40e_vsi *vsi)
21684 return err;
21685 }
21686
21687+/**
21688+ * i40e_force_link_state - Force the link status
21689+ * @pf: board private structure
21690+ * @is_up: whether the link state should be forced up or down
21691+ **/
21692+static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
21693+{
21694+ struct i40e_aq_get_phy_abilities_resp abilities;
21695+ struct i40e_aq_set_phy_config config = {0};
21696+ struct i40e_hw *hw = &pf->hw;
21697+ bool non_zero_phy_type;
21698+ i40e_status err;
21699+ u64 mask;
21700+ u8 speed;
21701+
21702+ non_zero_phy_type = is_up;
21703+ /* Card might've been put in an unstable state by other drivers
21704+ * and applications, which causes incorrect speed values being
21705+ * set on startup. In order to clear speed registers, we call
21706+ * get_phy_capabilities twice, once to get initial state of
21707+ * available speeds, and once to get current phy config.
21708+ */
21709+ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
21710+ NULL);
21711+ if (err) {
21712+ dev_err(&pf->pdev->dev,
21713+ "failed to get phy cap., ret = %s last_status = %s\n",
21714+ i40e_stat_str(hw, err),
21715+ i40e_aq_str(hw, hw->aq.asq_last_status));
21716+ return err;
21717+ }
21718+ speed = abilities.link_speed;
21719+
21720+ /* Get the current phy config */
21721+ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
21722+ NULL);
21723+ if (err) {
21724+ dev_err(&pf->pdev->dev,
21725+ "failed to get phy cap., ret = %s last_status = %s\n",
21726+ i40e_stat_str(hw, err),
21727+ i40e_aq_str(hw, hw->aq.asq_last_status));
21728+ return err;
21729+ }
21730+
21731+ /* If link needs to go up, but was not forced to go down,
21732+ * and its speed values are OK, no need for a flap
21733+ * if non_zero_phy_type was set, still need to force up
21734+ */
21735+ if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN)
21736+ non_zero_phy_type = true;
21737+ else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
21738+ return I40E_SUCCESS;
21739+
21740+ /* To force link we need to set bits for all supported PHY types,
21741+ * but there are now more than 32, so we need to split the bitmap
21742+ * across two fields.
21743+ */
21744+ mask = I40E_PHY_TYPES_BITMASK;
21745+ config.phy_type =
21746+ non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
21747+ config.phy_type_ext =
21748+ non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
21749+ /* Copy the old settings, except of phy_type */
21750+ config.abilities = abilities.abilities;
21751+ if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN) {
21752+ if (is_up)
21753+ config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
21754+ else
21755+ config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
21756+ }
21757+ if (abilities.link_speed != 0)
21758+ config.link_speed = abilities.link_speed;
21759+ else
21760+ config.link_speed = speed;
21761+ config.eee_capability = abilities.eee_capability;
21762+ config.eeer = abilities.eeer_val;
21763+ config.low_power_ctrl = abilities.d3_lpan;
21764+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
21765+ I40E_AQ_PHY_FEC_CONFIG_MASK;
21766+ err = i40e_aq_set_phy_config(hw, &config, NULL);
21767+
21768+ if (err) {
21769+ dev_err(&pf->pdev->dev,
21770+ "set phy config ret = %s last_status = %s\n",
21771+ i40e_stat_str(&pf->hw, err),
21772+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
21773+ return err;
21774+ }
21775+
21776+ /* Update the link info */
21777+ err = i40e_update_link_info(hw);
21778+ if (err) {
21779+ /* Wait a little bit (on 40G cards it sometimes takes a really
21780+ * long time for link to come back from the atomic reset)
21781+ * and try once more
21782+ */
21783+ msleep(1000);
21784+ i40e_update_link_info(hw);
21785+ }
21786+
21787+ i40e_aq_set_link_restart_an(hw, is_up, NULL);
21788+
21789+ return I40E_SUCCESS;
21790+}
21791+
21792 /**
21793 * i40e_down - Shutdown the connection processing
21794 * @vsi: the VSI being stopped
21795@@ -5552,6 +6543,10 @@ void i40e_down(struct i40e_vsi *vsi)
21796 }
21797 i40e_vsi_disable_irq(vsi);
21798 i40e_vsi_stop_rings(vsi);
21799+ if ((vsi->type == I40E_VSI_MAIN &&
21800+ vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) ||
21801+ (vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN))
21802+ i40e_force_link_state(vsi->back, false);
21803 i40e_napi_disable_all(vsi);
21804
21805 for (i = 0; i < vsi->num_queue_pairs; i++) {
21806@@ -5564,3795 +6559,6555 @@ void i40e_down(struct i40e_vsi *vsi)
21807 }
21808
21809 /**
21810- * i40e_setup_tc - configure multiple traffic classes
21811- * @netdev: net device to configure
21812- * @tc: number of traffic classes to enable
21813+ * i40e_get_link_speed - Returns link speed for the interface
21814+ * @vsi: VSI to be configured
21815+ *
21816 **/
21817-static int i40e_setup_tc(struct net_device *netdev, u8 tc)
21818+int i40e_get_link_speed(struct i40e_vsi *vsi)
21819 {
21820- struct i40e_netdev_priv *np = netdev_priv(netdev);
21821- struct i40e_vsi *vsi = np->vsi;
21822 struct i40e_pf *pf = vsi->back;
21823- u8 enabled_tc = 0;
21824- int ret = -EINVAL;
21825- int i;
21826
21827- /* Check if DCB enabled to continue */
21828- if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
21829- netdev_info(netdev, "DCB is not enabled for adapter\n");
21830- goto exit;
21831+ switch (pf->hw.phy.link_info.link_speed) {
21832+ case I40E_LINK_SPEED_40GB:
21833+ return 40000;
21834+ case I40E_LINK_SPEED_25GB:
21835+ return 25000;
21836+ case I40E_LINK_SPEED_20GB:
21837+ return 20000;
21838+ case I40E_LINK_SPEED_10GB:
21839+ return 10000;
21840+ case I40E_LINK_SPEED_1GB:
21841+ return 1000;
21842+ default:
21843+ return -EINVAL;
21844 }
21845+}
21846
21847- /* Check if MFP enabled */
21848- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
21849- netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
21850- goto exit;
21851- }
21852+/**
21853+ * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
21854+ * @vsi: VSI to be configured
21855+ * @seid: seid of the channel/VSI
21856+ * @max_tx_rate: max TX rate to be configured as BW limit
21857+ *
21858+ * Helper function to set BW limit for a given VSI
21859+ **/
21860+int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
21861+{
21862+ struct i40e_pf *pf = vsi->back;
21863+ int speed = 0;
21864+ int ret = 0;
21865
21866- /* Check whether tc count is within enabled limit */
21867- if (tc > i40e_pf_get_num_tc(pf)) {
21868- netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
21869- goto exit;
21870+ speed = i40e_get_link_speed(vsi);
21871+ if (max_tx_rate > speed) {
21872+ dev_err(&pf->pdev->dev,
21873+ "Invalid max tx rate %llu specified for VSI seid %d.",
21874+ max_tx_rate, seid);
21875+ return -EINVAL;
21876+ }
21877+ if (max_tx_rate && max_tx_rate < 50) {
21878+ dev_warn(&pf->pdev->dev,
21879+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
21880+ max_tx_rate = 50;
21881 }
21882
21883- /* Generate TC map for number of tc requested */
21884- for (i = 0; i < tc; i++)
21885- enabled_tc |= BIT(i);
21886+ /* Tx rate credits are in values of 50Mbps, 0 is disabled */
21887+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid,
21888+ max_tx_rate / I40E_BW_CREDIT_DIVISOR,
21889+ I40E_MAX_BW_INACTIVE_ACCUM, NULL);
21890+ if (ret)
21891+ dev_err(&pf->pdev->dev,
21892+ "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
21893+ max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
21894+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
21895+ return ret;
21896+}
21897
21898- /* Requesting same TC configuration as already enabled */
21899- if (enabled_tc == vsi->tc_config.enabled_tc)
21900- return 0;
21901+#ifdef __TC_MQPRIO_MODE_MAX
21902+/**
21903+ * i40e_validate_and_set_switch_mode - sets up switch mode correctly
21904+ * @vsi: ptr to VSI which has PF backing
21905+ *
21906+ * Sets up switch mode correctly if it needs to be changed and perform
21907+ * what are allowed modes.
21908+ **/
21909+static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
21910+{
21911+ u8 mode;
21912+ struct i40e_pf *pf = vsi->back;
21913+ struct i40e_hw *hw = &pf->hw;
21914+ int ret;
21915
21916- /* Quiesce VSI queues */
21917- i40e_quiesce_vsi(vsi);
21918+ ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
21919+ if (ret)
21920+ return -EINVAL;
21921
21922- /* Configure VSI for enabled TCs */
21923- ret = i40e_vsi_config_tc(vsi, enabled_tc);
21924- if (ret) {
21925- netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
21926- vsi->seid);
21927- goto exit;
21928+ if (hw->dev_caps.switch_mode) {
21929+ /* if switch mode is set, support mode2 (non-tunneled for
21930+ * cloud filter) for now
21931+ */
21932+ u32 switch_mode = hw->dev_caps.switch_mode &
21933+ I40E_SWITCH_MODE_MASK;
21934+ if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
21935+ if (switch_mode == I40E_CLOUD_FILTER_MODE2)
21936+ return 0;
21937+ dev_err(&pf->pdev->dev,
21938+ "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
21939+ hw->dev_caps.switch_mode);
21940+ return -EINVAL;
21941+ }
21942 }
21943
21944- /* Unquiesce VSI */
21945- i40e_unquiesce_vsi(vsi);
21946-
21947-exit:
21948- return ret;
21949-}
21950+ /* Set Bit 7 to be valid */
21951+ mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
21952
21953-static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
21954- void *type_data)
21955-{
21956- struct tc_mqprio_qopt *mqprio = type_data;
21957+ /* Set L4type for TCP support */
21958+ mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
21959
21960- if (type != TC_SETUP_MQPRIO)
21961- return -EOPNOTSUPP;
21962+ /* Set cloud filter mode */
21963+ mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
21964
21965- mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
21966+ /* Prep mode field for set_switch_config */
21967+ ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
21968+ pf->last_sw_conf_valid_flags,
21969+ mode, NULL);
21970+ if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
21971+ dev_err(&pf->pdev->dev,
21972+ "couldn't set switch config bits, err %s aq_err %s\n",
21973+ i40e_stat_str(hw, ret),
21974+ i40e_aq_str(hw,
21975+ hw->aq.asq_last_status));
21976
21977- return i40e_setup_tc(netdev, mqprio->num_tc);
21978+ return ret;
21979 }
21980
21981 /**
21982- * i40e_open - Called when a network interface is made active
21983- * @netdev: network interface device structure
21984- *
21985- * The open entry point is called when a network interface is made
21986- * active by the system (IFF_UP). At this point all resources needed
21987- * for transmit and receive operations are allocated, the interrupt
21988- * handler is registered with the OS, the netdev watchdog subtask is
21989- * enabled, and the stack is notified that the interface is ready.
21990+ * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
21991+ * @vsi: pointer to VSI
21992+ * @filter: cloud filter rule
21993+ * @add: if true, add, if false, delete
21994 *
21995- * Returns 0 on success, negative value on failure
21996+ * Add or delete a cloud filter for a specific flow spec using big buffer.
21997+ * Returns 0 if the filter was successfully added.
21998 **/
21999-int i40e_open(struct net_device *netdev)
22000+int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
22001+ struct i40e_cloud_filter *filter,
22002+ bool add)
22003 {
22004- struct i40e_netdev_priv *np = netdev_priv(netdev);
22005- struct i40e_vsi *vsi = np->vsi;
22006+ struct i40e_aqc_cloud_filters_element_bb cld_filter;
22007 struct i40e_pf *pf = vsi->back;
22008- int err;
22009+ int ret;
22010
22011- /* disallow open during test or if eeprom is broken */
22012- if (test_bit(__I40E_TESTING, pf->state) ||
22013- test_bit(__I40E_BAD_EEPROM, pf->state))
22014- return -EBUSY;
22015+ if (i40e_is_l4mode_enabled()) {
22016+ dev_err(&pf->pdev->dev,
22017+ "Not expected to run in L4 cloud filter mode\n");
22018+ return -EINVAL;
22019+ }
22020
22021- netif_carrier_off(netdev);
22022+ /* Both (src/dst) valid mac_addr are not supported */
22023+ if ((is_valid_ether_addr(filter->dst_mac) &&
22024+ is_valid_ether_addr(filter->src_mac)) ||
22025+ (is_multicast_ether_addr(filter->dst_mac) &&
22026+ is_multicast_ether_addr(filter->src_mac)))
22027+ return -EOPNOTSUPP;
22028
22029- err = i40e_vsi_open(vsi);
22030- if (err)
22031- return err;
22032+ /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
22033+ * ports are not supported via big buffer now.
22034+ */
22035+ if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
22036+ return -EOPNOTSUPP;
22037
22038- /* configure global TSO hardware offload settings */
22039- wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
22040- TCP_FLAG_FIN) >> 16);
22041- wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
22042- TCP_FLAG_FIN |
22043- TCP_FLAG_CWR) >> 16);
22044- wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
22045+ /* adding filter using src_port/src_ip is not supported at this stage */
22046+ if (filter->src_port || filter->src_ipv4 ||
22047+ !ipv6_addr_any(&filter->ip.v6.src_ip6))
22048+ return -EOPNOTSUPP;
22049
22050- udp_tunnel_get_rx_info(netdev);
22051+ /* copy element needed to add cloud filter from filter */
22052+ i40e_set_cld_element(filter, &cld_filter.element);
22053
22054- return 0;
22055+ if (is_valid_ether_addr(filter->dst_mac) ||
22056+ is_valid_ether_addr(filter->src_mac) ||
22057+ is_multicast_ether_addr(filter->dst_mac) ||
22058+ is_multicast_ether_addr(filter->src_mac)) {
22059+ /* MAC + IP : unsupported mode */
22060+ if (filter->dst_ipv4)
22061+ return -EOPNOTSUPP;
22062+
22063+ /* since we validated that L4 port must be valid before
22064+ * we get here, start with respective "flags" value
22065+ * and update if vlan is present or not
22066+ */
22067+ cld_filter.element.flags =
22068+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
22069+
22070+ if (filter->vlan_id) {
22071+ cld_filter.element.flags =
22072+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
22073+ }
22074+
22075+ } else if (filter->dst_ipv4 ||
22076+ !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
22077+ cld_filter.element.flags =
22078+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
22079+ if (filter->n_proto == ETH_P_IPV6)
22080+ cld_filter.element.flags |=
22081+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
22082+ else
22083+ cld_filter.element.flags |=
22084+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
22085+ } else {
22086+ dev_err(&pf->pdev->dev,
22087+ "either mac or ip has to be valid for cloud filter\n");
22088+ return -EINVAL;
22089+ }
22090+
22091+ /* Now copy L4 port in Byte 6..7 in general fields */
22092+ cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
22093+ be16_to_cpu(filter->dst_port);
22094+
22095+ if (add) {
22096+ /* Validate current device switch mode, change if necessary */
22097+ ret = i40e_validate_and_set_switch_mode(vsi);
22098+ if (ret) {
22099+ dev_err(&pf->pdev->dev,
22100+ "failed to set switch mode, ret %d\n",
22101+ ret);
22102+ return ret;
22103+ }
22104+
22105+ ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
22106+ &cld_filter, 1);
22107+ } else {
22108+ ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
22109+ &cld_filter, 1);
22110+ }
22111+
22112+ if (ret)
22113+ dev_dbg(&pf->pdev->dev,
22114+ "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
22115+ add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
22116+ else
22117+ dev_info(&pf->pdev->dev,
22118+ "%s cloud filter for VSI: %d, L4 port: %d\n",
22119+ add ? "add" : "delete", filter->seid,
22120+ ntohs(filter->dst_port));
22121+ return ret;
22122 }
22123
22124 /**
22125- * i40e_vsi_open -
22126- * @vsi: the VSI to open
22127- *
22128- * Finish initialization of the VSI.
22129- *
22130- * Returns 0 on success, negative value on failure
22131+ * i40e_remove_queue_channels - Remove queue channels for the TCs
22132+ * @vsi: VSI to be configured
22133 *
22134- * Note: expects to be called while under rtnl_lock()
22135+ * Remove queue channels for the TCs
22136 **/
22137-int i40e_vsi_open(struct i40e_vsi *vsi)
22138+static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
22139 {
22140+ enum i40e_admin_queue_err last_aq_status;
22141+ struct i40e_cloud_filter *cfilter;
22142+ struct i40e_channel *ch, *ch_tmp;
22143 struct i40e_pf *pf = vsi->back;
22144- char int_name[I40E_INT_NAME_STR_LEN];
22145- int err;
22146+ struct hlist_node *node;
22147+ int ret, i;
22148
22149- /* allocate descriptors */
22150- err = i40e_vsi_setup_tx_resources(vsi);
22151- if (err)
22152- goto err_setup_tx;
22153- err = i40e_vsi_setup_rx_resources(vsi);
22154- if (err)
22155- goto err_setup_rx;
22156+ /* Reset rss size that was stored when reconfiguring rss for
22157+ * channel VSIs with non-power-of-2 queue count.
22158+ */
22159+ vsi->current_rss_size = 0;
22160
22161- err = i40e_vsi_configure(vsi);
22162- if (err)
22163- goto err_setup_rx;
22164+ /* perform cleanup for channels if they exist */
22165+ if (list_empty(&vsi->ch_list))
22166+ return;
22167
22168- if (vsi->netdev) {
22169- snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
22170- dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
22171- err = i40e_vsi_request_irq(vsi, int_name);
22172- if (err)
22173- goto err_setup_rx;
22174+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
22175+ struct i40e_vsi *p_vsi;
22176
22177- /* Notify the stack of the actual queue counts. */
22178- err = netif_set_real_num_tx_queues(vsi->netdev,
22179- vsi->num_queue_pairs);
22180- if (err)
22181- goto err_set_queues;
22182+ list_del(&ch->list);
22183+ p_vsi = ch->parent_vsi;
22184+ if (!p_vsi || !ch->initialized) {
22185+ kfree(ch);
22186+ continue;
22187+ }
22188+ /* Reset queue contexts */
22189+ for (i = 0; i < ch->num_queue_pairs; i++) {
22190+ struct i40e_ring *tx_ring, *rx_ring;
22191+ u16 pf_q;
22192
22193- err = netif_set_real_num_rx_queues(vsi->netdev,
22194- vsi->num_queue_pairs);
22195- if (err)
22196- goto err_set_queues;
22197+ pf_q = ch->base_queue + i;
22198+ tx_ring = vsi->tx_rings[pf_q];
22199+ tx_ring->ch = NULL;
22200
22201- } else if (vsi->type == I40E_VSI_FDIR) {
22202- snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
22203- dev_driver_string(&pf->pdev->dev),
22204- dev_name(&pf->pdev->dev));
22205- err = i40e_vsi_request_irq(vsi, int_name);
22206+ rx_ring = vsi->rx_rings[pf_q];
22207+ rx_ring->ch = NULL;
22208+ }
22209
22210- } else {
22211- err = -EINVAL;
22212- goto err_setup_rx;
22213- }
22214+ /* Reset BW configured for this VSI via mqprio */
22215+ ret = i40e_set_bw_limit(vsi, ch->seid, 0);
22216+ if (ret)
22217+ dev_info(&vsi->back->pdev->dev,
22218+ "Failed to reset tx rate for ch->seid %u\n",
22219+ ch->seid);
22220
22221- err = i40e_up_complete(vsi);
22222- if (err)
22223- goto err_up_complete;
22224+ /* delete cloud filters associated with this channel */
22225+ hlist_for_each_entry_safe(cfilter, node,
22226+ &pf->cloud_filter_list, cloud_node) {
22227+ if (cfilter->seid != ch->seid)
22228+ continue;
22229
22230- return 0;
22231+ hash_del(&cfilter->cloud_node);
22232+ if (cfilter->dst_port)
22233+ ret = i40e_add_del_cloud_filter_big_buf(vsi,
22234+ cfilter,
22235+ false);
22236+ else
22237+ ret = i40e_add_del_cloud_filter(vsi, cfilter,
22238+ false);
22239+ last_aq_status = pf->hw.aq.asq_last_status;
22240+ if (ret)
22241+ dev_info(&pf->pdev->dev,
22242+ "Failed to delete cloud filter, err %s aq_err %s\n",
22243+ i40e_stat_str(&pf->hw, ret),
22244+ i40e_aq_str(&pf->hw, last_aq_status));
22245+ kfree(cfilter);
22246+ }
22247
22248-err_up_complete:
22249- i40e_down(vsi);
22250-err_set_queues:
22251- i40e_vsi_free_irq(vsi);
22252-err_setup_rx:
22253- i40e_vsi_free_rx_resources(vsi);
22254-err_setup_tx:
22255- i40e_vsi_free_tx_resources(vsi);
22256- if (vsi == pf->vsi[pf->lan_vsi])
22257- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
22258+ /* delete cloud filters associated with this channel */
22259+ hlist_for_each_entry_safe(cfilter, node,
22260+ &pf->cloud_filter_list, cloud_node) {
22261+ if (cfilter->seid != ch->seid)
22262+ continue;
22263
22264- return err;
22265+ hash_del(&cfilter->cloud_node);
22266+ if (cfilter->dst_port)
22267+ ret = i40e_add_del_cloud_filter_big_buf(vsi,
22268+ cfilter,
22269+ false);
22270+ else
22271+ ret = i40e_add_del_cloud_filter(vsi, cfilter,
22272+ false);
22273+ last_aq_status = pf->hw.aq.asq_last_status;
22274+ if (ret)
22275+ dev_info(&pf->pdev->dev,
22276+ "Failed to delete cloud filter, err %s aq_err %s\n",
22277+ i40e_stat_str(&pf->hw, ret),
22278+ i40e_aq_str(&pf->hw, last_aq_status));
22279+ kfree(cfilter);
22280+ }
22281+
22282+ /* delete VSI from FW */
22283+ ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
22284+ NULL);
22285+ if (ret)
22286+ dev_err(&vsi->back->pdev->dev,
22287+ "unable to remove channel (%d) for parent VSI(%d)\n",
22288+ ch->seid, p_vsi->seid);
22289+ kfree(ch);
22290+ }
22291+ INIT_LIST_HEAD(&vsi->ch_list);
22292 }
22293
22294 /**
22295- * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
22296- * @pf: Pointer to PF
22297+ * i40e_get_max_queues_for_channel
22298+ * @vsi: ptr to VSI to which channels are associated with
22299 *
22300- * This function destroys the hlist where all the Flow Director
22301- * filters were saved.
22302+ * Helper function which returns max value among the queue counts set on the
22303+ * channels/TCs created.
22304 **/
22305-static void i40e_fdir_filter_exit(struct i40e_pf *pf)
22306+static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
22307 {
22308- struct i40e_fdir_filter *filter;
22309- struct i40e_flex_pit *pit_entry, *tmp;
22310- struct hlist_node *node2;
22311+ struct i40e_channel *ch, *ch_tmp;
22312+ int max = 0;
22313
22314- hlist_for_each_entry_safe(filter, node2,
22315- &pf->fdir_filter_list, fdir_node) {
22316- hlist_del(&filter->fdir_node);
22317- kfree(filter);
22318+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
22319+ if (!ch->initialized)
22320+ continue;
22321+ if (ch->num_queue_pairs > max)
22322+ max = ch->num_queue_pairs;
22323 }
22324
22325- list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
22326- list_del(&pit_entry->list);
22327- kfree(pit_entry);
22328+ return max;
22329+}
22330+
22331+/**
22332+ * i40e_validate_num_queues - validate num_queues w.r.t channel
22333+ * @pf: ptr to PF device
22334+ * @num_queues: number of queues
22335+ * @vsi: the parent VSI
22336+ * @reconfig_rss: indicates should the RSS be reconfigured or not
22337+ *
22338+ * This function validates number of queues in the context of new channel
22339+ * which is being established and determines if RSS should be reconfigured
22340+ * or not for parent VSI.
22341+ **/
22342+static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
22343+ struct i40e_vsi *vsi, bool *reconfig_rss)
22344+{
22345+ int max_ch_queues;
22346+
22347+ if (!reconfig_rss)
22348+ return -EINVAL;
22349+
22350+ *reconfig_rss = false;
22351+ if (vsi->current_rss_size) {
22352+ if (num_queues > vsi->current_rss_size) {
22353+ dev_dbg(&pf->pdev->dev,
22354+ "Error: num_queues (%d) > vsi's current_size(%d)\n",
22355+ num_queues, vsi->current_rss_size);
22356+ return -EINVAL;
22357+ } else if ((num_queues < vsi->current_rss_size) &&
22358+ (!is_power_of_2(num_queues))) {
22359+ dev_dbg(&pf->pdev->dev,
22360+ "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
22361+ num_queues, vsi->current_rss_size);
22362+ return -EINVAL;
22363+ }
22364 }
22365- INIT_LIST_HEAD(&pf->l3_flex_pit_list);
22366
22367- list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
22368- list_del(&pit_entry->list);
22369- kfree(pit_entry);
22370+ if (!is_power_of_2(num_queues)) {
22371+ /* Find the max num_queues configured for channel if channel
22372+ * exist.
22373+ * if channel exist, then enforce 'num_queues' to be more than
22374+ * max ever queues configured for channel.
22375+ */
22376+ max_ch_queues = i40e_get_max_queues_for_channel(vsi);
22377+ if (num_queues < max_ch_queues) {
22378+ dev_dbg(&pf->pdev->dev,
22379+ "Error: num_queues (%d) < max queues configured for channel(%d)\n",
22380+ num_queues, max_ch_queues);
22381+ return -EINVAL;
22382+ }
22383+ *reconfig_rss = true;
22384 }
22385- INIT_LIST_HEAD(&pf->l4_flex_pit_list);
22386
22387- pf->fdir_pf_active_filters = 0;
22388- pf->fd_tcp4_filter_cnt = 0;
22389- pf->fd_udp4_filter_cnt = 0;
22390- pf->fd_sctp4_filter_cnt = 0;
22391- pf->fd_ip4_filter_cnt = 0;
22392+ return 0;
22393+}
22394
22395- /* Reprogram the default input set for TCP/IPv4 */
22396- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
22397- I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
22398- I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
22399+/**
22400+ * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
22401+ * @vsi: the VSI being setup
22402+ * @rss_size: size of RSS, accordingly LUT gets reprogrammed
22403+ *
22404+ * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
22405+ **/
22406+static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
22407+{
22408+ struct i40e_pf *pf = vsi->back;
22409+ u8 seed[I40E_HKEY_ARRAY_SIZE];
22410+ struct i40e_hw *hw = &pf->hw;
22411+ int local_rss_size;
22412+ u8 *lut;
22413+ int ret;
22414
22415- /* Reprogram the default input set for UDP/IPv4 */
22416- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
22417- I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
22418- I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
22419+ if (!vsi->rss_size)
22420+ return -EINVAL;
22421
22422- /* Reprogram the default input set for SCTP/IPv4 */
22423- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
22424- I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
22425- I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
22426+ if (rss_size > vsi->rss_size)
22427+ return -EINVAL;
22428
22429- /* Reprogram the default input set for Other/IPv4 */
22430- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
22431- I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
22432+ local_rss_size = min_t(int, vsi->rss_size, rss_size);
22433+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
22434+ if (!lut)
22435+ return -ENOMEM;
22436+
22437+ /* Ignoring user configured lut if there is one */
22438+ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
22439+
22440+ /* Use user configured hash key if there is one, otherwise
22441+ * use default.
22442+ */
22443+ if (vsi->rss_hkey_user)
22444+ memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
22445+ else
22446+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
22447+
22448+ ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
22449+ if (ret) {
22450+ dev_info(&pf->pdev->dev,
22451+ "Cannot set RSS lut, err %s aq_err %s\n",
22452+ i40e_stat_str(hw, ret),
22453+ i40e_aq_str(hw, hw->aq.asq_last_status));
22454+ kfree(lut);
22455+ return ret;
22456+ }
22457+ kfree(lut);
22458+
22459+ /* Do the update w.r.t. storing rss_size */
22460+ if (!vsi->orig_rss_size)
22461+ vsi->orig_rss_size = vsi->rss_size;
22462+ vsi->current_rss_size = local_rss_size;
22463+
22464+ return ret;
22465 }
22466
22467 /**
22468- * i40e_close - Disables a network interface
22469- * @netdev: network interface device structure
22470- *
22471- * The close entry point is called when an interface is de-activated
22472- * by the OS. The hardware is still under the driver's control, but
22473- * this netdev interface is disabled.
22474+ * i40e_channel_setup_queue_map - Setup a channel queue map
22475+ * @pf: ptr to PF device
22476+ * @ctxt: VSI context structure
22477+ * @ch: ptr to channel structure
22478 *
22479- * Returns 0, this is not allowed to fail
22480+ * Setup queue map for a specific channel
22481 **/
22482-int i40e_close(struct net_device *netdev)
22483+static int i40e_channel_setup_queue_map(struct i40e_pf *pf,
22484+ struct i40e_vsi_context *ctxt,
22485+ struct i40e_channel *ch)
22486 {
22487- struct i40e_netdev_priv *np = netdev_priv(netdev);
22488- struct i40e_vsi *vsi = np->vsi;
22489+ u16 qmap, sections = 0;
22490+ u8 offset = 0;
22491+ int pow;
22492
22493- i40e_vsi_close(vsi);
22494+ if (ch->num_queue_pairs > pf->num_lan_msix) {
22495+ dev_err(&pf->pdev->dev,
22496+ "Requested queues number exceeded max available MSI-X vectors. Refused to set queue map\n");
22497+ return -EINVAL;
22498+ }
22499+
22500+ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
22501+ sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
22502+
22503+ /* find the next higher power-of-2 of num queue pairs */
22504+ pow = ilog2(ch->num_queue_pairs);
22505+ if (!is_power_of_2(ch->num_queue_pairs))
22506+ pow++;
22507+
22508+ qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
22509+ (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
22510+
22511+ /* Setup queue TC[0].qmap for given VSI context */
22512+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
22513+
22514+ ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
22515+ ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
22516+ ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
22517+ ctxt->info.valid_sections |= cpu_to_le16(sections);
22518
22519 return 0;
22520 }
22521
22522 /**
22523- * i40e_do_reset - Start a PF or Core Reset sequence
22524- * @pf: board private structure
22525- * @reset_flags: which reset is requested
22526- * @lock_acquired: indicates whether or not the lock has been acquired
22527- * before this function was called.
22528+ * i40e_add_channel - add a channel by adding VSI
22529+ * @pf: ptr to PF device
22530+ * @uplink_seid: underlying HW switching element (VEB) ID
22531+ * @ch: ptr to channel structure
22532 *
22533- * The essential difference in resets is that the PF Reset
22534- * doesn't clear the packet buffers, doesn't reset the PE
22535- * firmware, and doesn't bother the other PFs on the chip.
22536+ * Add a channel (VSI) using add_vsi and queue_map
22537 **/
22538-void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
22539+static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
22540+ struct i40e_channel *ch)
22541 {
22542- u32 val;
22543-
22544- WARN_ON(in_interrupt());
22545+ struct i40e_hw *hw = &pf->hw;
22546+ struct i40e_vsi_context ctxt;
22547+ u8 enabled_tc = 0x1; /* TC0 enabled */
22548+ int ret;
22549
22550+ if (ch->type != I40E_VSI_VMDQ2) {
22551+ dev_info(&pf->pdev->dev,
22552+ "add new vsi failed, ch->type %d\n", ch->type);
22553+ return -EINVAL;
22554+ }
22555
22556- /* do the biggest reset indicated */
22557- if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
22558+ memset(&ctxt, 0, sizeof(ctxt));
22559+ ctxt.pf_num = hw->pf_id;
22560+ ctxt.vf_num = 0;
22561+ ctxt.uplink_seid = uplink_seid;
22562+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
22563+ if (ch->type == I40E_VSI_VMDQ2)
22564+ ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
22565
22566- /* Request a Global Reset
22567- *
22568- * This will start the chip's countdown to the actual full
22569- * chip reset event, and a warning interrupt to be sent
22570- * to all PFs, including the requestor. Our handler
22571- * for the warning interrupt will deal with the shutdown
22572- * and recovery of the switch setup.
22573- */
22574- dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
22575- val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
22576- val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
22577- wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
22578+ if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
22579+ ctxt.info.valid_sections |=
22580+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
22581+ ctxt.info.switch_id =
22582+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
22583+ }
22584
22585- } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
22586+ /* Set queue map for a given VSI context */
22587+ ret = i40e_channel_setup_queue_map(pf, &ctxt, ch);
22588+ if (ret)
22589+ return ret;
22590
22591- /* Request a Core Reset
22592- *
22593- * Same as Global Reset, except does *not* include the MAC/PHY
22594- */
22595- dev_dbg(&pf->pdev->dev, "CoreR requested\n");
22596- val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
22597- val |= I40E_GLGEN_RTRIG_CORER_MASK;
22598- wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
22599- i40e_flush(&pf->hw);
22600+ /* Now time to create VSI */
22601+ ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
22602+ if (ret) {
22603+ dev_info(&pf->pdev->dev,
22604+ "add new vsi failed, err %s aq_err %s\n",
22605+ i40e_stat_str(&pf->hw, ret),
22606+ i40e_aq_str(&pf->hw,
22607+ pf->hw.aq.asq_last_status));
22608+ return -ENOENT;
22609+ }
22610
22611- } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
22612+ /* Success, update channel */
22613+ ch->enabled_tc = enabled_tc;
22614+ ch->seid = ctxt.seid;
22615+ ch->vsi_number = ctxt.vsi_number;
22616+ ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
22617
22618- /* Request a PF Reset
22619- *
22620- * Resets only the PF-specific registers
22621- *
22622- * This goes directly to the tear-down and rebuild of
22623- * the switch, since we need to do all the recovery as
22624- * for the Core Reset.
22625- */
22626- dev_dbg(&pf->pdev->dev, "PFR requested\n");
22627- i40e_handle_reset_warning(pf, lock_acquired);
22628+	/* copy just the sections touched, not the entire info,
22629+	 * since not all sections are valid as returned by
22630+	 * update vsi params
22631+ */
22632+ ch->info.mapping_flags = ctxt.info.mapping_flags;
22633+ memcpy(&ch->info.queue_mapping,
22634+ &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
22635+ memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
22636+ sizeof(ctxt.info.tc_mapping));
22637
22638- } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
22639- int v;
22640+ return 0;
22641+}
22642
22643- /* Find the VSI(s) that requested a re-init */
22644- dev_info(&pf->pdev->dev,
22645- "VSI reinit requested\n");
22646- for (v = 0; v < pf->num_alloc_vsi; v++) {
22647- struct i40e_vsi *vsi = pf->vsi[v];
22648-
22649- if (vsi != NULL &&
22650- test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
22651- vsi->state))
22652- i40e_vsi_reinit_locked(pf->vsi[v]);
22653- }
22654- } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
22655- int v;
22656+static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
22657+ u8 *bw_share)
22658+{
22659+ struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
22660+ i40e_status ret;
22661+ int i;
22662
22663- /* Find the VSI(s) that needs to be brought down */
22664- dev_info(&pf->pdev->dev, "VSI down requested\n");
22665- for (v = 0; v < pf->num_alloc_vsi; v++) {
22666- struct i40e_vsi *vsi = pf->vsi[v];
22667+ bw_data.tc_valid_bits = ch->enabled_tc;
22668+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
22669+ bw_data.tc_bw_credits[i] = bw_share[i];
22670
22671- if (vsi != NULL &&
22672- test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
22673- vsi->state)) {
22674- set_bit(__I40E_VSI_DOWN, vsi->state);
22675- i40e_down(vsi);
22676- }
22677- }
22678- } else {
22679- dev_info(&pf->pdev->dev,
22680- "bad reset request 0x%08x\n", reset_flags);
22681+ ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
22682+ &bw_data, NULL);
22683+ if (ret) {
22684+ dev_info(&vsi->back->pdev->dev,
22685+ "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
22686+ vsi->back->hw.aq.asq_last_status, ch->seid);
22687+ return -EINVAL;
22688 }
22689+
22690+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
22691+ ch->info.qs_handle[i] = bw_data.qs_handles[i];
22692+
22693+ return 0;
22694 }
22695
22696-#ifdef CONFIG_I40E_DCB
22697 /**
22698- * i40e_dcb_need_reconfig - Check if DCB needs reconfig
22699- * @pf: board private structure
22700- * @old_cfg: current DCB config
22701- * @new_cfg: new DCB config
22702+ * i40e_channel_config_tx_ring - config TX ring associated with new channel
22703+ * @pf: ptr to PF device
22704+ * @vsi: the VSI being setup
22705+ * @ch: ptr to channel structure
22706+ *
22707+ * Configure the TX rings associated with the channel (VSI), since the
22708+ * queues are being taken from the parent VSI.
22709 **/
22710-bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
22711- struct i40e_dcbx_config *old_cfg,
22712- struct i40e_dcbx_config *new_cfg)
22713+static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
22714+ struct i40e_vsi *vsi,
22715+ struct i40e_channel *ch)
22716 {
22717- bool need_reconfig = false;
22718-
22719- /* Check if ETS configuration has changed */
22720- if (memcmp(&new_cfg->etscfg,
22721- &old_cfg->etscfg,
22722- sizeof(new_cfg->etscfg))) {
22723- /* If Priority Table has changed reconfig is needed */
22724- if (memcmp(&new_cfg->etscfg.prioritytable,
22725- &old_cfg->etscfg.prioritytable,
22726- sizeof(new_cfg->etscfg.prioritytable))) {
22727- need_reconfig = true;
22728- dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
22729- }
22730-
22731- if (memcmp(&new_cfg->etscfg.tcbwtable,
22732- &old_cfg->etscfg.tcbwtable,
22733- sizeof(new_cfg->etscfg.tcbwtable)))
22734- dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
22735+ i40e_status ret;
22736+ int i;
22737+ u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
22738
22739- if (memcmp(&new_cfg->etscfg.tsatable,
22740- &old_cfg->etscfg.tsatable,
22741- sizeof(new_cfg->etscfg.tsatable)))
22742- dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
22743+ /* Enable ETS TCs with equal BW Share for now across all VSIs */
22744+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
22745+ if (ch->enabled_tc & BIT(i))
22746+ bw_share[i] = 1;
22747 }
22748
22749- /* Check if PFC configuration has changed */
22750- if (memcmp(&new_cfg->pfc,
22751- &old_cfg->pfc,
22752- sizeof(new_cfg->pfc))) {
22753- need_reconfig = true;
22754- dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
22755+ /* configure BW for new VSI */
22756+ ret = i40e_channel_config_bw(vsi, ch, bw_share);
22757+ if (ret) {
22758+ dev_info(&vsi->back->pdev->dev,
22759+ "Failed configuring TC map %d for channel (seid %u)\n",
22760+ ch->enabled_tc, ch->seid);
22761+ return ret;
22762 }
22763
22764- /* Check if APP Table has changed */
22765- if (memcmp(&new_cfg->app,
22766- &old_cfg->app,
22767- sizeof(new_cfg->app))) {
22768- need_reconfig = true;
22769- dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
22770+ for (i = 0; i < ch->num_queue_pairs; i++) {
22771+ struct i40e_ring *tx_ring, *rx_ring;
22772+ u16 pf_q;
22773+
22774+ pf_q = ch->base_queue + i;
22775+
22776+		/* Get the TX ring ptr of the main VSI, to re-set up the TX
22777+		 * queue context
22778+ */
22779+ tx_ring = vsi->tx_rings[pf_q];
22780+ tx_ring->ch = ch;
22781+
22782+ /* Get the RX ring ptr */
22783+ rx_ring = vsi->rx_rings[pf_q];
22784+ rx_ring->ch = ch;
22785 }
22786
22787- dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
22788- return need_reconfig;
22789+ return 0;
22790 }
22791
22792 /**
22793- * i40e_handle_lldp_event - Handle LLDP Change MIB event
22794- * @pf: board private structure
22795- * @e: event info posted on ARQ
22796+ * i40e_setup_hw_channel - setup new channel
22797+ * @pf: ptr to PF device
22798+ * @vsi: the VSI being setup
22799+ * @ch: ptr to channel structure
22800+ * @uplink_seid: underlying HW switching element (VEB) ID
22801+ * @type: type of channel to be created (VMDq2/VF)
22802+ *
22803+ * Setup new channel (VSI) based on specified type (VMDq2/VF)
22804+ * and configures TX rings accordingly
22805 **/
22806-static int i40e_handle_lldp_event(struct i40e_pf *pf,
22807- struct i40e_arq_event_info *e)
22808+static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
22809+ struct i40e_vsi *vsi,
22810+ struct i40e_channel *ch,
22811+ u16 uplink_seid, u8 type)
22812 {
22813- struct i40e_aqc_lldp_get_mib *mib =
22814- (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
22815- struct i40e_hw *hw = &pf->hw;
22816- struct i40e_dcbx_config tmp_dcbx_cfg;
22817- bool need_reconfig = false;
22818- int ret = 0;
22819- u8 type;
22820+ int ret;
22821
22822- /* Not DCB capable or capability disabled */
22823- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
22824+ ch->initialized = false;
22825+ ch->base_queue = vsi->next_base_queue;
22826+ ch->type = type;
22827+
22828+ /* Proceed with creation of channel (VMDq2) VSI */
22829+ ret = i40e_add_channel(pf, uplink_seid, ch);
22830+ if (ret) {
22831+ dev_info(&pf->pdev->dev,
22832+ "failed to add_channel using uplink_seid %u\n",
22833+ uplink_seid);
22834 return ret;
22835+ }
22836
22837- /* Ignore if event is not for Nearest Bridge */
22838- type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
22839- & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
22840- dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
22841- if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
22842+ /* Mark the successful creation of channel */
22843+ ch->initialized = true;
22844+
22845+ /* Reconfigure TX queues using QTX_CTL register */
22846+ ret = i40e_channel_config_tx_ring(pf, vsi, ch);
22847+ if (ret) {
22848+ dev_info(&pf->pdev->dev,
22849+ "failed to configure TX rings for channel %u\n",
22850+ ch->seid);
22851 return ret;
22852+ }
22853
22854- /* Check MIB Type and return if event for Remote MIB update */
22855- type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
22856+ /* update 'next_base_queue' */
22857+ vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
22858 dev_dbg(&pf->pdev->dev,
22859- "LLDP event mib type %s\n", type ? "remote" : "local");
22860- if (type == I40E_AQ_LLDP_MIB_REMOTE) {
22861- /* Update the remote cached instance and return */
22862- ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
22863- I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
22864- &hw->remote_dcbx_config);
22865- goto exit;
22866+ "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
22867+ ch->seid, ch->vsi_number, ch->stat_counter_idx,
22868+ ch->num_queue_pairs,
22869+ vsi->next_base_queue);
22870+ return ret;
22871+}
22872+
22873+/**
22874+ * i40e_setup_channel - setup new channel using uplink element
22875+ * @pf: ptr to PF device
22876+ * @vsi: the VSI being setup
22877+ * @ch: ptr to channel structure
22878+ *
22879+ * Setup new channel (VSI) based on specified type (VMDq2/VF)
22880+ * and uplink switching element (uplink_seid)
22881+ **/
22882+static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
22883+ struct i40e_channel *ch)
22884+{
22885+ u8 vsi_type;
22886+ u16 seid;
22887+ int ret;
22888+
22889+ if (vsi->type == I40E_VSI_MAIN) {
22890+ vsi_type = I40E_VSI_VMDQ2;
22891+ } else {
22892+ dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
22893+ vsi->type);
22894+ return false;
22895 }
22896
22897- /* Store the old configuration */
22898- tmp_dcbx_cfg = hw->local_dcbx_config;
22899+ /* underlying switching element */
22900+ seid = pf->vsi[pf->lan_vsi]->uplink_seid;
22901
22902- /* Reset the old DCBx configuration data */
22903- memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
22904- /* Get updated DCBX data from firmware */
22905- ret = i40e_get_dcb_config(&pf->hw);
22906+ /* create channel (VSI), configure TX rings */
22907+ ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
22908 if (ret) {
22909- dev_info(&pf->pdev->dev,
22910- "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
22911- i40e_stat_str(&pf->hw, ret),
22912- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
22913- goto exit;
22914+ dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
22915+ return false;
22916 }
22917
22918- /* No change detected in DCBX configs */
22919- if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
22920- sizeof(tmp_dcbx_cfg))) {
22921- dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
22922- goto exit;
22923+ return ch->initialized ? true : false;
22924+}
22925+
22926+/**
22927+ * i40e_create_queue_channel - function to create channel
22928+ * @vsi: VSI to be configured
22929+ * @ch: ptr to channel (it contains channel specific params)
22930+ *
22931+ * This function creates a channel (VSI) using the num_queues specified by
22932+ * the user and reconfigures RSS if needed.
22933+ **/
22934+int i40e_create_queue_channel(struct i40e_vsi *vsi,
22935+ struct i40e_channel *ch)
22936+{
22937+ struct i40e_pf *pf = vsi->back;
22938+ bool reconfig_rss;
22939+ int err;
22940+
22941+ if (!ch)
22942+ return -EINVAL;
22943+
22944+ if (!ch->num_queue_pairs) {
22945+ dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
22946+ ch->num_queue_pairs);
22947+ return -EINVAL;
22948 }
22949
22950- need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
22951- &hw->local_dcbx_config);
22952+ /* validate user requested num_queues for channel */
22953+ err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
22954+ &reconfig_rss);
22955+ if (err) {
22956+ dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
22957+ ch->num_queue_pairs);
22958+ return -EINVAL;
22959+ }
22960
22961- i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
22962+	/* By default we are in VEPA mode; if this is the first VF/VMDq
22963+	 * VSI to be added, switch to VEB mode.
22964+ */
22965+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
22966+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
22967
22968- if (!need_reconfig)
22969- goto exit;
22970+ if (vsi->type == I40E_VSI_MAIN) {
22971+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
22972+ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
22973+ else
22974+ i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
22975+ }
22976+		/* From now on, for the main VSI the number of queues will be
22977+		 * the value of TC0's queue count
22978+ */
22979+ }
22980
22981- /* Enable DCB tagging only when more than one TC */
22982- if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
22983- pf->flags |= I40E_FLAG_DCB_ENABLED;
22984- else
22985- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
22986+	/* By this time, vsi->cnt_q_avail should be non-zero and
22987+	 * greater than num_queues
22988+ */
22989+ if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
22990+ dev_dbg(&pf->pdev->dev,
22991+ "Error: cnt_q_avail (%u) less than num_queues %d\n",
22992+ vsi->cnt_q_avail, ch->num_queue_pairs);
22993+ return -EINVAL;
22994+ }
22995
22996- set_bit(__I40E_PORT_SUSPENDED, pf->state);
22997- /* Reconfiguration needed quiesce all VSIs */
22998- i40e_pf_quiesce_all_vsi(pf);
22999+ /* reconfig_rss only if vsi type is MAIN_VSI */
23000+ if (reconfig_rss && vsi->type == I40E_VSI_MAIN) {
23001+ err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
23002+ if (err) {
23003+ dev_info(&pf->pdev->dev,
23004+ "Error: unable to reconfig rss for num_queues (%u)\n",
23005+ ch->num_queue_pairs);
23006+ return -EINVAL;
23007+ }
23008+ }
23009
23010- /* Changes in configuration update VEB/VSI */
23011- i40e_dcb_reconfigure(pf);
23012+ if (!i40e_setup_channel(pf, vsi, ch)) {
23013+ dev_info(&pf->pdev->dev, "Failed to setup channel\n");
23014+ return -EINVAL;
23015+ }
23016
23017- ret = i40e_resume_port_tx(pf);
23018+ dev_info(&pf->pdev->dev,
23019+ "Setup channel (id:%u) utilizing num_queues %d\n",
23020+ ch->seid, ch->num_queue_pairs);
23021
23022- clear_bit(__I40E_PORT_SUSPENDED, pf->state);
23023- /* In case of error no point in resuming VSIs */
23024- if (ret)
23025- goto exit;
23026+ /* configure VSI for BW limit */
23027+ if (ch->max_tx_rate) {
23028+ if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
23029+ return -EINVAL;
23030
23031- /* Wait for the PF's queues to be disabled */
23032- ret = i40e_pf_wait_queues_disabled(pf);
23033- if (ret) {
23034- /* Schedule PF reset to recover */
23035- set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
23036- i40e_service_event_schedule(pf);
23037- } else {
23038- i40e_pf_unquiesce_all_vsi(pf);
23039- pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
23040- I40E_FLAG_CLIENT_L2_CHANGE);
23041+ dev_dbg(&pf->pdev->dev,
23042+ "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
23043+ ch->max_tx_rate,
23044+ ch->max_tx_rate / I40E_BW_CREDIT_DIVISOR, ch->seid);
23045 }
23046
23047-exit:
23048- return ret;
23049+ /* in case of VF, this will be main SRIOV VSI */
23050+ ch->parent_vsi = vsi;
23051+
23052+	/* and update the main VSI's count of available queues (cnt_q_avail) */
23053+ vsi->cnt_q_avail -= ch->num_queue_pairs;
23054+
23055+ return 0;
23056 }
23057-#endif /* CONFIG_I40E_DCB */
23058
23059 /**
23060- * i40e_do_reset_safe - Protected reset path for userland calls.
23061- * @pf: board private structure
23062- * @reset_flags: which reset is requested
23063+ * i40e_configure_queue_channels - Add queue channel for the given TCs
23064+ * @vsi: VSI to be configured
23065 *
23066+ * Configures queue channel mapping to the given TCs
23067 **/
23068-void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
23069+static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
23070 {
23071- rtnl_lock();
23072- i40e_do_reset(pf, reset_flags, true);
23073- rtnl_unlock();
23074+ struct i40e_channel *ch;
23075+ int ret = 0, i;
23076+
23077+ /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
23078+ vsi->tc_seid_map[0] = vsi->seid;
23079+ for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
23080+ if (vsi->tc_config.enabled_tc & BIT(i)) {
23081+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
23082+ if (!ch) {
23083+ ret = -ENOMEM;
23084+ goto err_free;
23085+ }
23086+
23087+ INIT_LIST_HEAD(&ch->list);
23088+ ch->num_queue_pairs =
23089+ vsi->tc_config.tc_info[i].qcount;
23090+ ch->base_queue =
23091+ vsi->tc_config.tc_info[i].qoffset;
23092+
23093+ /* Bandwidth limit through tc interface is in bytes/s,
23094+ * change to Mbit/s
23095+ */
23096+ ch->max_tx_rate =
23097+ vsi->mqprio_qopt.max_rate[i] / (1000000 / 8);
23098+
23099+ list_add_tail(&ch->list, &vsi->ch_list);
23100+
23101+ ret = i40e_create_queue_channel(vsi, ch);
23102+ if (ret) {
23103+ dev_err(&vsi->back->pdev->dev,
23104+ "Failed creating queue channel with TC%d: queues %d\n",
23105+ i, ch->num_queue_pairs);
23106+ goto err_free;
23107+ }
23108+ vsi->tc_seid_map[i] = ch->seid;
23109+ }
23110+ }
23111+ return ret;
23112+
23113+err_free:
23114+ i40e_remove_queue_channels(vsi);
23115+ return ret;
23116 }
23117
23118 /**
23119- * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
23120- * @pf: board private structure
23121- * @e: event info posted on ARQ
23122- *
23123- * Handler for LAN Queue Overflow Event generated by the firmware for PF
23124- * and VF queues
23125+ * i40e_validate_mqprio_qopt - validate queue mapping info
23126+ * @vsi: the VSI being configured
23127+ * @mqprio_qopt: queue parameters
23128 **/
23129-static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
23130- struct i40e_arq_event_info *e)
23131+static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
23132+ struct tc_mqprio_qopt_offload *mqprio_qopt)
23133 {
23134- struct i40e_aqc_lan_overflow *data =
23135- (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
23136- u32 queue = le32_to_cpu(data->prtdcb_rupto);
23137- u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
23138- struct i40e_hw *hw = &pf->hw;
23139- struct i40e_vf *vf;
23140- u16 vf_id;
23141+ u64 sum_max_rate = 0;
23142+ int i;
23143
23144- dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
23145- queue, qtx_ctl);
23146+ if (mqprio_qopt->qopt.offset[0] != 0 ||
23147+ mqprio_qopt->qopt.num_tc < 1 ||
23148+ mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
23149+ return -EINVAL;
23150+ for (i = 0; ; i++) {
23151+ if (!mqprio_qopt->qopt.count[i])
23152+ return -EINVAL;
23153+ if (mqprio_qopt->min_rate[i]) {
23154+ dev_err(&vsi->back->pdev->dev,
23155+ "Invalid min tx rate (greater than 0) specified\n");
23156+ return -EINVAL;
23157+ }
23158+ sum_max_rate += (mqprio_qopt->max_rate[i] / (1000000 / 8));
23159
23160- /* Queue belongs to VF, find the VF and issue VF reset */
23161- if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
23162- >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
23163- vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
23164- >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
23165- vf_id -= hw->func_caps.vf_base_id;
23166- vf = &pf->vf[vf_id];
23167- i40e_vc_notify_vf_reset(vf);
23168- /* Allow VF to process pending reset notification */
23169- msleep(20);
23170- i40e_reset_vf(vf, false);
23171+ if (i >= mqprio_qopt->qopt.num_tc - 1)
23172+ break;
23173+ if (mqprio_qopt->qopt.offset[i + 1] !=
23174+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
23175+ return -EINVAL;
23176+ }
23177+ if (vsi->num_queue_pairs <
23178+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
23179+ dev_err(&vsi->back->pdev->dev,
23180+ "Failed to create traffic channel, insufficient number of queues.\n");
23181+ return -EINVAL;
23182+ }
23183+ if (sum_max_rate > i40e_get_link_speed(vsi)) {
23184+ dev_err(&vsi->back->pdev->dev,
23185+ "Invalid max tx rate specified\n");
23186+ return -EINVAL;
23187 }
23188+ return 0;
23189 }
23190
23191 /**
23192- * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
23193- * @pf: board private structure
23194+ * i40e_vsi_set_default_tc_config - set default values for tc configuration
23195+ * @vsi: the VSI being configured
23196 **/
23197-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
23198+static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
23199 {
23200- u32 val, fcnt_prog;
23201+ u16 qcount;
23202+ int i;
23203
23204- val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
23205- fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
23206- return fcnt_prog;
23207+ /* Only TC0 is enabled */
23208+ vsi->tc_config.numtc = 1;
23209+ vsi->tc_config.enabled_tc = 1;
23210+ qcount = min_t(int, vsi->alloc_queue_pairs,
23211+ i40e_pf_get_max_q_per_tc(vsi->back));
23212+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
23213+		/* For a TC that is not enabled, set the offset to the default
23214+ * queue and allocate one queue for the given TC.
23215+ */
23216+ vsi->tc_config.tc_info[i].qoffset = 0;
23217+ if (i == 0)
23218+ vsi->tc_config.tc_info[i].qcount = qcount;
23219+ else
23220+ vsi->tc_config.tc_info[i].qcount = 1;
23221+ vsi->tc_config.tc_info[i].netdev_tc = 0;
23222+ }
23223 }
23224
23225 /**
23226- * i40e_get_current_fd_count - Get total FD filters programmed for this PF
23227- * @pf: board private structure
23228+ * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
23229+ * @vsi: PF main vsi
23230+ * @seid: seid of main or channel VSIs
23231+ *
23232+ * Rebuilds cloud filters associated with main VSI and channel VSIs if they
23233+ * existed before reset
23234 **/
23235-u32 i40e_get_current_fd_count(struct i40e_pf *pf)
23236+static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
23237 {
23238- u32 val, fcnt_prog;
23239+ struct i40e_cloud_filter *cfilter;
23240+ struct i40e_pf *pf = vsi->back;
23241+ struct hlist_node *node;
23242+ i40e_status ret;
23243
23244- val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
23245- fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
23246- ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
23247- I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
23248- return fcnt_prog;
23249+ /* Add cloud filters back if they exist */
23250+ hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
23251+ cloud_node) {
23252+ if (cfilter->seid != seid)
23253+ continue;
23254+
23255+ if (cfilter->dst_port)
23256+ ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
23257+ true);
23258+ else
23259+ ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
23260+
23261+ if (ret) {
23262+ dev_dbg(&pf->pdev->dev,
23263+ "Failed to rebuild cloud filter, err %s aq_err %s\n",
23264+ i40e_stat_str(&pf->hw, ret),
23265+ i40e_aq_str(&pf->hw,
23266+ pf->hw.aq.asq_last_status));
23267+ return ret;
23268+ }
23269+ }
23270+ return 0;
23271 }
23272
23273 /**
23274- * i40e_get_global_fd_count - Get total FD filters programmed on device
23275- * @pf: board private structure
23276+ * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
23277+ * @vsi: PF main vsi
23278+ *
23279+ * Rebuilds channel VSIs if they existed before reset
23280 **/
23281-u32 i40e_get_global_fd_count(struct i40e_pf *pf)
23282+static int i40e_rebuild_channels(struct i40e_vsi *vsi)
23283 {
23284- u32 val, fcnt_prog;
23285+ struct i40e_channel *ch, *ch_tmp;
23286+ i40e_status ret;
23287
23288- val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
23289- fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
23290- ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
23291- I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
23292- return fcnt_prog;
23293+ if (list_empty(&vsi->ch_list))
23294+ return 0;
23295+
23296+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
23297+ if (!ch->initialized)
23298+ break;
23299+ /* Proceed with creation of channel (VMDq2) VSI */
23300+ ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
23301+ if (ret) {
23302+ dev_info(&vsi->back->pdev->dev,
23303+ "failed to rebuild channels using uplink_seid %u\n",
23304+ vsi->uplink_seid);
23305+ return ret;
23306+ }
23307+ if (ch->max_tx_rate) {
23308+ if (i40e_set_bw_limit(vsi, ch->seid,
23309+ ch->max_tx_rate))
23310+ return -EINVAL;
23311+
23312+ dev_dbg(&vsi->back->pdev->dev,
23313+ "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
23314+ ch->max_tx_rate,
23315+ ch->max_tx_rate / I40E_BW_CREDIT_DIVISOR,
23316+ ch->seid);
23317+ }
23318+ ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
23319+ if (ret) {
23320+ dev_dbg(&vsi->back->pdev->dev,
23321+ "Failed to rebuild cloud filters for channel VSI %u\n",
23322+ ch->seid);
23323+ return ret;
23324+ }
23325+ }
23326+ return 0;
23327 }
23328+#endif /* __TC_MQPRIO_MODE_MAX */
23329
23330+#ifdef HAVE_SETUP_TC
23331 /**
23332- * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
23333- * @pf: board private structure
23334+ * i40e_setup_tc - configure multiple traffic classes
23335+ * @netdev: net device to configure
23336+ * @type_data: tc offload data
23337 **/
23338-void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
23339+#ifdef __TC_MQPRIO_MODE_MAX
23340+static int i40e_setup_tc(struct net_device *netdev, void *type_data)
23341+#else
23342+static int i40e_setup_tc(struct net_device *netdev, u8 tc)
23343+#endif
23344 {
23345- struct i40e_fdir_filter *filter;
23346- u32 fcnt_prog, fcnt_avail;
23347- struct hlist_node *node;
23348+#ifdef __TC_MQPRIO_MODE_MAX
23349+ struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
23350+#endif
23351+ struct i40e_netdev_priv *np = netdev_priv(netdev);
23352+ struct i40e_vsi *vsi = np->vsi;
23353+ struct i40e_pf *pf = vsi->back;
23354+ u8 enabled_tc = 0, num_tc;
23355+#ifdef __TC_MQPRIO_MODE_MAX
23356+ bool need_reset = false;
23357+ int old_queue_pairs;
23358+#endif
23359+ int ret = -EINVAL;
23360+#ifdef __TC_MQPRIO_MODE_MAX
23361+ u16 mode;
23362+ u8 hw;
23363+#endif
23364+ int i;
23365
23366- if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
23367- return;
23368+#ifdef __TC_MQPRIO_MODE_MAX
23369+ old_queue_pairs = vsi->num_queue_pairs;
23370+#endif
23371+ /* Check if MFP enabled */
23372+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
23373+ netdev_info(netdev,
23374+ "Configuring TC not supported in MFP mode\n");
23375+ goto exit;
23376+ }
23377
23378- /* Check if we have enough room to re-enable FDir SB capability. */
23379- fcnt_prog = i40e_get_global_fd_count(pf);
23380- fcnt_avail = pf->fdir_pf_filter_count;
23381- if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
23382- (pf->fd_add_err == 0) ||
23383- (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
23384- if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
23385- pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
23386- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
23387- (I40E_DEBUG_FD & pf->hw.debug_mask))
23388- dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
23389- }
23390+#ifndef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
23391+ /* Check whether tc count is within enabled limit */
23392+ if (tc > i40e_pf_get_num_tc(pf)) {
23393+ netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
23394+ goto exit;
23395+ }
23396+#endif
23397+
23398+#ifdef __TC_MQPRIO_MODE_MAX
23399+ num_tc = mqprio_qopt->qopt.num_tc;
23400+ hw = mqprio_qopt->qopt.hw;
23401+ mode = mqprio_qopt->mode;
23402+ if (!hw) {
23403+ pf->flags &= ~I40E_FLAG_TC_MQPRIO;
23404+ memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
23405+ goto config_tc;
23406 }
23407
23408- /* We should wait for even more space before re-enabling ATR.
23409- * Additionally, we cannot enable ATR as long as we still have TCP SB
23410- * rules active.
23411- */
23412- if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
23413- (pf->fd_tcp4_filter_cnt == 0)) {
23414- if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
23415- pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
23416- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
23417- (I40E_DEBUG_FD & pf->hw.debug_mask))
23418- dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
23419+ switch (mode) {
23420+ case TC_MQPRIO_MODE_DCB:
23421+ pf->flags &= ~I40E_FLAG_TC_MQPRIO;
23422+
23423+ /* Check if DCB enabled to continue */
23424+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
23425+ netdev_info(netdev,
23426+ "DCB is not enabled for adapter\n");
23427+ return ret;
23428+ }
23429+
23430+ /* Check whether tc count is within enabled limit */
23431+ if (num_tc > i40e_pf_get_num_tc(pf)) {
23432+ netdev_info(netdev,
23433+ "TC count greater than enabled on link for adapter\n");
23434+ return ret;
23435+ }
23436+ break;
23437+ case TC_MQPRIO_MODE_CHANNEL:
23438+ if (pf->flags & I40E_FLAG_DCB_ENABLED) {
23439+ netdev_info(netdev,
23440+ "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
23441+ return ret;
23442 }
23443+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
23444+ return ret;
23445+ ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
23446+ if (ret)
23447+ return ret;
23448+ memcpy(&vsi->mqprio_qopt, mqprio_qopt,
23449+ sizeof(*mqprio_qopt));
23450+ pf->flags |= I40E_FLAG_TC_MQPRIO;
23451+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
23452+ break;
23453+ default:
23454+ return -EINVAL;
23455 }
23456
23457- /* if hw had a problem adding a filter, delete it */
23458- if (pf->fd_inv > 0) {
23459- hlist_for_each_entry_safe(filter, node,
23460- &pf->fdir_filter_list, fdir_node) {
23461- if (filter->fd_id == pf->fd_inv) {
23462- hlist_del(&filter->fdir_node);
23463- kfree(filter);
23464- pf->fdir_pf_active_filters--;
23465+config_tc:
23466+#else
23467+ /* Check if DCB enabled to continue */
23468+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
23469+ netdev_info(netdev, "DCB is not enabled for adapter\n");
23470+ goto exit;
23471+ }
23472+ num_tc = tc;
23473+#endif
23474+ /* Generate TC map for number of tc requested */
23475+ for (i = 0; i < num_tc; i++)
23476+ enabled_tc |= BIT(i);
23477+
23478+ /* Requesting same TC configuration as already enabled */
23479+#ifdef __TC_MQPRIO_MODE_MAX
23480+ if (enabled_tc == vsi->tc_config.enabled_tc &&
23481+ mode != TC_MQPRIO_MODE_CHANNEL)
23482+ return 0;
23483+#else
23484+ if (enabled_tc == vsi->tc_config.enabled_tc)
23485+ return 0;
23486+#endif
23487+
23488+ /* Quiesce VSI queues */
23489+ i40e_quiesce_vsi(vsi);
23490+
23491+#ifdef __TC_MQPRIO_MODE_MAX
23492+ if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
23493+ i40e_remove_queue_channels(vsi);
23494+#endif
23495+
23496+ /* Configure VSI for enabled TCs */
23497+ ret = i40e_vsi_config_tc(vsi, enabled_tc);
23498+ if (ret) {
23499+ netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
23500+ vsi->seid);
23501+#ifdef __TC_MQPRIO_MODE_MAX
23502+ need_reset = true;
23503+#endif
23504+ goto exit;
23505+#ifdef __TC_MQPRIO_MODE_MAX
23506+ } else if (enabled_tc &&
23507+ (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
23508+ netdev_info(netdev,
23509+ "Failed to create channel. Override queues (%u) not power of 2\n",
23510+ vsi->tc_config.tc_info[0].qcount);
23511+ pf->flags &= ~I40E_FLAG_TC_MQPRIO;
23512+ vsi->num_queue_pairs = old_queue_pairs;
23513+ ret = -EINVAL;
23514+ need_reset = true;
23515+ goto exit;
23516+#endif
23517+ }
23518+
23519+ dev_info(&vsi->back->pdev->dev,
23520+ "Setup channel (id:%u) utilizing num_queues %d\n",
23521+ vsi->seid, vsi->tc_config.tc_info[0].qcount);
23522+
23523+#ifdef __TC_MQPRIO_MODE_MAX
23524+ if (pf->flags & I40E_FLAG_TC_MQPRIO) {
23525+ if (vsi->mqprio_qopt.max_rate[0]) {
23526+ u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0] /
23527+ (1000000 / 8);
23528+ ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
23529+ if (!ret) {
23530+ dev_dbg(&vsi->back->pdev->dev,
23531+ "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
23532+ max_tx_rate,
23533+ max_tx_rate / I40E_BW_CREDIT_DIVISOR,
23534+ vsi->seid);
23535+ } else {
23536+ need_reset = true;
23537+ goto exit;
23538 }
23539 }
23540+ ret = i40e_configure_queue_channels(vsi);
23541+ if (ret) {
23542+ vsi->num_queue_pairs = old_queue_pairs;
23543+ netdev_info(netdev,
23544+ "Failed configuring queue channels\n");
23545+ need_reset = true;
23546+ goto exit;
23547+ }
23548+ }
23549+
23550+#endif
23551+
23552+exit:
23553+#ifdef __TC_MQPRIO_MODE_MAX
23554+ /* Reset the configuration data to defaults, only TC0 is enabled */
23555+ if (need_reset) {
23556+ i40e_vsi_set_default_tc_config(vsi);
23557+ need_reset = false;
23558 }
23559+#endif
23560+ /* Unquiesce VSI */
23561+ i40e_unquiesce_vsi(vsi);
23562+ return ret;
23563 }
23564
23565-#define I40E_MIN_FD_FLUSH_INTERVAL 10
23566-#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
23567+#ifdef __TC_MQPRIO_MODE_MAX
23568 /**
23569- * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
23570- * @pf: board private structure
23571+ * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
23572+ * @vsi: Pointer to VSI
23573+ * @f: Pointer to struct flow_cls_offload
23574+ * @filter: Pointer to cloud filter structure
23575+ *
23576 **/
23577-static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
23578+static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
23579+ struct flow_cls_offload *f,
23580+ struct i40e_cloud_filter *filter)
23581 {
23582- unsigned long min_flush_time;
23583- int flush_wait_retry = 50;
23584- bool disable_atr = false;
23585- int fd_room;
23586- int reg;
23587+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
23588+ struct flow_dissector *dissector = rule->match.dissector;
23589+ u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
23590+ struct i40e_pf *pf = vsi->back;
23591+ u8 field_flags = 0;
23592+
23593+ if (dissector->used_keys &
23594+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
23595+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
23596+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
23597+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
23598+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
23599+#endif
23600+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
23601+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
23602+#ifdef HAVE_TC_FLOWER_ENC
23603+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
23604+#endif
23605+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
23606+ dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
23607+ dissector->used_keys);
23608+ return -EOPNOTSUPP;
23609+ }
23610
23611- if (!time_after(jiffies, pf->fd_flush_timestamp +
23612- (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
23613- return;
23614+#ifdef HAVE_TC_FLOWER_ENC
23615+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
23616+ struct flow_match_enc_keyid match;
23617
23618- /* If the flush is happening too quick and we have mostly SB rules we
23619- * should not re-enable ATR for some time.
23620- */
23621- min_flush_time = pf->fd_flush_timestamp +
23622- (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
23623- fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
23624+ flow_rule_match_enc_keyid(rule, &match);
23625+ if (match.mask->keyid != 0)
23626+ field_flags |= I40E_CLOUD_FIELD_TEN_ID;
23627
23628- if (!(time_after(jiffies, min_flush_time)) &&
23629- (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
23630- if (I40E_DEBUG_FD & pf->hw.debug_mask)
23631- dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
23632- disable_atr = true;
23633+ filter->tenant_id = be32_to_cpu(match.key->keyid);
23634 }
23635+#endif /* HAVE_TC_FLOWER_ENC */
23636
23637- pf->fd_flush_timestamp = jiffies;
23638- pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
23639- /* flush all filters */
23640- wr32(&pf->hw, I40E_PFQF_CTL_1,
23641- I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
23642- i40e_flush(&pf->hw);
23643- pf->fd_flush_cnt++;
23644- pf->fd_add_err = 0;
23645- do {
23646- /* Check FD flush status every 5-6msec */
23647- usleep_range(5000, 6000);
23648- reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
23649- if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
23650- break;
23651- } while (flush_wait_retry--);
23652- if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
23653- dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
23654- } else {
23655- /* replay sideband filters */
23656- i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
23657- if (!disable_atr && !pf->fd_tcp4_filter_cnt)
23658- pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
23659- clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
23660- if (I40E_DEBUG_FD & pf->hw.debug_mask)
23661- dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
23662+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
23663+ struct flow_match_basic match;
23664+
23665+ flow_rule_match_basic(rule, &match);
23666+ n_proto_key = ntohs(match.key->n_proto);
23667+ n_proto_mask = ntohs(match.mask->n_proto);
23668+
23669+ if (n_proto_key == ETH_P_ALL) {
23670+ n_proto_key = 0;
23671+ n_proto_mask = 0;
23672+ }
23673+ filter->n_proto = n_proto_key & n_proto_mask;
23674+ filter->ip_proto = match.key->ip_proto;
23675 }
23676-}
23677
23678-/**
23679- * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
23680- * @pf: board private structure
23681- **/
23682-u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
23683-{
23684- return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
23685-}
23686+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
23687+ struct flow_match_eth_addrs match;
23688
23689-/* We can see up to 256 filter programming desc in transit if the filters are
23690- * being applied really fast; before we see the first
23691- * filter miss error on Rx queue 0. Accumulating enough error messages before
23692- * reacting will make sure we don't cause flush too often.
23693- */
23694-#define I40E_MAX_FD_PROGRAM_ERROR 256
23695+ flow_rule_match_eth_addrs(rule, &match);
23696
23697-/**
23698- * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
23699- * @pf: board private structure
23700- **/
23701-static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
23702-{
23703+ /* use is_broadcast and is_zero to check for all 0xf or 0 */
23704+ if (!is_zero_ether_addr(match.mask->dst)) {
23705+ if (is_broadcast_ether_addr(match.mask->dst)) {
23706+ field_flags |= I40E_CLOUD_FIELD_OMAC;
23707+ } else {
23708+ dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
23709+ match.mask->dst);
23710+ return I40E_ERR_CONFIG;
23711+ }
23712+ }
23713
23714- /* if interface is down do nothing */
23715- if (test_bit(__I40E_DOWN, pf->state))
23716- return;
23717+ if (!is_zero_ether_addr(match.mask->src)) {
23718+ if (is_broadcast_ether_addr(match.mask->src)) {
23719+ field_flags |= I40E_CLOUD_FIELD_IMAC;
23720+ } else {
23721+ dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
23722+ match.mask->src);
23723+ return I40E_ERR_CONFIG;
23724+ }
23725+ }
23726+ ether_addr_copy(filter->dst_mac, match.key->dst);
23727+ ether_addr_copy(filter->src_mac, match.key->src);
23728+ }
23729
23730- if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
23731- i40e_fdir_flush_and_replay(pf);
23732+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
23733+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
23734+ struct flow_match_vlan match;
23735
23736- i40e_fdir_check_and_reenable(pf);
23737+ flow_rule_match_vlan(rule, &match);
23738+ if (match.mask->vlan_id) {
23739+ if (match.mask->vlan_id == VLAN_VID_MASK) {
23740+ field_flags |= I40E_CLOUD_FIELD_IVLAN;
23741
23742-}
23743+ } else {
23744+ dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
23745+ match.mask->vlan_id);
23746+ return I40E_ERR_CONFIG;
23747+ }
23748+ }
23749
23750-/**
23751- * i40e_vsi_link_event - notify VSI of a link event
23752- * @vsi: vsi to be notified
23753- * @link_up: link up or down
23754- **/
23755-static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
23756-{
23757- if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
23758- return;
23759+ filter->vlan_id = cpu_to_be16(match.key->vlan_id);
23760+ }
23761+#endif /* !HAVE_TC_FLOWER_VLAN_IN_TAGS */
23762
23763- switch (vsi->type) {
23764- case I40E_VSI_MAIN:
23765- if (!vsi->netdev || !vsi->netdev_registered)
23766- break;
23767+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
23768+ struct flow_match_control match;
23769
23770- if (link_up) {
23771- netif_carrier_on(vsi->netdev);
23772- netif_tx_wake_all_queues(vsi->netdev);
23773- } else {
23774- netif_carrier_off(vsi->netdev);
23775- netif_tx_stop_all_queues(vsi->netdev);
23776+ flow_rule_match_control(rule, &match);
23777+ addr_type = match.key->addr_type;
23778+ }
23779+
23780+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
23781+ struct flow_match_ipv4_addrs match;
23782+
23783+ flow_rule_match_ipv4_addrs(rule, &match);
23784+ if (match.mask->dst) {
23785+ if (match.mask->dst == cpu_to_be32(0xffffffff)) {
23786+ field_flags |= I40E_CLOUD_FIELD_IIP;
23787+ } else {
23788+ dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
23789+ &match.mask->dst);
23790+ return I40E_ERR_CONFIG;
23791+ }
23792 }
23793- break;
23794
23795- case I40E_VSI_SRIOV:
23796- case I40E_VSI_VMDQ2:
23797- case I40E_VSI_CTRL:
23798- case I40E_VSI_IWARP:
23799- case I40E_VSI_MIRROR:
23800- default:
23801- /* there is no notification for other VSIs */
23802- break;
23803+ if (match.mask->src) {
23804+ if (match.mask->src == cpu_to_be32(0xffffffff)) {
23805+ field_flags |= I40E_CLOUD_FIELD_IIP;
23806+ } else {
23807+ dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
23808+ &match.mask->src);
23809+ return I40E_ERR_CONFIG;
23810+ }
23811+ }
23812+
23813+ if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
23814+ dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
23815+ return I40E_ERR_CONFIG;
23816+ }
23817+ filter->dst_ipv4 = match.key->dst;
23818+ filter->src_ipv4 = match.key->src;
23819+ }
23820+
23821+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
23822+ struct flow_match_ipv6_addrs match;
23823+
23824+ flow_rule_match_ipv6_addrs(rule, &match);
23825+
23826+		/* src and dest IPv6 addresses should not be LOOPBACK
23827+ * (0:0:0:0:0:0:0:1), which can be represented as ::1
23828+ */
23829+ if (ipv6_addr_loopback(&match.key->dst) ||
23830+ ipv6_addr_loopback(&match.key->src)) {
23831+ dev_err(&pf->pdev->dev,
23832+ "Bad ipv6, addr is LOOPBACK\n");
23833+ return I40E_ERR_CONFIG;
23834+ }
23835+ if (!ipv6_addr_any(&match.mask->dst) ||
23836+ !ipv6_addr_any(&match.mask->src))
23837+ field_flags |= I40E_CLOUD_FIELD_IIP;
23838+
23839+ memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
23840+ sizeof(filter->src_ipv6));
23841+ memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
23842+ sizeof(filter->dst_ipv6));
23843+ }
23844+
23845+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
23846+ struct flow_match_ports match;
23847+
23848+ flow_rule_match_ports(rule, &match);
23849+ if (match.mask->src) {
23850+ if (match.mask->src == cpu_to_be16(0xffff)) {
23851+ field_flags |= I40E_CLOUD_FIELD_IIP;
23852+ } else {
23853+ dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
23854+ be16_to_cpu(match.mask->src));
23855+ return I40E_ERR_CONFIG;
23856+ }
23857+ }
23858+
23859+ if (match.mask->dst) {
23860+ if (match.mask->dst == cpu_to_be16(0xffff)) {
23861+ field_flags |= I40E_CLOUD_FIELD_IIP;
23862+ } else {
23863+ dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
23864+ be16_to_cpu(match.mask->dst));
23865+ return I40E_ERR_CONFIG;
23866+ }
23867+ }
23868+
23869+ filter->dst_port = match.key->dst;
23870+ filter->src_port = match.key->src;
23871+
23872+ switch (filter->ip_proto) {
23873+ case IPPROTO_TCP:
23874+ case IPPROTO_UDP:
23875+ break;
23876+ default:
23877+ dev_err(&pf->pdev->dev,
23878+ "Only UDP and TCP transport are supported\n");
23879+ return -EINVAL;
23880+ }
23881 }
23882+ filter->flags = field_flags;
23883+ return 0;
23884 }
23885
23886 /**
23887- * i40e_veb_link_event - notify elements on the veb of a link event
23888- * @veb: veb to be notified
23889- * @link_up: link up or down
23890+ * i40e_handle_tclass: Forward to a traffic class on the device
23891+ * @vsi: Pointer to VSI
23892+ * @tc: traffic class index on the device
23893+ * @filter: Pointer to cloud filter structure
23894+ *
23895 **/
23896-static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
23897+static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
23898+ struct i40e_cloud_filter *filter)
23899 {
23900- struct i40e_pf *pf;
23901- int i;
23902-
23903- if (!veb || !veb->pf)
23904- return;
23905- pf = veb->pf;
23906-
23907- /* depth first... */
23908- for (i = 0; i < I40E_MAX_VEB; i++)
23909- if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
23910- i40e_veb_link_event(pf->veb[i], link_up);
23911+ struct i40e_channel *ch, *ch_tmp;
23912
23913- /* ... now the local VSIs */
23914- for (i = 0; i < pf->num_alloc_vsi; i++)
23915- if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
23916- i40e_vsi_link_event(pf->vsi[i], link_up);
23917+ /* direct to a traffic class on the same device */
23918+ if (tc == 0) {
23919+ filter->seid = vsi->seid;
23920+ return 0;
23921+ } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
23922+ if (!filter->dst_port) {
23923+ dev_err(&vsi->back->pdev->dev,
23924+ "Specify destination port to direct to traffic class that is not default\n");
23925+ return -EINVAL;
23926+ }
23927+ if (list_empty(&vsi->ch_list))
23928+ return -EINVAL;
23929+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
23930+ list) {
23931+ if (ch->seid == vsi->tc_seid_map[tc])
23932+ filter->seid = ch->seid;
23933+ }
23934+ return 0;
23935+ }
23936+ dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
23937+ return -EINVAL;
23938 }
23939
23940 /**
23941- * i40e_link_event - Update netif_carrier status
23942- * @pf: board private structure
23943+ * i40e_configure_clsflower - Configure tc flower filters
23944+ * @vsi: Pointer to VSI
23945+ * @cls_flower: Pointer to struct flow_cls_offload
23946+ *
23947 **/
23948-static void i40e_link_event(struct i40e_pf *pf)
23949+static int i40e_configure_clsflower(struct i40e_vsi *vsi,
23950+ struct flow_cls_offload *cls_flower)
23951 {
23952- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
23953- u8 new_link_speed, old_link_speed;
23954- i40e_status status;
23955- bool new_link, old_link;
23956-
23957- /* save off old link status information */
23958- pf->hw.phy.link_info_old = pf->hw.phy.link_info;
23959+ int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
23960+ struct i40e_cloud_filter *filter = NULL;
23961+ struct i40e_pf *pf = vsi->back;
23962+ int err = 0;
23963
23964- /* set this to force the get_link_status call to refresh state */
23965- pf->hw.phy.get_link_info = true;
23966+ if (tc < 0) {
23967+ dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
23968+ return -EINVAL;
23969+ }
23970
23971- old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
23972+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
23973+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
23974+ return -EBUSY;
23975
23976- status = i40e_get_link_status(&pf->hw, &new_link);
23977+ if (pf->fdir_pf_active_filters ||
23978+ (!hlist_empty(&pf->fdir_filter_list))) {
23979+ dev_err(&vsi->back->pdev->dev,
23980+ "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
23981+ return -EOPNOTSUPP;
23982+ }
23983
23984- /* On success, disable temp link polling */
23985- if (status == I40E_SUCCESS) {
23986- if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
23987- pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
23988- } else {
23989- /* Enable link polling temporarily until i40e_get_link_status
23990- * returns I40E_SUCCESS
23991- */
23992- pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
23993- dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
23994- status);
23995- return;
23996+ if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
23997+ dev_err(&vsi->back->pdev->dev,
23998+ "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
23999+ vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
24000+ vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
24001 }
24002
24003- old_link_speed = pf->hw.phy.link_info_old.link_speed;
24004- new_link_speed = pf->hw.phy.link_info.link_speed;
24005+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
24006+ if (!filter)
24007+ return -ENOMEM;
24008
24009- if (new_link == old_link &&
24010- new_link_speed == old_link_speed &&
24011- (test_bit(__I40E_VSI_DOWN, vsi->state) ||
24012- new_link == netif_carrier_ok(vsi->netdev)))
24013- return;
24014+ filter->cookie = cls_flower->cookie;
24015
24016- if (!test_bit(__I40E_VSI_DOWN, vsi->state))
24017- i40e_print_link_message(vsi, new_link);
24018+ err = i40e_parse_cls_flower(vsi, cls_flower, filter);
24019+ if (err < 0)
24020+ goto err;
24021
24022- /* Notify the base of the switch tree connected to
24023- * the link. Floating VEBs are not notified.
24024- */
24025- if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
24026- i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
24027+ err = i40e_handle_tclass(vsi, tc, filter);
24028+ if (err < 0)
24029+ goto err;
24030+
24031+ /* Add cloud filter */
24032+ if (filter->dst_port)
24033+ err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
24034 else
24035- i40e_vsi_link_event(vsi, new_link);
24036+ err = i40e_add_del_cloud_filter(vsi, filter, true);
24037
24038- if (pf->vf)
24039- i40e_vc_notify_link_state(pf);
24040+ if (err) {
24041+ dev_err(&pf->pdev->dev,
24042+ "Failed to add cloud filter, err %s\n",
24043+ i40e_stat_str(&pf->hw, err));
24044+ goto err;
24045+ }
24046
24047- if (pf->flags & I40E_FLAG_PTP)
24048- i40e_ptp_set_increment(pf);
24049+ /* add filter to the ordered list */
24050+ INIT_HLIST_NODE(&filter->cloud_node);
24051+
24052+ hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
24053+
24054+ pf->num_cloud_filters++;
24055+
24056+ return err;
24057+err:
24058+ kfree(filter);
24059+ return err;
24060 }
24061
24062 /**
24063- * i40e_watchdog_subtask - periodic checks not using event driven response
24064- * @pf: board private structure
24065+ * i40e_find_cloud_filter - Find the cloud filter in the list
24066+ * @vsi: Pointer to VSI
24067+ * @cookie: filter specific cookie
24068+ *
24069 **/
24070-static void i40e_watchdog_subtask(struct i40e_pf *pf)
24071+static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
24072+ unsigned long *cookie)
24073 {
24074- int i;
24075+ struct i40e_cloud_filter *filter = NULL;
24076+ struct hlist_node *node2;
24077
24078- /* if interface is down do nothing */
24079- if (test_bit(__I40E_DOWN, pf->state) ||
24080- test_bit(__I40E_CONFIG_BUSY, pf->state))
24081- return;
24082+ hlist_for_each_entry_safe(filter, node2,
24083+ &vsi->back->cloud_filter_list, cloud_node)
24084+ if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
24085+ return filter;
24086+ return NULL;
24087+}
24088
24089- /* make sure we don't do these things too often */
24090- if (time_before(jiffies, (pf->service_timer_previous +
24091- pf->service_timer_period)))
24092- return;
24093- pf->service_timer_previous = jiffies;
24094+/**
24095+ * i40e_delete_clsflower - Remove tc flower filters
24096+ * @vsi: Pointer to VSI
24097+ * @cls_flower: Pointer to struct flow_cls_offload
24098+ *
24099+ **/
24100+static int i40e_delete_clsflower(struct i40e_vsi *vsi,
24101+ struct flow_cls_offload *cls_flower)
24102+{
24103+ struct i40e_cloud_filter *filter = NULL;
24104+ struct i40e_pf *pf = vsi->back;
24105+ int err = 0;
24106
24107- if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
24108- (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
24109- i40e_link_event(pf);
24110+ filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
24111
24112- /* Update the stats for active netdevs so the network stack
24113- * can look at updated numbers whenever it cares to
24114- */
24115- for (i = 0; i < pf->num_alloc_vsi; i++)
24116- if (pf->vsi[i] && pf->vsi[i]->netdev)
24117- i40e_update_stats(pf->vsi[i]);
24118+ if (!filter)
24119+ return -EINVAL;
24120
24121- if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
24122- /* Update the stats for the active switching components */
24123- for (i = 0; i < I40E_MAX_VEB; i++)
24124- if (pf->veb[i])
24125- i40e_update_veb_stats(pf->veb[i]);
24126+ hash_del(&filter->cloud_node);
24127+
24128+ if (filter->dst_port)
24129+ err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
24130+ else
24131+ err = i40e_add_del_cloud_filter(vsi, filter, false);
24132+
24133+ kfree(filter);
24134+ if (err) {
24135+ dev_err(&pf->pdev->dev,
24136+ "Failed to delete cloud filter, err %s\n",
24137+ i40e_stat_str(&pf->hw, err));
24138+ return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
24139 }
24140
24141- i40e_ptp_rx_hang(pf);
24142- i40e_ptp_tx_hang(pf);
24143+ pf->num_cloud_filters--;
24144+ if (!pf->num_cloud_filters)
24145+ if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
24146+ !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
24147+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
24148+ pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
24149+ pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
24150+ }
24151+ return 0;
24152 }
24153
24154 /**
24155- * i40e_reset_subtask - Set up for resetting the device and driver
24156- * @pf: board private structure
24157+ * i40e_setup_tc_cls_flower - flower classifier offloads
24158+ * @np: net device to configure
24159+ * @cls_flower: pointer to cls flower offload structure
24160 **/
24161-static void i40e_reset_subtask(struct i40e_pf *pf)
24162+static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
24163+ struct flow_cls_offload *cls_flower)
24164 {
24165- u32 reset_flags = 0;
24166+ struct i40e_vsi *vsi = np->vsi;
24167
24168- if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
24169- reset_flags |= BIT(__I40E_REINIT_REQUESTED);
24170- clear_bit(__I40E_REINIT_REQUESTED, pf->state);
24171- }
24172- if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
24173- reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
24174- clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
24175- }
24176- if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
24177- reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
24178- clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
24179- }
24180- if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
24181- reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
24182- clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
24183- }
24184- if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
24185- reset_flags |= BIT(__I40E_DOWN_REQUESTED);
24186- clear_bit(__I40E_DOWN_REQUESTED, pf->state);
24187- }
24188+ if (cls_flower->common.chain_index)
24189+ return -EOPNOTSUPP;
24190
24191- /* If there's a recovery already waiting, it takes
24192- * precedence before starting a new reset sequence.
24193- */
24194- if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
24195- i40e_prep_for_reset(pf, false);
24196- i40e_reset(pf);
24197- i40e_rebuild(pf, false, false);
24198+ switch (cls_flower->command) {
24199+ case FLOW_CLS_REPLACE:
24200+ return i40e_configure_clsflower(vsi, cls_flower);
24201+ case FLOW_CLS_DESTROY:
24202+ return i40e_delete_clsflower(vsi, cls_flower);
24203+ case FLOW_CLS_STATS:
24204+ return -EOPNOTSUPP;
24205+ default:
24206+ return -EOPNOTSUPP;
24207 }
24208+}
24209
24210- /* If we're already down or resetting, just bail */
24211- if (reset_flags &&
24212- !test_bit(__I40E_DOWN, pf->state) &&
24213- !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
24214- i40e_do_reset(pf, reset_flags, false);
24215+static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
24216+ void *cb_priv)
24217+{
24218+ struct i40e_netdev_priv *np = cb_priv;
24219+
24220+ switch (type) {
24221+ case TC_SETUP_CLSFLOWER:
24222+ return i40e_setup_tc_cls_flower(np, type_data);
24223+
24224+ default:
24225+ return -EOPNOTSUPP;
24226 }
24227 }
24228
24229-/**
24230- * i40e_handle_link_event - Handle link event
24231- * @pf: board private structure
24232- * @e: event info posted on ARQ
24233- **/
24234-static void i40e_handle_link_event(struct i40e_pf *pf,
24235- struct i40e_arq_event_info *e)
24236+static LIST_HEAD(i40e_block_cb_list);
24237+#endif
24238+
24239+#ifdef NETIF_F_HW_TC
24240+#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
24241+static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
24242+ void *type_data)
24243+#elif defined(HAVE_NDO_SETUP_TC_CHAIN_INDEX)
24244+static int __i40e_setup_tc(struct net_device *netdev, u32 handle,
24245+ u32 chain_index, __be16 proto,
24246+ struct tc_to_netdev *tc)
24247+#else
24248+static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
24249+ struct tc_to_netdev *tc)
24250+#endif
24251 {
24252- struct i40e_aqc_get_link_status *status =
24253- (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
24254+#ifdef __TC_MQPRIO_MODE_MAX
24255+ struct i40e_netdev_priv *np = netdev_priv(netdev);
24256+#endif
24257+#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
24258+#ifndef __TC_MQPRIO_MODE_MAX
24259+ struct tc_mqprio_qopt *mqprio = type_data;
24260+#endif
24261+#else
24262+#ifdef TC_MQPRIO_HW_OFFLOAD_MAX
24263+ struct tc_mqprio_qopt *mqprio = tc->mqprio;
24264+#endif /* TC_MQPRIO_HW_OFFLOAD_MAX*/
24265+ unsigned int type = tc->type;
24266+#endif /* HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV */
24267+#ifdef __TC_MQPRIO_MODE_MAX
24268+ switch (type) {
24269+ case TC_SETUP_QDISC_MQPRIO:
24270+ return i40e_setup_tc(netdev, type_data);
24271+ case TC_SETUP_BLOCK:
24272+ return flow_block_cb_setup_simple(type_data,
24273+ &i40e_block_cb_list,
24274+ i40e_setup_tc_block_cb,
24275+ np, np, true);
24276+ default:
24277+ return -EOPNOTSUPP;
24278+ }
24279+#else
24280+ if (type != TC_SETUP_QDISC_MQPRIO)
24281+ return -EINVAL;
24282
24283- /* Do a new status request to re-enable LSE reporting
24284- * and load new status information into the hw struct
24285- * This completely ignores any state information
24286- * in the ARQ event info, instead choosing to always
24287- * issue the AQ update link status command.
24288- */
24289- i40e_link_event(pf);
24290+#ifdef TC_MQPRIO_HW_OFFLOAD_MAX
24291+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
24292
24293- /* check for unqualified module, if link is down */
24294- if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
24295- (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
24296- (!(status->link_info & I40E_AQ_LINK_UP)))
24297- dev_err(&pf->pdev->dev,
24298- "The driver failed to link because an unqualified module was detected.\n");
24299+ return i40e_setup_tc(netdev, mqprio->num_tc);
24300+#else
24301+ return i40e_setup_tc(netdev, tc->tc);
24302+#endif /* TC_MQPRIO_HW_OFFLOAD_MAX */
24303+#endif /* __TC_MQPRIO_MODE_MAX */
24304 }
24305+#endif /* NETIF_F_HW_TC */
24306+#endif /* HAVE_SETUP_TC */
24307
24308 /**
24309- * i40e_clean_adminq_subtask - Clean the AdminQ rings
24310- * @pf: board private structure
24311+ * i40e_open - Called when a network interface is made active
24312+ * @netdev: network interface device structure
24313+ *
24314+ * The open entry point is called when a network interface is made
24315+ * active by the system (IFF_UP). At this point all resources needed
24316+ * for transmit and receive operations are allocated, the interrupt
24317+ * handler is registered with the OS, the netdev watchdog subtask is
24318+ * enabled, and the stack is notified that the interface is ready.
24319+ *
24320+ * Returns 0 on success, negative value on failure
24321 **/
24322-static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
24323+int i40e_open(struct net_device *netdev)
24324 {
24325- struct i40e_arq_event_info event;
24326- struct i40e_hw *hw = &pf->hw;
24327- u16 pending, i = 0;
24328- i40e_status ret;
24329- u16 opcode;
24330- u32 oldval;
24331- u32 val;
24332-
24333- /* Do not run clean AQ when PF reset fails */
24334- if (test_bit(__I40E_RESET_FAILED, pf->state))
24335- return;
24336-
24337- /* check for error indications */
24338- val = rd32(&pf->hw, pf->hw.aq.arq.len);
24339- oldval = val;
24340- if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
24341- if (hw->debug_mask & I40E_DEBUG_AQ)
24342- dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
24343- val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
24344- }
24345- if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
24346- if (hw->debug_mask & I40E_DEBUG_AQ)
24347- dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
24348- val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
24349- pf->arq_overflows++;
24350- }
24351- if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
24352- if (hw->debug_mask & I40E_DEBUG_AQ)
24353- dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
24354- val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
24355- }
24356- if (oldval != val)
24357- wr32(&pf->hw, pf->hw.aq.arq.len, val);
24358-
24359- val = rd32(&pf->hw, pf->hw.aq.asq.len);
24360- oldval = val;
24361- if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
24362- if (pf->hw.debug_mask & I40E_DEBUG_AQ)
24363- dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
24364- val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
24365- }
24366- if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
24367- if (pf->hw.debug_mask & I40E_DEBUG_AQ)
24368- dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
24369- val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
24370- }
24371- if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
24372- if (pf->hw.debug_mask & I40E_DEBUG_AQ)
24373- dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
24374- val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
24375- }
24376- if (oldval != val)
24377- wr32(&pf->hw, pf->hw.aq.asq.len, val);
24378-
24379- event.buf_len = I40E_MAX_AQ_BUF_SIZE;
24380- event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
24381- if (!event.msg_buf)
24382- return;
24383+ struct i40e_netdev_priv *np = netdev_priv(netdev);
24384+ struct i40e_vsi *vsi = np->vsi;
24385+ struct i40e_pf *pf = vsi->back;
24386+ int err;
24387
24388- do {
24389- ret = i40e_clean_arq_element(hw, &event, &pending);
24390- if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
24391- break;
24392- else if (ret) {
24393- dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
24394- break;
24395- }
24396+ /* disallow open during test or if eeprom is broken */
24397+ if (test_bit(__I40E_TESTING, pf->state) ||
24398+ test_bit(__I40E_BAD_EEPROM, pf->state))
24399+ return -EBUSY;
24400
24401- opcode = le16_to_cpu(event.desc.opcode);
24402- switch (opcode) {
24403+ netif_carrier_off(netdev);
24404
24405- case i40e_aqc_opc_get_link_status:
24406- i40e_handle_link_event(pf, &event);
24407- break;
24408- case i40e_aqc_opc_send_msg_to_pf:
24409- ret = i40e_vc_process_vf_msg(pf,
24410- le16_to_cpu(event.desc.retval),
24411- le32_to_cpu(event.desc.cookie_high),
24412- le32_to_cpu(event.desc.cookie_low),
24413- event.msg_buf,
24414- event.msg_len);
24415- break;
24416- case i40e_aqc_opc_lldp_update_mib:
24417- dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
24418-#ifdef CONFIG_I40E_DCB
24419- rtnl_lock();
24420- ret = i40e_handle_lldp_event(pf, &event);
24421- rtnl_unlock();
24422-#endif /* CONFIG_I40E_DCB */
24423- break;
24424- case i40e_aqc_opc_event_lan_overflow:
24425- dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
24426- i40e_handle_lan_overflow_event(pf, &event);
24427- break;
24428- case i40e_aqc_opc_send_msg_to_peer:
24429- dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
24430- break;
24431- case i40e_aqc_opc_nvm_erase:
24432- case i40e_aqc_opc_nvm_update:
24433- case i40e_aqc_opc_oem_post_update:
24434- i40e_debug(&pf->hw, I40E_DEBUG_NVM,
24435- "ARQ NVM operation 0x%04x completed\n",
24436- opcode);
24437- break;
24438- default:
24439- dev_info(&pf->pdev->dev,
24440- "ARQ: Unknown event 0x%04x ignored\n",
24441- opcode);
24442- break;
24443- }
24444- } while (i++ < pf->adminq_work_limit);
24445+ if (i40e_force_link_state(pf, true))
24446+ return -EAGAIN;
24447
24448- if (i < pf->adminq_work_limit)
24449- clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
24450+ err = i40e_vsi_open(vsi);
24451+ if (err)
24452+ return err;
24453+ /* configure global TSO hardware offload settings */
24454+ wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
24455+ TCP_FLAG_FIN) >> 16);
24456+ wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
24457+ TCP_FLAG_FIN |
24458+ TCP_FLAG_CWR) >> 16);
24459+ wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
24460
24461- /* re-enable Admin queue interrupt cause */
24462- val = rd32(hw, I40E_PFINT_ICR0_ENA);
24463- val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
24464- wr32(hw, I40E_PFINT_ICR0_ENA, val);
24465- i40e_flush(hw);
24466+#ifdef HAVE_VXLAN_RX_OFFLOAD
24467+#if IS_ENABLED(CONFIG_VXLAN)
24468+ vxlan_get_rx_port(netdev);
24469+#endif
24470+#endif /* HAVE_VXLAN_RX_OFFLOAD */
24471+#ifdef HAVE_GENEVE_RX_OFFLOAD
24472+#if IS_ENABLED(CONFIG_GENEVE)
24473+ if (pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE)
24474+ geneve_get_rx_port(netdev);
24475+#endif
24476+#endif /* HAVE_GENEVE_RX_OFFLOAD */
24477+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
24478+ udp_tunnel_get_rx_info(netdev);
24479+#endif /* HAVE_UDP_ENC_RX_OFFLOAD */
24480
24481- kfree(event.msg_buf);
24482+ return 0;
24483 }
24484
24485 /**
24486- * i40e_verify_eeprom - make sure eeprom is good to use
24487- * @pf: board private structure
24488+ * i40e_vsi_open - Finish initialization and bring up a VSI
24489+ * @vsi: the VSI to open
24490+ *
24491+ * Finish initialization of the VSI.
24492+ *
24493+ * Returns 0 on success, negative value on failure
24494+ *
24495+ * Note: expects to be called while under rtnl_lock()
24496 **/
24497-static void i40e_verify_eeprom(struct i40e_pf *pf)
24498+int i40e_vsi_open(struct i40e_vsi *vsi)
24499 {
24500+ struct i40e_pf *pf = vsi->back;
24501+ char int_name[I40E_INT_NAME_STR_LEN];
24502 int err;
24503+ int i;
24504+ u8 enabled_tc = 0;
24505
24506- err = i40e_diag_eeprom_test(&pf->hw);
24507- if (err) {
24508- /* retry in case of garbage read */
24509- err = i40e_diag_eeprom_test(&pf->hw);
24510- if (err) {
24511- dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
24512- err);
24513- set_bit(__I40E_BAD_EEPROM, pf->state);
24514- }
24515- }
24516+ /* allocate descriptors */
24517+ err = i40e_vsi_setup_tx_resources(vsi);
24518+ if (err)
24519+ goto err_setup_tx;
24520+ err = i40e_vsi_setup_rx_resources(vsi);
24521+ if (err)
24522+ goto err_setup_rx;
24523
24524- if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
24525- dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
24526- clear_bit(__I40E_BAD_EEPROM, pf->state);
24527- }
24528-}
24529+ err = i40e_vsi_configure(vsi);
24530+ if (err)
24531+ goto err_setup_rx;
24532
24533-/**
24534- * i40e_enable_pf_switch_lb
24535- * @pf: pointer to the PF structure
24536- *
24537- * enable switch loop back or die - no point in a return value
24538+ if (vsi->netdev) {
24539+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
24540+ dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
24541+ err = i40e_vsi_request_irq(vsi, int_name);
24542+ if (err)
24543+ goto err_setup_rx;
24544+
24545+ /* If real_num_tx_queues is changed, the tc mapping may no
24546+ * longer be valid. Run the tc reset function with the new
24547+ * number of queues.
24548+ */
24549+#ifdef __TC_MQPRIO_MODE_MAX
24550+ for (i = 0; i < vsi->mqprio_qopt.qopt.num_tc; i++)
24551+#else
24552+ for (i = 0; i < vsi->tc_config.numtc; i++)
24553+#endif
24554+ enabled_tc |= BIT(i);
24555+ if (!enabled_tc)
24556+ netdev_reset_tc(vsi->netdev);
24557+
24558+ /* Notify the stack of the actual queue counts. */
24559+ err = netif_set_real_num_tx_queues(vsi->netdev,
24560+ vsi->num_queue_pairs);
24561+ if (err)
24562+ goto err_set_queues;
24563+
24564+ /* When reducing the number of Tx queues, any pre-existing
24565+ * skbuffs might target a now removed queue. Older versions of
24566+ * the Linux kernel do not check for this, and it can result
24567+ * in a kernel panic. Avoid this by flushing all skbs now, so
24568+ * that we avoid attempting to transmit one that has an
24569+ * invalid queue mapping.
24570+ */
24571+ qdisc_reset_all_tx(vsi->netdev);
24572+
24573+ err = netif_set_real_num_rx_queues(vsi->netdev,
24574+ vsi->num_queue_pairs);
24575+ if (err)
24576+ goto err_set_queues;
24577+
24578+ } else if (vsi->type == I40E_VSI_FDIR) {
24579+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
24580+ dev_driver_string(&pf->pdev->dev),
24581+ dev_name(&pf->pdev->dev));
24582+ err = i40e_vsi_request_irq(vsi, int_name);
24583+
24584+ } else {
24585+ err = -EINVAL;
24586+ goto err_setup_rx;
24587+ }
24588+
24589+ err = i40e_up_complete(vsi);
24590+ if (err)
24591+ goto err_up_complete;
24592+
24593+ return 0;
24594+
24595+err_up_complete:
24596+ i40e_down(vsi);
24597+err_set_queues:
24598+ i40e_vsi_free_irq(vsi);
24599+err_setup_rx:
24600+ i40e_vsi_free_rx_resources(vsi);
24601+err_setup_tx:
24602+ i40e_vsi_free_tx_resources(vsi);
24603+ if (vsi == pf->vsi[pf->lan_vsi])
24604+ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
24605+
24606+ return err;
24607+}
24608+
24609+/**
24610+ * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
24611+ * @pf: Pointer to PF
24612+ *
24613+ * This function destroys the hlist where all the Flow Director
24614+ * filters were saved.
24615 **/
24616-static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
24617+static void i40e_fdir_filter_exit(struct i40e_pf *pf)
24618 {
24619- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
24620- struct i40e_vsi_context ctxt;
24621- int ret;
24622+ struct i40e_fdir_filter *filter;
24623+ struct i40e_flex_pit *pit_entry, *tmp;
24624+ struct hlist_node *node2;
24625
24626- ctxt.seid = pf->main_vsi_seid;
24627- ctxt.pf_num = pf->hw.pf_id;
24628- ctxt.vf_num = 0;
24629- ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
24630- if (ret) {
24631- dev_info(&pf->pdev->dev,
24632- "couldn't get PF vsi config, err %s aq_err %s\n",
24633- i40e_stat_str(&pf->hw, ret),
24634- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
24635- return;
24636+ hlist_for_each_entry_safe(filter, node2,
24637+ &pf->fdir_filter_list, fdir_node) {
24638+ hlist_del(&filter->fdir_node);
24639+ kfree(filter);
24640 }
24641- ctxt.flags = I40E_AQ_VSI_TYPE_PF;
24642- ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
24643- ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
24644
24645- ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
24646- if (ret) {
24647- dev_info(&pf->pdev->dev,
24648- "update vsi switch failed, err %s aq_err %s\n",
24649- i40e_stat_str(&pf->hw, ret),
24650- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
24651+ list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
24652+ list_del(&pit_entry->list);
24653+ kfree(pit_entry);
24654+ }
24655+ INIT_LIST_HEAD(&pf->l3_flex_pit_list);
24656+
24657+ list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
24658+ list_del(&pit_entry->list);
24659+ kfree(pit_entry);
24660 }
24661+ INIT_LIST_HEAD(&pf->l4_flex_pit_list);
24662+
24663+ pf->fdir_pf_active_filters = 0;
24664+ pf->fd_tcp4_filter_cnt = 0;
24665+ pf->fd_udp4_filter_cnt = 0;
24666+ pf->fd_sctp4_filter_cnt = 0;
24667+ pf->fd_ip4_filter_cnt = 0;
24668+
24669+ /* Reprogram the default input set for TCP/IPv4 */
24670+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
24671+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
24672+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
24673+
24674+ /* Reprogram the default input set for UDP/IPv4 */
24675+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
24676+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
24677+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
24678+
24679+ /* Reprogram the default input set for SCTP/IPv4 */
24680+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
24681+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
24682+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
24683+
24684+ /* Reprogram the default input set for Other/IPv4 */
24685+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
24686+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
24687+
24688+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
24689+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
24690 }
24691
24692 /**
24693- * i40e_disable_pf_switch_lb
24694- * @pf: pointer to the PF structure
24695+ * i40e_cloud_filter_exit - Cleans up Cloud Filters
24696+ * @pf: Pointer to PF
24697 *
24698- * disable switch loop back or die - no point in a return value
24699+ * This function destroys the hlist which keeps all the Cloud Filters.
24700 **/
24701-static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
24702+static void i40e_cloud_filter_exit(struct i40e_pf *pf)
24703 {
24704- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
24705- struct i40e_vsi_context ctxt;
24706- int ret;
24707+ struct i40e_cloud_filter *cfilter;
24708+ struct hlist_node *node;
24709
24710- ctxt.seid = pf->main_vsi_seid;
24711- ctxt.pf_num = pf->hw.pf_id;
24712- ctxt.vf_num = 0;
24713- ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
24714- if (ret) {
24715- dev_info(&pf->pdev->dev,
24716- "couldn't get PF vsi config, err %s aq_err %s\n",
24717- i40e_stat_str(&pf->hw, ret),
24718- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
24719- return;
24720+ hlist_for_each_entry_safe(cfilter, node,
24721+ &pf->cloud_filter_list, cloud_node) {
24722+ hlist_del(&cfilter->cloud_node);
24723+ kfree(cfilter);
24724 }
24725- ctxt.flags = I40E_AQ_VSI_TYPE_PF;
24726- ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
24727- ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
24728+ pf->num_cloud_filters = 0;
24729
24730- ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
24731- if (ret) {
24732- dev_info(&pf->pdev->dev,
24733- "update vsi switch failed, err %s aq_err %s\n",
24734- i40e_stat_str(&pf->hw, ret),
24735- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
24736+ if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
24737+ !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
24738+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
24739+ pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
24740+ pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
24741 }
24742 }
24743
24744 /**
24745- * i40e_config_bridge_mode - Configure the HW bridge mode
24746- * @veb: pointer to the bridge instance
24747+ * i40e_close - Disables a network interface
24748+ * @netdev: network interface device structure
24749 *
24750- * Configure the loop back mode for the LAN VSI that is downlink to the
24751- * specified HW bridge instance. It is expected this function is called
24752- * when a new HW bridge is instantiated.
24753+ * The close entry point is called when an interface is de-activated
24754+ * by the OS. The hardware is still under the driver's control, but
24755+ * this netdev interface is disabled.
24756+ *
24757+ * Returns 0, this is not allowed to fail
24758 **/
24759-static void i40e_config_bridge_mode(struct i40e_veb *veb)
24760+int i40e_close(struct net_device *netdev)
24761 {
24762- struct i40e_pf *pf = veb->pf;
24763+ struct i40e_netdev_priv *np = netdev_priv(netdev);
24764+ struct i40e_vsi *vsi = np->vsi;
24765
24766- if (pf->hw.debug_mask & I40E_DEBUG_LAN)
24767- dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
24768- veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
24769- if (veb->bridge_mode & BRIDGE_MODE_VEPA)
24770- i40e_disable_pf_switch_lb(pf);
24771- else
24772- i40e_enable_pf_switch_lb(pf);
24773+ i40e_vsi_close(vsi);
24774+
24775+ return 0;
24776 }
24777
24778 /**
24779- * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
24780- * @veb: pointer to the VEB instance
24781+ * i40e_do_reset - Start a PF or Core Reset sequence
24782+ * @pf: board private structure
24783+ * @reset_flags: which reset is requested
24784+ * @lock_acquired: indicates whether or not the lock has been acquired
24785+ * before this function was called.
24786 *
24787- * This is a recursive function that first builds the attached VSIs then
24788- * recurses in to build the next layer of VEB. We track the connections
24789- * through our own index numbers because the seid's from the HW could
24790- * change across the reset.
24791+ * The essential difference in resets is that the PF Reset
24792+ * doesn't clear the packet buffers, doesn't reset the PE
24793+ * firmware, and doesn't bother the other PFs on the chip.
24794 **/
24795-static int i40e_reconstitute_veb(struct i40e_veb *veb)
24796+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
24797 {
24798- struct i40e_vsi *ctl_vsi = NULL;
24799- struct i40e_pf *pf = veb->pf;
24800- int v, veb_idx;
24801- int ret;
24802+ u32 val;
24803
24804- /* build VSI that owns this VEB, temporarily attached to base VEB */
24805- for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
24806- if (pf->vsi[v] &&
24807- pf->vsi[v]->veb_idx == veb->idx &&
24808- pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
24809- ctl_vsi = pf->vsi[v];
24810- break;
24811- }
24812- }
24813- if (!ctl_vsi) {
24814- dev_info(&pf->pdev->dev,
24815- "missing owner VSI for veb_idx %d\n", veb->idx);
24816- ret = -ENOENT;
24817- goto end_reconstitute;
24818- }
24819- if (ctl_vsi != pf->vsi[pf->lan_vsi])
24820- ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
24821- ret = i40e_add_vsi(ctl_vsi);
24822- if (ret) {
24823- dev_info(&pf->pdev->dev,
24824- "rebuild of veb_idx %d owner VSI failed: %d\n",
24825- veb->idx, ret);
24826- goto end_reconstitute;
24827- }
24828- i40e_vsi_reset_stats(ctl_vsi);
24829+ WARN_ON(in_interrupt());
24830
24831- /* create the VEB in the switch and move the VSI onto the VEB */
24832- ret = i40e_add_veb(veb, ctl_vsi);
24833- if (ret)
24834- goto end_reconstitute;
24835
24836- if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
24837- veb->bridge_mode = BRIDGE_MODE_VEB;
24838- else
24839- veb->bridge_mode = BRIDGE_MODE_VEPA;
24840- i40e_config_bridge_mode(veb);
24841+ /* do the biggest reset indicated */
24842+ if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
24843
24844- /* create the remaining VSIs attached to this VEB */
24845- for (v = 0; v < pf->num_alloc_vsi; v++) {
24846- if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
24847- continue;
24848+ /* Request a Global Reset
24849+ *
24850+ * This will start the chip's countdown to the actual full
24851+ * chip reset event, and a warning interrupt to be sent
24852+ * to all PFs, including the requestor. Our handler
24853+ * for the warning interrupt will deal with the shutdown
24854+ * and recovery of the switch setup.
24855+ */
24856+ dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
24857+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
24858+ val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
24859+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
24860
24861- if (pf->vsi[v]->veb_idx == veb->idx) {
24862- struct i40e_vsi *vsi = pf->vsi[v];
24863+ } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
24864
24865- vsi->uplink_seid = veb->seid;
24866- ret = i40e_add_vsi(vsi);
24867- if (ret) {
24868- dev_info(&pf->pdev->dev,
24869- "rebuild of vsi_idx %d failed: %d\n",
24870- v, ret);
24871- goto end_reconstitute;
24872- }
24873- i40e_vsi_reset_stats(vsi);
24874- }
24875- }
24876+ /* Request a Core Reset
24877+ *
24878+ * Same as Global Reset, except does *not* include the MAC/PHY
24879+ */
24880+ dev_dbg(&pf->pdev->dev, "CoreR requested\n");
24881+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
24882+ val |= I40E_GLGEN_RTRIG_CORER_MASK;
24883+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
24884+ i40e_flush(&pf->hw);
24885
24886- /* create any VEBs attached to this VEB - RECURSION */
24887- for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
24888- if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
24889- pf->veb[veb_idx]->uplink_seid = veb->seid;
24890- ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
24891- if (ret)
24892- break;
24893- }
24894- }
24895+ } else if (reset_flags & I40E_PF_RESET_FLAG) {
24896
24897-end_reconstitute:
24898- return ret;
24899-}
24900+ /* Request a PF Reset
24901+ *
24902+ * Resets only the PF-specific registers
24903+ *
24904+ * This goes directly to the tear-down and rebuild of
24905+ * the switch, since we need to do all the recovery as
24906+ * for the Core Reset.
24907+ */
24908+ dev_dbg(&pf->pdev->dev, "PFR requested\n");
24909+ i40e_handle_reset_warning(pf, lock_acquired);
24910
24911-/**
24912- * i40e_get_capabilities - get info about the HW
24913- * @pf: the PF struct
24914- **/
24915-static int i40e_get_capabilities(struct i40e_pf *pf)
24916-{
24917- struct i40e_aqc_list_capabilities_element_resp *cap_buf;
24918- u16 data_size;
24919- int buf_len;
24920- int err;
24921+ dev_info(&pf->pdev->dev,
24922+ pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
24923+ "FW LLDP is disabled\n" :
24924+ "FW LLDP is enabled\n");
24925
24926- buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
24927- do {
24928- cap_buf = kzalloc(buf_len, GFP_KERNEL);
24929- if (!cap_buf)
24930- return -ENOMEM;
24931+ } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
24932+ int v;
24933
24934- /* this loads the data into the hw struct for us */
24935- err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
24936- &data_size,
24937- i40e_aqc_opc_list_func_capabilities,
24938- NULL);
24939- /* data loaded, buffer no longer needed */
24940- kfree(cap_buf);
24941+ /* Find the VSI(s) that requested a re-init */
24942+ dev_info(&pf->pdev->dev,
24943+ "VSI reinit requested\n");
24944+ for (v = 0; v < pf->num_alloc_vsi; v++) {
24945+ struct i40e_vsi *vsi = pf->vsi[v];
24946
24947- if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
24948- /* retry with a larger buffer */
24949- buf_len = data_size;
24950- } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
24951- dev_info(&pf->pdev->dev,
24952- "capability discovery failed, err %s aq_err %s\n",
24953- i40e_stat_str(&pf->hw, err),
24954- i40e_aq_str(&pf->hw,
24955- pf->hw.aq.asq_last_status));
24956- return -ENODEV;
24957+ if (vsi != NULL &&
24958+ test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
24959+ vsi->state))
24960+ i40e_vsi_reinit_locked(pf->vsi[v]);
24961 }
24962- } while (err);
24963+ } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
24964+ int v;
24965
24966- if (pf->hw.debug_mask & I40E_DEBUG_USER)
24967- dev_info(&pf->pdev->dev,
24968- "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
24969- pf->hw.pf_id, pf->hw.func_caps.num_vfs,
24970- pf->hw.func_caps.num_msix_vectors,
24971- pf->hw.func_caps.num_msix_vectors_vf,
24972- pf->hw.func_caps.fd_filters_guaranteed,
24973- pf->hw.func_caps.fd_filters_best_effort,
24974- pf->hw.func_caps.num_tx_qp,
24975- pf->hw.func_caps.num_vsis);
24976-
24977-#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
24978- + pf->hw.func_caps.num_vfs)
24979- if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
24980+ /* Find the VSI(s) that needs to be brought down */
24981+ dev_info(&pf->pdev->dev, "VSI down requested\n");
24982+ for (v = 0; v < pf->num_alloc_vsi; v++) {
24983+ struct i40e_vsi *vsi = pf->vsi[v];
24984+
24985+ if (vsi != NULL &&
24986+ test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
24987+ vsi->state)) {
24988+ set_bit(__I40E_VSI_DOWN, vsi->state);
24989+ i40e_down(vsi);
24990+ }
24991+ }
24992+ } else {
24993 dev_info(&pf->pdev->dev,
24994- "got num_vsis %d, setting num_vsis to %d\n",
24995- pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
24996- pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
24997+ "bad reset request 0x%08x\n", reset_flags);
24998 }
24999-
25000- return 0;
25001 }
25002
25003-static int i40e_vsi_clear(struct i40e_vsi *vsi);
25004+/**
25005+ * i40e_do_reset_safe - Protected reset path for userland calls.
25006+ * @pf: board private structure
25007+ * @reset_flags: which reset is requested
25008+ *
25009+ **/
25010+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
25011+{
25012+ rtnl_lock();
25013+ i40e_do_reset(pf, reset_flags, true);
25014+ rtnl_unlock();
25015+}
25016
25017+#ifdef CONFIG_DCB
25018 /**
25019- * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
25020+ * i40e_dcb_need_reconfig - Check if DCB needs reconfig
25021 * @pf: board private structure
25022+ * @old_cfg: current DCB config
25023+ * @new_cfg: new DCB config
25024 **/
25025-static void i40e_fdir_sb_setup(struct i40e_pf *pf)
25026+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
25027+ struct i40e_dcbx_config *old_cfg,
25028+ struct i40e_dcbx_config *new_cfg)
25029 {
25030- struct i40e_vsi *vsi;
25031+ bool need_reconfig = false;
25032
25033- /* quick workaround for an NVM issue that leaves a critical register
25034- * uninitialized
25035- */
25036- if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
25037- static const u32 hkey[] = {
25038- 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
25039- 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
25040- 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
25041- 0x95b3a76d};
25042- int i;
25043+ /* Check if ETS configuration has changed */
25044+ if (memcmp(&new_cfg->etscfg,
25045+ &old_cfg->etscfg,
25046+ sizeof(new_cfg->etscfg))) {
25047+ /* If Priority Table has changed reconfig is needed */
25048+ if (memcmp(&new_cfg->etscfg.prioritytable,
25049+ &old_cfg->etscfg.prioritytable,
25050+ sizeof(new_cfg->etscfg.prioritytable))) {
25051+ need_reconfig = true;
25052+ dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
25053+ }
25054
25055- for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
25056- wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
25057- }
25058+ if (memcmp(&new_cfg->etscfg.tcbwtable,
25059+ &old_cfg->etscfg.tcbwtable,
25060+ sizeof(new_cfg->etscfg.tcbwtable)))
25061+ dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
25062
25063- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
25064- return;
25065+ if (memcmp(&new_cfg->etscfg.tsatable,
25066+ &old_cfg->etscfg.tsatable,
25067+ sizeof(new_cfg->etscfg.tsatable)))
25068+ dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
25069+ }
25070
25071- /* find existing VSI and see if it needs configuring */
25072- vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
25073+ /* Check if PFC configuration has changed */
25074+ if (memcmp(&new_cfg->pfc,
25075+ &old_cfg->pfc,
25076+ sizeof(new_cfg->pfc))) {
25077+ need_reconfig = true;
25078+ dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
25079+ }
25080
25081- /* create a new VSI if none exists */
25082- if (!vsi) {
25083- vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
25084- pf->vsi[pf->lan_vsi]->seid, 0);
25085- if (!vsi) {
25086- dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
25087- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
25088- return;
25089- }
25090+ /* Check if APP Table has changed */
25091+ if (memcmp(&new_cfg->app,
25092+ &old_cfg->app,
25093+ sizeof(new_cfg->app))) {
25094+ need_reconfig = true;
25095+ dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
25096 }
25097
25098- i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
25099+ dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
25100+ return need_reconfig;
25101 }
25102
25103 /**
25104- * i40e_fdir_teardown - release the Flow Director resources
25105+ * i40e_handle_lldp_event - Handle LLDP Change MIB event
25106 * @pf: board private structure
25107+ * @e: event info posted on ARQ
25108 **/
25109-static void i40e_fdir_teardown(struct i40e_pf *pf)
25110-{
25111- struct i40e_vsi *vsi;
25112-
25113- i40e_fdir_filter_exit(pf);
25114- vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
25115- if (vsi)
25116- i40e_vsi_release(vsi);
25117-}
25118-
25119-/**
25120- * i40e_prep_for_reset - prep for the core to reset
25121- * @pf: board private structure
25122- * @lock_acquired: indicates whether or not the lock has been acquired
25123- * before this function was called.
25124- *
25125- * Close up the VFs and other things in prep for PF Reset.
25126- **/
25127-static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
25128+static int i40e_handle_lldp_event(struct i40e_pf *pf,
25129+ struct i40e_arq_event_info *e)
25130 {
25131+ struct i40e_aqc_lldp_get_mib *mib =
25132+ (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
25133 struct i40e_hw *hw = &pf->hw;
25134- i40e_status ret = 0;
25135- u32 v;
25136+ struct i40e_dcbx_config tmp_dcbx_cfg;
25137+ bool need_reconfig = false;
25138+ int ret = 0;
25139+ u8 type;
25140
25141- clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
25142- if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
25143- return;
25144- if (i40e_check_asq_alive(&pf->hw))
25145- i40e_vc_notify_reset(pf);
25146+ /* X710-T*L 2.5G and 5G speeds don't support DCB */
25147+ if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
25148+ (hw->phy.link_info.link_speed &
25149+ ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
25150+ !(pf->flags & I40E_FLAG_DCB_CAPABLE))
25151+ /* let firmware decide if the DCB should be disabled */
25152+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
25153
25154- dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
25155+ /* Not DCB capable or capability disabled */
25156+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
25157+ return ret;
25158
25159- /* quiesce the VSIs and their queues that are not already DOWN */
25160- /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
25161- if (!lock_acquired)
25162- rtnl_lock();
25163- i40e_pf_quiesce_all_vsi(pf);
25164- if (!lock_acquired)
25165- rtnl_unlock();
25166+ /* Ignore if event is not for Nearest Bridge */
25167+ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
25168+ & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
25169+ dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
25170+ if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
25171+ return ret;
25172
25173- for (v = 0; v < pf->num_alloc_vsi; v++) {
25174- if (pf->vsi[v])
25175- pf->vsi[v]->seid = 0;
25176+ /* Check MIB Type and return if event for Remote MIB update */
25177+ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
25178+ dev_dbg(&pf->pdev->dev,
25179+ "LLDP event mib type %s\n", type ? "remote" : "local");
25180+ if (type == I40E_AQ_LLDP_MIB_REMOTE) {
25181+ /* Update the remote cached instance and return */
25182+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
25183+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
25184+ &hw->remote_dcbx_config);
25185+ goto exit;
25186 }
25187
25188- i40e_shutdown_adminq(&pf->hw);
25189+ /* Store the old configuration */
25190+ tmp_dcbx_cfg = hw->local_dcbx_config;
25191
25192- /* call shutdown HMC */
25193- if (hw->hmc.hmc_obj) {
25194- ret = i40e_shutdown_lan_hmc(hw);
25195- if (ret)
25196- dev_warn(&pf->pdev->dev,
25197- "shutdown_lan_hmc failed: %d\n", ret);
25198- }
25199+ /* Reset the old DCBx configuration data */
25200+ memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
25201+ /* Get updated DCBX data from firmware */
25202+ ret = i40e_get_dcb_config(&pf->hw);
25203+ if (ret) {
25204+ /* X710-T*L 2.5G and 5G speeds don't support DCB */
25205+ if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
25206+ (hw->phy.link_info.link_speed &
25207+ (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
25208+ dev_warn(&pf->pdev->dev,
25209+ "DCB is not supported for X710-T*L 2.5/5G speeds\n");
25210+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
25211+ } else {
25212+ dev_info(&pf->pdev->dev,
25213+ "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
25214+ i40e_stat_str(&pf->hw, ret),
25215+ i40e_aq_str(&pf->hw,
25216+ pf->hw.aq.asq_last_status));
25217+ }
25218+ goto exit;
25219+ }
25220+
25221+ /* No change detected in DCBX configs */
25222+ if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
25223+ sizeof(tmp_dcbx_cfg))) {
25224+ dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
25225+ goto exit;
25226+ }
25227+
25228+ need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
25229+ &hw->local_dcbx_config);
25230+
25231+#ifdef HAVE_DCBNL_IEEE
25232+ i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
25233+#endif /* HAVE_DCBNL_IEEE */
25234+
25235+ if (!need_reconfig)
25236+ goto exit;
25237+
25238+ /* Enable DCB tagging only when more than one TC */
25239+ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
25240+ pf->flags |= I40E_FLAG_DCB_ENABLED;
25241+ else
25242+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
25243+
25244+ set_bit(__I40E_PORT_SUSPENDED, pf->state);
25245+ /* Reconfiguration needed, quiesce all VSIs */
25246+ i40e_pf_quiesce_all_vsi(pf);
25247+
25248+ /* Changes in configuration update VEB/VSI */
25249+ i40e_dcb_reconfigure(pf);
25250+
25251+ ret = i40e_resume_port_tx(pf);
25252+
25253+ clear_bit(__I40E_PORT_SUSPENDED, pf->state);
25254+ /* In case of error no point in resuming VSIs */
25255+ if (ret)
25256+ goto exit;
25257+
25258+ /* Wait for the PF's queues to be disabled */
25259+ ret = i40e_pf_wait_queues_disabled(pf);
25260+ if (ret) {
25261+ /* Schedule PF reset to recover */
25262+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
25263+ i40e_service_event_schedule(pf);
25264+ } else {
25265+ i40e_pf_unquiesce_all_vsi(pf);
25266+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
25267+ set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
25268+ }
25269+
25270+exit:
25271+ return ret;
25272 }
25273
25274+#endif /* CONFIG_DCB */
25275 /**
25276- * i40e_send_version - update firmware with driver version
25277- * @pf: PF struct
25278- */
25279-static void i40e_send_version(struct i40e_pf *pf)
25280+ * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
25281+ * @pf: board private structure
25282+ * @e: event info posted on ARQ
25283+ *
25284+ * Handler for LAN Queue Overflow Event generated by the firmware for PF
25285+ * and VF queues
25286+ **/
25287+static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
25288+ struct i40e_arq_event_info *e)
25289 {
25290- struct i40e_driver_version dv;
25291+ struct i40e_aqc_lan_overflow *data =
25292+ (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
25293+ u32 queue = le32_to_cpu(data->prtdcb_rupto);
25294+ u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
25295+ struct i40e_hw *hw = &pf->hw;
25296+ struct i40e_vf *vf;
25297+ u16 vf_id;
25298
25299- dv.major_version = DRV_VERSION_MAJOR;
25300- dv.minor_version = DRV_VERSION_MINOR;
25301- dv.build_version = DRV_VERSION_BUILD;
25302- dv.subbuild_version = 0;
25303- strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
25304- i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
25305+ dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
25306+ queue, qtx_ctl);
25307+
25308+ /* Queue belongs to VF, find the VF and issue VF reset */
25309+ if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
25310+ >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
25311+ vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
25312+ >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
25313+ vf_id -= hw->func_caps.vf_base_id;
25314+ vf = &pf->vf[vf_id];
25315+ i40e_vc_notify_vf_reset(vf);
25316+ /* Allow VF to process pending reset notification */
25317+ msleep(20);
25318+ i40e_reset_vf(vf, false);
25319+ }
25320 }
25321
25322 /**
25323- * i40e_get_oem_version - get OEM specific version information
25324- * @hw: pointer to the hardware structure
25325+ * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
25326+ * @pf: board private structure
25327 **/
25328-static void i40e_get_oem_version(struct i40e_hw *hw)
25329+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
25330 {
25331- u16 block_offset = 0xffff;
25332- u16 block_length = 0;
25333- u16 capabilities = 0;
25334- u16 gen_snap = 0;
25335- u16 release = 0;
25336-
25337-#define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
25338-#define I40E_NVM_OEM_LENGTH_OFFSET 0x00
25339-#define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
25340-#define I40E_NVM_OEM_GEN_OFFSET 0x02
25341-#define I40E_NVM_OEM_RELEASE_OFFSET 0x03
25342-#define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
25343-#define I40E_NVM_OEM_LENGTH 3
25344-
25345- /* Check if pointer to OEM version block is valid. */
25346- i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
25347- if (block_offset == 0xffff)
25348- return;
25349+ u32 val, fcnt_prog;
25350
25351- /* Check if OEM version block has correct length. */
25352- i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
25353- &block_length);
25354- if (block_length < I40E_NVM_OEM_LENGTH)
25355- return;
25356+ val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
25357+ fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
25358+ return fcnt_prog;
25359+}
25360
25361- /* Check if OEM version format is as expected. */
25362- i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
25363- &capabilities);
25364- if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
25365- return;
25366+/**
25367+ * i40e_get_current_fd_count - Get total FD filters programmed for this PF
25368+ * @pf: board private structure
25369+ **/
25370+u32 i40e_get_current_fd_count(struct i40e_pf *pf)
25371+{
25372+ u32 val, fcnt_prog;
25373
25374- i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
25375- &gen_snap);
25376- i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
25377- &release);
25378- hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
25379- hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
25380+ val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
25381+ fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
25382+ ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
25383+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
25384+ return fcnt_prog;
25385 }
25386
25387 /**
25388- * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
25389+ * i40e_get_global_fd_count - Get total FD filters programmed on device
25390 * @pf: board private structure
25391 **/
25392-static int i40e_reset(struct i40e_pf *pf)
25393+u32 i40e_get_global_fd_count(struct i40e_pf *pf)
25394 {
25395- struct i40e_hw *hw = &pf->hw;
25396- i40e_status ret;
25397+ u32 val, fcnt_prog;
25398
25399- ret = i40e_pf_reset(hw);
25400- if (ret) {
25401- dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
25402- set_bit(__I40E_RESET_FAILED, pf->state);
25403- clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
25404- } else {
25405- pf->pfr_count++;
25406- }
25407- return ret;
25408+ val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
25409+ fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
25410+ ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
25411+ I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
25412+ return fcnt_prog;
25413 }
25414
25415 /**
25416- * i40e_rebuild - rebuild using a saved config
25417+ * i40e_reenable_fdir_sb - Restore FDir SB capability
25418 * @pf: board private structure
25419- * @reinit: if the Main VSI needs to re-initialized.
25420- * @lock_acquired: indicates whether or not the lock has been acquired
25421- * before this function was called.
25422 **/
25423-static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
25424+static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
25425 {
25426- struct i40e_hw *hw = &pf->hw;
25427- u8 set_fc_aq_fail = 0;
25428- i40e_status ret;
25429- u32 val;
25430- int v;
25431+ if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
25432+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
25433+ (I40E_DEBUG_FD & pf->hw.debug_mask))
25434+ dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
25435+}
25436
25437- if (test_bit(__I40E_DOWN, pf->state))
25438- goto clear_recovery;
25439- dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
25440+/**
25441+ * i40e_reenable_fdir_atr - Restore FDir ATR capability
25442+ * @pf: board private structure
25443+ **/
25444+static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
25445+{
25446+ if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
25447+ /* ATR uses the same filtering logic as SB rules. It only
25448+ * functions properly if the input set mask is at the default
25449+ * settings. It is safe to restore the default input set
25450+ * because there are no active TCPv4 filter rules.
25451+ */
25452+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
25453+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
25454+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
25455
25456- /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
25457- ret = i40e_init_adminq(&pf->hw);
25458- if (ret) {
25459- dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
25460- i40e_stat_str(&pf->hw, ret),
25461- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
25462- goto clear_recovery;
25463+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
25464+ (I40E_DEBUG_FD & pf->hw.debug_mask))
25465+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
25466 }
25467- i40e_get_oem_version(&pf->hw);
25468-
25469- /* re-verify the eeprom if we just had an EMP reset */
25470- if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
25471- i40e_verify_eeprom(pf);
25472+}
25473
25474- i40e_clear_pxe_mode(hw);
25475- ret = i40e_get_capabilities(pf);
25476- if (ret)
25477- goto end_core_reset;
25478+/**
25479+ * i40e_delete_invalid_filter - Delete an invalid FDIR filter
25480+ * @pf: board private structure
25481+ * @filter: FDir filter to remove
25482+ */
25483+static void i40e_delete_invalid_filter(struct i40e_pf *pf,
25484+ struct i40e_fdir_filter *filter)
25485+{
25486+ /* Update counters */
25487+ pf->fdir_pf_active_filters--;
25488+ pf->fd_inv = 0;
25489
25490- ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
25491- hw->func_caps.num_rx_qp, 0, 0);
25492- if (ret) {
25493- dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
25494- goto end_core_reset;
25495- }
25496- ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
25497- if (ret) {
25498- dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
25499- goto end_core_reset;
25500+ switch (filter->flow_type) {
25501+ case TCP_V4_FLOW:
25502+ pf->fd_tcp4_filter_cnt--;
25503+ break;
25504+ case UDP_V4_FLOW:
25505+ pf->fd_udp4_filter_cnt--;
25506+ break;
25507+ case SCTP_V4_FLOW:
25508+ pf->fd_sctp4_filter_cnt--;
25509+ break;
25510+ case IP_USER_FLOW:
25511+ switch (filter->ip4_proto) {
25512+ case IPPROTO_TCP:
25513+ pf->fd_tcp4_filter_cnt--;
25514+ break;
25515+ case IPPROTO_UDP:
25516+ pf->fd_udp4_filter_cnt--;
25517+ break;
25518+ case IPPROTO_SCTP:
25519+ pf->fd_sctp4_filter_cnt--;
25520+ break;
25521+ case IPPROTO_IP:
25522+ pf->fd_ip4_filter_cnt--;
25523+ break;
25524+ }
25525+ break;
25526 }
25527
25528-#ifdef CONFIG_I40E_DCB
25529- ret = i40e_init_pf_dcb(pf);
25530- if (ret) {
25531- dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
25532- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
25533- /* Continue without DCB enabled */
25534- }
25535-#endif /* CONFIG_I40E_DCB */
25536- /* do basic switch setup */
25537- if (!lock_acquired)
25538- rtnl_lock();
25539- ret = i40e_setup_pf_switch(pf, reinit);
25540- if (ret)
25541- goto end_unlock;
25542+ /* Remove the filter from the list and free memory */
25543+ hlist_del(&filter->fdir_node);
25544+ kfree(filter);
25545+}
25546
25547- /* The driver only wants link up/down and module qualification
25548- * reports from firmware. Note the negative logic.
25549- */
25550- ret = i40e_aq_set_phy_int_mask(&pf->hw,
25551- ~(I40E_AQ_EVENT_LINK_UPDOWN |
25552- I40E_AQ_EVENT_MEDIA_NA |
25553- I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
25554- if (ret)
25555- dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
25556- i40e_stat_str(&pf->hw, ret),
25557- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
25558-
25559- /* make sure our flow control settings are restored */
25560- ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
25561- if (ret)
25562- dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
25563- i40e_stat_str(&pf->hw, ret),
25564- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
25565-
25566- /* Rebuild the VSIs and VEBs that existed before reset.
25567- * They are still in our local switch element arrays, so only
25568- * need to rebuild the switch model in the HW.
25569- *
25570- * If there were VEBs but the reconstitution failed, we'll try
25571- * try to recover minimal use by getting the basic PF VSI working.
25572- */
25573- if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
25574- dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
25575- /* find the one VEB connected to the MAC, and find orphans */
25576- for (v = 0; v < I40E_MAX_VEB; v++) {
25577- if (!pf->veb[v])
25578- continue;
25579-
25580- if (pf->veb[v]->uplink_seid == pf->mac_seid ||
25581- pf->veb[v]->uplink_seid == 0) {
25582- ret = i40e_reconstitute_veb(pf->veb[v]);
25583-
25584- if (!ret)
25585- continue;
25586+/**
25587+ * i40e_fdir_check_and_reenable - Function to reenable FD ATR or SB if disabled
25588+ * @pf: board private structure
25589+ **/
25590+void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
25591+{
25592+ struct i40e_fdir_filter *filter;
25593+ u32 fcnt_prog, fcnt_avail;
25594+ struct hlist_node *node;
25595
25596- /* If Main VEB failed, we're in deep doodoo,
25597- * so give up rebuilding the switch and set up
25598- * for minimal rebuild of PF VSI.
25599- * If orphan failed, we'll report the error
25600- * but try to keep going.
25601- */
25602- if (pf->veb[v]->uplink_seid == pf->mac_seid) {
25603- dev_info(&pf->pdev->dev,
25604- "rebuild of switch failed: %d, will try to set up simple PF connection\n",
25605- ret);
25606- pf->vsi[pf->lan_vsi]->uplink_seid
25607- = pf->mac_seid;
25608- break;
25609- } else if (pf->veb[v]->uplink_seid == 0) {
25610- dev_info(&pf->pdev->dev,
25611- "rebuild of orphan VEB failed: %d\n",
25612- ret);
25613- }
25614- }
25615- }
25616- }
25617+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
25618+ return;
25619
25620- if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
25621- dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
25622- /* no VEB, so rebuild only the Main VSI */
25623- ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
25624- if (ret) {
25625- dev_info(&pf->pdev->dev,
25626- "rebuild of Main VSI failed: %d\n", ret);
25627- goto end_unlock;
25628- }
25629- }
25630+ /* Check if we have enough room to re-enable FDir SB capability. */
25631+ fcnt_prog = i40e_get_global_fd_count(pf);
25632+ fcnt_avail = pf->fdir_pf_filter_count;
25633+ if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
25634+ (pf->fd_add_err == 0) ||
25635+ (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
25636+ i40e_reenable_fdir_sb(pf);
25637
25638- /* Reconfigure hardware for allowing smaller MSS in the case
25639- * of TSO, so that we avoid the MDD being fired and causing
25640- * a reset in the case of small MSS+TSO.
25641+ /* We should wait for even more space before re-enabling ATR.
25642+ * Additionally, we cannot enable ATR as long as we still have TCP SB
25643+ * rules active.
25644 */
25645-#define I40E_REG_MSS 0x000E64DC
25646-#define I40E_REG_MSS_MIN_MASK 0x3FF0000
25647-#define I40E_64BYTE_MSS 0x400000
25648- val = rd32(hw, I40E_REG_MSS);
25649- if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
25650- val &= ~I40E_REG_MSS_MIN_MASK;
25651- val |= I40E_64BYTE_MSS;
25652- wr32(hw, I40E_REG_MSS, val);
25653- }
25654+ if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
25655+ (pf->fd_tcp4_filter_cnt == 0))
25656+ i40e_reenable_fdir_atr(pf);
25657
25658- if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
25659- msleep(75);
25660- ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
25661- if (ret)
25662- dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
25663- i40e_stat_str(&pf->hw, ret),
25664- i40e_aq_str(&pf->hw,
25665- pf->hw.aq.asq_last_status));
25666+ /* if hw had a problem adding a filter, delete it */
25667+ if (pf->fd_inv > 0) {
25668+ hlist_for_each_entry_safe(filter, node,
25669+ &pf->fdir_filter_list, fdir_node)
25670+ if (filter->fd_id == pf->fd_inv)
25671+ i40e_delete_invalid_filter(pf, filter);
25672 }
25673- /* reinit the misc interrupt */
25674- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
25675- ret = i40e_setup_misc_vector(pf);
25676-
25677- /* Add a filter to drop all Flow control frames from any VSI from being
25678- * transmitted. By doing so we stop a malicious VF from sending out
25679- * PAUSE or PFC frames and potentially controlling traffic for other
25680- * PF/VF VSIs.
25681- * The FW can still send Flow control frames if enabled.
25682- */
25683- i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
25684- pf->main_vsi_seid);
25685-
25686- /* restart the VSIs that were rebuilt and running before the reset */
25687- i40e_pf_unquiesce_all_vsi(pf);
25688+}
25689
25690- /* Release the RTNL lock before we start resetting VFs */
25691- if (!lock_acquired)
25692- rtnl_unlock();
25693+#define I40E_MIN_FD_FLUSH_INTERVAL 10
25694+#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
25695+/**
25696+ * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
25697+ * @pf: board private structure
25698+ **/
25699+static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
25700+{
25701+ unsigned long min_flush_time;
25702+ int flush_wait_retry = 50;
25703+ bool disable_atr = false;
25704+ int fd_room;
25705+ int reg;
25706
25707- /* Restore promiscuous settings */
25708- ret = i40e_set_promiscuous(pf, pf->cur_promisc);
25709- if (ret)
25710- dev_warn(&pf->pdev->dev,
25711- "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
25712- pf->cur_promisc ? "on" : "off",
25713- i40e_stat_str(&pf->hw, ret),
25714- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
25715+ if (!time_after(jiffies, pf->fd_flush_timestamp +
25716+ (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
25717+ return;
25718
25719- i40e_reset_all_vfs(pf, true);
25720+	/* If the flush is happening too quickly and we have mostly SB rules, we
25721+ * should not re-enable ATR for some time.
25722+ */
25723+ min_flush_time = pf->fd_flush_timestamp +
25724+ (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
25725+ fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
25726
25727- /* tell the firmware that we're starting */
25728- i40e_send_version(pf);
25729+ if (!(time_after(jiffies, min_flush_time)) &&
25730+ (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
25731+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
25732+ dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
25733+ disable_atr = true;
25734+ }
25735
25736- /* We've already released the lock, so don't do it again */
25737- goto end_core_reset;
25738+ pf->fd_flush_timestamp = jiffies;
25739+ set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
25740+ /* flush all filters */
25741+ wr32(&pf->hw, I40E_PFQF_CTL_1,
25742+ I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
25743+ i40e_flush(&pf->hw);
25744+ pf->fd_flush_cnt++;
25745+ pf->fd_add_err = 0;
25746+ do {
25747+ /* Check FD flush status every 5-6msec */
25748+ usleep_range(5000, 6000);
25749+ reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
25750+ if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
25751+ break;
25752+ } while (flush_wait_retry--);
25753
25754-end_unlock:
25755- if (!lock_acquired)
25756- rtnl_unlock();
25757-end_core_reset:
25758- clear_bit(__I40E_RESET_FAILED, pf->state);
25759-clear_recovery:
25760- clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
25761+ if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
25762+ dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
25763+ } else {
25764+ /* replay sideband filters */
25765+ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
25766+ if (!disable_atr && !pf->fd_tcp4_filter_cnt)
25767+ clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
25768+ clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
25769+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
25770+ dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
25771+ }
25772 }
25773
25774 /**
25775- * i40e_reset_and_rebuild - reset and rebuild using a saved config
25776+ * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
25777 * @pf: board private structure
25778- * @reinit: if the Main VSI needs to re-initialized.
25779- * @lock_acquired: indicates whether or not the lock has been acquired
25780- * before this function was called.
25781 **/
25782-static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
25783- bool lock_acquired)
25784+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
25785 {
25786- int ret;
25787- /* Now we wait for GRST to settle out.
25788- * We don't have to delete the VEBs or VSIs from the hw switch
25789- * because the reset will make them disappear.
25790- */
25791- ret = i40e_reset(pf);
25792- if (!ret)
25793- i40e_rebuild(pf, reinit, lock_acquired);
25794+ return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
25795 }
25796
25797+/* We can see up to 256 filter programming desc in transit if the filters are
25798+ * being applied really fast; before we see the first
25799+ * filter miss error on Rx queue 0. Accumulating enough error messages before
25800+ * reacting will make sure we don't cause flush too often.
25801+ */
25802+#define I40E_MAX_FD_PROGRAM_ERROR 256
25803 /**
25804- * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
25805+ * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
25806 * @pf: board private structure
25807- *
25808- * Close up the VFs and other things in prep for a Core Reset,
25809- * then get ready to rebuild the world.
25810- * @lock_acquired: indicates whether or not the lock has been acquired
25811- * before this function was called.
25812 **/
25813-static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
25814+static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
25815 {
25816- i40e_prep_for_reset(pf, lock_acquired);
25817- i40e_reset_and_rebuild(pf, false, lock_acquired);
25818+
25819+ /* if interface is down do nothing */
25820+ if (test_bit(__I40E_DOWN, pf->state))
25821+ return;
25822+
25823+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
25824+ i40e_fdir_flush_and_replay(pf);
25825+
25826+ i40e_fdir_check_and_reenable(pf);
25827+
25828 }
25829
25830 /**
25831- * i40e_handle_mdd_event
25832- * @pf: pointer to the PF structure
25833- *
25834- * Called from the MDD irq handler to identify possibly malicious vfs
25835+ * i40e_vsi_link_event - notify VSI of a link event
25836+ * @vsi: vsi to be notified
25837+ * @link_up: link up or down
25838 **/
25839-static void i40e_handle_mdd_event(struct i40e_pf *pf)
25840+static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
25841 {
25842- struct i40e_hw *hw = &pf->hw;
25843- bool mdd_detected = false;
25844- bool pf_mdd_detected = false;
25845- struct i40e_vf *vf;
25846- u32 reg;
25847- int i;
25848-
25849- if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
25850+ if (!vsi || (test_bit(__I40E_VSI_DOWN, vsi->state)))
25851 return;
25852
25853- /* find what triggered the MDD event */
25854- reg = rd32(hw, I40E_GL_MDET_TX);
25855- if (reg & I40E_GL_MDET_TX_VALID_MASK) {
25856- u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
25857- I40E_GL_MDET_TX_PF_NUM_SHIFT;
25858- u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
25859- I40E_GL_MDET_TX_VF_NUM_SHIFT;
25860- u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
25861- I40E_GL_MDET_TX_EVENT_SHIFT;
25862- u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
25863- I40E_GL_MDET_TX_QUEUE_SHIFT) -
25864- pf->hw.func_caps.base_queue;
25865- if (netif_msg_tx_err(pf))
25866- dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
25867- event, queue, pf_num, vf_num);
25868- wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
25869- mdd_detected = true;
25870- }
25871- reg = rd32(hw, I40E_GL_MDET_RX);
25872- if (reg & I40E_GL_MDET_RX_VALID_MASK) {
25873- u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
25874- I40E_GL_MDET_RX_FUNCTION_SHIFT;
25875- u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
25876- I40E_GL_MDET_RX_EVENT_SHIFT;
25877- u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
25878- I40E_GL_MDET_RX_QUEUE_SHIFT) -
25879- pf->hw.func_caps.base_queue;
25880- if (netif_msg_rx_err(pf))
25881- dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
25882- event, queue, func);
25883- wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
25884- mdd_detected = true;
25885- }
25886-
25887- if (mdd_detected) {
25888- reg = rd32(hw, I40E_PF_MDET_TX);
25889- if (reg & I40E_PF_MDET_TX_VALID_MASK) {
25890- wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
25891- dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
25892- pf_mdd_detected = true;
25893- }
25894- reg = rd32(hw, I40E_PF_MDET_RX);
25895- if (reg & I40E_PF_MDET_RX_VALID_MASK) {
25896- wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
25897- dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
25898- pf_mdd_detected = true;
25899- }
25900- /* Queue belongs to the PF, initiate a reset */
25901- if (pf_mdd_detected) {
25902- set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
25903- i40e_service_event_schedule(pf);
25904- }
25905- }
25906-
25907- /* see if one of the VFs needs its hand slapped */
25908- for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
25909- vf = &(pf->vf[i]);
25910- reg = rd32(hw, I40E_VP_MDET_TX(i));
25911- if (reg & I40E_VP_MDET_TX_VALID_MASK) {
25912- wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
25913- vf->num_mdd_events++;
25914- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
25915- i);
25916- }
25917-
25918- reg = rd32(hw, I40E_VP_MDET_RX(i));
25919- if (reg & I40E_VP_MDET_RX_VALID_MASK) {
25920- wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
25921- vf->num_mdd_events++;
25922- dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
25923- i);
25924- }
25925+ switch (vsi->type) {
25926+ case I40E_VSI_MAIN:
25927+ if (!vsi->netdev || !vsi->netdev_registered)
25928+ break;
25929
25930- if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
25931- dev_info(&pf->pdev->dev,
25932- "Too many MDD events on VF %d, disabled\n", i);
25933- dev_info(&pf->pdev->dev,
25934- "Use PF Control I/F to re-enable the VF\n");
25935- set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
25936+ if (link_up) {
25937+ netif_carrier_on(vsi->netdev);
25938+ netif_tx_wake_all_queues(vsi->netdev);
25939+ } else {
25940+ netif_carrier_off(vsi->netdev);
25941+ netif_tx_stop_all_queues(vsi->netdev);
25942 }
25943- }
25944-
25945- /* re-enable mdd interrupt cause */
25946- clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
25947- reg = rd32(hw, I40E_PFINT_ICR0_ENA);
25948- reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
25949- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
25950- i40e_flush(hw);
25951-}
25952+ break;
25953
25954-static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
25955-{
25956- switch (port->type) {
25957- case UDP_TUNNEL_TYPE_VXLAN:
25958- return "vxlan";
25959- case UDP_TUNNEL_TYPE_GENEVE:
25960- return "geneve";
25961+ case I40E_VSI_SRIOV:
25962+ case I40E_VSI_VMDQ2:
25963+ case I40E_VSI_CTRL:
25964+ case I40E_VSI_IWARP:
25965+ case I40E_VSI_MIRROR:
25966 default:
25967- return "unknown";
25968+ /* there is no notification for other VSIs */
25969+ break;
25970 }
25971 }
25972
25973 /**
25974- * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
25975- * @pf: board private structure
25976+ * i40e_veb_link_event - notify elements on the veb of a link event
25977+ * @veb: veb to be notified
25978+ * @link_up: link up or down
25979 **/
25980-static void i40e_sync_udp_filters(struct i40e_pf *pf)
25981+static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
25982 {
25983+ struct i40e_pf *pf;
25984 int i;
25985
25986- /* loop through and set pending bit for all active UDP filters */
25987- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
25988- if (pf->udp_ports[i].port)
25989- pf->pending_udp_bitmap |= BIT_ULL(i);
25990- }
25991+ if (!veb || !veb->pf)
25992+ return;
25993+ pf = veb->pf;
25994+
25995+ /* depth first... */
25996+ for (i = 0; i < I40E_MAX_VEB; i++)
25997+ if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
25998+ i40e_veb_link_event(pf->veb[i], link_up);
25999
26000- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
26001+ /* ... now the local VSIs */
26002+ for (i = 0; i < pf->num_alloc_vsi; i++)
26003+ if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
26004+ i40e_vsi_link_event(pf->vsi[i], link_up);
26005 }
26006
26007 /**
26008- * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
26009+ * i40e_link_event - Update netif_carrier status
26010 * @pf: board private structure
26011 **/
26012-static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
26013+static void i40e_link_event(struct i40e_pf *pf)
26014 {
26015- struct i40e_hw *hw = &pf->hw;
26016- i40e_status ret;
26017- u16 port;
26018- int i;
26019+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
26020+ u8 new_link_speed, old_link_speed;
26021+ i40e_status status;
26022+ bool new_link, old_link;
26023+
26024+ /* set this to force the get_link_status call to refresh state */
26025+ pf->hw.phy.get_link_info = true;
26026+ old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
26027+ status = i40e_get_link_status(&pf->hw, &new_link);
26028
26029- if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
26030+ /* On success, disable temp link polling */
26031+ if (status == I40E_SUCCESS) {
26032+ clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
26033+ } else {
26034+ /* Enable link polling temporarily until i40e_get_link_status
26035+ * returns I40E_SUCCESS
26036+ */
26037+ set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
26038+ dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
26039+ status);
26040 return;
26041+ }
26042
26043- pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
26044+ old_link_speed = pf->hw.phy.link_info_old.link_speed;
26045+ new_link_speed = pf->hw.phy.link_info.link_speed;
26046
26047- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
26048- if (pf->pending_udp_bitmap & BIT_ULL(i)) {
26049- pf->pending_udp_bitmap &= ~BIT_ULL(i);
26050- port = pf->udp_ports[i].port;
26051- if (port)
26052- ret = i40e_aq_add_udp_tunnel(hw, port,
26053- pf->udp_ports[i].type,
26054- NULL, NULL);
26055- else
26056- ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
26057+ if (new_link == old_link &&
26058+ new_link_speed == old_link_speed &&
26059+ (test_bit(__I40E_VSI_DOWN, vsi->state) ||
26060+ new_link == netif_carrier_ok(vsi->netdev)))
26061+ return;
26062
26063- if (ret) {
26064- dev_info(&pf->pdev->dev,
26065- "%s %s port %d, index %d failed, err %s aq_err %s\n",
26066- i40e_tunnel_name(&pf->udp_ports[i]),
26067- port ? "add" : "delete",
26068- port, i,
26069- i40e_stat_str(&pf->hw, ret),
26070- i40e_aq_str(&pf->hw,
26071- pf->hw.aq.asq_last_status));
26072- pf->udp_ports[i].port = 0;
26073- }
26074- }
26075- }
26076+ i40e_print_link_message(vsi, new_link);
26077+
26078+ /* Notify the base of the switch tree connected to
26079+ * the link. Floating VEBs are not notified.
26080+ */
26081+ if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
26082+ i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
26083+ else
26084+ i40e_vsi_link_event(vsi, new_link);
26085+
26086+ if (pf->vf)
26087+ i40e_vc_notify_link_state(pf);
26088+#ifdef HAVE_PTP_1588_CLOCK
26089+
26090+ if (pf->flags & I40E_FLAG_PTP)
26091+ i40e_ptp_set_increment(pf);
26092+#endif /* HAVE_PTP_1588_CLOCK */
26093 }
26094
26095 /**
26096- * i40e_service_task - Run the driver's async subtasks
26097- * @work: pointer to work_struct containing our data
26098+ * i40e_watchdog_subtask - periodic checks not using event driven response
26099+ * @pf: board private structure
26100 **/
26101-static void i40e_service_task(struct work_struct *work)
26102+static void i40e_watchdog_subtask(struct i40e_pf *pf)
26103 {
26104- struct i40e_pf *pf = container_of(work,
26105- struct i40e_pf,
26106- service_task);
26107- unsigned long start_time = jiffies;
26108+ int i;
26109
26110- /* don't bother with service tasks if a reset is in progress */
26111- if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
26112+ /* if interface is down do nothing */
26113+ if (test_bit(__I40E_DOWN, pf->state) ||
26114+ test_bit(__I40E_CONFIG_BUSY, pf->state))
26115 return;
26116
26117- if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
26118+ /* make sure we don't do these things too often */
26119+ if (time_before(jiffies, (pf->service_timer_previous +
26120+ pf->service_timer_period)))
26121 return;
26122+ pf->service_timer_previous = jiffies;
26123
26124- i40e_detect_recover_hung(pf);
26125- i40e_sync_filters_subtask(pf);
26126- i40e_reset_subtask(pf);
26127- i40e_handle_mdd_event(pf);
26128- i40e_vc_process_vflr_event(pf);
26129- i40e_watchdog_subtask(pf);
26130- i40e_fdir_reinit_subtask(pf);
26131- if (pf->flags & I40E_FLAG_CLIENT_RESET) {
26132- /* Client subtask will reopen next time through. */
26133- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
26134- pf->flags &= ~I40E_FLAG_CLIENT_RESET;
26135- } else {
26136- i40e_client_subtask(pf);
26137- if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
26138- i40e_notify_client_of_l2_param_changes(
26139- pf->vsi[pf->lan_vsi]);
26140- pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
26141- }
26142- }
26143- i40e_sync_filters_subtask(pf);
26144- i40e_sync_udp_filters_subtask(pf);
26145- i40e_clean_adminq_subtask(pf);
26146-
26147- /* flush memory to make sure state is correct before next watchdog */
26148- smp_mb__before_atomic();
26149- clear_bit(__I40E_SERVICE_SCHED, pf->state);
26150+ if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
26151+ test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
26152+ i40e_link_event(pf);
26153
26154- /* If the tasks have taken longer than one timer cycle or there
26155- * is more work to be done, reschedule the service task now
26156- * rather than wait for the timer to tick again.
26157+ /* Update the stats for active netdevs so the network stack
26158+ * can look at updated numbers whenever it cares to
26159 */
26160- if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
26161- test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
26162- test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
26163- test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
26164- i40e_service_event_schedule(pf);
26165-}
26166+ for (i = 0; i < pf->num_alloc_vsi; i++)
26167+ if (pf->vsi[i] && pf->vsi[i]->netdev)
26168+ i40e_update_stats(pf->vsi[i]);
26169
26170-/**
26171- * i40e_service_timer - timer callback
26172- * @data: pointer to PF struct
26173- **/
26174-static void i40e_service_timer(unsigned long data)
26175-{
26176- struct i40e_pf *pf = (struct i40e_pf *)data;
26177+ if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
26178+ /* Update the stats for the active switching components */
26179+ for (i = 0; i < I40E_MAX_VEB; i++)
26180+ if (pf->veb[i])
26181+ i40e_update_veb_stats(pf->veb[i]);
26182+ }
26183+#ifdef HAVE_PTP_1588_CLOCK
26184
26185- mod_timer(&pf->service_timer,
26186- round_jiffies(jiffies + pf->service_timer_period));
26187- i40e_service_event_schedule(pf);
26188+ i40e_ptp_rx_hang(pf);
26189+ i40e_ptp_tx_hang(pf);
26190+#endif /* HAVE_PTP_1588_CLOCK */
26191 }
26192
26193 /**
26194- * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
26195- * @vsi: the VSI being configured
26196+ * i40e_reset_subtask - Set up for resetting the device and driver
26197+ * @pf: board private structure
26198 **/
26199-static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
26200+static void i40e_reset_subtask(struct i40e_pf *pf)
26201 {
26202- struct i40e_pf *pf = vsi->back;
26203-
26204- switch (vsi->type) {
26205- case I40E_VSI_MAIN:
26206- vsi->alloc_queue_pairs = pf->num_lan_qps;
26207- vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
26208- I40E_REQ_DESCRIPTOR_MULTIPLE);
26209- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
26210- vsi->num_q_vectors = pf->num_lan_msix;
26211- else
26212- vsi->num_q_vectors = 1;
26213-
26214- break;
26215-
26216- case I40E_VSI_FDIR:
26217- vsi->alloc_queue_pairs = 1;
26218- vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
26219- I40E_REQ_DESCRIPTOR_MULTIPLE);
26220- vsi->num_q_vectors = pf->num_fdsb_msix;
26221- break;
26222-
26223- case I40E_VSI_VMDQ2:
26224- vsi->alloc_queue_pairs = pf->num_vmdq_qps;
26225- vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
26226- I40E_REQ_DESCRIPTOR_MULTIPLE);
26227- vsi->num_q_vectors = pf->num_vmdq_msix;
26228- break;
26229+ u32 reset_flags = 0;
26230
26231- case I40E_VSI_SRIOV:
26232- vsi->alloc_queue_pairs = pf->num_vf_qps;
26233- vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
26234- I40E_REQ_DESCRIPTOR_MULTIPLE);
26235- break;
26236+ if (test_and_clear_bit(__I40E_REINIT_REQUESTED, pf->state))
26237+ reset_flags |= BIT(__I40E_REINIT_REQUESTED);
26238+ if (test_and_clear_bit(__I40E_PF_RESET_REQUESTED, pf->state))
26239+ reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
26240+ if (test_and_clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state))
26241+ reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
26242+ if (test_and_clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state))
26243+ reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
26244+ if (test_and_clear_bit(__I40E_DOWN_REQUESTED, pf->state))
26245+ reset_flags |= BIT(__I40E_DOWN_REQUESTED);
26246
26247- default:
26248- WARN_ON(1);
26249- return -ENODATA;
26250+ /* If there's a recovery already waiting, it takes
26251+ * precedence before starting a new reset sequence.
26252+ */
26253+ if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
26254+ i40e_prep_for_reset(pf, false);
26255+ i40e_reset(pf);
26256+ i40e_rebuild(pf, false, false);
26257 }
26258
26259- return 0;
26260+ /* If we're already down or resetting, just bail */
26261+ if (reset_flags &&
26262+ !test_bit(__I40E_DOWN, pf->state) &&
26263+ !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
26264+ i40e_do_reset(pf, reset_flags, false);
26265+ }
26266 }
26267
26268 /**
26269- * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
26270- * @type: VSI pointer
26271- * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
26272- *
26273- * On error: returns error code (negative)
26274- * On success: returns 0
26275+ * i40e_handle_link_event - Handle link event
26276+ * @pf: board private structure
26277+ * @e: event info posted on ARQ
26278 **/
26279-static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
26280+static void i40e_handle_link_event(struct i40e_pf *pf,
26281+ struct i40e_arq_event_info *e)
26282 {
26283- struct i40e_ring **next_rings;
26284- int size;
26285- int ret = 0;
26286+ struct i40e_aqc_get_link_status *status =
26287+ (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
26288
26289- /* allocate memory for both Tx, XDP Tx and Rx ring pointers */
26290- size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
26291- (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
26292- vsi->tx_rings = kzalloc(size, GFP_KERNEL);
26293- if (!vsi->tx_rings)
26294- return -ENOMEM;
26295- next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
26296- if (i40e_enabled_xdp_vsi(vsi)) {
26297- vsi->xdp_rings = next_rings;
26298- next_rings += vsi->alloc_queue_pairs;
26299- }
26300- vsi->rx_rings = next_rings;
26301+ /* Do a new status request to re-enable LSE reporting
26302+ * and load new status information into the hw struct
26303+ * This completely ignores any state information
26304+ * in the ARQ event info, instead choosing to always
26305+ * issue the AQ update link status command.
26306+ */
26307+ i40e_link_event(pf);
26308
26309- if (alloc_qvectors) {
26310- /* allocate memory for q_vector pointers */
26311- size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
26312- vsi->q_vectors = kzalloc(size, GFP_KERNEL);
26313- if (!vsi->q_vectors) {
26314- ret = -ENOMEM;
26315- goto err_vectors;
26316+ /* Check if module meets thermal requirements */
26317+ if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
26318+ dev_err(&pf->pdev->dev,
26319+ "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
26320+ dev_err(&pf->pdev->dev,
26321+ "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
26322+ } else {
26323+ /* check for unqualified module, if link is down, suppress
26324+ * the message if link was forced to be down.
26325+ */
26326+ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
26327+ (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
26328+ (!(status->link_info & I40E_AQ_LINK_UP)) &&
26329+ (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
26330+ dev_err(&pf->pdev->dev,
26331+ "Rx/Tx is disabled on this device because an unsupported SFP+ module type was detected.\n");
26332+ dev_err(&pf->pdev->dev,
26333+ "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
26334 }
26335 }
26336- return ret;
26337-
26338-err_vectors:
26339- kfree(vsi->tx_rings);
26340- return ret;
26341 }
26342
26343 /**
26344- * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
26345+ * i40e_clean_adminq_subtask - Clean the AdminQ rings
26346 * @pf: board private structure
26347- * @type: type of VSI
26348- *
26349- * On error: returns error code (negative)
26350- * On success: returns vsi index in PF (positive)
26351 **/
26352-static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
26353+static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
26354 {
26355- int ret = -ENODEV;
26356- struct i40e_vsi *vsi;
26357- int vsi_idx;
26358- int i;
26359+ struct i40e_arq_event_info event;
26360+ struct i40e_hw *hw = &pf->hw;
26361+ u16 pending, i = 0;
26362+ i40e_status ret;
26363+ u16 opcode;
26364+ u32 oldval;
26365+ u32 val;
26366
26367- /* Need to protect the allocation of the VSIs at the PF level */
26368- mutex_lock(&pf->switch_mutex);
26369+ /* Do not run clean AQ when PF reset fails */
26370+ if (test_bit(__I40E_RESET_FAILED, pf->state))
26371+ return;
26372
26373- /* VSI list may be fragmented if VSI creation/destruction has
26374- * been happening. We can afford to do a quick scan to look
26375- * for any free VSIs in the list.
26376- *
26377- * find next empty vsi slot, looping back around if necessary
26378- */
26379- i = pf->next_vsi;
26380- while (i < pf->num_alloc_vsi && pf->vsi[i])
26381- i++;
26382- if (i >= pf->num_alloc_vsi) {
26383- i = 0;
26384- while (i < pf->next_vsi && pf->vsi[i])
26385- i++;
26386+ /* check for error indications */
26387+ val = rd32(&pf->hw, pf->hw.aq.arq.len);
26388+ oldval = val;
26389+ if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
26390+ if (hw->debug_mask & I40E_DEBUG_AQ)
26391+ dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
26392+ val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
26393 }
26394-
26395- if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
26396- vsi_idx = i; /* Found one! */
26397- } else {
26398- ret = -ENODEV;
26399- goto unlock_pf; /* out of VSI slots! */
26400+ if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
26401+ if (hw->debug_mask & I40E_DEBUG_AQ)
26402+ dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
26403+ val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
26404+ pf->arq_overflows++;
26405 }
26406- pf->next_vsi = ++i;
26407+ if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
26408+ if (hw->debug_mask & I40E_DEBUG_AQ)
26409+ dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
26410+ val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
26411+ }
26412+ if (oldval != val)
26413+ wr32(&pf->hw, pf->hw.aq.arq.len, val);
26414
26415- vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
26416- if (!vsi) {
26417- ret = -ENOMEM;
26418- goto unlock_pf;
26419+ val = rd32(&pf->hw, pf->hw.aq.asq.len);
26420+ oldval = val;
26421+ if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
26422+ if (pf->hw.debug_mask & I40E_DEBUG_AQ)
26423+ dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
26424+ val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
26425 }
26426- vsi->type = type;
26427- vsi->back = pf;
26428- set_bit(__I40E_VSI_DOWN, vsi->state);
26429- vsi->flags = 0;
26430- vsi->idx = vsi_idx;
26431- vsi->int_rate_limit = 0;
26432- vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
26433- pf->rss_table_size : 64;
26434- vsi->netdev_registered = false;
26435- vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
26436- hash_init(vsi->mac_filter_hash);
26437- vsi->irqs_ready = false;
26438+ if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
26439+ if (pf->hw.debug_mask & I40E_DEBUG_AQ)
26440+ dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
26441+ val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
26442+ }
26443+ if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
26444+ if (pf->hw.debug_mask & I40E_DEBUG_AQ)
26445+ dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
26446+ val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
26447+ }
26448+ if (oldval != val)
26449+ wr32(&pf->hw, pf->hw.aq.asq.len, val);
26450
26451- ret = i40e_set_num_rings_in_vsi(vsi);
26452- if (ret)
26453- goto err_rings;
26454+ event.buf_len = I40E_MAX_AQ_BUF_SIZE;
26455+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
26456+ if (!event.msg_buf)
26457+ return;
26458
26459- ret = i40e_vsi_alloc_arrays(vsi, true);
26460- if (ret)
26461- goto err_rings;
26462+ do {
26463+ ret = i40e_clean_arq_element(hw, &event, &pending);
26464+ if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
26465+ break;
26466+ else if (ret) {
26467+ dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
26468+ break;
26469+ }
26470
26471- /* Setup default MSIX irq handler for VSI */
26472- i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
26473+ opcode = LE16_TO_CPU(event.desc.opcode);
26474+ switch (opcode) {
26475
26476- /* Initialize VSI lock */
26477- spin_lock_init(&vsi->mac_filter_hash_lock);
26478- pf->vsi[vsi_idx] = vsi;
26479- ret = vsi_idx;
26480- goto unlock_pf;
26481+ case i40e_aqc_opc_get_link_status:
26482+ i40e_handle_link_event(pf, &event);
26483+ break;
26484+ case i40e_aqc_opc_send_msg_to_pf:
26485+ ret = i40e_vc_process_vf_msg(pf,
26486+ le16_to_cpu(event.desc.retval),
26487+ le32_to_cpu(event.desc.cookie_high),
26488+ le32_to_cpu(event.desc.cookie_low),
26489+ event.msg_buf,
26490+ event.msg_len);
26491+ break;
26492+ case i40e_aqc_opc_lldp_update_mib:
26493+ dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
26494+#ifdef CONFIG_DCB
26495+ rtnl_lock();
26496+ ret = i40e_handle_lldp_event(pf, &event);
26497+ rtnl_unlock();
26498+#endif /* CONFIG_DCB */
26499+ break;
26500+ case i40e_aqc_opc_event_lan_overflow:
26501+ dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
26502+ i40e_handle_lan_overflow_event(pf, &event);
26503+ break;
26504+ case i40e_aqc_opc_send_msg_to_peer:
26505+ dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
26506+ break;
26507+ case i40e_aqc_opc_nvm_erase:
26508+ case i40e_aqc_opc_nvm_update:
26509+ case i40e_aqc_opc_oem_post_update:
26510+ i40e_debug(&pf->hw, I40E_DEBUG_NVM,
26511+ "ARQ NVM operation 0x%04x completed\n",
26512+ opcode);
26513+ break;
26514+ default:
26515+ dev_info(&pf->pdev->dev,
26516+ "ARQ: Unknown event 0x%04x ignored\n",
26517+ opcode);
26518+ break;
26519+ }
26520+ } while (i++ < pf->adminq_work_limit);
26521+
26522+ if (i < pf->adminq_work_limit)
26523+ clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
26524+
26525+ /* re-enable Admin queue interrupt cause */
26526+ val = rd32(hw, I40E_PFINT_ICR0_ENA);
26527+ val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
26528+ wr32(hw, I40E_PFINT_ICR0_ENA, val);
26529+ i40e_flush(hw);
26530+
26531+ kfree(event.msg_buf);
26532+}
26533+
26534+/**
26535+ * i40e_verify_eeprom - make sure eeprom is good to use
26536+ * @pf: board private structure
26537+ **/
26538+static void i40e_verify_eeprom(struct i40e_pf *pf)
26539+{
26540+ int err;
26541+
26542+ err = i40e_diag_eeprom_test(&pf->hw);
26543+ if (err) {
26544+ /* retry in case of garbage read */
26545+ err = i40e_diag_eeprom_test(&pf->hw);
26546+ if (err) {
26547+ dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
26548+ err);
26549+ set_bit(__I40E_BAD_EEPROM, pf->state);
26550+ }
26551+ }
26552+
26553+ if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
26554+ dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
26555+ clear_bit(__I40E_BAD_EEPROM, pf->state);
26556+ }
26557+}
26558+
26559+/**
26560+ * i40e_enable_pf_switch_lb
26561+ * @pf: pointer to the PF structure
26562+ *
26563+ * enable switch loop back or die - no point in a return value
26564+ **/
26565+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
26566+{
26567+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
26568+ struct i40e_vsi_context ctxt;
26569+ int ret;
26570+
26571+ ctxt.seid = pf->main_vsi_seid;
26572+ ctxt.pf_num = pf->hw.pf_id;
26573+ ctxt.vf_num = 0;
26574+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
26575+ if (ret) {
26576+ dev_info(&pf->pdev->dev,
26577+ "couldn't get PF vsi config, err %s aq_err %s\n",
26578+ i40e_stat_str(&pf->hw, ret),
26579+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
26580+ return;
26581+ }
26582+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
26583+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
26584+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
26585+
26586+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
26587+ if (ret) {
26588+ dev_info(&pf->pdev->dev,
26589+ "update vsi switch failed, err %s aq_err %s\n",
26590+ i40e_stat_str(&pf->hw, ret),
26591+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
26592+ }
26593+}
26594+
26595+/**
26596+ * i40e_disable_pf_switch_lb
26597+ * @pf: pointer to the PF structure
26598+ *
26599+ * disable switch loop back or die - no point in a return value
26600+ **/
26601+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
26602+{
26603+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
26604+ struct i40e_vsi_context ctxt;
26605+ int ret;
26606+
26607+ ctxt.seid = pf->main_vsi_seid;
26608+ ctxt.pf_num = pf->hw.pf_id;
26609+ ctxt.vf_num = 0;
26610+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
26611+ if (ret) {
26612+ dev_info(&pf->pdev->dev,
26613+ "couldn't get PF vsi config, err %s aq_err %s\n",
26614+ i40e_stat_str(&pf->hw, ret),
26615+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
26616+ return;
26617+ }
26618+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
26619+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
26620+ ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
26621+
26622+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
26623+ if (ret) {
26624+ dev_info(&pf->pdev->dev,
26625+ "update vsi switch failed, err %s aq_err %s\n",
26626+ i40e_stat_str(&pf->hw, ret),
26627+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
26628+ }
26629+}
26630+
26631+/**
26632+ * i40e_config_bridge_mode - Configure the HW bridge mode
26633+ * @veb: pointer to the bridge instance
26634+ *
26635+ * Configure the loop back mode for the LAN VSI that is downlink to the
26636+ * specified HW bridge instance. It is expected this function is called
26637+ * when a new HW bridge is instantiated.
26638+ **/
26639+static void i40e_config_bridge_mode(struct i40e_veb *veb)
26640+{
26641+ struct i40e_pf *pf = veb->pf;
26642+
26643+#ifdef HAVE_BRIDGE_ATTRIBS
26644+ if (pf->hw.debug_mask & I40E_DEBUG_LAN)
26645+ dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
26646+ veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
26647+ if (veb->bridge_mode & BRIDGE_MODE_VEPA)
26648+ i40e_disable_pf_switch_lb(pf);
26649+ else
26650+ i40e_enable_pf_switch_lb(pf);
26651+#else
26652+ if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
26653+ i40e_enable_pf_switch_lb(pf);
26654+ else
26655+ i40e_disable_pf_switch_lb(pf);
26656+#endif
26657+}
26658+
26659+/**
26660+ * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
26661+ * @veb: pointer to the VEB instance
26662+ *
26663+ * This is a recursive function that first builds the attached VSIs then
26664+ * recurses in to build the next layer of VEB. We track the connections
26665+ * through our own index numbers because the seid's from the HW could
26666+ * change across the reset.
26667+ **/
26668+static int i40e_reconstitute_veb(struct i40e_veb *veb)
26669+{
26670+ struct i40e_vsi *ctl_vsi = NULL;
26671+ struct i40e_pf *pf = veb->pf;
26672+ int v, veb_idx;
26673+ int ret;
26674+
26675+ /* build VSI that owns this VEB, temporarily attached to base VEB */
26676+ for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
26677+ if (pf->vsi[v] &&
26678+ pf->vsi[v]->veb_idx == veb->idx &&
26679+ pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
26680+ ctl_vsi = pf->vsi[v];
26681+ break;
26682+ }
26683+ }
26684+ if (!ctl_vsi) {
26685+ dev_info(&pf->pdev->dev,
26686+ "missing owner VSI for veb_idx %d\n", veb->idx);
26687+ ret = -ENOENT;
26688+ goto end_reconstitute;
26689+ }
26690+ if (ctl_vsi != pf->vsi[pf->lan_vsi])
26691+ ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
26692+ ret = i40e_add_vsi(ctl_vsi);
26693+ if (ret) {
26694+ dev_info(&pf->pdev->dev,
26695+ "rebuild of veb_idx %d owner VSI failed: %d\n",
26696+ veb->idx, ret);
26697+ goto end_reconstitute;
26698+ }
26699+ i40e_vsi_reset_stats(ctl_vsi);
26700+
26701+ /* create the VEB in the switch and move the VSI onto the VEB */
26702+ ret = i40e_add_veb(veb, ctl_vsi);
26703+ if (ret)
26704+ goto end_reconstitute;
26705+
26706+#ifdef HAVE_BRIDGE_ATTRIBS
26707+ if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
26708+ veb->bridge_mode = BRIDGE_MODE_VEB;
26709+ else
26710+ veb->bridge_mode = BRIDGE_MODE_VEPA;
26711+#endif
26712+ i40e_config_bridge_mode(veb);
26713+
26714+ /* create the remaining VSIs attached to this VEB */
26715+ for (v = 0; v < pf->num_alloc_vsi; v++) {
26716+ if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
26717+ continue;
26718+
26719+ if (pf->vsi[v]->veb_idx == veb->idx) {
26720+ struct i40e_vsi *vsi = pf->vsi[v];
26721+
26722+ vsi->uplink_seid = veb->seid;
26723+ ret = i40e_add_vsi(vsi);
26724+ if (ret) {
26725+ dev_info(&pf->pdev->dev,
26726+ "rebuild of vsi_idx %d failed: %d\n",
26727+ v, ret);
26728+ goto end_reconstitute;
26729+ }
26730+ i40e_vsi_reset_stats(vsi);
26731+ }
26732+ }
26733+
26734+ /* create any VEBs attached to this VEB - RECURSION */
26735+ for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
26736+ if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
26737+ pf->veb[veb_idx]->uplink_seid = veb->seid;
26738+ ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
26739+ if (ret)
26740+ break;
26741+ }
26742+ }
26743+
26744+end_reconstitute:
26745+ return ret;
26746+}
26747+
26748+/**
26749+ * i40e_get_capabilities - get info about the HW
26750+ * @pf: the PF struct
26751+ * @list_type: admin queue opcode list type
26752+ **/
26753+static int i40e_get_capabilities(struct i40e_pf *pf,
26754+ enum i40e_admin_queue_opc list_type)
26755+{
26756+ struct i40e_aqc_list_capabilities_element_resp *cap_buf;
26757+ u16 data_size;
26758+ int buf_len;
26759+ int err;
26760+
26761+ buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
26762+ do {
26763+ cap_buf = kzalloc(buf_len, GFP_KERNEL);
26764+ if (!cap_buf)
26765+ return -ENOMEM;
26766+
26767+ /* this loads the data into the hw struct for us */
26768+ err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
26769+ &data_size, list_type,
26770+ NULL);
26771+ /* data loaded, buffer no longer needed */
26772+ kfree(cap_buf);
26773+
26774+ if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
26775+ /* retry with a larger buffer */
26776+ buf_len = data_size;
26777+ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
26778+ dev_info(&pf->pdev->dev,
26779+ "capability discovery failed, err %s aq_err %s\n",
26780+ i40e_stat_str(&pf->hw, err),
26781+ i40e_aq_str(&pf->hw,
26782+ pf->hw.aq.asq_last_status));
26783+ return -ENODEV;
26784+ }
26785+ } while (err);
26786+
26787+ if (pf->hw.debug_mask & I40E_DEBUG_USER) {
26788+ if (list_type == i40e_aqc_opc_list_func_capabilities) {
26789+ dev_info(&pf->pdev->dev,
26790+ "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
26791+ pf->hw.pf_id, pf->hw.func_caps.num_vfs,
26792+ pf->hw.func_caps.num_msix_vectors,
26793+ pf->hw.func_caps.num_msix_vectors_vf,
26794+ pf->hw.func_caps.fd_filters_guaranteed,
26795+ pf->hw.func_caps.fd_filters_best_effort,
26796+ pf->hw.func_caps.num_tx_qp,
26797+ pf->hw.func_caps.num_vsis);
26798+ } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
26799+ dev_info(&pf->pdev->dev,
26800+ "switch_mode=0x%04x, function_valid=0x%08x\n",
26801+ pf->hw.dev_caps.switch_mode,
26802+ pf->hw.dev_caps.valid_functions);
26803+ dev_info(&pf->pdev->dev,
26804+ "SR-IOV=%d, num_vfs for all function=%u\n",
26805+ pf->hw.dev_caps.sr_iov_1_1,
26806+ pf->hw.dev_caps.num_vfs);
26807+ dev_info(&pf->pdev->dev,
26808+ "num_vsis=%u, num_rx:%u, num_tx=%u\n",
26809+ pf->hw.dev_caps.num_vsis,
26810+ pf->hw.dev_caps.num_rx_qp,
26811+ pf->hw.dev_caps.num_tx_qp);
26812+ }
26813+ }
26814+
26815+ return 0;
26816+}
26817+
26818+static int i40e_vsi_clear(struct i40e_vsi *vsi);
26819+
26820+/**
26821+ * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
26822+ * @pf: board private structure
26823+ **/
26824+static void i40e_fdir_sb_setup(struct i40e_pf *pf)
26825+{
26826+ struct i40e_vsi *vsi;
26827+
26828+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
26829+ return;
26830+
26831+ /* find existing VSI and see if it needs configuring */
26832+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
26833+
26834+ /* create a new VSI if none exists */
26835+ if (!vsi) {
26836+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
26837+ pf->vsi[pf->lan_vsi]->seid, 0);
26838+ if (!vsi) {
26839+ dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
26840+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
26841+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
26842+ return;
26843+ }
26844+ }
26845+
26846+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
26847+}
26848+
26849+/**
26850+ * i40e_fdir_teardown - release the Flow Director resources
26851+ * @pf: board private structure
26852+ **/
26853+static void i40e_fdir_teardown(struct i40e_pf *pf)
26854+{
26855+ struct i40e_vsi *vsi;
26856+
26857+ i40e_fdir_filter_exit(pf);
26858+ i40e_cloud_filter_exit(pf);
26859+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
26860+ if (vsi)
26861+ i40e_vsi_release(vsi);
26862+}
26863+
26864+/**
26865+ * i40e_prep_for_reset - prep for the core to reset
26866+ * @pf: board private structure
26867+ * @lock_acquired: indicates whether or not the lock has been acquired
26868+ * before this function was called.
26869+ *
26870+ * Close up the VFs and other things in prep for PF Reset.
26871+ **/
26872+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
26873+{
26874+ struct i40e_hw *hw = &pf->hw;
26875+ i40e_status ret = 0;
26876+ u32 v;
26877+
26878+ clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
26879+ if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
26880+ return;
26881+ if (i40e_check_asq_alive(&pf->hw))
26882+ i40e_vc_notify_reset(pf);
26883+
26884+ dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
26885+
26886+ /* TODO: warn any registered clients */
26887+ /* quiesce the VSIs and their queues that are not already DOWN */
26888+ /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
26889+ if (!lock_acquired)
26890+ rtnl_lock();
26891+ i40e_pf_quiesce_all_vsi(pf);
26892+ if (!lock_acquired)
26893+ rtnl_unlock();
26894+
26895+ for (v = 0; v < pf->num_alloc_vsi; v++) {
26896+ if (pf->vsi[v])
26897+ pf->vsi[v]->seid = 0;
26898+ }
26899+
26900+ i40e_shutdown_adminq(&pf->hw);
26901+
26902+ /* call shutdown HMC */
26903+ if (hw->hmc.hmc_obj) {
26904+ ret = i40e_shutdown_lan_hmc(hw);
26905+ if (ret)
26906+ dev_warn(&pf->pdev->dev,
26907+ "shutdown_lan_hmc failed: %d\n", ret);
26908+ }
26909+
26910+#ifdef HAVE_PTP_1588_CLOCK
26911+ /* Save the current PTP time so that we can restore the time after the
26912+ * reset completes.
26913+ */
26914+ i40e_ptp_save_hw_time(pf);
26915+#endif /* HAVE_PTP_1588_CLOCK */
26916+}
26917+
26918+/**
26919+ * i40e_send_version - update firmware with driver version
26920+ * @pf: PF struct
26921+ */
26922+static void i40e_send_version(struct i40e_pf *pf)
26923+{
26924+ struct i40e_driver_version dv;
26925+
26926+ dv.major_version = DRV_VERSION_MAJOR;
26927+ dv.minor_version = DRV_VERSION_MINOR;
26928+ dv.build_version = DRV_VERSION_BUILD;
26929+ dv.subbuild_version = 0;
26930+ strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
26931+ i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
26932+}
26933+
26934+/**
26935+ * i40e_get_oem_version - get OEM specific version information
26936+ * @hw: pointer to the hardware structure
26937+ **/
26938+static void i40e_get_oem_version(struct i40e_hw *hw)
26939+{
26940+ u16 block_offset = 0xffff;
26941+ u16 block_length = 0;
26942+ u16 capabilities = 0;
26943+ u16 gen_snap = 0;
26944+ u16 release = 0;
26945+
26946+#define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
26947+#define I40E_NVM_OEM_LENGTH_OFFSET 0x00
26948+#define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
26949+#define I40E_NVM_OEM_GEN_OFFSET 0x02
26950+#define I40E_NVM_OEM_RELEASE_OFFSET 0x03
26951+#define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
26952+#define I40E_NVM_OEM_LENGTH 3
26953+
26954+ /* Check if pointer to OEM version block is valid. */
26955+ i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
26956+ if ((block_offset & 0x7fff) == 0x7fff)
26957+ return;
26958+
26959+ /* Check if OEM version block has correct length. */
26960+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
26961+ &block_length);
26962+ if (block_length < I40E_NVM_OEM_LENGTH)
26963+ return;
26964+
26965+ /* Check if OEM version format is as expected. */
26966+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
26967+ &capabilities);
26968+ if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
26969+ return;
26970+
26971+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
26972+ &gen_snap);
26973+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
26974+ &release);
26975+ hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
26976+ hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
26977+}
26978+
26979+/**
26980+ * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
26981+ * @pf: board private structure
26982+ **/
26983+static int i40e_reset(struct i40e_pf *pf)
26984+{
26985+ struct i40e_hw *hw = &pf->hw;
26986+ i40e_status ret;
26987+
26988+ ret = i40e_pf_reset(hw);
26989+ if (ret) {
26990+ dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
26991+ set_bit(__I40E_RESET_FAILED, pf->state);
26992+ clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
26993+ } else {
26994+ pf->pfr_count++;
26995+ }
26996+ return ret;
26997+}
26998+
26999+/**
27000+ * i40e_rebuild - rebuild using a saved config
27001+ * @pf: board private structure
27002+ * @reinit: if the Main VSI needs to be re-initialized.
27003+ * @lock_acquired: indicates whether or not the lock has been acquired
27004+ * before this function was called.
27005+ **/
27006+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
27007+{
27008+ int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
27009+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
27010+ struct i40e_hw *hw = &pf->hw;
27011+ u8 set_fc_aq_fail = 0;
27012+ i40e_status ret;
27013+ u32 val;
27014+ int v;
27015+
27016+ if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
27017+ i40e_check_recovery_mode(pf)) {
27018+#ifdef SIOCETHTOOL
27019+ i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
27020+#endif
27021+ }
27022+
27023+ if (test_bit(__I40E_DOWN, pf->state) &&
27024+ !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
27025+ !old_recovery_mode_bit)
27026+ goto clear_recovery;
27027+ dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
27028+
27029+ /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
27030+ ret = i40e_init_adminq(&pf->hw);
27031+ if (ret) {
27032+ dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
27033+ i40e_stat_str(&pf->hw, ret),
27034+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
27035+ goto clear_recovery;
27036+ }
27037+ i40e_get_oem_version(&pf->hw);
27038+
27039+ if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
27040+ ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
27041+ hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
27042+ /* The following delay is necessary for 4.33 firmware and older
27043+ * to recover after EMP reset. 200 ms should suffice but we
27044+ * put here 300 ms to be sure that FW is ready to operate
27045+ * after reset.
27046+ */
27047+ mdelay(300);
27048+ }
27049+
27050+ /* re-verify the eeprom if we just had an EMP reset */
27051+ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
27052+ i40e_verify_eeprom(pf);
27053+ }
27054+
27055+ /* if we are going out of or into recovery mode we have to act
27056+ * accordingly with regard to resources initialization
27057+ * and deinitialization
27058+ */
27059+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
27060+ old_recovery_mode_bit) {
27061+ if (i40e_get_capabilities(pf,
27062+ i40e_aqc_opc_list_func_capabilities))
27063+ goto end_unlock;
27064+
27065+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
27066+ /* we're staying in recovery mode so we'll reinitialize
27067+ * misc vector here
27068+ */
27069+ if (i40e_setup_misc_vector_for_recovery_mode(pf))
27070+ goto end_unlock;
27071+ } else {
27072+ if (!lock_acquired)
27073+ rtnl_lock();
27074+ /* we're going out of recovery mode so we'll free
27075+ * the IRQ allocated specifically for recovery mode
27076+ * and restore the interrupt scheme
27077+ */
27078+ free_irq(pf->pdev->irq, pf);
27079+ i40e_clear_interrupt_scheme(pf);
27080+ if (i40e_restore_interrupt_scheme(pf))
27081+ goto end_unlock;
27082+ }
27083+
27084+ /* tell the firmware that we're starting */
27085+ i40e_send_version(pf);
27086+
27087+ /* bail out in case recovery mode was detected, as there is
27088+ * no need for further configuration.
27089+ */
27090+ goto end_unlock;
27091+ }
27092+
27093+ i40e_clear_pxe_mode(hw);
27094+ ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
27095+ if (ret)
27096+ goto end_core_reset;
27097+
27098+ ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
27099+ hw->func_caps.num_rx_qp, 0, 0);
27100+ if (ret) {
27101+ dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
27102+ goto end_core_reset;
27103+ }
27104+ ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
27105+ if (ret) {
27106+ dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
27107+ goto end_core_reset;
27108+ }
27109+
27110+ /* Enable FW to write a default DCB config on link-up */
27111+ i40e_aq_set_dcb_parameters(hw, true, NULL);
27112+
27113+#ifdef CONFIG_DCB
27114+ ret = i40e_init_pf_dcb(pf);
27115+ if (ret) {
27116+ dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
27117+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
27118+ /* Continue without DCB enabled */
27119+ }
27120+
27121+#endif /* CONFIG_DCB */
27122+ /* do basic switch setup */
27123+ if (!lock_acquired)
27124+ rtnl_lock();
27125+ ret = i40e_setup_pf_switch(pf, reinit);
27126+ if (ret)
27127+ goto end_unlock;
27128+
27129+ /* The driver only wants link up/down and module qualification
27130+ * reports from firmware. Note the negative logic.
27131+ */
27132+ ret = i40e_aq_set_phy_int_mask(&pf->hw,
27133+ ~(I40E_AQ_EVENT_LINK_UPDOWN |
27134+ I40E_AQ_EVENT_MEDIA_NA |
27135+ I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
27136+ if (ret)
27137+ dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
27138+ i40e_stat_str(&pf->hw, ret),
27139+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
27140+
27141+ /* make sure our flow control settings are restored */
27142+ ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
27143+ if (ret)
27144+ dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
27145+ i40e_stat_str(&pf->hw, ret),
27146+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
27147+
27148+ /* Rebuild the VSIs and VEBs that existed before reset.
27149+ * They are still in our local switch element arrays, so only
27150+ * need to rebuild the switch model in the HW.
27151+ *
27152+ * If there were VEBs but the reconstitution failed, we'll try
27153+	 * to recover minimal use by getting the basic PF VSI working.
27154+ */
27155+ if (vsi->uplink_seid != pf->mac_seid) {
27156+ dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
27157+ /* find the one VEB connected to the MAC, and find orphans */
27158+ for (v = 0; v < I40E_MAX_VEB; v++) {
27159+ if (!pf->veb[v])
27160+ continue;
27161+
27162+ if (pf->veb[v]->uplink_seid == pf->mac_seid ||
27163+ pf->veb[v]->uplink_seid == 0) {
27164+ ret = i40e_reconstitute_veb(pf->veb[v]);
27165+
27166+ if (!ret)
27167+ continue;
27168+
27169+ /* If Main VEB failed, we're in deep doodoo,
27170+ * so give up rebuilding the switch and set up
27171+ * for minimal rebuild of PF VSI.
27172+ * If orphan failed, we'll report the error
27173+ * but try to keep going.
27174+ */
27175+ if (pf->veb[v]->uplink_seid == pf->mac_seid) {
27176+ dev_info(&pf->pdev->dev,
27177+ "rebuild of switch failed: %d, will try to set up simple PF connection\n",
27178+ ret);
27179+ vsi->uplink_seid = pf->mac_seid;
27180+ break;
27181+ } else if (pf->veb[v]->uplink_seid == 0) {
27182+ dev_info(&pf->pdev->dev,
27183+ "rebuild of orphan VEB failed: %d\n",
27184+ ret);
27185+ }
27186+ }
27187+ }
27188+ }
27189+
27190+ if (vsi->uplink_seid == pf->mac_seid) {
27191+ dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
27192+ /* no VEB, so rebuild only the Main VSI */
27193+ ret = i40e_add_vsi(vsi);
27194+ if (ret) {
27195+ dev_info(&pf->pdev->dev,
27196+ "rebuild of Main VSI failed: %d\n", ret);
27197+ goto end_unlock;
27198+ }
27199+ }
27200+
27201+#ifdef __TC_MQPRIO_MODE_MAX
27202+ if (vsi->mqprio_qopt.max_rate[0]) {
27203+ u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0] / (1000000 / 8);
27204+
27205+ ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
27206+ if (!ret)
27207+ dev_dbg(&vsi->back->pdev->dev,
27208+ "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
27209+ max_tx_rate,
27210+ max_tx_rate / I40E_BW_CREDIT_DIVISOR,
27211+ vsi->seid);
27212+ else
27213+ goto end_unlock;
27214+ }
27215+#endif
27216+
27217+#ifdef __TC_MQPRIO_MODE_MAX
27218+ /* Not going to support channel VSI in L4 cloud filter mode */
27219+ if (!i40e_is_l4mode_enabled()) {
27220+ /* PF Main VSI is rebuild by now, go ahead and
27221+ * rebuild channel VSIs for this main VSI if they exist
27222+ */
27223+ ret = i40e_rebuild_channels(vsi);
27224+ if (ret)
27225+ goto end_unlock;
27226+ }
27227+#endif
27228+
27229+ /* Reconfigure hardware for allowing smaller MSS in the case
27230+ * of TSO, so that we avoid the MDD being fired and causing
27231+ * a reset in the case of small MSS+TSO.
27232+ */
27233+#define I40E_REG_MSS 0x000E64DC
27234+#define I40E_REG_MSS_MIN_MASK 0x3FF0000
27235+#define I40E_64BYTE_MSS 0x400000
27236+ val = rd32(hw, I40E_REG_MSS);
27237+ if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
27238+ val &= ~I40E_REG_MSS_MIN_MASK;
27239+ val |= I40E_64BYTE_MSS;
27240+ wr32(hw, I40E_REG_MSS, val);
27241+ }
27242+
27243+ if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
27244+ msleep(75);
27245+ ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
27246+ if (ret)
27247+ dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
27248+ i40e_stat_str(&pf->hw, ret),
27249+ i40e_aq_str(&pf->hw,
27250+ pf->hw.aq.asq_last_status));
27251+ }
27252+ /* reinit the misc interrupt */
27253+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
27254+ ret = i40e_setup_misc_vector(pf);
27255+
27256+ /* Add a filter that drops all Flow control frames sent from
27257+ * any VSI. By doing so we stop a malicious VF from sending out
27258+ * PAUSE or PFC frames and potentially controlling traffic for other
27259+ * PF/VF VSIs.
27260+ * The FW can still send Flow control frames if enabled.
27261+ */
27262+ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
27263+ pf->main_vsi_seid);
27264+
27265+ /* restart the VSIs that were rebuilt and running before the reset */
27266+ i40e_pf_unquiesce_all_vsi(pf);
27267+
27268+ /* Release the RTNL lock before we start resetting VFs */
27269+ if (!lock_acquired)
27270+ rtnl_unlock();
27271+
27272+ /* Restore promiscuous settings */
27273+ ret = i40e_set_promiscuous(pf, pf->cur_promisc);
27274+ if (ret)
27275+ dev_warn(&pf->pdev->dev,
27276+ "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
27277+ pf->cur_promisc ? "on" : "off",
27278+ i40e_stat_str(&pf->hw, ret),
27279+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
27280+ /* Restore all VF-d config */
27281+
27282+ /* If uncommitted changes were made to the BW share settings, then warn
27283+ * the user that the configuration may not be restored correctly.
27284+ */
27285+ if (!pf->vf_bw_applied)
27286+ dev_info(&pf->pdev->dev, "VF BW shares not restored\n");
27287+
27288+ if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->ingress_vlan)) {
27289+ u16 rule_type;
27290+
27291+ /* The Admin Queue mirroring rules refer to the traffic
27292+ * directions from the perspective of the switch, not the VSI
27293+ * we apply the mirroring rule on - so the behaviour of a VSI
27294+ * ingress mirror is classified as an egress rule
27295+ */
27296+ rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS;
27297+ ret = i40e_restore_ingress_egress_mirror(vsi, pf->ingress_vlan,
27298+ rule_type,
27299+ &pf->ingress_rule_id);
27300+ if (ret)
27301+ pf->ingress_vlan = I40E_NO_VF_MIRROR;
27302+ }
27303+
27304+ if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->egress_vlan)) {
27305+ u16 rule_type;
27306+
27307+ /* The Admin Queue mirroring rules refer to the traffic
27308+ * directions from the perspective of the switch, not the VSI
27309+ * we apply the mirroring rule on - so the behaviour of a VSI
27310+ * egress mirror is classified as an ingress rule
27311+ */
27312+ rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
27313+ ret = i40e_restore_ingress_egress_mirror(vsi, pf->egress_vlan,
27314+ rule_type,
27315+ &pf->egress_rule_id);
27316+ if (ret)
27317+ pf->egress_vlan = I40E_NO_VF_MIRROR;
27318+ }
27319+
27320+ i40e_reset_all_vfs(pf, true);
27321+
27322+ /* TODO: restart clients */
27323+ /* tell the firmware that we're starting */
27324+ i40e_send_version(pf);
27325+
27326+ /* We've already released the lock, so don't do it again */
27327+ goto end_core_reset;
27328+
27329+end_unlock:
27330+ if (!lock_acquired)
27331+ rtnl_unlock();
27332+end_core_reset:
27333+ clear_bit(__I40E_RESET_FAILED, pf->state);
27334+clear_recovery:
27335+ clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
27336+ clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
27337+}
27338+
27339+/**
27340+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
27341+ * @pf: board private structure
27342+ * @reinit: if the Main VSI needs to be re-initialized.
27343+ * @lock_acquired: indicates whether or not the lock has been acquired
27344+ * before this function was called.
27345+ **/
27346+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
27347+ bool lock_acquired)
27348+{
27349+ int ret;
27350+ /* Now we wait for GRST to settle out.
27351+ * We don't have to delete the VEBs or VSIs from the hw switch
27352+ * because the reset will make them disappear.
27353+ */
27354+ ret = i40e_reset(pf);
27355+ if (!ret)
27356+ i40e_rebuild(pf, reinit, lock_acquired);
27357+}
27358+
27359+/**
27360+ * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
27361+ * @pf: board private structure
27362+ * @lock_acquired: indicates whether or not the lock has been acquired
27363+ * before this function was called.
27364+ *
27365+ * Close up the VFs and other things in prep for a Core Reset,
27366+ * then get ready to rebuild the world.
27367+ **/
27368+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
27369+{
27370+ i40e_prep_for_reset(pf, lock_acquired);
27371+ i40e_reset_and_rebuild(pf, false, lock_acquired);
27372+}
27373+
27374+/**
27375+ * i40e_handle_mdd_event
27376+ * @pf: pointer to the PF structure
27377+ *
27378+ * Called from the MDD irq handler to identify possibly malicious vfs
27379+ **/
27380+static void i40e_handle_mdd_event(struct i40e_pf *pf)
27381+{
27382+ struct i40e_hw *hw = &pf->hw;
27383+ bool mdd_detected = false;
27384+ struct i40e_vf *vf;
27385+ u32 reg;
27386+ int i;
27387+
27388+ if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
27389+ return;
27390+
27391+ /* find what triggered the MDD event */
27392+ reg = rd32(hw, I40E_GL_MDET_TX);
27393+ if (reg & I40E_GL_MDET_TX_VALID_MASK) {
27394+ u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
27395+ I40E_GL_MDET_TX_PF_NUM_SHIFT;
27396+ u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
27397+ I40E_GL_MDET_TX_VF_NUM_SHIFT;
27398+ u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
27399+ I40E_GL_MDET_TX_EVENT_SHIFT;
27400+ u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
27401+ I40E_GL_MDET_TX_QUEUE_SHIFT) -
27402+ pf->hw.func_caps.base_queue;
27403+ if (netif_msg_tx_err(pf))
27404+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
27405+ event, queue, pf_num, vf_num);
27406+ wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
27407+ mdd_detected = true;
27408+ }
27409+ reg = rd32(hw, I40E_GL_MDET_RX);
27410+ if (reg & I40E_GL_MDET_RX_VALID_MASK) {
27411+ u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
27412+ I40E_GL_MDET_RX_FUNCTION_SHIFT;
27413+ u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
27414+ I40E_GL_MDET_RX_EVENT_SHIFT;
27415+ u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
27416+ I40E_GL_MDET_RX_QUEUE_SHIFT) -
27417+ pf->hw.func_caps.base_queue;
27418+ if (netif_msg_rx_err(pf))
27419+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
27420+ event, queue, func);
27421+ wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
27422+ mdd_detected = true;
27423+ }
27424+
27425+ if (mdd_detected) {
27426+ reg = rd32(hw, I40E_PF_MDET_TX);
27427+ if (reg & I40E_PF_MDET_TX_VALID_MASK) {
27428+ wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
27429+ dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
27430+ }
27431+ reg = rd32(hw, I40E_PF_MDET_RX);
27432+ if (reg & I40E_PF_MDET_RX_VALID_MASK) {
27433+ wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
27434+ dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
27435+ }
27436+ }
27437+
27438+ /* see if one of the VFs needs its hand slapped */
27439+ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
27440+ vf = &(pf->vf[i]);
27441+ reg = rd32(hw, I40E_VP_MDET_TX(i));
27442+ if (reg & I40E_VP_MDET_TX_VALID_MASK) {
27443+ wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
27444+ vf->num_mdd_events++;
27445+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
27446+ i);
27447+ dev_info(&pf->pdev->dev,
27448+ "Use PF Control I/F to re-enable the VF\n");
27449+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
27450+ }
27451+
27452+ reg = rd32(hw, I40E_VP_MDET_RX(i));
27453+ if (reg & I40E_VP_MDET_RX_VALID_MASK) {
27454+ wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
27455+ vf->num_mdd_events++;
27456+ dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
27457+ i);
27458+ dev_info(&pf->pdev->dev,
27459+ "Use PF Control I/F to re-enable the VF\n");
27460+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
27461+ }
27462+ }
27463+
27464+ /* re-enable mdd interrupt cause */
27465+ clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
27466+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
27467+ reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
27468+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
27469+ i40e_flush(hw);
27470+}
27471+
27472+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
27473+#if defined(HAVE_UDP_ENC_TUNNEL) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
27474+static const char *i40e_tunnel_name(u8 type)
27475+{
27476+ switch (type) {
27477+ case UDP_TUNNEL_TYPE_VXLAN:
27478+ return "vxlan";
27479+ case UDP_TUNNEL_TYPE_GENEVE:
27480+ return "geneve";
27481+ default:
27482+ return "unknown";
27483+ }
27484+}
27485+
27486+/**
27487+ * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
27488+ * @pf: board private structure
27489+ **/
27490+static void i40e_sync_udp_filters(struct i40e_pf *pf)
27491+{
27492+ int i;
27493+
27494+ /* loop through and set pending bit for all active UDP filters */
27495+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
27496+ if (pf->udp_ports[i].port)
27497+ pf->pending_udp_bitmap |= BIT_ULL(i);
27498+ }
27499+
27500+ set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
27501+}
27502+
27503+/**
27504+ * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
27505+ * @pf: board private structure
27506+ **/
27507+static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
27508+{
27509+ struct i40e_hw *hw = &pf->hw;
27510+ u8 filter_index, type;
27511+ u16 port;
27512+ int i;
27513+
27514+ if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
27515+ return;
27516+
27517+ /* acquire RTNL to maintain state of flags and port requests */
27518+ rtnl_lock();
27519+
27520+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
27521+ if (pf->pending_udp_bitmap & BIT_ULL(i)) {
27522+ struct i40e_udp_port_config *udp_port;
27523+ i40e_status ret = 0;
27524+
27525+ udp_port = &pf->udp_ports[i];
27526+ pf->pending_udp_bitmap &= ~BIT_ULL(i);
27527+
27528+ port = READ_ONCE(udp_port->port);
27529+ type = READ_ONCE(udp_port->type);
27530+ filter_index = READ_ONCE(udp_port->filter_index);
27531+
27532+ /* release RTNL while we wait on AQ command */
27533+ rtnl_unlock();
27534+
27535+ if (port)
27536+ ret = i40e_aq_add_udp_tunnel(hw, port,
27537+ type,
27538+ &filter_index,
27539+ NULL);
27540+ else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
27541+ ret = i40e_aq_del_udp_tunnel(hw, filter_index,
27542+ NULL);
27543+
27544+ /* reacquire RTNL so we can update filter_index */
27545+ rtnl_lock();
27546+
27547+ if (ret) {
27548+ dev_info(&pf->pdev->dev,
27549+ "%s %s port %d, index %d failed, err %s aq_err %s\n",
27550+ i40e_tunnel_name(type),
27551+ port ? "add" : "delete",
27552+ port,
27553+ filter_index,
27554+ i40e_stat_str(&pf->hw, ret),
27555+ i40e_aq_str(&pf->hw,
27556+ pf->hw.aq.asq_last_status));
27557+ if (port) {
27558+ /* failed to add, just reset port,
27559+ * drop pending bit for any deletion
27560+ */
27561+ udp_port->port = 0;
27562+ pf->pending_udp_bitmap &= ~BIT_ULL(i);
27563+ }
27564+ } else if (port) {
27565+ /* record filter index on success */
27566+ udp_port->filter_index = filter_index;
27567+ }
27568+ }
27569+ }
27570+
27571+ rtnl_unlock();
27572+}
27573+
27574+#endif /* HAVE_UDP_ENC_TUNNEL || HAVE_UDP_ENC_RX_OFFLOAD */
27575+#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_UDP_ENC_RX_OFFLOAD */
27576+
27577+/**
27578+ * i40e_service_task - Run the driver's async subtasks
27579+ * @work: pointer to work_struct containing our data
27580+ **/
27581+static void i40e_service_task(struct work_struct *work)
27582+{
27583+ struct i40e_pf *pf = container_of(work,
27584+ struct i40e_pf,
27585+ service_task);
27586+ unsigned long start_time = jiffies;
27587+
27588+ /* don't bother with service tasks if a reset is in progress */
27589+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
27590+ test_bit(__I40E_SUSPENDED, pf->state)) {
27591+ return;
27592+ }
27593+
27594+ if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
27595+ return;
27596+
27597+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
27598+ i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
27599+ i40e_sync_filters_subtask(pf);
27600+ i40e_reset_subtask(pf);
27601+ i40e_handle_mdd_event(pf);
27602+ i40e_vc_process_vflr_event(pf);
27603+ i40e_watchdog_subtask(pf);
27604+ i40e_fdir_reinit_subtask(pf);
27605+ if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
27606+ /* Client subtask will reopen next time through. */
27607+ i40e_notify_client_of_netdev_close(
27608+ pf->vsi[pf->lan_vsi], true);
27609+ } else {
27610+ i40e_client_subtask(pf);
27611+ if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
27612+ pf->state))
27613+ i40e_notify_client_of_l2_param_changes(
27614+ pf->vsi[pf->lan_vsi]);
27615+ }
27616+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
27617+#if defined(HAVE_UDP_ENC_TUNNEL) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
27618+ i40e_sync_udp_filters_subtask(pf);
27619+
27620+#endif
27621+#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_UDP_ENC_RX_OFFLOAD */
27622+ } else {
27623+ i40e_reset_subtask(pf);
27624+ }
27625+
27626+ i40e_clean_adminq_subtask(pf);
27627+
27628+ /* flush memory to make sure state is correct before next watchdog */
27629+ smp_mb__before_atomic();
27630+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
27631+
27632+ /* If the tasks have taken longer than one timer cycle or there
27633+ * is more work to be done, reschedule the service task now
27634+ * rather than wait for the timer to tick again.
27635+ */
27636+ if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
27637+ test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
27638+ test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
27639+ test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
27640+ i40e_service_event_schedule(pf);
27641+}
27642+
27643+/**
27644+ * i40e_service_timer - timer callback
27645+ * @t: pointer to timer_list struct
27646+ **/
27647+static void i40e_service_timer(struct timer_list *t)
27648+{
27649+ struct i40e_pf *pf = from_timer(pf, t, service_timer);
27650+
27651+ mod_timer(&pf->service_timer,
27652+ round_jiffies(jiffies + pf->service_timer_period));
27653+ i40e_service_event_schedule(pf);
27654+}
27655+
27656+/**
27657+ * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
27658+ * @vsi: the VSI being configured
27659+ **/
27660+static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
27661+{
27662+ struct i40e_pf *pf = vsi->back;
27663+
27664+ switch (vsi->type) {
27665+ case I40E_VSI_MAIN:
27666+ vsi->alloc_queue_pairs = pf->num_lan_qps;
27667+ if (!vsi->num_tx_desc)
27668+ vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
27669+ I40E_REQ_DESCRIPTOR_MULTIPLE);
27670+ if (!vsi->num_rx_desc)
27671+ vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
27672+ I40E_REQ_DESCRIPTOR_MULTIPLE);
27673+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
27674+ vsi->num_q_vectors = pf->num_lan_msix;
27675+ } else {
27676+ vsi->num_q_vectors = 1;
27677+ }
27678+
27679+ break;
27680+
27681+ case I40E_VSI_FDIR:
27682+ vsi->alloc_queue_pairs = 1;
27683+ vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
27684+ I40E_REQ_DESCRIPTOR_MULTIPLE);
27685+ vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
27686+ I40E_REQ_DESCRIPTOR_MULTIPLE);
27687+ vsi->num_q_vectors = pf->num_fdsb_msix;
27688+ break;
27689+
27690+ case I40E_VSI_VMDQ2:
27691+ vsi->alloc_queue_pairs = pf->num_vmdq_qps;
27692+ if (!vsi->num_tx_desc)
27693+ vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
27694+ I40E_REQ_DESCRIPTOR_MULTIPLE);
27695+ if (!vsi->num_rx_desc)
27696+ vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
27697+ I40E_REQ_DESCRIPTOR_MULTIPLE);
27698+ vsi->num_q_vectors = pf->num_vmdq_msix;
27699+ break;
27700+
27701+ case I40E_VSI_SRIOV:
27702+ vsi->alloc_queue_pairs = pf->num_vf_qps;
27703+ if (!vsi->num_tx_desc)
27704+ vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
27705+ I40E_REQ_DESCRIPTOR_MULTIPLE);
27706+ if (!vsi->num_rx_desc)
27707+ vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
27708+ I40E_REQ_DESCRIPTOR_MULTIPLE);
27709+ break;
27710+
27711+ default:
27712+ WARN_ON(1);
27713+ return -ENODATA;
27714+ }
27715+
27716+ return 0;
27717+}
27718+
27719+/**
27720+ * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
27721+ * @vsi: VSI pointer
27722+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
27723+ *
27724+ * On error: returns error code (negative)
27725+ * On success: returns 0
27726+ **/
27727+static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
27728+{
27729+ struct i40e_ring **next_rings;
27730+ int size;
27731+ int ret = 0;
27732+
27733+ /* allocate memory for the Tx, XDP Tx and Rx ring pointers */
27734+ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
27735+ (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
27736+
27737+ vsi->tx_rings = kzalloc(size, GFP_KERNEL);
27738+ if (!vsi->tx_rings)
27739+ return -ENOMEM;
27740+ next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
27741+ if (i40e_enabled_xdp_vsi(vsi)) {
27742+ vsi->xdp_rings = next_rings;
27743+ next_rings += vsi->alloc_queue_pairs;
27744+ }
27745+ vsi->rx_rings = next_rings;
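/* Layout sketch of the single allocation above: the pointer block is
 * carved up as [tx_rings][xdp_rings, when XDP is enabled][rx_rings],
 * which is why freeing tx_rings later releases all three arrays.
 */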
27746+
27747+ if (alloc_qvectors) {
27748+ /* allocate memory for q_vector pointers */
27749+ size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
27750+ vsi->q_vectors = kzalloc(size, GFP_KERNEL);
27751+ if (!vsi->q_vectors) {
27752+ ret = -ENOMEM;
27753+ goto err_vectors;
27754+ }
27755+ }
27756+ return ret;
27757+
27758+err_vectors:
27759+ kfree(vsi->tx_rings);
27760+ return ret;
27761+}
27762+
27763+/**
27764+ * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
27765+ * @pf: board private structure
27766+ * @type: type of VSI
27767+ *
27768+ * On error: returns error code (negative)
27769+ * On success: returns vsi index in PF (positive)
27770+ **/
27771+int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
27772+{
27773+ int ret = -ENODEV;
27774+ struct i40e_vsi *vsi;
27775+ int vsi_idx;
27776+ int i;
27777+
27778+ /* Need to protect the allocation of the VSIs at the PF level */
27779+ mutex_lock(&pf->switch_mutex);
27780+
27781+ /* VSI list may be fragmented if VSI creation/destruction has
27782+ * been happening. We can afford to do a quick scan to look
27783+ * for any free VSIs in the list.
27784+ *
27785+ * find next empty vsi slot, looping back around if necessary
27786+ */
27787+ i = pf->next_vsi;
27788+ while (i < pf->num_alloc_vsi && pf->vsi[i])
27789+ i++;
27790+ if (i >= pf->num_alloc_vsi) {
27791+ i = 0;
27792+ while (i < pf->next_vsi && pf->vsi[i])
27793+ i++;
27794+ }
27795+
27796+ if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
27797+ vsi_idx = i; /* Found one! */
27798+ } else {
27799+ ret = -ENODEV;
27800+ goto unlock_pf; /* out of VSI slots! */
27801+ }
27802+ pf->next_vsi = ++i;
27803+
27804+ vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
27805+ if (!vsi) {
27806+ ret = -ENOMEM;
27807+ goto unlock_pf;
27808+ }
27809+ vsi->type = type;
27810+ vsi->back = pf;
27811+ set_bit(__I40E_VSI_DOWN, vsi->state);
27812+ vsi->flags = 0;
27813+ vsi->idx = vsi_idx;
27814+ vsi->int_rate_limit = 0;
27815+ vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
27816+ pf->rss_table_size : 64;
27817+ vsi->netdev_registered = false;
27818+ vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
27819+ hash_init(vsi->mac_filter_hash);
27820+ vsi->irqs_ready = false;
27821+
27822+ ret = i40e_set_num_rings_in_vsi(vsi);
27823+ if (ret)
27824+ goto err_rings;
27825+
27826+ vsi->block_tx_timeout = false;
27827+
27828+ ret = i40e_vsi_alloc_arrays(vsi, true);
27829+ if (ret)
27830+ goto err_rings;
27831+
27832+ /* Setup default MSIX irq handler for VSI */
27833+ i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
27834+
27835+ /* Initialize VSI lock */
27836+ spin_lock_init(&vsi->mac_filter_hash_lock);
27837+ pf->vsi[vsi_idx] = vsi;
27838+ ret = vsi_idx;
27839+
27840+ goto unlock_pf;
27841+
27842+err_rings:
27843+ pf->next_vsi = i - 1;
27844+ kfree(vsi);
27845+unlock_pf:
27846+ mutex_unlock(&pf->switch_mutex);
27847+ return ret;
27848+}
27849+
27850+/**
27851+ * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
27852+ * @vsi: VSI pointer
27853+ * @free_qvectors: a bool to specify if q_vectors need to be freed.
27854+ *
27855+ * This helper has no return value; it frees the ring pointer
27856+ * array and, if requested, the q_vector pointer array.
27857+ **/
27858+static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
27859+{
27860+ /* free the ring and vector containers */
27861+ if (free_qvectors) {
27862+ kfree(vsi->q_vectors);
27863+ vsi->q_vectors = NULL;
27864+ }
27865+ kfree(vsi->tx_rings);
27866+ vsi->tx_rings = NULL;
27867+ vsi->rx_rings = NULL;
27868+ vsi->xdp_rings = NULL;
27869+}
27870+
27871+/**
27872+ * i40e_vsi_clear - Deallocate the VSI provided
27873+ * @vsi: the VSI being un-configured
27874+ **/
27875+static int i40e_vsi_clear(struct i40e_vsi *vsi)
27876+{
27877+ struct i40e_pf *pf;
27878+
27879+ if (!vsi)
27880+ return 0;
27881+
27882+ if (!vsi->back)
27883+ goto free_vsi;
27884+ pf = vsi->back;
27885+
27886+ mutex_lock(&pf->switch_mutex);
27887+ if (!pf->vsi[vsi->idx]) {
27888+ dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
27889+ vsi->idx, vsi->idx, vsi->type);
27890+ goto unlock_vsi;
27891+ }
27892+
27893+ if (pf->vsi[vsi->idx] != vsi) {
27894+ dev_err(&pf->pdev->dev,
27895+ "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
27896+ pf->vsi[vsi->idx]->idx,
27897+ pf->vsi[vsi->idx]->type,
27898+ vsi->idx, vsi->type);
27899+ goto unlock_vsi;
27900+ }
27901+
27902+ /* updates the PF for this cleared vsi */
27903+ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
27904+ i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
27905+
27906+ i40e_vsi_free_arrays(vsi, true);
27907+ i40e_clear_rss_config_user(vsi);
27908+
27909+ pf->vsi[vsi->idx] = NULL;
27910+ if (vsi->idx < pf->next_vsi)
27911+ pf->next_vsi = vsi->idx;
27912+
27913+unlock_vsi:
27914+ mutex_unlock(&pf->switch_mutex);
27915+free_vsi:
27916+ kfree(vsi);
27917+
27918+ return 0;
27919+}
27920+
27921+/**
27922+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
27923+ * @vsi: the VSI being cleaned
27924+ **/
27925+static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
27926+{
27927+ int i;
27928+
27929+ if (vsi->tx_rings && vsi->tx_rings[0]) {
27930+ for (i = 0; i < vsi->alloc_queue_pairs; i++) {
27931+ kfree_rcu(vsi->tx_rings[i], rcu);
27932+ vsi->tx_rings[i] = NULL;
27933+ vsi->rx_rings[i] = NULL;
27934+ if (vsi->xdp_rings)
27935+ vsi->xdp_rings[i] = NULL;
27936+ }
27937+ }
27938+}
27939+
27940+/**
27941+ * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
27942+ * @vsi: the VSI being configured
27943+ **/
27944+static int i40e_alloc_rings(struct i40e_vsi *vsi)
27945+{
27946+ int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
27947+ struct i40e_pf *pf = vsi->back;
27948+ struct i40e_ring *ring;
27949+
27950+ /* Set basic values in the rings to be used later during open() */
27951+ for (i = 0; i < vsi->alloc_queue_pairs; i++) {
27952+ /* allocate space for both Tx and Rx in one shot */
27953+ ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
27954+ if (!ring)
27955+ goto err_out;
27956+
27957+ ring->queue_index = i;
27958+ ring->reg_idx = vsi->base_queue + i;
27959+ ring->ring_active = false;
27960+ ring->vsi = vsi;
27961+ ring->netdev = vsi->netdev;
27962+ ring->dev = &pf->pdev->dev;
27963+ ring->count = vsi->num_tx_desc;
27964+ ring->size = 0;
27965+ ring->dcb_tc = 0;
27966+
27967+ if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
27968+ ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
27969+ ring->itr_setting = pf->tx_itr_default;
27970+ vsi->tx_rings[i] = ring++;
27971+
27972+ if (!i40e_enabled_xdp_vsi(vsi))
27973+ goto setup_rx;
27974+
27975+ ring->queue_index = vsi->alloc_queue_pairs + i;
27976+ ring->reg_idx = vsi->base_queue + ring->queue_index;
27977+ ring->ring_active = false;
27978+ ring->vsi = vsi;
27979+ ring->netdev = NULL;
27980+ ring->dev = &pf->pdev->dev;
27981+ ring->count = vsi->num_tx_desc;
27982+ ring->size = 0;
27983+ ring->dcb_tc = 0;
27984+ if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
27985+ ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
27986+ set_ring_xdp(ring);
27987+ ring->itr_setting = pf->tx_itr_default;
27988+ vsi->xdp_rings[i] = ring++;
27989+
27990+setup_rx:
27991+ ring->queue_index = i;
27992+ ring->reg_idx = vsi->base_queue + i;
27993+ ring->ring_active = false;
27994+ ring->vsi = vsi;
27995+ ring->netdev = vsi->netdev;
27996+ ring->dev = &pf->pdev->dev;
27997+ ring->count = vsi->num_rx_desc;
27998+ ring->size = 0;
27999+ ring->dcb_tc = 0;
28000+ ring->itr_setting = pf->rx_itr_default;
28001+ vsi->rx_rings[i] = ring;
28002+ }
28003+
28004+ return 0;
28005+
28006+err_out:
28007+ i40e_vsi_clear_rings(vsi);
28008+ return -ENOMEM;
28009+}
28010+#if !defined(I40E_LEGACY_INTERRUPT) && !defined(I40E_MSI_INTERRUPT)
28011+
28012+/**
28013+ * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
28014+ * @pf: board private structure
28015+ * @v_budget: the number of MSI-X vectors to request
28016+ *
28017+ * Returns the number of vectors reserved, or error
28018+ **/
28019+static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int v_budget)
28020+{
28021+ int v_actual = 0;
28022+
28023+ v_actual = pci_enable_msix_range(pf->pdev,
28024+ pf->msix_entries,
28025+ I40E_MIN_MSIX,
28026+ v_budget);
28027+ if (v_actual < 0)
28028+ dev_info(&pf->pdev->dev,
28029+ "MSI-X vector reservation failed: %d\n", v_actual);
28030+
28031+ return v_actual;
28032+}
28033+
28034+/**
28035+ * i40e_init_msix - Setup the MSIX capability
28036+ * @pf: board private structure
28037+ *
28038+ * Work with the OS to set up the MSIX vectors needed.
28039+ *
28040+ * Returns the number of vectors reserved or negative on failure
28041+ **/
28042+static int i40e_init_msix(struct i40e_pf *pf)
28043+{
28044+ struct i40e_hw *hw = &pf->hw;
28045+ int cpus, extra_vectors;
28046+ int vectors_left;
28047+ int v_budget, i;
28048+ int v_actual;
28049+ int iwarp_requested = 0;
28050+
28051+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
28052+ return -ENODEV;
28053+
28054+ /* The number of vectors we'll request will be comprised of:
28055+ * - Add 1 for "other" cause for Admin Queue events, etc.
28056+ * - The number of LAN queue pairs
28057+ * - This takes into account queues for each TC in DCB mode.
28058+ * - Queues being used for RSS.
28059+ * We don't need as many as max_rss_size vectors;
28060+ * use rss_size instead in the calculation since that
28061+ * is governed by the number of CPUs in the system.
28062+ * - assumes symmetric Tx/Rx pairing
28063+ * - The number of VMDq pairs
28064+ * - The CPU count within the NUMA node if iWARP is enabled
28065+ * Once we count this up, try the request.
28066+ *
28067+ * If we can't get what we want, we'll simplify to nearly nothing
28068+ * and try again. If that still fails, we punt.
28069+ */
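/* Hypothetical walk-through, purely for illustration: with 64 MSI-X
 * vectors and 16 online CPUs, 1 vector goes to the misc handler,
 * min(16, 63 / 2) = 16 go to the LAN queues, and the remaining 47
 * are offered to flow director, iWARP and VMDq before any leftovers
 * are handed back to the LAN queues (capped at the CPU count).
 */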
28070+ vectors_left = hw->func_caps.num_msix_vectors;
28071+ v_budget = 0;
28072+
28073+ /* reserve one vector for miscellaneous handler */
28074+ if (vectors_left) {
28075+ v_budget++;
28076+ vectors_left--;
28077+ }
28078+
28079+ /* reserve some vectors for the main PF traffic queues. Initially we
28080+ * only reserve at most 50% of the available vectors, in the case that
28081+ * the number of online CPUs is large. This ensures that we can enable
28082+ * extra features as well. Once we've enabled the other features, we
28083+ * will use any remaining vectors to reach as close as we can to the
28084+ * number of online CPUs.
28085+ */
28086+ cpus = num_online_cpus();
28087+ pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
28088+ vectors_left -= pf->num_lan_msix;
28089+
28090+ /* reserve one vector for sideband flow director */
28091+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
28092+ if (vectors_left) {
28093+ pf->num_fdsb_msix = 1;
28094+ v_budget++;
28095+ vectors_left--;
28096+ } else {
28097+ pf->num_fdsb_msix = 0;
28098+ }
28099+ }
28100+
28101+ /* can we reserve enough for iWARP? */
28102+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
28103+ iwarp_requested = pf->num_iwarp_msix;
28104+
28105+ if (!vectors_left)
28106+ pf->num_iwarp_msix = 0;
28107+ else if (vectors_left < pf->num_iwarp_msix)
28108+ pf->num_iwarp_msix = 1;
28109+ v_budget += pf->num_iwarp_msix;
28110+ vectors_left -= pf->num_iwarp_msix;
28111+ }
28112+
28113+ /* any vectors left over go for VMDq support */
28114+ if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
28115+ if (!vectors_left) {
28116+ pf->num_vmdq_msix = 0;
28117+ pf->num_vmdq_qps = 0;
28118+ } else {
28119+ int vmdq_vecs_wanted =
28120+ pf->num_vmdq_vsis * pf->num_vmdq_qps;
28121+ int vmdq_vecs =
28122+ min_t(int, vectors_left, vmdq_vecs_wanted);
28123+
28124+ /* if we're short on vectors for what's desired, we
28125+ * limit the queues per vmdq. If this is still more
28126+ * than are available, the user will need to change
28127+ * the number of queues/vectors used by the PF later
28128+ * with the ethtool channels command
28129+ */
28130+ if (vectors_left < vmdq_vecs_wanted) {
28131+ pf->num_vmdq_qps = 1;
28132+ vmdq_vecs_wanted = pf->num_vmdq_vsis;
28133+ vmdq_vecs = min_t(int,
28134+ vectors_left,
28135+ vmdq_vecs_wanted);
28136+ }
28137+ pf->num_vmdq_msix = pf->num_vmdq_qps;
28138+
28139+ v_budget += vmdq_vecs;
28140+ vectors_left -= vmdq_vecs;
28141+ }
28142+ }
28143+
28144+ /* On systems with a large number of SMP cores, we previously limited
28145+ * the number of vectors for num_lan_msix to be at most 50% of the
28146+ * available vectors, to allow for other features. Now, we add back
28147+ * the remaining vectors. However, we ensure that the total
28148+ * num_lan_msix will not exceed num_online_cpus(). To do this, we
28149+ * calculate the number of vectors we can add without going over the
28150+ * cap of CPUs. For systems with a small number of CPUs this will be
28151+ * zero.
28152+ */
28153+ extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
28154+ pf->num_lan_msix += extra_vectors;
28155+ vectors_left -= extra_vectors;
28156+
28157+ WARN(vectors_left < 0,
28158+ "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
28159+
28160+ v_budget += pf->num_lan_msix;
28161+ pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
28162+ GFP_KERNEL);
28163+ if (!pf->msix_entries)
28164+ return -ENOMEM;
28165+
28166+ for (i = 0; i < v_budget; i++)
28167+ pf->msix_entries[i].entry = i;
28168+ v_actual = i40e_reserve_msix_vectors(pf, v_budget);
28169+ if (v_actual < I40E_MIN_MSIX) {
28170+ pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
28171+ kfree(pf->msix_entries);
28172+ pf->msix_entries = NULL;
28173+ pci_disable_msix(pf->pdev);
28174+ return -ENODEV;
28175+ }
28176+
28177+ if (v_actual == I40E_MIN_MSIX) {
28178+ /* Adjust for minimal MSIX use */
28179+ pf->num_vmdq_vsis = 0;
28180+ pf->num_vmdq_qps = 0;
28181+ pf->num_lan_qps = 1;
28182+ pf->num_lan_msix = 1;
28183+
28184+ } else if (!vectors_left) {
28185+ /* If we have limited resources, we will start with no vectors
28186+ * for the special features and then allocate vectors to some
28187+ * of these features based on the policy and at the end disable
28188+ * the features that did not get any vectors.
28189+ */
28190+ int vec;
28191+
28192+ dev_info(&pf->pdev->dev,
28193+ "MSI-X vector limit reached, attempting to redistribute vectors\n");
28194+ /* reserve the misc vector */
28195+ vec = v_actual - 1;
28196+
28197+ /* Scale vector usage down */
28198+ pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
28199+ pf->num_vmdq_vsis = 1;
28200+ pf->num_vmdq_qps = 1;
28201+
28202+ /* partition out the remaining vectors */
28203+ switch (vec) {
28204+ case 2:
28205+ pf->num_lan_msix = 1;
28206+ break;
28207+ case 3:
28208+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
28209+ pf->num_lan_msix = 1;
28210+ pf->num_iwarp_msix = 1;
28211+ }
28212+ break;
28213+ default:
28214+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
28215+ pf->num_iwarp_msix = min_t(int, (vec / 3),
28216+ iwarp_requested);
28217+ pf->num_vmdq_vsis = min_t(int, (vec / 3),
28218+ I40E_DEFAULT_NUM_VMDQ_VSI);
28219+ } else {
28220+ pf->num_vmdq_vsis = min_t(int, (vec / 2),
28221+ I40E_DEFAULT_NUM_VMDQ_VSI);
28222+ }
28223+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
28224+ pf->num_fdsb_msix = 1;
28225+ vec--;
28226+ }
28227+ pf->num_lan_msix = min_t(int,
28228+ (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
28229+ pf->num_lan_msix);
28230+ break;
28231+ }
28232+ }
28233+
28234+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
28235+ (pf->num_fdsb_msix == 0)) {
28236+ dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
28237+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
28238+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
28239+ }
28240+ if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
28241+ (pf->num_vmdq_msix == 0)) {
28242+ dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
28243+ pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
28244+ }
28245+
28246+ if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
28247+ (pf->num_iwarp_msix == 0)) {
28248+ dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
28249+ pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
28250+ }
28251+ return v_actual;
28252+}
28253+#endif
28254+
28255+/**
28256+ * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
28257+ * @vsi: the VSI being configured
28258+ * @v_idx: index of the vector in the vsi struct
28259+ *
28260+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
28261+ **/
28262+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
28263+{
28264+ struct i40e_q_vector *q_vector;
28265+
28266+ /* allocate q_vector */
28267+ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
28268+ if (!q_vector)
28269+ return -ENOMEM;
28270+
28271+ q_vector->vsi = vsi;
28272+ q_vector->v_idx = v_idx;
28273+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
28274+ cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
28275+#endif
28276+ if (vsi->netdev)
28277+ netif_napi_add(vsi->netdev, &q_vector->napi,
28278+ i40e_napi_poll, NAPI_POLL_WEIGHT);
28279+
28280+ /* tie q_vector and vsi together */
28281+ vsi->q_vectors[v_idx] = q_vector;
28282
28283-err_rings:
28284- pf->next_vsi = i - 1;
28285- kfree(vsi);
28286-unlock_pf:
28287- mutex_unlock(&pf->switch_mutex);
28288- return ret;
28289+ return 0;
28290 }
28291
28292 /**
28293- * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
28294- * @type: VSI pointer
28295- * @free_qvectors: a bool to specify if q_vectors need to be freed.
28296+ * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
28297+ * @vsi: the VSI being configured
28298 *
28299- * On error: returns error code (negative)
28300- * On success: returns 0
28301+ * We allocate one q_vector per queue interrupt. If allocation fails we
28302+ * return -ENOMEM.
28303 **/
28304-static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
28305+static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
28306 {
28307- /* free the ring and vector containers */
28308- if (free_qvectors) {
28309- kfree(vsi->q_vectors);
28310- vsi->q_vectors = NULL;
28311+ struct i40e_pf *pf = vsi->back;
28312+ int v_idx, num_q_vectors;
28313+ int err;
28314+
28315+ /* if not MSIX, give the one vector only to the LAN VSI */
28316+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
28317+ num_q_vectors = vsi->num_q_vectors;
28318+ else if (vsi == pf->vsi[pf->lan_vsi])
28319+ num_q_vectors = 1;
28320+ else
28321+ return -EINVAL;
28322+
28323+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
28324+ err = i40e_vsi_alloc_q_vector(vsi, v_idx);
28325+ if (err)
28326+ goto err_out;
28327 }
28328- kfree(vsi->tx_rings);
28329- vsi->tx_rings = NULL;
28330- vsi->rx_rings = NULL;
28331- vsi->xdp_rings = NULL;
28332-}
28333
28334-/**
28335- * i40e_clear_rss_config_user - clear the user configured RSS hash keys
28336- * and lookup table
28337- * @vsi: Pointer to VSI structure
28338- */
28339-static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
28340-{
28341- if (!vsi)
28342- return;
28343+ return 0;
28344
28345- kfree(vsi->rss_hkey_user);
28346- vsi->rss_hkey_user = NULL;
28347+err_out:
28348+ while (v_idx--)
28349+ i40e_free_q_vector(vsi, v_idx);
28350
28351- kfree(vsi->rss_lut_user);
28352- vsi->rss_lut_user = NULL;
28353+ return err;
28354 }
28355
28356 /**
28357- * i40e_vsi_clear - Deallocate the VSI provided
28358- * @vsi: the VSI being un-configured
28359+ * i40e_init_interrupt_scheme - Determine proper interrupt scheme
28360+ * @pf: board private structure to initialize
28361 **/
28362-static int i40e_vsi_clear(struct i40e_vsi *vsi)
28363+static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
28364 {
28365- struct i40e_pf *pf;
28366-
28367- if (!vsi)
28368- return 0;
28369+ int vectors = 0;
28370+ ssize_t size;
28371
28372- if (!vsi->back)
28373- goto free_vsi;
28374- pf = vsi->back;
28375+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
28376+#if !defined(I40E_LEGACY_INTERRUPT) && !defined(I40E_MSI_INTERRUPT)
28377+ vectors = i40e_init_msix(pf);
28378+#else
28379+ vectors = -1;
28380+#endif
28381+ if (vectors < 0) {
28382+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
28383+ I40E_FLAG_IWARP_ENABLED |
28384+ I40E_FLAG_RSS_ENABLED |
28385+ I40E_FLAG_DCB_CAPABLE |
28386+ I40E_FLAG_DCB_ENABLED |
28387+ I40E_FLAG_SRIOV_ENABLED |
28388+ I40E_FLAG_FD_SB_ENABLED |
28389+ I40E_FLAG_FD_ATR_ENABLED |
28390+ I40E_FLAG_VMDQ_ENABLED);
28391+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
28392
28393- mutex_lock(&pf->switch_mutex);
28394- if (!pf->vsi[vsi->idx]) {
28395- dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
28396- vsi->idx, vsi->idx, vsi, vsi->type);
28397- goto unlock_vsi;
28398+ /* rework the queue expectations without MSIX */
28399+ i40e_determine_queue_usage(pf);
28400+ }
28401 }
28402
28403- if (pf->vsi[vsi->idx] != vsi) {
28404- dev_err(&pf->pdev->dev,
28405- "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
28406- pf->vsi[vsi->idx]->idx,
28407- pf->vsi[vsi->idx],
28408- pf->vsi[vsi->idx]->type,
28409- vsi->idx, vsi, vsi->type);
28410- goto unlock_vsi;
28411+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
28412+ (pf->flags & I40E_FLAG_MSI_ENABLED)) {
28413+ dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
28414+#ifndef I40E_LEGACY_INTERRUPT
28415+ vectors = pci_enable_msi(pf->pdev);
28416+#else
28417+ vectors = -1;
28418+#endif
28419+ if (vectors < 0) {
28420+ dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
28421+ vectors);
28422+ pf->flags &= ~I40E_FLAG_MSI_ENABLED;
28423+ }
28424+ vectors = 1; /* one MSI or Legacy vector */
28425 }
28426
28427- /* updates the PF for this cleared vsi */
28428- i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
28429- i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
28430-
28431- i40e_vsi_free_arrays(vsi, true);
28432- i40e_clear_rss_config_user(vsi);
28433+ if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
28434+ dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
28435
28436- pf->vsi[vsi->idx] = NULL;
28437- if (vsi->idx < pf->next_vsi)
28438- pf->next_vsi = vsi->idx;
28439+ /* set up vector assignment tracking */
28440+ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
28441+ pf->irq_pile = kzalloc(size, GFP_KERNEL);
28442+ if (!pf->irq_pile) {
28443+ dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
28444+ return -ENOMEM;
28445+ }
28446+ pf->irq_pile->num_entries = vectors;
28447+ pf->irq_pile->search_hint = 0;
28448
28449-unlock_vsi:
28450- mutex_unlock(&pf->switch_mutex);
28451-free_vsi:
28452- kfree(vsi);
28453+ /* track first vector for misc interrupts, ignore return */
28454+ (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
28455
28456 return 0;
28457 }
28458
28459 /**
28460- * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
28461- * @vsi: the VSI being cleaned
28462+ * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
28463+ * non queue events in recovery mode
28464+ * @pf: board private structure
28465+ *
28466+ * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
28467+ * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
28468+ * This is handled differently than in recovery mode since no Tx/Rx resources
28469+ * This is handled differently than in the normal path since no Tx/Rx resources
28470 **/
28471-static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
28472+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
28473 {
28474- int i;
28475+ int err;
28476
28477- if (vsi->tx_rings && vsi->tx_rings[0]) {
28478- for (i = 0; i < vsi->alloc_queue_pairs; i++) {
28479- kfree_rcu(vsi->tx_rings[i], rcu);
28480- vsi->tx_rings[i] = NULL;
28481- vsi->rx_rings[i] = NULL;
28482- if (vsi->xdp_rings)
28483- vsi->xdp_rings[i] = NULL;
28484+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
28485+ err = i40e_setup_misc_vector(pf);
28486+
28487+ if (err) {
28488+ dev_info(&pf->pdev->dev,
28489+ "MSI-X misc vector request failed, error %d\n",
28490+ err);
28491+ return err;
28492+ }
28493+ } else {
28494+ u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
28495+
28496+ err = request_irq(pf->pdev->irq, i40e_intr, flags,
28497+ pf->int_name, pf);
28498+
28499+ if (err) {
28500+ dev_info(&pf->pdev->dev,
28501+ "MSI/legacy misc vector request failed, error %d\n",
28502+ err);
28503+ return err;
28504 }
28505+ i40e_enable_misc_int_causes(pf);
28506+ i40e_irq_dynamic_enable_icr0(pf);
28507 }
28508+
28509+ return 0;
28510 }
28511
28512 /**
28513- * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
28514- * @vsi: the VSI being configured
28515+ * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
28516+ * @pf: board private structure
28517+ *
28518+ * This sets up the handler for MSIX 0, which is used to manage the
28519+ * non-queue interrupts, e.g. AdminQ and errors. This is not used
28520+ * when in MSI or Legacy interrupt mode.
28521 **/
28522-static int i40e_alloc_rings(struct i40e_vsi *vsi)
28523+static int i40e_setup_misc_vector(struct i40e_pf *pf)
28524 {
28525- int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
28526- struct i40e_pf *pf = vsi->back;
28527- struct i40e_ring *ring;
28528+ struct i40e_hw *hw = &pf->hw;
28529+ int err = 0;
28530
28531- /* Set basic values in the rings to be used later during open() */
28532- for (i = 0; i < vsi->alloc_queue_pairs; i++) {
28533- /* allocate space for both Tx and Rx in one shot */
28534- ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
28535- if (!ring)
28536- goto err_out;
28537+ /* Only request the IRQ once, the first time through. */
28538+ if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
28539+ err = request_irq(pf->msix_entries[0].vector,
28540+ i40e_intr, 0, pf->int_name, pf);
28541+ if (err) {
28542+ clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
28543+ dev_info(&pf->pdev->dev,
28544+ "request_irq for %s failed: %d\n",
28545+ pf->int_name, err);
28546+ return -EFAULT;
28547+ }
28548+ }
28549
28550- ring->queue_index = i;
28551- ring->reg_idx = vsi->base_queue + i;
28552- ring->ring_active = false;
28553- ring->vsi = vsi;
28554- ring->netdev = vsi->netdev;
28555- ring->dev = &pf->pdev->dev;
28556- ring->count = vsi->num_desc;
28557- ring->size = 0;
28558- ring->dcb_tc = 0;
28559- if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
28560- ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
28561- ring->tx_itr_setting = pf->tx_itr_default;
28562- vsi->tx_rings[i] = ring++;
28563+ i40e_enable_misc_int_causes(pf);
28564
28565- if (!i40e_enabled_xdp_vsi(vsi))
28566- goto setup_rx;
28567+ /* associate no queues to the misc vector */
28568+ wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
28569+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
28570
28571- ring->queue_index = vsi->alloc_queue_pairs + i;
28572- ring->reg_idx = vsi->base_queue + ring->queue_index;
28573- ring->ring_active = false;
28574- ring->vsi = vsi;
28575- ring->netdev = NULL;
28576- ring->dev = &pf->pdev->dev;
28577- ring->count = vsi->num_desc;
28578- ring->size = 0;
28579- ring->dcb_tc = 0;
28580- if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
28581- ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
28582- set_ring_xdp(ring);
28583- ring->tx_itr_setting = pf->tx_itr_default;
28584- vsi->xdp_rings[i] = ring++;
28585+ i40e_flush(hw);
28586
28587-setup_rx:
28588- ring->queue_index = i;
28589- ring->reg_idx = vsi->base_queue + i;
28590- ring->ring_active = false;
28591- ring->vsi = vsi;
28592- ring->netdev = vsi->netdev;
28593- ring->dev = &pf->pdev->dev;
28594- ring->count = vsi->num_desc;
28595- ring->size = 0;
28596- ring->dcb_tc = 0;
28597- ring->rx_itr_setting = pf->rx_itr_default;
28598- vsi->rx_rings[i] = ring;
28599+ i40e_irq_dynamic_enable_icr0(pf);
28600+
28601+ return err;
28602+}
28603+
28604+/**
28605+ * i40e_restore_interrupt_scheme - Restore the interrupt scheme
28606+ * @pf: private board data structure
28607+ *
28608+ * Restore the interrupt scheme that was cleared when we suspended the
28609+ * device. This should be called during resume to re-allocate the q_vectors
28610+ * and reacquire IRQs.
28611+ */
28612+static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
28613+{
28614+ int err, i;
28615+
28616+ /* We cleared the MSI and MSI-X flags when disabling the old interrupt
28617+ * scheme. We need to re-enable them here in order to attempt to
28618+ * re-acquire the MSI or MSI-X vectors.
28619+ */
28620+ pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
28621+
28622+ err = i40e_init_interrupt_scheme(pf);
28623+ if (err)
28624+ return err;
28625+
28626+ /* Now that we've re-acquired IRQs, we need to remap the vectors and
28627+ * rings together again.
28628+ */
28629+ for (i = 0; i < pf->num_alloc_vsi; i++) {
28630+ if (pf->vsi[i]) {
28631+ err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
28632+ if (err)
28633+ goto err_unwind;
28634+ i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
28635+ }
28636 }
28637
28638+ err = i40e_setup_misc_vector(pf);
28639+ if (err)
28640+ goto err_unwind;
28641+
28642+ if (pf->flags & I40E_FLAG_IWARP_ENABLED)
28643+ i40e_client_update_msix_info(pf);
28644+
28645 return 0;
28646
28647-err_out:
28648- i40e_vsi_clear_rings(vsi);
28649- return -ENOMEM;
28650+err_unwind:
28651+ while (i--) {
28652+ if (pf->vsi[i])
28653+ i40e_vsi_free_q_vectors(pf->vsi[i]);
28654+ }
28655+
28656+ return err;
28657 }
28658
28659 /**
28660- * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
28661- * @pf: board private structure
28662- * @vectors: the number of MSI-X vectors to request
28663+ * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
28664+ * @vsi: Pointer to vsi structure
28665+ * @seed: Buffer to store the hash keys
28666+ * @lut: Buffer to store the lookup table entries
28667+ * @lut_size: Size of buffer to store the lookup table entries
28668 *
28669- * Returns the number of vectors reserved, or error
28670- **/
28671-static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
28672+ * Return 0 on success, negative on failure
28673+ */
28674+static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
28675+ u8 *lut, u16 lut_size)
28676 {
28677- vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
28678- I40E_MIN_MSIX, vectors);
28679- if (vectors < 0) {
28680- dev_info(&pf->pdev->dev,
28681- "MSI-X vector reservation failed: %d\n", vectors);
28682- vectors = 0;
28683+ struct i40e_pf *pf = vsi->back;
28684+ struct i40e_hw *hw = &pf->hw;
28685+ int ret = 0;
28686+
28687+ if (seed) {
28688+ ret = i40e_aq_get_rss_key(hw, vsi->id,
28689+ (struct i40e_aqc_get_set_rss_key_data *)seed);
28690+ if (ret) {
28691+ dev_info(&pf->pdev->dev,
28692+ "Cannot get RSS key, err %s aq_err %s\n",
28693+ i40e_stat_str(&pf->hw, ret),
28694+ i40e_aq_str(&pf->hw,
28695+ pf->hw.aq.asq_last_status));
28696+ return ret;
28697+ }
28698 }
28699
28700- return vectors;
28701+ if (lut) {
28702+ bool pf_lut = (vsi->type == I40E_VSI_MAIN);
28703+
28704+ ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
28705+ if (ret) {
28706+ dev_info(&pf->pdev->dev,
28707+ "Cannot get RSS lut, err %s aq_err %s\n",
28708+ i40e_stat_str(&pf->hw, ret),
28709+ i40e_aq_str(&pf->hw,
28710+ pf->hw.aq.asq_last_status));
28711+ return ret;
28712+ }
28713+ }
28714+ return ret;
28715 }
28716
28717 /**
28718- * i40e_init_msix - Setup the MSIX capability
28719- * @pf: board private structure
28720- *
28721- * Work with the OS to set up the MSIX vectors needed.
28722+ * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
28723+ * @vsi: Pointer to vsi structure
28724+ * @seed: RSS hash seed
28725+ * @lut: Lookup table
28726+ * @lut_size: Lookup table size
28727 *
28728- * Returns the number of vectors reserved or negative on failure
28729+ * Returns 0 on success, negative on failure
28730 **/
28731-static int i40e_init_msix(struct i40e_pf *pf)
28732+static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
28733+ const u8 *lut, u16 lut_size)
28734 {
28735+ struct i40e_pf *pf = vsi->back;
28736 struct i40e_hw *hw = &pf->hw;
28737- int cpus, extra_vectors;
28738- int vectors_left;
28739- int v_budget, i;
28740- int v_actual;
28741- int iwarp_requested = 0;
28742-
28743- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
28744- return -ENODEV;
28745-
28746- /* The number of vectors we'll request will be comprised of:
28747- * - Add 1 for "other" cause for Admin Queue events, etc.
28748- * - The number of LAN queue pairs
28749- * - Queues being used for RSS.
28750- * We don't need as many as max_rss_size vectors.
28751- * use rss_size instead in the calculation since that
28752- * is governed by number of cpus in the system.
28753- * - assumes symmetric Tx/Rx pairing
28754- * - The number of VMDq pairs
28755- * - The CPU count within the NUMA node if iWARP is enabled
28756- * Once we count this up, try the request.
28757- *
28758- * If we can't get what we want, we'll simplify to nearly nothing
28759- * and try again. If that still fails, we punt.
28760- */
28761- vectors_left = hw->func_caps.num_msix_vectors;
28762- v_budget = 0;
28763+ u16 vf_id = vsi->vf_id;
28764+ u8 i;
28765
28766- /* reserve one vector for miscellaneous handler */
28767- if (vectors_left) {
28768- v_budget++;
28769- vectors_left--;
28770+ /* Fill out hash function seed */
28771+ if (seed) {
28772+ u32 *seed_dw = (u32 *)seed;
28773+ if (vsi->type == I40E_VSI_MAIN) {
28774+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
28775+ wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
28776+ } else if (vsi->type == I40E_VSI_SRIOV) {
28777+ for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
28778+ wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
28779+ } else {
28780+ dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
28781+ }
28782 }
28783
28784- /* reserve some vectors for the main PF traffic queues. Initially we
28785- * only reserve at most 50% of the available vectors, in the case that
28786- * the number of online CPUs is large. This ensures that we can enable
28787- * extra features as well. Once we've enabled the other features, we
28788- * will use any remaining vectors to reach as close as we can to the
28789- * number of online CPUs.
28790- */
28791- cpus = num_online_cpus();
28792- pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
28793- vectors_left -= pf->num_lan_msix;
28794+ if (lut) {
28795+ u32 *lut_dw = (u32 *)lut;
28796
28797- /* reserve one vector for sideband flow director */
28798- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
28799- if (vectors_left) {
28800- pf->num_fdsb_msix = 1;
28801- v_budget++;
28802- vectors_left--;
28803+ if (vsi->type == I40E_VSI_MAIN) {
28804+ if (lut_size != I40E_HLUT_ARRAY_SIZE)
28805+ return -EINVAL;
28806+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
28807+ wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
28808+ } else if (vsi->type == I40E_VSI_SRIOV) {
28809+ if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
28810+ return -EINVAL;
28811+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
28812+ wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
28813 } else {
28814- pf->num_fdsb_msix = 0;
28815+ dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
28816 }
28817 }
28818+ i40e_flush(hw);
28819
28820- /* can we reserve enough for iWARP? */
28821- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
28822- iwarp_requested = pf->num_iwarp_msix;
28823+ return 0;
28824+}
28825
28826- if (!vectors_left)
28827- pf->num_iwarp_msix = 0;
28828- else if (vectors_left < pf->num_iwarp_msix)
28829- pf->num_iwarp_msix = 1;
28830- v_budget += pf->num_iwarp_msix;
28831- vectors_left -= pf->num_iwarp_msix;
28832- }
28833+/**
28834+ * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
28835+ * @vsi: Pointer to VSI structure
28836+ * @seed: Buffer to store the keys
28837+ * @lut: Buffer to store the lookup table entries
28838+ * @lut_size: Size of buffer to store the lookup table entries
28839+ *
28840+ * Returns 0 on success, negative on failure
28841+ */
28842+static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
28843+ u8 *lut, u16 lut_size)
28844+{
28845+ struct i40e_pf *pf = vsi->back;
28846+ struct i40e_hw *hw = &pf->hw;
28847+ u16 i;
28848
28849- /* any vectors left over go for VMDq support */
28850- if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
28851- int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
28852- int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
28853+ if (seed) {
28854+ u32 *seed_dw = (u32 *)seed;
28855
28856- if (!vectors_left) {
28857- pf->num_vmdq_msix = 0;
28858- pf->num_vmdq_qps = 0;
28859- } else {
28860- /* if we're short on vectors for what's desired, we limit
28861- * the queues per vmdq. If this is still more than are
28862- * available, the user will need to change the number of
28863- * queues/vectors used by the PF later with the ethtool
28864- * channels command
28865- */
28866- if (vmdq_vecs < vmdq_vecs_wanted)
28867- pf->num_vmdq_qps = 1;
28868- pf->num_vmdq_msix = pf->num_vmdq_qps;
28869+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
28870+ seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
28871+ }
28872+ if (lut) {
28873+ u32 *lut_dw = (u32 *)lut;
28874
28875- v_budget += vmdq_vecs;
28876- vectors_left -= vmdq_vecs;
28877- }
28878+ if (lut_size != I40E_HLUT_ARRAY_SIZE)
28879+ return -EINVAL;
28880+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
28881+ lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
28882 }
28883
28884- /* On systems with a large number of SMP cores, we previously limited
28885- * the number of vectors for num_lan_msix to be at most 50% of the
28886- * available vectors, to allow for other features. Now, we add back
28887- * the remaining vectors. However, we ensure that the total
28888- * num_lan_msix will not exceed num_online_cpus(). To do this, we
28889- * calculate the number of vectors we can add without going over the
28890- * cap of CPUs. For systems with a small number of CPUs this will be
28891- * zero.
28892- */
28893- extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
28894- pf->num_lan_msix += extra_vectors;
28895- vectors_left -= extra_vectors;
28896+ return 0;
28897+}
28898+
28899+/**
28900+ * i40e_config_rss - Configure RSS keys and lut
28901+ * @vsi: Pointer to VSI structure
28902+ * @seed: RSS hash seed
28903+ * @lut: Lookup table
28904+ * @lut_size: Lookup table size
28905+ *
28906+ * Returns 0 on success, negative on failure
28907+ */
28908+int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
28909+{
28910+ struct i40e_pf *pf = vsi->back;
28911+
28912+ if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
28913+ return i40e_config_rss_aq(vsi, seed, lut, lut_size);
28914+ else
28915+ return i40e_config_rss_reg(vsi, seed, lut, lut_size);
28916+}
28917+
28918+/**
28919+ * i40e_get_rss - Get RSS keys and lut
28920+ * @vsi: Pointer to VSI structure
28921+ * @seed: Buffer to store the keys
28922+ * @lut: Buffer to store the lookup table entries
28923+ * @lut_size: Size of buffer to store the lookup table entries
28924+ *
28925+ * Returns 0 on success, negative on failure
28926+ */
28927+int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
28928+{
28929+ struct i40e_pf *pf = vsi->back;
28930+
28931+ if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
28932+ return i40e_get_rss_aq(vsi, seed, lut, lut_size);
28933+ else
28934+ return i40e_get_rss_reg(vsi, seed, lut, lut_size);
28935+}
28936+
28937+/**
28938+ * i40e_fill_rss_lut - Fill the RSS lookup table with default values
28939+ * @pf: Pointer to board private structure
28940+ * @lut: Lookup table
28941+ * @rss_table_size: Lookup table size
28942+ * @rss_size: Range of queue number for hashing
28943+ */
28944+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
28945+ u16 rss_table_size, u16 rss_size)
28946+{
28947+ u16 i;
28948
28949- WARN(vectors_left < 0,
28950- "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
28951+ for (i = 0; i < rss_table_size; i++) {
28952+ lut[i] = i % rss_size;
28953+ }
28954+}
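/*
 * Worked example, following directly from the "lut[i] = i % rss_size"
 * loop above: with rss_table_size = 512 and rss_size = 4 the table
 * becomes 0, 1, 2, 3, 0, 1, 2, 3, ..., so hashed flows are spread
 * round-robin across the first four queue pairs.
 */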
28955
28956- v_budget += pf->num_lan_msix;
28957- pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
28958- GFP_KERNEL);
28959- if (!pf->msix_entries)
28960- return -ENOMEM;
28961+/**
28962+ * i40e_pf_config_rss - Prepare for RSS if used
28963+ * @pf: board private structure
28964+ **/
28965+static int i40e_pf_config_rss(struct i40e_pf *pf)
28966+{
28967+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
28968+ u8 seed[I40E_HKEY_ARRAY_SIZE];
28969+ struct i40e_hw *hw = &pf->hw;
28970+ u32 reg_val;
28971+ u64 hena;
28972+ u8 *lut;
28973+ int ret;
28974
28975- for (i = 0; i < v_budget; i++)
28976- pf->msix_entries[i].entry = i;
28977- v_actual = i40e_reserve_msix_vectors(pf, v_budget);
28978+ /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
28979+ hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
28980+ ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
28981+ hena |= i40e_pf_get_default_rss_hena(pf);
28982
28983- if (v_actual < I40E_MIN_MSIX) {
28984- pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
28985- kfree(pf->msix_entries);
28986- pf->msix_entries = NULL;
28987- pci_disable_msix(pf->pdev);
28988- return -ENODEV;
28989+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
28990+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
28991
28992- } else if (v_actual == I40E_MIN_MSIX) {
28993- /* Adjust for minimal MSIX use */
28994- pf->num_vmdq_vsis = 0;
28995- pf->num_vmdq_qps = 0;
28996- pf->num_lan_qps = 1;
28997- pf->num_lan_msix = 1;
28998+ /* Determine the RSS table size based on the hardware capabilities */
28999+ reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
29000+ reg_val = (pf->rss_table_size == 512) ?
29001+ (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
29002+ (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
29003+ i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
29004
29005- } else if (!vectors_left) {
29006- /* If we have limited resources, we will start with no vectors
29007- * for the special features and then allocate vectors to some
29008- * of these features based on the policy and at the end disable
29009- * the features that did not get any vectors.
29010+ /* Determine the RSS size of the VSI */
29011+ if (!vsi->rss_size) {
29012+ u16 qcount;
29013+ /* If the firmware does something weird during VSI init, we
29014+ * could end up with zero TCs. Check for that to avoid
29015+ * divide-by-zero. It probably won't pass traffic, but it also
29016+ * won't panic.
29017 */
29018- int vec;
29019+ qcount = vsi->num_queue_pairs /
29020+ (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
29021+ vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
29022+ }
29023+ if (!vsi->rss_size)
29024+ return -EINVAL;
29025
29026- dev_info(&pf->pdev->dev,
29027- "MSI-X vector limit reached, attempting to redistribute vectors\n");
29028- /* reserve the misc vector */
29029- vec = v_actual - 1;
29030+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
29031+ if (!lut)
29032+ return -ENOMEM;
29033
29034- /* Scale vector usage down */
29035- pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
29036- pf->num_vmdq_vsis = 1;
29037- pf->num_vmdq_qps = 1;
29038+ /* Use user configured lut if there is one, otherwise use default */
29039+ if (vsi->rss_lut_user)
29040+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
29041+ else
29042+ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
29043
29044- /* partition out the remaining vectors */
29045- switch (vec) {
29046- case 2:
29047- pf->num_lan_msix = 1;
29048- break;
29049- case 3:
29050- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
29051- pf->num_lan_msix = 1;
29052- pf->num_iwarp_msix = 1;
29053- } else {
29054- pf->num_lan_msix = 2;
29055- }
29056- break;
29057- default:
29058- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
29059- pf->num_iwarp_msix = min_t(int, (vec / 3),
29060- iwarp_requested);
29061- pf->num_vmdq_vsis = min_t(int, (vec / 3),
29062- I40E_DEFAULT_NUM_VMDQ_VSI);
29063- } else {
29064- pf->num_vmdq_vsis = min_t(int, (vec / 2),
29065- I40E_DEFAULT_NUM_VMDQ_VSI);
29066- }
29067- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
29068- pf->num_fdsb_msix = 1;
29069- vec--;
29070- }
29071- pf->num_lan_msix = min_t(int,
29072- (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
29073- pf->num_lan_msix);
29074- pf->num_lan_qps = pf->num_lan_msix;
29075- break;
29076- }
29077- }
29078+ /* Use user configured hash key if there is one, otherwise
29079+ * use default.
29080+ */
29081+ if (vsi->rss_hkey_user)
29082+ memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
29083+ else
29084+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
29085+ ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
29086+ kfree(lut);
29087
29088- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
29089- (pf->num_fdsb_msix == 0)) {
29090- dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
29091- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
29092- }
29093- if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
29094- (pf->num_vmdq_msix == 0)) {
29095- dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
29096- pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
29097- }
29098+ return ret;
29099+}
29100
29101- if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
29102- (pf->num_iwarp_msix == 0)) {
29103- dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
29104- pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
29105- }
29106- i40e_debug(&pf->hw, I40E_DEBUG_INIT,
29107- "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
29108- pf->num_lan_msix,
29109- pf->num_vmdq_msix * pf->num_vmdq_vsis,
29110- pf->num_fdsb_msix,
29111- pf->num_iwarp_msix);
29112+/**
29113+ * i40e_clear_rss_config_user - clear the user configured RSS hash keys
29114+ * and lookup table
29115+ * @vsi: Pointer to VSI structure
29116+ */
29117+static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
29118+{
29119+ if (!vsi)
29120+ return;
29121
29122- return v_actual;
29123+ kfree(vsi->rss_hkey_user);
29124+ vsi->rss_hkey_user = NULL;
29125+
29126+ kfree(vsi->rss_lut_user);
29127+ vsi->rss_lut_user = NULL;
29128 }
29129
29130 /**
29131- * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
29132- * @vsi: the VSI being configured
29133- * @v_idx: index of the vector in the vsi struct
29134- * @cpu: cpu to be used on affinity_mask
29135+ * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
29136+ * @pf: board private structure
29137+ * @queue_count: the requested queue count for rss.
29138 *
29139- * We allocate one q_vector. If allocation fails we return -ENOMEM.
29140+ * returns 0 if rss is not enabled, if enabled returns the final rss queue
29141+ * count which may be different from the requested queue count.
29142+ * Note: expects to be called while under rtnl_lock()
29143 **/
29144-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
29145+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
29146 {
29147- struct i40e_q_vector *q_vector;
29148+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
29149+ int new_rss_size;
29150
29151- /* allocate q_vector */
29152- q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
29153- if (!q_vector)
29154- return -ENOMEM;
29155+ if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
29156+ return 0;
29157
29158- q_vector->vsi = vsi;
29159- q_vector->v_idx = v_idx;
29160- cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
29161+ new_rss_size = min_t(int, queue_count, pf->rss_size_max);
29162
29163- if (vsi->netdev)
29164- netif_napi_add(vsi->netdev, &q_vector->napi,
29165- i40e_napi_poll, NAPI_POLL_WEIGHT);
29166+ if (queue_count != vsi->num_queue_pairs) {
29167+ u16 qcount;
29168
29169- q_vector->rx.latency_range = I40E_LOW_LATENCY;
29170- q_vector->tx.latency_range = I40E_LOW_LATENCY;
29171+ vsi->req_queue_pairs = queue_count;
29172+ i40e_prep_for_reset(pf, true);
29173
29174- /* tie q_vector and vsi together */
29175- vsi->q_vectors[v_idx] = q_vector;
29176+ pf->alloc_rss_size = new_rss_size;
29177
29178- return 0;
29179+ i40e_reset_and_rebuild(pf, true, true);
29180+
29181+ /* Discard the user configured hash keys and lut, if less
29182+ * queues are enabled.
29183+ */
29184+ if (queue_count < vsi->rss_size) {
29185+ i40e_clear_rss_config_user(vsi);
29186+ dev_dbg(&pf->pdev->dev,
29187+ "discard user configured hash keys and lut\n");
29188+ }
29189+
29190+ /* Reset vsi->rss_size, as number of enabled queues changed */
29191+ qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
29192+ vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
29193+
29194+ i40e_pf_config_rss(pf);
29195+ }
29196+ dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
29197+ vsi->req_queue_pairs, pf->rss_size_max);
29198+ return pf->alloc_rss_size;
29199 }
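/*
 * Usage sketch (assumption: this is reached through the ethtool
 * set_channels path, e.g. "ethtool -L <iface> combined 8"): the requested
 * count is clamped to pf->rss_size_max, the PF is reset and rebuilt with
 * the new allocation, and any user-supplied RSS key/LUT is discarded when
 * the new count drops below the previous rss_size.
 */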
29200
29201 /**
29202- * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
29203- * @vsi: the VSI being configured
29204- *
29205- * We allocate one q_vector per queue interrupt. If allocation fails we
29206- * return -ENOMEM.
29207+ * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
29208+ * @pf: board private structure
29209 **/
29210-static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
29211+i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
29212 {
29213- struct i40e_pf *pf = vsi->back;
29214- int err, v_idx, num_q_vectors, current_cpu;
29215-
29216- /* if not MSIX, give the one vector only to the LAN VSI */
29217- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
29218- num_q_vectors = vsi->num_q_vectors;
29219- else if (vsi == pf->vsi[pf->lan_vsi])
29220- num_q_vectors = 1;
29221- else
29222- return -EINVAL;
29223+ i40e_status status;
29224+ bool min_valid, max_valid;
29225+ u32 max_bw, min_bw;
29226
29227- current_cpu = cpumask_first(cpu_online_mask);
29228+ status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
29229+ &min_valid, &max_valid);
29230
29231- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
29232- err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
29233- if (err)
29234- goto err_out;
29235- current_cpu = cpumask_next(current_cpu, cpu_online_mask);
29236- if (unlikely(current_cpu >= nr_cpu_ids))
29237- current_cpu = cpumask_first(cpu_online_mask);
29238+ if (!status) {
29239+ if (min_valid)
29240+ pf->min_bw = min_bw;
29241+ if (max_valid)
29242+ pf->max_bw = max_bw;
29243 }
29244
29245- return 0;
29246+ return status;
29247+}
29248
29249-err_out:
29250- while (v_idx--)
29251- i40e_free_q_vector(vsi, v_idx);
29252+/**
29253+ * i40e_set_partition_bw_setting - Set BW settings for this PF partition
29254+ * @pf: board private structure
29255+ **/
29256+i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
29257+{
29258+ struct i40e_aqc_configure_partition_bw_data bw_data;
29259+ i40e_status status;
29260+
29261+ /* Set the valid bit for this PF */
29262+ bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
29263+ bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
29264+ bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
29265+
29266+ /* Set the new bandwidths */
29267+ status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
29268
29269- return err;
29270+ return status;
29271 }
29272
29273 /**
29274- * i40e_init_interrupt_scheme - Determine proper interrupt scheme
29275- * @pf: board private structure to initialize
29276+ * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
29277+ * @pf: board private structure
29278 **/
29279-static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
29280+i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
29281 {
29282- int vectors = 0;
29283- ssize_t size;
29284+ /* Commit temporary BW setting to permanent NVM image */
29285+ enum i40e_admin_queue_err last_aq_status;
29286+ i40e_status ret;
29287+ u16 nvm_word;
29288
29289- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
29290- vectors = i40e_init_msix(pf);
29291- if (vectors < 0) {
29292- pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
29293- I40E_FLAG_IWARP_ENABLED |
29294- I40E_FLAG_RSS_ENABLED |
29295- I40E_FLAG_DCB_CAPABLE |
29296- I40E_FLAG_DCB_ENABLED |
29297- I40E_FLAG_SRIOV_ENABLED |
29298- I40E_FLAG_FD_SB_ENABLED |
29299- I40E_FLAG_FD_ATR_ENABLED |
29300- I40E_FLAG_VMDQ_ENABLED);
29301+ if (pf->hw.partition_id != 1) {
29302+ dev_info(&pf->pdev->dev,
29303+ "Commit BW only works on partition 1! This is partition %d",
29304+ pf->hw.partition_id);
29305+ ret = I40E_NOT_SUPPORTED;
29306+ goto bw_commit_out;
29307+ }
29308
29309- /* rework the queue expectations without MSIX */
29310- i40e_determine_queue_usage(pf);
29311- }
29312+ /* Acquire NVM for read access */
29313+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
29314+ last_aq_status = pf->hw.aq.asq_last_status;
29315+ if (ret) {
29316+ dev_info(&pf->pdev->dev,
29317+ "Cannot acquire NVM for read access, err %s aq_err %s\n",
29318+ i40e_stat_str(&pf->hw, ret),
29319+ i40e_aq_str(&pf->hw, last_aq_status));
29320+ goto bw_commit_out;
29321 }
29322
29323- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
29324- (pf->flags & I40E_FLAG_MSI_ENABLED)) {
29325- dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
29326- vectors = pci_enable_msi(pf->pdev);
29327- if (vectors < 0) {
29328- dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
29329- vectors);
29330- pf->flags &= ~I40E_FLAG_MSI_ENABLED;
29331- }
29332- vectors = 1; /* one MSI or Legacy vector */
29333+ /* Read word 0x10 of NVM - SW compatibility word 1 */
29334+ ret = i40e_aq_read_nvm(&pf->hw,
29335+ I40E_SR_NVM_CONTROL_WORD,
29336+ 0x10, sizeof(nvm_word), &nvm_word,
29337+ false, NULL);
29338+ /* Save off last admin queue command status before releasing
29339+ * the NVM
29340+ */
29341+ last_aq_status = pf->hw.aq.asq_last_status;
29342+ i40e_release_nvm(&pf->hw);
29343+ if (ret) {
29344+ dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
29345+ i40e_stat_str(&pf->hw, ret),
29346+ i40e_aq_str(&pf->hw, last_aq_status));
29347+ goto bw_commit_out;
29348 }
29349
29350- if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
29351- dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
29352+ /* Wait a bit for NVM release to complete */
29353+ msleep(50);
29354
29355- /* set up vector assignment tracking */
29356- size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
29357- pf->irq_pile = kzalloc(size, GFP_KERNEL);
29358- if (!pf->irq_pile) {
29359- dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
29360- return -ENOMEM;
29361+ /* Acquire NVM for write access */
29362+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
29363+ last_aq_status = pf->hw.aq.asq_last_status;
29364+ if (ret) {
29365+ dev_info(&pf->pdev->dev,
29366+ "Cannot acquire NVM for write access, err %s aq_err %s\n",
29367+ i40e_stat_str(&pf->hw, ret),
29368+ i40e_aq_str(&pf->hw, last_aq_status));
29369+ goto bw_commit_out;
29370 }
29371- pf->irq_pile->num_entries = vectors;
29372- pf->irq_pile->search_hint = 0;
29373-
29374- /* track first vector for misc interrupts, ignore return */
29375- (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
29376+ /* Write it back out unchanged to initiate update NVM,
29377+ * which will force a write of the shadow (alt) RAM to
29378+ * the NVM - thus storing the bandwidth values permanently.
29379+ */
29380+ ret = i40e_aq_update_nvm(&pf->hw,
29381+ I40E_SR_NVM_CONTROL_WORD,
29382+ 0x10, sizeof(nvm_word),
29383+ &nvm_word, true, 0, NULL);
29384+ /* Save off last admin queue command status before releasing
29385+ * the NVM
29386+ */
29387+ last_aq_status = pf->hw.aq.asq_last_status;
29388+ i40e_release_nvm(&pf->hw);
29389+ if (ret)
29390+ dev_info(&pf->pdev->dev,
29391+ "BW settings NOT SAVED, err %s aq_err %s\n",
29392+ i40e_stat_str(&pf->hw, ret),
29393+ i40e_aq_str(&pf->hw, last_aq_status));
29394+bw_commit_out:
29395
29396- return 0;
29397+ return ret;
29398 }
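/*
 * Flow summary, taken from the code above (no additional behaviour
 * implied): acquire the NVM for read, read SW compatibility word 0x10,
 * release, then re-acquire for write and write the same word back
 * unchanged; that update is what flushes the shadow (alt) RAM bandwidth
 * values into NVM. The commit is only permitted on partition 1.
 */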
29399
29400 /**
29401- * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
29402+ * i40e_is_total_port_shutdown_enabled - read nvm and return value
29403+ * if total port shutdown feature is enabled for this pf
29404 * @pf: board private structure
29405+ **/
29406+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
29407+{
29408+#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4)
29409+#define I40E_FEATURES_ENABLE_PTR 0x2A
29410+#define I40E_CURRENT_SETTING_PTR 0x2B
29411+#define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D
29412+#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
29413+#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
29414+#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
29415+ i40e_status read_status = I40E_SUCCESS;
29416+ u16 sr_emp_sr_settings_ptr = 0;
29417+ u16 features_enable = 0;
29418+ u16 link_behavior = 0;
29419+ bool ret = false;
29420+
29421+ read_status = i40e_read_nvm_word(&pf->hw,
29422+ I40E_SR_EMP_SR_SETTINGS_PTR,
29423+ &sr_emp_sr_settings_ptr);
29424+ if (read_status)
29425+ goto err_nvm;
29426+ read_status = i40e_read_nvm_word(&pf->hw,
29427+ sr_emp_sr_settings_ptr +
29428+ I40E_FEATURES_ENABLE_PTR,
29429+ &features_enable);
29430+ if (read_status)
29431+ goto err_nvm;
29432+ if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
29433+ read_status =
29434+ i40e_read_nvm_module_data(&pf->hw,
29435+ I40E_SR_EMP_SR_SETTINGS_PTR,
29436+ I40E_CURRENT_SETTING_PTR,
29437+ I40E_LINK_BEHAVIOR_WORD_OFFSET,
29438+ I40E_LINK_BEHAVIOR_WORD_LENGTH,
29439+ &link_behavior);
29440+ if (read_status)
29441+ goto err_nvm;
29442+ link_behavior >>=
29443+ (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
29444+ ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
29445+ }
29446+ return ret;
29447+
29448+err_nvm:
29449+ dev_warn(&pf->pdev->dev,
29450+ "Total Port Shutdown feature is off due to read nvm error:%d\n",
29451+ read_status);
29452+ return ret;
29453+}
29454+
29455+/**
29456+ * i40e_sw_init - Initialize general software structures (struct i40e_pf)
29457+ * @pf: board private structure to initialize
29458 *
29459- * This sets up the handler for MSIX 0, which is used to manage the
29460- * non-queue interrupts, e.g. AdminQ and errors. This is not used
29461- * when in MSI or Legacy interrupt mode.
29462+ * i40e_sw_init initializes the Adapter private data structure.
29463+ * Fields are initialized based on PCI device information and
29464+ * OS network device settings (MTU size).
29465 **/
29466-static int i40e_setup_misc_vector(struct i40e_pf *pf)
29467+static int i40e_sw_init(struct i40e_pf *pf)
29468 {
29469- struct i40e_hw *hw = &pf->hw;
29470 int err = 0;
29471+ int size;
29472+ u16 pow;
29473
29474- /* Only request the irq if this is the first time through, and
29475- * not when we're rebuilding after a Reset
29476+ pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
29477+ (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
29478+ if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
29479+ if (I40E_DEBUG_USER & debug)
29480+ pf->hw.debug_mask = debug;
29481+ pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
29482+ I40E_DEFAULT_MSG_ENABLE);
29483+ }
29484+
29485+ /* Set default capability flags */
29486+ pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
29487+ I40E_FLAG_MSI_ENABLED |
29488+ I40E_FLAG_MSIX_ENABLED;
29489+
29490+ /* Set default ITR */
29491+ pf->rx_itr_default = I40E_ITR_RX_DEF;
29492+ pf->tx_itr_default = I40E_ITR_TX_DEF;
29493+ /* Depending on PF configurations, it is possible that the RSS
29494+ * maximum might end up larger than the available queues
29495 */
29496- if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
29497- err = request_irq(pf->msix_entries[0].vector,
29498- i40e_intr, 0, pf->int_name, pf);
29499- if (err) {
29500+ pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
29501+ pf->alloc_rss_size = 1;
29502+ pf->rss_table_size = pf->hw.func_caps.rss_table_size;
29503+ pf->rss_size_max = min_t(int, pf->rss_size_max,
29504+ pf->hw.func_caps.num_tx_qp);
29505+
29506+ /* find the next higher power-of-2 of num cpus */
29507+ pow = roundup_pow_of_two(num_online_cpus());
29508+ pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
29509+
29510+ if (pf->hw.func_caps.rss) {
29511+ pf->flags |= I40E_FLAG_RSS_ENABLED;
29512+ pf->alloc_rss_size = min_t(int, pf->rss_size_max,
29513+ num_online_cpus());
29514+ }
29515+ /* MFP mode enabled */
29516+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
29517+ pf->flags |= I40E_FLAG_MFP_ENABLED;
29518+
29519+ dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
29520+ if (i40e_get_partition_bw_setting(pf)) {
29521+ dev_warn(&pf->pdev->dev,
29522+ "Could not get partition bw settings\n");
29523+ } else {
29524 dev_info(&pf->pdev->dev,
29525- "request_irq for %s failed: %d\n",
29526- pf->int_name, err);
29527- return -EFAULT;
29528+ "Partition BW Min = %8.8x, Max = %8.8x\n",
29529+ pf->min_bw, pf->max_bw);
29530+
29531+ /* nudge the Tx scheduler */
29532+ i40e_set_partition_bw_setting(pf);
29533 }
29534 }
29535
29536- i40e_enable_misc_int_causes(pf);
29537+ if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
29538+ (pf->hw.func_caps.fd_filters_best_effort > 0)) {
29539+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
29540+ pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
29541+ if (pf->flags & I40E_FLAG_MFP_ENABLED &&
29542+ pf->hw.num_partitions > 1)
29543+ dev_info(&pf->pdev->dev,
29544+ "Flow Director Sideband mode Disabled in MFP mode\n");
29545+ else
29546+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
29547+ pf->fdir_pf_filter_count =
29548+ pf->hw.func_caps.fd_filters_guaranteed;
29549+ pf->hw.fdir_shared_filter_count =
29550+ pf->hw.func_caps.fd_filters_best_effort;
29551+ }
29552
29553- /* associate no queues to the misc vector */
29554- wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
29555- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
29556+ if (pf->hw.mac.type == I40E_MAC_X722) {
29557+ pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
29558+ I40E_HW_128_QP_RSS_CAPABLE |
29559+ I40E_HW_ATR_EVICT_CAPABLE |
29560+ I40E_HW_WB_ON_ITR_CAPABLE |
29561+ I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
29562+ I40E_HW_NO_PCI_LINK_CHECK |
29563+ I40E_HW_USE_SET_LLDP_MIB |
29564+ I40E_HW_GENEVE_OFFLOAD_CAPABLE |
29565+ I40E_HW_PTP_L4_CAPABLE |
29566+ I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
29567+ I40E_HW_OUTER_UDP_CSUM_CAPABLE);
29568
29569- i40e_flush(hw);
29570+#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
29571+ if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
29572+ I40E_FDEVICT_PCTYPE_DEFAULT) {
29573+ dev_warn(&pf->pdev->dev, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
29574+ pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
29575+ }
29576+ } else if ((pf->hw.aq.api_maj_ver > 1) ||
29577+ ((pf->hw.aq.api_maj_ver == 1) &&
29578+ (pf->hw.aq.api_min_ver > 4))) {
29579+ /* Supported in FW API version higher than 1.4 */
29580+ pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
29581
29582- i40e_irq_dynamic_enable_icr0(pf, true);
29583+ /* supports mpls header skip and csum for following headers */
29584+ pf->hw_features |= I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE;
29585+ }
29586
29587- return err;
29588-}
29589+ /* Enable HW ATR eviction if possible */
29590+ if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
29591+ pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
29592
29593-/**
29594- * i40e_config_rss_aq - Prepare for RSS using AQ commands
29595- * @vsi: vsi structure
29596- * @seed: RSS hash seed
29597- **/
29598-static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
29599- u8 *lut, u16 lut_size)
29600-{
29601- struct i40e_pf *pf = vsi->back;
29602- struct i40e_hw *hw = &pf->hw;
29603- int ret = 0;
29604+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
29605+ (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
29606+ (pf->hw.aq.fw_maj_ver < 4))) {
29607+ pf->hw_features |= I40E_HW_RESTART_AUTONEG;
29608+ /* No DCB support for FW < v4.33 */
29609+ pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
29610+ }
29611+
29612+ /* Disable FW LLDP if FW < v4.3 */
29613+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
29614+ (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
29615+ (pf->hw.aq.fw_maj_ver < 4)))
29616+ pf->hw_features |= I40E_HW_STOP_FW_LLDP;
29617+
29618+ /* Use the FW Set LLDP MIB API if FW > v4.40 */
29619+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
29620+ (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
29621+ (pf->hw.aq.fw_maj_ver >= 5)))
29622+ pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
29623+
29624+ /* Enable PTP L4 if FW > v6.0 */
29625+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
29626+ (pf->hw.aq.fw_maj_ver >= 6))
29627+ pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
29628
29629- if (seed) {
29630- struct i40e_aqc_get_set_rss_key_data *seed_dw =
29631- (struct i40e_aqc_get_set_rss_key_data *)seed;
29632- ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
29633- if (ret) {
29634- dev_info(&pf->pdev->dev,
29635- "Cannot set RSS key, err %s aq_err %s\n",
29636- i40e_stat_str(hw, ret),
29637- i40e_aq_str(hw, hw->aq.asq_last_status));
29638- return ret;
29639- }
29640+ if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
29641+ pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
29642+ pf->flags |= I40E_FLAG_VMDQ_ENABLED;
29643+ pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
29644 }
29645- if (lut) {
29646- bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
29647
29648- ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
29649- if (ret) {
29650+ if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
29651+ pf->flags |= I40E_FLAG_IWARP_ENABLED;
29652+ /* IWARP needs one extra vector for CQP just like MISC.*/
29653+ pf->num_iwarp_msix = (int)num_online_cpus() + 1;
29654+ }
29655+ /* Stopping FW LLDP engine is supported on XL710 and X722
29656+ * starting from FW versions determined in i40e_init_adminq.
29657+ * Stopping the FW LLDP engine is not supported on XL710
29658+ * if NPAR is functioning so unset this hw flag in this case.
29659+ */
29660+ if (pf->hw.mac.type == I40E_MAC_XL710 &&
29661+ pf->hw.func_caps.npar_enable &&
29662+ (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
29663+ pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
29664+
29665+#ifndef HAVE_SWIOTLB_SKIP_CPU_SYNC
29666+ /* force legacy Rx if SKIP_CPU_SYNC is not supported */
29667+ pf->flags |= I40E_FLAG_LEGACY_RX;
29668+#endif
29669+#ifdef CONFIG_PCI_IOV
29670+ if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
29671+#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE)
29672+ pf->num_req_vfs = 0;
29673+ if (max_vfs[pf->instance] > 0 &&
29674+ max_vfs[pf->instance] <= pf->hw.func_caps.num_vfs) {
29675+ pf->flags |= I40E_FLAG_SRIOV_ENABLED;
29676+ /* assign number of SR-IOV VFs */
29677+ pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
29678+ pf->num_req_vfs = max_vfs[pf->instance];
29679+ } else if (max_vfs[pf->instance] == 0) {
29680 dev_info(&pf->pdev->dev,
29681- "Cannot set RSS lut, err %s aq_err %s\n",
29682- i40e_stat_str(hw, ret),
29683- i40e_aq_str(hw, hw->aq.asq_last_status));
29684- return ret;
29685+ " SR-IOV is disabled, Module Parameter max_vfs value %d = disabled\n",
29686+ max_vfs[pf->instance]);
29687+ } else if (max_vfs[pf->instance] != -1) {
29688+ dev_err(&pf->pdev->dev,
29689+ "Module Parameter max_vfs value %d is out of range. Maximum value for the device: %d - resetting to zero\n",
29690+ max_vfs[pf->instance],
29691+ pf->hw.func_caps.num_vfs);
29692 }
29693+#else
29694+ pf->flags |= I40E_FLAG_SRIOV_ENABLED;
29695+ pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
29696+ pf->num_req_vfs = min_t(int,
29697+ pf->hw.func_caps.num_vfs,
29698+ I40E_MAX_VF_COUNT);
29699+#endif /* HAVE_SRIOV_CONFIGURE */
29700 }
29701- return ret;
29702-}
29703+#endif /* CONFIG_PCI_IOV */
29704+ pf->eeprom_version = 0xDEAD;
29705+ pf->lan_veb = I40E_NO_VEB;
29706+ pf->lan_vsi = I40E_NO_VSI;
29707
29708-/**
29709- * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
29710- * @vsi: Pointer to vsi structure
29711- * @seed: Buffter to store the hash keys
29712- * @lut: Buffer to store the lookup table entries
29713- * @lut_size: Size of buffer to store the lookup table entries
29714- *
29715- * Return 0 on success, negative on failure
29716- */
29717-static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
29718- u8 *lut, u16 lut_size)
29719-{
29720- struct i40e_pf *pf = vsi->back;
29721- struct i40e_hw *hw = &pf->hw;
29722- int ret = 0;
29723+ /* By default FW has this off for performance reasons */
29724+ pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
29725
29726- if (seed) {
29727- ret = i40e_aq_get_rss_key(hw, vsi->id,
29728- (struct i40e_aqc_get_set_rss_key_data *)seed);
29729- if (ret) {
29730- dev_info(&pf->pdev->dev,
29731- "Cannot get RSS key, err %s aq_err %s\n",
29732- i40e_stat_str(&pf->hw, ret),
29733- i40e_aq_str(&pf->hw,
29734- pf->hw.aq.asq_last_status));
29735- return ret;
29736- }
29737+ /* set up queue assignment tracking */
29738+ size = sizeof(struct i40e_lump_tracking)
29739+ + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
29740+ pf->qp_pile = kzalloc(size, GFP_KERNEL);
29741+ if (!pf->qp_pile) {
29742+ err = -ENOMEM;
29743+ goto sw_init_done;
29744 }
29745+ pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
29746+ pf->qp_pile->search_hint = 0;
29747
29748- if (lut) {
29749- bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
29750+ pf->tx_timeout_recovery_level = 1;
29751
29752- ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
29753- if (ret) {
29754- dev_info(&pf->pdev->dev,
29755- "Cannot get RSS lut, err %s aq_err %s\n",
29756- i40e_stat_str(&pf->hw, ret),
29757- i40e_aq_str(&pf->hw,
29758- pf->hw.aq.asq_last_status));
29759- return ret;
29760- }
29761+ if (pf->hw.mac.type != I40E_MAC_X722 &&
29762+ i40e_is_total_port_shutdown_enabled(pf)) {
29763+ /* Link down on close must be on when total port shutdown
29764+ * is enabled for a given port
29765+ */
29766+ pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN
29767+ | I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
29768+ dev_info(&pf->pdev->dev,
29769+ "Total Port Shutdown is enabled, link-down-on-close forced on\n");
29770 }
29771
29772- return ret;
29773+ /* Add default values for ingress and egress vlan */
29774+ pf->ingress_vlan = I40E_NO_VF_MIRROR;
29775+ pf->egress_vlan = I40E_NO_VF_MIRROR;
29776+
29777+ mutex_init(&pf->switch_mutex);
29778+
29779+sw_init_done:
29780+ return err;
29781 }
29782
29783+#ifdef HAVE_NDO_SET_FEATURES
29784+#endif
29785 /**
29786- * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
29787- * @vsi: VSI structure
29788+ * i40e_set_ntuple - set the ntuple feature flag and take action
29789+ * @pf: board private structure to initialize
29790+ * @features: the feature set that the stack is suggesting
29791+ *
29792+ * returns a bool to indicate if reset needs to happen
29793 **/
29794-static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
29795+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
29796 {
29797- u8 seed[I40E_HKEY_ARRAY_SIZE];
29798- struct i40e_pf *pf = vsi->back;
29799- u8 *lut;
29800- int ret;
29801-
29802- if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
29803- return 0;
29804-
29805- if (!vsi->rss_size)
29806- vsi->rss_size = min_t(int, pf->alloc_rss_size,
29807- vsi->num_queue_pairs);
29808- if (!vsi->rss_size)
29809- return -EINVAL;
29810+ bool need_reset = false;
29811
29812- lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
29813- if (!lut)
29814- return -ENOMEM;
29815- /* Use the user configured hash keys and lookup table if there is one,
29816- * otherwise use default
29817+ /* Check if Flow Director n-tuple support was enabled or disabled. If
29818+ * the state changed, we need to reset.
29819 */
29820- if (vsi->rss_lut_user)
29821- memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
29822- else
29823- i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
29824- if (vsi->rss_hkey_user)
29825- memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
29826- else
29827- netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
29828- ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
29829- kfree(lut);
29830-
29831- return ret;
29832+ if (features & NETIF_F_NTUPLE) {
29833+ /* Enable filters and mark for reset */
29834+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
29835+ need_reset = true;
29836+ /* enable FD_SB only if there is MSI-X vector and no cloud
29837+ * filters exist
29838+ */
29839+ if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
29840+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
29841+ pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
29842+ }
29843+ } else {
29844+ /* turn off filters, mark for reset and clear SW filter list */
29845+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
29846+ need_reset = true;
29847+ i40e_fdir_filter_exit(pf);
29848+ i40e_cloud_filter_exit(pf);
29849+ }
29850+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
29851+ clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
29852+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
29853+ /* reset fd counters */
29854+ pf->fd_add_err = pf->fd_atr_cnt = 0;
29855+ /* if ATR was auto disabled it can be re-enabled. */
29856+ if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
29857+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
29858+ (I40E_DEBUG_FD & pf->hw.debug_mask))
29859+ dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
29860+ }
29861+ return need_reset;
29862 }
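/*
 * Usage sketch (assumption: NETIF_F_NTUPLE is normally toggled with
 * "ethtool -K <iface> ntuple on|off"): enabling it re-arms sideband Flow
 * Director when MSI-X vectors are available and no cloud filters exist;
 * disabling it flushes the Flow Director and cloud filter lists and
 * returns true so the caller schedules a PF reset.
 */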
29863
29864+#ifdef HAVE_NDO_SET_FEATURES
29865 /**
29866- * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
29867- * @vsi: Pointer to vsi structure
29868- * @seed: RSS hash seed
29869- * @lut: Lookup table
29870- * @lut_size: Lookup table size
29871- *
29872- * Returns 0 on success, negative on failure
29873+ * i40e_clear_rss_lut - clear the rx hash lookup table
29874+ * @vsi: the VSI being configured
29875 **/
29876-static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
29877- const u8 *lut, u16 lut_size)
29878+static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
29879 {
29880 struct i40e_pf *pf = vsi->back;
29881 struct i40e_hw *hw = &pf->hw;
29882 u16 vf_id = vsi->vf_id;
29883 u8 i;
29884
29885- /* Fill out hash function seed */
29886- if (seed) {
29887- u32 *seed_dw = (u32 *)seed;
29888-
29889- if (vsi->type == I40E_VSI_MAIN) {
29890- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
29891- wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
29892- } else if (vsi->type == I40E_VSI_SRIOV) {
29893- for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
29894- wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
29895- } else {
29896- dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
29897- }
29898- }
29899-
29900- if (lut) {
29901- u32 *lut_dw = (u32 *)lut;
29902-
29903- if (vsi->type == I40E_VSI_MAIN) {
29904- if (lut_size != I40E_HLUT_ARRAY_SIZE)
29905- return -EINVAL;
29906- for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
29907- wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
29908- } else if (vsi->type == I40E_VSI_SRIOV) {
29909- if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
29910- return -EINVAL;
29911- for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
29912- wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
29913- } else {
29914- dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
29915- }
29916+ if (vsi->type == I40E_VSI_MAIN) {
29917+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
29918+ wr32(hw, I40E_PFQF_HLUT(i), 0);
29919+ } else if (vsi->type == I40E_VSI_SRIOV) {
29920+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
29921+ i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
29922+ } else {
29923+ dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
29924 }
29925- i40e_flush(hw);
29926-
29927- return 0;
29928 }
29929
29930 /**
29931- * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
29932- * @vsi: Pointer to VSI structure
29933- * @seed: Buffer to store the keys
29934- * @lut: Buffer to store the lookup table entries
29935- * @lut_size: Size of buffer to store the lookup table entries
29936- *
29937- * Returns 0 on success, negative on failure
29938- */
29939-static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
29940- u8 *lut, u16 lut_size)
29941+ * i40e_set_features - set the netdev feature flags
29942+ * @netdev: ptr to the netdev being adjusted
29943+ * @features: the feature set that the stack is suggesting
29944+ * Note: expects to be called while under rtnl_lock()
29945+ **/
29946+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
29947+static int i40e_set_features(struct net_device *netdev, u32 features)
29948+#else
29949+static int i40e_set_features(struct net_device *netdev,
29950+ netdev_features_t features)
29951+#endif
29952 {
29953+ struct i40e_netdev_priv *np = netdev_priv(netdev);
29954+ struct i40e_vsi *vsi = np->vsi;
29955 struct i40e_pf *pf = vsi->back;
29956- struct i40e_hw *hw = &pf->hw;
29957- u16 i;
29958+ bool need_reset;
29959
29960- if (seed) {
29961- u32 *seed_dw = (u32 *)seed;
29962+ if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
29963+ i40e_pf_config_rss(pf);
29964+ else if (!(features & NETIF_F_RXHASH) &&
29965+ netdev->features & NETIF_F_RXHASH)
29966+ i40e_clear_rss_lut(vsi);
29967
29968- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
29969- seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
29970- }
29971- if (lut) {
29972- u32 *lut_dw = (u32 *)lut;
29973+#ifdef NETIF_F_HW_VLAN_CTAG_RX
29974+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
29975+#else
29976+ if (features & NETIF_F_HW_VLAN_RX)
29977+#endif
29978+ i40e_vlan_stripping_enable(vsi);
29979+ else
29980+ i40e_vlan_stripping_disable(vsi);
29981
29982- if (lut_size != I40E_HLUT_ARRAY_SIZE)
29983- return -EINVAL;
29984- for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
29985- lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
29986+#ifdef NETIF_F_HW_TC
29987+ if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
29988+ dev_err(&pf->pdev->dev,
29989+ "Offloaded tc filters active, can't turn hw_tc_offload off");
29990+ return -EINVAL;
29991 }
29992+#endif
29993+ need_reset = i40e_set_ntuple(pf, features);
29994+
29995+ if (need_reset)
29996+ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
29997
29998 return 0;
29999 }
30000
30001+#endif /* HAVE_NDO_SET_FEATURES */
30002 /**
30003- * i40e_config_rss - Configure RSS keys and lut
30004- * @vsi: Pointer to VSI structure
30005- * @seed: RSS hash seed
30006- * @lut: Lookup table
30007- * @lut_size: Lookup table size
30008+ * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
30009+ * @pf: board private structure
30010+ * @port: The UDP port to look up
30011 *
30012- * Returns 0 on success, negative on failure
30013- */
30014-int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
30015+ * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
30016+ **/
30017+static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
30018 {
30019- struct i40e_pf *pf = vsi->back;
30020+ u8 i;
30021
30022- if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
30023- return i40e_config_rss_aq(vsi, seed, lut, lut_size);
30024- else
30025- return i40e_config_rss_reg(vsi, seed, lut, lut_size);
30026+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
30027+ /* Do not report ports with pending deletions as
30028+ * being available.
30029+ */
30030+ if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
30031+ continue;
30032+ if (pf->udp_ports[i].port == port)
30033+ return i;
30034+ }
30035+
30036+ return i;
30037 }
30038
30039 /**
30040- * i40e_get_rss - Get RSS keys and lut
30041- * @vsi: Pointer to VSI structure
30042- * @seed: Buffer to store the keys
30043- * @lut: Buffer to store the lookup table entries
30044- * lut_size: Size of buffer to store the lookup table entries
30045- *
30046- * Returns 0 on success, negative on failure
30047- */
30048-int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
30049+ * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
30050+ * @netdev: This physical port's netdev
30051+ * @ti: Tunnel endpoint information
30052+ **/
30053+__maybe_unused
30054+static void i40e_udp_tunnel_add(struct net_device *netdev,
30055+ struct udp_tunnel_info *ti)
30056 {
30057+ struct i40e_netdev_priv *np = netdev_priv(netdev);
30058+ struct i40e_vsi *vsi = np->vsi;
30059 struct i40e_pf *pf = vsi->back;
30060+ u16 port = ntohs(ti->port);
30061+ u8 next_idx;
30062+ u8 idx;
30063
30064- if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
30065- return i40e_get_rss_aq(vsi, seed, lut, lut_size);
30066- else
30067- return i40e_get_rss_reg(vsi, seed, lut, lut_size);
30068-}
30069+ idx = i40e_get_udp_port_idx(pf, port);
30070
30071-/**
30072- * i40e_fill_rss_lut - Fill the RSS lookup table with default values
30073- * @pf: Pointer to board private structure
30074- * @lut: Lookup table
30075- * @rss_table_size: Lookup table size
30076- * @rss_size: Range of queue number for hashing
30077- */
30078-void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
30079- u16 rss_table_size, u16 rss_size)
30080-{
30081- u16 i;
30082+ /* Check if port already exists */
30083+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
30084+ netdev_info(netdev, "port %d already offloaded\n", port);
30085+ return;
30086+ }
30087
30088- for (i = 0; i < rss_table_size; i++)
30089- lut[i] = i % rss_size;
30090+ /* Now check if there is space to add the new port */
30091+ next_idx = i40e_get_udp_port_idx(pf, 0);
30092+
30093+ if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
30094+ netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
30095+ port);
30096+ return;
30097+ }
30098+
30099+ switch (ti->type) {
30100+ case UDP_TUNNEL_TYPE_VXLAN:
30101+ pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
30102+ break;
30103+ case UDP_TUNNEL_TYPE_GENEVE:
30104+ if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
30105+ return;
30106+ pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
30107+ break;
30108+ default:
30109+ return;
30110+ }
30111+
30112+ /* New port: add it and mark its index in the bitmap */
30113+ pf->udp_ports[next_idx].port = port;
30114+ pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
30115+ pf->pending_udp_bitmap |= BIT_ULL(next_idx);
30116+ set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
30117 }
30118
30119 /**
30120- * i40e_pf_config_rss - Prepare for RSS if used
30121- * @pf: board private structure
30122+ * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
30123+ * @netdev: This physical port's netdev
30124+ * @ti: Tunnel endpoint information
30125 **/
30126-static int i40e_pf_config_rss(struct i40e_pf *pf)
30127+__maybe_unused
30128+static void i40e_udp_tunnel_del(struct net_device *netdev,
30129+ struct udp_tunnel_info *ti)
30130 {
30131- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
30132- u8 seed[I40E_HKEY_ARRAY_SIZE];
30133- u8 *lut;
30134- struct i40e_hw *hw = &pf->hw;
30135- u32 reg_val;
30136- u64 hena;
30137- int ret;
30138-
30139- /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
30140- hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
30141- ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
30142- hena |= i40e_pf_get_default_rss_hena(pf);
30143-
30144- i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
30145- i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
30146+ struct i40e_netdev_priv *np = netdev_priv(netdev);
30147+ struct i40e_vsi *vsi = np->vsi;
30148+ struct i40e_pf *pf = vsi->back;
30149+ u16 port = ntohs(ti->port);
30150+ u8 idx;
30151
30152- /* Determine the RSS table size based on the hardware capabilities */
30153- reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
30154- reg_val = (pf->rss_table_size == 512) ?
30155- (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
30156- (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
30157- i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
30158+ idx = i40e_get_udp_port_idx(pf, port);
30159
30160- /* Determine the RSS size of the VSI */
30161- if (!vsi->rss_size) {
30162- u16 qcount;
30163+ /* Check if port already exists */
30164+ if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
30165+ goto not_found;
30166
30167- qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
30168- vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
30169+ switch (ti->type) {
30170+ case UDP_TUNNEL_TYPE_VXLAN:
30171+ if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
30172+ goto not_found;
30173+ break;
30174+ case UDP_TUNNEL_TYPE_GENEVE:
30175+ if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
30176+ return;
30177+ if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
30178+ goto not_found;
30179+ break;
30180+ default:
30181+ goto not_found;
30182 }
30183- if (!vsi->rss_size)
30184- return -EINVAL;
30185-
30186- lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
30187- if (!lut)
30188- return -ENOMEM;
30189
30190- /* Use user configured lut if there is one, otherwise use default */
30191- if (vsi->rss_lut_user)
30192- memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
30193- else
30194- i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
30195+ /* if port exists, set it to 0 (mark for deletion)
30196+ * and make it pending
30197+ */
30198+ pf->udp_ports[idx].port = 0;
30199
30200- /* Use user configured hash key if there is one, otherwise
30201- * use default.
30202+ /* Toggle pending bit instead of setting it. This way if we are
30203+ * deleting a port that has yet to be added we just clear the pending
30204+ * bit and don't have to worry about it.
30205 */
30206- if (vsi->rss_hkey_user)
30207- memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
30208- else
30209- netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
30210- ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
30211- kfree(lut);
30212+ pf->pending_udp_bitmap ^= BIT_ULL(idx);
30213+ set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
30214
30215- return ret;
30216+ return;
30217+not_found:
30218+ netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
30219+ port);
30220 }
30221
30222+#if defined(HAVE_VXLAN_RX_OFFLOAD) && !defined(HAVE_UDP_ENC_RX_OFFLOAD)
30223+#if IS_ENABLED(CONFIG_VXLAN)
30224 /**
30225- * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
30226- * @pf: board private structure
30227- * @queue_count: the requested queue count for rss.
30228- *
30229- * returns 0 if rss is not enabled, if enabled returns the final rss queue
30230- * count which may be different from the requested queue count.
30231- * Note: expects to be called while under rtnl_lock()
30232+ * i40e_add_vxlan_port - Get notifications about vxlan ports that come up
30233+ * @netdev: This physical port's netdev
30234+ * @sa_family: Socket Family that vxlan is notifying us about
30235+ * @port: New UDP port number that vxlan started listening to
30236 **/
30237-int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
30238+static void i40e_add_vxlan_port(struct net_device *netdev,
30239+ sa_family_t sa_family, __be16 port)
30240 {
30241- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
30242- int new_rss_size;
30243-
30244- if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
30245- return 0;
30246-
30247- new_rss_size = min_t(int, queue_count, pf->rss_size_max);
30248-
30249- if (queue_count != vsi->num_queue_pairs) {
30250- u16 qcount;
30251+ struct udp_tunnel_info ti = {
30252+ .type = UDP_TUNNEL_TYPE_VXLAN,
30253+ .sa_family = sa_family,
30254+ .port = port,
30255+ };
30256
30257- vsi->req_queue_pairs = queue_count;
30258- i40e_prep_for_reset(pf, true);
30259-
30260- pf->alloc_rss_size = new_rss_size;
30261-
30262- i40e_reset_and_rebuild(pf, true, true);
30263-
30264- /* Discard the user configured hash keys and lut, if less
30265- * queues are enabled.
30266- */
30267- if (queue_count < vsi->rss_size) {
30268- i40e_clear_rss_config_user(vsi);
30269- dev_dbg(&pf->pdev->dev,
30270- "discard user configured hash keys and lut\n");
30271- }
30272+ i40e_udp_tunnel_add(netdev, &ti);
30273+}
30274
30275- /* Reset vsi->rss_size, as number of enabled queues changed */
30276- qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
30277- vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
30278+/**
30279+ * i40e_del_vxlan_port - Get notifications about vxlan ports that go away
30280+ * @netdev: This physical port's netdev
30281+ * @sa_family: Socket Family that vxlan is notifying us about
30282+ * @port: UDP port number that vxlan stopped listening to
30283+ **/
30284+static void i40e_del_vxlan_port(struct net_device *netdev,
30285+ sa_family_t sa_family, __be16 port)
30286+{
30287+ struct udp_tunnel_info ti = {
30288+ .type = UDP_TUNNEL_TYPE_VXLAN,
30289+ .sa_family = sa_family,
30290+ .port = port,
30291+ };
30292
30293- i40e_pf_config_rss(pf);
30294- }
30295- dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
30296- vsi->req_queue_pairs, pf->rss_size_max);
30297- return pf->alloc_rss_size;
30298+ i40e_udp_tunnel_del(netdev, &ti);
30299 }
30300-
30301+#endif /* CONFIG_VXLAN */
30302+#endif /* HAVE_VXLAN_RX_OFFLOAD && !HAVE_UDP_ENC_RX_OFFLOAD */
30303+#if defined(HAVE_GENEVE_RX_OFFLOAD) && !defined(HAVE_UDP_ENC_RX_OFFLOAD)
30304+#if IS_ENABLED(CONFIG_GENEVE)
30305 /**
30306- * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
30307- * @pf: board private structure
30308+ * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
30309+ * @netdev: This physical port's netdev
30310+ * @sa_family: Socket Family that GENEVE is notifying us about
30311+ * @port: New UDP port number that GENEVE started listening to
30312 **/
30313-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
30314+static void i40e_add_geneve_port(struct net_device *netdev,
30315+ sa_family_t sa_family, __be16 port)
30316 {
30317- i40e_status status;
30318- bool min_valid, max_valid;
30319- u32 max_bw, min_bw;
30320+ struct udp_tunnel_info ti = {
30321+ .type = UDP_TUNNEL_TYPE_GENEVE,
30322+ .sa_family = sa_family,
30323+ .port = port,
30324+ };
30325
30326- status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
30327- &min_valid, &max_valid);
30328+ i40e_udp_tunnel_add(netdev, &ti);
30329+}
30330
30331- if (!status) {
30332- if (min_valid)
30333- pf->min_bw = min_bw;
30334- if (max_valid)
30335- pf->max_bw = max_bw;
30336- }
30337+/**
30338+ * i40e_del_geneve_port - Get notifications about GENEVE ports that go away
30339+ * @netdev: This physical port's netdev
30340+ * @sa_family: Socket Family that GENEVE is notifying us about
30341+ * @port: UDP port number that GENEVE stopped listening to
30342+ **/
30343+static void i40e_del_geneve_port(struct net_device *netdev,
30344+ sa_family_t sa_family, __be16 port)
30345+{
30346+ struct udp_tunnel_info ti = {
30347+ .type = UDP_TUNNEL_TYPE_GENEVE,
30348+ .sa_family = sa_family,
30349+ .port = port,
30350+ };
30351
30352- return status;
30353+ i40e_udp_tunnel_del(netdev, &ti);
30354 }
30355
30356-/**
30357- * i40e_set_partition_bw_setting - Set BW settings for this PF partition
30358- * @pf: board private structure
30359- **/
30360-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
30361+#endif /* CONFIG_GENEVE */
30362+#endif /* HAVE_GENEVE_RX_OFFLOAD && !HAVE_UDP_ENC_RX_OFFLOAD */
30363+#ifdef HAVE_NDO_GET_PHYS_PORT_ID
30364+static int i40e_get_phys_port_id(struct net_device *netdev,
30365+ struct netdev_phys_item_id *ppid)
30366 {
30367- struct i40e_aqc_configure_partition_bw_data bw_data;
30368- i40e_status status;
30369+ struct i40e_netdev_priv *np = netdev_priv(netdev);
30370+ struct i40e_pf *pf = np->vsi->back;
30371+ struct i40e_hw *hw = &pf->hw;
30372
30373- /* Set the valid bit for this PF */
30374- bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
30375- bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
30376- bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
30377+ if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
30378+ return -EOPNOTSUPP;
30379
30380- /* Set the new bandwidths */
30381- status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
30382+ ppid->id_len = min_t(int, sizeof(hw->mac.port_addr),
30383+ sizeof(ppid->id));
30384+ memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
30385
30386- return status;
30387+ return 0;
30388 }
30389
30390+#endif /* HAVE_NDO_GET_PHYS_PORT_ID */
30391 /**
30392- * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
30393- * @pf: board private structure
30394- **/
30395-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
30396+ * i40e_ndo_fdb_add - add an entry to the hardware database
30397+ * @ndm: the input from the stack
30398+ * @tb: pointer to array of nladdr (unused)
30399+ * @dev: the net device pointer
30400+ * @addr: the MAC address entry being added
30401+ * @vid: VLAN ID
30402+ * @flags: instructions from stack about fdb operation
30403+ * @extack: netdev extended ack structure
30404+ */
30405+#ifdef HAVE_FDB_OPS
30406+#if defined(HAVE_NDO_FDB_ADD_EXTACK)
30407+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
30408+ struct net_device *dev, const unsigned char *addr,
30409+ u16 vid, u16 flags, struct netlink_ext_ack *extack)
30410+#elif defined(HAVE_NDO_FDB_ADD_VID)
30411+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
30412+ struct net_device *dev, const unsigned char *addr,
30413+ u16 vid, u16 flags)
30414+#elif defined(HAVE_NDO_FDB_ADD_NLATTR)
30415+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
30416+ struct net_device *dev, const unsigned char *addr,
30417+ u16 flags)
30418+#elif defined(USE_CONST_DEV_UC_CHAR)
30419+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct net_device *dev,
30420+ const unsigned char *addr, u16 flags)
30421+#else
30422+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct net_device *dev,
30423+ unsigned char *addr, u16 flags)
30424+#endif
30425 {
30426- /* Commit temporary BW setting to permanent NVM image */
30427- enum i40e_admin_queue_err last_aq_status;
30428- i40e_status ret;
30429- u16 nvm_word;
30430-
30431- if (pf->hw.partition_id != 1) {
30432- dev_info(&pf->pdev->dev,
30433- "Commit BW only works on partition 1! This is partition %d",
30434- pf->hw.partition_id);
30435- ret = I40E_NOT_SUPPORTED;
30436- goto bw_commit_out;
30437- }
30438+ struct i40e_netdev_priv *np = netdev_priv(dev);
30439+ struct i40e_pf *pf = np->vsi->back;
30440+ int err = 0;
30441
30442- /* Acquire NVM for read access */
30443- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
30444- last_aq_status = pf->hw.aq.asq_last_status;
30445- if (ret) {
30446- dev_info(&pf->pdev->dev,
30447- "Cannot acquire NVM for read access, err %s aq_err %s\n",
30448- i40e_stat_str(&pf->hw, ret),
30449- i40e_aq_str(&pf->hw, last_aq_status));
30450- goto bw_commit_out;
30451- }
30452+ if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
30453+ return -EOPNOTSUPP;
30454
30455- /* Read word 0x10 of NVM - SW compatibility word 1 */
30456- ret = i40e_aq_read_nvm(&pf->hw,
30457- I40E_SR_NVM_CONTROL_WORD,
30458- 0x10, sizeof(nvm_word), &nvm_word,
30459- false, NULL);
30460- /* Save off last admin queue command status before releasing
30461- * the NVM
30462+ /* Hardware does not support aging addresses so if a
30463+ * ndm_state is given only allow permanent addresses
30464 */
30465- last_aq_status = pf->hw.aq.asq_last_status;
30466- i40e_release_nvm(&pf->hw);
30467- if (ret) {
30468- dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
30469- i40e_stat_str(&pf->hw, ret),
30470- i40e_aq_str(&pf->hw, last_aq_status));
30471- goto bw_commit_out;
30472+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
30473+ netdev_info(dev, "FDB only supports static addresses\n");
30474+ return -EINVAL;
30475 }
30476
30477- /* Wait a bit for NVM release to complete */
30478- msleep(50);
30479+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
30480+ err = dev_uc_add_excl(dev, addr);
30481+ else if (is_multicast_ether_addr(addr))
30482+ err = dev_mc_add_excl(dev, addr);
30483+ else
30484+ err = -EINVAL;
30485
30486- /* Acquire NVM for write access */
30487- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
30488- last_aq_status = pf->hw.aq.asq_last_status;
30489- if (ret) {
30490- dev_info(&pf->pdev->dev,
30491- "Cannot acquire NVM for write access, err %s aq_err %s\n",
30492- i40e_stat_str(&pf->hw, ret),
30493- i40e_aq_str(&pf->hw, last_aq_status));
30494- goto bw_commit_out;
30495- }
30496- /* Write it back out unchanged to initiate update NVM,
30497- * which will force a write of the shadow (alt) RAM to
30498- * the NVM - thus storing the bandwidth values permanently.
30499- */
30500- ret = i40e_aq_update_nvm(&pf->hw,
30501- I40E_SR_NVM_CONTROL_WORD,
30502- 0x10, sizeof(nvm_word),
30503- &nvm_word, true, NULL);
30504- /* Save off last admin queue command status before releasing
30505- * the NVM
30506- */
30507- last_aq_status = pf->hw.aq.asq_last_status;
30508- i40e_release_nvm(&pf->hw);
30509- if (ret)
30510- dev_info(&pf->pdev->dev,
30511- "BW settings NOT SAVED, err %s aq_err %s\n",
30512- i40e_stat_str(&pf->hw, ret),
30513- i40e_aq_str(&pf->hw, last_aq_status));
30514-bw_commit_out:
30515+ /* Only return duplicate errors if NLM_F_EXCL is set */
30516+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
30517+ err = 0;
30518
30519- return ret;
30520+ return err;
30521 }
30522
30523+#ifdef HAVE_NDO_FEATURES_CHECK
30524 /**
30525- * i40e_sw_init - Initialize general software structures (struct i40e_pf)
30526- * @pf: board private structure to initialize
30527- *
30528- * i40e_sw_init initializes the Adapter private data structure.
30529- * Fields are initialized based on PCI device information and
30530- * OS network device settings (MTU size).
30531+ * i40e_features_check - Validate encapsulated packet conforms to limits
30532+ * @skb: skb buff
30533+ * @dev: This physical port's netdev
30534+ * @features: Offload features that the stack believes apply
30535 **/
30536-static int i40e_sw_init(struct i40e_pf *pf)
30537+static netdev_features_t i40e_features_check(struct sk_buff *skb,
30538+ struct net_device *dev,
30539+ netdev_features_t features)
30540 {
30541- int err = 0;
30542- int size;
30543-
30544- /* Set default capability flags */
30545- pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
30546- I40E_FLAG_MSI_ENABLED |
30547- I40E_FLAG_MSIX_ENABLED;
30548-
30549- /* Set default ITR */
30550- pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
30551- pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
30552+ size_t len;
30553
30554- /* Depending on PF configurations, it is possible that the RSS
30555- * maximum might end up larger than the available queues
30556+ /* No point in doing any of this if neither checksum nor GSO are
30557+ * being requested for this frame. We can rule out both by just
30558+ * checking for CHECKSUM_PARTIAL
30559 */
30560- pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
30561- pf->alloc_rss_size = 1;
30562- pf->rss_table_size = pf->hw.func_caps.rss_table_size;
30563- pf->rss_size_max = min_t(int, pf->rss_size_max,
30564- pf->hw.func_caps.num_tx_qp);
30565- if (pf->hw.func_caps.rss) {
30566- pf->flags |= I40E_FLAG_RSS_ENABLED;
30567- pf->alloc_rss_size = min_t(int, pf->rss_size_max,
30568- num_online_cpus());
30569- }
30570+ if (skb->ip_summed != CHECKSUM_PARTIAL)
30571+ return features;
30572
30573- /* MFP mode enabled */
30574- if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
30575- pf->flags |= I40E_FLAG_MFP_ENABLED;
30576- dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
30577- if (i40e_get_partition_bw_setting(pf)) {
30578- dev_warn(&pf->pdev->dev,
30579- "Could not get partition bw settings\n");
30580- } else {
30581- dev_info(&pf->pdev->dev,
30582- "Partition BW Min = %8.8x, Max = %8.8x\n",
30583- pf->min_bw, pf->max_bw);
30584+ /* We cannot support GSO if the MSS is going to be less than
30585+ * 64 bytes. If it is then we need to drop support for GSO.
30586+ */
30587+ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
30588+ features &= ~NETIF_F_GSO_MASK;
30589
30590- /* nudge the Tx scheduler */
30591- i40e_set_partition_bw_setting(pf);
30592- }
30593- }
30594+ /* MACLEN can support at most 63 words */
30595+ len = skb_network_header(skb) - skb->data;
30596+ if (len & ~(63 * 2))
30597+ goto out_err;
30598
30599- if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
30600- (pf->hw.func_caps.fd_filters_best_effort > 0)) {
30601- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
30602- pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
30603- if (pf->flags & I40E_FLAG_MFP_ENABLED &&
30604- pf->hw.num_partitions > 1)
30605- dev_info(&pf->pdev->dev,
30606- "Flow Director Sideband mode Disabled in MFP mode\n");
30607- else
30608- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
30609- pf->fdir_pf_filter_count =
30610- pf->hw.func_caps.fd_filters_guaranteed;
30611- pf->hw.fdir_shared_filter_count =
30612- pf->hw.func_caps.fd_filters_best_effort;
30613- }
30614+ /* IPLEN and EIPLEN can support at most 127 dwords */
30615+ len = skb_transport_header(skb) - skb_network_header(skb);
30616+ if (len & ~(127 * 4))
30617+ goto out_err;
30618
30619- if (pf->hw.mac.type == I40E_MAC_X722) {
30620- pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
30621- I40E_HW_128_QP_RSS_CAPABLE |
30622- I40E_HW_ATR_EVICT_CAPABLE |
30623- I40E_HW_WB_ON_ITR_CAPABLE |
30624- I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
30625- I40E_HW_NO_PCI_LINK_CHECK |
30626- I40E_HW_USE_SET_LLDP_MIB |
30627- I40E_HW_GENEVE_OFFLOAD_CAPABLE |
30628- I40E_HW_PTP_L4_CAPABLE |
30629- I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
30630- I40E_HW_OUTER_UDP_CSUM_CAPABLE);
30631+ if (skb->encapsulation) {
30632+ /* L4TUNLEN can support 127 words */
30633+ len = skb_inner_network_header(skb) - skb_transport_header(skb);
30634+ if (len & ~(127 * 2))
30635+ goto out_err;
30636
30637-#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
30638- if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
30639- I40E_FDEVICT_PCTYPE_DEFAULT) {
30640- dev_warn(&pf->pdev->dev,
30641- "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
30642- pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
30643- }
30644- } else if ((pf->hw.aq.api_maj_ver > 1) ||
30645- ((pf->hw.aq.api_maj_ver == 1) &&
30646- (pf->hw.aq.api_min_ver > 4))) {
30647- /* Supported in FW API version higher than 1.4 */
30648- pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
30649+ /* IPLEN can support at most 127 dwords */
30650+ len = skb_inner_transport_header(skb) -
30651+ skb_inner_network_header(skb);
30652+ if (len & ~(127 * 4))
30653+ goto out_err;
30654 }
30655
30656- /* Enable HW ATR eviction if possible */
30657- if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
30658- pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
30659-
30660- if ((pf->hw.mac.type == I40E_MAC_XL710) &&
30661- (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
30662- (pf->hw.aq.fw_maj_ver < 4))) {
30663- pf->hw_features |= I40E_HW_RESTART_AUTONEG;
30664- /* No DCB support for FW < v4.33 */
30665- pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
30666- }
30667+ /* No need to validate L4LEN as TCP is the only protocol with a
30668+ * a flexible value and we support all possible values supported
30669+	 * flexible value and we support all possible values supported
30670+ */
30671
30672- /* Disable FW LLDP if FW < v4.3 */
30673- if ((pf->hw.mac.type == I40E_MAC_XL710) &&
30674- (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
30675- (pf->hw.aq.fw_maj_ver < 4)))
30676- pf->hw_features |= I40E_HW_STOP_FW_LLDP;
30677+ return features;
30678+out_err:
30679+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
30680+}
30681
30682- /* Use the FW Set LLDP MIB API if FW > v4.40 */
30683- if ((pf->hw.mac.type == I40E_MAC_XL710) &&
30684- (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
30685- (pf->hw.aq.fw_maj_ver >= 5)))
30686- pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
30687+#ifdef HAVE_XDP_SUPPORT
30688+/**
30689+ * i40e_enter_busy_conf - Enters busy config state
30690+ * @vsi: vsi
30691+ *
30692+ * Returns 0 on success, <0 for failure.
30693+ **/
30694+static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
30695+{
30696+ struct i40e_pf *pf = vsi->back;
30697+ int timeout = 50;
30698
30699- if (pf->hw.func_caps.vmdq) {
30700- pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
30701- pf->flags |= I40E_FLAG_VMDQ_ENABLED;
30702- pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
30703+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
30704+ timeout--;
30705+ if (!timeout)
30706+ return -EBUSY;
30707+ usleep_range(1000, 2000);
30708 }
30709
30710- if (pf->hw.func_caps.iwarp) {
30711- pf->flags |= I40E_FLAG_IWARP_ENABLED;
30712- /* IWARP needs one extra vector for CQP just like MISC.*/
30713- pf->num_iwarp_msix = (int)num_online_cpus() + 1;
30714- }
30715+ return 0;
30716+}
30717
30718-#ifdef CONFIG_PCI_IOV
30719- if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
30720- pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
30721- pf->flags |= I40E_FLAG_SRIOV_ENABLED;
30722- pf->num_req_vfs = min_t(int,
30723- pf->hw.func_caps.num_vfs,
30724- I40E_MAX_VF_COUNT);
30725- }
30726-#endif /* CONFIG_PCI_IOV */
30727- pf->eeprom_version = 0xDEAD;
30728- pf->lan_veb = I40E_NO_VEB;
30729- pf->lan_vsi = I40E_NO_VSI;
30730+/**
30731+ * i40e_exit_busy_conf - Exits busy config state
30732+ * @vsi: vsi
30733+ **/
30734+static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
30735+{
30736+ struct i40e_pf *pf = vsi->back;
30737
30738- /* By default FW has this off for performance reasons */
30739- pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
30740+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
30741+}
30742
30743- /* set up queue assignment tracking */
30744- size = sizeof(struct i40e_lump_tracking)
30745- + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
30746- pf->qp_pile = kzalloc(size, GFP_KERNEL);
30747- if (!pf->qp_pile) {
30748- err = -ENOMEM;
30749- goto sw_init_done;
30750+/**
30751+ * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
30752+ * @vsi: vsi
30753+ * @queue_pair: queue pair
30754+ **/
30755+static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
30756+{
30757+ memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
30758+ sizeof(vsi->rx_rings[queue_pair]->rx_stats));
30759+ memset(&vsi->tx_rings[queue_pair]->stats, 0,
30760+ sizeof(vsi->tx_rings[queue_pair]->stats));
30761+ if (i40e_enabled_xdp_vsi(vsi)) {
30762+ memset(&vsi->xdp_rings[queue_pair]->stats, 0,
30763+ sizeof(vsi->xdp_rings[queue_pair]->stats));
30764 }
30765- pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
30766- pf->qp_pile->search_hint = 0;
30767+}
30768
30769- pf->tx_timeout_recovery_level = 1;
30770+/**
30771+ * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
30772+ * @vsi: vsi
30773+ * @queue_pair: queue pair
30774+ **/
30775+static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
30776+{
30777+ i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
30778+ if (i40e_enabled_xdp_vsi(vsi))
30779+ i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
30780+ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
30781+}
30782
30783- mutex_init(&pf->switch_mutex);
30784+/**
30785+ * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
30786+ * @vsi: vsi
30787+ * @queue_pair: queue pair
30788+ * @enable: true for enable, false for disable
30789+ **/
30790+static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
30791+ bool enable)
30792+{
30793+ struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
30794+ struct i40e_q_vector *q_vector = rxr->q_vector;
30795
30796-sw_init_done:
30797- return err;
30798+ if (!vsi->netdev)
30799+ return;
30800+
30801+ /* All rings in a qp belong to the same qvector. */
30802+ if (q_vector->rx.ring || q_vector->tx.ring) {
30803+ if (enable)
30804+ napi_enable(&q_vector->napi);
30805+ else
30806+ napi_disable(&q_vector->napi);
30807+ }
30808 }
30809
30810 /**
30811- * i40e_set_ntuple - set the ntuple feature flag and take action
30812- * @pf: board private structure to initialize
30813- * @features: the feature set that the stack is suggesting
30814+ * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
30815+ * @vsi: vsi
30816+ * @queue_pair: queue pair
30817+ * @enable: true for enable, false for disable
30818 *
30819- * returns a bool to indicate if reset needs to happen
30820+ * Returns 0 on success, <0 on failure.
30821 **/
30822-bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
30823+static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
30824+ bool enable)
30825 {
30826- bool need_reset = false;
30827+ struct i40e_pf *pf = vsi->back;
30828+ int pf_q, ret = 0;
30829
30830- /* Check if Flow Director n-tuple support was enabled or disabled. If
30831- * the state changed, we need to reset.
30832+ pf_q = vsi->base_queue + queue_pair;
30833+ ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
30834+ false /*is xdp*/, enable);
30835+ if (ret) {
30836+ dev_info(&pf->pdev->dev,
30837+ "VSI seid %d Tx ring %d %sable timeout\n",
30838+ vsi->seid, pf_q, (enable ? "en" : "dis"));
30839+ return ret;
30840+ }
30841+
30842+ i40e_control_rx_q(pf, pf_q, enable);
30843+ ret = i40e_pf_rxq_wait(pf, pf_q, enable);
30844+ if (ret) {
30845+ dev_info(&pf->pdev->dev,
30846+ "VSI seid %d Rx ring %d %sable timeout\n",
30847+ vsi->seid, pf_q, (enable ? "en" : "dis"));
30848+ return ret;
30849+ }
30850+
30851+ /* Due to HW errata, on Rx disable only, the register can
30852+ * indicate done before it really is. Needs 50ms to be sure
30853 */
30854- if (features & NETIF_F_NTUPLE) {
30855- /* Enable filters and mark for reset */
30856- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
30857- need_reset = true;
30858- /* enable FD_SB only if there is MSI-X vector */
30859- if (pf->num_fdsb_msix > 0)
30860- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
30861- } else {
30862- /* turn off filters, mark for reset and clear SW filter list */
30863- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
30864- need_reset = true;
30865- i40e_fdir_filter_exit(pf);
30866- }
30867- pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
30868- I40E_FLAG_FD_SB_AUTO_DISABLED);
30869- /* reset fd counters */
30870- pf->fd_add_err = 0;
30871- pf->fd_atr_cnt = 0;
30872- /* if ATR was auto disabled it can be re-enabled. */
30873- if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
30874- pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
30875- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
30876- (I40E_DEBUG_FD & pf->hw.debug_mask))
30877- dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
30878- }
30879+ if (!enable)
30880+ mdelay(50);
30881+
30882+ if (!i40e_enabled_xdp_vsi(vsi))
30883+ return ret;
30884+
30885+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
30886+ pf_q + vsi->alloc_queue_pairs,
30887+ true /*is xdp*/, enable);
30888+ if (ret) {
30889+ dev_info(&pf->pdev->dev,
30890+ "VSI seid %d XDP Tx ring %d %sable timeout\n",
30891+ vsi->seid, pf_q, (enable ? "en" : "dis"));
30892 }
30893- return need_reset;
30894+
30895+ return ret;
30896 }
30897
30898 /**
30899- * i40e_clear_rss_lut - clear the rx hash lookup table
30900- * @vsi: the VSI being configured
30901+ * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
30902+ * @vsi: vsi
30903+ * @queue_pair: queue_pair
30904 **/
30905-static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
30906+static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
30907 {
30908+ struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
30909 struct i40e_pf *pf = vsi->back;
30910 struct i40e_hw *hw = &pf->hw;
30911- u16 vf_id = vsi->vf_id;
30912- u8 i;
30913
30914- if (vsi->type == I40E_VSI_MAIN) {
30915- for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
30916- wr32(hw, I40E_PFQF_HLUT(i), 0);
30917- } else if (vsi->type == I40E_VSI_SRIOV) {
30918- for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
30919- i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
30920- } else {
30921- dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
30922- }
30923+ /* All rings in a qp belong to the same qvector. */
30924+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
30925+ i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
30926+ else
30927+ i40e_irq_dynamic_enable_icr0(pf);
30928+
30929+ i40e_flush(hw);
30930 }
30931
30932 /**
30933- * i40e_set_features - set the netdev feature flags
30934- * @netdev: ptr to the netdev being adjusted
30935- * @features: the feature set that the stack is suggesting
30936- * Note: expects to be called while under rtnl_lock()
30937+ * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
30938+ * @vsi: vsi
30939+ * @queue_pair: queue_pair
30940 **/
30941-static int i40e_set_features(struct net_device *netdev,
30942- netdev_features_t features)
30943+static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
30944 {
30945- struct i40e_netdev_priv *np = netdev_priv(netdev);
30946- struct i40e_vsi *vsi = np->vsi;
30947+ struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
30948 struct i40e_pf *pf = vsi->back;
30949- bool need_reset;
30950-
30951- if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
30952- i40e_pf_config_rss(pf);
30953- else if (!(features & NETIF_F_RXHASH) &&
30954- netdev->features & NETIF_F_RXHASH)
30955- i40e_clear_rss_lut(vsi);
30956-
30957- if (features & NETIF_F_HW_VLAN_CTAG_RX)
30958- i40e_vlan_stripping_enable(vsi);
30959- else
30960- i40e_vlan_stripping_disable(vsi);
30961-
30962- need_reset = i40e_set_ntuple(pf, features);
30963+ struct i40e_hw *hw = &pf->hw;
30964
30965- if (need_reset)
30966- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
30967+ /* For simplicity, instead of removing the qp interrupt causes
30968+ * from the interrupt linked list, we simply disable the interrupt, and
30969+ * leave the list intact.
30970+ *
30971+ * All rings in a qp belong to the same qvector.
30972+ */
30973+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
30974+ u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
30975
30976- return 0;
30977+ wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
30978+ i40e_flush(hw);
30979+ synchronize_irq(pf->msix_entries[intpf].vector);
30980+ } else {
30981+ /* Legacy and MSI mode - this stops all interrupt handling */
30982+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
30983+ wr32(hw, I40E_PFINT_DYN_CTL0, 0);
30984+ i40e_flush(hw);
30985+ synchronize_irq(pf->pdev->irq);
30986+ }
30987 }
30988
30989 /**
30990- * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
30991- * @pf: board private structure
30992- * @port: The UDP port to look up
30993+ * i40e_queue_pair_disable - Disables a queue pair
30994+ * @vsi: vsi
30995+ * @queue_pair: queue pair
30996 *
30997- * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
30998+ * Returns 0 on success, <0 on failure.
30999 **/
31000-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
31001+int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
31002 {
31003- u8 i;
31004+ int err;
31005
31006- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
31007- if (pf->udp_ports[i].port == port)
31008- return i;
31009- }
31010+ err = i40e_enter_busy_conf(vsi);
31011+ if (err)
31012+ return err;
31013
31014- return i;
31015+ i40e_queue_pair_disable_irq(vsi, queue_pair);
31016+ err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
31017+ i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
31018+ i40e_queue_pair_clean_rings(vsi, queue_pair);
31019+ i40e_queue_pair_reset_stats(vsi, queue_pair);
31020+
31021+ return err;
31022 }
31023
31024 /**
31025- * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
31026- * @netdev: This physical port's netdev
31027- * @ti: Tunnel endpoint information
31028+ * i40e_queue_pair_enable - Enables a queue pair
31029+ * @vsi: vsi
31030+ * @queue_pair: queue pair
31031+ *
31032+ * Returns 0 on success, <0 on failure.
31033 **/
31034-static void i40e_udp_tunnel_add(struct net_device *netdev,
31035- struct udp_tunnel_info *ti)
31036+int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
31037 {
31038- struct i40e_netdev_priv *np = netdev_priv(netdev);
31039- struct i40e_vsi *vsi = np->vsi;
31040- struct i40e_pf *pf = vsi->back;
31041- u16 port = ntohs(ti->port);
31042- u8 next_idx;
31043- u8 idx;
31044+ int err;
31045
31046- idx = i40e_get_udp_port_idx(pf, port);
31047+ err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
31048+ if (err)
31049+ return err;
31050
31051- /* Check if port already exists */
31052- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
31053- netdev_info(netdev, "port %d already offloaded\n", port);
31054- return;
31055+ if (i40e_enabled_xdp_vsi(vsi)) {
31056+ err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
31057+ if (err)
31058+ return err;
31059 }
31060
31061- /* Now check if there is space to add the new port */
31062- next_idx = i40e_get_udp_port_idx(pf, 0);
31063+ err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
31064+ if (err)
31065+ return err;
31066
31067- if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
31068- netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
31069- port);
31070- return;
31071- }
31072+ err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
31073+ i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
31074+ i40e_queue_pair_enable_irq(vsi, queue_pair);
31075
31076- switch (ti->type) {
31077- case UDP_TUNNEL_TYPE_VXLAN:
31078- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
31079- break;
31080- case UDP_TUNNEL_TYPE_GENEVE:
31081- if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
31082- return;
31083- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
31084- break;
31085- default:
31086- return;
31087- }
31088+ i40e_exit_busy_conf(vsi);
31089
31090- /* New port: add it and mark its index in the bitmap */
31091- pf->udp_ports[next_idx].port = port;
31092- pf->pending_udp_bitmap |= BIT_ULL(next_idx);
31093- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
31094+ return err;
31095 }
31096
31097 /**
31098- * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
31099- * @netdev: This physical port's netdev
31100- * @ti: Tunnel endpoint information
31101+ * i40e_xdp_setup - add/remove an XDP program
31102+ * @vsi: VSI to be changed
31103+ * @prog: XDP program
31104 **/
31105-static void i40e_udp_tunnel_del(struct net_device *netdev,
31106- struct udp_tunnel_info *ti)
31107+static int i40e_xdp_setup(struct i40e_vsi *vsi,
31108+ struct bpf_prog *prog)
31109 {
31110- struct i40e_netdev_priv *np = netdev_priv(netdev);
31111- struct i40e_vsi *vsi = np->vsi;
31112+ int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
31113 struct i40e_pf *pf = vsi->back;
31114- u16 port = ntohs(ti->port);
31115- u8 idx;
31116+ struct bpf_prog *old_prog;
31117+ bool need_reset;
31118+ int i;
31119
31120- idx = i40e_get_udp_port_idx(pf, port);
31121+ /* Don't allow frames that span over multiple buffers */
31122+ if (frame_size > vsi->rx_buf_len)
31123+ return -EINVAL;
31124
31125- /* Check if port already exists */
31126- if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
31127- goto not_found;
31128+ if (!i40e_enabled_xdp_vsi(vsi) && !prog)
31129+ return 0;
31130
31131- switch (ti->type) {
31132- case UDP_TUNNEL_TYPE_VXLAN:
31133- if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
31134- goto not_found;
31135- break;
31136- case UDP_TUNNEL_TYPE_GENEVE:
31137- if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
31138- goto not_found;
31139- break;
31140- default:
31141- goto not_found;
31142- }
31143+ /* When turning XDP on->off/off->on we reset and rebuild the rings. */
31144+ need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
31145
31146- /* if port exists, set it to 0 (mark for deletion)
31147- * and make it pending
31148- */
31149- pf->udp_ports[idx].port = 0;
31150- pf->pending_udp_bitmap |= BIT_ULL(idx);
31151- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
31152+ if (need_reset)
31153+ i40e_prep_for_reset(pf, true);
31154
31155- return;
31156-not_found:
31157- netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
31158- port);
31159-}
31160+ old_prog = xchg(&vsi->xdp_prog, prog);
31161
31162-static int i40e_get_phys_port_id(struct net_device *netdev,
31163- struct netdev_phys_item_id *ppid)
31164-{
31165- struct i40e_netdev_priv *np = netdev_priv(netdev);
31166- struct i40e_pf *pf = np->vsi->back;
31167- struct i40e_hw *hw = &pf->hw;
31168+ if (need_reset)
31169+ i40e_reset_and_rebuild(pf, true, true);
31170
31171- if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
31172- return -EOPNOTSUPP;
31173+ for (i = 0; i < vsi->num_queue_pairs; i++)
31174+ WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
31175
31176- ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
31177- memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
31178+ if (old_prog)
31179+ bpf_prog_put(old_prog);
31180
31181 return 0;
31182 }
31183
31184 /**
31185- * i40e_ndo_fdb_add - add an entry to the hardware database
31186- * @ndm: the input from the stack
31187- * @tb: pointer to array of nladdr (unused)
31188- * @dev: the net device pointer
31189- * @addr: the MAC address entry being added
31190- * @flags: instructions from stack about fdb operation
31191- */
31192-static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
31193- struct net_device *dev,
31194- const unsigned char *addr, u16 vid,
31195- u16 flags)
31196+ * i40e_xdp - implements ndo_xdp for i40e
31197+ * @dev: netdevice
31198+ * @xdp: XDP command
31199+ **/
31200+#ifdef HAVE_NDO_BPF
31201+static int i40e_xdp(struct net_device *dev,
31202+ struct netdev_bpf *xdp)
31203+#else
31204+static int i40e_xdp(struct net_device *dev,
31205+ struct netdev_xdp *xdp)
31206+#endif
31207 {
31208 struct i40e_netdev_priv *np = netdev_priv(dev);
31209- struct i40e_pf *pf = np->vsi->back;
31210- int err = 0;
31211+ struct i40e_vsi *vsi = np->vsi;
31212
31213- if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
31214- return -EOPNOTSUPP;
31215+ if (vsi->type != I40E_VSI_MAIN)
31216+ return -EINVAL;
31217
31218- if (vid) {
31219- pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
31220+ switch (xdp->command) {
31221+ case XDP_SETUP_PROG:
31222+ return i40e_xdp_setup(vsi, xdp->prog);
31223+ case XDP_QUERY_PROG:
31224+#ifndef NO_NETDEV_BPF_PROG_ATTACHED
31225+ xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
31226+#endif /* !NO_NETDEV_BPF_PROG_ATTACHED */
31227+ xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
31228+ return 0;
31229+ default:
31230 return -EINVAL;
31231 }
31232+}
31233+#endif /* HAVE_XDP_SUPPORT */
31234+#endif /* HAVE_NDO_FEATURES_CHECK */
31235+#ifndef USE_DEFAULT_FDB_DEL_DUMP
31236+#if defined(HAVE_NDO_FDB_ADD_VID)
31237+static int i40e_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
31238+ struct net_device *dev, const unsigned char *addr,
31239+ u16 vid)
31240+#elif defined(HAVE_FDB_DEL_NLATTR)
31241+static int i40e_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
31242+ struct net_device *dev, const unsigned char *addr)
31243+#elif defined(USE_CONST_DEV_UC_CHAR)
31244+static int i40e_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev,
31245+ const unsigned char *addr)
31246+#else
31247+static int i40e_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev,
31248+ unsigned char *addr)
31249+#endif /* HAVE_NDO_FDB_ADD_VID */
31250+{
31251+ struct i40e_netdev_priv *np = netdev_priv(dev);
31252+ struct i40e_pf *pf = np->vsi->back;
31253+ int err = -EOPNOTSUPP;
31254
31255- /* Hardware does not support aging addresses so if a
31256- * ndm_state is given only allow permanent addresses
31257- */
31258- if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
31259+ if (ndm->ndm_state & NUD_PERMANENT) {
31260 netdev_info(dev, "FDB only supports static addresses\n");
31261 return -EINVAL;
31262 }
31263
31264- if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
31265- err = dev_uc_add_excl(dev, addr);
31266- else if (is_multicast_ether_addr(addr))
31267- err = dev_mc_add_excl(dev, addr);
31268- else
31269- err = -EINVAL;
31270-
31271- /* Only return duplicate errors if NLM_F_EXCL is set */
31272- if (err == -EEXIST && !(flags & NLM_F_EXCL))
31273- err = 0;
31274+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
31275+ if (is_unicast_ether_addr(addr))
31276+ err = dev_uc_del(dev, addr);
31277+ else if (is_multicast_ether_addr(addr))
31278+ err = dev_mc_del(dev, addr);
31279+ else
31280+ err = -EINVAL;
31281+ }
31282
31283 return err;
31284 }
31285
31286+static int i40e_ndo_fdb_dump(struct sk_buff *skb,
31287+ struct netlink_callback *cb,
31288+ struct net_device *dev,
31289+ int idx)
31290+{
31291+ struct i40e_netdev_priv *np = netdev_priv(dev);
31292+ struct i40e_pf *pf = np->vsi->back;
31293+
31294+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
31295+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
31296+
31297+ return idx;
31298+}
31299+
31300+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
31301+#ifdef HAVE_BRIDGE_ATTRIBS
31302 /**
31303 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
31304 * @dev: the netdev being configured
31305 * @nlh: RTNL message
31306+ * @flags: bridge flags
31307+ * @extack: netdev extended ack structure
31308 *
31309 * Inserts a new hardware bridge if not already created and
31310 * enables the bridging mode requested (VEB or VEPA). If the
31311@@ -9363,9 +13118,19 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
31312 *
31313 * Note: expects to be called while under rtnl_lock()
31314 **/
31315+#if defined(HAVE_NDO_BRIDGE_SETLINK_EXTACK)
31316+static int i40e_ndo_bridge_setlink(struct net_device *dev,
31317+ struct nlmsghdr *nlh,
31318+ u16 flags,
31319+ struct netlink_ext_ack *extack)
31320+#elif defined(HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS)
31321 static int i40e_ndo_bridge_setlink(struct net_device *dev,
31322 struct nlmsghdr *nlh,
31323 u16 flags)
31324+#else
31325+static int i40e_ndo_bridge_setlink(struct net_device *dev,
31326+ struct nlmsghdr *nlh)
31327+#endif
31328 {
31329 struct i40e_netdev_priv *np = netdev_priv(dev);
31330 struct i40e_vsi *vsi = np->vsi;
31331@@ -9412,13 +13177,11 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
31332 } else if (mode != veb->bridge_mode) {
31333 /* Existing HW bridge but different mode needs reset */
31334 veb->bridge_mode = mode;
31335- /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
31336 if (mode == BRIDGE_MODE_VEB)
31337 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
31338 else
31339 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
31340- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
31341- true);
31342+ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
31343 break;
31344 }
31345 }
31346@@ -9435,196 +13198,239 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
31347 * @filter_mask: unused
31348 * @nlflags: netlink flags passed in
31349 *
31350- * Return the mode in which the hardware bridge is operating in
31351- * i.e VEB or VEPA.
31352- **/
31353-static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
31354- struct net_device *dev,
31355- u32 __always_unused filter_mask,
31356- int nlflags)
31357-{
31358- struct i40e_netdev_priv *np = netdev_priv(dev);
31359- struct i40e_vsi *vsi = np->vsi;
31360- struct i40e_pf *pf = vsi->back;
31361- struct i40e_veb *veb = NULL;
31362- int i;
31363-
31364- /* Only for PF VSI for now */
31365- if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
31366- return -EOPNOTSUPP;
31367-
31368- /* Find the HW bridge for the PF VSI */
31369- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
31370- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
31371- veb = pf->veb[i];
31372- }
31373-
31374- if (!veb)
31375- return 0;
31376-
31377- return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
31378- 0, 0, nlflags, filter_mask, NULL);
31379-}
31380-
31381-/**
31382- * i40e_features_check - Validate encapsulated packet conforms to limits
31383- * @skb: skb buff
31384- * @dev: This physical port's netdev
31385- * @features: Offload features that the stack believes apply
31386- **/
31387-static netdev_features_t i40e_features_check(struct sk_buff *skb,
31388- struct net_device *dev,
31389- netdev_features_t features)
31390-{
31391- size_t len;
31392-
31393- /* No point in doing any of this if neither checksum nor GSO are
31394- * being requested for this frame. We can rule out both by just
31395- * checking for CHECKSUM_PARTIAL
31396- */
31397- if (skb->ip_summed != CHECKSUM_PARTIAL)
31398- return features;
31399-
31400- /* We cannot support GSO if the MSS is going to be less than
31401- * 64 bytes. If it is then we need to drop support for GSO.
31402- */
31403- if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
31404- features &= ~NETIF_F_GSO_MASK;
31405-
31406- /* MACLEN can support at most 63 words */
31407- len = skb_network_header(skb) - skb->data;
31408- if (len & ~(63 * 2))
31409- goto out_err;
31410-
31411- /* IPLEN and EIPLEN can support at most 127 dwords */
31412- len = skb_transport_header(skb) - skb_network_header(skb);
31413- if (len & ~(127 * 4))
31414- goto out_err;
31415-
31416- if (skb->encapsulation) {
31417- /* L4TUNLEN can support 127 words */
31418- len = skb_inner_network_header(skb) - skb_transport_header(skb);
31419- if (len & ~(127 * 2))
31420- goto out_err;
31421-
31422- /* IPLEN can support at most 127 dwords */
31423- len = skb_inner_transport_header(skb) -
31424- skb_inner_network_header(skb);
31425- if (len & ~(127 * 4))
31426- goto out_err;
31427- }
31428-
31429- /* No need to validate L4LEN as TCP is the only protocol with a
31430- * a flexible value and we support all possible values supported
31431- * by TCP, which is at most 15 dwords
31432- */
31433-
31434- return features;
31435-out_err:
31436- return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
31437-}
31438-
31439-/**
31440- * i40e_xdp_setup - add/remove an XDP program
31441- * @vsi: VSI to changed
31442- * @prog: XDP program
31443- **/
31444-static int i40e_xdp_setup(struct i40e_vsi *vsi,
31445- struct bpf_prog *prog)
31446-{
31447- int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
31448- struct i40e_pf *pf = vsi->back;
31449- struct bpf_prog *old_prog;
31450- bool need_reset;
31451- int i;
31452-
31453- /* Don't allow frames that span over multiple buffers */
31454- if (frame_size > vsi->rx_buf_len)
31455- return -EINVAL;
31456-
31457- if (!i40e_enabled_xdp_vsi(vsi) && !prog)
31458- return 0;
31459-
31460- /* When turning XDP on->off/off->on we reset and rebuild the rings. */
31461- need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
31462-
31463- if (need_reset)
31464- i40e_prep_for_reset(pf, true);
31465-
31466- old_prog = xchg(&vsi->xdp_prog, prog);
31467-
31468- if (need_reset)
31469- i40e_reset_and_rebuild(pf, true, true);
31470-
31471- for (i = 0; i < vsi->num_queue_pairs; i++)
31472- WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
31473-
31474- if (old_prog)
31475- bpf_prog_put(old_prog);
31476-
31477- return 0;
31478-}
31479-
31480-/**
31481- * i40e_xdp - implements ndo_xdp for i40e
31482- * @dev: netdevice
31483- * @xdp: XDP command
31484+ * Return the mode in which the hardware bridge is operating,
31485+ * i.e. VEB or VEPA.
31486 **/
31487-static int i40e_xdp(struct net_device *dev,
31488- struct netdev_xdp *xdp)
31489+#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
31490+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
31491+ struct net_device *dev,
31492+ u32 __always_unused filter_mask,
31493+ int nlflags)
31494+#elif defined(HAVE_BRIDGE_FILTER)
31495+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
31496+ struct net_device *dev,
31497+ u32 __always_unused filter_mask)
31498+#else
31499+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
31500+ struct net_device *dev)
31501+#endif /* NDO_BRIDGE_STUFF */
31502 {
31503 struct i40e_netdev_priv *np = netdev_priv(dev);
31504 struct i40e_vsi *vsi = np->vsi;
31505+ struct i40e_pf *pf = vsi->back;
31506+ struct i40e_veb *veb = NULL;
31507+ int i;
31508
31509- if (vsi->type != I40E_VSI_MAIN)
31510- return -EINVAL;
31511+ /* Only for PF VSI for now */
31512+ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
31513+ return -EOPNOTSUPP;
31514
31515- switch (xdp->command) {
31516- case XDP_SETUP_PROG:
31517- return i40e_xdp_setup(vsi, xdp->prog);
31518- case XDP_QUERY_PROG:
31519- xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
31520- xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
31521- return 0;
31522- default:
31523- return -EINVAL;
31524+ /* Find the HW bridge for the PF VSI */
31525+ for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
31526+ if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
31527+ veb = pf->veb[i];
31528 }
31529+
31530+ if (!veb)
31531+ return 0;
31532+
31533+#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
31534+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
31535+ 0, 0, nlflags, filter_mask, NULL);
31536+#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS)
31537+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
31538+ 0, 0, nlflags);
31539+#elif defined(HAVE_NDO_FDB_ADD_VID) || \
31540+ defined NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
31541+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
31542+ 0, 0);
31543+#else
31544+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode);
31545+#endif /* HAVE_NDO_BRIDGE_XX */
31546 }
31547+#endif /* HAVE_BRIDGE_ATTRIBS */
31548+#endif /* HAVE_FDB_OPS */
31549
31550+#ifdef HAVE_NET_DEVICE_OPS
31551 static const struct net_device_ops i40e_netdev_ops = {
31552 .ndo_open = i40e_open,
31553 .ndo_stop = i40e_close,
31554 .ndo_start_xmit = i40e_lan_xmit_frame,
31555+#if defined(HAVE_NDO_GET_STATS64) || defined(HAVE_VOID_NDO_GET_STATS64)
31556 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
31557+#else
31558+ .ndo_get_stats = i40e_get_netdev_stats_struct,
31559+#endif
31560 .ndo_set_rx_mode = i40e_set_rx_mode,
31561 .ndo_validate_addr = eth_validate_addr,
31562 .ndo_set_mac_address = i40e_set_mac,
31563+#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
31564+ .extended.ndo_change_mtu = i40e_change_mtu,
31565+#else
31566 .ndo_change_mtu = i40e_change_mtu,
31567+#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */
31568+#if defined(HAVE_PTP_1588_CLOCK) || defined(HAVE_I40E_INTELCIM_IOCTL)
31569 .ndo_do_ioctl = i40e_ioctl,
31570+#endif
31571 .ndo_tx_timeout = i40e_tx_timeout,
31572+#ifdef HAVE_VLAN_RX_REGISTER
31573+ .ndo_vlan_rx_register = i40e_vlan_rx_register,
31574+#endif
31575 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
31576 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
31577 #ifdef CONFIG_NET_POLL_CONTROLLER
31578 .ndo_poll_controller = i40e_netpoll,
31579 #endif
31580+#ifdef HAVE_SETUP_TC
31581+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC
31582+ .extended.ndo_setup_tc_rh = __i40e_setup_tc,
31583+#else
31584+#ifdef NETIF_F_HW_TC
31585 .ndo_setup_tc = __i40e_setup_tc,
31586- .ndo_set_features = i40e_set_features,
31587+#else
31588+ .ndo_setup_tc = i40e_setup_tc,
31589+#endif /* NETIF_F_HW_TC */
31590+#endif /* HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC */
31591+#endif /* HAVE_SETUP_TC */
31592+#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
31593+/* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the
31594+ * function get_ndo_ext to retrieve offsets for extended fields from within the
31595+ * net_device_ops struct and ndo_size is checked to determine whether or not
31596+ * the offset is valid.
31597+ */
31598+ .ndo_size = sizeof(const struct net_device_ops),
31599+#endif
31600+#ifdef IFLA_VF_MAX
31601 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
31602+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
31603+ .extended.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
31604+#else
31605 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
31606+#endif
31607+#ifdef HAVE_VF_STATS
31608+ .ndo_get_vf_stats = i40e_get_vf_stats,
31609+#endif
31610+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
31611 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
31612+#else
31613+ .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
31614+#endif
31615 .ndo_get_vf_config = i40e_ndo_get_vf_config,
31616- .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
31617+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
31618 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
31619+#endif
31620+#ifdef HAVE_NDO_SET_VF_TRUST
31621+#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
31622+ .extended.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
31623+#else
31624 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
31625+#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */
31626+#endif /* HAVE_NDO_SET_VF_TRUST */
31627+#endif /* IFLA_VF_MAX */
31628+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
31629+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL
31630+ .extended.ndo_udp_tunnel_add = i40e_udp_tunnel_add,
31631+ .extended.ndo_udp_tunnel_del = i40e_udp_tunnel_del,
31632+#else
31633 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
31634 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
31635- .ndo_get_phys_port_id = i40e_get_phys_port_id,
31636+#endif
31637+#else /* !HAVE_UDP_ENC_RX_OFFLOAD */
31638+#ifdef HAVE_VXLAN_RX_OFFLOAD
31639+#if IS_ENABLED(CONFIG_VXLAN)
31640+ .ndo_add_vxlan_port = i40e_add_vxlan_port,
31641+ .ndo_del_vxlan_port = i40e_del_vxlan_port,
31642+#endif
31643+#endif /* HAVE_VXLAN_RX_OFFLOAD */
31644+#ifdef HAVE_GENEVE_RX_OFFLOAD
31645+#if IS_ENABLED(CONFIG_GENEVE)
31646+ .ndo_add_geneve_port = i40e_add_geneve_port,
31647+ .ndo_del_geneve_port = i40e_del_geneve_port,
31648+#endif
31649+#endif /* HAVE_GENEVE_RX_OFFLOAD */
31650+#endif /* HAVE_UDP_ENC_RX_OFFLOAD */
31651+#ifdef HAVE_NDO_GET_PHYS_PORT_ID
31652+ .ndo_get_phys_port_id = i40e_get_phys_port_id,
31653+#endif /* HAVE_NDO_GET_PHYS_PORT_ID */
31654+#ifdef HAVE_FDB_OPS
31655 .ndo_fdb_add = i40e_ndo_fdb_add,
31656+#ifndef USE_DEFAULT_FDB_DEL_DUMP
31657+ .ndo_fdb_del = i40e_ndo_fdb_del,
31658+ .ndo_fdb_dump = i40e_ndo_fdb_dump,
31659+#endif
31660+#ifdef HAVE_NDO_FEATURES_CHECK
31661 .ndo_features_check = i40e_features_check,
31662+#endif /* HAVE_NDO_FEATURES_CHECK */
31663+#ifdef HAVE_BRIDGE_ATTRIBS
31664 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
31665 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
31666- .ndo_xdp = i40e_xdp,
31667+#endif /* HAVE_BRIDGE_ATTRIBS */
31668+#endif /* HAVE_FDB_OPS */
31669+#ifdef HAVE_XDP_SUPPORT
31670+#ifdef HAVE_NDO_BPF
31671+ .ndo_bpf = i40e_xdp,
31672+ .ndo_xdp_xmit = i40e_xdp_xmit,
31673+#else
31674+ .ndo_xdp = i40e_xdp,
31675+ .ndo_xdp_xmit = i40e_xdp_xmit,
31676+ .ndo_xdp_flush = i40e_xdp_flush,
31677+#endif /* HAVE_NDO_BPF */
31678+#endif /* HAVE_XDP_SUPPORT */
31679+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
31680+};
31681+
31682+/* RHEL6 keeps these operations in a separate structure */
31683+static const struct net_device_ops_ext i40e_netdev_ops_ext = {
31684+ .size = sizeof(struct net_device_ops_ext),
31685+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
31686+#ifdef HAVE_NDO_SET_FEATURES
31687+ .ndo_set_features = i40e_set_features,
31688+#endif /* HAVE_NDO_SET_FEATURES */
31689+#ifdef HAVE_NDO_SET_VF_LINK_STATE
31690+ .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
31691+#endif
31692 };
31693+#else /* HAVE_NET_DEVICE_OPS */
31694+/**
31695+ * i40e_assign_netdev_ops - Initialize netdev operations function pointers
31696+ * @dev: ptr to the netdev struct
31697+ **/
31698+#ifdef HAVE_CONFIG_HOTPLUG
31699+static void __devinit i40e_assign_netdev_ops(struct net_device *dev)
31700+#else
31701+static void i40e_assign_netdev_ops(struct net_device *dev)
31702+#endif
31703+{
31704+ dev->open = i40e_open;
31705+ dev->stop = i40e_close;
31706+ dev->hard_start_xmit = i40e_lan_xmit_frame;
31707+ dev->get_stats = i40e_get_netdev_stats_struct;
31708+
31709+#ifdef HAVE_SET_RX_MODE
31710+ dev->set_rx_mode = i40e_set_rx_mode;
31711+#endif
31712+ dev->set_multicast_list = i40e_set_rx_mode;
31713+ dev->set_mac_address = i40e_set_mac;
31714+ dev->change_mtu = i40e_change_mtu;
31715+#if defined(HAVE_PTP_1588_CLOCK) || defined(HAVE_I40E_INTELCIM_IOCTL)
31716+ dev->do_ioctl = i40e_ioctl;
31717+#endif
31718+ dev->tx_timeout = i40e_tx_timeout;
31719+#ifdef NETIF_F_HW_VLAN_TX
31720+#ifdef HAVE_VLAN_RX_REGISTER
31721+ dev->vlan_rx_register = i40e_vlan_rx_register;
31722+#endif
31723+ dev->vlan_rx_add_vid = i40e_vlan_rx_add_vid;
31724+ dev->vlan_rx_kill_vid = i40e_vlan_rx_kill_vid;
31725+#endif
31726+#ifdef CONFIG_NET_POLL_CONTROLLER
31727+ dev->poll_controller = i40e_netpoll;
31728+#endif
31729+#ifdef HAVE_NETDEV_SELECT_QUEUE
31730+ dev->select_queue = i40e_lan_select_queue;
31731+#endif /* HAVE_NETDEV_SELECT_QUEUE */
31732+}
31733+#endif /* HAVE_NET_DEVICE_OPS */
31734
31735 /**
31736 * i40e_config_netdev - Setup the netdev flags
31737@@ -9655,42 +13461,112 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
31738
31739 hw_enc_features = NETIF_F_SG |
31740 NETIF_F_IP_CSUM |
31741+#ifdef NETIF_F_IPV6_CSUM
31742 NETIF_F_IPV6_CSUM |
31743+#endif
31744 NETIF_F_HIGHDMA |
31745+#ifdef NETIF_F_SOFT_FEATURES
31746 NETIF_F_SOFT_FEATURES |
31747+#endif
31748 NETIF_F_TSO |
31749+#ifdef HAVE_ENCAP_TSO_OFFLOAD
31750 NETIF_F_TSO_ECN |
31751 NETIF_F_TSO6 |
31752+#ifdef HAVE_GRE_ENCAP_OFFLOAD
31753 NETIF_F_GSO_GRE |
31754+#ifdef NETIF_F_GSO_PARTIAL
31755 NETIF_F_GSO_GRE_CSUM |
31756 NETIF_F_GSO_PARTIAL |
31757+#endif
31758+#ifdef NETIF_F_GSO_IPXIP4
31759+ NETIF_F_GSO_IPXIP4 |
31760+#ifdef NETIF_F_GSO_IPXIP6
31761+ NETIF_F_GSO_IPXIP6 |
31762+#endif
31763+#else
31764+#ifdef NETIF_F_GSO_IPIP
31765+ NETIF_F_GSO_IPIP |
31766+#endif
31767+#ifdef NETIF_F_GSO_SIT
31768+ NETIF_F_GSO_SIT |
31769+#endif
31770+#endif
31771+#endif
31772 NETIF_F_GSO_UDP_TUNNEL |
31773 NETIF_F_GSO_UDP_TUNNEL_CSUM |
31774+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
31775 NETIF_F_SCTP_CRC |
31776+#ifdef NETIF_F_RXHASH
31777 NETIF_F_RXHASH |
31778+#endif
31779+#ifdef HAVE_NDO_SET_FEATURES
31780 NETIF_F_RXCSUM |
31781+#endif
31782 0;
31783
31784 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
31785+#ifndef NETIF_F_GSO_PARTIAL
31786+ hw_enc_features ^= NETIF_F_GSO_UDP_TUNNEL_CSUM;
31787+#else
31788 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
31789
31790 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
31791+#endif
31792
31793+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
31794 netdev->hw_enc_features |= hw_enc_features;
31795+#endif
31796
31797+#ifdef HAVE_NETDEV_VLAN_FEATURES
31798 /* record features VLANs can make use of */
31799+#ifdef NETIF_F_GSO_PARTIAL
31800 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
31801+#else
31802+ netdev->vlan_features |= hw_enc_features;
31803+#endif
31804+#endif
31805
31806- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
31807- netdev->hw_features |= NETIF_F_NTUPLE;
31808 hw_features = hw_enc_features |
31809+#ifdef NETIF_F_HW_VLAN_CTAG_RX
31810 NETIF_F_HW_VLAN_CTAG_TX |
31811 NETIF_F_HW_VLAN_CTAG_RX;
31812+#else /* NETIF_F_HW_VLAN_CTAG_RX */
31813+ NETIF_F_HW_VLAN_TX |
31814+ NETIF_F_HW_VLAN_RX;
31815+#endif /* !NETIF_F_HW_VLAN_CTAG_RX */
31816+
31817+#if defined(HAVE_NDO_SET_FEATURES) || defined(ETHTOOL_GRXRINGS)
31818+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
31819+#ifdef NETIF_F_HW_TC
31820+ hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
31821+#else
31822+ hw_features |= NETIF_F_NTUPLE;
31823+#endif
31824+#endif
31825
31826+#ifdef HAVE_NDO_SET_FEATURES
31827+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
31828+ hw_features |= get_netdev_hw_features(netdev);
31829+ set_netdev_hw_features(netdev, hw_features);
31830+#else
31831 netdev->hw_features |= hw_features;
31832+#endif
31833+#endif /* HAVE_NDO_SET_FEATURES */
31834
31835+#ifdef NETIF_F_HW_VLAN_CTAG_RX
31836 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
31837+#else
31838+ netdev->features |= hw_features | NETIF_F_HW_VLAN_FILTER;
31839+#endif
31840+#ifdef NETIF_F_GSO_PARTIAL
31841 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
31842+#endif
31843+
31844+#ifndef HAVE_NDO_SET_FEATURES
31845+#ifdef NETIF_F_GRO
31846+ netdev->features |= NETIF_F_GRO;
31847+#endif
31848+#endif
31849
31850 if (vsi->type == I40E_VSI_MAIN) {
31851 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
31852@@ -9718,7 +13594,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
31853 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
31854 IFNAMSIZ - 4,
31855 pf->vsi[pf->lan_vsi]->netdev->name);
31856- random_ether_addr(mac_addr);
31857+ eth_random_addr(mac_addr);
31858
31859 spin_lock_bh(&vsi->mac_filter_hash_lock);
31860 i40e_add_mac_filter(vsi, mac_addr);
31861@@ -9744,20 +13620,47 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
31862 spin_unlock_bh(&vsi->mac_filter_hash_lock);
31863
31864 ether_addr_copy(netdev->dev_addr, mac_addr);
31865+#ifdef ETHTOOL_GPERMADDR
31866 ether_addr_copy(netdev->perm_addr, mac_addr);
31867+#endif
31868+
31869+#ifdef HAVE_MPLS_FEATURES
31870+ if (pf->hw_features & I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE)
31871+ netdev->mpls_features = NETIF_F_HW_CSUM;
31872+#endif
31873
31874+#ifdef IFF_UNICAST_FLT
31875 netdev->priv_flags |= IFF_UNICAST_FLT;
31876+#endif
31877+#ifdef IFF_SUPP_NOFCS
31878 netdev->priv_flags |= IFF_SUPP_NOFCS;
31879+#endif
31880 /* Setup netdev TC information */
31881 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
31882
31883+#ifdef HAVE_NET_DEVICE_OPS
31884 netdev->netdev_ops = &i40e_netdev_ops;
31885+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
31886+ set_netdev_ops_ext(netdev, &i40e_netdev_ops_ext);
31887+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
31888+#else /* HAVE_NET_DEVICE_OPS */
31889+ i40e_assign_netdev_ops(netdev);
31890+#endif /* HAVE_NET_DEVICE_OPS */
31891 netdev->watchdog_timeo = 5 * HZ;
31892+#ifdef SIOCETHTOOL
31893 i40e_set_ethtool_ops(netdev);
31894+#endif
31895
31896+#ifdef HAVE_NETDEVICE_MIN_MAX_MTU
31897 /* MTU range: 68 - 9706 */
31898+#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
31899+ netdev->extended->min_mtu = ETH_MIN_MTU;
31900+ netdev->extended->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
31901+#else
31902 netdev->min_mtu = ETH_MIN_MTU;
31903 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
31904+#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */
31905+#endif /* HAVE_NETDEVICE_MIN_MAX_MTU */
31906
31907 return 0;
31908 }
31909@@ -9789,7 +13692,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
31910 struct i40e_pf *pf = vsi->back;
31911
31912 /* Uplink is not a bridge so default to VEB */
31913- if (vsi->veb_idx == I40E_NO_VEB)
31914+ if (vsi->veb_idx >= I40E_MAX_VEB)
31915 return 1;
31916
31917 veb = pf->veb[vsi->veb_idx];
31918@@ -9799,6 +13702,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
31919 return -ENOENT;
31920 }
31921
31922+#ifdef HAVE_BRIDGE_ATTRIBS
31923 /* Uplink is a bridge in VEPA mode */
31924 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
31925 return 0;
31926@@ -9806,6 +13710,10 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
31927 /* Uplink is a bridge in VEB mode */
31928 return 1;
31929 }
31930+#else
31931+ if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
31932+ return 1;
31933+#endif
31934
31935 /* VEPA is now default bridge, so return 0 */
31936 return 0;
31937@@ -9830,6 +13738,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
31938
31939 u8 enabled_tc = 0x1; /* TC0 enabled */
31940 int f_count = 0;
31941+ u32 val;
31942
31943 memset(&ctxt, 0, sizeof(ctxt));
31944 switch (vsi->type) {
31945@@ -9860,6 +13769,31 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
31946
31947 enabled_tc = i40e_pf_get_tc_map(pf);
31948
31949+ /* Source pruning is enabled by default, so the flag is
31950+ * negative logic - if it's set, we need to fiddle with
31951+ * the VSI to disable source pruning.
31952+ */
31953+ if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
31954+ memset(&ctxt, 0, sizeof(ctxt));
31955+ ctxt.seid = pf->main_vsi_seid;
31956+ ctxt.pf_num = pf->hw.pf_id;
31957+ ctxt.vf_num = 0;
31958+ ctxt.info.valid_sections |=
31959+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
31960+ ctxt.info.switch_id =
31961+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
31962+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
31963+ if (ret) {
31964+ dev_info(&pf->pdev->dev,
31965+ "update vsi failed, err %s aq_err %s\n",
31966+ i40e_stat_str(&pf->hw, ret),
31967+ i40e_aq_str(&pf->hw,
31968+ pf->hw.aq.asq_last_status));
31969+ ret = -ENOENT;
31970+ goto err;
31971+ }
31972+ }
31973+
31974 /* MFP mode setup queue map and update VSI */
31975 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
31976 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
31977@@ -9967,12 +13901,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
31978
31979 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
31980 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
31981- if (pf->vf[vsi->vf_id].spoofchk) {
31982+ if (pf->vf[vsi->vf_id].mac_anti_spoof) {
31983 ctxt.info.valid_sections |=
31984 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
31985 ctxt.info.sec_flags |=
31986- (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
31987- I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
31988+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
31989 }
31990 /* Setup the VSI tx/rx queue map for TC0 only for now */
31991 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
31992@@ -10001,6 +13934,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
31993 vsi->info.valid_sections = 0;
31994 vsi->seid = ctxt.seid;
31995 vsi->id = ctxt.vsi_number;
31996+ val = rd32(&pf->hw, 0x208800 + (4*(vsi->id)));
31997+ if (!(val & 0x1)) /* MACVSIPRUNEENABLE = 1*/
31998+ dev_warn(&vsi->back->pdev->dev,
31999+ "Note: VSI source pruning is not being set correctly by FW\n");
32000 }
32001
32002 vsi->active_filters = 0;
32003@@ -10015,7 +13952,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
32004
32005 if (f_count) {
32006 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
32007- pf->flags |= I40E_FLAG_FILTER_SYNC;
32008+ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
32009 }
32010
32011 /* Update VSI BW information */
32012@@ -10062,6 +13999,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
32013 return -ENODEV;
32014 }
32015
32016+ set_bit(__I40E_VSI_RELEASING, vsi->state);
32017 uplink_seid = vsi->uplink_seid;
32018 if (vsi->type != I40E_VSI_SRIOV) {
32019 if (vsi->netdev_registered) {
32020@@ -10166,6 +14104,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
32021 goto vector_setup_out;
32022 }
32023
32024+#if !defined(I40E_LEGACY_INTERRUPT) && !defined(I40E_MSI_INTERRUPT)
32025 /* In Legacy mode, we do not have to get any other vector since we
32026 * piggyback on the misc/ICR0 for queue interrupts.
32027 */
32028@@ -10183,6 +14122,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
32029 goto vector_setup_out;
32030 }
32031
32032+#endif
32033 vector_setup_out:
32034 return ret;
32035 }
32036@@ -10280,7 +14220,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
32037 {
32038 struct i40e_vsi *vsi = NULL;
32039 struct i40e_veb *veb = NULL;
32040- u16 alloc_queue_pairs;
32041 int ret, i;
32042 int v_idx;
32043
32044@@ -10330,6 +14269,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
32045 "New VSI creation error, uplink seid of LAN VSI expected.\n");
32046 return NULL;
32047 }
32048+#ifdef HAVE_BRIDGE_ATTRIBS
32049 /* We come up by default in VEPA mode if SRIOV is not
32050 * already enabled, in which case we can't force VEPA
32051 * mode.
32052@@ -10338,6 +14278,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
32053 veb->bridge_mode = BRIDGE_MODE_VEPA;
32054 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
32055 }
32056+#endif
32057 i40e_config_bridge_mode(veb);
32058 }
32059 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
32060@@ -10368,14 +14309,12 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
32061 else if (type == I40E_VSI_SRIOV)
32062 vsi->vf_id = param1;
32063 /* assign it some queues */
32064- alloc_queue_pairs = vsi->alloc_queue_pairs *
32065- (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
32066-
32067- ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
32068+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
32069+ vsi->idx);
32070 if (ret < 0) {
32071 dev_info(&pf->pdev->dev,
32072 "failed to get tracking for %d queues for VSI %d err=%d\n",
32073- alloc_queue_pairs, vsi->seid, ret);
32074+ vsi->alloc_queue_pairs, vsi->seid, ret);
32075 goto err_vsi;
32076 }
32077 vsi->base_queue = ret;
32078@@ -10398,10 +14337,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
32079 goto err_netdev;
32080 vsi->netdev_registered = true;
32081 netif_carrier_off(vsi->netdev);
32082-#ifdef CONFIG_I40E_DCB
32083+ /* make sure transmit queues start off as stopped */
32084+ netif_tx_stop_all_queues(vsi->netdev);
32085+#ifdef CONFIG_DCB
32086+#ifdef HAVE_DCBNL_IEEE
32087 /* Setup DCB netlink interface */
32088 i40e_dcbnl_setup(vsi);
32089-#endif /* CONFIG_I40E_DCB */
32090+#endif /* HAVE_DCBNL_IEEE */
32091+#endif /* CONFIG_DCB */
32092 /* fall through */
32093
32094 case I40E_VSI_FDIR:
32095@@ -10484,16 +14427,16 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
32096 goto out;
32097 }
32098
32099- veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
32100+ veb->bw_limit = LE16_TO_CPU(ets_data.port_bw_limit);
32101 veb->bw_max_quanta = ets_data.tc_bw_max;
32102 veb->is_abs_credits = bw_data.absolute_credits_enable;
32103 veb->enabled_tc = ets_data.tc_valid_bits;
32104- tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
32105- (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
32106+ tc_bw_max = LE16_TO_CPU(bw_data.tc_bw_max[0]) |
32107+ (LE16_TO_CPU(bw_data.tc_bw_max[1]) << 16);
32108 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
32109 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
32110 veb->bw_tc_limit_credits[i] =
32111- le16_to_cpu(bw_data.tc_bw_limits[i]);
32112+ LE16_TO_CPU(bw_data.tc_bw_limits[i]);
32113 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
32114 }
32115
32116@@ -10745,7 +14688,7 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
32117 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
32118 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
32119 break;
32120- if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
32121+ if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
32122 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
32123 vsi_seid);
32124 return NULL;
32125@@ -10822,7 +14765,7 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
32126 /* Main VEB? */
32127 if (uplink_seid != pf->mac_seid)
32128 break;
32129- if (pf->lan_veb == I40E_NO_VEB) {
32130+ if (pf->lan_veb >= I40E_MAX_VEB) {
32131 int v;
32132
32133 /* find existing or else empty VEB */
32134@@ -10832,13 +14775,15 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
32135 break;
32136 }
32137 }
32138- if (pf->lan_veb == I40E_NO_VEB) {
32139+ if (pf->lan_veb >= I40E_MAX_VEB) {
32140 v = i40e_veb_mem_alloc(pf);
32141 if (v < 0)
32142 break;
32143 pf->lan_veb = v;
32144 }
32145 }
32146+ if (pf->lan_veb >= I40E_MAX_VEB)
32147+ break;
32148
32149 pf->veb[pf->lan_veb]->seid = seid;
32150 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
32151@@ -10962,14 +14907,16 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
32152 */
32153
32154 if ((pf->hw.pf_id == 0) &&
32155- !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
32156+ !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
32157 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
32158+ pf->last_sw_conf_flags = flags;
32159+ }
32160
32161 if (pf->hw.pf_id == 0) {
32162 u16 valid_flags;
32163
32164 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
32165- ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
32166+ ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
32167 NULL);
32168 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
32169 dev_info(&pf->pdev->dev,
32170@@ -10979,6 +14926,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
32171 pf->hw.aq.asq_last_status));
32172 /* not a fatal problem, just keep going */
32173 }
32174+ pf->last_sw_conf_valid_flags = valid_flags;
32175 }
32176
32177 /* first time setup */
32178@@ -10989,7 +14937,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
32179 /* Set up the PF VSI associated with the PF's main VSI
32180 * that is already in the HW switch
32181 */
32182- if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
32183+ if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
32184 uplink_seid = pf->veb[pf->lan_veb]->seid;
32185 else
32186 uplink_seid = pf->mac_seid;
32187@@ -10999,6 +14947,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
32188 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
32189 if (!vsi) {
32190 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
32191+ i40e_cloud_filter_exit(pf);
32192 i40e_fdir_teardown(pf);
32193 return -EAGAIN;
32194 }
32195@@ -11035,11 +14984,17 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
32196 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
32197 I40E_AQ_AN_COMPLETED) ? true : false);
32198
32199+#ifdef HAVE_PTP_1588_CLOCK
32200 i40e_ptp_init(pf);
32201
32202+#endif /* HAVE_PTP_1588_CLOCK */
32203+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
32204+#if defined(HAVE_UDP_ENC_TUNNEL) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
32205 /* repopulate tunnel port filters */
32206 i40e_sync_udp_filters(pf);
32207
32208+#endif /* HAVE_UDP_ENC_TUNNEL || HAVE_UDP_ENC_RX_OFFLOAD */
32209+#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_UDP_ENC_RX_OFFLOAD */
32210 return ret;
32211 }
32212
32213@@ -11074,6 +15029,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
32214 I40E_FLAG_DCB_ENABLED |
32215 I40E_FLAG_SRIOV_ENABLED |
32216 I40E_FLAG_VMDQ_ENABLED);
32217+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
32218 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
32219 I40E_FLAG_FD_SB_ENABLED |
32220 I40E_FLAG_FD_ATR_ENABLED |
32221@@ -11088,8 +15044,11 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
32222 I40E_FLAG_FD_ATR_ENABLED |
32223 I40E_FLAG_DCB_ENABLED |
32224 I40E_FLAG_VMDQ_ENABLED);
32225+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
32226 } else {
32227 /* Not enough queues for all TCs */
32228+ bool is_max_n_of_queues_required;
32229+
32230 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
32231 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
32232 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
32233@@ -11098,8 +15057,21 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
32234 }
32235 pf->num_lan_qps = max_t(int, pf->rss_size_max,
32236 num_online_cpus());
32237- pf->num_lan_qps = min_t(int, pf->num_lan_qps,
32238- pf->hw.func_caps.num_tx_qp);
32239+ is_max_n_of_queues_required = pf->hw.func_caps.num_tx_qp <
32240+ pf->num_req_vfs * pf->num_vf_qps + pf->num_vf_qps;
32241+ if (is_max_n_of_queues_required)
32242+ dev_warn(&pf->pdev->dev, "not enough %u queues for PF and %u VFs. Using maximum available queues for PF.\n",
32243+ pf->hw.func_caps.num_tx_qp, pf->num_req_vfs);
32244+ if (pf->hw.func_caps.npar_enable ||
32245+ is_max_n_of_queues_required)
32246+ pf->num_lan_qps = min_t
32247+ (int, pf->num_lan_qps,
32248+ pf->hw.func_caps.num_tx_qp);
32249+ else
32250+ pf->num_lan_qps = min_t
32251+ (int, pf->num_lan_qps,
32252+ pf->hw.func_caps.num_tx_qp -
32253+ pf->num_req_vfs * pf->num_vf_qps);
32254
32255 queues_left -= pf->num_lan_qps;
32256 }
32257@@ -11109,6 +15081,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
32258 queues_left -= 1; /* save 1 queue for FD */
32259 } else {
32260 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
32261+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
32262 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
32263 }
32264 }
32265@@ -11195,36 +15168,271 @@ static void i40e_print_features(struct i40e_pf *pf)
32266 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
32267 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
32268 }
32269+ i += snprintf(&buf[i], REMAIN(i), " CloudF");
32270 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
32271 i += snprintf(&buf[i], REMAIN(i), " DCB");
32272+#if IS_ENABLED(CONFIG_VXLAN)
32273 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
32274+#endif
32275+#if IS_ENABLED(CONFIG_GENEVE)
32276 i += snprintf(&buf[i], REMAIN(i), " Geneve");
32277+#endif
32278+#ifdef HAVE_GRE_ENCAP_OFFLOAD
32279+ i += snprintf(&buf[i], REMAIN(i), " NVGRE");
32280+#endif
32281+#ifdef HAVE_PTP_1588_CLOCK
32282 if (pf->flags & I40E_FLAG_PTP)
32283 i += snprintf(&buf[i], REMAIN(i), " PTP");
32284+#endif
32285 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
32286 i += snprintf(&buf[i], REMAIN(i), " VEB");
32287 else
32288 i += snprintf(&buf[i], REMAIN(i), " VEPA");
32289
32290- dev_info(&pf->pdev->dev, "%s\n", buf);
32291- kfree(buf);
32292- WARN_ON(i > INFO_STRING_LEN);
32293+ dev_info(&pf->pdev->dev, "%s\n", buf);
32294+ kfree(buf);
32295+ WARN_ON(i > INFO_STRING_LEN);
32296+}
32297+
32298+/**
32299+ * i40e_get_platform_mac_addr - get platform-specific MAC address
32300+ * @pdev: PCI device information struct
32301+ * @pf: board private structure
32302+ *
32303+ * Look up the MAC address for the device. First we'll try
32304+ * eth_platform_get_mac_address, which will check Open Firmware, or arch
32305+ * specific fallback. Otherwise, we'll default to the stored value in
32306+ * firmware.
32307+ **/
32308+static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
32309+{
32310+ if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
32311+ i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
32312+}
32313+
32314+/**
32315+ * i40e_check_recovery_mode - check if we are running transition firmware
32316+ * @pf: board private structure
32317+ *
32318+ * Check registers indicating the firmware runs in recovery mode. Sets the
32319+ * appropriate driver state.
32320+ *
32321+ * Returns true if the recovery mode was detected, false otherwise
32322+ **/
32323+static bool i40e_check_recovery_mode(struct i40e_pf *pf)
32324+{
32325+ u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
32326+
32327+ if (val & I40E_GL_FWSTS_FWS1B_MASK) {
32328+ dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
32329+ dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
32330+ set_bit(__I40E_RECOVERY_MODE, pf->state);
32331+
32332+ return true;
32333+ }
32334+ if (test_bit(__I40E_RECOVERY_MODE, pf->state))
32335+ dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
32336+
32337+ return false;
32338+}
32339+
32340+/**
32341+ * i40e_pf_loop_reset - perform reset in a loop.
32342+ * @pf: board private structure
32343+ *
32344+ * This function is useful when a NIC is about to enter recovery mode.
32345+ * When a NIC's internal data structures are corrupted the NIC's
32346+ * firmware is going to enter recovery mode.
32347+ * Right after a POR it takes about 7 minutes for firmware to enter
32348+ * recovery mode. Until that time a NIC is in some kind of intermediate
32349+ * state. After that time period the NIC almost surely enters
32350+ * recovery mode. The only way for a driver to detect intermediate
32351+ * state is to issue a series of pf-resets and check a return value.
32352+ * If a PF reset returns success then the firmware could be in recovery
32353+ * mode so the caller of this code needs to check for recovery mode
32354+ * if this function returns success. There is a little chance that
32355+ * firmware will hang in intermediate state forever.
32356+ * Since waiting 7 minutes is quite a lot of time this function waits
32357+ * 10 seconds and then gives up by returning an error.
32358+ *
32359+ * Return 0 on success, negative on failure.
32360+ **/
32361+static i40e_status i40e_pf_loop_reset(struct i40e_pf * const pf)
32362+{
32363+ /* wait max 10 seconds for PF reset to succeed */
32364+ const unsigned long time_end = jiffies + 10 * HZ;
32365+
32366+ struct i40e_hw *hw = &pf->hw;
32367+ i40e_status ret;
32368+
32369+ ret = i40e_pf_reset(hw);
32370+ while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
32371+ usleep_range(10000, 20000);
32372+ ret = i40e_pf_reset(hw);
32373+ }
32374+
32375+ if (ret == I40E_SUCCESS)
32376+ pf->pfr_count++;
32377+ else
32378+ dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
32379+
32380+ return ret;
32381+}
32382+
32383+/**
32384+ * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
32385+ * @pf: board private structure
32386+ *
32387+ * Check FW registers to determine if FW issued unexpected EMP Reset.
32388+ * Every time when unexpected EMP Reset occurs the FW increments
32389+ * a counter of unexpected EMP Resets. When the counter reaches 10
32390+ * the FW should enter the Recovery mode
32391+ *
32392+ * Returns true if FW issued unexpected EMP Reset
32393+ **/
32394+static inline bool i40e_check_fw_empr(struct i40e_pf * const pf)
32395+{
32396+ const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
32397+ I40E_GL_FWSTS_FWS1B_MASK;
32398+ const bool is_empr = (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
32399+ (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
32400+
32401+ return is_empr;
32402+}
32403+
32404+/**
32405+ * i40e_handle_resets - handle EMP resets and PF resets
32406+ * @pf: board private structure
32407+ *
32408+ * Handle both EMP resets and PF resets and conclude whether there are
32409+ * any issues regarding these resets. If there are any issues then
32410+ * generate log entry.
32411+ *
32412+ * Return 0 if NIC is healthy or negative value when there are issues
32413+ * with resets
32414+ **/
32415+static inline i40e_status i40e_handle_resets(struct i40e_pf * const pf)
32416+{
32417+ const i40e_status pfr = i40e_pf_loop_reset(pf);
32418+ const bool is_empr = i40e_check_fw_empr(pf);
32419+
32420+ if (is_empr || pfr != I40E_SUCCESS)
32421+ dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
32422+
32423+ return is_empr ? I40E_ERR_RESET_FAILED : pfr;
32424+}
32425+
32426+/**
32427+ * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
32428+ * @pf: board private structure
32429+ * @hw: ptr to the hardware info
32430+ *
32431+ * This function does a minimal setup of all subsystems needed for running
32432+ * recovery mode.
32433+ *
32434+ * Returns 0 on success, negative on failure
32435+ **/
32436+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
32437+{
32438+ struct i40e_vsi *vsi;
32439+ int err;
32440+ int v_idx;
32441+
32442+#ifdef HAVE_PCI_ERS
32443+ pci_save_state(pf->pdev);
32444+#endif
32445+
32446+ /* set up periodic task facility */
32447+ timer_setup(&pf->service_timer, i40e_service_timer, 0);
32448+ pf->service_timer_period = HZ;
32449+
32450+ INIT_WORK(&pf->service_task, i40e_service_task);
32451+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
32452+
32453+ err = i40e_init_interrupt_scheme(pf);
32454+ if (err)
32455+ goto err_switch_setup;
32456+
32457+ /* The number of VSIs reported by the FW is the minimum guaranteed
32458+ * to us; HW supports far more and we share the remaining pool with
32459+ * the other PFs. We allocate space for more than the guarantee with
32460+ * the understanding that we might not get them all later.
32461+ */
32462+ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
32463+ pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
32464+ else
32465+ pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
32466+
32467+ /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
32468+ pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
32469+ GFP_KERNEL);
32470+ if (!pf->vsi) {
32471+ err = -ENOMEM;
32472+ goto err_switch_setup;
32473+ }
32474+
32475+ /* We allocate one VSI which is needed as absolute minimum
32476+ * in order to register the netdev
32477+ */
32478+ v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
32479+ if (v_idx < 0)
32480+ goto err_switch_setup;
32481+ pf->lan_vsi = v_idx;
32482+ vsi = pf->vsi[v_idx];
32483+ if (!vsi)
32484+ goto err_switch_setup;
32485+ vsi->alloc_queue_pairs = 1;
32486+ err = i40e_config_netdev(vsi);
32487+ if (err)
32488+ goto err_switch_setup;
32489+ err = register_netdev(vsi->netdev);
32490+ if (err)
32491+ goto err_switch_setup;
32492+ vsi->netdev_registered = true;
32493+ i40e_dbg_pf_init(pf);
32494+
32495+ err = i40e_setup_misc_vector_for_recovery_mode(pf);
32496+ if (err)
32497+ goto err_switch_setup;
32498+
32499+ /* tell the firmware that we're starting */
32500+ i40e_send_version(pf);
32501+
32502+ /* since everything's happy, start the service_task timer */
32503+ mod_timer(&pf->service_timer,
32504+ round_jiffies(jiffies + pf->service_timer_period));
32505+
32506+ return 0;
32507+
32508+err_switch_setup:
32509+ i40e_reset_interrupt_capability(pf);
32510+ del_timer_sync(&pf->service_timer);
32511+ dev_warn(&pf->pdev->dev, "previous errors forcing module to load in debug mode\n");
32512+ i40e_dbg_pf_init(pf);
32513+ set_bit(__I40E_DEBUG_MODE, pf->state);
32514+ return 0;
32515 }
32516
32517 /**
32518- * i40e_get_platform_mac_addr - get platform-specific MAC address
32519- * @pdev: PCI device information struct
32520- * @pf: board private structure
32521- *
32522- * Look up the MAC address for the device. First we'll try
32523- * eth_platform_get_mac_address, which will check Open Firmware, or arch
32524- * specific fallback. Otherwise, we'll default to the stored value in
32525- * firmware.
32526+ * i40e_set_fec_in_flags - helper function for setting FEC options in flags
32527+ * @fec_cfg: FEC option to set in flags
32528+ * @flags: ptr to flags in which we set FEC option
32529 **/
32530-static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
32531+void i40e_set_fec_in_flags(u8 fec_cfg, u64 *flags)
32532 {
32533- if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
32534- i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
32535+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
32536+ *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
32537+ else if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
32538+ (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
32539+ *flags |= I40E_FLAG_RS_FEC;
32540+ *flags &= ~I40E_FLAG_BASE_R_FEC;
32541+ } else if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
32542+ (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
32543+ *flags |= I40E_FLAG_BASE_R_FEC;
32544+ *flags &= ~I40E_FLAG_RS_FEC;
32545+ }
32546+ if (fec_cfg == 0)
32547+ *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
32548 }
32549
32550 /**
32551@@ -11238,15 +15446,24 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
32552 *
32553 * Returns 0 on success, negative on failure
32554 **/
32555+#ifdef HAVE_CONFIG_HOTPLUG
32556+static int __devinit i40e_probe(struct pci_dev *pdev,
32557+ const struct pci_device_id *ent)
32558+#else
32559 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32560+#endif
32561 {
32562 struct i40e_aq_get_phy_abilities_resp abilities;
32563+#ifdef CONFIG_DCB
32564+ enum i40e_get_fw_lldp_status_resp lldp_status;
32565+ i40e_status status;
32566+#endif /* CONFIG_DCB */
32567 struct i40e_pf *pf;
32568 struct i40e_hw *hw;
32569 static u16 pfs_found;
32570 u16 wol_nvm_bits;
32571 u16 link_status;
32572- int err;
32573+ int err = 0;
32574 u32 val;
32575 u32 i;
32576 u8 set_fc_aq_fail;
32577@@ -11270,7 +15487,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32578 err = pci_request_mem_regions(pdev, i40e_driver_name);
32579 if (err) {
32580 dev_info(&pdev->dev,
32581- "pci_request_selected_regions failed %d\n", err);
32582+ "pci_request_mem_regions failed %d\n", err);
32583 goto err_pci_reg;
32584 }
32585
32586@@ -11289,6 +15506,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32587 }
32588 pf->next_vsi = 0;
32589 pf->pdev = pdev;
32590+ pci_set_drvdata(pdev, pf);
32591 set_bit(__I40E_DOWN, pf->state);
32592
32593 hw = &pf->hw;
32594@@ -11296,7 +15514,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32595
32596 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
32597 I40E_MAX_CSR_SPACE);
32598-
32599+ /* We believe that the highest register to read is
32600+ * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
32601+ * is not less than that before mapping to prevent a
32602+ * kernel panic.
32603+ */
32604+ if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
32605+ dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
32606+ pf->ioremap_len);
32607+ err = -ENOMEM;
32608+ goto err_ioremap;
32609+ }
32610 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
32611 if (!hw->hw_addr) {
32612 err = -EIO;
32613@@ -11315,41 +15543,42 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32614 hw->bus.bus_id = pdev->bus->number;
32615 pf->instance = pfs_found;
32616
32617+ /* Select something other than the 802.1ad ethertype for the
32618+ * switch to use internally and drop on ingress.
32619+ */
32620+ hw->switch_tag = 0xffff;
32621+ hw->first_tag = ETH_P_8021AD;
32622+ hw->second_tag = ETH_P_8021Q;
32623+
32624 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
32625 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
32626+ INIT_LIST_HEAD(&pf->ddp_old_prof);
32627
32628- /* set up the locks for the AQ, do this only once in probe
32629+ /* set up the spinlocks for the AQ, do this only once in probe
32630 * and destroy them only once in remove
32631 */
32632- mutex_init(&hw->aq.asq_mutex);
32633- mutex_init(&hw->aq.arq_mutex);
32634-
32635- pf->msg_enable = netif_msg_init(debug,
32636- NETIF_MSG_DRV |
32637- NETIF_MSG_PROBE |
32638- NETIF_MSG_LINK);
32639- if (debug < -1)
32640- pf->hw.debug_mask = debug;
32641-
32642- /* do a special CORER for clearing PXE mode once at init */
32643- if (hw->revision_id == 0 &&
32644- (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
32645- wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
32646- i40e_flush(hw);
32647- msleep(200);
32648- pf->corer_count++;
32649+ i40e_init_spinlock_d(&hw->aq.asq_spinlock);
32650+ i40e_init_spinlock_d(&hw->aq.arq_spinlock);
32651
32652- i40e_clear_pxe_mode(hw);
32653- }
32654+ if (debug != -1)
32655+ pf->msg_enable = pf->hw.debug_mask = debug;
32656
32657 /* Reset here to make sure all is clean and to define PF 'n' */
32658+ /* have to do the PF reset first to "graceful abort" all queues */
32659 i40e_clear_hw(hw);
32660- err = i40e_pf_reset(hw);
32661+
32662+ err = i40e_set_mac_type(hw);
32663 if (err) {
32664- dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
32665+ dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
32666+ err);
32667 goto err_pf_reset;
32668 }
32669- pf->pfr_count++;
32670+
32671+ err = i40e_handle_resets(pf);
32672+ if (err)
32673+ goto err_pf_reset;
32674+
32675+ i40e_check_recovery_mode(pf);
32676
32677 hw->aq.num_arq_entries = I40E_AQ_LEN;
32678 hw->aq.num_asq_entries = I40E_AQ_LEN;
32679@@ -11375,7 +15604,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32680 if (err) {
32681 if (err == I40E_ERR_FIRMWARE_API_VERSION)
32682 dev_info(&pdev->dev,
32683- "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
32684+ "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
32685+ hw->aq.api_maj_ver,
32686+ hw->aq.api_min_ver,
32687+ I40E_FW_API_VERSION_MAJOR,
32688+ I40E_FW_MINOR_VERSION(hw));
32689 else
32690 dev_info(&pdev->dev,
32691 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
32692@@ -11391,13 +15624,20 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32693 i40e_nvm_version_str(hw));
32694
32695 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
32696- hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
32697+ hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
32698 dev_info(&pdev->dev,
32699- "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
32700- else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
32701- hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
32702+ "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
32703+ hw->aq.api_maj_ver,
32704+ hw->aq.api_min_ver,
32705+ I40E_FW_API_VERSION_MAJOR,
32706+ I40E_FW_MINOR_VERSION(hw));
32707+ else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
32708 dev_info(&pdev->dev,
32709- "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
32710+ "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
32711+ hw->aq.api_maj_ver,
32712+ hw->aq.api_min_ver,
32713+ I40E_FW_API_VERSION_MAJOR,
32714+ I40E_FW_MINOR_VERSION(hw));
32715
32716 i40e_verify_eeprom(pf);
32717
32718@@ -11406,7 +15646,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32719 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
32720
32721 i40e_clear_pxe_mode(hw);
32722- err = i40e_get_capabilities(pf);
32723+
32724+ err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
32725 if (err)
32726 goto err_adminq_setup;
32727
32728@@ -11416,6 +15657,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32729 goto err_sw_init;
32730 }
32731
32732+ if (test_bit(__I40E_RECOVERY_MODE, pf->state))
32733+ return i40e_init_recovery_mode(pf, hw);
32734+
32735 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
32736 hw->func_caps.num_rx_qp, 0, 0);
32737 if (err) {
32738@@ -11436,7 +15680,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32739 */
32740 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
32741 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
32742- i40e_aq_stop_lldp(hw, true, NULL);
32743+ i40e_aq_stop_lldp(hw, true, false, NULL);
32744 }
32745
32746 /* allow a platform config to override the HW addr */
32747@@ -11452,20 +15696,37 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32748 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
32749 if (is_valid_ether_addr(hw->mac.port_addr))
32750 pf->hw_features |= I40E_HW_PORT_ID_VALID;
32751+#ifdef HAVE_PTP_1588_CLOCK
32752+ i40e_ptp_alloc_pins(pf);
32753+#endif /* HAVE_PTP_1588_CLOCK */
32754
32755- pci_set_drvdata(pdev, pf);
32756+#ifdef HAVE_PCI_ERS
32757 pci_save_state(pdev);
32758-#ifdef CONFIG_I40E_DCB
32759+#endif
32760+#ifdef CONFIG_DCB
32761+ status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
32762+ (status == I40E_SUCCESS &&
32763+ lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ?
32764+ (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) :
32765+ (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP);
32766+ dev_info(&pdev->dev,
32767+ (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
32768+ "FW LLDP is disabled\n" :
32769+ "FW LLDP is enabled\n");
32770+
32771+ /* Enable FW to write default DCB config on link-up */
32772+ i40e_aq_set_dcb_parameters(hw, true, NULL);
32773+
32774 err = i40e_init_pf_dcb(pf);
32775 if (err) {
32776 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
32777 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
32778 /* Continue without DCB enabled */
32779 }
32780-#endif /* CONFIG_I40E_DCB */
32781+#endif /* CONFIG_DCB */
32782
32783 /* set up periodic task facility */
32784- setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
32785+ timer_setup(&pf->service_timer, i40e_service_timer, 0);
32786 pf->service_timer_period = HZ;
32787
32788 INIT_WORK(&pf->service_task, i40e_service_task);
32789@@ -11473,7 +15734,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32790
32791 /* NVM bit on means WoL disabled for the port */
32792 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
32793- if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
32794+ if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
32795 pf->wol_en = false;
32796 else
32797 pf->wol_en = true;
32798@@ -11510,6 +15771,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32799 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
32800 if (pci_num_vf(pdev))
32801 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
32802+#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE)
32803+ else if (pf->num_req_vfs)
32804+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
32805+#endif
32806 }
32807 #endif
32808 err = i40e_setup_pf_switch(pf, false);
32809@@ -11517,6 +15782,20 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32810 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
32811 goto err_vsis;
32812 }
32813+ if (i40e_is_l4mode_enabled() && hw->pf_id == 0) {
32814+ u8 l4type = I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
32815+
32816+ switch (l4mode) {
32817+ case L4_MODE_UDP:
32818+ l4type = I40E_AQ_SET_SWITCH_L4_TYPE_UDP;
32819+ break;
32820+ case L4_MODE_TCP:
32821+ l4type = I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
32822+ break;
32823+ }
32824+ i40e_set_switch_mode(pf, l4type);
32825+ }
32826+ INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
32827
32828 /* Make sure flow control is set according to current settings */
32829 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
32830@@ -11615,6 +15894,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32831 dev_info(&pdev->dev,
32832 "Error %d allocating resources for existing VFs\n",
32833 err);
32834+#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE)
32835+ } else if (pf->num_req_vfs) {
32836+ err = i40e_alloc_vfs(pf, pf->num_req_vfs);
32837+ if (err) {
32838+ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
32839+ dev_info(&pdev->dev,
32840+ "failed to alloc vfs: %d\n", err);
32841+ }
32842+#endif /* HAVE_SRIOV_CONFIGURE */
32843 }
32844 }
32845 #endif /* CONFIG_PCI_IOV */
32846@@ -11631,6 +15919,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32847 }
32848 }
32849
32850+ pfs_found++;
32851 i40e_dbg_pf_init(pf);
32852
32853 /* tell the firmware that we're starting */
32854@@ -11647,7 +15936,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32855 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
32856 err);
32857 }
32858-
32859 #define PCI_SPEED_SIZE 8
32860 #define PCI_WIDTH_SIZE 8
32861 /* Devices on the IOSF bus do not have this information
32862@@ -11668,23 +15956,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32863
32864 switch (hw->bus.speed) {
32865 case i40e_bus_speed_8000:
32866- strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
32867+ strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
32868 case i40e_bus_speed_5000:
32869- strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
32870+ strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
32871 case i40e_bus_speed_2500:
32872- strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
32873+ strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
32874 default:
32875 break;
32876 }
32877 switch (hw->bus.width) {
32878 case i40e_bus_width_pcie_x8:
32879- strncpy(width, "8", PCI_WIDTH_SIZE); break;
32880+ strlcpy(width, "8", PCI_WIDTH_SIZE); break;
32881 case i40e_bus_width_pcie_x4:
32882- strncpy(width, "4", PCI_WIDTH_SIZE); break;
32883+ strlcpy(width, "4", PCI_WIDTH_SIZE); break;
32884 case i40e_bus_width_pcie_x2:
32885- strncpy(width, "2", PCI_WIDTH_SIZE); break;
32886+ strlcpy(width, "2", PCI_WIDTH_SIZE); break;
32887 case i40e_bus_width_pcie_x1:
32888- strncpy(width, "1", PCI_WIDTH_SIZE); break;
32889+ strlcpy(width, "1", PCI_WIDTH_SIZE); break;
32890 default:
32891 break;
32892 }
32893@@ -11707,6 +15995,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32894 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
32895 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
32896
32897+ /* set the FEC config due to the board capabilities */
32898+ i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
32899+
32900 /* get the supported phy types from the fw */
32901 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
32902 if (err)
32903@@ -11724,7 +16015,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
32904 pf->main_vsi_seid);
32905
32906 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
32907- (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
32908+ (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
32909 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
32910 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
32911 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
32912@@ -11770,7 +16061,11 @@ err_dma:
32913 * Hot-Plug event, or because the driver is going to be removed from
32914 * memory.
32915 **/
32916+#ifdef HAVE_CONFIG_HOTPLUG
32917+static void __devexit i40e_remove(struct pci_dev *pdev)
32918+#else
32919 static void i40e_remove(struct pci_dev *pdev)
32920+#endif
32921 {
32922 struct i40e_pf *pf = pci_get_drvdata(pdev);
32923 struct i40e_hw *hw = &pf->hw;
32924@@ -11778,25 +16073,40 @@ static void i40e_remove(struct pci_dev *pdev)
32925 int i;
32926
32927 i40e_dbg_pf_exit(pf);
32928-
32929+#ifdef HAVE_PTP_1588_CLOCK
32930 i40e_ptp_stop(pf);
32931
32932 /* Disable RSS in hw */
32933 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
32934 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
32935
32936+#endif /* HAVE_PTP_1588_CLOCK */
32937 /* no more scheduling of any task */
32938 set_bit(__I40E_SUSPENDED, pf->state);
32939 set_bit(__I40E_DOWN, pf->state);
32940- if (pf->service_timer.data)
32941+ if (pf->service_timer.function)
32942 del_timer_sync(&pf->service_timer);
32943 if (pf->service_task.func)
32944 cancel_work_sync(&pf->service_task);
32945-
32946 /* Client close must be called explicitly here because the timer
32947 * has been stopped.
32948 */
32949 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
32950+ if (test_bit(__I40E_DEBUG_MODE, pf->state))
32951+ goto debug_mode_clear;
32952+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
32953+ struct i40e_vsi *vsi = pf->vsi[0];
32954+
32955+ /* We know that we have allocated only one vsi for this PF,
32956+ * it was just for registering netdevice, so the interface
32957+ * could be visible in the 'ifconfig' output
32958+ */
32959+ unregister_netdev(vsi->netdev);
32960+ free_netdev(vsi->netdev);
32961+ vsi->netdev = NULL;
32962+
32963+ goto unmap;
32964+ }
32965
32966 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
32967 i40e_free_vfs(pf);
32968@@ -11823,6 +16133,8 @@ static void i40e_remove(struct pci_dev *pdev)
32969 if (pf->vsi[pf->lan_vsi])
32970 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
32971
32972+ i40e_cloud_filter_exit(pf);
32973+
32974 /* remove attached clients */
32975 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
32976 ret_code = i40e_lan_del_device(pf);
32977@@ -11840,22 +16152,37 @@ static void i40e_remove(struct pci_dev *pdev)
32978 ret_code);
32979 }
32980
32981- /* shutdown the adminq */
32982- i40e_shutdown_adminq(hw);
32983-
32984- /* destroy the locks only once, here */
32985- mutex_destroy(&hw->aq.arq_mutex);
32986- mutex_destroy(&hw->aq.asq_mutex);
32987+unmap:
32988+ /* Free MSI/legacy interrupt 0 when in recovery mode.
32989+ * This is normally done in i40e_vsi_free_irq on
32990+ * VSI close but since recovery mode doesn't allow to up
32991+ * an interface and we do not allocate all Rx/Tx resources
32992+ * for it we'll just do it here
32993+ */
32994+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
32995+ !(pf->flags & I40E_FLAG_MSIX_ENABLED))
32996+ free_irq(pf->pdev->irq, pf);
32997
32998 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
32999+ rtnl_lock();
33000 i40e_clear_interrupt_scheme(pf);
33001 for (i = 0; i < pf->num_alloc_vsi; i++) {
33002 if (pf->vsi[i]) {
33003- i40e_vsi_clear_rings(pf->vsi[i]);
33004+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
33005+ i40e_vsi_clear_rings(pf->vsi[i]);
33006 i40e_vsi_clear(pf->vsi[i]);
33007 pf->vsi[i] = NULL;
33008 }
33009 }
33010+ rtnl_unlock();
33011+
33012+debug_mode_clear:
33013+ /* shutdown the adminq */
33014+ i40e_shutdown_adminq(hw);
33015+
33016+ /* destroy the locks only once, here */
33017+ i40e_destroy_spinlock_d(&hw->aq.arq_spinlock);
33018+ i40e_destroy_spinlock_d(&hw->aq.asq_spinlock);
33019
33020 for (i = 0; i < I40E_MAX_VEB; i++) {
33021 kfree(pf->veb[i]);
33022@@ -11873,9 +16200,11 @@ static void i40e_remove(struct pci_dev *pdev)
33023 pci_disable_device(pdev);
33024 }
33025
33026+#ifdef HAVE_PCI_ERS
33027 /**
33028 * i40e_pci_error_detected - warning that something funky happened in PCI land
33029 * @pdev: PCI device information struct
33030+ * @error: the type of PCI error
33031 *
33032 * Called to warn that something happened and the error handling steps
33033 * are in progress. Allows the driver to quiesce things, be ready for
33034@@ -11890,7 +16219,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
33035
33036 if (!pf) {
33037 dev_info(&pdev->dev,
33038- "Cannot recover - error happened during device probe\n");
33039+ "Cannot recover -error happened during device probe\n");
33040 return PCI_ERS_RESULT_DISCONNECT;
33041 }
33042
33043@@ -11947,6 +16276,49 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
33044 return result;
33045 }
33046
33047+#if defined(HAVE_PCI_ERROR_HANDLER_RESET_PREPARE) || defined(HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY) || defined(HAVE_RHEL7_PCI_RESET_NOTIFY)
33048+/**
33049+ * i40e_pci_error_reset_prepare - prepare device driver for pci reset
33050+ * @pdev: PCI device information struct
33051+ */
33052+static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
33053+{
33054+ struct i40e_pf *pf = pci_get_drvdata(pdev);
33055+
33056+ i40e_prep_for_reset(pf, false);
33057+}
33058+
33059+/**
33060+ * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
33061+ * @pdev: PCI device information struct
33062+ */
33063+static void i40e_pci_error_reset_done(struct pci_dev *pdev)
33064+{
33065+ struct i40e_pf *pf = pci_get_drvdata(pdev);
33066+
33067+ i40e_reset_and_rebuild(pf, false, false);
33068+}
33069+
33070+#endif
33071+#if defined(HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY) || defined(HAVE_RHEL7_PCI_RESET_NOTIFY)
33072+/**
33073+ * i40e_pci_error_reset_notify - notify device driver of pci reset
33074+ * @pdev: PCI device information struct
33075+ * @prepare: true if device is about to be reset; false if reset attempt
33076+ * completed
33077+ *
33078+ * Called to perform pf reset when a pci function level reset is triggered
33079+ **/
33080+static void i40e_pci_error_reset_notify(struct pci_dev *pdev, bool prepare)
33081+{
33082+ dev_dbg(&pdev->dev, "%s\n", __func__);
33083+ if (prepare)
33084+ i40e_pci_error_reset_prepare(pdev);
33085+ else
33086+ i40e_pci_error_reset_done(pdev);
33087+}
33088+
33089+#endif
33090 /**
33091 * i40e_pci_error_resume - restart operations after PCI error recovery
33092 * @pdev: PCI device information struct
33093@@ -12012,6 +16384,11 @@ static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
33094 "Failed to enable Multicast Magic Packet wake up\n");
33095 }
33096
33097+/* FW state indicating on X722 that we need to disable WoL to
33098+ * allow adapter to shutdown completely
33099+ */
33100+#define I40E_GL_FWSTS_FWS0B_STAGE_FW_UPDATE_POR_REQUIRED 0x0F
33101+
33102 /**
33103 * i40e_shutdown - PCI callback for shutting down
33104 * @pdev: PCI device information struct
33105@@ -12020,18 +16397,17 @@ static void i40e_shutdown(struct pci_dev *pdev)
33106 {
33107 struct i40e_pf *pf = pci_get_drvdata(pdev);
33108 struct i40e_hw *hw = &pf->hw;
33109+ u32 val = 0;
33110
33111 set_bit(__I40E_SUSPENDED, pf->state);
33112 set_bit(__I40E_DOWN, pf->state);
33113- rtnl_lock();
33114- i40e_prep_for_reset(pf, true);
33115- rtnl_unlock();
33116
33117- wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
33118- wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
33119+ if (test_bit(__I40E_DEBUG_MODE, pf->state))
33120+ goto debug_mode;
33121
33122 del_timer_sync(&pf->service_timer);
33123 cancel_work_sync(&pf->service_task);
33124+ i40e_cloud_filter_exit(pf);
33125 i40e_fdir_teardown(pf);
33126
33127 /* Client close must be called explicitly here because the timer
33128@@ -12039,55 +16415,186 @@ static void i40e_shutdown(struct pci_dev *pdev)
33129 */
33130 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
33131
33132- if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
33133- i40e_enable_mc_magic_wake(pf);
33134+ val = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS0B_MASK;
33135
33136- i40e_prep_for_reset(pf, false);
33137+ if (pf->hw.mac.type == I40E_MAC_X722) {
33138+ /* We check here if we need to disable the WoL to allow adapter
33139+ * to shutdown completely after a FW update
33140+ */
33141+ if (val != I40E_GL_FWSTS_FWS0B_STAGE_FW_UPDATE_POR_REQUIRED &&
33142+ pf->wol_en) {
33143+ if (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)
33144+ i40e_enable_mc_magic_wake(pf);
33145+
33146+ i40e_prep_for_reset(pf, false);
33147+
33148+ wr32(hw, I40E_PFPM_APM, I40E_PFPM_APM_APME_MASK);
33149+ wr32(hw, I40E_PFPM_WUFC, I40E_PFPM_WUFC_MAG_MASK);
33150+ } else {
33151+ i40e_prep_for_reset(pf, false);
33152+
33153+ wr32(hw, I40E_PFPM_APM, 0);
33154+ wr32(hw, I40E_PFPM_WUFC, 0);
33155+ }
33156+ } else {
33157+ i40e_prep_for_reset(pf, false);
33158+
33159+ wr32(hw, I40E_PFPM_APM,
33160+ (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
33161+ wr32(hw, I40E_PFPM_WUFC,
33162+ (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
33163+ }
33164
33165- wr32(hw, I40E_PFPM_APM,
33166- (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
33167- wr32(hw, I40E_PFPM_WUFC,
33168- (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
33169+ /* Free MSI/legacy interrupt 0 when in recovery mode.
33170+ * This is normally done in i40e_vsi_free_irq on
33171+ * VSI close but since recovery mode doesn't allow to up
33172+ * an interface and we do not allocate all Rx/Tx resources
33173+ * for it we'll just do it here
33174+ */
33175+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
33176+ !(pf->flags & I40E_FLAG_MSIX_ENABLED))
33177+ free_irq(pf->pdev->irq, pf);
33178
33179+ /* Since we're going to destroy queues during the
33180+ * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
33181+ * whole section
33182+ */
33183+ rtnl_lock();
33184 i40e_clear_interrupt_scheme(pf);
33185+ rtnl_unlock();
33186
33187- if (system_state == SYSTEM_POWER_OFF) {
33188+debug_mode:
33189+
33190+ if (pf->hw.mac.type == I40E_MAC_X722 &&
33191+ val == I40E_GL_FWSTS_FWS0B_STAGE_FW_UPDATE_POR_REQUIRED) {
33192+ pci_wake_from_d3(pdev, false);
33193+ device_set_wakeup_enable(&pdev->dev, false);
33194+ } else if (system_state == SYSTEM_POWER_OFF) {
33195 pci_wake_from_d3(pdev, pf->wol_en);
33196- pci_set_power_state(pdev, PCI_D3hot);
33197 }
33198+ pci_set_power_state(pdev, PCI_D3hot);
33199 }
33200
33201 #ifdef CONFIG_PM
33202 /**
33203- * i40e_suspend - PCI callback for moving to D3
33204- * @pdev: PCI device information struct
33205+ * i40e_suspend - PM callback for moving to D3
33206+ * @dev: generic device information structure
33207 **/
33208-static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
33209+static int i40e_suspend(struct device *dev)
33210 {
33211+ struct pci_dev *pdev = to_pci_dev(dev);
33212 struct i40e_pf *pf = pci_get_drvdata(pdev);
33213 struct i40e_hw *hw = &pf->hw;
33214- int retval = 0;
33215
33216- set_bit(__I40E_SUSPENDED, pf->state);
33217+ /* If we're already suspended, then there is nothing to do */
33218+ if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
33219+ return 0;
33220+
33221 set_bit(__I40E_DOWN, pf->state);
33222
33223+ /* Ensure service task will not be running */
33224+ del_timer_sync(&pf->service_timer);
33225+ cancel_work_sync(&pf->service_task);
33226+
33227+ /* Client close must be called explicitly here because the timer
33228+ * has been stopped.
33229+ */
33230+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
33231+
33232+ if (test_bit(__I40E_DEBUG_MODE, pf->state))
33233+ return 0;
33234+
33235 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
33236 i40e_enable_mc_magic_wake(pf);
33237
33238- i40e_prep_for_reset(pf, false);
33239+ /* Since we're going to destroy queues during the
33240+ * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
33241+ * whole section
33242+ */
33243+ rtnl_lock();
33244+
33245+ i40e_prep_for_reset(pf, true);
33246
33247 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
33248 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
33249
33250- i40e_stop_misc_vector(pf);
33251- if (pf->msix_entries) {
33252- synchronize_irq(pf->msix_entries[0].vector);
33253- free_irq(pf->msix_entries[0].vector, pf);
33254+ /* Clear the interrupt scheme and release our IRQs so that the system
33255+ * can safely hibernate even when there are a large number of CPUs.
33256+ * Otherwise hibernation might fail when mapping all the vectors back
33257+ * to CPU0.
33258+ */
33259+ i40e_clear_interrupt_scheme(pf);
33260+
33261+ rtnl_unlock();
33262+
33263+ return 0;
33264+}
33265+
33266+/**
33267+ * i40e_resume - PM callback for waking up from D3
33268+ * @dev: generic device information structure
33269+ **/
33270+static int i40e_resume(struct device *dev)
33271+{
33272+ struct pci_dev *pdev = to_pci_dev(dev);
33273+ struct i40e_pf *pf = pci_get_drvdata(pdev);
33274+ int err;
33275+
33276+ /* If we're not suspended, then there is nothing to do */
33277+ if (!test_bit(__I40E_SUSPENDED, pf->state))
33278+ return 0;
33279+
33280+ /* We need to hold the RTNL lock prior to restoring interrupt schemes,
33281+ * since we're going to be restoring queues
33282+ */
33283+ rtnl_lock();
33284+
33285+ /* We cleared the interrupt scheme when we suspended, so we need to
33286+ * restore it now to resume device functionality.
33287+ */
33288+ err = i40e_restore_interrupt_scheme(pf);
33289+ if (err) {
33290+ dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
33291+ err);
33292 }
33293+
33294+ clear_bit(__I40E_DOWN, pf->state);
33295+ i40e_reset_and_rebuild(pf, false, true);
33296+
33297+ rtnl_unlock();
33298+
33299+ /* Clear suspended state last after everything is recovered */
33300+ clear_bit(__I40E_SUSPENDED, pf->state);
33301+
33302+ /* Restart the service task */
33303+ mod_timer(&pf->service_timer,
33304+ round_jiffies(jiffies + pf->service_timer_period));
33305+
33306+ return 0;
33307+}
33308+
33309+#ifdef USE_LEGACY_PM_SUPPORT
33310+/**
33311+ * i40e_legacy_suspend - PCI callback for moving to D3
33312+ * @pdev: PCI device information struct
33313+ * @state: PCI power state
33314+ *
33315+ * Legacy suspend handler for older kernels which do not support the newer
33316+ * generic callbacks
33317+ **/
33318+static int i40e_legacy_suspend(struct pci_dev *pdev, pm_message_t state)
33319+{
33320+ struct i40e_pf *pf = pci_get_drvdata(pdev);
33321+ int retval = i40e_suspend(&pdev->dev);
33322+
33323+ /* Some older kernels may not handle state correctly for legacy power
33324+ * management, so we'll handle it here ourselves
33325+ */
33326 retval = pci_save_state(pdev);
33327 if (retval)
33328 return retval;
33329
33330+ pci_disable_device(pdev);
33331 pci_wake_from_d3(pdev, pf->wol_en);
33332 pci_set_power_state(pdev, PCI_D3hot);
33333
33334@@ -12095,12 +16602,14 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
33335 }
33336
33337 /**
33338- * i40e_resume - PCI callback for waking up from D3
33339+ * i40e_legacy_resume - PCI callback for waking up from D3
33340 * @pdev: PCI device information struct
33341+ *
33342+ * Legacy resume handler for kernels which do not support the newer generic
33343+ * callbacks.
33344 **/
33345-static int i40e_resume(struct pci_dev *pdev)
33346+static int i40e_legacy_resume(struct pci_dev *pdev)
33347 {
33348- struct i40e_pf *pf = pci_get_drvdata(pdev);
33349 u32 err;
33350
33351 pci_set_power_state(pdev, PCI_D0);
33352@@ -12112,7 +16621,7 @@ static int i40e_resume(struct pci_dev *pdev)
33353
33354 err = pci_enable_device_mem(pdev);
33355 if (err) {
33356- dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
33357+ dev_err(pci_dev_to_dev(pdev), "Cannot enable PCI device from suspend\n");
33358 return err;
33359 }
33360 pci_set_master(pdev);
33361@@ -12120,43 +16629,75 @@ static int i40e_resume(struct pci_dev *pdev)
33362 /* no wakeup events while running */
33363 pci_wake_from_d3(pdev, false);
33364
33365- /* handling the reset will rebuild the device state */
33366- if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
33367- clear_bit(__I40E_DOWN, pf->state);
33368- if (pf->msix_entries) {
33369- err = request_irq(pf->msix_entries[0].vector,
33370- i40e_intr, 0, pf->int_name, pf);
33371- if (err) {
33372- dev_err(&pf->pdev->dev,
33373- "request_irq for %s failed: %d\n",
33374- pf->int_name, err);
33375- }
33376- }
33377- i40e_reset_and_rebuild(pf, false, false);
33378- }
33379-
33380- return 0;
33381+ return i40e_resume(&pdev->dev);
33382 }
33383+#endif /* USE_LEGACY_PM_SUPPORT */
33384+#endif /* CONFIG_PM */
33385
33386-#endif
33387+#ifdef HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
33388 static const struct pci_error_handlers i40e_err_handler = {
33389+#else
33390+static struct pci_error_handlers i40e_err_handler = {
33391+#endif
33392 .error_detected = i40e_pci_error_detected,
33393 .slot_reset = i40e_pci_error_slot_reset,
33394+#ifdef HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY
33395+ .reset_notify = i40e_pci_error_reset_notify,
33396+#endif
33397+#ifdef HAVE_PCI_ERROR_HANDLER_RESET_PREPARE
33398+ .reset_prepare = i40e_pci_error_reset_prepare,
33399+ .reset_done = i40e_pci_error_reset_done,
33400+#endif
33401 .resume = i40e_pci_error_resume,
33402 };
33403
33404+#if defined(HAVE_RHEL6_SRIOV_CONFIGURE) || defined(HAVE_RHEL7_PCI_DRIVER_RH)
33405+static struct pci_driver_rh i40e_driver_rh = {
33406+#ifdef HAVE_RHEL6_SRIOV_CONFIGURE
33407+ .sriov_configure = i40e_pci_sriov_configure,
33408+#elif defined(HAVE_RHEL7_PCI_RESET_NOTIFY)
33409+ .reset_notify = i40e_pci_error_reset_notify,
33410+#endif
33411+};
33412+
33413+#endif
33414+#endif /* HAVE_PCI_ERS */
33415+#if defined(CONFIG_PM) && !defined(USE_LEGACY_PM_SUPPORT)
33416+static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
33417+#endif /* CONFIG_PM && !USE_LEGACY_PM_SUPPORT */
33418+
33419 static struct pci_driver i40e_driver = {
33420 .name = i40e_driver_name,
33421 .id_table = i40e_pci_tbl,
33422 .probe = i40e_probe,
33423+#ifdef HAVE_CONFIG_HOTPLUG
33424+ .remove = __devexit_p(i40e_remove),
33425+#else
33426 .remove = i40e_remove,
33427-#ifdef CONFIG_PM
33428- .suspend = i40e_suspend,
33429- .resume = i40e_resume,
33430 #endif
33431+#ifdef CONFIG_PM
33432+#ifdef USE_LEGACY_PM_SUPPORT
33433+ .suspend = i40e_legacy_suspend,
33434+ .resume = i40e_legacy_resume,
33435+#else /* USE_LEGACY_PM_SUPPORT */
33436+ .driver = {
33437+ .pm = &i40e_pm_ops,
33438+ },
33439+#endif /* !USE_LEGACY_PM_SUPPORT */
33440+#endif /* CONFIG_PM */
33441 .shutdown = i40e_shutdown,
33442+#ifdef HAVE_PCI_ERS
33443 .err_handler = &i40e_err_handler,
33444+#endif
33445+#ifdef HAVE_SRIOV_CONFIGURE
33446 .sriov_configure = i40e_pci_sriov_configure,
33447+#endif
33448+#ifdef HAVE_RHEL6_SRIOV_CONFIGURE
33449+ .rh_reserved = &i40e_driver_rh,
33450+#endif
33451+#ifdef HAVE_RHEL7_PCI_DRIVER_RH
33452+ .pci_driver_rh = &i40e_driver_rh,
33453+#endif
33454 };
33455
33456 /**
33457@@ -12170,11 +16711,6 @@ static int __init i40e_init_module(void)
33458 pr_info("%s: %s - version %s\n", i40e_driver_name,
33459 i40e_driver_string, i40e_driver_version_str);
33460 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
33461-#if defined (CONFIG_ARM64)
33462- mark_tech_preview(i40e_driver.name, THIS_MODULE);
33463-#elif !defined (CONFIG_ARM64) && !defined (CONFIG_PPC)
33464- mark_driver_unsupported(i40e_driver.name);
33465-#endif
33466
33467 /* There is no need to throttle the number of active tasks because
33468 * each device limits its own task using a state bit for scheduling
33469@@ -12188,7 +16724,13 @@ static int __init i40e_init_module(void)
33470 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
33471 return -ENOMEM;
33472 }
33473+#ifdef HAVE_RHEL7_PCI_DRIVER_RH
33474+ /* The size member must be initialized in the driver via a call to
33475+ * set_pci_driver_rh_size before pci_register_driver is called
33476+ */
33477+ set_pci_driver_rh_size(i40e_driver_rh);
33478
33479+#endif
33480 i40e_dbg_init();
33481 return pci_register_driver(&i40e_driver);
33482 }
33483@@ -12205,5 +16747,8 @@ static void __exit i40e_exit_module(void)
33484 pci_unregister_driver(&i40e_driver);
33485 destroy_workqueue(i40e_wq);
33486 i40e_dbg_exit();
33487+#ifdef HAVE_KFREE_RCU_BARRIER
33488+ rcu_barrier();
33489+#endif
33490 }
33491 module_exit(i40e_exit_module);
33492diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
33493index d591b3e6b..04fe349dd 100644
33494--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
33495+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
33496@@ -1,28 +1,5 @@
33497-/*******************************************************************************
33498- *
33499- * Intel Ethernet Controller XL710 Family Linux Driver
33500- * Copyright(c) 2013 - 2014 Intel Corporation.
33501- *
33502- * This program is free software; you can redistribute it and/or modify it
33503- * under the terms and conditions of the GNU General Public License,
33504- * version 2, as published by the Free Software Foundation.
33505- *
33506- * This program is distributed in the hope it will be useful, but WITHOUT
33507- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
33508- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
33509- * more details.
33510- *
33511- * You should have received a copy of the GNU General Public License along
33512- * with this program. If not, see <http://www.gnu.org/licenses/>.
33513- *
33514- * The full GNU General Public License is included in this distribution in
33515- * the file called "COPYING".
33516- *
33517- * Contact Information:
33518- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
33519- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
33520- *
33521- ******************************************************************************/
33522+// SPDX-License-Identifier: GPL-2.0
33523+/* Copyright(c) 2013 - 2020 Intel Corporation. */
33524
33525 #include "i40e_prototype.h"
33526
33527@@ -34,12 +11,12 @@
33528 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
33529 * Please notice that the NVM term is used here (& in all methods covered
33530 * in this file) as an equivalent of the FLASH part mapped into the SR.
33531- * We are accessing FLASH always thru the Shadow RAM.
33532+ * We are accessing FLASH always through the Shadow RAM.
33533 **/
33534 i40e_status i40e_init_nvm(struct i40e_hw *hw)
33535 {
33536 struct i40e_nvm_info *nvm = &hw->nvm;
33537- i40e_status ret_code = 0;
33538+ i40e_status ret_code = I40E_SUCCESS;
33539 u32 fla, gens;
33540 u8 sr_size;
33541
33542@@ -78,7 +55,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
33543 i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
33544 enum i40e_aq_resource_access_type access)
33545 {
33546- i40e_status ret_code = 0;
33547+ i40e_status ret_code = I40E_SUCCESS;
33548 u64 gtime, timeout;
33549 u64 time_left = 0;
33550
33551@@ -108,13 +85,13 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
33552 I40E_NVM_RESOURCE_ID,
33553 access, 0, &time_left,
33554 NULL);
33555- if (!ret_code) {
33556+ if (ret_code == I40E_SUCCESS) {
33557 hw->nvm.hw_semaphore_timeout =
33558 I40E_MS_TO_GTIME(time_left) + gtime;
33559 break;
33560 }
33561 }
33562- if (ret_code) {
33563+ if (ret_code != I40E_SUCCESS) {
33564 hw->nvm.hw_semaphore_timeout = 0;
33565 i40e_debug(hw, I40E_DEBUG_NVM,
33566 "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
33567@@ -147,11 +124,10 @@ void i40e_release_nvm(struct i40e_hw *hw)
33568 */
33569 while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
33570 (total_delay < hw->aq.asq_cmd_timeout)) {
33571- usleep_range(1000, 2000);
33572- ret_code = i40e_aq_release_resource(hw,
33573- I40E_NVM_RESOURCE_ID,
33574- 0, NULL);
33575- total_delay++;
33576+ usleep_range(1000, 2000);
33577+ ret_code = i40e_aq_release_resource(hw,
33578+ I40E_NVM_RESOURCE_ID, 0, NULL);
33579+ total_delay++;
33580 }
33581 }
33582
33583@@ -170,7 +146,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
33584 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
33585 srctl = rd32(hw, I40E_GLNVM_SRCTL);
33586 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
33587- ret_code = 0;
33588+ ret_code = I40E_SUCCESS;
33589 break;
33590 }
33591 udelay(5);
33592@@ -188,15 +164,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
33593 *
33594 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
33595 **/
33596-static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
33597- u16 *data)
33598+static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw,
33599+ u16 offset, u16 *data)
33600 {
33601 i40e_status ret_code = I40E_ERR_TIMEOUT;
33602 u32 sr_reg;
33603
33604 if (offset >= hw->nvm.sr_size) {
33605 i40e_debug(hw, I40E_DEBUG_NVM,
33606- "NVM read error: offset %d beyond Shadow RAM limit %d\n",
33607+ "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
33608 offset, hw->nvm.sr_size);
33609 ret_code = I40E_ERR_PARAM;
33610 goto read_nvm_exit;
33611@@ -204,7 +180,7 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
33612
33613 /* Poll the done bit first */
33614 ret_code = i40e_poll_sr_srctl_done_bit(hw);
33615- if (!ret_code) {
33616+ if (ret_code == I40E_SUCCESS) {
33617 /* Write the address and start reading */
33618 sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
33619 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
33620@@ -212,14 +188,14 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
33621
33622 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
33623 ret_code = i40e_poll_sr_srctl_done_bit(hw);
33624- if (!ret_code) {
33625+ if (ret_code == I40E_SUCCESS) {
33626 sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
33627 *data = (u16)((sr_reg &
33628 I40E_GLNVM_SRDATA_RDDATA_MASK)
33629 >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
33630 }
33631 }
33632- if (ret_code)
33633+ if (ret_code != I40E_SUCCESS)
33634 i40e_debug(hw, I40E_DEBUG_NVM,
33635 "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
33636 offset);
33637@@ -239,9 +215,10 @@ read_nvm_exit:
33638 *
33639 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
33640 **/
33641-static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
33642- u32 offset, u16 words, void *data,
33643- bool last_command)
33644+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
33645+ u8 module_pointer, u32 offset,
33646+ u16 words, void *data,
33647+ bool last_command)
33648 {
33649 i40e_status ret_code = I40E_ERR_NVM;
33650 struct i40e_asq_cmd_details cmd_details;
33651@@ -287,18 +264,18 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
33652 * Reads one 16 bit word from the Shadow RAM using the AdminQ
33653 **/
33654 static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
33655- u16 *data)
33656+ u16 *data)
33657 {
33658 i40e_status ret_code = I40E_ERR_TIMEOUT;
33659
33660 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
33661- *data = le16_to_cpu(*(__le16 *)data);
33662+ *data = LE16_TO_CPU(*(__le16 *)data);
33663
33664 return ret_code;
33665 }
33666
33667 /**
33668- * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
33669+ * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
33670 * @hw: pointer to the HW structure
33671 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
33672 * @data: word read from the Shadow RAM
33673@@ -309,19 +286,17 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
33674 * taken via i40e_acquire_nvm().
33675 **/
33676 static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
33677- u16 offset, u16 *data)
33678+ u16 offset, u16 *data)
33679 {
33680- i40e_status ret_code = 0;
33681
33682 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
33683- ret_code = i40e_read_nvm_word_aq(hw, offset, data);
33684- else
33685- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
33686- return ret_code;
33687+ return i40e_read_nvm_word_aq(hw, offset, data);
33688+
33689+ return i40e_read_nvm_word_srctl(hw, offset, data);
33690 }
33691
33692 /**
33693- * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
33694+ * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
33695 * @hw: pointer to the HW structure
33696 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
33697 * @data: word read from the Shadow RAM
33698@@ -329,21 +304,93 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
33699 * Reads one 16 bit word from the Shadow RAM.
33700 **/
33701 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
33702- u16 *data)
33703+ u16 *data)
33704 {
33705- i40e_status ret_code = 0;
33706+ i40e_status ret_code = I40E_SUCCESS;
33707+
33708+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
33709+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
33710
33711- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
33712 if (ret_code)
33713 return ret_code;
33714-
33715 ret_code = __i40e_read_nvm_word(hw, offset, data);
33716
33717- i40e_release_nvm(hw);
33718-
33719+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
33720+ i40e_release_nvm(hw);
33721 return ret_code;
33722 }
33723
33724+/**
33725+ * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
33726+ * @hw: Pointer to the HW structure
33727+ * @module_ptr: Pointer to module in words with respect to NVM beginning
33728+ * @module_offset: Offset in words from module start
33729+ * @data_offset: Offset in words from reading data area start
33730+ * @words_data_size: Words to read from NVM
33731+ * @data_ptr: Pointer to memory location where resulting buffer will be stored
33732+ **/
33733+enum i40e_status_code
33734+i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
33735+ u16 data_offset, u16 words_data_size, u16 *data_ptr)
33736+{
33737+ i40e_status status;
33738+ u16 specific_ptr = 0;
33739+ u16 ptr_value = 0;
33740+ u16 offset = 0;
33741+
33742+ if (module_ptr != 0) {
33743+ status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
33744+ if (status != I40E_SUCCESS) {
33745+ i40e_debug(hw, I40E_DEBUG_ALL,
33746+ "Reading nvm word failed.Error code: %d.\n",
33747+ status);
33748+ return I40E_ERR_NVM;
33749+ }
33750+ }
33751+#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
33752+#define I40E_NVM_INVALID_VAL 0xFFFF
33753+
33754+ /* Pointer not initialized */
33755+ if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
33756+ ptr_value == I40E_NVM_INVALID_VAL) {
33757+ i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
33758+ return I40E_ERR_BAD_PTR;
33759+ }
33760+
33761+ /* Check whether the module is in SR mapped area or outside */
33762+ if (ptr_value & I40E_PTR_TYPE) {
33763+ /* Pointer points outside of the Shared RAM mapped area */
33764+ i40e_debug(hw, I40E_DEBUG_ALL,
33765+ "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
33766+
33767+ return I40E_ERR_PARAM;
33768+ } else {
33769+ /* Read from the Shadow RAM */
33770+
33771+ status = i40e_read_nvm_word(hw, ptr_value + module_offset,
33772+ &specific_ptr);
33773+ if (status != I40E_SUCCESS) {
33774+ i40e_debug(hw, I40E_DEBUG_ALL,
33775+ "Reading nvm word failed.Error code: %d.\n",
33776+ status);
33777+ return I40E_ERR_NVM;
33778+ }
33779+
33780+ offset = ptr_value + module_offset + specific_ptr +
33781+ data_offset;
33782+
33783+ status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
33784+ data_ptr);
33785+ if (status != I40E_SUCCESS) {
33786+ i40e_debug(hw, I40E_DEBUG_ALL,
33787+ "Reading nvm buffer failed.Error code: %d.\n",
33788+ status);
33789+ }
33790+ }
33791+
33792+ return status;
33793+}
33794+
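/* Illustrative usage sketch for the i40e_read_nvm_module_data() helper added
 * above; not part of this patch. The module pointer word (0x48) and the
 * buffer size are hypothetical example values, not taken from this driver.
 */
static i40e_status example_read_module_words(struct i40e_hw *hw, u16 *buf)
{
	u16 words = 4;	/* read four 16-bit words into the caller's buffer */

	/* module_ptr = 0x48, module_offset = 0, data_offset = 0 */
	return i40e_read_nvm_module_data(hw, 0x48, 0, 0, words, buf);
}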
33795 /**
33796 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
33797 * @hw: pointer to the HW structure
33798@@ -356,16 +403,16 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
33799 * and followed by the release.
33800 **/
33801 static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
33802- u16 *words, u16 *data)
33803+ u16 *words, u16 *data)
33804 {
33805- i40e_status ret_code = 0;
33806+ i40e_status ret_code = I40E_SUCCESS;
33807 u16 index, word;
33808
33809- /* Loop thru the selected region */
33810+ /* Loop through the selected region */
33811 for (word = 0; word < *words; word++) {
33812 index = offset + word;
33813 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
33814- if (ret_code)
33815+ if (ret_code != I40E_SUCCESS)
33816 break;
33817 }
33818
33819@@ -387,7 +434,7 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
33820 * and followed by the release.
33821 **/
33822 static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
33823- u16 *words, u16 *data)
33824+ u16 *words, u16 *data)
33825 {
33826 i40e_status ret_code;
33827 u16 read_size = *words;
33828@@ -414,7 +461,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
33829
33830 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
33831 data + words_read, last_cmd);
33832- if (ret_code)
33833+ if (ret_code != I40E_SUCCESS)
33834 goto read_nvm_buffer_aq_exit;
33835
33836 /* Increment counter for words already read and move offset to
33837@@ -425,7 +472,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
33838 } while (words_read < *words);
33839
33840 for (i = 0; i < *words; i++)
33841- data[i] = le16_to_cpu(((__le16 *)data)[i]);
33842+ data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
33843
33844 read_nvm_buffer_aq_exit:
33845 *words = words_read;
33846@@ -433,7 +480,7 @@ read_nvm_buffer_aq_exit:
33847 }
33848
33849 /**
33850- * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
33851+ * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
33852 * @hw: pointer to the HW structure
33853 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
33854 * @words: (in) number of words to read; (out) number of words actually read
33855@@ -443,15 +490,42 @@ read_nvm_buffer_aq_exit:
33856 * method.
33857 **/
33858 static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
33859- u16 offset, u16 *words,
33860- u16 *data)
33861+ u16 offset, u16 *words,
33862+ u16 *data)
33863 {
33864- i40e_status ret_code = 0;
33865-
33866 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
33867- ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
33868- else
33869+ return i40e_read_nvm_buffer_aq(hw, offset, words, data);
33870+
33871+ return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
33872+}
33873+
33874+/**
33875+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
33876+ * @hw: pointer to the HW structure
33877+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
33878+ * @words: (in) number of words to read; (out) number of words actually read
33879+ * @data: words read from the Shadow RAM
33880+ *
33881+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
33882+ * method. The buffer read is preceded by the NVM ownership take
33883+ * and followed by the release.
33884+ **/
33885+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
33886+ u16 *words, u16 *data)
33887+{
33888+ i40e_status ret_code = I40E_SUCCESS;
33889+
33890+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
33891+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
33892+ if (!ret_code) {
33893+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
33894+ data);
33895+ i40e_release_nvm(hw);
33896+ }
33897+ } else {
33898 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
33899+ }
33900+
33901 return ret_code;
33902 }
33903
33904@@ -467,8 +541,8 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
33905 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
33906 **/
33907 static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
33908- u32 offset, u16 words, void *data,
33909- bool last_command)
33910+ u32 offset, u16 words, void *data,
33911+ bool last_command)
33912 {
33913 i40e_status ret_code = I40E_ERR_NVM;
33914 struct i40e_asq_cmd_details cmd_details;
33915@@ -482,25 +556,20 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
33916 * Firmware will check the module-based model.
33917 */
33918 if ((offset + words) > hw->nvm.sr_size)
33919- i40e_debug(hw, I40E_DEBUG_NVM,
33920- "NVM write error: offset %d beyond Shadow RAM limit %d\n",
33921- (offset + words), hw->nvm.sr_size);
33922+ hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n");
33923 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
33924 /* We can write only up to 4KB (one sector), in one AQ write */
33925- i40e_debug(hw, I40E_DEBUG_NVM,
33926- "NVM write fail error: tried to write %d words, limit is %d.\n",
33927- words, I40E_SR_SECTOR_SIZE_IN_WORDS);
33928+ hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n");
33929 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
33930 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
33931 /* A single write cannot spread over two sectors */
33932- i40e_debug(hw, I40E_DEBUG_NVM,
33933- "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
33934- offset, words);
33935+ hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n");
33936 else
33937 ret_code = i40e_aq_update_nvm(hw, module_pointer,
33938 2 * offset, /*bytes*/
33939 2 * words, /*bytes*/
33940- data, last_command, &cmd_details);
33941+ data, last_command, 0,
33942+ &cmd_details);
33943
33944 return ret_code;
33945 }
33946@@ -518,7 +587,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
33947 static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
33948 u16 *checksum)
33949 {
33950- i40e_status ret_code;
33951+ i40e_status ret_code = I40E_SUCCESS;
33952 struct i40e_virt_mem vmem;
33953 u16 pcie_alt_module = 0;
33954 u16 checksum_local = 0;
33955@@ -534,7 +603,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
33956
33957 /* read pointer to VPD area */
33958 ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
33959- if (ret_code) {
33960+ if (ret_code != I40E_SUCCESS) {
33961 ret_code = I40E_ERR_NVM_CHECKSUM;
33962 goto i40e_calc_nvm_checksum_exit;
33963 }
33964@@ -542,7 +611,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
33965 /* read pointer to PCIe Alt Auto-load module */
33966 ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
33967 &pcie_alt_module);
33968- if (ret_code) {
33969+ if (ret_code != I40E_SUCCESS) {
33970 ret_code = I40E_ERR_NVM_CHECKSUM;
33971 goto i40e_calc_nvm_checksum_exit;
33972 }
33973@@ -556,7 +625,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
33974 u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
33975
33976 ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
33977- if (ret_code) {
33978+ if (ret_code != I40E_SUCCESS) {
33979 ret_code = I40E_ERR_NVM_CHECKSUM;
33980 goto i40e_calc_nvm_checksum_exit;
33981 }
33982@@ -598,16 +667,15 @@ i40e_calc_nvm_checksum_exit:
33983 **/
33984 i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
33985 {
33986- i40e_status ret_code;
33987+ i40e_status ret_code = I40E_SUCCESS;
33988 u16 checksum;
33989 __le16 le_sum;
33990
33991 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
33992- if (!ret_code) {
33993- le_sum = cpu_to_le16(checksum);
33994+ le_sum = CPU_TO_LE16(checksum);
33995+ if (ret_code == I40E_SUCCESS)
33996 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
33997 1, &le_sum, true);
33998- }
33999
34000 return ret_code;
34001 }
34002@@ -623,7 +691,7 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
34003 i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
34004 u16 *checksum)
34005 {
34006- i40e_status ret_code = 0;
34007+ i40e_status ret_code = I40E_SUCCESS;
34008 u16 checksum_sr = 0;
34009 u16 checksum_local = 0;
34010
34011@@ -655,42 +723,51 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
34012 }
34013
34014 static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
34015- struct i40e_nvm_access *cmd,
34016- u8 *bytes, int *perrno);
34017+ struct i40e_nvm_access *cmd,
34018+ u8 *bytes, int *perrno);
34019 static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
34020- struct i40e_nvm_access *cmd,
34021- u8 *bytes, int *perrno);
34022+ struct i40e_nvm_access *cmd,
34023+ u8 *bytes, int *perrno);
34024 static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
34025- struct i40e_nvm_access *cmd,
34026- u8 *bytes, int *errno);
34027+ struct i40e_nvm_access *cmd,
34028+ u8 *bytes, int *perrno);
34029 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
34030- struct i40e_nvm_access *cmd,
34031- int *perrno);
34032+ struct i40e_nvm_access *cmd,
34033+ int *perrno);
34034 static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
34035- struct i40e_nvm_access *cmd,
34036- int *perrno);
34037+ struct i40e_nvm_access *cmd,
34038+ int *perrno);
34039 static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
34040- struct i40e_nvm_access *cmd,
34041- u8 *bytes, int *perrno);
34042+ struct i40e_nvm_access *cmd,
34043+ u8 *bytes, int *perrno);
34044 static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
34045- struct i40e_nvm_access *cmd,
34046- u8 *bytes, int *perrno);
34047+ struct i40e_nvm_access *cmd,
34048+ u8 *bytes, int *perrno);
34049 static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
34050- struct i40e_nvm_access *cmd,
34051- u8 *bytes, int *perrno);
34052+ struct i40e_nvm_access *cmd,
34053+ u8 *bytes, int *perrno);
34054 static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
34055- struct i40e_nvm_access *cmd,
34056- u8 *bytes, int *perrno);
34057-static inline u8 i40e_nvmupd_get_module(u32 val)
34058+ struct i40e_nvm_access *cmd,
34059+ u8 *bytes, int *perrno);
34060+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
34061+ struct i40e_nvm_access *cmd,
34062+ u8 *bytes, int *perrno);
34063+static INLINE u8 i40e_nvmupd_get_module(u32 val)
34064 {
34065 return (u8)(val & I40E_NVM_MOD_PNT_MASK);
34066 }
34067-static inline u8 i40e_nvmupd_get_transaction(u32 val)
34068+static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
34069 {
34070 return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
34071 }
34072
34073-static const char * const i40e_nvm_update_state_str[] = {
34074+static INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
34075+{
34076+ return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
34077+ I40E_NVM_PRESERVATION_FLAGS_SHIFT);
34078+}
34079+
34080+static const char *i40e_nvm_update_state_str[] = {
34081 "I40E_NVMUPD_INVALID",
34082 "I40E_NVMUPD_READ_CON",
34083 "I40E_NVMUPD_READ_SNT",
34084@@ -707,6 +784,8 @@ static const char * const i40e_nvm_update_state_str[] = {
34085 "I40E_NVMUPD_STATUS",
34086 "I40E_NVMUPD_EXEC_AQ",
34087 "I40E_NVMUPD_GET_AQ_RESULT",
34088+ "I40E_NVMUPD_GET_AQ_EVENT",
34089+ "I40E_NVMUPD_GET_FEATURES",
34090 };
34091
34092 /**
34093@@ -719,8 +798,8 @@ static const char * const i40e_nvm_update_state_str[] = {
34094 * Dispatches command depending on what update state is current
34095 **/
34096 i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
34097- struct i40e_nvm_access *cmd,
34098- u8 *bytes, int *perrno)
34099+ struct i40e_nvm_access *cmd,
34100+ u8 *bytes, int *perrno)
34101 {
34102 i40e_status status;
34103 enum i40e_nvmupd_cmd upd_cmd;
34104@@ -764,7 +843,32 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
34105 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
34106 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
34107
34108- return 0;
34109+ return I40E_SUCCESS;
34110+ }
34111+
34112+ /*
34113+ * A supported features request returns immediately
34114+ * rather than going into state machine
34115+ */
34116+ if (upd_cmd == I40E_NVMUPD_FEATURES) {
34117+ if (cmd->data_size < hw->nvmupd_features.size) {
34118+ *perrno = -EFAULT;
34119+ return I40E_ERR_BUF_TOO_SHORT;
34120+ }
34121+
34122+ /*
34123+ * If buffer is bigger than i40e_nvmupd_features structure,
34124+ * make sure the trailing bytes are set to 0x0.
34125+ */
34126+ if (cmd->data_size > hw->nvmupd_features.size)
34127+ i40e_memset(bytes + hw->nvmupd_features.size, 0x0,
34128+ cmd->data_size - hw->nvmupd_features.size,
34129+ I40E_NONDMA_MEM);
34130+
34131+ i40e_memcpy(bytes, &hw->nvmupd_features,
34132+ hw->nvmupd_features.size, I40E_NONDMA_MEM);
34133+
34134+ return I40E_SUCCESS;
34135 }
34136
34137 /* Clear status even it is not read and log */
34138@@ -782,7 +886,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
34139 * ~5ms for most commands. However lock is held for ~60ms for
34140 * NVMUPD_CSUM_LCB command.
34141 */
34142- mutex_lock(&hw->aq.arq_mutex);
34143+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
34144 switch (hw->nvmupd_state) {
34145 case I40E_NVMUPD_STATE_INIT:
34146 status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
34147@@ -802,9 +906,9 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
34148 * the wait info and return before doing anything else
34149 */
34150 if (cmd->offset == 0xffff) {
34151- i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
34152- status = 0;
34153- goto exit;
34154+ i40e_nvmupd_clear_wait_state(hw);
34155+ status = I40E_SUCCESS;
34156+ break;
34157 }
34158
34159 status = I40E_ERR_NOT_READY;
34160@@ -819,8 +923,8 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
34161 *perrno = -ESRCH;
34162 break;
34163 }
34164-exit:
34165- mutex_unlock(&hw->aq.arq_mutex);
34166+
34167+ i40e_release_spinlock(&hw->aq.arq_spinlock);
34168 return status;
34169 }
34170
34171@@ -835,10 +939,10 @@ exit:
34172 * state. Reject all other commands.
34173 **/
34174 static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
34175- struct i40e_nvm_access *cmd,
34176- u8 *bytes, int *perrno)
34177+ struct i40e_nvm_access *cmd,
34178+ u8 *bytes, int *perrno)
34179 {
34180- i40e_status status = 0;
34181+ i40e_status status = I40E_SUCCESS;
34182 enum i40e_nvmupd_cmd upd_cmd;
34183
34184 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
34185@@ -948,6 +1052,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
34186 status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
34187 break;
34188
34189+ case I40E_NVMUPD_GET_AQ_EVENT:
34190+ status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
34191+ break;
34192+
34193 default:
34194 i40e_debug(hw, I40E_DEBUG_NVM,
34195 "NVMUPD: bad cmd %s in init state\n",
34196@@ -970,10 +1078,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
34197 * change in state; reject all other commands.
34198 **/
34199 static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
34200- struct i40e_nvm_access *cmd,
34201- u8 *bytes, int *perrno)
34202+ struct i40e_nvm_access *cmd,
34203+ u8 *bytes, int *perrno)
34204 {
34205- i40e_status status = 0;
34206+ i40e_status status = I40E_SUCCESS;
34207 enum i40e_nvmupd_cmd upd_cmd;
34208
34209 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
34210@@ -1012,10 +1120,10 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
34211 * change in state; reject all other commands
34212 **/
34213 static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
34214- struct i40e_nvm_access *cmd,
34215- u8 *bytes, int *perrno)
34216+ struct i40e_nvm_access *cmd,
34217+ u8 *bytes, int *perrno)
34218 {
34219- i40e_status status = 0;
34220+ i40e_status status = I40E_SUCCESS;
34221 enum i40e_nvmupd_cmd upd_cmd;
34222 bool retry_attempt = false;
34223
34224@@ -1122,38 +1230,55 @@ retry:
34225 }
34226
34227 /**
34228- * i40e_nvmupd_check_wait_event - handle NVM update operation events
34229+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
34230 * @hw: pointer to the hardware structure
34231- * @opcode: the event that just happened
34232 **/
34233-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
34234+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
34235 {
34236- if (opcode == hw->nvm_wait_opcode) {
34237- i40e_debug(hw, I40E_DEBUG_NVM,
34238- "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
34239- if (hw->nvm_release_on_done) {
34240- i40e_release_nvm(hw);
34241- hw->nvm_release_on_done = false;
34242- }
34243- hw->nvm_wait_opcode = 0;
34244+ i40e_debug(hw, I40E_DEBUG_NVM,
34245+ "NVMUPD: clearing wait on opcode 0x%04x\n",
34246+ hw->nvm_wait_opcode);
34247
34248- if (hw->aq.arq_last_status) {
34249- hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
34250- return;
34251- }
34252+ if (hw->nvm_release_on_done) {
34253+ i40e_release_nvm(hw);
34254+ hw->nvm_release_on_done = false;
34255+ }
34256+ hw->nvm_wait_opcode = 0;
34257
34258- switch (hw->nvmupd_state) {
34259- case I40E_NVMUPD_STATE_INIT_WAIT:
34260- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
34261- break;
34262+ if (hw->aq.arq_last_status) {
34263+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
34264+ return;
34265+ }
34266
34267- case I40E_NVMUPD_STATE_WRITE_WAIT:
34268- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
34269- break;
34270+ switch (hw->nvmupd_state) {
34271+ case I40E_NVMUPD_STATE_INIT_WAIT:
34272+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
34273+ break;
34274
34275- default:
34276- break;
34277- }
34278+ case I40E_NVMUPD_STATE_WRITE_WAIT:
34279+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
34280+ break;
34281+
34282+ default:
34283+ break;
34284+ }
34285+}
34286+
34287+/**
34288+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
34289+ * @hw: pointer to the hardware structure
34290+ * @opcode: the event that just happened
34291+ * @desc: AdminQ descriptor
34292+ **/
34293+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
34294+ struct i40e_aq_desc *desc)
34295+{
34296+ u32 aq_desc_len = sizeof(struct i40e_aq_desc);
34297+
34298+ if (opcode == hw->nvm_wait_opcode) {
34299+ i40e_memcpy(&hw->nvm_aq_event_desc, desc,
34300+ aq_desc_len, I40E_NONDMA_TO_NONDMA);
34301+ i40e_nvmupd_clear_wait_state(hw);
34302 }
34303 }
34304
34305@@ -1166,8 +1291,8 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
34306 * Return one of the valid command types or I40E_NVMUPD_INVALID
34307 **/
34308 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
34309- struct i40e_nvm_access *cmd,
34310- int *perrno)
34311+ struct i40e_nvm_access *cmd,
34312+ int *perrno)
34313 {
34314 enum i40e_nvmupd_cmd upd_cmd;
34315 u8 module, transaction;
34316@@ -1204,10 +1329,23 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
34317 upd_cmd = I40E_NVMUPD_READ_SA;
34318 break;
34319 case I40E_NVM_EXEC:
34320- if (module == 0xf)
34321- upd_cmd = I40E_NVMUPD_STATUS;
34322- else if (module == 0)
34323+ switch (module) {
34324+ case I40E_NVM_EXEC_GET_AQ_RESULT:
34325 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
34326+ break;
34327+ case I40E_NVM_EXEC_FEATURES:
34328+ upd_cmd = I40E_NVMUPD_FEATURES;
34329+ break;
34330+ case I40E_NVM_EXEC_STATUS:
34331+ upd_cmd = I40E_NVMUPD_STATUS;
34332+ break;
34333+ default:
34334+ *perrno = -EFAULT;
34335+ return I40E_NVMUPD_INVALID;
34336+ }
34337+ break;
34338+ case I40E_NVM_AQE:
34339+ upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
34340 break;
34341 }
34342 break;
34343@@ -1259,8 +1397,8 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
34344 * cmd structure contains identifiers and data buffer
34345 **/
34346 static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
34347- struct i40e_nvm_access *cmd,
34348- u8 *bytes, int *perrno)
34349+ struct i40e_nvm_access *cmd,
34350+ u8 *bytes, int *perrno)
34351 {
34352 struct i40e_asq_cmd_details cmd_details;
34353 i40e_status status;
34354@@ -1271,6 +1409,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
34355 u32 aq_data_len;
34356
34357 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
34358+ if (cmd->offset == 0xffff)
34359+ return I40E_SUCCESS;
34360+
34361 memset(&cmd_details, 0, sizeof(cmd_details));
34362 cmd_details.wb_desc = &hw->nvm_wb_desc;
34363
34364@@ -1289,7 +1430,7 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
34365
34366 /* if data buffer needed, make sure it's ready */
34367 aq_data_len = cmd->data_size - aq_desc_len;
34368- buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
34369+ buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
34370 if (buff_size) {
34371 if (!hw->nvm_buff.va) {
34372 status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
34373@@ -1302,10 +1443,14 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
34374
34375 if (hw->nvm_buff.va) {
34376 buff = hw->nvm_buff.va;
34377- memcpy(buff, &bytes[aq_desc_len], aq_data_len);
34378+ i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
34379+ I40E_NONDMA_TO_NONDMA);
34380 }
34381 }
34382
34383+ if (cmd->offset)
34384+ memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
34385+
34386 /* and away we go! */
34387 status = i40e_asq_send_command(hw, aq_desc, buff,
34388 buff_size, &cmd_details);
34389@@ -1315,6 +1460,7 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
34390 i40e_stat_str(hw, status),
34391 i40e_aq_str(hw, hw->aq.asq_last_status));
34392 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
34393+ return status;
34394 }
34395
34396 /* should we wait for a followup event? */
34397@@ -1336,8 +1482,8 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
34398 * cmd structure contains identifiers and data buffer
34399 **/
34400 static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
34401- struct i40e_nvm_access *cmd,
34402- u8 *bytes, int *perrno)
34403+ struct i40e_nvm_access *cmd,
34404+ u8 *bytes, int *perrno)
34405 {
34406 u32 aq_total_len;
34407 u32 aq_desc_len;
34408@@ -1347,7 +1493,7 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
34409 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
34410
34411 aq_desc_len = sizeof(struct i40e_aq_desc);
34412- aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
34413+ aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
34414
34415 /* check offset range */
34416 if (cmd->offset > aq_total_len) {
34417@@ -1375,13 +1521,13 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
34418 __func__, cmd->offset, cmd->offset + len);
34419
34420 buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
34421- memcpy(bytes, buff, len);
34422+ i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
34423
34424 bytes += len;
34425 remainder -= len;
34426 buff = hw->nvm_buff.va;
34427 } else {
34428- buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
34429+ buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
34430 }
34431
34432 if (remainder > 0) {
34433@@ -1389,10 +1535,45 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
34434
34435 i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
34436 __func__, start_byte, start_byte + remainder);
34437- memcpy(bytes, buff, remainder);
34438+ i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
34439 }
34440
34441- return 0;
34442+ return I40E_SUCCESS;
34443+}
34444+
34445+/**
34446+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
34447+ * @hw: pointer to hardware structure
34448+ * @cmd: pointer to nvm update command buffer
34449+ * @bytes: pointer to the data buffer
34450+ * @perrno: pointer to return error code
34451+ *
34452+ * cmd structure contains identifiers and data buffer
34453+ **/
34454+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
34455+ struct i40e_nvm_access *cmd,
34456+ u8 *bytes, int *perrno)
34457+{
34458+ u32 aq_total_len;
34459+ u32 aq_desc_len;
34460+
34461+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
34462+
34463+ aq_desc_len = sizeof(struct i40e_aq_desc);
34464+ aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
34465+
34466+ /* check copylength range */
34467+ if (cmd->data_size > aq_total_len) {
34468+ i40e_debug(hw, I40E_DEBUG_NVM,
34469+ "%s: copy length %d too big, trimming to %d\n",
34470+ __func__, cmd->data_size, aq_total_len);
34471+ cmd->data_size = aq_total_len;
34472+ }
34473+
34474+ i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
34475+ I40E_NONDMA_TO_NONDMA);
34476+
34477+ return I40E_SUCCESS;
34478 }
34479
34480 /**
34481@@ -1405,8 +1586,8 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
34482 * cmd structure contains identifiers and data buffer
34483 **/
34484 static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
34485- struct i40e_nvm_access *cmd,
34486- u8 *bytes, int *perrno)
34487+ struct i40e_nvm_access *cmd,
34488+ u8 *bytes, int *perrno)
34489 {
34490 struct i40e_asq_cmd_details cmd_details;
34491 i40e_status status;
34492@@ -1444,10 +1625,10 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
34493 * module, offset, data_size and data are in cmd structure
34494 **/
34495 static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
34496- struct i40e_nvm_access *cmd,
34497- int *perrno)
34498+ struct i40e_nvm_access *cmd,
34499+ int *perrno)
34500 {
34501- i40e_status status = 0;
34502+ i40e_status status = I40E_SUCCESS;
34503 struct i40e_asq_cmd_details cmd_details;
34504 u8 module, transaction;
34505 bool last;
34506@@ -1484,24 +1665,26 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
34507 * module, offset, data_size and data are in cmd structure
34508 **/
34509 static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
34510- struct i40e_nvm_access *cmd,
34511- u8 *bytes, int *perrno)
34512+ struct i40e_nvm_access *cmd,
34513+ u8 *bytes, int *perrno)
34514 {
34515- i40e_status status = 0;
34516+ i40e_status status = I40E_SUCCESS;
34517 struct i40e_asq_cmd_details cmd_details;
34518 u8 module, transaction;
34519+ u8 preservation_flags;
34520 bool last;
34521
34522 transaction = i40e_nvmupd_get_transaction(cmd->config);
34523 module = i40e_nvmupd_get_module(cmd->config);
34524 last = (transaction & I40E_NVM_LCB);
34525+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
34526
34527 memset(&cmd_details, 0, sizeof(cmd_details));
34528 cmd_details.wb_desc = &hw->nvm_wb_desc;
34529
34530 status = i40e_aq_update_nvm(hw, module, cmd->offset,
34531 (u16)cmd->data_size, bytes, last,
34532- &cmd_details);
34533+ preservation_flags, &cmd_details);
34534 if (status) {
34535 i40e_debug(hw, I40E_DEBUG_NVM,
34536 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
34537diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
34538index 80e66da6b..f6ac4b2bd 100644
34539--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
34540+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
34541@@ -1,28 +1,5 @@
34542-/*******************************************************************************
34543- *
34544- * Intel Ethernet Controller XL710 Family Linux Driver
34545- * Copyright(c) 2013 - 2014 Intel Corporation.
34546- *
34547- * This program is free software; you can redistribute it and/or modify it
34548- * under the terms and conditions of the GNU General Public License,
34549- * version 2, as published by the Free Software Foundation.
34550- *
34551- * This program is distributed in the hope it will be useful, but WITHOUT
34552- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
34553- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
34554- * more details.
34555- *
34556- * You should have received a copy of the GNU General Public License along
34557- * with this program. If not, see <http://www.gnu.org/licenses/>.
34558- *
34559- * The full GNU General Public License is included in this distribution in
34560- * the file called "COPYING".
34561- *
34562- * Contact Information:
34563- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
34564- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
34565- *
34566- ******************************************************************************/
34567+/* SPDX-License-Identifier: GPL-2.0 */
34568+/* Copyright(c) 2013 - 2020 Intel Corporation. */
34569
34570 #ifndef _I40E_OSDEP_H_
34571 #define _I40E_OSDEP_H_
34572@@ -34,14 +11,41 @@
34573 #include <linux/pci.h>
34574 #include <linux/highuid.h>
34575
34576-/* get readq/writeq support for 32 bit kernels, use the low-first version */
34577-#include <linux/io-64-nonatomic-lo-hi.h>
34578+#include <linux/io.h>
34579+#include <asm-generic/int-ll64.h>
34580+
34581+#ifndef readq
34582+static inline __u64 readq(const volatile void __iomem *addr)
34583+{
34584+ const volatile u32 __iomem *p = addr;
34585+ u32 low, high;
34586+
34587+ low = readl(p);
34588+ high = readl(p + 1);
34589+
34590+ return low + ((u64)high << 32);
34591+}
34592+#endif
34593+
34594+#ifndef writeq
34595+static inline void writeq(__u64 val, volatile void __iomem *addr)
34596+{
34597+ writel(val, addr);
34598+ writel(val >> 32, addr + 4);
34599+}
34600+#endif
34601+#include "kcompat.h"
34602
34603 /* File to be the magic between shared code and
34604 * actual OS primitives
34605 */
34606
34607-#define hw_dbg(hw, S, A...) do {} while (0)
34608+#define hw_dbg(h, s, ...) do { \
34609+ pr_debug("i40e %02x:%02x.%x " s, \
34610+ (h)->bus.bus_id, (h)->bus.device, \
34611+ (h)->bus.func, ##__VA_ARGS__); \
34612+} while (0)
34613+
34614
34615 #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
34616 #define rd32(a, reg) readl((a)->hw_addr + (reg))
34617@@ -49,7 +53,6 @@
34618 #define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
34619 #define rd64(a, reg) readq((a)->hw_addr + (reg))
34620 #define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
34621-
34622 /* memory allocation tracking */
34623 struct i40e_dma_mem {
34624 void *va;
34625@@ -58,7 +61,8 @@ struct i40e_dma_mem {
34626 };
34627
34628 #define i40e_allocate_dma_mem(h, m, unused, s, a) \
34629- i40e_allocate_dma_mem_d(h, m, s, a)
34630+ i40e_allocate_dma_mem_d(h, m, unused, s, a)
34631+
34632 #define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
34633
34634 struct i40e_virt_mem {
34635@@ -77,5 +81,40 @@ do { \
34636 (h)->bus.func, ##__VA_ARGS__); \
34637 } while (0)
34638
34639+/* these things are all directly replaced with sed during the kernel build */
34640+#define INLINE inline
34641+
34642+
34643+#define CPU_TO_LE16(o) cpu_to_le16(o)
34644+#define CPU_TO_LE32(s) cpu_to_le32(s)
34645+#define CPU_TO_LE64(h) cpu_to_le64(h)
34646+#define LE16_TO_CPU(a) le16_to_cpu(a)
34647+#define LE32_TO_CPU(c) le32_to_cpu(c)
34648+#define LE64_TO_CPU(k) le64_to_cpu(k)
34649+
34650+/* SW spinlock */
34651+struct i40e_spinlock {
34652+ struct mutex spinlock;
34653+};
34654+
34655+static inline void i40e_no_action(struct i40e_spinlock *sp)
34656+{
34657+ /* nothing */
34658+}
34659+
34660+/* the locks are initialized in _probe and destroyed in _remove
34661+ * so make sure NOT to implement init/destroy here, as to
34662+ * avoid the i40e_init_adminq code trying to reinitialize
34663+ * the persistent lock memory
34664+ */
34665+#define i40e_init_spinlock(_sp) i40e_no_action(_sp)
34666+#define i40e_acquire_spinlock(_sp) i40e_acquire_spinlock_d(_sp)
34667+#define i40e_release_spinlock(_sp) i40e_release_spinlock_d(_sp)
34668+#define i40e_destroy_spinlock(_sp) i40e_no_action(_sp)
34669+
34670+
34671+#define i40e_memset(a, b, c, d) memset((a), (b), (c))
34672+#define i40e_memcpy(a, b, c, d) memcpy((a), (b), (c))
34673+
34674 typedef enum i40e_status_code i40e_status;
34675 #endif /* _I40E_OSDEP_H_ */
34676diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
34677index a39b13197..0f6120609 100644
34678--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
34679+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
34680@@ -1,35 +1,12 @@
34681-/*******************************************************************************
34682- *
34683- * Intel Ethernet Controller XL710 Family Linux Driver
34684- * Copyright(c) 2013 - 2016 Intel Corporation.
34685- *
34686- * This program is free software; you can redistribute it and/or modify it
34687- * under the terms and conditions of the GNU General Public License,
34688- * version 2, as published by the Free Software Foundation.
34689- *
34690- * This program is distributed in the hope it will be useful, but WITHOUT
34691- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
34692- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
34693- * more details.
34694- *
34695- * You should have received a copy of the GNU General Public License along
34696- * with this program. If not, see <http://www.gnu.org/licenses/>.
34697- *
34698- * The full GNU General Public License is included in this distribution in
34699- * the file called "COPYING".
34700- *
34701- * Contact Information:
34702- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
34703- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
34704- *
34705- ******************************************************************************/
34706+/* SPDX-License-Identifier: GPL-2.0 */
34707+/* Copyright(c) 2013 - 2020 Intel Corporation. */
34708
34709 #ifndef _I40E_PROTOTYPE_H_
34710 #define _I40E_PROTOTYPE_H_
34711
34712 #include "i40e_type.h"
34713 #include "i40e_alloc.h"
34714-#include <linux/avf/virtchnl.h>
34715+#include "virtchnl.h"
34716
34717 /* Prototypes for shared code functions that are not in
34718 * the standard function pointer structures. These are
34719@@ -45,6 +22,13 @@ void i40e_adminq_init_ring_data(struct i40e_hw *hw);
34720 i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
34721 struct i40e_arq_event_info *e,
34722 u16 *events_pending);
34723+enum i40e_status_code
34724+i40e_asq_send_command_atomic(struct i40e_hw *hw,
34725+ struct i40e_aq_desc *desc,
34726+ void *buff, /* can be NULL */
34727+ u16 buff_size,
34728+ struct i40e_asq_cmd_details *cmd_details,
34729+ bool is_atomic_context);
34730 i40e_status i40e_asq_send_command(struct i40e_hw *hw,
34731 struct i40e_aq_desc *desc,
34732 void *buff, /* can be NULL */
34733@@ -58,29 +42,39 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
34734 void i40e_idle_aq(struct i40e_hw *hw);
34735 bool i40e_check_asq_alive(struct i40e_hw *hw);
34736 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
34737-const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
34738-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
34739
34740 i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
34741- bool pf_lut, u8 *lut, u16 lut_size);
34742+ bool pf_lut, u8 *lut, u16 lut_size);
34743 i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
34744- bool pf_lut, u8 *lut, u16 lut_size);
34745+ bool pf_lut, u8 *lut, u16 lut_size);
34746 i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
34747- u16 seid,
34748- struct i40e_aqc_get_set_rss_key_data *key);
34749+ u16 seid,
34750+ struct i40e_aqc_get_set_rss_key_data *key);
34751 i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
34752- u16 seid,
34753- struct i40e_aqc_get_set_rss_key_data *key);
34754+ u16 seid,
34755+ struct i40e_aqc_get_set_rss_key_data *key);
34756+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
34757+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
34758
34759 u32 i40e_led_get(struct i40e_hw *hw);
34760 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
34761 i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
34762- u16 led_addr, u32 mode);
34763+ u16 led_addr, u32 mode);
34764 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
34765- u16 *val);
34766+ u16 *val);
34767 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
34768- u32 time, u32 interval);
34769-
34770+ u32 time, u32 interval);
34771+i40e_status i40e_get_phy_lpi_status(struct i40e_hw *hw,
34772+ struct i40e_hw_port_stats *stats);
34773+i40e_status i40e_get_lpi_counters(struct i40e_hw *hw, u32 *tx_counter,
34774+ u32 *rx_counter, bool *is_clear);
34775+i40e_status i40e_lpi_stat_update(struct i40e_hw *hw,
34776+ bool offset_loaded, u64 *tx_offset,
34777+ u64 *tx_stat, u64 *rx_offset,
34778+ u64 *rx_stat);
34779+i40e_status i40e_get_lpi_duration(struct i40e_hw *hw,
34780+ struct i40e_hw_port_stats *stat,
34781+ u64 *tx_duration, u64 *rx_duration);
34782 /* admin send queue commands */
34783
34784 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
34785@@ -89,8 +83,8 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
34786 u16 *api_major_version, u16 *api_minor_version,
34787 struct i40e_asq_cmd_details *cmd_details);
34788 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
34789- u32 reg_addr, u64 reg_val,
34790- struct i40e_asq_cmd_details *cmd_details);
34791+ u32 reg_addr, u64 reg_val,
34792+ struct i40e_asq_cmd_details *cmd_details);
34793 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
34794 u32 reg_addr, u64 *reg_val,
34795 struct i40e_asq_cmd_details *cmd_details);
34796@@ -99,23 +93,22 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
34797 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
34798 struct i40e_asq_cmd_details *cmd_details);
34799 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
34800- struct i40e_asq_cmd_details *cmd_details);
34801-enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
34802+ struct i40e_asq_cmd_details *cmd_details);
34803+i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
34804 bool qualified_modules, bool report_init,
34805 struct i40e_aq_get_phy_abilities_resp *abilities,
34806 struct i40e_asq_cmd_details *cmd_details);
34807-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
34808+i40e_status i40e_aq_set_phy_config(struct i40e_hw *hw,
34809 struct i40e_aq_set_phy_config *config,
34810 struct i40e_asq_cmd_details *cmd_details);
34811-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
34812+i40e_status i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
34813 bool atomic_reset);
34814 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
34815- struct i40e_asq_cmd_details *cmd_details);
34816-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
34817 struct i40e_asq_cmd_details *cmd_details);
34818+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
34819+ struct i40e_asq_cmd_details *cmd_details);
34820 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
34821- bool enable_link,
34822- struct i40e_asq_cmd_details *cmd_details);
34823+ bool enable_link, struct i40e_asq_cmd_details *cmd_details);
34824 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
34825 bool enable_lse, struct i40e_link_status *link,
34826 struct i40e_asq_cmd_details *cmd_details);
34827@@ -136,13 +129,14 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
34828 bool rx_only_promisc);
34829 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
34830 u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
34831-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
34832- u16 seid, bool enable,
34833- u16 vid,
34834+i40e_status i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
34835+ u16 seid, bool set,
34836+ struct i40e_asq_cmd_details *cmd_details);
34837+i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
34838+ u16 seid, bool enable, u16 vid,
34839 struct i40e_asq_cmd_details *cmd_details);
34840-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
34841- u16 seid, bool enable,
34842- u16 vid,
34843+i40e_status i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
34844+ u16 seid, bool enable, u16 vid,
34845 struct i40e_asq_cmd_details *cmd_details);
34846 i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
34847 u16 seid, bool enable, u16 vid,
34848@@ -188,9 +182,8 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
34849 struct i40e_aqc_get_switch_config_resp *buf,
34850 u16 buf_size, u16 *start_seid,
34851 struct i40e_asq_cmd_details *cmd_details);
34852-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
34853- u16 flags,
34854- u16 valid_flags,
34855+i40e_status i40e_aq_set_switch_config(struct i40e_hw *hw,
34856+ u16 flags, u16 valid_flags, u8 mode,
34857 struct i40e_asq_cmd_details *cmd_details);
34858 i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
34859 enum i40e_aq_resources_ids resource,
34860@@ -206,36 +199,72 @@ i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
34861 bool last_command,
34862 struct i40e_asq_cmd_details *cmd_details);
34863 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
34864- u32 offset, u16 length, bool last_command,
34865- struct i40e_asq_cmd_details *cmd_details);
34866+ u32 offset, u16 length, bool last_command,
34867+ struct i40e_asq_cmd_details *cmd_details);
34868+i40e_status i40e_aq_read_nvm_config(struct i40e_hw *hw,
34869+ u8 cmd_flags, u32 field_id, void *data,
34870+ u16 buf_size, u16 *element_count,
34871+ struct i40e_asq_cmd_details *cmd_details);
34872+i40e_status i40e_aq_write_nvm_config(struct i40e_hw *hw,
34873+ u8 cmd_flags, void *data, u16 buf_size,
34874+ u16 element_count,
34875+ struct i40e_asq_cmd_details *cmd_details);
34876+i40e_status i40e_aq_oem_post_update(struct i40e_hw *hw,
34877+ void *buff, u16 buff_size,
34878+ struct i40e_asq_cmd_details *cmd_details);
34879 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
34880 void *buff, u16 buff_size, u16 *data_size,
34881 enum i40e_admin_queue_opc list_type_opc,
34882 struct i40e_asq_cmd_details *cmd_details);
34883 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
34884 u32 offset, u16 length, void *data,
34885- bool last_command,
34886+ bool last_command, u8 preservation_flags,
34887+ struct i40e_asq_cmd_details *cmd_details);
34888+i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
34889+ u8 rearrange_nvm,
34890+ struct i40e_asq_cmd_details *cmd_details);
34891+i40e_status i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
34892 struct i40e_asq_cmd_details *cmd_details);
34893 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
34894 u8 mib_type, void *buff, u16 buff_size,
34895 u16 *local_len, u16 *remote_len,
34896 struct i40e_asq_cmd_details *cmd_details);
34897+i40e_status i40e_aq_set_lldp_mib(struct i40e_hw *hw,
34898+ u8 mib_type, void *buff, u16 buff_size,
34899+ struct i40e_asq_cmd_details *cmd_details);
34900 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
34901 bool enable_update,
34902 struct i40e_asq_cmd_details *cmd_details);
34903+enum i40e_status_code
34904+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
34905+ struct i40e_asq_cmd_details *cmd_details);
34906 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
34907+ bool persist,
34908 struct i40e_asq_cmd_details *cmd_details);
34909+i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
34910+ bool dcb_enable,
34911+ struct i40e_asq_cmd_details
34912+ *cmd_details);
34913 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
34914+ bool persist,
34915 struct i40e_asq_cmd_details *cmd_details);
34916 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
34917- void *buff, u16 buff_size,
34918- struct i40e_asq_cmd_details *cmd_details);
34919+ void *buff, u16 buff_size,
34920+ struct i40e_asq_cmd_details *cmd_details);
34921+i40e_status i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
34922+ bool start_agent,
34923+ struct i40e_asq_cmd_details *cmd_details);
34924 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
34925 u16 udp_port, u8 protocol_index,
34926 u8 *filter_index,
34927 struct i40e_asq_cmd_details *cmd_details);
34928 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
34929 struct i40e_asq_cmd_details *cmd_details);
34930+i40e_status i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
34931+ u8 *num_entries,
34932+ struct i40e_aqc_switch_resource_alloc_element_resp *buf,
34933+ u16 count,
34934+ struct i40e_asq_cmd_details *cmd_details);
34935 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
34936 struct i40e_asq_cmd_details *cmd_details);
34937 i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
34938@@ -282,9 +311,31 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
34939 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
34940 struct i40e_asq_cmd_details *cmd_details);
34941 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
34942- struct i40e_asq_cmd_details *cmd_details);
34943+ struct i40e_asq_cmd_details *cmd_details);
34944+enum i40e_status_code
34945+i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
34946+ struct i40e_aqc_cloud_filters_element_bb *filters,
34947+ u8 filter_count);
34948+enum i40e_status_code
34949+i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
34950+ struct i40e_aqc_cloud_filters_element_data *filters,
34951+ u8 filter_count);
34952+enum i40e_status_code
34953+i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
34954+ struct i40e_aqc_cloud_filters_element_data *filters,
34955+ u8 filter_count);
34956+enum i40e_status_code
34957+i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
34958+ struct i40e_aqc_cloud_filters_element_bb *filters,
34959+ u8 filter_count);
34960 i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
34961- struct i40e_lldp_variables *lldp_cfg);
34962+ struct i40e_lldp_variables *lldp_cfg);
34963+i40e_status i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
34964+ struct i40e_aqc_replace_cloud_filters_cmd *filters,
34965+ struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf);
34966+i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
34967+ u32 reg_addr0, u32 *reg_val0,
34968+ u32 reg_addr1, u32 *reg_val1);
34969 /* i40e_common */
34970 i40e_status i40e_init_shared_code(struct i40e_hw *hw);
34971 i40e_status i40e_pf_reset(struct i40e_hw *hw);
34972@@ -294,15 +345,13 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
34973 i40e_status i40e_update_link_info(struct i40e_hw *hw);
34974 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
34975 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
34976- u32 *max_bw, u32 *min_bw, bool *min_valid,
34977- bool *max_valid);
34978+ u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
34979 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
34980 struct i40e_aqc_configure_partition_bw_data *bw_data,
34981 struct i40e_asq_cmd_details *cmd_details);
34982 i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
34983 i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
34984- u32 pba_num_size);
34985-i40e_status i40e_validate_mac_addr(u8 *mac_addr);
34986+ u32 pba_num_size);
34987 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
34988 /* prototype for functions used for NVM access */
34989 i40e_status i40e_init_nvm(struct i40e_hw *hw);
34990@@ -311,23 +360,70 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
34991 void i40e_release_nvm(struct i40e_hw *hw);
34992 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
34993 u16 *data);
34994+enum i40e_status_code
34995+i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
34996+ u16 data_offset, u16 words_data_size, u16 *data_ptr);
34997+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
34998+ u16 *words, u16 *data);
34999 i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
35000 i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
35001 u16 *checksum);
35002 i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
35003- struct i40e_nvm_access *cmd,
35004- u8 *bytes, int *);
35005-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
35006+ struct i40e_nvm_access *cmd,
35007+ u8 *bytes, int *);
35008+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
35009+ struct i40e_aq_desc *desc);
35010+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
35011 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
35012
35013+i40e_status i40e_set_mac_type(struct i40e_hw *hw);
35014+
35015 extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
35016
35017-static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
35018+static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
35019 {
35020 return i40e_ptype_lookup[ptype];
35021 }
35022
35023-/* prototype for functions used for SW locks */
35024+/**
35025+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
35026+ * @link_speed: the speed to convert
35027+ *
35028+ * Returns the link_speed in terms of the virtchnl interface, for use in
35029+ * converting link_speed as reported by the AdminQ into the format used for
35030+ * talking to virtchnl devices. If we can't represent the link speed properly,
35031+ * report LINK_SPEED_UNKNOWN.
35032+ **/
35033+static INLINE enum virtchnl_link_speed
35034+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
35035+{
35036+ switch (link_speed) {
35037+ case I40E_LINK_SPEED_100MB:
35038+ return VIRTCHNL_LINK_SPEED_100MB;
35039+ case I40E_LINK_SPEED_1GB:
35040+ return VIRTCHNL_LINK_SPEED_1GB;
35041+ case I40E_LINK_SPEED_2_5GB:
35042+ return VIRTCHNL_LINK_SPEED_2_5GB;
35043+ case I40E_LINK_SPEED_5GB:
35044+ return VIRTCHNL_LINK_SPEED_5GB;
35045+ case I40E_LINK_SPEED_10GB:
35046+ return VIRTCHNL_LINK_SPEED_10GB;
35047+ case I40E_LINK_SPEED_40GB:
35048+ return VIRTCHNL_LINK_SPEED_40GB;
35049+ case I40E_LINK_SPEED_20GB:
35050+ return VIRTCHNL_LINK_SPEED_20GB;
35051+ case I40E_LINK_SPEED_25GB:
35052+ return VIRTCHNL_LINK_SPEED_25GB;
35053+ case I40E_LINK_SPEED_UNKNOWN:
35054+ default:
35055+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
35056+ }
35057+}
35058+/* prototype for functions used for SW spinlocks */
35059+void i40e_init_spinlock(struct i40e_spinlock *sp);
35060+void i40e_acquire_spinlock(struct i40e_spinlock *sp);
35061+void i40e_release_spinlock(struct i40e_spinlock *sp);
35062+void i40e_destroy_spinlock(struct i40e_spinlock *sp);
35063
35064 /* i40e_common for VF drivers*/
35065 void i40e_vf_parse_hw_config(struct i40e_hw *hw,
35066@@ -346,10 +442,10 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
35067 struct i40e_control_filter_stats *stats,
35068 struct i40e_asq_cmd_details *cmd_details);
35069 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
35070- u8 table_id, u32 start_index, u16 buff_size,
35071- void *buff, u16 *ret_buff_size,
35072- u8 *ret_next_table, u32 *ret_next_index,
35073- struct i40e_asq_cmd_details *cmd_details);
35074+ u8 table_id, u32 start_index, u16 buff_size,
35075+ void *buff, u16 *ret_buff_size,
35076+ u8 *ret_next_table, u32 *ret_next_index,
35077+ struct i40e_asq_cmd_details *cmd_details);
35078 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
35079 u16 vsi_seid);
35080 i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
35081@@ -360,35 +456,84 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
35082 u32 reg_addr, u32 reg_val,
35083 struct i40e_asq_cmd_details *cmd_details);
35084 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
35085+enum i40e_status_code
35086+i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
35087+ u8 phy_select, u8 dev_addr, bool page_change,
35088+ bool set_mdio, u8 mdio_num,
35089+ u32 reg_addr, u32 reg_val,
35090+ struct i40e_asq_cmd_details *cmd_details);
35091+enum i40e_status_code
35092+i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
35093+ u8 phy_select, u8 dev_addr, bool page_change,
35094+ bool set_mdio, u8 mdio_num,
35095+ u32 reg_addr, u32 *reg_val,
35096+ struct i40e_asq_cmd_details *cmd_details);
35097+
35098+/* Convenience wrappers for most common use case */
35099+#define i40e_aq_set_phy_register(hw, ps, da, pc, ra, rv, cd) \
35100+ i40e_aq_set_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
35101+#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \
35102+ i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
35103+
35104+enum i40e_status_code
35105+i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 opcode,
35106+ u32 *cmd_status, u32 *data0, u32 *data1,
35107+ struct i40e_asq_cmd_details *cmd_details);
35108+
35109+i40e_status i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
35110+ struct i40e_aqc_arp_proxy_data *proxy_config,
35111+ struct i40e_asq_cmd_details *cmd_details);
35112+i40e_status i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
35113+ struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
35114+ struct i40e_asq_cmd_details *cmd_details);
35115+i40e_status i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
35116+ u8 filter_index,
35117+ struct i40e_aqc_set_wol_filter_data *filter,
35118+ bool set_filter, bool no_wol_tco,
35119+ bool filter_valid, bool no_wol_tco_valid,
35120+ struct i40e_asq_cmd_details *cmd_details);
35121+i40e_status i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
35122+ u16 *wake_reason,
35123+ struct i40e_asq_cmd_details *cmd_details);
35124+i40e_status i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
35125+ struct i40e_asq_cmd_details *cmd_details);
35126 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
35127- u16 reg, u8 phy_addr, u16 *value);
35128+ u16 reg, u8 phy_addr, u16 *value);
35129 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
35130- u16 reg, u8 phy_addr, u16 value);
35131+ u16 reg, u8 phy_addr, u16 value);
35132 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
35133 u8 page, u16 reg, u8 phy_addr, u16 *value);
35134 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
35135 u8 page, u16 reg, u8 phy_addr, u16 value);
35136-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
35137- u8 phy_addr, u16 *value);
35138-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
35139- u8 phy_addr, u16 value);
35140+i40e_status i40e_read_phy_register(struct i40e_hw *hw,
35141+ u8 page, u16 reg, u8 phy_addr, u16 *value);
35142+i40e_status i40e_write_phy_register(struct i40e_hw *hw,
35143+ u8 page, u16 reg, u8 phy_addr, u16 value);
35144 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
35145 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
35146- u32 time, u32 interval);
35147-i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
35148- u16 buff_size, u32 track_id,
35149- u32 *error_offset, u32 *error_info,
35150- struct i40e_asq_cmd_details *cmd_details);
35151-i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
35152- u16 buff_size, u8 flags,
35153- struct i40e_asq_cmd_details *cmd_details);
35154+ u32 time, u32 interval);
35155+i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
35156+ u16 buff_size, u32 track_id,
35157+ u32 *error_offset, u32 *error_info,
35158+ struct i40e_asq_cmd_details *
35159+ cmd_details);
35160+i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
35161+ u16 buff_size, u8 flags,
35162+ struct i40e_asq_cmd_details *
35163+ cmd_details);
35164 struct i40e_generic_seg_header *
35165 i40e_find_segment_in_package(u32 segment_type,
35166 struct i40e_package_header *pkg_header);
35167+struct i40e_profile_section_header *
35168+i40e_find_section_in_profile(u32 section_type,
35169+ struct i40e_profile_segment *profile);
35170 enum i40e_status_code
35171 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
35172 u32 track_id);
35173 enum i40e_status_code
35174+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
35175+ u32 track_id);
35176+enum i40e_status_code
35177 i40e_add_pinfo_to_list(struct i40e_hw *hw,
35178 struct i40e_profile_segment *profile,
35179 u8 *profile_info_sec, u32 track_id);
35180diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
35181index d8456c381..44373fb7a 100644
35182--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
35183+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
35184@@ -1,31 +1,15 @@
35185-/*******************************************************************************
35186- *
35187- * Intel Ethernet Controller XL710 Family Linux Driver
35188- * Copyright(c) 2013 - 2014 Intel Corporation.
35189- *
35190- * This program is free software; you can redistribute it and/or modify it
35191- * under the terms and conditions of the GNU General Public License,
35192- * version 2, as published by the Free Software Foundation.
35193- *
35194- * This program is distributed in the hope it will be useful, but WITHOUT
35195- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
35196- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
35197- * more details.
35198- *
35199- * You should have received a copy of the GNU General Public License along
35200- * with this program. If not, see <http://www.gnu.org/licenses/>.
35201- *
35202- * The full GNU General Public License is included in this distribution in
35203- * the file called "COPYING".
35204- *
35205- * Contact Information:
35206- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
35207- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
35208- *
35209- ******************************************************************************/
35210+// SPDX-License-Identifier: GPL-2.0
35211+/* Copyright(c) 2013 - 2020 Intel Corporation. */
35212
35213+/* this lets the macros that return timespec64 or structs compile cleanly with
35214+ * W=2
35215+ */
35216+#pragma GCC diagnostic ignored "-Waggregate-return"
35217 #include "i40e.h"
35218+#ifdef HAVE_PTP_1588_CLOCK
35219 #include <linux/ptp_classify.h>
35220+#include <linux/posix-clock.h>
35221+
35222
35223 /* The XL710 timesync is very much like Intel's 82599 design when it comes to
35224 * the fundamental clock design. However, the clock operations are much simpler
35225@@ -39,14 +23,210 @@
35226 * At 1Gb link, the period is multiplied by 20. (32ns)
35227 * 1588 functionality is not supported at 100Mbps.
35228 */
35229-#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
35230-#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
35231-#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
35232+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
35233+#define I40E_PTP_10GB_INCVAL_MULT 2
35234+#define I40E_PTP_1GB_INCVAL_MULT 20
35235+#define I40E_ISGN 0x80000000
35236
35237 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
35238 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
35239 I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
35240+#define I40E_SUBDEV_ID_25G_PTP_PIN 0xB
35241+#define to_dev(obj) container_of(obj, struct device, kobj)
35242+
35243+enum i40e_ptp_pin {
35244+ SDP3_2 = 0,
35245+ SDP3_3,
35246+ GPIO_4
35247+};
35248+
35249+static struct ptp_pin_desc sdp_desc[] = {
35250+/* name idx func chan */
35251+ {"SDP3_2", SDP3_2, PTP_PF_NONE, 0},
35252+ {"SDP3_3", SDP3_3, PTP_PF_NONE, 1},
35253+ {"GPIO_4", GPIO_4, PTP_PF_NONE, 1},
35254+};
35255+
35256+#ifndef HAVE_PTP_1588_CLOCK_PINS
35257+static ssize_t i40e_sysfs_ptp_pins_read(struct kobject *kobj,
35258+ struct kobj_attribute *attr,
35259+ char *buf);
35260+
35261+static ssize_t i40e_sysfs_ptp_pins_write(struct kobject *kobj,
35262+ struct kobj_attribute *attr,
35263+ const char *buf, size_t count);
35264+
35265+static struct kobj_attribute ptp_pins_attribute = __ATTR(pins, 0660,
35266+ i40e_sysfs_ptp_pins_read, i40e_sysfs_ptp_pins_write);
35267+#endif /* HAVE_PTP_1588_CLOCK_PINS */
35268+
35269+enum i40e_ptp_gpio_pin_state {
35270+ end = -2,
35271+ invalid,
35272+ off,
35273+ in_A,
35274+ in_B,
35275+ out_A,
35276+ out_B,
35277+};
35278+
35279+static const char * const i40e_ptp_gpio_pin_state2str[] = {
35280+ "off", "in_A", "in_B", "out_A", "out_B"
35281+};
35282+
35283+enum i40e_ptp_led_pin_state {
35284+ low = 0,
35285+ high,
35286+};
35287+
35288+struct i40e_ptp_pins_settings {
35289+ enum i40e_ptp_gpio_pin_state sdp3_2;
35290+ enum i40e_ptp_gpio_pin_state sdp3_3;
35291+ enum i40e_ptp_gpio_pin_state gpio_4;
35292+ enum i40e_ptp_led_pin_state led2_0;
35293+ enum i40e_ptp_led_pin_state led2_1;
35294+ enum i40e_ptp_led_pin_state led3_0;
35295+ enum i40e_ptp_led_pin_state led3_1;
35296+};
35297+
35298+static const struct i40e_ptp_pins_settings i40e_ptp_pin_led_allowed_states [] = {
35299+ {off, off, off, high, high, high, high},
35300+ {off, in_A, off, high, high, high, low},
35301+ {off, out_A, off, high, low, high, high},
35302+ {off, in_B, off, high, high, high, low},
35303+ {off, out_B, off, high, low, high, high},
35304+ {in_A, off, off, high, high, high, low},
35305+ {in_A, in_B, off, high, high, high, low},
35306+ {in_A, out_B, off, high, low, high, high},
35307+ {out_A, off, off, high, low, high, high},
35308+ {out_A, in_B, off, high, low, high, high},
35309+ {in_B, off, off, high, high, high, low},
35310+ {in_B, in_A, off, high, high, high, low},
35311+ {in_B, out_A, off, high, low, high, high},
35312+ {out_B, off, off, high, low, high, high},
35313+ {out_B, in_A, off, high, low, high, high},
35314+ {off, off, in_A, high, high, low, high},
35315+ {off, out_A, in_A, high, low, low, high},
35316+ {off, in_B, in_A, high, high, low, low},
35317+ {off, out_B, in_A, high, low, low, high},
35318+ {out_A, off, in_A, high, low, low, high},
35319+ {out_A, in_B, in_A, high, low, low, high},
35320+ {in_B, off, in_A, high, high, low, low},
35321+ {in_B, out_A, in_A, high, low, low, high},
35322+ {out_B, off, in_A, high, low, low, high},
35323+ {off, off, out_A, low, high, high, high},
35324+ {off, in_A, out_A, low, high, high, low},
35325+ {off, in_B, out_A, low, high, high, low},
35326+ {off, out_B, out_A, low, low, high, high},
35327+ {in_A, off, out_A, low, high, high, low},
35328+ {in_A, in_B, out_A, low, high, high, low},
35329+ {in_A, out_B, out_A, low, low, high, high},
35330+ {in_B, off, out_A, low, high, high, low},
35331+ {in_B, in_A, out_A, low, high, high, low},
35332+ {out_B, off, out_A, low, low, high, high},
35333+ {out_B, in_A, out_A, low, low, high, high},
35334+ {off, off, in_B, high, high, low, high},
35335+ {off, in_A, in_B, high, high, low, low},
35336+ {off, out_A, in_B, high, low, low, high},
35337+ {off, out_B, in_B, high, low, low, high},
35338+ {in_A, off, in_B, high, high, low, low},
35339+ {in_A, out_B, in_B, high, low, low, high},
35340+ {out_A, off, in_B, high, low, low, high},
35341+ {out_B, off, in_B, high, low, low, high},
35342+ {out_B, in_A, in_B, high, low, low, high},
35343+ {off, off, out_B, low, high, high, high},
35344+ {off, in_A, out_B, low, high, high, low},
35345+ {off, out_A, out_B, low, low, high, high},
35346+ {off, in_B, out_B, low, high, high, low},
35347+ {in_A, off, out_B, low, high, high, low},
35348+ {in_A, in_B, out_B, low, high, high, low},
35349+ {out_A, off, out_B, low, low, high, high},
35350+ {out_A, in_B, out_B, low, low, high, high},
35351+ {in_B, off, out_B, low, high, high, low},
35352+ {in_B, in_A, out_B, low, high, high, low},
35353+ {in_B, out_A, out_B, low, low, high, high},
35354+ {end, end, end, end, end, end, end}
35355+};
35356+
35357+static int i40e_ptp_set_pins(struct i40e_pf *pf,
35358+ struct i40e_ptp_pins_settings *pins);
35359
35360+/**
35361+ * i40e_ptp_extts0_work - workqueue task function
35362+ * @work: workqueue task structure
35363+ *
35364+ * Service for PTP external clock event
35365+ **/
35366+void i40e_ptp_extts0_work(struct work_struct *work)
35367+{
35368+ struct i40e_pf *pf = container_of(work, struct i40e_pf,
35369+ ptp_extts0_work);
35370+ struct i40e_hw *hw = &pf->hw;
35371+ struct ptp_clock_event event;
35372+ u32 hi, lo;
35373+
35374+ /* Event time is captured by one of the two matched registers
35375+ * PRTTSYN_EVNT_L: 32 LSB of sampled time event
35376+ * PRTTSYN_EVNT_H: 32 MSB of sampled time event
35377+ * Event is defined in PRTTSYN_EVNT_0 register
35378+ */
35379+ lo = rd32(hw, I40E_PRTTSYN_EVNT_L(0));
35380+ hi = rd32(hw, I40E_PRTTSYN_EVNT_H(0));
35381+
35382+ event.timestamp = (((u64)hi) << 32) | lo;
35383+
35384+ event.type = PTP_CLOCK_EXTTS;
35385+ event.index = 0;
35386+
35387+ /* fire event */
35388+ ptp_clock_event(pf->ptp_clock, &event);
35389+}
35390+
35391+/**
35392+ * i40e_is_ptp_pin_dev - check if device supports PTP pins
35393+ * @hw: pointer to the hardware structure
35394+ *
35395+ * Return true if device supports PTP pins, false otherwise.
35396+ */
35397+static bool i40e_is_ptp_pin_dev(const struct i40e_hw* const hw)
35398+{
35399+ return I40E_DEV_ID_25G_SFP28 == hw->device_id &&
35400+ I40E_SUBDEV_ID_25G_PTP_PIN == hw->subsystem_device_id;
35401+}
35402+
35403+/**
35404+ * i40_ptp_reset_timing_events - Reset PTP timing events
35405+ * @pf: Board private structure
35406+ *
35407+ * This function resets timing events for pf.
35408+ **/
35409+static void i40_ptp_reset_timing_events(struct i40e_pf *pf)
35410+{
35411+ u32 i;
35412+
35413+ spin_lock_bh(&pf->ptp_rx_lock);
35414+ for(i = 0; i <= I40E_PRTTSYN_RXTIME_L_MAX_INDEX; i++ ) {
35415+ /* reading and automatically clearing timing events registers */
35416+ rd32(&pf->hw, I40E_PRTTSYN_RXTIME_L(i));
35417+ rd32(&pf->hw, I40E_PRTTSYN_RXTIME_H(i));
35418+ pf->latch_events[i] = 0;
35419+ }
35420+ /* reading and automatically clearing timing events registers */
35421+ rd32(&pf->hw, I40E_PRTTSYN_TXTIME_L);
35422+ rd32(&pf->hw, I40E_PRTTSYN_TXTIME_H);
35423+
35424+ pf->tx_hwtstamp_timeouts = 0;
35425+ pf->tx_hwtstamp_skipped = 0;
35426+ pf->rx_hwtstamp_cleared = 0;
35427+ pf->latch_event_flags = 0;
35428+ spin_unlock_bh(&pf->ptp_rx_lock);
35429+}
35430+
35431+int i40e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
35432+ enum ptp_pin_function func, unsigned int chan)
35433+{
35434+ return 0;
35435+}
35436 /**
35437 * i40e_ptp_read - Read the PHC time from the device
35438 * @pf: Board private structure
35439@@ -88,8 +268,8 @@ static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts)
35440 /* The timer will not update until the high register is written, so
35441 * write the low register first.
35442 */
35443- wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF);
35444- wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32);
35445+ wr32(hw, I40E_PRTTSYN_TIME_L, (u32)ns);
35446+ wr32(hw, I40E_PRTTSYN_TIME_H, (u32)(ns >> 32));
35447 }
35448
35449 /**
35450@@ -129,42 +309,103 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
35451 ppb = -ppb;
35452 }
35453
35454- smp_mb(); /* Force any pending update before accessing. */
35455- adj = ACCESS_ONCE(pf->ptp_base_adj);
35456-
35457- freq = adj;
35458+ freq = I40E_PTP_40GB_INCVAL;
35459 freq *= ppb;
35460 diff = div_u64(freq, 1000000000ULL);
35461
35462 if (neg_adj)
35463- adj -= diff;
35464+ adj = I40E_PTP_40GB_INCVAL - diff;
35465 else
35466- adj += diff;
35467+ adj = I40E_PTP_40GB_INCVAL + diff;
35468+
35469+ /* At some link speeds, the base incval is so large that directly
35470+ * multiplying by ppb would result in arithmetic overflow even when
35471+ * using a u64. Avoid this by instead calculating the new incval
35472+ * always in terms of the 40GbE clock rate and then multiplying by the
35473+ * link speed factor afterwards. This does result in slightly lower
35474+ * precision at lower link speeds, but it is fairly minor.
35475+ */
35476+ smp_mb(); /* Force any pending update before accessing. */
35477+ adj *= READ_ONCE(pf->ptp_adj_mult);
35478
35479- wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
35480- wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
35481+ wr32(hw, I40E_PRTTSYN_INC_L, (u32)adj);
35482+ wr32(hw, I40E_PRTTSYN_INC_H, (u32)(adj >> 32));
35483
35484 return 0;
35485 }
35486
35487+/**
35488+ * i40e_ptp_set_1pps_signal_hw - configure 1PPS PTP signal for pins
35489+ * @pf: the PF private data structure
35490+ *
35491+ * Configure 1PPS signal used for PTP pins
35492+ **/
35493+static void i40e_ptp_set_1pps_signal_hw(struct i40e_pf *pf)
35494+{
35495+ struct i40e_hw *hw = &pf->hw;
35496+ struct timespec64 now;
35497+ u64 ns;
35498+
35499+ wr32(hw, I40E_PRTTSYN_AUX_0(1), 0);
35500+ wr32(hw, I40E_PRTTSYN_AUX_1(1),
35501+ I40E_PRTTSYN_AUX_1_MAX_INDEX);
35502+ wr32(hw, I40E_PRTTSYN_AUX_0(1),
35503+ I40E_PRTTSYN_AUX_0_MAX_INDEX);
35504+
35505+ i40e_ptp_read(pf, &now);
35506+ now.tv_sec += I40E_PTP_2_SEC_DELAY;
35507+ now.tv_nsec = 0;
35508+ ns = timespec64_to_ns(&now);
35509+
35510+ /* I40E_PRTTSYN_TGT_L(1) */
35511+ wr32(hw, I40E_PRTTSYN_TGT_L(1), ns & 0xFFFFFFFF);
35512+ /* I40E_PRTTSYN_TGT_H(1) */
35513+ wr32(hw, I40E_PRTTSYN_TGT_H(1), ns >> 32);
35514+ wr32(hw, I40E_PRTTSYN_CLKO(1), I40E_PTP_HALF_SECOND);
35515+ wr32(hw, I40E_PRTTSYN_AUX_1(1),
35516+ I40E_PRTTSYN_AUX_1_MAX_INDEX);
35517+ wr32(hw, I40E_PRTTSYN_AUX_0(1), 7);
35518+}
35519+
35520 /**
35521 * i40e_ptp_adjtime - Adjust the PHC time
35522 * @ptp: The PTP clock structure
35523 * @delta: Offset in nanoseconds to adjust the PHC time by
35524 *
35525- * Adjust the frequency of the PHC by the indicated parts per billion from the
35526- * base frequency.
35527+ * Adjust the current clock time by a delta specified in nanoseconds.
35528 **/
35529 static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
35530 {
35531 struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
35532- struct timespec64 now;
35533+ struct i40e_hw *hw = &pf->hw;
35534
35535 mutex_lock(&pf->tmreg_lock);
35536
35537- i40e_ptp_read(pf, &now);
35538- timespec64_add_ns(&now, delta);
35539- i40e_ptp_write(pf, (const struct timespec64 *)&now);
35540+ if (delta > -999999900LL && delta < 999999900LL) {
35541+ int neg_adj = 0;
35542+ u32 timadj;
35543+ u64 tohw;
35544+
35545+ if (delta < 0) {
35546+ neg_adj = 1;
35547+ tohw = -delta;
35548+ } else {
35549+ tohw = delta;
35550+ }
35551+
35552+ timadj = tohw & 0x3FFFFFFF;
35553+ if (neg_adj)
35554+ timadj |= I40E_ISGN;
35555+ wr32(hw, I40E_PRTTSYN_ADJ, timadj);
35556+ } else {
35557+ struct timespec64 then, now;
35558+
35559+ then = ns_to_timespec64(delta);
35560+ i40e_ptp_read(pf, &now);
35561+ now = timespec64_add(now, then);
35562+ i40e_ptp_write(pf, (const struct timespec64 *)&now);
35563+ i40e_ptp_set_1pps_signal_hw(pf);
35564+ }
35565
35566 mutex_unlock(&pf->tmreg_lock);
35567
35568@@ -174,7 +415,7 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
35569 /**
35570 * i40e_ptp_gettime - Get the time of the PHC
35571 * @ptp: The PTP clock structure
35572- * @ts: timespec structure to hold the current time value
35573+ * @ts: timespec64 structure to hold the current time value
35574 *
35575 * Read the device clock and return the correct value on ns, after converting it
35576 * into a timespec struct.
35577@@ -186,14 +427,13 @@ static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
35578 mutex_lock(&pf->tmreg_lock);
35579 i40e_ptp_read(pf, ts);
35580 mutex_unlock(&pf->tmreg_lock);
35581-
35582 return 0;
35583 }
35584
35585 /**
35586 * i40e_ptp_settime - Set the time of the PHC
35587 * @ptp: The PTP clock structure
35588- * @ts: timespec structure that holds the new time value
35589+ * @ts: timespec64 structure that holds the new time value
35590 *
35591 * Set the device clock to the user input value. The conversion from timespec
35592 * to ns happens in the write function.
35593@@ -206,23 +446,155 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
35594 mutex_lock(&pf->tmreg_lock);
35595 i40e_ptp_write(pf, ts);
35596 mutex_unlock(&pf->tmreg_lock);
35597+ return 0;
35598+}
35599+
35600+#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64
35601+/**
35602+ * i40e_ptp_gettime32 - Get the time of the PHC
35603+ * @ptp: The PTP clock structure
35604+ * @ts: timespec structure to hold the current time value
35605+ *
35606+ * Read the device clock and return the correct value on ns, after converting it
35607+ * into a timespec struct.
35608+ **/
35609+static int i40e_ptp_gettime32(struct ptp_clock_info *ptp, struct timespec *ts)
35610+{
35611+ struct timespec64 ts64;
35612+ int err;
35613
35614+ err = i40e_ptp_gettime(ptp, &ts64);
35615+ if (err)
35616+ return err;
35617+
35618+ *ts = timespec64_to_timespec(ts64);
35619 return 0;
35620 }
35621
35622 /**
35623- * i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem
35624+ * i40e_ptp_settime32 - Set the time of the PHC
35625 * @ptp: The PTP clock structure
35626- * @rq: The requested feature to change
35627- * @on: Enable/disable flag
35628+ * @ts: timespec structure that holds the new time value
35629 *
35630- * The XL710 does not support any of the ancillary features of the PHC
35631- * subsystem, so this function may just return.
35632+ * Set the device clock to the user input value. The conversion from timespec
35633+ * to ns happens in the write function.
35634+ **/
35635+static int i40e_ptp_settime32(struct ptp_clock_info *ptp,
35636+ const struct timespec *ts)
35637+{
35638+ struct timespec64 ts64 = timespec_to_timespec64(*ts);
35639+
35640+ return i40e_ptp_settime(ptp, &ts64);
35641+}
35642+#endif
35643+
35644+static int i40e_pps_configure(struct ptp_clock_info *ptp,
35645+ struct ptp_clock_request *rq,
35646+ int on)
35647+{
35648+ return 0;
35649+}
35650+
35651+enum i40e_ptp_gpio_pin_state i40e_pin_state(int index, int func)
35652+{
35653+ enum i40e_ptp_gpio_pin_state state = off;
35654+
35655+ if (index == 0 && func == PTP_PF_EXTTS)
35656+ state = in_A;
35657+ if (index == 1 && func == PTP_PF_EXTTS)
35658+ state = in_B;
35659+ if (index == 0 && func == PTP_PF_PEROUT)
35660+ state = out_A;
35661+ if (index == 1 && func == PTP_PF_PEROUT)
35662+ state = out_B;
35663+
35664+ return state;
35665+}
35666+
35667+int i40e_ptp_enable_pin(struct i40e_pf *pf, unsigned int chan,
35668+ enum ptp_pin_function func, int on)
35669+{
35670+ struct i40e_ptp_pins_settings pins;
35671+ enum i40e_ptp_gpio_pin_state *pin = NULL;
35672+ int pin_index;
35673+
35674+ /* Preserve previous state of pins that we don't touch */
35675+ pins.sdp3_2 = pf->ptp_pins->sdp3_2;
35676+ pins.sdp3_3 = pf->ptp_pins->sdp3_3;
35677+ pins.gpio_4 = pf->ptp_pins->gpio_4;
35678+
35679+ /* If we want to turn on the pin - find the corresponding one based on
35680+ * the given index. If we want to turn the function off - we need to
35681+ * find which pin had it assigned, we can't use ptp_find_pin here
35682+ * because it tries to lock the pincfg_mux which is locked by
35683+ * ptp_pin_store() that calls here.
35684+ */
35685+ if (on) {
35686+ pin_index = ptp_find_pin(pf->ptp_clock, func, chan);
35687+ if (pin_index < 0)
35688+ return -EBUSY;
35689+
35690+ switch (pin_index) {
35691+ case SDP3_2:
35692+ pin = &pins.sdp3_2;
35693+ break;
35694+ case SDP3_3:
35695+ pin = &pins.sdp3_3;
35696+ break;
35697+ case GPIO_4:
35698+ pin = &pins.gpio_4;
35699+ break;
35700+ default:
35701+ return -EINVAL;
35702+ }
35703+
35704+ *pin = i40e_pin_state(chan, func);
35705+ } else {
35706+ if (pins.sdp3_2 == i40e_pin_state(chan, func))
35707+ pins.sdp3_2 = off;
35708+ if (pins.sdp3_3 == i40e_pin_state(chan, func))
35709+ pins.sdp3_3 = off;
35710+ if (pins.gpio_4 == i40e_pin_state(chan, func))
35711+ pins.gpio_4 = off;
35712+ }
35713+
35714+ return i40e_ptp_set_pins(pf, &pins) ? -EINVAL : 0;
35715+}
35716+
35717+/**
35718+ * i40e_ptp_feature_enable - Enable external clock pins
35719+ * @ptp: The PTP clock structure
35720+ * @rq: The PTP clock request structure
35721+ * @on: To turn feature on/off
35722+ *
35723+ * Setting on/off PTP PPS feature for pin.
35724 **/
35725 static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
35726- struct ptp_clock_request *rq, int on)
35727+ struct ptp_clock_request *rq,
35728+ int on)
35729 {
35730- return -EOPNOTSUPP;
35731+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
35732+
35733+ enum ptp_pin_function func;
35734+ unsigned int chan;
35735+
35736+ /* TODO: Implement flags handling for EXTTS and PEROUT */
35737+ switch (rq->type) {
35738+ case PTP_CLK_REQ_EXTTS:
35739+ func = PTP_PF_EXTTS;
35740+ chan = rq->extts.index;
35741+ break;
35742+ case PTP_CLK_REQ_PEROUT:
35743+ func = PTP_PF_PEROUT;
35744+ chan = rq->perout.index;
35745+ break;
35746+ case PTP_CLK_REQ_PPS:
35747+ return i40e_pps_configure(ptp, rq, on);
35748+ default:
35749+ return -EOPNOTSUPP;
35750+ }
35751+
35752+ return i40e_ptp_enable_pin(pf, chan, func, on);
35753 }
35754
35755 /**
35756@@ -269,7 +641,6 @@ static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
35757 /**
35758 * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
35759 * @pf: The PF private data structure
35760- * @vsi: The VSI with the rings relevant to 1588
35761 *
35762 * This watchdog task is scheduled to detect error case where hardware has
35763 * dropped an Rx packet that was timestamped when the ring is full. The
35764@@ -333,10 +704,12 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf)
35765 * This watchdog task is run periodically to make sure that we clear the Tx
35766 * timestamp logic if we don't obtain a timestamp in a reasonable amount of
35767 * time. It is unexpected in the normal case but if it occurs it results in
35768- * permanently prevent timestamps of future packets
35769+ * permanently preventing timestamps of future packets.
35770 **/
35771 void i40e_ptp_tx_hang(struct i40e_pf *pf)
35772 {
35773+ struct sk_buff *skb;
35774+
35775 if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
35776 return;
35777
35778@@ -349,9 +722,12 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf)
35779 * within a second it is reasonable to assume that we never will.
35780 */
35781 if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) {
35782- dev_kfree_skb_any(pf->ptp_tx_skb);
35783+ skb = pf->ptp_tx_skb;
35784 pf->ptp_tx_skb = NULL;
35785 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
35786+
35787+ /* Free the skb after we clear the bitlock */
35788+ dev_kfree_skb_any(skb);
35789 pf->tx_hwtstamp_timeouts++;
35790 }
35791 }
35792@@ -461,6 +837,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
35793 struct i40e_link_status *hw_link_info;
35794 struct i40e_hw *hw = &pf->hw;
35795 u64 incval;
35796+ u32 mult;
35797
35798 hw_link_info = &hw->phy.link_info;
35799
35800@@ -468,10 +845,10 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
35801
35802 switch (hw_link_info->link_speed) {
35803 case I40E_LINK_SPEED_10GB:
35804- incval = I40E_PTP_10GB_INCVAL;
35805+ mult = I40E_PTP_10GB_INCVAL_MULT;
35806 break;
35807 case I40E_LINK_SPEED_1GB:
35808- incval = I40E_PTP_1GB_INCVAL;
35809+ mult = I40E_PTP_1GB_INCVAL_MULT;
35810 break;
35811 case I40E_LINK_SPEED_100MB:
35812 {
35813@@ -482,31 +859,36 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
35814 "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n");
35815 warn_once++;
35816 }
35817- incval = 0;
35818+ mult = 0;
35819 break;
35820 }
35821 case I40E_LINK_SPEED_40GB:
35822 default:
35823- incval = I40E_PTP_40GB_INCVAL;
35824+ mult = 1;
35825 break;
35826 }
35827
35828+ /* The increment value is calculated by taking the base 40GbE incvalue
35829+ * and multiplying it by a factor based on the link speed.
35830+ */
35831+ incval = I40E_PTP_40GB_INCVAL * mult;
35832+
35833 /* Write the new increment value into the increment register. The
35834 * hardware will not update the clock until both registers have been
35835 * written.
35836 */
35837- wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF);
35838- wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
35839+ wr32(hw, I40E_PRTTSYN_INC_L, (u32)incval);
35840+ wr32(hw, I40E_PRTTSYN_INC_H, (u32)(incval >> 32));
35841
35842 /* Update the base adjustement value. */
35843- ACCESS_ONCE(pf->ptp_base_adj) = incval;
35844+ WRITE_ONCE(pf->ptp_adj_mult, mult);
35845 smp_mb(); /* Force the above update. */
35846 }
35847
35848 /**
35849 * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
35850 * @pf: Board private structure
35851- * @ifreq: ioctl data
35852+ * @ifr: ioctl data
35853 *
35854 * Obtain the current hardware timestamping settigs as requested. To do this,
35855 * keep a shadow copy of the timestamp settings rather than attempting to
35856@@ -523,6 +905,296 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
35857 -EFAULT : 0;
35858 }
35859
35860+/**
35861+ * i40e_ptp_free_pins - free memory used by PTP pins
35862+ * @pf: Board private structure
35863+ *
35864+ * Release memory allocated for PTP pins.
35865+ **/
35866+void i40e_ptp_free_pins(struct i40e_pf *pf) {
35867+ if (pf->hw.pf_id == 0 && i40e_is_ptp_pin_dev(&pf->hw)) {
35868+ kfree(pf->ptp_pins);
35869+ kfree(pf->ptp_caps.pin_config);
35870+ pf->ptp_pins = NULL;
35871+ kobject_put(pf->ptp_kobj);
35872+ }
35873+}
35874+
35875+/**
35876+ * i40e_ptp_set_pin_out_hw - Set HW GPIO pin, cares only for outputs
35877+ * @pf: Board private structure
35878+ *
35879+ * This function sets GPIO pin for PTP, cares only for outputs
35880+ **/
35881+static void i40e_ptp_set_pin_out_hw(struct i40e_pf *pf,
35882+ unsigned int pin,
35883+ enum i40e_ptp_gpio_pin_state state)
35884+{
35885+ struct i40e_hw *hw = &pf->hw;
35886+
35887+ switch (state) {
35888+ case out_A:
35889+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
35890+ I40E_PORT_0_OUT_HIGH_TIMESYNC_0);
35891+ break;
35892+ case out_B:
35893+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
35894+ I40E_PORT_1_OUT_HIGH_TIMESYNC_1);
35895+ break;
35896+ default:
35897+ break;
35898+ }
35899+}
35900+/**
35901+ * i40e_ptp_set_pin_in_hw - Set HW GPIO pin, cares only for inputs
35902+ * @pf: Board private structure
35903+ *
35904+ * This function sets GPIO pin for PTP, cares only for inputs
35905+ **/
35906+static void i40e_ptp_set_pin_in_hw(struct i40e_pf *pf,
35907+ unsigned int pin,
35908+ enum i40e_ptp_gpio_pin_state state)
35909+{
35910+ struct i40e_hw *hw = &pf->hw;
35911+
35912+ switch (state) {
35913+ case off:
35914+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin), 0);
35915+ break;
35916+ case in_A:
35917+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin), I40E_PORT_0_TIMESYNC_1);
35918+ break;
35919+ case in_B:
35920+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin), I40E_PORT_1_TIMESYNC_1);
35921+ break;
35922+ default:
35923+ break;
35924+ }
35925+}
35926+
35927+/**
35928+ * i40e_ptp_set_pins_hw - Set HW GPIO pins
35929+ * @pf: Board private structure
35930+ *
35931+ * This function sets GPIO pins for PTP
35932+ **/
35933+static void i40e_ptp_set_pins_hw(struct i40e_pf *pf)
35934+{
35935+ const struct i40e_ptp_pins_settings *pins = pf->ptp_pins;
35936+ struct i40e_hw *hw = &pf->hw;
35937+
35938+ if (!i40e_is_ptp_pin_dev(hw)) {
35939+ dev_warn(&pf->pdev->dev,
35940+ "PTP external clock not supported.\n");
35941+ return;
35942+ }
35943+
35944+ if (!pins || pf->hw.pf_id) {
35945+ dev_warn(&pf->pdev->dev,
35946+ "PTP PIN setting allowed for PF0 only.\n");
35947+ return;
35948+ }
35949+
35950+ /* setting SDP PTP pins first to the low/off state */
35951+ i40e_ptp_set_pin_in_hw(pf, I40E_SDP3_2, off);
35952+ i40e_ptp_set_pin_in_hw(pf, I40E_SDP3_3, off);
35953+ i40e_ptp_set_pin_in_hw(pf, I40E_GPIO_4, off);
35954+
35955+ i40e_ptp_set_pin_out_hw(pf, I40E_SDP3_2, pins->sdp3_2);
35956+ i40e_ptp_set_pin_out_hw(pf, I40E_SDP3_3, pins->sdp3_3);
35957+ i40e_ptp_set_pin_out_hw(pf, I40E_GPIO_4, pins->gpio_4);
35958+
35959+ i40e_ptp_set_pin_in_hw(pf, I40E_SDP3_2, pins->sdp3_2);
35960+ i40e_ptp_set_pin_in_hw(pf, I40E_SDP3_3, pins->sdp3_3);
35961+ i40e_ptp_set_pin_in_hw(pf, I40E_GPIO_4, pins->gpio_4);
35962+
35963+ switch (pf->ptp_pins->led2_0) {
35964+ case low: wr32(hw, I40E_GLGEN_GPIO_SET,I40E_DRIVE_SDP_ON |
35965+ I40E_LED2_0); break;
35966+ case high: wr32(hw, I40E_GLGEN_GPIO_SET,I40E_DRIVE_SDP_ON |
35967+ I40E_GPIO_SET_HIGH | I40E_LED2_0); break;
35968+ }
35969+ switch (pf->ptp_pins->led2_1) {
35970+ case low: wr32(hw, I40E_GLGEN_GPIO_SET,I40E_DRIVE_SDP_ON |
35971+ I40E_LED2_1); break;
35972+ case high: wr32(hw, I40E_GLGEN_GPIO_SET,I40E_DRIVE_SDP_ON |
35973+ I40E_GPIO_SET_HIGH | I40E_LED2_1); break;
35974+ }
35975+ switch (pf->ptp_pins->led3_0) {
35976+ case low: wr32(hw, I40E_GLGEN_GPIO_SET,I40E_DRIVE_SDP_ON |
35977+ I40E_LED3_0); break;
35978+ case high: wr32(hw, I40E_GLGEN_GPIO_SET,I40E_DRIVE_SDP_ON |
35979+ I40E_GPIO_SET_HIGH | I40E_LED3_0); break;
35980+ }
35981+ switch (pf->ptp_pins->led3_1) {
35982+ case low: wr32(hw, I40E_GLGEN_GPIO_SET,I40E_DRIVE_SDP_ON |
35983+ I40E_LED3_1); break;
35984+ case high: wr32(hw, I40E_GLGEN_GPIO_SET,I40E_DRIVE_SDP_ON |
35985+ I40E_GPIO_SET_HIGH | I40E_LED3_1); break;
35986+ }
35987+
35988+ dev_info(&pf->pdev->dev,
35989+ "PTP configuration set to: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s\n",
35990+ i40e_ptp_gpio_pin_state2str[pins->sdp3_2],
35991+ i40e_ptp_gpio_pin_state2str[pins->sdp3_3],
35992+ i40e_ptp_gpio_pin_state2str[pins->gpio_4]);
35993+}
35994+
35995+/**
35996+ * i40e_ptp_set_pins - set PTP pins in HW
35997+ * @pf: Board private structure
35998+ * @pins: PTP pins to be applied
35999+ *
36000+ * Validate and set PTP pins in HW for specific PF.
36001+ * Return 0 on success or negative value on error.
36002+ **/
36003+static int i40e_ptp_set_pins(struct i40e_pf *pf,
36004+ struct i40e_ptp_pins_settings *pins)
36005+{
36006+ int i = 0;
36007+
36008+ if (!i40e_is_ptp_pin_dev(&pf->hw)) {
36009+ dev_warn(&pf->pdev->dev,
36010+ "PTP external clock not supported.\n");
36011+ return -ENOTSUPP;
36012+ }
36013+
36014+ if (!pf->ptp_pins || pf->hw.pf_id) {
36015+ dev_warn(&pf->pdev->dev,
36016+ "PTP PIN setting allowed for PF0 only.\n");
36017+ return -ENOTSUPP;
36018+ }
36019+
36020+ if (pins->sdp3_2 == invalid)
36021+ pins->sdp3_2 = pf->ptp_pins->sdp3_2;
36022+ if (pins->sdp3_3 == invalid)
36023+ pins->sdp3_3 = pf->ptp_pins->sdp3_3;
36024+ if (pins->gpio_4 == invalid)
36025+ pins->gpio_4 = pf->ptp_pins->gpio_4;
36026+ while (i40e_ptp_pin_led_allowed_states[i].sdp3_2 != end) {
36027+ if (pins->sdp3_2 == i40e_ptp_pin_led_allowed_states[i].sdp3_2 &&
36028+ pins->sdp3_3 == i40e_ptp_pin_led_allowed_states[i].sdp3_3 &&
36029+ pins->gpio_4 == i40e_ptp_pin_led_allowed_states[i].gpio_4) {
36030+ pins->led2_0 = i40e_ptp_pin_led_allowed_states[i].led2_0;
36031+ pins->led2_1 = i40e_ptp_pin_led_allowed_states[i].led2_1;
36032+ pins->led3_0 = i40e_ptp_pin_led_allowed_states[i].led3_0;
36033+ pins->led3_1 = i40e_ptp_pin_led_allowed_states[i].led3_1;
36034+ break;
36035+ }
36036+ i++;
36037+ }
36038+ if (i40e_ptp_pin_led_allowed_states[i].sdp3_2 == end) {
36039+ dev_warn(&pf->pdev->dev,
36040+ "Unsupported PTP pin configuration: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s\n",
36041+ i40e_ptp_gpio_pin_state2str[pins->sdp3_2],
36042+ i40e_ptp_gpio_pin_state2str[pins->sdp3_3],
36043+ i40e_ptp_gpio_pin_state2str[pins->gpio_4]);
36044+
36045+ return -EPERM;
36046+ }
36047+ memcpy(pf->ptp_pins, pins, sizeof(*pins));
36048+ i40e_ptp_set_pins_hw(pf);
36049+ i40_ptp_reset_timing_events(pf);
36050+
36051+ return 0;
36052+}
36053+
36054+/**
36055+ * i40e_ptp_set_pins_ioctl - ioctl interface to set the HW timestamping
36056+ * gpio pins
36057+ * @pf: board private structure
36058+ * @ifr: ioctl data
36059+ *
36060+ * Set the current hardware timestamping pins for current PF.
36061+ **/
36062+int i40e_ptp_set_pins_ioctl(struct i40e_pf *pf, struct ifreq *ifr)
36063+{
36064+ struct i40e_ptp_pins_settings pins;
36065+ int err;
36066+
36067+ if (!i40e_is_ptp_pin_dev(&pf->hw)) {
36068+ dev_warn(&pf->pdev->dev,
36069+ "PTP external clock not supported.\n");
36070+ return -ENOTSUPP;
36071+ }
36072+
36073+ if (!pf->ptp_pins || pf->hw.pf_id) {
36074+ dev_warn(&pf->pdev->dev,
36075+ "PTP PIN setting allowed for PF0 only.\n");
36076+ return -ENOTSUPP;
36077+ }
36078+
36079+ err = copy_from_user(&pins, ifr->ifr_data, sizeof(pins));
36080+ if (err) {
36081+ dev_warn(&pf->pdev->dev, "Cannot read user data during SIOCSPINS ioctl\n");
36082+ return -EIO;
36083+ }
36084+
36085+ return i40e_ptp_set_pins(pf, &pins);
36086+}
36087+
36088+/**
36089+ * i40e_ptp_alloc_pins - allocate PTP pins structure
36090+ * @pf: Board private structure
36091+ *
36092+ * allocate PTP pins structure
36093+ **/
36094+int i40e_ptp_alloc_pins(struct i40e_pf *pf)
36095+{
36096+ dev_info(&pf->pdev->dev,
36097+ "PTP subsystem device ID: %d\n", pf->hw.subsystem_device_id);
36098+
36099+ if (pf->hw.pf_id || !i40e_is_ptp_pin_dev(&pf->hw))
36100+ return 0;
36101+
36102+ pf->ptp_pins = kzalloc(sizeof(struct i40e_ptp_pins_settings), GFP_KERNEL);
36103+
36104+ if (!pf->ptp_pins) {
36105+ dev_warn(&pf->pdev->dev, "Cannot allocate memory for PTP pins structure\n");
36106+ return -I40E_ERR_NO_MEMORY;
36107+ }
36108+
36109+ pf->ptp_pins->sdp3_2 = off;
36110+ pf->ptp_pins->sdp3_3 = off;
36111+ pf->ptp_pins->gpio_4 = off;
36112+ pf->ptp_pins->led2_0 = high;
36113+ pf->ptp_pins->led2_1 = high;
36114+ pf->ptp_pins->led3_0 = high;
36115+ pf->ptp_pins->led3_1 = high;
36116+
36117+ i40e_ptp_set_pins_hw(pf);
36118+
36119+ return 0;
36120+}
36121+
36122+
36123+/**
36124+ * i40e_ptp_get_pins - ioctl interface to read the HW timestamping gpio pins
36125+ * @pf: Board private structure
36126+ * @ifr: ioctl data
36127+ *
36128+ * Obtain the current hardware timestamping gpio pins settings as requested.
36129+ **/
36130+int i40e_ptp_get_pins(struct i40e_pf *pf, struct ifreq *ifr)
36131+{
36132+ if (!i40e_is_ptp_pin_dev(&pf->hw)) {
36133+ dev_warn(&pf->pdev->dev,
36134+ "PTP external clock not supported.\n");
36135+ return -ENOTSUPP;
36136+ }
36137+
36138+ if (!pf->ptp_pins || pf->hw.pf_id) {
36139+ dev_warn(&pf->pdev->dev,
36140+ "PTP PIN reading allowed for PF0 only.\n");
36141+ return -ENOTSUPP;
36142+ }
36143+
36144+ return copy_to_user(ifr->ifr_data, pf->ptp_pins,
36145+ sizeof(*(pf->ptp_pins)))
36146+ ? -EFAULT
36147+ : 0;
36148+}
36149+
36150 /**
36151 * i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode
36152 * @pf: Board private structure
36153@@ -541,6 +1213,21 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
36154 struct i40e_hw *hw = &pf->hw;
36155 u32 tsyntype, regval;
36156
36157+ /* Selects external trigger to cause event */
36158+ regval = rd32(hw, I40E_PRTTSYN_AUX_0(0));
36159+ /* Bit 17:16 is EVNTLVL, 01B rising edge */
36160+ regval &= 0;
36161+ regval |= (1 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT);
36162+ /* regval: 0001 0000 0000 0000 0000 */
36163+ wr32(hw, I40E_PRTTSYN_AUX_0(0), regval);
36164+
36165+	/* Enable interrupts */
36166+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
36167+ regval |= 1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT;
36168+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
36169+
36170+ INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work);
36171+
36172 /* Reserved for future extensions. */
36173 if (config->flags)
36174 return -EINVAL;
36175@@ -599,7 +1286,9 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
36176 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
36177 }
36178 break;
36179+#ifdef HAVE_HWTSTAMP_FILTER_NTP_ALL
36180 case HWTSTAMP_FILTER_NTP_ALL:
36181+#endif /* HAVE_HWTSTAMP_FILTER_NTP_ALL */
36182 case HWTSTAMP_FILTER_ALL:
36183 default:
36184 return -ERANGE;
36185@@ -650,7 +1339,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
36186 /**
36187 * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
36188 * @pf: Board private structure
36189- * @ifreq: ioctl data
36190+ * @ifr: ioctl data
36191 *
36192 * Respond to the user filter requests and make the appropriate hardware
36193 * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
36194@@ -683,6 +1372,41 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
36195 -EFAULT : 0;
36196 }
36197
36198+static int i40e_init_pin_config(struct i40e_pf *pf)
36199+{
36200+ int i;
36201+
36202+ if (pf->hw.pf_id != 0)
36203+ return -ENOTSUPP;
36204+
36205+ pf->ptp_caps.n_pins = 3;
36206+ pf->ptp_caps.n_ext_ts = 2;
36207+ pf->ptp_caps.pps = 1;
36208+ pf->ptp_caps.n_per_out = 2;
36209+
36210+ pf->ptp_caps.pin_config = kcalloc(pf->ptp_caps.n_pins,
36211+ sizeof(*pf->ptp_caps.pin_config),
36212+ GFP_KERNEL);
36213+ if (!pf->ptp_caps.pin_config)
36214+ return -ENOMEM;
36215+
36216+ for (i = 0; i < pf->ptp_caps.n_pins; i++) {
36217+ snprintf(pf->ptp_caps.pin_config[i].name,
36218+ sizeof(pf->ptp_caps.pin_config[i].name),
36219+ "%s", sdp_desc[i].name);
36220+ pf->ptp_caps.pin_config[i].index = sdp_desc[i].index;
36221+ pf->ptp_caps.pin_config[i].func = PTP_PF_NONE;
36222+ pf->ptp_caps.pin_config[i].chan = sdp_desc[i].chan;
36223+ }
36224+
36225+ pf->ptp_caps.verify = i40e_ptp_verify;
36226+ pf->ptp_caps.enable = i40e_ptp_feature_enable;
36227+
36228+ pf->ptp_caps.pps = 1;
36229+
36230+ return 0;
36231+}
36232+
36233 /**
36234 * i40e_ptp_create_clock - Create PTP clock device for userspace
36235 * @pf: Board private structure
36236@@ -695,20 +1419,29 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
36237 **/
36238 static long i40e_ptp_create_clock(struct i40e_pf *pf)
36239 {
36240+ int err;
36241 /* no need to create a clock device if we already have one */
36242 if (!IS_ERR_OR_NULL(pf->ptp_clock))
36243 return 0;
36244
36245- strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name));
36246+ strlcpy(pf->ptp_caps.name, i40e_driver_name,
36247+ sizeof(pf->ptp_caps.name) - 1);
36248 pf->ptp_caps.owner = THIS_MODULE;
36249 pf->ptp_caps.max_adj = 999999999;
36250- pf->ptp_caps.n_ext_ts = 0;
36251- pf->ptp_caps.pps = 0;
36252 pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
36253 pf->ptp_caps.adjtime = i40e_ptp_adjtime;
36254+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
36255 pf->ptp_caps.gettime64 = i40e_ptp_gettime;
36256 pf->ptp_caps.settime64 = i40e_ptp_settime;
36257- pf->ptp_caps.enable = i40e_ptp_feature_enable;
36258+#else
36259+ pf->ptp_caps.gettime = i40e_ptp_gettime32;
36260+ pf->ptp_caps.settime = i40e_ptp_settime32;
36261+#endif
36262+ if (i40e_is_ptp_pin_dev(&pf->hw)) {
36263+ err = i40e_init_pin_config(pf);
36264+ if (err)
36265+ return err;
36266+ }
36267
36268 /* Attempt to register the clock before enabling the hardware. */
36269 pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
36270@@ -722,9 +1455,202 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
36271 pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
36272 pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
36273
36274+ /* Set the previous "reset" time to the current Kernel clock time */
36275+ pf->ptp_prev_hw_time = ktime_to_timespec64(ktime_get_real());
36276+ pf->ptp_reset_start = ktime_get();
36277+
36278 return 0;
36279 }
36280
36281+/**
36282+ * i40e_ptp_save_hw_time - Save the current PTP time as ptp_prev_hw_time
36283+ * @pf: Board private structure
36284+ *
36285+ * Read the current PTP time and save it into pf->ptp_prev_hw_time. This should
36286+ * be called at the end of preparing to reset, just before hardware reset
36287+ * occurs, in order to preserve the PTP time as close as possible across
36288+ * resets.
36289+ */
36290+void i40e_ptp_save_hw_time(struct i40e_pf *pf)
36291+{
36292+ /* don't try to access the PTP clock if it's not enabled */
36293+ if (!(pf->flags & I40E_FLAG_PTP))
36294+ return;
36295+
36296+ i40e_ptp_gettime(&pf->ptp_caps, &pf->ptp_prev_hw_time);
36297+ /* Get a monotonic starting time for this reset */
36298+ pf->ptp_reset_start = ktime_get();
36299+}
36300+
36301+/**
36302+ * i40e_ptp_restore_hw_time - Restore the ptp_prev_hw_time + delta to PTP regs
36303+ * @pf: Board private structure
36304+ *
36305+ * Restore the PTP hardware clock registers. We previously cached the PTP
36306+ * hardware time as pf->ptp_prev_hw_time. To be as accurate as possible,
36307+ * update this value based on the time delta since the time was saved, using
36308+ * CLOCK_MONOTONIC (via ktime_get()) to calculate the time difference.
36309+ *
36310+ * This ensures that the hardware clock is restored to nearly what it should
36311+ * have been if a reset had not occurred.
36312+ */
36313+void i40e_ptp_restore_hw_time(struct i40e_pf *pf)
36314+{
36315+ ktime_t delta = ktime_sub(ktime_get(), pf->ptp_reset_start);
36316+
36317+ /* Update the previous HW time with the ktime delta */
36318+ timespec64_add_ns(&pf->ptp_prev_hw_time, ktime_to_ns(delta));
36319+
36320+ /* Restore the hardware clock registers */
36321+ i40e_ptp_settime(&pf->ptp_caps, &pf->ptp_prev_hw_time);
36322+}
36323+
36324+#ifndef HAVE_PTP_1588_CLOCK_PINS
36325+/**
36326+ * __get_pf_pdev - helper function to get the pdev
36327+ * @kobj: kobject passed
36328+ * @pdev: PCI device information struct
36329+ */
36330+static int __get_pf_pdev(struct kobject *kobj, struct pci_dev **pdev)
36331+{
36332+ struct device *dev;
36333+
36334+ if (!kobj->parent)
36335+ return -EINVAL;
36336+
36337+ /* get pdev */
36338+ dev = to_dev(kobj->parent);
36339+ *pdev = to_pci_dev(dev);
36340+
36341+ return 0;
36342+}
36343+
36344+/**
36345+ * i40e_ptp_pins_to_num - convert PTP pins to integer number
36346+ * @pf: PCI physical function
36347+ *
36348+ * Return PTP pins states from pf as integer number.
36349+ **/
36350+static unsigned int i40e_ptp_pins_to_num(struct i40e_pf *pf)
36351+{
36352+ return pf->ptp_pins->gpio_4 +
36353+ pf->ptp_pins->sdp3_3 * 10 +
36354+ pf->ptp_pins->sdp3_2 * 100;
36355+}
36356+
36357+/**
36358+ * i40e_ptp_set_pins_str - wrapper to set PTP pins in HW from string
36359+ * @pf: Board private structure
36360+ * @buf: string with PTP pins to be applied
36361+ * @count: length of a buf argument
36362+ *
36363+ * Set the current hardware timestamping pins for current PF.
36364+ * Return 0 on success and negative value on error.
36365+ **/
36366+static int i40e_ptp_set_pins_str(struct i40e_pf *pf, const char* buf,
36367+ int count)
36368+{
36369+ struct i40e_ptp_pins_settings pins;
36370+ const int PIN_STR_LEN = 4;
36371+ unsigned long res;
36372+
36373+ if (count != PIN_STR_LEN || kstrtoul(buf, 10, &res))
36374+ return -EINVAL;
36375+
36376+ pins.sdp3_2 = res / 100 % 10;
36377+ pins.sdp3_3 = res / 10 % 10;
36378+ pins.gpio_4 = res % 10;
36379+
36380+ if (pins.sdp3_2 > out_B ||
36381+ pins.sdp3_3 > out_B ||
36382+ pins.gpio_4 > out_B)
36383+ return -EINVAL;
36384+
36385+ return i40e_ptp_set_pins(pf, &pins) ? -EINVAL : count;
36386+}
36387+
36388+
36389+
36390+/**
36391+ * i40e_sysfs_ptp_pins_read - sysfs interface for reading PTP pins status
36392+ * @kobj: sysfs node
36393+ * @attr: sysfs node attributes
36394+ * @buf: string representing PTP pins
36395+ *
36396+ * Return number of bytes read on success or negative value on failure.
36397+ **/
36398+static ssize_t i40e_sysfs_ptp_pins_read(struct kobject *kobj,
36399+ struct kobj_attribute *attr,
36400+ char *buf)
36401+{
36402+ struct pci_dev *pdev;
36403+ struct i40e_pf *pf;
36404+ unsigned int pins;
36405+
36406+ if(__get_pf_pdev(kobj, &pdev))
36407+ return -EPERM;
36408+
36409+ pf = pci_get_drvdata(pdev);
36410+ pins = i40e_ptp_pins_to_num(pf);
36411+
36412+ dev_info(&pf->pdev->dev,
36413+ "PTP pins: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s\n",
36414+ i40e_ptp_gpio_pin_state2str[pf->ptp_pins->sdp3_2],
36415+ i40e_ptp_gpio_pin_state2str[pf->ptp_pins->sdp3_3],
36416+ i40e_ptp_gpio_pin_state2str[pf->ptp_pins->gpio_4]);
36417+
36418+ return sprintf(buf, "%.3d\n", pins);
36419+}
36420+
36421+/**
36422+ * i40e_sysfs_ptp_pins_write - sysfs interface for setting PTP pins in HW
36423+ * @kobj: sysfs node
36424+ * @attr: sysfs node attributes
36425+ * @buf: string representing PTP pins
36426+ * @count: length of a 'buf' string
36427+ *
36428+ * Return number of bytes written on success or negative value on failure.
36429+ **/
36430+static ssize_t i40e_sysfs_ptp_pins_write(struct kobject *kobj,
36431+ struct kobj_attribute *attr,
36432+ const char *buf, size_t count)
36433+{
36434+ struct pci_dev *pdev;
36435+ struct i40e_pf *pf;
36436+
36437+ if(__get_pf_pdev(kobj, &pdev))
36438+ return -EPERM;
36439+
36440+ pf = pci_get_drvdata(pdev);
36441+
36442+ return i40e_ptp_set_pins_str(pf, buf, count);
36443+}
36444+
36445+/**
36446+ * i40e_ptp_pins_sysfs_init - initialize sysfs for PTP pins
36447+ * @pf: board private structure
36448+ *
36449+ * Initialize sysfs for handling PTP timestamping pins in HW.
36450+ **/
36451+static void i40e_ptp_pins_sysfs_init(struct i40e_pf *pf)
36452+{
36453+ if (pf->hw.pf_id != 0 || !i40e_is_ptp_pin_dev(&pf->hw))
36454+ return;
36455+
36456+ pf->ptp_kobj = kobject_create_and_add("ptp_pins", &pf->pdev->dev.kobj);
36457+ if(!pf->ptp_kobj) {
36458+ dev_info(&pf->pdev->dev, "Failed to create ptp_pins kobject\n");
36459+ return;
36460+ }
36461+
36462+ if (sysfs_create_file(pf->ptp_kobj, &ptp_pins_attribute.attr)) {
36463+ dev_info(&pf->pdev->dev, "Failed to create PTP pins kobject\n");
36464+ kobject_put(pf->ptp_kobj);
36465+ return;
36466+ }
36467+}
36468+#endif /* HAVE_PTP_1588_CLOCK_PINS */
36469+
36470 /**
36471 * i40e_ptp_init - Initialize the 1588 support after device probe or reset
36472 * @pf: Board private structure
36473@@ -732,10 +1658,14 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
36474 * This function sets device up for 1588 support. The first time it is run, it
36475 * will create a PHC clock device. It does not create a clock device if one
36476 * already exists. It also reconfigures the device after a reset.
36477+ *
36478+ * The first time a clock is created, i40e_ptp_create_clock will set
36479+ * pf->ptp_prev_hw_time to the current system time. During resets, it is
36480+ * expected that this timespec will be set to the last known PTP clock time,
36481+ * in order to preserve the clock time as close as possible across a reset.
36482 **/
36483 void i40e_ptp_init(struct i40e_pf *pf)
36484 {
36485- struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
36486 struct i40e_hw *hw = &pf->hw;
36487 u32 pf_id;
36488 long err;
36489@@ -747,9 +1677,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
36490 I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
36491 if (hw->pf_id != pf_id) {
36492 pf->flags &= ~I40E_FLAG_PTP;
36493- dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n",
36494- __func__,
36495- netdev->name);
36496+ dev_info(&pf->pdev->dev, "PTP not supported on this device\n");
36497 return;
36498 }
36499
36500@@ -760,10 +1688,9 @@ void i40e_ptp_init(struct i40e_pf *pf)
36501 err = i40e_ptp_create_clock(pf);
36502 if (err) {
36503 pf->ptp_clock = NULL;
36504- dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
36505- __func__);
36506- } else if (pf->ptp_clock) {
36507- struct timespec64 ts;
36508+ dev_err(&pf->pdev->dev,
36509+ "PTP clock register failed: %ld\n", err);
36510+ } else {
36511 u32 regval;
36512
36513 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
36514@@ -784,10 +1711,15 @@ void i40e_ptp_init(struct i40e_pf *pf)
36515 /* reset timestamping mode */
36516 i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
36517
36518- /* Set the clock value. */
36519- ts = ktime_to_timespec64(ktime_get_real());
36520- i40e_ptp_settime(&pf->ptp_caps, &ts);
36521+ /* Restore the clock time based on last known value */
36522+ i40e_ptp_restore_hw_time(pf);
36523 }
36524+
36525+#ifndef HAVE_PTP_1588_CLOCK_PINS
36526+ i40e_ptp_pins_sysfs_init(pf);
36527+#endif /* HAVE_PTP_1588_CLOCK_PINS */
36528+
36529+ i40e_ptp_set_1pps_signal_hw(pf);
36530 }
36531
36532 /**
36533@@ -799,20 +1731,42 @@ void i40e_ptp_init(struct i40e_pf *pf)
36534 **/
36535 void i40e_ptp_stop(struct i40e_pf *pf)
36536 {
36537+ struct i40e_hw *hw = &pf->hw;
36538+ u32 regval;
36539+
36540 pf->flags &= ~I40E_FLAG_PTP;
36541 pf->ptp_tx = false;
36542 pf->ptp_rx = false;
36543
36544 if (pf->ptp_tx_skb) {
36545- dev_kfree_skb_any(pf->ptp_tx_skb);
36546+ struct sk_buff *skb = pf->ptp_tx_skb;
36547+
36548 pf->ptp_tx_skb = NULL;
36549 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
36550+ dev_kfree_skb_any(skb);
36551 }
36552
36553 if (pf->ptp_clock) {
36554 ptp_clock_unregister(pf->ptp_clock);
36555 pf->ptp_clock = NULL;
36556- dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
36557+ dev_info(&pf->pdev->dev, "removed PHC from %s\n",
36558 pf->vsi[pf->lan_vsi]->netdev->name);
36559 }
36560+
36561+ /* Set GPIO4 as an input */
36562+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_SDP3_2), 0x0);
36563+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_SDP3_3), 0x0);
36564+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_GPIO_4), 0x0);
36565+
36566+ regval = rd32(hw, I40E_PRTTSYN_AUX_0(0));
36567+ regval &= ~I40E_PRTTSYN_AUX_0_PTPFLAG_MASK;
36568+ wr32(hw, I40E_PRTTSYN_AUX_0(0), regval);
36569+
36570+ /* Disable interrupts */
36571+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
36572+ regval &= I40E_PRTTSYN_CTL0_FFFB_MASK;
36573+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
36574+
36575+ i40e_ptp_free_pins(pf);
36576 }
36577+#endif /* HAVE_PTP_1588_CLOCK */
36578diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
36579index 86ca27f72..955611c14 100644
36580--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
36581+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
36582@@ -1,51 +1,28 @@
36583-/*******************************************************************************
36584- *
36585- * Intel Ethernet Controller XL710 Family Linux Driver
36586- * Copyright(c) 2013 - 2014 Intel Corporation.
36587- *
36588- * This program is free software; you can redistribute it and/or modify it
36589- * under the terms and conditions of the GNU General Public License,
36590- * version 2, as published by the Free Software Foundation.
36591- *
36592- * This program is distributed in the hope it will be useful, but WITHOUT
36593- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
36594- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
36595- * more details.
36596- *
36597- * You should have received a copy of the GNU General Public License along
36598- * with this program. If not, see <http://www.gnu.org/licenses/>.
36599- *
36600- * The full GNU General Public License is included in this distribution in
36601- * the file called "COPYING".
36602- *
36603- * Contact Information:
36604- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
36605- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
36606- *
36607- ******************************************************************************/
36608+/* SPDX-License-Identifier: GPL-2.0 */
36609+/* Copyright(c) 2013 - 2020 Intel Corporation. */
36610
36611 #ifndef _I40E_REGISTER_H_
36612 #define _I40E_REGISTER_H_
36613
36614-#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
36615+#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
36616 #define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
36617-#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
36618-#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
36619+#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
36620+#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
36621 #define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
36622-#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
36623-#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
36624+#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
36625+#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
36626 #define I40E_GL_ARQH_ARQH_SHIFT 0
36627-#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
36628-#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
36629+#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
36630+#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
36631 #define I40E_GL_ARQT_ARQT_SHIFT 0
36632-#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
36633-#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
36634+#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
36635+#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
36636 #define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
36637-#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
36638-#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
36639+#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
36640+#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
36641 #define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
36642-#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
36643-#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
36644+#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
36645+#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
36646 #define I40E_GL_ATQH_ATQH_SHIFT 0
36647 #define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
36648 #define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
36649@@ -81,7 +58,7 @@
36650 #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
36651 #define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
36652 #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
36653-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
36654+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
36655 #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
36656 #define I40E_PF_ARQT_ARQT_SHIFT 0
36657 #define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
36658@@ -104,7 +81,7 @@
36659 #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
36660 #define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
36661 #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
36662-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
36663+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
36664 #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
36665 #define I40E_PF_ATQT_ATQT_SHIFT 0
36666 #define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
36667@@ -131,7 +108,7 @@
36668 #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
36669 #define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
36670 #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
36671-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
36672+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
36673 #define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
36674 #define I40E_VF_ARQT_MAX_INDEX 127
36675 #define I40E_VF_ARQT_ARQT_SHIFT 0
36676@@ -159,7 +136,7 @@
36677 #define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
36678 #define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
36679 #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
36680-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
36681+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
36682 #define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
36683 #define I40E_VF_ATQT_MAX_INDEX 127
36684 #define I40E_VF_ATQT_ATQT_SHIFT 0
36685@@ -282,7 +259,7 @@
36686 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
36687 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
36688 #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
36689-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
36690+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
36691 #define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
36692 #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
36693 #define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
36694@@ -386,6 +363,14 @@
36695 #define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
36696 #define I40E_GL_FWSTS_FWS1B_SHIFT 16
36697 #define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
36698+#define I40E_GL_FWSTS_FWS1B_EMPR_0 I40E_MASK(0x20, I40E_GL_FWSTS_FWS1B_SHIFT)
36699+#define I40E_GL_FWSTS_FWS1B_EMPR_10 I40E_MASK(0x2A, I40E_GL_FWSTS_FWS1B_SHIFT)
36700+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT)
36701+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT)
36702+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT)
36703+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT)
36704+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT)
36705+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT)
36706 #define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
36707 #define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
36708 #define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
36709@@ -526,7 +511,7 @@
36710 #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
36711 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
36712 #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
36713-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
36714+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
36715 #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
36716 #define I40E_GLGEN_MSRWD_MAX_INDEX 3
36717 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
36718@@ -1265,14 +1250,14 @@
36719 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
36720 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
36721 #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
36722-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
36723+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
36724 #define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
36725 #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
36726 #define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
36727 #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
36728 #define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
36729 #define I40E_PFLAN_QALLOC_VALID_SHIFT 31
36730-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
36731+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
36732 #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
36733 #define I40E_QRX_ENA_MAX_INDEX 1535
36734 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0
36735@@ -1681,7 +1666,7 @@
36736 #define I40E_GLNVM_SRCTL_START_SHIFT 30
36737 #define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
36738 #define I40E_GLNVM_SRCTL_DONE_SHIFT 31
36739-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
36740+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
36741 #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
36742 #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
36743 #define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
36744@@ -2794,7 +2779,7 @@
36745 #define I40E_GLV_RUPP_MAX_INDEX 383
36746 #define I40E_GLV_RUPP_RUPP_SHIFT 0
36747 #define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
36748-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
36749+#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
36750 #define I40E_GLV_TEPC_MAX_INDEX 383
36751 #define I40E_GLV_TEPC_TEPC_SHIFT 0
36752 #define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
36753@@ -2914,6 +2899,9 @@
36754 #define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
36755 #define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
36756 #define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
36757+#define I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT 17
36758+#define I40E_PRTTSYN_AUX_0_PTPFLAG_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT)
36759+#define I40E_PRTTSYN_AUX_0_PTP_OUT_SYNC_CLK_IO 0xF
36760 #define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
36761 #define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
36762 #define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
36763@@ -3048,7 +3036,7 @@
36764 #define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
36765 #define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
36766 #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
36767-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
36768+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
36769 #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
36770 #define I40E_VP_MDET_RX_MAX_INDEX 127
36771 #define I40E_VP_MDET_RX_VALID_SHIFT 0
36772@@ -3184,7 +3172,7 @@
36773 #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
36774 #define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
36775 #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
36776-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
36777+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
36778 #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
36779 #define I40E_VF_ARQT1_ARQT_SHIFT 0
36780 #define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
36781@@ -3207,7 +3195,7 @@
36782 #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
36783 #define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
36784 #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
36785-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
36786+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
36787 #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
36788 #define I40E_VF_ATQT1_ATQT_SHIFT 0
36789 #define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
36790@@ -5290,6 +5278,87 @@
36791 #define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
36792 #define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
36793 #define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
36794+/* Flow Director */
36795+#define I40E_REG_INSET_L2_DMAC_SHIFT 60
36796+#define I40E_REG_INSET_L2_DMAC_MASK I40E_MASK(0xEULL, I40E_REG_INSET_L2_DMAC_SHIFT)
36797+#define I40E_REG_INSET_L2_SMAC_SHIFT 56
36798+#define I40E_REG_INSET_L2_SMAC_MASK I40E_MASK(0x1CULL, I40E_REG_INSET_L2_SMAC_SHIFT)
36799+#define I40E_REG_INSET_L2_OUTER_VLAN_SHIFT 26
36800+#define I40E_REG_INSET_L2_OUTER_VLAN_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L2_OUTER_VLAN_SHIFT)
36801+#define I40E_REG_INSET_L2_INNER_VLAN_SHIFT 55
36802+#define I40E_REG_INSET_L2_INNER_VLAN_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L2_INNER_VLAN_SHIFT)
36803+#define I40E_REG_INSET_TUNNEL_VLAN_SHIFT 56
36804+#define I40E_REG_INSET_TUNNEL_VLAN_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_TUNNEL_VLAN_SHIFT)
36805+#define I40E_REG_INSET_L3_SRC_IP4_SHIFT 47
36806+#define I40E_REG_INSET_L3_SRC_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_L3_SRC_IP4_SHIFT)
36807+#define I40E_REG_INSET_L3_DST_IP4_SHIFT 35
36808+#define I40E_REG_INSET_L3_DST_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_L3_DST_IP4_SHIFT)
36809+#define I40E_X722_REG_INSET_L3_SRC_IP4_SHIFT 49
36810+#define I40E_X722_REG_INSET_L3_SRC_IP4_MASK I40E_MASK(0x3ULL, I40E_X722_REG_INSET_L3_SRC_IP4_SHIFT)
36811+#define I40E_X722_REG_INSET_L3_DST_IP4_SHIFT 41
36812+#define I40E_X722_REG_INSET_L3_DST_IP4_MASK I40E_MASK(0x3ULL, I40E_X722_REG_INSET_L3_DST_IP4_SHIFT)
36813+#define I40E_X722_REG_INSET_L3_IP4_PROTO_SHIFT 52
36814+#define I40E_X722_REG_INSET_L3_IP4_PROTO_MASK I40E_MASK(0x1ULL, I40E_X722_REG_INSET_L3_IP4_PROTO_SHIFT)
36815+#define I40E_X722_REG_INSET_L3_IP4_TTL_SHIFT 52
36816+#define I40E_X722_REG_INSET_L3_IP4_TTL_MASK I40E_MASK(0x1ULL, I40E_X722_REG_INSET_L3_IP4_TTL_SHIFT)
36817+#define I40E_REG_INSET_L3_IP4_TOS_SHIFT 54
36818+#define I40E_REG_INSET_L3_IP4_TOS_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP4_TOS_SHIFT)
36819+#define I40E_REG_INSET_L3_IP4_PROTO_SHIFT 50
36820+#define I40E_REG_INSET_L3_IP4_PROTO_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP4_PROTO_SHIFT)
36821+#define I40E_REG_INSET_L3_IP4_TTL_SHIFT 50
36822+#define I40E_REG_INSET_L3_IP4_TTL_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP4_TTL_SHIFT)
36823+#define I40E_REG_INSET_L3_SRC_IP6_SHIFT 43
36824+#define I40E_REG_INSET_L3_SRC_IP6_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_L3_SRC_IP6_SHIFT)
36825+#define I40E_REG_INSET_L3_DST_IP6_SHIFT 35
36826+#define I40E_REG_INSET_L3_DST_IP6_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_L3_DST_IP6_SHIFT)
36827+#define I40E_REG_INSET_L3_IP6_TC_SHIFT 54
36828+#define I40E_REG_INSET_L3_IP6_TC_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP6_TC_SHIFT)
36829+#define I40E_REG_INSET_L3_IP6_NEXT_HDR_SHIFT 51
36830+#define I40E_REG_INSET_L3_IP6_NEXT_HDR_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP6_NEXT_HDR_SHIFT)
36831+#define I40E_REG_INSET_L3_IP6_HOP_LIMIT_SHIFT 51
36832+#define I40E_REG_INSET_L3_IP6_HOP_LIMIT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP6_HOP_LIMIT_SHIFT)
36833+#define I40E_REG_INSET_L4_SRC_PORT_SHIFT 34
36834+#define I40E_REG_INSET_L4_SRC_PORT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L4_SRC_PORT_SHIFT)
36835+#define I40E_REG_INSET_L4_DST_PORT_SHIFT 33
36836+#define I40E_REG_INSET_L4_DST_PORT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L4_DST_PORT_SHIFT)
36837+#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG_SHIFT 31
36838+#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG_SHIFT)
36839+#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC_SHIFT 22
36840+#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC_MASK I40E_MASK(0x7ULL, I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC_SHIFT)
36841+#define I40E_REG_INSET_TUNNEL_L2_INNER_SRC_MAC_SHIFT 11
36842+#define I40E_REG_INSET_TUNNEL_L2_INNER_SRC_MAC_MASK I40E_MASK(0x7ULL, I40E_REG_INSET_TUNNEL_L2_INNER_SRC_MAC_SHIFT)
36843+#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT_SHIFT 21
36844+#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT_SHIFT)
36845+#define I40E_REG_INSET_TUNNEL_ID_SHIFT 18
36846+#define I40E_REG_INSET_TUNNEL_ID_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_TUNNEL_ID_SHIFT)
36847+#define I40E_REG_INSET_LAST_ETHER_TYPE_SHIFT 14
36848+#define I40E_REG_INSET_LAST_ETHER_TYPE_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_LAST_ETHER_TYPE_SHIFT)
36849+#define I40E_REG_INSET_TUNNEL_L3_SRC_IP4_SHIFT 8
36850+#define I40E_REG_INSET_TUNNEL_L3_SRC_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_TUNNEL_L3_SRC_IP4_SHIFT)
36851+#define I40E_REG_INSET_TUNNEL_L3_DST_IP4_SHIFT 6
36852+#define I40E_REG_INSET_TUNNEL_L3_DST_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_TUNNEL_L3_DST_IP4_SHIFT)
36853+#define I40E_REG_INSET_TUNNEL_L3_DST_IP6_SHIFT 6
36854+#define I40E_REG_INSET_TUNNEL_L3_DST_IP6_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_TUNNEL_L3_DST_IP6_SHIFT)
36855+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1_SHIFT 13
36856+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD1_SHIFT)
36857+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2_SHIFT 12
36858+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD2_SHIFT)
36859+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3_SHIFT 11
36860+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD3_SHIFT)
36861+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4_SHIFT 10
36862+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD4_SHIFT)
36863+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5_SHIFT 9
36864+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD5_SHIFT)
36865+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6_SHIFT 8
36866+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD6_SHIFT)
36867+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7_SHIFT 7
36868+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD7_SHIFT)
36869+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8_SHIFT 6
36870+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD8_SHIFT)
36871+#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS_SHIFT 6
36872+#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_FLEX_PAYLOAD_WORDS_SHIFT)
36873+#define I40E_REG_INSET_MASK_DEFAULT 0x0000000000000000ULL
36874+
36875 #define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
36876 #define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
36877 #define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
36878@@ -5350,4 +5419,5 @@
36879 #define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
36880 #define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
36881 #define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
36882+
36883 #endif /* _I40E_REGISTER_H_ */
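
A pattern worth noting in the register header above: many bit-31 masks change from I40E_MASK(0x1, ...) to I40E_MASK(0x1u, ...). Assuming I40E_MASK ultimately left-shifts its first argument, the u suffix keeps the shift in unsigned arithmetic; shifting a plain signed int 0x1 into bit 31 is undefined behaviour in C. A standalone illustration, not taken from the patch (MASK is a hypothetical stand-in for I40E_MASK):

    #include <stdint.h>
    #include <stdio.h>

    #define MASK(val, shift) ((val) << (shift))

    int main(void)
    {
            /* MASK(0x1, 31) would shift a signed int past its value range,
             * which is undefined behaviour (UBSan reports it), so it is
             * left commented out here:
             */
            /* uint32_t bad = MASK(0x1, 31); */

            /* With the unsigned literal the shift is well defined. */
            uint32_t ok = MASK(0x1u, 31);

            printf("0x%08x\n", ok);  /* prints 0x80000000 */
            return 0;
    }
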
36884diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
36885index 5f9cac55a..ab12f1311 100644
36886--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
36887+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
36888@@ -1,28 +1,5 @@
36889-/*******************************************************************************
36890- *
36891- * Intel Ethernet Controller XL710 Family Linux Driver
36892- * Copyright(c) 2013 - 2014 Intel Corporation.
36893- *
36894- * This program is free software; you can redistribute it and/or modify it
36895- * under the terms and conditions of the GNU General Public License,
36896- * version 2, as published by the Free Software Foundation.
36897- *
36898- * This program is distributed in the hope it will be useful, but WITHOUT
36899- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
36900- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
36901- * more details.
36902- *
36903- * You should have received a copy of the GNU General Public License along
36904- * with this program. If not, see <http://www.gnu.org/licenses/>.
36905- *
36906- * The full GNU General Public License is included in this distribution in
36907- * the file called "COPYING".
36908- *
36909- * Contact Information:
36910- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
36911- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
36912- *
36913- ******************************************************************************/
36914+/* SPDX-License-Identifier: GPL-2.0 */
36915+/* Copyright(c) 2013 - 2020 Intel Corporation. */
36916
36917 #ifndef _I40E_STATUS_H_
36918 #define _I40E_STATUS_H_
36919@@ -95,6 +72,7 @@ enum i40e_status_code {
36920 I40E_ERR_NOT_READY = -63,
36921 I40E_NOT_SUPPORTED = -64,
36922 I40E_ERR_FIRMWARE_API_VERSION = -65,
36923+ I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
36924 };
36925
36926 #endif /* _I40E_STATUS_H_ */
36927diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
36928index d3e55f54a..27e47ccf3 100644
36929--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
36930+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
36931@@ -1,29 +1,23 @@
36932-/*******************************************************************************
36933- *
36934- * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
36935- * Copyright(c) 2013 - 2017 Intel Corporation.
36936- *
36937- * This program is free software; you can redistribute it and/or modify it
36938- * under the terms and conditions of the GNU General Public License,
36939- * version 2, as published by the Free Software Foundation.
36940- *
36941- * This program is distributed in the hope it will be useful, but WITHOUT
36942- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
36943- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
36944- * more details.
36945- *
36946- * The full GNU General Public License is included in this distribution in
36947- * the file called "COPYING".
36948- *
36949- * Contact Information:
36950- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
36951- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
36952- *
36953- ******************************************************************************/
36954+/* SPDX-License-Identifier: GPL-2.0 */
36955+/* Copyright(c) 2013 - 2020 Intel Corporation. */
36956
36957-/* Modeled on trace-events-sample.h */
36958+#ifndef CONFIG_TRACEPOINTS
36959+#if !defined(_I40E_TRACE_H_)
36960+#define _I40E_TRACE_H_
36961+/* If the Linux kernel tracepoints are not available then the i40e_trace*
36962+ * macros become nops.
36963+ */
36964
36965-/* The trace subsystem name for i40e will be "i40e".
36966+#define i40e_trace(trace_name, args...)
36967+#define i40e_trace_enabled(trace_name) (0)
36968+#endif /* !defined(_I40E_TRACE_H_) */
36969+#else /* CONFIG_TRACEPOINTS */
36970+/*
36971+ * Modeled on trace-events-sample.h
36972+ */
36973+
36974+/*
36975+ * The trace subsystem name for i40e will be "i40e".
36976 *
36977 * This file is named i40e_trace.h.
36978 *
36979@@ -34,7 +28,8 @@
36980 #undef TRACE_SYSTEM
36981 #define TRACE_SYSTEM i40e
36982
36983-/* See trace-events-sample.h for a detailed description of why this
36984+/*
36985+ * See trace-events-sample.h for a detailed description of why this
36986 * guard clause is different from most normal include files.
36987 */
36988 #if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
36989@@ -69,7 +64,8 @@
36990
36991 #define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)()
36992
36993-/* Events common to PF and VF. Corresponding versions will be defined
36994+/*
36995+ * Events common to PF and VF. Corresponding versions will be defined
36996 * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace()
36997 * macro above will select the right trace point name for the driver
36998 * being built from shared code.
36999@@ -85,7 +81,8 @@ DECLARE_EVENT_CLASS(
37000
37001 TP_ARGS(ring, desc, buf),
37002
37003- /* The convention here is to make the first fields in the
37004+ /*
37005+ * The convention here is to make the first fields in the
37006 * TP_STRUCT match the TP_PROTO exactly. This enables the use
37007 * of the args struct generated by the tplist tool (from the
37008 * bcc-tools package) to be used for those fields. To access
37009@@ -132,7 +129,7 @@ DECLARE_EVENT_CLASS(
37010 i40e_rx_template,
37011
37012 TP_PROTO(struct i40e_ring *ring,
37013- union i40e_32byte_rx_desc *desc,
37014+ union i40e_rx_desc *desc,
37015 struct sk_buff *skb),
37016
37017 TP_ARGS(ring, desc, skb),
37018@@ -160,7 +157,7 @@ DECLARE_EVENT_CLASS(
37019 DEFINE_EVENT(
37020 i40e_rx_template, i40e_clean_rx_irq,
37021 TP_PROTO(struct i40e_ring *ring,
37022- union i40e_32byte_rx_desc *desc,
37023+ union i40e_rx_desc *desc,
37024 struct sk_buff *skb),
37025
37026 TP_ARGS(ring, desc, skb));
37027@@ -168,7 +165,7 @@ DEFINE_EVENT(
37028 DEFINE_EVENT(
37029 i40e_rx_template, i40e_clean_rx_irq_rx,
37030 TP_PROTO(struct i40e_ring *ring,
37031- union i40e_32byte_rx_desc *desc,
37032+ union i40e_rx_desc *desc,
37033 struct sk_buff *skb),
37034
37035 TP_ARGS(ring, desc, skb));
37036@@ -213,7 +210,9 @@ DEFINE_EVENT(
37037
37038 TP_ARGS(skb, ring));
37039
37040-/* Events unique to the PF. */
37041+/*
37042+ * Events unique to the PF.
37043+ */
37044
37045 #endif /* _I40E_TRACE_H_ */
37046 /* This must be outside ifdef _I40E_TRACE_H */
37047@@ -227,3 +226,4 @@ DEFINE_EVENT(
37048 #undef TRACE_INCLUDE_FILE
37049 #define TRACE_INCLUDE_FILE i40e_trace
37050 #include <trace/define_trace.h>
37051+#endif /* CONFIG_TRACEPOINTS */
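
The i40e_trace.h changes above gate the tracepoint plumbing behind CONFIG_TRACEPOINTS, so on kernels built without tracepoints i40e_trace() and i40e_trace_enabled() become no-ops and the call sites need no #ifdefs of their own. A minimal sketch of the same stub pattern, using generic names rather than the driver's actual macros:

    /* trace_stub_example.h: tracing calls compile away when disabled. */
    #ifndef TRACE_STUB_EXAMPLE_H
    #define TRACE_STUB_EXAMPLE_H

    #ifdef CONFIG_TRACEPOINTS
    #define drv_trace(name, ...)      trace_drv_##name(__VA_ARGS__)
    #define drv_trace_enabled(name)   trace_drv_##name##_enabled()
    #else
    /* Without tracepoints the macros expand to nothing, so callers
     * compile unchanged and the calls vanish entirely.
     */
    #define drv_trace(name, ...)      do { } while (0)
    #define drv_trace_enabled(name)   (0)
    #endif

    #endif /* TRACE_STUB_EXAMPLE_H */
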
37052diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
37053index 3c07ff171..96bc531ac 100644
37054--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
37055+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
37056@@ -1,32 +1,10 @@
37057-/*******************************************************************************
37058- *
37059- * Intel Ethernet Controller XL710 Family Linux Driver
37060- * Copyright(c) 2013 - 2016 Intel Corporation.
37061- *
37062- * This program is free software; you can redistribute it and/or modify it
37063- * under the terms and conditions of the GNU General Public License,
37064- * version 2, as published by the Free Software Foundation.
37065- *
37066- * This program is distributed in the hope it will be useful, but WITHOUT
37067- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
37068- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
37069- * more details.
37070- *
37071- * You should have received a copy of the GNU General Public License along
37072- * with this program. If not, see <http://www.gnu.org/licenses/>.
37073- *
37074- * The full GNU General Public License is included in this distribution in
37075- * the file called "COPYING".
37076- *
37077- * Contact Information:
37078- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
37079- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
37080- *
37081- ******************************************************************************/
37082+// SPDX-License-Identifier: GPL-2.0
37083+/* Copyright(c) 2013 - 2020 Intel Corporation. */
37084
37085 #include <linux/prefetch.h>
37086-#include <net/busy_poll.h>
37087-#include <linux/bpf_trace.h>
37088+#ifdef HAVE_XDP_SUPPORT
37089+#include <net/xdp.h>
37090+#endif
37091 #include "i40e.h"
37092 #include "i40e_trace.h"
37093 #include "i40e_prototype.h"
37094@@ -67,9 +45,6 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
37095 flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
37096 (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
37097
37098- flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
37099- (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
37100-
37101 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
37102 (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
37103
37104@@ -152,6 +127,7 @@ static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
37105 /* grab the next descriptor */
37106 i = tx_ring->next_to_use;
37107 first = &tx_ring->tx_bi[i];
37108+
37109 i40e_fdir(tx_ring, fdir_data, add);
37110
37111 /* Now program a dummy descriptor */
37112@@ -192,7 +168,6 @@ dma_fail:
37113 }
37114
37115 #define IP_HEADER_OFFSET 14
37116-#define I40E_UDPIP_DUMMY_PACKET_LEN 42
37117 /**
37118 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
37119 * @vsi: pointer to the targeted VSI
37120@@ -264,7 +239,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
37121 return 0;
37122 }
37123
37124-#define I40E_TCPIP_DUMMY_PACKET_LEN 54
37125 /**
37126 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
37127 * @vsi: pointer to the targeted VSI
37128@@ -285,7 +259,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
37129 /* Dummy packet */
37130 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
37131 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
37132- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
37133+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x50, 0x11,
37134 0x0, 0x72, 0, 0, 0, 0};
37135
37136 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
37137@@ -334,7 +308,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
37138 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
37139 I40E_DEBUG_FD & pf->hw.debug_mask)
37140 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
37141- pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
37142+ set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
37143 } else {
37144 pf->fd_tcp4_filter_cnt--;
37145 }
37146@@ -342,7 +316,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
37147 return 0;
37148 }
37149
37150-#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
37151 /**
37152 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
37153 * a specific flow spec
37154@@ -416,7 +389,6 @@ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
37155 return 0;
37156 }
37157
37158-#define I40E_IP_DUMMY_PACKET_LEN 34
37159 /**
37160 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
37161 * a specific flow spec
37162@@ -465,9 +437,6 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
37163 dev_info(&pf->pdev->dev,
37164 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
37165 fd_data->pctype, fd_data->fd_id, ret);
37166- /* The packet buffer wasn't added to the ring so we
37167- * need to free it now.
37168- */
37169 kfree(raw_packet);
37170 return -EOPNOTSUPP;
37171 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
37172@@ -493,7 +462,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
37173 /**
37174 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
37175 * @vsi: pointer to the targeted VSI
37176- * @cmd: command to get or set RX flow classification rules
37177+ * @input: filter to add or delete
37178 * @add: true adds a filter, false removes it
37179 *
37180 **/
37181@@ -503,7 +472,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
37182 struct i40e_pf *pf = vsi->back;
37183 int ret;
37184
37185- switch (input->flow_type & ~FLOW_EXT) {
37186+ switch (input->flow_type) {
37187 case TCP_V4_FLOW:
37188 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
37189 break;
37190@@ -592,8 +561,14 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
37191 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
37192
37193 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
37194- pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
37195- pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
37196+ test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
37197+ /* These set_bit() calls aren't atomic with the
37198+	 * test_bit() here, but worst case we potentially
37199+ * disable ATR and queue a flush right after SB
37200+ * support is re-enabled. That shouldn't cause an
37201+ * issue in practice
37202+ */
37203+ set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
37204 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
37205 }
37206
37207@@ -606,11 +581,10 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
37208 */
37209 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
37210 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
37211- !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) {
37212- pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED;
37213+ !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
37214+ pf->state))
37215 if (I40E_DEBUG_FD & pf->hw.debug_mask)
37216 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
37217- }
37218 }
37219 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
37220 if (I40E_DEBUG_FD & pf->hw.debug_mask)
37221@@ -630,8 +604,14 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
37222 if (tx_buffer->skb) {
37223 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
37224 kfree(tx_buffer->raw_buf);
37225+#ifdef HAVE_XDP_SUPPORT
37226 else if (ring_is_xdp(ring))
37227+#ifdef HAVE_XDP_FRAME_STRUCT
37228+ xdp_return_frame(tx_buffer->xdpf);
37229+#else
37230 page_frag_free(tx_buffer->raw_buf);
37231+#endif
37232+#endif
37233 else
37234 dev_kfree_skb_any(tx_buffer->skb);
37235 if (dma_unmap_len(tx_buffer, len))
37236@@ -706,17 +686,23 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
37237
37238 /**
37239 * i40e_get_tx_pending - how many tx descriptors not processed
37240- * @tx_ring: the ring of descriptors
37241+ * @ring: the ring of descriptors
37242+ * @in_sw: use SW variables
37243 *
37244 * Since there is no access to the ring head register
37245 * in XL710, we need to use our local copies
37246 **/
37247-u32 i40e_get_tx_pending(struct i40e_ring *ring)
37248+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
37249 {
37250 u32 head, tail;
37251
37252- head = i40e_get_head(ring);
37253- tail = readl(ring->tail);
37254+ if (!in_sw) {
37255+ head = i40e_get_head(ring);
37256+ tail = readl(ring->tail);
37257+ } else {
37258+ head = ring->next_to_clean;
37259+ tail = ring->next_to_use;
37260+ }
37261
37262 if (head != tail)
37263 return (head < tail) ?
37264@@ -725,6 +711,59 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring)
37265 return 0;
37266 }
37267
37268+/**
37269+ * i40e_detect_recover_hung - Function to detect and recover hung_queues
37270+ * @vsi: pointer to vsi struct with tx queues
37271+ *
37272+ * VSI has netdev and netdev has TX queues. This function is to check each of
37273+ * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
37274+ **/
37275+void i40e_detect_recover_hung(struct i40e_vsi *vsi)
37276+{
37277+ struct i40e_ring *tx_ring = NULL;
37278+ struct net_device *netdev;
37279+ unsigned int i;
37280+ int packets;
37281+
37282+ if (!vsi)
37283+ return;
37284+
37285+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
37286+ return;
37287+
37288+ netdev = vsi->netdev;
37289+ if (!netdev)
37290+ return;
37291+
37292+ if (!netif_carrier_ok(netdev))
37293+ return;
37294+
37295+ for (i = 0; i < vsi->num_queue_pairs; i++) {
37296+ tx_ring = vsi->tx_rings[i];
37297+ if (tx_ring && tx_ring->desc) {
37298+ /* If packet counter has not changed the queue is
37299+ * likely stalled, so force an interrupt for this
37300+ * queue.
37301+ *
37302+ * prev_pkt_ctr would be negative if there was no
37303+ * pending work.
37304+ */
37305+ packets = tx_ring->stats.packets & INT_MAX;
37306+ if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
37307+ i40e_force_wb(vsi, tx_ring->q_vector);
37308+ continue;
37309+ }
37310+
37311+ /* Memory barrier between read of packet count and call
37312+ * to i40e_get_tx_pending()
37313+ */
37314+ smp_rmb();
37315+ tx_ring->tx_stats.prev_pkt_ctr =
37316+ i40e_get_tx_pending(tx_ring, true) ? packets : -1;
37317+ }
37318+ }
37319+}
37320+
37321 #define WB_STRIDE 4
37322
37323 /**
37324@@ -738,7 +777,7 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring)
37325 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
37326 struct i40e_ring *tx_ring, int napi_budget)
37327 {
37328- u16 i = tx_ring->next_to_clean;
37329+ int i = tx_ring->next_to_clean;
37330 struct i40e_tx_buffer *tx_buf;
37331 struct i40e_tx_desc *tx_head;
37332 struct i40e_tx_desc *tx_desc;
37333@@ -759,7 +798,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
37334 break;
37335
37336 /* prevent any other reads prior to eop_desc */
37337- smp_rmb();
37338+ read_barrier_depends();
37339
37340 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
37341 /* we have caught up to head, no work left to do */
37342@@ -774,9 +813,15 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
37343 total_packets += tx_buf->gso_segs;
37344
37345 /* free the skb/XDP data */
37346+#ifdef HAVE_XDP_SUPPORT
37347 if (ring_is_xdp(tx_ring))
37348+#ifdef HAVE_XDP_FRAME_STRUCT
37349+ xdp_return_frame(tx_buf->xdpf);
37350+#else
37351 page_frag_free(tx_buf->raw_buf);
37352+#endif
37353 else
37354+#endif
37355 napi_consume_skb(tx_buf->skb, napi_budget);
37356
37357 /* unmap skb header data */
37358@@ -844,7 +889,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
37359 * them to be written back in case we stay in NAPI.
37360 * In this mode on X722 we do not enable Interrupt.
37361 */
37362- unsigned int j = i40e_get_tx_pending(tx_ring);
37363+ unsigned int j = i40e_get_tx_pending(tx_ring, false);
37364
37365 if (budget &&
37366 ((j / WB_STRIDE) == 0) && (j > 0) &&
37367@@ -902,8 +947,8 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
37368 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
37369
37370 wr32(&vsi->back->hw,
37371- I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
37372- val);
37373+ I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
37374+ val);
37375 } else {
37376 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
37377 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
37378@@ -929,8 +974,7 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
37379 /* allow 00 to be written to the index */
37380
37381 wr32(&vsi->back->hw,
37382- I40E_PFINT_DYN_CTLN(q_vector->v_idx +
37383- vsi->base_vector - 1), val);
37384+ I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
37385 } else {
37386 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
37387 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
37388@@ -942,101 +986,244 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
37389 }
37390 }
37391
37392+static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
37393+ struct i40e_ring_container *rc)
37394+{
37395+ return &q_vector->rx == rc;
37396+}
37397+
37398+static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
37399+{
37400+ unsigned int divisor;
37401+
37402+ switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
37403+ case I40E_LINK_SPEED_40GB:
37404+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
37405+ break;
37406+ case I40E_LINK_SPEED_25GB:
37407+ case I40E_LINK_SPEED_20GB:
37408+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
37409+ break;
37410+ default:
37411+ case I40E_LINK_SPEED_10GB:
37412+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
37413+ break;
37414+ case I40E_LINK_SPEED_1GB:
37415+ case I40E_LINK_SPEED_100MB:
37416+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
37417+ break;
37418+ }
37419+
37420+ return divisor;
37421+}
37422+
37423 /**
37424- * i40e_set_new_dynamic_itr - Find new ITR level
37425+ * i40e_update_itr - update the dynamic ITR value based on statistics
37426+ * @q_vector: structure containing interrupt and ring information
37427 * @rc: structure containing ring performance data
37428 *
37429- * Returns true if ITR changed, false if not
37430- *
37431- * Stores a new ITR value based on packets and byte counts during
37432- * the last interrupt. The advantage of per interrupt computation
37433- * is faster updates and more accurate ITR for the current traffic
37434- * pattern. Constants in this function were computed based on
37435- * theoretical maximum wire speed and thresholds were set based on
37436- * testing data as well as attempting to minimize response time
37437+ * Stores a new ITR value based on packets and byte
37438+ * counts during the last interrupt. The advantage of per interrupt
37439+ * computation is faster updates and more accurate ITR for the current
37440+ * traffic pattern. Constants in this function were computed
37441+ * based on theoretical maximum wire speed and thresholds were set based
37442+ * on testing data as well as attempting to minimize response time
37443 * while increasing bulk throughput.
37444 **/
37445-static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
37446+static void i40e_update_itr(struct i40e_q_vector *q_vector,
37447+ struct i40e_ring_container *rc)
37448 {
37449- enum i40e_latency_range new_latency_range = rc->latency_range;
37450- u32 new_itr = rc->itr;
37451- int bytes_per_int;
37452- unsigned int usecs, estimated_usecs;
37453+ unsigned int avg_wire_size, packets, bytes, itr;
37454+ unsigned long next_update = jiffies;
37455
37456- if (rc->total_packets == 0 || !rc->itr)
37457- return false;
37458+ /* If we don't have any rings just leave ourselves set for maximum
37459+ * possible latency so we take ourselves out of the equation.
37460+ */
37461+ if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
37462+ return;
37463
37464- usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
37465- bytes_per_int = rc->total_bytes / usecs;
37466+ /* For Rx we want to push the delay up and default to low latency.
37467+ * for Tx we want to pull the delay down and default to high latency.
37468+ */
37469+ itr = i40e_container_is_rx(q_vector, rc) ?
37470+ I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
37471+ I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
37472+
37473+ /* If we didn't update within up to 1 - 2 jiffies we can assume
37474+ * that either packets are coming in so slow there hasn't been
37475+ * any work, or that there is so much work that NAPI is dealing
37476+ * with interrupt moderation and we don't need to do anything.
37477+ */
37478+ if (time_after(next_update, rc->next_update))
37479+ goto clear_counts;
37480+
37481+ /* If itr_countdown is set it means we programmed an ITR within
37482+ * the last 4 interrupt cycles. This has a side effect of us
37483+ * potentially firing an early interrupt. In order to work around
37484+ * this we need to throw out any data received for a few
37485+ * interrupts following the update.
37486+ */
37487+ if (q_vector->itr_countdown) {
37488+ itr = rc->target_itr;
37489+ goto clear_counts;
37490+ }
37491
37492- /* The calculations in this algorithm depend on interrupts actually
37493- * firing at the ITR rate. This may not happen if the packet rate is
37494- * really low, or if we've been napi polling. Check to make sure
37495- * that's not the case before we continue.
37496+ packets = rc->total_packets;
37497+ bytes = rc->total_bytes;
37498+
37499+ if (i40e_container_is_rx(q_vector, rc)) {
37500+ /* If Rx there are 1 to 4 packets and bytes are less than
37501+ * 9000 assume insufficient data to use bulk rate limiting
37502+ * approach unless Tx is already in bulk rate limiting. We
37503+ * are likely latency driven.
37504+ */
37505+ if (packets && packets < 4 && bytes < 9000 &&
37506+ (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
37507+ itr = I40E_ITR_ADAPTIVE_LATENCY;
37508+ goto adjust_by_size;
37509+ }
37510+ } else if (packets < 4) {
37511+ /* If we have Tx and Rx ITR maxed and Tx ITR is running in
37512+ * bulk mode and we are receiving 4 or fewer packets just
37513+ * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
37514+ * that the Rx can relax.
37515+ */
37516+ if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
37517+ (q_vector->rx.target_itr & I40E_ITR_MASK) ==
37518+ I40E_ITR_ADAPTIVE_MAX_USECS)
37519+ goto clear_counts;
37520+ } else if (packets > 32) {
37521+ /* If we have processed over 32 packets in a single interrupt
37522+ * for Tx assume we need to switch over to "bulk" mode.
37523+ */
37524+ rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
37525+ }
37526+
37527+ /* We have no packets to actually measure against. This means
37528+ * either one of the other queues on this vector is active or
37529+ * we are a Tx queue doing TSO with too high of an interrupt rate.
37530+ *
37531+ * Between 4 and 56 we can assume that our current interrupt delay
37532+ * is only slightly too low. As such we should increase it by a small
37533+ * fixed amount.
37534 */
37535- estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update);
37536- if (estimated_usecs > usecs) {
37537- new_latency_range = I40E_LOW_LATENCY;
37538- goto reset_latency;
37539+ if (packets < 56) {
37540+ itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
37541+ if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
37542+ itr &= I40E_ITR_ADAPTIVE_LATENCY;
37543+ itr += I40E_ITR_ADAPTIVE_MAX_USECS;
37544+ }
37545+ goto clear_counts;
37546+ }
37547+
37548+ if (packets <= 256) {
37549+ itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
37550+ itr &= I40E_ITR_MASK;
37551+
37552+ /* Between 56 and 112 is our "goldilocks" zone where we are
37553+ * working out "just right". Just report that our current
37554+ * ITR is good for us.
37555+ */
37556+ if (packets <= 112)
37557+ goto clear_counts;
37558+
37559+ /* If packet count is 128 or greater we are likely looking
37560+ * at a slight overrun of the delay we want. Try halving
37561+ * our delay to see if that will cut the number of packets
37562+ * in half per interrupt.
37563+ */
37564+ itr /= 2;
37565+ itr &= I40E_ITR_MASK;
37566+ if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
37567+ itr = I40E_ITR_ADAPTIVE_MIN_USECS;
37568+
37569+ goto clear_counts;
37570 }
37571
37572- /* simple throttlerate management
37573- * 0-10MB/s lowest (50000 ints/s)
37574- * 10-20MB/s low (20000 ints/s)
37575- * 20-1249MB/s bulk (18000 ints/s)
37576+ /* The paths below assume we are dealing with a bulk ITR since
37577+ * number of packets is greater than 256. We are just going to have
37578+ * to compute a value and try to bring the count under control,
37579+ * though for smaller packet sizes there isn't much we can do as
37580+ * NAPI polling will likely be kicking in sooner rather than later.
37581+ */
37582+ itr = I40E_ITR_ADAPTIVE_BULK;
37583+
37584+adjust_by_size:
37585+ /* If packet counts are 256 or greater we can assume we have a gross
37586+ * overestimation of what the rate should be. Instead of trying to fine
37587+ * tune it just use the formula below to try and dial in an exact value
37588+ * give the current packet size of the frame.
37589+ */
37590+ avg_wire_size = bytes / packets;
37591+
37592+ /* The following is a crude approximation of:
37593+ * wmem_default / (size + overhead) = desired_pkts_per_int
37594+ * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
37595+ * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
37596 *
37597- * The math works out because the divisor is in 10^(-6) which
37598- * turns the bytes/us input value into MB/s values, but
37599- * make sure to use usecs, as the register values written
37600- * are in 2 usec increments in the ITR registers, and make sure
37601- * to use the smoothed values that the countdown timer gives us.
37602+ * Assuming wmem_default is 212992 and overhead is 640 bytes per
37603+ * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
37604+ * formula down to
37605+ *
37606+ * (170 * (size + 24)) / (size + 640) = ITR
37607+ *
37608+ * We first do some math on the packet size and then finally bitshift
37609+ * by 8 after rounding up. We also have to account for PCIe link speed
37610+ * difference as ITR scales based on this.
37611 */
37612- switch (new_latency_range) {
37613- case I40E_LOWEST_LATENCY:
37614- if (bytes_per_int > 10)
37615- new_latency_range = I40E_LOW_LATENCY;
37616- break;
37617- case I40E_LOW_LATENCY:
37618- if (bytes_per_int > 20)
37619- new_latency_range = I40E_BULK_LATENCY;
37620- else if (bytes_per_int <= 10)
37621- new_latency_range = I40E_LOWEST_LATENCY;
37622- break;
37623- case I40E_BULK_LATENCY:
37624- default:
37625- if (bytes_per_int <= 20)
37626- new_latency_range = I40E_LOW_LATENCY;
37627- break;
37628+ if (avg_wire_size <= 60) {
37629+ /* Start at 250k ints/sec */
37630+ avg_wire_size = 4096;
37631+ } else if (avg_wire_size <= 380) {
37632+ /* 250K ints/sec to 60K ints/sec */
37633+ avg_wire_size *= 40;
37634+ avg_wire_size += 1696;
37635+ } else if (avg_wire_size <= 1084) {
37636+ /* 60K ints/sec to 36K ints/sec */
37637+ avg_wire_size *= 15;
37638+ avg_wire_size += 11452;
37639+ } else if (avg_wire_size <= 1980) {
37640+ /* 36K ints/sec to 30K ints/sec */
37641+ avg_wire_size *= 5;
37642+ avg_wire_size += 22420;
37643+ } else {
37644+ /* plateau at a limit of 30K ints/sec */
37645+ avg_wire_size = 32256;
37646 }
37647
37648-reset_latency:
37649- rc->latency_range = new_latency_range;
37650+ /* If we are in low latency mode halve our delay which doubles the
37651+ * rate to somewhere between 100K to 16K ints/sec
37652+ */
37653+ if (itr & I40E_ITR_ADAPTIVE_LATENCY)
37654+ avg_wire_size /= 2;
37655
37656- switch (new_latency_range) {
37657- case I40E_LOWEST_LATENCY:
37658- new_itr = I40E_ITR_50K;
37659- break;
37660- case I40E_LOW_LATENCY:
37661- new_itr = I40E_ITR_20K;
37662- break;
37663- case I40E_BULK_LATENCY:
37664- new_itr = I40E_ITR_18K;
37665- break;
37666- default:
37667- break;
37668+ /* Resultant value is 256 times larger than it needs to be. This
37669+ * gives us room to adjust the value as needed to either increase
37670+ * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
37671+ *
37672+ * Use addition as we have already recorded the new latency flag
37673+ * for the ITR value.
37674+ */
37675+ itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
37676+ I40E_ITR_ADAPTIVE_MIN_INC;
37677+
37678+ if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
37679+ itr &= I40E_ITR_ADAPTIVE_LATENCY;
37680+ itr += I40E_ITR_ADAPTIVE_MAX_USECS;
37681 }
37682
37683+clear_counts:
37684+ /* write back value */
37685+ rc->target_itr = itr;
37686+
37687+ /* next update should occur within next jiffy */
37688+ rc->next_update = next_update + 1;
37689+
37690 rc->total_bytes = 0;
37691 rc->total_packets = 0;
37692- rc->last_itr_update = jiffies;
37693-
37694- if (new_itr != rc->itr) {
37695- rc->itr = new_itr;
37696- return true;
37697- }
37698- return false;
37699 }
37700
37701+#ifndef CONFIG_I40E_DISABLE_PACKET_SPLIT
37702 /**
37703 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
37704 * @rx_ring: rx descriptor ring to store buffers on
37705@@ -1063,6 +1250,33 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
37706 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
37707 }
37708
37709+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
37710+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
37711+/**
37712+ * i40e_reuse_rx_skb - Recycle unused skb and store it back on the ring
37713+ * @rx_ring: rx descriptor ring to store buffers on
37714+ * @old_buff: donor buffer to have skb reused
37715+ *
37716+ * Synchronizes skb for reuse by the adapter
37717+ **/
37718+static void i40e_reuse_rx_skb(struct i40e_ring *rx_ring,
37719+ struct i40e_rx_buffer *old_buff)
37720+{
37721+ struct i40e_rx_buffer *new_buff;
37722+ u16 nta = rx_ring->next_to_alloc;
37723+
37724+ new_buff = &rx_ring->rx_bi[nta];
37725+
37726+ /* update, and store next to alloc */
37727+ nta++;
37728+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
37729+
37730+ /* transfer page from old buffer to new buffer */
37731+ new_buff->dma = old_buff->dma;
37732+ new_buff->skb = old_buff->skb;
37733+}
37734+
37735+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
37736 /**
37737 * i40e_rx_is_programming_status - check for programming status descriptor
37738 * @qw: qword representing status_error_len in CPU ordering
37739@@ -1108,12 +1322,20 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
37740
37741 prefetch(I40E_RX_DESC(rx_ring, ntc));
37742
37743+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
37744+ /* place unused page back on the ring */
37745+ i40e_reuse_rx_skb(rx_ring, rx_buffer);
37746+
37747+ /* clear contents of buffer_info */
37748+ rx_buffer->skb = NULL;
37749+#else
37750 /* place unused page back on the ring */
37751 i40e_reuse_rx_page(rx_ring, rx_buffer);
37752 rx_ring->rx_stats.page_reuse_count++;
37753
37754 /* clear contents of buffer_info */
37755 rx_buffer->page = NULL;
37756+#endif
37757
37758 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
37759 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
37760@@ -1143,8 +1365,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
37761 if (!tx_ring->tx_bi)
37762 goto err;
37763
37764- u64_stats_init(&tx_ring->syncp);
37765-
37766 /* round up to nearest 4K */
37767 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
37768 /* add u32 for head writeback, align after this takes care of
37769@@ -1162,6 +1382,7 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
37770
37771 tx_ring->next_to_use = 0;
37772 tx_ring->next_to_clean = 0;
37773+ tx_ring->tx_stats.prev_pkt_ctr = -1;
37774 return 0;
37775
37776 err:
37777@@ -1192,6 +1413,15 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
37778 for (i = 0; i < rx_ring->count; i++) {
37779 struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
37780
37781+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
37782+ if (!rx_bi->skb)
37783+ continue;
37784+
37785+ dma_unmap_single(rx_ring->dev, rx_bi->dma,
37786+ rx_ring->rx_buf_len, DMA_FROM_DEVICE);
37787+ dev_kfree_skb(rx_bi->skb);
37788+ rx_bi->skb = NULL;
37789+#else /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
37790 if (!rx_bi->page)
37791 continue;
37792
37793@@ -1214,6 +1444,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
37794
37795 rx_bi->page = NULL;
37796 rx_bi->page_offset = 0;
37797+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
37798 }
37799
37800 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
37801@@ -1236,6 +1467,10 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
37802 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
37803 {
37804 i40e_clean_rx_ring(rx_ring);
37805+#ifdef HAVE_XDP_BUFF_RXQ
37806+ if (rx_ring->vsi->type == I40E_VSI_MAIN)
37807+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
37808+#endif
37809 rx_ring->xdp_prog = NULL;
37810 kfree(rx_ring->rx_bi);
37811 rx_ring->rx_bi = NULL;
37812@@ -1256,6 +1491,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
37813 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
37814 {
37815 struct device *dev = rx_ring->dev;
37816+ int err = -ENOMEM;
37817 int bi_size;
37818
37819 /* warn if we are about to overwrite the pointer */
37820@@ -1264,11 +1500,13 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
37821 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
37822 if (!rx_ring->rx_bi)
37823 goto err;
37824+#ifdef HAVE_NDO_GET_STATS64
37825
37826 u64_stats_init(&rx_ring->syncp);
37827+#endif /* HAVE_NDO_GET_STATS64 */
37828
37829 /* Round up to nearest 4K */
37830- rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
37831+ rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
37832 rx_ring->size = ALIGN(rx_ring->size, 4096);
37833 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
37834 &rx_ring->dma, GFP_KERNEL);
37835@@ -1282,14 +1520,22 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
37836 rx_ring->next_to_alloc = 0;
37837 rx_ring->next_to_clean = 0;
37838 rx_ring->next_to_use = 0;
37839+#ifdef HAVE_XDP_BUFF_RXQ
37840+ /* XDP RX-queue info only needed for RX rings exposed to XDP */
37841+ if (rx_ring->vsi->type == I40E_VSI_MAIN) {
37842+ err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
37843+ rx_ring->queue_index);
37844+ if (err < 0)
37845+ goto err;
37846+ }
37847
37848 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
37849-
37850+#endif
37851 return 0;
37852 err:
37853 kfree(rx_ring->rx_bi);
37854 rx_ring->rx_bi = NULL;
37855- return -ENOMEM;
37856+ return err;
37857 }
37858
37859 /**
37860@@ -1313,6 +1559,46 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
37861 writel(val, rx_ring->tail);
37862 }
37863
37864+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
37865+static bool i40e_alloc_mapped_skb(struct i40e_ring *rx_ring,
37866+ struct i40e_rx_buffer *bi)
37867+{
37868+ struct sk_buff *skb = bi->skb;
37869+ dma_addr_t dma;
37870+
37871+ if (unlikely(skb))
37872+ return true;
37873+
37874+ if (likely(!skb)) {
37875+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
37876+ rx_ring->rx_buf_len,
37877+ GFP_ATOMIC | __GFP_NOWARN);
37878+ if (unlikely(!skb)) {
37879+ rx_ring->rx_stats.alloc_buff_failed++;
37880+ return false;
37881+ }
37882+ }
37883+
37884+ dma = dma_map_single(rx_ring->dev, skb->data,
37885+ rx_ring->rx_buf_len, DMA_FROM_DEVICE);
37886+
37887+ /*
37888+ * if mapping failed free memory back to system since
37889+ * there isn't much point in holding memory we can't use
37890+ */
37891+ if (dma_mapping_error(rx_ring->dev, dma)) {
37892+ dev_kfree_skb_any(skb);
37893+ rx_ring->rx_stats.alloc_buff_failed++;
37894+ return false;
37895+ }
37896+
37897+ bi->skb = skb;
37898+ bi->dma = dma;
37899+
37900+ return true;
37901+}
37902+
37903+#else /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
37904 /**
37905 * i40e_rx_offset - Return expected offset into page to access data
37906 * @rx_ring: Ring we are requesting offset of
37907@@ -1376,6 +1662,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
37908 return true;
37909 }
37910
37911+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
37912 /**
37913 * i40e_receive_skb - Send a completed packet up the stack
37914 * @rx_ring: rx ring in play
37915@@ -1386,12 +1673,32 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
37916 struct sk_buff *skb, u16 vlan_tag)
37917 {
37918 struct i40e_q_vector *q_vector = rx_ring->q_vector;
37919+#ifdef HAVE_VLAN_RX_REGISTER
37920+ struct i40e_vsi *vsi = rx_ring->vsi;
37921+#endif
37922
37923+#ifdef HAVE_VLAN_RX_REGISTER
37924+ if (vlan_tag & VLAN_VID_MASK) {
37925+ if (!vsi->vlgrp)
37926+ dev_kfree_skb_any(skb);
37927+ else
37928+ vlan_gro_receive(&q_vector->napi, vsi->vlgrp,
37929+ vlan_tag, skb);
37930+ } else {
37931+ napi_gro_receive(&q_vector->napi, skb);
37932+ }
37933+#else /* HAVE_VLAN_RX_REGISTER */
37934+#ifdef NETIF_F_HW_VLAN_CTAG_RX
37935 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
37936 (vlan_tag & VLAN_VID_MASK))
37937+#else
37938+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_RX) &&
37939+ (vlan_tag & VLAN_VID_MASK))
37940+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
37941 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
37942
37943 napi_gro_receive(&q_vector->napi, skb);
37944+#endif /* HAVE_VLAN_RX_REGISTER */
37945 }
37946
37947 /**
37948@@ -1415,6 +1722,12 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
37949 bi = &rx_ring->rx_bi[ntu];
37950
37951 do {
37952+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
37953+ if (!i40e_alloc_mapped_skb(rx_ring, bi))
37954+ goto no_buffers;
37955+
37956+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
37957+#else
37958 if (!i40e_alloc_mapped_page(rx_ring, bi))
37959 goto no_buffers;
37960
37961@@ -1428,6 +1741,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
37962 * because each write-back erases this info.
37963 */
37964 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
37965+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
37966
37967 rx_desc++;
37968 bi++;
37969@@ -1459,6 +1773,58 @@ no_buffers:
37970 return true;
37971 }
37972
37973+#define I40E_XDP_PASS 0
37974+#define I40E_XDP_CONSUMED BIT(0)
37975+#define I40E_XDP_TX BIT(1)
37976+#define I40E_XDP_REDIR BIT(2)
37977+
37978+static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
37979+{
37980+ /* Force memory writes to complete before letting h/w
37981+ * know there are new descriptors to fetch.
37982+ */
37983+ wmb();
37984+ writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
37985+}
37986+
37987+#ifdef I40E_ADD_PROBES
37988+static void i40e_rx_extra_counters(struct i40e_vsi *vsi, u32 rx_error,
37989+ const struct i40e_rx_ptype_decoded decoded)
37990+{
37991+ bool ipv4;
37992+
37993+ ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
37994+ (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
37995+
37996+ if (ipv4 &&
37997+ (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
37998+ BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
37999+ vsi->back->rx_ip4_cso_err++;
38000+
38001+ if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) {
38002+ if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
38003+ vsi->back->rx_tcp_cso_err++;
38004+ else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP)
38005+ vsi->back->rx_udp_cso_err++;
38006+ else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP)
38007+ vsi->back->rx_sctp_cso_err++;
38008+ }
38009+
38010+ if ((decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
38011+ (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4))
38012+ vsi->back->rx_ip4_cso++;
38013+ if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
38014+ vsi->back->rx_tcp_cso++;
38015+ else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP)
38016+ vsi->back->rx_udp_cso++;
38017+ else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP)
38018+ vsi->back->rx_sctp_cso++;
38019+}
38020+
38021+#endif /* I40E_ADD_PROBES */
38022+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
38023+#define I40E_TUNNEL_SUPPORT
38024+#endif
38025 /**
38026 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
38027 * @vsi: the VSI we care about
38028@@ -1488,8 +1854,13 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
38029 skb_checksum_none_assert(skb);
38030
38031 /* Rx csum enabled and ip headers found? */
38032+#ifdef HAVE_NDO_SET_FEATURES
38033 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
38034 return;
38035+#else
38036+ if (!(vsi->back->flags & I40E_FLAG_RX_CSUM_ENABLED))
38037+ return;
38038+#endif
38039
38040 /* did the hardware decode the packet and checksum? */
38041 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
38042@@ -1498,12 +1869,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
38043 /* both known and outer_ip must be set for the below code to work */
38044 if (!(decoded.known && decoded.outer_ip))
38045 return;
38046+#ifdef I40E_ADD_PROBES
38047+ vsi->back->hw_csum_rx_outer++;
38048+#endif
38049
38050 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
38051 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
38052 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
38053 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
38054
38055+#ifdef I40E_ADD_PROBES
38056+ i40e_rx_extra_counters(vsi, rx_error, decoded);
38057+
38058+#endif /* I40E_ADD_PROBES */
38059 if (ipv4 &&
38060 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
38061 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
38062@@ -1526,12 +1904,18 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
38063 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
38064 return;
38065
38066+#ifdef I40E_TUNNEL_SUPPORT
38067 /* If there is an outer header present that might contain a checksum
38068 * we need to bump the checksum level by 1 to reflect the fact that
38069 * we are indicating we validated the inner checksum.
38070 */
38071 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
38072+#ifdef HAVE_SKBUFF_CSUM_LEVEL
38073 skb->csum_level = 1;
38074+#else
38075+ skb->encapsulation = 1;
38076+#endif
38077+#endif /* I40E_TUNNEL_SUPPORT */
38078
38079 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
38080 switch (decoded.inner_prot) {
38081@@ -1539,11 +1923,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
38082 case I40E_RX_PTYPE_INNER_PROT_UDP:
38083 case I40E_RX_PTYPE_INNER_PROT_SCTP:
38084 skb->ip_summed = CHECKSUM_UNNECESSARY;
38085- /* fall though */
38086+ /* fall through */
38087 default:
38088 break;
38089 }
38090-
38091 return;
38092
38093 checksum_fail:
38094@@ -1556,7 +1939,7 @@ checksum_fail:
38095 *
38096 * Returns a hash type to be used by skb_set_hash
38097 **/
38098-static inline int i40e_ptype_to_htype(u8 ptype)
38099+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
38100 {
38101 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
38102
38103@@ -1577,12 +1960,15 @@ static inline int i40e_ptype_to_htype(u8 ptype)
38104 * i40e_rx_hash - set the hash value in the skb
38105 * @ring: descriptor ring
38106 * @rx_desc: specific descriptor
38107+ * @skb: skb currently being received and modified
38108+ * @rx_ptype: Rx packet type
38109 **/
38110 static inline void i40e_rx_hash(struct i40e_ring *ring,
38111 union i40e_rx_desc *rx_desc,
38112 struct sk_buff *skb,
38113 u8 rx_ptype)
38114 {
38115+#ifdef NETIF_F_RXHASH
38116 u32 hash;
38117 const __le64 rss_mask =
38118 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
38119@@ -1595,6 +1981,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
38120 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
38121 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
38122 }
38123+#endif /* NETIF_F_RXHASH */
38124 }
38125
38126 /**
38127@@ -1613,6 +2000,7 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
38128 union i40e_rx_desc *rx_desc, struct sk_buff *skb,
38129 u8 rx_ptype)
38130 {
38131+#ifdef HAVE_PTP_1588_CLOCK
38132 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
38133 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
38134 I40E_RXD_QW1_STATUS_SHIFT;
38135@@ -1622,6 +2010,7 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
38136
38137 if (unlikely(tsynvalid))
38138 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
38139+#endif /* HAVE_PTP_1588_CLOCK */
38140
38141 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
38142
38143@@ -1649,7 +2038,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
38144 **/
38145 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
38146 union i40e_rx_desc *rx_desc)
38147-
38148 {
38149 /* XDP packets use error pointer so abort at this point */
38150 if (IS_ERR(skb))
38151@@ -1673,6 +2061,51 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
38152 return false;
38153 }
38154
38155+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
38156+/**
38157+ * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
38158+ * @rx_ring: rx descriptor ring to transact packets on
38159+ * @size: size of buffer to add to skb
38160+ *
38161+ * This function will pull an Rx buffer from the ring and synchronize it
38162+ * for use by the CPU.
38163+ *
38164+ * ONE-BUFF version
38165+ */
38166+static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
38167+ const unsigned int size)
38168+{
38169+ struct i40e_rx_buffer *rx_buffer;
38170+
38171+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
38172+
38173+ /* we are reusing so sync this buffer for CPU use */
38174+ dma_unmap_single(rx_ring->dev, rx_buffer->dma,
38175+ rx_ring->rx_buf_len, DMA_FROM_DEVICE);
38176+
38177+ prefetch(rx_buffer->skb->data);
38178+
38179+ return rx_buffer;
38180+}
38181+
38182+/**
38183+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
38184+ * @rx_ring: rx descriptor ring to transact packets on
38185+ * @rx_buffer: rx buffer to pull data from
38186+ *
38187+ * This function will clean up the contents of the rx_buffer. It will
38188+ * either recycle the buffer or unmap it and free the associated resources.
38189+ *
38190+ * ONE-BUFF version
38191+ */
38192+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
38193+ struct i40e_rx_buffer *rx_buffer)
38194+{
38195+ /* clear contents of buffer_info */
38196+ rx_buffer->skb = NULL;
38197+}
38198+
38199+#else /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
38200 /**
38201 * i40e_page_is_reusable - check if any reuse is possible
38202 * @page: page struct to check
38203@@ -1737,10 +2170,17 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
38204 * the pagecnt_bias and page count so that we fully restock the
38205 * number of references the driver holds.
38206 */
38207+#ifdef HAVE_PAGE_COUNT_BULK_UPDATE
38208 if (unlikely(!pagecnt_bias)) {
38209 page_ref_add(page, USHRT_MAX);
38210 rx_buffer->pagecnt_bias = USHRT_MAX;
38211 }
38212+#else
38213+ if (likely(!pagecnt_bias)) {
38214+ get_page(page);
38215+ rx_buffer->pagecnt_bias = 1;
38216+ }
38217+#endif
38218
38219 return true;
38220 }
38221@@ -1765,7 +2205,7 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
38222 #if (PAGE_SIZE < 8192)
38223 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
38224 #else
38225- unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
38226+ unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
38227 #endif
38228
38229 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
38230@@ -1822,11 +2262,13 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
38231 struct i40e_rx_buffer *rx_buffer,
38232 struct xdp_buff *xdp)
38233 {
38234- unsigned int size = xdp->data_end - xdp->data;
38235+ unsigned int size = (u8 *)xdp->data_end - (u8 *)xdp->data;
38236+
38237 #if (PAGE_SIZE < 8192)
38238 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
38239 #else
38240- unsigned int truesize = SKB_DATA_ALIGN(size);
38241+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
38242+ SKB_DATA_ALIGN(I40E_SKB_PAD + size);
38243 #endif
38244 unsigned int headlen;
38245 struct sk_buff *skb;
38246@@ -1834,7 +2276,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
38247 /* prefetch first cache line of first page */
38248 prefetch(xdp->data);
38249 #if L1_CACHE_BYTES < 128
38250- prefetch(xdp->data + L1_CACHE_BYTES);
38251+ prefetch((void *)((u8 *)xdp->data + L1_CACHE_BYTES));
38252 #endif
38253
38254 /* allocate a skb to store the frags */
38255@@ -1847,7 +2289,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
38256 /* Determine available headroom for copy */
38257 headlen = size;
38258 if (headlen > I40E_RX_HDR_SIZE)
38259- headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
38260+ headlen = eth_get_headlen(skb->dev, xdp->data,
38261+ I40E_RX_HDR_SIZE);
38262
38263 /* align pull length to size of long to optimize memcpy performance */
38264 memcpy(__skb_put(skb, headlen), xdp->data,
38265@@ -1874,10 +2317,11 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
38266 return skb;
38267 }
38268
38269+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
38270 /**
38271 * i40e_build_skb - Build skb around an existing buffer
38272- * @rx_ring: Rx descriptor ring to transact packets on
38273- * @rx_buffer: Rx buffer to pull data from
38274+ * @rx_ring: rx descriptor ring to transact packets on
38275+ * @rx_buffer: rx buffer to pull data from
38276 * @xdp: xdp_buff pointing to the data
38277 *
38278 * This function builds an skb around an existing Rx buffer, taking care
38279@@ -1887,12 +2331,14 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
38280 struct i40e_rx_buffer *rx_buffer,
38281 struct xdp_buff *xdp)
38282 {
38283- unsigned int size = xdp->data_end - xdp->data;
38284+ unsigned int size = (u8 *)xdp->data_end - (u8 *)xdp->data;
38285+
38286 #if (PAGE_SIZE < 8192)
38287 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
38288 #else
38289 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
38290- SKB_DATA_ALIGN(I40E_SKB_PAD + size);
38291+ SKB_DATA_ALIGN(xdp->data_end -
38292+ xdp->data_hard_start);
38293 #endif
38294 struct sk_buff *skb;
38295
38296@@ -1907,7 +2353,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
38297 return NULL;
38298
38299 /* update pointers within the skb to store the data */
38300- skb_reserve(skb, I40E_SKB_PAD);
38301+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
38302 __skb_put(skb, size);
38303
38304 /* buffer is used by skb, update page_offset */
38305@@ -1920,13 +2366,14 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
38306 return skb;
38307 }
38308
38309+#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */
38310 /**
38311 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
38312 * @rx_ring: rx descriptor ring to transact packets on
38313 * @rx_buffer: rx buffer to pull data from
38314 *
38315 * This function will clean up the contents of the rx_buffer. It will
38316- * either recycle the bufer or unmap it and free the associated resources.
38317+ * either recycle the buffer or unmap it and free the associated resources.
38318 */
38319 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
38320 struct i40e_rx_buffer *rx_buffer)
38321@@ -1948,6 +2395,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
38322 rx_buffer->page = NULL;
38323 }
38324
38325+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
38326 /**
38327 * i40e_is_non_eop - process handling of non-EOP buffers
38328 * @rx_ring: Rx ring being processed
38329@@ -1981,12 +2429,26 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
38330 return true;
38331 }
38332
38333-#define I40E_XDP_PASS 0
38334-#define I40E_XDP_CONSUMED 1
38335-#define I40E_XDP_TX 2
38336+#ifdef HAVE_XDP_SUPPORT
38337+#ifdef HAVE_XDP_FRAME_STRUCT
38338+static int i40e_xmit_xdp_ring(struct xdp_frame *xdp,
38339+ struct i40e_ring *xdp_ring);
38340
38341+static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp,
38342+ struct i40e_ring *xdp_ring)
38343+{
38344+ struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
38345+
38346+ if (unlikely(!xdpf))
38347+ return I40E_XDP_CONSUMED;
38348+
38349+ return i40e_xmit_xdp_ring(xdpf, xdp_ring);
38350+}
38351+#else
38352 static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
38353 struct i40e_ring *xdp_ring);
38354+#endif
38355+#endif
38356
38357 /**
38358 * i40e_run_xdp - run an XDP program
38359@@ -1997,9 +2459,11 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
38360 struct xdp_buff *xdp)
38361 {
38362 int result = I40E_XDP_PASS;
38363+#ifdef HAVE_XDP_SUPPORT
38364 struct i40e_ring *xdp_ring;
38365 struct bpf_prog *xdp_prog;
38366 u32 act;
38367+ int err;
38368
38369 rcu_read_lock();
38370 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
38371@@ -2007,26 +2471,46 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
38372 if (!xdp_prog)
38373 goto xdp_out;
38374
38375+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
38376+
38377 act = bpf_prog_run_xdp(xdp_prog, xdp);
38378 switch (act) {
38379 case XDP_PASS:
38380+ rx_ring->xdp_stats.xdp_pass++;
38381 break;
38382 case XDP_TX:
38383 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
38384+#ifdef HAVE_XDP_FRAME_STRUCT
38385+ result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
38386+#else
38387 result = i40e_xmit_xdp_ring(xdp, xdp_ring);
38388+#endif
38389+ rx_ring->xdp_stats.xdp_tx++;
38390+ break;
38391+ case XDP_REDIRECT:
38392+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
38393+ result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
38394+ if (!err)
38395+ rx_ring->xdp_stats.xdp_redirect++;
38396+ else
38397+ rx_ring->xdp_stats.xdp_redirect_fail++;
38398 break;
38399 default:
38400 bpf_warn_invalid_xdp_action(act);
38401+ /* fallthrough -- abort and drop */
38402 case XDP_ABORTED:
38403 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
38404+ rx_ring->xdp_stats.xdp_unknown++;
38405 /* fallthrough -- handle aborts by dropping packet */
38406 case XDP_DROP:
38407 result = I40E_XDP_CONSUMED;
38408+ rx_ring->xdp_stats.xdp_drop++;
38409 break;
38410 }
38411 xdp_out:
38412 rcu_read_unlock();
38413- return ERR_PTR(-result);
38414+#endif /* HAVE_XDP_SUPPORT */
38415+ return (struct sk_buff *)ERR_PTR(-result);
38416 }
38417
38418 /**
38419@@ -2067,12 +2551,17 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
38420 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
38421 struct sk_buff *skb = rx_ring->skb;
38422 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
38423- bool failure = false, xdp_xmit = false;
38424+ unsigned int xdp_xmit = 0;
38425+ bool failure = false;
38426+ struct xdp_buff xdp;
38427+
38428+#ifdef HAVE_XDP_BUFF_RXQ
38429+ xdp.rxq = &rx_ring->xdp_rxq;
38430+#endif
38431
38432 while (likely(total_rx_packets < (unsigned int)budget)) {
38433 struct i40e_rx_buffer *rx_buffer;
38434 union i40e_rx_desc *rx_desc;
38435- struct xdp_buff xdp;
38436 unsigned int size;
38437 u16 vlan_tag;
38438 u8 rx_ptype;
38439@@ -2114,19 +2603,27 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
38440 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
38441
38442 /* retrieve a buffer from the ring */
38443+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
38444+ /* we are leaking memory if an skb is already present */
38445+ WARN_ON(skb);
38446+ skb = rx_buffer->skb;
38447+ __skb_put(skb, size);
38448+#else
38449 if (!skb) {
38450 xdp.data = page_address(rx_buffer->page) +
38451 rx_buffer->page_offset;
38452- xdp.data_hard_start = xdp.data -
38453- i40e_rx_offset(rx_ring);
38454- xdp.data_end = xdp.data + size;
38455+ xdp.data_hard_start = (void *)((u8 *)xdp.data -
38456+ i40e_rx_offset(rx_ring));
38457+ xdp.data_end = (void *)((u8 *)xdp.data + size);
38458
38459 skb = i40e_run_xdp(rx_ring, &xdp);
38460 }
38461
38462 if (IS_ERR(skb)) {
38463- if (PTR_ERR(skb) == -I40E_XDP_TX) {
38464- xdp_xmit = true;
38465+ unsigned int xdp_res = -PTR_ERR(skb);
38466+
38467+ if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
38468+ xdp_xmit |= xdp_res;
38469 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
38470 } else {
38471 rx_buffer->pagecnt_bias++;
38472@@ -2135,8 +2632,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
38473 total_rx_packets++;
38474 } else if (skb) {
38475 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
38476+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
38477 } else if (ring_uses_build_skb(rx_ring)) {
38478 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
38479+#endif
38480 } else {
38481 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
38482 }
38483@@ -2147,6 +2646,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
38484 rx_buffer->pagecnt_bias++;
38485 break;
38486 }
38487+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
38488
38489 i40e_put_rx_buffer(rx_ring, rx_buffer);
38490 cleaned_count++;
38491@@ -2180,19 +2680,15 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
38492 total_rx_packets++;
38493 }
38494
38495- if (xdp_xmit) {
38496- struct i40e_ring *xdp_ring;
38497+ if (xdp_xmit & I40E_XDP_REDIR)
38498+ xdp_do_flush_map();
38499
38500- xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
38501+ if (xdp_xmit & I40E_XDP_TX) {
38502+ struct i40e_ring *xdp_ring =
38503+ rx_ring->vsi->xdp_rings[rx_ring->queue_index];
38504
38505- /* Force memory writes to complete before letting h/w
38506- * know there are new descriptors to fetch.
38507- */
38508- wmb();
38509-
38510- writel(xdp_ring->next_to_use, xdp_ring->tail);
38511+ i40e_xdp_ring_update_tail(xdp_ring);
38512 }
38513-
38514 rx_ring->skb = skb;
38515
38516 u64_stats_update_begin(&rx_ring->syncp);
38517@@ -2206,31 +2702,45 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
38518 return failure ? budget : (int)total_rx_packets;
38519 }
38520
38521-static u32 i40e_buildreg_itr(const int type, const u16 itr)
38522+static inline u32 i40e_buildreg_itr(const int type, u16 itr)
38523 {
38524 u32 val;
38525
38526+ /* We don't bother with setting the CLEARPBA bit as the data sheet
38527+ * points out doing so is "meaningless since it was already
38528+ * auto-cleared". The auto-clearing happens when the interrupt is
38529+ * asserted.
38530+ *
38531+ * Hardware errata 28 also indicates that writing to a
38532+ * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
38533+ * an event in the PBA anyway so we need to rely on the automask
38534+ * to hold pending events for us until the interrupt is re-enabled
38535+ *
38536+ * The itr value is reported in microseconds, and the register
38537+ * value is recorded in 2 microsecond units. For this reason we
38538+ * only need to shift by the interval shift - 1 instead of the
38539+ * full value.
38540+ */
38541+ itr &= I40E_ITR_MASK;
38542+
38543 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
38544- /* Don't clear PBA because that can cause lost interrupts that
38545- * came in while we were cleaning/polling
38546- */
38547 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
38548- (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
38549+ (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
38550
38551 return val;
38552 }
38553
38554 /* a small macro to shorten up some long lines */
38555 #define INTREG I40E_PFINT_DYN_CTLN
38556-static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
38557-{
38558- return vsi->rx_rings[idx]->rx_itr_setting;
38559-}
38560
38561-static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
38562-{
38563- return vsi->tx_rings[idx]->tx_itr_setting;
38564-}
38565+/* The act of updating the ITR will cause it to immediately trigger. In order
38566+ * to prevent this from throwing off adaptive update statistics we defer the
38567+ * update so that it can only happen so often. So after either Tx or Rx are
38568+ * updated we make the adaptive scheme wait until either the ITR completely
38569+ * expires via the next_update expiration or we have been through at least
38570+ * 3 interrupts.
38571+ */
38572+#define ITR_COUNTDOWN_START 3
38573
38574 /**
38575 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
38576@@ -2242,79 +2752,57 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
38577 struct i40e_q_vector *q_vector)
38578 {
38579 struct i40e_hw *hw = &vsi->back->hw;
38580- bool rx = false, tx = false;
38581- u32 rxval, txval;
38582- int vector;
38583- int idx = q_vector->v_idx;
38584- int rx_itr_setting, tx_itr_setting;
38585+ u32 intval;
38586
38587 /* If we don't have MSIX, then we only need to re-enable icr0 */
38588 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
38589- i40e_irq_dynamic_enable_icr0(vsi->back, false);
38590+ i40e_irq_dynamic_enable_icr0(vsi->back);
38591 return;
38592 }
38593
38594- vector = (q_vector->v_idx + vsi->base_vector);
38595-
38596- /* avoid dynamic calculation if in countdown mode OR if
38597- * all dynamic is disabled
38598- */
38599- rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
38600-
38601- rx_itr_setting = get_rx_itr(vsi, idx);
38602- tx_itr_setting = get_tx_itr(vsi, idx);
38603-
38604- if (q_vector->itr_countdown > 0 ||
38605- (!ITR_IS_DYNAMIC(rx_itr_setting) &&
38606- !ITR_IS_DYNAMIC(tx_itr_setting))) {
38607- goto enable_int;
38608- }
38609-
38610- if (ITR_IS_DYNAMIC(rx_itr_setting)) {
38611- rx = i40e_set_new_dynamic_itr(&q_vector->rx);
38612- rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
38613- }
38614-
38615- if (ITR_IS_DYNAMIC(tx_itr_setting)) {
38616- tx = i40e_set_new_dynamic_itr(&q_vector->tx);
38617- txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
38618- }
38619-
38620- if (rx || tx) {
38621- /* get the higher of the two ITR adjustments and
38622- * use the same value for both ITR registers
38623- * when in adaptive mode (Rx and/or Tx)
38624- */
38625- u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
38626+ /* These will do nothing if dynamic updates are not enabled */
38627+ i40e_update_itr(q_vector, &q_vector->tx);
38628+ i40e_update_itr(q_vector, &q_vector->rx);
38629
38630- q_vector->tx.itr = q_vector->rx.itr = itr;
38631- txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
38632- tx = true;
38633- rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
38634- rx = true;
38635- }
38636-
38637- /* only need to enable the interrupt once, but need
38638- * to possibly update both ITR values
38639+ /* This block of logic allows us to get away with only updating
38640+ * one ITR value with each interrupt. The idea is to perform a
38641+ * pseudo-lazy update with the following criteria.
38642+ *
38643+ * 1. Rx is given higher priority than Tx if both are in same state
38644+ * 2. If we must reduce an ITR that is given highest priority.
38645+ * 3. We then give priority to increasing ITR based on amount.
38646 */
38647- if (rx) {
38648- /* set the INTENA_MSK_MASK so that this first write
38649- * won't actually enable the interrupt, instead just
38650- * updating the ITR (it's bit 31 PF and VF)
38651+ if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
38652+ /* Rx ITR needs to be reduced, this is highest priority */
38653+ intval = i40e_buildreg_itr(I40E_RX_ITR,
38654+ q_vector->rx.target_itr);
38655+ q_vector->rx.current_itr = q_vector->rx.target_itr;
38656+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
38657+ } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
38658+ ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
38659+ (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
38660+ /* Tx ITR needs to be reduced, this is second priority
38661+ * Tx ITR needs to be increased more than Rx, fourth priority
38662 */
38663- rxval |= BIT(31);
38664- /* don't check _DOWN because interrupt isn't being enabled */
38665- wr32(hw, INTREG(vector - 1), rxval);
38666+ intval = i40e_buildreg_itr(I40E_TX_ITR,
38667+ q_vector->tx.target_itr);
38668+ q_vector->tx.current_itr = q_vector->tx.target_itr;
38669+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
38670+ } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
38671+ /* Rx ITR needs to be increased, third priority */
38672+ intval = i40e_buildreg_itr(I40E_RX_ITR,
38673+ q_vector->rx.target_itr);
38674+ q_vector->rx.current_itr = q_vector->rx.target_itr;
38675+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
38676+ } else {
38677+ /* No ITR update, lowest priority */
38678+ intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
38679+ if (q_vector->itr_countdown)
38680+ q_vector->itr_countdown--;
38681 }
38682
38683-enable_int:
38684 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
38685- wr32(hw, INTREG(vector - 1), txval);
38686-
38687- if (q_vector->itr_countdown)
38688- q_vector->itr_countdown--;
38689- else
38690- q_vector->itr_countdown = ITR_COUNTDOWN_START;
38691+ wr32(hw, INTREG(q_vector->reg_idx), intval);
38692 }
38693
38694 /**
38695@@ -2332,6 +2820,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
38696 container_of(napi, struct i40e_q_vector, napi);
38697 struct i40e_vsi *vsi = q_vector->vsi;
38698 struct i40e_ring *ring;
38699+ u64 flags = vsi->back->flags;
38700 bool clean_complete = true;
38701 bool arm_wb = false;
38702 int budget_per_ring;
38703@@ -2372,8 +2861,15 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
38704 clean_complete = false;
38705 }
38706
38707+#ifndef HAVE_NETDEV_NAPI_LIST
38708+ /* if netdev is disabled we need to stop polling */
38709+ if (!netif_running(vsi->netdev))
38710+ clean_complete = true;
38711+
38712+#endif
38713 /* If work not completed, return budget and polling will return */
38714 if (!clean_complete) {
38715+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
38716 int cpu_id = smp_processor_id();
38717
38718 /* It is possible that the interrupt affinity has changed but,
38719@@ -2393,6 +2889,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
38720 /* Return budget-1 so that polling stops */
38721 return budget - 1;
38722 }
38723+#endif /* HAVE_IRQ_AFFINITY_NOTIFY */
38724 tx_only:
38725 if (arm_wb) {
38726 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
38727@@ -2401,7 +2898,7 @@ tx_only:
38728 return budget;
38729 }
38730
38731- if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
38732+ if (flags & I40E_TXR_FLAGS_WB_ON_ITR)
38733 q_vector->arm_wb_state = false;
38734
38735 /* Work is done so exit the polling mode and re-enable the interrupt */
38736@@ -2438,7 +2935,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
38737 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
38738 return;
38739
38740- if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)
38741+ if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
38742 return;
38743
38744 /* if sampling is disabled do nothing */
38745@@ -2450,8 +2947,12 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
38746 return;
38747
38748 /* snag network header to get L4 type and address */
38749- hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
38750+#ifdef HAVE_SKB_INNER_NETWORK_HEADER
38751+ hdr.network = (tx_flags & I40E_TX_FLAGS_TUNNEL) ?
38752 skb_inner_network_header(skb) : skb_network_header(skb);
38753+#else
38754+ hdr.network = skb_network_header(skb);
38755+#endif /* HAVE_SKB_INNER_NETWORK_HEADER */
38756
38757 /* Note: tx_flags gets modified to reflect inner protocols in
38758 * tx_enable_csum function if encap is enabled.
38759@@ -2466,8 +2967,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
38760 unsigned int h_offset = inner_hlen;
38761
38762 /* this function updates h_offset to the end of the header */
38763- l4_proto =
38764- ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
38765+ l4_proto = ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
38766 /* hlen will contain our best estimate of the tcp header */
38767 hlen = h_offset - inner_hlen;
38768 }
38769@@ -2478,8 +2978,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
38770 th = (struct tcphdr *)(hdr.network + hlen);
38771
38772 /* Due to lack of space, no more new filters can be programmed */
38773- if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
38774+ if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
38775 return;
38776+
38777 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
38778 /* HW ATR eviction will take care of removing filters on FIN
38779 * and RST packets.
38780@@ -2531,7 +3032,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
38781 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
38782
38783 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
38784- if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
38785+ if (!(tx_flags & I40E_TX_FLAGS_TUNNEL))
38786 dtype_cmd |=
38787 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
38788 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
38789@@ -2570,8 +3071,13 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
38790 __be16 protocol = skb->protocol;
38791 u32 tx_flags = 0;
38792
38793+#ifdef NETIF_F_HW_VLAN_CTAG_RX
38794 if (protocol == htons(ETH_P_8021Q) &&
38795 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
38796+#else
38797+ if (protocol == htons(ETH_P_8021Q) &&
38798+ !(tx_ring->netdev->features & NETIF_F_HW_VLAN_TX)) {
38799+#endif
38800 /* When HW VLAN acceleration is turned off by the user the
38801 * stack sets the protocol to 8021q so that the driver
38802 * can take any steps required to support the SW only
38803@@ -2629,6 +3135,14 @@ out:
38804 return 0;
38805 }
38806
38807+#ifndef HAVE_ENCAP_TSO_OFFLOAD
38808+#define inner_ip_hdr(skb) 0
38809+#define inner_tcp_hdr(skb) 0
38810+#define inner_ipv6_hdr(skb) 0
38811+#define inner_tcp_hdrlen(skb) 0
38812+#define inner_tcp_hdrlen(skb) 0
38813+#define skb_inner_transport_header(skb) ((skb)->data)
38814+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
38815 /**
38816 * i40e_tso - set up the tso context descriptor
38817 * @first: pointer to first Tx buffer for xmit
38818@@ -2677,14 +3191,32 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
38819 ip.v6->payload_len = 0;
38820 }
38821
38822+#ifdef HAVE_ENCAP_TSO_OFFLOAD
38823 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
38824+#ifdef NETIF_F_GSO_PARTIAL
38825 SKB_GSO_GRE_CSUM |
38826+#endif
38827+#ifdef NETIF_F_GSO_IPXIP4
38828 SKB_GSO_IPXIP4 |
38829+#ifdef NETIF_F_GSO_IPXIP6
38830 SKB_GSO_IPXIP6 |
38831+#endif
38832+#else
38833+#ifdef NETIF_F_GSO_IPIP
38834+ SKB_GSO_IPIP |
38835+#endif
38836+#ifdef NETIF_F_GSO_SIT
38837+ SKB_GSO_SIT |
38838+#endif
38839+#endif
38840 SKB_GSO_UDP_TUNNEL |
38841 SKB_GSO_UDP_TUNNEL_CSUM)) {
38842+#ifndef NETIF_F_GSO_PARTIAL
38843+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
38844+#else
38845 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
38846 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
38847+#endif
38848 l4.udp->len = 0;
38849
38850 /* determine offset of outer transport header */
38851@@ -2709,6 +3241,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
38852 }
38853 }
38854
38855+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
38856 /* determine offset of inner transport header */
38857 l4_offset = l4.hdr - skb->data;
38858
38859@@ -2723,7 +3256,14 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
38860 gso_size = skb_shinfo(skb)->gso_size;
38861 gso_segs = skb_shinfo(skb)->gso_segs;
38862
38863- /* update GSO size and bytecount with header size */
38864+#ifndef HAVE_NDO_FEATURES_CHECK
38865+ /* too small a TSO segment size causes problems */
38866+ if (gso_size < 64) {
38867+ gso_size = 64;
38868+ gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, 64);
38869+ }
38870+#endif
38871+ /* update gso size and bytecount with header size */
38872 first->gso_segs = gso_segs;
38873 first->bytecount += (first->gso_segs - 1) * *hdr_len;
38874
38875@@ -2737,6 +3277,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
38876 return 1;
38877 }
38878
38879+#ifdef HAVE_PTP_1588_CLOCK
38880 /**
38881 * i40e_tsyn - set up the tsyn context descriptor
38882 * @tx_ring: ptr to the ring to send
38883@@ -2751,7 +3292,11 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
38884 {
38885 struct i40e_pf *pf;
38886
38887+#ifdef SKB_SHARED_TX_IS_UNION
38888+ if (likely(!(skb_tx(skb)->hardware)))
38889+#else
38890 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
38891+#endif
38892 return 0;
38893
38894 /* Tx timestamps cannot be sampled when doing TSO */
38895@@ -2767,7 +3312,11 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
38896
38897 if (pf->ptp_tx &&
38898 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
38899+#ifdef SKB_SHARED_TX_IS_UNION
38900+ skb_tx(skb)->in_progress = 1;
38901+#else
38902 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
38903+#endif
38904 pf->ptp_tx_start = jiffies;
38905 pf->ptp_tx_skb = skb_get(skb);
38906 } else {
38907@@ -2781,6 +3330,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
38908 return 1;
38909 }
38910
38911+#endif /* HAVE_PTP_1588_CLOCK */
38912 /**
38913 * i40e_tx_enable_csum - Enable Tx checksum offloads
38914 * @skb: send buffer
38915@@ -2819,6 +3369,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
38916 /* compute outer L2 header size */
38917 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
38918
38919+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
38920 if (skb->encapsulation) {
38921 u32 tunnel = 0;
38922 /* define outer network header type */
38923@@ -2842,17 +3393,29 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
38924 switch (l4_proto) {
38925 case IPPROTO_UDP:
38926 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
38927- *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
38928+ *tx_flags |= I40E_TX_FLAGS_TUNNEL;
38929 break;
38930+#ifdef HAVE_GRE_ENCAP_OFFLOAD
38931 case IPPROTO_GRE:
38932 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
38933- *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
38934+ *tx_flags |= I40E_TX_FLAGS_TUNNEL;
38935+ /* There was a long-standing issue in GRE where GSO
38936+ * was not setting the outer transport header unless
38937+ * a GRE checksum was requested. This was fixed in
38938+ * the 4.6 version of the kernel. In the 4.7 kernel
38939+ * support for GRE over IPv6 was added to GSO. So we
38940+ * can assume this workaround for all IPv4 headers
38941+ * without impacting later versions of the GRE.
38942+ */
38943+ if (ip.v4->version == 4)
38944+ l4.hdr = ip.hdr + (ip.v4->ihl * 4);
38945 break;
38946 case IPPROTO_IPIP:
38947 case IPPROTO_IPV6:
38948- *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
38949+ *tx_flags |= I40E_TX_FLAGS_TUNNEL;
38950 l4.hdr = skb_inner_network_header(skb);
38951 break;
38952+#endif
38953 default:
38954 if (*tx_flags & I40E_TX_FLAGS_TSO)
38955 return -1;
38956@@ -2861,6 +3424,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
38957 return 0;
38958 }
38959
38960+#ifdef I40E_ADD_PROBES
38961+ if (*tx_flags & I40E_TX_FLAGS_IPV4)
38962+ if (*tx_flags & I40E_TX_FLAGS_TSO)
38963+ tx_ring->vsi->back->tx_ip4_cso++;
38964+#endif
38965 /* compute outer L3 header size */
38966 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
38967 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
38968@@ -2874,7 +3442,9 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
38969
38970 /* indicate if we need to offload outer UDP header */
38971 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
38972+#ifdef NETIF_F_GSO_PARTIAL
38973 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
38974+#endif
38975 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
38976 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
38977
38978@@ -2892,16 +3462,22 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
38979 if (ip.v6->version == 6)
38980 *tx_flags |= I40E_TX_FLAGS_IPV6;
38981 }
38982+#endif /* HAVE_ENCAP_CSUM_OFFLOAD */
38983
38984 /* Enable IP checksum offloads */
38985 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
38986 l4_proto = ip.v4->protocol;
38987+#ifdef I40E_ADD_PROBES
38988+ if (*tx_flags & I40E_TX_FLAGS_TSO)
38989+ tx_ring->vsi->back->tx_ip4_cso++;
38990+#endif
38991 /* the stack computes the IP header already, the only time we
38992 * need the hardware to recompute it is in the case of TSO.
38993 */
38994 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
38995 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
38996 I40E_TX_DESC_CMD_IIPT_IPV4;
38997+#ifdef NETIF_F_IPV6_CSUM
38998 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
38999 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
39000
39001@@ -2910,6 +3486,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
39002 if (l4.hdr != exthdr)
39003 ipv6_skip_exthdr(skb, exthdr - skb->data,
39004 &l4_proto, &frag_off);
39005+#endif
39006 }
39007
39008 /* compute inner L3 header size */
39009@@ -2921,18 +3498,29 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
39010 /* enable checksum offloads */
39011 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
39012 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
39013+#ifdef I40E_ADD_PROBES
39014+ tx_ring->vsi->back->tx_tcp_cso++;
39015+#endif
39016 break;
39017 case IPPROTO_SCTP:
39018 /* enable SCTP checksum offload */
39019+#ifdef HAVE_SCTP
39020 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
39021 offset |= (sizeof(struct sctphdr) >> 2) <<
39022 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
39023+#ifdef I40E_ADD_PROBES
39024+ tx_ring->vsi->back->tx_sctp_cso++;
39025+#endif
39026+#endif /* HAVE_SCTP */
39027 break;
39028 case IPPROTO_UDP:
39029 /* enable UDP checksum offload */
39030 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
39031 offset |= (sizeof(struct udphdr) >> 2) <<
39032 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
39033+#ifdef I40E_ADD_PROBES
39034+ tx_ring->vsi->back->tx_udp_cso++;
39035+#endif
39036 break;
39037 default:
39038 if (*tx_flags & I40E_TX_FLAGS_TSO)
39039@@ -3016,7 +3604,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
39040 **/
39041 bool __i40e_chk_linearize(struct sk_buff *skb)
39042 {
39043- const struct skb_frag_struct *frag, *stale;
39044+ const skb_frag_t *frag, *stale;
39045 int nr_frags, sum;
39046
39047 /* no need to check if number of frags is less than 7 */
39048@@ -3048,10 +3636,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
39049 /* Walk through fragments adding latest fragment, testing it, and
39050 * then removing stale fragments from the sum.
39051 */
39052- stale = &skb_shinfo(skb)->frags[0];
39053- for (;;) {
39054+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
39055+ int stale_size = skb_frag_size(stale);
39056+
39057 sum += skb_frag_size(frag++);
39058
39059+ /* The stale fragment may present us with a smaller
39060+ * descriptor than the actual fragment size. To account
39061+ * for that we need to remove all the data on the front and
39062+ * figure out what the remainder would be in the last
39063+ * descriptor associated with the fragment.
39064+ */
39065+ if (stale_size > I40E_MAX_DATA_PER_TXD) {
39066+ int align_pad = -(skb_frag_off(stale)) &
39067+ (I40E_MAX_READ_REQ_SIZE - 1);
39068+
39069+ sum -= align_pad;
39070+ stale_size -= align_pad;
39071+
39072+ do {
39073+ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
39074+ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
39075+ } while (stale_size > I40E_MAX_DATA_PER_TXD);
39076+ }
39077+
39078 /* if sum is negative we failed to make sufficient progress */
39079 if (sum < 0)
39080 return true;
39081@@ -3059,7 +3667,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
39082 if (!nr_frags--)
39083 break;
39084
39085- sum -= skb_frag_size(stale++);
39086+ sum -= stale_size;
39087 }
39088
39089 return false;
39090@@ -3075,7 +3683,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
39091 * @td_cmd: the command field in the descriptor
39092 * @td_offset: offset for checksum or crc
39093 *
39094- * Returns 0 on success, -1 on failure to DMA
39095+ * Returns 0 on success, negative error code on DMA failure.
39096 **/
39097 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
39098 struct i40e_tx_buffer *first, u32 tx_flags,
39099@@ -3083,7 +3691,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
39100 {
39101 unsigned int data_len = skb->data_len;
39102 unsigned int size = skb_headlen(skb);
39103- struct skb_frag_struct *frag;
39104+ skb_frag_t *frag;
39105 struct i40e_tx_buffer *tx_bi;
39106 struct i40e_tx_desc *tx_desc;
39107 u16 i = tx_ring->next_to_use;
39108@@ -3097,6 +3705,11 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
39109 I40E_TX_FLAGS_VLAN_SHIFT;
39110 }
39111
39112+#ifdef I40E_ADD_PROBES
39113+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
39114+ tx_ring->vsi->back->tcp_segs += first->gso_segs;
39115+
39116+#endif
39117 first->tx_flags = tx_flags;
39118
39119 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
39120@@ -3176,38 +3789,12 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
39121 /* write last descriptor with EOP bit */
39122 td_cmd |= I40E_TX_DESC_CMD_EOP;
39123
39124- /* We can OR these values together as they both are checked against
39125- * 4 below and at this point desc_count will be used as a boolean value
39126- * after this if/else block.
39127+ /* We OR these values together to check both against 4 (WB_STRIDE)
39128+ * below. This is safe since we don't re-use desc_count afterwards.
39129 */
39130 desc_count |= ++tx_ring->packet_stride;
39131
39132- /* Algorithm to optimize tail and RS bit setting:
39133- * if queue is stopped
39134- * mark RS bit
39135- * reset packet counter
39136- * else if xmit_more is supported and is true
39137- * advance packet counter to 4
39138- * reset desc_count to 0
39139- *
39140- * if desc_count >= 4
39141- * mark RS bit
39142- * reset packet counter
39143- * if desc_count > 0
39144- * update tail
39145- *
39146- * Note: If there are less than 4 descriptors
39147- * pending and interrupts were disabled the service task will
39148- * trigger a force WB.
39149- */
39150- if (netif_xmit_stopped(txring_txq(tx_ring))) {
39151- goto do_rs;
39152- } else if (skb->xmit_more) {
39153- /* set stride to arm on next packet and reset desc_count */
39154- tx_ring->packet_stride = WB_STRIDE;
39155- desc_count = 0;
39156- } else if (desc_count >= WB_STRIDE) {
39157-do_rs:
39158+ if (desc_count >= WB_STRIDE) {
39159 /* write last descriptor with RS bit set */
39160 td_cmd |= I40E_TX_DESC_CMD_RS;
39161 tx_ring->packet_stride = 0;
39162@@ -3216,6 +3803,11 @@ do_rs:
39163 tx_desc->cmd_type_offset_bsz =
39164 build_ctob(td_cmd, td_offset, size, td_tag);
39165
39166+ /* timestamp the skb as late as possible, just prior to notifying
39167+ * the MAC that it should transmit this packet
39168+ */
39169+ skb_tx_timestamp(skb);
39170+
39171 /* Force memory writes to complete before letting h/w know there
39172 * are new descriptors to fetch.
39173 *
39174@@ -3228,14 +3820,37 @@ do_rs:
39175 first->next_to_watch = tx_desc;
39176
39177 /* notify HW of packet */
39178- if (desc_count) {
39179+#ifdef HAVE_SKB_XMIT_MORE
39180+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
39181 writel(i, tx_ring->tail);
39182
39183- /* we need this if more than one processor can write to our tail
39184- * at a time, it synchronizes IO on IA64/Altix systems
39185+#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB
39186+ /* We need this mmiowb on IA64/Altix systems where wmb() isn't
39187+ * guaranteed to synchronize I/O.
39188+ *
39189+ * Note that mmiowb() only provides a guarantee about ordering
39190+ * when in conjunction with a spin_unlock(). This barrier is
39191+ * used to guarantee the I/O ordering with respect to a spin
39192+ * lock in the networking core code.
39193 */
39194 mmiowb();
39195+#endif
39196 }
39197+#else
39198+ writel(i, tx_ring->tail);
39199+
39200+#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB
39201+ /* We need this mmiowb on IA64/Altix systems where wmb() isn't
39202+ * guaranteed to synchronize I/O.
39203+ *
39204+ * Note that mmiowb() only provides a guarantee about ordering when in
39205+ * conjunction with a spin_unlock(). This barrier is used to guarantee
39206+ * the I/O ordering with respect to a spin lock in the networking core
39207+ * code.
39208+ */
39209+ mmiowb();
39210+#endif
39211+#endif /* HAVE_SKB_XMIT_MORE */
39212
39213 return 0;
39214
39215@@ -3255,36 +3870,63 @@ dma_error:
39216
39217 tx_ring->next_to_use = i;
39218
39219- return -1;
39220+ return -EIO;
39221+}
39222+
39223+#if !defined(HAVE_NET_DEVICE_OPS) && defined(HAVE_NETDEV_SELECT_QUEUE)
39224+/**
39225+ * i40e_lan_select_queue - Select the right Tx queue for the skb for LAN VSI
39226+ * @netdev: network interface device structure
39227+ * @skb: send buffer
39228+ *
39229+ * Returns the index of the selected Tx queue
39230+ **/
39231+u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb)
39232+{
39233+ return skb_tx_hash(netdev, skb);
39234 }
39235
39236+#endif /* !HAVE_NET_DEVICE_OPS && HAVE_NETDEV_SELECT_QUEUE */
39237+
39238+#ifdef HAVE_XDP_SUPPORT
39239 /**
39240 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
39241- * @xdp: data to transmit
39242+ * @xdp: frame data to transmit
39243 * @xdp_ring: XDP Tx ring
39244 **/
39245+#ifdef HAVE_XDP_FRAME_STRUCT
39246+static int i40e_xmit_xdp_ring(struct xdp_frame *xdp,
39247+ struct i40e_ring *xdp_ring)
39248+#else
39249 static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
39250 struct i40e_ring *xdp_ring)
39251+#endif
39252 {
39253- u32 size = xdp->data_end - xdp->data;
39254 u16 i = xdp_ring->next_to_use;
39255 struct i40e_tx_buffer *tx_bi;
39256 struct i40e_tx_desc *tx_desc;
39257 dma_addr_t dma;
39258+ void *data;
39259+ u32 size;
39260+
39261+ size = xdp_get_len(xdp);
39262+ data = xdp->data;
39263
39264 if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
39265 xdp_ring->tx_stats.tx_busy++;
39266 return I40E_XDP_CONSUMED;
39267 }
39268-
39269- dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
39270+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
39271 if (dma_mapping_error(xdp_ring->dev, dma))
39272 return I40E_XDP_CONSUMED;
39273-
39274 tx_bi = &xdp_ring->tx_bi[i];
39275 tx_bi->bytecount = size;
39276 tx_bi->gso_segs = 1;
39277- tx_bi->raw_buf = xdp->data;
39278+#ifdef HAVE_XDP_FRAME_STRUCT
39279+ tx_bi->xdpf = xdp;
39280+#else
39281+ tx_bi->raw_buf = data;
39282+#endif
39283
39284 /* record length, and DMA address */
39285 dma_unmap_len_set(tx_bi, len, size);
39286@@ -3304,12 +3946,11 @@ static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
39287 i++;
39288 if (i == xdp_ring->count)
39289 i = 0;
39290-
39291 tx_bi->next_to_watch = tx_desc;
39292 xdp_ring->next_to_use = i;
39293-
39294 return I40E_XDP_TX;
39295 }
39296+#endif
39297
39298 /**
39299 * i40e_xmit_frame_ring - Sends buffer on Tx ring
39300@@ -3330,7 +3971,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
39301 u32 td_cmd = 0;
39302 u8 hdr_len = 0;
39303 int tso, count;
39304+#ifdef HAVE_PTP_1588_CLOCK
39305 int tsyn;
39306+#endif /* HAVE_PTP_1588_CLOCK */
39307
39308 /* prefetch the data, we'll need it later */
39309 prefetch(skb->data);
39310@@ -3390,12 +4033,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
39311 if (tso < 0)
39312 goto out_drop;
39313
39314+#ifdef HAVE_PTP_1588_CLOCK
39315 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
39316
39317 if (tsyn)
39318 tx_flags |= I40E_TX_FLAGS_TSYN;
39319
39320- skb_tx_timestamp(skb);
39321+#endif /* HAVE_PTP_1588_CLOCK */
39322
39323 /* always enable CRC insertion offload */
39324 td_cmd |= I40E_TX_DESC_CMD_ICRC;
39325@@ -3409,16 +4053,25 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
39326 */
39327 i40e_atr(tx_ring, skb, tx_flags);
39328
39329+#ifdef HAVE_PTP_1588_CLOCK
39330 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
39331 td_cmd, td_offset))
39332 goto cleanup_tx_tstamp;
39333+#else
39334+ i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
39335+ td_cmd, td_offset);
39336+#endif
39337
39338+#ifndef HAVE_TRANS_START_IN_QUEUE
39339+ tx_ring->netdev->trans_start = jiffies;
39340+#endif
39341 return NETDEV_TX_OK;
39342
39343 out_drop:
39344 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
39345 dev_kfree_skb_any(first->skb);
39346 first->skb = NULL;
39347+#ifdef HAVE_PTP_1588_CLOCK
39348 cleanup_tx_tstamp:
39349 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
39350 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
39351@@ -3427,7 +4080,7 @@ cleanup_tx_tstamp:
39352 pf->ptp_tx_skb = NULL;
39353 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
39354 }
39355-
39356+#endif
39357 return NETDEV_TX_OK;
39358 }
39359
39360@@ -3452,3 +4105,90 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
39361
39362 return i40e_xmit_frame_ring(skb, tx_ring);
39363 }
39364+
39365+#ifdef HAVE_XDP_SUPPORT
39366+/**
39367+ * i40e_xdp_xmit - Implements ndo_xdp_xmit
39368+ * @dev: netdev
39369+ * @n: amount of frames
39370+ * @frames: XDP frames
39371+ * @flags: XDP xmit flags
39372+ *
39373+ * Returns number of frames successfully sent. Frames that fail are
39374+ * freed via the XDP return API.
39375+ *
39376+ * For error cases, a negative errno code is returned and no frames
39377+ * are transmitted (caller must handle freeing frames).
39378+ **/
39379+#ifdef HAVE_XDP_FRAME_STRUCT
39380+int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
39381+ u32 flags)
39382+#else
39383+int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
39384+#endif
39385+{
39386+ struct i40e_netdev_priv *np = netdev_priv(dev);
39387+ unsigned int queue_index = smp_processor_id();
39388+ struct i40e_vsi *vsi = np->vsi;
39389+#ifdef HAVE_XDP_FRAME_STRUCT
39390+ struct i40e_ring *xdp_ring;
39391+ int drops = 0;
39392+ int i;
39393+#endif
39394+ int err;
39395+
39396+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
39397+ return -ENETDOWN;
39398+
39399+ if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
39400+ return -ENXIO;
39401+#ifdef HAVE_XDP_FRAME_STRUCT
39402+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
39403+ return -EINVAL;
39404+
39405+ xdp_ring = vsi->xdp_rings[queue_index];
39406+
39407+ for (i = 0; i < n; i++) {
39408+ struct xdp_frame *xdpf = frames[i];
39409+
39410+ err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
39411+ if (err != I40E_XDP_TX) {
39412+ xdp_return_frame_rx_napi(xdpf);
39413+ drops++;
39414+ }
39415+ }
39416+
39417+ if (unlikely(flags & XDP_XMIT_FLUSH))
39418+ i40e_xdp_ring_update_tail(xdp_ring);
39419+
39420+ return n - drops;
39421+#else
39422+ err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]);
39423+
39424+ if (err != I40E_XDP_TX)
39425+ return -ENOSPC;
39426+
39427+ return 0;
39428+#endif
39429+}
39430+
39431+/**
39432+ * i40e_xdp_flush - Implements ndo_xdp_flush
39433+ * @dev: netdev
39434+ **/
39435+void i40e_xdp_flush(struct net_device *dev)
39436+{
39437+ struct i40e_netdev_priv *np = netdev_priv(dev);
39438+ unsigned int queue_index = smp_processor_id();
39439+ struct i40e_vsi *vsi = np->vsi;
39440+
39441+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
39442+ return;
39443+
39444+ if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
39445+ return;
39446+
39447+ i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
39448+}
39449+#endif
39450+
39451diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
39452index 2f848bc5e..da598721f 100644
39453--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
39454+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
39455@@ -1,57 +1,41 @@
39456-/*******************************************************************************
39457- *
39458- * Intel Ethernet Controller XL710 Family Linux Driver
39459- * Copyright(c) 2013 - 2016 Intel Corporation.
39460- *
39461- * This program is free software; you can redistribute it and/or modify it
39462- * under the terms and conditions of the GNU General Public License,
39463- * version 2, as published by the Free Software Foundation.
39464- *
39465- * This program is distributed in the hope it will be useful, but WITHOUT
39466- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
39467- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
39468- * more details.
39469- *
39470- * You should have received a copy of the GNU General Public License along
39471- * with this program. If not, see <http://www.gnu.org/licenses/>.
39472- *
39473- * The full GNU General Public License is included in this distribution in
39474- * the file called "COPYING".
39475- *
39476- * Contact Information:
39477- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
39478- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
39479- *
39480- ******************************************************************************/
39481+/* SPDX-License-Identifier: GPL-2.0 */
39482+/* Copyright(c) 2013 - 2020 Intel Corporation. */
39483
39484 #ifndef _I40E_TXRX_H_
39485 #define _I40E_TXRX_H_
39486
39487 /* Interrupt Throttling and Rate Limiting Goodies */
39488-
39489-#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
39490-#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
39491-#define I40E_ITR_100K 0x0005
39492-#define I40E_ITR_50K 0x000A
39493-#define I40E_ITR_20K 0x0019
39494-#define I40E_ITR_18K 0x001B
39495-#define I40E_ITR_8K 0x003E
39496-#define I40E_ITR_4K 0x007A
39497-#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
39498-#define I40E_ITR_RX_DEF I40E_ITR_20K
39499-#define I40E_ITR_TX_DEF I40E_ITR_20K
39500-#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
39501-#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
39502-#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
39503 #define I40E_DEFAULT_IRQ_WORK 256
39504-#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
39505-#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
39506-#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
39507+
39508+/* The datasheet for the X710 and XL710 indicates that the maximum value for
39509+ * the ITR is 8160usec, which is then called out as 0xFF0 with a 2usec
39510+ * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
39511+ * the register value, which is divided by 2, let's use the actual values and
39512+ * avoid an excessive amount of translation.
39513+ */
39514+#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
39515+#define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */
39516+#define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */
39517+#define I40E_ITR_100K 10 /* all values below must be even */
39518+#define I40E_ITR_50K 20
39519+#define I40E_ITR_20K 50
39520+#define I40E_ITR_18K 60
39521+#define I40E_ITR_8K 122
39522+#define I40E_MAX_ITR 8160 /* maximum value as per datasheet */
39523+#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
39524+#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
39525+#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))
39526+
39527+#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
39528+#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
39529+
39530 /* 0x40 is the enable bit for interrupt rate limiting, and must be set if
39531 * the value of the rate limit is non-zero
39532 */
39533 #define INTRL_ENA BIT(6)
39534+#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
39535 #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
39536+
39537 /**
39538 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
39539 * @intrl: interrupt rate limit to convert
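To make the ITR comment introduced in the hunk above concrete, here is a small stand-alone sketch (not part of the patch) that copies the ITR_TO_REG/ITR_REG_ALIGN macros from this hunk (lightly adapted to unsigned constants, with __ALIGN_MASK redefined locally in place of the kernel helper of the same name) and walks one hypothetical user setting of 45 usec through them.

#include <stdio.h>

/* Local, illustrative copies of the definitions from the hunk above. */
#define I40E_ITR_DYNAMIC 0x8000u
#define I40E_ITR_MASK    0x1FFEu
#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define ITR_TO_REG(setting)    ((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)

int main(void)
{
    unsigned int setting = 45u | I40E_ITR_DYNAMIC; /* hypothetical request: 45 usec, dynamic */
    unsigned int usecs   = ITR_TO_REG(setting);    /* strip the dynamic flag -> 45 */
    unsigned int aligned = ITR_REG_ALIGN(usecs);   /* round onto the even 2 usec grid -> 46 */

    /* Per the comment above, the register works in 2 usec units, so the
     * value actually programmed corresponds to aligned / 2 (here: 23). */
    printf("usecs=%u aligned=%u reg=%u\n", usecs, aligned, aligned / 2u);
    return 0;
}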
39540@@ -131,10 +115,18 @@ enum i40e_dyn_idx_t {
39541 */
39542 #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
39543 #define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
39544+#ifdef I40E_32BYTE_RX
39545 #define i40e_rx_desc i40e_32byte_rx_desc
39546+#else
39547+#define i40e_rx_desc i40e_16byte_rx_desc
39548+#endif
39549
39550+#ifdef HAVE_STRUCT_DMA_ATTRS
39551+#define I40E_RX_DMA_ATTR NULL
39552+#else
39553 #define I40E_RX_DMA_ATTR \
39554 (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
39555+#endif
39556
39557 /* Attempt to maximize the headroom available for incoming frames. We
39558 * use a 2K buffer for receives and need 1536/1534 to store the data for
39559@@ -206,7 +198,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
39560 }
39561
39562 /* How many Rx Buffers do we bundle into one write to the hardware ? */
39563-#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
39564+#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */
39565 #define I40E_RX_INCREMENT(r, i) \
39566 do { \
39567 (i)++; \
39568@@ -275,7 +267,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
39569 }
39570
39571 /* Tx Descriptors needed, worst case */
39572-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
39573+#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
39574 #define I40E_MIN_DESC_PENDING 4
39575
39576 #define I40E_TX_FLAGS_HW_VLAN BIT(1)
39577@@ -285,9 +277,11 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
39578 #define I40E_TX_FLAGS_IPV6 BIT(5)
39579 #define I40E_TX_FLAGS_FCCRC BIT(6)
39580 #define I40E_TX_FLAGS_FSO BIT(7)
39581+#ifdef HAVE_PTP_1588_CLOCK
39582 #define I40E_TX_FLAGS_TSYN BIT(8)
39583+#endif /* HAVE_PTP_1588_CLOCK */
39584 #define I40E_TX_FLAGS_FD_SB BIT(9)
39585-#define I40E_TX_FLAGS_UDP_TUNNEL BIT(10)
39586+#define I40E_TX_FLAGS_TUNNEL BIT(10)
39587 #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
39588 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
39589 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
39590@@ -296,6 +290,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
39591 struct i40e_tx_buffer {
39592 struct i40e_tx_desc *next_to_watch;
39593 union {
39594+ struct xdp_frame *xdpf;
39595 struct sk_buff *skb;
39596 void *raw_buf;
39597 };
39598@@ -309,6 +304,9 @@ struct i40e_tx_buffer {
39599
39600 struct i40e_rx_buffer {
39601 dma_addr_t dma;
39602+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
39603+ struct sk_buff *skb;
39604+#else
39605 struct page *page;
39606 #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
39607 __u32 page_offset;
39608@@ -316,6 +314,7 @@ struct i40e_rx_buffer {
39609 __u16 page_offset;
39610 #endif
39611 __u16 pagecnt_bias;
39612+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
39613 };
39614
39615 struct i40e_queue_stats {
39616@@ -323,12 +322,24 @@ struct i40e_queue_stats {
39617 u64 bytes;
39618 };
39619
39620+#ifdef HAVE_XDP_SUPPORT
39621+struct i40e_xdp_stats {
39622+ u64 xdp_pass;
39623+ u64 xdp_drop;
39624+ u64 xdp_tx;
39625+ u64 xdp_unknown;
39626+ u64 xdp_redirect;
39627+ u64 xdp_redirect_fail;
39628+};
39629+#endif
39630+
39631 struct i40e_tx_queue_stats {
39632 u64 restart_queue;
39633 u64 tx_busy;
39634 u64 tx_done_old;
39635 u64 tx_linearize;
39636 u64 tx_force_wb;
39637+ int prev_pkt_ctr;
39638 };
39639
39640 struct i40e_rx_queue_stats {
39641@@ -342,6 +353,7 @@ struct i40e_rx_queue_stats {
39642 enum i40e_ring_state_t {
39643 __I40E_TX_FDIR_INIT_DONE,
39644 __I40E_TX_XPS_INIT_DONE,
39645+ __I40E_RING_STATE_NBITS /* must be last */
39646 };
39647
39648 /* some useful defines for virtchannel interface, which
39649@@ -366,7 +378,7 @@ struct i40e_ring {
39650 struct i40e_tx_buffer *tx_bi;
39651 struct i40e_rx_buffer *rx_bi;
39652 };
39653- unsigned long state;
39654+ DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
39655 u16 queue_index; /* Queue number of ring */
39656 u8 dcb_tc; /* Traffic class of ring */
39657 u8 __iomem *tail;
39658@@ -376,8 +388,7 @@ struct i40e_ring {
39659 * these values always store the USER setting, and must be converted
39660 * before programming to a register.
39661 */
39662- u16 rx_itr_setting;
39663- u16 tx_itr_setting;
39664+ u16 itr_setting;
39665
39666 u16 count; /* Number of descriptors */
39667 u16 reg_idx; /* HW register index of the ring */
39668@@ -401,7 +412,12 @@ struct i40e_ring {
39669
39670 /* stats structs */
39671 struct i40e_queue_stats stats;
39672+#ifdef HAVE_NDO_GET_STATS64
39673 struct u64_stats_sync syncp;
39674+#endif
39675+#ifdef HAVE_XDP_SUPPORT
39676+ struct i40e_xdp_stats xdp_stats;
39677+#endif
39678 union {
39679 struct i40e_tx_queue_stats tx_stats;
39680 struct i40e_rx_queue_stats rx_stats;
39681@@ -423,6 +439,11 @@ struct i40e_ring {
39682 * i40e_clean_rx_ring_irq() is called
39683 * for this ring.
39684 */
39685+
39686+ struct i40e_channel *ch;
39687+#ifdef HAVE_XDP_BUFF_RXQ
39688+ struct xdp_rxq_info xdp_rxq;
39689+#endif
39690 } ____cacheline_internodealigned_in_smp;
39691
39692 static inline bool ring_uses_build_skb(struct i40e_ring *ring)
39693@@ -440,6 +461,13 @@ static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
39694 ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
39695 }
39696
39697+#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002
39698+#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002
39699+#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e
39700+#define I40E_ITR_ADAPTIVE_LATENCY 0x8000
39701+#define I40E_ITR_ADAPTIVE_BULK 0x0000
39702+#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))
39703+
39704 static inline bool ring_is_xdp(struct i40e_ring *ring)
39705 {
39706 return !!(ring->flags & I40E_TXR_FLAGS_XDP);
39707@@ -450,21 +478,14 @@ static inline void set_ring_xdp(struct i40e_ring *ring)
39708 ring->flags |= I40E_TXR_FLAGS_XDP;
39709 }
39710
39711-enum i40e_latency_range {
39712- I40E_LOWEST_LATENCY = 0,
39713- I40E_LOW_LATENCY = 1,
39714- I40E_BULK_LATENCY = 2,
39715-};
39716-
39717 struct i40e_ring_container {
39718- /* array of pointers to rings */
39719- struct i40e_ring *ring;
39720+ struct i40e_ring *ring; /* pointer to linked list of ring(s) */
39721+ unsigned long next_update; /* jiffies value of next update */
39722 unsigned int total_bytes; /* total bytes processed this int */
39723 unsigned int total_packets; /* total packets processed this int */
39724- unsigned long last_itr_update; /* jiffies of last ITR update */
39725 u16 count;
39726- enum i40e_latency_range latency_range;
39727- u16 itr;
39728+ u16 target_itr; /* target ITR setting for ring(s) */
39729+ u16 current_itr; /* current ITR setting for ring(s) */
39730 };
39731
39732 /* iterator for handling rings in ring container */
39733@@ -484,6 +505,10 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
39734
39735 bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
39736 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
39737+#if !defined(HAVE_NET_DEVICE_OPS) && defined(HAVE_NETDEV_SELECT_QUEUE)
39738+extern u16 i40e_lan_select_queue(struct net_device *netdev,
39739+ struct sk_buff *skb);
39740+#endif
39741 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
39742 void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
39743 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
39744@@ -492,9 +517,31 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring);
39745 void i40e_free_rx_resources(struct i40e_ring *rx_ring);
39746 int i40e_napi_poll(struct napi_struct *napi, int budget);
39747 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
39748-u32 i40e_get_tx_pending(struct i40e_ring *ring);
39749+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
39750+void i40e_detect_recover_hung(struct i40e_vsi *vsi);
39751 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
39752 bool __i40e_chk_linearize(struct sk_buff *skb);
39753+#ifdef HAVE_XDP_FRAME_STRUCT
39754+int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
39755+ u32 flags);
39756+#else
39757+int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp);
39758+#endif
39759+void i40e_xdp_flush(struct net_device *dev);
39760+
39761+#ifdef HAVE_XDP_SUPPORT
39762+#ifdef HAVE_XDP_FRAME_STRUCT
39763+static inline u32 xdp_get_len(struct xdp_frame *xdp)
39764+{
39765+ return xdp->len;
39766+}
39767+#else
39768+static inline u32 xdp_get_len(struct xdp_buff *xdp)
39769+{
39770+ return xdp->data_end - xdp->data;
39771+}
39772+#endif
39773+#endif
39774
39775 /**
39776 * i40e_get_head - Retrieve head from head writeback
39777@@ -521,7 +568,7 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
39778 **/
39779 static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
39780 {
39781- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
39782+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
39783 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
39784 int count = 0, size = skb_headlen(skb);
39785
39786diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
39787index fd4bbdd88..147d84ec7 100644
39788--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
39789+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
39790@@ -1,28 +1,5 @@
39791-/*******************************************************************************
39792- *
39793- * Intel Ethernet Controller XL710 Family Linux Driver
39794- * Copyright(c) 2013 - 2015 Intel Corporation.
39795- *
39796- * This program is free software; you can redistribute it and/or modify it
39797- * under the terms and conditions of the GNU General Public License,
39798- * version 2, as published by the Free Software Foundation.
39799- *
39800- * This program is distributed in the hope it will be useful, but WITHOUT
39801- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
39802- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
39803- * more details.
39804- *
39805- * You should have received a copy of the GNU General Public License along
39806- * with this program. If not, see <http://www.gnu.org/licenses/>.
39807- *
39808- * The full GNU General Public License is included in this distribution in
39809- * the file called "COPYING".
39810- *
39811- * Contact Information:
39812- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
39813- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
39814- *
39815- ******************************************************************************/
39816+/* SPDX-License-Identifier: GPL-2.0 */
39817+/* Copyright(c) 2013 - 2020 Intel Corporation. */
39818
39819 #ifndef _I40E_TYPE_H_
39820 #define _I40E_TYPE_H_
39821@@ -36,16 +13,19 @@
39822 #include "i40e_devids.h"
39823
39824 /* I40E_MASK is a macro used on 32 bit registers */
39825-#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
39826+#define I40E_MASK(mask, shift) (mask << shift)
39827
39828 #define I40E_MAX_VSI_QP 16
39829-#define I40E_MAX_VF_VSI 3
39830+#define I40E_MAX_VF_VSI 4
39831 #define I40E_MAX_CHAINED_RX_BUFFERS 5
39832 #define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
39833
39834 /* Max default timeout in ms, */
39835 #define I40E_MAX_NVM_TIMEOUT 18000
39836
39837+/* Max timeout in ms for the phy to respond */
39838+#define I40E_MAX_PHY_TIMEOUT 500
39839+
39840 /* Switch from ms to the 1usec global time (this is the GTIME resolution) */
39841 #define I40E_MS_TO_GTIME(time) ((time) * 1000)
39842
39843@@ -53,6 +33,9 @@
39844 struct i40e_hw;
39845 typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
39846
39847+#ifndef ETH_ALEN
39848+#define ETH_ALEN 6
39849+#endif
39850 /* Data type manipulation macros. */
39851
39852 #define I40E_DESC_UNUSED(R) \
39853@@ -79,7 +62,9 @@ enum i40e_debug_mask {
39854 I40E_DEBUG_DIAG = 0x00000800,
39855 I40E_DEBUG_FD = 0x00001000,
39856 I40E_DEBUG_PACKAGE = 0x00002000,
39857+
39858 I40E_DEBUG_IWARP = 0x00F00000,
39859+
39860 I40E_DEBUG_AQ_MESSAGE = 0x01000000,
39861 I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
39862 I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
39863@@ -105,16 +90,16 @@ enum i40e_debug_mask {
39864 #define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
39865 I40E_GLGEN_MSCA_OPCODE_SHIFT)
39866 #define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \
39867- I40E_GLGEN_MSCA_OPCODE_SHIFT)
39868+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
39869 #define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
39870- I40E_GLGEN_MSCA_OPCODE_SHIFT)
39871+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
39872
39873-#define I40E_PHY_COM_REG_PAGE 0x1E
39874-#define I40E_PHY_LED_LINK_MODE_MASK 0xF0
39875-#define I40E_PHY_LED_MANUAL_ON 0x100
39876-#define I40E_PHY_LED_PROV_REG_1 0xC430
39877-#define I40E_PHY_LED_MODE_MASK 0xFFFF
39878-#define I40E_PHY_LED_MODE_ORIG 0x80000000
39879+#define I40E_PHY_COM_REG_PAGE 0x1E
39880+#define I40E_PHY_LED_LINK_MODE_MASK 0xF0
39881+#define I40E_PHY_LED_MANUAL_ON 0x100
39882+#define I40E_PHY_LED_PROV_REG_1 0xC430
39883+#define I40E_PHY_LED_MODE_MASK 0xFFFF
39884+#define I40E_PHY_LED_MODE_ORIG 0x80000000
39885
39886 /* These are structs for managing the hardware information and the operations.
39887 * The structures of function pointers are filled out at init time when we
39888@@ -253,7 +238,8 @@ struct i40e_phy_info {
39889 #define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
39890 BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
39891 #define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
39892-/* Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some
39893+/*
39894+ * Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some
39895 * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
39896 * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So,
39897 * a shift is needed to adjust for this with values larger than 31. The
39898@@ -268,7 +254,26 @@ struct i40e_phy_info {
39899 I40E_PHY_TYPE_OFFSET)
39900 #define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
39901 I40E_PHY_TYPE_OFFSET)
39902+#define I40E_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC + \
39903+ I40E_PHY_TYPE_OFFSET)
39904+#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
39905+ I40E_PHY_TYPE_OFFSET)
39906+/* Offset for 2.5G/5G PHY Types value to bit number conversion */
39907+#define I40E_PHY_TYPE_OFFSET2 (-10)
39908+#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
39909+ I40E_PHY_TYPE_OFFSET2)
39910+#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
39911+ I40E_PHY_TYPE_OFFSET2)
39912 #define I40E_HW_CAP_MAX_GPIO 30
39913+enum i40e_acpi_programming_method {
39914+ I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
39915+ I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
39916+};
39917+
39918+#define I40E_WOL_SUPPORT_MASK 0x1
39919+#define I40E_ACPI_PROGRAMMING_METHOD_MASK 0x2
39920+#define I40E_PROXY_SUPPORT_MASK 0x4
39921+
39922 /* Capabilities of a PF or a VF or the whole device */
39923 struct i40e_hw_capabilities {
39924 u32 switch_mode;
39925@@ -276,6 +281,16 @@ struct i40e_hw_capabilities {
39926 #define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
39927 #define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
39928
39929+ /* Cloud filter modes:
39930+ * Mode1: Filter on L4 port only
39931+ * Mode2: Filter for non-tunneled traffic
39932+ * Mode3: Filter for tunnel traffic
39933+ */
39934+#define I40E_CLOUD_FILTER_MODE1 0x6
39935+#define I40E_CLOUD_FILTER_MODE2 0x7
39936+#define I40E_CLOUD_FILTER_MODE3 0x8
39937+#define I40E_SWITCH_MODE_MASK 0xF
39938+
39939 u32 management_mode;
39940 u32 mng_protocols_over_mctp;
39941 #define I40E_MNG_PROTOCOL_PLDM 0x2
39942@@ -336,6 +351,9 @@ struct i40e_hw_capabilities {
39943 u32 enabled_tcmap;
39944 u32 maxtc;
39945 u64 wr_csr_prot;
39946+ bool apm_wol_support;
39947+ enum i40e_acpi_programming_method acpi_prog_method;
39948+ bool proxy_support;
39949 };
39950
39951 struct i40e_mac_info {
39952@@ -385,6 +403,8 @@ enum i40e_nvmupd_cmd {
39953 I40E_NVMUPD_STATUS,
39954 I40E_NVMUPD_EXEC_AQ,
39955 I40E_NVMUPD_GET_AQ_RESULT,
39956+ I40E_NVMUPD_GET_AQ_EVENT,
39957+ I40E_NVMUPD_FEATURES,
39958 };
39959
39960 enum i40e_nvmupd_state {
39961@@ -404,18 +424,28 @@ enum i40e_nvmupd_state {
39962
39963 #define I40E_NVM_MOD_PNT_MASK 0xFF
39964
39965-#define I40E_NVM_TRANS_SHIFT 8
39966-#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
39967-#define I40E_NVM_CON 0x0
39968-#define I40E_NVM_SNT 0x1
39969-#define I40E_NVM_LCB 0x2
39970-#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
39971-#define I40E_NVM_ERA 0x4
39972-#define I40E_NVM_CSUM 0x8
39973-#define I40E_NVM_EXEC 0xf
39974+#define I40E_NVM_TRANS_SHIFT 8
39975+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
39976+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
39977+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
39978+ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
39979+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
39980+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
39981+#define I40E_NVM_CON 0x0
39982+#define I40E_NVM_SNT 0x1
39983+#define I40E_NVM_LCB 0x2
39984+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
39985+#define I40E_NVM_ERA 0x4
39986+#define I40E_NVM_CSUM 0x8
39987+#define I40E_NVM_AQE 0xe
39988+#define I40E_NVM_EXEC 0xf
39989+
39990+#define I40E_NVM_EXEC_GET_AQ_RESULT 0x0
39991+#define I40E_NVM_EXEC_FEATURES 0xe
39992+#define I40E_NVM_EXEC_STATUS 0xf
39993
39994 #define I40E_NVM_ADAPT_SHIFT 16
39995-#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
39996+#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
39997
39998 #define I40E_NVMUPD_MAX_DATA 4096
39999 #define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
40000@@ -428,6 +458,33 @@ struct i40e_nvm_access {
40001 u8 data[1];
40002 };
40003
40004+/* NVMUpdate features API */
40005+#define I40E_NVMUPD_FEATURES_API_VER_MAJOR 0
40006+#define I40E_NVMUPD_FEATURES_API_VER_MINOR 14
40007+#define I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12
40008+
40009+#define I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT BIT(0)
40010+
40011+struct i40e_nvmupd_features {
40012+ u8 major;
40013+ u8 minor;
40014+ u16 size;
40015+ u8 features[I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN];
40016+};
40017+
40018+/* (Q)SFP module access definitions */
40019+#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
40020+#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
40021+#define I40E_MODULE_TYPE_ADDR 0x00
40022+#define I40E_MODULE_REVISION_ADDR 0x01
40023+#define I40E_MODULE_SFF_8472_COMP 0x5E
40024+#define I40E_MODULE_SFF_8472_SWAP 0x5C
40025+#define I40E_MODULE_SFF_ADDR_MODE 0x04
40026+#define I40E_MODULE_SFF_DIAG_CAPAB 0x40
40027+#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
40028+#define I40E_MODULE_TYPE_QSFP28 0x11
40029+#define I40E_MODULE_QSFP_MAX_LEN 640
40030+
40031 /* PCI bus types */
40032 enum i40e_bus_type {
40033 i40e_bus_type_unknown = 0,
40034@@ -582,6 +639,7 @@ struct i40e_hw {
40035 /* state of nvm update process */
40036 enum i40e_nvmupd_state nvmupd_state;
40037 struct i40e_aq_desc nvm_wb_desc;
40038+ struct i40e_aq_desc nvm_aq_event_desc;
40039 struct i40e_virt_mem nvm_buff;
40040 bool nvm_release_on_done;
40041 u16 nvm_wait_opcode;
40042@@ -597,15 +655,34 @@ struct i40e_hw {
40043 struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
40044 struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
40045
40046+ /* WoL and proxy support */
40047+ u16 num_wol_proxy_filters;
40048+ u16 wol_proxy_vsi_seid;
40049+
40050 #define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
40051+#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
40052+#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
40053+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
40054+#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
40055+#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
40056+#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
40057+#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
40058 u64 flags;
40059
40060+ /* Used in set switch config AQ command */
40061+ u16 switch_tag;
40062+ u16 first_tag;
40063+ u16 second_tag;
40064+
40065+ /* NVMUpdate features */
40066+ struct i40e_nvmupd_features nvmupd_features;
40067+
40068 /* debug mask */
40069 u32 debug_mask;
40070 char err_str[16];
40071 };
40072
40073-static inline bool i40e_is_vf(struct i40e_hw *hw)
40074+static INLINE bool i40e_is_vf(struct i40e_hw *hw)
40075 {
40076 return (hw->mac.type == I40E_MAC_VF ||
40077 hw->mac.type == I40E_MAC_X722_VF);
40078@@ -705,32 +782,28 @@ enum i40e_rx_desc_status_bits {
40079 I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
40080 I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
40081 I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
40082- /* Note: Bit 8 is reserved in X710 and XL710 */
40083 I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
40084+
40085 I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
40086 I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
40087 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
40088 I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
40089 I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
40090- I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
40091- /* Note: For non-tunnel packets INT_UDP_0 is the right status for
40092- * UDP header
40093- */
40094+ I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
40095 I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
40096 I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
40097 };
40098
40099 #define I40E_RXD_QW1_STATUS_SHIFT 0
40100-#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
40101- << I40E_RXD_QW1_STATUS_SHIFT)
40102+#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \
40103+ I40E_RXD_QW1_STATUS_SHIFT)
40104
40105 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
40106 #define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
40107 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
40108
40109 #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
40110-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
40111- BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
40112+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
40113
40114 enum i40e_rx_desc_fltstat_values {
40115 I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
40116@@ -793,7 +866,8 @@ enum i40e_rx_l2_ptype {
40117 I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
40118 I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
40119 I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
40120- I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
40121+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153,
40122+ I40E_RX_PTYPE_PARSER_ABORTED = 255
40123 };
40124
40125 struct i40e_rx_ptype_decoded {
40126@@ -1044,8 +1118,7 @@ enum i40e_tx_ctx_desc_eipt_offload {
40127 #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
40128
40129 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
40130-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
40131- BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
40132+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
40133
40134 #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
40135
40136@@ -1151,10 +1224,6 @@ enum i40e_filter_program_desc_pcmd {
40137 I40E_TXD_FLTR_QW1_CMD_SHIFT)
40138 #define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
40139
40140-#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
40141- I40E_TXD_FLTR_QW1_CMD_SHIFT)
40142-#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
40143-
40144 #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
40145 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
40146 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
40147@@ -1268,11 +1337,14 @@ struct i40e_hw_port_stats {
40148 u32 rx_lpi_status;
40149 u64 tx_lpi_count; /* etlpic */
40150 u64 rx_lpi_count; /* erlpic */
40151+ u64 tx_lpi_duration;
40152+ u64 rx_lpi_duration;
40153 };
40154
40155 /* Checksum and Shadow RAM pointers */
40156 #define I40E_SR_NVM_CONTROL_WORD 0x00
40157-#define I40E_SR_EMP_MODULE_PTR 0x0F
40158+#define I40E_EMP_MODULE_PTR 0x0F
40159+#define I40E_SR_EMP_MODULE_PTR 0x48
40160 #define I40E_SR_PBA_FLAGS 0x15
40161 #define I40E_SR_PBA_BLOCK_PTR 0x16
40162 #define I40E_SR_BOOT_CONFIG_PTR 0x17
40163@@ -1280,17 +1352,27 @@ struct i40e_hw_port_stats {
40164 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
40165 #define I40E_SR_NVM_WAKE_ON_LAN 0x19
40166 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
40167+#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
40168+#define I40E_SR_NVM_MAP_VERSION 0x29
40169+#define I40E_SR_NVM_IMAGE_VERSION 0x2A
40170+#define I40E_SR_NVM_STRUCTURE_VERSION 0x2B
40171 #define I40E_SR_NVM_EETRACK_LO 0x2D
40172 #define I40E_SR_NVM_EETRACK_HI 0x2E
40173 #define I40E_SR_VPD_PTR 0x2F
40174 #define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
40175 #define I40E_SR_SW_CHECKSUM_WORD 0x3F
40176+#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
40177
40178 /* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
40179 #define I40E_SR_VPD_MODULE_MAX_SIZE 1024
40180 #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
40181 #define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
40182 #define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
40183+#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
40184+#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
40185+#define I40E_PTR_TYPE BIT(15)
40186+#define I40E_SR_OCP_CFG_WORD0 0x2B
40187+#define I40E_SR_OCP_ENABLED BIT(15)
40188
40189 /* Shadow RAM related */
40190 #define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
40191@@ -1405,7 +1487,8 @@ enum i40e_reset_type {
40192 };
40193
40194 /* IEEE 802.1AB LLDP Agent Variables from NVM */
40195-#define I40E_NVM_LLDP_CFG_PTR 0xD
40196+#define I40E_NVM_LLDP_CFG_PTR 0x06
40197+#define I40E_SR_LLDP_CFG_PTR 0x31
40198 struct i40e_lldp_variables {
40199 u16 length;
40200 u16 adminstatus;
40201@@ -1465,19 +1548,19 @@ struct i40e_lldp_variables {
40202 #define I40E_FLEX_57_SHIFT 6
40203 #define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
40204
40205-/* Version format for PPP */
40206-struct i40e_ppp_version {
40207+/* Version format for Dynamic Device Personalization(DDP) */
40208+struct i40e_ddp_version {
40209 u8 major;
40210 u8 minor;
40211 u8 update;
40212 u8 draft;
40213 };
40214
40215-#define I40E_PPP_NAME_SIZE 32
40216+#define I40E_DDP_NAME_SIZE 32
40217
40218 /* Package header */
40219 struct i40e_package_header {
40220- struct i40e_ppp_version version;
40221+ struct i40e_ddp_version version;
40222 u32 segment_count;
40223 u32 segment_offset[1];
40224 };
40225@@ -1489,16 +1572,18 @@ struct i40e_generic_seg_header {
40226 #define SEGMENT_TYPE_I40E 0x00000011
40227 #define SEGMENT_TYPE_X722 0x00000012
40228 u32 type;
40229- struct i40e_ppp_version version;
40230+ struct i40e_ddp_version version;
40231 u32 size;
40232- char name[I40E_PPP_NAME_SIZE];
40233+ char name[I40E_DDP_NAME_SIZE];
40234 };
40235
40236 struct i40e_metadata_segment {
40237 struct i40e_generic_seg_header header;
40238- struct i40e_ppp_version version;
40239+ struct i40e_ddp_version version;
40240+#define I40E_DDP_TRACKID_RDONLY 0
40241+#define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF
40242 u32 track_id;
40243- char name[I40E_PPP_NAME_SIZE];
40244+ char name[I40E_DDP_NAME_SIZE];
40245 };
40246
40247 struct i40e_device_id_entry {
40248@@ -1508,8 +1593,8 @@ struct i40e_device_id_entry {
40249
40250 struct i40e_profile_segment {
40251 struct i40e_generic_seg_header header;
40252- struct i40e_ppp_version version;
40253- char name[I40E_PPP_NAME_SIZE];
40254+ struct i40e_ddp_version version;
40255+ char name[I40E_DDP_NAME_SIZE];
40256 u32 device_table_count;
40257 struct i40e_device_id_entry device_table[1];
40258 };
40259@@ -1525,22 +1610,49 @@ struct i40e_profile_section_header {
40260 struct {
40261 #define SECTION_TYPE_INFO 0x00000010
40262 #define SECTION_TYPE_MMIO 0x00000800
40263+#define SECTION_TYPE_RB_MMIO 0x00001800
40264 #define SECTION_TYPE_AQ 0x00000801
40265+#define SECTION_TYPE_RB_AQ 0x00001801
40266 #define SECTION_TYPE_NOTE 0x80000000
40267 #define SECTION_TYPE_NAME 0x80000001
40268+#define SECTION_TYPE_PROTO 0x80000002
40269+#define SECTION_TYPE_PCTYPE 0x80000003
40270+#define SECTION_TYPE_PTYPE 0x80000004
40271 u32 type;
40272 u32 offset;
40273 u32 size;
40274 } section;
40275 };
40276
40277+struct i40e_profile_tlv_section_record {
40278+ u8 rtype;
40279+ u8 type;
40280+ u16 len;
40281+ u8 data[12];
40282+};
40283+
40284+/* Generic AQ section in profile */
40285+struct i40e_profile_aq_section {
40286+ u16 opcode;
40287+ u16 flags;
40288+ u8 param[16];
40289+ u16 datalen;
40290+ u8 data[1];
40291+};
40292+
40293 struct i40e_profile_info {
40294 u32 track_id;
40295- struct i40e_ppp_version version;
40296+ struct i40e_ddp_version version;
40297 u8 op;
40298-#define I40E_PPP_ADD_TRACKID 0x01
40299-#define I40E_PPP_REMOVE_TRACKID 0x02
40300+#define I40E_DDP_ADD_TRACKID 0x01
40301+#define I40E_DDP_REMOVE_TRACKID 0x02
40302 u8 reserved[7];
40303- u8 name[I40E_PPP_NAME_SIZE];
40304+ u8 name[I40E_DDP_NAME_SIZE];
40305 };
40306+
40307+#define I40E_BCM_PHY_PCS_STATUS1_PAGE 0x3
40308+#define I40E_BCM_PHY_PCS_STATUS1_REG 0x0001
40309+#define I40E_BCM_PHY_PCS_STATUS1_RX_LPI BIT(8)
40310+#define I40E_BCM_PHY_PCS_STATUS1_TX_LPI BIT(9)
40311+
40312 #endif /* _I40E_TYPE_H_ */
40313diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
40314index 4d1e670f4..6b2966ffe 100644
40315--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
40316+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
40317@@ -1,28 +1,5 @@
40318-/*******************************************************************************
40319- *
40320- * Intel Ethernet Controller XL710 Family Linux Driver
40321- * Copyright(c) 2013 - 2016 Intel Corporation.
40322- *
40323- * This program is free software; you can redistribute it and/or modify it
40324- * under the terms and conditions of the GNU General Public License,
40325- * version 2, as published by the Free Software Foundation.
40326- *
40327- * This program is distributed in the hope it will be useful, but WITHOUT
40328- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
40329- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
40330- * more details.
40331- *
40332- * You should have received a copy of the GNU General Public License along
40333- * with this program. If not, see <http://www.gnu.org/licenses/>.
40334- *
40335- * The full GNU General Public License is included in this distribution in
40336- * the file called "COPYING".
40337- *
40338- * Contact Information:
40339- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
40340- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
40341- *
40342- ******************************************************************************/
40343+// SPDX-License-Identifier: GPL-2.0
40344+/* Copyright(c) 2013 - 2020 Intel Corporation. */
40345
40346 #include "i40e.h"
40347
40348@@ -31,8 +8,8 @@
40349 /**
40350 * i40e_vc_vf_broadcast
40351 * @pf: pointer to the PF structure
40352- * @opcode: operation code
40353- * @retval: return value
40354+ * @v_opcode: operation code
40355+ * @v_retval: return value
40356 * @msg: pointer to the msg buffer
40357 * @msglen: msg length
40358 *
40359@@ -62,6 +39,39 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
40360 }
40361 }
40362
40363+/**
40364+ * i40e_vc_link_speed2mbps - Convert the AdminQ link_speed bit representation
40365+ * to an integer value in Mbps
40366+ * @link_speed: the speed to convert
40367+ *
40368+ * Returns the speed as direct value of Mbps.
40369+ **/
40370+static INLINE u32
40371+i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
40372+{
40373+ switch (link_speed) {
40374+ case I40E_LINK_SPEED_100MB:
40375+ return SPEED_100;
40376+ case I40E_LINK_SPEED_1GB:
40377+ return SPEED_1000;
40378+ case I40E_LINK_SPEED_2_5GB:
40379+ return SPEED_2500;
40380+ case I40E_LINK_SPEED_5GB:
40381+ return SPEED_5000;
40382+ case I40E_LINK_SPEED_10GB:
40383+ return SPEED_10000;
40384+ case I40E_LINK_SPEED_20GB:
40385+ return SPEED_20000;
40386+ case I40E_LINK_SPEED_25GB:
40387+ return SPEED_25000;
40388+ case I40E_LINK_SPEED_40GB:
40389+ return SPEED_40000;
40390+ case I40E_LINK_SPEED_UNKNOWN:
40391+ return SPEED_UNKNOWN;
40392+ }
40393+ return SPEED_UNKNOWN;
40394+}
40395+
40396 /**
40397 * i40e_vc_notify_vf_link_state
40398 * @vf: pointer to the VF structure
40399@@ -78,18 +88,64 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
40400
40401 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
40402 pfe.severity = PF_EVENT_SEVERITY_INFO;
40403- if (vf->link_forced) {
40404+
40405+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
40406+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
40407+ /* Always report link is down if the VF queues aren't enabled */
40408+ if (!vf->queues_enabled) {
40409+ pfe.event_data.link_event_adv.link_status = false;
40410+ pfe.event_data.link_event_adv.link_speed = 0;
40411+#ifdef HAVE_NDO_SET_VF_LINK_STATE
40412+ } else if (vf->link_forced) {
40413+ pfe.event_data.link_event_adv.link_status = vf->link_up;
40414+ pfe.event_data.link_event_adv.link_speed = vf->link_up ?
40415+ i40e_vc_link_speed2mbps(ls->link_speed) : 0;
40416+#endif
40417+ } else {
40418+ pfe.event_data.link_event_adv.link_status =
40419+ ls->link_info & I40E_AQ_LINK_UP;
40420+ pfe.event_data.link_event_adv.link_speed =
40421+ i40e_vc_link_speed2mbps(ls->link_speed);
40422+ }
40423+ } else {
40424+ /* Always report link is down if the VF queues aren't enabled */
40425+ if (!vf->queues_enabled) {
40426+ pfe.event_data.link_event.link_status = false;
40427+ pfe.event_data.link_event.link_speed = 0;
40428+#ifdef HAVE_NDO_SET_VF_LINK_STATE
40429+ } else if (vf->link_forced) {
40430+ pfe.event_data.link_event.link_status = vf->link_up;
40431+ pfe.event_data.link_event.link_speed = (vf->link_up ?
40432+ i40e_virtchnl_link_speed(ls->link_speed) : 0);
40433+#endif
40434+ } else {
40435+ pfe.event_data.link_event.link_status =
40436+ ls->link_info & I40E_AQ_LINK_UP;
40437+ pfe.event_data.link_event.link_speed =
40438+ i40e_virtchnl_link_speed(ls->link_speed);
40439+ }
40440+ }
40441+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
40442+ /* Always report link is down if the VF queues aren't enabled */
40443+ if (!vf->queues_enabled) {
40444+ pfe.event_data.link_event.link_status = false;
40445+ pfe.event_data.link_event.link_speed = 0;
40446+#ifdef HAVE_NDO_SET_VF_LINK_STATE
40447+ } else if (vf->link_forced) {
40448 pfe.event_data.link_event.link_status = vf->link_up;
40449- pfe.event_data.link_event.link_speed =
40450- (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
40451+ pfe.event_data.link_event.link_speed = (vf->link_up ?
40452+ i40e_virtchnl_link_speed(ls->link_speed) : 0);
40453+#endif
40454 } else {
40455 pfe.event_data.link_event.link_status =
40456 ls->link_info & I40E_AQ_LINK_UP;
40457 pfe.event_data.link_event.link_speed =
40458- (enum virtchnl_link_speed)ls->link_speed;
40459+ i40e_virtchnl_link_speed(ls->link_speed);
40460 }
40461+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
40462+
40463 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
40464- 0, (u8 *)&pfe, sizeof(pfe), NULL);
40465+ I40E_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL);
40466 }
40467
40468 /**
40469@@ -118,7 +174,7 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
40470
40471 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
40472 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
40473- i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
40474+ i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, I40E_SUCCESS,
40475 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
40476 }
40477
40478@@ -147,22 +203,51 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
40479 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
40480 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
40481 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
40482- 0, (u8 *)&pfe,
40483+ I40E_SUCCESS, (u8 *)&pfe,
40484 sizeof(struct virtchnl_pf_event), NULL);
40485 }
40486 /***********************misc routines*****************************/
40487
40488 /**
40489- * i40e_vc_disable_vf
40490- * @pf: pointer to the PF info
40491+ * i40e_vc_reset_vf
40492 * @vf: pointer to the VF info
40493+ * @notify_vf: notify vf about reset or not
40494 *
40495- * Disable the VF through a SW reset
40496+ * Reset VF handler.
40497 **/
40498-static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
40499+static inline void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
40500 {
40501- i40e_vc_notify_vf_reset(vf);
40502- i40e_reset_vf(vf, false);
40503+ struct i40e_pf *pf = vf->pf;
40504+ int i;
40505+
40506+ if (notify_vf)
40507+ i40e_vc_notify_vf_reset(vf);
40508+
40509+ /* We want to ensure that an actual reset is initiated after this
40510+ * function is called. However, we do not want to wait forever, so
40511+ * we'll give a reasonable time and print a message if we failed to
40512+ * ensure a reset.
40513+ */
40514+ for (i = 0; i < 20; i++) {
40515+ /* If the PF is in the VFs-releasing state, resetting the VF
40516+ * is impossible, so leave it.
40517+ */
40518+ if (test_bit(__I40E_VFS_RELEASING, pf->state))
40519+ return;
40520+
40521+ if (i40e_reset_vf(vf, false))
40522+ return;
40523+ usleep_range(10000, 20000);
40524+ }
40525+
40526+ if (notify_vf)
40527+ dev_warn(&vf->pf->pdev->dev,
40528+ "Failed to initiate reset for VF %d after 200 milliseconds\n",
40529+ vf->vf_id);
40530+ else
40531+ dev_dbg(&vf->pf->pdev->dev,
40532+ "Failed to initiate reset for VF %d after 200 milliseconds\n",
40533+ vf->vf_id);
40534 }
40535
40536 /**
40537@@ -189,7 +274,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
40538 * check for the valid queue id
40539 **/
40540 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
40541- u8 qid)
40542+ u16 qid)
40543 {
40544 struct i40e_pf *pf = vf->pf;
40545 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
40546@@ -204,7 +289,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
40547 *
40548 * check for the valid vector id
40549 **/
40550-static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
40551+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
40552 {
40553 struct i40e_pf *pf = vf->pf;
40554
40555@@ -242,6 +327,38 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
40556 return pf_queue_id;
40557 }
40558
40559+/**
40560+ * i40e_get_real_pf_qid
40561+ * @vf: pointer to the VF info
40562+ * @vsi_id: vsi id
40563+ * @queue_id: queue number
40564+ *
40565+ * wrapper function to get pf_queue_id handling ADq code as well
40566+ **/
40567+static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
40568+{
40569+ int i;
40570+
40571+ if (vf->adq_enabled) {
40572+ /* Although the VF considers all the queues (1 to 16) as its
40573+ * own, they may actually belong to different VSIs (up to 4).
40574+ * We need to find which queue belongs to which VSI.
40575+ */
40576+ for (i = 0; i < vf->num_tc; i++) {
40577+ if (queue_id < vf->ch[i].num_qps) {
40578+ vsi_id = vf->ch[i].vsi_id;
40579+ break;
40580+ }
40581+ /* find the right queue id, relative to the
40582+ * given VSI.
40583+ */
40584+ queue_id -= vf->ch[i].num_qps;
40585+ }
40586+ }
40587+
40588+ return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
40589+}
40590+
40591 /**
40592 * i40e_config_irq_link_list
40593 * @vf: pointer to the VF info
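i40e_get_real_pf_qid(), added in the hunk above, resolves a VF-relative queue id when ADq is enabled by walking the per-TC queue counts and subtracting until the id falls inside one channel's VSI. The minimal stand-alone sketch below (not part of the patch) reproduces that walk; the per-TC queue counts and the queue id are hypothetical example values.

#include <stdio.h>

int main(void)
{
    /* Hypothetical per-TC queue counts for a VF with three traffic classes. */
    unsigned int num_qps[3] = { 4, 4, 8 };
    unsigned int num_tc = 3;
    unsigned int queue_id = 9;   /* VF-relative queue id to resolve */

    for (unsigned int tc = 0; tc < num_tc; tc++) {
        if (queue_id < num_qps[tc]) {
            /* With the counts above, VF queue 9 lands in TC 2, qid 1. */
            printf("TC %u, VSI-relative qid %u\n", tc, queue_id);
            return 0;
        }
        queue_id -= num_qps[tc];   /* skip past this TC's queues */
    }
    printf("queue id out of range\n");
    return 1;
}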
40594@@ -258,7 +375,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
40595 struct i40e_hw *hw = &pf->hw;
40596 u16 vsi_queue_id, pf_queue_id;
40597 enum i40e_queue_type qtype;
40598- u16 next_q, vector_id;
40599+ u16 next_q, vector_id, size;
40600 u32 reg, reg_idx;
40601 u16 itr_idx = 0;
40602
40603@@ -288,17 +405,19 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
40604 vsi_queue_id + 1));
40605 }
40606
40607- next_q = find_first_bit(&linklistmap,
40608- (I40E_MAX_VSI_QP *
40609- I40E_VIRTCHNL_SUPPORTED_QTYPES));
40610+ size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
40611+ next_q = find_first_bit(&linklistmap, size);
40612+ if (unlikely(next_q == size))
40613+ goto irq_list_done;
40614+
40615 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
40616 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
40617- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
40618+ pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
40619 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
40620
40621 wr32(hw, reg_idx, reg);
40622
40623- while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
40624+ while (next_q < size) {
40625 switch (qtype) {
40626 case I40E_QUEUE_TYPE_RX:
40627 reg_idx = I40E_QINT_RQCTL(pf_queue_id);
40628@@ -312,16 +431,13 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
40629 break;
40630 }
40631
40632- next_q = find_next_bit(&linklistmap,
40633- (I40E_MAX_VSI_QP *
40634- I40E_VIRTCHNL_SUPPORTED_QTYPES),
40635- next_q + 1);
40636- if (next_q <
40637- (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
40638+ next_q = find_next_bit(&linklistmap, size, next_q + 1);
40639+ if (next_q < size) {
40640 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
40641 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
40642- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
40643- vsi_queue_id);
40644+ pf_queue_id = i40e_get_real_pf_qid(vf,
40645+ vsi_id,
40646+ vsi_queue_id);
40647 } else {
40648 pf_queue_id = I40E_QUEUE_END_OF_LIST;
40649 qtype = 0;
40650@@ -352,136 +468,6 @@ irq_list_done:
40651 i40e_flush(hw);
40652 }
40653
40654-/**
40655- * i40e_release_iwarp_qvlist
40656- * @vf: pointer to the VF.
40657- *
40658- **/
40659-static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
40660-{
40661- struct i40e_pf *pf = vf->pf;
40662- struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
40663- u32 msix_vf;
40664- u32 i;
40665-
40666- if (!vf->qvlist_info)
40667- return;
40668-
40669- msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
40670- for (i = 0; i < qvlist_info->num_vectors; i++) {
40671- struct virtchnl_iwarp_qv_info *qv_info;
40672- u32 next_q_index, next_q_type;
40673- struct i40e_hw *hw = &pf->hw;
40674- u32 v_idx, reg_idx, reg;
40675-
40676- qv_info = &qvlist_info->qv_info[i];
40677- if (!qv_info)
40678- continue;
40679- v_idx = qv_info->v_idx;
40680- if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
40681- /* Figure out the queue after CEQ and make that the
40682- * first queue.
40683- */
40684- reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
40685- reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
40686- next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
40687- >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
40688- next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
40689- >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
40690-
40691- reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
40692- reg = (next_q_index &
40693- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
40694- (next_q_type <<
40695- I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
40696-
40697- wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
40698- }
40699- }
40700- kfree(vf->qvlist_info);
40701- vf->qvlist_info = NULL;
40702-}
40703-
40704-/**
40705- * i40e_config_iwarp_qvlist
40706- * @vf: pointer to the VF info
40707- * @qvlist_info: queue and vector list
40708- *
40709- * Return 0 on success or < 0 on error
40710- **/
40711-static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
40712- struct virtchnl_iwarp_qvlist_info *qvlist_info)
40713-{
40714- struct i40e_pf *pf = vf->pf;
40715- struct i40e_hw *hw = &pf->hw;
40716- struct virtchnl_iwarp_qv_info *qv_info;
40717- u32 v_idx, i, reg_idx, reg;
40718- u32 next_q_idx, next_q_type;
40719- u32 msix_vf, size;
40720-
40721- size = sizeof(struct virtchnl_iwarp_qvlist_info) +
40722- (sizeof(struct virtchnl_iwarp_qv_info) *
40723- (qvlist_info->num_vectors - 1));
40724- vf->qvlist_info = kzalloc(size, GFP_KERNEL);
40725- vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
40726-
40727- msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
40728- for (i = 0; i < qvlist_info->num_vectors; i++) {
40729- qv_info = &qvlist_info->qv_info[i];
40730- if (!qv_info)
40731- continue;
40732- v_idx = qv_info->v_idx;
40733-
40734- /* Validate vector id belongs to this vf */
40735- if (!i40e_vc_isvalid_vector_id(vf, v_idx))
40736- goto err;
40737-
40738- vf->qvlist_info->qv_info[i] = *qv_info;
40739-
40740- reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
40741- /* We might be sharing the interrupt, so get the first queue
40742- * index and type, push it down the list by adding the new
40743- * queue on top. Also link it with the new queue in CEQCTL.
40744- */
40745- reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
40746- next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
40747- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
40748- next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
40749- I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
40750-
40751- if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
40752- reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
40753- reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
40754- (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
40755- (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
40756- (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
40757- (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
40758- wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
40759-
40760- reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
40761- reg = (qv_info->ceq_idx &
40762- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
40763- (I40E_QUEUE_TYPE_PE_CEQ <<
40764- I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
40765- wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
40766- }
40767-
40768- if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
40769- reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
40770- (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
40771- (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
40772-
40773- wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
40774- }
40775- }
40776-
40777- return 0;
40778-err:
40779- kfree(vf->qvlist_info);
40780- vf->qvlist_info = NULL;
40781- return -EINVAL;
40782-}
40783-
40784 /**
40785 * i40e_config_vsi_tx_queue
40786 * @vf: pointer to the VF info
40787@@ -599,7 +585,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
40788 }
40789 rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
40790
40791- /* set split mode 10b */
40792+ /* set splitalways mode 10b */
40793 rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
40794 }
40795
40796@@ -621,7 +607,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
40797 rx_ctx.dsize = 1;
40798
40799 /* default values */
40800- rx_ctx.lrxqthresh = 2;
40801+ rx_ctx.lrxqthresh = 1;
40802 rx_ctx.crcstrip = 1;
40803 rx_ctx.prefena = 1;
40804 rx_ctx.l2tsel = 1;
40805@@ -651,2644 +637,6516 @@ error_param:
40806 }
40807
40808 /**
40809- * i40e_alloc_vsi_res
40810- * @vf: pointer to the VF info
40811- * @type: type of VSI to allocate
40812+ * i40e_validate_vf
40813+ * @pf: the physical function
40814+ * @vf_id: VF identifier
40815 *
40816- * alloc VF vsi context & resources
40817+ * Check that the VF is enabled and the vsi exists.
40818+ *
40819+ * Returns 0 on success, negative on failure
40820 **/
40821-static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
40822+static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
40823 {
40824- struct i40e_mac_filter *f = NULL;
40825- struct i40e_pf *pf = vf->pf;
40826 struct i40e_vsi *vsi;
40827+ struct i40e_vf *vf;
40828 int ret = 0;
40829
40830- vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
40831-
40832- if (!vsi) {
40833+ if (vf_id >= pf->num_alloc_vfs) {
40834 dev_err(&pf->pdev->dev,
40835- "add vsi failed for VF %d, aq_err %d\n",
40836- vf->vf_id, pf->hw.aq.asq_last_status);
40837- ret = -ENOENT;
40838- goto error_alloc_vsi_res;
40839+ "Invalid VF Identifier %d\n", vf_id);
40840+ ret = -EINVAL;
40841+ goto err_out;
40842 }
40843- if (type == I40E_VSI_SRIOV) {
40844- u64 hena = i40e_pf_get_default_rss_hena(pf);
40845- u8 broadcast[ETH_ALEN];
40846+ vf = &pf->vf[vf_id];
40847+ vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
40848+ if (!vsi)
40849+ ret = -EINVAL;
40850+err_out:
40851+ return ret;
40852+}
40853
40854- vf->lan_vsi_idx = vsi->idx;
40855- vf->lan_vsi_id = vsi->id;
40856- /* If the port VLAN has been configured and then the
40857- * VF driver was removed then the VSI port VLAN
40858- * configuration was destroyed. Check if there is
40859- * a port VLAN and restore the VSI configuration if
40860- * needed.
40861- */
40862- if (vf->port_vlan_id)
40863- i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
40864+#ifdef HAVE_NDO_SET_VF_LINK_STATE
40865
40866- spin_lock_bh(&vsi->mac_filter_hash_lock);
40867- if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
40868- f = i40e_add_mac_filter(vsi,
40869- vf->default_lan_addr.addr);
40870- if (!f)
40871- dev_info(&pf->pdev->dev,
40872- "Could not add MAC filter %pM for VF %d\n",
40873- vf->default_lan_addr.addr, vf->vf_id);
40874- }
40875- eth_broadcast_addr(broadcast);
40876- f = i40e_add_mac_filter(vsi, broadcast);
40877- if (!f)
40878- dev_info(&pf->pdev->dev,
40879- "Could not allocate VF broadcast filter\n");
40880- spin_unlock_bh(&vsi->mac_filter_hash_lock);
40881- wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
40882- wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
40883- }
40884+/**
40885+ * i40e_configure_vf_loopback
40886+ * @vsi: VF VSI to configure
40887+ * @vf_id: VF identifier
40888+ * @enable: enable or disable
40889+ *
40890+ * This function configures the VF VSI with the loopback settings
40891+ *
40892+ * Returns 0 on success, negative on failure
40893+ *
40894+ **/
40895+static int i40e_configure_vf_loopback(struct i40e_vsi *vsi, int vf_id,
40896+ bool enable)
40897+{
40898+ struct i40e_pf *pf = vsi->back;
40899+ struct i40e_vsi_context ctxt;
40900+ int ret = 0;
40901
40902- /* program mac filter */
40903- ret = i40e_sync_vsi_filters(vsi);
40904- if (ret)
40905- dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
40906+ vsi->info.valid_sections = CPU_TO_LE16(I40E_AQ_VSI_PROP_SWITCH_VALID);
40907+ if (enable)
40908+ vsi->info.switch_id |=
40909+ CPU_TO_LE16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
40910+ else
40911+ vsi->info.switch_id &=
40912+ ~CPU_TO_LE16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
40913
40914- /* Set VF bandwidth if specified */
40915- if (vf->tx_rate) {
40916- ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
40917- vf->tx_rate / 50, 0, NULL);
40918- if (ret)
40919- dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
40920- vf->vf_id, ret);
40921+ memset(&ctxt, 0, sizeof(ctxt));
40922+ ctxt.seid = vsi->seid;
40923+ ctxt.pf_num = vsi->back->hw.pf_id;
40924+ ctxt.info = vsi->info;
40925+ ret = i40e_aq_update_vsi_params(&pf->hw, &ctxt, NULL);
40926+ if (ret) {
40927+ dev_err(&pf->pdev->dev, "Error %d configuring loopback for VF %d\n",
40928+ ret, vf_id);
40929+ ret = -EIO;
40930 }
40931-
40932-error_alloc_vsi_res:
40933 return ret;
40934 }
40935
40936 /**
40937- * i40e_enable_vf_mappings
40938- * @vf: pointer to the VF info
40939+ * i40e_configure_vf_vlan_stripping
40940+ * @vsi: VF VSI to configure
40941+ * @vf_id: VF identifier
40942+ * @enable: enable or disable
40943 *
40944- * enable VF mappings
40945+ * This function enables or disables vlan stripping on the VF
40946+ *
40947+ * Returns 0 on success, negative on failure
40948 **/
40949-static void i40e_enable_vf_mappings(struct i40e_vf *vf)
40950+static int i40e_configure_vf_vlan_stripping(struct i40e_vsi *vsi, int vf_id,
40951+ bool enable)
40952 {
40953- struct i40e_pf *pf = vf->pf;
40954- struct i40e_hw *hw = &pf->hw;
40955- u32 reg, total_queue_pairs = 0;
40956- int j;
40957-
40958- /* Tell the hardware we're using noncontiguous mapping. HW requires
40959- * that VF queues be mapped using this method, even when they are
40960- * contiguous in real life
40961- */
40962- i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
40963- I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
40964+ struct i40e_pf *pf = vsi->back;
40965+ struct i40e_vsi_context ctxt;
40966+ int ret = 0;
40967+ u8 flag;
40968
40969- /* enable VF vplan_qtable mappings */
40970- reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
40971- wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
40972+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
40973+ if (enable) {
40974+ /* Don't enable vlan stripping if port vlan is set */
40975+ if (vsi->info.pvid) {
40976+ dev_err(&pf->pdev->dev,
40977+ "Cannot enable vlan stripping when port VLAN is set\n");
40978+ ret = -EINVAL;
40979+ goto err_out;
40980+ }
40981+ flag = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
40982+ } else {
40983+ flag = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
40984+ }
40985+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | flag;
40986+ ctxt.seid = vsi->seid;
40987+ ctxt.info = vsi->info;
40988+ ret = i40e_aq_update_vsi_params(&pf->hw, &ctxt, NULL);
40989+ if (ret) {
40990+ dev_err(&pf->pdev->dev, "Error %d configuring vlan stripping for VF %d\n",
40991+ ret, vf_id);
40992+ ret = -EIO;
40993+ }
40994+err_out:
40995+ return ret;
40996+}
40997
40998- /* map PF queues to VF queues */
40999- for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
41000- u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
41001+/**
41002+ * i40e_configure_vf_promisc_mode
41003+ * @vf: VF
41004+ * @vsi: VF VSI to configure
41005+ * @promisc_mode: promisc mode to configure
41006+ *
41007+ * This function configures the requested promisc mode for a vf
41008+ *
41009+ * Returns 0 on success, negative on failure
41010+ **/
41011+static int i40e_configure_vf_promisc_mode(struct i40e_vf *vf,
41012+ struct i40e_vsi *vsi,
41013+ u8 promisc_mode)
41014+{
41015+ struct i40e_pf *pf = vsi->back;
41016+ int ret = 0;
41017
41018- reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
41019- wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
41020- total_queue_pairs++;
41021+ if (promisc_mode & VFD_PROMISC_MULTICAST) {
41022+ ret = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, vsi->seid,
41023+ true, NULL);
41024+ if (ret)
41025+ goto err;
41026+ vf->promisc_mode |= VFD_PROMISC_MULTICAST;
41027+ } else {
41028+ ret = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, vsi->seid,
41029+ false, NULL);
41030+ if (ret)
41031+ goto err;
41032+ vf->promisc_mode &= ~VFD_PROMISC_MULTICAST;
41033 }
41034-
41035- /* map PF queues to VSI */
41036- for (j = 0; j < 7; j++) {
41037- if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
41038- reg = 0x07FF07FF; /* unused */
41039- } else {
41040- u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
41041- j * 2);
41042- reg = qid;
41043- qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
41044- (j * 2) + 1);
41045- reg |= qid << 16;
41046- }
41047- i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
41048- reg);
41049+ if (promisc_mode & VFD_PROMISC_UNICAST) {
41050+ ret = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, vsi->seid,
41051+ true, NULL, true);
41052+ if (ret)
41053+ goto err;
41054+ vf->promisc_mode |= VFD_PROMISC_UNICAST;
41055+ } else {
41056+ ret = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, vsi->seid,
41057+ false, NULL, true);
41058+ if (ret)
41059+ goto err;
41060+ vf->promisc_mode &= ~VFD_PROMISC_UNICAST;
41061 }
41062+err:
41063+ if (ret)
41064+ dev_err(&pf->pdev->dev, "Error %d configuring promisc mode for VF %d\n",
41065+ ret, vf->vf_id);
41066
41067- i40e_flush(hw);
41068+ return ret;
41069 }
41070
41071 /**
41072- * i40e_disable_vf_mappings
41073- * @vf: pointer to the VF info
41074+ * i40e_add_ingress_egress_mirror
41075+ * @src_vsi: VSI to mirror from
41076+ * @mirror_vsi: VSI to mirror to
41077+ * @rule_type: rule type to configure
41078+ * @rule_id: rule id to store
41079 *
41080- * disable VF mappings
41081+ * This function adds the requested ingress/egress mirror for a VSI
41082+ *
41083+ * Returns 0 on success, negative on failure
41084 **/
41085-static void i40e_disable_vf_mappings(struct i40e_vf *vf)
41086+static int i40e_add_ingress_egress_mirror(struct i40e_vsi *src_vsi,
41087+ struct i40e_vsi *mirror_vsi,
41088+ u16 rule_type, u16 *rule_id)
41089 {
41090- struct i40e_pf *pf = vf->pf;
41091- struct i40e_hw *hw = &pf->hw;
41092- int i;
41093-
41094- /* disable qp mappings */
41095- wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
41096- for (i = 0; i < I40E_MAX_VSI_QP; i++)
41097- wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
41098- I40E_QUEUE_END_OF_LIST);
41099- i40e_flush(hw);
41100-}
41101-
41102-/**
41103- * i40e_free_vf_res
41104- * @vf: pointer to the VF info
41105- *
41106- * free VF resources
41107- **/
41108-static void i40e_free_vf_res(struct i40e_vf *vf)
41109-{
41110- struct i40e_pf *pf = vf->pf;
41111- struct i40e_hw *hw = &pf->hw;
41112- u32 reg_idx, reg;
41113- int i, msix_vf;
41114-
41115- /* Start by disabling VF's configuration API to prevent the OS from
41116- * accessing the VF's VSI after it's freed / invalidated.
41117- */
41118- clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
41119-
41120- /* free vsi & disconnect it from the parent uplink */
41121- if (vf->lan_vsi_idx) {
41122- i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
41123- vf->lan_vsi_idx = 0;
41124- vf->lan_vsi_id = 0;
41125- vf->num_mac = 0;
41126+ u16 dst_seid, rules_used, rules_free, sw_seid;
41127+ struct i40e_pf *pf = src_vsi->back;
41128+ int ret, num = 0, cnt = 1;
41129+ int *vsi_ingress_vlan;
41130+ int *vsi_egress_vlan;
41131+ __le16 *mr_list;
41132+
41133+ mr_list = kcalloc(cnt, sizeof(__le16), GFP_KERNEL);
41134+ if (!mr_list) {
41135+ ret = -ENOMEM;
41136+ goto err_out;
41137 }
41138- msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
41139
41140- /* disable interrupts so the VF starts in a known state */
41141- for (i = 0; i < msix_vf; i++) {
41142- /* format is same for both registers */
41143- if (0 == i)
41144- reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
41145- else
41146- reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
41147- (vf->vf_id))
41148- + (i - 1));
41149- wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
41150- i40e_flush(hw);
41151+ if (src_vsi->type == I40E_VSI_MAIN) {
41152+ vsi_ingress_vlan = &pf->ingress_vlan;
41153+ vsi_egress_vlan = &pf->egress_vlan;
41154+ } else {
41155+ vsi_ingress_vlan = &pf->vf[src_vsi->vf_id].ingress_vlan;
41156+ vsi_egress_vlan = &pf->vf[src_vsi->vf_id].egress_vlan;
41157 }
41158
41159- /* clear the irq settings */
41160- for (i = 0; i < msix_vf; i++) {
41161- /* format is same for both registers */
41162- if (0 == i)
41163- reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
41164+ if (I40E_IS_MIRROR_VLAN_ID_VALID(*vsi_ingress_vlan)) {
41165+ if (src_vsi->type == I40E_VSI_MAIN)
41166+ dev_err(&pf->pdev->dev,
41167+ "PF already has an ingress mirroring configured, only one rule per PF is supported!\n");
41168 else
41169- reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
41170- (vf->vf_id))
41171- + (i - 1));
41172- reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
41173- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
41174- wr32(hw, reg_idx, reg);
41175- i40e_flush(hw);
41176+ dev_err(&pf->pdev->dev,
41177+ "VF=%d already has an ingress mirroring configured, only one rule per VF is supported!\n",
41178+ src_vsi->vf_id);
41179+ ret = -EPERM;
41180+ goto err_out;
41181+ } else if (I40E_IS_MIRROR_VLAN_ID_VALID(*vsi_egress_vlan)) {
41182+ if (src_vsi->type == I40E_VSI_MAIN)
41183+ dev_err(&pf->pdev->dev,
41184+ "PF already has an egress mirroring configured, only one rule per PF is supported!\n");
41185+ else
41186+ dev_err(&pf->pdev->dev,
41187+ "VF=%d already has an egress mirroring configured, only one rule per VF is supported!\n",
41188+ src_vsi->vf_id);
41189+ ret = -EPERM;
41190+ goto err_out;
41191 }
41192- /* reset some of the state variables keeping track of the resources */
41193- vf->num_queue_pairs = 0;
41194- vf->vf_states = 0;
41195+
41196+ sw_seid = src_vsi->uplink_seid;
41197+ dst_seid = mirror_vsi->seid;
41198+ mr_list[num] = CPU_TO_LE16(src_vsi->seid);
41199+ ret = i40e_aq_add_mirrorrule(&pf->hw, sw_seid,
41200+ rule_type, dst_seid,
41201+ cnt, mr_list, NULL,
41202+ rule_id, &rules_used,
41203+ &rules_free);
41204+ kfree(mr_list);
41205+err_out:
41206+ return ret;
41207 }
41208
41209 /**
41210- * i40e_alloc_vf_res
41211- * @vf: pointer to the VF info
41212+ * i40e_del_ingress_egress_mirror
41213+ * @src_vsi: the mirrored VSI
41214+ * @rule_type: rule type to configure
41215+ * @rule_id: rule id to delete
41216 *
41217- * allocate VF resources
41218+ * This function deletes the ingress/egress mirror on a VSI
41219+ *
41220+ * Returns 0 on success, negative on failure
41221 **/
41222-static int i40e_alloc_vf_res(struct i40e_vf *vf)
41223+static int i40e_del_ingress_egress_mirror(struct i40e_vsi *src_vsi,
41224+ u16 rule_type, u16 rule_id)
41225 {
41226- struct i40e_pf *pf = vf->pf;
41227- int total_queue_pairs = 0;
41228+ u16 rules_used, rules_free, sw_seid;
41229+ struct i40e_pf *pf = src_vsi->back;
41230 int ret;
41231
41232- /* allocate hw vsi context & associated resources */
41233- ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
41234- if (ret)
41235- goto error_alloc;
41236- total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
41237-
41238- if (vf->trusted)
41239- set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
41240- else
41241- clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
41242-
41243- /* store the total qps number for the runtime
41244- * VF req validation
41245- */
41246- vf->num_queue_pairs = total_queue_pairs;
41247-
41248- /* VF is now completely initialized */
41249- set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
41250-
41251-error_alloc:
41252- if (ret)
41253- i40e_free_vf_res(vf);
41254-
41255+ sw_seid = src_vsi->uplink_seid;
41256+ ret = i40e_aq_delete_mirrorrule(&pf->hw, sw_seid, rule_type,
41257+ rule_id, 0, NULL, NULL,
41258+ &rules_used, &rules_free);
41259 return ret;
41260 }
41261
41262-#define VF_DEVICE_STATUS 0xAA
41263-#define VF_TRANS_PENDING_MASK 0x20
41264 /**
41265- * i40e_quiesce_vf_pci
41266- * @vf: pointer to the VF structure
41267+ * i40e_restore_ingress_egress_mirror
41268+ * @src_vsi: the mirrored VSI
41269+ * @mirror: VSI to mirror to
41270+ * @rule_type: rule type to configure
41271+ * @rule_id: rule id to delete
41272 *
41273- * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
41274- * if the transactions never clear.
41275+ * This function restores the configured ingress/egress mirrors
41276+ *
41277+ * Returns 0 on success, negative on failure
41278 **/
41279-static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
41280+int i40e_restore_ingress_egress_mirror(struct i40e_vsi *src_vsi,
41281+ int mirror, u16 rule_type, u16 *rule_id)
41282 {
41283- struct i40e_pf *pf = vf->pf;
41284- struct i40e_hw *hw = &pf->hw;
41285- int vf_abs_id, i;
41286- u32 reg;
41287+ struct i40e_vsi *mirror_vsi;
41288+ struct i40e_vf *mirror_vf;
41289+ struct i40e_pf *pf;
41290+ int ret = 0;
41291
41292- vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
41293+ pf = src_vsi->back;
41294
41295- wr32(hw, I40E_PF_PCI_CIAA,
41296- VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
41297- for (i = 0; i < 100; i++) {
41298- reg = rd32(hw, I40E_PF_PCI_CIAD);
41299- if ((reg & VF_TRANS_PENDING_MASK) == 0)
41300- return 0;
41301- udelay(1);
41302- }
41303- return -EIO;
41304+ /* validate the mirror */
41305+ ret = i40e_validate_vf(pf, mirror);
41306+ if (ret)
41307+ goto err_out;
41308+ mirror_vf = &pf->vf[mirror];
41309+ mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx];
41310+ ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi, rule_type,
41311+ rule_id);
41312+
41313+err_out:
41314+ return ret;
41315 }
41316
41317 /**
41318- * i40e_trigger_vf_reset
41319- * @vf: pointer to the VF structure
41320- * @flr: VFLR was issued or not
41321+ * i40e_configure_vf_link
41322+ * @vf: VF
41323+ * @link: link state to configure
41324 *
41325- * Trigger hardware to start a reset for a particular VF. Expects the caller
41326- * to wait the proper amount of time to allow hardware to reset the VF before
41327- * it cleans up and restores VF functionality.
41328+ * This function configures the requested link state for a VF
41329+ *
41330+ * Returns 0 on success, negative on failure
41331 **/
41332-static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
41333+static int i40e_configure_vf_link(struct i40e_vf *vf, u8 link)
41334 {
41335+ struct virtchnl_pf_event pfe;
41336+ struct i40e_link_status *ls;
41337 struct i40e_pf *pf = vf->pf;
41338- struct i40e_hw *hw = &pf->hw;
41339- u32 reg, reg_idx, bit_idx;
41340-
41341- /* warn the VF */
41342- clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
41343-
41344- /* Disable VF's configuration API during reset. The flag is re-enabled
41345- * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
41346- * It's normally disabled in i40e_free_vf_res(), but it's safer
41347- * to do it earlier to give some time to finish to any VF config
41348- * functions that may still be running at this point.
41349- */
41350- clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
41351+ struct i40e_hw *hw;
41352+ int abs_vf_id;
41353+ int ret = 0;
41354
41355- /* In the case of a VFLR, the HW has already reset the VF and we
41356- * just need to clean up, so don't hit the VFRTRIG register.
41357+ hw = &pf->hw;
41358+ abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
41359+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
41360+ pfe.severity = PF_EVENT_SEVERITY_INFO;
41361+ ls = &pf->hw.phy.link_info;
41362+ switch (link) {
41363+ case VFD_LINKSTATE_AUTO:
41364+ vf->link_forced = false;
41365+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
41366+ pfe.event_data.link_event_adv.link_status =
41367+ ls->link_info & I40E_AQ_LINK_UP;
41368+ pfe.event_data.link_event_adv.link_speed =
41369+ i40e_vc_link_speed2mbps(ls->link_speed);
41370+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
41371+ pfe.event_data.link_event.link_status =
41372+ ls->link_info & I40E_AQ_LINK_UP;
41373+ pfe.event_data.link_event.link_speed =
41374+ i40e_virtchnl_link_speed(ls->link_speed);
41375+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
41376+ break;
41377+ case VFD_LINKSTATE_ON:
41378+ vf->link_forced = true;
41379+ vf->link_up = true;
41380+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
41381+ pfe.event_data.link_event_adv.link_status = true;
41382+ pfe.event_data.link_event_adv.link_speed =
41383+ i40e_vc_link_speed2mbps(ls->link_speed);
41384+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
41385+ pfe.event_data.link_event.link_status = true;
41386+ pfe.event_data.link_event.link_speed =
41387+ i40e_virtchnl_link_speed(ls->link_speed);
41388+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
41389+ break;
41390+ case VFD_LINKSTATE_OFF:
41391+ vf->link_forced = true;
41392+ vf->link_up = false;
41393+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
41394+ pfe.event_data.link_event_adv.link_status = false;
41395+ pfe.event_data.link_event_adv.link_speed = 0;
41396+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
41397+ pfe.event_data.link_event.link_status = false;
41398+ pfe.event_data.link_event.link_speed = 0;
41399+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
41400+ break;
41401+ default:
41402+ ret = -EINVAL;
41403+ goto error_out;
41404+ }
41405+ /* Do not allow changing the link state while the VF is disabled.
41406+ * Check that the requested link state is not VFD_LINKSTATE_OFF, to prevent
41407+ * a false positive warning when reloading the driver
41408 */
41409- if (!flr) {
41410- /* reset VF using VPGEN_VFRTRIG reg */
41411- reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
41412- reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
41413- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
41414- i40e_flush(hw);
41415+ if (vf->pf_ctrl_disable && link != VFD_LINKSTATE_OFF) {
41416+ vf->link_up = false;
41417+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
41418+ pfe.event_data.link_event_adv.link_status = false;
41419+ pfe.event_data.link_event_adv.link_speed = 0;
41420+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
41421+ pfe.event_data.link_event.link_status = false;
41422+ pfe.event_data.link_event.link_speed = 0;
41423+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
41424+ dev_warn(&pf->pdev->dev,
41425+ "Not possible to change VF link state, please enable it first\n");
41426 }
41427- /* clear the VFLR bit in GLGEN_VFLRSTAT */
41428- reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
41429- bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
41430- wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
41431- i40e_flush(hw);
41432
41433- if (i40e_quiesce_vf_pci(vf))
41434- dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
41435- vf->vf_id);
41436+ /* Notify the VF of its new link state */
41437+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
41438+ I40E_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL);
41439+error_out:
41440+ return ret;
41441 }
41442
41443 /**
41444- * i40e_cleanup_reset_vf
41445+ * i40e_vf_del_vlan_mirror
41446 * @vf: pointer to the VF structure
41447+ * @vsi: pointer to the VSI structure
41448+ *
41449+ * Delete configured mirror vlans
41450+ *
41451+ * Returns 0 on success, negative on failure
41452 *
41453- * Cleanup a VF after the hardware reset is finished. Expects the caller to
41454- * have verified whether the reset is finished properly, and ensure the
41455- * minimum amount of wait time has passed.
41456 **/
41457-static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
41458+static int i40e_vf_del_vlan_mirror(struct i40e_vf *vf, struct i40e_vsi *vsi)
41459 {
41460+ u16 rules_used, rules_free, vid;
41461 struct i40e_pf *pf = vf->pf;
41462- struct i40e_hw *hw = &pf->hw;
41463- u32 reg;
41464-
41465- /* free VF resources to begin resetting the VSI state */
41466- i40e_free_vf_res(vf);
41467+ int ret = 0, num = 0, cnt;
41468+ __le16 *mr_list;
41469+
41470+ cnt = bitmap_weight(vf->mirror_vlans, VLAN_N_VID);
41471+ if (cnt) {
41472+ mr_list = kcalloc(cnt, sizeof(__le16), GFP_KERNEL);
41473+ if (!mr_list)
41474+ return -ENOMEM;
41475+
41476+ for_each_set_bit(vid, vf->mirror_vlans, VLAN_N_VID) {
41477+ mr_list[num] = CPU_TO_LE16(vid);
41478+ num++;
41479+ }
41480
41481- /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
41482- * By doing this we allow HW to access VF memory at any point. If we
41483- * did it any sooner, HW could access memory while it was being freed
41484- * in i40e_free_vf_res(), causing an IOMMU fault.
41485- *
41486- * On the other hand, this needs to be done ASAP, because the VF driver
41487- * is waiting for this to happen and may report a timeout. It's
41488- * harmless, but it gets logged into Guest OS kernel log, so best avoid
41489- * it.
41490- */
41491- reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
41492- reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
41493- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
41494+ ret = i40e_aq_delete_mirrorrule(&pf->hw, vsi->uplink_seid,
41495+ I40E_AQC_MIRROR_RULE_TYPE_VLAN,
41496+ vf->vlan_rule_id, cnt, mr_list,
41497+ NULL, &rules_used,
41498+ &rules_free);
41499
41500- /* reallocate VF resources to finish resetting the VSI state */
41501- if (!i40e_alloc_vf_res(vf)) {
41502- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
41503- i40e_enable_vf_mappings(vf);
41504- set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
41505- clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
41506- /* Do not notify the client during VF init */
41507- if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
41508- &vf->vf_states))
41509- i40e_notify_client_of_vf_reset(pf, abs_vf_id);
41510- vf->num_vlan = 0;
41511+ vf->vlan_rule_id = 0;
41512+ kfree(mr_list);
41513 }
41514
41515- /* Tell the VF driver the reset is done. This needs to be done only
41516- * after VF has been fully initialized, because the VF driver may
41517- * request resources immediately after setting this flag.
41518- */
41519- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
41520+ return ret;
41521 }
41522
41523 /**
41524- * i40e_reset_vf
41525+ * i40e_restore_vfd_config
41526 * @vf: pointer to the VF structure
41527- * @flr: VFLR was issued or not
41528+ * @vsi: VF VSI to be configured
41529+ *
41530+ * Restore the VF-d config as per the stored configuration
41531+ *
41532+ * Returns 0 on success, negative on failure
41533 *
41534- * reset the VF
41535 **/
41536-void i40e_reset_vf(struct i40e_vf *vf, bool flr)
41537+static int i40e_restore_vfd_config(struct i40e_vf *vf, struct i40e_vsi *vsi)
41538 {
41539 struct i40e_pf *pf = vf->pf;
41540- struct i40e_hw *hw = &pf->hw;
41541- bool rsd = false;
41542- u32 reg;
41543- int i;
41544+ int ret = 0, cnt = 0;
41545+ u16 vid;
41546
41547- /* If VFs have been disabled, there is no need to reset */
41548- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
41549- return;
41550+ /* Restore all VF-d configuration on reset */
41551+ for_each_set_bit(vid, vf->trunk_vlans, VLAN_N_VID) {
41552+ ret = i40e_vsi_add_vlan(vsi, vid);
41553+ if (ret)
41554+ goto err_out;
41555+ }
41556+ if (!vf->allow_untagged) {
41557+ spin_lock_bh(&vsi->mac_filter_hash_lock);
41558+ i40e_rm_vlan_all_mac(vsi, 0);
41559+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
41560+ i40e_service_event_schedule(vsi->back);
41561+ }
41562+
41563+ cnt = bitmap_weight(vf->mirror_vlans, VLAN_N_VID);
41564+ if (cnt) {
41565+ u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
41566+ u16 rule_id, rules_used, rules_free;
41567+ u16 sw_seid = vsi->uplink_seid;
41568+ u16 dst_seid = vsi->seid;
41569+ __le16 *mr_list;
41570+ int num = 0;
41571+
41572+ mr_list = kcalloc(cnt, sizeof(__le16), GFP_KERNEL);
41573+ if (!mr_list)
41574+ return -ENOMEM;
41575+ for_each_set_bit(vid, vf->mirror_vlans, VLAN_N_VID) {
41576+ mr_list[num] = CPU_TO_LE16(vid);
41577+ num++;
41578+ }
41579+ ret = i40e_aq_add_mirrorrule(&pf->hw, sw_seid, rule_type,
41580+ dst_seid, cnt, mr_list, NULL,
41581+ &rule_id, &rules_used,
41582+ &rules_free);
41583+ if (!ret)
41584+ vf->vlan_rule_id = rule_id;
41585+ kfree(mr_list);
41586+ }
41587
41588- i40e_trigger_vf_reset(vf, flr);
41589+ ret = i40e_configure_vf_loopback(vsi, vf->vf_id, vf->loopback);
41590+ if (ret) {
41591+ vf->loopback = false;
41592+ goto err_out;
41593+ }
41594
41595- /* poll VPGEN_VFRSTAT reg to make sure
41596- * that reset is complete
41597- */
41598- for (i = 0; i < 10; i++) {
41599- /* VF reset requires driver to first reset the VF and then
41600- * poll the status register to make sure that the reset
41601- * completed successfully. Due to internal HW FIFO flushes,
41602- * we must wait 10ms before the register will be valid.
41603- */
41604- usleep_range(10000, 20000);
41605- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
41606- if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
41607- rsd = true;
41608- break;
41609+ if (vf->vlan_stripping) {
41610+ ret = i40e_configure_vf_vlan_stripping(vsi, vf->vf_id, true);
41611+ if (ret) {
41612+ vf->vlan_stripping = false;
41613+ goto err_out;
41614 }
41615 }
41616
41617- if (flr)
41618- usleep_range(10000, 20000);
41619+ if (vf->promisc_mode) {
41620+ ret = i40e_configure_vf_promisc_mode(vf, vsi, vf->promisc_mode);
41621+ if (ret) {
41622+ vf->promisc_mode = VFD_PROMISC_OFF;
41623+ goto err_out;
41624+ }
41625+ }
41626
41627- if (!rsd)
41628- dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
41629- vf->vf_id);
41630- usleep_range(10000, 20000);
41631+ if (vf->link_forced) {
41632+ u8 link;
41633
41634- /* On initial reset, we don't have any queues to disable */
41635- if (vf->lan_vsi_idx != 0)
41636- i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
41637+ link = (vf->link_up ? VFD_LINKSTATE_ON : VFD_LINKSTATE_OFF);
41638+ ret = i40e_configure_vf_link(vf, link);
41639+ if (ret) {
41640+ vf->link_forced = false;
41641+ goto err_out;
41642+ }
41643+ }
41644
41645- i40e_cleanup_reset_vf(vf);
41646+ if (vf->bw_share_applied && vf->bw_share) {
41647+ struct i40e_aqc_configure_vsi_tc_bw_data bw_data = {0};
41648+ int i;
41649
41650- i40e_flush(hw);
41651- clear_bit(__I40E_VF_DISABLE, pf->state);
41652-}
41653+ bw_data.tc_valid_bits = 1;
41654+ bw_data.tc_bw_credits[0] = vf->bw_share;
41655+
41656+ ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
41657+ if (ret) {
41658+ dev_info(&pf->pdev->dev,
41659+ "AQ command Config VSI BW allocation per TC failed = %d\n",
41660+ pf->hw.aq.asq_last_status);
41661+ vf->bw_share_applied = false;
41662+ goto err_out;
41663+ }
41664+
41665+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
41666+ vsi->info.qs_handle[i] = bw_data.qs_handles[i];
41667+ }
41668
41669+err_out:
41670+ return ret;
41671+}
41672+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
41673 /**
41674- * i40e_reset_all_vfs
41675- * @pf: pointer to the PF structure
41676- * @flr: VFLR was issued or not
41677+ * i40e_alloc_vsi_res
41678+ * @vf: pointer to the VF info
41679+ * @idx: VSI index, applies only for ADq mode, zero otherwise
41680 *
41681- * Reset all allocated VFs in one go. First, tell the hardware to reset each
41682- * VF, then do all the waiting in one chunk, and finally finish restoring each
41683- * VF after the wait. This is useful during PF routines which need to reset
41684- * all VFs, as otherwise it must perform these resets in a serialized fashion.
41685+ * alloc VF vsi context & resources
41686 **/
41687-void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
41688+static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
41689 {
41690- struct i40e_hw *hw = &pf->hw;
41691- struct i40e_vf *vf;
41692- int i, v;
41693- u32 reg;
41694-
41695- /* If we don't have any VFs, then there is nothing to reset */
41696- if (!pf->num_alloc_vfs)
41697- return;
41698+ struct i40e_mac_filter *f = NULL;
41699+ struct i40e_pf *pf = vf->pf;
41700+ struct i40e_vsi *vsi;
41701+ u64 max_tx_rate = 0;
41702+ int ret = 0;
41703
41704- /* If VFs have been disabled, there is no need to reset */
41705- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
41706- return;
41707+ vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
41708+ vf->vf_id);
41709
41710- /* Begin reset on all VFs at once */
41711- for (v = 0; v < pf->num_alloc_vfs; v++)
41712- i40e_trigger_vf_reset(&pf->vf[v], flr);
41713+ if (!vsi) {
41714+ dev_err(&pf->pdev->dev,
41715+ "add vsi failed for VF %d, aq_err %d\n",
41716+ vf->vf_id, pf->hw.aq.asq_last_status);
41717+ ret = -ENOENT;
41718+ goto error_alloc_vsi_res;
41719+ }
41720
41721- /* HW requires some time to make sure it can flush the FIFO for a VF
41722- * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
41723- * sequence to make sure that it has completed. We'll keep track of
41724- * the VFs using a simple iterator that increments once that VF has
41725- * finished resetting.
41726- */
41727- for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
41728- usleep_range(10000, 20000);
41729+ if (!idx) {
41730+ u64 hena = i40e_pf_get_default_rss_hena(pf);
41731+ bool trunk_conf = false;
41732+ u8 broadcast[ETH_ALEN];
41733+ u16 vid;
41734
41735- /* Check each VF in sequence, beginning with the VF to fail
41736- * the previous check.
41737+ vf->lan_vsi_idx = vsi->idx;
41738+ vf->lan_vsi_id = vsi->id;
41739+ /* If the port VLAN was configured and the VF
41740+ * driver was then removed, the VSI port VLAN
41741+ * configuration was destroyed. Check whether a
41742+ * port VLAN is set and restore the VSI
41743+ * configuration if needed.
41744 */
41745- while (v < pf->num_alloc_vfs) {
41746- vf = &pf->vf[v];
41747- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
41748- if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
41749- break;
41750+ for_each_set_bit(vid, vf->trunk_vlans, VLAN_N_VID) {
41751+ if (vid != vf->port_vlan_id)
41752+ trunk_conf = true;
41753+ }
41754+ if (vf->port_vlan_id && !trunk_conf)
41755+ i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
41756
41757- /* If the current VF has finished resetting, move on
41758- * to the next VF in sequence.
41759- */
41760- v++;
41761+ spin_lock_bh(&vsi->mac_filter_hash_lock);
41762+ if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
41763+ f = i40e_add_mac_filter(vsi,
41764+ vf->default_lan_addr.addr);
41765+ if (!f)
41766+ dev_info(&pf->pdev->dev,
41767+ "Could not add MAC filter %pM for VF %d\n",
41768+ vf->default_lan_addr.addr, vf->vf_id);
41769 }
41770+ eth_broadcast_addr(broadcast);
41771+ f = i40e_add_mac_filter(vsi, broadcast);
41772+ if (!f)
41773+ dev_info(&pf->pdev->dev,
41774+ "Could not allocate VF broadcast filter\n");
41775+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
41776+ wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
41777+ wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
41778+ /* program mac filter only for VF VSI */
41779+ ret = i40e_sync_vsi_filters(vsi);
41780+ if (ret)
41781+ dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
41782 }
41783
41784- if (flr)
41785- usleep_range(10000, 20000);
41786-
41787- /* Display a warning if at least one VF didn't manage to reset in
41788- * time, but continue on with the operation.
41789- */
41790- if (v < pf->num_alloc_vfs)
41791- dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
41792- pf->vf[v].vf_id);
41793- usleep_range(10000, 20000);
41794-
41795- /* Begin disabling all the rings associated with VFs, but do not wait
41796- * between each VF.
41797- */
41798- for (v = 0; v < pf->num_alloc_vfs; v++) {
41799- /* On initial reset, we don't have any queues to disable */
41800- if (pf->vf[v].lan_vsi_idx == 0)
41801- continue;
41802-
41803- i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
41804+ /* store the VSI index and id for ADq; don't apply the mac filter */
41805+ if (vf->adq_enabled) {
41806+ vf->ch[idx].vsi_idx = vsi->idx;
41807+ vf->ch[idx].vsi_id = vsi->id;
41808 }
41809
41810- /* Now that we've notified HW to disable all of the VF rings, wait
41811- * until they finish.
41812- */
41813- for (v = 0; v < pf->num_alloc_vfs; v++) {
41814- /* On initial reset, we don't have any queues to disable */
41815- if (pf->vf[v].lan_vsi_idx == 0)
41816- continue;
41817-
41818- i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
41819+ /* Set VF bandwidth if specified */
41820+ if (vf->tx_rate) {
41821+ max_tx_rate = vf->tx_rate;
41822+ } else if (vf->ch[idx].max_tx_rate) {
41823+ max_tx_rate = vf->ch[idx].max_tx_rate;
41824 }
41825
41826- /* Hw may need up to 50ms to finish disabling the RX queues. We
41827- * minimize the wait by delaying only once for all VFs.
41828- */
41829- mdelay(50);
41830-
41831- /* Finish the reset on each VF */
41832- for (v = 0; v < pf->num_alloc_vfs; v++)
41833- i40e_cleanup_reset_vf(&pf->vf[v]);
41834+ if (max_tx_rate) {
41835+ max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
41836+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
41837+ max_tx_rate, 0, NULL);
41838+ if (ret)
41839+ dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
41840+ vf->vf_id, ret);
41841+ }
41842+#ifdef HAVE_NDO_SET_VF_LINK_STATE
41843+ ret = i40e_restore_vfd_config(vf, vsi);
41844+ if (ret)
41845+ dev_err(&pf->pdev->dev,
41846+ "Failed to restore VF-d config error %d\n", ret);
41847+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
41848
41849- i40e_flush(hw);
41850- clear_bit(__I40E_VF_DISABLE, pf->state);
41851+error_alloc_vsi_res:
41852+ return ret;
41853 }
41854
41855 /**
41856- * i40e_free_vfs
41857- * @pf: pointer to the PF structure
41858+ * i40e_map_pf_queues_to_vsi
41859+ * @vf: pointer to the VF info
41860 *
41861- * free VF resources
41862+ * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
41863+ * function takes care of the first part, VSILAN_QTABLE, mapping PF queues to the VSI.
41864 **/
41865-void i40e_free_vfs(struct i40e_pf *pf)
41866+static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
41867 {
41868+ struct i40e_pf *pf = vf->pf;
41869 struct i40e_hw *hw = &pf->hw;
41870- u32 reg_idx, bit_idx;
41871- int i, tmp, vf_id;
41872-
41873- if (!pf->vf)
41874- return;
41875- while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
41876- usleep_range(1000, 2000);
41877+ u32 reg, num_tc = 1; /* VF has at least one traffic class */
41878+ u16 vsi_id, qps;
41879+ int i, j;
41880
41881- i40e_notify_client_of_vf_enable(pf, 0);
41882+ if (vf->adq_enabled)
41883+ num_tc = vf->num_tc;
41884
41885- /* Amortize wait time by stopping all VFs at the same time */
41886- for (i = 0; i < pf->num_alloc_vfs; i++) {
41887- if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
41888- continue;
41889+ for (i = 0; i < num_tc; i++) {
41890+ if (vf->adq_enabled) {
41891+ qps = vf->ch[i].num_qps;
41892+ vsi_id = vf->ch[i].vsi_id;
41893+ } else {
41894+ qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
41895+ vsi_id = vf->lan_vsi_id;
41896+ }
41897
41898- i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
41899+ for (j = 0; j < 7; j++) {
41900+ if (j * 2 >= qps) {
41901+ /* end of list */
41902+ reg = 0x07FF07FF;
41903+ } else {
41904+ u16 qid = i40e_vc_get_pf_queue_id(vf,
41905+ vsi_id,
41906+ j * 2);
41907+ reg = qid;
41908+ qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
41909+ (j * 2) + 1);
41910+ reg |= qid << 16;
41911+ }
41912+ i40e_write_rx_ctl(hw,
41913+ I40E_VSILAN_QTABLE(j, vsi_id),
41914+ reg);
41915+ }
41916 }
41917+}
41918
41919- for (i = 0; i < pf->num_alloc_vfs; i++) {
41920- if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
41921- continue;
41922+/**
41923+ * i40e_map_pf_to_vf_queues
41924+ * @vf: pointer to the VF info
41925+ *
41926+ * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
41927+ * function takes care of the second part, VPLAN_QTABLE, and completes the VF mappings.
41928+ **/
41929+static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
41930+{
41931+ struct i40e_pf *pf = vf->pf;
41932+ struct i40e_hw *hw = &pf->hw;
41933+ u32 reg, total_qps = 0;
41934+ u32 qps, num_tc = 1; /* VF has at least one traffic class */
41935+ u16 vsi_id, qid;
41936+ int i, j;
41937+
41938+ if (vf->adq_enabled)
41939+ num_tc = vf->num_tc;
41940+
41941+ for (i = 0; i < num_tc; i++) {
41942+ if (vf->adq_enabled) {
41943+ qps = vf->ch[i].num_qps;
41944+ vsi_id = vf->ch[i].vsi_id;
41945+ } else {
41946+ qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
41947+ vsi_id = vf->lan_vsi_id;
41948+ }
41949
41950- i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
41951+ for (j = 0; j < qps; j++) {
41952+ qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
41953+
41954+ reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
41955+ wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
41956+ reg);
41957+ total_qps++;
41958+ }
41959 }
41960+}
41961
41962- /* Disable IOV before freeing resources. This lets any VF drivers
41963- * running in the host get themselves cleaned up before we yank
41964- * the carpet out from underneath their feet.
41965+/**
41966+ * i40e_enable_vf_mappings
41967+ * @vf: pointer to the VF info
41968+ *
41969+ * enable VF mappings
41970+ **/
41971+static void i40e_enable_vf_mappings(struct i40e_vf *vf)
41972+{
41973+ struct i40e_pf *pf = vf->pf;
41974+ struct i40e_hw *hw = &pf->hw;
41975+ u32 reg;
41976+
41977+ /* Tell the hardware we're using noncontiguous mapping. HW requires
41978+ * that VF queues be mapped using this method, even when they are
41979+ * contiguous in real life
41980 */
41981- if (!pci_vfs_assigned(pf->pdev))
41982- pci_disable_sriov(pf->pdev);
41983- else
41984- dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
41985+ i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
41986+ I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
41987
41988- /* free up VF resources */
41989- tmp = pf->num_alloc_vfs;
41990- pf->num_alloc_vfs = 0;
41991- for (i = 0; i < tmp; i++) {
41992- if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
41993- i40e_free_vf_res(&pf->vf[i]);
41994- /* disable qp mappings */
41995- i40e_disable_vf_mappings(&pf->vf[i]);
41996- }
41997+ /* enable VF vplan_qtable mappings */
41998+ reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
41999+ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
42000
42001- kfree(pf->vf);
42002- pf->vf = NULL;
42003+ i40e_map_pf_to_vf_queues(vf);
42004+ i40e_map_pf_queues_to_vsi(vf);
42005
42006- /* This check is for when the driver is unloaded while VFs are
42007- * assigned. Setting the number of VFs to 0 through sysfs is caught
42008- * before this function ever gets called.
42009- */
42010- if (!pci_vfs_assigned(pf->pdev)) {
42011- /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
42012- * work correctly when SR-IOV gets re-enabled.
42013- */
42014- for (vf_id = 0; vf_id < tmp; vf_id++) {
42015- reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
42016- bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
42017- wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
42018- }
42019- }
42020- clear_bit(__I40E_VF_DISABLE, pf->state);
42021+ i40e_flush(hw);
42022 }
42023
42024-#ifdef CONFIG_PCI_IOV
42025 /**
42026- * i40e_alloc_vfs
42027- * @pf: pointer to the PF structure
42028- * @num_alloc_vfs: number of VFs to allocate
42029+ * i40e_disable_vf_mappings
42030+ * @vf: pointer to the VF info
42031 *
42032- * allocate VF resources
42033+ * disable VF mappings
42034 **/
42035-int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
42036+static void i40e_disable_vf_mappings(struct i40e_vf *vf)
42037 {
42038- struct i40e_vf *vfs;
42039- int i, ret = 0;
42040+ struct i40e_pf *pf = vf->pf;
42041+ struct i40e_hw *hw = &pf->hw;
42042+ int i;
42043
42044- /* Disable interrupt 0 so we don't try to handle the VFLR. */
42045- i40e_irq_dynamic_disable_icr0(pf);
42046+ /* disable qp mappings */
42047+ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
42048+ for (i = 0; i < I40E_MAX_VSI_QP; i++)
42049+ wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
42050+ I40E_QUEUE_END_OF_LIST);
42051+ i40e_flush(hw);
42052+}
42053
42054- /* Check to see if we're just allocating resources for extant VFs */
42055- if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
42056- ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
42057- if (ret) {
42058- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
42059- pf->num_alloc_vfs = 0;
42060- goto err_iov;
42061- }
42062- }
42063- /* allocate memory */
42064- vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
42065- if (!vfs) {
42066- ret = -ENOMEM;
42067- goto err_alloc;
42068- }
42069- pf->vf = vfs;
42070+/**
42071+ * i40e_free_vf_res
42072+ * @vf: pointer to the VF info
42073+ *
42074+ * free VF resources
42075+ **/
42076+static void i40e_free_vf_res(struct i40e_vf *vf)
42077+{
42078+ struct i40e_pf *pf = vf->pf;
42079+ struct i40e_hw *hw = &pf->hw;
42080+ u32 reg_idx, reg;
42081+ int i, j, msix_vf;
42082
42083- /* apply default profile */
42084- for (i = 0; i < num_alloc_vfs; i++) {
42085- vfs[i].pf = pf;
42086- vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
42087- vfs[i].vf_id = i;
42088+ /* Start by disabling VF's configuration API to prevent the OS from
42089+ * accessing the VF's VSI after it's freed / invalidated.
42090+ */
42091+ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
42092
42093- /* assign default capabilities */
42094- set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
42095- vfs[i].spoofchk = true;
42096+#ifdef HAVE_NDO_SET_VF_LINK_STATE
42097+ /* Release vlan mirror */
42098+ if (vf->lan_vsi_idx)
42099+ i40e_vf_del_vlan_mirror(vf, pf->vsi[vf->lan_vsi_idx]);
42100+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
42101
42102- set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
42103+ /* It's possible the VF had requested more queues than the default, so
42104+ * do the accounting here when we're about to free them.
42105+ */
42106+ if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
42107+ pf->queues_left +=
42108+ vf->num_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
42109+ }
42110
42111+ /* free vsi & disconnect it from the parent uplink */
42112+ if (vf->lan_vsi_idx) {
42113+ i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
42114+ vf->lan_vsi_idx = 0;
42115+ vf->lan_vsi_id = 0;
42116 }
42117- pf->num_alloc_vfs = num_alloc_vfs;
42118
42119- /* VF resources get allocated during reset */
42120- i40e_reset_all_vfs(pf, false);
42121+ /* do the accounting and remove additional ADq VSI's */
42122+ if (vf->adq_enabled && vf->ch[0].vsi_idx) {
42123+ for (j = 0; j < vf->num_tc; j++) {
42124+ /* At this point VSI0 is already released, so don't
42125+ * release it again; only clear its values in the
42126+ * structure variables
42127+ */
42128+ if (j)
42129+ i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
42130+ vf->ch[j].vsi_idx = 0;
42131+ vf->ch[j].vsi_id = 0;
42132+ }
42133+ }
42134+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
42135
42136- i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
42137+ /* disable interrupts so the VF starts in a known state */
42138+ for (i = 0; i < msix_vf; i++) {
42139+ /* format is same for both registers */
42140+ if (0 == i)
42141+ reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
42142+ else
42143+ reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
42144+ (vf->vf_id))
42145+ + (i - 1));
42146+ wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
42147+ i40e_flush(hw);
42148+ }
42149
42150-err_alloc:
42151- if (ret)
42152- i40e_free_vfs(pf);
42153-err_iov:
42154- /* Re-enable interrupt 0. */
42155- i40e_irq_dynamic_enable_icr0(pf, false);
42156- return ret;
42157+ /* clear the irq settings */
42158+ for (i = 0; i < msix_vf; i++) {
42159+ /* format is same for both registers */
42160+ if (0 == i)
42161+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
42162+ else
42163+ reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
42164+ (vf->vf_id))
42165+ + (i - 1));
42166+ reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
42167+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
42168+ wr32(hw, reg_idx, reg);
42169+ i40e_flush(hw);
42170+ }
42171+ /* reset some of the state variables keeping track of the resources */
42172+ vf->num_queue_pairs = 0;
42173+ clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
42174+ clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
42175 }
42176
42177-#endif
42178 /**
42179- * i40e_pci_sriov_enable
42180- * @pdev: pointer to a pci_dev structure
42181- * @num_vfs: number of VFs to allocate
42182+ * i40e_alloc_vf_res
42183+ * @vf: pointer to the VF info
42184 *
42185- * Enable or change the number of VFs
42186+ * allocate VF resources
42187 **/
42188-static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
42189+static int i40e_alloc_vf_res(struct i40e_vf *vf)
42190 {
42191-#ifdef CONFIG_PCI_IOV
42192- struct i40e_pf *pf = pci_get_drvdata(pdev);
42193- int pre_existing_vfs = pci_num_vf(pdev);
42194- int err = 0;
42195+ struct i40e_pf *pf = vf->pf;
42196+ int total_queue_pairs = 0;
42197+ int ret, idx;
42198
42199- if (test_bit(__I40E_TESTING, pf->state)) {
42200- dev_warn(&pdev->dev,
42201- "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
42202- err = -EPERM;
42203- goto err_out;
42204- }
42205+ if (vf->num_req_queues &&
42206+ vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
42207+ pf->num_vf_qps = vf->num_req_queues;
42208+ else
42209+ pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
42210
42211- if (pre_existing_vfs && pre_existing_vfs != num_vfs)
42212- i40e_free_vfs(pf);
42213- else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
42214- goto out;
42215+ /* allocate hw vsi context & associated resources */
42216+ ret = i40e_alloc_vsi_res(vf, 0);
42217+ if (ret)
42218+ goto error_alloc;
42219+ total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
42220
42221- if (num_vfs > pf->num_req_vfs) {
42222- dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
42223- num_vfs, pf->num_req_vfs);
42224- err = -EPERM;
42225- goto err_out;
42226+ /* allocate additional VSIs based on tc information for ADq */
42227+ if (vf->adq_enabled) {
42228+ if (pf->queues_left >=
42229+ (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
42230+ /* TC 0 always belongs to VF VSI */
42231+ for (idx = 1; idx < vf->num_tc; idx++) {
42232+ ret = i40e_alloc_vsi_res(vf, idx);
42233+ if (ret)
42234+ goto error_alloc;
42235+ }
42236+ /* send correct number of queues */
42237+ total_queue_pairs = I40E_MAX_VF_QUEUES;
42238+ } else {
42239+ dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
42240+ vf->vf_id);
42241+ vf->adq_enabled = false;
42242+ }
42243 }
42244
42245- dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
42246- err = i40e_alloc_vfs(pf, num_vfs);
42247- if (err) {
42248- dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
42249- goto err_out;
42250- }
42251+ /* We account for each VF to get a default number of queue pairs. If
42252+ * the VF has now requested more, we need to account for that to make
42253+ * certain we never request more queues than we actually have left in
42254+ * HW.
42255+ */
42256+ if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
42257+ pf->queues_left -=
42258+ total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
42259
42260-out:
42261- return num_vfs;
42262+ if (vf->trusted)
42263+ set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
42264+ else
42265+ clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
42266
42267-err_out:
42268- return err;
42269-#endif
42270- return 0;
42271+ /* store the total qps number for the runtime
42272+ * VF req validation
42273+ */
42274+ vf->num_queue_pairs = total_queue_pairs;
42275+
42276+ /* VF is now completely initialized */
42277+ set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
42278+
42279+error_alloc:
42280+ if (ret)
42281+ i40e_free_vf_res(vf);
42282+
42283+ return ret;
42284 }
42285
42286+#define VF_DEVICE_STATUS 0xAA
42287+#define VF_TRANS_PENDING_MASK 0x20
42288 /**
42289- * i40e_pci_sriov_configure
42290- * @pdev: pointer to a pci_dev structure
42291- * @num_vfs: number of VFs to allocate
42292+ * i40e_quiesce_vf_pci
42293+ * @vf: pointer to the VF structure
42294 *
42295- * Enable or change the number of VFs. Called when the user updates the number
42296- * of VFs in sysfs.
42297+ * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
42298+ * if the transactions never clear.
42299 **/
42300-int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
42301+static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
42302 {
42303- struct i40e_pf *pf = pci_get_drvdata(pdev);
42304+ struct i40e_pf *pf = vf->pf;
42305+ struct i40e_hw *hw = &pf->hw;
42306+ int vf_abs_id, i;
42307+ u32 reg;
42308
42309- if (num_vfs) {
42310- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
42311- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
42312- i40e_do_reset_safe(pf,
42313- BIT_ULL(__I40E_PF_RESET_REQUESTED));
42314- }
42315- return i40e_pci_sriov_enable(pdev, num_vfs);
42316- }
42317+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
42318
42319- if (!pci_vfs_assigned(pf->pdev)) {
42320- i40e_free_vfs(pf);
42321- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
42322- i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
42323- } else {
42324- dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
42325- return -EINVAL;
42326+ wr32(hw, I40E_PF_PCI_CIAA,
42327+ VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
42328+ for (i = 0; i < 100; i++) {
42329+ reg = rd32(hw, I40E_PF_PCI_CIAD);
42330+ if ((reg & VF_TRANS_PENDING_MASK) == 0)
42331+ return 0;
42332+ udelay(1);
42333 }
42334- return 0;
42335+ return -EIO;
42336 }
42337
42338-/***********************virtual channel routines******************/
42339+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
42340+static inline void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, int *num_vlans,
42341+ s16 **vlan_list);
42342+static inline i40e_status
42343+i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
42344+ bool unicast_enable, s16 *vl, int num_vlans);
42345
42346 /**
42347- * i40e_vc_send_msg_to_vf
42348+ * i40e_config_vf_promiscuous_mode
42349 * @vf: pointer to the VF info
42350- * @v_opcode: virtual channel opcode
42351- * @v_retval: virtual channel return value
42352- * @msg: pointer to the msg buffer
42353- * @msglen: msg length
42354+ * @vsi_id: VSI id
42355+ * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
42356+ * @alluni: set MAC L2 layer unicast promiscuous enable/disable
42357 *
42358- * send msg to VF
42359+ * Called from the VF to configure the promiscuous mode of
42360+ * VF vsis and from the VF reset path to reset promiscuous mode.
42361 **/
42362-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
42363- u32 v_retval, u8 *msg, u16 msglen)
42364+static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
42365+ u16 vsi_id,
42366+ bool allmulti,
42367+ bool alluni)
42368 {
42369- struct i40e_pf *pf;
42370- struct i40e_hw *hw;
42371- int abs_vf_id;
42372- i40e_status aq_ret;
42373+ i40e_status aq_ret = I40E_SUCCESS;
42374+ struct i40e_pf *pf = vf->pf;
42375+ struct i40e_vsi *vsi;
42376+ int num_vlans;
42377+ s16 *vl;
42378
42379- /* validate the request */
42380- if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
42381- return -EINVAL;
42382+ vsi = i40e_find_vsi_from_id(pf, vsi_id);
42383+ if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
42384+ return I40E_ERR_PARAM;
42385
42386- pf = vf->pf;
42387- hw = &pf->hw;
42388- abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
42389+ if (vf->port_vlan_id) {
42390+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
42391+ alluni, &vf->port_vlan_id, 1);
42392+ return aq_ret;
42393+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
42394+ i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
42395
42396- /* single place to detect unsuccessful return values */
42397- if (v_retval) {
42398- vf->num_invalid_msgs++;
42399- dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
42400- vf->vf_id, v_opcode, v_retval);
42401- if (vf->num_invalid_msgs >
42402- I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
42403- dev_err(&pf->pdev->dev,
42404- "Number of invalid messages exceeded for VF %d\n",
42405- vf->vf_id);
42406- dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
42407- set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
42408- }
42409- } else {
42410- vf->num_valid_msgs++;
42411- /* reset the invalid counter, if a valid message is received. */
42412- vf->num_invalid_msgs = 0;
42413- }
42414+ if (!vl)
42415+ return I40E_ERR_NO_MEMORY;
42416
42417- aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
42418- msg, msglen, NULL);
42419- if (aq_ret) {
42420- dev_info(&pf->pdev->dev,
42421- "Unable to send the message to VF %d aq_err %d\n",
42422- vf->vf_id, pf->hw.aq.asq_last_status);
42423- return -EIO;
42424+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
42425+ vl, num_vlans);
42426+ kfree(vl);
42427+ return aq_ret;
42428 }
42429-
42430- return 0;
42431+ /* no vlans to set on, set on vsi */
42432+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
42433+ NULL, 0);
42434+ return aq_ret;
42435 }
42436
42437 /**
42438- * i40e_vc_send_resp_to_vf
42439- * @vf: pointer to the VF info
42440- * @opcode: operation code
42441- * @retval: return value
42442+ * i40e_trigger_vf_reset
42443+ * @vf: pointer to the VF structure
42444+ * @flr: VFLR was issued or not
42445 *
42446- * send resp msg to VF
42447+ * Trigger hardware to start a reset for a particular VF. Expects the caller
42448+ * to wait the proper amount of time to allow hardware to reset the VF before
42449+ * it cleans up and restores VF functionality.
42450 **/
42451-static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
42452- enum virtchnl_ops opcode,
42453- i40e_status retval)
42454+static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
42455 {
42456- return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
42457+ struct i40e_pf *pf = vf->pf;
42458+ struct i40e_hw *hw = &pf->hw;
42459+ u32 reg, reg_idx, bit_idx;
42460+
42461+ /* warn the VF */
42462+ clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
42463+
42464+ /* Disable VF's configuration API during reset. The flag is re-enabled
42465+ * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
42466+ * It's normally disabled in i40e_free_vf_res(), but it's safer
42467+ * to do it earlier to give some time to finish to any VF config
42468+ * functions that may still be running at this point.
42469+ */
42470+ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
42471+
42472+ /* In the case of a VFLR, the HW has already reset the VF and we
42473+ * just need to clean up, so don't hit the VFRTRIG register.
42474+ */
42475+ if (!flr) {
42476+ /* reset VF using VPGEN_VFRTRIG reg */
42477+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
42478+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
42479+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
42480+ }
42481+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
42482+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
42483+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
42484+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
42485+ i40e_flush(hw);
42486+
42487+ if (i40e_quiesce_vf_pci(vf))
42488+ dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
42489+ vf->vf_id);
42490 }
42491
42492 /**
42493- * i40e_vc_get_version_msg
42494- * @vf: pointer to the VF info
42495+ * i40e_cleanup_reset_vf
42496+ * @vf: pointer to the VF structure
42497 *
42498- * called from the VF to request the API version used by the PF
42499+ * Cleanup a VF after the hardware reset is finished. Expects the caller to
42500+ * have verified whether the reset is finished properly, and ensure the
42501+ * minimum amount of wait time has passed.
42502 **/
42503-static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
42504+static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
42505 {
42506- struct virtchnl_version_info info = {
42507- VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
42508- };
42509+ struct i40e_pf *pf = vf->pf;
42510+ struct i40e_hw *hw = &pf->hw;
42511+ u32 reg;
42512
42513- vf->vf_ver = *(struct virtchnl_version_info *)msg;
42514- /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
42515- if (VF_IS_V10(&vf->vf_ver))
42516- info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
42517- return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
42518- I40E_SUCCESS, (u8 *)&info,
42519- sizeof(struct virtchnl_version_info));
42520+ /* disable promisc modes in case they were enabled */
42521+ i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
42522+
42523+ /* free VF resources to begin resetting the VSI state */
42524+ i40e_free_vf_res(vf);
42525+
42526+ /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
42527+ * By doing this we allow HW to access VF memory at any point. If we
42528+ * did it any sooner, HW could access memory while it was being freed
42529+ * in i40e_free_vf_res(), causing an IOMMU fault.
42530+ *
42531+ * On the other hand, this needs to be done ASAP, because the VF driver
42532+ * is waiting for this to happen and may report a timeout. It's
42533+ * harmless, but it gets logged into Guest OS kernel log, so best avoid
42534+ * it.
42535+ */
42536+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
42537+ reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
42538+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
42539+
42540+ /* reallocate VF resources to finish resetting the VSI state */
42541+ if (!i40e_alloc_vf_res(vf)) {
42542+ int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
42543+ i40e_enable_vf_mappings(vf);
42544+ set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
42545+ clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
42546+ /* Do not notify the client during VF init */
42547+ if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
42548+ &vf->vf_states))
42549+ i40e_notify_client_of_vf_reset(pf, abs_vf_id);
42550+ vf->num_vlan = 0;
42551+ }
42552+
42553+ /* Tell the VF driver the reset is done. This needs to be done only
42554+ * after VF has been fully initialized, because the VF driver may
42555+ * request resources immediately after setting this flag.
42556+ */
42557+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
42558 }
42559
42560 /**
42561- * i40e_vc_get_vf_resources_msg
42562- * @vf: pointer to the VF info
42563- * @msg: pointer to the msg buffer
42564- * @msglen: msg length
42565+ * i40e_reset_vf
42566+ * @vf: pointer to the VF structure
42567+ * @flr: VFLR was issued or not
42568 *
42569- * called from the VF to request its resources
42570+ * Returns true if the VF is reset, false otherwise.
42571 **/
42572-static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
42573+bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
42574 {
42575- struct virtchnl_vf_resource *vfres = NULL;
42576 struct i40e_pf *pf = vf->pf;
42577- i40e_status aq_ret = 0;
42578- struct i40e_vsi *vsi;
42579- int num_vsis = 1;
42580- int len = 0;
42581- int ret;
42582-
42583- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
42584- aq_ret = I40E_ERR_PARAM;
42585- goto err;
42586- }
42587-
42588- len = (sizeof(struct virtchnl_vf_resource) +
42589- sizeof(struct virtchnl_vsi_resource) * num_vsis);
42590+ struct i40e_hw *hw = &pf->hw;
42591+ bool rsd = false;
42592+ u32 reg;
42593+ int i;
42594
42595- vfres = kzalloc(len, GFP_KERNEL);
42596- if (!vfres) {
42597- aq_ret = I40E_ERR_NO_MEMORY;
42598- len = 0;
42599- goto err;
42600- }
42601- if (VF_IS_V11(&vf->vf_ver))
42602- vf->driver_caps = *(u32 *)msg;
42603- else
42604- vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
42605- VIRTCHNL_VF_OFFLOAD_RSS_REG |
42606- VIRTCHNL_VF_OFFLOAD_VLAN;
42607+ /* If the VFs have been disabled, this means something else is
42608+ * resetting the VF, so we shouldn't continue.
42609+ */
42610+ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
42611+ return false;
42612
42613- vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
42614- vsi = pf->vsi[vf->lan_vsi_idx];
42615- if (!vsi->info.pvid)
42616- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
42617+ i40e_trigger_vf_reset(vf, flr);
42618
42619- if (i40e_vf_client_capable(pf, vf->vf_id) &&
42620- (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
42621- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
42622- set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
42623+ /* poll VPGEN_VFRSTAT reg to make sure
42624+ * that reset is complete
42625+ */
42626+ for (i = 0; i < 10; i++) {
42627+ /* VF reset requires driver to first reset the VF and then
42628+ * poll the status register to make sure that the reset
42629+ * completed successfully. Due to internal HW FIFO flushes,
42630+ * we must wait 10ms before the register will be valid.
42631+ */
42632+ usleep_range(10000, 20000);
42633+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
42634+ if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
42635+ rsd = true;
42636+ break;
42637+ }
42638 }
42639
42640- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
42641- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
42642- } else {
42643- if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
42644- (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
42645- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
42646- else
42647- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
42648- }
42649+ if (flr)
42650+ usleep_range(10000, 20000);
42651
42652- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
42653- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
42654- vfres->vf_cap_flags |=
42655- VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
42656- }
42657+ if (!rsd)
42658+ dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
42659+ vf->vf_id);
42660+ usleep_range(10000, 20000);
42661
42662- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
42663- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
42664+ /* On initial reset, we don't have any queues to disable */
42665+ if (vf->lan_vsi_idx != 0)
42666+ i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
42667
42668- if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
42669- (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
42670- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
42671+ i40e_cleanup_reset_vf(vf);
42672
42673- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
42674- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
42675- dev_err(&pf->pdev->dev,
42676- "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
42677- vf->vf_id);
42678- aq_ret = I40E_ERR_PARAM;
42679- goto err;
42680- }
42681- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
42682- }
42683+ i40e_flush(hw);
42684+ clear_bit(__I40E_VF_DISABLE, pf->state);
42685
42686- if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
42687- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
42688- vfres->vf_cap_flags |=
42689- VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
42690- }
42691+ return true;
42692+}
42693
42694- vfres->num_vsis = num_vsis;
42695+/**
42696+ * i40e_reset_all_vfs
42697+ * @pf: pointer to the PF structure
42698+ * @flr: VFLR was issued or not
42699+ *
42700+ * Reset all allocated VFs in one go. First, tell the hardware to reset each
42701+ * VF, then do all the waiting in one chunk, and finally finish restoring each
42702+ * VF after the wait. This is useful during PF routines which need to reset
42703+ * all VFs, as otherwise it must perform these resets in a serialized fashion.
42704+ *
42705+ * Returns true if any VFs were reset, and false otherwise.
42706+ **/
42707+bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
42708+{
42709+ struct i40e_hw *hw = &pf->hw;
42710+ struct i40e_vf *vf;
42711+ int i, v;
42712+ u32 reg;
42713+
42714+ /* If we don't have any VFs, then there is nothing to reset */
42715+ if (!pf->num_alloc_vfs)
42716+ return false;
42717+
42718+ /* If VFs have been disabled, there is no need to reset */
42719+ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
42720+ return false;
42721+
42722+ /* Begin reset on all VFs at once */
42723+ for (v = 0; v < pf->num_alloc_vfs; v++)
42724+ i40e_trigger_vf_reset(&pf->vf[v], flr);
42725+
42726+ /* HW requires some time to make sure it can flush the FIFO for a VF
42727+ * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
42728+ * sequence to make sure that it has completed. We'll keep track of
42729+ * the VFs using a simple iterator that increments once that VF has
42730+ * finished resetting.
42731+ */
42732+ for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
42733+ usleep_range(10000, 20000);
42734+
42735+		/* Check each VF in sequence, beginning with the VF that failed
42736+		 * the previous check.
42737+ */
42738+ while (v < pf->num_alloc_vfs) {
42739+ vf = &pf->vf[v];
42740+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
42741+ if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
42742+ break;
42743+
42744+ /* If the current VF has finished resetting, move on
42745+ * to the next VF in sequence.
42746+ */
42747+ v++;
42748+ }
42749+ }
42750+
42751+ if (flr)
42752+ usleep_range(10000, 20000);
42753+
42754+ /* Display a warning if at least one VF didn't manage to reset in
42755+ * time, but continue on with the operation.
42756+ */
42757+ if (v < pf->num_alloc_vfs)
42758+ dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
42759+ pf->vf[v].vf_id);
42760+ usleep_range(10000, 20000);
42761+
42762+ /* Begin disabling all the rings associated with VFs, but do not wait
42763+ * between each VF.
42764+ */
42765+ for (v = 0; v < pf->num_alloc_vfs; v++) {
42766+ /* On initial reset, we don't have any queues to disable */
42767+ if (pf->vf[v].lan_vsi_idx == 0)
42768+ continue;
42769+
42770+ i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
42771+ }
42772+
42773+ /* Now that we've notified HW to disable all of the VF rings, wait
42774+ * until they finish.
42775+ */
42776+ for (v = 0; v < pf->num_alloc_vfs; v++) {
42777+ /* On initial reset, we don't have any queues to disable */
42778+ if (pf->vf[v].lan_vsi_idx == 0)
42779+ continue;
42780+
42781+ i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
42782+ }
42783+
42784+ /* Hw may need up to 50ms to finish disabling the RX queues. We
42785+ * minimize the wait by delaying only once for all VFs.
42786+ */
42787+ mdelay(50);
42788+
42789+ /* Finish the reset on each VF */
42790+ for (v = 0; v < pf->num_alloc_vfs; v++)
42791+ i40e_cleanup_reset_vf(&pf->vf[v]);
42792+
42793+ i40e_flush(hw);
42794+ clear_bit(__I40E_VF_DISABLE, pf->state);
42795+
42796+ return true;
42797+}
42798+
42799+/**
42800+ * i40e_free_vfs
42801+ * @pf: pointer to the PF structure
42802+ *
42803+ * free VF resources
42804+ **/
42805+void i40e_free_vfs(struct i40e_pf *pf)
42806+{
42807+ struct i40e_hw *hw = &pf->hw;
42808+ u32 reg_idx, bit_idx;
42809+ int i, tmp, vf_id;
42810+ struct i40e_vsi *src_vsi;
42811+ u16 rule_type, rule_id;
42812+ int ret;
42813+
42814+ if (!pf->vf)
42815+ return;
42816+
42817+ set_bit(__I40E_VFS_RELEASING, pf->state);
42818+
42819+ while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
42820+ usleep_range(1000, 2000);
42821+
42822+ i40e_notify_client_of_vf_enable(pf, 0);
42823+
42824+ /* At start we need to clear all ingress and egress mirroring setup.
42825+	 * We can continue only once all mirroring has been removed.
42826+ */
42827+ for (i = 0; i < pf->num_alloc_vfs; i++) {
42828+ src_vsi = pf->vsi[pf->vf[i].lan_vsi_idx];
42829+ if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->vf[i].ingress_vlan)) {
42830+ rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS;
42831+ rule_id = pf->vf[i].ingress_rule_id;
42832+ ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type, rule_id);
42833+ if (ret)
42834+ dev_warn(&pf->pdev->dev,
42835+				 "Error %s when trying to remove ingress mirror on VF %d",
42836+ i40e_aq_str
42837+ (hw, hw->aq.asq_last_status),
42838+ pf->vf[i].vf_id);
42839+ }
42840+ if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->vf[i].egress_vlan)) {
42841+ rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
42842+ rule_id = pf->vf[i].egress_rule_id;
42843+ ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type, rule_id);
42844+ if (ret)
42845+ dev_warn(&pf->pdev->dev,
42846+				 "Error %s when trying to remove egress mirror on VF %d",
42847+ i40e_aq_str
42848+ (hw, hw->aq.asq_last_status),
42849+ pf->vf[i].vf_id);
42850+ }
42851+ }
42852+
42853+ /* Amortize wait time by stopping all VFs at the same time */
42854+ for (i = 0; i < pf->num_alloc_vfs; i++) {
42855+ if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
42856+ continue;
42857+
42858+ i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
42859+ }
42860+
42861+ for (i = 0; i < pf->num_alloc_vfs; i++) {
42862+ if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
42863+ continue;
42864+
42865+ i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
42866+ }
42867+
42868+ /* Disable IOV before freeing resources. This lets any VF drivers
42869+ * running in the host get themselves cleaned up before we yank
42870+ * the carpet out from underneath their feet.
42871+ */
42872+ if (!pci_vfs_assigned(pf->pdev))
42873+ pci_disable_sriov(pf->pdev);
42874+ else
42875+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
42876+
42877+ /* free up VF resources */
42878+ tmp = pf->num_alloc_vfs;
42879+ pf->num_alloc_vfs = 0;
42880+ for (i = 0; i < tmp; i++) {
42881+ if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
42882+ i40e_free_vf_res(&pf->vf[i]);
42883+ /* disable qp mappings */
42884+ i40e_disable_vf_mappings(&pf->vf[i]);
42885+ }
42886+#ifdef HAVE_NDO_SET_VF_LINK_STATE
42887+ if (pf->vfd_obj) {
42888+ destroy_vfd_sysfs(pf->pdev, pf->vfd_obj);
42889+ pf->vfd_obj = NULL;
42890+ }
42891+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
42892+
42893+ kfree(pf->vf);
42894+ pf->vf = NULL;
42895+
42896+ /* This check is for when the driver is unloaded while VFs are
42897+ * assigned. Setting the number of VFs to 0 through sysfs is caught
42898+ * before this function ever gets called.
42899+ */
42900+ if (!pci_vfs_assigned(pf->pdev)) {
42901+ /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
42902+ * work correctly when SR-IOV gets re-enabled.
42903+ */
42904+ for (vf_id = 0; vf_id < tmp; vf_id++) {
42905+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
42906+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
42907+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
42908+ }
42909+ }
42910+ clear_bit(__I40E_VF_DISABLE, pf->state);
42911+ clear_bit(__I40E_VFS_RELEASING, pf->state);
42912+}
42913+
42914+#ifdef CONFIG_PCI_IOV
42915+/**
42916+ * i40e_alloc_vfs
42917+ * @pf: pointer to the PF structure
42918+ * @num_alloc_vfs: number of VFs to allocate
42919+ *
42920+ * allocate VF resources
42921+ **/
42922+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
42923+{
42924+ struct i40e_vf *vfs;
42925+ int i, ret = 0;
42926+
42927+ /* Disable interrupt 0 so we don't try to handle the VFLR. */
42928+ i40e_irq_dynamic_disable_icr0(pf);
42929+
42930+ /* Check to see if we're just allocating resources for extant VFs */
42931+ if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
42932+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
42933+ if (ret) {
42934+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
42935+ pf->num_alloc_vfs = 0;
42936+ goto err_iov;
42937+ }
42938+ }
42939+ /* allocate memory */
42940+ vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
42941+ if (!vfs) {
42942+ ret = -ENOMEM;
42943+ goto err_alloc;
42944+ }
42945+ pf->vf = vfs;
42946+
42947+#ifdef HAVE_NDO_SET_VF_LINK_STATE
42948+ /* set vfd ops */
42949+ vfd_ops = &i40e_vfd_ops;
42950+ /* create the sriov kobjects */
42951+ pf->vfd_obj = create_vfd_sysfs(pf->pdev, num_alloc_vfs);
42952+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
42953+
42954+ /* apply default profile */
42955+ for (i = 0; i < num_alloc_vfs; i++) {
42956+ vfs[i].pf = pf;
42957+ vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
42958+ vfs[i].vf_id = i;
42959+
42960+#ifdef HAVE_NDO_SET_VF_LINK_STATE
42961+ /* setup default mirror values */
42962+ vfs[i].ingress_vlan = I40E_NO_VF_MIRROR;
42963+ vfs[i].egress_vlan = I40E_NO_VF_MIRROR;
42964+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
42965+ /* assign default loopback value */
42966+ vfs[i].loopback = true;
42967+ /* assign default mac anti spoof value for untrusted VF */
42968+ vfs[i].mac_anti_spoof = true;
42969+ /* assign default allow_untagged value */
42970+ vfs[i].allow_untagged = true;
42971+ /* assign default capabilities */
42972+ set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
42973+ set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
42974+ }
42975+ pf->num_alloc_vfs = num_alloc_vfs;
42976+
42977+ /* VF resources get allocated during reset */
42978+ i40e_reset_all_vfs(pf, false);
42979+
42980+ i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
42981+
42982+err_alloc:
42983+ if (ret)
42984+ i40e_free_vfs(pf);
42985+err_iov:
42986+ /* Re-enable interrupt 0. */
42987+ i40e_irq_dynamic_enable_icr0(pf);
42988+ return ret;
42989+}
42990+
42991+#endif
42992+#if defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)
42993+/**
42994+ * i40e_pci_sriov_enable
42995+ * @pdev: pointer to a pci_dev structure
42996+ * @num_vfs: number of VFs to allocate
42997+ *
42998+ * Enable or change the number of VFs
42999+ **/
43000+static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
43001+{
43002+#ifdef CONFIG_PCI_IOV
43003+ struct i40e_pf *pf = pci_get_drvdata(pdev);
43004+ int pre_existing_vfs = pci_num_vf(pdev);
43005+ int err = 0;
43006+
43007+ if (test_bit(__I40E_TESTING, pf->state)) {
43008+ dev_warn(&pdev->dev,
43009+ "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
43010+ err = -EPERM;
43011+ goto err_out;
43012+ }
43013+
43014+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
43015+ i40e_free_vfs(pf);
43016+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
43017+ goto out;
43018+
43019+ if (num_vfs > pf->num_req_vfs) {
43020+ dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
43021+ num_vfs, pf->num_req_vfs);
43022+ err = -EPERM;
43023+ goto err_out;
43024+ }
43025+
43026+ dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
43027+ err = i40e_alloc_vfs(pf, num_vfs);
43028+ if (err) {
43029+ dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
43030+ goto err_out;
43031+ }
43032+
43033+out:
43034+ return num_vfs;
43035+
43036+err_out:
43037+ return err;
43038+#endif
43039+ return 0;
43040+}
43041+
43042+/**
43043+ * i40e_pci_sriov_configure
43044+ * @pdev: pointer to a pci_dev structure
43045+ * @num_vfs: number of vfs to allocate
43046+ *
43047+ * Enable or change the number of VFs. Called when the user updates the number
43048+ * of VFs in sysfs.
43049+ **/
43050+int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
43051+{
43052+ struct i40e_pf *pf = pci_get_drvdata(pdev);
43053+ int ret = 0;
43054+
43055+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
43056+		dev_warn(&pdev->dev, "Unable to configure VFs, another operation is pending.\n");
43057+ return -EAGAIN;
43058+ }
43059+
43060+ if (num_vfs) {
43061+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
43062+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
43063+ i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
43064+ }
43065+ ret = i40e_pci_sriov_enable(pdev, num_vfs);
43066+ goto sriov_configure_out;
43067+ }
43068+ if (!pci_vfs_assigned(pdev)) {
43069+ i40e_free_vfs(pf);
43070+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
43071+ i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
43072+ } else {
43073+ dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
43074+ ret = -EINVAL;
43075+ goto sriov_configure_out;
43076+ }
43077+sriov_configure_out:
43078+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
43079+ return ret;
43080+}
43081+#endif
43082+
43083+/***********************virtual channel routines******************/
43084+
43085+/**
43086+ * i40e_vc_send_msg_to_vf_ex
43087+ * @vf: pointer to the VF info
43088+ * @v_opcode: virtual channel opcode
43089+ * @v_retval: virtual channel return value
43090+ * @msg: pointer to the msg buffer
43091+ * @msglen: msg length
43092+ * @is_quiet: true to suppress printing of unsuccessful return values, false otherwise
43093+ *
43094+ * send msg to VF
43095+ **/
43096+static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
43097+ u32 v_retval, u8 *msg, u16 msglen,
43098+ bool is_quiet)
43099+{
43100+ struct i40e_pf *pf;
43101+ struct i40e_hw *hw;
43102+ int abs_vf_id;
43103+ i40e_status aq_ret;
43104+
43105+ /* validate the request */
43106+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
43107+ return -EINVAL;
43108+
43109+ pf = vf->pf;
43110+ hw = &pf->hw;
43111+ abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
43112+
43113+ /* single place to detect unsuccessful return values */
43114+ if (v_retval && !is_quiet) {
43115+ vf->num_invalid_msgs++;
43116+ dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
43117+ vf->vf_id, v_opcode, v_retval);
43118+ if (vf->num_invalid_msgs >
43119+ I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
43120+ dev_err(&pf->pdev->dev,
43121+ "Number of invalid messages exceeded for VF %d\n",
43122+ vf->vf_id);
43123+ dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
43124+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
43125+ }
43126+ } else {
43127+ vf->num_valid_msgs++;
43128+ /* reset the invalid counter, if a valid message is received. */
43129+ vf->num_invalid_msgs = 0;
43130+ }
43131+
43132+ aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
43133+ msg, msglen, NULL);
43134+ if (aq_ret) {
43135+ dev_info(&pf->pdev->dev,
43136+ "Unable to send the message to VF %d aq_err %d\n",
43137+ vf->vf_id, pf->hw.aq.asq_last_status);
43138+ return -EIO;
43139+ }
43140+
43141+ return 0;
43142+}
43143+
43144+/**
43145+ * i40e_vc_send_msg_to_vf
43146+ * @vf: pointer to the VF info
43147+ * @v_opcode: virtual channel opcode
43148+ * @v_retval: virtual channel return value
43149+ * @msg: pointer to the msg buffer
43150+ * @msglen: msg length
43151+ *
43152+ * send msg to VF
43153+ **/
43154+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
43155+ u32 v_retval, u8 *msg, u16 msglen)
43156+{
43157+ return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
43158+ msg, msglen, false);
43159+}
43160+
43161+/**
43162+ * i40e_vc_send_resp_to_vf
43163+ * @vf: pointer to the VF info
43164+ * @opcode: operation code
43165+ * @retval: return value
43166+ *
43167+ * send resp msg to VF
43168+ **/
43169+static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
43170+ enum virtchnl_ops opcode,
43171+ i40e_status retval)
43172+{
43173+ return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
43174+}
43175+
43176+/**
43177+ * i40e_vc_get_version_msg
43178+ * @vf: pointer to the VF info
43179+ * @msg: pointer to the msg buffer
43180+ *
43181+ * called from the VF to request the API version used by the PF
43182+ **/
43183+static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
43184+{
43185+ struct virtchnl_version_info info = {
43186+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
43187+ };
43188+
43189+ vf->vf_ver = *(struct virtchnl_version_info *)msg;
43190+ /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
43191+ if (VF_IS_V10(&vf->vf_ver))
43192+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
43193+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
43194+ I40E_SUCCESS, (u8 *)&info,
43195+ sizeof(struct virtchnl_version_info));
43196+}
43197+
43198+#ifdef __TC_MQPRIO_MODE_MAX
43199+/**
43200+ * i40e_del_qch - delete all the additional VSIs created as a part of ADq
43201+ * @vf: pointer to VF structure
43202+ **/
43203+static void i40e_del_qch(struct i40e_vf *vf)
43204+{
43205+ struct i40e_pf *pf = vf->pf;
43206+ int i;
43207+
43208+	/* The first element in the array belongs to the primary VF VSI and we
43209+	 * shouldn't delete it. We should, however, delete the rest of the VSIs created.
43210+ */
43211+ for (i = 1; i < vf->num_tc; i++) {
43212+ if (vf->ch[i].vsi_idx) {
43213+ i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
43214+ vf->ch[i].vsi_idx = 0;
43215+ vf->ch[i].vsi_id = 0;
43216+ }
43217+ }
43218+}
43219+
43220+#endif /* __TC_MQPRIO_MODE_MAX */
43221+
43222+/**
43223+ * i40e_vc_get_vf_resources_msg
43224+ * @vf: pointer to the VF info
43225+ * @msg: pointer to the msg buffer
43226+ *
43227+ * called from the VF to request its resources
43228+ **/
43229+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
43230+{
43231+ struct virtchnl_vf_resource *vfres = NULL;
43232+ struct i40e_pf *pf = vf->pf;
43233+ i40e_status aq_ret = 0;
43234+ struct i40e_vsi *vsi;
43235+ int num_vsis = 1;
43236+ int len = 0;
43237+ int ret;
43238+
43239+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
43240+ aq_ret = I40E_ERR_PARAM;
43241+ goto err;
43242+ }
43243+
43244+ len = (sizeof(struct virtchnl_vf_resource) +
43245+ sizeof(struct virtchnl_vsi_resource) * num_vsis);
43246+
43247+ vfres = kzalloc(len, GFP_KERNEL);
43248+ if (!vfres) {
43249+ aq_ret = I40E_ERR_NO_MEMORY;
43250+ len = 0;
43251+ goto err;
43252+ }
43253+ if (VF_IS_V11(&vf->vf_ver))
43254+ vf->driver_caps = *(u32 *)msg;
43255+ else
43256+ vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
43257+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
43258+ VIRTCHNL_VF_OFFLOAD_VLAN;
43259+
43260+ vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
43261+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
43262+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
43263+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
43264+
43265+ vsi = pf->vsi[vf->lan_vsi_idx];
43266+ if (!vsi->info.pvid)
43267+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
43268+
43269+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
43270+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
43271+ } else {
43272+ if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
43273+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
43274+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
43275+ else
43276+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
43277+ }
43278+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
43279+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
43280+ vfres->vf_cap_flags |=
43281+ VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
43282+ }
43283+
43284+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
43285+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
43286+
43287+ if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
43288+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
43289+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
43290+
43291+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
43292+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
43293+ dev_err(&pf->pdev->dev,
43294+ "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
43295+ vf->vf_id);
43296+ aq_ret = I40E_ERR_PARAM;
43297+ goto err;
43298+ }
43299+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
43300+ }
43301+
43302+ if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
43303+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
43304+ vfres->vf_cap_flags |=
43305+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
43306+ }
43307+
43308+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
43309+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
43310+
43311+#ifdef __TC_MQPRIO_MODE_MAX
43312+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
43313+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
43314+#endif /* __TC_MQPRIO_MODE_MAX */
43315+
43316+ vfres->num_vsis = num_vsis;
43317 vfres->num_queue_pairs = vf->num_queue_pairs;
43318 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
43319 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
43320 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
43321
43322- if (vf->lan_vsi_idx) {
43323- vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
43324- vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
43325- vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
43326- /* VFs only use TC 0 */
43327- vfres->vsi_res[0].qset_handle
43328- = le16_to_cpu(vsi->info.qs_handle[0]);
43329- ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
43330- vf->default_lan_addr.addr);
43331+ if (vf->lan_vsi_idx) {
43332+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
43333+ vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
43334+ vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
43335+ /* VFs only use TC 0 */
43336+ vfres->vsi_res[0].qset_handle
43337+ = LE16_TO_CPU(vsi->info.qs_handle[0]);
43338+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
43339+ vf->default_lan_addr.addr);
43340+ }
43341+ set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
43342+ set_bit(I40E_VF_STATE_LOADED_VF_DRIVER, &vf->vf_states);
43343+ /* if vf is in base mode, keep only the base capabilities that are
43344+ * negotiated
43345+ */
43346+ if (pf->vf_base_mode_only)
43347+ vfres->vf_cap_flags &= VF_BASE_MODE_OFFLOADS;
43348+err:
43349+ /* send the response back to the VF */
43350+ ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
43351+ aq_ret, (u8 *)vfres, len);
43352+
43353+ kfree(vfres);
43354+ return ret;
43355+}
43356+
43357+/**
43358+ * i40e_getnum_vf_vsi_vlan_filters
43359+ * @vsi: pointer to the vsi
43360+ *
43361+ * called to get the number of VLANs offloaded on this VF
43362+ **/
43363+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
43364+{
43365+ struct i40e_mac_filter *f;
43366+ int num_vlans = 0, bkt;
43367+
43368+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
43369+ if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
43370+ num_vlans++;
43371+ }
43372+
43373+ return num_vlans;
43374+}
43375+
43376+/**
43377+ * i40e_get_vlan_list_sync
43378+ * @vsi: pointer to the vsi
43379+ * @num_vlans: number of vlans present in mac_filter_hash, returned to caller
43380+ * @vlan_list: list of vlans present in mac_filter_hash, returned to caller.
43381+ * This array is allocated here, but must be freed by the caller.
43382+ *
43383+ * Called to get number of vlans and vlan list present in mac_filter_hash.
43384+ **/
43385+
43386+static inline void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, int *num_vlans,
43387+ s16 **vlan_list)
43388+{
43389+ struct i40e_mac_filter *f;
43390+ int bkt;
43391+ int i;
43392+
43393+ spin_lock_bh(&vsi->mac_filter_hash_lock);
43394+ *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi);
43395+ *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list),
43396+ GFP_ATOMIC);
43397+ if (!(*vlan_list))
43398+ goto err;
43399+
43400+ i = 0;
43401+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
43402+ if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
43403+ continue;
43404+ (*vlan_list)[i++] = f->vlan;
43405+ }
43406+err:
43407+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
43408+}
43409+
43410+/**
43411+ * i40e_set_vsi_promisc
43412+ * @vf: pointer to the vf struct
43413+ * @seid: vsi number
43414+ * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
43415+ * for a given VLAN
43416+ * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
43417+ * for a given VLAN
43418+ * @vl: List of vlans - apply filter for given vlans
43419+ * @num_vlans: Number of elements in @vl
43420+ **/
43421+static inline i40e_status
43422+i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
43423+ bool unicast_enable, s16 *vl, int num_vlans)
43424+{
43425+ struct i40e_pf *pf = vf->pf;
43426+ struct i40e_hw *hw = &pf->hw;
43427+ i40e_status aq_ret = 0;
43428+ int i;
43429+
43430+ /* No vlan to set promisc on, set on vsi */
43431+ if (!num_vlans || !vl) {
43432+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
43433+ multi_enable,
43434+ NULL);
43435+ if (aq_ret) {
43436+ int aq_err = pf->hw.aq.asq_last_status;
43437+
43438+ dev_err(&pf->pdev->dev,
43439+ "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
43440+ vf->vf_id,
43441+ i40e_stat_str(&pf->hw, aq_ret),
43442+ i40e_aq_str(&pf->hw, aq_err));
43443+
43444+ return aq_ret;
43445+ }
43446+
43447+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
43448+ unicast_enable,
43449+ NULL, true);
43450+
43451+ if (aq_ret) {
43452+ int aq_err = pf->hw.aq.asq_last_status;
43453+
43454+ dev_err(&pf->pdev->dev,
43455+ "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
43456+ vf->vf_id,
43457+ i40e_stat_str(&pf->hw, aq_ret),
43458+ i40e_aq_str(&pf->hw, aq_err));
43459+ }
43460+
43461+ return aq_ret;
43462+ }
43463+
43464+ for (i = 0; i < num_vlans; i++) {
43465+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
43466+ multi_enable,
43467+ vl[i], NULL);
43468+ if (aq_ret) {
43469+ int aq_err = pf->hw.aq.asq_last_status;
43470+
43471+ dev_err(&pf->pdev->dev,
43472+ "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
43473+ vf->vf_id,
43474+ i40e_stat_str(&pf->hw, aq_ret),
43475+ i40e_aq_str(&pf->hw, aq_err));
43476+ }
43477+
43478+ aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
43479+ unicast_enable,
43480+ vl[i], NULL);
43481+ if (aq_ret) {
43482+ int aq_err = pf->hw.aq.asq_last_status;
43483+
43484+ dev_err(&pf->pdev->dev,
43485+ "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
43486+ vf->vf_id,
43487+ i40e_stat_str(&pf->hw, aq_ret),
43488+ i40e_aq_str(&pf->hw, aq_err));
43489+ }
43490+ }
43491+ return aq_ret;
43492+}
43493+
43494+/**
43495+ * i40e_vc_config_promiscuous_mode_msg
43496+ * @vf: pointer to the VF info
43497+ * @msg: pointer to the msg buffer
43498+ *
43499+ * called from the VF to configure the promiscuous mode of
43500+ * VF vsis
43501+ **/
43502+static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
43503+{
43504+ struct virtchnl_promisc_info *info =
43505+ (struct virtchnl_promisc_info *)msg;
43506+ i40e_status aq_ret = I40E_SUCCESS;
43507+ struct i40e_pf *pf = vf->pf;
43508+ bool allmulti = false;
43509+ bool alluni = false;
43510+
43511+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
43512+ aq_ret = I40E_ERR_PARAM;
43513+ goto err_out;
43514+ }
43515+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
43516+ dev_err(&pf->pdev->dev,
43517+ "Unprivileged VF %d is attempting to configure promiscuous mode\n",
43518+ vf->vf_id);
43519+ if (pf->vf_base_mode_only)
43520+			dev_err(&pf->pdev->dev, "VF %d is in base mode only, promiscuous mode is not supported\n",
43521+ vf->vf_id);
43522+
43523+ /* Lie to the VF on purpose, because this is an error we can
43524+ * ignore. Unprivileged VF is not a virtual channel error.
43525+ */
43526+ aq_ret = I40E_SUCCESS;
43527+ goto err_out;
43528+ }
43529+
43530+ if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
43531+ aq_ret = I40E_ERR_PARAM;
43532+ goto err_out;
43533+ }
43534+
43535+ if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
43536+ aq_ret = I40E_ERR_PARAM;
43537+ goto err_out;
43538+ }
43539+
43540+	/* Multicast promiscuous handling */
43541+ if (info->flags & FLAG_VF_MULTICAST_PROMISC)
43542+ allmulti = true;
43543+
43544+ if (info->flags & FLAG_VF_UNICAST_PROMISC)
43545+ alluni = true;
43546+
43547+ aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
43548+ alluni);
43549+ if (aq_ret)
43550+ goto err_out;
43551+
43552+ if (allmulti) {
43553+ if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
43554+ &vf->vf_states))
43555+ dev_info(&pf->pdev->dev,
43556+ "VF %d successfully set multicast promiscuous mode\n",
43557+ vf->vf_id);
43558+ } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
43559+ &vf->vf_states))
43560+ dev_info(&pf->pdev->dev,
43561+ "VF %d successfully unset multicast promiscuous mode\n",
43562+ vf->vf_id);
43563+
43564+ if (alluni) {
43565+ if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
43566+ &vf->vf_states))
43567+ dev_info(&pf->pdev->dev,
43568+ "VF %d successfully set unicast promiscuous mode\n",
43569+ vf->vf_id);
43570+ } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
43571+ &vf->vf_states))
43572+ dev_info(&pf->pdev->dev,
43573+ "VF %d successfully unset unicast promiscuous mode\n",
43574+ vf->vf_id);
43575+
43576+err_out:
43577+ /* send the response to the VF */
43578+ return i40e_vc_send_resp_to_vf(vf,
43579+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
43580+ aq_ret);
43581+}
43582+
43583+/**
43584+ * i40e_vc_config_queues_msg
43585+ * @vf: pointer to the VF info
43586+ * @msg: pointer to the msg buffer
43587+ *
43588+ * called from the VF to configure the rx/tx
43589+ * queues
43590+ **/
43591+static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
43592+{
43593+ struct virtchnl_vsi_queue_config_info *qci =
43594+ (struct virtchnl_vsi_queue_config_info *)msg;
43595+ struct virtchnl_queue_pair_info *qpi;
43596+ struct i40e_pf *pf = vf->pf;
43597+ u16 vsi_id, vsi_queue_id = 0;
43598+ u16 num_qps_all = 0;
43599+ i40e_status aq_ret = 0;
43600+ int i, j = 0, idx = 0;
43601+
43602+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
43603+ aq_ret = I40E_ERR_PARAM;
43604+ goto error_param;
43605+ }
43606+
43607+ if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
43608+ aq_ret = I40E_ERR_PARAM;
43609+ goto error_param;
43610+ }
43611+
43612+ if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
43613+ aq_ret = I40E_ERR_PARAM;
43614+ goto error_param;
43615+ }
43616+
43617+ if (vf->adq_enabled) {
43618+ for (i = 0; i < I40E_MAX_VF_VSI; i++)
43619+ num_qps_all += vf->ch[i].num_qps;
43620+ if (num_qps_all != qci->num_queue_pairs) {
43621+ aq_ret = I40E_ERR_PARAM;
43622+ goto error_param;
43623+ }
43624+ }
43625+
43626+ vsi_id = qci->vsi_id;
43627+
43628+ for (i = 0; i < qci->num_queue_pairs; i++) {
43629+ qpi = &qci->qpair[i];
43630+
43631+ if (!vf->adq_enabled) {
43632+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
43633+ qpi->txq.queue_id)) {
43634+ aq_ret = I40E_ERR_PARAM;
43635+ goto error_param;
43636+ }
43637+
43638+ vsi_queue_id = qpi->txq.queue_id;
43639+
43640+ if (qpi->txq.vsi_id != qci->vsi_id ||
43641+ qpi->rxq.vsi_id != qci->vsi_id ||
43642+ qpi->rxq.queue_id != vsi_queue_id) {
43643+ aq_ret = I40E_ERR_PARAM;
43644+ goto error_param;
43645+ }
43646+ }
43647+
43648+ if (vf->adq_enabled) {
43649+ if (idx >= ARRAY_SIZE(vf->ch)) {
43650+ aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
43651+ goto error_param;
43652+ }
43653+ vsi_id = vf->ch[idx].vsi_id;
43654+ }
43655+
43656+ if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
43657+ &qpi->rxq) ||
43658+ i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
43659+ &qpi->txq)) {
43660+ aq_ret = I40E_ERR_PARAM;
43661+ goto error_param;
43662+ }
43663+
43664+		/* For ADq there can be up to 4 VSIs with a maximum of 4 queues each.
43665+		 * The VF does not know about these additional VSIs; all it cares
43666+		 * about is its own queues. The PF configures these queues to the
43667+		 * appropriate VSIs based on the TC mapping.
43668+ */
43669+ if (vf->adq_enabled) {
43670+ if (idx >= ARRAY_SIZE(vf->ch)) {
43671+ aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
43672+ goto error_param;
43673+ }
43674+ if (j == (vf->ch[idx].num_qps - 1)) {
43675+ idx++;
43676+ j = 0; /* resetting the queue count */
43677+ vsi_queue_id = 0;
43678+ } else {
43679+ j++;
43680+ vsi_queue_id++;
43681+ }
43682+ }
43683+ }
43684+
43685+ /* set vsi num_queue_pairs in use to num configured by VF */
43686+ if (!vf->adq_enabled) {
43687+ pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
43688+ qci->num_queue_pairs;
43689+ } else {
43690+ for (i = 0; i < vf->num_tc; i++)
43691+ pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
43692+ vf->ch[i].num_qps;
43693+ }
43694+
43695+error_param:
43696+ /* send the response to the VF */
43697+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
43698+ aq_ret);
43699+}
43700+
43701+/**
43702+ * i40e_validate_queue_map
43703+ * @vf: pointer to the VF info
43704+ * @vsi_id: vsi id
43705+ * @queuemap: Tx or Rx queue map
43706+ *
43707+ * check if Tx or Rx queue map is valid
43708+ **/
43709+static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
43710+ unsigned long queuemap)
43711+{
43712+ u16 vsi_queue_id, queue_id;
43713+
43714+ for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
43715+ if (vf->adq_enabled) {
43716+ vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
43717+ queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
43718+ } else {
43719+ queue_id = vsi_queue_id;
43720+ }
43721+
43722+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
43723+ return -EINVAL;
43724+ }
43725+
43726+ return 0;
43727+}
43728+
43729+/**
43730+ * i40e_vc_config_irq_map_msg
43731+ * @vf: pointer to the VF info
43732+ * @msg: pointer to the msg buffer
43733+ *
43734+ * called from the VF to configure the irq to
43735+ * queue map
43736+ **/
43737+static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
43738+{
43739+ struct virtchnl_irq_map_info *irqmap_info =
43740+ (struct virtchnl_irq_map_info *)msg;
43741+ struct virtchnl_vector_map *map;
43742+ u16 vsi_id;
43743+ i40e_status aq_ret = 0;
43744+ int i;
43745+
43746+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
43747+ aq_ret = I40E_ERR_PARAM;
43748+ goto error_param;
43749+ }
43750+
43751+ if (irqmap_info->num_vectors >
43752+ vf->pf->hw.func_caps.num_msix_vectors_vf) {
43753+ aq_ret = I40E_ERR_PARAM;
43754+ goto error_param;
43755+ }
43756+
43757+ for (i = 0; i < irqmap_info->num_vectors; i++) {
43758+ map = &irqmap_info->vecmap[i];
43759+ /* validate msg params */
43760+ if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
43761+ !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
43762+ aq_ret = I40E_ERR_PARAM;
43763+ goto error_param;
43764+ }
43765+ vsi_id = map->vsi_id;
43766+
43767+ if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
43768+ aq_ret = I40E_ERR_PARAM;
43769+ goto error_param;
43770+ }
43771+
43772+ if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
43773+ aq_ret = I40E_ERR_PARAM;
43774+ goto error_param;
43775+ }
43776+
43777+ i40e_config_irq_link_list(vf, vsi_id, map);
43778+ }
43779+error_param:
43780+ /* send the response to the VF */
43781+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
43782+ aq_ret);
43783+}
43784+
43785+/**
43786+ * i40e_ctrl_vf_tx_rings
43787+ * @vsi: the SRIOV VSI being configured
43788+ * @q_map: bit map of the queues to be enabled
43789+ * @enable: start or stop the queue
43790+ **/
43791+static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
43792+ bool enable)
43793+{
43794+ struct i40e_pf *pf = vsi->back;
43795+ int ret = 0;
43796+ u16 q_id;
43797+
43798+ for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
43799+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
43800+ vsi->base_queue + q_id,
43801+ false /*is xdp*/, enable);
43802+ if (ret)
43803+ break;
43804+ }
43805+ return ret;
43806+}
43807+
43808+/**
43809+ * i40e_ctrl_vf_rx_rings
43810+ * @vsi: the SRIOV VSI being configured
43811+ * @q_map: bit map of the queues to be enabled
43812+ * @enable: start or stop the queue
43813+ **/
43814+static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
43815+ bool enable)
43816+{
43817+ struct i40e_pf *pf = vsi->back;
43818+ int ret = 0;
43819+ u16 q_id;
43820+
43821+ for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
43822+ ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
43823+ enable);
43824+ if (ret)
43825+ break;
43826+ }
43827+ return ret;
43828+}
43829+
43830+/**
43831+ * i40e_vc_enable_queues_msg
43832+ * @vf: pointer to the VF info
43833+ * @msg: pointer to the msg buffer
43834+ *
43835+ * called from the VF to enable all or specific queue(s)
43836+ **/
43837+static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
43838+{
43839+ struct virtchnl_queue_select *vqs =
43840+ (struct virtchnl_queue_select *)msg;
43841+ struct i40e_pf *pf = vf->pf;
43842+ i40e_status aq_ret = 0;
43843+ int i;
43844+
43845+ if (vf->pf_ctrl_disable) {
43846+ aq_ret = I40E_ERR_PARAM;
43847+ dev_err(&pf->pdev->dev,
43848+ "Admin has disabled VF %d via sysfs, will not enable queues",
43849+ vf->vf_id);
43850+ goto error_param;
43851+ }
43852+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
43853+ aq_ret = I40E_ERR_PARAM;
43854+ goto error_param;
43855+ }
43856+
43857+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
43858+ aq_ret = I40E_ERR_PARAM;
43859+ goto error_param;
43860+ }
43861+
43862+ if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
43863+ aq_ret = I40E_ERR_PARAM;
43864+ goto error_param;
43865+ }
43866+
43867+ /* Use the queue bit map sent by the VF */
43868+ if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
43869+ true)) {
43870+ aq_ret = I40E_ERR_TIMEOUT;
43871+ goto error_param;
43872+ }
43873+ if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
43874+ true)) {
43875+ aq_ret = I40E_ERR_TIMEOUT;
43876+ goto error_param;
43877+ }
43878+
43879+ /* need to start the rings for additional ADq VSI's as well */
43880+ if (vf->adq_enabled) {
43881+ /* zero belongs to LAN VSI */
43882+ for (i = 1; i < vf->num_tc; i++) {
43883+ if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->ch[i].vsi_idx],
43884+ vqs->rx_queues, true)) {
43885+ aq_ret = I40E_ERR_TIMEOUT;
43886+ goto error_param;
43887+ }
43888+ if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->ch[i].vsi_idx],
43889+ vqs->tx_queues, true)) {
43890+ aq_ret = I40E_ERR_TIMEOUT;
43891+ goto error_param;
43892+ }
43893+ }
43894+ }
43895+
43896+ vf->queues_enabled = true;
43897+
43898+error_param:
43899+ /* send the response to the VF */
43900+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
43901+ aq_ret);
43902+}
43903+
43904+/**
43905+ * i40e_vc_disable_queues_msg
43906+ * @vf: pointer to the VF info
43907+ * @msg: pointer to the msg buffer
43908+ *
43909+ * called from the VF to disable all or specific
43910+ * queue(s)
43911+ **/
43912+static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
43913+{
43914+ struct virtchnl_queue_select *vqs =
43915+ (struct virtchnl_queue_select *)msg;
43916+ struct i40e_pf *pf = vf->pf;
43917+ i40e_status aq_ret = 0;
43918+
43919+ /* Immediately mark queues as disabled */
43920+ vf->queues_enabled = false;
43921+
43922+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
43923+ aq_ret = I40E_ERR_PARAM;
43924+ goto error_param;
43925+ }
43926+
43927+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
43928+ aq_ret = I40E_ERR_PARAM;
43929+ goto error_param;
43930+ }
43931+
43932+ if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
43933+ vqs->rx_queues > I40E_MAX_VF_QUEUES ||
43934+ vqs->tx_queues > I40E_MAX_VF_QUEUES) {
43935+ aq_ret = I40E_ERR_PARAM;
43936+ goto error_param;
43937+ }
43938+
43939+ /* Use the queue bit map sent by the VF */
43940+ if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
43941+ false)) {
43942+ aq_ret = I40E_ERR_TIMEOUT;
43943+ goto error_param;
43944+ }
43945+ if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
43946+ false)) {
43947+ aq_ret = I40E_ERR_TIMEOUT;
43948+ goto error_param;
43949+ }
43950+error_param:
43951+ /* send the response to the VF */
43952+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
43953+ aq_ret);
43954+}
43955+
43956+/**
43957+ * i40e_vc_request_queues_msg
43958+ * @vf: pointer to the VF info
43959+ * @msg: pointer to the msg buffer
43960+ *
43961+ * VFs get a default number of queues but can use this message to request a
43962+ * different number. If the request is successful, the PF will reset the VF and
43963+ * return 0. If unsuccessful, the PF will send a message informing the VF of the
43964+ * number of available queues and return the result of sending that message.
43965+ **/
43966+static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
43967+{
43968+ struct virtchnl_vf_res_request *vfres =
43969+ (struct virtchnl_vf_res_request *)msg;
43970+ u16 req_pairs = vfres->num_queue_pairs;
43971+ u8 cur_pairs = vf->num_queue_pairs;
43972+ struct i40e_pf *pf = vf->pf;
43973+
43974+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
43975+ return -EINVAL;
43976+
43977+ if (req_pairs > I40E_MAX_VF_QUEUES) {
43978+ dev_err(&pf->pdev->dev,
43979+ "VF %d tried to request more than %d queues.\n",
43980+ vf->vf_id,
43981+ I40E_MAX_VF_QUEUES);
43982+ vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
43983+ } else if (req_pairs - cur_pairs > pf->queues_left) {
43984+ dev_warn(&pf->pdev->dev,
43985+ "VF %d requested %d more queues, but only %d left.\n",
43986+ vf->vf_id,
43987+ req_pairs - cur_pairs,
43988+ pf->queues_left);
43989+ vfres->num_queue_pairs = pf->queues_left + cur_pairs;
43990+ } else {
43991+ /* successful request */
43992+ vf->num_req_queues = req_pairs;
43993+ i40e_vc_reset_vf(vf, true);
43994+ return 0;
43995+ }
43996+
43997+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
43998+ (u8 *)vfres, sizeof(*vfres));
43999+}
44000+
44001+/**
44002+ * i40e_vc_get_stats_msg
44003+ * @vf: pointer to the VF info
44004+ * @msg: pointer to the msg buffer
44005+ *
44006+ * called from the VF to get vsi stats
44007+ **/
44008+static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
44009+{
44010+ struct virtchnl_queue_select *vqs =
44011+ (struct virtchnl_queue_select *)msg;
44012+ struct i40e_pf *pf = vf->pf;
44013+ struct i40e_eth_stats stats;
44014+ i40e_status aq_ret = 0;
44015+ struct i40e_vsi *vsi;
44016+
44017+ memset(&stats, 0, sizeof(struct i40e_eth_stats));
44018+
44019+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
44020+ aq_ret = I40E_ERR_PARAM;
44021+ goto error_param;
44022+ }
44023+
44024+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
44025+ aq_ret = I40E_ERR_PARAM;
44026+ goto error_param;
44027+ }
44028+
44029+ vsi = pf->vsi[vf->lan_vsi_idx];
44030+ if (!vsi) {
44031+ aq_ret = I40E_ERR_PARAM;
44032+ goto error_param;
44033+ }
44034+ i40e_update_eth_stats(vsi);
44035+ stats = vsi->eth_stats;
44036+
44037+error_param:
44038+ /* send the response back to the VF */
44039+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
44040+ (u8 *)&stats, sizeof(stats));
44041+}
44042+
44043+/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
44044+ * program: 16 for multicast, 1 for MAC, 1 for broadcast
44045+ */
44046+#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
44047+#define I40E_VC_MAX_VLAN_PER_VF 16
44048+
44049+/**
44050+ * i40e_check_vf_permission
44051+ * @vf: pointer to the VF info
44052+ * @al: MAC address list from virtchnl
44053+ * @is_quiet: set true for printing msg without opcode info, false otherwise
44054+ *
44055+ * Check that the given list of MAC addresses is allowed. Will return -EPERM
44056+ * if any address in the list is not valid. Checks the following conditions:
44057+ *
44058+ * 1) broadcast and zero addresses are never valid
44059+ * 2) unicast addresses are not allowed if the VMM has administratively set
44060+ * the VF MAC address, unless the VF is marked as privileged.
44061+ * 3) There is enough space to add all the addresses.
44062+ *
44063+ * Note that to guarantee consistency, it is expected this function be called
44064+ * while holding the mac_filter_hash_lock, as otherwise the current number of
44065+ * addresses might not be accurate.
44066+ **/
44067+static inline int i40e_check_vf_permission(struct i40e_vf *vf,
44068+ struct virtchnl_ether_addr_list *al,
44069+ bool *is_quiet)
44070+{
44071+ struct i40e_pf *pf = vf->pf;
44072+ struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
44073+ int mac2add_cnt = 0;
44074+ int i;
44075+
44076+ if (!is_quiet)
44077+ return -EINVAL;
44078+
44079+ *is_quiet = false;
44080+ for (i = 0; i < al->num_elements; i++) {
44081+ struct i40e_mac_filter *f;
44082+ u8 *addr = al->list[i].addr;
44083+
44084+ if (is_broadcast_ether_addr(addr) ||
44085+ is_zero_ether_addr(addr)) {
44086+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", addr);
44087+ return I40E_ERR_INVALID_MAC_ADDR;
44088+ }
44089+
44090+ /* If the host VMM administrator has set the VF MAC address
44091+ * administratively via the ndo_set_vf_mac command then deny
44092+ * permission to the VF to add or delete unicast MAC addresses.
44093+		 * The exception is a privileged VF, which may do so freely.
44094+ * The VF may request to set the MAC address filter already
44095+ * assigned to it so do not return an error in that case.
44096+ */
44097+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
44098+ !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
44099+ !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
44100+ dev_err(&pf->pdev->dev,
44101+ "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
44102+ *is_quiet = true;
44103+ return -EPERM;
44104+ }
44105+
44106+ /* count filters that really will be added */
44107+ f = i40e_find_mac(vsi, addr);
44108+ if (!f)
44109+ ++mac2add_cnt;
44110+ }
44111+
44112+ /* If this VF is not privileged, then we can't add more than a limited
44113+ * number of addresses. Check to make sure that the additions do not
44114+ * push us over the limit.
44115+ */
44116+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
44117+ (i40e_count_filters(vsi) + mac2add_cnt) >
44118+ I40E_VC_MAX_MAC_ADDR_PER_VF) {
44119+ dev_err(&pf->pdev->dev,
44120+ "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
44121+ if (pf->vf_base_mode_only)
44122+ dev_err(&pf->pdev->dev, "VF %d is in base mode only, cannot add more than %d filters\n",
44123+ vf->vf_id, I40E_VC_MAX_MAC_ADDR_PER_VF);
44124+ return -EPERM;
44125+ }
44126+ return 0;
44127+}
44128+
44129+/**
44130+ * i40e_vc_add_mac_addr_msg
44131+ * @vf: pointer to the VF info
44132+ * @msg: pointer to the msg buffer
44133+ *
44134+ * add guest mac address filter
44135+ **/
44136+static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
44137+{
44138+ struct virtchnl_ether_addr_list *al =
44139+ (struct virtchnl_ether_addr_list *)msg;
44140+ struct i40e_pf *pf = vf->pf;
44141+ struct i40e_vsi *vsi = NULL;
44142+ bool is_quiet = false;
44143+ i40e_status ret = 0;
44144+ int i;
44145+
44146+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
44147+ !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
44148+ ret = I40E_ERR_PARAM;
44149+ goto error_param;
44150+ }
44151+
44152+ vsi = pf->vsi[vf->lan_vsi_idx];
44153+
44154+	/* Lock once, because every function inside the for loop accesses the
44155+	 * VSI's MAC filter list, which needs to be protected by the same lock.
44156+ */
44157+ spin_lock_bh(&vsi->mac_filter_hash_lock);
44158+
44159+ ret = i40e_check_vf_permission(vf, al, &is_quiet);
44160+ if (ret) {
44161+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
44162+ goto error_param;
44163+ }
44164+
44165+ /* add new addresses to the list */
44166+ for (i = 0; i < al->num_elements; i++) {
44167+ struct i40e_mac_filter *f;
44168+
44169+ f = i40e_find_mac(vsi, al->list[i].addr);
44170+ if (!f) {
44171+ f = i40e_add_mac_filter(vsi, al->list[i].addr);
44172+ if (!f) {
44173+ dev_err(&pf->pdev->dev,
44174+ "Unable to add MAC filter %pM for VF %d\n",
44175+ al->list[i].addr, vf->vf_id);
44176+ ret = I40E_ERR_PARAM;
44177+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
44178+ goto error_param;
44179+ }
44180+ if (is_valid_ether_addr(al->list[i].addr))
44181+ ether_addr_copy(vf->default_lan_addr.addr,
44182+ al->list[i].addr);
44183+ }
44184+ }
44185+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
44186+
44187+ /* program the updated filter list */
44188+ ret = i40e_sync_vsi_filters(vsi);
44189+ if (ret)
44190+ dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
44191+ vf->vf_id, ret);
44192+
44193+error_param:
44194+ /* send the response to the VF */
44195+ return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
44196+ ret, NULL, 0, is_quiet);
44197+}
44198+
44199+/**
44200+ * i40e_vc_del_mac_addr_msg
44201+ * @vf: pointer to the VF info
44202+ * @msg: pointer to the msg buffer
44203+ *
44204+ * remove guest mac address filter
44205+ **/
44206+static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
44207+{
44208+ struct virtchnl_ether_addr_list *al =
44209+ (struct virtchnl_ether_addr_list *)msg;
44210+ struct i40e_pf *pf = vf->pf;
44211+ struct i40e_vsi *vsi = NULL;
44212+ i40e_status ret = 0;
44213+ int i;
44214+
44215+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
44216+ !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
44217+ ret = I40E_ERR_PARAM;
44218+ goto error_param;
44219+ }
44220+
44221+ for (i = 0; i < al->num_elements; i++) {
44222+ if (is_broadcast_ether_addr(al->list[i].addr) ||
44223+ is_zero_ether_addr(al->list[i].addr)) {
44224+ dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
44225+ al->list[i].addr, vf->vf_id);
44226+ ret = I40E_ERR_INVALID_MAC_ADDR;
44227+ goto error_param;
44228+ }
44229+ }
44230+ vsi = pf->vsi[vf->lan_vsi_idx];
44231+
44232+ spin_lock_bh(&vsi->mac_filter_hash_lock);
44233+ /* delete addresses from the list */
44234+ for (i = 0; i < al->num_elements; i++)
44235+ if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
44236+ ret = I40E_ERR_INVALID_MAC_ADDR;
44237+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
44238+ goto error_param;
44239+ }
44240+
44241+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
44242+
44243+ /* program the updated filter list */
44244+ ret = i40e_sync_vsi_filters(vsi);
44245+ if (ret)
44246+ dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
44247+ vf->vf_id, ret);
44248+
44249+error_param:
44250+ /* send the response to the VF */
44251+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
44252+ ret);
44253+}
44254+
44255+/**
44256+ * i40e_vc_add_vlan_msg
44257+ * @vf: pointer to the VF info
44258+ * @msg: pointer to the msg buffer
44259+ *
44260+ * program guest vlan id
44261+ **/
44262+static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
44263+{
44264+ struct virtchnl_vlan_filter_list *vfl =
44265+ (struct virtchnl_vlan_filter_list *)msg;
44266+ struct i40e_pf *pf = vf->pf;
44267+ struct i40e_vsi *vsi = NULL;
44268+ i40e_status aq_ret = 0;
44269+ int i;
44270+
44271+ if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
44272+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
44273+ dev_err(&pf->pdev->dev,
44274+ "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
44275+ if (pf->vf_base_mode_only)
44276+ dev_err(&pf->pdev->dev, "VF %d is in base mode only, cannot add more than %d vlans\n",
44277+ vf->vf_id, I40E_VC_MAX_VLAN_PER_VF);
44278+ goto error_param;
44279+ }
44280+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
44281+ !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
44282+ aq_ret = I40E_ERR_PARAM;
44283+ goto error_param;
44284+ }
44285+
44286+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
44287+ bitmap_weight(vf->trunk_vlans, VLAN_N_VID))
44288+ /* Silently fail the request if the VF is untrusted and trunk
44289+ * VLANs are configured.
44290+ */
44291+ goto error_param;
44292+
44293+ for (i = 0; i < vfl->num_elements; i++) {
44294+ if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
44295+ aq_ret = I40E_ERR_PARAM;
44296+ dev_err(&pf->pdev->dev,
44297+ "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
44298+ goto error_param;
44299+ }
44300+ }
44301+ vsi = pf->vsi[vf->lan_vsi_idx];
44302+ if (vsi->info.pvid) {
44303+ aq_ret = I40E_ERR_PARAM;
44304+ goto error_param;
44305+ }
44306+
44307+ i40e_vlan_stripping_enable(vsi);
44308+ for (i = 0; i < vfl->num_elements; i++) {
44309+ /* add new VLAN filter */
44310+ int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
44311+ if (!ret)
44312+ vf->num_vlan++;
44313+
44314+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
44315+ i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
44316+ true,
44317+ vfl->vlan_id[i],
44318+ NULL);
44319+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
44320+ i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
44321+ true,
44322+ vfl->vlan_id[i],
44323+ NULL);
44324+
44325+ if (ret)
44326+ dev_err(&pf->pdev->dev,
44327+ "Unable to add VLAN filter %d for VF %d, error %d\n",
44328+ vfl->vlan_id[i], vf->vf_id, ret);
44329+ }
44330+
44331+error_param:
44332+ /* send the response to the VF */
44333+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
44334+}
44335+
44336+/**
44337+ * i40e_vc_remove_vlan_msg
44338+ * @vf: pointer to the VF info
44339+ * @msg: pointer to the msg buffer
44340+ *
44341+ * remove programmed guest vlan id
44342+ **/
44343+static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
44344+{
44345+ struct virtchnl_vlan_filter_list *vfl =
44346+ (struct virtchnl_vlan_filter_list *)msg;
44347+ struct i40e_pf *pf = vf->pf;
44348+ struct i40e_vsi *vsi = NULL;
44349+ i40e_status aq_ret = 0;
44350+ int i;
44351+
44352+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
44353+ !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
44354+ aq_ret = I40E_ERR_PARAM;
44355+ goto error_param;
44356+ }
44357+
44358+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
44359+ bitmap_weight(vf->trunk_vlans, VLAN_N_VID))
44360+ /* Silently fail the request if the VF is untrusted and trunk
44361+ * VLANs are configured.
44362+ */
44363+ goto error_param;
44364+
44365+ for (i = 0; i < vfl->num_elements; i++) {
44366+ if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
44367+ aq_ret = I40E_ERR_PARAM;
44368+ goto error_param;
44369+ }
44370+ }
44371+
44372+ vsi = pf->vsi[vf->lan_vsi_idx];
44373+ if (vsi->info.pvid) {
44374+ if (vfl->num_elements > 1 || vfl->vlan_id[0])
44375+ aq_ret = I40E_ERR_PARAM;
44376+ goto error_param;
44377+ }
44378+
44379+ for (i = 0; i < vfl->num_elements; i++) {
44380+ i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
44381+ vf->num_vlan--;
44382+
44383+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
44384+ i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
44385+ false,
44386+ vfl->vlan_id[i],
44387+ NULL);
44388+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
44389+ i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
44390+ false,
44391+ vfl->vlan_id[i],
44392+ NULL);
44393+ }
44394+
44395+error_param:
44396+ /* send the response to the VF */
44397+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
44398+}
44399+
44400+/**
44401+ * i40e_vc_config_rss_key
44402+ * @vf: pointer to the VF info
44403+ * @msg: pointer to the msg buffer
44404+ *
44405+ * Configure the VF's RSS key
44406+ **/
44407+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
44408+{
44409+ struct virtchnl_rss_key *vrk =
44410+ (struct virtchnl_rss_key *)msg;
44411+ struct i40e_pf *pf = vf->pf;
44412+ struct i40e_vsi *vsi = NULL;
44413+ i40e_status aq_ret = 0;
44414+
44415+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
44416+ !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
44417+ (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
44418+ aq_ret = I40E_ERR_PARAM;
44419+ goto err;
44420+ }
44421+
44422+ vsi = pf->vsi[vf->lan_vsi_idx];
44423+ aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
44424+err:
44425+ /* send the response to the VF */
44426+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
44427+ aq_ret);
44428+}
44429+
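/* Editor's sketch (not part of the patch): how a sender would typically size
 * the variable-length VIRTCHNL_OP_CONFIG_RSS_KEY message that the handler
 * above validates against I40E_HKEY_ARRAY_SIZE. Assumes the upstream
 * virtchnl_rss_key layout (u16 vsi_id; u16 key_len; u8 key[1];); the helper
 * name is hypothetical.
 */
static inline size_t example_rss_key_msg_len(u16 key_len)
{
	/* one byte of key[] is already counted by sizeof() */
	return sizeof(struct virtchnl_rss_key) + key_len - 1;
}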
44430+/**
44431+ * i40e_vc_config_rss_lut
44432+ * @vf: pointer to the VF info
44433+ * @msg: pointer to the msg buffer
44434+ *
44435+ * Configure the VF's RSS LUT
44436+ **/
44437+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
44438+{
44439+ struct virtchnl_rss_lut *vrl =
44440+ (struct virtchnl_rss_lut *)msg;
44441+ struct i40e_pf *pf = vf->pf;
44442+ struct i40e_vsi *vsi = NULL;
44443+ i40e_status aq_ret = 0;
44444+ u16 i;
44445+
44446+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
44447+ !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
44448+ (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
44449+ aq_ret = I40E_ERR_PARAM;
44450+ goto err;
44451+ }
44452+
44453+ for (i = 0; i < vrl->lut_entries; i++)
44454+ if (vrl->lut[i] >= vf->num_queue_pairs) {
44455+ aq_ret = I40E_ERR_PARAM;
44456+ goto err;
44457+ }
44458+
44459+ vsi = pf->vsi[vf->lan_vsi_idx];
44460+ aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
44461+ /* send the response to the VF */
44462+err:
44463+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
44464+ aq_ret);
44465+}
44466+
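/* Editor's sketch (not part of the patch): one way a sender could fill the
 * I40E_VF_HLUT_ARRAY_SIZE-entry LUT that the handler above validates, so that
 * every entry indexes a valid queue pair. Names are hypothetical.
 */
static inline void example_fill_rss_lut(u8 *lut, u16 lut_entries,
					u16 num_queue_pairs)
{
	u16 i;

	if (!num_queue_pairs)
		return;

	for (i = 0; i < lut_entries; i++)
		lut[i] = i % num_queue_pairs;	/* always < num_queue_pairs */
}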
44467+/**
44468+ * i40e_vc_get_rss_hena
44469+ * @vf: pointer to the VF info
44470+ * @msg: pointer to the msg buffer
44471+ *
44472+ * Return the RSS HENA bits allowed by the hardware
44473+ **/
44474+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
44475+{
44476+ struct virtchnl_rss_hena *vrh = NULL;
44477+ struct i40e_pf *pf = vf->pf;
44478+ i40e_status aq_ret = 0;
44479+ int len = 0;
44480+
44481+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
44482+ aq_ret = I40E_ERR_PARAM;
44483+ goto err;
44484+ }
44485+ len = sizeof(struct virtchnl_rss_hena);
44486+
44487+ vrh = kzalloc(len, GFP_KERNEL);
44488+ if (!vrh) {
44489+ aq_ret = I40E_ERR_NO_MEMORY;
44490+ len = 0;
44491+ goto err;
44492+ }
44493+ vrh->hena = i40e_pf_get_default_rss_hena(pf);
44494+err:
44495+ /* send the response back to the VF */
44496+ aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
44497+ aq_ret, (u8 *)vrh, len);
44498+ kfree(vrh);
44499+ return aq_ret;
44500+}
44501+
44502+/**
44503+ * i40e_vc_set_rss_hena
44504+ * @vf: pointer to the VF info
44505+ * @msg: pointer to the msg buffer
44506+ *
44507+ * Set the RSS HENA bits for the VF
44508+ **/
44509+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
44510+{
44511+ struct virtchnl_rss_hena *vrh =
44512+ (struct virtchnl_rss_hena *)msg;
44513+ struct i40e_pf *pf = vf->pf;
44514+ struct i40e_hw *hw = &pf->hw;
44515+ i40e_status aq_ret = 0;
44516+
44517+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
44518+ aq_ret = I40E_ERR_PARAM;
44519+ goto err;
44520+ }
44521+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
44522+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
44523+ (u32)(vrh->hena >> 32));
44524+
44525+ /* send the response to the VF */
44526+err:
44527+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
44528+}
44529+
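/* Editor's sketch (not part of the patch): the 64-bit hash-enable (HENA) value
 * set above is split across two 32-bit VFQF_HENA1 registers; this shows the
 * split the handler performs. The helper name is hypothetical.
 */
static inline void example_split_hena(u64 hena, u32 *lo, u32 *hi)
{
	*lo = (u32)hena;		/* written to I40E_VFQF_HENA1(0, vf_id) */
	*hi = (u32)(hena >> 32);	/* written to I40E_VFQF_HENA1(1, vf_id) */
}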
44530+/**
44531+ * i40e_vc_enable_vlan_stripping
44532+ * @vf: pointer to the VF info
44533+ * @msg: pointer to the msg buffer
44534+ *
44535+ * Enable vlan header stripping for the VF
44536+ **/
44537+static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
44538+{
44539+ i40e_status aq_ret = 0;
44540+ struct i40e_vsi *vsi;
44541+
44542+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
44543+ aq_ret = I40E_ERR_PARAM;
44544+ goto err;
44545+ }
44546+
44547+ vsi = vf->pf->vsi[vf->lan_vsi_idx];
44548+ i40e_vlan_stripping_enable(vsi);
44549+
44550+ /* send the response to the VF */
44551+err:
44552+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
44553+ aq_ret);
44554+}
44555+
44556+/**
44557+ * i40e_vc_disable_vlan_stripping
44558+ * @vf: pointer to the VF info
44559+ * @msg: pointer to the msg buffer
44560+ *
44561+ * Disable vlan header stripping for the VF
44562+ **/
44563+static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
44564+{
44565+ i40e_status aq_ret = 0;
44566+ struct i40e_vsi *vsi;
44567+
44568+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
44569+ aq_ret = I40E_ERR_PARAM;
44570+ goto err;
44571+ }
44572+
44573+ vsi = vf->pf->vsi[vf->lan_vsi_idx];
44574+ i40e_vlan_stripping_disable(vsi);
44575+
44576+ /* send the response to the VF */
44577+err:
44578+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
44579+ aq_ret);
44580+}
44581+
44582+#ifdef __TC_MQPRIO_MODE_MAX
44583+/**
44584+ * i40e_validate_cloud_filter
44585+ * @vf: pointer to the VF info
44586+ * @tc_filter: pointer to virtchnl_filter
44587+ *
44588+ * This function validates cloud filter programmed as TC filter for ADq
44589+ **/
44590+static int i40e_validate_cloud_filter(struct i40e_vf *vf,
44591+ struct virtchnl_filter *tc_filter)
44592+{
44593+ struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
44594+ struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
44595+ struct i40e_pf *pf = vf->pf;
44596+ struct i40e_vsi *vsi = NULL;
44597+ struct i40e_mac_filter *f;
44598+ struct hlist_node *h;
44599+ bool found = false;
44600+ int bkt;
44601+
44602+ if (!tc_filter->action) {
44603+ dev_info(&pf->pdev->dev,
44604+ "VF %d: Currently ADq doesn't support Drop Action\n",
44605+ vf->vf_id);
44606+ goto err;
44607+ }
44608+
44609+ /* action_meta is the TC number to which the filter is applied */
44610+ if (!tc_filter->action_meta ||
44611+ tc_filter->action_meta > I40E_MAX_VF_VSI) {
44612+ dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
44613+ vf->vf_id, tc_filter->action_meta);
44614+ goto err;
44615+ }
44616+
44617+ /* Check whether the filter is programmed for advanced mode or basic mode.
44618+ * There are two ADq modes (for VF only),
44619+ * 1. Basic mode: intended to allow as many filter options as possible
44620+ * to be added to a VF in Non-trusted mode. Main goal is
44621+ * to add filters to its own MAC and VLAN id.
44622+ * 2. Advanced mode: is for allowing filters to be applied other than
44623+ * its own MAC or VLAN. This mode requires the VF to be
44624+ * Trusted.
44625+ */
44626+ if (mask.dst_mac[0] && !mask.dst_ip[0]) {
44627+ vsi = pf->vsi[vf->lan_vsi_idx];
44628+ f = i40e_find_mac(vsi, data.dst_mac);
44629+
44630+ if (!f) {
44631+ dev_info(&pf->pdev->dev,
44632+ "Destination MAC %pM doesn't belong to VF %d\n",
44633+ data.dst_mac, vf->vf_id);
44634+ goto err;
44635+ }
44636+
44637+ if (mask.vlan_id) {
44638+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
44639+ hlist) {
44640+ if (f->vlan == ntohs(data.vlan_id)) {
44641+ found = true;
44642+ break;
44643+ }
44644+ }
44645+ if (!found) {
44646+ dev_info(&pf->pdev->dev,
44647+ "VF %d doesn't have any VLAN id %u\n",
44648+ vf->vf_id, ntohs(data.vlan_id));
44649+ goto err;
44650+ }
44651+ }
44652+ } else {
44653+ /* Check if VF is trusted */
44654+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
44655+ dev_err(&pf->pdev->dev,
44656+ "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
44657+ vf->vf_id);
44658+ return I40E_ERR_CONFIG;
44659+ }
44660+ }
44661+
44662+ if (mask.dst_mac[0] & data.dst_mac[0]) {
44663+ if (is_broadcast_ether_addr(data.dst_mac) ||
44664+ is_zero_ether_addr(data.dst_mac)) {
44665+ dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
44666+ vf->vf_id, data.dst_mac);
44667+ goto err;
44668+ }
44669+ }
44670+
44671+ if (mask.src_mac[0] & data.src_mac[0]) {
44672+ if (is_broadcast_ether_addr(data.src_mac) ||
44673+ is_zero_ether_addr(data.src_mac)) {
44674+ dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
44675+ vf->vf_id, data.src_mac);
44676+ goto err;
44677+ }
44678+ }
44679+
44680+ if (mask.dst_port & data.dst_port) {
44681+ if (!data.dst_port) {
44682+ dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
44683+ vf->vf_id);
44684+ goto err;
44685+ }
44686+ }
44687+
44688+ if (mask.src_port & data.src_port) {
44689+ if (!data.src_port) {
44690+ dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
44691+ vf->vf_id);
44692+ goto err;
44693+ }
44694+ }
44695+
44696+ if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
44697+ tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
44698+ dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
44699+ vf->vf_id);
44700+ goto err;
44701+ }
44702+
44703+ if (mask.vlan_id & data.vlan_id) {
44704+ if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
44705+ dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
44706+ vf->vf_id);
44707+ goto err;
44708+ }
44709+ }
44710+
44711+ return I40E_SUCCESS;
44712+err:
44713+ return I40E_ERR_CONFIG;
44714+}
44715+
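/* Editor's sketch (not part of the patch): the cloud-filter handlers above and
 * below treat a field as specified only when both mask and data are non-zero,
 * and the value actually programmed is (mask & data), applied byte-wise for
 * MAC addresses. A minimal illustration; the helper name is hypothetical.
 */
static inline void example_masked_mac(const u8 *mask, const u8 *data, u8 *out)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		out[i] = mask[i] & data[i];	/* same as the cfilter MAC setup below */
}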
44716+/**
44717+ * i40e_find_vf_vsi_from_seid - searches for the vsi with the given seid
44718+ * @vf: pointer to the VF info
44719+ * @seid: seid of the vsi it is searching for
44720+ **/
44721+static struct i40e_vsi *i40e_find_vf_vsi_from_seid(struct i40e_vf *vf, u16 seid)
44722+{
44723+ struct i40e_pf *pf = vf->pf;
44724+ struct i40e_vsi *vsi = NULL;
44725+ int i;
44726+
44727+ for (i = 0; i < vf->num_tc ; i++) {
44728+ vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
44729+ if (vsi && vsi->seid == seid)
44730+ return vsi;
44731+ }
44732+ return NULL;
44733+}
44734+
44735+/**
44736+ * i40e_del_all_cloud_filters
44737+ * @vf: pointer to the VF info
44738+ *
44739+ * This function deletes all cloud filters
44740+ **/
44741+static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
44742+{
44743+ struct i40e_cloud_filter *cfilter = NULL;
44744+ struct i40e_pf *pf = vf->pf;
44745+ struct i40e_vsi *vsi = NULL;
44746+ struct hlist_node *node;
44747+ int ret;
44748+
44749+ hlist_for_each_entry_safe(cfilter, node,
44750+ &vf->cloud_filter_list, cloud_node) {
44751+ vsi = i40e_find_vf_vsi_from_seid(vf, cfilter->seid);
44752+
44753+ if (!vsi) {
44754+ dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
44755+ vf->vf_id, cfilter->seid);
44756+ continue;
44757+ }
44758+
44759+ if (cfilter->dst_port)
44760+ ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
44761+ false);
44762+ else
44763+ ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
44764+ if (ret)
44765+ dev_err(&pf->pdev->dev,
44766+ "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
44767+ vf->vf_id, i40e_stat_str(&pf->hw, ret),
44768+ i40e_aq_str(&pf->hw,
44769+ pf->hw.aq.asq_last_status));
44770+
44771+ hlist_del(&cfilter->cloud_node);
44772+ kfree(cfilter);
44773+ vf->num_cloud_filters--;
44774+ }
44775+}
44776+
44777+/**
44778+ * i40e_vc_del_cloud_filter
44779+ * @vf: pointer to the VF info
44780+ * @msg: pointer to the msg buffer
44781+ *
44782+ * This function deletes a cloud filter programmed as TC filter for ADq
44783+ **/
44784+static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
44785+{
44786+ struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
44787+ struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
44788+ struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
44789+ struct i40e_cloud_filter cfilter, *cf = NULL;
44790+ struct i40e_pf *pf = vf->pf;
44791+ struct i40e_vsi *vsi = NULL;
44792+ struct hlist_node *node;
44793+ i40e_status aq_ret = 0;
44794+ int i, ret;
44795+
44796+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
44797+ aq_ret = I40E_ERR_PARAM;
44798+ goto err;
44799+ }
44800+
44801+ if (!vf->adq_enabled) {
44802+ dev_info(&pf->pdev->dev,
44803+ "VF %d: ADq not enabled, can't apply cloud filter\n",
44804+ vf->vf_id);
44805+ aq_ret = I40E_ERR_PARAM;
44806+ goto err;
44807+ }
44808+
44809+ if (i40e_validate_cloud_filter(vf, vcf)) {
44810+ dev_info(&pf->pdev->dev,
44811+ "VF %d: Invalid input, can't apply cloud filter\n",
44812+ vf->vf_id);
44813+ aq_ret = I40E_ERR_PARAM;
44814+ goto err;
44815+ }
44816+
44817+ memset(&cfilter, 0, sizeof(cfilter));
44818+ /* parse destination mac address */
44819+ for (i = 0; i < ETH_ALEN; i++)
44820+ cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
44821+
44822+ /* parse source mac address */
44823+ for (i = 0; i < ETH_ALEN; i++)
44824+ cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
44825+
44826+ cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
44827+ cfilter.dst_port = mask.dst_port & tcf.dst_port;
44828+ cfilter.src_port = mask.src_port & tcf.src_port;
44829+
44830+ switch (vcf->flow_type) {
44831+ case VIRTCHNL_TCP_V4_FLOW:
44832+ cfilter.n_proto = ETH_P_IP;
44833+ if (mask.dst_ip[0] & tcf.dst_ip[0])
44834+ memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
44835+ ARRAY_SIZE(tcf.dst_ip));
44836+ else if (mask.src_ip[0] & tcf.dst_ip[0])
44837+ memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
44838+ ARRAY_SIZE(tcf.dst_ip));
44839+ break;
44840+ case VIRTCHNL_TCP_V6_FLOW:
44841+ cfilter.n_proto = ETH_P_IPV6;
44842+ if (mask.dst_ip[3] & tcf.dst_ip[3])
44843+ memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
44844+ sizeof(cfilter.ip.v6.dst_ip6));
44845+ if (mask.src_ip[3] & tcf.src_ip[3])
44846+ memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
44847+ sizeof(cfilter.ip.v6.src_ip6));
44848+ break;
44849+ default:
44850+ /* A TC filter can be configured based on different field combinations;
44851+ * in this case IP is not part of the filter config
44852+ */
44853+ dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
44854+ vf->vf_id);
44855+ }
44856+
44857+ /* get the VSI to which the TC belongs */
44858+ vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
44859+ cfilter.seid = vsi->seid;
44860+ cfilter.flags = vcf->field_flags;
44861+
44862+ /* Deleting TC filter */
44863+ if (tcf.dst_port)
44864+ ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
44865+ else
44866+ ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
44867+ if (ret) {
44868+ dev_err(&pf->pdev->dev,
44869+ "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
44870+ vf->vf_id, i40e_stat_str(&pf->hw, ret),
44871+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
44872+ goto err;
44873+ }
44874+
44875+ hlist_for_each_entry_safe(cf, node,
44876+ &vf->cloud_filter_list, cloud_node) {
44877+ if (cf->seid != cfilter.seid)
44878+ continue;
44879+ if (mask.dst_port)
44880+ if (cfilter.dst_port != cf->dst_port)
44881+ continue;
44882+ if (mask.dst_mac[0])
44883+ if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
44884+ continue;
44885+ /* for ipv4 data to be valid, only first byte of mask is set */
44886+ if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
44887+ if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
44888+ ARRAY_SIZE(tcf.dst_ip)))
44889+ continue;
44890+ /* for ipv6, mask is set for all sixteen bytes (4 words) */
44891+ if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
44892+ if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
44893+ sizeof(cfilter.ip.v6.src_ip6)))
44894+ continue;
44895+ if (mask.vlan_id)
44896+ if (cfilter.vlan_id != cf->vlan_id)
44897+ continue;
44898+
44899+ hlist_del(&cf->cloud_node);
44900+ kfree(cf);
44901+ vf->num_cloud_filters--;
44902+ }
44903+
44904+err:
44905+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
44906+ aq_ret);
44907+}
44908+
44909+/**
44910+ * i40e_vc_add_cloud_filter
44911+ * @vf: pointer to the VF info
44912+ * @msg: pointer to the msg buffer
44913+ *
44914+ * This function adds a cloud filter programmed as TC filter for ADq
44915+ **/
44916+static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
44917+{
44918+ struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
44919+ struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
44920+ struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
44921+ struct i40e_cloud_filter *cfilter = NULL;
44922+ struct i40e_pf *pf = vf->pf;
44923+ struct i40e_vsi *vsi = NULL;
44924+ i40e_status aq_ret = 0;
44925+ char err_msg_buf[100];
44926+ bool is_quiet = false;
44927+ u16 err_msglen = 0;
44928+ u8 *err_msg = NULL;
44929+ int i, ret;
44930+
44931+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
44932+ aq_ret = I40E_ERR_PARAM;
44933+ goto err_out;
44934+ }
44935+
44936+ if (!vf->adq_enabled) {
44937+ dev_info(&pf->pdev->dev,
44938+ "VF %d: ADq is not enabled, can't apply cloud filter\n",
44939+ vf->vf_id);
44940+ aq_ret = I40E_ERR_PARAM;
44941+ goto err_out;
44942+ }
44943+
44944+ if (pf->fdir_pf_active_filters ||
44945+ (!hlist_empty(&pf->fdir_filter_list))) {
44946+ aq_ret = I40E_ERR_PARAM;
44947+ err_msglen = strlcpy(err_msg_buf,
44948+ "Flow Director Sideband filters exist, turn ntuple off to configure cloud filters",
44949+ sizeof(err_msg_buf));
44950+ err_msg = err_msg_buf;
44951+ is_quiet = true;
44952+ goto err_out;
44953+ }
44954+
44955+ if (i40e_validate_cloud_filter(vf, vcf)) {
44956+ dev_info(&pf->pdev->dev,
44957+ "VF %d: Invalid input/s, can't apply cloud filter\n",
44958+ vf->vf_id);
44959+ aq_ret = I40E_ERR_PARAM;
44960+ goto err_out;
44961+ }
44962+
44963+ cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
44964+ if (!cfilter)
44965+ return -ENOMEM;
44966+
44967+ /* parse destination mac address */
44968+ for (i = 0; i < ETH_ALEN; i++)
44969+ cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
44970+
44971+ /* parse source mac address */
44972+ for (i = 0; i < ETH_ALEN; i++)
44973+ cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
44974+
44975+ cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
44976+ cfilter->dst_port = mask.dst_port & tcf.dst_port;
44977+ cfilter->src_port = mask.src_port & tcf.src_port;
44978+
44979+ switch (vcf->flow_type) {
44980+ case VIRTCHNL_TCP_V4_FLOW:
44981+ cfilter->n_proto = ETH_P_IP;
44982+ if (mask.dst_ip[0] & tcf.dst_ip[0])
44983+ memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
44984+ ARRAY_SIZE(tcf.dst_ip));
44985+ else if (mask.src_ip[0] & tcf.dst_ip[0])
44986+ memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
44987+ ARRAY_SIZE(tcf.dst_ip));
44988+ break;
44989+ case VIRTCHNL_TCP_V6_FLOW:
44990+ cfilter->n_proto = ETH_P_IPV6;
44991+ if (mask.dst_ip[3] & tcf.dst_ip[3])
44992+ memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
44993+ sizeof(cfilter->ip.v6.dst_ip6));
44994+ if (mask.src_ip[3] & tcf.src_ip[3])
44995+ memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
44996+ sizeof(cfilter->ip.v6.src_ip6));
44997+ break;
44998+ default:
44999+ /* A TC filter can be configured based on different field combinations;
45000+ * in this case IP is not part of the filter config
45001+ */
45002+ dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
45003+ vf->vf_id);
45004+ }
45005+
45006+ /* get the VSI to which the TC belongs */
45007+ vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
45008+ cfilter->seid = vsi->seid;
45009+ cfilter->flags = vcf->field_flags;
45010+
45011+ /* Adding cloud filter programmed as TC filter */
45012+ if (tcf.dst_port)
45013+ ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
45014+ else
45015+ ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
45016+ if (ret) {
45017+ dev_err(&pf->pdev->dev,
45018+ "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
45019+ vf->vf_id, i40e_stat_str(&pf->hw, ret),
45020+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
45021+ goto err_free;
45022+ }
45023+
45024+ INIT_HLIST_NODE(&cfilter->cloud_node);
45025+ hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
45026+ /* ownership of cfilter passed to the list; clear the pointer so err_free won't free it */
45027+ cfilter = NULL;
45028+ vf->num_cloud_filters++;
45029+err_free:
45030+ kfree(cfilter);
45031+err_out:
45032+ return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
45033+ aq_ret, err_msg, err_msglen,
45034+ is_quiet);
45035+}
45036+
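/* Editor's sketch (not part of the patch): roughly what a VF-side
 * VIRTCHNL_OP_ADD_CLOUD_FILTER request handled above could look like for a
 * TCP/IPv4 destination-port match steered to TC 1. Field names mirror those
 * used by the handler; the redirect-action enum name is an assumption.
 */
static inline void example_fill_tcp_dport_filter(struct virtchnl_filter *vcf,
						 __be16 dport, u8 tc)
{
	memset(vcf, 0, sizeof(*vcf));
	vcf->flow_type = VIRTCHNL_TCP_V4_FLOW;
	vcf->action = VIRTCHNL_ACTION_TC_REDIRECT;	/* assumed enum name */
	vcf->action_meta = tc;				/* TC number validated above */
	vcf->mask.tcp_spec.dst_port = cpu_to_be16(0xFFFF);
	vcf->data.tcp_spec.dst_port = dport;
}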
45037+/**
45038+ * i40e_vc_add_qch_msg: Add queue channel and enable ADq
45039+ * @vf: pointer to the VF info
45040+ * @msg: pointer to the msg buffer
45041+ **/
45042+static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
45043+{
45044+ struct virtchnl_tc_info *tci = (struct virtchnl_tc_info *)msg;
45045+ struct i40e_pf *pf = vf->pf;
45046+ struct i40e_link_status *ls;
45047+ int i, adq_request_qps = 0;
45048+ i40e_status aq_ret = 0;
45049+ u32 speed;
45050+
45051+ ls = &pf->hw.phy.link_info;
45052+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
45053+ aq_ret = I40E_ERR_PARAM;
45054+ goto err;
45055+ }
45056+
45057+ /* ADq cannot be applied if spoof check is ON */
45058+ if (vf->mac_anti_spoof) {
45059+ dev_err(&pf->pdev->dev,
45060+ "Spoof check is ON, turn OFF both MAC and VLAN anti spoof to enable ADq\n");
45061+ aq_ret = I40E_ERR_PARAM;
45062+ goto err;
45063+ }
45064+
45065+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
45066+ dev_err(&pf->pdev->dev,
45067+ "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
45068+ vf->vf_id);
45069+ aq_ret = I40E_ERR_PARAM;
45070+ goto err;
45071+ }
45072+
45073+ /* max number of traffic classes for VF currently capped at 4 */
45074+ if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
45075+ dev_err(&pf->pdev->dev,
45076+ "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
45077+ vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
45078+ aq_ret = I40E_ERR_PARAM;
45079+ goto err;
45080+ }
45081+
45082+ /* validate queues for each TC */
45083+ for (i = 0; i < tci->num_tc; i++)
45084+ if (!tci->list[i].count ||
45085+ tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
45086+ dev_err(&pf->pdev->dev,
45087+ "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
45088+ vf->vf_id, i, tci->list[i].count,
45089+ I40E_DEFAULT_QUEUES_PER_VF);
45090+ aq_ret = I40E_ERR_PARAM;
45091+ goto err;
45092+ }
45093+
45094+ /* ADq needs the max VF queues; the VF already has the default number, so request the difference */
45095+ adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
45096+
45097+ if (pf->queues_left < adq_request_qps) {
45098+ dev_err(&pf->pdev->dev,
45099+ "No queues left to allocate to VF %d\n",
45100+ vf->vf_id);
45101+ aq_ret = I40E_ERR_PARAM;
45102+ goto err;
45103+ } else {
45104+ /* allocate the max VF queues to enable ADq, so that an
45105+ * ADq-enabled VF always gets its queues back when it
45106+ * goes through a reset.
45107+ */
45108+ vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
45109+ }
45110+
45111+ /* get link speed in Mbps to validate the rate limit */
45112+ speed = i40e_vc_link_speed2mbps(ls->link_speed);
45113+ if (speed == SPEED_UNKNOWN) {
45114+ dev_err(&pf->pdev->dev, "Cannot detect link speed\n");
45115+ aq_ret = I40E_ERR_PARAM;
45116+ goto err;
45117+ }
45118+
45119+ /* parse data from the queue channel info */
45120+ vf->num_tc = tci->num_tc;
45121+ for (i = 0; i < vf->num_tc; i++) {
45122+ if (tci->list[i].max_tx_rate) {
45123+ if (tci->list[i].max_tx_rate > speed) {
45124+ dev_err(&pf->pdev->dev,
45125+ "Invalid max tx rate %llu specified for VF %d.",
45126+ tci->list[i].max_tx_rate,
45127+ vf->vf_id);
45128+ aq_ret = I40E_ERR_PARAM;
45129+ goto err;
45130+ } else {
45131+ vf->ch[i].max_tx_rate =
45132+ tci->list[i].max_tx_rate;
45133+ }
45134+ }
45135+ vf->ch[i].num_qps = tci->list[i].count;
45136+ }
45137+
45138+ /* set this flag only after making sure all inputs are sane */
45139+ vf->adq_enabled = true;
45140+ /* num_req_queues is set when the user changes the number of queues via
45141+ * ethtool and causes an issue for the default VSI (which depends on this
45142+ * variable) when ADq is enabled, hence reset it.
45143+ */
45144+ vf->num_req_queues = 0;
45145+
45146+ /* reset the VF in order to allocate resources */
45147+ i40e_vc_reset_vf(vf, true);
45148+
45149+ return I40E_SUCCESS;
45150+
45151+ /* send the response to the VF */
45152+err:
45153+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
45154+ aq_ret);
45155+}
45156+
45157+/**
45158+ * i40e_vc_del_qch_msg
45159+ * @vf: pointer to the VF info
45160+ * @msg: pointer to the msg buffer
45161+ **/
45162+static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
45163+{
45164+ struct i40e_pf *pf = vf->pf;
45165+ i40e_status aq_ret = 0;
45166+
45167+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
45168+ aq_ret = I40E_ERR_PARAM;
45169+ goto err;
45170+ }
45171+
45172+ if (vf->adq_enabled) {
45173+ i40e_del_all_cloud_filters(vf);
45174+ i40e_del_qch(vf);
45175+ vf->adq_enabled = false;
45176+ vf->num_tc = 0;
45177+ dev_info(&pf->pdev->dev,
45178+ "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
45179+ vf->vf_id);
45180+ } else {
45181+ dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
45182+ vf->vf_id);
45183+ aq_ret = I40E_ERR_PARAM;
45184+ }
45185+
45186+ /* reset the VF in order to allocate resources */
45187+ i40e_vc_reset_vf(vf, true);
45188+
45189+ return I40E_SUCCESS;
45190+
45191+err:
45192+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
45193+ aq_ret);
45194+}
45195+
45196+#endif /* __TC_MQPRIO_MODE_MAX */
45197+
45198+/**
45199+ * i40e_vc_process_vf_msg
45200+ * @pf: pointer to the PF structure
45201+ * @vf_id: source VF id
45202+ * @v_opcode: operation code
45203+ * @v_retval: unused return value code
45204+ * @msg: pointer to the msg buffer
45205+ * @msglen: msg length
45206+ *
45207+ * called from the common aeq/arq handler to
45208+ * process requests from the VF
45209+ **/
45210+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
45211+ u32 __always_unused v_retval, u8 *msg, u16 msglen)
45212+{
45213+ struct i40e_hw *hw = &pf->hw;
45214+ int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
45215+ struct i40e_vf *vf;
45216+ int ret;
45217+
45218+ pf->vf_aq_requests++;
45219+ if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
45220+ return -EINVAL;
45221+ vf = &(pf->vf[local_vf_id]);
45222+
45223+ /* Check if VF is disabled. */
45224+ if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
45225+ return I40E_ERR_PARAM;
45226+
45227+ /* perform basic checks on the msg */
45228+ ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
45229+
45230+ if (ret) {
45231+ i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
45232+ dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
45233+ local_vf_id, v_opcode, msglen);
45234+ switch (ret) {
45235+ case VIRTCHNL_STATUS_ERR_PARAM:
45236+ return -EPERM;
45237+ default:
45238+ return -EINVAL;
45239+ }
45240+ }
45241+
45242+ switch (v_opcode) {
45243+ case VIRTCHNL_OP_VERSION:
45244+ ret = i40e_vc_get_version_msg(vf, msg);
45245+ break;
45246+ case VIRTCHNL_OP_GET_VF_RESOURCES:
45247+ ret = i40e_vc_get_vf_resources_msg(vf, msg);
45248+ i40e_vc_notify_vf_link_state(vf);
45249+ break;
45250+ case VIRTCHNL_OP_RESET_VF:
45251+ i40e_vc_reset_vf(vf, false);
45252+ if (!test_bit(__I40E_VFS_RELEASING, pf->state))
45253+ clear_bit(I40E_VF_STATE_LOADED_VF_DRIVER,
45254+ &vf->vf_states);
45255+ ret = 0;
45256+ break;
45257+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
45258+ ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
45259+ break;
45260+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
45261+ ret = i40e_vc_config_queues_msg(vf, msg);
45262+ break;
45263+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
45264+ ret = i40e_vc_config_irq_map_msg(vf, msg);
45265+ break;
45266+ case VIRTCHNL_OP_ENABLE_QUEUES:
45267+ ret = i40e_vc_enable_queues_msg(vf, msg);
45268+ i40e_vc_notify_vf_link_state(vf);
45269+ break;
45270+ case VIRTCHNL_OP_DISABLE_QUEUES:
45271+ ret = i40e_vc_disable_queues_msg(vf, msg);
45272+ break;
45273+ case VIRTCHNL_OP_ADD_ETH_ADDR:
45274+ ret = i40e_vc_add_mac_addr_msg(vf, msg);
45275+ break;
45276+ case VIRTCHNL_OP_DEL_ETH_ADDR:
45277+ ret = i40e_vc_del_mac_addr_msg(vf, msg);
45278+ break;
45279+ case VIRTCHNL_OP_ADD_VLAN:
45280+ ret = i40e_vc_add_vlan_msg(vf, msg);
45281+ break;
45282+ case VIRTCHNL_OP_DEL_VLAN:
45283+ ret = i40e_vc_remove_vlan_msg(vf, msg);
45284+ break;
45285+ case VIRTCHNL_OP_GET_STATS:
45286+ ret = i40e_vc_get_stats_msg(vf, msg);
45287+ break;
45288+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
45289+ ret = i40e_vc_config_rss_key(vf, msg);
45290+ break;
45291+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
45292+ ret = i40e_vc_config_rss_lut(vf, msg);
45293+ break;
45294+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
45295+ ret = i40e_vc_get_rss_hena(vf, msg);
45296+ break;
45297+ case VIRTCHNL_OP_SET_RSS_HENA:
45298+ ret = i40e_vc_set_rss_hena(vf, msg);
45299+ break;
45300+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
45301+ ret = i40e_vc_enable_vlan_stripping(vf, msg);
45302+ break;
45303+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
45304+ ret = i40e_vc_disable_vlan_stripping(vf, msg);
45305+ break;
45306+ case VIRTCHNL_OP_REQUEST_QUEUES:
45307+ ret = i40e_vc_request_queues_msg(vf, msg);
45308+ break;
45309+#ifdef __TC_MQPRIO_MODE_MAX
45310+ case VIRTCHNL_OP_ENABLE_CHANNELS:
45311+ ret = i40e_vc_add_qch_msg(vf, msg);
45312+ break;
45313+ case VIRTCHNL_OP_DISABLE_CHANNELS:
45314+ ret = i40e_vc_del_qch_msg(vf, msg);
45315+ break;
45316+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
45317+ ret = i40e_vc_add_cloud_filter(vf, msg);
45318+ break;
45319+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
45320+ ret = i40e_vc_del_cloud_filter(vf, msg);
45321+ break;
45322+#endif /* __TC_MQPRIO_MODE_MAX */
45323+ case VIRTCHNL_OP_UNKNOWN:
45324+ default:
45325+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
45326+ v_opcode, local_vf_id);
45327+ ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
45328+ I40E_ERR_NOT_IMPLEMENTED);
45329+ break;
45330+ }
45331+
45332+ return ret;
45333+}
45334+
45335+/**
45336+ * i40e_vc_process_vflr_event
45337+ * @pf: pointer to the PF structure
45338+ *
45339+ * called from the VFLR irq handler to
45340+ * free up VF resources and state variables
45341+ **/
45342+int i40e_vc_process_vflr_event(struct i40e_pf *pf)
45343+{
45344+ struct i40e_hw *hw = &pf->hw;
45345+ u32 reg, reg_idx, bit_idx;
45346+ struct i40e_vf *vf;
45347+ int vf_id;
45348+
45349+ if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
45350+ return 0;
45351+
45352+ /* Re-enable the VFLR interrupt cause here, before looking for which
45353+ * VF got reset. Otherwise, if another VF gets a reset while the
45354+ * first one is being processed, that interrupt will be lost, and
45355+ * that VF will be stuck in reset forever.
45356+ */
45357+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
45358+ reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
45359+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
45360+ i40e_flush(hw);
45361+
45362+ clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
45363+ for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
45364+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
45365+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
45366+ /* read the GLGEN_VFLRSTAT register to find out which VFs triggered an FLR */
45367+ vf = &pf->vf[vf_id];
45368+ reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
45369+ if (reg & BIT(bit_idx))
45370+ /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
45371+ i40e_reset_vf(vf, true);
45372+ }
45373+
45374+ return 0;
45375+}
45376+
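/* Editor's sketch (not part of the patch): how the VFLR status lookup above
 * maps an absolute VF number to a GLGEN_VFLRSTAT register and bit. For
 * example, with an assumed vf_base_id of 64 and vf_id 5, absolute VF 69 lands
 * in register index 69 / 32 = 2, bit 69 % 32 = 5. The helper name is
 * hypothetical.
 */
static inline void example_vflr_index(u32 vf_base_id, u32 vf_id,
				      u32 *reg_idx, u32 *bit_idx)
{
	*reg_idx = (vf_base_id + vf_id) / 32;
	*bit_idx = (vf_base_id + vf_id) % 32;
}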
45377+#ifdef IFLA_VF_MAX
45378+
45379+/**
45380+ * i40e_set_vf_mac
45381+ * @vf: the VF
45382+ * @vsi: VF VSI to configure
45383+ * @mac: the mac address
45384+ *
45385+ * This function allows the administrator to set the mac address for the VF
45386+ *
45387+ * Returns 0 on success, negative on failure
45388+ *
45389+ **/
45390+static int i40e_set_vf_mac(struct i40e_vf *vf, struct i40e_vsi *vsi,
45391+ const u8 *mac)
45392+{
45393+ struct i40e_pf *pf = vsi->back;
45394+ struct i40e_mac_filter *f;
45395+ struct hlist_node *h;
45396+ int ret = 0;
45397+ int bkt;
45398+ u8 i;
45399+
45400+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
45401+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
45402+ return -EAGAIN;
45403+ }
45404+
45405+ if (is_multicast_ether_addr(mac)) {
45406+ dev_err(&pf->pdev->dev,
45407+ "Invalid Ethernet address %pM for VF %d\n",
45408+ mac, vf->vf_id);
45409+ ret = -EINVAL;
45410+ goto error_param;
45411+ }
45412+
45413+ /* When the VF is resetting, wait until it is done.
45414+ * The reset can take up to 200 milliseconds,
45415+ * but wait for up to 300 milliseconds to be safe.
45416+ * If the VF is indeed in reset, the vsi pointer must
45417+ * be refreshed to the newly loaded VSI under pf->vsi[id].
45418+ */
45419+ for (i = 0; i < 15; i++) {
45420+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
45421+ if (i > 0)
45422+ vsi = pf->vsi[vf->lan_vsi_idx];
45423+ break;
45424+ }
45425+ msleep(20);
45426+ }
45427+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
45428+ dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
45429+ vf->vf_id);
45430+ ret = -EAGAIN;
45431+ goto error_param;
45432+ }
45433+
45434+ /* Lock once because the add/del_filter functions invoked below require
45435+ * mac_filter_hash_lock to be held
45436+ */
45437+ spin_lock_bh(&vsi->mac_filter_hash_lock);
45438+
45439+ /* delete the temporary mac address */
45440+ if (!is_zero_ether_addr(vf->default_lan_addr.addr))
45441+ i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
45442+
45443+ /* Delete all the filters for this VSI - we're going to kill it
45444+ * anyway.
45445+ */
45446+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
45447+ __i40e_del_filter(vsi, f);
45448+
45449+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
45450+
45451+ /* program mac filter */
45452+ if (i40e_sync_vsi_filters(vsi)) {
45453+ dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
45454+ ret = -EIO;
45455+ goto error_param;
45456+ }
45457+ ether_addr_copy(vf->default_lan_addr.addr, mac);
45458+
45459+ if (is_zero_ether_addr(mac)) {
45460+ vf->pf_set_mac = false;
45461+ dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf->vf_id);
45462+ } else {
45463+ vf->pf_set_mac = true;
45464+ dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
45465+ mac, vf->vf_id);
45466+ }
45467+
45468+ /* Force the VF interface down so it has to come back up with the new
45469+ * MAC address
45470+ */
45471+ i40e_vc_reset_vf(vf, true);
45472+ dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
45473+error_param:
45474+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
45475+ return ret;
45476+}
45477+
45478+/**
45479+ * i40e_ndo_set_vf_mac
45480+ * @netdev: network interface device structure
45481+ * @vf_id: VF identifier
45482+ * @mac: mac address
45483+ *
45484+ * program VF mac address
45485+ **/
45486+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
45487+{
45488+ struct i40e_netdev_priv *np = netdev_priv(netdev);
45489+ struct i40e_vsi *vsi = np->vsi;
45490+ struct i40e_pf *pf = vsi->back;
45491+ struct i40e_vf *vf;
45492+ int ret = 0;
45493+
45494+ /* validate the request */
45495+ ret = i40e_validate_vf(pf, vf_id);
45496+ if (ret)
45497+ goto error_param;
45498+
45499+ vf = &pf->vf[vf_id];
45500+ vsi = pf->vsi[vf->lan_vsi_idx];
45501+ ret = i40e_set_vf_mac(vf, vsi, mac);
45502+error_param:
45503+ return ret;
45504+}
45505+
45506+/**
45507+ * i40e_vsi_has_vlans - True if VSI has configured VLANs
45508+ * @vsi: pointer to the vsi
45509+ *
45510+ * Check if a VSI has any VLANs configured. Returns false if we have a port
45511+ * VLAN or if we have no configured VLANs. Do not call while holding the
45512+ * mac_filter_hash_lock.
45513+ */
45514+static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
45515+{
45516+ bool have_vlans;
45517+
45518+ /* If we have a port VLAN, then the VSI cannot have any VLANs
45519+ * configured, as all MAC/VLAN filters will be assigned to the PVID.
45520+ */
45521+ if (vsi->info.pvid)
45522+ return false;
45523+
45524+ /* Since we don't have a PVID, we know that if the device is in VLAN
45525+ * mode it must be because of a VLAN filter configured on this VSI.
45526+ */
45527+ spin_lock_bh(&vsi->mac_filter_hash_lock);
45528+ have_vlans = i40e_is_vsi_in_vlan(vsi);
45529+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
45530+
45531+ return have_vlans;
45532+}
45533+
45534+/**
45535+ * i40e_ndo_set_vf_port_vlan
45536+ * @netdev: network interface device structure
45537+ * @vf_id: VF identifier
45538+ * @vlan_id: VLAN identifier
45539+ * @qos: priority setting
45540+ * @vlan_proto: vlan protocol
45541+ *
45542+ * program VF vlan id and/or qos
45543+ **/
45544+#ifdef IFLA_VF_VLAN_INFO_MAX
45545+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
45546+ u16 vlan_id, u8 qos, __be16 vlan_proto)
45547+#else
45548+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
45549+ int vf_id, u16 vlan_id, u8 qos)
45550+#endif /* IFLA_VF_VLAN_INFO_MAX */
45551+{
45552+ u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
45553+ struct i40e_netdev_priv *np = netdev_priv(netdev);
45554+ bool allmulti = false, alluni = false;
45555+ struct i40e_pf *pf = np->vsi->back;
45556+ struct i40e_vsi *vsi;
45557+ struct i40e_vf *vf;
45558+ int ret;
45559+
45560+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
45561+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
45562+ return -EAGAIN;
45563+ }
45564+
45565+ /* validate the request */
45566+ ret = i40e_validate_vf(pf, vf_id);
45567+ if (ret)
45568+ goto error_pvid;
45569+
45570+ if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
45571+ dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
45572+ ret = -EINVAL;
45573+ goto error_pvid;
45574+ }
45575+#ifdef IFLA_VF_VLAN_INFO_MAX
45576+
45577+ if (vlan_proto != htons(ETH_P_8021Q)) {
45578+ dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
45579+ ret = -EPROTONOSUPPORT;
45580+ goto error_pvid;
45581+ }
45582+#endif
45583+
45584+ vf = &pf->vf[vf_id];
45585+ vsi = pf->vsi[vf->lan_vsi_idx];
45586+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
45587+ dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
45588+ vf_id);
45589+ ret = -EAGAIN;
45590+ goto error_pvid;
45591+ }
45592+
45593+ if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
45594+#ifdef HAVE_NDO_SET_VF_LINK_STATE
45595+ /* if the VLAN is being removed then clear trunk_vlans */
45596+ if (!vsi->info.pvid)
45597+ memset(vf->trunk_vlans, 0,
45598+ BITS_TO_LONGS(VLAN_N_VID) * sizeof(long));
45599+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
45600+ goto error_pvid;
45601+ }
45602+
45603+ if (i40e_vsi_has_vlans(vsi)) {
45604+ dev_err(&pf->pdev->dev,
45605+ "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
45606+ vf_id);
45607+ /* Administrator error - knock the VF offline until the
45608+ * administrator reconfigures the network correctly
45609+ * and then reloads the VF driver.
45610+ */
45611+ i40e_vc_reset_vf(vf, true);
45612+ /* During reset the VF got a new VSI, so refresh the pointer. */
45613+ vsi = pf->vsi[vf->lan_vsi_idx];
45614+ }
45615+
45616+ /* Lock once because multiple functions below iterate the list */
45617+ spin_lock_bh(&vsi->mac_filter_hash_lock);
45618+
45619+ /* Check for condition where there was already a port VLAN ID
45620+ * filter set and now it is being deleted by setting it to zero.
45621+ * Additionally check for the condition where there was a port
45622+ * VLAN but now there is a new and different port VLAN being set.
45623+ * Before deleting all the old VLAN filters we must add new ones
45624+ * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
45625+ * MAC addresses deleted.
45626+ */
45627+ if ((!(vlan_id || qos) ||
45628+ vlanprio != le16_to_cpu(vsi->info.pvid)) &&
45629+ vsi->info.pvid) {
45630+ ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
45631+ if (ret) {
45632+ dev_info(&vsi->back->pdev->dev,
45633+ "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
45634+ vsi->back->hw.aq.asq_last_status);
45635+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
45636+ goto error_pvid;
45637+ }
45638+ }
45639+
45640+ if (vsi->info.pvid) {
45641+ /* remove all filters on the old VLAN */
45642+ i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
45643+ VLAN_VID_MASK));
45644+ }
45645+
45646+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
45647+
45648+ /* disable promisc modes in case they were enabled */
45649+ ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
45650+ allmulti, alluni);
45651+ if (ret) {
45652+ dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
45653+ goto error_pvid;
45654+ }
45655+
45656+ if (vlan_id || qos) {
45657+ ret = i40e_vsi_add_pvid(vsi, vlanprio);
45658+ if (ret) {
45659+ dev_info(&vsi->back->pdev->dev,
45660+ "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
45661+ vsi->back->hw.aq.asq_last_status);
45662+ goto error_pvid;
45663+ }
45664+ } else {
45665+ i40e_vsi_remove_pvid(vsi);
45666+#ifdef HAVE_NDO_SET_VF_LINK_STATE
45667+ /* if the VLAN is being removed then also clear trunk_vlans */
45668+ if (!vsi->info.pvid)
45669+ memset(vf->trunk_vlans, 0,
45670+ BITS_TO_LONGS(VLAN_N_VID) * sizeof(long));
45671+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
45672+ }
45673+ spin_lock_bh(&vsi->mac_filter_hash_lock);
45674+
45675+ if (vlan_id) {
45676+ dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
45677+ vlan_id, qos, vf_id);
45678+
45679+ /* add new VLAN filter for each MAC */
45680+ ret = i40e_add_vlan_all_mac(vsi, vlan_id);
45681+ if (ret) {
45682+ dev_info(&vsi->back->pdev->dev,
45683+ "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
45684+ vsi->back->hw.aq.asq_last_status);
45685+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
45686+ goto error_pvid;
45687+ }
45688+#ifdef HAVE_NDO_SET_VF_LINK_STATE
45689+ /* only pvid should be present in trunk */
45690+ memset(vf->trunk_vlans, 0,
45691+ BITS_TO_LONGS(VLAN_N_VID) * sizeof(long));
45692+ set_bit(vsi->info.pvid, vf->trunk_vlans);
45693+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
45694+
45695+ /* remove the previously added non-VLAN MAC filters */
45696+ i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
45697+ }
45698+
45699+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
45700+
45701+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
45702+ alluni = true;
45703+
45704+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
45705+ allmulti = true;
45706+
45707+ /* The Port VLAN needs to be saved across resets the same as the
45708+ * default LAN MAC address.
45709+ */
45710+ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
45711+ if (vsi->info.pvid) {
45712+ ret = i40e_config_vf_promiscuous_mode(vf,
45713+ vsi->id,
45714+ allmulti,
45715+ alluni);
45716+ if (ret) {
45717+ dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
45718+ goto error_pvid;
45719+ }
45720+ }
45721+
45722+ /* Schedule the worker thread to take care of applying changes */
45723+ i40e_service_event_schedule(vsi->back);
45724+
45725+error_pvid:
45726+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
45727+ return ret;
45728+}
45729+
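/* Editor's sketch (not part of the patch): the port-VLAN word built at the top
 * of i40e_ndo_set_vf_port_vlan() packs the VLAN ID and QoS priority into one
 * 16-bit TCI-style value. Assuming I40E_VLAN_PRIORITY_SHIFT is 13 (the 802.1Q
 * PCP position), vlan_id 100 with qos 5 gives (5 << 13) | 100 = 0xA064.
 * The helper name is hypothetical.
 */
static inline u16 example_vlanprio(u16 vlan_id, u8 qos)
{
	return vlan_id | ((u16)qos << I40E_VLAN_PRIORITY_SHIFT);
}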
45730+/**
45731+ * i40e_ndo_set_vf_bw
45732+ * @netdev: network interface device structure
45733+ * @vf_id: VF identifier
45734+ * @min_tx_rate: Minimum Tx rate
45735+ * @max_tx_rate: Maximum Tx rate
45736+ *
45737+ * configure VF Tx rate
45738+ **/
45739+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
45740+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
45741+ int max_tx_rate)
45742+#else
45743+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int max_tx_rate)
45744+#endif
45745+{
45746+ struct i40e_netdev_priv *np = netdev_priv(netdev);
45747+ struct i40e_pf *pf = np->vsi->back;
45748+ struct i40e_vsi *vsi;
45749+ struct i40e_vf *vf;
45750+ int ret = 0;
45751+
45752+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
45753+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
45754+ return -EAGAIN;
45755+ }
45756+
45757+ /* validate the request */
45758+ ret = i40e_validate_vf(pf, vf_id);
45759+ if (ret)
45760+ goto error;
45761+
45762+ vf = &pf->vf[vf_id];
45763+ vsi = pf->vsi[vf->lan_vsi_idx];
45764+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
45765+ dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
45766+ vf_id);
45767+ ret = -EAGAIN;
45768+ goto error;
45769+ }
45770+
45771+ ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
45772+ if (ret)
45773+ goto error;
45774+
45775+ vf->tx_rate = max_tx_rate;
45776+error:
45777+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
45778+ return ret;
45779+}
45780+
45781+/**
45782+ * i40e_ndo_enable_vf
45783+ * @netdev: network interface device structure
45784+ * @vf_id: VF identifier
45785+ * @enable: true to enable & false to disable
45786+ *
45787+ * enable/disable VF
45788+ **/
45789+int i40e_ndo_enable_vf(struct net_device *netdev, int vf_id, bool enable)
45790+{
45791+ return -EOPNOTSUPP;
45792+}
45793+
45794+/**
45795+ * i40e_ndo_get_vf_config
45796+ * @netdev: network interface device structure
45797+ * @vf_id: VF identifier
45798+ * @ivi: VF configuration structure
45799+ *
45800+ * return VF configuration
45801+ **/
45802+int i40e_ndo_get_vf_config(struct net_device *netdev,
45803+ int vf_id, struct ifla_vf_info *ivi)
45804+{
45805+ struct i40e_netdev_priv *np = netdev_priv(netdev);
45806+ struct i40e_vsi *vsi = np->vsi;
45807+ struct i40e_pf *pf = vsi->back;
45808+ struct i40e_vf *vf;
45809+ int ret = 0;
45810+
45811+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
45812+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
45813+ return -EAGAIN;
45814 }
45815- set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
45816
45817-err:
45818- /* send the response back to the VF */
45819- ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
45820- aq_ret, (u8 *)vfres, len);
45821+ /* validate the request */
45822+ ret = i40e_validate_vf(pf, vf_id);
45823+ if (ret)
45824+ goto error_param;
45825
45826- kfree(vfres);
45827+ vf = &pf->vf[vf_id];
45828+ /* first vsi is always the LAN vsi */
45829+ vsi = pf->vsi[vf->lan_vsi_idx];
45830+ if (!vsi) {
45831+ ret = -ENOENT;
45832+ goto error_param;
45833+ }
45834+
45835+ ivi->vf = vf_id;
45836+
45837+ ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
45838+
45839+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
45840+ ivi->max_tx_rate = vf->tx_rate;
45841+ ivi->min_tx_rate = 0;
45842+#else
45843+ ivi->tx_rate = vf->tx_rate;
45844+#endif
45845+ ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
45846+ ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
45847+ I40E_VLAN_PRIORITY_SHIFT;
45848+#ifdef HAVE_NDO_SET_VF_LINK_STATE
45849+ if (vf->link_forced == false)
45850+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
45851+ else if (vf->link_up == true)
45852+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
45853+ else
45854+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
45855+#endif
45856+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
45857+ ivi->spoofchk = vf->mac_anti_spoof;
45858+#endif
45859+#ifdef HAVE_NDO_SET_VF_TRUST
45860+ ivi->trusted = vf->trusted;
45861+#endif /* HAVE_NDO_SET_VF_TRUST */
45862+ ret = 0;
45863+
45864+error_param:
45865+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
45866+ return ret;
45867+}
45868+
45869+#ifdef HAVE_NDO_SET_VF_LINK_STATE
45870+/**
45871+ * i40e_ndo_set_vf_link_state
45872+ * @netdev: network interface device structure
45873+ * @vf_id: VF identifier
45874+ * @link: required link state
45875+ *
45876+ * Set the link state of a specified VF, regardless of physical link state
45877+ **/
45878+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
45879+{
45880+ struct i40e_netdev_priv *np = netdev_priv(netdev);
45881+ struct i40e_pf *pf = np->vsi->back;
45882+ struct i40e_link_status *ls = &pf->hw.phy.link_info;
45883+ struct virtchnl_pf_event pfe;
45884+ struct i40e_hw *hw = &pf->hw;
45885+ struct i40e_vf *vf;
45886+ int abs_vf_id;
45887+ int ret = 0;
45888+
45889+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
45890+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
45891+ return -EAGAIN;
45892+ }
45893+
45894+ /* validate the request */
45895+ if (vf_id >= pf->num_alloc_vfs) {
45896+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
45897+ ret = -EINVAL;
45898+ goto error_out;
45899+ }
45900+
45901+ vf = &pf->vf[vf_id];
45902+ abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
45903+
45904+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
45905+ pfe.severity = PF_EVENT_SEVERITY_INFO;
45906+
45907+ switch (link) {
45908+ case IFLA_VF_LINK_STATE_AUTO:
45909+ vf->link_forced = false;
45910+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
45911+ pfe.event_data.link_event_adv.link_status =
45912+ ls->link_info & I40E_AQ_LINK_UP;
45913+ pfe.event_data.link_event_adv.link_speed =
45914+ i40e_vc_link_speed2mbps(ls->link_speed);
45915+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
45916+ pfe.event_data.link_event.link_status =
45917+ ls->link_info & I40E_AQ_LINK_UP;
45918+ pfe.event_data.link_event.link_speed =
45919+ i40e_virtchnl_link_speed(ls->link_speed);
45920+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
45921+ break;
45922+ case IFLA_VF_LINK_STATE_ENABLE:
45923+ vf->link_forced = true;
45924+ vf->link_up = true;
45925+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
45926+ pfe.event_data.link_event_adv.link_status = true;
45927+ pfe.event_data.link_event_adv.link_speed =
45928+ i40e_vc_link_speed2mbps(ls->link_speed);
45929+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
45930+ pfe.event_data.link_event.link_status = true;
45931+ pfe.event_data.link_event.link_speed =
45932+ i40e_virtchnl_link_speed(ls->link_speed);
45933+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
45934+ break;
45935+ case IFLA_VF_LINK_STATE_DISABLE:
45936+ vf->link_forced = true;
45937+ vf->link_up = false;
45938+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
45939+ pfe.event_data.link_event_adv.link_status = false;
45940+ pfe.event_data.link_event_adv.link_speed = 0;
45941+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
45942+ pfe.event_data.link_event.link_status = false;
45943+ pfe.event_data.link_event.link_speed = 0;
45944+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
45945+ break;
45946+ default:
45947+ ret = -EINVAL;
45948+ goto error_out;
45949+ }
45950+ /* Do not allow change link state when VF is disabled
45951+ * Check if requested link state is not IFLA_VF_LINK_STATE_DISABLE,
45952+ * to prevent false positive warning in case of reloading the driver
45953+ */
45954+ if (vf->pf_ctrl_disable && link != IFLA_VF_LINK_STATE_DISABLE) {
45955+ vf->link_up = false;
45956+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
45957+ pfe.event_data.link_event_adv.link_status = false;
45958+ pfe.event_data.link_event_adv.link_speed = 0;
45959+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
45960+ pfe.event_data.link_event.link_status = false;
45961+ pfe.event_data.link_event.link_speed = 0;
45962+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
45963+ dev_warn(&pf->pdev->dev,
45964+ "Not possible to change VF link state, please enable it first\n");
45965+ }
45966+
45967+ /* Notify the VF of its new link state */
45968+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
45969+ I40E_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL);
45970+
45971+error_out:
45972+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
45973+ return ret;
45974+}
45975+
45976+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
45977+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
45978+/**
45979+ * i40e_ndo_set_vf_spoofchk
45980+ * @netdev: network interface device structure
45981+ * @vf_id: VF identifier
45982+ * @enable: flag to enable or disable feature
45983+ *
45984+ * Enable or disable VF spoof checking
45985+ **/
45986+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
45987+{
45988+ struct i40e_netdev_priv *np = netdev_priv(netdev);
45989+ struct i40e_vsi *vsi = np->vsi;
45990+ struct i40e_pf *pf = vsi->back;
45991+ struct i40e_vsi_context ctxt;
45992+ struct i40e_hw *hw = &pf->hw;
45993+ struct i40e_vf *vf;
45994+ int ret = 0;
45995+
45996+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
45997+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
45998+ return -EAGAIN;
45999+ }
46000+
46001+ /* validate the request */
46002+ if (vf_id >= pf->num_alloc_vfs) {
46003+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
46004+ ret = -EINVAL;
46005+ goto out;
46006+ }
46007+
46008+ vf = &(pf->vf[vf_id]);
46009+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
46010+ dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
46011+ vf_id);
46012+ ret = -EAGAIN;
46013+ goto out;
46014+ }
46015+
46016+ if (enable == vf->mac_anti_spoof)
46017+ goto out;
46018+
46019+ vf->mac_anti_spoof = enable;
46020+ memset(&ctxt, 0, sizeof(ctxt));
46021+ ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
46022+ ctxt.pf_num = pf->hw.pf_id;
46023+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
46024+ if (enable)
46025+ ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
46026+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
46027+ if (ret) {
46028+ dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
46029+ ret);
46030+ ret = -EIO;
46031+ }
46032+out:
46033+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
46034+ return ret;
46035+}
46036+
46037+#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
46038+#ifdef HAVE_NDO_SET_VF_TRUST
46039+/**
46040+ * i40e_ndo_set_vf_trust
46041+ * @netdev: network interface device structure of the pf
46042+ * @vf_id: VF identifier
46043+ * @setting: trust setting
46044+ *
46045+ * Enable or disable VF trust setting
46046+ **/
46047+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
46048+{
46049+ struct i40e_netdev_priv *np = netdev_priv(netdev);
46050+ struct i40e_pf *pf = np->vsi->back;
46051+ struct i40e_vf *vf;
46052+ int ret = 0;
46053+
46054+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
46055+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
46056+ return -EAGAIN;
46057+ }
46058+
46059+ /* validate the request */
46060+ if (vf_id >= pf->num_alloc_vfs) {
46061+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
46062+ ret = -EINVAL;
46063+ goto out;
46064+ }
46065+
46066+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
46067+ dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
46068+ ret = -EINVAL;
46069+ goto out;
46070+ }
46071+
46072+ vf = &pf->vf[vf_id];
46073+
46074+ /* if vf is in base mode, make it untrusted */
46075+ if (pf->vf_base_mode_only)
46076+ setting = false;
46077+ if (setting == vf->trusted)
46078+ goto out;
46079+
46080+ vf->trusted = setting;
46081+ i40e_vc_reset_vf(vf, true);
46082+ dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
46083+ vf_id, setting ? "" : "un");
46084+
46085+#ifdef __TC_MQPRIO_MODE_MAX
46086+ if (vf->adq_enabled) {
46087+ if (!vf->trusted) {
46088+ dev_info(&pf->pdev->dev,
46089+ "VF %u no longer Trusted, deleting all cloud filters\n",
46090+ vf_id);
46091+ i40e_del_all_cloud_filters(vf);
46092+ }
46093+ }
46094+#endif /* __TC_MQPRIO_MODE_MAX */
46095+
46096+out:
46097+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
46098 return ret;
46099 }
46100+#endif /* HAVE_NDO_SET_VF_TRUST */
46101+#ifdef HAVE_VF_STATS
46102
46103 /**
46104- * i40e_vc_reset_vf_msg
46105- * @vf: pointer to the VF info
46106- * @msg: pointer to the msg buffer
46107- * @msglen: msg length
46108- *
46109- * called from the VF to reset itself,
46110- * unlike other virtchnl messages, PF driver
46111- * doesn't send the response back to the VF
46112- **/
46113-static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
46114+ * i40e_get_vf_stats - populate some stats for the VF
46115+ * @netdev: the netdev of the PF
46116+ * @vf_id: the host OS identifier (0-127)
46117+ * @vf_stats: pointer to the OS memory to be initialized
46118+ */
46119+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
46120+ struct ifla_vf_stats *vf_stats)
46121 {
46122- if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
46123- i40e_reset_vf(vf, false);
46124+ struct i40e_netdev_priv *np = netdev_priv(netdev);
46125+ struct i40e_pf *pf = np->vsi->back;
46126+ struct i40e_eth_stats *stats;
46127+ struct i40e_vsi *vsi;
46128+ struct i40e_vf *vf;
46129+
46130+ /* validate the request */
46131+ if (i40e_validate_vf(pf, vf_id))
46132+ return -EINVAL;
46133+
46134+ vf = &pf->vf[vf_id];
46135+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
46136+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
46137+ return -EBUSY;
46138+ }
46139+
46140+ vsi = pf->vsi[vf->lan_vsi_idx];
46141+ if (!vsi)
46142+ return -EINVAL;
46143+
46144+ i40e_update_eth_stats(vsi);
46145+ stats = &vsi->eth_stats;
46146+
46147+ memset(vf_stats, 0, sizeof(*vf_stats));
46148+
46149+ vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
46150+ stats->rx_multicast;
46151+ vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
46152+ stats->tx_multicast;
46153+ vf_stats->rx_bytes = stats->rx_bytes;
46154+ vf_stats->tx_bytes = stats->tx_bytes;
46155+ vf_stats->broadcast = stats->rx_broadcast;
46156+ vf_stats->multicast = stats->rx_multicast;
46157+#ifdef HAVE_VF_STATS_DROPPED
46158+ vf_stats->rx_dropped = stats->rx_discards;
46159+ vf_stats->tx_dropped = stats->tx_discards;
46160+#endif
46161+
46162+ return 0;
46163 }
46164+#endif /* HAVE_VF_STATS */
46165+#endif /* IFLA_VF_MAX */
46166+#ifdef HAVE_NDO_SET_VF_LINK_STATE
46167
46168 /**
46169- * i40e_getnum_vf_vsi_vlan_filters
46170- * @vsi: pointer to the vsi
46171+ * i40e_get_trunk - Gets the configured VLAN filters
46172+ * @pdev: PCI device information struct
46173+ * @vf_id: VF identifier
46174+ * @trunk_vlans: trunk vlans
46175 *
46176- * called to get the number of VLANs offloaded on this VF
46177+ * Gets the active trunk vlans
46178+ *
46179+ * Returns the number of active VLAN filters on success,
46180+ * negative on failure
46181 **/
46182-static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
46183+static int i40e_get_trunk(struct pci_dev *pdev, int vf_id,
46184+ unsigned long *trunk_vlans)
46185 {
46186- struct i40e_mac_filter *f;
46187- int num_vlans = 0, bkt;
46188+ struct i40e_pf *pf = pci_get_drvdata(pdev);
46189+ struct i40e_vsi *vsi;
46190+ struct i40e_vf *vf;
46191+ int ret;
46192
46193- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
46194- if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
46195- num_vlans++;
46196+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
46197+ dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
46198+ return -EAGAIN;
46199 }
46200
46201- return num_vlans;
46202+ /* validate the request */
46203+ ret = i40e_validate_vf(pf, vf_id);
46204+ if (ret)
46205+ goto out;
46206+ vf = &pf->vf[vf_id];
46207+ /* checking if pvid has been set through netdev */
46208+ vsi = pf->vsi[vf->lan_vsi_idx];
46209+ if (vsi->info.pvid) {
46210+ memset(trunk_vlans, 0,
46211+ BITS_TO_LONGS(VLAN_N_VID) * sizeof(long));
46212+ set_bit(vsi->info.pvid, trunk_vlans);
46213+ } else {
46214+ bitmap_copy(trunk_vlans, vf->trunk_vlans, VLAN_N_VID);
46215+ }
46216+
46218+ ret = bitmap_weight(trunk_vlans, VLAN_N_VID);
46219+out:
46220+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
46221+ return ret;
46222 }
46223
46224 /**
46225- * i40e_vc_config_promiscuous_mode_msg
46226- * @vf: pointer to the VF info
46227- * @msg: pointer to the msg buffer
46228- * @msglen: msg length
46229+ * i40e_set_trunk - Configure VLAN filters
46230+ * @pdev: PCI device information struct
46231+ * @vf_id: VF identifier
46232+ * @vlan_bitmap: vlans to filter on
46233 *
46234- * called from the VF to configure the promiscuous mode of
46235- * VF vsis
46236+ * Applies the VLAN filters
46237+ *
46238+ * Returns 0 on success, negative on failure
46239 **/
46240-static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
46241- u8 *msg, u16 msglen)
46242+static int i40e_set_trunk(struct pci_dev *pdev, int vf_id,
46243+ const unsigned long *vlan_bitmap)
46244 {
46245- struct virtchnl_promisc_info *info =
46246- (struct virtchnl_promisc_info *)msg;
46247- struct i40e_pf *pf = vf->pf;
46248- struct i40e_hw *hw = &pf->hw;
46249- struct i40e_mac_filter *f;
46250- i40e_status aq_ret = 0;
46251- bool allmulti = false;
46252+ struct i40e_pf *pf = pci_get_drvdata(pdev);
46253 struct i40e_vsi *vsi;
46254- bool alluni = false;
46255- int aq_err = 0;
46256- int bkt;
46257+ struct i40e_vf *vf;
46258+ int ret;
46259+ u16 vid;
46260
46261- vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
46262- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
46263- !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
46264- !vsi) {
46265- aq_ret = I40E_ERR_PARAM;
46266- goto error_param;
46267- }
46268- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
46269- dev_err(&pf->pdev->dev,
46270- "Unprivileged VF %d is attempting to configure promiscuous mode\n",
46271- vf->vf_id);
46272- /* Lie to the VF on purpose. */
46273- aq_ret = 0;
46274- goto error_param;
46275+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
46276+ dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
46277+ return -EAGAIN;
46278 }
46279- /* Multicast promiscuous handling*/
46280- if (info->flags & FLAG_VF_MULTICAST_PROMISC)
46281- allmulti = true;
46282
46283- if (vf->port_vlan_id) {
46284- aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
46285- allmulti,
46286- vf->port_vlan_id,
46287- NULL);
46288- } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
46289- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
46290- if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
46291- continue;
46292- aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
46293- vsi->seid,
46294- allmulti,
46295- f->vlan,
46296- NULL);
46297- aq_err = pf->hw.aq.asq_last_status;
46298- if (aq_ret) {
46299- dev_err(&pf->pdev->dev,
46300- "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
46301- f->vlan,
46302- i40e_stat_str(&pf->hw, aq_ret),
46303- i40e_aq_str(&pf->hw, aq_err));
46304- break;
46305- }
46306+ /* validate the request */
46307+ ret = i40e_validate_vf(pf, vf_id);
46308+ if (ret)
46309+ goto out;
46310+ vf = &pf->vf[vf_id];
46311+ vsi = pf->vsi[vf->lan_vsi_idx];
46312+ i40e_vlan_stripping_enable(vsi);
46313+
46314+ /* checking if pvid has been set through netdev */
46315+ vid = vsi->info.pvid;
46316+ if (vid) {
46317+ struct i40e_vsi *pf_vsi = pf->vsi[pf->lan_vsi];
46318+
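+		/* drop the pending-op bit around the ndo_set_vf_vlan call below,
+		 * since that handler takes the same bit; it is re-taken afterwards
+		 */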
46319+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
46320+#ifdef IFLA_VF_VLAN_INFO_MAX
46321+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
46322+ pf_vsi->netdev->netdev_ops->extended.ndo_set_vf_vlan
46323+ (pf_vsi->netdev, vf_id, 0, 0, htons(ETH_P_8021Q));
46324+#else
46325+ pf_vsi->netdev->netdev_ops->ndo_set_vf_vlan
46326+ (pf_vsi->netdev, vf_id, 0, 0, htons(ETH_P_8021Q));
46327+#endif // HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
46328+#else // IFLA_VF_VLAN_INFO_MAX
46329+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
46330+ pf_vsi->netdev->netdev_ops->extended.ndo_set_vf_vlan
46331+ (pf_vsi->netdev, vf_id, 0, 0);
46332+#else
46333+ pf_vsi->netdev->netdev_ops->ndo_set_vf_vlan
46334+ (pf_vsi->netdev, vf_id, 0, 0);
46335+#endif // HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
46336+#endif // IFLA_VF_VLAN_INFO_MAX
46337+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
46338+ dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
46339+ return -EAGAIN;
46340 }
46341- } else {
46342- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
46343- allmulti, NULL);
46344- aq_err = pf->hw.aq.asq_last_status;
46345- if (aq_ret) {
46346- dev_err(&pf->pdev->dev,
46347- "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
46348- vf->vf_id,
46349- i40e_stat_str(&pf->hw, aq_ret),
46350- i40e_aq_str(&pf->hw, aq_err));
46351- goto error_param;
46352+ if (i40e_vsi_add_vlan(vsi, vid)) {
46353+ dev_warn(&pdev->dev, "Unable to restore Port VLAN for trunking.\n");
46354+ goto out;
46355 }
46356 }
46357
46358- if (!aq_ret) {
46359- dev_info(&pf->pdev->dev,
46360- "VF %d successfully set multicast promiscuous mode\n",
46361- vf->vf_id);
46362- if (allmulti)
46363- set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
46364- else
46365- clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
46366+ /* Add vlans */
46367+ for_each_set_bit(vid, vlan_bitmap, VLAN_N_VID) {
46368+ if (!test_bit(vid, vf->trunk_vlans)) {
46369+ ret = i40e_vsi_add_vlan(vsi, vid);
46370+ if (ret)
46371+ goto out;
46372+ }
46373 }
46374
46375- if (info->flags & FLAG_VF_UNICAST_PROMISC)
46376- alluni = true;
46377- if (vf->port_vlan_id) {
46378- aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
46379- alluni,
46380- vf->port_vlan_id,
46381- NULL);
46382- } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
46383- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
46384- if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
46385- continue;
46386- aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
46387- vsi->seid,
46388- alluni,
46389- f->vlan,
46390- NULL);
46391- aq_err = pf->hw.aq.asq_last_status;
46392- if (aq_ret)
46393- dev_err(&pf->pdev->dev,
46394- "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
46395- f->vlan,
46396- i40e_stat_str(&pf->hw, aq_ret),
46397- i40e_aq_str(&pf->hw, aq_err));
46398- }
46399- } else {
46400- aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
46401- alluni, NULL,
46402- true);
46403- aq_err = pf->hw.aq.asq_last_status;
46404- if (aq_ret) {
46405- dev_err(&pf->pdev->dev,
46406- "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
46407- vf->vf_id, info->flags,
46408- i40e_stat_str(&pf->hw, aq_ret),
46409- i40e_aq_str(&pf->hw, aq_err));
46410- goto error_param;
46411- }
46412+	/* If filters are being added to a previously empty trunk, remove
46413+	 * I40E_VLAN_ANY; removal of that filter sets allow_untagged to false.
46414+ */
46415+ if (bitmap_weight(vlan_bitmap, VLAN_N_VID) &&
46416+ !bitmap_weight(vf->trunk_vlans, VLAN_N_VID)) {
46417+ i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
46418+ vf->allow_untagged = false;
46419 }
46420
46421- if (!aq_ret) {
46422- dev_info(&pf->pdev->dev,
46423- "VF %d successfully set unicast promiscuous mode\n",
46424- vf->vf_id);
46425- if (alluni)
46426- set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
46427- else
46428- clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
46429+	/* If deleting all VLAN filters, check whether VLAN 0 filters exist.
46430+	 * If they don't, add filters that allow all traffic (VLAN tag = -1)
46431+	 * before deleting everything, because in the delete-all-filters flow
46432+	 * we check for VLAN 0 filters and replace them with filters of
46433+	 * VLAN id = -1
46434+	 */
46435+ if (!bitmap_weight(vlan_bitmap, VLAN_N_VID) && !vf->allow_untagged) {
46436+ ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
46437+ if (ret)
46438+ goto out;
46439+ vf->allow_untagged = true;
46440 }
46441
46442-error_param:
46443- /* send the response to the VF */
46444- return i40e_vc_send_resp_to_vf(vf,
46445- VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
46446- aq_ret);
46447+ /* Del vlans */
46448+ for_each_set_bit(vid, vf->trunk_vlans, VLAN_N_VID) {
46449+ if (!test_bit(vid, vlan_bitmap))
46450+ i40e_vsi_kill_vlan(vsi, vid);
46451+ }
46452+ /* Copy over the updated bitmap */
46453+ bitmap_copy(vf->trunk_vlans, vlan_bitmap, VLAN_N_VID);
46454+out:
46455+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
46456+ return ret;
46457 }
46458
46459 /**
46460- * i40e_vc_config_queues_msg
46461- * @vf: pointer to the VF info
46462- * @msg: pointer to the msg buffer
46463- * @msglen: msg length
46464+ * i40e_get_mirror - Gets the configured VLAN mirrors
46465+ * @pdev: PCI device information struct
46466+ * @vf_id: VF identifier
46467+ * @mirror_vlans: mirror vlans
46468 *
46469- * called from the VF to configure the rx/tx
46470- * queues
46471+ * Gets the active mirror vlans
46472+ *
46473+ * Returns the number of active mirror vlans on success,
46474+ * negative on failure
46475 **/
46476-static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
46477+static int i40e_get_mirror(struct pci_dev *pdev, int vf_id,
46478+ unsigned long *mirror_vlans)
46479 {
46480- struct virtchnl_vsi_queue_config_info *qci =
46481- (struct virtchnl_vsi_queue_config_info *)msg;
46482- struct virtchnl_queue_pair_info *qpi;
46483- struct i40e_pf *pf = vf->pf;
46484- u16 vsi_id, vsi_queue_id;
46485- i40e_status aq_ret = 0;
46486- int i;
46487-
46488- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
46489- aq_ret = I40E_ERR_PARAM;
46490- goto error_param;
46491- }
46492-
46493- vsi_id = qci->vsi_id;
46494- if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
46495- aq_ret = I40E_ERR_PARAM;
46496- goto error_param;
46497- }
46498- for (i = 0; i < qci->num_queue_pairs; i++) {
46499- qpi = &qci->qpair[i];
46500- vsi_queue_id = qpi->txq.queue_id;
46501- if ((qpi->txq.vsi_id != vsi_id) ||
46502- (qpi->rxq.vsi_id != vsi_id) ||
46503- (qpi->rxq.queue_id != vsi_queue_id) ||
46504- !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
46505- aq_ret = I40E_ERR_PARAM;
46506- goto error_param;
46507- }
46508+ struct i40e_pf *pf = pci_get_drvdata(pdev);
46509+ struct i40e_vf *vf;
46510+ int ret;
46511
46512- if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
46513- &qpi->rxq) ||
46514- i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
46515- &qpi->txq)) {
46516- aq_ret = I40E_ERR_PARAM;
46517- goto error_param;
46518- }
46519+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
46520+ dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
46521+ return -EAGAIN;
46522 }
46523- /* set vsi num_queue_pairs in use to num configured by VF */
46524- pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
46525
46526-error_param:
46527- /* send the response to the VF */
46528- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
46529- aq_ret);
46530+ /* validate the request */
46531+ ret = i40e_validate_vf(pf, vf_id);
46532+ if (ret)
46533+ goto out;
46534+ vf = &pf->vf[vf_id];
46535+ bitmap_copy(mirror_vlans, vf->mirror_vlans, VLAN_N_VID);
46536+ ret = bitmap_weight(mirror_vlans, VLAN_N_VID);
46537+out:
46538+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
46539+ return ret;
46540 }
46541
46542 /**
46543- * i40e_vc_config_irq_map_msg
46544- * @vf: pointer to the VF info
46545- * @msg: pointer to the msg buffer
46546- * @msglen: msg length
46547+ * i40e_set_mirror - Configure VLAN mirrors
46548+ * @pdev: PCI device information struct
46549+ * @vf_id: VF identifier
46550+ * @vlan_bitmap: vlans to configure as mirrors
46551 *
46552- * called from the VF to configure the irq to
46553- * queue map
46554+ * Configures the mirror vlans
46555+ *
46556+ * Returns 0 on success, negative on failure
46557 **/
46558-static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
46559+static int i40e_set_mirror(struct pci_dev *pdev, int vf_id,
46560+ const unsigned long *vlan_bitmap)
46561 {
46562- struct virtchnl_irq_map_info *irqmap_info =
46563- (struct virtchnl_irq_map_info *)msg;
46564- struct virtchnl_vector_map *map;
46565- u16 vsi_id, vsi_queue_id, vector_id;
46566- i40e_status aq_ret = 0;
46567- unsigned long tempmap;
46568- int i;
46569+ u16 vid, sw_seid, dst_seid, rule_id, rule_type;
46570+ struct i40e_pf *pf = pci_get_drvdata(pdev);
46571+ int ret = 0, num = 0, cnt = 0, add = 0;
46572+ u16 rules_used, rules_free;
46573+ struct i40e_vsi *vsi;
46574+ struct i40e_vf *vf;
46575+ __le16 *mr_list;
46576
46577- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
46578- aq_ret = I40E_ERR_PARAM;
46579- goto error_param;
46580- }
46581+ DECLARE_BITMAP(num_vlans, VLAN_N_VID);
46582
46583- for (i = 0; i < irqmap_info->num_vectors; i++) {
46584- map = &irqmap_info->vecmap[i];
46585+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
46586+ dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
46587+ return -EAGAIN;
46588+ }
46589
46590- vector_id = map->vector_id;
46591- vsi_id = map->vsi_id;
46592- /* validate msg params */
46593- if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
46594- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
46595- aq_ret = I40E_ERR_PARAM;
46596- goto error_param;
46597- }
46598+ /* validate the request */
46599+ ret = i40e_validate_vf(pf, vf_id);
46600+ if (ret)
46601+ goto out;
46602+ vf = &pf->vf[vf_id];
46603+ vsi = pf->vsi[vf->lan_vsi_idx];
46604+ sw_seid = vsi->uplink_seid;
46605+ dst_seid = vsi->seid;
46606+ rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
46607+ bitmap_xor(num_vlans, vf->mirror_vlans, vlan_bitmap, VLAN_N_VID);
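+	/* num_vlans now holds the VLANs that differ between the current and requested mirror sets */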
46608+ cnt = bitmap_weight(num_vlans, VLAN_N_VID);
46609+ if (!cnt)
46610+ goto out;
46611+ mr_list = kcalloc(cnt, sizeof(__le16), GFP_KERNEL);
46612+ if (!mr_list) {
46613+ ret = -ENOMEM;
46614+ goto out;
46615+ }
46616
46617- /* lookout for the invalid queue index */
46618- tempmap = map->rxq_map;
46619- for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
46620- if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
46621- vsi_queue_id)) {
46622- aq_ret = I40E_ERR_PARAM;
46623- goto error_param;
46624+ /* figure out if adding or deleting */
46625+ bitmap_and(num_vlans, vlan_bitmap, num_vlans, VLAN_N_VID);
46626+ add = bitmap_weight(num_vlans, VLAN_N_VID);
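+	/* a non-zero weight means the delta consists of VLANs to add, otherwise only deletions remain */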
46627+ if (add) {
46628+ /* Add mirrors */
46629+ for_each_set_bit(vid, vlan_bitmap, VLAN_N_VID) {
46630+ if (!test_bit(vid, vf->mirror_vlans)) {
46631+ mr_list[num] = CPU_TO_LE16(vid);
46632+ num++;
46633 }
46634 }
46635-
46636- tempmap = map->txq_map;
46637- for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
46638- if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
46639- vsi_queue_id)) {
46640- aq_ret = I40E_ERR_PARAM;
46641- goto error_param;
46642+ ret = i40e_aq_add_mirrorrule(&pf->hw, sw_seid,
46643+ rule_type, dst_seid,
46644+ cnt, mr_list, NULL,
46645+ &rule_id, &rules_used,
46646+ &rules_free);
46647+ if (ret)
46648+ goto err_free;
46649+ vf->vlan_rule_id = rule_id;
46650+ } else {
46651+ /* Del mirrors */
46652+ for_each_set_bit(vid, vf->mirror_vlans, VLAN_N_VID) {
46653+ if (!test_bit(vid, vlan_bitmap)) {
46654+ mr_list[num] = CPU_TO_LE16(vid);
46655+ num++;
46656 }
46657 }
46658+ ret = i40e_aq_delete_mirrorrule(&pf->hw, sw_seid, rule_type,
46659+ vf->vlan_rule_id, cnt, mr_list,
46660+ NULL, &rules_used,
46661+ &rules_free);
46662+ if (ret)
46663+ goto err_free;
46664+ }
46665
46666- i40e_config_irq_link_list(vf, vsi_id, map);
46667+ /* Copy over the updated bitmap */
46668+ bitmap_copy(vf->mirror_vlans, vlan_bitmap, VLAN_N_VID);
46669+err_free:
46670+ kfree(mr_list);
46671+out:
46672+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
46673+ return ret;
46674+}
46675+
46676+/**
46677+ * i40e_get_allow_untagged
46678+ * @pdev: PCI device information struct
46679+ * @vf_id: VF identifier
46680+ * @on: on or off
46681+ *
46682+ * This function checks whether untagged packets
46683+ * are allowed on the VF.
46684+ *
46685+ * Returns 0 on success, negative on failure
46686+ **/
46687+static int i40e_get_allow_untagged(struct pci_dev *pdev, int vf_id,
46688+ bool *on)
46689+{
46690+ struct i40e_pf *pf = pci_get_drvdata(pdev);
46691+ struct i40e_vf *vf;
46692+ int ret;
46693+
46694+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
46695+ dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
46696+ return -EAGAIN;
46697 }
46698-error_param:
46699- /* send the response to the VF */
46700- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
46701- aq_ret);
46702+
46703+ /* validate the request */
46704+ ret = i40e_validate_vf(pf, vf_id);
46705+ if (ret)
46706+ goto out;
46707+ vf = &pf->vf[vf_id];
46708+ *on = vf->allow_untagged;
46709+out:
46710+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
46711+ return ret;
46712 }
46713
46714 /**
46715- * i40e_vc_enable_queues_msg
46716- * @vf: pointer to the VF info
46717- * @msg: pointer to the msg buffer
46718- * @msglen: msg length
46719+ * i40e_set_allow_untagged
46720+ * @pdev: PCI device information struct
46721+ * @vf_id: VF identifier
46722+ * @on: on or off
46723 *
46724- * called from the VF to enable all or specific queue(s)
46725+ * This function allows or blocks untagged packets
46726+ * on the VF.
46727+ *
46728+ * Returns 0 on success, negative on failure
46729 **/
46730-static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
46731+static int i40e_set_allow_untagged(struct pci_dev *pdev, int vf_id,
46732+ const bool on)
46733 {
46734- struct virtchnl_queue_select *vqs =
46735- (struct virtchnl_queue_select *)msg;
46736- struct i40e_pf *pf = vf->pf;
46737- u16 vsi_id = vqs->vsi_id;
46738- i40e_status aq_ret = 0;
46739+ struct i40e_pf *pf = pci_get_drvdata(pdev);
46740+ struct i40e_vsi *vsi;
46741+ struct i40e_vf *vf;
46742+ int ret;
46743
46744- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
46745- aq_ret = I40E_ERR_PARAM;
46746- goto error_param;
46747+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
46748+ dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
46749+ return -EAGAIN;
46750 }
46751
46752- if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
46753- aq_ret = I40E_ERR_PARAM;
46754- goto error_param;
46755+ /* validate the request */
46756+ ret = i40e_validate_vf(pf, vf_id);
46757+ if (ret)
46758+ goto out;
46759+ vf = &pf->vf[vf_id];
46760+ vsi = pf->vsi[vf->lan_vsi_idx];
46761+ if (vsi->info.pvid) {
46762+ ret = -EINVAL;
46763+ goto out;
46764 }
46765-
46766- if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
46767- aq_ret = I40E_ERR_PARAM;
46768- goto error_param;
46769+ spin_lock_bh(&vsi->mac_filter_hash_lock);
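+	/* untagged traffic is toggled by adding or removing the VLAN 0 MAC filters on the VSI */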
46770+ if (!on) {
46771+ i40e_rm_vlan_all_mac(vsi, 0);
46772+ i40e_service_event_schedule(vsi->back);
46773+ } else {
46774+ ret = i40e_add_vlan_all_mac(vsi, 0);
46775+ i40e_service_event_schedule(vsi->back);
46776 }
46777-
46778- if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
46779- aq_ret = I40E_ERR_TIMEOUT;
46780-error_param:
46781- /* send the response to the VF */
46782- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
46783- aq_ret);
46784+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
46785+ if (!ret)
46786+ vf->allow_untagged = on;
46787+out:
46788+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
46789+ return ret;
46790 }
46791
46792 /**
46793- * i40e_vc_disable_queues_msg
46794- * @vf: pointer to the VF info
46795- * @msg: pointer to the msg buffer
46796- * @msglen: msg length
46797+ * i40e_get_loopback
46798+ * @pdev: PCI device information struct
46799+ * @vf_id: VF identifier
46800+ * @enable: enable or disable
46801 *
46802- * called from the VF to disable all or specific
46803- * queue(s)
46804+ * This function checks whether loopback is enabled
46805+ *
46806+ * Returns 0 on success, negative on failure
46807 **/
46808-static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
46809+static int i40e_get_loopback(struct pci_dev *pdev, int vf_id, bool *enable)
46810 {
46811- struct virtchnl_queue_select *vqs =
46812- (struct virtchnl_queue_select *)msg;
46813- struct i40e_pf *pf = vf->pf;
46814- i40e_status aq_ret = 0;
46815+ struct i40e_pf *pf = pci_get_drvdata(pdev);
46816+ struct i40e_vf *vf;
46817+ int ret = 0;
46818
46819- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
46820- aq_ret = I40E_ERR_PARAM;
46821- goto error_param;
46822- }
46823+ /* validate the request */
46824+ ret = i40e_validate_vf(pf, vf_id);
46825+ if (ret)
46826+ goto err_out;
46827+ vf = &pf->vf[vf_id];
46828+ *enable = vf->loopback;
46829+err_out:
46830+ return ret;
46831+}
46832
46833- if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
46834- aq_ret = I40E_ERR_PARAM;
46835- goto error_param;
46836- }
46837+/**
46838+ * i40e_set_loopback
46839+ * @pdev: PCI device information struct
46840+ * @vf_id: VF identifier
46841+ * @enable: enable or disable
46842+ *
46843+ * This function enables or disables loopback
46844+ *
46845+ * Returns 0 on success, negative on failure
46846+ **/
46847+static int i40e_set_loopback(struct pci_dev *pdev, int vf_id,
46848+ const bool enable)
46849+{
46850+ struct i40e_pf *pf = pci_get_drvdata(pdev);
46851+ struct i40e_vsi *vsi;
46852+ struct i40e_vf *vf;
46853+ int ret;
46854
46855- if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
46856- aq_ret = I40E_ERR_PARAM;
46857- goto error_param;
46858- }
46859+ /* validate the request */
46860+ ret = i40e_validate_vf(pf, vf_id);
46861+ if (ret)
46862+ goto err_out;
46863+ vf = &pf->vf[vf_id];
46864+ vsi = pf->vsi[vf->lan_vsi_idx];
46865+ ret = i40e_configure_vf_loopback(vsi, vf_id, enable);
46866+ if (!ret)
46867+ vf->loopback = enable;
46868+err_out:
46869+ return ret;
46870+}
46871
46872- i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
46873+/**
46874+ * i40e_get_vlan_strip
46875+ * @pdev: PCI device information struct
46876+ * @vf_id: VF identifier
46877+ * @enable: enable or disable
46878+ *
46879+ * This function checks whether VLAN stripping is enabled on the VF
46880+ *
46881+ * Returns 0 on success, negative on failure
46882+ **/
46883+static int i40e_get_vlan_strip(struct pci_dev *pdev, int vf_id, bool *enable)
46884+{
46885+ struct i40e_pf *pf = pci_get_drvdata(pdev);
46886+ struct i40e_vf *vf;
46887+ int ret;
46888
46889-error_param:
46890- /* send the response to the VF */
46891- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
46892- aq_ret);
46893+ /* validate the request */
46894+ ret = i40e_validate_vf(pf, vf_id);
46895+ if (ret)
46896+ goto err_out;
46897+ vf = &pf->vf[vf_id];
46898+ *enable = vf->vlan_stripping;
46899+err_out:
46900+ return ret;
46901 }
46902
46903 /**
46904- * i40e_vc_get_stats_msg
46905- * @vf: pointer to the VF info
46906- * @msg: pointer to the msg buffer
46907- * @msglen: msg length
46908+ * i40e_set_vlan_strip
46909+ * @pdev: PCI device information struct
46910+ * @vf_id: VF identifier
46911+ * @enable: enable/disable
46912 *
46913- * called from the VF to get vsi stats
46914+ * This function enables or disables VLAN stripping on a VF
46915+ *
46916+ * Returns 0 on success, negative on failure
46917 **/
46918-static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
46919+static int i40e_set_vlan_strip(struct pci_dev *pdev, int vf_id,
46920+ const bool enable)
46921 {
46922- struct virtchnl_queue_select *vqs =
46923- (struct virtchnl_queue_select *)msg;
46924- struct i40e_pf *pf = vf->pf;
46925- struct i40e_eth_stats stats;
46926- i40e_status aq_ret = 0;
46927+ struct i40e_pf *pf = pci_get_drvdata(pdev);
46928 struct i40e_vsi *vsi;
46929+ struct i40e_vf *vf;
46930+ int ret;
46931
46932- memset(&stats, 0, sizeof(struct i40e_eth_stats));
46933-
46934- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
46935- aq_ret = I40E_ERR_PARAM;
46936- goto error_param;
46937- }
46938+ /* validate the request */
46939+ ret = i40e_validate_vf(pf, vf_id);
46940+ if (ret)
46941+ goto err_out;
46942+ vf = &pf->vf[vf_id];
46943+ vsi = pf->vsi[vf->lan_vsi_idx];
46944+ ret = i40e_configure_vf_vlan_stripping(vsi, vf_id, enable);
46945+ if (!ret)
46946+ vf->vlan_stripping = enable;
46947+err_out:
46948+ return ret;
46949+}
46950
46951- if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
46952- aq_ret = I40E_ERR_PARAM;
46953- goto error_param;
46954- }
46955+/**
46956+ * i40e_reset_vf_stats
46957+ * @pdev: PCI device information struct
46958+ * @vf_id: VF identifier
46959+ *
46960+ * This function resets all the stats for the VF
46961+ *
46962+ * Returns 0 on success, negative on failure
46963+ **/
46964+static int i40e_reset_vf_stats(struct pci_dev *pdev, int vf_id)
46965+{
46966+ struct i40e_pf *pf = pci_get_drvdata(pdev);
46967+ struct i40e_vsi *vsi;
46968+ struct i40e_vf *vf;
46969+ int ret = 0;
46970
46971+ /* validate the request */
46972+ ret = i40e_validate_vf(pf, vf_id);
46973+ if (ret)
46974+ goto err_out;
46975+ vf = &pf->vf[vf_id];
46976 vsi = pf->vsi[vf->lan_vsi_idx];
46977- if (!vsi) {
46978- aq_ret = I40E_ERR_PARAM;
46979- goto error_param;
46980- }
46981- i40e_update_eth_stats(vsi);
46982- stats = vsi->eth_stats;
46983+ i40e_vsi_reset_stats(vsi);
46984
46985-error_param:
46986- /* send the response back to the VF */
46987- return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
46988- (u8 *)&stats, sizeof(stats));
46989+err_out:
46990+ return ret;
46991 }
46992
46993-/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
46994-#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
46995-#define I40E_VC_MAX_VLAN_PER_VF 8
46996+/**
46997+ * i40e_get_vf_bw_share
46998+ * @pdev: PCI device information struct
46999+ * @vf_id: VF identifier
47000+ * @bw_share: bw share of the VF
47001+ *
47002+ * This function retrieves the bw share configured for the VF
47003+ *
47004+ * Returns 0 on success, negative on failure
47005+ **/
47006+static int i40e_get_vf_bw_share(struct pci_dev *pdev, int vf_id, u8 *bw_share)
47007+{
47008+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47009+ struct i40e_vf *vf;
47010+ int ret = 0;
47011+
47012+ /* validate the request */
47013+ ret = i40e_validate_vf(pf, vf_id);
47014+ if (ret)
47015+ goto err_out;
47016+ vf = &pf->vf[vf_id];
47017+ if (vf->bw_share_applied)
47018+ *bw_share = vf->bw_share;
47019+ else
47020+ ret = -EINVAL;
47021+
47022+err_out:
47023+ return ret;
47024+}
47025
47026 /**
47027- * i40e_check_vf_permission
47028- * @vf: pointer to the VF info
47029- * @macaddr: pointer to the MAC Address being checked
47030+ * i40e_store_vf_bw_share
47031+ * @pdev: PCI device information struct
47032+ * @vf_id: VF identifier
47033+ * @bw_share: bw share of the VF
47034+ *
47035+ * This function stores the bw share configured for the VF
47036 *
47037- * Check if the VF has permission to add or delete unicast MAC address
47038- * filters and return error code -EPERM if not. Then check if the
47039- * address filter requested is broadcast or zero and if so return
47040- * an invalid MAC address error code.
47041+ * Returns 0 on success, negative on failure
47042 **/
47043-static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
47044+static int i40e_store_vf_bw_share(struct pci_dev *pdev, int vf_id, u8 bw_share)
47045 {
47046- struct i40e_pf *pf = vf->pf;
47047+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47048+ struct i40e_vf *vf;
47049 int ret = 0;
47050
47051- if (is_broadcast_ether_addr(macaddr) ||
47052- is_zero_ether_addr(macaddr)) {
47053- dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
47054- ret = I40E_ERR_INVALID_MAC_ADDR;
47055- } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
47056- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
47057- !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
47058- /* If the host VMM administrator has set the VF MAC address
47059- * administratively via the ndo_set_vf_mac command then deny
47060- * permission to the VF to add or delete unicast MAC addresses.
47061- * Unless the VF is privileged and then it can do whatever.
47062- * The VF may request to set the MAC address filter already
47063- * assigned to it so do not return an error in that case.
47064- */
47065- dev_err(&pf->pdev->dev,
47066- "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
47067- ret = -EPERM;
47068- } else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
47069- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
47070- dev_err(&pf->pdev->dev,
47071- "VF is not trusted, switch the VF to trusted to add more functionality\n");
47072- ret = -EPERM;
47073+ /* validate the request */
47074+ ret = i40e_validate_vf(pf, vf_id);
47075+ if (ret)
47076+ goto err_out;
47077+ vf = &pf->vf[vf_id];
47078+ vf->bw_share = bw_share;
47079+
47080+ /* this tracking bool is set to true when 'apply' attribute is used */
47081+ vf->bw_share_applied = false;
47082+ pf->vf_bw_applied = false;
47083+err_out:
47084+ return ret;
47085+}
47086+
47087+/**
47088+ * i40e_get_link_state
47089+ * @pdev: PCI device information struct
47090+ * @vf_id: VF identifier
47091+ * @enabled: link state
47092+ * @link_speed: link speed of the VF
47093+ *
47094+ * Gets the link status and the link speed
47095+ *
47096+ * Returns 0 on success, negative on failure
47097+ **/
47098+static int i40e_get_link_state(struct pci_dev *pdev, int vf_id, bool *enabled,
47099+ enum vfd_link_speed *link_speed)
47100+{
47101+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47102+ struct i40e_link_status *ls;
47103+ struct i40e_vf *vf;
47104+ int ret;
47105+
47106+ /* validate the request */
47107+ ret = i40e_validate_vf(pf, vf_id);
47108+ if (ret)
47109+ goto err_out;
47110+ vf = &pf->vf[vf_id];
47111+ ls = &pf->hw.phy.link_info;
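+	/* an administratively forced VF link state overrides the physical PF link status */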
47112+ if (vf->link_forced)
47113+ *enabled = vf->link_up;
47114+ else
47115+ *enabled = ls->link_info & I40E_AQ_LINK_UP;
47116+ switch (ls->link_speed) {
47117+ case I40E_LINK_SPEED_UNKNOWN:
47118+ *link_speed = VFD_LINK_SPEED_UNKNOWN;
47119+ break;
47120+ case I40E_LINK_SPEED_100MB:
47121+ *link_speed = VFD_LINK_SPEED_100MB;
47122+ break;
47123+ case I40E_LINK_SPEED_1GB:
47124+ *link_speed = VFD_LINK_SPEED_1GB;
47125+ break;
47126+ case I40E_LINK_SPEED_2_5GB:
47127+ *link_speed = VFD_LINK_SPEED_2_5GB;
47128+ break;
47129+ case I40E_LINK_SPEED_5GB:
47130+ *link_speed = VFD_LINK_SPEED_5GB;
47131+ break;
47132+ case I40E_LINK_SPEED_10GB:
47133+ *link_speed = VFD_LINK_SPEED_10GB;
47134+ break;
47135+ case I40E_LINK_SPEED_20GB:
47136+ *link_speed = VFD_LINK_SPEED_20GB;
47137+ break;
47138+ case I40E_LINK_SPEED_25GB:
47139+ *link_speed = VFD_LINK_SPEED_25GB;
47140+ break;
47141+ case I40E_LINK_SPEED_40GB:
47142+ *link_speed = VFD_LINK_SPEED_40GB;
47143+ break;
47144+ default:
47145+ *link_speed = VFD_LINK_SPEED_UNKNOWN;
47146 }
47147+err_out:
47148 return ret;
47149 }
47150
47151 /**
47152- * i40e_vc_add_mac_addr_msg
47153- * @vf: pointer to the VF info
47154- * @msg: pointer to the msg buffer
47155- * @msglen: msg length
47156+ * i40e_set_link_state
47157+ * @pdev: PCI device information struct
47158+ * @vf_id: VF identifier
47159+ * @link: the link state to configure
47160 *
47161- * add guest mac address filter
47162+ * Configures the link state for a VF
47163+ *
47164+ * Returns 0 on success, negative on failure
47165 **/
47166-static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
47167+static int i40e_set_link_state(struct pci_dev *pdev, int vf_id, const u8 link)
47168 {
47169- struct virtchnl_ether_addr_list *al =
47170- (struct virtchnl_ether_addr_list *)msg;
47171- struct i40e_pf *pf = vf->pf;
47172- struct i40e_vsi *vsi = NULL;
47173- u16 vsi_id = al->vsi_id;
47174- i40e_status ret = 0;
47175- int i;
47176+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47177+ struct i40e_vf *vf;
47178+ int ret;
47179
47180- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
47181- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
47182- ret = I40E_ERR_PARAM;
47183- goto error_param;
47184- }
47185+ /* validate the request */
47186+ ret = i40e_validate_vf(pf, vf_id);
47187+ if (ret)
47188+ goto error_out;
47189+ vf = &pf->vf[vf_id];
47190+ ret = i40e_configure_vf_link(vf, link);
47191+error_out:
47192+ return ret;
47193+}
47194
47195- for (i = 0; i < al->num_elements; i++) {
47196- ret = i40e_check_vf_permission(vf, al->list[i].addr);
47197- if (ret)
47198- goto error_param;
47199- }
47200- vsi = pf->vsi[vf->lan_vsi_idx];
47201+/**
47202+ * i40e_set_vf_enable
47203+ * @pdev: PCI device information struct
47204+ * @vf_id: VF identifier
47205+ * @enable: enable/disable
47206+ *
47207+ * This function enables or disables a VF
47208+ *
47209+ * Returns 0 on success, negative on failure
47210+ **/
47211+static int i40e_set_vf_enable(struct pci_dev *pdev, int vf_id, const bool enable)
47212+{
47213+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47214+ struct i40e_vsi *vsi;
47215+ unsigned long q_map;
47216+ struct i40e_vf *vf;
47217+ int ret;
47218
47219- /* Lock once, because all function inside for loop accesses VSI's
47220- * MAC filter list which needs to be protected using same lock.
47221- */
47222- spin_lock_bh(&vsi->mac_filter_hash_lock);
47223+ /* validate the request */
47224+ ret = i40e_validate_vf(pf, vf_id);
47225+ if (ret)
47226+ goto err_out;
47227+ vf = &pf->vf[vf_id];
47228+ vsi = pf->vsi[vf->lan_vsi_idx];
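+	/* build a bitmask with one bit set for each queue pair on the VF's VSI */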
47229+ q_map = BIT(vsi->num_queue_pairs) - 1;
47230
47231- /* add new addresses to the list */
47232- for (i = 0; i < al->num_elements; i++) {
47233- struct i40e_mac_filter *f;
47234+ /* force link down to prevent tx hangs */
47235+ if (!enable) {
47236+ ret = i40e_set_link_state(pdev, vf_id, VFD_LINKSTATE_OFF);
47237+ if (ret)
47238+ goto err_out;
47239+ vf->pf_ctrl_disable = true;
47240+ ret = i40e_ctrl_vf_tx_rings(vsi, q_map, enable);
47241+ if (ret)
47242+ goto err_out;
47243+ ret = i40e_ctrl_vf_rx_rings(vsi, q_map, enable);
47244+ if (ret)
47245+ goto err_out;
47246+ vf->queues_enabled = false;
47247+ } else {
47248+ /* Do nothing when there is no iavf driver loaded */
47249+ if (!test_bit(I40E_VF_STATE_LOADED_VF_DRIVER, &vf->vf_states))
47250+ goto err_out;
47251+ ret = i40e_ctrl_vf_rx_rings(vsi, q_map, enable);
47252+ if (ret)
47253+ goto err_out;
47254+ ret = i40e_ctrl_vf_tx_rings(vsi, q_map, enable);
47255+ if (ret)
47256+ goto err_out;
47257+ vf->queues_enabled = true;
47258+ vf->pf_ctrl_disable = false;
47259+		/* reset the VF so its resources are reinitialized */
47260+ i40e_vc_notify_vf_reset(vf);
47261+ i40e_reset_vf(vf, false);
47262+ ret = i40e_set_link_state(pdev, vf_id, VFD_LINKSTATE_AUTO);
47263+ }
47264
47265- f = i40e_find_mac(vsi, al->list[i].addr);
47266- if (!f)
47267- f = i40e_add_mac_filter(vsi, al->list[i].addr);
47268+err_out:
47269+ return ret;
47270+}
47271
47272- if (!f) {
47273- dev_err(&pf->pdev->dev,
47274- "Unable to add MAC filter %pM for VF %d\n",
47275- al->list[i].addr, vf->vf_id);
47276- ret = I40E_ERR_PARAM;
47277- spin_unlock_bh(&vsi->mac_filter_hash_lock);
47278- goto error_param;
47279- } else {
47280- vf->num_mac++;
47281- }
47282- }
47283- spin_unlock_bh(&vsi->mac_filter_hash_lock);
47284+static int i40e_get_vf_enable(struct pci_dev *pdev, int vf_id, bool *enable)
47285+{
47286+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47287+ struct i40e_vf *vf;
47288+ int ret;
47289
47290- /* program the updated filter list */
47291- ret = i40e_sync_vsi_filters(vsi);
47292+ /* validate the request */
47293+ ret = i40e_validate_vf(pf, vf_id);
47294 if (ret)
47295- dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
47296- vf->vf_id, ret);
47297-
47298-error_param:
47299- /* send the response to the VF */
47300- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
47301- ret);
47302+ return ret;
47303+ vf = &pf->vf[vf_id];
47304+ *enable = !vf->pf_ctrl_disable;
47305+ return 0;
47306 }
47307
47308 /**
47309- * i40e_vc_del_mac_addr_msg
47310- * @vf: pointer to the VF info
47311- * @msg: pointer to the msg buffer
47312- * @msglen: msg length
47313+ * i40e_get_rx_bytes
47314+ * @pdev: PCI device information struct
47315+ * @vf_id: VF identifier
47316+ * @rx_bytes: pointer to the caller's rx_bytes variable
47317 *
47318- * remove guest mac address filter
47319+ * This function gets the received bytes on the VF
47320+ *
47321+ * Returns 0 on success, negative on failure
47322 **/
47323-static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
47324+static int i40e_get_rx_bytes(struct pci_dev *pdev, int vf_id,
47325+ u64 *rx_bytes)
47326 {
47327- struct virtchnl_ether_addr_list *al =
47328- (struct virtchnl_ether_addr_list *)msg;
47329- struct i40e_pf *pf = vf->pf;
47330- struct i40e_vsi *vsi = NULL;
47331- u16 vsi_id = al->vsi_id;
47332- i40e_status ret = 0;
47333- int i;
47334-
47335- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
47336- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
47337- ret = I40E_ERR_PARAM;
47338- goto error_param;
47339- }
47340+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47341+ struct i40e_vsi *vsi;
47342+ struct i40e_vf *vf;
47343+ int ret;
47344
47345- for (i = 0; i < al->num_elements; i++) {
47346- if (is_broadcast_ether_addr(al->list[i].addr) ||
47347- is_zero_ether_addr(al->list[i].addr)) {
47348- dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
47349- al->list[i].addr, vf->vf_id);
47350- ret = I40E_ERR_INVALID_MAC_ADDR;
47351- goto error_param;
47352- }
47353- }
47354+ /* validate the request */
47355+ ret = i40e_validate_vf(pf, vf_id);
47356+ if (ret)
47357+ goto err_out;
47358+ vf = &pf->vf[vf_id];
47359 vsi = pf->vsi[vf->lan_vsi_idx];
47360+ i40e_update_eth_stats(vsi);
47361+ *rx_bytes = vsi->eth_stats.rx_bytes;
47362+err_out:
47363+ return ret;
47364+}
47365
47366- spin_lock_bh(&vsi->mac_filter_hash_lock);
47367- /* delete addresses from the list */
47368- for (i = 0; i < al->num_elements; i++)
47369- if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
47370- ret = I40E_ERR_INVALID_MAC_ADDR;
47371- spin_unlock_bh(&vsi->mac_filter_hash_lock);
47372- goto error_param;
47373- } else {
47374- vf->num_mac--;
47375- }
47376-
47377- spin_unlock_bh(&vsi->mac_filter_hash_lock);
47378+/**
47379+ * i40e_get_rx_dropped
47380+ * @pdev: PCI device information struct
47381+ * @vf_id: VF identifier
47382+ * @rx_dropped: pointer to the caller's rx_dropped variable
47383+ *
47384+ * This function gets the number of dropped receive packets on the VF
47385+ *
47386+ * Returns 0 on success, negative on failure
47387+ **/
47388+static int i40e_get_rx_dropped(struct pci_dev *pdev, int vf_id,
47389+ u64 *rx_dropped)
47390+{
47391+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47392+ struct i40e_vsi *vsi;
47393+ struct i40e_vf *vf;
47394+ int ret;
47395
47396- /* program the updated filter list */
47397- ret = i40e_sync_vsi_filters(vsi);
47398+ /* validate the request */
47399+ ret = i40e_validate_vf(pf, vf_id);
47400 if (ret)
47401- dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
47402- vf->vf_id, ret);
47403-
47404-error_param:
47405- /* send the response to the VF */
47406- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
47407- ret);
47408+ goto err_out;
47409+ vf = &pf->vf[vf_id];
47410+ vsi = pf->vsi[vf->lan_vsi_idx];
47411+ i40e_update_eth_stats(vsi);
47412+ *rx_dropped = vsi->eth_stats.rx_discards;
47413+err_out:
47414+ return ret;
47415 }
47416
47417 /**
47418- * i40e_vc_add_vlan_msg
47419- * @vf: pointer to the VF info
47420- * @msg: pointer to the msg buffer
47421- * @msglen: msg length
47422+ * i40e_get_rx_packets
47423+ * @pdev: PCI device information struct
47424+ * @vf_id: VF identifier
47425+ * @rx_packets: pointer to the caller's rx_packets variable
47426 *
47427- * program guest vlan id
47428+ * This function gets the number of packets received on the VF
47429+ *
47430+ * Returns 0 on success, negative on failure
47431 **/
47432-static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
47433+static int i40e_get_rx_packets(struct pci_dev *pdev, int vf_id,
47434+ u64 *rx_packets)
47435 {
47436- struct virtchnl_vlan_filter_list *vfl =
47437- (struct virtchnl_vlan_filter_list *)msg;
47438- struct i40e_pf *pf = vf->pf;
47439- struct i40e_vsi *vsi = NULL;
47440- u16 vsi_id = vfl->vsi_id;
47441- i40e_status aq_ret = 0;
47442- int i;
47443-
47444- if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
47445- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
47446- dev_err(&pf->pdev->dev,
47447- "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
47448- goto error_param;
47449- }
47450- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
47451- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
47452- aq_ret = I40E_ERR_PARAM;
47453- goto error_param;
47454- }
47455+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47456+ struct i40e_vsi *vsi;
47457+ struct i40e_vf *vf;
47458+ int ret;
47459
47460- for (i = 0; i < vfl->num_elements; i++) {
47461- if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
47462- aq_ret = I40E_ERR_PARAM;
47463- dev_err(&pf->pdev->dev,
47464- "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
47465- goto error_param;
47466- }
47467- }
47468+ /* validate the request */
47469+ ret = i40e_validate_vf(pf, vf_id);
47470+ if (ret)
47471+ goto err_out;
47472+ vf = &pf->vf[vf_id];
47473 vsi = pf->vsi[vf->lan_vsi_idx];
47474- if (vsi->info.pvid) {
47475- aq_ret = I40E_ERR_PARAM;
47476- goto error_param;
47477- }
47478-
47479- i40e_vlan_stripping_enable(vsi);
47480- for (i = 0; i < vfl->num_elements; i++) {
47481- /* add new VLAN filter */
47482- int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
47483- if (!ret)
47484- vf->num_vlan++;
47485-
47486- if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
47487- i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
47488- true,
47489- vfl->vlan_id[i],
47490- NULL);
47491- if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
47492- i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
47493- true,
47494- vfl->vlan_id[i],
47495- NULL);
47496+ i40e_update_eth_stats(vsi);
47497+ *rx_packets = vsi->eth_stats.rx_unicast + vsi->eth_stats.rx_multicast +
47498+ vsi->eth_stats.rx_broadcast;
47499+err_out:
47500+ return ret;
47501+}
47502
47503- if (ret)
47504- dev_err(&pf->pdev->dev,
47505- "Unable to add VLAN filter %d for VF %d, error %d\n",
47506- vfl->vlan_id[i], vf->vf_id, ret);
47507- }
47508+/**
47509+ * i40e_get_tx_bytes
47510+ * @pdev: PCI device information struct
47511+ * @vf_id: VF identifier
47512+ * @tx_bytes: pointer to the caller's tx_bytes variable
47513+ *
47514+ * This function gets the transmitted bytes by the VF
47515+ *
47516+ * Returns 0 on success, negative on failure
47517+ **/
47518+static int i40e_get_tx_bytes(struct pci_dev *pdev, int vf_id,
47519+ u64 *tx_bytes)
47520+{
47521+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47522+ struct i40e_vsi *vsi;
47523+ struct i40e_vf *vf;
47524+ int ret;
47525
47526-error_param:
47527- /* send the response to the VF */
47528- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
47529+ /* validate the request */
47530+ ret = i40e_validate_vf(pf, vf_id);
47531+ if (ret)
47532+ goto err_out;
47533+ vf = &pf->vf[vf_id];
47534+ vsi = pf->vsi[vf->lan_vsi_idx];
47535+ i40e_update_eth_stats(vsi);
47536+ *tx_bytes = vsi->eth_stats.tx_bytes;
47537+err_out:
47538+ return ret;
47539 }
47540
47541 /**
47542- * i40e_vc_remove_vlan_msg
47543- * @vf: pointer to the VF info
47544- * @msg: pointer to the msg buffer
47545- * @msglen: msg length
47546+ * i40e_get_tx_dropped
47547+ * @pdev: PCI device information struct
47548+ * @vf_id: VF identifier
47549+ * @tx_dropped: pointer to the caller's tx_dropped variable
47550 *
47551- * remove programmed guest vlan id
47552+ * This function gets the number of dropped transmit packets on the VF
47553+ *
47554+ * Returns 0 on success, negative on failure
47555 **/
47556-static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
47557+static int i40e_get_tx_dropped(struct pci_dev *pdev, int vf_id,
47558+ u64 *tx_dropped)
47559 {
47560- struct virtchnl_vlan_filter_list *vfl =
47561- (struct virtchnl_vlan_filter_list *)msg;
47562- struct i40e_pf *pf = vf->pf;
47563- struct i40e_vsi *vsi = NULL;
47564- u16 vsi_id = vfl->vsi_id;
47565- i40e_status aq_ret = 0;
47566- int i;
47567-
47568- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
47569- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
47570- aq_ret = I40E_ERR_PARAM;
47571- goto error_param;
47572- }
47573-
47574- for (i = 0; i < vfl->num_elements; i++) {
47575- if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
47576- aq_ret = I40E_ERR_PARAM;
47577- goto error_param;
47578- }
47579- }
47580+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47581+ struct i40e_vsi *vsi;
47582+ struct i40e_vf *vf;
47583+ int ret;
47584
47585+ /* validate the request */
47586+ ret = i40e_validate_vf(pf, vf_id);
47587+ if (ret)
47588+ goto err_out;
47589+ vf = &pf->vf[vf_id];
47590 vsi = pf->vsi[vf->lan_vsi_idx];
47591- if (vsi->info.pvid) {
47592- aq_ret = I40E_ERR_PARAM;
47593- goto error_param;
47594- }
47595-
47596- for (i = 0; i < vfl->num_elements; i++) {
47597- i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
47598- vf->num_vlan--;
47599-
47600- if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
47601- i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
47602- false,
47603- vfl->vlan_id[i],
47604- NULL);
47605- if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
47606- i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
47607- false,
47608- vfl->vlan_id[i],
47609- NULL);
47610- }
47611-
47612-error_param:
47613- /* send the response to the VF */
47614- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
47615+ i40e_update_eth_stats(vsi);
47616+ *tx_dropped = vsi->eth_stats.tx_discards;
47617+err_out:
47618+ return ret;
47619 }
47620
47621 /**
47622- * i40e_vc_iwarp_msg
47623- * @vf: pointer to the VF info
47624- * @msg: pointer to the msg buffer
47625- * @msglen: msg length
47626+ * i40e_get_tx_packets
47627+ * @pdev: PCI device information struct
47628+ * @vf_id: VF identifier
47629+ * @tx_packets: pointer to the caller's tx_packets variable
47630 *
47631- * called from the VF for the iwarp msgs
47632+ * This function gets the number of packets transmitted by the VF
47633+ *
47634+ * Returns 0 on success, negative on failure
47635 **/
47636-static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
47637+static int i40e_get_tx_packets(struct pci_dev *pdev, int vf_id,
47638+ u64 *tx_packets)
47639 {
47640- struct i40e_pf *pf = vf->pf;
47641- int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
47642- i40e_status aq_ret = 0;
47643-
47644- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
47645- !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
47646- aq_ret = I40E_ERR_PARAM;
47647- goto error_param;
47648- }
47649-
47650- i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
47651- msg, msglen);
47652+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47653+ struct i40e_vsi *vsi;
47654+ struct i40e_vf *vf;
47655+ int ret;
47656
47657-error_param:
47658- /* send the response to the VF */
47659- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
47660- aq_ret);
47661+ /* validate the request */
47662+ ret = i40e_validate_vf(pf, vf_id);
47663+ if (ret)
47664+ goto err_out;
47665+ vf = &pf->vf[vf_id];
47666+ vsi = pf->vsi[vf->lan_vsi_idx];
47667+ i40e_update_eth_stats(vsi);
47668+ *tx_packets = vsi->eth_stats.tx_unicast + vsi->eth_stats.tx_multicast +
47669+ vsi->eth_stats.tx_broadcast;
47670+err_out:
47671+ return ret;
47672 }
47673
47674 /**
47675- * i40e_vc_iwarp_qvmap_msg
47676- * @vf: pointer to the VF info
47677- * @msg: pointer to the msg buffer
47678- * @msglen: msg length
47679- * @config: config qvmap or release it
47680+ * i40e_get_tx_errors
47681+ * @pdev: PCI device information struct
47682+ * @vf_id: VF identifier
47683+ * @tx_errors: pointer to the caller's tx_errors variable
47684 *
47685- * called from the VF for the iwarp msgs
47686+ * This function gets the number of transmit errors on the VF
47687+ *
47688+ * Returns 0 on success, negative on failure
47689 **/
47690-static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
47691- bool config)
47692+static int i40e_get_tx_errors(struct pci_dev *pdev, int vf_id,
47693+ u64 *tx_errors)
47694 {
47695- struct virtchnl_iwarp_qvlist_info *qvlist_info =
47696- (struct virtchnl_iwarp_qvlist_info *)msg;
47697- i40e_status aq_ret = 0;
47698-
47699- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
47700- !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
47701- aq_ret = I40E_ERR_PARAM;
47702- goto error_param;
47703- }
47704-
47705- if (config) {
47706- if (i40e_config_iwarp_qvlist(vf, qvlist_info))
47707- aq_ret = I40E_ERR_PARAM;
47708- } else {
47709- i40e_release_iwarp_qvlist(vf);
47710- }
47711+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47712+ struct i40e_vsi *vsi;
47713+ struct i40e_vf *vf;
47714+ int ret;
47715
47716-error_param:
47717- /* send the response to the VF */
47718- return i40e_vc_send_resp_to_vf(vf,
47719- config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
47720- VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
47721- aq_ret);
47722+ /* validate the request */
47723+ ret = i40e_validate_vf(pf, vf_id);
47724+ if (ret)
47725+ goto err_out;
47726+ vf = &pf->vf[vf_id];
47727+ vsi = pf->vsi[vf->lan_vsi_idx];
47728+ i40e_update_eth_stats(vsi);
47729+ *tx_errors = vsi->eth_stats.tx_errors;
47730+err_out:
47731+ return ret;
47732 }
47733
47734 /**
47735- * i40e_vc_config_rss_key
47736- * @vf: pointer to the VF info
47737- * @msg: pointer to the msg buffer
47738- * @msglen: msg length
47739+ * i40e_get_mac
47740+ * @pdev: PCI device information struct
47741+ * @vf_id: VF identifier
47742+ * @mac: the default mac address
47743 *
47744- * Configure the VF's RSS key
47745+ * This function gets the default mac address
47746+ *
47747+ * Returns 0 on success, negative on failure
47748 **/
47749-static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
47750+static int i40e_get_mac(struct pci_dev *pdev, int vf_id, u8 *mac)
47751 {
47752- struct virtchnl_rss_key *vrk =
47753- (struct virtchnl_rss_key *)msg;
47754- struct i40e_pf *pf = vf->pf;
47755- struct i40e_vsi *vsi = NULL;
47756- u16 vsi_id = vrk->vsi_id;
47757- i40e_status aq_ret = 0;
47758-
47759- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
47760- !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
47761- (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
47762- aq_ret = I40E_ERR_PARAM;
47763- goto err;
47764- }
47765+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47766+ struct i40e_vf *vf;
47767+ int ret;
47768
47769- vsi = pf->vsi[vf->lan_vsi_idx];
47770- aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
47771-err:
47772- /* send the response to the VF */
47773- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
47774- aq_ret);
47775+ /* validate the request */
47776+ ret = i40e_validate_vf(pf, vf_id);
47777+ if (ret)
47778+ goto err_out;
47779+ vf = &pf->vf[vf_id];
47780+ ether_addr_copy(mac, vf->default_lan_addr.addr);
47781+err_out:
47782+ return ret;
47783 }
47784
47785 /**
47786- * i40e_vc_config_rss_lut
47787- * @vf: pointer to the VF info
47788- * @msg: pointer to the msg buffer
47789- * @msglen: msg length
47790+ * i40e_set_mac
47791+ * @pdev: PCI device information struct
47792+ * @vf_id: VF identifier
47793+ * @mac: the default mac address to set
47794 *
47795- * Configure the VF's RSS LUT
47796+ * This function sets the default mac address for the VF
47797+ *
47798+ * Returns 0 on success, negative on failure
47799 **/
47800-static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
47801+static int i40e_set_mac(struct pci_dev *pdev, int vf_id, const u8 *mac)
47802 {
47803- struct virtchnl_rss_lut *vrl =
47804- (struct virtchnl_rss_lut *)msg;
47805- struct i40e_pf *pf = vf->pf;
47806- struct i40e_vsi *vsi = NULL;
47807- u16 vsi_id = vrl->vsi_id;
47808- i40e_status aq_ret = 0;
47809-
47810- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
47811- !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
47812- (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
47813- aq_ret = I40E_ERR_PARAM;
47814- goto err;
47815- }
47816+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47817+ struct i40e_vsi *vsi;
47818+ struct i40e_vf *vf;
47819+ int ret;
47820
47821+ /* validate the request */
47822+ ret = i40e_validate_vf(pf, vf_id);
47823+ if (ret)
47824+ goto err_out;
47825+ vf = &pf->vf[vf_id];
47826 vsi = pf->vsi[vf->lan_vsi_idx];
47827- aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
47828- /* send the response to the VF */
47829-err:
47830- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
47831- aq_ret);
47832+ ret = i40e_set_vf_mac(vf, vsi, mac);
47833+err_out:
47834+ return ret;
47835 }
47836
47837 /**
47838- * i40e_vc_get_rss_hena
47839- * @vf: pointer to the VF info
47840- * @msg: pointer to the msg buffer
47841- * @msglen: msg length
47842+ * i40e_get_promisc
47843+ * @pdev: PCI device information struct
47844+ * @vf_id: VF identifier
47845+ * @promisc_mode: current promiscuous mode
47846 *
47847- * Return the RSS HENA bits allowed by the hardware
47848+ * This function gets the current promiscuous mode configuration.
47849+ *
47850+ * Returns 0 on success, negative on failure
47851 **/
47852-static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
47853+static int i40e_get_promisc(struct pci_dev *pdev, int vf_id, u8 *promisc_mode)
47854 {
47855- struct virtchnl_rss_hena *vrh = NULL;
47856- struct i40e_pf *pf = vf->pf;
47857- i40e_status aq_ret = 0;
47858- int len = 0;
47859-
47860- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
47861- aq_ret = I40E_ERR_PARAM;
47862- goto err;
47863- }
47864- len = sizeof(struct virtchnl_rss_hena);
47865+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47866+ struct i40e_vf *vf;
47867+ int ret;
47868
47869- vrh = kzalloc(len, GFP_KERNEL);
47870- if (!vrh) {
47871- aq_ret = I40E_ERR_NO_MEMORY;
47872- len = 0;
47873- goto err;
47874- }
47875- vrh->hena = i40e_pf_get_default_rss_hena(pf);
47876-err:
47877- /* send the response back to the VF */
47878- aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
47879- aq_ret, (u8 *)vrh, len);
47880- kfree(vrh);
47881- return aq_ret;
47882+ /* validate the request */
47883+ ret = i40e_validate_vf(pf, vf_id);
47884+ if (ret)
47885+ goto err_out;
47886+ vf = &pf->vf[vf_id];
47887+ *promisc_mode = vf->promisc_mode;
47888+err_out:
47889+ return ret;
47890 }
47891
47892 /**
47893- * i40e_vc_set_rss_hena
47894- * @vf: pointer to the VF info
47895- * @msg: pointer to the msg buffer
47896- * @msglen: msg length
47897+ * i40e_set_promisc
47898+ * @pdev: PCI device information struct
47899+ * @vf_id: VF identifier
47900+ * @promisc_mode: promiscuous mode to be set
47901 *
47902- * Set the RSS HENA bits for the VF
47903+ * This function sets the promiscuous mode configuration.
47904+ *
47905+ * Returns 0 on success, negative on failure
47906 **/
47907-static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
47908+static int i40e_set_promisc(struct pci_dev *pdev, int vf_id,
47909+ const u8 promisc_mode)
47910 {
47911- struct virtchnl_rss_hena *vrh =
47912- (struct virtchnl_rss_hena *)msg;
47913- struct i40e_pf *pf = vf->pf;
47914- struct i40e_hw *hw = &pf->hw;
47915- i40e_status aq_ret = 0;
47916+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47917+ struct i40e_vsi *vsi;
47918+ struct i40e_vf *vf;
47919+ int ret;
47920
47921- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
47922- aq_ret = I40E_ERR_PARAM;
47923+ /* validate the request */
47924+ ret = i40e_validate_vf(pf, vf_id);
47925+ if (ret)
47926 goto err;
47927- }
47928- i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
47929- i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
47930- (u32)(vrh->hena >> 32));
47931-
47932- /* send the response to the VF */
47933+ vf = &pf->vf[vf_id];
47934+ vsi = pf->vsi[vf->lan_vsi_idx];
47935+ ret = i40e_configure_vf_promisc_mode(vf, vsi, promisc_mode);
47936 err:
47937- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
47938+ return ret;
47939 }
47940
47941 /**
47942- * i40e_vc_enable_vlan_stripping
47943- * @vf: pointer to the VF info
47944- * @msg: pointer to the msg buffer
47945- * @msglen: msg length
47946+ * i40e_get_ingress_mirror - Gets the configured ingress mirror
47947+ * @pdev: PCI device information struct
47948+ * @vf_id: VF identifier
47949+ * @mirror: pointer to return the ingress mirror
47950 *
47951- * Enable vlan header stripping for the VF
47952+ * Gets the ingress mirror configured
47953+ *
47954+ * Returns 0 on success, negative on failure
47955 **/
47956-static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
47957- u16 msglen)
47958+static int i40e_get_ingress_mirror(struct pci_dev *pdev, int vf_id, int *mirror)
47959 {
47960- struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
47961- i40e_status aq_ret = 0;
47962-
47963- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
47964- aq_ret = I40E_ERR_PARAM;
47965- goto err;
47966- }
47967-
47968- i40e_vlan_stripping_enable(vsi);
47969+ struct i40e_pf *pf = pci_get_drvdata(pdev);
47970+ struct i40e_vf *vf;
47971+ int ret;
47972
47973- /* send the response to the VF */
47974-err:
47975- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
47976- aq_ret);
47977+ /* validate the request */
47978+ ret = i40e_validate_vf(pf, vf_id);
47979+ if (ret)
47980+ goto err_out;
47981+ vf = &pf->vf[vf_id];
47982+ *mirror = vf->ingress_vlan;
47983+err_out:
47984+ return ret;
47985 }
47986
47987 /**
47988- * i40e_vc_disable_vlan_stripping
47989- * @vf: pointer to the VF info
47990- * @msg: pointer to the msg buffer
47991- * @msglen: msg length
47992+ * i40e_set_ingress_mirror - Configure ingress mirror
47993+ * @pdev: PCI device information struct
47994+ * @vf_id: VF identifier
47995+ * @mirror: mirror vf
47996 *
47997- * Disable vlan header stripping for the VF
47998+ * Configures the ingress mirror
47999+ *
48000+ * Returns 0 on success, negative on failure
48001 **/
48002-static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
48003- u16 msglen)
48004+static int i40e_set_ingress_mirror(struct pci_dev *pdev, int vf_id,
48005+ const int mirror)
48006 {
48007- struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
48008- i40e_status aq_ret = 0;
48009+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48010+ struct i40e_vsi *src_vsi, *mirror_vsi;
48011+ struct i40e_vf *vf, *mirror_vf;
48012+ u16 rule_type, rule_id;
48013+ int ret;
48014
48015- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
48016- aq_ret = I40E_ERR_PARAM;
48017- goto err;
48018- }
48019+ /* validate the request */
48020+ ret = i40e_validate_vf(pf, vf_id);
48021+ if (ret)
48022+ goto err_out;
48023+ vf = &pf->vf[vf_id];
48024
48025- i40e_vlan_stripping_disable(vsi);
48026+ /* The Admin Queue mirroring rules refer to the traffic
48027+ * directions from the perspective of the switch, not the VSI
48028+ * we apply the mirroring rule on - so the behaviour of a VSI
48029+ * ingress mirror is classified as an egress rule
48030+ */
48031+ rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS;
48032+ src_vsi = pf->vsi[vf->lan_vsi_idx];
48033+ if (mirror == I40E_NO_VF_MIRROR) {
48034+ /* Del mirrors */
48035+ rule_id = vf->ingress_rule_id;
48036+ ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type,
48037+ rule_id);
48038+ if (ret)
48039+ goto err_out;
48040+ vf->ingress_vlan = I40E_NO_VF_MIRROR;
48041+ } else {
48042+ /* validate the mirror */
48043+ ret = i40e_validate_vf(pf, mirror);
48044+ if (ret)
48045+ goto err_out;
48046+ mirror_vf = &pf->vf[mirror];
48047+ mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx];
48048
48049- /* send the response to the VF */
48050-err:
48051- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
48052- aq_ret);
48053+ /* Add mirrors */
48054+ ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi,
48055+ rule_type, &rule_id);
48056+ if (ret)
48057+ goto err_out;
48058+ vf->ingress_vlan = mirror;
48059+ vf->ingress_rule_id = rule_id;
48060+ }
48061+err_out:
48062+ return ret;
48063 }
48064
48065 /**
48066- * i40e_vc_process_vf_msg
48067- * @pf: pointer to the PF structure
48068- * @vf_id: source VF id
48069- * @msg: pointer to the msg buffer
48070- * @msglen: msg length
48071- * @msghndl: msg handle
48072+ * i40e_get_egress_mirror - Gets the configured egress mirror
48073+ * @pdev: PCI device information struct
48074+ * @vf_id: VF identifier
48075+ * @mirror: pointer to return the egress mirror
48076 *
48077- * called from the common aeq/arq handler to
48078- * process request from VF
48079+ * Gets the egress mirror configured
48080+ *
48081+ * Returns 0 on success, negative on failure
48082 **/
48083-int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
48084- u32 v_retval, u8 *msg, u16 msglen)
48085+static int i40e_get_egress_mirror(struct pci_dev *pdev, int vf_id, int *mirror)
48086 {
48087- struct i40e_hw *hw = &pf->hw;
48088- int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
48089+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48090 struct i40e_vf *vf;
48091 int ret;
48092
48093- pf->vf_aq_requests++;
48094- if (local_vf_id >= pf->num_alloc_vfs)
48095- return -EINVAL;
48096- vf = &(pf->vf[local_vf_id]);
48097-
48098- /* Check if VF is disabled. */
48099- if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
48100- return I40E_ERR_PARAM;
48101-
48102- /* perform basic checks on the msg */
48103- ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
48104+ /* validate the request */
48105+ ret = i40e_validate_vf(pf, vf_id);
48106+ if (ret)
48107+ goto err_out;
48108+ vf = &pf->vf[vf_id];
48109+ *mirror = vf->egress_vlan;
48110+err_out:
48111+ return ret;
48112+}
48113
48114- /* perform additional checks specific to this driver */
48115- if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
48116- struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
48117+/**
48118+ * i40e_set_egress_mirror - Configure egress mirror
48119+ * @pdev: PCI device information struct
48120+ * @vf_id: VF identifier
48121+ * @mirror: mirror vf
48122+ *
48123+ * Configures the egress mirror
48124+ *
48125+ * Returns 0 on success, negative on failure
48126+ **/
48127+static int i40e_set_egress_mirror(struct pci_dev *pdev, int vf_id,
48128+ const int mirror)
48129+{
48130+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48131+ struct i40e_vsi *src_vsi, *mirror_vsi;
48132+ struct i40e_vf *vf, *mirror_vf;
48133+ u16 rule_type, rule_id;
48134+ int ret;
48135
48136- if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
48137- ret = -EINVAL;
48138- } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
48139- struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
48140+ /* validate the request */
48141+ ret = i40e_validate_vf(pf, vf_id);
48142+ if (ret)
48143+ goto err_out;
48144+ vf = &pf->vf[vf_id];
48145
48146- if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
48147- ret = -EINVAL;
48148- }
48149+ /* The Admin Queue mirroring rules refer to the traffic
48150+ * directions from the perspective of the switch, not the VSI
48151+ * we apply the mirroring rule on - so the behaviour of a VSI
48152+ * egress mirror is classified as an ingress rule
48153+ */
48154+ rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
48155+ src_vsi = pf->vsi[vf->lan_vsi_idx];
48156+ if (mirror == I40E_NO_VF_MIRROR) {
48157+ /* Del mirrors */
48158+ rule_id = vf->egress_rule_id;
48159+ ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type,
48160+ rule_id);
48161+ if (ret)
48162+ goto err_out;
48163+ vf->egress_vlan = I40E_NO_VF_MIRROR;
48164+ } else {
48165+ /* validate the mirror */
48166+ ret = i40e_validate_vf(pf, mirror);
48167+ if (ret)
48168+ goto err_out;
48169+ mirror_vf = &pf->vf[mirror];
48170+ mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx];
48171
48172- if (ret) {
48173- i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
48174- dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
48175- local_vf_id, v_opcode, msglen);
48176- switch (ret) {
48177- case VIRTCHNL_ERR_PARAM:
48178- return -EPERM;
48179- default:
48180- return -EINVAL;
48181- }
48182+ /* Add mirrors */
48183+ ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi,
48184+ rule_type, &rule_id);
48185+ if (ret)
48186+ goto err_out;
48187+ vf->egress_vlan = mirror;
48188+ vf->egress_rule_id = rule_id;
48189 }
48190+err_out:
48191+ return ret;
48192+}
48193
48194- switch (v_opcode) {
48195- case VIRTCHNL_OP_VERSION:
48196- ret = i40e_vc_get_version_msg(vf, msg);
48197- break;
48198- case VIRTCHNL_OP_GET_VF_RESOURCES:
48199- ret = i40e_vc_get_vf_resources_msg(vf, msg);
48200- break;
48201- case VIRTCHNL_OP_RESET_VF:
48202- i40e_vc_reset_vf_msg(vf);
48203- ret = 0;
48204- break;
48205- case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
48206- ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
48207- break;
48208- case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
48209- ret = i40e_vc_config_queues_msg(vf, msg, msglen);
48210- break;
48211- case VIRTCHNL_OP_CONFIG_IRQ_MAP:
48212- ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
48213- break;
48214- case VIRTCHNL_OP_ENABLE_QUEUES:
48215- ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
48216- i40e_vc_notify_vf_link_state(vf);
48217- break;
48218- case VIRTCHNL_OP_DISABLE_QUEUES:
48219- ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
48220- break;
48221- case VIRTCHNL_OP_ADD_ETH_ADDR:
48222- ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
48223- break;
48224- case VIRTCHNL_OP_DEL_ETH_ADDR:
48225- ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
48226- break;
48227- case VIRTCHNL_OP_ADD_VLAN:
48228- ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
48229- break;
48230- case VIRTCHNL_OP_DEL_VLAN:
48231- ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
48232- break;
48233- case VIRTCHNL_OP_GET_STATS:
48234- ret = i40e_vc_get_stats_msg(vf, msg, msglen);
48235- break;
48236- case VIRTCHNL_OP_IWARP:
48237- ret = i40e_vc_iwarp_msg(vf, msg, msglen);
48238- break;
48239- case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
48240- ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
48241- break;
48242- case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
48243- ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
48244- break;
48245- case VIRTCHNL_OP_CONFIG_RSS_KEY:
48246- ret = i40e_vc_config_rss_key(vf, msg, msglen);
48247- break;
48248- case VIRTCHNL_OP_CONFIG_RSS_LUT:
48249- ret = i40e_vc_config_rss_lut(vf, msg, msglen);
48250- break;
48251- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
48252- ret = i40e_vc_get_rss_hena(vf, msg, msglen);
48253- break;
48254- case VIRTCHNL_OP_SET_RSS_HENA:
48255- ret = i40e_vc_set_rss_hena(vf, msg, msglen);
48256- break;
48257- case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
48258- ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
48259- break;
48260- case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
48261- ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
48262- break;
48263+/*
48264+ * i40e_get_mac_list
48265+ * @pdev: PCI device information struct
48266+ * @vf_id: VF identifier
48267+ * @mac_list: list of MAC addresses
48268+ *
48269+ * This function returns the list of MAC addresses configured on the VF. It is
48270+ * the responsibility of the caller to free the allocated list when finished.
48271+ *
48272+ * Returns 0 on success, negative on failure
48273+ */
48274+static int i40e_get_mac_list(struct pci_dev *pdev, int vf_id,
48275+ struct list_head *mac_list)
48276+{
48277+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48278+ struct i40e_mac_filter *f;
48279+ struct vfd_macaddr *elem;
48280+ struct i40e_vsi *vsi;
48281+ struct i40e_vf *vf;
48282+ int ret, bkt;
48283
48284- case VIRTCHNL_OP_UNKNOWN:
48285- default:
48286- dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
48287- v_opcode, local_vf_id);
48288- ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
48289- I40E_ERR_NOT_IMPLEMENTED);
48290- break;
48291+ /* validate the request */
48292+ ret = i40e_validate_vf(pf, vf_id);
48293+ if (ret)
48294+ goto error_out;
48295+ vf = &pf->vf[vf_id];
48296+ vsi = pf->vsi[vf->lan_vsi_idx];
48297+ spin_lock_bh(&vsi->mac_filter_hash_lock);
48298+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
48299+ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
48300+ if (!elem) {
48301+ ret = -ENOMEM;
48302+ goto error_unlock;
48303+ }
48304+ INIT_LIST_HEAD(&elem->list);
48305+ ether_addr_copy(elem->mac, f->macaddr);
48306+ list_add_tail(&elem->list, mac_list);
48307 }
48308-
48309+error_unlock:
48310+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
48311+error_out:
48312 return ret;
48313 }
48314
48315-/**
48316- * i40e_vc_process_vflr_event
48317- * @pf: pointer to the PF structure
48318+/*
48319+ * i40e_add_macs_to_list
48320+ * @pdev: PCI device information struct
48321+ * @vf_id: VF identifier
48322+ * @mac_list: list of MAC addresses to add
48323 *
48324- * called from the vlfr irq handler to
48325- * free up VF resources and state variables
48326- **/
48327-int i40e_vc_process_vflr_event(struct i40e_pf *pf)
48328+ * This function adds a list of MAC addresses to a VF
48329+ *
48330+ * Returns 0 on success, negative on failure
48331+ */
48332+static int i40e_add_macs_to_list(struct pci_dev *pdev, int vf_id,
48333+ struct list_head *mac_list)
48334 {
48335- struct i40e_hw *hw = &pf->hw;
48336- u32 reg, reg_idx, bit_idx;
48337+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48338+ struct vfd_macaddr *tmp;
48339+ struct i40e_vsi *vsi;
48340 struct i40e_vf *vf;
48341- int vf_id;
48342-
48343- if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
48344- return 0;
48345+ int ret;
48346
48347- /* Re-enable the VFLR interrupt cause here, before looking for which
48348- * VF got reset. Otherwise, if another VF gets a reset while the
48349- * first one is being processed, that interrupt will be lost, and
48350- * that VF will be stuck in reset forever.
48351- */
48352- reg = rd32(hw, I40E_PFINT_ICR0_ENA);
48353- reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
48354- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
48355- i40e_flush(hw);
48356+ /* validate the request */
48357+ ret = i40e_validate_vf(pf, vf_id);
48358+ if (ret)
48359+ goto error_out;
48360+ vf = &pf->vf[vf_id];
48361+ vsi = pf->vsi[vf->lan_vsi_idx];
48362+ spin_lock_bh(&vsi->mac_filter_hash_lock);
48363+ list_for_each_entry(tmp, mac_list, list) {
48364+ struct i40e_mac_filter *f;
48365
48366- clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
48367- for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
48368- reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
48369- bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
48370- /* read GLGEN_VFLRSTAT register to find out the flr VFs */
48371- vf = &pf->vf[vf_id];
48372- reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
48373- if (reg & BIT(bit_idx))
48374- /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
48375- i40e_reset_vf(vf, true);
48376+ f = i40e_find_mac(vsi, tmp->mac);
48377+ if (!f) {
48378+ f = i40e_add_mac_filter(vsi, tmp->mac);
48379+ if (!f) {
48380+ dev_err(&pf->pdev->dev,
48381+ "Unable to add MAC filter %pM for VF %d\n",
48382+ tmp->mac, vf->vf_id);
48383+ ret = I40E_ERR_PARAM;
48384+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
48385+ goto error_out;
48386+ }
48387+ }
48388 }
48389+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
48390
48391- return 0;
48392+ /* program the updated filter list */
48393+ ret = i40e_sync_vsi_filters(vsi);
48394+ if (ret)
48395+ dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
48396+ vf->vf_id, ret);
48397+error_out:
48398+ return ret;
48399 }
48400
48401-/**
48402- * i40e_ndo_set_vf_mac
48403- * @netdev: network interface device structure
48404+/*
48405+ * i40e_rem_macs_from_list
48406+ * @pdev: PCI device information struct
48407 * @vf_id: VF identifier
48408- * @mac: mac address
48409+ * @mac_list: list of MAC addresses to remove
48410 *
48411- * program VF mac address
48412- **/
48413-int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
48414+ * This function removes a list of MAC addresses from a VF
48415+ *
48416+ * Returns 0 on success, negative on failure
48417+ */
48418+static int i40e_rem_macs_from_list(struct pci_dev *pdev, int vf_id,
48419+ struct list_head *mac_list)
48420 {
48421- struct i40e_netdev_priv *np = netdev_priv(netdev);
48422- struct i40e_vsi *vsi = np->vsi;
48423- struct i40e_pf *pf = vsi->back;
48424- struct i40e_mac_filter *f;
48425+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48426+ struct vfd_macaddr *tmp;
48427+ struct i40e_vsi *vsi;
48428 struct i40e_vf *vf;
48429- int ret = 0;
48430- int bkt;
48431+ int ret;
48432
48433 /* validate the request */
48434- if (vf_id >= pf->num_alloc_vfs) {
48435- dev_err(&pf->pdev->dev,
48436- "Invalid VF Identifier %d\n", vf_id);
48437- ret = -EINVAL;
48438- goto error_param;
48439- }
48440-
48441- vf = &(pf->vf[vf_id]);
48442+ ret = i40e_validate_vf(pf, vf_id);
48443+ if (ret)
48444+ goto error_out;
48445+ vf = &pf->vf[vf_id];
48446 vsi = pf->vsi[vf->lan_vsi_idx];
48447- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
48448- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
48449- vf_id);
48450- ret = -EAGAIN;
48451- goto error_param;
48452+ spin_lock_bh(&vsi->mac_filter_hash_lock);
48453+ list_for_each_entry(tmp, mac_list, list) {
48454+ if (i40e_del_mac_filter(vsi, tmp->mac)) {
48455+ ret = I40E_ERR_INVALID_MAC_ADDR;
48456+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
48457+ goto error_out;
48458+ }
48459 }
48460+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
48461
48462- if (is_multicast_ether_addr(mac)) {
48463- dev_err(&pf->pdev->dev,
48464- "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
48465- ret = -EINVAL;
48466- goto error_param;
48467- }
48468+ /* program the updated filter list */
48469+ ret = i40e_sync_vsi_filters(vsi);
48470+ if (ret)
48471+ dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
48472+ vf->vf_id, ret);
48473+error_out:
48474+ return ret;
48475+}
48476
48477- /* Lock once because below invoked function add/del_filter requires
48478- * mac_filter_hash_lock to be held
48479- */
48480- spin_lock_bh(&vsi->mac_filter_hash_lock);
48481+/*
48482+ * i40e_set_pf_qos_apply
48483+ * @pdev: PCI device information struct
48484+ *
48485+ * This function applies the bw shares stored across all VFs
48486+ *
48487+ * Returns 0 on success, negative on failure
48488+ */
48489+static int i40e_set_pf_qos_apply(struct pci_dev *pdev)
48490+{
48491+ struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
48492+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48493+ int i, j, ret = 0, total_share = 0;
48494+ struct i40e_vf *vf = pf->vf;
48495+ struct i40e_vsi *vsi;
48496
48497- /* delete the temporary mac address */
48498- if (!is_zero_ether_addr(vf->default_lan_addr.addr))
48499- i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
48500+ for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
48501+ total_share += vf->bw_share;
48502+ }
48503
48504- /* Delete all the filters for this VSI - we're going to kill it
48505- * anyway.
48506- */
48507- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
48508- __i40e_del_filter(vsi, f);
48509+ /* verify BW share distribution */
48510+ if (total_share > 100) {
48511+ dev_err(&pdev->dev, "Total share is greater than 100 percent\n");
48512+ return I40E_ERR_PARAM;
48513+ }
48514
48515- spin_unlock_bh(&vsi->mac_filter_hash_lock);
48516+ memset(&bw_data, 0, sizeof(struct i40e_aqc_configure_vsi_tc_bw_data));
48517+ for (i = 0; i < pf->num_alloc_vfs; i++) {
48518+ ret = i40e_validate_vf(pf, i);
48519+ if (ret)
48520+ continue;
48521+ vf = &pf->vf[i];
48522+ if (!vf->bw_share)
48523+ continue;
48524+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
48525+ dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
48526+ vf->vf_id);
48527+ ret = I40E_ERR_PARAM;
48528+ goto error_param;
48529+ }
48530+ vsi = pf->vsi[vf->lan_vsi_idx];
48531+ bw_data.tc_valid_bits = 1;
48532+ bw_data.tc_bw_credits[0] = vf->bw_share;
48533
48534- /* program mac filter */
48535- if (i40e_sync_vsi_filters(vsi)) {
48536- dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
48537- ret = -EIO;
48538- goto error_param;
48539- }
48540- ether_addr_copy(vf->default_lan_addr.addr, mac);
48541+ ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
48542+ if (ret) {
48543+ dev_info(&pf->pdev->dev,
48544+ "AQ command Config VSI BW allocation per TC failed = %d\n",
48545+ pf->hw.aq.asq_last_status);
48546+ vf->bw_share_applied = false;
48547+ return -EINVAL;
48548+ }
48549
48550- if (is_zero_ether_addr(mac)) {
48551- vf->pf_set_mac = false;
48552- dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
48553- } else {
48554- vf->pf_set_mac = true;
48555- dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
48556- mac, vf_id);
48557- }
48558+ for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++)
48559+ vsi->info.qs_handle[j] = bw_data.qs_handles[j];
48560
48561- /* Force the VF driver stop so it has to reload with new MAC address */
48562- i40e_vc_disable_vf(pf, vf);
48563- dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
48564+ /* set the tracking bool to true */
48565+ vf->bw_share_applied = true;
48566+ }
48567+ pf->vf_bw_applied = true;
48568
48569 error_param:
48570 return ret;
48571 }
48572
48573 /**
48574- * i40e_ndo_set_vf_port_vlan
48575- * @netdev: network interface device structure
48576- * @vf_id: VF identifier
48577- * @vlan_id: mac address
48578- * @qos: priority setting
48579- * @vlan_proto: vlan protocol
48580+ * i40e_get_pf_ingress_mirror - Gets the configured ingress mirror for PF
48581+ * @pdev: PCI device information struct
48582+ * @mirror: pointer to return the ingress mirror
48583 *
48584- * program VF vlan id and/or qos
48585+ * Gets the ingress mirror configured
48586+ *
48587+ * Returns 0 on success, negative on failure
48588 **/
48589-int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
48590- u16 vlan_id, u8 qos, __be16 vlan_proto)
48591+static int i40e_get_pf_ingress_mirror(struct pci_dev *pdev, int *mirror)
48592 {
48593- u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
48594- struct i40e_netdev_priv *np = netdev_priv(netdev);
48595- struct i40e_pf *pf = np->vsi->back;
48596- struct i40e_vsi *vsi;
48597- struct i40e_vf *vf;
48598- int ret = 0;
48599+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48600+ *mirror = pf->ingress_vlan;
48601+ return 0;
48602+}
48603
48604- /* validate the request */
48605- if (vf_id >= pf->num_alloc_vfs) {
48606- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
48607- ret = -EINVAL;
48608- goto error_pvid;
48609- }
48610+/**
48611+ * i40e_set_pf_ingress_mirror - Sets the configured ingress mirror for PF
48612+ * @pdev: PCI device information struct
48613+ * @mirror: mirror vf
48614+ *
48615+ * Configures the ingress mirror for the PF
48616+ *
48617+ * Returns 0 on success, negative on failure
48618+ **/
48619+static int i40e_set_pf_ingress_mirror(struct pci_dev *pdev, const int mirror)
48620+{
48621+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48622+ struct i40e_vsi *src_vsi, *mirror_vsi;
48623+ struct i40e_vf *mirror_vf;
48624+ u16 rule_type, rule_id;
48625+ int ret;
48626
48627- if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
48628- dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
48629- ret = -EINVAL;
48630- goto error_pvid;
48631- }
48632+ /* The Admin Queue mirroring rules refer to the traffic
48633+ * directions from the perspective of the switch, not the VSI
48634+ * we apply the mirroring rule on - so the behaviour of a VSI
48635+ * ingress mirror is classified as an egress rule
48636+ */
48637+ rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS;
48638+ src_vsi = pf->vsi[pf->lan_vsi];
48639+ if (mirror == I40E_NO_VF_MIRROR) {
48640+ /* Del mirrors */
48641+ rule_id = pf->ingress_rule_id;
48642+ ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type,
48643+ rule_id);
48644+ if (ret)
48645+ goto err_out;
48646+ pf->ingress_vlan = I40E_NO_VF_MIRROR;
48647+ } else {
48648+ /* validate the mirror */
48649+ ret = i40e_validate_vf(pf, mirror);
48650+ if (ret)
48651+ goto err_out;
48652+ mirror_vf = &pf->vf[mirror];
48653+ mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx];
48654
48655- if (vlan_proto != htons(ETH_P_8021Q)) {
48656- dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
48657- ret = -EPROTONOSUPPORT;
48658- goto error_pvid;
48659+ /* Add mirrors */
48660+ ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi,
48661+ rule_type, &rule_id);
48662+ if (ret)
48663+ goto err_out;
48664+ pf->ingress_vlan = mirror;
48665+ pf->ingress_rule_id = rule_id;
48666 }
48667+err_out:
48668+ return ret;
48669+}
48670
48671- vf = &(pf->vf[vf_id]);
48672- vsi = pf->vsi[vf->lan_vsi_idx];
48673- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
48674- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
48675- vf_id);
48676- ret = -EAGAIN;
48677- goto error_pvid;
48678- }
48679+/**
48680+ * i40e_get_pf_egress_mirror - Gets the configured egress mirror for PF
48681+ * @pdev: PCI device information struct
48682+ * @mirror: pointer to return the egress mirror
48683+ *
48684+ * Gets the egress mirror configured
48685+ *
48686+ * Returns 0 on success, negative on failure
48687+ **/
48688+static int i40e_get_pf_egress_mirror(struct pci_dev *pdev, int *mirror)
48689+{
48690+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48691+ *mirror = pf->egress_vlan;
48692+ return 0;
48693+}
48694
48695- if (le16_to_cpu(vsi->info.pvid) == vlanprio)
48696- /* duplicate request, so just return success */
48697- goto error_pvid;
48698+/**
48699+ * i40e_set_pf_egress_mirror - Sets the configured egress mirror for PF
48700+ * @pdev: PCI device information struct
48701+ * @mirror: mirror vf
48702+ *
48703+ * Configures the egress mirror for the PF
48704+ *
48705+ * Returns 0 on success, negative on failure
48706+ **/
48707+static int i40e_set_pf_egress_mirror(struct pci_dev *pdev, const int mirror)
48708+{
48709+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48710+ struct i40e_vsi *src_vsi, *mirror_vsi;
48711+ struct i40e_vf *mirror_vf;
48712+ u16 rule_type, rule_id;
48713+ int ret;
48714
48715- /* Locked once because multiple functions below iterate list */
48716- spin_lock_bh(&vsi->mac_filter_hash_lock);
48717+ /* The Admin Queue mirroring rules refer to the traffic
48718+ * directions from the perspective of the switch, not the VSI
48719+ * we apply the mirroring rule on - so the behaviour of a VSI
48720+ * egress mirror is classified as an ingress rule
48721+ */
48722+ rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
48723+ src_vsi = pf->vsi[pf->lan_vsi];
48724+ if (mirror == I40E_NO_VF_MIRROR) {
48725+ /* Del mirrors */
48726+ rule_id = pf->egress_rule_id;
48727+ ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type,
48728+ rule_id);
48729+ if (ret)
48730+ goto err_out;
48731+ pf->egress_vlan = I40E_NO_VF_MIRROR;
48732+ } else {
48733+ /* validate the mirror */
48734+ ret = i40e_validate_vf(pf, mirror);
48735+ if (ret)
48736+ goto err_out;
48737+ mirror_vf = &pf->vf[mirror];
48738+ mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx];
48739
48740- if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
48741- dev_err(&pf->pdev->dev,
48742- "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
48743- vf_id);
48744- /* Administrator Error - knock the VF offline until he does
48745- * the right thing by reconfiguring his network correctly
48746- * and then reloading the VF driver.
48747- */
48748- i40e_vc_disable_vf(pf, vf);
48749- /* During reset the VF got a new VSI, so refresh the pointer. */
48750- vsi = pf->vsi[vf->lan_vsi_idx];
48751+ /* Add mirrors */
48752+ ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi,
48753+ rule_type, &rule_id);
48754+ if (ret)
48755+ goto err_out;
48756+ pf->egress_vlan = mirror;
48757+ pf->egress_rule_id = rule_id;
48758 }
48759+err_out:
48760+ return ret;
48761+}
48762
48763- /* Check for condition where there was already a port VLAN ID
48764- * filter set and now it is being deleted by setting it to zero.
48765- * Additionally check for the condition where there was a port
48766- * VLAN but now there is a new and different port VLAN being set.
48767- * Before deleting all the old VLAN filters we must add new ones
48768- * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
48769- * MAC addresses deleted.
48770- */
48771- if ((!(vlan_id || qos) ||
48772- vlanprio != le16_to_cpu(vsi->info.pvid)) &&
48773- vsi->info.pvid) {
48774- ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
48775- if (ret) {
48776- dev_info(&vsi->back->pdev->dev,
48777- "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
48778- vsi->back->hw.aq.asq_last_status);
48779- spin_unlock_bh(&vsi->mac_filter_hash_lock);
48780- goto error_pvid;
48781- }
48782- }
48783+#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
48784+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
48785+#define OUTER_TAG_IDX 2
48786+static int i40e_get_pf_tpid(struct pci_dev *pdev, u16 *tp_id)
48787+{
48788+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48789
48790- if (vsi->info.pvid) {
48791- /* remove all filters on the old VLAN */
48792- i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
48793- VLAN_VID_MASK));
48794- }
48795+ if (!(pf->hw.flags & I40E_HW_FLAG_802_1AD_CAPABLE))
48796+ return -EOPNOTSUPP;
48797
48798- spin_unlock_bh(&vsi->mac_filter_hash_lock);
48799- if (vlan_id || qos)
48800- ret = i40e_vsi_add_pvid(vsi, vlanprio);
48801- else
48802- i40e_vsi_remove_pvid(vsi);
48803- spin_lock_bh(&vsi->mac_filter_hash_lock);
48804+ *tp_id = (u16)(rd32(&pf->hw, I40E_GL_SWT_L2TAGCTRL(OUTER_TAG_IDX)) >>
48805+ I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
48806
48807- if (vlan_id) {
48808- dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
48809- vlan_id, qos, vf_id);
48810+ return 0;
48811+}
48812
48813- /* add new VLAN filter for each MAC */
48814- ret = i40e_add_vlan_all_mac(vsi, vlan_id);
48815- if (ret) {
48816- dev_info(&vsi->back->pdev->dev,
48817- "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
48818- vsi->back->hw.aq.asq_last_status);
48819- spin_unlock_bh(&vsi->mac_filter_hash_lock);
48820- goto error_pvid;
48821- }
48822+static int i40e_set_pf_tpid(struct pci_dev *pdev, u16 tp_id)
48823+{
48824+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48825+ int ret;
48826
48827- /* remove the previously added non-VLAN MAC filters */
48828- i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
48829+ if (!(pf->hw.flags & I40E_HW_FLAG_802_1AD_CAPABLE))
48830+ return -EOPNOTSUPP;
48831+
48832+ if (pf->hw.pf_id != 0) {
48833+ dev_err(&pdev->dev,
48834+ "TPID configuration only supported for PF 0\n");
48835+ return -EOPNOTSUPP;
48836 }
48837
48838- spin_unlock_bh(&vsi->mac_filter_hash_lock);
48839+ pf->hw.first_tag = tp_id;
48840+ ret = i40e_aq_set_switch_config(&pf->hw, 0, 0, 0, NULL);
48841
48842- /* Schedule the worker thread to take care of applying changes */
48843- i40e_service_event_schedule(vsi->back);
48844+ return ret;
48845+}
48846
48847- if (ret) {
48848- dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
48849- goto error_pvid;
48850- }
48851+static int i40e_get_num_queues(struct pci_dev *pdev, int vf_id, int *num_queues)
48852+{
48853+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48854+ struct i40e_vf *vf;
48855+ int ret;
48856+
48857+ /* validate the request */
48858+ ret = i40e_validate_vf(pf, vf_id);
48859+ if (ret)
48860+ return ret;
48861+ vf = &pf->vf[vf_id];
48862
48863- /* The Port VLAN needs to be saved across resets the same as the
48864- * default LAN MAC address.
48865- */
48866- vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
48867- ret = 0;
48868+ *num_queues = vf->num_queue_pairs;
48869
48870-error_pvid:
48871 return ret;
48872 }
48873
48874-#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
48875-#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */
48876-/**
48877- * i40e_ndo_set_vf_bw
48878- * @netdev: network interface device structure
48879- * @vf_id: VF identifier
48880- * @tx_rate: Tx rate
48881- *
48882- * configure VF Tx rate
48883- **/
48884-int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
48885- int max_tx_rate)
48886+static int i40e_set_num_queues(struct pci_dev *pdev, int vf_id, int num_queues)
48887 {
48888- struct i40e_netdev_priv *np = netdev_priv(netdev);
48889- struct i40e_pf *pf = np->vsi->back;
48890- struct i40e_vsi *vsi;
48891+ struct i40e_pf *pf = pci_get_drvdata(pdev);
48892 struct i40e_vf *vf;
48893- int speed = 0;
48894- int ret = 0;
48895+ int ret;
48896
48897 /* validate the request */
48898- if (vf_id >= pf->num_alloc_vfs) {
48899- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
48900- ret = -EINVAL;
48901- goto error;
48902- }
48903-
48904- if (min_tx_rate) {
48905- dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
48906- min_tx_rate, vf_id);
48907- return -EINVAL;
48908- }
48909+ ret = i40e_validate_vf(pf, vf_id);
48910+ if (ret)
48911+ return ret;
48912+ vf = &pf->vf[vf_id];
48913
48914- vf = &(pf->vf[vf_id]);
48915- vsi = pf->vsi[vf->lan_vsi_idx];
48916- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
48917- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
48918- vf_id);
48919- ret = -EAGAIN;
48920- goto error;
48921+ if (test_bit(I40E_VF_STATE_LOADED_VF_DRIVER, &vf->vf_states)) {
48922+ dev_err(&pdev->dev,
48923+ "Unable to configure %d queues, please unbind the driver for VF %d\n",
48924+ num_queues, vf_id);
48925+ return -EAGAIN;
48926 }
48927-
48928- switch (pf->hw.phy.link_info.link_speed) {
48929- case I40E_LINK_SPEED_40GB:
48930- speed = 40000;
48931- break;
48932- case I40E_LINK_SPEED_25GB:
48933- speed = 25000;
48934- break;
48935- case I40E_LINK_SPEED_20GB:
48936- speed = 20000;
48937- break;
48938- case I40E_LINK_SPEED_10GB:
48939- speed = 10000;
48940- break;
48941- case I40E_LINK_SPEED_1GB:
48942- speed = 1000;
48943- break;
48944- default:
48945- break;
48946+ if (num_queues > I40E_MAX_VF_QUEUES) {
48947+ dev_err(&pdev->dev,
48948+ "Unable to configure %d VF queues, the maximum is %d\n",
48949+ num_queues, I40E_MAX_VF_QUEUES);
48950+ return -EINVAL;
48951 }
48952-
48953- if (max_tx_rate > speed) {
48954- dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
48955- max_tx_rate, vf->vf_id);
48956- ret = -EINVAL;
48957- goto error;
48958+ if (num_queues - vf->num_queue_pairs > pf->queues_left) {
48959+ dev_err(&pdev->dev,
48960+ "Unable to configure %d VF queues, only %d available\n",
48961+ num_queues, vf->num_queue_pairs + pf->queues_left);
48962+ return -EINVAL;
48963 }
48964
48965- if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
48966- dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
48967- max_tx_rate = 50;
48968- }
48969+ /* Set vf->num_req_queues to the desired value and reset the VF. When
48970+ * the VSI is reallocated it will be configured with the new queue
48971+ * count.
48972+ */
48973+ vf->num_req_queues = num_queues;
48974+ i40e_vc_notify_vf_reset(vf);
48975+ i40e_reset_vf(vf, false);
48976
48977- /* Tx rate credits are in values of 50Mbps, 0 is disabled*/
48978- ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
48979- max_tx_rate / I40E_BW_CREDIT_DIVISOR,
48980- I40E_MAX_BW_INACTIVE_ACCUM, NULL);
48981- if (ret) {
48982- dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
48983- ret);
48984- ret = -EIO;
48985- goto error;
48986- }
48987- vf->tx_rate = max_tx_rate;
48988-error:
48989- return ret;
48990+ return 0;
48991 }
48992
48993 /**
48994- * i40e_ndo_get_vf_config
48995- * @netdev: network interface device structure
48996+ * i40e_get_max_tx_rate
48997+ * @pdev: PCI device information struct
48998 * @vf_id: VF identifier
48999- * @ivi: VF configuration structure
49000+ * @max_tx_rate: max transmit bandwidth rate
49001 *
49002- * return VF configuration
49003- **/
49004-int i40e_ndo_get_vf_config(struct net_device *netdev,
49005- int vf_id, struct ifla_vf_info *ivi)
49006+ * This function returns the max transmit bandwidth, in Mbps, configured
49007+ * for the specified VF; a value of 0 means that rate limiting
49008+ * is disabled.
49009+ *
49010+ * Returns 0 on success, negative on failure
49011+ */
49012+static int i40e_get_max_tx_rate(struct pci_dev *pdev, int vf_id,
49013+ unsigned int *max_tx_rate)
49014 {
49015- struct i40e_netdev_priv *np = netdev_priv(netdev);
49016- struct i40e_vsi *vsi = np->vsi;
49017- struct i40e_pf *pf = vsi->back;
49018+ struct i40e_pf *pf = pci_get_drvdata(pdev);
49019+ struct i40e_vsi *vsi;
49020 struct i40e_vf *vf;
49021- int ret = 0;
49022+ int ret;
49023+
49024+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
49025+ dev_warn(&pf->pdev->dev,
49026+ "Unable to configure VFs, other operation is pending.\n");
49027+ return -EAGAIN;
49028+ }
49029
49030 /* validate the request */
49031- if (vf_id >= pf->num_alloc_vfs) {
49032- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
49033- ret = -EINVAL;
49034+ ret = i40e_validate_vf(pf, vf_id);
49035+ if (ret)
49036 goto error_param;
49037- }
49038
49039- vf = &(pf->vf[vf_id]);
49040- /* first vsi is always the LAN vsi */
49041+ vf = &pf->vf[vf_id];
49042 vsi = pf->vsi[vf->lan_vsi_idx];
49043- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
49044- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
49045- vf_id);
49046- ret = -EAGAIN;
49047+ if (!vsi) {
49048+ ret = -ENOENT;
49049 goto error_param;
49050 }
49051
49052- ivi->vf = vf_id;
49053-
49054- ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
49055-
49056- ivi->max_tx_rate = vf->tx_rate;
49057- ivi->min_tx_rate = 0;
49058- ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
49059- ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
49060- I40E_VLAN_PRIORITY_SHIFT;
49061- if (vf->link_forced == false)
49062- ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
49063- else if (vf->link_up == true)
49064- ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
49065- else
49066- ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
49067- ivi->spoofchk = vf->spoofchk;
49068- ivi->trusted = vf->trusted;
49069- ret = 0;
49070+ *max_tx_rate = vf->tx_rate;
49071
49072 error_param:
49073+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
49074 return ret;
49075 }
49076
49077 /**
49078- * i40e_ndo_set_vf_link_state
49079- * @netdev: network interface device structure
49080+ * i40e_set_max_tx_rate
49081+ * @pdev: PCI device information struct
49082 * @vf_id: VF identifier
49083- * @link: required link state
49084+ * @max_tx_rate: max transmit bandwidth rate to set
49085 *
49086- * Set the link state of a specified VF, regardless of physical link state
49087+ * This function sets the max transmit bandwidth, in Mbps,
49088+ * for the specified VF; a value of 0 means that rate limiting
49089+ * is disabled.
49090+ *
49091+ * Returns 0 on success, negative on failure
49092 **/
49093-int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
49094+static int i40e_set_max_tx_rate(struct pci_dev *pdev, int vf_id,
49095+ unsigned int *max_tx_rate)
49096 {
49097- struct i40e_netdev_priv *np = netdev_priv(netdev);
49098- struct i40e_pf *pf = np->vsi->back;
49099- struct virtchnl_pf_event pfe;
49100- struct i40e_hw *hw = &pf->hw;
49101+ struct i40e_pf *pf = pci_get_drvdata(pdev);
49102+ struct i40e_vsi *vsi;
49103 struct i40e_vf *vf;
49104- int abs_vf_id;
49105- int ret = 0;
49106+ int ret;
49107
49108- /* validate the request */
49109- if (vf_id >= pf->num_alloc_vfs) {
49110- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
49111- ret = -EINVAL;
49112- goto error_out;
49113+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
49114+ dev_warn(&pf->pdev->dev,
49115+ "Unable to configure VFs, other operation is pending.\n");
49116+ return -EAGAIN;
49117 }
49118
49119- vf = &pf->vf[vf_id];
49120- abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
49121-
49122- pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
49123- pfe.severity = PF_EVENT_SEVERITY_INFO;
49124+ /* validate the request */
49125+ ret = i40e_validate_vf(pf, vf_id);
49126+ if (ret)
49127+ return ret;
49128
49129- switch (link) {
49130- case IFLA_VF_LINK_STATE_AUTO:
49131- vf->link_forced = false;
49132- pfe.event_data.link_event.link_status =
49133- pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
49134- pfe.event_data.link_event.link_speed =
49135- (enum virtchnl_link_speed)
49136- pf->hw.phy.link_info.link_speed;
49137- break;
49138- case IFLA_VF_LINK_STATE_ENABLE:
49139- vf->link_forced = true;
49140- vf->link_up = true;
49141- pfe.event_data.link_event.link_status = true;
49142- pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
49143- break;
49144- case IFLA_VF_LINK_STATE_DISABLE:
49145- vf->link_forced = true;
49146- vf->link_up = false;
49147- pfe.event_data.link_event.link_status = false;
49148- pfe.event_data.link_event.link_speed = 0;
49149- break;
49150- default:
49151- ret = -EINVAL;
49152- goto error_out;
49153+ vf = &pf->vf[vf_id];
49154+ vsi = pf->vsi[vf->lan_vsi_idx];
49155+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
49156+ dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
49157+ vf_id);
49158+ ret = -EAGAIN;
49159+ goto error;
49160 }
49161- /* Notify the VF of its new link state */
49162- i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
49163- 0, (u8 *)&pfe, sizeof(pfe), NULL);
49164
49165-error_out:
49166+ ret = i40e_set_bw_limit(vsi, vsi->seid, *max_tx_rate);
49167+ if (ret)
49168+ goto error;
49169+
49170+ vf->tx_rate = *max_tx_rate;
49171+error:
49172+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
49173 return ret;
49174 }
49175
49176 /**
49177- * i40e_ndo_set_vf_spoofchk
49178- * @netdev: network interface device structure
49179+ * i40e_get_trust_state
49180+ * @pdev: PCI device information struct
49181 * @vf_id: VF identifier
49182- * @enable: flag to enable or disable feature
49183+ * @enable: on success, set to true if the VF is trusted, false otherwise
49184 *
49185- * Enable or disable VF spoof checking
49186+ * Gets the VF trust configuration.
49187+ *
49188+ * Returns 0 on success, negative on failure
49189 **/
49190-int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
49191+static int i40e_get_trust_state(struct pci_dev *pdev, int vf_id, bool *enable)
49192 {
49193- struct i40e_netdev_priv *np = netdev_priv(netdev);
49194- struct i40e_vsi *vsi = np->vsi;
49195- struct i40e_pf *pf = vsi->back;
49196- struct i40e_vsi_context ctxt;
49197- struct i40e_hw *hw = &pf->hw;
49198+ struct i40e_pf *pf = pci_get_drvdata(pdev);
49199 struct i40e_vf *vf;
49200- int ret = 0;
49201-
49202- /* validate the request */
49203- if (vf_id >= pf->num_alloc_vfs) {
49204- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
49205- ret = -EINVAL;
49206- goto out;
49207- }
49208+ int ret;
49209
49210- vf = &(pf->vf[vf_id]);
49211- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
49212- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
49213- vf_id);
49214- ret = -EAGAIN;
49215- goto out;
49216- }
49217+ ret = i40e_validate_vf(pf, vf_id);
49218+ if (ret)
49219+ return ret;
49220+ vf = &pf->vf[vf_id];
49221
49222- if (enable == vf->spoofchk)
49223- goto out;
49224+ *enable = vf->trusted;
49225
49226- vf->spoofchk = enable;
49227- memset(&ctxt, 0, sizeof(ctxt));
49228- ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
49229- ctxt.pf_num = pf->hw.pf_id;
49230- ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
49231- if (enable)
49232- ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
49233- I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
49234- ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
49235- if (ret) {
49236- dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
49237- ret);
49238- ret = -EIO;
49239- }
49240-out:
49241 return ret;
49242 }
49243
49244 /**
49245- * i40e_ndo_set_vf_trust
49246- * @netdev: network interface device structure of the pf
49247+ * i40e_set_trust_state
49248+ * @pdev: PCI device information struct
49249 * @vf_id: VF identifier
49250- * @setting: trust setting
49251+ * @enable: enable or disable trust
49252 *
49253- * Enable or disable VF trust setting
49254+ * Sets the VF trust configuration
49255+ *
49256+ * Returns 0 on success, negative on failure
49257 **/
49258-int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
49259+static int i40e_set_trust_state(struct pci_dev *pdev, int vf_id, bool enable)
49260 {
49261- struct i40e_netdev_priv *np = netdev_priv(netdev);
49262- struct i40e_pf *pf = np->vsi->back;
49263+ struct i40e_pf *pf = pci_get_drvdata(pdev);
49264 struct i40e_vf *vf;
49265- int ret = 0;
49266+ int ret;
49267
49268- /* validate the request */
49269- if (vf_id >= pf->num_alloc_vfs) {
49270- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
49271- return -EINVAL;
49272+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
49273+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
49274+ return -EAGAIN;
49275 }
49276
49277 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
49278 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
49279- return -EINVAL;
49280+ ret = -EINVAL;
49281+ goto out;
49282 }
49283
49284- vf = &pf->vf[vf_id];
49285+ /* validate the request */
49286+ ret = i40e_validate_vf(pf, vf_id);
49287+ if (ret)
49288+ goto out;
49289
49290- if (!vf)
49291- return -EINVAL;
49292- if (setting == vf->trusted)
49293+ vf = &pf->vf[vf_id];
49294+ /* if vf is in base mode, make it untrusted */
49295+ if (pf->vf_base_mode_only)
49296+ enable = false;
49297+ if (enable == vf->trusted)
49298 goto out;
49299
49300- vf->trusted = setting;
49301- i40e_vc_notify_vf_reset(vf);
49302- i40e_reset_vf(vf, false);
49303+ vf->trusted = enable;
49304+ i40e_vc_reset_vf(vf, true);
49305 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
49306- vf_id, setting ? "" : "un");
49307+ vf_id, enable ? "" : "un");
49308+
49309+#ifdef __TC_MQPRIO_MODE_MAX
49310+ if (vf->adq_enabled) {
49311+ if (!vf->trusted) {
49312+ dev_info(&pf->pdev->dev,
49313+ "VF %u no longer Trusted, deleting all cloud filters\n",
49314+ vf_id);
49315+ i40e_del_all_cloud_filters(vf);
49316+ }
49317+ }
49318+#endif /* __TC_MQPRIO_MODE_MAX */
49319+
49320 out:
49321+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
49322 return ret;
49323 }
49324+
49325+const struct vfd_ops i40e_vfd_ops = {
49326+ .get_trunk = i40e_get_trunk,
49327+ .set_trunk = i40e_set_trunk,
49328+ .get_vlan_mirror = i40e_get_mirror,
49329+ .set_vlan_mirror = i40e_set_mirror,
49330+ .set_allow_untagged = i40e_set_allow_untagged,
49331+ .get_allow_untagged = i40e_get_allow_untagged,
49332+ .get_loopback = i40e_get_loopback,
49333+ .set_loopback = i40e_set_loopback,
49334+ .get_vlan_strip = i40e_get_vlan_strip,
49335+ .set_vlan_strip = i40e_set_vlan_strip,
49336+ .get_rx_bytes = i40e_get_rx_bytes,
49337+ .get_rx_dropped = i40e_get_rx_dropped,
49338+ .get_rx_packets = i40e_get_rx_packets,
49339+ .get_tx_bytes = i40e_get_tx_bytes,
49340+ .get_tx_dropped = i40e_get_tx_dropped,
49341+ .get_tx_packets = i40e_get_tx_packets,
49342+ .get_tx_errors = i40e_get_tx_errors,
49343+ .get_mac = i40e_get_mac,
49344+ .set_mac = i40e_set_mac,
49345+ .get_promisc = i40e_get_promisc,
49346+ .set_promisc = i40e_set_promisc,
49347+ .get_ingress_mirror = i40e_get_ingress_mirror,
49348+ .set_ingress_mirror = i40e_set_ingress_mirror,
49349+ .get_egress_mirror = i40e_get_egress_mirror,
49350+ .set_egress_mirror = i40e_set_egress_mirror,
49351+ .get_link_state = i40e_get_link_state,
49352+ .set_link_state = i40e_set_link_state,
49353+ .get_mac_list = i40e_get_mac_list,
49354+ .add_macs_to_list = i40e_add_macs_to_list,
49355+ .rem_macs_from_list = i40e_rem_macs_from_list,
49356+ .get_vf_enable = i40e_get_vf_enable,
49357+ .set_vf_enable = i40e_set_vf_enable,
49358+ .reset_stats = i40e_reset_vf_stats,
49359+ .set_vf_bw_share = i40e_store_vf_bw_share,
49360+ .get_vf_bw_share = i40e_get_vf_bw_share,
49361+ .set_pf_qos_apply = i40e_set_pf_qos_apply,
49362+ .get_pf_ingress_mirror = i40e_get_pf_ingress_mirror,
49363+ .set_pf_ingress_mirror = i40e_set_pf_ingress_mirror,
49364+ .get_pf_egress_mirror = i40e_get_pf_egress_mirror,
49365+ .set_pf_egress_mirror = i40e_set_pf_egress_mirror,
49366+ .get_pf_tpid = i40e_get_pf_tpid,
49367+ .set_pf_tpid = i40e_set_pf_tpid,
49368+ .get_num_queues = i40e_get_num_queues,
49369+ .set_num_queues = i40e_set_num_queues,
49370+ .get_max_tx_rate = i40e_get_max_tx_rate,
49371+ .set_max_tx_rate = i40e_set_max_tx_rate,
49372+ .get_trust_state = i40e_get_trust_state,
49373+ .set_trust_state = i40e_set_trust_state,
49374+};
49375+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
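
The i40e_vfd_ops table above wires the new per-VF handlers into the driver's vfd configuration interface (the sysfs-based VF configuration layer of the out-of-tree driver, which is not part of this hunk). Below is a minimal sketch of how a caller that already holds the VF's parent PCI device could exercise two of these callbacks; the helper name example_query_vf and the surrounding glue are illustrative assumptions, not part of the patch, and the i40e driver headers are assumed to be in scope.

/* Illustrative sketch only: query a VF through the vfd_ops callbacks. */
static int example_query_vf(struct pci_dev *pdev, int vf_id)
{
	unsigned int rate = 0;	/* max Tx rate in Mbps, 0 = unlimited */
	bool trusted = false;
	int err;

	/* Both callbacks validate vf_id internally via i40e_validate_vf() */
	err = i40e_vfd_ops.get_max_tx_rate(pdev, vf_id, &rate);
	if (err)
		return err;

	err = i40e_vfd_ops.get_trust_state(pdev, vf_id, &trusted);
	if (err)
		return err;

	dev_info(&pdev->dev, "VF %d: max_tx_rate=%u Mbps, trusted=%d\n",
		 vf_id, rate, trusted);
	return 0;
}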
49376diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
49377index 1f4b0c504..77fb5cfef 100644
49378--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
49379+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
49380@@ -1,28 +1,5 @@
49381-/*******************************************************************************
49382- *
49383- * Intel Ethernet Controller XL710 Family Linux Driver
49384- * Copyright(c) 2013 - 2015 Intel Corporation.
49385- *
49386- * This program is free software; you can redistribute it and/or modify it
49387- * under the terms and conditions of the GNU General Public License,
49388- * version 2, as published by the Free Software Foundation.
49389- *
49390- * This program is distributed in the hope it will be useful, but WITHOUT
49391- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
49392- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
49393- * more details.
49394- *
49395- * You should have received a copy of the GNU General Public License along
49396- * with this program. If not, see <http://www.gnu.org/licenses/>.
49397- *
49398- * The full GNU General Public License is included in this distribution in
49399- * the file called "COPYING".
49400- *
49401- * Contact Information:
49402- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
49403- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
49404- *
49405- ******************************************************************************/
49406+/* SPDX-License-Identifier: GPL-2.0 */
49407+/* Copyright(c) 2013 - 2020 Intel Corporation. */
49408
49409 #ifndef _I40E_VIRTCHNL_PF_H_
49410 #define _I40E_VIRTCHNL_PF_H_
49411@@ -36,9 +13,11 @@
49412 #define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3
49413 #define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
49414
49415-#define I40E_VLAN_PRIORITY_SHIFT 12
49416+#define I40E_VLAN_PRIORITY_SHIFT 13
49417 #define I40E_VLAN_MASK 0xFFF
49418-#define I40E_PRIORITY_MASK 0x7000
49419+#define I40E_PRIORITY_MASK 0xE000
49420+
49421+#define I40E_MAX_VF_PROMISC_FLAGS 3
49422
49423 /* Various queue ctrls */
49424 enum i40e_queue_ctrl {
49425@@ -55,19 +34,30 @@ enum i40e_queue_ctrl {
49426 enum i40e_vf_states {
49427 I40E_VF_STATE_INIT = 0,
49428 I40E_VF_STATE_ACTIVE,
49429- I40E_VF_STATE_IWARPENA,
49430- I40E_VF_STATE_FCOEENA,
49431 I40E_VF_STATE_DISABLED,
49432 I40E_VF_STATE_MC_PROMISC,
49433 I40E_VF_STATE_UC_PROMISC,
49434 I40E_VF_STATE_PRE_ENABLE,
49435+ I40E_VF_STATE_LOADED_VF_DRIVER,
49436 };
49437
49438 /* VF capabilities */
49439 enum i40e_vf_capabilities {
49440 I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
49441 I40E_VIRTCHNL_VF_CAP_L2,
49442- I40E_VIRTCHNL_VF_CAP_IWARP,
49443+};
49444+
49445+/* In ADq, a max of 4 VSIs can be allocated per VF, including the
49446+ * primary VF VSI. These variables store the indices, IDs and number of
49447+ * queues for each VSI, including the primary VF VSI. Each traffic class
49448+ * is termed a channel, and each channel can in turn have 4 queues,
49449+ * which means a max of 16 queues overall per VF.
49450+ */
49451+struct i40evf_channel {
49452+ u16 vsi_idx; /* index in PF struct for all channel VSIs */
49453+ u16 vsi_id; /* VSI ID used by firmware */
49454+ u16 num_qps; /* number of queue pairs requested by user */
49455+ u64 max_tx_rate; /* bandwidth rate allocation for VSIs */
49456 };
49457
49458 /* VF information structure */
49459@@ -97,6 +87,7 @@ struct i40e_vf {
49460 u16 lan_vsi_id; /* ID as used by firmware */
49461
49462 u8 num_queue_pairs; /* num of qps assigned to VF vsis */
49463+ u8 num_req_queues; /* num of requested qps */
49464 u64 num_mdd_events; /* num of mdd events detected */
49465 /* num of continuous malformed or invalid msgs detected */
49466 u64 num_invalid_msgs;
49467@@ -105,39 +96,87 @@ struct i40e_vf {
49468 unsigned long vf_caps; /* vf's adv. capabilities */
49469 unsigned long vf_states; /* vf's runtime states */
49470 unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
49471+#ifdef HAVE_NDO_SET_VF_LINK_STATE
49472 bool link_forced;
49473 bool link_up; /* only valid if VF link is forced */
49474- bool spoofchk;
49475- u16 num_mac;
49476+#endif
49477+ bool queues_enabled; /* true if the VF queues are enabled */
49478+ bool mac_anti_spoof;
49479 u16 num_vlan;
49480-
49481- /* RDMA Client */
49482- struct virtchnl_iwarp_qvlist_info *qvlist_info;
49483+ DECLARE_BITMAP(mirror_vlans, VLAN_N_VID);
49484+ u16 vlan_rule_id;
49485+#define I40E_NO_VF_MIRROR -1
49486+/* assuming vf ids' range is <0..max_supported> */
49487+#define I40E_IS_MIRROR_VLAN_ID_VALID(id) ((id) >= 0)
49488+ u16 ingress_rule_id;
49489+ int ingress_vlan;
49490+ u16 egress_rule_id;
49491+ int egress_vlan;
49492+ DECLARE_BITMAP(trunk_vlans, VLAN_N_VID);
49493+ bool allow_untagged;
49494+ bool loopback;
49495+ bool vlan_stripping;
49496+ u8 promisc_mode;
49497+ u8 bw_share;
49498+ bool bw_share_applied; /* true if config is applied to the device */
49499+ bool pf_ctrl_disable; /* tracking bool for PF ctrl of VF enable/disable */
49500+
49501+ /* ADq related variables */
49502+ bool adq_enabled; /* flag to enable adq */
49503+ u8 num_tc;
49504+ struct i40evf_channel ch[I40E_MAX_VF_VSI];
49505+ struct hlist_head cloud_filter_list;
49506+ u16 num_cloud_filters;
49507 };
49508
49509 void i40e_free_vfs(struct i40e_pf *pf);
49510+#if defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)
49511 int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
49512+#endif
49513 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
49514 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
49515 u32 v_retval, u8 *msg, u16 msglen);
49516 int i40e_vc_process_vflr_event(struct i40e_pf *pf);
49517-void i40e_reset_vf(struct i40e_vf *vf, bool flr);
49518-void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
49519+bool i40e_reset_vf(struct i40e_vf *vf, bool flr);
49520+bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
49521 void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
49522
49523 /* VF configuration related iplink handlers */
49524 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
49525+#ifdef IFLA_VF_VLAN_INFO_MAX
49526 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
49527 u16 vlan_id, u8 qos, __be16 vlan_proto);
49528+#else
49529+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
49530+ int vf_id, u16 vlan_id, u8 qos);
49531+#endif
49532+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
49533 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
49534 int max_tx_rate);
49535+#else
49536+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
49537+#endif
49538+#ifdef HAVE_NDO_SET_VF_TRUST
49539 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
49540+#endif
49541+int i40e_ndo_enable_vf(struct net_device *netdev, int vf_id, bool enable);
49542+#ifdef IFLA_VF_MAX
49543 int i40e_ndo_get_vf_config(struct net_device *netdev,
49544 int vf_id, struct ifla_vf_info *ivi);
49545+#ifdef HAVE_NDO_SET_VF_LINK_STATE
49546 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
49547+#endif
49548+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
49549 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
49550+#endif
49551+#endif
49552
49553 void i40e_vc_notify_link_state(struct i40e_pf *pf);
49554 void i40e_vc_notify_reset(struct i40e_pf *pf);
49555+#ifdef HAVE_VF_STATS
49556+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
49557+ struct ifla_vf_stats *vf_stats);
49558+#endif
49559+extern const struct vfd_ops i40e_vfd_ops;
49560
49561 #endif /* _I40E_VIRTCHNL_PF_H_ */
49562diff --git a/drivers/net/ethernet/intel/i40e/kcompat.c b/drivers/net/ethernet/intel/i40e/kcompat.c
49563new file mode 100644
49564index 000000000..eb2ba6bd1
49565--- /dev/null
49566+++ b/drivers/net/ethernet/intel/i40e/kcompat.c
49567@@ -0,0 +1,2761 @@
49568+// SPDX-License-Identifier: GPL-2.0
49569+/* Copyright(c) 2013 - 2020 Intel Corporation. */
49570+
49571+#include "i40e.h"
49572+#include "kcompat.h"
49573+
49574+/*****************************************************************************/
49575+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) || defined __VMKLNX__
49576+/* From lib/vsprintf.c */
49577+#include <asm/div64.h>
49578+
49579+static int skip_atoi(const char **s)
49580+{
49581+ int i=0;
49582+
49583+ while (isdigit(**s))
49584+ i = i*10 + *((*s)++) - '0';
49585+ return i;
49586+}
49587+
49588+#define _kc_ZEROPAD 1 /* pad with zero */
49589+#define _kc_SIGN 2 /* unsigned/signed long */
49590+#define _kc_PLUS 4 /* show plus */
49591+#define _kc_SPACE 8 /* space if plus */
49592+#define _kc_LEFT 16 /* left justified */
49593+#define _kc_SPECIAL 32 /* 0x */
49594+#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
49595+
49596+static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
49597+{
49598+ char c,sign,tmp[66];
49599+ const char *digits;
49600+ const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
49601+ const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
49602+ int i;
49603+
49604+ digits = (type & _kc_LARGE) ? large_digits : small_digits;
49605+ if (type & _kc_LEFT)
49606+ type &= ~_kc_ZEROPAD;
49607+ if (base < 2 || base > 36)
49608+ return 0;
49609+ c = (type & _kc_ZEROPAD) ? '0' : ' ';
49610+ sign = 0;
49611+ if (type & _kc_SIGN) {
49612+ if (num < 0) {
49613+ sign = '-';
49614+ num = -num;
49615+ size--;
49616+ } else if (type & _kc_PLUS) {
49617+ sign = '+';
49618+ size--;
49619+ } else if (type & _kc_SPACE) {
49620+ sign = ' ';
49621+ size--;
49622+ }
49623+ }
49624+ if (type & _kc_SPECIAL) {
49625+ if (base == 16)
49626+ size -= 2;
49627+ else if (base == 8)
49628+ size--;
49629+ }
49630+ i = 0;
49631+ if (num == 0)
49632+ tmp[i++]='0';
49633+ else while (num != 0)
49634+ tmp[i++] = digits[do_div(num,base)];
49635+ if (i > precision)
49636+ precision = i;
49637+ size -= precision;
49638+ if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
49639+ while(size-->0) {
49640+ if (buf <= end)
49641+ *buf = ' ';
49642+ ++buf;
49643+ }
49644+ }
49645+ if (sign) {
49646+ if (buf <= end)
49647+ *buf = sign;
49648+ ++buf;
49649+ }
49650+ if (type & _kc_SPECIAL) {
49651+ if (base==8) {
49652+ if (buf <= end)
49653+ *buf = '0';
49654+ ++buf;
49655+ } else if (base==16) {
49656+ if (buf <= end)
49657+ *buf = '0';
49658+ ++buf;
49659+ if (buf <= end)
49660+ *buf = digits[33];
49661+ ++buf;
49662+ }
49663+ }
49664+ if (!(type & _kc_LEFT)) {
49665+ while (size-- > 0) {
49666+ if (buf <= end)
49667+ *buf = c;
49668+ ++buf;
49669+ }
49670+ }
49671+ while (i < precision--) {
49672+ if (buf <= end)
49673+ *buf = '0';
49674+ ++buf;
49675+ }
49676+ while (i-- > 0) {
49677+ if (buf <= end)
49678+ *buf = tmp[i];
49679+ ++buf;
49680+ }
49681+ while (size-- > 0) {
49682+ if (buf <= end)
49683+ *buf = ' ';
49684+ ++buf;
49685+ }
49686+ return buf;
49687+}
49688+
49689+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
49690+{
49691+ int len;
49692+ unsigned long long num;
49693+ int i, base;
49694+ char *str, *end, c;
49695+ const char *s;
49696+
49697+ int flags; /* flags to number() */
49698+
49699+ int field_width; /* width of output field */
49700+ int precision; /* min. # of digits for integers; max
49701+ number of chars for from string */
49702+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
49703+ /* 'z' support added 23/7/1999 S.H. */
49704+ /* 'z' changed to 'Z' --davidm 1/25/99 */
49705+
49706+ str = buf;
49707+ end = buf + size - 1;
49708+
49709+ if (end < buf - 1) {
49710+ end = ((void *) -1);
49711+ size = end - buf + 1;
49712+ }
49713+
49714+ for (; *fmt ; ++fmt) {
49715+ if (*fmt != '%') {
49716+ if (str <= end)
49717+ *str = *fmt;
49718+ ++str;
49719+ continue;
49720+ }
49721+
49722+ /* process flags */
49723+ flags = 0;
49724+ repeat:
49725+ ++fmt; /* this also skips first '%' */
49726+ switch (*fmt) {
49727+ case '-': flags |= _kc_LEFT; goto repeat;
49728+ case '+': flags |= _kc_PLUS; goto repeat;
49729+ case ' ': flags |= _kc_SPACE; goto repeat;
49730+ case '#': flags |= _kc_SPECIAL; goto repeat;
49731+ case '0': flags |= _kc_ZEROPAD; goto repeat;
49732+ }
49733+
49734+ /* get field width */
49735+ field_width = -1;
49736+ if (isdigit(*fmt))
49737+ field_width = skip_atoi(&fmt);
49738+ else if (*fmt == '*') {
49739+ ++fmt;
49740+ /* it's the next argument */
49741+ field_width = va_arg(args, int);
49742+ if (field_width < 0) {
49743+ field_width = -field_width;
49744+ flags |= _kc_LEFT;
49745+ }
49746+ }
49747+
49748+ /* get the precision */
49749+ precision = -1;
49750+ if (*fmt == '.') {
49751+ ++fmt;
49752+ if (isdigit(*fmt))
49753+ precision = skip_atoi(&fmt);
49754+ else if (*fmt == '*') {
49755+ ++fmt;
49756+ /* it's the next argument */
49757+ precision = va_arg(args, int);
49758+ }
49759+ if (precision < 0)
49760+ precision = 0;
49761+ }
49762+
49763+ /* get the conversion qualifier */
49764+ qualifier = -1;
49765+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
49766+ qualifier = *fmt;
49767+ ++fmt;
49768+ }
49769+
49770+ /* default base */
49771+ base = 10;
49772+
49773+ switch (*fmt) {
49774+ case 'c':
49775+ if (!(flags & _kc_LEFT)) {
49776+ while (--field_width > 0) {
49777+ if (str <= end)
49778+ *str = ' ';
49779+ ++str;
49780+ }
49781+ }
49782+ c = (unsigned char) va_arg(args, int);
49783+ if (str <= end)
49784+ *str = c;
49785+ ++str;
49786+ while (--field_width > 0) {
49787+ if (str <= end)
49788+ *str = ' ';
49789+ ++str;
49790+ }
49791+ continue;
49792+
49793+ case 's':
49794+ s = va_arg(args, char *);
49795+ if (!s)
49796+ s = "<NULL>";
49797+
49798+ len = strnlen(s, precision);
49799+
49800+ if (!(flags & _kc_LEFT)) {
49801+ while (len < field_width--) {
49802+ if (str <= end)
49803+ *str = ' ';
49804+ ++str;
49805+ }
49806+ }
49807+ for (i = 0; i < len; ++i) {
49808+ if (str <= end)
49809+ *str = *s;
49810+ ++str; ++s;
49811+ }
49812+ while (len < field_width--) {
49813+ if (str <= end)
49814+ *str = ' ';
49815+ ++str;
49816+ }
49817+ continue;
49818+
49819+ case 'p':
49820+ if ('M' == *(fmt+1)) {
49821+ str = get_mac(str, end, va_arg(args, unsigned char *));
49822+ fmt++;
49823+ } else {
49824+ if (field_width == -1) {
49825+ field_width = 2*sizeof(void *);
49826+ flags |= _kc_ZEROPAD;
49827+ }
49828+ str = number(str, end,
49829+ (unsigned long) va_arg(args, void *),
49830+ 16, field_width, precision, flags);
49831+ }
49832+ continue;
49833+
49834+ case 'n':
49835+ /* FIXME:
49836+ * What does C99 say about the overflow case here? */
49837+ if (qualifier == 'l') {
49838+ long * ip = va_arg(args, long *);
49839+ *ip = (str - buf);
49840+ } else if (qualifier == 'Z') {
49841+ size_t * ip = va_arg(args, size_t *);
49842+ *ip = (str - buf);
49843+ } else {
49844+ int * ip = va_arg(args, int *);
49845+ *ip = (str - buf);
49846+ }
49847+ continue;
49848+
49849+ case '%':
49850+ if (str <= end)
49851+ *str = '%';
49852+ ++str;
49853+ continue;
49854+
49855+ /* integer number formats - set up the flags and "break" */
49856+ case 'o':
49857+ base = 8;
49858+ break;
49859+
49860+ case 'X':
49861+ flags |= _kc_LARGE;
49862+ case 'x':
49863+ base = 16;
49864+ break;
49865+
49866+ case 'd':
49867+ case 'i':
49868+ flags |= _kc_SIGN;
49869+ case 'u':
49870+ break;
49871+
49872+ default:
49873+ if (str <= end)
49874+ *str = '%';
49875+ ++str;
49876+ if (*fmt) {
49877+ if (str <= end)
49878+ *str = *fmt;
49879+ ++str;
49880+ } else {
49881+ --fmt;
49882+ }
49883+ continue;
49884+ }
49885+ if (qualifier == 'L')
49886+ num = va_arg(args, long long);
49887+ else if (qualifier == 'l') {
49888+ num = va_arg(args, unsigned long);
49889+ if (flags & _kc_SIGN)
49890+ num = (signed long) num;
49891+ } else if (qualifier == 'Z') {
49892+ num = va_arg(args, size_t);
49893+ } else if (qualifier == 'h') {
49894+ num = (unsigned short) va_arg(args, int);
49895+ if (flags & _kc_SIGN)
49896+ num = (signed short) num;
49897+ } else {
49898+ num = va_arg(args, unsigned int);
49899+ if (flags & _kc_SIGN)
49900+ num = (signed int) num;
49901+ }
49902+ str = number(str, end, num, base,
49903+ field_width, precision, flags);
49904+ }
49905+ if (str <= end)
49906+ *str = '\0';
49907+ else if (size > 0)
49908+ /* don't write out a null byte if the buf size is zero */
49909+ *end = '\0';
49910+ /* the trailing null byte doesn't count towards the total
49911+ * ++str;
49912+ */
49913+ return str-buf;
49914+}
49915+
49916+int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
49917+{
49918+ va_list args;
49919+ int i;
49920+
49921+ va_start(args, fmt);
49922+ i = _kc_vsnprintf(buf,size,fmt,args);
49923+ va_end(args);
49924+ return i;
49925+}
49926+#endif /* < 2.4.8 */
49927+
49928+/*****************************************************************************/
49929+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
49930+
49931+/**************************************/
49932+/* PCI DMA MAPPING */
49933+
49934+#if defined(CONFIG_HIGHMEM)
49935+
49936+#ifndef PCI_DRAM_OFFSET
49937+#define PCI_DRAM_OFFSET 0
49938+#endif
49939+
49940+u64
49941+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
49942+ size_t size, int direction)
49943+{
49944+ return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
49945+ PCI_DRAM_OFFSET);
49946+}
49947+
49948+#else /* CONFIG_HIGHMEM */
49949+
49950+u64
49951+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
49952+ size_t size, int direction)
49953+{
49954+ return pci_map_single(dev, (void *)page_address(page) + offset, size,
49955+ direction);
49956+}
49957+
49958+#endif /* CONFIG_HIGHMEM */
49959+
49960+void
49961+_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
49962+ int direction)
49963+{
49964+ return pci_unmap_single(dev, dma_addr, size, direction);
49965+}
49966+
49967+#endif /* 2.4.13 => 2.4.3 */
49968+
49969+/*****************************************************************************/
49970+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
49971+
49972+/**************************************/
49973+/* PCI DRIVER API */
49974+
49975+int
49976+_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
49977+{
49978+ if (!pci_dma_supported(dev, mask))
49979+ return -EIO;
49980+ dev->dma_mask = mask;
49981+ return 0;
49982+}
49983+
49984+int
49985+_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
49986+{
49987+ int i;
49988+
49989+ for (i = 0; i < 6; i++) {
49990+ if (pci_resource_len(dev, i) == 0)
49991+ continue;
49992+
49993+ if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
49994+ if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
49995+ pci_release_regions(dev);
49996+ return -EBUSY;
49997+ }
49998+ } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
49999+ if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
50000+ pci_release_regions(dev);
50001+ return -EBUSY;
50002+ }
50003+ }
50004+ }
50005+ return 0;
50006+}
50007+
50008+void
50009+_kc_pci_release_regions(struct pci_dev *dev)
50010+{
50011+ int i;
50012+
50013+ for (i = 0; i < 6; i++) {
50014+ if (pci_resource_len(dev, i) == 0)
50015+ continue;
50016+
50017+ if (pci_resource_flags(dev, i) & IORESOURCE_IO)
50018+ release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
50019+
50020+ else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
50021+ release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
50022+ }
50023+}
50024+
50025+/**************************************/
50026+/* NETWORK DRIVER API */
50027+
50028+struct net_device *
50029+_kc_alloc_etherdev(int sizeof_priv)
50030+{
50031+ struct net_device *dev;
50032+ int alloc_size;
50033+
50034+ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
50035+ dev = kzalloc(alloc_size, GFP_KERNEL);
50036+ if (!dev)
50037+ return NULL;
50038+
50039+ if (sizeof_priv)
50040+ dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
50041+ dev->name[0] = '\0';
50042+ ether_setup(dev);
50043+
50044+ return dev;
50045+}
50046+
50047+int
50048+_kc_is_valid_ether_addr(u8 *addr)
50049+{
50050+ const char zaddr[6] = { 0, };
50051+
50052+ return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
50053+}
50054+
50055+#endif /* 2.4.3 => 2.4.0 */
50056+
50057+/*****************************************************************************/
50058+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
50059+
50060+int
50061+_kc_pci_set_power_state(struct pci_dev *dev, int state)
50062+{
50063+ return 0;
50064+}
50065+
50066+int
50067+_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
50068+{
50069+ return 0;
50070+}
50071+
50072+#endif /* 2.4.6 => 2.4.3 */
50073+
50074+/*****************************************************************************/
50075+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
50076+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
50077+ int off, int size)
50078+{
50079+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
50080+ frag->page = page;
50081+ frag->page_offset = off;
50082+ frag->size = size;
50083+ skb_shinfo(skb)->nr_frags = i + 1;
50084+}
50085+
50086+/*
50087+ * Original Copyright:
50088+ * find_next_bit.c: fallback find next bit implementation
50089+ *
50090+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
50091+ * Written by David Howells (dhowells@redhat.com)
50092+ */
50093+
50094+/**
50095+ * find_next_bit - find the next set bit in a memory region
50096+ * @addr: The address to base the search on
50097+ * @offset: The bitnumber to start searching at
50098+ * @size: The maximum size to search
50099+ */
50100+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
50101+ unsigned long offset)
50102+{
50103+ const unsigned long *p = addr + BITOP_WORD(offset);
50104+ unsigned long result = offset & ~(BITS_PER_LONG-1);
50105+ unsigned long tmp;
50106+
50107+ if (offset >= size)
50108+ return size;
50109+ size -= result;
50110+ offset %= BITS_PER_LONG;
50111+ if (offset) {
50112+ tmp = *(p++);
50113+ tmp &= (~0UL << offset);
50114+ if (size < BITS_PER_LONG)
50115+ goto found_first;
50116+ if (tmp)
50117+ goto found_middle;
50118+ size -= BITS_PER_LONG;
50119+ result += BITS_PER_LONG;
50120+ }
50121+ while (size & ~(BITS_PER_LONG-1)) {
50122+ if ((tmp = *(p++)))
50123+ goto found_middle;
50124+ result += BITS_PER_LONG;
50125+ size -= BITS_PER_LONG;
50126+ }
50127+ if (!size)
50128+ return result;
50129+ tmp = *p;
50130+
50131+found_first:
50132+ tmp &= (~0UL >> (BITS_PER_LONG - size));
50133+ if (tmp == 0UL) /* Are any bits set? */
50134+ return result + size; /* Nope. */
50135+found_middle:
50136+ return result + ffs(tmp);
50137+}
50138+
50139+size_t _kc_strlcpy(char *dest, const char *src, size_t size)
50140+{
50141+ size_t ret = strlen(src);
50142+
50143+ if (size) {
50144+ size_t len = (ret >= size) ? size - 1 : ret;
50145+ memcpy(dest, src, len);
50146+ dest[len] = '\0';
50147+ }
50148+ return ret;
50149+}
50150+
50151+#ifndef do_div
50152+#if BITS_PER_LONG == 32
50153+uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base)
50154+{
50155+ uint64_t rem = *n;
50156+ uint64_t b = base;
50157+ uint64_t res, d = 1;
50158+ uint32_t high = rem >> 32;
50159+
50160+ /* Reduce the thing a bit first */
50161+ res = 0;
50162+ if (high >= base) {
50163+ high /= base;
50164+ res = (uint64_t) high << 32;
50165+ rem -= (uint64_t) (high*base) << 32;
50166+ }
50167+
50168+ while ((int64_t)b > 0 && b < rem) {
50169+ b = b+b;
50170+ d = d+d;
50171+ }
50172+
50173+ do {
50174+ if (rem >= b) {
50175+ rem -= b;
50176+ res += d;
50177+ }
50178+ b >>= 1;
50179+ d >>= 1;
50180+ } while (d);
50181+
50182+ *n = res;
50183+ return rem;
50184+}
50185+#endif /* BITS_PER_LONG == 32 */
50186+#endif /* do_div */
50187+#endif /* 2.6.0 => 2.4.6 */
50188+
50189+/*****************************************************************************/
50190+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
50191+int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
50192+{
50193+ va_list args;
50194+ int i;
50195+
50196+ va_start(args, fmt);
50197+ i = vsnprintf(buf, size, fmt, args);
50198+ va_end(args);
50199+ return (i >= size) ? (size - 1) : i;
50200+}
50201+#endif /* < 2.6.4 */
50202+
50203+/*****************************************************************************/
50204+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
50205+DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
50206+#endif /* < 2.6.10 */
50207+
50208+/*****************************************************************************/
50209+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
50210+char *_kc_kstrdup(const char *s, unsigned int gfp)
50211+{
50212+ size_t len;
50213+ char *buf;
50214+
50215+ if (!s)
50216+ return NULL;
50217+
50218+ len = strlen(s) + 1;
50219+ buf = kmalloc(len, gfp);
50220+ if (buf)
50221+ memcpy(buf, s, len);
50222+ return buf;
50223+}
50224+#endif /* < 2.6.13 */
50225+
50226+/*****************************************************************************/
50227+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
50228+void *_kc_kzalloc(size_t size, int flags)
50229+{
50230+ void *ret = kmalloc(size, flags);
50231+ if (ret)
50232+ memset(ret, 0, size);
50233+ return ret;
50234+}
50235+#endif /* <= 2.6.13 */
50236+
50237+/*****************************************************************************/
50238+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
50239+int _kc_skb_pad(struct sk_buff *skb, int pad)
50240+{
50241+ int ntail;
50242+
50243+ /* If the skbuff is non linear tailroom is always zero.. */
50244+ if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
50245+ memset(skb->data+skb->len, 0, pad);
50246+ return 0;
50247+ }
50248+
50249+ ntail = skb->data_len + pad - (skb->end - skb->tail);
50250+ if (likely(skb_cloned(skb) || ntail > 0)) {
50251+ if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
50252+ goto free_skb;
50253+ }
50254+
50255+#ifdef MAX_SKB_FRAGS
50256+ if (skb_is_nonlinear(skb) &&
50257+ !__pskb_pull_tail(skb, skb->data_len))
50258+ goto free_skb;
50259+
50260+#endif
50261+ memset(skb->data + skb->len, 0, pad);
50262+ return 0;
50263+
50264+free_skb:
50265+ kfree_skb(skb);
50266+ return -ENOMEM;
50267+}
50268+
50269+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
50270+int _kc_pci_save_state(struct pci_dev *pdev)
50271+{
50272+ struct adapter_struct *adapter = pci_get_drvdata(pdev);
50273+ int size = PCI_CONFIG_SPACE_LEN, i;
50274+ u16 pcie_cap_offset, pcie_link_status;
50275+
50276+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
50277+ /* no ->dev for 2.4 kernels */
50278+ WARN_ON(pdev->dev.driver_data == NULL);
50279+#endif
50280+ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
50281+ if (pcie_cap_offset) {
50282+ if (!pci_read_config_word(pdev,
50283+ pcie_cap_offset + PCIE_LINK_STATUS,
50284+ &pcie_link_status))
50285+ size = PCIE_CONFIG_SPACE_LEN;
50286+ }
50287+ pci_config_space_ich8lan();
50288+#ifdef HAVE_PCI_ERS
50289+ if (adapter->config_space == NULL)
50290+#else
50291+ WARN_ON(adapter->config_space != NULL);
50292+#endif
50293+ adapter->config_space = kmalloc(size, GFP_KERNEL);
50294+ if (!adapter->config_space) {
50295+ printk(KERN_ERR "Out of memory in pci_save_state\n");
50296+ return -ENOMEM;
50297+ }
50298+ for (i = 0; i < (size / 4); i++)
50299+ pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
50300+ return 0;
50301+}
50302+
50303+void _kc_pci_restore_state(struct pci_dev *pdev)
50304+{
50305+ struct adapter_struct *adapter = pci_get_drvdata(pdev);
50306+ int size = PCI_CONFIG_SPACE_LEN, i;
50307+ u16 pcie_cap_offset;
50308+ u16 pcie_link_status;
50309+
50310+ if (adapter->config_space != NULL) {
50311+ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
50312+ if (pcie_cap_offset &&
50313+ !pci_read_config_word(pdev,
50314+ pcie_cap_offset + PCIE_LINK_STATUS,
50315+ &pcie_link_status))
50316+ size = PCIE_CONFIG_SPACE_LEN;
50317+
50318+ pci_config_space_ich8lan();
50319+ for (i = 0; i < (size / 4); i++)
50320+ pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
50321+#ifndef HAVE_PCI_ERS
50322+ kfree(adapter->config_space);
50323+ adapter->config_space = NULL;
50324+#endif
50325+ }
50326+}
50327+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
50328+
50329+#ifdef HAVE_PCI_ERS
50330+void _kc_free_netdev(struct net_device *netdev)
50331+{
50332+ struct adapter_struct *adapter = netdev_priv(netdev);
50333+
50334+ kfree(adapter->config_space);
50335+#ifdef CONFIG_SYSFS
50336+ if (netdev->reg_state == NETREG_UNINITIALIZED) {
50337+ kfree((char *)netdev - netdev->padded);
50338+ } else {
50339+ BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
50340+ netdev->reg_state = NETREG_RELEASED;
50341+ class_device_put(&netdev->class_dev);
50342+ }
50343+#else
50344+ kfree((char *)netdev - netdev->padded);
50345+#endif
50346+}
50347+#endif
50348+
50349+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
50350+{
50351+ void *p;
50352+
50353+ p = kzalloc(len, gfp);
50354+ if (p)
50355+ memcpy(p, src, len);
50356+ return p;
50357+}
50358+#endif /* <= 2.6.19 */
50359+
50360+/*****************************************************************************/
50361+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
50362+struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev)
50363+{
50364+ return ((struct adapter_struct *)netdev_priv(netdev))->pdev;
50365+}
50366+#endif /* < 2.6.21 */
50367+
50368+/*****************************************************************************/
50369+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
50370+/* hexdump code taken from lib/hexdump.c */
50371+static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
50372+ int groupsize, unsigned char *linebuf,
50373+ size_t linebuflen, bool ascii)
50374+{
50375+ const u8 *ptr = buf;
50376+ u8 ch;
50377+ int j, lx = 0;
50378+ int ascii_column;
50379+
50380+ if (rowsize != 16 && rowsize != 32)
50381+ rowsize = 16;
50382+
50383+ if (!len)
50384+ goto nil;
50385+ if (len > rowsize) /* limit to one line at a time */
50386+ len = rowsize;
50387+ if ((len % groupsize) != 0) /* no mixed size output */
50388+ groupsize = 1;
50389+
50390+ switch (groupsize) {
50391+ case 8: {
50392+ const u64 *ptr8 = buf;
50393+ int ngroups = len / groupsize;
50394+
50395+ for (j = 0; j < ngroups; j++)
50396+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
50397+ "%s%16.16llx", j ? " " : "",
50398+ (unsigned long long)*(ptr8 + j));
50399+ ascii_column = 17 * ngroups + 2;
50400+ break;
50401+ }
50402+
50403+ case 4: {
50404+ const u32 *ptr4 = buf;
50405+ int ngroups = len / groupsize;
50406+
50407+ for (j = 0; j < ngroups; j++)
50408+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
50409+ "%s%8.8x", j ? " " : "", *(ptr4 + j));
50410+ ascii_column = 9 * ngroups + 2;
50411+ break;
50412+ }
50413+
50414+ case 2: {
50415+ const u16 *ptr2 = buf;
50416+ int ngroups = len / groupsize;
50417+
50418+ for (j = 0; j < ngroups; j++)
50419+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
50420+ "%s%4.4x", j ? " " : "", *(ptr2 + j));
50421+ ascii_column = 5 * ngroups + 2;
50422+ break;
50423+ }
50424+
50425+ default:
50426+ for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
50427+ ch = ptr[j];
50428+ linebuf[lx++] = hex_asc(ch >> 4);
50429+ linebuf[lx++] = hex_asc(ch & 0x0f);
50430+ linebuf[lx++] = ' ';
50431+ }
50432+ if (j)
50433+ lx--;
50434+
50435+ ascii_column = 3 * rowsize + 2;
50436+ break;
50437+ }
50438+ if (!ascii)
50439+ goto nil;
50440+
50441+ while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
50442+ linebuf[lx++] = ' ';
50443+ for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
50444+ linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
50445+ : '.';
50446+nil:
50447+ linebuf[lx++] = '\0';
50448+}
50449+
50450+void _kc_print_hex_dump(const char *level,
50451+ const char *prefix_str, int prefix_type,
50452+ int rowsize, int groupsize,
50453+ const void *buf, size_t len, bool ascii)
50454+{
50455+ const u8 *ptr = buf;
50456+ int i, linelen, remaining = len;
50457+ unsigned char linebuf[200];
50458+
50459+ if (rowsize != 16 && rowsize != 32)
50460+ rowsize = 16;
50461+
50462+ for (i = 0; i < len; i += rowsize) {
50463+ linelen = min(remaining, rowsize);
50464+ remaining -= rowsize;
50465+ _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
50466+ linebuf, sizeof(linebuf), ascii);
50467+
50468+ switch (prefix_type) {
50469+ case DUMP_PREFIX_ADDRESS:
50470+ printk("%s%s%*p: %s\n", level, prefix_str,
50471+ (int)(2 * sizeof(void *)), ptr + i, linebuf);
50472+ break;
50473+ case DUMP_PREFIX_OFFSET:
50474+ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
50475+ break;
50476+ default:
50477+ printk("%s%s%s\n", level, prefix_str, linebuf);
50478+ break;
50479+ }
50480+ }
50481+}
50482+
50483+#endif /* < 2.6.22 */
50484+
50485+/*****************************************************************************/
50486+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
50487+#ifdef NAPI
50488+struct net_device *napi_to_poll_dev(const struct napi_struct *napi)
50489+{
50490+ struct adapter_q_vector *q_vector = container_of(napi,
50491+ struct adapter_q_vector,
50492+ napi);
50493+ return &q_vector->poll_dev;
50494+}
50495+
50496+int __kc_adapter_clean(struct net_device *netdev, int *budget)
50497+{
50498+ int work_done;
50499+ int work_to_do = min(*budget, netdev->quota);
50500+ /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
50501+ struct napi_struct *napi = netdev->priv;
50502+ work_done = napi->poll(napi, work_to_do);
50503+ *budget -= work_done;
50504+ netdev->quota -= work_done;
50505+ return (work_done >= work_to_do) ? 1 : 0;
50506+}
50507+#endif /* NAPI */
50508+#endif /* <= 2.6.24 */
50509+
50510+/*****************************************************************************/
50511+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
50512+void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
50513+{
50514+ struct pci_dev *parent = pdev->bus->self;
50515+ u16 link_state;
50516+ int pos;
50517+
50518+ if (!parent)
50519+ return;
50520+
50521+ pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
50522+ if (pos) {
50523+ pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
50524+ link_state &= ~state;
50525+ pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
50526+ }
50527+}
50528+#endif /* < 2.6.26 */
50529+
50530+/*****************************************************************************/
50531+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
50532+#ifdef HAVE_TX_MQ
50533+void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
50534+{
50535+ struct adapter_struct *adapter = netdev_priv(netdev);
50536+ int i;
50537+
50538+ netif_stop_queue(netdev);
50539+ if (netif_is_multiqueue(netdev))
50540+ for (i = 0; i < adapter->num_tx_queues; i++)
50541+ netif_stop_subqueue(netdev, i);
50542+}
50543+void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
50544+{
50545+ struct adapter_struct *adapter = netdev_priv(netdev);
50546+ int i;
50547+
50548+ netif_wake_queue(netdev);
50549+ if (netif_is_multiqueue(netdev))
50550+ for (i = 0; i < adapter->num_tx_queues; i++)
50551+ netif_wake_subqueue(netdev, i);
50552+}
50553+void _kc_netif_tx_start_all_queues(struct net_device *netdev)
50554+{
50555+ struct adapter_struct *adapter = netdev_priv(netdev);
50556+ int i;
50557+
50558+ netif_start_queue(netdev);
50559+ if (netif_is_multiqueue(netdev))
50560+ for (i = 0; i < adapter->num_tx_queues; i++)
50561+ netif_start_subqueue(netdev, i);
50562+}
50563+#endif /* HAVE_TX_MQ */
50564+
50565+void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
50566+{
50567+ va_list args;
50568+
50569+ printk(KERN_WARNING "------------[ cut here ]------------\n");
50570+ printk(KERN_WARNING "WARNING: at %s:%d \n", file, line);
50571+ va_start(args, fmt);
50572+ vprintk(fmt, args);
50573+ va_end(args);
50574+
50575+ dump_stack();
50576+}
50577+#endif /* __VMKLNX__ */
50578+
50579+/*****************************************************************************/
50580+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
50581+
50582+int
50583+_kc_pci_prepare_to_sleep(struct pci_dev *dev)
50584+{
50585+ pci_power_t target_state;
50586+ int error;
50587+
50588+ target_state = pci_choose_state(dev, PMSG_SUSPEND);
50589+
50590+ pci_enable_wake(dev, target_state, true);
50591+
50592+ error = pci_set_power_state(dev, target_state);
50593+
50594+ if (error)
50595+ pci_enable_wake(dev, target_state, false);
50596+
50597+ return error;
50598+}
50599+
50600+int
50601+_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
50602+{
50603+ int err;
50604+
50605+ err = pci_enable_wake(dev, PCI_D3cold, enable);
50606+ if (err)
50607+ goto out;
50608+
50609+ err = pci_enable_wake(dev, PCI_D3hot, enable);
50610+
50611+out:
50612+ return err;
50613+}
50614+#endif /* < 2.6.28 */
50615+
50616+/*****************************************************************************/
50617+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
50618+static void __kc_pci_set_master(struct pci_dev *pdev, bool enable)
50619+{
50620+ u16 old_cmd, cmd;
50621+
50622+ pci_read_config_word(pdev, PCI_COMMAND, &old_cmd);
50623+ if (enable)
50624+ cmd = old_cmd | PCI_COMMAND_MASTER;
50625+ else
50626+ cmd = old_cmd & ~PCI_COMMAND_MASTER;
50627+ if (cmd != old_cmd) {
50628+ dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n",
50629+ enable ? "enabling" : "disabling");
50630+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
50631+ }
50632+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) )
50633+ pdev->is_busmaster = enable;
50634+#endif
50635+}
50636+
50637+void _kc_pci_clear_master(struct pci_dev *dev)
50638+{
50639+ __kc_pci_set_master(dev, false);
50640+}
50641+#endif /* < 2.6.29 */
50642+
50643+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
50644+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
50645+int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev)
50646+{
50647+ int num_vf = 0;
50648+#ifdef CONFIG_PCI_IOV
50649+ struct pci_dev *vfdev;
50650+
50651+ /* loop through all ethernet devices starting at PF dev */
50652+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
50653+ while (vfdev) {
50654+ if (vfdev->is_virtfn && vfdev->physfn == dev)
50655+ num_vf++;
50656+
50657+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
50658+ }
50659+
50660+#endif
50661+ return num_vf;
50662+}
50663+#endif /* RHEL_RELEASE_CODE */
50664+#endif /* < 2.6.34 */
50665+
50666+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
50667+#ifdef HAVE_TX_MQ
50668+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
50669+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
50670+int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
50671+{
50672+ unsigned int real_num = dev->real_num_tx_queues;
50673+ struct Qdisc *qdisc;
50674+ int i;
50675+
50676+ if (txq < 1 || txq > dev->num_tx_queues)
50677+ return -EINVAL;
50678+
50679+ else if (txq > real_num)
50680+ dev->real_num_tx_queues = txq;
50681+ else if (txq < real_num) {
50682+ dev->real_num_tx_queues = txq;
50683+ for (i = txq; i < dev->num_tx_queues; i++) {
50684+ qdisc = netdev_get_tx_queue(dev, i)->qdisc;
50685+ if (qdisc) {
50686+ spin_lock_bh(qdisc_lock(qdisc));
50687+ qdisc_reset(qdisc);
50688+ spin_unlock_bh(qdisc_lock(qdisc));
50689+ }
50690+ }
50691+ }
50692+
50693+ return 0;
50694+}
50695+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
50696+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
50697+#endif /* HAVE_TX_MQ */
50698+
50699+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
50700+ const void __user *from, size_t count)
50701+{
50702+ loff_t pos = *ppos;
50703+ size_t res;
50704+
50705+ if (pos < 0)
50706+ return -EINVAL;
50707+ if (pos >= available || !count)
50708+ return 0;
50709+ if (count > available - pos)
50710+ count = available - pos;
50711+ res = copy_from_user(to + pos, from, count);
50712+ if (res == count)
50713+ return -EFAULT;
50714+ count -= res;
50715+ *ppos = pos + count;
50716+ return count;
50717+}
50718+
50719+#endif /* < 2.6.35 */
50720+
50721+/*****************************************************************************/
50722+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
50723+static const u32 _kc_flags_dup_features =
50724+ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
50725+
50726+u32 _kc_ethtool_op_get_flags(struct net_device *dev)
50727+{
50728+ return dev->features & _kc_flags_dup_features;
50729+}
50730+
50731+int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
50732+{
50733+ if (data & ~supported)
50734+ return -EINVAL;
50735+
50736+ dev->features = ((dev->features & ~_kc_flags_dup_features) |
50737+ (data & _kc_flags_dup_features));
50738+ return 0;
50739+}
50740+#endif /* < 2.6.36 */
50741+
50742+/******************************************************************************/
50743+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
50744+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
50745+#ifdef HAVE_NETDEV_SELECT_QUEUE
50746+#include <net/ip.h>
50747+#include <linux/pkt_sched.h>
50748+
50749+u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb,
50750+ u16 num_tx_queues)
50751+{
50752+ u32 hash;
50753+ u16 qoffset = 0;
50754+ u16 qcount = num_tx_queues;
50755+
50756+ if (skb_rx_queue_recorded(skb)) {
50757+ hash = skb_get_rx_queue(skb);
50758+ while (unlikely(hash >= num_tx_queues))
50759+ hash -= num_tx_queues;
50760+ return hash;
50761+ }
50762+
50763+ if (skb->sk && skb->sk->sk_hash)
50764+ hash = skb->sk->sk_hash;
50765+ else
50766+#ifdef NETIF_F_RXHASH
50767+ hash = (__force u16) skb->protocol ^ skb->rxhash;
50768+#else
50769+ hash = skb->protocol;
50770+#endif
50771+
50772+ hash = jhash_1word(hash, _kc_hashrnd);
50773+
50774+ return (u16) (((u64) hash * qcount) >> 32) + qoffset;
50775+}
50776+#endif /* HAVE_NETDEV_SELECT_QUEUE */
50777+
50778+u8 _kc_netdev_get_num_tc(struct net_device *dev)
50779+{
50780+ struct i40e_netdev_priv *np = netdev_priv(dev);
50781+ struct i40e_vsi *vsi = np->vsi;
50782+ struct i40e_pf *pf = vsi->back;
50783+ if (pf->flags & I40E_FLAG_DCB_ENABLED)
50784+ return vsi->tc_config.numtc;
50785+
50786+ return 0;
50787+}
50788+
50789+int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc)
50790+{
50791+ struct i40e_netdev_priv *np = netdev_priv(dev);
50792+ struct i40e_vsi *vsi = np->vsi;
50793+
50794+ if (num_tc > I40E_MAX_TRAFFIC_CLASS)
50795+ return -EINVAL;
50796+
50797+ vsi->tc_config.numtc = num_tc;
50798+
50799+ return 0;
50800+}
50801+
50802+u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up)
50803+{
50804+ struct i40e_netdev_priv *np = netdev_priv(dev);
50805+ struct i40e_vsi *vsi = np->vsi;
50806+ struct i40e_pf *pf = vsi->back;
50807+ struct i40e_hw *hw = &pf->hw;
50808+ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
50809+
50810+ return dcbcfg->etscfg.prioritytable[up];
50811+}
50812+
50813+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
50814+#endif /* < 2.6.39 */
50815+
50816+/******************************************************************************/
50817+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
50818+void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
50819+ int off, int size, unsigned int truesize)
50820+{
50821+ skb_fill_page_desc(skb, i, page, off, size);
50822+ skb->len += size;
50823+ skb->data_len += size;
50824+ skb->truesize += truesize;
50825+}
50826+
50827+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
50828+int _kc_simple_open(struct inode *inode, struct file *file)
50829+{
50830+ if (inode->i_private)
50831+ file->private_data = inode->i_private;
50832+
50833+ return 0;
50834+}
50835+#endif /* SLE_VERSION < 11,3,0 */
50836+
50837+#endif /* < 3.4.0 */
50838+
50839+/******************************************************************************/
50840+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
50841+static inline int __kc_pcie_cap_version(struct pci_dev *dev)
50842+{
50843+ int pos;
50844+ u16 reg16;
50845+
50846+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
50847+ if (!pos)
50848+ return 0;
50849+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
50850+ return reg16 & PCI_EXP_FLAGS_VERS;
50851+}
50852+
50853+static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev)
50854+{
50855+ return true;
50856+}
50857+
50858+static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev)
50859+{
50860+ int type = pci_pcie_type(dev);
50861+
50862+ return __kc_pcie_cap_version(dev) > 1 ||
50863+ type == PCI_EXP_TYPE_ROOT_PORT ||
50864+ type == PCI_EXP_TYPE_ENDPOINT ||
50865+ type == PCI_EXP_TYPE_LEG_END;
50866+}
50867+
50868+static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev)
50869+{
50870+ int type = pci_pcie_type(dev);
50871+ int pos;
50872+ u16 pcie_flags_reg;
50873+
50874+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
50875+ if (!pos)
50876+ return false;
50877+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg);
50878+
50879+ return __kc_pcie_cap_version(dev) > 1 ||
50880+ type == PCI_EXP_TYPE_ROOT_PORT ||
50881+ (type == PCI_EXP_TYPE_DOWNSTREAM &&
50882+ pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
50883+}
50884+
50885+static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev)
50886+{
50887+ int type = pci_pcie_type(dev);
50888+
50889+ return __kc_pcie_cap_version(dev) > 1 ||
50890+ type == PCI_EXP_TYPE_ROOT_PORT ||
50891+ type == PCI_EXP_TYPE_RC_EC;
50892+}
50893+
50894+static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
50895+{
50896+ if (!pci_is_pcie(dev))
50897+ return false;
50898+
50899+ switch (pos) {
50900+ case PCI_EXP_FLAGS_TYPE:
50901+ return true;
50902+ case PCI_EXP_DEVCAP:
50903+ case PCI_EXP_DEVCTL:
50904+ case PCI_EXP_DEVSTA:
50905+ return __kc_pcie_cap_has_devctl(dev);
50906+ case PCI_EXP_LNKCAP:
50907+ case PCI_EXP_LNKCTL:
50908+ case PCI_EXP_LNKSTA:
50909+ return __kc_pcie_cap_has_lnkctl(dev);
50910+ case PCI_EXP_SLTCAP:
50911+ case PCI_EXP_SLTCTL:
50912+ case PCI_EXP_SLTSTA:
50913+ return __kc_pcie_cap_has_sltctl(dev);
50914+ case PCI_EXP_RTCTL:
50915+ case PCI_EXP_RTCAP:
50916+ case PCI_EXP_RTSTA:
50917+ return __kc_pcie_cap_has_rtctl(dev);
50918+ case PCI_EXP_DEVCAP2:
50919+ case PCI_EXP_DEVCTL2:
50920+ case PCI_EXP_LNKCAP2:
50921+ case PCI_EXP_LNKCTL2:
50922+ case PCI_EXP_LNKSTA2:
50923+ return __kc_pcie_cap_version(dev) > 1;
50924+ default:
50925+ return false;
50926+ }
50927+}
50928+
50929+/*
50930+ * Note that these accessor functions are only for the "PCI Express
50931+ * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
50932+ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
50933+ */
50934+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
50935+{
50936+ int ret;
50937+
50938+ *val = 0;
50939+ if (pos & 1)
50940+ return -EINVAL;
50941+
50942+ if (__kc_pcie_capability_reg_implemented(dev, pos)) {
50943+ ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
50944+ /*
50945+ * Reset *val to 0 if pci_read_config_word() fails, it may
50946+ * have been written as 0xFFFF if hardware error happens
50947+ * during pci_read_config_word().
50948+ */
50949+ if (ret)
50950+ *val = 0;
50951+ return ret;
50952+ }
50953+
50954+ /*
50955+ * For Functions that do not implement the Slot Capabilities,
50956+ * Slot Status, and Slot Control registers, these spaces must
50957+ * be hardwired to 0b, with the exception of the Presence Detect
50958+ * State bit in the Slot Status register of Downstream Ports,
50959+ * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
50960+ */
50961+ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
50962+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
50963+ *val = PCI_EXP_SLTSTA_PDS;
50964+ }
50965+
50966+ return 0;
50967+}
50968+
50969+int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
50970+{
50971+ int ret;
50972+
50973+ *val = 0;
50974+ if (pos & 3)
50975+ return -EINVAL;
50976+
50977+ if (__kc_pcie_capability_reg_implemented(dev, pos)) {
50978+ ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
50979+ /*
50980+ * Reset *val to 0 if pci_read_config_dword() fails, it may
50981+ * have been written as 0xFFFFFFFF if hardware error happens
50982+ * during pci_read_config_dword().
50983+ */
50984+ if (ret)
50985+ *val = 0;
50986+ return ret;
50987+ }
50988+
50989+ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
50990+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
50991+ *val = PCI_EXP_SLTSTA_PDS;
50992+ }
50993+
50994+ return 0;
50995+}
50996+
50997+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
50998+{
50999+ if (pos & 1)
51000+ return -EINVAL;
51001+
51002+ if (!__kc_pcie_capability_reg_implemented(dev, pos))
51003+ return 0;
51004+
51005+ return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
51006+}
51007+
51008+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
51009+ u16 clear, u16 set)
51010+{
51011+ int ret;
51012+ u16 val;
51013+
51014+ ret = __kc_pcie_capability_read_word(dev, pos, &val);
51015+ if (!ret) {
51016+ val &= ~clear;
51017+ val |= set;
51018+ ret = __kc_pcie_capability_write_word(dev, pos, val);
51019+ }
51020+
51021+ return ret;
51022+}
51023+
51024+int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos,
51025+ u16 clear)
51026+{
51027+ return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0);
51028+}
51029+#endif /* < 3.7.0 */
51030+
51031+/******************************************************************************/
51032+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
51033+#ifdef CONFIG_XPS
51034+#if NR_CPUS < 64
51035+#define _KC_MAX_XPS_CPUS NR_CPUS
51036+#else
51037+#define _KC_MAX_XPS_CPUS 64
51038+#endif
51039+
51040+/*
51041+ * netdev_queue sysfs structures and functions.
51042+ */
51043+struct _kc_netdev_queue_attribute {
51044+ struct attribute attr;
51045+ ssize_t (*show)(struct netdev_queue *queue,
51046+ struct _kc_netdev_queue_attribute *attr, char *buf);
51047+ ssize_t (*store)(struct netdev_queue *queue,
51048+ struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len);
51049+};
51050+
51051+#define to_kc_netdev_queue_attr(_attr) container_of(_attr, \
51052+ struct _kc_netdev_queue_attribute, attr)
51053+
51054+int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
51055+ u16 index)
51056+{
51057+ struct netdev_queue *txq = netdev_get_tx_queue(dev, index);
51058+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
51059+ /* Redhat requires some odd extended netdev structures */
51060+ struct netdev_tx_queue_extended *txq_ext =
51061+ netdev_extended(dev)->_tx_ext + index;
51062+ struct kobj_type *ktype = txq_ext->kobj.ktype;
51063+#else
51064+ struct kobj_type *ktype = txq->kobj.ktype;
51065+#endif
51066+ struct _kc_netdev_queue_attribute *xps_attr;
51067+ struct attribute *attr = NULL;
51068+ int i, len, err;
51069+#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9)
51070+ char buf[_KC_XPS_BUFLEN];
51071+
51072+ if (!ktype)
51073+ return -ENOMEM;
51074+
51075+ /* attempt to locate the XPS attribute in the Tx queue */
51076+ for (i = 0; (attr = ktype->default_attrs[i]); i++) {
51077+ if (!strcmp("xps_cpus", attr->name))
51078+ break;
51079+ }
51080+
51081+ /* if we did not find it return an error */
51082+ if (!attr)
51083+ return -EINVAL;
51084+
51085+ /* copy the mask into a string */
51086+ len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN,
51087+ cpumask_bits(mask), _KC_MAX_XPS_CPUS);
51088+ if (!len)
51089+ return -ENOMEM;
51090+
51091+ xps_attr = to_kc_netdev_queue_attr(attr);
51092+
51093+ /* Store the XPS value using the SYSFS store call */
51094+ err = xps_attr->store(txq, xps_attr, buf, len);
51095+
51096+ /* we only had an error on err < 0 */
51097+ return (err < 0) ? err : 0;
51098+}
51099+#endif /* CONFIG_XPS */
51100+#ifdef HAVE_NETDEV_SELECT_QUEUE
51101+static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb)
51102+{
51103+#ifdef CONFIG_XPS
51104+ struct xps_dev_maps *dev_maps;
51105+ struct xps_map *map;
51106+ int queue_index = -1;
51107+
51108+ rcu_read_lock();
51109+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
51110+ /* Redhat requires some odd extended netdev structures */
51111+ dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps);
51112+#else
51113+ dev_maps = rcu_dereference(dev->xps_maps);
51114+#endif
51115+ if (dev_maps) {
51116+ map = rcu_dereference(
51117+ dev_maps->cpu_map[raw_smp_processor_id()]);
51118+ if (map) {
51119+ if (map->len == 1)
51120+ queue_index = map->queues[0];
51121+ else {
51122+ u32 hash;
51123+ if (skb->sk && skb->sk->sk_hash)
51124+ hash = skb->sk->sk_hash;
51125+ else
51126+ hash = (__force u16) skb->protocol ^
51127+ skb->rxhash;
51128+ hash = jhash_1word(hash, _kc_hashrnd);
51129+ queue_index = map->queues[
51130+ ((u64)hash * map->len) >> 32];
51131+ }
51132+ if (unlikely(queue_index >= dev->real_num_tx_queues))
51133+ queue_index = -1;
51134+ }
51135+ }
51136+ rcu_read_unlock();
51137+
51138+ return queue_index;
51139+#else
51140+ return -1;
51141+#endif
51142+}
51143+
51144+u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
51145+{
51146+ struct sock *sk = skb->sk;
51147+ int queue_index = sk_tx_queue_get(sk);
51148+ int new_index;
51149+
51150+ if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) {
51151+#ifdef CONFIG_XPS
51152+ if (!skb->ooo_okay)
51153+#endif
51154+ return queue_index;
51155+ }
51156+
51157+ new_index = kc_get_xps_queue(dev, skb);
51158+ if (new_index < 0)
51159+ new_index = skb_tx_hash(dev, skb);
51160+
51161+ if (queue_index != new_index && sk) {
51162+ struct dst_entry *dst =
51163+ rcu_dereference(sk->sk_dst_cache);
51164+
51165+ if (dst && skb_dst(skb) == dst)
51166+ sk_tx_queue_set(sk, new_index);
51167+
51168+ }
51169+
51170+ return new_index;
51171+}
51172+
51173+#endif /* HAVE_NETDEV_SELECT_QUEUE */
51174+#endif /* 3.9.0 */
51175+
51176+/*****************************************************************************/
51177+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
51178+#ifdef HAVE_FDB_OPS
51179+#ifdef USE_CONST_DEV_UC_CHAR
51180+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
51181+ struct net_device *dev, const unsigned char *addr,
51182+ u16 flags)
51183+#else
51184+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
51185+ unsigned char *addr, u16 flags)
51186+#endif
51187+{
51188+ int err = -EINVAL;
51189+
51190+ /* If aging addresses are supported device will need to
51191+ * implement its own handler for this.
51192+ */
51193+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
51194+ pr_info("%s: FDB only supports static addresses\n", dev->name);
51195+ return err;
51196+ }
51197+
51198+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
51199+ err = dev_uc_add_excl(dev, addr);
51200+ else if (is_multicast_ether_addr(addr))
51201+ err = dev_mc_add_excl(dev, addr);
51202+
51203+ /* Only return duplicate errors if NLM_F_EXCL is set */
51204+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
51205+ err = 0;
51206+
51207+ return err;
51208+}
51209+
51210+#ifdef USE_CONST_DEV_UC_CHAR
51211+#ifdef HAVE_FDB_DEL_NLATTR
51212+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
51213+ struct net_device *dev, const unsigned char *addr)
51214+#else
51215+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
51216+ const unsigned char *addr)
51217+#endif
51218+#else
51219+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
51220+ unsigned char *addr)
51221+#endif
51222+{
51223+ int err = -EINVAL;
51224+
51225+ /* If aging addresses are supported device will need to
51226+ * implement its own handler for this.
51227+ */
51228+ if (!(ndm->ndm_state & NUD_PERMANENT)) {
51229+ pr_info("%s: FDB only supports static addresses\n", dev->name);
51230+ return err;
51231+ }
51232+
51233+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
51234+ err = dev_uc_del(dev, addr);
51235+ else if (is_multicast_ether_addr(addr))
51236+ err = dev_mc_del(dev, addr);
51237+
51238+ return err;
51239+}
51240+
51241+#endif /* HAVE_FDB_OPS */
51242+#ifdef CONFIG_PCI_IOV
51243+int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev)
51244+{
51245+ unsigned int vfs_assigned = 0;
51246+#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
51247+ int pos;
51248+ struct pci_dev *vfdev;
51249+ unsigned short dev_id;
51250+
51251+ /* only search if we are a PF */
51252+ if (!dev->is_physfn)
51253+ return 0;
51254+
51255+ /* find SR-IOV capability */
51256+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
51257+ if (!pos)
51258+ return 0;
51259+
51260+ /*
51261+ * determine the device ID for the VFs, the vendor ID will be the
51262+ * same as the PF so there is no need to check for that one
51263+ */
51264+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
51265+
51266+ /* loop through all the VFs to see if we own any that are assigned */
51267+ vfdev = pci_get_device(dev->vendor, dev_id, NULL);
51268+ while (vfdev) {
51269+ /*
51270+ * It is considered assigned if it is a virtual function with
51271+ * our dev as the physical function and the assigned bit is set
51272+ */
51273+ if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
51274+ (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
51275+ vfs_assigned++;
51276+
51277+ vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
51278+ }
51279+
51280+#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
51281+ return vfs_assigned;
51282+}
51283+
51284+#endif /* CONFIG_PCI_IOV */
51285+#endif /* 3.10.0 */
51286+
51287+static const unsigned char __maybe_unused pcie_link_speed[] = {
51288+ PCI_SPEED_UNKNOWN, /* 0 */
51289+ PCIE_SPEED_2_5GT, /* 1 */
51290+ PCIE_SPEED_5_0GT, /* 2 */
51291+ PCIE_SPEED_8_0GT, /* 3 */
51292+ PCIE_SPEED_16_0GT, /* 4 */
51293+ PCI_SPEED_UNKNOWN, /* 5 */
51294+ PCI_SPEED_UNKNOWN, /* 6 */
51295+ PCI_SPEED_UNKNOWN, /* 7 */
51296+ PCI_SPEED_UNKNOWN, /* 8 */
51297+ PCI_SPEED_UNKNOWN, /* 9 */
51298+ PCI_SPEED_UNKNOWN, /* A */
51299+ PCI_SPEED_UNKNOWN, /* B */
51300+ PCI_SPEED_UNKNOWN, /* C */
51301+ PCI_SPEED_UNKNOWN, /* D */
51302+ PCI_SPEED_UNKNOWN, /* E */
51303+ PCI_SPEED_UNKNOWN /* F */
51304+};
51305+
51306+/*****************************************************************************/
51307+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
51308+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
51309+ enum pcie_link_width *width)
51310+{
51311+ int ret;
51312+
51313+ *speed = PCI_SPEED_UNKNOWN;
51314+ *width = PCIE_LNK_WIDTH_UNKNOWN;
51315+
51316+ while (dev) {
51317+ u16 lnksta;
51318+ enum pci_bus_speed next_speed;
51319+ enum pcie_link_width next_width;
51320+
51321+ ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
51322+ if (ret)
51323+ return ret;
51324+
51325+ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
51326+ next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
51327+ PCI_EXP_LNKSTA_NLW_SHIFT;
51328+
51329+ if (next_speed < *speed)
51330+ *speed = next_speed;
51331+
51332+ if (next_width < *width)
51333+ *width = next_width;
51334+
51335+ dev = dev->bus->self;
51336+ }
51337+
51338+ return 0;
51339+}
51340+
51341+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7))
51342+int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev)
51343+{
51344+ int i;
51345+ u16 status;
51346+
51347+ /* Wait for Transaction Pending bit clean */
51348+ for (i = 0; i < 4; i++) {
51349+ if (i)
51350+ msleep((1 << (i - 1)) * 100);
51351+
51352+ pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
51353+ if (!(status & PCI_EXP_DEVSTA_TRPND))
51354+ return 1;
51355+ }
51356+
51357+ return 0;
51358+}
51359+#endif /* <RHEL6.7 */
51360+
51361+#endif /* <3.12 */
51362+
51363+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
51364+int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask)
51365+{
51366+ int err = dma_set_mask(dev, mask);
51367+
51368+ if (!err)
51369+ /* coherent mask for the same size will always succeed if
51370+ * dma_set_mask does. However we store the error anyways, due
51371+ * to some kernels which use gcc's warn_unused_result on their
51372+ * definition of dma_set_coherent_mask.
51373+ */
51374+ err = dma_set_coherent_mask(dev, mask);
51375+ return err;
51376+}
51377+#endif /* 3.13.0 */
51378+
51379+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
51380+/******************************************************************************
51381+ * ripped from linux/net/ipv6/exthdrs_core.c, GPL2, no direct copyright,
51382+ * inferred copyright from kernel
51383+ */
51384+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
51385+ int target, unsigned short *fragoff, int *flags)
51386+{
51387+ unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
51388+ u8 nexthdr = ipv6_hdr(skb)->nexthdr;
51389+ unsigned int len;
51390+ bool found;
51391+
51392+#define __KC_IP6_FH_F_FRAG BIT(0)
51393+#define __KC_IP6_FH_F_AUTH BIT(1)
51394+#define __KC_IP6_FH_F_SKIP_RH BIT(2)
51395+
51396+ if (fragoff)
51397+ *fragoff = 0;
51398+
51399+ if (*offset) {
51400+ struct ipv6hdr _ip6, *ip6;
51401+
51402+ ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
51403+ if (!ip6 || (ip6->version != 6)) {
51404+ printk(KERN_ERR "IPv6 header not found\n");
51405+ return -EBADMSG;
51406+ }
51407+ start = *offset + sizeof(struct ipv6hdr);
51408+ nexthdr = ip6->nexthdr;
51409+ }
51410+ len = skb->len - start;
51411+
51412+ do {
51413+ struct ipv6_opt_hdr _hdr, *hp;
51414+ unsigned int hdrlen;
51415+ found = (nexthdr == target);
51416+
51417+ if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
51418+ if (target < 0 || found)
51419+ break;
51420+ return -ENOENT;
51421+ }
51422+
51423+ hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
51424+ if (!hp)
51425+ return -EBADMSG;
51426+
51427+ if (nexthdr == NEXTHDR_ROUTING) {
51428+ struct ipv6_rt_hdr _rh, *rh;
51429+
51430+ rh = skb_header_pointer(skb, start, sizeof(_rh),
51431+ &_rh);
51432+ if (!rh)
51433+ return -EBADMSG;
51434+
51435+ if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) &&
51436+ rh->segments_left == 0)
51437+ found = false;
51438+ }
51439+
51440+ if (nexthdr == NEXTHDR_FRAGMENT) {
51441+ unsigned short _frag_off;
51442+ __be16 *fp;
51443+
51444+ if (flags) /* Indicate that this is a fragment */
51445+ *flags |= __KC_IP6_FH_F_FRAG;
51446+ fp = skb_header_pointer(skb,
51447+ start+offsetof(struct frag_hdr,
51448+ frag_off),
51449+ sizeof(_frag_off),
51450+ &_frag_off);
51451+ if (!fp)
51452+ return -EBADMSG;
51453+
51454+ _frag_off = ntohs(*fp) & ~0x7;
51455+ if (_frag_off) {
51456+ if (target < 0 &&
51457+ ((!ipv6_ext_hdr(hp->nexthdr)) ||
51458+ hp->nexthdr == NEXTHDR_NONE)) {
51459+ if (fragoff)
51460+ *fragoff = _frag_off;
51461+ return hp->nexthdr;
51462+ }
51463+ return -ENOENT;
51464+ }
51465+ hdrlen = 8;
51466+ } else if (nexthdr == NEXTHDR_AUTH) {
51467+ if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0))
51468+ break;
51469+ hdrlen = (hp->hdrlen + 2) << 2;
51470+ } else
51471+ hdrlen = ipv6_optlen(hp);
51472+
51473+ if (!found) {
51474+ nexthdr = hp->nexthdr;
51475+ len -= hdrlen;
51476+ start += hdrlen;
51477+ }
51478+ } while (!found);
51479+
51480+ *offset = start;
51481+ return nexthdr;
51482+}
51483+
51484+int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
51485+ int minvec, int maxvec)
51486+{
51487+ int nvec = maxvec;
51488+ int rc;
51489+
51490+ if (maxvec < minvec)
51491+ return -ERANGE;
51492+
51493+ do {
51494+ rc = pci_enable_msix(dev, entries, nvec);
51495+ if (rc < 0) {
51496+ return rc;
51497+ } else if (rc > 0) {
51498+ if (rc < minvec)
51499+ return -ENOSPC;
51500+ nvec = rc;
51501+ }
51502+ } while (rc);
51503+
51504+ return nvec;
51505+}
51506+#endif /* 3.14.0 */
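/*
 * Illustrative sketch (hypothetical, not i40e code): the 3.14 range
 * allocator above is normally reached through pci_enable_msix_range();
 * it is called directly here so the example stays self-contained. The
 * return value is the number of vectors actually granted (at least
 * min_vecs) or a negative errno.
 */
static int example_request_msix(struct pci_dev *pdev,
				struct msix_entry *entries,
				int min_vecs, int want_vecs)
{
	int i;

	for (i = 0; i < want_vecs; i++)
		entries[i].entry = i;

	return __kc_pci_enable_msix_range(pdev, entries, min_vecs, want_vecs);
}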
51507+
51508+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0))
51509+char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
51510+{
51511+ size_t size;
51512+ char *buf;
51513+
51514+ if (!s)
51515+ return NULL;
51516+
51517+ size = strlen(s) + 1;
51518+ buf = devm_kzalloc(dev, size, gfp);
51519+ if (buf)
51520+ memcpy(buf, s, size);
51521+ return buf;
51522+}
51523+
51524+void __kc_netdev_rss_key_fill(void *buffer, size_t len)
51525+{
51526+ /* Set of random keys generated using kernel random number generator */
51527+ static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62,
51528+ 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F,
51529+ 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95,
51530+ 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC,
51531+ 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41,
51532+ 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A,
51533+ 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20};
51534+
51535+ BUG_ON(len > NETDEV_RSS_KEY_LEN);
51536+ memcpy(buffer, seed, len);
51537+}
51538+#endif /* 3.15.0 */
51539+
51540+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) )
51541+#ifdef HAVE_SET_RX_MODE
51542+#ifdef NETDEV_HW_ADDR_T_UNICAST
51543+int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list,
51544+ struct net_device *dev,
51545+ int (*sync)(struct net_device *, const unsigned char *),
51546+ int (*unsync)(struct net_device *, const unsigned char *))
51547+{
51548+ struct netdev_hw_addr *ha, *tmp;
51549+ int err;
51550+
51551+ /* first go through and flush out any stale entries */
51552+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
51553+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
51554+ if (!ha->synced || ha->refcount != 1)
51555+#else
51556+ if (!ha->sync_cnt || ha->refcount != 1)
51557+#endif
51558+ continue;
51559+
51560+ if (unsync && unsync(dev, ha->addr))
51561+ continue;
51562+
51563+ list_del_rcu(&ha->list);
51564+ kfree_rcu(ha, rcu_head);
51565+ list->count--;
51566+ }
51567+
51568+ /* go through and sync new entries to the list */
51569+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
51570+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
51571+ if (ha->synced)
51572+#else
51573+ if (ha->sync_cnt)
51574+#endif
51575+ continue;
51576+
51577+ err = sync(dev, ha->addr);
51578+ if (err)
51579+ return err;
51580+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
51581+ ha->synced = true;
51582+#else
51583+ ha->sync_cnt++;
51584+#endif
51585+ ha->refcount++;
51586+ }
51587+
51588+ return 0;
51589+}
51590+
51591+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
51592+ struct net_device *dev,
51593+ int (*unsync)(struct net_device *, const unsigned char *))
51594+{
51595+ struct netdev_hw_addr *ha, *tmp;
51596+
51597+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
51598+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
51599+ if (!ha->synced)
51600+#else
51601+ if (!ha->sync_cnt)
51602+#endif
51603+ continue;
51604+
51605+ if (unsync && unsync(dev, ha->addr))
51606+ continue;
51607+
51608+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
51609+ ha->synced = false;
51610+#else
51611+ ha->sync_cnt--;
51612+#endif
51613+ if (--ha->refcount)
51614+ continue;
51615+
51616+ list_del_rcu(&ha->list);
51617+ kfree_rcu(ha, rcu_head);
51618+ list->count--;
51619+ }
51620+}
51621+
51622+#endif /* NETDEV_HW_ADDR_T_UNICAST */
51623+#ifndef NETDEV_HW_ADDR_T_MULTICAST
51624+int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count,
51625+ struct net_device *dev,
51626+ int (*sync)(struct net_device *, const unsigned char *),
51627+ int (*unsync)(struct net_device *, const unsigned char *))
51628+{
51629+ struct dev_addr_list *da, **next = list;
51630+ int err;
51631+
51632+ /* first go through and flush out any stale entries */
51633+ while ((da = *next) != NULL) {
51634+ if (da->da_synced && da->da_users == 1) {
51635+ if (!unsync || !unsync(dev, da->da_addr)) {
51636+ *next = da->next;
51637+ kfree(da);
51638+ (*count)--;
51639+ continue;
51640+ }
51641+ }
51642+ next = &da->next;
51643+ }
51644+
51645+ /* go through and sync new entries to the list */
51646+ for (da = *list; da != NULL; da = da->next) {
51647+ if (da->da_synced)
51648+ continue;
51649+
51650+ err = sync(dev, da->da_addr);
51651+ if (err)
51652+ return err;
51653+
51654+ da->da_synced++;
51655+ da->da_users++;
51656+ }
51657+
51658+ return 0;
51659+}
51660+
51661+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
51662+ struct net_device *dev,
51663+ int (*unsync)(struct net_device *, const unsigned char *))
51664+{
51665+ struct dev_addr_list *da;
51666+
51667+ while ((da = *list) != NULL) {
51668+ if (da->da_synced) {
51669+ if (!unsync || !unsync(dev, da->da_addr)) {
51670+ da->da_synced--;
51671+ if (--da->da_users == 0) {
51672+ *list = da->next;
51673+ kfree(da);
51674+ (*count)--;
51675+ continue;
51676+ }
51677+ }
51678+ }
51679+ list = &da->next;
51680+ }
51681+}
51682+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
51683+#endif /* HAVE_SET_RX_MODE */
51684+void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len,
51685+ gfp_t gfp)
51686+{
51687+ void *p;
51688+
51689+ p = devm_kzalloc(dev, len, gfp);
51690+ if (p)
51691+ memcpy(p, src, len);
51692+
51693+ return p;
51694+}
51695+#endif /* 3.16.0 */
51696+
51697+/******************************************************************************/
51698+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) && \
51699+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)))
51700+#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */
51701+
51702+/******************************************************************************/
51703+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
51704+#ifndef NO_PTP_SUPPORT
51705+static void __kc_sock_efree(struct sk_buff *skb)
51706+{
51707+ sock_put(skb->sk);
51708+}
51709+
51710+struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb)
51711+{
51712+ struct sock *sk = skb->sk;
51713+ struct sk_buff *clone;
51714+
51715+ if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
51716+ return NULL;
51717+
51718+ clone = skb_clone(skb, GFP_ATOMIC);
51719+ if (!clone) {
51720+ sock_put(sk);
51721+ return NULL;
51722+ }
51723+
51724+ clone->sk = sk;
51725+ clone->destructor = __kc_sock_efree;
51726+
51727+ return clone;
51728+}
51729+
51730+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
51731+ struct skb_shared_hwtstamps *hwtstamps)
51732+{
51733+ struct sock_exterr_skb *serr;
51734+ struct sock *sk = skb->sk;
51735+ int err;
51736+
51737+ sock_hold(sk);
51738+
51739+ *skb_hwtstamps(skb) = *hwtstamps;
51740+
51741+ serr = SKB_EXT_ERR(skb);
51742+ memset(serr, 0, sizeof(*serr));
51743+ serr->ee.ee_errno = ENOMSG;
51744+ serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
51745+
51746+ err = sock_queue_err_skb(sk, skb);
51747+ if (err)
51748+ kfree_skb(skb);
51749+
51750+ sock_put(sk);
51751+}
51752+#endif
51753+
51754+/* include headers needed for get_headlen function */
51755+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
51756+#include <scsi/fc/fc_fcoe.h>
51757+#endif
51758+#ifdef HAVE_SCTP
51759+#include <linux/sctp.h>
51760+#endif
51761+
51762+u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev,
51763+ unsigned char *data, unsigned int max_len)
51764+{
51765+ union {
51766+ unsigned char *network;
51767+ /* l2 headers */
51768+ struct ethhdr *eth;
51769+ struct vlan_hdr *vlan;
51770+ /* l3 headers */
51771+ struct iphdr *ipv4;
51772+ struct ipv6hdr *ipv6;
51773+ } hdr;
51774+ __be16 proto;
51775+ u8 nexthdr = 0; /* default to not TCP */
51776+ u8 hlen;
51777+
51778+ /* this should never happen, but better safe than sorry */
51779+ if (max_len < ETH_HLEN)
51780+ return max_len;
51781+
51782+ /* initialize network frame pointer */
51783+ hdr.network = data;
51784+
51785+ /* set first protocol and move network header forward */
51786+ proto = hdr.eth->h_proto;
51787+ hdr.network += ETH_HLEN;
51788+
51789+again:
51790+ switch (proto) {
51791+ /* handle any vlan tag if present */
51792+ case __constant_htons(ETH_P_8021AD):
51793+ case __constant_htons(ETH_P_8021Q):
51794+ if ((hdr.network - data) > (max_len - VLAN_HLEN))
51795+ return max_len;
51796+
51797+ proto = hdr.vlan->h_vlan_encapsulated_proto;
51798+ hdr.network += VLAN_HLEN;
51799+ goto again;
51800+ /* handle L3 protocols */
51801+ case __constant_htons(ETH_P_IP):
51802+ if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
51803+ return max_len;
51804+
51805+ /* access ihl as a u8 to avoid unaligned access on ia64 */
51806+ hlen = (hdr.network[0] & 0x0F) << 2;
51807+
51808+ /* verify hlen meets minimum size requirements */
51809+ if (hlen < sizeof(struct iphdr))
51810+ return hdr.network - data;
51811+
51812+ /* record next protocol if header is present */
51813+ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
51814+ nexthdr = hdr.ipv4->protocol;
51815+
51816+ hdr.network += hlen;
51817+ break;
51818+#ifdef NETIF_F_TSO6
51819+ case __constant_htons(ETH_P_IPV6):
51820+ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
51821+ return max_len;
51822+
51823+ /* record next protocol */
51824+ nexthdr = hdr.ipv6->nexthdr;
51825+ hdr.network += sizeof(struct ipv6hdr);
51826+ break;
51827+#endif /* NETIF_F_TSO6 */
51828+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
51829+ case __constant_htons(ETH_P_FCOE):
51830+ hdr.network += FCOE_HEADER_LEN;
51831+ break;
51832+#endif
51833+ default:
51834+ return hdr.network - data;
51835+ }
51836+
51837+ /* finally sort out L4 */
51838+ switch (nexthdr) {
51839+ case IPPROTO_TCP:
51840+ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
51841+ return max_len;
51842+
51843+ /* access doff as a u8 to avoid unaligned access on ia64 */
51844+ hdr.network += max_t(u8, sizeof(struct tcphdr),
51845+ (hdr.network[12] & 0xF0) >> 2);
51846+
51847+ break;
51848+ case IPPROTO_UDP:
51849+ case IPPROTO_UDPLITE:
51850+ hdr.network += sizeof(struct udphdr);
51851+ break;
51852+#ifdef HAVE_SCTP
51853+ case IPPROTO_SCTP:
51854+ hdr.network += sizeof(struct sctphdr);
51855+ break;
51856+#endif
51857+ }
51858+
51859+ /*
51860+ * If everything has gone correctly hdr.network should be the
51861+ * data section of the packet and will be the end of the header.
51862+ * If not then it probably represents the end of the last recognized
51863+ * header.
51864+ */
51865+ return min_t(unsigned int, hdr.network - data, max_len);
51866+}
51867+
51868+#endif /* < 3.18.0 */
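/*
 * Illustrative sketch (hypothetical, not i40e code): the usual consumer
 * of the eth_get_headlen() backport above is an Rx path that copies
 * only the protocol headers into the skb linear area and leaves the
 * payload in a page fragment. The 256-byte cap is an assumed header
 * buffer size for this sketch.
 */
static unsigned int example_rx_header_length(struct net_device *netdev,
					     unsigned char *buf,
					     unsigned int size)
{
	unsigned int max_len = size < 256 ? size : 256;

	/* Walks Ethernet/VLAN, IPv4/IPv6 and the L4 header, stopping at
	 * the first protocol it does not recognize.
	 */
	return __kc_eth_get_headlen(netdev, buf, max_len);
}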
51869+
51870+/******************************************************************************/
51871+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
51872+#ifdef HAVE_NET_GET_RANDOM_ONCE
51873+static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN];
51874+
51875+void __kc_netdev_rss_key_fill(void *buffer, size_t len)
51876+{
51877+ BUG_ON(len > sizeof(__kc_netdev_rss_key));
51878+ net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key));
51879+ memcpy(buffer, __kc_netdev_rss_key, len);
51880+}
51881+#endif
51882+
51883+int _kc_bitmap_print_to_pagebuf(bool list, char *buf,
51884+ const unsigned long *maskp,
51885+ int nmaskbits)
51886+{
51887+ ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2;
51888+ int n = 0;
51889+
51890+ if (len > 1) {
51891+ n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) :
51892+ bitmap_scnprintf(buf, len, maskp, nmaskbits);
51893+ buf[n++] = '\n';
51894+ buf[n] = '\0';
51895+ }
51896+ return n;
51897+}
51898+#endif
51899+
51900+/******************************************************************************/
51901+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) )
51902+#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \
51903+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \
51904+ (SLE_VERSION_CODE > SLE_VERSION(12,1,0)))
51905+unsigned int _kc_cpumask_local_spread(unsigned int i, int node)
51906+{
51907+ int cpu;
51908+
51909+ /* Wrap: we always want a cpu. */
51910+ i %= num_online_cpus();
51911+
51912+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
51913+ /* Kernels prior to 2.6.28 do not have for_each_cpu or
51914+ * cpumask_of_node, so just use for_each_online_cpu()
51915+ */
51916+ for_each_online_cpu(cpu)
51917+ if (i-- == 0)
51918+ return cpu;
51919+
51920+ return 0;
51921+#else
51922+ if (node == -1) {
51923+ for_each_cpu(cpu, cpu_online_mask)
51924+ if (i-- == 0)
51925+ return cpu;
51926+ } else {
51927+ /* NUMA first. */
51928+ for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
51929+ if (i-- == 0)
51930+ return cpu;
51931+
51932+ for_each_cpu(cpu, cpu_online_mask) {
51933+ /* Skip NUMA nodes, done above. */
51934+ if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
51935+ continue;
51936+
51937+ if (i-- == 0)
51938+ return cpu;
51939+ }
51940+ }
51941+#endif /* KERNEL_VERSION >= 2.6.28 */
51942+ BUG();
51943+}
51944+#endif
51945+#endif
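/*
 * Illustrative sketch (hypothetical, not i40e code): the typical use of
 * the cpumask_local_spread() backport above is picking one CPU per
 * queue, preferring CPUs on the device's NUMA node and wrapping around
 * once those are exhausted.
 */
static void example_pick_queue_cpus(int node, int nr_queues,
				    int *cpu_of_queue)
{
	int q;

	for (q = 0; q < nr_queues; q++)
		cpu_of_queue[q] = _kc_cpumask_local_spread(q, node);
}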
51946+
51947+/******************************************************************************/
51948+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) )
51949+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)))
51950+#ifdef CONFIG_SPARC
51951+#include <asm/idprom.h>
51952+#include <asm/prom.h>
51953+#endif
51954+int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused,
51955+ u8 *mac_addr __maybe_unused)
51956+{
51957+#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) && defined(CONFIG_OF) && \
51958+ !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \
51959+ !defined(CONFIG_SPARC))
51960+ return -ENODEV;
51961+#else
51962+ const unsigned char *addr;
51963+ struct device_node *dp;
51964+
51965+ if (dev_is_pci(dev))
51966+ dp = pci_device_to_OF_node(to_pci_dev(dev));
51967+ else
51968+#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF)
51969+ dp = dev->of_node;
51970+#else
51971+ dp = NULL;
51972+#endif
51973+
51974+ addr = NULL;
51975+ if (dp)
51976+ addr = of_get_mac_address(dp);
51977+#ifdef CONFIG_SPARC
51978+ /* Kernel hasn't implemented arch_get_platform_mac_address, but we
51979+ * should handle the SPARC case here since it was supported
51980+ * originally. This is replaced by arch_get_platform_mac_address()
51981+ * upstream.
51982+ */
51983+ if (!addr)
51984+ addr = idprom->id_ethaddr;
51985+#endif
51986+ if (!addr)
51987+ return -ENODEV;
51988+
51989+ ether_addr_copy(mac_addr, addr);
51990+ return 0;
51991+#endif
51992+}
51993+#endif /* !(RHEL_RELEASE >= 7.3) */
51994+#endif /* < 4.5.0 */
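/*
 * Illustrative sketch (hypothetical, not i40e code): the helper above is
 * normally used at probe time to prefer a firmware/device-tree provided
 * MAC address, keeping the NVM-derived address (or a random one) as the
 * fallback.
 */
static void example_init_mac_address(struct net_device *netdev,
				     struct device *dev)
{
	u8 addr[ETH_ALEN];

	if (!_kc_eth_platform_get_mac_address(dev, addr) &&
	    is_valid_ether_addr(addr))
		ether_addr_copy(netdev->dev_addr, addr);
	else if (!is_valid_ether_addr(netdev->dev_addr))
		eth_hw_addr_random(netdev);
}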
51995+
51996+/*****************************************************************************/
51997+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \
51998+ (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \
51999+ (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))))
52000+const char *_kc_phy_speed_to_str(int speed)
52001+{
52002+ switch (speed) {
52003+ case SPEED_10:
52004+ return "10Mbps";
52005+ case SPEED_100:
52006+ return "100Mbps";
52007+ case SPEED_1000:
52008+ return "1Gbps";
52009+ case SPEED_2500:
52010+ return "2.5Gbps";
52011+ case SPEED_5000:
52012+ return "5Gbps";
52013+ case SPEED_10000:
52014+ return "10Gbps";
52015+ case SPEED_14000:
52016+ return "14Gbps";
52017+ case SPEED_20000:
52018+ return "20Gbps";
52019+ case SPEED_25000:
52020+ return "25Gbps";
52021+ case SPEED_40000:
52022+ return "40Gbps";
52023+ case SPEED_50000:
52024+ return "50Gbps";
52025+ case SPEED_56000:
52026+ return "56Gbps";
52027+#ifdef SPEED_100000
52028+ case SPEED_100000:
52029+ return "100Gbps";
52030+#endif
52031+ case SPEED_UNKNOWN:
52032+ return "Unknown";
52033+ default:
52034+ return "Unsupported (update phy-core.c)";
52035+ }
52036+}
52037+#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */
52038+
52039+/******************************************************************************/
52040+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) )
52041+void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
52042+ struct ethtool_link_ksettings *src)
52043+{
52044+ unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
52045+ unsigned int idx = 0;
52046+
52047+ for (; idx < size; idx++) {
52048+ dst->link_modes.supported[idx] &=
52049+ src->link_modes.supported[idx];
52050+ dst->link_modes.advertising[idx] &=
52051+ src->link_modes.advertising[idx];
52052+ }
52053+}
52054+#endif /* 4.15.0 */
52055+
52056+/*****************************************************************************/
52057+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0))
52058+/* PCIe link information */
52059+#define PCIE_SPEED2STR(speed) \
52060+ ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \
52061+ (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \
52062+ (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \
52063+ (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \
52064+ "Unknown speed")
52065+
52066+/* PCIe speed to Mb/s reduced by encoding overhead */
52067+#define PCIE_SPEED2MBS_ENC(speed) \
52068+ ((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \
52069+ (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \
52070+ (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \
52071+ (speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \
52072+ 0)
52073+
52074+static u32
52075+_kc_pcie_bandwidth_available(struct pci_dev *dev,
52076+ struct pci_dev **limiting_dev,
52077+ enum pci_bus_speed *speed,
52078+ enum pcie_link_width *width)
52079+{
52080+ u16 lnksta;
52081+ enum pci_bus_speed next_speed;
52082+ enum pcie_link_width next_width;
52083+ u32 bw, next_bw;
52084+
52085+ if (speed)
52086+ *speed = PCI_SPEED_UNKNOWN;
52087+ if (width)
52088+ *width = PCIE_LNK_WIDTH_UNKNOWN;
52089+
52090+ bw = 0;
52091+
52092+ while (dev) {
52093+ pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
52094+
52095+ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
52096+ next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
52097+ PCI_EXP_LNKSTA_NLW_SHIFT;
52098+
52099+ next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
52100+
52101+ /* Check if current device limits the total bandwidth */
52102+ if (!bw || next_bw <= bw) {
52103+ bw = next_bw;
52104+
52105+ if (limiting_dev)
52106+ *limiting_dev = dev;
52107+ if (speed)
52108+ *speed = next_speed;
52109+ if (width)
52110+ *width = next_width;
52111+ }
52112+
52113+ dev = pci_upstream_bridge(dev);
52114+ }
52115+
52116+ return bw;
52117+}
52118+
52119+static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev)
52120+{
52121+ u32 lnkcap2, lnkcap;
52122+
52123+ /*
52124+ * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
52125+ * Speeds Vector in Link Capabilities 2 when supported, falling
52126+ * back to Max Link Speed in Link Capabilities otherwise.
52127+ */
52128+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
52129+ if (lnkcap2) { /* PCIe r3.0-compliant */
52130+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
52131+ return PCIE_SPEED_16_0GT;
52132+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
52133+ return PCIE_SPEED_8_0GT;
52134+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
52135+ return PCIE_SPEED_5_0GT;
52136+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
52137+ return PCIE_SPEED_2_5GT;
52138+ return PCI_SPEED_UNKNOWN;
52139+ }
52140+
52141+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
52142+ if (lnkcap) {
52143+ if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
52144+ return PCIE_SPEED_16_0GT;
52145+ else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
52146+ return PCIE_SPEED_8_0GT;
52147+ else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
52148+ return PCIE_SPEED_5_0GT;
52149+ else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
52150+ return PCIE_SPEED_2_5GT;
52151+ }
52152+
52153+ return PCI_SPEED_UNKNOWN;
52154+}
52155+
52156+static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev)
52157+{
52158+ u32 lnkcap;
52159+
52160+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
52161+ if (lnkcap)
52162+ return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
52163+
52164+ return PCIE_LNK_WIDTH_UNKNOWN;
52165+}
52166+
52167+static u32
52168+_kc_pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
52169+ enum pcie_link_width *width)
52170+{
52171+ *speed = _kc_pcie_get_speed_cap(dev);
52172+ *width = _kc_pcie_get_width_cap(dev);
52173+
52174+ if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
52175+ return 0;
52176+
52177+ return *width * PCIE_SPEED2MBS_ENC(*speed);
52178+}
52179+
52180+void _kc_pcie_print_link_status(struct pci_dev *dev) {
52181+ enum pcie_link_width width, width_cap;
52182+ enum pci_bus_speed speed, speed_cap;
52183+ struct pci_dev *limiting_dev = NULL;
52184+ u32 bw_avail, bw_cap;
52185+
52186+ bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
52187+ bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed,
52188+ &width);
52189+
52190+ if (bw_avail >= bw_cap)
52191+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
52192+ bw_cap / 1000, bw_cap % 1000,
52193+ PCIE_SPEED2STR(speed_cap), width_cap);
52194+ else
52195+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
52196+ bw_avail / 1000, bw_avail % 1000,
52197+ PCIE_SPEED2STR(speed), width,
52198+ limiting_dev ? pci_name(limiting_dev) : "<unknown>",
52199+ bw_cap / 1000, bw_cap % 1000,
52200+ PCIE_SPEED2STR(speed_cap), width_cap);
52201+}
52202+#endif /* 4.17.0 */
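/*
 * Worked example (illustrative, not part of the original patch) of the
 * arithmetic behind PCIE_SPEED2MBS_ENC() and the helpers above: a Gen3
 * (8 GT/s) x8 link gives
 *
 *	per lane: 8000 * 128 / 130 = 7876 Mb/s   (128b/130b encoding)
 *	link:     8 * 7876         = 63008 Mb/s  ~= 63.008 Gb/s
 *
 * while a Gen2 (5 GT/s) x4 link gives 4 * 5000 * 8 / 10 = 16000 Mb/s,
 * reflecting the heavier 8b/10b encoding of Gen1/Gen2.
 * _kc_pcie_print_link_status() reports the capable and
 * currently-available figures as whole Gb/s plus a three-digit
 * fraction (bw / 1000 and bw % 1000).
 */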
52203+
52204+/*****************************************************************************/
52205+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0))
52206+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1)))
52207+#define HAVE_NDO_FDB_ADD_EXTACK
52208+#else /* !RHEL || RHEL < 8.1 */
52209+#ifdef HAVE_TC_SETUP_CLSFLOWER
52210+#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \
52211+ const struct flow_match *__m = &(__rule)->match; \
52212+ struct flow_dissector *__d = (__m)->dissector; \
52213+ \
52214+ (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \
52215+ (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \
52216+
52217+void flow_rule_match_basic(const struct flow_rule *rule,
52218+ struct flow_match_basic *out)
52219+{
52220+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
52221+}
52222+
52223+void flow_rule_match_control(const struct flow_rule *rule,
52224+ struct flow_match_control *out)
52225+{
52226+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
52227+}
52228+
52229+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
52230+ struct flow_match_eth_addrs *out)
52231+{
52232+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
52233+}
52234+
52235+#ifdef HAVE_TC_FLOWER_ENC
52236+void flow_rule_match_enc_keyid(const struct flow_rule *rule,
52237+ struct flow_match_enc_keyid *out)
52238+{
52239+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
52240+}
52241+
52242+void flow_rule_match_enc_ports(const struct flow_rule *rule,
52243+ struct flow_match_ports *out)
52244+{
52245+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
52246+}
52247+
52248+void flow_rule_match_enc_control(const struct flow_rule *rule,
52249+ struct flow_match_control *out)
52250+{
52251+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
52252+}
52253+
52254+void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
52255+ struct flow_match_ipv4_addrs *out)
52256+{
52257+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
52258+}
52259+
52260+void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
52261+ struct flow_match_ipv6_addrs *out)
52262+{
52263+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
52264+}
52265+#endif
52266+
52267+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
52268+void flow_rule_match_vlan(const struct flow_rule *rule,
52269+ struct flow_match_vlan *out)
52270+{
52271+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
52272+}
52273+#endif
52274+
52275+void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
52276+ struct flow_match_ipv4_addrs *out)
52277+{
52278+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
52279+}
52280+
52281+void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
52282+ struct flow_match_ipv6_addrs *out)
52283+{
52284+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
52285+}
52286+
52287+void flow_rule_match_ports(const struct flow_rule *rule,
52288+ struct flow_match_ports *out)
52289+{
52290+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
52291+}
52292+#endif /* HAVE_TC_SETUP_CLSFLOWER */
52293+#endif /* !RHEL || RHEL < 8.1 */
52294+#endif /* 5.1.0 */
52295+
52296+/*****************************************************************************/
52297+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0))
52298+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
52299+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
52300+int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f,
52301+ struct list_head __always_unused *driver_list,
52302+ tc_setup_cb_t *cb,
52303+ void *cb_ident, void *cb_priv,
52304+ bool ingress_only)
52305+{
52306+ if (ingress_only &&
52307+ f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
52308+ return -EOPNOTSUPP;
52309+
52310+ /* Note: Upstream has driver_block_list, but older kernels do not */
52311+ switch (f->command) {
52312+ case TC_BLOCK_BIND:
52313+#ifdef HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
52314+ return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv,
52315+ f->extack);
52316+#else
52317+ return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv);
52318+#endif
52319+ case TC_BLOCK_UNBIND:
52320+ tcf_block_cb_unregister(f->block, cb, cb_ident);
52321+ return 0;
52322+ default:
52323+ return -EOPNOTSUPP;
52324+ }
52325+}
52326+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
52327+#endif /* !RHEL >= 8.2 */
52328+#endif /* 5.3.0 */
52329diff --git a/drivers/net/ethernet/intel/i40e/kcompat.h b/drivers/net/ethernet/intel/i40e/kcompat.h
52330new file mode 100644
52331index 000000000..a5cee4859
52332--- /dev/null
52333+++ b/drivers/net/ethernet/intel/i40e/kcompat.h
52334@@ -0,0 +1,6838 @@
52335+/* SPDX-License-Identifier: GPL-2.0 */
52336+/* Copyright(c) 2013 - 2020 Intel Corporation. */
52337+
52338+#ifndef _KCOMPAT_H_
52339+#define _KCOMPAT_H_
52340+
52341+#ifndef LINUX_VERSION_CODE
52342+#include <linux/version.h>
52343+#else
52344+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
52345+#endif
52346+#include <linux/io.h>
52347+#include <linux/delay.h>
52348+#include <linux/errno.h>
52349+#include <linux/etherdevice.h>
52350+#include <linux/ethtool.h>
52351+#include <linux/if_vlan.h>
52352+#include <linux/in.h>
52353+#include <linux/if_link.h>
52354+#include <linux/init.h>
52355+#include <linux/ioport.h>
52356+#include <linux/ip.h>
52357+#include <linux/ipv6.h>
52358+#include <linux/list.h>
52359+#include <linux/mii.h>
52360+#include <linux/module.h>
52361+#include <linux/netdevice.h>
52362+#include <linux/pci.h>
52363+#include <linux/sched.h>
52364+#include <linux/skbuff.h>
52365+#include <linux/slab.h>
52366+#include <linux/string.h>
52367+#include <linux/tcp.h>
52368+#include <linux/types.h>
52369+#include <linux/udp.h>
52370+#include <linux/vmalloc.h>
52371+
52372+#ifndef GCC_VERSION
52373+#define GCC_VERSION (__GNUC__ * 10000 \
52374+ + __GNUC_MINOR__ * 100 \
52375+ + __GNUC_PATCHLEVEL__)
52376+#endif /* GCC_VERSION */
52377+
52378+/* Backport macros for controlling GCC diagnostics */
52379+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) )
52380+
52381+/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */
52382+#if GCC_VERSION >= 40600
52383+#define __diag_str1(s) #s
52384+#define __diag_str(s) __diag_str1(s)
52385+#define __diag(s) _Pragma(__diag_str(GCC diagnostic s))
52386+#else
52387+#define __diag(s)
52388+#endif /* GCC_VERSION >= 4.6 */
52389+#define __diag_push() __diag(push)
52390+#define __diag_pop() __diag(pop)
52391+#endif /* LINUX_VERSION < 4.18.0 */
52392+
52393+#ifndef NSEC_PER_MSEC
52394+#define NSEC_PER_MSEC 1000000L
52395+#endif
52396+#include <net/ipv6.h>
52397+/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */
52398+#ifndef UTS_RELEASE
52399+/* utsrelease.h changed locations in 2.6.33 */
52400+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
52401+#include <linux/utsrelease.h>
52402+#else
52403+#include <generated/utsrelease.h>
52404+#endif
52405+#endif
52406+
52407+/* NAPI enable/disable flags here */
52408+#define NAPI
52409+
52410+#define adapter_struct i40e_pf
52411+#define adapter_q_vector i40e_q_vector
52412+
52413+/* and finally set defines so that the code sees the changes */
52414+#ifdef NAPI
52415+#ifndef CONFIG_I40E_NAPI
52416+#define CONFIG_I40E_NAPI
52417+#endif
52418+#else
52419+#undef CONFIG_I40E_NAPI
52420+#endif /* NAPI */
52421+
52422+/* Dynamic LTR and deeper C-State support disable/enable */
52423+
52424+/* packet split disable/enable */
52425+#ifdef DISABLE_PACKET_SPLIT
52426+#ifndef CONFIG_I40E_DISABLE_PACKET_SPLIT
52427+#define CONFIG_I40E_DISABLE_PACKET_SPLIT
52428+#endif
52429+#endif /* DISABLE_PACKET_SPLIT */
52430+
52431+/* MSI compatibility code for all kernels and drivers */
52432+#ifdef DISABLE_PCI_MSI
52433+#undef CONFIG_PCI_MSI
52434+#endif
52435+#ifndef CONFIG_PCI_MSI
52436+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
52437+struct msix_entry {
52438+ u16 vector; /* kernel uses to write allocated vector */
52439+ u16 entry; /* driver uses to specify entry, OS writes */
52440+};
52441+#endif
52442+#undef pci_enable_msi
52443+#define pci_enable_msi(a) -ENOTSUPP
52444+#undef pci_disable_msi
52445+#define pci_disable_msi(a) do {} while (0)
52446+#undef pci_enable_msix
52447+#define pci_enable_msix(a, b, c) -ENOTSUPP
52448+#undef pci_disable_msix
52449+#define pci_disable_msix(a) do {} while (0)
52450+#define msi_remove_pci_irq_vectors(a) do {} while (0)
52451+#endif /* CONFIG_PCI_MSI */
52452+#ifdef DISABLE_PM
52453+#undef CONFIG_PM
52454+#endif
52455+
52456+#ifdef DISABLE_NET_POLL_CONTROLLER
52457+#undef CONFIG_NET_POLL_CONTROLLER
52458+#endif
52459+
52460+#ifndef PMSG_SUSPEND
52461+#define PMSG_SUSPEND 3
52462+#endif
52463+
52464+/* generic boolean compatibility */
52465+#undef TRUE
52466+#undef FALSE
52467+#define TRUE true
52468+#define FALSE false
52469+#ifdef GCC_VERSION
52470+#if ( GCC_VERSION < 3000 )
52471+#define _Bool char
52472+#endif
52473+#else
52474+#define _Bool char
52475+#endif
52476+
52477+#ifndef BIT
52478+#define BIT(nr) (1UL << (nr))
52479+#endif
52480+
52481+#undef __always_unused
52482+#define __always_unused __attribute__((__unused__))
52483+
52484+#undef __maybe_unused
52485+#define __maybe_unused __attribute__((__unused__))
52486+
52487+/* kernels less than 2.4.14 don't have this */
52488+#ifndef ETH_P_8021Q
52489+#define ETH_P_8021Q 0x8100
52490+#endif
52491+
52492+#ifndef module_param
52493+#define module_param(v,t,p) MODULE_PARM(v, "i");
52494+#endif
52495+
52496+#ifndef DMA_64BIT_MASK
52497+#define DMA_64BIT_MASK 0xffffffffffffffffULL
52498+#endif
52499+
52500+#ifndef DMA_32BIT_MASK
52501+#define DMA_32BIT_MASK 0x00000000ffffffffULL
52502+#endif
52503+
52504+#ifndef PCI_CAP_ID_EXP
52505+#define PCI_CAP_ID_EXP 0x10
52506+#endif
52507+
52508+#ifndef uninitialized_var
52509+#define uninitialized_var(x) x = x
52510+#endif
52511+
52512+#ifndef PCIE_LINK_STATE_L0S
52513+#define PCIE_LINK_STATE_L0S 1
52514+#endif
52515+#ifndef PCIE_LINK_STATE_L1
52516+#define PCIE_LINK_STATE_L1 2
52517+#endif
52518+
52519+#ifndef SET_NETDEV_DEV
52520+#define SET_NETDEV_DEV(net, pdev)
52521+#endif
52522+
52523+#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
52524+#define free_netdev(x) kfree(x)
52525+#endif
52526+
52527+#ifdef HAVE_POLL_CONTROLLER
52528+#define CONFIG_NET_POLL_CONTROLLER
52529+#endif
52530+
52531+#ifndef SKB_DATAREF_SHIFT
52532+/* if we do not have the infrastructure to detect if skb_header is cloned
52533+ just return false in all cases */
52534+#define skb_header_cloned(x) 0
52535+#endif
52536+
52537+#ifndef NETIF_F_GSO
52538+#define gso_size tso_size
52539+#define gso_segs tso_segs
52540+#endif
52541+
52542+#ifndef NETIF_F_GRO
52543+#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
52544+ vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
52545+#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
52546+#endif
52547+
52548+#ifndef NETIF_F_SCTP_CSUM
52549+#define NETIF_F_SCTP_CSUM 0
52550+#endif
52551+
52552+#ifndef NETIF_F_LRO
52553+#define NETIF_F_LRO BIT(15)
52554+#endif
52555+
52556+#ifndef NETIF_F_NTUPLE
52557+#define NETIF_F_NTUPLE BIT(27)
52558+#endif
52559+
52560+#ifndef NETIF_F_ALL_FCOE
52561+#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
52562+ NETIF_F_FSO)
52563+#endif
52564+
52565+#ifndef IPPROTO_SCTP
52566+#define IPPROTO_SCTP 132
52567+#endif
52568+
52569+#ifndef IPPROTO_UDPLITE
52570+#define IPPROTO_UDPLITE 136
52571+#endif
52572+
52573+#ifndef CHECKSUM_PARTIAL
52574+#define CHECKSUM_PARTIAL CHECKSUM_HW
52575+#define CHECKSUM_COMPLETE CHECKSUM_HW
52576+#endif
52577+
52578+#ifndef __read_mostly
52579+#define __read_mostly
52580+#endif
52581+
52582+#ifndef MII_RESV1
52583+#define MII_RESV1 0x17 /* Reserved... */
52584+#endif
52585+
52586+#ifndef unlikely
52587+#define unlikely(_x) _x
52588+#define likely(_x) _x
52589+#endif
52590+
52591+#ifndef WARN_ON
52592+#define WARN_ON(x)
52593+#endif
52594+
52595+#ifndef PCI_DEVICE
52596+#define PCI_DEVICE(vend,dev) \
52597+ .vendor = (vend), .device = (dev), \
52598+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
52599+#endif
52600+
52601+#ifndef node_online
52602+#define node_online(node) ((node) == 0)
52603+#endif
52604+
52605+#ifndef cpu_online
52606+#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
52607+#endif
52608+
52609+#ifndef _LINUX_RANDOM_H
52610+#include <linux/random.h>
52611+#endif
52612+
52613+#ifndef BITS_PER_TYPE
52614+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
52615+#endif
52616+
52617+#ifndef BITS_TO_LONGS
52618+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
52619+#endif
52620+
52621+#ifndef DECLARE_BITMAP
52622+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
52623+#endif
52624+
52625+#ifndef VLAN_HLEN
52626+#define VLAN_HLEN 4
52627+#endif
52628+
52629+#ifndef VLAN_ETH_HLEN
52630+#define VLAN_ETH_HLEN 18
52631+#endif
52632+
52633+#ifndef VLAN_ETH_FRAME_LEN
52634+#define VLAN_ETH_FRAME_LEN 1518
52635+#endif
52636+
52637+#ifndef DCA_GET_TAG_TWO_ARGS
52638+#define dca3_get_tag(a,b) dca_get_tag(b)
52639+#endif
52640+
52641+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
52642+#if defined(__i386__) || defined(__x86_64__)
52643+#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
52644+#endif
52645+#endif
52646+
52647+/* taken from 2.6.24 definition in linux/kernel.h */
52648+#ifndef IS_ALIGNED
52649+#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
52650+#endif
52651+
52652+#ifdef IS_ENABLED
52653+#undef IS_ENABLED
52654+#undef __ARG_PLACEHOLDER_1
52655+#undef config_enabled
52656+#undef _config_enabled
52657+#undef __config_enabled
52658+#undef ___config_enabled
52659+#endif
52660+
52661+#define __ARG_PLACEHOLDER_1 0,
52662+#define config_enabled(cfg) _config_enabled(cfg)
52663+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
52664+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
52665+#define ___config_enabled(__ignored, val, ...) val
52666+
52667+#define IS_ENABLED(option) \
52668+ (config_enabled(option) || config_enabled(option##_MODULE))
52669+
52670+#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX)
52671+struct _kc_vlan_ethhdr {
52672+ unsigned char h_dest[ETH_ALEN];
52673+ unsigned char h_source[ETH_ALEN];
52674+ __be16 h_vlan_proto;
52675+ __be16 h_vlan_TCI;
52676+ __be16 h_vlan_encapsulated_proto;
52677+};
52678+#define vlan_ethhdr _kc_vlan_ethhdr
52679+struct _kc_vlan_hdr {
52680+ __be16 h_vlan_TCI;
52681+ __be16 h_vlan_encapsulated_proto;
52682+};
52683+#define vlan_hdr _kc_vlan_hdr
52684+#define vlan_tx_tag_present(_skb) 0
52685+#define vlan_tx_tag_get(_skb) 0
52686+#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */
52687+
52688+#ifndef VLAN_PRIO_SHIFT
52689+#define VLAN_PRIO_SHIFT 13
52690+#endif
52691+
52692+#ifndef PCI_EXP_LNKSTA_CLS_2_5GB
52693+#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001
52694+#endif
52695+
52696+#ifndef PCI_EXP_LNKSTA_CLS_5_0GB
52697+#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002
52698+#endif
52699+
52700+#ifndef PCI_EXP_LNKSTA_CLS_8_0GB
52701+#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003
52702+#endif
52703+
52704+#ifndef PCI_EXP_LNKSTA_NLW_X1
52705+#define PCI_EXP_LNKSTA_NLW_X1 0x0010
52706+#endif
52707+
52708+#ifndef PCI_EXP_LNKSTA_NLW_X2
52709+#define PCI_EXP_LNKSTA_NLW_X2 0x0020
52710+#endif
52711+
52712+#ifndef PCI_EXP_LNKSTA_NLW_X4
52713+#define PCI_EXP_LNKSTA_NLW_X4 0x0040
52714+#endif
52715+
52716+#ifndef PCI_EXP_LNKSTA_NLW_X8
52717+#define PCI_EXP_LNKSTA_NLW_X8 0x0080
52718+#endif
52719+
52720+#ifndef __GFP_COLD
52721+#define __GFP_COLD 0
52722+#endif
52723+
52724+#ifndef __GFP_COMP
52725+#define __GFP_COMP 0
52726+#endif
52727+
52728+#ifndef IP_OFFSET
52729+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
52730+#endif
52731+
52732+/*****************************************************************************/
52733+/* Installations with an ethtool version that lacks eeprom, adapter id, or
52734+ * statistics support */
52735+
52736+#ifndef ETH_GSTRING_LEN
52737+#define ETH_GSTRING_LEN 32
52738+#endif
52739+
52740+#ifndef ETHTOOL_GSTATS
52741+#define ETHTOOL_GSTATS 0x1d
52742+#undef ethtool_drvinfo
52743+#define ethtool_drvinfo k_ethtool_drvinfo
52744+struct k_ethtool_drvinfo {
52745+ u32 cmd;
52746+ char driver[32];
52747+ char version[32];
52748+ char fw_version[32];
52749+ char bus_info[32];
52750+ char reserved1[32];
52751+ char reserved2[16];
52752+ u32 n_stats;
52753+ u32 testinfo_len;
52754+ u32 eedump_len;
52755+ u32 regdump_len;
52756+};
52757+
52758+struct ethtool_stats {
52759+ u32 cmd;
52760+ u32 n_stats;
52761+ u64 data[0];
52762+};
52763+#endif /* ETHTOOL_GSTATS */
52764+
52765+#ifndef ETHTOOL_PHYS_ID
52766+#define ETHTOOL_PHYS_ID 0x1c
52767+#endif /* ETHTOOL_PHYS_ID */
52768+
52769+#ifndef ETHTOOL_GSTRINGS
52770+#define ETHTOOL_GSTRINGS 0x1b
52771+enum ethtool_stringset {
52772+ ETH_SS_TEST = 0,
52773+ ETH_SS_STATS,
52774+};
52775+struct ethtool_gstrings {
52776+ u32 cmd; /* ETHTOOL_GSTRINGS */
52777+	u32	string_set;	/* string set id e.g. ETH_SS_TEST, etc. */
52778+ u32 len; /* number of strings in the string set */
52779+ u8 data[0];
52780+};
52781+#endif /* ETHTOOL_GSTRINGS */
52782+
52783+#ifndef ETHTOOL_TEST
52784+#define ETHTOOL_TEST 0x1a
52785+enum ethtool_test_flags {
52786+ ETH_TEST_FL_OFFLINE = BIT(0),
52787+ ETH_TEST_FL_FAILED = BIT(1),
52788+};
52789+struct ethtool_test {
52790+ u32 cmd;
52791+ u32 flags;
52792+ u32 reserved;
52793+ u32 len;
52794+ u64 data[0];
52795+};
52796+#endif /* ETHTOOL_TEST */
52797+
52798+#ifndef ETHTOOL_GEEPROM
52799+#define ETHTOOL_GEEPROM 0xb
52800+#undef ETHTOOL_GREGS
52801+struct ethtool_eeprom {
52802+ u32 cmd;
52803+ u32 magic;
52804+ u32 offset;
52805+ u32 len;
52806+ u8 data[0];
52807+};
52808+
52809+struct ethtool_value {
52810+ u32 cmd;
52811+ u32 data;
52812+};
52813+#endif /* ETHTOOL_GEEPROM */
52814+
52815+#ifndef ETHTOOL_GLINK
52816+#define ETHTOOL_GLINK 0xa
52817+#endif /* ETHTOOL_GLINK */
52818+
52819+#ifndef ETHTOOL_GWOL
52820+#define ETHTOOL_GWOL 0x5
52821+#define ETHTOOL_SWOL 0x6
52822+#define SOPASS_MAX 6
52823+struct ethtool_wolinfo {
52824+ u32 cmd;
52825+ u32 supported;
52826+ u32 wolopts;
52827+ u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
52828+};
52829+#endif /* ETHTOOL_GWOL */
52830+
52831+#ifndef ETHTOOL_GREGS
52832+#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
52833+#define ethtool_regs _kc_ethtool_regs
52834+/* for passing big chunks of data */
52835+struct _kc_ethtool_regs {
52836+ u32 cmd;
52837+ u32 version; /* driver-specific, indicates different chips/revs */
52838+ u32 len; /* bytes */
52839+ u8 data[0];
52840+};
52841+#endif /* ETHTOOL_GREGS */
52842+
52843+#ifndef ETHTOOL_GMSGLVL
52844+#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
52845+#endif
52846+#ifndef ETHTOOL_SMSGLVL
52847+#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
52848+#endif
52849+#ifndef ETHTOOL_NWAY_RST
52850+#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
52851+#endif
52852+#ifndef ETHTOOL_GLINK
52853+#define ETHTOOL_GLINK 0x0000000a /* Get link status */
52854+#endif
52855+#ifndef ETHTOOL_GEEPROM
52856+#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
52857+#endif
52858+#ifndef ETHTOOL_SEEPROM
52859+#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
52860+#endif
52861+#ifndef ETHTOOL_GCOALESCE
52862+#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
52863+/* for configuring coalescing parameters of chip */
52864+#define ethtool_coalesce _kc_ethtool_coalesce
52865+struct _kc_ethtool_coalesce {
52866+ u32 cmd; /* ETHTOOL_{G,S}COALESCE */
52867+
52868+ /* How many usecs to delay an RX interrupt after
52869+ * a packet arrives. If 0, only rx_max_coalesced_frames
52870+ * is used.
52871+ */
52872+ u32 rx_coalesce_usecs;
52873+
52874+ /* How many packets to delay an RX interrupt after
52875+ * a packet arrives. If 0, only rx_coalesce_usecs is
52876+ * used. It is illegal to set both usecs and max frames
52877+ * to zero as this would cause RX interrupts to never be
52878+ * generated.
52879+ */
52880+ u32 rx_max_coalesced_frames;
52881+
52882+ /* Same as above two parameters, except that these values
52883+ * apply while an IRQ is being serviced by the host. Not
52884+ * all cards support this feature and the values are ignored
52885+ * in that case.
52886+ */
52887+ u32 rx_coalesce_usecs_irq;
52888+ u32 rx_max_coalesced_frames_irq;
52889+
52890+ /* How many usecs to delay a TX interrupt after
52891+ * a packet is sent. If 0, only tx_max_coalesced_frames
52892+ * is used.
52893+ */
52894+ u32 tx_coalesce_usecs;
52895+
52896+ /* How many packets to delay a TX interrupt after
52897+ * a packet is sent. If 0, only tx_coalesce_usecs is
52898+ * used. It is illegal to set both usecs and max frames
52899+ * to zero as this would cause TX interrupts to never be
52900+ * generated.
52901+ */
52902+ u32 tx_max_coalesced_frames;
52903+
52904+ /* Same as above two parameters, except that these values
52905+ * apply while an IRQ is being serviced by the host. Not
52906+ * all cards support this feature and the values are ignored
52907+ * in that case.
52908+ */
52909+ u32 tx_coalesce_usecs_irq;
52910+ u32 tx_max_coalesced_frames_irq;
52911+
52912+ /* How many usecs to delay in-memory statistics
52913+ * block updates. Some drivers do not have an in-memory
52914+ * statistic block, and in such cases this value is ignored.
52915+ * This value must not be zero.
52916+ */
52917+ u32 stats_block_coalesce_usecs;
52918+
52919+ /* Adaptive RX/TX coalescing is an algorithm implemented by
52920+ * some drivers to improve latency under low packet rates and
52921+ * improve throughput under high packet rates. Some drivers
52922+ * only implement one of RX or TX adaptive coalescing. Anything
52923+ * not implemented by the driver causes these values to be
52924+ * silently ignored.
52925+ */
52926+ u32 use_adaptive_rx_coalesce;
52927+ u32 use_adaptive_tx_coalesce;
52928+
52929+ /* When the packet rate (measured in packets per second)
52930+ * is below pkt_rate_low, the {rx,tx}_*_low parameters are
52931+ * used.
52932+ */
52933+ u32 pkt_rate_low;
52934+ u32 rx_coalesce_usecs_low;
52935+ u32 rx_max_coalesced_frames_low;
52936+ u32 tx_coalesce_usecs_low;
52937+ u32 tx_max_coalesced_frames_low;
52938+
52939+ /* When the packet rate is below pkt_rate_high but above
52940+ * pkt_rate_low (both measured in packets per second) the
52941+ * normal {rx,tx}_* coalescing parameters are used.
52942+ */
52943+
52944+	/* When the packet rate (measured in packets per second)
52945+	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
52946+ * used.
52947+ */
52948+ u32 pkt_rate_high;
52949+ u32 rx_coalesce_usecs_high;
52950+ u32 rx_max_coalesced_frames_high;
52951+ u32 tx_coalesce_usecs_high;
52952+ u32 tx_max_coalesced_frames_high;
52953+
52954+ /* How often to do adaptive coalescing packet rate sampling,
52955+ * measured in seconds. Must not be zero.
52956+ */
52957+ u32 rate_sample_interval;
52958+};
52959+#endif /* ETHTOOL_GCOALESCE */
52960+
52961+#ifndef ETHTOOL_SCOALESCE
52962+#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
52963+#endif
52964+#ifndef ETHTOOL_GRINGPARAM
52965+#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
52966+/* for configuring RX/TX ring parameters */
52967+#define ethtool_ringparam _kc_ethtool_ringparam
52968+struct _kc_ethtool_ringparam {
52969+ u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
52970+
52971+ /* Read only attributes. These indicate the maximum number
52972+ * of pending RX/TX ring entries the driver will allow the
52973+ * user to set.
52974+ */
52975+ u32 rx_max_pending;
52976+ u32 rx_mini_max_pending;
52977+ u32 rx_jumbo_max_pending;
52978+ u32 tx_max_pending;
52979+
52980+ /* Values changeable by the user. The valid values are
52981+ * in the range 1 to the "*_max_pending" counterpart above.
52982+ */
52983+ u32 rx_pending;
52984+ u32 rx_mini_pending;
52985+ u32 rx_jumbo_pending;
52986+ u32 tx_pending;
52987+};
52988+#endif /* ETHTOOL_GRINGPARAM */
52989+
52990+#ifndef ETHTOOL_SRINGPARAM
52991+#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
52992+#endif
52993+#ifndef ETHTOOL_GPAUSEPARAM
52994+#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
52995+/* for configuring link flow control parameters */
52996+#define ethtool_pauseparam _kc_ethtool_pauseparam
52997+struct _kc_ethtool_pauseparam {
52998+ u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
52999+
53000+ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
53001+ * being true) the user may set 'autoneg' here non-zero to have the
53002+ * pause parameters be auto-negotiated too. In such a case, the
53003+ * {rx,tx}_pause values below determine what capabilities are
53004+ * advertised.
53005+ *
53006+ * If 'autoneg' is zero or the link is not being auto-negotiated,
53007+ * then {rx,tx}_pause force the driver to use/not-use pause
53008+ * flow control.
53009+ */
53010+ u32 autoneg;
53011+ u32 rx_pause;
53012+ u32 tx_pause;
53013+};
53014+#endif /* ETHTOOL_GPAUSEPARAM */
53015+
53016+#ifndef ETHTOOL_SPAUSEPARAM
53017+#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
53018+#endif
53019+#ifndef ETHTOOL_GRXCSUM
53020+#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
53021+#endif
53022+#ifndef ETHTOOL_SRXCSUM
53023+#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
53024+#endif
53025+#ifndef ETHTOOL_GTXCSUM
53026+#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
53027+#endif
53028+#ifndef ETHTOOL_STXCSUM
53029+#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
53030+#endif
53031+#ifndef ETHTOOL_GSG
53032+#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
53033+ * (ethtool_value) */
53034+#endif
53035+#ifndef ETHTOOL_SSG
53036+#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
53037+ * (ethtool_value). */
53038+#endif
53039+#ifndef ETHTOOL_TEST
53040+#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
53041+#endif
53042+#ifndef ETHTOOL_GSTRINGS
53043+#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
53044+#endif
53045+#ifndef ETHTOOL_PHYS_ID
53046+#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
53047+#endif
53048+#ifndef ETHTOOL_GSTATS
53049+#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
53050+#endif
53051+#ifndef ETHTOOL_GTSO
53052+#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
53053+#endif
53054+#ifndef ETHTOOL_STSO
53055+#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
53056+#endif
53057+
53058+#ifndef ETHTOOL_BUSINFO_LEN
53059+#define ETHTOOL_BUSINFO_LEN 32
53060+#endif
53061+
53062+#ifndef WAKE_FILTER
53063+#define WAKE_FILTER BIT(7)
53064+#endif
53065+
53066+#ifndef SPEED_2500
53067+#define SPEED_2500 2500
53068+#endif
53069+#ifndef SPEED_5000
53070+#define SPEED_5000 5000
53071+#endif
53072+#ifndef SPEED_14000
53073+#define SPEED_14000 14000
53074+#endif
53075+#ifndef SPEED_25000
53076+#define SPEED_25000 25000
53077+#endif
53078+#ifndef SPEED_50000
53079+#define SPEED_50000 50000
53080+#endif
53081+#ifndef SPEED_56000
53082+#define SPEED_56000 56000
53083+#endif
53084+#ifndef SPEED_100000
53085+#define SPEED_100000 100000
53086+#endif
53087+
53088+#ifndef RHEL_RELEASE_VERSION
53089+#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
53090+#endif
53091+#ifndef AX_RELEASE_VERSION
53092+#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
53093+#endif
53094+
53095+/* #ifndef AX_RELEASE_CODE */
53096+#define AX_RELEASE_CODE 0
53097+/* #endif */
53098+
53099+#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0))
53100+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0)
53101+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1))
53102+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1)
53103+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2))
53104+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3)
53105+#endif
53106+
53107+/* #ifndef RHEL_RELEASE_CODE */
53108+/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
53109+#define RHEL_RELEASE_CODE 0
53110+/* #endif */
53111+
53112+/* RHEL 7 didn't backport the parameter change in
53113+ * create_singlethread_workqueue.
53114+ * If/when RH corrects this we will want to tighten up the version check.
53115+ */
53116+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))
53117+#undef create_singlethread_workqueue
53118+#define create_singlethread_workqueue(name) \
53119+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
53120+#endif
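/*
 * Illustrative note (not part of the original patch): with the remap
 * above, a driver call such as
 *
 *	wq = create_singlethread_workqueue("i40e");
 *
 * expands on RHEL 7 to alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
 * "i40e"), restoring the upstream behaviour of an ordered,
 * memory-reclaim-safe workqueue.
 */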
53121+
53122+/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find
53123+ * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new
53124+ * enough versions of Ubuntu. Otherwise you can simply see it in the output of
53125+ * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in
53126+ * the linux-source package, but in the linux-headers package. It begins to
53127+ * appear in later releases of 14.04 and 14.10.
53128+ *
53129+ * Ex:
53130+ * <Ubuntu 14.04.1>
53131+ * $uname -r
53132+ * 3.13.0-45-generic
53133+ * ABI is 45
53134+ *
53135+ * <Ubuntu 14.10>
53136+ * $uname -r
53137+ * 3.16.0-23-generic
53138+ * ABI is 23
53139+ */
53140+#ifndef UTS_UBUNTU_RELEASE_ABI
53141+#define UTS_UBUNTU_RELEASE_ABI 0
53142+#define UBUNTU_VERSION_CODE 0
53143+#else
53144+/* Ubuntu does not provide actual release version macro, so we use the kernel
53145+ * version plus the ABI to generate a unique version code specific to Ubuntu.
53146+ * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to
53147+ * ignore differences in sublevel which are not important since we have the
53148+ * ABI value. Otherwise, it becomes impossible to correlate ABI to version for
53149+ * ordering checks.
53150+ */
53151+#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \
53152+ UTS_UBUNTU_RELEASE_ABI)
53153+
53154+#if UTS_UBUNTU_RELEASE_ABI > 255
53155+#error UTS_UBUNTU_RELEASE_ABI is too large...
53156+#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */
53157+
53158+#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) )
53159+/* Our version code scheme does not make sense for kernels older than 3.x,
53160+ * and we have no support in kcompat for this scenario. Thus, treat this as a
53161+ * non-Ubuntu kernel. Possibly might be better to error here.
53162+ */
53163+#define UTS_UBUNTU_RELEASE_ABI 0
53164+#define UBUNTU_VERSION_CODE 0
53165+#endif
53166+
53167+#endif
53168+
53169+/* Note that the 3rd digit is always zero, and will be ignored. This is
53170+ * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux
53171+ * version codes are 3 digit, this 3rd digit is superseded by the ABI value.
53172+ */
53173+#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d))
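/*
 * Worked example (illustrative): on Ubuntu's 3.13.0-45-generic,
 * LINUX_VERSION_CODE is KERNEL_VERSION(3,13,0) = 0x030d00 and
 * UTS_UBUNTU_RELEASE_ABI is 45, so
 *
 *	UBUNTU_VERSION_CODE = ((~0xFF & 0x030d00) << 8) + 45 = 0x030d002d
 *
 * which equals UBUNTU_VERSION(3,13,0,45) and is greater than, say,
 * UBUNTU_VERSION(3,13,0,30), giving exactly the ordering checks the
 * comments above describe.
 */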
53174+
53175+/* SuSE version macros are the same as Linux kernel version macro */
53176+#ifndef SLE_VERSION
53177+#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
53178+#endif
53179+#define SLE_LOCALVERSION(a,b,c) KERNEL_VERSION(a,b,c)
53180+#ifdef CONFIG_SUSE_KERNEL
53181+#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
53182+/* SLES11 GA is 2.6.27 based */
53183+#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
53184+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
53185+/* SLES11 SP1 is 2.6.32 based */
53186+#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
53187+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) )
53188+/* SLES11 SP2 GA is 3.0.13-0.27 */
53189+#define SLE_VERSION_CODE SLE_VERSION(11,2,0)
53190+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76)))
53191+/* SLES11 SP3 GA is 3.0.76-0.11 */
53192+#define SLE_VERSION_CODE SLE_VERSION(11,3,0)
53193+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101))
53194+ #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0))
53195+ /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */
53196+ #define SLE_VERSION_CODE SLE_VERSION(11,2,0)
53197+ #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0))
53198+ /* most SLES11sp3 update kernels */
53199+ #define SLE_VERSION_CODE SLE_VERSION(11,3,0)
53200+ #else
53201+ /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */
53202+ #define SLE_VERSION_CODE SLE_VERSION(11,4,0)
53203+ #endif
53204+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28))
53205+/* SLES12 GA is 3.12.28-4
53206+ * kernel updates 3.12.xx-<33 through 52>[.yy] */
53207+#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
53208+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49))
53209+/* SLES12 SP1 GA is 3.12.49-11
53210+ * updates 3.12.xx-60.yy where xx={51..} */
53211+#define SLE_VERSION_CODE SLE_VERSION(12,1,0)
53212+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21) && \
53213+ (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,59))) || \
53214+ (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,74) && \
53215+ LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \
53216+ SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92,0,0) && \
53217+ SLE_LOCALVERSION_CODE < KERNEL_VERSION(93,0,0)))
53218+/* SLES12 SP2 GA is 4.4.21-69.
53219+ * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59}
53220+ * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120}
53221+ * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y */
53222+#define SLE_VERSION_CODE SLE_VERSION(12,2,0)
53223+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4,4,73) || \
53224+ LINUX_VERSION_CODE == KERNEL_VERSION(4,4,82) || \
53225+ LINUX_VERSION_CODE == KERNEL_VERSION(4,4,92)) || \
53226+ (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,103) && \
53227+ (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,33,0) || \
53228+ SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,38,0))) || \
53229+ (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,114) && \
53230+ LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \
53231+ SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94,0,0) && \
53232+ SLE_LOCALVERSION_CODE < KERNEL_VERSION(95,0,0)) )
53233+/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3.
53234+ * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92}
53235+ * SLES12 SP3 updates conflicting with SP2 are:
53236+ * - 4.4.103-6.33.1, 4.4.103-6.38.1
53237+ * - 4.4.{114,120}-94.nn.y */
53238+#define SLE_VERSION_CODE SLE_VERSION(12,3,0)
53239+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \
53240+ (SLE_LOCALVERSION_CODE == KERNEL_VERSION(94,41,0) || \
53241+ (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(95,0,0) && \
53242+ SLE_LOCALVERSION_CODE < KERNEL_VERSION(96,0,0))))
53243+/* SLES12 SP4 GM is 4.12.14-94.41 and update kernel is 4.12.14-95.x. */
53244+#define SLE_VERSION_CODE SLE_VERSION(12,4,0)
53245+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \
53246+ (SLE_LOCALVERSION_CODE == KERNEL_VERSION(23,0,0) || \
53247+ SLE_LOCALVERSION_CODE == KERNEL_VERSION(2,0,0) || \
53248+ SLE_LOCALVERSION_CODE == KERNEL_VERSION(136,0,0) || \
53249+ (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,0,0) && \
53250+ SLE_LOCALVERSION_CODE < KERNEL_VERSION(26,0,0)) || \
53251+ (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(150,0,0) && \
53252+ SLE_LOCALVERSION_CODE < KERNEL_VERSION(151,0,0))))
53253+/* SLES15 Beta1 is 4.12.14-2
53254+ * SLES15 GM is 4.12.14-23 and update kernel is 4.12.14-{25,136},
53255+ * and 4.12.14-150.14.
53256+ */
53257+#define SLE_VERSION_CODE SLE_VERSION(15,0,0)
53258+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,14) && \
53259+ SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,23,0))
53260+/* SLES15 SP1 Beta1 is 4.12.14-25.23 */
53261+#define SLE_VERSION_CODE SLE_VERSION(15,1,0)
53262+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,13))
53263+/* SLES15 SP2 Beta1 is 5.3.13 */
53264+#define SLE_VERSION_CODE SLE_VERSION(15,2,0)
53265+
53266+/* New SLES kernels must be added here with a >= check on the kernel version;
53267+ * the idea is to order the checks from newest to oldest and catch all newer
53268+ * kernels using the >=.
53269+ */
53270+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
53271+#endif /* CONFIG_SUSE_KERNEL */
53272+#ifndef SLE_VERSION_CODE
53273+#define SLE_VERSION_CODE 0
53274+#endif /* SLE_VERSION_CODE */
53275+#ifndef SLE_LOCALVERSION_CODE
53276+#define SLE_LOCALVERSION_CODE 0
53277+#endif /* SLE_LOCALVERSION_CODE */
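/* Illustrative note (not part of the original patch): later compatibility
 * blocks in this header guard SLES-specific behaviour with checks such as
 *
 *   #if (SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0))
 *   ...
 *   #endif
 *
 * which is why SLE_VERSION_CODE and SLE_LOCALVERSION_CODE are given fallback
 * definitions of 0 above.
 */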
53278+
53279+#ifdef __KLOCWORK__
53280+/* The following are not compiled into the binary driver; they are here
53281+ * only to tune Klocwork scans to workaround false-positive issues.
53282+ */
53283+#ifdef ARRAY_SIZE
53284+#undef ARRAY_SIZE
53285+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
53286+#endif
53287+
53288+#define memcpy(dest, src, len) memcpy_s(dest, len, src, len)
53289+#define memset(dest, ch, len) memset_s(dest, len, ch, len)
53290+
53291+static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr)
53292+{
53293+ unsigned long mask = BIT_MASK(nr);
53294+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
53295+ unsigned long old;
53296+ unsigned long flags = 0;
53297+
53298+ _atomic_spin_lock_irqsave(p, flags);
53299+ old = *p;
53300+ *p = old & ~mask;
53301+ _atomic_spin_unlock_irqrestore(p, flags);
53302+
53303+ return (old & mask) != 0;
53304+}
53305+#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr)
53306+
53307+static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr)
53308+{
53309+ unsigned long mask = BIT_MASK(nr);
53310+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
53311+ unsigned long old;
53312+ unsigned long flags = 0;
53313+
53314+ _atomic_spin_lock_irqsave(p, flags);
53315+ old = *p;
53316+ *p = old | mask;
53317+ _atomic_spin_unlock_irqrestore(p, flags);
53318+
53319+ return (old & mask) != 0;
53320+}
53321+#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr)
53322+
53323+#ifdef CONFIG_DYNAMIC_DEBUG
53324+#undef dev_dbg
53325+#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg)
53326+#undef pr_debug
53327+#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg)
53328+#endif /* CONFIG_DYNAMIC_DEBUG */
53329+
53330+#undef hlist_for_each_entry_safe
53331+#define hlist_for_each_entry_safe(pos, n, head, member) \
53332+ for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \
53333+ member); \
53334+ pos; \
53335+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
53336+
53337+#ifdef uninitialized_var
53338+#undef uninitialized_var
53339+#define uninitialized_var(x) x = *(&(x))
53340+#endif
53341+#endif /* __KLOCWORK__ */
53342+
53343+#include "kcompat_vfd.h"
53344+struct vfd_objects *create_vfd_sysfs(struct pci_dev *pdev, int num_alloc_vfs);
53345+void destroy_vfd_sysfs(struct pci_dev *pdev, struct vfd_objects *vfd_obj);
53346+
53347+/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const
53348+ * char * strings. Unfortunately, the implementation of do_trace_printk does
53349+ * this, in order to add a storage attribute to the memory. This was fixed in
53350+ * GCC 5.1, but we still use older distributions built with GCC 4.x.
53351+ *
53352+ * The string pointer is only passed as a const char * to the __trace_bprintk
53353+ * function. Since that function has the __printf attribute, it will trigger
53354+ * the warnings. We can't remove the attribute, so instead we'll use the
53355+ * __diag macro to disable -Wformat-nonliteral around the call to
53356+ * __trace_bprintk.
53357+ */
53358+#if GCC_VERSION < 50100
53359+#define __trace_bprintk(ip, fmt, args...) ({ \
53360+ int err; \
53361+ __diag_push(); \
53362+ __diag(ignored "-Wformat-nonliteral"); \
53363+ err = __trace_bprintk(ip, fmt, ##args); \
53364+ __diag_pop(); \
53365+ err; \
53366+})
53367+#endif /* GCC_VERSION < 5.1.0 */
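/* Illustrative sketch (not part of the original patch): because the wrapper
 * above is a GNU statement expression, the diagnostic is suppressed around a
 * single call while the return value still reaches the caller, e.g.
 *
 *   int ret = __trace_bprintk(_THIS_IP_, fmt, arg);
 *
 * expands to the __diag_push()/__diag_pop() sequence and evaluates to "err".
 */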
53368+
53369+/* Newer kernels removed <linux/pci-aspm.h> */
53370+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) )
53371+#define HAVE_PCI_ASPM_H
53372+#endif
53373+
53374+/*****************************************************************************/
53375+/* 2.4.3 => 2.4.0 */
53376+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
53377+
53378+/**************************************/
53379+/* PCI DRIVER API */
53380+
53381+#ifndef pci_set_dma_mask
53382+#define pci_set_dma_mask _kc_pci_set_dma_mask
53383+int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
53384+#endif
53385+
53386+#ifndef pci_request_regions
53387+#define pci_request_regions _kc_pci_request_regions
53388+int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
53389+#endif
53390+
53391+#ifndef pci_release_regions
53392+#define pci_release_regions _kc_pci_release_regions
53393+void _kc_pci_release_regions(struct pci_dev *pdev);
53394+#endif
53395+
53396+/**************************************/
53397+/* NETWORK DRIVER API */
53398+
53399+#ifndef alloc_etherdev
53400+#define alloc_etherdev _kc_alloc_etherdev
53401+struct net_device * _kc_alloc_etherdev(int sizeof_priv);
53402+#endif
53403+
53404+#ifndef is_valid_ether_addr
53405+#define is_valid_ether_addr _kc_is_valid_ether_addr
53406+int _kc_is_valid_ether_addr(u8 *addr);
53407+#endif
53408+
53409+/**************************************/
53410+/* MISCELLANEOUS */
53411+
53412+#ifndef INIT_TQUEUE
53413+#define INIT_TQUEUE(_tq, _routine, _data) \
53414+ do { \
53415+ INIT_LIST_HEAD(&(_tq)->list); \
53416+ (_tq)->sync = 0; \
53417+ (_tq)->routine = _routine; \
53418+ (_tq)->data = _data; \
53419+ } while (0)
53420+#endif
53421+
53422+#endif /* 2.4.3 => 2.4.0 */
53423+
53424+/*****************************************************************************/
53425+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
53426+/* Generic MII registers. */
53427+#define MII_BMCR 0x00 /* Basic mode control register */
53428+#define MII_BMSR 0x01 /* Basic mode status register */
53429+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
53430+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
53431+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
53432+#define MII_LPA 0x05 /* Link partner ability reg */
53433+#define MII_EXPANSION 0x06 /* Expansion register */
53434+/* Basic mode control register. */
53435+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
53436+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
53437+/* Basic mode status register. */
53438+#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
53439+#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
53440+#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
53441+#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
53442+#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
53443+#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
53444+/* Advertisement control register. */
53445+#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
53446+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
53447+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
53448+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
53449+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
53450+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
53451+ ADVERTISE_100HALF | ADVERTISE_100FULL)
53452+/* Expansion register for auto-negotiation. */
53453+#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
53454+#endif
53455+
53456+/*****************************************************************************/
53457+/* 2.4.6 => 2.4.3 */
53458+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
53459+
53460+#ifndef pci_set_power_state
53461+#define pci_set_power_state _kc_pci_set_power_state
53462+int _kc_pci_set_power_state(struct pci_dev *dev, int state);
53463+#endif
53464+
53465+#ifndef pci_enable_wake
53466+#define pci_enable_wake _kc_pci_enable_wake
53467+int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
53468+#endif
53469+
53470+#ifndef pci_disable_device
53471+#define pci_disable_device _kc_pci_disable_device
53472+void _kc_pci_disable_device(struct pci_dev *pdev);
53473+#endif
53474+
53475+/* PCI PM entry point syntax changed, so don't support suspend/resume */
53476+#undef CONFIG_PM
53477+
53478+#endif /* 2.4.6 => 2.4.3 */
53479+
53480+#ifndef HAVE_PCI_SET_MWI
53481+#define pci_set_mwi(X) pci_write_config_word(X, \
53482+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
53483+ PCI_COMMAND_INVALIDATE);
53484+#define pci_clear_mwi(X) pci_write_config_word(X, \
53485+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
53486+ ~PCI_COMMAND_INVALIDATE);
53487+#endif
53488+
53489+/*****************************************************************************/
53490+/* 2.4.10 => 2.4.9 */
53491+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
53492+
53493+/**************************************/
53494+/* MODULE API */
53495+
53496+#ifndef MODULE_LICENSE
53497+ #define MODULE_LICENSE(X)
53498+#endif
53499+
53500+/**************************************/
53501+/* OTHER */
53502+
53503+#undef min
53504+#define min(x,y) ({ \
53505+ const typeof(x) _x = (x); \
53506+ const typeof(y) _y = (y); \
53507+ (void) (&_x == &_y); \
53508+ _x < _y ? _x : _y; })
53509+
53510+#undef max
53511+#define max(x,y) ({ \
53512+ const typeof(x) _x = (x); \
53513+ const typeof(y) _y = (y); \
53514+ (void) (&_x == &_y); \
53515+ _x > _y ? _x : _y; })
53516+
53517+#define min_t(type,x,y) ({ \
53518+ type _x = (x); \
53519+ type _y = (y); \
53520+ _x < _y ? _x : _y; })
53521+
53522+#define max_t(type,x,y) ({ \
53523+ type _x = (x); \
53524+ type _y = (y); \
53525+ _x > _y ? _x : _y; })
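/* Illustrative note (not part of the original patch): the (void)(&_x == &_y)
 * statement in min()/max() above exists only to make the compiler warn when
 * the two arguments have different types; min_t()/max_t() omit it because the
 * caller names the common type explicitly, e.g. min_t(u32, len, PAGE_SIZE).
 */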
53526+
53527+#ifndef list_for_each_safe
53528+#define list_for_each_safe(pos, n, head) \
53529+ for (pos = (head)->next, n = pos->next; pos != (head); \
53530+ pos = n, n = pos->next)
53531+#endif
53532+
53533+#ifndef ____cacheline_aligned_in_smp
53534+#ifdef CONFIG_SMP
53535+#define ____cacheline_aligned_in_smp ____cacheline_aligned
53536+#else
53537+#define ____cacheline_aligned_in_smp
53538+#endif /* CONFIG_SMP */
53539+#endif
53540+
53541+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
53542+int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
53543+#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
53544+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
53545+#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
53546+#else /* 2.4.8 => 2.4.9 */
53547+int snprintf(char * buf, size_t size, const char *fmt, ...);
53548+int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
53549+#endif
53550+#endif /* 2.4.10 => 2.4.9 */
53551+
53552+
53553+/*****************************************************************************/
53554+/* 2.4.12 => 2.4.10 */
53555+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) )
53556+#ifndef HAVE_NETIF_MSG
53557+#define HAVE_NETIF_MSG 1
53558+enum {
53559+ NETIF_MSG_DRV = 0x0001,
53560+ NETIF_MSG_PROBE = 0x0002,
53561+ NETIF_MSG_LINK = 0x0004,
53562+ NETIF_MSG_TIMER = 0x0008,
53563+ NETIF_MSG_IFDOWN = 0x0010,
53564+ NETIF_MSG_IFUP = 0x0020,
53565+ NETIF_MSG_RX_ERR = 0x0040,
53566+ NETIF_MSG_TX_ERR = 0x0080,
53567+ NETIF_MSG_TX_QUEUED = 0x0100,
53568+ NETIF_MSG_INTR = 0x0200,
53569+ NETIF_MSG_TX_DONE = 0x0400,
53570+ NETIF_MSG_RX_STATUS = 0x0800,
53571+ NETIF_MSG_PKTDATA = 0x1000,
53572+ NETIF_MSG_HW = 0x2000,
53573+ NETIF_MSG_WOL = 0x4000,
53574+};
53575+
53576+#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
53577+#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
53578+#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
53579+#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
53580+#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
53581+#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
53582+#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
53583+#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
53584+#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
53585+#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
53586+#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
53587+#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
53588+#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
53589+#endif /* !HAVE_NETIF_MSG */
53590+#endif /* 2.4.12 => 2.4.10 */
53591+
53592+/*****************************************************************************/
53593+/* 2.4.13 => 2.4.12 */
53594+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
53595+
53596+/**************************************/
53597+/* PCI DMA MAPPING */
53598+
53599+#ifndef virt_to_page
53600+ #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
53601+#endif
53602+
53603+#ifndef pci_map_page
53604+#define pci_map_page _kc_pci_map_page
53605+u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
53606+#endif
53607+
53608+#ifndef pci_unmap_page
53609+#define pci_unmap_page _kc_pci_unmap_page
53610+void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
53611+#endif
53612+
53613+/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
53614+
53615+#undef DMA_32BIT_MASK
53616+#define DMA_32BIT_MASK 0xffffffff
53617+#undef DMA_64BIT_MASK
53618+#define DMA_64BIT_MASK 0xffffffff
53619+
53620+/**************************************/
53621+/* OTHER */
53622+
53623+#ifndef cpu_relax
53624+#define cpu_relax() rep_nop()
53625+#endif
53626+
53627+struct vlan_ethhdr {
53628+ unsigned char h_dest[ETH_ALEN];
53629+ unsigned char h_source[ETH_ALEN];
53630+ unsigned short h_vlan_proto;
53631+ unsigned short h_vlan_TCI;
53632+ unsigned short h_vlan_encapsulated_proto;
53633+};
53634+#endif /* 2.4.13 => 2.4.12 */
53635+
53636+/*****************************************************************************/
53637+/* 2.4.17 => 2.4.12 */
53638+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
53639+
53640+#ifndef __devexit_p
53641+ #define __devexit_p(x) &(x)
53642+#endif
53643+
53644+#endif /* 2.4.17 => 2.4.12 */
53645+
53646+/*****************************************************************************/
53647+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) )
53648+#define NETIF_MSG_HW 0x2000
53649+#define NETIF_MSG_WOL 0x4000
53650+
53651+#ifndef netif_msg_hw
53652+#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
53653+#endif
53654+#ifndef netif_msg_wol
53655+#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
53656+#endif
53657+#endif /* 2.4.18 */
53658+
53659+/*****************************************************************************/
53660+
53661+/*****************************************************************************/
53662+/* 2.4.20 => 2.4.19 */
53663+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
53664+
53665+/* we won't support NAPI on less than 2.4.20 */
53666+#ifdef NAPI
53667+#undef NAPI
53668+#endif
53669+
53670+#endif /* 2.4.20 => 2.4.19 */
53671+
53672+/*****************************************************************************/
53673+/* 2.4.22 => 2.4.17 */
53674+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
53675+#define pci_name(x) ((x)->slot_name)
53676+
53677+#ifndef SUPPORTED_10000baseT_Full
53678+#define SUPPORTED_10000baseT_Full BIT(12)
53679+#endif
53680+#ifndef ADVERTISED_10000baseT_Full
53681+#define ADVERTISED_10000baseT_Full BIT(12)
53682+#endif
53683+#endif
53684+
53685+/*****************************************************************************/
53686+/*****************************************************************************/
53687+/* 2.4.23 => 2.4.22 */
53688+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
53689+/*****************************************************************************/
53690+#ifdef NAPI
53691+#ifndef netif_poll_disable
53692+#define netif_poll_disable(x) _kc_netif_poll_disable(x)
53693+static inline void _kc_netif_poll_disable(struct net_device *netdev)
53694+{
53695+ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
53696+ /* No hurry */
53697+ current->state = TASK_INTERRUPTIBLE;
53698+ schedule_timeout(1);
53699+ }
53700+}
53701+#endif
53702+#ifndef netif_poll_enable
53703+#define netif_poll_enable(x) _kc_netif_poll_enable(x)
53704+static inline void _kc_netif_poll_enable(struct net_device *netdev)
53705+{
53706+ clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
53707+}
53708+#endif
53709+#endif /* NAPI */
53710+#ifndef netif_tx_disable
53711+#define netif_tx_disable(x) _kc_netif_tx_disable(x)
53712+static inline void _kc_netif_tx_disable(struct net_device *dev)
53713+{
53714+ spin_lock_bh(&dev->xmit_lock);
53715+ netif_stop_queue(dev);
53716+ spin_unlock_bh(&dev->xmit_lock);
53717+}
53718+#endif
53719+#else /* 2.4.23 => 2.4.22 */
53720+#define HAVE_SCTP
53721+#endif /* 2.4.23 => 2.4.22 */
53722+
53723+/*****************************************************************************/
53724+/* 2.6.4 => 2.6.0 */
53725+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
53726+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
53727+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
53728+#define ETHTOOL_OPS_COMPAT
53729+#endif /* 2.6.4 => 2.6.0 */
53730+
53731+/*****************************************************************************/
53732+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) )
53733+#define __user
53734+#endif /* < 2.4.27 */
53735+
53736+/*****************************************************************************/
53737+/* 2.5.71 => 2.4.x */
53738+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
53739+#define sk_protocol protocol
53740+#define pci_get_device pci_find_device
53741+#endif /* 2.5.71 => 2.4.x */
53742+
53743+/*****************************************************************************/
53744+/* < 2.4.27 or 2.6.0 <= 2.6.5 */
53745+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
53746+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
53747+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
53748+
53749+#ifndef netif_msg_init
53750+#define netif_msg_init _kc_netif_msg_init
53751+static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
53752+{
53753+ /* use default */
53754+ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
53755+ return default_msg_enable_bits;
53756+ if (debug_value == 0) /* no output */
53757+ return 0;
53758+ /* set low N bits */
53759+ return (1 << debug_value) -1;
53760+}
53761+#endif
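/* Illustrative usage (not part of the original patch): a module-level "debug"
 * parameter selects how many of the low NETIF_MSG_* bits are enabled, e.g.
 *
 *   adapter->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_PROBE);
 *
 * debug == 3 enables DRV, PROBE and LINK; a negative value keeps the default.
 */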
53762+
53763+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
53764+/*****************************************************************************/
53765+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
53766+ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
53767+ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
53768+#define netdev_priv(x) x->priv
53769+#endif
53770+
53771+/*****************************************************************************/
53772+/* <= 2.5.0 */
53773+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
53774+#include <linux/rtnetlink.h>
53775+#undef pci_register_driver
53776+#define pci_register_driver pci_module_init
53777+
53778+/*
53779+ * Most of the dma compat code is copied/modified from the 2.4.37
53780+ * /include/linux/libata-compat.h header file
53781+ */
53782+/* These definitions mirror those in pci.h, so they can be used
53783+ * interchangeably with their PCI_ counterparts */
53784+enum dma_data_direction {
53785+ DMA_BIDIRECTIONAL = 0,
53786+ DMA_TO_DEVICE = 1,
53787+ DMA_FROM_DEVICE = 2,
53788+ DMA_NONE = 3,
53789+};
53790+
53791+struct device {
53792+ struct pci_dev pdev;
53793+};
53794+
53795+static inline struct pci_dev *to_pci_dev (struct device *dev)
53796+{
53797+ return (struct pci_dev *) dev;
53798+}
53799+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
53800+{
53801+ return (struct device *) pdev;
53802+}
53803+#define pdev_printk(lvl, pdev, fmt, args...) \
53804+ printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
53805+#define dev_err(dev, fmt, args...) \
53806+ pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
53807+#define dev_info(dev, fmt, args...) \
53808+ pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
53809+#define dev_warn(dev, fmt, args...) \
53810+ pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
53811+#define dev_notice(dev, fmt, args...) \
53812+ pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
53813+#define dev_dbg(dev, fmt, args...) \
53814+ pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args)
53815+
53816+/* NOTE: dangerous! we ignore the 'gfp' argument */
53817+#define dma_alloc_coherent(dev,sz,dma,gfp) \
53818+ pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
53819+#define dma_free_coherent(dev,sz,addr,dma_addr) \
53820+ pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
53821+
53822+#define dma_map_page(dev,a,b,c,d) \
53823+ pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
53824+#define dma_unmap_page(dev,a,b,c) \
53825+ pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
53826+
53827+#define dma_map_single(dev,a,b,c) \
53828+ pci_map_single(to_pci_dev(dev),(a),(b),(c))
53829+#define dma_unmap_single(dev,a,b,c) \
53830+ pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
53831+
53832+#define dma_map_sg(dev, sg, nents, dir) \
53833+	pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
53834+#define dma_unmap_sg(dev, sg, nents, dir) \
53835+	pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
53836+
53837+#define dma_sync_single(dev,a,b,c) \
53838+ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
53839+
53840+/* for range just sync everything, that's all the pci API can do */
53841+#define dma_sync_single_range(dev,addr,off,sz,dir) \
53842+ pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
53843+
53844+#define dma_set_mask(dev,mask) \
53845+ pci_set_dma_mask(to_pci_dev(dev),(mask))
53846+
53847+/* hlist_* code - double linked lists */
53848+struct hlist_head {
53849+ struct hlist_node *first;
53850+};
53851+
53852+struct hlist_node {
53853+ struct hlist_node *next, **pprev;
53854+};
53855+
53856+static inline void __hlist_del(struct hlist_node *n)
53857+{
53858+ struct hlist_node *next = n->next;
53859+ struct hlist_node **pprev = n->pprev;
53860+ *pprev = next;
53861+ if (next)
53862+ next->pprev = pprev;
53863+}
53864+
53865+static inline void hlist_del(struct hlist_node *n)
53866+{
53867+ __hlist_del(n);
53868+ n->next = NULL;
53869+ n->pprev = NULL;
53870+}
53871+
53872+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
53873+{
53874+ struct hlist_node *first = h->first;
53875+ n->next = first;
53876+ if (first)
53877+ first->pprev = &n->next;
53878+ h->first = n;
53879+ n->pprev = &h->first;
53880+}
53881+
53882+static inline int hlist_empty(const struct hlist_head *h)
53883+{
53884+ return !h->first;
53885+}
53886+#define HLIST_HEAD_INIT { .first = NULL }
53887+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
53888+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
53889+static inline void INIT_HLIST_NODE(struct hlist_node *h)
53890+{
53891+ h->next = NULL;
53892+ h->pprev = NULL;
53893+}
53894+
53895+#ifndef might_sleep
53896+#define might_sleep()
53897+#endif
53898+#else
53899+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
53900+{
53901+ return &pdev->dev;
53902+}
53903+#endif /* <= 2.5.0 */
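/* Illustrative note (not part of the original patch): on <= 2.5.0 kernels the
 * minimal "struct device" above simply wraps a pci_dev, so a driver call like
 *
 *   dma_map_single(pci_dev_to_dev(pdev), data, len, DMA_TO_DEVICE);
 *
 * is rewritten by the macros above into
 * pci_map_single(pdev, data, len, DMA_TO_DEVICE).
 */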
53904+
53905+/*****************************************************************************/
53906+/* 2.5.28 => 2.4.23 */
53907+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
53908+
53909+#include <linux/tqueue.h>
53910+#define work_struct tq_struct
53911+#undef INIT_WORK
53912+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
53913+#undef container_of
53914+#define container_of list_entry
53915+#define schedule_work schedule_task
53916+#define flush_scheduled_work flush_scheduled_tasks
53917+#define cancel_work_sync(x) flush_scheduled_work()
53918+
53919+#endif /* 2.5.28 => 2.4.23 */
53920+
53921+/*****************************************************************************/
53922+/* 2.6.0 => 2.5.28 */
53923+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
53924+#ifndef read_barrier_depends
53925+#define read_barrier_depends() rmb()
53926+#endif
53927+
53928+#ifndef rcu_head
53929+struct __kc_callback_head {
53930+ struct __kc_callback_head *next;
53931+ void (*func)(struct callback_head *head);
53932+};
53933+#define rcu_head __kc_callback_head
53934+#endif
53935+
53936+#undef get_cpu
53937+#define get_cpu() smp_processor_id()
53938+#undef put_cpu
53939+#define put_cpu() do { } while(0)
53940+#define MODULE_INFO(version, _version)
53941+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
53942+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
53943+#endif
53944+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
53945+#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
53946+#endif
53947+#ifndef CONFIG_IGC_DISABLE_PACKET_SPLIT
53948+#define CONFIG_IGC_DISABLE_PACKET_SPLIT 1
53949+#endif
53950+
53951+#define dma_set_coherent_mask(dev,mask) 1
53952+
53953+#undef dev_put
53954+#define dev_put(dev) __dev_put(dev)
53955+
53956+#ifndef skb_fill_page_desc
53957+#define skb_fill_page_desc _kc_skb_fill_page_desc
53958+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
53959+#endif
53960+
53961+#undef ALIGN
53962+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
53963+
53964+#ifndef page_count
53965+#define page_count(p) atomic_read(&(p)->count)
53966+#endif
53967+
53968+#ifdef MAX_NUMNODES
53969+#undef MAX_NUMNODES
53970+#endif
53971+#define MAX_NUMNODES 1
53972+
53973+/* find_first_bit and find_next_bit are not defined for most
53974+ * 2.4 kernels (except for the redhat 2.4.21 kernels)
53975+ */
53976+#include <linux/bitops.h>
53977+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
53978+#undef find_next_bit
53979+#define find_next_bit _kc_find_next_bit
53980+unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size,
53981+ unsigned long offset);
53982+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
53983+
53984+#ifndef netdev_name
53985+static inline const char *_kc_netdev_name(const struct net_device *dev)
53986+{
53987+ if (strchr(dev->name, '%'))
53988+ return "(unregistered net_device)";
53989+ return dev->name;
53990+}
53991+#define netdev_name(netdev) _kc_netdev_name(netdev)
53992+#endif /* netdev_name */
53993+
53994+#ifndef strlcpy
53995+#define strlcpy _kc_strlcpy
53996+size_t _kc_strlcpy(char *dest, const char *src, size_t size);
53997+#endif /* strlcpy */
53998+
53999+#ifndef do_div
54000+#if BITS_PER_LONG == 64
54001+# define do_div(n,base) ({ \
54002+ uint32_t __base = (base); \
54003+ uint32_t __rem; \
54004+ __rem = ((uint64_t)(n)) % __base; \
54005+ (n) = ((uint64_t)(n)) / __base; \
54006+ __rem; \
54007+ })
54008+#elif BITS_PER_LONG == 32
54009+uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor);
54010+# define do_div(n,base) ({ \
54011+ uint32_t __base = (base); \
54012+ uint32_t __rem; \
54013+ if (likely(((n) >> 32) == 0)) { \
54014+ __rem = (uint32_t)(n) % __base; \
54015+ (n) = (uint32_t)(n) / __base; \
54016+ } else \
54017+ __rem = _kc__div64_32(&(n), __base); \
54018+ __rem; \
54019+ })
54020+#else /* BITS_PER_LONG == ?? */
54021+# error do_div() does not yet support the C64
54022+#endif /* BITS_PER_LONG */
54023+#endif /* do_div */
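/* Illustrative do_div() usage (not part of the original patch): the macro
 * divides its 64-bit first argument in place and evaluates to the remainder:
 *
 *   u64 count = total;
 *   u32 rem = do_div(count, 1000);   // count now holds total / 1000
 */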
54024+
54025+#ifndef NSEC_PER_SEC
54026+#define NSEC_PER_SEC 1000000000L
54027+#endif
54028+
54029+#undef HAVE_I2C_SUPPORT
54030+#else /* 2.6.0 */
54031+
54032+#endif /* 2.6.0 => 2.5.28 */
54033+/*****************************************************************************/
54034+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
54035+#define dma_pool pci_pool
54036+#define dma_pool_destroy pci_pool_destroy
54037+#define dma_pool_alloc pci_pool_alloc
54038+#define dma_pool_free pci_pool_free
54039+
54040+#define dma_pool_create(name,dev,size,align,allocation) \
54041+ pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
54042+#endif /* < 2.6.3 */
54043+
54044+/*****************************************************************************/
54045+/* 2.6.4 => 2.6.0 */
54046+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
54047+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
54048+#endif /* 2.6.4 => 2.6.0 */
54049+
54050+/*****************************************************************************/
54051+/* 2.6.5 => 2.6.0 */
54052+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
54053+#define dma_sync_single_for_cpu dma_sync_single
54054+#define dma_sync_single_for_device dma_sync_single
54055+#define dma_sync_single_range_for_cpu dma_sync_single_range
54056+#define dma_sync_single_range_for_device dma_sync_single_range
54057+#ifndef pci_dma_mapping_error
54058+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
54059+static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
54060+{
54061+ return dma_addr == 0;
54062+}
54063+#endif
54064+#endif /* 2.6.5 => 2.6.0 */
54065+
54066+/*****************************************************************************/
54067+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
54068+int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
54069+#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
54070+#endif /* < 2.6.4 */
54071+
54072+/*****************************************************************************/
54073+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
54074+/* taken from 2.6 include/linux/bitmap.h */
54075+#undef bitmap_zero
54076+#define bitmap_zero _kc_bitmap_zero
54077+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
54078+{
54079+ if (nbits <= BITS_PER_LONG)
54080+ *dst = 0UL;
54081+ else {
54082+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
54083+ memset(dst, 0, len);
54084+ }
54085+}
54086+#define page_to_nid(x) 0
54087+
54088+#endif /* < 2.6.6 */
54089+
54090+/*****************************************************************************/
54091+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
54092+#undef if_mii
54093+#define if_mii _kc_if_mii
54094+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
54095+{
54096+ return (struct mii_ioctl_data *) &rq->ifr_ifru;
54097+}
54098+
54099+#ifndef __force
54100+#define __force
54101+#endif
54102+#endif /* < 2.6.7 */
54103+
54104+/*****************************************************************************/
54105+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
54106+#ifndef PCI_EXP_DEVCTL
54107+#define PCI_EXP_DEVCTL 8
54108+#endif
54109+#ifndef PCI_EXP_DEVCTL_CERE
54110+#define PCI_EXP_DEVCTL_CERE 0x0001
54111+#endif
54112+#define PCI_EXP_FLAGS 2 /* Capabilities register */
54113+#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
54114+#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
54115+#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
54116+#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
54117+#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
54118+#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
54119+#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
54120+#define PCI_EXP_DEVCAP 4 /* Device capabilities */
54121+#define PCI_EXP_DEVSTA 10 /* Device Status */
54122+#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
54123+ schedule_timeout((x * HZ)/1000 + 2); \
54124+ } while (0)
54125+
54126+#endif /* < 2.6.8 */
54127+
54128+/*****************************************************************************/
54129+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
54130+#include <net/dsfield.h>
54131+#define __iomem
54132+
54133+#ifndef kcalloc
54134+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
54135+void *_kc_kzalloc(size_t size, int flags);
54136+#endif
54137+#define MSEC_PER_SEC 1000L
54138+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
54139+{
54140+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
54141+ return (MSEC_PER_SEC / HZ) * j;
54142+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
54143+ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
54144+#else
54145+ return (j * MSEC_PER_SEC) / HZ;
54146+#endif
54147+}
54148+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
54149+{
54150+ if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
54151+ return MAX_JIFFY_OFFSET;
54152+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
54153+ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
54154+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
54155+ return m * (HZ / MSEC_PER_SEC);
54156+#else
54157+ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
54158+#endif
54159+}
54160+
54161+#define msleep_interruptible _kc_msleep_interruptible
54162+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
54163+{
54164+ unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
54165+
54166+ while (timeout && !signal_pending(current)) {
54167+ __set_current_state(TASK_INTERRUPTIBLE);
54168+ timeout = schedule_timeout(timeout);
54169+ }
54170+ return _kc_jiffies_to_msecs(timeout);
54171+}
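/* Illustrative usage (not part of the original patch):
 *
 *   unsigned long left = msleep_interruptible(50);
 *
 * returns 0 if the full 50 ms elapsed, or roughly the number of milliseconds
 * that were still pending when a signal interrupted the sleep.
 */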
54172+
54173+/* Basic mode control register. */
54174+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
54175+
54176+#ifndef __le16
54177+#define __le16 u16
54178+#endif
54179+#ifndef __le32
54180+#define __le32 u32
54181+#endif
54182+#ifndef __le64
54183+#define __le64 u64
54184+#endif
54185+#ifndef __be16
54186+#define __be16 u16
54187+#endif
54188+#ifndef __be32
54189+#define __be32 u32
54190+#endif
54191+#ifndef __be64
54192+#define __be64 u64
54193+#endif
54194+
54195+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
54196+{
54197+ return (struct vlan_ethhdr *)skb->mac.raw;
54198+}
54199+
54200+/* Wake-On-Lan options. */
54201+#define WAKE_PHY BIT(0)
54202+#define WAKE_UCAST BIT(1)
54203+#define WAKE_MCAST BIT(2)
54204+#define WAKE_BCAST BIT(3)
54205+#define WAKE_ARP BIT(4)
54206+#define WAKE_MAGIC BIT(5)
54207+#define WAKE_MAGICSECURE BIT(6) /* only meaningful if WAKE_MAGIC */
54208+
54209+#define skb_header_pointer _kc_skb_header_pointer
54210+static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
54211+ int offset, int len, void *buffer)
54212+{
54213+ int hlen = skb_headlen(skb);
54214+
54215+ if (hlen - offset >= len)
54216+ return skb->data + offset;
54217+
54218+#ifdef MAX_SKB_FRAGS
54219+ if (skb_copy_bits(skb, offset, buffer, len) < 0)
54220+ return NULL;
54221+
54222+ return buffer;
54223+#else
54224+ return NULL;
54225+#endif
54226+
54227+#ifndef NETDEV_TX_OK
54228+#define NETDEV_TX_OK 0
54229+#endif
54230+#ifndef NETDEV_TX_BUSY
54231+#define NETDEV_TX_BUSY 1
54232+#endif
54233+#ifndef NETDEV_TX_LOCKED
54234+#define NETDEV_TX_LOCKED -1
54235+#endif
54236+}
54237+
54238+#ifndef __bitwise
54239+#define __bitwise
54240+#endif
54241+#endif /* < 2.6.9 */
54242+
54243+/*****************************************************************************/
54244+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
54245+#ifdef module_param_array_named
54246+#undef module_param_array_named
54247+#define module_param_array_named(name, array, type, nump, perm) \
54248+ static struct kparam_array __param_arr_##name \
54249+ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
54250+ sizeof(array[0]), array }; \
54251+ module_param_call(name, param_array_set, param_array_get, \
54252+ &__param_arr_##name, perm)
54253+#endif /* module_param_array_named */
54254+/*
54255+ * num_online_nodes() is broken for all < 2.6.10 kernels. This is needed to
54256+ * support the Node module parameter of ixgbe.
54257+ */
54258+#undef num_online_nodes
54259+#define num_online_nodes(n) 1
54260+extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
54261+#undef node_online_map
54262+#define node_online_map _kcompat_node_online_map
54263+#define pci_get_class pci_find_class
54264+#endif /* < 2.6.10 */
54265+
54266+/*****************************************************************************/
54267+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
54268+#define PCI_D0 0
54269+#define PCI_D1 1
54270+#define PCI_D2 2
54271+#define PCI_D3hot 3
54272+#define PCI_D3cold 4
54273+typedef int pci_power_t;
54274+#define pci_choose_state(pdev,state) state
54275+#define PMSG_SUSPEND 3
54276+#define PCI_EXP_LNKCTL 16
54277+
54278+#undef NETIF_F_LLTX
54279+
54280+#ifndef ARCH_HAS_PREFETCH
54281+#define prefetch(X)
54282+#endif
54283+
54284+#ifndef NET_IP_ALIGN
54285+#define NET_IP_ALIGN 2
54286+#endif
54287+
54288+#define KC_USEC_PER_SEC 1000000L
54289+#define usecs_to_jiffies _kc_usecs_to_jiffies
54290+static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
54291+{
54292+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
54293+ return (KC_USEC_PER_SEC / HZ) * j;
54294+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
54295+ return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
54296+#else
54297+ return (j * KC_USEC_PER_SEC) / HZ;
54298+#endif
54299+}
54300+static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
54301+{
54302+ if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
54303+ return MAX_JIFFY_OFFSET;
54304+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
54305+ return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
54306+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
54307+ return m * (HZ / KC_USEC_PER_SEC);
54308+#else
54309+ return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
54310+#endif
54311+}
54312+
54313+#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
54314+#define PCI_EXP_LNKSTA 18 /* Link Status */
54315+#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
54316+#define PCI_EXP_SLTCTL 24 /* Slot Control */
54317+#define PCI_EXP_SLTSTA 26 /* Slot Status */
54318+#define PCI_EXP_RTCTL 28 /* Root Control */
54319+#define PCI_EXP_RTCAP 30 /* Root Capabilities */
54320+#define PCI_EXP_RTSTA 32 /* Root Status */
54321+#endif /* < 2.6.11 */
54322+
54323+/*****************************************************************************/
54324+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
54325+#include <linux/reboot.h>
54326+#define USE_REBOOT_NOTIFIER
54327+
54328+/* Generic MII registers. */
54329+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
54330+#define MII_STAT1000 0x0a /* 1000BASE-T status */
54331+/* Advertisement control register. */
54332+#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
54333+#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
54334+/* Link partner ability register. */
54335+#define LPA_PAUSE_CAP 0x0400 /* Can pause */
54336+#define LPA_PAUSE_ASYM         0x0800  /* Can pause asymmetrically */
54337+/* 1000BASE-T Control register */
54338+#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
54339+#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
54340+/* 1000BASE-T Status register */
54341+#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
54342+#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
54343+
54344+#ifndef is_zero_ether_addr
54345+#define is_zero_ether_addr _kc_is_zero_ether_addr
54346+static inline int _kc_is_zero_ether_addr(const u8 *addr)
54347+{
54348+ return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
54349+}
54350+#endif /* is_zero_ether_addr */
54351+#ifndef is_multicast_ether_addr
54352+#define is_multicast_ether_addr _kc_is_multicast_ether_addr
54353+static inline int _kc_is_multicast_ether_addr(const u8 *addr)
54354+{
54355+ return addr[0] & 0x01;
54356+}
54357+#endif /* is_multicast_ether_addr */
54358+#endif /* < 2.6.12 */
54359+
54360+/*****************************************************************************/
54361+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
54362+#ifndef kstrdup
54363+#define kstrdup _kc_kstrdup
54364+char *_kc_kstrdup(const char *s, unsigned int gfp);
54365+#endif
54366+#endif /* < 2.6.13 */
54367+
54368+/*****************************************************************************/
54369+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
54370+#define pm_message_t u32
54371+#ifndef kzalloc
54372+#define kzalloc _kc_kzalloc
54373+void *_kc_kzalloc(size_t size, int flags);
54374+#endif
54375+
54376+/* Generic MII registers. */
54377+#define MII_ESTATUS 0x0f /* Extended Status */
54378+/* Basic mode status register. */
54379+#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
54380+/* Extended status register. */
54381+#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
54382+#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
54383+
54384+#define SUPPORTED_Pause BIT(13)
54385+#define SUPPORTED_Asym_Pause BIT(14)
54386+#define ADVERTISED_Pause BIT(13)
54387+#define ADVERTISED_Asym_Pause BIT(14)
54388+
54389+#if (!(RHEL_RELEASE_CODE && \
54390+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
54391+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
54392+#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
54393+#define gfp_t unsigned
54394+#else
54395+typedef unsigned gfp_t;
54396+#endif
54397+#endif /* !RHEL4.3->RHEL5.0 */
54398+
54399+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
54400+#ifdef CONFIG_X86_64
54401+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \
54402+ dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
54403+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \
54404+ dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
54405+#endif
54406+#endif
54407+#endif /* < 2.6.14 */
54408+
54409+/*****************************************************************************/
54410+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
54411+#ifndef kfree_rcu
54412+/* this is placed here due to a lack of rcu_barrier in previous kernels */
54413+#define kfree_rcu(_ptr, _offset) kfree(_ptr)
54414+#endif /* kfree_rcu */
54415+#ifndef vmalloc_node
54416+#define vmalloc_node(a,b) vmalloc(a)
54417+#endif /* vmalloc_node*/
54418+
54419+#define setup_timer(_timer, _function, _data) \
54420+do { \
54421+ (_timer)->function = _function; \
54422+ (_timer)->data = _data; \
54423+ init_timer(_timer); \
54424+} while (0)
54425+#ifndef device_can_wakeup
54426+#define device_can_wakeup(dev) (1)
54427+#endif
54428+#ifndef device_set_wakeup_enable
54429+#define device_set_wakeup_enable(dev, val) do{}while(0)
54430+#endif
54431+#ifndef device_init_wakeup
54432+#define device_init_wakeup(dev,val) do {} while (0)
54433+#endif
54434+static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
54435+{
54436+ const u16 *a = (const u16 *) addr1;
54437+ const u16 *b = (const u16 *) addr2;
54438+
54439+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
54440+}
54441+#undef compare_ether_addr
54442+#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
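/* Illustrative note (not part of the original patch): as with the upstream
 * helper, compare_ether_addr() returns 0 when the two MAC addresses match and
 * non-zero otherwise, so the typical call site is
 *
 *   if (!compare_ether_addr(netdev->dev_addr, new_addr))
 *           ...  // addresses are identical
 */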
54443+#endif /* < 2.6.15 */
54444+
54445+/*****************************************************************************/
54446+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
54447+#undef DEFINE_MUTEX
54448+#define DEFINE_MUTEX(x) DECLARE_MUTEX(x)
54449+#define mutex_lock(x) down_interruptible(x)
54450+#define mutex_unlock(x) up(x)
54451+
54452+#ifndef ____cacheline_internodealigned_in_smp
54453+#ifdef CONFIG_SMP
54454+#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
54455+#else
54456+#define ____cacheline_internodealigned_in_smp
54457+#endif /* CONFIG_SMP */
54458+#endif /* ____cacheline_internodealigned_in_smp */
54459+#undef HAVE_PCI_ERS
54460+#else /* 2.6.16 and above */
54461+#undef HAVE_PCI_ERS
54462+#define HAVE_PCI_ERS
54463+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) )
54464+#ifdef device_can_wakeup
54465+#undef device_can_wakeup
54466+#endif /* device_can_wakeup */
54467+#define device_can_wakeup(dev) 1
54468+#endif /* SLE_VERSION(10,4,0) */
54469+#endif /* < 2.6.16 */
54470+
54471+/*****************************************************************************/
54472+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
54473+#ifndef dev_notice
54474+#define dev_notice(dev, fmt, args...) \
54475+ dev_printk(KERN_NOTICE, dev, fmt, ## args)
54476+#endif
54477+
54478+#ifndef first_online_node
54479+#define first_online_node 0
54480+#endif
54481+#ifndef NET_SKB_PAD
54482+#define NET_SKB_PAD 16
54483+#endif
54484+#endif /* < 2.6.17 */
54485+
54486+/*****************************************************************************/
54487+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
54488+
54489+#ifndef IRQ_HANDLED
54490+#define irqreturn_t void
54491+#define IRQ_HANDLED
54492+#define IRQ_NONE
54493+#endif
54494+
54495+#ifndef IRQF_PROBE_SHARED
54496+#ifdef SA_PROBEIRQ
54497+#define IRQF_PROBE_SHARED SA_PROBEIRQ
54498+#else
54499+#define IRQF_PROBE_SHARED 0
54500+#endif
54501+#endif
54502+
54503+#ifndef IRQF_SHARED
54504+#define IRQF_SHARED SA_SHIRQ
54505+#endif
54506+
54507+#ifndef ARRAY_SIZE
54508+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
54509+#endif
54510+
54511+#ifndef skb_is_gso
54512+#ifdef NETIF_F_TSO
54513+#define skb_is_gso _kc_skb_is_gso
54514+static inline int _kc_skb_is_gso(const struct sk_buff *skb)
54515+{
54516+ return skb_shinfo(skb)->gso_size;
54517+}
54518+#else
54519+#define skb_is_gso(a) 0
54520+#endif
54521+#endif
54522+
54523+#ifndef resource_size_t
54524+#define resource_size_t unsigned long
54525+#endif
54526+
54527+#ifdef skb_pad
54528+#undef skb_pad
54529+#endif
54530+#define skb_pad(x,y) _kc_skb_pad(x, y)
54531+int _kc_skb_pad(struct sk_buff *skb, int pad);
54532+#ifdef skb_padto
54533+#undef skb_padto
54534+#endif
54535+#define skb_padto(x,y) _kc_skb_padto(x, y)
54536+static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
54537+{
54538+ unsigned int size = skb->len;
54539+ if(likely(size >= len))
54540+ return 0;
54541+ return _kc_skb_pad(skb, len - size);
54542+}
54543+
54544+#ifndef DECLARE_PCI_UNMAP_ADDR
54545+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
54546+ dma_addr_t ADDR_NAME
54547+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
54548+ u32 LEN_NAME
54549+#define pci_unmap_addr(PTR, ADDR_NAME) \
54550+ ((PTR)->ADDR_NAME)
54551+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
54552+ (((PTR)->ADDR_NAME) = (VAL))
54553+#define pci_unmap_len(PTR, LEN_NAME) \
54554+ ((PTR)->LEN_NAME)
54555+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
54556+ (((PTR)->LEN_NAME) = (VAL))
54557+#endif /* DECLARE_PCI_UNMAP_ADDR */
54558+#endif /* < 2.6.18 */
54559+
54560+/*****************************************************************************/
54561+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
54562+enum pcie_link_width {
54563+ PCIE_LNK_WIDTH_RESRV = 0x00,
54564+ PCIE_LNK_X1 = 0x01,
54565+ PCIE_LNK_X2 = 0x02,
54566+ PCIE_LNK_X4 = 0x04,
54567+ PCIE_LNK_X8 = 0x08,
54568+ PCIE_LNK_X12 = 0x0C,
54569+ PCIE_LNK_X16 = 0x10,
54570+ PCIE_LNK_X32 = 0x20,
54571+ PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
54572+};
54573+
54574+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0)))
54575+#define i_private u.generic_ip
54576+#endif /* >= RHEL 5.0 */
54577+
54578+#ifndef DIV_ROUND_UP
54579+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
54580+#endif
54581+#ifndef __ALIGN_MASK
54582+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
54583+#endif
54584+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
54585+#if (!((RHEL_RELEASE_CODE && \
54586+ ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
54587+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
54588+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0))))))
54589+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
54590+#endif
54591+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
54592+#undef CONFIG_INET_LRO
54593+#undef CONFIG_INET_LRO_MODULE
54594+#endif
54595+typedef irqreturn_t (*new_handler_t)(int, void*);
54596+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
54597+#else /* 2.4.x */
54598+typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
54599+typedef void (*new_handler_t)(int, void*);
54600+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
54601+#endif /* >= 2.5.x */
54602+{
54603+ irq_handler_t new_handler = (irq_handler_t) handler;
54604+ return request_irq(irq, new_handler, flags, devname, dev_id);
54605+}
54606+
54607+#undef request_irq
54608+#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
54609+
54610+#define irq_handler_t new_handler_t
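/* Illustrative note (not part of the original patch): with the wrapper above,
 * drivers keep registering a modern two-argument handler (names below are
 * examples only),
 *
 *   irqreturn_t my_intr(int irq, void *data);
 *   request_irq(irq, my_intr, IRQF_SHARED, netdev->name, netdev);
 *
 * and _kc_request_irq() casts it to the older three-argument prototype that
 * pre-2.6.19 kernels expect.
 */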
54611+
54612+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) )
54613+#ifndef skb_checksum_help
54614+static inline int __kc_skb_checksum_help(struct sk_buff *skb)
54615+{
54616+ return skb_checksum_help(skb, 0);
54617+}
54618+#define skb_checksum_help(skb) __kc_skb_checksum_help((skb))
54619+#endif
54620+#endif /* < 2.6.19 && >= 2.6.11 */
54621+
54622+/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
54623+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
54624+#define PCIE_CONFIG_SPACE_LEN 256
54625+#define PCI_CONFIG_SPACE_LEN 64
54626+#define PCIE_LINK_STATUS 0x12
54627+#define pci_config_space_ich8lan() do {} while(0)
54628+#undef pci_save_state
54629+int _kc_pci_save_state(struct pci_dev *);
54630+#define pci_save_state(pdev) _kc_pci_save_state(pdev)
54631+#undef pci_restore_state
54632+void _kc_pci_restore_state(struct pci_dev *);
54633+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
54634+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
54635+
54636+#ifdef HAVE_PCI_ERS
54637+#undef free_netdev
54638+void _kc_free_netdev(struct net_device *);
54639+#define free_netdev(netdev) _kc_free_netdev(netdev)
54640+#endif
54641+static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev)
54642+{
54643+ return 0;
54644+}
54645+#define pci_disable_pcie_error_reporting(dev) do {} while (0)
54646+#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
54647+
54648+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
54649+#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
54650+#ifndef bool
54651+#define bool _Bool
54652+#define true 1
54653+#define false 0
54654+#endif
54655+#else /* 2.6.19 */
54656+#include <linux/aer.h>
54657+#include <linux/pci_hotplug.h>
54658+
54659+#define NEW_SKB_CSUM_HELP
54660+#endif /* < 2.6.19 */
54661+
54662+/*****************************************************************************/
54663+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
54664+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
54665+#undef INIT_WORK
54666+#define INIT_WORK(_work, _func) \
54667+do { \
54668+ INIT_LIST_HEAD(&(_work)->entry); \
54669+ (_work)->pending = 0; \
54670+ (_work)->func = (void (*)(void *))_func; \
54671+ (_work)->data = _work; \
54672+ init_timer(&(_work)->timer); \
54673+} while (0)
54674+#endif
54675+
54676+#ifndef PCI_VDEVICE
54677+#define PCI_VDEVICE(ven, dev) \
54678+ PCI_VENDOR_ID_##ven, (dev), \
54679+ PCI_ANY_ID, PCI_ANY_ID, 0, 0
54680+#endif
54681+
54682+#ifndef PCI_VENDOR_ID_INTEL
54683+#define PCI_VENDOR_ID_INTEL 0x8086
54684+#endif
54685+
54686+#ifndef round_jiffies
54687+#define round_jiffies(x) x
54688+#endif
54689+
54690+#define csum_offset csum
54691+
54692+#define HAVE_EARLY_VMALLOC_NODE
54693+#define dev_to_node(dev) -1
54694+#undef set_dev_node
54695+/* remove compiler warning with b=b, for unused variable */
54696+#define set_dev_node(a, b) do { (b) = (b); } while(0)
54697+
54698+#if (!(RHEL_RELEASE_CODE && \
54699+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
54700+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
54701+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
54702+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
54703+typedef __u16 __bitwise __sum16;
54704+typedef __u32 __bitwise __wsum;
54705+#endif
54706+
54707+#if (!(RHEL_RELEASE_CODE && \
54708+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
54709+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
54710+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
54711+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
54712+static inline __wsum csum_unfold(__sum16 n)
54713+{
54714+ return (__force __wsum)n;
54715+}
54716+#endif
54717+
54718+#else /* < 2.6.20 */
54719+#define HAVE_DEVICE_NUMA_NODE
54720+#endif /* < 2.6.20 */
54721+
54722+/*****************************************************************************/
54723+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
54724+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
54725+#define NETDEV_CLASS_DEV
54726+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
54727+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
54728+#define vlan_group_set_device(vg, id, dev) \
54729+ do { \
54730+ if (vg) vg->vlan_devices[id] = dev; \
54731+ } while (0)
54732+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
54733+#define pci_channel_offline(pdev) (pdev->error_state && \
54734+ pdev->error_state != pci_channel_io_normal)
54735+#define pci_request_selected_regions(pdev, bars, name) \
54736+ pci_request_regions(pdev, name)
54737+#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
54738+
54739+#ifndef __aligned
54740+#define __aligned(x) __attribute__((aligned(x)))
54741+#endif
54742+
54743+struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev);
54744+#define netdev_to_dev(netdev) \
54745+ pci_dev_to_dev(_kc_netdev_to_pdev(netdev))
54746+#define devm_kzalloc(dev, size, flags) kzalloc(size, flags)
54747+#define devm_kfree(dev, p) kfree(p)
54748+#else /* 2.6.21 */
54749+static inline struct device *netdev_to_dev(struct net_device *netdev)
54750+{
54751+ return &netdev->dev;
54752+}
54753+
54754+#endif /* < 2.6.21 */
54755+
54756+/*****************************************************************************/
54757+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
54758+#define tcp_hdr(skb) (skb->h.th)
54759+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
54760+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
54761+#define skb_transport_header(skb) (skb->h.raw)
54762+#define ipv6_hdr(skb) (skb->nh.ipv6h)
54763+#define ip_hdr(skb) (skb->nh.iph)
54764+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
54765+#define skb_network_header(skb) (skb->nh.raw)
54766+#define skb_tail_pointer(skb) skb->tail
54767+#define skb_reset_tail_pointer(skb) \
54768+ do { \
54769+ skb->tail = skb->data; \
54770+ } while (0)
54771+#define skb_set_tail_pointer(skb, offset) \
54772+ do { \
54773+ skb->tail = skb->data + offset; \
54774+ } while (0)
54775+#define skb_copy_to_linear_data(skb, from, len) \
54776+ memcpy(skb->data, from, len)
54777+#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
54778+ memcpy(skb->data + offset, from, len)
54779+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
54780+#define pci_register_driver pci_module_init
54781+#define skb_mac_header(skb) skb->mac.raw
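/* Illustrative note (not part of the original patch): the defines above map
 * the sk_buff accessor API introduced in 2.6.22 onto the older union fields,
 * so driver code written as
 *
 *   struct iphdr *iph = ip_hdr(skb);
 *   unsigned int hlen = tcp_hdrlen(skb);
 *
 * compiles on pre-2.6.22 kernels as skb->nh.iph and skb->h.th->doff << 2.
 */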
54782+
54783+#ifdef NETIF_F_MULTI_QUEUE
54784+#ifndef alloc_etherdev_mq
54785+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
54786+#endif
54787+#endif /* NETIF_F_MULTI_QUEUE */
54788+
54789+#ifndef ETH_FCS_LEN
54790+#define ETH_FCS_LEN 4
54791+#endif
54792+#define cancel_work_sync(x) flush_scheduled_work()
54793+#ifndef udp_hdr
54794+#define udp_hdr _udp_hdr
54795+static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
54796+{
54797+ return (struct udphdr *)skb_transport_header(skb);
54798+}
54799+#endif
54800+
54801+#ifdef cpu_to_be16
54802+#undef cpu_to_be16
54803+#endif
54804+#define cpu_to_be16(x) __constant_htons(x)
54805+
54806+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
54807+enum {
54808+ DUMP_PREFIX_NONE,
54809+ DUMP_PREFIX_ADDRESS,
54810+ DUMP_PREFIX_OFFSET
54811+};
54812+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
54813+#ifndef hex_asc
54814+#define hex_asc(x) "0123456789abcdef"[x]
54815+#endif
54816+#include <linux/ctype.h>
54817+void _kc_print_hex_dump(const char *level, const char *prefix_str,
54818+ int prefix_type, int rowsize, int groupsize,
54819+ const void *buf, size_t len, bool ascii);
54820+#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
54821+ _kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
54822+#ifndef ADVERTISED_2500baseX_Full
54823+#define ADVERTISED_2500baseX_Full BIT(15)
54824+#endif
54825+#ifndef SUPPORTED_2500baseX_Full
54826+#define SUPPORTED_2500baseX_Full BIT(15)
54827+#endif
54828+
54829+#ifndef ETH_P_PAUSE
54830+#define ETH_P_PAUSE 0x8808
54831+#endif
54832+
54833+static inline int compound_order(struct page *page)
54834+{
54835+ return 0;
54836+}
54837+
54838+#define __must_be_array(a) 0
54839+
54840+#ifndef SKB_WITH_OVERHEAD
54841+#define SKB_WITH_OVERHEAD(X) \
54842+ ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
54843+#endif
54844+#else /* 2.6.22 */
54845+#define ETH_TYPE_TRANS_SETS_DEV
54846+#define HAVE_NETDEV_STATS_IN_NETDEV
54847+#endif /* < 2.6.22 */
54848+
54849+/*****************************************************************************/
54850+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
54851+#endif /* > 2.6.22 */
54852+
54853+/*****************************************************************************/
54854+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
54855+#define netif_subqueue_stopped(_a, _b) 0
54856+#ifndef PTR_ALIGN
54857+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
54858+#endif
54859+
54860+#ifndef CONFIG_PM_SLEEP
54861+#define CONFIG_PM_SLEEP CONFIG_PM
54862+#endif
54863+
54864+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
54865+#define HAVE_ETHTOOL_GET_PERM_ADDR
54866+#endif /* 2.6.14 through 2.6.22 */
54867+
54868+static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom)
54869+{
54870+ int delta = 0;
54871+
54872+ if (headroom > (skb->data - skb->head))
54873+ delta = headroom - (skb->data - skb->head);
54874+
54875+ if (delta || skb_header_cloned(skb))
54876+ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
54877+ GFP_ATOMIC);
54878+ return 0;
54879+}
54880+#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h))
54881+#endif /* < 2.6.23 */
54882+
54883+/*****************************************************************************/
54884+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
54885+#ifndef ETH_FLAG_LRO
54886+#define ETH_FLAG_LRO NETIF_F_LRO
54887+#endif
54888+
54889+#ifndef ACCESS_ONCE
54890+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
54891+#endif
54892+
54893+/* if GRO is supported then the napi struct must already exist */
54894+#ifndef NETIF_F_GRO
54895+/* NAPI API changes in 2.6.24 break everything */
54896+struct napi_struct {
54897+ /* used to look up the real NAPI polling routine */
54898+ int (*poll)(struct napi_struct *, int);
54899+ struct net_device *dev;
54900+ int weight;
54901+};
54902+#endif
54903+
54904+#ifdef NAPI
54905+int __kc_adapter_clean(struct net_device *, int *);
54906+/* The following definitions are multi-queue aware. A driver define list
54907+ * determines which drivers support multiple queues and therefore need
54908+ * these stronger defines. If a driver does not support multi-queue
54909+ * functionality, it does not need to be added to this list.
54910+ */
54911+struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
54912+
54913+static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
54914+ int (*poll)(struct napi_struct *, int), int weight)
54915+{
54916+ struct net_device *poll_dev = napi_to_poll_dev(napi);
54917+ poll_dev->poll = __kc_adapter_clean;
54918+ poll_dev->priv = napi;
54919+ poll_dev->weight = weight;
54920+ set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state);
54921+ set_bit(__LINK_STATE_START, &poll_dev->state);
54922+ dev_hold(poll_dev);
54923+ napi->poll = poll;
54924+ napi->weight = weight;
54925+ napi->dev = dev;
54926+}
54927+#define netif_napi_add __kc_mq_netif_napi_add
54928+
54929+static inline void __kc_mq_netif_napi_del(struct napi_struct *napi)
54930+{
54931+ struct net_device *poll_dev = napi_to_poll_dev(napi);
54932+ WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state));
54933+ dev_put(poll_dev);
54934+ memset(poll_dev, 0, sizeof(struct net_device));
54935+}
54936+
54937+#define netif_napi_del __kc_mq_netif_napi_del
54938+
54939+static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi)
54940+{
54941+ return netif_running(napi->dev) &&
54942+ netif_rx_schedule_prep(napi_to_poll_dev(napi));
54943+}
54944+#define napi_schedule_prep __kc_mq_napi_schedule_prep
54945+
54946+static inline void __kc_mq_napi_schedule(struct napi_struct *napi)
54947+{
54948+ if (napi_schedule_prep(napi))
54949+ __netif_rx_schedule(napi_to_poll_dev(napi));
54950+}
54951+#define napi_schedule __kc_mq_napi_schedule
54952+
54953+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
54954+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
54955+#ifdef CONFIG_SMP
54956+static inline void napi_synchronize(const struct napi_struct *n)
54957+{
54958+ struct net_device *dev = napi_to_poll_dev(n);
54959+
54960+ while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
54961+ /* No hurry. */
54962+ msleep(1);
54963+ }
54964+}
54965+#else
54966+#define napi_synchronize(n) barrier()
54967+#endif /* CONFIG_SMP */
54968+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
54969+static inline void _kc_napi_complete(struct napi_struct *napi)
54970+{
54971+#ifdef NETIF_F_GRO
54972+ napi_gro_flush(napi);
54973+#endif
54974+ netif_rx_complete(napi_to_poll_dev(napi));
54975+}
54976+#define napi_complete _kc_napi_complete
54977+#else /* NAPI */
54978+
54979+/* The following definitions are only used if we don't support NAPI at all. */
54980+
54981+static inline void __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
54982+ int (*poll)(struct napi_struct *, int), int weight)
54983+{
54984+ dev->poll = poll;
54985+ dev->weight = weight;
54986+ napi->poll = poll;
54987+ napi->weight = weight;
54988+ napi->dev = dev;
54989+}
54990+#define netif_napi_del(_a) do {} while (0)
54991+#endif /* NAPI */
54992+
54993+#undef dev_get_by_name
54994+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
54995+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
54996+#ifndef DMA_BIT_MASK
54997+#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
54998+#endif
54999+
55000+#ifdef NETIF_F_TSO6
55001+#define skb_is_gso_v6 _kc_skb_is_gso_v6
55002+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
55003+{
55004+ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
55005+}
55006+#endif /* NETIF_F_TSO6 */
55007+
55008+#ifndef KERN_CONT
55009+#define KERN_CONT ""
55010+#endif
55011+#ifndef pr_err
55012+#define pr_err(fmt, arg...) \
55013+ printk(KERN_ERR fmt, ##arg)
55014+#endif
55015+
55016+#ifndef rounddown_pow_of_two
55017+#define rounddown_pow_of_two(n) \
55018+ __builtin_constant_p(n) ? ( \
55019+ (n == 1) ? 0 : \
55020+ (1UL << ilog2(n))) : \
55021+ (1UL << (fls_long(n) - 1))
55022+#endif
55023+
55024+#else /* < 2.6.24 */
55025+#define HAVE_ETHTOOL_GET_SSET_COUNT
55026+#define HAVE_NETDEV_NAPI_LIST
55027+#endif /* < 2.6.24 */
55028+
55029+/*****************************************************************************/
55030+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
55031+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
55032+#define INCLUDE_PM_QOS_PARAMS_H
55033+#include <linux/pm_qos_params.h>
55034+#else /* >= 3.2.0 */
55035+#include <linux/pm_qos.h>
55036+#endif /* else >= 3.2.0 */
55037+#endif /* > 2.6.24 */
55038+
55039+/*****************************************************************************/
55040+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
55041+#define PM_QOS_CPU_DMA_LATENCY 1
55042+
55043+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
55044+#include <linux/latency.h>
55045+#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
55046+#define pm_qos_add_requirement(pm_qos_class, name, value) \
55047+ set_acceptable_latency(name, value)
55048+#define pm_qos_remove_requirement(pm_qos_class, name) \
55049+ remove_acceptable_latency(name)
55050+#define pm_qos_update_requirement(pm_qos_class, name, value) \
55051+ modify_acceptable_latency(name, value)
55052+#else
55053+#define PM_QOS_DEFAULT_VALUE -1
55054+#define pm_qos_add_requirement(pm_qos_class, name, value)
55055+#define pm_qos_remove_requirement(pm_qos_class, name)
55056+#define pm_qos_update_requirement(pm_qos_class, name, value) { \
55057+ if (value != PM_QOS_DEFAULT_VALUE) { \
55058+ printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
55059+ pci_name(adapter->pdev)); \
55060+ } \
55061+}
55062+
55063+#endif /* > 2.6.18 */
55064+
55065+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
55066+
55067+#ifndef DEFINE_PCI_DEVICE_TABLE
55068+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
55069+#endif /* DEFINE_PCI_DEVICE_TABLE */
55070+
55071+#ifndef strict_strtol
55072+#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r)
55073+static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res)
55074+{
55075+ /* adapted from strict_strtoul() in 2.6.25 */
55076+ char *tail;
55077+ long val;
55078+ size_t len;
55079+
55080+ *res = 0;
55081+ len = strlen(buf);
55082+ if (!len)
55083+ return -EINVAL;
55084+ val = simple_strtol(buf, &tail, base);
55085+ if (tail == buf)
55086+ return -EINVAL;
55087+ if ((*tail == '\0') ||
55088+ ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) {
55089+ *res = val;
55090+ return 0;
55091+ }
55092+
55093+ return -EINVAL;
55094+}
55095+#endif
55096+
55097+#else /* < 2.6.25 */
55098+
55099+#endif /* < 2.6.25 */
55100+
55101+/*****************************************************************************/
55102+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
55103+#ifndef clamp_t
55104+#define clamp_t(type, val, min, max) ({ \
55105+ type __val = (val); \
55106+ type __min = (min); \
55107+ type __max = (max); \
55108+ __val = __val < __min ? __min : __val; \
55109+ __val > __max ? __max : __val; })
55110+#endif /* clamp_t */
55111+#undef kzalloc_node
55112+#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
55113+
55114+void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
55115+#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
55116+#else /* < 2.6.26 */
55117+#define NETDEV_CAN_SET_GSO_MAX_SIZE
55118+#ifdef HAVE_PCI_ASPM_H
55119+#include <linux/pci-aspm.h>
55120+#endif
55121+#define HAVE_NETDEV_VLAN_FEATURES
55122+#ifndef PCI_EXP_LNKCAP_ASPMS
55123+#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
55124+#endif /* PCI_EXP_LNKCAP_ASPMS */
55125+#endif /* < 2.6.26 */
55126+/*****************************************************************************/
55127+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
55128+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
55129+ __u32 speed)
55130+{
55131+ ep->speed = (__u16)speed;
55132+ /* ep->speed_hi = (__u16)(speed >> 16); */
55133+}
55134+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
55135+
55136+static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
55137+{
55138+ /* no speed_hi before 2.6.27, and probably no need for it yet */
55139+ return (__u32)ep->speed;
55140+}
55141+#define ethtool_cmd_speed _kc_ethtool_cmd_speed
55142+
55143+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
55144+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
55145+#define ANCIENT_PM 1
55146+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
55147+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
55148+ defined(CONFIG_PM_SLEEP))
55149+#define NEWER_PM 1
55150+#endif
55151+#if defined(ANCIENT_PM) || defined(NEWER_PM)
55152+#undef device_set_wakeup_enable
55153+#define device_set_wakeup_enable(dev, val) \
55154+ do { \
55155+ u16 pmc = 0; \
55156+ int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
55157+ if (pm) { \
55158+ pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
55159+ &pmc); \
55160+ } \
55161+ (dev)->power.can_wakeup = !!(pmc >> 11); \
55162+ (dev)->power.should_wakeup = (val && (pmc >> 11)); \
55163+ } while (0)
55164+#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
55165+#endif /* 2.6.15 through 2.6.27 */
55166+#ifndef netif_napi_del
55167+#define netif_napi_del(_a) do {} while (0)
55168+#ifdef NAPI
55169+#ifdef CONFIG_NETPOLL
55170+#undef netif_napi_del
55171+#define netif_napi_del(_a) list_del(&(_a)->dev_list);
55172+#endif
55173+#endif
55174+#endif /* netif_napi_del */
55175+#ifdef dma_mapping_error
55176+#undef dma_mapping_error
55177+#endif
55178+#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
55179+
55180+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
55181+#define HAVE_TX_MQ
55182+#endif
55183+
55184+#ifndef DMA_ATTR_WEAK_ORDERING
55185+#define DMA_ATTR_WEAK_ORDERING 0
55186+#endif
55187+
55188+#ifdef HAVE_TX_MQ
55189+void _kc_netif_tx_stop_all_queues(struct net_device *);
55190+void _kc_netif_tx_wake_all_queues(struct net_device *);
55191+void _kc_netif_tx_start_all_queues(struct net_device *);
55192+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
55193+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
55194+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
55195+#undef netif_stop_subqueue
55196+#define netif_stop_subqueue(_ndev,_qi) do { \
55197+ if (netif_is_multiqueue((_ndev))) \
55198+ netif_stop_subqueue((_ndev), (_qi)); \
55199+ else \
55200+ netif_stop_queue((_ndev)); \
55201+ } while (0)
55202+#undef netif_start_subqueue
55203+#define netif_start_subqueue(_ndev,_qi) do { \
55204+ if (netif_is_multiqueue((_ndev))) \
55205+ netif_start_subqueue((_ndev), (_qi)); \
55206+ else \
55207+ netif_start_queue((_ndev)); \
55208+ } while (0)
55209+#else /* HAVE_TX_MQ */
55210+#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
55211+#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
55212+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
55213+#define netif_tx_start_all_queues(a) netif_start_queue(a)
55214+#else
55215+#define netif_tx_start_all_queues(a) do {} while (0)
55216+#endif
55217+#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
55218+#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
55219+#endif /* HAVE_TX_MQ */
55220+#ifndef NETIF_F_MULTI_QUEUE
55221+#define NETIF_F_MULTI_QUEUE 0
55222+#define netif_is_multiqueue(a) 0
55223+#define netif_wake_subqueue(a, b)
55224+#endif /* NETIF_F_MULTI_QUEUE */
55225+
55226+#ifndef __WARN_printf
55227+void __kc_warn_slowpath(const char *file, const int line,
55228+ const char *fmt, ...) __attribute__((format(printf, 3, 4)));
55229+#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
55230+#endif /* __WARN_printf */
55231+
55232+#ifndef WARN
55233+#define WARN(condition, format...) ({ \
55234+ int __ret_warn_on = !!(condition); \
55235+ if (unlikely(__ret_warn_on)) \
55236+ __WARN_printf(format); \
55237+ unlikely(__ret_warn_on); \
55238+})
55239+#endif /* WARN */
55240+#undef HAVE_IXGBE_DEBUG_FS
55241+#undef HAVE_IGB_DEBUG_FS
55242+#define qdisc_reset_all_tx(a)
55243+#else /* < 2.6.27 */
55244+#include <net/sch_generic.h>
55245+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
55246+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
55247+ __u32 speed)
55248+{
55249+ ep->speed = (__u16)(speed & 0xFFFF);
55250+ ep->speed_hi = (__u16)(speed >> 16);
55251+}
55252+#define HAVE_TX_MQ
55253+#define HAVE_NETDEV_SELECT_QUEUE
55254+#ifdef CONFIG_DEBUG_FS
55255+#define HAVE_IXGBE_DEBUG_FS
55256+#define HAVE_IGB_DEBUG_FS
55257+#endif /* CONFIG_DEBUG_FS */
55258+#endif /* < 2.6.27 */
55259+
55260+/*****************************************************************************/
55261+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
55262+#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \
55263+ pci_resource_len(pdev, bar))
55264+#define pci_wake_from_d3 _kc_pci_wake_from_d3
55265+#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
55266+int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
55267+int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
55268+#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
55269+#ifndef __skb_queue_head_init
55270+static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
55271+{
55272+ list->prev = list->next = (struct sk_buff *)list;
55273+ list->qlen = 0;
55274+}
55275+#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
55276+#endif
55277+
55278+#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
55279+#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
55280+
55281+#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
55282+#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
55283+
55284+#endif /* < 2.6.28 */
55285+
55286+/*****************************************************************************/
55287+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
55288+#ifndef swap
55289+#define swap(a, b) \
55290+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
55291+#endif
55292+#define pci_request_selected_regions_exclusive(pdev, bars, name) \
55293+ pci_request_selected_regions(pdev, bars, name)
55294+#ifndef CONFIG_NR_CPUS
55295+#define CONFIG_NR_CPUS 1
55296+#endif /* CONFIG_NR_CPUS */
55297+#ifndef pcie_aspm_enabled
55298+#define pcie_aspm_enabled() (1)
55299+#endif /* pcie_aspm_enabled */
55300+
55301+#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
55302+
55303+#ifndef PCI_EXP_LNKSTA_CLS
55304+#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
55305+#endif
55306+#ifndef PCI_EXP_LNKSTA_NLW
55307+#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
55308+#endif
55309+
55310+#ifndef pci_clear_master
55311+void _kc_pci_clear_master(struct pci_dev *dev);
55312+#define pci_clear_master(dev) _kc_pci_clear_master(dev)
55313+#endif
55314+
55315+#ifndef PCI_EXP_LNKCTL_ASPMC
55316+#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
55317+#endif
55318+
55319+#ifndef PCI_EXP_LNKCAP_MLW
55320+#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
55321+#endif
55322+
55323+#else /* < 2.6.29 */
55324+#ifndef HAVE_NET_DEVICE_OPS
55325+#define HAVE_NET_DEVICE_OPS
55326+#endif
55327+#ifdef CONFIG_DCB
55328+#define HAVE_PFC_MODE_ENABLE
55329+#endif /* CONFIG_DCB */
55330+#endif /* < 2.6.29 */
55331+
55332+/*****************************************************************************/
55333+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
55334+#define NO_PTP_SUPPORT
55335+#define skb_rx_queue_recorded(a) false
55336+#define skb_get_rx_queue(a) 0
55337+#define skb_record_rx_queue(a, b) do {} while (0)
55338+#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
55339+#ifndef CONFIG_PCI_IOV
55340+#undef pci_enable_sriov
55341+#define pci_enable_sriov(a, b) -ENOTSUPP
55342+#undef pci_disable_sriov
55343+#define pci_disable_sriov(a) do {} while (0)
55344+#endif /* CONFIG_PCI_IOV */
55345+#ifndef pr_cont
55346+#define pr_cont(fmt, ...) \
55347+ printk(KERN_CONT fmt, ##__VA_ARGS__)
55348+#endif /* pr_cont */
55349+static inline void _kc_synchronize_irq(unsigned int a)
55350+{
55351+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
55352+ synchronize_irq();
55353+#else /* < 2.5.28 */
55354+ synchronize_irq(a);
55355+#endif /* < 2.5.28 */
55356+}
55357+#undef synchronize_irq
55358+#define synchronize_irq(a) _kc_synchronize_irq(a)
55359+
55360+#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
55361+
55362+#ifdef nr_cpus_node
55363+#undef nr_cpus_node
55364+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
55365+#endif
55366+
55367+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5))
55368+#define HAVE_PCI_DEV_IS_VIRTFN_BIT
55369+#endif /* RHEL >= 5.5 */
55370+
55371+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5)))
55372+static inline bool pci_is_root_bus(struct pci_bus *pbus)
55373+{
55374+ return !(pbus->parent);
55375+}
55376+#endif
55377+
55378+#else /* < 2.6.30 */
55379+#define HAVE_ASPM_QUIRKS
55380+#define HAVE_PCI_DEV_IS_VIRTFN_BIT
55381+#endif /* < 2.6.30 */
55382+
55383+/*****************************************************************************/
55384+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
55385+#define ETH_P_1588 0x88F7
55386+#define ETH_P_FIP 0x8914
55387+#ifndef netdev_uc_count
55388+#define netdev_uc_count(dev) ((dev)->uc_count)
55389+#endif
55390+#ifndef netdev_for_each_uc_addr
55391+#define netdev_for_each_uc_addr(uclist, dev) \
55392+ for (uclist = dev->uc_list; uclist; uclist = uclist->next)
55393+#endif
55394+#ifndef PORT_OTHER
55395+#define PORT_OTHER 0xff
55396+#endif
55397+#ifndef MDIO_PHY_ID_PRTAD
55398+#define MDIO_PHY_ID_PRTAD 0x03e0
55399+#endif
55400+#ifndef MDIO_PHY_ID_DEVAD
55401+#define MDIO_PHY_ID_DEVAD 0x001f
55402+#endif
55403+#ifndef skb_dst
55404+#define skb_dst(s) ((s)->dst)
55405+#endif
55406+
55407+#ifndef SUPPORTED_1000baseKX_Full
55408+#define SUPPORTED_1000baseKX_Full BIT(17)
55409+#endif
55410+#ifndef SUPPORTED_10000baseKX4_Full
55411+#define SUPPORTED_10000baseKX4_Full BIT(18)
55412+#endif
55413+#ifndef SUPPORTED_10000baseKR_Full
55414+#define SUPPORTED_10000baseKR_Full BIT(19)
55415+#endif
55416+
55417+#ifndef ADVERTISED_1000baseKX_Full
55418+#define ADVERTISED_1000baseKX_Full BIT(17)
55419+#endif
55420+#ifndef ADVERTISED_10000baseKX4_Full
55421+#define ADVERTISED_10000baseKX4_Full BIT(18)
55422+#endif
55423+#ifndef ADVERTISED_10000baseKR_Full
55424+#define ADVERTISED_10000baseKR_Full BIT(19)
55425+#endif
55426+
55427+static inline unsigned long dev_trans_start(struct net_device *dev)
55428+{
55429+ return dev->trans_start;
55430+}
55431+#else /* < 2.6.31 */
55432+#ifndef HAVE_NETDEV_STORAGE_ADDRESS
55433+#define HAVE_NETDEV_STORAGE_ADDRESS
55434+#endif
55435+#ifndef HAVE_NETDEV_HW_ADDR
55436+#define HAVE_NETDEV_HW_ADDR
55437+#endif
55438+#ifndef HAVE_TRANS_START_IN_QUEUE
55439+#define HAVE_TRANS_START_IN_QUEUE
55440+#endif
55441+#ifndef HAVE_INCLUDE_LINUX_MDIO_H
55442+#define HAVE_INCLUDE_LINUX_MDIO_H
55443+#endif
55444+#include <linux/mdio.h>
55445+#endif /* < 2.6.31 */
55446+
55447+/*****************************************************************************/
55448+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
55449+#undef netdev_tx_t
55450+#define netdev_tx_t int
55451+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
55452+#ifndef NETIF_F_FCOE_MTU
55453+#define NETIF_F_FCOE_MTU BIT(26)
55454+#endif
55455+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
55456+
55457+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
55458+static inline int _kc_pm_runtime_get_sync()
55459+{
55460+ return 1;
55461+}
55462+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync()
55463+#else /* 2.6.0 => 2.6.32 */
55464+static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev)
55465+{
55466+ return 1;
55467+}
55468+#ifndef pm_runtime_get_sync
55469+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev)
55470+#endif
55471+#endif /* 2.6.0 => 2.6.32 */
55472+#ifndef pm_runtime_put
55473+#define pm_runtime_put(dev) do {} while (0)
55474+#endif
55475+#ifndef pm_runtime_put_sync
55476+#define pm_runtime_put_sync(dev) do {} while (0)
55477+#endif
55478+#ifndef pm_runtime_resume
55479+#define pm_runtime_resume(dev) do {} while (0)
55480+#endif
55481+#ifndef pm_schedule_suspend
55482+#define pm_schedule_suspend(dev, t) do {} while (0)
55483+#endif
55484+#ifndef pm_runtime_set_suspended
55485+#define pm_runtime_set_suspended(dev) do {} while (0)
55486+#endif
55487+#ifndef pm_runtime_disable
55488+#define pm_runtime_disable(dev) do {} while (0)
55489+#endif
55490+#ifndef pm_runtime_put_noidle
55491+#define pm_runtime_put_noidle(dev) do {} while (0)
55492+#endif
55493+#ifndef pm_runtime_set_active
55494+#define pm_runtime_set_active(dev) do {} while (0)
55495+#endif
55496+#ifndef pm_runtime_enable
55497+#define pm_runtime_enable(dev) do {} while (0)
55498+#endif
55499+#ifndef pm_runtime_get_noresume
55500+#define pm_runtime_get_noresume(dev) do {} while (0)
55501+#endif
55502+#else /* < 2.6.32 */
55503+#if (RHEL_RELEASE_CODE && \
55504+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \
55505+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
55506+#define HAVE_RHEL6_NET_DEVICE_EXTENDED
55507+#endif /* RHEL >= 6.2 && RHEL < 7.0 */
55508+#if (RHEL_RELEASE_CODE && \
55509+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \
55510+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
55511+#define HAVE_RHEL6_NET_DEVICE_OPS_EXT
55512+#define HAVE_NDO_SET_FEATURES
55513+#endif /* RHEL >= 6.6 && RHEL < 7.0 */
55514+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
55515+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
55516+#define HAVE_NETDEV_OPS_FCOE_ENABLE
55517+#endif
55518+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
55519+#ifdef CONFIG_DCB
55520+#ifndef HAVE_DCBNL_OPS_GETAPP
55521+#define HAVE_DCBNL_OPS_GETAPP
55522+#endif
55523+#endif /* CONFIG_DCB */
55524+#include <linux/pm_runtime.h>
55525+/* IOV bad DMA target workarounds require at least this kernel rev support */
55526+#define HAVE_PCIE_TYPE
55527+#endif /* < 2.6.32 */
55528+
55529+/*****************************************************************************/
55530+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
55531+#ifndef pci_pcie_cap
55532+#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
55533+#endif
55534+#ifndef IPV4_FLOW
55535+#define IPV4_FLOW 0x10
55536+#endif /* IPV4_FLOW */
55537+#ifndef IPV6_FLOW
55538+#define IPV6_FLOW 0x11
55539+#endif /* IPV6_FLOW */
55540+/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
55541+#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
55542+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
55543+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
55544+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
55545+#define HAVE_NETDEV_OPS_FCOE_GETWWN
55546+#endif
55547+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
55548+#endif /* RHEL6 or SLES11 SP1 */
55549+#ifndef __percpu
55550+#define __percpu
55551+#endif /* __percpu */
55552+
55553+#ifndef PORT_DA
55554+#define PORT_DA PORT_OTHER
55555+#endif /* PORT_DA */
55556+#ifndef PORT_NONE
55557+#define PORT_NONE PORT_OTHER
55558+#endif
55559+
55560+#if ((RHEL_RELEASE_CODE && \
55561+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
55562+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
55563+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
55564+#undef DEFINE_DMA_UNMAP_ADDR
55565+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
55566+#undef DEFINE_DMA_UNMAP_LEN
55567+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
55568+#undef dma_unmap_addr
55569+#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
55570+#undef dma_unmap_addr_set
55571+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
55572+#undef dma_unmap_len
55573+#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
55574+#undef dma_unmap_len_set
55575+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
55576+#endif /* !CONFIG_X86_32 && !CONFIG_NEED_DMA_MAP_STATE */
55577+#endif /* RHEL_RELEASE_CODE */
55578+
55579+#if (!(RHEL_RELEASE_CODE && \
55580+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \
55581+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \
55582+ ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \
55583+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))))
55584+static inline bool pci_is_pcie(struct pci_dev *dev)
55585+{
55586+ return !!pci_pcie_cap(dev);
55587+}
55588+#endif /* RHEL_RELEASE_CODE */
55589+
55590+#if (!(RHEL_RELEASE_CODE && \
55591+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))))
55592+#define sk_tx_queue_get(_sk) (-1)
55593+#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0)
55594+#endif /* !(RHEL >= 6.2) */
55595+
55596+#if (RHEL_RELEASE_CODE && \
55597+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
55598+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
55599+#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
55600+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
55601+#define HAVE_ETHTOOL_SET_PHYS_ID
55602+#define HAVE_ETHTOOL_GET_TS_INFO
55603+#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5))
55604+#define HAVE_ETHTOOL_GSRSSH
55605+#define HAVE_RHEL6_SRIOV_CONFIGURE
55606+#define HAVE_RXFH_NONCONST
55607+#endif /* RHEL > 6.5 */
55608+#endif /* RHEL >= 6.4 && RHEL < 7.0 */
55609+
55610+#else /* < 2.6.33 */
55611+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
55612+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
55613+#define HAVE_NETDEV_OPS_FCOE_GETWWN
55614+#endif
55615+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
55616+#endif /* < 2.6.33 */
55617+
55618+/*****************************************************************************/
55619+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
55620+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
55621+#ifndef pci_num_vf
55622+#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
55623+int _kc_pci_num_vf(struct pci_dev *dev);
55624+#endif
55625+#endif /* RHEL_RELEASE_CODE */
55626+
55627+#ifndef dev_is_pci
55628+#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
55629+#endif
55630+
55631+#ifndef ETH_FLAG_NTUPLE
55632+#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
55633+#endif
55634+
55635+#ifndef netdev_mc_count
55636+#define netdev_mc_count(dev) ((dev)->mc_count)
55637+#endif
55638+#ifndef netdev_mc_empty
55639+#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
55640+#endif
55641+#ifndef netdev_for_each_mc_addr
55642+#define netdev_for_each_mc_addr(mclist, dev) \
55643+ for (mclist = dev->mc_list; mclist; mclist = mclist->next)
55644+#endif
55645+#ifndef netdev_uc_count
55646+#define netdev_uc_count(dev) ((dev)->uc.count)
55647+#endif
55648+#ifndef netdev_uc_empty
55649+#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
55650+#endif
55651+#ifndef netdev_for_each_uc_addr
55652+#define netdev_for_each_uc_addr(ha, dev) \
55653+ list_for_each_entry(ha, &dev->uc.list, list)
55654+#endif
55655+#ifndef dma_set_coherent_mask
55656+#define dma_set_coherent_mask(dev,mask) \
55657+ pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
55658+#endif
55659+#ifndef pci_dev_run_wake
55660+#define pci_dev_run_wake(pdev) (0)
55661+#endif
55662+
55663+/* netdev logging taken from include/linux/netdevice.h */
55664+#ifndef netdev_name
55665+static inline const char *_kc_netdev_name(const struct net_device *dev)
55666+{
55667+ if (dev->reg_state != NETREG_REGISTERED)
55668+ return "(unregistered net_device)";
55669+ return dev->name;
55670+}
55671+#define netdev_name(netdev) _kc_netdev_name(netdev)
55672+#endif /* netdev_name */
55673+
55674+#undef netdev_printk
55675+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
55676+#define netdev_printk(level, netdev, format, args...) \
55677+do { \
55678+ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
55679+ printk(level "%s: " format, pci_name(pdev), ##args); \
55680+} while(0)
55681+#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
55682+#define netdev_printk(level, netdev, format, args...) \
55683+do { \
55684+ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
55685+ struct device *dev = pci_dev_to_dev(pdev); \
55686+ dev_printk(level, dev, "%s: " format, \
55687+ netdev_name(netdev), ##args); \
55688+} while(0)
55689+#else /* 2.6.21 => 2.6.34 */
55690+#define netdev_printk(level, netdev, format, args...) \
55691+ dev_printk(level, (netdev)->dev.parent, \
55692+ "%s: " format, \
55693+ netdev_name(netdev), ##args)
55694+#endif /* <2.6.0 <2.6.21 <2.6.34 */
55695+#undef netdev_emerg
55696+#define netdev_emerg(dev, format, args...) \
55697+ netdev_printk(KERN_EMERG, dev, format, ##args)
55698+#undef netdev_alert
55699+#define netdev_alert(dev, format, args...) \
55700+ netdev_printk(KERN_ALERT, dev, format, ##args)
55701+#undef netdev_crit
55702+#define netdev_crit(dev, format, args...) \
55703+ netdev_printk(KERN_CRIT, dev, format, ##args)
55704+#undef netdev_err
55705+#define netdev_err(dev, format, args...) \
55706+ netdev_printk(KERN_ERR, dev, format, ##args)
55707+#undef netdev_warn
55708+#define netdev_warn(dev, format, args...) \
55709+ netdev_printk(KERN_WARNING, dev, format, ##args)
55710+#undef netdev_notice
55711+#define netdev_notice(dev, format, args...) \
55712+ netdev_printk(KERN_NOTICE, dev, format, ##args)
55713+#undef netdev_info
55714+#define netdev_info(dev, format, args...) \
55715+ netdev_printk(KERN_INFO, dev, format, ##args)
55716+#undef netdev_dbg
55717+#if defined(DEBUG)
55718+#define netdev_dbg(__dev, format, args...) \
55719+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
55720+#elif defined(CONFIG_DYNAMIC_DEBUG)
55721+#define netdev_dbg(__dev, format, args...) \
55722+do { \
55723+ dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
55724+ netdev_name(__dev), ##args); \
55725+} while (0)
55726+#else /* DEBUG */
55727+#define netdev_dbg(__dev, format, args...) \
55728+({ \
55729+ if (0) \
55730+ netdev_printk(KERN_DEBUG, __dev, format, ##args); \
55731+ 0; \
55732+})
55733+#endif /* DEBUG */
55734+
55735+#undef netif_printk
55736+#define netif_printk(priv, type, level, dev, fmt, args...) \
55737+do { \
55738+ if (netif_msg_##type(priv)) \
55739+ netdev_printk(level, (dev), fmt, ##args); \
55740+} while (0)
55741+
55742+#undef netif_emerg
55743+#define netif_emerg(priv, type, dev, fmt, args...) \
55744+ netif_level(emerg, priv, type, dev, fmt, ##args)
55745+#undef netif_alert
55746+#define netif_alert(priv, type, dev, fmt, args...) \
55747+ netif_level(alert, priv, type, dev, fmt, ##args)
55748+#undef netif_crit
55749+#define netif_crit(priv, type, dev, fmt, args...) \
55750+ netif_level(crit, priv, type, dev, fmt, ##args)
55751+#undef netif_err
55752+#define netif_err(priv, type, dev, fmt, args...) \
55753+ netif_level(err, priv, type, dev, fmt, ##args)
55754+#undef netif_warn
55755+#define netif_warn(priv, type, dev, fmt, args...) \
55756+ netif_level(warn, priv, type, dev, fmt, ##args)
55757+#undef netif_notice
55758+#define netif_notice(priv, type, dev, fmt, args...) \
55759+ netif_level(notice, priv, type, dev, fmt, ##args)
55760+#undef netif_info
55761+#define netif_info(priv, type, dev, fmt, args...) \
55762+ netif_level(info, priv, type, dev, fmt, ##args)
55763+#undef netif_dbg
55764+#define netif_dbg(priv, type, dev, fmt, args...) \
55765+ netif_level(dbg, priv, type, dev, fmt, ##args)
55766+
55767+#ifdef SET_SYSTEM_SLEEP_PM_OPS
55768+#define HAVE_SYSTEM_SLEEP_PM_OPS
55769+#endif
55770+
55771+#ifndef for_each_set_bit
55772+#define for_each_set_bit(bit, addr, size) \
55773+ for ((bit) = find_first_bit((addr), (size)); \
55774+ (bit) < (size); \
55775+ (bit) = find_next_bit((addr), (size), (bit) + 1))
55776+#endif /* for_each_set_bit */
55777+
55778+#ifndef DEFINE_DMA_UNMAP_ADDR
55779+#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
55780+#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
55781+#define dma_unmap_addr pci_unmap_addr
55782+#define dma_unmap_addr_set pci_unmap_addr_set
55783+#define dma_unmap_len pci_unmap_len
55784+#define dma_unmap_len_set pci_unmap_len_set
55785+#endif /* DEFINE_DMA_UNMAP_ADDR */
55786+
55787+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3))
55788+#ifdef IGB_HWMON
55789+#ifdef CONFIG_DEBUG_LOCK_ALLOC
55790+#define sysfs_attr_init(attr) \
55791+ do { \
55792+ static struct lock_class_key __key; \
55793+ (attr)->key = &__key; \
55794+ } while (0)
55795+#else
55796+#define sysfs_attr_init(attr) do {} while (0)
55797+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
55798+#endif /* IGB_HWMON */
55799+#endif /* RHEL_RELEASE_CODE */
55800+
55801+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
55802+static inline bool _kc_pm_runtime_suspended()
55803+{
55804+ return false;
55805+}
55806+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended()
55807+#else /* 2.6.0 => 2.6.34 */
55808+static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev)
55809+{
55810+ return false;
55811+}
55812+#ifndef pm_runtime_suspended
55813+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev)
55814+#endif
55815+#endif /* 2.6.0 => 2.6.34 */
55816+
55817+#ifndef pci_bus_speed
55818+/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */
55819+enum _kc_pci_bus_speed {
55820+ _KC_PCIE_SPEED_2_5GT = 0x14,
55821+ _KC_PCIE_SPEED_5_0GT = 0x15,
55822+ _KC_PCIE_SPEED_8_0GT = 0x16,
55823+ _KC_PCI_SPEED_UNKNOWN = 0xff,
55824+};
55825+#define pci_bus_speed _kc_pci_bus_speed
55826+#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT
55827+#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT
55828+#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT
55829+#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN
55830+#endif /* pci_bus_speed */
55831+
55832+#else /* < 2.6.34 */
55833+#define HAVE_SYSTEM_SLEEP_PM_OPS
55834+#ifndef HAVE_SET_RX_MODE
55835+#define HAVE_SET_RX_MODE
55836+#endif
55837+
55838+#endif /* < 2.6.34 */
55839+
55840+/*****************************************************************************/
55841+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
55842+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
55843+ const void __user *from, size_t count);
55844+#define simple_write_to_buffer _kc_simple_write_to_buffer
55845+
55846+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)))
55847+static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
55848+{
55849+#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT
55850+#ifdef CONFIG_PCI_IOV
55851+ if (dev->is_virtfn)
55852+ dev = dev->physfn;
55853+#endif /* CONFIG_PCI_IOV */
55854+#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */
55855+ return dev;
55856+}
55857+#endif /* ! RHEL >= 6.4 */
55858+
55859+#ifndef PCI_EXP_LNKSTA_NLW_SHIFT
55860+#define PCI_EXP_LNKSTA_NLW_SHIFT 4
55861+#endif
55862+
55863+#ifndef numa_node_id
55864+#define numa_node_id() 0
55865+#endif
55866+#ifndef numa_mem_id
55867+#define numa_mem_id numa_node_id
55868+#endif
55869+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
55870+#ifdef HAVE_TX_MQ
55871+#include <net/sch_generic.h>
55872+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
55873+int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
55874+#else /* CONFIG_NETDEVICES_MULTI_QUEUE */
55875+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
55876+ unsigned int txq)
55877+{
55878+ dev->egress_subqueue_count = txq;
55879+ return 0;
55880+}
55881+#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
55882+#else /* HAVE_TX_MQ */
55883+static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev,
55884+ unsigned int __always_unused txq)
55885+{
55886+ return 0;
55887+}
55888+#endif /* HAVE_TX_MQ */
55889+#define netif_set_real_num_tx_queues(dev, txq) \
55890+ _kc_netif_set_real_num_tx_queues(dev, txq)
55891+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
55892+#ifndef ETH_FLAG_RXHASH
55893+#define ETH_FLAG_RXHASH (1<<28)
55894+#endif /* ETH_FLAG_RXHASH */
55895+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))
55896+#define HAVE_IRQ_AFFINITY_HINT
55897+#endif
55898+struct device_node;
55899+#else /* < 2.6.35 */
55900+#define HAVE_STRUCT_DEVICE_OF_NODE
55901+#define HAVE_PM_QOS_REQUEST_LIST
55902+#define HAVE_IRQ_AFFINITY_HINT
55903+#include <linux/of.h>
55904+#endif /* < 2.6.35 */
55905+
55906+/*****************************************************************************/
55907+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
55908+int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
55909+#define ethtool_op_set_flags _kc_ethtool_op_set_flags
55910+u32 _kc_ethtool_op_get_flags(struct net_device *);
55911+#define ethtool_op_get_flags _kc_ethtool_op_get_flags
55912+
55913+enum {
55914+ WQ_UNBOUND = 0,
55915+ WQ_RESCUER = 0,
55916+};
55917+
55918+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
55919+#ifdef NET_IP_ALIGN
55920+#undef NET_IP_ALIGN
55921+#endif
55922+#define NET_IP_ALIGN 0
55923+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
55924+
55925+#ifdef NET_SKB_PAD
55926+#undef NET_SKB_PAD
55927+#endif
55928+
55929+#if (L1_CACHE_BYTES > 32)
55930+#define NET_SKB_PAD L1_CACHE_BYTES
55931+#else
55932+#define NET_SKB_PAD 32
55933+#endif
55934+
55935+static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
55936+ unsigned int length)
55937+{
55938+ struct sk_buff *skb;
55939+
55940+ skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
55941+ if (skb) {
55942+#if (NET_IP_ALIGN + NET_SKB_PAD)
55943+ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
55944+#endif
55945+ skb->dev = dev;
55946+ }
55947+ return skb;
55948+}
55949+
55950+#ifdef netdev_alloc_skb_ip_align
55951+#undef netdev_alloc_skb_ip_align
55952+#endif
55953+#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
55954+
55955+#undef netif_level
55956+#define netif_level(level, priv, type, dev, fmt, args...) \
55957+do { \
55958+ if (netif_msg_##type(priv)) \
55959+ netdev_##level(dev, fmt, ##args); \
55960+} while (0)
55961+
55962+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)))
55963+#undef usleep_range
55964+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
55965+#endif
55966+
55967+#define u64_stats_update_begin(a) do { } while(0)
55968+#define u64_stats_update_end(a) do { } while(0)
55969+#define u64_stats_fetch_begin(a) do { } while(0)
55970+#define u64_stats_fetch_retry_bh(a,b) (0)
55971+#define u64_stats_fetch_begin_bh(a) (0)
55972+
55973+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1))
55974+#define HAVE_8021P_SUPPORT
55975+#endif
55976+
55977+/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */
55978+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
55979+ !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0)))
55980+static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb)
55981+{
55982+ return;
55983+}
55984+#endif
55985+
55986+#else /* < 2.6.36 */
55987+
55988+#define HAVE_PM_QOS_REQUEST_ACTIVE
55989+#define HAVE_8021P_SUPPORT
55990+#define HAVE_NDO_GET_STATS64
55991+#endif /* < 2.6.36 */
55992+
55993+/*****************************************************************************/
55994+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
55995+#define HAVE_NON_CONST_PCI_DRIVER_NAME
55996+#ifndef netif_set_real_num_tx_queues
55997+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
55998+ unsigned int txq)
55999+{
56000+ netif_set_real_num_tx_queues(dev, txq);
56001+ return 0;
56002+}
56003+#define netif_set_real_num_tx_queues(dev, txq) \
56004+ _kc_netif_set_real_num_tx_queues(dev, txq)
56005+#endif
56006+#ifndef netif_set_real_num_rx_queues
56007+static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev,
56008+ unsigned int __always_unused rxq)
56009+{
56010+ return 0;
56011+}
56012+#define netif_set_real_num_rx_queues(dev, rxq) \
56013+ __kc_netif_set_real_num_rx_queues((dev), (rxq))
56014+#endif
56015+#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
56016+#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
56017+#endif
56018+#ifndef VLAN_N_VID
56019+#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN
56020+#endif /* VLAN_N_VID */
56021+#ifndef ETH_FLAG_TXVLAN
56022+#define ETH_FLAG_TXVLAN BIT(7)
56023+#endif /* ETH_FLAG_TXVLAN */
56024+#ifndef ETH_FLAG_RXVLAN
56025+#define ETH_FLAG_RXVLAN BIT(8)
56026+#endif /* ETH_FLAG_RXVLAN */
56027+
56028+#define WQ_MEM_RECLAIM WQ_RESCUER
56029+
56030+static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
56031+{
56032+ WARN_ON(skb->ip_summed != CHECKSUM_NONE);
56033+}
56034+#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
56035+
56036+static inline void *_kc_vzalloc_node(unsigned long size, int node)
56037+{
56038+ void *addr = vmalloc_node(size, node);
56039+ if (addr)
56040+ memset(addr, 0, size);
56041+ return addr;
56042+}
56043+#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
56044+
56045+static inline void *_kc_vzalloc(unsigned long size)
56046+{
56047+ void *addr = vmalloc(size);
56048+ if (addr)
56049+ memset(addr, 0, size);
56050+ return addr;
56051+}
56052+#define vzalloc(_size) _kc_vzalloc(_size)
56053+
56054+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \
56055+ (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0)))
56056+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
56057+{
56058+ if (vlan_tx_tag_present(skb) ||
56059+ skb->protocol != cpu_to_be16(ETH_P_8021Q))
56060+ return skb->protocol;
56061+
56062+ if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
56063+ return 0;
56064+
56065+ return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
56066+}
56067+#endif /* !RHEL5.7+ || RHEL6.0 */
56068+
56069+#ifdef HAVE_HW_TIME_STAMP
56070+#define SKBTX_HW_TSTAMP BIT(0)
56071+#define SKBTX_IN_PROGRESS BIT(2)
56072+#define SKB_SHARED_TX_IS_UNION
56073+#endif
56074+
56075+#ifndef device_wakeup_enable
56076+#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true)
56077+#endif
56078+
56079+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
56080+#ifndef HAVE_VLAN_RX_REGISTER
56081+#define HAVE_VLAN_RX_REGISTER
56082+#endif
56083+#endif /* > 2.4.18 */
56084+#endif /* < 2.6.37 */
56085+
56086+/*****************************************************************************/
56087+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
56088+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
56089+#define skb_checksum_start_offset(skb) skb_transport_offset(skb)
56090+#else /* 2.6.22 -> 2.6.37 */
56091+static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
56092+{
56093+ return skb->csum_start - skb_headroom(skb);
56094+}
56095+#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
56096+#endif /* 2.6.22 -> 2.6.37 */
56097+#if IS_ENABLED(CONFIG_DCB)
56098+#ifndef IEEE_8021QAZ_MAX_TCS
56099+#define IEEE_8021QAZ_MAX_TCS 8
56100+#endif
56101+#ifndef DCB_CAP_DCBX_HOST
56102+#define DCB_CAP_DCBX_HOST 0x01
56103+#endif
56104+#ifndef DCB_CAP_DCBX_LLD_MANAGED
56105+#define DCB_CAP_DCBX_LLD_MANAGED 0x02
56106+#endif
56107+#ifndef DCB_CAP_DCBX_VER_CEE
56108+#define DCB_CAP_DCBX_VER_CEE 0x04
56109+#endif
56110+#ifndef DCB_CAP_DCBX_VER_IEEE
56111+#define DCB_CAP_DCBX_VER_IEEE 0x08
56112+#endif
56113+#ifndef DCB_CAP_DCBX_STATIC
56114+#define DCB_CAP_DCBX_STATIC 0x10
56115+#endif
56116+#endif /* CONFIG_DCB */
56117+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))
56118+#define CONFIG_XPS
56119+#endif /* RHEL_RELEASE_VERSION(6,2) */
56120+#endif /* < 2.6.38 */
56121+
56122+/*****************************************************************************/
56123+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
56124+#ifndef TC_BITMASK
56125+#define TC_BITMASK 15
56126+#endif
56127+#ifndef NETIF_F_RXCSUM
56128+#define NETIF_F_RXCSUM BIT(29)
56129+#endif
56130+#ifndef skb_queue_reverse_walk_safe
56131+#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
56132+ for (skb = (queue)->prev, tmp = skb->prev; \
56133+ skb != (struct sk_buff *)(queue); \
56134+ skb = tmp, tmp = skb->prev)
56135+#endif
56136+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
56137+#ifndef FCOE_MTU
56138+#define FCOE_MTU 2158
56139+#endif
56140+#endif
56141+#if IS_ENABLED(CONFIG_DCB)
56142+#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
56143+#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
56144+#endif
56145+#endif
56146+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)))
56147+#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
56148+#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
56149+#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
56150+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */
56151+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
56152+u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16);
56153+#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q))
56154+u8 _kc_netdev_get_num_tc(struct net_device *dev);
56155+#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev)
56156+int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc);
56157+#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc))
56158+#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0)
56159+#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0)
56160+u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up);
56161+#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up)
56162+#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0)
56163+#else /* RHEL6.1 or greater */
56164+#ifndef HAVE_MQPRIO
56165+#define HAVE_MQPRIO
56166+#endif /* HAVE_MQPRIO */
56167+#if IS_ENABLED(CONFIG_DCB)
56168+#ifndef HAVE_DCBNL_IEEE
56169+#define HAVE_DCBNL_IEEE
56170+#ifndef IEEE_8021QAZ_TSA_STRICT
56171+#define IEEE_8021QAZ_TSA_STRICT 0
56172+#endif
56173+#ifndef IEEE_8021QAZ_TSA_ETS
56174+#define IEEE_8021QAZ_TSA_ETS 2
56175+#endif
56176+#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
56177+#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
56178+#endif
56179+#endif
56180+#endif /* CONFIG_DCB */
56181+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
56182+
56183+#ifndef udp_csum
56184+#define udp_csum __kc_udp_csum
56185+static inline __wsum __kc_udp_csum(struct sk_buff *skb)
56186+{
56187+ __wsum csum = csum_partial(skb_transport_header(skb),
56188+ sizeof(struct udphdr), skb->csum);
56189+
56190+ for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
56191+ csum = csum_add(csum, skb->csum);
56192+ }
56193+ return csum;
56194+}
56195+#endif /* udp_csum */
56196+#else /* < 2.6.39 */
56197+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
56198+#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
56199+#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET
56200+#endif
56201+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
56202+#ifndef HAVE_MQPRIO
56203+#define HAVE_MQPRIO
56204+#endif
56205+#ifndef HAVE_SETUP_TC
56206+#define HAVE_SETUP_TC
56207+#endif
56208+#ifdef CONFIG_DCB
56209+#ifndef HAVE_DCBNL_IEEE
56210+#define HAVE_DCBNL_IEEE
56211+#endif
56212+#endif /* CONFIG_DCB */
56213+#ifndef HAVE_NDO_SET_FEATURES
56214+#define HAVE_NDO_SET_FEATURES
56215+#endif
56216+#define HAVE_IRQ_AFFINITY_NOTIFY
56217+#endif /* < 2.6.39 */
56218+
56219+/*****************************************************************************/
56220+/* Use < 2.6.40 because a Fedora 15 kernel update bumped the kernel version
56221+ * to 2.6.40.x and back-ported 3.0 features such as set_phys_id for
56222+ * ethtool.
56223+ */
56224+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) )
56225+#ifdef ETHTOOL_GRXRINGS
56226+#ifndef FLOW_EXT
56227+#define FLOW_EXT 0x80000000
56228+union _kc_ethtool_flow_union {
56229+ struct ethtool_tcpip4_spec tcp_ip4_spec;
56230+ struct ethtool_usrip4_spec usr_ip4_spec;
56231+ __u8 hdata[60];
56232+};
56233+struct _kc_ethtool_flow_ext {
56234+ __be16 vlan_etype;
56235+ __be16 vlan_tci;
56236+ __be32 data[2];
56237+};
56238+struct _kc_ethtool_rx_flow_spec {
56239+ __u32 flow_type;
56240+ union _kc_ethtool_flow_union h_u;
56241+ struct _kc_ethtool_flow_ext h_ext;
56242+ union _kc_ethtool_flow_union m_u;
56243+ struct _kc_ethtool_flow_ext m_ext;
56244+ __u64 ring_cookie;
56245+ __u32 location;
56246+};
56247+#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec
56248+#endif /* FLOW_EXT */
56249+#endif
56250+
56251+#define pci_disable_link_state_locked pci_disable_link_state
56252+
56253+#ifndef PCI_LTR_VALUE_MASK
56254+#define PCI_LTR_VALUE_MASK 0x000003ff
56255+#endif
56256+#ifndef PCI_LTR_SCALE_MASK
56257+#define PCI_LTR_SCALE_MASK 0x00001c00
56258+#endif
56259+#ifndef PCI_LTR_SCALE_SHIFT
56260+#define PCI_LTR_SCALE_SHIFT 10
56261+#endif
56262+
56263+#else /* < 2.6.40 */
56264+#define HAVE_ETHTOOL_SET_PHYS_ID
56265+#endif /* < 2.6.40 */
56266+
56267+/*****************************************************************************/
56268+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) )
56269+#define USE_LEGACY_PM_SUPPORT
56270+#ifndef kfree_rcu
56271+#define kfree_rcu(_ptr, _rcu_head) do { \
56272+ void __kc_kfree_rcu(struct rcu_head *rcu_head) \
56273+ { \
56274+ void *ptr = container_of(rcu_head, \
56275+ typeof(*_ptr), \
56276+ _rcu_head); \
56277+ kfree(ptr); \
56278+ } \
56279+ call_rcu(&(_ptr)->_rcu_head, __kc_kfree_rcu); \
56280+} while (0)
56281+#define HAVE_KFREE_RCU_BARRIER
56282+#endif /* kfree_rcu */
56283+#ifndef kstrtol_from_user
56284+#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r)
56285+static inline int _kc_kstrtol_from_user(const char __user *s, size_t count,
56286+ unsigned int base, long *res)
56287+{
56288+ /* sign, base 2 representation, newline, terminator */
56289+ char buf[1 + sizeof(long) * 8 + 1 + 1];
56290+
56291+ count = min(count, sizeof(buf) - 1);
56292+ if (copy_from_user(buf, s, count))
56293+ return -EFAULT;
56294+ buf[count] = '\0';
56295+ return strict_strtol(buf, base, res);
56296+}
56297+#endif
56298+
56299+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0) || \
56300+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,7)))
56301+/* 20000base_blah_full Supported and Advertised Registers */
56302+#define SUPPORTED_20000baseMLD2_Full BIT(21)
56303+#define SUPPORTED_20000baseKR2_Full BIT(22)
56304+#define ADVERTISED_20000baseMLD2_Full BIT(21)
56305+#define ADVERTISED_20000baseKR2_Full BIT(22)
56306+#endif /* RHEL_RELEASE_CODE */
56307+#endif /* < 3.0.0 */
56308+
56309+/*****************************************************************************/
56310+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
56311+#ifndef __netdev_alloc_skb_ip_align
56312+#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l)
56313+#endif /* __netdev_alloc_skb_ip_align */
56314+#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
56315+#define dcb_ieee_delapp(dev, app) 0
56316+#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
56317+
56318+/* 1000BASE-T Control register */
56319+#define CTL1000_AS_MASTER 0x0800
56320+#define CTL1000_ENABLE_MASTER 0x1000
56321+
56322+/* kernels less than 3.0.0 don't have this */
56323+#ifndef ETH_P_8021AD
56324+#define ETH_P_8021AD 0x88A8
56325+#endif
56326+
56327+/* Stub definition for !CONFIG_OF is introduced later */
56328+#ifdef CONFIG_OF
56329+static inline struct device_node *
56330+pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev)
56331+{
56332+#ifdef HAVE_STRUCT_DEVICE_OF_NODE
56333+ return pdev ? pdev->dev.of_node : NULL;
56334+#else
56335+ return NULL;
56336+#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */
56337+}
56338+#endif /* CONFIG_OF */
56339+#else /* < 3.1.0 */
56340+#ifndef HAVE_DCBNL_IEEE_DELAPP
56341+#define HAVE_DCBNL_IEEE_DELAPP
56342+#endif
56343+#endif /* < 3.1.0 */
56344+
56345+/*****************************************************************************/
56346+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
56347+#ifndef dma_zalloc_coherent
56348+#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f)
56349+static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size,
56350+ dma_addr_t *dma_handle, gfp_t flag)
56351+{
56352+ void *ret = dma_alloc_coherent(dev, size, dma_handle, flag);
56353+ if (ret)
56354+ memset(ret, 0, size);
56355+ return ret;
56356+}
56357+#endif
56358+#ifdef ETHTOOL_GRXRINGS
56359+#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
56360+#endif /* ETHTOOL_GRXRINGS */
56361+
56362+#ifndef skb_frag_size
56363+#define skb_frag_size(frag) _kc_skb_frag_size(frag)
56364+static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
56365+{
56366+ return frag->size;
56367+}
56368+#endif /* skb_frag_size */
56369+
56370+#ifndef skb_frag_size_sub
56371+#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta)
56372+static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
56373+{
56374+ frag->size -= delta;
56375+}
56376+#endif /* skb_frag_size_sub */
56377+
56378+#ifndef skb_frag_page
56379+#define skb_frag_page(frag) _kc_skb_frag_page(frag)
56380+static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
56381+{
56382+ return frag->page;
56383+}
56384+#endif /* skb_frag_page */
56385+
56386+#ifndef skb_frag_address
56387+#define skb_frag_address(frag) _kc_skb_frag_address(frag)
56388+static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
56389+{
56390+ return page_address(skb_frag_page(frag)) + frag->page_offset;
56391+}
56392+#endif /* skb_frag_address */
56393+
56394+#ifndef skb_frag_dma_map
56395+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
56396+#include <linux/dma-mapping.h>
56397+#endif
56398+#define skb_frag_dma_map(dev,frag,offset,size,dir) \
56399+ _kc_skb_frag_dma_map(dev,frag,offset,size,dir)
56400+static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
56401+ const skb_frag_t *frag,
56402+ size_t offset, size_t size,
56403+ enum dma_data_direction dir)
56404+{
56405+ return dma_map_page(dev, skb_frag_page(frag),
56406+ frag->page_offset + offset, size, dir);
56407+}
56408+#endif /* skb_frag_dma_map */
56409+
56410+#ifndef __skb_frag_unref
56411+#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
56412+static inline void __kc_skb_frag_unref(skb_frag_t *frag)
56413+{
56414+ put_page(skb_frag_page(frag));
56415+}
56416+#endif /* __skb_frag_unref */
56417+
56418+#ifndef SPEED_UNKNOWN
56419+#define SPEED_UNKNOWN -1
56420+#endif
56421+#ifndef DUPLEX_UNKNOWN
56422+#define DUPLEX_UNKNOWN 0xff
56423+#endif
56424+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\
56425+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)))
56426+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
56427+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
56428+#endif
56429+#endif
56430+#else /* < 3.2.0 */
56431+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
56432+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
56433+#define HAVE_VF_SPOOFCHK_CONFIGURE
56434+#endif
56435+#ifndef HAVE_SKB_L4_RXHASH
56436+#define HAVE_SKB_L4_RXHASH
56437+#endif
56438+#define HAVE_IOMMU_PRESENT
56439+#define HAVE_PM_QOS_REQUEST_LIST_NEW
56440+#endif /* < 3.2.0 */
56441+
56442+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2))
56443+#undef ixgbe_get_netdev_tc_txq
56444+#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
56445+#endif
56446+/*****************************************************************************/
56447+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
56448+/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than
56449+ * alloc_workqueue() to avoid compiler warning from -Wvarargs
56450+ */
56451+static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4)))
56452+_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active,
56453+ const char *fmt, ...)
56454+{
56455+ struct workqueue_struct *wq;
56456+ va_list args, temp;
56457+ unsigned int len;
56458+ char *p;
56459+
56460+ va_start(args, fmt);
56461+ va_copy(temp, args);
56462+ len = vsnprintf(NULL, 0, fmt, temp);
56463+ va_end(temp);
56464+
56465+ p = kmalloc(len + 1, GFP_KERNEL);
56466+ if (!p) {
56467+ va_end(args);
56468+ return NULL;
56469+ }
56470+
56471+ vsnprintf(p, len + 1, fmt, args);
56472+ va_end(args);
56473+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
56474+ wq = create_workqueue(p);
56475+#else
56476+ wq = alloc_workqueue(p, flags, max_active);
56477+#endif
56478+ kfree(p);
56479+
56480+ return wq;
56481+}
56482+#ifdef alloc_workqueue
56483+#undef alloc_workqueue
56484+#endif
56485+#define alloc_workqueue(fmt, flags, max_active, args...) \
56486+ _kc_alloc_workqueue(flags, max_active, fmt, ##args)
56487+
56488+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5))
56489+typedef u32 netdev_features_t;
56490+#endif
56491+#undef PCI_EXP_TYPE_RC_EC
56492+#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
56493+#ifndef CONFIG_BQL
56494+#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0)
56495+#define netdev_completed_queue(_n, _p, _b) do {} while (0)
56496+#define netdev_tx_sent_queue(_q, _b) do {} while (0)
56497+#define netdev_sent_queue(_n, _b) do {} while (0)
56498+#define netdev_tx_reset_queue(_q) do {} while (0)
56499+#define netdev_reset_queue(_n) do {} while (0)
56500+#endif
56501+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
56502+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
56503+#endif /* SLE_VERSION(11,3,0) */
56504+#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q)
56505+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))
56506+static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
56507+ u8 *nexthdrp,
56508+ __be16 __always_unused *frag_offp)
56509+{
56510+ return ipv6_skip_exthdr(skb, start, nexthdrp);
56511+}
56512+#undef ipv6_skip_exthdr
56513+#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d))
56514+#endif /* !SLES11sp4 or greater */
56515+
56516+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
56517+ !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0)))
56518+static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
56519+{
56520+ return index % n_rx_rings;
56521+}
56522+#endif
56523+
56524+#else /* ! < 3.3.0 */
56525+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
56526+#define HAVE_INT_NDO_VLAN_RX_ADD_VID
56527+#ifdef ETHTOOL_SRXNTUPLE
56528+#undef ETHTOOL_SRXNTUPLE
56529+#endif
56530+#endif /* < 3.3.0 */
56531+
56532+/*****************************************************************************/
56533+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
56534+#ifndef NETIF_F_RXFCS
56535+#define NETIF_F_RXFCS 0
56536+#endif /* NETIF_F_RXFCS */
56537+#ifndef NETIF_F_RXALL
56538+#define NETIF_F_RXALL 0
56539+#endif /* NETIF_F_RXALL */
56540+
56541+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
56542+#define NUMTCS_RETURNS_U8
56543+
56544+int _kc_simple_open(struct inode *inode, struct file *file);
56545+#define simple_open _kc_simple_open
56546+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
56547+
56548+#ifndef skb_add_rx_frag
56549+#define skb_add_rx_frag _kc_skb_add_rx_frag
56550+void _kc_skb_add_rx_frag(struct sk_buff * skb, int i, struct page *page,
56551+ int off, int size, unsigned int truesize);
56552+#endif
56553+#ifdef NET_ADDR_RANDOM
56554+#define eth_hw_addr_random(N) do { \
56555+ eth_random_addr(N->dev_addr); \
56556+ N->addr_assign_type |= NET_ADDR_RANDOM; \
56557+ } while (0)
56558+#else /* NET_ADDR_RANDOM */
56559+#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr)
56560+#endif /* NET_ADDR_RANDOM */
56561+
56562+#ifndef for_each_set_bit_from
56563+#define for_each_set_bit_from(bit, addr, size) \
56564+ for ((bit) = find_next_bit((addr), (size), (bit)); \
56565+ (bit) < (size); \
56566+ (bit) = find_next_bit((addr), (size), (bit) + 1))
56567+#endif /* for_each_set_bit_from */
56568+
56569+#else /* < 3.4.0 */
56570+#include <linux/kconfig.h>
56571+#endif /* >= 3.4.0 */
56572+
56573+/*****************************************************************************/
56574+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \
56575+ ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) )
56576+#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK)
56577+#define HAVE_PTP_1588_CLOCK
56578+#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
56579+#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */
56580+
56581+/*****************************************************************************/
56582+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
56583+
56584+#ifndef SIZE_MAX
56585+#define SIZE_MAX (~(size_t)0)
56586+#endif
56587+
56588+#ifndef BITS_PER_LONG_LONG
56589+#define BITS_PER_LONG_LONG 64
56590+#endif
56591+
56592+#ifndef ether_addr_equal
56593+static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
56594+{
56595+ return !compare_ether_addr(addr1, addr2);
56596+}
56597+#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2))
56598+#endif
56599+
56600+/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */
56601+#ifdef CONFIG_OF_NET
56602+static inline int of_get_phy_mode(struct device_node __always_unused *np)
56603+{
56604+ return -ENODEV;
56605+}
56606+
56607+static inline const void *
56608+of_get_mac_address(struct device_node __always_unused *np)
56609+{
56610+ return NULL;
56611+}
56612+#endif
56613+#else
56614+#include <linux/of_net.h>
56615+#define HAVE_FDB_OPS
56616+#define HAVE_ETHTOOL_GET_TS_INFO
56617+#endif /* < 3.5.0 */
56618+
56619+/*****************************************************************************/
56620+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) )
56621+#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */
56622+
56623+#ifndef MDIO_EEE_100TX
56624+#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */
56625+#endif
56626+#ifndef MDIO_EEE_1000T
56627+#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */
56628+#endif
56629+#ifndef MDIO_EEE_10GT
56630+#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */
56631+#endif
56632+#ifndef MDIO_EEE_1000KX
56633+#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */
56634+#endif
56635+#ifndef MDIO_EEE_10GKX4
56636+#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */
56637+#endif
56638+#ifndef MDIO_EEE_10GKR
56639+#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */
56640+#endif
56641+
56642+#ifndef __GFP_MEMALLOC
56643+#define __GFP_MEMALLOC 0
56644+#endif
56645+
56646+#ifndef eth_broadcast_addr
56647+#define eth_broadcast_addr _kc_eth_broadcast_addr
56648+static inline void _kc_eth_broadcast_addr(u8 *addr)
56649+{
56650+ memset(addr, 0xff, ETH_ALEN);
56651+}
56652+#endif
56653+
56654+#ifndef eth_random_addr
56655+#define eth_random_addr _kc_eth_random_addr
56656+static inline void _kc_eth_random_addr(u8 *addr)
56657+{
56658+ get_random_bytes(addr, ETH_ALEN);
56659+ addr[0] &= 0xfe; /* clear multicast */
56660+ addr[0] |= 0x02; /* set local assignment */
56661+}
56662+#endif /* eth_random_addr */
56663+
56664+#ifndef DMA_ATTR_SKIP_CPU_SYNC
56665+#define DMA_ATTR_SKIP_CPU_SYNC 0
56666+#endif
56667+#else /* < 3.6.0 */
56668+#define HAVE_STRUCT_PAGE_PFMEMALLOC
56669+#endif /* < 3.6.0 */
56670+
56671+/******************************************************************************/
56672+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
56673+#include <linux/workqueue.h>
56674+#ifndef ADVERTISED_40000baseKR4_Full
56675+/* these defines were all added in one commit, so should be safe
56676+ * to trigger activation on one define
56677+ */
56678+#define SUPPORTED_40000baseKR4_Full BIT(23)
56679+#define SUPPORTED_40000baseCR4_Full BIT(24)
56680+#define SUPPORTED_40000baseSR4_Full BIT(25)
56681+#define SUPPORTED_40000baseLR4_Full BIT(26)
56682+#define ADVERTISED_40000baseKR4_Full BIT(23)
56683+#define ADVERTISED_40000baseCR4_Full BIT(24)
56684+#define ADVERTISED_40000baseSR4_Full BIT(25)
56685+#define ADVERTISED_40000baseLR4_Full BIT(26)
56686+#endif
56687+
56688+#ifndef mmd_eee_cap_to_ethtool_sup_t
56689+/**
56690+ * mmd_eee_cap_to_ethtool_sup_t
56691+ * @eee_cap: value of the MMD EEE Capability register
56692+ *
56693+ * A small helper function that translates MMD EEE Capability (3.20) bits
56694+ * to ethtool supported settings.
56695+ */
56696+static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
56697+{
56698+ u32 supported = 0;
56699+
56700+ if (eee_cap & MDIO_EEE_100TX)
56701+ supported |= SUPPORTED_100baseT_Full;
56702+ if (eee_cap & MDIO_EEE_1000T)
56703+ supported |= SUPPORTED_1000baseT_Full;
56704+ if (eee_cap & MDIO_EEE_10GT)
56705+ supported |= SUPPORTED_10000baseT_Full;
56706+ if (eee_cap & MDIO_EEE_1000KX)
56707+ supported |= SUPPORTED_1000baseKX_Full;
56708+ if (eee_cap & MDIO_EEE_10GKX4)
56709+ supported |= SUPPORTED_10000baseKX4_Full;
56710+ if (eee_cap & MDIO_EEE_10GKR)
56711+ supported |= SUPPORTED_10000baseKR_Full;
56712+
56713+ return supported;
56714+}
56715+#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \
56716+ __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap)
56717+#endif /* mmd_eee_cap_to_ethtool_sup_t */
56718+
56719+#ifndef mmd_eee_adv_to_ethtool_adv_t
56720+/**
56721+ * mmd_eee_adv_to_ethtool_adv_t
56722+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
56723+ *
56724+ * A small helper function that translates the MMD EEE Advertisement (7.60)
56725+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
56726+ * settings.
56727+ */
56728+static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
56729+{
56730+ u32 adv = 0;
56731+
56732+ if (eee_adv & MDIO_EEE_100TX)
56733+ adv |= ADVERTISED_100baseT_Full;
56734+ if (eee_adv & MDIO_EEE_1000T)
56735+ adv |= ADVERTISED_1000baseT_Full;
56736+ if (eee_adv & MDIO_EEE_10GT)
56737+ adv |= ADVERTISED_10000baseT_Full;
56738+ if (eee_adv & MDIO_EEE_1000KX)
56739+ adv |= ADVERTISED_1000baseKX_Full;
56740+ if (eee_adv & MDIO_EEE_10GKX4)
56741+ adv |= ADVERTISED_10000baseKX4_Full;
56742+ if (eee_adv & MDIO_EEE_10GKR)
56743+ adv |= ADVERTISED_10000baseKR_Full;
56744+
56745+ return adv;
56746+}
56747+
56748+#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \
56749+ __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv)
56750+#endif /* mmd_eee_adv_to_ethtool_adv_t */
56751+
56752+#ifndef ethtool_adv_to_mmd_eee_adv_t
56753+/**
56754+ * ethtool_adv_to_mmd_eee_adv_t
56755+ * @adv: the ethtool advertisement settings
56756+ *
56757+ * A small helper function that translates ethtool advertisement settings
56758+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
56759+ * MMD EEE Link Partner Ability (7.61) registers.
56760+ */
56761+static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv)
56762+{
56763+ u16 reg = 0;
56764+
56765+ if (adv & ADVERTISED_100baseT_Full)
56766+ reg |= MDIO_EEE_100TX;
56767+ if (adv & ADVERTISED_1000baseT_Full)
56768+ reg |= MDIO_EEE_1000T;
56769+ if (adv & ADVERTISED_10000baseT_Full)
56770+ reg |= MDIO_EEE_10GT;
56771+ if (adv & ADVERTISED_1000baseKX_Full)
56772+ reg |= MDIO_EEE_1000KX;
56773+ if (adv & ADVERTISED_10000baseKX4_Full)
56774+ reg |= MDIO_EEE_10GKX4;
56775+ if (adv & ADVERTISED_10000baseKR_Full)
56776+ reg |= MDIO_EEE_10GKR;
56777+
56778+ return reg;
56779+}
56780+#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv)
56781+#endif /* ethtool_adv_to_mmd_eee_adv_t */
56782+
56783+#ifndef pci_pcie_type
56784+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
56785+static inline u8 pci_pcie_type(struct pci_dev *pdev)
56786+{
56787+ int pos;
56788+ u16 reg16;
56789+
56790+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
56791+ BUG_ON(!pos);
56792+ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
56793+ return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
56794+}
56795+#else /* < 2.6.24 */
56796+#define pci_pcie_type(x) (x)->pcie_type
56797+#endif /* < 2.6.24 */
56798+#endif /* pci_pcie_type */
56799+
56800+#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \
56801+ ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \
56802+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) )
56803+#define ptp_clock_register(caps, args...) ptp_clock_register(caps)
56804+#endif
56805+
56806+#ifndef pcie_capability_read_word
56807+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
56808+#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v)
56809+#endif /* pcie_capability_read_word */
56810+
56811+#ifndef pcie_capability_read_dword
56812+int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
56813+#define pcie_capability_read_dword(d,p,v) __kc_pcie_capability_read_dword(d,p,v)
56814+#endif
56815+
56816+#ifndef pcie_capability_write_word
56817+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
56818+#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v)
56819+#endif /* pcie_capability_write_word */
56820+
56821+#ifndef pcie_capability_clear_and_set_word
56822+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
56823+ u16 clear, u16 set);
56824+#define pcie_capability_clear_and_set_word(d,p,c,s) \
56825+ __kc_pcie_capability_clear_and_set_word(d,p,c,s)
56826+#endif /* pcie_capability_clear_and_set_word */
56827+
56828+#ifndef pcie_capability_clear_word
56829+int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos,
56830+ u16 clear);
56831+#define pcie_capability_clear_word(d, p, c) \
56832+ __kc_pcie_capability_clear_word(d, p, c)
56833+#endif /* pcie_capability_clear_word */
56834+
56835+#ifndef PCI_EXP_LNKSTA2
56836+#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
56837+#endif
56838+
56839+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
56840+#define USE_CONST_DEV_UC_CHAR
56841+#define HAVE_NDO_FDB_ADD_NLATTR
56842+#endif
56843+
56844+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8))
56845+#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi)
56846+#endif /* !RHEL6.8+ */
56847+
56848+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6))
56849+#include <linux/hashtable.h>
56850+#else
56851+
56852+#define DEFINE_HASHTABLE(name, bits) \
56853+ struct hlist_head name[1 << (bits)] = \
56854+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
56855+
56856+#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \
56857+ struct hlist_head name[1 << (bits)] __read_mostly = \
56858+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
56859+
56860+#define DECLARE_HASHTABLE(name, bits) \
56861+ struct hlist_head name[1 << (bits)]
56862+
56863+#define HASH_SIZE(name) (ARRAY_SIZE(name))
56864+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
56865+
56866+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
56867+#define hash_min(val, bits) \
56868+ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
56869+
56870+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
56871+{
56872+ unsigned int i;
56873+
56874+ for (i = 0; i < sz; i++)
56875+ INIT_HLIST_HEAD(&ht[i]);
56876+}
56877+
56878+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
56879+
56880+#define hash_add(hashtable, node, key) \
56881+ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
56882+
56883+static inline bool hash_hashed(struct hlist_node *node)
56884+{
56885+ return !hlist_unhashed(node);
56886+}
56887+
56888+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
56889+{
56890+ unsigned int i;
56891+
56892+ for (i = 0; i < sz; i++)
56893+ if (!hlist_empty(&ht[i]))
56894+ return false;
56895+
56896+ return true;
56897+}
56898+
56899+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
56900+
56901+static inline void hash_del(struct hlist_node *node)
56902+{
56903+ hlist_del_init(node);
56904+}
56905+#endif /* RHEL >= 6.6 */
56906+
56907+/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags
56908+ * parameter on these older kernels.
56909+ */
56910+#define __setup_timer(_timer, _fn, _data, _flags) \
56911+ setup_timer((_timer), (_fn), (_data)) \
56912+
56913+#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) ) ) && \
56914+ ( ! ( SLE_VERSION_CODE >= SLE_VERSION(12,0,0) ) )
56915+
56916+#ifndef mod_delayed_work
56917+/**
56918+ * __mod_delayed_work - modify delay or queue delayed work
56919+ * @wq: workqueue to use
56920+ * @dwork: delayed work to queue
56921+ * @delay: number of jiffies to wait before queueing
56922+ *
56923+ * Return: %true if @dwork was pending and was rescheduled;
56924+ * %false if it wasn't pending
56925+ *
56926+ * Note: the dwork parameter was declared as a void*
56927+ * to avoid compatibility problems with early 2.6 kernels
56928+ * where struct delayed_work is not declared. Unlike the original
56929+ * implementation, flags are not preserved and it shouldn't be
56930+ * used in interrupt context.
56931+ */
56932+static inline bool __mod_delayed_work(struct workqueue_struct *wq,
56933+ void *dwork,
56934+ unsigned long delay)
56935+{
56936+ bool ret = cancel_delayed_work(dwork);
56937+ queue_delayed_work(wq, dwork, delay);
56938+ return ret;
56939+}
56940+#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay)
56941+#endif /* mod_delayed_work */
56942+
56943+#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */
56944+#else /* >= 3.7.0 */
56945+#include <linux/hashtable.h>
56946+#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
56947+#define USE_CONST_DEV_UC_CHAR
56948+#define HAVE_NDO_FDB_ADD_NLATTR
56949+#endif /* >= 3.7.0 */
56950+
56951+/*****************************************************************************/
56952+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
56953+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \
56954+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
56955+#ifndef pci_sriov_set_totalvfs
56956+static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs)
56957+{
56958+ return 0;
56959+}
56960+#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b))
56961+#endif
56962+#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */
56963+#ifndef PCI_EXP_LNKCTL_ASPM_L0S
56964+#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */
56965+#endif
56966+#ifndef PCI_EXP_LNKCTL_ASPM_L1
56967+#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */
56968+#endif
56969+#define HAVE_CONFIG_HOTPLUG
56970+/* Reserved Ethernet Addresses per IEEE 802.1Q */
56971+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = {
56972+ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
56973+
56974+#ifndef is_link_local_ether_addr
56975+static inline bool __kc_is_link_local_ether_addr(const u8 *addr)
56976+{
56977+ __be16 *a = (__be16 *)addr;
56978+ static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
56979+ static const __be16 m = cpu_to_be16(0xfff0);
56980+
56981+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
56982+}
56983+#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr)
56984+#endif /* is_link_local_ether_addr */
56985+
56986+#ifndef FLOW_MAC_EXT
56987+#define FLOW_MAC_EXT 0x40000000
56988+#endif /* FLOW_MAC_EXT */
56989+
56990+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))
56991+#define HAVE_SRIOV_CONFIGURE
56992+#endif
56993+
56994+#ifndef PCI_EXP_LNKCAP_SLS_2_5GB
56995+#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
56996+#endif
56997+
56998+#ifndef PCI_EXP_LNKCAP_SLS_5_0GB
56999+#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
57000+#endif
57001+
57002+#undef PCI_EXP_LNKCAP2_SLS_2_5GB
57003+#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
57004+
57005+#undef PCI_EXP_LNKCAP2_SLS_5_0GB
57006+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
57007+
57008+#undef PCI_EXP_LNKCAP2_SLS_8_0GB
57009+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
57010+
57011+#else /* >= 3.8.0 */
57012+#ifndef __devinit
57013+#define __devinit
57014+#endif
57015+
57016+#ifndef __devinitdata
57017+#define __devinitdata
57018+#endif
57019+
57020+#ifndef __devinitconst
57021+#define __devinitconst
57022+#endif
57023+
57024+#ifndef __devexit
57025+#define __devexit
57026+#endif
57027+
57028+#ifndef __devexit_p
57029+#define __devexit_p
57030+#endif
57031+
57032+#ifndef HAVE_ENCAP_CSUM_OFFLOAD
57033+#define HAVE_ENCAP_CSUM_OFFLOAD
57034+#endif
57035+
57036+#ifndef HAVE_GRE_ENCAP_OFFLOAD
57037+#define HAVE_GRE_ENCAP_OFFLOAD
57038+#endif
57039+
57040+#ifndef HAVE_SRIOV_CONFIGURE
57041+#define HAVE_SRIOV_CONFIGURE
57042+#endif
57043+
57044+#define HAVE_BRIDGE_ATTRIBS
57045+#ifndef BRIDGE_MODE_VEB
57046+#define BRIDGE_MODE_VEB 0 /* Default loopback mode */
57047+#endif /* BRIDGE_MODE_VEB */
57048+#ifndef BRIDGE_MODE_VEPA
57049+#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */
57050+#endif /* BRIDGE_MODE_VEPA */
57051+#endif /* >= 3.8.0 */
57052+
57053+/*****************************************************************************/
57054+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
57055+
57056+#undef BUILD_BUG_ON
57057+#ifdef __CHECKER__
57058+#define BUILD_BUG_ON(condition) (0)
57059+#else /* __CHECKER__ */
57060+#ifndef __compiletime_warning
57061+#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400)
57062+#define __compiletime_warning(message) __attribute__((warning(message)))
57063+#else /* __GNUC__ */
57064+#define __compiletime_warning(message)
57065+#endif /* __GNUC__ */
57066+#endif /* __compiletime_warning */
57067+#ifndef __compiletime_error
57068+#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400)
57069+#define __compiletime_error(message) __attribute__((error(message)))
57070+#define __compiletime_error_fallback(condition) do { } while (0)
57071+#else /* __GNUC__ */
57072+#define __compiletime_error(message)
57073+#define __compiletime_error_fallback(condition) \
57074+ do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
57075+#endif /* __GNUC__ */
57076+#else /* __compiletime_error */
57077+#define __compiletime_error_fallback(condition) do { } while (0)
57078+#endif /* __compiletime_error */
57079+#define __compiletime_assert(condition, msg, prefix, suffix) \
57080+ do { \
57081+ bool __cond = !(condition); \
57082+ extern void prefix ## suffix(void) __compiletime_error(msg); \
57083+ if (__cond) \
57084+ prefix ## suffix(); \
57085+ __compiletime_error_fallback(__cond); \
57086+ } while (0)
57087+
57088+#define _compiletime_assert(condition, msg, prefix, suffix) \
57089+ __compiletime_assert(condition, msg, prefix, suffix)
57090+#define compiletime_assert(condition, msg) \
57091+ _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
57092+#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
57093+#ifndef __OPTIMIZE__
57094+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
57095+#else /* __OPTIMIZE__ */
57096+#define BUILD_BUG_ON(condition) \
57097+ BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
57098+#endif /* __OPTIMIZE__ */
57099+#endif /* __CHECKER__ */
57100+
57101+#undef hlist_entry
57102+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
57103+
57104+#undef hlist_entry_safe
57105+#define hlist_entry_safe(ptr, type, member) \
57106+ ({ typeof(ptr) ____ptr = (ptr); \
57107+ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
57108+ })
57109+
57110+#undef hlist_for_each_entry
57111+#define hlist_for_each_entry(pos, head, member) \
57112+ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
57113+ pos; \
57114+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
57115+
57116+#undef hlist_for_each_entry_safe
57117+#define hlist_for_each_entry_safe(pos, n, head, member) \
57118+ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
57119+ pos && ({ n = pos->member.next; 1; }); \
57120+ pos = hlist_entry_safe(n, typeof(*pos), member))
57121+
57122+#undef hlist_for_each_entry_continue
57123+#define hlist_for_each_entry_continue(pos, member) \
57124+ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
57125+ pos; \
57126+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
57127+
57128+#undef hlist_for_each_entry_from
57129+#define hlist_for_each_entry_from(pos, member) \
57130+ for (; pos; \
57131+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
57132+
57133+#undef hash_for_each
57134+#define hash_for_each(name, bkt, obj, member) \
57135+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
57136+ (bkt)++)\
57137+ hlist_for_each_entry(obj, &name[bkt], member)
57138+
57139+#undef hash_for_each_safe
57140+#define hash_for_each_safe(name, bkt, tmp, obj, member) \
57141+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
57142+ (bkt)++)\
57143+ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
57144+
57145+#undef hash_for_each_possible
57146+#define hash_for_each_possible(name, obj, member, key) \
57147+ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
57148+
57149+#undef hash_for_each_possible_safe
57150+#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
57151+ hlist_for_each_entry_safe(obj, tmp,\
57152+ &name[hash_min(key, HASH_BITS(name))], member)
57153+
57154+#ifdef CONFIG_XPS
57155+int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16);
57156+#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx))
57157+#else /* CONFIG_XPS */
57158+#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0)
57159+#endif /* CONFIG_XPS */
57160+
57161+#ifdef HAVE_NETDEV_SELECT_QUEUE
57162+#define _kc_hashrnd 0xd631614b /* not so random hash salt */
57163+u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
57164+#define __netdev_pick_tx __kc_netdev_pick_tx
57165+#endif /* HAVE_NETDEV_SELECT_QUEUE */
57166+#else
57167+#define HAVE_BRIDGE_FILTER
57168+#define HAVE_FDB_DEL_NLATTR
57169+#endif /* < 3.9.0 */
57170+
57171+/*****************************************************************************/
57172+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
57173+#ifndef NAPI_POLL_WEIGHT
57174+#define NAPI_POLL_WEIGHT 64
57175+#endif
57176+#ifdef CONFIG_PCI_IOV
57177+int __kc_pci_vfs_assigned(struct pci_dev *dev);
57178+#else
57179+static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev)
57180+{
57181+ return 0;
57182+}
57183+#endif
57184+#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev)
57185+
57186+#ifndef list_first_entry_or_null
57187+#define list_first_entry_or_null(ptr, type, member) \
57188+ (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
57189+#endif
57190+
57191+#ifndef VLAN_TX_COOKIE_MAGIC
57192+static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb,
57193+ u16 vlan_tci)
57194+{
57195+#ifdef VLAN_TAG_PRESENT
57196+ vlan_tci |= VLAN_TAG_PRESENT;
57197+#endif
57198+ skb->vlan_tci = vlan_tci;
57199+ return skb;
57200+}
57201+#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \
57202+ __kc__vlan_hwaccel_put_tag(skb, vlan_tci)
57203+#endif
57204+
57205+#ifdef HAVE_FDB_OPS
57206+#if defined(HAVE_NDO_FDB_ADD_NLATTR)
57207+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
57208+ struct net_device *dev,
57209+ const unsigned char *addr, u16 flags);
57210+#elif defined(USE_CONST_DEV_UC_CHAR)
57211+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
57212+ const unsigned char *addr, u16 flags);
57213+#else
57214+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
57215+ unsigned char *addr, u16 flags);
57216+#endif /* HAVE_NDO_FDB_ADD_NLATTR */
57217+#if defined(HAVE_FDB_DEL_NLATTR)
57218+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
57219+ struct net_device *dev,
57220+ const unsigned char *addr);
57221+#elif defined(USE_CONST_DEV_UC_CHAR)
57222+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
57223+ const unsigned char *addr);
57224+#else
57225+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
57226+ unsigned char *addr);
57227+#endif /* HAVE_FDB_DEL_NLATTR */
57228+#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add
57229+#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del
57230+#endif /* HAVE_FDB_OPS */
57231+
57232+#ifndef PCI_DEVID
57233+#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
57234+#endif
57235+
57236+/* The definitions for these functions when CONFIG_OF_NET is defined are
57237+ * pulled in from <linux/of_net.h>. For kernels older than 3.5 we already have
57238+ * backports for when CONFIG_OF_NET is true. These are separated and
57239+ * duplicated in order to cover all cases so that all kernels get either the
57240+ * real definitions (when CONFIG_OF_NET is defined) or the stub definitions
57241+ * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real
57242+ * definitions).
57243+ */
57244+#ifndef CONFIG_OF_NET
57245+static inline int of_get_phy_mode(struct device_node __always_unused *np)
57246+{
57247+ return -ENODEV;
57248+}
57249+
57250+static inline const void *
57251+of_get_mac_address(struct device_node __always_unused *np)
57252+{
57253+ return NULL;
57254+}
57255+#endif
57256+
57257+#else /* >= 3.10.0 */
57258+#define HAVE_ENCAP_TSO_OFFLOAD
57259+#define USE_DEFAULT_FDB_DEL_DUMP
57260+#define HAVE_SKB_INNER_NETWORK_HEADER
57261+
57262+#if (RHEL_RELEASE_CODE && \
57263+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \
57264+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)))
57265+#define HAVE_RHEL7_PCI_DRIVER_RH
57266+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
57267+#define HAVE_RHEL7_PCI_RESET_NOTIFY
57268+#endif /* RHEL >= 7.2 */
57269+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
57270+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
57271+#define HAVE_GENEVE_RX_OFFLOAD
57272+#endif /* RHEL >=7.3 && RHEL < 7.5 */
57273+#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC
57274+#define HAVE_RHEL7_NET_DEVICE_OPS_EXT
57275+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE)
57276+#define HAVE_UDP_ENC_TUNNEL
57277+#endif
57278+#endif /* RHEL >= 7.3 */
57279+
57280+/* new hooks added to net_device_ops_extended in RHEL7.4 */
57281+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
57282+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
57283+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL
57284+#define HAVE_UDP_ENC_RX_OFFLOAD
57285+#endif /* RHEL >= 7.4 */
57286+#endif /* RHEL >= 7.0 && RHEL < 8.0 */
57287+
57288+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0))
57289+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
57290+#define NO_NETDEV_BPF_PROG_ATTACHED
57291+#endif /* RHEL >= 8.0 */
57292+#endif /* >= 3.10.0 */
57293+
57294+/*****************************************************************************/
57295+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) )
57296+#define netdev_notifier_info_to_dev(ptr) ptr
57297+#ifndef time_in_range64
57298+#define time_in_range64(a, b, c) \
57299+ (time_after_eq64(a, b) && \
57300+ time_before_eq64(a, c))
57301+#endif /* time_in_range64 */
57302+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\
57303+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
57304+#define HAVE_NDO_SET_VF_LINK_STATE
57305+#endif
57306+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
57307+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
57308+#endif
57309+#else /* >= 3.11.0 */
57310+#define HAVE_NDO_SET_VF_LINK_STATE
57311+#define HAVE_SKB_INNER_PROTOCOL
57312+#define HAVE_MPLS_FEATURES
57313+#endif /* >= 3.11.0 */
57314+
57315+/*****************************************************************************/
57316+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
57317+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
57318+ enum pcie_link_width *width);
57319+#ifndef pcie_get_minimum_link
57320+#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w)
57321+#endif
57322+
57323+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7))
57324+int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev);
57325+#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction
57326+#endif /* <RHEL6.7 */
57327+
57328+#else /* >= 3.12.0 */
57329+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))
57330+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
57331+#endif
57332+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) )
57333+#define HAVE_VXLAN_RX_OFFLOAD
57334+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN)
57335+#define HAVE_UDP_ENC_TUNNEL
57336+#endif
57337+#endif /* < 4.8.0 */
57338+#define HAVE_NDO_GET_PHYS_PORT_ID
57339+#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK
57340+#endif /* >= 3.12.0 */
57341+
57342+/*****************************************************************************/
57343+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
57344+#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m)
57345+int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask);
57346+#ifndef u64_stats_init
57347+#define u64_stats_init(a) do { } while(0)
57348+#endif
57349+#undef BIT_ULL
57350+#define BIT_ULL(n) (1ULL << (n))
57351+
57352+#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) && \
57353+ !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)))
57354+static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
57355+{
57356+ dev = pci_physfn(dev);
57357+ if (pci_is_root_bus(dev->bus))
57358+ return NULL;
57359+
57360+ return dev->bus->self;
57361+}
57362+#endif
57363+
57364+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0))
57365+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
57366+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
57367+#endif
57368+#ifndef list_next_entry
57369+#define list_next_entry(pos, member) \
57370+ list_entry((pos)->member.next, typeof(*(pos)), member)
57371+#endif
57372+#ifndef list_prev_entry
57373+#define list_prev_entry(pos, member) \
57374+ list_entry((pos)->member.prev, typeof(*(pos)), member)
57375+#endif
57376+
57377+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) )
57378+#define devm_kcalloc(dev, cnt, size, flags) \
57379+ devm_kzalloc(dev, cnt * size, flags)
57380+#endif /* > 2.6.20 */
57381+
57382+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))
57383+#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member)
57384+#endif
57385+
57386+#else /* >= 3.13.0 */
57387+#define HAVE_VXLAN_CHECKS
57388+#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24))
57389+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
57390+#else
57391+#define HAVE_NDO_SELECT_QUEUE_ACCEL
57392+#endif
57393+#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
57394+#endif
57395+
57396+/*****************************************************************************/
57397+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
57398+
57399+#ifndef U16_MAX
57400+#define U16_MAX ((u16)~0U)
57401+#endif
57402+
57403+#ifndef U32_MAX
57404+#define U32_MAX ((u32)~0U)
57405+#endif
57406+
57407+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))
57408+#define dev_consume_skb_any(x) dev_kfree_skb_any(x)
57409+#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x)
57410+#endif
57411+
57412+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \
57413+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)))
57414+
57415+/* it isn't expected that this would be a #define unless we made it so */
57416+#ifndef skb_set_hash
57417+
57418+#define PKT_HASH_TYPE_NONE 0
57419+#define PKT_HASH_TYPE_L2 1
57420+#define PKT_HASH_TYPE_L3 2
57421+#define PKT_HASH_TYPE_L4 3
57422+
57423+enum _kc_pkt_hash_types {
57424+ _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE,
57425+ _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2,
57426+ _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3,
57427+ _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4,
57428+};
57429+#define pkt_hash_types _kc_pkt_hash_types
57430+
57431+#define skb_set_hash __kc_skb_set_hash
57432+static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb,
57433+ u32 __maybe_unused hash,
57434+ int __maybe_unused type)
57435+{
57436+#ifdef HAVE_SKB_L4_RXHASH
57437+ skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
57438+#endif
57439+#ifdef NETIF_F_RXHASH
57440+ skb->rxhash = hash;
57441+#endif
57442+}
57443+#endif /* !skb_set_hash */
57444+
57445+#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */
57446+
57447+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
57448+#ifndef HAVE_VXLAN_RX_OFFLOAD
57449+#define HAVE_VXLAN_RX_OFFLOAD
57450+#endif /* HAVE_VXLAN_RX_OFFLOAD */
57451+#endif
57452+
57453+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN)
57454+#define HAVE_UDP_ENC_TUNNEL
57455+#endif
57456+
57457+#ifndef HAVE_VXLAN_CHECKS
57458+#define HAVE_VXLAN_CHECKS
57459+#endif /* HAVE_VXLAN_CHECKS */
57460+#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */
57461+
57462+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\
57463+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)))
57464+#define HAVE_NDO_DFWD_OPS
57465+#endif
57466+
57467+#ifndef pci_enable_msix_range
57468+int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
57469+ int minvec, int maxvec);
57470+#define pci_enable_msix_range __kc_pci_enable_msix_range
57471+#endif
57472+
57473+#ifndef ether_addr_copy
57474+#define ether_addr_copy __kc_ether_addr_copy
57475+static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src)
57476+{
57477+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
57478+ *(u32 *)dst = *(const u32 *)src;
57479+ *(u16 *)(dst + 4) = *(const u16 *)(src + 4);
57480+#else
57481+ u16 *a = (u16 *)dst;
57482+ const u16 *b = (const u16 *)src;
57483+
57484+ a[0] = b[0];
57485+ a[1] = b[1];
57486+ a[2] = b[2];
57487+#endif
57488+}
57489+#endif /* ether_addr_copy */
57490+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
57491+ int target, unsigned short *fragoff, int *flags);
57492+#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e))
57493+
57494+#ifndef OPTIMIZER_HIDE_VAR
57495+#ifdef __GNUC__
57496+#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
57497+#else
57498+#include <linux/barrier.h>
57499+#define OPTIMIZER_HIDE_VAR(var) barrier()
57500+#endif
57501+#endif
57502+
57503+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0)) && \
57504+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,4,0)))
57505+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
57506+{
57507+#ifdef NETIF_F_RXHASH
57508+ return skb->rxhash;
57509+#else
57510+ return 0;
57511+#endif /* NETIF_F_RXHASH */
57512+}
57513+#endif /* !RHEL > 7.0 && !SLES >= 10.4 */
57514+
57515+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
57516+#define request_firmware_direct request_firmware
57517+#endif /* !RHEL || RHEL < 7.5 */
57518+
57519+#else /* >= 3.14.0 */
57520+
57521+/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */
57522+#ifndef HAVE_NDO_DFWD_OPS
57523+#define HAVE_NDO_DFWD_OPS
57524+#endif
57525+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
57526+#endif /* 3.14.0 */
57527+
57528+/*****************************************************************************/
57529+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) )
57530+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \
57531+ !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30)))
57532+#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh
57533+#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh
57534+#endif
57535+
57536+char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
57537+#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp)
57538+
57539+#else
57540+#define HAVE_NET_GET_RANDOM_ONCE
57541+#define HAVE_PTP_1588_CLOCK_PINS
57542+#define HAVE_NETDEV_PORT
57543+#endif /* 3.15.0 */
57544+
57545+/*****************************************************************************/
57546+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) )
57547+#ifndef smp_mb__before_atomic
57548+#define smp_mb__before_atomic() smp_mb()
57549+#define smp_mb__after_atomic() smp_mb()
57550+#endif
57551+#ifndef __dev_uc_sync
57552+#ifdef HAVE_SET_RX_MODE
57553+#ifdef NETDEV_HW_ADDR_T_UNICAST
57554+int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list,
57555+ struct net_device *dev,
57556+ int (*sync)(struct net_device *, const unsigned char *),
57557+ int (*unsync)(struct net_device *, const unsigned char *));
57558+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
57559+ struct net_device *dev,
57560+ int (*unsync)(struct net_device *, const unsigned char *));
57561+#endif
57562+#ifndef NETDEV_HW_ADDR_T_MULTICAST
57563+int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count,
57564+ struct net_device *dev,
57565+ int (*sync)(struct net_device *, const unsigned char *),
57566+ int (*unsync)(struct net_device *, const unsigned char *));
57567+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
57568+ struct net_device *dev,
57569+ int (*unsync)(struct net_device *, const unsigned char *));
57570+#endif
57571+#endif /* HAVE_SET_RX_MODE */
57572+
57573+static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev,
57574+ int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
57575+ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
57576+{
57577+#ifdef NETDEV_HW_ADDR_T_UNICAST
57578+ return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
57579+#elif defined(HAVE_SET_RX_MODE)
57580+ return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count,
57581+ dev, sync, unsync);
57582+#else
57583+ return 0;
57584+#endif
57585+}
57586+#define __dev_uc_sync __kc_dev_uc_sync
57587+
57588+static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev,
57589+ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
57590+{
57591+#ifdef HAVE_SET_RX_MODE
57592+#ifdef NETDEV_HW_ADDR_T_UNICAST
57593+ __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync);
57594+#else /* NETDEV_HW_ADDR_T_MULTICAST */
57595+ __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync);
57596+#endif /* NETDEV_HW_ADDR_T_UNICAST */
57597+#endif /* HAVE_SET_RX_MODE */
57598+}
57599+#define __dev_uc_unsync __kc_dev_uc_unsync
57600+
57601+static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev,
57602+ int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
57603+ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
57604+{
57605+#ifdef NETDEV_HW_ADDR_T_MULTICAST
57606+ return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
57607+#elif defined(HAVE_SET_RX_MODE)
57608+ return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count,
57609+ dev, sync, unsync);
57610+#else
57611+ return 0;
57612+#endif
57613+
57614+}
57615+#define __dev_mc_sync __kc_dev_mc_sync
57616+
57617+static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev,
57618+ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
57619+{
57620+#ifdef HAVE_SET_RX_MODE
57621+#ifdef NETDEV_HW_ADDR_T_MULTICAST
57622+ __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync);
57623+#else /* NETDEV_HW_ADDR_T_MULTICAST */
57624+ __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync);
57625+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
57626+#endif /* HAVE_SET_RX_MODE */
57627+}
57628+#define __dev_mc_unsync __kc_dev_mc_unsync
57629+#endif /* __dev_uc_sync */
57630+
57631+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
57632+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
57633+#endif
57634+
57635+#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM
57636+/* if someone backports this, hopefully they backport as a #define.
57637+ * declare it as zero on older kernels so that if it gets OR'd in
57638+ * it won't affect anything, thereby preventing core driver changes
57639+ */
57640+#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0
57641+#define SKB_GSO_UDP_TUNNEL_CSUM 0
57642+#endif
57643+void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len,
57644+ gfp_t gfp);
57645+#define devm_kmemdup __kc_devm_kmemdup
57646+
57647+#else
57648+#if ( ( LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0) ) && \
57649+ ! ( SLE_VERSION_CODE && ( SLE_VERSION_CODE >= SLE_VERSION(12,4,0)) ) )
57650+#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY
57651+#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */
57652+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
57653+#endif /* 3.16.0 */
57654+
57655+/*****************************************************************************/
57656+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) )
57657+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
57658+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \
57659+ !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
57660+#ifndef timespec64
57661+#define timespec64 timespec
57662+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
57663+{
57664+ return ts;
57665+}
57666+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
57667+{
57668+ return ts64;
57669+}
57670+#define timespec64_equal timespec_equal
57671+#define timespec64_compare timespec_compare
57672+#define set_normalized_timespec64 set_normalized_timespec
57673+#define timespec64_add_safe timespec_add_safe
57674+#define timespec64_add timespec_add
57675+#define timespec64_sub timespec_sub
57676+#define timespec64_valid timespec_valid
57677+#define timespec64_valid_strict timespec_valid_strict
57678+#define timespec64_to_ns timespec_to_ns
57679+#define ns_to_timespec64 ns_to_timespec
57680+#define ktime_to_timespec64 ktime_to_timespec
57681+#define ktime_get_ts64 ktime_get_ts
57682+#define ktime_get_real_ts64 ktime_get_real_ts
57683+#define timespec64_add_ns timespec_add_ns
57684+#endif /* timespec64 */
57685+#endif /* !(RHEL6.8<RHEL7.0) && !RHEL7.2+ */
57686+
57687+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
57688+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))
57689+static inline void ktime_get_real_ts64(struct timespec64 *ts)
57690+{
57691+ *ts = ktime_to_timespec64(ktime_get_real());
57692+}
57693+
57694+static inline void ktime_get_ts64(struct timespec64 *ts)
57695+{
57696+ *ts = ktime_to_timespec64(ktime_get());
57697+}
57698+#endif
57699+
57700+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
57701+#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a)
57702+#endif
57703+
57704+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
57705+#endif /* RHEL_RELEASE_CODE < RHEL7.5 */
57706+
57707+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3))
57708+static inline u64 ktime_get_ns(void)
57709+{
57710+ return ktime_to_ns(ktime_get());
57711+}
57712+
57713+static inline u64 ktime_get_real_ns(void)
57714+{
57715+ return ktime_to_ns(ktime_get_real());
57716+}
57717+
57718+static inline u64 ktime_get_boot_ns(void)
57719+{
57720+ return ktime_to_ns(ktime_get_boottime());
57721+}
57722+#endif /* RHEL < 7.3 */
57723+
57724+#else
57725+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
57726+#include <linux/time64.h>
57727+#endif /* 3.17.0 */
57728+
57729+/*****************************************************************************/
57730+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
57731+#ifndef NO_PTP_SUPPORT
57732+#include <linux/errqueue.h>
57733+struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb);
57734+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
57735+ struct skb_shared_hwtstamps *hwtstamps);
57736+#define skb_clone_sk __kc_skb_clone_sk
57737+#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp
57738+#endif
57739+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
57740+u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char *data,
57741+ unsigned int max_len);
57742+#else
57743+unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len);
57744+#endif /* !RHEL >= 8.2 */
57745+
57746+#define eth_get_headlen __kc_eth_get_headlen
57747+#ifndef ETH_P_XDSA
57748+#define ETH_P_XDSA 0x00F8
57749+#endif
57750+/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */
57751+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1))
57752+#define HAVE_SKBUFF_CSUM_LEVEL
57753+#endif /* >= RH 7.1 */
57754+
57755+/* RHEL 7.3 backported xmit_more */
57756+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
57757+#define HAVE_SKB_XMIT_MORE
57758+#endif /* >= RH 7.3 */
57759+
57760+#undef GENMASK
57761+#define GENMASK(h, l) \
57762+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
57763+#undef GENMASK_ULL
57764+#define GENMASK_ULL(h, l) \
57765+ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
57766+
57767+#else /* 3.18.0 */
57768+#define HAVE_SKBUFF_CSUM_LEVEL
57769+#define HAVE_SKB_XMIT_MORE
57770+#define HAVE_SKB_INNER_PROTOCOL_TYPE
57771+#endif /* 3.18.0 */
57772+
57773+/*****************************************************************************/
57774+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) )
57775+#else
57776+#define HAVE_NDO_FEATURES_CHECK
57777+#endif /* 3.18.4 */
57778+
57779+/*****************************************************************************/
57780+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,13) )
57781+#ifndef WRITE_ONCE
57782+#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); })
57783+#endif
57784+#endif /* 3.18.13 */
57785+
57786+/*****************************************************************************/
57787+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
57788+/* netdev_phys_port_id renamed to netdev_phys_item_id */
57789+#define netdev_phys_item_id netdev_phys_port_id
57790+
57791+static inline void _kc_napi_complete_done(struct napi_struct *napi,
57792+ int __always_unused work_done) {
57793+ napi_complete(napi);
57794+}
57795+/* don't use our backport if the distro kernels already have it */
57796+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \
57797+ (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)))
57798+#define napi_complete_done _kc_napi_complete_done
57799+#endif
57800+
57801+int _kc_bitmap_print_to_pagebuf(bool list, char *buf,
57802+ const unsigned long *maskp, int nmaskbits);
57803+#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf
57804+
57805+#ifndef NETDEV_RSS_KEY_LEN
57806+#define NETDEV_RSS_KEY_LEN (13 * 4)
57807+#endif
57808+#if (!(RHEL_RELEASE_CODE && \
57809+ ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \
57810+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))))
57811+#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len)
57812+#endif /* RHEL_RELEASE_CODE */
57813+void __kc_netdev_rss_key_fill(void *buffer, size_t len);
57814+#define SPEED_20000 20000
57815+#define SPEED_40000 40000
57816+#ifndef dma_rmb
57817+#define dma_rmb() rmb()
57818+#endif
57819+#ifndef dev_alloc_pages
57820+#ifndef NUMA_NO_NODE
57821+#define NUMA_NO_NODE -1
57822+#endif
57823+#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order))
57824+#endif
57825+#ifndef dev_alloc_page
57826+#define dev_alloc_page() dev_alloc_pages(0)
57827+#endif
57828+#if !defined(eth_skb_pad) && !defined(skb_put_padto)
57829+/**
57830+ * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size
57831+ * @skb: buffer to pad
57832+ * @len: minimal length
57833+ *
57834+ * Pads up a buffer to ensure the trailing bytes exist and are
57835+ * blanked. If the buffer already contains sufficient data it
57836+ * is untouched. Otherwise it is extended. Returns zero on
57837+ * success. The skb is freed on error.
57838+ */
57839+static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len)
57840+{
57841+ unsigned int size = skb->len;
57842+
57843+ if (unlikely(size < len)) {
57844+ len -= size;
57845+ if (skb_pad(skb, len))
57846+ return -ENOMEM;
57847+ __skb_put(skb, len);
57848+ }
57849+ return 0;
57850+}
57851+#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len)
57852+
57853+static inline int __kc_eth_skb_pad(struct sk_buff *skb)
57854+{
57855+ return __kc_skb_put_padto(skb, ETH_ZLEN);
57856+}
57857+#define eth_skb_pad(skb) __kc_eth_skb_pad(skb)
57858+#endif /* eth_skb_pad && skb_put_padto */
57859+
57860+#ifndef SKB_ALLOC_NAPI
57861+/* RHEL 7.2 backported napi_alloc_skb and friends */
57862+static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length)
57863+{
57864+ return netdev_alloc_skb_ip_align(napi->dev, length);
57865+}
57866+#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len)
57867+#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len)
57868+#endif /* SKB_ALLOC_NAPI */
57869+#define HAVE_CONFIG_PM_RUNTIME
57870+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \
57871+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
57872+#define HAVE_RXFH_HASHFUNC
57873+#endif /* 6.7 < RHEL < 7.0 */
57874+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
57875+#define HAVE_RXFH_HASHFUNC
57876+#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
57877+#endif /* RHEL > 7.1 */
57878+#ifndef napi_schedule_irqoff
57879+#define napi_schedule_irqoff napi_schedule
57880+#endif
57881+#ifndef READ_ONCE
57882+#define READ_ONCE(_x) ACCESS_ONCE(_x)
57883+#endif
57884+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
57885+#define HAVE_NDO_FDB_ADD_VID
57886+#endif
57887+#ifndef ETH_MODULE_SFF_8636
57888+#define ETH_MODULE_SFF_8636 0x3
57889+#endif
57890+#ifndef ETH_MODULE_SFF_8636_LEN
57891+#define ETH_MODULE_SFF_8636_LEN 256
57892+#endif
57893+#ifndef ETH_MODULE_SFF_8436
57894+#define ETH_MODULE_SFF_8436 0x4
57895+#endif
57896+#ifndef ETH_MODULE_SFF_8436_LEN
57897+#define ETH_MODULE_SFF_8436_LEN 256
57898+#endif
57899+#ifndef writel_relaxed
57900+#define writel_relaxed writel
57901+#endif
57902+#else /* 3.19.0 */
57903+#define HAVE_NDO_FDB_ADD_VID
57904+#define HAVE_RXFH_HASHFUNC
57905+#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
57906+#endif /* 3.19.0 */
57907+
57908+/*****************************************************************************/
57909+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) )
57910+/* vlan_tx_xx functions got renamed to skb_vlan */
57911+#ifndef skb_vlan_tag_get
57912+#define skb_vlan_tag_get vlan_tx_tag_get
57913+#endif
57914+#ifndef skb_vlan_tag_present
57915+#define skb_vlan_tag_present vlan_tx_tag_present
57916+#endif
57917+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
57918+#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
57919+#endif
57920+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
57921+#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
57922+#endif
57923+#else
57924+#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
57925+#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
57926+#endif /* 3.20.0 */
57927+
57928+/*****************************************************************************/
57929+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) )
57930+/* Definition for CONFIG_OF was introduced earlier */
57931+#if !defined(CONFIG_OF) && \
57932+ !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
57933+static inline struct device_node *
57934+pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; }
57935+#else /* !CONFIG_OF && RHEL < 7.3 */
57936+#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT
57937+#endif /* !CONFIG_OF && RHEL < 7.3 */
57938+#else /* < 4.0 */
57939+#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT
57940+#endif /* < 4.0 */
57941+
57942+/*****************************************************************************/
57943+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) )
57944+#ifndef NO_PTP_SUPPORT
57945+#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H
57946+#include <linux/timecounter.h>
57947+#else
57948+#include <linux/clocksource.h>
57949+#endif
57950+static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta)
57951+{
57952+ tc->nsec += delta;
57953+}
57954+
57955+static inline struct net_device *
57956+of_find_net_device_by_node(struct device_node __always_unused *np)
57957+{
57958+ return NULL;
57959+}
57960+
57961+#define timecounter_adjtime __kc_timecounter_adjtime
57962+#endif
57963+#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \
57964+ (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))))
57965+#define HAVE_NDO_SET_VF_RSS_QUERY_EN
57966+#endif
57967+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
57968+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
57969+#endif
57970+#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \
57971+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \
57972+ (SLE_VERSION_CODE > SLE_VERSION(12,1,0)))
57973+unsigned int _kc_cpumask_local_spread(unsigned int i, int node);
57974+#define cpumask_local_spread _kc_cpumask_local_spread
57975+#endif
57976+#else /* >= 4,1,0 */
57977+#define HAVE_PTP_CLOCK_INFO_GETTIME64
57978+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
57979+#define HAVE_PASSTHRU_FEATURES_CHECK
57980+#define HAVE_NDO_SET_VF_RSS_QUERY_EN
57981+#define HAVE_NDO_SET_TX_MAXRATE
57982+#endif /* 4,1,0 */
57983+
57984+/*****************************************************************************/
57985+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9))
57986+#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \
57987+ !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \
57988+ (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \
57989+ !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \
57990+ (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \
57991+ !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0)))
57992+static inline bool page_is_pfmemalloc(struct page __maybe_unused *page)
57993+{
57994+#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC
57995+ return page->pfmemalloc;
57996+#else
57997+ return false;
57998+#endif
57999+}
58000+#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) && !SLES12sp1+ */
58001+#else
58002+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
58003+#endif /* 4.1.9 */
58004+
58005+/*****************************************************************************/
58006+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0))
58007+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \
58008+ !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0)))
58009+#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL
58010+#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL
58011+#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
58012+static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
58013+{
58014+ return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
58015+};
58016+
58017+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
58018+{
58019+ return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
58020+ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
58021+};
58022+#endif /* ! RHEL >= 7.2 && ! SLES >= 12.1 */
58023+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
58024+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
58025+#endif
58026+
58027+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
58028+#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
58029+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \
58030+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))
58031+static inline bool pci_ari_enabled(struct pci_bus *bus)
58032+{
58033+ return bus->self && bus->self->ari_enabled;
58034+}
58035+#endif /* !(RHEL6.8+ || RHEL7.2+) */
58036+#else
58037+static inline bool pci_ari_enabled(struct pci_bus *bus)
58038+{
58039+ return false;
58040+}
58041+#endif /* 2.6.27 */
58042+#else
58043+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
58044+#define HAVE_VF_STATS
58045+#endif /* 4.2.0 */
58046+
58047+/*****************************************************************************/
58048+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0))
58049+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
58050+#define HAVE_NDO_SET_VF_TRUST
58051+#endif /* (RHEL_RELEASE >= 7.3) */
58052+#ifndef CONFIG_64BIT
58053+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
58054+#include <asm-generic/io-64-nonatomic-lo-hi.h> /* 32-bit readq/writeq */
58055+#else /* 3.3.0 => 4.3.x */
58056+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
58057+#include <asm-generic/int-ll64.h>
58058+#endif /* 2.6.26 => 3.3.0 */
58059+#ifndef readq
58060+static inline __u64 readq(const volatile void __iomem *addr)
58061+{
58062+ const volatile u32 __iomem *p = addr;
58063+ u32 low, high;
58064+
58065+ low = readl(p);
58066+ high = readl(p + 1);
58067+
58068+ return low + ((u64)high << 32);
58069+}
58070+#define readq readq
58071+#endif
58072+
58073+#ifndef writeq
58074+static inline void writeq(__u64 val, volatile void __iomem *addr)
58075+{
58076+ writel(val, addr);
58077+ writel(val >> 32, addr + 4);
58078+}
58079+#define writeq writeq
58080+#endif
58081+#endif /* < 3.3.0 */
58082+#endif /* !CONFIG_64BIT */
58083+#else /* < 4.4.0 */
58084+#define HAVE_NDO_SET_VF_TRUST
58085+
58086+#ifndef CONFIG_64BIT
58087+#include <linux/io-64-nonatomic-lo-hi.h> /* 32-bit readq/writeq */
58088+#endif /* !CONFIG_64BIT */
58089+#endif /* 4.4.0 */
58090+
58091+/*****************************************************************************/
58092+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0))
58093+/* protect against a likely backport */
58094+#ifndef NETIF_F_CSUM_MASK
58095+#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM
58096+#endif /* NETIF_F_CSUM_MASK */
58097+#ifndef NETIF_F_SCTP_CRC
58098+#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM
58099+#endif /* NETIF_F_SCTP_CRC */
58100+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)))
58101+#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address
58102+int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused,
58103+ u8 *mac_addr __maybe_unused);
58104+#endif /* !(RHEL_RELEASE >= 7.3) */
58105+#else /* 4.5.0 */
58106+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) )
58107+#define HAVE_GENEVE_RX_OFFLOAD
58108+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE)
58109+#define HAVE_UDP_ENC_TUNNEL
58110+#endif
58111+#endif /* < 4.8.0 */
58112+#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD
58113+#endif /* 4.5.0 */
58114+
58115+/*****************************************************************************/
58116+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0))
58117+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,3))
58118+static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
58119+{
58120+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
58121+ return skb->head + skb->csum_start;
58122+#else /* < 2.6.22 */
58123+ return skb_transport_header(skb);
58124+#endif
58125+}
58126+#endif
58127+
58128+#if !(UBUNTU_VERSION_CODE && \
58129+ UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \
58130+ !(RHEL_RELEASE_CODE && \
58131+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \
58132+ !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
58133+static inline void napi_consume_skb(struct sk_buff *skb,
58134+ int __always_unused budget)
58135+{
58136+ dev_consume_skb_any(skb);
58137+}
58138+
58139+#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */
58140+#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \
58141+ !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
58142+static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
58143+{
58144+ *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
58145+}
58146+#endif
58147+#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \
58148+ !(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0)))
58149+static inline void page_ref_inc(struct page *page)
58150+{
58151+ get_page(page);
58152+}
58153+#else
58154+#define HAVE_PAGE_COUNT_BULK_UPDATE
58155+#endif
58156+#ifndef IPV4_USER_FLOW
58157+#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */
58158+#endif
58159+
58160+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
58161+#define HAVE_TC_SETUP_CLSFLOWER
58162+#define HAVE_TC_FLOWER_ENC
58163+#endif
58164+
58165+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) || \
58166+ (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))
58167+#define HAVE_TC_SETUP_CLSU32
58168+#endif
58169+
58170+#if (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))
58171+#define HAVE_TC_SETUP_CLSFLOWER
58172+#endif
58173+
58174+#else /* >= 4.6.0 */
58175+#define HAVE_PAGE_COUNT_BULK_UPDATE
58176+#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC
58177+#define HAVE_PTP_CROSSTIMESTAMP
58178+#define HAVE_TC_SETUP_CLSFLOWER
58179+#define HAVE_TC_SETUP_CLSU32
58180+#endif /* 4.6.0 */
58181+
58182+/*****************************************************************************/
58183+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0))
58184+#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\
58185+ (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)))
58186+#define HAVE_NETIF_TRANS_UPDATE
58187+#endif /* SLES12sp3+ || RHEL7.4+ */
58188+#if ((UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) || \
58189+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) || \
58190+ (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
58191+#define HAVE_DEVLINK_SUPPORT
58192+#endif /* UBUNTU 4,4,0,21, RHEL 7.4, SLES12 SP3 */
58193+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\
58194+ (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
58195+#define HAVE_ETHTOOL_25G_BITS
58196+#define HAVE_ETHTOOL_50G_BITS
58197+#define HAVE_ETHTOOL_100G_BITS
58198+#endif /* RHEL7.3+ || SLES12sp3+ */
58199+#else /* 4.7.0 */
58200+#define HAVE_DEVLINK_SUPPORT
58201+#define HAVE_NETIF_TRANS_UPDATE
58202+#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE
58203+#define HAVE_ETHTOOL_25G_BITS
58204+#define HAVE_ETHTOOL_50G_BITS
58205+#define HAVE_ETHTOOL_100G_BITS
58206+#define HAVE_TCF_MIRRED_REDIRECT
58207+#endif /* 4.7.0 */
58208+
58209+/*****************************************************************************/
58210+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0))
58211+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
58212+enum udp_parsable_tunnel_type {
58213+ UDP_TUNNEL_TYPE_VXLAN,
58214+ UDP_TUNNEL_TYPE_GENEVE,
58215+};
58216+struct udp_tunnel_info {
58217+ unsigned short type;
58218+ sa_family_t sa_family;
58219+ __be16 port;
58220+};
58221+#endif
58222+
58223+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))
58224+#define HAVE_TCF_EXTS_TO_LIST
58225+#endif
58226+
58227+#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,8,0,0))
58228+#define tc_no_actions(_exts) true
58229+#define tc_for_each_action(_a, _exts) while (0)
58230+#endif
58231+#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\
58232+ !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
58233+static inline int
58234+#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME
58235+pci_request_io_regions(struct pci_dev *pdev, char *name)
58236+#else
58237+pci_request_io_regions(struct pci_dev *pdev, const char *name)
58238+#endif
58239+{
58240+ return pci_request_selected_regions(pdev,
58241+ pci_select_bars(pdev, IORESOURCE_IO), name);
58242+}
58243+
58244+static inline void
58245+pci_release_io_regions(struct pci_dev *pdev)
58246+{
58247+ return pci_release_selected_regions(pdev,
58248+ pci_select_bars(pdev, IORESOURCE_IO));
58249+}
58250+
58251+static inline int
58252+#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME
58253+pci_request_mem_regions(struct pci_dev *pdev, char *name)
58254+#else
58255+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
58256+#endif
58257+{
58258+ return pci_request_selected_regions(pdev,
58259+ pci_select_bars(pdev, IORESOURCE_MEM), name);
58260+}
58261+
58262+static inline void
58263+pci_release_mem_regions(struct pci_dev *pdev)
58264+{
58265+ return pci_release_selected_regions(pdev,
58266+ pci_select_bars(pdev, IORESOURCE_MEM));
58267+}
58268+#endif /* !(SLES >= 12.3.0) && !(RHEL >= 7.4) */
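For context, a hypothetical probe path exercising the region helpers (error unwinding trimmed; "mydrv" is a placeholder driver name):

static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* On kernels without the native helper this expands to the
	 * pci_request_selected_regions() wrapper defined above. */
	err = pci_request_mem_regions(pdev, "mydrv");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}

	/* ... ioremap the BARs, register the netdev ... */
	return 0;
}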
58269+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ||\
58270+ (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
58271+#define HAVE_ETHTOOL_NEW_50G_BITS
58272+#endif /* RHEL7.4+ || SLES12sp3+ */
58273+#else
58274+#define HAVE_UDP_ENC_RX_OFFLOAD
58275+#define HAVE_TCF_EXTS_TO_LIST
58276+#define HAVE_ETHTOOL_NEW_50G_BITS
58277+#endif /* 4.8.0 */
58278+
58279+/*****************************************************************************/
58280+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0))
58281+#ifdef HAVE_TC_SETUP_CLSFLOWER
58282+#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \
58283+ (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))))
58284+#define HAVE_TC_FLOWER_VLAN_IN_TAGS
58285+#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || <SLE_VERSION(12,3,0) */
58286+#endif /* HAVE_TC_SETUP_CLSFLOWER */
58287+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
58288+#define HAVE_ETHTOOL_NEW_1G_BITS
58289+#define HAVE_ETHTOOL_NEW_10G_BITS
58290+#endif /* RHEL7.4+ */
58291+#if (!(SLE_VERSION_CODE) && !(RHEL_RELEASE_CODE)) || \
58292+ SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0)) || \
58293+ RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))
58294+#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a)
58295+#endif /* !SLE_VERSION_CODE && !RHEL_RELEASE_CODE || (SLES <= 12.3.0) || (RHEL <= 7.5) */
58296+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4))
58297+static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
58298+{
58299+ dst[0] = mask & ULONG_MAX;
58300+
58301+ if (sizeof(mask) > sizeof(unsigned long))
58302+ dst[1] = mask >> 32;
58303+}
58304+#endif /* <RHEL7.4 */
58305+#else /* >=4.9 */
58306+#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO
58307+#define HAVE_ETHTOOL_NEW_1G_BITS
58308+#define HAVE_ETHTOOL_NEW_10G_BITS
58309+#endif /* KERNEL_VERSION(4.9.0) */
58310+
58311+/*****************************************************************************/
58312+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
58313+/* SLES 12.3 and RHEL 7.5 backported this interface */
58314+#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \
58315+ (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \
58316+ (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)))
58317+static inline bool _kc_napi_complete_done2(struct napi_struct *napi,
58318+ int __always_unused work_done)
58319+{
58320+ /* it was really hard to get napi_complete_done to be safe to call
58321+ * recursively without running into our own kcompat, so just use
58322+ * napi_complete
58323+ */
58324+ napi_complete(napi);
58325+
58326+ /* true means that the stack is telling the driver to go-ahead and
58327+ * re-enable interrupts
58328+ */
58329+ return true;
58330+}
58331+
58332+#ifdef napi_complete_done
58333+#undef napi_complete_done
58334+#endif
58335+#define napi_complete_done _kc_napi_complete_done2
58336+#endif /* sles and rhel exclusion for < 4.10 */
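A minimal poll-routine sketch (mydrv_* names hypothetical) showing the pattern this wrapper keeps portable to pre-4.10 kernels:

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... clean the rings, counting completed packets into work_done ... */

	if (work_done < budget) {
		/* With the shim above this is plain napi_complete() on old
		 * kernels and always reports "safe to re-enable interrupts". */
		if (napi_complete_done(napi, work_done))
			mydrv_enable_interrupts(napi);	/* hypothetical helper */
	}

	return work_done;
}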
58337+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
58338+#define HAVE_DEV_WALK_API
58339+#define HAVE_ETHTOOL_NEW_2500MB_BITS
58340+#define HAVE_ETHTOOL_5G_BITS
58341+#endif /* RHEL7.4+ */
58342+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == SLE_VERSION(12,3,0)))
58343+#define HAVE_STRUCT_DMA_ATTRS
58344+#endif /* (SLES == 12.3.0) */
58345+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
58346+#define HAVE_NETDEVICE_MIN_MAX_MTU
58347+#endif /* (SLES >= 12.3.0) */
58348+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
58349+#define HAVE_STRUCT_DMA_ATTRS
58350+#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
58351+#define HAVE_NETDEVICE_MIN_MAX_MTU
58352+#endif
58353+#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \
58354+ !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))))
58355+#ifndef dma_map_page_attrs
58356+#define dma_map_page_attrs __kc_dma_map_page_attrs
58357+static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev,
58358+ struct page *page,
58359+ size_t offset, size_t size,
58360+ enum dma_data_direction dir,
58361+ unsigned long __always_unused attrs)
58362+{
58363+ return dma_map_page(dev, page, offset, size, dir);
58364+}
58365+#endif
58366+
58367+#ifndef dma_unmap_page_attrs
58368+#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs
58369+static inline void __kc_dma_unmap_page_attrs(struct device *dev,
58370+ dma_addr_t addr, size_t size,
58371+ enum dma_data_direction dir,
58372+ unsigned long __always_unused attrs)
58373+{
58374+ dma_unmap_page(dev, addr, size, dir);
58375+}
58376+#endif
58377+
58378+static inline void __page_frag_cache_drain(struct page *page,
58379+ unsigned int count)
58380+{
58381+#ifdef HAVE_PAGE_COUNT_BULK_UPDATE
58382+ if (!page_ref_sub_and_test(page, count))
58383+ return;
58384+
58385+ init_page_count(page);
58386+#else
58387+ BUG_ON(count > 1);
58388+ if (!count)
58389+ return;
58390+#endif
58391+ __free_pages(page, compound_order(page));
58392+}
58393+#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */
58394+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) ||\
58395+ (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
58396+#define HAVE_SWIOTLB_SKIP_CPU_SYNC
58397+#endif
58398+
58399+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15,0,0))) ||\
58400+ (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,4))))
58401+#define page_frag_free __free_page_frag
58402+#endif
58403+#ifndef ETH_MIN_MTU
58404+#define ETH_MIN_MTU 68
58405+#endif /* ETH_MIN_MTU */
58406+#else /* >= 4.10 */
58407+#define HAVE_TC_FLOWER_ENC
58408+#define HAVE_NETDEVICE_MIN_MAX_MTU
58409+#define HAVE_SWIOTLB_SKIP_CPU_SYNC
58410+#define HAVE_NETDEV_TC_RESETS_XPS
58411+#define HAVE_XPS_QOS_SUPPORT
58412+#define HAVE_DEV_WALK_API
58413+#define HAVE_ETHTOOL_NEW_2500MB_BITS
58414+#define HAVE_ETHTOOL_5G_BITS
58415+/* From kernel 4.10 onwards, as part of the busy_poll rewrite, a new state
58416+ * was added to NAPI:state. If NAPI_STATE_IN_BUSY_POLL is set in NAPI:state,
58417+ * it means napi_poll is being invoked in a busy_poll context.
58418+ */
58419+#define HAVE_NAPI_STATE_IN_BUSY_POLL
58420+#define HAVE_TCF_MIRRED_EGRESS_REDIRECT
58421+#endif /* 4.10.0 */
58422+
58423+/*****************************************************************************/
58424+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0))
58425+#ifdef CONFIG_NET_RX_BUSY_POLL
58426+#define HAVE_NDO_BUSY_POLL
58427+#endif /* CONFIG_NET_RX_BUSY_POLL */
58428+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) || \
58429+ (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))))
58430+#define HAVE_VOID_NDO_GET_STATS64
58431+#endif /* (SLES >= 12.3.0) || (RHEL >= 7.5) */
58432+
58433+static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb)
58434+{
58435+ if (!skb)
58436+ return;
58437+ dev_kfree_skb_irq(skb);
58438+}
58439+
58440+#undef dev_kfree_skb_irq
58441+#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq
58442+
58443+static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb)
58444+{
58445+ if (!skb)
58446+ return;
58447+ dev_consume_skb_irq(skb);
58448+}
58449+
58450+#undef dev_consume_skb_irq
58451+#define dev_consume_skb_irq _kc_dev_consume_skb_irq
58452+
58453+static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb)
58454+{
58455+ if (!skb)
58456+ return;
58457+ dev_kfree_skb_any(skb);
58458+}
58459+
58460+#undef dev_kfree_skb_any
58461+#define dev_kfree_skb_any _kc_dev_kfree_skb_any
58462+
58463+static inline void _kc_dev_consume_skb_any(struct sk_buff *skb)
58464+{
58465+ if (!skb)
58466+ return;
58467+ dev_consume_skb_any(skb);
58468+}
58469+
58470+#undef dev_consume_skb_any
58471+#define dev_consume_skb_any _kc_dev_consume_skb_any
58472+
58473+#else /* > 4.11 */
58474+#define HAVE_VOID_NDO_GET_STATS64
58475+#define HAVE_VM_OPS_FAULT_NO_VMA
58476+#endif /* 4.11.0 */
58477+
58478+/*****************************************************************************/
58479+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0))
58480+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) || \
58481+ (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
58482+#define HAVE_TCF_EXTS_HAS_ACTION
58483+#endif
58484+#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
58485+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0)))
58486+#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE
58487+#endif /* SLES >= 12sp4 */
58488+#else /* > 4.13 */
58489+#define HAVE_HWTSTAMP_FILTER_NTP_ALL
58490+#define HAVE_NDO_SETUP_TC_CHAIN_INDEX
58491+#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE
58492+#define HAVE_PTP_CLOCK_DO_AUX_WORK
58493+#endif /* 4.13.0 */
58494+
58495+/*****************************************************************************/
58496+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0))
58497+#ifdef ETHTOOL_GLINKSETTINGS
58498+#ifndef ethtool_link_ksettings_del_link_mode
58499+#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \
58500+ __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
58501+#endif
58502+#endif /* ETHTOOL_GLINKSETTINGS */
58503+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0)))
58504+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
58505+#endif
58506+
58507+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
58508+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
58509+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC
58510+#endif
58511+
58512+#define TIMER_DATA_TYPE unsigned long
58513+#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE)
58514+
58515+#define timer_setup(timer, callback, flags) \
58516+ __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \
58517+ (TIMER_DATA_TYPE)(timer), (flags))
58518+
58519+#define from_timer(var, callback_timer, timer_fieldname) \
58520+ container_of(callback_timer, typeof(*var), timer_fieldname)
58521+
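These two shims let a driver target the 4.15+ timer API and still build against older kernels; a small hypothetical sketch:

struct mydrv_adapter {
	struct timer_list watchdog_timer;
	/* ... */
};

static void mydrv_watchdog(struct timer_list *t)
{
	struct mydrv_adapter *adapter = from_timer(adapter, t, watchdog_timer);

	/* ... check link and ring health ... */
	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
}

static void mydrv_watchdog_start(struct mydrv_adapter *adapter)
{
	timer_setup(&adapter->watchdog_timer, mydrv_watchdog, 0);
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}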
58522+#ifndef xdp_do_flush_map
58523+#define xdp_do_flush_map() do {} while (0)
58524+#endif
58525+struct _kc_xdp_buff {
58526+ void *data;
58527+ void *data_end;
58528+ void *data_hard_start;
58529+};
58530+#define xdp_buff _kc_xdp_buff
58531+struct _kc_bpf_prog {
58532+};
58533+#define bpf_prog _kc_bpf_prog
58534+#ifndef DIV_ROUND_DOWN_ULL
58535+#define DIV_ROUND_DOWN_ULL(ll, d) \
58536+ ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
58537+#endif /* DIV_ROUND_DOWN_ULL */
58538+#else /* > 4.14 */
58539+#define HAVE_XDP_SUPPORT
58540+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
58541+#define HAVE_TCF_EXTS_HAS_ACTION
58542+#endif /* 4.14.0 */
58543+
58544+/*****************************************************************************/
58545+#ifndef ETHTOOL_GLINKSETTINGS
58546+
58547+#define __ETHTOOL_LINK_MODE_MASK_NBITS 32
58548+#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS)
58549+
58550+/**
58551+ * struct ethtool_link_ksettings
58552+ * @link_modes: supported and advertising, single item arrays
58553+ * @link_modes.supported: bitmask of supported link speeds
58554+ * @link_modes.advertising: bitmask of currently advertised speeds
58555+ * @base: base link details
58556+ * @base.speed: current link speed
58557+ * @base.port: current port type
58558+ * @base.duplex: current duplex mode
58559+ * @base.autoneg: current autonegotiation settings
58560+ *
58561+ * This struct and the following macros provide a way to support the old
58562+ * ethtool get/set_settings API on older kernels, but in the style of the new
58563+ * GLINKSETTINGS API. In this way, the same code can be used to support both
58564+ * APIs as seamlessly as possible.
58565+ *
58566+ * It should be noted the old API only has support up to the first 32 bits.
58567+ */
58568+struct ethtool_link_ksettings {
58569+ struct {
58570+ u32 speed;
58571+ u8 port;
58572+ u8 duplex;
58573+ u8 autoneg;
58574+ } base;
58575+ struct {
58576+ unsigned long supported[ETHTOOL_LINK_MASK_SIZE];
58577+ unsigned long advertising[ETHTOOL_LINK_MASK_SIZE];
58578+ } link_modes;
58579+};
58580+
58581+#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_ ## mode
58582+#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_ ## mode
58583+#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_ ## name
58584+#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode)
58585+
58586+/**
58587+ * ethtool_link_ksettings_zero_link_mode
58588+ * @ptr: ptr to ksettings struct
58589+ * @name: supported or advertising
58590+ */
58591+#define ethtool_link_ksettings_zero_link_mode(ptr, name)\
58592+ (*((ptr)->link_modes.name) = 0x0)
58593+
58594+/**
58595+ * ethtool_link_ksettings_add_link_mode
58596+ * @ptr: ptr to ksettings struct
58597+ * @name: supported or advertising
58598+ * @mode: link mode to add
58599+ */
58600+#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)\
58601+ (*((ptr)->link_modes.name) |= (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode))
58602+
58603+/**
58604+ * ethtool_link_ksettings_del_link_mode
58605+ * @ptr: ptr to ksettings struct
58606+ * @name: supported or advertising
58607+ * @mode: link mode to delete
58608+ */
58609+#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)\
58610+ (*((ptr)->link_modes.name) &= ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode))
58611+
58612+/**
58613+ * ethtool_link_ksettings_test_link_mode
58614+ * @ptr: ptr to ksettings struct
58615+ * @name: supported or advertising
58616+ * @mode: link mode to add
58617+ */
58618+#define ethtool_link_ksettings_test_link_mode(ptr, name, mode)\
58619+ (!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode)))
58620+
58621+/**
58622+ * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd
58623+ * @ks: ethtool_link_ksettings struct
58624+ * @cmd: ethtool_cmd struct
58625+ *
58626+ * Convert an ethtool_link_ksettings structure into the older ethtool_cmd
58627+ * structure. We provide this in kcompat.h so that drivers can easily
58628+ * implement the older .{get|set}_settings as wrappers around the new api.
58629+ * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually
58630+ * a real function in the kernel.
58631+ */
58632+static inline void
58633+_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks,
58634+ struct ethtool_cmd *cmd)
58635+{
58636+ cmd->supported = (u32)ks->link_modes.supported[0];
58637+ cmd->advertising = (u32)ks->link_modes.advertising[0];
58638+ ethtool_cmd_speed_set(cmd, ks->base.speed);
58639+ cmd->duplex = ks->base.duplex;
58640+ cmd->autoneg = ks->base.autoneg;
58641+ cmd->port = ks->base.port;
58642+}
58643+
58644+#endif /* !ETHTOOL_GLINKSETTINGS */
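On kernels without ETHTOOL_GLINKSETTINGS this lets a legacy .get_settings callback be written as a thin wrapper over ksettings-style code; an illustrative sketch (the driver name and the single 10G link mode are arbitrary):

static int mydrv_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct ethtool_link_ksettings ks;

	ethtool_link_ksettings_zero_link_mode(&ks, supported);
	ethtool_link_ksettings_zero_link_mode(&ks, advertising);
	ethtool_link_ksettings_add_link_mode(&ks, supported, 10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(&ks, advertising, 10000baseT_Full);
	ks.base.speed = SPEED_10000;
	ks.base.duplex = DUPLEX_FULL;
	ks.base.autoneg = AUTONEG_DISABLE;
	ks.base.port = PORT_FIBRE;

	_kc_ethtool_ksettings_to_cmd(&ks, cmd);
	return 0;
}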
58645+
58646+/*****************************************************************************/
58647+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \
58648+ (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \
58649+ (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))))
58650+#define phy_speed_to_str _kc_phy_speed_to_str
58651+const char *_kc_phy_speed_to_str(int speed);
58652+#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */
58653+#include <linux/phy.h>
58654+#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */
58655+
58656+/*****************************************************************************/
58657+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
58658+#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) || \
58659+ (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))))
58660+#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
58661+#define HAVE_TCF_BLOCK
58662+#else /* RHEL >= 7.6 || SLES >= 15.1 */
58663+#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO
58664+#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */
58665+void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
58666+ struct ethtool_link_ksettings *src);
58667+#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks
58668+#else /* >= 4.15 */
58669+#define HAVE_NDO_BPF
58670+#define HAVE_XDP_BUFF_DATA_META
58671+#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
58672+#define HAVE_TCF_BLOCK
58673+#endif /* 4.15.0 */
58674+
58675+/*****************************************************************************/
58676+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0))
58677+#define pci_printk(level, pdev, fmt, arg...) \
58678+ dev_printk(level, &(pdev)->dev, fmt, ##arg)
58679+#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
58680+#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
58681+#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
58682+#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
58683+#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
58684+#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
58685+#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
58686+#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
58687+
58688+#ifndef array_index_nospec
58689+static inline unsigned long _kc_array_index_mask_nospec(unsigned long index,
58690+ unsigned long size)
58691+{
58692+ /*
58693+ * Always calculate and emit the mask even if the compiler
58694+ * thinks the mask is not needed. The compiler does not take
58695+ * into account the value of @index under speculation.
58696+ */
58697+ OPTIMIZER_HIDE_VAR(index);
58698+ return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
58699+}
58700+
58701+#define array_index_nospec(index, size) \
58702+({ \
58703+ typeof(index) _i = (index); \
58704+ typeof(size) _s = (size); \
58705+ unsigned long _mask = _kc_array_index_mask_nospec(_i, _s); \
58706+ \
58707+ BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
58708+ BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
58709+ \
58710+ (typeof(_i)) (_i & _mask); \
58711+})
58712+#endif /* array_index_nospec */
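A typical use of the helper (the adapter and ring types are hypothetical) clamps an already bounds-checked, user-influenced index so mis-speculated loads cannot run past the array:

static struct mydrv_ring *mydrv_get_rx_ring(struct mydrv_adapter *adapter,
					    unsigned int queue)
{
	if (queue >= adapter->num_rx_queues)
		return NULL;

	/* Even if the branch above is speculated the wrong way, the masked
	 * index below cannot reach beyond num_rx_queues - 1. */
	queue = array_index_nospec(queue, adapter->num_rx_queues);
	return adapter->rx_rings[queue];
}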
58713+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) && \
58714+ !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))))
58715+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
58716+#include <net/pkt_cls.h>
58717+static inline bool
58718+tc_cls_can_offload_and_chain0(const struct net_device *dev,
58719+ struct tc_cls_common_offload *common)
58720+{
58721+ if (!tc_can_offload(dev))
58722+ return false;
58723+ if (common->chain_index)
58724+ return false;
58725+
58726+ return true;
58727+}
58728+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
58729+#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */
58730+#ifndef sizeof_field
58731+#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER)))
58732+#endif /* sizeof_field */
58733+#else /* >= 4.16 */
58734+#include <linux/nospec.h>
58735+#define HAVE_XDP_BUFF_RXQ
58736+#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK
58737+#define HAVE_TCF_MIRRED_DEV
58738+#define HAVE_VF_STATS_DROPPED
58739+#endif /* 4.16.0 */
58740+
58741+/*****************************************************************************/
58742+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0))
58743+#include <linux/pci_regs.h>
58744+#include <linux/pci.h>
58745+#define PCIE_SPEED_16_0GT 0x17
58746+#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
58747+#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */
58748+#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
58749+void _kc_pcie_print_link_status(struct pci_dev *dev);
58750+#define pcie_print_link_status _kc_pcie_print_link_status
58751+#else /* >= 4.17.0 */
58752+#define HAVE_XDP_BUFF_IN_XDP_H
58753+#endif /* 4.17.0 */
58754+
58755+/*****************************************************************************/
58756+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0))
58757+#ifdef NETIF_F_HW_L2FW_DOFFLOAD
58758+#include <linux/if_macvlan.h>
58759+#ifndef macvlan_supports_dest_filter
58760+#define macvlan_supports_dest_filter _kc_macvlan_supports_dest_filter
58761+static inline bool _kc_macvlan_supports_dest_filter(struct net_device *dev)
58762+{
58763+ struct macvlan_dev *macvlan = netdev_priv(dev);
58764+
58765+ return macvlan->mode == MACVLAN_MODE_PRIVATE ||
58766+ macvlan->mode == MACVLAN_MODE_VEPA ||
58767+ macvlan->mode == MACVLAN_MODE_BRIDGE;
58768+}
58769+#endif
58770+
58771+#if (!SLE_VERSION_CODE || (SLE_VERSION_CODE < SLE_VERSION(15,1,0)))
58772+#ifndef macvlan_accel_priv
58773+#define macvlan_accel_priv _kc_macvlan_accel_priv
58774+static inline void *_kc_macvlan_accel_priv(struct net_device *dev)
58775+{
58776+ struct macvlan_dev *macvlan = netdev_priv(dev);
58777+
58778+ return macvlan->fwd_priv;
58779+}
58780+#endif
58781+
58782+#ifndef macvlan_release_l2fw_offload
58783+#define macvlan_release_l2fw_offload _kc_macvlan_release_l2fw_offload
58784+static inline int _kc_macvlan_release_l2fw_offload(struct net_device *dev)
58785+{
58786+ struct macvlan_dev *macvlan = netdev_priv(dev);
58787+
58788+ macvlan->fwd_priv = NULL;
58789+ return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
58790+}
58791+#endif
58792+#endif /* !SLES || SLES < 15.1 */
58793+#endif /* NETIF_F_HW_L2FW_DOFFLOAD */
58794+#include "kcompat_overflow.h"
58795+
58796+#if (SLE_VERSION_CODE < SLE_VERSION(15,1,0))
58797+#define firmware_request_nowarn request_firmware_direct
58798+#endif /* !SLES || SLES < 15.1 */
58799+
58800+#else
58801+#include <linux/overflow.h>
58802+#include <net/xdp_sock.h>
58803+#define HAVE_XDP_FRAME_STRUCT
58804+#define HAVE_XDP_SOCK
58805+#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS
58806+#define NO_NDO_XDP_FLUSH
58807+#define HAVE_AF_XDP_SUPPORT
58808+#ifndef xdp_umem_get_data
58809+static inline char *__kc_xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
58810+{
58811+ return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
58812+}
58813+
58814+#define xdp_umem_get_data __kc_xdp_umem_get_data
58815+#endif /* !xdp_umem_get_data */
58816+#ifndef xdp_umem_get_dma
58817+static inline dma_addr_t __kc_xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
58818+{
58819+ return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
58820+}
58821+
58822+#define xdp_umem_get_dma __kc_xdp_umem_get_dma
58823+#endif /* !xdp_umem_get_dma */
58824+#endif /* 4.18.0 */
58825+
58826+/*****************************************************************************/
58827+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0))
58828+#define bitmap_alloc(nbits, flags) \
58829+ kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags)
58830+#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO))
58831+#define bitmap_free(bitmap) kfree(bitmap)
58832+#ifdef ETHTOOL_GLINKSETTINGS
58833+#define ethtool_ks_clear(ptr, name) \
58834+ ethtool_link_ksettings_zero_link_mode(ptr, name)
58835+#define ethtool_ks_add_mode(ptr, name, mode) \
58836+ ethtool_link_ksettings_add_link_mode(ptr, name, mode)
58837+#define ethtool_ks_del_mode(ptr, name, mode) \
58838+ ethtool_link_ksettings_del_link_mode(ptr, name, mode)
58839+#define ethtool_ks_test(ptr, name, mode) \
58840+ ethtool_link_ksettings_test_link_mode(ptr, name, mode)
58841+#endif /* ETHTOOL_GLINKSETTINGS */
58842+#define HAVE_NETPOLL_CONTROLLER
58843+#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS
58844+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))
58845+#define HAVE_TCF_MIRRED_DEV
58846+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
58847+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
58848+#endif
58849+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) ||\
58850+ (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))
58851+#define HAVE_TCF_EXTS_FOR_EACH_ACTION
58852+#undef HAVE_TCF_EXTS_TO_LIST
58853+#endif /* RHEL8.0+ */
58854+#else /* >= 4.19.0 */
58855+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
58856+#define NO_NETDEV_BPF_PROG_ATTACHED
58857+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
58858+#define HAVE_NETDEV_SB_DEV
58859+#undef HAVE_TCF_EXTS_TO_LIST
58860+#define HAVE_TCF_EXTS_FOR_EACH_ACTION
58861+#define HAVE_TCF_VLAN_TPID
58862+#endif /* 4.19.0 */
58863+
58864+/*****************************************************************************/
58865+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0))
58866+#define HAVE_XDP_UMEM_PROPS
58867+#ifdef HAVE_AF_XDP_SUPPORT
58868+#ifndef napi_if_scheduled_mark_missed
58869+static inline bool __kc_napi_if_scheduled_mark_missed(struct napi_struct *n)
58870+{
58871+ unsigned long val, new;
58872+
58873+ do {
58874+ val = READ_ONCE(n->state);
58875+ if (val & NAPIF_STATE_DISABLE)
58876+ return true;
58877+
58878+ if (!(val & NAPIF_STATE_SCHED))
58879+ return false;
58880+
58881+ new = val | NAPIF_STATE_MISSED;
58882+ } while (cmpxchg(&n->state, val, new) != val);
58883+
58884+ return true;
58885+}
58886+
58887+#define napi_if_scheduled_mark_missed __kc_napi_if_scheduled_mark_missed
58888+#endif /* !napi_if_scheduled_mark_missed */
58889+#endif /* HAVE_AF_XDP_SUPPORT */
58890+#else /* >= 4.20.0 */
58891+#define HAVE_AF_XDP_ZC_SUPPORT
58892+#define HAVE_VXLAN_TYPE
58893+#endif /* 4.20.0 */
58894+
58895+/*****************************************************************************/
58896+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0))
58897+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,0)))
58898+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
58899+#define NETLINK_MAX_COOKIE_LEN 20
58900+struct netlink_ext_ack {
58901+ const char *_msg;
58902+ const struct nlattr *bad_attr;
58903+ u8 cookie[NETLINK_MAX_COOKIE_LEN];
58904+ u8 cookie_len;
58905+};
58906+
58907+#endif /* < 4.12 */
58908+static inline int _kc_dev_open(struct net_device *netdev,
58909+ struct netlink_ext_ack __always_unused *extack)
58910+{
58911+ return dev_open(netdev);
58912+}
58913+
58914+#define dev_open _kc_dev_open
58915+#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */
58916+#if (RHEL_RELEASE_CODE && \
58917+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \
58918+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) || \
58919+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1)))
58920+#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL
58921+#else /* RHEL >= 7.7 && RHEL < 8.0 || RHEL >= 8.1 */
58922+struct ptp_system_timestamp {
58923+ struct timespec64 pre_ts;
58924+ struct timespec64 post_ts;
58925+};
58926+
58927+static inline void
58928+ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts)
58929+{
58930+ ;
58931+}
58932+
58933+static inline void
58934+ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts)
58935+{
58936+ ;
58937+}
58938+#endif /* !(RHEL >= 7.7 && RHEL != 8.0) */
58939+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1)))
58940+#define HAVE_NDO_BRIDGE_SETLINK_EXTACK
58941+#endif /* RHEL 8.1 */
58942+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))
58943+#define HAVE_TC_INDIR_BLOCK
58944+#endif /* RHEL 8.2 */
58945+#else /* >= 5.0.0 */
58946+#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL
58947+#define HAVE_NDO_BRIDGE_SETLINK_EXTACK
58948+#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM
58949+#define HAVE_GENEVE_TYPE
58950+#define HAVE_TC_INDIR_BLOCK
58951+#endif /* 5.0.0 */
58952+
58953+/*****************************************************************************/
58954+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0))
58955+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1)))
58956+#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE
58957+#define HAVE_NDO_FDB_ADD_EXTACK
58958+#else /* RHEL < 8.1 */
58959+#ifdef HAVE_TC_SETUP_CLSFLOWER
58960+#include <net/pkt_cls.h>
58961+
58962+struct flow_match {
58963+ struct flow_dissector *dissector;
58964+ void *mask;
58965+ void *key;
58966+};
58967+
58968+struct flow_match_basic {
58969+ struct flow_dissector_key_basic *key, *mask;
58970+};
58971+
58972+struct flow_match_control {
58973+ struct flow_dissector_key_control *key, *mask;
58974+};
58975+
58976+struct flow_match_eth_addrs {
58977+ struct flow_dissector_key_eth_addrs *key, *mask;
58978+};
58979+
58980+#ifdef HAVE_TC_FLOWER_ENC
58981+struct flow_match_enc_keyid {
58982+ struct flow_dissector_key_keyid *key, *mask;
58983+};
58984+#endif
58985+
58986+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
58987+struct flow_match_vlan {
58988+ struct flow_dissector_key_vlan *key, *mask;
58989+};
58990+#endif
58991+
58992+struct flow_match_ipv4_addrs {
58993+ struct flow_dissector_key_ipv4_addrs *key, *mask;
58994+};
58995+
58996+struct flow_match_ipv6_addrs {
58997+ struct flow_dissector_key_ipv6_addrs *key, *mask;
58998+};
58999+
59000+struct flow_match_ports {
59001+ struct flow_dissector_key_ports *key, *mask;
59002+};
59003+
59004+struct flow_rule {
59005+ struct flow_match match;
59006+#if 0
59007+ /* In 5.1+ kernels, action is a member of struct flow_rule but is
59008+ * not compatible with how we kcompat tc_cls_flower_offload_flow_rule
59009+ * below. By not declaring it here, any driver that attempts to use
59010+ * action as an element of struct flow_rule will fail to compile
59011+ * instead of silently trying to access memory that shouldn't be.
59012+ */
59013+ struct flow_action action;
59014+#endif
59015+};
59016+
59017+void flow_rule_match_basic(const struct flow_rule *rule,
59018+ struct flow_match_basic *out);
59019+void flow_rule_match_control(const struct flow_rule *rule,
59020+ struct flow_match_control *out);
59021+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
59022+ struct flow_match_eth_addrs *out);
59023+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
59024+void flow_rule_match_vlan(const struct flow_rule *rule,
59025+ struct flow_match_vlan *out);
59026+#endif
59027+void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
59028+ struct flow_match_ipv4_addrs *out);
59029+void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
59030+ struct flow_match_ipv6_addrs *out);
59031+void flow_rule_match_ports(const struct flow_rule *rule,
59032+ struct flow_match_ports *out);
59033+#ifdef HAVE_TC_FLOWER_ENC
59034+void flow_rule_match_enc_ports(const struct flow_rule *rule,
59035+ struct flow_match_ports *out);
59036+void flow_rule_match_enc_control(const struct flow_rule *rule,
59037+ struct flow_match_control *out);
59038+void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
59039+ struct flow_match_ipv4_addrs *out);
59040+void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
59041+ struct flow_match_ipv6_addrs *out);
59042+void flow_rule_match_enc_keyid(const struct flow_rule *rule,
59043+ struct flow_match_enc_keyid *out);
59044+#endif
59045+
59046+static inline struct flow_rule *
59047+tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
59048+{
59049+ return (struct flow_rule *)&tc_flow_cmd->dissector;
59050+}
59051+
59052+static inline bool flow_rule_match_key(const struct flow_rule *rule,
59053+ enum flow_dissector_key_id key)
59054+{
59055+ return dissector_uses_key(rule->match.dissector, key);
59056+}
59057+#endif /* HAVE_TC_SETUP_CLSFLOWER */
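A rough sketch (mydrv_* hypothetical, most keys omitted) of how a cls_flower handler might consume these shims; on newer kernels essentially the same code runs against the real flow_rule infrastructure:

static int mydrv_parse_cls_flower(struct mydrv_adapter *adapter,
				  struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic basic;

		flow_rule_match_basic(rule, &basic);
		/* e.g. inspect basic.key->n_proto against basic.mask->n_proto */
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs eth;

		flow_rule_match_eth_addrs(rule, &eth);
		/* e.g. program eth.key->dst filtered by eth.mask->dst */
	}

	return 0;
}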
59058+
59059+#endif /* RHEL < 8.1 */
59060+#else /* >= 5.1.0 */
59061+#define HAVE_NDO_FDB_ADD_EXTACK
59062+#define NO_XDP_QUERY_XSK_UMEM
59063+#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE
59064+#define HAVE_TC_FLOWER_ENC_IP
59065+#endif /* 5.1.0 */
59066+
59067+/*****************************************************************************/
59068+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0))
59069+#if (defined HAVE_SKB_XMIT_MORE) && \
59070+(!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
59071+#define netdev_xmit_more() (skb->xmit_more)
59072+#else
59073+#define netdev_xmit_more() (0)
59074+#endif
59075+
59076+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
59077+#ifndef eth_get_headlen
59078+static inline u32
59079+__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data,
59080+ unsigned int len)
59081+{
59082+ return eth_get_headlen(data, len);
59083+}
59084+
59085+#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len)
59086+#endif /* !eth_get_headlen */
59087+#endif /* !RHEL >= 8.2 */
59088+
59089+#ifndef mmiowb
59090+#ifdef CONFIG_IA64
59091+#define mmiowb() asm volatile ("mf.a" ::: "memory")
59092+#else
59093+#define mmiowb()
59094+#endif
59095+#endif /* mmiowb */
59096+
59097+#else /* >= 5.2.0 */
59098+#define HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED
59099+#define SPIN_UNLOCK_IMPLIES_MMIOWB
59100+#endif /* 5.2.0 */
59101+
59102+/*****************************************************************************/
59103+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0))
59104+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))
59105+#define flow_block_offload tc_block_offload
59106+#define flow_block_command tc_block_command
59107+#define flow_cls_offload tc_cls_flower_offload
59108+#define flow_block_binder_type tcf_block_binder_type
59109+#define flow_cls_common_offload tc_cls_common_offload
59110+#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule
59111+#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE
59112+#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY
59113+#define FLOW_CLS_STATS TC_CLSFLOWER_STATS
59114+#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE
59115+#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY
59116+#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \
59117+ TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS
59118+#define FLOW_BLOCK_BIND TC_BLOCK_BIND
59119+#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND
59120+
59121+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
59122+#include <net/pkt_cls.h>
59123+
59124+int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f,
59125+ struct list_head *driver_list,
59126+ tc_setup_cb_t *cb,
59127+ void *cb_ident, void *cb_priv,
59128+ bool ingress_only);
59129+
59130+#define flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \
59131+ ingress_only) \
59132+ _kc_flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \
59133+ ingress_only)
59134+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
59135+#else /* RHEL >= 8.2 */
59136+#define HAVE_FLOW_BLOCK_API
59137+#endif /* RHEL >= 8.2 */
59138+#else /* >= 5.3.0 */
59139+#define XSK_UMEM_RETURNS_XDP_DESC
59140+#define HAVE_FLOW_BLOCK_API
59141+#endif /* 5.3.0 */
59142+
59143+/*****************************************************************************/
59144+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0))
59145+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) && \
59146+ !(SLE_VERSION_CODE >= SLE_VERSION(15,2,0)))
59147+static inline unsigned int skb_frag_off(const skb_frag_t *frag)
59148+{
59149+ return frag->page_offset;
59150+}
59151+
59152+static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
59153+{
59154+ frag->page_offset += delta;
59155+}
59156+#define __flow_indr_block_cb_register __tc_indr_block_cb_register
59157+#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister
59158+#endif /* !(RHEL >= 8.2) && !(SLES >= 15sp2) */
59159+#if (SLE_VERSION_CODE >= SLE_VERSION(15,2,0))
59160+#define HAVE_NDO_XSK_WAKEUP
59161+#endif /* SLES15sp2 */
59162+#else /* >= 5.4.0 */
59163+#define HAVE_NDO_XSK_WAKEUP
59164+#endif /* 5.4.0 */
59165+
59166+/*****************************************************************************/
59167+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0))
59168+#else /* >= 5.6.0 */
59169+#define HAVE_TX_TIMEOUT_TXQUEUE
59170+#endif /* 5.6.0 */
59171+
59172+#endif /* _KCOMPAT_H_ */
59173diff --git a/drivers/net/ethernet/intel/i40e/kcompat_overflow.h b/drivers/net/ethernet/intel/i40e/kcompat_overflow.h
59174new file mode 100644
59175index 000000000..b7848fed0
59176--- /dev/null
59177+++ b/drivers/net/ethernet/intel/i40e/kcompat_overflow.h
59178@@ -0,0 +1,319 @@
59179+/* SPDX-License-Identifier: GPL-2.0 */
59180+/* Copyright(c) 2013 - 2020 Intel Corporation. */
59181+
59182+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
59183+#ifndef __LINUX_OVERFLOW_H
59184+#define __LINUX_OVERFLOW_H
59185+
59186+#include <linux/compiler.h>
59187+
59188+/*
59189+ * In the fallback code below, we need to compute the minimum and
59190+ * maximum values representable in a given type. These macros may also
59191+ * be useful elsewhere, so we provide them outside the
59192+ * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
59193+ *
59194+ * It would seem more obvious to do something like
59195+ *
59196+ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
59197+ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
59198+ *
59199+ * Unfortunately, the middle expressions, strictly speaking, have
59200+ * undefined behaviour, and at least some versions of gcc warn about
59201+ * the type_max expression (but not if -fsanitize=undefined is in
59202+ * effect; in that case, the warning is deferred to runtime...).
59203+ *
59204+ * The slightly excessive casting in type_min is to make sure the
59205+ * macros also produce sensible values for the exotic type _Bool. [The
59206+ * overflow checkers only almost work for _Bool, but that's
59207+ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
59208+ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
59209+ * argument.]
59210+ *
59211+ * Idea stolen from
59212+ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
59213+ * credit to Christian Biere.
59214+ */
59215+/* The is_signed_type macro is redefined in a few places in various kernel
59216+ * headers. If this header is included at the same time as one of those, we
59217+ * will generate compilation warnings. Since we can't fix every old kernel,
59218+ * rename is_signed_type for this file to _kc_is_signed_type. This prevents
59219+ * the macro name collision, and should be safe since our drivers do not
59220+ * directly call the macro.
59221+ */
59222+#define _kc_is_signed_type(type) (((type)(-1)) < (type)1)
59223+#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - _kc_is_signed_type(type)))
59224+#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
59225+#define type_min(T) ((T)((T)-type_max(T)-(T)1))
59226+
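To make the trick above concrete, a tiny userspace sketch (illustration only, using <stdint.h> names instead of the kernel typedefs) evaluates the same expressions:

#include <stdio.h>
#include <stdint.h>

#define is_signed(T)	(((T)(-1)) < (T)1)
#define half_max(T)	((T)1 << (8 * sizeof(T) - 1 - is_signed(T)))
#define tmax(T)		((T)((half_max(T) - 1) + half_max(T)))
#define tmin(T)		((T)((T)-tmax(T) - (T)1))

int main(void)
{
	/* int8_t: half_max = 1 << 6 = 64, so max = 63 + 64 = 127 and min = -128 */
	printf("int8_t:  %d .. %d\n", (int)tmin(int8_t), (int)tmax(int8_t));
	printf("uint8_t: %u .. %u\n", (unsigned)tmin(uint8_t), (unsigned)tmax(uint8_t));
	printf("int32_t: %d .. %d\n", (int)tmin(int32_t), (int)tmax(int32_t));
	return 0;
}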
59227+
59228+#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
59229+/*
59230+ * For simplicity and code hygiene, the fallback code below insists on
59231+ * a, b and *d having the same type (similar to the min() and max()
59232+ * macros), whereas gcc's type-generic overflow checkers accept
59233+ * different types. Hence we don't just make check_add_overflow an
59234+ * alias for __builtin_add_overflow, but add type checks similar to
59235+ * below.
59236+ */
59237+#define check_add_overflow(a, b, d) ({ \
59238+ typeof(a) __a = (a); \
59239+ typeof(b) __b = (b); \
59240+ typeof(d) __d = (d); \
59241+ (void) (&__a == &__b); \
59242+ (void) (&__a == __d); \
59243+ __builtin_add_overflow(__a, __b, __d); \
59244+})
59245+
59246+#define check_sub_overflow(a, b, d) ({ \
59247+ typeof(a) __a = (a); \
59248+ typeof(b) __b = (b); \
59249+ typeof(d) __d = (d); \
59250+ (void) (&__a == &__b); \
59251+ (void) (&__a == __d); \
59252+ __builtin_sub_overflow(__a, __b, __d); \
59253+})
59254+
59255+#define check_mul_overflow(a, b, d) ({ \
59256+ typeof(a) __a = (a); \
59257+ typeof(b) __b = (b); \
59258+ typeof(d) __d = (d); \
59259+ (void) (&__a == &__b); \
59260+ (void) (&__a == __d); \
59261+ __builtin_mul_overflow(__a, __b, __d); \
59262+})
59263+
59264+#else
59265+
59266+
59267+/* Checking for unsigned overflow is relatively easy without causing UB. */
59268+#define __unsigned_add_overflow(a, b, d) ({ \
59269+ typeof(a) __a = (a); \
59270+ typeof(b) __b = (b); \
59271+ typeof(d) __d = (d); \
59272+ (void) (&__a == &__b); \
59273+ (void) (&__a == __d); \
59274+ *__d = __a + __b; \
59275+ *__d < __a; \
59276+})
59277+#define __unsigned_sub_overflow(a, b, d) ({ \
59278+ typeof(a) __a = (a); \
59279+ typeof(b) __b = (b); \
59280+ typeof(d) __d = (d); \
59281+ (void) (&__a == &__b); \
59282+ (void) (&__a == __d); \
59283+ *__d = __a - __b; \
59284+ __a < __b; \
59285+})
59286+/*
59287+ * If one of a or b is a compile-time constant, this avoids a division.
59288+ */
59289+#define __unsigned_mul_overflow(a, b, d) ({ \
59290+ typeof(a) __a = (a); \
59291+ typeof(b) __b = (b); \
59292+ typeof(d) __d = (d); \
59293+ (void) (&__a == &__b); \
59294+ (void) (&__a == __d); \
59295+ *__d = __a * __b; \
59296+ __builtin_constant_p(__b) ? \
59297+ __b > 0 && __a > type_max(typeof(__a)) / __b : \
59298+ __a > 0 && __b > type_max(typeof(__b)) / __a; \
59299+})
59300+
59301+/*
59302+ * For signed types, detecting overflow is much harder, especially if
59303+ * we want to avoid UB. But the interface of these macros is such that
59304+ * we must provide a result in *d, and in fact we must produce the
59305+ * result promised by gcc's builtins, which is simply the possibly
59306+ * wrapped-around value. Fortunately, we can just formally do the
59307+ * operations in the widest relevant unsigned type (u64) and then
59308+ * truncate the result - gcc is smart enough to generate the same code
59309+ * with and without the (u64) casts.
59310+ */
59311+
59312+/*
59313+ * Adding two signed integers can overflow only if they have the same
59314+ * sign, and overflow has happened iff the result has the opposite
59315+ * sign.
59316+ */
59317+#define __signed_add_overflow(a, b, d) ({ \
59318+ typeof(a) __a = (a); \
59319+ typeof(b) __b = (b); \
59320+ typeof(d) __d = (d); \
59321+ (void) (&__a == &__b); \
59322+ (void) (&__a == __d); \
59323+ *__d = (u64)__a + (u64)__b; \
59324+ (((~(__a ^ __b)) & (*__d ^ __a)) \
59325+ & type_min(typeof(__a))) != 0; \
59326+})
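The sign rule stated above can be checked with a standalone snippet (illustrative only; the macro does the equivalent test through type_min(), and the cast-to-unsigned wraparound assumes the usual two's-complement targets):

#include <stdio.h>
#include <stdint.h>

/* Overflow iff the operands share a sign and the wrapped result does not. */
static int s32_add_overflows(int32_t a, int32_t b, int32_t *res)
{
	*res = (int32_t)((uint32_t)a + (uint32_t)b);	/* wrap without UB */
	return ((~(a ^ b)) & (*res ^ a)) < 0;		/* sign bit set? */
}

int main(void)
{
	int32_t r;

	printf("%d\n", s32_add_overflows(2000000000, 2000000000, &r));	/* 1 */
	printf("%d\n", s32_add_overflows(-5, 3, &r));			/* 0 */
	printf("%d\n", s32_add_overflows(INT32_MIN, -1, &r));		/* 1 */
	return 0;
}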
59327+
59328+/*
59329+ * Subtraction is similar, except that overflow can now happen only
59330+ * when the signs are opposite. In this case, overflow has happened if
59331+ * the result has the opposite sign of a.
59332+ */
59333+#define __signed_sub_overflow(a, b, d) ({ \
59334+ typeof(a) __a = (a); \
59335+ typeof(b) __b = (b); \
59336+ typeof(d) __d = (d); \
59337+ (void) (&__a == &__b); \
59338+ (void) (&__a == __d); \
59339+ *__d = (u64)__a - (u64)__b; \
59340+ ((((__a ^ __b)) & (*__d ^ __a)) \
59341+ & type_min(typeof(__a))) != 0; \
59342+})
59343+
59344+/*
59345+ * Signed multiplication is rather hard. gcc always follows C99, so
59346+ * division is truncated towards 0. This means that we can write the
59347+ * overflow check like this:
59348+ *
59349+ * (a > 0 && (b > MAX/a || b < MIN/a)) ||
59350+ * (a < -1 && (b > MIN/a || b < MAX/a)) ||
59351+ * (a == -1 && b == MIN)
59352+ *
59353+ * The redundant casts of -1 are to silence an annoying -Wtype-limits
59354+ * (included in -Wextra) warning: When the type is u8 or u16, the
59355+ * __b_c_e in check_mul_overflow obviously selects
59356+ * __unsigned_mul_overflow, but unfortunately gcc still parses this
59357+ * code and warns about the limited range of __b.
59358+ */
59359+
59360+#define __signed_mul_overflow(a, b, d) ({ \
59361+ typeof(a) __a = (a); \
59362+ typeof(b) __b = (b); \
59363+ typeof(d) __d = (d); \
59364+ typeof(a) __tmax = type_max(typeof(a)); \
59365+ typeof(a) __tmin = type_min(typeof(a)); \
59366+ (void) (&__a == &__b); \
59367+ (void) (&__a == __d); \
59368+ *__d = (u64)__a * (u64)__b; \
59369+ (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
59370+ (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
59371+ (__b == (typeof(__b))-1 && __a == __tmin); \
59372+})
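
A worked sketch of the one case the division test cannot express (editorial aside, not part of the patch): INT_MIN * -1 does not fit in an int, and the wrapped result stored in *d is INT_MIN again.

	int prod;
	bool ovf = __signed_mul_overflow(INT_MIN, -1, &prod); /* ovf == true, prod == INT_MIN */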
59373+
59374+
59375+#define check_add_overflow(a, b, d) \
59376+ __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \
59377+ __signed_add_overflow(a, b, d), \
59378+ __unsigned_add_overflow(a, b, d))
59379+
59380+#define check_sub_overflow(a, b, d) \
59381+ __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \
59382+ __signed_sub_overflow(a, b, d), \
59383+ __unsigned_sub_overflow(a, b, d))
59384+
59385+#define check_mul_overflow(a, b, d) \
59386+ __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \
59387+ __signed_mul_overflow(a, b, d), \
59388+ __unsigned_mul_overflow(a, b, d))
59389+
59390+
59391+#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
59392+
59393+/** check_shl_overflow() - Calculate a left-shifted value and check overflow
59394+ *
59395+ * @a: Value to be shifted
59396+ * @s: How many bits left to shift
59397+ * @d: Pointer to where to store the result
59398+ *
59399+ * Computes *@d = (@a << @s)
59400+ *
59401+ * Returns true if '*d' cannot hold the result or when 'a << s' doesn't
59402+ * make sense. Example conditions:
59403+ * - 'a << s' causes bits to be lost when stored in *d.
59404+ * - 's' is garbage (e.g. negative) or so large that the result of
59405+ * 'a << s' is guaranteed to be 0.
59406+ * - 'a' is negative.
59407+ * - 'a << s' sets the sign bit, if any, in '*d'.
59408+ *
59409+ * '*d' will hold the results of the attempted shift, but is not
59410+ * considered "safe for use" if false is returned.
59411+ */
59412+#define check_shl_overflow(a, s, d) ({ \
59413+ typeof(a) _a = a; \
59414+ typeof(s) _s = s; \
59415+ typeof(d) _d = d; \
59416+ u64 _a_full = _a; \
59417+ unsigned int _to_shift = \
59418+ _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \
59419+ *_d = (_a_full << _to_shift); \
59420+ (_to_shift != _s || *_d < 0 || _a < 0 || \
59421+ (*_d >> _to_shift) != _a); \
59422+})
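
A minimal usage sketch (editorial aside, not part of the patch; `bit` is a hypothetical, possibly untrusted shift count): building a u32 mask only when the shift is representable.

	u32 mask;

	if (check_shl_overflow(1U, bit, &mask))
		mask = 0; /* bit was negative, >= 32, or the set bit would be lost */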
59423+
59424+/**
59425+ * array_size() - Calculate size of 2-dimensional array.
59426+ *
59427+ * @a: dimension one
59428+ * @b: dimension two
59429+ *
59430+ * Calculates size of 2-dimensional array: @a * @b.
59431+ *
59432+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
59433+ * overflow.
59434+ */
59435+static inline __must_check size_t array_size(size_t a, size_t b)
59436+{
59437+ size_t bytes;
59438+
59439+ if (check_mul_overflow(a, b, &bytes))
59440+ return SIZE_MAX;
59441+
59442+ return bytes;
59443+}
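
A minimal allocation sketch (editorial aside, not part of the patch; `n` and `struct elem` are hypothetical): because array_size() returns SIZE_MAX on overflow, kmalloc() simply fails instead of handing back an undersized buffer.

	struct elem *tbl = kmalloc(array_size(n, sizeof(*tbl)), GFP_KERNEL);

	if (!tbl)
		return -ENOMEM;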
59444+
59445+/**
59446+ * array3_size() - Calculate size of 3-dimensional array.
59447+ *
59448+ * @a: dimension one
59449+ * @b: dimension two
59450+ * @c: dimension three
59451+ *
59452+ * Calculates size of 3-dimensional array: @a * @b * @c.
59453+ *
59454+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
59455+ * overflow.
59456+ */
59457+static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
59458+{
59459+ size_t bytes;
59460+
59461+ if (check_mul_overflow(a, b, &bytes))
59462+ return SIZE_MAX;
59463+ if (check_mul_overflow(bytes, c, &bytes))
59464+ return SIZE_MAX;
59465+
59466+ return bytes;
59467+}
59468+
59469+static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c)
59470+{
59471+ size_t bytes;
59472+
59473+ if (check_mul_overflow(n, size, &bytes))
59474+ return SIZE_MAX;
59475+ if (check_add_overflow(bytes, c, &bytes))
59476+ return SIZE_MAX;
59477+
59478+ return bytes;
59479+}
59480+
59481+/**
59482+ * struct_size() - Calculate size of structure with trailing array.
59483+ * @p: Pointer to the structure.
59484+ * @member: Name of the array member.
59485+ * @n: Number of elements in the array.
59486+ *
59487+ * Calculates size of memory needed for structure @p followed by an
59488+ * array of @n @member elements.
59489+ *
59490+ * Return: number of bytes needed or SIZE_MAX on overflow.
59491+ */
59492+#define struct_size(p, member, n) \
59493+ __ab_c_size(n, \
59494+ sizeof(*(p)->member) + __must_be_array((p)->member),\
59495+ sizeof(*(p)))
59496+
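A minimal sketch of the intended use (editorial aside, not part of the patch; the structure and `n` are hypothetical): the header and a trailing flexible array are allocated with one overflow-checked size.

	struct vf_vlan_table {
		u32 count;
		u16 vid[];	/* trailing flexible array */
	};

	struct vf_vlan_table *t = kzalloc(struct_size(t, vid, n), GFP_KERNEL);
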
59497+#endif /* __LINUX_OVERFLOW_H */
59498diff --git a/drivers/net/ethernet/intel/i40e/kcompat_vfd.c b/drivers/net/ethernet/intel/i40e/kcompat_vfd.c
59499new file mode 100644
59500index 000000000..ce206f10e
59501--- /dev/null
59502+++ b/drivers/net/ethernet/intel/i40e/kcompat_vfd.c
59503@@ -0,0 +1,2550 @@
59504+// SPDX-License-Identifier: GPL-2.0
59505+/* Copyright(c) 2013 - 2020 Intel Corporation. */
59506+
59507+#include "kcompat.h"
59508+#include "kcompat_vfd.h"
59509+
59510+#define to_dev(obj) container_of(obj, struct device, kobj)
59511+
59512+const struct vfd_ops *vfd_ops = NULL;
59513+
59514+/**
59515+ * __get_pf_pdev - helper function to get the pdev
59516+ * @kobj: kobject passed
59517+ * @pdev: PCI device information struct
59518+ */
59519+static int __get_pf_pdev(struct kobject *kobj, struct pci_dev **pdev)
59520+{
59521+ struct device *dev;
59522+
59523+ if (!kobj->parent)
59524+ return -EINVAL;
59525+
59526+ /* get pdev */
59527+ dev = to_dev(kobj->parent);
59528+ *pdev = to_pci_dev(dev);
59529+
59530+ return 0;
59531+}
59532+
59533+/**
59534+ * __get_pdev_and_vfid - helper function to get the pdev and the vf id
59535+ * @kobj: kobject passed
59536+ * @pdev: PCI device information struct
59537+ * @vf_id: VF id of the VF under consideration
59538+ */
59539+static int __get_pdev_and_vfid(struct kobject *kobj, struct pci_dev **pdev,
59540+ int *vf_id)
59541+{
59542+ struct device *dev;
59543+
59544+ if (!kobj->parent->parent)
59545+ return -EINVAL;
59546+
59547+ /* get pdev */
59548+ dev = to_dev(kobj->parent->parent);
59549+ *pdev = to_pci_dev(dev);
59550+
59551+ /* get vf_id */
59552+ if (kstrtoint(kobj->name, 10, vf_id) != 0) {
59553+ dev_err(&(*pdev)->dev, "Failed to convert %s to vf_id\n",
59554+ kobj->name);
59555+ return -EINVAL;
59556+ }
59557+
59558+ return 0;
59559+}
59560+
59561+/**
59562+ * __parse_bool_data - helper function to parse boolean data
59563+ * @pdev: PCI device information struct
59564+ * @buff: buffer with input data
59565+ * @attr_name: name of the attribute
59566+ * @data: pointer to output data
59567+ */
59568+static int __parse_bool_data(struct pci_dev *pdev, const char *buff,
59569+ const char *attr_name, bool *data)
59570+{
59571+ if (sysfs_streq("on", buff)) {
59572+ *data = true;
59573+ } else if (sysfs_streq("off", buff)) {
59574+ *data = false;
59575+ } else {
59576+ dev_err(&pdev->dev, "set %s: invalid input string", attr_name);
59577+ return -EINVAL;
59578+ }
59579+ return 0;
59580+}
59581+
59582+/**
59583+ * __parse_egress_ingress_input - helper function for ingress/egress_mirror attributes
59584+ * @pdev: PCI device information struct
59585+ * @buff: buffer with input data
59586+ * @attr_name: name of the attribute
59587+ * @data_new: pointer to input data merged with the old data
59588+ * @data_old: pointer to old data of the attribute
59589+ *
59590+ * Get the input data for egress_mirror or ingress_mirror attribute in the form
59591+ * "add <number>" or "rem <number>".
59592+ * Set the output data to "off" if, for "rem <number>", <number> matches the old data.
59593+ *
59594+ */
59595+static int __parse_egress_ingress_input(struct pci_dev *pdev, const char *buff,
59596+ const char *attr_name, int *data_new,
59597+ int *data_old)
59598+{
59599+ int ret = 0;
59600+ char *p;
59601+
59602+ if (strstr(buff, "add")) {
59603+ p = strstr(buff, "add");
59604+
59605+ ret = kstrtoint(p + sizeof("add"), 10, data_new);
59606+ if (ret) {
59607+ dev_err(&pdev->dev,
59608+ "add %s: input error %d\n", attr_name, ret);
59609+ return ret;
59610+ }
59611+ } else if (strstr(buff, "rem")) {
59612+ p = strstr(buff, "rem");
59613+
59614+ ret = kstrtoint(p + sizeof("rem"), 10, data_new);
59615+ if (ret) {
59616+ dev_err(&pdev->dev,
59617+ "rem %s: input error %d\n", attr_name, ret);
59618+ return ret;
59619+ }
59620+
59621+ if (*data_new == *data_old) {
59622+ if (!strcmp(attr_name, "egress_mirror"))
59623+ *data_new = VFD_EGRESS_MIRROR_OFF;
59624+ else if (!strcmp(attr_name, "ingress_mirror"))
59625+ *data_new = VFD_INGRESS_MIRROR_OFF;
59626+ } else {
59627+ dev_err(&pdev->dev,
59628+ "rem %s: input doesn't match current value",
59629+ attr_name);
59630+ return -EINVAL;
59631+ }
59632+ } else {
59633+ dev_err(&pdev->dev, "set %s: invalid input string", attr_name);
59634+ return -EINVAL;
59635+ }
59636+
59637+ return ret;
59638+}
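
A behaviour sketch (editorial aside, not part of the patch; pdev and the VF numbers are hypothetical): with VF 3 currently configured as the mirror destination, "rem 3" resolves to the "off" sentinel, while "rem 5" would be rejected because it does not match the current value.

	int err, new_dst, old_dst = 3;

	/* err == 0 and new_dst == VFD_EGRESS_MIRROR_OFF */
	err = __parse_egress_ingress_input(pdev, "rem 3", "egress_mirror",
					   &new_dst, &old_dst);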
59639+
59640+/**
59641+ * __parse_add_rem_bitmap - helper function to parse bitmap data
59642+ * @pdev: PCI device information struct
59643+ * @buff: buffer with input data
59644+ * @attr_name: name of the attribute
59645+ * @data_new: pointer to input data merged with the old data
59646+ * @data_old: pointer to old data of the attribute
59647+ *
59648+ * If passed "add": set data_new to "data_old | data_input"
59649+ * If passed "rem": set data_new to "data_old & ~data_input"
59650+ */
59651+static int __parse_add_rem_bitmap(struct pci_dev *pdev, const char *buff,
59652+ const char *attr_name,
59653+ unsigned long *data_new,
59654+ unsigned long *data_old)
59655+{
59656+ int ret = 0;
59657+ char *p;
59658+
59659+ if (strstr(buff, "add")) {
59660+ p = strstr(buff, "add");
59661+ bitmap_zero(data_new, VLAN_N_VID);
59662+
59663+ ret = bitmap_parselist(p + sizeof("add"), data_new, VLAN_N_VID);
59664+ if (ret) {
59665+ dev_err(&pdev->dev,
59666+ "add %s: input error %d\n", attr_name, ret);
59667+ return ret;
59668+ }
59669+
59670+ bitmap_or(data_new, data_new, data_old, VLAN_N_VID);
59671+ } else if (strstr(buff, "rem")) {
59672+ p = strstr(buff, "rem");
59673+ bitmap_zero(data_new, VLAN_N_VID);
59674+
59675+ ret = bitmap_parselist(p + sizeof("rem"), data_new, VLAN_N_VID);
59676+ if (ret) {
59677+ dev_err(&pdev->dev,
59678+ "rem %s: input error %d\n", attr_name, ret);
59679+ return ret;
59680+ }
59681+
59682+ /* new = old & ~rem */
59683+ bitmap_andnot(data_new, data_old, data_new, VLAN_N_VID);
59684+ } else {
59685+ dev_err(&pdev->dev, "set %s: invalid input string", attr_name);
59686+ return -EINVAL;
59687+ }
59688+ return 0;
59689+}
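
A behaviour sketch (editorial aside, not part of the patch; pdev and the VLAN IDs are hypothetical): merging "add 100-102" into an existing trunk bitmap keeps the previously configured VLAN and adds the new range.

	DECLARE_BITMAP(old_vlans, VLAN_N_VID);
	DECLARE_BITMAP(new_vlans, VLAN_N_VID);
	int err;

	bitmap_zero(old_vlans, VLAN_N_VID);
	set_bit(42, old_vlans);

	/* on success, new_vlans has bits 42 and 100-102 set */
	err = __parse_add_rem_bitmap(pdev, "add 100-102", "trunk",
				     new_vlans, old_vlans);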
59690+
59691+/**
59692+ * __parse_promisc_input - helper function for promisc attributes
59693+ * @buff: buffer with input data
59694+ * @count: size of buff
59695+ * @cmd: return pointer to cmd into buff
59696+ * @subcmd: return pointer to subcmd into buff
59697+ *
59698+ * Get the input data for promisc attributes in the form "add/rem mcast/ucast".
59699+ */
59700+static int __parse_promisc_input(const char *buff, size_t count,
59701+ const char **cmd, const char **subcmd)
59702+{
59703+ size_t idx = 0;
59704+
59705+ /* Remove start spaces */
59706+ while (buff[idx] == ' ' && idx < count)
59707+ idx++;
59708+
59709+ /* Parse cmd */
59710+ if (strncmp(&buff[idx], "add", strlen("add")) == 0) {
59711+ *cmd = &buff[idx];
59712+ idx += strlen("add");
59713+ } else if (strncmp(&buff[idx], "rem", strlen("rem")) == 0) {
59714+ *cmd = &buff[idx];
59715+ idx += strlen("rem");
59716+ } else {
59717+ return -EINVAL;
59718+ }
59719+
59720+ if (buff[idx++] != ' ')
59721+ return -EINVAL;
59722+
59723+ /* Remove spaces between cmd */
59724+ while (buff[idx] == ' ' && idx < count)
59725+ idx++;
59726+
59727+ /* Parse subcmd */
59728+ if (strncmp(&buff[idx], "ucast", strlen("ucast")) == 0) {
59729+ *subcmd = &buff[idx];
59730+ idx += strlen("ucast");
59731+ } else if (strncmp(&buff[idx], "mcast", strlen("mcast")) == 0) {
59732+ *subcmd = &buff[idx];
59733+ idx += strlen("mcast");
59734+ } else {
59735+ return -EINVAL;
59736+ }
59737+
59738+ /* Remove spaces after subcmd */
59739+ while ((buff[idx] == ' ' || buff[idx] == '\n') && idx < count)
59740+ idx++;
59741+
59742+ if (idx != count)
59743+ return -EINVAL;
59744+
59745+ return 0;
59746+}
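
A behaviour sketch (editorial aside, not part of the patch): for the sysfs write "add ucast\n", cmd ends up pointing at the "add ..." token and subcmd at the "ucast..." token inside the caller's buffer; any other shape of input returns -EINVAL.

	const char *cmd, *subcmd;
	int err;

	err = __parse_promisc_input("add ucast\n", strlen("add ucast\n"),
				    &cmd, &subcmd);
	/* err == 0; cmd starts with "add", subcmd starts with "ucast" */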
59747+
59748+/* Handlers for each VFd operation */
59749+
59750+/**
59751+ * vfd_trunk_show - handler for trunk show function
59752+ * @kobj: kobject being called
59753+ * @attr: struct kobj_attribute
59754+ * @buff: buffer with input data
59755+ *
59756+ * Get current data from driver and copy to buffer
59757+ **/
59758+static ssize_t vfd_trunk_show(struct kobject *kobj,
59759+ struct kobj_attribute *attr, char *buff)
59760+{
59761+ struct pci_dev *pdev;
59762+ int vf_id, ret = 0;
59763+
59764+ DECLARE_BITMAP(data, VLAN_N_VID);
59765+ bitmap_zero(data, VLAN_N_VID);
59766+
59767+ if (!vfd_ops->get_trunk)
59768+ return -EOPNOTSUPP;
59769+
59770+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
59771+ if (ret)
59772+ return ret;
59773+
59774+ ret = vfd_ops->get_trunk(pdev, vf_id, data);
59775+ if (ret)
59776+ ret = bitmap_print_to_pagebuf(1, buff, data, VLAN_N_VID);
59777+
59778+ return ret;
59779+}
59780+
59781+/**
59782+ * vfd_trunk_store - handler for trunk store function
59783+ * @kobj: kobject being called
59784+ * @attr: struct kobj_attribute
59785+ * @buff: buffer with input data
59786+ * @count: size of buff
59787+ *
59788+ * Get current data from driver, compose new data based on input values
59789+ * depending on "add" or "rem" command, and pass new data to the driver to set.
59790+ *
59791+ * On success return count, indicating that we used the whole buffer. On
59792+ * failure return a negative error condition.
59793+ **/
59794+static ssize_t vfd_trunk_store(struct kobject *kobj,
59795+ struct kobj_attribute *attr,
59796+ const char *buff, size_t count)
59797+{
59798+ unsigned long *data_old, *data_new;
59799+ struct pci_dev *pdev;
59800+ int vf_id, ret = 0;
59801+
59802+ if (!vfd_ops->set_trunk || !vfd_ops->get_trunk)
59803+ return -EOPNOTSUPP;
59804+
59805+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
59806+ if (ret)
59807+ return ret;
59808+
59809+ data_old = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long),
59810+ GFP_KERNEL);
59811+ if (!data_old)
59812+ return -ENOMEM;
59813+ data_new = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long),
59814+ GFP_KERNEL);
59815+ if (!data_new) {
59816+ kfree(data_old);
59817+ return -ENOMEM;
59818+ }
59819+
59820+ ret = vfd_ops->get_trunk(pdev, vf_id, data_old);
59821+ if (ret < 0)
59822+ goto err_free;
59823+
59824+ ret = __parse_add_rem_bitmap(pdev, buff, "trunk", data_new, data_old);
59825+ if (ret)
59826+ goto err_free;
59827+
59828+ if (!bitmap_equal(data_new, data_old, VLAN_N_VID))
59829+ ret = vfd_ops->set_trunk(pdev, vf_id, data_new);
59830+
59831+err_free:
59832+ kfree(data_old);
59833+ kfree(data_new);
59834+ return ret ? ret : count;
59835+}
59836+
59837+/**
59838+ * vfd_vlan_mirror_show - handler for vlan_mirror show function
59839+ * @kobj: kobject being called
59840+ * @attr: struct kobj_attribute
59841+ * @buff: buffer with input data
59842+ *
59843+ * Get current data from driver and copy to buffer
59844+ **/
59845+static ssize_t vfd_vlan_mirror_show(struct kobject *kobj,
59846+ struct kobj_attribute *attr, char *buff)
59847+{
59848+ struct pci_dev *pdev;
59849+ int vf_id, ret = 0;
59850+
59851+ DECLARE_BITMAP(data, VLAN_N_VID);
59852+ bitmap_zero(data, VLAN_N_VID);
59853+
59854+ if (!vfd_ops->get_vlan_mirror)
59855+ return -EOPNOTSUPP;
59856+
59857+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
59858+ if (ret)
59859+ return ret;
59860+
59861+ ret = vfd_ops->get_vlan_mirror(pdev, vf_id, data);
59862+ if (ret)
59863+ ret = bitmap_print_to_pagebuf(1, buff, data, VLAN_N_VID);
59864+
59865+ return ret;
59866+}
59867+
59868+/**
59869+ * vfd_vlan_mirror_store - handler for vlan_mirror store function
59870+ * @kobj: kobject being called
59871+ * @attr: struct kobj_attribute
59872+ * @buff: buffer with input data
59873+ * @count: size of buff
59874+ *
59875+ * Get current data from driver, compose new data based on input values
59876+ * depending on "add" or "rem" command, and pass new data to the driver to set.
59877+ *
59878+ * On success return count, indicating that we used the whole buffer. On
59879+ * failure return a negative error condition.
59880+ **/
59881+static ssize_t vfd_vlan_mirror_store(struct kobject *kobj,
59882+ struct kobj_attribute *attr,
59883+ const char *buff, size_t count)
59884+{
59885+ unsigned long *data_old, *data_new;
59886+ struct pci_dev *pdev;
59887+ int vf_id, ret = 0;
59888+
59889+ if (!vfd_ops->set_vlan_mirror || !vfd_ops->get_vlan_mirror)
59890+ return -EOPNOTSUPP;
59891+
59892+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
59893+ if (ret)
59894+ return ret;
59895+
59896+ data_old = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long),
59897+ GFP_KERNEL);
59898+ if (!data_old)
59899+ return -ENOMEM;
59900+ data_new = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long),
59901+ GFP_KERNEL);
59902+ if (!data_new) {
59903+ kfree(data_old);
59904+ return -ENOMEM;
59905+ }
59906+
59907+ ret = vfd_ops->get_vlan_mirror(pdev, vf_id, data_old);
59908+ if (ret < 0)
59909+ goto err_free;
59910+
59911+ ret = __parse_add_rem_bitmap(pdev, buff, "vlan_mirror",
59912+ data_new, data_old);
59913+ if (ret)
59914+ goto err_free;
59915+
59916+ if (!bitmap_equal(data_new, data_old, VLAN_N_VID))
59917+ ret = vfd_ops->set_vlan_mirror(pdev, vf_id, data_new);
59918+
59919+err_free:
59920+ kfree(data_old);
59921+ kfree(data_new);
59922+ return ret ? ret : count;
59923+}
59924+
59925+/**
59926+ * vfd_egress_mirror_show - handler for egress_mirror show function
59927+ * @kobj: kobject being called
59928+ * @attr: struct kobj_attribute
59929+ * @buff: buffer for data
59930+ **/
59931+static ssize_t vfd_egress_mirror_show(struct kobject *kobj,
59932+ struct kobj_attribute *attr, char *buff)
59933+{
59934+ struct pci_dev *pdev;
59935+ int vf_id, ret = 0;
59936+ int data;
59937+
59938+ if (!vfd_ops->get_egress_mirror)
59939+ return -EOPNOTSUPP;
59940+
59941+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
59942+ if (ret)
59943+ return ret;
59944+
59945+ ret = vfd_ops->get_egress_mirror(pdev, vf_id, &data);
59946+ if (ret < 0)
59947+ return ret;
59948+
59949+ if (data == VFD_EGRESS_MIRROR_OFF)
59950+ ret = scnprintf(buff, PAGE_SIZE, "off\n");
59951+ else
59952+ ret = scnprintf(buff, PAGE_SIZE, "%u\n", data);
59953+
59954+ return ret;
59955+}
59956+
59957+/**
59958+ * vfd_egress_mirror_store - handler for egress_mirror store function
59959+ * @kobj: kobject being called
59960+ * @attr: struct kobj_attribute
59961+ * @buff: buffer with input data
59962+ * @count: size of buff
59963+ **/
59964+static ssize_t vfd_egress_mirror_store(struct kobject *kobj,
59965+ struct kobj_attribute *attr,
59966+ const char *buff, size_t count)
59967+{
59968+ int data_new, data_old;
59969+ struct pci_dev *pdev;
59970+ int vf_id, ret = 0;
59971+
59972+ if (!vfd_ops->set_egress_mirror || !vfd_ops->get_egress_mirror)
59973+ return -EOPNOTSUPP;
59974+
59975+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
59976+ if (ret)
59977+ return ret;
59978+
59979+ ret = vfd_ops->get_egress_mirror(pdev, vf_id, &data_old);
59980+ if (ret < 0)
59981+ return ret;
59982+
59983+ ret = __parse_egress_ingress_input(pdev, buff, "egress_mirror",
59984+ &data_new, &data_old);
59985+ if (ret)
59986+ return ret;
59987+	if (data_new == vf_id) {
59988+ dev_err(&pdev->dev, "VF %d: Setting egress_mirror to itself is not allowed\n", vf_id);
59989+ return -EINVAL;
59990+ }
59991+
59992+ if (data_new != data_old)
59993+ ret = vfd_ops->set_egress_mirror(pdev, vf_id, data_new);
59994+
59995+ return ret ? ret : count;
59996+}
59997+
59998+/**
59999+ * vfd_ingress_mirror_show - handler for ingress_mirror show function
60000+ * @kobj: kobject being called
60001+ * @attr: struct kobj_attribute
60002+ * @buff: buffer for data
60003+ **/
60004+static ssize_t vfd_ingress_mirror_show(struct kobject *kobj,
60005+ struct kobj_attribute *attr,
60006+ char *buff)
60007+{
60008+ struct pci_dev *pdev;
60009+ int vf_id, ret = 0;
60010+ int data;
60011+
60012+ if (!vfd_ops->get_ingress_mirror)
60013+ return -EOPNOTSUPP;
60014+
60015+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60016+ if (ret)
60017+ return ret;
60018+
60019+ ret = vfd_ops->get_ingress_mirror(pdev, vf_id, &data);
60020+ if (ret < 0)
60021+ return ret;
60022+
60023+ if (data == VFD_INGRESS_MIRROR_OFF)
60024+ ret = scnprintf(buff, PAGE_SIZE, "off\n");
60025+ else
60026+ ret = scnprintf(buff, PAGE_SIZE, "%u\n", data);
60027+
60028+ return ret;
60029+}
60030+
60031+/**
60032+ * vfd_ingress_mirror_store - handler for ingress_mirror store function
60033+ * @kobj: kobject being called
60034+ * @attr: struct kobj_attribute
60035+ * @buff: buffer with input data
60036+ * @count: size of buff
60037+ **/
60038+static ssize_t vfd_ingress_mirror_store(struct kobject *kobj,
60039+ struct kobj_attribute *attr,
60040+ const char *buff, size_t count)
60041+{
60042+ int data_new, data_old;
60043+ struct pci_dev *pdev;
60044+ int vf_id, ret = 0;
60045+
60046+ if (!vfd_ops->set_ingress_mirror || !vfd_ops->get_ingress_mirror)
60047+ return -EOPNOTSUPP;
60048+
60049+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60050+ if (ret)
60051+ return ret;
60052+
60053+ ret = vfd_ops->get_ingress_mirror(pdev, vf_id, &data_old);
60054+ if (ret < 0)
60055+ return ret;
60056+
60057+ ret = __parse_egress_ingress_input(pdev, buff, "ingress_mirror",
60058+ &data_new, &data_old);
60059+ if (ret)
60060+ return ret;
60061+	if (data_new == vf_id) {
60062+ dev_err(&pdev->dev, "VF %d: Setting ingress_mirror to itself is not allowed\n", vf_id);
60063+ return -EINVAL;
60064+ }
60065+
60066+ if (data_new != data_old)
60067+ ret = vfd_ops->set_ingress_mirror(pdev, vf_id, data_new);
60068+
60069+ return ret ? ret : count;
60070+}
60071+
60072+/**
60073+ * vfd_mac_anti_spoof_show - handler for mac_anti_spoof show function
60074+ * @kobj: kobject being called
60075+ * @attr: struct kobj_attribute
60076+ * @buff: buffer for data
60077+ **/
60078+static ssize_t vfd_mac_anti_spoof_show(struct kobject *kobj,
60079+ struct kobj_attribute *attr,
60080+ char *buff)
60081+{
60082+ struct pci_dev *pdev;
60083+ int vf_id, ret = 0;
60084+ bool data;
60085+
60086+ if (!vfd_ops->get_mac_anti_spoof)
60087+ return -EOPNOTSUPP;
60088+
60089+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60090+ if (ret)
60091+ return ret;
60092+
60093+ ret = vfd_ops->get_mac_anti_spoof(pdev, vf_id, &data);
60094+ if (ret < 0)
60095+ return ret;
60096+
60097+ if (data)
60098+ ret = scnprintf(buff, PAGE_SIZE, "on\n");
60099+ else
60100+ ret = scnprintf(buff, PAGE_SIZE, "off\n");
60101+
60102+ return ret;
60103+}
60104+
60105+/**
60106+ * vfd_mac_anti_spoof_store - handler for mac_anti_spoof store function
60107+ * @kobj: kobject being called
60108+ * @attr: struct kobj_attribute
60109+ * @buff: buffer with input data
60110+ * @count: size of buff
60111+ *
60112+ * On success return count, indicating that we used the whole buffer. On
60113+ * failure return a negative error condition.
60114+ **/
60115+static ssize_t vfd_mac_anti_spoof_store(struct kobject *kobj,
60116+ struct kobj_attribute *attr,
60117+ const char *buff, size_t count)
60118+{
60119+ struct pci_dev *pdev;
60120+ int vf_id, ret = 0;
60121+ bool data_new, data_old;
60122+
60123+ if (!vfd_ops->set_mac_anti_spoof || !vfd_ops->get_mac_anti_spoof)
60124+ return -EOPNOTSUPP;
60125+
60126+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60127+ if (ret)
60128+ return ret;
60129+
60130+ ret = vfd_ops->get_mac_anti_spoof(pdev, vf_id, &data_old);
60131+ if (ret < 0)
60132+ return ret;
60133+
60134+ ret = __parse_bool_data(pdev, buff, "mac_anti_spoof", &data_new);
60135+ if (ret)
60136+ return ret;
60137+
60138+ if (data_new != data_old)
60139+ ret = vfd_ops->set_mac_anti_spoof(pdev, vf_id, data_new);
60140+
60141+ return ret ? ret : count;
60142+}
60143+
60144+/**
60145+ * vfd_vlan_anti_spoof_show - handler for vlan_anti_spoof show function
60146+ * @kobj: kobject being called
60147+ * @attr: struct kobj_attribute
60148+ * @buff: buffer for data
60149+ **/
60150+static ssize_t vfd_vlan_anti_spoof_show(struct kobject *kobj,
60151+ struct kobj_attribute *attr,
60152+ char *buff)
60153+{
60154+ struct pci_dev *pdev;
60155+ int vf_id, ret = 0;
60156+ bool data;
60157+
60158+ if (!vfd_ops->get_vlan_anti_spoof)
60159+ return -EOPNOTSUPP;
60160+
60161+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60162+ if (ret)
60163+ return ret;
60164+
60165+ ret = vfd_ops->get_vlan_anti_spoof(pdev, vf_id, &data);
60166+ if (ret < 0)
60167+ return ret;
60168+
60169+ if (data)
60170+ ret = scnprintf(buff, PAGE_SIZE, "on\n");
60171+ else
60172+ ret = scnprintf(buff, PAGE_SIZE, "off\n");
60173+
60174+ return ret;
60175+}
60176+
60177+/**
60178+ * vfd_vlan_anti_spoof_store - handler for vlan_anti_spoof store function
60179+ * @kobj: kobject being called
60180+ * @attr: struct kobj_attribute
60181+ * @buff: buffer with input data
60182+ * @count: size of buff
60183+ *
60184+ * On success return count, indicating that we used the whole buffer. On
60185+ * failure return a negative error condition.
60186+ **/
60187+static ssize_t vfd_vlan_anti_spoof_store(struct kobject *kobj,
60188+ struct kobj_attribute *attr,
60189+ const char *buff, size_t count)
60190+{
60191+ struct pci_dev *pdev;
60192+ int vf_id, ret = 0;
60193+ bool data_new, data_old;
60194+
60195+ if (!vfd_ops->set_vlan_anti_spoof || !vfd_ops->get_vlan_anti_spoof)
60196+ return -EOPNOTSUPP;
60197+
60198+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60199+ if (ret)
60200+ return ret;
60201+
60202+ ret = vfd_ops->get_vlan_anti_spoof(pdev, vf_id, &data_old);
60203+ if (ret < 0)
60204+ return ret;
60205+
60206+ ret = __parse_bool_data(pdev, buff, "vlan_anti_spoof", &data_new);
60207+ if (ret)
60208+ return ret;
60209+
60210+ if (data_new != data_old)
60211+ ret = vfd_ops->set_vlan_anti_spoof(pdev, vf_id, data_new);
60212+
60213+ return ret ? ret : count;
60214+}
60215+
60216+/**
60217+ * vfd_allow_untagged_show - handler for allow_untagged show function
60218+ * @kobj: kobject being called
60219+ * @attr: struct kobj_attribute
60220+ * @buff: buffer for data
60221+ **/
60222+static ssize_t vfd_allow_untagged_show(struct kobject *kobj,
60223+ struct kobj_attribute *attr,
60224+ char *buff)
60225+{
60226+ struct pci_dev *pdev;
60227+ int vf_id, ret = 0;
60228+ bool data;
60229+
60230+ if (!vfd_ops->get_allow_untagged)
60231+ return -EOPNOTSUPP;
60232+
60233+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60234+ if (ret)
60235+ return ret;
60236+
60237+ ret = vfd_ops->get_allow_untagged(pdev, vf_id, &data);
60238+ if (ret < 0)
60239+ return ret;
60240+
60241+ if (data)
60242+ ret = scnprintf(buff, PAGE_SIZE, "on\n");
60243+ else
60244+ ret = scnprintf(buff, PAGE_SIZE, "off\n");
60245+
60246+ return ret;
60247+}
60248+
60249+/**
60250+ * vfd_allow_untagged_store - handler for allow_untagged store function
60251+ * @kobj: kobject being called
60252+ * @attr: struct kobj_attribute
60253+ * @buff: buffer with input data
60254+ * @count: size of buff
60255+ **/
60256+static ssize_t vfd_allow_untagged_store(struct kobject *kobj,
60257+ struct kobj_attribute *attr,
60258+ const char *buff, size_t count)
60259+{
60260+ struct pci_dev *pdev;
60261+ int vf_id, ret = 0;
60262+ bool data_new, data_old;
60263+
60264+ if (!vfd_ops->set_allow_untagged || !vfd_ops->get_allow_untagged)
60265+ return -EOPNOTSUPP;
60266+
60267+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60268+ if (ret)
60269+ return ret;
60270+
60271+ ret = vfd_ops->get_allow_untagged(pdev, vf_id, &data_old);
60272+ if (ret < 0)
60273+ return ret;
60274+
60275+ ret = __parse_bool_data(pdev, buff, "allow_untagged", &data_new);
60276+ if (ret)
60277+ return ret;
60278+
60279+ if (data_new != data_old)
60280+ ret = vfd_ops->set_allow_untagged(pdev, vf_id, data_new);
60281+
60282+ return ret ? ret : count;
60283+}
60284+
60285+/**
60286+ * vfd_loopback_show - handler for loopback show function
60287+ * @kobj: kobject being called
60288+ * @attr: struct kobj_attribute
60289+ * @buff: buffer for data
60290+ **/
60291+static ssize_t vfd_loopback_show(struct kobject *kobj,
60292+ struct kobj_attribute *attr, char *buff)
60293+{
60294+ struct pci_dev *pdev;
60295+ int vf_id, ret = 0;
60296+ bool data;
60297+
60298+ if (!vfd_ops->get_loopback)
60299+ return -EOPNOTSUPP;
60300+
60301+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60302+ if (ret)
60303+ return ret;
60304+
60305+ ret = vfd_ops->get_loopback(pdev, vf_id, &data);
60306+ if (ret < 0)
60307+ return ret;
60308+
60309+ if (data)
60310+ ret = scnprintf(buff, PAGE_SIZE, "on\n");
60311+ else
60312+ ret = scnprintf(buff, PAGE_SIZE, "off\n");
60313+
60314+ return ret;
60315+}
60316+
60317+/**
60318+ * vfd_loopback_store - handler for loopback store function
60319+ * @kobj: kobject being called
60320+ * @attr: struct kobj_attribute
60321+ * @buff: buffer with input data
60322+ * @count: size of buff
60323+ **/
60324+static ssize_t vfd_loopback_store(struct kobject *kobj,
60325+ struct kobj_attribute *attr,
60326+ const char *buff, size_t count)
60327+{
60328+ struct pci_dev *pdev;
60329+ int vf_id, ret = 0;
60330+ bool data_new, data_old;
60331+
60332+ if (!vfd_ops->set_loopback || !vfd_ops->get_loopback)
60333+ return -EOPNOTSUPP;
60334+
60335+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60336+ if (ret)
60337+ return ret;
60338+
60339+ ret = vfd_ops->get_loopback(pdev, vf_id, &data_old);
60340+ if (ret < 0)
60341+ return ret;
60342+
60343+ ret = __parse_bool_data(pdev, buff, "loopback", &data_new);
60344+ if (ret)
60345+ return ret;
60346+
60347+ if (data_new != data_old)
60348+ ret = vfd_ops->set_loopback(pdev, vf_id, data_new);
60349+
60350+ return ret ? ret : count;
60351+}
60352+
60353+/**
60354+ * vfd_mac_show - handler for mac show function
60355+ * @kobj: kobject being called
60356+ * @attr: struct kobj_attribute
60357+ * @buff: buffer for data
60358+ **/
60359+static ssize_t vfd_mac_show(struct kobject *kobj, struct kobj_attribute *attr,
60360+ char *buff)
60361+{
60362+ u8 macaddr[ETH_ALEN];
60363+ struct pci_dev *pdev;
60364+ int vf_id, ret = 0;
60365+
60366+ if (!vfd_ops->get_mac)
60367+ return -EOPNOTSUPP;
60368+
60369+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60370+ if (ret)
60371+ return ret;
60372+
60373+ ret = vfd_ops->get_mac(pdev, vf_id, macaddr);
60374+ if (ret < 0)
60375+ return ret;
60376+
60377+ ret = scnprintf(buff, PAGE_SIZE, "%pM\n", macaddr);
60378+
60379+ return ret;
60380+}
60381+
60382+/**
60383+ * vfd_mac_store - handler for mac store function
60384+ * @kobj: kobject being called
60385+ * @attr: struct kobj_attribute
60386+ * @buff: buffer with input data
60387+ * @count: size of buff
60388+ **/
60389+static ssize_t vfd_mac_store(struct kobject *kobj,
60390+ struct kobj_attribute *attr,
60391+ const char *buff, size_t count)
60392+{
60393+ u8 macaddr[ETH_ALEN];
60394+ u8 macaddr_old[ETH_ALEN];
60395+ struct pci_dev *pdev;
60396+ int vf_id, ret = 0;
60397+
60398+ if (!vfd_ops->set_mac || !vfd_ops->get_mac)
60399+ return -EOPNOTSUPP;
60400+
60401+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60402+ if (ret)
60403+ return ret;
60404+
60405+ ret = vfd_ops->get_mac(pdev, vf_id, macaddr_old);
60406+ if (ret < 0)
60407+ return ret;
60408+
60409+ ret = sscanf(buff, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
60410+ &macaddr[0], &macaddr[1], &macaddr[2],
60411+ &macaddr[3], &macaddr[4], &macaddr[5]);
60412+
60413+ if (ret != 6)
60414+ return -EINVAL;
60415+
60416+ if (!ether_addr_equal(macaddr, macaddr_old))
60417+ ret = vfd_ops->set_mac(pdev, vf_id, macaddr);
60418+
60419+ return ret ? ret : count;
60420+}
60421+
60422+/**
60423+ * vfd_mac_list_show - handler for mac_list show function
60424+ * @kobj: kobject being called
60425+ * @attr: struct kobj_attribute
60426+ * @buff: buffer for data
60427+ *
60428+ * This function also frees the mac_list entries that the driver's get_mac_list callback allocated.
60429+ *
60430+ **/
60431+static ssize_t vfd_mac_list_show(struct kobject *kobj,
60432+ struct kobj_attribute *attr, char *buff)
60433+{
60434+ unsigned int mac_num_allowed, mac_num_list, mac_num_count;
60435+ const char *overflow_msg = "... and more\n";
60436+ unsigned int mac_msg_len = 3*ETH_ALEN;
60437+ struct list_head *pos, *n;
60438+ struct pci_dev *pdev;
60439+ int vf_id, ret;
60440+ char *written;
60441+ LIST_HEAD(mac_list);
60442+
60443+ if (!vfd_ops->get_mac_list)
60444+ return -EOPNOTSUPP;
60445+
60446+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60447+ if (ret)
60448+ return ret;
60449+
60450+ ret = vfd_ops->get_mac_list(pdev, vf_id, &mac_list);
60451+ if (ret < 0)
60452+ goto err_free;
60453+
60454+ mac_num_list = 0;
60455+ mac_num_count = 0;
60456+ list_for_each_safe(pos, n, &mac_list)
60457+ mac_num_list++;
60458+
60459+ mac_num_allowed = (PAGE_SIZE - 1) / mac_msg_len;
60460+ if (mac_num_list > mac_num_allowed)
60461+ mac_num_allowed = (PAGE_SIZE - 1 - strlen(overflow_msg)) /
60462+ mac_msg_len;
60463+
60464+ written = buff;
60465+ list_for_each_safe(pos, n, &mac_list) {
60466+ struct vfd_macaddr *mac = NULL;
60467+
60468+ mac_num_count++;
60469+ mac = list_entry(pos, struct vfd_macaddr, list);
60470+ if (mac_num_count > mac_num_allowed) {
60471+ ret += scnprintf(written, PAGE_SIZE - ret,
60472+ "%s", overflow_msg);
60473+ goto err_free;
60474+ } else if (list_is_last(pos, &mac_list)) {
60475+ ret += scnprintf(written, PAGE_SIZE - ret,
60476+ "%pM\n", mac->mac);
60477+ } else {
60478+ ret += scnprintf(written, PAGE_SIZE - ret,
60479+ "%pM,", mac->mac);
60480+ }
60481+ written += mac_msg_len;
60482+ }
60483+
60484+err_free:
60485+ list_for_each_safe(pos, n, &mac_list) {
60486+ struct vfd_macaddr *mac = NULL;
60487+
60488+ mac = list_entry(pos, struct vfd_macaddr, list);
60489+ list_del(pos);
60490+ kfree(mac);
60491+ }
60492+ return ret;
60493+}
60494+
60495+/**
60496+ * vfd_mac_list_store - handler for mac_list store function
60497+ * @kobj: kobject being called
60498+ * @attr: struct kobj_attribute
60499+ * @buff: buffer with input data
60500+ * @count: size of buff
60501+ *
60502+ * Parse the input MAC list into a linked list and, depending on the "add" or "rem"
60503+ * command, pass it to the driver to either add the MACs to or remove them from the list.
60504+ *
60505+ * The mac_list entries allocated here for the parsed input are freed before returning.
60506+ *
60507+ **/
60508+static ssize_t vfd_mac_list_store(struct kobject *kobj,
60509+ struct kobj_attribute *attr,
60510+ const char *buff, size_t count)
60511+{
60512+ struct list_head *pos, *n;
60513+ struct pci_dev *pdev;
60514+ u8 macaddr[ETH_ALEN];
60515+ int vf_id, ret;
60516+ size_t shift;
60517+ bool add;
60518+ LIST_HEAD(mac_list_inp);
60519+
60520+ if (!vfd_ops->add_macs_to_list || !vfd_ops->rem_macs_from_list)
60521+ return -EOPNOTSUPP;
60522+
60523+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60524+ if (ret)
60525+ return ret;
60526+
60527+ if (strstr(buff, "add")) {
60528+ shift = sizeof("add");
60529+ add = true;
60530+ } else if (strstr(buff, "rem")) {
60531+ shift = sizeof("rem");
60532+ add = false;
60533+ } else {
60534+ dev_err(&pdev->dev, "Invalid input string");
60535+ ret = -EINVAL;
60536+ goto err_free;
60537+ }
60538+
60539+ /* Get input data */
60540+ for (;;) {
60541+ struct vfd_macaddr *mac_new;
60542+
60543+ if (*(buff + shift) == ' ' || *(buff + shift) == ',') {
60544+ shift++;
60545+ continue;
60546+ }
60547+
60548+ ret = sscanf(buff + shift,
60549+ "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
60550+ &macaddr[0], &macaddr[1], &macaddr[2],
60551+ &macaddr[3], &macaddr[4], &macaddr[5]);
60552+
60553+ if (ret != 6)
60554+ break;
60555+
60556+ if (!is_valid_ether_addr(macaddr)) {
60557+ shift += 3*ETH_ALEN;
60558+ continue;
60559+ }
60560+
60561+ mac_new = kmalloc(sizeof(struct vfd_macaddr), GFP_KERNEL);
60562+ if (!mac_new) {
60563+ ret = -ENOMEM;
60564+ goto err_free;
60565+ }
60566+
60567+ ether_addr_copy(mac_new->mac, macaddr);
60568+ list_add(&mac_new->list, &mac_list_inp);
60569+
60570+ shift += 3*ETH_ALEN;
60571+ }
60572+
60573+ if (add)
60574+ ret = vfd_ops->add_macs_to_list(pdev, vf_id, &mac_list_inp);
60575+ else
60576+ ret = vfd_ops->rem_macs_from_list(pdev, vf_id, &mac_list_inp);
60577+
60578+err_free:
60579+ list_for_each_safe(pos, n, &mac_list_inp) {
60580+ struct vfd_macaddr *mac = NULL;
60581+
60582+ mac = list_entry(pos, struct vfd_macaddr, list);
60583+ list_del(pos);
60584+ kfree(mac);
60585+ }
60586+ return ret ? ret : count;
60587+}
60588+
60589+/**
60590+ * vfd_promisc_show - handler for promisc show function
60591+ * @kobj: kobject being called
60592+ * @attr: struct kobj_attribute
60593+ * @buff: buffer for data
60594+ **/
60595+static ssize_t vfd_promisc_show(struct kobject *kobj,
60596+ struct kobj_attribute *attr, char *buff)
60597+{
60598+ struct pci_dev *pdev;
60599+ int vf_id, ret = 0;
60600+ u8 data;
60601+
60602+ if (!vfd_ops->get_promisc)
60603+ return -EOPNOTSUPP;
60604+
60605+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60606+ if (ret)
60607+ return ret;
60608+
60609+ ret = vfd_ops->get_promisc(pdev, vf_id, &data);
60610+ if (ret < 0)
60611+ return ret;
60612+
60613+ if (data == VFD_PROMISC_UNICAST)
60614+ ret = scnprintf(buff, PAGE_SIZE, "ucast\n");
60615+ else if (data == VFD_PROMISC_MULTICAST)
60616+ ret = scnprintf(buff, PAGE_SIZE, "mcast\n");
60617+ else if (data == (VFD_PROMISC_UNICAST | VFD_PROMISC_MULTICAST))
60618+ ret = scnprintf(buff, PAGE_SIZE, "ucast, mcast\n");
60619+ else if (data == VFD_PROMISC_OFF)
60620+ ret = scnprintf(buff, PAGE_SIZE, "off\n");
60621+
60622+ return ret;
60623+}
60624+
60625+/**
60626+ * vfd_promisc_store - handler for promisc store function
60627+ * @kobj: kobject being called
60628+ * @attr: struct kobj_attribute
60629+ * @buff: buffer with input data
60630+ * @count: size of buff
60631+ **/
60632+static ssize_t vfd_promisc_store(struct kobject *kobj,
60633+ struct kobj_attribute *attr,
60634+ const char *buff, size_t count)
60635+{
60636+ u8 data_new, data_old;
60637+ struct pci_dev *pdev;
60638+ const char *subcmd;
60639+ const char *cmd;
60640+ int vf_id, ret;
60641+
60642+ if (!vfd_ops->get_promisc || !vfd_ops->set_promisc)
60643+ return -EOPNOTSUPP;
60644+
60645+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60646+ if (ret)
60647+ return ret;
60648+
60649+ ret = vfd_ops->get_promisc(pdev, vf_id, &data_old);
60650+ if (ret < 0)
60651+ return ret;
60652+
60653+ ret = __parse_promisc_input(buff, count, &cmd, &subcmd);
60654+ if (ret)
60655+ goto promisc_err;
60656+
60657+ if (strncmp(cmd, "add", strlen("add")) == 0) {
60658+ if (strncmp(subcmd, "ucast", strlen("ucast")) == 0)
60659+ data_new = data_old | VFD_PROMISC_UNICAST;
60660+ else if (strncmp(subcmd, "mcast", strlen("mcast")) == 0)
60661+ data_new = data_old | VFD_PROMISC_MULTICAST;
60662+ else
60663+ goto promisc_err;
60664+ } else if (strncmp(cmd, "rem", strlen("rem")) == 0) {
60665+ if (strncmp(subcmd, "ucast", strlen("ucast")) == 0)
60666+ data_new = data_old & ~VFD_PROMISC_UNICAST;
60667+ else if (strncmp(subcmd, "mcast", strlen("mcast")) == 0)
60668+ data_new = data_old & ~VFD_PROMISC_MULTICAST;
60669+ else
60670+ goto promisc_err;
60671+ } else {
60672+ goto promisc_err;
60673+ }
60674+
60675+ if (data_new != data_old)
60676+ ret = vfd_ops->set_promisc(pdev, vf_id, data_new);
60677+
60678+ return ret ? ret : count;
60679+
60680+promisc_err:
60681+ dev_err(&pdev->dev, "Invalid input string");
60682+ return -EINVAL;
60683+}
60684+
60685+/**
60686+ * vfd_vlan_strip_show - handler for vlan_strip show function
60687+ * @kobj: kobject being called
60688+ * @attr: struct kobj_attribute
60689+ * @buff: buffer for data
60690+ **/
60691+static ssize_t vfd_vlan_strip_show(struct kobject *kobj,
60692+ struct kobj_attribute *attr, char *buff)
60693+{
60694+ struct pci_dev *pdev;
60695+ int vf_id, ret = 0;
60696+ bool data;
60697+
60698+ if (!vfd_ops->get_vlan_strip)
60699+ return -EOPNOTSUPP;
60700+
60701+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60702+ if (ret)
60703+ return ret;
60704+
60705+ ret = vfd_ops->get_vlan_strip(pdev, vf_id, &data);
60706+ if (ret < 0)
60707+ return ret;
60708+
60709+ if (data)
60710+ ret = scnprintf(buff, PAGE_SIZE, "on\n");
60711+ else
60712+ ret = scnprintf(buff, PAGE_SIZE, "off\n");
60713+
60714+ return ret;
60715+}
60716+
60717+/**
60718+ * vfd_vlan_strip_store - handler for vlan_strip store function
60719+ * @kobj: kobject being called
60720+ * @attr: struct kobj_attribute
60721+ * @buff: buffer with input data
60722+ * @count: size of buff
60723+ **/
60724+static ssize_t vfd_vlan_strip_store(struct kobject *kobj,
60725+ struct kobj_attribute *attr,
60726+ const char *buff, size_t count)
60727+{
60728+ struct pci_dev *pdev;
60729+ int vf_id, ret = 0;
60730+ bool data_new, data_old;
60731+
60732+ if (!vfd_ops->set_vlan_strip || !vfd_ops->get_vlan_strip)
60733+ return -EOPNOTSUPP;
60734+
60735+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60736+ if (ret)
60737+ return ret;
60738+
60739+ ret = vfd_ops->get_vlan_strip(pdev, vf_id, &data_old);
60740+ if (ret < 0)
60741+ return ret;
60742+
60743+ ret = __parse_bool_data(pdev, buff, "vlan_strip", &data_new);
60744+ if (ret)
60745+ return ret;
60746+
60747+ if (data_new != data_old)
60748+ ret = vfd_ops->set_vlan_strip(pdev, vf_id, data_new);
60749+
60750+ return ret ? ret : count;
60751+}
60752+
60753+/**
60754+ * vfd_link_state_show - handler for link_state show function
60755+ * @kobj: kobject being called
60756+ * @attr: struct kobj_attribute
60757+ * @buff: buffer for data
60758+ **/
60759+static ssize_t vfd_link_state_show(struct kobject *kobj,
60760+ struct kobj_attribute *attr, char *buff)
60761+{
60762+ enum vfd_link_speed link_speed;
60763+ struct pci_dev *pdev;
60764+ int vf_id, ret = 0;
60765+ bool enabled;
60766+
60767+ if (!vfd_ops->get_link_state)
60768+ return -EOPNOTSUPP;
60769+
60770+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60771+ if (ret)
60772+ return ret;
60773+
60774+ ret = vfd_ops->get_link_state(pdev, vf_id, &enabled, &link_speed);
60775+ if (ret < 0)
60776+ return ret;
60777+
60778+ if (enabled) {
60779+ const char *speed_str;
60780+
60781+ switch (link_speed) {
60782+ case VFD_LINK_SPEED_100MB:
60783+ speed_str = "100 Mbps";
60784+ break;
60785+ case VFD_LINK_SPEED_1GB:
60786+ speed_str = "1 Gbps";
60787+ break;
60788+ case VFD_LINK_SPEED_2_5GB:
60789+ speed_str = "2.5 Gbps";
60790+ break;
60791+ case VFD_LINK_SPEED_5GB:
60792+ speed_str = "5 Gbps";
60793+ break;
60794+ case VFD_LINK_SPEED_10GB:
60795+ speed_str = "10 Gbps";
60796+ break;
60797+ case VFD_LINK_SPEED_40GB:
60798+ speed_str = "40 Gbps";
60799+ break;
60800+ case VFD_LINK_SPEED_20GB:
60801+ speed_str = "20 Gbps";
60802+ break;
60803+ case VFD_LINK_SPEED_25GB:
60804+ speed_str = "25 Gbps";
60805+ break;
60806+ case VFD_LINK_SPEED_UNKNOWN:
60807+ speed_str = "unknown speed";
60808+ break;
60809+ default:
60810+ dev_err(&pdev->dev, "Link speed is not supported");
60811+ return -EOPNOTSUPP;
60812+ }
60813+
60814+ ret = scnprintf(buff, PAGE_SIZE, "%s, %s\n", "up", speed_str);
60815+ } else {
60816+ ret = scnprintf(buff, PAGE_SIZE, "down\n");
60817+ }
60818+
60819+ return ret;
60820+}
60821+
60822+/**
60823+ * vfd_link_state_store - handler for link_state store function
60824+ * @kobj: kobject being called
60825+ * @attr: struct kobj_attribute
60826+ * @buff: buffer with input data
60827+ * @count: size of buff
60828+ **/
60829+static ssize_t vfd_link_state_store(struct kobject *kobj,
60830+ struct kobj_attribute *attr,
60831+ const char *buff, size_t count)
60832+{
60833+ struct pci_dev *pdev;
60834+ int vf_id, ret = 0;
60835+ u8 data;
60836+
60837+ if (!vfd_ops->set_link_state)
60838+ return -EOPNOTSUPP;
60839+
60840+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60841+ if (ret)
60842+ return ret;
60843+
60844+ if (sysfs_streq("enable", buff)) {
60845+ data = VFD_LINKSTATE_ON;
60846+ } else if (sysfs_streq("disable", buff)) {
60847+ data = VFD_LINKSTATE_OFF;
60848+ } else if (sysfs_streq("auto", buff)) {
60849+ data = VFD_LINKSTATE_AUTO;
60850+ } else {
60851+ dev_err(&pdev->dev, "Invalid input string");
60852+ return -EINVAL;
60853+ }
60854+
60855+ ret = vfd_ops->set_link_state(pdev, vf_id, data);
60856+
60857+ return ret ? ret : count;
60858+}
60859+
60860+/**
60861+ * vfd_enable_show - handler for VF enable/disable show function
60862+ * @kobj: kobject being called
60863+ * @attr: struct kobj_attribute
60864+ * @buff: buffer for data
60865+ **/
60866+static ssize_t vfd_enable_show(struct kobject *kobj,
60867+ struct kobj_attribute *attr,
60868+ char *buff)
60869+{
60870+ struct pci_dev *pdev;
60871+ int vf_id, ret;
60872+ bool data;
60873+
60874+ if (!vfd_ops->get_vf_enable)
60875+ return -EOPNOTSUPP;
60876+
60877+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60878+ if (ret)
60879+ return ret;
60880+
60881+ ret = vfd_ops->get_vf_enable(pdev, vf_id, &data);
60882+ if (ret < 0)
60883+ return ret;
60884+
60885+ if (data)
60886+ ret = scnprintf(buff, PAGE_SIZE, "on\n");
60887+ else
60888+ ret = scnprintf(buff, PAGE_SIZE, "off\n");
60889+
60890+ return ret;
60891+}
60892+
60893+/**
60894+ * vfd_enable_store - handler for VF enable/disable store function
60895+ * @kobj: kobject being called
60896+ * @attr: struct kobj_attribute
60897+ * @buff: buffer with input data
60898+ * @count: size of buff
60899+ *
60900+ * On success return count, indicating that we used the whole buffer. On
60901+ * failure return a negative error condition.
60902+ **/
60903+static ssize_t vfd_enable_store(struct kobject *kobj,
60904+ struct kobj_attribute *attr,
60905+ const char *buff, size_t count)
60906+{
60907+ bool data_new, data_old;
60908+ struct pci_dev *pdev;
60909+ int vf_id, ret;
60910+
60911+ if (!vfd_ops->set_vf_enable || !vfd_ops->get_vf_enable)
60912+ return -EOPNOTSUPP;
60913+
60914+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60915+ if (ret)
60916+ return ret;
60917+
60918+ ret = vfd_ops->get_vf_enable(pdev, vf_id, &data_old);
60919+ if (ret < 0)
60920+ return ret;
60921+
60922+ ret = __parse_bool_data(pdev, buff, "enable", &data_new);
60923+ if (ret)
60924+ return ret;
60925+
60926+ if (data_new != data_old)
60927+ ret = vfd_ops->set_vf_enable(pdev, vf_id, data_new);
60928+
60929+ return ret ? ret : count;
60930+}
60931+
60932+/**
60933+ * vfd_max_tx_rate_show - handler for mac_tx_rate show function
60934+ * @kobj: kobject being called
60935+ * @attr: struct kobj_attribute
60936+ * @buff: buffer for data
60937+ **/
60938+static ssize_t vfd_max_tx_rate_show(struct kobject *kobj,
60939+ struct kobj_attribute *attr, char *buff)
60940+{
60941+ unsigned int max_tx_rate;
60942+ struct pci_dev *pdev;
60943+ int vf_id, ret;
60944+
60945+ if (!vfd_ops->get_max_tx_rate)
60946+ return -EOPNOTSUPP;
60947+
60948+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60949+ if (ret)
60950+ return ret;
60951+
60952+ ret = vfd_ops->get_max_tx_rate(pdev, vf_id, &max_tx_rate);
60953+ if (ret < 0)
60954+ return ret;
60955+
60956+ ret = scnprintf(buff, PAGE_SIZE, "%u\n", max_tx_rate);
60957+ return ret;
60958+}
60959+
60960+/**
60961+ * vfd_max_tx_rate_store - handler for max_tx_rate store function
60962+ * @kobj: kobject being called
60963+ * @attr: struct kobj_attribute
60964+ * @buff: buffer with input data
60965+ * @count: size of buff
60966+ **/
60967+static ssize_t vfd_max_tx_rate_store(struct kobject *kobj,
60968+ struct kobj_attribute *attr,
60969+ const char *buff, size_t count)
60970+{
60971+ unsigned int max_tx_rate;
60972+ struct pci_dev *pdev;
60973+ int vf_id, ret;
60974+
60975+ if (!vfd_ops->set_max_tx_rate)
60976+ return -EOPNOTSUPP;
60977+
60978+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
60979+ if (ret)
60980+ return ret;
60981+
60982+ ret = kstrtouint(buff, 10, &max_tx_rate);
60983+ if (ret) {
60984+ dev_err(&pdev->dev,
60985+ "Invalid argument, not a decimal number: %s", buff);
60986+ return ret;
60987+ }
60988+
60989+ ret = vfd_ops->set_max_tx_rate(pdev, vf_id, &max_tx_rate);
60990+
60991+ return ret ? ret : count;
60992+}
60993+
60994+/**
60995+ * vfd_min_tx_rate_show - handler for min_tx_rate show function
60996+ * @kobj: kobject being called
60997+ * @attr: struct kobj_attribute
60998+ * @buff: buffer for data
60999+ **/
61000+static ssize_t vfd_min_tx_rate_show(struct kobject *kobj,
61001+ struct kobj_attribute *attr, char *buff)
61002+{
61003+ if (!vfd_ops->get_min_tx_rate)
61004+ return -EOPNOTSUPP;
61005+
61006+ return vfd_ops->get_min_tx_rate(kobj, attr, buff);
61007+}
61008+
61009+/**
61010+ * vfd_min_tx_rate_store - handler for min_tx_rate store function
61011+ * @kobj: kobject being called
61012+ * @attr: struct kobj_attribute
61013+ * @buff: buffer with input data
61014+ * @count: size of buff
61015+ **/
61016+static ssize_t vfd_min_tx_rate_store(struct kobject *kobj,
61017+ struct kobj_attribute *attr,
61018+ const char *buff, size_t count)
61019+{
61020+ if (!vfd_ops->set_min_tx_rate)
61021+ return -EOPNOTSUPP;
61022+
61023+ return vfd_ops->set_min_tx_rate(kobj, attr, buff, count);
61024+}
61025+
61026+/**
61027+ * vfd_spoofcheck_show - handler for spoofcheck show function
61028+ * @kobj: kobject being called
61029+ * @attr: struct kobj_attribute
61030+ * @buff: buffer for data
61031+ **/
61032+static ssize_t vfd_spoofcheck_show(struct kobject *kobj,
61033+ struct kobj_attribute *attr, char *buff)
61034+{
61035+ if (!vfd_ops->get_spoofcheck)
61036+ return -EOPNOTSUPP;
61037+
61038+ return vfd_ops->get_spoofcheck(kobj, attr, buff);
61039+}
61040+
61041+/**
61042+ * vfd_spoofcheck_store - handler for spoofcheck store function
61043+ * @kobj: kobject being called
61044+ * @attr: struct kobj_attribute
61045+ * @buff: buffer with input data
61046+ * @count: size of buff
61047+ **/
61048+static ssize_t vfd_spoofcheck_store(struct kobject *kobj,
61049+ struct kobj_attribute *attr,
61050+ const char *buff, size_t count)
61051+{
61052+ if (!vfd_ops->set_spoofcheck)
61053+ return -EOPNOTSUPP;
61054+
61055+ return vfd_ops->set_spoofcheck(kobj, attr, buff, count);
61056+}
61057+
61058+/**
61059+ * vfd_trust_show - handler for trust show function
61060+ * @kobj: kobject being called
61061+ * @attr: struct kobj_attribute
61062+ * @buff: buffer for data
61063+ **/
61064+static ssize_t vfd_trust_show(struct kobject *kobj,
61065+ struct kobj_attribute *attr, char *buff)
61066+{
61067+ struct pci_dev *pdev;
61068+ int vf_id, ret;
61069+ bool data;
61070+
61071+ if (!vfd_ops->get_trust_state)
61072+ return -EOPNOTSUPP;
61073+
61074+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61075+ if (ret)
61076+ return ret;
61077+
61078+ ret = vfd_ops->get_trust_state(pdev, vf_id, &data);
61079+ if (ret < 0)
61080+ return ret;
61081+
61082+ if (data)
61083+ ret = scnprintf(buff, PAGE_SIZE, "on\n");
61084+ else
61085+ ret = scnprintf(buff, PAGE_SIZE, "off\n");
61086+
61087+ return ret;
61088+}
61089+
61090+/**
61091+ * vfd_trust_store - handler for trust store function
61092+ * @kobj: kobject being called
61093+ * @attr: struct kobj_attribute
61094+ * @buff: buffer with input data
61095+ * @count: size of buff
61096+ **/
61097+static ssize_t vfd_trust_store(struct kobject *kobj,
61098+ struct kobj_attribute *attr,
61099+ const char *buff, size_t count)
61100+{
61101+ bool data_new, data_old;
61102+ struct pci_dev *pdev;
61103+ int vf_id, ret;
61104+
61105+ if (!vfd_ops->set_trust_state || !vfd_ops->get_trust_state)
61106+ return -EOPNOTSUPP;
61107+
61108+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61109+ if (ret)
61110+ return ret;
61111+
61112+ ret = vfd_ops->get_trust_state(pdev, vf_id, &data_old);
61113+ if (ret < 0)
61114+ return ret;
61115+
61116+ ret = __parse_bool_data(pdev, buff, "trust", &data_new);
61117+ if (ret)
61118+ return ret;
61119+
61120+ if (data_new != data_old)
61121+ ret = vfd_ops->set_trust_state(pdev, vf_id, data_new);
61122+
61123+ return ret ? ret : count;
61124+}
61125+
61126+/**
61127+ * vfd_reset_stats_store - handler for reset stats store function
61128+ * @kobj: kobject being called
61129+ * @attr: struct kobj_attribute
61130+ * @buff: buffer with input data
61131+ * @count: size of buff
61132+ **/
61133+static ssize_t vfd_reset_stats_store(struct kobject *kobj,
61134+ struct kobj_attribute *attr,
61135+ const char *buff, size_t count)
61136+{
61137+ int vf_id, reset, ret;
61138+ struct pci_dev *pdev;
61139+
61140+ if (!vfd_ops->reset_stats)
61141+ return -EOPNOTSUPP;
61142+
61143+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61144+ if (ret)
61145+ return ret;
61146+ ret = kstrtoint(buff, 10, &reset);
61147+ if (ret) {
61148+ dev_err(&pdev->dev, "Invalid input\n");
61149+ return ret;
61150+ }
61151+
61152+ if (reset != 1)
61153+ return -EINVAL;
61154+
61155+ ret = vfd_ops->reset_stats(pdev, vf_id);
61156+
61157+ return ret ? ret : count;
61158+}
61159+
61160+/**
61161+ * vfd_rx_bytes_show - handler for rx_bytes show function
61162+ * @kobj: kobject being called
61163+ * @attr: struct kobj_attribute
61164+ * @buff: buffer for data
61165+ **/
61166+static ssize_t vfd_rx_bytes_show(struct kobject *kobj,
61167+ struct kobj_attribute *attr, char *buff)
61168+{
61169+ struct pci_dev *pdev;
61170+ int vf_id, ret = 0;
61171+ u64 data;
61172+
61173+ if (!vfd_ops->get_rx_bytes)
61174+ return -EOPNOTSUPP;
61175+
61176+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61177+ if (ret)
61178+ return ret;
61179+
61180+ ret = vfd_ops->get_rx_bytes(pdev, vf_id, &data);
61181+ if (ret < 0)
61182+ return ret;
61183+
61184+ ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
61185+
61186+ return ret;
61187+}
61188+
61189+/**
61190+ * vfd_rx_dropped_show - handler for rx_dropped show function
61191+ * @kobj: kobject being called
61192+ * @attr: struct kobj_attribute
61193+ * @buff: buffer for data
61194+ **/
61195+static ssize_t vfd_rx_dropped_show(struct kobject *kobj,
61196+ struct kobj_attribute *attr, char *buff)
61197+{
61198+ struct pci_dev *pdev;
61199+ int vf_id, ret = 0;
61200+ u64 data;
61201+
61202+ if (!vfd_ops->get_rx_dropped)
61203+ return -EOPNOTSUPP;
61204+
61205+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61206+ if (ret)
61207+ return ret;
61208+
61209+ ret = vfd_ops->get_rx_dropped(pdev, vf_id, &data);
61210+ if (ret < 0)
61211+ return ret;
61212+
61213+ ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
61214+
61215+ return ret;
61216+}
61217+
61218+/**
61219+ * vfd_rx_packets_show - handler for rx_packets show function
61220+ * @kobj: kobject being called
61221+ * @attr: struct kobj_attribute
61222+ * @buff: buffer for data
61223+ **/
61224+static ssize_t vfd_rx_packets_show(struct kobject *kobj,
61225+ struct kobj_attribute *attr, char *buff)
61226+{
61227+ struct pci_dev *pdev;
61228+ int vf_id, ret = 0;
61229+ u64 data;
61230+
61231+ if (!vfd_ops->get_rx_packets)
61232+ return -EOPNOTSUPP;
61233+
61234+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61235+ if (ret)
61236+ return ret;
61237+
61238+ ret = vfd_ops->get_rx_packets(pdev, vf_id, &data);
61239+ if (ret < 0)
61240+ return ret;
61241+
61242+ ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
61243+
61244+ return ret;
61245+}
61246+
61247+/**
61248+ * vfd_tx_bytes_show - handler for tx_bytes show function
61249+ * @kobj: kobject being called
61250+ * @attr: struct kobj_attribute
61251+ * @buff: buffer for data
61252+ **/
61253+static ssize_t vfd_tx_bytes_show(struct kobject *kobj,
61254+ struct kobj_attribute *attr, char *buff)
61255+{
61256+ struct pci_dev *pdev;
61257+ int vf_id, ret = 0;
61258+ u64 data;
61259+
61260+ if (!vfd_ops->get_tx_bytes)
61261+ return -EOPNOTSUPP;
61262+
61263+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61264+ if (ret)
61265+ return ret;
61266+
61267+ ret = vfd_ops->get_tx_bytes(pdev, vf_id, &data);
61268+ if (ret < 0)
61269+ return ret;
61270+
61271+ ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
61272+
61273+ return ret;
61274+}
61275+
61276+/**
61277+ * vfd_tx_dropped_show - handler for tx_dropped show function
61278+ * @kobj: kobject being called
61279+ * @attr: struct kobj_attribute
61280+ * @buff: buffer for data
61281+ **/
61282+static ssize_t vfd_tx_dropped_show(struct kobject *kobj,
61283+ struct kobj_attribute *attr, char *buff)
61284+{
61285+ struct pci_dev *pdev;
61286+ int vf_id, ret = 0;
61287+ u64 data;
61288+
61289+ if (!vfd_ops->get_tx_dropped)
61290+ return -EOPNOTSUPP;
61291+
61292+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61293+ if (ret)
61294+ return ret;
61295+
61296+ ret = vfd_ops->get_tx_dropped(pdev, vf_id, &data);
61297+ if (ret < 0)
61298+ return ret;
61299+
61300+ ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
61301+
61302+ return ret;
61303+}
61304+
61305+/**
61306+ * vfd_tx_packets_show - handler for tx_packets show function
61307+ * @kobj: kobject being called
61308+ * @attr: struct kobj_attribute
61309+ * @buff: buffer for data
61310+ **/
61311+static ssize_t vfd_tx_packets_show(struct kobject *kobj,
61312+ struct kobj_attribute *attr, char *buff)
61313+{
61314+ struct pci_dev *pdev;
61315+ int vf_id, ret = 0;
61316+ u64 data;
61317+
61318+ if (!vfd_ops->get_tx_packets)
61319+ return -EOPNOTSUPP;
61320+
61321+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61322+ if (ret)
61323+ return ret;
61324+
61325+ ret = vfd_ops->get_tx_packets(pdev, vf_id, &data);
61326+ if (ret < 0)
61327+ return ret;
61328+
61329+ ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
61330+
61331+ return ret;
61332+}
61333+
61334+/**
61335+ * vfd_tx_spoofed_show - handler for tx_spoofed show function
61336+ * @kobj: kobject being called
61337+ * @attr: struct kobj_attribute
61338+ * @buff: buffer for data
61339+ **/
61340+static ssize_t vfd_tx_spoofed_show(struct kobject *kobj,
61341+ struct kobj_attribute *attr, char *buff)
61342+{
61343+ struct pci_dev *pdev;
61344+ int vf_id, ret = 0;
61345+ u64 data;
61346+
61347+ if (!vfd_ops->get_tx_spoofed)
61348+ return -EOPNOTSUPP;
61349+
61350+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61351+ if (ret)
61352+ return ret;
61353+
61354+ ret = vfd_ops->get_tx_spoofed(pdev, vf_id, &data);
61355+ if (ret < 0)
61356+ return ret;
61357+
61358+ ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
61359+
61360+ return ret;
61361+}
61362+
61363+/**
61364+ * vfd_tx_errors_show - handler for tx_errors show function
61365+ * @kobj: kobject being called
61366+ * @attr: struct kobj_attribute
61367+ * @buff: buffer for data
61368+ **/
61369+static ssize_t vfd_tx_errors_show(struct kobject *kobj,
61370+ struct kobj_attribute *attr, char *buff)
61371+{
61372+ struct pci_dev *pdev;
61373+ int vf_id, ret = 0;
61374+ u64 data;
61375+
61376+ if (!vfd_ops->get_tx_errors)
61377+ return -EOPNOTSUPP;
61378+
61379+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61380+ if (ret)
61381+ return ret;
61382+
61383+ ret = vfd_ops->get_tx_errors(pdev, vf_id, &data);
61384+ if (ret < 0)
61385+ return ret;
61386+
61387+ ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
61388+
61389+ return ret;
61390+}
61391+
61392+/**
61393+ * qos_share_show - handler for the bw_share show function
61394+ * @kobj: kobject being called
61395+ * @attr: struct kobj_attribute
61396+ * @buff: buffer for data
61397+ **/
61398+static ssize_t qos_share_show(struct kobject *kobj,
61399+ struct kobj_attribute *attr, char *buff)
61400+{
61401+ struct pci_dev *pdev;
61402+ int vf_id, ret;
61403+ u8 data = 0;
61404+
61405+ if (!vfd_ops->get_vf_bw_share)
61406+ return -EOPNOTSUPP;
61407+
61408+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61409+ if (ret)
61410+ return ret;
61411+
61412+ ret = vfd_ops->get_vf_bw_share(pdev, vf_id, &data);
61413+ if (ret < 0) {
61414+ dev_err(&pdev->dev, "No bw share applied for VF %d\n", vf_id);
61415+ return ret;
61416+ }
61417+
61418+ ret = scnprintf(buff, PAGE_SIZE, "%u\n", data);
61419+
61420+ return ret;
61421+}
61422+
61423+/**
61424+ * qos_share_store - handler for the bw_share store function
61425+ * @kobj: kobject being called
61426+ * @attr: struct kobj_attribute
61427+ * @buff: buffer with input data
61428+ * @count: size of buff
61429+ **/
61430+static ssize_t qos_share_store(struct kobject *kobj,
61431+ struct kobj_attribute *attr,
61432+ const char *buff, size_t count)
61433+{
61434+ struct pci_dev *pdev;
61435+ int vf_id, ret;
61436+ u8 bw_share;
61437+
61438+ if (!vfd_ops->set_vf_bw_share)
61439+ return -EOPNOTSUPP;
61440+
61441+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61442+ if (ret)
61443+ return ret;
61444+
61445+ /* parse the bw_share */
61446+ ret = kstrtou8(buff, 10, &bw_share);
61447+ if (ret) {
61448+ dev_err(&pdev->dev, "Invalid input\n");
61449+ return ret;
61450+ }
61451+
61452+ /* check that the BW is between 1 and 100 */
61453+ if (bw_share < 1 || bw_share > 100) {
61454+ dev_err(&pdev->dev, "BW share has to be between 1-100\n");
61455+ return -EINVAL;
61456+ }
61457+ ret = vfd_ops->set_vf_bw_share(pdev, vf_id, bw_share);
61458+ return ret ? ret : count;
61459+}
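/*
 * A minimal user-space sketch of driving the qos/share attribute defined
 * above, assuming the sysfs layout created later in this file
 * (sriov/<vf_id>/qos/share under the PF's PCI device directory). The PCI
 * address and the example_* name are illustrative, not part of the driver.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int example_set_vf_bw_share(const char *pci_addr, int vf_id, int share)
{
	char path[128], val[8];
	int fd, len;

	/* e.g. /sys/bus/pci/devices/0000:01:00.0/sriov/0/qos/share */
	snprintf(path, sizeof(path),
		 "/sys/bus/pci/devices/%s/sriov/%d/qos/share", pci_addr, vf_id);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* qos_share_store() accepts 1..100 and rejects anything else */
	len = snprintf(val, sizeof(val), "%d", share);
	if (write(fd, val, len) != len) {
		close(fd);
		return -1;
	}
	return close(fd);
}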
61460+
61461+/**
61462+ * pf_qos_apply_store - handler for pf qos apply store function
61463+ * @kobj: kobject being called
61464+ * @attr: struct kobj_attribute
61465+ * @buff: buffer with input data
61466+ * @count: size of buff
61467+ **/
61468+static ssize_t pf_qos_apply_store(struct kobject *kobj,
61469+ struct kobj_attribute *attr,
61470+ const char *buff, size_t count)
61471+{
61472+ int ret, apply;
61473+ struct pci_dev *pdev;
61474+
61475+ if (!vfd_ops->set_pf_qos_apply)
61476+ return -EOPNOTSUPP;
61477+
61478+ ret = __get_pf_pdev(kobj, &pdev);
61479+ if (ret)
61480+ return ret;
61481+
61482+ ret = kstrtoint(buff, 10, &apply);
61483+ if (ret) {
61484+ dev_err(&pdev->dev,
61485+ "Invalid input\n");
61486+ return ret;
61487+ }
61488+
61489+ if (apply != 1)
61490+ return -EINVAL;
61491+
61492+ ret = vfd_ops->set_pf_qos_apply(pdev);
61493+
61494+ return ret ? ret : count;
61495+}
61496+
61497+/**
61498+ * pf_ingress_mirror_show - handler for PF ingress mirror show function
61499+ * @kobj: kobject being called
61500+ * @attr: struct kobj_attribute
61501+ * @buff: buffer for data
61502+ **/
61503+static ssize_t pf_ingress_mirror_show(struct kobject *kobj,
61504+ struct kobj_attribute *attr, char *buff)
61505+{
61506+ struct pci_dev *pdev;
61507+ int ret, data;
61508+
61509+ if (!vfd_ops->get_pf_ingress_mirror)
61510+ return -EOPNOTSUPP;
61511+
61512+ ret = __get_pf_pdev(kobj, &pdev);
61513+ if (ret)
61514+ return ret;
61515+
61516+ ret = vfd_ops->get_pf_ingress_mirror(pdev, &data);
61517+ if (ret < 0)
61518+ return ret;
61519+
61520+ if (data == VFD_INGRESS_MIRROR_OFF)
61521+ ret = scnprintf(buff, PAGE_SIZE, "off\n");
61522+ else
61523+ ret = scnprintf(buff, PAGE_SIZE, "%u\n", data);
61524+
61525+ return ret;
61526+}
61527+
61528+/**
61529+ * pf_ingress_mirror_store - handler for pf ingress mirror store function
61530+ * @kobj: kobject being called
61531+ * @attr: struct kobj_attribute
61532+ * @buff: buffer with input data
61533+ * @count: size of buff
61534+ **/
61535+static ssize_t pf_ingress_mirror_store(struct kobject *kobj,
61536+ struct kobj_attribute *attr,
61537+ const char *buff, size_t count)
61538+{
61539+ int data_new, data_old;
61540+ struct pci_dev *pdev;
61541+ int ret;
61542+
61543+ if (!vfd_ops->set_pf_ingress_mirror || !vfd_ops->get_pf_ingress_mirror)
61544+ return -EOPNOTSUPP;
61545+
61546+ ret = __get_pf_pdev(kobj, &pdev);
61547+ if (ret)
61548+ return ret;
61549+
61550+ ret = vfd_ops->get_pf_ingress_mirror(pdev, &data_old);
61551+ if (ret < 0)
61552+ return ret;
61553+
61554+ ret = __parse_egress_ingress_input(pdev, buff, "ingress_mirror",
61555+ &data_new, &data_old);
61556+ if (ret)
61557+ return ret;
61558+
61559+ if (data_new != data_old)
61560+ ret = vfd_ops->set_pf_ingress_mirror(pdev, data_new);
61561+
61562+ return ret ? ret : count;
61563+}
61564+
61565+/**
61566+ * pf_egress_mirror_show - handler for PF egress mirror show function
61567+ * @kobj: kobject being called
61568+ * @attr: struct kobj_attribute
61569+ * @buff: buffer for data
61570+ **/
61571+static ssize_t pf_egress_mirror_show(struct kobject *kobj,
61572+ struct kobj_attribute *attr, char *buff)
61573+{
61574+ struct pci_dev *pdev;
61575+ int ret, data;
61576+
61577+ if (!vfd_ops->get_pf_egress_mirror)
61578+ return -EOPNOTSUPP;
61579+
61580+ ret = __get_pf_pdev(kobj, &pdev);
61581+ if (ret)
61582+ return ret;
61583+
61584+ ret = vfd_ops->get_pf_egress_mirror(pdev, &data);
61585+ if (ret < 0)
61586+ return ret;
61587+
61588+ if (data == VFD_EGRESS_MIRROR_OFF)
61589+ ret = scnprintf(buff, PAGE_SIZE, "off\n");
61590+ else
61591+ ret = scnprintf(buff, PAGE_SIZE, "%u\n", data);
61592+
61593+ return ret;
61594+}
61595+
61596+/**
61597+ * pf_egress_mirror_store - handler for pf egress mirror store function
61598+ * @kobj: kobject being called
61599+ * @attr: struct kobj_attribute
61600+ * @buff: buffer with input data
61601+ * @count: size of buff
61602+ **/
61603+static ssize_t pf_egress_mirror_store(struct kobject *kobj,
61604+ struct kobj_attribute *attr,
61605+ const char *buff, size_t count)
61606+{
61607+ int data_new, data_old;
61608+ struct pci_dev *pdev;
61609+ int ret;
61610+
61611+ if (!vfd_ops->set_pf_egress_mirror || !vfd_ops->get_pf_egress_mirror)
61612+ return -EOPNOTSUPP;
61613+
61614+ ret = __get_pf_pdev(kobj, &pdev);
61615+ if (ret)
61616+ return ret;
61617+
61618+ ret = vfd_ops->get_pf_egress_mirror(pdev, &data_old);
61619+ if (ret < 0)
61620+ return ret;
61621+
61622+ ret = __parse_egress_ingress_input(pdev, buff, "egress_mirror",
61623+ &data_new, &data_old);
61624+ if (ret)
61625+ return ret;
61626+
61627+ if (data_new != data_old)
61628+ ret = vfd_ops->set_pf_egress_mirror(pdev, data_new);
61629+
61630+ return ret ? ret : count;
61631+}
61632+
61633+/**
61634+ * pf_tpid_show - handler for pf tpid show function
61635+ * @kobj: kobject being called
61636+ * @attr: struct kobj_attribute
61637+ * @buff: buffer for data
61638+ **/
61639+static ssize_t pf_tpid_show(struct kobject *kobj,
61640+ struct kobj_attribute *attr, char *buff)
61641+{
61642+ struct pci_dev *pdev;
61643+ u16 data;
61644+ int ret;
61645+
61646+ if (!vfd_ops->get_pf_tpid)
61647+ return -EOPNOTSUPP;
61648+
61649+ ret = __get_pf_pdev(kobj, &pdev);
61650+ if (ret)
61651+ return ret;
61652+
61653+ ret = vfd_ops->get_pf_tpid(pdev, &data);
61654+ if (ret < 0)
61655+ return ret;
61656+
61657+ ret = scnprintf(buff, PAGE_SIZE, "%x\n", data);
61658+
61659+ return ret;
61660+}
61661+
61662+/**
61663+ * pf_tpid_store - handler for pf tpid store function
61664+ * @kobj: kobject being called
61665+ * @attr: struct kobj_attribute
61666+ * @buff: buffer with input data
61667+ * @count: size of buff
61668+ **/
61669+static ssize_t pf_tpid_store(struct kobject *kobj,
61670+ struct kobj_attribute *attr,
61671+ const char *buff, size_t count)
61672+{
61673+ struct pci_dev *pdev;
61674+ u16 data;
61675+ int ret;
61676+
61677+ if (!vfd_ops->set_pf_tpid)
61678+ return -EOPNOTSUPP;
61679+
61680+ ret = __get_pf_pdev(kobj, &pdev);
61681+ if (ret)
61682+ return ret;
61683+
61684+ ret = kstrtou16(buff, 16, &data);
61685+ if (ret) {
61686+ dev_err(&pdev->dev, "Invalid input\n");
61687+ return ret;
61688+ }
61689+
61690+ ret = vfd_ops->set_pf_tpid(pdev, data);
61691+
61692+ return ret ? ret : count;
61693+}
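/*
 * Worked example for the handler above: pf_tpid_store() parses the input in
 * base 16 (kstrtou16(buff, 16, &data)), so writing the string "88a8" to the
 * sriov/tpid attribute selects TPID 0x88A8 (the 802.1ad S-TAG ethertype),
 * while "8100" selects 0x8100 (the standard 802.1Q C-TAG ethertype).
 */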
61694+
61695+/**
61696+ * vfd_num_queues_show - handler for num_queues show function
61697+ * @kobj: kobject being called
61698+ * @attr: struct kobj_attribute
61699+ * @buff: buffer for data
61700+ **/
61701+static ssize_t vfd_num_queues_show(struct kobject *kobj,
61702+ struct kobj_attribute *attr, char *buff)
61703+{
61704+ struct pci_dev *pdev;
61705+ int vf_id, ret;
61706+ int data;
61707+
61708+ if (!vfd_ops->get_num_queues)
61709+ return -EOPNOTSUPP;
61710+
61711+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61712+ if (ret)
61713+ return ret;
61714+
61715+ ret = vfd_ops->get_num_queues(pdev, vf_id, &data);
61716+ if (ret)
61717+ return ret;
61718+
61719+ ret = scnprintf(buff, PAGE_SIZE, "%d\n", data);
61720+
61721+ return ret;
61722+}
61723+
61724+/**
61725+ * vfd_num_queues_store - handler for num_queues store function
61726+ * @kobj: kobject being called
61727+ * @attr: struct kobj_attribute
61728+ * @buff: buffer with input data
61729+ * @count: size of buff
61730+ **/
61731+static ssize_t vfd_num_queues_store(struct kobject *kobj,
61732+ struct kobj_attribute *attr,
61733+ const char *buff, size_t count)
61734+{
61735+ int data_new, data_old;
61736+ struct pci_dev *pdev;
61737+ int vf_id, ret;
61738+
61739+ if (!vfd_ops->set_num_queues)
61740+ return -EOPNOTSUPP;
61741+
61742+ ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
61743+ if (ret)
61744+ return ret;
61745+
61746+ ret = vfd_ops->get_num_queues(pdev, vf_id, &data_old);
61747+ if (ret)
61748+ return ret;
61749+
61750+ ret = kstrtoint(buff, 10, &data_new);
61751+ if (ret) {
61752+ dev_err(&pdev->dev, "Invalid input\n");
61753+ return ret;
61754+ }
61755+
61756+ if (data_new < 1) {
61757+ dev_err(&pdev->dev, "VF queue count must be at least 1\n");
61758+ return -EINVAL;
61759+ }
61760+
61761+ if (data_new != data_old)
61762+ ret = vfd_ops->set_num_queues(pdev, vf_id, data_new);
61763+
61764+ return ret ? ret : count;
61765+}
61766+
61767+static struct kobj_attribute trunk_attribute =
61768+ __ATTR(trunk, 0644, vfd_trunk_show, vfd_trunk_store);
61769+static struct kobj_attribute vlan_mirror_attribute =
61770+ __ATTR(vlan_mirror, 0644, vfd_vlan_mirror_show, vfd_vlan_mirror_store);
61771+static struct kobj_attribute egress_mirror_attribute =
61772+ __ATTR(egress_mirror, 0644,
61773+ vfd_egress_mirror_show, vfd_egress_mirror_store);
61774+static struct kobj_attribute ingress_mirror_attribute =
61775+ __ATTR(ingress_mirror, 0644,
61776+ vfd_ingress_mirror_show, vfd_ingress_mirror_store);
61777+static struct kobj_attribute mac_anti_spoof_attribute =
61778+ __ATTR(mac_anti_spoof, 0644,
61779+ vfd_mac_anti_spoof_show, vfd_mac_anti_spoof_store);
61780+static struct kobj_attribute vlan_anti_spoof_attribute =
61781+ __ATTR(vlan_anti_spoof, 0644,
61782+ vfd_vlan_anti_spoof_show, vfd_vlan_anti_spoof_store);
61783+static struct kobj_attribute allow_untagged_attribute =
61784+ __ATTR(allow_untagged, 0644,
61785+ vfd_allow_untagged_show, vfd_allow_untagged_store);
61786+static struct kobj_attribute loopback_attribute =
61787+ __ATTR(loopback, 0644, vfd_loopback_show, vfd_loopback_store);
61788+static struct kobj_attribute mac_attribute =
61789+ __ATTR(mac, 0644, vfd_mac_show, vfd_mac_store);
61790+static struct kobj_attribute mac_list_attribute =
61791+ __ATTR(mac_list, 0644, vfd_mac_list_show, vfd_mac_list_store);
61792+static struct kobj_attribute promisc_attribute =
61793+ __ATTR(promisc, 0644, vfd_promisc_show, vfd_promisc_store);
61794+static struct kobj_attribute vlan_strip_attribute =
61795+ __ATTR(vlan_strip, 0644, vfd_vlan_strip_show, vfd_vlan_strip_store);
61796+static struct kobj_attribute link_state_attribute =
61797+ __ATTR(link_state, 0644, vfd_link_state_show, vfd_link_state_store);
61798+static struct kobj_attribute max_tx_rate_attribute =
61799+ __ATTR(max_tx_rate, 0644, vfd_max_tx_rate_show, vfd_max_tx_rate_store);
61800+static struct kobj_attribute min_tx_rate_attribute =
61801+ __ATTR(min_tx_rate, 0644, vfd_min_tx_rate_show, vfd_min_tx_rate_store);
61802+static struct kobj_attribute spoofcheck_attribute =
61803+ __ATTR(spoofcheck, 0644, vfd_spoofcheck_show, vfd_spoofcheck_store);
61804+static struct kobj_attribute trust_attribute =
61805+ __ATTR(trust, 0644, vfd_trust_show, vfd_trust_store);
61806+static struct kobj_attribute reset_stats_attribute =
61807+ __ATTR(reset_stats, 0200, NULL, vfd_reset_stats_store);
61808+static struct kobj_attribute enable_attribute =
61809+ __ATTR(enable, 0644, vfd_enable_show, vfd_enable_store);
61810+static struct kobj_attribute num_queues_attribute =
61811+ __ATTR(num_queues, 0644, vfd_num_queues_show, vfd_num_queues_store);
61812+
61813+static struct attribute *s_attrs[] = {
61814+ &trunk_attribute.attr,
61815+ &vlan_mirror_attribute.attr,
61816+ &egress_mirror_attribute.attr,
61817+ &ingress_mirror_attribute.attr,
61818+ &mac_anti_spoof_attribute.attr,
61819+ &vlan_anti_spoof_attribute.attr,
61820+ &allow_untagged_attribute.attr,
61821+ &loopback_attribute.attr,
61822+ &mac_attribute.attr,
61823+ &mac_list_attribute.attr,
61824+ &promisc_attribute.attr,
61825+ &vlan_strip_attribute.attr,
61826+ &link_state_attribute.attr,
61827+ &max_tx_rate_attribute.attr,
61828+ &min_tx_rate_attribute.attr,
61829+ &spoofcheck_attribute.attr,
61830+ &trust_attribute.attr,
61831+ &reset_stats_attribute.attr,
61832+ &enable_attribute.attr,
61833+ &num_queues_attribute.attr,
61834+ NULL,
61835+};
61836+
61837+static struct attribute_group vfd_group = {
61838+ .attrs = s_attrs,
61839+};
61840+
61841+static struct kobj_attribute rx_bytes_attribute =
61842+ __ATTR(rx_bytes, 0444, vfd_rx_bytes_show, NULL);
61843+static struct kobj_attribute rx_dropped_attribute =
61844+ __ATTR(rx_dropped, 0444, vfd_rx_dropped_show, NULL);
61845+static struct kobj_attribute rx_packets_attribute =
61846+ __ATTR(rx_packets, 0444, vfd_rx_packets_show, NULL);
61847+static struct kobj_attribute tx_bytes_attribute =
61848+ __ATTR(tx_bytes, 0444, vfd_tx_bytes_show, NULL);
61849+static struct kobj_attribute tx_dropped_attribute =
61850+ __ATTR(tx_dropped, 0444, vfd_tx_dropped_show, NULL);
61851+static struct kobj_attribute tx_packets_attribute =
61852+ __ATTR(tx_packets, 0444, vfd_tx_packets_show, NULL);
61853+static struct kobj_attribute tx_spoofed_attribute =
61854+ __ATTR(tx_spoofed, 0444, vfd_tx_spoofed_show, NULL);
61855+static struct kobj_attribute tx_errors_attribute =
61856+ __ATTR(tx_errors, 0444, vfd_tx_errors_show, NULL);
61857+
61858+static struct attribute *stats_attrs[] = {
61859+ &rx_bytes_attribute.attr,
61860+ &rx_dropped_attribute.attr,
61861+ &rx_packets_attribute.attr,
61862+ &tx_bytes_attribute.attr,
61863+ &tx_dropped_attribute.attr,
61864+ &tx_packets_attribute.attr,
61865+ &tx_spoofed_attribute.attr,
61866+ &tx_errors_attribute.attr,
61867+ NULL,
61868+};
61869+
61870+static struct attribute_group stats_group = {
61871+ .name = "stats",
61872+ .attrs = stats_attrs,
61873+};
61874+
61875+static struct kobj_attribute share_attribute =
61876+ __ATTR(share, 0644, qos_share_show, qos_share_store);
61877+
61878+static struct attribute *qos_attrs[] = {
61879+ &share_attribute.attr,
61880+ NULL,
61881+};
61882+
61883+static struct attribute_group qos_group = {
61884+ .name = "qos",
61885+ .attrs = qos_attrs,
61886+};
61887+
61888+static struct kobj_attribute apply_attribute =
61889+ __ATTR(apply, 0200, NULL, pf_qos_apply_store);
61890+
61891+static struct attribute *pf_qos_attrs[] = {
61892+ &apply_attribute.attr,
61893+ NULL,
61894+};
61895+
61896+static struct attribute_group pf_qos_group = {
61897+ .name = "qos",
61898+ .attrs = pf_qos_attrs,
61899+};
61900+
61901+static struct kobj_attribute pf_ingress_mirror_attribute =
61902+ __ATTR(ingress_mirror, 0644, pf_ingress_mirror_show, pf_ingress_mirror_store);
61903+static struct kobj_attribute pf_egress_mirror_attribute =
61904+ __ATTR(egress_mirror, 0644, pf_egress_mirror_show, pf_egress_mirror_store);
61905+static struct kobj_attribute pf_tpid_attribute =
61906+ __ATTR(tpid, 0644, pf_tpid_show, pf_tpid_store);
61907+
61908+static struct attribute *pf_attrs[] = {
61909+ &pf_ingress_mirror_attribute.attr,
61910+ &pf_egress_mirror_attribute.attr,
61911+ &pf_tpid_attribute.attr,
61912+ NULL,
61913+};
61914+
61915+static struct attribute_group pf_attr_group = {
61916+ .attrs = pf_attrs,
61917+};
61918+
61919+/**
61920+ * create_vfs_sysfs - create sysfs hierarchy for VF
61921+ * @pdev: PCI device information struct
61922+ * @vfd_obj: VF-d kobjects information struct
61923+ *
61924+ * Creates a kobject for Virtual Function and assigns attributes to it.
61925+ **/
61926+static int create_vfs_sysfs(struct pci_dev *pdev, struct vfd_objects *vfd_obj)
61927+{
61928+ struct kobject *vf_kobj;
61929+ char kname[4];
61930+ int ret, i;
61931+
61932+ for (i = 0; i < vfd_obj->num_vfs; i++) {
61933+ int length = snprintf(kname, sizeof(kname), "%d", i);
61934+
61935+ if (length >= sizeof(kname)) {
61936+ dev_err(&pdev->dev,
61937+ "cannot request %d vfs, try again with smaller number of vfs\n",
61938+ i);
61939+ --i;
61940+ ret = -EINVAL;
61941+ goto err_vfs_sysfs;
61942+ }
61943+
61944+ vf_kobj = kobject_create_and_add(kname, vfd_obj->sriov_kobj);
61945+ if (!vf_kobj) {
61946+ dev_err(&pdev->dev,
61947+ "failed to create VF kobj: %s\n", kname);
61948+ i--;
61949+ ret = -ENOMEM;
61950+ goto err_vfs_sysfs;
61951+ }
61952+ dev_info(&pdev->dev, "created VF %s sysfs", vf_kobj->name);
61953+ vfd_obj->vf_kobj[i] = vf_kobj;
61954+
61955+ /* create VF sys attr */
61956+ ret = sysfs_create_group(vfd_obj->vf_kobj[i], &vfd_group);
61957+ if (ret) {
61958+ dev_err(&pdev->dev, "failed to create VF sys attribute: %d", i);
61959+ goto err_vfs_sysfs;
61960+ }
61961+
61962+ /* create VF stats sys attr */
61963+ ret = sysfs_create_group(vfd_obj->vf_kobj[i], &stats_group);
61964+ if (ret) {
61965+ dev_err(&pdev->dev, "failed to create VF stats attribute: %d", i);
61966+ goto err_vfs_sysfs;
61967+ }
61968+
61969+ /* create VF qos sys attr */
61970+ ret = sysfs_create_group(vfd_obj->vf_kobj[i], &qos_group);
61971+ if (ret) {
61972+ dev_err(&pdev->dev, "failed to create VF qos attribute: %d", i);
61973+ goto err_vfs_sysfs;
61974+ }
61975+ }
61976+
61977+ return 0;
61978+
61979+err_vfs_sysfs:
61980+ for (; i >= 0; i--)
61981+ kobject_put(vfd_obj->vf_kobj[i]);
61982+ return ret;
61983+}
61984+
61985+/**
61986+ * create_vfd_sysfs - create sysfs hierarchy used by VF-d
61987+ * @pdev: PCI device information struct
61988+ * @num_alloc_vfs: number of VFs to allocate
61989+ *
61990+ * If the kobjects were not able to be created, NULL will be returned.
61991+ **/
61992+struct vfd_objects *create_vfd_sysfs(struct pci_dev *pdev, int num_alloc_vfs)
61993+{
61994+ struct vfd_objects *vfd_obj;
61995+ int ret;
61996+
61997+ vfd_obj = kzalloc(sizeof(*vfd_obj) +
61998+ sizeof(struct kobject *)*num_alloc_vfs, GFP_KERNEL);
61999+ if (!vfd_obj)
62000+ return NULL;
62001+
62002+ vfd_obj->num_vfs = num_alloc_vfs;
62003+
62004+ vfd_obj->sriov_kobj = kobject_create_and_add("sriov", &pdev->dev.kobj);
62005+ if (!vfd_obj->sriov_kobj)
62006+ goto err_sysfs;
62007+
62008+ dev_info(&pdev->dev, "created %s sysfs", vfd_obj->sriov_kobj->name);
62009+
62010+ ret = create_vfs_sysfs(pdev, vfd_obj);
62011+ if (ret)
62012+ goto err_sysfs;
62013+
62014+ /* create PF qos sys attr */
62015+ ret = sysfs_create_group(vfd_obj->sriov_kobj, &pf_qos_group);
62016+ if (ret) {
62017+ dev_err(&pdev->dev, "failed to create PF qos sys attribute");
62018+ goto err_sysfs;
62019+ }
62020+
62021+ /* create PF attrs */
62022+ ret = sysfs_create_group(vfd_obj->sriov_kobj, &pf_attr_group);
62023+ if (ret) {
62024+ dev_err(&pdev->dev, "failed to create PF attr sys attribute");
62025+ goto err_sysfs;
62026+ }
62027+ return vfd_obj;
62028+
62029+err_sysfs:
62030+ kobject_put(vfd_obj->sriov_kobj);
62031+ kfree(vfd_obj);
62032+ return NULL;
62033+}
62034+
62035+/**
62036+ * destroy_vfd_sysfs - destroy sysfs hierarchy used by VF-d
62037+ * @pdev: PCI device information struct
62038+ * @vfd_obj: VF-d kobjects information struct
62039+ **/
62040+void destroy_vfd_sysfs(struct pci_dev *pdev, struct vfd_objects *vfd_obj)
62041+{
62042+ int i;
62043+
62044+ for (i = 0; i < vfd_obj->num_vfs; i++) {
62045+ dev_info(&pdev->dev, "deleting VF %s sysfs",
62046+ vfd_obj->vf_kobj[i]->name);
62047+ kobject_put(vfd_obj->vf_kobj[i]);
62048+ }
62049+
62050+ dev_info(&pdev->dev, "deleting %s sysfs", vfd_obj->sriov_kobj->name);
62051+ kobject_put(vfd_obj->sriov_kobj);
62052+ kfree(vfd_obj);
62053+}
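/*
 * A hedged sketch of how a PF driver could wire create_vfd_sysfs() and
 * destroy_vfd_sysfs() into its sriov_configure callback. The i40e call sites
 * are not part of this hunk; everything named example_* below is
 * illustrative only.
 */
static struct vfd_objects *example_vfd_obj;

static int example_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs == 0) {
		if (example_vfd_obj) {
			destroy_vfd_sysfs(pdev, example_vfd_obj);
			example_vfd_obj = NULL;
		}
		return 0;
	}

	example_vfd_obj = create_vfd_sysfs(pdev, num_vfs);
	if (!example_vfd_obj)
		return -ENOMEM;	/* create_vfd_sysfs() returns NULL on failure */

	return num_vfs;
}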
62054diff --git a/drivers/net/ethernet/intel/i40e/kcompat_vfd.h b/drivers/net/ethernet/intel/i40e/kcompat_vfd.h
62055new file mode 100644
62056index 000000000..894cd2607
62057--- /dev/null
62058+++ b/drivers/net/ethernet/intel/i40e/kcompat_vfd.h
62059@@ -0,0 +1,141 @@
62060+/* SPDX-License-Identifier: GPL-2.0 */
62061+/* Copyright(c) 2013 - 2020 Intel Corporation. */
62062+
62063+#ifndef _KCOMPAT_VFD_H_
62064+#define _KCOMPAT_VFD_H_
62065+
62066+#define VFD_PROMISC_OFF 0x00
62067+#define VFD_PROMISC_UNICAST 0x01
62068+#define VFD_PROMISC_MULTICAST 0x02
62069+
62070+#define VFD_LINKSTATE_OFF 0x00
62071+#define VFD_LINKSTATE_ON 0x01
62072+#define VFD_LINKSTATE_AUTO 0x02
62073+
62074+#define VFD_EGRESS_MIRROR_OFF -1
62075+#define VFD_INGRESS_MIRROR_OFF -1
62076+
62077+/**
62078+ * struct vfd_objects - VF-d kobjects information struct
62079+ * @num_vfs: number of VFs allocated
62080+ * @sriov_kobj: pointer to the top sriov kobject
62081+ * @vf_kobj: array of pointer to each VF's kobjects
62082+ */
62083+struct vfd_objects {
62084+ int num_vfs;
62085+ struct kobject *sriov_kobj;
62086+ struct kobject *vf_kobj[0];
62087+};
62088+
62089+struct vfd_macaddr {
62090+ u8 mac[ETH_ALEN];
62091+ struct list_head list;
62092+};
62093+
62094+#define VFD_LINK_SPEED_2_5GB_SHIFT 0x0
62095+#define VFD_LINK_SPEED_100MB_SHIFT 0x1
62096+#define VFD_LINK_SPEED_1GB_SHIFT 0x2
62097+#define VFD_LINK_SPEED_10GB_SHIFT 0x3
62098+#define VFD_LINK_SPEED_40GB_SHIFT 0x4
62099+#define VFD_LINK_SPEED_20GB_SHIFT 0x5
62100+#define VFD_LINK_SPEED_25GB_SHIFT 0x6
62101+#define VFD_LINK_SPEED_5GB_SHIFT 0x7
62102+
62103+
62104+enum vfd_link_speed {
62105+ VFD_LINK_SPEED_UNKNOWN = 0,
62106+ VFD_LINK_SPEED_100MB = BIT(VFD_LINK_SPEED_100MB_SHIFT),
62107+ VFD_LINK_SPEED_1GB = BIT(VFD_LINK_SPEED_1GB_SHIFT),
62108+ VFD_LINK_SPEED_2_5GB = BIT(VFD_LINK_SPEED_2_5GB_SHIFT),
62109+ VFD_LINK_SPEED_5GB = BIT(VFD_LINK_SPEED_5GB_SHIFT),
62110+ VFD_LINK_SPEED_10GB = BIT(VFD_LINK_SPEED_10GB_SHIFT),
62111+ VFD_LINK_SPEED_40GB = BIT(VFD_LINK_SPEED_40GB_SHIFT),
62112+ VFD_LINK_SPEED_20GB = BIT(VFD_LINK_SPEED_20GB_SHIFT),
62113+ VFD_LINK_SPEED_25GB = BIT(VFD_LINK_SPEED_25GB_SHIFT),
62114+};
62115+
62116+struct vfd_ops {
62117+ int (*get_trunk)(struct pci_dev *pdev, int vf_id, unsigned long *buff);
62118+ int (*set_trunk)(struct pci_dev *pdev, int vf_id,
62119+ const unsigned long *buff);
62120+ int (*get_vlan_mirror)(struct pci_dev *pdev, int vf_id,
62121+ unsigned long *buff);
62122+ int (*set_vlan_mirror)(struct pci_dev *pdev, int vf_id,
62123+ const unsigned long *buff);
62124+ int (*get_egress_mirror)(struct pci_dev *pdev, int vf_id, int *data);
62125+ int (*set_egress_mirror)(struct pci_dev *pdev, int vf_id,
62126+ const int data);
62127+ int (*get_ingress_mirror)(struct pci_dev *pdev, int vf_id, int *data);
62128+ int (*set_ingress_mirror)(struct pci_dev *pdev, int vf_id,
62129+ const int data);
62130+ int (*get_mac_anti_spoof)(struct pci_dev *pdev, int vf_id, bool *data);
62131+ int (*set_mac_anti_spoof)(struct pci_dev *pdev, int vf_id,
62132+ const bool data);
62133+ int (*get_vlan_anti_spoof)(struct pci_dev *pdev, int vf_id, bool *data);
62134+ int (*set_vlan_anti_spoof)(struct pci_dev *pdev, int vf_id,
62135+ const bool data);
62136+ int (*get_allow_untagged)(struct pci_dev *pdev, int vf_id, bool *data);
62137+ int (*set_allow_untagged)(struct pci_dev *pdev, int vf_id,
62138+ const bool data);
62139+ int (*get_loopback)(struct pci_dev *pdev, int vf_id, bool *data);
62140+ int (*set_loopback)(struct pci_dev *pdev, int vf_id, const bool data);
62141+ int (*get_mac)(struct pci_dev *pdev, int vf_id, u8 *macaddr);
62142+ int (*set_mac)(struct pci_dev *pdev, int vf_id, const u8 *macaddr);
62143+ int (*get_mac_list)(struct pci_dev *pdev, int vf_id,
62144+ struct list_head *mac_list);
62145+ int (*add_macs_to_list)(struct pci_dev *pdev, int vf_id,
62146+ struct list_head *mac_list);
62147+ int (*rem_macs_from_list)(struct pci_dev *pdev, int vf_id,
62148+ struct list_head *mac_list);
62149+ int (*get_promisc)(struct pci_dev *pdev, int vf_id, u8 *data);
62150+ int (*set_promisc)(struct pci_dev *pdev, int vf_id, const u8 data);
62151+ int (*get_vlan_strip)(struct pci_dev *pdev, int vf_id, bool *data);
62152+ int (*set_vlan_strip)(struct pci_dev *pdev, int vf_id, const bool data);
62153+ int (*get_link_state)(struct pci_dev *pdev, int vf_id, bool *enabled,
62154+ enum vfd_link_speed *link_speed);
62155+ int (*set_link_state)(struct pci_dev *pdev, int vf_id, const u8 data);
62156+ int (*get_max_tx_rate)(struct pci_dev *pdev, int vf_id,
62157+ unsigned int *max_tx_rate);
62158+ int (*set_max_tx_rate)(struct pci_dev *pdev, int vf_id,
62159+ unsigned int *max_tx_rate);
62160+ int (*get_min_tx_rate)(struct kobject *,
62161+ struct kobj_attribute *, char *);
62162+ int (*set_min_tx_rate)(struct kobject *, struct kobj_attribute *,
62163+ const char *, size_t);
62164+ int (*get_spoofcheck)(struct kobject *,
62165+ struct kobj_attribute *, char *);
62166+ int (*set_spoofcheck)(struct kobject *, struct kobj_attribute *,
62167+ const char *, size_t);
62168+ int (*get_trust)(struct kobject *,
62169+ struct kobj_attribute *, char *);
62170+ int (*set_trust)(struct kobject *, struct kobj_attribute *,
62171+ const char *, size_t);
62172+ int (*get_vf_enable)(struct pci_dev *pdev, int vf_id, bool *data);
62173+ int (*set_vf_enable)(struct pci_dev *pdev, int vf_id, const bool data);
62174+ int (*get_rx_bytes) (struct pci_dev *pdev, int vf_id, u64 *data);
62175+ int (*get_rx_dropped)(struct pci_dev *pdev, int vf_id, u64 *data);
62176+ int (*get_rx_packets)(struct pci_dev *pdev, int vf_id, u64 *data);
62177+ int (*get_tx_bytes) (struct pci_dev *pdev, int vf_id, u64 *data);
62178+ int (*get_tx_dropped)(struct pci_dev *pdev, int vf_id, u64 *data);
62179+ int (*get_tx_packets)(struct pci_dev *pdev, int vf_id, u64 *data);
62180+ int (*get_tx_spoofed)(struct pci_dev *pdev, int vf_id, u64 *data);
62181+ int (*get_tx_errors)(struct pci_dev *pdev, int vf_id, u64 *data);
62182+ int (*reset_stats)(struct pci_dev *pdev, int vf_id);
62183+ int (*set_vf_bw_share)(struct pci_dev *pdev, int vf_id, u8 bw_share);
62184+ int (*get_vf_bw_share)(struct pci_dev *pdev, int vf_id, u8 *bw_share);
62185+ int (*set_pf_qos_apply)(struct pci_dev *pdev);
62186+ int (*get_pf_ingress_mirror)(struct pci_dev *pdev, int *data);
62187+ int (*set_pf_ingress_mirror)(struct pci_dev *pdev, const int data);
62188+ int (*get_pf_egress_mirror)(struct pci_dev *pdev, int *data);
62189+ int (*set_pf_egress_mirror)(struct pci_dev *pdev, const int data);
62190+ int (*get_pf_tpid)(struct pci_dev *pdev, u16 *data);
62191+ int (*set_pf_tpid)(struct pci_dev *pdev, const u16 data);
62192+ int (*get_num_queues)(struct pci_dev *pdev, int vf_id, int *num_queues);
62193+ int (*set_num_queues)(struct pci_dev *pdev, int vf_id, const int num_queues);
62194+ int (*get_trust_state)(struct pci_dev *pdev, int vf_id, bool *data);
62195+ int (*set_trust_state)(struct pci_dev *pdev, int vf_id, bool data);
62196+};
62197+
62198+extern const struct vfd_ops *vfd_ops;
62199+
62200+#endif /* _KCOMPAT_VFD_H_ */
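/*
 * The header above only declares the global vfd_ops pointer; the i40e hunk
 * that defines and assigns it is not shown here. Under that assumption, a
 * driver would provide a table like the sketch below, leaving unsupported
 * callbacks NULL so that the corresponding sysfs handlers return
 * -EOPNOTSUPP. The example_* names are illustrative only.
 */
static int example_get_rx_bytes(struct pci_dev *pdev, int vf_id, u64 *data)
{
	*data = 0;	/* a real driver would read its per-VF counters here */
	return 0;
}

static const struct vfd_ops example_vfd_ops = {
	.get_rx_bytes = example_get_rx_bytes,
	/* callbacks left NULL -> -EOPNOTSUPP from the sysfs handlers */
};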
62201diff --git a/drivers/net/ethernet/intel/i40e/virtchnl.h b/drivers/net/ethernet/intel/i40e/virtchnl.h
62202new file mode 100644
62203index 000000000..c1b1ab3b7
62204--- /dev/null
62205+++ b/drivers/net/ethernet/intel/i40e/virtchnl.h
62206@@ -0,0 +1,949 @@
62207+/* SPDX-License-Identifier: GPL-2.0 */
62208+/* Copyright(c) 2013 - 2020 Intel Corporation. */
62209+
62210+#ifndef _VIRTCHNL_H_
62211+#define _VIRTCHNL_H_
62212+
62213+/* Description:
62214+ * This header file describes the VF-PF communication protocol used
62215+ * by the drivers for all devices starting from our 40G product line
62216+ *
62217+ * Admin queue buffer usage:
62218+ * desc->opcode is always aqc_opc_send_msg_to_pf
62219+ * flags, retval, datalen, and data addr are all used normally.
62220+ * The Firmware copies the cookie fields when sending messages between the
62221+ * PF and VF, but uses all other fields internally. Due to this limitation,
62222+ * we must send all messages as "indirect", i.e. using an external buffer.
62223+ *
62224+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
62224+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
62225+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
62226+ * have a maximum of sixteen queues for all of its VSIs.
62227+ *
62228+ * The PF is required to return a status code in v_retval for all messages
62229+ * except RESET_VF, which does not require any response. The return value
62230+ * is of status_code type, defined in the shared type.h.
62231+ *
62232+ * In general, VF driver initialization should roughly follow the order of
62233+ * these opcodes. The VF driver must first validate the API version of the
62234+ * PF driver, then request a reset, then get resources, then configure
62235+ * queues and interrupts. After these operations are complete, the VF
62236+ * driver may start its queues, optionally add MAC and VLAN filters, and
62237+ * process traffic.
62238+ */
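/*
 * The initialization order described above, summarized with the opcodes
 * defined later in this file (an informal reading aid, not additional
 * protocol rules):
 *
 *	VIRTCHNL_OP_VERSION           - agree on the API version
 *	VIRTCHNL_OP_RESET_VF          - request reset, then poll VFGEN_RSTAT
 *	VIRTCHNL_OP_GET_VF_RESOURCES  - learn VSIs, queues and capabilities
 *	VIRTCHNL_OP_CONFIG_VSI_QUEUES - configure TX/RX queues
 *	VIRTCHNL_OP_CONFIG_IRQ_MAP    - map queues to interrupt vectors
 *	VIRTCHNL_OP_ENABLE_QUEUES     - start traffic
 *	VIRTCHNL_OP_ADD_ETH_ADDR / VIRTCHNL_OP_ADD_VLAN - optional filters
 */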
62239+
62240+/* START GENERIC DEFINES
62241+ * Need to ensure the following enums and defines hold the same meaning and
62242+ * value in current and future projects
62243+ */
62244+
62245+/* Error Codes */
62246+enum virtchnl_status_code {
62247+ VIRTCHNL_STATUS_SUCCESS = 0,
62248+ VIRTCHNL_STATUS_ERR_PARAM = -5,
62249+ VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
62250+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
62251+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
62252+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
62253+ VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
62254+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
62255+};
62256+
62257+/* Backward compatibility */
62258+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
62259+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
62260+
62261+#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
62262+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
62263+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
62264+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
62265+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
62266+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
62267+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
62268+#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
62269+
62270+enum virtchnl_link_speed {
62271+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
62272+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
62273+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
62274+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
62275+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
62276+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
62277+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
62278+ VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
62279+ VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
62280+};
62281+
62282+/* for hsplit_0 field of Rx HMC context */
62283+/* deprecated with AVF 1.0 */
62284+enum virtchnl_rx_hsplit {
62285+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
62286+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
62287+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
62288+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
62289+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
62290+};
62291+
62292+/* END GENERIC DEFINES */
62293+
62294+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
62295+ * of the virtchnl_msg structure.
62296+ */
62297+enum virtchnl_ops {
62298+/* The PF sends status change events to VFs using
62299+ * the VIRTCHNL_OP_EVENT opcode.
62300+ * VFs send requests to the PF using the other ops.
62301+ * Use of "advanced opcode" features must be negotiated as part of capabilities
62302+ * exchange and are not considered part of base mode feature set.
62303+ */
62304+ VIRTCHNL_OP_UNKNOWN = 0,
62305+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
62306+ VIRTCHNL_OP_RESET_VF = 2,
62307+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
62308+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
62309+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
62310+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
62311+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
62312+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
62313+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
62314+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
62315+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
62316+ VIRTCHNL_OP_ADD_VLAN = 12,
62317+ VIRTCHNL_OP_DEL_VLAN = 13,
62318+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
62319+ VIRTCHNL_OP_GET_STATS = 15,
62320+ VIRTCHNL_OP_RSVD = 16,
62321+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
62322+ /* opcode 19 is reserved */
62323+ VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
62324+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
62325+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
62326+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
62327+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
62328+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
62329+ VIRTCHNL_OP_SET_RSS_HENA = 26,
62330+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
62331+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
62332+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
62333+ VIRTCHNL_OP_ENABLE_CHANNELS = 30,
62334+ VIRTCHNL_OP_DISABLE_CHANNELS = 31,
62335+ VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
62336+ VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
62337+ /* opcodes 34, 35, 36, 37 and 38 are reserved */
62338+};
62339+
62340+/* These macros are used to generate compilation errors if a structure/union
62341+ * is not exactly the correct length. It gives a divide by zero error if the
62342+ * structure/union is not of the correct size, otherwise it creates an enum
62343+ * that is never used.
62344+ */
62345+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
62346+ { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
62347+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
62348+ { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
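/*
 * Worked expansion of the check used throughout this file:
 * VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg) expands to
 *
 *	enum virtchnl_static_assert_enum_virtchnl_msg {
 *		virtchnl_static_assert_virtchnl_msg =
 *			(20) / ((sizeof(struct virtchnl_msg) == (20)) ? 1 : 0)
 *	};
 *
 * so any drift in the structure size turns into a divide-by-zero at compile
 * time rather than a silent wire-format mismatch.
 */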
62349+
62350+/* Virtual channel message descriptor. This overlays the admin queue
62351+ * descriptor. All other data is passed in external buffers.
62352+ */
62353+
62354+struct virtchnl_msg {
62355+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
62356+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
62357+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
62358+ u32 vfid; /* used by PF when sending to VF */
62359+};
62360+
62361+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
62362+
62363+/* Message descriptions and data structures. */
62364+
62365+/* VIRTCHNL_OP_VERSION
62366+ * VF posts its version number to the PF. PF responds with its version number
62367+ * in the same format, along with a return code.
62368+ * Reply from PF has its major/minor versions also in param0 and param1.
62369+ * If there is a major version mismatch, then the VF cannot operate.
62370+ * If there is a minor version mismatch, then the VF can operate but should
62371+ * add a warning to the system log.
62372+ *
62373+ * This enum element MUST always be specified as == 1, regardless of other
62374+ * changes in the API. The PF must always respond to this message without
62375+ * error regardless of version mismatch.
62376+ */
62377+#define VIRTCHNL_VERSION_MAJOR 1
62378+#define VIRTCHNL_VERSION_MINOR 1
62379+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
62380+
62381+struct virtchnl_version_info {
62382+ u32 major;
62383+ u32 minor;
62384+};
62385+
62386+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
62387+
62388+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
62389+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
62390+
62391+/* VIRTCHNL_OP_RESET_VF
62392+ * VF sends this request to PF with no parameters
62393+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
62394+ * until reset completion is indicated. The admin queue must be reinitialized
62395+ * after this operation.
62396+ *
62397+ * When reset is complete, PF must ensure that all queues in all VSIs associated
62398+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
62399+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
62400+ * are cleared.
62401+ */
62402+
62403+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
62404+ * vsi_type should always be 6 for backward compatibility. Add other fields
62405+ * as needed.
62406+ */
62407+enum virtchnl_vsi_type {
62408+ VIRTCHNL_VSI_TYPE_INVALID = 0,
62409+ VIRTCHNL_VSI_SRIOV = 6,
62410+};
62411+
62412+/* VIRTCHNL_OP_GET_VF_RESOURCES
62413+ * Version 1.0 VF sends this request to PF with no parameters
62414+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
62415+ * PF responds with an indirect message containing
62416+ * virtchnl_vf_resource and one or more
62417+ * virtchnl_vsi_resource structures.
62418+ */
62419+
62420+struct virtchnl_vsi_resource {
62421+ u16 vsi_id;
62422+ u16 num_queue_pairs;
62423+ enum virtchnl_vsi_type vsi_type;
62424+ u16 qset_handle;
62425+ u8 default_mac_addr[ETH_ALEN];
62426+};
62427+
62428+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
62429+
62430+/* VF capability flags
62431+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
62432+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
62433+ */
62434+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
62435+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
62436+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
62437+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
62438+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
62439+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
62440+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
62441+#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080
62442+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
62443+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
62444+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
62445+#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
62446+#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
62447+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
62448+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
62449+#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
62450+#define VIRTCHNL_VF_OFFLOAD_ADQ_V2 0X01000000
62451+#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000
62452+ /* 0X80000000 is reserved */
62453+
62454+/* Define below the capability flags that are not offloads */
62455+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
62456+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
62457+ VIRTCHNL_VF_OFFLOAD_VLAN | \
62458+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
62459+
62460+struct virtchnl_vf_resource {
62461+ u16 num_vsis;
62462+ u16 num_queue_pairs;
62463+ u16 max_vectors;
62464+ u16 max_mtu;
62465+
62466+ u32 vf_cap_flags;
62467+ u32 rss_key_size;
62468+ u32 rss_lut_size;
62469+
62470+ struct virtchnl_vsi_resource vsi_res[1];
62471+};
62472+
62473+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
62474+
62475+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
62476+ * VF sends this message to set up parameters for one TX queue.
62477+ * External data buffer contains one instance of virtchnl_txq_info.
62478+ * PF configures requested queue and returns a status code.
62479+ */
62480+
62481+/* Tx queue config info */
62482+struct virtchnl_txq_info {
62483+ u16 vsi_id;
62484+ u16 queue_id;
62485+ u16 ring_len; /* number of descriptors, multiple of 8 */
62486+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
62487+ u64 dma_ring_addr;
62488+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
62489+};
62490+
62491+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
62492+
62493+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
62494+ * VF sends this message to set up parameters for one RX queue.
62495+ * External data buffer contains one instance of virtchnl_rxq_info.
62496+ * PF configures requested queue and returns a status code. The
62497+ * crc_disable flag disables CRC stripping on the VF. Setting
62498+ * the crc_disable flag to 1 will disable CRC stripping for each
62499+ * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
62500+ * offload must have been set prior to sending this info or the PF
62501+ * will ignore the request. This flag should be set the same for
62502+ * all of the queues for a VF.
62503+ */
62504+
62505+/* Rx queue config info */
62506+struct virtchnl_rxq_info {
62507+ u16 vsi_id;
62508+ u16 queue_id;
62509+ u32 ring_len; /* number of descriptors, multiple of 32 */
62510+ u16 hdr_size;
62511+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
62512+ u32 databuffer_size;
62513+ u32 max_pkt_size;
62514+ u8 crc_disable;
62515+ u8 pad1[3];
62516+ u64 dma_ring_addr;
62517+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
62518+ u32 pad2;
62519+};
62520+
62521+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
62522+
62523+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
62524+ * VF sends this message to set parameters for active TX and RX queues
62525+ * associated with the specified VSI.
62526+ * PF configures queues and returns status.
62527+ * If the number of queues specified is greater than the number of queues
62528+ * associated with the VSI, an error is returned and no queues are configured.
62529+ * NOTE: The VF is not required to configure all queues in a single request.
62530+ * It may send multiple messages. PF drivers must correctly handle all VF
62531+ * requests.
62532+ */
62533+struct virtchnl_queue_pair_info {
62534+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
62535+ struct virtchnl_txq_info txq;
62536+ struct virtchnl_rxq_info rxq;
62537+};
62538+
62539+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
62540+
62541+struct virtchnl_vsi_queue_config_info {
62542+ u16 vsi_id;
62543+ u16 num_queue_pairs;
62544+ u32 pad;
62545+ struct virtchnl_queue_pair_info qpair[1];
62546+};
62547+
62548+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
62549+
62550+/* VIRTCHNL_OP_REQUEST_QUEUES
62551+ * VF sends this message to request the PF to allocate additional queues to
62552+ * this VF. Each VF gets a guaranteed number of queues on init but asking for
62553+ * additional queues must be negotiated. This is a best effort request as it
62554+ * is possible the PF does not have enough queues left to support the request.
62555+ * If the PF cannot support the number requested it will respond with the
62556+ * maximum number it is able to support. If the request is successful, PF will
62557+ * then reset the VF to institute required changes.
62558+ */
62559+
62560+/* VF resource request */
62561+struct virtchnl_vf_res_request {
62562+ u16 num_queue_pairs;
62563+};
62564+
62565+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
62566+ * VF uses this message to map vectors to queues.
62567+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
62568+ * are to be associated with the specified vector.
62569+ * The "other" causes are always mapped to vector 0. The VF may not request
62570+ * that vector 0 be used for traffic.
62571+ * PF configures interrupt mapping and returns status.
62572+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
62573+ * should be mapped to interrupts, even if the driver intends to operate
62574+ * only in polling mode. In this case the interrupt may be disabled, but
62575+ * the ITR timer will still run to trigger writebacks.
62576+ */
62577+struct virtchnl_vector_map {
62578+ u16 vsi_id;
62579+ u16 vector_id;
62580+ u16 rxq_map;
62581+ u16 txq_map;
62582+ u16 rxitr_idx;
62583+ u16 txitr_idx;
62584+};
62585+
62586+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
62587+
62588+struct virtchnl_irq_map_info {
62589+ u16 num_vectors;
62590+ struct virtchnl_vector_map vecmap[1];
62591+};
62592+
62593+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
62594+
62595+/* VIRTCHNL_OP_ENABLE_QUEUES
62596+ * VIRTCHNL_OP_DISABLE_QUEUES
62597+ * VF sends these messages to enable or disable TX/RX queue pairs.
62598+ * The queues fields are bitmaps indicating which queues to act upon.
62599+ * (Currently, we only support 16 queues per VF, but we make the field
62600+ * u32 to allow for expansion.)
62601+ * PF performs requested action and returns status.
62602+ * NOTE: The VF is not required to enable/disable all queues in a single
62603+ * request. It may send multiple messages.
62604+ * PF drivers must correctly handle all VF requests.
62605+ */
62606+struct virtchnl_queue_select {
62607+ u16 vsi_id;
62608+ u16 pad;
62609+ u32 rx_queues;
62610+ u32 tx_queues;
62611+};
62612+
62613+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
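/*
 * A minimal sketch of the bitmap encoding described above: enabling the
 * first four TX/RX queue pairs of VSI 3 for VIRTCHNL_OP_ENABLE_QUEUES.
 * The example_* name is illustrative only.
 */
static inline void example_fill_queue_select(struct virtchnl_queue_select *qsel)
{
	qsel->vsi_id = 3;
	qsel->pad = 0;
	qsel->rx_queues = 0xF;	/* bitmap: RX queues 0-3 */
	qsel->tx_queues = 0xF;	/* bitmap: TX queues 0-3 */
}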
62614+
62615+/* VIRTCHNL_OP_ADD_ETH_ADDR
62616+ * VF sends this message in order to add one or more unicast or multicast
62617+ * address filters for the specified VSI.
62618+ * PF adds the filters and returns status.
62619+ */
62620+
62621+/* VIRTCHNL_OP_DEL_ETH_ADDR
62622+ * VF sends this message in order to remove one or more unicast or multicast
62623+ * filters for the specified VSI.
62624+ * PF removes the filters and returns status.
62625+ */
62626+
62627+struct virtchnl_ether_addr {
62628+ u8 addr[ETH_ALEN];
62629+ u8 pad[2];
62630+};
62631+
62632+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
62633+
62634+struct virtchnl_ether_addr_list {
62635+ u16 vsi_id;
62636+ u16 num_elements;
62637+ struct virtchnl_ether_addr list[1];
62638+};
62639+
62640+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
62641+
62642+/* VIRTCHNL_OP_ADD_VLAN
62643+ * VF sends this message to add one or more VLAN tag filters for receives.
62644+ * PF adds the filters and returns status.
62645+ * If a port VLAN is configured by the PF, this operation will return an
62646+ * error to the VF.
62647+ */
62648+
62649+/* VIRTCHNL_OP_DEL_VLAN
62650+ * VF sends this message to remove one or more VLAN tag filters for receives.
62651+ * PF removes the filters and returns status.
62652+ * If a port VLAN is configured by the PF, this operation will return an
62653+ * error to the VF.
62654+ */
62655+
62656+struct virtchnl_vlan_filter_list {
62657+ u16 vsi_id;
62658+ u16 num_elements;
62659+ u16 vlan_id[1];
62660+};
62661+
62662+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
62663+
62664+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
62665+ * VF sends VSI id and flags.
62666+ * PF returns status code in retval.
62667+ * Note: we assume that broadcast accept mode is always enabled.
62668+ */
62669+struct virtchnl_promisc_info {
62670+ u16 vsi_id;
62671+ u16 flags;
62672+};
62673+
62674+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
62675+
62676+#define FLAG_VF_UNICAST_PROMISC 0x00000001
62677+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
62678+
62679+/* VIRTCHNL_OP_GET_STATS
62680+ * VF sends this message to request stats for the selected VSI. VF uses
62681+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
62682+ * field is ignored by the PF.
62683+ *
62684+ * PF replies with struct virtchnl_eth_stats in an external buffer.
62685+ */
62686+
62687+struct virtchnl_eth_stats {
62688+ u64 rx_bytes; /* received bytes */
62689+ u64 rx_unicast; /* received unicast pkts */
62690+ u64 rx_multicast; /* received multicast pkts */
62691+ u64 rx_broadcast; /* received broadcast pkts */
62692+ u64 rx_discards;
62693+ u64 rx_unknown_protocol;
62694+ u64 tx_bytes; /* transmitted bytes */
62695+ u64 tx_unicast; /* transmitted unicast pkts */
62696+ u64 tx_multicast; /* transmitted multicast pkts */
62697+ u64 tx_broadcast; /* transmitted broadcast pkts */
62698+ u64 tx_discards;
62699+ u64 tx_errors;
62700+};
62701+
62702+/* VIRTCHNL_OP_CONFIG_RSS_KEY
62703+ * VIRTCHNL_OP_CONFIG_RSS_LUT
62704+ * VF sends these messages to configure RSS. Only supported if both PF
62705+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
62706+ * configuration negotiation. If this is the case, then the RSS fields in
62707+ * the VF resource struct are valid.
62708+ * Both the key and LUT are initialized to 0 by the PF, meaning that
62709+ * RSS is effectively disabled until set up by the VF.
62710+ */
62711+struct virtchnl_rss_key {
62712+ u16 vsi_id;
62713+ u16 key_len;
62714+ u8 key[1]; /* RSS hash key, packed bytes */
62715+};
62716+
62717+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
62718+
62719+struct virtchnl_rss_lut {
62720+ u16 vsi_id;
62721+ u16 lut_entries;
62722+ u8 lut[1]; /* RSS lookup table */
62723+};
62724+
62725+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
62726+
62727+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
62728+ * VIRTCHNL_OP_SET_RSS_HENA
62729+ * VF sends these messages to get and set the hash filter enable bits for RSS.
62730+ * By default, the PF sets these to all possible traffic types that the
62731+ * hardware supports. The VF can query this value if it wants to change the
62732+ * traffic types that are hashed by the hardware.
62733+ */
62734+struct virtchnl_rss_hena {
62735+ u64 hena;
62736+};
62737+
62738+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
62739+
62740+/* This is used by PF driver to enforce how many channels can be supported.
62741+ * When the ADQ_V2 capability is negotiated, it will allow 16 channels;
62742+ * otherwise the PF driver will allow only a maximum of 4 channels
62743+ */
62744+#define VIRTCHNL_MAX_ADQ_CHANNELS 4
62745+#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16
62746+
62747+/* VIRTCHNL_OP_ENABLE_CHANNELS
62748+ * VIRTCHNL_OP_DISABLE_CHANNELS
62749+ * VF sends these messages to enable or disable channels based on
62750+ * the user specified queue count and queue offset for each traffic class.
62751+ * This struct encompasses all the information that the PF needs from
62752+ * VF to create a channel.
62753+ */
62754+struct virtchnl_channel_info {
62755+ u16 count; /* number of queues in a channel */
62756+ u16 offset; /* queues in a channel start from 'offset' */
62757+ u32 pad;
62758+ u64 max_tx_rate;
62759+};
62760+
62761+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
62762+
62763+struct virtchnl_tc_info {
62764+ u32 num_tc;
62765+ u32 pad;
62766+ struct virtchnl_channel_info list[1];
62767+};
62768+
62769+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
62770+
62771+/* VIRTCHNL_ADD_CLOUD_FILTER
62772+ * VIRTCHNL_DEL_CLOUD_FILTER
62773+ * VF sends these messages to add or delete a cloud filter based on the
62774+ * user specified match and action filters. These structures encompass
62775+ * all the information that the PF needs from the VF to add/delete a
62776+ * cloud filter.
62777+ */
62778+
62779+struct virtchnl_l4_spec {
62780+ u8 src_mac[ETH_ALEN];
62781+ u8 dst_mac[ETH_ALEN];
62782+	/* vlan_prio is part of this 16 bit field, even from the OS perspective:
62783+	 * bits 11..0 hold the actual vlan_id and bits 14..12 hold vlan_prio.
62784+	 * In the future, if vlan_prio offload is added, that information will
62785+	 * be passed as part of the "vlan_id" field, in bits 14..12.
62786+ */
62787+ __be16 vlan_id;
62788+ __be16 pad; /* reserved for future use */
62789+ __be32 src_ip[4];
62790+ __be32 dst_ip[4];
62791+ __be16 src_port;
62792+ __be16 dst_port;
62793+};
62794+
62795+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
62796+
62797+union virtchnl_flow_spec {
62798+ struct virtchnl_l4_spec tcp_spec;
62799+ u8 buffer[128]; /* reserved for future use */
62800+};
62801+
62802+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
62803+
62804+enum virtchnl_action {
62805+ /* action types */
62806+ VIRTCHNL_ACTION_DROP = 0,
62807+ VIRTCHNL_ACTION_TC_REDIRECT,
62808+};
62809+
62810+enum virtchnl_flow_type {
62811+ /* flow types */
62812+ VIRTCHNL_TCP_V4_FLOW = 0,
62813+ VIRTCHNL_TCP_V6_FLOW,
62814+ VIRTCHNL_UDP_V4_FLOW,
62815+ VIRTCHNL_UDP_V6_FLOW,
62816+};
62817+
62818+struct virtchnl_filter {
62819+ union virtchnl_flow_spec data;
62820+ union virtchnl_flow_spec mask;
62821+ enum virtchnl_flow_type flow_type;
62822+ enum virtchnl_action action;
62823+ u32 action_meta;
62824+ u8 field_flags;
62825+};
62826+
62827+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
62828+
62829+/* VIRTCHNL_OP_EVENT
62830+ * PF sends this message to inform the VF driver of events that may affect it.
62831+ * No direct response is expected from the VF, though it may generate other
62832+ * messages in response to this one.
62833+ */
62834+enum virtchnl_event_codes {
62835+ VIRTCHNL_EVENT_UNKNOWN = 0,
62836+ VIRTCHNL_EVENT_LINK_CHANGE,
62837+ VIRTCHNL_EVENT_RESET_IMPENDING,
62838+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
62839+};
62840+
62841+#define PF_EVENT_SEVERITY_INFO 0
62842+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
62843+
62844+struct virtchnl_pf_event {
62845+ enum virtchnl_event_codes event;
62846+ union {
62847+ /* If the PF driver does not support the new speed reporting
62848+ * capabilities then use link_event else use link_event_adv to
62849+ * get the speed and link information. The ability to understand
62850+ * new speeds is indicated by setting the capability flag
62851+ * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
62852+ * in virtchnl_vf_resource struct and can be used to determine
62853+ * which link event struct to use below.
62854+ */
62855+ struct {
62856+ enum virtchnl_link_speed link_speed;
62857+ u8 link_status;
62858+ } link_event;
62859+ struct {
62860+ /* link_speed provided in Mbps */
62861+ u32 link_speed;
62862+ u8 link_status;
62863+ } link_event_adv;
62864+ } event_data;
62865+
62866+ int severity;
62867+};
62868+
62869+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
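/*
 * A hedged sketch of decoding a link-change event following the selection
 * rule in the comment above: vf_cap_flags comes from the
 * VIRTCHNL_OP_GET_VF_RESOURCES reply, and the example_* helper name is
 * illustrative only (only a subset of the legacy speeds is translated).
 */
static inline u32 example_link_speed_mbps(const struct virtchnl_pf_event *pfe,
					  u32 vf_cap_flags)
{
	if (vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		return pfe->event_data.link_event_adv.link_speed; /* already Mbps */

	switch (pfe->event_data.link_event.link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		return 100;
	case VIRTCHNL_LINK_SPEED_1GB:
		return 1000;
	case VIRTCHNL_LINK_SPEED_10GB:
		return 10000;
	case VIRTCHNL_LINK_SPEED_40GB:
		return 40000;
	default:
		return 0;
	}
}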
62870+
62871+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
62872+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
62873+ * The request for this originates from the VF IWARP driver through
62874+ * a client interface between VF LAN and VF IWARP driver.
62875+ * A vector could have an AEQ and CEQ attached to it although
62876+ * there is a single AEQ per VF IWARP instance in which case
62877+ * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
62878+ * There will never be a case where there will be multiple CEQs attached
62879+ * to a single vector.
62880+ * PF configures interrupt mapping and returns status.
62881+ */
62882+struct virtchnl_iwarp_qv_info {
62883+ u32 v_idx; /* msix_vector */
62884+ u16 ceq_idx;
62885+ u16 aeq_idx;
62886+ u8 itr_idx;
62887+};
62888+
62889+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
62890+
62891+struct virtchnl_iwarp_qvlist_info {
62892+ u32 num_vectors;
62893+ struct virtchnl_iwarp_qv_info qv_info[1];
62894+};
62895+
62896+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
62897+
62898+/* Since VF messages are limited to a u16 in size, precalculate the maximum
62899+ * number of nested elements of each virtchnl structure that the virtual
62900+ * channel can handle in a single message.
62901+ */
62902+enum virtchnl_vector_limits {
62903+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX =
62904+ ((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
62905+ sizeof(struct virtchnl_queue_pair_info),
62906+
62907+ VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX =
62908+ ((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
62909+ sizeof(struct virtchnl_vector_map),
62910+
62911+ VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX =
62912+ ((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
62913+ sizeof(struct virtchnl_ether_addr),
62914+
62915+ VIRTCHNL_OP_ADD_DEL_VLAN_MAX =
62916+ ((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
62917+ sizeof(u16),
62918+
62919+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX =
62920+ ((u16)(~0) - sizeof(struct virtchnl_iwarp_qvlist_info)) /
62921+ sizeof(struct virtchnl_iwarp_qv_info),
62922+
62923+ VIRTCHNL_OP_ENABLE_CHANNELS_MAX =
62924+ ((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
62925+ sizeof(struct virtchnl_channel_info),
62926+};
62927+
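As a worked example of the arithmetic above: VIRTCHNL_OP_ADD_DEL_VLAN_MAX is the largest num_elements a single VIRTCHNL_OP_ADD_VLAN or VIRTCHNL_OP_DEL_VLAN message can carry, i.e. the maximum u16 message size (65535 bytes) minus the virtchnl_vlan_filter_list header, divided by the two bytes each VLAN ID occupies; virtchnl_vc_validate_vf_msg() below rejects messages that claim more than this.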
62928+/* VF reset states - these are written into the RSTAT register:
62929+ * VFGEN_RSTAT on the VF
62930+ * When the PF initiates a reset, it writes 0
62931+ * When the reset is complete, it writes 1
62932+ * When the PF detects that the VF has recovered, it writes 2
62933+ * VF checks this register periodically to determine if a reset has occurred,
62934+ * then polls it to know when the reset is complete.
62935+ * If either the PF or VF reads the register while the hardware
62936+ * is in a reset state, it will return DEADBEEF, which, when masked
62937+ * will result in 3.
62938+ */
62939+enum virtchnl_vfr_states {
62940+ VIRTCHNL_VFR_INPROGRESS = 0,
62941+ VIRTCHNL_VFR_COMPLETED,
62942+ VIRTCHNL_VFR_VFACTIVE,
62943+};
62944+
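A VF-side polling sketch of the scheme described above (assuming this header is included; example_rd32() and the register offset are illustrative stand-ins for the driver's real register accessor and the device's documented VFGEN_RSTAT offset):

	extern u32 example_rd32(u32 reg);		/* hypothetical register read helper */
	#define EXAMPLE_VFGEN_RSTAT	0x00008800	/* illustrative offset only */

	static bool example_vf_reset_done(void)
	{
		/* A device still in reset returns 0xDEADBEEF, which masks to 3. */
		u32 rstat = example_rd32(EXAMPLE_VFGEN_RSTAT) & 0x3;

		return rstat == VIRTCHNL_VFR_COMPLETED || rstat == VIRTCHNL_VFR_VFACTIVE;
	}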
62945+/**
62946+ * virtchnl_vc_validate_vf_msg
62947+ * @ver: Virtchnl version info
62948+ * @v_opcode: Opcode for the message
62949+ * @msg: pointer to the msg buffer
62950+ * @msglen: msg length
62951+ *
62952+ * validate msg format against struct for each opcode
62953+ */
62954+static inline int
62955+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
62956+ u8 *msg, u16 msglen)
62957+{
62958+ bool err_msg_format = false;
62959+ int valid_len = 0;
62960+
62961+ /* Validate message length. */
62962+ switch (v_opcode) {
62963+ case VIRTCHNL_OP_VERSION:
62964+ valid_len = sizeof(struct virtchnl_version_info);
62965+ break;
62966+ case VIRTCHNL_OP_RESET_VF:
62967+ break;
62968+ case VIRTCHNL_OP_GET_VF_RESOURCES:
62969+ if (VF_IS_V11(ver))
62970+ valid_len = sizeof(u32);
62971+ break;
62972+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
62973+ valid_len = sizeof(struct virtchnl_txq_info);
62974+ break;
62975+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
62976+ valid_len = sizeof(struct virtchnl_rxq_info);
62977+ break;
62978+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
62979+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
62980+ if (msglen >= valid_len) {
62981+ struct virtchnl_vsi_queue_config_info *vqc =
62982+ (struct virtchnl_vsi_queue_config_info *)msg;
62983+
62984+ if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
62985+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
62986+ err_msg_format = true;
62987+ break;
62988+ }
62989+
62990+ valid_len += (vqc->num_queue_pairs *
62991+ sizeof(struct
62992+ virtchnl_queue_pair_info));
62993+ }
62994+ break;
62995+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
62996+ valid_len = sizeof(struct virtchnl_irq_map_info);
62997+ if (msglen >= valid_len) {
62998+ struct virtchnl_irq_map_info *vimi =
62999+ (struct virtchnl_irq_map_info *)msg;
63000+
63001+ if (vimi->num_vectors == 0 || vimi->num_vectors >
63002+ VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
63003+ err_msg_format = true;
63004+ break;
63005+ }
63006+
63007+ valid_len += (vimi->num_vectors *
63008+ sizeof(struct virtchnl_vector_map));
63009+ }
63010+ break;
63011+ case VIRTCHNL_OP_ENABLE_QUEUES:
63012+ case VIRTCHNL_OP_DISABLE_QUEUES:
63013+ valid_len = sizeof(struct virtchnl_queue_select);
63014+ break;
63015+ case VIRTCHNL_OP_ADD_ETH_ADDR:
63016+ case VIRTCHNL_OP_DEL_ETH_ADDR:
63017+ valid_len = sizeof(struct virtchnl_ether_addr_list);
63018+ if (msglen >= valid_len) {
63019+ struct virtchnl_ether_addr_list *veal =
63020+ (struct virtchnl_ether_addr_list *)msg;
63021+
63022+ if (veal->num_elements == 0 || veal->num_elements >
63023+ VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
63024+ err_msg_format = true;
63025+ break;
63026+ }
63027+
63028+ valid_len += veal->num_elements *
63029+ sizeof(struct virtchnl_ether_addr);
63030+ }
63031+ break;
63032+ case VIRTCHNL_OP_ADD_VLAN:
63033+ case VIRTCHNL_OP_DEL_VLAN:
63034+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
63035+ if (msglen >= valid_len) {
63036+ struct virtchnl_vlan_filter_list *vfl =
63037+ (struct virtchnl_vlan_filter_list *)msg;
63038+
63039+ if (vfl->num_elements == 0 || vfl->num_elements >
63040+ VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
63041+ err_msg_format = true;
63042+ break;
63043+ }
63044+
63045+ valid_len += vfl->num_elements * sizeof(u16);
63046+ }
63047+ break;
63048+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
63049+ valid_len = sizeof(struct virtchnl_promisc_info);
63050+ break;
63051+ case VIRTCHNL_OP_GET_STATS:
63052+ valid_len = sizeof(struct virtchnl_queue_select);
63053+ break;
63054+ case VIRTCHNL_OP_IWARP:
63055+ /* These messages are opaque to us and will be validated in
63056+ * the RDMA client code. We just need to check for nonzero
63057+ * length. The firmware will enforce max length restrictions.
63058+ */
63059+ if (msglen)
63060+ valid_len = msglen;
63061+ else
63062+ err_msg_format = true;
63063+ break;
63064+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
63065+ break;
63066+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
63067+ valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
63068+ if (msglen >= valid_len) {
63069+ struct virtchnl_iwarp_qvlist_info *qv =
63070+ (struct virtchnl_iwarp_qvlist_info *)msg;
63071+
63072+ if (qv->num_vectors == 0 || qv->num_vectors >
63073+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX) {
63074+ err_msg_format = true;
63075+ break;
63076+ }
63077+
63078+ valid_len += ((qv->num_vectors - 1) *
63079+ sizeof(struct virtchnl_iwarp_qv_info));
63080+ }
63081+ break;
63082+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
63083+ valid_len = sizeof(struct virtchnl_rss_key);
63084+ if (msglen >= valid_len) {
63085+ struct virtchnl_rss_key *vrk =
63086+ (struct virtchnl_rss_key *)msg;
63087+
63088+ if (vrk->key_len == 0) {
63089+ /* zero length is allowed as input */
63090+ break;
63091+ }
63092+
63093+ valid_len += vrk->key_len - 1;
63094+ }
63095+ break;
63096+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
63097+ valid_len = sizeof(struct virtchnl_rss_lut);
63098+ if (msglen >= valid_len) {
63099+ struct virtchnl_rss_lut *vrl =
63100+ (struct virtchnl_rss_lut *)msg;
63101+
63102+ if (vrl->lut_entries == 0) {
63103+ /* zero entries is allowed as input */
63104+ break;
63105+ }
63106+
63107+ valid_len += vrl->lut_entries - 1;
63108+ }
63109+ break;
63110+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
63111+ break;
63112+ case VIRTCHNL_OP_SET_RSS_HENA:
63113+ valid_len = sizeof(struct virtchnl_rss_hena);
63114+ break;
63115+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
63116+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
63117+ break;
63118+ case VIRTCHNL_OP_REQUEST_QUEUES:
63119+ valid_len = sizeof(struct virtchnl_vf_res_request);
63120+ break;
63121+ case VIRTCHNL_OP_ENABLE_CHANNELS:
63122+ valid_len = sizeof(struct virtchnl_tc_info);
63123+ if (msglen >= valid_len) {
63124+ struct virtchnl_tc_info *vti =
63125+ (struct virtchnl_tc_info *)msg;
63126+
63127+ if (vti->num_tc == 0 || vti->num_tc >
63128+ VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
63129+ err_msg_format = true;
63130+ break;
63131+ }
63132+
63133+ valid_len += (vti->num_tc - 1) *
63134+ sizeof(struct virtchnl_channel_info);
63135+ }
63136+ break;
63137+ case VIRTCHNL_OP_DISABLE_CHANNELS:
63138+ break;
63139+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
63140+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
63141+ valid_len = sizeof(struct virtchnl_filter);
63142+ break;
63143+ /* These are always errors coming from the VF. */
63144+ case VIRTCHNL_OP_EVENT:
63145+ case VIRTCHNL_OP_UNKNOWN:
63146+ default:
63147+ return VIRTCHNL_STATUS_ERR_PARAM;
63148+ }
63149+ /* few more checks */
63150+ if (err_msg_format || valid_len != msglen)
63151+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
63152+
63153+ return 0;
63154+}
63155+#endif /* _VIRTCHNL_H_ */
63156--
631572.20.1
63158
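To show how the validator at the end of that header is intended to be used, a minimal PF-side sketch (assuming virtchnl.h is included; every name here other than virtchnl_vc_validate_vf_msg() and the VIRTCHNL_* constants is illustrative, not an existing driver API):

	static int example_handle_vf_msg(struct virtchnl_version_info *vf_ver,
					 u32 v_opcode, u8 *msg, u16 msglen)
	{
		int err = virtchnl_vc_validate_vf_msg(vf_ver, v_opcode, msg, msglen);

		if (err)	/* VIRTCHNL_STATUS_ERR_PARAM or _OPCODE_MISMATCH */
			return err;

		/* Length and opcode are consistent; safe to dispatch on v_opcode. */
		return 0;
	}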
diff --git a/recipes-kernel/linux/linux-ampere_4.14.bb b/recipes-kernel/linux/linux-ampere_4.14.bb
new file mode 100644
index 0000000..db3cdee
--- /dev/null
+++ b/recipes-kernel/linux/linux-ampere_4.14.bb
@@ -0,0 +1,80 @@
1require linux-ampere_4.14.inc
2include kernel-aufs.inc
3
4################# meta-enea-virtualization/.../linux-intel-host.inc ############
5
6# Generic functionality for containers
7KERNEL_FEATURES_append = " features/vxlan/vxlan_y.scc"
8KERNEL_FEATURES_append = " features/overlayfs/overlayfs_m.scc"
9KERNEL_FEATURES_append = " features/aufs/aufs-enable.scc"
10
11# Docker
12KERNEL_FEATURES_append = " features/netfilter/netfilter_y.scc"
13KERNEL_FEATURES_append = " cfg/net/ip_nf_y.scc"
14
15# LXC
16KERNEL_FEATURES_append = " features/lxc/lxc_y.scc"
17
18# Use the in-kernel OVS module (the out-of-tree OVS module does not support the 4.14 kernel)
19KERNEL_FEATURES_append = " features/openvswitch/openvswitch_support.scc"
20KERNEL_FEATURES_append = " features/openvswitch/openvswitch.scc"
21KERNEL_MODULE_AUTOLOAD += "tun"
22
23# VFIO/IOMMU
24KERNEL_FEATURES_append = " features/vfio/vfio_m.scc"
25
26# KVM Host
27KERNEL_FEATURES_append = " features/kvm/qemu-kvm-arm64_y.scc"
28KERNEL_FEATURES_append = " features/kvm/guest_n.scc"
29KERNEL_FEATURES_append = " features/vhost/vhost_m.scc"
30KERNEL_FEATURES_append = " features/pci/pci_iov_m.scc"
31
32# KSM (set to y for a high density of VMs)
33KERNEL_FEATURES_append = " features/ksm/ksm_n.scc"
34
35# Full NOHZ
36KERNEL_FEATURES_append = " features/full_nohz/full_nohz-enable.scc"
37
38# Enable HPET, UIO, HUGETLB, PCI_MSI
39KERNEL_FEATURES_append = " features/intel-dpdk/intel-dpdk.scc"
40KERNEL_MODULE_AUTOLOAD += "uio"
41
42# Low Latency kernel
43KERNEL_FEATURES_append = " features/lowlatency/lowlatency_y.scc"
44
45# CPU isolation
46KERNEL_FEATURES_append = " features/cgroups/cpusets.scc"
47KERNEL_FEATURES_append = " features/rcu/rcu_nocb_y.scc"
48
49# Enable printk and earlyprintk
50KERNEL_FEATURES_append = " features/printk/printk_y.scc"
51
52# Add USB, IDE, ISO FS and SCSI support to boot off
53# of USB sticks and CDROMs
54KERNEL_FEATURES_append = " features/ide/ide_m.scc"
55KERNEL_FEATURES_append = " features/usb/usb_storage_m.scc"
56KERNEL_FEATURES_append = " features/usb/ohci_m.scc"
57KERNEL_FEATURES_append = " features/cdrom/isofs_m.scc"
58KERNEL_FEATURES_append = " features/cdrom/cdrom_m.scc"
59KERNEL_FEATURES_append = " features/scsi/scsi_y.scc"
60
61# Extend storage support for boot
62KERNEL_FEATURES_append = " features/mmc/mmc_sd_y.scc"
63
64# Console & keyboard used by the USB installer
65KERNEL_FEATURES_append = " features/console/console_txt_y.scc"
66KERNEL_FEATURES_append = " features/hid/keyboard_m.scc"
67
68# Extend device support (USB, HSI, ...)
69KERNEL_FEATURES_append = " features/firewire/firewire_m.scc"
70KERNEL_FEATURES_append = " features/hsi/hsi_m.scc"
71KERNEL_FEATURES_append = " features/usbnet/usbnet_m.scc"
72KERNEL_FEATURES_append = " features/usbnet/usb_phy_m.scc"
73KERNEL_FEATURES_append = " features/usbGadget/usbgadget_m.scc"
74
75# Gigapages support
76KERNEL_FEATURES_append = " features/gigapages/gigapages_y.scc"
77
78# WLAN support
79KERNEL_FEATURES_append = " features/wlan/wlan_y.scc"
80KERNEL_FEATURES_append = " features/wlan/wlan_ath10k.scc"
diff --git a/recipes-kernel/linux/linux-ampere_4.14.inc b/recipes-kernel/linux/linux-ampere_4.14.inc
new file mode 100644
index 0000000..e47e3e4
--- /dev/null
+++ b/recipes-kernel/linux/linux-ampere_4.14.inc
@@ -0,0 +1,46 @@
1FILESEXTRAPATHS_prepend := "${THISDIR}/linux-ampere:"
2
3require recipes-kernel/linux/linux-yocto.inc
4require recipes-kernel/linux/linux-deploy-kconfig.inc
5
6# board specific branches
7KBRANCH_emag8180 ?= "amp-centos-7.5-kernel"
8KBRANCH_qemuarm64 ?= "amp-centos-7.5-kernel"
9
10SRCREV_machine_emag8180 ?= "da654c67f134497fd7cf88775e8b206c7d28a67a"
11SRCREV_machine_qemuarm64 ?= "da654c67f134497fd7cf88775e8b206c7d28a67a"
12
13SRCREV_metaenea ?= "2e53208e8c1eaf83fb6b6411f8465f2bdf1d6069"
14KENEABRANCH = "ampere-4.14"
15SRCREV_meta ?= "245d701df6c3691a078a268eff54009959beb842"
16
17SRC_URI = "git://github.com/AmpereComputing/ampere-centos-kernel.git;name=machine;branch=${KBRANCH} \
18 git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.14;destsuffix=kernel-meta \
19 git://git@git.enea.com/linux/enea-kernel-cache.git;protocol=ssh;type=kmeta;name=metaenea;branch=${KENEABRANCH};destsuffix=enea-kernel-cache \
20 file://0001-Upgrade-i40e-drivers-to-2.11.29.patch \
21 "
22
23KERNEL_CONSOLE = "ttyAMA0,115200"
24
25LINUX_KERNEL_TYPE = "standard"
26LINUX_VERSION ?= "4.14.0"
27LINUX_VERSION_EXTENSION = "-ampere-${LINUX_KERNEL_TYPE}"
28
29COMPATIBLE_MACHINE = "emag8180|qemuarm64"
30KMACHINE_emag8180 = "ampere-emag8180"
31KMACHINE_qemuarm64 = "ampere-emag8180"
32
33KERNEL_FEATURES_append = " features/udev/udev.scc"
34
35# Ramdisk boot support
36KERNEL_FEATURES_append = " features/blkdev/ramdisk_blk_dev.scc"
37
38# Intel 10G ports (SoC)
39KERNEL_FEATURES_append = " features/ixgbe/ixgbe_y.scc"
40KERNEL_FEATURES_append = " features/dca/dca_y.scc"
41
42# NVMe SSD
43KERNEL_FEATURES_append = " features/nvme/nvme.scc"
44
45# IPv4: wait for carrier to come up
46KERNEL_FEATURES_append = " patches/ipv4/ipv4wait.scc"