summaryrefslogtreecommitdiffstats
path: root/recipes-kernel/linux/linux-am335x-3.2.0-psp04.06.00.08/0002-AM335x-OCF-Driver-for-Linux-3.patch
diff options
context:
space:
mode:
Diffstat (limited to 'recipes-kernel/linux/linux-am335x-3.2.0-psp04.06.00.08/0002-AM335x-OCF-Driver-for-Linux-3.patch')
-rw-r--r--recipes-kernel/linux/linux-am335x-3.2.0-psp04.06.00.08/0002-AM335x-OCF-Driver-for-Linux-3.patch7228
1 files changed, 7228 insertions, 0 deletions
diff --git a/recipes-kernel/linux/linux-am335x-3.2.0-psp04.06.00.08/0002-AM335x-OCF-Driver-for-Linux-3.patch b/recipes-kernel/linux/linux-am335x-3.2.0-psp04.06.00.08/0002-AM335x-OCF-Driver-for-Linux-3.patch
new file mode 100644
index 00000000..916b190c
--- /dev/null
+++ b/recipes-kernel/linux/linux-am335x-3.2.0-psp04.06.00.08/0002-AM335x-OCF-Driver-for-Linux-3.patch
@@ -0,0 +1,7228 @@
1From a97aac248717d62bdbf322c1d6d422ddfde87de0 Mon Sep 17 00:00:00 2001
2From: Greg Turner <gregturner@ti.com>
3Date: Thu, 3 May 2012 10:33:13 -0500
4Subject: [PATCH 2/2] AM335x OCF Driver for Linux 3
5
6---
7 crypto/Kconfig | 3 +
8 crypto/Makefile | 2 +
9 crypto/ocf/Config.in | 20 +
10 crypto/ocf/Kconfig | 48 ++
11 crypto/ocf/Makefile | 138 ++++
12 crypto/ocf/criov.c | 215 +++++
13 crypto/ocf/crypto.c | 1766 ++++++++++++++++++++++++++++++++++++++++++
14 crypto/ocf/cryptodev.c | 1069 +++++++++++++++++++++++++
15 crypto/ocf/cryptodev.h | 480 ++++++++++++
16 crypto/ocf/cryptosoft.c | 1322 +++++++++++++++++++++++++++++++
17 crypto/ocf/ocf-bench.c | 514 ++++++++++++
18 crypto/ocf/ocf-compat.h | 372 +++++++++
19 crypto/ocf/ocfnull/Makefile | 12 +
20 crypto/ocf/ocfnull/ocfnull.c | 204 +++++
21 crypto/ocf/random.c | 317 ++++++++
22 crypto/ocf/rndtest.c | 300 +++++++
23 crypto/ocf/rndtest.h | 54 ++
24 crypto/ocf/uio.h | 54 ++
25 drivers/char/random.c | 67 ++
26 fs/fcntl.c | 1 +
27 include/linux/miscdevice.h | 1 +
28 include/linux/random.h | 28 +
29 kernel/pid.c | 1 +
30 23 files changed, 6988 insertions(+), 0 deletions(-)
31 create mode 100755 crypto/ocf/Config.in
32 create mode 100755 crypto/ocf/Kconfig
33 create mode 100755 crypto/ocf/Makefile
34 create mode 100644 crypto/ocf/criov.c
35 create mode 100644 crypto/ocf/crypto.c
36 create mode 100644 crypto/ocf/cryptodev.c
37 create mode 100644 crypto/ocf/cryptodev.h
38 create mode 100644 crypto/ocf/cryptosoft.c
39 create mode 100644 crypto/ocf/ocf-bench.c
40 create mode 100644 crypto/ocf/ocf-compat.h
41 create mode 100644 crypto/ocf/ocfnull/Makefile
42 create mode 100644 crypto/ocf/ocfnull/ocfnull.c
43 create mode 100644 crypto/ocf/random.c
44 create mode 100644 crypto/ocf/rndtest.c
45 create mode 100644 crypto/ocf/rndtest.h
46 create mode 100644 crypto/ocf/uio.h
47
48diff --git a/crypto/Kconfig b/crypto/Kconfig
49index 527a857..8871f10 100644
50--- a/crypto/Kconfig
51+++ b/crypto/Kconfig
52@@ -923,3 +923,6 @@ config CRYPTO_USER_API_SKCIPHER
53 source "drivers/crypto/Kconfig"
54
55 endif # if CRYPTO
56+
57+source "crypto/ocf/Kconfig"
58+
59diff --git a/crypto/Makefile b/crypto/Makefile
60index 9e6eee2..3cde9f8 100644
61--- a/crypto/Makefile
62+++ b/crypto/Makefile
63@@ -91,6 +91,8 @@ obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
64 obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
65 obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
66
67+obj-$(CONFIG_OCF_OCF) += ocf/
68+
69 #
70 # generic algorithms and the async_tx api
71 #
72diff --git a/crypto/ocf/Config.in b/crypto/ocf/Config.in
73new file mode 100755
74index 0000000..423d11f
75--- /dev/null
76+++ b/crypto/ocf/Config.in
77@@ -0,0 +1,20 @@
78+#############################################################################
79+
80+mainmenu_option next_comment
81+comment 'OCF Configuration'
82+tristate 'OCF (Open Cryptograhic Framework)' CONFIG_OCF_OCF
83+dep_mbool ' enable fips RNG checks (fips check on RNG data before use)' \
84+ CONFIG_OCF_FIPS $CONFIG_OCF_OCF
85+dep_mbool ' enable harvesting entropy for /dev/random' \
86+ CONFIG_OCF_RANDOMHARVEST $CONFIG_OCF_OCF
87+dep_tristate ' cryptodev (user space support)' \
88+ CONFIG_OCF_CRYPTODEV $CONFIG_OCF_OCF
89+dep_tristate ' cryptosoft (software crypto engine)' \
90+ CONFIG_OCF_CRYPTOSOFT $CONFIG_OCF_OCF
91+dep_tristate ' ocfnull (does no crypto)' \
92+ CONFIG_OCF_OCFNULL $CONFIG_OCF_OCF
93+dep_tristate ' ocf-bench (HW crypto in-kernel benchmark)' \
94+ CONFIG_OCF_BENCH $CONFIG_OCF_OCF
95+endmenu
96+
97+#############################################################################
98diff --git a/crypto/ocf/Kconfig b/crypto/ocf/Kconfig
99new file mode 100755
100index 0000000..44459f4
101--- /dev/null
102+++ b/crypto/ocf/Kconfig
103@@ -0,0 +1,48 @@
104+menu "OCF Configuration"
105+
106+config OCF_OCF
107+ tristate "OCF (Open Cryptograhic Framework)"
108+ help
109+ A linux port of the OpenBSD/FreeBSD crypto framework.
110+
111+config OCF_RANDOMHARVEST
112+ bool "crypto random --- harvest entropy for /dev/random"
113+ depends on OCF_OCF
114+ help
115+ Includes code to harvest random numbers from devices that support it.
116+
117+config OCF_FIPS
118+ bool "enable fips RNG checks"
119+ depends on OCF_OCF && OCF_RANDOMHARVEST
120+ help
121+ Run all RNG provided data through a fips check before
122+ adding it /dev/random's entropy pool.
123+
124+config OCF_CRYPTODEV
125+ tristate "cryptodev (user space support)"
126+ depends on OCF_OCF
127+ help
128+ The user space API to access crypto hardware.
129+
130+config OCF_CRYPTOSOFT
131+ tristate "cryptosoft (software crypto engine)"
132+ depends on OCF_OCF
133+ help
134+ A software driver for the OCF framework that uses
135+ the kernel CryptoAPI.
136+
137+config OCF_OCFNULL
138+ tristate "ocfnull (fake crypto engine)"
139+ depends on OCF_OCF
140+ help
141+ OCF driver for measuring ipsec overheads (does no crypto)
142+
143+config OCF_BENCH
144+ tristate "ocf-bench (HW crypto in-kernel benchmark)"
145+ depends on OCF_OCF
146+ help
147+ A very simple encryption test for the in-kernel interface
148+ of OCF. Also includes code to benchmark the IXP Access library
149+ for comparison.
150+
151+endmenu
152diff --git a/crypto/ocf/Makefile b/crypto/ocf/Makefile
153new file mode 100755
154index 0000000..29ac280
155--- /dev/null
156+++ b/crypto/ocf/Makefile
157@@ -0,0 +1,138 @@
158+# for SGlinux builds
159+-include $(ROOTDIR)/modules/.config
160+
161+OCF_OBJS = crypto.o criov.o
162+
163+ifdef CONFIG_OCF_RANDOMHARVEST
164+ OCF_OBJS += random.o
165+endif
166+
167+ifdef CONFIG_OCF_FIPS
168+ OCF_OBJS += rndtest.o
169+endif
170+
171+# Add in autoconf.h to get #defines for CONFIG_xxx
172+AUTOCONF_H=$(ROOTDIR)/modules/autoconf.h
173+ifeq ($(AUTOCONF_H), $(wildcard $(AUTOCONF_H)))
174+ EXTRA_CFLAGS += -include $(AUTOCONF_H)
175+ export EXTRA_CFLAGS
176+endif
177+
178+ifndef obj
179+ obj ?= .
180+ _obj = subdir
181+ mod-subdirs := safe hifn ixp4xx talitos ocfnull
182+ export-objs += crypto.o criov.o random.o
183+ list-multi += ocf.o
184+ _slash :=
185+else
186+ _obj = obj
187+ _slash := /
188+endif
189+
190+EXTRA_CFLAGS += -I$(obj)/.
191+
192+obj-$(CONFIG_OCF_OCF) += ocf.o
193+obj-$(CONFIG_OCF_CRYPTODEV) += cryptodev.o
194+obj-$(CONFIG_OCF_CRYPTOSOFT) += cryptosoft.o
195+obj-$(CONFIG_OCF_BENCH) += ocf-bench.o
196+
197+$(_obj)-$(CONFIG_OCF_OCFNULL) += ocfnull$(_slash)
198+
199+ocf-objs := $(OCF_OBJS)
200+
201+dummy:
202+ @echo "Please consult the README for how to build OCF."
203+ @echo "If you can't wait then the following should do it:"
204+ @echo ""
205+ @echo " make ocf_modules"
206+ @echo " sudo make ocf_install"
207+ @echo ""
208+ @exit 1
209+
210+$(list-multi) dummy1: $(ocf-objs)
211+ $(LD) -r -o $@ $(ocf-objs)
212+
213+.PHONY:
214+clean:
215+ rm -f *.o *.ko .*.o.flags .*.ko.cmd .*.o.cmd .*.mod.o.cmd *.mod.c
216+ rm -f */*.o */*.ko */.*.o.cmd */.*.ko.cmd */.*.mod.o.cmd */*.mod.c */.*.o.flags
217+ rm -f */modules.order */modules.builtin modules.order modules.builtin
218+
219+ifdef TOPDIR
220+-include $(TOPDIR)/Rules.make
221+endif
222+
223+#
224+# targets to build easily on the current machine
225+#
226+
227+ocf_make:
228+ make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m
229+ make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_CRYPTOSOFT=m
230+ -make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_BENCH=m
231+ -make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_OCFNULL=m
232+ -make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_HIFN=m
233+
234+ocf_modules:
235+ $(MAKE) ocf_make OCF_TARGET=modules
236+
237+ocf_install:
238+ $(MAKE) ocf_make OCF_TARGET="modules modules_install"
239+ depmod
240+ mkdir -p /usr/include/crypto
241+ cp cryptodev.h /usr/include/crypto/.
242+
243+#
244+# generate full kernel patches for 2.4 and 2.6 kernels to make patching
245+# your kernel easier
246+#
247+
248+.PHONY: patch
249+patch:
250+ patchbase=.; \
251+ [ -d $$patchbase/patches ] || patchbase=..; \
252+ patch=ocf-linux-base.patch; \
253+ patch24=ocf-linux-24.patch; \
254+ patch26=ocf-linux-26.patch; \
255+ patch3=ocf-linux-3.patch; \
256+ ( \
257+ find . -name Makefile; \
258+ find . -name Config.in; \
259+ find . -name Kconfig; \
260+ find . -name README; \
261+ find . -name '*.[ch]' | grep -v '.mod.c'; \
262+ ) | while read t; do \
263+ diff -Nau /dev/null $$t | sed 's?^+++ \./?+++ linux/crypto/ocf/?'; \
264+ done > $$patch; \
265+ cat $$patchbase/patches/linux-2.4.35-ocf.patch $$patch > $$patch24; \
266+ cat $$patchbase/patches/linux-2.6.38-ocf.patch $$patch > $$patch26; \
267+ cat $$patchbase/patches/linux-3.2.1-ocf.patch $$patch > $$patch3; \
268+
269+
270+#
271+# this target probably does nothing for anyone but me - davidm
272+#
273+
274+.PHONY: release
275+release:
276+ REL=`date +%Y%m%d`; RELDIR=/tmp/ocf-linux-$$REL; \
277+ CURDIR=`pwd`; \
278+ rm -rf /tmp/ocf-linux-$$REL*; \
279+ mkdir -p $$RELDIR/ocf; \
280+ mkdir -p $$RELDIR/patches; \
281+ mkdir -p $$RELDIR/crypto-tools; \
282+ cp README* $$RELDIR/.; \
283+ cp patches/[!C]* $$RELDIR/patches/.; \
284+ cp tools/[!C]* $$RELDIR/crypto-tools/.; \
285+ cp -r [!C]* Config.in $$RELDIR/ocf/.; \
286+ rm -rf $$RELDIR/ocf/patches $$RELDIR/ocf/tools; \
287+ rm -f $$RELDIR/ocf/README*; \
288+ cp $$CURDIR/../../user/crypto-tools/[!C]* $$RELDIR/crypto-tools/.; \
289+ make -C $$RELDIR/crypto-tools clean; \
290+ make -C $$RELDIR/ocf clean; \
291+ find $$RELDIR/ocf -name CVS | xargs rm -rf; \
292+ cd $$RELDIR/..; \
293+ tar cvf ocf-linux-$$REL.tar ocf-linux-$$REL; \
294+ gzip -9 ocf-linux-$$REL.tar
295+
296diff --git a/crypto/ocf/criov.c b/crypto/ocf/criov.c
297new file mode 100644
298index 0000000..a8c1a8c
299--- /dev/null
300+++ b/crypto/ocf/criov.c
301@@ -0,0 +1,215 @@
302+/* $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $ */
303+
304+/*
305+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
306+ * Copyright (C) 2006-2010 David McCullough
307+ * Copyright (C) 2004-2005 Intel Corporation.
308+ * The license and original author are listed below.
309+ *
310+ * Copyright (c) 1999 Theo de Raadt
311+ *
312+ * Redistribution and use in source and binary forms, with or without
313+ * modification, are permitted provided that the following conditions
314+ * are met:
315+ *
316+ * 1. Redistributions of source code must retain the above copyright
317+ * notice, this list of conditions and the following disclaimer.
318+ * 2. Redistributions in binary form must reproduce the above copyright
319+ * notice, this list of conditions and the following disclaimer in the
320+ * documentation and/or other materials provided with the distribution.
321+ * 3. The name of the author may not be used to endorse or promote products
322+ * derived from this software without specific prior written permission.
323+ *
324+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
325+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
326+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
327+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
328+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
329+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
330+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
331+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
332+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
333+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
334+ *
335+__FBSDID("$FreeBSD: src/sys/opencrypto/criov.c,v 1.5 2006/06/04 22:15:13 pjd Exp $");
336+ */
337+
338+#include <linux/version.h>
339+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
340+#include <linux/config.h>
341+#endif
342+#include <linux/module.h>
343+#include <linux/init.h>
344+#include <linux/slab.h>
345+#include <linux/uio.h>
346+#include <linux/skbuff.h>
347+#include <linux/kernel.h>
348+#include <linux/mm.h>
349+#include <asm/io.h>
350+
351+#include <uio.h>
352+#include <cryptodev.h>
353+
354+/*
355+ * This macro is only for avoiding code duplication, as we need to skip
356+ * given number of bytes in the same way in three functions below.
357+ */
358+#define CUIO_SKIP() do { \
359+ KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \
360+ KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \
361+ while (off > 0) { \
362+ KASSERT(iol >= 0, ("%s: empty in skip", __func__)); \
363+ if (off < iov->iov_len) \
364+ break; \
365+ off -= iov->iov_len; \
366+ iol--; \
367+ iov++; \
368+ } \
369+} while (0)
370+
371+void
372+cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
373+{
374+ struct iovec *iov = uio->uio_iov;
375+ int iol = uio->uio_iovcnt;
376+ unsigned count;
377+
378+ CUIO_SKIP();
379+ while (len > 0) {
380+ KASSERT(iol >= 0, ("%s: empty", __func__));
381+ count = min((int)(iov->iov_len - off), len);
382+ memcpy(cp, ((caddr_t)iov->iov_base) + off, count);
383+ len -= count;
384+ cp += count;
385+ off = 0;
386+ iol--;
387+ iov++;
388+ }
389+}
390+
391+void
392+cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
393+{
394+ struct iovec *iov = uio->uio_iov;
395+ int iol = uio->uio_iovcnt;
396+ unsigned count;
397+
398+ CUIO_SKIP();
399+ while (len > 0) {
400+ KASSERT(iol >= 0, ("%s: empty", __func__));
401+ count = min((int)(iov->iov_len - off), len);
402+ memcpy(((caddr_t)iov->iov_base) + off, cp, count);
403+ len -= count;
404+ cp += count;
405+ off = 0;
406+ iol--;
407+ iov++;
408+ }
409+}
410+
411+/*
412+ * Return a pointer to iov/offset of location in iovec list.
413+ */
414+struct iovec *
415+cuio_getptr(struct uio *uio, int loc, int *off)
416+{
417+ struct iovec *iov = uio->uio_iov;
418+ int iol = uio->uio_iovcnt;
419+
420+ while (loc >= 0) {
421+ /* Normal end of search */
422+ if (loc < iov->iov_len) {
423+ *off = loc;
424+ return (iov);
425+ }
426+
427+ loc -= iov->iov_len;
428+ if (iol == 0) {
429+ if (loc == 0) {
430+ /* Point at the end of valid data */
431+ *off = iov->iov_len;
432+ return (iov);
433+ } else
434+ return (NULL);
435+ } else {
436+ iov++, iol--;
437+ }
438+ }
439+
440+ return (NULL);
441+}
442+
443+EXPORT_SYMBOL(cuio_copyback);
444+EXPORT_SYMBOL(cuio_copydata);
445+EXPORT_SYMBOL(cuio_getptr);
446+
447+static void
448+skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len)
449+{
450+ int i;
451+ if (offset < skb_headlen(skb)) {
452+ memcpy(skb->data + offset, cp, min_t(int, skb_headlen(skb), len));
453+ len -= skb_headlen(skb);
454+ cp += skb_headlen(skb);
455+ }
456+ offset -= skb_headlen(skb);
457+ for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) {
458+ if (offset < skb_shinfo(skb)->frags[i].size) {
459+ memcpy(page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
460+ skb_shinfo(skb)->frags[i].page_offset,
461+ cp, min_t(int, skb_shinfo(skb)->frags[i].size, len));
462+ len -= skb_shinfo(skb)->frags[i].size;
463+ cp += skb_shinfo(skb)->frags[i].size;
464+ }
465+ offset -= skb_shinfo(skb)->frags[i].size;
466+ }
467+}
468+
469+void
470+crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
471+{
472+
473+ if ((flags & CRYPTO_F_SKBUF) != 0)
474+ skb_copy_bits_back((struct sk_buff *)buf, off, in, size);
475+ else if ((flags & CRYPTO_F_IOV) != 0)
476+ cuio_copyback((struct uio *)buf, off, size, in);
477+ else
478+ bcopy(in, buf + off, size);
479+}
480+
481+void
482+crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
483+{
484+
485+ if ((flags & CRYPTO_F_SKBUF) != 0)
486+ skb_copy_bits((struct sk_buff *)buf, off, out, size);
487+ else if ((flags & CRYPTO_F_IOV) != 0)
488+ cuio_copydata((struct uio *)buf, off, size, out);
489+ else
490+ bcopy(buf + off, out, size);
491+}
492+
493+int
494+crypto_apply(int flags, caddr_t buf, int off, int len,
495+ int (*f)(void *, void *, u_int), void *arg)
496+{
497+#if 0
498+ int error;
499+
500+ if ((flags & CRYPTO_F_SKBUF) != 0)
501+ error = XXXXXX((struct mbuf *)buf, off, len, f, arg);
502+ else if ((flags & CRYPTO_F_IOV) != 0)
503+ error = cuio_apply((struct uio *)buf, off, len, f, arg);
504+ else
505+ error = (*f)(arg, buf + off, len);
506+ return (error);
507+#else
508+ KASSERT(0, ("crypto_apply not implemented!\n"));
509+#endif
510+ return 0;
511+}
512+
513+EXPORT_SYMBOL(crypto_copyback);
514+EXPORT_SYMBOL(crypto_copydata);
515+EXPORT_SYMBOL(crypto_apply);
516+
517diff --git a/crypto/ocf/crypto.c b/crypto/ocf/crypto.c
518new file mode 100644
519index 0000000..f48210d
520--- /dev/null
521+++ b/crypto/ocf/crypto.c
522@@ -0,0 +1,1766 @@
523+/*-
524+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
525+ * Copyright (C) 2006-2010 David McCullough
526+ * Copyright (C) 2004-2005 Intel Corporation.
527+ * The license and original author are listed below.
528+ *
529+ * Redistribution and use in source and binary forms, with or without
530+ * Copyright (c) 2002-2006 Sam Leffler. All rights reserved.
531+ *
532+ * modification, are permitted provided that the following conditions
533+ * are met:
534+ * 1. Redistributions of source code must retain the above copyright
535+ * notice, this list of conditions and the following disclaimer.
536+ * 2. Redistributions in binary form must reproduce the above copyright
537+ * notice, this list of conditions and the following disclaimer in the
538+ * documentation and/or other materials provided with the distribution.
539+ *
540+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
541+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
542+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
543+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
544+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
545+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
546+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
547+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
548+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
549+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
550+ */
551+
552+#if 0
553+#include <sys/cdefs.h>
554+__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.27 2007/03/21 03:42:51 sam Exp $");
555+#endif
556+
557+/*
558+ * Cryptographic Subsystem.
559+ *
560+ * This code is derived from the Openbsd Cryptographic Framework (OCF)
561+ * that has the copyright shown below. Very little of the original
562+ * code remains.
563+ */
564+/*-
565+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
566+ *
567+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
568+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
569+ * supported the development of this code.
570+ *
571+ * Copyright (c) 2000, 2001 Angelos D. Keromytis
572+ *
573+ * Permission to use, copy, and modify this software with or without fee
574+ * is hereby granted, provided that this entire notice is included in
575+ * all source code copies of any software which is or includes a copy or
576+ * modification of this software.
577+ *
578+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
579+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
580+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
581+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
582+ * PURPOSE.
583+ *
584+__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.16 2005/01/07 02:29:16 imp Exp $");
585+ */
586+
587+
588+#include <linux/version.h>
589+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
590+#include <linux/config.h>
591+#endif
592+#include <linux/module.h>
593+#include <linux/init.h>
594+#include <linux/list.h>
595+#include <linux/slab.h>
596+#include <linux/wait.h>
597+#include <linux/sched.h>
598+#include <linux/spinlock.h>
599+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,4)
600+#include <linux/kthread.h>
601+#endif
602+#include <cryptodev.h>
603+
604+/*
605+ * keep track of whether or not we have been initialised, a big
606+ * issue if we are linked into the kernel and a driver gets started before
607+ * us
608+ */
609+static int crypto_initted = 0;
610+
611+/*
612+ * Crypto drivers register themselves by allocating a slot in the
613+ * crypto_drivers table with crypto_get_driverid() and then registering
614+ * each algorithm they support with crypto_register() and crypto_kregister().
615+ */
616+
617+/*
618+ * lock on driver table
619+ * we track its state as spin_is_locked does not do anything on non-SMP boxes
620+ */
621+static spinlock_t crypto_drivers_lock;
622+static int crypto_drivers_locked; /* for non-SMP boxes */
623+
624+#define CRYPTO_DRIVER_LOCK() \
625+ ({ \
626+ spin_lock_irqsave(&crypto_drivers_lock, d_flags); \
627+ crypto_drivers_locked = 1; \
628+ dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \
629+ })
630+#define CRYPTO_DRIVER_UNLOCK() \
631+ ({ \
632+ dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \
633+ crypto_drivers_locked = 0; \
634+ spin_unlock_irqrestore(&crypto_drivers_lock, d_flags); \
635+ })
636+#define CRYPTO_DRIVER_ASSERT() \
637+ ({ \
638+ if (!crypto_drivers_locked) { \
639+ dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \
640+ } \
641+ })
642+
643+/*
644+ * Crypto device/driver capabilities structure.
645+ *
646+ * Synchronization:
647+ * (d) - protected by CRYPTO_DRIVER_LOCK()
648+ * (q) - protected by CRYPTO_Q_LOCK()
649+ * Not tagged fields are read-only.
650+ */
651+struct cryptocap {
652+ device_t cc_dev; /* (d) device/driver */
653+ u_int32_t cc_sessions; /* (d) # of sessions */
654+ u_int32_t cc_koperations; /* (d) # os asym operations */
655+ /*
656+ * Largest possible operator length (in bits) for each type of
657+ * encryption algorithm. XXX not used
658+ */
659+ u_int16_t cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
660+ u_int8_t cc_alg[CRYPTO_ALGORITHM_MAX + 1];
661+ u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1];
662+
663+ int cc_flags; /* (d) flags */
664+#define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */
665+ int cc_qblocked; /* (q) symmetric q blocked */
666+ int cc_kqblocked; /* (q) asymmetric q blocked */
667+
668+ int cc_unqblocked; /* (q) symmetric q blocked */
669+ int cc_unkqblocked; /* (q) asymmetric q blocked */
670+};
671+static struct cryptocap *crypto_drivers = NULL;
672+static int crypto_drivers_num = 0;
673+
674+/*
675+ * There are two queues for crypto requests; one for symmetric (e.g.
676+ * cipher) operations and one for asymmetric (e.g. MOD)operations.
677+ * A single mutex is used to lock access to both queues. We could
678+ * have one per-queue but having one simplifies handling of block/unblock
679+ * operations.
680+ */
681+static LIST_HEAD(crp_q); /* crypto request queue */
682+static LIST_HEAD(crp_kq); /* asym request queue */
683+
684+static spinlock_t crypto_q_lock;
685+
686+int crypto_all_qblocked = 0; /* protect with Q_LOCK */
687+module_param(crypto_all_qblocked, int, 0444);
688+MODULE_PARM_DESC(crypto_all_qblocked, "Are all crypto queues blocked");
689+
690+int crypto_all_kqblocked = 0; /* protect with Q_LOCK */
691+module_param(crypto_all_kqblocked, int, 0444);
692+MODULE_PARM_DESC(crypto_all_kqblocked, "Are all asym crypto queues blocked");
693+
694+#define CRYPTO_Q_LOCK() \
695+ ({ \
696+ spin_lock_irqsave(&crypto_q_lock, q_flags); \
697+ dprintk("%s,%d: Q_LOCK()\n", __FILE__, __LINE__); \
698+ })
699+#define CRYPTO_Q_UNLOCK() \
700+ ({ \
701+ dprintk("%s,%d: Q_UNLOCK()\n", __FILE__, __LINE__); \
702+ spin_unlock_irqrestore(&crypto_q_lock, q_flags); \
703+ })
704+
705+/*
706+ * There are two queues for processing completed crypto requests; one
707+ * for the symmetric and one for the asymmetric ops. We only need one
708+ * but have two to avoid type futzing (cryptop vs. cryptkop). A single
709+ * mutex is used to lock access to both queues. Note that this lock
710+ * must be separate from the lock on request queues to insure driver
711+ * callbacks don't generate lock order reversals.
712+ */
713+static LIST_HEAD(crp_ret_q); /* callback queues */
714+static LIST_HEAD(crp_ret_kq);
715+
716+static spinlock_t crypto_ret_q_lock;
717+#define CRYPTO_RETQ_LOCK() \
718+ ({ \
719+ spin_lock_irqsave(&crypto_ret_q_lock, r_flags); \
720+ dprintk("%s,%d: RETQ_LOCK\n", __FILE__, __LINE__); \
721+ })
722+#define CRYPTO_RETQ_UNLOCK() \
723+ ({ \
724+ dprintk("%s,%d: RETQ_UNLOCK\n", __FILE__, __LINE__); \
725+ spin_unlock_irqrestore(&crypto_ret_q_lock, r_flags); \
726+ })
727+#define CRYPTO_RETQ_EMPTY() (list_empty(&crp_ret_q) && list_empty(&crp_ret_kq))
728+
729+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
730+static kmem_cache_t *cryptop_zone;
731+static kmem_cache_t *cryptodesc_zone;
732+#else
733+static struct kmem_cache *cryptop_zone;
734+static struct kmem_cache *cryptodesc_zone;
735+#endif
736+
737+#define debug crypto_debug
738+int crypto_debug = 0;
739+module_param(crypto_debug, int, 0644);
740+MODULE_PARM_DESC(crypto_debug, "Enable debug");
741+EXPORT_SYMBOL(crypto_debug);
742+
743+/*
744+ * Maximum number of outstanding crypto requests before we start
745+ * failing requests. We need this to prevent DOS when too many
746+ * requests are arriving for us to keep up. Otherwise we will
747+ * run the system out of memory. Since crypto is slow, we are
748+ * usually the bottleneck that needs to say, enough is enough.
749+ *
750+ * We cannot print errors when this condition occurs, we are already too
751+ * slow, printing anything will just kill us
752+ */
753+
754+static int crypto_q_cnt = 0;
755+module_param(crypto_q_cnt, int, 0444);
756+MODULE_PARM_DESC(crypto_q_cnt,
757+ "Current number of outstanding crypto requests");
758+
759+static int crypto_q_max = 1000;
760+module_param(crypto_q_max, int, 0644);
761+MODULE_PARM_DESC(crypto_q_max,
762+ "Maximum number of outstanding crypto requests");
763+
764+#define bootverbose crypto_verbose
765+static int crypto_verbose = 0;
766+module_param(crypto_verbose, int, 0644);
767+MODULE_PARM_DESC(crypto_verbose,
768+ "Enable verbose crypto startup");
769+
770+int crypto_usercrypto = 1; /* userland may do crypto reqs */
771+module_param(crypto_usercrypto, int, 0644);
772+MODULE_PARM_DESC(crypto_usercrypto,
773+ "Enable/disable user-mode access to crypto support");
774+
775+int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
776+module_param(crypto_userasymcrypto, int, 0644);
777+MODULE_PARM_DESC(crypto_userasymcrypto,
778+ "Enable/disable user-mode access to asymmetric crypto support");
779+
780+int crypto_devallowsoft = 0; /* only use hardware crypto */
781+module_param(crypto_devallowsoft, int, 0644);
782+MODULE_PARM_DESC(crypto_devallowsoft,
783+ "Enable/disable use of software crypto support");
784+
785+/*
786+ * This parameter controls the maximum number of crypto operations to
787+ * do consecutively in the crypto kernel thread before scheduling to allow
788+ * other processes to run. Without it, it is possible to get into a
789+ * situation where the crypto thread never allows any other processes to run.
790+ * Default to 1000 which should be less than one second.
791+ */
792+static int crypto_max_loopcount = 1000;
793+module_param(crypto_max_loopcount, int, 0644);
794+MODULE_PARM_DESC(crypto_max_loopcount,
795+ "Maximum number of crypto ops to do before yielding to other processes");
796+
797+#ifndef CONFIG_NR_CPUS
798+#define CONFIG_NR_CPUS 1
799+#endif
800+
801+static struct task_struct *cryptoproc[CONFIG_NR_CPUS];
802+static struct task_struct *cryptoretproc[CONFIG_NR_CPUS];
803+static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait);
804+static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait);
805+
806+static int crypto_proc(void *arg);
807+static int crypto_ret_proc(void *arg);
808+static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
809+static int crypto_kinvoke(struct cryptkop *krp, int flags);
810+static void crypto_exit(void);
811+static int crypto_init(void);
812+
813+static struct cryptostats cryptostats;
814+
815+static struct cryptocap *
816+crypto_checkdriver(u_int32_t hid)
817+{
818+ if (crypto_drivers == NULL)
819+ return NULL;
820+ return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
821+}
822+
823+/*
824+ * Compare a driver's list of supported algorithms against another
825+ * list; return non-zero if all algorithms are supported.
826+ */
827+static int
828+driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
829+{
830+ const struct cryptoini *cr;
831+
832+ /* See if all the algorithms are supported. */
833+ for (cr = cri; cr; cr = cr->cri_next)
834+ if (cap->cc_alg[cr->cri_alg] == 0)
835+ return 0;
836+ return 1;
837+}
838+
839+
840+/*
841+ * Select a driver for a new session that supports the specified
842+ * algorithms and, optionally, is constrained according to the flags.
843+ * The algorithm we use here is pretty stupid; just use the
844+ * first driver that supports all the algorithms we need. If there
845+ * are multiple drivers we choose the driver with the fewest active
846+ * sessions. We prefer hardware-backed drivers to software ones.
847+ *
848+ * XXX We need more smarts here (in real life too, but that's
849+ * XXX another story altogether).
850+ */
851+static struct cryptocap *
852+crypto_select_driver(const struct cryptoini *cri, int flags)
853+{
854+ struct cryptocap *cap, *best;
855+ int match, hid;
856+
857+ CRYPTO_DRIVER_ASSERT();
858+
859+ /*
860+ * Look first for hardware crypto devices if permitted.
861+ */
862+ if (flags & CRYPTOCAP_F_HARDWARE)
863+ match = CRYPTOCAP_F_HARDWARE;
864+ else
865+ match = CRYPTOCAP_F_SOFTWARE;
866+ best = NULL;
867+again:
868+ for (hid = 0; hid < crypto_drivers_num; hid++) {
869+ cap = &crypto_drivers[hid];
870+ /*
871+ * If it's not initialized, is in the process of
872+ * going away, or is not appropriate (hardware
873+ * or software based on match), then skip.
874+ */
875+ if (cap->cc_dev == NULL ||
876+ (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
877+ (cap->cc_flags & match) == 0)
878+ continue;
879+
880+ /* verify all the algorithms are supported. */
881+ if (driver_suitable(cap, cri)) {
882+ if (best == NULL ||
883+ cap->cc_sessions < best->cc_sessions)
884+ best = cap;
885+ }
886+ }
887+ if (best != NULL)
888+ return best;
889+ if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
890+ /* sort of an Algol 68-style for loop */
891+ match = CRYPTOCAP_F_SOFTWARE;
892+ goto again;
893+ }
894+ return best;
895+}
896+
897+/*
898+ * Create a new session. The crid argument specifies a crypto
899+ * driver to use or constraints on a driver to select (hardware
900+ * only, software only, either). Whatever driver is selected
901+ * must be capable of the requested crypto algorithms.
902+ */
903+int
904+crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
905+{
906+ struct cryptocap *cap;
907+ u_int32_t hid, lid;
908+ int err;
909+ unsigned long d_flags;
910+
911+ CRYPTO_DRIVER_LOCK();
912+ if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
913+ /*
914+ * Use specified driver; verify it is capable.
915+ */
916+ cap = crypto_checkdriver(crid);
917+ if (cap != NULL && !driver_suitable(cap, cri))
918+ cap = NULL;
919+ } else {
920+ /*
921+ * No requested driver; select based on crid flags.
922+ */
923+ cap = crypto_select_driver(cri, crid);
924+ /*
925+ * if NULL then can't do everything in one session.
926+ * XXX Fix this. We need to inject a "virtual" session
927+ * XXX layer right about here.
928+ */
929+ }
930+ if (cap != NULL) {
931+ /* Call the driver initialization routine. */
932+ hid = cap - crypto_drivers;
933+ lid = hid; /* Pass the driver ID. */
934+ cap->cc_sessions++;
935+ CRYPTO_DRIVER_UNLOCK();
936+ err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
937+ CRYPTO_DRIVER_LOCK();
938+ if (err == 0) {
939+ (*sid) = (cap->cc_flags & 0xff000000)
940+ | (hid & 0x00ffffff);
941+ (*sid) <<= 32;
942+ (*sid) |= (lid & 0xffffffff);
943+ } else
944+ cap->cc_sessions--;
945+ } else
946+ err = EINVAL;
947+ CRYPTO_DRIVER_UNLOCK();
948+ return err;
949+}
950+
951+static void
952+crypto_remove(struct cryptocap *cap)
953+{
954+ CRYPTO_DRIVER_ASSERT();
955+ if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
956+ bzero(cap, sizeof(*cap));
957+}
958+
959+/*
960+ * Delete an existing session (or a reserved session on an unregistered
961+ * driver).
962+ */
963+int
964+crypto_freesession(u_int64_t sid)
965+{
966+ struct cryptocap *cap;
967+ u_int32_t hid;
968+ int err = 0;
969+ unsigned long d_flags;
970+
971+ dprintk("%s()\n", __FUNCTION__);
972+ CRYPTO_DRIVER_LOCK();
973+
974+ if (crypto_drivers == NULL) {
975+ err = EINVAL;
976+ goto done;
977+ }
978+
979+ /* Determine two IDs. */
980+ hid = CRYPTO_SESID2HID(sid);
981+
982+ if (hid >= crypto_drivers_num) {
983+ dprintk("%s - INVALID DRIVER NUM %d\n", __FUNCTION__, hid);
984+ err = ENOENT;
985+ goto done;
986+ }
987+ cap = &crypto_drivers[hid];
988+
989+ if (cap->cc_dev) {
990+ CRYPTO_DRIVER_UNLOCK();
991+ /* Call the driver cleanup routine, if available, unlocked. */
992+ err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
993+ CRYPTO_DRIVER_LOCK();
994+ }
995+
996+ if (cap->cc_sessions)
997+ cap->cc_sessions--;
998+
999+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
1000+ crypto_remove(cap);
1001+
1002+done:
1003+ CRYPTO_DRIVER_UNLOCK();
1004+ return err;
1005+}
1006+
1007+/*
1008+ * Return an unused driver id. Used by drivers prior to registering
1009+ * support for the algorithms they handle.
1010+ */
1011+int32_t
1012+crypto_get_driverid(device_t dev, int flags)
1013+{
1014+ struct cryptocap *newdrv;
1015+ int i;
1016+ unsigned long d_flags;
1017+
1018+ if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
1019+ printf("%s: no flags specified when registering driver\n",
1020+ device_get_nameunit(dev));
1021+ return -1;
1022+ }
1023+
1024+ CRYPTO_DRIVER_LOCK();
1025+
1026+ for (i = 0; i < crypto_drivers_num; i++) {
1027+ if (crypto_drivers[i].cc_dev == NULL &&
1028+ (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
1029+ break;
1030+ }
1031+ }
1032+
1033+ /* Out of entries, allocate some more. */
1034+ if (i == crypto_drivers_num) {
1035+ /* Be careful about wrap-around. */
1036+ if (2 * crypto_drivers_num <= crypto_drivers_num) {
1037+ CRYPTO_DRIVER_UNLOCK();
1038+ printk("crypto: driver count wraparound!\n");
1039+ return -1;
1040+ }
1041+
1042+ newdrv = kmalloc(2 * crypto_drivers_num * sizeof(struct cryptocap),
1043+ GFP_KERNEL);
1044+ if (newdrv == NULL) {
1045+ CRYPTO_DRIVER_UNLOCK();
1046+ printk("crypto: no space to expand driver table!\n");
1047+ return -1;
1048+ }
1049+
1050+ memcpy(newdrv, crypto_drivers,
1051+ crypto_drivers_num * sizeof(struct cryptocap));
1052+ memset(&newdrv[crypto_drivers_num], 0,
1053+ crypto_drivers_num * sizeof(struct cryptocap));
1054+
1055+ crypto_drivers_num *= 2;
1056+
1057+ kfree(crypto_drivers);
1058+ crypto_drivers = newdrv;
1059+ }
1060+
1061+ /* NB: state is zero'd on free */
1062+ crypto_drivers[i].cc_sessions = 1; /* Mark */
1063+ crypto_drivers[i].cc_dev = dev;
1064+ crypto_drivers[i].cc_flags = flags;
1065+ if (bootverbose)
1066+ printf("crypto: assign %s driver id %u, flags %u\n",
1067+ device_get_nameunit(dev), i, flags);
1068+
1069+ CRYPTO_DRIVER_UNLOCK();
1070+
1071+ return i;
1072+}
1073+
1074+/*
1075+ * Lookup a driver by name. We match against the full device
1076+ * name and unit, and against just the name. The latter gives
1077+ * us a simple wildcarding by device name. On success return the
1078+ * driver/hardware identifier; otherwise return -1.
1079+ */
1080+int
1081+crypto_find_driver(const char *match)
1082+{
1083+ int i, len = strlen(match);
1084+ unsigned long d_flags;
1085+
1086+ CRYPTO_DRIVER_LOCK();
1087+ for (i = 0; i < crypto_drivers_num; i++) {
1088+ device_t dev = crypto_drivers[i].cc_dev;
1089+ if (dev == NULL ||
1090+ (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
1091+ continue;
1092+ if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
1093+ strncmp(match, device_get_name(dev), len) == 0)
1094+ break;
1095+ }
1096+ CRYPTO_DRIVER_UNLOCK();
1097+ return i < crypto_drivers_num ? i : -1;
1098+}
1099+
1100+/*
1101+ * Return the device_t for the specified driver or NULL
1102+ * if the driver identifier is invalid.
1103+ */
1104+device_t
1105+crypto_find_device_byhid(int hid)
1106+{
1107+ struct cryptocap *cap = crypto_checkdriver(hid);
1108+ return cap != NULL ? cap->cc_dev : NULL;
1109+}
1110+
1111+/*
1112+ * Return the device/driver capabilities.
1113+ */
1114+int
1115+crypto_getcaps(int hid)
1116+{
1117+ struct cryptocap *cap = crypto_checkdriver(hid);
1118+ return cap != NULL ? cap->cc_flags : 0;
1119+}
1120+
1121+/*
1122+ * Register support for a key-related algorithm. This routine
1123+ * is called once for each algorithm supported by a driver.
1124+ */
1125+int
1126+crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
1127+{
1128+ struct cryptocap *cap;
1129+ int err;
1130+ unsigned long d_flags;
1131+
1132+ dprintk("%s()\n", __FUNCTION__);
1133+ CRYPTO_DRIVER_LOCK();
1134+
1135+ cap = crypto_checkdriver(driverid);
1136+ if (cap != NULL &&
1137+ (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
1138+ /*
1139+ * XXX Do some performance testing to determine placing.
1140+ * XXX We probably need an auxiliary data structure that
1141+ * XXX describes relative performances.
1142+ */
1143+
1144+ cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
1145+ if (bootverbose)
1146+ printf("crypto: %s registers key alg %u flags %u\n"
1147+ , device_get_nameunit(cap->cc_dev)
1148+ , kalg
1149+ , flags
1150+ );
1151+ err = 0;
1152+ } else
1153+ err = EINVAL;
1154+
1155+ CRYPTO_DRIVER_UNLOCK();
1156+ return err;
1157+}
1158+
1159+/*
1160+ * Register support for a non-key-related algorithm. This routine
1161+ * is called once for each such algorithm supported by a driver.
1162+ */
1163+int
1164+crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
1165+ u_int32_t flags)
1166+{
1167+ struct cryptocap *cap;
1168+ int err;
1169+ unsigned long d_flags;
1170+
1171+ dprintk("%s(id=0x%x, alg=%d, maxoplen=%d, flags=0x%x)\n", __FUNCTION__,
1172+ driverid, alg, maxoplen, flags);
1173+
1174+ CRYPTO_DRIVER_LOCK();
1175+
1176+ cap = crypto_checkdriver(driverid);
1177+ /* NB: algorithms are in the range [1..max] */
1178+ if (cap != NULL &&
1179+ (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
1180+ /*
1181+ * XXX Do some performance testing to determine placing.
1182+ * XXX We probably need an auxiliary data structure that
1183+ * XXX describes relative performances.
1184+ */
1185+
1186+ cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
1187+ cap->cc_max_op_len[alg] = maxoplen;
1188+ if (bootverbose)
1189+ printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
1190+ , device_get_nameunit(cap->cc_dev)
1191+ , alg
1192+ , flags
1193+ , maxoplen
1194+ );
1195+ cap->cc_sessions = 0; /* Unmark */
1196+ err = 0;
1197+ } else
1198+ err = EINVAL;
1199+
1200+ CRYPTO_DRIVER_UNLOCK();
1201+ return err;
1202+}
1203+
1204+static void
1205+driver_finis(struct cryptocap *cap)
1206+{
1207+ u_int32_t ses, kops;
1208+
1209+ CRYPTO_DRIVER_ASSERT();
1210+
1211+ ses = cap->cc_sessions;
1212+ kops = cap->cc_koperations;
1213+ bzero(cap, sizeof(*cap));
1214+ if (ses != 0 || kops != 0) {
1215+ /*
1216+ * If there are pending sessions,
1217+ * just mark as invalid.
1218+ */
1219+ cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
1220+ cap->cc_sessions = ses;
1221+ cap->cc_koperations = kops;
1222+ }
1223+}
1224+
1225+/*
1226+ * Unregister a crypto driver. If there are pending sessions using it,
1227+ * leave enough information around so that subsequent calls using those
1228+ * sessions will correctly detect the driver has been unregistered and
1229+ * reroute requests.
1230+ */
1231+int
1232+crypto_unregister(u_int32_t driverid, int alg)
1233+{
1234+ struct cryptocap *cap;
1235+ int i, err;
1236+ unsigned long d_flags;
1237+
1238+ dprintk("%s()\n", __FUNCTION__);
1239+ CRYPTO_DRIVER_LOCK();
1240+
1241+ cap = crypto_checkdriver(driverid);
1242+ if (cap != NULL &&
1243+ (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
1244+ cap->cc_alg[alg] != 0) {
1245+ cap->cc_alg[alg] = 0;
1246+ cap->cc_max_op_len[alg] = 0;
1247+
1248+ /* Was this the last algorithm ? */
1249+ for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
1250+ if (cap->cc_alg[i] != 0)
1251+ break;
1252+
1253+ if (i == CRYPTO_ALGORITHM_MAX + 1)
1254+ driver_finis(cap);
1255+ err = 0;
1256+ } else
1257+ err = EINVAL;
1258+ CRYPTO_DRIVER_UNLOCK();
1259+ return err;
1260+}
1261+
1262+/*
1263+ * Unregister all algorithms associated with a crypto driver.
1264+ * If there are pending sessions using it, leave enough information
1265+ * around so that subsequent calls using those sessions will
1266+ * correctly detect the driver has been unregistered and reroute
1267+ * requests.
1268+ */
1269+int
1270+crypto_unregister_all(u_int32_t driverid)
1271+{
1272+ struct cryptocap *cap;
1273+ int err;
1274+ unsigned long d_flags;
1275+
1276+ dprintk("%s()\n", __FUNCTION__);
1277+ CRYPTO_DRIVER_LOCK();
1278+ cap = crypto_checkdriver(driverid);
1279+ if (cap != NULL) {
1280+ driver_finis(cap);
1281+ err = 0;
1282+ } else
1283+ err = EINVAL;
1284+ CRYPTO_DRIVER_UNLOCK();
1285+
1286+ return err;
1287+}
1288+
1289+/*
1290+ * Clear blockage on a driver. The what parameter indicates whether
1291+ * the driver is now ready for cryptop's and/or cryptokop's.
1292+ */
1293+int
1294+crypto_unblock(u_int32_t driverid, int what)
1295+{
1296+ struct cryptocap *cap;
1297+ int err;
1298+ unsigned long q_flags;
1299+
1300+ CRYPTO_Q_LOCK();
1301+ cap = crypto_checkdriver(driverid);
1302+ if (cap != NULL) {
1303+ if (what & CRYPTO_SYMQ) {
1304+ cap->cc_qblocked = 0;
1305+ cap->cc_unqblocked = 0;
1306+ crypto_all_qblocked = 0;
1307+ }
1308+ if (what & CRYPTO_ASYMQ) {
1309+ cap->cc_kqblocked = 0;
1310+ cap->cc_unkqblocked = 0;
1311+ crypto_all_kqblocked = 0;
1312+ }
1313+ wake_up_interruptible(&cryptoproc_wait);
1314+ err = 0;
1315+ } else
1316+ err = EINVAL;
1317+ CRYPTO_Q_UNLOCK(); //DAVIDM should this be a driver lock
1318+
1319+ return err;
1320+}
1321+
1322+/*
1323+ * Add a crypto request to a queue, to be processed by the kernel thread.
1324+ */
1325+int
1326+crypto_dispatch(struct cryptop *crp)
1327+{
1328+ struct cryptocap *cap;
1329+ int result = -1;
1330+ unsigned long q_flags;
1331+
1332+ dprintk("%s()\n", __FUNCTION__);
1333+
1334+ cryptostats.cs_ops++;
1335+
1336+ CRYPTO_Q_LOCK();
1337+ if (crypto_q_cnt >= crypto_q_max) {
1338+ cryptostats.cs_drops++;
1339+ CRYPTO_Q_UNLOCK();
1340+ return ENOMEM;
1341+ }
1342+ crypto_q_cnt++;
1343+
1344+ /* make sure we are starting a fresh run on this crp. */
1345+ crp->crp_flags &= ~CRYPTO_F_DONE;
1346+ crp->crp_etype = 0;
1347+
1348+ /*
1349+ * Caller marked the request to be processed immediately; dispatch
1350+ * it directly to the driver unless the driver is currently blocked.
1351+ */
1352+ if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
1353+ int hid = CRYPTO_SESID2HID(crp->crp_sid);
1354+ cap = crypto_checkdriver(hid);
1355+ /* Driver cannot disappear when there is an active session. */
1356+ KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
1357+ if (!cap->cc_qblocked) {
1358+ crypto_all_qblocked = 0;
1359+ crypto_drivers[hid].cc_unqblocked = 1;
1360+ CRYPTO_Q_UNLOCK();
1361+ result = crypto_invoke(cap, crp, 0);
1362+ CRYPTO_Q_LOCK();
1363+ if (result == ERESTART)
1364+ if (crypto_drivers[hid].cc_unqblocked)
1365+ crypto_drivers[hid].cc_qblocked = 1;
1366+ crypto_drivers[hid].cc_unqblocked = 0;
1367+ }
1368+ }
1369+ if (result == ERESTART) {
1370+ /*
1371+ * The driver ran out of resources, mark the
1372+ * driver ``blocked'' for cryptop's and put
1373+ * the request back in the queue. It would
1374+ * best to put the request back where we got
1375+ * it but that's hard so for now we put it
1376+ * at the front. This should be ok; putting
1377+ * it at the end does not work.
1378+ */
1379+ list_add(&crp->crp_next, &crp_q);
1380+ cryptostats.cs_blocks++;
1381+ result = 0;
1382+ } else if (result == -1) {
1383+ TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
1384+ result = 0;
1385+ }
1386+ wake_up_interruptible(&cryptoproc_wait);
1387+ CRYPTO_Q_UNLOCK();
1388+ return result;
1389+}
1390+
1391+/*
1392+ * Add an asymmetric crypto request to a queue,
1393+ * to be processed by the kernel thread.
1394+ */
1395+int
1396+crypto_kdispatch(struct cryptkop *krp)
1397+{
1398+ int error;
1399+ unsigned long q_flags;
1400+
1401+ cryptostats.cs_kops++;
1402+
1403+ error = crypto_kinvoke(krp, krp->krp_crid);
1404+ if (error == ERESTART) {
1405+ CRYPTO_Q_LOCK();
1406+ TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
1407+ wake_up_interruptible(&cryptoproc_wait);
1408+ CRYPTO_Q_UNLOCK();
1409+ error = 0;
1410+ }
1411+ return error;
1412+}
1413+
1414+/*
1415+ * Verify a driver is suitable for the specified operation.
1416+ */
1417+static __inline int
1418+kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
1419+{
1420+ return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
1421+}
1422+
1423+/*
1424+ * Select a driver for an asym operation. The driver must
1425+ * support the necessary algorithm. The caller can constrain
1426+ * which device is selected with the flags parameter. The
1427+ * algorithm we use here is pretty stupid; just use the first
1428+ * driver that supports the algorithms we need. If there are
1429+ * multiple suitable drivers we choose the driver with the
1430+ * fewest active operations. We prefer hardware-backed
1431+ * drivers to software ones when either may be used.
1432+ */
1433+static struct cryptocap *
1434+crypto_select_kdriver(const struct cryptkop *krp, int flags)
1435+{
1436+ struct cryptocap *cap, *best, *blocked;
1437+ int match, hid;
1438+
1439+ CRYPTO_DRIVER_ASSERT();
1440+
1441+ /*
1442+ * Look first for hardware crypto devices if permitted.
1443+ */
1444+ if (flags & CRYPTOCAP_F_HARDWARE)
1445+ match = CRYPTOCAP_F_HARDWARE;
1446+ else
1447+ match = CRYPTOCAP_F_SOFTWARE;
1448+ best = NULL;
1449+ blocked = NULL;
1450+again:
1451+ for (hid = 0; hid < crypto_drivers_num; hid++) {
1452+ cap = &crypto_drivers[hid];
1453+ /*
1454+ * If it's not initialized, is in the process of
1455+ * going away, or is not appropriate (hardware
1456+ * or software based on match), then skip.
1457+ */
1458+ if (cap->cc_dev == NULL ||
1459+ (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
1460+ (cap->cc_flags & match) == 0)
1461+ continue;
1462+
1463+ /* verify all the algorithms are supported. */
1464+ if (kdriver_suitable(cap, krp)) {
1465+ if (best == NULL ||
1466+ cap->cc_koperations < best->cc_koperations)
1467+ best = cap;
1468+ }
1469+ }
1470+ if (best != NULL)
1471+ return best;
1472+ if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
1473+ /* sort of an Algol 68-style for loop */
1474+ match = CRYPTOCAP_F_SOFTWARE;
1475+ goto again;
1476+ }
1477+ return best;
1478+}
1479+
1480+/*
1481+ * Dispatch an asymmetric crypto request.
1482+ */
1483+static int
1484+crypto_kinvoke(struct cryptkop *krp, int crid)
1485+{
1486+ struct cryptocap *cap = NULL;
1487+ int error;
1488+ unsigned long d_flags;
1489+
1490+ KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
1491+ KASSERT(krp->krp_callback != NULL,
1492+ ("%s: krp->crp_callback == NULL", __func__));
1493+
1494+ CRYPTO_DRIVER_LOCK();
1495+ if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
1496+ cap = crypto_checkdriver(crid);
1497+ if (cap != NULL) {
1498+ /*
1499+ * Driver present, it must support the necessary
1500+ * algorithm and, if s/w drivers are excluded,
1501+ * it must be registered as hardware-backed.
1502+ */
1503+ if (!kdriver_suitable(cap, krp) ||
1504+ (!crypto_devallowsoft &&
1505+ (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
1506+ cap = NULL;
1507+ }
1508+ } else {
1509+ /*
1510+ * No requested driver; select based on crid flags.
1511+ */
1512+ if (!crypto_devallowsoft) /* NB: disallow s/w drivers */
1513+ crid &= ~CRYPTOCAP_F_SOFTWARE;
1514+ cap = crypto_select_kdriver(krp, crid);
1515+ }
1516+ if (cap != NULL && !cap->cc_kqblocked) {
1517+ krp->krp_hid = cap - crypto_drivers;
1518+ cap->cc_koperations++;
1519+ CRYPTO_DRIVER_UNLOCK();
1520+ error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
1521+ CRYPTO_DRIVER_LOCK();
1522+ if (error == ERESTART) {
1523+ cap->cc_koperations--;
1524+ CRYPTO_DRIVER_UNLOCK();
1525+ return (error);
1526+ }
1527+ /* return the actual device used */
1528+ krp->krp_crid = krp->krp_hid;
1529+ } else {
1530+ /*
1531+ * NB: cap is !NULL if device is blocked; in
1532+ * that case return ERESTART so the operation
1533+ * is resubmitted if possible.
1534+ */
1535+ error = (cap == NULL) ? ENODEV : ERESTART;
1536+ }
1537+ CRYPTO_DRIVER_UNLOCK();
1538+
1539+ if (error) {
1540+ krp->krp_status = error;
1541+ crypto_kdone(krp);
1542+ }
1543+ return 0;
1544+}
1545+
1546+
1547+/*
1548+ * Dispatch a crypto request to the appropriate crypto devices.
1549+ */
1550+static int
1551+crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
1552+{
1553+ KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
1554+ KASSERT(crp->crp_callback != NULL,
1555+ ("%s: crp->crp_callback == NULL", __func__));
1556+ KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
1557+
1558+ dprintk("%s()\n", __FUNCTION__);
1559+
1560+#ifdef CRYPTO_TIMING
1561+ if (crypto_timing)
1562+ crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
1563+#endif
1564+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1565+ struct cryptodesc *crd;
1566+ u_int64_t nid;
1567+
1568+ /*
1569+ * Driver has unregistered; migrate the session and return
1570+ * an error to the caller so they'll resubmit the op.
1571+ *
1572+ * XXX: What if there are more already queued requests for this
1573+ * session?
1574+ */
1575+ crypto_freesession(crp->crp_sid);
1576+
1577+ for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
1578+ crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
1579+
1580+ /* XXX propagate flags from initial session? */
1581+ if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
1582+ CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
1583+ crp->crp_sid = nid;
1584+
1585+ crp->crp_etype = EAGAIN;
1586+ crypto_done(crp);
1587+ return 0;
1588+ } else {
1589+ /*
1590+ * Invoke the driver to process the request.
1591+ */
1592+ return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
1593+ }
1594+}
1595+
1596+/*
1597+ * Release a set of crypto descriptors.
1598+ */
1599+void
1600+crypto_freereq(struct cryptop *crp)
1601+{
1602+ struct cryptodesc *crd;
1603+
1604+ if (crp == NULL)
1605+ return;
1606+
1607+#ifdef DIAGNOSTIC
1608+ {
1609+ struct cryptop *crp2;
1610+ unsigned long q_flags;
1611+
1612+ CRYPTO_Q_LOCK();
1613+ TAILQ_FOREACH(crp2, &crp_q, crp_next) {
1614+ KASSERT(crp2 != crp,
1615+ ("Freeing cryptop from the crypto queue (%p).",
1616+ crp));
1617+ }
1618+ CRYPTO_Q_UNLOCK();
1619+ CRYPTO_RETQ_LOCK();
1620+ TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
1621+ KASSERT(crp2 != crp,
1622+ ("Freeing cryptop from the return queue (%p).",
1623+ crp));
1624+ }
1625+ CRYPTO_RETQ_UNLOCK();
1626+ }
1627+#endif
1628+
1629+ while ((crd = crp->crp_desc) != NULL) {
1630+ crp->crp_desc = crd->crd_next;
1631+ kmem_cache_free(cryptodesc_zone, crd);
1632+ }
1633+ kmem_cache_free(cryptop_zone, crp);
1634+}
1635+
1636+/*
1637+ * Acquire a set of crypto descriptors.
1638+ */
1639+struct cryptop *
1640+crypto_getreq(int num)
1641+{
1642+ struct cryptodesc *crd;
1643+ struct cryptop *crp;
1644+
1645+ crp = kmem_cache_alloc(cryptop_zone, SLAB_ATOMIC);
1646+ if (crp != NULL) {
1647+ memset(crp, 0, sizeof(*crp));
1648+ INIT_LIST_HEAD(&crp->crp_next);
1649+ init_waitqueue_head(&crp->crp_waitq);
1650+ while (num--) {
1651+ crd = kmem_cache_alloc(cryptodesc_zone, SLAB_ATOMIC);
1652+ if (crd == NULL) {
1653+ crypto_freereq(crp);
1654+ return NULL;
1655+ }
1656+ memset(crd, 0, sizeof(*crd));
1657+ crd->crd_next = crp->crp_desc;
1658+ crp->crp_desc = crd;
1659+ }
1660+ }
1661+ return crp;
1662+}
1663+
1664+/*
1665+ * Invoke the callback on behalf of the driver.
1666+ */
1667+void
1668+crypto_done(struct cryptop *crp)
1669+{
1670+ unsigned long q_flags;
1671+
1672+ dprintk("%s()\n", __FUNCTION__);
1673+ if ((crp->crp_flags & CRYPTO_F_DONE) == 0) {
1674+ crp->crp_flags |= CRYPTO_F_DONE;
1675+ CRYPTO_Q_LOCK();
1676+ crypto_q_cnt--;
1677+ CRYPTO_Q_UNLOCK();
1678+ } else
1679+ printk("crypto: crypto_done op already done, flags 0x%x",
1680+ crp->crp_flags);
1681+ if (crp->crp_etype != 0)
1682+ cryptostats.cs_errs++;
1683+ /*
1684+ * CBIMM means unconditionally do the callback immediately;
1685+ * CBIFSYNC means do the callback immediately only if the
1686+ * operation was done synchronously. Both are used to avoid
1687+ * doing extraneous context switches; the latter is mostly
1688+ * used with the software crypto driver.
1689+ */
1690+ if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
1691+ ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
1692+ (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
1693+ /*
1694+ * Do the callback directly. This is ok when the
1695+ * callback routine does very little (e.g. the
1696+ * /dev/crypto callback method just does a wakeup).
1697+ */
1698+ crp->crp_callback(crp);
1699+ } else {
1700+ unsigned long r_flags;
1701+ /*
1702+ * Normal case; queue the callback for the thread.
1703+ */
1704+ CRYPTO_RETQ_LOCK();
1705+ wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
1706+ TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
1707+ CRYPTO_RETQ_UNLOCK();
1708+ }
1709+}
1710+
1711+/*
1712+ * Invoke the callback on behalf of the driver.
1713+ */
1714+void
1715+crypto_kdone(struct cryptkop *krp)
1716+{
1717+ struct cryptocap *cap;
1718+ unsigned long d_flags;
1719+
1720+ if ((krp->krp_flags & CRYPTO_KF_DONE) != 0)
1721+ printk("crypto: crypto_kdone op already done, flags 0x%x",
1722+ krp->krp_flags);
1723+ krp->krp_flags |= CRYPTO_KF_DONE;
1724+ if (krp->krp_status != 0)
1725+ cryptostats.cs_kerrs++;
1726+
1727+ CRYPTO_DRIVER_LOCK();
1728+ /* XXX: What if driver is loaded in the meantime? */
1729+ if (krp->krp_hid < crypto_drivers_num) {
1730+ cap = &crypto_drivers[krp->krp_hid];
1731+ cap->cc_koperations--;
1732+ KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
1733+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
1734+ crypto_remove(cap);
1735+ }
1736+ CRYPTO_DRIVER_UNLOCK();
1737+
1738+ /*
1739+ * CBIMM means unconditionally do the callback immediately;
1740+ * This is used to avoid doing extraneous context switches
1741+ */
1742+ if ((krp->krp_flags & CRYPTO_KF_CBIMM)) {
1743+ /*
1744+ * Do the callback directly. This is ok when the
1745+ * callback routine does very little (e.g. the
1746+ * /dev/crypto callback method just does a wakeup).
1747+ */
1748+ krp->krp_callback(krp);
1749+ } else {
1750+ unsigned long r_flags;
1751+ /*
1752+ * Normal case; queue the callback for the thread.
1753+ */
1754+ CRYPTO_RETQ_LOCK();
1755+ wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
1756+ TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
1757+ CRYPTO_RETQ_UNLOCK();
1758+ }
1759+}
1760+
1761+int
1762+crypto_getfeat(int *featp)
1763+{
1764+ int hid, kalg, feat = 0;
1765+ unsigned long d_flags;
1766+
1767+ CRYPTO_DRIVER_LOCK();
1768+ for (hid = 0; hid < crypto_drivers_num; hid++) {
1769+ const struct cryptocap *cap = &crypto_drivers[hid];
1770+
1771+ if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1772+ !crypto_devallowsoft) {
1773+ continue;
1774+ }
1775+ for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1776+ if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
1777+ feat |= 1 << kalg;
1778+ }
1779+ CRYPTO_DRIVER_UNLOCK();
1780+ *featp = feat;
1781+ return (0);
1782+}
1783+
1784+/*
1785+ * Crypto thread, dispatches crypto requests.
1786+ */
1787+static int
1788+crypto_proc(void *arg)
1789+{
1790+ struct cryptop *crp, *submit;
1791+ struct cryptkop *krp, *krpp;
1792+ struct cryptocap *cap;
1793+ u_int32_t hid;
1794+ int result, hint;
1795+ unsigned long q_flags;
1796+ int loopcount = 0;
1797+
1798+ set_current_state(TASK_INTERRUPTIBLE);
1799+
1800+ CRYPTO_Q_LOCK();
1801+ for (;;) {
1802+ /*
1803+ * we need to make sure we don't get into a busy loop with nothing
1804+ * to do, the two crypto_all_*blocked vars help us find out when
1805+ * we are all full and can do nothing on any driver or Q. If so we
1806+ * wait for an unblock.
1807+ */
1808+ crypto_all_qblocked = !list_empty(&crp_q);
1809+
1810+ /*
1811+ * Find the first element in the queue that can be
1812+ * processed and look-ahead to see if multiple ops
1813+ * are ready for the same driver.
1814+ */
1815+ submit = NULL;
1816+ hint = 0;
1817+ list_for_each_entry(crp, &crp_q, crp_next) {
1818+ hid = CRYPTO_SESID2HID(crp->crp_sid);
1819+ cap = crypto_checkdriver(hid);
1820+ /*
1821+ * Driver cannot disappear when there is an active
1822+ * session.
1823+ */
1824+ KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1825+ __func__, __LINE__));
1826+ if (cap == NULL || cap->cc_dev == NULL) {
1827+ /* Op needs to be migrated, process it. */
1828+ if (submit == NULL)
1829+ submit = crp;
1830+ break;
1831+ }
1832+ if (!cap->cc_qblocked) {
1833+ if (submit != NULL) {
1834+ /*
1835+ * We stop on finding another op,
1836+ * regardless whether its for the same
1837+ * driver or not. We could keep
1838+ * searching the queue but it might be
1839+ * better to just use a per-driver
1840+ * queue instead.
1841+ */
1842+ if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
1843+ hint = CRYPTO_HINT_MORE;
1844+ break;
1845+ } else {
1846+ submit = crp;
1847+ if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1848+ break;
1849+ /* keep scanning for more are q'd */
1850+ }
1851+ }
1852+ }
1853+ if (submit != NULL) {
1854+ hid = CRYPTO_SESID2HID(submit->crp_sid);
1855+ crypto_all_qblocked = 0;
1856+ list_del(&submit->crp_next);
1857+ crypto_drivers[hid].cc_unqblocked = 1;
1858+ cap = crypto_checkdriver(hid);
1859+ CRYPTO_Q_UNLOCK();
1860+ KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1861+ __func__, __LINE__));
1862+ result = crypto_invoke(cap, submit, hint);
1863+ CRYPTO_Q_LOCK();
1864+ if (result == ERESTART) {
1865+ /*
1866+ * The driver ran out of resources, mark the
1867+ * driver ``blocked'' for cryptop's and put
1868+ * the request back in the queue. It would
1869+ * best to put the request back where we got
1870+ * it but that's hard so for now we put it
1871+ * at the front. This should be ok; putting
1872+ * it at the end does not work.
1873+ */
1874+ /* XXX validate sid again? */
1875+ list_add(&submit->crp_next, &crp_q);
1876+ cryptostats.cs_blocks++;
1877+ if (crypto_drivers[hid].cc_unqblocked)
1878+ crypto_drivers[hid].cc_qblocked=0;
1879+ crypto_drivers[hid].cc_unqblocked=0;
1880+ }
1881+ crypto_drivers[hid].cc_unqblocked = 0;
1882+ }
1883+
1884+ crypto_all_kqblocked = !list_empty(&crp_kq);
1885+
1886+ /* As above, but for key ops */
1887+ krp = NULL;
1888+ list_for_each_entry(krpp, &crp_kq, krp_next) {
1889+ cap = crypto_checkdriver(krpp->krp_hid);
1890+ if (cap == NULL || cap->cc_dev == NULL) {
1891+ /*
1892+ * Operation needs to be migrated, invalidate
1893+ * the assigned device so it will reselect a
1894+ * new one below. Propagate the original
1895+ * crid selection flags if supplied.
1896+ */
1897+ krp->krp_hid = krp->krp_crid &
1898+ (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
1899+ if (krp->krp_hid == 0)
1900+ krp->krp_hid =
1901+ CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
1902+ break;
1903+ }
1904+ if (!cap->cc_kqblocked) {
1905+ krp = krpp;
1906+ break;
1907+ }
1908+ }
1909+ if (krp != NULL) {
1910+ crypto_all_kqblocked = 0;
1911+ list_del(&krp->krp_next);
1912+ crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
1913+ CRYPTO_Q_UNLOCK();
1914+ result = crypto_kinvoke(krp, krp->krp_hid);
1915+ CRYPTO_Q_LOCK();
1916+ if (result == ERESTART) {
1917+ /*
1918+ * The driver ran out of resources, mark the
1919+ * driver ``blocked'' for cryptkop's and put
1920+ * the request back in the queue. It would
1921+ * best to put the request back where we got
1922+ * it but that's hard so for now we put it
1923+ * at the front. This should be ok; putting
1924+ * it at the end does not work.
1925+ */
1926+ /* XXX validate sid again? */
1927+ list_add(&krp->krp_next, &crp_kq);
1928+ cryptostats.cs_kblocks++;
1929+ } else
1930+ crypto_drivers[krp->krp_hid].cc_kqblocked = 0;
1931+ }
1932+
1933+ if (submit == NULL && krp == NULL) {
1934+ /*
1935+ * Nothing more to be processed. Sleep until we're
1936+ * woken because there are more ops to process.
1937+ * This happens either by submission or by a driver
1938+ * becoming unblocked and notifying us through
1939+ * crypto_unblock. Note that when we wakeup we
1940+ * start processing each queue again from the
1941+ * front. It's not clear that it's important to
1942+ * preserve this ordering since ops may finish
1943+ * out of order if dispatched to different devices
1944+ * and some become blocked while others do not.
1945+ */
1946+ dprintk("%s - sleeping (qe=%d qb=%d kqe=%d kqb=%d)\n",
1947+ __FUNCTION__,
1948+ list_empty(&crp_q), crypto_all_qblocked,
1949+ list_empty(&crp_kq), crypto_all_kqblocked);
1950+ loopcount = 0;
1951+ CRYPTO_Q_UNLOCK();
1952+ wait_event_interruptible(cryptoproc_wait,
1953+ !(list_empty(&crp_q) || crypto_all_qblocked) ||
1954+ !(list_empty(&crp_kq) || crypto_all_kqblocked) ||
1955+ kthread_should_stop());
1956+ if (signal_pending (current)) {
1957+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
1958+ spin_lock_irq(&current->sigmask_lock);
1959+#endif
1960+ flush_signals(current);
1961+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
1962+ spin_unlock_irq(&current->sigmask_lock);
1963+#endif
1964+ }
1965+ CRYPTO_Q_LOCK();
1966+ dprintk("%s - awake\n", __FUNCTION__);
1967+ if (kthread_should_stop())
1968+ break;
1969+ cryptostats.cs_intrs++;
1970+ } else if (loopcount > crypto_max_loopcount) {
1971+ /*
1972+ * Give other processes a chance to run if we've
1973+ * been using the CPU exclusively for a while.
1974+ */
1975+ loopcount = 0;
1976+ CRYPTO_Q_UNLOCK();
1977+ schedule();
1978+ CRYPTO_Q_LOCK();
1979+ }
1980+ loopcount++;
1981+ }
1982+ CRYPTO_Q_UNLOCK();
1983+ return 0;
1984+}
1985+
1986+/*
1987+ * Crypto returns thread, does callbacks for processed crypto requests.
1988+ * Callbacks are done here, rather than in the crypto drivers, because
1989+ * callbacks typically are expensive and would slow interrupt handling.
1990+ */
1991+static int
1992+crypto_ret_proc(void *arg)
1993+{
1994+ struct cryptop *crpt;
1995+ struct cryptkop *krpt;
1996+ unsigned long r_flags;
1997+
1998+ set_current_state(TASK_INTERRUPTIBLE);
1999+
2000+ CRYPTO_RETQ_LOCK();
2001+ for (;;) {
2002+ /* Harvest return q's for completed ops */
2003+ crpt = NULL;
2004+ if (!list_empty(&crp_ret_q))
2005+ crpt = list_entry(crp_ret_q.next, typeof(*crpt), crp_next);
2006+ if (crpt != NULL)
2007+ list_del(&crpt->crp_next);
2008+
2009+ krpt = NULL;
2010+ if (!list_empty(&crp_ret_kq))
2011+ krpt = list_entry(crp_ret_kq.next, typeof(*krpt), krp_next);
2012+ if (krpt != NULL)
2013+ list_del(&krpt->krp_next);
2014+
2015+ if (crpt != NULL || krpt != NULL) {
2016+ CRYPTO_RETQ_UNLOCK();
2017+ /*
2018+ * Run callbacks unlocked.
2019+ */
2020+ if (crpt != NULL)
2021+ crpt->crp_callback(crpt);
2022+ if (krpt != NULL)
2023+ krpt->krp_callback(krpt);
2024+ CRYPTO_RETQ_LOCK();
2025+ } else {
2026+ /*
2027+ * Nothing more to be processed. Sleep until we're
2028+ * woken because there are more returns to process.
2029+ */
2030+ dprintk("%s - sleeping\n", __FUNCTION__);
2031+ CRYPTO_RETQ_UNLOCK();
2032+ wait_event_interruptible(cryptoretproc_wait,
2033+ !list_empty(&crp_ret_q) ||
2034+ !list_empty(&crp_ret_kq) ||
2035+ kthread_should_stop());
2036+ if (signal_pending (current)) {
2037+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
2038+ spin_lock_irq(&current->sigmask_lock);
2039+#endif
2040+ flush_signals(current);
2041+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
2042+ spin_unlock_irq(&current->sigmask_lock);
2043+#endif
2044+ }
2045+ CRYPTO_RETQ_LOCK();
2046+ dprintk("%s - awake\n", __FUNCTION__);
2047+ if (kthread_should_stop()) {
2048+ dprintk("%s - EXITING!\n", __FUNCTION__);
2049+ break;
2050+ }
2051+ cryptostats.cs_rets++;
2052+ }
2053+ }
2054+ CRYPTO_RETQ_UNLOCK();
2055+ return 0;
2056+}
2057+
2058+
2059+#if 0 /* should put this into /proc or something */
2060+static void
2061+db_show_drivers(void)
2062+{
2063+ int hid;
2064+
2065+ db_printf("%12s %4s %4s %8s %2s %2s\n"
2066+ , "Device"
2067+ , "Ses"
2068+ , "Kops"
2069+ , "Flags"
2070+ , "QB"
2071+ , "KB"
2072+ );
2073+ for (hid = 0; hid < crypto_drivers_num; hid++) {
2074+ const struct cryptocap *cap = &crypto_drivers[hid];
2075+ if (cap->cc_dev == NULL)
2076+ continue;
2077+ db_printf("%-12s %4u %4u %08x %2u %2u\n"
2078+ , device_get_nameunit(cap->cc_dev)
2079+ , cap->cc_sessions
2080+ , cap->cc_koperations
2081+ , cap->cc_flags
2082+ , cap->cc_qblocked
2083+ , cap->cc_kqblocked
2084+ );
2085+ }
2086+}
2087+
2088+DB_SHOW_COMMAND(crypto, db_show_crypto)
2089+{
2090+ struct cryptop *crp;
2091+
2092+ db_show_drivers();
2093+ db_printf("\n");
2094+
2095+ db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
2096+ "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
2097+ "Desc", "Callback");
2098+ TAILQ_FOREACH(crp, &crp_q, crp_next) {
2099+ db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
2100+ , (int) CRYPTO_SESID2HID(crp->crp_sid)
2101+ , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
2102+ , crp->crp_ilen, crp->crp_olen
2103+ , crp->crp_etype
2104+ , crp->crp_flags
2105+ , crp->crp_desc
2106+ , crp->crp_callback
2107+ );
2108+ }
2109+ if (!TAILQ_EMPTY(&crp_ret_q)) {
2110+ db_printf("\n%4s %4s %4s %8s\n",
2111+ "HID", "Etype", "Flags", "Callback");
2112+ TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
2113+ db_printf("%4u %4u %04x %8p\n"
2114+ , (int) CRYPTO_SESID2HID(crp->crp_sid)
2115+ , crp->crp_etype
2116+ , crp->crp_flags
2117+ , crp->crp_callback
2118+ );
2119+ }
2120+ }
2121+}
2122+
2123+DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
2124+{
2125+ struct cryptkop *krp;
2126+
2127+ db_show_drivers();
2128+ db_printf("\n");
2129+
2130+ db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
2131+ "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
2132+ TAILQ_FOREACH(krp, &crp_kq, krp_next) {
2133+ db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
2134+ , krp->krp_op
2135+ , krp->krp_status
2136+ , krp->krp_iparams, krp->krp_oparams
2137+ , krp->krp_crid, krp->krp_hid
2138+ , krp->krp_callback
2139+ );
2140+ }
2141+ if (!TAILQ_EMPTY(&crp_ret_q)) {
2142+ db_printf("%4s %5s %8s %4s %8s\n",
2143+ "Op", "Status", "CRID", "HID", "Callback");
2144+ TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
2145+ db_printf("%4u %5u %08x %4u %8p\n"
2146+ , krp->krp_op
2147+ , krp->krp_status
2148+ , krp->krp_crid, krp->krp_hid
2149+ , krp->krp_callback
2150+ );
2151+ }
2152+ }
2153+}
2154+#endif
2155+
2156+
2157+static int
2158+crypto_init(void)
2159+{
2160+ int error;
2161+ unsigned long cpu;
2162+
2163+ dprintk("%s(%p)\n", __FUNCTION__, (void *) crypto_init);
2164+
2165+ if (crypto_initted)
2166+ return 0;
2167+ crypto_initted = 1;
2168+
2169+ spin_lock_init(&crypto_drivers_lock);
2170+ spin_lock_init(&crypto_q_lock);
2171+ spin_lock_init(&crypto_ret_q_lock);
2172+
2173+ cryptop_zone = kmem_cache_create("cryptop", sizeof(struct cryptop),
2174+ 0, SLAB_HWCACHE_ALIGN, NULL
2175+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
2176+ , NULL
2177+#endif
2178+ );
2179+
2180+ cryptodesc_zone = kmem_cache_create("cryptodesc", sizeof(struct cryptodesc),
2181+ 0, SLAB_HWCACHE_ALIGN, NULL
2182+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
2183+ , NULL
2184+#endif
2185+ );
2186+
2187+ if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
2188+ printk("crypto: crypto_init cannot setup crypto zones\n");
2189+ error = ENOMEM;
2190+ goto bad;
2191+ }
2192+
2193+ crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
2194+ crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap),
2195+ GFP_KERNEL);
2196+ if (crypto_drivers == NULL) {
2197+ printk("crypto: crypto_init cannot setup crypto drivers\n");
2198+ error = ENOMEM;
2199+ goto bad;
2200+ }
2201+
2202+ memset(crypto_drivers, 0, crypto_drivers_num * sizeof(struct cryptocap));
2203+
2204+ ocf_for_each_cpu(cpu) {
2205+ cryptoproc[cpu] = kthread_create(crypto_proc, (void *) cpu,
2206+ "ocf_%d", (int) cpu);
2207+ if (IS_ERR(cryptoproc[cpu])) {
2208+ error = PTR_ERR(cryptoproc[cpu]);
2209+ printk("crypto: crypto_init cannot start crypto thread; error %d",
2210+ error);
2211+ goto bad;
2212+ }
2213+ kthread_bind(cryptoproc[cpu], cpu);
2214+ wake_up_process(cryptoproc[cpu]);
2215+
2216+ cryptoretproc[cpu] = kthread_create(crypto_ret_proc, (void *) cpu,
2217+ "ocf_ret_%d", (int) cpu);
2218+ if (IS_ERR(cryptoretproc[cpu])) {
2219+ error = PTR_ERR(cryptoretproc[cpu]);
2220+ printk("crypto: crypto_init cannot start cryptoret thread; error %d",
2221+ error);
2222+ goto bad;
2223+ }
2224+ kthread_bind(cryptoretproc[cpu], cpu);
2225+ wake_up_process(cryptoretproc[cpu]);
2226+ }
2227+
2228+ return 0;
2229+bad:
2230+ crypto_exit();
2231+ return error;
2232+}
2233+
2234+
2235+static void
2236+crypto_exit(void)
2237+{
2238+ int cpu;
2239+
2240+ dprintk("%s()\n", __FUNCTION__);
2241+
2242+ /*
2243+ * Terminate any crypto threads.
2244+ */
2245+ ocf_for_each_cpu(cpu) {
2246+ kthread_stop(cryptoproc[cpu]);
2247+ kthread_stop(cryptoretproc[cpu]);
2248+ }
2249+
2250+ /*
2251+ * Reclaim dynamically allocated resources.
2252+ */
2253+ if (crypto_drivers != NULL)
2254+ kfree(crypto_drivers);
2255+
2256+ if (cryptodesc_zone != NULL)
2257+ kmem_cache_destroy(cryptodesc_zone);
2258+ if (cryptop_zone != NULL)
2259+ kmem_cache_destroy(cryptop_zone);
2260+}
2261+
2262+
2263+EXPORT_SYMBOL(crypto_newsession);
2264+EXPORT_SYMBOL(crypto_freesession);
2265+EXPORT_SYMBOL(crypto_get_driverid);
2266+EXPORT_SYMBOL(crypto_kregister);
2267+EXPORT_SYMBOL(crypto_register);
2268+EXPORT_SYMBOL(crypto_unregister);
2269+EXPORT_SYMBOL(crypto_unregister_all);
2270+EXPORT_SYMBOL(crypto_unblock);
2271+EXPORT_SYMBOL(crypto_dispatch);
2272+EXPORT_SYMBOL(crypto_kdispatch);
2273+EXPORT_SYMBOL(crypto_freereq);
2274+EXPORT_SYMBOL(crypto_getreq);
2275+EXPORT_SYMBOL(crypto_done);
2276+EXPORT_SYMBOL(crypto_kdone);
2277+EXPORT_SYMBOL(crypto_getfeat);
2278+EXPORT_SYMBOL(crypto_userasymcrypto);
2279+EXPORT_SYMBOL(crypto_getcaps);
2280+EXPORT_SYMBOL(crypto_find_driver);
2281+EXPORT_SYMBOL(crypto_find_device_byhid);
2282+
2283+module_init(crypto_init);
2284+module_exit(crypto_exit);
2285+
2286+MODULE_LICENSE("BSD");
2287+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
2288+MODULE_DESCRIPTION("OCF (OpenBSD Cryptographic Framework)");
2289diff --git a/crypto/ocf/cryptodev.c b/crypto/ocf/cryptodev.c
2290new file mode 100644
2291index 0000000..2ee3618
2292--- /dev/null
2293+++ b/crypto/ocf/cryptodev.c
2294@@ -0,0 +1,1069 @@
2295+/* $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $ */
2296+
2297+/*-
2298+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
2299+ * Copyright (C) 2006-2010 David McCullough
2300+ * Copyright (C) 2004-2005 Intel Corporation.
2301+ * The license and original author are listed below.
2302+ *
2303+ * Copyright (c) 2001 Theo de Raadt
2304+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
2305+ *
2306+ * Redistribution and use in source and binary forms, with or without
2307+ * modification, are permitted provided that the following conditions
2308+ * are met:
2309+ *
2310+ * 1. Redistributions of source code must retain the above copyright
2311+ * notice, this list of conditions and the following disclaimer.
2312+ * 2. Redistributions in binary form must reproduce the above copyright
2313+ * notice, this list of conditions and the following disclaimer in the
2314+ * documentation and/or other materials provided with the distribution.
2315+ * 3. The name of the author may not be used to endorse or promote products
2316+ * derived from this software without specific prior written permission.
2317+ *
2318+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
2319+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
2320+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
2321+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
2322+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2323+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2324+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2325+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2326+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2327+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2328+ *
2329+ * Effort sponsored in part by the Defense Advanced Research Projects
2330+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
2331+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
2332+ *
2333+__FBSDID("$FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.34 2007/05/09 19:37:02 gnn Exp $");
2334+ */
2335+
2336+#include <linux/version.h>
2337+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
2338+#include <linux/config.h>
2339+#endif
2340+#include <linux/types.h>
2341+#include <linux/time.h>
2342+#include <linux/delay.h>
2343+#include <linux/list.h>
2344+#include <linux/init.h>
2345+#include <linux/sched.h>
2346+#include <linux/unistd.h>
2347+#include <linux/module.h>
2348+#include <linux/wait.h>
2349+#include <linux/slab.h>
2350+#include <linux/fs.h>
2351+#include <linux/dcache.h>
2352+#include <linux/file.h>
2353+#include <linux/mount.h>
2354+#include <linux/miscdevice.h>
2355+#include <asm/uaccess.h>
2356+
2357+#include <cryptodev.h>
2358+#include <uio.h>
2359+
2360+extern asmlinkage long sys_dup(unsigned int fildes);
2361+
2362+#define debug cryptodev_debug
2363+int cryptodev_debug = 0;
2364+module_param(cryptodev_debug, int, 0644);
2365+MODULE_PARM_DESC(cryptodev_debug, "Enable cryptodev debug");
2366+
2367+struct csession_info {
2368+ u_int16_t blocksize;
2369+ u_int16_t minkey, maxkey;
2370+
2371+ u_int16_t keysize;
2372+ /* u_int16_t hashsize; */
2373+ u_int16_t authsize;
2374+ u_int16_t authkey;
2375+ /* u_int16_t ctxsize; */
2376+};
2377+
2378+struct csession {
2379+ struct list_head list;
2380+ u_int64_t sid;
2381+ u_int32_t ses;
2382+
2383+ wait_queue_head_t waitq;
2384+
2385+ u_int32_t cipher;
2386+
2387+ u_int32_t mac;
2388+
2389+ caddr_t key;
2390+ int keylen;
2391+ u_char tmp_iv[EALG_MAX_BLOCK_LEN];
2392+
2393+ caddr_t mackey;
2394+ int mackeylen;
2395+
2396+ struct csession_info info;
2397+
2398+ struct iovec iovec;
2399+ struct uio uio;
2400+ int error;
2401+};
2402+
2403+struct fcrypt {
2404+ struct list_head csessions;
2405+ int sesn;
2406+};
2407+
2408+static struct csession *csefind(struct fcrypt *, u_int);
2409+static int csedelete(struct fcrypt *, struct csession *);
2410+static struct csession *cseadd(struct fcrypt *, struct csession *);
2411+static struct csession *csecreate(struct fcrypt *, u_int64_t,
2412+ struct cryptoini *crie, struct cryptoini *cria, struct csession_info *);
2413+static int csefree(struct csession *);
2414+
2415+static int cryptodev_op(struct csession *, struct crypt_op *);
2416+static int cryptodev_key(struct crypt_kop *);
2417+static int cryptodev_find(struct crypt_find_op *);
2418+
2419+static int cryptodev_cb(void *);
2420+static int cryptodev_open(struct inode *inode, struct file *filp);
2421+
2422+/*
2423+ * Check a crypto identifier to see if it requested
2424+ * a valid crid and it's capabilities match.
2425+ */
2426+static int
2427+checkcrid(int crid)
2428+{
2429+ int hid = crid & ~(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
2430+ int typ = crid & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
2431+ int caps = 0;
2432+
2433+ /* if the user hasn't selected a driver, then just call newsession */
2434+ if (hid == 0 && typ != 0)
2435+ return 0;
2436+
2437+ caps = crypto_getcaps(hid);
2438+
2439+ /* didn't find anything with capabilities */
2440+ if (caps == 0) {
2441+ dprintk("%s: hid=%x typ=%x not matched\n", __FUNCTION__, hid, typ);
2442+ return EINVAL;
2443+ }
2444+
2445+ /* the user didn't specify SW or HW, so the driver is ok */
2446+ if (typ == 0)
2447+ return 0;
2448+
2449+ /* if the type specified didn't match */
2450+ if (typ != (caps & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE))) {
2451+ dprintk("%s: hid=%x typ=%x caps=%x not matched\n", __FUNCTION__,
2452+ hid, typ, caps);
2453+ return EINVAL;
2454+ }
2455+
2456+ return 0;
2457+}
2458+
2459+static int
2460+cryptodev_op(struct csession *cse, struct crypt_op *cop)
2461+{
2462+ struct cryptop *crp = NULL;
2463+ struct cryptodesc *crde = NULL, *crda = NULL;
2464+ int error = 0;
2465+
2466+ dprintk("%s()\n", __FUNCTION__);
2467+ if (cop->len > CRYPTO_MAX_DATA_LEN) {
2468+ dprintk("%s: %d > %d\n", __FUNCTION__, cop->len, CRYPTO_MAX_DATA_LEN);
2469+ return (E2BIG);
2470+ }
2471+
2472+ if (cse->info.blocksize && (cop->len % cse->info.blocksize) != 0) {
2473+ dprintk("%s: blocksize=%d len=%d\n", __FUNCTION__, cse->info.blocksize,
2474+ cop->len);
2475+ return (EINVAL);
2476+ }
2477+
2478+ cse->uio.uio_iov = &cse->iovec;
2479+ cse->uio.uio_iovcnt = 1;
2480+ cse->uio.uio_offset = 0;
2481+#if 0
2482+ cse->uio.uio_resid = cop->len;
2483+ cse->uio.uio_segflg = UIO_SYSSPACE;
2484+ cse->uio.uio_rw = UIO_WRITE;
2485+ cse->uio.uio_td = td;
2486+#endif
2487+ cse->uio.uio_iov[0].iov_len = cop->len;
2488+ if (cse->info.authsize)
2489+ cse->uio.uio_iov[0].iov_len += cse->info.authsize;
2490+ cse->uio.uio_iov[0].iov_base = kmalloc(cse->uio.uio_iov[0].iov_len,
2491+ GFP_KERNEL);
2492+
2493+ if (cse->uio.uio_iov[0].iov_base == NULL) {
2494+ dprintk("%s: iov_base kmalloc(%d) failed\n", __FUNCTION__,
2495+ (int)cse->uio.uio_iov[0].iov_len);
2496+ return (ENOMEM);
2497+ }
2498+
2499+ crp = crypto_getreq((cse->info.blocksize != 0) + (cse->info.authsize != 0));
2500+ if (crp == NULL) {
2501+ dprintk("%s: ENOMEM\n", __FUNCTION__);
2502+ error = ENOMEM;
2503+ goto bail;
2504+ }
2505+
2506+ if (cse->info.authsize && cse->info.blocksize) {
2507+ if (cop->op == COP_ENCRYPT) {
2508+ crde = crp->crp_desc;
2509+ crda = crde->crd_next;
2510+ } else {
2511+ crda = crp->crp_desc;
2512+ crde = crda->crd_next;
2513+ }
2514+ } else if (cse->info.authsize) {
2515+ crda = crp->crp_desc;
2516+ } else if (cse->info.blocksize) {
2517+ crde = crp->crp_desc;
2518+ } else {
2519+ dprintk("%s: bad request\n", __FUNCTION__);
2520+ error = EINVAL;
2521+ goto bail;
2522+ }
2523+
2524+ if ((error = copy_from_user(cse->uio.uio_iov[0].iov_base, cop->src,
2525+ cop->len))) {
2526+ dprintk("%s: bad copy\n", __FUNCTION__);
2527+ goto bail;
2528+ }
2529+
2530+ if (crda) {
2531+ crda->crd_skip = 0;
2532+ crda->crd_len = cop->len;
2533+ crda->crd_inject = cop->len;
2534+
2535+ crda->crd_alg = cse->mac;
2536+ crda->crd_key = cse->mackey;
2537+ crda->crd_klen = cse->mackeylen * 8;
2538+ }
2539+
2540+ if (crde) {
2541+ if (cop->op == COP_ENCRYPT)
2542+ crde->crd_flags |= CRD_F_ENCRYPT;
2543+ else
2544+ crde->crd_flags &= ~CRD_F_ENCRYPT;
2545+ crde->crd_len = cop->len;
2546+ crde->crd_inject = 0;
2547+
2548+ crde->crd_alg = cse->cipher;
2549+ crde->crd_key = cse->key;
2550+ crde->crd_klen = cse->keylen * 8;
2551+ }
2552+
2553+ crp->crp_ilen = cse->uio.uio_iov[0].iov_len;
2554+ crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
2555+ | (cop->flags & COP_F_BATCH);
2556+ crp->crp_buf = (caddr_t)&cse->uio;
2557+ crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
2558+ crp->crp_sid = cse->sid;
2559+ crp->crp_opaque = (void *)cse;
2560+
2561+ if (cop->iv) {
2562+ if (crde == NULL) {
2563+ error = EINVAL;
2564+ dprintk("%s no crde\n", __FUNCTION__);
2565+ goto bail;
2566+ }
2567+ if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
2568+ error = EINVAL;
2569+ dprintk("%s arc4 with IV\n", __FUNCTION__);
2570+ goto bail;
2571+ }
2572+ if ((error = copy_from_user(cse->tmp_iv, cop->iv,
2573+ cse->info.blocksize))) {
2574+ dprintk("%s bad iv copy\n", __FUNCTION__);
2575+ goto bail;
2576+ }
2577+ memcpy(crde->crd_iv, cse->tmp_iv, cse->info.blocksize);
2578+ crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
2579+ crde->crd_skip = 0;
2580+ } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
2581+ crde->crd_skip = 0;
2582+ } else if (crde) {
2583+ crde->crd_flags |= CRD_F_IV_PRESENT;
2584+ crde->crd_skip = cse->info.blocksize;
2585+ crde->crd_len -= cse->info.blocksize;
2586+ }
2587+
2588+ if (cop->mac && crda == NULL) {
2589+ error = EINVAL;
2590+ dprintk("%s no crda\n", __FUNCTION__);
2591+ goto bail;
2592+ }
2593+
2594+ /*
2595+ * Let the dispatch run unlocked, then, interlock against the
2596+ * callback before checking if the operation completed and going
2597+ * to sleep. This insures drivers don't inherit our lock which
2598+ * results in a lock order reversal between crypto_dispatch forced
2599+ * entry and the crypto_done callback into us.
2600+ */
2601+ error = crypto_dispatch(crp);
2602+ if (error) {
2603+ dprintk("%s error in crypto_dispatch\n", __FUNCTION__);
2604+ goto bail;
2605+ }
2606+
2607+ dprintk("%s about to WAIT\n", __FUNCTION__);
2608+ /*
2609+ * we really need to wait for driver to complete to maintain
2610+ * state, luckily interrupts will be remembered
2611+ */
2612+ do {
2613+ error = wait_event_interruptible(crp->crp_waitq,
2614+ ((crp->crp_flags & CRYPTO_F_DONE) != 0));
2615+ /*
2616+ * we can't break out of this loop or we will leave behind
2617+ * a huge mess, however, staying here means if your driver
2618+ * is broken user applications can hang and not be killed.
2619+ * The solution, fix your driver :-)
2620+ */
2621+ if (error) {
2622+ schedule();
2623+ error = 0;
2624+ }
2625+ } while ((crp->crp_flags & CRYPTO_F_DONE) == 0);
2626+ dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
2627+
2628+ if (crp->crp_etype != 0) {
2629+ error = crp->crp_etype;
2630+ dprintk("%s error in crp processing\n", __FUNCTION__);
2631+ goto bail;
2632+ }
2633+
2634+ if (cse->error) {
2635+ error = cse->error;
2636+ dprintk("%s error in cse processing\n", __FUNCTION__);
2637+ goto bail;
2638+ }
2639+
2640+ if (cop->dst && (error = copy_to_user(cop->dst,
2641+ cse->uio.uio_iov[0].iov_base, cop->len))) {
2642+ dprintk("%s bad dst copy\n", __FUNCTION__);
2643+ goto bail;
2644+ }
2645+
2646+ if (cop->mac &&
2647+ (error=copy_to_user(cop->mac,
2648+ (caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
2649+ cse->info.authsize))) {
2650+ dprintk("%s bad mac copy\n", __FUNCTION__);
2651+ goto bail;
2652+ }
2653+
2654+bail:
2655+ if (crp)
2656+ crypto_freereq(crp);
2657+ if (cse->uio.uio_iov[0].iov_base)
2658+ kfree(cse->uio.uio_iov[0].iov_base);
2659+
2660+ return (error);
2661+}
2662+
2663+static int
2664+cryptodev_cb(void *op)
2665+{
2666+ struct cryptop *crp = (struct cryptop *) op;
2667+ struct csession *cse = (struct csession *)crp->crp_opaque;
2668+ int error;
2669+
2670+ dprintk("%s()\n", __FUNCTION__);
2671+ error = crp->crp_etype;
2672+ if (error == EAGAIN) {
2673+ crp->crp_flags &= ~CRYPTO_F_DONE;
2674+#ifdef NOTYET
2675+ /*
2676+ * DAVIDM I am fairly sure that we should turn this into a batch
2677+ * request to stop bad karma/lockup, revisit
2678+ */
2679+ crp->crp_flags |= CRYPTO_F_BATCH;
2680+#endif
2681+ return crypto_dispatch(crp);
2682+ }
2683+ if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
2684+ cse->error = error;
2685+ wake_up_interruptible(&crp->crp_waitq);
2686+ }
2687+ return (0);
2688+}
2689+
2690+static int
2691+cryptodevkey_cb(void *op)
2692+{
2693+ struct cryptkop *krp = (struct cryptkop *) op;
2694+ dprintk("%s()\n", __FUNCTION__);
2695+ wake_up_interruptible(&krp->krp_waitq);
2696+ return (0);
2697+}
2698+
2699+static int
2700+cryptodev_key(struct crypt_kop *kop)
2701+{
2702+ struct cryptkop *krp = NULL;
2703+ int error = EINVAL;
2704+ int in, out, size, i;
2705+
2706+ dprintk("%s()\n", __FUNCTION__);
2707+ if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) {
2708+ dprintk("%s params too big\n", __FUNCTION__);
2709+ return (EFBIG);
2710+ }
2711+
2712+ in = kop->crk_iparams;
2713+ out = kop->crk_oparams;
2714+ switch (kop->crk_op) {
2715+ case CRK_MOD_EXP:
2716+ if (in == 3 && out == 1)
2717+ break;
2718+ return (EINVAL);
2719+ case CRK_MOD_EXP_CRT:
2720+ if (in == 6 && out == 1)
2721+ break;
2722+ return (EINVAL);
2723+ case CRK_DSA_SIGN:
2724+ if (in == 5 && out == 2)
2725+ break;
2726+ return (EINVAL);
2727+ case CRK_DSA_VERIFY:
2728+ if (in == 7 && out == 0)
2729+ break;
2730+ return (EINVAL);
2731+ case CRK_DH_COMPUTE_KEY:
2732+ if (in == 3 && out == 1)
2733+ break;
2734+ return (EINVAL);
2735+ default:
2736+ return (EINVAL);
2737+ }
2738+
2739+ krp = (struct cryptkop *)kmalloc(sizeof *krp, GFP_KERNEL);
2740+ if (!krp)
2741+ return (ENOMEM);
2742+ bzero(krp, sizeof *krp);
2743+ krp->krp_op = kop->crk_op;
2744+ krp->krp_status = kop->crk_status;
2745+ krp->krp_iparams = kop->crk_iparams;
2746+ krp->krp_oparams = kop->crk_oparams;
2747+ krp->krp_crid = kop->crk_crid;
2748+ krp->krp_status = 0;
2749+ krp->krp_flags = CRYPTO_KF_CBIMM;
2750+ krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
2751+ init_waitqueue_head(&krp->krp_waitq);
2752+
2753+ for (i = 0; i < CRK_MAXPARAM; i++)
2754+ krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
2755+ for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
2756+ size = (krp->krp_param[i].crp_nbits + 7) / 8;
2757+ if (size == 0)
2758+ continue;
2759+ krp->krp_param[i].crp_p = (caddr_t) kmalloc(size, GFP_KERNEL);
2760+ if (i >= krp->krp_iparams)
2761+ continue;
2762+ error = copy_from_user(krp->krp_param[i].crp_p,
2763+ kop->crk_param[i].crp_p, size);
2764+ if (error)
2765+ goto fail;
2766+ }
2767+
2768+ error = crypto_kdispatch(krp);
2769+ if (error)
2770+ goto fail;
2771+
2772+ do {
2773+ error = wait_event_interruptible(krp->krp_waitq,
2774+ ((krp->krp_flags & CRYPTO_KF_DONE) != 0));
2775+ /*
2776+ * we can't break out of this loop or we will leave behind
2777+ * a huge mess, however, staying here means if your driver
2778+ * is broken user applications can hang and not be killed.
2779+ * The solution, fix your driver :-)
2780+ */
2781+ if (error) {
2782+ schedule();
2783+ error = 0;
2784+ }
2785+ } while ((krp->krp_flags & CRYPTO_KF_DONE) == 0);
2786+
2787+ dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
2788+
2789+ kop->crk_crid = krp->krp_crid; /* device that did the work */
2790+ if (krp->krp_status != 0) {
2791+ error = krp->krp_status;
2792+ goto fail;
2793+ }
2794+
2795+ for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) {
2796+ size = (krp->krp_param[i].crp_nbits + 7) / 8;
2797+ if (size == 0)
2798+ continue;
2799+ error = copy_to_user(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p,
2800+ size);
2801+ if (error)
2802+ goto fail;
2803+ }
2804+
2805+fail:
2806+ if (krp) {
2807+ kop->crk_status = krp->krp_status;
2808+ for (i = 0; i < CRK_MAXPARAM; i++) {
2809+ if (krp->krp_param[i].crp_p)
2810+ kfree(krp->krp_param[i].crp_p);
2811+ }
2812+ kfree(krp);
2813+ }
2814+ return (error);
2815+}
2816+
2817+static int
2818+cryptodev_find(struct crypt_find_op *find)
2819+{
2820+ device_t dev;
2821+
2822+ if (find->crid != -1) {
2823+ dev = crypto_find_device_byhid(find->crid);
2824+ if (dev == NULL)
2825+ return (ENOENT);
2826+ strlcpy(find->name, device_get_nameunit(dev),
2827+ sizeof(find->name));
2828+ } else {
2829+ find->crid = crypto_find_driver(find->name);
2830+ if (find->crid == -1)
2831+ return (ENOENT);
2832+ }
2833+ return (0);
2834+}
2835+
2836+static struct csession *
2837+csefind(struct fcrypt *fcr, u_int ses)
2838+{
2839+ struct csession *cse;
2840+
2841+ dprintk("%s()\n", __FUNCTION__);
2842+ list_for_each_entry(cse, &fcr->csessions, list)
2843+ if (cse->ses == ses)
2844+ return (cse);
2845+ return (NULL);
2846+}
2847+
2848+static int
2849+csedelete(struct fcrypt *fcr, struct csession *cse_del)
2850+{
2851+ struct csession *cse;
2852+
2853+ dprintk("%s()\n", __FUNCTION__);
2854+ list_for_each_entry(cse, &fcr->csessions, list) {
2855+ if (cse == cse_del) {
2856+ list_del(&cse->list);
2857+ return (1);
2858+ }
2859+ }
2860+ return (0);
2861+}
2862+
2863+static struct csession *
2864+cseadd(struct fcrypt *fcr, struct csession *cse)
2865+{
2866+ dprintk("%s()\n", __FUNCTION__);
2867+ list_add_tail(&cse->list, &fcr->csessions);
2868+ cse->ses = fcr->sesn++;
2869+ return (cse);
2870+}
2871+
2872+static struct csession *
2873+csecreate(struct fcrypt *fcr, u_int64_t sid, struct cryptoini *crie,
2874+ struct cryptoini *cria, struct csession_info *info)
2875+{
2876+ struct csession *cse;
2877+
2878+ dprintk("%s()\n", __FUNCTION__);
2879+ cse = (struct csession *) kmalloc(sizeof(struct csession), GFP_KERNEL);
2880+ if (cse == NULL)
2881+ return NULL;
2882+ memset(cse, 0, sizeof(struct csession));
2883+
2884+ INIT_LIST_HEAD(&cse->list);
2885+ init_waitqueue_head(&cse->waitq);
2886+
2887+ cse->key = crie->cri_key;
2888+ cse->keylen = crie->cri_klen/8;
2889+ cse->mackey = cria->cri_key;
2890+ cse->mackeylen = cria->cri_klen/8;
2891+ cse->sid = sid;
2892+ cse->cipher = crie->cri_alg;
2893+ cse->mac = cria->cri_alg;
2894+ cse->info = *info;
2895+ cseadd(fcr, cse);
2896+ return (cse);
2897+}
2898+
2899+static int
2900+csefree(struct csession *cse)
2901+{
2902+ int error;
2903+
2904+ dprintk("%s()\n", __FUNCTION__);
2905+ error = crypto_freesession(cse->sid);
2906+ if (cse->key)
2907+ kfree(cse->key);
2908+ if (cse->mackey)
2909+ kfree(cse->mackey);
2910+ kfree(cse);
2911+ return(error);
2912+}
2913+
2914+static int
2915+cryptodev_ioctl(
2916+ struct inode *inode,
2917+ struct file *filp,
2918+ unsigned int cmd,
2919+ unsigned long arg)
2920+{
2921+ struct cryptoini cria, crie;
2922+ struct fcrypt *fcr = filp->private_data;
2923+ struct csession *cse;
2924+ struct csession_info info;
2925+ struct session2_op sop;
2926+ struct crypt_op cop;
2927+ struct crypt_kop kop;
2928+ struct crypt_find_op fop;
2929+ u_int64_t sid;
2930+ u_int32_t ses = 0;
2931+ int feat, fd, error = 0, crid;
2932+ mm_segment_t fs;
2933+
2934+ dprintk("%s(cmd=%x arg=%lx)\n", __FUNCTION__, cmd, arg);
2935+
2936+ switch (cmd) {
2937+
2938+ case CRIOGET: {
2939+ dprintk("%s(CRIOGET)\n", __FUNCTION__);
2940+ fs = get_fs();
2941+ set_fs(get_ds());
2942+ for (fd = 0; fd < files_fdtable(current->files)->max_fds; fd++)
2943+ if (files_fdtable(current->files)->fd[fd] == filp)
2944+ break;
2945+ fd = sys_dup(fd);
2946+ set_fs(fs);
2947+ put_user(fd, (int *) arg);
2948+ return IS_ERR_VALUE(fd) ? fd : 0;
2949+ }
2950+
2951+#define CIOCGSESSSTR (cmd == CIOCGSESSION ? "CIOCGSESSION" : "CIOCGSESSION2")
2952+ case CIOCGSESSION:
2953+ case CIOCGSESSION2:
2954+ dprintk("%s(%s)\n", __FUNCTION__, CIOCGSESSSTR);
2955+ memset(&crie, 0, sizeof(crie));
2956+ memset(&cria, 0, sizeof(cria));
2957+ memset(&info, 0, sizeof(info));
2958+ memset(&sop, 0, sizeof(sop));
2959+
2960+ if (copy_from_user(&sop, (void*)arg, (cmd == CIOCGSESSION) ?
2961+ sizeof(struct session_op) : sizeof(sop))) {
2962+ dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
2963+ error = EFAULT;
2964+ goto bail;
2965+ }
2966+
2967+ switch (sop.cipher) {
2968+ case 0:
2969+ dprintk("%s(%s) - no cipher\n", __FUNCTION__, CIOCGSESSSTR);
2970+ break;
2971+ case CRYPTO_NULL_CBC:
2972+ info.blocksize = NULL_BLOCK_LEN;
2973+ info.minkey = NULL_MIN_KEY_LEN;
2974+ info.maxkey = NULL_MAX_KEY_LEN;
2975+ break;
2976+ case CRYPTO_DES_CBC:
2977+ info.blocksize = DES_BLOCK_LEN;
2978+ info.minkey = DES_MIN_KEY_LEN;
2979+ info.maxkey = DES_MAX_KEY_LEN;
2980+ break;
2981+ case CRYPTO_3DES_CBC:
2982+ info.blocksize = DES3_BLOCK_LEN;
2983+ info.minkey = DES3_MIN_KEY_LEN;
2984+ info.maxkey = DES3_MAX_KEY_LEN;
2985+ break;
2986+ case CRYPTO_BLF_CBC:
2987+ info.blocksize = BLOWFISH_BLOCK_LEN;
2988+ info.minkey = BLOWFISH_MIN_KEY_LEN;
2989+ info.maxkey = BLOWFISH_MAX_KEY_LEN;
2990+ break;
2991+ case CRYPTO_CAST_CBC:
2992+ info.blocksize = CAST128_BLOCK_LEN;
2993+ info.minkey = CAST128_MIN_KEY_LEN;
2994+ info.maxkey = CAST128_MAX_KEY_LEN;
2995+ break;
2996+ case CRYPTO_SKIPJACK_CBC:
2997+ info.blocksize = SKIPJACK_BLOCK_LEN;
2998+ info.minkey = SKIPJACK_MIN_KEY_LEN;
2999+ info.maxkey = SKIPJACK_MAX_KEY_LEN;
3000+ break;
3001+ case CRYPTO_AES_CBC:
3002+ info.blocksize = AES_BLOCK_LEN;
3003+ info.minkey = AES_MIN_KEY_LEN;
3004+ info.maxkey = AES_MAX_KEY_LEN;
3005+ break;
3006+ case CRYPTO_ARC4:
3007+ info.blocksize = ARC4_BLOCK_LEN;
3008+ info.minkey = ARC4_MIN_KEY_LEN;
3009+ info.maxkey = ARC4_MAX_KEY_LEN;
3010+ break;
3011+ case CRYPTO_CAMELLIA_CBC:
3012+ info.blocksize = CAMELLIA_BLOCK_LEN;
3013+ info.minkey = CAMELLIA_MIN_KEY_LEN;
3014+ info.maxkey = CAMELLIA_MAX_KEY_LEN;
3015+ break;
3016+ default:
3017+ dprintk("%s(%s) - bad cipher\n", __FUNCTION__, CIOCGSESSSTR);
3018+ error = EINVAL;
3019+ goto bail;
3020+ }
3021+
3022+ switch (sop.mac) {
3023+ case 0:
3024+ dprintk("%s(%s) - no mac\n", __FUNCTION__, CIOCGSESSSTR);
3025+ break;
3026+ case CRYPTO_NULL_HMAC:
3027+ info.authsize = NULL_HASH_LEN;
3028+ break;
3029+ case CRYPTO_MD5:
3030+ info.authsize = MD5_HASH_LEN;
3031+ break;
3032+ case CRYPTO_SHA1:
3033+ info.authsize = SHA1_HASH_LEN;
3034+ break;
3035+ case CRYPTO_SHA2_256:
3036+ info.authsize = SHA2_256_HASH_LEN;
3037+ break;
3038+ case CRYPTO_SHA2_384:
3039+ info.authsize = SHA2_384_HASH_LEN;
3040+ break;
3041+ case CRYPTO_SHA2_512:
3042+ info.authsize = SHA2_512_HASH_LEN;
3043+ break;
3044+ case CRYPTO_RIPEMD160:
3045+ info.authsize = RIPEMD160_HASH_LEN;
3046+ break;
3047+ case CRYPTO_MD5_HMAC:
3048+ info.authsize = MD5_HASH_LEN;
3049+ info.authkey = 16;
3050+ break;
3051+ case CRYPTO_SHA1_HMAC:
3052+ info.authsize = SHA1_HASH_LEN;
3053+ info.authkey = 20;
3054+ break;
3055+ case CRYPTO_SHA2_256_HMAC:
3056+ info.authsize = SHA2_256_HASH_LEN;
3057+ info.authkey = 32;
3058+ break;
3059+ case CRYPTO_SHA2_384_HMAC:
3060+ info.authsize = SHA2_384_HASH_LEN;
3061+ info.authkey = 48;
3062+ break;
3063+ case CRYPTO_SHA2_512_HMAC:
3064+ info.authsize = SHA2_512_HASH_LEN;
3065+ info.authkey = 64;
3066+ break;
3067+ case CRYPTO_RIPEMD160_HMAC:
3068+ info.authsize = RIPEMD160_HASH_LEN;
3069+ info.authkey = 20;
3070+ break;
3071+ default:
3072+ dprintk("%s(%s) - bad mac\n", __FUNCTION__, CIOCGSESSSTR);
3073+ error = EINVAL;
3074+ goto bail;
3075+ }
3076+
3077+ if (info.blocksize) {
3078+ crie.cri_alg = sop.cipher;
3079+ crie.cri_klen = sop.keylen * 8;
3080+ if ((info.maxkey && sop.keylen > info.maxkey) ||
3081+ sop.keylen < info.minkey) {
3082+ dprintk("%s(%s) - bad key\n", __FUNCTION__, CIOCGSESSSTR);
3083+ error = EINVAL;
3084+ goto bail;
3085+ }
3086+
3087+ crie.cri_key = (u_int8_t *) kmalloc(crie.cri_klen/8+1, GFP_KERNEL);
3088+ if (copy_from_user(crie.cri_key, sop.key,
3089+ crie.cri_klen/8)) {
3090+ dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
3091+ error = EFAULT;
3092+ goto bail;
3093+ }
3094+ if (info.authsize)
3095+ crie.cri_next = &cria;
3096+ }
3097+
3098+ if (info.authsize) {
3099+ cria.cri_alg = sop.mac;
3100+ cria.cri_klen = sop.mackeylen * 8;
3101+ if (info.authkey && sop.mackeylen != info.authkey) {
3102+ dprintk("%s(%s) - mackeylen %d != %d\n", __FUNCTION__,
3103+ CIOCGSESSSTR, sop.mackeylen, info.authkey);
3104+ error = EINVAL;
3105+ goto bail;
3106+ }
3107+
3108+ if (cria.cri_klen) {
3109+ cria.cri_key = (u_int8_t *) kmalloc(cria.cri_klen/8,GFP_KERNEL);
3110+ if (copy_from_user(cria.cri_key, sop.mackey,
3111+ cria.cri_klen / 8)) {
3112+ dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
3113+ error = EFAULT;
3114+ goto bail;
3115+ }
3116+ }
3117+ }
3118+
3119+ /* NB: CIOGSESSION2 has the crid */
3120+ if (cmd == CIOCGSESSION2) {
3121+ crid = sop.crid;
3122+ error = checkcrid(crid);
3123+ if (error) {
3124+ dprintk("%s(%s) - checkcrid %x\n", __FUNCTION__,
3125+ CIOCGSESSSTR, error);
3126+ goto bail;
3127+ }
3128+ } else {
3129+ /* allow either HW or SW to be used */
3130+ crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
3131+ }
3132+ error = crypto_newsession(&sid, (info.blocksize ? &crie : &cria), crid);
3133+ if (error) {
3134+ dprintk("%s(%s) - newsession %d\n",__FUNCTION__,CIOCGSESSSTR,error);
3135+ goto bail;
3136+ }
3137+
3138+ cse = csecreate(fcr, sid, &crie, &cria, &info);
3139+ if (cse == NULL) {
3140+ crypto_freesession(sid);
3141+ error = EINVAL;
3142+ dprintk("%s(%s) - csecreate failed\n", __FUNCTION__, CIOCGSESSSTR);
3143+ goto bail;
3144+ }
3145+ sop.ses = cse->ses;
3146+
3147+ if (cmd == CIOCGSESSION2) {
3148+ /* return hardware/driver id */
3149+ sop.crid = CRYPTO_SESID2HID(cse->sid);
3150+ }
3151+
3152+ if (copy_to_user((void*)arg, &sop, (cmd == CIOCGSESSION) ?
3153+ sizeof(struct session_op) : sizeof(sop))) {
3154+ dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
3155+ error = EFAULT;
3156+ }
3157+bail:
3158+ if (error) {
3159+ dprintk("%s(%s) - bail %d\n", __FUNCTION__, CIOCGSESSSTR, error);
3160+ if (crie.cri_key)
3161+ kfree(crie.cri_key);
3162+ if (cria.cri_key)
3163+ kfree(cria.cri_key);
3164+ }
3165+ break;
3166+ case CIOCFSESSION:
3167+ dprintk("%s(CIOCFSESSION)\n", __FUNCTION__);
3168+ get_user(ses, (uint32_t*)arg);
3169+ cse = csefind(fcr, ses);
3170+ if (cse == NULL) {
3171+ error = EINVAL;
3172+ dprintk("%s(CIOCFSESSION) - Fail %d\n", __FUNCTION__, error);
3173+ break;
3174+ }
3175+ csedelete(fcr, cse);
3176+ error = csefree(cse);
3177+ break;
3178+ case CIOCCRYPT:
3179+ dprintk("%s(CIOCCRYPT)\n", __FUNCTION__);
3180+ if(copy_from_user(&cop, (void*)arg, sizeof(cop))) {
3181+ dprintk("%s(CIOCCRYPT) - bad copy\n", __FUNCTION__);
3182+ error = EFAULT;
3183+ goto bail;
3184+ }
3185+ cse = csefind(fcr, cop.ses);
3186+ if (cse == NULL) {
3187+ error = EINVAL;
3188+ dprintk("%s(CIOCCRYPT) - Fail %d\n", __FUNCTION__, error);
3189+ break;
3190+ }
3191+ error = cryptodev_op(cse, &cop);
3192+ if(copy_to_user((void*)arg, &cop, sizeof(cop))) {
3193+ dprintk("%s(CIOCCRYPT) - bad return copy\n", __FUNCTION__);
3194+ error = EFAULT;
3195+ goto bail;
3196+ }
3197+ break;
3198+ case CIOCKEY:
3199+ case CIOCKEY2:
3200+ dprintk("%s(CIOCKEY)\n", __FUNCTION__);
3201+ if (!crypto_userasymcrypto)
3202+ return (EPERM); /* XXX compat? */
3203+ if(copy_from_user(&kop, (void*)arg, sizeof(kop))) {
3204+ dprintk("%s(CIOCKEY) - bad copy\n", __FUNCTION__);
3205+ error = EFAULT;
3206+ goto bail;
3207+ }
3208+ if (cmd == CIOCKEY) {
3209+ /* NB: crypto core enforces s/w driver use */
3210+ kop.crk_crid =
3211+ CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
3212+ }
3213+ error = cryptodev_key(&kop);
3214+ if(copy_to_user((void*)arg, &kop, sizeof(kop))) {
3215+ dprintk("%s(CIOCGKEY) - bad return copy\n", __FUNCTION__);
3216+ error = EFAULT;
3217+ goto bail;
3218+ }
3219+ break;
3220+ case CIOCASYMFEAT:
3221+ dprintk("%s(CIOCASYMFEAT)\n", __FUNCTION__);
3222+ if (!crypto_userasymcrypto) {
3223+ /*
3224+ * NB: if user asym crypto operations are
3225+ * not permitted return "no algorithms"
3226+ * so well-behaved applications will just
3227+ * fallback to doing them in software.
3228+ */
3229+ feat = 0;
3230+ } else
3231+ error = crypto_getfeat(&feat);
3232+ if (!error) {
3233+ error = copy_to_user((void*)arg, &feat, sizeof(feat));
3234+ }
3235+ break;
3236+ case CIOCFINDDEV:
3237+ if (copy_from_user(&fop, (void*)arg, sizeof(fop))) {
3238+ dprintk("%s(CIOCFINDDEV) - bad copy\n", __FUNCTION__);
3239+ error = EFAULT;
3240+ goto bail;
3241+ }
3242+ error = cryptodev_find(&fop);
3243+ if (copy_to_user((void*)arg, &fop, sizeof(fop))) {
3244+ dprintk("%s(CIOCFINDDEV) - bad return copy\n", __FUNCTION__);
3245+ error = EFAULT;
3246+ goto bail;
3247+ }
3248+ break;
3249+ default:
3250+ dprintk("%s(unknown ioctl 0x%x)\n", __FUNCTION__, cmd);
3251+ error = EINVAL;
3252+ break;
3253+ }
3254+ return(-error);
3255+}
3256+
3257+#ifdef HAVE_UNLOCKED_IOCTL
3258+static long
3259+cryptodev_unlocked_ioctl(
3260+ struct file *filp,
3261+ unsigned int cmd,
3262+ unsigned long arg)
3263+{
3264+ return cryptodev_ioctl(NULL, filp, cmd, arg);
3265+}
3266+#endif
3267+
3268+static int
3269+cryptodev_open(struct inode *inode, struct file *filp)
3270+{
3271+ struct fcrypt *fcr;
3272+
3273+ dprintk("%s()\n", __FUNCTION__);
3274+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
3275+ /*
3276+ * on 2.6.35 private_data points to a miscdevice structure, we override
3277+ * it, which is currently safe to do.
3278+ */
3279+ if (filp->private_data) {
3280+ printk("cryptodev: Private data already exists - %p!\n", filp->private_data);
3281+ return(-ENODEV);
3282+ }
3283+#endif
3284+
3285+ fcr = kmalloc(sizeof(*fcr), GFP_KERNEL);
3286+ if (!fcr) {
3287+ dprintk("%s() - malloc failed\n", __FUNCTION__);
3288+ return(-ENOMEM);
3289+ }
3290+ memset(fcr, 0, sizeof(*fcr));
3291+
3292+ INIT_LIST_HEAD(&fcr->csessions);
3293+ filp->private_data = fcr;
3294+ return(0);
3295+}
3296+
3297+static int
3298+cryptodev_release(struct inode *inode, struct file *filp)
3299+{
3300+ struct fcrypt *fcr = filp->private_data;
3301+ struct csession *cse, *tmp;
3302+
3303+ dprintk("%s()\n", __FUNCTION__);
3304+ if (!filp) {
3305+ printk("cryptodev: No private data on release\n");
3306+ return(0);
3307+ }
3308+
3309+ list_for_each_entry_safe(cse, tmp, &fcr->csessions, list) {
3310+ list_del(&cse->list);
3311+ (void)csefree(cse);
3312+ }
3313+ filp->private_data = NULL;
3314+ kfree(fcr);
3315+ return(0);
3316+}
3317+
3318+static struct file_operations cryptodev_fops = {
3319+ .owner = THIS_MODULE,
3320+ .open = cryptodev_open,
3321+ .release = cryptodev_release,
3322+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
3323+ .ioctl = cryptodev_ioctl,
3324+#endif
3325+#ifdef HAVE_UNLOCKED_IOCTL
3326+ .unlocked_ioctl = cryptodev_unlocked_ioctl,
3327+#endif
3328+};
3329+
3330+static struct miscdevice cryptodev = {
3331+ .minor = CRYPTODEV_MINOR,
3332+ .name = "crypto",
3333+ .fops = &cryptodev_fops,
3334+};
3335+
3336+static int __init
3337+cryptodev_init(void)
3338+{
3339+ int rc;
3340+
3341+ dprintk("%s(%p)\n", __FUNCTION__, cryptodev_init);
3342+ rc = misc_register(&cryptodev);
3343+ if (rc) {
3344+ printk(KERN_ERR "cryptodev: registration of /dev/crypto failed\n");
3345+ return(rc);
3346+ }
3347+
3348+ return(0);
3349+}
3350+
3351+static void __exit
3352+cryptodev_exit(void)
3353+{
3354+ dprintk("%s()\n", __FUNCTION__);
3355+ misc_deregister(&cryptodev);
3356+}
3357+
3358+module_init(cryptodev_init);
3359+module_exit(cryptodev_exit);
3360+
3361+MODULE_LICENSE("BSD");
3362+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
3363+MODULE_DESCRIPTION("Cryptodev (user interface to OCF)");
3364diff --git a/crypto/ocf/cryptodev.h b/crypto/ocf/cryptodev.h
3365new file mode 100644
3366index 0000000..cca0ec8
3367--- /dev/null
3368+++ b/crypto/ocf/cryptodev.h
3369@@ -0,0 +1,480 @@
3370+/* $FreeBSD: src/sys/opencrypto/cryptodev.h,v 1.25 2007/05/09 19:37:02 gnn Exp $ */
3371+/* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */
3372+
3373+/*-
3374+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
3375+ * Copyright (C) 2006-2010 David McCullough
3376+ * Copyright (C) 2004-2005 Intel Corporation.
3377+ * The license and original author are listed below.
3378+ *
3379+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
3380+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
3381+ *
3382+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
3383+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
3384+ * supported the development of this code.
3385+ *
3386+ * Copyright (c) 2000 Angelos D. Keromytis
3387+ *
3388+ * Permission to use, copy, and modify this software with or without fee
3389+ * is hereby granted, provided that this entire notice is included in
3390+ * all source code copies of any software which is or includes a copy or
3391+ * modification of this software.
3392+ *
3393+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
3394+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
3395+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
3396+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
3397+ * PURPOSE.
3398+ *
3399+ * Copyright (c) 2001 Theo de Raadt
3400+ *
3401+ * Redistribution and use in source and binary forms, with or without
3402+ * modification, are permitted provided that the following conditions
3403+ * are met:
3404+ *
3405+ * 1. Redistributions of source code must retain the above copyright
3406+ * notice, this list of conditions and the following disclaimer.
3407+ * 2. Redistributions in binary form must reproduce the above copyright
3408+ * notice, this list of conditions and the following disclaimer in the
3409+ * documentation and/or other materials provided with the distribution.
3410+ * 3. The name of the author may not be used to endorse or promote products
3411+ * derived from this software without specific prior written permission.
3412+ *
3413+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
3414+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
3415+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
3416+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
3417+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
3418+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
3419+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
3420+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3421+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
3422+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3423+ *
3424+ * Effort sponsored in part by the Defense Advanced Research Projects
3425+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
3426+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
3427+ *
3428+ */
3429+
3430+#ifndef _CRYPTO_CRYPTO_H_
3431+#define _CRYPTO_CRYPTO_H_
3432+
3433+/* Some initial values */
3434+#define CRYPTO_DRIVERS_INITIAL 4
3435+#define CRYPTO_SW_SESSIONS 32
3436+
3437+/* Hash values */
3438+#define NULL_HASH_LEN 0
3439+#define MD5_HASH_LEN 16
3440+#define SHA1_HASH_LEN 20
3441+#define RIPEMD160_HASH_LEN 20
3442+#define SHA2_256_HASH_LEN 32
3443+#define SHA2_384_HASH_LEN 48
3444+#define SHA2_512_HASH_LEN 64
3445+#define MD5_KPDK_HASH_LEN 16
3446+#define SHA1_KPDK_HASH_LEN 20
3447+/* Maximum hash algorithm result length */
3448+#define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */
3449+
3450+/* HMAC values */
3451+#define NULL_HMAC_BLOCK_LEN 1
3452+#define MD5_HMAC_BLOCK_LEN 64
3453+#define SHA1_HMAC_BLOCK_LEN 64
3454+#define RIPEMD160_HMAC_BLOCK_LEN 64
3455+#define SHA2_256_HMAC_BLOCK_LEN 64
3456+#define SHA2_384_HMAC_BLOCK_LEN 128
3457+#define SHA2_512_HMAC_BLOCK_LEN 128
3458+/* Maximum HMAC block length */
3459+#define HMAC_MAX_BLOCK_LEN SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */
3460+#define HMAC_IPAD_VAL 0x36
3461+#define HMAC_OPAD_VAL 0x5C
3462+
3463+/* Encryption algorithm block sizes */
3464+#define NULL_BLOCK_LEN 1
3465+#define DES_BLOCK_LEN 8
3466+#define DES3_BLOCK_LEN 8
3467+#define BLOWFISH_BLOCK_LEN 8
3468+#define SKIPJACK_BLOCK_LEN 8
3469+#define CAST128_BLOCK_LEN 8
3470+#define RIJNDAEL128_BLOCK_LEN 16
3471+#define AES_BLOCK_LEN RIJNDAEL128_BLOCK_LEN
3472+#define CAMELLIA_BLOCK_LEN 16
3473+#define ARC4_BLOCK_LEN 1
3474+#define EALG_MAX_BLOCK_LEN AES_BLOCK_LEN /* Keep this updated */
3475+
3476+/* Encryption algorithm min and max key sizes */
3477+#define NULL_MIN_KEY_LEN 0
3478+#define NULL_MAX_KEY_LEN 0
3479+#define DES_MIN_KEY_LEN 8
3480+#define DES_MAX_KEY_LEN 8
3481+#define DES3_MIN_KEY_LEN 24
3482+#define DES3_MAX_KEY_LEN 24
3483+#define BLOWFISH_MIN_KEY_LEN 4
3484+#define BLOWFISH_MAX_KEY_LEN 56
3485+#define SKIPJACK_MIN_KEY_LEN 10
3486+#define SKIPJACK_MAX_KEY_LEN 10
3487+#define CAST128_MIN_KEY_LEN 5
3488+#define CAST128_MAX_KEY_LEN 16
3489+#define RIJNDAEL128_MIN_KEY_LEN 16
3490+#define RIJNDAEL128_MAX_KEY_LEN 32
3491+#define AES_MIN_KEY_LEN RIJNDAEL128_MIN_KEY_LEN
3492+#define AES_MAX_KEY_LEN RIJNDAEL128_MAX_KEY_LEN
3493+#define CAMELLIA_MIN_KEY_LEN 16
3494+#define CAMELLIA_MAX_KEY_LEN 32
3495+#define ARC4_MIN_KEY_LEN 1
3496+#define ARC4_MAX_KEY_LEN 256
3497+
3498+/* Max size of data that can be processed */
3499+#define CRYPTO_MAX_DATA_LEN 64*1024 - 1
3500+
3501+#define CRYPTO_ALGORITHM_MIN 1
3502+#define CRYPTO_DES_CBC 1
3503+#define CRYPTO_3DES_CBC 2
3504+#define CRYPTO_BLF_CBC 3
3505+#define CRYPTO_CAST_CBC 4
3506+#define CRYPTO_SKIPJACK_CBC 5
3507+#define CRYPTO_MD5_HMAC 6
3508+#define CRYPTO_SHA1_HMAC 7
3509+#define CRYPTO_RIPEMD160_HMAC 8
3510+#define CRYPTO_MD5_KPDK 9
3511+#define CRYPTO_SHA1_KPDK 10
3512+#define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */
3513+#define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */
3514+#define CRYPTO_ARC4 12
3515+#define CRYPTO_MD5 13
3516+#define CRYPTO_SHA1 14
3517+#define CRYPTO_NULL_HMAC 15
3518+#define CRYPTO_NULL_CBC 16
3519+#define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */
3520+#define CRYPTO_SHA2_256_HMAC 18
3521+#define CRYPTO_SHA2_384_HMAC 19
3522+#define CRYPTO_SHA2_512_HMAC 20
3523+#define CRYPTO_CAMELLIA_CBC 21
3524+#define CRYPTO_SHA2_256 22
3525+#define CRYPTO_SHA2_384 23
3526+#define CRYPTO_SHA2_512 24
3527+#define CRYPTO_RIPEMD160 25
3528+#define CRYPTO_LZS_COMP 26
3529+#define CRYPTO_ALGORITHM_MAX 26 /* Keep updated - see above */
3530+
3531+/* Algorithm flags */
3532+#define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */
3533+#define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */
3534+#define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */
3535+
3536+/*
3537+ * Crypto driver/device flags. They can set in the crid
3538+ * parameter when creating a session or submitting a key
3539+ * op to affect the device/driver assigned. If neither
3540+ * of these are specified then the crid is assumed to hold
3541+ * the driver id of an existing (and suitable) device that
3542+ * must be used to satisfy the request.
3543+ */
3544+#define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */
3545+#define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */
3546+
3547+/* NB: deprecated */
3548+struct session_op {
3549+ u_int32_t cipher; /* ie. CRYPTO_DES_CBC */
3550+ u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */
3551+
3552+ u_int32_t keylen; /* cipher key */
3553+ caddr_t key;
3554+ int mackeylen; /* mac key */
3555+ caddr_t mackey;
3556+
3557+ u_int32_t ses; /* returns: session # */
3558+};
3559+
3560+struct session2_op {
3561+ u_int32_t cipher; /* ie. CRYPTO_DES_CBC */
3562+ u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */
3563+
3564+ u_int32_t keylen; /* cipher key */
3565+ caddr_t key;
3566+ int mackeylen; /* mac key */
3567+ caddr_t mackey;
3568+
3569+ u_int32_t ses; /* returns: session # */
3570+ int crid; /* driver id + flags (rw) */
3571+ int pad[4]; /* for future expansion */
3572+};
3573+
3574+struct crypt_op {
3575+ u_int32_t ses;
3576+ u_int16_t op; /* i.e. COP_ENCRYPT */
3577+#define COP_NONE 0
3578+#define COP_ENCRYPT 1
3579+#define COP_DECRYPT 2
3580+ u_int16_t flags;
3581+#define COP_F_BATCH 0x0008 /* Batch op if possible */
3582+ u_int len;
3583+ caddr_t src, dst; /* become iov[] inside kernel */
3584+ caddr_t mac; /* must be big enough for chosen MAC */
3585+ caddr_t iv;
3586+};
3587+
3588+/*
3589+ * Parameters for looking up a crypto driver/device by
3590+ * device name or by id. The latter are returned for
3591+ * created sessions (crid) and completed key operations.
3592+ */
3593+struct crypt_find_op {
3594+ int crid; /* driver id + flags */
3595+ char name[32]; /* device/driver name */
3596+};
3597+
3598+/* bignum parameter, in packed bytes, ... */
3599+struct crparam {
3600+ caddr_t crp_p;
3601+ u_int crp_nbits;
3602+};
3603+
3604+#define CRK_MAXPARAM 8
3605+
3606+struct crypt_kop {
3607+ u_int crk_op; /* ie. CRK_MOD_EXP or other */
3608+ u_int crk_status; /* return status */
3609+ u_short crk_iparams; /* # of input parameters */
3610+ u_short crk_oparams; /* # of output parameters */
3611+ u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */
3612+ struct crparam crk_param[CRK_MAXPARAM];
3613+};
3614+#define CRK_ALGORITM_MIN 0
3615+#define CRK_MOD_EXP 0
3616+#define CRK_MOD_EXP_CRT 1
3617+#define CRK_DSA_SIGN 2
3618+#define CRK_DSA_VERIFY 3
3619+#define CRK_DH_COMPUTE_KEY 4
3620+#define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */
3621+
3622+#define CRF_MOD_EXP (1 << CRK_MOD_EXP)
3623+#define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT)
3624+#define CRF_DSA_SIGN (1 << CRK_DSA_SIGN)
3625+#define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY)
3626+#define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY)
3627+
3628+/*
3629+ * done against open of /dev/crypto, to get a cloned descriptor.
3630+ * Please use F_SETFD against the cloned descriptor.
3631+ */
3632+#define CRIOGET _IOWR('c', 100, u_int32_t)
3633+#define CRIOASYMFEAT CIOCASYMFEAT
3634+#define CRIOFINDDEV CIOCFINDDEV
3635+
3636+/* the following are done against the cloned descriptor */
3637+#define CIOCGSESSION _IOWR('c', 101, struct session_op)
3638+#define CIOCFSESSION _IOW('c', 102, u_int32_t)
3639+#define CIOCCRYPT _IOWR('c', 103, struct crypt_op)
3640+#define CIOCKEY _IOWR('c', 104, struct crypt_kop)
3641+#define CIOCASYMFEAT _IOR('c', 105, u_int32_t)
3642+#define CIOCGSESSION2 _IOWR('c', 106, struct session2_op)
3643+#define CIOCKEY2 _IOWR('c', 107, struct crypt_kop)
3644+#define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op)
3645+
3646+struct cryptotstat {
3647+ struct timespec acc; /* total accumulated time */
3648+ struct timespec min; /* min time */
3649+ struct timespec max; /* max time */
3650+ u_int32_t count; /* number of observations */
3651+};
3652+
3653+struct cryptostats {
3654+ u_int32_t cs_ops; /* symmetric crypto ops submitted */
3655+ u_int32_t cs_errs; /* symmetric crypto ops that failed */
3656+ u_int32_t cs_kops; /* asymetric/key ops submitted */
3657+ u_int32_t cs_kerrs; /* asymetric/key ops that failed */
3658+ u_int32_t cs_intrs; /* crypto swi thread activations */
3659+ u_int32_t cs_rets; /* crypto return thread activations */
3660+ u_int32_t cs_blocks; /* symmetric op driver block */
3661+ u_int32_t cs_kblocks; /* symmetric op driver block */
3662+ /*
3663+ * When CRYPTO_TIMING is defined at compile time and the
3664+ * sysctl debug.crypto is set to 1, the crypto system will
3665+ * accumulate statistics about how long it takes to process
3666+ * crypto requests at various points during processing.
3667+ */
3668+ struct cryptotstat cs_invoke; /* crypto_dipsatch -> crypto_invoke */
3669+ struct cryptotstat cs_done; /* crypto_invoke -> crypto_done */
3670+ struct cryptotstat cs_cb; /* crypto_done -> callback */
3671+ struct cryptotstat cs_finis; /* callback -> callback return */
3672+
3673+ u_int32_t cs_drops; /* crypto ops dropped due to congestion */
3674+};
3675+
3676+#ifdef __KERNEL__
3677+
3678+/* Standard initialization structure beginning */
3679+struct cryptoini {
3680+ int cri_alg; /* Algorithm to use */
3681+ int cri_klen; /* Key length, in bits */
3682+ int cri_mlen; /* Number of bytes we want from the
3683+ entire hash. 0 means all. */
3684+ caddr_t cri_key; /* key to use */
3685+ u_int8_t cri_iv[EALG_MAX_BLOCK_LEN]; /* IV to use */
3686+ struct cryptoini *cri_next;
3687+};
3688+
3689+/* Describe boundaries of a single crypto operation */
3690+struct cryptodesc {
3691+ int crd_skip; /* How many bytes to ignore from start */
3692+ int crd_len; /* How many bytes to process */
3693+ int crd_inject; /* Where to inject results, if applicable */
3694+ int crd_flags;
3695+
3696+#define CRD_F_ENCRYPT 0x01 /* Set when doing encryption */
3697+#define CRD_F_IV_PRESENT 0x02 /* When encrypting, IV is already in
3698+ place, so don't copy. */
3699+#define CRD_F_IV_EXPLICIT 0x04 /* IV explicitly provided */
3700+#define CRD_F_DSA_SHA_NEEDED 0x08 /* Compute SHA-1 of buffer for DSA */
3701+#define CRD_F_KEY_EXPLICIT 0x10 /* Key explicitly provided */
3702+#define CRD_F_COMP 0x0f /* Set when doing compression */
3703+
3704+ struct cryptoini CRD_INI; /* Initialization/context data */
3705+#define crd_iv CRD_INI.cri_iv
3706+#define crd_key CRD_INI.cri_key
3707+#define crd_alg CRD_INI.cri_alg
3708+#define crd_klen CRD_INI.cri_klen
3709+#define crd_mlen CRD_INI.cri_mlen
3710+
3711+ struct cryptodesc *crd_next;
3712+};
3713+
3714+/* Structure describing complete operation */
3715+struct cryptop {
3716+ struct list_head crp_next;
3717+ wait_queue_head_t crp_waitq;
3718+
3719+ u_int64_t crp_sid; /* Session ID */
3720+ int crp_ilen; /* Input data total length */
3721+ int crp_olen; /* Result total length */
3722+
3723+ int crp_etype; /*
3724+ * Error type (zero means no error).
3725+ * All error codes except EAGAIN
3726+ * indicate possible data corruption (as in,
3727+ * the data have been touched). On all
3728+ * errors, the crp_sid may have changed
3729+ * (reset to a new one), so the caller
3730+ * should always check and use the new
3731+ * value on future requests.
3732+ */
3733+ int crp_flags;
3734+
3735+#define CRYPTO_F_SKBUF 0x0001 /* Input/output are skbuf chains */
3736+#define CRYPTO_F_IOV 0x0002 /* Input/output are uio */
3737+#define CRYPTO_F_REL 0x0004 /* Must return data in same place */
3738+#define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */
3739+#define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */
3740+#define CRYPTO_F_DONE 0x0020 /* Operation completed */
3741+#define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */
3742+
3743+ caddr_t crp_buf; /* Data to be processed */
3744+ caddr_t crp_opaque; /* Opaque pointer, passed along */
3745+ struct cryptodesc *crp_desc; /* Linked list of processing descriptors */
3746+
3747+ int (*crp_callback)(struct cryptop *); /* Callback function */
3748+};
3749+
3750+#define CRYPTO_BUF_CONTIG 0x0
3751+#define CRYPTO_BUF_IOV 0x1
3752+#define CRYPTO_BUF_SKBUF 0x2
3753+
3754+#define CRYPTO_OP_DECRYPT 0x0
3755+#define CRYPTO_OP_ENCRYPT 0x1
3756+
3757+/*
3758+ * Hints passed to process methods.
3759+ */
3760+#define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */
3761+
3762+struct cryptkop {
3763+ struct list_head krp_next;
3764+ wait_queue_head_t krp_waitq;
3765+
3766+ int krp_flags;
3767+#define CRYPTO_KF_DONE 0x0001 /* Operation completed */
3768+#define CRYPTO_KF_CBIMM 0x0002 /* Do callback immediately */
3769+
3770+ u_int krp_op; /* ie. CRK_MOD_EXP or other */
3771+ u_int krp_status; /* return status */
3772+ u_short krp_iparams; /* # of input parameters */
3773+ u_short krp_oparams; /* # of output parameters */
3774+ u_int krp_crid; /* desired device, etc. */
3775+ u_int32_t krp_hid;
3776+ struct crparam krp_param[CRK_MAXPARAM]; /* kvm */
3777+ int (*krp_callback)(struct cryptkop *);
3778+};
3779+
3780+#include <ocf-compat.h>
3781+
3782+/*
3783+ * Session ids are 64 bits. The lower 32 bits contain a "local id" which
3784+ * is a driver-private session identifier. The upper 32 bits contain a
3785+ * "hardware id" used by the core crypto code to identify the driver and
3786+ * a copy of the driver's capabilities that can be used by client code to
3787+ * optimize operation.
3788+ */
3789+#define CRYPTO_SESID2HID(_sid) (((_sid) >> 32) & 0x00ffffff)
3790+#define CRYPTO_SESID2CAPS(_sid) (((_sid) >> 32) & 0xff000000)
3791+#define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff)
3792+
3793+extern int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
3794+extern int crypto_freesession(u_int64_t sid);
3795+#define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE
3796+#define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE
3797+#define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */
3798+extern int32_t crypto_get_driverid(device_t dev, int flags);
3799+extern int crypto_find_driver(const char *);
3800+extern device_t crypto_find_device_byhid(int hid);
3801+extern int crypto_getcaps(int hid);
3802+extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
3803+ u_int32_t flags);
3804+extern int crypto_kregister(u_int32_t, int, u_int32_t);
3805+extern int crypto_unregister(u_int32_t driverid, int alg);
3806+extern int crypto_unregister_all(u_int32_t driverid);
3807+extern int crypto_dispatch(struct cryptop *crp);
3808+extern int crypto_kdispatch(struct cryptkop *);
3809+#define CRYPTO_SYMQ 0x1
3810+#define CRYPTO_ASYMQ 0x2
3811+extern int crypto_unblock(u_int32_t, int);
3812+extern void crypto_done(struct cryptop *crp);
3813+extern void crypto_kdone(struct cryptkop *);
3814+extern int crypto_getfeat(int *);
3815+
3816+extern void crypto_freereq(struct cryptop *crp);
3817+extern struct cryptop *crypto_getreq(int num);
3818+
3819+extern int crypto_usercrypto; /* userland may do crypto requests */
3820+extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */
3821+extern int crypto_devallowsoft; /* only use hardware crypto */
3822+
3823+/*
3824+ * random number support, crypto_unregister_all will unregister
3825+ */
3826+extern int crypto_rregister(u_int32_t driverid,
3827+ int (*read_random)(void *arg, u_int32_t *buf, int len), void *arg);
3828+extern int crypto_runregister_all(u_int32_t driverid);
3829+
3830+/*
3831+ * Crypto-related utility routines used mainly by drivers.
3832+ *
3833+ * XXX these don't really belong here; but for now they're
3834+ * kept apart from the rest of the system.
3835+ */
3836+struct uio;
3837+extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
3838+extern void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
3839+extern struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
3840+
3841+extern void crypto_copyback(int flags, caddr_t buf, int off, int size,
3842+ caddr_t in);
3843+extern void crypto_copydata(int flags, caddr_t buf, int off, int size,
3844+ caddr_t out);
3845+extern int crypto_apply(int flags, caddr_t buf, int off, int len,
3846+ int (*f)(void *, void *, u_int), void *arg);
3847+
3848+#endif /* __KERNEL__ */
3849+#endif /* _CRYPTO_CRYPTO_H_ */
3850diff --git a/crypto/ocf/cryptosoft.c b/crypto/ocf/cryptosoft.c
3851new file mode 100644
3852index 0000000..aa2383d
3853--- /dev/null
3854+++ b/crypto/ocf/cryptosoft.c
3855@@ -0,0 +1,1322 @@
3856+/*
3857+ * An OCF module that uses the linux kernel cryptoapi, based on the
3858+ * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
3859+ * but is mostly unrecognisable,
3860+ *
3861+ * Written by David McCullough <david_mccullough@mcafee.com>
3862+ * Copyright (C) 2004-2011 David McCullough
3863+ * Copyright (C) 2004-2005 Intel Corporation.
3864+ *
3865+ * LICENSE TERMS
3866+ *
3867+ * The free distribution and use of this software in both source and binary
3868+ * form is allowed (with or without changes) provided that:
3869+ *
3870+ * 1. distributions of this source code include the above copyright
3871+ * notice, this list of conditions and the following disclaimer;
3872+ *
3873+ * 2. distributions in binary form include the above copyright
3874+ * notice, this list of conditions and the following disclaimer
3875+ * in the documentation and/or other associated materials;
3876+ *
3877+ * 3. the copyright holder's name is not used to endorse products
3878+ * built using this software without specific written permission.
3879+ *
3880+ * ALTERNATIVELY, provided that this notice is retained in full, this product
3881+ * may be distributed under the terms of the GNU General Public License (GPL),
3882+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
3883+ *
3884+ * DISCLAIMER
3885+ *
3886+ * This software is provided 'as is' with no explicit or implied warranties
3887+ * in respect of its properties, including, but not limited to, correctness
3888+ * and/or fitness for purpose.
3889+ * ---------------------------------------------------------------------------
3890+ */
3891+
3892+#include <linux/version.h>
3893+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
3894+#include <linux/config.h>
3895+#endif
3896+#include <linux/module.h>
3897+#include <linux/init.h>
3898+#include <linux/list.h>
3899+#include <linux/slab.h>
3900+#include <linux/sched.h>
3901+#include <linux/wait.h>
3902+#include <linux/crypto.h>
3903+#include <linux/mm.h>
3904+#include <linux/skbuff.h>
3905+#include <linux/random.h>
3906+#include <linux/interrupt.h>
3907+#include <linux/spinlock.h>
3908+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3909+#include <linux/scatterlist.h>
3910+#endif
3911+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
3912+#include <crypto/hash.h>
3913+#endif
3914+
3915+#include <cryptodev.h>
3916+#include <uio.h>
3917+
3918+struct {
3919+ softc_device_decl sc_dev;
3920+} swcr_softc;
3921+
3922+#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
3923+
3924+#define SW_TYPE_CIPHER 0x01
3925+#define SW_TYPE_HMAC 0x02
3926+#define SW_TYPE_HASH 0x04
3927+#define SW_TYPE_COMP 0x08
3928+#define SW_TYPE_BLKCIPHER 0x10
3929+#define SW_TYPE_ALG_MASK 0x1f
3930+
3931+#define SW_TYPE_ASYNC 0x8000
3932+
3933+#define SW_TYPE_INUSE 0x10000000
3934+
3935+/* We change some of the above if we have an async interface */
3936+
3937+#define SW_TYPE_ALG_AMASK (SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)
3938+
3939+#define SW_TYPE_ABLKCIPHER (SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
3940+#define SW_TYPE_AHASH (SW_TYPE_HASH | SW_TYPE_ASYNC)
3941+#define SW_TYPE_AHMAC (SW_TYPE_HMAC | SW_TYPE_ASYNC)
3942+
3943+#define SCATTERLIST_MAX 16
3944+
3945+struct swcr_data {
3946+ struct work_struct workq;
3947+ int sw_type;
3948+ int sw_alg;
3949+ struct crypto_tfm *sw_tfm;
3950+ spinlock_t sw_tfm_lock;
3951+ union {
3952+ struct {
3953+ char *sw_key;
3954+ int sw_klen;
3955+ int sw_mlen;
3956+ } hmac;
3957+ void *sw_comp_buf;
3958+ } u;
3959+ struct swcr_data *sw_next;
3960+};
3961+
3962+struct swcr_req {
3963+ struct swcr_data *sw_head;
3964+ struct swcr_data *sw;
3965+ struct cryptop *crp;
3966+ struct cryptodesc *crd;
3967+ struct scatterlist sg[SCATTERLIST_MAX];
3968+ unsigned char iv[EALG_MAX_BLOCK_LEN];
3969+ char result[HASH_MAX_LEN];
3970+ void *crypto_req;
3971+};
3972+
3973+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
3974+static kmem_cache_t *swcr_req_cache;
3975+#else
3976+static struct kmem_cache *swcr_req_cache;
3977+#endif
3978+
3979+#ifndef CRYPTO_TFM_MODE_CBC
3980+/*
3981+ * As of linux-2.6.21 this is no longer defined, and presumably no longer
3982+ * needed to be passed into the crypto core code.
3983+ */
3984+#define CRYPTO_TFM_MODE_CBC 0
3985+#define CRYPTO_TFM_MODE_ECB 0
3986+#endif
3987+
3988+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
3989+ /*
3990+ * Linux 2.6.19 introduced a new Crypto API, setup macro's to convert new
3991+ * API into old API.
3992+ */
3993+
3994+ /* Symmetric/Block Cipher */
3995+ struct blkcipher_desc
3996+ {
3997+ struct crypto_tfm *tfm;
3998+ void *info;
3999+ };
4000+ #define ecb(X) #X , CRYPTO_TFM_MODE_ECB
4001+ #define cbc(X) #X , CRYPTO_TFM_MODE_CBC
4002+ #define crypto_has_blkcipher(X, Y, Z) crypto_alg_available(X, 0)
4003+ #define crypto_blkcipher_cast(X) X
4004+ #define crypto_blkcipher_tfm(X) X
4005+ #define crypto_alloc_blkcipher(X, Y, Z) crypto_alloc_tfm(X, mode)
4006+ #define crypto_blkcipher_ivsize(X) crypto_tfm_alg_ivsize(X)
4007+ #define crypto_blkcipher_blocksize(X) crypto_tfm_alg_blocksize(X)
4008+ #define crypto_blkcipher_setkey(X, Y, Z) crypto_cipher_setkey(X, Y, Z)
4009+ #define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
4010+ crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
4011+ #define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
4012+ crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
4013+ #define crypto_blkcipher_set_flags(x, y) /* nop */
4014+ #define crypto_free_blkcipher(x) crypto_free_tfm(x)
4015+ #define crypto_free_comp crypto_free_tfm
4016+ #define crypto_free_hash crypto_free_tfm
4017+
4018+ /* Hash/HMAC/Digest */
4019+ struct hash_desc
4020+ {
4021+ struct crypto_tfm *tfm;
4022+ };
4023+ #define hmac(X) #X , 0
4024+ #define crypto_has_hash(X, Y, Z) crypto_alg_available(X, 0)
4025+ #define crypto_hash_cast(X) X
4026+ #define crypto_hash_tfm(X) X
4027+ #define crypto_alloc_hash(X, Y, Z) crypto_alloc_tfm(X, mode)
4028+ #define crypto_hash_digestsize(X) crypto_tfm_alg_digestsize(X)
4029+ #define crypto_hash_digest(W, X, Y, Z) \
4030+ crypto_digest_digest((W)->tfm, X, sg_num, Z)
4031+
4032+ /* Asymmetric Cipher */
4033+ #define crypto_has_cipher(X, Y, Z) crypto_alg_available(X, 0)
4034+
4035+ /* Compression */
4036+ #define crypto_has_comp(X, Y, Z) crypto_alg_available(X, 0)
4037+ #define crypto_comp_tfm(X) X
4038+ #define crypto_comp_cast(X) X
4039+ #define crypto_alloc_comp(X, Y, Z) crypto_alloc_tfm(X, mode)
4040+ #define plain(X) #X , 0
4041+#else
4042+ #define ecb(X) "ecb(" #X ")" , 0
4043+ #define cbc(X) "cbc(" #X ")" , 0
4044+ #define hmac(X) "hmac(" #X ")" , 0
4045+ #define plain(X) #X , 0
4046+#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
4047+
4048+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
4049+/* no ablkcipher in older kernels */
4050+#define crypto_alloc_ablkcipher(a,b,c) (NULL)
4051+#define crypto_ablkcipher_tfm(x) ((struct crypto_tfm *)(x))
4052+#define crypto_ablkcipher_set_flags(a, b) /* nop */
4053+#define crypto_ablkcipher_setkey(x, y, z) (-EINVAL)
4054+#define crypto_has_ablkcipher(a,b,c) (0)
4055+#else
4056+#define HAVE_ABLKCIPHER
4057+#endif
4058+
4059+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
4060+/* no ahash in older kernels */
4061+#define crypto_ahash_tfm(x) ((struct crypto_tfm *)(x))
4062+#define crypto_alloc_ahash(a,b,c) (NULL)
4063+#define crypto_ahash_digestsize(x) 0
4064+#else
4065+#define HAVE_AHASH
4066+#endif
4067+
4068+struct crypto_details {
4069+ char *alg_name;
4070+ int mode;
4071+ int sw_type;
4072+};
4073+
4074+static struct crypto_details crypto_details[] = {
4075+ [CRYPTO_DES_CBC] = { cbc(des), SW_TYPE_BLKCIPHER, },
4076+ [CRYPTO_3DES_CBC] = { cbc(des3_ede), SW_TYPE_BLKCIPHER, },
4077+ [CRYPTO_BLF_CBC] = { cbc(blowfish), SW_TYPE_BLKCIPHER, },
4078+ [CRYPTO_CAST_CBC] = { cbc(cast5), SW_TYPE_BLKCIPHER, },
4079+ [CRYPTO_SKIPJACK_CBC] = { cbc(skipjack), SW_TYPE_BLKCIPHER, },
4080+ [CRYPTO_MD5_HMAC] = { hmac(md5), SW_TYPE_HMAC, },
4081+ [CRYPTO_SHA1_HMAC] = { hmac(sha1), SW_TYPE_HMAC, },
4082+ [CRYPTO_RIPEMD160_HMAC] = { hmac(ripemd160), SW_TYPE_HMAC, },
4083+ [CRYPTO_MD5_KPDK] = { plain(md5-kpdk), SW_TYPE_HASH, },
4084+ [CRYPTO_SHA1_KPDK] = { plain(sha1-kpdk), SW_TYPE_HASH, },
4085+ [CRYPTO_AES_CBC] = { cbc(aes), SW_TYPE_BLKCIPHER, },
4086+ [CRYPTO_ARC4] = { ecb(arc4), SW_TYPE_BLKCIPHER, },
4087+ [CRYPTO_MD5] = { plain(md5), SW_TYPE_HASH, },
4088+ [CRYPTO_SHA1] = { plain(sha1), SW_TYPE_HASH, },
4089+ [CRYPTO_NULL_HMAC] = { hmac(digest_null), SW_TYPE_HMAC, },
4090+ [CRYPTO_NULL_CBC] = { cbc(cipher_null), SW_TYPE_BLKCIPHER, },
4091+ [CRYPTO_DEFLATE_COMP] = { plain(deflate), SW_TYPE_COMP, },
4092+ [CRYPTO_SHA2_256_HMAC] = { hmac(sha256), SW_TYPE_HMAC, },
4093+ [CRYPTO_SHA2_384_HMAC] = { hmac(sha384), SW_TYPE_HMAC, },
4094+ [CRYPTO_SHA2_512_HMAC] = { hmac(sha512), SW_TYPE_HMAC, },
4095+ [CRYPTO_CAMELLIA_CBC] = { cbc(camellia), SW_TYPE_BLKCIPHER, },
4096+ [CRYPTO_SHA2_256] = { plain(sha256), SW_TYPE_HASH, },
4097+ [CRYPTO_SHA2_384] = { plain(sha384), SW_TYPE_HASH, },
4098+ [CRYPTO_SHA2_512] = { plain(sha512), SW_TYPE_HASH, },
4099+ [CRYPTO_RIPEMD160] = { plain(ripemd160), SW_TYPE_HASH, },
4100+};
4101+
4102+int32_t swcr_id = -1;
4103+module_param(swcr_id, int, 0444);
4104+MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");
4105+
4106+int swcr_fail_if_compression_grows = 1;
4107+module_param(swcr_fail_if_compression_grows, int, 0644);
4108+MODULE_PARM_DESC(swcr_fail_if_compression_grows,
4109+ "Treat compression that results in more data as a failure");
4110+
4111+int swcr_no_ahash = 0;
4112+module_param(swcr_no_ahash, int, 0644);
4113+MODULE_PARM_DESC(swcr_no_ahash,
4114+ "Do not use async hash/hmac even if available");
4115+
4116+int swcr_no_ablk = 0;
4117+module_param(swcr_no_ablk, int, 0644);
4118+MODULE_PARM_DESC(swcr_no_ablk,
4119+ "Do not use async blk ciphers even if available");
4120+
4121+static struct swcr_data **swcr_sessions = NULL;
4122+static u_int32_t swcr_sesnum = 0;
4123+
4124+static int swcr_process(device_t, struct cryptop *, int);
4125+static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
4126+static int swcr_freesession(device_t, u_int64_t);
4127+
4128+static device_method_t swcr_methods = {
4129+ /* crypto device methods */
4130+ DEVMETHOD(cryptodev_newsession, swcr_newsession),
4131+ DEVMETHOD(cryptodev_freesession,swcr_freesession),
4132+ DEVMETHOD(cryptodev_process, swcr_process),
4133+};
4134+
4135+#define debug swcr_debug
4136+int swcr_debug = 0;
4137+module_param(swcr_debug, int, 0644);
4138+MODULE_PARM_DESC(swcr_debug, "Enable debug");
4139+
4140+static void swcr_process_req(struct swcr_req *req);
4141+
4142+/*
4143+ * somethings just need to be run with user context no matter whether
4144+ * the kernel compression libs use vmalloc/vfree for example.
4145+ */
4146+
4147+typedef struct {
4148+ struct work_struct wq;
4149+ void (*func)(void *arg);
4150+ void *arg;
4151+} execute_later_t;
4152+
4153+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
4154+static void
4155+doing_it_now(struct work_struct *wq)
4156+{
4157+ execute_later_t *w = container_of(wq, execute_later_t, wq);
4158+ (w->func)(w->arg);
4159+ kfree(w);
4160+}
4161+#else
4162+static void
4163+doing_it_now(void *arg)
4164+{
4165+ execute_later_t *w = (execute_later_t *) arg;
4166+ (w->func)(w->arg);
4167+ kfree(w);
4168+}
4169+#endif
4170+
4171+static void
4172+execute_later(void (fn)(void *), void *arg)
4173+{
4174+ execute_later_t *w;
4175+
4176+ w = (execute_later_t *) kmalloc(sizeof(execute_later_t), SLAB_ATOMIC);
4177+ if (w) {
4178+ memset(w, '\0', sizeof(w));
4179+ w->func = fn;
4180+ w->arg = arg;
4181+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
4182+ INIT_WORK(&w->wq, doing_it_now);
4183+#else
4184+ INIT_WORK(&w->wq, doing_it_now, w);
4185+#endif
4186+ schedule_work(&w->wq);
4187+ }
4188+}
4189+
4190+/*
4191+ * Generate a new software session.
4192+ */
4193+static int
4194+swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
4195+{
4196+ struct swcr_data **swd;
4197+ u_int32_t i;
4198+ int error;
4199+ char *algo;
4200+ int mode;
4201+
4202+ dprintk("%s()\n", __FUNCTION__);
4203+ if (sid == NULL || cri == NULL) {
4204+ dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
4205+ return EINVAL;
4206+ }
4207+
4208+ if (swcr_sessions) {
4209+ for (i = 1; i < swcr_sesnum; i++)
4210+ if (swcr_sessions[i] == NULL)
4211+ break;
4212+ } else
4213+ i = 1; /* NB: to silence compiler warning */
4214+
4215+ if (swcr_sessions == NULL || i == swcr_sesnum) {
4216+ if (swcr_sessions == NULL) {
4217+ i = 1; /* We leave swcr_sessions[0] empty */
4218+ swcr_sesnum = CRYPTO_SW_SESSIONS;
4219+ } else
4220+ swcr_sesnum *= 2;
4221+
4222+ swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
4223+ if (swd == NULL) {
4224+ /* Reset session number */
4225+ if (swcr_sesnum == CRYPTO_SW_SESSIONS)
4226+ swcr_sesnum = 0;
4227+ else
4228+ swcr_sesnum /= 2;
4229+ dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
4230+ return ENOBUFS;
4231+ }
4232+ memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
4233+
4234+ /* Copy existing sessions */
4235+ if (swcr_sessions) {
4236+ memcpy(swd, swcr_sessions,
4237+ (swcr_sesnum / 2) * sizeof(struct swcr_data *));
4238+ kfree(swcr_sessions);
4239+ }
4240+
4241+ swcr_sessions = swd;
4242+ }
4243+
4244+ swd = &swcr_sessions[i];
4245+ *sid = i;
4246+
4247+ while (cri) {
4248+ *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
4249+ SLAB_ATOMIC);
4250+ if (*swd == NULL) {
4251+ swcr_freesession(NULL, i);
4252+ dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
4253+ return ENOBUFS;
4254+ }
4255+ memset(*swd, 0, sizeof(struct swcr_data));
4256+
4257+ if (cri->cri_alg < 0 ||
4258+ cri->cri_alg>=sizeof(crypto_details)/sizeof(crypto_details[0])){
4259+ printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
4260+ swcr_freesession(NULL, i);
4261+ return EINVAL;
4262+ }
4263+
4264+ algo = crypto_details[cri->cri_alg].alg_name;
4265+ if (!algo || !*algo) {
4266+ printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
4267+ swcr_freesession(NULL, i);
4268+ return EINVAL;
4269+ }
4270+
4271+ mode = crypto_details[cri->cri_alg].mode;
4272+ (*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
4273+ (*swd)->sw_alg = cri->cri_alg;
4274+
4275+ spin_lock_init(&(*swd)->sw_tfm_lock);
4276+
4277+ /* Algorithm specific configuration */
4278+ switch (cri->cri_alg) {
4279+ case CRYPTO_NULL_CBC:
4280+ cri->cri_klen = 0; /* make it work with crypto API */
4281+ break;
4282+ default:
4283+ break;
4284+ }
4285+
4286+ if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
4287+ dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
4288+ algo, mode);
4289+
4290+ /* try async first */
4291+ (*swd)->sw_tfm = swcr_no_ablk ? NULL :
4292+ crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
4293+ if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm)) {
4294+ dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
4295+ (*swd)->sw_type |= SW_TYPE_ASYNC;
4296+ } else {
4297+ (*swd)->sw_tfm = crypto_blkcipher_tfm(
4298+ crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
4299+ if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm))
4300+ dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
4301+ }
4302+ if (!(*swd)->sw_tfm || IS_ERR((*swd)->sw_tfm)) {
4303+ int err;
4304+ dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
4305+ algo,mode);
4306+ err = IS_ERR((*swd)->sw_tfm) ? -(PTR_ERR((*swd)->sw_tfm)) : EINVAL;
4307+ (*swd)->sw_tfm = NULL; /* ensure NULL */
4308+ swcr_freesession(NULL, i);
4309+ return err;
4310+ }
4311+
4312+ if (debug) {
4313+ dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
4314+ __FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8);
4315+ for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
4316+ dprintk("%s0x%x", (i % 8) ? " " : "\n ",
4317+ cri->cri_key[i] & 0xff);
4318+ dprintk("\n");
4319+ }
4320+ if ((*swd)->sw_type & SW_TYPE_ASYNC) {
4321+ /* OCF doesn't enforce keys */
4322+ crypto_ablkcipher_set_flags(
4323+ __crypto_ablkcipher_cast((*swd)->sw_tfm),
4324+ CRYPTO_TFM_REQ_WEAK_KEY);
4325+ error = crypto_ablkcipher_setkey(
4326+ __crypto_ablkcipher_cast((*swd)->sw_tfm),
4327+ cri->cri_key, (cri->cri_klen + 7) / 8);
4328+ } else {
4329+ /* OCF doesn't enforce keys */
4330+ crypto_blkcipher_set_flags(
4331+ crypto_blkcipher_cast((*swd)->sw_tfm),
4332+ CRYPTO_TFM_REQ_WEAK_KEY);
4333+ error = crypto_blkcipher_setkey(
4334+ crypto_blkcipher_cast((*swd)->sw_tfm),
4335+ cri->cri_key, (cri->cri_klen + 7) / 8);
4336+ }
4337+ if (error) {
4338+ printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
4339+ (*swd)->sw_tfm->crt_flags);
4340+ swcr_freesession(NULL, i);
4341+ return error;
4342+ }
4343+ } else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
4344+ dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
4345+ algo, mode);
4346+
4347+ /* try async first */
4348+ (*swd)->sw_tfm = swcr_no_ahash ? NULL :
4349+ crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
4350+ if ((*swd)->sw_tfm) {
4351+ dprintk("%s %s hash is async\n", __FUNCTION__, algo);
4352+ (*swd)->sw_type |= SW_TYPE_ASYNC;
4353+ } else {
4354+ dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
4355+ (*swd)->sw_tfm = crypto_hash_tfm(
4356+ crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
4357+ }
4358+
4359+ if (!(*swd)->sw_tfm) {
4360+ dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
4361+ algo, mode);
4362+ swcr_freesession(NULL, i);
4363+ return EINVAL;
4364+ }
4365+
4366+ (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
4367+ (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
4368+ SLAB_ATOMIC);
4369+ if ((*swd)->u.hmac.sw_key == NULL) {
4370+ swcr_freesession(NULL, i);
4371+ dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
4372+ return ENOBUFS;
4373+ }
4374+ memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
4375+ if (cri->cri_mlen) {
4376+ (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
4377+ } else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
4378+ (*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
4379+ __crypto_ahash_cast((*swd)->sw_tfm));
4380+ } else {
4381+ (*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
4382+ crypto_hash_cast((*swd)->sw_tfm));
4383+ }
4384+ } else if ((*swd)->sw_type & SW_TYPE_COMP) {
4385+ (*swd)->sw_tfm = crypto_comp_tfm(
4386+ crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
4387+ if (!(*swd)->sw_tfm) {
4388+ dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
4389+ algo, mode);
4390+ swcr_freesession(NULL, i);
4391+ return EINVAL;
4392+ }
4393+ (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
4394+ if ((*swd)->u.sw_comp_buf == NULL) {
4395+ swcr_freesession(NULL, i);
4396+ dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
4397+ return ENOBUFS;
4398+ }
4399+ } else {
4400+ printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
4401+ swcr_freesession(NULL, i);
4402+ return EINVAL;
4403+ }
4404+
4405+ cri = cri->cri_next;
4406+ swd = &((*swd)->sw_next);
4407+ }
4408+ return 0;
4409+}
4410+
4411+/*
4412+ * Free a session.
4413+ */
4414+static int
4415+swcr_freesession(device_t dev, u_int64_t tid)
4416+{
4417+ struct swcr_data *swd;
4418+ u_int32_t sid = CRYPTO_SESID2LID(tid);
4419+
4420+ dprintk("%s()\n", __FUNCTION__);
4421+ if (sid > swcr_sesnum || swcr_sessions == NULL ||
4422+ swcr_sessions[sid] == NULL) {
4423+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
4424+ return(EINVAL);
4425+ }
4426+
4427+ /* Silently accept and return */
4428+ if (sid == 0)
4429+ return(0);
4430+
4431+ while ((swd = swcr_sessions[sid]) != NULL) {
4432+ swcr_sessions[sid] = swd->sw_next;
4433+ if (swd->sw_tfm) {
4434+ switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
4435+#ifdef HAVE_AHASH
4436+ case SW_TYPE_AHMAC:
4437+ case SW_TYPE_AHASH:
4438+ crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
4439+ break;
4440+#endif
4441+#ifdef HAVE_ABLKCIPHER
4442+ case SW_TYPE_ABLKCIPHER:
4443+ crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
4444+ break;
4445+#endif
4446+ case SW_TYPE_BLKCIPHER:
4447+ crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
4448+ break;
4449+ case SW_TYPE_HMAC:
4450+ case SW_TYPE_HASH:
4451+ crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
4452+ break;
4453+ case SW_TYPE_COMP:
4454+ if (in_interrupt())
4455+ execute_later((void (*)(void *))crypto_free_comp, (void *)crypto_comp_cast(swd->sw_tfm));
4456+ else
4457+ crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
4458+ break;
4459+ default:
4460+ crypto_free_tfm(swd->sw_tfm);
4461+ break;
4462+ }
4463+ swd->sw_tfm = NULL;
4464+ }
4465+ if (swd->sw_type & SW_TYPE_COMP) {
4466+ if (swd->u.sw_comp_buf)
4467+ kfree(swd->u.sw_comp_buf);
4468+ } else {
4469+ if (swd->u.hmac.sw_key)
4470+ kfree(swd->u.hmac.sw_key);
4471+ }
4472+ kfree(swd);
4473+ }
4474+ return 0;
4475+}
4476+
4477+static void swcr_process_req_complete(struct swcr_req *req)
4478+{
4479+ dprintk("%s()\n", __FUNCTION__);
4480+
4481+ if (req->sw->sw_type & SW_TYPE_INUSE) {
4482+ unsigned long flags;
4483+ spin_lock_irqsave(&req->sw->sw_tfm_lock, flags);
4484+ req->sw->sw_type &= ~SW_TYPE_INUSE;
4485+ spin_unlock_irqrestore(&req->sw->sw_tfm_lock, flags);
4486+ }
4487+
4488+ if (req->crp->crp_etype)
4489+ goto done;
4490+
4491+ switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
4492+#if defined(HAVE_AHASH)
4493+ case SW_TYPE_AHMAC:
4494+ case SW_TYPE_AHASH:
4495+ crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
4496+ req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
4497+ ahash_request_free(req->crypto_req);
4498+ break;
4499+#endif
4500+#if defined(HAVE_ABLKCIPHER)
4501+ case SW_TYPE_ABLKCIPHER:
4502+ ablkcipher_request_free(req->crypto_req);
4503+ break;
4504+#endif
4505+ case SW_TYPE_CIPHER:
4506+ case SW_TYPE_HMAC:
4507+ case SW_TYPE_HASH:
4508+ case SW_TYPE_COMP:
4509+ case SW_TYPE_BLKCIPHER:
4510+ break;
4511+ default:
4512+ req->crp->crp_etype = EINVAL;
4513+ goto done;
4514+ }
4515+
4516+ req->crd = req->crd->crd_next;
4517+ if (req->crd) {
4518+ swcr_process_req(req);
4519+ return;
4520+ }
4521+
4522+done:
4523+ dprintk("%s crypto_done %p\n", __FUNCTION__, req);
4524+ crypto_done(req->crp);
4525+ kmem_cache_free(swcr_req_cache, req);
4526+}
4527+
4528+#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
4529+static void swcr_process_callback(struct crypto_async_request *creq, int err)
4530+{
4531+ struct swcr_req *req = creq->data;
4532+
4533+ dprintk("%s()\n", __FUNCTION__);
4534+ if (err) {
4535+ if (err == -EINPROGRESS)
4536+ return;
4537+ dprintk("%s() fail %d\n", __FUNCTION__, -err);
4538+ req->crp->crp_etype = -err;
4539+ }
4540+
4541+ swcr_process_req_complete(req);
4542+}
4543+#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */
4544+
4545+
4546+static void swcr_process_req(struct swcr_req *req)
4547+{
4548+ struct swcr_data *sw;
4549+ struct cryptop *crp = req->crp;
4550+ struct cryptodesc *crd = req->crd;
4551+ struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
4552+ struct uio *uiop = (struct uio *) crp->crp_buf;
4553+ int sg_num, sg_len, skip;
4554+
4555+ dprintk("%s()\n", __FUNCTION__);
4556+
4557+ /*
4558+ * Find the crypto context.
4559+ *
4560+ * XXX Note that the logic here prevents us from having
4561+ * XXX the same algorithm multiple times in a session
4562+ * XXX (or rather, we can but it won't give us the right
4563+ * XXX results). To do that, we'd need some way of differentiating
4564+ * XXX between the various instances of an algorithm (so we can
4565+ * XXX locate the correct crypto context).
4566+ */
4567+ for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
4568+ ;
4569+
4570+ /* No such context ? */
4571+ if (sw == NULL) {
4572+ crp->crp_etype = EINVAL;
4573+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
4574+ goto done;
4575+ }
4576+
4577+ /*
4578+ * for some types we need to ensure only one user as info is stored in
4579+ * the tfm during an operation that can get corrupted
4580+ */
4581+ switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
4582+#ifdef HAVE_AHASH
4583+ case SW_TYPE_AHMAC:
4584+ case SW_TYPE_AHASH:
4585+#endif
4586+ case SW_TYPE_HMAC:
4587+ case SW_TYPE_HASH: {
4588+ unsigned long flags;
4589+ spin_lock_irqsave(&sw->sw_tfm_lock, flags);
4590+ if (sw->sw_type & SW_TYPE_INUSE) {
4591+ spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
4592+ execute_later((void (*)(void *))swcr_process_req, (void *)req);
4593+ return;
4594+ }
4595+ sw->sw_type |= SW_TYPE_INUSE;
4596+ spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
4597+ } break;
4598+ }
4599+
4600+ req->sw = sw;
4601+ skip = crd->crd_skip;
4602+
4603+ /*
4604+ * setup the SG list skip from the start of the buffer
4605+ */
4606+ memset(req->sg, 0, sizeof(req->sg));
4607+ sg_init_table(req->sg, SCATTERLIST_MAX);
4608+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
4609+ int i, len;
4610+
4611+ sg_num = 0;
4612+ sg_len = 0;
4613+
4614+ if (skip < skb_headlen(skb)) {
4615+ len = skb_headlen(skb) - skip;
4616+ if (len + sg_len > crd->crd_len)
4617+ len = crd->crd_len - sg_len;
4618+ sg_set_page(&req->sg[sg_num],
4619+ virt_to_page(skb->data + skip), len,
4620+ offset_in_page(skb->data + skip));
4621+ sg_len += len;
4622+ sg_num++;
4623+ skip = 0;
4624+ } else
4625+ skip -= skb_headlen(skb);
4626+
4627+ for (i = 0; sg_len < crd->crd_len &&
4628+ i < skb_shinfo(skb)->nr_frags &&
4629+ sg_num < SCATTERLIST_MAX; i++) {
4630+ if (skip < skb_shinfo(skb)->frags[i].size) {
4631+ len = skb_shinfo(skb)->frags[i].size - skip;
4632+ if (len + sg_len > crd->crd_len)
4633+ len = crd->crd_len - sg_len;
4634+ sg_set_page(&req->sg[sg_num],
4635+ skb_frag_page(&skb_shinfo(skb)->frags[i]),
4636+ len,
4637+ skb_shinfo(skb)->frags[i].page_offset + skip);
4638+ sg_len += len;
4639+ sg_num++;
4640+ skip = 0;
4641+ } else
4642+ skip -= skb_shinfo(skb)->frags[i].size;
4643+ }
4644+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
4645+ int len;
4646+
4647+ sg_len = 0;
4648+ for (sg_num = 0; sg_len < crd->crd_len &&
4649+ sg_num < uiop->uio_iovcnt &&
4650+ sg_num < SCATTERLIST_MAX; sg_num++) {
4651+ if (skip <= uiop->uio_iov[sg_num].iov_len) {
4652+ len = uiop->uio_iov[sg_num].iov_len - skip;
4653+ if (len + sg_len > crd->crd_len)
4654+ len = crd->crd_len - sg_len;
4655+ sg_set_page(&req->sg[sg_num],
4656+ virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
4657+ len,
4658+ offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
4659+ sg_len += len;
4660+ skip = 0;
4661+ } else
4662+ skip -= uiop->uio_iov[sg_num].iov_len;
4663+ }
4664+ } else {
4665+ sg_len = (crp->crp_ilen - skip);
4666+ if (sg_len > crd->crd_len)
4667+ sg_len = crd->crd_len;
4668+ sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
4669+ sg_len, offset_in_page(crp->crp_buf + skip));
4670+ sg_num = 1;
4671+ }
4672+ if (sg_num > 0)
4673+ sg_mark_end(&req->sg[sg_num-1]);
4674+
4675+ switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
4676+
4677+#ifdef HAVE_AHASH
4678+ case SW_TYPE_AHMAC:
4679+ case SW_TYPE_AHASH:
4680+ {
4681+ int ret;
4682+
4683+ /* check we have room for the result */
4684+ if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
4685+ dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
4686+ "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
4687+ crd->crd_inject, sw->u.hmac.sw_mlen);
4688+ crp->crp_etype = EINVAL;
4689+ goto done;
4690+ }
4691+
4692+ req->crypto_req =
4693+ ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm),GFP_ATOMIC);
4694+ if (!req->crypto_req) {
4695+ crp->crp_etype = ENOMEM;
4696+ dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
4697+ goto done;
4698+ }
4699+
4700+ ahash_request_set_callback(req->crypto_req,
4701+ CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);
4702+
4703+ memset(req->result, 0, sizeof(req->result));
4704+
4705+ if (sw->sw_type & SW_TYPE_AHMAC)
4706+ crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
4707+ sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
4708+ ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
4709+ ret = crypto_ahash_digest(req->crypto_req);
4710+ switch (ret) {
4711+ case -EINPROGRESS:
4712+ case -EBUSY:
4713+ return;
4714+ default:
4715+ case 0:
4716+ dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
4717+ crp->crp_etype = ret;
4718+ goto done;
4719+ }
4720+ } break;
4721+#endif /* HAVE_AHASH */
4722+
4723+#ifdef HAVE_ABLKCIPHER
4724+ case SW_TYPE_ABLKCIPHER: {
4725+ int ret;
4726+ unsigned char *ivp = req->iv;
4727+ int ivsize =
4728+ crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));
4729+
4730+ if (sg_len < crypto_ablkcipher_blocksize(
4731+ __crypto_ablkcipher_cast(sw->sw_tfm))) {
4732+ crp->crp_etype = EINVAL;
4733+ dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
4734+ sg_len, crypto_ablkcipher_blocksize(
4735+ __crypto_ablkcipher_cast(sw->sw_tfm)));
4736+ goto done;
4737+ }
4738+
4739+ if (ivsize > sizeof(req->iv)) {
4740+ crp->crp_etype = EINVAL;
4741+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
4742+ goto done;
4743+ }
4744+
4745+ req->crypto_req = ablkcipher_request_alloc(
4746+ __crypto_ablkcipher_cast(sw->sw_tfm), GFP_ATOMIC);
4747+ if (!req->crypto_req) {
4748+ crp->crp_etype = ENOMEM;
4749+ dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
4750+ __FILE__, __LINE__);
4751+ goto done;
4752+ }
4753+
4754+ ablkcipher_request_set_callback(req->crypto_req,
4755+ CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);
4756+
4757+ if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
4758+ int i, error;
4759+
4760+ if (debug) {
4761+ dprintk("%s key:", __FUNCTION__);
4762+ for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
4763+ dprintk("%s0x%x", (i % 8) ? " " : "\n ",
4764+ crd->crd_key[i] & 0xff);
4765+ dprintk("\n");
4766+ }
4767+ /* OCF doesn't enforce keys */
4768+ crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
4769+ CRYPTO_TFM_REQ_WEAK_KEY);
4770+ error = crypto_ablkcipher_setkey(
4771+ __crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
4772+ (crd->crd_klen + 7) / 8);
4773+ if (error) {
4774+ dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
4775+ error, sw->sw_tfm->crt_flags);
4776+ crp->crp_etype = -error;
4777+ }
4778+ }
4779+
4780+ if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
4781+
4782+ if (crd->crd_flags & CRD_F_IV_EXPLICIT)
4783+ ivp = crd->crd_iv;
4784+ else
4785+ get_random_bytes(ivp, ivsize);
4786+ /*
4787+ * do we have to copy the IV back to the buffer ?
4788+ */
4789+ if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
4790+ crypto_copyback(crp->crp_flags, crp->crp_buf,
4791+ crd->crd_inject, ivsize, (caddr_t)ivp);
4792+ }
4793+ ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
4794+ sg_len, ivp);
4795+ ret = crypto_ablkcipher_encrypt(req->crypto_req);
4796+
4797+ } else { /*decrypt */
4798+
4799+ if (crd->crd_flags & CRD_F_IV_EXPLICIT)
4800+ ivp = crd->crd_iv;
4801+ else
4802+ crypto_copydata(crp->crp_flags, crp->crp_buf,
4803+ crd->crd_inject, ivsize, (caddr_t)ivp);
4804+ ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
4805+ sg_len, ivp);
4806+ ret = crypto_ablkcipher_decrypt(req->crypto_req);
4807+ }
4808+
4809+ switch (ret) {
4810+ case -EINPROGRESS:
4811+ case -EBUSY:
4812+ return;
4813+ default:
4814+ case 0:
4815+ dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
4816+ crp->crp_etype = ret;
4817+ goto done;
4818+ }
4819+ } break;
4820+#endif /* HAVE_ABLKCIPHER */
4821+
4822+ case SW_TYPE_BLKCIPHER: {
4823+ unsigned char iv[EALG_MAX_BLOCK_LEN];
4824+ unsigned char *ivp = iv;
4825+ struct blkcipher_desc desc;
4826+ int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
4827+
4828+ if (sg_len < crypto_blkcipher_blocksize(
4829+ crypto_blkcipher_cast(sw->sw_tfm))) {
4830+ crp->crp_etype = EINVAL;
4831+ dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
4832+ sg_len, crypto_blkcipher_blocksize(
4833+ crypto_blkcipher_cast(sw->sw_tfm)));
4834+ goto done;
4835+ }
4836+
4837+ if (ivsize > sizeof(iv)) {
4838+ crp->crp_etype = EINVAL;
4839+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
4840+ goto done;
4841+ }
4842+
4843+ if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
4844+ int i, error;
4845+
4846+ if (debug) {
4847+ dprintk("%s key:", __FUNCTION__);
4848+ for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
4849+ dprintk("%s0x%x", (i % 8) ? " " : "\n ",
4850+ crd->crd_key[i] & 0xff);
4851+ dprintk("\n");
4852+ }
4853+ /* OCF doesn't enforce keys */
4854+ crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
4855+ CRYPTO_TFM_REQ_WEAK_KEY);
4856+ error = crypto_blkcipher_setkey(
4857+ crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
4858+ (crd->crd_klen + 7) / 8);
4859+ if (error) {
4860+ dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
4861+ error, sw->sw_tfm->crt_flags);
4862+ crp->crp_etype = -error;
4863+ }
4864+ }
4865+
4866+ memset(&desc, 0, sizeof(desc));
4867+ desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
4868+
4869+ if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
4870+
4871+ if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
4872+ ivp = crd->crd_iv;
4873+ } else {
4874+ get_random_bytes(ivp, ivsize);
4875+ }
4876+ /*
4877+ * do we have to copy the IV back to the buffer ?
4878+ */
4879+ if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
4880+ crypto_copyback(crp->crp_flags, crp->crp_buf,
4881+ crd->crd_inject, ivsize, (caddr_t)ivp);
4882+ }
4883+ desc.info = ivp;
4884+ crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);
4885+
4886+ } else { /*decrypt */
4887+
4888+ if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
4889+ ivp = crd->crd_iv;
4890+ } else {
4891+ crypto_copydata(crp->crp_flags, crp->crp_buf,
4892+ crd->crd_inject, ivsize, (caddr_t)ivp);
4893+ }
4894+ desc.info = ivp;
4895+ crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
4896+ }
4897+ } break;
4898+
4899+ case SW_TYPE_HMAC:
4900+ case SW_TYPE_HASH:
4901+ {
4902+ char result[HASH_MAX_LEN];
4903+ struct hash_desc desc;
4904+
4905+ /* check we have room for the result */
4906+ if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
4907+ dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
4908+ "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
4909+ crd->crd_inject, sw->u.hmac.sw_mlen);
4910+ crp->crp_etype = EINVAL;
4911+ goto done;
4912+ }
4913+
4914+ memset(&desc, 0, sizeof(desc));
4915+ desc.tfm = crypto_hash_cast(sw->sw_tfm);
4916+
4917+ memset(result, 0, sizeof(result));
4918+
4919+ if (sw->sw_type & SW_TYPE_HMAC) {
4920+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
4921+ crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
4922+ req->sg, sg_num, result);
4923+#else
4924+ crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
4925+ sw->u.hmac.sw_klen);
4926+ crypto_hash_digest(&desc, req->sg, sg_len, result);
4927+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
4928+
4929+ } else { /* SW_TYPE_HASH */
4930+ crypto_hash_digest(&desc, req->sg, sg_len, result);
4931+ }
4932+
4933+ crypto_copyback(crp->crp_flags, crp->crp_buf,
4934+ crd->crd_inject, sw->u.hmac.sw_mlen, result);
4935+ }
4936+ break;
4937+
4938+ case SW_TYPE_COMP: {
4939+ void *ibuf = NULL;
4940+ void *obuf = sw->u.sw_comp_buf;
4941+ int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
4942+ int ret = 0;
4943+
4944+ /*
4945+ * we need to use an additional copy if there is more than one
4946+ * input chunk since the kernel comp routines do not handle
4947+ * SG yet. Otherwise we just use the input buffer as is.
4948+ * Rather than allocate another buffer we just split the tmp
4949+ * buffer we already have.
4950+ * Perhaps we should just use zlib directly ?
4951+ */
4952+ if (sg_num > 1) {
4953+ int blk;
4954+
4955+ ibuf = obuf;
4956+ for (blk = 0; blk < sg_num; blk++) {
4957+ memcpy(obuf, sg_virt(&req->sg[blk]),
4958+ req->sg[blk].length);
4959+ obuf += req->sg[blk].length;
4960+ }
4961+ olen -= sg_len;
4962+ } else
4963+ ibuf = sg_virt(&req->sg[0]);
4964+
4965+ if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
4966+ ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
4967+ ibuf, ilen, obuf, &olen);
4968+ if (!ret && olen > crd->crd_len) {
4969+ dprintk("cryptosoft: ERANGE compress %d into %d\n",
4970+ crd->crd_len, olen);
4971+ if (swcr_fail_if_compression_grows)
4972+ ret = ERANGE;
4973+ }
4974+ } else { /* decompress */
4975+ ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
4976+ ibuf, ilen, obuf, &olen);
4977+ if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
4978+ dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
4979+ "space for %d,at offset %d\n",
4980+ crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
4981+ ret = ETOOSMALL;
4982+ }
4983+ }
4984+ if (ret)
4985+ dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
4986+
4987+ /*
4988+ * on success copy result back,
4989+ * linux crpyto API returns -errno, we need to fix that
4990+ */
4991+ crp->crp_etype = ret < 0 ? -ret : ret;
4992+ if (ret == 0) {
4993+ /* copy back the result and return it's size */
4994+ crypto_copyback(crp->crp_flags, crp->crp_buf,
4995+ crd->crd_inject, olen, obuf);
4996+ crp->crp_olen = olen;
4997+ }
4998+ } break;
4999+
5000+ default:
5001+ /* Unknown/unsupported algorithm */
5002+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
5003+ crp->crp_etype = EINVAL;
5004+ goto done;
5005+ }
5006+
5007+done:
5008+ swcr_process_req_complete(req);
5009+}
5010+
5011+
5012+/*
5013+ * Process a crypto request.
5014+ */
5015+static int
5016+swcr_process(device_t dev, struct cryptop *crp, int hint)
5017+{
5018+ struct swcr_req *req = NULL;
5019+ u_int32_t lid;
5020+
5021+ dprintk("%s()\n", __FUNCTION__);
5022+ /* Sanity check */
5023+ if (crp == NULL) {
5024+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
5025+ return EINVAL;
5026+ }
5027+
5028+ crp->crp_etype = 0;
5029+
5030+ if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
5031+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
5032+ crp->crp_etype = EINVAL;
5033+ goto done;
5034+ }
5035+
5036+ lid = crp->crp_sid & 0xffffffff;
5037+ if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
5038+ swcr_sessions[lid] == NULL) {
5039+ crp->crp_etype = ENOENT;
5040+ dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
5041+ goto done;
5042+ }
5043+
5044+ /*
5045+ * do some error checking outside of the loop for SKB and IOV processing
5046+ * this leaves us with valid skb or uiop pointers for later
5047+ */
5048+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
5049+ struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
5050+ if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
5051+ printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
5052+ skb_shinfo(skb)->nr_frags);
5053+ goto done;
5054+ }
5055+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
5056+ struct uio *uiop = (struct uio *) crp->crp_buf;
5057+ if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
5058+ printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
5059+ uiop->uio_iovcnt);
5060+ goto done;
5061+ }
5062+ }
5063+
5064+ /*
5065+ * setup a new request ready for queuing
5066+ */
5067+ req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
5068+ if (req == NULL) {
5069+ dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
5070+ crp->crp_etype = ENOMEM;
5071+ goto done;
5072+ }
5073+ memset(req, 0, sizeof(*req));
5074+
5075+ req->sw_head = swcr_sessions[lid];
5076+ req->crp = crp;
5077+ req->crd = crp->crp_desc;
5078+
5079+ swcr_process_req(req);
5080+ return 0;
5081+
5082+done:
5083+ crypto_done(crp);
5084+ if (req)
5085+ kmem_cache_free(swcr_req_cache, req);
5086+ return 0;
5087+}
5088+
5089+
5090+static int
5091+cryptosoft_init(void)
5092+{
5093+ int i, sw_type, mode;
5094+ char *algo;
5095+
5096+ dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);
5097+
5098+ swcr_req_cache = kmem_cache_create("cryptosoft_req",
5099+ sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
5100+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
5101+ , NULL
5102+#endif
5103+ );
5104+ if (!swcr_req_cache) {
5105+ printk("cryptosoft: failed to create request cache\n");
5106+ return -ENOENT;
5107+ }
5108+
5109+ softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);
5110+
5111+ swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
5112+ CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
5113+ if (swcr_id < 0) {
5114+ printk("cryptosoft: Software crypto device cannot initialize!");
5115+ return -ENODEV;
5116+ }
5117+
5118+#define REGISTER(alg) \
5119+ crypto_register(swcr_id, alg, 0,0)
5120+
5121+ for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
5122+ int found;
5123+
5124+ algo = crypto_details[i].alg_name;
5125+ if (!algo || !*algo) {
5126+ dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
5127+ continue;
5128+ }
5129+
5130+ mode = crypto_details[i].mode;
5131+ sw_type = crypto_details[i].sw_type;
5132+
5133+ found = 0;
5134+ switch (sw_type & SW_TYPE_ALG_MASK) {
5135+ case SW_TYPE_CIPHER:
5136+ found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
5137+ break;
5138+ case SW_TYPE_HMAC:
5139+ found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
5140+ break;
5141+ case SW_TYPE_HASH:
5142+ found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
5143+ break;
5144+ case SW_TYPE_COMP:
5145+ found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
5146+ break;
5147+ case SW_TYPE_BLKCIPHER:
5148+ found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
5149+ if (!found && !swcr_no_ablk)
5150+ found = crypto_has_ablkcipher(algo, 0, 0);
5151+ break;
5152+ }
5153+ if (found) {
5154+ REGISTER(i);
5155+ } else {
5156+ dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
5157+ __FUNCTION__, sw_type, i, algo);
5158+ }
5159+ }
5160+ return 0;
5161+}
5162+
5163+static void
5164+cryptosoft_exit(void)
5165+{
5166+ dprintk("%s()\n", __FUNCTION__);
5167+ crypto_unregister_all(swcr_id);
5168+ swcr_id = -1;
5169+ kmem_cache_destroy(swcr_req_cache);
5170+}
5171+
5172+late_initcall(cryptosoft_init);
5173+module_exit(cryptosoft_exit);
5174+
5175+MODULE_LICENSE("Dual BSD/GPL");
5176+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
5177+MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");
5178diff --git a/crypto/ocf/ocf-bench.c b/crypto/ocf/ocf-bench.c
5179new file mode 100644
5180index 0000000..f3fe9d0
5181--- /dev/null
5182+++ b/crypto/ocf/ocf-bench.c
5183@@ -0,0 +1,514 @@
5184+/*
5185+ * A loadable module that benchmarks the OCF crypto speed from kernel space.
5186+ *
5187+ * Copyright (C) 2004-2010 David McCullough <david_mccullough@mcafee.com>
5188+ *
5189+ * LICENSE TERMS
5190+ *
5191+ * The free distribution and use of this software in both source and binary
5192+ * form is allowed (with or without changes) provided that:
5193+ *
5194+ * 1. distributions of this source code include the above copyright
5195+ * notice, this list of conditions and the following disclaimer;
5196+ *
5197+ * 2. distributions in binary form include the above copyright
5198+ * notice, this list of conditions and the following disclaimer
5199+ * in the documentation and/or other associated materials;
5200+ *
5201+ * 3. the copyright holder's name is not used to endorse products
5202+ * built using this software without specific written permission.
5203+ *
5204+ * ALTERNATIVELY, provided that this notice is retained in full, this product
5205+ * may be distributed under the terms of the GNU General Public License (GPL),
5206+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
5207+ *
5208+ * DISCLAIMER
5209+ *
5210+ * This software is provided 'as is' with no explicit or implied warranties
5211+ * in respect of its properties, including, but not limited to, correctness
5212+ * and/or fitness for purpose.
5213+ */
5214+
5215+
5216+#include <linux/version.h>
5217+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
5218+#include <linux/config.h>
5219+#endif
5220+#include <linux/module.h>
5221+#include <linux/init.h>
5222+#include <linux/list.h>
5223+#include <linux/slab.h>
5224+#include <linux/wait.h>
5225+#include <linux/sched.h>
5226+#include <linux/spinlock.h>
5227+#include <linux/interrupt.h>
5228+#include <cryptodev.h>
5229+
5230+#ifdef I_HAVE_AN_XSCALE_WITH_INTEL_SDK
5231+#define BENCH_IXP_ACCESS_LIB 1
5232+#endif
5233+#ifdef BENCH_IXP_ACCESS_LIB
5234+#include <IxTypes.h>
5235+#include <IxOsBuffMgt.h>
5236+#include <IxNpeDl.h>
5237+#include <IxCryptoAcc.h>
5238+#include <IxQMgr.h>
5239+#include <IxOsServices.h>
5240+#include <IxOsCacheMMU.h>
5241+#endif
5242+
5243+/*
5244+ * support for access lib version 1.4
5245+ */
5246+#ifndef IX_MBUF_PRIV
5247+#define IX_MBUF_PRIV(x) ((x)->priv)
5248+#endif
5249+
5250+/*
5251+ * the number of simultaneously active requests
5252+ */
5253+static int request_q_len = 40;
5254+module_param(request_q_len, int, 0);
5255+MODULE_PARM_DESC(request_q_len, "Number of outstanding requests");
5256+
5257+/*
5258+ * how many requests we want to have processed
5259+ */
5260+static int request_num = 1024;
5261+module_param(request_num, int, 0);
5262+MODULE_PARM_DESC(request_num, "run for at least this many requests");
5263+
5264+/*
5265+ * the size of each request
5266+ */
5267+static int request_size = 1488;
5268+module_param(request_size, int, 0);
5269+MODULE_PARM_DESC(request_size, "size of each request");
5270+
5271+/*
5272+ * OCF batching of requests
5273+ */
5274+static int request_batch = 1;
5275+module_param(request_batch, int, 0);
5276+MODULE_PARM_DESC(request_batch, "enable OCF request batching");
5277+
5278+/*
5279+ * OCF immediate callback on completion
5280+ */
5281+static int request_cbimm = 1;
5282+module_param(request_cbimm, int, 0);
5283+MODULE_PARM_DESC(request_cbimm, "enable OCF immediate callback on completion");
5284+
5285+/*
5286+ * a structure for each request
5287+ */
5288+typedef struct {
5289+ struct work_struct work;
5290+#ifdef BENCH_IXP_ACCESS_LIB
5291+ IX_MBUF mbuf;
5292+#endif
5293+ unsigned char *buffer;
5294+} request_t;
5295+
5296+static request_t *requests;
5297+
5298+static spinlock_t ocfbench_counter_lock;
5299+static int outstanding;
5300+static int total;
5301+
5302+/*************************************************************************/
5303+/*
5304+ * OCF benchmark routines
5305+ */
5306+
5307+static uint64_t ocf_cryptoid;
5308+static unsigned long jstart, jstop;
5309+
5310+static int ocf_init(void);
5311+static int ocf_cb(struct cryptop *crp);
5312+static void ocf_request(void *arg);
5313+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
5314+static void ocf_request_wq(struct work_struct *work);
5315+#endif
5316+
5317+static int
5318+ocf_init(void)
5319+{
5320+ int error;
5321+ struct cryptoini crie, cria;
5322+ struct cryptodesc crda, crde;
5323+
5324+ memset(&crie, 0, sizeof(crie));
5325+ memset(&cria, 0, sizeof(cria));
5326+ memset(&crde, 0, sizeof(crde));
5327+ memset(&crda, 0, sizeof(crda));
5328+
5329+ cria.cri_alg = CRYPTO_SHA1_HMAC;
5330+ cria.cri_klen = 20 * 8;
5331+ cria.cri_key = "0123456789abcdefghij";
5332+
5333+ //crie.cri_alg = CRYPTO_3DES_CBC;
5334+ crie.cri_alg = CRYPTO_AES_CBC;
5335+ crie.cri_klen = 24 * 8;
5336+ crie.cri_key = "0123456789abcdefghijklmn";
5337+
5338+ crie.cri_next = &cria;
5339+
5340+ error = crypto_newsession(&ocf_cryptoid, &crie,
5341+ CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
5342+ if (error) {
5343+ printk("crypto_newsession failed %d\n", error);
5344+ return -1;
5345+ }
5346+ return 0;
5347+}
5348+
5349+static int
5350+ocf_cb(struct cryptop *crp)
5351+{
5352+ request_t *r = (request_t *) crp->crp_opaque;
5353+ unsigned long flags;
5354+
5355+ if (crp->crp_etype)
5356+ printk("Error in OCF processing: %d\n", crp->crp_etype);
5357+ crypto_freereq(crp);
5358+ crp = NULL;
5359+
5360+ /* do all requests but take at least 1 second */
5361+ spin_lock_irqsave(&ocfbench_counter_lock, flags);
5362+ total++;
5363+ if (total > request_num && jstart + HZ < jiffies) {
5364+ outstanding--;
5365+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
5366+ return 0;
5367+ }
5368+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
5369+
5370+ schedule_work(&r->work);
5371+ return 0;
5372+}
5373+
5374+
5375+static void
5376+ocf_request(void *arg)
5377+{
5378+ request_t *r = arg;
5379+ struct cryptop *crp = crypto_getreq(2);
5380+ struct cryptodesc *crde, *crda;
5381+ unsigned long flags;
5382+
5383+ if (!crp) {
5384+ spin_lock_irqsave(&ocfbench_counter_lock, flags);
5385+ outstanding--;
5386+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
5387+ return;
5388+ }
5389+
5390+ crde = crp->crp_desc;
5391+ crda = crde->crd_next;
5392+
5393+ crda->crd_skip = 0;
5394+ crda->crd_flags = 0;
5395+ crda->crd_len = request_size;
5396+ crda->crd_inject = request_size;
5397+ crda->crd_alg = CRYPTO_SHA1_HMAC;
5398+ crda->crd_key = "0123456789abcdefghij";
5399+ crda->crd_klen = 20 * 8;
5400+
5401+ crde->crd_skip = 0;
5402+ crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT;
5403+ crde->crd_len = request_size;
5404+ crde->crd_inject = request_size;
5405+ //crde->crd_alg = CRYPTO_3DES_CBC;
5406+ crde->crd_alg = CRYPTO_AES_CBC;
5407+ crde->crd_key = "0123456789abcdefghijklmn";
5408+ crde->crd_klen = 24 * 8;
5409+
5410+ crp->crp_ilen = request_size + 64;
5411+ crp->crp_flags = 0;
5412+ if (request_batch)
5413+ crp->crp_flags |= CRYPTO_F_BATCH;
5414+ if (request_cbimm)
5415+ crp->crp_flags |= CRYPTO_F_CBIMM;
5416+ crp->crp_buf = (caddr_t) r->buffer;
5417+ crp->crp_callback = ocf_cb;
5418+ crp->crp_sid = ocf_cryptoid;
5419+ crp->crp_opaque = (caddr_t) r;
5420+ crypto_dispatch(crp);
5421+}
5422+
5423+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
5424+static void
5425+ocf_request_wq(struct work_struct *work)
5426+{
5427+ request_t *r = container_of(work, request_t, work);
5428+ ocf_request(r);
5429+}
5430+#endif
5431+
5432+static void
5433+ocf_done(void)
5434+{
5435+ crypto_freesession(ocf_cryptoid);
5436+}
5437+
5438+/*************************************************************************/
5439+#ifdef BENCH_IXP_ACCESS_LIB
5440+/*************************************************************************/
5441+/*
5442+ * CryptoAcc benchmark routines
5443+ */
5444+
5445+static IxCryptoAccCtx ixp_ctx;
5446+static UINT32 ixp_ctx_id;
5447+static IX_MBUF ixp_pri;
5448+static IX_MBUF ixp_sec;
5449+static int ixp_registered = 0;
5450+
5451+static void ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp,
5452+ IxCryptoAccStatus status);
5453+static void ixp_perform_cb(UINT32 ctx_id, IX_MBUF *sbufp, IX_MBUF *dbufp,
5454+ IxCryptoAccStatus status);
5455+static void ixp_request(void *arg);
5456+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
5457+static void ixp_request_wq(struct work_struct *work);
5458+#endif
5459+
5460+static int
5461+ixp_init(void)
5462+{
5463+ IxCryptoAccStatus status;
5464+
5465+ ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
5466+ ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
5467+ ixp_ctx.cipherCtx.cipherKeyLen = 24;
5468+ ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
5469+ ixp_ctx.cipherCtx.cipherInitialVectorLen = IX_CRYPTO_ACC_DES_IV_64;
5470+ memcpy(ixp_ctx.cipherCtx.key.cipherKey, "0123456789abcdefghijklmn", 24);
5471+
5472+ ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
5473+ ixp_ctx.authCtx.authDigestLen = 12;
5474+ ixp_ctx.authCtx.aadLen = 0;
5475+ ixp_ctx.authCtx.authKeyLen = 20;
5476+ memcpy(ixp_ctx.authCtx.key.authKey, "0123456789abcdefghij", 20);
5477+
5478+ ixp_ctx.useDifferentSrcAndDestMbufs = 0;
5479+ ixp_ctx.operation = IX_CRYPTO_ACC_OP_ENCRYPT_AUTH ;
5480+
5481+ IX_MBUF_MLEN(&ixp_pri) = IX_MBUF_PKT_LEN(&ixp_pri) = 128;
5482+ IX_MBUF_MDATA(&ixp_pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
5483+ IX_MBUF_MLEN(&ixp_sec) = IX_MBUF_PKT_LEN(&ixp_sec) = 128;
5484+ IX_MBUF_MDATA(&ixp_sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
5485+
5486+ status = ixCryptoAccCtxRegister(&ixp_ctx, &ixp_pri, &ixp_sec,
5487+ ixp_register_cb, ixp_perform_cb, &ixp_ctx_id);
5488+
5489+ if (IX_CRYPTO_ACC_STATUS_SUCCESS == status) {
5490+ while (!ixp_registered)
5491+ schedule();
5492+ return ixp_registered < 0 ? -1 : 0;
5493+ }
5494+
5495+ printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
5496+ return -1;
5497+}
5498+
5499+static void
5500+ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
5501+{
5502+ if (bufp) {
5503+ IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
5504+ kfree(IX_MBUF_MDATA(bufp));
5505+ IX_MBUF_MDATA(bufp) = NULL;
5506+ }
5507+
5508+ if (IX_CRYPTO_ACC_STATUS_WAIT == status)
5509+ return;
5510+ if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
5511+ ixp_registered = 1;
5512+ else
5513+ ixp_registered = -1;
5514+}
5515+
5516+static void
5517+ixp_perform_cb(
5518+ UINT32 ctx_id,
5519+ IX_MBUF *sbufp,
5520+ IX_MBUF *dbufp,
5521+ IxCryptoAccStatus status)
5522+{
5523+ request_t *r = NULL;
5524+ unsigned long flags;
5525+
5526+ /* do all requests but take at least 1 second */
5527+ spin_lock_irqsave(&ocfbench_counter_lock, flags);
5528+ total++;
5529+ if (total > request_num && jstart + HZ < jiffies) {
5530+ outstanding--;
5531+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
5532+ return;
5533+ }
5534+
5535+ if (!sbufp || !(r = IX_MBUF_PRIV(sbufp))) {
5536+ printk("crappo %p %p\n", sbufp, r);
5537+ outstanding--;
5538+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
5539+ return;
5540+ }
5541+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
5542+
5543+ schedule_work(&r->work);
5544+}
5545+
5546+static void
5547+ixp_request(void *arg)
5548+{
5549+ request_t *r = arg;
5550+ IxCryptoAccStatus status;
5551+ unsigned long flags;
5552+
5553+ memset(&r->mbuf, 0, sizeof(r->mbuf));
5554+ IX_MBUF_MLEN(&r->mbuf) = IX_MBUF_PKT_LEN(&r->mbuf) = request_size + 64;
5555+ IX_MBUF_MDATA(&r->mbuf) = r->buffer;
5556+ IX_MBUF_PRIV(&r->mbuf) = r;
5557+ status = ixCryptoAccAuthCryptPerform(ixp_ctx_id, &r->mbuf, NULL,
5558+ 0, request_size, 0, request_size, request_size, r->buffer);
5559+ if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
5560+ printk("status1 = %d\n", status);
5561+ spin_lock_irqsave(&ocfbench_counter_lock, flags);
5562+ outstanding--;
5563+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
5564+ return;
5565+ }
5566+ return;
5567+}
5568+
5569+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
5570+static void
5571+ixp_request_wq(struct work_struct *work)
5572+{
5573+ request_t *r = container_of(work, request_t, work);
5574+ ixp_request(r);
5575+}
5576+#endif
5577+
5578+static void
5579+ixp_done(void)
5580+{
5581+ /* we should free the session here but I am lazy :-) */
5582+}
5583+
5584+/*************************************************************************/
5585+#endif /* BENCH_IXP_ACCESS_LIB */
5586+/*************************************************************************/
5587+
5588+int
5589+ocfbench_init(void)
5590+{
5591+ int i;
5592+ unsigned long mbps;
5593+ unsigned long flags;
5594+
5595+ printk("Crypto Speed tests\n");
5596+
5597+ requests = kmalloc(sizeof(request_t) * request_q_len, GFP_KERNEL);
5598+ if (!requests) {
5599+ printk("malloc failed\n");
5600+ return -EINVAL;
5601+ }
5602+
5603+ for (i = 0; i < request_q_len; i++) {
5604+ /* +64 for return data */
5605+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
5606+ INIT_WORK(&requests[i].work, ocf_request_wq);
5607+#else
5608+ INIT_WORK(&requests[i].work, ocf_request, &requests[i]);
5609+#endif
5610+ requests[i].buffer = kmalloc(request_size + 128, GFP_DMA);
5611+ if (!requests[i].buffer) {
5612+ printk("malloc failed\n");
5613+ return -EINVAL;
5614+ }
5615+ memset(requests[i].buffer, '0' + i, request_size + 128);
5616+ }
5617+
5618+ /*
5619+ * OCF benchmark
5620+ */
5621+ printk("OCF: testing ...\n");
5622+ if (ocf_init() == -1)
5623+ return -EINVAL;
5624+
5625+ spin_lock_init(&ocfbench_counter_lock);
5626+ total = outstanding = 0;
5627+ jstart = jiffies;
5628+ for (i = 0; i < request_q_len; i++) {
5629+ spin_lock_irqsave(&ocfbench_counter_lock, flags);
5630+ outstanding++;
5631+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
5632+ ocf_request(&requests[i]);
5633+ }
5634+ while (outstanding > 0)
5635+ schedule();
5636+ jstop = jiffies;
5637+
5638+ mbps = 0;
5639+ if (jstop > jstart) {
5640+ mbps = (unsigned long) total * (unsigned long) request_size * 8;
5641+ mbps /= ((jstop - jstart) * 1000) / HZ;
5642+ }
5643+ printk("OCF: %d requests of %d bytes in %d jiffies (%d.%03d Mbps)\n",
5644+ total, request_size, (int)(jstop - jstart),
5645+ ((int)mbps) / 1000, ((int)mbps) % 1000);
5646+ ocf_done();
5647+
5648+#ifdef BENCH_IXP_ACCESS_LIB
5649+ /*
5650+ * IXP benchmark
5651+ */
5652+ printk("IXP: testing ...\n");
5653+ ixp_init();
5654+ total = outstanding = 0;
5655+ jstart = jiffies;
5656+ for (i = 0; i < request_q_len; i++) {
5657+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
5658+ INIT_WORK(&requests[i].work, ixp_request_wq);
5659+#else
5660+ INIT_WORK(&requests[i].work, ixp_request, &requests[i]);
5661+#endif
5662+ spin_lock_irqsave(&ocfbench_counter_lock, flags);
5663+ outstanding++;
5664+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
5665+ ixp_request(&requests[i]);
5666+ }
5667+ while (outstanding > 0)
5668+ schedule();
5669+ jstop = jiffies;
5670+
5671+ mbps = 0;
5672+ if (jstop > jstart) {
5673+ mbps = (unsigned long) total * (unsigned long) request_size * 8;
5674+ mbps /= ((jstop - jstart) * 1000) / HZ;
5675+ }
5676+ printk("IXP: %d requests of %d bytes in %d jiffies (%d.%03d Mbps)\n",
5677+ total, request_size, jstop - jstart,
5678+ ((int)mbps) / 1000, ((int)mbps) % 1000);
5679+ ixp_done();
5680+#endif /* BENCH_IXP_ACCESS_LIB */
5681+
5682+ for (i = 0; i < request_q_len; i++)
5683+ kfree(requests[i].buffer);
5684+ kfree(requests);
5685+ return -EINVAL; /* always fail to load so it can be re-run quickly ;-) */
5686+}
5687+
5688+static void __exit ocfbench_exit(void)
5689+{
5690+}
5691+
5692+module_init(ocfbench_init);
5693+module_exit(ocfbench_exit);
5694+
5695+MODULE_LICENSE("BSD");
5696+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
5697+MODULE_DESCRIPTION("Benchmark various in-kernel crypto speeds");
5698diff --git a/crypto/ocf/ocf-compat.h b/crypto/ocf/ocf-compat.h
5699new file mode 100644
5700index 0000000..4ad1223
5701--- /dev/null
5702+++ b/crypto/ocf/ocf-compat.h
5703@@ -0,0 +1,372 @@
5704+#ifndef _BSD_COMPAT_H_
5705+#define _BSD_COMPAT_H_ 1
5706+/****************************************************************************/
5707+/*
5708+ * Provide compat routines for older linux kernels and BSD kernels
5709+ *
5710+ * Written by David McCullough <david_mccullough@mcafee.com>
5711+ * Copyright (C) 2010 David McCullough <david_mccullough@mcafee.com>
5712+ *
5713+ * LICENSE TERMS
5714+ *
5715+ * The free distribution and use of this software in both source and binary
5716+ * form is allowed (with or without changes) provided that:
5717+ *
5718+ * 1. distributions of this source code include the above copyright
5719+ * notice, this list of conditions and the following disclaimer;
5720+ *
5721+ * 2. distributions in binary form include the above copyright
5722+ * notice, this list of conditions and the following disclaimer
5723+ * in the documentation and/or other associated materials;
5724+ *
5725+ * 3. the copyright holder's name is not used to endorse products
5726+ * built using this software without specific written permission.
5727+ *
5728+ * ALTERNATIVELY, provided that this notice is retained in full, this file
5729+ * may be distributed under the terms of the GNU General Public License (GPL),
5730+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
5731+ *
5732+ * DISCLAIMER
5733+ *
5734+ * This software is provided 'as is' with no explicit or implied warranties
5735+ * in respect of its properties, including, but not limited to, correctness
5736+ * and/or fitness for purpose.
5737+ */
5738+/****************************************************************************/
5739+#ifdef __KERNEL__
5740+#include <linux/version.h>
5741+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
5742+#include <linux/config.h>
5743+#endif
5744+
5745+/*
5746+ * fake some BSD driver interface stuff specifically for OCF use
5747+ */
5748+
5749+typedef struct ocf_device *device_t;
5750+
5751+typedef struct {
5752+ int (*cryptodev_newsession)(device_t dev, u_int32_t *sidp, struct cryptoini *cri);
5753+ int (*cryptodev_freesession)(device_t dev, u_int64_t tid);
5754+ int (*cryptodev_process)(device_t dev, struct cryptop *crp, int hint);
5755+ int (*cryptodev_kprocess)(device_t dev, struct cryptkop *krp, int hint);
5756+} device_method_t;
5757+#define DEVMETHOD(id, func) id: func
5758+
5759+struct ocf_device {
5760+ char name[32]; /* the driver name */
5761+ char nameunit[32]; /* the driver name + HW instance */
5762+ int unit;
5763+ device_method_t methods;
5764+ void *softc;
5765+};
5766+
5767+#define CRYPTODEV_NEWSESSION(dev, sid, cri) \
5768+ ((*(dev)->methods.cryptodev_newsession)(dev,sid,cri))
5769+#define CRYPTODEV_FREESESSION(dev, sid) \
5770+ ((*(dev)->methods.cryptodev_freesession)(dev, sid))
5771+#define CRYPTODEV_PROCESS(dev, crp, hint) \
5772+ ((*(dev)->methods.cryptodev_process)(dev, crp, hint))
5773+#define CRYPTODEV_KPROCESS(dev, krp, hint) \
5774+ ((*(dev)->methods.cryptodev_kprocess)(dev, krp, hint))
5775+
5776+#define device_get_name(dev) ((dev)->name)
5777+#define device_get_nameunit(dev) ((dev)->nameunit)
5778+#define device_get_unit(dev) ((dev)->unit)
5779+#define device_get_softc(dev) ((dev)->softc)
5780+
5781+#define softc_device_decl \
5782+ struct ocf_device _device; \
5783+ device_t
5784+
5785+#define softc_device_init(_sc, _name, _unit, _methods) \
5786+ if (1) {\
5787+ strncpy((_sc)->_device.name, _name, sizeof((_sc)->_device.name) - 1); \
5788+ snprintf((_sc)->_device.nameunit, sizeof((_sc)->_device.name), "%s%d", _name, _unit); \
5789+ (_sc)->_device.unit = _unit; \
5790+ (_sc)->_device.methods = _methods; \
5791+ (_sc)->_device.softc = (void *) _sc; \
5792+ *(device_t *)((softc_get_device(_sc))+1) = &(_sc)->_device; \
5793+ } else
5794+
5795+#define softc_get_device(_sc) (&(_sc)->_device)
5796+
5797+/*
5798+ * iomem support for 2.4 and 2.6 kernels
5799+ */
5800+#include <linux/version.h>
5801+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
5802+#define ocf_iomem_t unsigned long
5803+
5804+/*
5805+ * implement simple workqueue like support for older kernels
5806+ */
5807+
5808+#include <linux/tqueue.h>
5809+
5810+#define work_struct tq_struct
5811+
5812+#define INIT_WORK(wp, fp, ap) \
5813+ do { \
5814+ (wp)->sync = 0; \
5815+ (wp)->routine = (fp); \
5816+ (wp)->data = (ap); \
5817+ } while (0)
5818+
5819+#define schedule_work(wp) \
5820+ do { \
5821+ queue_task((wp), &tq_immediate); \
5822+ mark_bh(IMMEDIATE_BH); \
5823+ } while (0)
5824+
5825+#define flush_scheduled_work() run_task_queue(&tq_immediate)
5826+
5827+#else
5828+#define ocf_iomem_t void __iomem *
5829+
5830+#include <linux/workqueue.h>
5831+
5832+#endif
5833+
5834+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
5835+#include <linux/fdtable.h>
5836+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
5837+#define files_fdtable(files) (files)
5838+#endif
5839+
5840+#ifdef MODULE_PARM
5841+#undef module_param /* just in case */
5842+#define module_param(a,b,c) MODULE_PARM(a,"i")
5843+#endif
5844+
5845+#define bzero(s,l) memset(s,0,l)
5846+#define bcopy(s,d,l) memcpy(d,s,l)
5847+#define bcmp(x, y, l) memcmp(x,y,l)
5848+
5849+#define MIN(x,y) ((x) < (y) ? (x) : (y))
5850+
5851+#define device_printf(dev, a...) ({ \
5852+ printk("%s: ", device_get_nameunit(dev)); printk(a); \
5853+ })
5854+
5855+#undef printf
5856+#define printf(fmt...) printk(fmt)
5857+
5858+#define KASSERT(c,p) if (!(c)) { printk p ; } else
5859+
5860+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
5861+#define ocf_daemonize(str) \
5862+ daemonize(); \
5863+ spin_lock_irq(&current->sigmask_lock); \
5864+ sigemptyset(&current->blocked); \
5865+ recalc_sigpending(current); \
5866+ spin_unlock_irq(&current->sigmask_lock); \
5867+ sprintf(current->comm, str);
5868+#else
5869+#define ocf_daemonize(str) daemonize(str);
5870+#endif
5871+
5872+#define TAILQ_INSERT_TAIL(q,d,m) list_add_tail(&(d)->m, (q))
5873+#define TAILQ_EMPTY(q) list_empty(q)
5874+#define TAILQ_FOREACH(v, q, m) list_for_each_entry(v, q, m)
5875+
5876+#define read_random(p,l) get_random_bytes(p,l)
5877+
5878+#define DELAY(x) ((x) > 2000 ? mdelay((x)/1000) : udelay(x))
5879+#define strtoul simple_strtoul
5880+
5881+#define pci_get_vendor(dev) ((dev)->vendor)
5882+#define pci_get_device(dev) ((dev)->device)
5883+
5884+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
5885+#define pci_set_consistent_dma_mask(dev, mask) (0)
5886+#endif
5887+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
5888+#define pci_dma_sync_single_for_cpu pci_dma_sync_single
5889+#endif
5890+
5891+#ifndef DMA_32BIT_MASK
5892+#define DMA_32BIT_MASK 0x00000000ffffffffULL
5893+#endif
5894+
5895+#ifndef htole32
5896+#define htole32(x) cpu_to_le32(x)
5897+#endif
5898+#ifndef htobe32
5899+#define htobe32(x) cpu_to_be32(x)
5900+#endif
5901+#ifndef htole16
5902+#define htole16(x) cpu_to_le16(x)
5903+#endif
5904+#ifndef htobe16
5905+#define htobe16(x) cpu_to_be16(x)
5906+#endif
5907+
5908+/* older kernels don't have these */
5909+
5910+#include <asm/irq.h>
5911+#if !defined(IRQ_NONE) && !defined(IRQ_RETVAL)
5912+#define IRQ_NONE
5913+#define IRQ_HANDLED
5914+#define IRQ_WAKE_THREAD
5915+#define IRQ_RETVAL
5916+#define irqreturn_t void
5917+typedef irqreturn_t (*irq_handler_t)(int irq, void *arg, struct pt_regs *regs);
5918+#endif
5919+#ifndef IRQF_SHARED
5920+#define IRQF_SHARED SA_SHIRQ
5921+#endif
5922+
5923+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
5924+# define strlcpy(dest,src,len) \
5925+ ({strncpy(dest,src,(len)-1); ((char *)dest)[(len)-1] = '\0'; })
5926+#endif
5927+
5928+#ifndef MAX_ERRNO
5929+#define MAX_ERRNO 4095
5930+#endif
5931+#ifndef IS_ERR_VALUE
5932+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,5)
5933+#include <linux/err.h>
5934+#endif
5935+#ifndef IS_ERR_VALUE
5936+#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
5937+#endif
5938+#endif
5939+
5940+/*
5941+ * common debug for all
5942+ */
5943+#if 1
5944+#define dprintk(a...) do { if (debug) printk(a); } while(0)
5945+#else
5946+#define dprintk(a...)
5947+#endif
5948+
5949+#ifndef SLAB_ATOMIC
5950+/* Changed in 2.6.20, must use GFP_ATOMIC now */
5951+#define SLAB_ATOMIC GFP_ATOMIC
5952+#endif
5953+
5954+/*
5955+ * need some additional support for older kernels */
5956+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,2)
5957+#define pci_register_driver_compat(driver, rc) \
5958+ do { \
5959+ if ((rc) > 0) { \
5960+ (rc) = 0; \
5961+ } else if (rc == 0) { \
5962+ (rc) = -ENODEV; \
5963+ } else { \
5964+ pci_unregister_driver(driver); \
5965+ } \
5966+ } while (0)
5967+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
5968+#define pci_register_driver_compat(driver,rc) ((rc) = (rc) < 0 ? (rc) : 0)
5969+#else
5970+#define pci_register_driver_compat(driver,rc)
5971+#endif
5972+
5973+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
5974+
5975+#include <linux/mm.h>
5976+#include <asm/scatterlist.h>
5977+
5978+static inline void sg_set_page(struct scatterlist *sg, struct page *page,
5979+ unsigned int len, unsigned int offset)
5980+{
5981+ sg->page = page;
5982+ sg->offset = offset;
5983+ sg->length = len;
5984+}
5985+
5986+static inline void *sg_virt(struct scatterlist *sg)
5987+{
5988+ return page_address(sg->page) + sg->offset;
5989+}
5990+
5991+#define sg_init_table(sg, n)
5992+
5993+#define sg_mark_end(sg)
5994+
5995+#endif
5996+
5997+#ifndef late_initcall
5998+#define late_initcall(init) module_init(init)
5999+#endif
6000+
6001+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) || !defined(CONFIG_SMP)
6002+#define ocf_for_each_cpu(cpu) for ((cpu) = 0; (cpu) == 0; (cpu)++)
6003+#else
6004+#define ocf_for_each_cpu(cpu) for_each_present_cpu(cpu)
6005+#endif
6006+
6007+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
6008+#include <linux/sched.h>
6009+#define kill_proc(p,s,v) send_sig(s,find_task_by_vpid(p),0)
6010+#endif
6011+
6012+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
6013+
6014+struct ocf_thread {
6015+ struct task_struct *task;
6016+ int (*func)(void *arg);
6017+ void *arg;
6018+};
6019+
6020+/* thread startup helper func */
6021+static inline int ocf_run_thread(void *arg)
6022+{
6023+ struct ocf_thread *t = (struct ocf_thread *) arg;
6024+ if (!t)
6025+ return -1; /* very bad */
6026+ t->task = current;
6027+ daemonize();
6028+ spin_lock_irq(&current->sigmask_lock);
6029+ sigemptyset(&current->blocked);
6030+ recalc_sigpending(current);
6031+ spin_unlock_irq(&current->sigmask_lock);
6032+ return (*t->func)(t->arg);
6033+}
6034+
6035+#define kthread_create(f,a,fmt...) \
6036+ ({ \
6037+ struct ocf_thread t; \
6038+ pid_t p; \
6039+ t.task = NULL; \
6040+ t.func = (f); \
6041+ t.arg = (a); \
6042+ p = kernel_thread(ocf_run_thread, &t, CLONE_FS|CLONE_FILES); \
6043+ while (p != (pid_t) -1 && t.task == NULL) \
6044+ schedule(); \
6045+ if (t.task) \
6046+ snprintf(t.task->comm, sizeof(t.task->comm), fmt); \
6047+ (t.task); \
6048+ })
6049+
6050+#define kthread_bind(t,cpu) /**/
6051+
6052+#define kthread_should_stop() (strcmp(current->comm, "stopping") == 0)
6053+
6054+#define kthread_stop(t) \
6055+ ({ \
6056+ strcpy((t)->comm, "stopping"); \
6057+ kill_proc((t)->pid, SIGTERM, 1); \
6058+ do { \
6059+ schedule(); \
6060+ } while (kill_proc((t)->pid, SIGTERM, 1) == 0); \
6061+ })
6062+
6063+#else
6064+#include <linux/kthread.h>
6065+#endif
6066+
6067+
6068+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
6069+#define skb_frag_page(x) ((x)->page)
6070+#endif
6071+
6072+#endif /* __KERNEL__ */
6073+
6074+/****************************************************************************/
6075+#endif /* _BSD_COMPAT_H_ */
6076diff --git a/crypto/ocf/ocfnull/Makefile b/crypto/ocf/ocfnull/Makefile
6077new file mode 100644
6078index 0000000..044bcac
6079--- /dev/null
6080+++ b/crypto/ocf/ocfnull/Makefile
6081@@ -0,0 +1,12 @@
6082+# for SGlinux builds
6083+-include $(ROOTDIR)/modules/.config
6084+
6085+obj-$(CONFIG_OCF_OCFNULL) += ocfnull.o
6086+
6087+obj ?= .
6088+EXTRA_CFLAGS += -I$(obj)/..
6089+
6090+ifdef TOPDIR
6091+-include $(TOPDIR)/Rules.make
6092+endif
6093+
6094diff --git a/crypto/ocf/ocfnull/ocfnull.c b/crypto/ocf/ocfnull/ocfnull.c
6095new file mode 100644
6096index 0000000..9cf3f6e
6097--- /dev/null
6098+++ b/crypto/ocf/ocfnull/ocfnull.c
6099@@ -0,0 +1,204 @@
6100+/*
6101+ * An OCF module for determining the cost of crypto versus the cost of
6102+ * IPSec processing outside of OCF. This modules gives us the effect of
6103+ * zero cost encryption, of course you will need to run it at both ends
6104+ * since it does no crypto at all.
6105+ *
6106+ * Written by David McCullough <david_mccullough@mcafee.com>
6107+ * Copyright (C) 2006-2010 David McCullough
6108+ *
6109+ * LICENSE TERMS
6110+ *
6111+ * The free distribution and use of this software in both source and binary
6112+ * form is allowed (with or without changes) provided that:
6113+ *
6114+ * 1. distributions of this source code include the above copyright
6115+ * notice, this list of conditions and the following disclaimer;
6116+ *
6117+ * 2. distributions in binary form include the above copyright
6118+ * notice, this list of conditions and the following disclaimer
6119+ * in the documentation and/or other associated materials;
6120+ *
6121+ * 3. the copyright holder's name is not used to endorse products
6122+ * built using this software without specific written permission.
6123+ *
6124+ * ALTERNATIVELY, provided that this notice is retained in full, this product
6125+ * may be distributed under the terms of the GNU General Public License (GPL),
6126+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
6127+ *
6128+ * DISCLAIMER
6129+ *
6130+ * This software is provided 'as is' with no explicit or implied warranties
6131+ * in respect of its properties, including, but not limited to, correctness
6132+ * and/or fitness for purpose.
6133+ */
6134+
6135+#include <linux/version.h>
6136+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
6137+#include <linux/config.h>
6138+#endif
6139+#include <linux/module.h>
6140+#include <linux/init.h>
6141+#include <linux/list.h>
6142+#include <linux/slab.h>
6143+#include <linux/sched.h>
6144+#include <linux/wait.h>
6145+#include <linux/crypto.h>
6146+#include <linux/interrupt.h>
6147+
6148+#include <cryptodev.h>
6149+#include <uio.h>
6150+
6151+static int32_t null_id = -1;
6152+static u_int32_t null_sesnum = 0;
6153+
6154+static int null_process(device_t, struct cryptop *, int);
6155+static int null_newsession(device_t, u_int32_t *, struct cryptoini *);
6156+static int null_freesession(device_t, u_int64_t);
6157+
6158+#define debug ocfnull_debug
6159+int ocfnull_debug = 0;
6160+module_param(ocfnull_debug, int, 0644);
6161+MODULE_PARM_DESC(ocfnull_debug, "Enable debug");
6162+
6163+/*
6164+ * dummy device structure
6165+ */
6166+
6167+static struct {
6168+ softc_device_decl sc_dev;
6169+} nulldev;
6170+
6171+static device_method_t null_methods = {
6172+ /* crypto device methods */
6173+ DEVMETHOD(cryptodev_newsession, null_newsession),
6174+ DEVMETHOD(cryptodev_freesession,null_freesession),
6175+ DEVMETHOD(cryptodev_process, null_process),
6176+};
6177+
6178+/*
6179+ * Generate a new software session.
6180+ */
6181+static int
6182+null_newsession(device_t arg, u_int32_t *sid, struct cryptoini *cri)
6183+{
6184+ dprintk("%s()\n", __FUNCTION__);
6185+ if (sid == NULL || cri == NULL) {
6186+ dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
6187+ return EINVAL;
6188+ }
6189+
6190+ if (null_sesnum == 0)
6191+ null_sesnum++;
6192+ *sid = null_sesnum++;
6193+ return 0;
6194+}
6195+
6196+
6197+/*
6198+ * Free a session.
6199+ */
6200+static int
6201+null_freesession(device_t arg, u_int64_t tid)
6202+{
6203+ u_int32_t sid = CRYPTO_SESID2LID(tid);
6204+
6205+ dprintk("%s()\n", __FUNCTION__);
6206+ if (sid > null_sesnum) {
6207+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
6208+ return EINVAL;
6209+ }
6210+
6211+ /* Silently accept and return */
6212+ if (sid == 0)
6213+ return 0;
6214+ return 0;
6215+}
6216+
6217+
6218+/*
6219+ * Process a request.
6220+ */
6221+static int
6222+null_process(device_t arg, struct cryptop *crp, int hint)
6223+{
6224+ unsigned int lid;
6225+
6226+ dprintk("%s()\n", __FUNCTION__);
6227+
6228+ /* Sanity check */
6229+ if (crp == NULL) {
6230+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
6231+ return EINVAL;
6232+ }
6233+
6234+ crp->crp_etype = 0;
6235+
6236+ if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
6237+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
6238+ crp->crp_etype = EINVAL;
6239+ goto done;
6240+ }
6241+
6242+ /*
6243+ * find the session we are using
6244+ */
6245+
6246+ lid = crp->crp_sid & 0xffffffff;
6247+ if (lid >= null_sesnum || lid == 0) {
6248+ crp->crp_etype = ENOENT;
6249+ dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
6250+ goto done;
6251+ }
6252+
6253+done:
6254+ crypto_done(crp);
6255+ return 0;
6256+}
6257+
6258+
6259+/*
6260+ * our driver startup and shutdown routines
6261+ */
6262+
6263+static int
6264+null_init(void)
6265+{
6266+ dprintk("%s(%p)\n", __FUNCTION__, null_init);
6267+
6268+ memset(&nulldev, 0, sizeof(nulldev));
6269+ softc_device_init(&nulldev, "ocfnull", 0, null_methods);
6270+
6271+ null_id = crypto_get_driverid(softc_get_device(&nulldev),
6272+ CRYPTOCAP_F_HARDWARE);
6273+ if (null_id < 0)
6274+ panic("ocfnull: crypto device cannot initialize!");
6275+
6276+#define REGISTER(alg) \
6277+ crypto_register(null_id,alg,0,0)
6278+ REGISTER(CRYPTO_DES_CBC);
6279+ REGISTER(CRYPTO_3DES_CBC);
6280+ REGISTER(CRYPTO_RIJNDAEL128_CBC);
6281+ REGISTER(CRYPTO_MD5);
6282+ REGISTER(CRYPTO_SHA1);
6283+ REGISTER(CRYPTO_MD5_HMAC);
6284+ REGISTER(CRYPTO_SHA1_HMAC);
6285+#undef REGISTER
6286+
6287+ return 0;
6288+}
6289+
6290+static void
6291+null_exit(void)
6292+{
6293+ dprintk("%s()\n", __FUNCTION__);
6294+ crypto_unregister_all(null_id);
6295+ null_id = -1;
6296+}
6297+
6298+module_init(null_init);
6299+module_exit(null_exit);
6300+
6301+MODULE_LICENSE("Dual BSD/GPL");
6302+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
6303+MODULE_DESCRIPTION("ocfnull - claims a lot but does nothing");
6304diff --git a/crypto/ocf/random.c b/crypto/ocf/random.c
6305new file mode 100644
6306index 0000000..4bb773f
6307--- /dev/null
6308+++ b/crypto/ocf/random.c
6309@@ -0,0 +1,317 @@
6310+/*
6311+ * A system independent way of adding entropy to the kernel's pool
6312+ * this way the drivers can focus on the real work and we can take
6313+ * care of pushing it to the appropriate place in the kernel.
6314+ *
6315+ * This should be fast and callable from timers/interrupts
6316+ *
6317+ * Written by David McCullough <david_mccullough@mcafee.com>
6318+ * Copyright (C) 2006-2010 David McCullough
6319+ * Copyright (C) 2004-2005 Intel Corporation.
6320+ *
6321+ * LICENSE TERMS
6322+ *
6323+ * The free distribution and use of this software in both source and binary
6324+ * form is allowed (with or without changes) provided that:
6325+ *
6326+ * 1. distributions of this source code include the above copyright
6327+ * notice, this list of conditions and the following disclaimer;
6328+ *
6329+ * 2. distributions in binary form include the above copyright
6330+ * notice, this list of conditions and the following disclaimer
6331+ * in the documentation and/or other associated materials;
6332+ *
6333+ * 3. the copyright holder's name is not used to endorse products
6334+ * built using this software without specific written permission.
6335+ *
6336+ * ALTERNATIVELY, provided that this notice is retained in full, this product
6337+ * may be distributed under the terms of the GNU General Public License (GPL),
6338+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
6339+ *
6340+ * DISCLAIMER
6341+ *
6342+ * This software is provided 'as is' with no explicit or implied warranties
6343+ * in respect of its properties, including, but not limited to, correctness
6344+ * and/or fitness for purpose.
6345+ */
6346+
6347+#include <linux/version.h>
6348+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
6349+#include <linux/config.h>
6350+#endif
6351+#include <linux/module.h>
6352+#include <linux/init.h>
6353+#include <linux/list.h>
6354+#include <linux/slab.h>
6355+#include <linux/wait.h>
6356+#include <linux/sched.h>
6357+#include <linux/spinlock.h>
6358+#include <linux/unistd.h>
6359+#include <linux/poll.h>
6360+#include <linux/random.h>
6361+#include <cryptodev.h>
6362+
6363+#ifdef CONFIG_OCF_FIPS
6364+#include "rndtest.h"
6365+#endif
6366+
6367+#ifndef HAS_RANDOM_INPUT_WAIT
6368+#error "Please do not enable OCF_RANDOMHARVEST unless you have applied patches"
6369+#endif
6370+
6371+/*
6372+ * a hack to access the debug levels from the crypto driver
6373+ */
6374+extern int crypto_debug;
6375+#define debug crypto_debug
6376+
6377+/*
6378+ * a list of all registered random providers
6379+ */
6380+static LIST_HEAD(random_ops);
6381+static int started = 0;
6382+static int initted = 0;
6383+
6384+struct random_op {
6385+ struct list_head random_list;
6386+ u_int32_t driverid;
6387+ int (*read_random)(void *arg, u_int32_t *buf, int len);
6388+ void *arg;
6389+};
6390+
6391+static int random_proc(void *arg);
6392+
6393+static pid_t randomproc = (pid_t) -1;
6394+static spinlock_t random_lock;
6395+
6396+/*
6397+ * just init the spin locks
6398+ */
6399+static int
6400+crypto_random_init(void)
6401+{
6402+ spin_lock_init(&random_lock);
6403+ initted = 1;
6404+ return(0);
6405+}
6406+
6407+/*
6408+ * Add the given random reader to our list (if not present)
6409+ * and start the thread (if not already started)
6410+ *
6411+ * we have to assume that driver id is ok for now
6412+ */
6413+int
6414+crypto_rregister(
6415+ u_int32_t driverid,
6416+ int (*read_random)(void *arg, u_int32_t *buf, int len),
6417+ void *arg)
6418+{
6419+ unsigned long flags;
6420+ int ret = 0;
6421+ struct random_op *rops, *tmp;
6422+
6423+ dprintk("%s,%d: %s(0x%x, %p, %p)\n", __FILE__, __LINE__,
6424+ __FUNCTION__, driverid, read_random, arg);
6425+
6426+ if (!initted)
6427+ crypto_random_init();
6428+
6429+#if 0
6430+ struct cryptocap *cap;
6431+
6432+ cap = crypto_checkdriver(driverid);
6433+ if (!cap)
6434+ return EINVAL;
6435+#endif
6436+
6437+ list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
6438+ if (rops->driverid == driverid && rops->read_random == read_random)
6439+ return EEXIST;
6440+ }
6441+
6442+ rops = (struct random_op *) kmalloc(sizeof(*rops), GFP_KERNEL);
6443+ if (!rops)
6444+ return ENOMEM;
6445+
6446+ rops->driverid = driverid;
6447+ rops->read_random = read_random;
6448+ rops->arg = arg;
6449+
6450+ spin_lock_irqsave(&random_lock, flags);
6451+ list_add_tail(&rops->random_list, &random_ops);
6452+ if (!started) {
6453+ randomproc = kernel_thread(random_proc, NULL, CLONE_FS|CLONE_FILES);
6454+ if (randomproc < 0) {
6455+ ret = randomproc;
6456+ printk("crypto: crypto_rregister cannot start random thread; "
6457+ "error %d", ret);
6458+ } else
6459+ started = 1;
6460+ }
6461+ spin_unlock_irqrestore(&random_lock, flags);
6462+
6463+ return ret;
6464+}
6465+EXPORT_SYMBOL(crypto_rregister);
6466+
6467+int
6468+crypto_runregister_all(u_int32_t driverid)
6469+{
6470+ struct random_op *rops, *tmp;
6471+ unsigned long flags;
6472+
6473+ dprintk("%s,%d: %s(0x%x)\n", __FILE__, __LINE__, __FUNCTION__, driverid);
6474+
6475+ list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
6476+ if (rops->driverid == driverid) {
6477+ list_del(&rops->random_list);
6478+ kfree(rops);
6479+ }
6480+ }
6481+
6482+ spin_lock_irqsave(&random_lock, flags);
6483+ if (list_empty(&random_ops) && started)
6484+ kill_proc(randomproc, SIGKILL, 1);
6485+ spin_unlock_irqrestore(&random_lock, flags);
6486+ return(0);
6487+}
6488+EXPORT_SYMBOL(crypto_runregister_all);
6489+
6490+/*
6491+ * while we can add entropy to random.c continue to read random data from
6492+ * the drivers and push it to random.
6493+ */
6494+static int
6495+random_proc(void *arg)
6496+{
6497+ int n;
6498+ int wantcnt;
6499+ int bufcnt = 0;
6500+ int retval = 0;
6501+ int *buf = NULL;
6502+
6503+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
6504+ daemonize();
6505+ spin_lock_irq(&current->sigmask_lock);
6506+ sigemptyset(&current->blocked);
6507+ recalc_sigpending(current);
6508+ spin_unlock_irq(&current->sigmask_lock);
6509+ sprintf(current->comm, "ocf-random");
6510+#else
6511+ daemonize("ocf-random");
6512+ allow_signal(SIGKILL);
6513+#endif
6514+
6515+ (void) get_fs();
6516+ set_fs(get_ds());
6517+
6518+#ifdef CONFIG_OCF_FIPS
6519+#define NUM_INT (RNDTEST_NBYTES/sizeof(int))
6520+#else
6521+#define NUM_INT 32
6522+#endif
6523+
6524+ /*
6525+	 * some devices can transfer their RNG data directly into memory,
6526+ * so make sure it is device friendly
6527+ */
6528+ buf = kmalloc(NUM_INT * sizeof(int), GFP_DMA);
6529+ if (NULL == buf) {
6530+ printk("crypto: RNG could not allocate memory\n");
6531+ retval = -ENOMEM;
6532+ goto bad_alloc;
6533+ }
6534+
6535+ wantcnt = NUM_INT; /* start by adding some entropy */
6536+
6537+ /*
6538+	 * it's possible due to errors or driver removal that we no longer
6539+ * have anything to do, if so exit or we will consume all the CPU
6540+ * doing nothing
6541+ */
6542+ while (!list_empty(&random_ops)) {
6543+ struct random_op *rops, *tmp;
6544+
6545+#ifdef CONFIG_OCF_FIPS
6546+ if (wantcnt)
6547+ wantcnt = NUM_INT; /* FIPs mode can do 20000 bits or none */
6548+#endif
6549+
6550+ /* see if we can get enough entropy to make the world
6551+ * a better place.
6552+ */
6553+ while (bufcnt < wantcnt && bufcnt < NUM_INT) {
6554+ list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
6555+
6556+ n = (*rops->read_random)(rops->arg, &buf[bufcnt],
6557+ NUM_INT - bufcnt);
6558+
6559+ /* on failure remove the random number generator */
6560+ if (n == -1) {
6561+ list_del(&rops->random_list);
6562+ printk("crypto: RNG (driverid=0x%x) failed, disabling\n",
6563+ rops->driverid);
6564+ kfree(rops);
6565+ } else if (n > 0)
6566+ bufcnt += n;
6567+ }
6568+ /* give up CPU for a bit, just in case as this is a loop */
6569+ schedule();
6570+ }
6571+
6572+
6573+#ifdef CONFIG_OCF_FIPS
6574+ if (bufcnt > 0 && rndtest_buf((unsigned char *) &buf[0])) {
6575+ dprintk("crypto: buffer had fips errors, discarding\n");
6576+ bufcnt = 0;
6577+ }
6578+#endif
6579+
6580+ /*
6581+ * if we have a certified buffer, we can send some data
6582+ * to /dev/random and move along
6583+ */
6584+ if (bufcnt > 0) {
6585+ /* add what we have */
6586+ random_input_words(buf, bufcnt, bufcnt*sizeof(int)*8);
6587+ bufcnt = 0;
6588+ }
6589+
6590+ /* give up CPU for a bit so we don't hog while filling */
6591+ schedule();
6592+
6593+ /* wait for needing more */
6594+ wantcnt = random_input_wait();
6595+
6596+ if (wantcnt <= 0)
6597+ wantcnt = 0; /* try to get some info again */
6598+ else
6599+ /* round up to one word or we can loop forever */
6600+ wantcnt = (wantcnt + (sizeof(int)*8)) / (sizeof(int)*8);
6601+ if (wantcnt > NUM_INT) {
6602+ wantcnt = NUM_INT;
6603+ }
6604+
6605+ if (signal_pending(current)) {
6606+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
6607+ spin_lock_irq(&current->sigmask_lock);
6608+#endif
6609+ flush_signals(current);
6610+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
6611+ spin_unlock_irq(&current->sigmask_lock);
6612+#endif
6613+ }
6614+ }
6615+
6616+ kfree(buf);
6617+
6618+bad_alloc:
6619+ spin_lock_irq(&random_lock);
6620+ randomproc = (pid_t) -1;
6621+ started = 0;
6622+ spin_unlock_irq(&random_lock);
6623+
6624+ return retval;
6625+}
6626+
6627diff --git a/crypto/ocf/rndtest.c b/crypto/ocf/rndtest.c
6628new file mode 100644
6629index 0000000..7bed6a1
6630--- /dev/null
6631+++ b/crypto/ocf/rndtest.c
6632@@ -0,0 +1,300 @@
6633+/* $OpenBSD$ */
6634+
6635+/*
6636+ * OCF/Linux port done by David McCullough <david_mccullough@mcafee.com>
6637+ * Copyright (C) 2006-2010 David McCullough
6638+ * Copyright (C) 2004-2005 Intel Corporation.
6639+ * The license and original author are listed below.
6640+ *
6641+ * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
6642+ * All rights reserved.
6643+ *
6644+ * Redistribution and use in source and binary forms, with or without
6645+ * modification, are permitted provided that the following conditions
6646+ * are met:
6647+ * 1. Redistributions of source code must retain the above copyright
6648+ * notice, this list of conditions and the following disclaimer.
6649+ * 2. Redistributions in binary form must reproduce the above copyright
6650+ * notice, this list of conditions and the following disclaimer in the
6651+ * documentation and/or other materials provided with the distribution.
6652+ * 3. All advertising materials mentioning features or use of this software
6653+ * must display the following acknowledgement:
6654+ * This product includes software developed by Jason L. Wright
6655+ * 4. The name of the author may not be used to endorse or promote products
6656+ * derived from this software without specific prior written permission.
6657+ *
6658+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
6659+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
6660+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
6661+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
6662+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
6663+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
6664+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
6665+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
6666+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
6667+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
6668+ * POSSIBILITY OF SUCH DAMAGE.
6669+ */
6670+
6671+#include <linux/version.h>
6672+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
6673+#include <linux/config.h>
6674+#endif
6675+#include <linux/module.h>
6676+#include <linux/list.h>
6677+#include <linux/wait.h>
6678+#include <linux/time.h>
6679+#include <linux/unistd.h>
6680+#include <linux/kernel.h>
6681+#include <linux/string.h>
6682+#include <linux/time.h>
6683+#include <cryptodev.h>
6684+#include "rndtest.h"
6685+
6686+static struct rndtest_stats rndstats;
6687+
6688+static void rndtest_test(struct rndtest_state *);
6689+
6690+/* The tests themselves */
6691+static int rndtest_monobit(struct rndtest_state *);
6692+static int rndtest_runs(struct rndtest_state *);
6693+static int rndtest_longruns(struct rndtest_state *);
6694+static int rndtest_chi_4(struct rndtest_state *);
6695+
6696+static int rndtest_runs_check(struct rndtest_state *, int, int *);
6697+static void rndtest_runs_record(struct rndtest_state *, int, int *);
6698+
6699+static const struct rndtest_testfunc {
6700+ int (*test)(struct rndtest_state *);
6701+} rndtest_funcs[] = {
6702+ { rndtest_monobit },
6703+ { rndtest_runs },
6704+ { rndtest_chi_4 },
6705+ { rndtest_longruns },
6706+};
6707+
6708+#define RNDTEST_NTESTS (sizeof(rndtest_funcs)/sizeof(rndtest_funcs[0]))
6709+
6710+static void
6711+rndtest_test(struct rndtest_state *rsp)
6712+{
6713+ int i, rv = 0;
6714+
6715+ rndstats.rst_tests++;
6716+ for (i = 0; i < RNDTEST_NTESTS; i++)
6717+ rv |= (*rndtest_funcs[i].test)(rsp);
6718+ rsp->rs_discard = (rv != 0);
6719+}
6720+
6721+
6722+extern int crypto_debug;
6723+#define rndtest_verbose 2
6724+#define rndtest_report(rsp, failure, fmt, a...) \
6725+ { if (failure || crypto_debug) { printk("rng_test: " fmt "\n", a); } else; }
6726+
6727+#define RNDTEST_MONOBIT_MINONES 9725
6728+#define RNDTEST_MONOBIT_MAXONES 10275
6729+
6730+static int
6731+rndtest_monobit(struct rndtest_state *rsp)
6732+{
6733+ int i, ones = 0, j;
6734+ u_int8_t r;
6735+
6736+ for (i = 0; i < RNDTEST_NBYTES; i++) {
6737+ r = rsp->rs_buf[i];
6738+ for (j = 0; j < 8; j++, r <<= 1)
6739+ if (r & 0x80)
6740+ ones++;
6741+ }
6742+ if (ones > RNDTEST_MONOBIT_MINONES &&
6743+ ones < RNDTEST_MONOBIT_MAXONES) {
6744+ if (rndtest_verbose > 1)
6745+ rndtest_report(rsp, 0, "monobit pass (%d < %d < %d)",
6746+ RNDTEST_MONOBIT_MINONES, ones,
6747+ RNDTEST_MONOBIT_MAXONES);
6748+ return (0);
6749+ } else {
6750+ if (rndtest_verbose)
6751+ rndtest_report(rsp, 1,
6752+ "monobit failed (%d ones)", ones);
6753+ rndstats.rst_monobit++;
6754+ return (-1);
6755+ }
6756+}
6757+
6758+#define RNDTEST_RUNS_NINTERVAL 6
6759+
6760+static const struct rndtest_runs_tabs {
6761+ u_int16_t min, max;
6762+} rndtest_runs_tab[] = {
6763+ { 2343, 2657 },
6764+ { 1135, 1365 },
6765+ { 542, 708 },
6766+ { 251, 373 },
6767+ { 111, 201 },
6768+ { 111, 201 },
6769+};
6770+
6771+static int
6772+rndtest_runs(struct rndtest_state *rsp)
6773+{
6774+ int i, j, ones, zeros, rv = 0;
6775+ int onei[RNDTEST_RUNS_NINTERVAL], zeroi[RNDTEST_RUNS_NINTERVAL];
6776+ u_int8_t c;
6777+
6778+ bzero(onei, sizeof(onei));
6779+ bzero(zeroi, sizeof(zeroi));
6780+ ones = zeros = 0;
6781+ for (i = 0; i < RNDTEST_NBYTES; i++) {
6782+ c = rsp->rs_buf[i];
6783+ for (j = 0; j < 8; j++, c <<= 1) {
6784+ if (c & 0x80) {
6785+ ones++;
6786+ rndtest_runs_record(rsp, zeros, zeroi);
6787+ zeros = 0;
6788+ } else {
6789+ zeros++;
6790+ rndtest_runs_record(rsp, ones, onei);
6791+ ones = 0;
6792+ }
6793+ }
6794+ }
6795+ rndtest_runs_record(rsp, ones, onei);
6796+ rndtest_runs_record(rsp, zeros, zeroi);
6797+
6798+ rv |= rndtest_runs_check(rsp, 0, zeroi);
6799+ rv |= rndtest_runs_check(rsp, 1, onei);
6800+
6801+ if (rv)
6802+ rndstats.rst_runs++;
6803+
6804+ return (rv);
6805+}
6806+
6807+static void
6808+rndtest_runs_record(struct rndtest_state *rsp, int len, int *intrv)
6809+{
6810+ if (len == 0)
6811+ return;
6812+ if (len > RNDTEST_RUNS_NINTERVAL)
6813+ len = RNDTEST_RUNS_NINTERVAL;
6814+ len -= 1;
6815+ intrv[len]++;
6816+}
6817+
6818+static int
6819+rndtest_runs_check(struct rndtest_state *rsp, int val, int *src)
6820+{
6821+ int i, rv = 0;
6822+
6823+ for (i = 0; i < RNDTEST_RUNS_NINTERVAL; i++) {
6824+ if (src[i] < rndtest_runs_tab[i].min ||
6825+ src[i] > rndtest_runs_tab[i].max) {
6826+ rndtest_report(rsp, 1,
6827+ "%s interval %d failed (%d, %d-%d)",
6828+ val ? "ones" : "zeros",
6829+ i + 1, src[i], rndtest_runs_tab[i].min,
6830+ rndtest_runs_tab[i].max);
6831+ rv = -1;
6832+ } else {
6833+ rndtest_report(rsp, 0,
6834+ "runs pass %s interval %d (%d < %d < %d)",
6835+ val ? "ones" : "zeros",
6836+ i + 1, rndtest_runs_tab[i].min, src[i],
6837+ rndtest_runs_tab[i].max);
6838+ }
6839+ }
6840+ return (rv);
6841+}
6842+
6843+static int
6844+rndtest_longruns(struct rndtest_state *rsp)
6845+{
6846+ int i, j, ones = 0, zeros = 0, maxones = 0, maxzeros = 0;
6847+ u_int8_t c;
6848+
6849+ for (i = 0; i < RNDTEST_NBYTES; i++) {
6850+ c = rsp->rs_buf[i];
6851+ for (j = 0; j < 8; j++, c <<= 1) {
6852+ if (c & 0x80) {
6853+ zeros = 0;
6854+ ones++;
6855+ if (ones > maxones)
6856+ maxones = ones;
6857+ } else {
6858+ ones = 0;
6859+ zeros++;
6860+ if (zeros > maxzeros)
6861+ maxzeros = zeros;
6862+ }
6863+ }
6864+ }
6865+
6866+ if (maxones < 26 && maxzeros < 26) {
6867+ rndtest_report(rsp, 0, "longruns pass (%d ones, %d zeros)",
6868+ maxones, maxzeros);
6869+ return (0);
6870+ } else {
6871+ rndtest_report(rsp, 1, "longruns fail (%d ones, %d zeros)",
6872+ maxones, maxzeros);
6873+ rndstats.rst_longruns++;
6874+ return (-1);
6875+ }
6876+}
6877+
6878+/*
6879+ * chi^2 test over 4 bits: (this is called the poker test in FIPS 140-2,
6880+ * but it is really the chi^2 test over 4 bits (the poker test as described
6881+ * by Knuth vol 2 is something different, and I take him as authoritative
6882+ * on nomenclature over NIST).
6883+ */
6884+#define RNDTEST_CHI4_K 16
6885+#define RNDTEST_CHI4_K_MASK (RNDTEST_CHI4_K - 1)
6886+
6887+/*
6888+ * The unnormalized values are used so that we don't have to worry about
6889+ * fractional precision. The "real" value is found by:
6890+ * (V - 1562500) * (16 / 5000) = Vn (where V is the unnormalized value)
6891+ */
6892+#define RNDTEST_CHI4_VMIN 1563181 /* 2.1792 */
6893+#define RNDTEST_CHI4_VMAX 1576929 /* 46.1728 */
6894+
6895+static int
6896+rndtest_chi_4(struct rndtest_state *rsp)
6897+{
6898+ unsigned int freq[RNDTEST_CHI4_K], i, sum;
6899+
6900+ for (i = 0; i < RNDTEST_CHI4_K; i++)
6901+ freq[i] = 0;
6902+
6903+	/* Get number of occurrences of each 4-bit pattern */
6904+ for (i = 0; i < RNDTEST_NBYTES; i++) {
6905+ freq[(rsp->rs_buf[i] >> 4) & RNDTEST_CHI4_K_MASK]++;
6906+ freq[(rsp->rs_buf[i] >> 0) & RNDTEST_CHI4_K_MASK]++;
6907+ }
6908+
6909+ for (i = 0, sum = 0; i < RNDTEST_CHI4_K; i++)
6910+ sum += freq[i] * freq[i];
6911+
6912+ if (sum >= 1563181 && sum <= 1576929) {
6913+ rndtest_report(rsp, 0, "chi^2(4): pass (sum %u)", sum);
6914+ return (0);
6915+ } else {
6916+ rndtest_report(rsp, 1, "chi^2(4): failed (sum %u)", sum);
6917+ rndstats.rst_chi++;
6918+ return (-1);
6919+ }
6920+}
6921+
6922+int
6923+rndtest_buf(unsigned char *buf)
6924+{
6925+ struct rndtest_state rsp;
6926+
6927+ memset(&rsp, 0, sizeof(rsp));
6928+ rsp.rs_buf = buf;
6929+ rndtest_test(&rsp);
6930+ return(rsp.rs_discard);
6931+}
6932+
6933diff --git a/crypto/ocf/rndtest.h b/crypto/ocf/rndtest.h
6934new file mode 100644
6935index 0000000..e9d8ec8
6936--- /dev/null
6937+++ b/crypto/ocf/rndtest.h
6938@@ -0,0 +1,54 @@
6939+/* $FreeBSD: src/sys/dev/rndtest/rndtest.h,v 1.1 2003/03/11 22:54:44 sam Exp $ */
6940+/* $OpenBSD$ */
6941+
6942+/*
6943+ * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
6944+ * All rights reserved.
6945+ *
6946+ * Redistribution and use in source and binary forms, with or without
6947+ * modification, are permitted provided that the following conditions
6948+ * are met:
6949+ * 1. Redistributions of source code must retain the above copyright
6950+ * notice, this list of conditions and the following disclaimer.
6951+ * 2. Redistributions in binary form must reproduce the above copyright
6952+ * notice, this list of conditions and the following disclaimer in the
6953+ * documentation and/or other materials provided with the distribution.
6954+ * 3. All advertising materials mentioning features or use of this software
6955+ * must display the following acknowledgement:
6956+ * This product includes software developed by Jason L. Wright
6957+ * 4. The name of the author may not be used to endorse or promote products
6958+ * derived from this software without specific prior written permission.
6959+ *
6960+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
6961+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
6962+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
6963+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
6964+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
6965+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
6966+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
6967+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
6968+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
6969+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
6970+ * POSSIBILITY OF SUCH DAMAGE.
6971+ */
6972+
6973+
6974+/* Some of the tests depend on these values */
6975+#define RNDTEST_NBYTES 2500
6976+#define RNDTEST_NBITS (8 * RNDTEST_NBYTES)
6977+
6978+struct rndtest_state {
6979+ int rs_discard; /* discard/accept random data */
6980+ u_int8_t *rs_buf;
6981+};
6982+
6983+struct rndtest_stats {
6984+ u_int32_t rst_discard; /* number of bytes discarded */
6985+ u_int32_t rst_tests; /* number of test runs */
6986+ u_int32_t rst_monobit; /* monobit test failures */
6987+ u_int32_t rst_runs; /* 0/1 runs failures */
6988+ u_int32_t rst_longruns; /* longruns failures */
6989+ u_int32_t rst_chi; /* chi^2 failures */
6990+};
6991+
6992+extern int rndtest_buf(unsigned char *buf);
6993diff --git a/crypto/ocf/uio.h b/crypto/ocf/uio.h
6994new file mode 100644
6995index 0000000..03a6249
6996--- /dev/null
6997+++ b/crypto/ocf/uio.h
6998@@ -0,0 +1,54 @@
6999+#ifndef _OCF_UIO_H_
7000+#define _OCF_UIO_H_
7001+
7002+#include <linux/uio.h>
7003+
7004+/*
7005+ * The linux uio.h doesn't have all we need. To be fully api compatible
7006+ * with the BSD cryptodev, we need to keep this around. Perhaps this can
7007+ * be moved back into the linux/uio.h
7008+ *
7009+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
7010+ * Copyright (C) 2006-2010 David McCullough
7011+ * Copyright (C) 2004-2005 Intel Corporation.
7012+ *
7013+ * LICENSE TERMS
7014+ *
7015+ * The free distribution and use of this software in both source and binary
7016+ * form is allowed (with or without changes) provided that:
7017+ *
7018+ * 1. distributions of this source code include the above copyright
7019+ * notice, this list of conditions and the following disclaimer;
7020+ *
7021+ * 2. distributions in binary form include the above copyright
7022+ * notice, this list of conditions and the following disclaimer
7023+ * in the documentation and/or other associated materials;
7024+ *
7025+ * 3. the copyright holder's name is not used to endorse products
7026+ * built using this software without specific written permission.
7027+ *
7028+ * ALTERNATIVELY, provided that this notice is retained in full, this product
7029+ * may be distributed under the terms of the GNU General Public License (GPL),
7030+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
7031+ *
7032+ * DISCLAIMER
7033+ *
7034+ * This software is provided 'as is' with no explicit or implied warranties
7035+ * in respect of its properties, including, but not limited to, correctness
7036+ * and/or fitness for purpose.
7037+ * ---------------------------------------------------------------------------
7038+ */
7039+
7040+struct uio {
7041+ struct iovec *uio_iov;
7042+ int uio_iovcnt;
7043+ off_t uio_offset;
7044+ int uio_resid;
7045+#if 0
7046+ enum uio_seg uio_segflg;
7047+ enum uio_rw uio_rw;
7048+ struct thread *uio_td;
7049+#endif
7050+};
7051+
7052+#endif
7053diff --git a/drivers/char/random.c b/drivers/char/random.c
7054index 6035ab8..8c3acdf 100644
7055--- a/drivers/char/random.c
7056+++ b/drivers/char/random.c
7057@@ -130,6 +130,9 @@
7058 * void add_interrupt_randomness(int irq);
7059 * void add_disk_randomness(struct gendisk *disk);
7060 *
7061+ * void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
7062+ * int random_input_wait(void);
7063+ *
7064 * add_input_randomness() uses the input layer interrupt timing, as well as
7065 * the event type information from the hardware.
7066 *
7067@@ -147,6 +150,13 @@
7068 * seek times do not make for good sources of entropy, as their seek
7069 * times are usually fairly consistent.
7070 *
7071+ * random_input_words() just provides a raw block of entropy to the input
7072+ * pool, such as from a hardware entropy generator.
7073+ *
7074+ * random_input_wait() suspends the caller until such time as the
7075+ * entropy pool falls below the write threshold, and returns a count of how
7076+ * much entropy (in bits) is needed to sustain the pool.
7077+ *
7078 * All of these routines try to estimate how many bits of randomness a
7079 * particular randomness source. They do this by keeping track of the
7080 * first and second order deltas of the event timings.
7081@@ -722,6 +732,63 @@ void add_disk_randomness(struct gendisk *disk)
7082 }
7083 #endif
7084
7085+/*
7086+ * random_input_words - add bulk entropy to pool
7087+ *
7088+ * @buf: buffer to add
7089+ * @wordcount: number of __u32 words to add
7090+ * @ent_count: total amount of entropy (in bits) to credit
7091+ *
7092+ * this provides bulk input of entropy to the input pool
7093+ *
7094+ */
7095+void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
7096+{
7097+ mix_pool_bytes(&input_pool, buf, wordcount*4);
7098+
7099+ credit_entropy_bits(&input_pool, ent_count);
7100+
7101+ DEBUG_ENT("crediting %d bits => %d\n",
7102+ ent_count, input_pool.entropy_count);
7103+ /*
7104+ * Wake up waiting processes if we have enough
7105+ * entropy.
7106+ */
7107+ if (input_pool.entropy_count >= random_read_wakeup_thresh)
7108+ wake_up_interruptible(&random_read_wait);
7109+}
7110+EXPORT_SYMBOL(random_input_words);
7111+
7112+/*
7113+ * random_input_wait - wait until random needs entropy
7114+ *
7115+ * this function sleeps until the /dev/random subsystem actually
7116+ * needs more entropy, and then return the amount of entropy
7117+ * that it would be nice to have added to the system.
7118+ */
7119+int random_input_wait(void)
7120+{
7121+ int count;
7122+
7123+ wait_event_interruptible(random_write_wait,
7124+ input_pool.entropy_count < random_write_wakeup_thresh);
7125+
7126+ count = random_write_wakeup_thresh - input_pool.entropy_count;
7127+
7128+ /* likely we got woken up due to a signal */
7129+ if (count <= 0) count = random_read_wakeup_thresh;
7130+
7131+ DEBUG_ENT("requesting %d bits from input_wait()er %d<%d\n",
7132+ count,
7133+ input_pool.entropy_count, random_write_wakeup_thresh);
7134+
7135+ return count;
7136+}
7137+EXPORT_SYMBOL(random_input_wait);
7138+
7139+
7140+#define EXTRACT_SIZE 10
7141+
7142 /*********************************************************************
7143 *
7144 * Entropy extraction routines
7145diff --git a/fs/fcntl.c b/fs/fcntl.c
7146index 22764c7..0ffe61f 100644
7147--- a/fs/fcntl.c
7148+++ b/fs/fcntl.c
7149@@ -142,6 +142,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
7150 }
7151 return ret;
7152 }
7153+EXPORT_SYMBOL(sys_dup);
7154
7155 #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
7156
7157diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
7158index c41d727..24b73c0 100644
7159--- a/include/linux/miscdevice.h
7160+++ b/include/linux/miscdevice.h
7161@@ -19,6 +19,7 @@
7162 #define APOLLO_MOUSE_MINOR 7
7163 #define PC110PAD_MINOR 9
7164 /*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */
7165+#define CRYPTODEV_MINOR 70 /* /dev/crypto */
7166 #define WATCHDOG_MINOR 130 /* Watchdog timer */
7167 #define TEMP_MINOR 131 /* Temperature Sensor */
7168 #define RTC_MINOR 135
7169diff --git a/include/linux/random.h b/include/linux/random.h
7170index 8f74538..0ff31a9 100644
7171--- a/include/linux/random.h
7172+++ b/include/linux/random.h
7173@@ -34,6 +34,30 @@
7174 /* Clear the entropy pool and associated counters. (Superuser only.) */
7175 #define RNDCLEARPOOL _IO( 'R', 0x06 )
7176
7177+#ifdef CONFIG_FIPS_RNG
7178+
7179+/* Size of seed value - equal to AES blocksize */
7180+#define AES_BLOCK_SIZE_BYTES 16
7181+#define SEED_SIZE_BYTES AES_BLOCK_SIZE_BYTES
7182+/* Size of AES key */
7183+#define KEY_SIZE_BYTES 16
7184+
7185+/* ioctl() structure used by FIPS 140-2 Tests */
7186+struct rand_fips_test {
7187+ unsigned char key[KEY_SIZE_BYTES]; /* Input */
7188+ unsigned char datetime[SEED_SIZE_BYTES]; /* Input */
7189+ unsigned char seed[SEED_SIZE_BYTES]; /* Input */
7190+ unsigned char result[SEED_SIZE_BYTES]; /* Output */
7191+};
7192+
7193+/* FIPS 140-2 RNG Variable Seed Test. (Superuser only.) */
7194+#define RNDFIPSVST _IOWR('R', 0x10, struct rand_fips_test)
7195+
7196+/* FIPS 140-2 RNG Monte Carlo Test. (Superuser only.) */
7197+#define RNDFIPSMCT _IOWR('R', 0x11, struct rand_fips_test)
7198+
7199+#endif /* #ifdef CONFIG_FIPS_RNG */
7200+
7201 struct rand_pool_info {
7202 int entropy_count;
7203 int buf_size;
7204@@ -54,6 +78,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
7205 unsigned int value);
7206 extern void add_interrupt_randomness(int irq);
7207
7208+extern void random_input_words(__u32 *buf, size_t wordcount, int ent_count);
7209+extern int random_input_wait(void);
7210+#define HAS_RANDOM_INPUT_WAIT 1
7211+
7212 extern void get_random_bytes(void *buf, int nbytes);
7213 void generate_random_uuid(unsigned char uuid_out[16]);
7214
7215diff --git a/kernel/pid.c b/kernel/pid.c
7216index fa5f722..2bf49fd 100644
7217--- a/kernel/pid.c
7218+++ b/kernel/pid.c
7219@@ -428,6 +428,7 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
7220 {
7221 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
7222 }
7223+EXPORT_SYMBOL(find_task_by_vpid);
7224
7225 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
7226 {
7227--
72281.7.0.4