Diffstat (limited to 'recipes-kernel/cryptodev/sdk_patches/0003-PKC-support-added-in-cryptodev-module.patch')
-rw-r--r--  recipes-kernel/cryptodev/sdk_patches/0003-PKC-support-added-in-cryptodev-module.patch  898
1 file changed, 898 insertions, 0 deletions
diff --git a/recipes-kernel/cryptodev/sdk_patches/0003-PKC-support-added-in-cryptodev-module.patch b/recipes-kernel/cryptodev/sdk_patches/0003-PKC-support-added-in-cryptodev-module.patch
new file mode 100644
index 0000000..ffa0b45
--- /dev/null
+++ b/recipes-kernel/cryptodev/sdk_patches/0003-PKC-support-added-in-cryptodev-module.patch
@@ -0,0 +1,898 @@
From 2bda43095b511e0052b3bc27b216ff9909cc03d2 Mon Sep 17 00:00:00 2001
From: Yashpal Dutta <yashpal.dutta@freescale.com>
Date: Fri, 7 Mar 2014 06:16:09 +0545
Subject: [PATCH 03/38] PKC support added in cryptodev module

Upstream-status: Pending

Signed-off-by: Yashpal Dutta <yashpal.dutta@freescale.com>
---
 cryptlib.c | 66 +++++++++-
 cryptlib.h | 28 ++++
 crypto/cryptodev.h | 15 ++-
 cryptodev_int.h | 20 ++-
 ioctl.c | 196 +++++++++++++++++++++++++--
 main.c | 378 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 6 files changed, 685 insertions(+), 18 deletions(-)

diff --git a/cryptlib.c b/cryptlib.c
index 44ce763..6900028 100644
--- a/cryptlib.c
+++ b/cryptlib.c
@@ -5,6 +5,8 @@
 * Portions Copyright (c) 2010 Michael Weiser
 * Portions Copyright (c) 2010 Phil Sutter
 *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
@@ -39,11 +41,6 @@
 #include "cryptodev_int.h"


-struct cryptodev_result {
- struct completion completion;
- int err;
-};
-
 static void cryptodev_complete(struct crypto_async_request *req, int err)
 {
 struct cryptodev_result *res = req->data;
@@ -259,7 +256,6 @@ static inline int waitfor(struct cryptodev_result *cr, ssize_t ret)
 case 0:
 break;
 case -EINPROGRESS:
- case -EBUSY:
 wait_for_completion(&cr->completion);
 /* At this point we known for sure the request has finished,
 * because wait_for_completion above was not interruptible.
@@ -439,3 +435,61 @@ int cryptodev_hash_final(struct hash_data *hdata, void *output)
 return waitfor(hdata->async.result, ret);
 }

+int cryptodev_pkc_offload(struct cryptodev_pkc *pkc)
+{
+ int ret = 0;
+ struct pkc_request *pkc_req = &pkc->req, *pkc_requested;
+
+ switch (pkc_req->type) {
+ case RSA_PUB:
+ case RSA_PRIV_FORM1:
+ case RSA_PRIV_FORM2:
+ case RSA_PRIV_FORM3:
+ pkc->s = crypto_alloc_pkc("pkc(rsa)",
+ CRYPTO_ALG_TYPE_PKC_RSA, 0);
+ break;
+ case DSA_SIGN:
+ case DSA_VERIFY:
+ case ECDSA_SIGN:
+ case ECDSA_VERIFY:
+ pkc->s = crypto_alloc_pkc("pkc(dsa)",
+ CRYPTO_ALG_TYPE_PKC_DSA, 0);
+ break;
+ case DH_COMPUTE_KEY:
+ case ECDH_COMPUTE_KEY:
+ pkc->s = crypto_alloc_pkc("pkc(dh)",
+ CRYPTO_ALG_TYPE_PKC_DH, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (IS_ERR_OR_NULL(pkc->s))
+ return -EINVAL;
+
+ init_completion(&pkc->result.completion);
+ pkc_requested = pkc_request_alloc(pkc->s, GFP_KERNEL);
+
+ if (unlikely(IS_ERR_OR_NULL(pkc_requested))) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ pkc_requested->type = pkc_req->type;
+ pkc_requested->curve_type = pkc_req->curve_type;
+ memcpy(&pkc_requested->req_u, &pkc_req->req_u, sizeof(pkc_req->req_u));
+ pkc_request_set_callback(pkc_requested, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ cryptodev_complete_asym, pkc);
+ ret = crypto_pkc_op(pkc_requested);
+ if (ret != -EINPROGRESS && ret != 0)
+ goto error2;
+
+ if (pkc->type == SYNCHRONOUS)
+ ret = waitfor(&pkc->result, ret);
+
+ return ret;
+error2:
+ kfree(pkc_requested);
+error:
+ crypto_free_pkc(pkc->s);
+ return ret;
+}
diff --git a/cryptlib.h b/cryptlib.h
index a0a8a63..56d325a 100644
--- a/cryptlib.h
+++ b/cryptlib.h
@@ -1,3 +1,6 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ */
 #ifndef CRYPTLIB_H
 # define CRYPTLIB_H

@@ -89,5 +92,30 @@ void cryptodev_hash_deinit(struct hash_data *hdata);
 int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
 int hmac_mode, void *mackey, size_t mackeylen);

+/* Operation Type */
+enum offload_type {
+ SYNCHRONOUS,
+ ASYNCHRONOUS
+};
+
+struct cryptodev_result {
+ struct completion completion;
+ int err;
+};
+
+struct cryptodev_pkc {
+ struct list_head list; /* To maintain the Jobs in completed
+ cryptodev lists */
+ struct kernel_crypt_kop kop;
+ struct crypto_pkc *s; /* Transform pointer from CryptoAPI */
+ struct cryptodev_result result; /* Result to be updated by
+ completion handler */
+ struct pkc_request req; /* PKC request structure allocated
+ from CryptoAPI */
+ enum offload_type type; /* Synchronous Vs Asynchronous request */
+ void *cookie; /*Additional opaque cookie to be used in future */
+ struct crypt_priv *priv;
+};

+int cryptodev_pkc_offload(struct cryptodev_pkc *);
 #endif
diff --git a/crypto/cryptodev.h b/crypto/cryptodev.h
index c0e8cd4..96675fe 100644
--- a/crypto/cryptodev.h
+++ b/crypto/cryptodev.h
@@ -1,6 +1,10 @@
-/* This is a source compatible implementation with the original API of
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * This is a source compatible implementation with the original API of
 * cryptodev by Angelos D. Keromytis, found at openbsd cryptodev.h.
- * Placed under public domain */
+ * Placed under public domain
+ */

 #ifndef L_CRYPTODEV_H
 #define L_CRYPTODEV_H
@@ -245,6 +249,9 @@ struct crypt_kop {
 __u16 crk_oparams;
 __u32 crk_pad1;
 struct crparam crk_param[CRK_MAXPARAM];
+ enum curve_t curve_type; /* 0 == Discrete Log,
+ 1 = EC_PRIME, 2 = EC_BINARY */
+ void *cookie;
 };

 enum cryptodev_crk_op_t {
@@ -289,5 +296,7 @@ enum cryptodev_crk_op_t {
 */
 #define CIOCASYNCCRYPT _IOW('c', 110, struct crypt_op)
 #define CIOCASYNCFETCH _IOR('c', 111, struct crypt_op)
-
+/* additional ioctls for asynchronous operation for asymmetric ciphers*/
+#define CIOCASYMASYNCRYPT _IOW('c', 112, struct crypt_kop)
+#define CIOCASYMASYNFETCH _IOR('c', 113, struct crypt_kop)
 #endif /* L_CRYPTODEV_H */
diff --git a/cryptodev_int.h b/cryptodev_int.h
index 8e687e7..fdbcc61 100644
--- a/cryptodev_int.h
+++ b/cryptodev_int.h
@@ -1,4 +1,6 @@
-/* cipher stuff */
+/* cipher stuff
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ */
 #ifndef CRYPTODEV_INT_H
 # define CRYPTODEV_INT_H

@@ -112,6 +114,14 @@ struct compat_crypt_auth_op {

 #endif /* CONFIG_COMPAT */

+/* kernel-internal extension to struct crypt_kop */
+struct kernel_crypt_kop {
+ struct crypt_kop kop;
+
+ struct task_struct *task;
+ struct mm_struct *mm;
+};
+
 /* kernel-internal extension to struct crypt_op */
 struct kernel_crypt_op {
 struct crypt_op cop;
@@ -157,6 +167,14 @@ int crypto_run(struct fcrypt *fcr, struct kernel_crypt_op *kcop);

 #include <cryptlib.h>

+/* Cryptodev Key operation handler */
+int crypto_bn_modexp(struct cryptodev_pkc *);
+int crypto_modexp_crt(struct cryptodev_pkc *);
+int crypto_kop_dsasign(struct cryptodev_pkc *);
+int crypto_kop_dsaverify(struct cryptodev_pkc *);
+int crypto_run_asym(struct cryptodev_pkc *);
+void cryptodev_complete_asym(struct crypto_async_request *, int);
+
 /* other internal structs */
 struct csession {
 struct list_head entry;
diff --git a/ioctl.c b/ioctl.c
index 5a44807..69980e3 100644
--- a/ioctl.c
+++ b/ioctl.c
@@ -4,6 +4,7 @@
 * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
 * Copyright (c) 2009,2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 * Copyright (c) 2010 Phil Sutter
+ * Copyright 2012 Freescale Semiconductor, Inc.
 *
 * This file is part of linux cryptodev.
 *
@@ -89,8 +90,37 @@ struct crypt_priv {
 int itemcount;
 struct work_struct cryptask;
 wait_queue_head_t user_waiter;
+ /* List of pending cryptodev_pkc asym requests */
+ struct list_head asym_completed_list;
+ /* For addition/removal of entry in pending list of asymmetric request*/
+ spinlock_t completion_lock;
 };

+/* Asymmetric request Completion handler */
+void cryptodev_complete_asym(struct crypto_async_request *req, int err)
+{
+ struct cryptodev_pkc *pkc = req->data;
+ struct cryptodev_result *res = &pkc->result;
+
+ crypto_free_pkc(pkc->s);
+ res->err = err;
+ if (pkc->type == SYNCHRONOUS) {
+ if (err == -EINPROGRESS)
+ return;
+ complete(&res->completion);
+ } else {
+ struct crypt_priv *pcr = pkc->priv;
+ unsigned long flags;
+ spin_lock_irqsave(&pcr->completion_lock, flags);
+ list_add_tail(&pkc->list, &pcr->asym_completed_list);
+ spin_unlock_irqrestore(&pcr->completion_lock, flags);
+ /* wake for POLLIN */
+ wake_up_interruptible(&pcr->user_waiter);
+ }
+
+ kfree(req);
+}
+
 #define FILL_SG(sg, ptr, len) \
 do { \
 (sg)->page = virt_to_page(ptr); \
@@ -472,7 +502,8 @@ cryptodev_open(struct inode *inode, struct file *filp)
 INIT_LIST_HEAD(&pcr->free.list);
 INIT_LIST_HEAD(&pcr->todo.list);
 INIT_LIST_HEAD(&pcr->done.list);
-
+ INIT_LIST_HEAD(&pcr->asym_completed_list);
+ spin_lock_init(&pcr->completion_lock);
 INIT_WORK(&pcr->cryptask, cryptask_routine);

 init_waitqueue_head(&pcr->user_waiter);
@@ -639,6 +670,79 @@ static int crypto_async_fetch(struct crypt_priv *pcr,
 }
 #endif

+/* get the first asym cipher completed job from the "done" queue
+ *
+ * returns:
+ * -EBUSY if no completed jobs are ready (yet)
+ * the return value otherwise */
+static int crypto_async_fetch_asym(struct cryptodev_pkc *pkc)
+{
+ int ret = 0;
+ struct kernel_crypt_kop *kop = &pkc->kop;
+ struct crypt_kop *ckop = &kop->kop;
+ struct pkc_request *pkc_req = &pkc->req;
+
+ switch (ckop->crk_op) {
+ case CRK_MOD_EXP:
+ {
+ struct rsa_pub_req_s *rsa_req = &pkc_req->req_u.rsa_pub_req;
+ copy_to_user(ckop->crk_param[3].crp_p, rsa_req->g,
+ rsa_req->g_len);
+ }
+ break;
+ case CRK_MOD_EXP_CRT:
+ {
+ struct rsa_priv_frm3_req_s *rsa_req =
+ &pkc_req->req_u.rsa_priv_f3;
+ copy_to_user(ckop->crk_param[6].crp_p,
+ rsa_req->f, rsa_req->f_len);
+ }
+ break;
+ case CRK_DSA_SIGN:
+ {
+ struct dsa_sign_req_s *dsa_req = &pkc_req->req_u.dsa_sign;
+
+ if (pkc_req->type == ECDSA_SIGN) {
+ copy_to_user(ckop->crk_param[6].crp_p,
+ dsa_req->c, dsa_req->d_len);
+ copy_to_user(ckop->crk_param[7].crp_p,
+ dsa_req->d, dsa_req->d_len);
+ } else {
+ copy_to_user(ckop->crk_param[5].crp_p,
+ dsa_req->c, dsa_req->d_len);
+ copy_to_user(ckop->crk_param[6].crp_p,
+ dsa_req->d, dsa_req->d_len);
+ }
+ }
+ break;
+ case CRK_DSA_VERIFY:
+ break;
+ case CRK_DH_COMPUTE_KEY:
+ {
+ struct dh_key_req_s *dh_req = &pkc_req->req_u.dh_req;
+ if (pkc_req->type == ECDH_COMPUTE_KEY)
+ copy_to_user(ckop->crk_param[4].crp_p,
+ dh_req->z, dh_req->z_len);
+ else
+ copy_to_user(ckop->crk_param[3].crp_p,
+ dh_req->z, dh_req->z_len);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ kfree(pkc->cookie);
+ return ret;
+}
+
+/* this function has to be called from process context */
+static int fill_kop_from_cop(struct kernel_crypt_kop *kop)
+{
+ kop->task = current;
+ kop->mm = current->mm;
+ return 0;
+}
+
 /* this function has to be called from process context */
 static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
 {
@@ -662,11 +766,8 @@ static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)

 if (cop->iv) {
 rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
- if (unlikely(rc)) {
- derr(1, "error copying IV (%d bytes), copy_from_user returned %d for address %p",
- kcop->ivlen, rc, cop->iv);
+ if (unlikely(rc))
 return -EFAULT;
- }
 }

 return 0;
@@ -692,6 +793,25 @@ static int fill_cop_from_kcop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
 return 0;
 }

+static int kop_from_user(struct kernel_crypt_kop *kop,
+ void __user *arg)
+{
+ if (unlikely(copy_from_user(&kop->kop, arg, sizeof(kop->kop))))
+ return -EFAULT;
+
+ return fill_kop_from_cop(kop);
+}
+
+static int kop_to_user(struct kernel_crypt_kop *kop,
+ void __user *arg)
+{
+ if (unlikely(copy_to_user(arg, &kop->kop, sizeof(kop->kop)))) {
+ dprintk(1, KERN_ERR, "Cannot copy to userspace\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
 static int kcop_from_user(struct kernel_crypt_op *kcop,
 struct fcrypt *fcr, void __user *arg)
 {
@@ -821,7 +941,8 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)

 switch (cmd) {
 case CIOCASYMFEAT:
- return put_user(0, p);
+ return put_user(CRF_MOD_EXP_CRT | CRF_MOD_EXP |
+ CRF_DSA_SIGN | CRF_DSA_VERIFY | CRF_DH_COMPUTE_KEY, p);
 case CRIOGET:
 fd = clonefd(filp);
 ret = put_user(fd, p);
@@ -857,6 +978,24 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
 if (unlikely(ret))
 return ret;
 return copy_to_user(arg, &siop, sizeof(siop));
+ case CIOCKEY:
+ {
+ struct cryptodev_pkc *pkc =
+ kzalloc(sizeof(struct cryptodev_pkc), GFP_KERNEL);
+
+ if (!pkc)
+ return -ENOMEM;
+
+ ret = kop_from_user(&pkc->kop, arg);
+ if (unlikely(ret)) {
+ kfree(pkc);
+ return ret;
+ }
+ pkc->type = SYNCHRONOUS;
+ ret = crypto_run_asym(pkc);
+ kfree(pkc);
+ }
+ return ret;
 case CIOCCRYPT:
 if (unlikely(ret = kcop_from_user(&kcop, fcr, arg))) {
 dwarning(1, "Error copying from user");
@@ -895,6 +1034,45 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)

 return kcop_to_user(&kcop, fcr, arg);
 #endif
+ case CIOCASYMASYNCRYPT:
+ {
+ struct cryptodev_pkc *pkc =
+ kzalloc(sizeof(struct cryptodev_pkc), GFP_KERNEL);
+ ret = kop_from_user(&pkc->kop, arg);
+
+ if (unlikely(ret))
+ return -EINVAL;
+
+ /* Store associated FD priv data with asymmetric request */
+ pkc->priv = pcr;
+ pkc->type = ASYNCHRONOUS;
+ ret = crypto_run_asym(pkc);
+ if (ret == -EINPROGRESS)
+ ret = 0;
+ }
+ return ret;
+ case CIOCASYMASYNFETCH:
+ {
+ struct cryptodev_pkc *pkc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcr->completion_lock, flags);
+ if (list_empty(&pcr->asym_completed_list)) {
+ spin_unlock_irqrestore(&pcr->completion_lock, flags);
+ return -ENOMEM;
+ }
+ pkc = list_first_entry(&pcr->asym_completed_list,
+ struct cryptodev_pkc, list);
+ list_del(&pkc->list);
+ spin_unlock_irqrestore(&pcr->completion_lock, flags);
+ ret = crypto_async_fetch_asym(pkc);
+
+ /* Reflect the updated request to user-space */
+ if (!ret)
+ kop_to_user(&pkc->kop, arg);
+ kfree(pkc);
+ }
+ return ret;
 default:
 return -EINVAL;
 }
@@ -1083,9 +1261,11 @@ static unsigned int cryptodev_poll(struct file *file, poll_table *wait)

 poll_wait(file, &pcr->user_waiter, wait);

- if (!list_empty_careful(&pcr->done.list))
+ if (!list_empty_careful(&pcr->done.list) ||
+ !list_empty_careful(&pcr->asym_completed_list))
 ret |= POLLIN | POLLRDNORM;
- if (!list_empty_careful(&pcr->free.list) || pcr->itemcount < MAX_COP_RINGSIZE)
+ if (!list_empty_careful(&pcr->free.list) ||
+ pcr->itemcount < MAX_COP_RINGSIZE)
 ret |= POLLOUT | POLLWRNORM;

 return ret;
diff --git a/main.c b/main.c
index 57e5c38..0b7951e 100644
--- a/main.c
+++ b/main.c
@@ -181,6 +181,384 @@ __crypto_run_zc(struct csession *ses_ptr, struct kernel_crypt_op *kcop)
 return ret;
 }

+int crypto_kop_dsasign(struct cryptodev_pkc *pkc)
+{
+ struct kernel_crypt_kop *kop = &pkc->kop;
+ struct crypt_kop *cop = &kop->kop;
+ struct pkc_request *pkc_req = &pkc->req;
+ struct dsa_sign_req_s *dsa_req = &pkc_req->req_u.dsa_sign;
+ int rc, buf_size;
+ uint8_t *buf;
+
+ if (!cop->crk_param[0].crp_nbits || !cop->crk_param[1].crp_nbits ||
+ !cop->crk_param[2].crp_nbits || !cop->crk_param[3].crp_nbits ||
+ !cop->crk_param[4].crp_nbits || !cop->crk_param[5].crp_nbits ||
+ !cop->crk_param[6].crp_nbits || (cop->crk_iparams == 6 &&
+ !cop->crk_param[7].crp_nbits))
+ return -EINVAL;
+
+ dsa_req->m_len = (cop->crk_param[0].crp_nbits + 7)/8;
+ dsa_req->q_len = (cop->crk_param[1].crp_nbits + 7)/8;
+ dsa_req->r_len = (cop->crk_param[2].crp_nbits + 7)/8;
+ dsa_req->g_len = (cop->crk_param[3].crp_nbits + 7)/8;
+ dsa_req->priv_key_len = (cop->crk_param[4].crp_nbits + 7)/8;
+ dsa_req->d_len = (cop->crk_param[6].crp_nbits + 7)/8;
+ buf_size = dsa_req->m_len + dsa_req->q_len + dsa_req->r_len +
+ dsa_req->g_len + dsa_req->priv_key_len + dsa_req->d_len +
+ dsa_req->d_len;
+ if (cop->crk_iparams == 6) {
+ dsa_req->ab_len = (cop->crk_param[5].crp_nbits + 7)/8;
+ buf_size += dsa_req->ab_len;
+ pkc_req->type = ECDSA_SIGN;
+ pkc_req->curve_type = cop->curve_type;
+ } else {
+ pkc_req->type = DSA_SIGN;
+ }
+
+ buf = kzalloc(buf_size, GFP_DMA);
+
+ dsa_req->q = buf;
+ dsa_req->r = dsa_req->q + dsa_req->q_len;
+ dsa_req->g = dsa_req->r + dsa_req->r_len;
+ dsa_req->priv_key = dsa_req->g + dsa_req->g_len;
+ dsa_req->m = dsa_req->priv_key + dsa_req->priv_key_len;
+ dsa_req->c = dsa_req->m + dsa_req->m_len;
+ dsa_req->d = dsa_req->c + dsa_req->d_len;
+ copy_from_user(dsa_req->m, cop->crk_param[0].crp_p, dsa_req->m_len);
+ copy_from_user(dsa_req->q, cop->crk_param[1].crp_p, dsa_req->q_len);
+ copy_from_user(dsa_req->r, cop->crk_param[2].crp_p, dsa_req->r_len);
+ copy_from_user(dsa_req->g, cop->crk_param[3].crp_p, dsa_req->g_len);
+ copy_from_user(dsa_req->priv_key, cop->crk_param[4].crp_p,
+ dsa_req->priv_key_len);
+ if (cop->crk_iparams == 6) {
+ dsa_req->ab = dsa_req->d + dsa_req->d_len;
+ copy_from_user(dsa_req->ab, cop->crk_param[5].crp_p,
+ dsa_req->ab_len);
+ }
+ rc = cryptodev_pkc_offload(pkc);
+ if (pkc->type == SYNCHRONOUS) {
+ if (rc)
+ goto err;
+ if (cop->crk_iparams == 6) {
+ copy_to_user(cop->crk_param[6].crp_p, dsa_req->c,
+ dsa_req->d_len);
+ copy_to_user(cop->crk_param[7].crp_p, dsa_req->d,
+ dsa_req->d_len);
+ } else {
+ copy_to_user(cop->crk_param[5].crp_p, dsa_req->c,
+ dsa_req->d_len);
+ copy_to_user(cop->crk_param[6].crp_p, dsa_req->d,
+ dsa_req->d_len);
+ }
+ } else {
+ if (rc != -EINPROGRESS && rc != 0)
+ goto err;
+
+ pkc->cookie = buf;
+ return rc;
+ }
+err:
+ kfree(buf);
+ return rc;
+}
+
+int crypto_kop_dsaverify(struct cryptodev_pkc *pkc)
+{
+ struct kernel_crypt_kop *kop = &pkc->kop;
+ struct crypt_kop *cop = &kop->kop;
+ struct pkc_request *pkc_req;
+ struct dsa_verify_req_s *dsa_req;
+ int rc, buf_size;
+ uint8_t *buf;
+
+ if (!cop->crk_param[0].crp_nbits || !cop->crk_param[1].crp_nbits ||
+ !cop->crk_param[2].crp_nbits || !cop->crk_param[3].crp_nbits ||
+ !cop->crk_param[4].crp_nbits || !cop->crk_param[5].crp_nbits ||
+ !cop->crk_param[6].crp_nbits || (cop->crk_iparams == 8 &&
+ !cop->crk_param[7].crp_nbits))
+ return -EINVAL;
+
+ pkc_req = &pkc->req;
+ dsa_req = &pkc_req->req_u.dsa_verify;
+ dsa_req->m_len = (cop->crk_param[0].crp_nbits + 7)/8;
+ dsa_req->q_len = (cop->crk_param[1].crp_nbits + 7)/8;
+ dsa_req->r_len = (cop->crk_param[2].crp_nbits + 7)/8;
+ dsa_req->g_len = (cop->crk_param[3].crp_nbits + 7)/8;
+ dsa_req->pub_key_len = (cop->crk_param[4].crp_nbits + 7)/8;
+ dsa_req->d_len = (cop->crk_param[6].crp_nbits + 7)/8;
+ buf_size = dsa_req->m_len + dsa_req->q_len + dsa_req->r_len +
+ dsa_req->g_len + dsa_req->pub_key_len + dsa_req->d_len +
+ dsa_req->d_len;
+ if (cop->crk_iparams == 8) {
+ dsa_req->ab_len = (cop->crk_param[5].crp_nbits + 7)/8;
+ buf_size += dsa_req->ab_len;
+ pkc_req->type = ECDSA_VERIFY;
+ pkc_req->curve_type = cop->curve_type;
+ } else {
+ pkc_req->type = DSA_VERIFY;
+ }
+
+ buf = kzalloc(buf_size, GFP_DMA);
+
+ dsa_req->q = buf;
+ dsa_req->r = dsa_req->q + dsa_req->q_len;
+ dsa_req->g = dsa_req->r + dsa_req->r_len;
+ dsa_req->pub_key = dsa_req->g + dsa_req->g_len;
+ dsa_req->m = dsa_req->pub_key + dsa_req->pub_key_len;
+ dsa_req->c = dsa_req->m + dsa_req->m_len;
+ dsa_req->d = dsa_req->c + dsa_req->d_len;
+ copy_from_user(dsa_req->m, cop->crk_param[0].crp_p, dsa_req->m_len);
+ copy_from_user(dsa_req->q, cop->crk_param[1].crp_p, dsa_req->q_len);
+ copy_from_user(dsa_req->r, cop->crk_param[2].crp_p, dsa_req->r_len);
+ copy_from_user(dsa_req->g, cop->crk_param[3].crp_p, dsa_req->g_len);
+ copy_from_user(dsa_req->pub_key, cop->crk_param[4].crp_p,
+ dsa_req->pub_key_len);
+ if (cop->crk_iparams == 8) {
+ dsa_req->ab = dsa_req->d + dsa_req->d_len;
+ copy_from_user(dsa_req->ab, cop->crk_param[5].crp_p,
+ dsa_req->ab_len);
+ copy_from_user(dsa_req->c, cop->crk_param[6].crp_p,
+ dsa_req->d_len);
+ copy_from_user(dsa_req->d, cop->crk_param[7].crp_p,
+ dsa_req->d_len);
+ } else {
+ copy_from_user(dsa_req->c, cop->crk_param[5].crp_p,
+ dsa_req->d_len);
+ copy_from_user(dsa_req->d, cop->crk_param[6].crp_p,
+ dsa_req->d_len);
+ }
+ rc = cryptodev_pkc_offload(pkc);
+ if (pkc->type == SYNCHRONOUS) {
+ if (rc)
+ goto err;
+ } else {
+ if (rc != -EINPROGRESS && !rc)
+ goto err;
+ pkc->cookie = buf;
+ return rc;
+ }
+err:
+ kfree(buf);
+ return rc;
+}
+
+int crypto_kop_dh_key(struct cryptodev_pkc *pkc)
+{
+ struct kernel_crypt_kop *kop = &pkc->kop;
+ struct crypt_kop *cop = &kop->kop;
+ struct pkc_request *pkc_req;
+ struct dh_key_req_s *dh_req;
+ int buf_size;
+ uint8_t *buf;
+ int rc = -EINVAL;
+
+ pkc_req = &pkc->req;
+ dh_req = &pkc_req->req_u.dh_req;
+ dh_req->s_len = (cop->crk_param[0].crp_nbits + 7)/8;
+ dh_req->pub_key_len = (cop->crk_param[1].crp_nbits + 7)/8;
+ dh_req->q_len = (cop->crk_param[2].crp_nbits + 7)/8;
+ buf_size = dh_req->q_len + dh_req->pub_key_len + dh_req->s_len;
+ if (cop->crk_iparams == 4) {
+ pkc_req->type = ECDH_COMPUTE_KEY;
+ dh_req->ab_len = (cop->crk_param[3].crp_nbits + 7)/8;
+ dh_req->z_len = (cop->crk_param[4].crp_nbits + 7)/8;
+ buf_size += dh_req->ab_len;
+ } else {
+ dh_req->z_len = (cop->crk_param[3].crp_nbits + 7)/8;
+ pkc_req->type = DH_COMPUTE_KEY;
+ }
+ buf_size += dh_req->z_len;
+ buf = kzalloc(buf_size, GFP_DMA);
+ dh_req->q = buf;
+ dh_req->s = dh_req->q + dh_req->q_len;
+ dh_req->pub_key = dh_req->s + dh_req->s_len;
+ dh_req->z = dh_req->pub_key + dh_req->pub_key_len;
+ if (cop->crk_iparams == 4) {
+ dh_req->ab = dh_req->z + dh_req->z_len;
+ pkc_req->curve_type = cop->curve_type;
+ copy_from_user(dh_req->ab, cop->crk_param[3].crp_p,
+ dh_req->ab_len);
+ }
+ copy_from_user(dh_req->s, cop->crk_param[0].crp_p, dh_req->s_len);
+ copy_from_user(dh_req->pub_key, cop->crk_param[1].crp_p,
+ dh_req->pub_key_len);
+ copy_from_user(dh_req->q, cop->crk_param[2].crp_p, dh_req->q_len);
+ rc = cryptodev_pkc_offload(pkc);
+ if (pkc->type == SYNCHRONOUS) {
+ if (rc)
+ goto err;
+ if (cop->crk_iparams == 4)
+ copy_to_user(cop->crk_param[4].crp_p, dh_req->z,
+ dh_req->z_len);
+ else
+ copy_to_user(cop->crk_param[3].crp_p, dh_req->z,
+ dh_req->z_len);
+ } else {
+ if (rc != -EINPROGRESS && rc != 0)
+ goto err;
+
+ pkc->cookie = buf;
+ return rc;
+ }
+err:
+ kfree(buf);
+ return rc;
+}
+
+int crypto_modexp_crt(struct cryptodev_pkc *pkc)
+{
+ struct kernel_crypt_kop *kop = &pkc->kop;
+ struct crypt_kop *cop = &kop->kop;
+ struct pkc_request *pkc_req;
+ struct rsa_priv_frm3_req_s *rsa_req;
+ int rc;
+ uint8_t *buf;
+
+ if (!cop->crk_param[0].crp_nbits || !cop->crk_param[1].crp_nbits ||
+ !cop->crk_param[2].crp_nbits || !cop->crk_param[3].crp_nbits ||
+ !cop->crk_param[4].crp_nbits || !cop->crk_param[5].crp_nbits)
+ return -EINVAL;
+
+ pkc_req = &pkc->req;
+ pkc_req->type = RSA_PRIV_FORM3;
+ rsa_req = &pkc_req->req_u.rsa_priv_f3;
+ rsa_req->p_len = (cop->crk_param[0].crp_nbits + 7)/8;
+ rsa_req->q_len = (cop->crk_param[1].crp_nbits + 7)/8;
+ rsa_req->g_len = (cop->crk_param[2].crp_nbits + 7)/8;
+ rsa_req->dp_len = (cop->crk_param[3].crp_nbits + 7)/8;
+ rsa_req->dq_len = (cop->crk_param[4].crp_nbits + 7)/8;
+ rsa_req->c_len = (cop->crk_param[5].crp_nbits + 7)/8;
+ rsa_req->f_len = (cop->crk_param[6].crp_nbits + 7)/8;
+ buf = kzalloc(rsa_req->p_len + rsa_req->q_len + rsa_req->f_len +
+ rsa_req->dp_len + rsa_req->dp_len + rsa_req->c_len +
+ rsa_req->g_len, GFP_DMA);
+ rsa_req->p = buf;
+ rsa_req->q = rsa_req->p + rsa_req->p_len;
+ rsa_req->g = rsa_req->q + rsa_req->q_len;
+ rsa_req->dp = rsa_req->g + rsa_req->g_len;
+ rsa_req->dq = rsa_req->dp + rsa_req->dp_len;
+ rsa_req->c = rsa_req->dq + rsa_req->dq_len;
+ rsa_req->f = rsa_req->c + rsa_req->c_len;
+ copy_from_user(rsa_req->p, cop->crk_param[0].crp_p, rsa_req->p_len);
+ copy_from_user(rsa_req->q, cop->crk_param[1].crp_p, rsa_req->q_len);
+ copy_from_user(rsa_req->g, cop->crk_param[2].crp_p, rsa_req->g_len);
+ copy_from_user(rsa_req->dp, cop->crk_param[3].crp_p, rsa_req->dp_len);
+ copy_from_user(rsa_req->dq, cop->crk_param[4].crp_p, rsa_req->dq_len);
+ copy_from_user(rsa_req->c, cop->crk_param[5].crp_p, rsa_req->c_len);
+ rc = cryptodev_pkc_offload(pkc);
+
+ if (pkc->type == SYNCHRONOUS) {
+ if (rc)
+ goto err;
+ copy_to_user(cop->crk_param[6].crp_p, rsa_req->f,
+ rsa_req->f_len);
+ } else {
+ if (rc != -EINPROGRESS && rc != 0)
+ goto err;
+
+ pkc->cookie = buf;
+ return rc;
+ }
+err:
+ kfree(buf);
+ return rc;
+}
+
+int crypto_bn_modexp(struct cryptodev_pkc *pkc)
+{
+ struct pkc_request *pkc_req;
+ struct rsa_pub_req_s *rsa_req;
+ int rc;
+ struct kernel_crypt_kop *kop = &pkc->kop;
+ struct crypt_kop *cop = &kop->kop;
+ uint8_t *buf;
+
+ if (!cop->crk_param[0].crp_nbits || !cop->crk_param[1].crp_nbits ||
+ !cop->crk_param[2].crp_nbits || !cop->crk_param[3].crp_nbits)
+ return -EINVAL;
+
+ pkc_req = &pkc->req;
+ pkc_req->type = RSA_PUB;
+ rsa_req = &pkc_req->req_u.rsa_pub_req;
+ rsa_req->f_len = (cop->crk_param[0].crp_nbits + 7)/8;
+ rsa_req->e_len = (cop->crk_param[1].crp_nbits + 7)/8;
+ rsa_req->n_len = (cop->crk_param[2].crp_nbits + 7)/8;
+ rsa_req->g_len = (cop->crk_param[3].crp_nbits + 7)/8;
+ buf = kzalloc(rsa_req->f_len + rsa_req->e_len + rsa_req->n_len
+ + rsa_req->g_len, GFP_DMA);
+ if (!buf)
+ return -ENOMEM;
+
+ rsa_req->e = buf;
+ rsa_req->f = rsa_req->e + rsa_req->e_len;
+ rsa_req->g = rsa_req->f + rsa_req->f_len;
+ rsa_req->n = rsa_req->g + rsa_req->g_len;
+ copy_from_user(rsa_req->f, cop->crk_param[0].crp_p, rsa_req->f_len);
+ copy_from_user(rsa_req->e, cop->crk_param[1].crp_p, rsa_req->e_len);
+ copy_from_user(rsa_req->n, cop->crk_param[2].crp_p, rsa_req->n_len);
+ rc = cryptodev_pkc_offload(pkc);
+ if (pkc->type == SYNCHRONOUS) {
+ if (rc)
+ goto err;
+
+ copy_to_user(cop->crk_param[3].crp_p, rsa_req->g,
+ rsa_req->g_len);
+ } else {
+ if (rc != -EINPROGRESS && rc != 0)
+ goto err;
+
+ /* This one will be freed later in fetch handler */
+ pkc->cookie = buf;
+ return rc;
+ }
+err:
+ kfree(buf);
+ return rc;
+}
+
+int crypto_run_asym(struct cryptodev_pkc *pkc)
+{
+ int ret = -EINVAL;
+ struct kernel_crypt_kop *kop = &pkc->kop;
+
+ switch (kop->kop.crk_op) {
+ case CRK_MOD_EXP:
+ if (kop->kop.crk_iparams != 3 && kop->kop.crk_oparams != 1)
+ goto err;
+
+ ret = crypto_bn_modexp(pkc);
+ break;
+ case CRK_MOD_EXP_CRT:
+ if (kop->kop.crk_iparams != 6 && kop->kop.crk_oparams != 1)
+ goto err;
+
+ ret = crypto_modexp_crt(pkc);
+ break;
+ case CRK_DSA_SIGN:
+ if ((kop->kop.crk_iparams != 5 && kop->kop.crk_iparams != 6) ||
+ kop->kop.crk_oparams != 2)
+ goto err;
+
+ ret = crypto_kop_dsasign(pkc);
+ break;
+ case CRK_DSA_VERIFY:
+ if ((kop->kop.crk_iparams != 7 && kop->kop.crk_iparams != 8) ||
+ kop->kop.crk_oparams != 0)
+ goto err;
+
+ ret = crypto_kop_dsaverify(pkc);
+ break;
+ case CRK_DH_COMPUTE_KEY:
+ if ((kop->kop.crk_iparams != 3 && kop->kop.crk_iparams != 4) ||
+ kop->kop.crk_oparams != 1)
+ goto err;
+ ret = crypto_kop_dh_key(pkc);
+ break;
+ }
+err:
+ return ret;
+}
+
 int crypto_run(struct fcrypt *fcr, struct kernel_crypt_op *kcop)
 {
 struct csession *ses_ptr;
--
2.7.0

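
For context only (not part of the patch): a minimal userspace sketch of how the asynchronous asymmetric ioctls introduced above might be driven. It assumes a single outstanding request on an already-open /dev/crypto descriptor and uses only identifiers defined in this patch (CIOCASYMASYNCRYPT, CIOCASYMASYNFETCH, struct crypt_kop, struct crparam, CRK_MOD_EXP); the function name and parameter setup are illustrative, and error handling is abbreviated.

/* Illustrative sketch, not part of the patch above.
 * Submits one CRK_MOD_EXP request asynchronously, waits for POLLIN
 * (signalled by the patched cryptodev_poll when a completed asymmetric
 * job is queued), then fetches the result.
 * Assumes cfd is an open /dev/crypto descriptor and param[0..3]
 * (base, exponent, modulus, output buffer) are already filled in. */
#include <string.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <crypto/cryptodev.h>

static int mod_exp_async(int cfd, struct crparam param[4])
{
	struct crypt_kop kop;
	struct pollfd pfd = { .fd = cfd, .events = POLLIN };

	memset(&kop, 0, sizeof(kop));
	kop.crk_op = CRK_MOD_EXP;
	kop.crk_iparams = 3;		/* base, exponent, modulus */
	kop.crk_oparams = 1;		/* result */
	memcpy(kop.crk_param, param, 4 * sizeof(*param));

	if (ioctl(cfd, CIOCASYMASYNCRYPT, &kop))	/* submit request */
		return -1;
	if (poll(&pfd, 1, -1) < 0)			/* wait for completion */
		return -1;
	return ioctl(cfd, CIOCASYMASYNFETCH, &kop);	/* collect result */
}

With several requests in flight, the fetch ioctl returns whichever job completed first, so a caller would correlate submissions and completions itself (the crypt_kop cookie field added by the patch is reserved for that purpose).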