Diffstat (limited to 'recipes-kernel/cryptodev/sdk_patches/0032-avoid-calls-to-kmalloc-on-hotpaths.patch')
-rw-r--r-- recipes-kernel/cryptodev/sdk_patches/0032-avoid-calls-to-kmalloc-on-hotpaths.patch | 220
1 file changed, 220 insertions, 0 deletions
diff --git a/recipes-kernel/cryptodev/sdk_patches/0032-avoid-calls-to-kmalloc-on-hotpaths.patch b/recipes-kernel/cryptodev/sdk_patches/0032-avoid-calls-to-kmalloc-on-hotpaths.patch
new file mode 100644
index 0000000..2aa5810
--- /dev/null
+++ b/recipes-kernel/cryptodev/sdk_patches/0032-avoid-calls-to-kmalloc-on-hotpaths.patch
@@ -0,0 +1,220 @@
From 9b513838035c35fd3706bb824edd17d705641439 Mon Sep 17 00:00:00 2001
From: Cristian Stoica <cristian.stoica@nxp.com>
Date: Tue, 12 Jan 2016 15:13:15 +0200
Subject: [PATCH 32/38] avoid calls to kmalloc on hotpaths

We replace a pointer to a small structure with the structure itself to
avoid unnecessary dynamic allocations at runtime. The embedding
structure is itself dynamically allocated and we get a slight increase
in performance from elimination of unnecessary code.

Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
---
 cryptlib.c | 42 +++++++++++++-----------------------------
 cryptlib.h | 14 +++++++-------
 2 files changed, 20 insertions(+), 36 deletions(-)

diff --git a/cryptlib.c b/cryptlib.c
index 4fd29eb..5972fc2 100644
--- a/cryptlib.c
+++ b/cryptlib.c
@@ -178,13 +178,7 @@ int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
 out->stream = stream;
 out->aead = aead;

- out->async.result = kzalloc(sizeof(*out->async.result), GFP_KERNEL);
- if (unlikely(!out->async.result)) {
- ret = -ENOMEM;
- goto error;
- }
-
- init_completion(&out->async.result->completion);
+ init_completion(&out->async.result.completion);

 if (aead == 0) {
 out->async.request = ablkcipher_request_alloc(out->async.s, GFP_KERNEL);
@@ -195,7 +189,7 @@ int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
 }

 ablkcipher_request_set_callback(out->async.request, 0,
- cryptodev_complete, out->async.result);
+ cryptodev_complete, &out->async.result);
 } else {
 out->async.arequest = aead_request_alloc(out->async.as, GFP_KERNEL);
 if (unlikely(!out->async.arequest)) {
@@ -205,7 +199,7 @@ int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
 }

 aead_request_set_callback(out->async.arequest, 0,
- cryptodev_complete, out->async.result);
+ cryptodev_complete, &out->async.result);
 }

 out->init = 1;
@@ -222,7 +216,6 @@ error:
 if (out->async.as)
 crypto_free_aead(out->async.as);
 }
- kfree(out->async.result);

 return ret;
 }
@@ -242,7 +235,6 @@ void cryptodev_cipher_deinit(struct cipher_data *cdata)
 crypto_free_aead(cdata->async.as);
 }

- kfree(cdata->async.result);
 cdata->init = 0;
 }
 }
@@ -279,7 +271,7 @@ ssize_t cryptodev_cipher_encrypt(struct cipher_data *cdata,
 {
 int ret;

- reinit_completion(&cdata->async.result->completion);
+ reinit_completion(&cdata->async.result.completion);

 if (cdata->aead == 0) {
 ablkcipher_request_set_crypt(cdata->async.request,
@@ -293,7 +285,7 @@ ssize_t cryptodev_cipher_encrypt(struct cipher_data *cdata,
 ret = crypto_aead_encrypt(cdata->async.arequest);
 }

- return waitfor(cdata->async.result, ret);
+ return waitfor(&cdata->async.result, ret);
 }

 ssize_t cryptodev_cipher_decrypt(struct cipher_data *cdata,
@@ -302,7 +294,7 @@ ssize_t cryptodev_cipher_decrypt(struct cipher_data *cdata,
 {
 int ret;

- reinit_completion(&cdata->async.result->completion);
+ reinit_completion(&cdata->async.result.completion);
 if (cdata->aead == 0) {
 ablkcipher_request_set_crypt(cdata->async.request,
 (struct scatterlist *)src, dst,
@@ -315,7 +307,7 @@ ssize_t cryptodev_cipher_decrypt(struct cipher_data *cdata,
 ret = crypto_aead_decrypt(cdata->async.arequest);
 }

- return waitfor(cdata->async.result, ret);
+ return waitfor(&cdata->async.result, ret);
 }

 /* Hash functions */
@@ -345,13 +337,7 @@ int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
 hdata->digestsize = crypto_ahash_digestsize(hdata->async.s);
 hdata->alignmask = crypto_ahash_alignmask(hdata->async.s);

- hdata->async.result = kzalloc(sizeof(*hdata->async.result), GFP_KERNEL);
- if (unlikely(!hdata->async.result)) {
- ret = -ENOMEM;
- goto error;
- }
-
- init_completion(&hdata->async.result->completion);
+ init_completion(&hdata->async.result.completion);

 hdata->async.request = ahash_request_alloc(hdata->async.s, GFP_KERNEL);
 if (unlikely(!hdata->async.request)) {
@@ -361,12 +347,11 @@ int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
 }

 ahash_request_set_callback(hdata->async.request, 0,
- cryptodev_complete, hdata->async.result);
+ cryptodev_complete, &hdata->async.result);
 hdata->init = 1;
 return 0;

 error:
- kfree(hdata->async.result);
 crypto_free_ahash(hdata->async.s);
 return ret;
 }
@@ -376,7 +361,6 @@ void cryptodev_hash_deinit(struct hash_data *hdata)
 if (hdata->init) {
 if (hdata->async.request)
 ahash_request_free(hdata->async.request);
- kfree(hdata->async.result);
 if (hdata->async.s)
 crypto_free_ahash(hdata->async.s);
 hdata->init = 0;
@@ -402,24 +386,24 @@ ssize_t cryptodev_hash_update(struct hash_data *hdata,
 {
 int ret;

- reinit_completion(&hdata->async.result->completion);
+ reinit_completion(&hdata->async.result.completion);
 ahash_request_set_crypt(hdata->async.request, sg, NULL, len);

 ret = crypto_ahash_update(hdata->async.request);

- return waitfor(hdata->async.result, ret);
+ return waitfor(&hdata->async.result, ret);
 }

 int cryptodev_hash_final(struct hash_data *hdata, void *output)
 {
 int ret;

- reinit_completion(&hdata->async.result->completion);
+ reinit_completion(&hdata->async.result.completion);
 ahash_request_set_crypt(hdata->async.request, NULL, output, 0);

 ret = crypto_ahash_final(hdata->async.request);

- return waitfor(hdata->async.result, ret);
+ return waitfor(&hdata->async.result, ret);
 }

 int cryptodev_pkc_offload(struct cryptodev_pkc *pkc)
diff --git a/cryptlib.h b/cryptlib.h
index e1c4e3e..d8e8046 100644
--- a/cryptlib.h
+++ b/cryptlib.h
@@ -6,6 +6,11 @@

 #include <linux/version.h>

+struct cryptodev_result {
+ struct completion completion;
+ int err;
+};
+
 struct cipher_data {
 int init; /* 0 uninitialized */
 int blocksize;
@@ -22,7 +27,7 @@ struct cipher_data {
 struct crypto_aead *as;
 struct aead_request *arequest;

- struct cryptodev_result *result;
+ struct cryptodev_result result;
 uint8_t iv[EALG_MAX_BLOCK_LEN];
 } async;
 };
@@ -85,7 +90,7 @@ struct hash_data {
 int alignmask;
 struct {
 struct crypto_ahash *s;
- struct cryptodev_result *result;
+ struct cryptodev_result result;
 struct ahash_request *request;
 } async;
 };
@@ -104,11 +109,6 @@ enum offload_type {
 ASYNCHRONOUS
 };

-struct cryptodev_result {
- struct completion completion;
- int err;
-};
-
 struct cryptodev_pkc {
 struct list_head list; /* To maintain the Jobs in completed
 cryptodev lists */
--
2.7.0
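
For readers skimming the hunks above, the change boils down to one pattern: a small, fixed-size result object that used to be kzalloc'ed separately is embedded directly in its already heap-allocated parent, so each cipher/hash context costs one allocation instead of two, and the extra error path and kfree disappear. The sketch below is a minimal userspace analogue of that pattern, not cryptodev code; the names (struct result, struct crypto_ctx, ctx_create) are hypothetical stand-ins, calloc() stands in for kzalloc(GFP_KERNEL), and a plain int flag stands in for struct completion.

/*
 * Minimal sketch of the "embed instead of allocate" pattern applied by the
 * patch above. Hypothetical userspace analogue, for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

struct result {
	int done;	/* stand-in for struct completion */
	int err;
};

/* Before: the parent keeps only a pointer, so setting up a context needs a
 * second allocation, a second error path and a matching free. */
struct crypto_ctx_before {
	struct result *result;
};

/* After: the result is embedded in the parent, which is heap-allocated
 * anyway, so only the parent allocation remains on the hot path. */
struct crypto_ctx {
	struct result result;
};

static struct crypto_ctx *ctx_create(void)
{
	struct crypto_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;
	/* counterpart of init_completion(&out->async.result.completion) */
	ctx->result.done = 0;
	ctx->result.err = 0;
	return ctx;
}

int main(void)
{
	struct crypto_ctx *ctx = ctx_create();

	if (!ctx)
		return 1;
	/* callbacks receive &ctx->result instead of a separately allocated
	 * pointer; teardown is a single free() with no second cleanup step */
	printf("embedded result at %p inside context at %p\n",
	       (void *)&ctx->result, (void *)ctx);
	free(ctx);
	return 0;
}

One consequence of embedding, visible in the cryptlib.h hunks, is that struct cryptodev_result must be fully defined before struct cipher_data and struct hash_data, which is why the patch also moves its definition to the top of the header.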