Diffstat (limited to 'meta/recipes-kernel/linux/linux-omap-2.6.29/isp/iommu/0004-omap-iommu-simple-virtual-address-space-management.patch')
-rw-r--r--  meta/recipes-kernel/linux/linux-omap-2.6.29/isp/iommu/0004-omap-iommu-simple-virtual-address-space-management.patch | 1083
1 file changed, 1083 insertions, 0 deletions
diff --git a/meta/recipes-kernel/linux/linux-omap-2.6.29/isp/iommu/0004-omap-iommu-simple-virtual-address-space-management.patch b/meta/recipes-kernel/linux/linux-omap-2.6.29/isp/iommu/0004-omap-iommu-simple-virtual-address-space-management.patch
new file mode 100644
index 0000000000..945778b943
--- /dev/null
+++ b/meta/recipes-kernel/linux/linux-omap-2.6.29/isp/iommu/0004-omap-iommu-simple-virtual-address-space-management.patch
@@ -0,0 +1,1083 @@
From 07365182b998af3dc2b79e822b8e21a3f50262c4 Mon Sep 17 00:00:00 2001
From: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
Date: Wed, 28 Jan 2009 21:32:08 +0200
Subject: [PATCH] omap iommu: simple virtual address space management

This patch provides device drivers that have an omap iommu with
address mapping APIs between the device virtual address (iommu), the
physical address and the MPU virtual address. (A brief usage sketch
follows the diffstat below.)

There are 4 possible patterns for iommu virtual address (iova/da) mapping.

    |iova/          mapping         iommu_          page
    | da      pa    va      (d)-(p)-(v)     function            type
  ---------------------------------------------------------------------------
  1 | c       c     c       1 - 1 - 1      _kmap() / _kunmap()      s
  2 | c       c,a   c       1 - 1 - 1      _kmalloc()/ _kfree()     s
  3 | c       d     c       1 - n - 1      _vmap() / _vunmap()      s
  4 | c       d,a   c       1 - n - 1      _vmalloc()/ _vfree()     n*

    'iova':	device iommu virtual address
    'da':	alias of 'iova'
    'pa':	physical address
    'va':	mpu virtual address

    'c':	contiguous memory area
    'd':	discontiguous memory area
    'a':	anonymous memory allocation
    '()':	optional feature

    'n':	a normal page (4KB) size is used.
    's':	multiple iommu superpage sizes (16MB, 1MB, 64KB, 4KB) are used.

    '*':	not yet, but feasible.

Signed-off-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
---
 arch/arm/include/asm/io.h               |    6 +
 arch/arm/mm/ioremap.c                   |   11 +
 arch/arm/plat-omap/include/mach/iovmm.h |   94 ++++
 arch/arm/plat-omap/iovmm.c              |  891 +++++++++++++++++++++++++++++++
 4 files changed, 1002 insertions(+), 0 deletions(-)
 create mode 100644 arch/arm/plat-omap/include/mach/iovmm.h
 create mode 100644 arch/arm/plat-omap/iovmm.c

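For orientation, here is a minimal usage sketch of pattern 4 above. This is
an editor's sketch only: it assumes the driver has already obtained its
'struct iommu *obj' handle (e.g. via the iommu_get() API elsewhere in this
series), and MYDEV_BUF_SIZE is a hypothetical size constant:

	u32 da;
	void *va;

	/* pattern 4: discontiguous, anonymously allocated pages; iovmm picks 'da' */
	da = iommu_vmalloc(obj, 0, MYDEV_BUF_SIZE, 0);
	if (IS_ERR_VALUE(da))
		return (int)da;

	va = da_to_va(obj, da);		/* mpu-side view of the same buffer */
	memset(va, 0, MYDEV_BUF_SIZE);

	/* ... hand 'da' to the device ... */

	iommu_vfree(obj, da);
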
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d2a59cf..cbdadfe 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -75,6 +75,12 @@ extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int);
 extern void __iounmap(volatile void __iomem *addr);
 
 /*
+ * external interface to remap single page with appropriate type
+ */
+extern int ioremap_page(unsigned long virt, unsigned long phys,
+			unsigned int mtype);
+
+/*
  * Bad read/write accesses...
  */
 extern void __readwrite_bug(const char *fn);
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 9f88dd3..8441351 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -110,6 +110,17 @@ static int remap_area_pages(unsigned long start, unsigned long pfn,
 	return err;
 }
 
+int ioremap_page(unsigned long virt, unsigned long phys, unsigned int mtype)
+{
+	const struct mem_type *type;
+
+	type = get_mem_type(mtype);
+	if (!type)
+		return -EINVAL;
+
+	return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, type);
+}
+EXPORT_SYMBOL(ioremap_page);
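+
+/*
+ * Example (illustrative only): vmap_sg() in the new iovmm.c below uses
+ * this to stitch scatterlist pages into one contiguous mpu virtual area,
+ * one page at a time:
+ *
+ *	err = ioremap_page(va, pa, MT_DEVICE);
+ *
+ * with 'va' carved out of the vmalloc region via __get_vm_area() and
+ * 'pa' taken from sg_phys(sg).
+ */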
 
 void __check_kvm_seq(struct mm_struct *mm)
 {
diff --git a/arch/arm/plat-omap/include/mach/iovmm.h b/arch/arm/plat-omap/include/mach/iovmm.h
new file mode 100644
index 0000000..bdc7ce5
--- /dev/null
+++ b/arch/arm/plat-omap/include/mach/iovmm.h
@@ -0,0 +1,94 @@
+/*
+ * omap iommu: simple virtual address space management
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __IOMMU_MMAP_H
+#define __IOMMU_MMAP_H
+
+struct iovm_struct {
+	struct iommu		*iommu;		/* iommu object which this belongs to */
+	u32			da_start;	/* area definition */
+	u32			da_end;
+	u32			flags;		/* IOVMF_: see below */
+	struct list_head	list;		/* linked in ascending order */
+	const struct sg_table	*sgt;		/* keep 'page' <-> 'da' mapping */
+	void			*va;		/* mpu side mapped address */
+};
+
+/*
+ * IOVMF_FLAGS: attributes for an iommu virtual memory area (iovma)
+ *
+ * the lower 16 bits are used for h/w and the upper 16 bits for s/w.
+ */
+#define IOVMF_SW_SHIFT		16
+#define IOVMF_HW_SIZE		(1 << IOVMF_SW_SHIFT)
+#define IOVMF_HW_MASK		(IOVMF_HW_SIZE - 1)
+#define IOVMF_SW_MASK		(~IOVMF_HW_MASK)
+
+/*
+ * iovma: h/w flags derived from cam and ram attribute
+ */
+#define IOVMF_CAM_MASK		(~((1 << 10) - 1))
+#define IOVMF_RAM_MASK		(~IOVMF_CAM_MASK)
+
+#define IOVMF_PGSZ_MASK		(3 << 0)
+#define IOVMF_PGSZ_1M		MMU_CAM_PGSZ_1M
+#define IOVMF_PGSZ_64K		MMU_CAM_PGSZ_64K
+#define IOVMF_PGSZ_4K		MMU_CAM_PGSZ_4K
+#define IOVMF_PGSZ_16M		MMU_CAM_PGSZ_16M
+
+#define IOVMF_ENDIAN_MASK	(1 << 9)
+#define IOVMF_ENDIAN_BIG	MMU_RAM_ENDIAN_BIG
+#define IOVMF_ENDIAN_LITTLE	MMU_RAM_ENDIAN_LITTLE
+
+#define IOVMF_ELSZ_MASK		(3 << 7)
+#define IOVMF_ELSZ_8		MMU_RAM_ELSZ_8
+#define IOVMF_ELSZ_16		MMU_RAM_ELSZ_16
+#define IOVMF_ELSZ_32		MMU_RAM_ELSZ_32
+#define IOVMF_ELSZ_NONE		MMU_RAM_ELSZ_NONE
+
+#define IOVMF_MIXED_MASK	(1 << 6)
+#define IOVMF_MIXED		MMU_RAM_MIXED
+
+/*
+ * iovma: s/w flags, used for mapping and unmapping internally.
+ */
+#define IOVMF_MMIO		(1 << IOVMF_SW_SHIFT)
+#define IOVMF_ALLOC		(2 << IOVMF_SW_SHIFT)
+#define IOVMF_ALLOC_MASK	(3 << IOVMF_SW_SHIFT)
+
+/* "superpages" are supported only with physically linear pages */
+#define IOVMF_DISCONT		(1 << (2 + IOVMF_SW_SHIFT))
+#define IOVMF_LINEAR		(2 << (2 + IOVMF_SW_SHIFT))
+#define IOVMF_LINEAR_MASK	(3 << (2 + IOVMF_SW_SHIFT))
+
+#define IOVMF_DA_FIXED		(1 << (4 + IOVMF_SW_SHIFT))
+#define IOVMF_DA_ANON		(2 << (4 + IOVMF_SW_SHIFT))
+#define IOVMF_DA_MASK		(3 << (4 + IOVMF_SW_SHIFT))
+
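+/*
+ * Example (illustrative only): callers may pass h/w attributes in the
+ * lower 16 bits, e.g.:
+ *
+ *	u32 flags = IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32;
+ *
+ * The s/w bits are managed by the iovmm layer itself: iommu_vmalloc()
+ * masks its argument with IOVMF_HW_MASK, then ORs in IOVMF_DISCONT,
+ * IOVMF_ALLOC and (for da == 0) IOVMF_DA_ANON.
+ */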
+
+extern struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da);
+extern u32 iommu_vmap(struct iommu *obj, u32 da,
+		      const struct sg_table *sgt, u32 flags);
+extern struct sg_table *iommu_vunmap(struct iommu *obj, u32 da);
+extern u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes,
+			 u32 flags);
+extern void iommu_vfree(struct iommu *obj, const u32 da);
+extern u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
+		      u32 flags);
+extern void iommu_kunmap(struct iommu *obj, u32 da);
+extern u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes,
+			 u32 flags);
+extern void iommu_kfree(struct iommu *obj, u32 da);
+
+extern void *da_to_va(struct iommu *obj, u32 da);
+
+#endif /* __IOMMU_MMAP_H */
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
new file mode 100644
index 0000000..6726d10
--- /dev/null
+++ b/arch/arm/plat-omap/iovmm.c
@@ -0,0 +1,891 @@
+/*
+ * omap iommu: simple virtual address space management
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+
+#include <mach/iommu.h>
+#include <mach/iovmm.h>
+
+#include "iopgtable.h"
+
+/*
+ * A device driver needs to create address mappings between:
+ *
+ * - iommu/device address
+ * - physical address
+ * - mpu virtual address
+ *
+ * There are 4 possible patterns for them:
+ *
+ *    |iova/          mapping         iommu_          page
+ *    | da      pa    va      (d)-(p)-(v)     function            type
+ * ---------------------------------------------------------------------------
+ *  1 | c       c     c       1 - 1 - 1      _kmap() / _kunmap()      s
+ *  2 | c       c,a   c       1 - 1 - 1      _kmalloc()/ _kfree()     s
+ *  3 | c       d     c       1 - n - 1      _vmap() / _vunmap()      s
+ *  4 | c       d,a   c       1 - n - 1      _vmalloc()/ _vfree()     n*
+ *
+ *
+ * 'iova':	device iommu virtual address
+ * 'da':	alias of 'iova'
+ * 'pa':	physical address
+ * 'va':	mpu virtual address
+ *
+ * 'c':		contiguous memory area
+ * 'd':		discontiguous memory area
+ * 'a':		anonymous memory allocation
+ * '()':	optional feature
+ *
+ * 'n':		a normal page (4KB) size is used.
+ * 's':		multiple iommu superpage sizes (16MB, 1MB, 64KB, 4KB) are used.
+ *
+ * '*':		not yet, but feasible.
+ */
+
+static struct kmem_cache *iovm_area_cachep;
+
+/* return total bytes of sg buffers */
+static size_t sgtable_len(const struct sg_table *sgt)
+{
+	unsigned int i, total = 0;
+	struct scatterlist *sg;
+
+	if (!sgt)
+		return 0;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		size_t bytes;
+
+		bytes = sg_dma_len(sg);
+
+		if (!iopgsz_ok(bytes)) {
+			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
+			       __func__, i, bytes);
+			return 0;
+		}
+
+		total += bytes;
+	}
+
+	return total;
+}
+#define sgtable_ok(x)	(!!sgtable_len(x))
+
+/*
+ * calculate the optimal number of sg elements from total bytes based on
+ * iommu superpages
+ */
+static unsigned int sgtable_nents(size_t bytes)
+{
+	int i;
+	unsigned int nr_entries;
+	const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
+
+	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
+		pr_err("%s: wrong size %08x\n", __func__, bytes);
+		return 0;
+	}
+
+	nr_entries = 0;
+	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
+		if (bytes >= pagesize[i]) {
+			nr_entries += (bytes / pagesize[i]);
+			bytes %= pagesize[i];
+		}
+	}
+	BUG_ON(bytes);
+
+	return nr_entries;
+}
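+
+/*
+ * Worked example (illustrative only): for bytes = 16MB + 1MB + 64KB + 4KB,
+ * the greedy loop above yields one entry per superpage size, i.e.
+ * sgtable_nents() == 4, instead of the 4369 entries a plain 4KB split
+ * would need.
+ */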
+
+/* allocate and initialize sg_table header (a kind of 'superblock') */
+static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
+{
+	unsigned int nr_entries;
+	int err;
+	struct sg_table *sgt;
+
+	if (!bytes)
+		return ERR_PTR(-EINVAL);
+
+	if (!IS_ALIGNED(bytes, PAGE_SIZE))
+		return ERR_PTR(-EINVAL);
+
+	/* FIXME: IOVMF_DA_FIXED should support 'superpages' */
+	if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
+		nr_entries = sgtable_nents(bytes);
+		if (!nr_entries)
+			return ERR_PTR(-EINVAL);
+	} else
+		nr_entries = bytes / PAGE_SIZE;
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return ERR_PTR(-ENOMEM);
+
+	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
+	if (err) {
+		kfree(sgt);
+		return ERR_PTR(err);
+	}
+
+	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);
+
+	return sgt;
+}
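+
+/*
+ * Note: with IOVMF_LINEAR | IOVMF_DA_ANON the table above is sized by
+ * sgtable_nents() so that superpages can be used; every other case falls
+ * back to one entry per 4KB page (bytes / PAGE_SIZE).
+ */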
+
+/* free sg_table header (a kind of superblock) */
+static void sgtable_free(struct sg_table *sgt)
+{
+	if (!sgt)
+		return;
+
+	sg_free_table(sgt);
+	kfree(sgt);
+
+	pr_debug("%s: sgt:%p\n", __func__, sgt);
+}
+
+/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
+static void *vmap_sg(const struct sg_table *sgt)
+{
+	u32 va;
+	size_t total;
+	unsigned int i;
+	struct scatterlist *sg;
+	struct vm_struct *new;
+
+	total = sgtable_len(sgt);
+	if (!total)
+		return ERR_PTR(-EINVAL);
+
+	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
+	if (!new)
+		return ERR_PTR(-ENOMEM);
+	va = (u32)new->addr;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		size_t bytes;
+		u32 pa;
+		int err;
+
+		pa = sg_phys(sg);
+		bytes = sg_dma_len(sg);
+
+		BUG_ON(bytes != PAGE_SIZE);
+
+		err = ioremap_page(va, pa, MT_DEVICE);
+		if (err)
+			goto err_out;
+
+		va += bytes;
+	}
+
+	flush_cache_vmap(new->addr, total);
+	return new->addr;
+
+err_out:
+	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
+	vunmap(new->addr);
+	return ERR_PTR(-EAGAIN);
+}
+
+static inline void vunmap_sg(const void *va)
+{
+	vunmap(va);
+}
+
+static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
+{
+	struct iovm_struct *tmp;
+
+	list_for_each_entry(tmp, &obj->mmap, list) {
+		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
+			size_t len;
+
+			len = tmp->da_end - tmp->da_start;
+
+			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
+				__func__, tmp->da_start, da, tmp->da_end, len,
+				tmp->flags);
+
+			return tmp;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * find_iovm_area - find iovma which includes @da
+ * @obj: objective iommu
+ * @da: iommu device virtual address
+ *
+ * Find the existing iovma starting at @da
+ */
+struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
+{
+	struct iovm_struct *area;
+
+	mutex_lock(&obj->mmap_lock);
+	area = __find_iovm_area(obj, da);
+	mutex_unlock(&obj->mmap_lock);
+
+	return area;
+}
+EXPORT_SYMBOL_GPL(find_iovm_area);
+
+/*
+ * This finds the hole (area) which fits the requested address and len
+ * in the iovma mmap list, and returns the newly allocated iovma.
+ */
+static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
+					   size_t bytes, u32 flags)
+{
+	struct iovm_struct *new, *tmp;
+	u32 start, prev_end, alignment;
+
+	if (!obj || !bytes)
+		return ERR_PTR(-EINVAL);
+
+	start = da;
+	alignment = PAGE_SIZE;
+
+	if (flags & IOVMF_DA_ANON) {
+		/*
+		 * Reserve the first page for NULL
+		 */
+		start = PAGE_SIZE;
+		if (flags & IOVMF_LINEAR)
+			alignment = iopgsz_max(bytes);
+		start = roundup(start, alignment);
+	}
+
+	tmp = NULL;
+	if (list_empty(&obj->mmap))
+		goto found;
+
+	prev_end = 0;
+	list_for_each_entry(tmp, &obj->mmap, list) {
+
+		if ((prev_end <= start) && (start + bytes < tmp->da_start))
+			goto found;
+
+		if (flags & IOVMF_DA_ANON)
+			start = roundup(tmp->da_end, alignment);
+
+		prev_end = tmp->da_end;
+	}
+
+	if ((start >= prev_end) && (ULONG_MAX - start >= bytes))
+		goto found;
+
+	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
+		__func__, da, bytes, flags);
+
+	return ERR_PTR(-EINVAL);
+
+found:
+	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
+	if (!new)
+		return ERR_PTR(-ENOMEM);
+
+	new->iommu = obj;
+	new->da_start = start;
+	new->da_end = start + bytes;
+	new->flags = flags;
+
+	/*
+	 * keep ascending order of iovmas
+	 */
+	if (tmp)
+		list_add_tail(&new->list, &tmp->list);
+	else
+		list_add(&new->list, &obj->mmap);
+
+	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
+		__func__, new->da_start, start, new->da_end, bytes, flags);
+
+	return new;
+}
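+
+/*
+ * Worked example (illustrative only): with IOVMF_DA_ANON | IOVMF_LINEAR
+ * and bytes = SZ_1M, iopgsz_max() picks SZ_1M, so only 1MB-aligned holes
+ * are considered; and since the first page is reserved, a da of 0 stays
+ * free to mean "let iovmm choose the address" in the mapper calls.
+ */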
+
+static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
+{
+	size_t bytes;
+
+	BUG_ON(!obj || !area);
+
+	bytes = area->da_end - area->da_start;
+
+	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
+		__func__, area->da_start, area->da_end, bytes, area->flags);
+
+	list_del(&area->list);
+	kmem_cache_free(iovm_area_cachep, area);
+}
+
+/**
+ * da_to_va - convert (d) to (v)
+ * @obj: objective iommu
+ * @da: iommu device virtual address
+ *
+ * Returns the mpu virtual addr which corresponds to a given device
+ * virtual addr
+ */
+void *da_to_va(struct iommu *obj, u32 da)
+{
+	void *va = NULL;
+	struct iovm_struct *area;
+
+	mutex_lock(&obj->mmap_lock);
+
+	area = __find_iovm_area(obj, da);
+	if (!area) {
+		dev_warn(obj->dev, "%s: no da area(%08x)\n", __func__, da);
+		goto out;
+	}
+	va = area->va;
+out:
+	mutex_unlock(&obj->mmap_lock);
+
+	return va;
+}
+EXPORT_SYMBOL_GPL(da_to_va);
+
+static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
+{
+	unsigned int i;
+	struct scatterlist *sg;
+	void *va = _va;
+	void *va_end;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		struct page *pg;
+		const size_t bytes = PAGE_SIZE;
+
+		/*
+		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
+		 */
+		pg = vmalloc_to_page(va);
+		BUG_ON(!pg);
+		sg_set_page(sg, pg, bytes, 0);
+
+		va += bytes;
+	}
+
+	va_end = _va + PAGE_SIZE * i;
+	flush_cache_vmap(_va, va_end);
+}
+
+static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
+{
+	/*
+	 * Actually this is not necessary at all; it just exists for
+	 * consistency and code readability.
+	 */
+	BUG_ON(!sgt);
+}
+
+static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
+{
+	unsigned int i;
+	struct scatterlist *sg;
+	void *va;
+	const size_t total = len;
+
+	va = phys_to_virt(pa);
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		size_t bytes;
+
+		bytes = iopgsz_max(len);
+
+		BUG_ON(!iopgsz_ok(bytes));
+
+		sg_set_buf(sg, phys_to_virt(pa), bytes);
+		/*
+		 * 'pa' is contiguous (linear).
+		 */
+		pa += bytes;
+		len -= bytes;
+	}
+	BUG_ON(len);
+
+	clean_dcache_area(va, total);
+}
+
+static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
+{
+	/*
+	 * Actually this is not necessary at all; it just exists for
+	 * consistency and code readability.
+	 */
+	BUG_ON(!sgt);
+}
+
+/* create 'da' <-> 'pa' mapping from 'sgt' */
+static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
+			 const struct sg_table *sgt, u32 flags)
+{
+	int err;
+	unsigned int i, j;
+	struct scatterlist *sg;
+	u32 da = new->da_start;
+
+	if (!obj || !new || !sgt)
+		return -EINVAL;
+
+	BUG_ON(!sgtable_ok(sgt));
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		u32 pa;
+		int pgsz;
+		size_t bytes;
+		struct iotlb_entry e;
+
+		pa = sg_phys(sg);
+		bytes = sg_dma_len(sg);
+
+		flags &= ~IOVMF_PGSZ_MASK;
+		pgsz = bytes_to_iopgsz(bytes);
+		if (pgsz < 0) {
+			err = -EINVAL;
+			goto err_out;
+		}
+		flags |= pgsz;
+
+		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
+			 i, da, pa, bytes);
+
+		iotlb_init_entry(&e, da, pa, flags);
+		err = iopgtable_store_entry(obj, &e);
+		if (err)
+			goto err_out;
+
+		da += bytes;
+	}
+	return 0;
+
+err_out:
+	da = new->da_start;
+
+	for_each_sg(sgt->sgl, sg, i, j) {
+		size_t bytes;
+
+		bytes = iopgtable_clear_entry(obj, da);
+
+		BUG_ON(!iopgsz_ok(bytes));
+
+		da += bytes;
+	}
+	return err;
+}
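+
+/*
+ * Note: on failure, the entries programmed so far are torn down again
+ * via iopgtable_clear_entry() before the error is returned, so a
+ * partially mapped iovma never escapes this function.
+ */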
+
+/* release 'da' <-> 'pa' mapping */
+static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
+{
+	u32 start;
+	size_t total = area->da_end - area->da_start;
+
+	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
+
+	start = area->da_start;
+	while (total > 0) {
+		size_t bytes;
+
+		bytes = iopgtable_clear_entry(obj, start);
+		if (bytes == 0)
+			bytes = PAGE_SIZE;
+		else
+			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
+				__func__, start, bytes, area->flags);
+
+		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+
+		total -= bytes;
+		start += bytes;
+	}
+	BUG_ON(total);
+}
+
+/* template function for all unmapping */
+static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
+				      void (*fn)(const void *), u32 flags)
+{
+	struct sg_table *sgt = NULL;
+	struct iovm_struct *area;
+
+	BUG_ON(in_interrupt());
+
+	if (!IS_ALIGNED(da, PAGE_SIZE)) {
+		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
+		return NULL;
+	}
+
+	mutex_lock(&obj->mmap_lock);
+
+	area = __find_iovm_area(obj, da);
+	if (!area) {
+		dev_err(obj->dev, "%s: no da area(%08x)\n", __func__, da);
+		goto out;
+	}
+
+	if ((area->flags & flags) != flags) {
+		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
+			area->flags);
+		goto out;
+	}
+	sgt = (struct sg_table *)area->sgt;
+
+	unmap_iovm_area(obj, area);
+
+	fn(area->va);
+
+	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
+		area->da_start, da, area->da_end,
+		area->da_end - area->da_start, area->flags);
+
+	free_iovm_area(obj, area);
+out:
+	mutex_unlock(&obj->mmap_lock);
+
+	return sgt;
+}
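+
+/*
+ * Note: 'fn' releases the mpu-side mapping and is paired with how the
+ * area was created: vunmap_sg() for iommu_vunmap(), vfree() for
+ * iommu_vfree(), __iounmap() for iommu_kunmap() and kfree() for
+ * iommu_kfree().
+ */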
+
+static u32 map_iommu_region(struct iommu *obj, u32 da,
+	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
+{
+	int err = -ENOMEM;
+	struct iovm_struct *new;
+
+	mutex_lock(&obj->mmap_lock);
+
+	new = alloc_iovm_area(obj, da, bytes, flags);
+	if (IS_ERR(new)) {
+		err = PTR_ERR(new);
+		goto err_alloc_iovma;
+	}
+	new->va = va;
+	new->sgt = sgt;
+
+	if (map_iovm_area(obj, new, sgt, new->flags))
+		goto err_map;
+
+	mutex_unlock(&obj->mmap_lock);
+
+	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
+		__func__, new->da_start, bytes, new->flags, va);
+
+	return new->da_start;
+
+err_map:
+	free_iovm_area(obj, new);
+err_alloc_iovma:
+	mutex_unlock(&obj->mmap_lock);
+	return err;
+}
+
+static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
+	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
+{
+	return map_iommu_region(obj, da, sgt, va, bytes, flags);
+}
+
+/**
+ * iommu_vmap - (d)-(p)-(v) address mapper
+ * @obj: objective iommu
+ * @da: contiguous iommu virtual memory
+ * @sgt: address of scatter gather table
+ * @flags: iovma and page property
+ *
+ * Creates a 1-n-1 mapping with the given @sgt and returns @da.
+ * All @sgt elements must be io page size aligned.
+ */
+u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
+	       u32 flags)
+{
+	size_t bytes;
+	void *va;
+
+	if (!obj || !obj->dev || !sgt)
+		return -EINVAL;
+
+	bytes = sgtable_len(sgt);
+	if (!bytes)
+		return -EINVAL;
+	bytes = PAGE_ALIGN(bytes);
+
+	va = vmap_sg(sgt);
+	if (IS_ERR(va))
+		return PTR_ERR(va);
+
+	flags &= IOVMF_HW_MASK;
+	flags |= IOVMF_DISCONT;
+	flags |= IOVMF_MMIO;
+	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
+
+	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
+	if (IS_ERR_VALUE(da))
+		vunmap_sg(va);
+
+	return da;
+}
+EXPORT_SYMBOL_GPL(iommu_vmap);
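+
+/*
+ * Example (illustrative only; assumes the caller has filled an sg_table
+ * 'sgt' with iommu-pagesize chunks):
+ *
+ *	u32 da = iommu_vmap(obj, 0, sgt, 0);
+ *	if (!IS_ERR_VALUE(da)) {
+ *		...	device sees one linear area starting at 'da'
+ *		sgt = iommu_vunmap(obj, da);	returned for the caller to free
+ *	}
+ */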
+
+/**
+ * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
+ * @obj: objective iommu
+ * @da: iommu device virtual address
+ *
+ * Frees the iommu virtually contiguous memory area starting at
+ * @da, which was returned by 'iommu_vmap()'.
+ */
+struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
+{
+	struct sg_table *sgt;
+	/*
+	 * 'sgt' was allocated by the caller before 'iommu_vmap()' was
+	 * called; just return 'sgt' to the caller to free.
+	 */
+	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
+	if (!sgt)
+		dev_err(obj->dev, "%s: No sgt\n", __func__);
+	return sgt;
+}
+EXPORT_SYMBOL_GPL(iommu_vunmap);
+
+/**
+ * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
+ * @obj: objective iommu
+ * @da: contiguous iommu virtual memory
+ * @bytes: allocation size
+ * @flags: iovma and page property
+ *
+ * Allocates @bytes linearly and creates a 1-n-1 mapping and returns
+ * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
+ */
+u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
+{
+	void *va;
+	struct sg_table *sgt;
+
+	if (!obj || !obj->dev || !bytes)
+		return -EINVAL;
+
+	bytes = PAGE_ALIGN(bytes);
+
+	va = vmalloc(bytes);
+	if (!va)
+		return -ENOMEM;
+
+	sgt = sgtable_alloc(bytes, flags);
+	if (IS_ERR(sgt)) {
+		da = PTR_ERR(sgt);
+		goto err_sgt_alloc;
+	}
+	sgtable_fill_vmalloc(sgt, va);
+
+	flags &= IOVMF_HW_MASK;
+	flags |= IOVMF_DISCONT;
+	flags |= IOVMF_ALLOC;
+	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
+
+	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
+	if (IS_ERR_VALUE(da))
+		goto err_iommu_vmap;
+
+	return da;
+
+err_iommu_vmap:
+	sgtable_drain_vmalloc(sgt);
+	sgtable_free(sgt);
+err_sgt_alloc:
+	vfree(va);
+	return da;
+}
+EXPORT_SYMBOL_GPL(iommu_vmalloc);
+
+/**
+ * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
+ * @obj: objective iommu
+ * @da: iommu device virtual address
+ *
+ * Frees the iommu virtually contiguous memory area starting at
+ * @da, as obtained from 'iommu_vmalloc()'.
+ */
+void iommu_vfree(struct iommu *obj, const u32 da)
+{
+	struct sg_table *sgt;
+
+	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
+	if (!sgt)
+		dev_err(obj->dev, "%s: No sgt\n", __func__);
+	sgtable_free(sgt);
+}
+EXPORT_SYMBOL_GPL(iommu_vfree);
+
+static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
+			size_t bytes, u32 flags)
+{
+	struct sg_table *sgt;
+
+	sgt = sgtable_alloc(bytes, flags);
+	if (IS_ERR(sgt))
+		return PTR_ERR(sgt);
+
+	sgtable_fill_kmalloc(sgt, pa, bytes);
+
+	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
+	if (IS_ERR_VALUE(da)) {
+		sgtable_drain_kmalloc(sgt);
+		sgtable_free(sgt);
+	}
+
+	return da;
+}
+
+/**
+ * iommu_kmap - (d)-(p)-(v) address mapper
+ * @obj: objective iommu
+ * @da: contiguous iommu virtual memory
+ * @pa: contiguous physical memory
+ * @bytes: size of the memory to map
+ * @flags: iovma and page property
+ *
+ * Creates a 1-1-1 mapping and returns @da again, which can be
+ * adjusted if 'IOVMF_DA_ANON' is set.
+ */
+u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
+	       u32 flags)
+{
+	void *va;
+
+	if (!obj || !obj->dev || !bytes)
+		return -EINVAL;
+
+	bytes = PAGE_ALIGN(bytes);
+
+	va = ioremap(pa, bytes);
+	if (!va)
+		return -ENOMEM;
+
+	flags &= IOVMF_HW_MASK;
+	flags |= IOVMF_LINEAR;
+	flags |= IOVMF_MMIO;
+	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
+
+	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
+	if (IS_ERR_VALUE(da))
+		iounmap(va);
+
+	return da;
+}
+EXPORT_SYMBOL_GPL(iommu_kmap);
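+
+/*
+ * Example (illustrative only): mapping a physically contiguous buffer,
+ * e.g. a hypothetical framebuffer at 'fb_pa', at a fixed device address:
+ *
+ *	u32 da = iommu_kmap(obj, 0x20000000, fb_pa, SZ_1M, 0);
+ *	...
+ *	iommu_kunmap(obj, da);
+ */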
+
+/**
+ * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
+ * @obj: objective iommu
+ * @da: iommu device virtual address
+ *
+ * Frees the iommu virtually contiguous memory area starting at
+ * @da, which was passed to and was returned by 'iommu_kmap()'.
+ */
+void iommu_kunmap(struct iommu *obj, u32 da)
+{
+	struct sg_table *sgt;
+
+	sgt = unmap_vm_area(obj, da, __iounmap, IOVMF_LINEAR | IOVMF_MMIO);
+	if (!sgt)
+		dev_err(obj->dev, "%s: No sgt\n", __func__);
+	sgtable_free(sgt);
+}
+EXPORT_SYMBOL_GPL(iommu_kunmap);
+
+/**
+ * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
+ * @obj: objective iommu
+ * @da: contiguous iommu virtual memory
+ * @bytes: bytes for allocation
+ * @flags: iovma and page property
+ *
+ * Allocates @bytes linearly and creates a 1-1-1 mapping and returns
+ * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
+ */
+u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
+{
+	void *va;
+	u32 pa;
+
+	if (!obj || !obj->dev || !bytes)
+		return -EINVAL;
+
+	bytes = PAGE_ALIGN(bytes);
+
+	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
+	if (!va)
+		return -ENOMEM;
+	pa = virt_to_phys(va);
+
+	flags &= IOVMF_HW_MASK;
+	flags |= IOVMF_LINEAR;
+	flags |= IOVMF_ALLOC;
+	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
+
+	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
+	if (IS_ERR_VALUE(da))
+		kfree(va);
+
+	return da;
+}
+EXPORT_SYMBOL_GPL(iommu_kmalloc);
+
+/**
+ * iommu_kfree - release memory allocated by 'iommu_kmalloc()'
+ * @obj: objective iommu
+ * @da: iommu device virtual address
+ *
+ * Frees the iommu virtually contiguous memory area starting at
+ * @da, which was passed to and was returned by 'iommu_kmalloc()'.
+ */
+void iommu_kfree(struct iommu *obj, u32 da)
+{
+	struct sg_table *sgt;
+
+	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
+	if (!sgt)
+		dev_err(obj->dev, "%s: No sgt\n", __func__);
+	sgtable_free(sgt);
+}
+EXPORT_SYMBOL_GPL(iommu_kfree);
+
+
+static int __init iovmm_init(void)
+{
+	const unsigned long flags = SLAB_HWCACHE_ALIGN;
+	struct kmem_cache *p;
+
+	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
+			      flags, NULL);
+	if (!p)
+		return -ENOMEM;
+	iovm_area_cachep = p;
+
+	return 0;
+}
+module_init(iovmm_init);
+
+static void __exit iovmm_exit(void)
+{
+	kmem_cache_destroy(iovm_area_cachep);
+}
+module_exit(iovmm_exit);
+
+MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
+MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
+MODULE_LICENSE("GPL v2");
--
1.5.6.5
