summaryrefslogtreecommitdiffstats
path: root/meta/recipes-kernel/linux/linux-omap-2.6.29/isp/iommu/0001-omap-iommu-tlb-and-pagetable-primitives.patch
diff options
context:
space:
mode:
authorRichard Purdie <rpurdie@linux.intel.com>2010-08-27 15:14:24 +0100
committerRichard Purdie <rpurdie@linux.intel.com>2010-08-27 15:29:45 +0100
commit29d6678fd546377459ef75cf54abeef5b969b5cf (patch)
tree8edd65790e37a00d01c3f203f773fe4b5012db18 /meta/recipes-kernel/linux/linux-omap-2.6.29/isp/iommu/0001-omap-iommu-tlb-and-pagetable-primitives.patch
parentda49de6885ee1bc424e70bc02f21f6ab920efb55 (diff)
downloadpoky-29d6678fd546377459ef75cf54abeef5b969b5cf.tar.gz
Major layout change to the packages directory
Having one monolithic packages directory makes it hard to find things and is generally overwhelming. This commit splits it into several logical sections roughly based on function, recipes.txt gives more information about the classifications used. The opportunity is also used to switch from "packages" to "recipes" as used in OpenEmbedded as the term "packages" can be confusing to people and has many different meanings. Not all recipes have been classified yet, this is just a first pass at separating things out. Some packages are moved to meta-extras as they're no longer actively used or maintained. Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
Diffstat (limited to 'meta/recipes-kernel/linux/linux-omap-2.6.29/isp/iommu/0001-omap-iommu-tlb-and-pagetable-primitives.patch')
-rw-r--r--meta/recipes-kernel/linux/linux-omap-2.6.29/isp/iommu/0001-omap-iommu-tlb-and-pagetable-primitives.patch1226
1 files changed, 1226 insertions, 0 deletions
diff --git a/meta/recipes-kernel/linux/linux-omap-2.6.29/isp/iommu/0001-omap-iommu-tlb-and-pagetable-primitives.patch b/meta/recipes-kernel/linux/linux-omap-2.6.29/isp/iommu/0001-omap-iommu-tlb-and-pagetable-primitives.patch
new file mode 100644
index 0000000000..c2c9bc2b62
--- /dev/null
+++ b/meta/recipes-kernel/linux/linux-omap-2.6.29/isp/iommu/0001-omap-iommu-tlb-and-pagetable-primitives.patch
@@ -0,0 +1,1226 @@
1From a62a047ed02162573e4bece18ecf8bdd66ccd06b Mon Sep 17 00:00:00 2001
2From: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
3Date: Mon, 26 Jan 2009 15:13:40 +0200
4Subject: [PATCH] omap iommu: tlb and pagetable primitives
5
6This patch provides:
7
8- iotlb_*() : iommu tlb operations
9- iopgtable_*() : iommu pagetable(twl) operations
10- iommu_*() : the other generic operations
11
12and the entry points to register and acquire iommu object.
13
14Signed-off-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
15---
16 arch/arm/plat-omap/include/mach/iommu.h | 157 +++++
17 arch/arm/plat-omap/iommu.c | 953 +++++++++++++++++++++++++++++++
18 arch/arm/plat-omap/iopgtable.h | 72 +++
19 3 files changed, 1182 insertions(+), 0 deletions(-)
20 create mode 100644 arch/arm/plat-omap/include/mach/iommu.h
21 create mode 100644 arch/arm/plat-omap/iommu.c
22 create mode 100644 arch/arm/plat-omap/iopgtable.h
23
24diff --git a/arch/arm/plat-omap/include/mach/iommu.h b/arch/arm/plat-omap/include/mach/iommu.h
25new file mode 100644
26index 0000000..ef04d7a
27--- /dev/null
28+++ b/arch/arm/plat-omap/include/mach/iommu.h
29@@ -0,0 +1,157 @@
30+/*
31+ * omap iommu: main structures
32+ *
33+ * Copyright (C) 2008-2009 Nokia Corporation
34+ *
35+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
36+ *
37+ * This program is free software; you can redistribute it and/or modify
38+ * it under the terms of the GNU General Public License version 2 as
39+ * published by the Free Software Foundation.
40+ */
41+
42+#ifndef __MACH_IOMMU_H
43+#define __MACH_IOMMU_H
44+
45+struct iotlb_entry {
46+ u32 da;
47+ u32 pa;
48+ u32 pgsz, prsvd, valid;
49+ union {
50+ u16 ap;
51+ struct {
52+ u32 endian, elsz, mixed;
53+ };
54+ };
55+};
56+
57+struct iommu {
58+ const char *name;
59+ struct module *owner;
60+ struct clk *clk;
61+ void __iomem *regbase;
62+ struct device *dev;
63+
64+ unsigned int refcount;
65+ struct mutex iommu_lock; /* global for this whole object */
66+
67+ /*
68+ * We don't change iopgd for a situation like pgd for a task,
69+ * but share it globally for each iommu.
70+ */
71+ u32 *iopgd;
72+ spinlock_t page_table_lock; /* protect iopgd */
73+
74+ int nr_tlb_entries;
75+
76+ struct list_head mmap;
77+ struct mutex mmap_lock; /* protect mmap */
78+
79+ int (*isr)(struct iommu *obj);
80+
81+ void *ctx; /* iommu context: registers saved area */
82+};
83+
84+struct cr_regs {
85+ union {
86+ struct {
87+ u16 cam_l;
88+ u16 cam_h;
89+ };
90+ u32 cam;
91+ };
92+ union {
93+ struct {
94+ u16 ram_l;
95+ u16 ram_h;
96+ };
97+ u32 ram;
98+ };
99+};
100+
101+struct iotlb_lock {
102+ short base;
103+ short vict;
104+};
105+
106+/* architecture specific functions */
107+struct iommu_functions {
108+ unsigned long version;
109+
110+ int (*enable)(struct iommu *obj);
111+ void (*disable)(struct iommu *obj);
112+ u32 (*fault_isr)(struct iommu *obj, u32 *ra);
113+
114+ void (*tlb_read_cr)(struct iommu *obj, struct cr_regs *cr);
115+ void (*tlb_load_cr)(struct iommu *obj, struct cr_regs *cr);
116+
117+ struct cr_regs *(*alloc_cr)(struct iommu *obj, struct iotlb_entry *e);
118+ int (*cr_valid)(struct cr_regs *cr);
119+ u32 (*cr_to_virt)(struct cr_regs *cr);
120+ void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
121+ ssize_t (*dump_cr)(struct iommu *obj, struct cr_regs *cr, char *buf);
122+
123+ u32 (*get_pte_attr)(struct iotlb_entry *e);
124+
125+ void (*save_ctx)(struct iommu *obj);
126+ void (*restore_ctx)(struct iommu *obj);
127+ ssize_t (*dump_ctx)(struct iommu *obj, char *buf);
128+};
129+
130+struct iommu_platform_data {
131+ const char *name;
132+ const char *clk_name;
133+ const int nr_tlb_entries;
134+};
135+
136+#include <mach/iommu2.h>
137+
138+/*
139+ * utilities for super page(16MB, 1MB, 64KB and 4KB)
140+ */
141+
142+#define iopgsz_max(bytes) \
143+ (((bytes) >= SZ_16M) ? SZ_16M : \
144+ ((bytes) >= SZ_1M) ? SZ_1M : \
145+ ((bytes) >= SZ_64K) ? SZ_64K : \
146+ ((bytes) >= SZ_4K) ? SZ_4K : 0)
147+
148+#define bytes_to_iopgsz(bytes) \
149+ (((bytes) == SZ_16M) ? MMU_CAM_PGSZ_16M : \
150+ ((bytes) == SZ_1M) ? MMU_CAM_PGSZ_1M : \
151+ ((bytes) == SZ_64K) ? MMU_CAM_PGSZ_64K : \
152+ ((bytes) == SZ_4K) ? MMU_CAM_PGSZ_4K : -1)
153+
154+#define iopgsz_to_bytes(iopgsz) \
155+ (((iopgsz) == MMU_CAM_PGSZ_16M) ? SZ_16M : \
156+ ((iopgsz) == MMU_CAM_PGSZ_1M) ? SZ_1M : \
157+ ((iopgsz) == MMU_CAM_PGSZ_64K) ? SZ_64K : \
158+ ((iopgsz) == MMU_CAM_PGSZ_4K) ? SZ_4K : 0)
159+
160+#define iopgsz_ok(bytes) (bytes_to_iopgsz(bytes) >= 0)
161+
162+/*
163+ * global functions
164+ */
165+extern u32 iommu_arch_version(void);
166+
167+extern int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e);
168+extern void flush_iotlb_page(struct iommu *obj, u32 da);
169+extern void flush_iotlb_range(struct iommu *obj, u32 start, u32 end);
170+extern void flush_iotlb_all(struct iommu *obj);
171+
172+ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf);
173+
174+extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e);
175+extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova);
176+
177+extern struct iommu *iommu_get(const char *name);
178+extern void iommu_put(struct iommu *obj);
179+
180+extern void iommu_save_ctx(struct iommu *obj);
181+extern void iommu_restore_ctx(struct iommu *obj);
182+
183+extern int install_iommu_arch(const struct iommu_functions *ops);
184+extern void uninstall_iommu_arch(const struct iommu_functions *ops);
185+
186+#endif /* __MACH_IOMMU_H */
187diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
188new file mode 100644
189index 0000000..e638883
190--- /dev/null
191+++ b/arch/arm/plat-omap/iommu.c
192@@ -0,0 +1,953 @@
193+/*
194+ * omap iommu: tlb and pagetable primitives
195+ *
196+ * Copyright (C) 2008-2009 Nokia Corporation
197+ *
198+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
199+ * Paul Mundt and Toshihiro Kobayashi
200+ *
201+ * This program is free software; you can redistribute it and/or modify
202+ * it under the terms of the GNU General Public License version 2 as
203+ * published by the Free Software Foundation.
204+ */
205+
206+#include <linux/err.h>
207+#include <linux/module.h>
208+#include <linux/interrupt.h>
209+#include <linux/ioport.h>
210+#include <linux/clk.h>
211+#include <linux/platform_device.h>
212+
213+#include <asm/io.h>
214+#include <asm/cacheflush.h>
215+
216+#include <mach/clock.h>
217+#include <mach/iommu.h>
218+
219+#include "iopgtable.h"
220+
221+/* accommodate the difference between omap1 and omap2/3 */
222+static const struct iommu_functions *arch_iommu;
223+
224+static struct platform_driver omap_iommu_driver;
225+static struct kmem_cache *iopte_cachep;
226+
227+/**
228+ * install_iommu_arch() - Install architecture specific iommu functions
229+ * @ops: a pointer to architecture specific iommu functions
230+ *
231+ * There are several kinds of iommu algorithms (tlb, pagetable) among
232+ * omap series. This interface installs such an iommu algorithm.
233+ **/
234+int install_iommu_arch(const struct iommu_functions *ops)
235+{
236+ if (arch_iommu)
237+ return -EBUSY;
238+
239+ arch_iommu = ops;
240+ return 0;
241+}
242+EXPORT_SYMBOL_GPL(install_iommu_arch);
243+
244+/**
245+ * uninstall_iommu_arch() - Uninstall architecture specific iommu functions
246+ * @ops: a pointer to architecture specific iommu functions
247+ *
248+ * This interface uninstalls the iommu algorithm installed previously.
249+ **/
250+void uninstall_iommu_arch(const struct iommu_functions *ops)
251+{
252+ if (arch_iommu != ops)
253+ pr_err("%s: not your arch\n", __func__);
254+
255+ arch_iommu = NULL;
256+}
257+EXPORT_SYMBOL_GPL(uninstall_iommu_arch);
258+
259+/**
260+ * iommu_save_ctx() - Save registers for pm off-mode support
261+ * @obj: target iommu
262+ **/
263+void iommu_save_ctx(struct iommu *obj)
264+{
265+ arch_iommu->save_ctx(obj);
266+}
267+EXPORT_SYMBOL_GPL(iommu_save_ctx);
268+
269+/**
270+ * iommu_restore_ctx() - Restore registers for pm off-mode support
271+ * @obj: target iommu
272+ **/
273+void iommu_restore_ctx(struct iommu *obj)
274+{
275+ arch_iommu->restore_ctx(obj);
276+}
277+EXPORT_SYMBOL_GPL(iommu_restore_ctx);
278+
279+/**
280+ * iommu_arch_version() - Return running iommu arch version
281+ **/
282+u32 iommu_arch_version(void)
283+{
284+ return arch_iommu->version;
285+}
286+EXPORT_SYMBOL_GPL(iommu_arch_version);
287+
288+static int iommu_enable(struct iommu *obj)
289+{
290+ int err;
291+
292+ if (!obj)
293+ return -EINVAL;
294+
295+ clk_enable(obj->clk);
296+
297+ err = arch_iommu->enable(obj);
298+
299+ clk_disable(obj->clk);
300+ return err;
301+}
302+
303+static void iommu_disable(struct iommu *obj)
304+{
305+ if (!obj)
306+ return;
307+
308+ clk_enable(obj->clk);
309+
310+ arch_iommu->disable(obj);
311+
312+ clk_disable(obj->clk);
313+}
314+
315+#ifdef DEBUG
316+static ssize_t iommu_dump_ctx(struct iommu *obj, char *buf)
317+{
318+ if (!obj || !buf)
319+ return -EINVAL;
320+
321+ return arch_iommu->dump_ctx(obj, buf);
322+}
323+#endif
324+
325+/*
326+ * TLB operations
327+ */
328+static inline void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
329+{
330+ BUG_ON(!cr || !e);
331+
332+ arch_iommu->cr_to_e(cr, e);
333+}
334+
335+static inline int iotlb_cr_valid(struct cr_regs *cr)
336+{
337+ if (!cr)
338+ return -EINVAL;
339+
340+ return arch_iommu->cr_valid(cr);
341+}
342+
343+static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
344+ struct iotlb_entry *e)
345+{
346+ if (!e)
347+ return NULL;
348+
349+ return arch_iommu->alloc_cr(obj, e);
350+}
351+
352+static inline u32 iotlb_cr_to_virt(struct cr_regs *cr)
353+{
354+ return arch_iommu->cr_to_virt(cr);
355+}
356+
357+static u32 get_iopte_attr(struct iotlb_entry *e)
358+{
359+ return arch_iommu->get_pte_attr(e);
360+}
361+
362+static u32 iommu_report_fault(struct iommu *obj, u32 *da)
363+{
364+ return arch_iommu->fault_isr(obj, da);
365+}
366+
367+static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
368+{
369+ u32 val;
370+
371+ val = iommu_read_reg(obj, MMU_LOCK);
372+
373+ l->base = MMU_LOCK_BASE(val);
374+ l->vict = MMU_LOCK_VICT(val);
375+
376+ BUG_ON(l->base != 0); /* Currently no preservation is used */
377+}
378+
379+static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
380+{
381+ u32 val;
382+
383+ BUG_ON(l->base != 0); /* Currently no preservation is used */
384+
385+ val = (l->base << MMU_LOCK_BASE_SHIFT);
386+ val |= (l->vict << MMU_LOCK_VICT_SHIFT);
387+
388+ iommu_write_reg(obj, val, MMU_LOCK);
389+}
390+
391+static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
392+{
393+ arch_iommu->tlb_read_cr(obj, cr);
394+}
395+
396+static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
397+{
398+ arch_iommu->tlb_load_cr(obj, cr);
399+
400+ iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
401+ iommu_write_reg(obj, 1, MMU_LD_TLB);
402+}
403+
404+/**
405+ * iotlb_dump_cr() - Dump an iommu tlb entry into buf
406+ * @obj: target iommu
407+ * @cr: contents of cam and ram register
408+ * @buf: output buffer
409+ **/
410+ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf)
411+{
412+ BUG_ON(!cr || !buf);
413+
414+ return arch_iommu->dump_cr(obj, cr, buf);
415+}
416+EXPORT_SYMBOL_GPL(iotlb_dump_cr);
417+
418+/**
419+ * load_iotlb_entry() - Set an iommu tlb entry
420+ * @obj: target iommu
421+ * @e: an iommu tlb entry info
422+ **/
423+int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
424+{
425+ int i;
426+ int err = 0;
427+ struct iotlb_lock l;
428+ struct cr_regs *cr;
429+
430+ if (!obj || !obj->nr_tlb_entries || !e)
431+ return -EINVAL;
432+
433+ clk_enable(obj->clk);
434+
435+ for (i = 0; i < obj->nr_tlb_entries; i++) {
436+ struct cr_regs tmp;
437+
438+ iotlb_lock_get(obj, &l);
439+ l.vict = i;
440+ iotlb_lock_set(obj, &l);
441+ iotlb_read_cr(obj, &tmp);
442+ if (!iotlb_cr_valid(&tmp))
443+ break;
444+ }
445+
446+ if (i == obj->nr_tlb_entries) {
447+ dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
448+ err = -EBUSY;
449+ goto out;
450+ }
451+
452+ cr = iotlb_alloc_cr(obj, e);
453+ if (IS_ERR(cr)) {
454+ clk_disable(obj->clk);
455+ return PTR_ERR(cr);
456+ }
457+
458+ iotlb_load_cr(obj, cr);
459+ kfree(cr);
460+
461+ /* increment victim for next tlb load */
462+ if (++l.vict == obj->nr_tlb_entries)
463+ l.vict = 0;
464+ iotlb_lock_set(obj, &l);
465+out:
466+ clk_disable(obj->clk);
467+ return err;
468+}
469+EXPORT_SYMBOL_GPL(load_iotlb_entry);
470+
471+/**
472+ * flush_iotlb_page() - Clear an iommu tlb entry
473+ * @obj: target iommu
474+ * @da: iommu device virtual address
475+ *
476+ * Clear an iommu tlb entry which includes 'da' address.
477+ **/
478+void flush_iotlb_page(struct iommu *obj, u32 da)
479+{
480+ struct iotlb_lock l;
481+ int i;
482+
483+ clk_enable(obj->clk);
484+
485+ for (i = 0; i < obj->nr_tlb_entries; i++) {
486+ struct cr_regs cr;
487+ u32 start;
488+ size_t bytes;
489+
490+ iotlb_lock_get(obj, &l);
491+ l.vict = i;
492+ iotlb_lock_set(obj, &l);
493+ iotlb_read_cr(obj, &cr);
494+ if (!iotlb_cr_valid(&cr))
495+ continue;
496+
497+ start = iotlb_cr_to_virt(&cr);
498+ bytes = iopgsz_to_bytes(cr.cam & 3);
499+
500+ if ((start <= da) && (da < start + bytes)) {
501+ dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
502+ __func__, start, da, bytes);
503+
504+ iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
505+ }
506+ }
507+ clk_disable(obj->clk);
508+
509+ if (i == obj->nr_tlb_entries)
510+ dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
511+}
512+EXPORT_SYMBOL_GPL(flush_iotlb_page);
513+
514+/**
515+ * flush_iotlb_range() - Clear an iommu tlb entries
516+ * @obj: target iommu
517+ * @start: iommu device virtual address(start)
518+ * @end: iommu device virtual address(end)
519+ *
520+ * Clear iommu tlb entries within the address range from 'start' to 'end'.
521+ **/
522+void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
523+{
524+ u32 da = start;
525+
526+ while (da < end) {
527+ flush_iotlb_page(obj, da);
528+ /* FIXME: Optimize for multiple page size */
529+ da += IOPTE_SIZE;
530+ }
531+}
532+EXPORT_SYMBOL_GPL(flush_iotlb_range);
533+
534+/**
535+ * flush_iotlb_all() - Clear all iommu tlb entries
536+ * @obj: target iommu
537+ **/
538+void flush_iotlb_all(struct iommu *obj)
539+{
540+ struct iotlb_lock l;
541+
542+ clk_enable(obj->clk);
543+
544+ l.base = 0;
545+ l.vict = 0;
546+ iotlb_lock_set(obj, &l);
547+
548+ iommu_write_reg(obj, 1, MMU_GFLUSH);
549+
550+ clk_disable(obj->clk);
551+}
552+EXPORT_SYMBOL_GPL(flush_iotlb_all);
553+
554+/*
555+ * H/W pagetable operations
556+ */
557+static void flush_iopgd_range(u32 *first, u32 *last)
558+{
559+ /* FIXME: L2 cache should be taken care of if it exists */
560+ do {
561+ asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
562+ : : "r" (first));
563+ first += L1_CACHE_BYTES / sizeof(*first);
564+ } while (first <= last);
565+}
566+
567+static void flush_iopte_range(u32 *first, u32 *last)
568+{
569+ /* FIXME: L2 cache should be taken care of if it exists */
570+ do {
571+ asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
572+ : : "r" (first));
573+ first += L1_CACHE_BYTES / sizeof(*first);
574+ } while (first <= last);
575+}
576+
577+static void iopte_free(u32 *iopte)
578+{
579+ /* Note: freed iopte's must be clean ready for re-use */
580+ kmem_cache_free(iopte_cachep, iopte);
581+}
582+
583+static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
584+{
585+ u32 *iopte;
586+
587+ /* a table has already existed */
588+ if (*iopgd)
589+ goto pte_ready;
590+
591+ /*
592+ * do the allocation outside the page table lock
593+ */
594+ spin_unlock(&obj->page_table_lock);
595+ iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
596+ spin_lock(&obj->page_table_lock);
597+
598+ if (!*iopgd) {
599+ if (!iopte)
600+ return ERR_PTR(-ENOMEM);
601+
602+ *iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
603+ flush_iopgd_range(iopgd, iopgd);
604+
605+ dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
606+ } else {
607+ /* We raced, free the redundant table */
608+ iopte_free(iopte);
609+ }
610+
611+pte_ready:
612+ iopte = iopte_offset(iopgd, da);
613+
614+ dev_vdbg(obj->dev,
615+ "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
616+ __func__, da, iopgd, *iopgd, iopte, *iopte);
617+
618+ return iopte;
619+}
620+
621+static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
622+{
623+ u32 *iopgd = iopgd_offset(obj, da);
624+
625+ *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
626+ flush_iopgd_range(iopgd, iopgd);
627+ return 0;
628+}
629+
630+static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
631+{
632+ u32 *iopgd = iopgd_offset(obj, da);
633+ int i;
634+
635+ for (i = 0; i < 16; i++)
636+ *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
637+ flush_iopgd_range(iopgd, iopgd + 15);
638+ return 0;
639+}
640+
641+static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
642+{
643+ u32 *iopgd = iopgd_offset(obj, da);
644+ u32 *iopte = iopte_alloc(obj, iopgd, da);
645+
646+ if (IS_ERR(iopte))
647+ return PTR_ERR(iopte);
648+
649+ *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
650+ flush_iopte_range(iopte, iopte);
651+
652+ dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
653+ __func__, da, pa, iopte, *iopte);
654+
655+ return 0;
656+}
657+
658+static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
659+{
660+ u32 *iopgd = iopgd_offset(obj, da);
661+ u32 *iopte = iopte_alloc(obj, iopgd, da);
662+ int i;
663+
664+ if (IS_ERR(iopte))
665+ return PTR_ERR(iopte);
666+
667+ for (i = 0; i < 16; i++)
668+ *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
669+ flush_iopte_range(iopte, iopte + 15);
670+ return 0;
671+}
672+
673+static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
674+{
675+ int (*fn)(struct iommu *, u32, u32, u32);
676+ u32 prot;
677+ int err;
678+
679+ if (!obj || !e)
680+ return -EINVAL;
681+
682+ switch (e->pgsz) {
683+ case MMU_CAM_PGSZ_16M:
684+ fn = iopgd_alloc_super;
685+ break;
686+ case MMU_CAM_PGSZ_1M:
687+ fn = iopgd_alloc_section;
688+ break;
689+ case MMU_CAM_PGSZ_64K:
690+ fn = iopte_alloc_large;
691+ break;
692+ case MMU_CAM_PGSZ_4K:
693+ fn = iopte_alloc_page;
694+ break;
695+ default:
696+ fn = NULL;
697+ BUG();
698+ break;
699+ }
700+
701+ prot = get_iopte_attr(e);
702+
703+ spin_lock(&obj->page_table_lock);
704+ err = fn(obj, e->da, e->pa, prot);
705+ spin_unlock(&obj->page_table_lock);
706+
707+ return err;
708+}
709+
710+#ifdef DEBUG
711+static void dump_tlb_entries(struct iommu *obj)
712+{
713+ int i;
714+ struct iotlb_lock l;
715+
716+ clk_enable(obj->clk);
717+
718+ pr_info("%8s %8s\n", "cam:", "ram:");
719+ pr_info("-----------------------------------------\n");
720+
721+ for (i = 0; i < obj->nr_tlb_entries; i++) {
722+ struct cr_regs cr;
723+ static char buf[4096];
724+
725+ iotlb_lock_get(obj, &l);
726+ l.vict = i;
727+ iotlb_lock_set(obj, &l);
728+ iotlb_read_cr(obj, &cr);
729+ if (!iotlb_cr_valid(&cr))
730+ continue;
731+
732+ memset(buf, 0, 4096);
733+ iotlb_dump_cr(obj, &cr, buf);
734+ pr_err("%s", buf);
735+ }
736+
737+ clk_disable(obj->clk);
738+}
739+#else
740+static inline void dump_tlb_entries(struct iommu *obj) {}
741+#endif
742+
743+/**
744+ * iopgtable_store_entry() - Make an iommu pte entry
745+ * @obj: target iommu
746+ * @e: an iommu tlb entry info
747+ **/
748+int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
749+{
750+ int err;
751+
752+ flush_iotlb_page(obj, e->da);
753+ err = iopgtable_store_entry_core(obj, e);
754+#ifdef USE_IOTLB
755+ if (!err)
756+ load_iotlb_entry(obj, e);
757+#endif
758+ return err;
759+}
760+EXPORT_SYMBOL_GPL(iopgtable_store_entry);
761+
762+/**
763+ * iopgtable_lookup_entry() - Lookup an iommu pte entry
764+ * @obj: target iommu
765+ * @da: iommu device virtual address
766+ * @ppgd: iommu pgd entry pointer to be returned
767+ * @ppte: iommu pte entry pointer to be returned
768+ **/
769+void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
770+{
771+ u32 *iopgd, *iopte = NULL;
772+
773+ iopgd = iopgd_offset(obj, da);
774+ if (!*iopgd)
775+ goto out;
776+
777+ if (*iopgd & IOPGD_TABLE)
778+ iopte = iopte_offset(iopgd, da);
779+out:
780+ *ppgd = iopgd;
781+ *ppte = iopte;
782+}
783+EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);
784+
785+static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
786+{
787+ size_t bytes;
788+ u32 *iopgd = iopgd_offset(obj, da);
789+ int nent = 1;
790+
791+ if (!*iopgd)
792+ return 0;
793+
794+ if (*iopgd & IOPGD_TABLE) {
795+ int i;
796+ u32 *iopte = iopte_offset(iopgd, da);
797+
798+ bytes = IOPTE_SIZE;
799+ if (*iopte & IOPTE_LARGE) {
800+ nent *= 16;
801+ /* rewind to the 1st entry */
802+ iopte = (u32 *)((u32)iopte & IOLARGE_MASK);
803+ }
804+ bytes *= nent;
805+ memset(iopte, 0, nent * sizeof(*iopte));
806+ flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));
807+
808+ /*
809+ * do table walk to check if this table is necessary or not
810+ */
811+ iopte = iopte_offset(iopgd, 0);
812+ for (i = 0; i < PTRS_PER_IOPTE; i++)
813+ if (iopte[i])
814+ goto out;
815+
816+ iopte_free(iopte);
817+ nent = 1; /* for the next L1 entry */
818+ } else {
819+ bytes = IOPGD_SIZE;
820+ if (*iopgd & IOPGD_SUPER) {
821+ nent *= 16;
822+ /* rewind to the 1st entry */
823+ iopgd = (u32 *)((u32)iopgd & IOSUPER_MASK);
824+ }
825+ bytes *= nent;
826+ }
827+ memset(iopgd, 0, nent * sizeof(*iopgd));
828+ flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
829+out:
830+ return bytes;
831+}
832+
833+/**
834+ * iopgtable_clear_entry() - Remove an iommu pte entry
835+ * @obj: target iommu
836+ * @da: iommu device virtual address
837+ **/
838+size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
839+{
840+ size_t bytes;
841+
842+ spin_lock(&obj->page_table_lock);
843+
844+ bytes = iopgtable_clear_entry_core(obj, da);
845+ flush_iotlb_page(obj, da);
846+
847+ spin_unlock(&obj->page_table_lock);
848+
849+ return bytes;
850+}
851+EXPORT_SYMBOL_GPL(iopgtable_clear_entry);
852+
853+static void iopgtable_clear_entry_all(struct iommu *obj)
854+{
855+ int i;
856+
857+ spin_lock(&obj->page_table_lock);
858+
859+ for (i = 0; i < PTRS_PER_IOPGD; i++) {
860+ u32 da;
861+ u32 *iopgd;
862+
863+ da = i << IOPGD_SHIFT;
864+ iopgd = iopgd_offset(obj, da);
865+
866+ if (!*iopgd)
867+ continue;
868+
869+ if (*iopgd & IOPGD_TABLE)
870+ iopte_free(iopte_offset(iopgd, 0));
871+
872+ *iopgd = 0;
873+ flush_iopgd_range(iopgd, iopgd);
874+ }
875+
876+ flush_iotlb_all(obj);
877+
878+ spin_unlock(&obj->page_table_lock);
879+}
880+
881+/*
882+ * Device IOMMU generic operations
883+ */
884+static irqreturn_t iommu_fault_handler(int irq, void *data)
885+{
886+ u32 stat, da;
887+ u32 *iopgd, *iopte;
888+ int err = -EIO;
889+ struct iommu *obj = data;
890+
891+ /* Dynamic loading TLB or PTE */
892+ if (obj->isr)
893+ err = obj->isr(obj);
894+
895+ if (!err)
896+ return IRQ_HANDLED;
897+
898+ stat = iommu_report_fault(obj, &da);
899+ if (!stat)
900+ return IRQ_HANDLED;
901+
902+ iopgd = iopgd_offset(obj, da);
903+
904+ if (!(*iopgd & IOPGD_TABLE)) {
905+ dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x\n", __func__,
906+ da, iopgd, *iopgd);
907+ return IRQ_NONE;
908+ }
909+
910+ iopte = iopte_offset(iopgd, da);
911+
912+ dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
913+ __func__, da, iopgd, *iopgd, iopte, *iopte);
914+
915+ dump_tlb_entries(obj);
916+
917+ return IRQ_NONE;
918+}
919+
920+static int device_match_by_alias(struct device *dev, void *data)
921+{
922+ struct iommu *obj = to_iommu(dev);
923+ const char *name = data;
924+
925+ pr_debug("%s: %s %s\n", __func__, obj->name, name);
926+
927+ return strcmp(obj->name, name) == 0;
928+}
929+
930+/**
931+ * iommu_get() - Get iommu handler
932+ * @name: target iommu name
933+ **/
934+struct iommu *iommu_get(const char *name)
935+{
936+ int err = -ENOMEM;
937+ struct device *dev;
938+ struct iommu *obj;
939+
940+ dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
941+ device_match_by_alias);
942+ if (!dev)
943+ return ERR_PTR(-ENODEV);
944+
945+ obj = to_iommu(dev);
946+
947+ mutex_lock(&obj->iommu_lock);
948+
949+ if (obj->refcount++ == 0) {
950+ err = iommu_enable(obj);
951+ if (err)
952+ goto err_enable;
953+ flush_iotlb_all(obj);
954+ }
955+
956+ if (!try_module_get(obj->owner))
957+ goto err_module;
958+
959+ mutex_unlock(&obj->iommu_lock);
960+
961+ dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
962+ return obj;
963+
964+err_module:
965+ if (obj->refcount == 1)
966+ iommu_disable(obj);
967+err_enable:
968+ mutex_unlock(&obj->iommu_lock);
969+ return ERR_PTR(err);
970+}
971+EXPORT_SYMBOL_GPL(iommu_get);
972+
973+/**
974+ * iommu_put() - Put back iommu handler
975+ * @obj: target iommu
976+ **/
977+void iommu_put(struct iommu *obj)
978+{
979+ if (!obj && IS_ERR(obj))
980+ return;
981+
982+ mutex_lock(&obj->iommu_lock);
983+
984+ if (--obj->refcount == 0)
985+ iommu_disable(obj);
986+
987+ module_put(obj->owner);
988+
989+ mutex_unlock(&obj->iommu_lock);
990+
991+ dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
992+}
993+EXPORT_SYMBOL_GPL(iommu_put);
994+
995+/*
996+ * OMAP Device MMU(IOMMU) detection
997+ */
998+static int __devinit omap_iommu_probe(struct platform_device *pdev)
999+{
1000+ int err = -ENODEV;
1001+ void *p;
1002+ int irq;
1003+ struct iommu *obj;
1004+ struct resource *res;
1005+ struct iommu_platform_data *pdata = pdev->dev.platform_data;
1006+
1007+ if (pdev->num_resources != 2)
1008+ return -EINVAL;
1009+
1010+ obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
1011+ if (!obj)
1012+ return -ENOMEM;
1013+
1014+ obj->clk = clk_get(&pdev->dev, pdata->clk_name);
1015+ if (IS_ERR(obj->clk))
1016+ goto err_clk;
1017+
1018+ obj->nr_tlb_entries = pdata->nr_tlb_entries;
1019+ obj->name = pdata->name;
1020+ obj->dev = &pdev->dev;
1021+ obj->ctx = (void *)obj + sizeof(*obj);
1022+
1023+ mutex_init(&obj->iommu_lock);
1024+ mutex_init(&obj->mmap_lock);
1025+ spin_lock_init(&obj->page_table_lock);
1026+ INIT_LIST_HEAD(&obj->mmap);
1027+
1028+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1029+ if (!res) {
1030+ err = -ENODEV;
1031+ goto err_mem;
1032+ }
1033+ obj->regbase = ioremap(res->start, resource_size(res));
1034+ if (!obj->regbase) {
1035+ err = -ENOMEM;
1036+ goto err_mem;
1037+ }
1038+
1039+ res = request_mem_region(res->start, resource_size(res),
1040+ dev_name(&pdev->dev));
1041+ if (!res) {
1042+ err = -EIO;
1043+ goto err_mem;
1044+ }
1045+
1046+ irq = platform_get_irq(pdev, 0);
1047+ if (irq < 0) {
1048+ err = -ENODEV;
1049+ goto err_irq;
1050+ }
1051+ err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
1052+ dev_name(&pdev->dev), obj);
1053+ if (err < 0)
1054+ goto err_irq;
1055+ platform_set_drvdata(pdev, obj);
1056+
1057+ p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
1058+ if (!p) {
1059+ err = -ENOMEM;
1060+ goto err_pgd;
1061+ }
1062+ memset(p, 0, IOPGD_TABLE_SIZE);
1063+ clean_dcache_area(p, IOPGD_TABLE_SIZE);
1064+ obj->iopgd = p;
1065+
1066+ BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));
1067+
1068+ dev_info(&pdev->dev, "%s registered\n", obj->name);
1069+ return 0;
1070+
1071+err_pgd:
1072+ free_irq(irq, obj);
1073+err_irq:
1074+ release_mem_region(res->start, resource_size(res));
1075+ iounmap(obj->regbase);
1076+err_mem:
1077+ clk_put(obj->clk);
1078+err_clk:
1079+ kfree(obj);
1080+ return err;
1081+}
1082+
1083+static int __devexit omap_iommu_remove(struct platform_device *pdev)
1084+{
1085+ int irq;
1086+ struct resource *res;
1087+ struct iommu *obj = platform_get_drvdata(pdev);
1088+
1089+ platform_set_drvdata(pdev, NULL);
1090+
1091+ iopgtable_clear_entry_all(obj);
1092+ free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));
1093+
1094+ irq = platform_get_irq(pdev, 0);
1095+ free_irq(irq, obj);
1096+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1097+ release_mem_region(res->start, resource_size(res));
1098+ iounmap(obj->regbase);
1099+
1100+ clk_put(obj->clk);
1101+ dev_info(&pdev->dev, "%s removed\n", obj->name);
1102+ kfree(obj);
1103+ return 0;
1104+}
1105+
1106+static struct platform_driver omap_iommu_driver = {
1107+ .probe = omap_iommu_probe,
1108+ .remove = __devexit_p(omap_iommu_remove),
1109+ .driver = {
1110+ .name = "omap-iommu",
1111+ },
1112+};
1113+
1114+static void iopte_cachep_ctor(void *iopte)
1115+{
1116+ clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
1117+}
1118+
1119+static int __init omap_iommu_init(void)
1120+{
1121+ struct kmem_cache *p;
1122+ const unsigned long flags = SLAB_HWCACHE_ALIGN;
1123+
1124+ p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, 0, flags,
1125+ iopte_cachep_ctor);
1126+ if (!p)
1127+ return -ENOMEM;
1128+ iopte_cachep = p;
1129+
1130+ return platform_driver_register(&omap_iommu_driver);
1131+}
1132+module_init(omap_iommu_init);
1133+
1134+static void __exit omap_iommu_exit(void)
1135+{
1136+ kmem_cache_destroy(iopte_cachep);
1137+
1138+ platform_driver_unregister(&omap_iommu_driver);
1139+}
1140+module_exit(omap_iommu_exit);
1141+
1142+MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
1143+MODULE_ALIAS("platform:omap-iommu");
1144+MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
1145+MODULE_LICENSE("GPL v2");
1146diff --git a/arch/arm/plat-omap/iopgtable.h b/arch/arm/plat-omap/iopgtable.h
1147new file mode 100644
1148index 0000000..37dac43
1149--- /dev/null
1150+++ b/arch/arm/plat-omap/iopgtable.h
1151@@ -0,0 +1,72 @@
1152+/*
1153+ * omap iommu: pagetable definitions
1154+ *
1155+ * Copyright (C) 2008-2009 Nokia Corporation
1156+ *
1157+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
1158+ *
1159+ * This program is free software; you can redistribute it and/or modify
1160+ * it under the terms of the GNU General Public License version 2 as
1161+ * published by the Free Software Foundation.
1162+ */
1163+
1164+#ifndef __PLAT_OMAP_IOMMU_H
1165+#define __PLAT_OMAP_IOMMU_H
1166+
1167+#define IOPGD_SHIFT 20
1168+#define IOPGD_SIZE (1 << IOPGD_SHIFT)
1169+#define IOPGD_MASK (~(IOPGD_SIZE - 1))
1170+#define IOSECTION_MASK IOPGD_MASK
1171+#define PTRS_PER_IOPGD (1 << (32 - IOPGD_SHIFT))
1172+#define IOPGD_TABLE_SIZE (PTRS_PER_IOPGD * sizeof(u32))
1173+
1174+#define IOSUPER_SIZE (IOPGD_SIZE << 4)
1175+#define IOSUPER_MASK (~(IOSUPER_SIZE - 1))
1176+
1177+#define IOPTE_SHIFT 12
1178+#define IOPTE_SIZE (1 << IOPTE_SHIFT)
1179+#define IOPTE_MASK (~(IOPTE_SIZE - 1))
1180+#define IOPAGE_MASK IOPTE_MASK
1181+#define PTRS_PER_IOPTE (1 << (IOPGD_SHIFT - IOPTE_SHIFT))
1182+#define IOPTE_TABLE_SIZE (PTRS_PER_IOPTE * sizeof(u32))
1183+
1184+#define IOLARGE_SIZE (IOPTE_SIZE << 4)
1185+#define IOLARGE_MASK (~(IOLARGE_SIZE - 1))
1186+
1187+#define IOPGD_TABLE (1 << 0)
1188+#define IOPGD_SECTION (2 << 0)
1189+#define IOPGD_SUPER (1 << 18 | 2 << 0)
1190+
1191+#define IOPTE_SMALL (2 << 0)
1192+#define IOPTE_LARGE (1 << 0)
1193+
1194+#define iopgd_index(da) (((da) >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1))
1195+#define iopgd_offset(obj, da) ((obj)->iopgd + iopgd_index(da))
1196+
1197+#define iopte_paddr(iopgd) (*iopgd & ~((1 << 10) - 1))
1198+#define iopte_vaddr(iopgd) ((u32 *)phys_to_virt(iopte_paddr(iopgd)))
1199+
1200+#define iopte_index(da) (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
1201+#define iopte_offset(iopgd, da) (iopte_vaddr(iopgd) + iopte_index(da))
1202+
1203+static inline u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
1204+ u32 flags)
1205+{
1206+ memset(e, 0, sizeof(*e));
1207+
1208+ e->da = da;
1209+ e->pa = pa;
1210+ e->valid = 1;
1211+ /* FIXME: add OMAP1 support */
1212+ e->pgsz = flags & MMU_CAM_PGSZ_MASK;
1213+ e->endian = flags & MMU_RAM_ENDIAN_MASK;
1214+ e->elsz = flags & MMU_RAM_ELSZ_MASK;
1215+ e->mixed = flags & MMU_RAM_MIXED_MASK;
1216+
1217+ return iopgsz_to_bytes(e->pgsz);
1218+}
1219+
1220+#define to_iommu(dev) \
1221+ (struct iommu *)platform_get_drvdata(to_platform_device(dev))
1222+
1223+#endif /* __PLAT_OMAP_IOMMU_H */
1224--
12251.5.6.5
1226