Diffstat (limited to 'extras/recipes-kernel/linux/linux-omap/linus/0003-ARM-get-rid-of-kmap_high_l1_vipt.patch')
-rw-r--r--  extras/recipes-kernel/linux/linux-omap/linus/0003-ARM-get-rid-of-kmap_high_l1_vipt.patch  186
1 file changed, 186 insertions(+), 0 deletions(-)
diff --git a/extras/recipes-kernel/linux/linux-omap/linus/0003-ARM-get-rid-of-kmap_high_l1_vipt.patch b/extras/recipes-kernel/linux/linux-omap/linus/0003-ARM-get-rid-of-kmap_high_l1_vipt.patch
new file mode 100644
index 00000000..d31b0e69
--- /dev/null
+++ b/extras/recipes-kernel/linux/linux-omap/linus/0003-ARM-get-rid-of-kmap_high_l1_vipt.patch
@@ -0,0 +1,186 @@
From b4edc88b911049a85162600f579d0364ee311d4e Mon Sep 17 00:00:00 2001
From: Nicolas Pitre <nicolas.pitre@linaro.org>
Date: Wed, 15 Dec 2010 15:14:45 -0500
Subject: [PATCH 03/65] ARM: get rid of kmap_high_l1_vipt()

Since commit 3e4d3af501 "mm: stack based kmap_atomic()", it is no longer
necessary to carry an ad hoc version of kmap_atomic() added in commit
7e5a69e83b "ARM: 6007/1: fix highmem with VIPT cache and DMA" to cope
with reentrancy.

In fact, it is now actively wrong to rely on fixed kmap type indices
(namely KM_L1_CACHE) as kmap_atomic() totally ignores them now and a
concurrent instance of it may reuse any slot for any purpose.

Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
---
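[Reviewer's orientation note, placed after the "---" separator where
format-patch commentary conventionally lives: every hunk below makes the
same substitution. The fragment here is an illustrative sketch of that
calling-convention change, not buildable code on its own; op, offset,
len and dir are placeholders borrowed from the dma_cache_maint_page()
hunk.]

    /* Before: the ad hoc VIPT helper made the caller carry the fixmap PTE. */
    pte_t saved_pte;
    void *vaddr = kmap_high_l1_vipt(page, &saved_pte);
    op(vaddr + offset, len, dir);
    kunmap_high_l1_vipt(page, saved_pte);

    /* After: stack-based kmap_atomic() nests safely on its own. */
    void *vaddr = kmap_atomic(page);
    op(vaddr + offset, len, dir);
    kunmap_atomic(vaddr);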
 arch/arm/include/asm/highmem.h |    3 -
 arch/arm/mm/dma-mapping.c      |    7 ++-
 arch/arm/mm/flush.c            |    7 ++-
 arch/arm/mm/highmem.c          |   87 ----------------------------------------
 4 files changed, 8 insertions(+), 96 deletions(-)

diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 1fc684e..7080e2c 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -25,9 +25,6 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
-extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
-
 /*
  * The following functions are already defined by <linux/highmem.h>
  * when CONFIG_HIGHMEM is not set.
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ac6a361..809f1bf 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/highmem.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -480,10 +481,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
                         op(vaddr, len, dir);
                         kunmap_high(page);
                 } else if (cache_is_vipt()) {
-                        pte_t saved_pte;
-                        vaddr = kmap_high_l1_vipt(page, &saved_pte);
+                        /* unmapped pages might still be cached */
+                        vaddr = kmap_atomic(page);
                         op(vaddr + offset, len, dir);
-                        kunmap_high_l1_vipt(page, saved_pte);
+                        kunmap_atomic(vaddr);
                 }
         } else {
                 vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 391ffae..c29f283 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -180,10 +181,10 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
                         __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                         kunmap_high(page);
                 } else if (cache_is_vipt()) {
-                        pte_t saved_pte;
-                        addr = kmap_high_l1_vipt(page, &saved_pte);
+                        /* unmapped pages might still be cached */
+                        addr = kmap_atomic(page);
                         __cpuc_flush_dcache_area(addr, PAGE_SIZE);
-                        kunmap_high_l1_vipt(page, saved_pte);
+                        kunmap_atomic(addr);
                 }
         }
 
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index c435fd9..807c057 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -140,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
         pte = TOP_PTE(vaddr);
         return pte_page(*pte);
 }
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
- * The VIVT cache of a highmem page is always flushed before the page
- * is unmapped. Hence unmapped highmem pages need no cache maintenance
- * in that case.
- *
- * However unmapped pages may still be cached with a VIPT cache, and
- * it is not possible to perform cache maintenance on them using physical
- * addresses unfortunately. So we have no choice but to set up a temporary
- * virtual mapping for that purpose.
- *
- * Yet this VIPT cache maintenance may be triggered from DMA support
- * functions which are possibly called from interrupt context. As we don't
- * want to keep interrupt disabled all the time when such maintenance is
- * taking place, we therefore allow for some reentrancy by preserving and
- * restoring the previous fixmap entry before the interrupted context is
- * resumed. If the reentrancy depth is 0 then there is no need to restore
- * the previous fixmap, and leaving the current one in place allow it to
- * be reused the next time without a TLB flush (common with DMA).
- */
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
-        unsigned int idx, cpu;
-        int *depth;
-        unsigned long vaddr, flags;
-        pte_t pte, *ptep;
-
-        if (!in_interrupt())
-                preempt_disable();
-
-        cpu = smp_processor_id();
-        depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
-        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-        ptep = TOP_PTE(vaddr);
-        pte = mk_pte(page, kmap_prot);
-
-        raw_local_irq_save(flags);
-        (*depth)++;
-        if (pte_val(*ptep) == pte_val(pte)) {
-                *saved_pte = pte;
-        } else {
-                *saved_pte = *ptep;
-                set_pte_ext(ptep, pte, 0);
-                local_flush_tlb_kernel_page(vaddr);
-        }
-        raw_local_irq_restore(flags);
-
-        return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
-        unsigned int idx, cpu = smp_processor_id();
-        int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-        unsigned long vaddr, flags;
-        pte_t pte, *ptep;
-
-        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-        ptep = TOP_PTE(vaddr);
-        pte = mk_pte(page, kmap_prot);
-
-        BUG_ON(pte_val(*ptep) != pte_val(pte));
-        BUG_ON(*depth <= 0);
-
-        raw_local_irq_save(flags);
-        (*depth)--;
-        if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
-                set_pte_ext(ptep, saved_pte, 0);
-                local_flush_tlb_kernel_page(vaddr);
-        }
-        raw_local_irq_restore(flags);
-
-        if (!in_interrupt())
-                preempt_enable();
-}
-
-#endif /* CONFIG_CPU_CACHE_VIPT */
-- 
1.6.6.1

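[Postscript on the deleted per-CPU depth/saved-PTE scheme above: the
commit message's key claim is that a fixed index like KM_L1_CACHE no
longer names "your" slot once kmap_atomic() hands out slots LIFO per
CPU. The program below is a hypothetical user-space model of that slot
discipline; slot_push()/slot_pop() are invented stand-ins, not kernel
interfaces, and the depth counter models per-CPU nesting only.]

    /* Hypothetical model of stack-based atomic kmap slot allocation. */
    #include <assert.h>
    #include <stdio.h>

    #define KM_TYPE_NR 16           /* slots available per CPU */

    static int depth;               /* per-CPU nesting depth */

    static int slot_push(void)      /* models kmap_atomic(): next free slot */
    {
            assert(depth < KM_TYPE_NR);
            return depth++;
    }

    static void slot_pop(int idx)   /* models kunmap_atomic(): strict LIFO */
    {
            assert(idx == depth - 1);
            depth--;
    }

    int main(void)
    {
            int outer = slot_push();   /* e.g. a process-context mapping */
            int nested = slot_push();  /* e.g. an interrupt-context mapping */

            /*
             * A fixed index (the old KM_L1_CACHE) is meaningless here:
             * whichever context maps next simply takes the top of the
             * stack, so two users assuming the same fixed slot would
             * silently share one mapping -- hence the helpers had to go.
             */
            slot_pop(nested);
            slot_pop(outer);
            printf("LIFO nesting ok, depth back to %d\n", depth);
            return 0;
    }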