Diffstat (limited to 'extras/recipes-kernel/linux/linux-omap/linus/0004-ARM-fix-cache-xsc3l2-after-stack-based-kmap_atomic.patch')
-rw-r--r--   extras/recipes-kernel/linux/linux-omap/linus/0004-ARM-fix-cache-xsc3l2-after-stack-based-kmap_atomic.patch   189
1 file changed, 189 insertions, 0 deletions
diff --git a/extras/recipes-kernel/linux/linux-omap/linus/0004-ARM-fix-cache-xsc3l2-after-stack-based-kmap_atomic.patch b/extras/recipes-kernel/linux/linux-omap/linus/0004-ARM-fix-cache-xsc3l2-after-stack-based-kmap_atomic.patch
new file mode 100644
index 00000000..32643f67
--- /dev/null
+++ b/extras/recipes-kernel/linux/linux-omap/linus/0004-ARM-fix-cache-xsc3l2-after-stack-based-kmap_atomic.patch
@@ -0,0 +1,189 @@
From fc077c0fbb09ca255691d05789076d121ae11789 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre <nicolas.pitre@linaro.org>
Date: Wed, 15 Dec 2010 23:29:04 -0500
Subject: [PATCH 04/65] ARM: fix cache-xsc3l2 after stack based kmap_atomic()

Since commit 3e4d3af501 "mm: stack based kmap_atomic()", it is actively
wrong to rely on fixed kmap type indices (namely KM_L2_CACHE), as
kmap_atomic() totally ignores them and a concurrent instance of it may
happily reuse any slot for any purpose.  Because kmap_atomic() is now
able to deal with reentrancy, we can get rid of the ad hoc mapping here,
and we don't even have to disable IRQs anymore (highmem case).

While the code is made much simpler, there is a needless cache flush
introduced by the usage of __kunmap_atomic().  It is not clear whether
removing that flush would be worth the cost in code maintenance (I
don't think there are that many highmem users on that platform, if any).

Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
---
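(Illustrative note, not part of the upstream change: the shift the message
describes, from a fixed KM_L2_CACHE fixmap slot fenced by IRQ masking to the
stack-based API, boils down to the sketch below.  It assumes CONFIG_HIGHMEM
and a tree that already carries the stack based kmap_atomic() series;
show_kmap_pattern() is a made-up helper, not code from this patch.)

  #include <linux/highmem.h>

  /*
   * Hypothetical helper: map one physical page, work on it, unmap.
   * kmap_atomic_pfn() pushes a fresh slot onto a per-CPU kmap stack,
   * so nested or concurrent users cannot clobber each other's slot
   * and no IRQ disabling is needed around the mapping.
   */
  static void show_kmap_pattern(unsigned long pa)
  {
          void *va = kmap_atomic_pfn(pa >> PAGE_SHIFT);

          /* ... cache maintenance (or any access) through va ... */

          kunmap_atomic(va);      /* tears down the slot; on ARM this
                                   * path also performs the cache flush
                                   * the message above calls needless
                                   * for this particular use case */
  }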
 arch/arm/mm/cache-xsc3l2.c |   57 ++++++++++++++++---------------------------
 1 files changed, 21 insertions(+), 36 deletions(-)

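(Also illustrative: every loop touched below shares one amortization trick,
mapping a highmem page at most once per page crossed while stepping one cache
line at a time.  walk_by_line() is hypothetical; l2_map_va(), l2_unmap_va()
and CACHE_LINE_SIZE are the real helpers from the patched file.)

  static void walk_by_line(unsigned long start, unsigned long end)
  {
          unsigned long vaddr = -1;  /* matches no page: forces the first map */

          start &= ~(CACHE_LINE_SIZE - 1);
          while (start < end) {
                  /* remaps only when the walk crosses into a new page */
                  vaddr = l2_map_va(start, vaddr);
                  /* ... one cache-line op on vaddr, e.g. xsc3_l2_clean_mva() ... */
                  start += CACHE_LINE_SIZE;
          }
          l2_unmap_va(vaddr);  /* no-op if nothing was mapped or !CONFIG_HIGHMEM */
  }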
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index c315492..5a32020 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -17,14 +17,10 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/system.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include "mm.h"
 
 #define CR_L2 (1 << 26)
 
@@ -71,16 +67,15 @@ static inline void xsc3_l2_inv_all(void)
 	dsb();
 }
 
+static inline void l2_unmap_va(unsigned long va)
+{
 #ifdef CONFIG_HIGHMEM
-#define l2_map_save_flags(x)		raw_local_save_flags(x)
-#define l2_map_restore_flags(x)		raw_local_irq_restore(x)
-#else
-#define l2_map_save_flags(x)		((x) = 0)
-#define l2_map_restore_flags(x)		((void)(x))
+	if (va != -1)
+		kunmap_atomic((void *)va);
 #endif
+}
 
-static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
-				      unsigned long flags)
+static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
 {
 #ifdef CONFIG_HIGHMEM
 	unsigned long va = prev_va & PAGE_MASK;
@@ -89,17 +84,10 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
 		/*
 		 * Switching to a new page.  Because cache ops are
 		 * using virtual addresses only, we must put a mapping
-		 * in place for it.  We also enable interrupts for a
-		 * short while and disable them again to protect this
-		 * mapping.
+		 * in place for it.
 		 */
-		unsigned long idx;
-		raw_local_irq_restore(flags);
-		idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-		va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-		raw_local_irq_restore(flags | PSR_I_BIT);
-		set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
-		local_flush_tlb_kernel_page(va);
+		l2_unmap_va(prev_va);
+		va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
 	}
 	return va + (pa_offset >> (32 - PAGE_SHIFT));
 #else
@@ -109,7 +97,7 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
 
 static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 {
-	unsigned long vaddr, flags;
+	unsigned long vaddr;
 
 	if (start == 0 && end == -1ul) {
 		xsc3_l2_inv_all();
@@ -117,13 +105,12 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	}
 
 	vaddr = -1;	/* to force the first mapping */
-	l2_map_save_flags(flags);
 
 	/*
 	 * Clean and invalidate partial first cache line.
 	 */
 	if (start & (CACHE_LINE_SIZE - 1)) {
-		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
+		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
@@ -133,7 +120,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	 * Invalidate all full cache lines between 'start' and 'end'.
 	 */
 	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}
@@ -142,31 +129,30 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	 * Clean and invalidate partial last cache line.
 	 */
 	if (start < end) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 	}
 
-	l2_map_restore_flags(flags);
+	l2_unmap_va(vaddr);
 
 	dsb();
 }
 
 static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
 {
-	unsigned long vaddr, flags;
+	unsigned long vaddr;
 
 	vaddr = -1;	/* to force the first mapping */
-	l2_map_save_flags(flags);
 
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}
 
-	l2_map_restore_flags(flags);
+	l2_unmap_va(vaddr);
 
 	dsb();
 }
@@ -193,7 +179,7 @@ static inline void xsc3_l2_flush_all(void)
 
 static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 {
-	unsigned long vaddr, flags;
+	unsigned long vaddr;
 
 	if (start == 0 && end == -1ul) {
 		xsc3_l2_flush_all();
@@ -201,17 +187,16 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 	}
 
 	vaddr = -1;	/* to force the first mapping */
-	l2_map_save_flags(flags);
 
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}
 
-	l2_map_restore_flags(flags);
+	l2_unmap_va(vaddr);
 
 	dsb();
 }
-- 
1.6.6.1
