author     Richard Purdie <rpurdie@linux.intel.com>  2010-06-08 21:04:38 +0100
committer  Richard Purdie <rpurdie@linux.intel.com>  2010-06-08 21:04:38 +0100
commit     fa64e20621f729341d17036eee224394a90ba2b0 (patch)
tree       93839efe764fbfaceb94f068c698bab7e1c6d499 /meta-moblin
parent     3a8ee5864dcefae62af4e9e8c54b2537cfb43a7b (diff)
download   poky-fa64e20621f729341d17036eee224394a90ba2b0.tar.gz
linux-moblin: Drop old versions (2.6.31.5 and 2.6.29.1)
Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
Diffstat (limited to 'meta-moblin')
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch  486
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/0002-drm-Add-a-tracker-for-global-objects.patch  191
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/0003-drm-Export-hash-table-functionality.patch  58
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch  53
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-generic  2844
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-ivi  127
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-menlow  8
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-mrst  2316
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-netbook  52
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-menlow  3353
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-netbook  2747
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6-build-nonintconfig.patch  128
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.19-modesetting-by-default.patch  11
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-dont-wait-for-mouse.patch  43
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch  56
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-revert.patch  55
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch  208
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-enable-async-by-default.patch  12
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-even-faster-kms.patch  20
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-initrd.patch  161
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-kms.patch  285
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-flip-ide-net.patch  40
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch  92
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-jbd-longer-commit-interval.patch  28
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-kms-after-sata.patch  32
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-msiwind.patch  57
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-agp.patch  83
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-drm.patch  336
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-gtt-size.patch  21
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-i2c.patch  38
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch  28
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch  37524
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-retry-root-mount.patch  61
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-silence-acer-message.patch  17
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-sreadahead.patch  66
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-timberdale.patch  6095
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-touchkit.patch  130
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-async.patch  69
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-suspend.patch  139
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/MRST-GFX-driver-consolidated.patch  44328
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/close_debug_info_of_rt2860.patch  38
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/defconfig-netbook  3220
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-build-nonintconfig.patch  142
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-driver-level-usb-autosuspend.patch  61
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-usb-uvc-autosuspend.patch  19
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-dont-wait-for-mouse.patch  47
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-jbd-longer-commit-interval.patch  25
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-after-sata.patch  38
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-dont-blank-display.patch  33
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-edid-cache.patch  58
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-run-async.patch  118
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-silence-acer-message.patch  22
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-sreadahead.patch  66
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-touchkit.patch  146
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.30-non-root-X.patch  32
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-1-2-timberdale.patch  12910
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-2-2-timberdale.patch  44
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-bluetooth-suspend.patch  465
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-fix.patch  26
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-opregion.patch  43
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-vblank-fix.patch  26
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-kms-flip.patch  307
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-mem-info.patch  140
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-iegd.patch  9290
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-samsung.patch  206
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-silence-wacom.patch  14
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-acpi-cstate-fixup.patch  173
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-cpuidle.patch  407
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-honor-opost-flag-for-echoes.patch  86
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-move-echoctl-check-and-clean-up-logic.patch  91
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-timer-fix.patch  64
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-copy-checks.patch  275
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-pit-fix.patch  95
-rw-r--r--  meta-moblin/packages/linux/linux-moblin_2.6.29.1.bb  46
-rw-r--r--  meta-moblin/packages/linux/linux-moblin_2.6.31.5.bb  47
75 files changed, 0 insertions, 131218 deletions
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch
deleted file mode 100644
index 2655acfaa5..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch
+++ /dev/null
@@ -1,486 +0,0 @@
1From 84e7ccff650b8f124585ba7d5b9a1544f53457e7 Mon Sep 17 00:00:00 2001
2From: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
3Date: Fri, 27 Feb 2009 16:53:11 +0100
4Subject: [PATCH 1/8] drm: Split out the mm declarations in a separate header. Add atomic operations.
5
6Signed-off-by: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
7---
8 drivers/gpu/drm/drm_mm.c | 173 ++++++++++++++++++++++++++++++++++++++--------
9 include/drm/drmP.h | 37 +----------
10 include/drm/drm_mm.h | 90 ++++++++++++++++++++++++
11 3 files changed, 235 insertions(+), 65 deletions(-)
12 create mode 100644 include/drm/drm_mm.h
13
14Index: linux-2.6.28/drivers/gpu/drm/drm_mm.c
15===================================================================
16--- linux-2.6.28.orig/drivers/gpu/drm/drm_mm.c 2009-03-09 19:19:52.000000000 +0000
17+++ linux-2.6.28/drivers/gpu/drm/drm_mm.c 2009-03-12 13:15:05.000000000 +0000
18@@ -42,8 +43,11 @@
19 */
20
21 #include "drmP.h"
22+#include "drm_mm.h"
23 #include <linux/slab.h>
24
25+#define MM_UNUSED_TARGET 4
26+
27 unsigned long drm_mm_tail_space(struct drm_mm *mm)
28 {
29 struct list_head *tail_node;
30@@ -74,16 +78,66 @@
31 return 0;
32 }
33
34+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
35+{
36+ struct drm_mm_node *child;
37+
38+ if (atomic) {
39+ child =
40+ (struct drm_mm_node *)kmalloc(sizeof(*child), GFP_ATOMIC);
41+ } else {
42+ child =
43+ (struct drm_mm_node *)kmalloc(sizeof(*child), GFP_KERNEL);
44+ }
45+
46+ if (unlikely(child == NULL)) {
47+ spin_lock(&mm->unused_lock);
48+ if (list_empty(&mm->unused_nodes))
49+ child = NULL;
50+ else {
51+ child =
52+ list_entry(mm->unused_nodes.next,
53+ struct drm_mm_node, fl_entry);
54+ list_del(&child->fl_entry);
55+ --mm->num_unused;
56+ }
57+ spin_unlock(&mm->unused_lock);
58+ }
59+ return child;
60+}
61+
62+int drm_mm_pre_get(struct drm_mm *mm)
63+{
64+ struct drm_mm_node *node;
65+
66+ spin_lock(&mm->unused_lock);
67+ while (mm->num_unused < MM_UNUSED_TARGET) {
68+ spin_unlock(&mm->unused_lock);
69+ node = kmalloc(sizeof(*node), GFP_KERNEL);
70+ spin_lock(&mm->unused_lock);
71+
72+ if (unlikely(node == NULL)) {
73+ int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
74+ spin_unlock(&mm->unused_lock);
75+ return ret;
76+ }
77+ ++mm->num_unused;
78+ list_add_tail(&node->fl_entry, &mm->unused_nodes);
79+ }
80+ spin_unlock(&mm->unused_lock);
81+ return 0;
82+}
83+
84+EXPORT_SYMBOL(drm_mm_pre_get);
85
86 static int drm_mm_create_tail_node(struct drm_mm *mm,
87- unsigned long start,
88- unsigned long size)
89+ unsigned long start,
90+ unsigned long size, int atomic)
91 {
92 struct drm_mm_node *child;
93
94- child = (struct drm_mm_node *)
95- drm_alloc(sizeof(*child), DRM_MEM_MM);
96- if (!child)
97+ child = drm_mm_kmalloc(mm, atomic);
98+ if (unlikely(child == NULL))
99 return -ENOMEM;
100
101 child->free = 1;
102@@ -97,8 +151,7 @@
103 return 0;
104 }
105
106-
107-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
108+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
109 {
110 struct list_head *tail_node;
111 struct drm_mm_node *entry;
112@@ -106,20 +159,21 @@
113 tail_node = mm->ml_entry.prev;
114 entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
115 if (!entry->free) {
116- return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
117+ return drm_mm_create_tail_node(mm, entry->start + entry->size,
118+ size, atomic);
119 }
120 entry->size += size;
121 return 0;
122 }
123
124 static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
125- unsigned long size)
126+ unsigned long size,
127+ int atomic)
128 {
129 struct drm_mm_node *child;
130
131- child = (struct drm_mm_node *)
132- drm_alloc(sizeof(*child), DRM_MEM_MM);
133- if (!child)
134+ child = drm_mm_kmalloc(parent->mm, atomic);
135+ if (unlikely(child == NULL))
136 return NULL;
137
138 INIT_LIST_HEAD(&child->fl_entry);
139@@ -151,8 +205,9 @@
140 tmp = parent->start % alignment;
141
142 if (tmp) {
143- align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
144- if (!align_splitoff)
145+ align_splitoff =
146+ drm_mm_split_at_start(parent, alignment - tmp, 0);
147+ if (unlikely(align_splitoff == NULL))
148 return NULL;
149 }
150
151@@ -161,7 +216,7 @@
152 parent->free = 0;
153 return parent;
154 } else {
155- child = drm_mm_split_at_start(parent, size);
156+ child = drm_mm_split_at_start(parent, size, 0);
157 }
158
159 if (align_splitoff)
160@@ -169,14 +224,50 @@
161
162 return child;
163 }
164+
165 EXPORT_SYMBOL(drm_mm_get_block);
166
167+struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
168+ unsigned long size,
169+ unsigned alignment)
170+{
171+
172+ struct drm_mm_node *align_splitoff = NULL;
173+ struct drm_mm_node *child;
174+ unsigned tmp = 0;
175+
176+ if (alignment)
177+ tmp = parent->start % alignment;
178+
179+ if (tmp) {
180+ align_splitoff =
181+ drm_mm_split_at_start(parent, alignment - tmp, 1);
182+ if (unlikely(align_splitoff == NULL))
183+ return NULL;
184+ }
185+
186+ if (parent->size == size) {
187+ list_del_init(&parent->fl_entry);
188+ parent->free = 0;
189+ return parent;
190+ } else {
191+ child = drm_mm_split_at_start(parent, size, 1);
192+ }
193+
194+ if (align_splitoff)
195+ drm_mm_put_block(align_splitoff);
196+
197+ return child;
198+}
199+
200+EXPORT_SYMBOL(drm_mm_get_block_atomic);
201+
202 /*
203 * Put a block. Merge with the previous and / or next block if they are free.
204 * Otherwise add to the free stack.
205 */
206
207-void drm_mm_put_block(struct drm_mm_node * cur)
208+void drm_mm_put_block(struct drm_mm_node *cur)
209 {
210
211 struct drm_mm *mm = cur->mm;
212@@ -188,21 +279,27 @@
213 int merged = 0;
214
215 if (cur_head->prev != root_head) {
216- prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
217+ prev_node =
218+ list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
219 if (prev_node->free) {
220 prev_node->size += cur->size;
221 merged = 1;
222 }
223 }
224 if (cur_head->next != root_head) {
225- next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
226+ next_node =
227+ list_entry(cur_head->next, struct drm_mm_node, ml_entry);
228 if (next_node->free) {
229 if (merged) {
230 prev_node->size += next_node->size;
231 list_del(&next_node->ml_entry);
232 list_del(&next_node->fl_entry);
233- drm_free(next_node, sizeof(*next_node),
234- DRM_MEM_MM);
235+ if (mm->num_unused < MM_UNUSED_TARGET) {
236+ list_add(&next_node->fl_entry,
237+ &mm->unused_nodes);
238+ ++mm->num_unused;
239+ } else
240+ kfree(next_node);
241 } else {
242 next_node->size += cur->size;
243 next_node->start = cur->start;
244@@ -215,14 +312,19 @@
245 list_add(&cur->fl_entry, &mm->fl_entry);
246 } else {
247 list_del(&cur->ml_entry);
248- drm_free(cur, sizeof(*cur), DRM_MEM_MM);
249+ if (mm->num_unused < MM_UNUSED_TARGET) {
250+ list_add(&cur->fl_entry, &mm->unused_nodes);
251+ ++mm->num_unused;
252+ } else
253+ kfree(cur);
254 }
255 }
256+
257 EXPORT_SYMBOL(drm_mm_put_block);
258
259-struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
260- unsigned long size,
261- unsigned alignment, int best_match)
262+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
263+ unsigned long size,
264+ unsigned alignment, int best_match)
265 {
266 struct list_head *list;
267 const struct list_head *free_stack = &mm->fl_entry;
268@@ -247,7 +349,6 @@
269 wasted += alignment - tmp;
270 }
271
272-
273 if (entry->size >= size + wasted) {
274 if (!best_match)
275 return entry;
276@@ -260,6 +361,7 @@
277
278 return best;
279 }
280+EXPORT_SYMBOL(drm_mm_search_free);
281
282 int drm_mm_clean(struct drm_mm * mm)
283 {
284@@ -267,14 +369,17 @@
285
286 return (head->next->next == head);
287 }
288-EXPORT_SYMBOL(drm_mm_search_free);
289+EXPORT_SYMBOL(drm_mm_clean);
290
291 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
292 {
293 INIT_LIST_HEAD(&mm->ml_entry);
294 INIT_LIST_HEAD(&mm->fl_entry);
295+ INIT_LIST_HEAD(&mm->unused_nodes);
296+ mm->num_unused = 0;
297+ spin_lock_init(&mm->unused_lock);
298
299- return drm_mm_create_tail_node(mm, start, size);
300+ return drm_mm_create_tail_node(mm, start, size, 0);
301 }
302 EXPORT_SYMBOL(drm_mm_init);
303
304@@ -282,6 +387,7 @@
305 {
306 struct list_head *bnode = mm->fl_entry.next;
307 struct drm_mm_node *entry;
308+ struct drm_mm_node *next;
309
310 entry = list_entry(bnode, struct drm_mm_node, fl_entry);
311
312@@ -293,7 +399,16 @@
313
314 list_del(&entry->fl_entry);
315 list_del(&entry->ml_entry);
316+ kfree(entry);
317+
318+ spin_lock(&mm->unused_lock);
319+ list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
320+ list_del(&entry->fl_entry);
321+ kfree(entry);
322+ --mm->num_unused;
323+ }
324+ spin_unlock(&mm->unused_lock);
325
326- drm_free(entry, sizeof(*entry), DRM_MEM_MM);
327+ BUG_ON(mm->num_unused != 0);
328 }
329 EXPORT_SYMBOL(drm_mm_takedown);
330Index: linux-2.6.28/include/drm/drmP.h
331===================================================================
332--- linux-2.6.28.orig/include/drm/drmP.h 2009-03-12 13:13:54.000000000 +0000
333+++ linux-2.6.28/include/drm/drmP.h 2009-03-12 13:37:59.000000000 +0000
334@@ -86,6 +86,7 @@
335
336 #include "drm_os_linux.h"
337 #include "drm_hashtab.h"
338+#include "drm_mm.h"
339
340 /***********************************************************************/
341 /** \name DRM template customization defaults */
342@@ -502,26 +503,6 @@
343 };
344
345
346-/*
347- * Generic memory manager structs
348- */
349-
350-struct drm_mm_node {
351- struct list_head fl_entry;
352- struct list_head ml_entry;
353- int free;
354- unsigned long start;
355- unsigned long size;
356- struct drm_mm *mm;
357- void *private;
358-};
359-
360-struct drm_mm {
361- struct list_head fl_entry;
362- struct list_head ml_entry;
363-};
364-
365-
366 /**
367 * Mappings list
368 */
369@@ -1307,22 +1288,6 @@
370 extern int drm_sysfs_connector_add(struct drm_connector *connector);
371 extern void drm_sysfs_connector_remove(struct drm_connector *connector);
372
373-/*
374- * Basic memory manager support (drm_mm.c)
375- */
376-extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
377- unsigned long size,
378- unsigned alignment);
379-extern void drm_mm_put_block(struct drm_mm_node * cur);
380-extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
381- unsigned alignment, int best_match);
382-extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
383-extern void drm_mm_takedown(struct drm_mm *mm);
384-extern int drm_mm_clean(struct drm_mm *mm);
385-extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
386-extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
387-extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
388-
389 /* Graphics Execution Manager library functions (drm_gem.c) */
390 int drm_gem_init(struct drm_device *dev);
391 void drm_gem_destroy(struct drm_device *dev);
392Index: linux-2.6.28/include/drm/drm_mm.h
393===================================================================
394--- /dev/null 1970-01-01 00:00:00.000000000 +0000
395+++ linux-2.6.28/include/drm/drm_mm.h 2009-03-12 13:15:05.000000000 +0000
396@@ -0,0 +1,90 @@
397+/**************************************************************************
398+ *
399+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
400+ * All Rights Reserved.
401+ *
402+ * Permission is hereby granted, free of charge, to any person obtaining a
403+ * copy of this software and associated documentation files (the
404+ * "Software"), to deal in the Software without restriction, including
405+ * without limitation the rights to use, copy, modify, merge, publish,
406+ * distribute, sub license, and/or sell copies of the Software, and to
407+ * permit persons to whom the Software is furnished to do so, subject to
408+ * the following conditions:
409+ *
410+ * The above copyright notice and this permission notice (including the
411+ * next paragraph) shall be included in all copies or substantial portions
412+ * of the Software.
413+ *
414+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
415+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
416+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
417+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
418+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
419+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
420+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
421+ *
422+ *
423+ **************************************************************************/
424+/*
425+ * Authors:
426+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
427+ */
428+
429+#ifndef _DRM_MM_H_
430+#define _DRM_MM_H_
431+
432+/*
433+ * Generic range manager structs
434+ */
435+#include <linux/list.h>
436+
437+struct drm_mm_node {
438+ struct list_head fl_entry;
439+ struct list_head ml_entry;
440+ int free;
441+ unsigned long start;
442+ unsigned long size;
443+ struct drm_mm *mm;
444+ void *private;
445+};
446+
447+struct drm_mm {
448+ struct list_head fl_entry;
449+ struct list_head ml_entry;
450+ struct list_head unused_nodes;
451+ int num_unused;
452+ spinlock_t unused_lock;
453+};
454+
455+/*
456+ * Basic range manager support (drm_mm.c)
457+ */
458+
459+extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
460+ unsigned long size,
461+ unsigned alignment);
462+extern struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
463+ unsigned long size,
464+ unsigned alignment);
465+extern void drm_mm_put_block(struct drm_mm_node *cur);
466+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
467+ unsigned long size,
468+ unsigned alignment,
469+ int best_match);
470+extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
471+ unsigned long size);
472+extern void drm_mm_takedown(struct drm_mm *mm);
473+extern int drm_mm_clean(struct drm_mm *mm);
474+extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
475+extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
476+ unsigned long size);
477+extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
478+ unsigned long size, int atomic);
479+extern int drm_mm_pre_get(struct drm_mm *mm);
480+
481+static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
482+{
483+ return block->mm;
484+}
485+
486+#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0002-drm-Add-a-tracker-for-global-objects.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0002-drm-Add-a-tracker-for-global-objects.patch
deleted file mode 100644
index 3f07b91f2e..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0002-drm-Add-a-tracker-for-global-objects.patch
+++ /dev/null
@@ -1,191 +0,0 @@
1From cd04a0500d70ea012089ec38183f20c0c30f8ba5 Mon Sep 17 00:00:00 2001
2From: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
3Date: Fri, 27 Feb 2009 12:31:58 +0100
4Subject: [PATCH 2/8] drm: Add a tracker for global objects.
5
6Signed-off-by: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
7---
8 drivers/gpu/drm/Makefile | 3 +-
9 drivers/gpu/drm/drm_drv.c | 3 +
10 drivers/gpu/drm/drm_global.c | 107 ++++++++++++++++++++++++++++++++++++++++++
11 include/drm/drmP.h | 20 ++++++++
12 4 files changed, 132 insertions(+), 1 deletions(-)
13 create mode 100644 drivers/gpu/drm/drm_global.c
14
15Index: linux-2.6.28/drivers/gpu/drm/Makefile
16===================================================================
17--- linux-2.6.28.orig/drivers/gpu/drm/Makefile 2009-03-12 13:13:54.000000000 +0000
18+++ linux-2.6.28/drivers/gpu/drm/Makefile 2009-03-12 13:15:18.000000000 +0000
19@@ -10,7 +10,8 @@
20 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
21 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
22 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
23- drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
24+ drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
25+ drm_global.o
26
27 drm-$(CONFIG_COMPAT) += drm_ioc32.o
28
29Index: linux-2.6.28/drivers/gpu/drm/drm_drv.c
30===================================================================
31--- linux-2.6.28.orig/drivers/gpu/drm/drm_drv.c 2009-03-12 13:13:54.000000000 +0000
32+++ linux-2.6.28/drivers/gpu/drm/drm_drv.c 2009-03-12 13:37:56.000000000 +0000
33@@ -382,6 +382,8 @@
34
35 DRM_INFO("Initialized %s %d.%d.%d %s\n",
36 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
37+ drm_global_init();
38+
39 return 0;
40 err_p3:
41 drm_sysfs_destroy();
42@@ -395,6 +397,7 @@
43
44 static void __exit drm_core_exit(void)
45 {
46+ drm_global_release();
47 remove_proc_entry("dri", NULL);
48 drm_sysfs_destroy();
49
50Index: linux-2.6.28/drivers/gpu/drm/drm_global.c
51===================================================================
52--- /dev/null 1970-01-01 00:00:00.000000000 +0000
53+++ linux-2.6.28/drivers/gpu/drm/drm_global.c 2009-03-12 13:15:18.000000000 +0000
54@@ -0,0 +1,107 @@
55+/**************************************************************************
56+ *
57+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
58+ * All Rights Reserved.
59+ *
60+ * Permission is hereby granted, free of charge, to any person obtaining a
61+ * copy of this software and associated documentation files (the
62+ * "Software"), to deal in the Software without restriction, including
63+ * without limitation the rights to use, copy, modify, merge, publish,
64+ * distribute, sub license, and/or sell copies of the Software, and to
65+ * permit persons to whom the Software is furnished to do so, subject to
66+ * the following conditions:
67+ *
68+ * The above copyright notice and this permission notice (including the
69+ * next paragraph) shall be included in all copies or substantial portions
70+ * of the Software.
71+ *
72+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
73+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
74+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
75+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
76+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
77+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
78+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
79+ *
80+ **************************************************************************/
81+#include <drmP.h>
82+struct drm_global_item {
83+ struct mutex mutex;
84+ void *object;
85+ int refcount;
86+};
87+
88+static struct drm_global_item glob[DRM_GLOBAL_NUM];
89+
90+void drm_global_init(void)
91+{
92+ int i;
93+
94+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
95+ struct drm_global_item *item = &glob[i];
96+ mutex_init(&item->mutex);
97+ item->object = NULL;
98+ item->refcount = 0;
99+ }
100+}
101+
102+void drm_global_release(void)
103+{
104+ int i;
105+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
106+ struct drm_global_item *item = &glob[i];
107+ BUG_ON(item->object != NULL);
108+ BUG_ON(item->refcount != 0);
109+ }
110+}
111+
112+int drm_global_item_ref(struct drm_global_reference *ref)
113+{
114+ int ret;
115+ struct drm_global_item *item = &glob[ref->global_type];
116+ void *object;
117+
118+ mutex_lock(&item->mutex);
119+ if (item->refcount == 0) {
120+ item->object = kmalloc(ref->size, GFP_KERNEL);
121+ if (unlikely(item->object == NULL)) {
122+ ret = -ENOMEM;
123+ goto out_err;
124+ }
125+
126+ ref->object = item->object;
127+ ret = ref->init(ref);
128+ if (unlikely(ret != 0))
129+ goto out_err;
130+
131+ ++item->refcount;
132+ }
133+ ref->object = item->object;
134+ object = item->object;
135+ mutex_unlock(&item->mutex);
136+ return 0;
137+ out_err:
138+ kfree(item->object);
139+ mutex_unlock(&item->mutex);
140+ item->object = NULL;
141+ return ret;
142+}
143+
144+EXPORT_SYMBOL(drm_global_item_ref);
145+
146+void drm_global_item_unref(struct drm_global_reference *ref)
147+{
148+ struct drm_global_item *item = &glob[ref->global_type];
149+
150+ mutex_lock(&item->mutex);
151+ BUG_ON(item->refcount == 0);
152+ BUG_ON(ref->object != item->object);
153+ if (--item->refcount == 0) {
154+ ref->release(ref);
155+ kfree(item->object);
156+ item->object = NULL;
157+ }
158+ mutex_unlock(&item->mutex);
159+}
160+
161+EXPORT_SYMBOL(drm_global_item_unref);
162Index: linux-2.6.28/include/drm/drmP.h
163===================================================================
164--- linux-2.6.28.orig/include/drm/drmP.h 2009-03-12 13:15:05.000000000 +0000
165+++ linux-2.6.28/include/drm/drmP.h 2009-03-12 13:37:56.000000000 +0000
166@@ -1412,5 +1412,25 @@
167
168 /*@}*/
169
170+enum drm_global_types {
171+ DRM_GLOBAL_TTM_MEM = 0,
172+ DRM_GLOBAL_TTM_BO,
173+ DRM_GLOBAL_TTM_OBJECT,
174+ DRM_GLOBAL_NUM
175+};
176+
177+struct drm_global_reference {
178+ enum drm_global_types global_type;
179+ size_t size;
180+ void *object;
181+ int (*init) (struct drm_global_reference *);
182+ void (*release) (struct drm_global_reference *);
183+};
184+
185+extern void drm_global_init(void);
186+extern void drm_global_release(void);
187+extern int drm_global_item_ref(struct drm_global_reference *ref);
188+extern void drm_global_item_unref(struct drm_global_reference *ref);
189+
190 #endif /* __KERNEL__ */
191 #endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0003-drm-Export-hash-table-functionality.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0003-drm-Export-hash-table-functionality.patch
deleted file mode 100644
index a54a3cf281..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0003-drm-Export-hash-table-functionality.patch
+++ /dev/null
@@ -1,58 +0,0 @@
1From 723cc597790fb648506a44e811415eb88b9dcdfa Mon Sep 17 00:00:00 2001
2From: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
3Date: Fri, 27 Feb 2009 17:18:37 +0100
4Subject: [PATCH 3/8] drm: Export hash table functionality.
5
6Also fix include file.
7
8Signed-off-by: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
9---
10 drivers/gpu/drm/drm_hashtab.c | 4 ++++
11 include/drm/drm_hashtab.h | 1 +
12 2 files changed, 5 insertions(+), 0 deletions(-)
13
14Index: linux-2.6.28/drivers/gpu/drm/drm_hashtab.c
15===================================================================
16--- linux-2.6.28.orig/drivers/gpu/drm/drm_hashtab.c 2009-03-09 19:19:52.000000000 +0000
17+++ linux-2.6.28/drivers/gpu/drm/drm_hashtab.c 2009-03-12 13:15:25.000000000 +0000
18@@ -62,6 +62,7 @@
19 }
20 return 0;
21 }
22+EXPORT_SYMBOL(drm_ht_create);
23
24 void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
25 {
26@@ -156,6 +157,7 @@
27 }
28 return 0;
29 }
30+EXPORT_SYMBOL(drm_ht_just_insert_please);
31
32 int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
33 struct drm_hash_item **item)
34@@ -169,6 +171,7 @@
35 *item = hlist_entry(list, struct drm_hash_item, head);
36 return 0;
37 }
38+EXPORT_SYMBOL(drm_ht_find_item);
39
40 int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
41 {
42@@ -202,3 +205,4 @@
43 ht->table = NULL;
44 }
45 }
46+EXPORT_SYMBOL(drm_ht_remove);
47Index: linux-2.6.28/include/drm/drm_hashtab.h
48===================================================================
49--- linux-2.6.28.orig/include/drm/drm_hashtab.h 2008-12-24 23:26:37.000000000 +0000
50+++ linux-2.6.28/include/drm/drm_hashtab.h 2009-03-12 13:15:25.000000000 +0000
51@@ -34,6 +34,7 @@
52
53 #ifndef DRM_HASHTAB_H
54 #define DRM_HASHTAB_H
55+#include <linux/list.h>
56
57 #define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
58
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch
deleted file mode 100644
index a475cc1b7b..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch
+++ /dev/null
@@ -1,53 +0,0 @@
1From a5fef5986c407d56f4e4cf618d6099e122a096ef Mon Sep 17 00:00:00 2001
2From: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
3Date: Fri, 27 Feb 2009 13:04:46 +0100
4Subject: [PATCH 7/8] drm: Add unlocked IOCTL functionality from the drm repo.
5
6---
7 drivers/gpu/drm/drm_drv.c | 11 ++++++++++-
8 include/drm/drmP.h | 2 ++
9 2 files changed, 12 insertions(+), 1 deletions(-)
10
11Index: linux-2.6.28/drivers/gpu/drm/drm_drv.c
12===================================================================
13--- linux-2.6.28.orig/drivers/gpu/drm/drm_drv.c 2009-03-12 13:15:18.000000000 +0000
14+++ linux-2.6.28/drivers/gpu/drm/drm_drv.c 2009-03-12 13:15:41.000000000 +0000
15@@ -448,9 +450,16 @@
16 * Looks up the ioctl function in the ::ioctls table, checking for root
17 * previleges if so required, and dispatches to the respective function.
18 */
19+
20 int drm_ioctl(struct inode *inode, struct file *filp,
21 unsigned int cmd, unsigned long arg)
22 {
23+ return drm_unlocked_ioctl(filp, cmd, arg);
24+}
25+EXPORT_SYMBOL(drm_ioctl);
26+
27+long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
28+{
29 struct drm_file *file_priv = filp->private_data;
30 struct drm_device *dev = file_priv->minor->dev;
31 struct drm_ioctl_desc *ioctl;
32@@ -527,7 +536,7 @@
33 return retcode;
34 }
35
36-EXPORT_SYMBOL(drm_ioctl);
37+EXPORT_SYMBOL(drm_unlocked_ioctl);
38
39 drm_local_map_t *drm_getsarea(struct drm_device *dev)
40 {
41Index: linux-2.6.28/include/drm/drmP.h
42===================================================================
43--- linux-2.6.28.orig/include/drm/drmP.h 2009-03-12 13:15:18.000000000 +0000
44+++ linux-2.6.28/include/drm/drmP.h 2009-03-12 13:15:41.000000000 +0000
45@@ -1025,6 +1025,8 @@
46 extern void drm_exit(struct drm_driver *driver);
47 extern int drm_ioctl(struct inode *inode, struct file *filp,
48 unsigned int cmd, unsigned long arg);
49+extern long drm_unlocked_ioctl(struct file *filp,
50+ unsigned int cmd, unsigned long arg);
51 extern long drm_compat_ioctl(struct file *filp,
52 unsigned int cmd, unsigned long arg);
53 extern int drm_lastclose(struct drm_device *dev);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-generic b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-generic
deleted file mode 100644
index edf61c21ad..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-generic
+++ /dev/null
@@ -1,2844 +0,0 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.rc7-4.netbook
4# Mon Dec 8 01:05:27 2008
5#
6# CONFIG_64BIT is not set
7CONFIG_X86_32=y
8# CONFIG_X86_64 is not set
9CONFIG_X86=y
10CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
11CONFIG_GENERIC_TIME=y
12CONFIG_GENERIC_CMOS_UPDATE=y
13CONFIG_CLOCKSOURCE_WATCHDOG=y
14CONFIG_GENERIC_CLOCKEVENTS=y
15CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
16CONFIG_LOCKDEP_SUPPORT=y
17CONFIG_STACKTRACE_SUPPORT=y
18CONFIG_HAVE_LATENCYTOP_SUPPORT=y
19CONFIG_FAST_CMPXCHG_LOCAL=y
20CONFIG_MMU=y
21CONFIG_ZONE_DMA=y
22CONFIG_GENERIC_ISA_DMA=y
23CONFIG_GENERIC_IOMAP=y
24CONFIG_GENERIC_BUG=y
25CONFIG_GENERIC_HWEIGHT=y
26CONFIG_ARCH_MAY_HAVE_PC_FDC=y
27# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
28CONFIG_RWSEM_XCHGADD_ALGORITHM=y
29CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
30CONFIG_GENERIC_CALIBRATE_DELAY=y
31# CONFIG_GENERIC_TIME_VSYSCALL is not set
32CONFIG_ARCH_HAS_CPU_RELAX=y
33CONFIG_ARCH_HAS_DEFAULT_IDLE=y
34CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
35CONFIG_HAVE_SETUP_PER_CPU_AREA=y
36# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
37CONFIG_ARCH_HIBERNATION_POSSIBLE=y
38CONFIG_ARCH_SUSPEND_POSSIBLE=y
39# CONFIG_ZONE_DMA32 is not set
40CONFIG_ARCH_POPULATES_NODE_MAP=y
41# CONFIG_AUDIT_ARCH is not set
42CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
43CONFIG_GENERIC_HARDIRQS=y
44CONFIG_GENERIC_IRQ_PROBE=y
45CONFIG_GENERIC_PENDING_IRQ=y
46CONFIG_X86_SMP=y
47CONFIG_USE_GENERIC_SMP_HELPERS=y
48CONFIG_X86_32_SMP=y
49CONFIG_X86_HT=y
50CONFIG_X86_BIOS_REBOOT=y
51CONFIG_X86_TRAMPOLINE=y
52CONFIG_KTIME_SCALAR=y
53CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
54
55#
56# General setup
57#
58CONFIG_EXPERIMENTAL=y
59CONFIG_LOCK_KERNEL=y
60CONFIG_INIT_ENV_ARG_LIMIT=32
61CONFIG_LOCALVERSION=""
62# CONFIG_LOCALVERSION_AUTO is not set
63CONFIG_SWAP=y
64CONFIG_SYSVIPC=y
65CONFIG_SYSVIPC_SYSCTL=y
66CONFIG_POSIX_MQUEUE=y
67CONFIG_BSD_PROCESS_ACCT=y
68CONFIG_BSD_PROCESS_ACCT_V3=y
69# CONFIG_TASKSTATS is not set
70# CONFIG_AUDIT is not set
71# CONFIG_IKCONFIG is not set
72CONFIG_LOG_BUF_SHIFT=17
73# CONFIG_CGROUPS is not set
74CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
75# CONFIG_GROUP_SCHED is not set
76# CONFIG_SYSFS_DEPRECATED_V2 is not set
77CONFIG_RELAY=y
78CONFIG_NAMESPACES=y
79# CONFIG_UTS_NS is not set
80# CONFIG_IPC_NS is not set
81# CONFIG_USER_NS is not set
82# CONFIG_PID_NS is not set
83CONFIG_BLK_DEV_INITRD=y
84CONFIG_INITRAMFS_SOURCE=""
85CONFIG_CC_OPTIMIZE_FOR_SIZE=y
86CONFIG_FASTBOOT=y
87CONFIG_SYSCTL=y
88# CONFIG_EMBEDDED is not set
89CONFIG_UID16=y
90CONFIG_SYSCTL_SYSCALL=y
91CONFIG_KALLSYMS=y
92CONFIG_KALLSYMS_ALL=y
93CONFIG_KALLSYMS_EXTRA_PASS=y
94CONFIG_KALLSYMS_STRIP_GENERATED=y
95CONFIG_HOTPLUG=y
96CONFIG_PRINTK=y
97CONFIG_BUG=y
98CONFIG_ELF_CORE=y
99CONFIG_PCSPKR_PLATFORM=y
100# CONFIG_COMPAT_BRK is not set
101CONFIG_BASE_FULL=y
102CONFIG_FUTEX=y
103CONFIG_ANON_INODES=y
104CONFIG_EPOLL=y
105CONFIG_SIGNALFD=y
106CONFIG_TIMERFD=y
107CONFIG_EVENTFD=y
108CONFIG_SHMEM=y
109CONFIG_AIO=y
110CONFIG_VM_EVENT_COUNTERS=y
111CONFIG_PCI_QUIRKS=y
112CONFIG_SLAB=y
113# CONFIG_SLUB is not set
114# CONFIG_SLOB is not set
115CONFIG_PROFILING=y
116# CONFIG_MARKERS is not set
117# CONFIG_OPROFILE is not set
118CONFIG_HAVE_OPROFILE=y
119# CONFIG_KPROBES is not set
120CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
121CONFIG_HAVE_IOREMAP_PROT=y
122CONFIG_HAVE_KPROBES=y
123CONFIG_HAVE_KRETPROBES=y
124CONFIG_HAVE_ARCH_TRACEHOOK=y
125CONFIG_HAVE_GENERIC_DMA_COHERENT=y
126CONFIG_SLABINFO=y
127CONFIG_RT_MUTEXES=y
128# CONFIG_TINY_SHMEM is not set
129CONFIG_BASE_SMALL=0
130CONFIG_MODULES=y
131# CONFIG_MODULE_FORCE_LOAD is not set
132CONFIG_MODULE_UNLOAD=y
133# CONFIG_MODULE_FORCE_UNLOAD is not set
134# CONFIG_MODVERSIONS is not set
135# CONFIG_MODULE_SRCVERSION_ALL is not set
136CONFIG_KMOD=y
137CONFIG_STOP_MACHINE=y
138CONFIG_BLOCK=y
139CONFIG_LBD=y
140CONFIG_BLK_DEV_IO_TRACE=y
141# CONFIG_LSF is not set
142CONFIG_BLK_DEV_BSG=y
143# CONFIG_BLK_DEV_INTEGRITY is not set
144
145#
146# IO Schedulers
147#
148CONFIG_IOSCHED_NOOP=y
149# CONFIG_IOSCHED_AS is not set
150# CONFIG_IOSCHED_DEADLINE is not set
151CONFIG_IOSCHED_CFQ=y
152# CONFIG_DEFAULT_AS is not set
153# CONFIG_DEFAULT_DEADLINE is not set
154CONFIG_DEFAULT_CFQ=y
155# CONFIG_DEFAULT_NOOP is not set
156CONFIG_DEFAULT_IOSCHED="cfq"
157CONFIG_CLASSIC_RCU=y
158CONFIG_FREEZER=y
159
160#
161# Processor type and features
162#
163CONFIG_TICK_ONESHOT=y
164CONFIG_NO_HZ=y
165CONFIG_HIGH_RES_TIMERS=y
166CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
167CONFIG_SMP=y
168# CONFIG_SPARSE_IRQ is not set
169CONFIG_X86_FIND_SMP_CONFIG=y
170CONFIG_X86_MPPARSE=y
171# CONFIG_X86_PC is not set
172# CONFIG_X86_ELAN is not set
173# CONFIG_X86_VOYAGER is not set
174CONFIG_X86_GENERICARCH=y
175# CONFIG_X86_NUMAQ is not set
176# CONFIG_X86_SUMMIT is not set
177# CONFIG_X86_ES7000 is not set
178# CONFIG_X86_BIGSMP is not set
179# CONFIG_X86_VSMP is not set
180# CONFIG_X86_RDC321X is not set
181CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
182# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
183# CONFIG_PARAVIRT_GUEST is not set
184# CONFIG_MEMTEST is not set
185CONFIG_X86_CYCLONE_TIMER=y
186# CONFIG_M386 is not set
187# CONFIG_M486 is not set
188# CONFIG_M586 is not set
189# CONFIG_M586TSC is not set
190# CONFIG_M586MMX is not set
191# CONFIG_M686 is not set
192# CONFIG_MPENTIUMII is not set
193# CONFIG_MPENTIUMIII is not set
194CONFIG_MPENTIUMM=y
195# CONFIG_MPENTIUM4 is not set
196# CONFIG_MK6 is not set
197# CONFIG_MK7 is not set
198# CONFIG_MK8 is not set
199# CONFIG_MCRUSOE is not set
200# CONFIG_MEFFICEON is not set
201# CONFIG_MWINCHIPC6 is not set
202# CONFIG_MWINCHIP3D is not set
203# CONFIG_MGEODEGX1 is not set
204# CONFIG_MGEODE_LX is not set
205# CONFIG_MCYRIXIII is not set
206# CONFIG_MVIAC3_2 is not set
207# CONFIG_MVIAC7 is not set
208# CONFIG_MPSC is not set
209# CONFIG_MCORE2 is not set
210# CONFIG_GENERIC_CPU is not set
211CONFIG_X86_GENERIC=y
212CONFIG_X86_CPU=y
213CONFIG_X86_CMPXCHG=y
214CONFIG_X86_L1_CACHE_SHIFT=7
215CONFIG_X86_XADD=y
216CONFIG_X86_WP_WORKS_OK=y
217CONFIG_X86_INVLPG=y
218CONFIG_X86_BSWAP=y
219CONFIG_X86_POPAD_OK=y
220CONFIG_X86_INTEL_USERCOPY=y
221CONFIG_X86_USE_PPRO_CHECKSUM=y
222CONFIG_X86_TSC=y
223CONFIG_X86_CMPXCHG64=y
224CONFIG_X86_CMOV=y
225CONFIG_X86_MINIMUM_CPU_FAMILY=4
226CONFIG_X86_DEBUGCTLMSR=y
227CONFIG_CPU_SUP_INTEL=y
228CONFIG_CPU_SUP_CYRIX_32=y
229CONFIG_CPU_SUP_AMD=y
230CONFIG_CPU_SUP_CENTAUR_32=y
231CONFIG_CPU_SUP_TRANSMETA_32=y
232CONFIG_CPU_SUP_UMC_32=y
233# CONFIG_X86_DS is not set
234# CONFIG_X86_PTRACE_BTS is not set
235CONFIG_HPET_TIMER=y
236CONFIG_HPET_EMULATE_RTC=y
237CONFIG_DMI=y
238# CONFIG_IOMMU_HELPER is not set
239CONFIG_NR_CPUS=8
240CONFIG_SCHED_SMT=y
241CONFIG_SCHED_MC=y
242# CONFIG_PREEMPT_NONE is not set
243# CONFIG_PREEMPT_VOLUNTARY is not set
244CONFIG_PREEMPT=y
245# CONFIG_DEBUG_PREEMPT is not set
246# CONFIG_PREEMPT_TRACER is not set
247CONFIG_X86_LOCAL_APIC=y
248CONFIG_X86_IO_APIC=y
249# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
250CONFIG_X86_MCE=y
251# CONFIG_X86_MCE_NONFATAL is not set
252CONFIG_X86_MCE_P4THERMAL=y
253CONFIG_VM86=y
254CONFIG_TOSHIBA=m
255CONFIG_I8K=m
256CONFIG_X86_REBOOTFIXUPS=y
257CONFIG_MICROCODE=y
258CONFIG_MICROCODE_INTEL=y
259# CONFIG_MICROCODE_AMD is not set
260CONFIG_MICROCODE_OLD_INTERFACE=y
261CONFIG_X86_MSR=y
262CONFIG_X86_CPUID=y
263# CONFIG_NOHIGHMEM is not set
264# CONFIG_HIGHMEM4G is not set
265CONFIG_HIGHMEM64G=y
266CONFIG_PAGE_OFFSET=0xC0000000
267CONFIG_HIGHMEM=y
268CONFIG_X86_PAE=y
269CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
270CONFIG_ARCH_FLATMEM_ENABLE=y
271CONFIG_ARCH_SPARSEMEM_ENABLE=y
272CONFIG_ARCH_SELECT_MEMORY_MODEL=y
273CONFIG_SELECT_MEMORY_MODEL=y
274CONFIG_FLATMEM_MANUAL=y
275# CONFIG_DISCONTIGMEM_MANUAL is not set
276# CONFIG_SPARSEMEM_MANUAL is not set
277CONFIG_FLATMEM=y
278CONFIG_FLAT_NODE_MEM_MAP=y
279CONFIG_SPARSEMEM_STATIC=y
280CONFIG_PAGEFLAGS_EXTENDED=y
281CONFIG_SPLIT_PTLOCK_CPUS=4
282CONFIG_RESOURCES_64BIT=y
283CONFIG_PHYS_ADDR_T_64BIT=y
284CONFIG_ZONE_DMA_FLAG=1
285CONFIG_BOUNCE=y
286CONFIG_VIRT_TO_BUS=y
287CONFIG_UNEVICTABLE_LRU=y
288CONFIG_HIGHPTE=y
289# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
290CONFIG_X86_RESERVE_LOW_64K=y
291# CONFIG_MATH_EMULATION is not set
292CONFIG_MTRR=y
293CONFIG_MTRR_SANITIZER=y
294CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
295CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
296CONFIG_X86_PAT=y
297CONFIG_EFI=y
298# CONFIG_SECCOMP is not set
299# CONFIG_HZ_100 is not set
300# CONFIG_HZ_250 is not set
301# CONFIG_HZ_300 is not set
302CONFIG_HZ_1000=y
303CONFIG_HZ=1000
304CONFIG_SCHED_HRTICK=y
305# CONFIG_KEXEC is not set
306# CONFIG_CRASH_DUMP is not set
307CONFIG_PHYSICAL_START=0x100000
308# CONFIG_RELOCATABLE is not set
309CONFIG_PHYSICAL_ALIGN=0x400000
310CONFIG_HOTPLUG_CPU=y
311# CONFIG_COMPAT_VDSO is not set
312# CONFIG_CMDLINE_BOOL is not set
313# CONFIG_CMDLINE is not set
314# CONFIG_CMDLINE_OVERRIDE is not set
315CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
316
317#
318# Power management and ACPI options
319#
320CONFIG_PM=y
321CONFIG_PM_DEBUG=y
322# CONFIG_PM_VERBOSE is not set
323CONFIG_CAN_PM_TRACE=y
324CONFIG_PM_TRACE=y
325CONFIG_PM_TRACE_RTC=y
326CONFIG_PM_SLEEP_SMP=y
327CONFIG_PM_SLEEP=y
328CONFIG_SUSPEND=y
329# CONFIG_PM_TEST_SUSPEND is not set
330CONFIG_SUSPEND_FREEZER=y
331CONFIG_HIBERNATION=y
332CONFIG_PM_STD_PARTITION=""
333CONFIG_ACPI=y
334CONFIG_ACPI_SLEEP=y
335CONFIG_ACPI_PROCFS=y
336CONFIG_ACPI_PROCFS_POWER=y
337CONFIG_ACPI_SYSFS_POWER=y
338CONFIG_ACPI_PROC_EVENT=y
339CONFIG_ACPI_AC=y
340CONFIG_ACPI_BATTERY=m
341CONFIG_ACPI_BUTTON=y
342CONFIG_ACPI_VIDEO=y
343CONFIG_ACPI_FAN=y
344CONFIG_ACPI_DOCK=y
345CONFIG_ACPI_PROCESSOR=y
346CONFIG_ACPI_HOTPLUG_CPU=y
347CONFIG_ACPI_THERMAL=y
348CONFIG_ACPI_WMI=y
349CONFIG_ACPI_ASUS=m
350CONFIG_ACPI_TOSHIBA=m
351# CONFIG_ACPI_CUSTOM_DSDT is not set
352CONFIG_ACPI_BLACKLIST_YEAR=1999
353# CONFIG_ACPI_DEBUG is not set
354# CONFIG_ACPI_PCI_SLOT is not set
355CONFIG_ACPI_SYSTEM=y
356CONFIG_X86_PM_TIMER=y
357CONFIG_ACPI_CONTAINER=y
358CONFIG_ACPI_SBS=m
359# CONFIG_APM is not set
360
361#
362# CPU Frequency scaling
363#
364CONFIG_CPU_FREQ=y
365CONFIG_CPU_FREQ_TABLE=y
366CONFIG_CPU_FREQ_DEBUG=y
367CONFIG_CPU_FREQ_STAT=y
368CONFIG_CPU_FREQ_STAT_DETAILS=y
369# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
370# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
371# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
372CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
373# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
374CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
375# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
376CONFIG_CPU_FREQ_GOV_USERSPACE=y
377CONFIG_CPU_FREQ_GOV_ONDEMAND=y
378# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
379
380#
381# CPUFreq processor drivers
382#
383CONFIG_X86_ACPI_CPUFREQ=y
384# CONFIG_X86_POWERNOW_K6 is not set
385# CONFIG_X86_POWERNOW_K7 is not set
386# CONFIG_X86_POWERNOW_K8 is not set
387# CONFIG_X86_GX_SUSPMOD is not set
388# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
389# CONFIG_X86_SPEEDSTEP_ICH is not set
390# CONFIG_X86_SPEEDSTEP_SMI is not set
391# CONFIG_X86_P4_CLOCKMOD is not set
392# CONFIG_X86_CPUFREQ_NFORCE2 is not set
393# CONFIG_X86_LONGRUN is not set
394# CONFIG_X86_LONGHAUL is not set
395# CONFIG_X86_E_POWERSAVER is not set
396
397#
398# shared options
399#
400# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
401# CONFIG_X86_SPEEDSTEP_LIB is not set
402CONFIG_CPU_IDLE=y
403CONFIG_CPU_IDLE_GOV_LADDER=y
404CONFIG_CPU_IDLE_GOV_MENU=y
405
406#
407# Bus options (PCI etc.)
408#
409CONFIG_PCI=y
410# CONFIG_PCI_GOBIOS is not set
411# CONFIG_PCI_GOMMCONFIG is not set
412# CONFIG_PCI_GODIRECT is not set
413# CONFIG_PCI_GOOLPC is not set
414CONFIG_PCI_GOANY=y
415CONFIG_PCI_BIOS=y
416CONFIG_PCI_DIRECT=y
417CONFIG_PCI_MMCONFIG=y
418CONFIG_PCI_DOMAINS=y
419CONFIG_PCIEPORTBUS=y
420# CONFIG_PCIEAER is not set
421# CONFIG_PCIEASPM is not set
422# CONFIG_PCIEASPM_DEBUG is not set
423CONFIG_ARCH_SUPPORTS_MSI=y
424CONFIG_PCI_MSI=y
425# CONFIG_PCI_LEGACY is not set
426# CONFIG_PCI_DEBUG is not set
427# CONFIG_PCI_STUB is not set
428# CONFIG_HT_IRQ is not set
429CONFIG_ISA_DMA_API=y
430CONFIG_ISA=y
431# CONFIG_EISA is not set
432# CONFIG_MCA is not set
433# CONFIG_SCx200 is not set
434# CONFIG_OLPC is not set
435CONFIG_PCCARD=y
436# CONFIG_PCMCIA_DEBUG is not set
437# CONFIG_PCMCIA is not set
438CONFIG_CARDBUS=y
439
440#
441# PC-card bridges
442#
443CONFIG_YENTA=y
444CONFIG_YENTA_O2=y
445CONFIG_YENTA_RICOH=y
446CONFIG_YENTA_TI=y
447CONFIG_YENTA_ENE_TUNE=y
448CONFIG_YENTA_TOSHIBA=y
449CONFIG_PCMCIA_PROBE=y
450CONFIG_PCCARD_NONSTATIC=y
451# CONFIG_HOTPLUG_PCI is not set
452
453#
454# Executable file formats / Emulations
455#
456CONFIG_BINFMT_ELF=y
457# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
458CONFIG_HAVE_AOUT=y
459# CONFIG_BINFMT_AOUT is not set
460CONFIG_BINFMT_MISC=y
461CONFIG_HAVE_ATOMIC_IOMAP=y
462CONFIG_NET=y
463
464#
465# Networking options
466#
467# CONFIG_NET_NS is not set
468CONFIG_PACKET=y
469CONFIG_PACKET_MMAP=y
470CONFIG_UNIX=y
471CONFIG_XFRM=y
472CONFIG_XFRM_USER=y
473CONFIG_XFRM_SUB_POLICY=y
474CONFIG_XFRM_MIGRATE=y
475CONFIG_XFRM_STATISTICS=y
476CONFIG_XFRM_IPCOMP=m
477CONFIG_NET_KEY=m
478CONFIG_NET_KEY_MIGRATE=y
479CONFIG_INET=y
480CONFIG_IP_MULTICAST=y
481# CONFIG_IP_ADVANCED_ROUTER is not set
482CONFIG_IP_FIB_HASH=y
483# CONFIG_IP_PNP is not set
484# CONFIG_NET_IPIP is not set
485# CONFIG_NET_IPGRE is not set
486CONFIG_IP_MROUTE=y
487CONFIG_IP_PIMSM_V1=y
488CONFIG_IP_PIMSM_V2=y
489# CONFIG_ARPD is not set
490CONFIG_SYN_COOKIES=y
491CONFIG_INET_AH=m
492CONFIG_INET_ESP=m
493CONFIG_INET_IPCOMP=m
494CONFIG_INET_XFRM_TUNNEL=m
495CONFIG_INET_TUNNEL=m
496CONFIG_INET_XFRM_MODE_TRANSPORT=m
497CONFIG_INET_XFRM_MODE_TUNNEL=m
498CONFIG_INET_XFRM_MODE_BEET=m
499CONFIG_INET_LRO=y
500CONFIG_INET_DIAG=m
501CONFIG_INET_TCP_DIAG=m
502CONFIG_TCP_CONG_ADVANCED=y
503CONFIG_TCP_CONG_BIC=m
504CONFIG_TCP_CONG_CUBIC=y
505# CONFIG_TCP_CONG_WESTWOOD is not set
506# CONFIG_TCP_CONG_HTCP is not set
507# CONFIG_TCP_CONG_HSTCP is not set
508# CONFIG_TCP_CONG_HYBLA is not set
509# CONFIG_TCP_CONG_VEGAS is not set
510# CONFIG_TCP_CONG_SCALABLE is not set
511# CONFIG_TCP_CONG_LP is not set
512# CONFIG_TCP_CONG_VENO is not set
513# CONFIG_TCP_CONG_YEAH is not set
514# CONFIG_TCP_CONG_ILLINOIS is not set
515# CONFIG_DEFAULT_BIC is not set
516CONFIG_DEFAULT_CUBIC=y
517# CONFIG_DEFAULT_HTCP is not set
518# CONFIG_DEFAULT_VEGAS is not set
519# CONFIG_DEFAULT_WESTWOOD is not set
520# CONFIG_DEFAULT_RENO is not set
521CONFIG_DEFAULT_TCP_CONG="cubic"
522CONFIG_TCP_MD5SIG=y
523CONFIG_IPV6=y
524CONFIG_IPV6_PRIVACY=y
525CONFIG_IPV6_ROUTER_PREF=y
526CONFIG_IPV6_ROUTE_INFO=y
527CONFIG_IPV6_OPTIMISTIC_DAD=y
528CONFIG_INET6_AH=m
529CONFIG_INET6_ESP=m
530CONFIG_INET6_IPCOMP=m
531CONFIG_IPV6_MIP6=m
532CONFIG_INET6_XFRM_TUNNEL=m
533CONFIG_INET6_TUNNEL=m
534CONFIG_INET6_XFRM_MODE_TRANSPORT=m
535CONFIG_INET6_XFRM_MODE_TUNNEL=m
536CONFIG_INET6_XFRM_MODE_BEET=m
537CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
538CONFIG_IPV6_SIT=m
539CONFIG_IPV6_NDISC_NODETYPE=y
540CONFIG_IPV6_TUNNEL=m
541CONFIG_IPV6_MULTIPLE_TABLES=y
542CONFIG_IPV6_SUBTREES=y
543# CONFIG_IPV6_MROUTE is not set
544CONFIG_NETWORK_SECMARK=y
545CONFIG_NETFILTER=y
546# CONFIG_NETFILTER_DEBUG is not set
547CONFIG_NETFILTER_ADVANCED=y
548
549#
550# Core Netfilter Configuration
551#
552CONFIG_NETFILTER_NETLINK=m
553CONFIG_NETFILTER_NETLINK_QUEUE=m
554CONFIG_NETFILTER_NETLINK_LOG=m
555CONFIG_NF_CONNTRACK=y
556CONFIG_NF_CT_ACCT=y
557CONFIG_NF_CONNTRACK_MARK=y
558CONFIG_NF_CONNTRACK_SECMARK=y
559CONFIG_NF_CONNTRACK_EVENTS=y
560# CONFIG_NF_CT_PROTO_DCCP is not set
561CONFIG_NF_CT_PROTO_GRE=m
562CONFIG_NF_CT_PROTO_SCTP=m
563CONFIG_NF_CT_PROTO_UDPLITE=m
564CONFIG_NF_CONNTRACK_AMANDA=m
565CONFIG_NF_CONNTRACK_FTP=m
566CONFIG_NF_CONNTRACK_H323=m
567CONFIG_NF_CONNTRACK_IRC=m
568CONFIG_NF_CONNTRACK_NETBIOS_NS=m
569CONFIG_NF_CONNTRACK_PPTP=m
570CONFIG_NF_CONNTRACK_SANE=m
571CONFIG_NF_CONNTRACK_SIP=m
572CONFIG_NF_CONNTRACK_TFTP=m
573CONFIG_NF_CT_NETLINK=m
574# CONFIG_NETFILTER_TPROXY is not set
575CONFIG_NETFILTER_XTABLES=y
576CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
577CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
578CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
579CONFIG_NETFILTER_XT_TARGET_DSCP=m
580CONFIG_NETFILTER_XT_TARGET_MARK=m
581CONFIG_NETFILTER_XT_TARGET_NFLOG=m
582CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
583CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
584CONFIG_NETFILTER_XT_TARGET_RATEEST=m
585CONFIG_NETFILTER_XT_TARGET_TRACE=m
586CONFIG_NETFILTER_XT_TARGET_SECMARK=m
587CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
588CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
589CONFIG_NETFILTER_XT_MATCH_COMMENT=m
590CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
591CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
592CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
593CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
594# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
595CONFIG_NETFILTER_XT_MATCH_DSCP=m
596CONFIG_NETFILTER_XT_MATCH_ESP=m
597CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
598CONFIG_NETFILTER_XT_MATCH_HELPER=m
599CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
600CONFIG_NETFILTER_XT_MATCH_LENGTH=m
601CONFIG_NETFILTER_XT_MATCH_LIMIT=m
602CONFIG_NETFILTER_XT_MATCH_MAC=m
603CONFIG_NETFILTER_XT_MATCH_MARK=m
604CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
605CONFIG_NETFILTER_XT_MATCH_OWNER=m
606CONFIG_NETFILTER_XT_MATCH_POLICY=m
607CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
608CONFIG_NETFILTER_XT_MATCH_QUOTA=m
609CONFIG_NETFILTER_XT_MATCH_RATEEST=m
610CONFIG_NETFILTER_XT_MATCH_REALM=m
611# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
612CONFIG_NETFILTER_XT_MATCH_SCTP=m
613CONFIG_NETFILTER_XT_MATCH_STATE=y
614CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
615CONFIG_NETFILTER_XT_MATCH_STRING=m
616CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
617CONFIG_NETFILTER_XT_MATCH_TIME=m
618CONFIG_NETFILTER_XT_MATCH_U32=m
619# CONFIG_IP_VS is not set
620
621#
622# IP: Netfilter Configuration
623#
624CONFIG_NF_DEFRAG_IPV4=y
625CONFIG_NF_CONNTRACK_IPV4=y
626# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
627CONFIG_IP_NF_QUEUE=m
628CONFIG_IP_NF_IPTABLES=y
629CONFIG_IP_NF_MATCH_ADDRTYPE=m
630CONFIG_IP_NF_MATCH_AH=m
631CONFIG_IP_NF_MATCH_ECN=m
632CONFIG_IP_NF_MATCH_TTL=m
633CONFIG_IP_NF_FILTER=y
634CONFIG_IP_NF_TARGET_REJECT=y
635CONFIG_IP_NF_TARGET_LOG=m
636CONFIG_IP_NF_TARGET_ULOG=m
637CONFIG_NF_NAT=m
638CONFIG_NF_NAT_NEEDED=y
639CONFIG_IP_NF_TARGET_MASQUERADE=m
640CONFIG_IP_NF_TARGET_NETMAP=m
641CONFIG_IP_NF_TARGET_REDIRECT=m
642CONFIG_NF_NAT_SNMP_BASIC=m
643CONFIG_NF_NAT_PROTO_GRE=m
644CONFIG_NF_NAT_PROTO_UDPLITE=m
645CONFIG_NF_NAT_PROTO_SCTP=m
646CONFIG_NF_NAT_FTP=m
647CONFIG_NF_NAT_IRC=m
648CONFIG_NF_NAT_TFTP=m
649CONFIG_NF_NAT_AMANDA=m
650CONFIG_NF_NAT_PPTP=m
651CONFIG_NF_NAT_H323=m
652CONFIG_NF_NAT_SIP=m
653CONFIG_IP_NF_MANGLE=m
654CONFIG_IP_NF_TARGET_CLUSTERIP=m
655CONFIG_IP_NF_TARGET_ECN=m
656CONFIG_IP_NF_TARGET_TTL=m
657CONFIG_IP_NF_RAW=m
658CONFIG_IP_NF_ARPTABLES=m
659CONFIG_IP_NF_ARPFILTER=m
660CONFIG_IP_NF_ARP_MANGLE=m
661
662#
663# IPv6: Netfilter Configuration
664#
665CONFIG_NF_CONNTRACK_IPV6=y
666CONFIG_IP6_NF_QUEUE=m
667CONFIG_IP6_NF_IPTABLES=y
668CONFIG_IP6_NF_MATCH_AH=m
669CONFIG_IP6_NF_MATCH_EUI64=m
670CONFIG_IP6_NF_MATCH_FRAG=m
671CONFIG_IP6_NF_MATCH_OPTS=m
672CONFIG_IP6_NF_MATCH_HL=m
673CONFIG_IP6_NF_MATCH_IPV6HEADER=m
674CONFIG_IP6_NF_MATCH_MH=m
675CONFIG_IP6_NF_MATCH_RT=m
676CONFIG_IP6_NF_TARGET_LOG=m
677CONFIG_IP6_NF_FILTER=y
678CONFIG_IP6_NF_TARGET_REJECT=y
679CONFIG_IP6_NF_MANGLE=m
680CONFIG_IP6_NF_TARGET_HL=m
681CONFIG_IP6_NF_RAW=m
682# CONFIG_IP_DCCP is not set
683# CONFIG_IP_SCTP is not set
684# CONFIG_TIPC is not set
685# CONFIG_ATM is not set
686# CONFIG_BRIDGE is not set
687# CONFIG_NET_DSA is not set
688# CONFIG_VLAN_8021Q is not set
689# CONFIG_DECNET is not set
690# CONFIG_LLC2 is not set
691# CONFIG_IPX is not set
692# CONFIG_ATALK is not set
693# CONFIG_X25 is not set
694# CONFIG_LAPB is not set
695# CONFIG_ECONET is not set
696# CONFIG_WAN_ROUTER is not set
697# CONFIG_NET_SCHED is not set
698CONFIG_NET_CLS_ROUTE=y
699# CONFIG_DCB is not set
700
701#
702# Network testing
703#
704# CONFIG_NET_PKTGEN is not set
705# CONFIG_HAMRADIO is not set
706# CONFIG_CAN is not set
707# CONFIG_IRDA is not set
708CONFIG_BT=y
709CONFIG_BT_L2CAP=y
710CONFIG_BT_SCO=y
711CONFIG_BT_RFCOMM=y
712CONFIG_BT_RFCOMM_TTY=y
713CONFIG_BT_BNEP=y
714CONFIG_BT_BNEP_MC_FILTER=y
715CONFIG_BT_BNEP_PROTO_FILTER=y
716CONFIG_BT_HIDP=y
717
718#
719# Bluetooth device drivers
720#
721CONFIG_BT_HCIBTUSB=y
722CONFIG_BT_HCIBTSDIO=m
723CONFIG_BT_HCIUART=m
724CONFIG_BT_HCIUART_H4=y
725CONFIG_BT_HCIUART_BCSP=y
726CONFIG_BT_HCIUART_LL=y
727CONFIG_BT_HCIBCM203X=m
728CONFIG_BT_HCIBPA10X=m
729CONFIG_BT_HCIBFUSB=m
730CONFIG_BT_HCIVHCI=m
731# CONFIG_AF_RXRPC is not set
732# CONFIG_PHONET is not set
733CONFIG_FIB_RULES=y
734CONFIG_WIRELESS=y
735CONFIG_CFG80211=y
736# CONFIG_CFG80211_REG_DEBUG is not set
737CONFIG_NL80211=y
738CONFIG_WIRELESS_OLD_REGULATORY=y
739CONFIG_WIRELESS_EXT=y
740CONFIG_WIRELESS_EXT_SYSFS=y
741CONFIG_LIB80211=m
742CONFIG_LIB80211_CRYPT_WEP=m
743CONFIG_LIB80211_CRYPT_CCMP=m
744CONFIG_LIB80211_CRYPT_TKIP=m
745CONFIG_MAC80211=y
746
747#
748# Rate control algorithm selection
749#
750CONFIG_MAC80211_RC_PID=y
751# CONFIG_MAC80211_RC_MINSTREL is not set
752CONFIG_MAC80211_RC_DEFAULT_PID=y
753# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
754CONFIG_MAC80211_RC_DEFAULT="pid"
755CONFIG_MAC80211_MESH=y
756CONFIG_MAC80211_LEDS=y
757CONFIG_MAC80211_DEBUGFS=y
758# CONFIG_MAC80211_DEBUG_MENU is not set
759CONFIG_IEEE80211=m
760# CONFIG_IEEE80211_DEBUG is not set
761CONFIG_IEEE80211_CRYPT_WEP=m
762CONFIG_IEEE80211_CRYPT_CCMP=m
763CONFIG_IEEE80211_CRYPT_TKIP=m
764CONFIG_WIMAX=m
765CONFIG_WIMAX_DEBUG_LEVEL=8
766CONFIG_RFKILL=y
767CONFIG_RFKILL_INPUT=y
768CONFIG_RFKILL_LEDS=y
769# CONFIG_NET_9P is not set
770
771#
772# Device Drivers
773#
774
775#
776# Generic Driver Options
777#
778CONFIG_UEVENT_HELPER_PATH=""
779CONFIG_STANDALONE=y
780CONFIG_PREVENT_FIRMWARE_BUILD=y
781CONFIG_FW_LOADER=y
782CONFIG_FIRMWARE_IN_KERNEL=y
783CONFIG_EXTRA_FIRMWARE=""
784# CONFIG_DEBUG_DRIVER is not set
785CONFIG_DEBUG_DEVRES=y
786# CONFIG_SYS_HYPERVISOR is not set
787# CONFIG_CONNECTOR is not set
788# CONFIG_MTD is not set
789# CONFIG_PARPORT is not set
790CONFIG_PNP=y
791# CONFIG_PNP_DEBUG_MESSAGES is not set
792
793#
794# Protocols
795#
796# CONFIG_ISAPNP is not set
797# CONFIG_PNPBIOS is not set
798CONFIG_PNPACPI=y
799CONFIG_BLK_DEV=y
800# CONFIG_BLK_DEV_FD is not set
801# CONFIG_BLK_DEV_XD is not set
802# CONFIG_BLK_CPQ_DA is not set
803# CONFIG_BLK_CPQ_CISS_DA is not set
804# CONFIG_BLK_DEV_DAC960 is not set
805# CONFIG_BLK_DEV_UMEM is not set
806# CONFIG_BLK_DEV_COW_COMMON is not set
807CONFIG_BLK_DEV_LOOP=y
808CONFIG_BLK_DEV_CRYPTOLOOP=m
809# CONFIG_BLK_DEV_NBD is not set
810# CONFIG_BLK_DEV_SX8 is not set
811# CONFIG_BLK_DEV_UB is not set
812CONFIG_BLK_DEV_RAM=m
813CONFIG_BLK_DEV_RAM_COUNT=16
814CONFIG_BLK_DEV_RAM_SIZE=16384
815# CONFIG_BLK_DEV_XIP is not set
816CONFIG_CDROM_PKTCDVD=m
817CONFIG_CDROM_PKTCDVD_BUFFERS=8
818# CONFIG_CDROM_PKTCDVD_WCACHE is not set
819# CONFIG_ATA_OVER_ETH is not set
820# CONFIG_BLK_DEV_HD is not set
821CONFIG_MISC_DEVICES=y
822# CONFIG_IBM_ASM is not set
823# CONFIG_PHANTOM is not set
824CONFIG_EEPROM_93CX6=m
825# CONFIG_SGI_IOC4 is not set
826CONFIG_TIFM_CORE=m
827# CONFIG_TIFM_7XX1 is not set
828# CONFIG_ACER_WMI is not set
829CONFIG_ASUS_LAPTOP=m
830CONFIG_FUJITSU_LAPTOP=m
831# CONFIG_FUJITSU_LAPTOP_DEBUG is not set
832CONFIG_TC1100_WMI=m
833CONFIG_HP_WMI=m
834# CONFIG_ICS932S401 is not set
835CONFIG_MSI_LAPTOP=m
836CONFIG_PANASONIC_LAPTOP=m
837CONFIG_COMPAL_LAPTOP=m
838CONFIG_SONY_LAPTOP=m
839# CONFIG_SONYPI_COMPAT is not set
840CONFIG_THINKPAD_ACPI=m
841# CONFIG_THINKPAD_ACPI_DEBUG is not set
842CONFIG_THINKPAD_ACPI_BAY=y
843CONFIG_THINKPAD_ACPI_VIDEO=y
844CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
845# CONFIG_INTEL_MENLOW is not set
846# CONFIG_EEEPC_LAPTOP is not set
847# CONFIG_ENCLOSURE_SERVICES is not set
848# CONFIG_HP_ILO is not set
849# CONFIG_C2PORT is not set
850CONFIG_HAVE_IDE=y
851# CONFIG_IDE is not set
852
853#
854# SCSI device support
855#
856CONFIG_RAID_ATTRS=m
857CONFIG_SCSI=y
858CONFIG_SCSI_DMA=y
859# CONFIG_SCSI_TGT is not set
860# CONFIG_SCSI_NETLINK is not set
861CONFIG_SCSI_PROC_FS=y
862
863#
864# SCSI support type (disk, tape, CD-ROM)
865#
866CONFIG_BLK_DEV_SD=y
867# CONFIG_CHR_DEV_ST is not set
868# CONFIG_CHR_DEV_OSST is not set
869CONFIG_BLK_DEV_SR=y
870CONFIG_BLK_DEV_SR_VENDOR=y
871CONFIG_CHR_DEV_SG=y
872# CONFIG_CHR_DEV_SCH is not set
873
874#
875# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
876#
877CONFIG_SCSI_MULTI_LUN=y
878CONFIG_SCSI_CONSTANTS=y
879CONFIG_SCSI_LOGGING=y
880CONFIG_SCSI_SCAN_ASYNC=y
881CONFIG_SCSI_WAIT_SCAN=m
882
883#
884# SCSI Transports
885#
886# CONFIG_SCSI_SPI_ATTRS is not set
887# CONFIG_SCSI_FC_ATTRS is not set
888# CONFIG_SCSI_ISCSI_ATTRS is not set
889# CONFIG_SCSI_SAS_ATTRS is not set
890# CONFIG_SCSI_SAS_LIBSAS is not set
891# CONFIG_SCSI_SRP_ATTRS is not set
892CONFIG_SCSI_LOWLEVEL=y
893# CONFIG_ISCSI_TCP is not set
894# CONFIG_SCSI_CXGB3_ISCSI is not set
895# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
896# CONFIG_SCSI_3W_9XXX is not set
897# CONFIG_SCSI_7000FASST is not set
898# CONFIG_SCSI_ACARD is not set
899# CONFIG_SCSI_AHA152X is not set
900# CONFIG_SCSI_AHA1542 is not set
901# CONFIG_SCSI_AACRAID is not set
902# CONFIG_SCSI_AIC7XXX is not set
903# CONFIG_SCSI_AIC7XXX_OLD is not set
904# CONFIG_SCSI_AIC79XX is not set
905# CONFIG_SCSI_AIC94XX is not set
906# CONFIG_SCSI_DPT_I2O is not set
907# CONFIG_SCSI_ADVANSYS is not set
908# CONFIG_SCSI_IN2000 is not set
909# CONFIG_SCSI_ARCMSR is not set
910# CONFIG_MEGARAID_NEWGEN is not set
911# CONFIG_MEGARAID_LEGACY is not set
912# CONFIG_MEGARAID_SAS is not set
913# CONFIG_SCSI_HPTIOP is not set
914# CONFIG_SCSI_BUSLOGIC is not set
915# CONFIG_LIBFC is not set
916# CONFIG_FCOE is not set
917# CONFIG_SCSI_DMX3191D is not set
918# CONFIG_SCSI_DTC3280 is not set
919# CONFIG_SCSI_EATA is not set
920# CONFIG_SCSI_FUTURE_DOMAIN is not set
921# CONFIG_SCSI_GDTH is not set
922# CONFIG_SCSI_GENERIC_NCR5380 is not set
923# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
924# CONFIG_SCSI_IPS is not set
925# CONFIG_SCSI_INITIO is not set
926# CONFIG_SCSI_INIA100 is not set
927# CONFIG_SCSI_MVSAS is not set
928# CONFIG_SCSI_NCR53C406A is not set
929# CONFIG_SCSI_STEX is not set
930# CONFIG_SCSI_SYM53C8XX_2 is not set
931# CONFIG_SCSI_IPR is not set
932# CONFIG_SCSI_PAS16 is not set
933# CONFIG_SCSI_QLOGIC_FAS is not set
934# CONFIG_SCSI_QLOGIC_1280 is not set
935# CONFIG_SCSI_QLA_FC is not set
936# CONFIG_SCSI_QLA_ISCSI is not set
937# CONFIG_SCSI_LPFC is not set
938# CONFIG_SCSI_SYM53C416 is not set
939# CONFIG_SCSI_DC395x is not set
940# CONFIG_SCSI_DC390T is not set
941# CONFIG_SCSI_T128 is not set
942# CONFIG_SCSI_U14_34F is not set
943# CONFIG_SCSI_ULTRASTOR is not set
944# CONFIG_SCSI_NSP32 is not set
945# CONFIG_SCSI_DEBUG is not set
946# CONFIG_SCSI_SRP is not set
947# CONFIG_SCSI_DH is not set
948CONFIG_ATA=y
949# CONFIG_ATA_NONSTANDARD is not set
950CONFIG_ATA_ACPI=y
951# CONFIG_SATA_PMP is not set
952CONFIG_SATA_AHCI=y
953# CONFIG_SATA_SIL24 is not set
954CONFIG_ATA_SFF=y
955# CONFIG_SATA_SVW is not set
956CONFIG_ATA_PIIX=y
957# CONFIG_SATA_MV is not set
958# CONFIG_SATA_NV is not set
959# CONFIG_PDC_ADMA is not set
960# CONFIG_SATA_QSTOR is not set
961# CONFIG_SATA_PROMISE is not set
962# CONFIG_SATA_SX4 is not set
963# CONFIG_SATA_SIL is not set
964# CONFIG_SATA_SIS is not set
965# CONFIG_SATA_ULI is not set
966# CONFIG_SATA_VIA is not set
967# CONFIG_SATA_VITESSE is not set
968# CONFIG_SATA_INIC162X is not set
969# CONFIG_PATA_ACPI is not set
970# CONFIG_PATA_ALI is not set
971# CONFIG_PATA_AMD is not set
972# CONFIG_PATA_ARTOP is not set
973# CONFIG_PATA_ATIIXP is not set
974# CONFIG_PATA_CMD640_PCI is not set
975# CONFIG_PATA_CMD64X is not set
976# CONFIG_PATA_CS5520 is not set
977# CONFIG_PATA_CS5530 is not set
978# CONFIG_PATA_CS5535 is not set
979# CONFIG_PATA_CS5536 is not set
980# CONFIG_PATA_CYPRESS is not set
981# CONFIG_PATA_EFAR is not set
982CONFIG_ATA_GENERIC=y
983# CONFIG_PATA_HPT366 is not set
984# CONFIG_PATA_HPT37X is not set
985# CONFIG_PATA_HPT3X2N is not set
986# CONFIG_PATA_HPT3X3 is not set
987# CONFIG_PATA_ISAPNP is not set
988# CONFIG_PATA_IT821X is not set
989# CONFIG_PATA_IT8213 is not set
990# CONFIG_PATA_JMICRON is not set
991# CONFIG_PATA_LEGACY is not set
992# CONFIG_PATA_TRIFLEX is not set
993# CONFIG_PATA_MARVELL is not set
994CONFIG_PATA_MPIIX=y
995# CONFIG_PATA_OLDPIIX is not set
996# CONFIG_PATA_NETCELL is not set
997# CONFIG_PATA_NINJA32 is not set
998# CONFIG_PATA_NS87410 is not set
999# CONFIG_PATA_NS87415 is not set
1000# CONFIG_PATA_OPTI is not set
1001# CONFIG_PATA_OPTIDMA is not set
1002# CONFIG_PATA_PDC_OLD is not set
1003# CONFIG_PATA_QDI is not set
1004# CONFIG_PATA_RADISYS is not set
1005# CONFIG_PATA_RZ1000 is not set
1006# CONFIG_PATA_SC1200 is not set
1007# CONFIG_PATA_SERVERWORKS is not set
1008# CONFIG_PATA_PDC2027X is not set
1009# CONFIG_PATA_SIL680 is not set
1010# CONFIG_PATA_SIS is not set
1011# CONFIG_PATA_VIA is not set
1012# CONFIG_PATA_WINBOND is not set
1013# CONFIG_PATA_WINBOND_VLB is not set
1014CONFIG_PATA_SCH=y
1015CONFIG_MD=y
1016# CONFIG_BLK_DEV_MD is not set
1017CONFIG_BLK_DEV_DM=m
1018CONFIG_DM_DEBUG=y
1019# CONFIG_DM_CRYPT is not set
1020CONFIG_DM_SNAPSHOT=m
1021CONFIG_DM_MIRROR=m
1022CONFIG_DM_ZERO=m
1023CONFIG_DM_MULTIPATH=m
1024CONFIG_DM_DELAY=m
1025# CONFIG_DM_UEVENT is not set
1026CONFIG_FUSION=y
1027CONFIG_FUSION_SPI=m
1028CONFIG_FUSION_FC=m
1029CONFIG_FUSION_SAS=m
1030CONFIG_FUSION_MAX_SGE=40
1031CONFIG_FUSION_CTL=m
1032CONFIG_FUSION_LAN=m
1033CONFIG_FUSION_LOGGING=y
1034
1035#
1036# IEEE 1394 (FireWire) support
1037#
1038
1039#
1040# Enable only one of the two stacks, unless you know what you are doing
1041#
1042# CONFIG_FIREWIRE is not set
1043# CONFIG_IEEE1394 is not set
1044# CONFIG_I2O is not set
1045# CONFIG_MACINTOSH_DRIVERS is not set
1046CONFIG_NETDEVICES=y
1047# CONFIG_DUMMY is not set
1048# CONFIG_BONDING is not set
1049CONFIG_MACVLAN=m
1050# CONFIG_EQUALIZER is not set
1051CONFIG_TUN=y
1052# CONFIG_VETH is not set
1053# CONFIG_NET_SB1000 is not set
1054# CONFIG_ARCNET is not set
1055CONFIG_PHYLIB=m
1056
1057#
1058# MII PHY device drivers
1059#
1060CONFIG_MARVELL_PHY=m
1061CONFIG_DAVICOM_PHY=m
1062CONFIG_QSEMI_PHY=m
1063CONFIG_LXT_PHY=m
1064CONFIG_CICADA_PHY=m
1065CONFIG_VITESSE_PHY=m
1066CONFIG_SMSC_PHY=m
1067CONFIG_BROADCOM_PHY=m
1068CONFIG_ICPLUS_PHY=m
1069CONFIG_REALTEK_PHY=m
1070CONFIG_MDIO_BITBANG=m
1071CONFIG_NET_ETHERNET=y
1072CONFIG_MII=y
1073# CONFIG_NATIONAL_PHY is not set
1074# CONFIG_STE10XP is not set
1075# CONFIG_LSI_ET1011C_PHY is not set
1076# CONFIG_HAPPYMEAL is not set
1077# CONFIG_SUNGEM is not set
1078# CONFIG_CASSINI is not set
1079# CONFIG_NET_VENDOR_3COM is not set
1080# CONFIG_LANCE is not set
1081# CONFIG_NET_VENDOR_SMC is not set
1082# CONFIG_NET_VENDOR_RACAL is not set
1083# CONFIG_NET_TULIP is not set
1084# CONFIG_AT1700 is not set
1085# CONFIG_DEPCA is not set
1086# CONFIG_HP100 is not set
1087# CONFIG_NET_ISA is not set
1088# CONFIG_IBM_NEW_EMAC_ZMII is not set
1089# CONFIG_IBM_NEW_EMAC_RGMII is not set
1090# CONFIG_IBM_NEW_EMAC_TAH is not set
1091# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
1092# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
1093# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
1094# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
1095CONFIG_NET_PCI=y
1096CONFIG_PCNET32=m
1097# CONFIG_AMD8111_ETH is not set
1098# CONFIG_ADAPTEC_STARFIRE is not set
1099# CONFIG_AC3200 is not set
1100# CONFIG_APRICOT is not set
1101# CONFIG_B44 is not set
1102# CONFIG_FORCEDETH is not set
1103# CONFIG_CS89x0 is not set
1104# CONFIG_EEPRO100 is not set
1105CONFIG_E100=y
1106# CONFIG_FEALNX is not set
1107# CONFIG_NATSEMI is not set
1108# CONFIG_NE2K_PCI is not set
1109CONFIG_8139CP=m
1110CONFIG_8139TOO=m
1111CONFIG_8139TOO_PIO=y
1112# CONFIG_8139TOO_TUNE_TWISTER is not set
1113# CONFIG_8139TOO_8129 is not set
1114# CONFIG_8139_OLD_RX_RESET is not set
1115# CONFIG_R6040 is not set
1116CONFIG_SIS900=m
1117# CONFIG_EPIC100 is not set
1118# CONFIG_SMSC9420 is not set
1119# CONFIG_SUNDANCE is not set
1120# CONFIG_TLAN is not set
1121# CONFIG_VIA_RHINE is not set
1122# CONFIG_SC92031 is not set
1123CONFIG_ATL2=m
1124CONFIG_NETDEV_1000=y
1125# CONFIG_ACENIC is not set
1126# CONFIG_DL2K is not set
1127CONFIG_E1000=y
1128CONFIG_E1000E=y
1129# CONFIG_IP1000 is not set
1130CONFIG_IGB=y
1131# CONFIG_IGB_LRO is not set
1132# CONFIG_NS83820 is not set
1133# CONFIG_HAMACHI is not set
1134# CONFIG_YELLOWFIN is not set
1135# CONFIG_R8169 is not set
1136CONFIG_SIS190=m
1137# CONFIG_SKGE is not set
1138CONFIG_SKY2=m
1139# CONFIG_SKY2_DEBUG is not set
1140# CONFIG_VIA_VELOCITY is not set
1141CONFIG_TIGON3=m
1142CONFIG_BNX2=m
1143# CONFIG_QLA3XXX is not set
1144CONFIG_ATL1=m
1145# CONFIG_ATL1E is not set
1146# CONFIG_JME is not set
1147CONFIG_NETDEV_10000=y
1148# CONFIG_CHELSIO_T1 is not set
1149# CONFIG_CHELSIO_T3 is not set
1150# CONFIG_ENIC is not set
1151CONFIG_IXGBE=m
1152CONFIG_IXGB=m
1153# CONFIG_S2IO is not set
1154# CONFIG_MYRI10GE is not set
1155# CONFIG_NETXEN_NIC is not set
1156# CONFIG_NIU is not set
1157# CONFIG_MLX4_EN is not set
1158# CONFIG_MLX4_CORE is not set
1159# CONFIG_TEHUTI is not set
1160CONFIG_BNX2X=m
1161# CONFIG_QLGE is not set
1162# CONFIG_SFC is not set
1163# CONFIG_TR is not set
1164
1165#
1166# Wireless LAN
1167#
1168CONFIG_WLAN_PRE80211=y
1169# CONFIG_STRIP is not set
1170# CONFIG_ARLAN is not set
1171# CONFIG_WAVELAN is not set
1172CONFIG_WLAN_80211=y
1173CONFIG_IPW2100=m
1174# CONFIG_IPW2100_MONITOR is not set
1175# CONFIG_IPW2100_DEBUG is not set
1176CONFIG_IPW2200=m
1177# CONFIG_IPW2200_MONITOR is not set
1178CONFIG_IPW2200_QOS=y
1179# CONFIG_IPW2200_DEBUG is not set
1180# CONFIG_LIBIPW_DEBUG is not set
1181# CONFIG_LIBERTAS is not set
1182# CONFIG_LIBERTAS_THINFIRM is not set
1183# CONFIG_LIBERTAS_USB is not set
1184# CONFIG_LIBERTAS_DEBUG is not set
1185# CONFIG_AIRO is not set
1186# CONFIG_HERMES is not set
1187# CONFIG_ATMEL is not set
1188# CONFIG_PRISM54 is not set
1189CONFIG_USB_ZD1201=m
1190CONFIG_USB_NET_RNDIS_WLAN=m
1191CONFIG_RTL8180=m
1192CONFIG_RTL8187=m
1193# CONFIG_ADM8211 is not set
1194# CONFIG_MAC80211_HWSIM is not set
1195CONFIG_P54_COMMON=m
1196CONFIG_P54_USB=m
1197CONFIG_P54_PCI=m
1198# CONFIG_ATH5K is not set
1199CONFIG_ATH9K=m
1200# CONFIG_ATH9K_DEBUG is not set
1201CONFIG_IWLWIFI=m
1202CONFIG_IWLCORE=m
1203# CONFIG_IWLWIFI_LEDS is not set
1204CONFIG_IWLWIFI_RFKILL=y
1205# CONFIG_IWLWIFI_DEBUG is not set
1206CONFIG_IWLAGN=m
1207# CONFIG_IWLAGN_SPECTRUM_MEASUREMENT is not set
1208# CONFIG_IWLAGN_LEDS is not set
1209CONFIG_IWL4965=y
1210CONFIG_IWL5000=y
1211CONFIG_IWL3945=m
1212CONFIG_IWL3945_RFKILL=y
1213# CONFIG_IWL3945_SPECTRUM_MEASUREMENT is not set
1214# CONFIG_IWL3945_LEDS is not set
1215# CONFIG_IWL3945_DEBUG is not set
1216# CONFIG_HOSTAP is not set
1217CONFIG_B43=m
1218CONFIG_B43_PCI_AUTOSELECT=y
1219CONFIG_B43_PCICORE_AUTOSELECT=y
1220CONFIG_B43_LEDS=y
1221CONFIG_B43_RFKILL=y
1222# CONFIG_B43_DEBUG is not set
1223# CONFIG_B43LEGACY is not set
1224# CONFIG_ZD1211RW is not set
1225CONFIG_RT2X00=m
1226CONFIG_RT2400PCI=m
1227CONFIG_RT2500PCI=m
1228CONFIG_RT61PCI=m
1229CONFIG_RT2500USB=m
1230CONFIG_RT73USB=m
1231CONFIG_RT2X00_LIB_PCI=m
1232CONFIG_RT2X00_LIB_USB=m
1233CONFIG_RT2X00_LIB=m
1234CONFIG_RT2X00_LIB_FIRMWARE=y
1235CONFIG_RT2X00_LIB_CRYPTO=y
1236CONFIG_RT2X00_LIB_RFKILL=y
1237CONFIG_RT2X00_LIB_LEDS=y
1238# CONFIG_RT2X00_LIB_DEBUGFS is not set
1239# CONFIG_RT2X00_DEBUG is not set
1240
1241#
1242# WiMAX Wireless Broadband devices
1243#
1244CONFIG_WIMAX_I2400M_USB=m
1245CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
1246# CONFIG_WIMAX_I2400M_SDIO is not set
1249
1250#
1251# USB Network Adapters
1252#
1253CONFIG_USB_CATC=m
1254CONFIG_USB_KAWETH=m
1255CONFIG_USB_PEGASUS=m
1256CONFIG_USB_RTL8150=m
1257CONFIG_USB_USBNET=m
1258CONFIG_USB_NET_AX8817X=m
1259CONFIG_USB_NET_CDCETHER=m
1260CONFIG_USB_NET_DM9601=m
1261CONFIG_USB_NET_SMSC95XX=m
1262CONFIG_USB_NET_GL620A=m
1263CONFIG_USB_NET_NET1080=m
1264CONFIG_USB_NET_PLUSB=m
1265CONFIG_USB_NET_MCS7830=m
1266CONFIG_USB_NET_RNDIS_HOST=m
1267CONFIG_USB_NET_CDC_SUBSET=m
1268CONFIG_USB_ALI_M5632=y
1269CONFIG_USB_AN2720=y
1270CONFIG_USB_BELKIN=y
1271CONFIG_USB_ARMLINUX=y
1272CONFIG_USB_EPSON2888=y
1273CONFIG_USB_KC2190=y
1274CONFIG_USB_NET_ZAURUS=m
1275CONFIG_USB_HSO=m
1276# CONFIG_WAN is not set
1277# CONFIG_FDDI is not set
1278# CONFIG_HIPPI is not set
1279CONFIG_PPP=m
1280CONFIG_PPP_MULTILINK=y
1281CONFIG_PPP_FILTER=y
1282CONFIG_PPP_ASYNC=m
1283CONFIG_PPP_SYNC_TTY=m
1284CONFIG_PPP_DEFLATE=m
1285CONFIG_PPP_BSDCOMP=m
1286CONFIG_PPP_MPPE=m
1287CONFIG_PPPOE=m
1288CONFIG_PPPOL2TP=m
1289# CONFIG_SLIP is not set
1290CONFIG_SLHC=m
1291# CONFIG_NET_FC is not set
1292# CONFIG_NETCONSOLE is not set
1293# CONFIG_NETPOLL is not set
1294# CONFIG_NET_POLL_CONTROLLER is not set
1295# CONFIG_ISDN is not set
1296# CONFIG_PHONE is not set
1297
1298#
1299# Input device support
1300#
1301CONFIG_INPUT=y
1302CONFIG_INPUT_FF_MEMLESS=y
1303CONFIG_INPUT_POLLDEV=m
1304
1305#
1306# Userland interfaces
1307#
1308CONFIG_INPUT_MOUSEDEV=y
1309# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
1310CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
1311CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
1312CONFIG_INPUT_JOYDEV=m
1313CONFIG_INPUT_EVDEV=y
1314# CONFIG_INPUT_EVBUG is not set
1315
1316#
1317# Input Device Drivers
1318#
1319CONFIG_INPUT_KEYBOARD=y
1320CONFIG_KEYBOARD_ATKBD=y
1321# CONFIG_KEYBOARD_SUNKBD is not set
1322# CONFIG_KEYBOARD_LKKBD is not set
1323# CONFIG_KEYBOARD_XTKBD is not set
1324# CONFIG_KEYBOARD_NEWTON is not set
1325# CONFIG_KEYBOARD_STOWAWAY is not set
1326CONFIG_INPUT_MOUSE=y
1327CONFIG_MOUSE_PS2=y
1328CONFIG_MOUSE_PS2_ALPS=y
1329CONFIG_MOUSE_PS2_LOGIPS2PP=y
1330CONFIG_MOUSE_PS2_SYNAPTICS=y
1331CONFIG_MOUSE_PS2_LIFEBOOK=y
1332CONFIG_MOUSE_PS2_TRACKPOINT=y
1333# CONFIG_MOUSE_PS2_ELANTECH is not set
1334CONFIG_MOUSE_PS2_TOUCHKIT=y
1335CONFIG_MOUSE_SERIAL=m
1336# CONFIG_MOUSE_APPLETOUCH is not set
1337# CONFIG_MOUSE_BCM5974 is not set
1338# CONFIG_MOUSE_INPORT is not set
1339# CONFIG_MOUSE_LOGIBM is not set
1340# CONFIG_MOUSE_PC110PAD is not set
1341CONFIG_MOUSE_VSXXXAA=m
1342CONFIG_INPUT_JOYSTICK=y
1343# CONFIG_JOYSTICK_ANALOG is not set
1344# CONFIG_JOYSTICK_A3D is not set
1345# CONFIG_JOYSTICK_ADI is not set
1346# CONFIG_JOYSTICK_COBRA is not set
1347# CONFIG_JOYSTICK_GF2K is not set
1348# CONFIG_JOYSTICK_GRIP is not set
1349# CONFIG_JOYSTICK_GRIP_MP is not set
1350# CONFIG_JOYSTICK_GUILLEMOT is not set
1351# CONFIG_JOYSTICK_INTERACT is not set
1352# CONFIG_JOYSTICK_SIDEWINDER is not set
1353# CONFIG_JOYSTICK_TMDC is not set
1354# CONFIG_JOYSTICK_IFORCE is not set
1355# CONFIG_JOYSTICK_WARRIOR is not set
1356# CONFIG_JOYSTICK_MAGELLAN is not set
1357# CONFIG_JOYSTICK_SPACEORB is not set
1358# CONFIG_JOYSTICK_SPACEBALL is not set
1359# CONFIG_JOYSTICK_STINGER is not set
1360# CONFIG_JOYSTICK_TWIDJOY is not set
1361# CONFIG_JOYSTICK_ZHENHUA is not set
1362# CONFIG_JOYSTICK_JOYDUMP is not set
1363# CONFIG_JOYSTICK_XPAD is not set
1364# CONFIG_INPUT_TABLET is not set
1365CONFIG_INPUT_TOUCHSCREEN=y
1366CONFIG_TOUCHSCREEN_FUJITSU=m
1367CONFIG_TOUCHSCREEN_GUNZE=m
1368CONFIG_TOUCHSCREEN_ELO=m
1369# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
1370CONFIG_TOUCHSCREEN_MTOUCH=m
1371CONFIG_TOUCHSCREEN_INEXIO=m
1372CONFIG_TOUCHSCREEN_MK712=m
1373CONFIG_TOUCHSCREEN_HTCPEN=m
1374CONFIG_TOUCHSCREEN_PENMOUNT=m
1375CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
1376CONFIG_TOUCHSCREEN_TOUCHWIN=m
1377CONFIG_TOUCHSCREEN_WM97XX=m
1378CONFIG_TOUCHSCREEN_WM9705=y
1379CONFIG_TOUCHSCREEN_WM9712=y
1380CONFIG_TOUCHSCREEN_WM9713=y
1381CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
1382CONFIG_TOUCHSCREEN_USB_EGALAX=y
1383CONFIG_TOUCHSCREEN_USB_PANJIT=y
1384CONFIG_TOUCHSCREEN_USB_3M=y
1385CONFIG_TOUCHSCREEN_USB_ITM=y
1386CONFIG_TOUCHSCREEN_USB_ETURBO=y
1387CONFIG_TOUCHSCREEN_USB_GUNZE=y
1388CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
1389CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
1390CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
1391CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
1392CONFIG_TOUCHSCREEN_USB_GOTOP=y
1393CONFIG_TOUCHSCREEN_TOUCHIT213=m
1394# CONFIG_TOUCHSCREEN_TSC2007 is not set
1395CONFIG_INPUT_MISC=y
1396# CONFIG_INPUT_PCSPKR is not set
1397# CONFIG_INPUT_APANEL is not set
1398CONFIG_INPUT_WISTRON_BTNS=m
1399# CONFIG_INPUT_ATLAS_BTNS is not set
1400# CONFIG_INPUT_ATI_REMOTE is not set
1401# CONFIG_INPUT_ATI_REMOTE2 is not set
1402CONFIG_INPUT_KEYSPAN_REMOTE=m
1403CONFIG_INPUT_POWERMATE=m
1404CONFIG_INPUT_YEALINK=m
1405# CONFIG_INPUT_CM109 is not set
1406# CONFIG_INPUT_UINPUT is not set
1407
1408#
1409# Hardware I/O ports
1410#
1411CONFIG_SERIO=y
1412CONFIG_SERIO_I8042=y
1413CONFIG_SERIO_SERPORT=y
1414# CONFIG_SERIO_CT82C710 is not set
1415# CONFIG_SERIO_PCIPS2 is not set
1416CONFIG_SERIO_LIBPS2=y
1417CONFIG_SERIO_RAW=m
1418# CONFIG_GAMEPORT is not set
1419
1420#
1421# Character devices
1422#
1423CONFIG_VT=y
1424CONFIG_CONSOLE_TRANSLATIONS=y
1425CONFIG_VT_CONSOLE=y
1426CONFIG_HW_CONSOLE=y
1427CONFIG_VT_HW_CONSOLE_BINDING=y
1428# CONFIG_DEVKMEM is not set
1429# CONFIG_SERIAL_NONSTANDARD is not set
1430# CONFIG_NOZOMI is not set
1431
1432#
1433# Serial drivers
1434#
1435CONFIG_SERIAL_8250=y
1436# CONFIG_SERIAL_8250_CONSOLE is not set
1437CONFIG_FIX_EARLYCON_MEM=y
1438CONFIG_SERIAL_8250_PCI=y
1439CONFIG_SERIAL_8250_PNP=y
1440CONFIG_SERIAL_8250_NR_UARTS=4
1441CONFIG_SERIAL_8250_RUNTIME_UARTS=4
1442# CONFIG_SERIAL_8250_EXTENDED is not set
1443
1444#
1445# Non-8250 serial port support
1446#
1447CONFIG_SERIAL_CORE=y
1448# CONFIG_SERIAL_JSM is not set
1449CONFIG_UNIX98_PTYS=y
1450# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
1451# CONFIG_LEGACY_PTYS is not set
1452# CONFIG_IPMI_HANDLER is not set
1453CONFIG_HW_RANDOM=m
1454# CONFIG_HW_RANDOM_INTEL is not set
1455# CONFIG_HW_RANDOM_AMD is not set
1456# CONFIG_HW_RANDOM_GEODE is not set
1457# CONFIG_HW_RANDOM_VIA is not set
1458CONFIG_NVRAM=m
1459# CONFIG_DTLK is not set
1460# CONFIG_R3964 is not set
1461# CONFIG_APPLICOM is not set
1462# CONFIG_SONYPI is not set
1463# CONFIG_MWAVE is not set
1464# CONFIG_PC8736x_GPIO is not set
1465# CONFIG_NSC_GPIO is not set
1466# CONFIG_CS5535_GPIO is not set
1467# CONFIG_RAW_DRIVER is not set
1468CONFIG_HPET=y
1469# CONFIG_HPET_MMAP is not set
1470# CONFIG_HANGCHECK_TIMER is not set
1471# CONFIG_TCG_TPM is not set
1472# CONFIG_TELCLOCK is not set
1473CONFIG_DEVPORT=y
1474CONFIG_I2C=y
1475CONFIG_I2C_BOARDINFO=y
1476# CONFIG_I2C_CHARDEV is not set
1477# CONFIG_I2C_HELPER_AUTO is not set
1478
1479#
1480# I2C Algorithms
1481#
1482# CONFIG_I2C_ALGOBIT is not set
1483# CONFIG_I2C_ALGOPCF is not set
1484# CONFIG_I2C_ALGOPCA is not set
1485
1486#
1487# I2C Hardware Bus support
1488#
1489
1490#
1491# PC SMBus host controller drivers
1492#
1493# CONFIG_I2C_ALI1535 is not set
1494# CONFIG_I2C_ALI1563 is not set
1495# CONFIG_I2C_ALI15X3 is not set
1496# CONFIG_I2C_AMD756 is not set
1497# CONFIG_I2C_AMD8111 is not set
1498# CONFIG_I2C_I801 is not set
1499# CONFIG_I2C_ISCH is not set
1500# CONFIG_I2C_PIIX4 is not set
1501# CONFIG_I2C_NFORCE2 is not set
1502# CONFIG_I2C_SIS5595 is not set
1503# CONFIG_I2C_SIS630 is not set
1504# CONFIG_I2C_SIS96X is not set
1505# CONFIG_I2C_VIA is not set
1506# CONFIG_I2C_VIAPRO is not set
1507
1508#
1509# I2C system bus drivers (mostly embedded / system-on-chip)
1510#
1511# CONFIG_I2C_OCORES is not set
1512# CONFIG_I2C_SIMTEC is not set
1513
1514#
1515# External I2C/SMBus adapter drivers
1516#
1517# CONFIG_I2C_PARPORT_LIGHT is not set
1518# CONFIG_I2C_TAOS_EVM is not set
1519# CONFIG_I2C_TINY_USB is not set
1520
1521#
1522# Graphics adapter I2C/DDC channel drivers
1523#
1524# CONFIG_I2C_VOODOO3 is not set
1525
1526#
1527# Other I2C/SMBus bus drivers
1528#
1529# CONFIG_I2C_PCA_ISA is not set
1530# CONFIG_I2C_PCA_PLATFORM is not set
1531# CONFIG_I2C_STUB is not set
1532# CONFIG_SCx200_ACB is not set
1533
1534#
1535# Miscellaneous I2C Chip support
1536#
1537# CONFIG_DS1682 is not set
1538# CONFIG_AT24 is not set
1539# CONFIG_SENSORS_EEPROM is not set
1540# CONFIG_EEPROM_AT24 is not set
1541# CONFIG_EEPROM_LEGACY is not set
1542# CONFIG_SENSORS_PCF8574 is not set
1543# CONFIG_PCF8575 is not set
1544# CONFIG_SENSORS_PCA9539 is not set
1545# CONFIG_SENSORS_PCF8591 is not set
1546# CONFIG_SENSORS_MAX6875 is not set
1547# CONFIG_SENSORS_TSL2550 is not set
1548# CONFIG_I2C_DEBUG_CORE is not set
1549# CONFIG_I2C_DEBUG_ALGO is not set
1550# CONFIG_I2C_DEBUG_BUS is not set
1551# CONFIG_I2C_DEBUG_CHIP is not set
1552# CONFIG_SPI is not set
1553CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
1554# CONFIG_GPIOLIB is not set
1555# CONFIG_W1 is not set
1556CONFIG_POWER_SUPPLY=y
1557# CONFIG_POWER_SUPPLY_DEBUG is not set
1558# CONFIG_PDA_POWER is not set
1559# CONFIG_BATTERY_DS2760 is not set
1560# CONFIG_BATTERY_BQ27x00 is not set
1561CONFIG_HWMON=y
1562# CONFIG_HWMON_VID is not set
1563# CONFIG_SENSORS_ABITUGURU is not set
1564# CONFIG_SENSORS_ABITUGURU3 is not set
1565# CONFIG_SENSORS_AD7414 is not set
1566# CONFIG_SENSORS_AD7418 is not set
1567# CONFIG_SENSORS_ADM1021 is not set
1568# CONFIG_SENSORS_ADM1025 is not set
1569# CONFIG_SENSORS_ADM1026 is not set
1570# CONFIG_SENSORS_ADM1029 is not set
1571# CONFIG_SENSORS_ADM1031 is not set
1572# CONFIG_SENSORS_ADM9240 is not set
1573# CONFIG_SENSORS_ADT7462 is not set
1574# CONFIG_SENSORS_ADT7470 is not set
1575# CONFIG_SENSORS_ADT7473 is not set
1576# CONFIG_SENSORS_K8TEMP is not set
1577# CONFIG_SENSORS_ASB100 is not set
1578# CONFIG_SENSORS_ATXP1 is not set
1579# CONFIG_SENSORS_DS1621 is not set
1580# CONFIG_SENSORS_I5K_AMB is not set
1581# CONFIG_SENSORS_F71805F is not set
1582# CONFIG_SENSORS_F71882FG is not set
1583# CONFIG_SENSORS_F75375S is not set
1584# CONFIG_SENSORS_FSCHER is not set
1585# CONFIG_SENSORS_FSCPOS is not set
1586# CONFIG_SENSORS_FSCHMD is not set
1587# CONFIG_SENSORS_GL518SM is not set
1588# CONFIG_SENSORS_GL520SM is not set
1589# CONFIG_SENSORS_CORETEMP is not set
1590# CONFIG_SENSORS_IT87 is not set
1591# CONFIG_SENSORS_LM63 is not set
1592# CONFIG_SENSORS_LM75 is not set
1593# CONFIG_SENSORS_LM77 is not set
1594# CONFIG_SENSORS_LM78 is not set
1595# CONFIG_SENSORS_LM80 is not set
1596# CONFIG_SENSORS_LM83 is not set
1597# CONFIG_SENSORS_LM85 is not set
1598# CONFIG_SENSORS_LM87 is not set
1599# CONFIG_SENSORS_LM90 is not set
1600# CONFIG_SENSORS_LM92 is not set
1601# CONFIG_SENSORS_LM93 is not set
1602# CONFIG_SENSORS_LTC4245 is not set
1603# CONFIG_SENSORS_MAX1619 is not set
1604# CONFIG_SENSORS_MAX6650 is not set
1605# CONFIG_SENSORS_PC87360 is not set
1606# CONFIG_SENSORS_PC87427 is not set
1607# CONFIG_SENSORS_SIS5595 is not set
1608# CONFIG_SENSORS_DME1737 is not set
1609# CONFIG_SENSORS_SMSC47M1 is not set
1610# CONFIG_SENSORS_SMSC47M192 is not set
1611# CONFIG_SENSORS_SMSC47B397 is not set
1612# CONFIG_SENSORS_ADS7828 is not set
1613# CONFIG_SENSORS_THMC50 is not set
1614# CONFIG_SENSORS_VIA686A is not set
1615# CONFIG_SENSORS_VT1211 is not set
1616# CONFIG_SENSORS_VT8231 is not set
1617# CONFIG_SENSORS_W83781D is not set
1618# CONFIG_SENSORS_W83791D is not set
1619# CONFIG_SENSORS_W83792D is not set
1620# CONFIG_SENSORS_W83793 is not set
1621# CONFIG_SENSORS_W83L785TS is not set
1622# CONFIG_SENSORS_W83L786NG is not set
1623# CONFIG_SENSORS_W83627HF is not set
1624# CONFIG_SENSORS_W83627EHF is not set
1625# CONFIG_SENSORS_HDAPS is not set
1626# CONFIG_SENSORS_LIS3LV02D is not set
1627# CONFIG_SENSORS_APPLESMC is not set
1628# CONFIG_HWMON_DEBUG_CHIP is not set
1629CONFIG_THERMAL=y
1630CONFIG_THERMAL_HWMON=y
1631# CONFIG_WATCHDOG is not set
1632CONFIG_SSB_POSSIBLE=y
1633
1634#
1635# Sonics Silicon Backplane
1636#
1637CONFIG_SSB=m
1638CONFIG_SSB_SPROM=y
1639CONFIG_SSB_PCIHOST_POSSIBLE=y
1640CONFIG_SSB_PCIHOST=y
1641CONFIG_SSB_B43_PCI_BRIDGE=y
1642# CONFIG_SSB_DEBUG is not set
1643CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
1644CONFIG_SSB_DRIVER_PCICORE=y
1645
1646#
1647# Multifunction device drivers
1648#
1649# CONFIG_MFD_CORE is not set
1650# CONFIG_MFD_SM501 is not set
1651# CONFIG_HTC_PASIC3 is not set
1652# CONFIG_TWL4030_CORE is not set
1653# CONFIG_MFD_TMIO is not set
1654# CONFIG_PMIC_DA903X is not set
1655# CONFIG_MFD_WM8400 is not set
1656# CONFIG_MFD_WM8350_I2C is not set
1657# CONFIG_REGULATOR is not set
1658
1659#
1660# Multimedia devices
1661#
1662
1663#
1664# Multimedia core support
1665#
1666CONFIG_VIDEO_DEV=y
1667CONFIG_VIDEO_V4L2_COMMON=y
1668# CONFIG_VIDEO_ALLOW_V4L1 is not set
1669CONFIG_VIDEO_V4L1_COMPAT=y
1670CONFIG_DVB_CORE=m
1671CONFIG_VIDEO_MEDIA=m
1672
1673#
1674# Multimedia drivers
1675#
1676CONFIG_MEDIA_ATTACH=y
1677CONFIG_MEDIA_TUNER=m
1678# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
1679CONFIG_MEDIA_TUNER_SIMPLE=m
1680CONFIG_MEDIA_TUNER_TDA8290=m
1681CONFIG_MEDIA_TUNER_TDA9887=m
1682CONFIG_MEDIA_TUNER_TEA5761=m
1683CONFIG_MEDIA_TUNER_TEA5767=m
1684CONFIG_MEDIA_TUNER_MT20XX=m
1685CONFIG_MEDIA_TUNER_XC2028=m
1686CONFIG_MEDIA_TUNER_XC5000=m
1687CONFIG_VIDEO_V4L2=y
1688CONFIG_VIDEOBUF_GEN=m
1689CONFIG_VIDEOBUF_VMALLOC=m
1690CONFIG_VIDEO_CAPTURE_DRIVERS=y
1691# CONFIG_VIDEO_ADV_DEBUG is not set
1692# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
1693CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
1694# CONFIG_VIDEO_VIVI is not set
1695# CONFIG_VIDEO_BT848 is not set
1696# CONFIG_VIDEO_SAA5246A is not set
1697# CONFIG_VIDEO_SAA5249 is not set
1698# CONFIG_VIDEO_SAA7134 is not set
1699# CONFIG_VIDEO_HEXIUM_ORION is not set
1700# CONFIG_VIDEO_HEXIUM_GEMINI is not set
1701# CONFIG_VIDEO_CX88 is not set
1702# CONFIG_VIDEO_CX23885 is not set
1703# CONFIG_VIDEO_AU0828 is not set
1704# CONFIG_VIDEO_IVTV is not set
1705# CONFIG_VIDEO_CX18 is not set
1706# CONFIG_VIDEO_CAFE_CCIC is not set
1707# CONFIG_SOC_CAMERA is not set
1708CONFIG_V4L_USB_DRIVERS=y
1709CONFIG_USB_VIDEO_CLASS=m
1710CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
1711CONFIG_USB_GSPCA=m
1712# CONFIG_USB_M5602 is not set
1713# CONFIG_USB_STV06XX is not set
1714# CONFIG_USB_GSPCA_CONEX is not set
1715# CONFIG_USB_GSPCA_ETOMS is not set
1716# CONFIG_USB_GSPCA_FINEPIX is not set
1717# CONFIG_USB_GSPCA_MARS is not set
1718# CONFIG_USB_GSPCA_OV519 is not set
1719# CONFIG_USB_GSPCA_OV534 is not set
1720# CONFIG_USB_GSPCA_PAC207 is not set
1721# CONFIG_USB_GSPCA_PAC7311 is not set
1722# CONFIG_USB_GSPCA_SONIXB is not set
1723# CONFIG_USB_GSPCA_SONIXJ is not set
1724# CONFIG_USB_GSPCA_SPCA500 is not set
1725# CONFIG_USB_GSPCA_SPCA501 is not set
1726# CONFIG_USB_GSPCA_SPCA505 is not set
1727# CONFIG_USB_GSPCA_SPCA506 is not set
1728# CONFIG_USB_GSPCA_SPCA508 is not set
1729# CONFIG_USB_GSPCA_SPCA561 is not set
1730# CONFIG_USB_GSPCA_STK014 is not set
1731# CONFIG_USB_GSPCA_SUNPLUS is not set
1732# CONFIG_USB_GSPCA_T613 is not set
1733# CONFIG_USB_GSPCA_TV8532 is not set
1734# CONFIG_USB_GSPCA_VC032X is not set
1735# CONFIG_USB_GSPCA_ZC3XX is not set
1736# CONFIG_VIDEO_PVRUSB2 is not set
1737# CONFIG_VIDEO_EM28XX is not set
1738# CONFIG_VIDEO_USBVISION is not set
1739CONFIG_USB_ET61X251=m
1740CONFIG_USB_SN9C102=m
1741CONFIG_USB_ZC0301=m
1742CONFIG_USB_ZR364XX=m
1743CONFIG_USB_STKWEBCAM=m
1744CONFIG_USB_S2255=m
1745# CONFIG_RADIO_ADAPTERS is not set
1746# CONFIG_DVB_DYNAMIC_MINORS is not set
1747# CONFIG_DVB_CAPTURE_DRIVERS is not set
1748# CONFIG_DAB is not set
1749
1750#
1751# Graphics support
1752#
1753CONFIG_AGP=y
1754# CONFIG_AGP_ALI is not set
1755# CONFIG_AGP_ATI is not set
1756# CONFIG_AGP_AMD is not set
1757# CONFIG_AGP_AMD64 is not set
1758CONFIG_AGP_INTEL=y
1759# CONFIG_AGP_NVIDIA is not set
1760# CONFIG_AGP_SIS is not set
1761# CONFIG_AGP_SWORKS is not set
1762# CONFIG_AGP_VIA is not set
1763# CONFIG_AGP_EFFICEON is not set
1764CONFIG_DRM=y
1765# CONFIG_DRM_TDFX is not set
1766# CONFIG_DRM_R128 is not set
1767# CONFIG_DRM_RADEON is not set
1768CONFIG_DRM_I810=y
1769# CONFIG_DRM_I830 is not set
1770CONFIG_DRM_I915=y
1771# CONFIG_DRM_I915_KMS is not set
1772# CONFIG_DRM_MGA is not set
1773# CONFIG_DRM_SIS is not set
1774# CONFIG_DRM_VIA is not set
1775# CONFIG_DRM_SAVAGE is not set
1776# CONFIG_VGASTATE is not set
1777CONFIG_DRM_PSB=m
1778CONFIG_VIDEO_OUTPUT_CONTROL=y
1779CONFIG_FB=y
1780# CONFIG_FIRMWARE_EDID is not set
1781# CONFIG_FB_TRIDENT_ACCEL is not set
1782# CONFIG_FB_ARK is not set
1783# CONFIG_FB_PM3 is not set
1784# CONFIG_FB_CARMINE is not set
1785# CONFIG_FB_GEODE is not set
1786# CONFIG_FB_VIRTUAL is not set
1787# CONFIG_FB_METRONOME is not set
1788# CONFIG_FB_MB862XX is not set
1789
1790
1791CONFIG_BACKLIGHT_LCD_SUPPORT=y
1792# CONFIG_LCD_CLASS_DEVICE is not set
1793CONFIG_BACKLIGHT_CLASS_DEVICE=y
1794CONFIG_BACKLIGHT_GENERIC=y
1795# CONFIG_BACKLIGHT_CORGI is not set
1796# CONFIG_BACKLIGHT_PROGEAR is not set
1797# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
1798# CONFIG_BACKLIGHT_SAHARA is not set
1799
1800
1801#
1802# Frame buffer hardware drivers
1803#
1804# CONFIG_FB_TILEBLITTING is not set
1805# CONFIG_FB_FOREIGN_ENDIAN is not set
1806# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
1807# CONFIG_FB_3DFX_ACCEL is not set
1808# CONFIG_FB_CIRRUS is not set
1809# CONFIG_FB_PM2 is not set
1810# CONFIG_FB_CYBER2000 is not set
1811# CONFIG_FB_ARC is not set
1812# CONFIG_FB_ASILIANT is not set
1813# CONFIG_FB_IMSTT is not set
1814# CONFIG_FB_VGA16 is not set
1815# CONFIG_FB_VESA is not set
1816# CONFIG_FB_EFI is not set
1817# CONFIG_FB_N411 is not set
1818# CONFIG_FB_HGA is not set
1819# CONFIG_FB_S1D13XXX is not set
1820# CONFIG_FB_NVIDIA is not set
1821# CONFIG_FB_RIVA is not set
1822CONFIG_FB_I810=m
1823# CONFIG_FB_I810_GTF is not set
1824# CONFIG_FB_LE80578 is not set
1825# CONFIG_FB_CARILLO_RANCH is not set
1826# CONFIG_FB_INTEL is not set
1827# CONFIG_FB_INTEL_DEBUG is not set
1828# CONFIG_FB_INTEL_I2C is not set
1829# CONFIG_FB_MATROX is not set
1830# CONFIG_FB_RADEON is not set
1831CONFIG_FB_RADEON_I2C=y
1832# CONFIG_FB_RADEON_BACKLIGHT is not set
1833# CONFIG_FB_RADEON_DEBUG is not set
1834# CONFIG_FB_ATY128 is not set
1835# CONFIG_FB_ATY is not set
1836# CONFIG_FB_S3 is not set
1837# CONFIG_FB_SAVAGE is not set
1838# CONFIG_FB_SIS is not set
1839# CONFIG_FB_SIS_300 is not set
1840# CONFIG_FB_SIS_315 is not set
1841# CONFIG_FB_VIA is not set
1842# CONFIG_FB_NEOMAGIC is not set
1843# CONFIG_FB_KYRO is not set
1844# CONFIG_FB_3DFX is not set
1845# CONFIG_FB_VOODOO1 is not set
1846# CONFIG_FB_VT8623 is not set
1847# CONFIG_FB_CYBLA is not set
1848# CONFIG_FB_TRIDENT is not set
1849# CONFIG_FB_ARK is not set
1850# CONFIG_FB_PM3 is not set
1851# CONFIG_FB_CARMINE is not set
1852# CONFIG_FB_GEODE is not set
1853# CONFIG_FB_VIRTUAL is not set
1854# CONFIG_FB_METRONOME is not set
1855# CONFIG_FB_MB862XX is not set
1856
1857#
1858# Display device support
1859#
1860# CONFIG_DISPLAY_SUPPORT is not set
1861
1862#
1863# Console display driver support
1864#
1865CONFIG_VGA_CONSOLE=y
1866CONFIG_VGACON_SOFT_SCROLLBACK=y
1867CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
1868# CONFIG_MDA_CONSOLE is not set
1869CONFIG_DUMMY_CONSOLE=y
1870# CONFIG_FRAMEBUFFER_CONSOLE is not set
1871CONFIG_FONT_8x16=y
1872# CONFIG_LOGO is not set
1873CONFIG_SOUND=y
1874# CONFIG_SOUND_OSS_CORE is not set
1875CONFIG_SND=y
1876CONFIG_SND_TIMER=y
1877CONFIG_SND_PCM=y
1878CONFIG_SND_HWDEP=y
1879CONFIG_SND_RAWMIDI=m
1880CONFIG_SND_SEQUENCER=y
1881CONFIG_SND_SEQ_DUMMY=y
1882# CONFIG_SND_OSSEMUL is not set
1883# CONFIG_SND_MIXER_OSS is not set
1884# CONFIG_SND_PCM_OSS is not set
1885# CONFIG_SND_SEQUENCER_OSS is not set
1886# CONFIG_SND_HRTIMER is not set
1887CONFIG_SND_DYNAMIC_MINORS=y
1888# CONFIG_SND_SUPPORT_OLD_API is not set
1889CONFIG_SND_VERBOSE_PROCFS=y
1890CONFIG_SND_VERBOSE_PRINTK=y
1891CONFIG_SND_DEBUG=y
1892# CONFIG_SND_DEBUG_VERBOSE is not set
1893CONFIG_SND_PCM_XRUN_DEBUG=y
1894CONFIG_SND_VMASTER=y
1895CONFIG_SND_AC97_CODEC=y
1896CONFIG_SND_DRIVERS=y
1897# CONFIG_SND_DUMMY is not set
1898# CONFIG_SND_VIRMIDI is not set
1899# CONFIG_SND_MTPAV is not set
1900# CONFIG_SND_SERIAL_U16550 is not set
1901# CONFIG_SND_MPU401 is not set
1902CONFIG_SND_AC97_POWER_SAVE=y
1903CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5
1904# CONFIG_SND_ISA is not set
1905CONFIG_SND_PCI=y
1906# CONFIG_SND_AD1889 is not set
1907# CONFIG_SND_ALS300 is not set
1908# CONFIG_SND_ALS4000 is not set
1909# CONFIG_SND_ALI5451 is not set
1910# CONFIG_SND_ATIIXP is not set
1911# CONFIG_SND_ATIIXP_MODEM is not set
1912# CONFIG_SND_AU8810 is not set
1913# CONFIG_SND_AU8820 is not set
1914# CONFIG_SND_AU8830 is not set
1915# CONFIG_SND_AW2 is not set
1916# CONFIG_SND_AZT3328 is not set
1917# CONFIG_SND_BT87X is not set
1918# CONFIG_SND_CA0106 is not set
1919# CONFIG_SND_CMIPCI is not set
1920# CONFIG_SND_OXYGEN is not set
1921# CONFIG_SND_CS4281 is not set
1922# CONFIG_SND_CS46XX is not set
1923# CONFIG_SND_CS5530 is not set
1924# CONFIG_SND_CS5535AUDIO is not set
1925# CONFIG_SND_DARLA20 is not set
1926# CONFIG_SND_GINA20 is not set
1927# CONFIG_SND_LAYLA20 is not set
1928# CONFIG_SND_DARLA24 is not set
1929# CONFIG_SND_GINA24 is not set
1930# CONFIG_SND_LAYLA24 is not set
1931# CONFIG_SND_MONA is not set
1932# CONFIG_SND_MIA is not set
1933# CONFIG_SND_ECHO3G is not set
1934# CONFIG_SND_INDIGO is not set
1935# CONFIG_SND_INDIGOIO is not set
1936# CONFIG_SND_INDIGODJ is not set
1937# CONFIG_SND_EMU10K1 is not set
1938# CONFIG_SND_EMU10K1X is not set
1939# CONFIG_SND_ENS1370 is not set
1940# CONFIG_SND_ENS1371 is not set
1941# CONFIG_SND_ES1938 is not set
1942# CONFIG_SND_ES1968 is not set
1943# CONFIG_SND_FM801 is not set
1944CONFIG_SND_HDA_INTEL=y
1945CONFIG_SND_HDA_HWDEP=y
1946# CONFIG_SND_HDA_RECONFIG is not set
1947# CONFIG_SND_HDA_INPUT_BEEP is not set
1948CONFIG_SND_HDA_CODEC_REALTEK=y
1949CONFIG_SND_HDA_CODEC_ANALOG=y
1950CONFIG_SND_HDA_CODEC_SIGMATEL=y
1951CONFIG_SND_HDA_CODEC_VIA=y
1952CONFIG_SND_HDA_CODEC_ATIHDMI=y
1953CONFIG_SND_HDA_CODEC_NVHDMI=y
1954CONFIG_SND_HDA_CODEC_INTELHDMI=y
1955CONFIG_SND_HDA_CODEC_CONEXANT=y
1956CONFIG_SND_HDA_CODEC_CMEDIA=y
1957CONFIG_SND_HDA_CODEC_SI3054=y
1958CONFIG_SND_HDA_GENERIC=y
1959CONFIG_SND_HDA_POWER_SAVE=y
1960CONFIG_SND_HDA_POWER_SAVE_DEFAULT=5
1961# CONFIG_SND_HDSP is not set
1962# CONFIG_SND_HDSPM is not set
1963# CONFIG_SND_HIFIER is not set
1964# CONFIG_SND_ICE1712 is not set
1965# CONFIG_SND_ICE1724 is not set
1966CONFIG_SND_INTEL8X0=y
1967# CONFIG_SND_INTEL8X0M is not set
1968# CONFIG_SND_KORG1212 is not set
1969# CONFIG_SND_MAESTRO3 is not set
1970# CONFIG_SND_MIXART is not set
1971# CONFIG_SND_NM256 is not set
1972# CONFIG_SND_PCXHR is not set
1973# CONFIG_SND_RIPTIDE is not set
1974# CONFIG_SND_RME32 is not set
1975# CONFIG_SND_RME96 is not set
1976# CONFIG_SND_RME9652 is not set
1977# CONFIG_SND_SIS7019 is not set
1978# CONFIG_SND_SONICVIBES is not set
1979# CONFIG_SND_TRIDENT is not set
1980# CONFIG_SND_VIA82XX is not set
1981# CONFIG_SND_VIA82XX_MODEM is not set
1982# CONFIG_SND_VIRTUOSO is not set
1983# CONFIG_SND_VX222 is not set
1984# CONFIG_SND_YMFPCI is not set
1985CONFIG_SND_USB=y
1986CONFIG_SND_USB_AUDIO=m
1987CONFIG_SND_USB_USX2Y=m
1988CONFIG_SND_USB_CAIAQ=m
1989CONFIG_SND_USB_CAIAQ_INPUT=y
1990# CONFIG_SND_USB_US122L is not set
1991# CONFIG_SND_SOC is not set
1992# CONFIG_SOUND_PRIME is not set
1993CONFIG_AC97_BUS=y
1994CONFIG_HID_SUPPORT=y
1995CONFIG_HID=y
1996CONFIG_HID_DEBUG=y
1997CONFIG_HIDRAW=y
1998
1999#
2000# USB Input Devices
2001#
2002CONFIG_USB_HID=y
2003CONFIG_HID_PID=y
2004CONFIG_USB_HIDDEV=y
2005
2006#
2007# Special HID drivers
2008#
2009CONFIG_HID_COMPAT=y
2010CONFIG_HID_A4TECH=y
2011CONFIG_HID_APPLE=y
2012CONFIG_HID_BELKIN=y
2013CONFIG_HID_BRIGHT=y
2014CONFIG_HID_CHERRY=y
2015CONFIG_HID_CHICONY=y
2016CONFIG_HID_CYPRESS=y
2017CONFIG_HID_DELL=y
2018CONFIG_HID_EZKEY=y
2019CONFIG_HID_GYRATION=y
2020CONFIG_HID_LOGITECH=y
2021# CONFIG_LOGITECH_FF is not set
2022# CONFIG_LOGIRUMBLEPAD2_FF is not set
2023CONFIG_HID_MICROSOFT=y
2024CONFIG_HID_MONTEREY=y
2025CONFIG_HID_PANTHERLORD=y
2026# CONFIG_PANTHERLORD_FF is not set
2027CONFIG_HID_PETALYNX=y
2028CONFIG_HID_SAMSUNG=y
2029CONFIG_HID_SONY=y
2030CONFIG_HID_SUNPLUS=y
2031# CONFIG_GREENASIA_FF is not set
2032# CONFIG_THRUSTMASTER_FF is not set
2033# CONFIG_ZEROPLUS_FF is not set
2034CONFIG_USB_SUPPORT=y
2035CONFIG_USB_ARCH_HAS_HCD=y
2036CONFIG_USB_ARCH_HAS_OHCI=y
2037CONFIG_USB_ARCH_HAS_EHCI=y
2038CONFIG_USB=y
2039# CONFIG_USB_DEBUG is not set
2040CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
2041
2042#
2043# Miscellaneous USB options
2044#
2045CONFIG_USB_DEVICEFS=y
2046# CONFIG_USB_DEVICE_CLASS is not set
2047# CONFIG_USB_DYNAMIC_MINORS is not set
2048CONFIG_USB_SUSPEND=y
2049# CONFIG_USB_OTG is not set
2050CONFIG_USB_MON=y
2051CONFIG_USB_WUSB=m
2052# CONFIG_USB_WUSB_CBAF is not set
2053
2054#
2055# USB Host Controller Drivers
2056#
2057# CONFIG_USB_C67X00_HCD is not set
2058CONFIG_USB_EHCI_HCD=y
2059CONFIG_USB_EHCI_ROOT_HUB_TT=y
2060CONFIG_USB_EHCI_TT_NEWSCHED=y
2061# CONFIG_USB_OXU210HP_HCD is not set
2062CONFIG_USB_ISP116X_HCD=m
2063# CONFIG_USB_ISP1760_HCD is not set
2064CONFIG_USB_OHCI_HCD=y
2065# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
2066# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
2067CONFIG_USB_OHCI_LITTLE_ENDIAN=y
2068CONFIG_USB_UHCI_HCD=y
2069CONFIG_USB_U132_HCD=m
2070CONFIG_USB_SL811_HCD=m
2071# CONFIG_USB_R8A66597_HCD is not set
2072CONFIG_USB_WHCI_HCD=m
2073CONFIG_USB_HWA_HCD=m
2074
2075#
2076# USB Device Class drivers
2077#
2078CONFIG_USB_ACM=m
2079CONFIG_USB_PRINTER=m
2080CONFIG_USB_WDM=m
2081# CONFIG_USB_TMC is not set
2082
2083#
2084# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
2085# see USB_STORAGE Help for more information
2086#
2090CONFIG_USB_STORAGE=y
2091# CONFIG_USB_STORAGE_DEBUG is not set
2092CONFIG_USB_STORAGE_DATAFAB=y
2093CONFIG_USB_STORAGE_FREECOM=y
2094CONFIG_USB_STORAGE_ISD200=y
2095CONFIG_USB_STORAGE_DPCM=y
2096CONFIG_USB_STORAGE_USBAT=y
2097CONFIG_USB_STORAGE_SDDR09=y
2098CONFIG_USB_STORAGE_SDDR55=y
2099CONFIG_USB_STORAGE_JUMPSHOT=y
2100CONFIG_USB_STORAGE_ALAUDA=y
2101# CONFIG_USB_STORAGE_ONETOUCH is not set
2102CONFIG_USB_STORAGE_KARMA=y
2103# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
2104CONFIG_USB_LIBUSUAL=y
2105
2106#
2107# USB Imaging devices
2108#
2109CONFIG_USB_MDC800=m
2110CONFIG_USB_MICROTEK=m
2111
2112#
2113# USB port drivers
2114#
2115CONFIG_USB_SERIAL=m
2116CONFIG_USB_EZUSB=y
2117CONFIG_USB_SERIAL_GENERIC=y
2118CONFIG_USB_SERIAL_AIRCABLE=m
2119CONFIG_USB_SERIAL_ARK3116=m
2120CONFIG_USB_SERIAL_BELKIN=m
2121CONFIG_USB_SERIAL_CH341=m
2122CONFIG_USB_SERIAL_WHITEHEAT=m
2123CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
2124CONFIG_USB_SERIAL_CP2101=m
2125CONFIG_USB_SERIAL_CYPRESS_M8=m
2126CONFIG_USB_SERIAL_EMPEG=m
2127CONFIG_USB_SERIAL_FTDI_SIO=m
2128CONFIG_USB_SERIAL_FUNSOFT=m
2129CONFIG_USB_SERIAL_VISOR=m
2130CONFIG_USB_SERIAL_IPAQ=m
2131CONFIG_USB_SERIAL_IR=m
2132CONFIG_USB_SERIAL_EDGEPORT=m
2133CONFIG_USB_SERIAL_EDGEPORT_TI=m
2134CONFIG_USB_SERIAL_GARMIN=m
2135CONFIG_USB_SERIAL_IPW=m
2136CONFIG_USB_SERIAL_IUU=m
2137CONFIG_USB_SERIAL_KEYSPAN_PDA=m
2138CONFIG_USB_SERIAL_KEYSPAN=m
2139CONFIG_USB_SERIAL_KEYSPAN_MPR=y
2140CONFIG_USB_SERIAL_KEYSPAN_USA28=y
2141CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
2142CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
2143CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
2144CONFIG_USB_SERIAL_KEYSPAN_USA19=y
2145CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
2146CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
2147CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
2148CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
2149CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
2150CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
2151CONFIG_USB_SERIAL_KLSI=m
2152CONFIG_USB_SERIAL_KOBIL_SCT=m
2153CONFIG_USB_SERIAL_MCT_U232=m
2154CONFIG_USB_SERIAL_MOS7720=m
2155CONFIG_USB_SERIAL_MOS7840=m
2156# CONFIG_USB_SERIAL_MOTOROLA is not set
2157CONFIG_USB_SERIAL_NAVMAN=m
2158CONFIG_USB_SERIAL_PL2303=m
2159CONFIG_USB_SERIAL_OTI6858=m
2160# CONFIG_USB_SERIAL_SPCP8X5 is not set
2161CONFIG_USB_SERIAL_HP4X=m
2162CONFIG_USB_SERIAL_SAFE=m
2163CONFIG_USB_SERIAL_SAFE_PADDED=y
2164# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
2165CONFIG_USB_SERIAL_SIERRAWIRELESS=m
2166CONFIG_USB_SERIAL_TI=m
2167CONFIG_USB_SERIAL_CYBERJACK=m
2168CONFIG_USB_SERIAL_XIRCOM=m
2169CONFIG_USB_SERIAL_OPTION=m
2170CONFIG_USB_SERIAL_OMNINET=m
2171# CONFIG_USB_SERIAL_OPTICON is not set
2172CONFIG_USB_SERIAL_DEBUG=m
2173
2174#
2175# USB Miscellaneous drivers
2176#
2177CONFIG_USB_EMI62=m
2178CONFIG_USB_EMI26=m
2179CONFIG_USB_ADUTUX=m
2180# CONFIG_USB_SEVSEG is not set
2181# CONFIG_USB_RIO500 is not set
2182CONFIG_USB_LEGOTOWER=m
2183CONFIG_USB_LCD=m
2184CONFIG_USB_BERRY_CHARGE=m
2185CONFIG_USB_LED=m
2186# CONFIG_USB_CYPRESS_CY7C63 is not set
2187# CONFIG_USB_CYTHERM is not set
2188CONFIG_USB_PHIDGET=m
2189CONFIG_USB_PHIDGETKIT=m
2190CONFIG_USB_PHIDGETMOTORCONTROL=m
2191CONFIG_USB_PHIDGETSERVO=m
2192CONFIG_USB_IDMOUSE=m
2193CONFIG_USB_FTDI_ELAN=m
2194CONFIG_USB_APPLEDISPLAY=m
2195CONFIG_USB_SISUSBVGA=m
2196CONFIG_USB_SISUSBVGA_CON=y
2197CONFIG_USB_LD=m
2198CONFIG_USB_TRANCEVIBRATOR=m
2199CONFIG_USB_IOWARRIOR=m
2200# CONFIG_USB_TEST is not set
2201# CONFIG_USB_ISIGHTFW is not set
2202# CONFIG_USB_VST is not set
2203# CONFIG_USB_GADGET is not set
2204CONFIG_UWB=m
2205CONFIG_UWB_HWA=m
2206CONFIG_UWB_WHCI=m
2207# CONFIG_UWB_WLP is not set
2208# CONFIG_UWB_I1480U is not set
2209CONFIG_MMC=y
2210# CONFIG_MMC_DEBUG is not set
2211# CONFIG_MMC_UNSAFE_RESUME is not set
2212
2213#
2214# MMC/SD/SDIO Card Drivers
2215#
2216CONFIG_MMC_BLOCK=y
2217CONFIG_MMC_BLOCK_BOUNCE=y
2218CONFIG_SDIO_UART=m
2219# CONFIG_MMC_TEST is not set
2220
2221#
2222# MMC/SD/SDIO Host Controller Drivers
2223#
2224CONFIG_MMC_SDHCI=y
2225CONFIG_MMC_SDHCI_PCI=y
2226# CONFIG_MMC_RICOH_MMC is not set
2227CONFIG_MMC_WBSD=m
2228CONFIG_MMC_TIFM_SD=m
2229# CONFIG_MEMSTICK is not set
2230CONFIG_NEW_LEDS=y
2231CONFIG_LEDS_CLASS=y
2232# CONFIG_MMC_CEATA_WR is not set
2233# CONFIG_MMC_SPI is not set
2234
2235#
2236# LED drivers
2237#
2238# CONFIG_LEDS_ALIX2 is not set
2239# CONFIG_LEDS_PCA9532 is not set
2240# CONFIG_LEDS_HP_DISK is not set
2241# CONFIG_LEDS_CLEVO_MAIL is not set
2242# CONFIG_LEDS_PCA955X is not set
2243
2244#
2245# LED Triggers
2246#
2247CONFIG_LEDS_TRIGGERS=y
2248# CONFIG_LEDS_TRIGGER_TIMER is not set
2249# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
2250# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
2251# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
2252# CONFIG_ACCESSIBILITY is not set
2253# CONFIG_INFINIBAND is not set
2254# CONFIG_EDAC is not set
2255CONFIG_RTC_LIB=y
2256CONFIG_RTC_CLASS=y
2257# CONFIG_RTC_HCTOSYS is not set
2258# CONFIG_RTC_DEBUG is not set
2259
2260#
2261# RTC interfaces
2262#
2263CONFIG_RTC_INTF_SYSFS=y
2264CONFIG_RTC_INTF_PROC=y
2265CONFIG_RTC_INTF_DEV=y
2266# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
2267# CONFIG_RTC_DRV_TEST is not set
2268
2269#
2270# I2C RTC drivers
2271#
2272# CONFIG_RTC_DRV_DS1307 is not set
2273# CONFIG_RTC_DRV_DS1374 is not set
2274# CONFIG_RTC_DRV_DS1672 is not set
2275# CONFIG_RTC_DRV_MAX6900 is not set
2276# CONFIG_RTC_DRV_RS5C372 is not set
2277# CONFIG_RTC_DRV_ISL1208 is not set
2278# CONFIG_RTC_DRV_X1205 is not set
2279# CONFIG_RTC_DRV_PCF8563 is not set
2280# CONFIG_RTC_DRV_PCF8583 is not set
2281# CONFIG_RTC_DRV_M41T80 is not set
2282# CONFIG_RTC_DRV_S35390A is not set
2283# CONFIG_RTC_DRV_FM3130 is not set
2284# CONFIG_RTC_DRV_RX8581 is not set
2285
2286#
2287# SPI RTC drivers
2288#
2289
2290#
2291# Platform RTC drivers
2292#
2293CONFIG_RTC_DRV_CMOS=y
2294# CONFIG_RTC_DRV_DS1286 is not set
2295# CONFIG_RTC_DRV_DS1511 is not set
2296# CONFIG_RTC_DRV_DS1553 is not set
2297# CONFIG_RTC_DRV_DS1742 is not set
2298# CONFIG_RTC_DRV_STK17TA8 is not set
2299# CONFIG_RTC_DRV_M48T86 is not set
2300# CONFIG_RTC_DRV_M48T35 is not set
2301# CONFIG_RTC_DRV_M48T59 is not set
2302# CONFIG_RTC_DRV_BQ4802 is not set
2303# CONFIG_RTC_DRV_V3020 is not set
2304
2305#
2306# on-CPU RTC drivers
2307#
2308# CONFIG_UIO is not set
2309CONFIG_STAGING=y
2310# CONFIG_STAGING_EXCLUDE_BUILD is not set
2311# CONFIG_ET131X is not set
2312# CONFIG_SLICOSS is not set
2313# CONFIG_SXG is not set
2314# CONFIG_ME4000 is not set
2315# CONFIG_MEILHAUS is not set
2316# CONFIG_VIDEO_GO7007 is not set
2317CONFIG_USB_IP_COMMON=m
2318CONFIG_USB_IP_VHCI_HCD=m
2319CONFIG_USB_IP_HOST=m
2320# CONFIG_W35UND is not set
2321CONFIG_PRISM2_USB=m
2322# CONFIG_ECHO is not set
2323CONFIG_RT2860=m
2324CONFIG_RT2870=m
2325# CONFIG_BENET is not set
2326# CONFIG_COMEDI is not set
2327# CONFIG_ASUS_OLED is not set
2328# CONFIG_USB_ATMEL is not set
2329# CONFIG_AGNX is not set
2330# CONFIG_OTUS is not set
2331# CONFIG_ALTERA_PCIE_CHDMA is not set
2332# CONFIG_RTL8187SE is not set
2333# CONFIG_INPUT_MIMIO is not set
2334# CONFIG_TRANZPORT is not set
2335# CONFIG_EPL is not set
2336
2337#
2338# Android
2339#
2340# CONFIG_ANDROID is not set
2341# CONFIG_ANDROID_BINDER_IPC is not set
2342# CONFIG_ANDROID_LOGGER is not set
2343# CONFIG_ANDROID_RAM_CONSOLE is not set
2344# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set
2345CONFIG_X86_PLATFORM_DEVICES=y
2346
2347#
2348# Firmware Drivers
2349#
2350# CONFIG_EDD is not set
2351CONFIG_FIRMWARE_MEMMAP=y
2352CONFIG_EFI_VARS=m
2353# CONFIG_DELL_RBU is not set
2354# CONFIG_DCDBAS is not set
2355CONFIG_DMIID=y
2356# CONFIG_ISCSI_IBFT_FIND is not set
2357
2358#
2359# File systems
2360#
2361CONFIG_EXT2_FS=y
2362# CONFIG_EXT2_FS_XATTR is not set
2363# CONFIG_EXT2_FS_XIP is not set
2364CONFIG_EXT3_FS=y
2365CONFIG_EXT3_FS_XATTR=y
2366CONFIG_EXT3_FS_POSIX_ACL=y
2367CONFIG_EXT3_FS_SECURITY=y
2368# CONFIG_EXT4_FS is not set
2369CONFIG_JBD=y
2370# CONFIG_JBD_DEBUG is not set
2371CONFIG_FS_MBCACHE=y
2372# CONFIG_REISERFS_FS is not set
2373# CONFIG_JFS_FS is not set
2374CONFIG_FS_POSIX_ACL=y
2375CONFIG_FILE_LOCKING=y
2376# CONFIG_XFS_FS is not set
2377# CONFIG_GFS2_FS is not set
2378# CONFIG_OCFS2_FS is not set
2379# CONFIG_BTRFS_FS is not set
2380CONFIG_DNOTIFY=y
2381CONFIG_INOTIFY=y
2382CONFIG_INOTIFY_USER=y
2383# CONFIG_QUOTA is not set
2384# CONFIG_AUTOFS_FS is not set
2385# CONFIG_AUTOFS4_FS is not set
2386CONFIG_FUSE_FS=m
2387CONFIG_GENERIC_ACL=y
2388
2389#
2390# CD-ROM/DVD Filesystems
2391#
2392CONFIG_ISO9660_FS=y
2393CONFIG_JOLIET=y
2394CONFIG_ZISOFS=y
2395CONFIG_UDF_FS=m
2396CONFIG_UDF_NLS=y
2397
2398#
2399# DOS/FAT/NT Filesystems
2400#
2401CONFIG_FAT_FS=y
2402CONFIG_MSDOS_FS=y
2403CONFIG_VFAT_FS=y
2404CONFIG_FAT_DEFAULT_CODEPAGE=437
2405CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
2406# CONFIG_NTFS_FS is not set
2407
2408#
2409# Pseudo filesystems
2410#
2411CONFIG_PROC_FS=y
2412# CONFIG_PROC_KCORE is not set
2413CONFIG_PROC_SYSCTL=y
2414CONFIG_PROC_PAGE_MONITOR=y
2415CONFIG_SYSFS=y
2416CONFIG_TMPFS=y
2417CONFIG_TMPFS_POSIX_ACL=y
2418# CONFIG_HUGETLBFS is not set
2419# CONFIG_HUGETLB_PAGE is not set
2420CONFIG_CONFIGFS_FS=m
2421
2422#
2423# Miscellaneous filesystems
2424#
2425CONFIG_MISC_FILESYSTEMS=y
2426# CONFIG_ADFS_FS is not set
2427# CONFIG_AFFS_FS is not set
2428# CONFIG_HFS_FS is not set
2429# CONFIG_HFSPLUS_FS is not set
2430# CONFIG_BEFS_FS is not set
2431# CONFIG_BFS_FS is not set
2432# CONFIG_EFS_FS is not set
2433# CONFIG_CRAMFS is not set
2434CONFIG_SQUASHFS=y
2435# CONFIG_SQUASHFS_EMBEDDED is not set
2436CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
2437# CONFIG_VXFS_FS is not set
2438# CONFIG_MINIX_FS is not set
2439# CONFIG_OMFS_FS is not set
2440# CONFIG_HPFS_FS is not set
2441# CONFIG_QNX4FS_FS is not set
2442# CONFIG_ROMFS_FS is not set
2443# CONFIG_SYSV_FS is not set
2444# CONFIG_UFS_FS is not set
2445CONFIG_NETWORK_FILESYSTEMS=y
2446# CONFIG_NFS_FS is not set
2447# CONFIG_NFSD is not set
2448# CONFIG_SMB_FS is not set
2449CONFIG_CIFS=m
2450# CONFIG_CIFS_STATS is not set
2451CONFIG_CIFS_WEAK_PW_HASH=y
2452# CONFIG_CIFS_XATTR is not set
2453# CONFIG_CIFS_DEBUG2 is not set
2454# CONFIG_CIFS_EXPERIMENTAL is not set
2455# CONFIG_NCP_FS is not set
2456# CONFIG_CODA_FS is not set
2457# CONFIG_AFS_FS is not set
2458
2459#
2460# Partition Types
2461#
2462CONFIG_PARTITION_ADVANCED=y
2463# CONFIG_ACORN_PARTITION is not set
2464# CONFIG_OSF_PARTITION is not set
2465# CONFIG_AMIGA_PARTITION is not set
2466# CONFIG_ATARI_PARTITION is not set
2467# CONFIG_MAC_PARTITION is not set
2468CONFIG_MSDOS_PARTITION=y
2469CONFIG_BSD_DISKLABEL=y
2470# CONFIG_MINIX_SUBPARTITION is not set
2471# CONFIG_SOLARIS_X86_PARTITION is not set
2472# CONFIG_UNIXWARE_DISKLABEL is not set
2473CONFIG_LDM_PARTITION=y
2474# CONFIG_LDM_DEBUG is not set
2475# CONFIG_SGI_PARTITION is not set
2476# CONFIG_ULTRIX_PARTITION is not set
2477# CONFIG_SUN_PARTITION is not set
2478# CONFIG_KARMA_PARTITION is not set
2479CONFIG_EFI_PARTITION=y
2480# CONFIG_SYSV68_PARTITION is not set
2481CONFIG_NLS=y
2482CONFIG_NLS_DEFAULT="utf8"
2483CONFIG_NLS_CODEPAGE_437=y
2484CONFIG_NLS_CODEPAGE_737=m
2485CONFIG_NLS_CODEPAGE_775=m
2486CONFIG_NLS_CODEPAGE_850=m
2487CONFIG_NLS_CODEPAGE_852=m
2488CONFIG_NLS_CODEPAGE_855=m
2489CONFIG_NLS_CODEPAGE_857=m
2490CONFIG_NLS_CODEPAGE_860=m
2491CONFIG_NLS_CODEPAGE_861=m
2492CONFIG_NLS_CODEPAGE_862=m
2493CONFIG_NLS_CODEPAGE_863=m
2494CONFIG_NLS_CODEPAGE_864=m
2495CONFIG_NLS_CODEPAGE_865=m
2496CONFIG_NLS_CODEPAGE_866=m
2497CONFIG_NLS_CODEPAGE_869=m
2498CONFIG_NLS_CODEPAGE_936=m
2499CONFIG_NLS_CODEPAGE_950=m
2500CONFIG_NLS_CODEPAGE_932=m
2501CONFIG_NLS_CODEPAGE_949=m
2502CONFIG_NLS_CODEPAGE_874=m
2503CONFIG_NLS_ISO8859_8=m
2504CONFIG_NLS_CODEPAGE_1250=m
2505CONFIG_NLS_CODEPAGE_1251=m
2506CONFIG_NLS_ASCII=y
2507CONFIG_NLS_ISO8859_1=m
2508CONFIG_NLS_ISO8859_2=m
2509CONFIG_NLS_ISO8859_3=m
2510CONFIG_NLS_ISO8859_4=m
2511CONFIG_NLS_ISO8859_5=m
2512CONFIG_NLS_ISO8859_6=m
2513CONFIG_NLS_ISO8859_7=m
2514CONFIG_NLS_ISO8859_9=m
2515CONFIG_NLS_ISO8859_13=m
2516CONFIG_NLS_ISO8859_14=m
2517CONFIG_NLS_ISO8859_15=m
2518CONFIG_NLS_KOI8_R=m
2519CONFIG_NLS_KOI8_U=m
2520CONFIG_NLS_UTF8=m
2521# CONFIG_DLM is not set
2522
2523#
2524# Kernel hacking
2525#
2526CONFIG_TRACE_IRQFLAGS_SUPPORT=y
2527CONFIG_PRINTK_TIME=y
2528# CONFIG_ENABLE_WARN_DEPRECATED is not set
2529CONFIG_ENABLE_MUST_CHECK=y
2530CONFIG_FRAME_WARN=1024
2531CONFIG_MAGIC_SYSRQ=y
2532# CONFIG_UNUSED_SYMBOLS is not set
2533CONFIG_DEBUG_FS=y
2534# CONFIG_HEADERS_CHECK is not set
2535CONFIG_DEBUG_KERNEL=y
2536CONFIG_DEBUG_SHIRQ=y
2537CONFIG_DETECT_SOFTLOCKUP=y
2538# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
2539CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
2540# CONFIG_SCHED_DEBUG is not set
2541CONFIG_SCHEDSTATS=y
2542CONFIG_TIMER_STATS=y
2543# CONFIG_DEBUG_OBJECTS is not set
2544# CONFIG_DEBUG_SLAB is not set
2545# CONFIG_DEBUG_RT_MUTEXES is not set
2546# CONFIG_RT_MUTEX_TESTER is not set
2547CONFIG_DEBUG_SPINLOCK=y
2548# CONFIG_DEBUG_MUTEXES is not set
2549# CONFIG_DEBUG_LOCK_ALLOC is not set
2550# CONFIG_PROVE_LOCKING is not set
2551# CONFIG_DEBUG_LOCKDEP is not set
2552# CONFIG_LOCK_STAT is not set
2553CONFIG_DEBUG_SPINLOCK_SLEEP=y
2554# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
2555CONFIG_STACKTRACE=y
2556# CONFIG_DEBUG_KOBJECT is not set
2557CONFIG_DEBUG_HIGHMEM=y
2558CONFIG_DEBUG_BUGVERBOSE=y
2559CONFIG_DEBUG_INFO=y
2560# CONFIG_DEBUG_VM is not set
2561# CONFIG_DEBUG_VIRTUAL is not set
2562# CONFIG_DEBUG_WRITECOUNT is not set
2563# CONFIG_DEBUG_MEMORY_INIT is not set
2564CONFIG_DEBUG_LIST=y
2565# CONFIG_DEBUG_SG is not set
2566CONFIG_DEBUG_NOTIFIERS=y
2567CONFIG_FRAME_POINTER=y
2568CONFIG_BOOT_PRINTK_DELAY=y
2569# CONFIG_RCU_TORTURE_TEST is not set
2570# CONFIG_RCU_CPU_STALL_DETECTOR is not set
2571# CONFIG_BACKTRACE_SELF_TEST is not set
2572# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
2573# CONFIG_FAULT_INJECTION is not set
2574CONFIG_LATENCYTOP=y
2575CONFIG_SYSCTL_SYSCALL_CHECK=y
2576CONFIG_HAVE_FUNCTION_TRACER=y
2577CONFIG_HAVE_DYNAMIC_FTRACE=y
2578CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
2579
2580# CONFIG_X86_VISWS is not set
2581# CONFIG_FTRACE_STARTUP_TEST is not set
2582#
2583# Tracers
2584#
2585# CONFIG_FUNCTION_TRACER is not set
2586# CONFIG_IRQSOFF_TRACER is not set
2587CONFIG_SYSPROF_TRACER=y
2588# CONFIG_SCHED_TRACER is not set
2589# CONFIG_CONTEXT_SWITCH_TRACER is not set
2590CONFIG_OPEN_CLOSE_TRACER=y
2591# CONFIG_BOOT_TRACER is not set
2592CONFIG_POWER_TRACER=y
2593# CONFIG_TRACE_BRANCH_PROFILING is not set
2594# CONFIG_STACK_TRACER is not set
2595# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
2596# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
2597# CONFIG_SAMPLES is not set
2598CONFIG_HAVE_ARCH_KGDB=y
2599# CONFIG_KGDB is not set
2600CONFIG_STRICT_DEVMEM=y
2601CONFIG_X86_VERBOSE_BOOTUP=y
2602CONFIG_EARLY_PRINTK=y
2603# CONFIG_EARLY_PRINTK_DBGP is not set
2604# CONFIG_DEBUG_STACKOVERFLOW is not set
2605# CONFIG_DEBUG_STACK_USAGE is not set
2606# CONFIG_DEBUG_PAGEALLOC is not set
2607# CONFIG_DEBUG_PER_CPU_MAPS is not set
2608# CONFIG_X86_PTDUMP is not set
2609CONFIG_DEBUG_RODATA=y
2610# CONFIG_DEBUG_RODATA_TEST is not set
2611# CONFIG_DEBUG_NX_TEST is not set
2612# CONFIG_4KSTACKS is not set
2613CONFIG_DOUBLEFAULT=y
2614# CONFIG_MMIOTRACE is not set
2615CONFIG_IO_DELAY_TYPE_0X80=0
2616CONFIG_IO_DELAY_TYPE_0XED=1
2617CONFIG_IO_DELAY_TYPE_UDELAY=2
2618CONFIG_IO_DELAY_TYPE_NONE=3
2619CONFIG_IO_DELAY_0X80=y
2620# CONFIG_IO_DELAY_0XED is not set
2621# CONFIG_IO_DELAY_UDELAY is not set
2622# CONFIG_IO_DELAY_NONE is not set
2623CONFIG_DEFAULT_IO_DELAY_TYPE=0
2624CONFIG_DEBUG_BOOT_PARAMS=y
2625# CONFIG_CPA_DEBUG is not set
2626# CONFIG_OPTIMIZE_INLINING is not set
2627
2628#
2629# Security options
2630#
2631# CONFIG_KEYS is not set
2632# CONFIG_SECURITY is not set
2633# CONFIG_SECURITYFS is not set
2634# CONFIG_SECURITY_FILE_CAPABILITIES is not set
2635CONFIG_CRYPTO=y
2636
2637#
2638# Crypto core or helper
2639#
2640# CONFIG_CRYPTO_FIPS is not set
2641CONFIG_CRYPTO_ALGAPI=y
2642CONFIG_CRYPTO_AEAD=y
2643CONFIG_CRYPTO_BLKCIPHER=y
2644CONFIG_CRYPTO_HASH=y
2645CONFIG_CRYPTO_RNG=y
2646CONFIG_CRYPTO_MANAGER=y
2647CONFIG_CRYPTO_GF128MUL=m
2648CONFIG_CRYPTO_NULL=m
2649# CONFIG_CRYPTO_CRYPTD is not set
2650CONFIG_CRYPTO_AUTHENC=m
2651CONFIG_CRYPTO_TEST=m
2652
2653#
2654# Authenticated Encryption with Associated Data
2655#
2656CONFIG_CRYPTO_CCM=m
2657CONFIG_CRYPTO_GCM=m
2658CONFIG_CRYPTO_SEQIV=m
2659
2660#
2661# Block modes
2662#
2663CONFIG_CRYPTO_CBC=m
2664CONFIG_CRYPTO_CTR=m
2665# CONFIG_CRYPTO_CTS is not set
2666CONFIG_CRYPTO_ECB=y
2667CONFIG_CRYPTO_LRW=m
2668CONFIG_CRYPTO_PCBC=m
2669CONFIG_CRYPTO_XTS=m
2670
2671#
2672# Hash modes
2673#
2674CONFIG_CRYPTO_HMAC=y
2675CONFIG_CRYPTO_XCBC=m
2676
2677#
2678# Digest
2679#
2680CONFIG_CRYPTO_CRC32C=m
2681# CONFIG_CRYPTO_CRC32C_INTEL is not set
2682CONFIG_CRYPTO_MD4=m
2683CONFIG_CRYPTO_MD5=y
2684CONFIG_CRYPTO_MICHAEL_MIC=m
2685# CONFIG_CRYPTO_RMD128 is not set
2686# CONFIG_CRYPTO_RMD160 is not set
2687# CONFIG_CRYPTO_RMD256 is not set
2688# CONFIG_CRYPTO_RMD320 is not set
2689CONFIG_CRYPTO_SHA1=y
2690CONFIG_CRYPTO_SHA256=m
2691CONFIG_CRYPTO_SHA512=m
2692CONFIG_CRYPTO_TGR192=m
2693CONFIG_CRYPTO_WP512=m
2694
2695#
2696# Ciphers
2697#
2698CONFIG_CRYPTO_AES=y
2699CONFIG_CRYPTO_AES_586=m
2700CONFIG_CRYPTO_ANUBIS=m
2701CONFIG_CRYPTO_ARC4=y
2702CONFIG_CRYPTO_BLOWFISH=m
2703CONFIG_CRYPTO_CAMELLIA=m
2704CONFIG_CRYPTO_CAST5=m
2705CONFIG_CRYPTO_CAST6=m
2706CONFIG_CRYPTO_DES=m
2707CONFIG_CRYPTO_FCRYPT=m
2708CONFIG_CRYPTO_KHAZAD=m
2709CONFIG_CRYPTO_SALSA20=m
2710CONFIG_CRYPTO_SALSA20_586=m
2711CONFIG_CRYPTO_SEED=m
2712CONFIG_CRYPTO_SERPENT=m
2713CONFIG_CRYPTO_TEA=m
2714CONFIG_CRYPTO_TWOFISH=m
2715CONFIG_CRYPTO_TWOFISH_COMMON=m
2716CONFIG_CRYPTO_TWOFISH_586=m
2717
2718#
2719# Compression
2720#
2721CONFIG_CRYPTO_DEFLATE=m
2722# CONFIG_CRYPTO_LZO is not set
2723
2724#
2725# Random Number Generation
2726#
2727# CONFIG_CRYPTO_ANSI_CPRNG is not set
2728CONFIG_CRYPTO_HW=y
2729# CONFIG_CRYPTO_DEV_PADLOCK is not set
2730# CONFIG_CRYPTO_DEV_GEODE is not set
2731# CONFIG_CRYPTO_DEV_HIFN_795X is not set
2732CONFIG_HAVE_KVM=y
2733# CONFIG_VIRTUALIZATION is not set
2734
2735#
2736# Library routines
2737#
2738CONFIG_BITREVERSE=y
2739CONFIG_GENERIC_FIND_FIRST_BIT=y
2740CONFIG_GENERIC_FIND_NEXT_BIT=y
2741CONFIG_CRC_CCITT=m
2742CONFIG_CRC16=m
2743# CONFIG_CRC_T10DIF is not set
2744CONFIG_CRC_ITU_T=m
2745CONFIG_CRC32=y
2746# CONFIG_CRC7 is not set
2747CONFIG_LIBCRC32C=m
2748CONFIG_ZLIB_INFLATE=y
2749CONFIG_ZLIB_DEFLATE=m
2750CONFIG_TEXTSEARCH=y
2751CONFIG_TEXTSEARCH_KMP=m
2752CONFIG_TEXTSEARCH_BM=m
2753CONFIG_TEXTSEARCH_FSM=m
2754CONFIG_PLIST=y
2755CONFIG_HAS_IOMEM=y
2756CONFIG_HAS_IOPORT=y
2757CONFIG_HAS_DMA=y
2758CONFIG_CHECK_SIGNATURE=y
2759
2760
2761# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
2762# CONFIG_MFD_PCF50633 is not set
2763# CONFIG_SENSORS_ADT7475 is not set
2764# CONFIG_LIB80211_DEBUG is not set
2765# CONFIG_DNET is not set
2766# CONFIG_BE2NET is not set
2767
2768
2769
2770# CONFIG_LNW_IPC is not set
2771# CONFIG_MRST is not set
2772# CONFIG_SFI is not set
2773# CONFIG_MDIO_GPIO is not set
2774# CONFIG_KEYBOARD_GPIO is not set
2775# CONFIG_MOUSE_GPIO is not set
2776# CONFIG_I2C_GPIO is not set
2777# CONFIG_DEBUG_GPIO is not set
2778# CONFIG_GPIO_SYSFS is not set
2779# CONFIG_GPIO_LANGWELL is not set
2780# CONFIG_GPIO_MAX732X is not set
2781# CONFIG_GPIO_PCA953X is not set
2782# CONFIG_GPIO_PCF857X is not set
2783# CONFIG_GPIO_BT8XX is not set
2784# CONFIG_UCB1400_CORE is not set
2785# CONFIG_TPS65010 is not set
2786# CONFIG_USB_GPIO_VBUS is not set
2787# CONFIG_LEDS_GPIO is not set
2788# CONFIG_ANDROID_TIMED_GPIO is not set
2789# CONFIG_X86_MRST_EARLY_PRINTK is not set
2790
2791# CONFIG_APB_TIMER is not set
2792# CONFIG_MRST_SPI_UART_BOOT_MSG is not set
2793# CONFIG_SFI_DEBUG is not set
2794# CONFIG_SFI_PROCFS is not set
2795# CONFIG_TOUCHSCREEN_UCB1400 is not set
2796# CONFIG_GPIO_LNWPMIC is not set
2797# CONFIG_RTC_DRV_VRTC is not set
2798# CONFIG_MRST_NAND is not set
2799# CONFIG_USB_LANGWELL_OTG is not set
2800# CONFIG_KEYBOARD_MRST is not set
2801# CONFIG_I2C_MRST is not set
2802# CONFIG_USB_OTG_WHITELIST is not set
2803# CONFIG_USB_OTG_BLACKLIST_HUB is not set
2804# CONFIG_SND_PCM_OSS_PLUGINS is not set
2805# CONFIG_SND_INTEL_LPE is not set
2806# CONFIG_LPE_IPC_NOT_INCLUDED is not set
2807# CONFIG_SND_INTELMID is not set
2808# CONFIG_TOUCHSCREEN_INTEL_MRST is not set
2809# CONFIG_ATL1C is not set
2810# CONFIG_MRST_MMC_WR is not set
2811
2812
2813# CONFIG_VIDEO_MRSTCI is not set
2814# CONFIG_VIDEO_MRST_ISP is not set
2815# CONFIG_VIDEO_MRST_SENSOR is not set
2816# CONFIG_VIDEO_MRST_OV2650 is not set
2817# CONFIG_VIDEO_MRST_OV5630 is not set
2818# CONFIG_SENSORS_MRST_THERMAL is not set
2819# CONFIG_SPI2_MRST is not set
2820
2821# CONFIG_SFI_PM is not set
2822# CONFIG_SFI_CPUIDLE is not set
2823# CONFIG_SFI_PROCESSOR_PM is not set
2824# CONFIG_X86_SFI_CPUFREQ is not set
2825# CONFIG_MSTWN_POWER_MGMT is not set
2826# CONFIG_USB_NET_MBM is not set
2827
2828# CONFIG_USB_GADGET_LANGWELL is not set
2829# CONFIG_USB_LANGWELL is not set
2830
2831# CONFIG_INTEL_LNW_DMAC1 is not set
2832# CONFIG_INTEL_LNW_DMAC2 is not set
2833# CONFIG_LNW_DMA_DEBUG is not set
2834# CONFIG_NET_DMA is not set
2835# CONFIG_DMATEST is not set
2836# CONFIG_8688_RC is not set
2837# CONFIG_SSB_SILENT is not set
2838
2839# CONFIG_TOUCHSCREEN_TSC2003 is not set
2840# CONFIG_MFD_TIMBERDALE is not set
2841# CONFIG_MMC_SDHCI_PLTFM is not set
2842# CONFIG_SPI_XILINX is not set
2843# CONFIG_SPI_MRST is not set
2844# CONFIG_GPE is not set
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-ivi b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-ivi
deleted file mode 100644
index 0f61bd77ec..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-ivi
+++ /dev/null
@@ -1,127 +0,0 @@
1CONFIG_LOCALVERSION="-ivi"
2CONFIG_INTEL_MENLOW=y
3CONFIG_DRM_PSB=y
4
5#
6# Cgroups
7#
8CONFIG_CGROUPS=y
9# CONFIG_CGROUP_DEBUG is not set
10CONFIG_CGROUP_NS=y
11CONFIG_CGROUP_FREEZER=y
12CONFIG_CGROUP_DEVICE=y
13# CONFIG_CPUSETS is not set
14# CONFIG_CGROUP_CPUACCT is not set
15# CONFIG_RESOURCE_COUNTERS is not set
16
17CONFIG_4KSTACKS=y
18CONFIG_ACER_WMI=y
19CONFIG_ARCH_WANT_FRAME_POINTERS=y
20# CONFIG_ATH5K_DEBUG is not set
21CONFIG_ATH5K=y
22CONFIG_ATL1E=y
23# CONFIG_BNX2X is not set
24CONFIG_CHELSIO_T3_DEPENDS=y
25CONFIG_COMPAT_NET_DEV_OPS=y
26CONFIG_CRYPTO_AEAD2=y
27CONFIG_CRYPTO_AEAD=m
28CONFIG_CRYPTO_ALGAPI2=y
29CONFIG_CRYPTO_BLKCIPHER2=y
30CONFIG_CRYPTO_HASH2=y
31CONFIG_CRYPTO_MANAGER2=y
32CONFIG_CRYPTO_RNG2=y
33CONFIG_CRYPTO_RNG=m
34# CONFIG_DEBUG_NOTIFIERS is not set
35# CONFIG_DEBUG_SPINLOCK is not set
36CONFIG_EEEPC_LAPTOP=y
37# CONFIG_EEPROM_AT25 is not set
38# CONFIG_ENC28J60 is not set
39# CONFIG_FB_BACKLIGHT is not set
40# CONFIG_FB_BOOT_VESA_SUPPORT is not set
41CONFIG_FB_CFB_COPYAREA=y
42CONFIG_FB_CFB_FILLRECT=y
43CONFIG_FB_CFB_IMAGEBLIT=y
44# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
45# CONFIG_FB_DDC is not set
46# CONFIG_FB_MACMODES is not set
47CONFIG_FB_MODE_HELPERS=y
48# CONFIG_FB_SVGALIB is not set
49# CONFIG_FB_SYS_COPYAREA is not set
50# CONFIG_FB_SYS_FILLRECT is not set
51# CONFIG_FB_SYS_FOPS is not set
52# CONFIG_FB_SYS_IMAGEBLIT is not set
53# CONFIG_FB_TMIO is not set
54CONFIG_GENERIC_FIND_LAST_BIT=y
55CONFIG_GENERIC_GPIO=y
56CONFIG_GPIOLIB=y
57# CONFIG_GPIO_MAX7301 is not set
58# CONFIG_GPIO_MCP23S08 is not set
59CONFIG_GPIO_SYSFS=y
60CONFIG_GPIO_TIMBERDALE=y
61CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
62CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
63CONFIG_HAVE_MMIOTRACE_SUPPORT=y
64CONFIG_HID_NTRIG=y
65CONFIG_HID_TOPSEED=y
66CONFIG_I2C_ALGOBIT=y
67CONFIG_I2C_CHARDEV=m
68CONFIG_I2C_OCORES=m
69# CONFIG_IOMMU_API is not set
70# CONFIG_KS8842 is not set
71CONFIG_LIBIPW=m
72CONFIG_MAC80211_RC_DEFAULT="minstrel"
73CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
74# CONFIG_MAC80211_RC_DEFAULT_PID is not set
75CONFIG_MAC80211_RC_MINSTREL=y
76CONFIG_MFD_CORE=y
77CONFIG_MFD_TIMBERDALE_DMA=m
78CONFIG_MFD_TIMBERDALE_I2S=m
79CONFIG_MFD_TIMBERDALE=y
80CONFIG_MMC_SDHCI_PLTFM=m
81# CONFIG_MOUSE_PS2_TOUCHKIT is not set
82# CONFIG_PREEMPT is not set
83# CONFIG_PREEMPT_RCU is not set
84# CONFIG_PREEMPT_RCU_TRACE is not set
85CONFIG_PREEMPT_VOLUNTARY=y
86CONFIG_R8169=y
87# CONFIG_RT2860 is not set
88# CONFIG_RT2870 is not set
89# CONFIG_RTC_DRV_DS1305 is not set
90# CONFIG_RTC_DRV_DS1390 is not set
91# CONFIG_RTC_DRV_DS3234 is not set
92# CONFIG_RTC_DRV_M41T94 is not set
93# CONFIG_RTC_DRV_MAX6902 is not set
94# CONFIG_RTC_DRV_R9701 is not set
95# CONFIG_RTC_DRV_RS5C348 is not set
96CONFIG_SCSI_FC_ATTRS=m
97CONFIG_SCSI_NETLINK=y
98CONFIG_SCSI_SAS_ATTRS=m
99CONFIG_SCSI_SPI_ATTRS=m
100# CONFIG_SENSORS_ADCXX is not set
101# CONFIG_SENSORS_LM70 is not set
102# CONFIG_SENSORS_MAX1111 is not set
103CONFIG_SERIAL_TIMBERDALE=m
104CONFIG_SND_HDA_ELD=y
105CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
106CONFIG_SND_JACK=y
107CONFIG_SND_SPI=y
108CONFIG_SPI_BITBANG=m
109# CONFIG_SPI_DEBUG is not set
110# CONFIG_SPI_GPIO is not set
111CONFIG_SPI_MASTER=y
112# CONFIG_SPI_SPIDEV is not set
113# CONFIG_SPI_TLE62X0 is not set
114CONFIG_SPI_XILINX=m
115CONFIG_SPI_XILINX_PLTFM=m
116CONFIG_SPI=y
117# CONFIG_TOUCHSCREEN_ADS7846 is not set
118CONFIG_TOUCHSCREEN_TSC2003=m
119CONFIG_TOUCHSCREEN_TSC2007=m
120CONFIG_TRACEPOINTS=y
121# CONFIG_TREE_RCU is not set
122# CONFIG_TREE_RCU_TRACE is not set
123CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
124CONFIG_USER_STACKTRACE_SUPPORT=y
125CONFIG_VGASTATE=m
126CONFIG_VIDEO_TIMBERDALE=m
127CONFIG_WIMAX_I2400M=m
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-menlow b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-menlow
deleted file mode 100644
index 3f66175e16..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-menlow
+++ /dev/null
@@ -1,8 +0,0 @@
1CONFIG_LOCALVERSION="-menlow"
2
3CONFIG_INTEL_MENLOW=y
4CONFIG_DRM_PSB=y
5
6# LIBERTAS works with Menlow sd8686
7CONFIG_LIBERTAS=m
8CONFIG_LIBERTAS_SDIO=m
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-mrst b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-mrst
deleted file mode 100644
index 8b067c47c8..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-mrst
+++ /dev/null
@@ -1,2316 +0,0 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29-rc8
4# Wed Mar 25 08:57:27 2009
5#
6
7#
8#MRST DRIVERS
9#
10
11# Option GTM501L SPI 3g driver
12CONFIG_SPI_MRST_GTM501=y
13
14# Marvell 8688 WiFi and BT
15CONFIG_8688_RC=y
16
17# Ericsson MBM 3G Driver
18CONFIG_USB_NET_MBM=y
19
20# MRST Poulsbo gfx driver
21CONFIG_DRM_PSB=y
22
23# MRST NAND DRIVER
24CONFIG_MRST_NAND=y
25CONFIG_MRST_NAND_POLL=y
26# CONFIG_MRST_NAND_CDMA is not set
27# CONFIG_MRST_NAND_ESL is not set
28# CONFIG_MRST_NAND_EMU is not set
29
30# MRST SFI C and P states
31CONFIG_SFI=y
32CONFIG_SFI_CPUIDLE=y
33CONFIG_SFI_PM=y
34CONFIG_SFI_PROCESSOR_PM=y
35CONFIG_X86_SFI_CPUFREQ=y
36
37# MRST MMC
38CONFIG_MRST_MMC_WR=y
39CONFIG_MMC_CEATA_WR=n
40
41# MRST THERMAL
42CONFIG_SENSORS_MRST_THERMAL=y
43
44# MRST SPI2
45CONFIG_SPI2_MRST=y
46
47# MRST I2C
48CONFIG_I2C_MRST=y
49
50# MRST KEYPAD
51CONFIG_KEYBOARD_MRST=y
52
53# MRST RESISTIVE TOUCHSCREEN
54CONFIG_TOUCHSCREEN_INTEL_MRST=y
55
56# USB OTG CLIENT
57CONFIG_USB_GADGET_LANGWELL=y
58CONFIG_USB_LANGWELL=m
59
60# MRST CAMERA
61CONFIG_VIDEO_V4L2=y
62CONFIG_VIDEO_MRSTCI=y
63CONFIG_I2C=y
64CONFIG_VIDEO_MRST_ISP=y
65CONFIG_VIDEO_MRST_SENSOR=y
66CONFIG_VIDEO_MRST_OV2650=y
67CONFIG_VIDEO_MRST_OV5630=y
68
69# MRST AUDIO
70CONFIG_SND_INTEL_LPE=y
71CONFIG_LPE_OSPM_SUPPORT=y
72CONFIG_LPE_DBG_PRINT=y
73# CONFIG_LPE_IPC_NOT_INCLUDED is not set
74CONFIG_SND_INTELMID=y
75CONFIG_MID_DBG_PRINT=y
76
77# MRST OSPM
78CONFIG_MSTWN_POWER_MGMT=y
79
80# CONFIG_64BIT is not set
81CONFIG_X86_32=y
82# CONFIG_X86_64 is not set
83CONFIG_X86=y
84CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
85CONFIG_GENERIC_TIME=y
86CONFIG_GENERIC_CMOS_UPDATE=y
87CONFIG_CLOCKSOURCE_WATCHDOG=y
88CONFIG_GENERIC_CLOCKEVENTS=y
89CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
90CONFIG_LOCKDEP_SUPPORT=y
91CONFIG_STACKTRACE_SUPPORT=y
92CONFIG_HAVE_LATENCYTOP_SUPPORT=y
93CONFIG_FAST_CMPXCHG_LOCAL=y
94CONFIG_MMU=y
95CONFIG_ZONE_DMA=y
96CONFIG_GENERIC_ISA_DMA=y
97CONFIG_GENERIC_IOMAP=y
98CONFIG_GENERIC_BUG=y
99CONFIG_GENERIC_HWEIGHT=y
100CONFIG_GENERIC_GPIO=y
101CONFIG_ARCH_MAY_HAVE_PC_FDC=y
102# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
103CONFIG_RWSEM_XCHGADD_ALGORITHM=y
104CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
105CONFIG_GENERIC_CALIBRATE_DELAY=y
106# CONFIG_GENERIC_TIME_VSYSCALL is not set
107CONFIG_ARCH_HAS_CPU_RELAX=y
108CONFIG_ARCH_HAS_DEFAULT_IDLE=y
109CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
110CONFIG_HAVE_SETUP_PER_CPU_AREA=y
111# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
112CONFIG_ARCH_HIBERNATION_POSSIBLE=y
113CONFIG_ARCH_SUSPEND_POSSIBLE=y
114# CONFIG_ZONE_DMA32 is not set
115CONFIG_ARCH_POPULATES_NODE_MAP=y
116# CONFIG_AUDIT_ARCH is not set
117CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
118CONFIG_GENERIC_HARDIRQS=y
119CONFIG_GENERIC_IRQ_PROBE=y
120CONFIG_GENERIC_PENDING_IRQ=y
121CONFIG_X86_SMP=y
122CONFIG_USE_GENERIC_SMP_HELPERS=y
123CONFIG_X86_32_SMP=y
124CONFIG_X86_HT=y
125CONFIG_X86_BIOS_REBOOT=y
126CONFIG_X86_TRAMPOLINE=y
127CONFIG_KTIME_SCALAR=y
128CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
129
130#
131# General setup
132#
133CONFIG_EXPERIMENTAL=y
134CONFIG_LOCK_KERNEL=y
135CONFIG_INIT_ENV_ARG_LIMIT=32
136CONFIG_LOCALVERSION="-mrst"
137# CONFIG_LOCALVERSION_AUTO is not set
138CONFIG_SWAP=y
139CONFIG_SYSVIPC=y
140CONFIG_SYSVIPC_SYSCTL=y
141CONFIG_POSIX_MQUEUE=y
142CONFIG_BSD_PROCESS_ACCT=y
143# CONFIG_BSD_PROCESS_ACCT_V3 is not set
144CONFIG_TASKSTATS=y
145CONFIG_TASK_DELAY_ACCT=y
146CONFIG_TASK_XACCT=y
147CONFIG_TASK_IO_ACCOUNTING=y
148CONFIG_AUDIT=y
149CONFIG_AUDITSYSCALL=y
150CONFIG_AUDIT_TREE=y
151
152#
153# RCU Subsystem
154#
155CONFIG_CLASSIC_RCU=y
156# CONFIG_TREE_RCU is not set
157# CONFIG_PREEMPT_RCU is not set
158# CONFIG_TREE_RCU_TRACE is not set
159# CONFIG_PREEMPT_RCU_TRACE is not set
160# CONFIG_IKCONFIG is not set
161CONFIG_LOG_BUF_SHIFT=18
162CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
163CONFIG_GROUP_SCHED=y
164CONFIG_FAIR_GROUP_SCHED=y
165# CONFIG_RT_GROUP_SCHED is not set
166# CONFIG_USER_SCHED is not set
167CONFIG_CGROUP_SCHED=y
168CONFIG_CGROUPS=y
169# CONFIG_CGROUP_DEBUG is not set
170CONFIG_CGROUP_NS=y
171# CONFIG_CGROUP_FREEZER is not set
172# CONFIG_CGROUP_DEVICE is not set
173CONFIG_CPUSETS=y
174CONFIG_PROC_PID_CPUSET=y
175CONFIG_CGROUP_CPUACCT=y
176CONFIG_RESOURCE_COUNTERS=y
177# CONFIG_CGROUP_MEM_RES_CTLR is not set
178# CONFIG_SYSFS_DEPRECATED_V2 is not set
179CONFIG_RELAY=y
180# CONFIG_NAMESPACES is not set
181CONFIG_BLK_DEV_INITRD=y
182CONFIG_INITRAMFS_SOURCE=""
183CONFIG_CC_OPTIMIZE_FOR_SIZE=y
184CONFIG_SYSCTL=y
185CONFIG_ANON_INODES=y
186CONFIG_EMBEDDED=y
187CONFIG_UID16=y
188CONFIG_SYSCTL_SYSCALL=y
189CONFIG_KALLSYMS=y
190CONFIG_KALLSYMS_ALL=y
191CONFIG_KALLSYMS_EXTRA_PASS=y
192CONFIG_HOTPLUG=y
193CONFIG_PRINTK=y
194CONFIG_BUG=y
195CONFIG_ELF_CORE=y
196CONFIG_PCSPKR_PLATFORM=y
197CONFIG_BASE_FULL=y
198CONFIG_FUTEX=y
199CONFIG_EPOLL=y
200CONFIG_SIGNALFD=y
201CONFIG_TIMERFD=y
202CONFIG_EVENTFD=y
203CONFIG_SHMEM=y
204CONFIG_AIO=y
205CONFIG_VM_EVENT_COUNTERS=y
206CONFIG_PCI_QUIRKS=y
207CONFIG_SLUB_DEBUG=y
208# CONFIG_COMPAT_BRK is not set
209# CONFIG_SLAB is not set
210CONFIG_SLUB=y
211# CONFIG_SLOB is not set
212CONFIG_PROFILING=y
213CONFIG_TRACEPOINTS=y
214CONFIG_MARKERS=y
215# CONFIG_OPROFILE is not set
216CONFIG_HAVE_OPROFILE=y
217CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
218CONFIG_HAVE_IOREMAP_PROT=y
219CONFIG_HAVE_KPROBES=y
220CONFIG_HAVE_KRETPROBES=y
221CONFIG_HAVE_ARCH_TRACEHOOK=y
222CONFIG_HAVE_GENERIC_DMA_COHERENT=y
223CONFIG_SLABINFO=y
224CONFIG_RT_MUTEXES=y
225CONFIG_BASE_SMALL=0
226CONFIG_MODULES=y
227CONFIG_STOP_MACHINE=y
228CONFIG_BLOCK=y
229# CONFIG_LBD is not set
230CONFIG_BLK_DEV_IO_TRACE=y
231# CONFIG_BLK_DEV_BSG is not set
232# CONFIG_BLK_DEV_INTEGRITY is not set
233
234#
235# IO Schedulers
236#
237CONFIG_IOSCHED_NOOP=y
238CONFIG_IOSCHED_AS=y
239CONFIG_IOSCHED_DEADLINE=y
240CONFIG_IOSCHED_CFQ=y
241# CONFIG_DEFAULT_AS is not set
242# CONFIG_DEFAULT_DEADLINE is not set
243CONFIG_DEFAULT_CFQ=y
244# CONFIG_DEFAULT_NOOP is not set
245CONFIG_DEFAULT_IOSCHED="cfq"
246# CONFIG_FREEZER is not set
247
248#
249# Processor type and features
250#
251CONFIG_TICK_ONESHOT=y
252CONFIG_NO_HZ=y
253CONFIG_HIGH_RES_TIMERS=y
254CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
255CONFIG_SMP=y
256# CONFIG_SPARSE_IRQ is not set
257CONFIG_X86_FIND_SMP_CONFIG=y
258CONFIG_X86_MPPARSE=y
259CONFIG_X86_PC=y
260# CONFIG_X86_ELAN is not set
261# CONFIG_X86_VOYAGER is not set
262# CONFIG_X86_GENERICARCH is not set
263# CONFIG_X86_VSMP is not set
264# CONFIG_X86_RDC321X is not set
265CONFIG_SCHED_OMIT_FRAME_POINTER=y
266# CONFIG_PARAVIRT_GUEST is not set
267# CONFIG_MEMTEST is not set
268# CONFIG_M386 is not set
269# CONFIG_M486 is not set
270CONFIG_M586=y
271# CONFIG_M586TSC is not set
272# CONFIG_M586MMX is not set
273# CONFIG_M686 is not set
274# CONFIG_MPENTIUMII is not set
275# CONFIG_MPENTIUMIII is not set
276# CONFIG_MPENTIUMM is not set
277# CONFIG_MPENTIUM4 is not set
278# CONFIG_MK6 is not set
279# CONFIG_MK7 is not set
280# CONFIG_MK8 is not set
281# CONFIG_MCRUSOE is not set
282# CONFIG_MEFFICEON is not set
283# CONFIG_MWINCHIPC6 is not set
284# CONFIG_MWINCHIP3D is not set
285# CONFIG_MGEODEGX1 is not set
286# CONFIG_MGEODE_LX is not set
287# CONFIG_MCYRIXIII is not set
288# CONFIG_MVIAC3_2 is not set
289# CONFIG_MVIAC7 is not set
290# CONFIG_MPSC is not set
291# CONFIG_MCORE2 is not set
292# CONFIG_GENERIC_CPU is not set
293CONFIG_X86_GENERIC=y
294CONFIG_X86_CPU=y
295CONFIG_X86_CMPXCHG=y
296CONFIG_X86_L1_CACHE_SHIFT=7
297CONFIG_X86_XADD=y
298# CONFIG_X86_PPRO_FENCE is not set
299CONFIG_X86_F00F_BUG=y
300CONFIG_X86_WP_WORKS_OK=y
301CONFIG_X86_INVLPG=y
302CONFIG_X86_BSWAP=y
303CONFIG_X86_POPAD_OK=y
304CONFIG_X86_ALIGNMENT_16=y
305CONFIG_X86_INTEL_USERCOPY=y
306CONFIG_X86_MINIMUM_CPU_FAMILY=4
307# CONFIG_PROCESSOR_SELECT is not set
308CONFIG_CPU_SUP_INTEL=y
309CONFIG_CPU_SUP_CYRIX_32=y
310CONFIG_CPU_SUP_AMD=y
311CONFIG_CPU_SUP_CENTAUR_32=y
312CONFIG_CPU_SUP_TRANSMETA_32=y
313CONFIG_CPU_SUP_UMC_32=y
314# CONFIG_HPET_TIMER is not set
315CONFIG_APB_TIMER=y
316CONFIG_LNW_IPC=y
317# CONFIG_DMI is not set
318# CONFIG_IOMMU_HELPER is not set
319# CONFIG_IOMMU_API is not set
320CONFIG_NR_CPUS=64
321CONFIG_SCHED_SMT=y
322# CONFIG_SCHED_MC is not set
323# CONFIG_PREEMPT_NONE is not set
324CONFIG_PREEMPT_VOLUNTARY=y
325# CONFIG_PREEMPT is not set
326CONFIG_X86_LOCAL_APIC=y
327CONFIG_X86_IO_APIC=y
328# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
329# CONFIG_X86_MCE is not set
330# CONFIG_VM86 is not set
331# CONFIG_TOSHIBA is not set
332# CONFIG_I8K is not set
333CONFIG_X86_REBOOTFIXUPS=y
334CONFIG_MICROCODE=y
335CONFIG_MICROCODE_INTEL=y
336# CONFIG_MICROCODE_AMD is not set
337CONFIG_MICROCODE_OLD_INTERFACE=y
338CONFIG_X86_MSR=y
339CONFIG_X86_CPUID=y
340CONFIG_NOHIGHMEM=y
341# CONFIG_HIGHMEM4G is not set
342# CONFIG_HIGHMEM64G is not set
343CONFIG_VMSPLIT_3G=y
344# CONFIG_VMSPLIT_3G_OPT is not set
345# CONFIG_VMSPLIT_2G is not set
346# CONFIG_VMSPLIT_2G_OPT is not set
347# CONFIG_VMSPLIT_1G is not set
348CONFIG_PAGE_OFFSET=0xC0000000
349# CONFIG_X86_PAE is not set
350# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
351CONFIG_ARCH_FLATMEM_ENABLE=y
352CONFIG_ARCH_SPARSEMEM_ENABLE=y
353CONFIG_ARCH_SELECT_MEMORY_MODEL=y
354CONFIG_SELECT_MEMORY_MODEL=y
355CONFIG_FLATMEM_MANUAL=y
356# CONFIG_DISCONTIGMEM_MANUAL is not set
357# CONFIG_SPARSEMEM_MANUAL is not set
358CONFIG_FLATMEM=y
359CONFIG_FLAT_NODE_MEM_MAP=y
360CONFIG_SPARSEMEM_STATIC=y
361CONFIG_PAGEFLAGS_EXTENDED=y
362CONFIG_SPLIT_PTLOCK_CPUS=4
363# CONFIG_PHYS_ADDR_T_64BIT is not set
364CONFIG_ZONE_DMA_FLAG=1
365CONFIG_BOUNCE=y
366CONFIG_VIRT_TO_BUS=y
367CONFIG_UNEVICTABLE_LRU=y
368# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
369# CONFIG_X86_RESERVE_LOW_64K is not set
370# CONFIG_MATH_EMULATION is not set
371CONFIG_MTRR=y
372# CONFIG_MTRR_SANITIZER is not set
373# CONFIG_X86_PAT is not set
374# CONFIG_SECCOMP is not set
375CONFIG_HZ_100=y
376# CONFIG_HZ_250 is not set
377# CONFIG_HZ_300 is not set
378# CONFIG_HZ_1000 is not set
379CONFIG_HZ=100
380CONFIG_SCHED_HRTICK=y
381CONFIG_KEXEC=y
382CONFIG_PHYSICAL_START=0x100000
383CONFIG_RELOCATABLE=y
384CONFIG_PHYSICAL_ALIGN=0x100000
385CONFIG_HOTPLUG_CPU=y
386# CONFIG_COMPAT_VDSO is not set
387# CONFIG_CMDLINE_BOOL is not set
388CONFIG_MRST=y
389CONFIG_MRST_SPI_UART_BOOT_MSG=y
390
391#
392# Power management and ACPI options
393#
394CONFIG_PM=y
395CONFIG_PM_DEBUG=y
396CONFIG_PM_VERBOSE=y
397# CONFIG_SUSPEND is not set
398# CONFIG_HIBERNATION is not set
399# CONFIG_ACPI is not set
400CONFIG_SFI=y
401# CONFIG_SFI_DEBUG is not set
402
403#
404# CPU Frequency scaling
405#
406CONFIG_CPU_FREQ=y
407CONFIG_CPU_IDLE=n
408
409#
410# Bus options (PCI etc.)
411#
412CONFIG_PCI=y
413# CONFIG_PCI_GOBIOS is not set
414# CONFIG_PCI_GOMMCONFIG is not set
415# CONFIG_PCI_GODIRECT is not set
416# CONFIG_PCI_GOOLPC is not set
417CONFIG_PCI_GOANY=y
418CONFIG_PCI_BIOS=y
419CONFIG_PCI_DIRECT=y
420CONFIG_PCI_MMCONFIG=y
421CONFIG_PCI_DOMAINS=y
422CONFIG_PCIEPORTBUS=y
423# CONFIG_PCIEAER is not set
424# CONFIG_PCIEASPM is not set
425CONFIG_ARCH_SUPPORTS_MSI=y
426CONFIG_PCI_MSI=y
427# CONFIG_PCI_LEGACY is not set
428CONFIG_PCI_DEBUG=y
429# CONFIG_PCI_STUB is not set
430# CONFIG_HT_IRQ is not set
431CONFIG_ISA_DMA_API=y
432# CONFIG_ISA is not set
433# CONFIG_MCA is not set
434# CONFIG_SCx200 is not set
435# CONFIG_OLPC is not set
436CONFIG_K8_NB=y
437# CONFIG_PCCARD is not set
438# CONFIG_HOTPLUG_PCI is not set
439
440#
441# Executable file formats / Emulations
442#
443CONFIG_BINFMT_ELF=y
444# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
445CONFIG_HAVE_AOUT=y
446# CONFIG_BINFMT_AOUT is not set
447CONFIG_BINFMT_MISC=y
448CONFIG_HAVE_ATOMIC_IOMAP=y
449CONFIG_NET=y
450
451#
452# Networking options
453#
454CONFIG_COMPAT_NET_DEV_OPS=y
455CONFIG_PACKET=y
456CONFIG_PACKET_MMAP=y
457CONFIG_UNIX=y
458CONFIG_XFRM=y
459CONFIG_XFRM_USER=y
460# CONFIG_XFRM_SUB_POLICY is not set
461# CONFIG_XFRM_MIGRATE is not set
462# CONFIG_XFRM_STATISTICS is not set
463# CONFIG_NET_KEY is not set
464CONFIG_INET=y
465CONFIG_IP_MULTICAST=y
466CONFIG_IP_ADVANCED_ROUTER=y
467CONFIG_ASK_IP_FIB_HASH=y
468# CONFIG_IP_FIB_TRIE is not set
469CONFIG_IP_FIB_HASH=y
470CONFIG_IP_MULTIPLE_TABLES=y
471CONFIG_IP_ROUTE_MULTIPATH=y
472CONFIG_IP_ROUTE_VERBOSE=y
473CONFIG_IP_PNP=y
474CONFIG_IP_PNP_DHCP=y
475CONFIG_IP_PNP_BOOTP=y
476CONFIG_IP_PNP_RARP=y
477# CONFIG_NET_IPIP is not set
478# CONFIG_NET_IPGRE is not set
479CONFIG_IP_MROUTE=y
480CONFIG_IP_PIMSM_V1=y
481CONFIG_IP_PIMSM_V2=y
482# CONFIG_ARPD is not set
483CONFIG_SYN_COOKIES=y
484# CONFIG_INET_AH is not set
485# CONFIG_INET_ESP is not set
486# CONFIG_INET_IPCOMP is not set
487# CONFIG_INET_XFRM_TUNNEL is not set
488CONFIG_INET_TUNNEL=y
489# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
490# CONFIG_INET_XFRM_MODE_TUNNEL is not set
491# CONFIG_INET_XFRM_MODE_BEET is not set
492CONFIG_INET_LRO=y
493# CONFIG_INET_DIAG is not set
494CONFIG_TCP_CONG_ADVANCED=y
495# CONFIG_TCP_CONG_BIC is not set
496CONFIG_TCP_CONG_CUBIC=y
497# CONFIG_TCP_CONG_WESTWOOD is not set
498# CONFIG_TCP_CONG_HTCP is not set
499# CONFIG_TCP_CONG_HSTCP is not set
500# CONFIG_TCP_CONG_HYBLA is not set
501# CONFIG_TCP_CONG_VEGAS is not set
502# CONFIG_TCP_CONG_SCALABLE is not set
503# CONFIG_TCP_CONG_LP is not set
504# CONFIG_TCP_CONG_VENO is not set
505# CONFIG_TCP_CONG_YEAH is not set
506# CONFIG_TCP_CONG_ILLINOIS is not set
507# CONFIG_DEFAULT_BIC is not set
508CONFIG_DEFAULT_CUBIC=y
509# CONFIG_DEFAULT_HTCP is not set
510# CONFIG_DEFAULT_VEGAS is not set
511# CONFIG_DEFAULT_WESTWOOD is not set
512# CONFIG_DEFAULT_RENO is not set
513CONFIG_DEFAULT_TCP_CONG="cubic"
514CONFIG_TCP_MD5SIG=y
515CONFIG_IPV6=y
516# CONFIG_IPV6_PRIVACY is not set
517# CONFIG_IPV6_ROUTER_PREF is not set
518# CONFIG_IPV6_OPTIMISTIC_DAD is not set
519CONFIG_INET6_AH=y
520CONFIG_INET6_ESP=y
521# CONFIG_INET6_IPCOMP is not set
522# CONFIG_IPV6_MIP6 is not set
523# CONFIG_INET6_XFRM_TUNNEL is not set
524# CONFIG_INET6_TUNNEL is not set
525CONFIG_INET6_XFRM_MODE_TRANSPORT=y
526CONFIG_INET6_XFRM_MODE_TUNNEL=y
527CONFIG_INET6_XFRM_MODE_BEET=y
528# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
529CONFIG_IPV6_SIT=y
530CONFIG_IPV6_NDISC_NODETYPE=y
531# CONFIG_IPV6_TUNNEL is not set
532# CONFIG_IPV6_MULTIPLE_TABLES is not set
533# CONFIG_IPV6_MROUTE is not set
534CONFIG_NETLABEL=y
535CONFIG_NETWORK_SECMARK=y
536CONFIG_NETFILTER=y
537# CONFIG_NETFILTER_DEBUG is not set
538# CONFIG_NETFILTER_ADVANCED is not set
539
540#
541# Core Netfilter Configuration
542#
543CONFIG_NETFILTER_NETLINK=y
544CONFIG_NETFILTER_NETLINK_LOG=y
545CONFIG_NF_CONNTRACK=y
546CONFIG_NF_CONNTRACK_SECMARK=y
547CONFIG_NF_CONNTRACK_FTP=y
548CONFIG_NF_CONNTRACK_IRC=y
549CONFIG_NF_CONNTRACK_SIP=y
550CONFIG_NF_CT_NETLINK=y
551CONFIG_NETFILTER_XTABLES=y
552CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
553CONFIG_NETFILTER_XT_TARGET_MARK=y
554CONFIG_NETFILTER_XT_TARGET_NFLOG=y
555CONFIG_NETFILTER_XT_TARGET_SECMARK=y
556CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
557CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
558CONFIG_NETFILTER_XT_MATCH_MARK=y
559CONFIG_NETFILTER_XT_MATCH_POLICY=y
560CONFIG_NETFILTER_XT_MATCH_STATE=y
561# CONFIG_IP_VS is not set
562
563#
564# IP: Netfilter Configuration
565#
566CONFIG_NF_DEFRAG_IPV4=y
567CONFIG_NF_CONNTRACK_IPV4=y
568CONFIG_NF_CONNTRACK_PROC_COMPAT=y
569CONFIG_IP_NF_IPTABLES=y
570CONFIG_IP_NF_FILTER=y
571CONFIG_IP_NF_TARGET_REJECT=y
572CONFIG_IP_NF_TARGET_LOG=y
573CONFIG_IP_NF_TARGET_ULOG=y
574CONFIG_NF_NAT=y
575CONFIG_NF_NAT_NEEDED=y
576CONFIG_IP_NF_TARGET_MASQUERADE=y
577CONFIG_NF_NAT_FTP=y
578CONFIG_NF_NAT_IRC=y
579# CONFIG_NF_NAT_TFTP is not set
580# CONFIG_NF_NAT_AMANDA is not set
581# CONFIG_NF_NAT_PPTP is not set
582# CONFIG_NF_NAT_H323 is not set
583CONFIG_NF_NAT_SIP=y
584CONFIG_IP_NF_MANGLE=y
585
586#
587# IPv6: Netfilter Configuration
588#
589CONFIG_NF_CONNTRACK_IPV6=y
590CONFIG_IP6_NF_IPTABLES=y
591CONFIG_IP6_NF_MATCH_IPV6HEADER=y
592CONFIG_IP6_NF_TARGET_LOG=y
593CONFIG_IP6_NF_FILTER=y
594CONFIG_IP6_NF_TARGET_REJECT=y
595CONFIG_IP6_NF_MANGLE=y
596# CONFIG_IP_DCCP is not set
597# CONFIG_IP_SCTP is not set
598# CONFIG_TIPC is not set
599# CONFIG_ATM is not set
600# CONFIG_BRIDGE is not set
601# CONFIG_NET_DSA is not set
602# CONFIG_VLAN_8021Q is not set
603# CONFIG_DECNET is not set
604# CONFIG_LLC2 is not set
605# CONFIG_IPX is not set
606# CONFIG_ATALK is not set
607# CONFIG_X25 is not set
608# CONFIG_LAPB is not set
609# CONFIG_ECONET is not set
610# CONFIG_WAN_ROUTER is not set
611CONFIG_NET_SCHED=y
612
613#
614# Queueing/Scheduling
615#
616# CONFIG_NET_SCH_CBQ is not set
617# CONFIG_NET_SCH_HTB is not set
618# CONFIG_NET_SCH_HFSC is not set
619# CONFIG_NET_SCH_PRIO is not set
620# CONFIG_NET_SCH_MULTIQ is not set
621# CONFIG_NET_SCH_RED is not set
622# CONFIG_NET_SCH_SFQ is not set
623# CONFIG_NET_SCH_TEQL is not set
624# CONFIG_NET_SCH_TBF is not set
625# CONFIG_NET_SCH_GRED is not set
626# CONFIG_NET_SCH_DSMARK is not set
627# CONFIG_NET_SCH_NETEM is not set
628# CONFIG_NET_SCH_DRR is not set
629# CONFIG_NET_SCH_INGRESS is not set
630
631#
632# Classification
633#
634CONFIG_NET_CLS=y
635# CONFIG_NET_CLS_BASIC is not set
636# CONFIG_NET_CLS_TCINDEX is not set
637# CONFIG_NET_CLS_ROUTE4 is not set
638# CONFIG_NET_CLS_FW is not set
639# CONFIG_NET_CLS_U32 is not set
640# CONFIG_NET_CLS_RSVP is not set
641# CONFIG_NET_CLS_RSVP6 is not set
642# CONFIG_NET_CLS_FLOW is not set
643# CONFIG_NET_CLS_CGROUP is not set
644CONFIG_NET_EMATCH=y
645CONFIG_NET_EMATCH_STACK=32
646# CONFIG_NET_EMATCH_CMP is not set
647# CONFIG_NET_EMATCH_NBYTE is not set
648# CONFIG_NET_EMATCH_U32 is not set
649# CONFIG_NET_EMATCH_META is not set
650# CONFIG_NET_EMATCH_TEXT is not set
651CONFIG_NET_CLS_ACT=y
652# CONFIG_NET_ACT_POLICE is not set
653# CONFIG_NET_ACT_GACT is not set
654# CONFIG_NET_ACT_MIRRED is not set
655# CONFIG_NET_ACT_IPT is not set
656# CONFIG_NET_ACT_NAT is not set
657# CONFIG_NET_ACT_PEDIT is not set
658# CONFIG_NET_ACT_SIMP is not set
659# CONFIG_NET_ACT_SKBEDIT is not set
660CONFIG_NET_SCH_FIFO=y
661# CONFIG_DCB is not set
662
663#
664# Network testing
665#
666# CONFIG_NET_PKTGEN is not set
667CONFIG_HAMRADIO=y
668
669#
670# Packet Radio protocols
671#
672# CONFIG_AX25 is not set
673# CONFIG_CAN is not set
674# CONFIG_IRDA is not set
675CONFIG_BT=y
676CONFIG_BT_L2CAP=y
677CONFIG_BT_SCO=y
678CONFIG_BT_RFCOMM=y
679CONFIG_BT_RFCOMM_TTY=y
680CONFIG_BT_BNEP=y
681CONFIG_BT_BNEP_MC_FILTER=y
682CONFIG_BT_BNEP_PROTO_FILTER=y
683CONFIG_BT_HIDP=y
684
685#
686# Bluetooth device drivers
687#
688CONFIG_BT_HCIBTUSB=y
689CONFIG_BT_HCIBTSDIO=y
690CONFIG_BT_HCIUART=y
691CONFIG_BT_HCIUART_H4=y
692CONFIG_BT_HCIUART_BCSP=y
693CONFIG_BT_HCIUART_LL=y
694CONFIG_BT_HCIBCM203X=y
695CONFIG_BT_HCIBPA10X=y
696CONFIG_BT_HCIBFUSB=y
697CONFIG_BT_HCIVHCI=y
698# CONFIG_AF_RXRPC is not set
699# CONFIG_PHONET is not set
700CONFIG_FIB_RULES=y
701CONFIG_WIRELESS=y
702CONFIG_CFG80211=y
703# CONFIG_CFG80211_REG_DEBUG is not set
704CONFIG_NL80211=y
705CONFIG_WIRELESS_OLD_REGULATORY=y
706CONFIG_WIRELESS_EXT=y
707CONFIG_WIRELESS_EXT_SYSFS=y
708# CONFIG_LIB80211 is not set
709CONFIG_MAC80211=y
710
711#
712# Rate control algorithm selection
713#
714# CONFIG_MAC80211_RC_PID is not set
715CONFIG_MAC80211_RC_MINSTREL=y
716# CONFIG_MAC80211_RC_DEFAULT_PID is not set
717CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
718CONFIG_MAC80211_RC_DEFAULT="minstrel"
719# CONFIG_MAC80211_MESH is not set
720CONFIG_MAC80211_LEDS=y
721# CONFIG_MAC80211_DEBUGFS is not set
722# CONFIG_MAC80211_DEBUG_MENU is not set
723# CONFIG_WIMAX is not set
724# CONFIG_RFKILL is not set
725# CONFIG_NET_9P is not set
726
727#
728# Device Drivers
729#
730
731#
732# Generic Driver Options
733#
734CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
735CONFIG_STANDALONE=y
736CONFIG_PREVENT_FIRMWARE_BUILD=y
737CONFIG_FW_LOADER=y
738CONFIG_FIRMWARE_IN_KERNEL=y
739CONFIG_EXTRA_FIRMWARE="mrvl/sd8688.bin mrvl/helper_sd.bin"
740CONFIG_EXTRA_FIRMWARE_DIR="firmware"
741# CONFIG_DEBUG_DRIVER is not set
742CONFIG_DEBUG_DEVRES=y
743# CONFIG_SYS_HYPERVISOR is not set
744CONFIG_CONNECTOR=y
745CONFIG_PROC_EVENTS=y
746# CONFIG_MTD is not set
747# CONFIG_PARPORT is not set
748CONFIG_BLK_DEV=y
749# CONFIG_BLK_DEV_FD is not set
750# CONFIG_BLK_CPQ_DA is not set
751# CONFIG_BLK_CPQ_CISS_DA is not set
752# CONFIG_BLK_DEV_DAC960 is not set
753# CONFIG_BLK_DEV_UMEM is not set
754# CONFIG_BLK_DEV_COW_COMMON is not set
755CONFIG_BLK_DEV_LOOP=y
756# CONFIG_BLK_DEV_CRYPTOLOOP is not set
757# CONFIG_BLK_DEV_NBD is not set
758# CONFIG_BLK_DEV_SX8 is not set
759# CONFIG_BLK_DEV_UB is not set
760CONFIG_BLK_DEV_RAM=y
761CONFIG_BLK_DEV_RAM_COUNT=16
762CONFIG_BLK_DEV_RAM_SIZE=16384
763# CONFIG_BLK_DEV_XIP is not set
764# CONFIG_CDROM_PKTCDVD is not set
765# CONFIG_ATA_OVER_ETH is not set
766# CONFIG_BLK_DEV_HD is not set
767CONFIG_MISC_DEVICES=y
768# CONFIG_IBM_ASM is not set
769# CONFIG_PHANTOM is not set
770# CONFIG_SGI_IOC4 is not set
771# CONFIG_TIFM_CORE is not set
772# CONFIG_ICS932S401 is not set
773# CONFIG_ENCLOSURE_SERVICES is not set
774# CONFIG_HP_ILO is not set
775# CONFIG_C2PORT is not set
776
777#
778# EEPROM support
779#
780# CONFIG_EEPROM_AT24 is not set
781# CONFIG_EEPROM_AT25 is not set
782# CONFIG_EEPROM_LEGACY is not set
783# CONFIG_EEPROM_93CX6 is not set
784CONFIG_HAVE_IDE=y
785# CONFIG_IDE is not set
786
787#
788# SCSI device support
789#
790# CONFIG_RAID_ATTRS is not set
791CONFIG_SCSI=y
792CONFIG_SCSI_DMA=y
793# CONFIG_SCSI_TGT is not set
794# CONFIG_SCSI_NETLINK is not set
795CONFIG_SCSI_PROC_FS=y
796
797#
798# SCSI support type (disk, tape, CD-ROM)
799#
800CONFIG_BLK_DEV_SD=y
801# CONFIG_CHR_DEV_ST is not set
802# CONFIG_CHR_DEV_OSST is not set
803CONFIG_BLK_DEV_SR=y
804CONFIG_BLK_DEV_SR_VENDOR=y
805CONFIG_CHR_DEV_SG=y
806# CONFIG_CHR_DEV_SCH is not set
807
808#
809# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
810#
811# CONFIG_SCSI_MULTI_LUN is not set
812CONFIG_SCSI_CONSTANTS=y
813# CONFIG_SCSI_LOGGING is not set
814# CONFIG_SCSI_SCAN_ASYNC is not set
815
816#
817# SCSI Transports
818#
819CONFIG_SCSI_SPI_ATTRS=y
820# CONFIG_SCSI_FC_ATTRS is not set
821CONFIG_SCSI_ISCSI_ATTRS=y
822# CONFIG_SCSI_SAS_LIBSAS is not set
823# CONFIG_SCSI_SRP_ATTRS is not set
824# CONFIG_SCSI_LOWLEVEL is not set
825# CONFIG_SCSI_DH is not set
826# CONFIG_ATA is not set
827# CONFIG_MD is not set
828# CONFIG_FUSION is not set
829
830#
831# IEEE 1394 (FireWire) support
832#
833
834#
835# Enable only one of the two stacks, unless you know what you are doing
836#
837# CONFIG_FIREWIRE is not set
838# CONFIG_IEEE1394 is not set
839# CONFIG_I2O is not set
840# CONFIG_MACINTOSH_DRIVERS is not set
841CONFIG_NETDEVICES=y
842# CONFIG_IFB is not set
843# CONFIG_DUMMY is not set
844# CONFIG_BONDING is not set
845# CONFIG_MACVLAN is not set
846# CONFIG_EQUALIZER is not set
847# CONFIG_TUN is not set
848# CONFIG_VETH is not set
849# CONFIG_ARCNET is not set
850# CONFIG_NET_ETHERNET is not set
851CONFIG_MII=y
852# CONFIG_NETDEV_1000 is not set
853# CONFIG_NETDEV_10000 is not set
854# CONFIG_TR is not set
855
856#
857# Wireless LAN
858#
859# CONFIG_WLAN_PRE80211 is not set
860CONFIG_WLAN_80211=y
861# CONFIG_IWLWIFI_LEDS is not set
862
863#
864# Enable WiMAX (Networking options) to see the WiMAX drivers
865#
866
867#
868# USB Network Adapters
869#
870# CONFIG_USB_CATC is not set
871# CONFIG_USB_KAWETH is not set
872# CONFIG_USB_PEGASUS is not set
873# CONFIG_USB_RTL8150 is not set
874CONFIG_USB_USBNET=y
875CONFIG_USB_NET_AX8817X=y
876CONFIG_USB_NET_CDCETHER=y
877# CONFIG_USB_NET_DM9601 is not set
878# CONFIG_USB_NET_SMSC95XX is not set
879# CONFIG_USB_NET_GL620A is not set
880CONFIG_USB_NET_NET1080=y
881# CONFIG_USB_NET_PLUSB is not set
882# CONFIG_USB_NET_MCS7830 is not set
883# CONFIG_USB_NET_RNDIS_HOST is not set
884CONFIG_USB_NET_CDC_SUBSET=y
885# CONFIG_USB_ALI_M5632 is not set
886# CONFIG_USB_AN2720 is not set
887# CONFIG_USB_BELKIN is not set
888# CONFIG_USB_ARMLINUX is not set
889# CONFIG_USB_EPSON2888 is not set
890# CONFIG_USB_KC2190 is not set
891# CONFIG_USB_NET_ZAURUS is not set
892# CONFIG_WAN is not set
893# CONFIG_FDDI is not set
894# CONFIG_HIPPI is not set
895# CONFIG_PPP is not set
896# CONFIG_SLIP is not set
897# CONFIG_NET_FC is not set
898# CONFIG_NETCONSOLE is not set
899# CONFIG_NETPOLL is not set
900# CONFIG_NET_POLL_CONTROLLER is not set
901# CONFIG_ISDN is not set
902# CONFIG_PHONE is not set
903
904#
905# Input device support
906#
907CONFIG_INPUT=y
908CONFIG_INPUT_FF_MEMLESS=y
909CONFIG_INPUT_POLLDEV=y
910
911#
912# Userland interfaces
913#
914CONFIG_INPUT_MOUSEDEV=y
915# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
916CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
917CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
918# CONFIG_INPUT_JOYDEV is not set
919CONFIG_INPUT_EVDEV=y
920# CONFIG_INPUT_EVBUG is not set
921
922#
923# Input Device Drivers
924#
925CONFIG_INPUT_KEYBOARD=y
926CONFIG_KEYBOARD_ATKBD=y
927# CONFIG_KEYBOARD_SUNKBD is not set
928# CONFIG_KEYBOARD_LKKBD is not set
929# CONFIG_KEYBOARD_XTKBD is not set
930# CONFIG_KEYBOARD_NEWTON is not set
931# CONFIG_KEYBOARD_STOWAWAY is not set
932CONFIG_KEYBOARD_GPIO=y
933CONFIG_INPUT_MOUSE=y
934CONFIG_MOUSE_PS2=y
935CONFIG_MOUSE_PS2_ALPS=y
936CONFIG_MOUSE_PS2_LOGIPS2PP=y
937CONFIG_MOUSE_PS2_SYNAPTICS=y
938CONFIG_MOUSE_PS2_LIFEBOOK=y
939CONFIG_MOUSE_PS2_TRACKPOINT=y
940# CONFIG_MOUSE_PS2_ELANTECH is not set
941# CONFIG_MOUSE_PS2_TOUCHKIT is not set
942# CONFIG_MOUSE_SERIAL is not set
943# CONFIG_MOUSE_APPLETOUCH is not set
944# CONFIG_MOUSE_BCM5974 is not set
945# CONFIG_MOUSE_VSXXXAA is not set
946# CONFIG_MOUSE_GPIO is not set
947CONFIG_INPUT_JOYSTICK=y
948# CONFIG_JOYSTICK_ANALOG is not set
949# CONFIG_JOYSTICK_A3D is not set
950# CONFIG_JOYSTICK_ADI is not set
951# CONFIG_JOYSTICK_COBRA is not set
952# CONFIG_JOYSTICK_GF2K is not set
953# CONFIG_JOYSTICK_GRIP is not set
954# CONFIG_JOYSTICK_GRIP_MP is not set
955# CONFIG_JOYSTICK_GUILLEMOT is not set
956# CONFIG_JOYSTICK_INTERACT is not set
957# CONFIG_JOYSTICK_SIDEWINDER is not set
958# CONFIG_JOYSTICK_TMDC is not set
959# CONFIG_JOYSTICK_IFORCE is not set
960# CONFIG_JOYSTICK_WARRIOR is not set
961# CONFIG_JOYSTICK_MAGELLAN is not set
962# CONFIG_JOYSTICK_SPACEORB is not set
963# CONFIG_JOYSTICK_SPACEBALL is not set
964# CONFIG_JOYSTICK_STINGER is not set
965# CONFIG_JOYSTICK_TWIDJOY is not set
966# CONFIG_JOYSTICK_ZHENHUA is not set
967# CONFIG_JOYSTICK_JOYDUMP is not set
968# CONFIG_JOYSTICK_XPAD is not set
969CONFIG_INPUT_TABLET=y
970# CONFIG_TABLET_USB_ACECAD is not set
971# CONFIG_TABLET_USB_AIPTEK is not set
972# CONFIG_TABLET_USB_GTCO is not set
973# CONFIG_TABLET_USB_KBTAB is not set
974# CONFIG_TABLET_USB_WACOM is not set
975CONFIG_INPUT_TOUCHSCREEN=y
976# CONFIG_TOUCHSCREEN_ADS7846 is not set
977# CONFIG_TOUCHSCREEN_FUJITSU is not set
978# CONFIG_TOUCHSCREEN_GUNZE is not set
979# CONFIG_TOUCHSCREEN_ELO is not set
980# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
981# CONFIG_TOUCHSCREEN_MTOUCH is not set
982# CONFIG_TOUCHSCREEN_INEXIO is not set
983# CONFIG_TOUCHSCREEN_MK712 is not set
984# CONFIG_TOUCHSCREEN_PENMOUNT is not set
985# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
986# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
987# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
988# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
989# CONFIG_TOUCHSCREEN_TSC2007 is not set
990CONFIG_INPUT_MISC=y
991# CONFIG_INPUT_PCSPKR is not set
992# CONFIG_INPUT_WISTRON_BTNS is not set
993# CONFIG_INPUT_ATI_REMOTE is not set
994# CONFIG_INPUT_ATI_REMOTE2 is not set
995# CONFIG_INPUT_KEYSPAN_REMOTE is not set
996# CONFIG_INPUT_POWERMATE is not set
997# CONFIG_INPUT_YEALINK is not set
998# CONFIG_INPUT_CM109 is not set
999# CONFIG_INPUT_UINPUT is not set
1000
1001#
1002# Hardware I/O ports
1003#
1004CONFIG_SERIO=y
1005CONFIG_SERIO_SERPORT=y
1006# CONFIG_SERIO_CT82C710 is not set
1007# CONFIG_SERIO_PCIPS2 is not set
1008CONFIG_SERIO_LIBPS2=y
1009# CONFIG_SERIO_RAW is not set
1010# CONFIG_GAMEPORT is not set
1011
1012#
1013# Character devices
1014#
1015CONFIG_VT=y
1016CONFIG_CONSOLE_TRANSLATIONS=y
1017CONFIG_VT_CONSOLE=y
1018CONFIG_HW_CONSOLE=y
1019CONFIG_VT_HW_CONSOLE_BINDING=y
1020CONFIG_DEVKMEM=y
1021CONFIG_SERIAL_NONSTANDARD=y
1022# CONFIG_COMPUTONE is not set
1023# CONFIG_ROCKETPORT is not set
1024# CONFIG_CYCLADES is not set
1025# CONFIG_DIGIEPCA is not set
1026# CONFIG_MOXA_INTELLIO is not set
1027# CONFIG_MOXA_SMARTIO is not set
1028# CONFIG_ISI is not set
1029# CONFIG_SYNCLINK is not set
1030# CONFIG_SYNCLINKMP is not set
1031# CONFIG_SYNCLINK_GT is not set
1032# CONFIG_N_HDLC is not set
1033# CONFIG_RISCOM8 is not set
1034# CONFIG_SPECIALIX is not set
1035# CONFIG_SX is not set
1036# CONFIG_RIO is not set
1037# CONFIG_STALDRV is not set
1038# CONFIG_NOZOMI is not set
1039
1040#
1041# Serial drivers
1042#
1043CONFIG_FIX_EARLYCON_MEM=y
1044
1045#
1046# Non-8250 serial port support
1047#
1048CONFIG_SERIAL_CORE=y
1049CONFIG_SERIAL_CORE_CONSOLE=y
1050# CONFIG_SERIAL_JSM is not set
1051CONFIG_UNIX98_PTYS=y
1052# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
1053# CONFIG_LEGACY_PTYS is not set
1054# CONFIG_IPMI_HANDLER is not set
1055CONFIG_HW_RANDOM=y
1056CONFIG_HW_RANDOM_INTEL=y
1057CONFIG_HW_RANDOM_AMD=y
1058CONFIG_HW_RANDOM_GEODE=y
1059CONFIG_HW_RANDOM_VIA=y
1060CONFIG_NVRAM=y
1061# CONFIG_R3964 is not set
1062# CONFIG_APPLICOM is not set
1063# CONFIG_SONYPI is not set
1064# CONFIG_MWAVE is not set
1065# CONFIG_PC8736x_GPIO is not set
1066# CONFIG_NSC_GPIO is not set
1067# CONFIG_CS5535_GPIO is not set
1068# CONFIG_RAW_DRIVER is not set
1069# CONFIG_HANGCHECK_TIMER is not set
1070# CONFIG_TCG_TPM is not set
1071# CONFIG_TELCLOCK is not set
1072CONFIG_DEVPORT=y
1073CONFIG_I2C=y
1074CONFIG_I2C_BOARDINFO=y
1075# CONFIG_I2C_CHARDEV is not set
1076CONFIG_I2C_HELPER_AUTO=y
1077CONFIG_I2C_ALGOBIT=y
1078
1079#
1080# I2C Hardware Bus support
1081#
1082
1083#
1084# PC SMBus host controller drivers
1085#
1086# CONFIG_I2C_ALI1535 is not set
1087# CONFIG_I2C_ALI1563 is not set
1088# CONFIG_I2C_ALI15X3 is not set
1089# CONFIG_I2C_AMD756 is not set
1090# CONFIG_I2C_AMD8111 is not set
1091CONFIG_I2C_I801=y
1092# CONFIG_I2C_ISCH is not set
1093# CONFIG_I2C_PIIX4 is not set
1094# CONFIG_I2C_NFORCE2 is not set
1095# CONFIG_I2C_SIS5595 is not set
1096# CONFIG_I2C_SIS630 is not set
1097# CONFIG_I2C_SIS96X is not set
1098# CONFIG_I2C_VIA is not set
1099# CONFIG_I2C_VIAPRO is not set
1100
1101#
1102# I2C system bus drivers (mostly embedded / system-on-chip)
1103#
1104# CONFIG_I2C_GPIO is not set
1105# CONFIG_I2C_OCORES is not set
1106# CONFIG_I2C_SIMTEC is not set
1107
1108#
1109# External I2C/SMBus adapter drivers
1110#
1111# CONFIG_I2C_PARPORT_LIGHT is not set
1112# CONFIG_I2C_TAOS_EVM is not set
1113# CONFIG_I2C_TINY_USB is not set
1114
1115#
1116# Graphics adapter I2C/DDC channel drivers
1117#
1118# CONFIG_I2C_VOODOO3 is not set
1119
1120#
1121# Other I2C/SMBus bus drivers
1122#
1123# CONFIG_I2C_PCA_PLATFORM is not set
1124# CONFIG_SCx200_ACB is not set
1125
1126#
1127# Miscellaneous I2C Chip support
1128#
1129# CONFIG_DS1682 is not set
1130# CONFIG_SENSORS_PCF8574 is not set
1131# CONFIG_PCF8575 is not set
1132# CONFIG_SENSORS_PCA9539 is not set
1133# CONFIG_SENSORS_PCF8591 is not set
1134# CONFIG_SENSORS_MAX6875 is not set
1135# CONFIG_SENSORS_TSL2550 is not set
1136# CONFIG_I2C_DEBUG_CORE is not set
1137# CONFIG_I2C_DEBUG_ALGO is not set
1138# CONFIG_I2C_DEBUG_BUS is not set
1139# CONFIG_I2C_DEBUG_CHIP is not set
1140CONFIG_SPI=y
1141# CONFIG_SPI_DEBUG is not set
1142CONFIG_SPI_MASTER=y
1143
1144#
1145# SPI Master Controller Drivers
1146#
1147CONFIG_SPI_BITBANG=y
1148# CONFIG_SPI_GPIO is not set
1149CONFIG_SPI_MRST=y
1150CONFIG_SPI_MRST_DMA=y
1151
1152#
1153# SPI Protocol Masters
1154#
1155CONFIG_SPI_MRST_MAX3110=y
1156# CONFIG_MRST_MAX3110_IRQ is not set
1157# CONFIG_SPI_SPIDEV is not set
1158# CONFIG_SPI_TLE62X0 is not set
1159CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
1160CONFIG_GPIOLIB=y
1161# CONFIG_DEBUG_GPIO is not set
1162CONFIG_GPIO_SYSFS=y
1163CONFIG_GPE=y
1164CONFIG_GPIO_LANGWELL=y
1165CONFIG_GPIO_LNWPMIC=y
1166# CONFIG_GPIO_LNWPMIC_NEC_WORKAROUND is not set
1167CONFIG_MRST_PMIC_BUTTON=y
1168
1169#
1170# Memory mapped GPIO expanders:
1171#
1172
1173#
1174# I2C GPIO expanders:
1175#
1176# CONFIG_GPIO_MAX732X is not set
1177# CONFIG_GPIO_PCA953X is not set
1178# CONFIG_GPIO_PCF857X is not set
1179
1180#
1181# PCI GPIO expanders:
1182#
1183# CONFIG_GPIO_BT8XX is not set
1184
1185#
1186# SPI GPIO expanders:
1187#
1188# CONFIG_GPIO_MAX7301 is not set
1189# CONFIG_GPIO_MCP23S08 is not set
1190# CONFIG_W1 is not set
1191# CONFIG_POWER_SUPPLY is not set
1192CONFIG_HWMON=y
1193# CONFIG_HWMON_VID is not set
1194# CONFIG_SENSORS_ABITUGURU is not set
1195# CONFIG_SENSORS_ABITUGURU3 is not set
1196# CONFIG_SENSORS_AD7414 is not set
1197# CONFIG_SENSORS_AD7418 is not set
1198# CONFIG_SENSORS_ADCXX is not set
1199# CONFIG_SENSORS_ADM1021 is not set
1200# CONFIG_SENSORS_ADM1025 is not set
1201# CONFIG_SENSORS_ADM1026 is not set
1202# CONFIG_SENSORS_ADM1029 is not set
1203# CONFIG_SENSORS_ADM1031 is not set
1204# CONFIG_SENSORS_ADM9240 is not set
1205# CONFIG_SENSORS_ADT7462 is not set
1206# CONFIG_SENSORS_ADT7470 is not set
1207# CONFIG_SENSORS_ADT7473 is not set
1208# CONFIG_SENSORS_ADT7475 is not set
1209# CONFIG_SENSORS_K8TEMP is not set
1210# CONFIG_SENSORS_ASB100 is not set
1211# CONFIG_SENSORS_ATXP1 is not set
1212# CONFIG_SENSORS_DS1621 is not set
1213# CONFIG_SENSORS_I5K_AMB is not set
1214# CONFIG_SENSORS_F71805F is not set
1215# CONFIG_SENSORS_F71882FG is not set
1216# CONFIG_SENSORS_F75375S is not set
1217# CONFIG_SENSORS_FSCHER is not set
1218# CONFIG_SENSORS_FSCPOS is not set
1219# CONFIG_SENSORS_FSCHMD is not set
1220# CONFIG_SENSORS_GL518SM is not set
1221# CONFIG_SENSORS_GL520SM is not set
1222# CONFIG_SENSORS_CORETEMP is not set
1223# CONFIG_SENSORS_IT87 is not set
1224# CONFIG_SENSORS_LM63 is not set
1225# CONFIG_SENSORS_LM70 is not set
1226# CONFIG_SENSORS_LM75 is not set
1227# CONFIG_SENSORS_LM77 is not set
1228# CONFIG_SENSORS_LM78 is not set
1229# CONFIG_SENSORS_LM80 is not set
1230# CONFIG_SENSORS_LM83 is not set
1231# CONFIG_SENSORS_LM85 is not set
1232# CONFIG_SENSORS_LM87 is not set
1233# CONFIG_SENSORS_LM90 is not set
1234# CONFIG_SENSORS_LM92 is not set
1235# CONFIG_SENSORS_LM93 is not set
1236# CONFIG_SENSORS_LTC4245 is not set
1237# CONFIG_SENSORS_MAX1111 is not set
1238# CONFIG_SENSORS_MAX1619 is not set
1239# CONFIG_SENSORS_MAX6650 is not set
1240# CONFIG_SENSORS_PC87360 is not set
1241# CONFIG_SENSORS_PC87427 is not set
1242# CONFIG_SENSORS_SIS5595 is not set
1243# CONFIG_SENSORS_DME1737 is not set
1244# CONFIG_SENSORS_SMSC47M1 is not set
1245# CONFIG_SENSORS_SMSC47M192 is not set
1246# CONFIG_SENSORS_SMSC47B397 is not set
1247# CONFIG_SENSORS_ADS7828 is not set
1248# CONFIG_SENSORS_THMC50 is not set
1249# CONFIG_SENSORS_VIA686A is not set
1250# CONFIG_SENSORS_VT1211 is not set
1251# CONFIG_SENSORS_VT8231 is not set
1252# CONFIG_SENSORS_W83781D is not set
1253# CONFIG_SENSORS_W83791D is not set
1254# CONFIG_SENSORS_W83792D is not set
1255# CONFIG_SENSORS_W83793 is not set
1256# CONFIG_SENSORS_W83L785TS is not set
1257# CONFIG_SENSORS_W83L786NG is not set
1258# CONFIG_SENSORS_W83627HF is not set
1259# CONFIG_SENSORS_W83627EHF is not set
1260# CONFIG_SENSORS_HDAPS is not set
1261# CONFIG_SENSORS_APPLESMC is not set
1262# CONFIG_HWMON_DEBUG_CHIP is not set
1263# CONFIG_THERMAL is not set
1264# CONFIG_THERMAL_HWMON is not set
1265# CONFIG_WATCHDOG is not set
1266CONFIG_SSB_POSSIBLE=y
1267
1268#
1269# Sonics Silicon Backplane
1270#
1271# CONFIG_SSB is not set
1272
1273#
1274# Multifunction device drivers
1275#
1276# CONFIG_MFD_CORE is not set
1277# CONFIG_MFD_SM501 is not set
1278# CONFIG_HTC_PASIC3 is not set
1279# CONFIG_TPS65010 is not set
1280# CONFIG_TWL4030_CORE is not set
1281# CONFIG_MFD_TMIO is not set
1282# CONFIG_PMIC_DA903X is not set
1283# CONFIG_MFD_WM8400 is not set
1284# CONFIG_MFD_WM8350_I2C is not set
1285# CONFIG_MFD_PCF50633 is not set
1286# CONFIG_REGULATOR is not set
1287
1288#
1289# Multimedia devices
1290#
1291
1292#
1293# Multimedia core support
1294#
1295CONFIG_VIDEO_DEV=y
1296CONFIG_VIDEO_V4L2_COMMON=y
1297# CONFIG_VIDEO_ALLOW_V4L1 is not set
1298# CONFIG_VIDEO_V4L1_COMPAT is not set
1299# CONFIG_DVB_CORE is not set
1300CONFIG_VIDEO_MEDIA=y
1301
1302#
1303# Multimedia drivers
1304#
1305# CONFIG_MEDIA_ATTACH is not set
1306CONFIG_MEDIA_TUNER=y
1307CONFIG_MEDIA_TUNER_CUSTOMIZE=y
1308# CONFIG_MEDIA_TUNER_SIMPLE is not set
1309# CONFIG_MEDIA_TUNER_TDA8290 is not set
1310# CONFIG_MEDIA_TUNER_TDA827X is not set
1311# CONFIG_MEDIA_TUNER_TDA18271 is not set
1312# CONFIG_MEDIA_TUNER_TDA9887 is not set
1313# CONFIG_MEDIA_TUNER_TEA5761 is not set
1314# CONFIG_MEDIA_TUNER_TEA5767 is not set
1315# CONFIG_MEDIA_TUNER_MT20XX is not set
1316# CONFIG_MEDIA_TUNER_MT2060 is not set
1317# CONFIG_MEDIA_TUNER_MT2266 is not set
1318# CONFIG_MEDIA_TUNER_MT2131 is not set
1319# CONFIG_MEDIA_TUNER_QT1010 is not set
1320# CONFIG_MEDIA_TUNER_XC2028 is not set
1321# CONFIG_MEDIA_TUNER_XC5000 is not set
1322# CONFIG_MEDIA_TUNER_MXL5005S is not set
1323# CONFIG_MEDIA_TUNER_MXL5007T is not set
1324CONFIG_VIDEO_V4L2=y
1325CONFIG_VIDEO_CAPTURE_DRIVERS=y
1326# CONFIG_VIDEO_ADV_DEBUG is not set
1327# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
1328# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
1329
1330#
1331# Encoders/decoders and other helper chips
1332#
1333
1334#
1335# Audio decoders
1336#
1337# CONFIG_VIDEO_TVAUDIO is not set
1338# CONFIG_VIDEO_TDA7432 is not set
1339# CONFIG_VIDEO_TDA9840 is not set
1340# CONFIG_VIDEO_TDA9875 is not set
1341# CONFIG_VIDEO_TEA6415C is not set
1342# CONFIG_VIDEO_TEA6420 is not set
1343# CONFIG_VIDEO_MSP3400 is not set
1344# CONFIG_VIDEO_CS5345 is not set
1345# CONFIG_VIDEO_CS53L32A is not set
1346# CONFIG_VIDEO_M52790 is not set
1347# CONFIG_VIDEO_TLV320AIC23B is not set
1348# CONFIG_VIDEO_WM8775 is not set
1349# CONFIG_VIDEO_WM8739 is not set
1350# CONFIG_VIDEO_VP27SMPX is not set
1351
1352#
1353# Video decoders
1354#
1355# CONFIG_VIDEO_OV7670 is not set
1356# CONFIG_VIDEO_TCM825X is not set
1357# CONFIG_VIDEO_SAA711X is not set
1358# CONFIG_VIDEO_SAA717X is not set
1359# CONFIG_VIDEO_TVP514X is not set
1360# CONFIG_VIDEO_TVP5150 is not set
1361
1362#
1363# Video and audio decoders
1364#
1365# CONFIG_VIDEO_CX25840 is not set
1366
1367#
1368# MPEG video encoders
1369#
1370# CONFIG_VIDEO_CX2341X is not set
1371
1372#
1373# Video encoders
1374#
1375# CONFIG_VIDEO_SAA7127 is not set
1376
1377#
1378# Video improvement chips
1379#
1380# CONFIG_VIDEO_UPD64031A is not set
1381# CONFIG_VIDEO_UPD64083 is not set
1382# CONFIG_VIDEO_VIVI is not set
1383# CONFIG_VIDEO_BT848 is not set
1384# CONFIG_VIDEO_SAA5246A is not set
1385# CONFIG_VIDEO_SAA5249 is not set
1386# CONFIG_VIDEO_SAA7134 is not set
1387# CONFIG_VIDEO_HEXIUM_ORION is not set
1388# CONFIG_VIDEO_HEXIUM_GEMINI is not set
1389# CONFIG_VIDEO_CX88 is not set
1390# CONFIG_VIDEO_IVTV is not set
1391# CONFIG_VIDEO_CAFE_CCIC is not set
1392# CONFIG_SOC_CAMERA is not set
1393# CONFIG_V4L_USB_DRIVERS is not set
1394CONFIG_VIDEO_MRSTCI=y
1395CONFIG_VIDEO_MRST_ISP=y
1396CONFIG_VIDEO_MRST_SENSOR=y
1397CONFIG_VIDEO_MRST_OV2650=y
1398CONFIG_VIDEO_MRST_OV5630=y
1399# CONFIG_RADIO_ADAPTERS is not set
1400CONFIG_DAB=y
1401# CONFIG_USB_DABUSB is not set
1402
1403#
1404# Graphics support
1405#
1406CONFIG_AGP=y
1407# CONFIG_AGP_ALI is not set
1408# CONFIG_AGP_ATI is not set
1409# CONFIG_AGP_AMD is not set
1410CONFIG_AGP_AMD64=y
1411CONFIG_AGP_INTEL=y
1412# CONFIG_AGP_NVIDIA is not set
1413# CONFIG_AGP_SIS is not set
1414# CONFIG_AGP_SWORKS is not set
1415# CONFIG_AGP_VIA is not set
1416# CONFIG_AGP_EFFICEON is not set
1417CONFIG_DRM=y
1418# CONFIG_DRM_TDFX is not set
1419# CONFIG_DRM_R128 is not set
1420# CONFIG_DRM_RADEON is not set
1421# CONFIG_DRM_I810 is not set
1422# CONFIG_DRM_I830 is not set
1423CONFIG_DRM_I915=y
1424# CONFIG_DRM_I915_KMS is not set
1425# CONFIG_DRM_MGA is not set
1426# CONFIG_DRM_SIS is not set
1427# CONFIG_DRM_VIA is not set
1428# CONFIG_DRM_SAVAGE is not set
1429# CONFIG_VGASTATE is not set
1430# CONFIG_VIDEO_OUTPUT_CONTROL is not set
1431CONFIG_FB=y
1432# CONFIG_FIRMWARE_EDID is not set
1433# CONFIG_FB_DDC is not set
1434# CONFIG_FB_BOOT_VESA_SUPPORT is not set
1435CONFIG_FB_CFB_FILLRECT=y
1436CONFIG_FB_CFB_COPYAREA=y
1437CONFIG_FB_CFB_IMAGEBLIT=y
1438# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
1439# CONFIG_FB_SYS_FILLRECT is not set
1440# CONFIG_FB_SYS_COPYAREA is not set
1441# CONFIG_FB_SYS_IMAGEBLIT is not set
1442# CONFIG_FB_FOREIGN_ENDIAN is not set
1443# CONFIG_FB_SYS_FOPS is not set
1444# CONFIG_FB_SVGALIB is not set
1445# CONFIG_FB_MACMODES is not set
1446# CONFIG_FB_BACKLIGHT is not set
1447CONFIG_FB_MODE_HELPERS=y
1448CONFIG_FB_TILEBLITTING=y
1449
1450#
1451# Frame buffer hardware drivers
1452#
1453# CONFIG_FB_CIRRUS is not set
1454# CONFIG_FB_PM2 is not set
1455# CONFIG_FB_CYBER2000 is not set
1456# CONFIG_FB_ARC is not set
1457# CONFIG_FB_ASILIANT is not set
1458# CONFIG_FB_IMSTT is not set
1459# CONFIG_FB_VGA16 is not set
1460# CONFIG_FB_UVESA is not set
1461# CONFIG_FB_VESA is not set
1462# CONFIG_FB_N411 is not set
1463# CONFIG_FB_HGA is not set
1464# CONFIG_FB_S1D13XXX is not set
1465# CONFIG_FB_NVIDIA is not set
1466# CONFIG_FB_RIVA is not set
1467# CONFIG_FB_I810 is not set
1468# CONFIG_FB_LE80578 is not set
1469# CONFIG_FB_INTEL is not set
1470# CONFIG_FB_MATROX is not set
1471# CONFIG_FB_RADEON is not set
1472# CONFIG_FB_ATY128 is not set
1473# CONFIG_FB_ATY is not set
1474# CONFIG_FB_S3 is not set
1475# CONFIG_FB_SAVAGE is not set
1476# CONFIG_FB_SIS is not set
1477# CONFIG_FB_VIA is not set
1478# CONFIG_FB_NEOMAGIC is not set
1479# CONFIG_FB_KYRO is not set
1480# CONFIG_FB_3DFX is not set
1481# CONFIG_FB_VOODOO1 is not set
1482# CONFIG_FB_VT8623 is not set
1483# CONFIG_FB_CYBLA is not set
1484# CONFIG_FB_TRIDENT is not set
1485# CONFIG_FB_ARK is not set
1486# CONFIG_FB_PM3 is not set
1487# CONFIG_FB_CARMINE is not set
1488# CONFIG_FB_GEODE is not set
1489# CONFIG_FB_VIRTUAL is not set
1490# CONFIG_FB_METRONOME is not set
1491# CONFIG_FB_MB862XX is not set
1492CONFIG_BACKLIGHT_LCD_SUPPORT=y
1493# CONFIG_LCD_CLASS_DEVICE is not set
1494CONFIG_BACKLIGHT_CLASS_DEVICE=y
1495CONFIG_BACKLIGHT_GENERIC=y
1496# CONFIG_BACKLIGHT_PROGEAR is not set
1497# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
1498# CONFIG_BACKLIGHT_SAHARA is not set
1499
1500#
1501# Display device support
1502#
1503CONFIG_DISPLAY_SUPPORT=y
1504
1505#
1506# Console display driver support
1507#
1508CONFIG_VGA_CONSOLE=y
1509CONFIG_VGACON_SOFT_SCROLLBACK=y
1510CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
1511CONFIG_DUMMY_CONSOLE=y
1512CONFIG_FRAMEBUFFER_CONSOLE=y
1513CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
1514CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
1515# CONFIG_FONTS is not set
1516CONFIG_FONT_8x8=y
1517CONFIG_FONT_8x16=y
1518CONFIG_LOGO=y
1519# CONFIG_LOGO_LINUX_MONO is not set
1520# CONFIG_LOGO_LINUX_VGA16 is not set
1521CONFIG_LOGO_LINUX_CLUT224=y
1522CONFIG_SOUND=y
1523CONFIG_SOUND_OSS_CORE=y
1524CONFIG_SND=y
1525CONFIG_SND_TIMER=y
1526CONFIG_SND_PCM=y
1527# CONFIG_SND_SEQUENCER is not set
1528CONFIG_SND_OSSEMUL=y
1529CONFIG_SND_MIXER_OSS=y
1530CONFIG_SND_PCM_OSS=y
1531# CONFIG_SND_PCM_OSS_PLUGINS is not set
1532# CONFIG_SND_HRTIMER is not set
1533# CONFIG_SND_DYNAMIC_MINORS is not set
1534# CONFIG_SND_SUPPORT_OLD_API is not set
1535# CONFIG_SND_VERBOSE_PROCFS is not set
1536# CONFIG_SND_VERBOSE_PRINTK is not set
1537# CONFIG_SND_DEBUG is not set
1538# CONFIG_SND_DRIVERS is not set
1539CONFIG_SND_PCI=y
1540# CONFIG_SND_AD1889 is not set
1541# CONFIG_SND_ALS300 is not set
1542# CONFIG_SND_ALS4000 is not set
1543# CONFIG_SND_ALI5451 is not set
1544# CONFIG_SND_ATIIXP is not set
1545# CONFIG_SND_ATIIXP_MODEM is not set
1546# CONFIG_SND_AU8810 is not set
1547# CONFIG_SND_AU8820 is not set
1548# CONFIG_SND_AU8830 is not set
1549# CONFIG_SND_AW2 is not set
1550# CONFIG_SND_AZT3328 is not set
1551# CONFIG_SND_BT87X is not set
1552# CONFIG_SND_CA0106 is not set
1553# CONFIG_SND_CMIPCI is not set
1554# CONFIG_SND_OXYGEN is not set
1555# CONFIG_SND_CS4281 is not set
1556# CONFIG_SND_CS46XX is not set
1557# CONFIG_SND_CS5530 is not set
1558# CONFIG_SND_CS5535AUDIO is not set
1559# CONFIG_SND_DARLA20 is not set
1560# CONFIG_SND_GINA20 is not set
1561# CONFIG_SND_LAYLA20 is not set
1562# CONFIG_SND_DARLA24 is not set
1563# CONFIG_SND_GINA24 is not set
1564# CONFIG_SND_LAYLA24 is not set
1565# CONFIG_SND_MONA is not set
1566# CONFIG_SND_MIA is not set
1567# CONFIG_SND_ECHO3G is not set
1568# CONFIG_SND_INDIGO is not set
1569# CONFIG_SND_INDIGOIO is not set
1570# CONFIG_SND_INDIGODJ is not set
1571# CONFIG_SND_EMU10K1 is not set
1572# CONFIG_SND_EMU10K1X is not set
1573# CONFIG_SND_ENS1370 is not set
1574# CONFIG_SND_ENS1371 is not set
1575# CONFIG_SND_ES1938 is not set
1576# CONFIG_SND_ES1968 is not set
1577# CONFIG_SND_FM801 is not set
1578# CONFIG_SND_HDA_INTEL is not set
1579# CONFIG_SND_HDSP is not set
1580# CONFIG_SND_HDSPM is not set
1581# CONFIG_SND_HIFIER is not set
1582# CONFIG_SND_ICE1712 is not set
1583# CONFIG_SND_ICE1724 is not set
1584# CONFIG_SND_INTEL8X0 is not set
1585# CONFIG_SND_INTEL8X0M is not set
1586# CONFIG_SND_KORG1212 is not set
1587# CONFIG_SND_MAESTRO3 is not set
1588# CONFIG_SND_MIXART is not set
1589# CONFIG_SND_NM256 is not set
1590# CONFIG_SND_PCXHR is not set
1591# CONFIG_SND_RIPTIDE is not set
1592# CONFIG_SND_RME32 is not set
1593# CONFIG_SND_RME96 is not set
1594# CONFIG_SND_RME9652 is not set
1595# CONFIG_SND_SIS7019 is not set
1596# CONFIG_SND_SONICVIBES is not set
1597# CONFIG_SND_TRIDENT is not set
1598# CONFIG_SND_VIA82XX is not set
1599# CONFIG_SND_VIA82XX_MODEM is not set
1600# CONFIG_SND_VIRTUOSO is not set
1601# CONFIG_SND_VX222 is not set
1602# CONFIG_SND_YMFPCI is not set
1603# CONFIG_SND_SPI is not set
1604# CONFIG_SND_USB is not set
1605# CONFIG_SND_SOC is not set
1606# CONFIG_SOUND_PRIME is not set
1607CONFIG_HID_SUPPORT=y
1608CONFIG_HID=y
1609CONFIG_HID_DEBUG=y
1610CONFIG_HIDRAW=y
1611
1612#
1613# USB Input Devices
1614#
1615CONFIG_USB_HID=y
1616CONFIG_HID_PID=y
1617# CONFIG_USB_HIDDEV is not set
1618
1619#
1620# Special HID drivers
1621#
1622# CONFIG_HID_COMPAT is not set
1623# CONFIG_HID_A4TECH is not set
1624# CONFIG_HID_APPLE is not set
1625# CONFIG_HID_BELKIN is not set
1626# CONFIG_HID_CHERRY is not set
1627# CONFIG_HID_CHICONY is not set
1628# CONFIG_HID_CYPRESS is not set
1629# CONFIG_HID_EZKEY is not set
1630# CONFIG_HID_GYRATION is not set
1631# CONFIG_HID_LOGITECH is not set
1632# CONFIG_HID_MICROSOFT is not set
1633# CONFIG_HID_MONTEREY is not set
1634# CONFIG_HID_NTRIG is not set
1635# CONFIG_HID_PANTHERLORD is not set
1636# CONFIG_HID_PETALYNX is not set
1637# CONFIG_HID_SAMSUNG is not set
1638# CONFIG_HID_SONY is not set
1639# CONFIG_HID_SUNPLUS is not set
1640# CONFIG_GREENASIA_FF is not set
1641# CONFIG_HID_TOPSEED is not set
1642# CONFIG_THRUSTMASTER_FF is not set
1643# CONFIG_ZEROPLUS_FF is not set
1644CONFIG_USB_SUPPORT=y
1645CONFIG_USB_ARCH_HAS_HCD=y
1646CONFIG_USB_ARCH_HAS_OHCI=y
1647CONFIG_USB_ARCH_HAS_EHCI=y
1648CONFIG_USB=y
1649CONFIG_USB_DEBUG=y
1650CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
1651
1652#
1653# Miscellaneous USB options
1654#
1655CONFIG_USB_DEVICEFS=y
1656# CONFIG_USB_DEVICE_CLASS is not set
1657# CONFIG_USB_DYNAMIC_MINORS is not set
1658CONFIG_USB_SUSPEND=y
1659CONFIG_USB_OTG=y
1660# CONFIG_USB_OTG_WHITELIST is not set
1661# CONFIG_USB_OTG_BLACKLIST_HUB is not set
1662CONFIG_USB_MON=y
1663# CONFIG_USB_WUSB is not set
1664# CONFIG_USB_WUSB_CBAF is not set
1665
1666#
1667# OTG and related infrastructure
1668#
1669CONFIG_USB_OTG_UTILS=y
1670# CONFIG_USB_GPIO_VBUS is not set
1671CONFIG_USB_LANGWELL_OTG=y
1672
1673#
1674# USB Host Controller Drivers
1675#
1676# CONFIG_USB_C67X00_HCD is not set
1677CONFIG_USB_EHCI_HCD=y
1678CONFIG_USB_EHCI_ROOT_HUB_TT=y
1679# CONFIG_USB_EHCI_TT_NEWSCHED is not set
1680# CONFIG_USB_OXU210HP_HCD is not set
1681# CONFIG_USB_ISP116X_HCD is not set
1682# CONFIG_USB_ISP1760_HCD is not set
1683# CONFIG_USB_OHCI_HCD is not set
1684# CONFIG_USB_UHCI_HCD is not set
1685# CONFIG_USB_SL811_HCD is not set
1686# CONFIG_USB_R8A66597_HCD is not set
1687# CONFIG_USB_WHCI_HCD is not set
1688# CONFIG_USB_HWA_HCD is not set
1689# CONFIG_USB_GADGET_MUSB_HDRC is not set
1690
1691#
1692# USB Device Class drivers
1693#
1694# CONFIG_USB_ACM is not set
1695CONFIG_USB_PRINTER=y
1696# CONFIG_USB_WDM is not set
1697# CONFIG_USB_TMC is not set
1698
1699#
1700# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
1701#
1702
1703#
1704# see USB_STORAGE Help for more information
1705#
1706CONFIG_USB_STORAGE=y
1707# CONFIG_USB_STORAGE_DEBUG is not set
1708# CONFIG_USB_STORAGE_DATAFAB is not set
1709# CONFIG_USB_STORAGE_FREECOM is not set
1710# CONFIG_USB_STORAGE_ISD200 is not set
1711# CONFIG_USB_STORAGE_USBAT is not set
1712# CONFIG_USB_STORAGE_SDDR09 is not set
1713# CONFIG_USB_STORAGE_SDDR55 is not set
1714# CONFIG_USB_STORAGE_JUMPSHOT is not set
1715# CONFIG_USB_STORAGE_ALAUDA is not set
1716# CONFIG_USB_STORAGE_ONETOUCH is not set
1717# CONFIG_USB_STORAGE_KARMA is not set
1718# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
1719CONFIG_USB_LIBUSUAL=y
1720
1721#
1722# USB Imaging devices
1723#
1724# CONFIG_USB_MDC800 is not set
1725# CONFIG_USB_MICROTEK is not set
1726
1727#
1728# USB port drivers
1729#
1730# CONFIG_USB_SERIAL is not set
1731
1732#
1733# USB Miscellaneous drivers
1734#
1735# CONFIG_USB_EMI62 is not set
1736# CONFIG_USB_EMI26 is not set
1737# CONFIG_USB_ADUTUX is not set
1738# CONFIG_USB_SEVSEG is not set
1739# CONFIG_USB_RIO500 is not set
1740# CONFIG_USB_LEGOTOWER is not set
1741# CONFIG_USB_LCD is not set
1742# CONFIG_USB_BERRY_CHARGE is not set
1743# CONFIG_USB_LED is not set
1744# CONFIG_USB_CYPRESS_CY7C63 is not set
1745# CONFIG_USB_CYTHERM is not set
1746# CONFIG_USB_PHIDGET is not set
1747# CONFIG_USB_IDMOUSE is not set
1748# CONFIG_USB_FTDI_ELAN is not set
1749# CONFIG_USB_APPLEDISPLAY is not set
1750# CONFIG_USB_SISUSBVGA is not set
1751# CONFIG_USB_LD is not set
1752# CONFIG_USB_TRANCEVIBRATOR is not set
1753# CONFIG_USB_IOWARRIOR is not set
1754# CONFIG_USB_TEST is not set
1755# CONFIG_USB_ISIGHTFW is not set
1756# CONFIG_USB_VST is not set
1757CONFIG_USB_GADGET=m
1758# CONFIG_USB_GADGET_DEBUG is not set
1759# CONFIG_USB_GADGET_DEBUG_FILES is not set
1760# CONFIG_USB_GADGET_DEBUG_FS is not set
1761CONFIG_USB_GADGET_VBUS_DRAW=2
1762CONFIG_USB_GADGET_SELECTED=y
1763# CONFIG_USB_GADGET_AT91 is not set
1764# CONFIG_USB_GADGET_ATMEL_USBA is not set
1765# CONFIG_USB_GADGET_FSL_USB2 is not set
1766# CONFIG_USB_GADGET_LH7A40X is not set
1767# CONFIG_USB_GADGET_OMAP is not set
1768# CONFIG_USB_GADGET_PXA25X is not set
1769# CONFIG_USB_GADGET_PXA27X is not set
1770# CONFIG_USB_GADGET_S3C2410 is not set
1771# CONFIG_USB_GADGET_IMX is not set
1772# CONFIG_USB_GADGET_M66592 is not set
1773# CONFIG_USB_GADGET_AMD5536UDC is not set
1774# CONFIG_USB_GADGET_FSL_QE is not set
1775# CONFIG_USB_GADGET_CI13XXX is not set
1776# CONFIG_USB_GADGET_NET2280 is not set
1777# CONFIG_USB_GADGET_GOKU is not set
1778# CONFIG_USB_GADGET_DUMMY_HCD is not set
1779CONFIG_USB_GADGET_DUALSPEED=y
1780# CONFIG_USB_ZERO is not set
1781CONFIG_USB_ETH=m
1782CONFIG_USB_ETH_RNDIS=y
1783# CONFIG_USB_GADGETFS is not set
1784CONFIG_USB_FILE_STORAGE=m
1785# CONFIG_USB_FILE_STORAGE_TEST is not set
1786# CONFIG_USB_G_SERIAL is not set
1787# CONFIG_USB_MIDI_GADGET is not set
1788# CONFIG_USB_G_PRINTER is not set
1789# CONFIG_USB_CDC_COMPOSITE is not set
1790# CONFIG_UWB is not set
1791CONFIG_MMC=y
1792# CONFIG_MEMSTICK is not set
1793CONFIG_NEW_LEDS=y
1794# CONFIG_LEDS_CLASS is not set
1795
1796#
1797# LED drivers
1798#
1799
1800#
1801# LED Triggers
1802#
1803CONFIG_LEDS_TRIGGERS=y
1804# CONFIG_LEDS_TRIGGER_TIMER is not set
1805# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
1806# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
1807# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
1808# CONFIG_ACCESSIBILITY is not set
1809# CONFIG_INFINIBAND is not set
1810# CONFIG_EDAC is not set
1811CONFIG_RTC_LIB=y
1812CONFIG_RTC_CLASS=y
1813# CONFIG_RTC_HCTOSYS is not set
1814# CONFIG_RTC_DEBUG is not set
1815
1816#
1817# RTC interfaces
1818#
1819CONFIG_RTC_INTF_SYSFS=y
1820CONFIG_RTC_INTF_PROC=y
1821CONFIG_RTC_INTF_DEV=y
1822# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1823# CONFIG_RTC_DRV_TEST is not set
1824
1825#
1826# I2C RTC drivers
1827#
1828# CONFIG_RTC_DRV_DS1307 is not set
1829# CONFIG_RTC_DRV_DS1374 is not set
1830# CONFIG_RTC_DRV_DS1672 is not set
1831# CONFIG_RTC_DRV_MAX6900 is not set
1832# CONFIG_RTC_DRV_RS5C372 is not set
1833# CONFIG_RTC_DRV_ISL1208 is not set
1834# CONFIG_RTC_DRV_X1205 is not set
1835# CONFIG_RTC_DRV_PCF8563 is not set
1836# CONFIG_RTC_DRV_PCF8583 is not set
1837# CONFIG_RTC_DRV_M41T80 is not set
1838# CONFIG_RTC_DRV_S35390A is not set
1839# CONFIG_RTC_DRV_FM3130 is not set
1840# CONFIG_RTC_DRV_RX8581 is not set
1841
1842#
1843# SPI RTC drivers
1844#
1845# CONFIG_RTC_DRV_M41T94 is not set
1846# CONFIG_RTC_DRV_DS1305 is not set
1847# CONFIG_RTC_DRV_DS1390 is not set
1848# CONFIG_RTC_DRV_MAX6902 is not set
1849# CONFIG_RTC_DRV_R9701 is not set
1850# CONFIG_RTC_DRV_RS5C348 is not set
1851# CONFIG_RTC_DRV_DS3234 is not set
1852
1853#
1854# Platform RTC drivers
1855#
1856# CONFIG_RTC_DRV_CMOS is not set
1857CONFIG_RTC_DRV_VRTC=y
1858# CONFIG_RTC_DRV_DS1286 is not set
1859# CONFIG_RTC_DRV_DS1511 is not set
1860# CONFIG_RTC_DRV_DS1553 is not set
1861# CONFIG_RTC_DRV_DS1742 is not set
1862# CONFIG_RTC_DRV_STK17TA8 is not set
1863# CONFIG_RTC_DRV_M48T86 is not set
1864# CONFIG_RTC_DRV_M48T35 is not set
1865# CONFIG_RTC_DRV_M48T59 is not set
1866# CONFIG_RTC_DRV_BQ4802 is not set
1867# CONFIG_RTC_DRV_V3020 is not set
1868
1869#
1870# on-CPU RTC drivers
1871#
1872CONFIG_DMADEVICES=y
1873
1874#
1875# DMA Devices
1876#
1877# CONFIG_INTEL_IOATDMA is not set
1878# CONFIG_UIO is not set
1879# CONFIG_STAGING is not set
1880# CONFIG_X86_PLATFORM_DEVICES is not set
1881
1882#
1883# Firmware Drivers
1884#
1885# CONFIG_EDD is not set
1886CONFIG_FIRMWARE_MEMMAP=y
1887# CONFIG_DELL_RBU is not set
1888# CONFIG_DCDBAS is not set
1889# CONFIG_ISCSI_IBFT_FIND is not set
1890
1891#
1892# File systems
1893#
1894# CONFIG_EXT2_FS is not set
1895CONFIG_EXT3_FS=y
1896CONFIG_EXT3_FS_XATTR=y
1897CONFIG_EXT3_FS_POSIX_ACL=y
1898CONFIG_EXT3_FS_SECURITY=y
1899# CONFIG_EXT4_FS is not set
1900CONFIG_JBD=y
1901# CONFIG_JBD_DEBUG is not set
1902CONFIG_FS_MBCACHE=y
1903# CONFIG_REISERFS_FS is not set
1904# CONFIG_JFS_FS is not set
1905CONFIG_FS_POSIX_ACL=y
1906CONFIG_FILE_LOCKING=y
1907# CONFIG_XFS_FS is not set
1908# CONFIG_OCFS2_FS is not set
1909# CONFIG_BTRFS_FS is not set
1910CONFIG_DNOTIFY=y
1911CONFIG_INOTIFY=y
1912CONFIG_INOTIFY_USER=y
1913CONFIG_QUOTA=y
1914CONFIG_QUOTA_NETLINK_INTERFACE=y
1915# CONFIG_PRINT_QUOTA_WARNING is not set
1916CONFIG_QUOTA_TREE=y
1917# CONFIG_QFMT_V1 is not set
1918CONFIG_QFMT_V2=y
1919CONFIG_QUOTACTL=y
1920# CONFIG_AUTOFS_FS is not set
1921CONFIG_AUTOFS4_FS=y
1922# CONFIG_FUSE_FS is not set
1923CONFIG_GENERIC_ACL=y
1924
1925#
1926# CD-ROM/DVD Filesystems
1927#
1928CONFIG_ISO9660_FS=y
1929CONFIG_JOLIET=y
1930CONFIG_ZISOFS=y
1931# CONFIG_UDF_FS is not set
1932
1933#
1934# DOS/FAT/NT Filesystems
1935#
1936CONFIG_FAT_FS=y
1937CONFIG_MSDOS_FS=y
1938CONFIG_VFAT_FS=y
1939CONFIG_FAT_DEFAULT_CODEPAGE=437
1940CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1941# CONFIG_NTFS_FS is not set
1942
1943#
1944# Pseudo filesystems
1945#
1946CONFIG_PROC_FS=y
1947CONFIG_PROC_KCORE=y
1948CONFIG_PROC_SYSCTL=y
1949CONFIG_PROC_PAGE_MONITOR=y
1950CONFIG_SYSFS=y
1951CONFIG_TMPFS=y
1952CONFIG_TMPFS_POSIX_ACL=y
1953CONFIG_HUGETLBFS=y
1954CONFIG_HUGETLB_PAGE=y
1955# CONFIG_CONFIGFS_FS is not set
1956CONFIG_MISC_FILESYSTEMS=y
1957# CONFIG_ADFS_FS is not set
1958# CONFIG_AFFS_FS is not set
1959# CONFIG_ECRYPT_FS is not set
1960# CONFIG_HFS_FS is not set
1961# CONFIG_HFSPLUS_FS is not set
1962# CONFIG_BEFS_FS is not set
1963# CONFIG_BFS_FS is not set
1964# CONFIG_EFS_FS is not set
1965# CONFIG_CRAMFS is not set
1966# CONFIG_SQUASHFS is not set
1967# CONFIG_VXFS_FS is not set
1968# CONFIG_MINIX_FS is not set
1969# CONFIG_OMFS_FS is not set
1970# CONFIG_HPFS_FS is not set
1971# CONFIG_QNX4FS_FS is not set
1972# CONFIG_ROMFS_FS is not set
1973# CONFIG_SYSV_FS is not set
1974# CONFIG_UFS_FS is not set
1975CONFIG_NETWORK_FILESYSTEMS=y
1976CONFIG_NFS_FS=y
1977CONFIG_NFS_V3=y
1978CONFIG_NFS_V3_ACL=y
1979CONFIG_NFS_V4=y
1980CONFIG_ROOT_NFS=y
1981# CONFIG_NFSD is not set
1982CONFIG_LOCKD=y
1983CONFIG_LOCKD_V4=y
1984CONFIG_NFS_ACL_SUPPORT=y
1985CONFIG_NFS_COMMON=y
1986CONFIG_SUNRPC=y
1987CONFIG_SUNRPC_GSS=y
1988# CONFIG_SUNRPC_REGISTER_V4 is not set
1989CONFIG_RPCSEC_GSS_KRB5=y
1990# CONFIG_RPCSEC_GSS_SPKM3 is not set
1991# CONFIG_SMB_FS is not set
1992# CONFIG_CIFS is not set
1993# CONFIG_NCP_FS is not set
1994# CONFIG_CODA_FS is not set
1995# CONFIG_AFS_FS is not set
1996
1997#
1998# Partition Types
1999#
2000CONFIG_PARTITION_ADVANCED=y
2001# CONFIG_ACORN_PARTITION is not set
2002CONFIG_OSF_PARTITION=y
2003CONFIG_AMIGA_PARTITION=y
2004# CONFIG_ATARI_PARTITION is not set
2005CONFIG_MAC_PARTITION=y
2006CONFIG_MSDOS_PARTITION=y
2007CONFIG_BSD_DISKLABEL=y
2008CONFIG_MINIX_SUBPARTITION=y
2009CONFIG_SOLARIS_X86_PARTITION=y
2010CONFIG_UNIXWARE_DISKLABEL=y
2011# CONFIG_LDM_PARTITION is not set
2012CONFIG_SGI_PARTITION=y
2013# CONFIG_ULTRIX_PARTITION is not set
2014CONFIG_SUN_PARTITION=y
2015CONFIG_KARMA_PARTITION=y
2016CONFIG_EFI_PARTITION=y
2017# CONFIG_SYSV68_PARTITION is not set
2018CONFIG_NLS=y
2019CONFIG_NLS_DEFAULT="utf8"
2020CONFIG_NLS_CODEPAGE_437=y
2021# CONFIG_NLS_CODEPAGE_737 is not set
2022# CONFIG_NLS_CODEPAGE_775 is not set
2023# CONFIG_NLS_CODEPAGE_850 is not set
2024# CONFIG_NLS_CODEPAGE_852 is not set
2025# CONFIG_NLS_CODEPAGE_855 is not set
2026# CONFIG_NLS_CODEPAGE_857 is not set
2027# CONFIG_NLS_CODEPAGE_860 is not set
2028# CONFIG_NLS_CODEPAGE_861 is not set
2029# CONFIG_NLS_CODEPAGE_862 is not set
2030# CONFIG_NLS_CODEPAGE_863 is not set
2031# CONFIG_NLS_CODEPAGE_864 is not set
2032# CONFIG_NLS_CODEPAGE_865 is not set
2033# CONFIG_NLS_CODEPAGE_866 is not set
2034# CONFIG_NLS_CODEPAGE_869 is not set
2035# CONFIG_NLS_CODEPAGE_936 is not set
2036# CONFIG_NLS_CODEPAGE_950 is not set
2037# CONFIG_NLS_CODEPAGE_932 is not set
2038# CONFIG_NLS_CODEPAGE_949 is not set
2039# CONFIG_NLS_CODEPAGE_874 is not set
2040# CONFIG_NLS_ISO8859_8 is not set
2041# CONFIG_NLS_CODEPAGE_1250 is not set
2042# CONFIG_NLS_CODEPAGE_1251 is not set
2043CONFIG_NLS_ASCII=y
2044CONFIG_NLS_ISO8859_1=y
2045# CONFIG_NLS_ISO8859_2 is not set
2046# CONFIG_NLS_ISO8859_3 is not set
2047# CONFIG_NLS_ISO8859_4 is not set
2048# CONFIG_NLS_ISO8859_5 is not set
2049# CONFIG_NLS_ISO8859_6 is not set
2050# CONFIG_NLS_ISO8859_7 is not set
2051# CONFIG_NLS_ISO8859_9 is not set
2052# CONFIG_NLS_ISO8859_13 is not set
2053# CONFIG_NLS_ISO8859_14 is not set
2054# CONFIG_NLS_ISO8859_15 is not set
2055# CONFIG_NLS_KOI8_R is not set
2056# CONFIG_NLS_KOI8_U is not set
2057CONFIG_NLS_UTF8=y
2058# CONFIG_DLM is not set
2059
2060#
2061# Kernel hacking
2062#
2063CONFIG_TRACE_IRQFLAGS_SUPPORT=y
2064CONFIG_PRINTK_TIME=y
2065CONFIG_ENABLE_WARN_DEPRECATED=y
2066CONFIG_ENABLE_MUST_CHECK=y
2067CONFIG_FRAME_WARN=2048
2068CONFIG_MAGIC_SYSRQ=y
2069# CONFIG_UNUSED_SYMBOLS is not set
2070CONFIG_DEBUG_FS=y
2071# CONFIG_HEADERS_CHECK is not set
2072CONFIG_DEBUG_KERNEL=y
2073CONFIG_DEBUG_SHIRQ=y
2074CONFIG_DETECT_SOFTLOCKUP=y
2075# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
2076CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
2077# CONFIG_SCHED_DEBUG is not set
2078CONFIG_SCHEDSTATS=y
2079CONFIG_TIMER_STATS=y
2080# CONFIG_DEBUG_OBJECTS is not set
2081# CONFIG_SLUB_DEBUG_ON is not set
2082# CONFIG_SLUB_STATS is not set
2083# CONFIG_DEBUG_RT_MUTEXES is not set
2084# CONFIG_RT_MUTEX_TESTER is not set
2085CONFIG_DEBUG_SPINLOCK=y
2086CONFIG_DEBUG_MUTEXES=y
2087CONFIG_DEBUG_LOCK_ALLOC=y
2088# CONFIG_PROVE_LOCKING is not set
2089CONFIG_LOCKDEP=y
2090# CONFIG_LOCK_STAT is not set
2091# CONFIG_DEBUG_LOCKDEP is not set
2092CONFIG_DEBUG_SPINLOCK_SLEEP=y
2093# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
2094CONFIG_STACKTRACE=y
2095# CONFIG_DEBUG_KOBJECT is not set
2096CONFIG_DEBUG_BUGVERBOSE=y
2097
2098# CONFIG_DEBUG_INFO is not set
2099# CONFIG_DEBUG_VM is not set
2100# CONFIG_DEBUG_VIRTUAL is not set
2101# CONFIG_DEBUG_WRITECOUNT is not set
2102# CONFIG_DEBUG_MEMORY_INIT is not set
2103# CONFIG_DEBUG_LIST is not set
2104# CONFIG_DEBUG_SG is not set
2105# CONFIG_DEBUG_NOTIFIERS is not set
2106CONFIG_ARCH_WANT_FRAME_POINTERS=y
2107CONFIG_FRAME_POINTER=y
2108# CONFIG_BOOT_PRINTK_DELAY is not set
2109# CONFIG_RCU_TORTURE_TEST is not set
2110# CONFIG_RCU_CPU_STALL_DETECTOR is not set
2111# CONFIG_BACKTRACE_SELF_TEST is not set
2112# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
2113# CONFIG_FAULT_INJECTION is not set
2114# CONFIG_LATENCYTOP is not set
2115CONFIG_SYSCTL_SYSCALL_CHECK=y
2116CONFIG_USER_STACKTRACE_SUPPORT=y
2117CONFIG_HAVE_FUNCTION_TRACER=y
2118CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
2119CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
2120CONFIG_HAVE_DYNAMIC_FTRACE=y
2121CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
2122
2123#
2124# Tracers
2125#
2126# CONFIG_FUNCTION_TRACER is not set
2127# CONFIG_IRQSOFF_TRACER is not set
2128# CONFIG_SYSPROF_TRACER is not set
2129# CONFIG_SCHED_TRACER is not set
2130# CONFIG_CONTEXT_SWITCH_TRACER is not set
2131# CONFIG_BOOT_TRACER is not set
2132# CONFIG_TRACE_BRANCH_PROFILING is not set
2133# CONFIG_POWER_TRACER is not set
2134# CONFIG_STACK_TRACER is not set
2135# CONFIG_MMIOTRACE is not set
2136CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
2137# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
2138# CONFIG_SAMPLES is not set
2139CONFIG_HAVE_ARCH_KGDB=y
2140# CONFIG_KGDB is not set
2141# CONFIG_STRICT_DEVMEM is not set
2142CONFIG_X86_VERBOSE_BOOTUP=y
2143CONFIG_EARLY_PRINTK=y
2144CONFIG_X86_MRST_EARLY_PRINTK=y
2145# CONFIG_EARLY_PRINTK_DBGP is not set
2146CONFIG_DEBUG_STACKOVERFLOW=y
2147CONFIG_DEBUG_STACK_USAGE=y
2148# CONFIG_DEBUG_PAGEALLOC is not set
2149# CONFIG_DEBUG_PER_CPU_MAPS is not set
2150# CONFIG_X86_PTDUMP is not set
2151CONFIG_DEBUG_RODATA=y
2152# CONFIG_DEBUG_RODATA_TEST is not set
2153# CONFIG_4KSTACKS is not set
2154CONFIG_DOUBLEFAULT=y
2155CONFIG_HAVE_MMIOTRACE_SUPPORT=y
2156CONFIG_IO_DELAY_TYPE_0X80=0
2157CONFIG_IO_DELAY_TYPE_0XED=1
2158CONFIG_IO_DELAY_TYPE_UDELAY=2
2159CONFIG_IO_DELAY_TYPE_NONE=3
2160CONFIG_IO_DELAY_0X80=y
2161# CONFIG_IO_DELAY_0XED is not set
2162# CONFIG_IO_DELAY_UDELAY is not set
2163# CONFIG_IO_DELAY_NONE is not set
2164CONFIG_DEFAULT_IO_DELAY_TYPE=0
2165CONFIG_DEBUG_BOOT_PARAMS=y
2166# CONFIG_CPA_DEBUG is not set
2167CONFIG_OPTIMIZE_INLINING=y
2168
2169#
2170# Security options
2171#
2172CONFIG_KEYS=y
2173CONFIG_KEYS_DEBUG_PROC_KEYS=y
2174CONFIG_SECURITY=y
2175# CONFIG_SECURITYFS is not set
2176CONFIG_SECURITY_NETWORK=y
2177# CONFIG_SECURITY_NETWORK_XFRM is not set
2178# CONFIG_SECURITY_PATH is not set
2179CONFIG_SECURITY_FILE_CAPABILITIES=y
2180# CONFIG_SECURITY_ROOTPLUG is not set
2181CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536
2182# CONFIG_SECURITY_SMACK is not set
2183CONFIG_CRYPTO=y
2184# CONFIG_SECURITY_SELINUX is not set
2185
2186#
2187# Crypto core or helper
2188#
2189# CONFIG_CRYPTO_FIPS is not set
2190CONFIG_CRYPTO_ALGAPI=y
2191CONFIG_CRYPTO_ALGAPI2=y
2192CONFIG_CRYPTO_AEAD=y
2193CONFIG_CRYPTO_AEAD2=y
2194CONFIG_CRYPTO_BLKCIPHER=y
2195CONFIG_CRYPTO_BLKCIPHER2=y
2196CONFIG_CRYPTO_HASH=y
2197CONFIG_CRYPTO_HASH2=y
2198CONFIG_CRYPTO_RNG2=y
2199CONFIG_CRYPTO_MANAGER=y
2200CONFIG_CRYPTO_MANAGER2=y
2201# CONFIG_CRYPTO_GF128MUL is not set
2202# CONFIG_CRYPTO_NULL is not set
2203# CONFIG_CRYPTO_CRYPTD is not set
2204CONFIG_CRYPTO_AUTHENC=y
2205
2206#
2207# Authenticated Encryption with Associated Data
2208#
2209# CONFIG_CRYPTO_CCM is not set
2210# CONFIG_CRYPTO_GCM is not set
2211# CONFIG_CRYPTO_SEQIV is not set
2212
2213#
2214# Block modes
2215#
2216CONFIG_CRYPTO_CBC=y
2217# CONFIG_CRYPTO_CTR is not set
2218# CONFIG_CRYPTO_CTS is not set
2219CONFIG_CRYPTO_ECB=y
2220# CONFIG_CRYPTO_LRW is not set
2221# CONFIG_CRYPTO_PCBC is not set
2222# CONFIG_CRYPTO_XTS is not set
2223
2224#
2225# Hash modes
2226#
2227CONFIG_CRYPTO_HMAC=y
2228# CONFIG_CRYPTO_XCBC is not set
2229
2230#
2231# Digest
2232#
2233# CONFIG_CRYPTO_CRC32C is not set
2234# CONFIG_CRYPTO_CRC32C_INTEL is not set
2235# CONFIG_CRYPTO_MD4 is not set
2236CONFIG_CRYPTO_MD5=y
2237# CONFIG_CRYPTO_MICHAEL_MIC is not set
2238# CONFIG_CRYPTO_RMD128 is not set
2239# CONFIG_CRYPTO_RMD160 is not set
2240# CONFIG_CRYPTO_RMD256 is not set
2241# CONFIG_CRYPTO_RMD320 is not set
2242CONFIG_CRYPTO_SHA1=y
2243# CONFIG_CRYPTO_SHA256 is not set
2244# CONFIG_CRYPTO_SHA512 is not set
2245# CONFIG_CRYPTO_TGR192 is not set
2246# CONFIG_CRYPTO_WP512 is not set
2247
2248#
2249# Ciphers
2250#
2251CONFIG_CRYPTO_AES=y
2252CONFIG_CRYPTO_AES_586=y
2253# CONFIG_CRYPTO_ANUBIS is not set
2254CONFIG_CRYPTO_ARC4=y
2255# CONFIG_CRYPTO_BLOWFISH is not set
2256# CONFIG_CRYPTO_CAMELLIA is not set
2257# CONFIG_CRYPTO_CAST5 is not set
2258# CONFIG_CRYPTO_CAST6 is not set
2259CONFIG_CRYPTO_DES=y
2260# CONFIG_CRYPTO_FCRYPT is not set
2261# CONFIG_CRYPTO_KHAZAD is not set
2262# CONFIG_CRYPTO_SALSA20 is not set
2263# CONFIG_CRYPTO_SALSA20_586 is not set
2264# CONFIG_CRYPTO_SEED is not set
2265# CONFIG_CRYPTO_SERPENT is not set
2266# CONFIG_CRYPTO_TEA is not set
2267# CONFIG_CRYPTO_TWOFISH is not set
2268# CONFIG_CRYPTO_TWOFISH_586 is not set
2269
2270#
2271# Compression
2272#
2273# CONFIG_CRYPTO_DEFLATE is not set
2274# CONFIG_CRYPTO_LZO is not set
2275
2276#
2277# Random Number Generation
2278#
2279# CONFIG_CRYPTO_ANSI_CPRNG is not set
2280CONFIG_CRYPTO_HW=y
2281# CONFIG_CRYPTO_DEV_PADLOCK is not set
2282# CONFIG_CRYPTO_DEV_GEODE is not set
2283# CONFIG_CRYPTO_DEV_HIFN_795X is not set
2284CONFIG_HAVE_KVM=y
2285CONFIG_VIRTUALIZATION=y
2286# CONFIG_KVM is not set
2287# CONFIG_LGUEST is not set
2288# CONFIG_VIRTIO_PCI is not set
2289# CONFIG_VIRTIO_BALLOON is not set
2290
2291#
2292# Library routines
2293#
2294CONFIG_BITREVERSE=y
2295CONFIG_GENERIC_FIND_FIRST_BIT=y
2296CONFIG_GENERIC_FIND_NEXT_BIT=y
2297CONFIG_GENERIC_FIND_LAST_BIT=y
2298# CONFIG_CRC_CCITT is not set
2299# CONFIG_CRC16 is not set
2300CONFIG_CRC_T10DIF=y
2301# CONFIG_CRC_ITU_T is not set
2302CONFIG_CRC32=y
2303# CONFIG_CRC7 is not set
2304# CONFIG_LIBCRC32C is not set
2305CONFIG_AUDIT_GENERIC=y
2306CONFIG_ZLIB_INFLATE=y
2307CONFIG_PLIST=y
2308CONFIG_HAS_IOMEM=y
2309CONFIG_HAS_IOPORT=y
2310CONFIG_HAS_DMA=y
2311
2312CONFIG_INTEL_LNW_DMAC1=y
2313CONFIG_INTEL_LNW_DMAC2=y
2314# CONFIG_LNW_DMA_DEBUG is not set
2315# CONFIG_NET_DMA is not set
2316# CONFIG_DMATEST is not set
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-netbook b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-netbook
deleted file mode 100644
index 9174ff6d5b..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-netbook
+++ /dev/null
@@ -1,52 +0,0 @@
1CONFIG_LOCALVERSION="-netbook"
2
3CONFIG_ACER_WMI=y
4
5CONFIG_EEEPC_LAPTOP=m
6
7CONFIG_R8169=y
8# CONFIG_R8169_VLAN is not set
9
10CONFIG_ATL1E=y
11
12CONFIG_ATH5K=y
13# CONFIG_ATH5K_DEBUG is not set
14
15CONFIG_RT2860=m
16
17CONFIG_RT2860=m
18
19CONFIG_RTL8187SE=m
20
21
22CONFIG_DRM_I915_KMS=y
23CONFIG_FRAMEBUFFER_CONSOLE=y
24CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
25# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
26CONFIG_FONTS=y
27CONFIG_FONT_8x8=y
28CONFIG_FONT_6x11=y
29CONFIG_FONT_7x14=y
30# CONFIG_FONT_PEARL_8x8 is not set
31# CONFIG_FONT_ACORN_8x8 is not set
32# CONFIG_FONT_MINI_4x6 is not set
33# CONFIG_FONT_SUN8x16 is not set
34# CONFIG_FONT_SUN12x22 is not set
35CONFIG_FONT_10x18=y
36
37
38#
39# Enable KVM
40#
41CONFIG_VIRTUALIZATION=y
42CONFIG_KVM=m
43CONFIG_KVM_INTEL=m
44# CONFIG_KVM_AMD is not set
45# CONFIG_KVM_TRACE is not set
46# CONFIG_VIRTIO_PCI is not set
47# CONFIG_VIRTIO_BALLOON is not set
48
49#
50# For VMWARE support
51#
52CONFIG_FUSION_SPI=y
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-menlow b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-menlow
deleted file mode 100644
index 0b0bfd7e6e..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-menlow
+++ /dev/null
@@ -1,3353 +0,0 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29.1
4# Fri May 29 15:05:21 2009
5#
6# CONFIG_64BIT is not set
7CONFIG_X86_32=y
8# CONFIG_X86_64 is not set
9CONFIG_X86=y
10CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
11CONFIG_GENERIC_TIME=y
12CONFIG_GENERIC_CMOS_UPDATE=y
13CONFIG_CLOCKSOURCE_WATCHDOG=y
14CONFIG_GENERIC_CLOCKEVENTS=y
15CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
16CONFIG_LOCKDEP_SUPPORT=y
17CONFIG_STACKTRACE_SUPPORT=y
18CONFIG_HAVE_LATENCYTOP_SUPPORT=y
19CONFIG_FAST_CMPXCHG_LOCAL=y
20CONFIG_MMU=y
21CONFIG_ZONE_DMA=y
22CONFIG_GENERIC_ISA_DMA=y
23CONFIG_GENERIC_IOMAP=y
24CONFIG_GENERIC_BUG=y
25CONFIG_GENERIC_HWEIGHT=y
26CONFIG_ARCH_MAY_HAVE_PC_FDC=y
27# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
28CONFIG_RWSEM_XCHGADD_ALGORITHM=y
29CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
30CONFIG_GENERIC_CALIBRATE_DELAY=y
31# CONFIG_GENERIC_TIME_VSYSCALL is not set
32CONFIG_ARCH_HAS_CPU_RELAX=y
33CONFIG_ARCH_HAS_DEFAULT_IDLE=y
34CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
35CONFIG_HAVE_SETUP_PER_CPU_AREA=y
36# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
37CONFIG_ARCH_HIBERNATION_POSSIBLE=y
38CONFIG_ARCH_SUSPEND_POSSIBLE=y
39# CONFIG_ZONE_DMA32 is not set
40CONFIG_ARCH_POPULATES_NODE_MAP=y
41# CONFIG_AUDIT_ARCH is not set
42CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
43CONFIG_GENERIC_HARDIRQS=y
44CONFIG_GENERIC_IRQ_PROBE=y
45CONFIG_GENERIC_PENDING_IRQ=y
46CONFIG_X86_SMP=y
47CONFIG_USE_GENERIC_SMP_HELPERS=y
48CONFIG_X86_32_SMP=y
49CONFIG_X86_HT=y
50CONFIG_X86_BIOS_REBOOT=y
51CONFIG_X86_TRAMPOLINE=y
52CONFIG_KTIME_SCALAR=y
53CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
54
55#
56# General setup
57#
58CONFIG_EXPERIMENTAL=y
59CONFIG_LOCK_KERNEL=y
60CONFIG_INIT_ENV_ARG_LIMIT=32
61CONFIG_LOCALVERSION="-default"
62# CONFIG_LOCALVERSION_AUTO is not set
63CONFIG_SWAP=y
64CONFIG_SYSVIPC=y
65CONFIG_SYSVIPC_SYSCTL=y
66CONFIG_POSIX_MQUEUE=y
67CONFIG_BSD_PROCESS_ACCT=y
68CONFIG_BSD_PROCESS_ACCT_V3=y
69CONFIG_TASKSTATS=y
70CONFIG_TASK_DELAY_ACCT=y
71# CONFIG_TASK_XACCT is not set
72CONFIG_AUDIT=y
73CONFIG_AUDITSYSCALL=y
74CONFIG_AUDIT_TREE=y
75
76#
77# RCU Subsystem
78#
79CONFIG_CLASSIC_RCU=y
80# CONFIG_TREE_RCU is not set
81# CONFIG_PREEMPT_RCU is not set
82# CONFIG_TREE_RCU_TRACE is not set
83# CONFIG_PREEMPT_RCU_TRACE is not set
84CONFIG_IKCONFIG=y
85CONFIG_IKCONFIG_PROC=y
86CONFIG_LOG_BUF_SHIFT=15
87CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
88# CONFIG_GROUP_SCHED is not set
89# CONFIG_CGROUPS is not set
90CONFIG_SYSFS_DEPRECATED=y
91CONFIG_SYSFS_DEPRECATED_V2=y
92CONFIG_RELAY=y
93CONFIG_NAMESPACES=y
94# CONFIG_UTS_NS is not set
95# CONFIG_IPC_NS is not set
96# CONFIG_USER_NS is not set
97# CONFIG_PID_NS is not set
98# CONFIG_NET_NS is not set
99CONFIG_BLK_DEV_INITRD=y
100CONFIG_INITRAMFS_SOURCE=""
101CONFIG_CC_OPTIMIZE_FOR_SIZE=y
102CONFIG_SYSCTL=y
103CONFIG_ANON_INODES=y
104# CONFIG_EMBEDDED is not set
105CONFIG_UID16=y
106CONFIG_SYSCTL_SYSCALL=y
107CONFIG_KALLSYMS=y
108CONFIG_KALLSYMS_ALL=y
109# CONFIG_KALLSYMS_EXTRA_PASS is not set
110CONFIG_HOTPLUG=y
111CONFIG_PRINTK=y
112CONFIG_BUG=y
113CONFIG_ELF_CORE=y
114CONFIG_PCSPKR_PLATFORM=y
115CONFIG_BASE_FULL=y
116CONFIG_FUTEX=y
117CONFIG_EPOLL=y
118CONFIG_SIGNALFD=y
119CONFIG_TIMERFD=y
120CONFIG_EVENTFD=y
121CONFIG_SHMEM=y
122CONFIG_AIO=y
123CONFIG_VM_EVENT_COUNTERS=y
124CONFIG_PCI_QUIRKS=y
125CONFIG_COMPAT_BRK=y
126CONFIG_SLAB=y
127# CONFIG_SLUB is not set
128# CONFIG_SLOB is not set
129CONFIG_PROFILING=y
130CONFIG_TRACEPOINTS=y
131# CONFIG_MARKERS is not set
132# CONFIG_OPROFILE is not set
133CONFIG_HAVE_OPROFILE=y
134# CONFIG_KPROBES is not set
135CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
136CONFIG_HAVE_IOREMAP_PROT=y
137CONFIG_HAVE_KPROBES=y
138CONFIG_HAVE_KRETPROBES=y
139CONFIG_HAVE_ARCH_TRACEHOOK=y
140CONFIG_HAVE_GENERIC_DMA_COHERENT=y
141CONFIG_SLABINFO=y
142CONFIG_RT_MUTEXES=y
143CONFIG_BASE_SMALL=0
144CONFIG_MODULES=y
145# CONFIG_MODULE_FORCE_LOAD is not set
146CONFIG_MODULE_UNLOAD=y
147CONFIG_MODULE_FORCE_UNLOAD=y
148CONFIG_MODVERSIONS=y
149CONFIG_MODULE_SRCVERSION_ALL=y
150CONFIG_STOP_MACHINE=y
151CONFIG_BLOCK=y
152CONFIG_LBD=y
153CONFIG_BLK_DEV_IO_TRACE=y
154# CONFIG_BLK_DEV_BSG is not set
155# CONFIG_BLK_DEV_INTEGRITY is not set
156
157#
158# IO Schedulers
159#
160CONFIG_IOSCHED_NOOP=y
161CONFIG_IOSCHED_AS=y
162CONFIG_IOSCHED_DEADLINE=y
163CONFIG_IOSCHED_CFQ=y
164# CONFIG_DEFAULT_AS is not set
165# CONFIG_DEFAULT_DEADLINE is not set
166CONFIG_DEFAULT_CFQ=y
167# CONFIG_DEFAULT_NOOP is not set
168CONFIG_DEFAULT_IOSCHED="cfq"
169CONFIG_FREEZER=y
170
171#
172# Processor type and features
173#
174CONFIG_TICK_ONESHOT=y
175CONFIG_NO_HZ=y
176CONFIG_HIGH_RES_TIMERS=y
177CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
178CONFIG_SMP=y
179# CONFIG_SPARSE_IRQ is not set
180CONFIG_X86_FIND_SMP_CONFIG=y
181CONFIG_X86_MPPARSE=y
182# CONFIG_X86_PC is not set
183# CONFIG_X86_ELAN is not set
184# CONFIG_X86_VOYAGER is not set
185CONFIG_X86_GENERICARCH=y
186# CONFIG_X86_NUMAQ is not set
187# CONFIG_X86_SUMMIT is not set
188# CONFIG_X86_ES7000 is not set
189# CONFIG_X86_BIGSMP is not set
190# CONFIG_X86_VSMP is not set
191# CONFIG_X86_RDC321X is not set
192CONFIG_SCHED_OMIT_FRAME_POINTER=y
193# CONFIG_PARAVIRT_GUEST is not set
194# CONFIG_MEMTEST is not set
195CONFIG_X86_CYCLONE_TIMER=y
196# CONFIG_M386 is not set
197# CONFIG_M486 is not set
198CONFIG_M586=y
199# CONFIG_M586TSC is not set
200# CONFIG_M586MMX is not set
201# CONFIG_M686 is not set
202# CONFIG_MPENTIUMII is not set
203# CONFIG_MPENTIUMIII is not set
204# CONFIG_MPENTIUMM is not set
205# CONFIG_MPENTIUM4 is not set
206# CONFIG_MK6 is not set
207# CONFIG_MK7 is not set
208# CONFIG_MK8 is not set
209# CONFIG_MCRUSOE is not set
210# CONFIG_MEFFICEON is not set
211# CONFIG_MWINCHIPC6 is not set
212# CONFIG_MWINCHIP3D is not set
213# CONFIG_MGEODEGX1 is not set
214# CONFIG_MGEODE_LX is not set
215# CONFIG_MCYRIXIII is not set
216# CONFIG_MVIAC3_2 is not set
217# CONFIG_MVIAC7 is not set
218# CONFIG_MPSC is not set
219# CONFIG_MCORE2 is not set
220# CONFIG_GENERIC_CPU is not set
221CONFIG_X86_GENERIC=y
222CONFIG_X86_CPU=y
223CONFIG_X86_CMPXCHG=y
224CONFIG_X86_L1_CACHE_SHIFT=7
225CONFIG_X86_XADD=y
226CONFIG_X86_PPRO_FENCE=y
227CONFIG_X86_F00F_BUG=y
228CONFIG_X86_WP_WORKS_OK=y
229CONFIG_X86_INVLPG=y
230CONFIG_X86_BSWAP=y
231CONFIG_X86_POPAD_OK=y
232CONFIG_X86_ALIGNMENT_16=y
233CONFIG_X86_INTEL_USERCOPY=y
234CONFIG_X86_MINIMUM_CPU_FAMILY=4
235CONFIG_CPU_SUP_INTEL=y
236CONFIG_CPU_SUP_CYRIX_32=y
237CONFIG_CPU_SUP_AMD=y
238CONFIG_CPU_SUP_CENTAUR_32=y
239CONFIG_CPU_SUP_TRANSMETA_32=y
240CONFIG_CPU_SUP_UMC_32=y
241CONFIG_HPET_TIMER=y
242CONFIG_DMI=y
243# CONFIG_IOMMU_HELPER is not set
244# CONFIG_IOMMU_API is not set
245CONFIG_NR_CPUS=8
246# CONFIG_SCHED_SMT is not set
247CONFIG_SCHED_MC=y
248# CONFIG_PREEMPT_NONE is not set
249CONFIG_PREEMPT_VOLUNTARY=y
250# CONFIG_PREEMPT is not set
251CONFIG_X86_LOCAL_APIC=y
252CONFIG_X86_IO_APIC=y
253# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
254CONFIG_X86_MCE=y
255CONFIG_X86_MCE_NONFATAL=y
256# CONFIG_X86_MCE_P4THERMAL is not set
257CONFIG_VM86=y
258# CONFIG_TOSHIBA is not set
259# CONFIG_I8K is not set
260CONFIG_X86_REBOOTFIXUPS=y
261CONFIG_MICROCODE=m
262CONFIG_MICROCODE_INTEL=y
263# CONFIG_MICROCODE_AMD is not set
264CONFIG_MICROCODE_OLD_INTERFACE=y
265CONFIG_X86_MSR=m
266CONFIG_X86_CPUID=m
267# CONFIG_NOHIGHMEM is not set
268CONFIG_HIGHMEM4G=y
269# CONFIG_HIGHMEM64G is not set
270CONFIG_PAGE_OFFSET=0xC0000000
271CONFIG_HIGHMEM=y
272# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
273CONFIG_ARCH_FLATMEM_ENABLE=y
274CONFIG_ARCH_SPARSEMEM_ENABLE=y
275CONFIG_ARCH_SELECT_MEMORY_MODEL=y
276CONFIG_SELECT_MEMORY_MODEL=y
277CONFIG_FLATMEM_MANUAL=y
278# CONFIG_DISCONTIGMEM_MANUAL is not set
279# CONFIG_SPARSEMEM_MANUAL is not set
280CONFIG_FLATMEM=y
281CONFIG_FLAT_NODE_MEM_MAP=y
282CONFIG_SPARSEMEM_STATIC=y
283CONFIG_PAGEFLAGS_EXTENDED=y
284CONFIG_SPLIT_PTLOCK_CPUS=4
285# CONFIG_PHYS_ADDR_T_64BIT is not set
286CONFIG_ZONE_DMA_FLAG=1
287CONFIG_BOUNCE=y
288CONFIG_VIRT_TO_BUS=y
289CONFIG_UNEVICTABLE_LRU=y
290CONFIG_HIGHPTE=y
291# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
292CONFIG_X86_RESERVE_LOW_64K=y
293# CONFIG_MATH_EMULATION is not set
294CONFIG_MTRR=y
295# CONFIG_MTRR_SANITIZER is not set
296# CONFIG_X86_PAT is not set
297CONFIG_EFI=y
298CONFIG_SECCOMP=y
299# CONFIG_HZ_100 is not set
300CONFIG_HZ_250=y
301# CONFIG_HZ_300 is not set
302# CONFIG_HZ_1000 is not set
303CONFIG_HZ=250
304CONFIG_SCHED_HRTICK=y
305CONFIG_KEXEC=y
306# CONFIG_CRASH_DUMP is not set
307# CONFIG_KEXEC_JUMP is not set
308CONFIG_PHYSICAL_START=0x100000
309# CONFIG_RELOCATABLE is not set
310CONFIG_PHYSICAL_ALIGN=0x100000
311CONFIG_HOTPLUG_CPU=y
312CONFIG_COMPAT_VDSO=y
313# CONFIG_CMDLINE_BOOL is not set
314CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
315
316#
317# Power management and ACPI options
318#
319CONFIG_PM=y
320CONFIG_PM_DEBUG=y
321CONFIG_PM_VERBOSE=y
322CONFIG_CAN_PM_TRACE=y
323CONFIG_PM_TRACE=y
324CONFIG_PM_TRACE_RTC=y
325CONFIG_PM_SLEEP_SMP=y
326CONFIG_PM_SLEEP=y
327CONFIG_SUSPEND=y
328CONFIG_SUSPEND_FREEZER=y
329CONFIG_HIBERNATION=y
330CONFIG_PM_STD_PARTITION=""
331CONFIG_ACPI=y
332CONFIG_ACPI_SLEEP=y
333CONFIG_ACPI_PROCFS=y
334CONFIG_ACPI_PROCFS_POWER=y
335CONFIG_ACPI_SYSFS_POWER=y
336CONFIG_ACPI_PROC_EVENT=y
337CONFIG_ACPI_AC=y
338CONFIG_ACPI_BATTERY=y
339CONFIG_ACPI_BUTTON=y
340CONFIG_ACPI_VIDEO=y
341CONFIG_ACPI_FAN=y
342CONFIG_ACPI_DOCK=y
343CONFIG_ACPI_PROCESSOR=y
344CONFIG_ACPI_HOTPLUG_CPU=y
345CONFIG_ACPI_THERMAL=y
346CONFIG_ACPI_CUSTOM_DSDT_FILE=""
347# CONFIG_ACPI_CUSTOM_DSDT is not set
348CONFIG_ACPI_BLACKLIST_YEAR=2001
349# CONFIG_ACPI_DEBUG is not set
350# CONFIG_ACPI_PCI_SLOT is not set
351CONFIG_X86_PM_TIMER=y
352CONFIG_ACPI_CONTAINER=y
353CONFIG_ACPI_SBS=y
354CONFIG_X86_APM_BOOT=y
355CONFIG_APM=y
356# CONFIG_APM_IGNORE_USER_SUSPEND is not set
357CONFIG_APM_DO_ENABLE=y
358# CONFIG_APM_CPU_IDLE is not set
359CONFIG_APM_DISPLAY_BLANK=y
360CONFIG_APM_ALLOW_INTS=y
361
362#
363# CPU Frequency scaling
364#
365CONFIG_CPU_FREQ=y
366CONFIG_CPU_FREQ_TABLE=y
367# CONFIG_CPU_FREQ_DEBUG is not set
368CONFIG_CPU_FREQ_STAT=m
369CONFIG_CPU_FREQ_STAT_DETAILS=y
370# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
371# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
372CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
373# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
374# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
375CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
376CONFIG_CPU_FREQ_GOV_POWERSAVE=m
377CONFIG_CPU_FREQ_GOV_USERSPACE=y
378CONFIG_CPU_FREQ_GOV_ONDEMAND=m
379CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
380
381#
382# CPUFreq processor drivers
383#
384CONFIG_X86_ACPI_CPUFREQ=y
385# CONFIG_X86_POWERNOW_K6 is not set
386# CONFIG_X86_POWERNOW_K7 is not set
387# CONFIG_X86_POWERNOW_K8 is not set
388# CONFIG_X86_GX_SUSPMOD is not set
389CONFIG_X86_SPEEDSTEP_CENTRINO=m
390CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE=y
391CONFIG_X86_SPEEDSTEP_ICH=m
392CONFIG_X86_SPEEDSTEP_SMI=m
393CONFIG_X86_P4_CLOCKMOD=m
394# CONFIG_X86_CPUFREQ_NFORCE2 is not set
395# CONFIG_X86_LONGRUN is not set
396# CONFIG_X86_LONGHAUL is not set
397# CONFIG_X86_E_POWERSAVER is not set
398
399#
400# shared options
401#
402CONFIG_X86_SPEEDSTEP_LIB=m
403CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK=y
404CONFIG_CPU_IDLE=y
405CONFIG_CPU_IDLE_GOV_LADDER=y
406CONFIG_CPU_IDLE_GOV_MENU=y
407
408#
409# Bus options (PCI etc.)
410#
411CONFIG_PCI=y
412# CONFIG_PCI_GOBIOS is not set
413# CONFIG_PCI_GOMMCONFIG is not set
414# CONFIG_PCI_GODIRECT is not set
415# CONFIG_PCI_GOOLPC is not set
416CONFIG_PCI_GOANY=y
417CONFIG_PCI_BIOS=y
418CONFIG_PCI_DIRECT=y
419CONFIG_PCI_MMCONFIG=y
420CONFIG_PCI_DOMAINS=y
421CONFIG_PCIEPORTBUS=y
422CONFIG_HOTPLUG_PCI_PCIE=m
423CONFIG_PCIEAER=y
424# CONFIG_PCIEASPM is not set
425CONFIG_ARCH_SUPPORTS_MSI=y
426CONFIG_PCI_MSI=y
427CONFIG_PCI_LEGACY=y
428# CONFIG_PCI_DEBUG is not set
429# CONFIG_PCI_STUB is not set
430CONFIG_HT_IRQ=y
431CONFIG_ISA_DMA_API=y
432CONFIG_ISA=y
433# CONFIG_EISA is not set
434# CONFIG_MCA is not set
435# CONFIG_SCx200 is not set
436# CONFIG_OLPC is not set
437# CONFIG_PCCARD is not set
438CONFIG_HOTPLUG_PCI=m
439CONFIG_HOTPLUG_PCI_FAKE=m
440# CONFIG_HOTPLUG_PCI_COMPAQ is not set
441# CONFIG_HOTPLUG_PCI_IBM is not set
442CONFIG_HOTPLUG_PCI_ACPI=m
443CONFIG_HOTPLUG_PCI_ACPI_IBM=m
444CONFIG_HOTPLUG_PCI_CPCI=y
445CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
446CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
447CONFIG_HOTPLUG_PCI_SHPC=m
448
449#
450# Executable file formats / Emulations
451#
452CONFIG_BINFMT_ELF=y
453# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
454CONFIG_HAVE_AOUT=y
455CONFIG_BINFMT_AOUT=m
456CONFIG_BINFMT_MISC=m
457CONFIG_HAVE_ATOMIC_IOMAP=y
458CONFIG_NET=y
459
460#
461# Networking options
462#
463CONFIG_COMPAT_NET_DEV_OPS=y
464CONFIG_PACKET=m
465CONFIG_PACKET_MMAP=y
466CONFIG_UNIX=y
467CONFIG_XFRM=y
468CONFIG_XFRM_USER=m
469# CONFIG_XFRM_SUB_POLICY is not set
470# CONFIG_XFRM_MIGRATE is not set
471# CONFIG_XFRM_STATISTICS is not set
472CONFIG_XFRM_IPCOMP=m
473CONFIG_NET_KEY=m
474# CONFIG_NET_KEY_MIGRATE is not set
475CONFIG_INET=y
476CONFIG_IP_MULTICAST=y
477CONFIG_IP_ADVANCED_ROUTER=y
478CONFIG_ASK_IP_FIB_HASH=y
479# CONFIG_IP_FIB_TRIE is not set
480CONFIG_IP_FIB_HASH=y
481CONFIG_IP_MULTIPLE_TABLES=y
482CONFIG_IP_ROUTE_MULTIPATH=y
483CONFIG_IP_ROUTE_VERBOSE=y
484CONFIG_IP_PNP=y
485CONFIG_IP_PNP_DHCP=y
486CONFIG_IP_PNP_BOOTP=y
487CONFIG_IP_PNP_RARP=y
488CONFIG_NET_IPIP=m
489CONFIG_NET_IPGRE=m
490CONFIG_NET_IPGRE_BROADCAST=y
491CONFIG_IP_MROUTE=y
492CONFIG_IP_PIMSM_V1=y
493CONFIG_IP_PIMSM_V2=y
494# CONFIG_ARPD is not set
495CONFIG_SYN_COOKIES=y
496CONFIG_INET_AH=m
497CONFIG_INET_ESP=m
498CONFIG_INET_IPCOMP=m
499CONFIG_INET_XFRM_TUNNEL=m
500CONFIG_INET_TUNNEL=m
501CONFIG_INET_XFRM_MODE_TRANSPORT=m
502CONFIG_INET_XFRM_MODE_TUNNEL=m
503CONFIG_INET_XFRM_MODE_BEET=y
504# CONFIG_INET_LRO is not set
505CONFIG_INET_DIAG=m
506CONFIG_INET_TCP_DIAG=m
507CONFIG_TCP_CONG_ADVANCED=y
508CONFIG_TCP_CONG_BIC=m
509CONFIG_TCP_CONG_CUBIC=m
510CONFIG_TCP_CONG_WESTWOOD=m
511CONFIG_TCP_CONG_HTCP=m
512CONFIG_TCP_CONG_HSTCP=m
513CONFIG_TCP_CONG_HYBLA=m
514CONFIG_TCP_CONG_VEGAS=m
515CONFIG_TCP_CONG_SCALABLE=m
516CONFIG_TCP_CONG_LP=m
517CONFIG_TCP_CONG_VENO=m
518# CONFIG_TCP_CONG_YEAH is not set
519# CONFIG_TCP_CONG_ILLINOIS is not set
520# CONFIG_DEFAULT_BIC is not set
521# CONFIG_DEFAULT_CUBIC is not set
522# CONFIG_DEFAULT_HTCP is not set
523# CONFIG_DEFAULT_VEGAS is not set
524# CONFIG_DEFAULT_WESTWOOD is not set
525CONFIG_DEFAULT_RENO=y
526CONFIG_DEFAULT_TCP_CONG="reno"
527# CONFIG_TCP_MD5SIG is not set
528CONFIG_IPV6=m
529CONFIG_IPV6_PRIVACY=y
530CONFIG_IPV6_ROUTER_PREF=y
531CONFIG_IPV6_ROUTE_INFO=y
532# CONFIG_IPV6_OPTIMISTIC_DAD is not set
533CONFIG_INET6_AH=m
534CONFIG_INET6_ESP=m
535CONFIG_INET6_IPCOMP=m
536# CONFIG_IPV6_MIP6 is not set
537CONFIG_INET6_XFRM_TUNNEL=m
538CONFIG_INET6_TUNNEL=m
539CONFIG_INET6_XFRM_MODE_TRANSPORT=m
540CONFIG_INET6_XFRM_MODE_TUNNEL=m
541CONFIG_INET6_XFRM_MODE_BEET=m
542# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
543CONFIG_IPV6_SIT=m
544CONFIG_IPV6_NDISC_NODETYPE=y
545CONFIG_IPV6_TUNNEL=m
546# CONFIG_IPV6_MULTIPLE_TABLES is not set
547# CONFIG_IPV6_MROUTE is not set
548# CONFIG_NETLABEL is not set
549CONFIG_NETWORK_SECMARK=y
550CONFIG_NETFILTER=y
551# CONFIG_NETFILTER_DEBUG is not set
552CONFIG_NETFILTER_ADVANCED=y
553CONFIG_BRIDGE_NETFILTER=y
554
555#
556# Core Netfilter Configuration
557#
558CONFIG_NETFILTER_NETLINK=m
559CONFIG_NETFILTER_NETLINK_QUEUE=m
560CONFIG_NETFILTER_NETLINK_LOG=m
561# CONFIG_NF_CONNTRACK is not set
562CONFIG_NETFILTER_XTABLES=m
563CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
564# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
565CONFIG_NETFILTER_XT_TARGET_MARK=m
566# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
567CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
568# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
569# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
570CONFIG_NETFILTER_XT_TARGET_SECMARK=m
571# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
572# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
573CONFIG_NETFILTER_XT_MATCH_COMMENT=m
574CONFIG_NETFILTER_XT_MATCH_DCCP=m
575# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
576CONFIG_NETFILTER_XT_MATCH_ESP=m
577# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
578# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
579CONFIG_NETFILTER_XT_MATCH_LENGTH=m
580CONFIG_NETFILTER_XT_MATCH_LIMIT=m
581CONFIG_NETFILTER_XT_MATCH_MAC=m
582CONFIG_NETFILTER_XT_MATCH_MARK=m
583CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
584# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
585CONFIG_NETFILTER_XT_MATCH_POLICY=m
586CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
587CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
588CONFIG_NETFILTER_XT_MATCH_QUOTA=m
589# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
590CONFIG_NETFILTER_XT_MATCH_REALM=m
591# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
592CONFIG_NETFILTER_XT_MATCH_SCTP=m
593CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
594CONFIG_NETFILTER_XT_MATCH_STRING=m
595CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
596# CONFIG_NETFILTER_XT_MATCH_TIME is not set
597# CONFIG_NETFILTER_XT_MATCH_U32 is not set
598CONFIG_IP_VS=m
599# CONFIG_IP_VS_IPV6 is not set
600# CONFIG_IP_VS_DEBUG is not set
601CONFIG_IP_VS_TAB_BITS=12
602
603#
604# IPVS transport protocol load balancing support
605#
606CONFIG_IP_VS_PROTO_TCP=y
607CONFIG_IP_VS_PROTO_UDP=y
608CONFIG_IP_VS_PROTO_AH_ESP=y
609CONFIG_IP_VS_PROTO_ESP=y
610CONFIG_IP_VS_PROTO_AH=y
611
612#
613# IPVS scheduler
614#
615CONFIG_IP_VS_RR=m
616CONFIG_IP_VS_WRR=m
617CONFIG_IP_VS_LC=m
618CONFIG_IP_VS_WLC=m
619CONFIG_IP_VS_LBLC=m
620CONFIG_IP_VS_LBLCR=m
621CONFIG_IP_VS_DH=m
622CONFIG_IP_VS_SH=m
623CONFIG_IP_VS_SED=m
624CONFIG_IP_VS_NQ=m
625
626#
627# IPVS application helper
628#
629CONFIG_IP_VS_FTP=m
630
631#
632# IP: Netfilter Configuration
633#
634# CONFIG_NF_DEFRAG_IPV4 is not set
635CONFIG_IP_NF_QUEUE=m
636CONFIG_IP_NF_IPTABLES=m
637CONFIG_IP_NF_MATCH_ADDRTYPE=m
638CONFIG_IP_NF_MATCH_AH=m
639CONFIG_IP_NF_MATCH_ECN=m
640CONFIG_IP_NF_MATCH_TTL=m
641CONFIG_IP_NF_FILTER=m
642CONFIG_IP_NF_TARGET_REJECT=m
643CONFIG_IP_NF_TARGET_LOG=m
644CONFIG_IP_NF_TARGET_ULOG=m
645CONFIG_IP_NF_MANGLE=m
646CONFIG_IP_NF_TARGET_ECN=m
647CONFIG_IP_NF_TARGET_TTL=m
648CONFIG_IP_NF_RAW=m
649# CONFIG_IP_NF_SECURITY is not set
650CONFIG_IP_NF_ARPTABLES=m
651CONFIG_IP_NF_ARPFILTER=m
652CONFIG_IP_NF_ARP_MANGLE=m
653
654#
655# IPv6: Netfilter Configuration
656#
657CONFIG_IP6_NF_QUEUE=m
658CONFIG_IP6_NF_IPTABLES=m
659CONFIG_IP6_NF_MATCH_AH=m
660CONFIG_IP6_NF_MATCH_EUI64=m
661CONFIG_IP6_NF_MATCH_FRAG=m
662CONFIG_IP6_NF_MATCH_OPTS=m
663CONFIG_IP6_NF_MATCH_HL=m
664CONFIG_IP6_NF_MATCH_IPV6HEADER=m
665# CONFIG_IP6_NF_MATCH_MH is not set
666CONFIG_IP6_NF_MATCH_RT=m
667CONFIG_IP6_NF_TARGET_LOG=m
668CONFIG_IP6_NF_FILTER=m
669CONFIG_IP6_NF_TARGET_REJECT=m
670CONFIG_IP6_NF_MANGLE=m
671CONFIG_IP6_NF_TARGET_HL=m
672CONFIG_IP6_NF_RAW=m
673# CONFIG_IP6_NF_SECURITY is not set
674
675#
676# DECnet: Netfilter Configuration
677#
678CONFIG_DECNET_NF_GRABULATOR=m
679CONFIG_BRIDGE_NF_EBTABLES=m
680CONFIG_BRIDGE_EBT_BROUTE=m
681CONFIG_BRIDGE_EBT_T_FILTER=m
682CONFIG_BRIDGE_EBT_T_NAT=m
683CONFIG_BRIDGE_EBT_802_3=m
684CONFIG_BRIDGE_EBT_AMONG=m
685CONFIG_BRIDGE_EBT_ARP=m
686CONFIG_BRIDGE_EBT_IP=m
687# CONFIG_BRIDGE_EBT_IP6 is not set
688CONFIG_BRIDGE_EBT_LIMIT=m
689CONFIG_BRIDGE_EBT_MARK=m
690CONFIG_BRIDGE_EBT_PKTTYPE=m
691CONFIG_BRIDGE_EBT_STP=m
692CONFIG_BRIDGE_EBT_VLAN=m
693CONFIG_BRIDGE_EBT_ARPREPLY=m
694CONFIG_BRIDGE_EBT_DNAT=m
695CONFIG_BRIDGE_EBT_MARK_T=m
696CONFIG_BRIDGE_EBT_REDIRECT=m
697CONFIG_BRIDGE_EBT_SNAT=m
698CONFIG_BRIDGE_EBT_LOG=m
699CONFIG_BRIDGE_EBT_ULOG=m
700# CONFIG_BRIDGE_EBT_NFLOG is not set
701CONFIG_IP_DCCP=m
702CONFIG_INET_DCCP_DIAG=m
703
704#
705# DCCP CCIDs Configuration (EXPERIMENTAL)
706#
707# CONFIG_IP_DCCP_CCID2_DEBUG is not set
708CONFIG_IP_DCCP_CCID3=y
709# CONFIG_IP_DCCP_CCID3_DEBUG is not set
710CONFIG_IP_DCCP_CCID3_RTO=100
711CONFIG_IP_DCCP_TFRC_LIB=y
712
713#
714# DCCP Kernel Hacking
715#
716# CONFIG_IP_DCCP_DEBUG is not set
717CONFIG_IP_SCTP=m
718# CONFIG_SCTP_DBG_MSG is not set
719# CONFIG_SCTP_DBG_OBJCNT is not set
720# CONFIG_SCTP_HMAC_NONE is not set
721# CONFIG_SCTP_HMAC_SHA1 is not set
722CONFIG_SCTP_HMAC_MD5=y
723# CONFIG_TIPC is not set
724CONFIG_ATM=m
725CONFIG_ATM_CLIP=m
726CONFIG_ATM_CLIP_NO_ICMP=y
727CONFIG_ATM_LANE=m
728CONFIG_ATM_MPOA=m
729CONFIG_ATM_BR2684=m
730# CONFIG_ATM_BR2684_IPFILTER is not set
731CONFIG_STP=m
732CONFIG_BRIDGE=m
733# CONFIG_NET_DSA is not set
734CONFIG_VLAN_8021Q=m
735# CONFIG_VLAN_8021Q_GVRP is not set
736CONFIG_DECNET=m
737CONFIG_DECNET_ROUTER=y
738CONFIG_LLC=m
739CONFIG_LLC2=m
740CONFIG_IPX=m
741# CONFIG_IPX_INTERN is not set
742CONFIG_ATALK=m
743CONFIG_DEV_APPLETALK=m
744CONFIG_LTPC=m
745CONFIG_COPS=m
746CONFIG_COPS_DAYNA=y
747CONFIG_COPS_TANGENT=y
748CONFIG_IPDDP=m
749CONFIG_IPDDP_ENCAP=y
750CONFIG_IPDDP_DECAP=y
751CONFIG_X25=m
752CONFIG_LAPB=m
753CONFIG_ECONET=m
754# CONFIG_ECONET_AUNUDP is not set
755# CONFIG_ECONET_NATIVE is not set
756CONFIG_WAN_ROUTER=m
757CONFIG_NET_SCHED=y
758
759#
760# Queueing/Scheduling
761#
762CONFIG_NET_SCH_CBQ=m
763CONFIG_NET_SCH_HTB=m
764CONFIG_NET_SCH_HFSC=m
765CONFIG_NET_SCH_ATM=m
766CONFIG_NET_SCH_PRIO=m
767# CONFIG_NET_SCH_MULTIQ is not set
768CONFIG_NET_SCH_RED=m
769CONFIG_NET_SCH_SFQ=m
770CONFIG_NET_SCH_TEQL=m
771CONFIG_NET_SCH_TBF=m
772CONFIG_NET_SCH_GRED=m
773CONFIG_NET_SCH_DSMARK=m
774CONFIG_NET_SCH_NETEM=m
775# CONFIG_NET_SCH_DRR is not set
776CONFIG_NET_SCH_INGRESS=m
777
778#
779# Classification
780#
781CONFIG_NET_CLS=y
782CONFIG_NET_CLS_BASIC=m
783CONFIG_NET_CLS_TCINDEX=m
784CONFIG_NET_CLS_ROUTE4=m
785CONFIG_NET_CLS_ROUTE=y
786CONFIG_NET_CLS_FW=m
787CONFIG_NET_CLS_U32=m
788CONFIG_CLS_U32_PERF=y
789CONFIG_CLS_U32_MARK=y
790CONFIG_NET_CLS_RSVP=m
791CONFIG_NET_CLS_RSVP6=m
792# CONFIG_NET_CLS_FLOW is not set
793# CONFIG_NET_EMATCH is not set
794CONFIG_NET_CLS_ACT=y
795CONFIG_NET_ACT_POLICE=m
796CONFIG_NET_ACT_GACT=m
797CONFIG_GACT_PROB=y
798CONFIG_NET_ACT_MIRRED=m
799CONFIG_NET_ACT_IPT=m
800# CONFIG_NET_ACT_NAT is not set
801CONFIG_NET_ACT_PEDIT=m
802CONFIG_NET_ACT_SIMP=m
803# CONFIG_NET_ACT_SKBEDIT is not set
804# CONFIG_NET_CLS_IND is not set
805CONFIG_NET_SCH_FIFO=y
806# CONFIG_DCB is not set
807
808#
809# Network testing
810#
811CONFIG_NET_PKTGEN=m
812# CONFIG_HAMRADIO is not set
813# CONFIG_CAN is not set
814# CONFIG_IRDA is not set
815CONFIG_BT=m
816CONFIG_BT_L2CAP=m
817CONFIG_BT_SCO=m
818CONFIG_BT_RFCOMM=m
819CONFIG_BT_RFCOMM_TTY=y
820CONFIG_BT_BNEP=m
821CONFIG_BT_BNEP_MC_FILTER=y
822CONFIG_BT_BNEP_PROTO_FILTER=y
823CONFIG_BT_HIDP=m
824
825#
826# Bluetooth device drivers
827#
828# CONFIG_BT_HCIBTUSB is not set
829# CONFIG_BT_HCIBTSDIO is not set
830CONFIG_BT_HCIUART=m
831CONFIG_BT_HCIUART_H4=y
832CONFIG_BT_HCIUART_BCSP=y
833# CONFIG_BT_HCIUART_LL is not set
834CONFIG_BT_HCIBCM203X=m
835CONFIG_BT_HCIBPA10X=m
836CONFIG_BT_HCIBFUSB=m
837CONFIG_BT_HCIVHCI=m
838# CONFIG_AF_RXRPC is not set
839# CONFIG_PHONET is not set
840CONFIG_FIB_RULES=y
841CONFIG_WIRELESS=y
842# CONFIG_CFG80211 is not set
843CONFIG_WIRELESS_OLD_REGULATORY=y
844CONFIG_WIRELESS_EXT=y
845CONFIG_WIRELESS_EXT_SYSFS=y
846CONFIG_LIB80211=m
847CONFIG_LIB80211_CRYPT_WEP=m
848CONFIG_LIB80211_CRYPT_CCMP=m
849CONFIG_LIB80211_CRYPT_TKIP=m
850# CONFIG_LIB80211_DEBUG is not set
851# CONFIG_MAC80211 is not set
852# CONFIG_WIMAX is not set
853# CONFIG_RFKILL is not set
854# CONFIG_NET_9P is not set
855
856#
857# Device Drivers
858#
859
860#
861# Generic Driver Options
862#
863CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
864# CONFIG_STANDALONE is not set
865CONFIG_PREVENT_FIRMWARE_BUILD=y
866CONFIG_FW_LOADER=y
867CONFIG_FIRMWARE_IN_KERNEL=y
868CONFIG_EXTRA_FIRMWARE=""
869# CONFIG_DEBUG_DRIVER is not set
870# CONFIG_DEBUG_DEVRES is not set
871# CONFIG_SYS_HYPERVISOR is not set
872CONFIG_CONNECTOR=y
873CONFIG_PROC_EVENTS=y
874CONFIG_MTD=m
875# CONFIG_MTD_DEBUG is not set
876CONFIG_MTD_CONCAT=m
877CONFIG_MTD_PARTITIONS=y
878# CONFIG_MTD_TESTS is not set
879CONFIG_MTD_REDBOOT_PARTS=m
880CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
881# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
882# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
883# CONFIG_MTD_AR7_PARTS is not set
884
885#
886# User Modules And Translation Layers
887#
888CONFIG_MTD_CHAR=m
889CONFIG_HAVE_MTD_OTP=y
890CONFIG_MTD_BLKDEVS=m
891CONFIG_MTD_BLOCK=m
892# CONFIG_MTD_BLOCK_RO is not set
893# CONFIG_FTL is not set
894# CONFIG_NFTL is not set
895# CONFIG_INFTL is not set
896CONFIG_RFD_FTL=m
897# CONFIG_SSFDC is not set
898# CONFIG_MTD_OOPS is not set
899
900#
901# RAM/ROM/Flash chip drivers
902#
903CONFIG_MTD_CFI=m
904CONFIG_MTD_JEDECPROBE=m
905CONFIG_MTD_GEN_PROBE=m
906CONFIG_MTD_CFI_ADV_OPTIONS=y
907CONFIG_MTD_CFI_NOSWAP=y
908# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
909# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
910# CONFIG_MTD_CFI_GEOMETRY is not set
911CONFIG_MTD_MAP_BANK_WIDTH_1=y
912CONFIG_MTD_MAP_BANK_WIDTH_2=y
913CONFIG_MTD_MAP_BANK_WIDTH_4=y
914# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
915# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
916# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
917CONFIG_MTD_CFI_I1=y
918CONFIG_MTD_CFI_I2=y
919# CONFIG_MTD_CFI_I4 is not set
920# CONFIG_MTD_CFI_I8 is not set
921# CONFIG_MTD_OTP is not set
922CONFIG_MTD_CFI_INTELEXT=m
923CONFIG_MTD_CFI_AMDSTD=m
924CONFIG_MTD_CFI_STAA=m
925CONFIG_MTD_CFI_UTIL=m
926# CONFIG_MTD_RAM is not set
927# CONFIG_MTD_ROM is not set
928CONFIG_MTD_ABSENT=m
929
930#
931# Mapping drivers for chip access
932#
933CONFIG_MTD_COMPLEX_MAPPINGS=y
934CONFIG_MTD_PHYSMAP=m
935# CONFIG_MTD_PHYSMAP_COMPAT is not set
936CONFIG_MTD_SC520CDP=m
937CONFIG_MTD_NETSC520=m
938CONFIG_MTD_TS5500=m
939CONFIG_MTD_SBC_GXX=m
940CONFIG_MTD_AMD76XROM=m
941CONFIG_MTD_ICHXROM=m
942# CONFIG_MTD_ESB2ROM is not set
943# CONFIG_MTD_CK804XROM is not set
944CONFIG_MTD_SCB2_FLASH=m
945CONFIG_MTD_NETtel=m
946CONFIG_MTD_DILNETPC=m
947CONFIG_MTD_DILNETPC_BOOTSIZE=0x80000
948CONFIG_MTD_L440GX=m
949CONFIG_MTD_PCI=m
950# CONFIG_MTD_INTEL_VR_NOR is not set
951# CONFIG_MTD_PLATRAM is not set
952
953#
954# Self-contained MTD device drivers
955#
956CONFIG_MTD_PMC551=m
957CONFIG_MTD_PMC551_BUGFIX=y
958# CONFIG_MTD_PMC551_DEBUG is not set
959# CONFIG_MTD_DATAFLASH is not set
960# CONFIG_MTD_M25P80 is not set
961CONFIG_MTD_SLRAM=m
962CONFIG_MTD_PHRAM=m
963CONFIG_MTD_MTDRAM=m
964CONFIG_MTDRAM_TOTAL_SIZE=4096
965CONFIG_MTDRAM_ERASE_SIZE=128
966CONFIG_MTD_BLOCK2MTD=m
967
968#
969# Disk-On-Chip Device Drivers
970#
971CONFIG_MTD_DOC2000=m
972CONFIG_MTD_DOC2001=m
973CONFIG_MTD_DOC2001PLUS=m
974CONFIG_MTD_DOCPROBE=m
975CONFIG_MTD_DOCECC=m
976CONFIG_MTD_DOCPROBE_ADVANCED=y
977CONFIG_MTD_DOCPROBE_ADDRESS=0x0000
978CONFIG_MTD_DOCPROBE_HIGH=y
979CONFIG_MTD_DOCPROBE_55AA=y
980CONFIG_MTD_NAND=m
981# CONFIG_MTD_NAND_VERIFY_WRITE is not set
982CONFIG_MTD_NAND_ECC_SMC=y
983# CONFIG_MTD_NAND_MUSEUM_IDS is not set
984CONFIG_MTD_NAND_IDS=m
985CONFIG_MTD_NAND_DISKONCHIP=m
986# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
987CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
988CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y
989# CONFIG_MTD_NAND_CAFE is not set
990CONFIG_MTD_NAND_CS553X=m
991CONFIG_MTD_NAND_NANDSIM=m
992# CONFIG_MTD_NAND_PLATFORM is not set
993# CONFIG_MTD_ALAUDA is not set
994CONFIG_MTD_ONENAND=m
995# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
996CONFIG_MTD_ONENAND_OTP=y
997# CONFIG_MTD_ONENAND_2X_PROGRAM is not set
998# CONFIG_MTD_ONENAND_SIM is not set
999
1000#
1001# LPDDR flash memory drivers
1002#
1003# CONFIG_MTD_LPDDR is not set
1004
1005#
1006# UBI - Unsorted block images
1007#
1008# CONFIG_MTD_UBI is not set
1009# CONFIG_PARPORT is not set
1010CONFIG_PNP=y
1011CONFIG_PNP_DEBUG_MESSAGES=y
1012
1013#
1014# Protocols
1015#
1016# CONFIG_ISAPNP is not set
1017CONFIG_PNPBIOS=y
1018CONFIG_PNPBIOS_PROC_FS=y
1019CONFIG_PNPACPI=y
1020CONFIG_BLK_DEV=y
1021# CONFIG_BLK_DEV_FD is not set
1022CONFIG_BLK_DEV_XD=m
1023CONFIG_BLK_CPQ_DA=m
1024CONFIG_BLK_CPQ_CISS_DA=m
1025CONFIG_CISS_SCSI_TAPE=y
1026CONFIG_BLK_DEV_DAC960=m
1027CONFIG_BLK_DEV_UMEM=m
1028# CONFIG_BLK_DEV_COW_COMMON is not set
1029CONFIG_BLK_DEV_LOOP=y
1030CONFIG_BLK_DEV_CRYPTOLOOP=m
1031CONFIG_BLK_DEV_NBD=m
1032CONFIG_BLK_DEV_SX8=m
1033# CONFIG_BLK_DEV_UB is not set
1034CONFIG_BLK_DEV_RAM=y
1035CONFIG_BLK_DEV_RAM_COUNT=16
1036CONFIG_BLK_DEV_RAM_SIZE=64000
1037# CONFIG_BLK_DEV_XIP is not set
1038CONFIG_CDROM_PKTCDVD=m
1039CONFIG_CDROM_PKTCDVD_BUFFERS=8
1040CONFIG_CDROM_PKTCDVD_WCACHE=y
1041CONFIG_ATA_OVER_ETH=m
1042# CONFIG_BLK_DEV_HD is not set
1043CONFIG_MISC_DEVICES=y
1044# CONFIG_IBM_ASM is not set
1045# CONFIG_PHANTOM is not set
1046# CONFIG_SGI_IOC4 is not set
1047# CONFIG_TIFM_CORE is not set
1048# CONFIG_ICS932S401 is not set
1049# CONFIG_ENCLOSURE_SERVICES is not set
1050# CONFIG_HP_ILO is not set
1051# CONFIG_C2PORT is not set
1052
1053#
1054# EEPROM support
1055#
1056# CONFIG_EEPROM_AT24 is not set
1057# CONFIG_EEPROM_AT25 is not set
1058# CONFIG_EEPROM_LEGACY is not set
1059# CONFIG_EEPROM_93CX6 is not set
1060CONFIG_HAVE_IDE=y
1061# CONFIG_IDE is not set
1062
1063#
1064# SCSI device support
1065#
1066CONFIG_RAID_ATTRS=m
1067CONFIG_SCSI=y
1068CONFIG_SCSI_DMA=y
1069# CONFIG_SCSI_TGT is not set
1070CONFIG_SCSI_NETLINK=y
1071CONFIG_SCSI_PROC_FS=y
1072
1073#
1074# SCSI support type (disk, tape, CD-ROM)
1075#
1076CONFIG_BLK_DEV_SD=y
1077CONFIG_CHR_DEV_ST=m
1078CONFIG_CHR_DEV_OSST=m
1079CONFIG_BLK_DEV_SR=y
1080# CONFIG_BLK_DEV_SR_VENDOR is not set
1081CONFIG_CHR_DEV_SG=y
1082CONFIG_CHR_DEV_SCH=m
1083
1084#
1085# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
1086#
1087CONFIG_SCSI_MULTI_LUN=y
1088CONFIG_SCSI_CONSTANTS=y
1089CONFIG_SCSI_LOGGING=y
1090# CONFIG_SCSI_SCAN_ASYNC is not set
1091CONFIG_SCSI_WAIT_SCAN=m
1092
1093#
1094# SCSI Transports
1095#
1096CONFIG_SCSI_SPI_ATTRS=m
1097CONFIG_SCSI_FC_ATTRS=m
1098CONFIG_SCSI_ISCSI_ATTRS=m
1099# CONFIG_SCSI_SAS_LIBSAS is not set
1100# CONFIG_SCSI_SRP_ATTRS is not set
1101CONFIG_SCSI_LOWLEVEL=y
1102# CONFIG_ISCSI_TCP is not set
1103# CONFIG_SCSI_CXGB3_ISCSI is not set
1104# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
1105# CONFIG_SCSI_3W_9XXX is not set
1106# CONFIG_SCSI_7000FASST is not set
1107# CONFIG_SCSI_ACARD is not set
1108# CONFIG_SCSI_AHA152X is not set
1109# CONFIG_SCSI_AHA1542 is not set
1110# CONFIG_SCSI_AACRAID is not set
1111# CONFIG_SCSI_AIC7XXX is not set
1112# CONFIG_SCSI_AIC7XXX_OLD is not set
1113# CONFIG_SCSI_AIC79XX is not set
1114# CONFIG_SCSI_AIC94XX is not set
1115# CONFIG_SCSI_DPT_I2O is not set
1116# CONFIG_SCSI_ADVANSYS is not set
1117# CONFIG_SCSI_IN2000 is not set
1118# CONFIG_SCSI_ARCMSR is not set
1119# CONFIG_MEGARAID_NEWGEN is not set
1120# CONFIG_MEGARAID_LEGACY is not set
1121# CONFIG_MEGARAID_SAS is not set
1122# CONFIG_SCSI_HPTIOP is not set
1123# CONFIG_SCSI_BUSLOGIC is not set
1124# CONFIG_LIBFC is not set
1125# CONFIG_FCOE is not set
1126# CONFIG_SCSI_DMX3191D is not set
1127# CONFIG_SCSI_DTC3280 is not set
1128# CONFIG_SCSI_EATA is not set
1129# CONFIG_SCSI_FUTURE_DOMAIN is not set
1130CONFIG_SCSI_GDTH=m
1131# CONFIG_SCSI_GENERIC_NCR5380 is not set
1132# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
1133# CONFIG_SCSI_IPS is not set
1134# CONFIG_SCSI_INITIO is not set
1135# CONFIG_SCSI_INIA100 is not set
1136# CONFIG_SCSI_MVSAS is not set
1137# CONFIG_SCSI_NCR53C406A is not set
1138# CONFIG_SCSI_STEX is not set
1139# CONFIG_SCSI_SYM53C8XX_2 is not set
1140# CONFIG_SCSI_IPR is not set
1141# CONFIG_SCSI_PAS16 is not set
1142# CONFIG_SCSI_QLOGIC_FAS is not set
1143# CONFIG_SCSI_QLOGIC_1280 is not set
1144# CONFIG_SCSI_QLA_FC is not set
1145# CONFIG_SCSI_QLA_ISCSI is not set
1146# CONFIG_SCSI_LPFC is not set
1147# CONFIG_SCSI_SYM53C416 is not set
1148# CONFIG_SCSI_DC395x is not set
1149# CONFIG_SCSI_DC390T is not set
1150# CONFIG_SCSI_T128 is not set
1151# CONFIG_SCSI_U14_34F is not set
1152# CONFIG_SCSI_ULTRASTOR is not set
1153# CONFIG_SCSI_NSP32 is not set
1154# CONFIG_SCSI_DEBUG is not set
1155# CONFIG_SCSI_SRP is not set
1156# CONFIG_SCSI_DH is not set
1157CONFIG_ATA=y
1158# CONFIG_ATA_NONSTANDARD is not set
1159CONFIG_ATA_ACPI=y
1160CONFIG_SATA_PMP=y
1161# CONFIG_SATA_AHCI is not set
1162# CONFIG_SATA_SIL24 is not set
1163CONFIG_ATA_SFF=y
1164# CONFIG_SATA_SVW is not set
1165CONFIG_ATA_PIIX=y
1166# CONFIG_SATA_MV is not set
1167# CONFIG_SATA_NV is not set
1168# CONFIG_PDC_ADMA is not set
1169# CONFIG_SATA_QSTOR is not set
1170# CONFIG_SATA_PROMISE is not set
1171# CONFIG_SATA_SX4 is not set
1172# CONFIG_SATA_SIL is not set
1173# CONFIG_SATA_SIS is not set
1174# CONFIG_SATA_ULI is not set
1175# CONFIG_SATA_VIA is not set
1176# CONFIG_SATA_VITESSE is not set
1177# CONFIG_SATA_INIC162X is not set
1178# CONFIG_PATA_ACPI is not set
1179# CONFIG_PATA_ALI is not set
1180# CONFIG_PATA_AMD is not set
1181# CONFIG_PATA_ARTOP is not set
1182# CONFIG_PATA_ATIIXP is not set
1183# CONFIG_PATA_CMD640_PCI is not set
1184# CONFIG_PATA_CMD64X is not set
1185# CONFIG_PATA_CS5520 is not set
1186# CONFIG_PATA_CS5530 is not set
1187# CONFIG_PATA_CS5535 is not set
1188# CONFIG_PATA_CS5536 is not set
1189# CONFIG_PATA_CYPRESS is not set
1190# CONFIG_PATA_EFAR is not set
1191CONFIG_ATA_GENERIC=y
1192# CONFIG_PATA_HPT366 is not set
1193# CONFIG_PATA_HPT37X is not set
1194# CONFIG_PATA_HPT3X2N is not set
1195# CONFIG_PATA_HPT3X3 is not set
1196# CONFIG_PATA_IT821X is not set
1197# CONFIG_PATA_IT8213 is not set
1198# CONFIG_PATA_JMICRON is not set
1199# CONFIG_PATA_LEGACY is not set
1200# CONFIG_PATA_TRIFLEX is not set
1201# CONFIG_PATA_MARVELL is not set
1202CONFIG_PATA_MPIIX=y
1203# CONFIG_PATA_OLDPIIX is not set
1204# CONFIG_PATA_NETCELL is not set
1205# CONFIG_PATA_NINJA32 is not set
1206# CONFIG_PATA_NS87410 is not set
1207# CONFIG_PATA_NS87415 is not set
1208# CONFIG_PATA_OPTI is not set
1209# CONFIG_PATA_OPTIDMA is not set
1210# CONFIG_PATA_PDC_OLD is not set
1211# CONFIG_PATA_QDI is not set
1212# CONFIG_PATA_RADISYS is not set
1213# CONFIG_PATA_RZ1000 is not set
1214# CONFIG_PATA_SC1200 is not set
1215# CONFIG_PATA_SERVERWORKS is not set
1216# CONFIG_PATA_PDC2027X is not set
1217# CONFIG_PATA_SIL680 is not set
1218# CONFIG_PATA_SIS is not set
1219# CONFIG_PATA_VIA is not set
1220# CONFIG_PATA_WINBOND is not set
1221# CONFIG_PATA_WINBOND_VLB is not set
1222# CONFIG_PATA_SCH is not set
1223# CONFIG_MD is not set
1224# CONFIG_FUSION is not set
1225
1226#
1227# IEEE 1394 (FireWire) support
1228#
1229
1230#
1231# Enable only one of the two stacks, unless you know what you are doing
1232#
1233# CONFIG_FIREWIRE is not set
1234CONFIG_IEEE1394=m
1235CONFIG_IEEE1394_OHCI1394=m
1236# CONFIG_IEEE1394_PCILYNX is not set
1237CONFIG_IEEE1394_SBP2=m
1238# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
1239CONFIG_IEEE1394_ETH1394_ROM_ENTRY=y
1240CONFIG_IEEE1394_ETH1394=m
1241CONFIG_IEEE1394_RAWIO=m
1242CONFIG_IEEE1394_VIDEO1394=m
1243CONFIG_IEEE1394_DV1394=m
1244# CONFIG_IEEE1394_VERBOSEDEBUG is not set
1245CONFIG_I2O=m
1246CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y
1247CONFIG_I2O_EXT_ADAPTEC=y
1248CONFIG_I2O_CONFIG=m
1249CONFIG_I2O_CONFIG_OLD_IOCTL=y
1250CONFIG_I2O_BUS=m
1251CONFIG_I2O_BLOCK=m
1252CONFIG_I2O_SCSI=m
1253CONFIG_I2O_PROC=m
1254# CONFIG_MACINTOSH_DRIVERS is not set
1255CONFIG_NETDEVICES=y
1256CONFIG_IFB=m
1257CONFIG_DUMMY=m
1258CONFIG_BONDING=m
1259# CONFIG_MACVLAN is not set
1260CONFIG_EQUALIZER=m
1261CONFIG_TUN=m
1262# CONFIG_VETH is not set
1263# CONFIG_NET_SB1000 is not set
1264# CONFIG_ARCNET is not set
1265CONFIG_PHYLIB=m
1266
1267#
1268# MII PHY device drivers
1269#
1270CONFIG_MARVELL_PHY=m
1271CONFIG_DAVICOM_PHY=m
1272CONFIG_QSEMI_PHY=m
1273CONFIG_LXT_PHY=m
1274CONFIG_CICADA_PHY=m
1275CONFIG_VITESSE_PHY=m
1276CONFIG_SMSC_PHY=m
1277# CONFIG_BROADCOM_PHY is not set
1278# CONFIG_ICPLUS_PHY is not set
1279# CONFIG_REALTEK_PHY is not set
1280# CONFIG_NATIONAL_PHY is not set
1281# CONFIG_STE10XP is not set
1282# CONFIG_LSI_ET1011C_PHY is not set
1283# CONFIG_MDIO_BITBANG is not set
1284CONFIG_NET_ETHERNET=y
1285CONFIG_MII=y
1286# CONFIG_HAPPYMEAL is not set
1287# CONFIG_SUNGEM is not set
1288# CONFIG_CASSINI is not set
1289CONFIG_NET_VENDOR_3COM=y
1290CONFIG_EL1=m
1291CONFIG_EL2=m
1292CONFIG_ELPLUS=m
1293CONFIG_EL16=m
1294CONFIG_EL3=m
1295CONFIG_3C515=m
1296CONFIG_VORTEX=m
1297CONFIG_TYPHOON=m
1298# CONFIG_LANCE is not set
1299CONFIG_NET_VENDOR_SMC=y
1300CONFIG_WD80x3=m
1301CONFIG_ULTRA=m
1302CONFIG_SMC9194=m
1303# CONFIG_ENC28J60 is not set
1304# CONFIG_NET_VENDOR_RACAL is not set
1305# CONFIG_DNET is not set
1306CONFIG_NET_TULIP=y
1307CONFIG_DE2104X=m
1308CONFIG_TULIP=m
1309# CONFIG_TULIP_MWI is not set
1310# CONFIG_TULIP_MMIO is not set
1311CONFIG_TULIP_NAPI=y
1312CONFIG_TULIP_NAPI_HW_MITIGATION=y
1313CONFIG_DE4X5=m
1314CONFIG_WINBOND_840=m
1315CONFIG_DM9102=m
1316CONFIG_ULI526X=m
1317# CONFIG_AT1700 is not set
1318# CONFIG_DEPCA is not set
1319# CONFIG_HP100 is not set
1320# CONFIG_NET_ISA is not set
1321# CONFIG_IBM_NEW_EMAC_ZMII is not set
1322# CONFIG_IBM_NEW_EMAC_RGMII is not set
1323# CONFIG_IBM_NEW_EMAC_TAH is not set
1324# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
1325# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
1326# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
1327# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
1328CONFIG_NET_PCI=y
1329# CONFIG_PCNET32 is not set
1330# CONFIG_AMD8111_ETH is not set
1331# CONFIG_ADAPTEC_STARFIRE is not set
1332# CONFIG_AC3200 is not set
1333# CONFIG_APRICOT is not set
1334# CONFIG_B44 is not set
1335# CONFIG_FORCEDETH is not set
1336# CONFIG_CS89x0 is not set
1337CONFIG_E100=m
1338# CONFIG_FEALNX is not set
1339# CONFIG_NATSEMI is not set
1340CONFIG_NE2K_PCI=m
1341CONFIG_8139CP=m
1342CONFIG_8139TOO=m
1343# CONFIG_8139TOO_PIO is not set
1344# CONFIG_8139TOO_TUNE_TWISTER is not set
1345CONFIG_8139TOO_8129=y
1346# CONFIG_8139_OLD_RX_RESET is not set
1347# CONFIG_R6040 is not set
1348# CONFIG_SIS900 is not set
1349CONFIG_EPIC100=m
1350# CONFIG_SMSC9420 is not set
1351# CONFIG_SUNDANCE is not set
1352# CONFIG_TLAN is not set
1353# CONFIG_VIA_RHINE is not set
1354# CONFIG_SC92031 is not set
1355# CONFIG_ATL2 is not set
1356CONFIG_NETDEV_1000=y
1357# CONFIG_ACENIC is not set
1358# CONFIG_DL2K is not set
1359CONFIG_E1000=m
1360# CONFIG_E1000E is not set
1361# CONFIG_IP1000 is not set
1362# CONFIG_IGB is not set
1363# CONFIG_NS83820 is not set
1364# CONFIG_HAMACHI is not set
1365# CONFIG_YELLOWFIN is not set
1366CONFIG_R8169=m
1367CONFIG_R8169_VLAN=y
1368# CONFIG_SIS190 is not set
1369CONFIG_SKGE=y
1370# CONFIG_SKGE_DEBUG is not set
1371CONFIG_SKY2=y
1372# CONFIG_SKY2_DEBUG is not set
1373# CONFIG_VIA_VELOCITY is not set
1374# CONFIG_TIGON3 is not set
1375# CONFIG_BNX2 is not set
1376# CONFIG_QLA3XXX is not set
1377# CONFIG_ATL1 is not set
1378# CONFIG_ATL1E is not set
1379# CONFIG_ATL1C is not set
1380# CONFIG_JME is not set
1381CONFIG_NETDEV_10000=y
1382# CONFIG_CHELSIO_T1 is not set
1383CONFIG_CHELSIO_T3_DEPENDS=y
1384# CONFIG_CHELSIO_T3 is not set
1385# CONFIG_ENIC is not set
1386# CONFIG_IXGBE is not set
1387CONFIG_IXGB=m
1388# CONFIG_S2IO is not set
1389# CONFIG_MYRI10GE is not set
1390# CONFIG_NETXEN_NIC is not set
1391# CONFIG_NIU is not set
1392# CONFIG_MLX4_EN is not set
1393# CONFIG_MLX4_CORE is not set
1394# CONFIG_TEHUTI is not set
1395# CONFIG_BNX2X is not set
1396# CONFIG_QLGE is not set
1397# CONFIG_SFC is not set
1398# CONFIG_BE2NET is not set
1399# CONFIG_TR is not set
1400
1401#
1402# Wireless LAN
1403#
1404# CONFIG_WLAN_PRE80211 is not set
1405CONFIG_WLAN_80211=y
1406# CONFIG_LIBERTAS is not set
1407# CONFIG_AIRO is not set
1408# CONFIG_HERMES is not set
1409# CONFIG_ATMEL is not set
1410# CONFIG_PRISM54 is not set
1411# CONFIG_USB_ZD1201 is not set
1412# CONFIG_USB_NET_RNDIS_WLAN is not set
1413CONFIG_IPW2100=m
1414# CONFIG_IPW2100_MONITOR is not set
1415# CONFIG_IPW2100_DEBUG is not set
1416CONFIG_IPW2200=m
1417# CONFIG_IPW2200_MONITOR is not set
1418# CONFIG_IPW2200_QOS is not set
1419# CONFIG_IPW2200_DEBUG is not set
1420CONFIG_LIBIPW=m
1421# CONFIG_LIBIPW_DEBUG is not set
1422# CONFIG_IWLWIFI_LEDS is not set
1423# CONFIG_HOSTAP is not set
1424
1425#
1426# Enable WiMAX (Networking options) to see the WiMAX drivers
1427#
1428
1429#
1430# USB Network Adapters
1431#
1432CONFIG_USB_CATC=m
1433CONFIG_USB_KAWETH=m
1434CONFIG_USB_PEGASUS=m
1435CONFIG_USB_RTL8150=m
1436CONFIG_USB_USBNET=y
1437CONFIG_USB_NET_AX8817X=y
1438CONFIG_USB_NET_CDCETHER=m
1439# CONFIG_USB_NET_DM9601 is not set
1440# CONFIG_USB_NET_SMSC95XX is not set
1441CONFIG_USB_NET_GL620A=m
1442CONFIG_USB_NET_NET1080=m
1443CONFIG_USB_NET_PLUSB=m
1444# CONFIG_USB_NET_MCS7830 is not set
1445CONFIG_USB_NET_RNDIS_HOST=m
1446CONFIG_USB_NET_CDC_SUBSET=m
1447CONFIG_USB_ALI_M5632=y
1448CONFIG_USB_AN2720=y
1449CONFIG_USB_BELKIN=y
1450CONFIG_USB_ARMLINUX=y
1451CONFIG_USB_EPSON2888=y
1452# CONFIG_USB_KC2190 is not set
1453CONFIG_USB_NET_ZAURUS=m
1454# CONFIG_WAN is not set
1455CONFIG_ATM_DRIVERS=y
1456# CONFIG_ATM_DUMMY is not set
1457# CONFIG_ATM_TCP is not set
1458# CONFIG_ATM_LANAI is not set
1459# CONFIG_ATM_ENI is not set
1460# CONFIG_ATM_FIRESTREAM is not set
1461# CONFIG_ATM_ZATM is not set
1462# CONFIG_ATM_NICSTAR is not set
1463# CONFIG_ATM_IDT77252 is not set
1464# CONFIG_ATM_AMBASSADOR is not set
1465# CONFIG_ATM_HORIZON is not set
1466# CONFIG_ATM_IA is not set
1467# CONFIG_ATM_FORE200E is not set
1468# CONFIG_ATM_HE is not set
1469# CONFIG_ATM_SOLOS is not set
1470# CONFIG_FDDI is not set
1471# CONFIG_HIPPI is not set
1472CONFIG_PPP=m
1473CONFIG_PPP_MULTILINK=y
1474CONFIG_PPP_FILTER=y
1475CONFIG_PPP_ASYNC=m
1476CONFIG_PPP_SYNC_TTY=m
1477CONFIG_PPP_DEFLATE=m
1478CONFIG_PPP_BSDCOMP=m
1479CONFIG_PPP_MPPE=m
1480CONFIG_PPPOE=m
1481CONFIG_PPPOATM=m
1482# CONFIG_PPPOL2TP is not set
1483CONFIG_SLIP=m
1484CONFIG_SLIP_COMPRESSED=y
1485CONFIG_SLHC=m
1486CONFIG_SLIP_SMART=y
1487CONFIG_SLIP_MODE_SLIP6=y
1488CONFIG_NET_FC=y
1489CONFIG_NETCONSOLE=m
1490# CONFIG_NETCONSOLE_DYNAMIC is not set
1491CONFIG_NETPOLL=y
1492CONFIG_NETPOLL_TRAP=y
1493CONFIG_NET_POLL_CONTROLLER=y
1494# CONFIG_ISDN is not set
1495CONFIG_PHONE=m
1496# CONFIG_PHONE_IXJ is not set
1497
1498#
1499# Input device support
1500#
1501CONFIG_INPUT=y
1502CONFIG_INPUT_FF_MEMLESS=y
1503CONFIG_INPUT_POLLDEV=m
1504
1505#
1506# Userland interfaces
1507#
1508CONFIG_INPUT_MOUSEDEV=y
1509CONFIG_INPUT_MOUSEDEV_PSAUX=y
1510CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
1511CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
1512CONFIG_INPUT_JOYDEV=m
1513CONFIG_INPUT_EVDEV=y
1514# CONFIG_INPUT_EVBUG is not set
1515
1516#
1517# Input Device Drivers
1518#
1519CONFIG_INPUT_KEYBOARD=y
1520CONFIG_KEYBOARD_ATKBD=y
1521CONFIG_KEYBOARD_SUNKBD=m
1522# CONFIG_KEYBOARD_LKKBD is not set
1523CONFIG_KEYBOARD_XTKBD=m
1524CONFIG_KEYBOARD_NEWTON=m
1525# CONFIG_KEYBOARD_STOWAWAY is not set
1526CONFIG_INPUT_MOUSE=y
1527CONFIG_MOUSE_PS2=y
1528CONFIG_MOUSE_PS2_ALPS=y
1529CONFIG_MOUSE_PS2_LOGIPS2PP=y
1530CONFIG_MOUSE_PS2_SYNAPTICS=y
1531CONFIG_MOUSE_PS2_LIFEBOOK=y
1532CONFIG_MOUSE_PS2_TRACKPOINT=y
1533# CONFIG_MOUSE_PS2_ELANTECH is not set
1534# CONFIG_MOUSE_PS2_TOUCHKIT is not set
1535CONFIG_MOUSE_SERIAL=m
1536# CONFIG_MOUSE_APPLETOUCH is not set
1537# CONFIG_MOUSE_BCM5974 is not set
1538CONFIG_MOUSE_INPORT=m
1539CONFIG_MOUSE_ATIXL=y
1540CONFIG_MOUSE_LOGIBM=m
1541CONFIG_MOUSE_PC110PAD=m
1542# CONFIG_MOUSE_VSXXXAA is not set
1543CONFIG_INPUT_JOYSTICK=y
1544CONFIG_JOYSTICK_ANALOG=m
1545CONFIG_JOYSTICK_A3D=m
1546CONFIG_JOYSTICK_ADI=m
1547CONFIG_JOYSTICK_COBRA=m
1548CONFIG_JOYSTICK_GF2K=m
1549CONFIG_JOYSTICK_GRIP=m
1550CONFIG_JOYSTICK_GRIP_MP=m
1551CONFIG_JOYSTICK_GUILLEMOT=m
1552CONFIG_JOYSTICK_INTERACT=m
1553CONFIG_JOYSTICK_SIDEWINDER=m
1554CONFIG_JOYSTICK_TMDC=m
1555CONFIG_JOYSTICK_IFORCE=m
1556CONFIG_JOYSTICK_IFORCE_USB=y
1557CONFIG_JOYSTICK_IFORCE_232=y
1558CONFIG_JOYSTICK_WARRIOR=m
1559CONFIG_JOYSTICK_MAGELLAN=m
1560CONFIG_JOYSTICK_SPACEORB=m
1561CONFIG_JOYSTICK_SPACEBALL=m
1562CONFIG_JOYSTICK_STINGER=m
1563CONFIG_JOYSTICK_TWIDJOY=m
1564# CONFIG_JOYSTICK_ZHENHUA is not set
1565CONFIG_JOYSTICK_JOYDUMP=m
1566# CONFIG_JOYSTICK_XPAD is not set
1567# CONFIG_INPUT_TABLET is not set
1568CONFIG_INPUT_TOUCHSCREEN=y
1569CONFIG_TOUCHSCREEN_ADS7846=m
1570# CONFIG_TOUCHSCREEN_FUJITSU is not set
1571CONFIG_TOUCHSCREEN_GUNZE=m
1572CONFIG_TOUCHSCREEN_ELO=m
1573# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
1574CONFIG_TOUCHSCREEN_MTOUCH=m
1575# CONFIG_TOUCHSCREEN_INEXIO is not set
1576CONFIG_TOUCHSCREEN_MK712=m
1577# CONFIG_TOUCHSCREEN_HTCPEN is not set
1578# CONFIG_TOUCHSCREEN_PENMOUNT is not set
1579# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
1580# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
1581# CONFIG_TOUCHSCREEN_WM97XX is not set
1582# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
1583# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
1584# CONFIG_TOUCHSCREEN_TSC2003 is not set
1585# CONFIG_TOUCHSCREEN_TSC2007 is not set
1586CONFIG_INPUT_MISC=y
1587CONFIG_INPUT_PCSPKR=y
1588# CONFIG_INPUT_APANEL is not set
1589CONFIG_INPUT_WISTRON_BTNS=m
1590# CONFIG_INPUT_ATLAS_BTNS is not set
1591# CONFIG_INPUT_ATI_REMOTE is not set
1592# CONFIG_INPUT_ATI_REMOTE2 is not set
1593# CONFIG_INPUT_KEYSPAN_REMOTE is not set
1594# CONFIG_INPUT_POWERMATE is not set
1595# CONFIG_INPUT_YEALINK is not set
1596# CONFIG_INPUT_CM109 is not set
1597CONFIG_INPUT_UINPUT=m
1598
1599#
1600# Hardware I/O ports
1601#
1602CONFIG_SERIO=y
1603CONFIG_SERIO_I8042=y
1604CONFIG_SERIO_SERPORT=m
1605CONFIG_SERIO_CT82C710=m
1606CONFIG_SERIO_PCIPS2=m
1607CONFIG_SERIO_LIBPS2=y
1608CONFIG_SERIO_RAW=m
1609CONFIG_GAMEPORT=m
1610CONFIG_GAMEPORT_NS558=m
1611CONFIG_GAMEPORT_L4=m
1612CONFIG_GAMEPORT_EMU10K1=m
1613CONFIG_GAMEPORT_FM801=m
1614
1615#
1616# Character devices
1617#
1618CONFIG_VT=y
1619CONFIG_CONSOLE_TRANSLATIONS=y
1620CONFIG_VT_CONSOLE=y
1621CONFIG_HW_CONSOLE=y
1622CONFIG_VT_HW_CONSOLE_BINDING=y
1623CONFIG_DEVKMEM=y
1624CONFIG_SERIAL_NONSTANDARD=y
1625# CONFIG_COMPUTONE is not set
1626# CONFIG_ROCKETPORT is not set
1627# CONFIG_CYCLADES is not set
1628# CONFIG_DIGIEPCA is not set
1629# CONFIG_MOXA_INTELLIO is not set
1630# CONFIG_MOXA_SMARTIO is not set
1631# CONFIG_ISI is not set
1632# CONFIG_SYNCLINK is not set
1633# CONFIG_SYNCLINKMP is not set
1634# CONFIG_SYNCLINK_GT is not set
1635# CONFIG_N_HDLC is not set
1636# CONFIG_RISCOM8 is not set
1637# CONFIG_SPECIALIX is not set
1638# CONFIG_SX is not set
1639# CONFIG_RIO is not set
1640# CONFIG_STALDRV is not set
1641# CONFIG_NOZOMI is not set
1642
1643#
1644# Serial drivers
1645#
1646CONFIG_SERIAL_8250=y
1647CONFIG_SERIAL_8250_CONSOLE=y
1648CONFIG_FIX_EARLYCON_MEM=y
1649CONFIG_SERIAL_8250_PCI=y
1650CONFIG_SERIAL_8250_PNP=y
1651CONFIG_SERIAL_8250_NR_UARTS=8
1652CONFIG_SERIAL_8250_RUNTIME_UARTS=4
1653CONFIG_SERIAL_8250_EXTENDED=y
1654# CONFIG_SERIAL_8250_MANY_PORTS is not set
1655CONFIG_SERIAL_8250_SHARE_IRQ=y
1656# CONFIG_SERIAL_8250_DETECT_IRQ is not set
1657# CONFIG_SERIAL_8250_RSA is not set
1658
1659#
1660# Non-8250 serial port support
1661#
1662CONFIG_SERIAL_CORE=y
1663CONFIG_SERIAL_CORE_CONSOLE=y
1664CONFIG_SERIAL_JSM=y
1665CONFIG_UNIX98_PTYS=y
1666# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
1667CONFIG_LEGACY_PTYS=y
1668CONFIG_LEGACY_PTY_COUNT=64
1669CONFIG_IPMI_HANDLER=m
1670CONFIG_IPMI_PANIC_EVENT=y
1671CONFIG_IPMI_PANIC_STRING=y
1672CONFIG_IPMI_DEVICE_INTERFACE=m
1673CONFIG_IPMI_SI=m
1674CONFIG_IPMI_WATCHDOG=m
1675CONFIG_IPMI_POWEROFF=m
1676CONFIG_HW_RANDOM=y
1677CONFIG_HW_RANDOM_INTEL=m
1678# CONFIG_HW_RANDOM_AMD is not set
1679# CONFIG_HW_RANDOM_GEODE is not set
1680# CONFIG_HW_RANDOM_VIA is not set
1681CONFIG_NVRAM=m
1682# CONFIG_DTLK is not set
1683# CONFIG_R3964 is not set
1684# CONFIG_APPLICOM is not set
1685# CONFIG_SONYPI is not set
1686# CONFIG_MWAVE is not set
1687# CONFIG_PC8736x_GPIO is not set
1688# CONFIG_NSC_GPIO is not set
1689# CONFIG_CS5535_GPIO is not set
1690CONFIG_RAW_DRIVER=m
1691CONFIG_MAX_RAW_DEVS=4096
1692CONFIG_HPET=y
1693CONFIG_HPET_MMAP=y
1694CONFIG_HANGCHECK_TIMER=m
1695# CONFIG_TCG_TPM is not set
1696# CONFIG_TELCLOCK is not set
1697CONFIG_DEVPORT=y
1698CONFIG_I2C=m
1699CONFIG_I2C_BOARDINFO=y
1700CONFIG_I2C_CHARDEV=m
1701CONFIG_I2C_HELPER_AUTO=y
1702CONFIG_I2C_ALGOBIT=m
1703CONFIG_I2C_ALGOPCA=m
1704
1705#
1706# I2C Hardware Bus support
1707#
1708
1709#
1710# PC SMBus host controller drivers
1711#
1712CONFIG_I2C_ALI1535=m
1713CONFIG_I2C_ALI1563=m
1714CONFIG_I2C_ALI15X3=m
1715CONFIG_I2C_AMD756=m
1716CONFIG_I2C_AMD756_S4882=m
1717CONFIG_I2C_AMD8111=m
1718CONFIG_I2C_I801=m
1719# CONFIG_I2C_ISCH is not set
1720CONFIG_I2C_PIIX4=m
1721CONFIG_I2C_NFORCE2=m
1722# CONFIG_I2C_NFORCE2_S4985 is not set
1723CONFIG_I2C_SIS5595=m
1724CONFIG_I2C_SIS630=m
1725CONFIG_I2C_SIS96X=m
1726CONFIG_I2C_VIA=m
1727CONFIG_I2C_VIAPRO=m
1728
1729#
1730# I2C system bus drivers (mostly embedded / system-on-chip)
1731#
1732CONFIG_I2C_OCORES=m
1733# CONFIG_I2C_SIMTEC is not set
1734
1735#
1736# External I2C/SMBus adapter drivers
1737#
1738CONFIG_I2C_PARPORT_LIGHT=m
1739# CONFIG_I2C_TAOS_EVM is not set
1740# CONFIG_I2C_TINY_USB is not set
1741
1742#
1743# Graphics adapter I2C/DDC channel drivers
1744#
1745CONFIG_I2C_VOODOO3=m
1746
1747#
1748# Other I2C/SMBus bus drivers
1749#
1750CONFIG_I2C_PCA_ISA=m
1751# CONFIG_I2C_PCA_PLATFORM is not set
1752CONFIG_I2C_STUB=m
1753CONFIG_SCx200_ACB=m
1754
1755#
1756# Miscellaneous I2C Chip support
1757#
1758# CONFIG_DS1682 is not set
1759CONFIG_SENSORS_PCF8574=m
1760# CONFIG_PCF8575 is not set
1761CONFIG_SENSORS_PCA9539=m
1762CONFIG_SENSORS_PCF8591=m
1763CONFIG_SENSORS_MAX6875=m
1764# CONFIG_SENSORS_TSL2550 is not set
1765# CONFIG_I2C_DEBUG_CORE is not set
1766# CONFIG_I2C_DEBUG_ALGO is not set
1767# CONFIG_I2C_DEBUG_BUS is not set
1768# CONFIG_I2C_DEBUG_CHIP is not set
1769CONFIG_SPI=y
1770# CONFIG_SPI_DEBUG is not set
1771CONFIG_SPI_MASTER=y
1772
1773#
1774# SPI Master Controller Drivers
1775#
1776CONFIG_SPI_BITBANG=m
1777# CONFIG_SPI_XILINX is not set
1778
1779#
1780# SPI Protocol Masters
1781#
1782# CONFIG_SPI_SPIDEV is not set
1783# CONFIG_SPI_TLE62X0 is not set
1784CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
1785# CONFIG_GPIOLIB is not set
1786CONFIG_W1=m
1787CONFIG_W1_CON=y
1788
1789#
1790# 1-wire Bus Masters
1791#
1792CONFIG_W1_MASTER_MATROX=m
1793CONFIG_W1_MASTER_DS2490=m
1794CONFIG_W1_MASTER_DS2482=m
1795
1796#
1797# 1-wire Slaves
1798#
1799CONFIG_W1_SLAVE_THERM=m
1800CONFIG_W1_SLAVE_SMEM=m
1801# CONFIG_W1_SLAVE_DS2431 is not set
1802CONFIG_W1_SLAVE_DS2433=m
1803CONFIG_W1_SLAVE_DS2433_CRC=y
1804# CONFIG_W1_SLAVE_DS2760 is not set
1805# CONFIG_W1_SLAVE_BQ27000 is not set
1806CONFIG_POWER_SUPPLY=y
1807# CONFIG_POWER_SUPPLY_DEBUG is not set
1808# CONFIG_PDA_POWER is not set
1809# CONFIG_BATTERY_DS2760 is not set
1810# CONFIG_BATTERY_BQ27x00 is not set
1811CONFIG_HWMON=y
1812CONFIG_HWMON_VID=m
1813# CONFIG_SENSORS_ABITUGURU is not set
1814# CONFIG_SENSORS_ABITUGURU3 is not set
1815# CONFIG_SENSORS_AD7414 is not set
1816# CONFIG_SENSORS_AD7418 is not set
1817# CONFIG_SENSORS_ADCXX is not set
1818# CONFIG_SENSORS_ADM1021 is not set
1819# CONFIG_SENSORS_ADM1025 is not set
1820# CONFIG_SENSORS_ADM1026 is not set
1821# CONFIG_SENSORS_ADM1029 is not set
1822# CONFIG_SENSORS_ADM1031 is not set
1823# CONFIG_SENSORS_ADM9240 is not set
1824# CONFIG_SENSORS_ADT7462 is not set
1825# CONFIG_SENSORS_ADT7470 is not set
1826# CONFIG_SENSORS_ADT7473 is not set
1827# CONFIG_SENSORS_ADT7475 is not set
1828# CONFIG_SENSORS_K8TEMP is not set
1829# CONFIG_SENSORS_ASB100 is not set
1830# CONFIG_SENSORS_ATXP1 is not set
1831# CONFIG_SENSORS_DS1621 is not set
1832# CONFIG_SENSORS_I5K_AMB is not set
1833# CONFIG_SENSORS_F71805F is not set
1834# CONFIG_SENSORS_F71882FG is not set
1835# CONFIG_SENSORS_F75375S is not set
1836# CONFIG_SENSORS_FSCHER is not set
1837# CONFIG_SENSORS_FSCPOS is not set
1838# CONFIG_SENSORS_FSCHMD is not set
1839# CONFIG_SENSORS_GL518SM is not set
1840# CONFIG_SENSORS_GL520SM is not set
1841# CONFIG_SENSORS_CORETEMP is not set
1842# CONFIG_SENSORS_IBMAEM is not set
1843# CONFIG_SENSORS_IBMPEX is not set
1844# CONFIG_SENSORS_IT87 is not set
1845# CONFIG_SENSORS_LM63 is not set
1846# CONFIG_SENSORS_LM70 is not set
1847# CONFIG_SENSORS_LM75 is not set
1848# CONFIG_SENSORS_LM77 is not set
1849# CONFIG_SENSORS_LM78 is not set
1850# CONFIG_SENSORS_LM80 is not set
1851# CONFIG_SENSORS_LM83 is not set
1852CONFIG_SENSORS_LM85=m
1853# CONFIG_SENSORS_LM87 is not set
1854# CONFIG_SENSORS_LM90 is not set
1855# CONFIG_SENSORS_LM92 is not set
1856# CONFIG_SENSORS_LM93 is not set
1857# CONFIG_SENSORS_LTC4245 is not set
1858# CONFIG_SENSORS_MAX1111 is not set
1859# CONFIG_SENSORS_MAX1619 is not set
1860# CONFIG_SENSORS_MAX6650 is not set
1861# CONFIG_SENSORS_PC87360 is not set
1862# CONFIG_SENSORS_PC87427 is not set
1863# CONFIG_SENSORS_SIS5595 is not set
1864# CONFIG_SENSORS_DME1737 is not set
1865# CONFIG_SENSORS_SMSC47M1 is not set
1866# CONFIG_SENSORS_SMSC47M192 is not set
1867# CONFIG_SENSORS_SMSC47B397 is not set
1868# CONFIG_SENSORS_ADS7828 is not set
1869# CONFIG_SENSORS_THMC50 is not set
1870# CONFIG_SENSORS_VIA686A is not set
1871# CONFIG_SENSORS_VT1211 is not set
1872# CONFIG_SENSORS_VT8231 is not set
1873# CONFIG_SENSORS_W83781D is not set
1874# CONFIG_SENSORS_W83791D is not set
1875# CONFIG_SENSORS_W83792D is not set
1876# CONFIG_SENSORS_W83793 is not set
1877# CONFIG_SENSORS_W83L785TS is not set
1878# CONFIG_SENSORS_W83L786NG is not set
1879# CONFIG_SENSORS_W83627HF is not set
1880# CONFIG_SENSORS_W83627EHF is not set
1881# CONFIG_SENSORS_HDAPS is not set
1882# CONFIG_SENSORS_LIS3LV02D is not set
1883# CONFIG_SENSORS_APPLESMC is not set
1884# CONFIG_HWMON_DEBUG_CHIP is not set
1885CONFIG_THERMAL=y
1886# CONFIG_THERMAL_HWMON is not set
1887# CONFIG_WATCHDOG is not set
1888CONFIG_SSB_POSSIBLE=y
1889
1890#
1891# Sonics Silicon Backplane
1892#
1893# CONFIG_SSB is not set
1894
1895#
1896# Multifunction device drivers
1897#
1898# CONFIG_MFD_CORE is not set
1899# CONFIG_MFD_SM501 is not set
1900# CONFIG_HTC_PASIC3 is not set
1901# CONFIG_MFD_TMIO is not set
1902# CONFIG_MFD_WM8400 is not set
1903# CONFIG_MFD_WM8350_I2C is not set
1904# CONFIG_MFD_PCF50633 is not set
1905# CONFIG_MFD_TIMBERDALE is not set
1906# CONFIG_REGULATOR is not set
1907
1908#
1909# Multimedia devices
1910#
1911
1912#
1913# Multimedia core support
1914#
1915CONFIG_VIDEO_DEV=m
1916CONFIG_VIDEO_V4L2_COMMON=m
1917CONFIG_VIDEO_ALLOW_V4L1=y
1918CONFIG_VIDEO_V4L1_COMPAT=y
1919CONFIG_DVB_CORE=m
1920CONFIG_VIDEO_MEDIA=m
1921
1922#
1923# Multimedia drivers
1924#
1925# CONFIG_MEDIA_ATTACH is not set
1926CONFIG_MEDIA_TUNER=m
1927# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
1928CONFIG_MEDIA_TUNER_SIMPLE=m
1929CONFIG_MEDIA_TUNER_TDA8290=m
1930CONFIG_MEDIA_TUNER_TDA18271=m
1931CONFIG_MEDIA_TUNER_TDA9887=m
1932CONFIG_MEDIA_TUNER_TEA5761=m
1933CONFIG_MEDIA_TUNER_TEA5767=m
1934CONFIG_MEDIA_TUNER_MT20XX=m
1935CONFIG_MEDIA_TUNER_MT2060=m
1936CONFIG_MEDIA_TUNER_XC2028=m
1937CONFIG_MEDIA_TUNER_XC5000=m
1938CONFIG_VIDEO_V4L2=m
1939CONFIG_VIDEO_V4L1=m
1940CONFIG_VIDEOBUF_GEN=m
1941CONFIG_VIDEOBUF_VMALLOC=m
1942CONFIG_VIDEO_IR=m
1943CONFIG_VIDEO_TVEEPROM=m
1944CONFIG_VIDEO_TUNER=m
1945CONFIG_VIDEO_CAPTURE_DRIVERS=y
1946# CONFIG_VIDEO_ADV_DEBUG is not set
1947# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
1948CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
1949CONFIG_VIDEO_IR_I2C=m
1950CONFIG_VIDEO_MSP3400=m
1951CONFIG_VIDEO_CS53L32A=m
1952CONFIG_VIDEO_WM8775=m
1953CONFIG_VIDEO_SAA711X=m
1954CONFIG_VIDEO_TVP5150=m
1955CONFIG_VIDEO_CX25840=m
1956CONFIG_VIDEO_CX2341X=m
1957# CONFIG_VIDEO_VIVI is not set
1958# CONFIG_VIDEO_BT848 is not set
1959# CONFIG_VIDEO_PMS is not set
1960# CONFIG_VIDEO_CPIA is not set
1961# CONFIG_VIDEO_CPIA2 is not set
1962# CONFIG_VIDEO_SAA5246A is not set
1963# CONFIG_VIDEO_SAA5249 is not set
1964# CONFIG_VIDEO_STRADIS is not set
1965# CONFIG_VIDEO_ZORAN is not set
1966# CONFIG_VIDEO_SAA7134 is not set
1967# CONFIG_VIDEO_MXB is not set
1968# CONFIG_VIDEO_HEXIUM_ORION is not set
1969# CONFIG_VIDEO_HEXIUM_GEMINI is not set
1970# CONFIG_VIDEO_CX88 is not set
1971# CONFIG_VIDEO_CX23885 is not set
1972# CONFIG_VIDEO_AU0828 is not set
1973# CONFIG_VIDEO_IVTV is not set
1974# CONFIG_VIDEO_CX18 is not set
1975# CONFIG_VIDEO_CAFE_CCIC is not set
1976# CONFIG_SOC_CAMERA is not set
1977CONFIG_V4L_USB_DRIVERS=y
1978# CONFIG_USB_VIDEO_CLASS is not set
1979# CONFIG_USB_GSPCA is not set
1980CONFIG_VIDEO_PVRUSB2=m
1981CONFIG_VIDEO_PVRUSB2_SYSFS=y
1982CONFIG_VIDEO_PVRUSB2_DVB=y
1983# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
1984CONFIG_VIDEO_EM28XX=m
1985# CONFIG_VIDEO_EM28XX_ALSA is not set
1986# CONFIG_VIDEO_EM28XX_DVB is not set
1987# CONFIG_VIDEO_USBVISION is not set
1988CONFIG_VIDEO_USBVIDEO=m
1989CONFIG_USB_VICAM=m
1990CONFIG_USB_IBMCAM=m
1991CONFIG_USB_KONICAWC=m
1992CONFIG_USB_QUICKCAM_MESSENGER=m
1993CONFIG_USB_ET61X251=m
1994CONFIG_VIDEO_OVCAMCHIP=m
1995CONFIG_USB_W9968CF=m
1996CONFIG_USB_OV511=m
1997CONFIG_USB_SE401=m
1998CONFIG_USB_SN9C102=m
1999CONFIG_USB_STV680=m
2000# CONFIG_USB_ZC0301 is not set
2001CONFIG_USB_PWC=m
2002# CONFIG_USB_PWC_DEBUG is not set
2003# CONFIG_USB_ZR364XX is not set
2004# CONFIG_USB_STKWEBCAM is not set
2005# CONFIG_USB_S2255 is not set
2006CONFIG_RADIO_ADAPTERS=y
2007# CONFIG_RADIO_CADET is not set
2008# CONFIG_RADIO_RTRACK is not set
2009# CONFIG_RADIO_RTRACK2 is not set
2010# CONFIG_RADIO_AZTECH is not set
2011# CONFIG_RADIO_GEMTEK is not set
2012# CONFIG_RADIO_GEMTEK_PCI is not set
2013# CONFIG_RADIO_MAXIRADIO is not set
2014# CONFIG_RADIO_MAESTRO is not set
2015# CONFIG_RADIO_SF16FMI is not set
2016# CONFIG_RADIO_SF16FMR2 is not set
2017# CONFIG_RADIO_TERRATEC is not set
2018# CONFIG_RADIO_TRUST is not set
2019# CONFIG_RADIO_TYPHOON is not set
2020# CONFIG_RADIO_ZOLTRIX is not set
2021# CONFIG_USB_DSBR is not set
2022# CONFIG_USB_SI470X is not set
2023# CONFIG_USB_MR800 is not set
2024# CONFIG_RADIO_TEA5764 is not set
2025# CONFIG_DVB_DYNAMIC_MINORS is not set
2026CONFIG_DVB_CAPTURE_DRIVERS=y
2027
2028#
2029# Supported SAA7146 based PCI Adapters
2030#
2031# CONFIG_TTPCI_EEPROM is not set
2032# CONFIG_DVB_AV7110 is not set
2033# CONFIG_DVB_BUDGET_CORE is not set
2034
2035#
2036# Supported USB Adapters
2037#
2038CONFIG_DVB_USB=m
2039# CONFIG_DVB_USB_DEBUG is not set
2040CONFIG_DVB_USB_A800=m
2041CONFIG_DVB_USB_DIBUSB_MB=m
2042# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
2043CONFIG_DVB_USB_DIBUSB_MC=m
2044# CONFIG_DVB_USB_DIB0700 is not set
2045CONFIG_DVB_USB_UMT_010=m
2046# CONFIG_DVB_USB_CXUSB is not set
2047# CONFIG_DVB_USB_M920X is not set
2048# CONFIG_DVB_USB_GL861 is not set
2049# CONFIG_DVB_USB_AU6610 is not set
2050CONFIG_DVB_USB_DIGITV=m
2051CONFIG_DVB_USB_VP7045=m
2052CONFIG_DVB_USB_VP702X=m
2053CONFIG_DVB_USB_GP8PSK=m
2054CONFIG_DVB_USB_NOVA_T_USB2=m
2055# CONFIG_DVB_USB_TTUSB2 is not set
2056CONFIG_DVB_USB_DTT200U=m
2057# CONFIG_DVB_USB_OPERA1 is not set
2058# CONFIG_DVB_USB_AF9005 is not set
2059# CONFIG_DVB_USB_DW2102 is not set
2060# CONFIG_DVB_USB_CINERGY_T2 is not set
2061# CONFIG_DVB_USB_ANYSEE is not set
2062# CONFIG_DVB_USB_DTV5100 is not set
2063# CONFIG_DVB_USB_AF9015 is not set
2064# CONFIG_DVB_TTUSB_BUDGET is not set
2065# CONFIG_DVB_TTUSB_DEC is not set
2066# CONFIG_DVB_SIANO_SMS1XXX is not set
2067
2068#
2069# Supported FlexCopII (B2C2) Adapters
2070#
2071# CONFIG_DVB_B2C2_FLEXCOP is not set
2072
2073#
2074# Supported BT878 Adapters
2075#
2076
2077#
2078# Supported Pluto2 Adapters
2079#
2080# CONFIG_DVB_PLUTO2 is not set
2081
2082#
2083# Supported SDMC DM1105 Adapters
2084#
2085# CONFIG_DVB_DM1105 is not set
2086
2087#
2088# Supported FireWire (IEEE 1394) Adapters
2089#
2090# CONFIG_DVB_FIREDTV is not set
2091
2092#
2093# Supported DVB Frontends
2094#
2095
2096#
2097# Customise DVB Frontends
2098#
2099# CONFIG_DVB_FE_CUSTOMISE is not set
2100
2101#
2102# Multistandard (satellite) frontends
2103#
2104# CONFIG_DVB_STB0899 is not set
2105# CONFIG_DVB_STB6100 is not set
2106
2107#
2108# DVB-S (satellite) frontends
2109#
2110CONFIG_DVB_CX24110=m
2111CONFIG_DVB_CX24123=m
2112CONFIG_DVB_MT312=m
2113CONFIG_DVB_S5H1420=m
2114# CONFIG_DVB_STV0288 is not set
2115# CONFIG_DVB_STB6000 is not set
2116CONFIG_DVB_STV0299=m
2117CONFIG_DVB_TDA8083=m
2118CONFIG_DVB_TDA10086=m
2119# CONFIG_DVB_TDA8261 is not set
2120CONFIG_DVB_VES1X93=m
2121# CONFIG_DVB_TUNER_ITD1000 is not set
2122# CONFIG_DVB_TUNER_CX24113 is not set
2123CONFIG_DVB_TDA826X=m
2124CONFIG_DVB_TUA6100=m
2125# CONFIG_DVB_CX24116 is not set
2126# CONFIG_DVB_SI21XX is not set
2127
2128#
2129# DVB-T (terrestrial) frontends
2130#
2131CONFIG_DVB_SP8870=m
2132CONFIG_DVB_SP887X=m
2133CONFIG_DVB_CX22700=m
2134CONFIG_DVB_CX22702=m
2135# CONFIG_DVB_DRX397XD is not set
2136CONFIG_DVB_L64781=m
2137CONFIG_DVB_TDA1004X=m
2138CONFIG_DVB_NXT6000=m
2139CONFIG_DVB_MT352=m
2140CONFIG_DVB_ZL10353=m
2141CONFIG_DVB_DIB3000MB=m
2142CONFIG_DVB_DIB3000MC=m
2143# CONFIG_DVB_DIB7000M is not set
2144# CONFIG_DVB_DIB7000P is not set
2145CONFIG_DVB_TDA10048=m
2146
2147#
2148# DVB-C (cable) frontends
2149#
2150CONFIG_DVB_VES1820=m
2151CONFIG_DVB_TDA10021=m
2152# CONFIG_DVB_TDA10023 is not set
2153CONFIG_DVB_STV0297=m
2154
2155#
2156# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
2157#
2158CONFIG_DVB_NXT200X=m
2159CONFIG_DVB_OR51211=m
2160CONFIG_DVB_OR51132=m
2161CONFIG_DVB_BCM3510=m
2162CONFIG_DVB_LGDT330X=m
2163# CONFIG_DVB_LGDT3304 is not set
2164CONFIG_DVB_S5H1409=m
2165# CONFIG_DVB_AU8522 is not set
2166CONFIG_DVB_S5H1411=m
2167
2168#
2169# ISDB-T (terrestrial) frontends
2170#
2171# CONFIG_DVB_S921 is not set
2172
2173#
2174# Digital terrestrial only tuners/PLL
2175#
2176CONFIG_DVB_PLL=m
2177# CONFIG_DVB_TUNER_DIB0070 is not set
2178
2179#
2180# SEC control devices for DVB-S
2181#
2182CONFIG_DVB_LNBP21=m
2183# CONFIG_DVB_ISL6405 is not set
2184CONFIG_DVB_ISL6421=m
2185# CONFIG_DVB_LGS8GL5 is not set
2186
2187#
2188# Tools to develop new frontends
2189#
2190# CONFIG_DVB_DUMMY_FE is not set
2191# CONFIG_DVB_AF9013 is not set
2192CONFIG_DAB=y
2193CONFIG_USB_DABUSB=m
2194
2195#
2196# Graphics support
2197#
2198CONFIG_AGP=m
2199# CONFIG_AGP_ALI is not set
2200# CONFIG_AGP_ATI is not set
2201# CONFIG_AGP_AMD is not set
2202# CONFIG_AGP_AMD64 is not set
2203CONFIG_AGP_INTEL=m
2204CONFIG_AGP_NVIDIA=m
2205# CONFIG_AGP_SIS is not set
2206# CONFIG_AGP_SWORKS is not set
2207# CONFIG_AGP_VIA is not set
2208# CONFIG_AGP_EFFICEON is not set
2209CONFIG_DRM=m
2210# CONFIG_DRM_TDFX is not set
2211# CONFIG_DRM_R128 is not set
2212# CONFIG_DRM_RADEON is not set
2213# CONFIG_DRM_I810 is not set
2214# CONFIG_DRM_I830 is not set
2215CONFIG_DRM_I915=m
2216CONFIG_DRM_I915_KMS=y
2217# CONFIG_DRM_MGA is not set
2218# CONFIG_DRM_SIS is not set
2219# CONFIG_DRM_VIA is not set
2220# CONFIG_DRM_SAVAGE is not set
2221CONFIG_DRM_PSB=m
2222CONFIG_VGASTATE=m
2223CONFIG_VIDEO_OUTPUT_CONTROL=y
2224CONFIG_FB=y
2225CONFIG_FIRMWARE_EDID=y
2226CONFIG_FB_DDC=m
2227CONFIG_FB_BOOT_VESA_SUPPORT=y
2228CONFIG_FB_CFB_FILLRECT=m
2229CONFIG_FB_CFB_COPYAREA=m
2230CONFIG_FB_CFB_IMAGEBLIT=m
2231# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
2232# CONFIG_FB_SYS_FILLRECT is not set
2233# CONFIG_FB_SYS_COPYAREA is not set
2234# CONFIG_FB_SYS_IMAGEBLIT is not set
2235# CONFIG_FB_FOREIGN_ENDIAN is not set
2236# CONFIG_FB_SYS_FOPS is not set
2237# CONFIG_FB_SVGALIB is not set
2238# CONFIG_FB_MACMODES is not set
2239CONFIG_FB_BACKLIGHT=y
2240CONFIG_FB_MODE_HELPERS=y
2241CONFIG_FB_TILEBLITTING=y
2242
2243#
2244# Frame buffer hardware drivers
2245#
2246# CONFIG_FB_CIRRUS is not set
2247# CONFIG_FB_PM2 is not set
2248# CONFIG_FB_CYBER2000 is not set
2249# CONFIG_FB_ARC is not set
2250# CONFIG_FB_ASILIANT is not set
2251# CONFIG_FB_IMSTT is not set
2252CONFIG_FB_VGA16=m
2253# CONFIG_FB_UVESA is not set
2254# CONFIG_FB_VESA is not set
2255# CONFIG_FB_EFI is not set
2256# CONFIG_FB_N411 is not set
2257# CONFIG_FB_HGA is not set
2258# CONFIG_FB_S1D13XXX is not set
2259CONFIG_FB_NVIDIA=m
2260CONFIG_FB_NVIDIA_I2C=y
2261# CONFIG_FB_NVIDIA_DEBUG is not set
2262CONFIG_FB_NVIDIA_BACKLIGHT=y
2263CONFIG_FB_RIVA=m
2264CONFIG_FB_RIVA_I2C=y
2265# CONFIG_FB_RIVA_DEBUG is not set
2266CONFIG_FB_RIVA_BACKLIGHT=y
2267CONFIG_FB_I810=m
2268CONFIG_FB_I810_GTF=y
2269CONFIG_FB_I810_I2C=y
2270# CONFIG_FB_LE80578 is not set
2271CONFIG_FB_INTEL=m
2272# CONFIG_FB_INTEL_DEBUG is not set
2273CONFIG_FB_INTEL_I2C=y
2274# CONFIG_FB_MATROX is not set
2275CONFIG_FB_RADEON=m
2276CONFIG_FB_RADEON_I2C=y
2277CONFIG_FB_RADEON_BACKLIGHT=y
2278# CONFIG_FB_RADEON_DEBUG is not set
2279# CONFIG_FB_ATY128 is not set
2280CONFIG_FB_ATY=m
2281CONFIG_FB_ATY_CT=y
2282CONFIG_FB_ATY_GENERIC_LCD=y
2283CONFIG_FB_ATY_GX=y
2284CONFIG_FB_ATY_BACKLIGHT=y
2285# CONFIG_FB_S3 is not set
2286# CONFIG_FB_SAVAGE is not set
2287# CONFIG_FB_SIS is not set
2288# CONFIG_FB_VIA is not set
2289# CONFIG_FB_NEOMAGIC is not set
2290# CONFIG_FB_KYRO is not set
2291# CONFIG_FB_3DFX is not set
2292# CONFIG_FB_VOODOO1 is not set
2293# CONFIG_FB_VT8623 is not set
2294# CONFIG_FB_CYBLA is not set
2295# CONFIG_FB_TRIDENT is not set
2296# CONFIG_FB_ARK is not set
2297# CONFIG_FB_PM3 is not set
2298# CONFIG_FB_CARMINE is not set
2299# CONFIG_FB_GEODE is not set
2300# CONFIG_FB_VIRTUAL is not set
2301# CONFIG_FB_METRONOME is not set
2302# CONFIG_FB_MB862XX is not set
2303CONFIG_BACKLIGHT_LCD_SUPPORT=y
2304CONFIG_LCD_CLASS_DEVICE=m
2305# CONFIG_LCD_LTV350QV is not set
2306# CONFIG_LCD_ILI9320 is not set
2307# CONFIG_LCD_TDO24M is not set
2308# CONFIG_LCD_VGG2432A4 is not set
2309# CONFIG_LCD_PLATFORM is not set
2310CONFIG_BACKLIGHT_CLASS_DEVICE=y
2311CONFIG_BACKLIGHT_GENERIC=y
2312# CONFIG_BACKLIGHT_PROGEAR is not set
2313# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
2314# CONFIG_BACKLIGHT_SAHARA is not set
2315
2316#
2317# Display device support
2318#
2319# CONFIG_DISPLAY_SUPPORT is not set
2320
2321#
2322# Console display driver support
2323#
2324CONFIG_VGA_CONSOLE=y
2325CONFIG_VGACON_SOFT_SCROLLBACK=y
2326CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
2327CONFIG_MDA_CONSOLE=m
2328CONFIG_DUMMY_CONSOLE=y
2329CONFIG_FRAMEBUFFER_CONSOLE=y
2330# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
2331CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
2332# CONFIG_FONTS is not set
2333CONFIG_FONT_8x8=y
2334CONFIG_FONT_8x16=y
2335# CONFIG_LOGO is not set
2336CONFIG_SOUND=m
2337CONFIG_SOUND_OSS_CORE=y
2338CONFIG_SND=m
2339CONFIG_SND_TIMER=m
2340CONFIG_SND_PCM=m
2341CONFIG_SND_HWDEP=m
2342CONFIG_SND_RAWMIDI=m
2343CONFIG_SND_JACK=y
2344CONFIG_SND_SEQUENCER=m
2345CONFIG_SND_SEQ_DUMMY=m
2346CONFIG_SND_OSSEMUL=y
2347CONFIG_SND_MIXER_OSS=m
2348CONFIG_SND_PCM_OSS=m
2349CONFIG_SND_PCM_OSS_PLUGINS=y
2350CONFIG_SND_SEQUENCER_OSS=y
2351# CONFIG_SND_HRTIMER is not set
2352CONFIG_SND_DYNAMIC_MINORS=y
2353CONFIG_SND_SUPPORT_OLD_API=y
2354CONFIG_SND_VERBOSE_PROCFS=y
2355CONFIG_SND_VERBOSE_PRINTK=y
2356CONFIG_SND_DEBUG=y
2357# CONFIG_SND_DEBUG_VERBOSE is not set
2358# CONFIG_SND_PCM_XRUN_DEBUG is not set
2359CONFIG_SND_VMASTER=y
2360CONFIG_SND_MPU401_UART=m
2361CONFIG_SND_AC97_CODEC=m
2362CONFIG_SND_DRIVERS=y
2363CONFIG_SND_DUMMY=m
2364CONFIG_SND_VIRMIDI=m
2365CONFIG_SND_MTPAV=m
2366CONFIG_SND_SERIAL_U16550=m
2367CONFIG_SND_MPU401=m
2368CONFIG_SND_AC97_POWER_SAVE=y
2369CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
2370CONFIG_SND_ISA=y
2371# CONFIG_SND_ADLIB is not set
2372# CONFIG_SND_AD1816A is not set
2373# CONFIG_SND_AD1848 is not set
2374# CONFIG_SND_ALS100 is not set
2375# CONFIG_SND_AZT2320 is not set
2376# CONFIG_SND_CMI8330 is not set
2377# CONFIG_SND_CS4231 is not set
2378# CONFIG_SND_CS4232 is not set
2379# CONFIG_SND_CS4236 is not set
2380# CONFIG_SND_DT019X is not set
2381# CONFIG_SND_ES968 is not set
2382# CONFIG_SND_ES1688 is not set
2383# CONFIG_SND_ES18XX is not set
2384# CONFIG_SND_SC6000 is not set
2385# CONFIG_SND_GUSCLASSIC is not set
2386# CONFIG_SND_GUSEXTREME is not set
2387# CONFIG_SND_GUSMAX is not set
2388# CONFIG_SND_INTERWAVE is not set
2389# CONFIG_SND_INTERWAVE_STB is not set
2390# CONFIG_SND_OPL3SA2 is not set
2391# CONFIG_SND_OPTI92X_AD1848 is not set
2392# CONFIG_SND_OPTI92X_CS4231 is not set
2393# CONFIG_SND_OPTI93X is not set
2394# CONFIG_SND_MIRO is not set
2395# CONFIG_SND_SB8 is not set
2396# CONFIG_SND_SB16 is not set
2397# CONFIG_SND_SBAWE is not set
2398# CONFIG_SND_SGALAXY is not set
2399# CONFIG_SND_SSCAPE is not set
2400# CONFIG_SND_WAVEFRONT is not set
2401CONFIG_SND_PCI=y
2402# CONFIG_SND_AD1889 is not set
2403# CONFIG_SND_ALS300 is not set
2404# CONFIG_SND_ALS4000 is not set
2405# CONFIG_SND_ALI5451 is not set
2406# CONFIG_SND_ATIIXP is not set
2407# CONFIG_SND_ATIIXP_MODEM is not set
2408# CONFIG_SND_AU8810 is not set
2409# CONFIG_SND_AU8820 is not set
2410# CONFIG_SND_AU8830 is not set
2411# CONFIG_SND_AW2 is not set
2412# CONFIG_SND_AZT3328 is not set
2413# CONFIG_SND_BT87X is not set
2414# CONFIG_SND_CA0106 is not set
2415# CONFIG_SND_CMIPCI is not set
2416# CONFIG_SND_OXYGEN is not set
2417# CONFIG_SND_CS4281 is not set
2418# CONFIG_SND_CS46XX is not set
2419# CONFIG_SND_CS5530 is not set
2420# CONFIG_SND_CS5535AUDIO is not set
2421# CONFIG_SND_DARLA20 is not set
2422# CONFIG_SND_GINA20 is not set
2423# CONFIG_SND_LAYLA20 is not set
2424# CONFIG_SND_DARLA24 is not set
2425# CONFIG_SND_GINA24 is not set
2426# CONFIG_SND_LAYLA24 is not set
2427# CONFIG_SND_MONA is not set
2428# CONFIG_SND_MIA is not set
2429# CONFIG_SND_ECHO3G is not set
2430# CONFIG_SND_INDIGO is not set
2431# CONFIG_SND_INDIGOIO is not set
2432# CONFIG_SND_INDIGODJ is not set
2433# CONFIG_SND_EMU10K1 is not set
2434# CONFIG_SND_EMU10K1X is not set
2435# CONFIG_SND_ENS1370 is not set
2436# CONFIG_SND_ENS1371 is not set
2437# CONFIG_SND_ES1938 is not set
2438# CONFIG_SND_ES1968 is not set
2439# CONFIG_SND_FM801 is not set
2440CONFIG_SND_HDA_INTEL=m
2441# CONFIG_SND_HDA_HWDEP is not set
2442# CONFIG_SND_HDA_INPUT_BEEP is not set
2443CONFIG_SND_HDA_CODEC_REALTEK=y
2444CONFIG_SND_HDA_CODEC_ANALOG=y
2445CONFIG_SND_HDA_CODEC_SIGMATEL=y
2446CONFIG_SND_HDA_CODEC_VIA=y
2447CONFIG_SND_HDA_CODEC_ATIHDMI=y
2448CONFIG_SND_HDA_CODEC_NVHDMI=y
2449CONFIG_SND_HDA_CODEC_INTELHDMI=y
2450CONFIG_SND_HDA_ELD=y
2451CONFIG_SND_HDA_CODEC_CONEXANT=y
2452CONFIG_SND_HDA_CODEC_CMEDIA=y
2453CONFIG_SND_HDA_CODEC_SI3054=y
2454CONFIG_SND_HDA_GENERIC=y
2455# CONFIG_SND_HDA_POWER_SAVE is not set
2456# CONFIG_SND_HDSP is not set
2457# CONFIG_SND_HDSPM is not set
2458# CONFIG_SND_HIFIER is not set
2459# CONFIG_SND_ICE1712 is not set
2460# CONFIG_SND_ICE1724 is not set
2461CONFIG_SND_INTEL8X0=m
2462CONFIG_SND_INTEL8X0M=m
2463# CONFIG_SND_KORG1212 is not set
2464# CONFIG_SND_MAESTRO3 is not set
2465# CONFIG_SND_MIXART is not set
2466# CONFIG_SND_NM256 is not set
2467# CONFIG_SND_PCXHR is not set
2468# CONFIG_SND_RIPTIDE is not set
2469# CONFIG_SND_RME32 is not set
2470# CONFIG_SND_RME96 is not set
2471# CONFIG_SND_RME9652 is not set
2472# CONFIG_SND_SIS7019 is not set
2473# CONFIG_SND_SONICVIBES is not set
2474# CONFIG_SND_TRIDENT is not set
2475# CONFIG_SND_VIA82XX is not set
2476# CONFIG_SND_VIA82XX_MODEM is not set
2477# CONFIG_SND_VIRTUOSO is not set
2478# CONFIG_SND_VX222 is not set
2479# CONFIG_SND_YMFPCI is not set
2480CONFIG_SND_SPI=y
2481CONFIG_SND_USB=y
2482CONFIG_SND_USB_AUDIO=m
2483# CONFIG_SND_USB_USX2Y is not set
2484# CONFIG_SND_USB_CAIAQ is not set
2485# CONFIG_SND_USB_US122L is not set
2486# CONFIG_SND_SOC is not set
2487# CONFIG_SOUND_PRIME is not set
2488CONFIG_AC97_BUS=m
2489CONFIG_HID_SUPPORT=y
2490CONFIG_HID=y
2491# CONFIG_HID_DEBUG is not set
2492# CONFIG_HIDRAW is not set
2493
2494#
2495# USB Input Devices
2496#
2497CONFIG_USB_HID=y
2498CONFIG_HID_PID=y
2499CONFIG_USB_HIDDEV=y
2500
2501#
2502# Special HID drivers
2503#
2504CONFIG_HID_COMPAT=y
2505CONFIG_HID_A4TECH=y
2506CONFIG_HID_APPLE=y
2507CONFIG_HID_BELKIN=y
2508CONFIG_HID_CHERRY=y
2509CONFIG_HID_CHICONY=y
2510CONFIG_HID_CYPRESS=y
2511CONFIG_HID_EZKEY=y
2512CONFIG_HID_GYRATION=y
2513CONFIG_HID_LOGITECH=y
2514CONFIG_LOGITECH_FF=y
2515# CONFIG_LOGIRUMBLEPAD2_FF is not set
2516CONFIG_HID_MICROSOFT=y
2517CONFIG_HID_MONTEREY=y
2518CONFIG_HID_NTRIG=y
2519CONFIG_HID_PANTHERLORD=y
2520# CONFIG_PANTHERLORD_FF is not set
2521CONFIG_HID_PETALYNX=y
2522CONFIG_HID_SAMSUNG=y
2523CONFIG_HID_SONY=y
2524CONFIG_HID_SUNPLUS=y
2525# CONFIG_GREENASIA_FF is not set
2526CONFIG_HID_TOPSEED=y
2527CONFIG_THRUSTMASTER_FF=y
2528# CONFIG_ZEROPLUS_FF is not set
2529CONFIG_USB_SUPPORT=y
2530CONFIG_USB_ARCH_HAS_HCD=y
2531CONFIG_USB_ARCH_HAS_OHCI=y
2532CONFIG_USB_ARCH_HAS_EHCI=y
2533CONFIG_USB=y
2534# CONFIG_USB_DEBUG is not set
2535# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
2536
2537#
2538# Miscellaneous USB options
2539#
2540CONFIG_USB_DEVICEFS=y
2541CONFIG_USB_DEVICE_CLASS=y
2542# CONFIG_USB_DYNAMIC_MINORS is not set
2543CONFIG_USB_SUSPEND=y
2544# CONFIG_USB_OTG is not set
2545CONFIG_USB_MON=y
2546# CONFIG_USB_WUSB is not set
2547# CONFIG_USB_WUSB_CBAF is not set
2548
2549#
2550# USB Host Controller Drivers
2551#
2552# CONFIG_USB_C67X00_HCD is not set
2553CONFIG_USB_EHCI_HCD=y
2554CONFIG_USB_EHCI_ROOT_HUB_TT=y
2555CONFIG_USB_EHCI_TT_NEWSCHED=y
2556# CONFIG_USB_OXU210HP_HCD is not set
2557# CONFIG_USB_ISP116X_HCD is not set
2558# CONFIG_USB_ISP1760_HCD is not set
2559CONFIG_USB_OHCI_HCD=y
2560# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
2561# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
2562CONFIG_USB_OHCI_LITTLE_ENDIAN=y
2563CONFIG_USB_UHCI_HCD=y
2564# CONFIG_USB_SL811_HCD is not set
2565# CONFIG_USB_R8A66597_HCD is not set
2566# CONFIG_USB_WHCI_HCD is not set
2567# CONFIG_USB_HWA_HCD is not set
2568# CONFIG_USB_GADGET_MUSB_HDRC is not set
2569
2570#
2571# USB Device Class drivers
2572#
2573CONFIG_USB_ACM=m
2574CONFIG_USB_PRINTER=m
2575# CONFIG_USB_WDM is not set
2576# CONFIG_USB_TMC is not set
2577
2578#
2579# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
2580#
2581
2582#
2583# see USB_STORAGE Help for more information
2584#
2585CONFIG_USB_STORAGE=y
2586# CONFIG_USB_STORAGE_DEBUG is not set
2587CONFIG_USB_STORAGE_DATAFAB=y
2588CONFIG_USB_STORAGE_FREECOM=y
2589# CONFIG_USB_STORAGE_ISD200 is not set
2590CONFIG_USB_STORAGE_USBAT=y
2591CONFIG_USB_STORAGE_SDDR09=y
2592CONFIG_USB_STORAGE_SDDR55=y
2593CONFIG_USB_STORAGE_JUMPSHOT=y
2594CONFIG_USB_STORAGE_ALAUDA=y
2595# CONFIG_USB_STORAGE_ONETOUCH is not set
2596# CONFIG_USB_STORAGE_KARMA is not set
2597# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
2598# CONFIG_USB_LIBUSUAL is not set
2599
2600#
2601# USB Imaging devices
2602#
2603CONFIG_USB_MDC800=m
2604CONFIG_USB_MICROTEK=m
2605
2606#
2607# USB port drivers
2608#
2609CONFIG_USB_SERIAL=m
2610CONFIG_USB_EZUSB=y
2611CONFIG_USB_SERIAL_GENERIC=y
2612# CONFIG_USB_SERIAL_AIRCABLE is not set
2613CONFIG_USB_SERIAL_ARK3116=m
2614CONFIG_USB_SERIAL_BELKIN=m
2615# CONFIG_USB_SERIAL_CH341 is not set
2616CONFIG_USB_SERIAL_WHITEHEAT=m
2617CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
2618CONFIG_USB_SERIAL_CP2101=m
2619CONFIG_USB_SERIAL_CYPRESS_M8=m
2620CONFIG_USB_SERIAL_EMPEG=m
2621CONFIG_USB_SERIAL_FTDI_SIO=m
2622CONFIG_USB_SERIAL_FUNSOFT=m
2623CONFIG_USB_SERIAL_VISOR=m
2624CONFIG_USB_SERIAL_IPAQ=m
2625CONFIG_USB_SERIAL_IR=m
2626CONFIG_USB_SERIAL_EDGEPORT=m
2627CONFIG_USB_SERIAL_EDGEPORT_TI=m
2628CONFIG_USB_SERIAL_GARMIN=m
2629CONFIG_USB_SERIAL_IPW=m
2630# CONFIG_USB_SERIAL_IUU is not set
2631CONFIG_USB_SERIAL_KEYSPAN_PDA=m
2632CONFIG_USB_SERIAL_KEYSPAN=m
2633CONFIG_USB_SERIAL_KEYSPAN_MPR=y
2634CONFIG_USB_SERIAL_KEYSPAN_USA28=y
2635CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
2636CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
2637CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
2638CONFIG_USB_SERIAL_KEYSPAN_USA19=y
2639CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
2640CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
2641CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
2642CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
2643CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
2644CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
2645CONFIG_USB_SERIAL_KLSI=m
2646CONFIG_USB_SERIAL_KOBIL_SCT=m
2647CONFIG_USB_SERIAL_MCT_U232=m
2648# CONFIG_USB_SERIAL_MOS7720 is not set
2649# CONFIG_USB_SERIAL_MOS7840 is not set
2650# CONFIG_USB_SERIAL_MOTOROLA is not set
2651CONFIG_USB_SERIAL_NAVMAN=m
2652CONFIG_USB_SERIAL_PL2303=m
2653# CONFIG_USB_SERIAL_OTI6858 is not set
2654# CONFIG_USB_SERIAL_SPCP8X5 is not set
2655CONFIG_USB_SERIAL_HP4X=m
2656CONFIG_USB_SERIAL_SAFE=m
2657CONFIG_USB_SERIAL_SAFE_PADDED=y
2658# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
2659CONFIG_USB_SERIAL_SIERRAWIRELESS=m
2660CONFIG_USB_SERIAL_TI=m
2661CONFIG_USB_SERIAL_CYBERJACK=m
2662CONFIG_USB_SERIAL_XIRCOM=m
2663CONFIG_USB_SERIAL_OPTION=m
2664CONFIG_USB_SERIAL_OMNINET=m
2665# CONFIG_USB_SERIAL_OPTICON is not set
2666# CONFIG_USB_SERIAL_DEBUG is not set
2667
2668#
2669# USB Miscellaneous drivers
2670#
2671CONFIG_USB_EMI62=m
2672CONFIG_USB_EMI26=m
2673# CONFIG_USB_ADUTUX is not set
2674# CONFIG_USB_SEVSEG is not set
2675CONFIG_USB_RIO500=m
2676CONFIG_USB_LEGOTOWER=m
2677CONFIG_USB_LCD=m
2678# CONFIG_USB_BERRY_CHARGE is not set
2679CONFIG_USB_LED=m
2680CONFIG_USB_CYPRESS_CY7C63=m
2681CONFIG_USB_CYTHERM=m
2682# CONFIG_USB_PHIDGET is not set
2683CONFIG_USB_IDMOUSE=m
2684# CONFIG_USB_FTDI_ELAN is not set
2685CONFIG_USB_APPLEDISPLAY=m
2686CONFIG_USB_SISUSBVGA=m
2687CONFIG_USB_SISUSBVGA_CON=y
2688CONFIG_USB_LD=m
2689# CONFIG_USB_TRANCEVIBRATOR is not set
2690# CONFIG_USB_IOWARRIOR is not set
2691# CONFIG_USB_TEST is not set
2692# CONFIG_USB_ISIGHTFW is not set
2693# CONFIG_USB_VST is not set
2694CONFIG_USB_ATM=m
2695CONFIG_USB_SPEEDTOUCH=m
2696CONFIG_USB_CXACRU=m
2697CONFIG_USB_UEAGLEATM=m
2698CONFIG_USB_XUSBATM=m
2699CONFIG_USB_GADGET=y
2700# CONFIG_USB_GADGET_DEBUG is not set
2701CONFIG_USB_GADGET_DEBUG_FILES=y
2702# CONFIG_USB_GADGET_DEBUG_FS is not set
2703CONFIG_USB_GADGET_VBUS_DRAW=2
2704CONFIG_USB_GADGET_SELECTED=y
2705# CONFIG_USB_GADGET_AT91 is not set
2706# CONFIG_USB_GADGET_ATMEL_USBA is not set
2707# CONFIG_USB_GADGET_FSL_USB2 is not set
2708# CONFIG_USB_GADGET_LH7A40X is not set
2709# CONFIG_USB_GADGET_OMAP is not set
2710# CONFIG_USB_GADGET_PXA25X is not set
2711# CONFIG_USB_GADGET_PXA27X is not set
2712# CONFIG_USB_GADGET_S3C2410 is not set
2713# CONFIG_USB_GADGET_IMX is not set
2714# CONFIG_USB_GADGET_M66592 is not set
2715CONFIG_USB_GADGET_AMD5536UDC=y
2716CONFIG_USB_AMD5536UDC=y
2717# CONFIG_USB_GADGET_FSL_QE is not set
2718# CONFIG_USB_GADGET_CI13XXX is not set
2719# CONFIG_USB_GADGET_NET2280 is not set
2720# CONFIG_USB_GADGET_GOKU is not set
2721# CONFIG_USB_GADGET_DUMMY_HCD is not set
2722CONFIG_USB_GADGET_DUALSPEED=y
2723# CONFIG_USB_ZERO is not set
2724CONFIG_USB_ETH=m
2725CONFIG_USB_ETH_RNDIS=y
2726# CONFIG_USB_GADGETFS is not set
2727CONFIG_USB_FILE_STORAGE=m
2728CONFIG_USB_FILE_STORAGE_TEST=y
2729# CONFIG_USB_G_SERIAL is not set
2730# CONFIG_USB_MIDI_GADGET is not set
2731# CONFIG_USB_G_PRINTER is not set
2732# CONFIG_USB_CDC_COMPOSITE is not set
2733
2734#
2735# OTG and related infrastructure
2736#
2737# CONFIG_UWB is not set
2738CONFIG_MMC=y
2739# CONFIG_MMC_DEBUG is not set
2740CONFIG_MMC_UNSAFE_RESUME=y
2741
2742#
2743# MMC/SD/SDIO Card Drivers
2744#
2745CONFIG_MMC_BLOCK=y
2746CONFIG_MMC_BLOCK_BOUNCE=y
2747# CONFIG_SDIO_UART is not set
2748# CONFIG_MMC_TEST is not set
2749
2750#
2751# MMC/SD/SDIO Host Controller Drivers
2752#
2753CONFIG_MMC_SDHCI=y
2754# CONFIG_MMC_SDHCI_PCI is not set
2755# CONFIG_MMC_SDHCI_PLTFM is not set
2756# CONFIG_MMC_WBSD is not set
2757# CONFIG_MMC_TIFM_SD is not set
2758# CONFIG_MEMSTICK is not set
2759CONFIG_NEW_LEDS=y
2760CONFIG_LEDS_CLASS=m
2761
2762#
2763# LED drivers
2764#
2765# CONFIG_LEDS_ALIX2 is not set
2766# CONFIG_LEDS_PCA9532 is not set
2767# CONFIG_LEDS_CLEVO_MAIL is not set
2768# CONFIG_LEDS_PCA955X is not set
2769
2770#
2771# LED Triggers
2772#
2773CONFIG_LEDS_TRIGGERS=y
2774CONFIG_LEDS_TRIGGER_TIMER=m
2775CONFIG_LEDS_TRIGGER_HEARTBEAT=m
2776# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
2777# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
2778# CONFIG_ACCESSIBILITY is not set
2779# CONFIG_INFINIBAND is not set
2780# CONFIG_EDAC is not set
2781CONFIG_RTC_LIB=m
2782CONFIG_RTC_CLASS=m
2783
2784#
2785# RTC interfaces
2786#
2787CONFIG_RTC_INTF_SYSFS=y
2788CONFIG_RTC_INTF_PROC=y
2789CONFIG_RTC_INTF_DEV=y
2790CONFIG_RTC_INTF_DEV_UIE_EMUL=y
2791CONFIG_RTC_DRV_TEST=m
2792
2793#
2794# I2C RTC drivers
2795#
2796CONFIG_RTC_DRV_DS1307=m
2797# CONFIG_RTC_DRV_DS1374 is not set
2798CONFIG_RTC_DRV_DS1672=m
2799# CONFIG_RTC_DRV_MAX6900 is not set
2800CONFIG_RTC_DRV_RS5C372=m
2801CONFIG_RTC_DRV_ISL1208=m
2802CONFIG_RTC_DRV_X1205=m
2803CONFIG_RTC_DRV_PCF8563=m
2804CONFIG_RTC_DRV_PCF8583=m
2805# CONFIG_RTC_DRV_M41T80 is not set
2806# CONFIG_RTC_DRV_S35390A is not set
2807# CONFIG_RTC_DRV_FM3130 is not set
2808# CONFIG_RTC_DRV_RX8581 is not set
2809
2810#
2811# SPI RTC drivers
2812#
2813# CONFIG_RTC_DRV_M41T94 is not set
2814# CONFIG_RTC_DRV_DS1305 is not set
2815# CONFIG_RTC_DRV_DS1390 is not set
2816CONFIG_RTC_DRV_MAX6902=m
2817# CONFIG_RTC_DRV_R9701 is not set
2818CONFIG_RTC_DRV_RS5C348=m
2819# CONFIG_RTC_DRV_DS3234 is not set
2820
2821#
2822# Platform RTC drivers
2823#
2824# CONFIG_RTC_DRV_CMOS is not set
2825# CONFIG_RTC_DRV_DS1286 is not set
2826# CONFIG_RTC_DRV_DS1511 is not set
2827CONFIG_RTC_DRV_DS1553=m
2828CONFIG_RTC_DRV_DS1742=m
2829# CONFIG_RTC_DRV_STK17TA8 is not set
2830CONFIG_RTC_DRV_M48T86=m
2831# CONFIG_RTC_DRV_M48T35 is not set
2832# CONFIG_RTC_DRV_M48T59 is not set
2833# CONFIG_RTC_DRV_BQ4802 is not set
2834CONFIG_RTC_DRV_V3020=m
2835
2836#
2837# on-CPU RTC drivers
2838#
2839# CONFIG_DMADEVICES is not set
2840# CONFIG_UIO is not set
2841# CONFIG_STAGING is not set
2842CONFIG_X86_PLATFORM_DEVICES=y
2843# CONFIG_ASUS_LAPTOP is not set
2844# CONFIG_FUJITSU_LAPTOP is not set
2845# CONFIG_TC1100_WMI is not set
2846# CONFIG_MSI_LAPTOP is not set
2847# CONFIG_PANASONIC_LAPTOP is not set
2848# CONFIG_COMPAL_LAPTOP is not set
2849# CONFIG_SONY_LAPTOP is not set
2850# CONFIG_THINKPAD_ACPI is not set
2851# CONFIG_INTEL_MENLOW is not set
2852# CONFIG_EEEPC_LAPTOP is not set
2853# CONFIG_ACPI_WMI is not set
2854# CONFIG_ACPI_ASUS is not set
2855# CONFIG_ACPI_TOSHIBA is not set
2856
2857#
2858# Firmware Drivers
2859#
2860CONFIG_EDD=m
2861# CONFIG_EDD_OFF is not set
2862CONFIG_FIRMWARE_MEMMAP=y
2863# CONFIG_EFI_VARS is not set
2864# CONFIG_DELL_RBU is not set
2865# CONFIG_DCDBAS is not set
2866CONFIG_DMIID=y
2867# CONFIG_ISCSI_IBFT_FIND is not set
2868
2869#
2870# File systems
2871#
2872CONFIG_EXT2_FS=y
2873CONFIG_EXT2_FS_XATTR=y
2874CONFIG_EXT2_FS_POSIX_ACL=y
2875CONFIG_EXT2_FS_SECURITY=y
2876# CONFIG_EXT2_FS_XIP is not set
2877CONFIG_EXT3_FS=y
2878CONFIG_EXT3_FS_XATTR=y
2879CONFIG_EXT3_FS_POSIX_ACL=y
2880CONFIG_EXT3_FS_SECURITY=y
2881# CONFIG_EXT4_FS is not set
2882CONFIG_JBD=y
2883# CONFIG_JBD_DEBUG is not set
2884CONFIG_FS_MBCACHE=y
2885CONFIG_REISERFS_FS=m
2886# CONFIG_REISERFS_CHECK is not set
2887# CONFIG_REISERFS_PROC_INFO is not set
2888CONFIG_REISERFS_FS_XATTR=y
2889CONFIG_REISERFS_FS_POSIX_ACL=y
2890CONFIG_REISERFS_FS_SECURITY=y
2891CONFIG_JFS_FS=m
2892CONFIG_JFS_POSIX_ACL=y
2893CONFIG_JFS_SECURITY=y
2894# CONFIG_JFS_DEBUG is not set
2895CONFIG_JFS_STATISTICS=y
2896CONFIG_FS_POSIX_ACL=y
2897CONFIG_FILE_LOCKING=y
2898# CONFIG_XFS_FS is not set
2899# CONFIG_GFS2_FS is not set
2900# CONFIG_OCFS2_FS is not set
2901# CONFIG_BTRFS_FS is not set
2902CONFIG_DNOTIFY=y
2903CONFIG_INOTIFY=y
2904CONFIG_INOTIFY_USER=y
2905CONFIG_QUOTA=y
2906# CONFIG_QUOTA_NETLINK_INTERFACE is not set
2907CONFIG_PRINT_QUOTA_WARNING=y
2908CONFIG_QUOTA_TREE=m
2909CONFIG_QFMT_V1=m
2910CONFIG_QFMT_V2=m
2911CONFIG_QUOTACTL=y
2912CONFIG_AUTOFS_FS=m
2913CONFIG_AUTOFS4_FS=m
2914CONFIG_FUSE_FS=m
2915CONFIG_GENERIC_ACL=y
2916
2917#
2918# CD-ROM/DVD Filesystems
2919#
2920CONFIG_ISO9660_FS=y
2921CONFIG_JOLIET=y
2922CONFIG_ZISOFS=y
2923CONFIG_UDF_FS=m
2924CONFIG_UDF_NLS=y
2925
2926#
2927# DOS/FAT/NT Filesystems
2928#
2929CONFIG_FAT_FS=y
2930CONFIG_MSDOS_FS=y
2931CONFIG_VFAT_FS=y
2932CONFIG_FAT_DEFAULT_CODEPAGE=437
2933CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
2934CONFIG_NTFS_FS=m
2935# CONFIG_NTFS_DEBUG is not set
2936CONFIG_NTFS_RW=y
2937
2938#
2939# Pseudo filesystems
2940#
2941CONFIG_PROC_FS=y
2942CONFIG_PROC_KCORE=y
2943CONFIG_PROC_SYSCTL=y
2944CONFIG_PROC_PAGE_MONITOR=y
2945CONFIG_SYSFS=y
2946CONFIG_TMPFS=y
2947CONFIG_TMPFS_POSIX_ACL=y
2948CONFIG_HUGETLBFS=y
2949CONFIG_HUGETLB_PAGE=y
2950CONFIG_CONFIGFS_FS=m
2951CONFIG_MISC_FILESYSTEMS=y
2952CONFIG_ADFS_FS=m
2953# CONFIG_ADFS_FS_RW is not set
2954CONFIG_AFFS_FS=m
2955# CONFIG_ECRYPT_FS is not set
2956CONFIG_HFS_FS=m
2957CONFIG_HFSPLUS_FS=m
2958CONFIG_BEFS_FS=m
2959# CONFIG_BEFS_DEBUG is not set
2960CONFIG_BFS_FS=m
2961CONFIG_EFS_FS=m
2962CONFIG_JFFS2_FS=m
2963CONFIG_JFFS2_FS_DEBUG=0
2964CONFIG_JFFS2_FS_WRITEBUFFER=y
2965# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
2966CONFIG_JFFS2_SUMMARY=y
2967CONFIG_JFFS2_FS_XATTR=y
2968CONFIG_JFFS2_FS_POSIX_ACL=y
2969CONFIG_JFFS2_FS_SECURITY=y
2970CONFIG_JFFS2_COMPRESSION_OPTIONS=y
2971CONFIG_JFFS2_ZLIB=y
2972# CONFIG_JFFS2_LZO is not set
2973CONFIG_JFFS2_RTIME=y
2974# CONFIG_JFFS2_RUBIN is not set
2975# CONFIG_JFFS2_CMODE_NONE is not set
2976CONFIG_JFFS2_CMODE_PRIORITY=y
2977# CONFIG_JFFS2_CMODE_SIZE is not set
2978# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
2979CONFIG_CRAMFS=y
2980# CONFIG_SQUASHFS is not set
2981CONFIG_VXFS_FS=m
2982# CONFIG_MINIX_FS is not set
2983# CONFIG_OMFS_FS is not set
2984CONFIG_HPFS_FS=m
2985CONFIG_QNX4FS_FS=m
2986CONFIG_ROMFS_FS=m
2987CONFIG_SYSV_FS=m
2988CONFIG_UFS_FS=m
2989CONFIG_UFS_FS_WRITE=y
2990# CONFIG_UFS_DEBUG is not set
2991CONFIG_NETWORK_FILESYSTEMS=y
2992CONFIG_NFS_FS=m
2993CONFIG_NFS_V3=y
2994CONFIG_NFS_V3_ACL=y
2995CONFIG_NFS_V4=y
2996CONFIG_NFSD=m
2997CONFIG_NFSD_V2_ACL=y
2998CONFIG_NFSD_V3=y
2999CONFIG_NFSD_V3_ACL=y
3000CONFIG_NFSD_V4=y
3001CONFIG_LOCKD=m
3002CONFIG_LOCKD_V4=y
3003CONFIG_EXPORTFS=m
3004CONFIG_NFS_ACL_SUPPORT=m
3005CONFIG_NFS_COMMON=y
3006CONFIG_SUNRPC=m
3007CONFIG_SUNRPC_GSS=m
3008# CONFIG_SUNRPC_REGISTER_V4 is not set
3009CONFIG_RPCSEC_GSS_KRB5=m
3010CONFIG_RPCSEC_GSS_SPKM3=m
3011CONFIG_SMB_FS=y
3012# CONFIG_SMB_NLS_DEFAULT is not set
3013CONFIG_CIFS=m
3014CONFIG_CIFS_STATS=y
3015CONFIG_CIFS_STATS2=y
3016CONFIG_CIFS_WEAK_PW_HASH=y
3017# CONFIG_CIFS_UPCALL is not set
3018CONFIG_CIFS_XATTR=y
3019CONFIG_CIFS_POSIX=y
3020# CONFIG_CIFS_DEBUG2 is not set
3021# CONFIG_CIFS_EXPERIMENTAL is not set
3022# CONFIG_NCP_FS is not set
3023# CONFIG_CODA_FS is not set
3024# CONFIG_AFS_FS is not set
3025
3026#
3027# Partition Types
3028#
3029CONFIG_PARTITION_ADVANCED=y
3030# CONFIG_ACORN_PARTITION is not set
3031CONFIG_OSF_PARTITION=y
3032# CONFIG_AMIGA_PARTITION is not set
3033CONFIG_ATARI_PARTITION=y
3034CONFIG_MAC_PARTITION=y
3035CONFIG_MSDOS_PARTITION=y
3036CONFIG_BSD_DISKLABEL=y
3037# CONFIG_MINIX_SUBPARTITION is not set
3038CONFIG_SOLARIS_X86_PARTITION=y
3039CONFIG_UNIXWARE_DISKLABEL=y
3040CONFIG_LDM_PARTITION=y
3041# CONFIG_LDM_DEBUG is not set
3042CONFIG_SGI_PARTITION=y
3043CONFIG_ULTRIX_PARTITION=y
3044CONFIG_SUN_PARTITION=y
3045CONFIG_KARMA_PARTITION=y
3046CONFIG_EFI_PARTITION=y
3047# CONFIG_SYSV68_PARTITION is not set
3048CONFIG_NLS=y
3049CONFIG_NLS_DEFAULT="utf8"
3050CONFIG_NLS_CODEPAGE_437=y
3051CONFIG_NLS_CODEPAGE_737=m
3052CONFIG_NLS_CODEPAGE_775=m
3053CONFIG_NLS_CODEPAGE_850=m
3054CONFIG_NLS_CODEPAGE_852=m
3055CONFIG_NLS_CODEPAGE_855=m
3056CONFIG_NLS_CODEPAGE_857=m
3057CONFIG_NLS_CODEPAGE_860=m
3058CONFIG_NLS_CODEPAGE_861=m
3059CONFIG_NLS_CODEPAGE_862=m
3060CONFIG_NLS_CODEPAGE_863=m
3061CONFIG_NLS_CODEPAGE_864=m
3062CONFIG_NLS_CODEPAGE_865=m
3063CONFIG_NLS_CODEPAGE_866=m
3064CONFIG_NLS_CODEPAGE_869=m
3065CONFIG_NLS_CODEPAGE_936=m
3066CONFIG_NLS_CODEPAGE_950=m
3067CONFIG_NLS_CODEPAGE_932=m
3068CONFIG_NLS_CODEPAGE_949=m
3069CONFIG_NLS_CODEPAGE_874=m
3070CONFIG_NLS_ISO8859_8=m
3071CONFIG_NLS_CODEPAGE_1250=m
3072CONFIG_NLS_CODEPAGE_1251=m
3073CONFIG_NLS_ASCII=y
3074CONFIG_NLS_ISO8859_1=y
3075CONFIG_NLS_ISO8859_2=m
3076CONFIG_NLS_ISO8859_3=m
3077CONFIG_NLS_ISO8859_4=m
3078CONFIG_NLS_ISO8859_5=m
3079CONFIG_NLS_ISO8859_6=m
3080CONFIG_NLS_ISO8859_7=m
3081CONFIG_NLS_ISO8859_9=m
3082CONFIG_NLS_ISO8859_13=m
3083CONFIG_NLS_ISO8859_14=m
3084CONFIG_NLS_ISO8859_15=m
3085CONFIG_NLS_KOI8_R=m
3086CONFIG_NLS_KOI8_U=m
3087CONFIG_NLS_UTF8=m
3088# CONFIG_DLM is not set
3089
3090#
3091# Kernel hacking
3092#
3093CONFIG_TRACE_IRQFLAGS_SUPPORT=y
3094# CONFIG_PRINTK_TIME is not set
3095CONFIG_ENABLE_WARN_DEPRECATED=y
3096CONFIG_ENABLE_MUST_CHECK=y
3097CONFIG_FRAME_WARN=1024
3098CONFIG_MAGIC_SYSRQ=y
3099# CONFIG_UNUSED_SYMBOLS is not set
3100CONFIG_DEBUG_FS=y
3101# CONFIG_HEADERS_CHECK is not set
3102CONFIG_DEBUG_KERNEL=y
3103# CONFIG_DEBUG_SHIRQ is not set
3104CONFIG_DETECT_SOFTLOCKUP=y
3105# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
3106CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
3107CONFIG_SCHED_DEBUG=y
3108# CONFIG_SCHEDSTATS is not set
3109CONFIG_TIMER_STATS=y
3110# CONFIG_DEBUG_OBJECTS is not set
3111# CONFIG_DEBUG_SLAB is not set
3112# CONFIG_DEBUG_RT_MUTEXES is not set
3113# CONFIG_RT_MUTEX_TESTER is not set
3114# CONFIG_DEBUG_SPINLOCK is not set
3115# CONFIG_DEBUG_MUTEXES is not set
3116# CONFIG_DEBUG_LOCK_ALLOC is not set
3117# CONFIG_PROVE_LOCKING is not set
3118# CONFIG_LOCK_STAT is not set
3119# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
3120# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
3121# CONFIG_DEBUG_KOBJECT is not set
3122# CONFIG_DEBUG_HIGHMEM is not set
3123CONFIG_DEBUG_BUGVERBOSE=y
3124# CONFIG_DEBUG_INFO is not set
3125# CONFIG_DEBUG_VM is not set
3126# CONFIG_DEBUG_VIRTUAL is not set
3127# CONFIG_DEBUG_WRITECOUNT is not set
3128CONFIG_DEBUG_MEMORY_INIT=y
3129# CONFIG_DEBUG_LIST is not set
3130# CONFIG_DEBUG_SG is not set
3131# CONFIG_DEBUG_NOTIFIERS is not set
3132CONFIG_ARCH_WANT_FRAME_POINTERS=y
3133# CONFIG_FRAME_POINTER is not set
3134# CONFIG_BOOT_PRINTK_DELAY is not set
3135# CONFIG_RCU_TORTURE_TEST is not set
3136# CONFIG_RCU_CPU_STALL_DETECTOR is not set
3137# CONFIG_BACKTRACE_SELF_TEST is not set
3138# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
3139# CONFIG_FAULT_INJECTION is not set
3140# CONFIG_LATENCYTOP is not set
3141# CONFIG_SYSCTL_SYSCALL_CHECK is not set
3142CONFIG_USER_STACKTRACE_SUPPORT=y
3143CONFIG_HAVE_FUNCTION_TRACER=y
3144CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
3145CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
3146CONFIG_HAVE_DYNAMIC_FTRACE=y
3147CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
3148
3149#
3150# Tracers
3151#
3152# CONFIG_FUNCTION_TRACER is not set
3153# CONFIG_IRQSOFF_TRACER is not set
3154# CONFIG_SYSPROF_TRACER is not set
3155# CONFIG_SCHED_TRACER is not set
3156# CONFIG_CONTEXT_SWITCH_TRACER is not set
3157# CONFIG_BOOT_TRACER is not set
3158# CONFIG_TRACE_BRANCH_PROFILING is not set
3159# CONFIG_POWER_TRACER is not set
3160# CONFIG_STACK_TRACER is not set
3161# CONFIG_MMIOTRACE is not set
3162# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
3163# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
3164# CONFIG_SAMPLES is not set
3165CONFIG_HAVE_ARCH_KGDB=y
3166# CONFIG_KGDB is not set
3167# CONFIG_STRICT_DEVMEM is not set
3168CONFIG_X86_VERBOSE_BOOTUP=y
3169CONFIG_EARLY_PRINTK=y
3170# CONFIG_EARLY_PRINTK_DBGP is not set
3171# CONFIG_DEBUG_STACKOVERFLOW is not set
3172# CONFIG_DEBUG_STACK_USAGE is not set
3173# CONFIG_DEBUG_PAGEALLOC is not set
3174# CONFIG_DEBUG_PER_CPU_MAPS is not set
3175# CONFIG_X86_PTDUMP is not set
3176# CONFIG_DEBUG_RODATA is not set
3177# CONFIG_DEBUG_NX_TEST is not set
3178# CONFIG_4KSTACKS is not set
3179CONFIG_DOUBLEFAULT=y
3180CONFIG_HAVE_MMIOTRACE_SUPPORT=y
3181CONFIG_IO_DELAY_TYPE_0X80=0
3182CONFIG_IO_DELAY_TYPE_0XED=1
3183CONFIG_IO_DELAY_TYPE_UDELAY=2
3184CONFIG_IO_DELAY_TYPE_NONE=3
3185CONFIG_IO_DELAY_0X80=y
3186# CONFIG_IO_DELAY_0XED is not set
3187# CONFIG_IO_DELAY_UDELAY is not set
3188# CONFIG_IO_DELAY_NONE is not set
3189CONFIG_DEFAULT_IO_DELAY_TYPE=0
3190# CONFIG_DEBUG_BOOT_PARAMS is not set
3191# CONFIG_CPA_DEBUG is not set
3192# CONFIG_OPTIMIZE_INLINING is not set
3193
3194#
3195# Security options
3196#
3197CONFIG_KEYS=y
3198CONFIG_KEYS_DEBUG_PROC_KEYS=y
3199CONFIG_SECURITY=y
3200# CONFIG_SECURITYFS is not set
3201CONFIG_SECURITY_NETWORK=y
3202# CONFIG_SECURITY_NETWORK_XFRM is not set
3203# CONFIG_SECURITY_PATH is not set
3204# CONFIG_SECURITY_FILE_CAPABILITIES is not set
3205# CONFIG_SECURITY_ROOTPLUG is not set
3206CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
3207CONFIG_SECURITY_SELINUX=y
3208CONFIG_SECURITY_SELINUX_BOOTPARAM=y
3209CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
3210CONFIG_SECURITY_SELINUX_DISABLE=y
3211CONFIG_SECURITY_SELINUX_DEVELOP=y
3212CONFIG_SECURITY_SELINUX_AVC_STATS=y
3213CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
3214# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
3215CONFIG_CRYPTO=y
3216
3217#
3218# Crypto core or helper
3219#
3220# CONFIG_CRYPTO_FIPS is not set
3221CONFIG_CRYPTO_ALGAPI=y
3222CONFIG_CRYPTO_ALGAPI2=y
3223CONFIG_CRYPTO_AEAD=m
3224CONFIG_CRYPTO_AEAD2=y
3225CONFIG_CRYPTO_BLKCIPHER=y
3226CONFIG_CRYPTO_BLKCIPHER2=y
3227CONFIG_CRYPTO_HASH=y
3228CONFIG_CRYPTO_HASH2=y
3229CONFIG_CRYPTO_RNG2=y
3230CONFIG_CRYPTO_MANAGER=y
3231CONFIG_CRYPTO_MANAGER2=y
3232# CONFIG_CRYPTO_GF128MUL is not set
3233CONFIG_CRYPTO_NULL=m
3234# CONFIG_CRYPTO_CRYPTD is not set
3235CONFIG_CRYPTO_AUTHENC=m
3236CONFIG_CRYPTO_TEST=m
3237
3238#
3239# Authenticated Encryption with Associated Data
3240#
3241# CONFIG_CRYPTO_CCM is not set
3242# CONFIG_CRYPTO_GCM is not set
3243# CONFIG_CRYPTO_SEQIV is not set
3244
3245#
3246# Block modes
3247#
3248CONFIG_CRYPTO_CBC=y
3249# CONFIG_CRYPTO_CTR is not set
3250# CONFIG_CRYPTO_CTS is not set
3251CONFIG_CRYPTO_ECB=m
3252# CONFIG_CRYPTO_LRW is not set
3253CONFIG_CRYPTO_PCBC=m
3254# CONFIG_CRYPTO_XTS is not set
3255
3256#
3257# Hash modes
3258#
3259CONFIG_CRYPTO_HMAC=y
3260# CONFIG_CRYPTO_XCBC is not set
3261
3262#
3263# Digest
3264#
3265CONFIG_CRYPTO_CRC32C=m
3266# CONFIG_CRYPTO_CRC32C_INTEL is not set
3267CONFIG_CRYPTO_MD4=m
3268CONFIG_CRYPTO_MD5=y
3269CONFIG_CRYPTO_MICHAEL_MIC=m
3270# CONFIG_CRYPTO_RMD128 is not set
3271# CONFIG_CRYPTO_RMD160 is not set
3272# CONFIG_CRYPTO_RMD256 is not set
3273# CONFIG_CRYPTO_RMD320 is not set
3274CONFIG_CRYPTO_SHA1=m
3275CONFIG_CRYPTO_SHA256=m
3276CONFIG_CRYPTO_SHA512=m
3277CONFIG_CRYPTO_TGR192=m
3278CONFIG_CRYPTO_WP512=m
3279
3280#
3281# Ciphers
3282#
3283CONFIG_CRYPTO_AES=m
3284CONFIG_CRYPTO_AES_586=m
3285CONFIG_CRYPTO_ANUBIS=m
3286CONFIG_CRYPTO_ARC4=m
3287CONFIG_CRYPTO_BLOWFISH=m
3288# CONFIG_CRYPTO_CAMELLIA is not set
3289CONFIG_CRYPTO_CAST5=y
3290CONFIG_CRYPTO_CAST6=m
3291CONFIG_CRYPTO_DES=y
3292# CONFIG_CRYPTO_FCRYPT is not set
3293CONFIG_CRYPTO_KHAZAD=m
3294# CONFIG_CRYPTO_SALSA20 is not set
3295# CONFIG_CRYPTO_SALSA20_586 is not set
3296# CONFIG_CRYPTO_SEED is not set
3297CONFIG_CRYPTO_SERPENT=m
3298CONFIG_CRYPTO_TEA=m
3299CONFIG_CRYPTO_TWOFISH=m
3300CONFIG_CRYPTO_TWOFISH_COMMON=m
3301# CONFIG_CRYPTO_TWOFISH_586 is not set
3302
3303#
3304# Compression
3305#
3306CONFIG_CRYPTO_DEFLATE=m
3307# CONFIG_CRYPTO_LZO is not set
3308
3309#
3310# Random Number Generation
3311#
3312# CONFIG_CRYPTO_ANSI_CPRNG is not set
3313CONFIG_CRYPTO_HW=y
3314CONFIG_CRYPTO_DEV_PADLOCK=m
3315CONFIG_CRYPTO_DEV_PADLOCK_AES=m
3316CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
3317CONFIG_CRYPTO_DEV_GEODE=m
3318# CONFIG_CRYPTO_DEV_HIFN_795X is not set
3319CONFIG_HAVE_KVM=y
3320CONFIG_VIRTUALIZATION=y
3321# CONFIG_KVM is not set
3322# CONFIG_LGUEST is not set
3323# CONFIG_VIRTIO_PCI is not set
3324# CONFIG_VIRTIO_BALLOON is not set
3325
3326#
3327# Library routines
3328#
3329CONFIG_BITREVERSE=y
3330CONFIG_GENERIC_FIND_FIRST_BIT=y
3331CONFIG_GENERIC_FIND_NEXT_BIT=y
3332CONFIG_GENERIC_FIND_LAST_BIT=y
3333CONFIG_CRC_CCITT=m
3334CONFIG_CRC16=m
3335# CONFIG_CRC_T10DIF is not set
3336CONFIG_CRC_ITU_T=m
3337CONFIG_CRC32=y
3338# CONFIG_CRC7 is not set
3339CONFIG_LIBCRC32C=m
3340CONFIG_AUDIT_GENERIC=y
3341CONFIG_ZLIB_INFLATE=y
3342CONFIG_ZLIB_DEFLATE=m
3343CONFIG_REED_SOLOMON=m
3344CONFIG_REED_SOLOMON_DEC16=y
3345CONFIG_TEXTSEARCH=y
3346CONFIG_TEXTSEARCH_KMP=m
3347CONFIG_TEXTSEARCH_BM=m
3348CONFIG_TEXTSEARCH_FSM=m
3349CONFIG_PLIST=y
3350CONFIG_HAS_IOMEM=y
3351CONFIG_HAS_IOPORT=y
3352CONFIG_HAS_DMA=y
3353CONFIG_CHECK_SIGNATURE=y
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-netbook b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-netbook
deleted file mode 100644
index 6aba13def5..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-netbook
+++ /dev/null
@@ -1,2747 +0,0 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29.1
4# Mon May 18 16:53:09 2009
5#
6# CONFIG_64BIT is not set
7CONFIG_X86_32=y
8# CONFIG_X86_64 is not set
9CONFIG_X86=y
10CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
11CONFIG_GENERIC_TIME=y
12CONFIG_GENERIC_CMOS_UPDATE=y
13CONFIG_CLOCKSOURCE_WATCHDOG=y
14CONFIG_GENERIC_CLOCKEVENTS=y
15CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
16CONFIG_LOCKDEP_SUPPORT=y
17CONFIG_STACKTRACE_SUPPORT=y
18CONFIG_HAVE_LATENCYTOP_SUPPORT=y
19CONFIG_FAST_CMPXCHG_LOCAL=y
20CONFIG_MMU=y
21CONFIG_ZONE_DMA=y
22CONFIG_GENERIC_ISA_DMA=y
23CONFIG_GENERIC_IOMAP=y
24CONFIG_GENERIC_BUG=y
25CONFIG_GENERIC_HWEIGHT=y
26CONFIG_ARCH_MAY_HAVE_PC_FDC=y
27# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
28CONFIG_RWSEM_XCHGADD_ALGORITHM=y
29CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
30CONFIG_GENERIC_CALIBRATE_DELAY=y
31# CONFIG_GENERIC_TIME_VSYSCALL is not set
32CONFIG_ARCH_HAS_CPU_RELAX=y
33CONFIG_ARCH_HAS_DEFAULT_IDLE=y
34CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
35CONFIG_HAVE_SETUP_PER_CPU_AREA=y
36# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
37CONFIG_ARCH_HIBERNATION_POSSIBLE=y
38CONFIG_ARCH_SUSPEND_POSSIBLE=y
39# CONFIG_ZONE_DMA32 is not set
40CONFIG_ARCH_POPULATES_NODE_MAP=y
41# CONFIG_AUDIT_ARCH is not set
42CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
43CONFIG_GENERIC_HARDIRQS=y
44CONFIG_GENERIC_IRQ_PROBE=y
45CONFIG_GENERIC_PENDING_IRQ=y
46CONFIG_X86_SMP=y
47CONFIG_USE_GENERIC_SMP_HELPERS=y
48CONFIG_X86_32_SMP=y
49CONFIG_X86_HT=y
50CONFIG_X86_BIOS_REBOOT=y
51CONFIG_X86_TRAMPOLINE=y
52CONFIG_KTIME_SCALAR=y
53CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
54
55#
56# General setup
57#
58CONFIG_EXPERIMENTAL=y
59CONFIG_LOCK_KERNEL=y
60CONFIG_INIT_ENV_ARG_LIMIT=32
61CONFIG_LOCALVERSION="-netbook"
62# CONFIG_LOCALVERSION_AUTO is not set
63CONFIG_SWAP=y
64CONFIG_SYSVIPC=y
65CONFIG_SYSVIPC_SYSCTL=y
66CONFIG_POSIX_MQUEUE=y
67CONFIG_BSD_PROCESS_ACCT=y
68CONFIG_BSD_PROCESS_ACCT_V3=y
69# CONFIG_TASKSTATS is not set
70# CONFIG_AUDIT is not set
71# CONFIG_IKCONFIG is not set
72CONFIG_LOG_BUF_SHIFT=17
73CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
74# CONFIG_GROUP_SCHED is not set
75# CONFIG_CGROUPS is not set
76# CONFIG_SYSFS_DEPRECATED_V2 is not set
77CONFIG_RELAY=y
78CONFIG_NAMESPACES=y
79# CONFIG_UTS_NS is not set
80# CONFIG_IPC_NS is not set
81# CONFIG_USER_NS is not set
82# CONFIG_PID_NS is not set
83# CONFIG_NET_NS is not set
84CONFIG_BLK_DEV_INITRD=y
85CONFIG_INITRAMFS_SOURCE=""
86CONFIG_CC_OPTIMIZE_FOR_SIZE=y
87CONFIG_FASTBOOT=y
88CONFIG_SYSCTL=y
89CONFIG_ANON_INODES=y
90# CONFIG_EMBEDDED is not set
91CONFIG_UID16=y
92CONFIG_SYSCTL_SYSCALL=y
93CONFIG_KALLSYMS=y
94CONFIG_KALLSYMS_ALL=y
95CONFIG_KALLSYMS_EXTRA_PASS=y
96CONFIG_HOTPLUG=y
97CONFIG_PRINTK=y
98CONFIG_BUG=y
99CONFIG_ELF_CORE=y
100CONFIG_PCSPKR_PLATFORM=y
101CONFIG_BASE_FULL=y
102CONFIG_FUTEX=y
103CONFIG_EPOLL=y
104CONFIG_SIGNALFD=y
105CONFIG_TIMERFD=y
106CONFIG_EVENTFD=y
107CONFIG_SHMEM=y
108CONFIG_AIO=y
109CONFIG_VM_EVENT_COUNTERS=y
110CONFIG_PCI_QUIRKS=y
111# CONFIG_COMPAT_BRK is not set
112CONFIG_SLAB=y
113# CONFIG_SLUB is not set
114# CONFIG_SLOB is not set
115CONFIG_PROFILING=y
116CONFIG_TRACEPOINTS=y
117# CONFIG_MARKERS is not set
118# CONFIG_OPROFILE is not set
119CONFIG_HAVE_OPROFILE=y
120# CONFIG_KPROBES is not set
121CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
122CONFIG_HAVE_IOREMAP_PROT=y
123CONFIG_HAVE_KPROBES=y
124CONFIG_HAVE_KRETPROBES=y
125CONFIG_HAVE_ARCH_TRACEHOOK=y
126CONFIG_HAVE_GENERIC_DMA_COHERENT=y
127CONFIG_SLABINFO=y
128CONFIG_RT_MUTEXES=y
129CONFIG_BASE_SMALL=0
130CONFIG_MODULES=y
131# CONFIG_MODULE_FORCE_LOAD is not set
132CONFIG_MODULE_UNLOAD=y
133# CONFIG_MODULE_FORCE_UNLOAD is not set
134# CONFIG_MODVERSIONS is not set
135# CONFIG_MODULE_SRCVERSION_ALL is not set
136CONFIG_STOP_MACHINE=y
137CONFIG_BLOCK=y
138# CONFIG_LBD is not set
139CONFIG_BLK_DEV_IO_TRACE=y
140CONFIG_BLK_DEV_BSG=y
141# CONFIG_BLK_DEV_INTEGRITY is not set
142
143#
144# IO Schedulers
145#
146CONFIG_IOSCHED_NOOP=y
147# CONFIG_IOSCHED_AS is not set
148# CONFIG_IOSCHED_DEADLINE is not set
149CONFIG_IOSCHED_CFQ=y
150# CONFIG_DEFAULT_AS is not set
151# CONFIG_DEFAULT_DEADLINE is not set
152CONFIG_DEFAULT_CFQ=y
153# CONFIG_DEFAULT_NOOP is not set
154CONFIG_DEFAULT_IOSCHED="cfq"
155CONFIG_CLASSIC_RCU=y
156CONFIG_FREEZER=y
157
158#
159# Processor type and features
160#
161CONFIG_TICK_ONESHOT=y
162CONFIG_NO_HZ=y
163CONFIG_HIGH_RES_TIMERS=y
164CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
165CONFIG_SMP=y
166# CONFIG_SPARSE_IRQ is not set
167CONFIG_X86_FIND_SMP_CONFIG=y
168CONFIG_X86_MPPARSE=y
169# CONFIG_X86_PC is not set
170# CONFIG_X86_ELAN is not set
171# CONFIG_X86_VOYAGER is not set
172CONFIG_X86_GENERICARCH=y
173# CONFIG_X86_NUMAQ is not set
174# CONFIG_X86_SUMMIT is not set
175# CONFIG_X86_ES7000 is not set
176# CONFIG_X86_BIGSMP is not set
177# CONFIG_X86_VSMP is not set
178# CONFIG_X86_RDC321X is not set
179CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
180# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
181# CONFIG_PARAVIRT_GUEST is not set
182# CONFIG_MEMTEST is not set
183# CONFIG_M386 is not set
184# CONFIG_M486 is not set
185# CONFIG_M586 is not set
186# CONFIG_M586TSC is not set
187# CONFIG_M586MMX is not set
188# CONFIG_M686 is not set
189# CONFIG_MPENTIUMII is not set
190# CONFIG_MPENTIUMIII is not set
191CONFIG_MPENTIUMM=y
192# CONFIG_MPENTIUM4 is not set
193# CONFIG_MK6 is not set
194# CONFIG_MK7 is not set
195# CONFIG_MK8 is not set
196# CONFIG_MCRUSOE is not set
197# CONFIG_MEFFICEON is not set
198# CONFIG_MWINCHIPC6 is not set
199# CONFIG_MWINCHIP3D is not set
200# CONFIG_MGEODEGX1 is not set
201# CONFIG_MGEODE_LX is not set
202# CONFIG_MCYRIXIII is not set
203# CONFIG_MVIAC3_2 is not set
204# CONFIG_MVIAC7 is not set
205# CONFIG_MPSC is not set
206# CONFIG_MCORE2 is not set
207# CONFIG_GENERIC_CPU is not set
208CONFIG_X86_GENERIC=y
209CONFIG_X86_CPU=y
210CONFIG_X86_CMPXCHG=y
211CONFIG_X86_L1_CACHE_SHIFT=7
212CONFIG_X86_XADD=y
213# CONFIG_X86_PPRO_FENCE is not set
214CONFIG_X86_WP_WORKS_OK=y
215CONFIG_X86_INVLPG=y
216CONFIG_X86_BSWAP=y
217CONFIG_X86_POPAD_OK=y
218CONFIG_X86_INTEL_USERCOPY=y
219CONFIG_X86_USE_PPRO_CHECKSUM=y
220CONFIG_X86_TSC=y
221CONFIG_X86_CMPXCHG64=y
222CONFIG_X86_CMOV=y
223CONFIG_X86_MINIMUM_CPU_FAMILY=4
224CONFIG_X86_DEBUGCTLMSR=y
225CONFIG_CPU_SUP_INTEL=y
226CONFIG_CPU_SUP_CYRIX_32=y
227CONFIG_CPU_SUP_AMD=y
228CONFIG_CPU_SUP_CENTAUR_32=y
229CONFIG_CPU_SUP_TRANSMETA_32=y
230CONFIG_CPU_SUP_UMC_32=y
231# CONFIG_X86_DS is not set
232# CONFIG_X86_PTRACE_BTS is not set
233CONFIG_HPET_TIMER=y
234CONFIG_HPET_EMULATE_RTC=y
235CONFIG_DMI=y
236# CONFIG_IOMMU_HELPER is not set
237CONFIG_NR_CPUS=8
238CONFIG_SCHED_SMT=y
239CONFIG_SCHED_MC=y
240# CONFIG_PREEMPT_NONE is not set
241# CONFIG_PREEMPT_VOLUNTARY is not set
242CONFIG_PREEMPT=y
243# CONFIG_DEBUG_PREEMPT is not set
244# CONFIG_PREEMPT_TRACER is not set
245CONFIG_X86_LOCAL_APIC=y
246CONFIG_X86_IO_APIC=y
247# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
248CONFIG_X86_MCE=y
249# CONFIG_X86_MCE_NONFATAL is not set
250CONFIG_X86_MCE_P4THERMAL=y
251CONFIG_VM86=y
252# CONFIG_TOSHIBA is not set
253# CONFIG_I8K is not set
254# CONFIG_X86_REBOOTFIXUPS is not set
255CONFIG_MICROCODE=y
256CONFIG_MICROCODE_INTEL=y
257# CONFIG_MICROCODE_AMD is not set
258CONFIG_MICROCODE_OLD_INTERFACE=y
259CONFIG_X86_MSR=y
260CONFIG_X86_CPUID=y
261# CONFIG_NOHIGHMEM is not set
262CONFIG_HIGHMEM4G=y
263# CONFIG_HIGHMEM64G is not set
264CONFIG_PAGE_OFFSET=0xC0000000
265CONFIG_HIGHMEM=y
266# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
267CONFIG_NEED_NODE_MEMMAP_SIZE=y
268CONFIG_ARCH_FLATMEM_ENABLE=y
269CONFIG_ARCH_SPARSEMEM_ENABLE=y
270CONFIG_ARCH_SELECT_MEMORY_MODEL=y
271CONFIG_SELECT_MEMORY_MODEL=y
272# CONFIG_FLATMEM_MANUAL is not set
273# CONFIG_DISCONTIGMEM_MANUAL is not set
274CONFIG_SPARSEMEM_MANUAL=y
275CONFIG_SPARSEMEM=y
276CONFIG_HAVE_MEMORY_PRESENT=y
277CONFIG_SPARSEMEM_STATIC=y
278
279#
280# Memory hotplug is currently incompatible with Software Suspend
281#
282CONFIG_PAGEFLAGS_EXTENDED=y
283CONFIG_SPLIT_PTLOCK_CPUS=4
284CONFIG_RESOURCES_64BIT=y
285CONFIG_PHYS_ADDR_T_64BIT=y
286CONFIG_ZONE_DMA_FLAG=1
287CONFIG_BOUNCE=y
288CONFIG_VIRT_TO_BUS=y
289CONFIG_UNEVICTABLE_LRU=y
290CONFIG_HIGHPTE=y
291# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
292CONFIG_X86_RESERVE_LOW_64K=y
293# CONFIG_MATH_EMULATION is not set
294CONFIG_MTRR=y
295CONFIG_MTRR_SANITIZER=y
296CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
297CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
298CONFIG_X86_PAT=y
299CONFIG_EFI=y
300# CONFIG_SECCOMP is not set
301# CONFIG_HZ_100 is not set
302# CONFIG_HZ_250 is not set
303# CONFIG_HZ_300 is not set
304CONFIG_HZ_1000=y
305CONFIG_HZ=1000
306CONFIG_SCHED_HRTICK=y
307CONFIG_KEXEC=y
308CONFIG_CRASH_DUMP=y
309# CONFIG_KEXEC_JUMP is not set
310CONFIG_PHYSICAL_START=0x400000
311CONFIG_RELOCATABLE=y
312CONFIG_PHYSICAL_ALIGN=0x200000
313CONFIG_HOTPLUG_CPU=y
314# CONFIG_COMPAT_VDSO is not set
315# CONFIG_CMDLINE_BOOL is not set
316CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
317
318#
319# Power management and ACPI options
320#
321CONFIG_PM=y
322CONFIG_PM_DEBUG=y
323# CONFIG_PM_VERBOSE is not set
324CONFIG_CAN_PM_TRACE=y
325CONFIG_PM_TRACE=y
326CONFIG_PM_TRACE_RTC=y
327CONFIG_PM_SLEEP_SMP=y
328CONFIG_PM_SLEEP=y
329CONFIG_SUSPEND=y
330# CONFIG_PM_TEST_SUSPEND is not set
331CONFIG_SUSPEND_FREEZER=y
332CONFIG_HIBERNATION=y
333CONFIG_PM_STD_PARTITION=""
334CONFIG_ACPI=y
335CONFIG_ACPI_SLEEP=y
336CONFIG_ACPI_PROCFS=y
337CONFIG_ACPI_PROCFS_POWER=y
338CONFIG_ACPI_SYSFS_POWER=y
339CONFIG_ACPI_PROC_EVENT=y
340CONFIG_ACPI_AC=y
341CONFIG_ACPI_BATTERY=m
342CONFIG_ACPI_BUTTON=y
343CONFIG_ACPI_VIDEO=y
344CONFIG_ACPI_FAN=y
345CONFIG_ACPI_DOCK=y
346CONFIG_ACPI_PROCESSOR=y
347CONFIG_ACPI_HOTPLUG_CPU=y
348CONFIG_ACPI_THERMAL=y
349CONFIG_ACPI_WMI=y
350CONFIG_ACPI_ASUS=m
351CONFIG_ACPI_TOSHIBA=m
352# CONFIG_ACPI_CUSTOM_DSDT is not set
353CONFIG_ACPI_BLACKLIST_YEAR=1999
354# CONFIG_ACPI_DEBUG is not set
355# CONFIG_ACPI_PCI_SLOT is not set
356CONFIG_ACPI_SYSTEM=y
357CONFIG_X86_PM_TIMER=y
358CONFIG_ACPI_CONTAINER=y
359CONFIG_ACPI_SBS=m
360# CONFIG_APM is not set
361
362#
363# CPU Frequency scaling
364#
365CONFIG_CPU_FREQ=y
366CONFIG_CPU_FREQ_TABLE=y
367CONFIG_CPU_FREQ_DEBUG=y
368CONFIG_CPU_FREQ_STAT=y
369CONFIG_CPU_FREQ_STAT_DETAILS=y
370# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
371# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
372# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
373CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
374# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
375CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
376# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
377CONFIG_CPU_FREQ_GOV_USERSPACE=y
378CONFIG_CPU_FREQ_GOV_ONDEMAND=y
379# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
380
381#
382# CPUFreq processor drivers
383#
384CONFIG_X86_ACPI_CPUFREQ=y
385# CONFIG_X86_POWERNOW_K6 is not set
386# CONFIG_X86_POWERNOW_K7 is not set
387# CONFIG_X86_POWERNOW_K8 is not set
388# CONFIG_X86_GX_SUSPMOD is not set
389# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
390# CONFIG_X86_SPEEDSTEP_ICH is not set
391# CONFIG_X86_SPEEDSTEP_SMI is not set
392# CONFIG_X86_P4_CLOCKMOD is not set
393# CONFIG_X86_CPUFREQ_NFORCE2 is not set
394# CONFIG_X86_LONGRUN is not set
395# CONFIG_X86_LONGHAUL is not set
396# CONFIG_X86_E_POWERSAVER is not set
397
398#
399# shared options
400#
401# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
402# CONFIG_X86_SPEEDSTEP_LIB is not set
403CONFIG_CPU_IDLE=y
404CONFIG_CPU_IDLE_GOV_LADDER=y
405CONFIG_CPU_IDLE_GOV_MENU=y
406
407#
408# Bus options (PCI etc.)
409#
410CONFIG_PCI=y
411# CONFIG_PCI_GOBIOS is not set
412# CONFIG_PCI_GOMMCONFIG is not set
413# CONFIG_PCI_GODIRECT is not set
414# CONFIG_PCI_GOOLPC is not set
415CONFIG_PCI_GOANY=y
416CONFIG_PCI_BIOS=y
417CONFIG_PCI_DIRECT=y
418CONFIG_PCI_MMCONFIG=y
419CONFIG_PCI_DOMAINS=y
420CONFIG_PCIEPORTBUS=y
421# CONFIG_PCIEAER is not set
422# CONFIG_PCIEASPM is not set
423# CONFIG_PCIEASPM_DEBUG is not set
424CONFIG_ARCH_SUPPORTS_MSI=y
425CONFIG_PCI_MSI=y
426# CONFIG_PCI_LEGACY is not set
427# CONFIG_PCI_DEBUG is not set
428# CONFIG_PCI_STUB is not set
429# CONFIG_HT_IRQ is not set
430CONFIG_ISA_DMA_API=y
431# CONFIG_ISA is not set
432# CONFIG_MCA is not set
433# CONFIG_SCx200 is not set
434# CONFIG_OLPC is not set
435CONFIG_PCCARD=y
436# CONFIG_PCMCIA_DEBUG is not set
437# CONFIG_PCMCIA is not set
438CONFIG_CARDBUS=y
439
440#
441# PC-card bridges
442#
443CONFIG_YENTA=y
444CONFIG_YENTA_O2=y
445CONFIG_YENTA_RICOH=y
446CONFIG_YENTA_TI=y
447CONFIG_YENTA_ENE_TUNE=y
448CONFIG_YENTA_TOSHIBA=y
449CONFIG_PCMCIA_PROBE=y
450CONFIG_PCCARD_NONSTATIC=y
451# CONFIG_HOTPLUG_PCI is not set
452
453#
454# Executable file formats / Emulations
455#
456CONFIG_BINFMT_ELF=y
457# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
458CONFIG_HAVE_AOUT=y
459# CONFIG_BINFMT_AOUT is not set
460CONFIG_BINFMT_MISC=y
461CONFIG_HAVE_ATOMIC_IOMAP=y
462CONFIG_NET=y
463
464#
465# Networking options
466#
467CONFIG_COMPAT_NET_DEV_OPS=y
468CONFIG_PACKET=y
469CONFIG_PACKET_MMAP=y
470CONFIG_UNIX=y
471CONFIG_XFRM=y
472CONFIG_XFRM_USER=y
473CONFIG_XFRM_SUB_POLICY=y
474CONFIG_XFRM_MIGRATE=y
475CONFIG_XFRM_STATISTICS=y
476CONFIG_XFRM_IPCOMP=m
477CONFIG_NET_KEY=m
478CONFIG_NET_KEY_MIGRATE=y
479CONFIG_INET=y
480CONFIG_IP_MULTICAST=y
481# CONFIG_IP_ADVANCED_ROUTER is not set
482CONFIG_IP_FIB_HASH=y
483# CONFIG_IP_PNP is not set
484# CONFIG_NET_IPIP is not set
485# CONFIG_NET_IPGRE is not set
486CONFIG_IP_MROUTE=y
487CONFIG_IP_PIMSM_V1=y
488CONFIG_IP_PIMSM_V2=y
489# CONFIG_ARPD is not set
490CONFIG_SYN_COOKIES=y
491CONFIG_INET_AH=m
492CONFIG_INET_ESP=m
493CONFIG_INET_IPCOMP=m
494CONFIG_INET_XFRM_TUNNEL=m
495CONFIG_INET_TUNNEL=m
496CONFIG_INET_XFRM_MODE_TRANSPORT=m
497CONFIG_INET_XFRM_MODE_TUNNEL=m
498CONFIG_INET_XFRM_MODE_BEET=m
499CONFIG_INET_LRO=y
500CONFIG_INET_DIAG=m
501CONFIG_INET_TCP_DIAG=m
502CONFIG_TCP_CONG_ADVANCED=y
503CONFIG_TCP_CONG_BIC=m
504CONFIG_TCP_CONG_CUBIC=y
505# CONFIG_TCP_CONG_WESTWOOD is not set
506# CONFIG_TCP_CONG_HTCP is not set
507# CONFIG_TCP_CONG_HSTCP is not set
508# CONFIG_TCP_CONG_HYBLA is not set
509# CONFIG_TCP_CONG_VEGAS is not set
510# CONFIG_TCP_CONG_SCALABLE is not set
511# CONFIG_TCP_CONG_LP is not set
512# CONFIG_TCP_CONG_VENO is not set
513# CONFIG_TCP_CONG_YEAH is not set
514# CONFIG_TCP_CONG_ILLINOIS is not set
515# CONFIG_DEFAULT_BIC is not set
516CONFIG_DEFAULT_CUBIC=y
517# CONFIG_DEFAULT_HTCP is not set
518# CONFIG_DEFAULT_VEGAS is not set
519# CONFIG_DEFAULT_WESTWOOD is not set
520# CONFIG_DEFAULT_RENO is not set
521CONFIG_DEFAULT_TCP_CONG="cubic"
522CONFIG_TCP_MD5SIG=y
523CONFIG_IPV6=y
524CONFIG_IPV6_PRIVACY=y
525CONFIG_IPV6_ROUTER_PREF=y
526CONFIG_IPV6_ROUTE_INFO=y
527CONFIG_IPV6_OPTIMISTIC_DAD=y
528CONFIG_INET6_AH=m
529CONFIG_INET6_ESP=m
530CONFIG_INET6_IPCOMP=m
531CONFIG_IPV6_MIP6=m
532CONFIG_INET6_XFRM_TUNNEL=m
533CONFIG_INET6_TUNNEL=m
534CONFIG_INET6_XFRM_MODE_TRANSPORT=m
535CONFIG_INET6_XFRM_MODE_TUNNEL=m
536CONFIG_INET6_XFRM_MODE_BEET=m
537CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
538CONFIG_IPV6_SIT=m
539CONFIG_IPV6_NDISC_NODETYPE=y
540CONFIG_IPV6_TUNNEL=m
541CONFIG_IPV6_MULTIPLE_TABLES=y
542CONFIG_IPV6_SUBTREES=y
543# CONFIG_IPV6_MROUTE is not set
544CONFIG_NETLABEL=y
545CONFIG_NETWORK_SECMARK=y
546CONFIG_NETFILTER=y
547# CONFIG_NETFILTER_DEBUG is not set
548CONFIG_NETFILTER_ADVANCED=y
549
550#
551# Core Netfilter Configuration
552#
553CONFIG_NETFILTER_NETLINK=m
554CONFIG_NETFILTER_NETLINK_QUEUE=m
555CONFIG_NETFILTER_NETLINK_LOG=m
556CONFIG_NF_CONNTRACK=y
557CONFIG_NF_CT_ACCT=y
558CONFIG_NF_CONNTRACK_MARK=y
559CONFIG_NF_CONNTRACK_SECMARK=y
560CONFIG_NF_CONNTRACK_EVENTS=y
561# CONFIG_NF_CT_PROTO_DCCP is not set
562CONFIG_NF_CT_PROTO_GRE=m
563CONFIG_NF_CT_PROTO_SCTP=m
564CONFIG_NF_CT_PROTO_UDPLITE=m
565CONFIG_NF_CONNTRACK_AMANDA=m
566CONFIG_NF_CONNTRACK_FTP=m
567CONFIG_NF_CONNTRACK_H323=m
568CONFIG_NF_CONNTRACK_IRC=m
569CONFIG_NF_CONNTRACK_NETBIOS_NS=m
570CONFIG_NF_CONNTRACK_PPTP=m
571CONFIG_NF_CONNTRACK_SANE=m
572CONFIG_NF_CONNTRACK_SIP=m
573CONFIG_NF_CONNTRACK_TFTP=m
574CONFIG_NF_CT_NETLINK=m
575# CONFIG_NETFILTER_TPROXY is not set
576CONFIG_NETFILTER_XTABLES=y
577CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
578CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
579CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
580CONFIG_NETFILTER_XT_TARGET_DSCP=m
581CONFIG_NETFILTER_XT_TARGET_MARK=m
582CONFIG_NETFILTER_XT_TARGET_NFLOG=m
583CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
584CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
585CONFIG_NETFILTER_XT_TARGET_RATEEST=m
586CONFIG_NETFILTER_XT_TARGET_TRACE=m
587CONFIG_NETFILTER_XT_TARGET_SECMARK=m
588CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
589CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
590CONFIG_NETFILTER_XT_MATCH_COMMENT=m
591CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
592CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
593CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
594CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
595# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
596CONFIG_NETFILTER_XT_MATCH_DSCP=m
597CONFIG_NETFILTER_XT_MATCH_ESP=m
598CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
599CONFIG_NETFILTER_XT_MATCH_HELPER=m
600CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
601CONFIG_NETFILTER_XT_MATCH_LENGTH=m
602CONFIG_NETFILTER_XT_MATCH_LIMIT=m
603CONFIG_NETFILTER_XT_MATCH_MAC=m
604CONFIG_NETFILTER_XT_MATCH_MARK=m
605CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
606CONFIG_NETFILTER_XT_MATCH_OWNER=m
607CONFIG_NETFILTER_XT_MATCH_POLICY=m
608CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
609CONFIG_NETFILTER_XT_MATCH_QUOTA=m
610CONFIG_NETFILTER_XT_MATCH_RATEEST=m
611CONFIG_NETFILTER_XT_MATCH_REALM=m
612# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
613CONFIG_NETFILTER_XT_MATCH_SCTP=m
614CONFIG_NETFILTER_XT_MATCH_STATE=y
615CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
616CONFIG_NETFILTER_XT_MATCH_STRING=m
617CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
618CONFIG_NETFILTER_XT_MATCH_TIME=m
619CONFIG_NETFILTER_XT_MATCH_U32=m
620# CONFIG_IP_VS is not set
621
622#
623# IP: Netfilter Configuration
624#
625CONFIG_NF_DEFRAG_IPV4=y
626CONFIG_NF_CONNTRACK_IPV4=y
627# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
628CONFIG_IP_NF_QUEUE=m
629CONFIG_IP_NF_IPTABLES=y
630CONFIG_IP_NF_MATCH_ADDRTYPE=m
631CONFIG_IP_NF_MATCH_AH=m
632CONFIG_IP_NF_MATCH_ECN=m
633CONFIG_IP_NF_MATCH_TTL=m
634CONFIG_IP_NF_FILTER=y
635CONFIG_IP_NF_TARGET_REJECT=y
636CONFIG_IP_NF_TARGET_LOG=m
637CONFIG_IP_NF_TARGET_ULOG=m
638CONFIG_NF_NAT=m
639CONFIG_NF_NAT_NEEDED=y
640CONFIG_IP_NF_TARGET_MASQUERADE=m
641CONFIG_IP_NF_TARGET_NETMAP=m
642CONFIG_IP_NF_TARGET_REDIRECT=m
643CONFIG_NF_NAT_SNMP_BASIC=m
644CONFIG_NF_NAT_PROTO_GRE=m
645CONFIG_NF_NAT_PROTO_UDPLITE=m
646CONFIG_NF_NAT_PROTO_SCTP=m
647CONFIG_NF_NAT_FTP=m
648CONFIG_NF_NAT_IRC=m
649CONFIG_NF_NAT_TFTP=m
650CONFIG_NF_NAT_AMANDA=m
651CONFIG_NF_NAT_PPTP=m
652CONFIG_NF_NAT_H323=m
653CONFIG_NF_NAT_SIP=m
654CONFIG_IP_NF_MANGLE=m
655CONFIG_IP_NF_TARGET_CLUSTERIP=m
656CONFIG_IP_NF_TARGET_ECN=m
657CONFIG_IP_NF_TARGET_TTL=m
658CONFIG_IP_NF_RAW=m
659# CONFIG_IP_NF_SECURITY is not set
660CONFIG_IP_NF_ARPTABLES=m
661CONFIG_IP_NF_ARPFILTER=m
662CONFIG_IP_NF_ARP_MANGLE=m
663
664#
665# IPv6: Netfilter Configuration
666#
667CONFIG_NF_CONNTRACK_IPV6=y
668CONFIG_IP6_NF_QUEUE=m
669CONFIG_IP6_NF_IPTABLES=y
670CONFIG_IP6_NF_MATCH_AH=m
671CONFIG_IP6_NF_MATCH_EUI64=m
672CONFIG_IP6_NF_MATCH_FRAG=m
673CONFIG_IP6_NF_MATCH_OPTS=m
674CONFIG_IP6_NF_MATCH_HL=m
675CONFIG_IP6_NF_MATCH_IPV6HEADER=m
676CONFIG_IP6_NF_MATCH_MH=m
677CONFIG_IP6_NF_MATCH_RT=m
678CONFIG_IP6_NF_TARGET_LOG=m
679CONFIG_IP6_NF_FILTER=y
680CONFIG_IP6_NF_TARGET_REJECT=y
681CONFIG_IP6_NF_MANGLE=m
682CONFIG_IP6_NF_TARGET_HL=m
683CONFIG_IP6_NF_RAW=m
684# CONFIG_IP6_NF_SECURITY is not set
685# CONFIG_IP_DCCP is not set
686# CONFIG_IP_SCTP is not set
687# CONFIG_TIPC is not set
688# CONFIG_ATM is not set
689# CONFIG_BRIDGE is not set
690# CONFIG_NET_DSA is not set
691# CONFIG_VLAN_8021Q is not set
692# CONFIG_DECNET is not set
693# CONFIG_LLC2 is not set
694# CONFIG_IPX is not set
695# CONFIG_ATALK is not set
696# CONFIG_X25 is not set
697# CONFIG_LAPB is not set
698# CONFIG_ECONET is not set
699# CONFIG_WAN_ROUTER is not set
700# CONFIG_NET_SCHED is not set
701CONFIG_NET_CLS_ROUTE=y
702# CONFIG_DCB is not set
703
704#
705# Network testing
706#
707# CONFIG_NET_PKTGEN is not set
708# CONFIG_HAMRADIO is not set
709# CONFIG_CAN is not set
710# CONFIG_IRDA is not set
711CONFIG_BT=y
712CONFIG_BT_L2CAP=y
713CONFIG_BT_SCO=y
714CONFIG_BT_RFCOMM=y
715CONFIG_BT_RFCOMM_TTY=y
716CONFIG_BT_BNEP=y
717CONFIG_BT_BNEP_MC_FILTER=y
718CONFIG_BT_BNEP_PROTO_FILTER=y
719CONFIG_BT_HIDP=y
720
721#
722# Bluetooth device drivers
723#
724CONFIG_BT_HCIBTUSB=y
725CONFIG_BT_HCIBTSDIO=m
726CONFIG_BT_HCIUART=m
727CONFIG_BT_HCIUART_H4=y
728CONFIG_BT_HCIUART_BCSP=y
729CONFIG_BT_HCIUART_LL=y
730CONFIG_BT_HCIBCM203X=m
731CONFIG_BT_HCIBPA10X=m
732CONFIG_BT_HCIBFUSB=m
733CONFIG_BT_HCIVHCI=m
734# CONFIG_AF_RXRPC is not set
735# CONFIG_PHONET is not set
736CONFIG_FIB_RULES=y
737CONFIG_WIRELESS=y
738CONFIG_CFG80211=y
739# CONFIG_CFG80211_REG_DEBUG is not set
740CONFIG_NL80211=y
741CONFIG_WIRELESS_OLD_REGULATORY=y
742CONFIG_WIRELESS_EXT=y
743CONFIG_WIRELESS_EXT_SYSFS=y
744CONFIG_LIB80211=m
745CONFIG_LIB80211_CRYPT_WEP=m
746CONFIG_LIB80211_CRYPT_CCMP=m
747CONFIG_LIB80211_CRYPT_TKIP=m
748CONFIG_MAC80211=y
749
750#
751# Rate control algorithm selection
752#
753CONFIG_MAC80211_RC_PID=y
754# CONFIG_MAC80211_RC_MINSTREL is not set
755CONFIG_MAC80211_RC_DEFAULT_PID=y
756# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
757CONFIG_MAC80211_RC_DEFAULT="pid"
758CONFIG_MAC80211_MESH=y
759CONFIG_MAC80211_LEDS=y
760CONFIG_MAC80211_DEBUGFS=y
761# CONFIG_MAC80211_DEBUG_MENU is not set
762CONFIG_IEEE80211=m
763# CONFIG_IEEE80211_DEBUG is not set
764CONFIG_IEEE80211_CRYPT_WEP=m
765CONFIG_IEEE80211_CRYPT_CCMP=m
766CONFIG_IEEE80211_CRYPT_TKIP=m
767CONFIG_WIMAX=m
768CONFIG_WIMAX_DEBUG_LEVEL=8
769CONFIG_RFKILL=y
770CONFIG_RFKILL_INPUT=y
771CONFIG_RFKILL_LEDS=y
772# CONFIG_NET_9P is not set
773
774#
775# Device Drivers
776#
777
778#
779# Generic Driver Options
780#
781CONFIG_UEVENT_HELPER_PATH=""
782CONFIG_STANDALONE=y
783CONFIG_PREVENT_FIRMWARE_BUILD=y
784CONFIG_FW_LOADER=y
785CONFIG_FIRMWARE_IN_KERNEL=y
786CONFIG_EXTRA_FIRMWARE=""
787# CONFIG_DEBUG_DRIVER is not set
788CONFIG_DEBUG_DEVRES=y
789# CONFIG_SYS_HYPERVISOR is not set
790CONFIG_CONNECTOR=y
791CONFIG_PROC_EVENTS=y
792# CONFIG_MTD is not set
793# CONFIG_PARPORT is not set
794CONFIG_PNP=y
795CONFIG_PNP_DEBUG_MESSAGES=y
796
797#
798# Protocols
799#
800CONFIG_PNPACPI=y
801CONFIG_BLK_DEV=y
802# CONFIG_BLK_DEV_FD is not set
803# CONFIG_BLK_CPQ_DA is not set
804# CONFIG_BLK_CPQ_CISS_DA is not set
805# CONFIG_BLK_DEV_DAC960 is not set
806# CONFIG_BLK_DEV_UMEM is not set
807# CONFIG_BLK_DEV_COW_COMMON is not set
808CONFIG_BLK_DEV_LOOP=y
809CONFIG_BLK_DEV_CRYPTOLOOP=m
810# CONFIG_BLK_DEV_NBD is not set
811# CONFIG_BLK_DEV_SX8 is not set
812# CONFIG_BLK_DEV_UB is not set
813# CONFIG_BLK_DEV_RAM is not set
814CONFIG_CDROM_PKTCDVD=m
815CONFIG_CDROM_PKTCDVD_BUFFERS=8
816# CONFIG_CDROM_PKTCDVD_WCACHE is not set
817# CONFIG_ATA_OVER_ETH is not set
818# CONFIG_BLK_DEV_HD is not set
819CONFIG_MISC_DEVICES=y
820# CONFIG_IBM_ASM is not set
821# CONFIG_PHANTOM is not set
822# CONFIG_SGI_IOC4 is not set
823CONFIG_TIFM_CORE=m
824CONFIG_TIFM_7XX1=m
825# CONFIG_ICS932S401 is not set
826# CONFIG_ENCLOSURE_SERVICES is not set
827# CONFIG_HP_ILO is not set
828# CONFIG_C2PORT is not set
829
830#
831# EEPROM support
832#
833# CONFIG_EEPROM_AT24 is not set
834# CONFIG_EEPROM_LEGACY is not set
835CONFIG_EEPROM_93CX6=m
836CONFIG_HAVE_IDE=y
837# CONFIG_IDE is not set
838
839#
840# SCSI device support
841#
842CONFIG_RAID_ATTRS=m
843CONFIG_SCSI=y
844CONFIG_SCSI_DMA=y
845# CONFIG_SCSI_TGT is not set
846# CONFIG_SCSI_NETLINK is not set
847CONFIG_SCSI_PROC_FS=y
848
849#
850# SCSI support type (disk, tape, CD-ROM)
851#
852CONFIG_BLK_DEV_SD=y
853CONFIG_CHR_DEV_ST=m
854# CONFIG_CHR_DEV_OSST is not set
855CONFIG_BLK_DEV_SR=y
856CONFIG_BLK_DEV_SR_VENDOR=y
857# CONFIG_CHR_DEV_SG is not set
858CONFIG_CHR_DEV_SCH=m
859
860#
861# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
862#
863CONFIG_SCSI_MULTI_LUN=y
864CONFIG_SCSI_CONSTANTS=y
865CONFIG_SCSI_LOGGING=y
866CONFIG_SCSI_SCAN_ASYNC=y
867CONFIG_SCSI_WAIT_SCAN=m
868
869#
870# SCSI Transports
871#
872# CONFIG_SCSI_SPI_ATTRS is not set
873# CONFIG_SCSI_FC_ATTRS is not set
874# CONFIG_SCSI_ISCSI_ATTRS is not set
875# CONFIG_SCSI_SAS_ATTRS is not set
876# CONFIG_SCSI_SAS_LIBSAS is not set
877# CONFIG_SCSI_SRP_ATTRS is not set
878CONFIG_SCSI_LOWLEVEL=y
879# CONFIG_ISCSI_TCP is not set
880# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
881# CONFIG_SCSI_3W_9XXX is not set
882# CONFIG_SCSI_ACARD is not set
883# CONFIG_SCSI_AACRAID is not set
884# CONFIG_SCSI_AIC7XXX is not set
885# CONFIG_SCSI_AIC7XXX_OLD is not set
886# CONFIG_SCSI_AIC79XX is not set
887# CONFIG_SCSI_AIC94XX is not set
888# CONFIG_SCSI_DPT_I2O is not set
889# CONFIG_SCSI_ADVANSYS is not set
890# CONFIG_SCSI_ARCMSR is not set
891# CONFIG_MEGARAID_NEWGEN is not set
892# CONFIG_MEGARAID_LEGACY is not set
893# CONFIG_MEGARAID_SAS is not set
894# CONFIG_SCSI_HPTIOP is not set
895# CONFIG_SCSI_BUSLOGIC is not set
896# CONFIG_LIBFC is not set
897# CONFIG_FCOE is not set
898# CONFIG_SCSI_DMX3191D is not set
899# CONFIG_SCSI_EATA is not set
900# CONFIG_SCSI_FUTURE_DOMAIN is not set
901# CONFIG_SCSI_GDTH is not set
902# CONFIG_SCSI_IPS is not set
903# CONFIG_SCSI_INITIO is not set
904# CONFIG_SCSI_INIA100 is not set
905# CONFIG_SCSI_MVSAS is not set
906# CONFIG_SCSI_STEX is not set
907# CONFIG_SCSI_SYM53C8XX_2 is not set
908# CONFIG_SCSI_IPR is not set
909# CONFIG_SCSI_QLOGIC_1280 is not set
910# CONFIG_SCSI_QLA_FC is not set
911# CONFIG_SCSI_QLA_ISCSI is not set
912# CONFIG_SCSI_LPFC is not set
913# CONFIG_SCSI_DC395x is not set
914# CONFIG_SCSI_DC390T is not set
915# CONFIG_SCSI_NSP32 is not set
916# CONFIG_SCSI_DEBUG is not set
917# CONFIG_SCSI_SRP is not set
918# CONFIG_SCSI_DH is not set
919CONFIG_ATA=y
920# CONFIG_ATA_NONSTANDARD is not set
921CONFIG_ATA_ACPI=y
922# CONFIG_SATA_PMP is not set
923CONFIG_SATA_AHCI=y
924# CONFIG_SATA_SIL24 is not set
925CONFIG_ATA_SFF=y
926# CONFIG_SATA_SVW is not set
927CONFIG_ATA_PIIX=y
928# CONFIG_SATA_MV is not set
929# CONFIG_SATA_NV is not set
930# CONFIG_PDC_ADMA is not set
931# CONFIG_SATA_QSTOR is not set
932# CONFIG_SATA_PROMISE is not set
933# CONFIG_SATA_SX4 is not set
934# CONFIG_SATA_SIL is not set
935# CONFIG_SATA_SIS is not set
936# CONFIG_SATA_ULI is not set
937# CONFIG_SATA_VIA is not set
938# CONFIG_SATA_VITESSE is not set
939# CONFIG_SATA_INIC162X is not set
940# CONFIG_PATA_ACPI is not set
941# CONFIG_PATA_ALI is not set
942# CONFIG_PATA_AMD is not set
943# CONFIG_PATA_ARTOP is not set
944# CONFIG_PATA_ATIIXP is not set
945# CONFIG_PATA_CMD640_PCI is not set
946# CONFIG_PATA_CMD64X is not set
947# CONFIG_PATA_CS5520 is not set
948# CONFIG_PATA_CS5530 is not set
949# CONFIG_PATA_CS5535 is not set
950# CONFIG_PATA_CS5536 is not set
951# CONFIG_PATA_CYPRESS is not set
952# CONFIG_PATA_EFAR is not set
953CONFIG_ATA_GENERIC=y
954# CONFIG_PATA_HPT366 is not set
955# CONFIG_PATA_HPT37X is not set
956# CONFIG_PATA_HPT3X2N is not set
957# CONFIG_PATA_HPT3X3 is not set
958# CONFIG_PATA_IT821X is not set
959# CONFIG_PATA_IT8213 is not set
960# CONFIG_PATA_JMICRON is not set
961# CONFIG_PATA_TRIFLEX is not set
962# CONFIG_PATA_MARVELL is not set
963CONFIG_PATA_MPIIX=y
964# CONFIG_PATA_OLDPIIX is not set
965# CONFIG_PATA_NETCELL is not set
966# CONFIG_PATA_NINJA32 is not set
967# CONFIG_PATA_NS87410 is not set
968# CONFIG_PATA_NS87415 is not set
969# CONFIG_PATA_OPTI is not set
970# CONFIG_PATA_OPTIDMA is not set
971# CONFIG_PATA_PDC_OLD is not set
972# CONFIG_PATA_RADISYS is not set
973# CONFIG_PATA_RZ1000 is not set
974# CONFIG_PATA_SC1200 is not set
975# CONFIG_PATA_SERVERWORKS is not set
976# CONFIG_PATA_PDC2027X is not set
977# CONFIG_PATA_SIL680 is not set
978# CONFIG_PATA_SIS is not set
979# CONFIG_PATA_VIA is not set
980# CONFIG_PATA_WINBOND is not set
981CONFIG_PATA_SCH=y
982CONFIG_MD=y
983# CONFIG_BLK_DEV_MD is not set
984CONFIG_BLK_DEV_DM=m
985CONFIG_DM_DEBUG=y
986# CONFIG_DM_CRYPT is not set
987CONFIG_DM_SNAPSHOT=m
988CONFIG_DM_MIRROR=m
989CONFIG_DM_ZERO=m
990CONFIG_DM_MULTIPATH=m
991CONFIG_DM_DELAY=m
992# CONFIG_DM_UEVENT is not set
993CONFIG_FUSION=y
994CONFIG_FUSION_SPI=m
995CONFIG_FUSION_FC=m
996CONFIG_FUSION_SAS=m
997CONFIG_FUSION_MAX_SGE=40
998CONFIG_FUSION_CTL=m
999CONFIG_FUSION_LAN=m
1000CONFIG_FUSION_LOGGING=y
1001
1002#
1003# IEEE 1394 (FireWire) support
1004#
1005
1006#
1007# Enable only one of the two stacks, unless you know what you are doing
1008#
1009# CONFIG_FIREWIRE is not set
1010# CONFIG_IEEE1394 is not set
1011# CONFIG_I2O is not set
1012# CONFIG_MACINTOSH_DRIVERS is not set
1013CONFIG_NETDEVICES=y
1014# CONFIG_DUMMY is not set
1015# CONFIG_BONDING is not set
1016CONFIG_MACVLAN=m
1017# CONFIG_EQUALIZER is not set
1018CONFIG_TUN=y
1019# CONFIG_VETH is not set
1020# CONFIG_NET_SB1000 is not set
1021# CONFIG_ARCNET is not set
1022CONFIG_PHYLIB=m
1023
1024#
1025# MII PHY device drivers
1026#
1027CONFIG_MARVELL_PHY=m
1028CONFIG_DAVICOM_PHY=m
1029CONFIG_QSEMI_PHY=m
1030CONFIG_LXT_PHY=m
1031CONFIG_CICADA_PHY=m
1032CONFIG_VITESSE_PHY=m
1033CONFIG_SMSC_PHY=m
1034CONFIG_BROADCOM_PHY=m
1035CONFIG_ICPLUS_PHY=m
1036CONFIG_REALTEK_PHY=m
1037# CONFIG_NATIONAL_PHY is not set
1038# CONFIG_STE10XP is not set
1039# CONFIG_LSI_ET1011C_PHY is not set
1040CONFIG_MDIO_BITBANG=m
1041CONFIG_NET_ETHERNET=y
1042CONFIG_MII=y
1043CONFIG_HAPPYMEAL=m
1044CONFIG_SUNGEM=m
1045CONFIG_CASSINI=m
1046CONFIG_NET_VENDOR_3COM=y
1047# CONFIG_VORTEX is not set
1048# CONFIG_TYPHOON is not set
1049# CONFIG_DNET is not set
1050# CONFIG_NET_TULIP is not set
1051# CONFIG_HP100 is not set
1052# CONFIG_IBM_NEW_EMAC_ZMII is not set
1053# CONFIG_IBM_NEW_EMAC_RGMII is not set
1054# CONFIG_IBM_NEW_EMAC_TAH is not set
1055# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
1056# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
1057# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
1058# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
1059# CONFIG_NET_PCI is not set
1060# CONFIG_B44 is not set
1061# CONFIG_FORCEDETH is not set
1062# CONFIG_CS89x0 is not set
1063# CONFIG_EEPRO100 is not set
1064CONFIG_E100=y
1065# CONFIG_FEALNX is not set
1066# CONFIG_NATSEMI is not set
1067# CONFIG_NE2K_PCI is not set
1068CONFIG_8139CP=m
1069CONFIG_8139TOO=m
1070CONFIG_8139TOO_PIO=y
1071# CONFIG_8139TOO_TUNE_TWISTER is not set
1072# CONFIG_8139TOO_8129 is not set
1073# CONFIG_8139_OLD_RX_RESET is not set
1074# CONFIG_R6040 is not set
1075CONFIG_SIS900=m
1076# CONFIG_EPIC100 is not set
1077# CONFIG_SMSC9420 is not set
1078# CONFIG_SUNDANCE is not set
1079# CONFIG_TLAN is not set
1080# CONFIG_VIA_RHINE is not set
1081# CONFIG_SC92031 is not set
1082CONFIG_ATL2=m
1083CONFIG_NETDEV_1000=y
1084# CONFIG_ACENIC is not set
1085# CONFIG_DL2K is not set
1086CONFIG_E1000=y
1087CONFIG_E1000E=y
1088# CONFIG_IP1000 is not set
1089CONFIG_IGB=y
1090# CONFIG_IGB_LRO is not set
1091# CONFIG_NS83820 is not set
1092# CONFIG_HAMACHI is not set
1093# CONFIG_YELLOWFIN is not set
1094CONFIG_R8169=y
1095CONFIG_SIS190=m
1096# CONFIG_SKGE is not set
1097CONFIG_SKY2=m
1098# CONFIG_SKY2_DEBUG is not set
1099# CONFIG_VIA_VELOCITY is not set
1100CONFIG_TIGON3=m
1101CONFIG_BNX2=m
1102# CONFIG_QLA3XXX is not set
1103CONFIG_ATL1=m
1104CONFIG_ATL1E=m
1105# CONFIG_ATL1C is not set
1106# CONFIG_JME is not set
1107CONFIG_NETDEV_10000=y
1108# CONFIG_CHELSIO_T1 is not set
1109# CONFIG_CHELSIO_T3 is not set
1110# CONFIG_ENIC is not set
1111CONFIG_IXGBE=m
1112CONFIG_IXGB=m
1113# CONFIG_S2IO is not set
1114# CONFIG_MYRI10GE is not set
1115# CONFIG_NETXEN_NIC is not set
1116# CONFIG_NIU is not set
1117# CONFIG_MLX4_EN is not set
1118# CONFIG_MLX4_CORE is not set
1119# CONFIG_TEHUTI is not set
1120CONFIG_BNX2X=m
1121# CONFIG_QLGE is not set
1122# CONFIG_SFC is not set
1123# CONFIG_TR is not set
1124
1125#
1126# Wireless LAN
1127#
1128CONFIG_WLAN_PRE80211=y
1129# CONFIG_STRIP is not set
1130# CONFIG_ARLAN is not set
1131# CONFIG_WAVELAN is not set
1132CONFIG_WLAN_80211=y
1133CONFIG_IPW2100=m
1134# CONFIG_IPW2100_MONITOR is not set
1135# CONFIG_IPW2100_DEBUG is not set
1136CONFIG_IPW2200=m
1137# CONFIG_IPW2200_MONITOR is not set
1138CONFIG_IPW2200_QOS=y
1139# CONFIG_IPW2200_DEBUG is not set
1140# CONFIG_LIBIPW_DEBUG is not set
1141# CONFIG_LIBERTAS is not set
1142# CONFIG_LIBERTAS_THINFIRM is not set
1143# CONFIG_AIRO is not set
1144# CONFIG_HERMES is not set
1145# CONFIG_ATMEL is not set
1146# CONFIG_PRISM54 is not set
1147CONFIG_USB_ZD1201=m
1148CONFIG_USB_NET_RNDIS_WLAN=m
1149CONFIG_RTL8180=m
1150CONFIG_RTL8187=m
1151# CONFIG_ADM8211 is not set
1152# CONFIG_MAC80211_HWSIM is not set
1153CONFIG_P54_COMMON=m
1154CONFIG_P54_USB=m
1155CONFIG_P54_PCI=m
1156# CONFIG_ATH5K is not set
1157CONFIG_ATH9K=m
1158# CONFIG_ATH9K_DEBUG is not set
1159CONFIG_IWLWIFI=m
1160CONFIG_IWLCORE=m
1161# CONFIG_IWLWIFI_LEDS is not set
1162CONFIG_IWLWIFI_RFKILL=y
1163# CONFIG_IWLWIFI_DEBUG is not set
1164CONFIG_IWLAGN=m
1165# CONFIG_IWLAGN_SPECTRUM_MEASUREMENT is not set
1166# CONFIG_IWLAGN_LEDS is not set
1167CONFIG_IWL4965=y
1168CONFIG_IWL5000=y
1169CONFIG_IWL3945=m
1170CONFIG_IWL3945_RFKILL=y
1171# CONFIG_IWL3945_SPECTRUM_MEASUREMENT is not set
1172# CONFIG_IWL3945_LEDS is not set
1173# CONFIG_IWL3945_DEBUG is not set
1174# CONFIG_HOSTAP is not set
1175CONFIG_B43=m
1176CONFIG_B43_PCI_AUTOSELECT=y
1177CONFIG_B43_PCICORE_AUTOSELECT=y
1178CONFIG_B43_LEDS=y
1179CONFIG_B43_RFKILL=y
1180# CONFIG_B43_DEBUG is not set
1181# CONFIG_B43LEGACY is not set
1182# CONFIG_ZD1211RW is not set
1183CONFIG_RT2X00=m
1184CONFIG_RT2400PCI=m
1185CONFIG_RT2500PCI=m
1186CONFIG_RT61PCI=m
1187CONFIG_RT2500USB=m
1188CONFIG_RT73USB=m
1189CONFIG_RT2X00_LIB_PCI=m
1190CONFIG_RT2X00_LIB_USB=m
1191CONFIG_RT2X00_LIB=m
1192CONFIG_RT2X00_LIB_FIRMWARE=y
1193CONFIG_RT2X00_LIB_CRYPTO=y
1194CONFIG_RT2X00_LIB_RFKILL=y
1195CONFIG_RT2X00_LIB_LEDS=y
1196# CONFIG_RT2X00_LIB_DEBUGFS is not set
1197# CONFIG_RT2X00_DEBUG is not set
1198
1199#
1200# WiMAX Wireless Broadband devices
1201#
1202CONFIG_WIMAX_I2400M_USB=m
1203CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
1204# CONFIG_WIMAX_I2400M_SDIO is not set
1205#
1206#
1207
1208#
1209# USB Network Adapters
1210#
1211CONFIG_USB_CATC=m
1212CONFIG_USB_KAWETH=m
1213CONFIG_USB_PEGASUS=m
1214CONFIG_USB_RTL8150=m
1215CONFIG_USB_USBNET=m
1216CONFIG_USB_NET_AX8817X=m
1217CONFIG_USB_NET_CDCETHER=m
1218CONFIG_USB_NET_DM9601=m
1219CONFIG_USB_NET_SMSC95XX=m
1220CONFIG_USB_NET_GL620A=m
1221CONFIG_USB_NET_NET1080=m
1222CONFIG_USB_NET_PLUSB=m
1223CONFIG_USB_NET_MCS7830=m
1224CONFIG_USB_NET_RNDIS_HOST=m
1225CONFIG_USB_NET_CDC_SUBSET=m
1226CONFIG_USB_ALI_M5632=y
1227CONFIG_USB_AN2720=y
1228CONFIG_USB_BELKIN=y
1229CONFIG_USB_ARMLINUX=y
1230CONFIG_USB_EPSON2888=y
1231CONFIG_USB_KC2190=y
1232CONFIG_USB_NET_ZAURUS=m
1233CONFIG_USB_HSO=m
1234# CONFIG_WAN is not set
1235# CONFIG_FDDI is not set
1236# CONFIG_HIPPI is not set
1237CONFIG_PPP=m
1238CONFIG_PPP_MULTILINK=y
1239CONFIG_PPP_FILTER=y
1240CONFIG_PPP_ASYNC=m
1241CONFIG_PPP_SYNC_TTY=m
1242CONFIG_PPP_DEFLATE=m
1243CONFIG_PPP_BSDCOMP=m
1244CONFIG_PPP_MPPE=m
1245CONFIG_PPPOE=m
1246CONFIG_PPPOL2TP=m
1247# CONFIG_SLIP is not set
1248CONFIG_SLHC=m
1249CONFIG_NET_FC=y
1250CONFIG_NETCONSOLE=m
1251CONFIG_NETCONSOLE_DYNAMIC=y
1252CONFIG_NETPOLL=y
1253CONFIG_NETPOLL_TRAP=y
1254CONFIG_NET_POLL_CONTROLLER=y
1255# CONFIG_ISDN is not set
1256# CONFIG_PHONE is not set
1257
1258#
1259# Input device support
1260#
1261CONFIG_INPUT=y
1262CONFIG_INPUT_FF_MEMLESS=y
1263CONFIG_INPUT_POLLDEV=m
1264
1265#
1266# Userland interfaces
1267#
1268CONFIG_INPUT_MOUSEDEV=y
1269# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
1270CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
1271CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
1272CONFIG_INPUT_JOYDEV=m
1273CONFIG_INPUT_EVDEV=y
1274# CONFIG_INPUT_EVBUG is not set
1275
1276#
1277# Input Device Drivers
1278#
1279CONFIG_INPUT_KEYBOARD=y
1280CONFIG_KEYBOARD_ATKBD=y
1281# CONFIG_KEYBOARD_SUNKBD is not set
1282# CONFIG_KEYBOARD_LKKBD is not set
1283# CONFIG_KEYBOARD_XTKBD is not set
1284# CONFIG_KEYBOARD_NEWTON is not set
1285# CONFIG_KEYBOARD_STOWAWAY is not set
1286CONFIG_INPUT_MOUSE=y
1287CONFIG_MOUSE_PS2=y
1288CONFIG_MOUSE_PS2_ALPS=y
1289CONFIG_MOUSE_PS2_LOGIPS2PP=y
1290CONFIG_MOUSE_PS2_SYNAPTICS=y
1291CONFIG_MOUSE_PS2_LIFEBOOK=y
1292CONFIG_MOUSE_PS2_TRACKPOINT=y
1293# CONFIG_MOUSE_PS2_ELANTECH is not set
1294CONFIG_MOUSE_PS2_TOUCHKIT=y
1295CONFIG_MOUSE_SERIAL=m
1296# CONFIG_MOUSE_APPLETOUCH is not set
1297# CONFIG_MOUSE_BCM5974 is not set
1298# CONFIG_MOUSE_INPORT is not set
1299# CONFIG_MOUSE_LOGIBM is not set
1300# CONFIG_MOUSE_PC110PAD is not set
1301CONFIG_MOUSE_VSXXXAA=m
1302CONFIG_INPUT_JOYSTICK=y
1303# CONFIG_JOYSTICK_ANALOG is not set
1304# CONFIG_JOYSTICK_A3D is not set
1305# CONFIG_JOYSTICK_ADI is not set
1306# CONFIG_JOYSTICK_COBRA is not set
1307# CONFIG_JOYSTICK_GF2K is not set
1308# CONFIG_JOYSTICK_GRIP is not set
1309# CONFIG_JOYSTICK_GRIP_MP is not set
1310# CONFIG_JOYSTICK_GUILLEMOT is not set
1311# CONFIG_JOYSTICK_INTERACT is not set
1312# CONFIG_JOYSTICK_SIDEWINDER is not set
1313# CONFIG_JOYSTICK_TMDC is not set
1314# CONFIG_JOYSTICK_IFORCE is not set
1315# CONFIG_JOYSTICK_WARRIOR is not set
1316# CONFIG_JOYSTICK_MAGELLAN is not set
1317# CONFIG_JOYSTICK_SPACEORB is not set
1318# CONFIG_JOYSTICK_SPACEBALL is not set
1319# CONFIG_JOYSTICK_STINGER is not set
1320# CONFIG_JOYSTICK_TWIDJOY is not set
1321# CONFIG_JOYSTICK_ZHENHUA is not set
1322# CONFIG_JOYSTICK_JOYDUMP is not set
1323# CONFIG_JOYSTICK_XPAD is not set
1324# CONFIG_INPUT_TABLET is not set
1325CONFIG_INPUT_TOUCHSCREEN=y
1326CONFIG_TOUCHSCREEN_FUJITSU=m
1327CONFIG_TOUCHSCREEN_GUNZE=m
1328CONFIG_TOUCHSCREEN_ELO=m
1329# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
1330CONFIG_TOUCHSCREEN_MTOUCH=m
1331CONFIG_TOUCHSCREEN_INEXIO=m
1332CONFIG_TOUCHSCREEN_MK712=m
1333CONFIG_TOUCHSCREEN_HTCPEN=m
1334CONFIG_TOUCHSCREEN_PENMOUNT=m
1335CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
1336CONFIG_TOUCHSCREEN_TOUCHWIN=m
1337CONFIG_TOUCHSCREEN_WM97XX=m
1338CONFIG_TOUCHSCREEN_WM9705=y
1339CONFIG_TOUCHSCREEN_WM9712=y
1340CONFIG_TOUCHSCREEN_WM9713=y
1341CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
1342CONFIG_TOUCHSCREEN_USB_EGALAX=y
1343CONFIG_TOUCHSCREEN_USB_PANJIT=y
1344CONFIG_TOUCHSCREEN_USB_3M=y
1345CONFIG_TOUCHSCREEN_USB_ITM=y
1346CONFIG_TOUCHSCREEN_USB_ETURBO=y
1347CONFIG_TOUCHSCREEN_USB_GUNZE=y
1348CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
1349CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
1350CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
1351CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
1352CONFIG_TOUCHSCREEN_USB_GOTOP=y
1353CONFIG_TOUCHSCREEN_TOUCHIT213=m
1354# CONFIG_TOUCHSCREEN_TSC2003 is not set
1355# CONFIG_TOUCHSCREEN_TSC2007 is not set
1356CONFIG_INPUT_MISC=y
1357# CONFIG_INPUT_PCSPKR is not set
1358# CONFIG_INPUT_APANEL is not set
1359CONFIG_INPUT_WISTRON_BTNS=m
1360# CONFIG_INPUT_ATLAS_BTNS is not set
1361# CONFIG_INPUT_ATI_REMOTE is not set
1362# CONFIG_INPUT_ATI_REMOTE2 is not set
1363CONFIG_INPUT_KEYSPAN_REMOTE=m
1364CONFIG_INPUT_POWERMATE=m
1365CONFIG_INPUT_YEALINK=m
1366# CONFIG_INPUT_CM109 is not set
1367CONFIG_INPUT_UINPUT=m
1368
1369#
1370# Hardware I/O ports
1371#
1372CONFIG_SERIO=y
1373CONFIG_SERIO_I8042=y
1374CONFIG_SERIO_SERPORT=y
1375# CONFIG_SERIO_CT82C710 is not set
1376# CONFIG_SERIO_PCIPS2 is not set
1377CONFIG_SERIO_LIBPS2=y
1378CONFIG_SERIO_RAW=m
1379# CONFIG_GAMEPORT is not set
1380
1381#
1382# Character devices
1383#
1384CONFIG_VT=y
1385CONFIG_CONSOLE_TRANSLATIONS=y
1386CONFIG_VT_CONSOLE=y
1387CONFIG_HW_CONSOLE=y
1388CONFIG_VT_HW_CONSOLE_BINDING=y
1389# CONFIG_DEVKMEM is not set
1390# CONFIG_SERIAL_NONSTANDARD is not set
1391# CONFIG_NOZOMI is not set
1392
1393#
1394# Serial drivers
1395#
1396CONFIG_SERIAL_8250=y
1397# CONFIG_SERIAL_8250_CONSOLE is not set
1398CONFIG_FIX_EARLYCON_MEM=y
1399CONFIG_SERIAL_8250_PCI=y
1400CONFIG_SERIAL_8250_PNP=y
1401CONFIG_SERIAL_8250_NR_UARTS=4
1402CONFIG_SERIAL_8250_RUNTIME_UARTS=4
1403# CONFIG_SERIAL_8250_EXTENDED is not set
1404
1405#
1406# Non-8250 serial port support
1407#
1408# CONFIG_SERIAL_JSM is not set
1409CONFIG_UNIX98_PTYS=y
1410# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
1411# CONFIG_LEGACY_PTYS is not set
1412# CONFIG_IPMI_HANDLER is not set
1413CONFIG_HW_RANDOM=m
1414# CONFIG_HW_RANDOM_INTEL is not set
1415# CONFIG_HW_RANDOM_AMD is not set
1416# CONFIG_HW_RANDOM_GEODE is not set
1417# CONFIG_HW_RANDOM_VIA is not set
1418CONFIG_NVRAM=m
1419# CONFIG_DTLK is not set
1420# CONFIG_R3964 is not set
1421# CONFIG_APPLICOM is not set
1422# CONFIG_SONYPI is not set
1423# CONFIG_MWAVE is not set
1424# CONFIG_PC8736x_GPIO is not set
1425# CONFIG_NSC_GPIO is not set
1426# CONFIG_CS5535_GPIO is not set
1427# CONFIG_RAW_DRIVER is not set
1428CONFIG_HPET=y
1429# CONFIG_HPET_MMAP is not set
1430# CONFIG_HANGCHECK_TIMER is not set
1431# CONFIG_TCG_TPM is not set
1432# CONFIG_TELCLOCK is not set
1433CONFIG_DEVPORT=y
1434CONFIG_I2C=y
1435CONFIG_I2C_BOARDINFO=y
1436# CONFIG_I2C_CHARDEV is not set
1437CONFIG_I2C_HELPER_AUTO=y
1438CONFIG_I2C_ALGOBIT=y
1439
1440#
1441# I2C Hardware Bus support
1442#
1443
1444#
1445# PC SMBus host controller drivers
1446#
1447# CONFIG_I2C_ALI1535 is not set
1448# CONFIG_I2C_ALI1563 is not set
1449# CONFIG_I2C_ALI15X3 is not set
1450# CONFIG_I2C_AMD756 is not set
1451# CONFIG_I2C_AMD8111 is not set
1452# CONFIG_I2C_I801 is not set
1453# CONFIG_I2C_ISCH is not set
1454# CONFIG_I2C_PIIX4 is not set
1455# CONFIG_I2C_NFORCE2 is not set
1456# CONFIG_I2C_SIS5595 is not set
1457# CONFIG_I2C_SIS630 is not set
1458# CONFIG_I2C_SIS96X is not set
1459# CONFIG_I2C_VIA is not set
1460# CONFIG_I2C_VIAPRO is not set
1461
1462#
1463# I2C system bus drivers (mostly embedded / system-on-chip)
1464#
1465# CONFIG_I2C_OCORES is not set
1466# CONFIG_I2C_SIMTEC is not set
1467
1468#
1469# External I2C/SMBus adapter drivers
1470#
1471# CONFIG_I2C_PARPORT_LIGHT is not set
1472# CONFIG_I2C_TAOS_EVM is not set
1473# CONFIG_I2C_TINY_USB is not set
1474
1475#
1476# Graphics adapter I2C/DDC channel drivers
1477#
1478# CONFIG_I2C_VOODOO3 is not set
1479
1480#
1481# Other I2C/SMBus bus drivers
1482#
1483# CONFIG_I2C_PCA_PLATFORM is not set
1484# CONFIG_I2C_STUB is not set
1485# CONFIG_SCx200_ACB is not set
1486
1487#
1488# Miscellaneous I2C Chip support
1489#
1490# CONFIG_DS1682 is not set
1491# CONFIG_SENSORS_PCF8574 is not set
1492# CONFIG_PCF8575 is not set
1493# CONFIG_SENSORS_PCA9539 is not set
1494# CONFIG_SENSORS_PCF8591 is not set
1495# CONFIG_SENSORS_MAX6875 is not set
1496# CONFIG_SENSORS_TSL2550 is not set
1497# CONFIG_I2C_DEBUG_CORE is not set
1498# CONFIG_I2C_DEBUG_ALGO is not set
1499# CONFIG_I2C_DEBUG_BUS is not set
1500# CONFIG_I2C_DEBUG_CHIP is not set
1501# CONFIG_SPI is not set
1502CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
1503# CONFIG_GPIOLIB is not set
1504# CONFIG_W1 is not set
1505CONFIG_POWER_SUPPLY=y
1506# CONFIG_POWER_SUPPLY_DEBUG is not set
1507# CONFIG_PDA_POWER is not set
1508# CONFIG_BATTERY_DS2760 is not set
1509# CONFIG_BATTERY_BQ27x00 is not set
1510CONFIG_HWMON=y
1511# CONFIG_HWMON_VID is not set
1512# CONFIG_SENSORS_ABITUGURU is not set
1513# CONFIG_SENSORS_ABITUGURU3 is not set
1514# CONFIG_SENSORS_AD7414 is not set
1515# CONFIG_SENSORS_AD7418 is not set
1516# CONFIG_SENSORS_ADM1021 is not set
1517# CONFIG_SENSORS_ADM1025 is not set
1518# CONFIG_SENSORS_ADM1026 is not set
1519# CONFIG_SENSORS_ADM1029 is not set
1520# CONFIG_SENSORS_ADM1031 is not set
1521# CONFIG_SENSORS_ADM9240 is not set
1522# CONFIG_SENSORS_ADT7462 is not set
1523# CONFIG_SENSORS_ADT7470 is not set
1524# CONFIG_SENSORS_ADT7473 is not set
1525# CONFIG_SENSORS_ADT7475 is not set
1526# CONFIG_SENSORS_K8TEMP is not set
1527# CONFIG_SENSORS_ASB100 is not set
1528# CONFIG_SENSORS_ATXP1 is not set
1529# CONFIG_SENSORS_DS1621 is not set
1530# CONFIG_SENSORS_I5K_AMB is not set
1531# CONFIG_SENSORS_F71805F is not set
1532# CONFIG_SENSORS_F71882FG is not set
1533# CONFIG_SENSORS_F75375S is not set
1534# CONFIG_SENSORS_FSCHER is not set
1535# CONFIG_SENSORS_FSCPOS is not set
1536# CONFIG_SENSORS_FSCHMD is not set
1537# CONFIG_SENSORS_GL518SM is not set
1538# CONFIG_SENSORS_GL520SM is not set
1539# CONFIG_SENSORS_CORETEMP is not set
1540# CONFIG_SENSORS_IT87 is not set
1541# CONFIG_SENSORS_LM63 is not set
1542# CONFIG_SENSORS_LM75 is not set
1543# CONFIG_SENSORS_LM77 is not set
1544# CONFIG_SENSORS_LM78 is not set
1545# CONFIG_SENSORS_LM80 is not set
1546# CONFIG_SENSORS_LM83 is not set
1547# CONFIG_SENSORS_LM85 is not set
1548# CONFIG_SENSORS_LM87 is not set
1549# CONFIG_SENSORS_LM90 is not set
1550# CONFIG_SENSORS_LM92 is not set
1551# CONFIG_SENSORS_LM93 is not set
1552# CONFIG_SENSORS_LTC4245 is not set
1553# CONFIG_SENSORS_MAX1619 is not set
1554# CONFIG_SENSORS_MAX6650 is not set
1555# CONFIG_SENSORS_PC87360 is not set
1556# CONFIG_SENSORS_PC87427 is not set
1557# CONFIG_SENSORS_SIS5595 is not set
1558# CONFIG_SENSORS_DME1737 is not set
1559# CONFIG_SENSORS_SMSC47M1 is not set
1560# CONFIG_SENSORS_SMSC47M192 is not set
1561# CONFIG_SENSORS_SMSC47B397 is not set
1562# CONFIG_SENSORS_ADS7828 is not set
1563# CONFIG_SENSORS_THMC50 is not set
1564# CONFIG_SENSORS_VIA686A is not set
1565# CONFIG_SENSORS_VT1211 is not set
1566# CONFIG_SENSORS_VT8231 is not set
1567# CONFIG_SENSORS_W83781D is not set
1568# CONFIG_SENSORS_W83791D is not set
1569# CONFIG_SENSORS_W83792D is not set
1570# CONFIG_SENSORS_W83793 is not set
1571# CONFIG_SENSORS_W83L785TS is not set
1572# CONFIG_SENSORS_W83L786NG is not set
1573# CONFIG_SENSORS_W83627HF is not set
1574# CONFIG_SENSORS_W83627EHF is not set
1575# CONFIG_SENSORS_HDAPS is not set
1576# CONFIG_SENSORS_LIS3LV02D is not set
1577# CONFIG_SENSORS_APPLESMC is not set
1578# CONFIG_HWMON_DEBUG_CHIP is not set
1579CONFIG_THERMAL=y
1580CONFIG_THERMAL_HWMON=y
1581# CONFIG_WATCHDOG is not set
1582CONFIG_SSB_POSSIBLE=y
1583
1584#
1585# Sonics Silicon Backplane
1586#
1587CONFIG_SSB=m
1588CONFIG_SSB_SPROM=y
1589CONFIG_SSB_PCIHOST_POSSIBLE=y
1590CONFIG_SSB_PCIHOST=y
1591CONFIG_SSB_B43_PCI_BRIDGE=y
1592# CONFIG_SSB_DEBUG is not set
1593CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
1594CONFIG_SSB_DRIVER_PCICORE=y
1595
1596#
1597# Multifunction device drivers
1598#
1599# CONFIG_MFD_CORE is not set
1600# CONFIG_MFD_SM501 is not set
1601# CONFIG_HTC_PASIC3 is not set
1602# CONFIG_TWL4030_CORE is not set
1603# CONFIG_MFD_TMIO is not set
1604# CONFIG_PMIC_DA903X is not set
1605# CONFIG_MFD_WM8400 is not set
1606# CONFIG_MFD_WM8350_I2C is not set
1607# CONFIG_MFD_PCF50633 is not set
1608# CONFIG_MFD_TIMBERDALE is not set
1609# CONFIG_REGULATOR is not set
1610
1611#
1612# Multimedia devices
1613#
1614
1615#
1616# Multimedia core support
1617#
1618CONFIG_VIDEO_DEV=y
1619CONFIG_VIDEO_V4L2_COMMON=y
1620# CONFIG_VIDEO_ALLOW_V4L1 is not set
1621CONFIG_VIDEO_V4L1_COMPAT=y
1622CONFIG_DVB_CORE=y
1623CONFIG_VIDEO_MEDIA=y
1624
1625#
1626# Multimedia drivers
1627#
1628CONFIG_MEDIA_ATTACH=y
1629CONFIG_MEDIA_TUNER=m
1630# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
1631CONFIG_MEDIA_TUNER_SIMPLE=m
1632CONFIG_MEDIA_TUNER_TDA8290=m
1633CONFIG_MEDIA_TUNER_TDA9887=m
1634CONFIG_MEDIA_TUNER_TEA5761=m
1635CONFIG_MEDIA_TUNER_TEA5767=m
1636CONFIG_MEDIA_TUNER_MT20XX=m
1637CONFIG_MEDIA_TUNER_XC2028=m
1638CONFIG_MEDIA_TUNER_XC5000=m
1639CONFIG_VIDEO_V4L2=y
1640CONFIG_VIDEOBUF_GEN=m
1641CONFIG_VIDEOBUF_VMALLOC=m
1642CONFIG_VIDEO_CAPTURE_DRIVERS=y
1643# CONFIG_VIDEO_ADV_DEBUG is not set
1644# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
1645CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
1646# CONFIG_VIDEO_VIVI is not set
1647# CONFIG_VIDEO_BT848 is not set
1648# CONFIG_VIDEO_SAA5246A is not set
1649# CONFIG_VIDEO_SAA5249 is not set
1650# CONFIG_VIDEO_SAA7134 is not set
1651# CONFIG_VIDEO_HEXIUM_ORION is not set
1652# CONFIG_VIDEO_HEXIUM_GEMINI is not set
1653# CONFIG_VIDEO_CX88 is not set
1654# CONFIG_VIDEO_CX23885 is not set
1655# CONFIG_VIDEO_AU0828 is not set
1656# CONFIG_VIDEO_IVTV is not set
1657# CONFIG_VIDEO_CX18 is not set
1658# CONFIG_VIDEO_CAFE_CCIC is not set
1659# CONFIG_SOC_CAMERA is not set
1660CONFIG_V4L_USB_DRIVERS=y
1661CONFIG_USB_VIDEO_CLASS=m
1662CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
1663CONFIG_USB_GSPCA=m
1664# CONFIG_USB_M5602 is not set
1665# CONFIG_USB_STV06XX is not set
1666# CONFIG_USB_GSPCA_CONEX is not set
1667# CONFIG_USB_GSPCA_ETOMS is not set
1668# CONFIG_USB_GSPCA_FINEPIX is not set
1669# CONFIG_USB_GSPCA_MARS is not set
1670# CONFIG_USB_GSPCA_OV519 is not set
1671# CONFIG_USB_GSPCA_OV534 is not set
1672# CONFIG_USB_GSPCA_PAC207 is not set
1673# CONFIG_USB_GSPCA_PAC7311 is not set
1674# CONFIG_USB_GSPCA_SONIXB is not set
1675# CONFIG_USB_GSPCA_SONIXJ is not set
1676# CONFIG_USB_GSPCA_SPCA500 is not set
1677# CONFIG_USB_GSPCA_SPCA501 is not set
1678# CONFIG_USB_GSPCA_SPCA505 is not set
1679# CONFIG_USB_GSPCA_SPCA506 is not set
1680# CONFIG_USB_GSPCA_SPCA508 is not set
1681# CONFIG_USB_GSPCA_SPCA561 is not set
1682# CONFIG_USB_GSPCA_STK014 is not set
1683# CONFIG_USB_GSPCA_SUNPLUS is not set
1684# CONFIG_USB_GSPCA_T613 is not set
1685# CONFIG_USB_GSPCA_TV8532 is not set
1686# CONFIG_USB_GSPCA_VC032X is not set
1687# CONFIG_USB_GSPCA_ZC3XX is not set
1688# CONFIG_VIDEO_PVRUSB2 is not set
1689# CONFIG_VIDEO_EM28XX is not set
1690# CONFIG_VIDEO_USBVISION is not set
1691CONFIG_USB_ET61X251=m
1692CONFIG_USB_SN9C102=m
1693CONFIG_USB_ZC0301=m
1694CONFIG_USB_ZR364XX=m
1695CONFIG_USB_STKWEBCAM=m
1696CONFIG_USB_S2255=m
1697# CONFIG_RADIO_ADAPTERS is not set
1698# CONFIG_DVB_DYNAMIC_MINORS is not set
1699# CONFIG_DVB_CAPTURE_DRIVERS is not set
1700# CONFIG_DAB is not set
1701
1702#
1703# Graphics support
1704#
1705CONFIG_AGP=y
1706# CONFIG_AGP_ALI is not set
1707# CONFIG_AGP_ATI is not set
1708# CONFIG_AGP_AMD is not set
1709# CONFIG_AGP_AMD64 is not set
1710CONFIG_AGP_INTEL=y
1711# CONFIG_AGP_NVIDIA is not set
1712# CONFIG_AGP_SIS is not set
1713# CONFIG_AGP_SWORKS is not set
1714# CONFIG_AGP_VIA is not set
1715# CONFIG_AGP_EFFICEON is not set
1716CONFIG_DRM=y
1717# CONFIG_DRM_TDFX is not set
1718# CONFIG_DRM_R128 is not set
1719# CONFIG_DRM_RADEON is not set
1720CONFIG_DRM_I810=y
1721# CONFIG_DRM_I830 is not set
1722CONFIG_DRM_I915=y
1723# CONFIG_DRM_I915_KMS is not set
1724# CONFIG_DRM_MGA is not set
1725# CONFIG_DRM_SIS is not set
1726# CONFIG_DRM_VIA is not set
1727# CONFIG_DRM_SAVAGE is not set
1728# CONFIG_DRM_PSB is not set
1729# CONFIG_VGASTATE is not set
1730CONFIG_VIDEO_OUTPUT_CONTROL=y
1731CONFIG_FB=y
1732CONFIG_FIRMWARE_EDID=y
1733CONFIG_FB_DDC=y
1734CONFIG_FB_BOOT_VESA_SUPPORT=y
1735CONFIG_FB_CFB_FILLRECT=y
1736CONFIG_FB_CFB_COPYAREA=y
1737CONFIG_FB_CFB_IMAGEBLIT=y
1738# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
1739# CONFIG_FB_SYS_FILLRECT is not set
1740# CONFIG_FB_SYS_COPYAREA is not set
1741# CONFIG_FB_SYS_IMAGEBLIT is not set
1742# CONFIG_FB_FOREIGN_ENDIAN is not set
1743# CONFIG_FB_SYS_FOPS is not set
1744# CONFIG_FB_SVGALIB is not set
1745# CONFIG_FB_MACMODES is not set
1746# CONFIG_FB_BACKLIGHT is not set
1747CONFIG_FB_MODE_HELPERS=y
1748# CONFIG_FB_TILEBLITTING is not set
1749
1750#
1751# Frame buffer hardware drivers
1752#
1753# CONFIG_FB_CIRRUS is not set
1754# CONFIG_FB_PM2 is not set
1755# CONFIG_FB_CYBER2000 is not set
1756# CONFIG_FB_ARC is not set
1757# CONFIG_FB_ASILIANT is not set
1758# CONFIG_FB_IMSTT is not set
1759# CONFIG_FB_VGA16 is not set
1760# CONFIG_FB_UVESA is not set
1761# CONFIG_FB_VESA is not set
1762# CONFIG_FB_N411 is not set
1763# CONFIG_FB_HGA is not set
1764# CONFIG_FB_S1D13XXX is not set
1765# CONFIG_FB_NVIDIA is not set
1766# CONFIG_FB_RIVA is not set
1767# CONFIG_FB_I810 is not set
1768# CONFIG_FB_LE80578 is not set
1769CONFIG_FB_INTEL=y
1770CONFIG_FB_INTEL_DEBUG=y
1771CONFIG_FB_INTEL_I2C=y
1772# CONFIG_FB_MATROX is not set
1773# CONFIG_FB_RADEON is not set
1774# CONFIG_FB_ATY128 is not set
1775# CONFIG_FB_ATY is not set
1776# CONFIG_FB_S3 is not set
1777# CONFIG_FB_SAVAGE is not set
1778# CONFIG_FB_SIS is not set
1779# CONFIG_FB_VIA is not set
1780# CONFIG_FB_NEOMAGIC is not set
1781# CONFIG_FB_KYRO is not set
1782# CONFIG_FB_3DFX is not set
1783# CONFIG_FB_VOODOO1 is not set
1784# CONFIG_FB_VT8623 is not set
1785# CONFIG_FB_CYBLA is not set
1786# CONFIG_FB_TRIDENT is not set
1787# CONFIG_FB_ARK is not set
1788# CONFIG_FB_PM3 is not set
1789# CONFIG_FB_CARMINE is not set
1790# CONFIG_FB_GEODE is not set
1791# CONFIG_FB_VIRTUAL is not set
1792# CONFIG_FB_METRONOME is not set
1793# CONFIG_FB_MB862XX is not set
1794CONFIG_BACKLIGHT_LCD_SUPPORT=y
1795CONFIG_LCD_CLASS_DEVICE=y
1796# CONFIG_LCD_ILI9320 is not set
1797CONFIG_LCD_PLATFORM=y
1798CONFIG_BACKLIGHT_CLASS_DEVICE=y
1799CONFIG_BACKLIGHT_GENERIC=y
1800# CONFIG_BACKLIGHT_PROGEAR is not set
1801CONFIG_BACKLIGHT_MBP_NVIDIA=y
1802# CONFIG_BACKLIGHT_SAHARA is not set
1803
1804#
1805# Display device support
1806#
1807CONFIG_DISPLAY_SUPPORT=y
1808
1809#
1810# Display hardware drivers
1811#
1812
1813#
1814# Console display driver support
1815#
1816CONFIG_VGA_CONSOLE=y
1817CONFIG_VGACON_SOFT_SCROLLBACK=y
1818CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
1819CONFIG_DUMMY_CONSOLE=y
1820CONFIG_FRAMEBUFFER_CONSOLE=y
1821# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
1822# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
1823# CONFIG_FONTS is not set
1824CONFIG_FONT_8x8=y
1825CONFIG_FONT_8x16=y
1826# CONFIG_LOGO is not set
1827CONFIG_SOUND=y
1828# CONFIG_SOUND_OSS_CORE is not set
1829CONFIG_SND=y
1830CONFIG_SND_TIMER=y
1831CONFIG_SND_PCM=y
1832CONFIG_SND_HWDEP=y
1833CONFIG_SND_RAWMIDI=m
1834CONFIG_SND_JACK=y
1835CONFIG_SND_SEQUENCER=y
1836CONFIG_SND_SEQ_DUMMY=y
1837# CONFIG_SND_MIXER_OSS is not set
1838# CONFIG_SND_PCM_OSS is not set
1839# CONFIG_SND_SEQUENCER_OSS is not set
1840# CONFIG_SND_HRTIMER is not set
1841CONFIG_SND_DYNAMIC_MINORS=y
1842# CONFIG_SND_SUPPORT_OLD_API is not set
1843CONFIG_SND_VERBOSE_PROCFS=y
1844CONFIG_SND_VERBOSE_PRINTK=y
1845CONFIG_SND_DEBUG=y
1846# CONFIG_SND_DEBUG_VERBOSE is not set
1847CONFIG_SND_PCM_XRUN_DEBUG=y
1848CONFIG_SND_VMASTER=y
1849CONFIG_SND_AC97_CODEC=y
1850CONFIG_SND_DRIVERS=y
1851# CONFIG_SND_PCSP is not set
1852# CONFIG_SND_DUMMY is not set
1853# CONFIG_SND_VIRMIDI is not set
1854# CONFIG_SND_MTPAV is not set
1855# CONFIG_SND_SERIAL_U16550 is not set
1856# CONFIG_SND_MPU401 is not set
1857CONFIG_SND_AC97_POWER_SAVE=y
1858CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5
1859CONFIG_SND_PCI=y
1860# CONFIG_SND_AD1889 is not set
1861# CONFIG_SND_ALS300 is not set
1862# CONFIG_SND_ALS4000 is not set
1863# CONFIG_SND_ALI5451 is not set
1864# CONFIG_SND_ATIIXP is not set
1865# CONFIG_SND_ATIIXP_MODEM is not set
1866# CONFIG_SND_AU8810 is not set
1867# CONFIG_SND_AU8820 is not set
1868# CONFIG_SND_AU8830 is not set
1869# CONFIG_SND_AW2 is not set
1870# CONFIG_SND_AZT3328 is not set
1871# CONFIG_SND_BT87X is not set
1872# CONFIG_SND_CA0106 is not set
1873# CONFIG_SND_CMIPCI is not set
1874# CONFIG_SND_OXYGEN is not set
1875# CONFIG_SND_CS4281 is not set
1876# CONFIG_SND_CS46XX is not set
1877# CONFIG_SND_CS5530 is not set
1878# CONFIG_SND_CS5535AUDIO is not set
1879# CONFIG_SND_DARLA20 is not set
1880# CONFIG_SND_GINA20 is not set
1881# CONFIG_SND_LAYLA20 is not set
1882# CONFIG_SND_DARLA24 is not set
1883# CONFIG_SND_GINA24 is not set
1884# CONFIG_SND_LAYLA24 is not set
1885# CONFIG_SND_MONA is not set
1886# CONFIG_SND_MIA is not set
1887# CONFIG_SND_ECHO3G is not set
1888# CONFIG_SND_INDIGO is not set
1889# CONFIG_SND_INDIGOIO is not set
1890# CONFIG_SND_INDIGODJ is not set
1891# CONFIG_SND_EMU10K1 is not set
1892# CONFIG_SND_EMU10K1X is not set
1893# CONFIG_SND_ENS1370 is not set
1894# CONFIG_SND_ENS1371 is not set
1895# CONFIG_SND_ES1938 is not set
1896# CONFIG_SND_ES1968 is not set
1897# CONFIG_SND_FM801 is not set
1898CONFIG_SND_HDA_INTEL=y
1899CONFIG_SND_HDA_HWDEP=y
1900# CONFIG_SND_HDA_RECONFIG is not set
1901# CONFIG_SND_HDA_INPUT_BEEP is not set
1902CONFIG_SND_HDA_CODEC_REALTEK=y
1903CONFIG_SND_HDA_CODEC_ANALOG=y
1904CONFIG_SND_HDA_CODEC_SIGMATEL=y
1905CONFIG_SND_HDA_CODEC_VIA=y
1906CONFIG_SND_HDA_CODEC_ATIHDMI=y
1907CONFIG_SND_HDA_CODEC_NVHDMI=y
1908CONFIG_SND_HDA_CODEC_INTELHDMI=y
1909CONFIG_SND_HDA_ELD=y
1910CONFIG_SND_HDA_CODEC_CONEXANT=y
1911CONFIG_SND_HDA_CODEC_CMEDIA=y
1912CONFIG_SND_HDA_CODEC_SI3054=y
1913CONFIG_SND_HDA_GENERIC=y
1914CONFIG_SND_HDA_POWER_SAVE=y
1915CONFIG_SND_HDA_POWER_SAVE_DEFAULT=5
1916# CONFIG_SND_HDSP is not set
1917# CONFIG_SND_HDSPM is not set
1918# CONFIG_SND_HIFIER is not set
1919# CONFIG_SND_ICE1712 is not set
1920# CONFIG_SND_ICE1724 is not set
1921CONFIG_SND_INTEL8X0=y
1922# CONFIG_SND_INTEL8X0M is not set
1923# CONFIG_SND_KORG1212 is not set
1924# CONFIG_SND_MAESTRO3 is not set
1925# CONFIG_SND_MIXART is not set
1926# CONFIG_SND_NM256 is not set
1927# CONFIG_SND_PCXHR is not set
1928# CONFIG_SND_RIPTIDE is not set
1929# CONFIG_SND_RME32 is not set
1930# CONFIG_SND_RME96 is not set
1931# CONFIG_SND_RME9652 is not set
1932# CONFIG_SND_SIS7019 is not set
1933# CONFIG_SND_SONICVIBES is not set
1934# CONFIG_SND_TRIDENT is not set
1935# CONFIG_SND_VIA82XX is not set
1936# CONFIG_SND_VIA82XX_MODEM is not set
1937# CONFIG_SND_VIRTUOSO is not set
1938# CONFIG_SND_VX222 is not set
1939# CONFIG_SND_YMFPCI is not set
1940CONFIG_SND_USB=y
1941CONFIG_SND_USB_AUDIO=m
1942CONFIG_SND_USB_USX2Y=m
1943CONFIG_SND_USB_CAIAQ=m
1944CONFIG_SND_USB_CAIAQ_INPUT=y
1945# CONFIG_SND_USB_US122L is not set
1946# CONFIG_SND_SOC is not set
1947# CONFIG_SOUND_PRIME is not set
1948CONFIG_AC97_BUS=y
1949CONFIG_HID_SUPPORT=y
1950CONFIG_HID=y
1951CONFIG_HID_DEBUG=y
1952CONFIG_HIDRAW=y
1953
1954#
1955# USB Input Devices
1956#
1957CONFIG_USB_HID=y
1958CONFIG_HID_PID=y
1959CONFIG_USB_HIDDEV=y
1960
1961#
1962# Special HID drivers
1963#
1964CONFIG_HID_COMPAT=y
1965CONFIG_HID_A4TECH=y
1966CONFIG_HID_APPLE=y
1967CONFIG_HID_BELKIN=y
1968CONFIG_HID_BRIGHT=y
1969CONFIG_HID_CHERRY=y
1970CONFIG_HID_CHICONY=y
1971CONFIG_HID_CYPRESS=y
1972CONFIG_HID_DELL=y
1973CONFIG_HID_EZKEY=y
1974CONFIG_HID_GYRATION=y
1975CONFIG_HID_LOGITECH=y
1976CONFIG_LOGITECH_FF=y
1977# CONFIG_LOGIRUMBLEPAD2_FF is not set
1978CONFIG_HID_MICROSOFT=y
1979CONFIG_HID_MONTEREY=y
1980CONFIG_HID_NTRIG=y
1981CONFIG_HID_PANTHERLORD=y
1982CONFIG_PANTHERLORD_FF=y
1983CONFIG_HID_PETALYNX=y
1984CONFIG_HID_SAMSUNG=y
1985CONFIG_HID_SONY=y
1986CONFIG_HID_SUNPLUS=y
1987# CONFIG_GREENASIA_FF is not set
1988CONFIG_HID_TOPSEED=y
1989CONFIG_THRUSTMASTER_FF=y
1990CONFIG_ZEROPLUS_FF=y
1991CONFIG_USB_SUPPORT=y
1992CONFIG_USB_ARCH_HAS_HCD=y
1993CONFIG_USB_ARCH_HAS_OHCI=y
1994CONFIG_USB_ARCH_HAS_EHCI=y
1995CONFIG_USB=y
1996# CONFIG_USB_DEBUG is not set
1997CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
1998
1999#
2000# Miscellaneous USB options
2001#
2002CONFIG_USB_DEVICEFS=y
2003# CONFIG_USB_DEVICE_CLASS is not set
2004# CONFIG_USB_DYNAMIC_MINORS is not set
2005CONFIG_USB_SUSPEND=y
2006# CONFIG_USB_OTG is not set
2007CONFIG_USB_MON=y
2008# CONFIG_USB_WUSB is not set
2009# CONFIG_USB_WUSB_CBAF is not set
2010
2011#
2012# USB Host Controller Drivers
2013#
2014# CONFIG_USB_C67X00_HCD is not set
2015CONFIG_USB_EHCI_HCD=y
2016CONFIG_USB_EHCI_ROOT_HUB_TT=y
2017CONFIG_USB_EHCI_TT_NEWSCHED=y
2018# CONFIG_USB_OXU210HP_HCD is not set
2019CONFIG_USB_ISP116X_HCD=m
2020# CONFIG_USB_ISP1760_HCD is not set
2021CONFIG_USB_OHCI_HCD=y
2022# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
2023# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
2024CONFIG_USB_OHCI_LITTLE_ENDIAN=y
2025CONFIG_USB_UHCI_HCD=y
2026CONFIG_USB_U132_HCD=m
2027CONFIG_USB_SL811_HCD=m
2028# CONFIG_USB_R8A66597_HCD is not set
2029CONFIG_USB_WHCI_HCD=m
2030CONFIG_USB_HWA_HCD=m
2031
2032#
2033# USB Device Class drivers
2034#
2035CONFIG_USB_ACM=m
2036CONFIG_USB_PRINTER=m
2037CONFIG_USB_WDM=m
2038# CONFIG_USB_TMC is not set
2039
2040#
2041# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
2042#
2043
2044#
2045# see USB_STORAGE Help for more information
2046#
2047CONFIG_USB_STORAGE=y
2048# CONFIG_USB_STORAGE_DEBUG is not set
2049CONFIG_USB_STORAGE_DATAFAB=y
2050CONFIG_USB_STORAGE_FREECOM=y
2051CONFIG_USB_STORAGE_ISD200=y
2052CONFIG_USB_STORAGE_USBAT=y
2053CONFIG_USB_STORAGE_SDDR09=y
2054CONFIG_USB_STORAGE_SDDR55=y
2055CONFIG_USB_STORAGE_JUMPSHOT=y
2056CONFIG_USB_STORAGE_ALAUDA=y
2057# CONFIG_USB_STORAGE_ONETOUCH is not set
2058CONFIG_USB_STORAGE_KARMA=y
2059# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
2060CONFIG_USB_LIBUSUAL=y
2061
2062#
2063# USB Imaging devices
2064#
2065CONFIG_USB_MDC800=m
2066CONFIG_USB_MICROTEK=m
2067
2068#
2069# USB port drivers
2070#
2071CONFIG_USB_SERIAL=m
2072CONFIG_USB_EZUSB=y
2073CONFIG_USB_SERIAL_GENERIC=y
2074CONFIG_USB_SERIAL_AIRCABLE=m
2075CONFIG_USB_SERIAL_ARK3116=m
2076CONFIG_USB_SERIAL_BELKIN=m
2077CONFIG_USB_SERIAL_CH341=m
2078CONFIG_USB_SERIAL_WHITEHEAT=m
2079CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
2080CONFIG_USB_SERIAL_CP2101=m
2081CONFIG_USB_SERIAL_CYPRESS_M8=m
2082CONFIG_USB_SERIAL_EMPEG=m
2083CONFIG_USB_SERIAL_FTDI_SIO=m
2084CONFIG_USB_SERIAL_FUNSOFT=m
2085CONFIG_USB_SERIAL_VISOR=m
2086CONFIG_USB_SERIAL_IPAQ=m
2087CONFIG_USB_SERIAL_IR=m
2088CONFIG_USB_SERIAL_EDGEPORT=m
2089CONFIG_USB_SERIAL_EDGEPORT_TI=m
2090CONFIG_USB_SERIAL_GARMIN=m
2091CONFIG_USB_SERIAL_IPW=m
2092CONFIG_USB_SERIAL_IUU=m
2093CONFIG_USB_SERIAL_KEYSPAN_PDA=m
2094CONFIG_USB_SERIAL_KEYSPAN=m
2095CONFIG_USB_SERIAL_KEYSPAN_MPR=y
2096CONFIG_USB_SERIAL_KEYSPAN_USA28=y
2097CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
2098CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
2099CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
2100CONFIG_USB_SERIAL_KEYSPAN_USA19=y
2101CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
2102CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
2103CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
2104CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
2105CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
2106CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
2107CONFIG_USB_SERIAL_KLSI=m
2108CONFIG_USB_SERIAL_KOBIL_SCT=m
2109CONFIG_USB_SERIAL_MCT_U232=m
2110CONFIG_USB_SERIAL_MOS7720=m
2111CONFIG_USB_SERIAL_MOS7840=m
2112# CONFIG_USB_SERIAL_MOTOROLA is not set
2113CONFIG_USB_SERIAL_NAVMAN=m
2114CONFIG_USB_SERIAL_PL2303=m
2115CONFIG_USB_SERIAL_OTI6858=m
2116# CONFIG_USB_SERIAL_SPCP8X5 is not set
2117CONFIG_USB_SERIAL_HP4X=m
2118CONFIG_USB_SERIAL_SAFE=m
2119CONFIG_USB_SERIAL_SAFE_PADDED=y
2120# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
2121CONFIG_USB_SERIAL_SIERRAWIRELESS=m
2122CONFIG_USB_SERIAL_TI=m
2123CONFIG_USB_SERIAL_CYBERJACK=m
2124CONFIG_USB_SERIAL_XIRCOM=m
2125CONFIG_USB_SERIAL_OPTION=m
2126CONFIG_USB_SERIAL_OMNINET=m
2127# CONFIG_USB_SERIAL_OPTICON is not set
2128CONFIG_USB_SERIAL_DEBUG=m
2129
2130#
2131# USB Miscellaneous drivers
2132#
2133CONFIG_USB_EMI62=m
2134CONFIG_USB_EMI26=m
2135CONFIG_USB_ADUTUX=m
2136# CONFIG_USB_SEVSEG is not set
2137# CONFIG_USB_RIO500 is not set
2138CONFIG_USB_LEGOTOWER=m
2139CONFIG_USB_LCD=m
2140CONFIG_USB_BERRY_CHARGE=m
2141CONFIG_USB_LED=m
2142# CONFIG_USB_CYPRESS_CY7C63 is not set
2143# CONFIG_USB_CYTHERM is not set
2144CONFIG_USB_PHIDGET=m
2145CONFIG_USB_PHIDGETKIT=m
2146CONFIG_USB_PHIDGETMOTORCONTROL=m
2147CONFIG_USB_PHIDGETSERVO=m
2148CONFIG_USB_IDMOUSE=m
2149CONFIG_USB_FTDI_ELAN=m
2150CONFIG_USB_APPLEDISPLAY=m
2151CONFIG_USB_SISUSBVGA=m
2152CONFIG_USB_SISUSBVGA_CON=y
2153CONFIG_USB_LD=m
2154CONFIG_USB_TRANCEVIBRATOR=m
2155CONFIG_USB_IOWARRIOR=m
2156# CONFIG_USB_TEST is not set
2157# CONFIG_USB_ISIGHTFW is not set
2158# CONFIG_USB_VST is not set
2159# CONFIG_USB_GADGET is not set
2160
2161#
2162# OTG and related infrastructure
2163#
2164# CONFIG_UWB is not set
2165CONFIG_MMC=m
2166# CONFIG_MMC_DEBUG is not set
2167# CONFIG_MMC_UNSAFE_RESUME is not set
2168
2169#
2170# MMC/SD/SDIO Card Drivers
2171#
2172CONFIG_MMC_BLOCK=y
2173CONFIG_MMC_BLOCK_BOUNCE=y
2174CONFIG_SDIO_UART=m
2175# CONFIG_MMC_TEST is not set
2176
2177#
2178# MMC/SD/SDIO Host Controller Drivers
2179#
2180CONFIG_MMC_SDHCI=y
2181CONFIG_MMC_SDHCI_PCI=y
2182# CONFIG_MMC_RICOH_MMC is not set
2183CONFIG_MMC_WBSD=m
2184CONFIG_MMC_TIFM_SD=m
2185CONFIG_MEMSTICK=m
2186CONFIG_MEMSTICK_DEBUG=y
2187
2188#
2189# MemoryStick drivers
2190#
2191# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
2192CONFIG_MSPRO_BLOCK=m
2193
2194#
2195# MemoryStick Host Controller Drivers
2196#
2197# CONFIG_MEMSTICK_TIFM_MS is not set
2198# CONFIG_MEMSTICK_JMICRON_38X is not set
2199CONFIG_NEW_LEDS=y
2200CONFIG_LEDS_CLASS=y
2201# CONFIG_MMC_CEATA_WR is not set
2202# CONFIG_MMC_SPI is not set
2203
2204#
2205# LED drivers
2206#
2207# CONFIG_LEDS_ALIX2 is not set
2208# CONFIG_LEDS_PCA9532 is not set
2209# CONFIG_LEDS_CLEVO_MAIL is not set
2210# CONFIG_LEDS_PCA955X is not set
2211
2212#
2213# LED Triggers
2214#
2215CONFIG_LEDS_TRIGGERS=y
2216# CONFIG_LEDS_TRIGGER_TIMER is not set
2217# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
2218# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
2219# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
2220# CONFIG_ACCESSIBILITY is not set
2221# CONFIG_INFINIBAND is not set
2222# CONFIG_EDAC is not set
2223CONFIG_RTC_LIB=y
2224CONFIG_RTC_CLASS=y
2225# CONFIG_RTC_HCTOSYS is not set
2226# CONFIG_RTC_DEBUG is not set
2227
2228#
2229# RTC interfaces
2230#
2231CONFIG_RTC_INTF_SYSFS=y
2232CONFIG_RTC_INTF_PROC=y
2233CONFIG_RTC_INTF_DEV=y
2234# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
2235# CONFIG_RTC_DRV_TEST is not set
2236
2237#
2238# I2C RTC drivers
2239#
2240# CONFIG_RTC_DRV_DS1307 is not set
2241# CONFIG_RTC_DRV_DS1374 is not set
2242# CONFIG_RTC_DRV_DS1672 is not set
2243# CONFIG_RTC_DRV_MAX6900 is not set
2244# CONFIG_RTC_DRV_RS5C372 is not set
2245# CONFIG_RTC_DRV_ISL1208 is not set
2246# CONFIG_RTC_DRV_X1205 is not set
2247# CONFIG_RTC_DRV_PCF8563 is not set
2248# CONFIG_RTC_DRV_PCF8583 is not set
2249# CONFIG_RTC_DRV_M41T80 is not set
2250# CONFIG_RTC_DRV_S35390A is not set
2251# CONFIG_RTC_DRV_FM3130 is not set
2252# CONFIG_RTC_DRV_RX8581 is not set
2253
2254#
2255# SPI RTC drivers
2256#
2257
2258#
2259# Platform RTC drivers
2260#
2261CONFIG_RTC_DRV_CMOS=y
2262# CONFIG_RTC_DRV_DS1286 is not set
2263# CONFIG_RTC_DRV_DS1511 is not set
2264# CONFIG_RTC_DRV_DS1553 is not set
2265# CONFIG_RTC_DRV_DS1742 is not set
2266# CONFIG_RTC_DRV_STK17TA8 is not set
2267# CONFIG_RTC_DRV_M48T86 is not set
2268# CONFIG_RTC_DRV_M48T35 is not set
2269# CONFIG_RTC_DRV_M48T59 is not set
2270# CONFIG_RTC_DRV_BQ4802 is not set
2271# CONFIG_RTC_DRV_V3020 is not set
2272
2273#
2274# on-CPU RTC drivers
2275#
2276# CONFIG_DMADEVICES is not set
2277# CONFIG_UIO is not set
2278CONFIG_STAGING=y
2279# CONFIG_STAGING_EXCLUDE_BUILD is not set
2280# CONFIG_ET131X is not set
2281# CONFIG_SLICOSS is not set
2282# CONFIG_ME4000 is not set
2283# CONFIG_MEILHAUS is not set
2284# CONFIG_VIDEO_GO7007 is not set
2285# CONFIG_USB_IP_COMMON is not set
2286# CONFIG_W35UND is not set
2287# CONFIG_PRISM2_USB is not set
2288# CONFIG_ECHO is not set
2289# CONFIG_USB_ATMEL is not set
2290# CONFIG_AGNX is not set
2291# CONFIG_OTUS is not set
2292CONFIG_RT2860=m
2293# CONFIG_RT2870 is not set
2294# CONFIG_COMEDI is not set
2295# CONFIG_ASUS_OLED is not set
2296# CONFIG_ALTERA_PCIE_CHDMA is not set
2297# CONFIG_RTL8187SE is not set
2298# CONFIG_INPUT_MIMIO is not set
2299# CONFIG_TRANZPORT is not set
2300# CONFIG_EPL is not set
2301
2302#
2303# Android
2304#
2305# CONFIG_ANDROID is not set
2306# CONFIG_ANDROID_BINDER_IPC is not set
2307# CONFIG_ANDROID_LOGGER is not set
2308# CONFIG_ANDROID_RAM_CONSOLE is not set
2309# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set
2310CONFIG_X86_PLATFORM_DEVICES=y
2311# CONFIG_ACER_WMI is not set
2312# CONFIG_FUJITSU_LAPTOP is not set
2313# CONFIG_TC1100_WMI is not set
2314# CONFIG_HP_WMI is not set
2315# CONFIG_MSI_LAPTOP is not set
2316# CONFIG_PANASONIC_LAPTOP is not set
2317# CONFIG_COMPAL_LAPTOP is not set
2318# CONFIG_SONY_LAPTOP is not set
2319# CONFIG_THINKPAD_ACPI is not set
2320# CONFIG_INTEL_MENLOW is not set
2321CONFIG_EEEPC_LAPTOP=y
2322CONFIG_ACPI_WMI=m
2323CONFIG_ACPI_ASUS=y
2324# CONFIG_ACPI_TOSHIBA is not set
2325
2326#
2327# Firmware Drivers
2328#
2329# CONFIG_EDD is not set
2330CONFIG_FIRMWARE_MEMMAP=y
2331# CONFIG_DELL_RBU is not set
2332# CONFIG_DCDBAS is not set
2333# CONFIG_DMIID is not set
2334# CONFIG_ISCSI_IBFT_FIND is not set
2335
2336#
2337# File systems
2338#
2339CONFIG_EXT2_FS=y
2340# CONFIG_EXT2_FS_XATTR is not set
2341# CONFIG_EXT2_FS_XIP is not set
2342CONFIG_EXT3_FS=y
2343CONFIG_EXT3_FS_XATTR=y
2344CONFIG_EXT3_FS_POSIX_ACL=y
2345CONFIG_EXT3_FS_SECURITY=y
2346# CONFIG_EXT4_FS is not set
2347CONFIG_JBD=y
2348# CONFIG_JBD_DEBUG is not set
2349CONFIG_FS_MBCACHE=y
2350# CONFIG_REISERFS_FS is not set
2351# CONFIG_JFS_FS is not set
2352CONFIG_FS_POSIX_ACL=y
2353CONFIG_FILE_LOCKING=y
2354# CONFIG_XFS_FS is not set
2355# CONFIG_OCFS2_FS is not set
2356# CONFIG_BTRFS_FS is not set
2357CONFIG_DNOTIFY=y
2358CONFIG_INOTIFY=y
2359CONFIG_INOTIFY_USER=y
2360# CONFIG_QUOTA is not set
2361# CONFIG_AUTOFS_FS is not set
2362# CONFIG_AUTOFS4_FS is not set
2363CONFIG_FUSE_FS=m
2364CONFIG_GENERIC_ACL=y
2365
2366#
2367# CD-ROM/DVD Filesystems
2368#
2369CONFIG_ISO9660_FS=y
2370CONFIG_JOLIET=y
2371CONFIG_ZISOFS=y
2372CONFIG_UDF_FS=m
2373CONFIG_UDF_NLS=y
2374
2375#
2376# DOS/FAT/NT Filesystems
2377#
2378CONFIG_FAT_FS=y
2379CONFIG_MSDOS_FS=y
2380CONFIG_VFAT_FS=y
2381CONFIG_FAT_DEFAULT_CODEPAGE=437
2382CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
2383# CONFIG_NTFS_FS is not set
2384
2385#
2386# Pseudo filesystems
2387#
2388CONFIG_PROC_FS=y
2389CONFIG_PROC_KCORE=y
2390CONFIG_PROC_VMCORE=y
2391CONFIG_PROC_SYSCTL=y
2392CONFIG_PROC_PAGE_MONITOR=y
2393CONFIG_SYSFS=y
2394CONFIG_TMPFS=y
2395CONFIG_TMPFS_POSIX_ACL=y
2396CONFIG_HUGETLBFS=y
2397CONFIG_HUGETLB_PAGE=y
2398CONFIG_CONFIGFS_FS=m
2399CONFIG_MISC_FILESYSTEMS=y
2400# CONFIG_ADFS_FS is not set
2401# CONFIG_AFFS_FS is not set
2402# CONFIG_ECRYPT_FS is not set
2403# CONFIG_HFS_FS is not set
2404# CONFIG_HFSPLUS_FS is not set
2405# CONFIG_BEFS_FS is not set
2406# CONFIG_BFS_FS is not set
2407# CONFIG_EFS_FS is not set
2408# CONFIG_CRAMFS is not set
2409CONFIG_SQUASHFS=y
2410# CONFIG_SQUASHFS_EMBEDDED is not set
2411CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
2412# CONFIG_VXFS_FS is not set
2413# CONFIG_MINIX_FS is not set
2414# CONFIG_OMFS_FS is not set
2415# CONFIG_HPFS_FS is not set
2416# CONFIG_QNX4FS_FS is not set
2417# CONFIG_ROMFS_FS is not set
2418# CONFIG_SYSV_FS is not set
2419# CONFIG_UFS_FS is not set
2420CONFIG_NETWORK_FILESYSTEMS=y
2421# CONFIG_NFS_FS is not set
2422# CONFIG_NFSD is not set
2423# CONFIG_SMB_FS is not set
2424CONFIG_CIFS=m
2425# CONFIG_CIFS_STATS is not set
2426CONFIG_CIFS_WEAK_PW_HASH=y
2427# CONFIG_CIFS_XATTR is not set
2428# CONFIG_CIFS_DEBUG2 is not set
2429# CONFIG_CIFS_EXPERIMENTAL is not set
2430# CONFIG_NCP_FS is not set
2431# CONFIG_CODA_FS is not set
2432# CONFIG_AFS_FS is not set
2433
2434#
2435# Partition Types
2436#
2437CONFIG_PARTITION_ADVANCED=y
2438# CONFIG_ACORN_PARTITION is not set
2439# CONFIG_OSF_PARTITION is not set
2440# CONFIG_AMIGA_PARTITION is not set
2441# CONFIG_ATARI_PARTITION is not set
2442# CONFIG_MAC_PARTITION is not set
2443CONFIG_MSDOS_PARTITION=y
2444CONFIG_BSD_DISKLABEL=y
2445# CONFIG_MINIX_SUBPARTITION is not set
2446# CONFIG_SOLARIS_X86_PARTITION is not set
2447# CONFIG_UNIXWARE_DISKLABEL is not set
2448CONFIG_LDM_PARTITION=y
2449# CONFIG_LDM_DEBUG is not set
2450# CONFIG_SGI_PARTITION is not set
2451# CONFIG_ULTRIX_PARTITION is not set
2452# CONFIG_SUN_PARTITION is not set
2453# CONFIG_KARMA_PARTITION is not set
2454CONFIG_EFI_PARTITION=y
2455# CONFIG_SYSV68_PARTITION is not set
2456CONFIG_NLS=y
2457CONFIG_NLS_DEFAULT="utf8"
2458CONFIG_NLS_CODEPAGE_437=y
2459CONFIG_NLS_CODEPAGE_737=m
2460CONFIG_NLS_CODEPAGE_775=m
2461CONFIG_NLS_CODEPAGE_850=m
2462CONFIG_NLS_CODEPAGE_852=m
2463CONFIG_NLS_CODEPAGE_855=m
2464CONFIG_NLS_CODEPAGE_857=m
2465CONFIG_NLS_CODEPAGE_860=m
2466CONFIG_NLS_CODEPAGE_861=m
2467CONFIG_NLS_CODEPAGE_862=m
2468CONFIG_NLS_CODEPAGE_863=m
2469CONFIG_NLS_CODEPAGE_864=m
2470CONFIG_NLS_CODEPAGE_865=m
2471CONFIG_NLS_CODEPAGE_866=m
2472CONFIG_NLS_CODEPAGE_869=m
2473CONFIG_NLS_CODEPAGE_936=m
2474CONFIG_NLS_CODEPAGE_950=m
2475CONFIG_NLS_CODEPAGE_932=m
2476CONFIG_NLS_CODEPAGE_949=m
2477CONFIG_NLS_CODEPAGE_874=m
2478CONFIG_NLS_ISO8859_8=m
2479CONFIG_NLS_CODEPAGE_1250=m
2480CONFIG_NLS_CODEPAGE_1251=m
2481CONFIG_NLS_ASCII=y
2482CONFIG_NLS_ISO8859_1=m
2483CONFIG_NLS_ISO8859_2=m
2484CONFIG_NLS_ISO8859_3=m
2485CONFIG_NLS_ISO8859_4=m
2486CONFIG_NLS_ISO8859_5=m
2487CONFIG_NLS_ISO8859_6=m
2488CONFIG_NLS_ISO8859_7=m
2489CONFIG_NLS_ISO8859_9=m
2490CONFIG_NLS_ISO8859_13=m
2491CONFIG_NLS_ISO8859_14=m
2492CONFIG_NLS_ISO8859_15=m
2493CONFIG_NLS_KOI8_R=m
2494CONFIG_NLS_KOI8_U=m
2495CONFIG_NLS_UTF8=m
2496# CONFIG_DLM is not set
2497
2498#
2499# Kernel hacking
2500#
2501CONFIG_TRACE_IRQFLAGS_SUPPORT=y
2502CONFIG_PRINTK_TIME=y
2503# CONFIG_ENABLE_WARN_DEPRECATED is not set
2504CONFIG_ENABLE_MUST_CHECK=y
2505CONFIG_FRAME_WARN=1024
2506CONFIG_MAGIC_SYSRQ=y
2507# CONFIG_UNUSED_SYMBOLS is not set
2508CONFIG_DEBUG_FS=y
2509# CONFIG_HEADERS_CHECK is not set
2510CONFIG_DEBUG_KERNEL=y
2511CONFIG_DEBUG_SHIRQ=y
2512CONFIG_DETECT_SOFTLOCKUP=y
2513# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
2514CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
2515CONFIG_SCHED_DEBUG=y
2516CONFIG_SCHEDSTATS=y
2517CONFIG_TIMER_STATS=y
2518# CONFIG_DEBUG_OBJECTS is not set
2519# CONFIG_DEBUG_SLAB is not set
2520# CONFIG_DEBUG_RT_MUTEXES is not set
2521# CONFIG_RT_MUTEX_TESTER is not set
2522# CONFIG_DEBUG_SPINLOCK is not set
2523# CONFIG_DEBUG_MUTEXES is not set
2524# CONFIG_DEBUG_LOCK_ALLOC is not set
2525# CONFIG_PROVE_LOCKING is not set
2526# CONFIG_LOCK_STAT is not set
2527CONFIG_DEBUG_SPINLOCK_SLEEP=y
2528# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
2529CONFIG_STACKTRACE=y
2530# CONFIG_DEBUG_KOBJECT is not set
2531# CONFIG_DEBUG_HIGHMEM is not set
2532CONFIG_DEBUG_BUGVERBOSE=y
2533# CONFIG_DEBUG_INFO is not set
2534# CONFIG_DEBUG_VM is not set
2535# CONFIG_DEBUG_VIRTUAL is not set
2536# CONFIG_DEBUG_WRITECOUNT is not set
2537CONFIG_DEBUG_MEMORY_INIT=y
2538CONFIG_DEBUG_LIST=y
2539# CONFIG_DEBUG_SG is not set
2540# CONFIG_DEBUG_NOTIFIERS is not set
2541CONFIG_ARCH_WANT_FRAME_POINTERS=y
2542CONFIG_FRAME_POINTER=y
2543CONFIG_BOOT_PRINTK_DELAY=y
2544# CONFIG_RCU_TORTURE_TEST is not set
2545# CONFIG_RCU_CPU_STALL_DETECTOR is not set
2546# CONFIG_BACKTRACE_SELF_TEST is not set
2547# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
2548# CONFIG_FAULT_INJECTION is not set
2549CONFIG_LATENCYTOP=y
2550CONFIG_SYSCTL_SYSCALL_CHECK=y
2551CONFIG_USER_STACKTRACE_SUPPORT=y
2552CONFIG_NOP_TRACER=y
2553CONFIG_HAVE_FUNCTION_TRACER=y
2554CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
2555CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
2556CONFIG_HAVE_DYNAMIC_FTRACE=y
2557CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
2558CONFIG_HAVE_HW_BRANCH_TRACER=y
2559CONFIG_RING_BUFFER=y
2560CONFIG_TRACING=y
2561
2562#
2563# Tracers
2564#
2565# CONFIG_FUNCTION_TRACER is not set
2566# CONFIG_IRQSOFF_TRACER is not set
2567CONFIG_SYSPROF_TRACER=y
2568# CONFIG_SCHED_TRACER is not set
2569# CONFIG_CONTEXT_SWITCH_TRACER is not set
2570# CONFIG_BOOT_TRACER is not set
2571# CONFIG_TRACE_BRANCH_PROFILING is not set
2572# CONFIG_POWER_TRACER is not set
2573# CONFIG_STACK_TRACER is not set
2574# CONFIG_HW_BRANCH_TRACER is not set
2575# CONFIG_FTRACE_STARTUP_TEST is not set
2576# CONFIG_MMIOTRACE is not set
2577# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
2578# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
2579# CONFIG_SAMPLES is not set
2580CONFIG_HAVE_ARCH_KGDB=y
2581# CONFIG_KGDB is not set
2582# CONFIG_STRICT_DEVMEM is not set
2583CONFIG_X86_VERBOSE_BOOTUP=y
2584CONFIG_EARLY_PRINTK=y
2585# CONFIG_EARLY_PRINTK_DBGP is not set
2586# CONFIG_DEBUG_STACKOVERFLOW is not set
2587# CONFIG_DEBUG_STACK_USAGE is not set
2588# CONFIG_DEBUG_PAGEALLOC is not set
2589# CONFIG_DEBUG_PER_CPU_MAPS is not set
2590CONFIG_X86_PTDUMP=y
2591CONFIG_DEBUG_RODATA=y
2592# CONFIG_DEBUG_RODATA_TEST is not set
2593# CONFIG_DEBUG_NX_TEST is not set
2594# CONFIG_4KSTACKS is not set
2595CONFIG_DOUBLEFAULT=y
2596CONFIG_HAVE_MMIOTRACE_SUPPORT=y
2597CONFIG_IO_DELAY_TYPE_0X80=0
2598CONFIG_IO_DELAY_TYPE_0XED=1
2599CONFIG_IO_DELAY_TYPE_UDELAY=2
2600CONFIG_IO_DELAY_TYPE_NONE=3
2601CONFIG_IO_DELAY_0X80=y
2602# CONFIG_IO_DELAY_0XED is not set
2603# CONFIG_IO_DELAY_UDELAY is not set
2604# CONFIG_IO_DELAY_NONE is not set
2605CONFIG_DEFAULT_IO_DELAY_TYPE=0
2606CONFIG_DEBUG_BOOT_PARAMS=y
2607# CONFIG_CPA_DEBUG is not set
2608# CONFIG_OPTIMIZE_INLINING is not set
2609
2610#
2611# Security options
2612#
2613# CONFIG_KEYS is not set
2614# CONFIG_SECURITY is not set
2615# CONFIG_SECURITYFS is not set
2616# CONFIG_SECURITY_FILE_CAPABILITIES is not set
2617CONFIG_CRYPTO=y
2618
2619#
2620# Crypto core or helper
2621#
2622# CONFIG_CRYPTO_FIPS is not set
2623CONFIG_CRYPTO_ALGAPI=y
2624CONFIG_CRYPTO_ALGAPI2=y
2625CONFIG_CRYPTO_AEAD=m
2626CONFIG_CRYPTO_AEAD2=y
2627CONFIG_CRYPTO_BLKCIPHER=m
2628CONFIG_CRYPTO_BLKCIPHER2=y
2629CONFIG_CRYPTO_HASH=y
2630CONFIG_CRYPTO_HASH2=y
2631CONFIG_CRYPTO_RNG=m
2632CONFIG_CRYPTO_RNG2=y
2633CONFIG_CRYPTO_MANAGER=y
2634CONFIG_CRYPTO_MANAGER2=y
2635CONFIG_CRYPTO_GF128MUL=m
2636CONFIG_CRYPTO_NULL=m
2637# CONFIG_CRYPTO_CRYPTD is not set
2638CONFIG_CRYPTO_AUTHENC=m
2639CONFIG_CRYPTO_TEST=m
2640
2641#
2642# Authenticated Encryption with Associated Data
2643#
2644CONFIG_CRYPTO_CCM=m
2645CONFIG_CRYPTO_GCM=m
2646CONFIG_CRYPTO_SEQIV=m
2647
2648#
2649# Block modes
2650#
2651CONFIG_CRYPTO_CBC=m
2652CONFIG_CRYPTO_CTR=m
2653# CONFIG_CRYPTO_CTS is not set
2654CONFIG_CRYPTO_ECB=m
2655CONFIG_CRYPTO_LRW=m
2656CONFIG_CRYPTO_PCBC=m
2657CONFIG_CRYPTO_XTS=m
2658
2659#
2660# Hash modes
2661#
2662CONFIG_CRYPTO_HMAC=y
2663CONFIG_CRYPTO_XCBC=m
2664
2665#
2666# Digest
2667#
2668CONFIG_CRYPTO_CRC32C=m
2669# CONFIG_CRYPTO_CRC32C_INTEL is not set
2670CONFIG_CRYPTO_MD4=m
2671CONFIG_CRYPTO_MD5=y
2672CONFIG_CRYPTO_MICHAEL_MIC=m
2673# CONFIG_CRYPTO_RMD128 is not set
2674# CONFIG_CRYPTO_RMD160 is not set
2675# CONFIG_CRYPTO_RMD256 is not set
2676# CONFIG_CRYPTO_RMD320 is not set
2677CONFIG_CRYPTO_SHA1=y
2678CONFIG_CRYPTO_SHA256=m
2679CONFIG_CRYPTO_SHA512=m
2680CONFIG_CRYPTO_TGR192=m
2681CONFIG_CRYPTO_WP512=m
2682
2683#
2684# Ciphers
2685#
2686CONFIG_CRYPTO_AES=y
2687CONFIG_CRYPTO_AES_586=m
2688CONFIG_CRYPTO_ANUBIS=m
2689CONFIG_CRYPTO_ARC4=y
2690CONFIG_CRYPTO_BLOWFISH=m
2691CONFIG_CRYPTO_CAMELLIA=m
2692CONFIG_CRYPTO_CAST5=m
2693CONFIG_CRYPTO_CAST6=m
2694CONFIG_CRYPTO_DES=m
2695CONFIG_CRYPTO_FCRYPT=m
2696CONFIG_CRYPTO_KHAZAD=m
2697CONFIG_CRYPTO_SALSA20=m
2698CONFIG_CRYPTO_SALSA20_586=m
2699CONFIG_CRYPTO_SEED=m
2700CONFIG_CRYPTO_SERPENT=m
2701CONFIG_CRYPTO_TEA=m
2702CONFIG_CRYPTO_TWOFISH=m
2703CONFIG_CRYPTO_TWOFISH_COMMON=m
2704CONFIG_CRYPTO_TWOFISH_586=m
2705
2706#
2707# Compression
2708#
2709CONFIG_CRYPTO_DEFLATE=m
2710# CONFIG_CRYPTO_LZO is not set
2711
2712#
2713# Random Number Generation
2714#
2715# CONFIG_CRYPTO_ANSI_CPRNG is not set
2716CONFIG_CRYPTO_HW=y
2717# CONFIG_CRYPTO_DEV_PADLOCK is not set
2718# CONFIG_CRYPTO_DEV_GEODE is not set
2719# CONFIG_CRYPTO_DEV_HIFN_795X is not set
2720CONFIG_HAVE_KVM=y
2721# CONFIG_VIRTUALIZATION is not set
2722
2723#
2724# Library routines
2725#
2726CONFIG_BITREVERSE=y
2727CONFIG_GENERIC_FIND_FIRST_BIT=y
2728CONFIG_GENERIC_FIND_NEXT_BIT=y
2729CONFIG_GENERIC_FIND_LAST_BIT=y
2730CONFIG_CRC_CCITT=m
2731CONFIG_CRC16=m
2732CONFIG_CRC_T10DIF=y
2733CONFIG_CRC_ITU_T=m
2734CONFIG_CRC32=y
2735# CONFIG_CRC7 is not set
2736CONFIG_LIBCRC32C=m
2737CONFIG_AUDIT_GENERIC=y
2738CONFIG_ZLIB_INFLATE=y
2739CONFIG_ZLIB_DEFLATE=m
2740CONFIG_TEXTSEARCH=y
2741CONFIG_TEXTSEARCH_KMP=m
2742CONFIG_TEXTSEARCH_BM=m
2743CONFIG_TEXTSEARCH_FSM=m
2744CONFIG_PLIST=y
2745CONFIG_HAS_IOMEM=y
2746CONFIG_HAS_IOPORT=y
2747CONFIG_HAS_DMA=y
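The options above are the tail of one of the removed kernel config files (sound, HID, USB, storage, filesystem, crypto and debug settings). For illustration only, a small standalone C sketch that checks a handful of such options against a generated .config; the path and the three option names are arbitrary examples, not anything the recipes actually run:

#include <stdio.h>
#include <string.h>

/* Hypothetical helper: report how a named option is set in a .config file. */
static const char *option_state(FILE *fp, const char *opt)
{
    static char line[512];
    char set_y[256], set_m[256], not_set[256];

    snprintf(set_y, sizeof(set_y), "%s=y", opt);
    snprintf(set_m, sizeof(set_m), "%s=m", opt);
    snprintf(not_set, sizeof(not_set), "# %s is not set", opt);

    rewind(fp);
    while (fgets(line, sizeof(line), fp)) {
        line[strcspn(line, "\n")] = '\0';
        if (strcmp(line, set_y) == 0)
            return "built in";
        if (strcmp(line, set_m) == 0)
            return "module";
        if (strcmp(line, not_set) == 0)
            return "disabled";
    }
    return "absent";
}

int main(void)
{
    /* Path and option names are examples only. */
    const char *opts[] = { "CONFIG_SND_HDA_INTEL", "CONFIG_USB_STORAGE", "CONFIG_EXT3_FS" };
    FILE *fp = fopen(".config", "r");
    size_t i;

    if (!fp) {
        perror(".config");
        return 1;
    }
    for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
        printf("%-24s %s\n", opts[i], option_state(fp, opts[i]));
    fclose(fp);
    return 0;
}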
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6-build-nonintconfig.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6-build-nonintconfig.patch
deleted file mode 100644
index bd65daf516..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6-build-nonintconfig.patch
+++ /dev/null
@@ -1,128 +0,0 @@
1diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
2index 32e8c5a..8020453 100644
3--- a/scripts/kconfig/Makefile
4+++ b/scripts/kconfig/Makefile
5@@ -24,6 +24,11 @@ oldconfig: $(obj)/conf
6 silentoldconfig: $(obj)/conf
7 $< -s $(Kconfig)
8
9+nonint_oldconfig: $(obj)/conf
10+ $< -b $(Kconfig)
11+loose_nonint_oldconfig: $(obj)/conf
12+ $< -B $(Kconfig)
13+
14 # Create new linux.pot file
15 # Adjust charset to UTF-8 in .po file to accept UTF-8 in Kconfig files
16 # The symlink is used to repair a deficiency in arch/um
17diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
18index fda6313..ed33b66 100644
19--- a/scripts/kconfig/conf.c
20+++ b/scripts/kconfig/conf.c
21@@ -22,6 +22,8 @@
22 ask_all,
23 ask_new,
24 ask_silent,
25+ dont_ask,
26+ dont_ask_dont_tell,
27 set_default,
28 set_yes,
29 set_mod,
30@@ -39,6 +41,8 @@
31
32 static char nohelp_text[] = N_("Sorry, no help available for this option yet.\n");
33
34+static int return_value = 0;
35+
36 static const char *get_help(struct menu *menu)
37 {
38 if (menu_has_help(menu))
39@@ -359,7 +363,10 @@
40
41 switch (prop->type) {
42 case P_MENU:
43- if (input_mode == ask_silent && rootEntry != menu) {
44+ if ((input_mode == ask_silent ||
45+ input_mode == dont_ask ||
46+ input_mode == dont_ask_dont_tell) &&
47+ rootEntry != menu) {
48 check_conf(menu);
49 return;
50 }
51@@ -417,12 +424,21 @@
52 if (sym && !sym_has_value(sym)) {
53 if (sym_is_changable(sym) ||
54 (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)) {
55+ if (input_mode == dont_ask ||
56+ input_mode == dont_ask_dont_tell) {
57+ if (input_mode == dont_ask &&
58+ sym->name && !sym_is_choice_value(sym)) {
59+ fprintf(stderr,"CONFIG_%s\n",sym->name);
60+ ++return_value;
61+ }
62+ } else {
63 if (!conf_cnt++)
64 printf(_("*\n* Restart config...\n*\n"));
65 rootEntry = menu_get_parent_menu(menu);
66 conf(rootEntry);
67 }
68 }
69+ }
70
71 for (child = menu->list; child; child = child->next)
72 check_conf(child);
73@@ -438,7 +454,7 @@
74 bindtextdomain(PACKAGE, LOCALEDIR);
75 textdomain(PACKAGE);
76
77- while ((opt = getopt(ac, av, "osdD:nmyrh")) != -1) {
78+ while ((opt = getopt(ac, av, "osbBdD:nmyrh")) != -1) {
79 switch (opt) {
80 case 'o':
81 input_mode = ask_silent;
82@@ -447,6 +463,12 @@
83 input_mode = ask_silent;
84 sync_kconfig = 1;
85 break;
86+ case 'b':
87+ input_mode = dont_ask;
88+ break;
89+ case 'B':
90+ input_mode = dont_ask_dont_tell;
91+ break;
92 case 'd':
93 input_mode = set_default;
94 break;
95@@ -510,6 +532,8 @@
96 case ask_silent:
97 case ask_all:
98 case ask_new:
99+ case dont_ask:
100+ case dont_ask_dont_tell:
101 conf_read(NULL);
102 break;
103 case set_no:
104@@ -571,12 +595,16 @@
105 conf(&rootmenu);
106 input_mode = ask_silent;
107 /* fall through */
108+ case dont_ask:
109+ case dont_ask_dont_tell:
110 case ask_silent:
111 /* Update until a loop caused no more changes */
112 do {
113 conf_cnt = 0;
114 check_conf(&rootmenu);
115- } while (conf_cnt);
116+ } while (conf_cnt &&
117+ (input_mode != dont_ask &&
118+ input_mode != dont_ask_dont_tell));
119 break;
120 }
121
122@@ -598,5 +626,5 @@
123 exit(1);
124 }
125 }
126- return 0;
127+ return return_value;
128 }
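The removed patch above teaches scripts/kconfig/conf.c two extra modes: -b (nonint_oldconfig) prints each symbol that still needs a value to stderr and exits non-zero when any were found, while -B (loose_nonint_oldconfig) silently accepts the defaults. As a rough, self-contained illustration of that option-parsing shape only (mode names borrowed from the patch, everything else invented, no kconfig logic):

#include <stdio.h>
#include <unistd.h>

/* Minimal echo of the conf.c change above: '-b' selects a mode that
 * reports missing symbols and affects the exit code, '-B' a quiet
 * variant.  This is only a parsing sketch, not a kconfig reimplementation. */
enum input_mode { ask_silent, dont_ask, dont_ask_dont_tell };

int main(int argc, char **argv)
{
    enum input_mode mode = ask_silent;
    int missing = 0;       /* stands in for the patch's return_value */
    int opt;

    while ((opt = getopt(argc, argv, "bB")) != -1) {
        switch (opt) {
        case 'b':
            mode = dont_ask;
            break;
        case 'B':
            mode = dont_ask_dont_tell;
            break;
        default:
            fprintf(stderr, "usage: %s [-b|-B]\n", argv[0]);
            return 2;
        }
    }

    if (mode == dont_ask) {
        /* the real tool prints each unset CONFIG_* symbol here */
        fprintf(stderr, "CONFIG_EXAMPLE_OPTION\n");
        missing++;
    }
    return missing;        /* non-zero when options were left unset */
}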
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.19-modesetting-by-default.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.19-modesetting-by-default.patch
deleted file mode 100644
index 32b99a99b8..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.19-modesetting-by-default.patch
+++ /dev/null
@@ -1,11 +0,0 @@
1--- linux-2.6.28/drivers/gpu/drm/i915/i915_drv.c~ 2009-02-20 21:36:06.000000000 -0800
2+++ linux-2.6.28/drivers/gpu/drm/i915/i915_drv.c 2009-02-20 21:36:06.000000000 -0800
3@@ -35,7 +35,7 @@
4 #include "drm_pciids.h"
5 #include <linux/console.h>
6
7-static unsigned int i915_modeset = -1;
8+static unsigned int i915_modeset = 1;
9 module_param_named(modeset, i915_modeset, int, 0400);
10
11 unsigned int i915_fbpercrtc = 0;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-dont-wait-for-mouse.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-dont-wait-for-mouse.patch
deleted file mode 100644
index 02a4474cae..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-dont-wait-for-mouse.patch
+++ /dev/null
@@ -1,43 +0,0 @@
1From dce8113d033975f56630cf6d2a6a908cfb66059d Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 20 Jul 2008 13:12:16 -0700
4Subject: [PATCH] fastboot: remove "wait for all devices before mounting root" delay
5
6In the non-initrd case, we wait for all devices to finish their
7probing before we try to mount the rootfs.
8In practice, this means that we end up waiting 2 extra seconds for
9the PS/2 mouse probing even though the root-holding device has been
10ready for a long time.
11
12The previous two patches in this series made the RAID autodetect code
13do its own "wait for probing to be done" code, and added
14"wait and retry" functionality in case the root device isn't actually
15available.
16
17These two changes should make it safe to remove the delay itself,
18and this patch does this. On my test laptop, this reduces the boot time
19by 2 seconds (kernel time goes from 3.9 to 1.9 seconds).
20
21Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
22---
23--- a/init/do_mounts.c 2009-01-07 18:42:10.000000000 -0800
24+++ b/init/do_mounts.c 2009-01-07 18:43:02.000000000 -0800
25@@ -370,14 +370,17 @@ void __init prepare_namespace(void)
26 ssleep(root_delay);
27 }
28
29+#if 0
30 /*
31 * wait for the known devices to complete their probing
32 *
33 * Note: this is a potential source of long boot delays.
34 * For example, it is not atypical to wait 5 seconds here
35 * for the touchpad of a laptop to initialize.
36 */
37 wait_for_device_probe();
38+#endif
39+ async_synchronize_full();
40
41 md_run_setup();
42
43
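The commit message above argues that root mounting should not be serialized behind unrelated device probes. A toy userspace sketch of that idea, using pthreads and invented timings (it has no connection to the kernel's driver core or async infrastructure):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Toy model of the boot-time trade-off described above: one fast "root
 * disk" probe and one slow "PS/2 mouse" probe.  Timings are made up. */
static void *probe_root_disk(void *arg)
{
    (void)arg;
    usleep(200 * 1000);              /* root device ready after 0.2s */
    puts("root disk probed");
    return NULL;
}

static void *probe_ps2_mouse(void *arg)
{
    (void)arg;
    sleep(2);                        /* input probing takes ~2s */
    puts("mouse probed");
    return NULL;
}

int main(void)
{
    pthread_t disk, mouse;

    pthread_create(&disk, NULL, probe_root_disk, NULL);
    pthread_create(&mouse, NULL, probe_ps2_mouse, NULL);

    /* Old behaviour: join *both* probes here before mounting root,
     * i.e. pthread_join(mouse, NULL) as well -- costing ~2s.
     * Patched idea: only synchronize with what the root fs needs. */
    pthread_join(disk, NULL);
    puts("mounting root filesystem now");

    pthread_join(mouse, NULL);       /* the slow probe finishes later */
    return 0;
}

Built with cc -pthread, the "mounting" message appears after roughly 0.2s instead of 2s.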
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch
deleted file mode 100644
index a8d68338b5..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch
+++ /dev/null
@@ -1,56 +0,0 @@
1From 2b5cde2b272f56ec67b56a2af8c067d42eff7328 Mon Sep 17 00:00:00 2001
2From: Li Peng <peng.li@intel.com>
3Date: Fri, 13 Mar 2009 10:25:07 +0800
4Subject: drm/i915: Fix LVDS dither setting
5
6Update the bdb_lvds_options structure according to its definition in the
72D driver. Then we can parse and set the 'lvds_dither' bit correctly
8on non-965 chips.
9
10Signed-off-by: Li Peng <peng.li@intel.com>
11Signed-off-by: Eric Anholt <eric@anholt.net>
12---
13 drivers/gpu/drm/i915/intel_bios.h | 12 ++++++------
14 drivers/gpu/drm/i915/intel_lvds.c | 2 +-
15 2 files changed, 7 insertions(+), 7 deletions(-)
16
17diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
18index 5ea715a..de621aa 100644
19--- a/drivers/gpu/drm/i915/intel_bios.h
20+++ b/drivers/gpu/drm/i915/intel_bios.h
21@@ -162,13 +162,13 @@ struct bdb_lvds_options {
22 u8 panel_type;
23 u8 rsvd1;
24 /* LVDS capabilities, stored in a dword */
25- u8 rsvd2:1;
26- u8 lvds_edid:1;
27- u8 pixel_dither:1;
28- u8 pfit_ratio_auto:1;
29- u8 pfit_gfx_mode_enhanced:1;
30- u8 pfit_text_mode_enhanced:1;
31 u8 pfit_mode:2;
32+ u8 pfit_text_mode_enhanced:1;
33+ u8 pfit_gfx_mode_enhanced:1;
34+ u8 pfit_ratio_auto:1;
35+ u8 pixel_dither:1;
36+ u8 lvds_edid:1;
37+ u8 rsvd2:1;
38 u8 rsvd4;
39 } __attribute__((packed));
40
41diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
42index 0d211af..6619f26 100644
43--- a/drivers/gpu/drm/i915/intel_lvds.c
44+++ b/drivers/gpu/drm/i915/intel_lvds.c
45@@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
46 pfit_control = 0;
47
48 if (!IS_I965G(dev)) {
49- if (dev_priv->panel_wants_dither)
50+ if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
51 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
52 }
53 else
54--
551.6.1.3
56
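The hunk above only reverses the declaration order of the bitfields in bdb_lvds_options, which is enough to change which VBT bit each flag reads. A minimal standalone demonstration of that effect, assuming GCC's LSB-first bitfield allocation on x86 (the two layouts are copied from the patch; the sample byte is invented):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* The layouts below mirror the patch hunk; with GCC on x86, bitfields
 * are allocated starting at the least significant bit, so reversing the
 * declaration order changes which bit each flag maps to. */
struct lvds_caps_old {            /* pre-patch declaration order */
    uint8_t rsvd2:1;
    uint8_t lvds_edid:1;
    uint8_t pixel_dither:1;
    uint8_t pfit_ratio_auto:1;
    uint8_t pfit_gfx_mode_enhanced:1;
    uint8_t pfit_text_mode_enhanced:1;
    uint8_t pfit_mode:2;
} __attribute__((packed));

struct lvds_caps_new {            /* post-patch declaration order */
    uint8_t pfit_mode:2;
    uint8_t pfit_text_mode_enhanced:1;
    uint8_t pfit_gfx_mode_enhanced:1;
    uint8_t pfit_ratio_auto:1;
    uint8_t pixel_dither:1;
    uint8_t lvds_edid:1;
    uint8_t rsvd2:1;
} __attribute__((packed));

int main(void)
{
    uint8_t vbt_byte = 0x20;      /* hypothetical: only bit 5 set */
    struct lvds_caps_old before;
    struct lvds_caps_new after;

    memcpy(&before, &vbt_byte, 1);
    memcpy(&after, &vbt_byte, 1);

    printf("old layout: pixel_dither=%u\n", before.pixel_dither); /* reads bit 2 -> 0 */
    printf("new layout: pixel_dither=%u\n", after.pixel_dither);  /* reads bit 5 -> 1 */
    return 0;
}

With a byte like this, the old layout reads the dither flag from the wrong bit, which is the kind of misparse the reordering fixes.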
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-revert.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-revert.patch
deleted file mode 100644
index 850fa161e9..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-revert.patch
+++ /dev/null
@@ -1,55 +0,0 @@
1From ee977685870767221dc763338bb6ed5fd83f65be Mon Sep 17 00:00:00 2001
2From: Yong Wang <yong.y.wang@intel.com>
3Date: Tue, 6 Jan 2009 15:13:41 +0800
4Subject: [PATCH] Revert "drm/i915: GEM on PAE has problems - disable it for now."
5
6This reverts commit ac5c4e76180a74c7f922f6fa71ace0cef45fa433.
7---
8 drivers/gpu/drm/i915/i915_dma.c | 10 +---------
9 drivers/gpu/drm/i915/i915_drv.h | 2 --
10 2 files changed, 1 insertions(+), 11 deletions(-)
11
12diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
13index afa8a12..553dd4b 100644
14--- a/drivers/gpu/drm/i915/i915_dma.c
15+++ b/drivers/gpu/drm/i915/i915_dma.c
16@@ -717,7 +717,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
17 value = dev->pci_device;
18 break;
19 case I915_PARAM_HAS_GEM:
20- value = dev_priv->has_gem;
21+ value = 1;
22 break;
23 default:
24 DRM_ERROR("Unknown parameter %d\n", param->param);
25@@ -830,14 +830,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
26 "performance may suffer.\n");
27 }
28
29-#ifdef CONFIG_HIGHMEM64G
30- /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
31- dev_priv->has_gem = 0;
32-#else
33- /* enable GEM by default */
34- dev_priv->has_gem = 1;
35-#endif
36-
37 dev->driver->get_vblank_counter = i915_get_vblank_counter;
38 if (IS_GM45(dev))
39 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
40diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
41index b3cc473..adc972c 100644
42--- a/drivers/gpu/drm/i915/i915_drv.h
43+++ b/drivers/gpu/drm/i915/i915_drv.h
44@@ -106,8 +106,6 @@ struct intel_opregion {
45 typedef struct drm_i915_private {
46 struct drm_device *dev;
47
48- int has_gem;
49-
50 void __iomem *regs;
51 drm_local_map_t *sarea;
52
53--
541.5.5.1
55
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch
deleted file mode 100644
index 9291362f04..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch
+++ /dev/null
@@ -1,208 +0,0 @@
1From b55de80e49892002a1878013ab9aee1a30970be6 Mon Sep 17 00:00:00 2001
2From: Bruce Allan <bruce.w.allan@intel.com>
3Date: Sat, 21 Mar 2009 13:25:25 -0700
4Subject: [PATCH] e100: add support for 82552 10/100 adapter
5
6This patch enables support for the new Intel 82552 adapter (new PHY paired
7with the existing MAC in the ICH7 chipset). No new features are added to
8the driver; however, there are minor changes due to updated registers and a
9few workarounds for hardware errata.
10
11Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
12Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
13Signed-off-by: David S. Miller <davem@davemloft.net>
14---
15 drivers/net/e100.c | 93 +++++++++++++++++++++++++++++++++++++++++++---------
16 1 files changed, 77 insertions(+), 16 deletions(-)
17
18diff --git a/drivers/net/e100.c b/drivers/net/e100.c
19index 861d2ee..0504db9 100644
20--- a/drivers/net/e100.c
21+++ b/drivers/net/e100.c
22@@ -167,7 +167,7 @@
23
24 #define DRV_NAME "e100"
25 #define DRV_EXT "-NAPI"
26-#define DRV_VERSION "3.5.23-k6"DRV_EXT
27+#define DRV_VERSION "3.5.24-k2"DRV_EXT
28 #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
29 #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
30 #define PFX DRV_NAME ": "
31@@ -240,6 +240,7 @@ static struct pci_device_id e100_id_table[] = {
32 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
33 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
34 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
35+ INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
36 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
37 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
38 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
39@@ -275,6 +276,7 @@ enum phy {
40 phy_82562_em = 0x032002A8,
41 phy_82562_ek = 0x031002A8,
42 phy_82562_eh = 0x017002A8,
43+ phy_82552_v = 0xd061004d,
44 phy_unknown = 0xFFFFFFFF,
45 };
46
47@@ -943,6 +945,22 @@ static int mdio_read(struct net_device *netdev, int addr, int reg)
48
49 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
50 {
51+ struct nic *nic = netdev_priv(netdev);
52+
53+ if ((nic->phy == phy_82552_v) && (reg == MII_BMCR) &&
54+ (data & (BMCR_ANRESTART | BMCR_ANENABLE))) {
55+ u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
56+
57+ /*
58+ * Workaround Si issue where sometimes the part will not
59+ * autoneg to 100Mbps even when advertised.
60+ */
61+ if (advert & ADVERTISE_100FULL)
62+ data |= BMCR_SPEED100 | BMCR_FULLDPLX;
63+ else if (advert & ADVERTISE_100HALF)
64+ data |= BMCR_SPEED100;
65+ }
66+
67 mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
68 }
69
70@@ -1276,16 +1294,12 @@ static int e100_phy_init(struct nic *nic)
71 if (addr == 32)
72 return -EAGAIN;
73
74- /* Selected the phy and isolate the rest */
75- for (addr = 0; addr < 32; addr++) {
76- if (addr != nic->mii.phy_id) {
77- mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
78- } else {
79- bmcr = mdio_read(netdev, addr, MII_BMCR);
80- mdio_write(netdev, addr, MII_BMCR,
81- bmcr & ~BMCR_ISOLATE);
82- }
83- }
84+ /* Isolate all the PHY ids */
85+ for (addr = 0; addr < 32; addr++)
86+ mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
87+ /* Select the discovered PHY */
88+ bmcr &= ~BMCR_ISOLATE;
89+ mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
90
91 /* Get phy ID */
92 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
93@@ -1303,7 +1317,18 @@ static int e100_phy_init(struct nic *nic)
94 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
95 }
96
97- if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
98+ if (nic->phy == phy_82552_v) {
99+ u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
100+
101+ /* Workaround Si not advertising flow-control during autoneg */
102+ advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
103+ mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
104+
105+ /* Reset for the above changes to take effect */
106+ bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
107+ bmcr |= BMCR_RESET;
108+ mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
109+ } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
110 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
111 !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
112 /* enable/disable MDI/MDI-X auto-switching. */
113@@ -2134,6 +2159,9 @@ err_clean_rx:
114 }
115
116 #define MII_LED_CONTROL 0x1B
117+#define E100_82552_LED_OVERRIDE 0x19
118+#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
119+#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
120 static void e100_blink_led(unsigned long data)
121 {
122 struct nic *nic = (struct nic *)data;
123@@ -2143,10 +2171,19 @@ static void e100_blink_led(unsigned long data)
124 led_on_559 = 0x05,
125 led_on_557 = 0x07,
126 };
127+ u16 led_reg = MII_LED_CONTROL;
128+
129+ if (nic->phy == phy_82552_v) {
130+ led_reg = E100_82552_LED_OVERRIDE;
131
132- nic->leds = (nic->leds & led_on) ? led_off :
133- (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
134- mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
135+ nic->leds = (nic->leds == E100_82552_LED_ON) ?
136+ E100_82552_LED_OFF : E100_82552_LED_ON;
137+ } else {
138+ nic->leds = (nic->leds & led_on) ? led_off :
139+ (nic->mac < mac_82559_D101M) ? led_on_557 :
140+ led_on_559;
141+ }
142+ mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
143 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
144 }
145
146@@ -2375,13 +2412,15 @@ static void e100_diag_test(struct net_device *netdev,
147 static int e100_phys_id(struct net_device *netdev, u32 data)
148 {
149 struct nic *nic = netdev_priv(netdev);
150+ u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
151+ MII_LED_CONTROL;
152
153 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
154 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
155 mod_timer(&nic->blink_timer, jiffies);
156 msleep_interruptible(data * 1000);
157 del_timer_sync(&nic->blink_timer);
158- mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
159+ mdio_write(netdev, nic->mii.phy_id, led_reg, 0);
160
161 return 0;
162 }
163@@ -2686,6 +2725,9 @@ static void __devexit e100_remove(struct pci_dev *pdev)
164 }
165 }
166
167+#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
168+#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
169+#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
170 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
171 {
172 struct net_device *netdev = pci_get_drvdata(pdev);
173@@ -2698,6 +2740,15 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
174 pci_save_state(pdev);
175
176 if ((nic->flags & wol_magic) | e100_asf(nic)) {
177+ /* enable reverse auto-negotiation */
178+ if (nic->phy == phy_82552_v) {
179+ u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
180+ E100_82552_SMARTSPEED);
181+
182+ mdio_write(netdev, nic->mii.phy_id,
183+ E100_82552_SMARTSPEED, smartspeed |
184+ E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
185+ }
186 if (pci_enable_wake(pdev, PCI_D3cold, true))
187 pci_enable_wake(pdev, PCI_D3hot, true);
188 } else {
189@@ -2721,6 +2772,16 @@ static int e100_resume(struct pci_dev *pdev)
190 /* ack any pending wake events, disable PME */
191 pci_enable_wake(pdev, 0, 0);
192
193+ /* disbale reverse auto-negotiation */
194+ if (nic->phy == phy_82552_v) {
195+ u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
196+ E100_82552_SMARTSPEED);
197+
198+ mdio_write(netdev, nic->mii.phy_id,
199+ E100_82552_SMARTSPEED,
200+ smartspeed & ~(E100_82552_REV_ANEG));
201+ }
202+
203 netif_device_attach(netdev);
204 if (netif_running(netdev))
205 e100_up(nic);
206--
2071.5.5.1
208
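Most of the removed patch is plumbing for the new PHY ID; the interesting part is the mdio_write() workaround that forces the speed/duplex bits whenever autonegotiation is (re)started while 100 Mb/s is advertised. The same decision in isolation, as a compilable sketch (the register bit values are the standard MII definitions; the sample register contents are made up):

#include <stdio.h>
#include <stdint.h>

/* Standard MII register bits (values as in linux/mii.h). */
#define BMCR_SPEED100     0x2000
#define BMCR_ANENABLE     0x1000
#define BMCR_ANRESTART    0x0200
#define BMCR_FULLDPLX     0x0100
#define ADVERTISE_100FULL 0x0100
#define ADVERTISE_100HALF 0x0080

/* Sketch of the 82552 autoneg workaround from the patch above: if the
 * PHY advertises 100 Mb/s, force the speed/duplex bits alongside the
 * autoneg restart.  'advert' is a made-up register snapshot, not read
 * from hardware. */
static uint16_t apply_82552_workaround(uint16_t bmcr, uint16_t advert)
{
    if (bmcr & (BMCR_ANRESTART | BMCR_ANENABLE)) {
        if (advert & ADVERTISE_100FULL)
            bmcr |= BMCR_SPEED100 | BMCR_FULLDPLX;
        else if (advert & ADVERTISE_100HALF)
            bmcr |= BMCR_SPEED100;
    }
    return bmcr;
}

int main(void)
{
    uint16_t advert = ADVERTISE_100FULL | ADVERTISE_100HALF;
    uint16_t bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

    printf("BMCR written to the PHY: 0x%04x\n",
           apply_82552_workaround(bmcr, advert));
    return 0;
}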
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-enable-async-by-default.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-enable-async-by-default.patch
deleted file mode 100644
index 6eea4f6e17..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-enable-async-by-default.patch
+++ /dev/null
@@ -1,12 +0,0 @@
1--- a/kernel/async.c 2009-01-19 18:30:29.000000000 -0800
2+++ b/kernel/async.c 2009-01-19 18:31:12.000000000 -0800
3@@ -65,7 +65,7 @@ static LIST_HEAD(async_pending);
4 static LIST_HEAD(async_running);
5 static DEFINE_SPINLOCK(async_lock);
6
7-static int async_enabled = 0;
8+static int async_enabled = 1;
9
10 struct async_entry {
11 struct list_head list;
12
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-even-faster-kms.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-even-faster-kms.patch
deleted file mode 100644
index 80d1edf0aa..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-even-faster-kms.patch
+++ /dev/null
@@ -1,20 +0,0 @@
1--- linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c.org 2009-03-21 19:57:13.000000000 -0700
2+++ linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c 2009-03-21 19:57:25.000000000 -0700
3@@ -221,7 +221,7 @@ static void intel_lvds_prepare(struct dr
4 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
5 BACKLIGHT_DUTY_CYCLE_MASK);
6
7- intel_lvds_set_power(dev, false);
8+// intel_lvds_set_power(dev, false);
9 }
10
11 static void intel_lvds_commit( struct drm_encoder *encoder)
12@@ -233,7 +233,7 @@ static void intel_lvds_commit( struct dr
13 dev_priv->backlight_duty_cycle =
14 intel_lvds_get_max_backlight(dev);
15
16- intel_lvds_set_power(dev, true);
17+// intel_lvds_set_power(dev, true);
18 }
19
20 static void intel_lvds_mode_set(struct drm_encoder *encoder,
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-initrd.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-initrd.patch
deleted file mode 100644
index ea4c617ed9..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-initrd.patch
+++ /dev/null
@@ -1,161 +0,0 @@
1From 24559ecf972ff482222f6fc152f15468d2380e2d Mon Sep 17 00:00:00 2001
2From: Li, Shaohua <shaohua.li@intel.com>
3Date: Wed, 13 Aug 2008 17:26:01 +0800
4Subject: [PATCH] fastboot: remove duplicate unpack_to_rootfs()
5
6We check whether the initrd is an initramfs first and then do the real
7unpack. The check isn't required; we can do the unpack directly. If the
8initrd isn't an initramfs, we can remove the garbage. On my laptop, this
9saves 0.1s of boot time. This penalizes the non-initramfs case, but
10initramfs is now the most widely used format.
11
12Signed-off-by: Shaohua Li <shaohua.li@intel.com>
13Acked-by: Arjan van de Ven <arjan@infradead.org>
14Signed-off-by: Ingo Molnar <mingo@elte.hu>
15---
16 init/initramfs.c | 71 ++++++++++++++++++++++++++++++++++++++++++-----------
17 1 files changed, 56 insertions(+), 15 deletions(-)
18
19diff --git a/init/initramfs.c b/init/initramfs.c
20index 4f5ba75..6b5c1dc 100644
21--- a/init/initramfs.c
22+++ b/init/initramfs.c
23@@ -5,6 +5,7 @@
24 #include <linux/fcntl.h>
25 #include <linux/delay.h>
26 #include <linux/string.h>
27+#include <linux/dirent.h>
28 #include <linux/syscalls.h>
29 #include <linux/utime.h>
30
31@@ -166,8 +167,6 @@ static __initdata char *victim;
32 static __initdata unsigned count;
33 static __initdata loff_t this_header, next_header;
34
35-static __initdata int dry_run;
36-
37 static inline void __init eat(unsigned n)
38 {
39 victim += n;
40@@ -229,10 +228,6 @@ static int __init do_header(void)
41 parse_header(collected);
42 next_header = this_header + N_ALIGN(name_len) + body_len;
43 next_header = (next_header + 3) & ~3;
44- if (dry_run) {
45- read_into(name_buf, N_ALIGN(name_len), GotName);
46- return 0;
47- }
48 state = SkipIt;
49 if (name_len <= 0 || name_len > PATH_MAX)
50 return 0;
51@@ -303,8 +298,6 @@ static int __init do_name(void)
52 free_hash();
53 return 0;
54 }
55- if (dry_run)
56- return 0;
57 clean_path(collected, mode);
58 if (S_ISREG(mode)) {
59 int ml = maybe_link();
60@@ -475,10 +468,9 @@ static void __init flush_window(void)
61 outcnt = 0;
62 }
63
64-static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
65+static char * __init unpack_to_rootfs(char *buf, unsigned len)
66 {
67 int written;
68- dry_run = check_only;
69 header_buf = kmalloc(110, GFP_KERNEL);
70 symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
71 name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);
72@@ -573,10 +565,57 @@ skip:
73 initrd_end = 0;
74 }
75
76+#define BUF_SIZE 1024
77+static void __init clean_rootfs(void)
78+{
79+ int fd;
80+ void *buf;
81+ struct linux_dirent64 *dirp;
82+ int count;
83+
84+ fd = sys_open("/", O_RDONLY, 0);
85+ WARN_ON(fd < 0);
86+ if (fd < 0)
87+ return;
88+ buf = kzalloc(BUF_SIZE, GFP_KERNEL);
89+ WARN_ON(!buf);
90+ if (!buf) {
91+ sys_close(fd);
92+ return;
93+ }
94+
95+ dirp = buf;
96+ count = sys_getdents64(fd, dirp, BUF_SIZE);
97+ while (count > 0) {
98+ while (count > 0) {
99+ struct stat st;
100+ int ret;
101+
102+ ret = sys_newlstat(dirp->d_name, &st);
103+ WARN_ON_ONCE(ret);
104+ if (!ret) {
105+ if (S_ISDIR(st.st_mode))
106+ sys_rmdir(dirp->d_name);
107+ else
108+ sys_unlink(dirp->d_name);
109+ }
110+
111+ count -= dirp->d_reclen;
112+ dirp = (void *)dirp + dirp->d_reclen;
113+ }
114+ dirp = buf;
115+ memset(buf, 0, BUF_SIZE);
116+ count = sys_getdents64(fd, dirp, BUF_SIZE);
117+ }
118+
119+ sys_close(fd);
120+ kfree(buf);
121+}
122+
123 static int __init populate_rootfs(void)
124 {
125 char *err = unpack_to_rootfs(__initramfs_start,
126- __initramfs_end - __initramfs_start, 0);
127+ __initramfs_end - __initramfs_start);
128 if (err)
129 panic(err);
130 if (initrd_start) {
131@@ -584,13 +623,15 @@ static int __init populate_rootfs(void)
132 int fd;
133 printk(KERN_INFO "checking if image is initramfs...");
134 err = unpack_to_rootfs((char *)initrd_start,
135- initrd_end - initrd_start, 1);
136+ initrd_end - initrd_start);
137 if (!err) {
138 printk(" it is\n");
139- unpack_to_rootfs((char *)initrd_start,
140- initrd_end - initrd_start, 0);
141 free_initrd();
142 return 0;
143+ } else {
144+ clean_rootfs();
145+ unpack_to_rootfs(__initramfs_start,
146+ __initramfs_end - __initramfs_start);
147 }
148 printk("it isn't (%s); looks like an initrd\n", err);
149 fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700);
150@@ -603,7 +644,7 @@ static int __init populate_rootfs(void)
151 #else
152 printk(KERN_INFO "Unpacking initramfs...");
153 err = unpack_to_rootfs((char *)initrd_start,
154- initrd_end - initrd_start, 0);
155+ initrd_end - initrd_start);
156 if (err)
157 panic(err);
158 printk(" done\n");
159--
1601.5.5.1
161
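The clean_rootfs() helper added above removes whatever a failed unpack of a non-initramfs image left in the rootfs before the built-in initramfs is unpacked again. A rough userspace analogue of that directory sweep, using ordinary libc calls instead of the kernel's sys_* wrappers (the path is a placeholder; run it only against a throwaway directory):

#include <stdio.h>
#include <string.h>
#include <dirent.h>
#include <unistd.h>
#include <sys/stat.h>

/* Sweep the entries of one directory, removing regular files and
 * (empty) subdirectories, roughly as clean_rootfs() does for "/". */
static void clean_dir(const char *path)
{
    DIR *dir = opendir(path);
    struct dirent *de;

    if (!dir) {
        perror(path);
        return;
    }
    while ((de = readdir(dir)) != NULL) {
        char child[4096];
        struct stat st;

        if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, ".."))
            continue;
        snprintf(child, sizeof(child), "%s/%s", path, de->d_name);
        if (lstat(child, &st) != 0)
            continue;
        if (S_ISDIR(st.st_mode))
            rmdir(child);      /* like sys_rmdir() in the patch */
        else
            unlink(child);     /* like sys_unlink() in the patch */
    }
    closedir(dir);
}

int main(void)
{
    clean_dir("scratch");      /* placeholder directory name */
    return 0;
}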
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-kms.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-kms.patch
deleted file mode 100644
index f213958bf5..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-kms.patch
+++ /dev/null
@@ -1,285 +0,0 @@
1diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
2index 1c3a8c5..144624a 100644
3--- a/drivers/gpu/drm/drm_crtc_helper.c
4+++ b/drivers/gpu/drm/drm_crtc_helper.c
5@@ -29,6 +29,8 @@
6 * Jesse Barnes <jesse.barnes@intel.com>
7 */
8
9+#include <linux/async.h>
10+
11 #include "drmP.h"
12 #include "drm_crtc.h"
13 #include "drm_crtc_helper.h"
14@@ -42,6 +44,8 @@ static struct drm_display_mode std_modes[] = {
15 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
16 };
17
18+LIST_HEAD(drm_async_list);
19+
20 /**
21 * drm_helper_probe_connector_modes - get complete set of display modes
22 * @dev: DRM device
23@@ -137,6 +141,26 @@ int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
24 }
25 EXPORT_SYMBOL(drm_helper_probe_connector_modes);
26
27+int drm_helper_probe_connector_modes_fast(struct drm_device *dev, uint32_t maxX,
28+ uint32_t maxY)
29+{
30+ struct drm_connector *connector;
31+ int count = 0;
32+
33+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
34+ count += drm_helper_probe_single_connector_modes(connector,
35+ maxX, maxY);
36+ /*
37+ * If we found a 'good' connector, we stop probing futher.
38+ */
39+ if (count > 0)
40+ break;
41+ }
42+
43+ return count;
44+}
45+EXPORT_SYMBOL(drm_helper_probe_connector_modes_fast);
46+
47 static void drm_helper_add_std_modes(struct drm_device *dev,
48 struct drm_connector *connector)
49 {
50@@ -882,6 +906,24 @@ bool drm_helper_plugged_event(struct drm_device *dev)
51 /* FIXME: send hotplug event */
52 return true;
53 }
54+
55+static void async_notify_fb_changed(void *data, async_cookie_t cookie)
56+{
57+ struct drm_device *dev = data;
58+ dev->mode_config.funcs->fb_changed(dev);
59+}
60+
61+static void async_probe_hard(void *data, async_cookie_t cookie)
62+{
63+ struct drm_device *dev = data;
64+ /* Need to wait for async_notify_fb_changed to be done */
65+ async_synchronize_cookie_domain(cookie, &drm_async_list);
66+ drm_helper_probe_connector_modes(dev,
67+ dev->mode_config.max_width,
68+ dev->mode_config.max_height);
69+}
70+
71+
72 /**
73 * drm_initial_config - setup a sane initial connector configuration
74 * @dev: DRM device
75@@ -902,7 +944,7 @@ bool drm_helper_initial_config(struct drm_device *dev, bool can_grow)
76 struct drm_connector *connector;
77 int count = 0;
78
79- count = drm_helper_probe_connector_modes(dev,
80+ count = drm_helper_probe_connector_modes_fast(dev,
81 dev->mode_config.max_width,
82 dev->mode_config.max_height);
83
84@@ -921,7 +963,9 @@ bool drm_helper_initial_config(struct drm_device *dev, bool can_grow)
85 drm_setup_crtcs(dev);
86
87 /* alert the driver fb layer */
88- dev->mode_config.funcs->fb_changed(dev);
89+ async_schedule_domain(async_notify_fb_changed, dev, &drm_async_list);
90+ /* probe further outputs */
91+ async_schedule_domain(async_probe_hard, dev, &drm_async_list);
92
93 return 0;
94 }
95diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
96index 14c7a23..ef52021 100644
97--- a/drivers/gpu/drm/drm_drv.c
98+++ b/drivers/gpu/drm/drm_drv.c
99@@ -48,6 +48,7 @@
100
101 #include "drmP.h"
102 #include "drm_core.h"
103+#include <linux/async.h>
104
105 static int drm_version(struct drm_device *dev, void *data,
106 struct drm_file *file_priv);
107@@ -345,6 +346,9 @@ void drm_exit(struct drm_driver *driver)
108 struct drm_device *dev, *tmp;
109 DRM_DEBUG("\n");
110
111+ /* make sure all async DRM operations are finished */
112+ async_synchronize_full_domain(&drm_async_list);
113+
114 list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
115 drm_cleanup(dev);
116
117diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
118index a839a28..069b189 100644
119--- a/drivers/gpu/drm/drm_edid.c
120+++ b/drivers/gpu/drm/drm_edid.c
121@@ -588,20 +588,22 @@ static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
122 {
123 struct i2c_algo_bit_data *algo_data = adapter->algo_data;
124 unsigned char *edid = NULL;
125+ int divider = 5;
126 int i, j;
127
128 algo_data->setscl(algo_data->data, 1);
129
130- for (i = 0; i < 1; i++) {
131+ for (i = 0; i < 2; i++) {
132 /* For some old monitors we need the
133 * following process to initialize/stop DDC
134 */
135+
136 algo_data->setsda(algo_data->data, 1);
137- msleep(13);
138+ msleep(13 / divider);
139
140 algo_data->setscl(algo_data->data, 1);
141 for (j = 0; j < 5; j++) {
142- msleep(10);
143+ msleep(10 / divider);
144 if (algo_data->getscl(algo_data->data))
145 break;
146 }
147@@ -609,31 +611,33 @@ static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
148 continue;
149
150 algo_data->setsda(algo_data->data, 0);
151- msleep(15);
152+ msleep(15 / divider);
153 algo_data->setscl(algo_data->data, 0);
154- msleep(15);
155+ msleep(15 / divider);
156 algo_data->setsda(algo_data->data, 1);
157- msleep(15);
158+ msleep(15 / divider);
159
160 /* Do the real work */
161 edid = drm_do_probe_ddc_edid(adapter);
162 algo_data->setsda(algo_data->data, 0);
163 algo_data->setscl(algo_data->data, 0);
164- msleep(15);
165+ msleep(15 / divider);
166
167 algo_data->setscl(algo_data->data, 1);
168 for (j = 0; j < 10; j++) {
169- msleep(10);
170+ msleep(10 / divider);
171 if (algo_data->getscl(algo_data->data))
172 break;
173 }
174
175 algo_data->setsda(algo_data->data, 1);
176- msleep(15);
177+ msleep(15 / divider);
178 algo_data->setscl(algo_data->data, 0);
179 algo_data->setsda(algo_data->data, 0);
180+
181 if (edid)
182 break;
183+ divider = 1;
184 }
185 /* Release the DDC lines when done or the Apple Cinema HD display
186 * will switch off
187diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
188index a283427..6f2eced 100644
189--- a/drivers/gpu/drm/i915/intel_display.c
190+++ b/drivers/gpu/drm/i915/intel_display.c
191@@ -319,7 +319,7 @@ void
192 intel_wait_for_vblank(struct drm_device *dev)
193 {
194 /* Wait for 20ms, i.e. one cycle at 50hz. */
195- udelay(20000);
196+ mdelay(20);
197 }
198
199 static int
200@@ -1466,12 +1466,12 @@ static void intel_setup_outputs(struct drm_device *dev)
201 struct drm_i915_private *dev_priv = dev->dev_private;
202 struct drm_connector *connector;
203
204- intel_crt_init(dev);
205-
206- /* Set up integrated LVDS */
207+ /* Set up integrated LVDS -- will skip if the lid is closed */
208 if (IS_MOBILE(dev) && !IS_I830(dev))
209 intel_lvds_init(dev);
210
211+ intel_crt_init(dev);
212+
213 if (IS_I9XX(dev)) {
214 int found;
215
216diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
217index 957daef..22a74bd 100644
218--- a/drivers/gpu/drm/i915/intel_drv.h
219+++ b/drivers/gpu/drm/i915/intel_drv.h
220@@ -81,6 +81,7 @@ struct intel_output {
221 int type;
222 struct intel_i2c_chan *i2c_bus; /* for control functions */
223 struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
224+ struct edid *edid;
225 bool load_detect_temp;
226 bool needs_tv_clock;
227 void *dev_priv;
228diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
229index 0d211af..dc4fecc 100644
230--- a/drivers/gpu/drm/i915/intel_lvds.c
231+++ b/drivers/gpu/drm/i915/intel_lvds.c
232@@ -336,6 +336,7 @@ static void intel_lvds_destroy(struct drm_connector *connector)
233 intel_i2c_destroy(intel_output->ddc_bus);
234 drm_sysfs_connector_remove(connector);
235 drm_connector_cleanup(connector);
236+ kfree(intel_output->edid);
237 kfree(connector);
238 }
239
240@@ -516,5 +517,6 @@ failed:
241 if (intel_output->ddc_bus)
242 intel_i2c_destroy(intel_output->ddc_bus);
243 drm_connector_cleanup(connector);
244+ kfree(intel_output->edid);
245 kfree(connector);
246 }
247diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
248index e42019e..8c0d5f6 100644
249--- a/drivers/gpu/drm/i915/intel_modes.c
250+++ b/drivers/gpu/drm/i915/intel_modes.c
251@@ -70,13 +70,21 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
252 struct edid *edid;
253 int ret = 0;
254
255+ if (intel_output->edid) {
256+ printk(KERN_INFO "Skipping EDID probe due to cached edid\n");
257+ return ret;
258+ }
259+
260 edid = drm_get_edid(&intel_output->base,
261 &intel_output->ddc_bus->adapter);
262 if (edid) {
263 drm_mode_connector_update_edid_property(&intel_output->base,
264 edid);
265 ret = drm_add_edid_modes(&intel_output->base, edid);
266- kfree(edid);
267+ if (intel_output->type == INTEL_OUTPUT_LVDS)
268+ intel_output->edid = edid;
269+ else
270+ kfree(edid);
271 }
272
273 return ret;
274diff --git a/include/drm/drmP.h b/include/drm/drmP.h
275index e5f4ae9..69ce4f4 100644
276--- a/include/drm/drmP.h
277+++ b/include/drm/drmP.h
278@@ -304,6 +304,7 @@ struct drm_vma_entry {
279 pid_t pid;
280 };
281
282+extern struct list_head drm_async_list;
283 /**
284 * DMA buffer.
285 */
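The intel_modes.c hunk above is the core of this patch: the EDID read over bit-banged DDC is slow, so the result is cached in intel_output->edid for the LVDS panel (whose EDID cannot change) and freed only when the connector is torn down; the msleep(x / divider) change likewise shortens the first probe attempt and falls back to the full delays on retry. A minimal sketch of the caching idea, with illustrative names, assuming the same DRM EDID helpers the hunk calls (drm_get_edid, drm_add_edid_modes):

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>

struct cached_output {
	struct drm_connector *connector;
	struct i2c_adapter *ddc;
	struct edid *edid;	/* NULL until the first successful probe */
	bool fixed_panel;	/* true for LVDS-style, non-hotpluggable outputs */
};

/* Return the number of probed modes, hitting the DDC bus only when needed. */
static int cached_get_modes(struct cached_output *out)
{
	struct edid *edid;
	int ret;

	if (out->edid)	/* fast path: a fixed panel's EDID never changes */
		return drm_add_edid_modes(out->connector, out->edid);

	edid = drm_get_edid(out->connector, out->ddc);	/* slow, bit-banged DDC */
	if (!edid)
		return 0;

	drm_mode_connector_update_edid_property(out->connector, edid);
	ret = drm_add_edid_modes(out->connector, edid);

	if (out->fixed_panel)
		out->edid = edid;	/* cache it; kfree() it when the output is destroyed */
	else
		kfree(edid);

	return ret;
}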
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-flip-ide-net.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-flip-ide-net.patch
deleted file mode 100644
index eda77564ce..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-flip-ide-net.patch
+++ /dev/null
@@ -1,40 +0,0 @@
1From: Arjan van de Ven <arjan@linux.intel.com>
2Date: Mon, 26 Jan 2009 18:58:11 -0800
3Subject: [PATCH] ide/net: flip the order of SATA and network init
4
5This patch flips the order in which SATA and network drivers are initialized.
6
7SATA probing takes quite a bit of time, and with the asynchronous infrastructure
8other drivers that run after it can execute in parallel. Network drivers do tend
9to take some real time talking to the hardware, so running them later is
10a good thing (the SATA probe then runs concurrently).
11
12This saves about 15% of my kernel's boot time.
13
14Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
15---
16 drivers/Makefile | 5 +++--
17 1 files changed, 3 insertions(+), 2 deletions(-)
18
19diff --git a/drivers/Makefile b/drivers/Makefile
20index c1bf417..2618a61 100644
21--- a/drivers/Makefile
22+++ b/drivers/Makefile
23@@ -36,13 +36,14 @@
24 obj-$(CONFIG_FB_INTEL) += video/intelfb/
25 obj-y += serial/
26 obj-$(CONFIG_PARPORT) += parport/
27-obj-y += base/ block/ misc/ mfd/ net/ media/
28+obj-y += base/ block/ misc/ mfd/ media/
29 obj-$(CONFIG_NUBUS) += nubus/
30-obj-$(CONFIG_ATM) += atm/
31 obj-y += macintosh/
32 obj-$(CONFIG_IDE) += ide/
33 obj-$(CONFIG_SCSI) += scsi/
34 obj-$(CONFIG_ATA) += ata/
35+obj-y += net/
36+obj-$(CONFIG_ATM) += atm/
37 obj-$(CONFIG_FUSION) += message/
38 obj-$(CONFIG_FIREWIRE) += firewire/
39 obj-y += ieee1394/
40
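For context, drivers built into the kernel are initialized in link order, which is why moving net/ below ata/ in drivers/Makefile is enough to delay the network drivers. The "asynchronous infrastructure" the message refers to is the async framework introduced around 2.6.29; a hedged sketch of how a driver hands its slow probe to it (an illustrative module, not taken from this patch):

#include <linux/async.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Stands in for a multi-second SATA link bring-up / port scan. */
static void slow_port_scan(void *data, async_cookie_t cookie)
{
	msleep(2000);
	pr_info("port %ld scanned\n", (long)data);
}

static int __init example_init(void)
{
	long port;

	/* Kick off all scans; init returns immediately, so later initcalls
	 * (e.g. network drivers linked after us) run in parallel. */
	for (port = 0; port < 4; port++)
		async_schedule(slow_port_scan, (void *)port);
	return 0;
}

static void __exit example_exit(void)
{
	async_synchronize_full();	/* wait for outstanding async work */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");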
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch
deleted file mode 100644
index 1ae8257203..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch
+++ /dev/null
@@ -1,92 +0,0 @@
1From 2c5ccde448ae5f4062802bcd6002f856acbd268f Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Tue, 3 Feb 2009 16:26:16 -0800
4Subject: [PATCH] input: introduce a tougher i8042.reset
5
6Some bad touchpads don't reset right the first time (MSI Wind U-100 for
7example). This patch will retry the reset up to 5 times.
8
9In addition, this patch adds a module parameter so that reset failures
10are not treated as fatal to the use of the device. This prevents
11a touchpad failure from also disabling the keyboard.
12
13Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
14---
15 Documentation/kernel-parameters.txt | 2 ++
16 drivers/input/serio/i8042.c | 33 ++++++++++++++++++++++++---------
17 2 files changed, 26 insertions(+), 9 deletions(-)
18
19diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
20index ac613a6..a43e3bd 100644
21--- a/Documentation/kernel-parameters.txt
22+++ b/Documentation/kernel-parameters.txt
23@@ -855,6 +855,8 @@ and is between 256 and 4096 characters. It is defined in the file
24 [HW] Frequency with which keyboard LEDs should blink
25 when kernel panics (default is 0.5 sec)
26 i8042.reset [HW] Reset the controller during init and cleanup
27+ i8042.nonfatal [HW] Don't treat i8042.reset failures as fatal for the
28+ device initialization.
29 i8042.unlock [HW] Unlock (ignore) the keylock
30
31 i810= [HW,DRM]
32diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
33index 170f71e..2473a9a 100644
34--- a/drivers/input/serio/i8042.c
35+++ b/drivers/input/serio/i8042.c
36@@ -47,6 +47,10 @@ static unsigned int i8042_reset;
37 module_param_named(reset, i8042_reset, bool, 0);
38 MODULE_PARM_DESC(reset, "Reset controller during init and cleanup.");
39
40+static unsigned int i8042_nonfatal;
41+module_param_named(nonfatal, i8042_nonfatal, bool, 0);
42+MODULE_PARM_DESC(nonfatal, "Treat controller test failures as non-fatal.");
43+
44 static unsigned int i8042_direct;
45 module_param_named(direct, i8042_direct, bool, 0);
46 MODULE_PARM_DESC(direct, "Put keyboard port into non-translated mode.");
47@@ -712,22 +716,33 @@ static int i8042_controller_check(void)
48 static int i8042_controller_selftest(void)
49 {
50 unsigned char param;
51+ int i = 0;
52
53 if (!i8042_reset)
54 return 0;
55
56- if (i8042_command(&param, I8042_CMD_CTL_TEST)) {
57- printk(KERN_ERR "i8042.c: i8042 controller self test timeout.\n");
58- return -ENODEV;
59- }
60+ /*
61+ * We try this 5 times; on some really fragile systems this does not
62+ * take the first time...
63+ */
64+ do {
65+
66+ if (i8042_command(&param, I8042_CMD_CTL_TEST)) {
67+ printk(KERN_ERR "i8042.c: i8042 controller self test timeout.\n");
68+ return -ENODEV;
69+ }
70+
71+ if (param == I8042_RET_CTL_TEST)
72+ return 0;
73
74- if (param != I8042_RET_CTL_TEST) {
75 printk(KERN_ERR "i8042.c: i8042 controller selftest failed. (%#x != %#x)\n",
76- param, I8042_RET_CTL_TEST);
77- return -EIO;
78- }
79+ param, I8042_RET_CTL_TEST);
80+ msleep(50);
81+ } while (i++ < 5);
82
83- return 0;
84+ if (i8042_nonfatal)
85+ return 0;
86+ return -EIO;
87 }
88
89 /*
90--
911.6.0.6
92
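The change above boils down to a bounded retry with an opt-out on persistent failure. A minimal generic sketch of that pattern (an illustrative helper, not the i8042 code itself):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define SELFTEST_TRIES 5

/* Run a self-test up to SELFTEST_TRIES times; if it never passes, either
 * tolerate the failure (nonfatal) or report it to the caller. */
static int selftest_with_retry(int (*selftest)(void), bool nonfatal)
{
	int i;

	for (i = 0; i < SELFTEST_TRIES; i++) {
		if (selftest() == 0)
			return 0;		/* passed */
		msleep(50);			/* let flaky hardware settle */
	}

	return nonfatal ? 0 : -EIO;
}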
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-jbd-longer-commit-interval.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-jbd-longer-commit-interval.patch
deleted file mode 100644
index d7bd92151b..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-jbd-longer-commit-interval.patch
+++ /dev/null
@@ -1,28 +0,0 @@
1From 0143f8eb8afcaccba5a78196fb3db4361e0097a7 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Mon, 9 Feb 2009 21:25:32 -0800
4Subject: [PATCH] jbd: longer commit interval
5
6... 5 seconds is rather harsh on SSDs.
7
8Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
9---
10 include/linux/jbd.h | 2 +-
11 1 files changed, 1 insertions(+), 1 deletions(-)
12
13diff --git a/include/linux/jbd.h b/include/linux/jbd.h
14index 64246dc..d64b7fd 100644
15--- a/include/linux/jbd.h
16+++ b/include/linux/jbd.h
17@@ -46,7 +46,7 @@
18 /*
19 * The default maximum commit age, in seconds.
20 */
21-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
22+#define JBD_DEFAULT_MAX_COMMIT_AGE 15
23
24 #ifdef CONFIG_JBD_DEBUG
25 /*
26--
271.6.0.6
28
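For comparison, the same relief can be had per filesystem without patching the default, via ext3's commit= mount option (for example, mount -o remount,commit=15 /); the #define above only raises the kernel-wide default.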
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-kms-after-sata.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-kms-after-sata.patch
deleted file mode 100644
index 663b367971..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-kms-after-sata.patch
+++ /dev/null
@@ -1,32 +0,0 @@
1--- linux-2.6.28/drivers/Makefile~ 2009-03-21 21:23:28.000000000 -0700
2+++ linux-2.6.28/drivers/Makefile 2009-03-21 21:23:28.000000000 -0700
3@@ -25,15 +25,8 @@
4 # default.
5 obj-y += char/
6
7-# gpu/ comes after char for AGP vs DRM startup
8-obj-y += gpu/
9-
10 obj-$(CONFIG_CONNECTOR) += connector/
11
12-# i810fb and intelfb depend on char/agp/
13-obj-$(CONFIG_FB_I810) += video/i810/
14-obj-$(CONFIG_FB_INTEL) += video/intelfb/
15-
16 obj-y += serial/
17 obj-$(CONFIG_PARPORT) += parport/
18 obj-y += base/ block/ misc/ mfd/ media/
19@@ -43,6 +36,13 @@
20 obj-$(CONFIG_SCSI) += scsi/
21 obj-$(CONFIG_ATA) += ata/
22 obj-y += net/
23+
24+# gpu/ comes after char for AGP vs DRM startup
25+obj-y += gpu/
26+# i810fb and intelfb depend on char/agp/
27+obj-$(CONFIG_FB_I810) += video/i810/
28+obj-$(CONFIG_FB_INTEL) += video/intelfb/
29+
30 obj-$(CONFIG_ATM) += atm/
31 obj-$(CONFIG_FUSION) += message/
32 obj-$(CONFIG_FIREWIRE) += firewire/
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-msiwind.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-msiwind.patch
deleted file mode 100644
index e7fded41e8..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-msiwind.patch
+++ /dev/null
@@ -1,57 +0,0 @@
1Patch to get the touchpad on the MSI Wind U-100 working
2
3
4--- linux-2.6.28/drivers/input/serio/i8042-x86ia64io.h.org 2009-02-01 18:31:29.000000000 -0800
5+++ linux-2.6.28/drivers/input/serio/i8042-x86ia64io.h 2009-02-01 18:35:26.000000000 -0800
6@@ -378,6 +378,13 @@ static struct dmi_system_id __initdata i
7 DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
8 },
9 },
10+ {
11+ .ident = "MSI Wind U-100",
12+ .matches = {
13+ DMI_MATCH(DMI_BOARD_NAME, "U-100"),
14+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
15+ },
16+ },
17 { }
18 };
19 #endif
20@@ -448,6 +455,25 @@ static struct dmi_system_id __initdata i
21 { }
22 };
23
24+static struct dmi_system_id __initdata i8042_dmi_reset_table[] = {
25+ {
26+ .ident = "MSI Wind U-100",
27+ .matches = {
28+ DMI_MATCH(DMI_BOARD_NAME, "U-100"),
29+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
30+ },
31+ },
32+ {
33+ .ident = "LG Electronics X110",
34+ .matches = {
35+ DMI_MATCH(DMI_BOARD_NAME, "X110"),
36+ DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
37+ },
38+ },
39+ { }
40+};
41+
42+
43 #endif /* CONFIG_X86 */
44
45 #ifdef CONFIG_PNP
46@@ -564,6 +583,11 @@ static int __init i8042_pnp_init(void)
47 i8042_nopnp = 1;
48 #endif
49
50+ if (dmi_check_system(i8042_dmi_reset_table)) {
51+ i8042_reset = 1;
52+ i8042_nonfatal = 1;
53+ }
54+
55 if (i8042_nopnp) {
56 printk(KERN_INFO "i8042: PNP detection disabled\n");
57 return 0;
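Both quirks above hinge on DMI matching: the board is identified by its DMI vendor/board strings and the i8042 options are flipped before the controller is probed. A sketch of that mechanism with illustrative names (the real table lives in i8042-x86ia64io.h, as shown above):

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/types.h>

static bool kbc_force_reset, kbc_reset_nonfatal;

static struct dmi_system_id kbc_reset_table[] __initdata = {
	{
		.ident = "MSI Wind U-100",
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "U-100"),
			DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
		},
	},
	{ }	/* terminator */
};

static int __init kbc_quirks_init(void)
{
	if (dmi_check_system(kbc_reset_table)) {
		kbc_force_reset = true;		/* do reset the controller... */
		kbc_reset_nonfatal = true;	/* ...but don't die if the self-test fails */
	}
	return 0;
}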
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-agp.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-agp.patch
deleted file mode 100644
index 77e553956c..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-agp.patch
+++ /dev/null
@@ -1,83 +0,0 @@
1From eaf05431b9ea8676d23106e6373b7d2b8ff2d97d Mon Sep 17 00:00:00 2001
2From: Shaohua Li <shaohua.li@intel.com>
3Date: Mon, 23 Feb 2009 15:19:16 +0800
4Subject: agp/intel: Add support for new intel chipset.
5
6This is a G33-like desktop and mobile chipset.
7
8Signed-off-by: Shaohua Li <shaohua.li@intel.com>
9Signed-off-by: Eric Anholt <eric@anholt.net>
10---
11 drivers/char/agp/intel-agp.c | 21 ++++++++++++++++++---
12 1 files changed, 18 insertions(+), 3 deletions(-)
13
14diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
15index c771418..0232cfc 100644
16--- a/drivers/char/agp/intel-agp.c
17+++ b/drivers/char/agp/intel-agp.c
18@@ -26,6 +26,10 @@
19 #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
20 #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
21 #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
22+#define PCI_DEVICE_ID_INTEL_IGDGM_HB 0xA010
23+#define PCI_DEVICE_ID_INTEL_IGDGM_IG 0xA011
24+#define PCI_DEVICE_ID_INTEL_IGDG_HB 0xA000
25+#define PCI_DEVICE_ID_INTEL_IGDG_IG 0xA001
26 #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
27 #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
28 #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
29@@ -60,7 +64,12 @@
30
31 #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
32 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
33- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB)
34+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
35+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
36+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
37+
38+#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
39+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
40
41 #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
42 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
43@@ -510,7 +519,7 @@ static void intel_i830_init_gtt_entries(void)
44 size = 512;
45 }
46 size += 4; /* add in BIOS popup space */
47- } else if (IS_G33) {
48+ } else if (IS_G33 && !IS_IGD) {
49 /* G33's GTT size defined in gmch_ctrl */
50 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
51 case G33_PGETBL_SIZE_1M:
52@@ -526,7 +535,7 @@ static void intel_i830_init_gtt_entries(void)
53 size = 512;
54 }
55 size += 4;
56- } else if (IS_G4X) {
57+ } else if (IS_G4X || IS_IGD) {
58 /* On 4 series hardware, GTT stolen is separate from graphics
59 * stolen, ignore it in stolen gtt entries counting. However,
60 * 4KB of the stolen memory doesn't get mapped to the GTT.
61@@ -2159,6 +2168,10 @@ static const struct intel_driver_description {
62 NULL, &intel_g33_driver },
63 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
64 NULL, &intel_g33_driver },
65+ { PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
66+ NULL, &intel_g33_driver },
67+ { PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
68+ NULL, &intel_g33_driver },
69 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
70 "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
71 { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
72@@ -2353,6 +2366,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
73 ID(PCI_DEVICE_ID_INTEL_82945G_HB),
74 ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
75 ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
76+ ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
77+ ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
78 ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
79 ID(PCI_DEVICE_ID_INTEL_82G35_HB),
80 ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
81--
821.6.1.3
83
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-drm.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-drm.patch
deleted file mode 100644
index 1e7b866949..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-drm.patch
+++ /dev/null
@@ -1,336 +0,0 @@
1From 8b941bea1d0fe0c5cf0de938cd0bd89ce6640dbb Mon Sep 17 00:00:00 2001
2From: Shaohua Li <shaohua.li@intel.com>
3Date: Mon, 23 Feb 2009 15:19:19 +0800
4Subject: drm/i915: Add support for new G33-like chipset.
5
6This chip is nearly the same, but requires new clock settings.
7
8Signed-off-by: Shaohua Li <shaohua.li@intel.com>
9Signed-off-by: Eric Anholt <eric@anholt.net>
10---
11 drivers/gpu/drm/i915/i915_drv.h | 10 +++-
12 drivers/gpu/drm/i915/i915_reg.h | 4 +
13 drivers/gpu/drm/i915/intel_display.c | 111 +++++++++++++++++++++++++++++-----
14 include/drm/drm_pciids.h | 2 +
15 4 files changed, 109 insertions(+), 18 deletions(-)
16
17diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
18index 0e27854..36d6bc3 100644
19--- a/drivers/gpu/drm/i915/i915_drv.h
20+++ b/drivers/gpu/drm/i915/i915_drv.h
21@@ -787,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
22 (dev)->pci_device == 0x2E22 || \
23 IS_GM45(dev))
24
25+#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
26+#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
27+#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
28+
29 #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
30 (dev)->pci_device == 0x29B2 || \
31- (dev)->pci_device == 0x29D2)
32+ (dev)->pci_device == 0x29D2 || \
33+ (IS_IGD(dev)))
34
35 #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
36 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
37
38 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
39- IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
40+ IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
41+ IS_IGD(dev))
42
43 #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
44 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
45diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
46index 9d6539a..f07d315 100644
47--- a/drivers/gpu/drm/i915/i915_reg.h
48+++ b/drivers/gpu/drm/i915/i915_reg.h
49@@ -358,6 +358,7 @@
50 #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
51 #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
52 #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
53+#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
54
55 #define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
56 #define I915_CRC_ERROR_ENABLE (1UL<<29)
57@@ -434,6 +435,7 @@
58 */
59 #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
60 #define DPLL_FPA01_P1_POST_DIV_SHIFT 16
61+#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
62 /* i830, required in DVO non-gang */
63 #define PLL_P2_DIVIDE_BY_4 (1 << 23)
64 #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
65@@ -500,10 +502,12 @@
66 #define FPB0 0x06048
67 #define FPB1 0x0604c
68 #define FP_N_DIV_MASK 0x003f0000
69+#define FP_N_IGD_DIV_MASK 0x00ff0000
70 #define FP_N_DIV_SHIFT 16
71 #define FP_M1_DIV_MASK 0x00003f00
72 #define FP_M1_DIV_SHIFT 8
73 #define FP_M2_DIV_MASK 0x0000003f
74+#define FP_M2_IGD_DIV_MASK 0x000000ff
75 #define FP_M2_DIV_SHIFT 0
76 #define DPLL_TEST 0x606c
77 #define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
78diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
79index a283427..1702564 100644
80--- a/drivers/gpu/drm/i915/intel_display.c
81+++ b/drivers/gpu/drm/i915/intel_display.c
82@@ -90,18 +90,32 @@ typedef struct {
83 #define I9XX_DOT_MAX 400000
84 #define I9XX_VCO_MIN 1400000
85 #define I9XX_VCO_MAX 2800000
86+#define IGD_VCO_MIN 1700000
87+#define IGD_VCO_MAX 3500000
88 #define I9XX_N_MIN 1
89 #define I9XX_N_MAX 6
90+/* IGD's Ncounter is a ring counter */
91+#define IGD_N_MIN 3
92+#define IGD_N_MAX 6
93 #define I9XX_M_MIN 70
94 #define I9XX_M_MAX 120
95+#define IGD_M_MIN 2
96+#define IGD_M_MAX 256
97 #define I9XX_M1_MIN 10
98 #define I9XX_M1_MAX 22
99 #define I9XX_M2_MIN 5
100 #define I9XX_M2_MAX 9
101+/* IGD M1 is reserved, and must be 0 */
102+#define IGD_M1_MIN 0
103+#define IGD_M1_MAX 0
104+#define IGD_M2_MIN 0
105+#define IGD_M2_MAX 254
106 #define I9XX_P_SDVO_DAC_MIN 5
107 #define I9XX_P_SDVO_DAC_MAX 80
108 #define I9XX_P_LVDS_MIN 7
109 #define I9XX_P_LVDS_MAX 98
110+#define IGD_P_LVDS_MIN 7
111+#define IGD_P_LVDS_MAX 112
112 #define I9XX_P1_MIN 1
113 #define I9XX_P1_MAX 8
114 #define I9XX_P2_SDVO_DAC_SLOW 10
115@@ -115,6 +129,8 @@ typedef struct {
116 #define INTEL_LIMIT_I8XX_LVDS 1
117 #define INTEL_LIMIT_I9XX_SDVO_DAC 2
118 #define INTEL_LIMIT_I9XX_LVDS 3
119+#define INTEL_LIMIT_IGD_SDVO_DAC 4
120+#define INTEL_LIMIT_IGD_LVDS 5
121
122 static const intel_limit_t intel_limits[] = {
123 { /* INTEL_LIMIT_I8XX_DVO_DAC */
124@@ -168,6 +184,32 @@ static const intel_limit_t intel_limits[] = {
125 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
126 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
127 },
128+ { /* INTEL_LIMIT_IGD_SDVO */
129+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
130+ .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
131+ .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
132+ .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
133+ .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
134+ .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
135+ .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
136+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
137+ .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
138+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
139+ },
140+ { /* INTEL_LIMIT_IGD_LVDS */
141+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
142+ .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
143+ .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
144+ .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
145+ .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
146+ .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
147+ .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX },
148+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
149+ /* IGD only supports single-channel mode. */
150+ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
151+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
152+ },
153+
154 };
155
156 static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
157@@ -175,11 +217,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
158 struct drm_device *dev = crtc->dev;
159 const intel_limit_t *limit;
160
161- if (IS_I9XX(dev)) {
162+ if (IS_I9XX(dev) && !IS_IGD(dev)) {
163 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
164 limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
165 else
166 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
167+ } else if (IS_IGD(dev)) {
168+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
169+ limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
170+ else
171+ limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
172 } else {
173 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
174 limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
175@@ -189,8 +236,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
176 return limit;
177 }
178
179-static void intel_clock(int refclk, intel_clock_t *clock)
180+/* m1 is reserved as 0 in IGD, n is a ring counter */
181+static void igd_clock(int refclk, intel_clock_t *clock)
182 {
183+ clock->m = clock->m2 + 2;
184+ clock->p = clock->p1 * clock->p2;
185+ clock->vco = refclk * clock->m / clock->n;
186+ clock->dot = clock->vco / clock->p;
187+}
188+
189+static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
190+{
191+ if (IS_IGD(dev)) {
192+ igd_clock(refclk, clock);
193+ return;
194+ }
195 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
196 clock->p = clock->p1 * clock->p2;
197 clock->vco = refclk * clock->m / (clock->n + 2);
198@@ -226,6 +286,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
199 static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
200 {
201 const intel_limit_t *limit = intel_limit (crtc);
202+ struct drm_device *dev = crtc->dev;
203
204 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
205 INTELPllInvalid ("p1 out of range\n");
206@@ -235,7 +296,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
207 INTELPllInvalid ("m2 out of range\n");
208 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
209 INTELPllInvalid ("m1 out of range\n");
210- if (clock->m1 <= clock->m2)
211+ if (clock->m1 <= clock->m2 && !IS_IGD(dev))
212 INTELPllInvalid ("m1 <= m2\n");
213 if (clock->m < limit->m.min || limit->m.max < clock->m)
214 INTELPllInvalid ("m out of range\n");
215@@ -289,15 +350,17 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
216 memset (best_clock, 0, sizeof (*best_clock));
217
218 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
219- for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
220- clock.m2 <= limit->m2.max; clock.m2++) {
221+ for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
222+ /* m1 is always 0 in IGD */
223+ if (clock.m2 >= clock.m1 && !IS_IGD(dev))
224+ break;
225 for (clock.n = limit->n.min; clock.n <= limit->n.max;
226 clock.n++) {
227 for (clock.p1 = limit->p1.min;
228 clock.p1 <= limit->p1.max; clock.p1++) {
229 int this_err;
230
231- intel_clock(refclk, &clock);
232+ intel_clock(dev, refclk, &clock);
233
234 if (!intel_PLL_is_valid(crtc, &clock))
235 continue;
236@@ -634,7 +697,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev)
237 return 400000;
238 else if (IS_I915G(dev))
239 return 333000;
240- else if (IS_I945GM(dev) || IS_845G(dev))
241+ else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
242 return 200000;
243 else if (IS_I915GM(dev)) {
244 u16 gcfgc = 0;
245@@ -782,7 +845,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
246 return -EINVAL;
247 }
248
249- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
250+ if (IS_IGD(dev))
251+ fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
252+ else
253+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
254
255 dpll = DPLL_VGA_MODE_DIS;
256 if (IS_I9XX(dev)) {
257@@ -799,7 +865,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
258 }
259
260 /* compute bitmask from p1 value */
261- dpll |= (1 << (clock.p1 - 1)) << 16;
262+ if (IS_IGD(dev))
263+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
264+ else
265+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
266 switch (clock.p2) {
267 case 5:
268 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
269@@ -1279,10 +1348,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
270 fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
271
272 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
273- clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
274- clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
275+ if (IS_IGD(dev)) {
276+ clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
277+ clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
278+ } else {
279+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
280+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
281+ }
282+
283 if (IS_I9XX(dev)) {
284- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
285+ if (IS_IGD(dev))
286+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
287+ DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
288+ else
289+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
290 DPLL_FPA01_P1_POST_DIV_SHIFT);
291
292 switch (dpll & DPLL_MODE_MASK) {
293@@ -1301,7 +1380,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
294 }
295
296 /* XXX: Handle the 100Mhz refclk */
297- intel_clock(96000, &clock);
298+ intel_clock(dev, 96000, &clock);
299 } else {
300 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
301
302@@ -1313,9 +1392,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
303 if ((dpll & PLL_REF_INPUT_MASK) ==
304 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
305 /* XXX: might not be 66MHz */
306- intel_clock(66000, &clock);
307+ intel_clock(dev, 66000, &clock);
308 } else
309- intel_clock(48000, &clock);
310+ intel_clock(dev, 48000, &clock);
311 } else {
312 if (dpll & PLL_P1_DIVIDE_BY_TWO)
313 clock.p1 = 2;
314@@ -1328,7 +1407,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
315 else
316 clock.p2 = 2;
317
318- intel_clock(48000, &clock);
319+ intel_clock(dev, 48000, &clock);
320 }
321 }
322
323diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
324index 5165f24..76c4c82 100644
325--- a/include/drm/drm_pciids.h
326+++ b/include/drm/drm_pciids.h
327@@ -418,4 +418,6 @@
328 {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
329 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
330 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
331+ {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
332+ {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
333 {0, 0, 0}
334--
3351.6.1.3
336
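The heart of the patch is the alternate clock formula: on IGD, m1 is reserved, m = m2 + 2, and n divides the reference clock directly instead of (n + 2). A small standalone program showing both computations side by side (the field values are arbitrary illustrations, not a validated mode):

#include <stdio.h>

struct dpll { int n, m1, m2, p1, p2; };

/* Classic i9xx: m = 5 * (m1 + 2) + (m2 + 2), vco = refclk * m / (n + 2). */
static int i9xx_dot_khz(int refclk_khz, struct dpll c)
{
	int m = 5 * (c.m1 + 2) + (c.m2 + 2);
	int vco = refclk_khz * m / (c.n + 2);

	return vco / (c.p1 * c.p2);
}

/* IGD: m1 is reserved (0), m = m2 + 2, and n divides the refclk directly. */
static int igd_dot_khz(int refclk_khz, struct dpll c)
{
	int m = c.m2 + 2;
	int vco = refclk_khz * m / c.n;

	return vco / (c.p1 * c.p2);
}

int main(void)
{
	struct dpll c = { .n = 4, .m1 = 12, .m2 = 8, .p1 = 2, .p2 = 10 };

	printf("i9xx: %d kHz\n", i9xx_dot_khz(96000, c));	/* 96 MHz refclk */
	printf("igd:  %d kHz\n", igd_dot_khz(96000, c));
	return 0;
}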
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-gtt-size.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-gtt-size.patch
deleted file mode 100644
index c16350f9fd..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-gtt-size.patch
+++ /dev/null
@@ -1,21 +0,0 @@
1The IGD device only has the last page used by the GTT; this should be aligned with the AGP GART code.
2
3Signed-off-by: Shaohua Li <shaohua.li@intel.com>
4---
5 drivers/gpu/drm/i915/i915_dma.c | 2 +-
6 1 file changed, 1 insertion(+), 1 deletion(-)
7
8Index: linux/drivers/gpu/drm/i915/i915_dma.c
9===================================================================
10--- linux.orig/drivers/gpu/drm/i915/i915_dma.c 2009-03-13 15:36:12.000000000 +0800
11+++ linux/drivers/gpu/drm/i915/i915_dma.c 2009-03-13 15:37:26.000000000 +0800
12@@ -880,7 +880,7 @@ static int i915_probe_agp(struct drm_dev
13 * Some of the preallocated space is taken by the GTT
14 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
15 */
16- if (IS_G4X(dev))
17+ if (IS_G4X(dev) || IS_IGD(dev))
18 overhead = 4096;
19 else
20 overhead = (*aperture_size / 1024) + 4096;
21
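To put the one-line change in perspective (illustrative numbers): with a 256 MB aperture the old formula reserves 256 MB / 1024 + 4 KB = 256 KB + 4 KB of preallocated space for GTT and popup, whereas on IGD only the single 4 KB page at the end is actually used, so anything more would needlessly be subtracted from memory the driver could otherwise manage.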
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-i2c.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-i2c.patch
deleted file mode 100644
index 00a6cf481f..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-i2c.patch
+++ /dev/null
@@ -1,38 +0,0 @@
1On IGD, the DPCUNIT_CLOCK_GATE_DISABLE bit must be set, otherwise I2C
2accesses do not work correctly.
3
4Signed-off-by: Shaohua Li <shaohua.li@intel.com>
5---
6 drivers/gpu/drm/i915/i915_reg.h | 1 +
7 drivers/gpu/drm/i915/intel_display.c | 5 +++++
8 2 files changed, 6 insertions(+)
9
10Index: linux/drivers/gpu/drm/i915/i915_reg.h
11===================================================================
12--- linux.orig/drivers/gpu/drm/i915/i915_reg.h 2009-03-16 14:18:27.000000000 +0800
13+++ linux/drivers/gpu/drm/i915/i915_reg.h 2009-03-16 14:28:09.000000000 +0800
14@@ -523,6 +523,7 @@
15 #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
16 #define D_STATE 0x6104
17 #define CG_2D_DIS 0x6200
18+#define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24)
19 #define CG_3D_DIS 0x6204
20
21 /*
22Index: linux/drivers/gpu/drm/i915/intel_display.c
23===================================================================
24--- linux.orig/drivers/gpu/drm/i915/intel_display.c 2009-03-16 14:16:11.000000000 +0800
25+++ linux/drivers/gpu/drm/i915/intel_display.c 2009-03-16 14:27:46.000000000 +0800
26@@ -1545,6 +1545,11 @@ static void intel_setup_outputs(struct d
27 struct drm_i915_private *dev_priv = dev->dev_private;
28 struct drm_connector *connector;
29
30+ /* When using bit bashing for I2C, this bit needs to be set to 1 */
31+ if (IS_IGD(dev))
32+ I915_WRITE(CG_2D_DIS,
33+ I915_READ(CG_2D_DIS) | DPCUNIT_CLOCK_GATE_DISABLE);
34+
35 intel_crt_init(dev);
36
37 /* Set up integrated LVDS */
38
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch
deleted file mode 100644
index 1003765535..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch
+++ /dev/null
@@ -1,28 +0,0 @@
1diff --git a/drivers/gpu/drm/psb/psb_fb.c b/drivers/gpu/drm/psb/psb_fb.c
2index 67934c0..8fc5221 100644
3--- a/drivers/gpu/drm/psb/psb_fb.c
4+++ b/drivers/gpu/drm/psb/psb_fb.c
5@@ -896,8 +896,10 @@ static int psbfb_kms_off(struct drm_device *dev, int suspend)
6 list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
7 struct fb_info *info = fb->fbdev;
8
9- if (suspend)
10+ if (suspend) {
11 fb_set_suspend(info, 1);
12+ psbfb_blank(FB_BLANK_POWERDOWN, info);
13+ }
14 }
15 mutex_unlock(&dev->mode_config.mutex);
16
17@@ -928,8 +930,10 @@ static int psbfb_kms_on(struct drm_device *dev, int resume)
18 list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
19 struct fb_info *info = fb->fbdev;
20
21- if (resume)
22+ if (resume) {
23 fb_set_suspend(info, 0);
24+ psbfb_blank(FB_BLANK_UNBLANK, info);
25+ }
26
27 }
28 mutex_unlock(&dev->mode_config.mutex);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch
deleted file mode 100644
index 4ffda75e15..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch
+++ /dev/null
@@ -1,37524 +0,0 @@
1diff -uNr a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
2--- a/drivers/gpu/drm/Kconfig 2009-03-23 15:12:14.000000000 -0800
3+++ b/drivers/gpu/drm/Kconfig 2009-04-07 13:28:38.000000000 -0700
4@@ -122,3 +122,14 @@
5 help
6 Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
7 chipset. If M is selected the module will be called savage.
8+
9+config DRM_PSB
10+ tristate "Intel Poulsbo/Moorestown"
11+ depends on DRM && PCI
12+ select FB_CFB_COPYAREA
13+ select FB_CFB_FILLRECT
14+ select FB_CFB_IMAGEBLIT
15+ help
16+ Choose this option if you have a Poulsbo or Moorestown platform.
17+ If M is selected the module will be called psb.
18+
19diff -uNr a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
20--- a/drivers/gpu/drm/Makefile 2009-03-23 15:12:14.000000000 -0800
21+++ b/drivers/gpu/drm/Makefile 2009-04-07 13:28:38.000000000 -0700
22@@ -25,4 +25,5 @@
23 obj-$(CONFIG_DRM_SIS) += sis/
24 obj-$(CONFIG_DRM_SAVAGE)+= savage/
25 obj-$(CONFIG_DRM_VIA) +=via/
26+obj-$(CONFIG_DRM_PSB) +=psb/
27
28diff -uNr a/drivers/gpu/drm/psb/lnc_topaz.c b/drivers/gpu/drm/psb/lnc_topaz.c
29--- a/drivers/gpu/drm/psb/lnc_topaz.c 1969-12-31 16:00:00.000000000 -0800
30+++ b/drivers/gpu/drm/psb/lnc_topaz.c 2009-04-07 13:28:38.000000000 -0700
31@@ -0,0 +1,695 @@
32+/**
33+ * file lnc_topaz.c
34+ * TOPAZ I/O operations and IRQ handling
35+ *
36+ */
37+
38+/**************************************************************************
39+ *
40+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
41+ * Copyright (c) Imagination Technologies Limited, UK
42+ * All Rights Reserved.
43+ *
44+ * Permission is hereby granted, free of charge, to any person obtaining a
45+ * copy of this software and associated documentation files (the
46+ * "Software"), to deal in the Software without restriction, including
47+ * without limitation the rights to use, copy, modify, merge, publish,
48+ * distribute, sub license, and/or sell copies of the Software, and to
49+ * permit persons to whom the Software is furnished to do so, subject to
50+ * the following conditions:
51+ *
52+ * The above copyright notice and this permission notice (including the
53+ * next paragraph) shall be included in all copies or substantial portions
54+ * of the Software.
55+ *
56+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
57+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
58+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
59+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
60+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
61+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
62+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
63+ *
64+ **************************************************************************/
65+
66+/* include headers */
67+/* #define DRM_DEBUG_CODE 2 */
68+
69+#include <drm/drmP.h>
70+#include <drm/drm_os_linux.h>
71+
72+#include "psb_drv.h"
73+#include "psb_drm.h"
74+#include "lnc_topaz.h"
75+
76+#include <linux/io.h>
77+#include <linux/delay.h>
78+
79+static int drm_psb_ospmxxx = 0x0;
80+
81+/* static function define */
82+static int lnc_topaz_deliver_command(struct drm_device *dev,
83+ struct ttm_buffer_object *cmd_buffer,
84+ unsigned long cmd_offset,
85+ unsigned long cmd_size,
86+ void **topaz_cmd, uint32_t sequence,
87+ int copy_cmd);
88+static int lnc_topaz_send(struct drm_device *dev, void *cmd,
89+ unsigned long cmd_size, uint32_t sync_seq);
90+static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd);
91+static int lnc_topaz_dequeue_send(struct drm_device *dev);
92+static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
93+ unsigned long cmd_size, uint32_t sequence);
94+
95+void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat)
96+{
97+ struct drm_psb_private *dev_priv =
98+ (struct drm_psb_private *)dev->dev_private;
99+ uint32_t clr_flag = lnc_topaz_queryirq(dev);
100+
101+ lnc_topaz_clearirq(dev, clr_flag);
102+
103+ /* ignore non-SYNC interrupts */
104+ if ((CCB_CTRL_SEQ(dev_priv) & 0x8000) == 0)
105+ return;
106+
107+ dev_priv->topaz_current_sequence =
108+ *(uint32_t *)dev_priv->topaz_sync_addr;
109+
110+ PSB_DEBUG_IRQ("TOPAZ:Got SYNC IRQ,sync seq:0x%08x (MTX) vs 0x%08x\n",
111+ dev_priv->topaz_current_sequence,
112+ dev_priv->sequence[LNC_ENGINE_ENCODE]);
113+
114+ psb_fence_handler(dev, LNC_ENGINE_ENCODE);
115+
116+ dev_priv->topaz_busy = 1;
117+ lnc_topaz_dequeue_send(dev);
118+}
119+
120+static int lnc_submit_encode_cmdbuf(struct drm_device *dev,
121+ struct ttm_buffer_object *cmd_buffer,
122+ unsigned long cmd_offset, unsigned long cmd_size,
123+ struct ttm_fence_object *fence)
124+{
125+ struct drm_psb_private *dev_priv = dev->dev_private;
126+ unsigned long irq_flags;
127+ int ret = 0;
128+ void *cmd;
129+ uint32_t sequence = dev_priv->sequence[LNC_ENGINE_ENCODE];
130+
131+ PSB_DEBUG_GENERAL("TOPAZ: command submit\n");
132+
133+ /* # lock topaz's mutex [msvdx_mutex] */
134+ mutex_lock(&dev_priv->topaz_mutex);
135+
136+ PSB_DEBUG_GENERAL("TOPAZ: topaz busy = %d\n", dev_priv->topaz_busy);
137+
138+ if (dev_priv->topaz_fw_loaded == 0) {
139+ /* #.# load fw to driver */
140+ PSB_DEBUG_INIT("TOPAZ: load /lib/firmware/topaz_fw.bin\n");
141+ ret = topaz_init_fw(dev);
142+ if (ret != 0) {
143+ mutex_unlock(&dev_priv->topaz_mutex);
144+
145+ /* FIXME: find a proper return value */
146+ DRM_ERROR("TOPAX:load /lib/firmware/topaz_fw.bin fail,"
147+ "ensure udevd is configured correctly!\n");
148+
149+ return -EFAULT;
150+ }
151+ dev_priv->topaz_fw_loaded = 1;
152+ } else {
153+ /* OSPM power state change */
154+ /* FIXME: why here? why not in the NEW_CODEC case? */
155+ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX) {
156+ psb_power_up_topaz(dev);
157+ lnc_topaz_restore_mtx_state(dev);
158+ }
159+ }
160+
161+ /* # schedule watchdog */
162+ /* psb_schedule_watchdog(dev_priv); */
163+
164+ /* # spin lock irq save [msvdx_lock] */
165+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
166+
167+ /* # if topaz need to reset, reset it */
168+ if (dev_priv->topaz_needs_reset) {
169+ /* #.# reset it */
170+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
171+ PSB_DEBUG_GENERAL("TOPAZ: needs reset.\n");
172+
173+ if (lnc_topaz_reset(dev_priv)) {
174+ mutex_unlock(&dev_priv->topaz_mutex);
175+ ret = -EBUSY;
176+ DRM_ERROR("TOPAZ: reset failed.\n");
177+ return ret;
178+ }
179+
180+ PSB_DEBUG_GENERAL("TOPAZ: reset ok.\n");
181+
182+ /* #.# reset any related flags */
183+ dev_priv->topaz_needs_reset = 0;
184+ dev_priv->topaz_busy = 0;
185+ PSB_DEBUG_GENERAL("XXX: does we need idle flag??\n");
186+ dev_priv->topaz_start_idle = 0;
187+
188+ /* #.# init topaz */
189+ lnc_topaz_init(dev);
190+
191+ /* avoid another fw init */
192+ dev_priv->topaz_fw_loaded = 1;
193+
194+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
195+ }
196+
197+ if (!dev_priv->topaz_busy) {
198+ /* # direct map topaz command if topaz is free */
199+ PSB_DEBUG_GENERAL("TOPAZ:direct send command,sequence %08x \n",
200+ sequence);
201+
202+ dev_priv->topaz_busy = 1;
203+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
204+
205+ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
206+ cmd_size, NULL, sequence, 0);
207+
208+ if (ret) {
209+ DRM_ERROR("TOPAZ: failed to extract cmd...\n");
210+ mutex_unlock(&dev_priv->topaz_mutex);
211+ return ret;
212+ }
213+ } else {
214+ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence %08x \n",
215+ sequence);
216+ cmd = NULL;
217+
218+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
219+
220+ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
221+ cmd_size, &cmd, sequence, 1);
222+ if (cmd == NULL || ret) {
223+ DRM_ERROR("TOPAZ: map command for save fialed\n");
224+ mutex_unlock(&dev_priv->topaz_mutex);
225+ return ret;
226+ }
227+
228+ ret = lnc_topaz_save_command(dev, cmd, cmd_size, sequence);
229+ if (ret)
230+ DRM_ERROR("TOPAZ: save command failed\n");
231+ }
232+
233+ /* OPSM D0IX power state change */
234+ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX)
235+ lnc_topaz_save_mtx_state(dev);
236+
237+ mutex_unlock(&dev_priv->topaz_mutex);
238+
239+ return ret;
240+}
241+
242+static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
243+ unsigned long cmd_size, uint32_t sequence)
244+{
245+ struct drm_psb_private *dev_priv = dev->dev_private;
246+ struct lnc_topaz_cmd_queue *topaz_cmd;
247+ unsigned long irq_flags;
248+
249+ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence: %08x..\n",
250+ sequence);
251+
252+ topaz_cmd = drm_calloc(1, sizeof(struct lnc_topaz_cmd_queue),
253+ DRM_MEM_DRIVER);
254+ if (topaz_cmd == NULL) {
255+ mutex_unlock(&dev_priv->topaz_mutex);
256+ DRM_ERROR("TOPAZ: out of memory....\n");
257+ return -ENOMEM;
258+ }
259+
260+ topaz_cmd->cmd = cmd;
261+ topaz_cmd->cmd_size = cmd_size;
262+ topaz_cmd->sequence = sequence;
263+
264+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
265+ list_add_tail(&topaz_cmd->head, &dev_priv->topaz_queue);
266+ if (!dev_priv->topaz_busy) {
267+ /* dev_priv->topaz_busy = 1; */
268+ PSB_DEBUG_GENERAL("TOPAZ: need immediate dequeue...\n");
269+ lnc_topaz_dequeue_send(dev);
270+ PSB_DEBUG_GENERAL("TOPAZ: after dequeue command\n");
271+ }
272+
273+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
274+
275+ return 0;
276+}
277+
278+
279+int lnc_cmdbuf_video(struct drm_file *priv,
280+ struct list_head *validate_list,
281+ uint32_t fence_type,
282+ struct drm_psb_cmdbuf_arg *arg,
283+ struct ttm_buffer_object *cmd_buffer,
284+ struct psb_ttm_fence_rep *fence_arg)
285+{
286+ struct drm_device *dev = priv->minor->dev;
287+ struct ttm_fence_object *fence = NULL;
288+ int ret;
289+
290+ ret = lnc_submit_encode_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
291+ arg->cmdbuf_size, fence);
292+ if (ret)
293+ return ret;
294+
295+#if LNC_TOPAZ_NO_IRQ /* workaround for interrupt issue */
296+ psb_fence_or_sync(priv, LNC_ENGINE_ENCODE, fence_type, arg->fence_flags,
297+ validate_list, fence_arg, &fence);
298+
299+ if (fence)
300+ ttm_fence_object_unref(&fence);
301+#endif
302+
303+ mutex_lock(&cmd_buffer->mutex);
304+ if (cmd_buffer->sync_obj != NULL)
305+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
306+ mutex_unlock(&cmd_buffer->mutex);
307+
308+ return 0;
309+}
310+
311+static int lnc_topaz_sync(struct drm_device *dev, uint32_t sync_seq)
312+{
313+ struct drm_psb_private *dev_priv = dev->dev_private;
314+ uint32_t sync_cmd[3];
315+ int count = 10000;
316+#if 0
317+ struct ttm_fence_device *fdev = &dev_priv->fdev;
318+ struct ttm_fence_class_manager *fc =
319+ &fdev->fence_class[LNC_ENGINE_ENCODE];
320+ unsigned long irq_flags;
321+#endif
322+ uint32_t *sync_p = (uint32_t *)dev_priv->topaz_sync_addr;
323+
324+ /* insert a SYNC command here */
325+ dev_priv->topaz_sync_cmd_seq = (1 << 15) | dev_priv->topaz_cmd_seq++;
326+ sync_cmd[0] = MTX_CMDID_SYNC | (3 << 8) |
327+ (dev_priv->topaz_sync_cmd_seq << 16);
328+ sync_cmd[1] = dev_priv->topaz_sync_offset;
329+ sync_cmd[2] = sync_seq;
330+
331+ PSB_DEBUG_GENERAL("TOPAZ:MTX_CMDID_SYNC: size(3),cmd seq (0x%04x),"
332+ "sync_seq (0x%08x)\n",
333+ dev_priv->topaz_sync_cmd_seq, sync_seq);
334+
335+ lnc_mtx_send(dev_priv, sync_cmd);
336+
337+#if LNC_TOPAZ_NO_IRQ /* workaround for interrupt issue */
338+ /* # poll topaz register for certain times */
339+ while (count && *sync_p != sync_seq) {
340+ DRM_UDELAY(100);
341+ --count;
342+ }
343+ if ((count == 0) && (*sync_p != sync_seq)) {
344+ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),actual 0x%08x\n",
345+ sync_seq, *sync_p);
346+ return -EBUSY;
347+ }
348+ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
349+
350+ dev_priv->topaz_busy = 0;
351+
352+ /* XXX: check psb_fence_handler is suitable for topaz */
353+ dev_priv->topaz_current_sequence = *sync_p;
354+#if 0
355+ write_lock_irqsave(&fc->lock, irq_flags);
356+ ttm_fence_handler(fdev, LNC_ENGINE_ENCODE,
357+ dev_priv->topaz_current_sequence,
358+ _PSB_FENCE_TYPE_EXE, 0);
359+ write_unlock_irqrestore(&fc->lock, irq_flags);
360+#endif
361+#endif
362+ return 0;
363+}
364+
365+int
366+lnc_topaz_deliver_command(struct drm_device *dev,
367+ struct ttm_buffer_object *cmd_buffer,
368+ unsigned long cmd_offset, unsigned long cmd_size,
369+ void **topaz_cmd, uint32_t sequence,
370+ int copy_cmd)
371+{
372+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
373+ struct ttm_bo_kmap_obj cmd_kmap;
374+ bool is_iomem;
375+ int ret;
376+ unsigned char *cmd_start, *tmp;
377+
378+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2,
379+ &cmd_kmap);
380+ if (ret) {
381+ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", ret);
382+ return ret;
383+ }
384+ cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap,
385+ &is_iomem) + cmd_page_offset;
386+
387+ if (copy_cmd) {
388+ PSB_DEBUG_GENERAL("TOPAZ: queue commands\n");
389+ tmp = drm_calloc(1, cmd_size, DRM_MEM_DRIVER);
390+ if (tmp == NULL) {
391+ ret = -ENOMEM;
392+ goto out;
393+ }
394+ memcpy(tmp, cmd_start, cmd_size);
395+ *topaz_cmd = tmp;
396+ } else {
397+ PSB_DEBUG_GENERAL("TOPAZ: directly send the command\n");
398+ ret = lnc_topaz_send(dev, cmd_start, cmd_size, sequence);
399+ if (ret) {
400+ DRM_ERROR("TOPAZ: commit commands failed.\n");
401+ ret = -EINVAL;
402+ }
403+ }
404+
405+out:
406+ PSB_DEBUG_GENERAL("TOPAZ:cmd_size(%ld), sequence(%d) copy_cmd(%d)\n",
407+ cmd_size, sequence, copy_cmd);
408+
409+ ttm_bo_kunmap(&cmd_kmap);
410+
411+ return ret;
412+}
413+
414+int
415+lnc_topaz_send(struct drm_device *dev, void *cmd,
416+ unsigned long cmd_size, uint32_t sync_seq)
417+{
418+ struct drm_psb_private *dev_priv = dev->dev_private;
419+ int ret = 0;
420+ unsigned char *command = (unsigned char *) cmd;
421+ struct topaz_cmd_header *cur_cmd_header;
422+ uint32_t cur_cmd_size, cur_cmd_id;
423+ uint32_t codec;
424+
425+ PSB_DEBUG_GENERAL("TOPAZ: send the command in the buffer one by one\n");
426+
427+ while (cmd_size > 0) {
428+ cur_cmd_header = (struct topaz_cmd_header *) command;
429+ cur_cmd_size = cur_cmd_header->size * 4;
430+ cur_cmd_id = cur_cmd_header->id;
431+
432+ switch (cur_cmd_id) {
433+ case MTX_CMDID_SW_NEW_CODEC:
434+ codec = *((uint32_t *) cmd + 1);
435+
436+ PSB_DEBUG_GENERAL("TOPAZ: setup new codec %s (%d)\n",
437+ codec_to_string(codec), codec);
438+ if (topaz_setup_fw(dev, codec)) {
439+ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
440+ return -EBUSY;
441+ }
442+
443+ dev_priv->topaz_cur_codec = codec;
444+ break;
445+
446+ case MTX_CMDID_SW_ENTER_LOWPOWER:
447+ PSB_DEBUG_GENERAL("TOPAZ: enter lowpower.... \n");
448+ PSB_DEBUG_GENERAL("XXX: implement it\n");
449+ break;
450+
451+ case MTX_CMDID_SW_LEAVE_LOWPOWER:
452+ PSB_DEBUG_GENERAL("TOPAZ: leave lowpower... \n");
453+ PSB_DEBUG_GENERAL("XXX: implement it\n");
454+ break;
455+
456+ /* ordinary command */
457+ case MTX_CMDID_START_PIC:
458+ /* XXX: specially handle START_PIC hw command */
459+ CCB_CTRL_SET_QP(dev_priv,
460+ *(command + cur_cmd_size - 4));
461+ /* strip the QP parameter (it's software arg) */
462+ cur_cmd_header->size--;
463+ default:
464+ cur_cmd_header->seq = 0x7fff &
465+ dev_priv->topaz_cmd_seq++;
466+
467+ PSB_DEBUG_GENERAL("TOPAZ: %s: size(%d),"
468+ " seq (0x%04x)\n",
469+ cmd_to_string(cur_cmd_id),
470+ cur_cmd_size, cur_cmd_header->seq);
471+ ret = lnc_mtx_send(dev_priv, command);
472+ if (ret) {
473+ DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret);
474+ goto out;
475+ }
476+ break;
477+ }
478+
479+ command += cur_cmd_size;
480+ cmd_size -= cur_cmd_size;
481+ }
482+ lnc_topaz_sync(dev, sync_seq);
483+out:
484+ return ret;
485+}
486+
487+static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd)
488+{
489+ struct topaz_cmd_header *cur_cmd_header =
490+ (struct topaz_cmd_header *) cmd;
491+ uint32_t cmd_size = cur_cmd_header->size;
492+ uint32_t read_index, write_index;
493+ const uint32_t *cmd_pointer = (uint32_t *) cmd;
494+
495+ int ret = 0;
496+
497+ /* <msvdx does> # enable all clock */
498+
499+ write_index = dev_priv->topaz_cmd_windex;
500+ if (write_index + cmd_size + 1 > dev_priv->topaz_ccb_size) {
501+ int free_space = dev_priv->topaz_ccb_size - write_index;
502+
503+ PSB_DEBUG_GENERAL("TOPAZ: -------will wrap CCB write point.\n");
504+ if (free_space > 0) {
505+ struct topaz_cmd_header pad_cmd;
506+
507+ pad_cmd.id = MTX_CMDID_NULL;
508+ pad_cmd.size = free_space;
509+ pad_cmd.seq = 0x7fff & dev_priv->topaz_cmd_seq++;
510+
511+ PSB_DEBUG_GENERAL("TOPAZ: MTX_CMDID_NULL:"
512+ " size(%d),seq (0x%04x)\n",
513+ pad_cmd.size, pad_cmd.seq);
514+
515+ TOPAZ_BEGIN_CCB(dev_priv);
516+ TOPAZ_OUT_CCB(dev_priv, pad_cmd.val);
517+ TOPAZ_END_CCB(dev_priv, 1);
518+ }
519+ POLL_WB_RINDEX(dev_priv, 0);
520+ if (ret == 0)
521+ dev_priv->topaz_cmd_windex = 0;
522+ else {
523+ DRM_ERROR("TOPAZ: poll rindex timeout\n");
524+ return ret; /* HW may hang, need reset */
525+ }
526+ PSB_DEBUG_GENERAL("TOPAZ: -------wrap CCB was done.\n");
527+ }
528+
529+ read_index = CCB_CTRL_RINDEX(dev_priv); /* temporarily use CCB CTRL */
530+ write_index = dev_priv->topaz_cmd_windex;
531+
532+ PSB_DEBUG_GENERAL("TOPAZ: write index(%d), read index(%d,WB=%d)\n",
533+ write_index, read_index, WB_CCB_CTRL_RINDEX(dev_priv));
534+ TOPAZ_BEGIN_CCB(dev_priv);
535+ while (cmd_size > 0) {
536+ TOPAZ_OUT_CCB(dev_priv, *cmd_pointer++);
537+ --cmd_size;
538+ }
539+ TOPAZ_END_CCB(dev_priv, 1);
540+
541+ POLL_WB_RINDEX(dev_priv, dev_priv->topaz_cmd_windex);
542+
543+#if 0
544+ DRM_UDELAY(1000);
545+ lnc_topaz_clearirq(dev,
546+ lnc_topaz_queryirq(dev));
547+ LNC_TRACEL("TOPAZ: after clear, query again\n");
548+ lnc_topaz_queryirq(dev_priv);
549+#endif
550+
551+ return ret;
552+}
553+
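/*
 * A generic sketch (not part of the patch) of the wrap handling used in
 * lnc_mtx_send() above: when a variable-length record will not fit before
 * the end of the circular command buffer, a pad record is emitted to fill
 * the remainder and the write index restarts at zero.
 */
static unsigned int ccb_reserve(unsigned int windex, unsigned int ccb_size,
				unsigned int rec_size,
				void (*emit_pad)(unsigned int pad_words))
{
	if (windex + rec_size > ccb_size) {
		unsigned int free_space = ccb_size - windex;

		if (free_space > 0)
			emit_pad(free_space);	/* consumer skips pad records */
		windex = 0;			/* wrap to the start of the ring */
	}
	return windex;				/* caller writes rec_size words here */
}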
554+int lnc_topaz_dequeue_send(struct drm_device *dev)
555+{
556+ struct drm_psb_private *dev_priv = dev->dev_private;
557+ struct lnc_topaz_cmd_queue *topaz_cmd = NULL;
558+ int ret;
559+
560+ PSB_DEBUG_GENERAL("TOPAZ: dequeue command and send it to topaz\n");
561+
562+ if (list_empty(&dev_priv->topaz_queue)) {
563+ dev_priv->topaz_busy = 0;
564+ return 0;
565+ }
566+
567+ topaz_cmd = list_first_entry(&dev_priv->topaz_queue,
568+ struct lnc_topaz_cmd_queue, head);
569+
570+ PSB_DEBUG_GENERAL("TOPAZ: queue has id %08x\n", topaz_cmd->sequence);
571+ ret = lnc_topaz_send(dev, topaz_cmd->cmd, topaz_cmd->cmd_size,
572+ topaz_cmd->sequence);
573+ if (ret) {
574+ DRM_ERROR("TOPAZ: lnc_topaz_send failed.\n");
575+ ret = -EINVAL;
576+ }
577+
578+ list_del(&topaz_cmd->head);
579+ kfree(topaz_cmd->cmd);
580+ drm_free(topaz_cmd, sizeof(struct lnc_topaz_cmd_queue),
581+ DRM_MEM_DRIVER);
582+
583+ return ret;
584+}
585+
586+void
587+lnc_topaz_lockup(struct drm_psb_private *dev_priv,
588+ int *topaz_lockup, int *topaz_idle)
589+{
590+ unsigned long irq_flags;
591+ uint32_t tmp;
592+
593+ /* avoid printk in this function; it is called often enough to flood the log */
594+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
595+ *topaz_lockup = 0;
596+ *topaz_idle = 1;
597+
598+ if (!dev_priv->has_topaz) {
599+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
600+ return;
601+ }
602+
603+ tmp = dev_priv->topaz_current_sequence
604+ - dev_priv->sequence[LNC_ENGINE_ENCODE];
605+ if (tmp > 0x0FFFFFFF) {
606+ if (dev_priv->topaz_current_sequence ==
607+ dev_priv->topaz_last_sequence) {
608+ *topaz_lockup = 1;
609+ } else {
610+ dev_priv->topaz_last_sequence =
611+ dev_priv->topaz_current_sequence;
612+ *topaz_idle = 0;
613+ }
614+
615+ if (dev_priv->topaz_start_idle)
616+ dev_priv->topaz_start_idle = 0;
617+ } else {
618+ if (dev_priv->topaz_needs_reset == 0) {
619+ if (dev_priv->topaz_start_idle &&
620+ (dev_priv->topaz_finished_sequence
621+ == dev_priv->topaz_current_sequence)) {
622+ if (time_after_eq(jiffies,
623+ dev_priv->topaz_idle_start_jiffies +
624+ TOPAZ_MAX_IDELTIME)) {
625+
626+ /* XXX: disable clock <msvdx does> */
627+ dev_priv->topaz_needs_reset = 1;
628+ } else
629+ *topaz_idle = 0;
630+ } else {
631+ dev_priv->topaz_start_idle = 1;
632+ dev_priv->topaz_idle_start_jiffies = jiffies;
633+ dev_priv->topaz_finished_sequence =
634+ dev_priv->topaz_current_sequence;
635+ *topaz_idle = 0;
636+ }
637+ }
638+ }
639+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
640+}
641+
642+
643+void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_count)
644+{
645+ PSB_DEBUG_GENERAL("TOPAZ: kick mtx count(%d).\n", kick_count);
646+ MTX_WRITE32(MTX_CR_MTX_KICK, kick_count);
647+}
648+
649+/* power up msvdx, OSPM function */
650+int psb_power_up_topaz(struct drm_device *dev)
651+{
652+ struct drm_psb_private *dev_priv =
653+ (struct drm_psb_private *)dev->dev_private;
654+
655+ if (dev_priv->topaz_power_state == LNC_TOPAZ_POWERON)
656+ return 0;
657+
658+ psb_up_island_power(dev, PSB_VIDEO_ENC_ISLAND);
659+
660+ PSB_DEBUG_GENERAL("FIXME: how to write clock state for topaz?"
661+ " so many clock\n");
662+ /* PSB_WMSVDX32(dev_priv->topaz_clk_state, MSVDX_MAN_CLK_ENABLE); */
663+
664+ PSB_DEBUG_GENERAL("FIXME restore registers or init msvdx\n");
665+
666+ PSB_DEBUG_GENERAL("FIXME: flush all mmu\n");
667+
668+ dev_priv->topaz_power_state = LNC_TOPAZ_POWERON;
669+
670+ return 0;
671+}
672+
673+int psb_power_down_topaz(struct drm_device *dev)
674+{
675+ struct drm_psb_private *dev_priv =
676+ (struct drm_psb_private *)dev->dev_private;
677+
678+ if (dev_priv->topaz_power_state == LNC_TOPAZ_POWEROFF)
679+ return 0;
680+
681+ if (dev_priv->topaz_busy) {
682+ PSB_DEBUG_GENERAL("FIXME: MSVDX is busy, should wait it\n");
683+ return -EBUSY;
684+ }
685+ PSB_DEBUG_GENERAL("FIXME: how to read clock state for topaz?"
686+ " so many clock\n");
687+ /* dev_priv->topaz_clk_state = PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE); */
688+ PSB_DEBUG_GENERAL("FIXME: save MSVDX register\n");
689+ PSB_DEBUG_GENERAL("FIXME: save MSVDX context\n");
690+
691+ psb_down_island_power(dev, PSB_VIDEO_ENC_ISLAND);
692+
693+ dev_priv->topaz_power_state = LNC_TOPAZ_POWEROFF;
694+
695+ return 0;
696+}
697+
698+int lnc_prepare_topaz_suspend(struct drm_device *dev)
699+{
700+ /* FIXME: need reset when resume?
701+ * Is mtx restore enough for encoder continue run? */
702+ /* dev_priv->topaz_needs_reset = 1; */
703+
704+ /* make sure all IRQs are serviced */
705+
706+ /* make sure all the fence is signaled */
707+
708+ /* save mtx context into somewhere */
709+ /* lnc_topaz_save_mtx_state(dev); */
710+
711+ return 0;
712+}
713+
714+int lnc_prepare_topaz_resume(struct drm_device *dev)
715+{
716+ /* FIXME: need reset when resume?
717+ * Is mtx restore enough for encoder continue run? */
718+ /* dev_priv->topaz_needs_reset = 1; */
719+
720+ /* make sure IRQ is open */
721+
722+ /* restore mtx context */
723+ /* lnc_topaz_restore_mtx_state(dev); */
724+
725+ return 0;
726+}
727diff -uNr a/drivers/gpu/drm/psb/lnc_topaz.h b/drivers/gpu/drm/psb/lnc_topaz.h
728--- a/drivers/gpu/drm/psb/lnc_topaz.h 1969-12-31 16:00:00.000000000 -0800
729+++ b/drivers/gpu/drm/psb/lnc_topaz.h 2009-04-07 13:28:38.000000000 -0700
730@@ -0,0 +1,803 @@
731+/**************************************************************************
732+ *
733+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
734+ * Copyright (c) Imagination Technologies Limited, UK
735+ * All Rights Reserved.
736+ *
737+ * Permission is hereby granted, free of charge, to any person obtaining a
738+ * copy of this software and associated documentation files (the
739+ * "Software"), to deal in the Software without restriction, including
740+ * without limitation the rights to use, copy, modify, merge, publish,
741+ * distribute, sub license, and/or sell copies of the Software, and to
742+ * permit persons to whom the Software is furnished to do so, subject to
743+ * the following conditions:
744+ *
745+ * The above copyright notice and this permission notice (including the
746+ * next paragraph) shall be included in all copies or substantial portions
747+ * of the Software.
748+ *
749+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
750+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
751+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
752+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
753+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
754+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
755+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
756+ *
757+ **************************************************************************/
758+
759+#ifndef _LNC_TOPAZ_H_
760+#define _LNC_TOPAZ_H_
761+
762+#include "psb_drv.h"
763+
764+#define LNC_TOPAZ_NO_IRQ 1
765+#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4)
766+#define ENABLE_TOPAZ_OSPM_D0IX (0x10)
767+
768+/* extern int drm_psb_ospm; */
769+
770+int psb_power_up_topaz(struct drm_device *dev);
771+int psb_power_down_topaz(struct drm_device *dev);
772+int lnc_prepare_topaz_suspend(struct drm_device *dev);
773+int lnc_prepare_topaz_resume(struct drm_device *dev);
774+
775+/*
776+ * Register access macros, and macros to insert values into fields within a
777+ * word: the field's basename must have MASK_<basename> and SHIFT_<basename> constants.
778+ */
779+#define MM_WRITE32(base, offset, value) \
780+do { \
781+ *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg) \
782+ + base + offset)) = value; \
783+} while (0)
784+
785+#define MM_READ32(base, offset, pointer) \
786+do { \
787+ *(pointer) = *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg)\
788+ + base + offset)); \
789+} while (0)
790+
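+/*
+ * Note (sketch of intended use): both macros expand against a `dev_priv'
+ * variable that must exist in the calling scope; e.g. MVEA_WRITE32(0x20, v)
+ * below ends up as a store to dev_priv->topaz_reg + MVEA_START + 0x20.
+ */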
791+#define F_MASK(basename) (MASK_##basename)
792+#define F_SHIFT(basename) (SHIFT_##basename)
793+
794+#define F_ENCODE(val, basename) \
795+ (((val) << (F_SHIFT(basename))) & (F_MASK(basename)))
796+
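+/*
+ * For example (as used by lnc_topaz_enableirq() later in this header),
+ * several fields can be OR'd into one register value:
+ *
+ *   TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB,
+ *                 F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MAS_INTEN) |
+ *                 F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX));
+ *
+ * each value is shifted by SHIFT_<basename> and masked with MASK_<basename>.
+ */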
797+/* MVEA macro */
798+#define MVEA_START 0x03000
799+
800+#define MVEA_WRITE32(offset, value) MM_WRITE32(MVEA_START, offset, value)
801+#define MVEA_READ32(offset, pointer) MM_READ32(MVEA_START, offset, pointer)
802+
803+#define F_MASK_MVEA(basename) (MASK_MVEA_##basename) /* MVEA */
804+#define F_SHIFT_MVEA(basename) (SHIFT_MVEA_##basename) /* MVEA */
805+#define F_ENCODE_MVEA(val, basename) \
806+ (((val)<<(F_SHIFT_MVEA(basename)))&(F_MASK_MVEA(basename)))
807+
808+/* VLC macro */
809+#define TOPAZ_VLC_START 0x05000
810+
811+/* TOPAZ macro */
812+#define TOPAZ_START 0x02000
813+
814+#define TOPAZ_WRITE32(offset, value) MM_WRITE32(TOPAZ_START, offset, value)
815+#define TOPAZ_READ32(offset, pointer) MM_READ32(TOPAZ_START, offset, pointer)
816+
817+#define F_MASK_TOPAZ(basename) (MASK_TOPAZ_##basename)
818+#define F_SHIFT_TOPAZ(basename) (SHIFT_TOPAZ_##basename)
819+#define F_ENCODE_TOPAZ(val,basename) \
820+ (((val)<<(F_SHIFT_TOPAZ(basename)))&(F_MASK_TOPAZ(basename)))
821+
822+/* MTX macro */
823+#define MTX_START 0x0
824+
825+#define MTX_WRITE32(offset, value) MM_WRITE32(MTX_START, offset, value)
826+#define MTX_READ32(offset, pointer) MM_READ32(MTX_START, offset, pointer)
827+
828+/* DMAC macro */
829+#define DMAC_START 0x0f000
830+
831+#define DMAC_WRITE32(offset, value) MM_WRITE32(DMAC_START, offset, value)
832+#define DMAC_READ32(offset, pointer) MM_READ32(DMAC_START, offset, pointer)
833+
834+#define F_MASK_DMAC(basename) (MASK_DMAC_##basename)
835+#define F_SHIFT_DMAC(basename) (SHIFT_DMAC_##basename)
836+#define F_ENCODE_DMAC(val,basename) \
837+ (((val)<<(F_SHIFT_DMAC(basename)))&(F_MASK_DMAC(basename)))
838+
839+
840+/* Register CR_IMG_TOPAZ_INTENAB */
841+#define TOPAZ_CR_IMG_TOPAZ_INTENAB 0x0008
842+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x00000001
843+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0
844+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x0008
845+
846+#define MASK_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x80000000
847+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 31
848+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x0008
849+
850+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x00000008
851+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 3
852+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x0008
853+
854+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x00000002
855+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 1
856+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x0008
857+
858+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x00000004
859+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 2
860+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x0008
861+
862+#define TOPAZ_CR_IMG_TOPAZ_INTCLEAR 0x000C
863+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x00000001
864+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0
865+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x000C
866+
867+#define TOPAZ_CR_IMG_TOPAZ_INTSTAT 0x0004
868+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x00000001
869+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0
870+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x0004
871+
872+#define MTX_CCBCTRL_ROFF 0
873+#define MTX_CCBCTRL_COMPLETE 4
874+#define MTX_CCBCTRL_CCBSIZE 8
875+#define MTX_CCBCTRL_QP 12
876+#define MTX_CCBCTRL_INITQP 24
877+
878+#define TOPAZ_CR_MMU_STATUS 0x001C
879+#define MASK_TOPAZ_CR_MMU_PF_N_RW 0x00000001
880+#define SHIFT_TOPAZ_CR_MMU_PF_N_RW 0
881+#define REGNUM_TOPAZ_CR_MMU_PF_N_RW 0x001C
882+
883+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x00000008
884+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 3
885+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x000C
886+
887+#define TOPAZ_CR_MMU_MEM_REQ 0x0020
888+#define MASK_TOPAZ_CR_MEM_REQ_STAT_READS 0x000000FF
889+#define SHIFT_TOPAZ_CR_MEM_REQ_STAT_READS 0
890+#define REGNUM_TOPAZ_CR_MEM_REQ_STAT_READS 0x0020
891+
892+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x00000002
893+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 1
894+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x000C
895+
896+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x00000004
897+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 2
898+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x000C
899+
900+#define MTX_CR_MTX_KICK 0x0080
901+#define MASK_MTX_MTX_KICK 0x0000FFFF
902+#define SHIFT_MTX_MTX_KICK 0
903+#define REGNUM_MTX_MTX_KICK 0x0080
904+
905+#define MTX_DATA_MEM_BASE 0x82880000
906+
907+#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108
908+#define MASK_MTX_MTX_MCMR 0x00000001
909+#define SHIFT_MTX_MTX_MCMR 0
910+#define REGNUM_MTX_MTX_MCMR 0x0108
911+
912+#define MASK_MTX_MTX_MCMID 0x0FF00000
913+#define SHIFT_MTX_MTX_MCMID 20
914+#define REGNUM_MTX_MTX_MCMID 0x0108
915+
916+#define MASK_MTX_MTX_MCM_ADDR 0x000FFFFC
917+#define SHIFT_MTX_MTX_MCM_ADDR 2
918+#define REGNUM_MTX_MTX_MCM_ADDR 0x0108
919+
920+#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C
921+#define MASK_MTX_MTX_MTX_MCM_STAT 0x00000001
922+#define SHIFT_MTX_MTX_MTX_MCM_STAT 0
923+#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C
924+
925+#define MASK_MTX_MTX_MCMAI 0x00000002
926+#define SHIFT_MTX_MTX_MCMAI 1
927+#define REGNUM_MTX_MTX_MCMAI 0x0108
928+
929+#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104
930+
931+#define MVEA_CR_IMG_MVEA_SRST 0x0000
932+#define MASK_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x00000001
933+#define SHIFT_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0
934+#define REGNUM_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x0000
935+
936+#define MASK_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x00000002
937+#define SHIFT_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 1
938+#define REGNUM_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x0000
939+
940+#define MASK_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x00000004
941+#define SHIFT_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 2
942+#define REGNUM_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x0000
943+
944+#define MASK_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x00000008
945+#define SHIFT_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 3
946+#define REGNUM_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x0000
947+
948+#define MASK_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x00000010
949+#define SHIFT_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 4
950+#define REGNUM_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x0000
951+
952+#define MASK_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x00000020
953+#define SHIFT_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 5
954+#define REGNUM_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x0000
955+
956+#define TOPAZ_CR_IMG_TOPAZ_CORE_ID 0x03C0
957+#define TOPAZ_CR_IMG_TOPAZ_CORE_REV 0x03D0
958+
959+#define TOPAZ_MTX_PC (0x00000005)
960+#define PC_START_ADDRESS (0x80900000)
961+
962+#define TOPAZ_CR_TOPAZ_AUTO_CLK_GATE 0x0014
963+#define MASK_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x00000001
964+#define SHIFT_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0
965+#define REGNUM_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x0014
966+
967+#define MASK_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x00000002
968+#define SHIFT_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 1
969+#define REGNUM_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x0014
970+
971+#define MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002
972+#define SHIFT_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 1
973+#define REGNUM_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0010
974+
975+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET 0x000000F8
976+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET 0x000000FC
977+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000
978+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000
979+
980+#define TOPAZ_CORE_CR_MTX_DEBUG_OFFSET 0x0000003C
981+
982+#define MASK_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x00000004
983+#define SHIFT_TOPAZ_CR_MTX_DBG_IS_SLAVE 2
984+#define REGNUM_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x003C
985+
986+#define MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x00000018
987+#define SHIFT_TOPAZ_CR_MTX_DBG_GPIO_OUT 3
988+#define REGNUM_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x003C
989+
990+#define MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET 0x00000108
991+
992+#define TOPAZ_CR_MMU_CONTROL0 0x0024
993+#define MASK_TOPAZ_CR_MMU_BYPASS 0x00000800
994+#define SHIFT_TOPAZ_CR_MMU_BYPASS 11
995+#define REGNUM_TOPAZ_CR_MMU_BYPASS 0x0024
996+
997+#define TOPAZ_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X)))
998+#define MASK_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFF000
999+#define SHIFT_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 12
1000+#define REGNUM_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0x0030
1001+
1002+#define MASK_TOPAZ_CR_MMU_INVALDC 0x00000008
1003+#define SHIFT_TOPAZ_CR_MMU_INVALDC 3
1004+#define REGNUM_TOPAZ_CR_MMU_INVALDC 0x0024
1005+
1006+#define MASK_TOPAZ_CR_MMU_FLUSH 0x00000004
1007+#define SHIFT_TOPAZ_CR_MMU_FLUSH 2
1008+#define REGNUM_TOPAZ_CR_MMU_FLUSH 0x0024
1009+
1010+#define TOPAZ_CR_MMU_BANK_INDEX 0x0038
1011+#define MASK_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (0x00000003 << (8 + ((i) * 2)))
1012+#define SHIFT_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (8 + ((i) * 2))
1013+#define REGNUM_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) 0x0038
1014+
1015+#define TOPAZ_CR_TOPAZ_MAN_CLK_GATE 0x0010
1016+#define MASK_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x00000001
1017+#define SHIFT_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0
1018+#define REGNUM_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x0010
1019+
1020+#define MTX_CORE_CR_MTX_TXRPT_OFFSET 0x0000000c
1021+#define TXRPT_WAITONKICK_VALUE 0x8ade0000
1022+
1023+#define MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK 0x00000002
1024+
1025+#define MTX_CORE_CR_MTX_ENABLE_OFFSET 0x00000000
1026+#define MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK 0x00000001
1027+
1028+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x00000002
1029+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 1
1030+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x0004
1031+
1032+#define MTX_CORE_CR_MTX_SOFT_RESET_OFFSET 0x00000200
1033+#define MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001
1034+
1035+#define MTX_CR_MTX_SYSC_CDMAA 0x0344
1036+#define MASK_MTX_CDMAA_ADDRESS 0x03FFFFFC
1037+#define SHIFT_MTX_CDMAA_ADDRESS 2
1038+#define REGNUM_MTX_CDMAA_ADDRESS 0x0344
1039+
1040+#define MTX_CR_MTX_SYSC_CDMAC 0x0340
1041+#define MASK_MTX_LENGTH 0x0000FFFF
1042+#define SHIFT_MTX_LENGTH 0
1043+#define REGNUM_MTX_LENGTH 0x0340
1044+
1045+#define MASK_MTX_BURSTSIZE 0x07000000
1046+#define SHIFT_MTX_BURSTSIZE 24
1047+#define REGNUM_MTX_BURSTSIZE 0x0340
1048+
1049+#define MASK_MTX_RNW 0x00020000
1050+#define SHIFT_MTX_RNW 17
1051+#define REGNUM_MTX_RNW 0x0340
1052+
1053+#define MASK_MTX_ENABLE 0x00010000
1054+#define SHIFT_MTX_ENABLE 16
1055+#define REGNUM_MTX_ENABLE 0x0340
1056+
1057+#define MASK_MTX_LENGTH 0x0000FFFF
1058+#define SHIFT_MTX_LENGTH 0
1059+#define REGNUM_MTX_LENGTH 0x0340
1060+
1061+#define TOPAZ_CR_IMG_TOPAZ_SRST 0x0000
1062+#define MASK_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x00000001
1063+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0
1064+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x0000
1065+
1066+#define MASK_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x00000008
1067+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 3
1068+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x0000
1069+
1070+#define MASK_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000002
1071+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 1
1072+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000
1073+
1074+#define MVEA_CR_MVEA_AUTO_CLOCK_GATING 0x0024
1075+#define MASK_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x00000001
1076+#define SHIFT_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0
1077+#define REGNUM_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x0024
1078+
1079+#define MASK_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x00000002
1080+#define SHIFT_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 1
1081+#define REGNUM_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x0024
1082+
1083+#define MASK_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x00000004
1084+#define SHIFT_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 2
1085+#define REGNUM_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x0024
1086+
1087+#define MASK_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x00000008
1088+#define SHIFT_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 3
1089+#define REGNUM_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x0024
1090+
1091+#define TOPAZ_CR_IMG_TOPAZ_DMAC_MODE 0x0040
1092+#define MASK_TOPAZ_CR_DMAC_MASTER_MODE 0x00000001
1093+#define SHIFT_TOPAZ_CR_DMAC_MASTER_MODE 0
1094+#define REGNUM_TOPAZ_CR_DMAC_MASTER_MODE 0x0040
1095+
1096+#define MTX_CR_MTX_SYSC_CDMAT 0x0350
1097+#define MASK_MTX_TRANSFERDATA 0xFFFFFFFF
1098+#define SHIFT_MTX_TRANSFERDATA 0
1099+#define REGNUM_MTX_TRANSFERDATA 0x0350
1100+
1101+#define IMG_SOC_DMAC_IRQ_STAT(X) (0x000C + (32 * (X)))
1102+#define MASK_IMG_SOC_TRANSFER_FIN 0x00020000
1103+#define SHIFT_IMG_SOC_TRANSFER_FIN 17
1104+#define REGNUM_IMG_SOC_TRANSFER_FIN 0x000C
1105+
1106+#define IMG_SOC_DMAC_COUNT(X) (0x0004 + (32 * (X)))
1107+#define MASK_IMG_SOC_CNT 0x0000FFFF
1108+#define SHIFT_IMG_SOC_CNT 0
1109+#define REGNUM_IMG_SOC_CNT 0x0004
1110+
1111+#define MASK_IMG_SOC_EN 0x00010000
1112+#define SHIFT_IMG_SOC_EN 16
1113+#define REGNUM_IMG_SOC_EN 0x0004
1114+
1115+#define MASK_IMG_SOC_LIST_EN 0x00040000
1116+#define SHIFT_IMG_SOC_LIST_EN 18
1117+#define REGNUM_IMG_SOC_LIST_EN 0x0004
1118+
1119+#define IMG_SOC_DMAC_PER_HOLD(X) (0x0018 + (32 * (X)))
1120+#define MASK_IMG_SOC_PER_HOLD 0x0000007F
1121+#define SHIFT_IMG_SOC_PER_HOLD 0
1122+#define REGNUM_IMG_SOC_PER_HOLD 0x0018
1123+
1124+#define IMG_SOC_DMAC_SETUP(X) (0x0000 + (32 * (X)))
1125+#define MASK_IMG_SOC_START_ADDRESS 0xFFFFFFF
1126+#define SHIFT_IMG_SOC_START_ADDRESS 0
1127+#define REGNUM_IMG_SOC_START_ADDRESS 0x0000
1128+
1129+#define MASK_IMG_SOC_BSWAP 0x40000000
1130+#define SHIFT_IMG_SOC_BSWAP 30
1131+#define REGNUM_IMG_SOC_BSWAP 0x0004
1132+
1133+#define MASK_IMG_SOC_PW 0x18000000
1134+#define SHIFT_IMG_SOC_PW 27
1135+#define REGNUM_IMG_SOC_PW 0x0004
1136+
1137+#define MASK_IMG_SOC_DIR 0x04000000
1138+#define SHIFT_IMG_SOC_DIR 26
1139+#define REGNUM_IMG_SOC_DIR 0x0004
1140+
1141+#define MASK_IMG_SOC_PI 0x03000000
1142+#define SHIFT_IMG_SOC_PI 24
1143+#define REGNUM_IMG_SOC_PI 0x0004
1144+#define IMG_SOC_PI_1 0x00000002
1145+#define IMG_SOC_PI_2 0x00000001
1146+#define IMG_SOC_PI_4 0x00000000
1147+
1148+#define MASK_IMG_SOC_TRANSFER_IEN 0x20000000
1149+#define SHIFT_IMG_SOC_TRANSFER_IEN 29
1150+#define REGNUM_IMG_SOC_TRANSFER_IEN 0x0004
1151+
1152+#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) \
1153+ ((((BSWAP) << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP)| \
1154+ (((PW) << SHIFT_IMG_SOC_PW) & MASK_IMG_SOC_PW)| \
1155+ (((DIR) << SHIFT_IMG_SOC_DIR) & MASK_IMG_SOC_DIR)| \
1156+ (((PERIPH_INCR) << SHIFT_IMG_SOC_PI) & MASK_IMG_SOC_PI)| \
1157+ (((COUNT) << SHIFT_IMG_SOC_CNT) & MASK_IMG_SOC_CNT))
1158+
1159+#define IMG_SOC_DMAC_PERIPH(X) (0x0008 + (32 * (X)))
1160+#define MASK_IMG_SOC_EXT_SA 0x0000000F
1161+#define SHIFT_IMG_SOC_EXT_SA 0
1162+#define REGNUM_IMG_SOC_EXT_SA 0x0008
1163+
1164+#define MASK_IMG_SOC_ACC_DEL 0xE0000000
1165+#define SHIFT_IMG_SOC_ACC_DEL 29
1166+#define REGNUM_IMG_SOC_ACC_DEL 0x0008
1167+
1168+#define MASK_IMG_SOC_INCR 0x08000000
1169+#define SHIFT_IMG_SOC_INCR 27
1170+#define REGNUM_IMG_SOC_INCR 0x0008
1171+
1172+#define MASK_IMG_SOC_BURST 0x07000000
1173+#define SHIFT_IMG_SOC_BURST 24
1174+#define REGNUM_IMG_SOC_BURST 0x0008
1175+
1176+#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST) \
1177+((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL)| \
1178+(((INCR) << SHIFT_IMG_SOC_INCR) & MASK_IMG_SOC_INCR)| \
1179+(((BURST) << SHIFT_IMG_SOC_BURST) & MASK_IMG_SOC_BURST))
1180+
1181+#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X)))
1182+#define MASK_IMG_SOC_ADDR 0x007FFFFF
1183+#define SHIFT_IMG_SOC_ADDR 0
1184+#define REGNUM_IMG_SOC_ADDR 0x0014
1185+
1186+/* **************** DMAC define **************** */
1187+enum DMAC_eBSwap {
1188+ DMAC_BSWAP_NO_SWAP = 0x0,/* !< No byte swapping will be performed. */
1189+ DMAC_BSWAP_REVERSE = 0x1,/* !< Byte order will be reversed. */
1190+};
1191+
1192+enum DMAC_ePW {
1193+ DMAC_PWIDTH_32_BIT = 0x0,/* !< Peripheral width 32-bit. */
1194+ DMAC_PWIDTH_16_BIT = 0x1,/* !< Peripheral width 16-bit. */
1195+ DMAC_PWIDTH_8_BIT = 0x2,/* !< Peripheral width 8-bit. */
1196+};
1197+
1198+enum DMAC_eAccDel {
1199+ DMAC_ACC_DEL_0 = 0x0, /* !< Access delay zero clock cycles */
1200+ DMAC_ACC_DEL_256 = 0x1, /* !< Access delay 256 clock cycles */
1201+ DMAC_ACC_DEL_512 = 0x2, /* !< Access delay 512 clock cycles */
1202+ DMAC_ACC_DEL_768 = 0x3, /* !< Access delay 768 clock cycles */
1203+ DMAC_ACC_DEL_1024 = 0x4,/* !< Access delay 1024 clock cycles */
1204+ DMAC_ACC_DEL_1280 = 0x5,/* !< Access delay 1280 clock cycles */
1205+ DMAC_ACC_DEL_1536 = 0x6,/* !< Access delay 1536 clock cycles */
1206+ DMAC_ACC_DEL_1792 = 0x7,/* !< Access delay 1792 clock cycles */
1207+};
1208+
1209+enum DMAC_eBurst {
1210+ DMAC_BURST_0 = 0x0, /* !< burst size of 0 */
1211+ DMAC_BURST_1 = 0x1, /* !< burst size of 1 */
1212+ DMAC_BURST_2 = 0x2, /* !< burst size of 2 */
1213+ DMAC_BURST_3 = 0x3, /* !< burst size of 3 */
1214+ DMAC_BURST_4 = 0x4, /* !< burst size of 4 */
1215+ DMAC_BURST_5 = 0x5, /* !< burst size of 5 */
1216+ DMAC_BURST_6 = 0x6, /* !< burst size of 6 */
1217+ DMAC_BURST_7 = 0x7, /* !< burst size of 7 */
1218+};
1219+
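+/*
+ * Illustrative only: with the enums above, a DMAC count word for `n' 32-bit
+ * words, no byte swap and a peripheral increment of one could be built as
+ *
+ *   DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT, dir,
+ *                    IMG_SOC_PI_1, n)
+ *
+ * where `dir' selects the transfer direction (see MASK_IMG_SOC_DIR).
+ */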
1220+/* commands for topaz, shared with the user space driver */
1221+enum drm_lnc_topaz_cmd {
1222+ MTX_CMDID_NULL = 0,
1223+ MTX_CMDID_DO_HEADER = 1,
1224+ MTX_CMDID_ENCODE_SLICE = 2,
1225+ MTX_CMDID_WRITEREG = 3,
1226+ MTX_CMDID_START_PIC = 4,
1227+ MTX_CMDID_END_PIC = 5,
1228+ MTX_CMDID_SYNC = 6,
1229+ MTX_CMDID_ENCODE_ONE_ROW = 7,
1230+ MTX_CMDID_FLUSH = 8,
1231+ MTX_CMDID_SW_LEAVE_LOWPOWER = 0xfc,
1232+ MTX_CMDID_SW_ENTER_LOWPOWER = 0xfe,
1233+ MTX_CMDID_SW_NEW_CODEC = 0xff
1234+};
1235+
1236+/* codecs topaz supports, shared with the user space driver */
1237+enum drm_lnc_topaz_codec {
1238+ IMG_CODEC_JPEG = 0,
1239+ IMG_CODEC_H264_NO_RC,
1240+ IMG_CODEC_H264_VBR,
1241+ IMG_CODEC_H264_CBR,
1242+ IMG_CODEC_H263_NO_RC,
1243+ IMG_CODEC_H263_VBR,
1244+ IMG_CODEC_H263_CBR,
1245+ IMG_CODEC_MPEG4_NO_RC,
1246+ IMG_CODEC_MPEG4_VBR,
1247+ IMG_CODEC_MPEG4_CBR,
1248+ IMG_CODEC_NUM
1249+};
1250+
1251+/* XXX: this is a copy of the msvdx cmd queue; does it need changes? */
1252+struct lnc_topaz_cmd_queue {
1253+ struct list_head head;
1254+ void *cmd;
1255+ unsigned long cmd_size;
1256+ uint32_t sequence;
1257+};
1258+
1259+
1260+struct topaz_cmd_header {
1261+ union {
1262+ struct {
1263+ unsigned long id:8;
1264+ unsigned long size:8;
1265+ unsigned long seq:16;
1266+ };
1267+ uint32_t val;
1268+ };
1269+};
1270+
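+/*
+ * Sketch of the resulting command word (assuming the usual little-endian
+ * bitfield layout on this target):
+ *
+ *   bits  0..7   command id   (enum drm_lnc_topaz_cmd)
+ *   bits  8..15  command size
+ *   bits 16..31  sequence number
+ *
+ * so a header can be filled via the named fields and submitted as `val'.
+ */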
1271+/* external function declarations */
1272+/* lnc_topazinit.c */
1273+int lnc_topaz_init(struct drm_device *dev);
1274+int lnc_topaz_uninit(struct drm_device *dev);
1275+int lnc_topaz_reset(struct drm_psb_private *dev_priv);
1276+int topaz_init_fw(struct drm_device *dev);
1277+int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec);
1278+int topaz_wait_for_register(struct drm_psb_private *dev_priv,
1279+ uint32_t addr, uint32_t value,
1280+ uint32_t enable);
1281+void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
1282+ uint32_t byte_addr, uint32_t val);
1283+uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
1284+ uint32_t byte_addr);
1285+void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
1286+ uint32_t addr);
1287+void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
1288+ uint32_t val);
1289+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
1290+int lnc_topaz_save_mtx_state(struct drm_device *dev);
1291+int lnc_topaz_restore_mtx_state(struct drm_device *dev);
1292+
1293+/* lnc_topaz.c */
1294+void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat);
1295+
1296+int lnc_cmdbuf_video(struct drm_file *priv,
1297+ struct list_head *validate_list,
1298+ uint32_t fence_type,
1299+ struct drm_psb_cmdbuf_arg *arg,
1300+ struct ttm_buffer_object *cmd_buffer,
1301+ struct psb_ttm_fence_rep *fence_arg);
1302+
1303+void lnc_topaz_flush_cmd_queue(struct drm_device *dev);
1304+void lnc_topaz_lockup(struct drm_psb_private *dev_priv, int *topaz_lockup,
1305+ int *topaz_idle);
1306+void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_count);
1307+
1308+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
1309+
1310+/* macros to get/set CCB control data */
1311+#define WB_CCB_CTRL_RINDEX(dev_priv) (*((uint32_t *)dev_priv->topaz_ccb_wb))
1312+#define WB_CCB_CTRL_SEQ(dev_priv) (*((uint32_t *)dev_priv->topaz_ccb_wb+1))
1313+
1314+#define POLL_WB_RINDEX(dev_priv,value) \
1315+do { \
1316+ int i; \
1317+ for (i = 0; i < 10000; i++) { \
1318+ if (WB_CCB_CTRL_RINDEX(dev_priv) == value) \
1319+ break; \
1320+ else \
1321+ DRM_UDELAY(100); \
1322+ } \
1323+ if (WB_CCB_CTRL_RINDEX(dev_priv) != value) { \
1324+ DRM_ERROR("TOPAZ: poll rindex timeout\n"); \
1325+ ret = -EBUSY; \
1326+ } \
1327+} while (0)
1328+
1329+#define POLL_WB_SEQ(dev_priv,value) \
1330+do { \
1331+ int i; \
1332+ for (i = 0; i < 10000; i++) { \
1333+ if (WB_CCB_CTRL_SEQ(dev_priv) == value) \
1334+ break; \
1335+ else \
1336+ DRM_UDELAY(1000); \
1337+ } \
1338+ if (WB_CCB_CTRL_SEQ(dev_priv) != value) { \
1339+ DRM_ERROR("TOPAZ:poll mtxseq timeout,0x%04x(mtx) vs 0x%04x\n",\
1340+ WB_CCB_CTRL_SEQ(dev_priv), value); \
1341+ ret = -EBUSY; \
1342+ } \
1343+} while (0)
1344+
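+/*
+ * Both POLL_* macros assume an `int ret' variable in the enclosing scope and
+ * set it to -EBUSY on timeout; a typical caller (sketch) looks like:
+ *
+ *   int ret = 0;
+ *   POLL_WB_SEQ(dev_priv, sequence);
+ *   if (ret)
+ *           return ret;
+ */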
1345+#define CCB_CTRL_RINDEX(dev_priv) \
1346+ topaz_read_mtx_mem(dev_priv, \
1347+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_ROFF)
1348+
1349+#define CCB_CTRL_RINDEX(dev_priv) \
1350+ topaz_read_mtx_mem(dev_priv, \
1351+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_ROFF)
1352+
1353+#define CCB_CTRL_QP(dev_priv) \
1354+ topaz_read_mtx_mem(dev_priv, \
1355+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_QP)
1356+
1357+#define CCB_CTRL_SEQ(dev_priv) \
1358+ topaz_read_mtx_mem(dev_priv, \
1359+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_COMPLETE)
1360+
1361+#define CCB_CTRL_FRAMESKIP(dev_priv) \
1362+ topaz_read_mtx_mem(dev_priv, \
1363+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_FRAMESKIP)
1364+
1365+#define CCB_CTRL_SET_QP(dev_priv, qp) \
1366+ topaz_write_mtx_mem(dev_priv, \
1367+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_QP, qp)
1368+
1369+#define CCB_CTRL_SET_INITIALQP(dev_priv, qp) \
1370+ topaz_write_mtx_mem(dev_priv, \
1371+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP, qp)
1372+
1373+
1374+#define TOPAZ_BEGIN_CCB(dev_priv) \
1375+ topaz_write_mtx_mem_multiple_setup(dev_priv, \
1376+ dev_priv->topaz_ccb_buffer_addr + \
1377+ dev_priv->topaz_cmd_windex * 4)
1378+
1379+#define TOPAZ_OUT_CCB(dev_priv, cmd) \
1380+do { \
1381+ topaz_write_mtx_mem_multiple(dev_priv, cmd); \
1382+ dev_priv->topaz_cmd_windex++; \
1383+} while (0)
1384+
1385+#define TOPAZ_END_CCB(dev_priv,kick_count) \
1386+ topaz_mtx_kick(dev_priv, 1);
1387+
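+/*
+ * Typical command submission sequence (sketch; error handling omitted):
+ *
+ *   TOPAZ_BEGIN_CCB(dev_priv);
+ *   TOPAZ_OUT_CCB(dev_priv, cmd_header.val);
+ *   TOPAZ_OUT_CCB(dev_priv, param);
+ *   TOPAZ_END_CCB(dev_priv, 1);
+ *
+ * BEGIN sets up auto-incrementing MTX RAM access at the current CCB write
+ * index, OUT writes one dword and advances the index, and END kicks the MTX
+ * (note TOPAZ_END_CCB currently kicks with a count of 1 regardless of its
+ * kick_count argument).
+ */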
1388+static inline char *cmd_to_string(int cmd_id)
1389+{
1390+ switch (cmd_id) {
1391+ case MTX_CMDID_START_PIC:
1392+ return "MTX_CMDID_START_PIC";
1393+ case MTX_CMDID_END_PIC:
1394+ return "MTX_CMDID_END_PIC";
1395+ case MTX_CMDID_DO_HEADER:
1396+ return "MTX_CMDID_DO_HEADER";
1397+ case MTX_CMDID_ENCODE_SLICE:
1398+ return "MTX_CMDID_ENCODE_SLICE";
1399+ case MTX_CMDID_SYNC:
1400+ return "MTX_CMDID_SYNC";
1401+
1402+ default:
1403+ return "Undefined command";
1404+
1405+ }
1406+}
1407+
1408+static inline char *codec_to_string(int codec)
1409+{
1410+ switch (codec) {
1411+ case IMG_CODEC_H264_NO_RC:
1412+ return "H264_NO_RC";
1413+ case IMG_CODEC_H264_VBR:
1414+ return "H264_VBR";
1415+ case IMG_CODEC_H264_CBR:
1416+ return "H264_CBR";
1417+ case IMG_CODEC_H263_NO_RC:
1418+ return "H263_NO_RC";
1419+ case IMG_CODEC_H263_VBR:
1420+ return "H263_VBR";
1421+ case IMG_CODEC_H263_CBR:
1422+ return "H263_CBR";
1423+ case IMG_CODEC_MPEG4_NO_RC:
1424+ return "MPEG4_NO_RC";
1425+ case IMG_CODEC_MPEG4_VBR:
1426+ return "MPEG4_VBR";
1427+ case IMG_CODEC_MPEG4_CBR:
1428+ return "MPEG4_CBR";
1429+ default:
1430+ return "Undefined codec";
1431+ }
1432+}
1433+
1434+static inline void lnc_topaz_enableirq(struct drm_device *dev)
1435+{
1436+ struct drm_psb_private *dev_priv = dev->dev_private;
1437+ uint32_t ier = dev_priv->vdc_irq_mask | _LNC_IRQ_TOPAZ_FLAG;
1438+
1439+ PSB_DEBUG_IRQ("TOPAZ: enable IRQ\n");
1440+
1441+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB,
1442+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MAS_INTEN) |
1443+ /* F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA) | */
1444+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT) |
1445+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX) |
1446+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT));
1447+
1448+ PSB_WVDC32(ier, PSB_INT_ENABLE_R); /* essential */
1449+}
1450+
1451+static inline void lnc_topaz_disableirq(struct drm_device *dev)
1452+{
1453+
1454+ struct drm_psb_private *dev_priv = dev->dev_private;
1455+ uint32_t ier = dev_priv->vdc_irq_mask & (~_LNC_IRQ_TOPAZ_FLAG);
1456+
1457+ PSB_DEBUG_INIT("TOPAZ: disable IRQ\n");
1458+
1459+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0);
1460+ PSB_WVDC32(ier, PSB_INT_ENABLE_R); /* essential */
1461+}
1462+
1463+static inline void lnc_topaz_clearirq(struct drm_device *dev,
1464+ uint32_t clear_topaz)
1465+{
1466+ struct drm_psb_private *dev_priv = dev->dev_private;
1467+
1468+ PSB_DEBUG_INIT("TOPAZ: clear IRQ\n");
1469+ if (clear_topaz != 0)
1470+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, clear_topaz);
1471+
1472+ PSB_WVDC32(_LNC_IRQ_TOPAZ_FLAG, PSB_INT_IDENTITY_R);
1473+}
1474+
1475+static inline uint32_t lnc_topaz_queryirq(struct drm_device *dev)
1476+{
1477+ struct drm_psb_private *dev_priv = dev->dev_private;
1478+ uint32_t val, iir, clear = 0;
1479+
1480+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &val);
1481+ iir = PSB_RVDC32(PSB_INT_IDENTITY_R);
1482+
1483+ if ((val == 0) && (iir == 0)) {/* no interrupt */
1484+ PSB_DEBUG_GENERAL("TOPAZ: no interrupt,IIR=TOPAZ_INTSTAT=0\n");
1485+ return 0;
1486+ }
1487+
1488+ PSB_DEBUG_IRQ("TOPAZ:TOPAZ_INTSTAT=0x%08x,IIR=0%08x\n", val, iir);
1489+
1490+ if (val & (1<<31))
1491+ PSB_DEBUG_IRQ("TOPAZ:IRQ pin activated,cmd seq=0x%04x,"
1492+ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
1493+ CCB_CTRL_SEQ(dev_priv),
1494+ dev_priv->sequence[LNC_ENGINE_ENCODE],
1495+ *(uint32_t *)dev_priv->topaz_sync_addr);
1496+ else
1497+ PSB_DEBUG_IRQ("TOPAZ:IRQ pin not activated,cmd seq=0x%04x,"
1498+ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
1499+ CCB_CTRL_SEQ(dev_priv),
1500+ dev_priv->sequence[LNC_ENGINE_ENCODE],
1501+ *(uint32_t *)dev_priv->topaz_sync_addr);
1502+
1503+ if (val & 0x8) {
1504+ uint32_t mmu_status, mmu_req;
1505+
1506+ TOPAZ_READ32(TOPAZ_CR_MMU_STATUS, &mmu_status);
1507+ TOPAZ_READ32(TOPAZ_CR_MMU_MEM_REQ, &mmu_req);
1508+
1509+ PSB_DEBUG_IRQ("TOPAZ: detect a page fault interrupt, "
1510+ "address=0x%08x,mem req=0x%08x\n",
1511+ mmu_status, mmu_req);
1512+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT);
1513+ }
1514+
1515+ if (val & 0x4) {
1516+ PSB_DEBUG_IRQ("TOPAZ: detect a MTX_HALT interrupt\n");
1517+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT);
1518+ }
1519+
1520+ if (val & 0x2) {
1521+ PSB_DEBUG_IRQ("TOPAZ: detect a MTX interrupt\n");
1522+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX);
1523+ }
1524+
1525+ if (val & 0x1) {
1526+ PSB_DEBUG_IRQ("TOPAZ: detect a MVEA interrupt\n");
1527+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA);
1528+ }
1529+
1530+ return clear;
1531+}
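+/*
+ * The interrupt handler is expected to pair these helpers roughly as
+ * follows (sketch):
+ *
+ *   uint32_t clear = lnc_topaz_queryirq(dev);
+ *   lnc_topaz_clearirq(dev, clear);
+ *
+ * queryirq() returns the INTCLR bits for the sources it saw; clearirq()
+ * writes them back and acknowledges the VDC-level TOPAZ flag.
+ */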
1532+
1533+#endif /* _LNC_TOPAZ_H_ */
1534diff -uNr a/drivers/gpu/drm/psb/lnc_topazinit.c b/drivers/gpu/drm/psb/lnc_topazinit.c
1535--- a/drivers/gpu/drm/psb/lnc_topazinit.c 1969-12-31 16:00:00.000000000 -0800
1536+++ b/drivers/gpu/drm/psb/lnc_topazinit.c 2009-04-07 13:28:38.000000000 -0700
1537@@ -0,0 +1,1896 @@
1538+/**
1539+ * file lnc_topazinit.c
1540+ * TOPAZ initialization and mtx-firmware upload
1541+ *
1542+ */
1543+
1544+/**************************************************************************
1545+ *
1546+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
1547+ * Copyright (c) Imagination Technologies Limited, UK
1548+ * All Rights Reserved.
1549+ *
1550+ * Permission is hereby granted, free of charge, to any person obtaining a
1551+ * copy of this software and associated documentation files (the
1552+ * "Software"), to deal in the Software without restriction, including
1553+ * without limitation the rights to use, copy, modify, merge, publish,
1554+ * distribute, sub license, and/or sell copies of the Software, and to
1555+ * permit persons to whom the Software is furnished to do so, subject to
1556+ * the following conditions:
1557+ *
1558+ * The above copyright notice and this permission notice (including the
1559+ * next paragraph) shall be included in all copies or substantial portions
1560+ * of the Software.
1561+ *
1562+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1563+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1564+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
1565+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
1566+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
1567+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
1568+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
1569+ *
1570+ **************************************************************************/
1571+
1572+/* NOTE: (READ BEFORE REFINING THIS CODE)
1573+ * 1. The FIRMWARE SIZE is measured in bytes; we have to pass the size
1574+ * measured in words to the DMAC.
1575+ *
1576+ *
1577+ *
1578+ */
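+/*
+ * e.g. (illustrative; `size_in_bytes' is just a placeholder name) a firmware
+ * section of size_in_bytes bytes corresponds to
+ *
+ *   uint32_t size_in_words = size_in_bytes / 4;
+ *
+ * when programming the DMAC transfer count.
+ */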
1579+
1580+/* include headers */
1581+
1582+/* #define DRM_DEBUG_CODE 2 */
1583+
1584+#include <linux/firmware.h>
1585+
1586+#include <drm/drmP.h>
1587+#include <drm/drm.h>
1588+
1589+#include "psb_drv.h"
1590+#include "lnc_topaz.h"
1591+
1592+/* WARNING: this define is very important */
1593+#define RAM_SIZE (1024 * 24)
1594+
1595+static int drm_psb_ospmxxx = 0x10;
1596+
1597+/* register default values
1598+ * THIS HEADER IS ONLY INCLUDED ONCE */
1599+static unsigned long topaz_default_regs[183][3] = {
1600+ {MVEA_START, 0x00000000, 0x00000000},
1601+ {MVEA_START, 0x00000004, 0x00000400},
1602+ {MVEA_START, 0x00000008, 0x00000000},
1603+ {MVEA_START, 0x0000000C, 0x00000000},
1604+ {MVEA_START, 0x00000010, 0x00000000},
1605+ {MVEA_START, 0x00000014, 0x00000000},
1606+ {MVEA_START, 0x00000018, 0x00000000},
1607+ {MVEA_START, 0x0000001C, 0x00000000},
1608+ {MVEA_START, 0x00000020, 0x00000120},
1609+ {MVEA_START, 0x00000024, 0x00000000},
1610+ {MVEA_START, 0x00000028, 0x00000000},
1611+ {MVEA_START, 0x00000100, 0x00000000},
1612+ {MVEA_START, 0x00000104, 0x00000000},
1613+ {MVEA_START, 0x00000108, 0x00000000},
1614+ {MVEA_START, 0x0000010C, 0x00000000},
1615+ {MVEA_START, 0x0000011C, 0x00000001},
1616+ {MVEA_START, 0x0000012C, 0x00000000},
1617+ {MVEA_START, 0x00000180, 0x00000000},
1618+ {MVEA_START, 0x00000184, 0x00000000},
1619+ {MVEA_START, 0x00000188, 0x00000000},
1620+ {MVEA_START, 0x0000018C, 0x00000000},
1621+ {MVEA_START, 0x00000190, 0x00000000},
1622+ {MVEA_START, 0x00000194, 0x00000000},
1623+ {MVEA_START, 0x00000198, 0x00000000},
1624+ {MVEA_START, 0x0000019C, 0x00000000},
1625+ {MVEA_START, 0x000001A0, 0x00000000},
1626+ {MVEA_START, 0x000001A4, 0x00000000},
1627+ {MVEA_START, 0x000001A8, 0x00000000},
1628+ {MVEA_START, 0x000001AC, 0x00000000},
1629+ {MVEA_START, 0x000001B0, 0x00000000},
1630+ {MVEA_START, 0x000001B4, 0x00000000},
1631+ {MVEA_START, 0x000001B8, 0x00000000},
1632+ {MVEA_START, 0x000001BC, 0x00000000},
1633+ {MVEA_START, 0x000001F8, 0x00000000},
1634+ {MVEA_START, 0x000001FC, 0x00000000},
1635+ {MVEA_START, 0x00000200, 0x00000000},
1636+ {MVEA_START, 0x00000204, 0x00000000},
1637+ {MVEA_START, 0x00000208, 0x00000000},
1638+ {MVEA_START, 0x0000020C, 0x00000000},
1639+ {MVEA_START, 0x00000210, 0x00000000},
1640+ {MVEA_START, 0x00000220, 0x00000001},
1641+ {MVEA_START, 0x00000224, 0x0000001F},
1642+ {MVEA_START, 0x00000228, 0x00000100},
1643+ {MVEA_START, 0x0000022C, 0x00001F00},
1644+ {MVEA_START, 0x00000230, 0x00000101},
1645+ {MVEA_START, 0x00000234, 0x00001F1F},
1646+ {MVEA_START, 0x00000238, 0x00001F01},
1647+ {MVEA_START, 0x0000023C, 0x0000011F},
1648+ {MVEA_START, 0x00000240, 0x00000200},
1649+ {MVEA_START, 0x00000244, 0x00001E00},
1650+ {MVEA_START, 0x00000248, 0x00000002},
1651+ {MVEA_START, 0x0000024C, 0x0000001E},
1652+ {MVEA_START, 0x00000250, 0x00000003},
1653+ {MVEA_START, 0x00000254, 0x0000001D},
1654+ {MVEA_START, 0x00000258, 0x00001F02},
1655+ {MVEA_START, 0x0000025C, 0x00000102},
1656+ {MVEA_START, 0x00000260, 0x0000011E},
1657+ {MVEA_START, 0x00000264, 0x00000000},
1658+ {MVEA_START, 0x00000268, 0x00000000},
1659+ {MVEA_START, 0x0000026C, 0x00000000},
1660+ {MVEA_START, 0x00000270, 0x00000000},
1661+ {MVEA_START, 0x00000274, 0x00000000},
1662+ {MVEA_START, 0x00000278, 0x00000000},
1663+ {MVEA_START, 0x00000280, 0x00008000},
1664+ {MVEA_START, 0x00000284, 0x00000000},
1665+ {MVEA_START, 0x00000288, 0x00000000},
1666+ {MVEA_START, 0x0000028C, 0x00000000},
1667+ {MVEA_START, 0x00000314, 0x00000000},
1668+ {MVEA_START, 0x00000318, 0x00000000},
1669+ {MVEA_START, 0x0000031C, 0x00000000},
1670+ {MVEA_START, 0x00000320, 0x00000000},
1671+ {MVEA_START, 0x00000324, 0x00000000},
1672+ {MVEA_START, 0x00000348, 0x00000000},
1673+ {MVEA_START, 0x00000380, 0x00000000},
1674+ {MVEA_START, 0x00000384, 0x00000000},
1675+ {MVEA_START, 0x00000388, 0x00000000},
1676+ {MVEA_START, 0x0000038C, 0x00000000},
1677+ {MVEA_START, 0x00000390, 0x00000000},
1678+ {MVEA_START, 0x00000394, 0x00000000},
1679+ {MVEA_START, 0x00000398, 0x00000000},
1680+ {MVEA_START, 0x0000039C, 0x00000000},
1681+ {MVEA_START, 0x000003A0, 0x00000000},
1682+ {MVEA_START, 0x000003A4, 0x00000000},
1683+ {MVEA_START, 0x000003A8, 0x00000000},
1684+ {MVEA_START, 0x000003B0, 0x00000000},
1685+ {MVEA_START, 0x000003B4, 0x00000000},
1686+ {MVEA_START, 0x000003B8, 0x00000000},
1687+ {MVEA_START, 0x000003BC, 0x00000000},
1688+ {MVEA_START, 0x000003D4, 0x00000000},
1689+ {MVEA_START, 0x000003D8, 0x00000000},
1690+ {MVEA_START, 0x000003DC, 0x00000000},
1691+ {MVEA_START, 0x000003E0, 0x00000000},
1692+ {MVEA_START, 0x000003E4, 0x00000000},
1693+ {MVEA_START, 0x000003EC, 0x00000000},
1694+ {MVEA_START, 0x000002D0, 0x00000000},
1695+ {MVEA_START, 0x000002D4, 0x00000000},
1696+ {MVEA_START, 0x000002D8, 0x00000000},
1697+ {MVEA_START, 0x000002DC, 0x00000000},
1698+ {MVEA_START, 0x000002E0, 0x00000000},
1699+ {MVEA_START, 0x000002E4, 0x00000000},
1700+ {MVEA_START, 0x000002E8, 0x00000000},
1701+ {MVEA_START, 0x000002EC, 0x00000000},
1702+ {MVEA_START, 0x000002F0, 0x00000000},
1703+ {MVEA_START, 0x000002F4, 0x00000000},
1704+ {MVEA_START, 0x000002F8, 0x00000000},
1705+ {MVEA_START, 0x000002FC, 0x00000000},
1706+ {MVEA_START, 0x00000300, 0x00000000},
1707+ {MVEA_START, 0x00000304, 0x00000000},
1708+ {MVEA_START, 0x00000308, 0x00000000},
1709+ {MVEA_START, 0x0000030C, 0x00000000},
1710+ {MVEA_START, 0x00000290, 0x00000000},
1711+ {MVEA_START, 0x00000294, 0x00000000},
1712+ {MVEA_START, 0x00000298, 0x00000000},
1713+ {MVEA_START, 0x0000029C, 0x00000000},
1714+ {MVEA_START, 0x000002A0, 0x00000000},
1715+ {MVEA_START, 0x000002A4, 0x00000000},
1716+ {MVEA_START, 0x000002A8, 0x00000000},
1717+ {MVEA_START, 0x000002AC, 0x00000000},
1718+ {MVEA_START, 0x000002B0, 0x00000000},
1719+ {MVEA_START, 0x000002B4, 0x00000000},
1720+ {MVEA_START, 0x000002B8, 0x00000000},
1721+ {MVEA_START, 0x000002BC, 0x00000000},
1722+ {MVEA_START, 0x000002C0, 0x00000000},
1723+ {MVEA_START, 0x000002C4, 0x00000000},
1724+ {MVEA_START, 0x000002C8, 0x00000000},
1725+ {MVEA_START, 0x000002CC, 0x00000000},
1726+ {MVEA_START, 0x00000080, 0x00000000},
1727+ {MVEA_START, 0x00000084, 0x80705700},
1728+ {MVEA_START, 0x00000088, 0x00000000},
1729+ {MVEA_START, 0x0000008C, 0x00000000},
1730+ {MVEA_START, 0x00000090, 0x00000000},
1731+ {MVEA_START, 0x00000094, 0x00000000},
1732+ {MVEA_START, 0x00000098, 0x00000000},
1733+ {MVEA_START, 0x0000009C, 0x00000000},
1734+ {MVEA_START, 0x000000A0, 0x00000000},
1735+ {MVEA_START, 0x000000A4, 0x00000000},
1736+ {MVEA_START, 0x000000A8, 0x00000000},
1737+ {MVEA_START, 0x000000AC, 0x00000000},
1738+ {MVEA_START, 0x000000B0, 0x00000000},
1739+ {MVEA_START, 0x000000B4, 0x00000000},
1740+ {MVEA_START, 0x000000B8, 0x00000000},
1741+ {MVEA_START, 0x000000BC, 0x00000000},
1742+ {MVEA_START, 0x000000C0, 0x00000000},
1743+ {MVEA_START, 0x000000C4, 0x00000000},
1744+ {MVEA_START, 0x000000C8, 0x00000000},
1745+ {MVEA_START, 0x000000CC, 0x00000000},
1746+ {MVEA_START, 0x000000D0, 0x00000000},
1747+ {MVEA_START, 0x000000D4, 0x00000000},
1748+ {MVEA_START, 0x000000D8, 0x00000000},
1749+ {MVEA_START, 0x000000DC, 0x00000000},
1750+ {MVEA_START, 0x000000E0, 0x00000000},
1751+ {MVEA_START, 0x000000E4, 0x00000000},
1752+ {MVEA_START, 0x000000E8, 0x00000000},
1753+ {MVEA_START, 0x000000EC, 0x00000000},
1754+ {MVEA_START, 0x000000F0, 0x00000000},
1755+ {MVEA_START, 0x000000F4, 0x00000000},
1756+ {MVEA_START, 0x000000F8, 0x00000000},
1757+ {MVEA_START, 0x000000FC, 0x00000000},
1758+ {TOPAZ_VLC_START, 0x00000000, 0x00000000},
1759+ {TOPAZ_VLC_START, 0x00000004, 0x00000000},
1760+ {TOPAZ_VLC_START, 0x00000008, 0x00000000},
1761+ {TOPAZ_VLC_START, 0x0000000C, 0x00000000},
1762+ {TOPAZ_VLC_START, 0x00000010, 0x00000000},
1763+ {TOPAZ_VLC_START, 0x00000014, 0x00000000},
1764+ {TOPAZ_VLC_START, 0x0000001C, 0x00000000},
1765+ {TOPAZ_VLC_START, 0x00000020, 0x00000000},
1766+ {TOPAZ_VLC_START, 0x00000024, 0x00000000},
1767+ {TOPAZ_VLC_START, 0x0000002C, 0x00000000},
1768+ {TOPAZ_VLC_START, 0x00000034, 0x00000000},
1769+ {TOPAZ_VLC_START, 0x00000038, 0x00000000},
1770+ {TOPAZ_VLC_START, 0x0000003C, 0x00000000},
1771+ {TOPAZ_VLC_START, 0x00000040, 0x00000000},
1772+ {TOPAZ_VLC_START, 0x00000044, 0x00000000},
1773+ {TOPAZ_VLC_START, 0x00000048, 0x00000000},
1774+ {TOPAZ_VLC_START, 0x0000004C, 0x00000000},
1775+ {TOPAZ_VLC_START, 0x00000050, 0x00000000},
1776+ {TOPAZ_VLC_START, 0x00000054, 0x00000000},
1777+ {TOPAZ_VLC_START, 0x00000058, 0x00000000},
1778+ {TOPAZ_VLC_START, 0x0000005C, 0x00000000},
1779+ {TOPAZ_VLC_START, 0x00000060, 0x00000000},
1780+ {TOPAZ_VLC_START, 0x00000064, 0x00000000},
1781+ {TOPAZ_VLC_START, 0x00000068, 0x00000000},
1782+ {TOPAZ_VLC_START, 0x0000006C, 0x00000000}
1783+};
1784+
1785+#define FIRMWARE_NAME "topaz_fw.bin"
1786+
1787+/* define structure */
1788+/* firmware file's info header */
1789+struct topaz_fwinfo {
1790+ unsigned int ver:16;
1791+ unsigned int codec:16;
1792+
1793+ unsigned int text_size;
1794+ unsigned int data_size;
1795+ unsigned int data_location;
1796+};
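+/*
+ * Rough layout of topaz_fw.bin as consumed by topaz_init_fw() below
+ * (sketch inferred from the parsing loop):
+ *
+ *   repeated for each codec record:
+ *     struct topaz_fwinfo header
+ *     text section  (header.text_size bytes)
+ *     data section  (header.data_size bytes)
+ */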
1797+
1798+/* firmware data array definition */
1799+struct topaz_codec_fw {
1800+ uint32_t ver;
1801+ uint32_t codec;
1802+
1803+ uint32_t text_size;
1804+ uint32_t data_size;
1805+ uint32_t data_location;
1806+
1807+ struct ttm_buffer_object *text;
1808+ struct ttm_buffer_object *data;
1809+};
1810+
1811+
1812+
1813+/* static function declarations */
1814+static int topaz_upload_fw(struct drm_device *dev,
1815+ enum drm_lnc_topaz_codec codec);
1816+static inline void topaz_set_default_regs(struct drm_psb_private
1817+ *dev_priv);
1818+
1819+#define UPLOAD_FW_BY_DMA 1
1820+
1821+#if UPLOAD_FW_BY_DMA
1822+static void topaz_dma_transfer(struct drm_psb_private *dev_priv,
1823+ uint32_t channel, uint32_t src_phy_addr,
1824+ uint32_t offset, uint32_t dst_addr,
1825+ uint32_t byte_num, uint32_t is_increment,
1826+ uint32_t is_write);
1827+#else
1828+static void topaz_mtx_upload_by_register(struct drm_device *dev,
1829+ uint32_t mtx_mem, uint32_t addr,
1830+ uint32_t size,
1831+ struct ttm_buffer_object *buf);
1832+#endif
1833+
1834+static void topaz_write_core_reg(struct drm_psb_private *dev_priv,
1835+ uint32_t reg, const uint32_t val);
1836+static void topaz_read_core_reg(struct drm_psb_private *dev_priv,
1837+ uint32_t reg, uint32_t *ret_val);
1838+static void get_mtx_control_from_dash(struct drm_psb_private *dev_priv);
1839+static void release_mtx_control_from_dash(struct drm_psb_private
1840+ *dev_priv);
1841+static void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv);
1842+static void mtx_dma_read(struct drm_device *dev, uint32_t source_addr,
1843+ uint32_t size);
1844+static void mtx_dma_write(struct drm_device *dev);
1845+
1846+
1847+#if 0 /* DEBUG_FUNCTION */
1848+static int topaz_test_null(struct drm_device *dev, uint32_t seq);
1849+static void topaz_mmu_flush(struct drm_device *dev);
1850+static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value);
1851+#endif
1852+#if 0
1853+static void topaz_save_default_regs(struct drm_psb_private *dev_priv,
1854+ uint32_t *data);
1855+static void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
1856+ uint32_t *data);
1857+#endif
1858+
1859+/* global variable definitions */
1860+struct topaz_codec_fw topaz_fw[IMG_CODEC_NUM];
1861+
1862+uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
1863+ uint32_t byte_addr)
1864+{
1865+ uint32_t read_val;
1866+ uint32_t reg, bank_size, ram_bank_size, ram_id;
1867+
1868+ TOPAZ_READ32(0x3c, &reg);
1869+ reg = 0x0a0a0606;
1870+ bank_size = (reg & 0xF0000) >> 16;
1871+
1872+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
1873+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
1874+
1875+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
1876+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
1877+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR) |
1878+ F_ENCODE(1, MTX_MTX_MCMR));
1879+
1880+ /* ?? poll this reg? */
1881+ topaz_wait_for_register(dev_priv,
1882+ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
1883+ 1, 1);
1884+
1885+ MTX_READ32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, &read_val);
1886+
1887+ return read_val;
1888+}
1889+
1890+void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
1891+ uint32_t byte_addr, uint32_t val)
1892+{
1893+ uint32_t ram_id = 0;
1894+ uint32_t reg, bank_size, ram_bank_size;
1895+
1896+ TOPAZ_READ32(0x3c, &reg);
1897+
1898+ /* PSB_DEBUG_GENERAL ("TOPAZ: DEBUG REG(%x)\n", reg); */
1899+ reg = 0x0a0a0606;
1900+
1901+ bank_size = (reg & 0xF0000) >> 16;
1902+
1903+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
1904+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
1905+
1906+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
1907+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
1908+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
1909+
1910+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
1911+
1912+ /* ?? poll this reg? */
1913+ topaz_wait_for_register(dev_priv,
1914+ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
1915+ 1, 1);
1916+
1917+ return;
1918+}
1919+
1920+void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
1921+ uint32_t byte_addr)
1922+{
1923+ uint32_t ram_id = 0;
1924+ uint32_t reg, bank_size, ram_bank_size;
1925+
1926+ TOPAZ_READ32(0x3c, &reg);
1927+
1928+ reg = 0x0a0a0606;
1929+
1930+ bank_size = (reg & 0xF0000) >> 16;
1931+
1932+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
1933+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
1934+
1935+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
1936+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
1937+ F_ENCODE(1, MTX_MTX_MCMAI) |
1938+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
1939+}
1940+
1941+void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
1942+ uint32_t val)
1943+{
1944+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
1945+}
1946+
1947+
1948+int topaz_wait_for_register(struct drm_psb_private *dev_priv,
1949+ uint32_t addr, uint32_t value, uint32_t mask)
1950+{
1951+ uint32_t tmp;
1952+ uint32_t count = 10000;
1953+
1954+ /* # poll the topaz register a bounded number of times */
1955+ while (count) {
1956+ /* #.# read */
1957+ MM_READ32(addr, 0, &tmp);
1958+
1959+ if (value == (tmp & mask))
1960+ return 0;
1961+
1962+ /* #.# delay and loop */
1963+ DRM_UDELAY(100);
1964+ --count;
1965+ }
1966+
1967+ /* # polling timed out; return an error to indicate failure */
1968+ /* XXX: the test suite uses a timeout count of 10000 */
1969+
1970+ DRM_ERROR("TOPAZ:time out to poll addr(0x%x) expected value(0x%08x), "
1971+ "actual 0x%08x (0x%08x & 0x%08x)\n",
1972+ addr, value, tmp & mask, tmp, mask);
1973+
1974+ return -EBUSY;
1975+
1976+}
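+/*
+ * Callers typically poll a status bit until it holds the expected value,
+ * e.g. (as done after the MTX RAM accesses above):
+ *
+ *   topaz_wait_for_register(dev_priv,
+ *                           MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
+ *                           1, 1);
+ *
+ * which returns 0 on success and -EBUSY after roughly 10000 * 100us.
+ */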
1977+
1978+
1979+void lnc_topaz_reset_wq(struct work_struct *work)
1980+{
1981+ struct drm_psb_private *dev_priv =
1982+ container_of(work, struct drm_psb_private, topaz_watchdog_wq);
1983+
1984+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
1985+ unsigned long irq_flags;
1986+
1987+ mutex_lock(&dev_priv->topaz_mutex);
1988+ dev_priv->topaz_needs_reset = 1;
1989+ dev_priv->topaz_current_sequence++;
1990+ PSB_DEBUG_GENERAL
1991+ ("MSVDXFENCE: incremented topaz_current_sequence to :%d\n",
1992+ dev_priv->topaz_current_sequence);
1993+
1994+ psb_fence_error(scheduler->dev, LNC_ENGINE_ENCODE,
1995+ dev_priv->topaz_current_sequence, _PSB_FENCE_TYPE_EXE,
1996+ DRM_CMD_HANG);
1997+
1998+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
1999+ dev_priv->timer_available = 1;
2000+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
2001+
2002+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
2003+
2004+ /* psb_msvdx_flush_cmd_queue(scheduler->dev); */
2005+
2006+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
2007+
2008+ psb_schedule_watchdog(dev_priv);
2009+ mutex_unlock(&dev_priv->topaz_mutex);
2010+}
2011+
2012+
2013+/* this function finishes the first part of initialization; the rest
2014+ * is done in topaz_setup_fw
2015+ */
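+/*
+ * i.e. overall bring-up is split in two (sketch):
+ *
+ *   lnc_topaz_init(dev);           - at load time: BOs, MMU, soft reset
+ *   ...
+ *   topaz_init_fw(dev);            - deferred until the first user space
+ *   topaz_setup_fw(dev, codec);      command: firmware load and MTX start
+ */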
2016+int lnc_topaz_init(struct drm_device *dev)
2017+{
2018+ struct drm_psb_private *dev_priv = dev->dev_private;
2019+ struct ttm_bo_device *bdev = &dev_priv->bdev;
2020+ uint32_t core_id, core_rev;
2021+ void *topaz_bo_virt;
2022+ int ret = 0;
2023+ bool is_iomem;
2024+
2025+ PSB_DEBUG_GENERAL("TOPAZ: init topaz data structures\n");
2026+
2027+ /* # initialize the topaz command queue [msvdx_queue] */
2028+ INIT_LIST_HEAD(&dev_priv->topaz_queue);
2029+ /* # init mutex? CHECK: mutex usage [msvdx_mutex] */
2030+ mutex_init(&dev_priv->topaz_mutex);
2031+ /* # spin lock init? CHECK spin lock usage [msvdx_lock] */
2032+ spin_lock_init(&dev_priv->topaz_lock);
2033+
2034+ /* # topaz status init. [msvdx_busy] */
2035+ dev_priv->topaz_busy = 0;
2036+ dev_priv->topaz_cmd_seq = 0;
2037+ dev_priv->topaz_fw_loaded = 0;
2038+ dev_priv->topaz_cur_codec = 0;
2039+ dev_priv->topaz_mtx_data_mem = NULL;
2040+ dev_priv->cur_mtx_data_size = 0;
2041+
2042+ dev_priv->topaz_mtx_reg_state = kmalloc(TOPAZ_MTX_REG_SIZE,
2043+ GFP_KERNEL);
2044+ if (dev_priv->topaz_mtx_reg_state == NULL) {
2045+ DRM_ERROR("TOPAZ: failed to allocate space "
2046+ "for mtx register\n");
2047+ return -1;
2048+ }
2049+
2050+ /* # obtain the write-back structure; we may only need 32+4=40 DW */
2051+ if (!dev_priv->topaz_bo) {
2052+ ret = ttm_buffer_object_create(bdev, 4096,
2053+ ttm_bo_type_kernel,
2054+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
2055+ 0, 0, 0, NULL, &(dev_priv->topaz_bo));
2056+ if (ret != 0) {
2057+ DRM_ERROR("TOPAZ: failed to allocate topaz BO.\n");
2058+ return ret;
2059+ }
2060+ }
2061+
2062+ ret = ttm_bo_kmap(dev_priv->topaz_bo, 0,
2063+ dev_priv->topaz_bo->num_pages,
2064+ &dev_priv->topaz_bo_kmap);
2065+ if (ret) {
2066+ DRM_ERROR("TOPAZ: mapping topaz BO failed......\n");
2067+ ttm_bo_unref(&dev_priv->topaz_bo);
2068+ return ret;
2069+ }
2070+
2071+ topaz_bo_virt = ttm_kmap_obj_virtual(&dev_priv->topaz_bo_kmap,
2072+ &is_iomem);
2073+ dev_priv->topaz_ccb_wb = (void *) topaz_bo_virt;
2074+ dev_priv->topaz_wb_offset = dev_priv->topaz_bo->offset;
2075+ dev_priv->topaz_sync_addr = (uint32_t *) (topaz_bo_virt + 2048);
2076+ dev_priv->topaz_sync_offset = dev_priv->topaz_wb_offset + 2048;
2077+ PSB_DEBUG_GENERAL("TOPAZ: allocated BO for WriteBack and SYNC command,"
2078+ "WB offset=0x%08x, SYNC offset=0x%08x\n",
2079+ dev_priv->topaz_wb_offset, dev_priv->topaz_sync_offset);
2080+
2081+ *(dev_priv->topaz_sync_addr) = ~0; /* reset sync seq */
2082+
2083+ /* # reset topaz */
2084+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2085+ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2086+ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2087+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2088+ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2089+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2090+ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2091+
2092+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2093+ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2094+ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2095+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2096+ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2097+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2098+ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2099+
2100+ /* # set up MMU */
2101+ topaz_mmu_hwsetup(dev_priv);
2102+
2103+ PSB_DEBUG_GENERAL("TOPAZ: defer firmware loading until"
2104+ " user space commands are received\n");
2105+
2106+#if 0 /* can't load FW here */
2107+ /* #.# load fw to driver */
2108+ PSB_DEBUG_GENERAL("TOPAZ: will init firmware\n");
2109+ ret = topaz_init_fw(dev);
2110+ if (ret != 0)
2111+ return -1;
2112+
2113+ topaz_setup_fw(dev, FW_H264_NO_RC);/* just for test */
2114+#endif
2115+ /* <msvdx does> # minimal clock */
2116+
2117+ /* <msvdx does> # return 0 */
2118+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_ID, &core_id);
2119+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_REV, &core_rev);
2120+
2121+ PSB_DEBUG_GENERAL("TOPAZ: core_id(%x) core_rev(%x)\n",
2122+ core_id, core_rev);
2123+
2124+ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX)
2125+ psb_power_down_topaz(dev);
2126+
2127+ return 0;
2128+}
2129+
2130+int lnc_topaz_uninit(struct drm_device *dev)
2131+{
2132+ struct drm_psb_private *dev_priv = dev->dev_private;
2133+ /* int n;*/
2134+
2135+ /* flush MMU */
2136+ PSB_DEBUG_GENERAL("XXX: need to flush mmu cache here??\n");
2137+ /* topaz_mmu_flushcache (dev_priv); */
2138+
2139+ /* # reset TOPAZ chip */
2140+ lnc_topaz_reset(dev_priv);
2141+
2142+ /* release resources */
2143+ /* # release write back memory */
2144+ dev_priv->topaz_ccb_wb = NULL;
2145+
2146+ ttm_bo_unref(&dev_priv->topaz_bo);
2147+
2148+ /* release mtx register save space */
2149+ kfree(dev_priv->topaz_mtx_reg_state);
2150+
2151+ /* release mtx data memory save space */
2152+ if (dev_priv->topaz_mtx_data_mem)
2153+ ttm_bo_unref(&dev_priv->topaz_mtx_data_mem);
2154+
2155+ /* # release firmware */
2156+ /* XXX: but this handling should be reconsidered */
2157+ /* XXX: there is no jpeg firmware...... */
2158+#if 0 /* FIX WHEN FIRMWARE IS LOADED */
2159+ for (n = 1; n < IMG_CODEC_NUM; ++n) {
2160+ ttm_bo_unref(&topaz_fw[n].text);
2161+ ttm_bo_unref(&topaz_fw[n].data);
2162+ }
2163+#endif
2164+ ttm_bo_kunmap(&dev_priv->topaz_bo_kmap);
2165+ ttm_bo_unref(&dev_priv->topaz_bo);
2166+
2167+ return 0;
2168+}
2169+
2170+int lnc_topaz_reset(struct drm_psb_private *dev_priv)
2171+{
2172+ return 0;
2173+#if 0
2174+ int ret = 0;
2175+ /* # software reset */
2176+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
2177+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
2178+
2179+ /* # call lnc_wait_for_register, wait reset finished */
2180+ topaz_wait_for_register(dev_priv,
2181+ MTX_START + MTX_CORE_CR_MTX_ENABLE_OFFSET,
2182+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK,
2183+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
2184+
2185+ /* # if the reset finished */
2186+ PSB_DEBUG_GENERAL("XXX: add condition judgement for topaz wait...\n");
2187+ /* #.# clear interrupt enable flag */
2188+
2189+ /* #.# clear pending interrupt flags */
2190+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
2191+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX) |
2192+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT) |
2193+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA) |
2194+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT)
2195+ );
2196+ /* # destroy topaz mutex in drm_psb_private [msvdx_mutex] */
2197+
2198+ /* # return register value which is waited above */
2199+
2200+ PSB_DEBUG_GENERAL("called\n");
2201+ return 0;
2202+#endif
2203+}
2204+
2205+/* read firmware bin file and load all data into driver */
2206+int topaz_init_fw(struct drm_device *dev)
2207+{
2208+ struct drm_psb_private *dev_priv = dev->dev_private;
2209+ struct ttm_bo_device *bdev = &dev_priv->bdev;
2210+ const struct firmware *raw = NULL;
2211+ unsigned char *ptr;
2212+ int ret = 0;
2213+ int n;
2214+ struct topaz_fwinfo *cur_fw;
2215+ int cur_size;
2216+ struct topaz_codec_fw *cur_codec;
2217+ struct ttm_buffer_object **cur_drm_obj;
2218+ struct ttm_bo_kmap_obj tmp_kmap;
2219+ bool is_iomem;
2220+
2221+ dev_priv->stored_initial_qp = 0;
2222+
2223+ /* # get firmware */
2224+ ret = request_firmware(&raw, FIRMWARE_NAME, &dev->pdev->dev);
2225+ if (ret != 0) {
2226+ DRM_ERROR("TOPAZ: request_firmware failed: %d\n", ret);
2227+ return ret;
2228+ }
2229+
2230+ PSB_DEBUG_GENERAL("TOPAZ: opened firmware\n");
2231+
2232+ if (raw && (raw->size < sizeof(struct topaz_fwinfo))) {
2233+ DRM_ERROR("TOPAZ: firmware file is not correct size.\n");
2234+ goto out;
2235+ }
2236+
2237+ ptr = (unsigned char *) raw->data;
2238+
2239+ if (!ptr) {
2240+ DRM_ERROR("TOPAZ: failed to load firmware.\n");
2241+ goto out;
2242+ }
2243+
2244+ /* # load fw from file */
2245+ PSB_DEBUG_GENERAL("TOPAZ: load firmware.....\n");
2246+ cur_fw = NULL;
2247+ /* the first element is not used */
2248+ for (n = 1; n < IMG_CODEC_NUM; ++n) {
2249+ cur_fw = (struct topaz_fwinfo *) ptr;
2250+
2251+ cur_codec = &topaz_fw[cur_fw->codec];
2252+ cur_codec->ver = cur_fw->ver;
2253+ cur_codec->codec = cur_fw->codec;
2254+ cur_codec->text_size = cur_fw->text_size;
2255+ cur_codec->data_size = cur_fw->data_size;
2256+ cur_codec->data_location = cur_fw->data_location;
2257+
2258+ PSB_DEBUG_GENERAL("TOPAZ: load firmware %s.\n",
2259+ codec_to_string(cur_fw->codec));
2260+
2261+ /* #.# handle text section */
2262+ cur_codec->text = NULL;
2263+ ptr += sizeof(struct topaz_fwinfo);
2264+ cur_drm_obj = &cur_codec->text;
2265+ cur_size = cur_fw->text_size;
2266+
2267+ /* #.# malloc DRM object for fw storage */
2268+ ret = ttm_buffer_object_create(bdev, cur_size,
2269+ ttm_bo_type_kernel,
2270+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
2271+ 0, 0, 0, NULL, cur_drm_obj);
2272+ if (ret) {
2273+ DRM_ERROR("Failed to allocate firmware.\n");
2274+ goto out;
2275+ }
2276+
2277+ /* #.# fill DRM object with firmware data */
2278+ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
2279+ &tmp_kmap);
2280+ if (ret) {
2281+ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
2282+ ttm_bo_unref(cur_drm_obj);
2283+ *cur_drm_obj = NULL;
2284+ goto out;
2285+ }
2286+
2287+ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
2288+ cur_size);
2289+
2290+ ttm_bo_kunmap(&tmp_kmap);
2291+
2292+ /* #.# handle data section */
2293+ cur_codec->data = NULL;
2294+ ptr += cur_fw->text_size;
2295+ cur_drm_obj = &cur_codec->data;
2296+ cur_size = cur_fw->data_size;
2297+
2298+ /* #.# malloc DRM object for fw storage */
2299+ ret = ttm_buffer_object_create(bdev, cur_size,
2300+ ttm_bo_type_kernel,
2301+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
2302+ 0, 0, 0, NULL, cur_drm_obj);
2303+ if (ret) {
2304+ DRM_ERROR("Failed to allocate firmware.\n");
2305+ goto out;
2306+ }
2307+
2308+ /* #.# fill DRM object with firmware data */
2309+ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
2310+ &tmp_kmap);
2311+ if (ret) {
2312+ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
2313+ ttm_bo_unref(cur_drm_obj);
2314+ *cur_drm_obj = NULL;
2315+ goto out;
2316+ }
2317+
2318+ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
2319+ cur_size);
2320+
2321+ ttm_bo_kunmap(&tmp_kmap);
2322+
2323+ /* #.# validate firmware */
2324+
2325+ /* #.# update ptr */
2326+ ptr += cur_fw->data_size;
2327+ }
2328+
2329+ release_firmware(raw);
2330+
2331+ PSB_DEBUG_GENERAL("TOPAZ: return from firmware init\n");
2332+
2333+ return 0;
2334+
2335+out:
2336+ if (raw) {
2337+ PSB_DEBUG_GENERAL("release firmware....\n");
2338+ release_firmware(raw);
2339+ }
2340+
2341+ return -1;
2342+}
2343+
2344+/* set up fw when starting a new context */
2345+int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
2346+{
2347+ struct drm_psb_private *dev_priv = dev->dev_private;
2348+ struct ttm_bo_device *bdev = &dev_priv->bdev;
2349+ uint32_t mem_size = RAM_SIZE; /* follow DDK */
2350+ uint32_t verify_pc;
2351+ int ret;
2352+
2353+#if 0
2354+ if (codec == dev_priv->topaz_current_codec) {
2355+ LNC_TRACEL("TOPAZ: reuse previous codec\n");
2356+ return 0;
2357+ }
2358+#endif
2359+
2360+ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX)
2361+ psb_power_up_topaz(dev);
2362+
2363+ /* XXX: do we need to reset topaz? */
2364+ PSB_DEBUG_GENERAL("XXX: should topaz be reset when the context changes?\n");
2365+
2366+ /* XXX: interrupts shouldn't be enabled here; this function is
2367+ * called while interrupts are enabled, but we have no choice
2368+ * since we have to call setup_fw manually
2369+ */
2370+ /* # upload the firmware, clear interrupts and start the firmware
2371+ * -- from hostutils.c in TestSuits */
2372+
2373+ /* # reset MVEA */
2374+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2375+ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2376+ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2377+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2378+ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2379+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2380+ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2381+
2382+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2383+ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2384+ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2385+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2386+ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2387+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2388+ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2389+
2390+
2391+ topaz_mmu_hwsetup(dev_priv);
2392+
2393+#if !LNC_TOPAZ_NO_IRQ
2394+ lnc_topaz_disableirq(dev);
2395+#endif
2396+
2397+ PSB_DEBUG_GENERAL("TOPAZ: will setup firmware....\n");
2398+
2399+ topaz_set_default_regs(dev_priv);
2400+
2401+ /* # reset mtx */
2402+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
2403+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET) |
2404+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
2405+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET));
2406+
2407+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST, 0x0);
2408+
2409+ /* # upload fw by drm */
2410+ PSB_DEBUG_GENERAL("TOPAZ: will upload firmware\n");
2411+
2412+ topaz_upload_fw(dev, codec);
2413+
2414+ /* allocate the space for context save & restore if needed */
2415+ if (dev_priv->topaz_mtx_data_mem == NULL) {
2416+ ret = ttm_buffer_object_create(bdev,
2417+ dev_priv->cur_mtx_data_size * 4,
2418+ ttm_bo_type_kernel,
2419+ DRM_PSB_FLAG_MEM_MMU |
2420+ TTM_PL_FLAG_NO_EVICT,
2421+ 0, 0, 0, NULL,
2422+ &dev_priv->topaz_mtx_data_mem);
2423+ if (ret) {
2424+ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
2425+ "mtx data save\n");
2426+ return -1;
2427+ }
2428+ }
2429+ PSB_DEBUG_GENERAL("TOPAZ: after upload fw ....\n");
2430+
2431+ /* XXX: In power save mode, need to save the complete data memory
2432+ * and restore it. MTX_FWIF.c records the data size */
2433+ PSB_DEBUG_GENERAL("TOPAZ: in power save mode, need to save memory?\n");
2434+
2435+ PSB_DEBUG_GENERAL("TOPAZ: setting up pc address\n");
2436+ topaz_write_core_reg(dev_priv, TOPAZ_MTX_PC, PC_START_ADDRESS);
2437+
2438+ PSB_DEBUG_GENERAL("TOPAZ: verify pc address\n");
2439+
2440+ topaz_read_core_reg(dev_priv, TOPAZ_MTX_PC, &verify_pc);
2441+
2442+ /* enabling auto clock gating is essential for this driver */
2443+ TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_AUTO_CLK_GATE,
2444+ F_ENCODE(1, TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE) |
2445+ F_ENCODE(1, TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE));
2446+ MVEA_WRITE32(MVEA_CR_MVEA_AUTO_CLOCK_GATING,
2447+ F_ENCODE(1, MVEA_CR_MVEA_IPE_AUTO_CLK_GATE) |
2448+ F_ENCODE(1, MVEA_CR_MVEA_SPE_AUTO_CLK_GATE) |
2449+ F_ENCODE(1, MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE) |
2450+ F_ENCODE(1, MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE));
2451+
2452+ PSB_DEBUG_GENERAL("TOPAZ: current pc(%08X) vs %08X\n",
2453+ verify_pc, PC_START_ADDRESS);
2454+
2455+ /* # turn on MTX */
2456+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
2457+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
2458+
2459+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
2460+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
2461+
2462+ /* # poll on the interrupt which the firmware will generate */
2463+ topaz_wait_for_register(dev_priv,
2464+ TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT,
2465+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTS_MTX),
2466+ F_MASK(TOPAZ_CR_IMG_TOPAZ_INTS_MTX));
2467+
2468+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
2469+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
2470+
2471+ PSB_DEBUG_GENERAL("TOPAZ: after topaz mtx setup ....\n");
2472+
2473+ /* # get ccb buffer addr -- file hostutils.c */
2474+ dev_priv->topaz_ccb_buffer_addr =
2475+ topaz_read_mtx_mem(dev_priv,
2476+ MTX_DATA_MEM_BASE + mem_size - 4);
2477+ dev_priv->topaz_ccb_ctrl_addr =
2478+ topaz_read_mtx_mem(dev_priv,
2479+ MTX_DATA_MEM_BASE + mem_size - 8);
2480+ dev_priv->topaz_ccb_size =
2481+ topaz_read_mtx_mem(dev_priv,
2482+ dev_priv->topaz_ccb_ctrl_addr +
2483+ MTX_CCBCTRL_CCBSIZE);
2484+
2485+ dev_priv->topaz_cmd_windex = 0;
2486+
2487+ PSB_DEBUG_GENERAL("TOPAZ:ccb_buffer_addr(%x),ctrl_addr(%x) size(%d)\n",
2488+ dev_priv->topaz_ccb_buffer_addr,
2489+ dev_priv->topaz_ccb_ctrl_addr,
2490+ dev_priv->topaz_ccb_size);
2491+
2492+ /* # write back the initial QP Value */
2493+ topaz_write_mtx_mem(dev_priv,
2494+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP,
2495+ dev_priv->stored_initial_qp);
2496+
2497+ PSB_DEBUG_GENERAL("TOPAZ: write WB mem address 0x%08x\n",
2498+ dev_priv->topaz_wb_offset);
2499+ topaz_write_mtx_mem(dev_priv, MTX_DATA_MEM_BASE + mem_size - 12,
2500+ dev_priv->topaz_wb_offset);
2501+
2502+ /* this kick is essential for mtx.... */
2503+ *((uint32_t *) dev_priv->topaz_ccb_wb) = 0x01020304;
2504+ topaz_mtx_kick(dev_priv, 1);
2505+ DRM_UDELAY(1000);
2506+ PSB_DEBUG_GENERAL("TOPAZ: DDK expected 0x12345678 in WB memory,"
2507+ " and here it is 0x%08x\n",
2508+ *((uint32_t *) dev_priv->topaz_ccb_wb));
2509+
2510+ *((uint32_t *) dev_priv->topaz_ccb_wb) = 0x0;/* reset it to 0 */
2511+ PSB_DEBUG_GENERAL("TOPAZ: firmware uploaded.\n");
2512+
2513+ /* XXX: is there any need to record the next cmd num?
2514+ * we use the fence sequence number to track it
2515+ */
2516+ dev_priv->topaz_busy = 0;
2517+ dev_priv->topaz_cmd_seq = 0;
2518+
2519+#if !LNC_TOPAZ_NO_IRQ
2520+ lnc_topaz_enableirq(dev);
2521+#endif
2522+
2523+#if 0
2524+ /* test sync command */
2525+ {
2526+ uint32_t sync_cmd[3];
2527+ uint32_t *sync_p = (uint32_t *)dev_priv->topaz_sync_addr;
2528+ int count = 10000;
2529+
2530+ /* insert a SYNC command here */
2531+ sync_cmd[0] = MTX_CMDID_SYNC | (3 << 8) |
2532+ (0x5b << 16);
2533+ sync_cmd[1] = dev_priv->topaz_sync_offset;
2534+ sync_cmd[2] = 0x3c;
2535+
2536+ TOPAZ_BEGIN_CCB(dev_priv);
2537+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
2538+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
2539+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
2540+ TOPAZ_END_CCB(dev_priv, 1);
2541+
2542+ while (count && *sync_p != 0x3c) {
2543+ DRM_UDELAY(1000);
2544+ --count;
2545+ }
2546+ if ((count == 0) && (*sync_p != 0x3c)) {
2547+ DRM_ERROR("TOPAZ: wait sync timeout (0x%08x),"
2548+ "actual 0x%08x\n",
2549+ 0x3c, *sync_p);
2550+ }
2551+ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
2552+ }
2553+#endif
2554+#if 0
2555+ topaz_mmu_flush(dev);
2556+
2557+ topaz_test_null(dev, 0xe1e1);
2558+ topaz_test_null(dev, 0xe2e2);
2559+ topaz_mmu_test(dev, 0x12345678);
2560+ topaz_test_null(dev, 0xe3e3);
2561+ topaz_mmu_test(dev, 0x8764321);
2562+
2563+ topaz_test_null(dev, 0xe4e4);
2564+ topaz_test_null(dev, 0xf3f3);
2565+#endif
2566+
2567+ return 0;
2568+}
2569+
2570+#if UPLOAD_FW_BY_DMA
2571+int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
2572+{
2573+ struct drm_psb_private *dev_priv = dev->dev_private;
2574+ const struct topaz_codec_fw *cur_codec_fw;
2575+ uint32_t text_size, data_size;
2576+ uint32_t data_location;
2577+ uint32_t cur_mtx_data_size;
2578+
2579+ /* # refer HLD document */
2580+
2581+ /* # MTX reset */
2582+ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
2583+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
2584+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
2585+
2586+ DRM_UDELAY(6000);
2587+
2588+ /* # upload the firmware by DMA */
2589+ cur_codec_fw = &topaz_fw[codec];
2590+
2591+ PSB_DEBUG_GENERAL("Topaz:upload codec %s(%d) text sz=%d data sz=%d"
2592+ " data location(%d)\n", codec_to_string(codec), codec,
2593+ cur_codec_fw->text_size, cur_codec_fw->data_size,
2594+ cur_codec_fw->data_location);
2595+
2596+ /* # upload text */
2597+ text_size = cur_codec_fw->text_size / 4;
2598+
2599+ /* set up the MTX to start receiving data:
2600+ use a register for the transfer which will point to the source
2601+ (MTX_CR_MTX_SYSC_CDMAT) */
2602+ /* #.# fill the dst addr */
2603+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
2604+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
2605+ F_ENCODE(2, MTX_BURSTSIZE) |
2606+ F_ENCODE(0, MTX_RNW) |
2607+ F_ENCODE(1, MTX_ENABLE) |
2608+ F_ENCODE(text_size, MTX_LENGTH));
2609+
2610+ /* #.# set DMAC access to host memory via BIF */
2611+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
2612+
2613+ /* #.# transfer the codec */
2614+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
2615+ MTX_CR_MTX_SYSC_CDMAT, text_size, 0, 0);
2616+
2617+ /* #.# wait dma finish */
2618+ topaz_wait_for_register(dev_priv,
2619+ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
2620+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
2621+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
2622+
2623+ /* #.# clear interrupt */
2624+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
2625+
2626+ /* # return access to topaz core */
2627+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
2628+
2629+ /* # upload data */
2630+ data_size = cur_codec_fw->data_size / 4;
2631+ data_location = cur_codec_fw->data_location;
2632+
2633+ /* #.# fill the dst addr */
2634+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA,
2635+ 0x80900000 + data_location - 0x82880000);
2636+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
2637+ F_ENCODE(2, MTX_BURSTSIZE) |
2638+ F_ENCODE(0, MTX_RNW) |
2639+ F_ENCODE(1, MTX_ENABLE) |
2640+ F_ENCODE(data_size, MTX_LENGTH));
2641+
2642+ /* #.# set DMAC access to host memory via BIF */
2643+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
2644+
2645+ /* #.# transfer the codec */
2646+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->data->offset, 0,
2647+ MTX_CR_MTX_SYSC_CDMAT, data_size, 0, 0);
2648+
2649+ /* #.# wait dma finish */
2650+ topaz_wait_for_register(dev_priv,
2651+ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
2652+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
2653+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
2654+
2655+ /* #.# clear interrupt */
2656+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
2657+
2658+ /* # return access to topaz core */
2659+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
2660+
2661+ /* record this codec's mtx data size for
2662+ * context save & restore */
2663+ cur_mtx_data_size = RAM_SIZE - (data_location - 0x82880000);
2664+ if (dev_priv->cur_mtx_data_size != cur_mtx_data_size) {
2665+ dev_priv->cur_mtx_data_size = cur_mtx_data_size;
2666+ if (dev_priv->topaz_mtx_data_mem)
2667+ ttm_bo_unref(&dev_priv->topaz_mtx_data_mem);
2668+ dev_priv->topaz_mtx_data_mem = NULL;
2669+ }
2670+
2671+ return 0;
2672+}
2673+
2674+#else
2675+
2676+void topaz_mtx_upload_by_register(struct drm_device *dev, uint32_t mtx_mem,
2677+ uint32_t addr, uint32_t size,
2678+ struct ttm_buffer_object *buf)
2679+{
2680+ struct drm_psb_private *dev_priv = dev->dev_private;
2681+ uint32_t *buf_p;
2682+ uint32_t debug_reg, bank_size, bank_ram_size, bank_count;
2683+ uint32_t cur_ram_id, ram_addr , ram_id;
2684+ int map_ret, lp;
2685+ struct ttm_bo_kmap_obj bo_kmap;
2686+ bool is_iomem;
2687+ uint32_t cur_addr;
2688+
2689+ get_mtx_control_from_dash(dev_priv);
2690+
2691+ map_ret = ttm_bo_kmap(buf, 0, buf->num_pages, &bo_kmap);
2692+ if (map_ret) {
2693+ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", map_ret);
2694+ return;
2695+ }
2696+ buf_p = (uint32_t *) ttm_kmap_obj_virtual(&bo_kmap, &is_iomem);
2697+
2698+
2699+ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, &debug_reg);
2700+ debug_reg = 0x0a0a0606;
2701+ bank_size = (debug_reg & 0xf0000) >> 16;
2702+ bank_ram_size = 1 << (bank_size + 2);
2703+
2704+ bank_count = (debug_reg & 0xf00) >> 8;
2705+
2706+ topaz_wait_for_register(dev_priv,
2707+ MTX_START+MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET,
2708+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
2709+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
2710+
2711+ cur_ram_id = -1;
2712+ cur_addr = addr;
2713+ for (lp = 0; lp < size / 4; ++lp) {
2714+ ram_id = mtx_mem + (cur_addr / bank_ram_size);
2715+
2716+ if (cur_ram_id != ram_id) {
2717+ ram_addr = cur_addr >> 2;
2718+
2719+ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
2720+ F_ENCODE(ram_id, MTX_MTX_MCMID) |
2721+ F_ENCODE(ram_addr, MTX_MTX_MCM_ADDR) |
2722+ F_ENCODE(1, MTX_MTX_MCMAI));
2723+
2724+ cur_ram_id = ram_id;
2725+ }
2726+ cur_addr += 4;
2727+
2728+ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET,
2729+ *(buf_p + lp));
2730+
2731+ topaz_wait_for_register(dev_priv,
2732+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET + MTX_START,
2733+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
2734+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
2735+ }
2736+
2737+ ttm_bo_kunmap(&bo_kmap);
2738+
2739+ PSB_DEBUG_GENERAL("TOPAZ: register data upload done\n");
2740+ return;
2741+}
2742+
2743+int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
2744+{
2745+ struct drm_psb_private *dev_priv = dev->dev_private;
2746+ const struct topaz_codec_fw *cur_codec_fw;
2747+ uint32_t text_size, data_size;
2748+ uint32_t data_location;
2749+
2750+ /* # refer HLD document */
2751+ /* # MTX reset */
2752+ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
2753+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
2754+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
2755+
2756+ DRM_UDELAY(6000);
2757+
2758+ /* # upload the firmware by DMA */
2759+ cur_codec_fw = &topaz_fw[codec];
2760+
2761+ PSB_DEBUG_GENERAL("Topaz: upload codec %s text size(%d) data size(%d)"
2762+ " data location(0x%08x)\n", codec_to_string(codec),
2763+ cur_codec_fw->text_size, cur_codec_fw->data_size,
2764+ cur_codec_fw->data_location);
2765+
2766+ /* # upload text */
2767+ text_size = cur_codec_fw->text_size;
2768+
2769+ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_CODE_MEM,
2770+ PC_START_ADDRESS - MTX_MEMORY_BASE,
2771+ text_size, cur_codec_fw->text);
2772+
2773+ /* # upload data */
2774+ data_size = cur_codec_fw->data_size;
2775+ data_location = cur_codec_fw->data_location;
2776+
2777+ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_DATA_MEM,
2778+ data_location - 0x82880000, data_size,
2779+ cur_codec_fw->data);
2780+
2781+ return 0;
2782+}
2783+
2784+#endif /* UPLOAD_FW_BY_DMA */
2785+
2786+void
2787+topaz_dma_transfer(struct drm_psb_private *dev_priv, uint32_t channel,
2788+ uint32_t src_phy_addr, uint32_t offset,
2789+ uint32_t soc_addr, uint32_t byte_num,
2790+ uint32_t is_increment, uint32_t is_write)
2791+{
2792+ uint32_t dmac_count;
2793+ uint32_t irq_stat;
2794+ uint32_t count;
2795+
2796+ PSB_DEBUG_GENERAL("TOPAZ: using dma to transfer firmware\n");
2797+ /* # check that no transfer is currently in progress and no
2798+ interrupts are outstanding ?? (why do we care about interrupts?) */
2799+ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &dmac_count);
2800+ if (0 != (dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)))
2801+ DRM_ERROR("TOPAZ: there is a transfer in progress\n");
2802+
2803+ /* assert(0==(dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)));*/
2804+
2805+ /* no hold off period */
2806+ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
2807+ /* clear previous interrupts */
2808+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
2809+ /* check irq status */
2810+ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_stat);
2811+ /* assert(0 == irq_stat); */
2812+ if (0 != irq_stat)
2813+ DRM_ERROR("TOPAZ: there is a hold up\n");
2814+
2815+ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel),
2816+ (src_phy_addr + offset));
2817+ count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
2818+ is_write, DMAC_PWIDTH_32_BIT, byte_num);
2819+ /* generate an interrupt at the end of transfer */
2820+ count |= MASK_IMG_SOC_TRANSFER_IEN;
2821+ count |= F_ENCODE(is_write, IMG_SOC_DIR);
2822+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count);
2823+
2824+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
2825+ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0,
2826+ is_increment, DMAC_BURST_2));
2827+
2828+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
2829+
2830+ /* Finally, rewrite the count register with
2831+ * the enable bit set to kick off the transfer
2832+ */
2833+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count | MASK_IMG_SOC_EN);
2834+
2835+ PSB_DEBUG_GENERAL("TOPAZ: dma transfer started.\n");
2836+
2837+ return;
2838+}
2839+
2840+void topaz_set_default_regs(struct drm_psb_private *dev_priv)
2841+{
2842+ int n;
2843+ int count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
2844+
2845+ for (n = 0; n < count; n++)
2846+ MM_WRITE32(topaz_default_regs[n][0],
2847+ topaz_default_regs[n][1],
2848+ topaz_default_regs[n][2]);
2849+
2850+}
2851+
2852+void topaz_write_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
2853+ const uint32_t val)
2854+{
2855+ uint32_t tmp;
2856+ get_mtx_control_from_dash(dev_priv);
2857+
2858+ /* put data into MTX_RW_DATA */
2859+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET, val);
2860+
2861+ /* request a write */
2862+ tmp = reg &
2863+ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK;
2864+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, tmp);
2865+
2866+ /* wait for operation finished */
2867+ topaz_wait_for_register(dev_priv,
2868+ MTX_START +
2869+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
2870+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
2871+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
2872+
2873+ release_mtx_control_from_dash(dev_priv);
2874+}
2875+
2876+void topaz_read_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
2877+ uint32_t *ret_val)
2878+{
2879+ uint32_t tmp;
2880+
2881+ get_mtx_control_from_dash(dev_priv);
2882+
2883+ /* request a write */
2884+ tmp = (reg &
2885+ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
2886+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
2887+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK | tmp);
2888+
2889+ /* wait for operation finished */
2890+ topaz_wait_for_register(dev_priv,
2891+ MTX_START +
2892+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
2893+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
2894+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
2895+
2896+ /* read */
2897+ MTX_READ32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET,
2898+ ret_val);
2899+
2900+ release_mtx_control_from_dash(dev_priv);
2901+}
2902+
2903+void get_mtx_control_from_dash(struct drm_psb_private *dev_priv)
2904+{
2905+ int debug_reg_slave_val;
2906+
2907+ /* GetMTXControlFromDash */
2908+ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
2909+ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE) |
2910+ F_ENCODE(2, TOPAZ_CR_MTX_DBG_GPIO_OUT));
2911+ do {
2912+ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
2913+ &debug_reg_slave_val);
2914+ } while ((debug_reg_slave_val & 0x18) != 0);
2915+
2916+ /* save access control */
2917+ TOPAZ_READ32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
2918+ &dev_priv->topaz_dash_access_ctrl);
2919+}
2920+
2921+void release_mtx_control_from_dash(struct drm_psb_private *dev_priv)
2922+{
2923+ /* restore access control */
2924+ TOPAZ_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
2925+ dev_priv->topaz_dash_access_ctrl);
2926+
2927+ /* release bus */
2928+ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
2929+ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE));
2930+}
2931+
2932+void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv)
2933+{
2934+ uint32_t pd_addr = psb_get_default_pd_addr(dev_priv->mmu);
2935+
2936+ /* bypass all requests while MMU is being configured */
2937+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
2938+ F_ENCODE(1, TOPAZ_CR_MMU_BYPASS));
2939+
2940+ /* set MMU hardware at the page table directory */
2941+ PSB_DEBUG_GENERAL("TOPAZ: write PD phyaddr=0x%08x "
2942+ "into MMU_DIR_LIST0/1\n", pd_addr);
2943+ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(0), pd_addr);
2944+ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(1), 0);
2945+
2946+ /* setup index register, all pointing to directory bank 0 */
2947+ TOPAZ_WRITE32(TOPAZ_CR_MMU_BANK_INDEX, 0);
2948+
2949+ /* now enable MMU access for all requestors */
2950+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, 0);
2951+}
2952+
2953+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv)
2954+{
2955+ uint32_t mmu_control;
2956+
2957+#if 0
2958+ PSB_DEBUG_GENERAL("XXX: Only one PTD/PTE cache"
2959+ " so flush using the master core\n");
2960+#endif
2961+ /* XXX: disable interrupt */
2962+
2963+ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &mmu_control);
2964+ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_INVALDC);
2965+ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_FLUSH);
2966+
2967+#if 0
2968+ PSB_DEBUG_GENERAL("Set Invalid flag (this causes a flush with MMU\n"
2969+ "still operating afterwards even if not cleared,\n"
2970+ "but may want to replace with MMU_FLUSH?\n");
2971+#endif
2972+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
2973+
2974+ /* clear it */
2975+ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_INVALDC));
2976+ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_FLUSH));
2977+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
2978+}
2979+
2980+#if 0 /* DEBUG_FUNCTION */
2981+struct reg_pair {
2982+ uint32_t base;
2983+ uint32_t offset;
2984+};
2985+
2986+
2987+static int ccb_offset;
2988+
2989+static int topaz_test_null(struct drm_device *dev, uint32_t seq)
2990+{
2991+ struct drm_psb_private *dev_priv = dev->dev_private;
2992+
2993+ /* XXX: firmware setup is finished here....
2994+ * use a NULL command to verify that the
2995+ * firmware is working correctly
2996+ */
2997+ uint32_t null_cmd;
2998+ uint32_t cmd_seq;
2999+
3000+ null_cmd = 0 | (1 << 8) | (seq) << 16;
3001+ topaz_write_mtx_mem(dev_priv,
3002+ dev_priv->topaz_ccb_buffer_addr + ccb_offset,
3003+ null_cmd);
3004+
3005+ topaz_mtx_kick(dev_priv, 1);
3006+
3007+ DRM_UDELAY(1000); /* wait to finish */
3008+
3009+ cmd_seq = topaz_read_mtx_mem(dev_priv,
3010+ dev_priv->topaz_ccb_ctrl_addr + 4);
3011+
3012+ PSB_DEBUG_GENERAL("Topaz: Sent NULL with sequence=0x%08x,"
3013+ " got sequence=0x%08x (WB_seq=0x%08x,WB_roff=%d)\n",
3014+ seq, cmd_seq, WB_SEQ, WB_ROFF);
3015+
3016+ PSB_DEBUG_GENERAL("Topaz: after NULL test, query IRQ and clear it\n");
3017+
3018+ topaz_test_queryirq(dev);
3019+ topaz_test_clearirq(dev);
3020+
3021+ ccb_offset += 4;
3022+
3023+ return 0;
3024+}
3025+
3026+void topaz_mmu_flush(struct drm_psb_private *dev_priv)
3027+{
3028+ uint32_t val;
3029+
3030+ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &val);
3031+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
3032+ val | F_ENCODE(1, TOPAZ_CR_MMU_INVALDC));
3033+ wmb();
3034+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
3035+ val & ~F_ENCODE(0, TOPAZ_CR_MMU_INVALDC));
3036+ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &val);
3037+}
3038+
3039+/*
3040+ * this function tests whether the mmu is working correctly:
3041+ * it gets a drm_buffer_object and uses CMD_SYNC to write
3042+ * a certain value into this buffer.
3043+ */
3044+static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value)
3045+{
3046+ struct drm_psb_private *dev_priv = dev->dev_private;
3047+ uint32_t sync_cmd;
3048+ unsigned long real_pfn;
3049+ int ret;
3050+ uint32_t cmd_seq;
3051+
3052+ *((uint32_t *)dev_priv->topaz_sync_addr) = 0xeeeeeeee;
3053+
3054+ /* topaz_mmu_flush(dev); */
3055+
3056+ sync_cmd = MTX_CMDID_SYNC | (3 << 8) | (0xeeee) << 16;
3057+
3058+ topaz_write_mtx_mem_multiple_setup(dev_priv,
3059+ dev_priv->topaz_ccb_buffer_addr + ccb_offset);
3060+
3061+ topaz_write_mtx_mem_multiple(dev_priv, sync_cmd);
3062+ topaz_write_mtx_mem_multiple(dev_priv, dev_priv->topaz_sync_offset);
3063+ topaz_write_mtx_mem_multiple(dev_priv, sync_value);
3064+
3065+ topaz_mtx_kick(dev_priv, 1);
3066+
3067+ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
3068+ dev_priv->topaz_sync_offset, &real_pfn);
3069+ if (ret != 0) {
3070+ PSB_DEBUG_GENERAL("psb_mmu_virtual_to_pfn failed,exit\n");
3071+ return;
3072+ }
3073+ PSB_DEBUG_GENERAL("TOPAZ: issued SYNC command, "
3074+ "BO offset=0x%08x (pfn=%lu), synch value=0x%08x\n",
3075+ dev_priv->topaz_sync_offset, real_pfn, sync_value);
3076+
3077+ /* XXX: if we could use interrupts, we could wait for this command to finish */
3078+ /* topaz_wait_for_register (dev_priv,
3079+ TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT, 0xf, 0xf); */
3080+ DRM_UDELAY(1000);
3081+
3082+ cmd_seq = topaz_read_mtx_mem(dev_priv,
3083+ dev_priv->topaz_ccb_ctrl_addr + 4);
3084+ PSB_DEBUG_GENERAL("Topaz: cmd_seq equals 0x%x, and expected 0x%x "
3085+ "(WB_seq=0x%08x,WB_roff=%d),synch value is 0x%x,"
3086+ "expected 0x%08x\n",
3087+ cmd_seq, 0xeeee, WB_SEQ, WB_ROFF,
3088+ *((uint32_t *)dev_priv->topaz_sync_addr), sync_value);
3089+
3090+ PSB_DEBUG_GENERAL("Topaz: after MMU test, query IRQ and clear it\n");
3091+ topaz_test_queryirq(dev);
3092+ topaz_test_clearirq(dev);
3093+
3094+ ccb_offset += 3*4; /* shift 3DWs */
3095+}
3096+
3097+#endif
3098+
3099+int lnc_topaz_restore_mtx_state(struct drm_device *dev)
3100+{
3101+ struct drm_psb_private *dev_priv =
3102+ (struct drm_psb_private *)dev->dev_private;
3103+ uint32_t reg_val;
3104+ uint32_t *mtx_reg_state;
3105+ int i;
3106+
3107+ if (dev_priv->topaz_mtx_data_mem == NULL) {
3108+ DRM_ERROR("TOPAZ: try to restore context without "
3109+ "space allocated\n");
3110+ return -1;
3111+ }
3112+
3113+ /* turn on mtx clocks */
3114+ MTX_READ32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, &reg_val);
3115+ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
3116+ reg_val & (~MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE));
3117+
3118+ /* reset mtx */
3119+ /* FIXME: should use core_write??? */
3120+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
3121+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
3122+ DRM_UDELAY(6000);
3123+
3124+ topaz_mmu_hwsetup(dev_priv);
3125+ /* upload code, restore mtx data */
3126+ mtx_dma_write(dev);
3127+
3128+ mtx_reg_state = dev_priv->topaz_mtx_reg_state;
3129+ /* restore register */
3130+ /* FIXME: consider putting read/write into one function */
3131+ /* Restores 8 Registers of D0 Bank */
3132+ /* D0Re0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
3133+ for (i = 0; i < 8; i++) {
3134+ topaz_write_core_reg(dev_priv, 0x1 | (i<<4),
3135+ *mtx_reg_state);
3136+ mtx_reg_state++;
3137+ }
3138+ /* Restores 8 Registers of D1 Bank */
3139+ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
3140+ for (i = 0; i < 8; i++) {
3141+ topaz_write_core_reg(dev_priv, 0x2 | (i<<4),
3142+ *mtx_reg_state);
3143+ mtx_reg_state++;
3144+ }
3145+ /* Restores 4 Registers of A0 Bank */
3146+ /* A0StP, A0FrP, A0.2 and A0.3 */
3147+ for (i = 0; i < 4; i++) {
3148+ topaz_write_core_reg(dev_priv, 0x3 | (i<<4),
3149+ *mtx_reg_state);
3150+ mtx_reg_state++;
3151+ }
3152+ /* Restores 4 Registers of A1 Bank */
3153+ /* A1GbP, A1LbP, A1.2 and A1.3 */
3154+ for (i = 0; i < 4; i++) {
3155+ topaz_write_core_reg(dev_priv, 0x4 | (i<<4),
3156+ *mtx_reg_state);
3157+ mtx_reg_state++;
3158+ }
3159+ /* Restores PC and PCX */
3160+ for (i = 0; i < 2; i++) {
3161+ topaz_write_core_reg(dev_priv, 0x5 | (i<<4),
3162+ *mtx_reg_state);
3163+ mtx_reg_state++;
3164+ }
3165+ /* Restores 8 Control Registers */
3166+ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
3167+ * TXGPIOO */
3168+ for (i = 0; i < 8; i++) {
3169+ topaz_write_core_reg(dev_priv, 0x7 | (i<<4),
3170+ *mtx_reg_state);
3171+ mtx_reg_state++;
3172+ }
3173+
3174+ /* turn on MTX */
3175+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
3176+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
3177+
3178+ return 0;
3179+}
3180+
3181+int lnc_topaz_save_mtx_state(struct drm_device *dev)
3182+{
3183+ struct drm_psb_private *dev_priv =
3184+ (struct drm_psb_private *)dev->dev_private;
3185+ uint32_t *mtx_reg_state;
3186+ int i;
3187+ struct topaz_codec_fw *cur_codec_fw;
3188+
3189+ /* FIXME: make sure the topaz_mtx_data_mem is allocated */
3190+ if (dev_priv->topaz_mtx_data_mem == NULL) {
3191+ DRM_ERROR("TOPAZ: try to save context without space "
3192+ "allocated\n");
3193+ return -1;
3194+ }
3195+
3196+ topaz_wait_for_register(dev_priv,
3197+ MTX_START + MTX_CORE_CR_MTX_TXRPT_OFFSET,
3198+ TXRPT_WAITONKICK_VALUE,
3199+ 0xffffffff);
3200+
3201+ /* stop mtx */
3202+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
3203+ MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK);
3204+
3205+ mtx_reg_state = dev_priv->topaz_mtx_reg_state;
3206+
3207+ /* FIXME: consider putting read/write into one function */
3208+ /* Saves 8 Registers of D0 Bank */
3209+ /* D0Re0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
3210+ for (i = 0; i < 8; i++) {
3211+ topaz_read_core_reg(dev_priv, 0x1 | (i<<4),
3212+ mtx_reg_state);
3213+ mtx_reg_state++;
3214+ }
3215+ /* Saves 8 Registers of D1 Bank */
3216+ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
3217+ for (i = 0; i < 8; i++) {
3218+ topaz_read_core_reg(dev_priv, 0x2 | (i<<4),
3219+ mtx_reg_state);
3220+ mtx_reg_state++;
3221+ }
3222+ /* Saves 4 Registers of A0 Bank */
3223+ /* A0StP, A0FrP, A0.2 and A0.3 */
3224+ for (i = 0; i < 4; i++) {
3225+ topaz_read_core_reg(dev_priv, 0x3 | (i<<4),
3226+ mtx_reg_state);
3227+ mtx_reg_state++;
3228+ }
3229+ /* Saves 4 Registers of A1 Bank */
3230+ /* A1GbP, A1LbP, A1.2 and A1.3 */
3231+ for (i = 0; i < 4; i++) {
3232+ topaz_read_core_reg(dev_priv, 0x4 | (i<<4),
3233+ mtx_reg_state);
3234+ mtx_reg_state++;
3235+ }
3236+ /* Saves PC and PCX */
3237+ for (i = 0; i < 2; i++) {
3238+ topaz_read_core_reg(dev_priv, 0x5 | (i<<4),
3239+ mtx_reg_state);
3240+ mtx_reg_state++;
3241+ }
3242+ /* Saves 8 Control Registers */
3243+ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
3244+ * TXGPIOO */
3245+ for (i = 0; i < 8; i++) {
3246+ topaz_read_core_reg(dev_priv, 0x7 | (i<<4),
3247+ mtx_reg_state);
3248+ mtx_reg_state++;
3249+ }
3250+
3251+ /* save mtx data memory */
3252+ cur_codec_fw = &topaz_fw[dev_priv->topaz_cur_codec];
3253+
3254+ mtx_dma_read(dev, cur_codec_fw->data_location + 0x80900000 - 0x82880000,
3255+ dev_priv->cur_mtx_data_size);
3256+
3257+ /* turn off mtx clocks */
3258+ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
3259+ MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE);
3260+
3261+ return 0;
3262+}
3263+
3264+void mtx_dma_read(struct drm_device *dev, uint32_t source_addr, uint32_t size)
3265+{
3266+ struct drm_psb_private *dev_priv =
3267+ (struct drm_psb_private *)dev->dev_private;
3268+ struct ttm_buffer_object *target;
3269+
3270+ /* setup mtx DMAC registers to do transfer */
3271+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, source_addr);
3272+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
3273+ F_ENCODE(2, MTX_BURSTSIZE) |
3274+ F_ENCODE(1, MTX_RNW) |
3275+ F_ENCODE(1, MTX_ENABLE) |
3276+ F_ENCODE(size, MTX_LENGTH));
3277+
3278+ /* give the DMAC access to the host memory via BIF */
3279+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
3280+
3281+ target = dev_priv->topaz_mtx_data_mem;
3282+ /* transfer the data */
3283+ /* FIXME: is size measured in bytes? */
3284+ topaz_dma_transfer(dev_priv, 0, target->offset, 0,
3285+ MTX_CR_MTX_SYSC_CDMAT,
3286+ size, 0, 1);
3287+
3288+ /* wait for the transfer to finish */
3289+ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
3290+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
3291+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
3292+ /* clear interrupt */
3293+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
3294+ /* give access back to topaz core */
3295+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
3296+}
3297+
3298+void dmac_transfer(struct drm_device *dev, uint32_t channel, uint32_t dst_addr,
3299+ uint32_t soc_addr, uint32_t bytes_num,
3300+ int increment, int rnw)
3301+{
3302+ struct drm_psb_private *dev_priv =
3303+ (struct drm_psb_private *)dev->dev_private;
3304+ uint32_t count_reg;
3305+ uint32_t irq_state;
3306+
3307+ /* check no transfer is in progress */
3308+ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &count_reg);
3309+ if (0 != (count_reg & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) {
3310+ DRM_ERROR("TOPAZ: there's a transfer in progress when trying "
3311+ "to save mtx data\n");
3312+ /* FIXME: how to handle this error */
3313+ return;
3314+ }
3315+
3316+ /* no hold off period */
3317+ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
3318+ /* clear irq state */
3319+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
3320+ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_state);
3321+ if (0 != irq_state) {
3322+ DRM_ERROR("TOPAZ: irq can't be cleared\n");
3323+ return;
3324+ }
3325+
3326+ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel), dst_addr);
3327+ count_reg = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP,
3328+ DMAC_PWIDTH_32_BIT, rnw,
3329+ DMAC_PWIDTH_32_BIT, bytes_num);
3330+ /* generate an interrupt at end of transfer */
3331+ count_reg |= MASK_IMG_SOC_TRANSFER_IEN;
3332+ count_reg |= F_ENCODE(rnw, IMG_SOC_DIR);
3333+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count_reg);
3334+
3335+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
3336+ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, increment,
3337+ DMAC_BURST_2));
3338+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
3339+
3340+ /* Finally, rewrite the count register with the enable
3341+ * bit set to kick off the transfer */
3342+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel),
3343+ count_reg | MASK_IMG_SOC_EN);
3344+}
3345+
3346+void mtx_dma_write(struct drm_device *dev)
3347+{
3348+ struct topaz_codec_fw *cur_codec_fw;
3349+ struct drm_psb_private *dev_priv =
3350+ (struct drm_psb_private *)dev->dev_private;
3351+
3352+ cur_codec_fw = &topaz_fw[dev_priv->topaz_cur_codec];
3353+
3354+ /* upload code */
3355+ /* set up mtx DMAC registers to receive the transfer */
3356+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
3357+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
3358+ F_ENCODE(2, MTX_BURSTSIZE) |
3359+ F_ENCODE(0, MTX_RNW) |
3360+ F_ENCODE(1, MTX_ENABLE) |
3361+ F_ENCODE(cur_codec_fw->text_size / 4, MTX_LENGTH));
3362+
3363+ /* give DMAC access to host memory */
3364+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
3365+
3366+ /* transfer code */
3367+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
3368+ MTX_CR_MTX_SYSC_CDMAT, cur_codec_fw->text_size / 4,
3369+ 0, 0);
3370+ /* wait finished */
3371+ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
3372+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
3373+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
3374+ /* clear interrupt */
3375+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
3376+
3377+ /* set up mtx to start receiving data */
3378+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000 +
3379+ (cur_codec_fw->data_location) - 0x82880000);
3380+
3381+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
3382+ F_ENCODE(2, MTX_BURSTSIZE) |
3383+ F_ENCODE(0, MTX_RNW) |
3384+ F_ENCODE(1, MTX_ENABLE) |
3385+ F_ENCODE(dev_priv->cur_mtx_data_size, MTX_LENGTH));
3386+
3387+ /* give DMAC access to host memory */
3388+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
3389+
3390+ /* transfer data */
3391+ topaz_dma_transfer(dev_priv, 0, dev_priv->topaz_mtx_data_mem->offset,
3392+ 0, MTX_CR_MTX_SYSC_CDMAT,
3393+ dev_priv->cur_mtx_data_size,
3394+ 0, 0);
3395+ /* wait finished */
3396+ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
3397+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
3398+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
3399+ /* clear interrupt */
3400+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
3401+
3402+ /* give access back to Topaz Core */
3403+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
3404+}
3405+
3406+#if 0
3407+void topaz_save_default_regs(struct drm_psb_private *dev_priv, uint32_t *data)
3408+{
3409+ int n;
3410+ int count;
3411+
3412+ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
3413+ for (n = 0; n < count; n++, ++data)
3414+ MM_READ32(topaz_default_regs[n][0],
3415+ topaz_default_regs[n][1],
3416+ data);
3417+
3418+}
3419+
3420+void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
3421+ uint32_t *data)
3422+{
3423+ int n;
3424+ int count;
3425+
3426+ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
3427+ for (n = 0; n < count; n++, ++data)
3428+ MM_WRITE32(topaz_default_regs[n][0],
3429+ topaz_default_regs[n][1],
3430+ *data);
3431+
3432+}
3433+#endif
3434diff -uNr a/drivers/gpu/drm/psb/Makefile b/drivers/gpu/drm/psb/Makefile
3435--- a/drivers/gpu/drm/psb/Makefile 1969-12-31 16:00:00.000000000 -0800
3436+++ b/drivers/gpu/drm/psb/Makefile 2009-04-07 13:28:38.000000000 -0700
3437@@ -0,0 +1,18 @@
3438+#
3439+# Makefile for the drm device driver. This driver provides support for the
3440+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
3441+
3442+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/psb
3443+
3444+psb-y := psb_drv.o psb_mmu.o psb_sgx.o psb_irq.o psb_fence.o \
3445+ psb_buffer.o psb_gtt.o psb_schedule.o psb_scene.o \
3446+ psb_reset.o psb_xhw.o psb_msvdx.o \
3447+ lnc_topaz.o lnc_topazinit.o \
3448+ psb_msvdxinit.o psb_ttm_glue.o psb_fb.o psb_setup.o \
3449+ ttm/ttm_object.o ttm/ttm_lock.o ttm/ttm_fence_user.o \
3450+ ttm/ttm_fence.o ttm/ttm_tt.o ttm/ttm_execbuf_util.o \
3451+ ttm/ttm_bo.o ttm/ttm_bo_util.o ttm/ttm_placement_user.o \
3452+ ttm/ttm_bo_vm.o ttm/ttm_pat_compat.o ttm/ttm_memory.o
3453+
3454+obj-$(CONFIG_DRM_PSB) += psb.o
3455+
3456diff -uNr a/drivers/gpu/drm/psb/psb_buffer.c b/drivers/gpu/drm/psb/psb_buffer.c
3457--- a/drivers/gpu/drm/psb/psb_buffer.c 1969-12-31 16:00:00.000000000 -0800
3458+++ b/drivers/gpu/drm/psb/psb_buffer.c 2009-04-07 13:28:38.000000000 -0700
3459@@ -0,0 +1,504 @@
3460+/**************************************************************************
3461+ * Copyright (c) 2007, Intel Corporation.
3462+ * All Rights Reserved.
3463+ *
3464+ * This program is free software; you can redistribute it and/or modify it
3465+ * under the terms and conditions of the GNU General Public License,
3466+ * version 2, as published by the Free Software Foundation.
3467+ *
3468+ * This program is distributed in the hope it will be useful, but WITHOUT
3469+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3470+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3471+ * more details.
3472+ *
3473+ * You should have received a copy of the GNU General Public License along with
3474+ * this program; if not, write to the Free Software Foundation, Inc.,
3475+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3476+ *
3477+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
3478+ * develop this driver.
3479+ *
3480+ **************************************************************************/
3481+/*
3482+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
3483+ */
3484+#include "ttm/ttm_placement_common.h"
3485+#include "ttm/ttm_execbuf_util.h"
3486+#include "ttm/ttm_fence_api.h"
3487+#include <drm/drmP.h>
3488+#include "psb_drv.h"
3489+#include "psb_schedule.h"
3490+
3491+#define DRM_MEM_TTM 26
3492+
3493+struct drm_psb_ttm_backend {
3494+ struct ttm_backend base;
3495+ struct page **pages;
3496+ unsigned int desired_tile_stride;
3497+ unsigned int hw_tile_stride;
3498+ int mem_type;
3499+ unsigned long offset;
3500+ unsigned long num_pages;
3501+};
3502+
3503+/*
3504+ * Poulsbo GPU virtual space looks like this
3505+ * (We currently use only one MMU context).
3506+ *
3507+ * gatt_start = Start of GATT aperture in bus space.
3508+ * stolen_end = End of GATT populated by stolen memory in bus space.
3509+ * gatt_end = End of GATT
3510+ * twod_end = MIN(gatt_start + 256_MEM, gatt_end)
3511+ *
3512+ * 0x00000000 -> 0x10000000 Temporary mapping space for tiling-
3513+ * and copy operations.
3514+ * This space is not managed and is protected by the
3515+ * temp_mem mutex.
3516+ *
3517+ * 0x10000000 -> 0x20000000 DRM_PSB_MEM_KERNEL For kernel buffers.
3518+ *
3519+ * 0x20000000 -> gatt_start DRM_PSB_MEM_MMU For generic MMU-only use.
3520+ *
3521+ * gatt_start -> stolen_end TTM_PL_VRAM Pre-populated GATT pages.
3522+ *
3523+ * stolen_end -> twod_end TTM_PL_TT GATT memory usable by 2D engine.
3524+ *
3525+ * twod_end -> gatt_end DRM_BO_MEM_APER GATT memory not
3526+ * usable by 2D engine.
3527+ *
3528+ * gatt_end -> 0xffffffff Currently unused.
3529+ */
3530+
3531+static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
3532+ struct ttm_mem_type_manager *man)
3533+{
3534+
3535+ struct drm_psb_private *dev_priv =
3536+ container_of(bdev, struct drm_psb_private, bdev);
3537+ struct psb_gtt *pg = dev_priv->pg;
3538+
3539+ switch (type) {
3540+ case TTM_PL_SYSTEM:
3541+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
3542+ man->available_caching = TTM_PL_FLAG_CACHED |
3543+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3544+ man->default_caching = TTM_PL_FLAG_CACHED;
3545+ break;
3546+ case DRM_PSB_MEM_KERNEL:
3547+ man->io_offset = 0x00000000;
3548+ man->io_size = 0x00000000;
3549+ man->io_addr = NULL;
3550+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3551+ TTM_MEMTYPE_FLAG_CMA;
3552+ man->gpu_offset = PSB_MEM_KERNEL_START;
3553+ man->available_caching = TTM_PL_FLAG_CACHED |
3554+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3555+ man->default_caching = TTM_PL_FLAG_WC;
3556+ break;
3557+ case DRM_PSB_MEM_MMU:
3558+ man->io_offset = 0x00000000;
3559+ man->io_size = 0x00000000;
3560+ man->io_addr = NULL;
3561+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3562+ TTM_MEMTYPE_FLAG_CMA;
3563+ man->gpu_offset = PSB_MEM_MMU_START;
3564+ man->available_caching = TTM_PL_FLAG_CACHED |
3565+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3566+ man->default_caching = TTM_PL_FLAG_WC;
3567+ break;
3568+ case DRM_PSB_MEM_PDS:
3569+ man->io_offset = 0x00000000;
3570+ man->io_size = 0x00000000;
3571+ man->io_addr = NULL;
3572+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3573+ TTM_MEMTYPE_FLAG_CMA;
3574+ man->gpu_offset = PSB_MEM_PDS_START;
3575+ man->available_caching = TTM_PL_FLAG_CACHED |
3576+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3577+ man->default_caching = TTM_PL_FLAG_WC;
3578+ break;
3579+ case DRM_PSB_MEM_RASTGEOM:
3580+ man->io_offset = 0x00000000;
3581+ man->io_size = 0x00000000;
3582+ man->io_addr = NULL;
3583+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3584+ TTM_MEMTYPE_FLAG_CMA;
3585+ man->gpu_offset = PSB_MEM_RASTGEOM_START;
3586+ man->available_caching = TTM_PL_FLAG_CACHED |
3587+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3588+ man->default_caching = TTM_PL_FLAG_WC;
3589+ break;
3590+ case TTM_PL_VRAM:
3591+ man->io_addr = NULL;
3592+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3593+ TTM_MEMTYPE_FLAG_FIXED |
3594+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
3595+#ifdef PSB_WORKING_HOST_MMU_ACCESS
3596+ man->io_offset = pg->gatt_start;
3597+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
3598+#else
3599+ man->io_offset = pg->stolen_base;
3600+ man->io_size = pg->vram_stolen_size;
3601+#endif
3602+ man->gpu_offset = pg->gatt_start;
3603+ man->available_caching = TTM_PL_FLAG_UNCACHED |
3604+ TTM_PL_FLAG_WC;
3605+ man->default_caching = TTM_PL_FLAG_WC;
3606+ break;
3607+ case TTM_PL_CI:
3608+ man->io_addr = NULL;
3609+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3610+ TTM_MEMTYPE_FLAG_FIXED |
3611+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
3612+ man->io_offset = dev_priv->ci_region_start;
3613+ man->io_size = pg->ci_stolen_size;
3614+ man->gpu_offset = pg->gatt_start - pg->ci_stolen_size;
3615+ man->available_caching = TTM_PL_FLAG_UNCACHED;
3616+ man->default_caching = TTM_PL_FLAG_UNCACHED;
3617+ break;
3618+ case TTM_PL_TT: /* Mappable GATT memory */
3619+ man->io_offset = pg->gatt_start;
3620+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
3621+ man->io_addr = NULL;
3622+#ifdef PSB_WORKING_HOST_MMU_ACCESS
3623+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3624+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
3625+#else
3626+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3627+ TTM_MEMTYPE_FLAG_CMA;
3628+#endif
3629+ man->available_caching = TTM_PL_FLAG_CACHED |
3630+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3631+ man->default_caching = TTM_PL_FLAG_WC;
3632+ man->gpu_offset = pg->gatt_start;
3633+ break;
3634+ case DRM_PSB_MEM_APER: /*MMU memory. Mappable. Not usable for 2D. */
3635+ man->io_offset = pg->gatt_start;
3636+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
3637+ man->io_addr = NULL;
3638+#ifdef PSB_WORKING_HOST_MMU_ACCESS
3639+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3640+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
3641+#else
3642+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3643+ TTM_MEMTYPE_FLAG_CMA;
3644+#endif
3645+ man->gpu_offset = pg->gatt_start;
3646+ man->available_caching = TTM_PL_FLAG_CACHED |
3647+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3648+ man->default_caching = TTM_PL_FLAG_WC;
3649+ break;
3650+ default:
3651+ DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
3652+ return -EINVAL;
3653+ }
3654+ return 0;
3655+}
3656+
3657+static uint32_t psb_evict_mask(struct ttm_buffer_object *bo)
3658+{
3659+ uint32_t cur_placement = bo->mem.flags & ~TTM_PL_MASK_MEM;
3660+
3661+
3662+ switch (bo->mem.mem_type) {
3663+ case TTM_PL_VRAM:
3664+ if (bo->mem.proposed_flags & TTM_PL_FLAG_TT)
3665+ return cur_placement | TTM_PL_FLAG_TT;
3666+ else
3667+ return cur_placement | TTM_PL_FLAG_SYSTEM;
3668+ default:
3669+ return cur_placement | TTM_PL_FLAG_SYSTEM;
3670+ }
3671+}
3672+
3673+static int psb_invalidate_caches(struct ttm_bo_device *bdev,
3674+ uint32_t placement)
3675+{
3676+ return 0;
3677+}
3678+
3679+static int psb_move_blit(struct ttm_buffer_object *bo,
3680+ bool evict, bool no_wait,
3681+ struct ttm_mem_reg *new_mem)
3682+{
3683+ struct drm_psb_private *dev_priv =
3684+ container_of(bo->bdev, struct drm_psb_private, bdev);
3685+ struct drm_device *dev = dev_priv->dev;
3686+ struct ttm_mem_reg *old_mem = &bo->mem;
3687+ struct ttm_fence_object *fence;
3688+ int dir = 0;
3689+ int ret;
3690+
3691+ if ((old_mem->mem_type == new_mem->mem_type) &&
3692+ (new_mem->mm_node->start <
3693+ old_mem->mm_node->start + old_mem->mm_node->size)) {
3694+ dir = 1;
3695+ }
3696+
3697+ psb_emit_2d_copy_blit(dev,
3698+ old_mem->mm_node->start << PAGE_SHIFT,
3699+ new_mem->mm_node->start << PAGE_SHIFT,
3700+ new_mem->num_pages, dir);
3701+
3702+ ret = ttm_fence_object_create(&dev_priv->fdev, 0,
3703+ _PSB_FENCE_TYPE_EXE,
3704+ TTM_FENCE_FLAG_EMIT,
3705+ &fence);
3706+ if (unlikely(ret != 0)) {
3707+ psb_idle_2d(dev);
3708+ if (fence)
3709+ ttm_fence_object_unref(&fence);
3710+ }
3711+
3712+ ret = ttm_bo_move_accel_cleanup(bo, (void *) fence,
3713+ (void *) (unsigned long)
3714+ _PSB_FENCE_TYPE_EXE,
3715+ evict, no_wait, new_mem);
3716+ if (fence)
3717+ ttm_fence_object_unref(&fence);
3718+ return ret;
3719+}
3720+
3721+/*
3722+ * Flip destination ttm into GATT,
3723+ * then blit and subsequently move out again.
3724+ */
3725+
3726+static int psb_move_flip(struct ttm_buffer_object *bo,
3727+ bool evict, bool interruptible, bool no_wait,
3728+ struct ttm_mem_reg *new_mem)
3729+{
3730+ struct ttm_bo_device *bdev = bo->bdev;
3731+ struct ttm_mem_reg tmp_mem;
3732+ int ret;
3733+
3734+ tmp_mem = *new_mem;
3735+ tmp_mem.mm_node = NULL;
3736+ tmp_mem.proposed_flags = TTM_PL_FLAG_TT;
3737+
3738+ ret = ttm_bo_mem_space(bo, &tmp_mem, interruptible, no_wait);
3739+ if (ret)
3740+ return ret;
3741+ ret = ttm_tt_bind(bo->ttm, &tmp_mem);
3742+ if (ret)
3743+ goto out_cleanup;
3744+ ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
3745+ if (ret)
3746+ goto out_cleanup;
3747+
3748+ ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
3749+out_cleanup:
3750+ if (tmp_mem.mm_node) {
3751+ spin_lock(&bdev->lru_lock);
3752+ drm_mm_put_block(tmp_mem.mm_node);
3753+ tmp_mem.mm_node = NULL;
3754+ spin_unlock(&bdev->lru_lock);
3755+ }
3756+ return ret;
3757+}
3758+
3759+static int psb_move(struct ttm_buffer_object *bo,
3760+ bool evict, bool interruptible,
3761+ bool no_wait, struct ttm_mem_reg *new_mem)
3762+{
3763+ struct ttm_mem_reg *old_mem = &bo->mem;
3764+
3765+ if (old_mem->mem_type == TTM_PL_SYSTEM) {
3766+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
3767+ } else if (new_mem->mem_type == TTM_PL_SYSTEM) {
3768+ int ret = psb_move_flip(bo, evict, interruptible,
3769+ no_wait, new_mem);
3770+ if (unlikely(ret != 0)) {
3771+ if (ret == -ERESTART)
3772+ return ret;
3773+ else
3774+ return ttm_bo_move_memcpy(bo, evict, no_wait,
3775+ new_mem);
3776+ }
3777+ } else {
3778+ if (psb_move_blit(bo, evict, no_wait, new_mem))
3779+ return ttm_bo_move_memcpy(bo, evict, no_wait,
3780+ new_mem);
3781+ }
3782+ return 0;
3783+}
3784+
3785+static int drm_psb_tbe_populate(struct ttm_backend *backend,
3786+ unsigned long num_pages,
3787+ struct page **pages,
3788+ struct page *dummy_read_page)
3789+{
3790+ struct drm_psb_ttm_backend *psb_be =
3791+ container_of(backend, struct drm_psb_ttm_backend, base);
3792+
3793+ psb_be->pages = pages;
3794+ return 0;
3795+}
3796+
3797+static int drm_psb_tbe_unbind(struct ttm_backend *backend)
3798+{
3799+ struct ttm_bo_device *bdev = backend->bdev;
3800+ struct drm_psb_private *dev_priv =
3801+ container_of(bdev, struct drm_psb_private, bdev);
3802+ struct drm_psb_ttm_backend *psb_be =
3803+ container_of(backend, struct drm_psb_ttm_backend, base);
3804+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
3805+ struct ttm_mem_type_manager *man = &bdev->man[psb_be->mem_type];
3806+
3807+ PSB_DEBUG_RENDER("MMU unbind.\n");
3808+
3809+ if (psb_be->mem_type == TTM_PL_TT) {
3810+ uint32_t gatt_p_offset =
3811+ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
3812+
3813+ (void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
3814+ psb_be->num_pages,
3815+ psb_be->desired_tile_stride,
3816+ psb_be->hw_tile_stride);
3817+ }
3818+
3819+ psb_mmu_remove_pages(pd, psb_be->offset,
3820+ psb_be->num_pages,
3821+ psb_be->desired_tile_stride,
3822+ psb_be->hw_tile_stride);
3823+
3824+ return 0;
3825+}
3826+
3827+static int drm_psb_tbe_bind(struct ttm_backend *backend,
3828+ struct ttm_mem_reg *bo_mem)
3829+{
3830+ struct ttm_bo_device *bdev = backend->bdev;
3831+ struct drm_psb_private *dev_priv =
3832+ container_of(bdev, struct drm_psb_private, bdev);
3833+ struct drm_psb_ttm_backend *psb_be =
3834+ container_of(backend, struct drm_psb_ttm_backend, base);
3835+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
3836+ struct ttm_mem_type_manager *man = &bdev->man[bo_mem->mem_type];
3837+ int type;
3838+ int ret = 0;
3839+
3840+ psb_be->mem_type = bo_mem->mem_type;
3841+ psb_be->num_pages = bo_mem->num_pages;
3842+ psb_be->desired_tile_stride = 0;
3843+ psb_be->hw_tile_stride = 0;
3844+ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
3845+ man->gpu_offset;
3846+
3847+ type =
3848+ (bo_mem->
3849+ flags & TTM_PL_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0;
3850+
3851+ PSB_DEBUG_RENDER("MMU bind.\n");
3852+ if (psb_be->mem_type == TTM_PL_TT) {
3853+ uint32_t gatt_p_offset =
3854+ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
3855+
3856+ ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
3857+ gatt_p_offset,
3858+ psb_be->num_pages,
3859+ psb_be->desired_tile_stride,
3860+ psb_be->hw_tile_stride, type);
3861+ }
3862+
3863+ ret = psb_mmu_insert_pages(pd, psb_be->pages,
3864+ psb_be->offset, psb_be->num_pages,
3865+ psb_be->desired_tile_stride,
3866+ psb_be->hw_tile_stride, type);
3867+ if (ret)
3868+ goto out_err;
3869+
3870+ return 0;
3871+out_err:
3872+ drm_psb_tbe_unbind(backend);
3873+ return ret;
3874+
3875+}
3876+
3877+static void drm_psb_tbe_clear(struct ttm_backend *backend)
3878+{
3879+ struct drm_psb_ttm_backend *psb_be =
3880+ container_of(backend, struct drm_psb_ttm_backend, base);
3881+
3882+ psb_be->pages = NULL;
3883+ return;
3884+}
3885+
3886+static void drm_psb_tbe_destroy(struct ttm_backend *backend)
3887+{
3888+ struct drm_psb_ttm_backend *psb_be =
3889+ container_of(backend, struct drm_psb_ttm_backend, base);
3890+
3891+ if (backend)
3892+ drm_free(psb_be, sizeof(*psb_be), DRM_MEM_TTM);
3893+}
3894+
3895+static struct ttm_backend_func psb_ttm_backend = {
3896+ .populate = drm_psb_tbe_populate,
3897+ .clear = drm_psb_tbe_clear,
3898+ .bind = drm_psb_tbe_bind,
3899+ .unbind = drm_psb_tbe_unbind,
3900+ .destroy = drm_psb_tbe_destroy,
3901+};
3902+
3903+static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev)
3904+{
3905+ struct drm_psb_ttm_backend *psb_be;
3906+
3907+ psb_be = drm_calloc(1, sizeof(*psb_be), DRM_MEM_TTM);
3908+ if (!psb_be)
3909+ return NULL;
3910+ psb_be->pages = NULL;
3911+ psb_be->base.func = &psb_ttm_backend;
3912+ psb_be->base.bdev = bdev;
3913+ return &psb_be->base;
3914+}
3915+
3916+/*
3917+ * Use this memory type priority if no eviction is needed.
3918+ */
3919+static uint32_t psb_mem_prios[] = {
3920+ TTM_PL_CI,
3921+ TTM_PL_VRAM,
3922+ TTM_PL_TT,
3923+ DRM_PSB_MEM_KERNEL,
3924+ DRM_PSB_MEM_MMU,
3925+ DRM_PSB_MEM_RASTGEOM,
3926+ DRM_PSB_MEM_PDS,
3927+ DRM_PSB_MEM_APER,
3928+ TTM_PL_SYSTEM
3929+};
3930+
3931+/*
3932+ * Use this memory type priority if eviction is needed.
3933+ */
3934+static uint32_t psb_busy_prios[] = {
3935+ TTM_PL_TT,
3936+ TTM_PL_VRAM,
3937+ TTM_PL_CI,
3938+ DRM_PSB_MEM_KERNEL,
3939+ DRM_PSB_MEM_MMU,
3940+ DRM_PSB_MEM_RASTGEOM,
3941+ DRM_PSB_MEM_PDS,
3942+ DRM_PSB_MEM_APER,
3943+ TTM_PL_SYSTEM
3944+};
3945+
3946+
3947+struct ttm_bo_driver psb_ttm_bo_driver = {
3948+ .mem_type_prio = psb_mem_prios,
3949+ .mem_busy_prio = psb_busy_prios,
3950+ .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
3951+ .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
3952+ .create_ttm_backend_entry = &drm_psb_tbe_init,
3953+ .invalidate_caches = &psb_invalidate_caches,
3954+ .init_mem_type = &psb_init_mem_type,
3955+ .evict_flags = &psb_evict_mask,
3956+ .move = &psb_move,
3957+ .verify_access = &psb_verify_access,
3958+ .sync_obj_signaled = &ttm_fence_sync_obj_signaled,
3959+ .sync_obj_wait = &ttm_fence_sync_obj_wait,
3960+ .sync_obj_flush = &ttm_fence_sync_obj_flush,
3961+ .sync_obj_unref = &ttm_fence_sync_obj_unref,
3962+ .sync_obj_ref = &ttm_fence_sync_obj_ref
3963+};
3964diff -uNr a/drivers/gpu/drm/psb/psb_drm.h b/drivers/gpu/drm/psb/psb_drm.h
3965--- a/drivers/gpu/drm/psb/psb_drm.h 1969-12-31 16:00:00.000000000 -0800
3966+++ b/drivers/gpu/drm/psb/psb_drm.h 2009-04-07 13:28:38.000000000 -0700
3967@@ -0,0 +1,444 @@
3968+/**************************************************************************
3969+ * Copyright (c) 2007, Intel Corporation.
3970+ * All Rights Reserved.
3971+ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
3972+ * All Rights Reserved.
3973+ *
3974+ * This program is free software; you can redistribute it and/or modify it
3975+ * under the terms and conditions of the GNU General Public License,
3976+ * version 2, as published by the Free Software Foundation.
3977+ *
3978+ * This program is distributed in the hope it will be useful, but WITHOUT
3979+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3980+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3981+ * more details.
3982+ *
3983+ * You should have received a copy of the GNU General Public License along with
3984+ * this program; if not, write to the Free Software Foundation, Inc.,
3985+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3986+ *
3987+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
3988+ * develop this driver.
3989+ *
3990+ **************************************************************************/
3991+/*
3992+ */
3993+
3994+#ifndef _PSB_DRM_H_
3995+#define _PSB_DRM_H_
3996+
3997+#if defined(__linux__) && !defined(__KERNEL__)
3998+#include<stdint.h>
3999+#endif
4000+
4001+#include "ttm/ttm_fence_user.h"
4002+#include "ttm/ttm_placement_user.h"
4003+
4004+#define DRM_PSB_SAREA_MAJOR 0
4005+#define DRM_PSB_SAREA_MINOR 2
4006+#define PSB_FIXED_SHIFT 16
4007+
4008+#define DRM_PSB_FIRST_TA_USE_REG 3
4009+#define DRM_PSB_NUM_TA_USE_REG 6
4010+#define DRM_PSB_FIRST_RASTER_USE_REG 8
4011+#define DRM_PSB_NUM_RASTER_USE_REG 7
4012+
4013+/*
4014+ * Public memory types.
4015+ */
4016+
4017+#define DRM_PSB_MEM_MMU TTM_PL_PRIV1
4018+#define DRM_PSB_FLAG_MEM_MMU TTM_PL_FLAG_PRIV1
4019+#define DRM_PSB_MEM_PDS TTM_PL_PRIV2
4020+#define DRM_PSB_FLAG_MEM_PDS TTM_PL_FLAG_PRIV2
4021+#define DRM_PSB_MEM_APER TTM_PL_PRIV3
4022+#define DRM_PSB_FLAG_MEM_APER TTM_PL_FLAG_PRIV3
4023+#define DRM_PSB_MEM_RASTGEOM TTM_PL_PRIV4
4024+#define DRM_PSB_FLAG_MEM_RASTGEOM TTM_PL_FLAG_PRIV4
4025+#define PSB_MEM_RASTGEOM_START 0x30000000
4026+
4027+typedef int32_t psb_fixed;
4028+typedef uint32_t psb_ufixed;
4029+
4030+static inline int32_t psb_int_to_fixed(int a)
4031+{
4032+ return a * (1 << PSB_FIXED_SHIFT);
4033+}
4034+
4035+static inline uint32_t psb_unsigned_to_ufixed(unsigned int a)
4036+{
4037+ return a << PSB_FIXED_SHIFT;
4038+}
4039+
4040+/*Status of the command sent to the gfx device.*/
4041+typedef enum {
4042+ DRM_CMD_SUCCESS,
4043+ DRM_CMD_FAILED,
4044+ DRM_CMD_HANG
4045+} drm_cmd_status_t;
4046+
4047+struct drm_psb_scanout {
4048+ uint32_t buffer_id; /* DRM buffer object ID */
4049+ uint32_t rotation; /* Rotation as in RR_rotation definitions */
4050+ uint32_t stride; /* Buffer stride in bytes */
4051+ uint32_t depth; /* Buffer depth in bits (NOT) bpp */
4052+ uint32_t width; /* Buffer width in pixels */
4053+ uint32_t height; /* Buffer height in lines */
4054+ int32_t transform[3][3]; /* Buffer composite transform */
4055+ /* (scaling, rot, reflect) */
4056+};
4057+
4058+#define DRM_PSB_SAREA_OWNERS 16
4059+#define DRM_PSB_SAREA_OWNER_2D 0
4060+#define DRM_PSB_SAREA_OWNER_3D 1
4061+
4062+#define DRM_PSB_SAREA_SCANOUTS 3
4063+
4064+struct drm_psb_sarea {
4065+ /* Track changes of this data structure */
4066+
4067+ uint32_t major;
4068+ uint32_t minor;
4069+
4070+ /* Last context to touch part of hw */
4071+ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
4072+
4073+ /* Definition of front- and rotated buffers */
4074+ uint32_t num_scanouts;
4075+ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
4076+
4077+ int planeA_x;
4078+ int planeA_y;
4079+ int planeA_w;
4080+ int planeA_h;
4081+ int planeB_x;
4082+ int planeB_y;
4083+ int planeB_w;
4084+ int planeB_h;
4085+ /* Number of active scanouts */
4086+ uint32_t num_active_scanouts;
4087+};
4088+
4089+#define PSB_RELOC_MAGIC 0x67676767
4090+#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
4091+#define PSB_RELOC_SHIFT_SHIFT 0
4092+#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
4093+#define PSB_RELOC_ALSHIFT_SHIFT 16
4094+
4095+#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
4096+ * buffer
4097+ */
4098+#define PSB_RELOC_OP_2D_OFFSET 1 /* Offset of the indicated
4099+ * buffer, relative to 2D
4100+ * base address
4101+ */
4102+#define PSB_RELOC_OP_PDS_OFFSET 2 /* Offset of the indicated buffer,
4103+ * relative to PDS base address
4104+ */
4105+#define PSB_RELOC_OP_STRIDE 3 /* Stride of the indicated
4106+ * buffer (for tiling)
4107+ */
4108+#define PSB_RELOC_OP_USE_OFFSET 4 /* Offset of USE buffer
4109+ * relative to base reg
4110+ */
4111+#define PSB_RELOC_OP_USE_REG 5 /* Base reg of USE buffer */
4112+
4113+struct drm_psb_reloc {
4114+ uint32_t reloc_op;
4115+ uint32_t where; /* offset in destination buffer */
4116+ uint32_t buffer; /* Buffer reloc applies to */
4117+ uint32_t mask; /* Destination format: */
4118+ uint32_t shift; /* Destination format: */
4119+ uint32_t pre_add; /* Destination format: */
4120+ uint32_t background; /* Destination add */
4121+ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
4122+	uint32_t arg0;		/* Reloc-op dependent */
4123+ uint32_t arg1;
4124+};
4125+
4126+
4127+#define PSB_GPU_ACCESS_READ (1ULL << 32)
4128+#define PSB_GPU_ACCESS_WRITE (1ULL << 33)
4129+#define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
4130+
4131+#define PSB_BO_FLAG_TA (1ULL << 48)
4132+#define PSB_BO_FLAG_SCENE (1ULL << 49)
4133+#define PSB_BO_FLAG_FEEDBACK (1ULL << 50)
4134+#define PSB_BO_FLAG_USSE (1ULL << 51)
4135+#define PSB_BO_FLAG_COMMAND (1ULL << 52)
4136+
4137+#define PSB_ENGINE_2D 0
4138+#define PSB_ENGINE_VIDEO 1
4139+#define PSB_ENGINE_RASTERIZER 2
4140+#define PSB_ENGINE_TA 3
4141+#define PSB_ENGINE_HPRAST 4
4142+#define LNC_ENGINE_ENCODE 5
4143+
4144+#define PSB_DEVICE_SGX 0x1
4145+#define PSB_DEVICE_DISLAY 0x2
4146+#define PSB_DEVICE_MSVDX 0x4
4147+#define PSB_DEVICE_TOPAZ 0x8
4148+
4149+/*
4150+ * For this fence class we have a couple of
4151+ * fence types.
4152+ */
4153+
4154+#define _PSB_FENCE_EXE_SHIFT 0
4155+#define _PSB_FENCE_TA_DONE_SHIFT 1
4156+#define _PSB_FENCE_RASTER_DONE_SHIFT 2
4157+#define _PSB_FENCE_SCENE_DONE_SHIFT 3
4158+#define _PSB_FENCE_FEEDBACK_SHIFT 4
4159+
4160+#define _PSB_ENGINE_TA_FENCE_TYPES 5
4161+#define _PSB_FENCE_TYPE_EXE (1 << _PSB_FENCE_EXE_SHIFT)
4162+#define _PSB_FENCE_TYPE_TA_DONE (1 << _PSB_FENCE_TA_DONE_SHIFT)
4163+#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT)
4164+#define _PSB_FENCE_TYPE_SCENE_DONE (1 << _PSB_FENCE_SCENE_DONE_SHIFT)
4165+#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
4166+
4167+#define PSB_ENGINE_HPRAST 4
4168+#define PSB_NUM_ENGINES 6
4169+
4170+#define PSB_TA_FLAG_FIRSTPASS (1 << 0)
4171+#define PSB_TA_FLAG_LASTPASS (1 << 1)
4172+
4173+#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
4174+
4175+struct drm_psb_extension_rep {
4176+ int32_t exists;
4177+ uint32_t driver_ioctl_offset;
4178+ uint32_t sarea_offset;
4179+ uint32_t major;
4180+ uint32_t minor;
4181+ uint32_t pl;
4182+};
4183+
4184+#define DRM_PSB_EXT_NAME_LEN 128
4185+
4186+union drm_psb_extension_arg {
4187+ char extension[DRM_PSB_EXT_NAME_LEN];
4188+ struct drm_psb_extension_rep rep;
4189+};
4190+
4191+struct psb_validate_req {
4192+ uint64_t set_flags;
4193+ uint64_t clear_flags;
4194+ uint64_t next;
4195+ uint64_t presumed_gpu_offset;
4196+ uint32_t buffer_handle;
4197+ uint32_t presumed_flags;
4198+ uint32_t group;
4199+ uint32_t pad64;
4200+};
4201+
4202+struct psb_validate_rep {
4203+ uint64_t gpu_offset;
4204+ uint32_t placement;
4205+ uint32_t fence_type_mask;
4206+};
4207+
4208+#define PSB_USE_PRESUMED (1 << 0)
4209+
4210+struct psb_validate_arg {
4211+ int handled;
4212+ int ret;
4213+ union {
4214+ struct psb_validate_req req;
4215+ struct psb_validate_rep rep;
4216+ } d;
4217+};
4218+
4219+struct drm_psb_scene {
4220+ int handle_valid;
4221+ uint32_t handle;
4222+ uint32_t w; /* also contains msaa info */
4223+ uint32_t h;
4224+ uint32_t num_buffers;
4225+};
4226+
4227+#define DRM_PSB_FENCE_NO_USER (1 << 0)
4228+
4229+struct psb_ttm_fence_rep {
4230+ uint32_t handle;
4231+ uint32_t fence_class;
4232+ uint32_t fence_type;
4233+ uint32_t signaled_types;
4234+ uint32_t error;
4235+};
4236+
4237+typedef struct drm_psb_cmdbuf_arg {
4238+ uint64_t buffer_list; /* List of buffers to validate */
4239+ uint64_t clip_rects; /* See i915 counterpart */
4240+ uint64_t scene_arg;
4241+ uint64_t fence_arg;
4242+
4243+ uint32_t ta_flags;
4244+
4245+ uint32_t ta_handle; /* TA reg-value pairs */
4246+ uint32_t ta_offset;
4247+ uint32_t ta_size;
4248+
4249+ uint32_t oom_handle;
4250+ uint32_t oom_offset;
4251+ uint32_t oom_size;
4252+
4253+ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
4254+ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
4255+ uint32_t cmdbuf_size;
4256+
4257+ uint32_t reloc_handle; /* Reloc buffer object */
4258+ uint32_t reloc_offset;
4259+ uint32_t num_relocs;
4260+
4261+ int32_t damage; /* Damage front buffer with cliprects */
4262+ /* Not implemented yet */
4263+ uint32_t fence_flags;
4264+ uint32_t engine;
4265+
4266+ /*
4267+ * Feedback;
4268+ */
4269+
4270+ uint32_t feedback_ops;
4271+ uint32_t feedback_handle;
4272+ uint32_t feedback_offset;
4273+ uint32_t feedback_breakpoints;
4274+ uint32_t feedback_size;
4275+} drm_psb_cmdbuf_arg_t;
4276+
4277+struct drm_psb_xhw_init_arg {
4278+ uint32_t operation;
4279+ uint32_t buffer_handle;
4280+};
4281+
4282+/*
4283+ * Feedback components:
4284+ */
4285+
4286+/*
4287+ * Vistest component. The number of these in the feedback buffer
4288+ * equals the number of vistest breakpoints + 1.
4289+ * This is currently the only feedback component.
4290+ */
4291+
4292+struct drm_psb_vistest {
4293+ uint32_t vt[8];
4294+};
4295+
4296+#define PSB_HW_COOKIE_SIZE 16
4297+#define PSB_HW_FEEDBACK_SIZE 8
4298+#define PSB_HW_OOM_CMD_SIZE (6 + DRM_PSB_NUM_RASTER_USE_REG * 2)
4299+
4300+struct drm_psb_xhw_arg {
4301+ uint32_t op;
4302+ int ret;
4303+ uint32_t irq_op;
4304+ uint32_t issue_irq;
4305+ uint32_t cookie[PSB_HW_COOKIE_SIZE];
4306+ union {
4307+ struct {
4308+ uint32_t w; /* also contains msaa info */
4309+ uint32_t h;
4310+ uint32_t size;
4311+ uint32_t clear_p_start;
4312+ uint32_t clear_num_pages;
4313+ } si;
4314+ struct {
4315+ uint32_t fire_flags;
4316+ uint32_t hw_context;
4317+ uint32_t offset;
4318+ uint32_t engine;
4319+ uint32_t flags;
4320+ uint32_t rca;
4321+ uint32_t num_oom_cmds;
4322+ uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE];
4323+ } sb;
4324+ struct {
4325+ uint32_t pages;
4326+ uint32_t size;
4327+ uint32_t ta_min_size;
4328+ } bi;
4329+ struct {
4330+ uint32_t bca;
4331+ uint32_t rca;
4332+ uint32_t flags;
4333+ } oom;
4334+ struct {
4335+ uint32_t pt_offset;
4336+ uint32_t param_offset;
4337+ uint32_t flags;
4338+ } bl;
4339+ struct {
4340+ uint32_t value;
4341+ } cl;
4342+ uint32_t feedback[PSB_HW_FEEDBACK_SIZE];
4343+ } arg;
4344+};
4345+
4346+/* Controlling the kernel modesetting buffers */
4347+
4348+#define DRM_PSB_KMS_OFF 0x00
4349+#define DRM_PSB_KMS_ON 0x01
4350+#define DRM_PSB_VT_LEAVE 0x02
4351+#define DRM_PSB_VT_ENTER 0x03
4352+#define DRM_PSB_XHW_INIT 0x04
4353+#define DRM_PSB_XHW 0x05
4354+#define DRM_PSB_EXTENSION 0x06
4355+
4356+/*
4357+ * Xhw commands.
4358+ */
4359+
4360+#define PSB_XHW_INIT 0x00
4361+#define PSB_XHW_TAKEDOWN 0x01
4362+
4363+#define PSB_XHW_FIRE_RASTER 0x00
4364+#define PSB_XHW_SCENE_INFO 0x01
4365+#define PSB_XHW_SCENE_BIND_FIRE 0x02
4366+#define PSB_XHW_TA_MEM_INFO 0x03
4367+#define PSB_XHW_RESET_DPM 0x04
4368+#define PSB_XHW_OOM 0x05
4369+#define PSB_XHW_TERMINATE 0x06
4370+#define PSB_XHW_VISTEST 0x07
4371+#define PSB_XHW_RESUME 0x08
4372+#define PSB_XHW_TA_MEM_LOAD 0x09
4373+#define PSB_XHW_CHECK_LOCKUP 0x0a
4374+
4375+#define PSB_SCENE_FLAG_DIRTY (1 << 0)
4376+#define PSB_SCENE_FLAG_COMPLETE (1 << 1)
4377+#define PSB_SCENE_FLAG_SETUP (1 << 2)
4378+#define PSB_SCENE_FLAG_SETUP_ONLY (1 << 3)
4379+#define PSB_SCENE_FLAG_CLEARED (1 << 4)
4380+
4381+#define PSB_TA_MEM_FLAG_TA (1 << 0)
4382+#define PSB_TA_MEM_FLAG_RASTER (1 << 1)
4383+#define PSB_TA_MEM_FLAG_HOSTA (1 << 2)
4384+#define PSB_TA_MEM_FLAG_HOSTD (1 << 3)
4385+#define PSB_TA_MEM_FLAG_INIT (1 << 4)
4386+#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5)
4387+
4388+/*Raster fire will deallocate memory */
4389+#define PSB_FIRE_FLAG_RASTER_DEALLOC (1 << 0)
4390+/*Isp reset needed due to change in ZLS format */
4391+#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1)
4392+/*These are set by Xpsb. */
4393+#define PSB_FIRE_FLAG_XHW_MASK 0xff000000
4394+/*The task has had at least one OOM and Xpsb will
4395+ send back messages on each fire. */
4396+#define PSB_FIRE_FLAG_XHW_OOM (1 << 24)
4397+
4398+#define PSB_SCENE_ENGINE_TA 0
4399+#define PSB_SCENE_ENGINE_RASTER 1
4400+#define PSB_SCENE_NUM_ENGINES 2
4401+
4402+#define PSB_LOCKUP_RASTER (1 << 0)
4403+#define PSB_LOCKUP_TA (1 << 1)
4404+
4405+struct drm_psb_dev_info_arg {
4406+ uint32_t num_use_attribute_registers;
4407+};
4408+#define DRM_PSB_DEVINFO 0x01
4409+
4410+
4411+#endif
4412diff -uNr a/drivers/gpu/drm/psb/psb_drv.c b/drivers/gpu/drm/psb/psb_drv.c
4413--- a/drivers/gpu/drm/psb/psb_drv.c 1969-12-31 16:00:00.000000000 -0800
4414+++ b/drivers/gpu/drm/psb/psb_drv.c 2009-04-07 13:31:58.000000000 -0700
4415@@ -0,0 +1,1465 @@
4416+/**************************************************************************
4417+ * Copyright (c) 2007, Intel Corporation.
4418+ * All Rights Reserved.
4419+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
4420+ * All Rights Reserved.
4421+ *
4422+ * This program is free software; you can redistribute it and/or modify it
4423+ * under the terms and conditions of the GNU General Public License,
4424+ * version 2, as published by the Free Software Foundation.
4425+ *
4426+ * This program is distributed in the hope it will be useful, but WITHOUT
4427+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4428+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4429+ * more details.
4430+ *
4431+ * You should have received a copy of the GNU General Public License along with
4432+ * this program; if not, write to the Free Software Foundation, Inc.,
4433+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4434+ *
4435+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4436+ * develop this driver.
4437+ *
4438+ **************************************************************************/
4439+/*
4440+ */
4441+
4442+#include <drm/drmP.h>
4443+#include <drm/drm.h>
4444+#include "psb_drm.h"
4445+#include "psb_drv.h"
4446+#include "psb_reg.h"
4447+#include "psb_intel_reg.h"
4448+#include "psb_msvdx.h"
4449+#include "lnc_topaz.h"
4450+#include <drm/drm_pciids.h>
4451+#include "psb_scene.h"
4452+
4453+#include <linux/cpu.h>
4454+#include <linux/notifier.h>
4455+#include <linux/spinlock.h>
4456+
4457+int drm_psb_debug;
4458+EXPORT_SYMBOL(drm_psb_debug);
4459+static int drm_psb_trap_pagefaults;
4460+static int drm_psb_clock_gating;
4461+static int drm_psb_ta_mem_size = 32 * 1024;
4462+
4463+int drm_psb_disable_vsync;
4464+int drm_psb_no_fb;
4465+int drm_psb_force_pipeb;
4466+int drm_idle_check_interval = 5;
4467+int drm_psb_ospm;
4468+
4469+MODULE_PARM_DESC(debug, "Enable debug output");
4470+MODULE_PARM_DESC(clock_gating, "clock gating");
4471+MODULE_PARM_DESC(no_fb, "Disable FBdev");
4472+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
4473+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
4474+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
4475+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
4476+MODULE_PARM_DESC(ospm, "switch for ospm support");
4477+module_param_named(debug, drm_psb_debug, int, 0600);
4478+module_param_named(clock_gating, drm_psb_clock_gating, int, 0600);
4479+module_param_named(no_fb, drm_psb_no_fb, int, 0600);
4480+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
4481+module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600);
4482+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
4483+module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600);
4484+module_param_named(ospm, drm_psb_ospm, int, 0600);
4485+
4486+#ifndef CONFIG_X86_PAT
4487+#warning "Don't build this driver without PAT support!!!"
4488+#endif
4489+
4490+#define psb_PCI_IDS \
4491+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
4492+ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
4493+ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4494+ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4495+ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4496+ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4497+ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4498+ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4499+ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4500+ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4501+ {0, 0, 0}
4502+
4503+static struct pci_device_id pciidlist[] = {
4504+ psb_PCI_IDS
4505+};
4506+
4507+/*
4508+ * Standard IOCTLs.
4509+ */
4510+
4511+#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
4512+#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
4513+#define DRM_IOCTL_PSB_VT_LEAVE DRM_IO(DRM_PSB_VT_LEAVE + DRM_COMMAND_BASE)
4514+#define DRM_IOCTL_PSB_VT_ENTER DRM_IO(DRM_PSB_VT_ENTER + DRM_COMMAND_BASE)
4515+#define DRM_IOCTL_PSB_XHW_INIT DRM_IOW(DRM_PSB_XHW_INIT + DRM_COMMAND_BASE, \
4516+ struct drm_psb_xhw_init_arg)
4517+#define DRM_IOCTL_PSB_XHW DRM_IO(DRM_PSB_XHW + DRM_COMMAND_BASE)
4518+#define DRM_IOCTL_PSB_EXTENSION DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
4519+ union drm_psb_extension_arg)
4520+/*
4521+ * TTM execbuf extension.
4522+ */
4523+
4524+#define DRM_PSB_CMDBUF (DRM_PSB_EXTENSION + 1)
4525+#define DRM_PSB_SCENE_UNREF (DRM_PSB_CMDBUF + 1)
4526+#define DRM_IOCTL_PSB_CMDBUF DRM_IOW(DRM_PSB_CMDBUF + DRM_COMMAND_BASE, \
4527+ struct drm_psb_cmdbuf_arg)
4528+#define DRM_IOCTL_PSB_SCENE_UNREF DRM_IOW(DRM_PSB_SCENE_UNREF + DRM_COMMAND_BASE, \
4529+ struct drm_psb_scene)
4530+#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
4531+#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
4532+#define DRM_IOCTL_PSB_EXTENSION DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
4533+ union drm_psb_extension_arg)
4534+/*
4535+ * TTM placement user extension.
4536+ */
4537+
4538+#define DRM_PSB_PLACEMENT_OFFSET (DRM_PSB_SCENE_UNREF + 1)
4539+
4540+#define DRM_PSB_TTM_PL_CREATE (TTM_PL_CREATE + DRM_PSB_PLACEMENT_OFFSET)
4541+#define DRM_PSB_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_PSB_PLACEMENT_OFFSET)
4542+#define DRM_PSB_TTM_PL_UNREF (TTM_PL_UNREF + DRM_PSB_PLACEMENT_OFFSET)
4543+#define DRM_PSB_TTM_PL_SYNCCPU (TTM_PL_SYNCCPU + DRM_PSB_PLACEMENT_OFFSET)
4544+#define DRM_PSB_TTM_PL_WAITIDLE (TTM_PL_WAITIDLE + DRM_PSB_PLACEMENT_OFFSET)
4545+#define DRM_PSB_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_PSB_PLACEMENT_OFFSET)
4546+
4547+/*
4548+ * TTM fence extension.
4549+ */
4550+
4551+#define DRM_PSB_FENCE_OFFSET (DRM_PSB_TTM_PL_SETSTATUS + 1)
4552+#define DRM_PSB_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_PSB_FENCE_OFFSET)
4553+#define DRM_PSB_TTM_FENCE_FINISH (TTM_FENCE_FINISH + DRM_PSB_FENCE_OFFSET)
4554+#define DRM_PSB_TTM_FENCE_UNREF (TTM_FENCE_UNREF + DRM_PSB_FENCE_OFFSET)
4555+
4556+#define DRM_IOCTL_PSB_TTM_PL_CREATE \
4557+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE,\
4558+ union ttm_pl_create_arg)
4559+#define DRM_IOCTL_PSB_TTM_PL_REFERENCE \
4560+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_REFERENCE,\
4561+ union ttm_pl_reference_arg)
4562+#define DRM_IOCTL_PSB_TTM_PL_UNREF \
4563+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_UNREF,\
4564+ struct ttm_pl_reference_req)
4565+#define DRM_IOCTL_PSB_TTM_PL_SYNCCPU \
4566+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SYNCCPU,\
4567+ struct ttm_pl_synccpu_arg)
4568+#define DRM_IOCTL_PSB_TTM_PL_WAITIDLE \
4569+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_WAITIDLE,\
4570+ struct ttm_pl_waitidle_arg)
4571+#define DRM_IOCTL_PSB_TTM_PL_SETSTATUS \
4572+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SETSTATUS,\
4573+ union ttm_pl_setstatus_arg)
4574+#define DRM_IOCTL_PSB_TTM_FENCE_SIGNALED \
4575+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_SIGNALED, \
4576+ union ttm_fence_signaled_arg)
4577+#define DRM_IOCTL_PSB_TTM_FENCE_FINISH \
4578+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_FINISH, \
4579+ union ttm_fence_finish_arg)
4580+#define DRM_IOCTL_PSB_TTM_FENCE_UNREF \
4581+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_UNREF, \
4582+ struct ttm_fence_unref_arg)
4583+
4584+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
4585+ struct drm_file *file_priv);
4586+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
4587+ struct drm_file *file_priv);
4588+
4589+#define PSB_IOCTL_DEF(ioctl, func, flags) \
4590+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, func, flags}
4591+
4592+static struct drm_ioctl_desc psb_ioctls[] = {
4593+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl,
4594+ DRM_ROOT_ONLY),
4595+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON, psbfb_kms_on_ioctl, DRM_ROOT_ONLY),
4596+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_LEAVE, psb_vt_leave_ioctl,
4597+ DRM_ROOT_ONLY),
4598+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_ENTER, psb_vt_enter_ioctl, DRM_ROOT_ONLY),
4599+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW_INIT, psb_xhw_init_ioctl,
4600+ DRM_ROOT_ONLY),
4601+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW, psb_xhw_ioctl, DRM_ROOT_ONLY),
4602+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_EXTENSION, psb_extension_ioctl, DRM_AUTH),
4603+
4604+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_CMDBUF, psb_cmdbuf_ioctl, DRM_AUTH),
4605+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_SCENE_UNREF, drm_psb_scene_unref_ioctl,
4606+ DRM_AUTH),
4607+
4608+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE, psb_pl_create_ioctl,
4609+ DRM_AUTH),
4610+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_REFERENCE, psb_pl_reference_ioctl,
4611+ DRM_AUTH),
4612+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_UNREF, psb_pl_unref_ioctl,
4613+ DRM_AUTH),
4614+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SYNCCPU, psb_pl_synccpu_ioctl,
4615+ DRM_AUTH),
4616+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_WAITIDLE, psb_pl_waitidle_ioctl,
4617+ DRM_AUTH),
4618+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SETSTATUS, psb_pl_setstatus_ioctl,
4619+ DRM_AUTH),
4620+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_SIGNALED,
4621+ psb_fence_signaled_ioctl, DRM_AUTH),
4622+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_FINISH, psb_fence_finish_ioctl,
4623+ DRM_AUTH),
4624+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_UNREF, psb_fence_unref_ioctl,
4625+ DRM_AUTH)
4626+};
4627+
4628+static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
4629+
4630+static void get_ci_info(struct drm_psb_private *dev_priv)
4631+{
4632+ struct pci_dev *pdev;
4633+
4634+ pdev = pci_get_subsys(0x8086, 0x080b, 0, 0, NULL);
4635+ if (pdev == NULL) {
4636+		/* If no PCI device is found, set size & addr to 0;
4637+		 * no CI share buffer can be created */
4638+ dev_priv->ci_region_start = 0;
4639+ dev_priv->ci_region_size = 0;
4640+ printk(KERN_ERR "can't find CI device, no ci share buffer\n");
4641+ return;
4642+ }
4643+
4644+ dev_priv->ci_region_start = pci_resource_start(pdev, 1);
4645+ dev_priv->ci_region_size = pci_resource_len(pdev, 1);
4646+
4647+ printk(KERN_INFO "ci_region_start %x ci_region_size %d\n",
4648+ dev_priv->ci_region_start, dev_priv->ci_region_size);
4649+
4650+ pci_dev_put(pdev);
4651+
4652+ return;
4653+}
4654+
4655+static int dri_library_name(struct drm_device *dev, char *buf)
4656+{
4657+ return snprintf(buf, PAGE_SIZE, "psb\n");
4658+}
4659+
4660+static void psb_set_uopt(struct drm_psb_uopt *uopt)
4661+{
4662+ uopt->clock_gating = drm_psb_clock_gating;
4663+}
4664+
4665+static void psb_lastclose(struct drm_device *dev)
4666+{
4667+ struct drm_psb_private *dev_priv =
4668+ (struct drm_psb_private *) dev->dev_private;
4669+
4670+ if (!dev->dev_private)
4671+ return;
4672+
4673+ if (dev_priv->ta_mem)
4674+ psb_ta_mem_unref(&dev_priv->ta_mem);
4675+ mutex_lock(&dev_priv->cmdbuf_mutex);
4676+ if (dev_priv->context.buffers) {
4677+ vfree(dev_priv->context.buffers);
4678+ dev_priv->context.buffers = NULL;
4679+ }
4680+ mutex_unlock(&dev_priv->cmdbuf_mutex);
4681+}
4682+
4683+static void psb_do_takedown(struct drm_device *dev)
4684+{
4685+ struct drm_psb_private *dev_priv =
4686+ (struct drm_psb_private *) dev->dev_private;
4687+ struct ttm_bo_device *bdev = &dev_priv->bdev;
4688+
4689+
4690+ if (dev_priv->have_mem_rastgeom) {
4691+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_RASTGEOM);
4692+ dev_priv->have_mem_rastgeom = 0;
4693+ }
4694+ if (dev_priv->have_mem_mmu) {
4695+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_MMU);
4696+ dev_priv->have_mem_mmu = 0;
4697+ }
4698+ if (dev_priv->have_mem_aper) {
4699+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_APER);
4700+ dev_priv->have_mem_aper = 0;
4701+ }
4702+ if (dev_priv->have_tt) {
4703+ ttm_bo_clean_mm(bdev, TTM_PL_TT);
4704+ dev_priv->have_tt = 0;
4705+ }
4706+ if (dev_priv->have_vram) {
4707+ ttm_bo_clean_mm(bdev, TTM_PL_VRAM);
4708+ dev_priv->have_vram = 0;
4709+ }
4710+ if (dev_priv->have_camera) {
4711+ ttm_bo_clean_mm(bdev, TTM_PL_CI);
4712+ dev_priv->have_camera = 0;
4713+ }
4714+
4715+ if (dev_priv->has_msvdx)
4716+ psb_msvdx_uninit(dev);
4717+
4718+ if (IS_MRST(dev)) {
4719+ if (dev_priv->has_topaz)
4720+ lnc_topaz_uninit(dev);
4721+ }
4722+
4723+ if (dev_priv->comm) {
4724+ kunmap(dev_priv->comm_page);
4725+ dev_priv->comm = NULL;
4726+ }
4727+ if (dev_priv->comm_page) {
4728+ __free_page(dev_priv->comm_page);
4729+ dev_priv->comm_page = NULL;
4730+ }
4731+}
4732+
4733+void psb_clockgating(struct drm_psb_private *dev_priv)
4734+{
4735+ uint32_t clock_gating;
4736+
4737+ if (dev_priv->uopt.clock_gating == 1) {
4738+ PSB_DEBUG_INIT("Disabling clock gating.\n");
4739+
4740+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4741+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
4742+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4743+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
4744+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4745+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
4746+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4747+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
4748+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4749+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
4750+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4751+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
4752+
4753+ } else if (dev_priv->uopt.clock_gating == 2) {
4754+ PSB_DEBUG_INIT("Enabling clock gating.\n");
4755+
4756+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO <<
4757+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
4758+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
4759+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
4760+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
4761+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
4762+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
4763+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
4764+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
4765+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
4766+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
4767+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
4768+ } else
4769+ clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
4770+
4771+#ifdef FIX_TG_2D_CLOCKGATE
4772+ clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK;
4773+ clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4774+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT);
4775+#endif
4776+ PSB_WSGX32(clock_gating, PSB_CR_CLKGATECTL);
4777+ (void) PSB_RSGX32(PSB_CR_CLKGATECTL);
4778+}
4779+
4780+#define FB_REG06 0xD0810600
4781+#define FB_MIPI_DISABLE BIT11
4782+#define FB_REG09 0xD0810900
4783+#define FB_SKU_MASK (BIT12|BIT13|BIT14)
4784+#define FB_SKU_SHIFT 12
4785+#define FB_SKU_100 0
4786+#define FB_SKU_100L 1
4787+#define FB_SKU_83 2
4788+#if 1 /* FIXME remove it after PO */
4789+#define FB_GFX_CLK_DIVIDE_MASK (BIT20|BIT21|BIT22)
4790+#define FB_GFX_CLK_DIVIDE_SHIFT 20
4791+#define FB_VED_CLK_DIVIDE_MASK (BIT23|BIT24)
4792+#define FB_VED_CLK_DIVIDE_SHIFT 23
4793+#define FB_VEC_CLK_DIVIDE_MASK (BIT25|BIT26)
4794+#define FB_VEC_CLK_DIVIDE_SHIFT 25
4795+#endif /* FIXME remove it after PO */
4796+
4797+
4798+void mrst_get_fuse_settings(struct drm_psb_private *dev_priv)
4799+{
4800+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
4801+ uint32_t fuse_value = 0;
4802+ uint32_t fuse_value_tmp = 0;
4803+
4804+ pci_write_config_dword(pci_root, 0xD0, FB_REG06);
4805+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
4806+
4807+ dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
4808+
4809+ DRM_INFO("internal display is %s\n",
4810+ dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
4811+
4812+ pci_write_config_dword(pci_root, 0xD0, FB_REG09);
4813+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
4814+
4815+	DRM_INFO("SKU value is 0x%x.\n", fuse_value);
4816+ fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
4817+
4818+ switch (fuse_value_tmp) {
4819+ case FB_SKU_100:
4820+		DRM_INFO("SKU value is SKU_100. LNC core clock is 200MHz.\n");
4821+ dev_priv->sku_100 = true;
4822+ break;
4823+ case FB_SKU_100L:
4824+		DRM_INFO("SKU value is SKU_100L. LNC core clock is 100MHz.\n");
4825+ dev_priv->sku_100L = true;
4826+ break;
4827+ case FB_SKU_83:
4828+		DRM_INFO("SKU value is SKU_83. LNC core clock is 166MHz.\n");
4829+ dev_priv->sku_83 = true;
4830+ break;
4831+ default:
4832+ DRM_ERROR("Invalid SKU values, SKU value = 0x%08x\n",
4833+ fuse_value_tmp);
4834+ }
4835+
4836+#if 1 /* FIXME remove it after PO */
4837+ fuse_value_tmp = (fuse_value & FB_GFX_CLK_DIVIDE_MASK) >> FB_GFX_CLK_DIVIDE_SHIFT;
4838+
4839+ switch (fuse_value_tmp) {
4840+ case 0:
4841+ DRM_INFO("Gfx clk : core clk = 1:1. \n");
4842+ break;
4843+ case 1:
4844+ DRM_INFO("Gfx clk : core clk = 4:3. \n");
4845+ break;
4846+ case 2:
4847+ DRM_INFO("Gfx clk : core clk = 8:5. \n");
4848+ break;
4849+ case 3:
4850+ DRM_INFO("Gfx clk : core clk = 2:1. \n");
4851+ break;
4852+ case 5:
4853+ DRM_INFO("Gfx clk : core clk = 8:3. \n");
4854+ break;
4855+ case 6:
4856+ DRM_INFO("Gfx clk : core clk = 16:5. \n");
4857+ break;
4858+ default:
4859+ DRM_ERROR("Invalid GFX CLK DIVIDE values, value = 0x%08x\n",
4860+ fuse_value_tmp);
4861+ }
4862+
4863+ fuse_value_tmp = (fuse_value & FB_VED_CLK_DIVIDE_MASK) >> FB_VED_CLK_DIVIDE_SHIFT;
4864+
4865+ switch (fuse_value_tmp) {
4866+ case 0:
4867+ DRM_INFO("Ved clk : core clk = 1:1. \n");
4868+ break;
4869+ case 1:
4870+ DRM_INFO("Ved clk : core clk = 4:3. \n");
4871+ break;
4872+ case 2:
4873+ DRM_INFO("Ved clk : core clk = 8:5. \n");
4874+ break;
4875+ case 3:
4876+ DRM_INFO("Ved clk : core clk = 2:1. \n");
4877+ break;
4878+ default:
4879+ DRM_ERROR("Invalid VED CLK DIVIDE values, value = 0x%08x\n",
4880+ fuse_value_tmp);
4881+ }
4882+
4883+ fuse_value_tmp = (fuse_value & FB_VEC_CLK_DIVIDE_MASK) >> FB_VEC_CLK_DIVIDE_SHIFT;
4884+
4885+ switch (fuse_value_tmp) {
4886+ case 0:
4887+ DRM_INFO("Vec clk : core clk = 1:1. \n");
4888+ break;
4889+ case 1:
4890+ DRM_INFO("Vec clk : core clk = 4:3. \n");
4891+ break;
4892+ case 2:
4893+ DRM_INFO("Vec clk : core clk = 8:5. \n");
4894+ break;
4895+ case 3:
4896+ DRM_INFO("Vec clk : core clk = 2:1. \n");
4897+ break;
4898+ default:
4899+ DRM_ERROR("Invalid VEC CLK DIVIDE values, value = 0x%08x\n",
4900+ fuse_value_tmp);
4901+ }
4902+#endif /* FIXME remove it after PO */
4903+
4904+ return;
4905+}
4906+
4907+static int psb_do_init(struct drm_device *dev)
4908+{
4909+ struct drm_psb_private *dev_priv =
4910+ (struct drm_psb_private *) dev->dev_private;
4911+ struct ttm_bo_device *bdev = &dev_priv->bdev;
4912+ struct psb_gtt *pg = dev_priv->pg;
4913+
4914+ uint32_t stolen_gtt;
4915+ uint32_t tt_start;
4916+ uint32_t tt_pages;
4917+
4918+ int ret = -ENOMEM;
4919+
4920+ dev_priv->ta_mem_pages =
4921+ PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024,
4922+ PAGE_SIZE) >> PAGE_SHIFT;
4923+ dev_priv->comm_page = alloc_page(GFP_KERNEL);
4924+ if (!dev_priv->comm_page)
4925+ goto out_err;
4926+
4927+ dev_priv->comm = kmap(dev_priv->comm_page);
4928+ memset((void *) dev_priv->comm, 0, PAGE_SIZE);
4929+
4930+ set_pages_uc(dev_priv->comm_page, 1);
4931+
4932+ /*
4933+ * Initialize sequence numbers for the different command
4934+ * submission mechanisms.
4935+ */
4936+
4937+ dev_priv->sequence[PSB_ENGINE_2D] = 0;
4938+ dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0;
4939+ dev_priv->sequence[PSB_ENGINE_TA] = 0;
4940+ dev_priv->sequence[PSB_ENGINE_HPRAST] = 0;
4941+
4942+ if (pg->gatt_start & 0x0FFFFFFF) {
4943+ DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
4944+ ret = -EINVAL;
4945+ goto out_err;
4946+ }
4947+
4948+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
4949+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
4950+ stolen_gtt =
4951+ (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
4952+
4953+ dev_priv->gatt_free_offset = pg->gatt_start +
4954+ (stolen_gtt << PAGE_SHIFT) * 1024;
4955+
4956+ /*
4957+ * Insert a cache-coherent communications page in mmu space
4958+ * just after the stolen area. Will be used for fencing etc.
4959+ */
4960+
4961+ dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset;
4962+ dev_priv->gatt_free_offset += PAGE_SIZE;
4963+
4964+ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
4965+ &dev_priv->comm_page,
4966+ dev_priv->comm_mmu_offset, 1, 0, 0, 0);
4967+
4968+ if (ret)
4969+ goto out_err;
4970+
4971+ if (1 || drm_debug) {
4972+ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
4973+ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
4974+ DRM_INFO("SGX core id = 0x%08x\n", core_id);
4975+ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
4976+ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
4977+ _PSB_CC_REVISION_MAJOR_SHIFT,
4978+ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
4979+ _PSB_CC_REVISION_MINOR_SHIFT);
4980+ DRM_INFO
4981+ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
4982+ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
4983+ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
4984+ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
4985+ _PSB_CC_REVISION_DESIGNER_SHIFT);
4986+ }
4987+
4988+ spin_lock_init(&dev_priv->irqmask_lock);
4989+ dev_priv->fence0_irq_on = 0;
4990+
4991+ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
4992+ pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
4993+ tt_start = dev_priv->gatt_free_offset - pg->gatt_start;
4994+ tt_pages -= tt_start >> PAGE_SHIFT;
4995+
4996+ if (!ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0,
4997+ pg->vram_stolen_size >> PAGE_SHIFT)) {
4998+ dev_priv->have_vram = 1;
4999+ }
5000+
5001+ if (!ttm_bo_init_mm(bdev, TTM_PL_CI, 0,
5002+ dev_priv->ci_region_size >> PAGE_SHIFT)) {
5003+ dev_priv->have_camera = 1;
5004+ }
5005+
5006+ if (!ttm_bo_init_mm(bdev, TTM_PL_TT, tt_start >> PAGE_SHIFT,
5007+ tt_pages)) {
5008+ dev_priv->have_tt = 1;
5009+ }
5010+
5011+ if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_MMU, 0x00000000,
5012+ (pg->gatt_start - PSB_MEM_MMU_START -
5013+ pg->ci_stolen_size) >> PAGE_SHIFT)) {
5014+ dev_priv->have_mem_mmu = 1;
5015+ }
5016+
5017+ if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_RASTGEOM, 0x00000000,
5018+ (PSB_MEM_MMU_START -
5019+ PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) {
5020+ dev_priv->have_mem_rastgeom = 1;
5021+ }
5022+#if 0
5023+ if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) {
5024+ if (!ttm_bo_init_mm
5025+ (bdev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT,
5026+ pg->gatt_pages - PSB_TT_PRIV0_PLIMIT, 1)) {
5027+ dev_priv->have_mem_aper = 1;
5028+ }
5029+ }
5030+#endif
5031+
5032+ PSB_DEBUG_INIT("Init MSVDX\n");
5033+ dev_priv->has_msvdx = 1;
5034+ if (psb_msvdx_init(dev))
5035+ dev_priv->has_msvdx = 0;
5036+
5037+ if (IS_MRST(dev)) {
5038+ PSB_DEBUG_INIT("Init Topaz\n");
5039+ dev_priv->has_topaz = 1;
5040+ if (lnc_topaz_init(dev))
5041+ dev_priv->has_topaz = 0;
5042+ }
5043+ return 0;
5044+out_err:
5045+ psb_do_takedown(dev);
5046+ return ret;
5047+}
5048+
5049+static int psb_driver_unload(struct drm_device *dev)
5050+{
5051+ struct drm_psb_private *dev_priv =
5052+ (struct drm_psb_private *) dev->dev_private;
5053+
5054+ if (drm_psb_no_fb == 0)
5055+ psb_modeset_cleanup(dev);
5056+
5057+ if (dev_priv) {
5058+ struct ttm_bo_device *bdev = &dev_priv->bdev;
5059+
5060+ psb_watchdog_takedown(dev_priv);
5061+ psb_do_takedown(dev);
5062+ psb_xhw_takedown(dev_priv);
5063+ psb_scheduler_takedown(&dev_priv->scheduler);
5064+
5065+ if (dev_priv->have_mem_pds) {
5066+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_PDS);
5067+ dev_priv->have_mem_pds = 0;
5068+ }
5069+ if (dev_priv->have_mem_kernel) {
5070+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_KERNEL);
5071+ dev_priv->have_mem_kernel = 0;
5072+ }
5073+
5074+ if (dev_priv->pf_pd) {
5075+ psb_mmu_free_pagedir(dev_priv->pf_pd);
5076+ dev_priv->pf_pd = NULL;
5077+ }
5078+ if (dev_priv->mmu) {
5079+ struct psb_gtt *pg = dev_priv->pg;
5080+
5081+ down_read(&pg->sem);
5082+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
5083+ (dev_priv->mmu),
5084+ pg->gatt_start,
5085+ pg->vram_stolen_size >>
5086+ PAGE_SHIFT);
5087+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
5088+ (dev_priv->mmu),
5089+ pg->gatt_start - pg->ci_stolen_size,
5090+ pg->ci_stolen_size >>
5091+ PAGE_SHIFT);
5092+ up_read(&pg->sem);
5093+ psb_mmu_driver_takedown(dev_priv->mmu);
5094+ dev_priv->mmu = NULL;
5095+ }
5096+ psb_gtt_takedown(dev_priv->pg, 1);
5097+ if (dev_priv->scratch_page) {
5098+ __free_page(dev_priv->scratch_page);
5099+ dev_priv->scratch_page = NULL;
5100+ }
5101+ if (dev_priv->has_bo_device) {
5102+ ttm_bo_device_release(&dev_priv->bdev);
5103+ dev_priv->has_bo_device = 0;
5104+ }
5105+ if (dev_priv->has_fence_device) {
5106+ ttm_fence_device_release(&dev_priv->fdev);
5107+ dev_priv->has_fence_device = 0;
5108+ }
5109+ if (dev_priv->vdc_reg) {
5110+ iounmap(dev_priv->vdc_reg);
5111+ dev_priv->vdc_reg = NULL;
5112+ }
5113+ if (dev_priv->sgx_reg) {
5114+ iounmap(dev_priv->sgx_reg);
5115+ dev_priv->sgx_reg = NULL;
5116+ }
5117+ if (dev_priv->msvdx_reg) {
5118+ iounmap(dev_priv->msvdx_reg);
5119+ dev_priv->msvdx_reg = NULL;
5120+ }
5121+
5122+ if (IS_MRST(dev)) {
5123+ if (dev_priv->topaz_reg) {
5124+ iounmap(dev_priv->topaz_reg);
5125+ dev_priv->topaz_reg = NULL;
5126+ }
5127+ }
5128+
5129+ if (dev_priv->tdev)
5130+ ttm_object_device_release(&dev_priv->tdev);
5131+
5132+ if (dev_priv->has_global)
5133+ psb_ttm_global_release(dev_priv);
5134+
5135+ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
5136+ dev->dev_private = NULL;
5137+ }
5138+ return 0;
5139+}
5140+
5141+
5142+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
5143+{
5144+ struct drm_psb_private *dev_priv;
5145+ struct ttm_bo_device *bdev;
5146+ unsigned long resource_start;
5147+ struct psb_gtt *pg;
5148+ int ret = -ENOMEM;
5149+
5150+ if (IS_MRST(dev))
5151+		DRM_INFO("Running driver on Moorestown platform!\n");
5152+ else
5153+		DRM_INFO("Running driver on Poulsbo platform!\n");
5154+
5155+ dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
5156+ if (dev_priv == NULL)
5157+ return -ENOMEM;
5158+
5159+ dev_priv->dev = dev;
5160+ bdev = &dev_priv->bdev;
5161+
5162+ ret = psb_ttm_global_init(dev_priv);
5163+ if (unlikely(ret != 0))
5164+ goto out_err;
5165+ dev_priv->has_global = 1;
5166+
5167+ dev_priv->tdev = ttm_object_device_init
5168+ (dev_priv->mem_global_ref.object, PSB_OBJECT_HASH_ORDER);
5169+ if (unlikely(dev_priv->tdev == NULL))
5170+ goto out_err;
5171+
5172+ mutex_init(&dev_priv->temp_mem);
5173+ mutex_init(&dev_priv->cmdbuf_mutex);
5174+ mutex_init(&dev_priv->reset_mutex);
5175+ INIT_LIST_HEAD(&dev_priv->context.validate_list);
5176+ INIT_LIST_HEAD(&dev_priv->context.kern_validate_list);
5177+ psb_init_disallowed();
5178+
5179+#ifdef FIX_TG_16
5180+ atomic_set(&dev_priv->lock_2d, 0);
5181+ atomic_set(&dev_priv->ta_wait_2d, 0);
5182+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
5183+	atomic_set(&dev_priv->waiters_2d, 0);
5184+ DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
5185+#else
5186+ mutex_init(&dev_priv->mutex_2d);
5187+#endif
5188+
5189+ spin_lock_init(&dev_priv->reloc_lock);
5190+
5191+ DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
5192+ DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
5193+
5194+ dev->dev_private = (void *) dev_priv;
5195+ dev_priv->chipset = chipset;
5196+ psb_set_uopt(&dev_priv->uopt);
5197+
5198+ PSB_DEBUG_GENERAL("Init watchdog and scheduler\n");
5199+ psb_watchdog_init(dev_priv);
5200+ psb_scheduler_init(dev, &dev_priv->scheduler);
5201+
5202+
5203+ PSB_DEBUG_INIT("Mapping MMIO\n");
5204+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
5205+
5206+ if (IS_MRST(dev))
5207+ dev_priv->msvdx_reg =
5208+ ioremap(resource_start + MRST_MSVDX_OFFSET,
5209+ PSB_MSVDX_SIZE);
5210+ else
5211+ dev_priv->msvdx_reg =
5212+ ioremap(resource_start + PSB_MSVDX_OFFSET,
5213+ PSB_MSVDX_SIZE);
5214+
5215+ if (!dev_priv->msvdx_reg)
5216+ goto out_err;
5217+
5218+ if (IS_MRST(dev)) {
5219+ dev_priv->topaz_reg =
5220+ ioremap(resource_start + LNC_TOPAZ_OFFSET,
5221+ LNC_TOPAZ_SIZE);
5222+ if (!dev_priv->topaz_reg)
5223+ goto out_err;
5224+ }
5225+
5226+ dev_priv->vdc_reg =
5227+ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
5228+ if (!dev_priv->vdc_reg)
5229+ goto out_err;
5230+
5231+ if (IS_MRST(dev))
5232+ dev_priv->sgx_reg =
5233+ ioremap(resource_start + MRST_SGX_OFFSET,
5234+ PSB_SGX_SIZE);
5235+ else
5236+ dev_priv->sgx_reg =
5237+ ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
5238+
5239+ if (!dev_priv->sgx_reg)
5240+ goto out_err;
5241+
5242+ if (IS_MRST(dev))
5243+ mrst_get_fuse_settings(dev_priv);
5244+
5245+ PSB_DEBUG_INIT("Init TTM fence and BO driver\n");
5246+
5247+ get_ci_info(dev_priv);
5248+
5249+ psb_clockgating(dev_priv);
5250+
5251+ ret = psb_ttm_fence_device_init(&dev_priv->fdev);
5252+ if (unlikely(ret != 0))
5253+ goto out_err;
5254+
5255+ dev_priv->has_fence_device = 1;
5256+ ret = ttm_bo_device_init(bdev,
5257+ dev_priv->mem_global_ref.object,
5258+ &psb_ttm_bo_driver,
5259+ DRM_PSB_FILE_PAGE_OFFSET);
5260+ if (unlikely(ret != 0))
5261+ goto out_err;
5262+ dev_priv->has_bo_device = 1;
5263+ ttm_lock_init(&dev_priv->ttm_lock);
5264+
5265+ ret = -ENOMEM;
5266+
5267+ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
5268+ if (!dev_priv->scratch_page)
5269+ goto out_err;
5270+
5271+ set_pages_uc(dev_priv->scratch_page, 1);
5272+
5273+ dev_priv->pg = psb_gtt_alloc(dev);
5274+ if (!dev_priv->pg)
5275+ goto out_err;
5276+
5277+ ret = psb_gtt_init(dev_priv->pg, 0);
5278+ if (ret)
5279+ goto out_err;
5280+
5281+ dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg,
5282+ drm_psb_trap_pagefaults, 0,
5283+ dev_priv);
5284+ if (!dev_priv->mmu)
5285+ goto out_err;
5286+
5287+ pg = dev_priv->pg;
5288+
5289+ /*
5290+ * Make sgx MMU aware of the stolen memory area we call VRAM.
5291+ */
5292+
5293+ down_read(&pg->sem);
5294+ ret =
5295+ psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
5296+ (dev_priv->mmu),
5297+ pg->stolen_base >> PAGE_SHIFT,
5298+ pg->gatt_start,
5299+ pg->vram_stolen_size >> PAGE_SHIFT, 0);
5300+ up_read(&pg->sem);
5301+ if (ret)
5302+ goto out_err;
5303+
5304+ /*
5305+	 * Make sgx MMU aware of the CI stolen memory area.
5306+ */
5307+
5308+ down_read(&pg->sem);
5309+ ret =
5310+ psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
5311+ (dev_priv->mmu),
5312+ dev_priv->ci_region_start >> PAGE_SHIFT,
5313+ pg->gatt_start - pg->ci_stolen_size,
5314+ pg->ci_stolen_size >> PAGE_SHIFT, 0);
5315+ up_read(&pg->sem);
5316+ if (ret)
5317+ goto out_err;
5318+
5319+ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
5320+ if (!dev_priv->pf_pd)
5321+ goto out_err;
5322+
5323+ /*
5324+ * Make all presumably unused requestors page-fault by making them
5325+ * use context 1 which does not have any valid mappings.
5326+ */
5327+
5328+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
5329+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
5330+ PSB_RSGX32(PSB_CR_BIF_BANK1);
5331+
5332+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
5333+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
5334+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
5335+
5336+ psb_init_2d(dev_priv);
5337+
5338+ ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_KERNEL, 0x00000000,
5339+ (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START)
5340+ >> PAGE_SHIFT);
5341+ if (ret)
5342+ goto out_err;
5343+ dev_priv->have_mem_kernel = 1;
5344+
5345+ ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_PDS, 0x00000000,
5346+ (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START)
5347+ >> PAGE_SHIFT);
5348+ if (ret)
5349+ goto out_err;
5350+ dev_priv->have_mem_pds = 1;
5351+
5352+ PSB_DEBUG_INIT("Begin to init SGX/MSVDX/Topaz\n");
5353+
5354+ ret = psb_do_init(dev);
5355+ if (ret)
5356+ return ret;
5357+
5358+ ret = psb_xhw_init(dev);
5359+ if (ret)
5360+ return ret;
5361+
5362+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
5363+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
5364+
5365+ psb_init_ospm(dev_priv);
5366+
5367+ if (drm_psb_no_fb == 0) {
5368+ psb_modeset_init(dev);
5369+ drm_helper_initial_config(dev, false);
5370+ }
5371+
5372+ /*initialize the MSI for MRST*/
5373+ if (IS_MRST(dev)) {
5374+ if (pci_enable_msi(dev->pdev)) {
5375+ DRM_ERROR("Enable MSI for MRST failed!\n");
5376+ } else {
5377+ PSB_DEBUG_INIT("Enabled MSI IRQ (%d)\n",
5378+ dev->pdev->irq);
5379+ /* pci_write_config_word(pdev, 0x04, 0x07); */
5380+ }
5381+ }
5382+
5383+ /*set SGX in low power mode*/
5384+ if (drm_psb_ospm && IS_MRST(dev))
5385+ if (psb_try_power_down_sgx(dev))
5386+ PSB_DEBUG_PM("initialize SGX to low power failed\n");
5387+ return 0;
5388+out_err:
5389+ psb_driver_unload(dev);
5390+ return ret;
5391+}
5392+
5393+int psb_driver_device_is_agp(struct drm_device *dev)
5394+{
5395+ return 0;
5396+}
5397+
5398+static int psb_prepare_msvdx_suspend(struct drm_device *dev)
5399+{
5400+#ifdef PSB_FIXME
5401+ struct drm_psb_private *dev_priv =
5402+ (struct drm_psb_private *) dev->dev_private;
5403+ struct ttm_fence_device *fdev = &dev_priv->fdev;
5404+ struct ttm_fence_class_manager *fc =
5405+ &fdev->fence_class[PSB_ENGINE_VIDEO];
5406+ struct ttm_fence_object *fence;
5407+ int ret = 0;
5408+ int signaled = 0;
5409+ int count = 0;
5410+ unsigned long _end = jiffies + 3 * DRM_HZ;
5411+
5412+ PSB_DEBUG_GENERAL
5413+ ("MSVDXACPI Entering psb_prepare_msvdx_suspend....\n");
5414+
5415+ /*set the msvdx-reset flag here.. */
5416+ dev_priv->msvdx_needs_reset = 1;
5417+
5418+ /*Ensure that all pending IRQs are serviced, */
5419+
5420+ /*
5421+ * Save the last MSVDX fence in dev_priv instead!!!
5422+ * Need to be fc->write_locked while accessing a fence from the ring.
5423+ */
5424+
5425+ list_for_each_entry(fence, &fc->ring, ring) {
5426+ count++;
5427+ do {
5428+ DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
5429+ (signaled =
5430+ ttm_fence_object_signaled(fence,
5431+ DRM_FENCE_TYPE_EXE)));
5432+ if (signaled)
5433+ break;
5434+ if (time_after_eq(jiffies, _end))
5435+ PSB_DEBUG_GENERAL
5436+ ("MSVDXACPI: fence 0x%x didn't get"
5437+ " signaled for 3 secs; "
5438+					 "we will suspend anyway\n",
5439+ (unsigned int) fence);
5440+ } while (ret == -EINTR);
5441+
5442+ }
5443+ PSB_DEBUG_GENERAL("MSVDXACPI: All MSVDX IRQs (%d) serviced...\n",
5444+ count);
5445+#endif
5446+ return 0;
5447+}
5448+
5449+static int psb_suspend(struct pci_dev *pdev, pm_message_t state)
5450+{
5451+ struct drm_device *dev = pci_get_drvdata(pdev);
5452+ struct drm_psb_private *dev_priv =
5453+ (struct drm_psb_private *) dev->dev_private;
5454+
5455+ if (!down_write_trylock(&dev_priv->sgx_sem))
5456+ return -EBUSY;
5457+	if (dev_priv->graphics_state != PSB_PWR_STATE_D0i0)
5458+ PSB_DEBUG_PM("Not suspending from D0i0\n");
5459+ if (dev_priv->graphics_state == PSB_PWR_STATE_D3)
5460+ goto exit;
5461+	if (drm_psb_no_fb == 0) {
5462+ psbfb_suspend(dev);
5463+ psb_modeset_cleanup(dev);
5464+ }
5465+
5466+ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
5467+ (void) psb_idle_3d(dev);
5468+ (void) psb_idle_2d(dev);
5469+ flush_scheduled_work();
5470+
5471+ if (dev_priv->has_msvdx)
5472+ psb_prepare_msvdx_suspend(dev);
5473+
5474+ if (dev_priv->has_topaz)
5475+ lnc_prepare_topaz_suspend(dev);
5476+
5477+#ifdef OSPM_STAT
5478+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)
5479+ dev_priv->gfx_d0i0_time += jiffies - dev_priv->gfx_last_mode_change;
5480+ else if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
5481+ dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
5482+ else
5483+ PSB_DEBUG_PM("suspend: illegal previous power state\n");
5484+ dev_priv->gfx_last_mode_change = jiffies;
5485+ dev_priv->gfx_d3_cnt++;
5486+#endif
5487+
5488+ dev_priv->graphics_state = PSB_PWR_STATE_D3;
5489+ dev_priv->msvdx_state = PSB_PWR_STATE_D3;
5490+ dev_priv->topaz_power_state = LNC_TOPAZ_POWEROFF;
5491+ pci_save_state(pdev);
5492+ pci_disable_device(pdev);
5493+ pci_set_power_state(pdev, PCI_D3hot);
5494+ psb_down_island_power(dev, PSB_GRAPHICS_ISLAND | PSB_VIDEO_ENC_ISLAND
5495+ | PSB_VIDEO_DEC_ISLAND);
5496+exit:
5497+ up_write(&dev_priv->sgx_sem);
5498+ return 0;
5499+}
5500+
5501+static int psb_resume(struct pci_dev *pdev)
5502+{
5503+ struct drm_device *dev = pci_get_drvdata(pdev);
5504+ struct drm_psb_private *dev_priv =
5505+ (struct drm_psb_private *) dev->dev_private;
5506+ struct psb_gtt *pg = dev_priv->pg;
5507+ int ret;
5508+ if (dev_priv->graphics_state != PSB_PWR_STATE_D3)
5509+ return 0;
5510+
5511+ psb_up_island_power(dev, PSB_GRAPHICS_ISLAND | PSB_VIDEO_ENC_ISLAND
5512+ | PSB_VIDEO_DEC_ISLAND);
5513+ pci_set_power_state(pdev, PCI_D0);
5514+ pci_restore_state(pdev);
5515+ ret = pci_enable_device(pdev);
5516+ if (ret)
5517+ return ret;
5518+
5519+ DRM_ERROR("FIXME: topaz's resume is not ready..\n");
5520+#ifdef OSPM_STAT
5521+ if (dev_priv->graphics_state == PSB_PWR_STATE_D3)
5522+ dev_priv->gfx_d3_time += jiffies - dev_priv->gfx_last_mode_change;
5523+ else
5524+		PSB_DEBUG_PM("resume: illegal previous power state\n");
5525+ dev_priv->gfx_last_mode_change = jiffies;
5526+ dev_priv->gfx_d0i0_cnt++;
5527+#endif
5528+ dev_priv->graphics_state = PSB_PWR_STATE_D0i0;
5529+ dev_priv->msvdx_state = PSB_PWR_STATE_D0i0;
5530+ dev_priv->topaz_power_state = LNC_TOPAZ_POWERON;
5531+ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
5532+ dev_priv->msvdx_needs_reset = 1;
5533+
5534+ lnc_prepare_topaz_resume(dev);
5535+
5536+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
5537+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
5538+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
5539+
5540+ /*
5541+ * Don't reinitialize the GTT as it is unnecessary. The gtt is
5542+ * stored in memory so it will automatically be restored. All
5543+ * we need to do is restore the PGETBL_CTL which we already do
5544+ * above.
5545+ */
5546+
5547+ //psb_gtt_init(dev_priv->pg, 1);
5548+
5549+ /*
5550+	 * The SGX loses its register contents.
5551+ * Restore BIF registers. The MMU page tables are
5552+ * "normal" pages, so their contents should be kept.
5553+ */
5554+
5555+ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
5556+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
5557+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
5558+ PSB_RSGX32(PSB_CR_BIF_BANK1);
5559+
5560+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
5561+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
5562+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
5563+
5564+ /*
5565+ * 2D Base registers..
5566+ */
5567+ psb_init_2d(dev_priv);
5568+
5569+ /*
5570+	 * Persistent 3D base registers and USSE base registers..
5571+ */
5572+
5573+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
5574+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
5575+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
5576+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
5577+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
5578+
5579+ /*
5580+ * Now, re-initialize the 3D engine.
5581+ */
5582+
5583+ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
5584+
5585+ psb_scheduler_ta_mem_check(dev_priv);
5586+ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
5587+ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
5588+ PSB_TA_MEM_FLAG_TA |
5589+ PSB_TA_MEM_FLAG_RASTER |
5590+ PSB_TA_MEM_FLAG_HOSTA |
5591+ PSB_TA_MEM_FLAG_HOSTD |
5592+ PSB_TA_MEM_FLAG_INIT,
5593+ dev_priv->ta_mem->ta_memory->offset,
5594+ dev_priv->ta_mem->hw_data->offset,
5595+ dev_priv->ta_mem->hw_cookie);
5596+ }
5597+
5598+ if (drm_psb_no_fb == 0) {
5599+ psb_modeset_init(dev);
5600+ drm_helper_initial_config(dev, false);
5601+ psbfb_resume(dev);
5602+ }
5603+ return 0;
5604+}
5605+
5606+int psb_extension_ioctl(struct drm_device *dev, void *data,
5607+ struct drm_file *file_priv)
5608+{
5609+ union drm_psb_extension_arg *arg = data;
5610+ struct drm_psb_extension_rep *rep = &arg->rep;
5611+
5612+ if (strcmp(arg->extension, "psb_ttm_placement_alphadrop") == 0) {
5613+ rep->exists = 1;
5614+ rep->driver_ioctl_offset = DRM_PSB_PLACEMENT_OFFSET;
5615+ rep->sarea_offset = 0;
5616+ rep->major = 1;
5617+ rep->minor = 0;
5618+ rep->pl = 0;
5619+ return 0;
5620+ }
5621+ if (strcmp(arg->extension, "psb_ttm_fence_alphadrop") == 0) {
5622+ rep->exists = 1;
5623+ rep->driver_ioctl_offset = DRM_PSB_FENCE_OFFSET;
5624+ rep->sarea_offset = 0;
5625+ rep->major = 1;
5626+ rep->minor = 0;
5627+ rep->pl = 0;
5628+ return 0;
5629+ }
5630+ if (strcmp(arg->extension, "psb_ttm_execbuf_alphadrop") == 0) {
5631+ rep->exists = 1;
5632+ rep->driver_ioctl_offset = DRM_PSB_CMDBUF;
5633+ rep->sarea_offset = 0;
5634+ rep->major = 1;
5635+ rep->minor = 0;
5636+ rep->pl = 0;
5637+ return 0;
5638+ }
5639+
5640+ rep->exists = 0;
5641+ return 0;
5642+}
5643+
5644+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
5645+ struct drm_file *file_priv)
5646+{
5647+ struct drm_psb_private *dev_priv = psb_priv(dev);
5648+ struct ttm_bo_device *bdev = &dev_priv->bdev;
5649+ struct ttm_mem_type_manager *man;
5650+ int clean;
5651+ int ret;
5652+
5653+ ret = ttm_write_lock(&dev_priv->ttm_lock, 1,
5654+ psb_fpriv(file_priv)->tfile);
5655+ if (unlikely(ret != 0))
5656+ return ret;
5657+
5658+ /*
5659+ * Clean VRAM and TT for fbdev.
5660+ */
5661+
5662+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
5663+ if (unlikely(ret != 0))
5664+ goto out_unlock;
5665+
5666+ man = &bdev->man[TTM_PL_VRAM];
5667+ spin_lock(&bdev->lru_lock);
5668+ clean = drm_mm_clean(&man->manager);
5669+ spin_unlock(&bdev->lru_lock);
5670+ if (unlikely(!clean))
5671+		DRM_INFO("Notice: VRAM was not clean after VT switch; if you are running fbdev, please ignore.\n");
5672+
5673+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_TT);
5674+ if (unlikely(ret != 0))
5675+ goto out_unlock;
5676+
5677+ man = &bdev->man[TTM_PL_TT];
5678+ spin_lock(&bdev->lru_lock);
5679+ clean = drm_mm_clean(&man->manager);
5680+ spin_unlock(&bdev->lru_lock);
5681+ if (unlikely(!clean))
5682+ DRM_INFO("Warning: GATT was not clean after VT switch.\n");
5683+
5684+ ttm_bo_swapout_all(&dev_priv->bdev);
5685+
5686+ return 0;
5687+out_unlock:
5688+ (void) ttm_write_unlock(&dev_priv->ttm_lock,
5689+ psb_fpriv(file_priv)->tfile);
5690+ return ret;
5691+}
5692+
5693+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
5694+ struct drm_file *file_priv)
5695+{
5696+ struct drm_psb_private *dev_priv = psb_priv(dev);
5697+ return ttm_write_unlock(&dev_priv->ttm_lock,
5698+ psb_fpriv(file_priv)->tfile);
5699+}
5700+
5701+/* always available as we are SIGIO'd */
5702+static unsigned int psb_poll(struct file *filp,
5703+ struct poll_table_struct *wait)
5704+{
5705+ return POLLIN | POLLRDNORM;
5706+}
5707+
5708+int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
5709+{
5710+ /*psb_check_power_state(dev, PSB_DEVICE_SGX);*/
5711+ return 0;
5712+}
5713+
5714+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
5715+ unsigned long arg)
5716+{
5717+ struct drm_file *file_priv = filp->private_data;
5718+ struct drm_device *dev = file_priv->minor->dev;
5719+ unsigned int nr = DRM_IOCTL_NR(cmd);
5720+ long ret;
5721+
5722+ /*
5723+ * The driver private ioctls and TTM ioctls should be
5724+ * thread-safe.
5725+ */
5726+
5727+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
5728+ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
5729+ struct drm_ioctl_desc *ioctl = &psb_ioctls[nr - DRM_COMMAND_BASE];
5730+
5731+ if (unlikely(ioctl->cmd != cmd)) {
5732+ DRM_ERROR("Invalid drm command %d\n",
5733+ nr - DRM_COMMAND_BASE);
5734+ return -EINVAL;
5735+ }
5736+
5737+ return drm_unlocked_ioctl(filp, cmd, arg);
5738+ }
5739+ /*
5740+ * Not all old drm ioctls are thread-safe.
5741+ */
5742+
5743+ lock_kernel();
5744+ ret = drm_unlocked_ioctl(filp, cmd, arg);
5745+ unlock_kernel();
5746+ return ret;
5747+}
5748+
5749+static int psb_ospm_read(char *buf, char **start, off_t offset, int request,
5750+ int *eof, void *data)
5751+{
5752+ struct drm_minor *minor = (struct drm_minor *) data;
5753+ struct drm_device *dev = minor->dev;
5754+ struct drm_psb_private *dev_priv =
5755+ (struct drm_psb_private *) dev->dev_private;
5756+ int len = 0;
5757+ unsigned long d0i0 = 0;
5758+ unsigned long d0i3 = 0;
5759+ unsigned long d3 = 0;
5760+ *start = &buf[offset];
5761+ *eof = 0;
5762+ DRM_PROC_PRINT("D0i3:%s ", drm_psb_ospm ? "enabled" : "disabled");
5763+ switch (dev_priv->graphics_state) {
5764+ case PSB_PWR_STATE_D0i0:
5765+ DRM_PROC_PRINT("GFX:%s\n", "D0i0");
5766+ break;
5767+ case PSB_PWR_STATE_D0i3:
5768+ DRM_PROC_PRINT("GFX:%s\n", "D0i3");
5769+ break;
5770+ case PSB_PWR_STATE_D3:
5771+ DRM_PROC_PRINT("GFX:%s\n", "D3");
5772+ break;
5773+ default:
5774+		DRM_PROC_PRINT("GFX:%s\n", "unknown");
5775+ }
5776+#ifdef OSPM_STAT
5777+ d0i0 = dev_priv->gfx_d0i0_time * 1000 / HZ;
5778+ d0i3 = dev_priv->gfx_d0i3_time * 1000 / HZ;
5779+ d3 = dev_priv->gfx_d3_time * 1000 / HZ;
5780+ switch (dev_priv->graphics_state) {
5781+ case PSB_PWR_STATE_D0i0:
5782+ d0i0 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
5783+ break;
5784+ case PSB_PWR_STATE_D0i3:
5785+ d0i3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
5786+ break;
5787+ case PSB_PWR_STATE_D3:
5788+ d3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
5789+ break;
5790+ }
5791+ DRM_PROC_PRINT("GFX(cnt/ms):\n");
5792+ DRM_PROC_PRINT("D0i0:%lu/%lu, D0i3:%lu/%lu, D3:%lu/%lu \n",
5793+ dev_priv->gfx_d0i0_cnt, d0i0, dev_priv->gfx_d0i3_cnt, d0i3,
5794+ dev_priv->gfx_d3_cnt, d3);
5795+#endif
5796+ if (len > request + offset)
5797+ return request;
5798+ *eof = 1;
5799+ return len - offset;
5800+}
5801+
5802+static int psb_proc_init(struct drm_minor *minor)
5803+{
5804+ struct proc_dir_entry *ent;
5805+ if (!minor->dev_root)
5806+ return 0;
5807+ ent = create_proc_read_entry(OSPM_PROC_ENTRY, 0, minor->dev_root,
5808+ psb_ospm_read, minor);
5809+ if (ent)
5810+ return 0;
5811+ else
5812+ return -1;
5813+}
5814+
5815+static void psb_proc_cleanup(struct drm_minor *minor)
5816+{
5817+ if (!minor->dev_root)
5818+ return;
5819+ remove_proc_entry(OSPM_PROC_ENTRY, minor->dev_root);
5820+ return;
5821+}
5822+
5823+static struct drm_driver driver = {
5824+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
5825+ .load = psb_driver_load,
5826+ .unload = psb_driver_unload,
5827+ .dri_library_name = dri_library_name,
5828+ .get_reg_ofs = drm_core_get_reg_ofs,
5829+ .ioctls = psb_ioctls,
5830+ .device_is_agp = psb_driver_device_is_agp,
5831+ .irq_preinstall = psb_irq_preinstall,
5832+ .irq_postinstall = psb_irq_postinstall,
5833+ .irq_uninstall = psb_irq_uninstall,
5834+ .irq_handler = psb_irq_handler,
5835+ .firstopen = NULL,
5836+ .lastclose = psb_lastclose,
5837+ .open = psb_driver_open,
5838+ .proc_init = psb_proc_init,
5839+ .proc_cleanup = psb_proc_cleanup,
5840+ .fops = {
5841+ .owner = THIS_MODULE,
5842+ .open = psb_open,
5843+ .release = psb_release,
5844+ .unlocked_ioctl = psb_unlocked_ioctl,
5845+ .mmap = psb_mmap,
5846+ .poll = psb_poll,
5847+ .fasync = drm_fasync,
5848+ },
5849+ .pci_driver = {
5850+ .name = DRIVER_NAME,
5851+ .id_table = pciidlist,
5852+ .resume = psb_resume,
5853+ .suspend = psb_suspend,
5854+ },
5855+ .name = DRIVER_NAME,
5856+ .desc = DRIVER_DESC,
5857+ .date = PSB_DRM_DRIVER_DATE,
5858+ .major = PSB_DRM_DRIVER_MAJOR,
5859+ .minor = PSB_DRM_DRIVER_MINOR,
5860+ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
5861+};
5862+
5863+static int __init psb_init(void)
5864+{
5865+ driver.num_ioctls = psb_max_ioctl;
5866+
5867+ return drm_init(&driver);
5868+}
5869+
5870+static void __exit psb_exit(void)
5871+{
5872+ drm_exit(&driver);
5873+}
5874+
5875+module_init(psb_init);
5876+module_exit(psb_exit);
5877+
5878+MODULE_AUTHOR(DRIVER_AUTHOR);
5879+MODULE_DESCRIPTION(DRIVER_DESC);
5880+MODULE_LICENSE("GPL");
5881diff -uNr a/drivers/gpu/drm/psb/psb_drv.h b/drivers/gpu/drm/psb/psb_drv.h
5882--- a/drivers/gpu/drm/psb/psb_drv.h 1969-12-31 16:00:00.000000000 -0800
5883+++ b/drivers/gpu/drm/psb/psb_drv.h 2009-04-07 13:28:38.000000000 -0700
5884@@ -0,0 +1,1129 @@
5885+/**************************************************************************
5886+ *Copyright (c) 2007-2008, Intel Corporation.
5887+ *All Rights Reserved.
5888+ *
5889+ *This program is free software; you can redistribute it and/or modify it
5890+ *under the terms and conditions of the GNU General Public License,
5891+ *version 2, as published by the Free Software Foundation.
5892+ *
5893+ *This program is distributed in the hope it will be useful, but WITHOUT
5894+ *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
5895+ *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
5896+ *more details.
5897+ *
5898+ *You should have received a copy of the GNU General Public License along with
5899+ *this program; if not, write to the Free Software Foundation, Inc.,
5900+ *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5901+ *
5902+ *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
5903+ *develop this driver.
5904+ *
5905+ **************************************************************************/
5906+/*
5907+ */
5908+#ifndef _PSB_DRV_H_
5909+#define _PSB_DRV_H_
5910+
5911+#include <drm/drmP.h>
5912+#include "psb_drm.h"
5913+#include "psb_reg.h"
5914+#include "psb_schedule.h"
5915+#include "psb_intel_drv.h"
5916+#include "ttm/ttm_object.h"
5917+#include "ttm/ttm_fence_driver.h"
5918+#include "ttm/ttm_bo_driver.h"
5919+#include "ttm/ttm_lock.h"
5920+
5921+extern struct ttm_bo_driver psb_ttm_bo_driver;
5922+
5923+enum {
5924+ CHIP_PSB_8108 = 0,
5925+ CHIP_PSB_8109 = 1,
5926+ CHIP_MRST_4100 = 2
5927+};
5928+
5929+/*
5930+ *Hardware bugfixes
5931+ */
5932+
5933+#define FIX_TG_16
5934+#define FIX_TG_2D_CLOCKGATE
5935+#define OSPM_STAT
5936+
5937+#define DRIVER_NAME "psb"
5938+#define DRIVER_DESC "drm driver for the Intel GMA500"
5939+#define DRIVER_AUTHOR "Tungsten Graphics Inc."
5940+#define OSPM_PROC_ENTRY "ospm"
5941+
5942+#define PSB_DRM_DRIVER_DATE "2009-02-09"
5943+#define PSB_DRM_DRIVER_MAJOR 8
5944+#define PSB_DRM_DRIVER_MINOR 0
5945+#define PSB_DRM_DRIVER_PATCHLEVEL 0
5946+
5947+/*
5948+ *TTM driver private offsets.
5949+ */
5950+
5951+#define DRM_PSB_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
5952+
5953+#define PSB_OBJECT_HASH_ORDER 13
5954+#define PSB_FILE_OBJECT_HASH_ORDER 12
5955+#define PSB_BO_HASH_ORDER 12
5956+
5957+#define PSB_VDC_OFFSET 0x00000000
5958+#define PSB_VDC_SIZE 0x000080000
5959+#define MRST_MMIO_SIZE 0x0000C0000
5960+#define PSB_SGX_SIZE 0x8000
5961+#define PSB_SGX_OFFSET 0x00040000
5962+#define MRST_SGX_OFFSET 0x00080000
5963+#define PSB_MMIO_RESOURCE 0
5964+#define PSB_GATT_RESOURCE 2
5965+#define PSB_GTT_RESOURCE 3
5966+#define PSB_GMCH_CTRL 0x52
5967+#define PSB_BSM 0x5C
5968+#define _PSB_GMCH_ENABLED 0x4
5969+#define PSB_PGETBL_CTL 0x2020
5970+#define _PSB_PGETBL_ENABLED 0x00000001
5971+#define PSB_SGX_2D_SLAVE_PORT 0x4000
5972+#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
5973+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
5974+#define PSB_NUM_VALIDATE_BUFFERS 2048
5975+#define PSB_MEM_KERNEL_START 0x10000000
5976+#define PSB_MEM_PDS_START 0x20000000
5977+#define PSB_MEM_MMU_START 0x40000000
5978+
5979+#define DRM_PSB_MEM_KERNEL TTM_PL_PRIV0
5980+#define DRM_PSB_FLAG_MEM_KERNEL TTM_PL_FLAG_PRIV0
5981+
5982+/*
5983+ *Flags for external memory type field.
5984+ */
5985+
5986+#define MRST_MSVDX_OFFSET	0x90000	/* MSVDX base offset */
5987+#define PSB_MSVDX_OFFSET	0x50000	/* MSVDX base offset */
5988+/* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
5989+#define PSB_MSVDX_SIZE 0x10000
5990+
5991+#define LNC_TOPAZ_OFFSET 0xA0000
5992+#define LNC_TOPAZ_SIZE 0x10000
5993+
5994+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
5995+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
5996+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
5997+
5998+/*
5999+ *PTE's and PDE's
6000+ */
6001+
6002+#define PSB_PDE_MASK 0x003FFFFF
6003+#define PSB_PDE_SHIFT 22
6004+#define PSB_PTE_SHIFT 12
6005+
6006+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
6007+#define PSB_PTE_WO 0x0002 /* Write only */
6008+#define PSB_PTE_RO 0x0004 /* Read only */
6009+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
6010+
6011+/*
6012+ *VDC registers and bits
6013+ */
6014+#define PSB_HWSTAM 0x2098
6015+#define PSB_INSTPM 0x20C0
6016+#define PSB_INT_IDENTITY_R 0x20A4
6017+#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
6018+#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
6019+#define _PSB_IRQ_SGX_FLAG (1<<18)
6020+#define _PSB_IRQ_MSVDX_FLAG (1<<19)
6021+#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
6022+#define PSB_INT_MASK_R 0x20A8
6023+#define PSB_INT_ENABLE_R 0x20A0
6024+#define PSB_PIPEASTAT 0x70024
6025+#define _PSB_VBLANK_INTERRUPT_ENABLE (1 << 17)
6026+#define _PSB_VBLANK_CLEAR (1 << 1)
6027+#define PSB_PIPEBSTAT 0x71024
6028+
6029+#define _PSB_MMU_ER_MASK 0x0001FF00
6030+#define _PSB_MMU_ER_HOST (1 << 16)
6031+#define GPIOA 0x5010
6032+#define GPIOB 0x5014
6033+#define GPIOC 0x5018
6034+#define GPIOD 0x501c
6035+#define GPIOE 0x5020
6036+#define GPIOF 0x5024
6037+#define GPIOG 0x5028
6038+#define GPIOH 0x502c
6039+#define GPIO_CLOCK_DIR_MASK (1 << 0)
6040+#define GPIO_CLOCK_DIR_IN (0 << 1)
6041+#define GPIO_CLOCK_DIR_OUT (1 << 1)
6042+#define GPIO_CLOCK_VAL_MASK (1 << 2)
6043+#define GPIO_CLOCK_VAL_OUT (1 << 3)
6044+#define GPIO_CLOCK_VAL_IN (1 << 4)
6045+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
6046+#define GPIO_DATA_DIR_MASK (1 << 8)
6047+#define GPIO_DATA_DIR_IN (0 << 9)
6048+#define GPIO_DATA_DIR_OUT (1 << 9)
6049+#define GPIO_DATA_VAL_MASK (1 << 10)
6050+#define GPIO_DATA_VAL_OUT (1 << 11)
6051+#define GPIO_DATA_VAL_IN (1 << 12)
6052+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
6053+
6054+#define VCLK_DIVISOR_VGA0 0x6000
6055+#define VCLK_DIVISOR_VGA1 0x6004
6056+#define VCLK_POST_DIV 0x6010
6057+
6058+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
6059+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
6060+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
6061+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
6062+#define PSB_COMM_USER_IRQ (1024 >> 2)
6063+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
6064+#define PSB_COMM_FW (2048 >> 2)
6065+
6066+#define PSB_UIRQ_VISTEST 1
6067+#define PSB_UIRQ_OOM_REPLY 2
6068+#define PSB_UIRQ_FIRE_TA_REPLY 3
6069+#define PSB_UIRQ_FIRE_RASTER_REPLY 4
6070+
6071+#define PSB_2D_SIZE (256*1024*1024)
6072+#define PSB_MAX_RELOC_PAGES 1024
6073+
6074+#define PSB_LOW_REG_OFFS 0x0204
6075+#define PSB_HIGH_REG_OFFS 0x0600
6076+
6077+#define PSB_NUM_VBLANKS 2
6078+
6079+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
6080+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
6081+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
6082+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
6083+#define PSB_COMM_FW (2048 >> 2)
6084+
6085+#define PSB_2D_SIZE (256*1024*1024)
6086+#define PSB_MAX_RELOC_PAGES 1024
6087+
6088+#define PSB_LOW_REG_OFFS 0x0204
6089+#define PSB_HIGH_REG_OFFS 0x0600
6090+
6091+#define PSB_NUM_VBLANKS 2
6092+#define PSB_WATCHDOG_DELAY (DRM_HZ / 10)
6093+
6094+#define PSB_PWR_STATE_MASK 0x0F
6095+#define PSB_PWR_ACTION_MASK 0xF0
6096+#define PSB_PWR_STATE_D0i0 0x1
6097+#define PSB_PWR_STATE_D0i3 0x2
6098+#define PSB_PWR_STATE_D3 0x3
6099+#define PSB_PWR_ACTION_DOWN	0x10	/* Need to power down */
6100+#define PSB_PWR_ACTION_UP	0x20	/* Need to power up */
6101+#define PSB_GRAPHICS_ISLAND 0x1
6102+#define PSB_VIDEO_ENC_ISLAND 0x2
6103+#define PSB_VIDEO_DEC_ISLAND 0x4
6104+#define LNC_TOPAZ_POWERON 0x1
6105+#define LNC_TOPAZ_POWEROFF 0x0
6106+
6107+/*
6108+ *User options.
6109+ */
6110+
6111+struct drm_psb_uopt {
6112+ int clock_gating;
6113+};
6114+
6115+/**
6116+ *struct psb_context
6117+ *
6118+ *@buffers: array of pre-allocated validate buffers.
6119+ *@used_buffers: number of buffers in @buffers array currently in use.
6120+ *@validate_list: buffers validated from user-space.
6121+ *@kern_validate_list: buffers validated from kernel-space.
6122+ *@fence_types: fence types to be used for fence creation.
6123+ *
6124+ *This structure is used during execbuf validation.
6125+ */
6126+
6127+struct psb_context {
6128+ struct psb_validate_buffer *buffers;
6129+ uint32_t used_buffers;
6130+ struct list_head validate_list;
6131+ struct list_head kern_validate_list;
6132+ uint32_t fence_types;
6133+ uint32_t val_seq;
6134+};
6135+
6136+struct psb_gtt {
6137+ struct drm_device *dev;
6138+ int initialized;
6139+ uint32_t gatt_start;
6140+ uint32_t gtt_start;
6141+ uint32_t gtt_phys_start;
6142+ unsigned gtt_pages;
6143+ unsigned gatt_pages;
6144+ uint32_t stolen_base;
6145+ uint32_t pge_ctl;
6146+ u16 gmch_ctrl;
6147+ unsigned long stolen_size;
6148+ unsigned long vram_stolen_size;
6149+ unsigned long ci_stolen_size;
6150+ unsigned long rar_stolen_size;
6151+ uint32_t *gtt_map;
6152+ struct rw_semaphore sem;
6153+};
6154+
6155+struct psb_use_base {
6156+ struct list_head head;
6157+ struct ttm_fence_object *fence;
6158+ unsigned int reg;
6159+ unsigned long offset;
6160+ unsigned int dm;
6161+};
6162+
6163+struct psb_validate_buffer;
6164+
6165+struct psb_msvdx_cmd_queue {
6166+ struct list_head head;
6167+ void *cmd;
6168+ unsigned long cmd_size;
6169+ uint32_t sequence;
6170+};
6171+
6172+
6173+struct drm_psb_private {
6174+
6175+ /*
6176+ *TTM Glue.
6177+ */
6178+
6179+ struct drm_global_reference mem_global_ref;
6180+ int has_global;
6181+
6182+ struct drm_device *dev;
6183+ struct ttm_object_device *tdev;
6184+ struct ttm_fence_device fdev;
6185+ struct ttm_bo_device bdev;
6186+ struct ttm_lock ttm_lock;
6187+ struct vm_operations_struct *ttm_vm_ops;
6188+ int has_fence_device;
6189+ int has_bo_device;
6190+
6191+ unsigned long chipset;
6192+
6193+ struct psb_xhw_buf resume_buf;
6194+ struct drm_psb_dev_info_arg dev_info;
6195+ struct drm_psb_uopt uopt;
6196+
6197+ struct psb_gtt *pg;
6198+
6199+ struct page *scratch_page;
6200+ struct page *comm_page;
6201+	/* Dropped volatile because its use is not recommended. */
6202+ uint32_t *comm;
6203+ uint32_t comm_mmu_offset;
6204+ uint32_t mmu_2d_offset;
6205+ uint32_t sequence[PSB_NUM_ENGINES];
6206+ uint32_t last_sequence[PSB_NUM_ENGINES];
6207+ int idle[PSB_NUM_ENGINES];
6208+ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
6209+ int engine_lockup_2d;
6210+
6211+ struct psb_mmu_driver *mmu;
6212+ struct psb_mmu_pd *pf_pd;
6213+
6214+ uint8_t *sgx_reg;
6215+ uint8_t *vdc_reg;
6216+ uint32_t gatt_free_offset;
6217+
6218+ /*
6219+ *MSVDX
6220+ */
6221+ int has_msvdx;
6222+ uint8_t *msvdx_reg;
6223+ int msvdx_needs_reset;
6224+ atomic_t msvdx_mmu_invaldc;
6225+
6226+ /*
6227+ *TOPAZ
6228+ */
6229+ uint8_t *topaz_reg;
6230+
6231+ void *topaz_mtx_reg_state;
6232+ struct ttm_buffer_object *topaz_mtx_data_mem;
6233+ uint32_t topaz_cur_codec;
6234+ uint32_t cur_mtx_data_size;
6235+ int topaz_needs_reset;
6236+ int has_topaz;
6237+#define TOPAZ_MAX_IDELTIME (HZ*30)
6238+ int topaz_start_idle;
6239+ unsigned long topaz_idle_start_jiffies;
6240+ /* used by lnc_topaz_lockup */
6241+ uint32_t topaz_current_sequence;
6242+ uint32_t topaz_last_sequence;
6243+ uint32_t topaz_finished_sequence;
6244+
6245+ /*
6246+ *Fencing / irq.
6247+ */
6248+
6249+ uint32_t sgx_irq_mask;
6250+ uint32_t sgx2_irq_mask;
6251+ uint32_t vdc_irq_mask;
6252+
6253+ spinlock_t irqmask_lock;
6254+ spinlock_t sequence_lock;
6255+ int fence0_irq_on;
6256+ int irq_enabled;
6257+ unsigned int irqen_count_2d;
6258+ wait_queue_head_t event_2d_queue;
6259+
6260+#ifdef FIX_TG_16
6261+ wait_queue_head_t queue_2d;
6262+ atomic_t lock_2d;
6263+ atomic_t ta_wait_2d;
6264+ atomic_t ta_wait_2d_irq;
6265+ atomic_t waiters_2d;
6266+#else
6267+ struct mutex mutex_2d;
6268+#endif
6269+ uint32_t msvdx_current_sequence;
6270+ uint32_t msvdx_last_sequence;
6271+ int fence2_irq_on;
6272+
6273+ /*
6274+ *Modesetting
6275+ */
6276+ struct psb_intel_mode_device mode_dev;
6277+
6278+ /*
6279+ *MSVDX Rendec Memory
6280+ */
6281+ struct ttm_buffer_object *ccb0;
6282+ uint32_t base_addr0;
6283+ struct ttm_buffer_object *ccb1;
6284+ uint32_t base_addr1;
6285+
6286+ /*
6287+ * CI share buffer
6288+ */
6289+ unsigned int ci_region_start;
6290+ unsigned int ci_region_size;
6291+
6292+ /*
6293+ *Memory managers
6294+ */
6295+
6296+ int have_vram;
6297+ int have_camera;
6298+ int have_tt;
6299+ int have_mem_mmu;
6300+ int have_mem_aper;
6301+ int have_mem_kernel;
6302+ int have_mem_pds;
6303+ int have_mem_rastgeom;
6304+ struct mutex temp_mem;
6305+
6306+ /*
6307+ *Relocation buffer mapping.
6308+ */
6309+
6310+ spinlock_t reloc_lock;
6311+ unsigned int rel_mapped_pages;
6312+ wait_queue_head_t rel_mapped_queue;
6313+
6314+ /*
6315+ *SAREA
6316+ */
6317+ struct drm_psb_sarea *sarea_priv;
6318+
6319+ /*
6320+ *LVDS info
6321+ */
6322+ int backlight_duty_cycle; /* restore backlight to this value */
6323+ bool panel_wants_dither;
6324+ struct drm_display_mode *panel_fixed_mode;
6325+
6326+/* MRST private data start */
6327+/* FIXME JLIU7: need to revisit */
6328+ bool sku_83;
6329+ bool sku_100;
6330+ bool sku_100L;
6331+ bool sku_bypass;
6332+ uint32_t iLVDS_enable;
6333+
6334+ /* pipe config register value */
6335+ uint32_t pipeconf;
6336+
6337+ /* plane control register value */
6338+ uint32_t dspcntr;
6339+
6340+/* MRST_DSI private data start */
6341+ /*
6342+ *MRST DSI info
6343+ */
6344+	/* The DSI device is ready */
6345+ bool dsi_device_ready;
6346+
6347+	/* The DPI panel is powered on */
6348+ bool dpi_panel_on;
6349+
6350+	/* The DBI panel is powered on */
6351+ bool dbi_panel_on;
6352+
6353+ /* The DPI display */
6354+ bool dpi;
6355+
6356+ /* status */
6357+ uint32_t videoModeFormat:2;
6358+ uint32_t laneCount:3;
6359+ uint32_t status_reserved:27;
6360+
6361+ /* dual display - DPI & DBI */
6362+ bool dual_display;
6363+
6364+ /* HS or LP transmission */
6365+ bool lp_transmission;
6366+
6367+ /* configuration phase */
6368+ bool config_phase;
6369+
6370+ /* DSI clock */
6371+ uint32_t RRate;
6372+ uint32_t DDR_Clock;
6373+ uint32_t DDR_Clock_Calculated;
6374+ uint32_t ClockBits;
6375+
6376+ /* DBI Buffer pointer */
6377+ u8 *p_DBI_commandBuffer_orig;
6378+ u8 *p_DBI_commandBuffer;
6379+ uint32_t DBI_CB_pointer;
6380+ u8 *p_DBI_dataBuffer_orig;
6381+ u8 *p_DBI_dataBuffer;
6382+ uint32_t DBI_DB_pointer;
6383+
6384+ /* DPI panel spec */
6385+ uint32_t pixelClock;
6386+ uint32_t HsyncWidth;
6387+ uint32_t HbackPorch;
6388+ uint32_t HfrontPorch;
6389+ uint32_t HactiveArea;
6390+ uint32_t VsyncWidth;
6391+ uint32_t VbackPorch;
6392+ uint32_t VfrontPorch;
6393+ uint32_t VactiveArea;
6394+ uint32_t bpp:5;
6395+ uint32_t Reserved:27;
6396+
6397+ /* DBI panel spec */
6398+ uint32_t dbi_pixelClock;
6399+ uint32_t dbi_HsyncWidth;
6400+ uint32_t dbi_HbackPorch;
6401+ uint32_t dbi_HfrontPorch;
6402+ uint32_t dbi_HactiveArea;
6403+ uint32_t dbi_VsyncWidth;
6404+ uint32_t dbi_VbackPorch;
6405+ uint32_t dbi_VfrontPorch;
6406+ uint32_t dbi_VactiveArea;
6407+ uint32_t dbi_bpp:5;
6408+ uint32_t dbi_Reserved:27;
6409+
6410+/* MRST_DSI private data end */
6411+
6412+ /*
6413+ *Register state
6414+ */
6415+ uint32_t saveDSPACNTR;
6416+ uint32_t saveDSPBCNTR;
6417+ uint32_t savePIPEACONF;
6418+ uint32_t savePIPEBCONF;
6419+ uint32_t savePIPEASRC;
6420+ uint32_t savePIPEBSRC;
6421+ uint32_t saveFPA0;
6422+ uint32_t saveFPA1;
6423+ uint32_t saveDPLL_A;
6424+ uint32_t saveDPLL_A_MD;
6425+ uint32_t saveHTOTAL_A;
6426+ uint32_t saveHBLANK_A;
6427+ uint32_t saveHSYNC_A;
6428+ uint32_t saveVTOTAL_A;
6429+ uint32_t saveVBLANK_A;
6430+ uint32_t saveVSYNC_A;
6431+ uint32_t saveDSPASTRIDE;
6432+ uint32_t saveDSPASIZE;
6433+ uint32_t saveDSPAPOS;
6434+ uint32_t saveDSPABASE;
6435+ uint32_t saveDSPASURF;
6436+ uint32_t saveFPB0;
6437+ uint32_t saveFPB1;
6438+ uint32_t saveDPLL_B;
6439+ uint32_t saveDPLL_B_MD;
6440+ uint32_t saveHTOTAL_B;
6441+ uint32_t saveHBLANK_B;
6442+ uint32_t saveHSYNC_B;
6443+ uint32_t saveVTOTAL_B;
6444+ uint32_t saveVBLANK_B;
6445+ uint32_t saveVSYNC_B;
6446+ uint32_t saveDSPBSTRIDE;
6447+ uint32_t saveDSPBSIZE;
6448+ uint32_t saveDSPBPOS;
6449+ uint32_t saveDSPBBASE;
6450+ uint32_t saveDSPBSURF;
6451+ uint32_t saveVCLK_DIVISOR_VGA0;
6452+ uint32_t saveVCLK_DIVISOR_VGA1;
6453+ uint32_t saveVCLK_POST_DIV;
6454+ uint32_t saveVGACNTRL;
6455+ uint32_t saveADPA;
6456+ uint32_t saveLVDS;
6457+ uint32_t saveDVOA;
6458+ uint32_t saveDVOB;
6459+ uint32_t saveDVOC;
6460+ uint32_t savePP_ON;
6461+ uint32_t savePP_OFF;
6462+ uint32_t savePP_CONTROL;
6463+ uint32_t savePP_CYCLE;
6464+ uint32_t savePFIT_CONTROL;
6465+ uint32_t savePaletteA[256];
6466+ uint32_t savePaletteB[256];
6467+ uint32_t saveBLC_PWM_CTL;
6468+ uint32_t saveCLOCKGATING;
6469+
6470+ /*
6471+ *Xhw
6472+ */
6473+
6474+ uint32_t *xhw;
6475+ struct ttm_buffer_object *xhw_bo;
6476+ struct ttm_bo_kmap_obj xhw_kmap;
6477+ struct list_head xhw_in;
6478+ spinlock_t xhw_lock;
6479+ atomic_t xhw_client;
6480+ struct drm_file *xhw_file;
6481+ wait_queue_head_t xhw_queue;
6482+ wait_queue_head_t xhw_caller_queue;
6483+ struct mutex xhw_mutex;
6484+ struct psb_xhw_buf *xhw_cur_buf;
6485+ int xhw_submit_ok;
6486+ int xhw_on;
6487+
6488+ /*
6489+ *Scheduling.
6490+ */
6491+
6492+ struct mutex reset_mutex;
6493+ struct psb_scheduler scheduler;
6494+ struct mutex cmdbuf_mutex;
6495+ uint32_t ta_mem_pages;
6496+ struct psb_ta_mem *ta_mem;
6497+ int force_ta_mem_load;
6498+ atomic_t val_seq;
6499+
6500+ /*
6501+ *TODO: change this to be per drm-context.
6502+ */
6503+
6504+ struct psb_context context;
6505+
6506+ /*
6507+ *Watchdog
6508+ */
6509+
6510+ spinlock_t watchdog_lock;
6511+ struct timer_list watchdog_timer;
6512+ struct work_struct watchdog_wq;
6513+ struct work_struct msvdx_watchdog_wq;
6514+ struct work_struct topaz_watchdog_wq;
6515+ int timer_available;
6516+
6517+ /*
6518+ *msvdx command queue
6519+ */
6520+ spinlock_t msvdx_lock;
6521+ struct mutex msvdx_mutex;
6522+ struct list_head msvdx_queue;
6523+ int msvdx_busy;
6524+ int msvdx_fw_loaded;
6525+ void *msvdx_fw;
6526+ int msvdx_fw_size;
6527+
6528+ /*
6529+ *topaz command queue
6530+ */
6531+ spinlock_t topaz_lock;
6532+ struct mutex topaz_mutex;
6533+ struct list_head topaz_queue;
6534+ int topaz_busy; /* 0 means topaz is free */
6535+ int topaz_fw_loaded;
6536+
6537+ /* topaz ccb data */
6538+	/* XXX: should the addr be stored in 32 bits? Is there a more compatible way? */
6539+ uint32_t topaz_ccb_buffer_addr;
6540+ uint32_t topaz_ccb_ctrl_addr;
6541+ uint32_t topaz_ccb_size;
6542+ uint32_t topaz_cmd_windex;
6543+ uint16_t topaz_cmd_seq;
6544+
6545+ uint32_t stored_initial_qp;
6546+ uint32_t topaz_dash_access_ctrl;
6547+
6548+ struct ttm_buffer_object *topaz_bo; /* 4K->2K/2K for writeback/sync */
6549+ struct ttm_bo_kmap_obj topaz_bo_kmap;
6550+ void *topaz_ccb_wb;
6551+ uint32_t topaz_wb_offset;
6552+ uint32_t *topaz_sync_addr;
6553+ uint32_t topaz_sync_offset;
6554+ uint32_t topaz_sync_cmd_seq;
6555+
6556+	struct rw_semaphore sgx_sem;	/* sgx is in use */
6557+	struct semaphore pm_sem;	/* pm action in progress */
6558+ unsigned char graphics_state;
6559+#ifdef OSPM_STAT
6560+ unsigned long gfx_d0i3_time;
6561+ unsigned long gfx_d0i0_time;
6562+ unsigned long gfx_d3_time;
6563+ unsigned long gfx_last_mode_change;
6564+ unsigned long gfx_d0i0_cnt;
6565+ unsigned long gfx_d0i3_cnt;
6566+ unsigned long gfx_d3_cnt;
6567+#endif
6568+
6569+ /* MSVDX OSPM */
6570+ unsigned char msvdx_state;
6571+ unsigned long msvdx_last_action;
6572+ uint32_t msvdx_clk_state;
6573+
6574+ /* TOPAZ OSPM */
6575+ unsigned char topaz_power_state;
6576+ unsigned long topaz_last_action;
6577+ uint32_t topaz_clk_state;
6578+};
6579+
6580+struct psb_fpriv {
6581+ struct ttm_object_file *tfile;
6582+};
6583+
6584+struct psb_mmu_driver;
6585+
6586+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
6587+extern int drm_pick_crtcs(struct drm_device *dev);
6588+
6589+
6590+static inline struct psb_fpriv *psb_fpriv(struct drm_file *file_priv)
6591+{
6592+ return (struct psb_fpriv *) file_priv->driver_priv;
6593+}
6594+
6595+static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
6596+{
6597+ return (struct drm_psb_private *) dev->dev_private;
6598+}
6599+
6600+/*
6601+ *TTM glue. psb_ttm_glue.c
6602+ */
6603+
6604+extern int psb_open(struct inode *inode, struct file *filp);
6605+extern int psb_release(struct inode *inode, struct file *filp);
6606+extern int psb_mmap(struct file *filp, struct vm_area_struct *vma);
6607+
6608+extern int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
6609+ struct drm_file *file_priv);
6610+extern int psb_verify_access(struct ttm_buffer_object *bo,
6611+ struct file *filp);
6612+extern ssize_t psb_ttm_read(struct file *filp, char __user *buf,
6613+ size_t count, loff_t *f_pos);
6614+extern ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
6615+ size_t count, loff_t *f_pos);
6616+extern int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
6617+ struct drm_file *file_priv);
6618+extern int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
6619+ struct drm_file *file_priv);
6620+extern int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
6621+ struct drm_file *file_priv);
6622+extern int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
6623+ struct drm_file *file_priv);
6624+extern int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
6625+ struct drm_file *file_priv);
6626+extern int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
6627+ struct drm_file *file_priv);
6628+extern int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
6629+ struct drm_file *file_priv);
6630+extern int psb_pl_create_ioctl(struct drm_device *dev, void *data,
6631+ struct drm_file *file_priv);
6632+extern int psb_extension_ioctl(struct drm_device *dev, void *data,
6633+ struct drm_file *file_priv);
6634+extern int psb_ttm_global_init(struct drm_psb_private *dev_priv);
6635+extern void psb_ttm_global_release(struct drm_psb_private *dev_priv);
6636+/*
6637+ *MMU stuff.
6638+ */
6639+
6640+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
6641+ int trap_pagefaults,
6642+ int invalid_type,
6643+ struct drm_psb_private *dev_priv);
6644+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
6645+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
6646+ *driver);
6647+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
6648+ uint32_t gtt_start, uint32_t gtt_pages);
6649+extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset);
6650+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
6651+ int trap_pagefaults,
6652+ int invalid_type);
6653+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
6654+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
6655+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
6656+ unsigned long address,
6657+ uint32_t num_pages);
6658+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
6659+ uint32_t start_pfn,
6660+ unsigned long address,
6661+ uint32_t num_pages, int type);
6662+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
6663+ unsigned long *pfn);
6664+
6665+/*
6666+ *Enable / disable MMU for different requestors.
6667+ */
6668+
6669+extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver,
6670+ uint32_t mask);
6671+extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
6672+ uint32_t mask);
6673+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
6674+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
6675+ unsigned long address, uint32_t num_pages,
6676+ uint32_t desired_tile_stride,
6677+ uint32_t hw_tile_stride, int type);
6678+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
6679+ unsigned long address, uint32_t num_pages,
6680+ uint32_t desired_tile_stride,
6681+ uint32_t hw_tile_stride);
6682+/*
6683+ *psb_sgx.c
6684+ */
6685+
6686+extern int psb_blit_sequence(struct drm_psb_private *dev_priv,
6687+ uint32_t sequence);
6688+extern void psb_init_2d(struct drm_psb_private *dev_priv);
6689+extern int psb_idle_2d(struct drm_device *dev);
6690+extern int psb_idle_3d(struct drm_device *dev);
6691+extern int psb_emit_2d_copy_blit(struct drm_device *dev,
6692+ uint32_t src_offset,
6693+ uint32_t dst_offset, uint32_t pages,
6694+ int direction);
6695+extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
6696+ struct drm_file *file_priv);
6697+extern int psb_reg_submit(struct drm_psb_private *dev_priv,
6698+ uint32_t *regs, unsigned int cmds);
6699+extern int psb_submit_copy_cmdbuf(struct drm_device *dev,
6700+ struct ttm_buffer_object *cmd_buffer,
6701+ unsigned long cmd_offset,
6702+ unsigned long cmd_size, int engine,
6703+ uint32_t *copy_buffer);
6704+
6705+extern void psb_init_disallowed(void);
6706+extern void psb_fence_or_sync(struct drm_file *file_priv,
6707+ uint32_t engine,
6708+ uint32_t fence_types,
6709+ uint32_t fence_flags,
6710+ struct list_head *list,
6711+ struct psb_ttm_fence_rep *fence_arg,
6712+ struct ttm_fence_object **fence_p);
6713+extern int psb_validate_kernel_buffer(struct psb_context *context,
6714+ struct ttm_buffer_object *bo,
6715+ uint32_t fence_class,
6716+ uint64_t set_flags,
6717+ uint64_t clr_flags);
6718+extern void psb_init_ospm(struct drm_psb_private *dev_priv);
6719+extern void psb_check_power_state(struct drm_device *dev, int devices);
6720+extern void psb_down_island_power(struct drm_device *dev, int islands);
6721+extern void psb_up_island_power(struct drm_device *dev, int islands);
6722+extern int psb_try_power_down_sgx(struct drm_device *dev);
6723+
6724+/*
6725+ *psb_irq.c
6726+ */
6727+
6728+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
6729+extern void psb_irq_preinstall(struct drm_device *dev);
6730+extern int psb_irq_postinstall(struct drm_device *dev);
6731+extern void psb_irq_uninstall(struct drm_device *dev);
6732+extern int psb_vblank_wait2(struct drm_device *dev,
6733+ unsigned int *sequence);
6734+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
6735+
6736+/*
6737+ *psb_fence.c
6738+ */
6739+
6740+extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
6741+extern void psb_2D_irq_off(struct drm_psb_private *dev_priv);
6742+extern void psb_2D_irq_on(struct drm_psb_private *dev_priv);
6743+extern uint32_t psb_fence_advance_sequence(struct drm_device *dev,
6744+ uint32_t class);
6745+extern int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
6746+ uint32_t fence_class,
6747+ uint32_t flags, uint32_t *sequence,
6748+ unsigned long *timeout_jiffies);
6749+extern void psb_fence_error(struct drm_device *dev,
6750+ uint32_t class,
6751+ uint32_t sequence, uint32_t type, int error);
6752+extern int psb_ttm_fence_device_init(struct ttm_fence_device *fdev);
6753+
6754+/*MSVDX stuff*/
6755+extern void psb_msvdx_irq_off(struct drm_psb_private *dev_priv);
6756+extern void psb_msvdx_irq_on(struct drm_psb_private *dev_priv);
6757+
6758+/*
6759+ *psb_gtt.c
6760+ */
6761+extern int psb_gtt_init(struct psb_gtt *pg, int resume);
6762+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
6763+ unsigned offset_pages, unsigned num_pages,
6764+ unsigned desired_tile_stride,
6765+ unsigned hw_tile_stride, int type);
6766+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
6767+ unsigned num_pages,
6768+ unsigned desired_tile_stride,
6769+ unsigned hw_tile_stride);
6770+
6771+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
6772+extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
6773+
6774+/*
6775+ *psb_fb.c
6776+ */
6777+extern int psbfb_probed(struct drm_device *dev);
6778+extern int psbfb_remove(struct drm_device *dev,
6779+ struct drm_framebuffer *fb);
6780+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
6781+ struct drm_file *file_priv);
6782+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
6783+ struct drm_file *file_priv);
6784+extern void psbfb_suspend(struct drm_device *dev);
6785+extern void psbfb_resume(struct drm_device *dev);
6786+
6787+/*
6788+ *psb_reset.c
6789+ */
6790+
6791+extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d);
6792+extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
6793+extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
6794+extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
6795+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
6796+
6797+/*
6798+ *psb_xhw.c
6799+ */
6800+
6801+extern int psb_xhw_ioctl(struct drm_device *dev, void *data,
6802+ struct drm_file *file_priv);
6803+extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
6804+ struct drm_file *file_priv);
6805+extern int psb_xhw_init(struct drm_device *dev);
6806+extern void psb_xhw_takedown(struct drm_psb_private *dev_priv);
6807+extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
6808+ struct drm_file *file_priv, int closing);
6809+extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
6810+ struct psb_xhw_buf *buf,
6811+ uint32_t fire_flags,
6812+ uint32_t hw_context,
6813+ uint32_t *cookie,
6814+ uint32_t *oom_cmds,
6815+ uint32_t num_oom_cmds,
6816+ uint32_t offset,
6817+ uint32_t engine, uint32_t flags);
6818+extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
6819+ struct psb_xhw_buf *buf,
6820+ uint32_t fire_flags);
6821+extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
6822+ struct psb_xhw_buf *buf, uint32_t w,
6823+ uint32_t h, uint32_t *hw_cookie,
6824+ uint32_t *bo_size, uint32_t *clear_p_start,
6825+ uint32_t *clear_num_pages);
6826+
6827+extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
6828+ struct psb_xhw_buf *buf);
6829+extern int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
6830+ struct psb_xhw_buf *buf, uint32_t *value);
6831+extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
6832+ struct psb_xhw_buf *buf,
6833+ uint32_t pages,
6834+			       uint32_t *hw_cookie,
6835+			       uint32_t *size,
6836+			       uint32_t *ta_min_size);
6837+extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
6838+ struct psb_xhw_buf *buf, uint32_t *cookie);
6839+extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
6840+ struct psb_xhw_buf *buf,
6841+ uint32_t *cookie,
6842+ uint32_t *bca,
6843+ uint32_t *rca, uint32_t *flags);
6844+extern int psb_xhw_vistest(struct drm_psb_private *dev_priv,
6845+ struct psb_xhw_buf *buf);
6846+extern int psb_xhw_handler(struct drm_psb_private *dev_priv);
6847+extern int psb_xhw_resume(struct drm_psb_private *dev_priv,
6848+ struct psb_xhw_buf *buf);
6849+extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
6850+ struct psb_xhw_buf *buf, uint32_t *cookie);
6851+extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
6852+ struct psb_xhw_buf *buf,
6853+ uint32_t flags,
6854+ uint32_t param_offset,
6855+ uint32_t pt_offset, uint32_t *hw_cookie);
6856+extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
6857+ struct psb_xhw_buf *buf);
6858+
6859+/*
6860+ *psb_schedule.c: HW bug fixing.
6861+ */
6862+
6863+#ifdef FIX_TG_16
6864+
6865+extern void psb_2d_unlock(struct drm_psb_private *dev_priv);
6866+extern void psb_2d_lock(struct drm_psb_private *dev_priv);
6867+extern int psb_2d_trylock(struct drm_psb_private *dev_priv);
6868+extern void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv);
6870+extern void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
6871+#else
6872+
6873+#define psb_2d_lock(_dev_priv) mutex_lock(&(_dev_priv)->mutex_2d)
6874+#define psb_2d_unlock(_dev_priv) mutex_unlock(&(_dev_priv)->mutex_2d)
6875+
6876+#endif
6877+
6878+/* modesetting */
6879+extern void psb_modeset_init(struct drm_device *dev);
6880+extern void psb_modeset_cleanup(struct drm_device *dev);
6881+
6882+
6883+/*
6884+ *Utilities
6885+ */
6886+#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
6887+
6888+static inline u32 MSG_READ32(uint port, uint offset)
6889+{
6890+ int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
6891+ outl(0x800000D0, 0xCF8);
6892+ outl(mcr, 0xCFC);
6893+ outl(0x800000D4, 0xCF8);
6894+	return inl(0xCFC);
6895+}
6896+static inline void MSG_WRITE32(uint port, uint offset, u32 value)
6897+{
6898+ int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
6899+ outl(0x800000D4, 0xCF8);
6900+	outl(value, 0xCFC);
6901+ outl(0x800000D0, 0xCF8);
6902+ outl(mcr, 0xCFC);
6903+}
6904+
6905+static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
6906+{
6907+ struct drm_psb_private *dev_priv = dev->dev_private;
6908+
6909+ return ioread32(dev_priv->vdc_reg + (reg));
6910+}
6911+
6912+#define REG_READ(reg) REGISTER_READ(dev, (reg))
6913+static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
6914+ uint32_t val)
6915+{
6916+ struct drm_psb_private *dev_priv = dev->dev_private;
6917+
6918+ iowrite32((val), dev_priv->vdc_reg + (reg));
6919+}
6920+
6921+#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
6922+
6923+static inline void REGISTER_WRITE16(struct drm_device *dev,
6924+ uint32_t reg, uint32_t val)
6925+{
6926+ struct drm_psb_private *dev_priv = dev->dev_private;
6927+
6928+ iowrite16((val), dev_priv->vdc_reg + (reg));
6929+}
6930+
6931+#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val))
6932+
6933+static inline void REGISTER_WRITE8(struct drm_device *dev,
6934+ uint32_t reg, uint32_t val)
6935+{
6936+ struct drm_psb_private *dev_priv = dev->dev_private;
6937+
6938+ iowrite8((val), dev_priv->vdc_reg + (reg));
6939+}
6940+
6941+#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
6942+
6943+#define PSB_ALIGN_TO(_val, _align) \
6944+ (((_val) + ((_align) - 1)) & ~((_align) - 1))
6945+#define PSB_WVDC32(_val, _offs) \
6946+ iowrite32(_val, dev_priv->vdc_reg + (_offs))
6947+#define PSB_RVDC32(_offs) \
6948+ ioread32(dev_priv->vdc_reg + (_offs))
6949+#define PSB_WSGX32(_val, _offs) \
6950+ iowrite32(_val, dev_priv->sgx_reg + (_offs))
6951+#define PSB_RSGX32(_offs) \
6952+ ioread32(dev_priv->sgx_reg + (_offs))
6953+#define PSB_WMSVDX32(_val, _offs) \
6954+ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
6955+#define PSB_RMSVDX32(_offs) \
6956+ ioread32(dev_priv->msvdx_reg + (_offs))
6957+
6958+#define PSB_ALPL(_val, _base) \
6959+ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
6960+#define PSB_ALPLM(_val, _base) \
6961+ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
6962+
6963+#define PSB_D_RENDER (1 << 16)
6964+
6965+#define PSB_D_GENERAL (1 << 0)
6966+#define PSB_D_INIT (1 << 1)
6967+#define PSB_D_IRQ (1 << 2)
6968+#define PSB_D_FW (1 << 3)
6969+#define PSB_D_PERF (1 << 4)
6970+#define PSB_D_TMP (1 << 5)
6971+#define PSB_D_PM (1 << 6)
6972+
6973+extern int drm_psb_debug;
6974+extern int drm_psb_no_fb;
6975+extern int drm_psb_disable_vsync;
6976+extern int drm_idle_check_interval;
6977+extern int drm_psb_ospm;
6978+
6979+#define PSB_DEBUG_FW(_fmt, _arg...) \
6980+ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
6981+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
6982+ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
6983+#define PSB_DEBUG_INIT(_fmt, _arg...) \
6984+ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
6985+#define PSB_DEBUG_IRQ(_fmt, _arg...) \
6986+ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
6987+#define PSB_DEBUG_RENDER(_fmt, _arg...) \
6988+ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
6989+#define PSB_DEBUG_PERF(_fmt, _arg...) \
6990+ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
6991+#define PSB_DEBUG_TMP(_fmt, _arg...) \
6992+ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
6993+#define PSB_DEBUG_PM(_fmt, _arg...) \
6994+ PSB_DEBUG(PSB_D_PM, _fmt, ##_arg)
6995+
6996+#if DRM_DEBUG_CODE
6997+#define PSB_DEBUG(_flag, _fmt, _arg...) \
6998+ do { \
6999+ if (unlikely((_flag) & drm_psb_debug)) \
7000+ printk(KERN_DEBUG \
7001+			       "[psb:0x%02x:%s] " _fmt, _flag, \
7002+ __func__ , ##_arg); \
7003+ } while (0)
7004+#else
7005+#define PSB_DEBUG(_flag, _fmt, _arg...) do { } while (0)
7006+#endif
7007+
7008+#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
7009+ ((dev)->pci_device == 0x8109))
7010+
7011+#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
7012+
7013+#endif
7014diff -uNr a/drivers/gpu/drm/psb/psb_fb.c b/drivers/gpu/drm/psb/psb_fb.c
7015--- a/drivers/gpu/drm/psb/psb_fb.c 1969-12-31 16:00:00.000000000 -0800
7016+++ b/drivers/gpu/drm/psb/psb_fb.c 2009-04-07 13:28:38.000000000 -0700
7017@@ -0,0 +1,1687 @@
7018+/**************************************************************************
7019+ * Copyright (c) 2007, Intel Corporation.
7020+ * All Rights Reserved.
7021+ *
7022+ * This program is free software; you can redistribute it and/or modify it
7023+ * under the terms and conditions of the GNU General Public License,
7024+ * version 2, as published by the Free Software Foundation.
7025+ *
7026+ * This program is distributed in the hope it will be useful, but WITHOUT
7027+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
7028+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
7029+ * more details.
7030+ *
7031+ * You should have received a copy of the GNU General Public License along with
7032+ * this program; if not, write to the Free Software Foundation, Inc.,
7033+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
7034+ *
7035+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
7036+ * develop this driver.
7037+ *
7038+ **************************************************************************/
7039+
7040+#include <linux/module.h>
7041+#include <linux/kernel.h>
7042+#include <linux/errno.h>
7043+#include <linux/string.h>
7044+#include <linux/mm.h>
7045+#include <linux/tty.h>
7046+#include <linux/slab.h>
7047+#include <linux/delay.h>
7048+#include <linux/fb.h>
7049+#include <linux/init.h>
7050+#include <linux/console.h>
7051+
7052+#include <drm/drmP.h>
7053+#include <drm/drm.h>
7054+#include <drm/drm_crtc.h>
7055+
7056+#include "psb_drv.h"
7057+#include "psb_intel_reg.h"
7058+#include "psb_intel_drv.h"
7059+#include "ttm/ttm_userobj_api.h"
7060+#include "psb_fb.h"
7061+#include "psb_sgx.h"
7062+
7063+static int fill_fb_bitfield(struct fb_var_screeninfo *var, int depth)
7064+{
7065+ switch (depth) {
7066+ case 8:
7067+ var->red.offset = 0;
7068+ var->green.offset = 0;
7069+ var->blue.offset = 0;
7070+ var->red.length = 8;
7071+ var->green.length = 8;
7072+ var->blue.length = 8;
7073+ var->transp.length = 0;
7074+ var->transp.offset = 0;
7075+ break;
7076+ case 15:
7077+ var->red.offset = 10;
7078+ var->green.offset = 5;
7079+ var->blue.offset = 0;
7080+ var->red.length = 5;
7081+ var->green.length = 5;
7082+ var->blue.length = 5;
7083+ var->transp.length = 1;
7084+ var->transp.offset = 15;
7085+ break;
7086+ case 16:
7087+ var->red.offset = 11;
7088+ var->green.offset = 5;
7089+ var->blue.offset = 0;
7090+ var->red.length = 5;
7091+ var->green.length = 6;
7092+ var->blue.length = 5;
7093+ var->transp.length = 0;
7094+ var->transp.offset = 0;
7095+ break;
7096+ case 24:
7097+ var->red.offset = 16;
7098+ var->green.offset = 8;
7099+ var->blue.offset = 0;
7100+ var->red.length = 8;
7101+ var->green.length = 8;
7102+ var->blue.length = 8;
7103+ var->transp.length = 0;
7104+ var->transp.offset = 0;
7105+ break;
7106+ case 32:
7107+ var->red.offset = 16;
7108+ var->green.offset = 8;
7109+ var->blue.offset = 0;
7110+ var->red.length = 8;
7111+ var->green.length = 8;
7112+ var->blue.length = 8;
7113+ var->transp.length = 8;
7114+ var->transp.offset = 24;
7115+ break;
7116+ default:
7117+ return -EINVAL;
7118+ }
7119+
7120+ return 0;
7121+}
7122+
7123+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
7124+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
7125+ struct drm_file *file_priv,
7126+ unsigned int *handle);
7127+
7128+static const struct drm_framebuffer_funcs psb_fb_funcs = {
7129+ .destroy = psb_user_framebuffer_destroy,
7130+ .create_handle = psb_user_framebuffer_create_handle,
7131+};
7132+
7133+struct psbfb_par {
7134+ struct drm_device *dev;
7135+ struct psb_framebuffer *psbfb;
7136+
7137+ int dpms_state;
7138+
7139+ int crtc_count;
7140+ /* crtc currently bound to this */
7141+ uint32_t crtc_ids[2];
7142+};
7143+
7144+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
7145+
7146+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
7147+ unsigned blue, unsigned transp,
7148+ struct fb_info *info)
7149+{
7150+ struct psbfb_par *par = info->par;
7151+ struct drm_framebuffer *fb = &par->psbfb->base;
7152+ uint32_t v;
7153+
7154+ if (!fb)
7155+ return -ENOMEM;
7156+
7157+ if (regno > 255)
7158+ return 1;
7159+
7160+#if 0 /* JB: do not drop; check that this works */
7161+ if (fb->bits_per_pixel == 8) {
7162+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
7163+ head) {
7164+ for (i = 0; i < par->crtc_count; i++)
7165+ if (crtc->base.id == par->crtc_ids[i])
7166+ break;
7167+
7168+ if (i == par->crtc_count)
7169+ continue;
7170+
7171+ if (crtc->funcs->gamma_set)
7172+ crtc->funcs->gamma_set(crtc, red, green,
7173+ blue, regno);
7174+ }
7175+ return 0;
7176+ }
7177+#endif
7178+
7179+ red = CMAP_TOHW(red, info->var.red.length);
7180+ blue = CMAP_TOHW(blue, info->var.blue.length);
7181+ green = CMAP_TOHW(green, info->var.green.length);
7182+ transp = CMAP_TOHW(transp, info->var.transp.length);
7183+
7184+ v = (red << info->var.red.offset) |
7185+ (green << info->var.green.offset) |
7186+ (blue << info->var.blue.offset) |
7187+ (transp << info->var.transp.offset);
7188+
7189+ if (regno < 16) {
7190+ switch (fb->bits_per_pixel) {
7191+ case 16:
7192+ ((uint32_t *) info->pseudo_palette)[regno] = v;
7193+ break;
7194+ case 24:
7195+ case 32:
7196+ ((uint32_t *) info->pseudo_palette)[regno] = v;
7197+ break;
7198+ }
7199+ }
7200+
7201+ return 0;
7202+}
7203+
7204+static struct drm_display_mode *psbfb_find_first_mode(struct
7205+ fb_var_screeninfo
7206+ *var,
7207+ struct fb_info *info,
7208+ struct drm_crtc
7209+ *crtc)
7210+{
7211+ struct psbfb_par *par = info->par;
7212+ struct drm_device *dev = par->dev;
7213+ struct drm_display_mode *drm_mode;
7214+ struct drm_display_mode *last_mode = NULL;
7215+ struct drm_connector *connector;
7216+ int found;
7217+
7218+ found = 0;
7219+ list_for_each_entry(connector, &dev->mode_config.connector_list,
7220+ head) {
7221+ if (connector->encoder && connector->encoder->crtc == crtc) {
7222+ found = 1;
7223+ break;
7224+ }
7225+ }
7226+
7227+ /* found no connector, bail */
7228+ if (!found)
7229+ return NULL;
7230+
7231+ found = 0;
7232+ list_for_each_entry(drm_mode, &connector->modes, head) {
7233+ if (drm_mode->hdisplay == var->xres &&
7234+ drm_mode->vdisplay == var->yres
7235+ && drm_mode->clock != 0) {
7236+ found = 1;
7237+ last_mode = drm_mode;
7238+ }
7239+ }
7240+
7241+	/* No matching mode found */
7242+ if (!found)
7243+ return NULL;
7244+
7245+ return last_mode;
7246+}
7247+
7248+static int psbfb_check_var(struct fb_var_screeninfo *var,
7249+ struct fb_info *info)
7250+{
7251+ struct psbfb_par *par = info->par;
7252+ struct psb_framebuffer *psbfb = par->psbfb;
7253+ struct drm_device *dev = par->dev;
7254+ int ret;
7255+ int depth;
7256+ int pitch;
7257+ int bpp = var->bits_per_pixel;
7258+
7259+ if (!psbfb)
7260+ return -ENOMEM;
7261+
7262+ if (!var->pixclock)
7263+ return -EINVAL;
7264+
7265+ /* don't support virtuals for now */
7266+ if (var->xres_virtual > var->xres)
7267+ return -EINVAL;
7268+
7269+ if (var->yres_virtual > var->yres)
7270+ return -EINVAL;
7271+
7272+ switch (bpp) {
7273+#if 0 /* JB: for now only support true color */
7274+ case 8:
7275+ depth = 8;
7276+ break;
7277+#endif
7278+ case 16:
7279+ depth = (var->green.length == 6) ? 16 : 15;
7280+ break;
7281+ case 24: /* assume this is 32bpp / depth 24 */
7282+ bpp = 32;
7283+ /* fallthrough */
7284+ case 32:
7285+ depth = (var->transp.length > 0) ? 32 : 24;
7286+ break;
7287+ default:
7288+ return -EINVAL;
7289+ }
7290+
7291+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
7292+
7293+ /* Check that we can resize */
7294+ if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) {
7295+#if 1
7296+ /* Need to resize the fb object.
7297+ * But the generic fbdev code doesn't really understand
7298+ * that we can do this. So disable for now.
7299+ */
7300+ DRM_INFO("Can't support requested size, too big!\n");
7301+ return -EINVAL;
7302+#else
7303+ struct drm_psb_private *dev_priv = psb_priv(dev);
7304+ struct ttm_bo_device *bdev = &dev_priv->bdev;
7305+ struct ttm_buffer_object *fbo = NULL;
7306+ struct ttm_bo_kmap_obj tmp_kmap;
7307+
7308+		/* a temporary BO to check if we could resize in set_par.
7309+ * Therefore no need to set NO_EVICT.
7310+ */
7311+ ret = ttm_buffer_object_create(bdev,
7312+ pitch * var->yres,
7313+ ttm_bo_type_kernel,
7314+ TTM_PL_FLAG_TT |
7315+ TTM_PL_FLAG_VRAM |
7316+ TTM_PL_FLAG_NO_EVICT,
7317+ 0, 0, &fbo);
7318+ if (ret || !fbo)
7319+ return -ENOMEM;
7320+
7321+ ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
7322+ if (ret) {
7323+ ttm_bo_usage_deref_unlocked(&fbo);
7324+ return -EINVAL;
7325+ }
7326+
7327+ ttm_bo_kunmap(&tmp_kmap);
7328+ /* destroy our current fbo! */
7329+ ttm_bo_usage_deref_unlocked(&fbo);
7330+#endif
7331+ }
7332+
7333+ ret = fill_fb_bitfield(var, depth);
7334+ if (ret)
7335+ return ret;
7336+
7337+#if 1
7338+ /* Here we walk the output mode list and look for modes. If we haven't
7339+ * got it, then bail. Not very nice, so this is disabled.
7340+ * In the set_par code, we create our mode based on the incoming
7341+ * parameters. Nicer, but may not be desired by some.
7342+ */
7343+ {
7344+ struct drm_crtc *crtc;
7345+ int i;
7346+
7347+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
7348+ head) {
7349+ struct psb_intel_crtc *psb_intel_crtc =
7350+ to_psb_intel_crtc(crtc);
7351+
7352+ for (i = 0; i < par->crtc_count; i++)
7353+ if (crtc->base.id == par->crtc_ids[i])
7354+ break;
7355+
7356+ if (i == par->crtc_count)
7357+ continue;
7358+
7359+ if (psb_intel_crtc->mode_set.num_connectors == 0)
7360+ continue;
7361+
7362+ if (!psbfb_find_first_mode(&info->var, info, crtc))
7363+ return -EINVAL;
7364+ }
7365+ }
7366+#else
7367+ (void) i;
7368+ (void) dev; /* silence warnings */
7369+ (void) crtc;
7370+ (void) drm_mode;
7371+ (void) connector;
7372+#endif
7373+
7374+ return 0;
7375+}
7376+
7377+/* this will let fbcon do the mode init */
7378+static int psbfb_set_par(struct fb_info *info)
7379+{
7380+ struct psbfb_par *par = info->par;
7381+ struct psb_framebuffer *psbfb = par->psbfb;
7382+ struct drm_framebuffer *fb = &psbfb->base;
7383+ struct drm_device *dev = par->dev;
7384+ struct fb_var_screeninfo *var = &info->var;
7385+ struct drm_psb_private *dev_priv = dev->dev_private;
7386+ struct drm_display_mode *drm_mode;
7387+ int pitch;
7388+ int depth;
7389+ int bpp = var->bits_per_pixel;
7390+
7391+ if (!fb)
7392+ return -ENOMEM;
7393+
7394+ switch (bpp) {
7395+ case 8:
7396+ depth = 8;
7397+ break;
7398+ case 16:
7399+ depth = (var->green.length == 6) ? 16 : 15;
7400+ break;
7401+ case 24: /* assume this is 32bpp / depth 24 */
7402+ bpp = 32;
7403+ /* fallthrough */
7404+ case 32:
7405+ depth = (var->transp.length > 0) ? 32 : 24;
7406+ break;
7407+ default:
7408+ DRM_ERROR("Illegal BPP\n");
7409+ return -EINVAL;
7410+ }
7411+
7412+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
7413+
7414+ if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) {
7415+#if 1
7416+ /* Need to resize the fb object.
7417+ * But the generic fbdev code doesn't really understand
7418+ * that we can do this. So disable for now.
7419+ */
7420+ DRM_INFO("Can't support requested size, too big!\n");
7421+ return -EINVAL;
7422+#else
7423+ int ret;
7424+ struct ttm_buffer_object *fbo = NULL, *tfbo;
7425+ struct ttm_bo_kmap_obj tmp_kmap, tkmap;
7426+
7427+ ret = ttm_buffer_object_create(bdev,
7428+ pitch * var->yres,
7429+ ttm_bo_type_kernel,
7430+ TTM_PL_FLAG_MEM_TT |
7431+ TTM_PL_FLAG_MEM_VRAM |
7432+ TTM_PL_FLAG_NO_EVICT,
7433+ 0, 0, &fbo);
7434+ if (ret || !fbo) {
7435+ DRM_ERROR
7436+ ("failed to allocate new resized framebuffer\n");
7437+ return -ENOMEM;
7438+ }
7439+
7440+ ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
7441+ if (ret) {
7442+ DRM_ERROR("failed to kmap framebuffer.\n");
7443+ ttm_bo_usage_deref_unlocked(&fbo);
7444+ return -EINVAL;
7445+ }
7446+
7447+ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n",
7448+ fb->width, fb->height, fb->offset, fbo);
7449+
7450+ /* set new screen base */
7451+ info->screen_base = tmp_kmap.virtual;
7452+
7453+ tkmap = fb->kmap;
7454+ fb->kmap = tmp_kmap;
7455+ ttm_bo_kunmap(&tkmap);
7456+
7457+ tfbo = fb->bo;
7458+ fb->bo = fbo;
7459+ ttm_bo_usage_deref_unlocked(&tfbo);
7460+#endif
7461+ }
7462+
7463+ psbfb->offset = psbfb->bo->offset - dev_priv->pg->gatt_start;
7464+ fb->width = var->xres;
7465+ fb->height = var->yres;
7466+ fb->bits_per_pixel = bpp;
7467+ fb->pitch = pitch;
7468+ fb->depth = depth;
7469+
7470+ info->fix.line_length = psbfb->base.pitch;
7471+ info->fix.visual =
7472+ (psbfb->base.depth ==
7473+ 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
7474+
7475+	/* some fbdev apps don't want these to change */
7476+ info->fix.smem_start = dev->mode_config.fb_base + psbfb->offset;
7477+
7478+#if 0
7479+ /* relates to resize - disable */
7480+ info->fix.smem_len = info->fix.line_length * var->yres;
7481+ info->screen_size = info->fix.smem_len; /* ??? */
7482+#endif
7483+
7484+ /* Should we walk the output's modelist or just create our own ???
7485+ * For now, we create and destroy a mode based on the incoming
7486+ * parameters. But there's commented out code below which scans
7487+ * the output list too.
7488+ */
7489+#if 1
7490+	/* This code is now in the for loop further down. */
7491+#endif
7492+
7493+ {
7494+ struct drm_crtc *crtc;
7495+ int ret;
7496+ int i;
7497+
7498+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
7499+ head) {
7500+ struct psb_intel_crtc *psb_intel_crtc =
7501+ to_psb_intel_crtc(crtc);
7502+
7503+ for (i = 0; i < par->crtc_count; i++)
7504+ if (crtc->base.id == par->crtc_ids[i])
7505+ break;
7506+
7507+ if (i == par->crtc_count)
7508+ continue;
7509+
7510+ if (psb_intel_crtc->mode_set.num_connectors == 0)
7511+ continue;
7512+
7513+#if 1
7514+ drm_mode =
7515+ psbfb_find_first_mode(&info->var, info, crtc);
7516+ if (!drm_mode)
7517+ DRM_ERROR("No matching mode found\n");
7518+ psb_intel_crtc->mode_set.mode = drm_mode;
7519+#endif
7520+
7521+#if 0 /* FIXME: TH */
7522+ if (crtc->fb == psb_intel_crtc->mode_set.fb) {
7523+#endif
7524+ DRM_DEBUG
7525+ ("setting mode on crtc %p with id %u\n",
7526+ crtc, crtc->base.id);
7527+ ret =
7528+ crtc->funcs->
7529+ set_config(&psb_intel_crtc->mode_set);
7530+ if (ret) {
7531+ DRM_ERROR("Failed setting mode\n");
7532+ return ret;
7533+ }
7534+#if 0
7535+ }
7536+#endif
7537+ }
7538+ DRM_DEBUG("Set par returned OK.\n");
7539+ return 0;
7540+ }
7541+
7542+ return 0;
7543+}
7544+
7545+static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
7546+ unsigned size)
7547+{
7548+ int ret = 0;
7549+ int i;
7550+ unsigned submit_size;
7551+
7552+ while (size > 0) {
7553+ submit_size = (size < 0x60) ? size : 0x60;
7554+ size -= submit_size;
7555+ ret = psb_2d_wait_available(dev_priv, submit_size);
7556+ if (ret)
7557+ return ret;
7558+
7559+ submit_size <<= 2;
7560+ for (i = 0; i < submit_size; i += 4) {
7561+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
7562+ }
7563+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
7564+ }
7565+ return 0;
7566+}
7567+
7568+static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
7569+ uint32_t dst_offset, uint32_t dst_stride,
7570+ uint32_t dst_format, uint16_t dst_x,
7571+ uint16_t dst_y, uint16_t size_x,
7572+ uint16_t size_y, uint32_t fill)
7573+{
7574+ uint32_t buffer[10];
7575+ uint32_t *buf;
7576+
7577+ buf = buffer;
7578+
7579+ *buf++ = PSB_2D_FENCE_BH;
7580+
7581+ *buf++ =
7582+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
7583+ PSB_2D_DST_STRIDE_SHIFT);
7584+ *buf++ = dst_offset;
7585+
7586+ *buf++ =
7587+ PSB_2D_BLIT_BH |
7588+ PSB_2D_ROT_NONE |
7589+ PSB_2D_COPYORDER_TL2BR |
7590+ PSB_2D_DSTCK_DISABLE |
7591+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
7592+
7593+ *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
7594+ *buf++ =
7595+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
7596+ PSB_2D_DST_YSTART_SHIFT);
7597+ *buf++ =
7598+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
7599+ PSB_2D_DST_YSIZE_SHIFT);
7600+ *buf++ = PSB_2D_FLUSH_BH;
7601+
7602+ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
7603+}
7604+
7605+static void psbfb_fillrect_accel(struct fb_info *info,
7606+ const struct fb_fillrect *r)
7607+{
7608+ struct psbfb_par *par = info->par;
7609+ struct psb_framebuffer *psbfb = par->psbfb;
7610+ struct drm_framebuffer *fb = &psbfb->base;
7611+ struct drm_psb_private *dev_priv = par->dev->dev_private;
7612+ uint32_t offset;
7613+ uint32_t stride;
7614+ uint32_t format;
7615+
7616+ if (!fb)
7617+ return;
7618+
7619+ offset = psbfb->offset;
7620+ stride = fb->pitch;
7621+
7622+ switch (fb->depth) {
7623+ case 8:
7624+ format = PSB_2D_DST_332RGB;
7625+ break;
7626+ case 15:
7627+ format = PSB_2D_DST_555RGB;
7628+ break;
7629+ case 16:
7630+ format = PSB_2D_DST_565RGB;
7631+ break;
7632+ case 24:
7633+ case 32:
7634+		/* this is wrong but since we don't do blending it's okay */
7635+ format = PSB_2D_DST_8888ARGB;
7636+ break;
7637+ default:
7638+ /* software fallback */
7639+ cfb_fillrect(info, r);
7640+ return;
7641+ }
7642+
7643+ psb_accel_2d_fillrect(dev_priv,
7644+ offset, stride, format,
7645+ r->dx, r->dy, r->width, r->height, r->color);
7646+}
7647+
7648+static void psbfb_fillrect(struct fb_info *info,
7649+ const struct fb_fillrect *rect)
7650+{
7651+ struct psbfb_par *par = info->par;
7652+ struct drm_device *dev = par->dev;
7653+ struct drm_psb_private *dev_priv = dev->dev_private;
7654+
7655+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
7656+ return;
7657+
7658+ if (info->flags & FBINFO_HWACCEL_DISABLED)
7659+ return cfb_fillrect(info, rect);
7660+
7661+ if (psb_2d_trylock(dev_priv)) {
7662+ psb_check_power_state(dev, PSB_DEVICE_SGX);
7663+ psbfb_fillrect_accel(info, rect);
7664+ psb_2d_unlock(dev_priv);
7665+ if (drm_psb_ospm && IS_MRST(dev))
7666+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
7667+ } else
7668+ cfb_fillrect(info, rect);
7669+}
7670+
7671+uint32_t psb_accel_2d_copy_direction(int xdir, int ydir)
7672+{
7673+ if (xdir < 0)
7674+ return (ydir <
7675+ 0) ? PSB_2D_COPYORDER_BR2TL :
7676+ PSB_2D_COPYORDER_TR2BL;
7677+ else
7678+ return (ydir <
7679+ 0) ? PSB_2D_COPYORDER_BL2TR :
7680+ PSB_2D_COPYORDER_TL2BR;
7681+}
7682+
7683+/*
7684+ * @srcOffset in bytes
7685+ * @srcStride in bytes
7686+ * @srcFormat psb 2D format defines
7687+ * @dstOffset in bytes
7688+ * @dstStride in bytes
7689+ * @dstFormat psb 2D format defines
7690+ * @srcX offset in pixels
7691+ * @srcY offset in pixels
7692+ * @dstX offset in pixels
7693+ * @dstY offset in pixels
7694+ * @sizeX of the copied area
7695+ * @sizeY of the copied area
7696+ */
7697+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
7698+ uint32_t src_offset, uint32_t src_stride,
7699+ uint32_t src_format, uint32_t dst_offset,
7700+ uint32_t dst_stride, uint32_t dst_format,
7701+ uint16_t src_x, uint16_t src_y,
7702+ uint16_t dst_x, uint16_t dst_y,
7703+ uint16_t size_x, uint16_t size_y)
7704+{
7705+ uint32_t blit_cmd;
7706+ uint32_t buffer[10];
7707+ uint32_t *buf;
7708+ uint32_t direction;
7709+
7710+ buf = buffer;
7711+
7712+ direction =
7713+ psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
7714+
7715+ if (direction == PSB_2D_COPYORDER_BR2TL ||
7716+ direction == PSB_2D_COPYORDER_TR2BL) {
7717+ src_x += size_x - 1;
7718+ dst_x += size_x - 1;
7719+ }
7720+ if (direction == PSB_2D_COPYORDER_BR2TL ||
7721+ direction == PSB_2D_COPYORDER_BL2TR) {
7722+ src_y += size_y - 1;
7723+ dst_y += size_y - 1;
7724+ }
7725+
7726+ blit_cmd =
7727+ PSB_2D_BLIT_BH |
7728+ PSB_2D_ROT_NONE |
7729+ PSB_2D_DSTCK_DISABLE |
7730+ PSB_2D_SRCCK_DISABLE |
7731+ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
7732+
7733+ *buf++ = PSB_2D_FENCE_BH;
7734+ *buf++ =
7735+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
7736+ PSB_2D_DST_STRIDE_SHIFT);
7737+ *buf++ = dst_offset;
7738+ *buf++ =
7739+ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
7740+ PSB_2D_SRC_STRIDE_SHIFT);
7741+ *buf++ = src_offset;
7742+ *buf++ =
7743+ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
7744+ (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
7745+ *buf++ = blit_cmd;
7746+ *buf++ =
7747+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
7748+ PSB_2D_DST_YSTART_SHIFT);
7749+ *buf++ =
7750+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
7751+ PSB_2D_DST_YSIZE_SHIFT);
7752+ *buf++ = PSB_2D_FLUSH_BH;
7753+
7754+ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
7755+}
7756+
7757+static void psbfb_copyarea_accel(struct fb_info *info,
7758+ const struct fb_copyarea *a)
7759+{
7760+ struct psbfb_par *par = info->par;
7761+ struct psb_framebuffer *psbfb = par->psbfb;
7762+ struct drm_framebuffer *fb = &psbfb->base;
7763+ struct drm_psb_private *dev_priv = par->dev->dev_private;
7764+ uint32_t offset;
7765+ uint32_t stride;
7766+ uint32_t src_format;
7767+ uint32_t dst_format;
7768+
7769+ if (!fb)
7770+ return;
7771+
7772+ offset = psbfb->offset;
7773+ stride = fb->pitch;
7774+
7775+ switch (fb->depth) {
7776+ case 8:
7777+ src_format = PSB_2D_SRC_332RGB;
7778+ dst_format = PSB_2D_DST_332RGB;
7779+ break;
7780+ case 15:
7781+ src_format = PSB_2D_SRC_555RGB;
7782+ dst_format = PSB_2D_DST_555RGB;
7783+ break;
7784+ case 16:
7785+ src_format = PSB_2D_SRC_565RGB;
7786+ dst_format = PSB_2D_DST_565RGB;
7787+ break;
7788+ case 24:
7789+ case 32:
7790+ /* this is wrong but since we don't do blending it's okay */
7791+ src_format = PSB_2D_SRC_8888ARGB;
7792+ dst_format = PSB_2D_DST_8888ARGB;
7793+ break;
7794+ default:
7795+ /* software fallback */
7796+ cfb_copyarea(info, a);
7797+ return;
7798+ }
7799+
7800+ psb_accel_2d_copy(dev_priv,
7801+ offset, stride, src_format,
7802+ offset, stride, dst_format,
7803+ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
7804+}
7805+
7806+static void psbfb_copyarea(struct fb_info *info,
7807+ const struct fb_copyarea *region)
7808+{
7809+ struct psbfb_par *par = info->par;
7810+ struct drm_device *dev = par->dev;
7811+ struct drm_psb_private *dev_priv = dev->dev_private;
7812+
7813+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
7814+ return;
7815+
7816+ if (info->flags & FBINFO_HWACCEL_DISABLED)
7817+ return cfb_copyarea(info, region);
7818+
7819+ if (psb_2d_trylock(dev_priv)) {
7820+ psb_check_power_state(dev, PSB_DEVICE_SGX);
7821+ psbfb_copyarea_accel(info, region);
7822+ psb_2d_unlock(dev_priv);
7823+ if (drm_psb_ospm && IS_MRST(dev))
7824+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
7825+ } else
7826+ cfb_copyarea(info, region);
7827+}
7828+
7829+void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
7830+{
7831+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
7832+ return;
7833+
7834+ cfb_imageblit(info, image);
7835+}
7836+
7837+static void psbfb_onoff(struct fb_info *info, int dpms_mode)
7838+{
7839+ struct psbfb_par *par = info->par;
7840+ struct drm_device *dev = par->dev;
7841+ struct drm_crtc *crtc;
7842+ struct drm_encoder *encoder;
7843+ int i;
7844+
7845+ /*
7846+ * For each CRTC in this fb, find all associated encoders
7847+ * and turn them off, then turn off the CRTC.
7848+ */
7849+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7850+ struct drm_crtc_helper_funcs *crtc_funcs =
7851+ crtc->helper_private;
7852+
7853+ for (i = 0; i < par->crtc_count; i++)
7854+ if (crtc->base.id == par->crtc_ids[i])
7855+ break;
7856+
7857+ if (i == par->crtc_count)
7858+ continue;
7859+
7860+ if (dpms_mode == DRM_MODE_DPMS_ON)
7861+ crtc_funcs->dpms(crtc, dpms_mode);
7862+
7863+ /* Found a CRTC on this fb, now find encoders */
7864+ list_for_each_entry(encoder,
7865+ &dev->mode_config.encoder_list, head) {
7866+ if (encoder->crtc == crtc) {
7867+ struct drm_encoder_helper_funcs
7868+ *encoder_funcs;
7869+ encoder_funcs = encoder->helper_private;
7870+ encoder_funcs->dpms(encoder, dpms_mode);
7871+ }
7872+ }
7873+
7874+ if (dpms_mode == DRM_MODE_DPMS_OFF)
7875+ crtc_funcs->dpms(crtc, dpms_mode);
7876+ }
7877+}
7878+
7879+static int psbfb_blank(int blank_mode, struct fb_info *info)
7880+{
7881+ struct psbfb_par *par = info->par;
7882+
7883+ par->dpms_state = blank_mode;
7884+ PSB_DEBUG_PM("psbfb_blank \n");
7885+ switch (blank_mode) {
7886+ case FB_BLANK_UNBLANK:
7887+ psbfb_onoff(info, DRM_MODE_DPMS_ON);
7888+ break;
7889+ case FB_BLANK_NORMAL:
7890+ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
7891+ break;
7892+ case FB_BLANK_HSYNC_SUSPEND:
7893+ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
7894+ break;
7895+ case FB_BLANK_VSYNC_SUSPEND:
7896+ psbfb_onoff(info, DRM_MODE_DPMS_SUSPEND);
7897+ break;
7898+ case FB_BLANK_POWERDOWN:
7899+ psbfb_onoff(info, DRM_MODE_DPMS_OFF);
7900+ break;
7901+ }
7902+
7903+ return 0;
7904+}
7905+
7906+
7907+static int psbfb_kms_off(struct drm_device *dev, int suspend)
7908+{
7909+ struct drm_framebuffer *fb = 0;
7910+ DRM_DEBUG("psbfb_kms_off_ioctl\n");
7911+
7912+ mutex_lock(&dev->mode_config.mutex);
7913+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
7914+ struct fb_info *info = fb->fbdev;
7915+
7916+ if (suspend)
7917+ fb_set_suspend(info, 1);
7918+ }
7919+ mutex_unlock(&dev->mode_config.mutex);
7920+
7921+ return 0;
7922+}
7923+
7924+int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
7925+ struct drm_file *file_priv)
7926+{
7927+ int ret;
7928+
7929+ if (drm_psb_no_fb)
7930+ return 0;
7931+ acquire_console_sem();
7932+ ret = psbfb_kms_off(dev, 0);
7933+ release_console_sem();
7934+
7935+ return ret;
7936+}
7937+
7938+static int psbfb_kms_on(struct drm_device *dev, int resume)
7939+{
7940+ struct drm_framebuffer *fb = 0;
7941+
7942+ DRM_DEBUG("psbfb_kms_on_ioctl\n");
7943+
7944+ mutex_lock(&dev->mode_config.mutex);
7945+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
7946+ struct fb_info *info = fb->fbdev;
7947+
7948+ if (resume)
7949+ fb_set_suspend(info, 0);
7950+
7951+ }
7952+ mutex_unlock(&dev->mode_config.mutex);
7953+
7954+ return 0;
7955+}
7956+
7957+int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
7958+ struct drm_file *file_priv)
7959+{
7960+ int ret;
7961+
7962+ if (drm_psb_no_fb)
7963+ return 0;
7964+ acquire_console_sem();
7965+ ret = psbfb_kms_on(dev, 0);
7966+ release_console_sem();
7967+ drm_helper_disable_unused_functions(dev);
7968+ return ret;
7969+}
7970+
7971+void psbfb_suspend(struct drm_device *dev)
7972+{
7973+ acquire_console_sem();
7974+ psbfb_kms_off(dev, 1);
7975+ release_console_sem();
7976+}
7977+
7978+void psbfb_resume(struct drm_device *dev)
7979+{
7980+ acquire_console_sem();
7981+ psbfb_kms_on(dev, 1);
7982+ release_console_sem();
7983+ drm_helper_disable_unused_functions(dev);
7984+}
7985+
7986+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
7987+{
7988+ struct psbfb_par *par = info->par;
7989+ struct psb_framebuffer *psbfb = par->psbfb;
7990+ struct ttm_buffer_object *bo = psbfb->bo;
7991+ unsigned long size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
7992+ unsigned long offset = vma->vm_pgoff;
7993+
7994+ if (vma->vm_pgoff != 0)
7995+ return -EINVAL;
7996+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
7997+ return -EINVAL;
7998+ if (offset + size > bo->num_pages)
7999+ return -EINVAL;
8000+
8001+ mutex_lock(&bo->mutex);
8002+ if (!psbfb->addr_space)
8003+ psbfb->addr_space = vma->vm_file->f_mapping;
8004+ mutex_unlock(&bo->mutex);
8005+
8006+ return ttm_fbdev_mmap(vma, bo);
8007+}
8008+
8009+int psbfb_sync(struct fb_info *info)
8010+{
8011+ struct psbfb_par *par = info->par;
8012+ struct drm_psb_private *dev_priv = par->dev->dev_private;
8013+
8014+ if (psb_2d_trylock(dev_priv)) {
8015+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)
8016+ psb_idle_2d(par->dev);
8017+ psb_2d_unlock(dev_priv);
8018+ } else
8019+ udelay(5);
8020+
8021+ return 0;
8022+}
8023+
8024+static struct fb_ops psbfb_ops = {
8025+ .owner = THIS_MODULE,
8026+ .fb_check_var = psbfb_check_var,
8027+ .fb_set_par = psbfb_set_par,
8028+ .fb_setcolreg = psbfb_setcolreg,
8029+ .fb_fillrect = psbfb_fillrect,
8030+ .fb_copyarea = psbfb_copyarea,
8031+ .fb_imageblit = psbfb_imageblit,
8032+ .fb_mmap = psbfb_mmap,
8033+ .fb_sync = psbfb_sync,
8034+ .fb_blank = psbfb_blank,
8035+};
8036+
8037+static struct drm_mode_set panic_mode;
8038+
8039+int psbfb_panic(struct notifier_block *n, unsigned long unused,
8040+ void *panic_str)
8041+{
8042+ DRM_ERROR("panic occurred, switching back to text console\n");
8043+ drm_crtc_helper_set_config(&panic_mode);
8044+
8045+ return 0;
8046+}
8047+EXPORT_SYMBOL(psbfb_panic);
8048+
8049+static struct notifier_block paniced = {
8050+ .notifier_call = psbfb_panic,
8051+};
8052+
8053+
8054+static struct drm_framebuffer *psb_framebuffer_create
8055+ (struct drm_device *dev, struct drm_mode_fb_cmd *r,
8056+ void *mm_private)
8057+{
8058+ struct psb_framebuffer *fb;
8059+ int ret;
8060+
8061+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
8062+ if (!fb)
8063+ return NULL;
8064+
8065+ ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
8066+
8067+ if (ret)
8068+ goto err;
8069+
8070+ drm_helper_mode_fill_fb_struct(&fb->base, r);
8071+
8072+ fb->bo = mm_private;
8073+
8074+ return &fb->base;
8075+
8076+err:
8077+ kfree(fb);
8078+ return NULL;
8079+}
8080+
8081+static struct drm_framebuffer *psb_user_framebuffer_create
8082+ (struct drm_device *dev, struct drm_file *filp,
8083+ struct drm_mode_fb_cmd *r)
8084+{
8085+ struct ttm_buffer_object *bo = NULL;
8086+ uint64_t size;
8087+
8088+ bo = ttm_buffer_object_lookup(psb_fpriv(filp)->tfile, r->handle);
8089+ if (!bo)
8090+ return NULL;
8091+
8092+ /* JB: TODO not drop, make smarter */
8093+ size = ((uint64_t) bo->num_pages) << PAGE_SHIFT;
8094+ if (size < r->width * r->height * 4)
8095+ return NULL;
8096+
8097+ /* JB: TODO not drop, refcount buffer */
8098+ return psb_framebuffer_create(dev, r, bo);
8099+}
8100+
8101+int psbfb_create(struct drm_device *dev, uint32_t fb_width,
8102+ uint32_t fb_height, uint32_t surface_width,
8103+ uint32_t surface_height, struct psb_framebuffer **psbfb_p)
8104+{
8105+ struct fb_info *info;
8106+ struct psbfb_par *par;
8107+ struct drm_framebuffer *fb;
8108+ struct psb_framebuffer *psbfb;
8109+ struct ttm_bo_kmap_obj tmp_kmap;
8110+ struct drm_mode_fb_cmd mode_cmd;
8111+ struct device *device = &dev->pdev->dev;
8112+ struct ttm_bo_device *bdev = &psb_priv(dev)->bdev;
8113+ int size, aligned_size, ret;
8114+ struct ttm_buffer_object *fbo = NULL;
8115+ bool is_iomem;
8116+
8117+ mode_cmd.width = surface_width; /* crtc->desired_mode->hdisplay; */
8118+ mode_cmd.height = surface_height; /* crtc->desired_mode->vdisplay; */
8119+
8120+ mode_cmd.bpp = 32;
8121+ mode_cmd.pitch = mode_cmd.width * ((mode_cmd.bpp + 1) / 8);
8122+ mode_cmd.depth = 24;
8123+
8124+ size = mode_cmd.pitch * mode_cmd.height;
8125+ aligned_size = ALIGN(size, PAGE_SIZE);
8126+ ret = ttm_buffer_object_create(bdev,
8127+ aligned_size,
8128+ ttm_bo_type_kernel,
8129+ TTM_PL_FLAG_TT |
8130+ TTM_PL_FLAG_VRAM |
8131+ TTM_PL_FLAG_NO_EVICT,
8132+ 0, 0, 0, NULL, &fbo);
8133+
8134+ if (unlikely(ret != 0)) {
8135+ DRM_ERROR("failed to allocate framebuffer.\n");
8136+ return -ENOMEM;
8137+ }
8138+
8139+ mutex_lock(&dev->struct_mutex);
8140+ fb = psb_framebuffer_create(dev, &mode_cmd, fbo);
8141+ if (!fb) {
8142+ DRM_ERROR("failed to allocate fb.\n");
8143+ ret = -ENOMEM;
8144+ goto out_err0;
8145+ }
8146+ psbfb = to_psb_fb(fb);
8147+ psbfb->bo = fbo;
8148+
8149+ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
8150+ info = framebuffer_alloc(sizeof(struct psbfb_par), device);
8151+ if (!info) {
8152+ ret = -ENOMEM;
8153+ goto out_err1;
8154+ }
8155+
8156+ par = info->par;
8157+ par->psbfb = psbfb;
8158+
8159+ strcpy(info->fix.id, "psbfb");
8160+ info->fix.type = FB_TYPE_PACKED_PIXELS;
8161+ info->fix.visual = FB_VISUAL_TRUECOLOR;
8162+ info->fix.type_aux = 0;
8163+ info->fix.xpanstep = 1; /* doing it in hw */
8164+ info->fix.ypanstep = 1; /* doing it in hw */
8165+ info->fix.ywrapstep = 0;
8166+ info->fix.accel = FB_ACCEL_I830;
8167+ info->fix.type_aux = 0;
8168+
8169+ info->flags = FBINFO_DEFAULT;
8170+
8171+ info->fbops = &psbfb_ops;
8172+
8173+ info->fix.line_length = fb->pitch;
8174+ info->fix.smem_start =
8175+ dev->mode_config.fb_base + psbfb->bo->offset;
8176+ info->fix.smem_len = size;
8177+
8178+ info->flags = FBINFO_DEFAULT;
8179+
8180+ ret = ttm_bo_kmap(psbfb->bo, 0, psbfb->bo->num_pages, &tmp_kmap);
8181+ if (ret) {
8182+ DRM_ERROR("error mapping fb: %d\n", ret);
8183+ goto out_err2;
8184+ }
8185+
8186+
8187+ info->screen_base = ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem);
8188+ info->screen_size = size;
8189+
8190+ if (is_iomem)
8191+ memset_io(info->screen_base, 0, size);
8192+ else
8193+ memset(info->screen_base, 0, size);
8194+
8195+ info->pseudo_palette = fb->pseudo_palette;
8196+ info->var.xres_virtual = fb->width;
8197+ info->var.yres_virtual = fb->height;
8198+ info->var.bits_per_pixel = fb->bits_per_pixel;
8199+ info->var.xoffset = 0;
8200+ info->var.yoffset = 0;
8201+ info->var.activate = FB_ACTIVATE_NOW;
8202+ info->var.height = -1;
8203+ info->var.width = -1;
8204+
8205+ info->var.xres = fb_width;
8206+ info->var.yres = fb_height;
8207+
8208+ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
8209+ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
8210+
8211+ info->pixmap.size = 64 * 1024;
8212+ info->pixmap.buf_align = 8;
8213+ info->pixmap.access_align = 32;
8214+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
8215+ info->pixmap.scan_align = 1;
8216+
8217+ DRM_DEBUG("fb depth is %d\n", fb->depth);
8218+ DRM_DEBUG(" pitch is %d\n", fb->pitch);
8219+ fill_fb_bitfield(&info->var, fb->depth);
8220+
8221+ fb->fbdev = info;
8222+
8223+ par->dev = dev;
8224+
8225+ /* To allow resizing without swapping buffers */
8226+ printk(KERN_INFO"allocated %dx%d fb: 0x%08lx, bo %p\n",
8227+ psbfb->base.width,
8228+ psbfb->base.height, psbfb->bo->offset, psbfb->bo);
8229+
8230+ if (psbfb_p)
8231+ *psbfb_p = psbfb;
8232+
8233+ mutex_unlock(&dev->struct_mutex);
8234+
8235+ return 0;
8236+out_err2:
8237+ unregister_framebuffer(info);
8238+out_err1:
8239+ fb->funcs->destroy(fb);
8240+out_err0:
8241+ mutex_unlock(&dev->struct_mutex);
8242+ ttm_bo_unref(&fbo);
8243+ return ret;
8244+}
8245+
8246+static int psbfb_multi_fb_probe_crtc(struct drm_device *dev,
8247+ struct drm_crtc *crtc)
8248+{
8249+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
8250+ struct drm_framebuffer *fb = crtc->fb;
8251+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
8252+ struct drm_connector *connector;
8253+ struct fb_info *info;
8254+ struct psbfb_par *par;
8255+ struct drm_mode_set *modeset;
8256+ unsigned int width, height;
8257+ int new_fb = 0;
8258+ int ret, i, conn_count;
8259+
8260+ if (!drm_helper_crtc_in_use(crtc))
8261+ return 0;
8262+
8263+ if (!crtc->desired_mode)
8264+ return 0;
8265+
8266+ width = crtc->desired_mode->hdisplay;
8267+ height = crtc->desired_mode->vdisplay;
8268+
8269+ /* is there an fb bound to this crtc already */
8270+ if (!psb_intel_crtc->mode_set.fb) {
8271+ ret =
8272+ psbfb_create(dev, width, height, width, height,
8273+ &psbfb);
8274+ if (ret)
8275+ return -EINVAL;
8276+ new_fb = 1;
8277+ } else {
8278+ fb = psb_intel_crtc->mode_set.fb;
8279+ if ((fb->width < width) || (fb->height < height))
8280+ return -EINVAL;
8281+ }
8282+
8283+ info = fb->fbdev;
8284+ par = info->par;
8285+
8286+ modeset = &psb_intel_crtc->mode_set;
8287+ modeset->fb = fb;
8288+ conn_count = 0;
8289+ list_for_each_entry(connector, &dev->mode_config.connector_list,
8290+ head) {
8291+ if (connector->encoder)
8292+ if (connector->encoder->crtc == modeset->crtc) {
8293+ modeset->connectors[conn_count] =
8294+ connector;
8295+ conn_count++;
8296+ if (conn_count > INTELFB_CONN_LIMIT)
8297+ BUG();
8298+ }
8299+ }
8300+
8301+ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
8302+ modeset->connectors[i] = NULL;
8303+
8304+ par->crtc_ids[0] = crtc->base.id;
8305+
8306+ modeset->num_connectors = conn_count;
8307+ if (modeset->mode != modeset->crtc->desired_mode)
8308+ modeset->mode = modeset->crtc->desired_mode;
8309+
8310+ par->crtc_count = 1;
8311+
8312+ if (new_fb) {
8313+ info->var.pixclock = -1;
8314+ if (register_framebuffer(info) < 0)
8315+ return -EINVAL;
8316+ } else
8317+ psbfb_set_par(info);
8318+
8319+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
8320+ info->fix.id);
8321+
8322+ /* Switch back to kernel console on panic */
8323+ panic_mode = *modeset;
8324+ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
8325+ printk(KERN_INFO "registered panic notifier\n");
8326+
8327+ return 0;
8328+}
8329+
8330+static int psbfb_multi_fb_probe(struct drm_device *dev)
8331+{
8332+
8333+ struct drm_crtc *crtc;
8334+ int ret = 0;
8335+
8336+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8337+ ret = psbfb_multi_fb_probe_crtc(dev, crtc);
8338+ if (ret)
8339+ return ret;
8340+ }
8341+ return ret;
8342+}
8343+
8344+static int psbfb_single_fb_probe(struct drm_device *dev)
8345+{
8346+ struct drm_crtc *crtc;
8347+ struct drm_connector *connector;
8348+ unsigned int fb_width = (unsigned) -1, fb_height = (unsigned) -1;
8349+ unsigned int surface_width = 0, surface_height = 0;
8350+ int new_fb = 0;
8351+ int crtc_count = 0;
8352+ int ret, i, conn_count = 0;
8353+ struct fb_info *info;
8354+ struct psbfb_par *par;
8355+ struct drm_mode_set *modeset = NULL;
8356+ struct drm_framebuffer *fb = NULL;
8357+ struct psb_framebuffer *psbfb = NULL;
8358+
8359+ /* first up get a count of crtcs now in use and
8360+ * new min/maxes width/heights */
8361+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8362+ if (drm_helper_crtc_in_use(crtc)) {
8363+ if (crtc->desired_mode) {
8364+ fb = crtc->fb;
8365+ if (crtc->desired_mode->hdisplay <
8366+ fb_width)
8367+ fb_width =
8368+ crtc->desired_mode->hdisplay;
8369+
8370+ if (crtc->desired_mode->vdisplay <
8371+ fb_height)
8372+ fb_height =
8373+ crtc->desired_mode->vdisplay;
8374+
8375+ if (crtc->desired_mode->hdisplay >
8376+ surface_width)
8377+ surface_width =
8378+ crtc->desired_mode->hdisplay;
8379+
8380+ if (crtc->desired_mode->vdisplay >
8381+ surface_height)
8382+ surface_height =
8383+ crtc->desired_mode->vdisplay;
8384+
8385+ }
8386+ crtc_count++;
8387+ }
8388+ }
8389+
8390+ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
8391+ /* hmm everyone went away - assume VGA cable just fell out
8392+ and will come back later. */
8393+ return 0;
8394+ }
8395+
8396+ /* do we have an fb already? */
8397+ if (list_empty(&dev->mode_config.fb_kernel_list)) {
8398+ /* create an fb if we don't have one */
8399+ ret =
8400+ psbfb_create(dev, fb_width, fb_height, surface_width,
8401+ surface_height, &psbfb);
8402+ if (ret)
8403+ return -EINVAL;
8404+ new_fb = 1;
8405+ fb = &psbfb->base;
8406+ } else {
8407+ fb = list_first_entry(&dev->mode_config.fb_kernel_list,
8408+ struct drm_framebuffer, filp_head);
8409+
8410+ /* if someone hotplugs something bigger than we have already
8411+ * allocated, we are pwned. As really we can't resize an
8412+ * fbdev that is in the wild currently due to fbdev not really
8413+ * being designed for the lower layers moving stuff around
8414+ * under it. - so in the grand style of things - punt. */
8415+ if ((fb->width < surface_width)
8416+ || (fb->height < surface_height)) {
8417+ DRM_ERROR
8418+ ("Framebuffer not large enough to scale"
8419+ " console onto.\n");
8420+ return -EINVAL;
8421+ }
8422+ }
8423+
8424+ info = fb->fbdev;
8425+ par = info->par;
8426+
8427+ crtc_count = 0;
8428+ /* okay we need to setup new connector sets in the crtcs */
8429+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8430+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
8431+ modeset = &psb_intel_crtc->mode_set;
8432+ modeset->fb = fb;
8433+ conn_count = 0;
8434+ list_for_each_entry(connector,
8435+ &dev->mode_config.connector_list,
8436+ head) {
8437+ if (connector->encoder)
8438+ if (connector->encoder->crtc ==
8439+ modeset->crtc) {
8440+ modeset->connectors[conn_count] =
8441+ connector;
8442+ conn_count++;
8443+ if (conn_count >
8444+ INTELFB_CONN_LIMIT)
8445+ BUG();
8446+ }
8447+ }
8448+
8449+ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
8450+ modeset->connectors[i] = NULL;
8451+
8452+ par->crtc_ids[crtc_count++] = crtc->base.id;
8453+
8454+ modeset->num_connectors = conn_count;
8455+ if (modeset->mode != modeset->crtc->desired_mode)
8456+ modeset->mode = modeset->crtc->desired_mode;
8457+ }
8458+ par->crtc_count = crtc_count;
8459+
8460+ if (new_fb) {
8461+ info->var.pixclock = -1;
8462+ if (register_framebuffer(info) < 0)
8463+ return -EINVAL;
8464+ } else
8465+ psbfb_set_par(info);
8466+
8467+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
8468+ info->fix.id);
8469+
8470+ /* Switch back to kernel console on panic */
8471+ panic_mode = *modeset;
8472+ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
8473+ printk(KERN_INFO "registered panic notifier\n");
8474+
8475+ return 0;
8476+}
8477+
8478+int psbfb_probe(struct drm_device *dev)
8479+{
8480+ int ret = 0;
8481+
8482+ DRM_DEBUG("\n");
8483+
8484+ /* something has changed in the lower levels of hell - deal with it
8485+ here */
8486+
8487+ /* two modes : a) 1 fb to rule all crtcs.
8488+ b) one fb per crtc.
8489+ two actions 1) new connected device
8490+ 2) device removed.
8491+ case a/1 : if the fb surface isn't big enough -
8492+ resize the surface fb.
8493+ if the fb size isn't big enough - resize fb into surface.
8494+ if everything big enough configure the new crtc/etc.
8495+ case a/2 : undo the configuration
8496+ possibly resize down the fb to fit the new configuration.
8497+ case b/1 : see if it is on a new crtc - setup a new fb and add it.
8498+ case b/2 : teardown the new fb.
8499+ */
8500+
8501+ /* mode a first */
8502+ /* search for an fb */
8503+ if (0 /*i915_fbpercrtc == 1 */)
8504+ ret = psbfb_multi_fb_probe(dev);
8505+ else
8506+ ret = psbfb_single_fb_probe(dev);
8507+
8508+ return ret;
8509+}
8510+EXPORT_SYMBOL(psbfb_probe);
8511+
8512+int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
8513+{
8514+ struct fb_info *info;
8515+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
8516+
8517+ if (drm_psb_no_fb)
8518+ return 0;
8519+
8520+ info = fb->fbdev;
8521+
8522+ if (info) {
8523+ unregister_framebuffer(info);
8524+ ttm_bo_kunmap(&psbfb->kmap);
8525+ ttm_bo_unref(&psbfb->bo);
8526+ framebuffer_release(info);
8527+ }
8528+
8529+ atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
8530+ memset(&panic_mode, 0, sizeof(struct drm_mode_set));
8531+ return 0;
8532+}
8533+EXPORT_SYMBOL(psbfb_remove);
8534+
8535+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
8536+ struct drm_file *file_priv,
8537+ unsigned int *handle)
8538+{
8539+ /* JB: TODO currently we can't go from a bo to a handle with ttm */
8540+ (void) file_priv;
8541+ *handle = 0;
8542+ return 0;
8543+}
8544+
8545+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
8546+{
8547+ struct drm_device *dev = fb->dev;
8548+ if (fb->fbdev)
8549+ psbfb_remove(dev, fb);
8550+
8551+ /* JB: TODO not drop, refcount buffer */
8552+ drm_framebuffer_cleanup(fb);
8553+
8554+ kfree(fb);
8555+}
8556+
8557+static const struct drm_mode_config_funcs psb_mode_funcs = {
8558+ .fb_create = psb_user_framebuffer_create,
8559+ .fb_changed = psbfb_probe,
8560+};
8561+
8562+static void psb_setup_outputs(struct drm_device *dev)
8563+{
8564+ struct drm_psb_private *dev_priv =
8565+ (struct drm_psb_private *) dev->dev_private;
8566+ struct drm_connector *connector;
8567+
8568+ if (IS_MRST(dev)) {
8569+ if (dev_priv->iLVDS_enable)
8570+ /* Set up integrated LVDS for MRST */
8571+ mrst_lvds_init(dev, &dev_priv->mode_dev);
8572+ else {
8573+ /* Set up integrated MIPI for MRST */
8574+ mrst_dsi_init(dev, &dev_priv->mode_dev);
8575+ }
8576+ } else {
8577+ psb_intel_lvds_init(dev, &dev_priv->mode_dev);
8578+ /* psb_intel_sdvo_init(dev, SDVOB); */
8579+ }
8580+
8581+ list_for_each_entry(connector, &dev->mode_config.connector_list,
8582+ head) {
8583+ struct psb_intel_output *psb_intel_output =
8584+ to_psb_intel_output(connector);
8585+ struct drm_encoder *encoder = &psb_intel_output->enc;
8586+ int crtc_mask = 0, clone_mask = 0;
8587+
8588+ /* valid crtcs */
8589+ switch (psb_intel_output->type) {
8590+ case INTEL_OUTPUT_SDVO:
8591+ crtc_mask = ((1 << 0) | (1 << 1));
8592+ clone_mask = (1 << INTEL_OUTPUT_SDVO);
8593+ break;
8594+ case INTEL_OUTPUT_LVDS:
8595+ if (IS_MRST(dev))
8596+ crtc_mask = (1 << 0);
8597+ else
8598+ crtc_mask = (1 << 1);
8599+
8600+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
8601+ break;
8602+ case INTEL_OUTPUT_MIPI:
8603+ crtc_mask = (1 << 0);
8604+ clone_mask = (1 << INTEL_OUTPUT_MIPI);
8605+ break;
8606+ }
8607+ encoder->possible_crtcs = crtc_mask;
8608+ encoder->possible_clones =
8609+ psb_intel_connector_clones(dev, clone_mask);
8610+ }
8611+}
8612+
8613+static void *psb_bo_from_handle(struct drm_device *dev,
8614+ struct drm_file *file_priv,
8615+ unsigned int handle)
8616+{
8617+ return ttm_buffer_object_lookup(psb_fpriv(file_priv)->tfile,
8618+ handle);
8619+}
8620+
8621+static size_t psb_bo_size(struct drm_device *dev, void *bof)
8622+{
8623+ struct ttm_buffer_object *bo = bof;
8624+ return bo->num_pages << PAGE_SHIFT;
8625+}
8626+
8627+static size_t psb_bo_offset(struct drm_device *dev, void *bof)
8628+{
8629+ struct drm_psb_private *dev_priv =
8630+ (struct drm_psb_private *) dev->dev_private;
8631+ struct ttm_buffer_object *bo = bof;
8632+
8633+ size_t offset = bo->offset - dev_priv->pg->gatt_start;
8634+ DRM_DEBUG("Offset %u\n", offset);
8635+ return offset;
8636+}
8637+
8638+static int psb_bo_pin_for_scanout(struct drm_device *dev, void *bo)
8639+{
8640+#if 0 /* JB: Not used for the drop */
8641+ struct ttm_buffer_object *bo = bof;
8642+ /* We should do things like check if the buffer is in a
8643+ * scanout-able place, and make sure that it is pinned.
8644+ */
8645+#endif
8646+ return 0;
8647+ }
8648+
8649+ static int psb_bo_unpin_for_scanout(struct drm_device *dev,
8650+ void *bo) {
8651+#if 0 /* JB: Not used for the drop */
8652+ struct ttm_buffer_object *bo = bof;
8653+#endif
8654+ return 0;
8655+ }
8656+
8657+ void psb_modeset_init(struct drm_device *dev)
8658+ {
8659+ struct drm_psb_private *dev_priv =
8660+ (struct drm_psb_private *) dev->dev_private;
8661+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
8662+ int i;
8663+ int num_pipe;
8664+
8665+ /* Init mm functions */
8666+ mode_dev->bo_from_handle = psb_bo_from_handle;
8667+ mode_dev->bo_size = psb_bo_size;
8668+ mode_dev->bo_offset = psb_bo_offset;
8669+ mode_dev->bo_pin_for_scanout = psb_bo_pin_for_scanout;
8670+ mode_dev->bo_unpin_for_scanout = psb_bo_unpin_for_scanout;
8671+
8672+ drm_mode_config_init(dev);
8673+
8674+ dev->mode_config.min_width = 0;
8675+ dev->mode_config.min_height = 0;
8676+
8677+ dev->mode_config.funcs = (void *) &psb_mode_funcs;
8678+
8679+ dev->mode_config.max_width = 2048;
8680+ dev->mode_config.max_height = 2048;
8681+
8682+ /* set memory base */
8683+ dev->mode_config.fb_base =
8684+ pci_resource_start(dev->pdev, 0);
8685+
8686+ if (IS_MRST(dev))
8687+ num_pipe = 1;
8688+ else
8689+ num_pipe = 2;
8690+
8691+
8692+ for (i = 0; i < num_pipe; i++)
8693+ psb_intel_crtc_init(dev, i, mode_dev);
8694+
8695+ psb_setup_outputs(dev);
8696+
8697+ /* setup fbs */
8698+ /* drm_initial_config(dev, false); */
8699+ }
8700+
8701+ void psb_modeset_cleanup(struct drm_device *dev)
8702+ {
8703+ drm_mode_config_cleanup(dev);
8704+ }
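The psb_fb.c code above handles overlapping screen-to-screen copies by picking a blit order from the sign of (src - dst) and, for right-to-left or bottom-to-top orders, moving the start coordinates to the far edge (see psb_accel_2d_copy_direction() and psb_accel_2d_copy()). A minimal user-space sketch of that logic; the enum values are illustrative stand-ins, not the real PSB_2D_COPYORDER_* encodings:

/* Standalone sketch (user space, not driver code) of the overlap-safe
 * copy-order selection used by psbfb_copyarea_accel(). */
#include <stdio.h>

enum copy_order { TL2BR, TR2BL, BL2TR, BR2TL };  /* stand-in values */

static enum copy_order copy_direction(int xdir, int ydir)
{
    if (xdir < 0)
        return (ydir < 0) ? BR2TL : TR2BL;
    return (ydir < 0) ? BL2TR : TL2BR;
}

int main(void)
{
    /* Copy a 4x4 block from (10,10) to (12,11): dst overlaps src to the
     * right and below, so the blit must start from the bottom-right corner. */
    int src_x = 10, src_y = 10, dst_x = 12, dst_y = 11, w = 4, h = 4;
    enum copy_order order = copy_direction(src_x - dst_x, src_y - dst_y);

    if (order == BR2TL || order == TR2BL) { src_x += w - 1; dst_x += w - 1; }
    if (order == BR2TL || order == BL2TR) { src_y += h - 1; dst_y += h - 1; }

    printf("order=%d start src=(%d,%d) dst=(%d,%d)\n",
           order, src_x, src_y, dst_x, dst_y);
    return 0;
}

Run, this reports a bottom-right-to-top-left order starting at src (13,13), dst (15,14), which is how the driver avoids overwriting source pixels it has not copied yet.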
8705diff -uNr a/drivers/gpu/drm/psb/psb_fb.h b/drivers/gpu/drm/psb/psb_fb.h
8706--- a/drivers/gpu/drm/psb/psb_fb.h 1969-12-31 16:00:00.000000000 -0800
8707+++ b/drivers/gpu/drm/psb/psb_fb.h 2009-04-07 13:28:38.000000000 -0700
8708@@ -0,0 +1,47 @@
8709+/*
8710+ * Copyright (c) 2008, Intel Corporation
8711+ *
8712+ * Permission is hereby granted, free of charge, to any person obtaining a
8713+ * copy of this software and associated documentation files (the "Software"),
8714+ * to deal in the Software without restriction, including without limitation
8715+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8716+ * and/or sell copies of the Software, and to permit persons to whom the
8717+ * Software is furnished to do so, subject to the following conditions:
8718+ *
8719+ * The above copyright notice and this permission notice (including the next
8720+ * paragraph) shall be included in all copies or substantial portions of the
8721+ * Software.
8722+ *
8723+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
8724+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
8725+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
8726+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
8727+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
8728+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
8729+ * SOFTWARE.
8730+ *
8731+ * Authors:
8732+ * Eric Anholt <eric@anholt.net>
8733+ *
8734+ **/
8735+
8736+#ifndef _PSB_FB_H_
8737+#define _PSB_FB_H_
8738+
8739+struct psb_framebuffer {
8740+ struct drm_framebuffer base;
8741+ struct address_space *addr_space;
8742+ struct ttm_buffer_object *bo;
8743+ struct ttm_bo_kmap_obj kmap;
8744+ uint64_t offset;
8745+};
8746+
8747+#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
8748+
8749+
8750+extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
8751+
8752+extern int psb_2d_submit(struct drm_psb_private *, uint32_t *, uint32_t);
8753+
8754+#endif
8755+
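to_psb_fb() in psb_fb.h is a container_of() lookup: from a pointer to the embedded drm_framebuffer it recovers the enclosing psb_framebuffer. A standalone sketch of the same pointer arithmetic, with trimmed-down stand-in structs instead of the real DRM/TTM types:

#include <stdio.h>
#include <stddef.h>

struct drm_framebuffer { int width, height; };  /* stand-in */
struct psb_framebuffer {
    struct drm_framebuffer base;
    unsigned long offset;
};

/* Same idea as container_of(x, struct psb_framebuffer, base). */
#define to_psb_fb(x) \
    ((struct psb_framebuffer *)((char *)(x) - offsetof(struct psb_framebuffer, base)))

int main(void)
{
    struct psb_framebuffer psbfb = { .base = { 1024, 600 }, .offset = 0x1000 };
    struct drm_framebuffer *fb = &psbfb.base;

    /* Recover the outer structure from the embedded base pointer. */
    printf("offset=0x%lx\n", to_psb_fb(fb)->offset);
    return 0;
}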
8756diff -uNr a/drivers/gpu/drm/psb/psb_fence.c b/drivers/gpu/drm/psb/psb_fence.c
8757--- a/drivers/gpu/drm/psb/psb_fence.c 1969-12-31 16:00:00.000000000 -0800
8758+++ b/drivers/gpu/drm/psb/psb_fence.c 2009-04-07 13:28:38.000000000 -0700
8759@@ -0,0 +1,343 @@
8760+/**************************************************************************
8761+ * Copyright (c) 2007, Intel Corporation.
8762+ * All Rights Reserved.
8763+ *
8764+ * This program is free software; you can redistribute it and/or modify it
8765+ * under the terms and conditions of the GNU General Public License,
8766+ * version 2, as published by the Free Software Foundation.
8767+ *
8768+ * This program is distributed in the hope it will be useful, but WITHOUT
8769+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8770+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8771+ * more details.
8772+ *
8773+ * You should have received a copy of the GNU General Public License along with
8774+ * this program; if not, write to the Free Software Foundation, Inc.,
8775+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8776+ *
8777+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
8778+ * develop this driver.
8779+ *
8780+ **************************************************************************/
8781+/*
8782+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
8783+ */
8784+
8785+#include <drm/drmP.h>
8786+#include "psb_drv.h"
8787+
8788+static void psb_print_ta_fence_status(struct ttm_fence_device *fdev)
8789+{
8790+ struct drm_psb_private *dev_priv =
8791+ container_of(fdev, struct drm_psb_private, fdev);
8792+ struct psb_scheduler_seq *seq = dev_priv->scheduler.seq;
8793+ int i;
8794+
8795+ for (i=0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) {
8796+ DRM_INFO("Type 0x%02x, sequence %lu, reported %d\n",
8797+ (1 << i),
8798+ (unsigned long) seq->sequence,
8799+ seq->reported);
8800+ seq++;
8801+ }
8802+}
8803+
8804+static void psb_poll_ta(struct ttm_fence_device *fdev,
8805+ uint32_t waiting_types)
8806+{
8807+ struct drm_psb_private *dev_priv =
8808+ container_of(fdev, struct drm_psb_private, fdev);
8809+ uint32_t cur_flag = 1;
8810+ uint32_t flags = 0;
8811+ uint32_t sequence = 0;
8812+ uint32_t remaining = 0xFFFFFFFF;
8813+ uint32_t diff;
8814+
8815+ struct psb_scheduler *scheduler;
8816+ struct psb_scheduler_seq *seq;
8817+ struct ttm_fence_class_manager *fc =
8818+ &fdev->fence_class[PSB_ENGINE_TA];
8819+
8820+ scheduler = &dev_priv->scheduler;
8821+ seq = scheduler->seq;
8822+
8823+ while (likely(waiting_types & remaining)) {
8824+ if (!(waiting_types & cur_flag))
8825+ goto skip;
8826+ if (seq->reported)
8827+ goto skip;
8828+ if (flags == 0)
8829+ sequence = seq->sequence;
8830+ else if (sequence != seq->sequence) {
8831+ ttm_fence_handler(fdev, PSB_ENGINE_TA,
8832+ sequence, flags, 0);
8833+ sequence = seq->sequence;
8834+ flags = 0;
8835+ }
8836+ flags |= cur_flag;
8837+
8838+ /*
8839+ * Sequence may not have ended up on the ring yet.
8840+ * In that case, report it but don't mark it as
8841+ * reported. A subsequent poll will report it again.
8842+ */
8843+
8844+ diff = (fc->latest_queued_sequence - sequence) &
8845+ fc->sequence_mask;
8846+ if (diff < fc->wrap_diff)
8847+ seq->reported = 1;
8848+
8849+skip:
8850+ cur_flag <<= 1;
8851+ remaining <<= 1;
8852+ seq++;
8853+ }
8854+
8855+ if (flags)
8856+ ttm_fence_handler(fdev, PSB_ENGINE_TA, sequence, flags, 0);
8857+
8858+}
8859+
8860+static void psb_poll_other(struct ttm_fence_device *fdev,
8861+ uint32_t fence_class, uint32_t waiting_types)
8862+{
8863+ struct drm_psb_private *dev_priv =
8864+ container_of(fdev, struct drm_psb_private, fdev);
8865+ struct ttm_fence_class_manager *fc =
8866+ &fdev->fence_class[fence_class];
8867+ uint32_t sequence;
8868+
8869+ if (unlikely(!dev_priv))
8870+ return;
8871+
8872+ if (waiting_types) {
8873+ switch (fence_class) {
8874+ case PSB_ENGINE_VIDEO:
8875+ sequence = dev_priv->msvdx_current_sequence;
8876+ break;
8877+ case LNC_ENGINE_ENCODE:
8878+ sequence = dev_priv->topaz_current_sequence;
8879+ break;
8880+ default:
8881+ sequence = dev_priv->comm[fence_class << 4];
8882+ break;
8883+ }
8884+
8885+ ttm_fence_handler(fdev, fence_class, sequence,
8886+ _PSB_FENCE_TYPE_EXE, 0);
8887+
8888+ switch (fence_class) {
8889+ case PSB_ENGINE_2D:
8890+ if (dev_priv->fence0_irq_on && !fc->waiting_types) {
8891+ psb_2D_irq_off(dev_priv);
8892+ dev_priv->fence0_irq_on = 0;
8893+ } else if (!dev_priv->fence0_irq_on
8894+ && fc->waiting_types) {
8895+ psb_2D_irq_on(dev_priv);
8896+ dev_priv->fence0_irq_on = 1;
8897+ }
8898+ break;
8899+#if 0
8900+ /*
8901+ * FIXME: MSVDX irq switching
8902+ */
8903+
8904+ case PSB_ENGINE_VIDEO:
8905+ if (dev_priv->fence2_irq_on && !fc->waiting_types) {
8906+ psb_msvdx_irq_off(dev_priv);
8907+ dev_priv->fence2_irq_on = 0;
8908+ } else if (!dev_priv->fence2_irq_on
8909+ && fc->pending_exe_flush) {
8910+ psb_msvdx_irq_on(dev_priv);
8911+ dev_priv->fence2_irq_on = 1;
8912+ }
8913+ break;
8914+#endif
8915+ default:
8916+ return;
8917+ }
8918+ }
8919+}
8920+
8921+static void psb_fence_poll(struct ttm_fence_device *fdev,
8922+ uint32_t fence_class, uint32_t waiting_types)
8923+{
8924+ if (unlikely((PSB_D_PM & drm_psb_debug) && (fence_class == 0)))
8925+ PSB_DEBUG_PM("psb_fence_poll: %d\n", fence_class);
8926+ switch (fence_class) {
8927+ case PSB_ENGINE_TA:
8928+ psb_poll_ta(fdev, waiting_types);
8929+ break;
8930+ default:
8931+ psb_poll_other(fdev, fence_class, waiting_types);
8932+ break;
8933+ }
8934+}
8935+
8936+void psb_fence_error(struct drm_device *dev,
8937+ uint32_t fence_class,
8938+ uint32_t sequence, uint32_t type, int error)
8939+{
8940+ struct drm_psb_private *dev_priv = psb_priv(dev);
8941+ struct ttm_fence_device *fdev = &dev_priv->fdev;
8942+ unsigned long irq_flags;
8943+ struct ttm_fence_class_manager *fc =
8944+ &fdev->fence_class[fence_class];
8945+
8946+ BUG_ON(fence_class >= PSB_NUM_ENGINES);
8947+ write_lock_irqsave(&fc->lock, irq_flags);
8948+ ttm_fence_handler(fdev, fence_class, sequence, type, error);
8949+ write_unlock_irqrestore(&fc->lock, irq_flags);
8950+}
8951+
8952+int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
8953+ uint32_t fence_class,
8954+ uint32_t flags, uint32_t *sequence,
8955+ unsigned long *timeout_jiffies)
8956+{
8957+ struct drm_psb_private *dev_priv =
8958+ container_of(fdev, struct drm_psb_private, fdev);
8959+ uint32_t seq = 0;
8960+ int ret;
8961+
8962+ if (!dev_priv)
8963+ return -EINVAL;
8964+
8965+ if (fence_class >= PSB_NUM_ENGINES)
8966+ return -EINVAL;
8967+
8968+ switch (fence_class) {
8969+ case PSB_ENGINE_2D:
8970+ spin_lock(&dev_priv->sequence_lock);
8971+ seq = ++dev_priv->sequence[fence_class];
8972+ spin_unlock(&dev_priv->sequence_lock);
8973+ ret = psb_blit_sequence(dev_priv, seq);
8974+ if (ret)
8975+ return ret;
8976+ break;
8977+ case PSB_ENGINE_VIDEO:
8978+ spin_lock(&dev_priv->sequence_lock);
8979+ seq = dev_priv->sequence[fence_class]++;
8980+ spin_unlock(&dev_priv->sequence_lock);
8981+ break;
8982+ case LNC_ENGINE_ENCODE:
8983+ spin_lock(&dev_priv->sequence_lock);
8984+ seq = dev_priv->sequence[fence_class]++;
8985+ spin_unlock(&dev_priv->sequence_lock);
8986+ break;
8987+ default:
8988+ spin_lock(&dev_priv->sequence_lock);
8989+ seq = dev_priv->sequence[fence_class];
8990+ spin_unlock(&dev_priv->sequence_lock);
8991+ }
8992+
8993+ *sequence = seq;
8994+
8995+ if (fence_class == PSB_ENGINE_TA)
8996+ *timeout_jiffies = jiffies + DRM_HZ / 2;
8997+ else
8998+ *timeout_jiffies = jiffies + DRM_HZ * 3;
8999+
9000+ return 0;
9001+}
9002+
9003+uint32_t psb_fence_advance_sequence(struct drm_device *dev,
9004+ uint32_t fence_class)
9005+{
9006+ struct drm_psb_private *dev_priv =
9007+ (struct drm_psb_private *) dev->dev_private;
9008+ uint32_t sequence;
9009+
9010+ spin_lock(&dev_priv->sequence_lock);
9011+ sequence = ++dev_priv->sequence[fence_class];
9012+ spin_unlock(&dev_priv->sequence_lock);
9013+
9014+ return sequence;
9015+}
9016+
9017+static void psb_fence_lockup(struct ttm_fence_object *fence,
9018+ uint32_t fence_types)
9019+{
9020+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
9021+
9022+ if (fence->fence_class == PSB_ENGINE_TA) {
9023+
9024+ /*
9025+ * The 3D engine has its own lockup detection.
9026+ * Just extend the fence expiry time.
9027+ */
9028+
9029+ DRM_INFO("Extending 3D fence timeout.\n");
9030+ write_lock(&fc->lock);
9031+
9032+ DRM_INFO("Sequence %lu, types 0x%08x signaled 0x%08x\n",
9033+ (unsigned long) fence->sequence, fence_types,
9034+ fence->info.signaled_types);
9035+
9036+ if (time_after_eq(jiffies, fence->timeout_jiffies))
9037+ fence->timeout_jiffies = jiffies + DRM_HZ / 2;
9038+
9039+ psb_print_ta_fence_status(fence->fdev);
9040+ write_unlock(&fc->lock);
9041+ } else {
9042+ DRM_ERROR
9043+ ("GPU timeout (probable lockup) detected on engine %u "
9044+ "fence type 0x%08x\n",
9045+ (unsigned int) fence->fence_class,
9046+ (unsigned int) fence_types);
9047+ write_lock(&fc->lock);
9048+ ttm_fence_handler(fence->fdev, fence->fence_class,
9049+ fence->sequence, fence_types, -EBUSY);
9050+ write_unlock(&fc->lock);
9051+ }
9052+}
9053+
9054+void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
9055+{
9056+ struct drm_psb_private *dev_priv = psb_priv(dev);
9057+ struct ttm_fence_device *fdev = &dev_priv->fdev;
9058+ struct ttm_fence_class_manager *fc =
9059+ &fdev->fence_class[fence_class];
9060+ unsigned long irq_flags;
9061+
9062+#ifdef FIX_TG_16
9063+ if (fence_class == PSB_ENGINE_2D) {
9064+
9065+ if ((atomic_read(&dev_priv->ta_wait_2d_irq) == 1) &&
9066+ (PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
9067+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
9068+ _PSB_C2B_STATUS_BUSY) == 0))
9069+ psb_resume_ta_2d_idle(dev_priv);
9070+ }
9071+#endif
9072+ write_lock_irqsave(&fc->lock, irq_flags);
9073+ psb_fence_poll(fdev, fence_class, fc->waiting_types);
9074+ write_unlock_irqrestore(&fc->lock, irq_flags);
9075+}
9076+
9077+
9078+static struct ttm_fence_driver psb_ttm_fence_driver = {
9079+ .has_irq = NULL,
9080+ .emit = psb_fence_emit_sequence,
9081+ .flush = NULL,
9082+ .poll = psb_fence_poll,
9083+ .needed_flush = NULL,
9084+ .wait = NULL,
9085+ .signaled = NULL,
9086+ .lockup = psb_fence_lockup,
9087+};
9088+
9089+int psb_ttm_fence_device_init(struct ttm_fence_device *fdev)
9090+{
9091+ struct drm_psb_private *dev_priv =
9092+ container_of(fdev, struct drm_psb_private, fdev);
9093+ struct ttm_fence_class_init fci = {.wrap_diff = (1 << 30),
9094+ .flush_diff = (1 << 29),
9095+ .sequence_mask = 0xFFFFFFFF
9096+ };
9097+
9098+ return ttm_fence_device_init(PSB_NUM_ENGINES,
9099+ dev_priv->mem_global_ref.object,
9100+ fdev, &fci, 1,
9101+ &psb_ttm_fence_driver);
9102+}
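psb_poll_ta() above decides whether a fence sequence has actually reached the ring with a wrap-safe subtraction: diff = (latest_queued_sequence - sequence) & sequence_mask, and the sequence is marked reported only when diff < wrap_diff. A standalone sketch of that test, using the sequence_mask and wrap_diff values passed to ttm_fence_device_init() in psb_ttm_fence_device_init():

#include <stdio.h>
#include <stdint.h>

#define SEQUENCE_MASK 0xFFFFFFFFu   /* from the ttm_fence_class_init above */
#define WRAP_DIFF     (1u << 30)

/* Returns nonzero when 'seq' is at or behind 'latest_queued', modulo wrap. */
static int sequence_queued(uint32_t latest_queued, uint32_t seq)
{
    uint32_t diff = (latest_queued - seq) & SEQUENCE_MASK;

    return diff < WRAP_DIFF;
}

int main(void)
{
    printf("%d\n", sequence_queued(105, 100));        /* 1: already queued   */
    printf("%d\n", sequence_queued(105, 110));        /* 0: not on ring yet  */
    printf("%d\n", sequence_queued(3, 0xFFFFFFFEu));  /* 1: survives wrap    */
    return 0;
}

The unsigned subtraction keeps the comparison correct across the 32-bit counter wrapping, which is why the driver can leave a not-yet-queued sequence unreported and pick it up on a later poll.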
9103diff -uNr a/drivers/gpu/drm/psb/psb_gtt.c b/drivers/gpu/drm/psb/psb_gtt.c
9104--- a/drivers/gpu/drm/psb/psb_gtt.c 1969-12-31 16:00:00.000000000 -0800
9105+++ b/drivers/gpu/drm/psb/psb_gtt.c 2009-04-07 13:28:38.000000000 -0700
9106@@ -0,0 +1,257 @@
9107+/**************************************************************************
9108+ * Copyright (c) 2007, Intel Corporation.
9109+ * All Rights Reserved.
9110+ *
9111+ * This program is free software; you can redistribute it and/or modify it
9112+ * under the terms and conditions of the GNU General Public License,
9113+ * version 2, as published by the Free Software Foundation.
9114+ *
9115+ * This program is distributed in the hope it will be useful, but WITHOUT
9116+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9117+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9118+ * more details.
9119+ *
9120+ * You should have received a copy of the GNU General Public License along with
9121+ * this program; if not, write to the Free Software Foundation, Inc.,
9122+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9123+ *
9124+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
9125+ * develop this driver.
9126+ *
9127+ **************************************************************************/
9128+/*
9129+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
9130+ */
9131+#include <drm/drmP.h>
9132+#include "psb_drv.h"
9133+
9134+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
9135+{
9136+ uint32_t mask = PSB_PTE_VALID;
9137+
9138+ if (type & PSB_MMU_CACHED_MEMORY)
9139+ mask |= PSB_PTE_CACHED;
9140+ if (type & PSB_MMU_RO_MEMORY)
9141+ mask |= PSB_PTE_RO;
9142+ if (type & PSB_MMU_WO_MEMORY)
9143+ mask |= PSB_PTE_WO;
9144+
9145+ return (pfn << PAGE_SHIFT) | mask;
9146+}
9147+
9148+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
9149+{
9150+ struct psb_gtt *tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_DRIVER);
9151+
9152+ if (!tmp)
9153+ return NULL;
9154+
9155+ init_rwsem(&tmp->sem);
9156+ tmp->dev = dev;
9157+
9158+ return tmp;
9159+}
9160+
9161+void psb_gtt_takedown(struct psb_gtt *pg, int free)
9162+{
9163+ struct drm_psb_private *dev_priv;
9164+
9165+ if (!pg)
9166+ return;
9167+ dev_priv = pg->dev->dev_private;
9168+ if (pg->gtt_map) {
9169+ iounmap(pg->gtt_map);
9170+ pg->gtt_map = NULL;
9171+ }
9172+ if (pg->initialized) {
9173+ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
9174+ pg->gmch_ctrl);
9175+ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
9176+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
9177+ }
9178+ if (free)
9179+ drm_free(pg, sizeof(*pg), DRM_MEM_DRIVER);
9180+}
9181+
9182+int psb_gtt_init(struct psb_gtt *pg, int resume)
9183+{
9184+ struct drm_device *dev = pg->dev;
9185+ struct drm_psb_private *dev_priv = dev->dev_private;
9186+ unsigned gtt_pages;
9187+ unsigned long stolen_size, vram_stolen_size, ci_stolen_size;
9188+ unsigned i, num_pages;
9189+ unsigned pfn_base;
9190+
9191+ int ret = 0;
9192+ uint32_t pte;
9193+
9194+ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
9195+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
9196+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
9197+
9198+ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
9199+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
9200+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
9201+
9202+ pg->initialized = 1;
9203+
9204+ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
9205+
9206+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
9207+ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
9208+ gtt_pages =
9209+ pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
9210+ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
9211+ >> PAGE_SHIFT;
9212+
9213+ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
9214+ vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
9215+
9216+ ci_stolen_size = dev_priv->ci_region_size;
9217+ /* add CI & RAR share buffer space to stolen_size */
9218+ /* stolen_size = vram_stolen_size + ci_stolen_size; */
9219+ stolen_size = vram_stolen_size;
9220+
9221+ PSB_DEBUG_INIT("GTT phys start: 0x%08x.\n", pg->gtt_phys_start);
9222+ PSB_DEBUG_INIT("GTT start: 0x%08x.\n", pg->gtt_start);
9223+ PSB_DEBUG_INIT("GATT start: 0x%08x.\n", pg->gatt_start);
9224+ PSB_DEBUG_INIT("GTT pages: %u\n", gtt_pages);
9225+ PSB_DEBUG_INIT("Stolen size: %lu kiB\n", stolen_size / 1024);
9226+
9227+ if (resume && (gtt_pages != pg->gtt_pages) &&
9228+ (stolen_size != pg->stolen_size)) {
9229+ DRM_ERROR("GTT resume error.\n");
9230+ ret = -EINVAL;
9231+ goto out_err;
9232+ }
9233+
9234+ pg->gtt_pages = gtt_pages;
9235+ pg->stolen_size = stolen_size;
9236+ pg->vram_stolen_size = vram_stolen_size;
9237+ pg->ci_stolen_size = ci_stolen_size;
9238+ pg->gtt_map =
9239+ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
9240+ if (!pg->gtt_map) {
9241+ DRM_ERROR("Failure to map gtt.\n");
9242+ ret = -ENOMEM;
9243+ goto out_err;
9244+ }
9245+
9246+ /*
9247+ * insert vram stolen pages.
9248+ */
9249+
9250+ pfn_base = pg->stolen_base >> PAGE_SHIFT;
9251+ num_pages = vram_stolen_size >> PAGE_SHIFT;
9252+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
9253+ num_pages, pfn_base);
9254+ for (i = 0; i < num_pages; ++i) {
9255+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
9256+ iowrite32(pte, pg->gtt_map + i);
9257+ }
9258+#if 0
9259+ /*
9260+ * insert CI stolen pages
9261+ */
9262+
9263+ pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT;
9264+ num_pages = ci_stolen_size >> PAGE_SHIFT;
9265+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
9266+ num_pages, pfn_base);
9267+ for (; i < num_pages; ++i) {
9268+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
9269+ iowrite32(pte, pg->gtt_map + i);
9270+ }
9271+#endif
9272+ /*
9273+ * Init rest of gtt.
9274+ */
9275+
9276+ pfn_base = page_to_pfn(dev_priv->scratch_page);
9277+ pte = psb_gtt_mask_pte(pfn_base, 0);
9278+ PSB_DEBUG_INIT("Initializing the rest of a total "
9279+ "of %d gtt pages.\n", pg->gatt_pages);
9280+
9281+ for (; i < pg->gatt_pages; ++i)
9282+ iowrite32(pte, pg->gtt_map + i);
9283+ (void) ioread32(pg->gtt_map + i - 1);
9284+
9285+ return 0;
9286+
9287+out_err:
9288+ psb_gtt_takedown(pg, 0);
9289+ return ret;
9290+}
9291+
9292+int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
9293+ unsigned offset_pages, unsigned num_pages,
9294+ unsigned desired_tile_stride,
9295+ unsigned hw_tile_stride, int type)
9296+{
9297+ unsigned rows = 1;
9298+ unsigned add;
9299+ unsigned row_add;
9300+ unsigned i;
9301+ unsigned j;
9302+ uint32_t *cur_page = NULL;
9303+ uint32_t pte;
9304+
9305+ if (hw_tile_stride)
9306+ rows = num_pages / desired_tile_stride;
9307+ else
9308+ desired_tile_stride = num_pages;
9309+
9310+ add = desired_tile_stride;
9311+ row_add = hw_tile_stride;
9312+
9313+ down_read(&pg->sem);
9314+ for (i = 0; i < rows; ++i) {
9315+ cur_page = pg->gtt_map + offset_pages;
9316+ for (j = 0; j < desired_tile_stride; ++j) {
9317+ pte =
9318+ psb_gtt_mask_pte(page_to_pfn(*pages++), type);
9319+ iowrite32(pte, cur_page++);
9320+ }
9321+ offset_pages += add;
9322+ }
9323+ (void) ioread32(cur_page - 1);
9324+ up_read(&pg->sem);
9325+
9326+ return 0;
9327+}
9328+
9329+int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
9330+ unsigned num_pages, unsigned desired_tile_stride,
9331+ unsigned hw_tile_stride)
9332+{
9333+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
9334+ unsigned rows = 1;
9335+ unsigned add;
9336+ unsigned row_add;
9337+ unsigned i;
9338+ unsigned j;
9339+ uint32_t *cur_page = NULL;
9340+ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
9341+ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
9342+
9343+ if (hw_tile_stride)
9344+ rows = num_pages / desired_tile_stride;
9345+ else
9346+ desired_tile_stride = num_pages;
9347+
9348+ add = desired_tile_stride;
9349+ row_add = hw_tile_stride;
9350+
9351+ down_read(&pg->sem);
9352+ for (i = 0; i < rows; ++i) {
9353+ cur_page = pg->gtt_map + offset_pages;
9354+ for (j = 0; j < desired_tile_stride; ++j)
9355+ iowrite32(pte, cur_page++);
9356+
9357+ offset_pages += add;
9358+ }
9359+ (void) ioread32(cur_page - 1);
9360+ up_read(&pg->sem);
9361+
9362+ return 0;
9363+}
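psb_gtt_mask_pte() above packs a page frame number and access flags into a single 32-bit GTT entry, which psb_gtt_init() and psb_gtt_insert_pages() then write with iowrite32(). A standalone sketch of the encoding; the flag bit positions and PAGE_SHIFT are illustrative stand-ins for the driver's PSB_PTE_* / PSB_MMU_* defines:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT        12          /* assumed 4 KiB pages */
#define PTE_VALID         (1u << 0)   /* stand-in for PSB_PTE_VALID */
#define PTE_CACHED        (1u << 1)   /* stand-in for PSB_PTE_CACHED */
#define PTE_RO            (1u << 2)   /* stand-in for PSB_PTE_RO */
#define MMU_CACHED_MEMORY (1 << 0)    /* stand-in for PSB_MMU_CACHED_MEMORY */
#define MMU_RO_MEMORY     (1 << 1)    /* stand-in for PSB_MMU_RO_MEMORY */

/* Page frame number goes in the address bits, flags in the low bits. */
static uint32_t gtt_mask_pte(uint32_t pfn, int type)
{
    uint32_t mask = PTE_VALID;

    if (type & MMU_CACHED_MEMORY)
        mask |= PTE_CACHED;
    if (type & MMU_RO_MEMORY)
        mask |= PTE_RO;

    return (pfn << PAGE_SHIFT) | mask;
}

int main(void)
{
    /* PFN 0x1234, cached + read-only: address bits 0x1234000 plus flag bits. */
    printf("pte=0x%08x\n",
           (unsigned)gtt_mask_pte(0x1234, MMU_CACHED_MEMORY | MMU_RO_MEMORY));
    return 0;
}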
9364diff -uNr a/drivers/gpu/drm/psb/psb_intel_display.c b/drivers/gpu/drm/psb/psb_intel_display.c
9365--- a/drivers/gpu/drm/psb/psb_intel_display.c 1969-12-31 16:00:00.000000000 -0800
9366+++ b/drivers/gpu/drm/psb/psb_intel_display.c 2009-04-07 13:28:38.000000000 -0700
9367@@ -0,0 +1,2435 @@
9368+/*
9369+ * Copyright © 2006-2007 Intel Corporation
9370+ *
9371+ * Permission is hereby granted, free of charge, to any person obtaining a
9372+ * copy of this software and associated documentation files (the "Software"),
9373+ * to deal in the Software without restriction, including without limitation
9374+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9375+ * and/or sell copies of the Software, and to permit persons to whom the
9376+ * Software is furnished to do so, subject to the following conditions:
9377+ *
9378+ * The above copyright notice and this permission notice (including the next
9379+ * paragraph) shall be included in all copies or substantial portions of the
9380+ * Software.
9381+ *
9382+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
9383+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
9384+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
9385+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
9386+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
9387+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
9388+ * DEALINGS IN THE SOFTWARE.
9389+ *
9390+ * Authors:
9391+ * Eric Anholt <eric@anholt.net>
9392+ */
9393+
9394+#include <linux/i2c.h>
9395+
9396+#include <drm/drm_crtc_helper.h>
9397+#include "psb_fb.h"
9398+#include "psb_intel_display.h"
9399+
9400+
9401+struct psb_intel_clock_t {
9402+ /* given values */
9403+ int n;
9404+ int m1, m2;
9405+ int p1, p2;
9406+ /* derived values */
9407+ int dot;
9408+ int vco;
9409+ int m;
9410+ int p;
9411+};
9412+
9413+struct psb_intel_range_t {
9414+ int min, max;
9415+};
9416+
9417+struct psb_intel_p2_t {
9418+ int dot_limit;
9419+ int p2_slow, p2_fast;
9420+};
9421+
9422+#define INTEL_P2_NUM 2
9423+
9424+struct psb_intel_limit_t {
9425+ struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
9426+ struct psb_intel_p2_t p2;
9427+};
9428+
9429+#define I8XX_DOT_MIN 25000
9430+#define I8XX_DOT_MAX 350000
9431+#define I8XX_VCO_MIN 930000
9432+#define I8XX_VCO_MAX 1400000
9433+#define I8XX_N_MIN 3
9434+#define I8XX_N_MAX 16
9435+#define I8XX_M_MIN 96
9436+#define I8XX_M_MAX 140
9437+#define I8XX_M1_MIN 18
9438+#define I8XX_M1_MAX 26
9439+#define I8XX_M2_MIN 6
9440+#define I8XX_M2_MAX 16
9441+#define I8XX_P_MIN 4
9442+#define I8XX_P_MAX 128
9443+#define I8XX_P1_MIN 2
9444+#define I8XX_P1_MAX 33
9445+#define I8XX_P1_LVDS_MIN 1
9446+#define I8XX_P1_LVDS_MAX 6
9447+#define I8XX_P2_SLOW 4
9448+#define I8XX_P2_FAST 2
9449+#define I8XX_P2_LVDS_SLOW 14
9450+#define I8XX_P2_LVDS_FAST 14 /* No fast option */
9451+#define I8XX_P2_SLOW_LIMIT 165000
9452+
9453+#define I9XX_DOT_MIN 20000
9454+#define I9XX_DOT_MAX 400000
9455+#define I9XX_VCO_MIN 1400000
9456+#define I9XX_VCO_MAX 2800000
9457+#define I9XX_N_MIN 3
9458+#define I9XX_N_MAX 8
9459+#define I9XX_M_MIN 70
9460+#define I9XX_M_MAX 120
9461+#define I9XX_M1_MIN 10
9462+#define I9XX_M1_MAX 20
9463+#define I9XX_M2_MIN 5
9464+#define I9XX_M2_MAX 9
9465+#define I9XX_P_SDVO_DAC_MIN 5
9466+#define I9XX_P_SDVO_DAC_MAX 80
9467+#define I9XX_P_LVDS_MIN 7
9468+#define I9XX_P_LVDS_MAX 98
9469+#define I9XX_P1_MIN 1
9470+#define I9XX_P1_MAX 8
9471+#define I9XX_P2_SDVO_DAC_SLOW 10
9472+#define I9XX_P2_SDVO_DAC_FAST 5
9473+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
9474+#define I9XX_P2_LVDS_SLOW 14
9475+#define I9XX_P2_LVDS_FAST 7
9476+#define I9XX_P2_LVDS_SLOW_LIMIT 112000
9477+
9478+#define INTEL_LIMIT_I8XX_DVO_DAC 0
9479+#define INTEL_LIMIT_I8XX_LVDS 1
9480+#define INTEL_LIMIT_I9XX_SDVO_DAC 2
9481+#define INTEL_LIMIT_I9XX_LVDS 3
9482+
9483+static const struct psb_intel_limit_t psb_intel_limits[] = {
9484+ { /* INTEL_LIMIT_I8XX_DVO_DAC */
9485+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
9486+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
9487+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
9488+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
9489+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
9490+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
9491+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
9492+ .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
9493+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
9494+ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
9495+ },
9496+ { /* INTEL_LIMIT_I8XX_LVDS */
9497+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
9498+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
9499+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
9500+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
9501+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
9502+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
9503+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
9504+ .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
9505+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
9506+ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
9507+ },
9508+ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
9509+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
9510+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
9511+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
9512+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
9513+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
9514+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
9515+ .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
9516+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
9517+ .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
9518+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
9519+ I9XX_P2_SDVO_DAC_FAST},
9520+ },
9521+ { /* INTEL_LIMIT_I9XX_LVDS */
9522+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
9523+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
9524+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
9525+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
9526+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
9527+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
9528+ .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
9529+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
9530+ /* The single-channel range is 25-112 MHz, and dual-channel
9531+ * is 80-224 MHz. Prefer single channel as much as possible.
9532+ */
9533+ .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
9534+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
9535+ },
9536+};
9537+
9538+static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
9539+{
9540+ struct drm_device *dev = crtc->dev;
9541+ const struct psb_intel_limit_t *limit;
9542+
9543+ if (IS_I9XX(dev)) {
9544+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
9545+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
9546+ else
9547+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
9548+ } else {
9549+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
9550+ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_LVDS];
9551+ else
9552+ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
9553+ }
9554+ return limit;
9555+}
9556+
9557+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
9558+
9559+static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
9560+{
9561+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
9562+ clock->p = clock->p1 * clock->p2;
9563+ clock->vco = refclk * clock->m / (clock->n + 2);
9564+ clock->dot = clock->vco / clock->p;
9565+}
9566+
9567+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
9568+
9569+static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
9570+{
9571+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
9572+ clock->p = clock->p1 * clock->p2;
9573+ clock->vco = refclk * clock->m / (clock->n + 2);
9574+ clock->dot = clock->vco / clock->p;
9575+}
9576+
9577+static void psb_intel_clock(struct drm_device *dev, int refclk,
9578+ struct psb_intel_clock_t *clock)
9579+{
9580+ if (IS_I9XX(dev))
9581+ return i9xx_clock(refclk, clock);
9582+ else
9583+ return i8xx_clock(refclk, clock);
9584+}
9585+
9586+/**
9587+ * Returns whether any output on the specified pipe is of the specified type
9588+ */
9589+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
9590+{
9591+ struct drm_device *dev = crtc->dev;
9592+ struct drm_mode_config *mode_config = &dev->mode_config;
9593+ struct drm_connector *l_entry;
9594+
9595+ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
9596+ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
9597+ struct psb_intel_output *psb_intel_output =
9598+ to_psb_intel_output(l_entry);
9599+ if (psb_intel_output->type == type)
9600+ return true;
9601+ }
9602+ }
9603+ return false;
9604+}
9605+
9606+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
9607+/**
9608+ * Returns whether the given set of divisors is valid for a given refclk with
9609+ * the given connectors.
9610+ */
9611+
9612+static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
9613+ struct psb_intel_clock_t *clock)
9614+{
9615+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
9616+
9617+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
9618+ INTELPllInvalid("p1 out of range\n");
9619+ if (clock->p < limit->p.min || limit->p.max < clock->p)
9620+ INTELPllInvalid("p out of range\n");
9621+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
9622+ INTELPllInvalid("m2 out of range\n");
9623+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
9624+ INTELPllInvalid("m1 out of range\n");
9625+ if (clock->m1 <= clock->m2)
9626+ INTELPllInvalid("m1 <= m2\n");
9627+ if (clock->m < limit->m.min || limit->m.max < clock->m)
9628+ INTELPllInvalid("m out of range\n");
9629+ if (clock->n < limit->n.min || limit->n.max < clock->n)
9630+ INTELPllInvalid("n out of range\n");
9631+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
9632+ INTELPllInvalid("vco out of range\n");
9633+ /* XXX: We may need to be checking "Dot clock"
9634+ * depending on the multiplier, connector, etc.,
9635+ * rather than just a single range.
9636+ */
9637+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
9638+ INTELPllInvalid("dot out of range\n");
9639+
9640+ return true;
9641+}
9642+
9643+/**
9644+ * Returns a set of divisors for the desired target clock with the given
9645+ * refclk, or FALSE. The returned values represent the clock equation:
9646+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
9647+ */
9648+static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
9649+ int refclk,
9650+ struct psb_intel_clock_t *best_clock)
9651+{
9652+ struct drm_device *dev = crtc->dev;
9653+ struct psb_intel_clock_t clock;
9654+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
9655+ int err = target;
9656+
9657+ if (IS_I9XX(dev) && psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
9658+ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
9659+ /*
9660+ * For LVDS, if the panel is on, just rely on its current
9661+ * settings for dual-channel. We haven't figured out how to
9662+ * reliably set up different single/dual channel state, if we
9663+ * even can.
9664+ */
9665+ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
9666+ LVDS_CLKB_POWER_UP)
9667+ clock.p2 = limit->p2.p2_fast;
9668+ else
9669+ clock.p2 = limit->p2.p2_slow;
9670+ } else {
9671+ if (target < limit->p2.dot_limit)
9672+ clock.p2 = limit->p2.p2_slow;
9673+ else
9674+ clock.p2 = limit->p2.p2_fast;
9675+ }
9676+
9677+ memset(best_clock, 0, sizeof(*best_clock));
9678+
9679+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
9680+ clock.m1++) {
9681+ for (clock.m2 = limit->m2.min;
9682+ clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
9683+ clock.m2++) {
9684+ for (clock.n = limit->n.min;
9685+ clock.n <= limit->n.max; clock.n++) {
9686+ for (clock.p1 = limit->p1.min;
9687+ clock.p1 <= limit->p1.max;
9688+ clock.p1++) {
9689+ int this_err;
9690+
9691+ psb_intel_clock(dev, refclk, &clock);
9692+
9693+ if (!psb_intel_PLL_is_valid
9694+ (crtc, &clock))
9695+ continue;
9696+
9697+ this_err = abs(clock.dot - target);
9698+ if (this_err < err) {
9699+ *best_clock = clock;
9700+ err = this_err;
9701+ }
9702+ }
9703+ }
9704+ }
9705+ }
9706+
9707+ return err != target;
9708+}
9709+
9710+void psb_intel_wait_for_vblank(struct drm_device *dev)
9711+{
9712+	/* Wait for 20ms, i.e. one cycle at 50Hz. */
9713+ udelay(20000);
9714+}
9715+
9716+int psb_intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb)
9717+{
9718+ struct drm_device *dev = crtc->dev;
9719+ /* struct drm_i915_master_private *master_priv; */
9720+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
9721+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
9722+ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
9723+ int pipe = psb_intel_crtc->pipe;
9724+ unsigned long Start, Offset;
9725+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
9726+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
9727+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
9728+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
9729+ u32 dspcntr;
9730+
9731+ /* no fb bound */
9732+ if (!crtc->fb) {
9733+ DRM_DEBUG("No FB bound\n");
9734+ return 0;
9735+ }
9736+
9737+ if (IS_MRST(dev) && (pipe == 0))
9738+ dspbase = MRST_DSPABASE;
9739+
9740+ Start = mode_dev->bo_offset(dev, psbfb->bo);
9741+ Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
9742+
9743+ REG_WRITE(dspstride, crtc->fb->pitch);
9744+
9745+ dspcntr = REG_READ(dspcntr_reg);
9746+ switch (crtc->fb->bits_per_pixel) {
9747+ case 8:
9748+ dspcntr |= DISPPLANE_8BPP;
9749+ break;
9750+ case 16:
9751+ if (crtc->fb->depth == 15)
9752+ dspcntr |= DISPPLANE_15_16BPP;
9753+ else
9754+ dspcntr |= DISPPLANE_16BPP;
9755+ break;
9756+ case 24:
9757+ case 32:
9758+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
9759+ break;
9760+ default:
9761+ DRM_ERROR("Unknown color depth\n");
9762+ return -EINVAL;
9763+ }
9764+ REG_WRITE(dspcntr_reg, dspcntr);
9765+
9766+ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
9767+ if (IS_I965G(dev) || IS_MRST(dev)) {
9768+ REG_WRITE(dspbase, Offset);
9769+ REG_READ(dspbase);
9770+ REG_WRITE(dspsurf, Start);
9771+ REG_READ(dspsurf);
9772+ } else {
9773+ REG_WRITE(dspbase, Start + Offset);
9774+ REG_READ(dspbase);
9775+ }
9776+
9777+ if (!dev->primary->master)
9778+ return 0;
9779+
9780+#if 0 /* JB: Enable sarea later */
9781+ master_priv = dev->primary->master->driver_priv;
9782+ if (!master_priv->sarea_priv)
9783+ return 0;
9784+
9785+ switch (pipe) {
9786+ case 0:
9787+ master_priv->sarea_priv->planeA_x = x;
9788+ master_priv->sarea_priv->planeA_y = y;
9789+ break;
9790+ case 1:
9791+ master_priv->sarea_priv->planeB_x = x;
9792+ master_priv->sarea_priv->planeB_y = y;
9793+ break;
9794+ default:
9795+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
9796+ break;
9797+ }
9798+#endif
9799+}
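/*
 * Illustration (not part of the original patch): for a hypothetical
 * 32 bpp framebuffer with a pitch of 4096 bytes, panning to x = 8,
 * y = 2 makes the computation above
 *
 *	Offset = 2 * 4096 + 8 * (32 / 8) = 8224 bytes
 *
 * On i965/MRST this byte offset is written to DSPABASE/DSPBBASE while
 * the buffer start goes into DSPASURF/DSPBSURF; on older parts the sum
 * Start + Offset is written to the base register, as the code shows.
 */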
9800+
9801+
9802+
9803+/**
9804+ * Sets the power management mode of the pipe and plane.
9805+ *
9806+ * This code should probably grow support for turning the cursor off and back
9807+ * on appropriately at the same time as we're turning the pipe off/on.
9808+ */
9809+static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
9810+{
9811+ struct drm_device *dev = crtc->dev;
9812+ /* struct drm_i915_master_private *master_priv; */
9813+ /* struct drm_i915_private *dev_priv = dev->dev_private; */
9814+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
9815+ int pipe = psb_intel_crtc->pipe;
9816+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
9817+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
9818+ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
9819+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
9820+ u32 temp;
9821+ bool enabled;
9822+
9823+ /* XXX: When our outputs are all unaware of DPMS modes other than off
9824+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
9825+ */
9826+ switch (mode) {
9827+ case DRM_MODE_DPMS_ON:
9828+ case DRM_MODE_DPMS_STANDBY:
9829+ case DRM_MODE_DPMS_SUSPEND:
9830+ /* Enable the DPLL */
9831+ temp = REG_READ(dpll_reg);
9832+ if ((temp & DPLL_VCO_ENABLE) == 0) {
9833+ REG_WRITE(dpll_reg, temp);
9834+ REG_READ(dpll_reg);
9835+ /* Wait for the clocks to stabilize. */
9836+ udelay(150);
9837+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
9838+ REG_READ(dpll_reg);
9839+ /* Wait for the clocks to stabilize. */
9840+ udelay(150);
9841+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
9842+ REG_READ(dpll_reg);
9843+ /* Wait for the clocks to stabilize. */
9844+ udelay(150);
9845+ }
9846+
9847+ /* Enable the pipe */
9848+ temp = REG_READ(pipeconf_reg);
9849+ if ((temp & PIPEACONF_ENABLE) == 0)
9850+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
9851+
9852+ /* Enable the plane */
9853+ temp = REG_READ(dspcntr_reg);
9854+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
9855+ REG_WRITE(dspcntr_reg,
9856+ temp | DISPLAY_PLANE_ENABLE);
9857+ /* Flush the plane changes */
9858+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
9859+ }
9860+
9861+ psb_intel_crtc_load_lut(crtc);
9862+
9863+ /* Give the overlay scaler a chance to enable
9864+ * if it's on this pipe */
9865+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
9866+ break;
9867+ case DRM_MODE_DPMS_OFF:
9868+ /* Give the overlay scaler a chance to disable
9869+ * if it's on this pipe */
9870+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
9871+
9872+ /* Disable the VGA plane that we never use */
9873+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
9874+
9875+ /* Disable display plane */
9876+ temp = REG_READ(dspcntr_reg);
9877+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
9878+ REG_WRITE(dspcntr_reg,
9879+ temp & ~DISPLAY_PLANE_ENABLE);
9880+ /* Flush the plane changes */
9881+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
9882+ REG_READ(dspbase_reg);
9883+ }
9884+
9885+ if (!IS_I9XX(dev)) {
9886+ /* Wait for vblank for the disable to take effect */
9887+ psb_intel_wait_for_vblank(dev);
9888+ }
9889+
9890+ /* Next, disable display pipes */
9891+ temp = REG_READ(pipeconf_reg);
9892+ if ((temp & PIPEACONF_ENABLE) != 0) {
9893+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
9894+ REG_READ(pipeconf_reg);
9895+ }
9896+
9897+ /* Wait for vblank for the disable to take effect. */
9898+ psb_intel_wait_for_vblank(dev);
9899+
9900+ temp = REG_READ(dpll_reg);
9901+ if ((temp & DPLL_VCO_ENABLE) != 0) {
9902+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
9903+ REG_READ(dpll_reg);
9904+ }
9905+
9906+ /* Wait for the clocks to turn off. */
9907+ udelay(150);
9908+ break;
9909+ }
9910+
9911+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
9912+
9913+#if 0 /* JB: Add vblank support later */
9914+ if (enabled)
9915+ dev_priv->vblank_pipe |= (1 << pipe);
9916+ else
9917+ dev_priv->vblank_pipe &= ~(1 << pipe);
9918+#endif
9919+
9920+ psb_intel_crtc->dpms_mode = mode;
9921+
9922+#if 0 /* JB: Add sarea support later */
9923+ if (!dev->primary->master)
9924+ return 0;
9925+
9926+ master_priv = dev->primary->master->driver_priv;
9927+ if (!master_priv->sarea_priv)
9928+ return 0;
9929+
9930+ switch (pipe) {
9931+ case 0:
9932+ master_priv->sarea_priv->planeA_w =
9933+ enabled ? crtc->mode.hdisplay : 0;
9934+ master_priv->sarea_priv->planeA_h =
9935+ enabled ? crtc->mode.vdisplay : 0;
9936+ break;
9937+ case 1:
9938+ master_priv->sarea_priv->planeB_w =
9939+ enabled ? crtc->mode.hdisplay : 0;
9940+ master_priv->sarea_priv->planeB_h =
9941+ enabled ? crtc->mode.vdisplay : 0;
9942+ break;
9943+ default:
9944+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
9945+ break;
9946+ }
9947+#endif
9948+}
9949+
9950+static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
9951+{
9952+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
9953+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
9954+}
9955+
9956+static void psb_intel_crtc_commit(struct drm_crtc *crtc)
9957+{
9958+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
9959+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
9960+}
9961+
9962+void psb_intel_encoder_prepare(struct drm_encoder *encoder)
9963+{
9964+ struct drm_encoder_helper_funcs *encoder_funcs =
9965+ encoder->helper_private;
9966+ /* lvds has its own version of prepare see psb_intel_lvds_prepare */
9967+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
9968+}
9969+
9970+void psb_intel_encoder_commit(struct drm_encoder *encoder)
9971+{
9972+ struct drm_encoder_helper_funcs *encoder_funcs =
9973+ encoder->helper_private;
9974+ /* lvds has its own version of commit see psb_intel_lvds_commit */
9975+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
9976+}
9977+
9978+static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
9979+ struct drm_display_mode *mode,
9980+ struct drm_display_mode *adjusted_mode)
9981+{
9982+ return true;
9983+}
9984+
9985+
9986+/** Returns the core display clock speed for i830 - i945 */
9987+static int psb_intel_get_core_clock_speed(struct drm_device *dev)
9988+{
9989+#if 0 /* JB: Look into this more */
9990+ /* Core clock values taken from the published datasheets.
9991+	 * The 830 may go up to 166 MHz, which we should check.
9992+ */
9993+ if (IS_I945G(dev))
9994+ return 400000;
9995+ else if (IS_I915G(dev))
9996+ return 333000;
9997+ else if (IS_I945GM(dev) || IS_845G(dev))
9998+ return 200000;
9999+ else if (IS_I915GM(dev)) {
10000+ u16 gcfgc = 0;
10001+
10002+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
10003+
10004+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
10005+ return 133000;
10006+ else {
10007+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
10008+ case GC_DISPLAY_CLOCK_333_MHZ:
10009+ return 333000;
10010+ default:
10011+ case GC_DISPLAY_CLOCK_190_200_MHZ:
10012+ return 190000;
10013+ }
10014+ }
10015+ } else if (IS_I865G(dev))
10016+ return 266000;
10017+ else if (IS_I855(dev)) {
10018+#if 0
10019+ PCITAG bridge = pciTag(0, 0, 0);
10020+ /* This is always the host bridge */
10021+ u16 hpllcc = pciReadWord(bridge, HPLLCC);
10022+
10023+#endif
10024+ u16 hpllcc = 0;
10025+ /* Assume that the hardware is in the high speed state. This
10026+ * should be the default.
10027+ */
10028+ switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
10029+ case GC_CLOCK_133_200:
10030+ case GC_CLOCK_100_200:
10031+ return 200000;
10032+ case GC_CLOCK_166_250:
10033+ return 250000;
10034+ case GC_CLOCK_100_133:
10035+ return 133000;
10036+ }
10037+ } else /* 852, 830 */
10038+ return 133000;
10039+#endif
10040+ return 0; /* Silence gcc warning */
10041+}
10042+
10043+
10044+/**
10045+ * Return the pipe currently connected to the panel fitter,
10046+ * or -1 if the panel fitter is not present or not in use
10047+ */
10048+static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
10049+{
10050+ u32 pfit_control;
10051+
10052+ /* i830 doesn't have a panel fitter */
10053+ if (IS_I830(dev))
10054+ return -1;
10055+
10056+ pfit_control = REG_READ(PFIT_CONTROL);
10057+
10058+ /* See if the panel fitter is in use */
10059+ if ((pfit_control & PFIT_ENABLE) == 0)
10060+ return -1;
10061+
10062+ /* 965 can place panel fitter on either pipe */
10063+ if (IS_I965G(dev) || IS_MRST(dev))
10064+ return (pfit_control >> 29) & 0x3;
10065+
10066+ /* older chips can only use pipe 1 */
10067+ return 1;
10068+}
10069+
10070+static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
10071+ struct drm_display_mode *mode,
10072+ struct drm_display_mode *adjusted_mode,
10073+ int x, int y,
10074+ struct drm_framebuffer *old_fb)
10075+{
10076+ struct drm_device *dev = crtc->dev;
10077+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10078+ int pipe = psb_intel_crtc->pipe;
10079+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
10080+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
10081+ int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
10082+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
10083+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
10084+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
10085+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
10086+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
10087+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
10088+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
10089+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
10090+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
10091+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
10092+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
10093+ int refclk;
10094+ struct psb_intel_clock_t clock;
10095+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
10096+ bool ok, is_sdvo = false, is_dvo = false;
10097+ bool is_crt = false, is_lvds = false, is_tv = false;
10098+ struct drm_mode_config *mode_config = &dev->mode_config;
10099+ struct drm_connector *connector;
10100+
10101+ list_for_each_entry(connector, &mode_config->connector_list, head) {
10102+ struct psb_intel_output *psb_intel_output =
10103+ to_psb_intel_output(connector);
10104+
10105+ if (!connector->encoder
10106+ || connector->encoder->crtc != crtc)
10107+ continue;
10108+
10109+ switch (psb_intel_output->type) {
10110+ case INTEL_OUTPUT_LVDS:
10111+ is_lvds = true;
10112+ break;
10113+ case INTEL_OUTPUT_SDVO:
10114+ is_sdvo = true;
10115+ break;
10116+ case INTEL_OUTPUT_DVO:
10117+ is_dvo = true;
10118+ break;
10119+ case INTEL_OUTPUT_TVOUT:
10120+ is_tv = true;
10121+ break;
10122+ case INTEL_OUTPUT_ANALOG:
10123+ is_crt = true;
10124+ break;
10125+ }
10126+ }
10127+
10128+ if (IS_I9XX(dev))
10129+ refclk = 96000;
10130+ else
10131+ refclk = 48000;
10132+
10133+ ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
10134+ &clock);
10135+ if (!ok) {
10136+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
10137+ return 0;
10138+ }
10139+
10140+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
10141+
10142+ dpll = DPLL_VGA_MODE_DIS;
10143+ if (IS_I9XX(dev)) {
10144+ if (is_lvds) {
10145+ dpll |= DPLLB_MODE_LVDS;
10146+ if (IS_POULSBO(dev))
10147+ dpll |= DPLL_DVO_HIGH_SPEED;
10148+ } else
10149+ dpll |= DPLLB_MODE_DAC_SERIAL;
10150+ if (is_sdvo) {
10151+ dpll |= DPLL_DVO_HIGH_SPEED;
10152+ if (IS_I945G(dev) || IS_I945GM(dev)) {
10153+ int sdvo_pixel_multiply =
10154+ adjusted_mode->clock / mode->clock;
10155+ dpll |=
10156+ (sdvo_pixel_multiply -
10157+ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
10158+ }
10159+ }
10160+
10161+ /* compute bitmask from p1 value */
10162+ dpll |= (1 << (clock.p1 - 1)) << 16;
10163+ switch (clock.p2) {
10164+ case 5:
10165+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
10166+ break;
10167+ case 7:
10168+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
10169+ break;
10170+ case 10:
10171+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
10172+ break;
10173+ case 14:
10174+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
10175+ break;
10176+ }
10177+ if (IS_I965G(dev))
10178+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
10179+ } else {
10180+ if (is_lvds) {
10181+ dpll |=
10182+ (1 << (clock.p1 - 1)) <<
10183+ DPLL_FPA01_P1_POST_DIV_SHIFT;
10184+ } else {
10185+ if (clock.p1 == 2)
10186+ dpll |= PLL_P1_DIVIDE_BY_TWO;
10187+ else
10188+ dpll |=
10189+ (clock.p1 -
10190+ 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
10191+ if (clock.p2 == 4)
10192+ dpll |= PLL_P2_DIVIDE_BY_4;
10193+ }
10194+ }
10195+
10196+ if (is_tv) {
10197+ /* XXX: just matching BIOS for now */
10198+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
10199+ dpll |= 3;
10200+ }
10201+#if 0
10202+ else if (is_lvds)
10203+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
10204+#endif
10205+ else
10206+ dpll |= PLL_REF_INPUT_DREFCLK;
10207+
10208+ /* setup pipeconf */
10209+ pipeconf = REG_READ(pipeconf_reg);
10210+
10211+ /* Set up the display plane register */
10212+ dspcntr = DISPPLANE_GAMMA_ENABLE;
10213+
10214+ if (pipe == 0)
10215+ dspcntr |= DISPPLANE_SEL_PIPE_A;
10216+ else
10217+ dspcntr |= DISPPLANE_SEL_PIPE_B;
10218+
10219+ if (pipe == 0 && !IS_I965G(dev)) {
10220+ /* Enable pixel doubling when the dot clock is > 90%
10221+ * of the (display) core speed.
10222+ *
10223+ * XXX: No double-wide on 915GM pipe B.
10224+ * Is that the only reason for the
10225+ * pipe == 0 check?
10226+ */
10227+ if (mode->clock > psb_intel_get_core_clock_speed(dev) * 9 / 10)
10228+ pipeconf |= PIPEACONF_DOUBLE_WIDE;
10229+ else
10230+ pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
10231+ }
10232+
10233+ dspcntr |= DISPLAY_PLANE_ENABLE;
10234+ pipeconf |= PIPEACONF_ENABLE;
10235+ dpll |= DPLL_VCO_ENABLE;
10236+
10237+
10238+ /* Disable the panel fitter if it was on our pipe */
10239+ if (psb_intel_panel_fitter_pipe(dev) == pipe)
10240+ REG_WRITE(PFIT_CONTROL, 0);
10241+
10242+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
10243+ drm_mode_debug_printmodeline(mode);
10244+
10245+#if 0
10246+ if (!xf86ModesEqual(mode, adjusted_mode)) {
10247+ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
10248+ "Adjusted mode for pipe %c:\n",
10249+ pipe == 0 ? 'A' : 'B');
10250+ xf86PrintModeline(pScrn->scrnIndex, mode);
10251+ }
10252+ i830PrintPll("chosen", &clock);
10253+#endif
10254+
10255+ if (dpll & DPLL_VCO_ENABLE) {
10256+ REG_WRITE(fp_reg, fp);
10257+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
10258+ REG_READ(dpll_reg);
10259+ udelay(150);
10260+ }
10261+
10262+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
10263+ * This is an exception to the general rule that mode_set doesn't turn
10264+ * things on.
10265+ */
10266+ if (is_lvds) {
10267+ u32 lvds = REG_READ(LVDS);
10268+
10269+ lvds |=
10270+ LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
10271+ LVDS_PIPEB_SELECT;
10272+ /* Set the B0-B3 data pairs corresponding to
10273+ * whether we're going to
10274+ * set the DPLLs for dual-channel mode or not.
10275+ */
10276+ if (clock.p2 == 7)
10277+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
10278+ else
10279+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
10280+
10281+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
10282+ * appropriately here, but we need to look more
10283+ * thoroughly into how panels behave in the two modes.
10284+ */
10285+
10286+ REG_WRITE(LVDS, lvds);
10287+ REG_READ(LVDS);
10288+ }
10289+
10290+ REG_WRITE(fp_reg, fp);
10291+ REG_WRITE(dpll_reg, dpll);
10292+ REG_READ(dpll_reg);
10293+ /* Wait for the clocks to stabilize. */
10294+ udelay(150);
10295+
10296+ if (IS_I965G(dev)) {
10297+ int sdvo_pixel_multiply =
10298+ adjusted_mode->clock / mode->clock;
10299+ REG_WRITE(dpll_md_reg,
10300+ (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
10301+ ((sdvo_pixel_multiply -
10302+ 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
10303+ } else {
10304+ /* write it again -- the BIOS does, after all */
10305+ REG_WRITE(dpll_reg, dpll);
10306+ }
10307+ REG_READ(dpll_reg);
10308+ /* Wait for the clocks to stabilize. */
10309+ udelay(150);
10310+
10311+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
10312+ ((adjusted_mode->crtc_htotal - 1) << 16));
10313+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
10314+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
10315+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
10316+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
10317+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
10318+ ((adjusted_mode->crtc_vtotal - 1) << 16));
10319+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
10320+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
10321+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
10322+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
10323+ /* pipesrc and dspsize control the size that is scaled from,
10324+ * which should always be the user's requested size.
10325+ */
10326+ REG_WRITE(dspsize_reg,
10327+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
10328+ REG_WRITE(dsppos_reg, 0);
10329+ REG_WRITE(pipesrc_reg,
10330+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
10331+ REG_WRITE(pipeconf_reg, pipeconf);
10332+ REG_READ(pipeconf_reg);
10333+
10334+ psb_intel_wait_for_vblank(dev);
10335+
10336+ REG_WRITE(dspcntr_reg, dspcntr);
10337+
10338+ /* Flush the plane changes */
10339+ {
10340+ struct drm_crtc_helper_funcs *crtc_funcs =
10341+ crtc->helper_private;
10342+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
10343+ }
10344+
10345+ psb_intel_wait_for_vblank(dev);
10346+
10347+ return 0;
10348+}
10349+
10350+/** Loads the palette/gamma unit for the CRTC with the prepared values */
10351+void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
10352+{
10353+ struct drm_device *dev = crtc->dev;
10354+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10355+ int palreg = (psb_intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
10356+ int i;
10357+
10358+ /* The clocks have to be on to load the palette. */
10359+ if (!crtc->enabled)
10360+ return;
10361+
10362+ for (i = 0; i < 256; i++) {
10363+ REG_WRITE(palreg + 4 * i,
10364+ (psb_intel_crtc->lut_r[i] << 16) |
10365+ (psb_intel_crtc->lut_g[i] << 8) |
10366+ psb_intel_crtc->lut_b[i]);
10367+ }
10368+}
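/*
 * Illustration (not part of the original patch): each palette slot
 * packs the 8-bit components as (R << 16) | (G << 8) | B, so
 * hypothetical values lut_r = 0x80, lut_g = 0x40, lut_b = 0x20
 * result in a write of 0x00804020 to PALETTE_x + 4 * i.
 */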
10369+
10370+static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
10371+ struct drm_file *file_priv,
10372+ uint32_t handle,
10373+ uint32_t width, uint32_t height)
10374+{
10375+ struct drm_device *dev = crtc->dev;
10376+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10377+ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
10378+ int pipe = psb_intel_crtc->pipe;
10379+ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
10380+ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
10381+ uint32_t temp;
10382+ size_t addr = 0;
10383+ size_t size;
10384+ void *bo;
10385+ int ret;
10386+
10387+ DRM_DEBUG("\n");
10388+
10389+	/* if we want to turn off the cursor, ignore width and height */
10390+ if (!handle) {
10391+ DRM_DEBUG("cursor off\n");
10392+		/* turn off the cursor */
10393+ temp = 0;
10394+ temp |= CURSOR_MODE_DISABLE;
10395+
10396+ REG_WRITE(control, temp);
10397+ REG_WRITE(base, 0);
10398+
10399+ /* unpin the old bo */
10400+ if (psb_intel_crtc->cursor_bo) {
10401+ mode_dev->bo_unpin_for_scanout(dev,
10402+ psb_intel_crtc->
10403+ cursor_bo);
10404+ psb_intel_crtc->cursor_bo = NULL;
10405+ }
10406+
10407+ return 0;
10408+ }
10409+
10410+ /* Currently we only support 64x64 cursors */
10411+ if (width != 64 || height != 64) {
10412+ DRM_ERROR("we currently only support 64x64 cursors\n");
10413+ return -EINVAL;
10414+ }
10415+
10416+ bo = mode_dev->bo_from_handle(dev, file_priv, handle);
10417+ if (!bo)
10418+ return -ENOENT;
10419+
10420+ ret = mode_dev->bo_pin_for_scanout(dev, bo);
10421+ if (ret)
10422+ return ret;
10423+
10424+ size = mode_dev->bo_size(dev, bo);
10425+ if (size < width * height * 4) {
10426+		DRM_ERROR("buffer is too small\n");
10427+ return -ENOMEM;
10428+ }
10429+
10430+ addr = mode_dev->bo_size(dev, bo);
10431+ if (mode_dev->cursor_needs_physical)
10432+ addr = dev->agp->base + addr;
10433+
10434+ psb_intel_crtc->cursor_addr = addr;
10435+ temp = 0;
10436+ /* set the pipe for the cursor */
10437+ temp |= (pipe << 28);
10438+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
10439+
10440+ REG_WRITE(control, temp);
10441+ REG_WRITE(base, addr);
10442+
10443+ /* unpin the old bo */
10444+ if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo) {
10445+ mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo);
10446+ psb_intel_crtc->cursor_bo = bo;
10447+ }
10448+
10449+ return 0;
10450+}
10451+
10452+static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
10453+{
10454+ struct drm_device *dev = crtc->dev;
10455+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10456+ int pipe = psb_intel_crtc->pipe;
10457+ uint32_t temp = 0;
10458+ uint32_t adder;
10459+
10460+ if (x < 0) {
10461+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
10462+ x = -x;
10463+ }
10464+ if (y < 0) {
10465+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
10466+ y = -y;
10467+ }
10468+
10469+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
10470+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
10471+
10472+ adder = psb_intel_crtc->cursor_addr;
10473+ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
10474+ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
10475+
10476+ return 0;
10477+}
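/*
 * Illustration (not part of the original patch): the cursor position
 * register uses sign/magnitude fields rather than two's complement, so
 * a hypothetical call psb_intel_crtc_cursor_move(crtc, -5, 10) builds
 *
 *	temp = (CURSOR_POS_SIGN << CURSOR_X_SHIFT)
 *	     | ((5 & CURSOR_POS_MASK) << CURSOR_X_SHIFT)
 *	     | ((10 & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
 *
 * i.e. |x| and |y| with a per-axis sign bit, followed by a write of the
 * cursor base register (CURABASE/CURBBASE), as the code above does.
 */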
10478+
10479+/** Sets the color ramps on behalf of RandR */
10480+void psb_intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
10481+ u16 blue, int regno)
10482+{
10483+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10484+
10485+ psb_intel_crtc->lut_r[regno] = red >> 8;
10486+ psb_intel_crtc->lut_g[regno] = green >> 8;
10487+ psb_intel_crtc->lut_b[regno] = blue >> 8;
10488+}
10489+
10490+static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
10491+ u16 *green, u16 *blue, uint32_t size)
10492+{
10493+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10494+ int i;
10495+
10496+ if (size != 256)
10497+ return;
10498+
10499+ for (i = 0; i < 256; i++) {
10500+ psb_intel_crtc->lut_r[i] = red[i] >> 8;
10501+ psb_intel_crtc->lut_g[i] = green[i] >> 8;
10502+ psb_intel_crtc->lut_b[i] = blue[i] >> 8;
10503+ }
10504+
10505+ psb_intel_crtc_load_lut(crtc);
10506+}
10507+
10508+/**
10509+ * Get a pipe with a simple mode set on it for doing load-based monitor
10510+ * detection.
10511+ *
10512+ * It will be up to the load-detect code to adjust the pipe as appropriate for
10513+ * its requirements. The pipe will be connected to no other outputs.
10514+ *
10515+ * Currently this code will only succeed if there is a pipe with no outputs
10516+ * configured for it. In the future, it could choose to temporarily disable
10517+ * some outputs to free up a pipe for its use.
10518+ *
10519+ * \return crtc, or NULL if no pipes are available.
10520+ */
10521+
10522+/* VESA 640x480x72Hz mode to set on the pipe */
10523+static struct drm_display_mode load_detect_mode = {
10524+ DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10525+ 704, 832, 0, 480, 489, 491, 520, 0,
10526+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10527+};
10528+
10529+struct drm_crtc *psb_intel_get_load_detect_pipe(struct psb_intel_output
10530+ *psb_intel_output,
10531+ struct drm_display_mode *mode,
10532+ int *dpms_mode)
10533+{
10534+ struct psb_intel_crtc *psb_intel_crtc;
10535+ struct drm_crtc *possible_crtc;
10536+ struct drm_crtc *supported_crtc = NULL;
10537+ struct drm_encoder *encoder = &psb_intel_output->enc;
10538+ struct drm_crtc *crtc = NULL;
10539+ struct drm_device *dev = encoder->dev;
10540+ struct drm_encoder_helper_funcs *encoder_funcs =
10541+ encoder->helper_private;
10542+ struct drm_crtc_helper_funcs *crtc_funcs;
10543+ int i = -1;
10544+
10545+ /*
10546+ * Algorithm gets a little messy:
10547+ * - if the connector already has an assigned crtc, use it (but make
10548+ * sure it's on first)
10549+ * - try to find the first unused crtc that can drive this connector,
10550+ * and use that if we find one
10551+ * - if there are no unused crtcs available, try to use the first
10552+ * one we found that supports the connector
10553+ */
10554+
10555+ /* See if we already have a CRTC for this connector */
10556+ if (encoder->crtc) {
10557+ crtc = encoder->crtc;
10558+ /* Make sure the crtc and connector are running */
10559+ psb_intel_crtc = to_psb_intel_crtc(crtc);
10560+ *dpms_mode = psb_intel_crtc->dpms_mode;
10561+ if (psb_intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
10562+ crtc_funcs = crtc->helper_private;
10563+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
10564+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
10565+ }
10566+ return crtc;
10567+ }
10568+
10569+ /* Find an unused one (if possible) */
10570+ list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list,
10571+ head) {
10572+ i++;
10573+ if (!(encoder->possible_crtcs & (1 << i)))
10574+ continue;
10575+ if (!possible_crtc->enabled) {
10576+ crtc = possible_crtc;
10577+ break;
10578+ }
10579+ if (!supported_crtc)
10580+ supported_crtc = possible_crtc;
10581+ }
10582+
10583+ /*
10584+ * If we didn't find an unused CRTC, don't use any.
10585+ */
10586+ if (!crtc)
10587+ return NULL;
10588+
10589+ encoder->crtc = crtc;
10590+ psb_intel_output->load_detect_temp = true;
10591+
10592+ psb_intel_crtc = to_psb_intel_crtc(crtc);
10593+ *dpms_mode = psb_intel_crtc->dpms_mode;
10594+
10595+ if (!crtc->enabled) {
10596+ if (!mode)
10597+ mode = &load_detect_mode;
10598+ drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb);
10599+ } else {
10600+ if (psb_intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
10601+ crtc_funcs = crtc->helper_private;
10602+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
10603+ }
10604+
10605+ /* Add this connector to the crtc */
10606+ encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode);
10607+ encoder_funcs->commit(encoder);
10608+ }
10609+ /* let the connector get through one full cycle before testing */
10610+ psb_intel_wait_for_vblank(dev);
10611+
10612+ return crtc;
10613+}
10614+
10615+void psb_intel_release_load_detect_pipe(struct psb_intel_output *psb_intel_output,
10616+ int dpms_mode)
10617+{
10618+ struct drm_encoder *encoder = &psb_intel_output->enc;
10619+ struct drm_device *dev = encoder->dev;
10620+ struct drm_crtc *crtc = encoder->crtc;
10621+ struct drm_encoder_helper_funcs *encoder_funcs =
10622+ encoder->helper_private;
10623+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
10624+
10625+ if (psb_intel_output->load_detect_temp) {
10626+ encoder->crtc = NULL;
10627+ psb_intel_output->load_detect_temp = false;
10628+ crtc->enabled = drm_helper_crtc_in_use(crtc);
10629+ drm_helper_disable_unused_functions(dev);
10630+ }
10631+
10632+ /* Switch crtc and output back off if necessary */
10633+ if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
10634+ if (encoder->crtc == crtc)
10635+ encoder_funcs->dpms(encoder, dpms_mode);
10636+ crtc_funcs->dpms(crtc, dpms_mode);
10637+ }
10638+}
10639+
10640+/* Returns the clock of the currently programmed mode of the given pipe. */
10641+static int psb_intel_crtc_clock_get(struct drm_device *dev,
10642+ struct drm_crtc *crtc)
10643+{
10644+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10645+ int pipe = psb_intel_crtc->pipe;
10646+ u32 dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
10647+ u32 fp;
10648+ struct psb_intel_clock_t clock;
10649+
10650+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10651+ fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
10652+ else
10653+ fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
10654+
10655+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10656+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10657+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10658+ if (IS_I9XX(dev)) {
10659+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10660+ DPLL_FPA01_P1_POST_DIV_SHIFT);
10661+
10662+ switch (dpll & DPLL_MODE_MASK) {
10663+ case DPLLB_MODE_DAC_SERIAL:
10664+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10665+ 5 : 10;
10666+ break;
10667+ case DPLLB_MODE_LVDS:
10668+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10669+ 7 : 14;
10670+ break;
10671+ default:
10672+ DRM_DEBUG("Unknown DPLL mode %08x in programmed "
10673+ "mode\n", (int) (dpll & DPLL_MODE_MASK));
10674+ return 0;
10675+ }
10676+
10677+		/* XXX: Handle the 100MHz refclk */
10678+ i9xx_clock(96000, &clock);
10679+ } else {
10680+ bool is_lvds = (pipe == 1)
10681+ && (REG_READ(LVDS) & LVDS_PORT_EN);
10682+
10683+ if (is_lvds) {
10684+ clock.p1 =
10685+ ffs((dpll &
10686+ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10687+ DPLL_FPA01_P1_POST_DIV_SHIFT);
10688+ clock.p2 = 14;
10689+
10690+ if ((dpll & PLL_REF_INPUT_MASK) ==
10691+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
10692+ /* XXX: might not be 66MHz */
10693+ i8xx_clock(66000, &clock);
10694+ } else
10695+ i8xx_clock(48000, &clock);
10696+ } else {
10697+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
10698+ clock.p1 = 2;
10699+ else {
10700+ clock.p1 =
10701+ ((dpll &
10702+ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10703+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10704+ }
10705+ if (dpll & PLL_P2_DIVIDE_BY_4)
10706+ clock.p2 = 4;
10707+ else
10708+ clock.p2 = 2;
10709+
10710+ i8xx_clock(48000, &clock);
10711+ }
10712+ }
10713+
10714+ /* XXX: It would be nice to validate the clocks, but we can't reuse
10715+ * i830PllIsValid() because it relies on the xf86_config connector
10716+ * configuration being accurate, which it isn't necessarily.
10717+ */
10718+
10719+ return clock.dot;
10720+}
10721+
10722+/** Returns the currently programmed mode of the given pipe. */
10723+struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
10724+ struct drm_crtc *crtc)
10725+{
10726+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10727+ int pipe = psb_intel_crtc->pipe;
10728+ struct drm_display_mode *mode;
10729+ int htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
10730+ int hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
10731+ int vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
10732+ int vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
10733+
10734+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10735+ if (!mode)
10736+ return NULL;
10737+
10738+ mode->clock = psb_intel_crtc_clock_get(dev, crtc);
10739+ mode->hdisplay = (htot & 0xffff) + 1;
10740+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10741+ mode->hsync_start = (hsync & 0xffff) + 1;
10742+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
10743+ mode->vdisplay = (vtot & 0xffff) + 1;
10744+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
10745+ mode->vsync_start = (vsync & 0xffff) + 1;
10746+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
10747+
10748+ drm_mode_set_name(mode);
10749+ drm_mode_set_crtcinfo(mode, 0);
10750+
10751+ return mode;
10752+}
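/*
 * Illustration (not part of the original patch): the timing registers
 * store "value - 1" with the total in the high 16 bits, so a
 * hypothetical HTOTAL_A readout of 0x031f027f decodes as
 *
 *	hdisplay = (0x031f027f & 0xffff) + 1                 = 640
 *	htotal   = ((0x031f027f & 0xffff0000) >> 16) + 1     = 800
 *
 * matching the 640-wide/800-total line timing of a standard 640x480
 * mode.
 */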
10753+
10754+static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
10755+{
10756+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10757+
10758+ drm_crtc_cleanup(crtc);
10759+ kfree(psb_intel_crtc);
10760+}
10761+
10762+static const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
10763+ .dpms = psb_intel_crtc_dpms,
10764+ .mode_fixup = psb_intel_crtc_mode_fixup,
10765+ .mode_set = psb_intel_crtc_mode_set,
10766+ .mode_set_base = psb_intel_pipe_set_base,
10767+ .prepare = psb_intel_crtc_prepare,
10768+ .commit = psb_intel_crtc_commit,
10769+};
10770+
10771+static const struct drm_crtc_helper_funcs mrst_helper_funcs;
10772+
10773+const struct drm_crtc_funcs psb_intel_crtc_funcs = {
10774+ .cursor_set = psb_intel_crtc_cursor_set,
10775+ .cursor_move = psb_intel_crtc_cursor_move,
10776+ .gamma_set = psb_intel_crtc_gamma_set,
10777+ .set_config = drm_crtc_helper_set_config,
10778+ .destroy = psb_intel_crtc_destroy,
10779+};
10780+
10781+
10782+void psb_intel_crtc_init(struct drm_device *dev, int pipe,
10783+ struct psb_intel_mode_device *mode_dev)
10784+{
10785+ struct psb_intel_crtc *psb_intel_crtc;
10786+ int i;
10787+
10788+#if PRINT_JLIU7
10789+ DRM_INFO("JLIU7 enter psb_intel_crtc_init \n");
10790+#endif /* PRINT_JLIU7 */
10791+
10792+	/* We allocate an extra array of drm_connector pointers
10793+ * for fbdev after the crtc */
10794+ psb_intel_crtc =
10795+ kzalloc(sizeof(struct psb_intel_crtc) +
10796+ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
10797+ GFP_KERNEL);
10798+ if (psb_intel_crtc == NULL)
10799+ return;
10800+
10801+ drm_crtc_init(dev, &psb_intel_crtc->base, &psb_intel_crtc_funcs);
10802+
10803+ drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
10804+ psb_intel_crtc->pipe = pipe;
10805+ for (i = 0; i < 256; i++) {
10806+ psb_intel_crtc->lut_r[i] = i;
10807+ psb_intel_crtc->lut_g[i] = i;
10808+ psb_intel_crtc->lut_b[i] = i;
10809+ }
10810+
10811+ psb_intel_crtc->mode_dev = mode_dev;
10812+ psb_intel_crtc->cursor_addr = 0;
10813+ psb_intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
10814+
10815+ if (IS_MRST(dev)) {
10816+ drm_crtc_helper_add(&psb_intel_crtc->base, &mrst_helper_funcs);
10817+ } else {
10818+ drm_crtc_helper_add(&psb_intel_crtc->base,
10819+ &psb_intel_helper_funcs);
10820+ }
10821+
10822+	/* Set up the array of drm_connector pointers */
10823+ psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
10824+ psb_intel_crtc->mode_set.connectors =
10825+ (struct drm_connector **) (psb_intel_crtc + 1);
10826+ psb_intel_crtc->mode_set.num_connectors = 0;
10827+
10828+#if 0 /* JB: not drop, What should go in here? */
10829+ if (i915_fbpercrtc)
10830+#endif
10831+}
10832+
10833+struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
10834+{
10835+ struct drm_crtc *crtc = NULL;
10836+
10837+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10838+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10839+ if (psb_intel_crtc->pipe == pipe)
10840+ break;
10841+ }
10842+ return crtc;
10843+}
10844+
10845+int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
10846+{
10847+ int index_mask = 0;
10848+ struct drm_connector *connector;
10849+ int entry = 0;
10850+
10851+ list_for_each_entry(connector, &dev->mode_config.connector_list,
10852+ head) {
10853+ struct psb_intel_output *psb_intel_output =
10854+ to_psb_intel_output(connector);
10855+ if (type_mask & (1 << psb_intel_output->type))
10856+ index_mask |= (1 << entry);
10857+ entry++;
10858+ }
10859+ return index_mask;
10860+}
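/*
 * Illustration (not part of the original patch): with a hypothetical
 * connector list of { LVDS, ANALOG, SDVO } and
 * type_mask = (1 << INTEL_OUTPUT_ANALOG) | (1 << INTEL_OUTPUT_SDVO),
 * the loop above walks entries 0, 1, 2 in order and sets a bit only
 * for the matching types, returning
 * index_mask = (1 << 1) | (1 << 2) = 0x6.
 */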
10861+
10862+#if 0 /* JB: Should be per device */
10863+static void psb_intel_setup_outputs(struct drm_device *dev)
10864+{
10865+ struct drm_connector *connector;
10866+
10867+ psb_intel_crt_init(dev);
10868+
10869+ /* Set up integrated LVDS */
10870+ if (IS_MOBILE(dev) && !IS_I830(dev))
10871+ psb_intel_lvds_init(dev);
10872+
10873+ if (IS_I9XX(dev)) {
10874+ psb_intel_sdvo_init(dev, SDVOB);
10875+ psb_intel_sdvo_init(dev, SDVOC);
10876+ } else
10877+ psb_intel_dvo_init(dev);
10878+
10879+ if (IS_I9XX(dev) && !IS_I915G(dev))
10880+ psb_intel_tv_init(dev);
10881+
10882+ list_for_each_entry(connector, &dev->mode_config.connector_list,
10883+ head) {
10884+ struct psb_intel_output *psb_intel_output =
10885+ to_psb_intel_output(connector);
10886+ struct drm_encoder *encoder = &psb_intel_output->enc;
10887+ int crtc_mask = 0, clone_mask = 0;
10888+
10889+ /* valid crtcs */
10890+ switch (psb_intel_output->type) {
10891+ case INTEL_OUTPUT_DVO:
10892+ case INTEL_OUTPUT_SDVO:
10893+ crtc_mask = ((1 << 0) | (1 << 1));
10894+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
10895+ (1 << INTEL_OUTPUT_DVO) |
10896+ (1 << INTEL_OUTPUT_SDVO));
10897+ break;
10898+ case INTEL_OUTPUT_ANALOG:
10899+ crtc_mask = ((1 << 0) | (1 << 1));
10900+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
10901+ (1 << INTEL_OUTPUT_DVO) |
10902+ (1 << INTEL_OUTPUT_SDVO));
10903+ break;
10904+ case INTEL_OUTPUT_LVDS:
10905+ crtc_mask = (1 << 1);
10906+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
10907+ break;
10908+ case INTEL_OUTPUT_TVOUT:
10909+ crtc_mask = ((1 << 0) | (1 << 1));
10910+ clone_mask = (1 << INTEL_OUTPUT_TVOUT);
10911+ break;
10912+ }
10913+ encoder->possible_crtcs = crtc_mask;
10914+ encoder->possible_clones =
10915+ psb_intel_connector_clones(dev, clone_mask);
10916+ }
10917+}
10918+#endif
10919+
10920+#if 0 /* JB: Rework framebuffer code into something non device-specific */
10921+static void psb_intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
10922+{
10923+ struct psb_intel_framebuffer *psb_intel_fb = to_psb_intel_framebuffer(fb);
10924+ struct drm_device *dev = fb->dev;
10925+
10926+ if (fb->fbdev)
10927+ intelfb_remove(dev, fb);
10928+
10929+ drm_framebuffer_cleanup(fb);
10930+ drm_gem_object_unreference(fb->mm_private);
10931+
10932+ kfree(psb_intel_fb);
10933+}
10934+
10935+static int psb_intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
10936+ struct drm_file *file_priv,
10937+ unsigned int *handle)
10938+{
10939+ struct drm_gem_object *object = fb->mm_private;
10940+
10941+ return drm_gem_handle_create(file_priv, object, handle);
10942+}
10943+
10944+static const struct drm_framebuffer_funcs psb_intel_fb_funcs = {
10945+ .destroy = psb_intel_user_framebuffer_destroy,
10946+ .create_handle = psb_intel_user_framebuffer_create_handle,
10947+};
10948+
10949+struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device *dev,
10950+ struct drm_mode_fb_cmd
10951+ *mode_cmd,
10952+ void *mm_private)
10953+{
10954+ struct psb_intel_framebuffer *psb_intel_fb;
10955+
10956+ psb_intel_fb = kzalloc(sizeof(*psb_intel_fb), GFP_KERNEL);
10957+ if (!psb_intel_fb)
10958+ return NULL;
10959+
10960+ if (!drm_framebuffer_init(dev, &psb_intel_fb->base, &psb_intel_fb_funcs))
10961+ return NULL;
10962+
10963+ drm_helper_mode_fill_fb_struct(&psb_intel_fb->base, mode_cmd);
10964+
10965+ return &psb_intel_fb->base;
10966+}
10967+
10968+
10969+static struct drm_framebuffer *psb_intel_user_framebuffer_create(struct
10970+ drm_device
10971+ *dev,
10972+ struct
10973+ drm_file
10974+ *filp,
10975+ struct
10976+ drm_mode_fb_cmd
10977+ *mode_cmd)
10978+{
10979+ struct drm_gem_object *obj;
10980+
10981+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
10982+ if (!obj)
10983+ return NULL;
10984+
10985+ return psb_intel_framebuffer_create(dev, mode_cmd, obj);
10986+}
10987+
10988+static int psb_intel_insert_new_fb(struct drm_device *dev,
10989+ struct drm_file *file_priv,
10990+ struct drm_framebuffer *fb,
10991+ struct drm_mode_fb_cmd *mode_cmd)
10992+{
10993+ struct psb_intel_framebuffer *psb_intel_fb;
10994+ struct drm_gem_object *obj;
10995+ struct drm_crtc *crtc;
10996+
10997+ psb_intel_fb = to_psb_intel_framebuffer(fb);
10998+
10999+ mutex_lock(&dev->struct_mutex);
11000+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
11001+
11002+ if (!obj) {
11003+ mutex_unlock(&dev->struct_mutex);
11004+ return -EINVAL;
11005+ }
11006+ drm_gem_object_unreference(psb_intel_fb->base.mm_private);
11007+ drm_helper_mode_fill_fb_struct(fb, mode_cmd, obj);
11008+ mutex_unlock(&dev->struct_mutex);
11009+
11010+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
11011+ if (crtc->fb == fb) {
11012+ struct drm_crtc_helper_funcs *crtc_funcs =
11013+ crtc->helper_private;
11014+ crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y);
11015+ }
11016+ }
11017+ return 0;
11018+}
11019+
11020+static const struct drm_mode_config_funcs psb_intel_mode_funcs = {
11021+ .resize_fb = psb_intel_insert_new_fb,
11022+ .fb_create = psb_intel_user_framebuffer_create,
11023+ .fb_changed = intelfb_probe,
11024+};
11025+#endif
11026+
11027+#if 0 /* Should be per device */
11028+void psb_intel_modeset_init(struct drm_device *dev)
11029+{
11030+ int num_pipe;
11031+ int i;
11032+
11033+ drm_mode_config_init(dev);
11034+
11035+ dev->mode_config.min_width = 0;
11036+ dev->mode_config.min_height = 0;
11037+
11038+ dev->mode_config.funcs = (void *) &psb_intel_mode_funcs;
11039+
11040+ if (IS_I965G(dev)) {
11041+ dev->mode_config.max_width = 8192;
11042+ dev->mode_config.max_height = 8192;
11043+ } else {
11044+ dev->mode_config.max_width = 2048;
11045+ dev->mode_config.max_height = 2048;
11046+ }
11047+
11048+ /* set memory base */
11049+ if (IS_I9XX(dev))
11050+ dev->mode_config.fb_base =
11051+ pci_resource_start(dev->pdev, 2);
11052+ else
11053+ dev->mode_config.fb_base =
11054+ pci_resource_start(dev->pdev, 0);
11055+
11056+ if (IS_MOBILE(dev) || IS_I9XX(dev))
11057+ num_pipe = 2;
11058+ else
11059+ num_pipe = 1;
11060+ DRM_DEBUG("%d display pipe%s available.\n",
11061+ num_pipe, num_pipe > 1 ? "s" : "");
11062+
11063+ for (i = 0; i < num_pipe; i++)
11064+ psb_intel_crtc_init(dev, i);
11065+
11066+ psb_intel_setup_outputs(dev);
11067+
11068+ /* setup fbs */
11069+ /* drm_initial_config(dev, false); */
11070+}
11071+#endif
11072+
11073+void psb_intel_modeset_cleanup(struct drm_device *dev)
11074+{
11075+ drm_mode_config_cleanup(dev);
11076+}
11077+
11078+
11079+/* The current Intel driver doesn't take advantage of encoders;
11080+ * always give back the encoder for the connector.
11081+ */
11082+struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
11083+{
11084+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
11085+
11086+ return &psb_intel_output->enc;
11087+}
11088+
11089+/* MRST_PLATFORM start */
11090+
11091+#if DUMP_REGISTER
11092+void dump_dc_registers(struct drm_device *dev)
11093+{
11094+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
11095+ unsigned int i = 0;
11096+
11097+ DRM_INFO("jliu7 dump_dc_registers\n");
11098+
11099+
11100+ if (0x80000000 & REG_READ(0x70008)) {
11101+ for (i = 0x20a0; i < 0x20af; i += 4) {
11102+ DRM_INFO("jliu7 interrupt register=0x%x, value=%x\n", i, (unsigned int) REG_READ(i));
11103+ }
11104+
11105+ for (i = 0xf014; i < 0xf047; i += 4) {
11106+ DRM_INFO
11107+ ("jliu7 pipe A dpll register=0x%x, value=%x\n",
11108+ i, (unsigned int) REG_READ(i));
11109+ }
11110+
11111+ for (i = 0x60000; i < 0x6005f; i += 4) {
11112+ DRM_INFO
11113+ ("jliu7 pipe A timing register=0x%x, value=%x\n",
11114+ i, (unsigned int) REG_READ(i));
11115+ }
11116+
11117+ for (i = 0x61140; i < 0x61143; i += 4) {
11118+ DRM_INFO("jliu7 SDBOB register=0x%x, value=%x\n",
11119+ i, (unsigned int) REG_READ(i));
11120+ }
11121+
11122+ for (i = 0x61180; i < 0x6123F; i += 4) {
11123+ DRM_INFO
11124+ ("jliu7 LVDS PORT register=0x%x, value=%x\n",
11125+ i, (unsigned int) REG_READ(i));
11126+ }
11127+
11128+ for (i = 0x61254; i < 0x612AB; i += 4) {
11129+ DRM_INFO("jliu7 BLC register=0x%x, value=%x\n",
11130+ i, (unsigned int) REG_READ(i));
11131+ }
11132+
11133+ for (i = 0x70000; i < 0x70047; i += 4) {
11134+ DRM_INFO
11135+ ("jliu7 PIPE A control register=0x%x, value=%x\n",
11136+ i, (unsigned int) REG_READ(i));
11137+ }
11138+
11139+ for (i = 0x70180; i < 0x7020b; i += 4) {
11140+ DRM_INFO("jliu7 display A control register=0x%x,"
11141+ "value=%x\n", i,
11142+ (unsigned int) REG_READ(i));
11143+ }
11144+
11145+ for (i = 0x71400; i < 0x71403; i += 4) {
11146+ DRM_INFO
11147+ ("jliu7 VGA Display Plane Control register=0x%x,"
11148+ "value=%x\n", i, (unsigned int) REG_READ(i));
11149+ }
11150+ }
11151+
11152+ if (0x80000000 & REG_READ(0x71008)) {
11153+ for (i = 0x61000; i < 0x6105f; i += 4) {
11154+ DRM_INFO
11155+ ("jliu7 pipe B timing register=0x%x, value=%x\n",
11156+ i, (unsigned int) REG_READ(i));
11157+ }
11158+
11159+ for (i = 0x71000; i < 0x71047; i += 4) {
11160+ DRM_INFO
11161+ ("jliu7 PIPE B control register=0x%x, value=%x\n",
11162+ i, (unsigned int) REG_READ(i));
11163+ }
11164+
11165+ for (i = 0x71180; i < 0x7120b; i += 4) {
11166+ DRM_INFO("jliu7 display B control register=0x%x,"
11167+ "value=%x\n", i,
11168+ (unsigned int) REG_READ(i));
11169+ }
11170+ }
11171+#if 0
11172+ for (i = 0x70080; i < 0x700df; i += 4) {
11173+ DRM_INFO("jliu7 cursor A & B register=0x%x, value=%x\n",
11174+ i, (unsigned int) REG_READ(i));
11175+ }
11176+#endif
11177+
11178+}
11179+
11180+void dump_dsi_registers(struct drm_device *dev)
11181+{
11182+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
11183+ unsigned int i = 0;
11184+
11185+ DRM_INFO("jliu7 dump_dsi_registers\n");
11186+
11187+ for (i = 0xb000; i < 0xb064; i += 4) {
11188+ DRM_INFO("jliu7 MIPI IP register=0x%x, value=%x\n", i,
11189+ (unsigned int) REG_READ(i));
11190+ }
11191+
11192+ i = 0xb104;
11193+ DRM_INFO("jliu7 MIPI control register=0x%x, value=%x\n",
11194+ i, (unsigned int) REG_READ(i));
11195+}
11196+#endif /* DUMP_REGISTER */
11197+
11198+
11199+struct mrst_limit_t {
11200+ struct psb_intel_range_t dot, m, p1;
11201+};
11202+
11203+struct mrst_clock_t {
11204+ /* derived values */
11205+ int dot;
11206+ int m;
11207+ int p1;
11208+};
11209+
11210+#define MRST_LIMIT_LVDS_100L 0
11211+#define MRST_LIMIT_LVDS_83 1
11212+#define MRST_LIMIT_LVDS_100 2
11213+
11214+#define MRST_DOT_MIN 19750
11215+#define MRST_DOT_MAX 120000
11216+#define MRST_M_MIN_100L 20
11217+#define MRST_M_MIN_100 10
11218+#define MRST_M_MIN_83 12
11219+#define MRST_M_MAX_100L 34
11220+#define MRST_M_MAX_100 17
11221+#define MRST_M_MAX_83 20
11222+#define MRST_P1_MIN 2
11223+#define MRST_P1_MAX_0 7
11224+#define MRST_P1_MAX_1 8
11225+
11226+static const struct mrst_limit_t mrst_limits[] = {
11227+ { /* MRST_LIMIT_LVDS_100L */
11228+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
11229+ .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
11230+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
11231+ },
11232+	{ /* MRST_LIMIT_LVDS_83 */
11233+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
11234+ .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
11235+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
11236+ },
11237+ { /* MRST_LIMIT_LVDS_100 */
11238+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
11239+ .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
11240+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
11241+ },
11242+};
11243+
11244+#define MRST_M_MIN 10
11245+static const u32 mrst_m_converts[] = {
11246+ 0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
11247+ 0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
11248+ 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
11249+};
11250+
11251+#define COUNT_MAX 0x10000000
11252+void mrstWaitForPipeDisable(struct drm_device *dev)
11253+{
11254+ int count, temp;
11255+
11256+ /* FIXME JLIU7_PO */
11257+ psb_intel_wait_for_vblank(dev);
11258+ return;
11259+
11260+	/* Wait for the pipe disable to take effect. */
11261+ for (count = 0; count < COUNT_MAX; count++) {
11262+ temp = REG_READ(PIPEACONF);
11263+ if ((temp & PIPEACONF_PIPE_STATE) == 0)
11264+ break;
11265+ }
11266+
11267+ if (count == COUNT_MAX) {
11268+#if PRINT_JLIU7
11269+ DRM_INFO("JLIU7 mrstWaitForPipeDisable time out. \n");
11270+#endif /* PRINT_JLIU7 */
11271+ } else {
11272+#if PRINT_JLIU7
11273+		DRM_INFO("JLIU7 mrstWaitForPipeDisable count = %d. \n",
11274+ count);
11275+#endif /* PRINT_JLIU7 */
11276+ }
11277+}
11278+
11279+void mrstWaitForPipeEnable(struct drm_device *dev)
11280+{
11281+ int count, temp;
11282+
11283+ /* FIXME JLIU7_PO */
11284+ psb_intel_wait_for_vblank(dev);
11285+ return;
11286+
11287+	/* Wait for the pipe enable to take effect. */
11288+ for (count = 0; count < COUNT_MAX; count++) {
11289+ temp = REG_READ(PIPEACONF);
11290+ if ((temp & PIPEACONF_PIPE_STATE) == 1)
11291+ break;
11292+ }
11293+
11294+ if (count == COUNT_MAX) {
11295+#if PRINT_JLIU7
11296+ DRM_INFO("JLIU7 mrstWaitForPipeEnable time out. \n");
11297+#endif /* PRINT_JLIU7 */
11298+ } else {
11299+#if PRINT_JLIU7
11300+		DRM_INFO("JLIU7 mrstWaitForPipeEnable count = %d. \n",
11301+ count);
11302+#endif /* PRINT_JLIU7 */
11303+ }
11304+}
11305+
11306+static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
11307+{
11308+ const struct mrst_limit_t *limit;
11309+ struct drm_device *dev = crtc->dev;
11310+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
11311+
11312+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
11313+ || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
11314+ if (dev_priv->sku_100L)
11315+ limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
11316+ if (dev_priv->sku_83)
11317+ limit = &mrst_limits[MRST_LIMIT_LVDS_83];
11318+ if (dev_priv->sku_100)
11319+ limit = &mrst_limits[MRST_LIMIT_LVDS_100];
11320+ } else {
11321+ limit = NULL;
11322+#if PRINT_JLIU7
11323+ DRM_INFO("JLIU7 jliu7 mrst_limit Wrong display type. \n");
11324+#endif /* PRINT_JLIU7 */
11325+ }
11326+
11327+ return limit;
11328+}
11329+
11330+/** Derive the pixel clock for the given refclk and divisors for MRST. */
11331+static void mrst_clock(int refclk, struct mrst_clock_t *clock)
11332+{
11333+ clock->dot = (refclk * clock->m) / (14 * clock->p1);
11334+}
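/*
 * Illustration (not part of the original patch): assuming a
 * hypothetical 100 MHz reference clock (refclk = 100000 kHz) and the
 * LVDS_100L limits above (m = 20..34, p1 = 2..8), picking m = 20 and
 * p1 = 2 gives
 *
 *	dot = (100000 * 20) / (14 * 2) = 71428 kHz (~71.4 MHz)
 *
 * which lies inside the MRST_DOT_MIN..MRST_DOT_MAX window of
 * 19750..120000 kHz. The refclk value is an assumption for the sake of
 * the arithmetic, not taken from the driver.
 */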
11335+
11336+void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
11337+{
11338+#if PRINT_JLIU7
11339+ DRM_INFO
11340+ ("JLIU7 mrstPrintPll %s: dotclock = %d, m = %d, p1 = %d. \n",
11341+ prefix, clock->dot, clock->m, clock->p1);
11342+#endif /* PRINT_JLIU7 */
11343+}
11344+
11345+/**
11346+ * Returns a set of divisors for the desired target clock with the given refclk,
11347+ * or FALSE. Divisor values are the actual divisors for
11348+ */
11349+static bool
11350+mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
11351+ struct mrst_clock_t *best_clock)
11352+{
11353+ struct mrst_clock_t clock;
11354+ const struct mrst_limit_t *limit = mrst_limit(crtc);
11355+ int err = target;
11356+
11357+ memset(best_clock, 0, sizeof(*best_clock));
11358+
11359+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
11360+ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
11361+ clock.p1++) {
11362+ int this_err;
11363+
11364+ mrst_clock(refclk, &clock);
11365+
11366+ this_err = abs(clock.dot - target);
11367+ if (this_err < err) {
11368+ *best_clock = clock;
11369+ err = this_err;
11370+ }
11371+ }
11372+ }
11373+ DRM_DEBUG("mrstFindBestPLL err = %d.\n", err);
11374+
11375+ return err != target;
11376+}
11377+
11378+/**
11379+ * Sets the power management mode of the pipe and plane.
11380+ *
11381+ * This code should probably grow support for turning the cursor off and back
11382+ * on appropriately at the same time as we're turning the pipe off/on.
11383+ */
11384+static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
11385+{
11386+ struct drm_device *dev = crtc->dev;
11387+ /* struct drm_i915_master_private *master_priv; */
11388+ /* struct drm_i915_private *dev_priv = dev->dev_private; */
11389+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
11390+ int pipe = psb_intel_crtc->pipe;
11391+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
11392+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
11393+ int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
11394+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
11395+ u32 temp;
11396+ bool enabled;
11397+
11398+#if PRINT_JLIU7
11399+ DRM_INFO("JLIU7 enter mrst_crtc_dpms, mode = %d, pipe = %d \n",
11400+ mode, pipe);
11401+#endif /* PRINT_JLIU7 */
11402+
11403+ /* XXX: When our outputs are all unaware of DPMS modes other than off
11404+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
11405+ */
11406+ switch (mode) {
11407+ case DRM_MODE_DPMS_ON:
11408+ case DRM_MODE_DPMS_STANDBY:
11409+ case DRM_MODE_DPMS_SUSPEND:
11410+ /* Enable the DPLL */
11411+ temp = REG_READ(dpll_reg);
11412+ if ((temp & DPLL_VCO_ENABLE) == 0) {
11413+ REG_WRITE(dpll_reg, temp);
11414+ REG_READ(dpll_reg);
11415+ /* Wait for the clocks to stabilize. */
11416+ udelay(150);
11417+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
11418+ REG_READ(dpll_reg);
11419+ /* Wait for the clocks to stabilize. */
11420+ udelay(150);
11421+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
11422+ REG_READ(dpll_reg);
11423+ /* Wait for the clocks to stabilize. */
11424+ udelay(150);
11425+ }
11426+
11427+ /* Enable the pipe */
11428+ temp = REG_READ(pipeconf_reg);
11429+ if ((temp & PIPEACONF_ENABLE) == 0)
11430+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
11431+
11432+ /* Enable the plane */
11433+ temp = REG_READ(dspcntr_reg);
11434+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
11435+ REG_WRITE(dspcntr_reg,
11436+ temp | DISPLAY_PLANE_ENABLE);
11437+ /* Flush the plane changes */
11438+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
11439+ }
11440+
11441+ psb_intel_crtc_load_lut(crtc);
11442+
11443+ /* Give the overlay scaler a chance to enable
11444+ if it's on this pipe */
11445+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
11446+ break;
11447+ case DRM_MODE_DPMS_OFF:
11448+ /* Give the overlay scaler a chance to disable
11449+ * if it's on this pipe */
11450+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
11451+
11452+ /* Disable the VGA plane that we never use */
11453+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
11454+
11455+ /* Disable display plane */
11456+ temp = REG_READ(dspcntr_reg);
11457+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
11458+ REG_WRITE(dspcntr_reg,
11459+ temp & ~DISPLAY_PLANE_ENABLE);
11460+ /* Flush the plane changes */
11461+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
11462+ REG_READ(dspbase_reg);
11463+ }
11464+
11465+ if (!IS_I9XX(dev)) {
11466+ /* Wait for vblank for the disable to take effect */
11467+ psb_intel_wait_for_vblank(dev);
11468+ }
11469+
11470+ /* Next, disable display pipes */
11471+ temp = REG_READ(pipeconf_reg);
11472+ if ((temp & PIPEACONF_ENABLE) != 0) {
11473+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
11474+ REG_READ(pipeconf_reg);
11475+ }
11476+
11477+	/* Wait for the pipe disable to take effect. */
11478+ mrstWaitForPipeDisable(dev);
11479+
11480+ temp = REG_READ(dpll_reg);
11481+ if ((temp & DPLL_VCO_ENABLE) != 0) {
11482+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
11483+ REG_READ(dpll_reg);
11484+ }
11485+
11486+ /* Wait for the clocks to turn off. */
11487+ udelay(150);
11488+ break;
11489+ }
11490+
11491+#if DUMP_REGISTER
11492+ dump_dc_registers(dev);
11493+#endif /* DUMP_REGISTER */
11494+
11495+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
11496+
11497+#if 0 /* JB: Add vblank support later */
11498+ if (enabled)
11499+ dev_priv->vblank_pipe |= (1 << pipe);
11500+ else
11501+ dev_priv->vblank_pipe &= ~(1 << pipe);
11502+#endif
11503+
11504+ psb_intel_crtc->dpms_mode = mode;
11505+
11506+#if 0 /* JB: Add sarea support later */
11507+ if (!dev->primary->master)
11508+ return;
11509+
11510+ master_priv = dev->primary->master->driver_priv;
11511+ if (!master_priv->sarea_priv)
11512+ return;
11513+
11514+ switch (pipe) {
11515+ case 0:
11516+ master_priv->sarea_priv->planeA_w =
11517+ enabled ? crtc->mode.hdisplay : 0;
11518+ master_priv->sarea_priv->planeA_h =
11519+ enabled ? crtc->mode.vdisplay : 0;
11520+ break;
11521+ case 1:
11522+ master_priv->sarea_priv->planeB_w =
11523+ enabled ? crtc->mode.hdisplay : 0;
11524+ master_priv->sarea_priv->planeB_h =
11525+ enabled ? crtc->mode.vdisplay : 0;
11526+ break;
11527+ default:
11528+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
11529+ break;
11530+ }
11531+#endif
11532+}
11533+
11534+static int mrst_crtc_mode_set(struct drm_crtc *crtc,
11535+ struct drm_display_mode *mode,
11536+ struct drm_display_mode *adjusted_mode,
11537+ int x, int y,
11538+ struct drm_framebuffer *old_fb)
11539+{
11540+ struct drm_device *dev = crtc->dev;
11541+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
11542+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
11543+ int pipe = psb_intel_crtc->pipe;
11544+ int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
11545+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
11546+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
11547+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
11548+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
11549+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
11550+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
11551+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
11552+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
11553+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
11554+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
11555+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
11556+ int refclk = 0;
11557+ struct mrst_clock_t clock;
11558+ u32 dpll = 0, fp = 0, dspcntr, pipeconf, lvdsport;
11559+ bool ok, is_sdvo = false;
11560+ bool is_crt = false, is_lvds = false, is_tv = false;
11561+ bool is_mipi = false;
11562+ struct drm_mode_config *mode_config = &dev->mode_config;
11563+ struct drm_connector *connector;
11564+ struct psb_intel_output *psb_intel_output;
11565+
11566+#if PRINT_JLIU7
11567+ DRM_INFO("JLIU7 enter mrst_crtc_mode_set \n");
11568+#endif /* PRINT_JLIU7 */
11569+
11570+ list_for_each_entry(connector, &mode_config->connector_list, head) {
11571+ psb_intel_output = to_psb_intel_output(connector);
11572+
11573+ if (!connector->encoder
11574+ || connector->encoder->crtc != crtc)
11575+ continue;
11576+
11577+ switch (psb_intel_output->type) {
11578+ case INTEL_OUTPUT_LVDS:
11579+ is_lvds = true;
11580+ break;
11581+ case INTEL_OUTPUT_SDVO:
11582+ is_sdvo = true;
11583+ break;
11584+ case INTEL_OUTPUT_TVOUT:
11585+ is_tv = true;
11586+ break;
11587+ case INTEL_OUTPUT_ANALOG:
11588+ is_crt = true;
11589+ break;
11590+ case INTEL_OUTPUT_MIPI:
11591+ is_mipi = true;
11592+ break;
11593+ }
11594+ }
11595+
11596+ if (is_lvds | is_mipi) {
11597+ /*FIXME JLIU7 Get panel power delay parameters from
11598+ config data */
11599+ REG_WRITE(0x61208, 0x25807d0);
11600+ REG_WRITE(0x6120c, 0x1f407d0);
11601+ REG_WRITE(0x61210, 0x270f04);
11602+ }
11603+
11604+ /* Disable the VGA plane that we never use */
11605+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
11606+
11607+ /* Disable the panel fitter if it was on our pipe */
11608+ if (psb_intel_panel_fitter_pipe(dev) == pipe)
11609+ REG_WRITE(PFIT_CONTROL, 0);
11610+
11611+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
11612+ drm_mode_debug_printmodeline(mode);
11613+
11614+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
11615+ ((adjusted_mode->crtc_htotal - 1) << 16));
11616+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
11617+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
11618+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
11619+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
11620+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
11621+ ((adjusted_mode->crtc_vtotal - 1) << 16));
11622+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
11623+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
11624+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
11625+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
11626+ /* pipesrc and dspsize control the size that is scaled from,
11627+ * which should always be the user's requested size.
11628+ */
11629+ REG_WRITE(dspsize_reg,
11630+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
11631+ REG_WRITE(pipesrc_reg,
11632+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
11633+
11634+ /* Flush the plane changes */
11635+ {
11636+ struct drm_crtc_helper_funcs *crtc_funcs =
11637+ crtc->helper_private;
11638+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
11639+ }
11640+
11641+ /* setup pipeconf */
11642+ pipeconf = REG_READ(pipeconf_reg);
11643+
11644+ /* Set up the display plane register */
11645+ dspcntr = REG_READ(dspcntr_reg);
11646+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
11647+
11648+ if (pipe == 0)
11649+ dspcntr |= DISPPLANE_SEL_PIPE_A;
11650+ else
11651+ dspcntr |= DISPPLANE_SEL_PIPE_B;
11652+
11653+ dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
11654+ dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
11655+
11656+ if (is_mipi)
11657+ return 0;
11658+
11659+ if (dev_priv->sku_100L)
11660+ refclk = 100000;
11661+ else if (dev_priv->sku_83)
11662+ refclk = 166000;
11663+ else if (dev_priv->sku_100)
11664+ refclk = 200000;
11665+
11666+ dpll = 0; /*BIT16 = 0 for 100MHz reference */
11667+
11668+ ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
11669+
11670+ if (!ok) {
11671+#if 0 /* FIXME JLIU7 */
11672+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
11673+ return;
11674+#endif /* FIXME JLIU7 */
11675+#if PRINT_JLIU7
11676+ DRM_INFO
11677+ ("JLIU7 mrstFindBestPLL fail in mrst_crtc_mode_set. \n");
11678+#endif /* PRINT_JLIU7 */
11679+ } else {
11680+#if PRINT_JLIU7
11681+ DRM_INFO("JLIU7 mrst_crtc_mode_set pixel clock = %d,"
11682+ "m = %x, p1 = %x. \n", clock.dot, clock.m,
11683+ clock.p1);
11684+#endif /* PRINT_JLIU7 */
11685+ }
11686+
11687+ fp = mrst_m_converts[(clock.m - MRST_M_MIN)] << 8;
11688+
11689+ dpll |= DPLL_VGA_MODE_DIS;
11690+
11691+
11692+ dpll |= DPLL_VCO_ENABLE;
11693+
11694+ if (is_lvds)
11695+ dpll |= DPLLA_MODE_LVDS;
11696+ else
11697+ dpll |= DPLLB_MODE_DAC_SERIAL;
11698+
11699+ if (is_sdvo) {
11700+ int sdvo_pixel_multiply =
11701+ adjusted_mode->clock / mode->clock;
11702+
11703+ dpll |= DPLL_DVO_HIGH_SPEED;
11704+ dpll |=
11705+ (sdvo_pixel_multiply -
11706+ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
11707+ }
11708+
11709+
11710+ /* compute bitmask from p1 value */
11711+ dpll |= (1 << (clock.p1 - 2)) << 17;
11712+
11713+ dpll |= DPLL_VCO_ENABLE;
11714+
11715+#if PRINT_JLIU7
11716+ mrstPrintPll("chosen", &clock);
11717+#endif /* PRINT_JLIU7 */
11718+
11719+#if 0
11720+ if (!xf86ModesEqual(mode, adjusted_mode)) {
11721+ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
11722+ "Adjusted mode for pipe %c:\n",
11723+ pipe == 0 ? 'A' : 'B');
11724+ xf86PrintModeline(pScrn->scrnIndex, mode);
11725+ }
11726+ i830PrintPll("chosen", &clock);
11727+#endif
11728+
11729+ if (dpll & DPLL_VCO_ENABLE) {
11730+ REG_WRITE(fp_reg, fp);
11731+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
11732+ REG_READ(dpll_reg);
11733+/* FIXME jliu7 check the DPLLA lock bit PIPEACONF[29] */
11734+ udelay(150);
11735+ }
11736+
11737+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
11738+ * This is an exception to the general rule that mode_set doesn't turn
11739+ * things on.
11740+ */
11741+ if (is_lvds) {
11742+
11743+ /* FIXME JLIU7 need to support 24bit panel */
11744+#if MRST_24BIT_LVDS
11745+ lvdsport =
11746+ (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN
11747+ | LVDS_A3_POWER_UP | LVDS_A0A2_CLKA_POWER_UP;
11748+
11749+#if MRST_24BIT_DOT_1
11750+ lvdsport |= MRST_PANEL_24_DOT_1_FORMAT;
11751+#endif /* MRST_24BIT_DOT_1 */
11752+
11753+#else /* MRST_24BIT_LVDS */
11754+ lvdsport =
11755+ (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN;
11756+#endif /* MRST_24BIT_LVDS */
11757+
11758+#if MRST_24BIT_WA
11759+ lvdsport = 0x80300340;
11760+#else /* MRST_24BIT_DOT_WA */
11761+ lvdsport = 0x82300300;
11762+#endif /* MRST_24BIT_DOT_WA */
11763+
11764+ REG_WRITE(LVDS, lvdsport);
11765+ REG_READ(LVDS);
11766+ }
11767+
11768+ REG_WRITE(fp_reg, fp);
11769+ REG_WRITE(dpll_reg, dpll);
11770+ REG_READ(dpll_reg);
11771+ /* Wait for the clocks to stabilize. */
11772+ udelay(150);
11773+
11774+ /* write it again -- the BIOS does, after all */
11775+ REG_WRITE(dpll_reg, dpll);
11776+ REG_READ(dpll_reg);
11777+ /* Wait for the clocks to stabilize. */
11778+ udelay(150);
11779+
11780+ REG_WRITE(pipeconf_reg, pipeconf);
11781+ REG_READ(pipeconf_reg);
11782+
11783+	/* Wait for the pipe enable to take effect. */
11784+ mrstWaitForPipeEnable(dev);
11785+
11786+ REG_WRITE(dspcntr_reg, dspcntr);
11787+ psb_intel_wait_for_vblank(dev);
11788+
11789+ return 0;
11790+}
11791+
11792+
11793+static const struct drm_crtc_helper_funcs mrst_helper_funcs = {
11794+ .dpms = mrst_crtc_dpms,
11795+ .mode_fixup = psb_intel_crtc_mode_fixup,
11796+ .mode_set = mrst_crtc_mode_set,
11797+ .mode_set_base = psb_intel_pipe_set_base,
11798+ .prepare = psb_intel_crtc_prepare,
11799+ .commit = psb_intel_crtc_commit,
11800+};
11801+
11802+/* MRST_PLATFORM end */
11803diff -uNr a/drivers/gpu/drm/psb/psb_intel_display.h b/drivers/gpu/drm/psb/psb_intel_display.h
11804--- a/drivers/gpu/drm/psb/psb_intel_display.h 1969-12-31 16:00:00.000000000 -0800
11805+++ b/drivers/gpu/drm/psb/psb_intel_display.h 2009-04-07 13:28:38.000000000 -0700
11806@@ -0,0 +1,31 @@
11807+
11808+/* Copyright (c) 2008, Intel Corporation
11809+ * Permission is hereby granted, free of charge, to any person obtaining a
11810+ * copy of this software and associated documentation files (the "Software"),
11811+ * to deal in the Software without restriction, including without limitation
11812+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11813+ * and/or sell copies of the Software, and to permit persons to whom the
11814+ * Software is furnished to do so, subject to the following conditions:
11815+ *
11816+ * The above copyright notice and this permission notice (including the next
11817+ * paragraph) shall be included in all copies or substantial portions of the
11818+ * Software.
11819+ *
11820+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
11821+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11822+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
11823+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
11824+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
11825+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
11826+ * DEALINGS IN THE SOFTWARE.
11827+ *
11828+ * Authors:
11829+ * Eric Anholt <eric@anholt.net>
11830+ */
11831+
11832+#ifndef _INTEL_DISPLAY_H_
11833+#define _INTEL_DISPLAY_H_
11834+
11835+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
11836+
11837+#endif
11838diff -uNr a/drivers/gpu/drm/psb/psb_intel_drv.h b/drivers/gpu/drm/psb/psb_intel_drv.h
11839--- a/drivers/gpu/drm/psb/psb_intel_drv.h 1969-12-31 16:00:00.000000000 -0800
11840+++ b/drivers/gpu/drm/psb/psb_intel_drv.h 2009-04-07 13:28:38.000000000 -0700
11841@@ -0,0 +1,192 @@
11842+/*
11843+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
11844+ * Copyright (c) 2007 Intel Corporation
11845+ * Jesse Barnes <jesse.barnes@intel.com>
11846+ */
11847+#ifndef __INTEL_DRV_H__
11848+#define __INTEL_DRV_H__
11849+
11850+#include <linux/i2c.h>
11851+#include <linux/i2c-id.h>
11852+#include <linux/i2c-algo-bit.h>
11853+#include <drm/drm_crtc.h>
11854+
11855+#include <drm/drm_crtc_helper.h>
11856+
11857+/*
11858+ * MOORESTOWN defines
11859+ */
11860+#define MRST_I2C 0
11861+
11862+#define DUMP_REGISTER 0
11863+#define MRST_24BIT_LVDS 1
11864+#define MRST_24BIT_DOT_1 0
11865+#define MRST_24BIT_WA 0
11866+
11867+#define PRINT_JLIU7 0
11868+#define DELAY_TIME1 80 /* 1000 = 1ms */
11869+
11870+/*
11871+ * Display related stuff
11872+ */
11873+
11874+/* store information about an Ixxx DVO */
11875+/* The i830->i865 use multiple DVOs with multiple i2cs */
11876+/* the i915, i945 have a single sDVO i2c bus - which is different */
11877+#define MAX_OUTPUTS 6
11878+/* maximum connectors per crtcs in the mode set */
11879+#define INTELFB_CONN_LIMIT 4
11880+
11881+#define INTEL_I2C_BUS_DVO 1
11882+#define INTEL_I2C_BUS_SDVO 2
11883+
11884+/* these are outputs from the chip - integrated only
11885+ * external chips are via DVO or SDVO output */
11886+#define INTEL_OUTPUT_UNUSED 0
11887+#define INTEL_OUTPUT_ANALOG 1
11888+#define INTEL_OUTPUT_DVO 2
11889+#define INTEL_OUTPUT_SDVO 3
11890+#define INTEL_OUTPUT_LVDS 4
11891+#define INTEL_OUTPUT_TVOUT 5
11892+#define INTEL_OUTPUT_MIPI 6
11893+
11894+#define INTEL_DVO_CHIP_NONE 0
11895+#define INTEL_DVO_CHIP_LVDS 1
11896+#define INTEL_DVO_CHIP_TMDS 2
11897+#define INTEL_DVO_CHIP_TVOUT 4
11898+
11899+/**
11900+ * Holds information usually kept in the device driver's private data,
11901+ * since it needs to be shared across multiple device drivers' privates.
11902+ */
11903+struct psb_intel_mode_device {
11904+
11905+ /*
11906+ * Abstracted memory manager operations
11907+ */
11908+ void *(*bo_from_handle) (struct drm_device *dev,
11909+ struct drm_file *file_priv,
11910+ unsigned int handle);
11911+ size_t(*bo_size) (struct drm_device *dev, void *bo);
11912+ size_t(*bo_offset) (struct drm_device *dev, void *bo);
11913+ int (*bo_pin_for_scanout) (struct drm_device *dev, void *bo);
11914+ int (*bo_unpin_for_scanout) (struct drm_device *dev, void *bo);
11915+
11916+ /*
11917+ * Cursor
11918+ */
11919+ int cursor_needs_physical;
11920+
11921+ /*
11922+ * LVDS info
11923+ */
11924+ int backlight_duty_cycle; /* restore backlight to this value */
11925+ bool panel_wants_dither;
11926+ struct drm_display_mode *panel_fixed_mode;
11927+ struct drm_display_mode *vbt_mode; /* if any */
11928+
11929+ uint32_t saveBLC_PWM_CTL;
11930+};
11931+
11932+struct psb_intel_i2c_chan {
11933+ /* for getting at dev. private (mmio etc.) */
11934+ struct drm_device *drm_dev;
11935+ u32 reg; /* GPIO reg */
11936+ struct i2c_adapter adapter;
11937+ struct i2c_algo_bit_data algo;
11938+ u8 slave_addr;
11939+};
11940+
11941+struct psb_intel_output {
11942+ struct drm_connector base;
11943+
11944+ struct drm_encoder enc;
11945+ int type;
11946+ struct psb_intel_i2c_chan *i2c_bus; /* for control functions */
11947+ struct psb_intel_i2c_chan *ddc_bus; /* for DDC only stuff */
11948+ bool load_detect_temp;
11949+ void *dev_priv;
11950+
11951+ struct psb_intel_mode_device *mode_dev;
11952+
11953+};
11954+
11955+struct psb_intel_crtc {
11956+ struct drm_crtc base;
11957+ int pipe;
11958+ int plane;
11959+ uint32_t cursor_addr;
11960+ u8 lut_r[256], lut_g[256], lut_b[256];
11961+ int dpms_mode;
11962+ struct psb_intel_framebuffer *fbdev_fb;
11963+ /* a mode_set for fbdev users on this crtc */
11964+ struct drm_mode_set mode_set;
11965+
11966+ /* current bo we scanout from */
11967+ void *scanout_bo;
11968+
11969+ /* current bo we cursor from */
11970+ void *cursor_bo;
11971+
11972+ struct psb_intel_mode_device *mode_dev;
11973+};
11974+
11975+#define to_psb_intel_crtc(x) container_of(x, struct psb_intel_crtc, base)
11976+#define to_psb_intel_output(x) container_of(x, struct psb_intel_output, base)
11977+#define enc_to_psb_intel_output(x) container_of(x, struct psb_intel_output, enc)
11978+#define to_psb_intel_framebuffer(x) container_of(x, struct psb_intel_framebuffer, base)
11979+
11980+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
11981+ const u32 reg, const char *name);
11982+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
11983+int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output);
11984+extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output);
11985+
11986+extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
11987+ struct psb_intel_mode_device *mode_dev);
11988+extern void psb_intel_crt_init(struct drm_device *dev);
11989+extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device);
11990+extern void psb_intel_dvo_init(struct drm_device *dev);
11991+extern void psb_intel_tv_init(struct drm_device *dev);
11992+extern void psb_intel_lvds_init(struct drm_device *dev,
11993+ struct psb_intel_mode_device *mode_dev);
11994+extern void mrst_lvds_init(struct drm_device *dev,
11995+ struct psb_intel_mode_device *mode_dev);
11996+extern void mrst_dsi_init(struct drm_device *dev,
11997+ struct psb_intel_mode_device *mode_dev);
11998+
11999+extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
12000+extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
12001+extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
12002+
12003+extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
12004+ *connector);
12005+
12006+extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
12007+ struct drm_crtc *crtc);
12008+extern void psb_intel_wait_for_vblank(struct drm_device *dev);
12009+extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
12010+ int pipe);
12011+extern struct drm_crtc *psb_intel_get_load_detect_pipe
12012+ (struct psb_intel_output *psb_intel_output,
12013+ struct drm_display_mode *mode, int *dpms_mode);
12014+extern void psb_intel_release_load_detect_pipe(struct psb_intel_output
12015+ *psb_intel_output, int dpms_mode);
12016+
12017+extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
12018+ int sdvoB);
12019+extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
12020+extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
12021+ int enable);
12022+extern int intelfb_probe(struct drm_device *dev);
12023+extern int intelfb_remove(struct drm_device *dev,
12024+ struct drm_framebuffer *fb);
12025+extern void psb_intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red,
12026+ u16 green, u16 blue, int regno);
12027+
12028+extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
12029+ *dev, struct
12030+ drm_mode_fb_cmd
12031+ *mode_cmd,
12032+ void *mm_private);
12033+#endif /* __INTEL_DRV_H__ */
12034diff -uNr a/drivers/gpu/drm/psb/psb_intel_dsi.c b/drivers/gpu/drm/psb/psb_intel_dsi.c
12035--- a/drivers/gpu/drm/psb/psb_intel_dsi.c 1969-12-31 16:00:00.000000000 -0800
12036+++ b/drivers/gpu/drm/psb/psb_intel_dsi.c 2009-04-07 13:28:38.000000000 -0700
12037@@ -0,0 +1,1644 @@
12038+/*
12039+ * Copyright © 2006-2007 Intel Corporation
12040+ *
12041+ * Permission is hereby granted, free of charge, to any person obtaining a
12042+ * copy of this software and associated documentation files (the "Software"),
12043+ * to deal in the Software without restriction, including without limitation
12044+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12045+ * and/or sell copies of the Software, and to permit persons to whom the
12046+ * Software is furnished to do so, subject to the following conditions:
12047+ *
12048+ * The above copyright notice and this permission notice (including the next
12049+ * paragraph) shall be included in all copies or substantial portions of the
12050+ * Software.
12051+ *
12052+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12053+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12054+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
12055+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
12056+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
12057+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
12058+ * DEALINGS IN THE SOFTWARE.
12059+ *
12060+ * Authors:
12061+ * jim liu <jim.liu@intel.com>
12062+ */
12063+
12064+#include <linux/backlight.h>
12065+#include <drm/drm_crtc.h>
12066+#include <drm/drm_edid.h>
12067+
12068+#define DRM_MODE_ENCODER_MIPI 5
12069+#define DRM_MODE_CONNECTOR_MIPI 13
12070+
12071+#if DUMP_REGISTER
12072+extern void dump_dsi_registers(struct drm_device *dev);
12073+#endif /* DUMP_REGISTER */
12074+
12075+int dsi_backlight; /* restore backlight to this value */
12076+
12077+/**
12078+ * Returns the maximum level of the backlight duty cycle field.
12079+ */
12080+static u32 mrst_dsi_get_max_backlight(struct drm_device *dev)
12081+{
12082+#if PRINT_JLIU7
12083+ DRM_INFO("JLIU7 enter mrst_dsi_get_max_backlight \n");
12084+#endif /* PRINT_JLIU7 */
12085+
12086+ return BRIGHTNESS_MAX_LEVEL;
12087+
12088+/* FIXME jliu7 need to revisit */
12089+}
12090+
12091+/**
12092+ * Sets the backlight level.
12093+ *
12094+ * \param level backlight level, from 0 to psb_intel_dsi_get_max_backlight().
12095+ */
12096+static void mrst_dsi_set_backlight(struct drm_device *dev, int level)
12097+{
12098+ u32 blc_pwm_ctl;
12099+ u32 max_pwm_blc;
12100+
12101+#if PRINT_JLIU7
12102+ DRM_INFO("JLIU7 enter mrst_dsi_set_backlight \n");
12103+#endif /* PRINT_JLIU7 */
12104+
12105+#if 1 /* FIXME JLIU7 */
12106+ return;
12107+#endif /* FIXME JLIU7 */
12108+
12109+	/* Prevent LVDS going to total black */
12110+ if (level < 20)
12111+ level = 20;
12112+
12113+ max_pwm_blc = mrst_lvds_get_PWM_ctrl_freq(dev);
12114+
12115+ if (max_pwm_blc ==0)
12116+ {
12117+ return;
12118+ }
12119+
12120+ blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
12121+
12122+ if (blc_pol == BLC_POLARITY_INVERSE) {
12123+ blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
12124+ }
12125+
12126+ REG_WRITE(BLC_PWM_CTL,
12127+ (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) |
12128+ blc_pwm_ctl);
12129+}
12130+
12131+/**
12132+ * Sets the power state for the panel.
12133+ */
12134+static void mrst_dsi_set_power(struct drm_device *dev,
12135+ struct psb_intel_output *output, bool on)
12136+{
12137+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12138+ u32 pp_status;
12139+
12140+#if PRINT_JLIU7
12141+ DRM_INFO("JLIU7 enter mrst_dsi_set_power \n");
12142+#endif /* PRINT_JLIU7 */
12143+ /*
12144+	 * The DSI device must be ready before we can change the power state.
12145+ */
12146+ if (!dev_priv->dsi_device_ready)
12147+ {
12148+ return;
12149+ }
12150+
12151+ /*
12152+	 * We don't support dual DSI yet. It may be in the POR in the future.
12153+ */
12154+ if (dev_priv->dual_display)
12155+ {
12156+ return;
12157+ }
12158+
12159+ if (on) {
12160+ if (dev_priv->dpi & (!dev_priv->dpi_panel_on))
12161+ {
12162+
12163+#if PRINT_JLIU7
12164+ DRM_INFO("JLIU7 mrst_dsi_set_power dpi = on \n");
12165+#endif /* PRINT_JLIU7 */
12166+ REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
12167+#if 0 /*FIXME JLIU7 */
12168+ REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_ON_DATA);
12169+ REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_ON);
12170+#endif /*FIXME JLIU7 */
12171+
12172+ dev_priv->dpi_panel_on = true;
12173+
12174+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
12175+ POWER_TARGET_ON);
12176+ do {
12177+ pp_status = REG_READ(PP_STATUS);
12178+ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
12179+ }
12180+ else if ((!dev_priv->dpi) & (!dev_priv->dbi_panel_on))
12181+ {
12182+#if PRINT_JLIU7
12183+ DRM_INFO("JLIU7 mrst_dsi_set_power dbi = on \n");
12184+#endif /* PRINT_JLIU7 */
12185+
12186+ dev_priv->DBI_CB_pointer = 0;
12187+ /* exit sleep mode */
12188+ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = exit_sleep_mode;
12189+
12190+#if 0 /*FIXME JLIU7 */
12191+			/* Check MIPI Adapter command registers */
12192+ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
12193+#endif /*FIXME JLIU7 */
12194+
12195+ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
12196+ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
12197+ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
12198+
12199+			/* The host processor must wait five milliseconds after sending the exit_sleep_mode command before sending
12200+			   another command. This delay allows the supply voltages and clock circuits to stabilize. */
12201+ udelay(5000);
12202+
12203+ dev_priv->DBI_CB_pointer = 0;
12204+
12205+ /* set display on */
12206+ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = set_display_on ;
12207+
12208+#if 0 /*FIXME JLIU7 */
12209+			/* Check MIPI Adapter command registers */
12210+ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
12211+#endif /*FIXME JLIU7 */
12212+
12213+ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
12214+ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
12215+ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
12216+
12217+ dev_priv->dbi_panel_on = true;
12218+ }
12219+/*FIXME JLIU7 */
12220+/* Need to figure out how to control the MIPI panel power on sequence*/
12221+
12222+ mrst_dsi_set_backlight(dev, dsi_backlight);
12223+ }
12224+ else
12225+ {
12226+ mrst_dsi_set_backlight(dev, 0);
12227+/*FIXME JLIU7 */
12228+/* Need to figure out how to control the MIPI panel power down sequence*/
12229+ /*
12230+ * Only save the current backlight value if we're going from
12231+ * on to off.
12232+ */
12233+ if (dev_priv->dpi & dev_priv->dpi_panel_on)
12234+ {
12235+#if PRINT_JLIU7
12236+ DRM_INFO("JLIU7 mrst_dsi_set_power dpi = off \n");
12237+#endif /* PRINT_JLIU7 */
12238+
12239+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
12240+ ~POWER_TARGET_ON);
12241+ do {
12242+ pp_status = REG_READ(PP_STATUS);
12243+ } while (pp_status & PP_ON);
12244+
12245+#if 0 /*FIXME JLIU7 */
12246+ REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_OFF_DATA);
12247+ REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_OFF);
12248+#endif /*FIXME JLIU7 */
12249+ REG_WRITE(DPI_CONTROL_REG, DPI_SHUT_DOWN);
12250+ dev_priv->dpi_panel_on = false;
12251+ }
12252+ else if ((!dev_priv->dpi) & dev_priv->dbi_panel_on)
12253+ {
12254+#if PRINT_JLIU7
12255+ DRM_INFO("JLIU7 mrst_dsi_set_power dbi = off \n");
12256+#endif /* PRINT_JLIU7 */
12257+ dev_priv->DBI_CB_pointer = 0;
12258+ /* enter sleep mode */
12259+ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = enter_sleep_mode;
12260+
12261+			/* Check MIPI Adapter command registers */
12262+ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
12263+
12264+ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
12265+ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
12266+ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
12267+ dev_priv->dbi_panel_on = false;
12268+ }
12269+ }
12270+}
12271+
12272+static void mrst_dsi_dpms(struct drm_encoder *encoder, int mode)
12273+{
12274+ struct drm_device *dev = encoder->dev;
12275+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
12276+
12277+#if PRINT_JLIU7
12278+ DRM_INFO("JLIU7 enter mrst_dsi_dpms \n");
12279+#endif /* PRINT_JLIU7 */
12280+
12281+ if (mode == DRM_MODE_DPMS_ON)
12282+ mrst_dsi_set_power(dev, output, true);
12283+ else
12284+ mrst_dsi_set_power(dev, output, false);
12285+
12286+ /* XXX: We never power down the DSI pairs. */
12287+}
12288+
12289+static void mrst_dsi_save(struct drm_connector *connector)
12290+{
12291+#if 0 /* JB: Disable for drop */
12292+ struct drm_device *dev = connector->dev;
12293+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12294+
12295+#if PRINT_JLIU7
12296+ DRM_INFO("JLIU7 enter mrst_dsi_save \n");
12297+#endif /* PRINT_JLIU7 */
12298+
12299+ dev_priv->savePP_ON = REG_READ(LVDSPP_ON);
12300+ dev_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
12301+ dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
12302+ dev_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
12303+ dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
12304+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
12305+ BACKLIGHT_DUTY_CYCLE_MASK);
12306+
12307+ /*
12308+ * make backlight to full brightness
12309+ */
12310+ dsi_backlight = mrst_dsi_get_max_backlight(dev);
12311+#endif
12312+}
12313+
12314+static void mrst_dsi_restore(struct drm_connector *connector)
12315+{
12316+#if 0 /* JB: Disable for drop */
12317+ struct drm_device *dev = connector->dev;
12318+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12319+
12320+#if PRINT_JLIU7
12321+ DRM_INFO("JLIU7 enter mrst_dsi_restore \n");
12322+#endif /* PRINT_JLIU7 */
12323+
12324+ REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
12325+ REG_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
12326+ REG_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
12327+ REG_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
12328+ REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
12329+ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
12330+ mrst_dsi_set_power(dev, true);
12331+ else
12332+ mrst_dsi_set_power(dev, false);
12333+#endif
12334+}
12335+
12336+static void mrst_dsi_prepare(struct drm_encoder *encoder)
12337+{
12338+ struct drm_device *dev = encoder->dev;
12339+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
12340+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
12341+
12342+#if PRINT_JLIU7
12343+ DRM_INFO("JLIU7 enter mrst_dsi_prepare \n");
12344+#endif /* PRINT_JLIU7 */
12345+
12346+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
12347+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
12348+ BACKLIGHT_DUTY_CYCLE_MASK);
12349+
12350+ mrst_dsi_set_power(dev, output, false);
12351+}
12352+
12353+static void mrst_dsi_commit( struct drm_encoder *encoder)
12354+{
12355+ struct drm_device *dev = encoder->dev;
12356+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
12357+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
12358+
12359+#if PRINT_JLIU7
12360+ DRM_INFO("JLIU7 enter mrst_dsi_commit \n");
12361+#endif /* PRINT_JLIU7 */
12362+
12363+ if (mode_dev->backlight_duty_cycle == 0)
12364+ mode_dev->backlight_duty_cycle =
12365+ mrst_dsi_get_max_backlight(dev);
12366+
12367+ mrst_dsi_set_power(dev, output, true);
12368+
12369+#if DUMP_REGISTER
12370+ dump_dsi_registers(dev);
12371+#endif /* DUMP_REGISTER */
12372+}
12373+
12374+/* ************************************************************************* *\
12375+FUNCTION: GetHS_TX_timeoutCount
12376+ `
12377+DESCRIPTION: In burst mode, use a value greater than one DPI line time in byte
12378+ clocks (txbyteclkhs). To time out this timer, one more than that value is recommended.
12379+
12380+ In non-burst mode, use a value greater than one DPI frame time in byte clocks (txbyteclkhs).
12381+ To time out this timer, one more than that value is recommended.
12382+
12383+\* ************************************************************************* */
12384+static u32 GetHS_TX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
12385+{
12386+
12387+ u32 timeoutCount = 0, HTOT_count = 0, VTOT_count = 0, HTotalPixel = 0;
12388+
12389+ /* Total pixels need to be transfer per line*/
12390+ HTotalPixel = (dev_priv->HsyncWidth + dev_priv->HbackPorch + dev_priv->HfrontPorch) * dev_priv->laneCount + dev_priv->HactiveArea;
12391+
12392+ /* byte count = (pixel count * bits per pixel) / 8 */
12393+ HTOT_count = (HTotalPixel * dev_priv->bpp) / 8;
12394+
12395+ if (dev_priv->videoModeFormat == BURST_MODE)
12396+ {
12397+ timeoutCount = HTOT_count + 1;
12398+#if 1 /*FIXME remove it after power-on */
12399+ VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch + dev_priv->VfrontPorch
12400+ + dev_priv->VsyncWidth;
12401+ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
12402+ timeoutCount = (HTOT_count * VTOT_count) + 1;
12403+#endif
12404+ }
12405+ else
12406+ {
12407+ VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch + dev_priv->VfrontPorch
12408+ + dev_priv->VsyncWidth;
12409+ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
12410+ timeoutCount = (HTOT_count * VTOT_count) + 1;
12411+ }
12412+
12413+ return timeoutCount & 0xFFFF;
12414+}
12415+
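
To make the byte-clock arithmetic above concrete, the sketch below mirrors GetHS_TX_timeoutCount() for a hypothetical 800x480 panel at 24 bpp on 2 lanes (the timings are illustrative, not values from this patch): blanking pixels are multiplied by the lane count, the total is converted with bytes = pixels * bpp / 8, and the frame-level product is masked to 16 bits as the function does.

#include <stdio.h>

int main(void)
{
	/* hypothetical DPI timings, purely illustrative */
	unsigned hsync = 10, hbp = 20, hfp = 30, hactive = 800;
	unsigned vsync = 2,  vbp = 10, vfp = 5,  vactive = 480;
	unsigned lanes = 2,  bpp = 24;

	/* pixels to transfer per line, as in GetHS_TX_timeoutCount() */
	unsigned htotal_pixel = (hsync + hbp + hfp) * lanes + hactive;
	/* byte count = (pixel count * bits per pixel) / 8 */
	unsigned htot_count = (htotal_pixel * bpp) / 8;
	unsigned vtot_count = vactive + vbp + vfp + vsync;
	unsigned timeout = (htot_count * vtot_count + 1) & 0xFFFF;

	/* 920 pixels -> 2760 txbyteclkhs per line; frame value masked to 16 bits */
	printf("line = %u txbyteclkhs, timeout field = %u\n",
	       htot_count, timeout);
	return 0;
}
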
12416+/* ************************************************************************* *\
12417+FUNCTION: GetLP_RX_timeoutCount
12418+
12419+DESCRIPTION: The timeout value is protocol specific. Time out value is calculated
12420+ from txclkesc(50ns).
12421+
12422+ Minimum value =
12423+ Time to send one Trigger message = 4 X txclkesc [Escape mode entry sequence)
12424+ + 8-bit trigger message (2x8xtxclkesc)
12425+ +1 txclkesc [stop_state]
12426+ = 21 X txclkesc [ 15h]
12427+
12428+ Maximum Value =
12429+ Time to send a long packet with maximum payload data
12430+ = 4 X txclkesc [Escape mode entry sequence)
12431+ + 8-bit Low power data transmission Command (2x8xtxclkesc)
12432+ + packet header [ 4X8X2X txclkesc]
12433+ +payload [ nX8X2Xtxclkesc]
12434+ +CRC[2X8X2txclkesc]
12435+ +1 txclkesc [stop_state]
12436+ = 117 txclkesc +n[payload in terms of bytes]X16txclkesc.
12437+
12438+\* ************************************************************************* */
12439+static u32 GetLP_RX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
12440+{
12441+
12442+ u32 timeoutCount = 0;
12443+
12444+ if (dev_priv->config_phase)
12445+ {
12446+ /* Assuming 256 byte DDB data.*/
12447+ timeoutCount = 117 + 256 * 16;
12448+ }
12449+ else
12450+ {
12451+ /* For DPI video only mode use the minimum value.*/
12452+ timeoutCount = 0x15;
12453+#if 1 /*FIXME remove it after power-on */
12454+ /* Assuming 256 byte DDB data.*/
12455+ timeoutCount = 117 + 256 * 16;
12456+#endif
12457+ }
12458+
12459+ return timeoutCount;
12460+}
12461+
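
Plugging the comment's formula into the two cases handled above: with the assumed 256-byte DDB payload the worst case is 117 + 256 * 16 = 4213 txclkesc, which is the value used when config_phase is set (and, because of the power-on FIXME, for DPI video mode as well); the true DPI-video-only minimum is the 21-txclkesc (0x15) trigger-message time.
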
12462+/* ************************************************************************* *\
12463+FUNCTION: GetHSA_Count
12464+
12465+DESCRIPTION: Shows the horizontal sync value in terms of byte clock
12466+ (txbyteclkhs)
12467+ Minimum HSA period should be sufficient to transmit a hsync start short
12468+ packet(4 bytes)
12469+ i) For Non-burst Mode with sync pulse, Min value – 4 in decimal [plus
12470+ an optional 6 bytes for a zero payload blanking packet]. But if
12471+ the value is less than 10 but more than 4, then this count will
12472+ be added to the HBP’s count for one lane.
12473+ ii) For Non-Burst Sync Event & Burst Mode, there is no HSA, so you
12474+ can program this to zero. If you program this register, these
12475+ byte values will be added to HBP.
12476+ iii) For Burst mode of operation, normally the values programmed in
12477+ terms of byte clock are based on the principle - time for transferring
12478+ HSA in Burst mode is the same as in non-burst mode.
12479+\* ************************************************************************* */
12480+static u32 GetHSA_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
12481+{
12482+ u32 HSA_count;
12483+ u32 HSA_countX8;
12484+
12485+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
12486+ HSA_countX8 = dev_priv->HsyncWidth * dev_priv->bpp;
12487+
12488+ if (dev_priv->videoModeFormat == BURST_MODE)
12489+ {
12490+ HSA_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
12491+ }
12492+
12493+ HSA_count = HSA_countX8 / 8;
12494+
12495+ return HSA_count;
12496+}
12497+
12498+/* ************************************************************************* *\
12499+FUNCTION: GetHBP_Count
12500+
12501+DESCRIPTION: Shows the horizontal back porch value in terms of txbyteclkhs.
12502+ Minimum HBP period should be sufficient to transmit a “hsync end short
12503+ packet(4 bytes) + Blanking packet overhead(6 bytes) + RGB packet header(4 bytes)”
12504+ For Burst mode of operation, normally the values programmed in terms of
12505+ byte clock are based on the principle - time for transferring HBP
12506+ in Burst mode is the same as in non-burst mode.
12507+
12508+ Min value – 14 in decimal [ accounted with zero payload for blanking packet] for one lane.
12509+ Max value – any value greater than 14 based on DPI resolution
12510+\* ************************************************************************* */
12511+static u32 GetHBP_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
12512+{
12513+ u32 HBP_count;
12514+ u32 HBP_countX8;
12515+
12516+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
12517+ HBP_countX8 = dev_priv->HbackPorch * dev_priv->bpp;
12518+
12519+ if (dev_priv->videoModeFormat == BURST_MODE)
12520+ {
12521+ HBP_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
12522+ }
12523+
12524+ HBP_count = HBP_countX8 / 8;
12525+
12526+ return HBP_count;
12527+}
12528+
12529+/* ************************************************************************* *\
12530+FUNCTION: GetHFP_Count
12531+
12532+DESCRIPTION: Shows the horizontal front porch value in terms of txbyteclkhs.
12533+ Minimum HFP period should be sufficient to transmit “RGB Data packet
12534+ footer(2 bytes) + Blanking packet overhead(6 bytes)” for non burst mode.
12535+
12536+ For burst mode, Minimum HFP period should be sufficient to transmit
12537+ Blanking packet overhead(6 bytes)”
12538+
12539+ For Burst mode of operation, normally the values programmed in terms of
12540+ byte clock are based on the principle - time for transferring HFP
12541+ in Burst mode is the same as in non-burst mode.
12542+
12543+ Min value – 8 in decimal for non-burst mode [accounted with zero payload
12544+ for blanking packet] for one lane.
12545+ Min value – 6 in decimal for burst mode for one lane.
12546+
12547+ Max value – any value greater than the minimum value based on DPI resolution
12548+\* ************************************************************************* */
12549+static u32 GetHFP_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
12550+{
12551+ u32 HFP_count;
12552+ u32 HFP_countX8;
12553+
12554+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
12555+ HFP_countX8 = dev_priv->HfrontPorch * dev_priv->bpp;
12556+
12557+ if (dev_priv->videoModeFormat == BURST_MODE)
12558+ {
12559+ HFP_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
12560+ }
12561+
12562+ HFP_count = HFP_countX8 / 8;
12563+
12564+ return HFP_count;
12565+}
12566+
12567+/* ************************************************************************* *\
12568+FUNCTION: GetHAdr_Count
12569+
12570+DESCRIPTION: Shows the horizontal active area value in terms of txbyteclkhs.
12571+ In Non Burst Mode, Count equal to RGB word count value
12572+
12573+ In Burst Mode, RGB pixel packets are time-compressed, leaving more time
12574+ during a scan line for LP mode (saving power) or for multiplexing
12575+ other transmissions onto the DSI link. Hence, the count equals the
12576+ time in txbyteclkhs for sending time compressed RGB pixels plus
12577+ the time needed for moving to power save mode or the time needed
12578+ for secondary channel to use the DSI link.
12579+
12580+ But if the left out time for moving to low power mode is less than
12581+ 8 txbyteclkhs [2txbyteclkhs for RGB data packet footer and
12582+ 6txbyteclkhs for a blanking packet with zero payload], then
12583+ this count will be added to the HFP's count for one lane.
12584+
12585+ Min value – 8 in decimal for non-burst mode [accounted with zero payload
12586+ for blanking packet] for one lane.
12587+ Min value – 6 in decimal for burst mode for one lane.
12588+
12589+ Max value – any value greater than the minimum value based on DPI resolution
12590+\* ************************************************************************* */
12591+static u32 GetHAdr_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
12592+{
12593+ u32 HAdr_count;
12594+ u32 HAdr_countX8;
12595+
12596+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
12597+ HAdr_countX8 = dev_priv->HactiveArea * dev_priv->bpp;
12598+
12599+ if (dev_priv->videoModeFormat == BURST_MODE)
12600+ {
12601+ HAdr_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
12602+ }
12603+
12604+ HAdr_count = HAdr_countX8 / 8;
12605+
12606+ return HAdr_count;
12607+}
12608+
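
The four helpers above (HSA, HBP, HFP and HAdr) share one conversion: a horizontal pixel count becomes txbyteclkhs byte clocks via count = pixels * bpp / 8, scaled up in burst mode by the ratio of the programmed DDR clock to the minimum calculated one. A standalone sketch of that shared step, with hypothetical numbers rather than values from this patch:

#include <stdio.h>

/* Shared pixel-to-txbyteclkhs conversion used by GetHSA/HBP/HFP/HAdr_Count(). */
static unsigned to_txbyteclkhs(unsigned pixels, unsigned bpp,
			       unsigned ddr_clock, unsigned ddr_calculated,
			       int burst_mode)
{
	unsigned count_x8 = pixels * bpp;

	if (burst_mode)
		count_x8 *= ddr_clock / ddr_calculated; /* integer ratio, as in the driver */

	return count_x8 / 8;
}

int main(void)
{
	/* hypothetical 20-pixel back porch at 24 bpp */
	printf("HBP (non-burst) = %u txbyteclkhs\n",
	       to_txbyteclkhs(20, 24, 400, 400, 0));
	/* burst mode with a link DDR clock twice the calculated minimum */
	printf("HBP (burst)     = %u txbyteclkhs\n",
	       to_txbyteclkhs(20, 24, 800, 400, 1));
	return 0;
}
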
12609+/* ************************************************************************* *\
12610+FUNCTION: GetHighLowSwitchCount
12611+
12612+DESCRIPTION: High speed to low power or Low power to high speed switching time
12613+ in terms byte clock (txbyteclkhs). This value is based on the
12614+ byte clock (txbyteclkhs) and low power clock frequency (txclkesc)
12615+
12616+ Typical value - Number of byte clocks required to switch from low power mode
12617+ to high speed mode after "txrequesths" is asserted.
12618+
12619+ The worst count value among the low to high or high to low switching time
12620+ in terms of txbyteclkhs has to be programmed in this register.
12621+
12622+ Useful formulae:
12623+ DDR clock period = 2 times UI
12624+ txbyteclkhs clock = 8 times UI
12625+ Tlpx = 1 / txclkesc
12626+ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE (from Standard D-PHY spec)
12627+ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] + 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
12628+ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE (from Standard D-PHY spec)
12629+ Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
12630+\* ************************************************************************* */
12631+static u32 GetHighLowSwitchCount(DRM_DRIVER_PRIVATE_T *dev_priv)
12632+{
12633+ u32 HighLowSwitchCount, HighToLowSwitchCount, LowToHighSwitchCount;
12634+
12635+/* ************************************************************************* *\
12636+ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE (from Standard D-PHY spec)
12637+ Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
12638+
12639+ Tlpx = 50 ns, Using max txclkesc (20MHz)
12640+
12641+ txbyteclkhs_period = 4000 / dev_priv->DDR_Clock; in ns
12642+ UI_period = 500 / dev_priv->DDR_Clock; in ns
12643+
12644+ HS_to_LP = Ths-trail = 18 * UI_period + 4 * Tlpx
12645+ = 9000 / dev_priv->DDR_Clock + 200;
12646+
12647+ HighToLowSwitchCount = HS_to_LP / txbyteclkhs_period
12648+ = (9000 / dev_priv->DDR_Clock + 200) / (4000 / dev_priv->DDR_Clock)
12649+ = (9000 + (200 * dev_priv->DDR_Clock)) / 4000
12650+
12651+\* ************************************************************************* */
12652+ HighToLowSwitchCount = (9000 + (200 * dev_priv->DDR_Clock)) / 4000 + 1;
12653+
12654+/* ************************************************************************* *\
12655+ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE (from Standard D-PHY spec)
12656+ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] + 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
12657+
12658+ LP_to_HS = 10 * UI_period + 5 * Tlpx =
12659+ = 5000 / dev_priv->DDR_Clock + 250;
12660+
12661+ LowToHighSwitchCount = LP_to_HS / txbyteclkhs_period
12662+ = (5000 / dev_priv->DDR_Clock + 250) / (4000 / dev_priv->DDR_Clock)
12663+ = (5000 + (250 * dev_priv->DDR_Clock)) / 4000
12664+
12665+\* ************************************************************************* */
12666+ LowToHighSwitchCount = (5000 + (250 * dev_priv->DDR_Clock)) / 4000 + 1;
12667+
12668+ if (HighToLowSwitchCount > LowToHighSwitchCount)
12669+ {
12670+ HighLowSwitchCount = HighToLowSwitchCount;
12671+ }
12672+ else
12673+ {
12674+ HighLowSwitchCount = LowToHighSwitchCount;
12675+ }
12676+
12677+
12678+ /* FIXME jliu need to fine tune the above formulae and remove the following after power on */
12679+ if (HighLowSwitchCount < 0x1f)
12680+ HighLowSwitchCount = 0x1f;
12681+
12682+ return HighLowSwitchCount;
12683+}
12684+
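
With a hypothetical DDR_Clock of 400 (not a value from this patch), the two derivations above give HighToLowSwitchCount = (9000 + 200 * 400) / 4000 + 1 = 23 and LowToHighSwitchCount = (5000 + 250 * 400) / 4000 + 1 = 27 txbyteclkhs; the function keeps the worse of the two (27), and the power-on FIXME then raises it to the 0x1f floor.
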
12685+/* ************************************************************************* *\
12686+FUNCTION: mrst_gen_long_write
12687+ `
12688+DESCRIPTION:
12689+
12690+\* ************************************************************************* */
12691+static void mrst_gen_long_write(struct drm_device *dev, u32 *data, u16 wc,u8 vc)
12692+{
12693+ u32 gen_data_reg = HS_GEN_DATA_REG;
12694+ u32 gen_ctrl_reg = HS_GEN_CTRL_REG;
12695+ u32 date_full_bit = HS_DATA_FIFO_FULL;
12696+ u32 control_full_bit = HS_CTRL_FIFO_FULL;
12697+ u16 wc_saved = wc;
12698+
12699+#if PRINT_JLIU7
12700+ DRM_INFO("JLIU7 enter mrst_gen_long_write \n");
12701+#endif /* PRINT_JLIU7 */
12702+
12703+ /* sanity check */
12704+ if (vc > 4)
12705+ {
12706+		DRM_ERROR(KERN_ERR "MIPI virtual channel can't be greater than 4.\n");
12707+ return;
12708+ }
12709+
12710+
12711+ if (0) /* FIXME JLIU7 check if it is in LP*/
12712+ {
12713+ gen_data_reg = LP_GEN_DATA_REG;
12714+ gen_ctrl_reg = LP_GEN_CTRL_REG;
12715+ date_full_bit = LP_DATA_FIFO_FULL;
12716+ control_full_bit = LP_CTRL_FIFO_FULL;
12717+ }
12718+
12719+ while (wc >= 4)
12720+ {
12721+ /* Check if MIPI IP generic data fifo is not full */
12722+ while ((REG_READ(GEN_FIFO_STAT_REG) & date_full_bit) == date_full_bit);
12723+
12724+ /* write to data buffer */
12725+ REG_WRITE(gen_data_reg, *data);
12726+
12727+ wc -= 4;
12728+ data ++;
12729+ }
12730+
12731+ switch (wc)
12732+ {
12733+ case 1:
12734+ REG_WRITE8(gen_data_reg, *((u8 *)data));
12735+ break;
12736+ case 2:
12737+ REG_WRITE16(gen_data_reg, *((u16 *)data));
12738+ break;
12739+ case 3:
12740+ REG_WRITE16(gen_data_reg, *((u16 *)data));
12741+ data = (u32*)((u8*) data + 2);
12742+ REG_WRITE8(gen_data_reg, *((u8 *)data));
12743+ break;
12744+ }
12745+
12746+ /* Check if MIPI IP generic control fifo is not full */
12747+ while ((REG_READ(GEN_FIFO_STAT_REG) & control_full_bit) == control_full_bit);
12748+ /* write to control buffer */
12749+ REG_WRITE(gen_ctrl_reg, 0x29 | (wc_saved << 8) | (vc << 6));
12750+}
12751+
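
As a concrete trace of the packing above: the exit_sleep_mode write issued from mrst_init_HIMAX_MIPI_bridge() below uses wc = 5 and vc = 0, so the while loop performs a single 32-bit data-FIFO write, the switch sends the one remaining byte with REG_WRITE8, and the control word comes out as 0x29 | (5 << 8) | (0 << 6) = 0x529, i.e. word count 5 in bits 15:8, virtual channel 0 in bits 7:6 and the long-write data type 0x29 in bits 5:0.
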
12752+/* ************************************************************************* *\
12753+FUNCTION: mrst_init_HIMAX_MIPI_bridge
12754+ `
12755+DESCRIPTION:
12756+
12757+\* ************************************************************************* */
12758+static void mrst_init_HIMAX_MIPI_bridge(struct drm_device *dev)
12759+{
12760+ u32 gen_data[2];
12761+ u16 wc = 0;
12762+ u8 vc =0;
12763+ u32 gen_data_intel = 0x200105;
12764+
12765+#if PRINT_JLIU7
12766+ DRM_INFO("JLIU7 enter mrst_init_HIMAX_MIPI_bridge \n");
12767+#endif /* PRINT_JLIU7 */
12768+
12769+ /* exit sleep mode */
12770+ wc = 0x5;
12771+ gen_data[0] = gen_data_intel | (0x11 << 24);
12772+ gen_data[1] = 0;
12773+ mrst_gen_long_write(dev, gen_data, wc, vc);
12774+
12775+ /* set_pixel_format */
12776+ gen_data[0] = gen_data_intel | (0x3A << 24);
12777+ gen_data[1] = 0x77;
12778+ mrst_gen_long_write(dev, gen_data, wc, vc);
12779+
12780+ /* Set resolution for (800X480) */
12781+ wc = 0x8;
12782+ gen_data[0] = gen_data_intel | (0x2A << 24);
12783+ gen_data[1] = 0x1F030000;
12784+ mrst_gen_long_write(dev, gen_data, wc, vc);
12785+ gen_data[0] = gen_data_intel | (0x2B << 24);
12786+ gen_data[1] = 0xDF010000;
12787+ mrst_gen_long_write(dev, gen_data, wc, vc);
12788+
12789+ /* System control */
12790+ wc = 0x6;
12791+ gen_data[0] = gen_data_intel | (0xEE << 24);
12792+ gen_data[1] = 0x10FA;
12793+ mrst_gen_long_write(dev, gen_data, wc, vc);
12794+
12795+ /* INPUT TIMING FOR TEST PATTERN(800X480) */
12796+ /* H-size */
12797+ gen_data[1] = 0x2000;
12798+ mrst_gen_long_write(dev, gen_data, wc, vc);
12799+ gen_data[1] = 0x0301;
12800+ mrst_gen_long_write(dev, gen_data, wc, vc);
12801+
12802+ /* V-size */
12803+ gen_data[1] = 0xE002;
12804+ mrst_gen_long_write(dev, gen_data, wc, vc);
12805+ gen_data[1] = 0x0103;
12806+ mrst_gen_long_write(dev, gen_data, wc, vc);
12807+
12808+ /* H-total */
12809+ gen_data[1] = 0x2004;
12810+ mrst_gen_long_write(dev, gen_data, wc, vc);
12811+ gen_data[1] = 0x0405;
12812+ mrst_gen_long_write(dev, gen_data, wc, vc);
12813+
12814+ /* V-total */
12815+ gen_data[1] = 0x0d06;
12816+ mrst_gen_long_write(dev, gen_data, wc, vc);
12817+ gen_data[1] = 0x0207;
12818+ mrst_gen_long_write(dev, gen_data, wc, vc);
12819+
12820+ /* H-blank */
12821+ gen_data[1] = 0x0308;
12822+ mrst_gen_long_write(dev, gen_data, wc, vc);
12823+ gen_data[1] = 0x0009;
12824+ mrst_gen_long_write(dev, gen_data, wc, vc);
12825+
12826+ /* H-blank */
12827+ gen_data[1] = 0x030A;
12828+ mrst_gen_long_write(dev, gen_data, wc, vc);
12829+ gen_data[1] = 0x000B;
12830+ mrst_gen_long_write(dev, gen_data, wc, vc);
12831+
12832+ /* H-start */
12833+ gen_data[1] = 0xD80C;
12834+ mrst_gen_long_write(dev, gen_data, wc, vc);
12835+ gen_data[1] = 0x000D;
12836+ mrst_gen_long_write(dev, gen_data, wc, vc);
12837+
12838+ /* V-start */
12839+ gen_data[1] = 0x230E;
12840+ mrst_gen_long_write(dev, gen_data, wc, vc);
12841+ gen_data[1] = 0x000F;
12842+ mrst_gen_long_write(dev, gen_data, wc, vc);
12843+
12844+ /* RGB domain */
12845+ gen_data[1] = 0x0027;
12846+ mrst_gen_long_write(dev, gen_data, wc, vc);
12847+
12848+ /* INP_FORM Setting */
12849+ /* set_1 */
12850+ gen_data[1] = 0x1C10;
12851+ mrst_gen_long_write(dev, gen_data, wc, vc);
12852+
12853+ /* set_2 */
12854+ gen_data[1] = 0x0711;
12855+ mrst_gen_long_write(dev, gen_data, wc, vc);
12856+
12857+ /* set_3 */
12858+ gen_data[1] = 0x0012;
12859+ mrst_gen_long_write(dev, gen_data, wc, vc);
12860+
12861+ /* set_4 */
12862+ gen_data[1] = 0x0013;
12863+ mrst_gen_long_write(dev, gen_data, wc, vc);
12864+
12865+ /* set_5 */
12866+ gen_data[1] = 0x2314;
12867+ mrst_gen_long_write(dev, gen_data, wc, vc);
12868+
12869+ /* set_6 */
12870+ gen_data[1] = 0x0015;
12871+ mrst_gen_long_write(dev, gen_data, wc, vc);
12872+
12873+ /* set_7 */
12874+ gen_data[1] = 0x2316;
12875+ mrst_gen_long_write(dev, gen_data, wc, vc);
12876+
12877+ /* set_8 */
12878+ gen_data[1] = 0x0017;
12879+ mrst_gen_long_write(dev, gen_data, wc, vc);
12880+
12881+ /* set_1 */
12882+ gen_data[1] = 0x0330;
12883+ mrst_gen_long_write(dev, gen_data, wc, vc);
12884+
12885+ /* FRC Setting */
12886+ /* FRC_set_2 */
12887+ gen_data[1] = 0x237A;
12888+ mrst_gen_long_write(dev, gen_data, wc, vc);
12889+
12890+ /* FRC_set_3 */
12891+ gen_data[1] = 0x4C7B;
12892+ mrst_gen_long_write(dev, gen_data, wc, vc);
12893+
12894+ /* FRC_set_4 */
12895+ gen_data[1] = 0x037C;
12896+ mrst_gen_long_write(dev, gen_data, wc, vc);
12897+
12898+ /* FRC_set_5 */
12899+ gen_data[1] = 0x3482;
12900+ mrst_gen_long_write(dev, gen_data, wc, vc);
12901+
12902+ /* FRC_set_7 */
12903+ gen_data[1] = 0x1785;
12904+ mrst_gen_long_write(dev, gen_data, wc, vc);
12905+
12906+#if 0
12907+ /* FRC_set_8 */
12908+ gen_data[1] = 0xD08F;
12909+ mrst_gen_long_write(dev, gen_data, wc, vc);
12910+#endif
12911+
12912+ /* OUTPUT TIMING FOR TEST PATTERN (800X480) */
12913+ /* out_htotal */
12914+ gen_data[1] = 0x2090;
12915+ mrst_gen_long_write(dev, gen_data, wc, vc);
12916+ gen_data[1] = 0x0491;
12917+ mrst_gen_long_write(dev, gen_data, wc, vc);
12918+
12919+ /* out_hsync */
12920+ gen_data[1] = 0x0392;
12921+ mrst_gen_long_write(dev, gen_data, wc, vc);
12922+ gen_data[1] = 0x0093;
12923+ mrst_gen_long_write(dev, gen_data, wc, vc);
12924+
12925+ /* out_hstart */
12926+ gen_data[1] = 0xD894;
12927+ mrst_gen_long_write(dev, gen_data, wc, vc);
12928+ gen_data[1] = 0x0095;
12929+ mrst_gen_long_write(dev, gen_data, wc, vc);
12930+
12931+ /* out_hsize */
12932+ gen_data[1] = 0x2096;
12933+ mrst_gen_long_write(dev, gen_data, wc, vc);
12934+ gen_data[1] = 0x0397;
12935+ mrst_gen_long_write(dev, gen_data, wc, vc);
12936+
12937+ /* out_vtotal */
12938+ gen_data[1] = 0x0D98;
12939+ mrst_gen_long_write(dev, gen_data, wc, vc);
12940+ gen_data[1] = 0x0299;
12941+ mrst_gen_long_write(dev, gen_data, wc, vc);
12942+
12943+ /* out_vsync */
12944+ gen_data[1] = 0x039A;
12945+ mrst_gen_long_write(dev, gen_data, wc, vc);
12946+ gen_data[1] = 0x009B;
12947+ mrst_gen_long_write(dev, gen_data, wc, vc);
12948+
12949+ /* out_vstart */
12950+ gen_data[1] = 0x239C;
12951+ mrst_gen_long_write(dev, gen_data, wc, vc);
12952+ gen_data[1] = 0x009D;
12953+ mrst_gen_long_write(dev, gen_data, wc, vc);
12954+
12955+ /* out_vsize */
12956+ gen_data[1] = 0xE09E;
12957+ mrst_gen_long_write(dev, gen_data, wc, vc);
12958+ gen_data[1] = 0x019F;
12959+ mrst_gen_long_write(dev, gen_data, wc, vc);
12960+
12961+ /* FRC_set_6 */
12962+ gen_data[1] = 0x9084;
12963+ mrst_gen_long_write(dev, gen_data, wc, vc);
12964+
12965+ /* Other setting */
12966+ gen_data[1] = 0x0526;
12967+ mrst_gen_long_write(dev, gen_data, wc, vc);
12968+
12969+ /* RBG domain */
12970+ gen_data[1] = 0x1177;
12971+ mrst_gen_long_write(dev, gen_data, wc, vc);
12972+
12973+ /* rgbw */
12974+ /* set_1 */
12975+ gen_data[1] = 0xD28F;
12976+ mrst_gen_long_write(dev, gen_data, wc, vc);
12977+
12978+ /* set_2 */
12979+ gen_data[1] = 0x02D0;
12980+ mrst_gen_long_write(dev, gen_data, wc, vc);
12981+
12982+ /* set_3 */
12983+ gen_data[1] = 0x08D1;
12984+ mrst_gen_long_write(dev, gen_data, wc, vc);
12985+
12986+ /* set_4 */
12987+ gen_data[1] = 0x05D2;
12988+ mrst_gen_long_write(dev, gen_data, wc, vc);
12989+
12990+ /* set_5 */
12991+ gen_data[1] = 0x24D4;
12992+ mrst_gen_long_write(dev, gen_data, wc, vc);
12993+
12994+ /* set_6 */
12995+ gen_data[1] = 0x00D5;
12996+ mrst_gen_long_write(dev, gen_data, wc, vc);
12997+ gen_data[1] = 0x02D7;
12998+ mrst_gen_long_write(dev, gen_data, wc, vc);
12999+ gen_data[1] = 0x00D8;
13000+ mrst_gen_long_write(dev, gen_data, wc, vc);
13001+
13002+ gen_data[1] = 0x48F3;
13003+ mrst_gen_long_write(dev, gen_data, wc, vc);
13004+ gen_data[1] = 0xD4F2;
13005+ mrst_gen_long_write(dev, gen_data, wc, vc);
13006+ gen_data[1] = 0x3D8E;
13007+ mrst_gen_long_write(dev, gen_data, wc, vc);
13008+ gen_data[1] = 0x60FD;
13009+ mrst_gen_long_write(dev, gen_data, wc, vc);
13010+ gen_data[1] = 0x00B5;
13011+ mrst_gen_long_write(dev, gen_data, wc, vc);
13012+ gen_data[1] = 0x48F4;
13013+ mrst_gen_long_write(dev, gen_data, wc, vc);
13014+
13015+	/* inside pattern */
13016+ gen_data[1] = 0x0060;
13017+ mrst_gen_long_write(dev, gen_data, wc, vc);
13018+}
13019+
13020+/* ************************************************************************* *\
13021+FUNCTION: mrst_init_NSC_MIPI_bridge
13022+
13023+DESCRIPTION: Initialize the NSC MIPI bridge (lane configuration, error reporting, timeouts and escape clock selection).
13024+
13025+\* ************************************************************************* */
13026+static void mrst_init_NSC_MIPI_bridge(struct drm_device *dev)
13027+{
13028+
13029+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13030+#if PRINT_JLIU7
13031+ DRM_INFO("JLIU7 enter mrst_init_NSC_MIPI_bridge.\n");
13032+#endif /* PRINT_JLIU7 */
13033+ /* Program MIPI IP to 50MHz DSI, Non-Burst mode with sync event,
13034+ 1 or 2 Data Lanes */
13035+
13036+ udelay(DELAY_TIME1);
13037+ /* enable RGB24*/
13038+ REG_WRITE(LP_GEN_CTRL_REG, 0x003205e3);
13039+
13040+ udelay(DELAY_TIME1);
13041+ /* enable all error reporting*/
13042+ REG_WRITE(LP_GEN_CTRL_REG, 0x000040e3);
13043+ udelay(DELAY_TIME1);
13044+ REG_WRITE(LP_GEN_CTRL_REG, 0x000041e3);
13045+
13046+ udelay(DELAY_TIME1);
13047+ /* enable 2 data lane; video shaping & error reporting */
13048+ REG_WRITE(LP_GEN_CTRL_REG, 0x00a842e3); /* 0x006842e3 for 1 data lane */
13049+
13050+ udelay(DELAY_TIME1);
13051+ /* HS timeout */
13052+ REG_WRITE(LP_GEN_CTRL_REG, 0x009243e3);
13053+
13054+ udelay(DELAY_TIME1);
13055+ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
13056+ REG_WRITE(LP_GEN_CTRL_REG, 0x00e645e3);
13057+
13058+ /* enable all virtual channels */
13059+ REG_WRITE(LP_GEN_CTRL_REG, 0x000f46e3);
13060+
13061+ /* set output strength to low-drive */
13062+ REG_WRITE(LP_GEN_CTRL_REG, 0x00007de3);
13063+
13064+ if (dev_priv->sku_83)
13065+ {
13066+		/* set escape clock to divide by 8 */
13067+ REG_WRITE(LP_GEN_CTRL_REG, 0x000044e3);
13068+ }
13069+ else if(dev_priv->sku_100L)
13070+ {
13071+		/* set escape clock to divide by 16 */
13072+ REG_WRITE(LP_GEN_CTRL_REG, 0x001044e3);
13073+ }
13074+ else if(dev_priv->sku_100)
13075+ {
13076+		/* set escape clock to divide by 32 */
13077+ REG_WRITE(LP_GEN_CTRL_REG, 0x003044e3);
13078+
13079+ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
13080+ REG_WRITE(LP_GEN_CTRL_REG, 0x00ec45e3);
13081+ }
13082+
13083+ /* CFG_VALID=1; RGB_CLK_EN=1. */
13084+ REG_WRITE(LP_GEN_CTRL_REG, 0x00057fe3);
13085+
13086+}
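The bridge init above chooses the escape-clock divider from the SKU flags; per the inline comments, sku_83 divides by 8, sku_100L by 16 and sku_100 by 32. A minimal sketch of just that selection (the register encodings are deliberately omitted, since only the comments document the divider values):

#include <stdbool.h>

/* Sketch only: mirrors the SKU -> escape-clock divider choice described in
 * the comments of mrst_init_NSC_MIPI_bridge(). */
static unsigned int mrst_escape_clock_divider(bool sku_83, bool sku_100l,
					      bool sku_100)
{
	if (sku_83)
		return 8;
	if (sku_100l)
		return 16;
	if (sku_100)
		return 32;
	return 8;	/* hypothetical fallback; the driver only handles the three SKUs */
}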
13087+
13088+static void mrst_dsi_mode_set(struct drm_encoder *encoder,
13089+ struct drm_display_mode *mode,
13090+ struct drm_display_mode *adjusted_mode)
13091+{
13092+ struct drm_device *dev = encoder->dev;
13093+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13094+ u32 pfit_control;
13095+ u32 dsiFuncPrgValue = 0;
13096+ u32 SupportedFormat = 0;
13097+ u32 channelNumber = 0;
13098+ u32 DBI_dataWidth = 0;
13099+ u32 resolution = 0;
13100+ u32 mipiport = 0;
13101+
13102+#if PRINT_JLIU7
13103+ DRM_INFO("JLIU7 enter mrst_dsi_mode_set \n");
13104+#endif /* PRINT_JLIU7 */
13105+
13106+ switch (dev_priv->bpp)
13107+ {
13108+ case 16:
13109+ SupportedFormat = RGB_565_FMT;
13110+ break;
13111+ case 18:
13112+ SupportedFormat = RGB_666_FMT;
13113+ break;
13114+ case 24:
13115+ SupportedFormat = RGB_888_FMT;
13116+ break;
13117+ default:
13118+ DRM_INFO("mrst_dsi_mode_set, invalid bpp \n");
13119+ break;
13120+ }
13121+
13122+ resolution = dev_priv->HactiveArea | (dev_priv->VactiveArea << RES_V_POS);
13123+
13124+ if (dev_priv->dpi)
13125+ {
13126+ /* Enable automatic panel scaling for non-native modes so that they fill
13127+ * the screen. Should be enabled before the pipe is enabled, according to
13128+ * register description and PRM.
13129+ */
13130+ /*FIXME JLIU7, enable Auto-scale only */
13131+ /*
13132+ * Enable automatic panel scaling so that non-native modes fill the
13133+ * screen. Should be enabled before the pipe is enabled, according to
13134+ * register description and PRM.
13135+ */
13136+#if 0 /*JLIU7_PO */
13137+ if (mode->hdisplay != adjusted_mode->hdisplay ||
13138+ mode->vdisplay != adjusted_mode->vdisplay)
13139+ {
13140+ pfit_control = PFIT_ENABLE;
13141+ }
13142+ else
13143+#endif /*JLIU7_PO */
13144+ {
13145+ pfit_control = 0;
13146+ }
13147+ REG_WRITE(PFIT_CONTROL, pfit_control);
13148+
13149+ /* Enable MIPI Port */
13150+ mipiport = MIPI_PORT_EN;
13151+ REG_WRITE(MIPI, mipiport);
13152+
13153+ /* JLIU7_FIXME set MIPI clock ratio to 1:1 for NSC init */
13154+ REG_WRITE(MIPI_CONTROL_REG, 0x00000018);
13155+
13156+ /* Enable all the error interrupt */
13157+ REG_WRITE(INTR_EN_REG, 0xffffffff);
13158+ REG_WRITE(TURN_AROUND_TIMEOUT_REG, 0x0000000F);
13159+		REG_WRITE(DEVICE_RESET_REG, 0x000000ff); /* old value = 0x00000015 may depend on the DSI RX device */
13160+ REG_WRITE(INIT_COUNT_REG, 0x00000fff); /* Minimum value = 0x000007d0 */
13161+
13162+ SupportedFormat <<= FMT_DPI_POS;
13163+ dsiFuncPrgValue = dev_priv->laneCount | SupportedFormat;
13164+ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
13165+
13166+ REG_WRITE(DPI_RESOLUTION_REG, resolution);
13167+ REG_WRITE(DBI_RESOLUTION_REG, 0x00000000);
13168+
13169+ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, dev_priv->VsyncWidth);
13170+ REG_WRITE(VERT_BACK_PORCH_COUNT_REG, dev_priv->VbackPorch);
13171+ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG, dev_priv->VfrontPorch);
13172+
13173+#if 1 /*JLIU7_PO hard coded for NSC PO */
13174+ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, 0x1e);
13175+ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, 0x18);
13176+ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, 0x8);
13177+ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, 0x4b0);
13178+#else /*JLIU7_PO hard coded for NSC PO */
13179+ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, GetHSA_Count(dev_priv));
13180+ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, GetHBP_Count(dev_priv));
13181+ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, GetHFP_Count(dev_priv));
13182+ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, GetHAdr_Count(dev_priv));
13183+#endif /*JLIU7_PO hard coded for NSC PO */
13184+ REG_WRITE(VIDEO_FMT_REG, dev_priv->videoModeFormat);
13185+ }
13186+ else
13187+ {
13188+ /* JLIU7 FIXME VIRTUAL_CHANNEL_NUMBER_1 or VIRTUAL_CHANNEL_NUMBER_0*/
13189+ channelNumber = VIRTUAL_CHANNEL_NUMBER_1 << DBI_CHANNEL_NUMBER_POS;
13190+ DBI_dataWidth = DBI_DATA_WIDTH_16BIT << DBI_DATA_WIDTH_POS;
13191+ dsiFuncPrgValue = dev_priv->laneCount | channelNumber | DBI_dataWidth;
13192+ /* JLIU7 FIXME */
13193+ SupportedFormat <<= FMT_DBI_POS;
13194+ dsiFuncPrgValue |= SupportedFormat;
13195+ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
13196+
13197+ REG_WRITE(DPI_RESOLUTION_REG, 0x00000000);
13198+ REG_WRITE(DBI_RESOLUTION_REG, resolution);
13199+ }
13200+
13201+#if 1 /*JLIU7_PO hard code for NSC PO */
13202+ REG_WRITE(HS_TX_TIMEOUT_REG, 0xffff);
13203+ REG_WRITE(LP_RX_TIMEOUT_REG, 0xffff);
13204+
13205+ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, 0x46);
13206+#else /*JLIU7_PO hard code for NSC PO */
13207+ REG_WRITE(HS_TX_TIMEOUT_REG, GetHS_TX_timeoutCount(dev_priv));
13208+ REG_WRITE(LP_RX_TIMEOUT_REG, GetLP_RX_timeoutCount(dev_priv));
13209+
13210+ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, GetHighLowSwitchCount(dev_priv));
13211+#endif /*JLIU7_PO hard code for NSC PO */
13212+
13213+
13214+ REG_WRITE(EOT_DISABLE_REG, 0x00000000);
13215+
13216+ /* FIXME JLIU7 for NSC PO */
13217+ REG_WRITE(LP_BYTECLK_REG, 0x00000004);
13218+
13219+ REG_WRITE(DEVICE_READY_REG, 0x00000001);
13220+ REG_WRITE(DPI_CONTROL_REG, 0x00000002); /* Turn On */
13221+
13222+ dev_priv->dsi_device_ready = true;
13223+
13224+#if 0 /*JLIU7_PO */
13225+ mrst_init_HIMAX_MIPI_bridge(dev);
13226+#endif /*JLIU7_PO */
13227+ mrst_init_NSC_MIPI_bridge(dev);
13228+
13229+ if (dev_priv->sku_100L)
13230+ /* Set DSI link to 100MHz; 2:1 clock ratio */
13231+ REG_WRITE(MIPI_CONTROL_REG, 0x00000009);
13232+
13233+ REG_WRITE(PIPEACONF, dev_priv->pipeconf);
13234+ REG_READ(PIPEACONF);
13235+
13236+ /* Wait for 20ms for the pipe enable to take effect. */
13237+ udelay(20000);
13238+
13239+ /* JLIU7_PO hard code for NSC PO Program the display FIFO watermarks */
13240+ REG_WRITE(DSPARB, 0x00001d9c);
13241+ REG_WRITE(DSPFW1, 0xfc0f0f18);
13242+ REG_WRITE(DSPFW5, 0x04140404);
13243+ REG_WRITE(DSPFW6, 0x000001f0);
13244+
13245+ REG_WRITE(DSPACNTR, dev_priv->dspcntr);
13246+
13247+ /* Wait for 20ms for the plane enable to take effect. */
13248+ udelay(20000);
13249+}
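mrst_dsi_mode_set() packs the active area into DPI_RESOLUTION_REG with the horizontal size in the low bits and the vertical size shifted up by RES_V_POS, and builds DSI_FUNC_PRG_REG from the lane count plus the pixel format shifted by FMT_DPI_POS. A small sketch of that packing; the shift amounts are passed in as parameters because only their symbolic names appear in this patch:

#include <stdint.h>

/* Sketch: the register packing used by the DPI path above. res_v_pos and
 * fmt_dpi_pos stand in for RES_V_POS and FMT_DPI_POS, whose numeric values
 * are defined elsewhere in the driver headers. */
static uint32_t pack_dpi_resolution(uint32_t hactive, uint32_t vactive,
				    unsigned int res_v_pos)
{
	return hactive | (vactive << res_v_pos);
}

static uint32_t pack_dsi_func_prg(uint32_t lane_count, uint32_t rgb_format,
				  unsigned int fmt_dpi_pos)
{
	return lane_count | (rgb_format << fmt_dpi_pos);
}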
13250+
13251+/**
13252+ * Detect the MIPI connection.
13253+ *
13254+ * This always returns CONNECTOR_STATUS_CONNECTED.
13255+ * This connector should only have
13256+ * been set up if the MIPI was actually connected anyway.
13257+ */
13258+static enum drm_connector_status mrst_dsi_detect(struct drm_connector
13259+ *connector)
13260+{
13261+#if PRINT_JLIU7
13262+ DRM_INFO("JLIU7 enter mrst_dsi_detect \n");
13263+#endif /* PRINT_JLIU7 */
13264+
13265+ return connector_status_connected;
13266+}
13267+
13268+/**
13269+ * Return the list of MIPI DDB modes if available.
13270+ */
13271+static int mrst_dsi_get_modes(struct drm_connector *connector)
13272+{
13273+ struct drm_device *dev = connector->dev;
13274+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
13275+ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
13276+
13277+/* FIXME get the MIPI DDB modes */
13278+
13279+	/* Didn't get a DDB, so
13280+ * Set wide sync ranges so we get all modes
13281+ * handed to valid_mode for checking
13282+ */
13283+ connector->display_info.min_vfreq = 0;
13284+ connector->display_info.max_vfreq = 200;
13285+ connector->display_info.min_hfreq = 0;
13286+ connector->display_info.max_hfreq = 200;
13287+
13288+ if (mode_dev->panel_fixed_mode != NULL) {
13289+ struct drm_display_mode *mode =
13290+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
13291+ drm_mode_probed_add(connector, mode);
13292+ return 1;
13293+ }
13294+
13295+ return 0;
13296+}
13297+
13298+static const struct drm_encoder_helper_funcs mrst_dsi_helper_funcs = {
13299+ .dpms = mrst_dsi_dpms,
13300+ .mode_fixup = psb_intel_lvds_mode_fixup,
13301+ .prepare = mrst_dsi_prepare,
13302+ .mode_set = mrst_dsi_mode_set,
13303+ .commit = mrst_dsi_commit,
13304+};
13305+
13306+static const struct drm_connector_helper_funcs
13307+ mrst_dsi_connector_helper_funcs = {
13308+ .get_modes = mrst_dsi_get_modes,
13309+ .mode_valid = psb_intel_lvds_mode_valid,
13310+ .best_encoder = psb_intel_best_encoder,
13311+};
13312+
13313+static const struct drm_connector_funcs mrst_dsi_connector_funcs = {
13314+ .save = mrst_dsi_save,
13315+ .restore = mrst_dsi_restore,
13316+ .detect = mrst_dsi_detect,
13317+ .fill_modes = drm_helper_probe_single_connector_modes,
13318+ .destroy = psb_intel_lvds_destroy,
13319+};
13320+
13321+/** Returns the panel fixed mode from configuration. */
13322+/** FIXME JLIU7 need to revisit it. */
13323+struct drm_display_mode *mrst_dsi_get_configuration_mode(struct drm_device *dev)
13324+{
13325+ struct drm_display_mode *mode;
13326+
13327+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
13328+ if (!mode)
13329+ return NULL;
13330+
13331+#if 1 /*FIXME jliu7 remove it later */
13332+ /* copy from SV - hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */
13333+ mode->hdisplay = 800;
13334+ mode->vdisplay = 480;
13335+ mode->hsync_start = 808;
13336+ mode->hsync_end = 848;
13337+ mode->htotal = 880;
13338+ mode->vsync_start = 482;
13339+ mode->vsync_end = 483;
13340+ mode->vtotal = 486;
13341+ mode->clock = 33264;
13342+#endif /*FIXME jliu7 remove it later */
13343+
13344+#if 0 /*FIXME jliu7 remove it later */
13345+ /* hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */
13346+ mode->hdisplay = 800;
13347+ mode->vdisplay = 480;
13348+ mode->hsync_start = 836;
13349+ mode->hsync_end = 846;
13350+ mode->htotal = 1056;
13351+ mode->vsync_start = 489;
13352+ mode->vsync_end = 491;
13353+ mode->vtotal = 525;
13354+ mode->clock = 33264;
13355+#endif /*FIXME jliu7 remove it later */
13356+
13357+#if 0 /*FIXME jliu7 remove it later */
13358+ /* hard coded fixed mode for LVDS 800x480 */
13359+ mode->hdisplay = 800;
13360+ mode->vdisplay = 480;
13361+ mode->hsync_start = 801;
13362+ mode->hsync_end = 802;
13363+ mode->htotal = 1024;
13364+ mode->vsync_start = 481;
13365+ mode->vsync_end = 482;
13366+ mode->vtotal = 525;
13367+ mode->clock = 30994;
13368+#endif /*FIXME jliu7 remove it later */
13369+
13370+#if 0 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */
13371+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
13372+ mode->hdisplay = 1024;
13373+ mode->vdisplay = 600;
13374+ mode->hsync_start = 1072;
13375+ mode->hsync_end = 1104;
13376+ mode->htotal = 1184;
13377+ mode->vsync_start = 603;
13378+ mode->vsync_end = 604;
13379+ mode->vtotal = 608;
13380+ mode->clock = 53990;
13381+#endif /*FIXME jliu7 remove it later */
13382+
13383+#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
13384+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
13385+ mode->hdisplay = 1024;
13386+ mode->vdisplay = 600;
13387+ mode->hsync_start = 1104;
13388+ mode->hsync_end = 1136;
13389+ mode->htotal = 1184;
13390+ mode->vsync_start = 603;
13391+ mode->vsync_end = 604;
13392+ mode->vtotal = 608;
13393+ mode->clock = 53990;
13394+#endif /*FIXME jliu7 remove it later */
13395+
13396+#if 0 /*FIXME jliu7 remove it later */
13397+ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
13398+ mode->hdisplay = 1024;
13399+ mode->vdisplay = 600;
13400+ mode->hsync_start = 1124;
13401+ mode->hsync_end = 1204;
13402+ mode->htotal = 1312;
13403+ mode->vsync_start = 607;
13404+ mode->vsync_end = 610;
13405+ mode->vtotal = 621;
13406+ mode->clock = 48885;
13407+#endif /*FIXME jliu7 remove it later */
13408+
13409+#if 0 /*FIXME jliu7 remove it later */
13410+ /* hard coded fixed mode for LVDS 1024x768 */
13411+ mode->hdisplay = 1024;
13412+ mode->vdisplay = 768;
13413+ mode->hsync_start = 1048;
13414+ mode->hsync_end = 1184;
13415+ mode->htotal = 1344;
13416+ mode->vsync_start = 771;
13417+ mode->vsync_end = 777;
13418+ mode->vtotal = 806;
13419+ mode->clock = 65000;
13420+#endif /*FIXME jliu7 remove it later */
13421+
13422+#if 0 /*FIXME jliu7 remove it later */
13423+ /* hard coded fixed mode for LVDS 1366x768 */
13424+ mode->hdisplay = 1366;
13425+ mode->vdisplay = 768;
13426+ mode->hsync_start = 1430;
13427+ mode->hsync_end = 1558;
13428+ mode->htotal = 1664;
13429+ mode->vsync_start = 769;
13430+ mode->vsync_end = 770;
13431+ mode->vtotal = 776;
13432+ mode->clock = 77500;
13433+#endif /*FIXME jliu7 remove it later */
13434+
13435+ drm_mode_set_name(mode);
13436+ drm_mode_set_crtcinfo(mode, 0);
13437+
13438+ return mode;
13439+}
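The enabled 800x480 timings above follow the usual drm_display_mode conventions: front porch = hsync_start - hdisplay = 8, sync width = hsync_end - hsync_start = 40, back porch = htotal - hsync_end = 32, and the refresh rate is roughly clock (kHz) * 1000 / (htotal * vtotal), about 77 Hz here. A one-line sketch of that last relation:

/* Sketch: integer refresh-rate estimate for a drm_display_mode;
 * e.g. 33264 kHz over 880 * 486 pixels per frame gives ~77 Hz. */
static unsigned int mode_refresh_hz(unsigned int clock_khz,
				    unsigned int htotal, unsigned int vtotal)
{
	return (clock_khz * 1000u) / (htotal * vtotal);
}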
13440+
13441+/* ************************************************************************* *\
13442+FUNCTION: mrstDSI_clockInit
13443+
13444+DESCRIPTION: Derive the DSI DDR clock from the panel timings and select the closest supported MIPI 2x clock for the SKU.
13445+
13446+\* ************************************************************************* */
13447+static u32 sku_83_mipi_2xclk[4] = {166667, 333333, 444444, 666667};
13448+static u32 sku_100_mipi_2xclk[4] = {200000, 400000, 533333, 800000};
13449+static u32 sku_100L_mipi_2xclk[4] = {100000, 200000, 266667, 400000};
13450+#define MIPI_2XCLK_COUNT 0x04
13451+
13452+static bool mrstDSI_clockInit(DRM_DRIVER_PRIVATE_T *dev_priv)
13453+{
13454+ u32 Htotal = 0, Vtotal = 0, RRate = 0, mipi_2xclk = 0;
13455+ u32 i = 0;
13456+ u32 *p_mipi_2xclk = NULL;
13457+
13458+ (void)GetHS_TX_timeoutCount;
13459+ (void)GetLP_RX_timeoutCount;
13460+ (void)GetHSA_Count;
13461+ (void)GetHBP_Count;
13462+ (void)GetHFP_Count;
13463+ (void)GetHAdr_Count;
13464+ (void)GetHighLowSwitchCount;
13465+ (void)mrst_init_HIMAX_MIPI_bridge;
13466+
13467+#if 0 /* JLIU7_PO old values */
13468+ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
13469+ dev_priv->pixelClock = 33264; /*KHz*/
13470+ dev_priv->HsyncWidth = 10;
13471+ dev_priv->HbackPorch = 210;
13472+ dev_priv->HfrontPorch = 36;
13473+ dev_priv->HactiveArea = 800;
13474+ dev_priv->VsyncWidth = 2;
13475+ dev_priv->VbackPorch = 34;
13476+ dev_priv->VfrontPorch = 9;
13477+ dev_priv->VactiveArea = 480;
13478+ dev_priv->bpp = 24;
13479+
13480+ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
13481+ dev_priv->dbi_pixelClock = 33264; /*KHz*/
13482+ dev_priv->dbi_HsyncWidth = 10;
13483+ dev_priv->dbi_HbackPorch = 210;
13484+ dev_priv->dbi_HfrontPorch = 36;
13485+ dev_priv->dbi_HactiveArea = 800;
13486+ dev_priv->dbi_VsyncWidth = 2;
13487+ dev_priv->dbi_VbackPorch = 34;
13488+ dev_priv->dbi_VfrontPorch = 9;
13489+ dev_priv->dbi_VactiveArea = 480;
13490+ dev_priv->dbi_bpp = 24;
13491+#else /* JLIU7_PO old values */
13492+ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
13493+ /* FIXME Pre-Si value, 1 or 2 lanes; 50MHz; Non-Burst w/ sync event */
13494+ dev_priv->pixelClock = 33264; /*KHz*/
13495+ dev_priv->HsyncWidth = 10;
13496+ dev_priv->HbackPorch = 8;
13497+ dev_priv->HfrontPorch = 3;
13498+ dev_priv->HactiveArea = 800;
13499+ dev_priv->VsyncWidth = 2;
13500+ dev_priv->VbackPorch = 3;
13501+ dev_priv->VfrontPorch = 2;
13502+ dev_priv->VactiveArea = 480;
13503+ dev_priv->bpp = 24;
13504+
13505+ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
13506+ dev_priv->dbi_pixelClock = 33264; /*KHz*/
13507+ dev_priv->dbi_HsyncWidth = 10;
13508+ dev_priv->dbi_HbackPorch = 8;
13509+ dev_priv->dbi_HfrontPorch = 3;
13510+ dev_priv->dbi_HactiveArea = 800;
13511+ dev_priv->dbi_VsyncWidth = 2;
13512+ dev_priv->dbi_VbackPorch = 3;
13513+ dev_priv->dbi_VfrontPorch = 2;
13514+ dev_priv->dbi_VactiveArea = 480;
13515+ dev_priv->dbi_bpp = 24;
13516+#endif /* JLIU7_PO old values */
13517+
13518+ Htotal = dev_priv->HsyncWidth + dev_priv->HbackPorch + dev_priv->HfrontPorch + dev_priv->HactiveArea;
13519+ Vtotal = dev_priv->VsyncWidth + dev_priv->VbackPorch + dev_priv->VfrontPorch + dev_priv->VactiveArea;
13520+
13521+ RRate = ((dev_priv->pixelClock * 1000) / (Htotal * Vtotal)) + 1;
13522+
13523+ dev_priv->RRate = RRate;
13524+
13525+	/* DDR clock frequency = (pixel clock frequency * bits per pixel) / (2 * lane count) */
13526+ mipi_2xclk = (dev_priv->pixelClock * dev_priv->bpp) / dev_priv->laneCount; /* KHz */
13527+ dev_priv->DDR_Clock_Calculated = mipi_2xclk / 2; /* KHz */
13528+
13529+ DRM_DEBUG("mrstDSI_clockInit RRate = %d, mipi_2xclk = %d. \n", RRate, mipi_2xclk);
13530+
13531+ if (dev_priv->sku_100)
13532+ {
13533+ p_mipi_2xclk = sku_100_mipi_2xclk;
13534+ }
13535+ else if (dev_priv->sku_100L)
13536+ {
13537+ p_mipi_2xclk = sku_100L_mipi_2xclk;
13538+ }
13539+ else
13540+ {
13541+ p_mipi_2xclk = sku_83_mipi_2xclk;
13542+ }
13543+
13544+ for (; i < MIPI_2XCLK_COUNT; i++)
13545+ {
13546+ if ((dev_priv->DDR_Clock_Calculated * 2) < p_mipi_2xclk[i])
13547+ break;
13548+ }
13549+
13550+ if (i == MIPI_2XCLK_COUNT)
13551+ {
13552+ DRM_DEBUG("mrstDSI_clockInit the DDR clock is too big, DDR_Clock_Calculated is = %d\n", dev_priv->DDR_Clock_Calculated);
13553+ return false;
13554+ }
13555+
13556+ dev_priv->DDR_Clock = p_mipi_2xclk[i] / 2;
13557+ dev_priv->ClockBits = i;
13558+
13559+#if 0 /*JLIU7_PO */
13560+#if 0 /* FIXME remove it after power on*/
13561+ mipiControlReg = REG_READ(MIPI_CONTROL_REG) & (~MIPI_2X_CLOCK_BITS);
13562+ mipiControlReg |= i;
13563+ REG_WRITE(MIPI_CONTROL_REG, mipiControlReg);
13564+#else /* FIXME remove it after power on*/
13565+ mipiControlReg |= i;
13566+ REG_WRITE(MIPI_CONTROL_REG, mipiControlReg);
13567+#endif /* FIXME remove it after power on*/
13568+#endif /*JLIU7_PO */
13569+
13570+#if 1 /* FIXME remove it after power on*/
13571+ DRM_DEBUG("mrstDSI_clockInit, mipi_2x_clock_divider = 0x%x, DDR_Clock_Calculated is = %d\n", i, dev_priv->DDR_Clock_Calculated);
13572+#endif /* FIXME remove it after power on*/
13573+
13574+ return true;
13575+}
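With the hard-coded panel values above (pixelClock = 33264 kHz, bpp = 24, Htotal = 10 + 8 + 3 + 800 = 821, Vtotal = 2 + 3 + 2 + 480 = 487) and laneCount = 2 as set later in mrst_dsi_init(), the function works out to RRate = 33264000 / (821 * 487) + 1 = 84, mipi_2xclk = 33264 * 24 / 2 = 399168 kHz and DDR_Clock_Calculated = 199584 kHz; on a sku_83 part the table scan stops at 444444, so DDR_Clock = 222222 kHz and ClockBits = 2. A standalone sketch of the same arithmetic:

#include <stdio.h>

/* Sketch of the mrstDSI_clockInit() arithmetic against the sku_83 table,
 * using the hard-coded TPO TD043MTEA2 values from the function above. */
static const unsigned int sku_83_2xclk[4] = {166667, 333333, 444444, 666667};

int main(void)
{
	unsigned int pixel_clock = 33264, bpp = 24, lanes = 2;
	unsigned int htotal = 10 + 8 + 3 + 800, vtotal = 2 + 3 + 2 + 480;
	unsigned int rrate = (pixel_clock * 1000) / (htotal * vtotal) + 1;
	unsigned int mipi_2xclk = (pixel_clock * bpp) / lanes;	/* kHz */
	unsigned int ddr_calc = mipi_2xclk / 2;			/* kHz */
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (ddr_calc * 2 < sku_83_2xclk[i])
			break;
	if (i == 4)
		return 1;	/* "DDR clock is too big" case in the driver */

	printf("RRate=%u Hz, DDR_calc=%u kHz -> DDR=%u kHz, ClockBits=%u\n",
	       rrate, ddr_calc, sku_83_2xclk[i] / 2, i);
	return 0;
}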
13576+
13577+/**
13578+ * mrst_dsi_init - setup MIPI connectors on this device
13579+ * @dev: drm device
13580+ *
13581+ * Create the connector, try to figure out what
13582+ * modes we can display on the MIPI panel (if present).
13583+ */
13584+void mrst_dsi_init(struct drm_device *dev,
13585+ struct psb_intel_mode_device *mode_dev)
13586+{
13587+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13588+ struct psb_intel_output *psb_intel_output;
13589+ struct drm_connector *connector;
13590+ struct drm_encoder *encoder;
13591+
13592+#if PRINT_JLIU7
13593+ DRM_INFO("JLIU7 enter mrst_dsi_init \n");
13594+#endif /* PRINT_JLIU7 */
13595+
13596+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
13597+ if (!psb_intel_output)
13598+ return;
13599+
13600+ psb_intel_output->mode_dev = mode_dev;
13601+ connector = &psb_intel_output->base;
13602+ encoder = &psb_intel_output->enc;
13603+ drm_connector_init(dev, &psb_intel_output->base,
13604+ &mrst_dsi_connector_funcs,
13605+ DRM_MODE_CONNECTOR_MIPI);
13606+
13607+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
13608+ DRM_MODE_ENCODER_MIPI);
13609+
13610+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
13611+ &psb_intel_output->enc);
13612+ psb_intel_output->type = INTEL_OUTPUT_MIPI;
13613+
13614+ drm_encoder_helper_add(encoder, &mrst_dsi_helper_funcs);
13615+ drm_connector_helper_add(connector,
13616+ &mrst_dsi_connector_helper_funcs);
13617+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
13618+ connector->interlace_allowed = false;
13619+ connector->doublescan_allowed = false;
13620+
13621+ dsi_backlight = BRIGHTNESS_MAX_LEVEL;
13622+ blc_pol = BLC_POLARITY_INVERSE;
13623+ blc_freq = 0xc8;
13624+
13625+ /*
13626+ * MIPI discovery:
13627+ * 1) check for DDB data
13628+ * 2) check for VBT data
13629+	 * 3) make sure lid is open
13630+ * if closed, act like it's not there for now
13631+ */
13632+
13633+ /* FIXME jliu7 we only support DPI */
13634+ dev_priv->dpi = true;
13635+
13636+ /* FIXME hard coded 4 lanes for Himax HX8858-A, 2 lanes for NSC LM2550 */
13637+ dev_priv->laneCount = 2;
13638+
13639+ /* FIXME hard coded for NSC PO. */
13640+	/* We only support BURST_MODE */
13641+ dev_priv->videoModeFormat = NON_BURST_MODE_SYNC_EVENTS; /* BURST_MODE */
13642+ /* FIXME change it to true if GET_DDB works */
13643+ dev_priv->config_phase = false;
13644+
13645+ if (!mrstDSI_clockInit(dev_priv))
13646+ {
13647+		DRM_DEBUG("Can't initialize MRST DSI clock.\n");
13648+#if 0 /* FIXME JLIU7 */
13649+ goto failed_find;
13650+#endif /* FIXME JLIU7 */
13651+ }
13652+
13653+ /*
13654+	 * If we didn't get DDB data, try getting panel timing
13655+ * from configuration data
13656+ */
13657+ mode_dev->panel_fixed_mode = mrst_dsi_get_configuration_mode(dev);
13658+
13659+ if (mode_dev->panel_fixed_mode) {
13660+ mode_dev->panel_fixed_mode->type |=
13661+ DRM_MODE_TYPE_PREFERRED;
13662+ goto out; /* FIXME: check for quirks */
13663+ }
13664+
13665+ /* If we still don't have a mode after all that, give up. */
13666+ if (!mode_dev->panel_fixed_mode) {
13667+ DRM_DEBUG
13668+		    ("Found no modes on the MIPI, ignoring the MIPI\n");
13669+ goto failed_find;
13670+ }
13671+
13672+out:
13673+ drm_sysfs_connector_add(connector);
13674+ return;
13675+
13676+failed_find:
13677+	DRM_DEBUG("No MIPI modes found, disabling.\n");
13678+ drm_encoder_cleanup(encoder);
13679+ drm_connector_cleanup(connector);
13680+ kfree(connector);
13681+}
13682diff -uNr a/drivers/gpu/drm/psb/psb_intel_i2c.c b/drivers/gpu/drm/psb/psb_intel_i2c.c
13683--- a/drivers/gpu/drm/psb/psb_intel_i2c.c 1969-12-31 16:00:00.000000000 -0800
13684+++ b/drivers/gpu/drm/psb/psb_intel_i2c.c 2009-04-07 13:28:38.000000000 -0700
13685@@ -0,0 +1,179 @@
13686+/*
13687+ * Copyright © 2006-2007 Intel Corporation
13688+ *
13689+ * Permission is hereby granted, free of charge, to any person obtaining a
13690+ * copy of this software and associated documentation files (the "Software"),
13691+ * to deal in the Software without restriction, including without limitation
13692+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13693+ * and/or sell copies of the Software, and to permit persons to whom the
13694+ * Software is furnished to do so, subject to the following conditions:
13695+ *
13696+ * The above copyright notice and this permission notice (including the next
13697+ * paragraph) shall be included in all copies or substantial portions of the
13698+ * Software.
13699+ *
13700+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13701+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13702+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
13703+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
13704+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
13705+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
13706+ * DEALINGS IN THE SOFTWARE.
13707+ *
13708+ * Authors:
13709+ * Eric Anholt <eric@anholt.net>
13710+ */
13711+/*
13712+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
13713+ * Jesse Barnes <jesse.barnes@intel.com>
13714+ */
13715+
13716+#include <linux/i2c.h>
13717+#include <linux/i2c-id.h>
13718+#include <linux/i2c-algo-bit.h>
13719+
13720+/*
13721+ * Intel GPIO access functions
13722+ */
13723+
13724+#define I2C_RISEFALL_TIME 20
13725+
13726+static int get_clock(void *data)
13727+{
13728+ struct psb_intel_i2c_chan *chan = data;
13729+ struct drm_device *dev = chan->drm_dev;
13730+ u32 val;
13731+
13732+ val = REG_READ(chan->reg);
13733+ return (val & GPIO_CLOCK_VAL_IN) != 0;
13734+}
13735+
13736+static int get_data(void *data)
13737+{
13738+ struct psb_intel_i2c_chan *chan = data;
13739+ struct drm_device *dev = chan->drm_dev;
13740+ u32 val;
13741+
13742+ val = REG_READ(chan->reg);
13743+ return (val & GPIO_DATA_VAL_IN) != 0;
13744+}
13745+
13746+static void set_clock(void *data, int state_high)
13747+{
13748+ struct psb_intel_i2c_chan *chan = data;
13749+ struct drm_device *dev = chan->drm_dev;
13750+ u32 reserved = 0, clock_bits;
13751+
13752+ /* On most chips, these bits must be preserved in software. */
13753+ if (!IS_I830(dev) && !IS_845G(dev))
13754+ reserved =
13755+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
13756+ GPIO_CLOCK_PULLUP_DISABLE);
13757+
13758+ if (state_high)
13759+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
13760+ else
13761+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
13762+ GPIO_CLOCK_VAL_MASK;
13763+ REG_WRITE(chan->reg, reserved | clock_bits);
13764+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
13765+}
13766+
13767+static void set_data(void *data, int state_high)
13768+{
13769+ struct psb_intel_i2c_chan *chan = data;
13770+ struct drm_device *dev = chan->drm_dev;
13771+ u32 reserved = 0, data_bits;
13772+
13773+ /* On most chips, these bits must be preserved in software. */
13774+ if (!IS_I830(dev) && !IS_845G(dev))
13775+ reserved =
13776+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
13777+ GPIO_CLOCK_PULLUP_DISABLE);
13778+
13779+ if (state_high)
13780+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
13781+ else
13782+ data_bits =
13783+ GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
13784+ GPIO_DATA_VAL_MASK;
13785+
13786+ REG_WRITE(chan->reg, reserved | data_bits);
13787+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
13788+}
13789+
13790+/**
13791+ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
13792+ * @dev: DRM device
13793+ * @output: driver specific output device
13794+ * @reg: GPIO reg to use
13795+ * @name: name for this bus
13796+ *
13797+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
13798+ * in output probing and control (e.g. DDC or SDVO control functions).
13799+ *
13800+ * Possible values for @reg include:
13801+ * %GPIOA
13802+ * %GPIOB
13803+ * %GPIOC
13804+ * %GPIOD
13805+ * %GPIOE
13806+ * %GPIOF
13807+ * %GPIOG
13808+ * %GPIOH
13809+ * see PRM for details on how these different busses are used.
13810+ */
13811+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
13812+ const u32 reg, const char *name)
13813+{
13814+ struct psb_intel_i2c_chan *chan;
13815+
13816+ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
13817+ if (!chan)
13818+ goto out_free;
13819+
13820+ chan->drm_dev = dev;
13821+ chan->reg = reg;
13822+ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
13823+ chan->adapter.owner = THIS_MODULE;
13824+ chan->adapter.algo_data = &chan->algo;
13825+ chan->adapter.dev.parent = &dev->pdev->dev;
13826+ chan->algo.setsda = set_data;
13827+ chan->algo.setscl = set_clock;
13828+ chan->algo.getsda = get_data;
13829+ chan->algo.getscl = get_clock;
13830+ chan->algo.udelay = 20;
13831+ chan->algo.timeout = usecs_to_jiffies(2200);
13832+ chan->algo.data = chan;
13833+
13834+ i2c_set_adapdata(&chan->adapter, chan);
13835+
13836+ if (i2c_bit_add_bus(&chan->adapter))
13837+ goto out_free;
13838+
13839+ /* JJJ: raise SCL and SDA? */
13840+ set_data(chan, 1);
13841+ set_clock(chan, 1);
13842+ udelay(20);
13843+
13844+ return chan;
13845+
13846+out_free:
13847+ kfree(chan);
13848+ return NULL;
13849+}
13850+
13851+/**
13852+ * psb_intel_i2c_destroy - unregister and free i2c bus resources
13853+ * @output: channel to free
13854+ *
13855+ * Unregister the adapter from the i2c layer, then free the structure.
13856+ */
13857+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
13858+{
13859+ if (!chan)
13860+ return;
13861+
13862+ i2c_del_adapter(&chan->adapter);
13863+ kfree(chan);
13864+}
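psb_intel_i2c_create() registers a bit-banged adapter over the given GPIO pair and psb_intel_i2c_destroy() tears it down; the LVDS init later in this patch uses exactly this pairing for its DDC bus on GPIOC. A minimal usage sketch under that same assumption, with error handling kept to the essentials:

/* Sketch: create a DDC bus, use its adapter, then destroy it, mirroring
 * what psb_intel_lvds_init() does with GPIOC further down. */
static int example_probe_ddc(struct drm_device *dev)
{
	struct psb_intel_i2c_chan *ddc;

	ddc = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
	if (!ddc)
		return -ENOMEM;

	/* ... issue i2c transfers / EDID reads over &ddc->adapter ... */

	psb_intel_i2c_destroy(ddc);
	return 0;
}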
13865diff -uNr a/drivers/gpu/drm/psb/psb_intel_lvds.c b/drivers/gpu/drm/psb/psb_intel_lvds.c
13866--- a/drivers/gpu/drm/psb/psb_intel_lvds.c 1969-12-31 16:00:00.000000000 -0800
13867+++ b/drivers/gpu/drm/psb/psb_intel_lvds.c 2009-04-07 13:28:38.000000000 -0700
13868@@ -0,0 +1,1015 @@
13869+/*
13870+ * Copyright © 2006-2007 Intel Corporation
13871+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
13872+ *
13873+ * Permission is hereby granted, free of charge, to any person obtaining a
13874+ * copy of this software and associated documentation files (the "Software"),
13875+ * to deal in the Software without restriction, including without limitation
13876+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13877+ * and/or sell copies of the Software, and to permit persons to whom the
13878+ * Software is furnished to do so, subject to the following conditions:
13879+ *
13880+ * The above copyright notice and this permission notice (including the next
13881+ * paragraph) shall be included in all copies or substantial portions of the
13882+ * Software.
13883+ *
13884+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13885+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13886+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
13887+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
13888+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
13889+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
13890+ * DEALINGS IN THE SOFTWARE.
13891+ *
13892+ * Authors:
13893+ * Eric Anholt <eric@anholt.net>
13894+ * Dave Airlie <airlied@linux.ie>
13895+ * Jesse Barnes <jesse.barnes@intel.com>
13896+ */
13897+
13898+#include <linux/i2c.h>
13899+#include <drm/drm_crtc.h>
13900+#include <drm/drm_edid.h>
13901+/* MRST defines start */
13902+uint8_t blc_type;
13903+uint8_t blc_pol;
13904+uint8_t blc_freq;
13905+uint8_t blc_minbrightness;
13906+uint8_t blc_i2caddr;
13907+uint8_t blc_brightnesscmd;
13908+int lvds_backlight; /* restore backlight to this value */
13909+
13910+u32 CoreClock;
13911+u32 PWMControlRegFreq;
13912+/* MRST defines end */
13913+
13914+/**
13915+ * Sets the backlight level.
13916+ *
13917+ * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight().
13918+ */
13919+static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
13920+{
13921+ u32 blc_pwm_ctl;
13922+
13923+ blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
13924+ REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
13925+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
13926+}
13927+
13928+/**
13929+ * Returns the maximum level of the backlight duty cycle field.
13930+ */
13931+static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
13932+{
13933+ return ((REG_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
13934+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
13935+}
13936+
13937+/**
13938+ * Sets the power state for the panel.
13939+ */
13940+static void psb_intel_lvds_set_power(struct drm_device *dev,
13941+ struct psb_intel_output *output, bool on)
13942+{
13943+ u32 pp_status;
13944+
13945+ if (on) {
13946+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
13947+ POWER_TARGET_ON);
13948+ do {
13949+ pp_status = REG_READ(PP_STATUS);
13950+ } while ((pp_status & PP_ON) == 0);
13951+
13952+ psb_intel_lvds_set_backlight(dev,
13953+ output->
13954+ mode_dev->backlight_duty_cycle);
13955+ } else {
13956+ psb_intel_lvds_set_backlight(dev, 0);
13957+
13958+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
13959+ ~POWER_TARGET_ON);
13960+ do {
13961+ pp_status = REG_READ(PP_STATUS);
13962+ } while (pp_status & PP_ON);
13963+ }
13964+}
13965+
13966+static void psb_intel_lvds_dpms(struct drm_encoder *encoder, int mode)
13967+{
13968+ struct drm_device *dev = encoder->dev;
13969+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
13970+
13971+ if (mode == DRM_MODE_DPMS_ON)
13972+ psb_intel_lvds_set_power(dev, output, true);
13973+ else
13974+ psb_intel_lvds_set_power(dev, output, false);
13975+
13976+ /* XXX: We never power down the LVDS pairs. */
13977+}
13978+
13979+static void psb_intel_lvds_save(struct drm_connector *connector)
13980+{
13981+#if 0 /* JB: Disable for drop */
13982+ struct drm_device *dev = connector->dev;
13983+
13984+ dev_priv->savePP_ON = REG_READ(PP_ON_DELAYS);
13985+ dev_priv->savePP_OFF = REG_READ(PP_OFF_DELAYS);
13986+ dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
13987+ dev_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);
13988+ dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
13989+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
13990+ BACKLIGHT_DUTY_CYCLE_MASK);
13991+
13992+ /*
13993+ * If the light is off at server startup, just make it full brightness
13994+ */
13995+ if (dev_priv->backlight_duty_cycle == 0)
13996+ dev_priv->backlight_duty_cycle =
13997+ psb_intel_lvds_get_max_backlight(dev);
13998+#endif
13999+}
14000+
14001+static void psb_intel_lvds_restore(struct drm_connector *connector)
14002+{
14003+#if 0 /* JB: Disable for drop */
14004+ struct drm_device *dev = connector->dev;
14005+
14006+ REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
14007+ REG_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON);
14008+ REG_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF);
14009+ REG_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
14010+ REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
14011+ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
14012+ psb_intel_lvds_set_power(dev, true);
14013+ else
14014+ psb_intel_lvds_set_power(dev, false);
14015+#endif
14016+}
14017+
14018+static int psb_intel_lvds_mode_valid(struct drm_connector *connector,
14019+ struct drm_display_mode *mode)
14020+{
14021+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
14022+ struct drm_display_mode *fixed_mode =
14023+ psb_intel_output->mode_dev->panel_fixed_mode;
14024+
14025+#if PRINT_JLIU7
14026+ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_valid \n");
14027+#endif /* PRINT_JLIU7 */
14028+
14029+ if (fixed_mode) {
14030+ if (mode->hdisplay > fixed_mode->hdisplay)
14031+ return MODE_PANEL;
14032+ if (mode->vdisplay > fixed_mode->vdisplay)
14033+ return MODE_PANEL;
14034+ }
14035+ return MODE_OK;
14036+}
14037+
14038+static bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
14039+ struct drm_display_mode *mode,
14040+ struct drm_display_mode *adjusted_mode)
14041+{
14042+ struct psb_intel_mode_device *mode_dev =
14043+ enc_to_psb_intel_output(encoder)->mode_dev;
14044+ struct drm_device *dev = encoder->dev;
14045+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc);
14046+ struct drm_encoder *tmp_encoder;
14047+
14048+#if PRINT_JLIU7
14049+ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_fixup \n");
14050+#endif /* PRINT_JLIU7 */
14051+
14052+ /* Should never happen!! */
14053+ if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
14054+ printk(KERN_ERR
14055+ "Can't support LVDS/MIPI on pipe B on MRST\n");
14056+ return false;
14057+ } else if (!IS_MRST(dev) && !IS_I965G(dev)
14058+ && psb_intel_crtc->pipe == 0) {
14059+ printk(KERN_ERR "Can't support LVDS on pipe A\n");
14060+ return false;
14061+ }
14062+ /* Should never happen!! */
14063+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
14064+ head) {
14065+ if (tmp_encoder != encoder
14066+ && tmp_encoder->crtc == encoder->crtc) {
14067+ printk(KERN_ERR "Can't enable LVDS and another "
14068+ "encoder on the same pipe\n");
14069+ return false;
14070+ }
14071+ }
14072+
14073+ /*
14074+ * If we have timings from the BIOS for the panel, put them in
14075+ * to the adjusted mode. The CRTC will be set up for this mode,
14076+ * with the panel scaling set up to source from the H/VDisplay
14077+ * of the original mode.
14078+ */
14079+ if (mode_dev->panel_fixed_mode != NULL) {
14080+ adjusted_mode->hdisplay =
14081+ mode_dev->panel_fixed_mode->hdisplay;
14082+ adjusted_mode->hsync_start =
14083+ mode_dev->panel_fixed_mode->hsync_start;
14084+ adjusted_mode->hsync_end =
14085+ mode_dev->panel_fixed_mode->hsync_end;
14086+ adjusted_mode->htotal = mode_dev->panel_fixed_mode->htotal;
14087+ adjusted_mode->vdisplay =
14088+ mode_dev->panel_fixed_mode->vdisplay;
14089+ adjusted_mode->vsync_start =
14090+ mode_dev->panel_fixed_mode->vsync_start;
14091+ adjusted_mode->vsync_end =
14092+ mode_dev->panel_fixed_mode->vsync_end;
14093+ adjusted_mode->vtotal = mode_dev->panel_fixed_mode->vtotal;
14094+ adjusted_mode->clock = mode_dev->panel_fixed_mode->clock;
14095+ drm_mode_set_crtcinfo(adjusted_mode,
14096+ CRTC_INTERLACE_HALVE_V);
14097+ }
14098+
14099+ /*
14100+ * XXX: It would be nice to support lower refresh rates on the
14101+ * panels to reduce power consumption, and perhaps match the
14102+ * user's requested refresh rate.
14103+ */
14104+
14105+ return true;
14106+}
14107+
14108+static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
14109+{
14110+ struct drm_device *dev = encoder->dev;
14111+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
14112+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
14113+
14114+#if PRINT_JLIU7
14115+ DRM_INFO("JLIU7 enter psb_intel_lvds_prepare \n");
14116+#endif /* PRINT_JLIU7 */
14117+
14118+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
14119+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
14120+ BACKLIGHT_DUTY_CYCLE_MASK);
14121+
14122+ psb_intel_lvds_set_power(dev, output, false);
14123+}
14124+
14125+static void psb_intel_lvds_commit(struct drm_encoder *encoder)
14126+{
14127+ struct drm_device *dev = encoder->dev;
14128+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
14129+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
14130+
14131+#if PRINT_JLIU7
14132+ DRM_INFO("JLIU7 enter psb_intel_lvds_commit \n");
14133+#endif /* PRINT_JLIU7 */
14134+
14135+ if (mode_dev->backlight_duty_cycle == 0)
14136+ mode_dev->backlight_duty_cycle =
14137+ psb_intel_lvds_get_max_backlight(dev);
14138+
14139+ psb_intel_lvds_set_power(dev, output, true);
14140+}
14141+
14142+static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
14143+ struct drm_display_mode *mode,
14144+ struct drm_display_mode *adjusted_mode)
14145+{
14146+ struct psb_intel_mode_device *mode_dev =
14147+ enc_to_psb_intel_output(encoder)->mode_dev;
14148+ struct drm_device *dev = encoder->dev;
14149+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc);
14150+ u32 pfit_control;
14151+
14152+ /*
14153+ * The LVDS pin pair will already have been turned on in the
14154+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
14155+ * settings.
14156+ */
14157+
14158+ /*
14159+ * Enable automatic panel scaling so that non-native modes fill the
14160+ * screen. Should be enabled before the pipe is enabled, according to
14161+ * register description and PRM.
14162+ */
14163+ if (mode->hdisplay != adjusted_mode->hdisplay ||
14164+ mode->vdisplay != adjusted_mode->vdisplay)
14165+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
14166+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
14167+ HORIZ_INTERP_BILINEAR);
14168+ else
14169+ pfit_control = 0;
14170+
14171+ if (!IS_I965G(dev)) {
14172+ if (mode_dev->panel_wants_dither)
14173+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
14174+ } else
14175+ pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
14176+
14177+ REG_WRITE(PFIT_CONTROL, pfit_control);
14178+}
14179+
14180+/**
14181+ * Detect the LVDS connection.
14182+ *
14183+ * This always returns CONNECTOR_STATUS_CONNECTED.
14184+ * This connector should only have
14185+ * been set up if the LVDS was actually connected anyway.
14186+ */
14187+static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
14188+ *connector)
14189+{
14190+ return connector_status_connected;
14191+}
14192+
14193+/**
14194+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
14195+ */
14196+static int psb_intel_lvds_get_modes(struct drm_connector *connector)
14197+{
14198+ struct drm_device *dev = connector->dev;
14199+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
14200+ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
14201+ int ret = 0;
14202+
14203+ if (!IS_MRST(dev))
14204+ ret = psb_intel_ddc_get_modes(psb_intel_output);
14205+
14206+ if (ret)
14207+ return ret;
14208+
14209+ /* Didn't get an EDID, so
14210+ * Set wide sync ranges so we get all modes
14211+ * handed to valid_mode for checking
14212+ */
14213+ connector->display_info.min_vfreq = 0;
14214+ connector->display_info.max_vfreq = 200;
14215+ connector->display_info.min_hfreq = 0;
14216+ connector->display_info.max_hfreq = 200;
14217+
14218+ if (mode_dev->panel_fixed_mode != NULL) {
14219+ struct drm_display_mode *mode =
14220+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
14221+ drm_mode_probed_add(connector, mode);
14222+ return 1;
14223+ }
14224+
14225+ return 0;
14226+}
14227+
14228+/**
14229+ * psb_intel_lvds_destroy - unregister and free LVDS structures
14230+ * @connector: connector to free
14231+ *
14232+ * Unregister the DDC bus for this connector then free the driver private
14233+ * structure.
14234+ */
14235+static void psb_intel_lvds_destroy(struct drm_connector *connector)
14236+{
14237+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
14238+
14239+ if (psb_intel_output->ddc_bus)
14240+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
14241+ drm_sysfs_connector_remove(connector);
14242+ drm_connector_cleanup(connector);
14243+ kfree(connector);
14244+}
14245+
14246+static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
14247+ .dpms = psb_intel_lvds_dpms,
14248+ .mode_fixup = psb_intel_lvds_mode_fixup,
14249+ .prepare = psb_intel_lvds_prepare,
14250+ .mode_set = psb_intel_lvds_mode_set,
14251+ .commit = psb_intel_lvds_commit,
14252+};
14253+
14254+static const struct drm_connector_helper_funcs
14255+ psb_intel_lvds_connector_helper_funcs = {
14256+ .get_modes = psb_intel_lvds_get_modes,
14257+ .mode_valid = psb_intel_lvds_mode_valid,
14258+ .best_encoder = psb_intel_best_encoder,
14259+};
14260+
14261+static const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
14262+ .save = psb_intel_lvds_save,
14263+ .restore = psb_intel_lvds_restore,
14264+ .detect = psb_intel_lvds_detect,
14265+ .fill_modes = drm_helper_probe_single_connector_modes,
14266+ .destroy = psb_intel_lvds_destroy,
14267+};
14268+
14269+
14270+static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
14271+{
14272+ drm_encoder_cleanup(encoder);
14273+}
14274+
14275+static const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
14276+ .destroy = psb_intel_lvds_enc_destroy,
14277+};
14278+
14279+
14280+
14281+/**
14282+ * psb_intel_lvds_init - setup LVDS connectors on this device
14283+ * @dev: drm device
14284+ *
14285+ * Create the connector, register the LVDS DDC bus, and try to figure out what
14286+ * modes we can display on the LVDS panel (if present).
14287+ */
14288+void psb_intel_lvds_init(struct drm_device *dev,
14289+ struct psb_intel_mode_device *mode_dev)
14290+{
14291+ struct psb_intel_output *psb_intel_output;
14292+ struct drm_connector *connector;
14293+ struct drm_encoder *encoder;
14294+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
14295+ struct drm_crtc *crtc;
14296+ u32 lvds;
14297+ int pipe;
14298+
14299+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
14300+ if (!psb_intel_output)
14301+ return;
14302+
14303+ psb_intel_output->mode_dev = mode_dev;
14304+ connector = &psb_intel_output->base;
14305+ encoder = &psb_intel_output->enc;
14306+ drm_connector_init(dev, &psb_intel_output->base,
14307+ &psb_intel_lvds_connector_funcs,
14308+ DRM_MODE_CONNECTOR_LVDS);
14309+
14310+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
14311+ DRM_MODE_ENCODER_LVDS);
14312+
14313+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
14314+ &psb_intel_output->enc);
14315+ psb_intel_output->type = INTEL_OUTPUT_LVDS;
14316+
14317+ drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
14318+ drm_connector_helper_add(connector,
14319+ &psb_intel_lvds_connector_helper_funcs);
14320+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
14321+ connector->interlace_allowed = false;
14322+ connector->doublescan_allowed = false;
14323+
14324+
14325+ /*
14326+ * LVDS discovery:
14327+ * 1) check for EDID on DDC
14328+ * 2) check for VBT data
14329+ * 3) check to see if LVDS is already on
14330+ * if none of the above, no panel
14331+ * 4) make sure lid is open
14332+ * if closed, act like it's not there for now
14333+ */
14334+
14335+ /* Set up the DDC bus. */
14336+ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
14337+ if (!psb_intel_output->ddc_bus) {
14338+ dev_printk(KERN_ERR, &dev->pdev->dev,
14339+ "DDC bus registration " "failed.\n");
14340+ goto failed_ddc;
14341+ }
14342+
14343+ /*
14344+ * Attempt to get the fixed panel mode from DDC. Assume that the
14345+ * preferred mode is the right one.
14346+ */
14347+ psb_intel_ddc_get_modes(psb_intel_output);
14348+ list_for_each_entry(scan, &connector->probed_modes, head) {
14349+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
14350+ mode_dev->panel_fixed_mode =
14351+ drm_mode_duplicate(dev, scan);
14352+ goto out; /* FIXME: check for quirks */
14353+ }
14354+ }
14355+
14356+ /* Failed to get EDID, what about VBT? */
14357+ if (mode_dev->vbt_mode)
14358+ mode_dev->panel_fixed_mode =
14359+ drm_mode_duplicate(dev, mode_dev->vbt_mode);
14360+
14361+ /*
14362+ * If we didn't get EDID, try checking if the panel is already turned
14363+ * on. If so, assume that whatever is currently programmed is the
14364+ * correct mode.
14365+ */
14366+ lvds = REG_READ(LVDS);
14367+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
14368+ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
14369+
14370+ if (crtc && (lvds & LVDS_PORT_EN)) {
14371+ mode_dev->panel_fixed_mode =
14372+ psb_intel_crtc_mode_get(dev, crtc);
14373+ if (mode_dev->panel_fixed_mode) {
14374+ mode_dev->panel_fixed_mode->type |=
14375+ DRM_MODE_TYPE_PREFERRED;
14376+ goto out; /* FIXME: check for quirks */
14377+ }
14378+ }
14379+
14380+ /* If we still don't have a mode after all that, give up. */
14381+ if (!mode_dev->panel_fixed_mode) {
14382+ DRM_DEBUG
14383+ ("Found no modes on the lvds, ignoring the LVDS\n");
14384+ goto failed_find;
14385+ }
14386+
14387+ /* FIXME: detect aopen & mac mini type stuff automatically? */
14388+ /*
14389+ * Blacklist machines with BIOSes that list an LVDS panel without
14390+ * actually having one.
14391+ */
14392+ if (IS_I945GM(dev)) {
14393+ /* aopen mini pc */
14394+ if (dev->pdev->subsystem_vendor == 0xa0a0) {
14395+ DRM_DEBUG
14396+ ("Suspected AOpen Mini PC, ignoring the LVDS\n");
14397+ goto failed_find;
14398+ }
14399+
14400+ if ((dev->pdev->subsystem_vendor == 0x8086) &&
14401+ (dev->pdev->subsystem_device == 0x7270)) {
14402+ /* It's a Mac Mini or Macbook Pro. */
14403+
14404+ if (mode_dev->panel_fixed_mode != NULL &&
14405+ mode_dev->panel_fixed_mode->hdisplay == 800 &&
14406+ mode_dev->panel_fixed_mode->vdisplay == 600) {
14407+ DRM_DEBUG
14408+ ("Suspected Mac Mini, ignoring the LVDS\n");
14409+ goto failed_find;
14410+ }
14411+ }
14412+ }
14413+
14414+out:
14415+ drm_sysfs_connector_add(connector);
14416+
14417+#if PRINT_JLIU7
14418+ DRM_INFO("PRINT_JLIU7 hdisplay = %d\n",
14419+ mode_dev->panel_fixed_mode->hdisplay);
14420+ DRM_INFO("PRINT_JLIU7 vdisplay = %d\n",
14421+ mode_dev->panel_fixed_mode->vdisplay);
14422+ DRM_INFO("PRINT_JLIU7 hsync_start = %d\n",
14423+ mode_dev->panel_fixed_mode->hsync_start);
14424+ DRM_INFO("PRINT_JLIU7 hsync_end = %d\n",
14425+ mode_dev->panel_fixed_mode->hsync_end);
14426+ DRM_INFO("PRINT_JLIU7 htotal = %d\n",
14427+ mode_dev->panel_fixed_mode->htotal);
14428+ DRM_INFO("PRINT_JLIU7 vsync_start = %d\n",
14429+ mode_dev->panel_fixed_mode->vsync_start);
14430+ DRM_INFO("PRINT_JLIU7 vsync_end = %d\n",
14431+ mode_dev->panel_fixed_mode->vsync_end);
14432+ DRM_INFO("PRINT_JLIU7 vtotal = %d\n",
14433+ mode_dev->panel_fixed_mode->vtotal);
14434+ DRM_INFO("PRINT_JLIU7 clock = %d\n",
14435+ mode_dev->panel_fixed_mode->clock);
14436+#endif /* PRINT_JLIU7 */
14437+ return;
14438+
14439+failed_find:
14440+ if (psb_intel_output->ddc_bus)
14441+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
14442+failed_ddc:
14443+ drm_encoder_cleanup(encoder);
14444+ drm_connector_cleanup(connector);
14445+ kfree(connector);
14446+}
14447+
14448+/* MRST platform start */
14449+
14450+/*
14451+ * FIXME need to move to register define head file
14452+ */
14453+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
14454+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
14455+
14456+/* The max/min PWM frequency is held in BPCR[31:17]. */
14457+/* The smallest number that can fit in the 15-bit field
14458+ * is 1 (not 0); it is then shifted to the left by one bit
14459+ * to get the actual 16-bit value that the 15 bits
14460+ * correspond to. */
14461+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
14462+
14463+#define BRIGHTNESS_MAX_LEVEL 100
14464+#define BLC_PWM_PRECISION_FACTOR 10 /* 10000000 */
14465+#define BLC_PWM_FREQ_CALC_CONSTANT 32
14466+#define MHz 1000000
14467+#define BLC_POLARITY_NORMAL 0
14468+#define BLC_POLARITY_INVERSE 1
14469+
14470+/**
14471+ * Calculate PWM control register value.
14472+ */
14473+static bool mrstLVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
14474+{
14475+ unsigned long value = 0;
14476+ if (blc_freq == 0) {
14477+ /* DRM_ERROR(KERN_ERR "mrstLVDSCalculatePWMCtrlRegFreq:
14478+ * Frequency Requested is 0.\n"); */
14479+ return false;
14480+ }
14481+
14482+ value = (CoreClock * MHz);
14483+ value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
14484+ value = (value * BLC_PWM_PRECISION_FACTOR);
14485+ value = (value / blc_freq);
14486+ value = (value / BLC_PWM_PRECISION_FACTOR);
14487+
14488+	if (value > (unsigned long) MRST_BLC_MAX_PWM_REG_FREQ) {
14489+		return false;
14490+	} else {
14491+		PWMControlRegFreq = (u32) value;
14492+		return true;
14493+	}
14494+}
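The computation above reduces to CoreClock * 1,000,000 / 32 / blc_freq (the precision factor is multiplied in and divided back out). With blc_freq = 0xc8 = 200, the value programmed by mrst_dsi_init(), and an assumed CoreClock of 200 MHz (CoreClock itself is filled in elsewhere in the driver), that yields 31250, well under MRST_BLC_MAX_PWM_REG_FREQ. A sketch of that worked example:

#include <stdio.h>

/* Sketch of the BLC PWM frequency-register calculation; core_clock_mhz = 200
 * is an assumed example value, blc_freq = 0xc8 matches mrst_dsi_init(). */
int main(void)
{
	unsigned long core_clock_mhz = 200, blc_freq = 0xc8;
	unsigned long value = core_clock_mhz * 1000000UL;

	value /= 32;		/* BLC_PWM_FREQ_CALC_CONSTANT */
	value *= 10;		/* BLC_PWM_PRECISION_FACTOR in ... */
	value /= blc_freq;
	value /= 10;		/* ... and back out */

	printf("PWM control register frequency field = %lu\n", value); /* 31250 */
	return 0;
}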
14495+
14496+/**
14497+ * Returns the maximum level of the backlight duty cycle field.
14498+ */
14499+static u32 mrst_lvds_get_PWM_ctrl_freq(struct drm_device *dev)
14500+{
14501+ u32 max_pwm_blc = 0;
14502+
14503+#if PRINT_JLIU7
14504+ DRM_INFO("JLIU7 enter mrst_lvds_get_PWM_ctrl_freq \n");
14505+#endif /* PRINT_JLIU7 */
14506+
14507+/*FIXME JLIU7 get the PWM frequency from configuration */
14508+
14509+ max_pwm_blc =
14510+ (REG_READ(BLC_PWM_CTL) & MRST_BACKLIGHT_MODULATION_FREQ_MASK)
14511+ >> MRST_BACKLIGHT_MODULATION_FREQ_SHIFT;
14512+
14513+
14514+ if (!max_pwm_blc) {
14515+ if (mrstLVDSCalculatePWMCtrlRegFreq(dev))
14516+ max_pwm_blc = PWMControlRegFreq;
14517+ }
14518+
14519+ return max_pwm_blc;
14520+}
14521+
14522+/**
14523+ * Sets the backlight level.
14524+ *
14525+ * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight().
14526+ */
14527+static void mrst_lvds_set_backlight(struct drm_device *dev, int level)
14528+{
14529+ u32 blc_pwm_ctl;
14530+ u32 max_pwm_blc;
14531+#if PRINT_JLIU7
14532+ DRM_INFO("JLIU7 enter mrst_lvds_set_backlight \n");
14533+#endif /* PRINT_JLIU7 */
14534+
14535+#if 1 /* FIXME JLIU7 */
14536+ return;
14537+#endif /* FIXME JLIU7 */
14538+
14539+	/* Prevent LVDS from going totally black */
14540+ if (level < 20)
14541+ level = 20;
14542+
14543+ max_pwm_blc = mrst_lvds_get_PWM_ctrl_freq(dev);
14544+
14545+ if (max_pwm_blc == 0)
14546+ return;
14547+
14548+ blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
14549+
14550+ if (blc_pol == BLC_POLARITY_INVERSE)
14551+ blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
14552+
14553+ REG_WRITE(BLC_PWM_CTL,
14554+ (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) |
14555+ blc_pwm_ctl);
14556+}
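mrst_lvds_set_backlight() maps a 0-100 level onto the duty-cycle field against the modulation frequency read from BLC_PWM_CTL, and mirrors it when blc_pol is BLC_POLARITY_INVERSE; for example, max_pwm_blc = 31250 and level = 60 give a duty cycle of 18750, inverted to 12500, with the register written as (31250 << 16) | 12500. A sketch of just that scaling, using the 16-bit shift defined by MRST_BACKLIGHT_MODULATION_FREQ_SHIFT above:

#include <stdint.h>

/* Sketch of the duty-cycle scaling done in mrst_lvds_set_backlight():
 * frequency in bits [31:16], duty cycle in bits [15:0]. */
static uint32_t mrst_blc_pwm_value(uint32_t level, uint32_t max_pwm_blc,
				   int inverse_polarity)
{
	uint32_t duty;

	if (level < 20)				/* same floor as the driver */
		level = 20;

	duty = level * max_pwm_blc / 100;	/* BRIGHTNESS_MAX_LEVEL */
	if (inverse_polarity)
		duty = max_pwm_blc - duty;

	return (max_pwm_blc << 16) | duty;
}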
14557+
14558+/**
14559+ * Sets the power state for the panel.
14560+ */
14561+static void mrst_lvds_set_power(struct drm_device *dev,
14562+ struct psb_intel_output *output, bool on)
14563+{
14564+ u32 pp_status;
14565+
14566+#if PRINT_JLIU7
14567+ DRM_INFO("JLIU7 enter mrst_lvds_set_power \n");
14568+#endif /* PRINT_JLIU7 */
14569+
14570+ if (on) {
14571+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
14572+ POWER_TARGET_ON);
14573+ do {
14574+ pp_status = REG_READ(PP_STATUS);
14575+ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
14576+
14577+ mrst_lvds_set_backlight(dev, lvds_backlight);
14578+ } else {
14579+ mrst_lvds_set_backlight(dev, 0);
14580+
14581+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
14582+ ~POWER_TARGET_ON);
14583+ do {
14584+ pp_status = REG_READ(PP_STATUS);
14585+ } while (pp_status & PP_ON);
14586+ }
14587+}
14588+
14589+static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode)
14590+{
14591+ struct drm_device *dev = encoder->dev;
14592+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
14593+
14594+#if PRINT_JLIU7
14595+ DRM_INFO("JLIU7 enter mrst_lvds_dpms \n");
14596+#endif /* PRINT_JLIU7 */
14597+
14598+ if (mode == DRM_MODE_DPMS_ON)
14599+ mrst_lvds_set_power(dev, output, true);
14600+ else
14601+ mrst_lvds_set_power(dev, output, false);
14602+
14603+ /* XXX: We never power down the LVDS pairs. */
14604+}
14605+
14606+static void mrst_lvds_mode_set(struct drm_encoder *encoder,
14607+ struct drm_display_mode *mode,
14608+ struct drm_display_mode *adjusted_mode)
14609+{
14610+ struct psb_intel_mode_device *mode_dev =
14611+ enc_to_psb_intel_output(encoder)->mode_dev;
14612+ struct drm_device *dev = encoder->dev;
14613+ u32 pfit_control;
14614+ u32 lvds_port;
14615+
14616+#if PRINT_JLIU7
14617+ DRM_INFO("JLIU7 enter mrst_lvds_mode_set \n");
14618+#endif /* PRINT_JLIU7 */
14619+
14620+ /*
14621+ * The LVDS pin pair will already have been turned on in the
14622+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
14623+ * settings.
14624+ */
14625+ /*FIXME JLIU7 Get panel power delay parameters from config data */
14626+ REG_WRITE(0x61208, 0x25807d0);
14627+ REG_WRITE(0x6120c, 0x1f407d0);
14628+ REG_WRITE(0x61210, 0x270f04);
14629+
14630+ lvds_port = (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN;
14631+
14632+ if (mode_dev->panel_wants_dither)
14633+ lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
14634+
14635+ REG_WRITE(LVDS, lvds_port);
14636+
14637+ /*
14638+ * Enable automatic panel scaling so that non-native modes fill the
14639+ * screen. Should be enabled before the pipe is enabled, according to
14640+ * register description and PRM.
14641+ */
14642+ if (mode->hdisplay != adjusted_mode->hdisplay ||
14643+ mode->vdisplay != adjusted_mode->vdisplay)
14644+ pfit_control = PFIT_ENABLE;
14645+ else
14646+ pfit_control = 0;
14647+
14648+ REG_WRITE(PFIT_CONTROL, pfit_control);
14649+}
14650+
14651+
14652+static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = {
14653+ .dpms = mrst_lvds_dpms,
14654+ .mode_fixup = psb_intel_lvds_mode_fixup,
14655+ .prepare = psb_intel_lvds_prepare,
14656+ .mode_set = mrst_lvds_mode_set,
14657+ .commit = psb_intel_lvds_commit,
14658+};
14659+
14660+/** Returns the panel fixed mode from configuration. */
14661+/** FIXME JLIU7 need to revisit it. */
14662+struct drm_display_mode *mrst_lvds_get_configuration_mode(struct drm_device
14663+ *dev)
14664+{
14665+ struct drm_display_mode *mode;
14666+
14667+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
14668+ if (!mode)
14669+ return NULL;
14670+
14671+#if 0 /*FIXME jliu7 remove it later */
14672+ /* hard coded fixed mode for TPO LTPS LPJ040K001A */
14673+ mode->hdisplay = 800;
14674+ mode->vdisplay = 480;
14675+ mode->hsync_start = 836;
14676+ mode->hsync_end = 846;
14677+ mode->htotal = 1056;
14678+ mode->vsync_start = 489;
14679+ mode->vsync_end = 491;
14680+ mode->vtotal = 525;
14681+ mode->clock = 33264;
14682+#endif /*FIXME jliu7 remove it later */
14683+
14684+#if 0 /*FIXME jliu7 remove it later */
14685+ /* hard coded fixed mode for LVDS 800x480 */
14686+ mode->hdisplay = 800;
14687+ mode->vdisplay = 480;
14688+ mode->hsync_start = 801;
14689+ mode->hsync_end = 802;
14690+ mode->htotal = 1024;
14691+ mode->vsync_start = 481;
14692+ mode->vsync_end = 482;
14693+ mode->vtotal = 525;
14694+ mode->clock = 30994;
14695+#endif /*FIXME jliu7 remove it later */
14696+
14697+#if 1 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */
14698+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
14699+ mode->hdisplay = 1024;
14700+ mode->vdisplay = 600;
14701+ mode->hsync_start = 1072;
14702+ mode->hsync_end = 1104;
14703+ mode->htotal = 1184;
14704+ mode->vsync_start = 603;
14705+ mode->vsync_end = 604;
14706+ mode->vtotal = 608;
14707+ mode->clock = 53990;
14708+#endif /*FIXME jliu7 remove it later */
14709+
14710+#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
14711+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
14712+ mode->hdisplay = 1024;
14713+ mode->vdisplay = 600;
14714+ mode->hsync_start = 1104;
14715+ mode->hsync_end = 1136;
14716+ mode->htotal = 1184;
14717+ mode->vsync_start = 603;
14718+ mode->vsync_end = 604;
14719+ mode->vtotal = 608;
14720+ mode->clock = 53990;
14721+#endif /*FIXME jliu7 remove it later */
14722+
14723+#if 0 /*FIXME jliu7 remove it later */
14724+ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
14725+ mode->hdisplay = 1024;
14726+ mode->vdisplay = 600;
14727+ mode->hsync_start = 1124;
14728+ mode->hsync_end = 1204;
14729+ mode->htotal = 1312;
14730+ mode->vsync_start = 607;
14731+ mode->vsync_end = 610;
14732+ mode->vtotal = 621;
14733+ mode->clock = 48885;
14734+#endif /*FIXME jliu7 remove it later */
14735+
14736+#if 0 /*FIXME jliu7 remove it later */
14737+ /* hard coded fixed mode for LVDS 1024x768 */
14738+ mode->hdisplay = 1024;
14739+ mode->vdisplay = 768;
14740+ mode->hsync_start = 1048;
14741+ mode->hsync_end = 1184;
14742+ mode->htotal = 1344;
14743+ mode->vsync_start = 771;
14744+ mode->vsync_end = 777;
14745+ mode->vtotal = 806;
14746+ mode->clock = 65000;
14747+#endif /*FIXME jliu7 remove it later */
14748+
14749+#if 0 /*FIXME jliu7 remove it later */
14750+ /* hard coded fixed mode for LVDS 1366x768 */
14751+ mode->hdisplay = 1366;
14752+ mode->vdisplay = 768;
14753+ mode->hsync_start = 1430;
14754+ mode->hsync_end = 1558;
14755+ mode->htotal = 1664;
14756+ mode->vsync_start = 769;
14757+ mode->vsync_end = 770;
14758+ mode->vtotal = 776;
14759+ mode->clock = 77500;
14760+#endif /*FIXME jliu7 remove it later */
14761+
14762+ drm_mode_set_name(mode);
14763+ drm_mode_set_crtcinfo(mode, 0);
14764+
14765+ return mode;
14766+}
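
As a quick sanity check on the hard-coded Samsung 1024x600 timings chosen in the #if 1 block above, the nominal refresh rate is clock / (htotal * vtotal) = 53,990,000 / (1184 * 608), roughly 75 Hz, which agrees with the "1024x600@75" label. The helper below only illustrates that arithmetic; in-tree DRM code would normally rely on drm_mode_vrefresh() instead, and the function name here is hypothetical.

/*
 * Sketch: derive the nominal refresh rate from a drm_display_mode.
 * mode->clock is in kHz, so scale by 1000 before dividing by the total
 * pixel count per frame. For the mode built above this yields
 * 53990000 / (1184 * 608) ~= 75 Hz.
 */
static int example_mode_vrefresh(const struct drm_display_mode *mode)
{
	if (!mode->htotal || !mode->vtotal)
		return 0;

	return (mode->clock * 1000) / (mode->htotal * mode->vtotal);
}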
14767+
14768+/**
14769+ * mrst_lvds_init - setup LVDS connectors on this device
14770+ * @dev: drm device
14771+ *
14772+ * Create the connector, register the LVDS DDC bus, and try to figure out what
14773+ * modes we can display on the LVDS panel (if present).
14774+ */
14775+void mrst_lvds_init(struct drm_device *dev,
14776+ struct psb_intel_mode_device *mode_dev)
14777+{
14778+ struct psb_intel_output *psb_intel_output;
14779+ struct drm_connector *connector;
14780+ struct drm_encoder *encoder;
14781+#if MRST_I2C
14782+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
14783+#endif
14784+#if PRINT_JLIU7
14785+ DRM_INFO("JLIU7 enter mrst_lvds_init \n");
14786+#endif /* PRINT_JLIU7 */
14787+
14788+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
14789+ if (!psb_intel_output)
14790+ return;
14791+
14792+ psb_intel_output->mode_dev = mode_dev;
14793+ connector = &psb_intel_output->base;
14794+ encoder = &psb_intel_output->enc;
14795+ drm_connector_init(dev, &psb_intel_output->base,
14796+ &psb_intel_lvds_connector_funcs,
14797+ DRM_MODE_CONNECTOR_LVDS);
14798+
14799+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
14800+ DRM_MODE_ENCODER_LVDS);
14801+
14802+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
14803+ &psb_intel_output->enc);
14804+ psb_intel_output->type = INTEL_OUTPUT_LVDS;
14805+
14806+ drm_encoder_helper_add(encoder, &mrst_lvds_helper_funcs);
14807+ drm_connector_helper_add(connector,
14808+ &psb_intel_lvds_connector_helper_funcs);
14809+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
14810+ connector->interlace_allowed = false;
14811+ connector->doublescan_allowed = false;
14812+
14813+ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
14814+
14815+ /*
14816+ * LVDS discovery:
14817+ * 1) check for EDID on DDC
14818+ * 2) check for VBT data
14819+ * 3) check to see if LVDS is already on
14820+ * if none of the above, no panel
14821+ * 4) make sure lid is open
14822+ * if closed, act like it's not there for now
14823+ */
14824+
14825+#if MRST_I2C
14826+ /* Set up the DDC bus. */
14827+ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
14828+ if (!psb_intel_output->ddc_bus) {
14829+ dev_printk(KERN_ERR, &dev->pdev->dev,
14830+			   "DDC bus registration failed.\n");
14831+ goto failed_ddc;
14832+ }
14833+
14834+ /*
14835+ * Attempt to get the fixed panel mode from DDC. Assume that the
14836+ * preferred mode is the right one.
14837+ */
14838+ psb_intel_ddc_get_modes(psb_intel_output);
14839+ list_for_each_entry(scan, &connector->probed_modes, head) {
14840+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
14841+ mode_dev->panel_fixed_mode =
14842+ drm_mode_duplicate(dev, scan);
14843+ goto out; /* FIXME: check for quirks */
14844+ }
14845+ }
14846+#endif /* MRST_I2C */
14847+
14848+ /*
14849+	 * If we didn't get EDID, try getting panel timing
14850+ * from configuration data
14851+ */
14852+ mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
14853+
14854+ if (mode_dev->panel_fixed_mode) {
14855+ mode_dev->panel_fixed_mode->type |=
14856+ DRM_MODE_TYPE_PREFERRED;
14857+ goto out; /* FIXME: check for quirks */
14858+ }
14859+
14860+ /* If we still don't have a mode after all that, give up. */
14861+ if (!mode_dev->panel_fixed_mode) {
14862+ DRM_DEBUG
14863+ ("Found no modes on the lvds, ignoring the LVDS\n");
14864+ goto failed_find;
14865+ }
14866+
14867+out:
14868+ drm_sysfs_connector_add(connector);
14869+ return;
14870+
14871+failed_find:
14872+ DRM_DEBUG("No LVDS modes found, disabling.\n");
14873+ if (psb_intel_output->ddc_bus)
14874+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
14875+#if MRST_I2C
14876+failed_ddc:
14877+#endif
14878+ drm_encoder_cleanup(encoder);
14879+ drm_connector_cleanup(connector);
14880+ kfree(connector);
14881+}
14882+
14883+/* MRST platform end */
14884diff -uNr a/drivers/gpu/drm/psb/psb_intel_modes.c b/drivers/gpu/drm/psb/psb_intel_modes.c
14885--- a/drivers/gpu/drm/psb/psb_intel_modes.c 1969-12-31 16:00:00.000000000 -0800
14886+++ b/drivers/gpu/drm/psb/psb_intel_modes.c 2009-04-07 13:28:38.000000000 -0700
14887@@ -0,0 +1,64 @@
14888+/*
14889+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
14890+ * Copyright (c) 2007 Intel Corporation
14891+ * Jesse Barnes <jesse.barnes@intel.com>
14892+ */
14893+
14894+#include <linux/i2c.h>
14895+#include <linux/fb.h>
14896+#include <drm/drmP.h>
14897+#include "psb_intel_drv.h"
14898+
14899+/**
14900+ * psb_intel_ddc_probe - probe for an EDID device on the output's DDC bus
14901+ * @psb_intel_output: output whose ddc_bus is probed at slave address 0x50
14902+ */
14903+bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
14904+{
14905+ u8 out_buf[] = { 0x0, 0x0 };
14906+ u8 buf[2];
14907+ int ret;
14908+ struct i2c_msg msgs[] = {
14909+ {
14910+ .addr = 0x50,
14911+ .flags = 0,
14912+ .len = 1,
14913+ .buf = out_buf,
14914+ },
14915+ {
14916+ .addr = 0x50,
14917+ .flags = I2C_M_RD,
14918+ .len = 1,
14919+ .buf = buf,
14920+ }
14921+ };
14922+
14923+ ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2);
14924+ if (ret == 2)
14925+ return true;
14926+
14927+ return false;
14928+}
14929+
14930+/**
14931+ * psb_intel_ddc_get_modes - get modelist from monitor
14932+ * @connector: DRM connector device to use
14933+ *
14934+ * Fetch the EDID information from @connector using the DDC bus.
14935+ */
14936+int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output)
14937+{
14938+ struct edid *edid;
14939+ int ret = 0;
14940+
14941+ edid =
14942+ drm_get_edid(&psb_intel_output->base,
14943+ &psb_intel_output->ddc_bus->adapter);
14944+ if (edid) {
14945+ drm_mode_connector_update_edid_property(&psb_intel_output->
14946+ base, edid);
14947+ ret = drm_add_edid_modes(&psb_intel_output->base, edid);
14948+ kfree(edid);
14949+ }
14950+ return ret;
14951+}
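
For context, psb_intel_ddc_probe() and psb_intel_ddc_get_modes() are typically paired from a connector's ->get_modes hook. The wrapper below is purely illustrative (its name is invented and it is not part of this patch); it only shows the calling pattern.

/*
 * Hypothetical ->get_modes implementation: check that something answers
 * at the EDID address first, then pull the mode list over DDC. Returns
 * the number of modes added to the connector's probed list.
 */
static int example_output_get_modes(struct drm_connector *connector)
{
	struct psb_intel_output *output =
		container_of(connector, struct psb_intel_output, base);

	if (!psb_intel_ddc_probe(output))
		return 0;	/* no EDID device on this DDC bus */

	return psb_intel_ddc_get_modes(output);
}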
14952diff -uNr a/drivers/gpu/drm/psb/psb_intel_reg.h b/drivers/gpu/drm/psb/psb_intel_reg.h
14953--- a/drivers/gpu/drm/psb/psb_intel_reg.h 1969-12-31 16:00:00.000000000 -0800
14954+++ b/drivers/gpu/drm/psb/psb_intel_reg.h 2009-04-07 13:28:38.000000000 -0700
14955@@ -0,0 +1,972 @@
14956+#define BLC_PWM_CTL 0x61254
14957+#define BLC_PWM_CTL2 0x61250
14958+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
14959+/**
14960+ * This is the most significant 15 bits of the number of backlight cycles in a
14961+ * complete cycle of the modulated backlight control.
14962+ *
14963+ * The actual value is this field multiplied by two.
14964+ */
14965+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
14966+#define BLM_LEGACY_MODE (1 << 16)
14967+/**
14968+ * This is the number of cycles out of the backlight modulation cycle for which
14969+ * the backlight is on.
14970+ *
14971+ * This field must be no greater than the number of cycles in the complete
14972+ * backlight modulation cycle.
14973+ */
14974+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
14975+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
14976+
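Per the two comments above, the modulation-frequency field carries the most significant 15 bits of the cycle length (the real count is the field value times two) and any duty-cycle value written must not exceed that count. The sketch below decodes the maximum usable level; it reuses the driver's REG_READ() convention and its name is illustrative only.

/*
 * Sketch: recover the full backlight PWM period from BLC_PWM_CTL. The
 * frequency field holds the top 15 bits of the cycle count, so the
 * maximum duty-cycle value is that field multiplied by two.
 */
static u32 example_backlight_max(struct drm_device *dev)
{
	u32 pwm_ctl = REG_READ(BLC_PWM_CTL);
	u32 freq = (pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
		   BACKLIGHT_MODULATION_FREQ_SHIFT;

	return freq * 2;	/* duty-cycle writes must stay <= this */
}
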
14977+#define I915_GCFGC 0xf0
14978+#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
14979+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
14980+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
14981+#define I915_DISPLAY_CLOCK_MASK (7 << 4)
14982+
14983+#define I855_HPLLCC 0xc0
14984+#define I855_CLOCK_CONTROL_MASK (3 << 0)
14985+#define I855_CLOCK_133_200 (0 << 0)
14986+#define I855_CLOCK_100_200 (1 << 0)
14987+#define I855_CLOCK_100_133 (2 << 0)
14988+#define I855_CLOCK_166_250 (3 << 0)
14989+
14990+/* I830 CRTC registers */
14991+#define HTOTAL_A 0x60000
14992+#define HBLANK_A 0x60004
14993+#define HSYNC_A 0x60008
14994+#define VTOTAL_A 0x6000c
14995+#define VBLANK_A 0x60010
14996+#define VSYNC_A 0x60014
14997+#define PIPEASRC 0x6001c
14998+#define BCLRPAT_A 0x60020
14999+#define VSYNCSHIFT_A 0x60028
15000+
15001+#define HTOTAL_B 0x61000
15002+#define HBLANK_B 0x61004
15003+#define HSYNC_B 0x61008
15004+#define VTOTAL_B 0x6100c
15005+#define VBLANK_B 0x61010
15006+#define VSYNC_B 0x61014
15007+#define PIPEBSRC 0x6101c
15008+#define BCLRPAT_B 0x61020
15009+#define VSYNCSHIFT_B 0x61028
15010+
15011+#define PP_STATUS 0x61200
15012+# define PP_ON (1 << 31)
15013+/**
15014+ * Indicates that all dependencies of the panel are on:
15015+ *
15016+ * - PLL enabled
15017+ * - pipe enabled
15018+ * - LVDS/DVOB/DVOC on
15019+ */
15020+# define PP_READY (1 << 30)
15021+# define PP_SEQUENCE_NONE (0 << 28)
15022+# define PP_SEQUENCE_ON (1 << 28)
15023+# define PP_SEQUENCE_OFF (2 << 28)
15024+# define PP_SEQUENCE_MASK 0x30000000
15025+#define PP_CONTROL 0x61204
15026+# define POWER_TARGET_ON (1 << 0)
15027+
15028+#define LVDSPP_ON 0x61208
15029+#define LVDSPP_OFF 0x6120c
15030+#define PP_CYCLE 0x61210
15031+
15032+#define PFIT_CONTROL 0x61230
15033+# define PFIT_ENABLE (1 << 31)
15034+# define PFIT_PIPE_MASK (3 << 29)
15035+# define PFIT_PIPE_SHIFT 29
15036+# define VERT_INTERP_DISABLE (0 << 10)
15037+# define VERT_INTERP_BILINEAR (1 << 10)
15038+# define VERT_INTERP_MASK (3 << 10)
15039+# define VERT_AUTO_SCALE (1 << 9)
15040+# define HORIZ_INTERP_DISABLE (0 << 6)
15041+# define HORIZ_INTERP_BILINEAR (1 << 6)
15042+# define HORIZ_INTERP_MASK (3 << 6)
15043+# define HORIZ_AUTO_SCALE (1 << 5)
15044+# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
15045+
15046+#define PFIT_PGM_RATIOS 0x61234
15047+# define PFIT_VERT_SCALE_MASK 0xfff00000
15048+# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
15049+
15050+#define PFIT_AUTO_RATIOS 0x61238
15051+
15052+
15053+#define DPLL_A 0x06014
15054+#define DPLL_B 0x06018
15055+# define DPLL_VCO_ENABLE (1 << 31)
15056+# define DPLL_DVO_HIGH_SPEED (1 << 30)
15057+# define DPLL_SYNCLOCK_ENABLE (1 << 29)
15058+# define DPLL_VGA_MODE_DIS (1 << 28)
15059+# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
15060+# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
15061+# define DPLL_MODE_MASK (3 << 26)
15062+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
15063+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
15064+# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
15065+# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
15066+# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
15067+# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
15068+/**
15069+ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
15070+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
15071+ */
15072+# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
15073+/**
15074+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
15075+ * this field (only one bit may be set).
15076+ */
15077+# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
15078+# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
15079+# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
15080+ * in DVO non-gang */
15081+# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
15082+# define PLL_REF_INPUT_DREFCLK (0 << 13)
15083+# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
15084+# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
15085+ * TVCLKIN */
15086+# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
15087+# define PLL_REF_INPUT_MASK (3 << 13)
15088+# define PLL_LOAD_PULSE_PHASE_SHIFT 9
15089+/*
15090+ * Parallel to Serial Load Pulse phase selection.
15091+ * Selects the phase for the 10X DPLL clock for the PCIe
15092+ * digital display port. The range is 4 to 13; 10 or more
15093+ * is just a flip delay. The default is 6
15094+ */
15095+# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
15096+# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
15097+
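
The two P1 comments above describe different encodings per mode on the i830 generation: in DAC/serial mode P1 is two plus the programmed field (or exactly 2 when PLL_P1_DIVIDE_BY_TWO is set), while in LVDS mode P1 is the position of the single bit set in the field. The decode below is a hedged sketch of that rule; the helper name and the is_lvds parameter are illustrative.

/*
 * Sketch: decode the effective P1 post divider from an i830-style DPLL
 * value, following the comments above. Uses ffs() from <linux/bitops.h>.
 */
static int example_i830_dpll_p1(u32 dpll, bool is_lvds)
{
	u32 field;

	if (is_lvds) {
		/* LVDS mode: P1 is the bit number set within the field */
		field = (dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
			DPLL_FPA01_P1_POST_DIV_SHIFT;
		return ffs(field);	/* only one bit may be set */
	}

	if (dpll & PLL_P1_DIVIDE_BY_TWO)
		return 2;

	/* DAC/serial mode: P1 is two plus this bitfield */
	field = (dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
		DPLL_FPA01_P1_POST_DIV_SHIFT;
	return 2 + field;
}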
15098+/**
15099+ * SDVO multiplier for 945G/GM. Not used on 965.
15100+ *
15101+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
15102+ */
15103+# define SDVO_MULTIPLIER_MASK 0x000000ff
15104+# define SDVO_MULTIPLIER_SHIFT_HIRES 4
15105+# define SDVO_MULTIPLIER_SHIFT_VGA 0
15106+
15107+/** @defgroup DPLL_MD
15108+ * @{
15109+ */
15110+/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
15111+#define DPLL_A_MD 0x0601c
15112+/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
15113+#define DPLL_B_MD 0x06020
15114+/**
15115+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
15116+ *
15117+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
15118+ */
15119+# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
15120+# define DPLL_MD_UDI_DIVIDER_SHIFT 24
15121+/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
15122+# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
15123+# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
15124+/**
15125+ * SDVO/UDI pixel multiplier.
15126+ *
15127+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
15128+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
15129+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
15130+ * dummy bytes in the datastream at an increased clock rate, with both sides of
15131+ * the link knowing how many bytes are fill.
15132+ *
15133+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
15134+ * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be
15135+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
15136+ * through an SDVO command.
15137+ *
15138+ * This register field has values of multiplication factor minus 1, with
15139+ * a maximum multiplier of 5 for SDVO.
15140+ */
15141+# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
15142+# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
15143+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
15144+ * This is best set to the default value (3) or the CRT won't work. No,
15145+ * I don't entirely understand what this does...
15146+ */
15147+# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
15148+# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
15149+/** @} */
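
Working the comment's own example through psb_intel_sdvo_get_pixel_multiplier(), defined later in this patch: a 65 MHz dot clock (mode->clock == 65000) falls in the 50-100 MHz band, so the multiplier is 2 and the value written to the UDI multiplier field is 2 - 1 = 1. The helper below only packages that arithmetic; its name is illustrative and it assumes the multiplier function's prototype is visible where it is used.

/*
 * Sketch: build the SDVO/UDI pixel multiplier field of DPLL_A_MD /
 * DPLL_B_MD from a mode's dot clock. The field holds multiplier - 1,
 * as the comment above states (>=100 MHz -> 1x, >=50 MHz -> 2x, else 4x).
 */
static u32 example_dpll_md_multiplier(struct drm_display_mode *mode)
{
	int mult = psb_intel_sdvo_get_pixel_multiplier(mode);

	return ((u32)(mult - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT) &
	       DPLL_MD_UDI_MULTIPLIER_MASK;
}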
15150+
15151+#define DPLL_TEST 0x606c
15152+# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
15153+# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
15154+# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
15155+# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
15156+# define DPLLB_TEST_N_BYPASS (1 << 19)
15157+# define DPLLB_TEST_M_BYPASS (1 << 18)
15158+# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
15159+# define DPLLA_TEST_N_BYPASS (1 << 3)
15160+# define DPLLA_TEST_M_BYPASS (1 << 2)
15161+# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
15162+
15163+#define ADPA 0x61100
15164+#define ADPA_DAC_ENABLE (1<<31)
15165+#define ADPA_DAC_DISABLE 0
15166+#define ADPA_PIPE_SELECT_MASK (1<<30)
15167+#define ADPA_PIPE_A_SELECT 0
15168+#define ADPA_PIPE_B_SELECT (1<<30)
15169+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
15170+#define ADPA_SETS_HVPOLARITY 0
15171+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
15172+#define ADPA_VSYNC_CNTL_ENABLE 0
15173+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
15174+#define ADPA_HSYNC_CNTL_ENABLE 0
15175+#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
15176+#define ADPA_VSYNC_ACTIVE_LOW 0
15177+#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
15178+#define ADPA_HSYNC_ACTIVE_LOW 0
15179+
15180+#define FPA0 0x06040
15181+#define FPA1 0x06044
15182+#define FPB0 0x06048
15183+#define FPB1 0x0604c
15184+# define FP_N_DIV_MASK 0x003f0000
15185+# define FP_N_DIV_SHIFT 16
15186+# define FP_M1_DIV_MASK 0x00003f00
15187+# define FP_M1_DIV_SHIFT 8
15188+# define FP_M2_DIV_MASK 0x0000003f
15189+# define FP_M2_DIV_SHIFT 0
15190+
15191+
15192+#define PORT_HOTPLUG_EN 0x61110
15193+# define SDVOB_HOTPLUG_INT_EN (1 << 26)
15194+# define SDVOC_HOTPLUG_INT_EN (1 << 25)
15195+# define TV_HOTPLUG_INT_EN (1 << 18)
15196+# define CRT_HOTPLUG_INT_EN (1 << 9)
15197+# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
15198+
15199+#define PORT_HOTPLUG_STAT 0x61114
15200+# define CRT_HOTPLUG_INT_STATUS (1 << 11)
15201+# define TV_HOTPLUG_INT_STATUS (1 << 10)
15202+# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
15203+# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
15204+# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
15205+# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
15206+# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
15207+# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
15208+
15209+#define SDVOB 0x61140
15210+#define SDVOC 0x61160
15211+#define SDVO_ENABLE (1 << 31)
15212+#define SDVO_PIPE_B_SELECT (1 << 30)
15213+#define SDVO_STALL_SELECT (1 << 29)
15214+#define SDVO_INTERRUPT_ENABLE (1 << 26)
15215+/**
15216+ * 915G/GM SDVO pixel multiplier.
15217+ *
15218+ * Programmed value is multiplier - 1, up to 5x.
15219+ *
15220+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
15221+ */
15222+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
15223+#define SDVO_PORT_MULTIPLY_SHIFT 23
15224+#define SDVO_PHASE_SELECT_MASK (15 << 19)
15225+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
15226+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
15227+#define SDVOC_GANG_MODE (1 << 16)
15228+#define SDVO_BORDER_ENABLE (1 << 7)
15229+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
15230+#define SDVO_DETECTED (1 << 2)
15231+/* Bits to be preserved when writing */
15232+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
15233+#define SDVOC_PRESERVE_MASK (1 << 17)
15234+
15235+/** @defgroup LVDS
15236+ * @{
15237+ */
15238+/**
15239+ * This register controls the LVDS output enable, pipe selection, and data
15240+ * format selection.
15241+ *
15242+ * All of the clock/data pairs are force powered down by power sequencing.
15243+ */
15244+#define LVDS 0x61180
15245+/**
15246+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
15247+ * the DPLL semantics change when the LVDS is assigned to that pipe.
15248+ */
15249+# define LVDS_PORT_EN (1 << 31)
15250+/** Selects pipe B for LVDS data. Must be set on pre-965. */
15251+# define LVDS_PIPEB_SELECT (1 << 30)
15252+
15253+/**
15254+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
15255+ * pixel.
15256+ */
15257+# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
15258+# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
15259+# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
15260+/**
15261+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
15262+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
15263+ * on.
15264+ */
15265+# define LVDS_A3_POWER_MASK (3 << 6)
15266+# define LVDS_A3_POWER_DOWN (0 << 6)
15267+# define LVDS_A3_POWER_UP (3 << 6)
15268+/**
15269+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
15270+ * is set.
15271+ */
15272+# define LVDS_CLKB_POWER_MASK (3 << 4)
15273+# define LVDS_CLKB_POWER_DOWN (0 << 4)
15274+# define LVDS_CLKB_POWER_UP (3 << 4)
15275+
15276+/**
15277+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
15278+ * setting for whether we are in dual-channel mode. The B3 pair will
15279+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
15280+ */
15281+# define LVDS_B0B3_POWER_MASK (3 << 2)
15282+# define LVDS_B0B3_POWER_DOWN (0 << 2)
15283+# define LVDS_B0B3_POWER_UP (3 << 2)
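
The comments above spell out the dependencies between the pair-power fields: A3 carries the extra LSBs for 24-bit panels and is only honoured when A0-A2/CLKA are up, CLKB should only be powered together with B0-B3, and B0-B3 must match the DPLL dual-channel p2 choice. The sketch below assembles a value for a single-channel panel; the helper name and the bpp parameter are illustrative, not anything defined by this patch, and real programming may need more than these bits.

/*
 * Sketch: pick LVDS pair-power bits for a single-channel panel. For
 * 18 bpp only the A0-A2/CLKA pairs are powered; for 24 bpp the A3 pair
 * is powered as well, as described in the comments above.
 */
static u32 example_lvds_single_channel_power(int bpp)
{
	u32 val = LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;

	if (bpp == 24)
		val |= LVDS_A3_POWER_UP;

	return val;
}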
15284+
15285+#define PIPEACONF 0x70008
15286+#define PIPEACONF_ENABLE (1<<31)
15287+#define PIPEACONF_DISABLE 0
15288+#define PIPEACONF_DOUBLE_WIDE (1<<30)
15289+#define I965_PIPECONF_ACTIVE (1<<30)
15290+#define PIPEACONF_SINGLE_WIDE 0
15291+#define PIPEACONF_PIPE_UNLOCKED 0
15292+#define PIPEACONF_PIPE_LOCKED (1<<25)
15293+#define PIPEACONF_PALETTE 0
15294+#define PIPEACONF_GAMMA (1<<24)
15295+#define PIPECONF_FORCE_BORDER (1<<25)
15296+#define PIPECONF_PROGRESSIVE (0 << 21)
15297+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
15298+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
15299+
15300+#define PIPEBCONF 0x71008
15301+#define PIPEBCONF_ENABLE (1<<31)
15302+#define PIPEBCONF_DISABLE 0
15303+#define PIPEBCONF_DOUBLE_WIDE (1<<30)
15304+#define PIPEBCONF_DISABLE 0
15305+#define PIPEBCONF_GAMMA (1<<24)
15306+#define PIPEBCONF_PALETTE 0
15307+
15308+#define PIPEBGCMAXRED 0x71010
15309+#define PIPEBGCMAXGREEN 0x71014
15310+#define PIPEBGCMAXBLUE 0x71018
15311+#define PIPEBSTAT 0x71024
15312+#define PIPEBFRAMEHIGH 0x71040
15313+#define PIPEBFRAMEPIXEL 0x71044
15314+
15315+#define DSPARB 0x70030
15316+#define DSPFW1 0x70034
15317+#define DSPFW2 0x70038
15318+#define DSPFW3 0x7003c
15319+#define DSPFW4 0x70050
15320+#define DSPFW5 0x70054
15321+#define DSPFW6 0x70058
15322+
15323+#define DSPACNTR 0x70180
15324+#define DSPBCNTR 0x71180
15325+#define DISPLAY_PLANE_ENABLE (1<<31)
15326+#define DISPLAY_PLANE_DISABLE 0
15327+#define DISPPLANE_GAMMA_ENABLE (1<<30)
15328+#define DISPPLANE_GAMMA_DISABLE 0
15329+#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
15330+#define DISPPLANE_8BPP (0x2<<26)
15331+#define DISPPLANE_15_16BPP (0x4<<26)
15332+#define DISPPLANE_16BPP (0x5<<26)
15333+#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
15334+#define DISPPLANE_32BPP (0x7<<26)
15335+#define DISPPLANE_STEREO_ENABLE (1<<25)
15336+#define DISPPLANE_STEREO_DISABLE 0
15337+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
15338+#define DISPPLANE_SEL_PIPE_A 0
15339+#define DISPPLANE_SEL_PIPE_B (1<<24)
15340+#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
15341+#define DISPPLANE_SRC_KEY_DISABLE 0
15342+#define DISPPLANE_LINE_DOUBLE (1<<20)
15343+#define DISPPLANE_NO_LINE_DOUBLE 0
15344+#define DISPPLANE_STEREO_POLARITY_FIRST 0
15345+#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
15346+/* plane B only */
15347+#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
15348+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
15349+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
15350+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
15351+
15352+#define DSPABASE 0x70184
15353+#define DSPASTRIDE 0x70188
15354+
15355+#define DSPBBASE 0x71184
15356+#define DSPBADDR DSPBBASE
15357+#define DSPBSTRIDE 0x71188
15358+
15359+#define DSPAKEYVAL 0x70194
15360+#define DSPAKEYMASK 0x70198
15361+
15362+#define DSPAPOS 0x7018C /* reserved */
15363+#define DSPASIZE 0x70190
15364+#define DSPBPOS 0x7118C
15365+#define DSPBSIZE 0x71190
15366+
15367+#define DSPASURF 0x7019C
15368+#define DSPATILEOFF 0x701A4
15369+
15370+#define DSPBSURF 0x7119C
15371+#define DSPBTILEOFF 0x711A4
15372+
15373+#define VGACNTRL 0x71400
15374+# define VGA_DISP_DISABLE (1 << 31)
15375+# define VGA_2X_MODE (1 << 30)
15376+# define VGA_PIPE_B_SELECT (1 << 29)
15377+
15378+/*
15379+ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
15380+ * of video memory available to the BIOS in SWF1.
15381+ */
15382+
15383+#define SWF0 0x71410
15384+#define SWF1 0x71414
15385+#define SWF2 0x71418
15386+#define SWF3 0x7141c
15387+#define SWF4 0x71420
15388+#define SWF5 0x71424
15389+#define SWF6 0x71428
15390+
15391+/*
15392+ * 855 scratch registers.
15393+ */
15394+#define SWF00 0x70410
15395+#define SWF01 0x70414
15396+#define SWF02 0x70418
15397+#define SWF03 0x7041c
15398+#define SWF04 0x70420
15399+#define SWF05 0x70424
15400+#define SWF06 0x70428
15401+
15402+#define SWF10 SWF0
15403+#define SWF11 SWF1
15404+#define SWF12 SWF2
15405+#define SWF13 SWF3
15406+#define SWF14 SWF4
15407+#define SWF15 SWF5
15408+#define SWF16 SWF6
15409+
15410+#define SWF30 0x72414
15411+#define SWF31 0x72418
15412+#define SWF32 0x7241c
15413+
15414+
15415+/*
15416+ * Palette registers
15417+ */
15418+#define PALETTE_A 0x0a000
15419+#define PALETTE_B 0x0a800
15420+
15421+#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
15422+#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
15423+#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
15424+#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
15425+#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
15426+
15427+
15428+/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G) */
15429+#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)
15430+#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
15431+#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
15432+#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
15433+
15434+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
15435+ (dev)->pci_device == 0x2982 || \
15436+ (dev)->pci_device == 0x2992 || \
15437+ (dev)->pci_device == 0x29A2 || \
15438+ (dev)->pci_device == 0x2A02 || \
15439+ (dev)->pci_device == 0x2A12)
15440+
15441+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
15442+
15443+#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
15444+ (dev)->pci_device == 0x29B2 || \
15445+ (dev)->pci_device == 0x29D2)
15446+
15447+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
15448+ IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev) || \
15449+ IS_MRST(dev))
15450+
15451+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
15452+ IS_I945GM(dev) || IS_I965GM(dev) || \
15453+ IS_POULSBO(dev) || IS_MRST(dev))
15454+
15455+/* Cursor A & B regs */
15456+#define CURACNTR 0x70080
15457+#define CURSOR_MODE_DISABLE 0x00
15458+#define CURSOR_MODE_64_32B_AX 0x07
15459+#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
15460+#define MCURSOR_GAMMA_ENABLE (1 << 26)
15461+#define CURABASE 0x70084
15462+#define CURAPOS 0x70088
15463+#define CURSOR_POS_MASK 0x007FF
15464+#define CURSOR_POS_SIGN 0x8000
15465+#define CURSOR_X_SHIFT 0
15466+#define CURSOR_Y_SHIFT 16
15467+#define CURBCNTR 0x700c0
15468+#define CURBBASE 0x700c4
15469+#define CURBPOS 0x700c8
15470+
15471+/*
15472+ * MOORESTOWN delta registers
15473+ */
15474+#define MRST_DPLL_A 0x0f014
15475+#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
15476+#define MRST_FPA0 0x0f040
15477+#define MRST_FPA1 0x0f044
15478+
15479+/* #define LVDS 0x61180 */
15480+# define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
15481+# define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
15482+# define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
15483+
15484+#define MIPI 0x61190
15485+# define MIPI_PORT_EN (1 << 31)
15486+
15487+/* #define PP_CONTROL 0x61204 */
15488+# define POWER_DOWN_ON_RESET (1 << 1)
15489+
15490+/* #define PFIT_CONTROL 0x61230 */
15491+# define PFIT_PIPE_SELECT (3 << 29)
15492+# define PFIT_PIPE_SELECT_SHIFT (29)
15493+
15494+/* #define BLC_PWM_CTL 0x61254 */
15495+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
15496+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
15497+
15498+/* #define PIPEACONF 0x70008 */
15499+#define PIPEACONF_PIPE_STATE (1<<30)
15500+/* #define DSPACNTR 0x70180 */
15501+#if 0 /*FIXME JLIU7 need to define the following */
15502+1000 = 32 - bit RGBX(10 : 10 : 10 : 2)
15503+pixel format.Ignore alpha.1010 = BGRX 10 : 10 : 10 : 2 1100 = 64 - bit RGBX
15504+(16 : 16 : 16 : 16) 16 bit floating point pixel format.
15505+Ignore alpha.1110 = 32 - bit RGBX(8 : 8 : 8 : 8) pixel format.
15506+ Ignore
15507+ alpha.
15508+#endif /*FIXME JLIU7 need to define the following */
15509+
15510+#define MRST_DSPABASE 0x7019c
15511+
15512+/*
15513+ * MOORESTOWN reserved registers
15514+ */
15515+#if 0
15516+#define DSPAPOS 0x7018C /* reserved */
15517+#define DSPASIZE 0x70190
15518+#endif
15519+/*
15520+ * Moorestown registers.
15521+ */
15522+/*===========================================================================
15523+; General Constants
15524+;--------------------------------------------------------------------------*/
15525+#define BIT0 0x00000001
15526+#define BIT1 0x00000002
15527+#define BIT2 0x00000004
15528+#define BIT3 0x00000008
15529+#define BIT4 0x00000010
15530+#define BIT5 0x00000020
15531+#define BIT6 0x00000040
15532+#define BIT7 0x00000080
15533+#define BIT8 0x00000100
15534+#define BIT9 0x00000200
15535+#define BIT10 0x00000400
15536+#define BIT11 0x00000800
15537+#define BIT12 0x00001000
15538+#define BIT13 0x00002000
15539+#define BIT14 0x00004000
15540+#define BIT15 0x00008000
15541+#define BIT16 0x00010000
15542+#define BIT17 0x00020000
15543+#define BIT18 0x00040000
15544+#define BIT19 0x00080000
15545+#define BIT20 0x00100000
15546+#define BIT21 0x00200000
15547+#define BIT22 0x00400000
15548+#define BIT23 0x00800000
15549+#define BIT24 0x01000000
15550+#define BIT25 0x02000000
15551+#define BIT26 0x04000000
15552+#define BIT27 0x08000000
15553+#define BIT28 0x10000000
15554+#define BIT29 0x20000000
15555+#define BIT30 0x40000000
15556+#define BIT31 0x80000000
15557+/*===========================================================================
15558+; MIPI IP registers
15559+;--------------------------------------------------------------------------*/
15560+#define DEVICE_READY_REG 0xb000
15561+#define INTR_STAT_REG 0xb004
15562+#define RX_SOT_ERROR BIT0
15563+#define RX_SOT_SYNC_ERROR BIT1
15564+#define RX_ESCAPE_MODE_ENTRY_ERROR BIT3
15565+#define RX_LP_TX_SYNC_ERROR BIT4
15566+#define RX_HS_RECEIVE_TIMEOUT_ERROR BIT5
15567+#define RX_FALSE_CONTROL_ERROR BIT6
15568+#define RX_ECC_SINGLE_BIT_ERROR BIT7
15569+#define RX_ECC_MULTI_BIT_ERROR BIT8
15570+#define RX_CHECKSUM_ERROR BIT9
15571+#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT10
15572+#define RX_DSI_VC_ID_INVALID BIT11
15573+#define TX_FALSE_CONTROL_ERROR BIT12
15574+#define TX_ECC_SINGLE_BIT_ERROR BIT13
15575+#define TX_ECC_MULTI_BIT_ERROR BIT14
15576+#define TX_CHECKSUM_ERROR BIT15
15577+#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT16
15578+#define TX_DSI_VC_ID_INVALID BIT17
15579+#define HIGH_CONTENTION BIT18
15580+#define LOW_CONTENTION BIT19
15581+#define DPI_FIFO_UNDER_RUN BIT20
15582+#define HS_TX_TIMEOUT BIT21
15583+#define LP_RX_TIMEOUT BIT22
15584+#define TURN_AROUND_ACK_TIMEOUT BIT23
15585+#define ACK_WITH_NO_ERROR BIT24
15586+#define INTR_EN_REG 0xb008
15587+#define DSI_FUNC_PRG_REG 0xb00c
15588+#define DPI_CHANNEL_NUMBER_POS 0x03
15589+#define DBI_CHANNEL_NUMBER_POS 0x05
15590+#define FMT_DPI_POS 0x07
15591+#define FMT_DBI_POS 0x0A
15592+#define DBI_DATA_WIDTH_POS 0x0D
15593+#define HS_TX_TIMEOUT_REG 0xb010
15594+#define LP_RX_TIMEOUT_REG 0xb014
15595+#define TURN_AROUND_TIMEOUT_REG 0xb018
15596+#define DEVICE_RESET_REG 0xb01C
15597+#define DPI_RESOLUTION_REG 0xb020
15598+#define RES_V_POS 0x10
15599+#define DBI_RESOLUTION_REG 0xb024
15600+#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
15601+#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
15602+#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
15603+#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
15604+#define VERT_SYNC_PAD_COUNT_REG 0xb038
15605+#define VERT_BACK_PORCH_COUNT_REG 0xb03c
15606+#define VERT_FRONT_PORCH_COUNT_REG 0xb040
15607+#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
15608+#define DPI_CONTROL_REG 0xb048
15609+#define DPI_SHUT_DOWN BIT0
15610+#define DPI_TURN_ON BIT1
15611+#define DPI_COLOR_MODE_ON BIT2
15612+#define DPI_COLOR_MODE_OFF BIT3
15613+#define DPI_BACK_LIGHT_ON BIT4
15614+#define DPI_BACK_LIGHT_OFF BIT5
15615+#define DPI_LP BIT6
15616+#define DPI_DATA_REG 0xb04c
15617+#define DPI_BACK_LIGHT_ON_DATA 0x07
15618+#define DPI_BACK_LIGHT_OFF_DATA 0x17
15619+#define INIT_COUNT_REG 0xb050
15620+#define MAX_RET_PAK_REG 0xb054
15621+#define VIDEO_FMT_REG 0xb058
15622+#define EOT_DISABLE_REG 0xb05c
15623+#define LP_BYTECLK_REG 0xb060
15624+#define LP_GEN_DATA_REG 0xb064
15625+#define HS_GEN_DATA_REG 0xb068
15626+#define LP_GEN_CTRL_REG 0xb06C
15627+#define HS_GEN_CTRL_REG 0xb070
15628+#define GEN_FIFO_STAT_REG 0xb074
15629+#define HS_DATA_FIFO_FULL BIT0
15630+#define HS_DATA_FIFO_HALF_EMPTY BIT1
15631+#define HS_DATA_FIFO_EMPTY BIT2
15632+#define LP_DATA_FIFO_FULL BIT8
15633+#define LP_DATA_FIFO_HALF_EMPTY BIT9
15634+#define LP_DATA_FIFO_EMPTY BIT10
15635+#define HS_CTRL_FIFO_FULL BIT16
15636+#define HS_CTRL_FIFO_HALF_EMPTY BIT17
15637+#define HS_CTRL_FIFO_EMPTY BIT18
15638+#define LP_CTRL_FIFO_FULL BIT24
15639+#define LP_CTRL_FIFO_HALF_EMPTY BIT25
15640+#define LP_CTRL_FIFO_EMPTY BIT26
15641+/*===========================================================================
15642+; MIPI Adapter registers
15643+;--------------------------------------------------------------------------*/
15644+#define MIPI_CONTROL_REG 0xb104
15645+#define MIPI_2X_CLOCK_BITS (BIT0 | BIT1)
15646+#define MIPI_DATA_ADDRESS_REG 0xb108
15647+#define MIPI_DATA_LENGTH_REG 0xb10C
15648+#define MIPI_COMMAND_ADDRESS_REG 0xb110
15649+#define MIPI_COMMAND_LENGTH_REG 0xb114
15650+#define MIPI_READ_DATA_RETURN_REG0 0xb118
15651+#define MIPI_READ_DATA_RETURN_REG1 0xb11C
15652+#define MIPI_READ_DATA_RETURN_REG2 0xb120
15653+#define MIPI_READ_DATA_RETURN_REG3 0xb124
15654+#define MIPI_READ_DATA_RETURN_REG4 0xb128
15655+#define MIPI_READ_DATA_RETURN_REG5 0xb12C
15656+#define MIPI_READ_DATA_RETURN_REG6 0xb130
15657+#define MIPI_READ_DATA_RETURN_REG7 0xb134
15658+#define MIPI_READ_DATA_VALID_REG 0xb138
15659+/* DBI COMMANDS */
15660+#define soft_reset 0x01
15661+/* ************************************************************************* *\
15662+The display module performs a software reset.
15663+Registers are written with their SW Reset default values.
15664+\* ************************************************************************* */
15665+#define get_power_mode 0x0a
15666+/* ************************************************************************* *\
15667+The display module returns the current power mode
15668+\* ************************************************************************* */
15669+#define get_address_mode 0x0b
15670+/* ************************************************************************* *\
15671+The display module returns the current status.
15672+\* ************************************************************************* */
15673+#define get_pixel_format 0x0c
15674+/* ************************************************************************* *\
15675+This command gets the pixel format for the RGB image data
15676+used by the interface.
15677+\* ************************************************************************* */
15678+#define get_display_mode 0x0d
15679+/* ************************************************************************* *\
15680+The display module returns the Display Image Mode status.
15681+\* ************************************************************************* */
15682+#define get_signal_mode 0x0e
15683+/* ************************************************************************* *\
15684+The display module returns the Display Signal Mode.
15685+\* ************************************************************************* */
15686+#define get_diagnostic_result 0x0f
15687+/* ************************************************************************* *\
15688+The display module returns the self-diagnostic results following
15689+a Sleep Out command.
15690+\* ************************************************************************* */
15691+#define enter_sleep_mode 0x10
15692+/* ************************************************************************* *\
15693+This command causes the display module to enter the Sleep mode.
15694+In this mode, all unnecessary blocks inside the display module are disabled
15695+except interface communication. This is the lowest power mode
15696+the display module supports.
15697+\* ************************************************************************* */
15698+#define exit_sleep_mode 0x11
15699+/* ************************************************************************* *\
15700+This command causes the display module to exit Sleep mode.
15701+All blocks inside the display module are enabled.
15702+\* ************************************************************************* */
15703+#define enter_partial_mode 0x12
15704+/* ************************************************************************* *\
15705+This command causes the display module to enter the Partial Display Mode.
15706+The Partial Display Mode window is described by the set_partial_area command.
15707+\* ************************************************************************* */
15708+#define enter_normal_mode 0x13
15709+/* ************************************************************************* *\
15710+This command causes the display module to enter the Normal mode.
15711+Normal Mode is defined as the state in which Partial Display mode and Scroll mode are off.
15712+\* ************************************************************************* */
15713+#define exit_invert_mode 0x20
15714+/* ************************************************************************* *\
15715+This command causes the display module to stop inverting the image data on
15716+the display device. The frame memory contents remain unchanged.
15717+No status bits are changed.
15718+\* ************************************************************************* */
15719+#define enter_invert_mode 0x21
15720+/* ************************************************************************* *\
15721+This command causes the display module to invert the image data only on
15722+the display device. The frame memory contents remain unchanged.
15723+No status bits are changed.
15724+\* ************************************************************************* */
15725+#define set_gamma_curve 0x26
15726+/* ************************************************************************* *\
15727+This command selects the desired gamma curve for the display device.
15728+Four fixed gamma curves are defined in section DCS spec.
15729+\* ************************************************************************* */
15730+#define set_display_off 0x28
15731+/* ************************************************************************* *\
15732+This command causes the display module to stop displaying the image data
15733+on the display device. The frame memory contents remain unchanged.
15734+No status bits are changed.
15735+\* ************************************************************************* */
15736+#define set_display_on 0x29
15737+/* ************************************************************************* *\
15738+This command causes the display module to start displaying the image data
15739+on the display device. The frame memory contents remain unchanged.
15740+No status bits are changed.
15741+\* ************************************************************************* */
15742+#define set_column_address 0x2a
15743+/* ************************************************************************* *\
15744+This command defines the column extent of the frame memory accessed by the
15745+host processor with the read_memory_continue and write_memory_continue commands.
15746+No status bits are changed.
15747+\* ************************************************************************* */
15748+#define set_page_address 0x2b
15749+/* ************************************************************************* *\
15750+This command defines the page extent of the frame memory accessed by the host
15751+processor with the write_memory_continue and read_memory_continue command.
15752+No status bits are changed.
15753+\* ************************************************************************* */
15754+#define write_mem_start 0x2c
15755+/* ************************************************************************* *\
15756+This command transfers image data from the host processor to the display
15757+module's frame memory starting at the pixel location specified by
15758+preceding set_column_address and set_page_address commands.
15759+\* ************************************************************************* */
15760+#define set_partial_area 0x30
15761+/* ************************************************************************* *\
15762+This command defines the Partial Display mode's display area.
15763+There are two parameters associated with
15764+this command, the first defines the Start Row (SR) and the second the End Row
15765+(ER). SR and ER refer to the Frame Memory Line Pointer.
15766+\* ************************************************************************* */
15767+#define set_scroll_area 0x33
15768+/* ************************************************************************* *\
15769+This command defines the display module's Vertical Scrolling Area.
15770+\* ************************************************************************* */
15771+#define set_tear_off 0x34
15772+/* ************************************************************************* *\
15773+This command turns off the display module's Tearing Effect output signal on
15774+the TE signal line.
15775+\* ************************************************************************* */
15776+#define set_tear_on 0x35
15777+/* ************************************************************************* *\
15778+This command turns on the display module's Tearing Effect output signal
15779+on the TE signal line.
15780+\* ************************************************************************* */
15781+#define set_address_mode 0x36
15782+/* ************************************************************************* *\
15783+This command sets the data order for transfers from the host processor to
15784+the display module's frame memory, bits B[7:5] and B3, and from the display
15785+module's frame memory to the display device, bits B[2:0] and B4.
15786+\* ************************************************************************* */
15787+#define set_scroll_start 0x37
15788+/* ************************************************************************* *\
15789+This command sets the start of the vertical scrolling area in the frame memory.
15790+The vertical scrolling area is fully defined when this command is used with
15791+the set_scroll_area command. The set_scroll_start command has one parameter,
15792+the Vertical Scroll Pointer. The VSP defines the line in the frame memory
15793+that is written to the display device as the first line of the vertical
15794+scroll area.
15795+\* ************************************************************************* */
15796+#define exit_idle_mode 0x38
15797+/* ************************************************************************* *\
15798+This command causes the display module to exit Idle mode.
15799+\* ************************************************************************* */
15800+#define enter_idle_mode 0x39
15801+/* ************************************************************************* *\
15802+This command causes the display module to enter Idle Mode.
15803+In Idle Mode, color expression is reduced. Colors are shown on the display
15804+device using the MSB of each of the R, G and B color components in the frame
15805+memory.
15806+\* ************************************************************************* */
15807+#define set_pixel_format 0x3a
15808+/* ************************************************************************* *\
15809+This command sets the pixel format for the RGB image data used by the interface.
15810+Bits D[6:4] DPI Pixel Format Definition
15811+Bits D[2:0] DBI Pixel Format Definition
15812+Bits D7 and D3 are not used.
15813+\* ************************************************************************* */
15814+#define write_mem_cont 0x3c
15815+/* ************************************************************************* *\
15816+This command transfers image data from the host processor to the display
15817+module's frame memory continuing from the pixel location following the
15818+previous write_memory_continue or write_memory_start command.
15819+\* ************************************************************************* */
15820+#define set_tear_scanline 0x44
15821+/* ************************************************************************* *\
15822+This command turns on the display module's Tearing Effect output signal on the
15823+TE signal line when the display module reaches line N.
15824+\* ************************************************************************* */
15825+#define get_scanline 0x45
15826+/* ************************************************************************* *\
15827+The display module returns the current scanline, N, used to update the
15828+display device. The total number of scanlines on a display device is
15829+defined as VSYNC + VBP + VACT + VFP. The first scanline is defined as
15830+the first line of V Sync and is denoted as Line 0.
15831+When in Sleep Mode, the value returned by get_scanline is undefined.
15832+\* ************************************************************************* */
15833+/* DCS Interface Pixel Formats */
15834+#define DCS_PIXEL_FORMAT_3BPP 0x1
15835+#define DCS_PIXEL_FORMAT_8BPP 0x2
15836+#define DCS_PIXEL_FORMAT_12BPP 0x3
15837+#define DCS_PIXEL_FORMAT_16BPP 0x5
15838+#define DCS_PIXEL_FORMAT_18BPP 0x6
15839+#define DCS_PIXEL_FORMAT_24BPP 0x7
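According to the set_pixel_format description above, the command's single parameter carries the DPI format in bits D[6:4] and the DBI format in bits D[2:0], with D7 and D3 left clear. The packing helper below is only an illustration built from the DCS_PIXEL_FORMAT_* values; it is not a helper defined by this patch.

/*
 * Sketch: build the one-byte parameter for the set_pixel_format DCS
 * command. dpi_fmt and dbi_fmt are DCS_PIXEL_FORMAT_* values; bits
 * D7 and D3 stay zero.
 */
static u8 example_set_pixel_format_param(u8 dpi_fmt, u8 dbi_fmt)
{
	return (u8)(((dpi_fmt & 0x7) << 4) | (dbi_fmt & 0x7));
}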
15840+/* ONE PARAMETER READ DATA */
15841+#define addr_mode_data 0xfc
15842+#define diag_res_data 0x00
15843+#define disp_mode_data 0x23
15844+#define pxl_fmt_data 0x77
15845+#define pwr_mode_data 0x74
15846+#define sig_mode_data 0x00
15847+/* TWO PARAMETERS READ DATA */
15848+#define scanline_data1 0xff
15849+#define scanline_data2 0xff
15850+/* DPI PIXEL FORMATS */
15851+#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
15852+#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
15853+#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
15854+ * 666 FORMAT
15855+ */
15856+#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
15857+#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
15858+ * with Sync Pulse
15859+ */
15860+#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode
15861+ * with Sync events
15862+ */
15863+#define BURST_MODE 0x03 /* Burst Mode */
15864+#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
15865+#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
15866+#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
15867+#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
15868+#define DBI_NOT_SUPPORTED 0x00 /* command mode
15869+ * is not supported
15870+ */
15871+#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
15872+#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
15873+#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
15874+#define DBI_COMMAND_BUFFER_SIZE 0x120 /* Allocate at least
15875+ * 0x100 Byte with 32
15876+ * byte alignment
15877+ */
15878+#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least
15879+ * 0x100 Byte with 32
15880+ * byte alignment
15881+ */
15882+#define ALIGNMENT_32BYTE_MASK (~(BIT0|BIT1|BIT2|BIT3|BIT4))
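ALIGNMENT_32BYTE_MASK clears the five low address bits, and the two DBI buffer sizes above are padded to 0x120 bytes precisely so a 0x100-byte payload can be placed on a 32-byte boundary inside them. The round-up helper below is offered only as an illustration; its name is not from the patch.

/*
 * Sketch: round a DBI buffer address up to the next 32-byte boundary
 * using ALIGNMENT_32BYTE_MASK (which clears BIT0..BIT4). Allocating
 * DBI_COMMAND_BUFFER_SIZE / DBI_DATA_BUFFER_SIZE (0x120) leaves room
 * for a 0x100-byte payload after this adjustment.
 */
static u32 example_dbi_align_32(u32 addr)
{
	return (addr + (32 - 1)) & ALIGNMENT_32BYTE_MASK;
}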
15883+#define SKU_83 0x01
15884+#define SKU_100 0x02
15885+#define SKU_100L 0x04
15886+#define SKU_BYPASS 0x08
15887+#if 0
15888+/* ************************************************************************* *\
15889+DSI command data structure
15890+\* ************************************************************************* */
15891+union DSI_LONG_PACKET_HEADER {
15892+ u32 DSI_longPacketHeader;
15893+ struct {
15894+ u8 dataID;
15895+ u16 wordCount;
15896+ u8 ECC;
15897+ };
15898+#if 0 /*FIXME JLIU7 */
15899+ struct {
15900+ u8 DT:6;
15901+ u8 VC:2;
15902+ };
15903+#endif /*FIXME JLIU7 */
15904+};
15905+
15906+union MIPI_ADPT_CMD_LNG_REG {
15907+ u32 commnadLengthReg;
15908+ struct {
15909+ u8 command0;
15910+ u8 command1;
15911+ u8 command2;
15912+ u8 command3;
15913+ };
15914+};
15915+
15916+struct SET_COLUMN_ADDRESS_DATA {
15917+ u8 command;
15918+ u16 SC; /* Start Column */
15919+ u16 EC; /* End Column */
15920+};
15921+
15922+struct SET_PAGE_ADDRESS_DATA {
15923+ u8 command;
15924+ u16 SP; /* Start Page */
15925+ u16 EP; /* End Page */
15926+};
15927+#endif
15928diff -uNr a/drivers/gpu/drm/psb/psb_intel_sdvo.c b/drivers/gpu/drm/psb/psb_intel_sdvo.c
15929--- a/drivers/gpu/drm/psb/psb_intel_sdvo.c 1969-12-31 16:00:00.000000000 -0800
15930+++ b/drivers/gpu/drm/psb/psb_intel_sdvo.c 2009-04-07 13:28:38.000000000 -0700
15931@@ -0,0 +1,1232 @@
15932+/*
15933+ * Copyright © 2006-2007 Intel Corporation
15934+ *
15935+ * Permission is hereby granted, free of charge, to any person obtaining a
15936+ * copy of this software and associated documentation files (the "Software"),
15937+ * to deal in the Software without restriction, including without limitation
15938+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15939+ * and/or sell copies of the Software, and to permit persons to whom the
15940+ * Software is furnished to do so, subject to the following conditions:
15941+ *
15942+ * The above copyright notice and this permission notice (including the next
15943+ * paragraph) shall be included in all copies or substantial portions of the
15944+ * Software.
15945+ *
15946+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15947+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15948+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
15949+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
15950+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
15951+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
15952+ * DEALINGS IN THE SOFTWARE.
15953+ *
15954+ * Authors:
15955+ * Eric Anholt <eric@anholt.net>
15956+ */
15957+/*
15958+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
15959+ * Jesse Barnes <jesse.barnes@intel.com>
15960+ */
15961+
15962+#include <linux/i2c.h>
15963+#include <linux/delay.h>
15964+#include <drm/drm_crtc.h>
15965+#include "psb_intel_sdvo_regs.h"
15966+
15967+struct psb_intel_sdvo_priv {
15968+ struct psb_intel_i2c_chan *i2c_bus;
15969+ int slaveaddr;
15970+ int output_device;
15971+
15972+ u16 active_outputs;
15973+
15974+ struct psb_intel_sdvo_caps caps;
15975+ int pixel_clock_min, pixel_clock_max;
15976+
15977+ int save_sdvo_mult;
15978+ u16 save_active_outputs;
15979+ struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
15980+ struct psb_intel_sdvo_dtd save_output_dtd[16];
15981+ u32 save_SDVOX;
15982+};
15983+
15984+/**
15985+ * Writes the SDVOB or SDVOC with the given value, but always writes both
15986+ * SDVOB and SDVOC to work around apparent hardware issues (according to
15987+ * comments in the BIOS).
15988+ */
15989+void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output, u32 val)
15990+{
15991+ struct drm_device *dev = psb_intel_output->base.dev;
15992+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
15993+ u32 bval = val, cval = val;
15994+ int i;
15995+
15996+ if (sdvo_priv->output_device == SDVOB)
15997+ cval = REG_READ(SDVOC);
15998+ else
15999+ bval = REG_READ(SDVOB);
16000+ /*
16001+ * Write the registers twice for luck. Sometimes,
16002+ * writing them only once doesn't appear to 'stick'.
16003+ * The BIOS does this too. Yay, magic
16004+ */
16005+ for (i = 0; i < 2; i++) {
16006+ REG_WRITE(SDVOB, bval);
16007+ REG_READ(SDVOB);
16008+ REG_WRITE(SDVOC, cval);
16009+ REG_READ(SDVOC);
16010+ }
16011+}
16012+
16013+static bool psb_intel_sdvo_read_byte(struct psb_intel_output *psb_intel_output,
16014+ u8 addr, u8 *ch)
16015+{
16016+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16017+ u8 out_buf[2];
16018+ u8 buf[2];
16019+ int ret;
16020+
16021+ struct i2c_msg msgs[] = {
16022+ {
16023+ .addr = sdvo_priv->i2c_bus->slave_addr,
16024+ .flags = 0,
16025+ .len = 1,
16026+ .buf = out_buf,
16027+ },
16028+ {
16029+ .addr = sdvo_priv->i2c_bus->slave_addr,
16030+ .flags = I2C_M_RD,
16031+ .len = 1,
16032+ .buf = buf,
16033+ }
16034+ };
16035+
16036+ out_buf[0] = addr;
16037+ out_buf[1] = 0;
16038+
16039+ ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
16040+ if (ret == 2) {
16041+ /* DRM_DEBUG("got back from addr %02X = %02x\n",
16042+ * out_buf[0], buf[0]);
16043+ */
16044+ *ch = buf[0];
16045+ return true;
16046+ }
16047+
16048+ DRM_DEBUG("i2c transfer returned %d\n", ret);
16049+ return false;
16050+}
16051+
16052+static bool psb_intel_sdvo_write_byte(struct psb_intel_output *psb_intel_output,
16053+ int addr, u8 ch)
16054+{
16055+ u8 out_buf[2];
16056+ struct i2c_msg msgs[] = {
16057+ {
16058+ .addr = psb_intel_output->i2c_bus->slave_addr,
16059+ .flags = 0,
16060+ .len = 2,
16061+ .buf = out_buf,
16062+ }
16063+ };
16064+
16065+ out_buf[0] = addr;
16066+ out_buf[1] = ch;
16067+
16068+ if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1)
16069+ return true;
16070+ return false;
16071+}
16072+
16073+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
16074+/** Mapping of command numbers to names, for debug output */
16075+static const struct _sdvo_cmd_name {
16076+ u8 cmd;
16077+ char *name;
16078+} sdvo_cmd_names[] = {
16079+SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
16080+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
16081+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
16082+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
16083+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
16084+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
16085+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
16086+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
16087+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
16088+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
16089+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
16090+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
16091+ SDVO_CMD_NAME_ENTRY
16092+ (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
16093+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
16094+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
16095+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
16096+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
16097+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
16098+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
16099+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
16100+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
16101+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
16102+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
16103+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
16104+ SDVO_CMD_NAME_ENTRY
16105+ (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
16106+ SDVO_CMD_NAME_ENTRY
16107+ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
16108+ SDVO_CMD_NAME_ENTRY
16109+ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
16110+ SDVO_CMD_NAME_ENTRY
16111+ (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
16112+ SDVO_CMD_NAME_ENTRY
16113+ (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
16114+ SDVO_CMD_NAME_ENTRY
16115+ (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
16116+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
16117+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
16118+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
16119+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
16120+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
16121+ SDVO_CMD_NAME_ENTRY
16122+ (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
16123+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),};
16124+
16125+#define SDVO_NAME(dev_priv) \
16126+ ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
16127+#define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv)
16128+
16129+static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output, u8 cmd,
16130+ void *args, int args_len)
16131+{
16132+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16133+ int i;
16134+
16135+ if (1) {
16136+ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
16137+ for (i = 0; i < args_len; i++)
16138+ printk(KERN_INFO"%02X ", ((u8 *) args)[i]);
16139+ for (; i < 8; i++)
16140+ printk(" ");
16141+ for (i = 0;
16142+ i <
16143+ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]);
16144+ i++) {
16145+ if (cmd == sdvo_cmd_names[i].cmd) {
16146+ printk("(%s)", sdvo_cmd_names[i].name);
16147+ break;
16148+ }
16149+ }
16150+ if (i ==
16151+ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
16152+ printk("(%02X)", cmd);
16153+ printk("\n");
16154+ }
16155+
16156+ for (i = 0; i < args_len; i++) {
16157+ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_ARG_0 - i,
16158+ ((u8 *) args)[i]);
16159+ }
16160+
16161+ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd);
16162+}
16163+
16164+static const char *cmd_status_names[] = {
16165+ "Power on",
16166+ "Success",
16167+ "Not supported",
16168+ "Invalid arg",
16169+ "Pending",
16170+ "Target not specified",
16171+ "Scaling not supported"
16172+};
16173+
16174+static u8 psb_intel_sdvo_read_response(struct psb_intel_output *psb_intel_output,
16175+ void *response, int response_len)
16176+{
16177+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16178+ int i;
16179+ u8 status;
16180+ u8 retry = 50;
16181+
16182+ while (retry--) {
16183+ /* Read the command response */
16184+ for (i = 0; i < response_len; i++) {
16185+ psb_intel_sdvo_read_byte(psb_intel_output,
16186+ SDVO_I2C_RETURN_0 + i,
16187+ &((u8 *) response)[i]);
16188+ }
16189+
16190+ /* read the return status */
16191+ psb_intel_sdvo_read_byte(psb_intel_output, SDVO_I2C_CMD_STATUS,
16192+ &status);
16193+
16194+ if (1) {
16195+ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
16196+ for (i = 0; i < response_len; i++)
16197+ printk(KERN_INFO"%02X ", ((u8 *) response)[i]);
16198+ for (; i < 8; i++)
16199+ printk(" ");
16200+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
16201+ printk(KERN_INFO"(%s)",
16202+ cmd_status_names[status]);
16203+ else
16204+ printk(KERN_INFO"(??? %d)", status);
16205+ printk("\n");
16206+ }
16207+
16208+ if (status != SDVO_CMD_STATUS_PENDING)
16209+ return status;
16210+
16211+ mdelay(50);
16212+ }
16213+
16214+ return status;
16215+}
16216+
16217+int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
16218+{
16219+ if (mode->clock >= 100000)
16220+ return 1;
16221+ else if (mode->clock >= 50000)
16222+ return 2;
16223+ else
16224+ return 4;
16225+}
16226+
16227+/**
16228+ * Don't check status code from this as it switches the bus back to the
16229+ * SDVO chips which defeats the purpose of doing a bus switch in the first
16230+ * place.
16231+ */
16232+void psb_intel_sdvo_set_control_bus_switch(struct psb_intel_output *psb_intel_output,
16233+ u8 target)
16234+{
16235+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
16236+ &target, 1);
16237+}
16238+
16239+static bool psb_intel_sdvo_set_target_input(struct psb_intel_output *psb_intel_output,
16240+ bool target_0, bool target_1)
16241+{
16242+ struct psb_intel_sdvo_set_target_input_args targets = { 0 };
16243+ u8 status;
16244+
16245+ if (target_0 && target_1)
16246+ return SDVO_CMD_STATUS_NOTSUPP;
16247+
16248+ if (target_1)
16249+ targets.target_1 = 1;
16250+
16251+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT,
16252+ &targets, sizeof(targets));
16253+
16254+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16255+
16256+ return status == SDVO_CMD_STATUS_SUCCESS;
16257+}
16258+
16259+/**
16260+ * Return whether each input is trained.
16261+ *
16262+ * This function is making an assumption about the layout of the response,
16263+ * which should be checked against the docs.
16264+ */
16265+static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output
16266+ *psb_intel_output, bool *input_1,
16267+ bool *input_2)
16268+{
16269+ struct psb_intel_sdvo_get_trained_inputs_response response;
16270+ u8 status;
16271+
16272+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS,
16273+ NULL, 0);
16274+ status =
16275+ psb_intel_sdvo_read_response(psb_intel_output, &response,
16276+ sizeof(response));
16277+ if (status != SDVO_CMD_STATUS_SUCCESS)
16278+ return false;
16279+
16280+ *input_1 = response.input0_trained;
16281+ *input_2 = response.input1_trained;
16282+ return true;
16283+}
16284+
16285+static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output
16286+ *psb_intel_output, u16 *outputs)
16287+{
16288+ u8 status;
16289+
16290+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS,
16291+ NULL, 0);
16292+ status =
16293+ psb_intel_sdvo_read_response(psb_intel_output, outputs,
16294+ sizeof(*outputs));
16295+
16296+ return status == SDVO_CMD_STATUS_SUCCESS;
16297+}
16298+
16299+static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output
16300+ *psb_intel_output, u16 outputs)
16301+{
16302+ u8 status;
16303+
16304+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS,
16305+ &outputs, sizeof(outputs));
16306+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16307+ return status == SDVO_CMD_STATUS_SUCCESS;
16308+}
16309+
16310+static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output
16311+ *psb_intel_output, int mode)
16312+{
16313+ u8 status, state = SDVO_ENCODER_STATE_ON;
16314+
16315+ switch (mode) {
16316+ case DRM_MODE_DPMS_ON:
16317+ state = SDVO_ENCODER_STATE_ON;
16318+ break;
16319+ case DRM_MODE_DPMS_STANDBY:
16320+ state = SDVO_ENCODER_STATE_STANDBY;
16321+ break;
16322+ case DRM_MODE_DPMS_SUSPEND:
16323+ state = SDVO_ENCODER_STATE_SUSPEND;
16324+ break;
16325+ case DRM_MODE_DPMS_OFF:
16326+ state = SDVO_ENCODER_STATE_OFF;
16327+ break;
16328+ }
16329+
16330+ psb_intel_sdvo_write_cmd(psb_intel_output,
16331+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
16332+ sizeof(state));
16333+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16334+
16335+ return status == SDVO_CMD_STATUS_SUCCESS;
16336+}
16337+
16338+static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output
16339+ *psb_intel_output,
16340+ int *clock_min,
16341+ int *clock_max)
16342+{
16343+ struct psb_intel_sdvo_pixel_clock_range clocks;
16344+ u8 status;
16345+
16346+ psb_intel_sdvo_write_cmd(psb_intel_output,
16347+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL,
16348+ 0);
16349+
16350+ status =
16351+ psb_intel_sdvo_read_response(psb_intel_output, &clocks,
16352+ sizeof(clocks));
16353+
16354+ if (status != SDVO_CMD_STATUS_SUCCESS)
16355+ return false;
16356+
16357+ /* Convert the values from units of 10 kHz to kHz. */
16358+ *clock_min = clocks.min * 10;
16359+ *clock_max = clocks.max * 10;
16360+
16361+ return true;
16362+}
16363+
16364+static bool psb_intel_sdvo_set_target_output(struct psb_intel_output *psb_intel_output,
16365+ u16 outputs)
16366+{
16367+ u8 status;
16368+
16369+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT,
16370+ &outputs, sizeof(outputs));
16371+
16372+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16373+ return status == SDVO_CMD_STATUS_SUCCESS;
16374+}
16375+
16376+static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output,
16377+ u8 cmd, struct psb_intel_sdvo_dtd *dtd)
16378+{
16379+ u8 status;
16380+
16381+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0);
16382+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
16383+ sizeof(dtd->part1));
16384+ if (status != SDVO_CMD_STATUS_SUCCESS)
16385+ return false;
16386+
16387+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0);
16388+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
16389+ sizeof(dtd->part2));
16390+ if (status != SDVO_CMD_STATUS_SUCCESS)
16391+ return false;
16392+
16393+ return true;
16394+}
16395+
16396+static bool psb_intel_sdvo_get_input_timing(struct psb_intel_output *psb_intel_output,
16397+ struct psb_intel_sdvo_dtd *dtd)
16398+{
16399+ return psb_intel_sdvo_get_timing(psb_intel_output,
16400+ SDVO_CMD_GET_INPUT_TIMINGS_PART1,
16401+ dtd);
16402+}
16403+
16404+static bool psb_intel_sdvo_get_output_timing(struct psb_intel_output *psb_intel_output,
16405+ struct psb_intel_sdvo_dtd *dtd)
16406+{
16407+ return psb_intel_sdvo_get_timing(psb_intel_output,
16408+ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1,
16409+ dtd);
16410+}
16411+
16412+static bool psb_intel_sdvo_set_timing(struct psb_intel_output *psb_intel_output,
16413+ u8 cmd, struct psb_intel_sdvo_dtd *dtd)
16414+{
16415+ u8 status;
16416+
16417+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1,
16418+ sizeof(dtd->part1));
16419+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16420+ if (status != SDVO_CMD_STATUS_SUCCESS)
16421+ return false;
16422+
16423+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2,
16424+ sizeof(dtd->part2));
16425+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16426+ if (status != SDVO_CMD_STATUS_SUCCESS)
16427+ return false;
16428+
16429+ return true;
16430+}
16431+
16432+static bool psb_intel_sdvo_set_input_timing(struct psb_intel_output *psb_intel_output,
16433+ struct psb_intel_sdvo_dtd *dtd)
16434+{
16435+ return psb_intel_sdvo_set_timing(psb_intel_output,
16436+ SDVO_CMD_SET_INPUT_TIMINGS_PART1,
16437+ dtd);
16438+}
16439+
16440+static bool psb_intel_sdvo_set_output_timing(struct psb_intel_output *psb_intel_output,
16441+ struct psb_intel_sdvo_dtd *dtd)
16442+{
16443+ return psb_intel_sdvo_set_timing(psb_intel_output,
16444+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1,
16445+ dtd);
16446+}
16447+
16448+#if 0
16449+static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_output
16450+ *psb_intel_output,
16451+ struct psb_intel_sdvo_dtd
16452+ *dtd)
16453+{
16454+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16455+ u8 status;
16456+
16457+ psb_intel_sdvo_write_cmd(psb_intel_output,
16458+ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
16459+ NULL, 0);
16460+
16461+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
16462+ sizeof(dtd->part1));
16463+ if (status != SDVO_CMD_STATUS_SUCCESS)
16464+ return false;
16465+
16466+ psb_intel_sdvo_write_cmd(psb_intel_output,
16467+ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
16468+ NULL, 0);
16469+ status =
16470+ psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
16471+ sizeof(dtd->part2));
16472+ if (status != SDVO_CMD_STATUS_SUCCESS)
16473+ return false;
16474+
16475+ return true;
16476+}
16477+#endif
16478+
16479+static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output
16480+ *psb_intel_output)
16481+{
16482+ u8 response, status;
16483+
16484+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT,
16485+ NULL, 0);
16486+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1);
16487+
16488+ if (status != SDVO_CMD_STATUS_SUCCESS) {
16489+ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
16490+ return SDVO_CLOCK_RATE_MULT_1X;
16491+ } else {
16492+ DRM_DEBUG("Current clock rate multiplier: %d\n", response);
16493+ }
16494+
16495+ return response;
16496+}
16497+
16498+static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output
16499+ *psb_intel_output, u8 val)
16500+{
16501+ u8 status;
16502+
16503+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT,
16504+ &val, 1);
16505+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16506+ if (status != SDVO_CMD_STATUS_SUCCESS)
16507+ return false;
16508+
16509+ return true;
16510+}
16511+
16512+static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
16513+ struct drm_display_mode *mode,
16514+ struct drm_display_mode *adjusted_mode)
16515+{
16516+ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
16517+ * device will be told of the multiplier during mode_set.
16518+ */
16519+ adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
16520+ return true;
16521+}
16522+
16523+static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
16524+ struct drm_display_mode *mode,
16525+ struct drm_display_mode *adjusted_mode)
16526+{
16527+ struct drm_device *dev = encoder->dev;
16528+ struct drm_crtc *crtc = encoder->crtc;
16529+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
16530+ struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder);
16531+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16532+ u16 width, height;
16533+ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
16534+ u16 h_sync_offset, v_sync_offset;
16535+ u32 sdvox;
16536+ struct psb_intel_sdvo_dtd output_dtd;
16537+ int sdvo_pixel_multiply;
16538+
16539+ if (!mode)
16540+ return;
16541+
16542+ width = mode->crtc_hdisplay;
16543+ height = mode->crtc_vdisplay;
16544+
16545+ /* do some mode translations */
16546+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
16547+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
16548+
16549+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
16550+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
16551+
16552+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
16553+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
16554+
16555+ output_dtd.part1.clock = mode->clock / 10;
16556+ output_dtd.part1.h_active = width & 0xff;
16557+ output_dtd.part1.h_blank = h_blank_len & 0xff;
16558+ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
16559+ ((h_blank_len >> 8) & 0xf);
16560+ output_dtd.part1.v_active = height & 0xff;
16561+ output_dtd.part1.v_blank = v_blank_len & 0xff;
16562+ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
16563+ ((v_blank_len >> 8) & 0xf);
16564+
16565+ output_dtd.part2.h_sync_off = h_sync_offset;
16566+ output_dtd.part2.h_sync_width = h_sync_len & 0xff;
16567+ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
16568+ (v_sync_len & 0xf);
16569+ output_dtd.part2.sync_off_width_high =
16570+ ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) |
16571+ ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4);
16572+
16573+ output_dtd.part2.dtd_flags = 0x18;
16574+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
16575+ output_dtd.part2.dtd_flags |= 0x2;
16576+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
16577+ output_dtd.part2.dtd_flags |= 0x4;
16578+
16579+ output_dtd.part2.sdvo_flags = 0;
16580+ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
16581+ output_dtd.part2.reserved = 0;
16582+
16583+ /* Set the output timing to the screen */
16584+ psb_intel_sdvo_set_target_output(psb_intel_output,
16585+ sdvo_priv->active_outputs);
16586+ psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd);
16587+
16588+ /* Set the input timing to the screen. Always assume input 0. */
16589+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
16590+
16591+ /* We would like to use psb_intel_sdvo_create_preferred_input_timing() to
16592+ * provide the device with a timing it can support, if it supports that
16593+ * feature. However, presumably we would need to adjust the CRTC to
16594+ * output the preferred timing, and we don't support that currently.
16595+ */
16596+#if 0
16597+ success =
16598+ psb_intel_sdvo_create_preferred_input_timing(psb_intel_output, clock,
16599+ width, height);
16600+ if (success) {
16601+ struct psb_intel_sdvo_dtd *input_dtd;
16602+
16603+ psb_intel_sdvo_get_preferred_input_timing(psb_intel_output,
16604+ &input_dtd);
16605+ psb_intel_sdvo_set_input_timing(psb_intel_output, &input_dtd);
16606+ }
16607+#else
16608+ psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd);
16609+#endif
16610+
16611+ switch (psb_intel_sdvo_get_pixel_multiplier(mode)) {
16612+ case 1:
16613+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
16614+ SDVO_CLOCK_RATE_MULT_1X);
16615+ break;
16616+ case 2:
16617+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
16618+ SDVO_CLOCK_RATE_MULT_2X);
16619+ break;
16620+ case 4:
16621+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
16622+ SDVO_CLOCK_RATE_MULT_4X);
16623+ break;
16624+ }
16625+
16626+ /* Set the SDVO control regs. */
16627+ if (0 /*IS_I965GM(dev) */) {
16628+ sdvox = SDVO_BORDER_ENABLE;
16629+ } else {
16630+ sdvox = REG_READ(sdvo_priv->output_device);
16631+ switch (sdvo_priv->output_device) {
16632+ case SDVOB:
16633+ sdvox &= SDVOB_PRESERVE_MASK;
16634+ break;
16635+ case SDVOC:
16636+ sdvox &= SDVOC_PRESERVE_MASK;
16637+ break;
16638+ }
16639+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
16640+ }
16641+ if (psb_intel_crtc->pipe == 1)
16642+ sdvox |= SDVO_PIPE_B_SELECT;
16643+
16644+ sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode);
16645+ if (IS_I965G(dev)) {
16646+ /* done in crtc_mode_set as the dpll_md reg must be written
16647+ * early */
16648+ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
16649+ /* done in crtc_mode_set as it lives inside the
16650+ * dpll register */
16651+ } else {
16652+ sdvox |=
16653+ (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
16654+ }
16655+
16656+ psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox);
16657+}
16658+
16659+static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
16660+{
16661+ struct drm_device *dev = encoder->dev;
16662+ struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder);
16663+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16664+ u32 temp;
16665+
16666+ if (mode != DRM_MODE_DPMS_ON) {
16667+ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
16668+ if (0)
16669+ psb_intel_sdvo_set_encoder_power_state(psb_intel_output,
16670+ mode);
16671+
16672+ if (mode == DRM_MODE_DPMS_OFF) {
16673+ temp = REG_READ(sdvo_priv->output_device);
16674+ if ((temp & SDVO_ENABLE) != 0) {
16675+ psb_intel_sdvo_write_sdvox(psb_intel_output,
16676+ temp &
16677+ ~SDVO_ENABLE);
16678+ }
16679+ }
16680+ } else {
16681+ bool input1, input2;
16682+ int i;
16683+ u8 status;
16684+
16685+ temp = REG_READ(sdvo_priv->output_device);
16686+ if ((temp & SDVO_ENABLE) == 0)
16687+ psb_intel_sdvo_write_sdvox(psb_intel_output,
16688+ temp | SDVO_ENABLE);
16689+ for (i = 0; i < 2; i++)
16690+ psb_intel_wait_for_vblank(dev);
16691+
16692+ status =
16693+ psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1,
16694+ &input2);
16695+
16696+
16697+ /* Warn if the device reported failure to sync.
16698+ * A lot of SDVO devices fail to notify of sync, but it's
16699+ * a given that if the status is a success, we succeeded.
16700+ */
16701+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
16702+ DRM_DEBUG
16703+ ("First %s output reported failure to sync\n",
16704+ SDVO_NAME(sdvo_priv));
16705+ }
16706+
16707+ if (0)
16708+ psb_intel_sdvo_set_encoder_power_state(psb_intel_output,
16709+ mode);
16710+ psb_intel_sdvo_set_active_outputs(psb_intel_output,
16711+ sdvo_priv->active_outputs);
16712+ }
16713+ return;
16714+}
16715+
16716+static void psb_intel_sdvo_save(struct drm_connector *connector)
16717+{
16718+ struct drm_device *dev = connector->dev;
16719+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16720+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16721+ int o;
16722+
16723+ sdvo_priv->save_sdvo_mult =
16724+ psb_intel_sdvo_get_clock_rate_mult(psb_intel_output);
16725+ psb_intel_sdvo_get_active_outputs(psb_intel_output,
16726+ &sdvo_priv->save_active_outputs);
16727+
16728+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
16729+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
16730+ psb_intel_sdvo_get_input_timing(psb_intel_output,
16731+ &sdvo_priv->save_input_dtd_1);
16732+ }
16733+
16734+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
16735+ psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
16736+ psb_intel_sdvo_get_input_timing(psb_intel_output,
16737+ &sdvo_priv->save_input_dtd_2);
16738+ }
16739+
16740+ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
16741+ u16 this_output = (1 << o);
16742+ if (sdvo_priv->caps.output_flags & this_output) {
16743+ psb_intel_sdvo_set_target_output(psb_intel_output,
16744+ this_output);
16745+ psb_intel_sdvo_get_output_timing(psb_intel_output,
16746+ &sdvo_priv->
16747+ save_output_dtd[o]);
16748+ }
16749+ }
16750+
16751+ sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device);
16752+}
16753+
16754+static void psb_intel_sdvo_restore(struct drm_connector *connector)
16755+{
16756+ struct drm_device *dev = connector->dev;
16757+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16758+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16759+ int o;
16760+ int i;
16761+ bool input1, input2;
16762+ u8 status;
16763+
16764+ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
16765+
16766+ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
16767+ u16 this_output = (1 << o);
16768+ if (sdvo_priv->caps.output_flags & this_output) {
16769+ psb_intel_sdvo_set_target_output(psb_intel_output,
16770+ this_output);
16771+ psb_intel_sdvo_set_output_timing(psb_intel_output,
16772+ &sdvo_priv->
16773+ save_output_dtd[o]);
16774+ }
16775+ }
16776+
16777+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
16778+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
16779+ psb_intel_sdvo_set_input_timing(psb_intel_output,
16780+ &sdvo_priv->save_input_dtd_1);
16781+ }
16782+
16783+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
16784+ psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
16785+ psb_intel_sdvo_set_input_timing(psb_intel_output,
16786+ &sdvo_priv->save_input_dtd_2);
16787+ }
16788+
16789+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
16790+ sdvo_priv->save_sdvo_mult);
16791+
16792+ REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
16793+
16794+ if (sdvo_priv->save_SDVOX & SDVO_ENABLE) {
16795+ for (i = 0; i < 2; i++)
16796+ psb_intel_wait_for_vblank(dev);
16797+ status =
16798+ psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1,
16799+ &input2);
16800+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
16801+ DRM_DEBUG
16802+ ("First %s output reported failure to sync\n",
16803+ SDVO_NAME(sdvo_priv));
16804+ }
16805+
16806+ psb_intel_sdvo_set_active_outputs(psb_intel_output,
16807+ sdvo_priv->save_active_outputs);
16808+}
16809+
16810+static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
16811+ struct drm_display_mode *mode)
16812+{
16813+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16814+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16815+
16816+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
16817+ return MODE_NO_DBLESCAN;
16818+
16819+ if (sdvo_priv->pixel_clock_min > mode->clock)
16820+ return MODE_CLOCK_LOW;
16821+
16822+ if (sdvo_priv->pixel_clock_max < mode->clock)
16823+ return MODE_CLOCK_HIGH;
16824+
16825+ return MODE_OK;
16826+}
16827+
16828+static bool psb_intel_sdvo_get_capabilities(struct psb_intel_output *psb_intel_output,
16829+ struct psb_intel_sdvo_caps *caps)
16830+{
16831+ u8 status;
16832+
16833+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL,
16834+ 0);
16835+ status =
16836+ psb_intel_sdvo_read_response(psb_intel_output, caps, sizeof(*caps));
16837+ if (status != SDVO_CMD_STATUS_SUCCESS)
16838+ return false;
16839+
16840+ return true;
16841+}
16842+
16843+struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
16844+{
16845+ struct drm_connector *connector = NULL;
16846+ struct psb_intel_output *iout = NULL;
16847+ struct psb_intel_sdvo_priv *sdvo;
16848+
16849+ /* find the sdvo connector */
16850+ list_for_each_entry(connector, &dev->mode_config.connector_list,
16851+ head) {
16852+ iout = to_psb_intel_output(connector);
16853+
16854+ if (iout->type != INTEL_OUTPUT_SDVO)
16855+ continue;
16856+
16857+ sdvo = iout->dev_priv;
16858+
16859+ if (sdvo->output_device == SDVOB && sdvoB)
16860+ return connector;
16861+
16862+ if (sdvo->output_device == SDVOC && !sdvoB)
16863+ return connector;
16864+
16865+ }
16866+
16867+ return NULL;
16868+}
16869+
16870+int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
16871+{
16872+ u8 response[2];
16873+ u8 status;
16874+ struct psb_intel_output *psb_intel_output;
16875+ DRM_DEBUG("\n");
16876+
16877+ if (!connector)
16878+ return 0;
16879+
16880+ psb_intel_output = to_psb_intel_output(connector);
16881+
16882+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
16883+ NULL, 0);
16884+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
16885+
16886+ if (response[0] != 0)
16887+ return 1;
16888+
16889+ return 0;
16890+}
16891+
16892+void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
16893+{
16894+ u8 response[2];
16895+ u8 status;
16896+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16897+
16898+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
16899+ NULL, 0);
16900+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
16901+
16902+ if (on) {
16903+ psb_intel_sdvo_write_cmd(psb_intel_output,
16904+ SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL,
16905+ 0);
16906+ status =
16907+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
16908+
16909+ psb_intel_sdvo_write_cmd(psb_intel_output,
16910+ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
16911+ &response, 2);
16912+ } else {
16913+ response[0] = 0;
16914+ response[1] = 0;
16915+ psb_intel_sdvo_write_cmd(psb_intel_output,
16916+ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
16917+ &response, 2);
16918+ }
16919+
16920+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
16921+ NULL, 0);
16922+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
16923+}
16924+
16925+static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector
16926+ *connector)
16927+{
16928+ u8 response[2];
16929+ u8 status;
16930+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16931+
16932+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS,
16933+ NULL, 0);
16934+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
16935+
16936+ DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
16937+ if ((response[0] != 0) || (response[1] != 0))
16938+ return connector_status_connected;
16939+ else
16940+ return connector_status_disconnected;
16941+}
16942+
16943+static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
16944+{
16945+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16946+
16947+ /* set the bus switch and get the modes */
16948+ psb_intel_sdvo_set_control_bus_switch(psb_intel_output,
16949+ SDVO_CONTROL_BUS_DDC2);
16950+ psb_intel_ddc_get_modes(psb_intel_output);
16951+
16952+ if (list_empty(&connector->probed_modes))
16953+ return 0;
16954+ return 1;
16955+#if 0
16956+ /* Mac mini hack. On this device, I get DDC through the analog, which
16957+ * load-detects as disconnected. I fail to DDC through the SDVO DDC,
16958+ * but it does load-detect as connected. So, just steal the DDC bits
16959+ * from analog when we fail at finding it the right way.
16960+ */
16961+ /* TODO */
16962+ return NULL;
16963+
16964+ return NULL;
16965+#endif
16966+}
16967+
16968+static void psb_intel_sdvo_destroy(struct drm_connector *connector)
16969+{
16970+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16971+
16972+ if (psb_intel_output->i2c_bus)
16973+ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
16974+ drm_sysfs_connector_remove(connector);
16975+ drm_connector_cleanup(connector);
16976+ kfree(psb_intel_output);
16977+}
16978+
16979+static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
16980+ .dpms = psb_intel_sdvo_dpms,
16981+ .mode_fixup = psb_intel_sdvo_mode_fixup,
16982+ .prepare = psb_intel_encoder_prepare,
16983+ .mode_set = psb_intel_sdvo_mode_set,
16984+ .commit = psb_intel_encoder_commit,
16985+};
16986+
16987+static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
16988+ .save = psb_intel_sdvo_save,
16989+ .restore = psb_intel_sdvo_restore,
16990+ .detect = psb_intel_sdvo_detect,
16991+ .fill_modes = drm_helper_probe_single_connector_modes,
16992+ .destroy = psb_intel_sdvo_destroy,
16993+};
16994+
16995+static const struct drm_connector_helper_funcs
16996+ psb_intel_sdvo_connector_helper_funcs = {
16997+ .get_modes = psb_intel_sdvo_get_modes,
16998+ .mode_valid = psb_intel_sdvo_mode_valid,
16999+ .best_encoder = psb_intel_best_encoder,
17000+};
17001+
17002+void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
17003+{
17004+ drm_encoder_cleanup(encoder);
17005+}
17006+
17007+static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
17008+ .destroy = psb_intel_sdvo_enc_destroy,
17009+};
17010+
17011+
17012+void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
17013+{
17014+ struct drm_connector *connector;
17015+ struct psb_intel_output *psb_intel_output;
17016+ struct psb_intel_sdvo_priv *sdvo_priv;
17017+ struct psb_intel_i2c_chan *i2cbus = NULL;
17018+ int connector_type;
17019+ u8 ch[0x40];
17020+ int i;
17021+ int encoder_type, output_id;
17022+
17023+ psb_intel_output =
17024+ kcalloc(sizeof(struct psb_intel_output) +
17025+ sizeof(struct psb_intel_sdvo_priv), 1, GFP_KERNEL);
17026+ if (!psb_intel_output)
17027+ return;
17028+
17029+ connector = &psb_intel_output->base;
17030+
17031+ drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs,
17032+ DRM_MODE_CONNECTOR_Unknown);
17033+ drm_connector_helper_add(connector,
17034+ &psb_intel_sdvo_connector_helper_funcs);
17035+ sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1);
17036+ psb_intel_output->type = INTEL_OUTPUT_SDVO;
17037+
17038+ connector->interlace_allowed = 0;
17039+ connector->doublescan_allowed = 0;
17040+
17041+ /* setup the DDC bus. */
17042+ if (output_device == SDVOB)
17043+ i2cbus =
17044+ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
17045+ else
17046+ i2cbus =
17047+ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
17048+
17049+ if (!i2cbus)
17050+ goto err_connector;
17051+
17052+ sdvo_priv->i2c_bus = i2cbus;
17053+
17054+ if (output_device == SDVOB) {
17055+ output_id = 1;
17056+ sdvo_priv->i2c_bus->slave_addr = 0x38;
17057+ } else {
17058+ output_id = 2;
17059+ sdvo_priv->i2c_bus->slave_addr = 0x39;
17060+ }
17061+
17062+ sdvo_priv->output_device = output_device;
17063+ psb_intel_output->i2c_bus = i2cbus;
17064+ psb_intel_output->dev_priv = sdvo_priv;
17065+
17066+
17067+ /* Read the regs to test if we can talk to the device */
17068+ for (i = 0; i < 0x40; i++) {
17069+ if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
17070+ DRM_DEBUG("No SDVO device found on SDVO%c\n",
17071+ output_device == SDVOB ? 'B' : 'C');
17072+ goto err_i2c;
17073+ }
17074+ }
17075+
17076+ psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps);
17077+
17078+ memset(&sdvo_priv->active_outputs, 0,
17079+ sizeof(sdvo_priv->active_outputs));
17080+
17081+ /* TODO: CVBS, SVID, YPRPB & SCART outputs. */
17082+ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
17083+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
17084+ connector->display_info.subpixel_order =
17085+ SubPixelHorizontalRGB;
17086+ encoder_type = DRM_MODE_ENCODER_DAC;
17087+ connector_type = DRM_MODE_CONNECTOR_VGA;
17088+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
17089+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
17090+ connector->display_info.subpixel_order =
17091+ SubPixelHorizontalRGB;
17092+ encoder_type = DRM_MODE_ENCODER_DAC;
17093+ connector_type = DRM_MODE_CONNECTOR_VGA;
17094+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
17095+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
17096+ connector->display_info.subpixel_order =
17097+ SubPixelHorizontalRGB;
17098+ encoder_type = DRM_MODE_ENCODER_TMDS;
17099+ connector_type = DRM_MODE_CONNECTOR_DVID;
17100+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
17101+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
17102+ connector->display_info.subpixel_order =
17103+ SubPixelHorizontalRGB;
17104+ encoder_type = DRM_MODE_ENCODER_TMDS;
17105+ connector_type = DRM_MODE_CONNECTOR_DVID;
17106+ } else {
17107+ unsigned char bytes[2];
17108+
17109+ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
17110+ DRM_DEBUG
17111+ ("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
17112+ SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
17113+ goto err_i2c;
17114+ }
17115+
17116+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs,
17117+ encoder_type);
17118+ drm_encoder_helper_add(&psb_intel_output->enc,
17119+ &psb_intel_sdvo_helper_funcs);
17120+ connector->connector_type = connector_type;
17121+
17122+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
17123+ &psb_intel_output->enc);
17124+ drm_sysfs_connector_add(connector);
17125+
17126+ /* Set the input timing to the screen. Always assume input 0. */
17127+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
17128+
17129+ psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output,
17130+ &sdvo_priv->pixel_clock_min,
17131+ &sdvo_priv->
17132+ pixel_clock_max);
17133+
17134+
17135+ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
17136+ "clock range %dMHz - %dMHz, "
17137+ "input 1: %c, input 2: %c, "
17138+ "output 1: %c, output 2: %c\n",
17139+ SDVO_NAME(sdvo_priv),
17140+ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
17141+ sdvo_priv->caps.device_rev_id,
17142+ sdvo_priv->pixel_clock_min / 1000,
17143+ sdvo_priv->pixel_clock_max / 1000,
17144+ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
17145+ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
17146+ /* check currently supported outputs */
17147+ sdvo_priv->caps.output_flags &
17148+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
17149+ sdvo_priv->caps.output_flags &
17150+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
17151+
17152+ psb_intel_output->ddc_bus = i2cbus;
17153+
17154+ return;
17155+
17156+err_i2c:
17157+ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
17158+err_connector:
17159+ drm_connector_cleanup(connector);
17160+ kfree(psb_intel_output);
17161+
17162+ return;
17163+}
17164diff -uNr a/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h
17165--- a/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h 1969-12-31 16:00:00.000000000 -0800
17166+++ b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h 2009-04-07 13:28:38.000000000 -0700
17167@@ -0,0 +1,328 @@
17168+/*
17169+ * Copyright (c) 2008, Intel Corporation
17170+ *
17171+ * Permission is hereby granted, free of charge, to any person obtaining a
17172+ * copy of this software and associated documentation files (the "Software"),
17173+ * to deal in the Software without restriction, including without limitation
17174+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17175+ * and/or sell copies of the Software, and to permit persons to whom the
17176+ * Software is furnished to do so, subject to the following conditions:
17177+ *
17178+ * The above copyright notice and this permission notice (including the next
17179+ * paragraph) shall be included in all copies or substantial portions of the
17180+ * Software.
17181+ *
17182+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17183+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17184+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17185+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17186+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17187+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
17188+ * DEALINGS IN THE SOFTWARE.
17189+ *
17190+ * Authors:
17191+ * Eric Anholt <eric@anholt.net>
17192+ */
17193+
17194+/**
17195+ * @file SDVO command definitions and structures.
17196+ */
17197+
17198+#define SDVO_OUTPUT_FIRST (0)
17199+#define SDVO_OUTPUT_TMDS0 (1 << 0)
17200+#define SDVO_OUTPUT_RGB0 (1 << 1)
17201+#define SDVO_OUTPUT_CVBS0 (1 << 2)
17202+#define SDVO_OUTPUT_SVID0 (1 << 3)
17203+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
17204+#define SDVO_OUTPUT_SCART0 (1 << 5)
17205+#define SDVO_OUTPUT_LVDS0 (1 << 6)
17206+#define SDVO_OUTPUT_TMDS1 (1 << 8)
17207+#define SDVO_OUTPUT_RGB1 (1 << 9)
17208+#define SDVO_OUTPUT_CVBS1 (1 << 10)
17209+#define SDVO_OUTPUT_SVID1 (1 << 11)
17210+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
17211+#define SDVO_OUTPUT_SCART1 (1 << 13)
17212+#define SDVO_OUTPUT_LVDS1 (1 << 14)
17213+#define SDVO_OUTPUT_LAST (14)
17214+
17215+struct psb_intel_sdvo_caps {
17216+ u8 vendor_id;
17217+ u8 device_id;
17218+ u8 device_rev_id;
17219+ u8 sdvo_version_major;
17220+ u8 sdvo_version_minor;
17221+ unsigned int sdvo_inputs_mask:2;
17222+ unsigned int smooth_scaling:1;
17223+ unsigned int sharp_scaling:1;
17224+ unsigned int up_scaling:1;
17225+ unsigned int down_scaling:1;
17226+ unsigned int stall_support:1;
17227+ unsigned int pad:1;
17228+ u16 output_flags;
17229+} __attribute__ ((packed));
17230+
17231+/** This matches the EDID DTD structure, more or less */
17232+struct psb_intel_sdvo_dtd {
17233+ struct {
17234+ u16 clock; /**< pixel clock, in 10kHz units */
17235+ u8 h_active; /**< lower 8 bits (pixels) */
17236+ u8 h_blank; /**< lower 8 bits (pixels) */
17237+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
17238+ u8 v_active; /**< lower 8 bits (lines) */
17239+ u8 v_blank; /**< lower 8 bits (lines) */
17240+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
17241+ } part1;
17242+
17243+ struct {
17244+ u8 h_sync_off;
17245+ /**< lower 8 bits, from hblank start */
17246+ u8 h_sync_width;/**< lower 8 bits (pixels) */
17247+ /** lower 4 bits each vsync offset, vsync width */
17248+ u8 v_sync_off_width;
17249+ /**
17250+ * 2 high bits of hsync offset, 2 high bits of hsync width,
17251+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
17252+ */
17253+ u8 sync_off_width_high;
17254+ u8 dtd_flags;
17255+ u8 sdvo_flags;
17256+ /** bits 6-7 of vsync offset at bits 6-7 */
17257+ u8 v_sync_off_high;
17258+ u8 reserved;
17259+ } part2;
17260+} __attribute__ ((packed));
17261+
17262+struct psb_intel_sdvo_pixel_clock_range {
17263+ u16 min; /**< pixel clock, in 10kHz units */
17264+ u16 max; /**< pixel clock, in 10kHz units */
17265+} __attribute__ ((packed));
17266+
17267+struct psb_intel_sdvo_preferred_input_timing_args {
17268+ u16 clock;
17269+ u16 width;
17270+ u16 height;
17271+} __attribute__ ((packed));
17272+
17273+/* I2C registers for SDVO */
17274+#define SDVO_I2C_ARG_0 0x07
17275+#define SDVO_I2C_ARG_1 0x06
17276+#define SDVO_I2C_ARG_2 0x05
17277+#define SDVO_I2C_ARG_3 0x04
17278+#define SDVO_I2C_ARG_4 0x03
17279+#define SDVO_I2C_ARG_5 0x02
17280+#define SDVO_I2C_ARG_6 0x01
17281+#define SDVO_I2C_ARG_7 0x00
17282+#define SDVO_I2C_OPCODE 0x08
17283+#define SDVO_I2C_CMD_STATUS 0x09
17284+#define SDVO_I2C_RETURN_0 0x0a
17285+#define SDVO_I2C_RETURN_1 0x0b
17286+#define SDVO_I2C_RETURN_2 0x0c
17287+#define SDVO_I2C_RETURN_3 0x0d
17288+#define SDVO_I2C_RETURN_4 0x0e
17289+#define SDVO_I2C_RETURN_5 0x0f
17290+#define SDVO_I2C_RETURN_6 0x10
17291+#define SDVO_I2C_RETURN_7 0x11
17292+#define SDVO_I2C_VENDOR_BEGIN 0x20
17293+
17294+/* Status results */
17295+#define SDVO_CMD_STATUS_POWER_ON 0x0
17296+#define SDVO_CMD_STATUS_SUCCESS 0x1
17297+#define SDVO_CMD_STATUS_NOTSUPP 0x2
17298+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
17299+#define SDVO_CMD_STATUS_PENDING 0x4
17300+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
17301+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
17302+
17303+/* SDVO commands, argument/result registers */
17304+
17305+#define SDVO_CMD_RESET 0x01
17306+
17307+/** Returns a struct psb_intel_sdvo_caps */
17308+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
17309+
17310+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
17311+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
17312+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
17313+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
17314+
17315+/**
17316+ * Reports which inputs are trained (managed to sync).
17317+ *
17318+ * Devices must have trained within 2 vsyncs of a mode change.
17319+ */
17320+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
17321+struct psb_intel_sdvo_get_trained_inputs_response {
17322+ unsigned int input0_trained:1;
17323+ unsigned int input1_trained:1;
17324+ unsigned int pad:6;
17325+} __attribute__ ((packed));
17326+
17327+/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */
17328+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
17329+
17330+/**
17331+ * Sets the current set of active outputs.
17332+ *
17333+ * Takes a struct psb_intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
17334+ * on multi-output devices.
17335+ */
17336+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
17337+
17338+/**
17339+ * Returns the current mapping of SDVO inputs to outputs on the device.
17340+ *
17341+ * Returns two struct psb_intel_sdvo_output_flags structures.
17342+ */
17343+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
17344+
17345+/**
17346+ * Sets the current mapping of SDVO inputs to outputs on the device.
17347+ *
17349+ * Takes two struct psb_intel_sdvo_output_flags structures.
17349+ */
17350+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
17351+
17352+/**
17353+ * Returns a struct psb_intel_sdvo_output_flags of attached displays.
17354+ */
17355+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
17356+
17357+/**
17358+ * Returns a struct psb_intel_sdvo_output_flags of displays supporting hot plugging.
17359+ */
17360+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
17361+
17362+/**
17363+ * Takes a struct psb_intel_sdvo_output_flags.
17364+ */
17365+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
17366+
17367+/**
17368+ * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug
17369+ * interrupts enabled.
17370+ */
17371+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
17372+
17373+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
17374+struct psb_intel_sdvo_get_interrupt_event_source_response {
17375+ u16 interrupt_status;
17376+ unsigned int ambient_light_interrupt:1;
17377+ unsigned int pad:7;
17378+} __attribute__ ((packed));
17379+
17380+/**
17381+ * Selects which input is affected by future input commands.
17382+ *
17383+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
17384+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
17385+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
17386+ */
17387+#define SDVO_CMD_SET_TARGET_INPUT 0x10
17388+struct psb_intel_sdvo_set_target_input_args {
17389+ unsigned int target_1:1;
17390+ unsigned int pad:7;
17391+} __attribute__ ((packed));
17392+
17393+/**
17394+ * Takes a struct psb_intel_sdvo_output_flags of which outputs are targeted by
17395+ * future output commands.
17396+ *
17397+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
17398+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
17399+ */
17400+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
17401+
17402+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
17403+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
17404+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
17405+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
17406+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
17407+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
17408+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
17409+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
17410+/* Part 1 */
17411+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
17412+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
17413+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
17414+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
17415+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
17416+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
17417+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
17418+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
17419+/* Part 2 */
17420+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
17421+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
17422+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
17423+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
17424+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
17425+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
17426+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
17427+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
17428+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
17429+# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
17430+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
17431+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
17432+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
17433+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
17434+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
17435+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
17436+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
17437+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
17438+
17439+/**
17440+ * Generates a DTD based on the given width, height, and flags.
17441+ *
17442+ * This will be supported by any device supporting scaling or interlaced
17443+ * modes.
17444+ */
17445+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
17446+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
17447+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
17448+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
17449+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
17450+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
17451+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
17452+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
17453+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
17454+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
17455+
17456+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
17457+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
17458+
17459+/** Returns a struct psb_intel_sdvo_pixel_clock_range */
17460+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
17461+/** Returns a struct psb_intel_sdvo_pixel_clock_range */
17462+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
17463+
17464+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
17465+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
17466+
17467+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
17468+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
17469+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
17470+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
17471+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
17472+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
17473+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
17474+
17475+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
17476+
17477+#define SDVO_CMD_GET_TV_FORMAT 0x28
17478+
17479+#define SDVO_CMD_SET_TV_FORMAT 0x29
17480+
17481+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
17482+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
17483+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
17484+# define SDVO_ENCODER_STATE_ON (1 << 0)
17485+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
17486+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
17487+# define SDVO_ENCODER_STATE_OFF (1 << 3)
17488+
17489+#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
17490+
17491+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
17492+# define SDVO_CONTROL_BUS_PROM 0x0
17493+# define SDVO_CONTROL_BUS_DDC1 0x1
17494+# define SDVO_CONTROL_BUS_DDC2 0x2
17495+# define SDVO_CONTROL_BUS_DDC3 0x3
17496diff -uNr a/drivers/gpu/drm/psb/psb_irq.c b/drivers/gpu/drm/psb/psb_irq.c
17497--- a/drivers/gpu/drm/psb/psb_irq.c 1969-12-31 16:00:00.000000000 -0800
17498+++ b/drivers/gpu/drm/psb/psb_irq.c 2009-04-07 13:28:38.000000000 -0700
17499@@ -0,0 +1,420 @@
17500+/**************************************************************************
17501+ * Copyright (c) 2007, Intel Corporation.
17502+ * All Rights Reserved.
17503+ *
17504+ * This program is free software; you can redistribute it and/or modify it
17505+ * under the terms and conditions of the GNU General Public License,
17506+ * version 2, as published by the Free Software Foundation.
17507+ *
17508+ * This program is distributed in the hope it will be useful, but WITHOUT
17509+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17510+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17511+ * more details.
17512+ *
17513+ * You should have received a copy of the GNU General Public License along with
17514+ * this program; if not, write to the Free Software Foundation, Inc.,
17515+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17516+ *
17517+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
17518+ * develop this driver.
17519+ *
17520+ **************************************************************************/
17521+/*
17522+ */
17523+
17524+#include <drm/drmP.h>
17525+#include "psb_drv.h"
17526+#include "psb_reg.h"
17527+#include "psb_msvdx.h"
17528+#include "lnc_topaz.h"
17529+
17530+/*
17531+ * Video display controller interrupt.
17532+ */
17533+
17534+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
17535+{
17536+ struct drm_psb_private *dev_priv =
17537+ (struct drm_psb_private *) dev->dev_private;
17538+ int wake = 0;
17539+
17540+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
17541+#ifdef PSB_FIXME
17542+ atomic_inc(&dev->vbl_received);
17543+#endif
17544+ wake = 1;
17545+ PSB_WVDC32(_PSB_VBLANK_INTERRUPT_ENABLE |
17546+ _PSB_VBLANK_CLEAR, PSB_PIPEASTAT);
17547+ }
17548+
17549+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
17550+#ifdef PSB_FIXME
17551+ atomic_inc(&dev->vbl_received2);
17552+#endif
17553+ wake = 1;
17554+ PSB_WVDC32(_PSB_VBLANK_INTERRUPT_ENABLE |
17555+ _PSB_VBLANK_CLEAR, PSB_PIPEBSTAT);
17556+ }
17557+
17558+ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
17559+ (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
17560+ DRM_READMEMORYBARRIER();
17561+
17562+#ifdef PSB_FIXME
17563+ if (wake) {
17564+ DRM_WAKEUP(&dev->vbl_queue);
17565+ drm_vbl_send_signals(dev);
17566+ }
17567+#endif
17568+}
17569+
17570+/*
17571+ * SGX interrupt source 1.
17572+ */
17573+
17574+static void psb_sgx_interrupt(struct drm_device *dev, uint32_t sgx_stat,
17575+ uint32_t sgx_stat2)
17576+{
17577+ struct drm_psb_private *dev_priv =
17578+ (struct drm_psb_private *) dev->dev_private;
17579+
17580+ if (sgx_stat & _PSB_CE_TWOD_COMPLETE) {
17581+ DRM_WAKEUP(&dev_priv->event_2d_queue);
17582+ psb_fence_handler(dev, PSB_ENGINE_2D);
17583+ }
17584+
17585+ if (unlikely(sgx_stat2 & _PSB_CE2_BIF_REQUESTER_FAULT))
17586+ psb_print_pagefault(dev_priv);
17587+
17588+ psb_scheduler_handler(dev_priv, sgx_stat);
17589+}
17590+
17591+/*
17592+ * MSVDX interrupt.
17593+ */
17594+static void psb_msvdx_interrupt(struct drm_device *dev,
17595+ uint32_t msvdx_stat)
17596+{
17597+ struct drm_psb_private *dev_priv =
17598+ (struct drm_psb_private *) dev->dev_private;
17599+
17600+ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
17601+ /* Ideally we should never get to this */
17602+ PSB_DEBUG_IRQ("MSVDX:MMU Fault:0x%x fence2_irq_on=%d\n",
17603+ msvdx_stat, dev_priv->fence2_irq_on);
17604+
17605+ /* Pause MMU */
17606+ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
17607+ MSVDX_MMU_CONTROL0);
17608+ DRM_WRITEMEMORYBARRIER();
17609+
17610+ /* Clear this interrupt bit only */
17611+ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
17612+ MSVDX_INTERRUPT_CLEAR);
17613+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
17614+ DRM_READMEMORYBARRIER();
17615+
17616+ dev_priv->msvdx_needs_reset = 1;
17617+ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
17618+ PSB_DEBUG_IRQ
17619+ ("MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d(MTX)\n",
17620+ msvdx_stat, dev_priv->fence2_irq_on);
17621+
17622+ /* Clear all interrupt bits */
17623+ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
17624+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
17625+ DRM_READMEMORYBARRIER();
17626+
17627+ psb_msvdx_mtx_interrupt(dev);
17628+ }
17629+}
17630+
17631+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
17632+{
17633+ struct drm_device *dev = (struct drm_device *) arg;
17634+ struct drm_psb_private *dev_priv =
17635+ (struct drm_psb_private *) dev->dev_private;
17636+ uint32_t vdc_stat, msvdx_int = 0, topaz_int = 0;
17637+ uint32_t sgx_stat = 0;
17638+ uint32_t sgx_stat2 = 0;
17639+ uint32_t sgx_int = 0;
17640+ int handled = 0;
17641+
17642+ spin_lock(&dev_priv->irqmask_lock);
17643+
17644+ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
17645+
17646+ if (vdc_stat & _PSB_IRQ_SGX_FLAG) {
17647+ PSB_DEBUG_IRQ("Got SGX interrupt\n");
17648+ sgx_int = 1;
17649+ }
17650+ if (vdc_stat & _PSB_IRQ_MSVDX_FLAG) {
17651+ PSB_DEBUG_IRQ("Got MSVDX interrupt\n");
17652+ msvdx_int = 1;
17653+ }
17654+
17655+ if (vdc_stat & _LNC_IRQ_TOPAZ_FLAG) {
17656+ PSB_DEBUG_IRQ("Got TOPAZ interrupt\n");
17657+ topaz_int = 1;
17658+ }
17659+ if (sgx_int && (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)) {
17660+ sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
17661+ sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
17662+
17663+ sgx_stat2 &= dev_priv->sgx2_irq_mask;
17664+ sgx_stat &= dev_priv->sgx_irq_mask;
17665+ PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
17666+ PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
17667+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
17668+ } else if (unlikely(PSB_D_PM & drm_psb_debug)) {
17669+ if (sgx_int)
17670+ PSB_DEBUG_PM("sgx int in down mode\n");
17671+ }
17672+ vdc_stat &= dev_priv->vdc_irq_mask;
17673+ spin_unlock(&dev_priv->irqmask_lock);
17674+
17675+ if (msvdx_int) {
17676+ uint32_t msvdx_stat = 0;
17677+
17678+ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
17679+ psb_msvdx_interrupt(dev, msvdx_stat);
17680+ handled = 1;
17681+ }
17682+
17683+ if (IS_MRST(dev) && topaz_int) {
17684+ uint32_t topaz_stat = 0;
17685+
17686+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &topaz_stat);
17687+ lnc_topaz_interrupt(dev, topaz_stat);
17688+ handled = 1;
17689+ }
17690+
17691+ if (vdc_stat) {
17692+ /* MSVDX IRQ status is part of vdc_irq_mask */
17693+ psb_vdc_interrupt(dev, vdc_stat);
17694+ handled = 1;
17695+ }
17696+
17697+ if (sgx_stat || sgx_stat2) {
17698+
17699+ psb_sgx_interrupt(dev, sgx_stat, sgx_stat2);
17700+ handled = 1;
17701+ }
17702+
17703+ if (!handled)
17704+ return IRQ_NONE;
17705+
17706+
17707+ return IRQ_HANDLED;
17708+}
17709+
17710+void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv)
17711+{
17712+ unsigned long mtx_int = 0;
17713+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
17714+
17715+ /* Clear MTX interrupt */
17716+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
17717+ 1);
17718+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
17719+}
17720+
17721+void psb_irq_preinstall(struct drm_device *dev)
17722+{
17723+ struct drm_psb_private *dev_priv =
17724+ (struct drm_psb_private *) dev->dev_private;
17725+ unsigned long mtx_int = 0;
17726+ unsigned long irqflags;
17727+ PSB_DEBUG_PM("psb_irq_preinstall\n");
17728+
17729+ down_read(&dev_priv->sgx_sem);
17730+ psb_check_power_state(dev, PSB_DEVICE_SGX);
17731+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17732+
17733+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
17734+ PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
17735+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
17736+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
17737+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
17738+
17739+ dev_priv->sgx_irq_mask = _PSB_CE_PIXELBE_END_RENDER |
17740+ _PSB_CE_DPM_3D_MEM_FREE |
17741+ _PSB_CE_TA_FINISHED |
17742+ _PSB_CE_DPM_REACHED_MEM_THRESH |
17743+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
17744+ _PSB_CE_DPM_OUT_OF_MEMORY_MT |
17745+ _PSB_CE_TA_TERMINATE | _PSB_CE_SW_EVENT;
17746+
17747+ dev_priv->sgx2_irq_mask = _PSB_CE2_BIF_REQUESTER_FAULT;
17748+
17749+ dev_priv->vdc_irq_mask = _PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG;
17750+
17751+ if (!drm_psb_disable_vsync)
17752+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG |
17753+ _PSB_VSYNC_PIPEB_FLAG;
17754+
17755+ /* Clear MTX interrupt */
17756+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS,
17757+ CR_MTX_IRQ, 1);
17758+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
17759+
17760+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17761+ up_read(&dev_priv->sgx_sem);
17762+}
17763+
17764+void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv)
17765+{
17766+ /* Enable Mtx Interrupt to host */
17767+ unsigned long enables = 0;
17768+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
17769+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
17770+ 1);
17771+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
17772+}
17773+
17774+int psb_irq_postinstall(struct drm_device *dev)
17775+{
17776+ struct drm_psb_private *dev_priv =
17777+ (struct drm_psb_private *) dev->dev_private;
17778+ unsigned long irqflags;
17779+ unsigned long enables = 0;
17780+
17781+ PSB_DEBUG_PM("psb_irq_postinstall\n");
17782+ down_read(&dev_priv->sgx_sem);
17783+ psb_check_power_state(dev, PSB_DEVICE_SGX);
17784+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17785+
17786+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
17787+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
17788+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
17789+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
17790+
17791+ /* MSVDX IRQ Setup, Enable Mtx Interrupt to host */
17792+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
17793+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS,
17794+ CR_MTX_IRQ, 1);
17795+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
17796+
17797+ dev_priv->irq_enabled = 1;
17798+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17799+ up_read(&dev_priv->sgx_sem);
17800+ return 0;
17801+}
17802+
17803+void psb_irq_uninstall(struct drm_device *dev)
17804+{
17805+ struct drm_psb_private *dev_priv =
17806+ (struct drm_psb_private *) dev->dev_private;
17807+ unsigned long irqflags;
17808+ PSB_DEBUG_PM("psb_irq_uninstall\n");
17809+ down_read(&dev_priv->sgx_sem);
17810+ psb_check_power_state(dev, PSB_DEVICE_SGX);
17811+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17812+
17813+ dev_priv->sgx_irq_mask = 0x00000000;
17814+ dev_priv->sgx2_irq_mask = 0x00000000;
17815+ dev_priv->vdc_irq_mask = 0x00000000;
17816+
17817+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
17818+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
17819+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
17820+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
17821+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
17822+ wmb();
17823+ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
17824+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS),
17825+ PSB_CR_EVENT_HOST_CLEAR);
17826+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2),
17827+ PSB_CR_EVENT_HOST_CLEAR2);
17828+
17829+ /* MSVDX IRQ Setup */
17830+ /* Clear interrupt enabled flag */
17831+ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
17832+
17833+ if (IS_MRST(dev))
17834+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0);
17835+
17836+ dev_priv->irq_enabled = 0;
17837+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17838+ up_read(&dev_priv->sgx_sem);
17839+}
17840+
17841+void psb_2D_irq_off(struct drm_psb_private *dev_priv)
17842+{
17843+ unsigned long irqflags;
17844+ uint32_t old_mask;
17845+ uint32_t cleared_mask;
17846+
17847+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17848+ --dev_priv->irqen_count_2d;
17849+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
17850+
17851+ old_mask = dev_priv->sgx_irq_mask;
17852+ dev_priv->sgx_irq_mask &= ~_PSB_CE_TWOD_COMPLETE;
17853+ PSB_WSGX32(dev_priv->sgx_irq_mask,
17854+ PSB_CR_EVENT_HOST_ENABLE);
17855+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
17856+
17857+ cleared_mask =
17858+ (old_mask ^ dev_priv->sgx_irq_mask) & old_mask;
17859+ PSB_WSGX32(cleared_mask, PSB_CR_EVENT_HOST_CLEAR);
17860+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
17861+ }
17862+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17863+}
17864+
17865+void psb_2D_irq_on(struct drm_psb_private *dev_priv)
17866+{
17867+ unsigned long irqflags;
17868+
17869+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17870+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
17871+ dev_priv->sgx_irq_mask |= _PSB_CE_TWOD_COMPLETE;
17872+ PSB_WSGX32(dev_priv->sgx_irq_mask,
17873+ PSB_CR_EVENT_HOST_ENABLE);
17874+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
17875+ }
17876+ ++dev_priv->irqen_count_2d;
17877+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17878+}
17879+
17880+#ifdef PSB_FIXME
17881+static int psb_vblank_do_wait(struct drm_device *dev,
17882+ unsigned int *sequence, atomic_t *counter)
17883+{
17884+ unsigned int cur_vblank;
17885+ int ret = 0;
17886+ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
17887+ (((cur_vblank = atomic_read(counter))
17888+ - *sequence) <= (1 << 23)));
17889+ *sequence = cur_vblank;
17890+
17891+ return ret;
17892+}
17893+#endif
17894+
17895+void psb_msvdx_irq_off(struct drm_psb_private *dev_priv)
17896+{
17897+ unsigned long irqflags;
17898+
17899+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17900+ if (dev_priv->irq_enabled) {
17901+ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
17902+ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
17903+ (void) PSB_RSGX32(PSB_INT_ENABLE_R);
17904+ }
17905+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17906+}
17907+
17908+void psb_msvdx_irq_on(struct drm_psb_private *dev_priv)
17909+{
17910+ unsigned long irqflags;
17911+
17912+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17913+ if (dev_priv->irq_enabled) {
17914+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
17915+ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
17916+ (void) PSB_RSGX32(PSB_INT_ENABLE_R);
17917+ }
17918+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17919+}
17920diff -uNr a/drivers/gpu/drm/psb/psb_mmu.c b/drivers/gpu/drm/psb/psb_mmu.c
17921--- a/drivers/gpu/drm/psb/psb_mmu.c 1969-12-31 16:00:00.000000000 -0800
17922+++ b/drivers/gpu/drm/psb/psb_mmu.c 2009-04-07 13:28:38.000000000 -0700
17923@@ -0,0 +1,1069 @@
17924+/**************************************************************************
17925+ * Copyright (c) 2007, Intel Corporation.
17926+ * All Rights Reserved.
17927+ *
17928+ * This program is free software; you can redistribute it and/or modify it
17929+ * under the terms and conditions of the GNU General Public License,
17930+ * version 2, as published by the Free Software Foundation.
17931+ *
17932+ * This program is distributed in the hope it will be useful, but WITHOUT
17933+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17934+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17935+ * more details.
17936+ *
17937+ * You should have received a copy of the GNU General Public License along with
17938+ * this program; if not, write to the Free Software Foundation, Inc.,
17939+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17940+ *
17941+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
17942+ * develop this driver.
17943+ *
17944+ **************************************************************************/
17945+#include <drm/drmP.h>
17946+#include "psb_drv.h"
17947+#include "psb_reg.h"
17948+
17949+/*
17950+ * Code for the SGX MMU:
17951+ */
17952+
17953+/*
17954+ * clflush on one processor only:
17955+ * clflush should apparently flush the cache line on all processors in an
17956+ * SMP system.
17957+ */
17958+
17959+/*
17960+ * kmap atomic:
17961+ * The usage of the slots must be completely encapsulated within a spinlock, and
17962+ * no other functions that may be using the locks for other purposes may be
17963+ * called from within the locked region.
17964+ * Since the slots are per processor, this will guarantee that we are the only
17965+ * user.
17966+ */
17967+
17968+/*
17969+ * TODO: Inserting ptes from an interrupt handler:
17970+ * This may be desirable for some SGX functionality where the GPU can fault in
17971+ * needed pages. For that, we need to make an atomic insert_pages function, that
17972+ * may fail.
17973+ * If it fails, the caller needs to insert the page using a workqueue function,
17974+ * but on average it should be fast.
17975+ */
17976+
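
As an aside on the TODO above: a non-sleeping insert would have to avoid down_read() and page allocation and simply return an error for a workqueue path to retry. The sketch below is purely illustrative and not part of this patch; it reuses only helpers defined later in this file (psb_mmu_pt_map_lock, psb_mmu_set_pte, psb_mmu_mask_pte, psb_mmu_pt_unmap_unlock), while the function name and error policy are hypothetical.

    /* Hypothetical sketch only, not part of the patch: a fallible,
     * non-sleeping single-PTE insert as described in the TODO above.
     * TLB/cache flushing is left to the (sleeping) retry path. */
    static int psb_mmu_insert_pfn_atomic(struct psb_mmu_pd *pd, uint32_t pfn,
                                         unsigned long addr, int type)
    {
            struct psb_mmu_pt *pt;

            if (!down_read_trylock(&pd->driver->sem))
                    return -EBUSY;          /* caller retries from a workqueue */

            pt = psb_mmu_pt_map_lock(pd, addr);     /* never allocates */
            if (!pt) {
                    up_read(&pd->driver->sem);
                    return -ENOMEM;         /* page table missing: defer */
            }
            psb_mmu_set_pte(pt, addr, psb_mmu_mask_pte(pfn, type));
            pt->count++;
            psb_mmu_pt_unmap_unlock(pt);
            up_read(&pd->driver->sem);
            return 0;
    }
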
17977+struct psb_mmu_driver {
17978+ /* protects driver- and pd structures. Always take in read mode
17979+ * before taking the page table spinlock.
17980+ */
17981+ struct rw_semaphore sem;
17982+
17983+ /* protects page tables, directory tables
17984+ * and pt structures.
17985+ */
17986+ spinlock_t lock;
17987+
17988+ atomic_t needs_tlbflush;
17989+
17990+ uint8_t __iomem *register_map;
17991+ struct psb_mmu_pd *default_pd;
17992+ uint32_t bif_ctrl;
17993+ int has_clflush;
17994+ int clflush_add;
17995+ unsigned long clflush_mask;
17996+
17997+ struct drm_psb_private *dev_priv;
17998+};
17999+
18000+struct psb_mmu_pd;
18001+
18002+struct psb_mmu_pt {
18003+ struct psb_mmu_pd *pd;
18004+ uint32_t index;
18005+ uint32_t count;
18006+ struct page *p;
18007+ uint32_t *v;
18008+};
18009+
18010+struct psb_mmu_pd {
18011+ struct psb_mmu_driver *driver;
18012+ int hw_context;
18013+ struct psb_mmu_pt **tables;
18014+ struct page *p;
18015+ struct page *dummy_pt;
18016+ struct page *dummy_page;
18017+ uint32_t pd_mask;
18018+ uint32_t invalid_pde;
18019+ uint32_t invalid_pte;
18020+};
18021+
18022+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
18023+
18024+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
18025+{
18026+ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
18027+}
18028+
18029+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
18030+{
18031+ return offset >> PSB_PDE_SHIFT;
18032+}
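
These two helpers split a 32-bit GPU virtual address into a page-directory slot and a page-table slot. The stand-alone snippet below just re-does that arithmetic for one address; the shift values are assumptions consistent with the 1024-entry tables used throughout this file (the real PSB_PDE_SHIFT/PSB_PTE_SHIFT live in psb_reg.h, which is not shown here).

    /* Stand-alone illustration; DEMO_* shift values are assumed. */
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PDE_SHIFT 22   /* assumed: 1024 directory entries */
    #define DEMO_PTE_SHIFT 12   /* assumed: 4 KiB pages */

    int main(void)
    {
            uint32_t addr = 0x00803004;     /* arbitrary GPU virtual address */
            uint32_t pde = addr >> DEMO_PDE_SHIFT;
            uint32_t pte = (addr >> DEMO_PTE_SHIFT) & 0x3FF;

            printf("addr 0x%08x -> pde %u, pte %u, page offset 0x%03x\n",
                   addr, pde, pte, addr & 0xFFFu);
            return 0;
    }
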
18033+
18034+#if defined(CONFIG_X86)
18035+static inline void psb_clflush(void *addr)
18036+{
18037+ __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
18038+}
18039+
18040+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
18041+ void *addr)
18042+{
18043+ if (!driver->has_clflush)
18044+ return;
18045+
18046+ mb();
18047+ psb_clflush(addr);
18048+ mb();
18049+}
18050+#else
18051+
18052+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
18053+ void *addr)
18054+{
18055+}
18056+
18057+#endif
18058+
18059+static inline void psb_iowrite32(const struct psb_mmu_driver *d,
18060+ uint32_t val, uint32_t offset)
18061+{
18062+ iowrite32(val, d->register_map + offset);
18063+}
18064+
18065+static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d,
18066+ uint32_t offset)
18067+{
18068+ return ioread32(d->register_map + offset);
18069+}
18070+
18071+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
18072+ int force)
18073+{
18074+ if (atomic_read(&driver->needs_tlbflush) || force) {
18075+ uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
18076+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
18077+ PSB_CR_BIF_CTRL);
18078+ wmb();
18079+ psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC,
18080+ PSB_CR_BIF_CTRL);
18081+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
18082+ if (driver->dev_priv) {
18083+ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
18084+ if (IS_MRST(driver->dev_priv->dev))
18085+ topaz_mmu_flushcache(driver->dev_priv);
18086+ }
18087+ }
18088+ atomic_set(&driver->needs_tlbflush, 0);
18089+}
18090+
18091+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
18092+{
18093+ down_write(&driver->sem);
18094+ psb_mmu_flush_pd_locked(driver, force);
18095+ up_write(&driver->sem);
18096+}
18097+
18098+void psb_mmu_flush(struct psb_mmu_driver *driver)
18099+{
18100+ uint32_t val;
18101+
18102+ down_write(&driver->sem);
18103+ if (driver->dev_priv->graphics_state == PSB_PWR_STATE_D0i0) {
18104+ val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
18105+ if (atomic_read(&driver->needs_tlbflush))
18106+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
18107+ PSB_CR_BIF_CTRL);
18108+ else
18109+ psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH,
18110+ PSB_CR_BIF_CTRL);
18111+ wmb();
18112+ psb_iowrite32(driver,
18113+ val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
18114+ PSB_CR_BIF_CTRL);
18115+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
18116+ atomic_set(&driver->needs_tlbflush, 0);
18117+ } else {
18118+ PSB_DEBUG_PM("mmu flush when down\n");
18119+ }
18120+
18121+ if (driver->dev_priv) {
18122+ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
18123+ if (IS_MRST(driver->dev_priv->dev))
18124+ topaz_mmu_flushcache(driver->dev_priv);
18125+ }
18126+
18127+ up_write(&driver->sem);
18128+}
18129+
18130+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
18131+{
18132+ uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
18133+ PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
18134+
18135+ ttm_tt_cache_flush(&pd->p, 1);
18136+ down_write(&pd->driver->sem);
18137+ psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT),
18138+ offset);
18139+ wmb();
18140+ psb_mmu_flush_pd_locked(pd->driver, 1);
18141+ pd->hw_context = hw_context;
18142+ up_write(&pd->driver->sem);
18143+
18144+}
18145+
18146+static inline unsigned long psb_pd_addr_end(unsigned long addr,
18147+ unsigned long end)
18148+{
18149+
18150+ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
18151+ return (addr < end) ? addr : end;
18152+}
18153+
18154+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
18155+{
18156+ uint32_t mask = PSB_PTE_VALID;
18157+
18158+ if (type & PSB_MMU_CACHED_MEMORY)
18159+ mask |= PSB_PTE_CACHED;
18160+ if (type & PSB_MMU_RO_MEMORY)
18161+ mask |= PSB_PTE_RO;
18162+ if (type & PSB_MMU_WO_MEMORY)
18163+ mask |= PSB_PTE_WO;
18164+
18165+ return (pfn << PAGE_SHIFT) | mask;
18166+}
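
psb_mmu_mask_pte() packs the page frame number into the upper bits of the PTE and ORs in the valid/cached/read-only/write-only bits requested by the PSB_MMU_* type. The snippet below only shows the shape of that word; the DEMO_* bit values are placeholders, not the real PSB_PTE_* definitions from psb_reg.h.

    /* Illustration only; DEMO_PTE_* bit positions are placeholders. */
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SHIFT 12
    #define DEMO_PTE_VALID  (1u << 0)       /* placeholder value */
    #define DEMO_PTE_CACHED (1u << 1)       /* placeholder value */

    int main(void)
    {
            uint32_t pfn = 0x1a2b3;
            uint32_t pte = (pfn << DEMO_PAGE_SHIFT) | DEMO_PTE_VALID | DEMO_PTE_CACHED;

            printf("pfn 0x%05x -> pte 0x%08x\n", pfn, pte);  /* 0x1a2b3003 */
            return 0;
    }
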
18167+
18168+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
18169+ int trap_pagefaults, int invalid_type)
18170+{
18171+ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
18172+ uint32_t *v;
18173+ int i;
18174+
18175+ if (!pd)
18176+ return NULL;
18177+
18178+ pd->p = alloc_page(GFP_DMA32);
18179+ if (!pd->p)
18180+ goto out_err1;
18181+ pd->dummy_pt = alloc_page(GFP_DMA32);
18182+ if (!pd->dummy_pt)
18183+ goto out_err2;
18184+ pd->dummy_page = alloc_page(GFP_DMA32);
18185+ if (!pd->dummy_page)
18186+ goto out_err3;
18187+
18188+ if (!trap_pagefaults) {
18189+ pd->invalid_pde =
18190+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
18191+ invalid_type);
18192+ pd->invalid_pte =
18193+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
18194+ invalid_type);
18195+ } else {
18196+ pd->invalid_pde = 0;
18197+ pd->invalid_pte = 0;
18198+ }
18199+
18200+ v = kmap(pd->dummy_pt);
18201+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
18202+ v[i] = pd->invalid_pte;
18203+
18204+ kunmap(pd->dummy_pt);
18205+
18206+ v = kmap(pd->p);
18207+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
18208+ v[i] = pd->invalid_pde;
18209+
18210+ kunmap(pd->p);
18211+
18212+ clear_page(kmap(pd->dummy_page));
18213+ kunmap(pd->dummy_page);
18214+
18215+ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
18216+ if (!pd->tables)
18217+ goto out_err4;
18218+
18219+ pd->hw_context = -1;
18220+ pd->pd_mask = PSB_PTE_VALID;
18221+ pd->driver = driver;
18222+
18223+ return pd;
18224+
18225+out_err4:
18226+ __free_page(pd->dummy_page);
18227+out_err3:
18228+ __free_page(pd->dummy_pt);
18229+out_err2:
18230+ __free_page(pd->p);
18231+out_err1:
18232+ kfree(pd);
18233+ return NULL;
18234+}
18235+
18236+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
18237+{
18238+ __free_page(pt->p);
18239+ kfree(pt);
18240+}
18241+
18242+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
18243+{
18244+ struct psb_mmu_driver *driver = pd->driver;
18245+ struct psb_mmu_pt *pt;
18246+ int i;
18247+
18248+ down_write(&driver->sem);
18249+ if (pd->hw_context != -1) {
18250+ psb_iowrite32(driver, 0,
18251+ PSB_CR_BIF_DIR_LIST_BASE0 +
18252+ pd->hw_context * 4);
18253+ psb_mmu_flush_pd_locked(driver, 1);
18254+ }
18255+
18256+ /* Should take the spinlock here, but we don't need to do that
18257+ since we have the semaphore in write mode. */
18258+
18259+ for (i = 0; i < 1024; ++i) {
18260+ pt = pd->tables[i];
18261+ if (pt)
18262+ psb_mmu_free_pt(pt);
18263+ }
18264+
18265+ vfree(pd->tables);
18266+ __free_page(pd->dummy_page);
18267+ __free_page(pd->dummy_pt);
18268+ __free_page(pd->p);
18269+ kfree(pd);
18270+ up_write(&driver->sem);
18271+}
18272+
18273+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
18274+{
18275+ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
18276+ void *v;
18277+ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
18278+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
18279+ spinlock_t *lock = &pd->driver->lock;
18280+ uint8_t *clf;
18281+ uint32_t *ptes;
18282+ int i;
18283+
18284+ if (!pt)
18285+ return NULL;
18286+
18287+ pt->p = alloc_page(GFP_DMA32);
18288+ if (!pt->p) {
18289+ kfree(pt);
18290+ return NULL;
18291+ }
18292+
18293+ spin_lock(lock);
18294+
18295+ v = kmap_atomic(pt->p, KM_USER0);
18296+ clf = (uint8_t *) v;
18297+ ptes = (uint32_t *) v;
18298+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
18299+ *ptes++ = pd->invalid_pte;
18300+
18301+
18302+#if defined(CONFIG_X86)
18303+ if (pd->driver->has_clflush && pd->hw_context != -1) {
18304+ mb();
18305+ for (i = 0; i < clflush_count; ++i) {
18306+ psb_clflush(clf);
18307+ clf += clflush_add;
18308+ }
18309+ mb();
18310+ }
18311+#endif
18312+ kunmap_atomic(v, KM_USER0);
18313+ spin_unlock(lock);
18314+
18315+ pt->count = 0;
18316+ pt->pd = pd;
18317+ pt->index = 0;
18318+
18319+ return pt;
18320+}
18321+
18322+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
18323+ unsigned long addr)
18324+{
18325+ uint32_t index = psb_mmu_pd_index(addr);
18326+ struct psb_mmu_pt *pt;
18327+ uint32_t *v;
18328+ spinlock_t *lock = &pd->driver->lock;
18329+
18330+ spin_lock(lock);
18331+ pt = pd->tables[index];
18332+ while (!pt) {
18333+ spin_unlock(lock);
18334+ pt = psb_mmu_alloc_pt(pd);
18335+ if (!pt)
18336+ return NULL;
18337+ spin_lock(lock);
18338+
18339+ if (pd->tables[index]) {
18340+ spin_unlock(lock);
18341+ psb_mmu_free_pt(pt);
18342+ spin_lock(lock);
18343+ pt = pd->tables[index];
18344+ continue;
18345+ }
18346+
18347+ v = kmap_atomic(pd->p, KM_USER0);
18348+ pd->tables[index] = pt;
18349+ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
18350+ pt->index = index;
18351+ kunmap_atomic((void *) v, KM_USER0);
18352+
18353+ if (pd->hw_context != -1) {
18354+ psb_mmu_clflush(pd->driver, (void *) &v[index]);
18355+ atomic_set(&pd->driver->needs_tlbflush, 1);
18356+ }
18357+ }
18358+ pt->v = kmap_atomic(pt->p, KM_USER0);
18359+ return pt;
18360+}
18361+
18362+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
18363+ unsigned long addr)
18364+{
18365+ uint32_t index = psb_mmu_pd_index(addr);
18366+ struct psb_mmu_pt *pt;
18367+ spinlock_t *lock = &pd->driver->lock;
18368+
18369+ spin_lock(lock);
18370+ pt = pd->tables[index];
18371+ if (!pt) {
18372+ spin_unlock(lock);
18373+ return NULL;
18374+ }
18375+ pt->v = kmap_atomic(pt->p, KM_USER0);
18376+ return pt;
18377+}
18378+
18379+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
18380+{
18381+ struct psb_mmu_pd *pd = pt->pd;
18382+ uint32_t *v;
18383+
18384+ kunmap_atomic(pt->v, KM_USER0);
18385+ if (pt->count == 0) {
18386+ v = kmap_atomic(pd->p, KM_USER0);
18387+ v[pt->index] = pd->invalid_pde;
18388+ pd->tables[pt->index] = NULL;
18389+
18390+ if (pd->hw_context != -1) {
18391+ psb_mmu_clflush(pd->driver,
18392+ (void *) &v[pt->index]);
18393+ atomic_set(&pd->driver->needs_tlbflush, 1);
18394+ }
18395+ kunmap_atomic(v, KM_USER0);
18396+ spin_unlock(&pd->driver->lock);
18397+ psb_mmu_free_pt(pt);
18398+ return;
18399+ }
18400+ spin_unlock(&pd->driver->lock);
18401+}
18402+
18403+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
18404+ unsigned long addr, uint32_t pte)
18405+{
18406+ pt->v[psb_mmu_pt_index(addr)] = pte;
18407+}
18408+
18409+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
18410+ unsigned long addr)
18411+{
18412+ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
18413+}
18414+
18415+#if 0
18416+static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
18417+ uint32_t mmu_offset)
18418+{
18419+ uint32_t *v;
18420+ uint32_t pfn;
18421+
18422+ v = kmap_atomic(pd->p, KM_USER0);
18423+ if (!v) {
18424+ printk(KERN_INFO "Could not kmap pde page.\n");
18425+ return 0;
18426+ }
18427+ pfn = v[psb_mmu_pd_index(mmu_offset)];
18428+ /* printk(KERN_INFO "pde is 0x%08x\n",pfn); */
18429+ kunmap_atomic(v, KM_USER0);
18430+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
18431+ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
18432+ mmu_offset, pfn);
18433+ }
18434+ v = ioremap(pfn & 0xFFFFF000, 4096);
18435+ if (!v) {
18436+ printk(KERN_INFO "Could not kmap pte page.\n");
18437+ return 0;
18438+ }
18439+ pfn = v[psb_mmu_pt_index(mmu_offset)];
18440+ /* printk(KERN_INFO "pte is 0x%08x\n",pfn); */
18441+ iounmap(v);
18442+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
18443+ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
18444+ mmu_offset, pfn);
18445+ }
18446+ return pfn >> PAGE_SHIFT;
18447+}
18448+
18449+static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
18450+ uint32_t mmu_offset,
18451+ uint32_t gtt_pages)
18452+{
18453+ uint32_t start;
18454+ uint32_t next;
18455+
18456+ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
18457+ mmu_offset, gtt_pages);
18458+ down_read(&pd->driver->sem);
18459+ start = psb_mmu_check_pte_locked(pd, mmu_offset);
18460+ mmu_offset += PAGE_SIZE;
18461+ gtt_pages -= 1;
18462+ while (gtt_pages--) {
18463+ next = psb_mmu_check_pte_locked(pd, mmu_offset);
18464+ if (next != start + 1) {
18465+ printk(KERN_INFO
18466+ "Ptes out of order: 0x%08x, 0x%08x.\n",
18467+ start, next);
18468+ }
18469+ start = next;
18470+ mmu_offset += PAGE_SIZE;
18471+ }
18472+ up_read(&pd->driver->sem);
18473+}
18474+
18475+#endif
18476+
18477+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
18478+ uint32_t mmu_offset, uint32_t gtt_start,
18479+ uint32_t gtt_pages)
18480+{
18481+ uint32_t *v;
18482+ uint32_t start = psb_mmu_pd_index(mmu_offset);
18483+ struct psb_mmu_driver *driver = pd->driver;
18484+ int num_pages = gtt_pages;
18485+
18486+ down_read(&driver->sem);
18487+ spin_lock(&driver->lock);
18488+
18489+ v = kmap_atomic(pd->p, KM_USER0);
18490+ v += start;
18491+
18492+ while (gtt_pages--) {
18493+ *v++ = gtt_start | pd->pd_mask;
18494+ gtt_start += PAGE_SIZE;
18495+ }
18496+
18497+ ttm_tt_cache_flush(&pd->p, num_pages);
18498+ kunmap_atomic(v, KM_USER0);
18499+ spin_unlock(&driver->lock);
18500+
18501+ if (pd->hw_context != -1)
18502+ atomic_set(&pd->driver->needs_tlbflush, 1);
18503+
18504+ up_read(&pd->driver->sem);
18505+ psb_mmu_flush_pd(pd->driver, 0);
18506+}
18507+
18508+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
18509+{
18510+ struct psb_mmu_pd *pd;
18511+
18512+ down_read(&driver->sem);
18513+ pd = driver->default_pd;
18514+ up_read(&driver->sem);
18515+
18516+ return pd;
18517+}
18518+
18519+/* Returns the physical address of the PD shared by sgx/msvdx */
18520+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
18521+{
18522+ struct psb_mmu_pd *pd;
18523+
18524+ pd = psb_mmu_get_default_pd(driver);
18525+ return page_to_pfn(pd->p) << PAGE_SHIFT;
18526+}
18527+
18528+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
18529+{
18530+ psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL);
18531+ psb_mmu_free_pagedir(driver->default_pd);
18532+ kfree(driver);
18533+}
18534+
18535+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
18536+ int trap_pagefaults,
18537+ int invalid_type,
18538+ struct drm_psb_private *dev_priv)
18539+{
18540+ struct psb_mmu_driver *driver;
18541+
18542+ driver = kmalloc(sizeof(*driver), GFP_KERNEL);
18543+
18544+ if (!driver)
18545+ return NULL;
18546+ driver->dev_priv = dev_priv;
18547+
18548+ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
18549+ invalid_type);
18550+ if (!driver->default_pd)
18551+ goto out_err1;
18552+
18553+ spin_lock_init(&driver->lock);
18554+ init_rwsem(&driver->sem);
18555+ down_write(&driver->sem);
18556+ driver->register_map = registers;
18557+ atomic_set(&driver->needs_tlbflush, 1);
18558+
18559+ driver->bif_ctrl = psb_ioread32(driver, PSB_CR_BIF_CTRL);
18560+ psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
18561+ PSB_CR_BIF_CTRL);
18562+ psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
18563+ PSB_CR_BIF_CTRL);
18564+
18565+ driver->has_clflush = 0;
18566+
18567+#if defined(CONFIG_X86)
18568+ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
18569+ uint32_t tfms, misc, cap0, cap4, clflush_size;
18570+
18571+ /*
18572+ * clflush size is determined at kernel setup for x86_64
18573+ * but not for i386. We have to do it here.
18574+ */
18575+
18576+ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
18577+ clflush_size = ((misc >> 8) & 0xff) * 8;
18578+ driver->has_clflush = 1;
18579+ driver->clflush_add =
18580+ PAGE_SIZE * clflush_size / sizeof(uint32_t);
18581+ driver->clflush_mask = driver->clflush_add - 1;
18582+ driver->clflush_mask = ~driver->clflush_mask;
18583+ }
18584+#endif
18585+
18586+ up_write(&driver->sem);
18587+ return driver;
18588+
18589+out_err1:
18590+ kfree(driver);
18591+ return NULL;
18592+}
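
The i386 path above derives the clflush line size from CPUID leaf 1: bits 15:8 of EBX give the line size in 8-byte units. The small userspace program below repeats that computation with GCC/clang's <cpuid.h>, assuming an x86 host; it is only a sanity check of the formula, not driver code.

    /* Userspace re-check of the clflush-size computation used above. */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
                    fprintf(stderr, "CPUID leaf 1 not available\n");
                    return 1;
            }
            /* bits 15:8 of EBX: cache line size in 8-byte units */
            printf("clflush line size: %u bytes\n", ((ebx >> 8) & 0xff) * 8);
            return 0;
    }
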
18593+
18594+#if defined(CONFIG_X86)
18595+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
18596+ unsigned long address, uint32_t num_pages,
18597+ uint32_t desired_tile_stride,
18598+ uint32_t hw_tile_stride)
18599+{
18600+ struct psb_mmu_pt *pt;
18601+ uint32_t rows = 1;
18602+ uint32_t i;
18603+ unsigned long addr;
18604+ unsigned long end;
18605+ unsigned long next;
18606+ unsigned long add;
18607+ unsigned long row_add;
18608+ unsigned long clflush_add = pd->driver->clflush_add;
18609+ unsigned long clflush_mask = pd->driver->clflush_mask;
18610+
18611+ if (!pd->driver->has_clflush) {
18612+ ttm_tt_cache_flush(&pd->p, num_pages);
18613+ return;
18614+ }
18615+
18616+ if (hw_tile_stride)
18617+ rows = num_pages / desired_tile_stride;
18618+ else
18619+ desired_tile_stride = num_pages;
18620+
18621+ add = desired_tile_stride << PAGE_SHIFT;
18622+ row_add = hw_tile_stride << PAGE_SHIFT;
18623+ mb();
18624+ for (i = 0; i < rows; ++i) {
18625+
18626+ addr = address;
18627+ end = addr + add;
18628+
18629+ do {
18630+ next = psb_pd_addr_end(addr, end);
18631+ pt = psb_mmu_pt_map_lock(pd, addr);
18632+ if (!pt)
18633+ continue;
18634+ do {
18635+ psb_clflush(&pt->v
18636+ [psb_mmu_pt_index(addr)]);
18637+ } while (addr +=
18638+ clflush_add,
18639+ (addr & clflush_mask) < next);
18640+
18641+ psb_mmu_pt_unmap_unlock(pt);
18642+ } while (addr = next, next != end);
18643+ address += row_add;
18644+ }
18645+ mb();
18646+}
18647+#else
18648+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
18649+ unsigned long address, uint32_t num_pages,
18650+ uint32_t desired_tile_stride,
18651+ uint32_t hw_tile_stride)
18652+{
18653+ drm_ttm_cache_flush(&pd->p, num_pages);
18654+}
18655+#endif
18656+
18657+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
18658+ unsigned long address, uint32_t num_pages)
18659+{
18660+ struct psb_mmu_pt *pt;
18661+ unsigned long addr;
18662+ unsigned long end;
18663+ unsigned long next;
18664+ unsigned long f_address = address;
18665+
18666+ down_read(&pd->driver->sem);
18667+
18668+ addr = address;
18669+ end = addr + (num_pages << PAGE_SHIFT);
18670+
18671+ do {
18672+ next = psb_pd_addr_end(addr, end);
18673+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
18674+ if (!pt)
18675+ goto out;
18676+ do {
18677+ psb_mmu_invalidate_pte(pt, addr);
18678+ --pt->count;
18679+ } while (addr += PAGE_SIZE, addr < next);
18680+ psb_mmu_pt_unmap_unlock(pt);
18681+
18682+ } while (addr = next, next != end);
18683+
18684+out:
18685+ if (pd->hw_context != -1)
18686+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
18687+
18688+ up_read(&pd->driver->sem);
18689+
18690+ if (pd->hw_context != -1)
18691+ psb_mmu_flush(pd->driver);
18692+
18693+ return;
18694+}
18695+
18696+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
18697+ uint32_t num_pages, uint32_t desired_tile_stride,
18698+ uint32_t hw_tile_stride)
18699+{
18700+ struct psb_mmu_pt *pt;
18701+ uint32_t rows = 1;
18702+ uint32_t i;
18703+ unsigned long addr;
18704+ unsigned long end;
18705+ unsigned long next;
18706+ unsigned long add;
18707+ unsigned long row_add;
18708+ unsigned long f_address = address;
18709+
18710+ if (hw_tile_stride)
18711+ rows = num_pages / desired_tile_stride;
18712+ else
18713+ desired_tile_stride = num_pages;
18714+
18715+ add = desired_tile_stride << PAGE_SHIFT;
18716+ row_add = hw_tile_stride << PAGE_SHIFT;
18717+
18718+ down_read(&pd->driver->sem);
18719+
18720+ /* Make sure we only need to flush this processor's cache */
18721+
18722+ for (i = 0; i < rows; ++i) {
18723+
18724+ addr = address;
18725+ end = addr + add;
18726+
18727+ do {
18728+ next = psb_pd_addr_end(addr, end);
18729+ pt = psb_mmu_pt_map_lock(pd, addr);
18730+ if (!pt)
18731+ continue;
18732+ do {
18733+ psb_mmu_invalidate_pte(pt, addr);
18734+ --pt->count;
18735+
18736+ } while (addr += PAGE_SIZE, addr < next);
18737+ psb_mmu_pt_unmap_unlock(pt);
18738+
18739+ } while (addr = next, next != end);
18740+ address += row_add;
18741+ }
18742+ if (pd->hw_context != -1)
18743+ psb_mmu_flush_ptes(pd, f_address, num_pages,
18744+ desired_tile_stride, hw_tile_stride);
18745+
18746+ up_read(&pd->driver->sem);
18747+
18748+ if (pd->hw_context != -1)
18749+ psb_mmu_flush(pd->driver);
18750+}
18751+
18752+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
18753+ unsigned long address, uint32_t num_pages,
18754+ int type)
18755+{
18756+ struct psb_mmu_pt *pt;
18757+ uint32_t pte;
18758+ unsigned long addr;
18759+ unsigned long end;
18760+ unsigned long next;
18761+ unsigned long f_address = address;
18762+ int ret = 0;
18763+
18764+ down_read(&pd->driver->sem);
18765+
18766+ addr = address;
18767+ end = addr + (num_pages << PAGE_SHIFT);
18768+
18769+ do {
18770+ next = psb_pd_addr_end(addr, end);
18771+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
18772+ if (!pt) {
18773+ ret = -ENOMEM;
18774+ goto out;
18775+ }
18776+ do {
18777+ pte = psb_mmu_mask_pte(start_pfn++, type);
18778+ psb_mmu_set_pte(pt, addr, pte);
18779+ pt->count++;
18780+ } while (addr += PAGE_SIZE, addr < next);
18781+ psb_mmu_pt_unmap_unlock(pt);
18782+
18783+ } while (addr = next, next != end);
18784+
18785+out:
18786+ if (pd->hw_context != -1)
18787+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
18788+
18789+ up_read(&pd->driver->sem);
18790+
18791+ if (pd->hw_context != -1)
18792+ psb_mmu_flush(pd->driver);
18793+
18794+ return ret;
18795+}
18796+
18797+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
18798+ unsigned long address, uint32_t num_pages,
18799+ uint32_t desired_tile_stride,
18800+ uint32_t hw_tile_stride, int type)
18801+{
18802+ struct psb_mmu_pt *pt;
18803+ uint32_t rows = 1;
18804+ uint32_t i;
18805+ uint32_t pte;
18806+ unsigned long addr;
18807+ unsigned long end;
18808+ unsigned long next;
18809+ unsigned long add;
18810+ unsigned long row_add;
18811+ unsigned long f_address = address;
18812+ int ret = 0;
18813+
18814+ if (hw_tile_stride) {
18815+ if (num_pages % desired_tile_stride != 0)
18816+ return -EINVAL;
18817+ rows = num_pages / desired_tile_stride;
18818+ } else {
18819+ desired_tile_stride = num_pages;
18820+ }
18821+
18822+ add = desired_tile_stride << PAGE_SHIFT;
18823+ row_add = hw_tile_stride << PAGE_SHIFT;
18824+
18825+ down_read(&pd->driver->sem);
18826+
18827+ for (i = 0; i < rows; ++i) {
18828+
18829+ addr = address;
18830+ end = addr + add;
18831+
18832+ do {
18833+ next = psb_pd_addr_end(addr, end);
18834+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
18835+ if (!pt) {
18836+ ret = -ENOMEM;
18837+ goto out;
18838+ }
18839+ do {
18840+ pte =
18841+ psb_mmu_mask_pte(page_to_pfn(*pages++),
18842+ type);
18843+ psb_mmu_set_pte(pt, addr, pte);
18844+ pt->count++;
18845+ } while (addr += PAGE_SIZE, addr < next);
18846+ psb_mmu_pt_unmap_unlock(pt);
18847+
18848+ } while (addr = next, next != end);
18849+
18850+ address += row_add;
18851+ }
18852+out:
18853+ if (pd->hw_context != -1)
18854+ psb_mmu_flush_ptes(pd, f_address, num_pages,
18855+ desired_tile_stride, hw_tile_stride);
18856+
18857+ up_read(&pd->driver->sem);
18858+
18859+ if (pd->hw_context != -1)
18860+ psb_mmu_flush(pd->driver);
18861+
18862+ return ret;
18863+}
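
For orientation, a hypothetical call site (not part of this patch) that maps an array of pages linearly, with no tiling, into the default page directory and later removes the mapping could look like the sketch below; only the psb_mmu_* helpers and PSB_MMU_CACHED_MEMORY are real, the wrapper function itself is invented.

    /* Hypothetical wrapper; only the psb_mmu_* calls exist in this patch. */
    static int demo_map_buffer(struct drm_psb_private *dev_priv,
                               struct page **pages, uint32_t npages,
                               unsigned long gpu_offset)
    {
            struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
            int ret;

            ret = psb_mmu_insert_pages(pd, pages, gpu_offset, npages,
                                       0, 0, PSB_MMU_CACHED_MEMORY);
            if (ret)
                    return ret;

            /* ... the hardware can now use the mapping ... */

            psb_mmu_remove_pages(pd, gpu_offset, npages, 0, 0);
            return 0;
    }
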
18864+
18865+void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
18866+{
18867+ mask &= _PSB_MMU_ER_MASK;
18868+ psb_iowrite32(driver,
18869+ psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
18870+ PSB_CR_BIF_CTRL);
18871+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
18872+}
18873+
18874+void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
18875+ uint32_t mask)
18876+{
18877+ mask &= _PSB_MMU_ER_MASK;
18878+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
18879+ PSB_CR_BIF_CTRL);
18880+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
18881+}
18882+
18883+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
18884+ unsigned long *pfn)
18885+{
18886+ int ret;
18887+ struct psb_mmu_pt *pt;
18888+ uint32_t tmp;
18889+ spinlock_t *lock = &pd->driver->lock;
18890+
18891+ down_read(&pd->driver->sem);
18892+ pt = psb_mmu_pt_map_lock(pd, virtual);
18893+ if (!pt) {
18894+ uint32_t *v;
18895+
18896+ spin_lock(lock);
18897+ v = kmap_atomic(pd->p, KM_USER0);
18898+ tmp = v[psb_mmu_pd_index(virtual)];
18899+ kunmap_atomic(v, KM_USER0);
18900+ spin_unlock(lock);
18901+
18902+ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
18903+ !(pd->invalid_pte & PSB_PTE_VALID)) {
18904+ ret = -EINVAL;
18905+ goto out;
18906+ }
18907+ ret = 0;
18908+ *pfn = pd->invalid_pte >> PAGE_SHIFT;
18909+ goto out;
18910+ }
18911+ tmp = pt->v[psb_mmu_pt_index(virtual)];
18912+ if (!(tmp & PSB_PTE_VALID)) {
18913+ ret = -EINVAL;
18914+ } else {
18915+ ret = 0;
18916+ *pfn = tmp >> PAGE_SHIFT;
18917+ }
18918+ psb_mmu_pt_unmap_unlock(pt);
18919+out:
18920+ up_read(&pd->driver->sem);
18921+ return ret;
18922+}
18923+
18924+void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
18925+{
18926+ struct page *p;
18927+ unsigned long pfn;
18928+ int ret = 0;
18929+ struct psb_mmu_pd *pd;
18930+ uint32_t *v;
18931+ uint32_t *vmmu;
18932+
18933+ pd = driver->default_pd;
18934+ if (!pd) {
18935+ printk(KERN_WARNING "Could not get default pd\n");
18936+ return;
18937+ }
18938+ p = alloc_page(GFP_DMA32);
18939+
18940+ if (!p) {
18941+ printk(KERN_WARNING "Failed allocating page\n");
18942+ return;
18943+ }
18944+
18945+ v = kmap(p);
18946+ memset(v, 0x67, PAGE_SIZE);
18947+
18948+ pfn = (offset >> PAGE_SHIFT);
18949+
18950+ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
18951+ if (ret) {
18952+ printk(KERN_WARNING "Failed inserting mmu page\n");
18953+ goto out_err1;
18954+ }
18955+
18956+ /* Ioremap the page through the GART aperture */
18957+
18958+ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
18959+ if (!vmmu) {
18960+ printk(KERN_WARNING "Failed ioremapping page\n");
18961+ goto out_err2;
18962+ }
18963+
18964+ /* Read from the page with mmu disabled. */
18965+ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
18966+
18967+ /* Enable the mmu for host accesses and read again. */
18968+ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
18969+
18970+ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
18971+ ioread32(vmmu));
18972+ *v = 0x15243705;
18973+ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
18974+ ioread32(vmmu));
18975+ iowrite32(0x16243355, vmmu);
18976+ (void) ioread32(vmmu);
18977+ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
18978+
18979+ printk(KERN_INFO "Int stat is 0x%08x\n",
18980+ psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
18981+ printk(KERN_INFO "Fault is 0x%08x\n",
18982+ psb_ioread32(driver, PSB_CR_BIF_FAULT));
18983+
18984+ /* Disable MMU for host accesses and clear page fault register */
18985+ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
18986+ iounmap(vmmu);
18987+out_err2:
18988+ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
18989+out_err1:
18990+ kunmap(p);
18991+ __free_page(p);
18992+}
18993diff -uNr a/drivers/gpu/drm/psb/psb_msvdx.c b/drivers/gpu/drm/psb/psb_msvdx.c
18994--- a/drivers/gpu/drm/psb/psb_msvdx.c 1969-12-31 16:00:00.000000000 -0800
18995+++ b/drivers/gpu/drm/psb/psb_msvdx.c 2009-04-07 13:28:38.000000000 -0700
18996@@ -0,0 +1,681 @@
18997+/**
18998+ * file psb_msvdx.c
18999+ * MSVDX I/O operations and IRQ handling
19000+ *
19001+ */
19002+
19003+/**************************************************************************
19004+ *
19005+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
19006+ * Copyright (c) Imagination Technologies Limited, UK
19007+ * All Rights Reserved.
19008+ *
19009+ * Permission is hereby granted, free of charge, to any person obtaining a
19010+ * copy of this software and associated documentation files (the
19011+ * "Software"), to deal in the Software without restriction, including
19012+ * without limitation the rights to use, copy, modify, merge, publish,
19013+ * distribute, sub license, and/or sell copies of the Software, and to
19014+ * permit persons to whom the Software is furnished to do so, subject to
19015+ * the following conditions:
19016+ *
19017+ * The above copyright notice and this permission notice (including the
19018+ * next paragraph) shall be included in all copies or substantial portions
19019+ * of the Software.
19020+ *
19021+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19022+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19023+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19024+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
19025+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19026+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19027+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
19028+ *
19029+ **************************************************************************/
19030+
19031+#include <drm/drmP.h>
19032+#include <drm/drm_os_linux.h>
19033+#include "psb_drv.h"
19034+#include "psb_drm.h"
19035+#include "psb_msvdx.h"
19036+
19037+#include <linux/io.h>
19038+#include <linux/delay.h>
19039+
19040+#ifndef list_first_entry
19041+#define list_first_entry(ptr, type, member) \
19042+ list_entry((ptr)->next, type, member)
19043+#endif
19044+
19045+
19046+static int psb_msvdx_send(struct drm_device *dev, void *cmd,
19047+ unsigned long cmd_size);
19048+
19049+int psb_msvdx_dequeue_send(struct drm_device *dev)
19050+{
19051+ struct drm_psb_private *dev_priv = dev->dev_private;
19052+ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
19053+ int ret = 0;
19054+
19055+ if (list_empty(&dev_priv->msvdx_queue)) {
19056+ PSB_DEBUG_GENERAL("MSVDXQUE: msvdx list empty.\n");
19057+ dev_priv->msvdx_busy = 0;
19058+ return -EINVAL;
19059+ }
19060+ msvdx_cmd = list_first_entry(&dev_priv->msvdx_queue,
19061+ struct psb_msvdx_cmd_queue, head);
19062+ PSB_DEBUG_GENERAL("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
19063+ ret = psb_msvdx_send(dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
19064+ if (ret) {
19065+ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
19066+ ret = -EINVAL;
19067+ }
19068+ list_del(&msvdx_cmd->head);
19069+ kfree(msvdx_cmd->cmd);
19070+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
19071+
19072+ return ret;
19073+}
19074+
19075+int psb_msvdx_map_command(struct drm_device *dev,
19076+ struct ttm_buffer_object *cmd_buffer,
19077+ unsigned long cmd_offset, unsigned long cmd_size,
19078+ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
19079+{
19080+ struct drm_psb_private *dev_priv = dev->dev_private;
19081+ int ret = 0;
19082+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
19083+ unsigned long cmd_size_remaining;
19084+ struct ttm_bo_kmap_obj cmd_kmap;
19085+ void *cmd, *tmp, *cmd_start;
19086+ bool is_iomem;
19087+
19088+ /* command buffers may not exceed page boundary */
19089+ if (cmd_size + cmd_page_offset > PAGE_SIZE)
19090+ return -EINVAL;
19091+
19092+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2, &cmd_kmap);
19093+ if (ret) {
19094+ DRM_ERROR("MSVDXQUE:ret:%d\n", ret);
19095+ return ret;
19096+ }
19097+
19098+ cmd_start = (void *)ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem)
19099+ + cmd_page_offset;
19100+ cmd = cmd_start;
19101+ cmd_size_remaining = cmd_size;
19102+
19103+ while (cmd_size_remaining > 0) {
19104+ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
19105+ uint32_t cur_cmd_id = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_ID);
19106+ uint32_t mmu_ptd = 0, tmp = 0;
19107+
19108+ PSB_DEBUG_GENERAL("cmd start at %08x cur_cmd_size = %d"
19109+ " cur_cmd_id = %02x fence = %08x\n",
19110+ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
19111+ if ((cur_cmd_size % sizeof(uint32_t))
19112+ || (cur_cmd_size > cmd_size_remaining)) {
19113+ ret = -EINVAL;
19114+ DRM_ERROR("MSVDX: ret:%d\n", ret);
19115+ goto out;
19116+ }
19117+
19118+ switch (cur_cmd_id) {
19119+ case VA_MSGID_RENDER:
19120+ /* Fence ID */
19121+ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_FENCE_VALUE,
19122+ sequence);
19123+ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
19124+ tmp = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
19125+ 1, 0);
19126+ if (tmp == 1) {
19127+ mmu_ptd |= 1;
19128+ PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
19129+ }
19130+
19131+ /* PTD */
19132+ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
19133+ break;
19134+
19135+ default:
19136+ /* Msg not supported */
19137+ ret = -EINVAL;
19138+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
19139+ goto out;
19140+ }
19141+
19142+ cmd += cur_cmd_size;
19143+ cmd_size_remaining -= cur_cmd_size;
19144+ }
19145+
19146+ if (copy_cmd) {
19147+ PSB_DEBUG_GENERAL("MSVDXQUE:copying command\n");
19148+
19149+ tmp = drm_calloc(1, cmd_size, DRM_MEM_DRIVER);
19150+ if (tmp == NULL) {
19151+ ret = -ENOMEM;
19152+ DRM_ERROR("MSVDX: failed to calloc, ret:%d\n", ret);
19153+ goto out;
19154+ }
19155+ memcpy(tmp, cmd_start, cmd_size);
19156+ *msvdx_cmd = tmp;
19157+ } else {
19158+ PSB_DEBUG_GENERAL("MSVDXQUE:did NOT copy command\n");
19159+ ret = psb_msvdx_send(dev, cmd_start, cmd_size);
19160+ if (ret) {
19161+ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
19162+ ret = -EINVAL;
19163+ }
19164+ }
19165+
19166+out:
19167+ ttm_bo_kunmap(&cmd_kmap);
19168+
19169+ return ret;
19170+}
19171+
19172+int psb_submit_video_cmdbuf(struct drm_device *dev,
19173+ struct ttm_buffer_object *cmd_buffer,
19174+ unsigned long cmd_offset, unsigned long cmd_size,
19175+ struct ttm_fence_object *fence)
19176+{
19177+ struct drm_psb_private *dev_priv = dev->dev_private;
19178+ uint32_t sequence = dev_priv->sequence[PSB_ENGINE_VIDEO];
19179+ unsigned long irq_flags;
19180+ int ret = 0;
19181+
19182+ mutex_lock(&dev_priv->msvdx_mutex);
19183+
19184+ psb_schedule_watchdog(dev_priv);
19185+
19186+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
19187+ if (dev_priv->msvdx_needs_reset) {
19188+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
19189+ PSB_DEBUG_GENERAL("MSVDX: will reset msvdx\n");
19190+ if (psb_msvdx_reset(dev_priv)) {
19191+ mutex_unlock(&dev_priv->msvdx_mutex);
19192+ ret = -EBUSY;
19193+ DRM_ERROR("MSVDX: Reset failed\n");
19194+ return ret;
19195+ }
19196+ dev_priv->msvdx_needs_reset = 0;
19197+ dev_priv->msvdx_busy = 0;
19198+
19199+ psb_msvdx_init(dev);
19200+ psb_msvdx_irq_preinstall(dev_priv);
19201+ psb_msvdx_irq_postinstall(dev_priv);
19202+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
19203+ }
19204+
19205+ if (!dev_priv->msvdx_fw_loaded) {
19206+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
19207+ PSB_DEBUG_GENERAL("MSVDX:load /lib/firmware/msvdx_fw.bin"
19208+ " by udevd\n");
19209+
19210+ ret = psb_setup_fw(dev);
19211+ if (ret) {
19212+ mutex_unlock(&dev_priv->msvdx_mutex);
19213+
19214+ DRM_ERROR("MSVDX:is there a /lib/firmware/msvdx_fw.bin,"
19215+ " and udevd is configured correctly?\n");
19216+
19217+ /* FIXME: find a proper return value */
19218+ return -EFAULT;
19219+ }
19220+ dev_priv->msvdx_fw_loaded = 1;
19221+
19222+ psb_msvdx_irq_preinstall(dev_priv);
19223+ psb_msvdx_irq_postinstall(dev_priv);
19224+ PSB_DEBUG_GENERAL("MSVDX: load firmware successfully\n");
19225+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
19226+ }
19227+
19228+
19229+ if (!dev_priv->msvdx_busy) {
19230+ dev_priv->msvdx_busy = 1;
19231+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
19232+ PSB_DEBUG_GENERAL("MSVDX: commit command to HW,seq=0x%08x\n",
19233+ sequence);
19234+ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
19235+ cmd_size, NULL, sequence, 0);
19236+ if (ret) {
19237+ mutex_unlock(&dev_priv->msvdx_mutex);
19238+ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
19239+ return ret;
19240+ }
19241+ } else {
19242+ struct psb_msvdx_cmd_queue *msvdx_cmd;
19243+ void *cmd = NULL;
19244+
19245+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
19246+ /* queue the command to be sent when the h/w is ready */
19247+ PSB_DEBUG_GENERAL("MSVDXQUE: queueing sequence:%08x..\n",
19248+ sequence);
19249+ msvdx_cmd = drm_calloc(1, sizeof(struct psb_msvdx_cmd_queue),
19250+ DRM_MEM_DRIVER);
19251+ if (msvdx_cmd == NULL) {
19252+ mutex_unlock(&dev_priv->msvdx_mutex);
19253+ DRM_ERROR("MSVDXQUE: Out of memory...\n");
19254+ return -ENOMEM;
19255+ }
19256+
19257+ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
19258+ cmd_size, &cmd, sequence, 1);
19259+ if (ret) {
19260+ mutex_unlock(&dev_priv->msvdx_mutex);
19261+ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
19262+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
19263+ DRM_MEM_DRIVER);
19264+ return ret;
19265+ }
19266+ msvdx_cmd->cmd = cmd;
19267+ msvdx_cmd->cmd_size = cmd_size;
19268+ msvdx_cmd->sequence = sequence;
19269+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
19270+ list_add_tail(&msvdx_cmd->head, &dev_priv->msvdx_queue);
19271+ if (!dev_priv->msvdx_busy) {
19272+ dev_priv->msvdx_busy = 1;
19273+ PSB_DEBUG_GENERAL("MSVDXQUE: Need immediate dequeue\n");
19274+ psb_msvdx_dequeue_send(dev);
19275+ }
19276+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
19277+ }
19278+ mutex_unlock(&dev_priv->msvdx_mutex);
19279+ return ret;
19280+}
19281+
19282+int psb_msvdx_send(struct drm_device *dev, void *cmd, unsigned long cmd_size)
19283+{
19284+ int ret = 0;
19285+ struct drm_psb_private *dev_priv = dev->dev_private;
19286+
19287+ while (cmd_size > 0) {
19288+ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
19289+ if (cur_cmd_size > cmd_size) {
19290+ ret = -EINVAL;
19291+ DRM_ERROR("MSVDX:cmd_size %lu cur_cmd_size %lu\n",
19292+ cmd_size, (unsigned long)cur_cmd_size);
19293+ goto out;
19294+ }
19295+ /* Send the message to h/w */
19296+ ret = psb_mtx_send(dev_priv, cmd);
19297+ if (ret) {
19298+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
19299+ goto out;
19300+ }
19301+ cmd += cur_cmd_size;
19302+ cmd_size -= cur_cmd_size;
19303+ }
19304+
19305+out:
19306+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
19307+ return ret;
19308+}
19309+
19310+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *msg)
19311+{
19312+ static uint32_t pad_msg[FWRK_PADMSG_SIZE];
19313+ const uint32_t *p_msg = (uint32_t *) msg;
19314+ uint32_t msg_num, words_free, ridx, widx;
19315+ int ret = 0;
19316+
19317+ PSB_DEBUG_GENERAL("MSVDX: psb_mtx_send\n");
19318+
19319+ /* we need clocks enabled before we touch VEC local ram */
19320+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
19321+
19322+ msg_num = (MEMIO_READ_FIELD(msg, FWRK_GENMSG_SIZE) + 3) / 4;
19323+
19324+ if (msg_num > NUM_WORDS_MTX_BUF) {
19325+ ret = -EINVAL;
19326+ DRM_ERROR("MSVDX: message exceeds maximum, ret:%d\n", ret);
19327+ goto out;
19328+ }
19329+
19330+ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
19331+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
19332+
19333+ /* message would wrap, need to send a pad message */
19334+ if (widx + msg_num > NUM_WORDS_MTX_BUF) {
19335+ /* Shouldn't happen for a PAD message itself */
19336+ BUG_ON(MEMIO_READ_FIELD(msg, FWRK_GENMSG_ID)
19337+ == FWRK_MSGID_PADDING);
19338+
19339+ /* if the read pointer is at zero then we must wait for it to
19340+ * change otherwise the write pointer will equal the read
19341+ * pointer, which should only happen when the buffer is empty.
19342+ *
19343+ * This will only happen if we try to overfill the queue;
19344+ * queue management should make
19345+ * sure this never happens in the first place.
19346+ */
19347+ BUG_ON(0 == ridx);
19348+ if (0 == ridx) {
19349+ ret = -EINVAL;
19350+ DRM_ERROR("MSVDX: RIndex=0, ret:%d\n", ret);
19351+ goto out;
19352+ }
19353+ /* Send a pad message */
19354+ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_SIZE,
19355+ (NUM_WORDS_MTX_BUF - widx) << 2);
19356+ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_ID,
19357+ FWRK_MSGID_PADDING);
19358+ psb_mtx_send(dev_priv, pad_msg);
19359+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
19360+ }
19361+
19362+ if (widx >= ridx)
19363+ words_free = NUM_WORDS_MTX_BUF - (widx - ridx);
19364+ else
19365+ words_free = ridx - widx;
19366+
19367+ BUG_ON(msg_num > words_free);
19368+ if (msg_num > words_free) {
19369+ ret = -EINVAL;
19370+ DRM_ERROR("MSVDX: msg_num > words_free, ret:%d\n", ret);
19371+ goto out;
19372+ }
19373+
19374+ while (msg_num > 0) {
19375+ PSB_WMSVDX32(*p_msg++, MSVDX_COMMS_TO_MTX_BUF + (widx << 2));
19376+ msg_num--;
19377+ widx++;
19378+ if (NUM_WORDS_MTX_BUF == widx)
19379+ widx = 0;
19380+ }
19381+ PSB_WMSVDX32(widx, MSVDX_COMMS_TO_MTX_WRT_INDEX);
19382+
19383+ /* Make sure clocks are enabled before we kick */
19384+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
19385+
19386+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
19387+
19388+ /* signal an interrupt to let the mtx know there is a new message */
19389+ PSB_WMSVDX32(1, MSVDX_MTX_KICKI);
19390+
19391+out:
19392+ return ret;
19393+}
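
The to-MTX command buffer is a circular buffer of 32-bit words: free space depends on whether the write index has wrapped past the read index, and a message that would straddle the end of the buffer is preceded by a padding message. The stand-alone snippet below exercises only the free-space rule; BUF_WORDS is an arbitrary stand-in for NUM_WORDS_MTX_BUF.

    /* Stand-alone illustration of the ring free-space rule used above. */
    #include <stdint.h>
    #include <stdio.h>

    #define BUF_WORDS 32u   /* assumed size, for illustration only */

    static uint32_t words_free(uint32_t ridx, uint32_t widx)
    {
            return (widx >= ridx) ? BUF_WORDS - (widx - ridx) : ridx - widx;
    }

    int main(void)
    {
            printf("empty:   free(4, 4)  = %u\n", words_free(4, 4));   /* 32 */
            printf("ahead:   free(4, 20) = %u\n", words_free(4, 20));  /* 16 */
            printf("wrapped: free(28, 3) = %u\n", words_free(28, 3));  /* 25 */
            return 0;
    }
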
19394+
19395+/*
19396+ * MSVDX MTX interrupt
19397+ */
19398+void psb_msvdx_mtx_interrupt(struct drm_device *dev)
19399+{
19400+ struct drm_psb_private *dev_priv =
19401+ (struct drm_psb_private *)dev->dev_private;
19402+ static uint32_t buf[128]; /* message buffer */
19403+ uint32_t ridx, widx;
19404+ uint32_t num, ofs; /* message num and offset */
19405+
19406+ PSB_DEBUG_GENERAL("MSVDX:Got a MSVDX MTX interrupt\n");
19407+
19408+ /* Are clocks enabled - If not enable before
19409+ * attempting to read from VLR
19410+ */
19411+ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all)) {
19412+ PSB_DEBUG_GENERAL("MSVDX:Clocks disabled when Interrupt set\n");
19413+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
19414+ }
19415+
19416+loop: /* just for coding style check */
19417+ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_RD_INDEX);
19418+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_WRT_INDEX);
19419+
19420+ /* Get out of here if nothing */
19421+ if (ridx == widx)
19422+ goto done;
19423+
19424+ ofs = 0;
19425+ buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2));
19426+
19427+ /* round to nearest word */
19428+ num = (MEMIO_READ_FIELD(buf, FWRK_GENMSG_SIZE) + 3) / 4;
19429+
19430+ /* ASSERT(num <= sizeof(buf) / sizeof(uint32_t)); */
19431+
19432+ if (++ridx >= NUM_WORDS_HOST_BUF)
19433+ ridx = 0;
19434+
19435+ for (ofs++; ofs < num; ofs++) {
19436+ buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2));
19437+
19438+ if (++ridx >= NUM_WORDS_HOST_BUF)
19439+ ridx = 0;
19440+ }
19441+
19442+ /* Update the Read index */
19443+ PSB_WMSVDX32(ridx, MSVDX_COMMS_TO_HOST_RD_INDEX);
19444+
19445+ if (dev_priv->msvdx_needs_reset)
19446+ goto loop;
19447+
19448+ switch (MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID)) {
19449+ case VA_MSGID_CMD_HW_PANIC:
19450+ case VA_MSGID_CMD_FAILED: {
19451+ uint32_t fence = MEMIO_READ_FIELD(buf,
19452+ FW_VA_CMD_FAILED_FENCE_VALUE);
19453+ uint32_t fault = MEMIO_READ_FIELD(buf,
19454+ FW_VA_CMD_FAILED_IRQSTATUS);
19455+ uint32_t msg_id = MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID);
19456+ uint32_t diff = 0;
19457+
19458+ if (msg_id == VA_MSGID_CMD_HW_PANIC)
19459+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_HW_PANIC:"
19460+ "Fault detected"
19461+ " - Fence: %08x, Status: %08x"
19462+ " - resetting and ignoring error\n",
19463+ fence, fault);
19464+ else
19465+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_FAILED:"
19466+ "Fault detected"
19467+ " - Fence: %08x, Status: %08x"
19468+ " - resetting and ignoring error\n",
19469+ fence, fault);
19470+
19471+ dev_priv->msvdx_needs_reset = 1;
19472+
19473+ if (msg_id == VA_MSGID_CMD_HW_PANIC) {
19474+ diff = dev_priv->msvdx_current_sequence
19475+ - dev_priv->sequence[PSB_ENGINE_VIDEO];
19476+
19477+ if (diff > 0x0FFFFFFF)
19478+ dev_priv->msvdx_current_sequence++;
19479+
19480+ PSB_DEBUG_GENERAL("MSVDX: Fence ID missing, "
19481+ "assuming %08x\n",
19482+ dev_priv->msvdx_current_sequence);
19483+ } else {
19484+ dev_priv->msvdx_current_sequence = fence;
19485+ }
19486+
19487+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
19488+ dev_priv->msvdx_current_sequence,
19489+ _PSB_FENCE_TYPE_EXE, DRM_CMD_FAILED);
19490+
19491+ /* Flush the command queue */
19492+ psb_msvdx_flush_cmd_queue(dev);
19493+
19494+ goto done;
19495+ }
19496+ case VA_MSGID_CMD_COMPLETED: {
19497+ uint32_t fence = MEMIO_READ_FIELD(buf,
19498+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
19499+ uint32_t flags = MEMIO_READ_FIELD(buf,
19500+ FW_VA_CMD_COMPLETED_FLAGS);
19501+
19502+ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED: "
19503+ "FenceID: %08x, flags: 0x%x\n",
19504+ fence, flags);
19505+
19506+ dev_priv->msvdx_current_sequence = fence;
19507+
19508+ psb_fence_handler(dev, PSB_ENGINE_VIDEO);
19509+
19510+ if (flags & FW_VA_RENDER_HOST_INT) {
19511+ /*Now send the next command from the msvdx cmd queue */
19512+ psb_msvdx_dequeue_send(dev);
19513+ goto done;
19514+ }
19515+
19516+ break;
19517+ }
19518+ case VA_MSGID_CMD_COMPLETED_BATCH: {
19519+ uint32_t fence = MEMIO_READ_FIELD(buf,
19520+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
19521+ uint32_t tickcnt = MEMIO_READ_FIELD(buf,
19522+ FW_VA_CMD_COMPLETED_NO_TICKS);
19523+
19524+ /* we have the fence value in the message */
19525+ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED_BATCH:"
19526+ " FenceID: %08x, TickCount: %08x\n",
19527+ fence, tickcnt);
19528+ dev_priv->msvdx_current_sequence = fence;
19529+
19530+ break;
19531+ }
19532+ case VA_MSGID_ACK:
19533+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_ACK\n");
19534+ break;
19535+
19536+ case VA_MSGID_TEST1:
19537+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST1\n");
19538+ break;
19539+
19540+ case VA_MSGID_TEST2:
19541+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST2\n");
19542+ break;
19543+ /* Don't need to do anything with these messages */
19544+
19545+ case VA_MSGID_DEBLOCK_REQUIRED: {
19546+ uint32_t ctxid = MEMIO_READ_FIELD(buf,
19547+ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
19548+
19549+ /* The BE will now be locked. */
19550+ /* Unblock rendec by reading the mtx2mtx end of slice */
19551+ (void) PSB_RMSVDX32(MSVDX_RENDEC_READ_DATA);
19552+
19553+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_DEBLOCK_REQUIRED"
19554+ " Context=%08x\n", ctxid);
19555+ goto done;
19556+ }
19557+ default:
19558+ DRM_ERROR("ERROR: msvdx Unknown message from MTX\n");
19559+ goto done;
19560+ }
19561+
19562+done:
19563+
19564+#if 1
19565+ if (!dev_priv->msvdx_busy) {
19566+ /* If the firmware says the hardware is idle
19567+ * and the CCB is empty then we can power down
19568+ */
19569+ uint32_t fs_status = PSB_RMSVDX32(MSVDX_COMMS_FW_STATUS);
19570+ uint32_t ccb_roff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
19571+ uint32_t ccb_woff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
19572+
19573+ /* check that clocks are enabled before reading VLR */
19574+ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
19575+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
19576+
19577+ if ((fs_status & MSVDX_FW_STATUS_HW_IDLE) &&
19578+ (ccb_roff == ccb_woff)) {
19579+ PSB_DEBUG_GENERAL("MSVDX: Setting clock to minimal\n");
19580+ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
19581+ }
19582+ }
19583+#endif
19584+ DRM_MEMORYBARRIER(); /* TBD check this... */
19585+}
19586+
19587+void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
19588+ int *msvdx_lockup, int *msvdx_idle)
19589+{
19590+ uint32_t tmp;
19591+ *msvdx_lockup = 0;
19592+ *msvdx_idle = 1;
19593+
19594+ if (!dev_priv->has_msvdx)
19595+ return;
19596+#if 0
19597+ PSB_DEBUG_GENERAL("MSVDXTimer: current_sequence:%d "
19598+ "last_sequence:%d and last_submitted_sequence :%d\n",
19599+ dev_priv->msvdx_current_sequence,
19600+ dev_priv->msvdx_last_sequence,
19601+ dev_priv->sequence[PSB_ENGINE_VIDEO]);
19602+#endif
19603+
19604+ tmp = dev_priv->msvdx_current_sequence -
19605+ dev_priv->sequence[PSB_ENGINE_VIDEO];
19606+
19607+ if (tmp > 0x0FFFFFFF) {
19608+ if (dev_priv->msvdx_current_sequence ==
19609+ dev_priv->msvdx_last_sequence) {
19610+ DRM_ERROR("MSVDXTimer:locked-up for sequence:%d\n",
19611+ dev_priv->msvdx_current_sequence);
19612+ *msvdx_lockup = 1;
19613+ } else {
19614+ PSB_DEBUG_GENERAL("MSVDXTimer: "
19615+ "msvdx responded fine so far\n");
19616+ dev_priv->msvdx_last_sequence =
19617+ dev_priv->msvdx_current_sequence;
19618+ *msvdx_idle = 0;
19619+ }
19620+ }
19621+}
19622+
19623+/* power up msvdx, OSPM function */
19624+int psb_power_up_msvdx(struct drm_device *dev)
19625+{
19626+ struct drm_psb_private *dev_priv =
19627+ (struct drm_psb_private *)dev->dev_private;
19628+ int ret;
19629+
19630+ if ((dev_priv->msvdx_state & PSB_PWR_STATE_MASK) != PSB_PWR_STATE_D0i3)
19631+ return -EINVAL;
19632+
19633+ PSB_DEBUG_TMP("power up msvdx\n");
19634+ dump_stack();
19635+
19636+ psb_up_island_power(dev, PSB_VIDEO_DEC_ISLAND);
19637+
19638+ ret = psb_msvdx_init(dev);
19639+ if (ret) {
19640+ DRM_ERROR("failed to init MSVDX during power up\n");
19641+ goto err;
19642+ }
19643+ PSB_WMSVDX32(dev_priv->msvdx_clk_state, MSVDX_MAN_CLK_ENABLE);
19644+
19645+ PSB_DEBUG_GENERAL("FIXME restore registers or init msvdx\n");
19646+
19647+ PSB_DEBUG_GENERAL("FIXME MSVDX MMU setting up\n");
19648+
19649+ dev_priv->msvdx_state = PSB_PWR_STATE_D0i0;
19650+ return 0;
19651+
19652+err:
19653+ return -1;
19654+}
19655+
19656+int psb_power_down_msvdx(struct drm_device *dev)
19657+{
19658+ struct drm_psb_private *dev_priv =
19659+ (struct drm_psb_private *)dev->dev_private;
19660+
19661+ if ((dev_priv->msvdx_state & PSB_PWR_STATE_MASK) != PSB_PWR_STATE_D0i0)
19662+ return -EINVAL;
19663+ if (dev_priv->msvdx_busy) {
19664+ PSB_DEBUG_GENERAL("FIXME: MSVDX is busy, should wait for it\n");
19665+ return -EBUSY;
19666+ }
19667+
19668+ dev_priv->msvdx_clk_state = PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE);
19669+ PSB_DEBUG_GENERAL("FIXME: save MSVDX register\n");
19670+
19671+ PSB_DEBUG_GENERAL("FIXME: save MSVDX context\n");
19672+ psb_down_island_power(dev, PSB_VIDEO_DEC_ISLAND);
19673+
19674+ dev_priv->msvdx_state = PSB_PWR_STATE_D0i3;
19675+
19676+ return 0;
19677+}
19678diff -uNr a/drivers/gpu/drm/psb/psb_msvdx.h b/drivers/gpu/drm/psb/psb_msvdx.h
19679--- a/drivers/gpu/drm/psb/psb_msvdx.h 1969-12-31 16:00:00.000000000 -0800
19680+++ b/drivers/gpu/drm/psb/psb_msvdx.h 2009-04-07 13:28:38.000000000 -0700
19681@@ -0,0 +1,442 @@
19682+/**************************************************************************
19683+ *
19684+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
19685+ * Copyright (c) Imagination Technologies Limited, UK
19686+ * All Rights Reserved.
19687+ *
19688+ * Permission is hereby granted, free of charge, to any person obtaining a
19689+ * copy of this software and associated documentation files (the
19690+ * "Software"), to deal in the Software without restriction, including
19691+ * without limitation the rights to use, copy, modify, merge, publish,
19692+ * distribute, sub license, and/or sell copies of the Software, and to
19693+ * permit persons to whom the Software is furnished to do so, subject to
19694+ * the following conditions:
19695+ *
19696+ * The above copyright notice and this permission notice (including the
19697+ * next paragraph) shall be included in all copies or substantial portions
19698+ * of the Software.
19699+ *
19700+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19701+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19702+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19703+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
19704+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19705+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19706+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
19707+ *
19708+ **************************************************************************/
19709+
19710+#ifndef _PSB_MSVDX_H_
19711+#define _PSB_MSVDX_H_
19712+
19713+#include "psb_drv.h"
19714+
19715+void psb_msvdx_mtx_interrupt(struct drm_device *dev);
19716+int psb_msvdx_init(struct drm_device *dev);
19717+int psb_msvdx_uninit(struct drm_device *dev);
19718+int psb_msvdx_reset(struct drm_psb_private *dev_priv);
19719+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
19720+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *pvMsg);
19721+void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv);
19722+void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv);
19723+void psb_msvdx_flush_cmd_queue(struct drm_device *dev);
19724+extern void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
19725+ int *msvdx_lockup, int *msvdx_idle);
19726+int psb_setup_fw(struct drm_device *dev);
19727+int psb_power_up_msvdx(struct drm_device *dev);
19728+int psb_power_down_msvdx(struct drm_device *dev);
19729+
19730+/* Non-Optimal Invalidation is not default */
19731+#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2
19732+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
19733+
19734+#define FW_VA_RENDER_HOST_INT 0x00004000
19735+#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020
19736+
19737+/* There is no work currently underway on the hardware */
19738+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
19739+#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200
19740+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 \
19741+ (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | \
19742+ MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
19743+ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
19744+
19745+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 \
19746+ (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
19747+ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
19748+
19749+#define POULSBO_D0 0x5
19750+#define POULSBO_D1 0x6
19751+#define PSB_REVID_OFFSET 0x8
19752+
19753+#define MTX_CODE_BASE (0x80900000)
19754+#define MTX_DATA_BASE (0x82880000)
19755+#define PC_START_ADDRESS (0x80900000)
19756+
19757+#define MTX_CORE_CODE_MEM (0x10)
19758+#define MTX_CORE_DATA_MEM (0x18)
19759+
19760+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
19761+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
19762+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK \
19763+ (0x00010000)
19764+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK \
19765+ (0x00100000)
19766+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK \
19767+ (0x01000000)
19768+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK \
19769+ (0x10000000)
19770+
19771+#define clk_enable_all \
19772+(MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
19773+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
19774+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
19775+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
19776+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
19777+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
19778+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
19779+
19780+#define clk_enable_minimal \
19781+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
19782+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
19783+
19784+#define clk_enable_auto \
19785+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
19786+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \
19787+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \
19788+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \
19789+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \
19790+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
19791+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
19792+
19793+#define msvdx_sw_reset_all \
19794+(MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \
19795+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \
19796+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \
19797+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
19798+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK)
19799+
19800+#define MTX_INTERNAL_REG(R_SPECIFIER , U_SPECIFIER) \
19801+ (((R_SPECIFIER)<<4) | (U_SPECIFIER))
19802+#define MTX_PC MTX_INTERNAL_REG(0, 5)
19803+
19804+#define RENDEC_A_SIZE (1024 * 1024)
19805+#define RENDEC_B_SIZE (1024 * 1024)
19806+
19807+#define MEMIO_READ_FIELD(vpMem, field) \
19808+ ((uint32_t)(((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
19809+ & field##_MASK) >> field##_SHIFT))
19810+
19811+#define MEMIO_WRITE_FIELD(vpMem, field, value) \
19812+ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
19813+ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
19814+ & (field##_TYPE)~field##_MASK) | \
19815+ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT) & field##_MASK);
19816+
19817+#define MEMIO_WRITE_FIELD_LITE(vpMem, field, value) \
19818+ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
19819+ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) | \
19820+ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT)));
19821+
19822+#define REGIO_READ_FIELD(reg_val, reg, field) \
19823+ ((reg_val & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
19824+
19825+#define REGIO_WRITE_FIELD(reg_val, reg, field, value) \
19826+ (reg_val) = \
19827+ ((reg_val) & ~(reg##_##field##_MASK)) | \
19828+ (((value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
19829+
19830+#define REGIO_WRITE_FIELD_LITE(reg_val, reg, field, value) \
19831+ (reg_val) = \
19832+ ((reg_val) | ((value) << (reg##_##field##_SHIFT)));
19833+
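
The MEMIO_* macros above treat a message buffer as raw bytes and use each field's _TYPE/_OFFSET/_MASK/_SHIFT defines to read or update a field in place; the REGIO_* macros apply the same mask-and-shift pattern to values already read from a register. Below is a minimal user-space sketch of that pattern, using a hypothetical FOO_FIELD (not a define from this header) so it compiles on its own; the driver's macros cast the buffer pointer through uint32_t, which assumes a 32-bit build, so the sketch uses a char pointer instead.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical 16-bit field at byte offset 2 of a message buffer
     * (FOO_FIELD is illustrative, not part of psb_msvdx.h). */
    #define FOO_FIELD_TYPE   uint16_t
    #define FOO_FIELD_OFFSET (0x0002)
    #define FOO_FIELD_MASK   (0x0FFF)
    #define FOO_FIELD_SHIFT  (0)

    /* Same offset/mask/shift approach as the driver's MEMIO_* macros. */
    #define FIELD_PTR(mem, field) \
            ((field##_TYPE *)((char *)(mem) + field##_OFFSET))
    #define READ_FIELD(mem, field) \
            ((uint32_t)((*FIELD_PTR(mem, field) & field##_MASK) >> field##_SHIFT))
    #define WRITE_FIELD(mem, field, value) \
            (*FIELD_PTR(mem, field) = (field##_TYPE) \
                    ((*FIELD_PTR(mem, field) & (field##_TYPE)~field##_MASK) | \
                     (((uint32_t)(value) << field##_SHIFT) & field##_MASK)))

    int main(void)
    {
            uint32_t msg[2];

            memset(msg, 0, sizeof(msg));
            WRITE_FIELD(msg, FOO_FIELD, 0x123);
            printf("0x%x\n", READ_FIELD(msg, FOO_FIELD)); /* prints 0x123 */
            return 0;
    }
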
19834+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK \
19835+ (0x00000001)
19836+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK \
19837+ (0x00000002)
19838+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK \
19839+ (0x00000004)
19840+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK \
19841+ (0x00000008)
19842+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK \
19843+ (0x00000010)
19844+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK \
19845+ (0x00000020)
19846+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK \
19847+ (0x00000040)
19848+
19849+#define clk_enable_all \
19850+ (MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
19851+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
19852+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
19853+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
19854+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
19855+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
19856+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
19857+
19858+#define clk_enable_minimal \
19859+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
19860+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
19861+
19862+/* MTX registers */
19863+#define MSVDX_MTX_ENABLE (0x0000)
19864+#define MSVDX_MTX_KICKI (0x0088)
19865+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC)
19866+#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8)
19867+#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104)
19868+#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108)
19869+#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C)
19870+#define MSVDX_MTX_SOFT_RESET (0x0200)
19871+
19872+/* MSVDX registers */
19873+#define MSVDX_CONTROL (0x0600)
19874+#define MSVDX_INTERRUPT_CLEAR (0x060C)
19875+#define MSVDX_INTERRUPT_STATUS (0x0608)
19876+#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610)
19877+#define MSVDX_MMU_CONTROL0 (0x0680)
19878+#define MSVDX_MTX_RAM_BANK (0x06F0)
19879+#define MSVDX_MAN_CLK_ENABLE (0x0620)
19880+
19881+/* RENDEC registers */
19882+#define MSVDX_RENDEC_CONTROL0 (0x0868)
19883+#define MSVDX_RENDEC_CONTROL1 (0x086C)
19884+#define MSVDX_RENDEC_BUFFER_SIZE (0x0870)
19885+#define MSVDX_RENDEC_BASE_ADDR0 (0x0874)
19886+#define MSVDX_RENDEC_BASE_ADDR1 (0x0878)
19887+#define MSVDX_RENDEC_READ_DATA (0x0898)
19888+#define MSVDX_RENDEC_CONTEXT0 (0x0950)
19889+#define MSVDX_RENDEC_CONTEXT1 (0x0954)
19890+#define MSVDX_RENDEC_CONTEXT2 (0x0958)
19891+#define MSVDX_RENDEC_CONTEXT3 (0x095C)
19892+#define MSVDX_RENDEC_CONTEXT4 (0x0960)
19893+#define MSVDX_RENDEC_CONTEXT5 (0x0964)
19894+
19895+/*
19896+ * This defines the MSVDX communication buffer
19897+ */
19898+#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */
19899+/*!< Host buffer size (in 32-bit words) */
19900+#define NUM_WORDS_HOST_BUF (100)
19901+/*!< MTX buffer size (in 32-bit words) */
19902+#define NUM_WORDS_MTX_BUF (100)
19903+
19904+/* There is no work currently underway on the hardware */
19905+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
19906+
19907+#define MSVDX_COMMS_AREA_ADDR (0x02cc0)
19908+
19909+#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18)
19910+#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04)
19911+#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10)
19912+#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00)
19913+#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04)
19914+#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08)
19915+#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C)
19916+#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10)
19917+#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14)
19918+#define MSVDX_COMMS_TO_MTX_CB_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x18)
19919+#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C)
19920+#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20)
19921+#define MSVDX_COMMS_TO_MTX_BUF \
19922+ (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
19923+
19924+#define MSVDX_COMMS_AREA_END \
19925+ (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
19926+
19927+#if (MSVDX_COMMS_AREA_END != 0x03000)
19928+#error "MSVDX_COMMS_AREA_END must equal 0x03000"
19929+#endif
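
The #if above is a build-time check that the shared communication area ends exactly at 0x03000. The arithmetic follows from the defines: 0x02cc0 (area base) + 0x20 gives the host buffer at 0x02ce0, adding 100 words (0x190 bytes) gives the MTX buffer at 0x02e70, and another 0x190 lands on 0x03000 (the end-of-area define reuses NUM_WORDS_HOST_BUF for the second buffer; both buffers are 100 words, so the result is the same). A standalone restatement of the same check, with the values copied from the defines above and C11 _Static_assert standing in for the preprocessor test:

    #define AREA_ADDR          0x02cc0
    #define NUM_WORDS_HOST_BUF 100
    #define TO_HOST_BUF (AREA_ADDR + 0x20)
    #define TO_MTX_BUF  (TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
    #define AREA_END    (TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))

    /* Same condition as the #if/#error in the header. */
    _Static_assert(AREA_END == 0x03000, "MSVDX comms area must end at 0x03000");

    int main(void) { return 0; }
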
19930+
19931+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000)
19932+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31)
19933+
19934+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000)
19935+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16)
19936+
19937+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000)
19938+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20)
19939+
19940+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC)
19941+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2)
19942+
19943+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002)
19944+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1)
19945+
19946+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001)
19947+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0)
19948+
19949+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001)
19950+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0)
19951+
19952+#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001)
19953+#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0)
19954+
19955+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
19956+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
19957+
19958+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00)
19959+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8)
19960+
19961+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000)
19962+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14)
19963+
19964+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002)
19965+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1)
19966+
19967+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000)
19968+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16)
19969+
19970+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF)
19971+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0)
19972+
19973+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000)
19974+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16)
19975+
19976+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF)
19977+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0)
19978+
19979+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000)
19980+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18)
19981+
19982+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000)
19983+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16)
19984+
19985+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000)
19986+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24)
19987+
19988+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001)
19989+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0)
19990+
19991+/* Start of parser specific Host->MTX messages. */
19992+#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80)
19993+
19994+/* Start of parser specific MTX->Host messages. */
19995+#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0)
19996+
19997+#define FWRK_MSGID_PADDING (0)
19998+
19999+#define FWRK_GENMSG_SIZE_TYPE uint8_t
20000+#define FWRK_GENMSG_SIZE_MASK (0xFF)
20001+#define FWRK_GENMSG_SIZE_SHIFT (0)
20002+#define FWRK_GENMSG_SIZE_OFFSET (0x0000)
20003+#define FWRK_GENMSG_ID_TYPE uint8_t
20004+#define FWRK_GENMSG_ID_MASK (0xFF)
20005+#define FWRK_GENMSG_ID_SHIFT (0)
20006+#define FWRK_GENMSG_ID_OFFSET (0x0001)
20007+#define FWRK_PADMSG_SIZE (2)
20008+
20009+/* This type defines the framework specified message ids */
20010+enum {
20011+ /*! Sent by the DXVA driver on the host to the MTX firmware.
20012+ */
20013+ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
20014+ VA_MSGID_RENDER,
20015+ VA_MSGID_DEBLOCK,
20016+ VA_MSGID_BUBBLE,
20017+
20018+ /* Test Messages */
20019+ VA_MSGID_TEST1,
20020+ VA_MSGID_TEST2,
20021+
20022+ /*! Sent by the mtx firmware to itself.
20023+ */
20024+ VA_MSGID_RENDER_MC_INTERRUPT,
20025+
20026+ /*! Sent by the DXVA firmware on the MTX to the host.
20027+ */
20028+ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
20029+ VA_MSGID_CMD_COMPLETED_BATCH,
20030+ VA_MSGID_DEBLOCK_REQUIRED,
20031+ VA_MSGID_TEST_RESPONCE,
20032+ VA_MSGID_ACK,
20033+
20034+ VA_MSGID_CMD_FAILED,
20035+ VA_MSGID_CMD_UNSUPPORTED,
20036+ VA_MSGID_CMD_HW_PANIC,
20037+};
20038+
20039+/* MSVDX Firmware interface */
20040+#define FW_VA_INIT_SIZE (8)
20041+#define FW_VA_DEBUG_TEST2_SIZE (4)
20042+
20043+/* FW_VA_DEBUG_TEST2 MSG_SIZE */
20044+#define FW_VA_DEBUG_TEST2_MSG_SIZE_TYPE uint8_t
20045+#define FW_VA_DEBUG_TEST2_MSG_SIZE_MASK (0xFF)
20046+#define FW_VA_DEBUG_TEST2_MSG_SIZE_OFFSET (0x0000)
20047+#define FW_VA_DEBUG_TEST2_MSG_SIZE_SHIFT (0)
20048+
20049+/* FW_VA_DEBUG_TEST2 ID */
20050+#define FW_VA_DEBUG_TEST2_ID_TYPE uint8_t
20051+#define FW_VA_DEBUG_TEST2_ID_MASK (0xFF)
20052+#define FW_VA_DEBUG_TEST2_ID_OFFSET (0x0001)
20053+#define FW_VA_DEBUG_TEST2_ID_SHIFT (0)
20054+
20055+/* FW_VA_CMD_FAILED FENCE_VALUE */
20056+#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t
20057+#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF)
20058+#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004)
20059+#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0)
20060+
20061+/* FW_VA_CMD_FAILED IRQSTATUS */
20062+#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t
20063+#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF)
20064+#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008)
20065+#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0)
20066+
20067+/* FW_VA_CMD_COMPLETED FENCE_VALUE */
20068+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t
20069+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF)
20070+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004)
20071+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0)
20072+
20073+/* FW_VA_CMD_COMPLETED FLAGS */
20074+#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4)
20075+#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t
20076+#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF)
20077+#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF)
20078+#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008)
20079+#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0)
20080+
20081+/* FW_VA_CMD_COMPLETED NO_TICKS */
20082+#define FW_VA_CMD_COMPLETED_NO_TICKS_TYPE uint16_t
20083+#define FW_VA_CMD_COMPLETED_NO_TICKS_MASK (0xFFFF)
20084+#define FW_VA_CMD_COMPLETED_NO_TICKS_OFFSET (0x0002)
20085+#define FW_VA_CMD_COMPLETED_NO_TICKS_SHIFT (0)
20086+
20087+/* FW_VA_DEBLOCK_REQUIRED CONTEXT */
20088+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t
20089+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF)
20090+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004)
20091+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0)
20092+
20093+/* FW_VA_INIT GLOBAL_PTD */
20094+#define FW_VA_INIT_GLOBAL_PTD_TYPE uint32_t
20095+#define FW_VA_INIT_GLOBAL_PTD_MASK (0xFFFFFFFF)
20096+#define FW_VA_INIT_GLOBAL_PTD_OFFSET (0x0004)
20097+#define FW_VA_INIT_GLOBAL_PTD_SHIFT (0)
20098+
20099+/* FW_VA_RENDER FENCE_VALUE */
20100+#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t
20101+#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF)
20102+#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010)
20103+#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0)
20104+
20105+/* FW_VA_RENDER MMUPTD */
20106+#define FW_VA_RENDER_MMUPTD_TYPE uint32_t
20107+#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF)
20108+#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004)
20109+#define FW_VA_RENDER_MMUPTD_SHIFT (0)
20110+
20111+/* FW_VA_RENDER BUFFER_ADDRESS */
20112+#define FW_VA_RENDER_BUFFER_ADDRESS_TYPE uint32_t
20113+#define FW_VA_RENDER_BUFFER_ADDRESS_MASK (0xFFFFFFFF)
20114+#define FW_VA_RENDER_BUFFER_ADDRESS_OFFSET (0x0008)
20115+#define FW_VA_RENDER_BUFFER_ADDRESS_SHIFT (0)
20116+
20117+/* FW_VA_RENDER BUFFER_SIZE */
20118+#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t
20119+#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF)
20120+#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002)
20121+#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0)
20122+
20123+#endif
20124diff -uNr a/drivers/gpu/drm/psb/psb_msvdxinit.c b/drivers/gpu/drm/psb/psb_msvdxinit.c
20125--- a/drivers/gpu/drm/psb/psb_msvdxinit.c 1969-12-31 16:00:00.000000000 -0800
20126+++ b/drivers/gpu/drm/psb/psb_msvdxinit.c 2009-04-07 13:28:38.000000000 -0700
20127@@ -0,0 +1,668 @@
20128+/**
20129+ * file psb_msvdxinit.c
20130+ * MSVDX initialization and mtx-firmware upload
20131+ *
20132+ */
20133+
20134+/**************************************************************************
20135+ *
20136+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
20137+ * Copyright (c) Imagination Technologies Limited, UK
20138+ * All Rights Reserved.
20139+ *
20140+ * Permission is hereby granted, free of charge, to any person obtaining a
20141+ * copy of this software and associated documentation files (the
20142+ * "Software"), to deal in the Software without restriction, including
20143+ * without limitation the rights to use, copy, modify, merge, publish,
20144+ * distribute, sub license, and/or sell copies of the Software, and to
20145+ * permit persons to whom the Software is furnished to do so, subject to
20146+ * the following conditions:
20147+ *
20148+ * The above copyright notice and this permission notice (including the
20149+ * next paragraph) shall be included in all copies or substantial portions
20150+ * of the Software.
20151+ *
20152+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20153+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20154+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20155+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20156+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20157+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20158+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
20159+ *
20160+ **************************************************************************/
20161+
20162+#include <drm/drmP.h>
20163+#include <drm/drm.h>
20164+#include "psb_drv.h"
20165+#include "psb_msvdx.h"
20166+#include <linux/firmware.h>
20167+
20168+#define MSVDX_REG (dev_priv->msvdx_reg)
20169+uint8_t psb_rev_id;
20170+/*MSVDX FW header*/
20171+struct msvdx_fw {
20172+ uint32_t ver;
20173+ uint32_t text_size;
20174+ uint32_t data_size;
20175+ uint32_t data_location;
20176+};
20177+
20178+int psb_wait_for_register(struct drm_psb_private *dev_priv,
20179+ uint32_t offset, uint32_t value, uint32_t enable)
20180+{
20181+ uint32_t tmp;
20182+ uint32_t poll_cnt = 10000;
20183+ while (poll_cnt) {
20184+ tmp = PSB_RMSVDX32(offset);
20185+ if (value == (tmp & enable)) /* All the bits are reset */
20186+ return 0; /* So exit */
20187+
20188+ /* Wait a bit */
20189+ DRM_UDELAY(1000);
20190+ poll_cnt--;
20191+ }
20192+ DRM_ERROR("MSVDX: Timeout while waiting for register %08x:"
20193+ " expecting %08x (mask %08x), got %08x\n",
20194+ offset, value, enable, tmp);
20195+
20196+ return 1;
20197+}
20198+
20199+int psb_poll_mtx_irq(struct drm_psb_private *dev_priv)
20200+{
20201+ int ret = 0;
20202+ uint32_t mtx_int = 0;
20203+
20204+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
20205+ 1);
20206+
20207+ ret = psb_wait_for_register(dev_priv, MSVDX_INTERRUPT_STATUS,
20208+ /* Required value */
20209+ mtx_int,
20210+ /* Enabled bits */
20211+ mtx_int);
20212+
20213+ if (ret) {
20214+ DRM_ERROR("MSVDX: Error: MTX did not return"
20215+ " an interrupt within a reasonable time\n");
20216+ return ret;
20217+ }
20218+
20219+ PSB_DEBUG_IRQ("MSVDX: Got MTX Int\n");
20220+
20221+ /* Got it so clear the bit */
20222+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
20223+
20224+ return ret;
20225+}
20226+
20227+void psb_write_mtx_core_reg(struct drm_psb_private *dev_priv,
20228+ const uint32_t core_reg, const uint32_t val)
20229+{
20230+ uint32_t reg = 0;
20231+
20232+ /* Put data in MTX_RW_DATA */
20233+ PSB_WMSVDX32(val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
20234+
20235+ /* DREADY is set to 0 and request a write */
20236+ reg = core_reg;
20237+ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
20238+ MTX_RNW, 0);
20239+ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
20240+ MTX_DREADY, 0);
20241+ PSB_WMSVDX32(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
20242+
20243+ psb_wait_for_register(dev_priv,
20244+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
20245+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
20246+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
20247+}
20248+
20249+void psb_upload_fw(struct drm_psb_private *dev_priv,
20250+ const uint32_t data_mem, uint32_t ram_bank_size,
20251+ uint32_t address, const unsigned int words,
20252+ const uint32_t * const data)
20253+{
20254+ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
20255+ uint32_t access_ctrl;
20256+
20257+ /* Save the access control register... */
20258+ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
20259+
20260+ /* Wait for MCMSTAT to become idle (1) */
20261+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
20262+ 1, /* Required Value */
20263+ 0xffffffff /* Enables */);
20264+
20265+ for (loop = 0; loop < words; loop++) {
20266+ ram_id = data_mem + (address / ram_bank_size);
20267+ if (ram_id != cur_bank) {
20268+ addr = address >> 2;
20269+ ctrl = 0;
20270+ REGIO_WRITE_FIELD_LITE(ctrl,
20271+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20272+ MTX_MCMID, ram_id);
20273+ REGIO_WRITE_FIELD_LITE(ctrl,
20274+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20275+ MTX_MCM_ADDR, addr);
20276+ REGIO_WRITE_FIELD_LITE(ctrl,
20277+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20278+ MTX_MCMAI, 1);
20279+ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
20280+ cur_bank = ram_id;
20281+ }
20282+ address += 4;
20283+
20284+ PSB_WMSVDX32(data[loop],
20285+ MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
20286+
20287+ /* Wait for MCMSTAT to become idle (1) */
20288+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
20289+ 1, /* Required Value */
20290+ 0xffffffff /* Enables */);
20291+ }
20292+ PSB_DEBUG_GENERAL("MSVDX: Upload done\n");
20293+
20294+ /* Restore the access control register... */
20295+ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
20296+}
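
psb_upload_fw streams 32-bit words into MTX core memory. The destination bank is data_mem + (address / ram_bank_size), and the access-control register is only reprogrammed when the bank changes, since MTX_MCMAI auto-increments the address within a bank. A small standalone sketch of that bank walk, with a hypothetical 4 KiB bank size, shows how often a reprogram would be needed:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the bank selection in psb_upload_fw(): reprogram the RAM
     * access control only when data_mem + address / ram_bank_size changes.
     * The 4 KiB bank size and word count here are illustrative values. */
    int main(void)
    {
            const uint32_t data_mem = 0x10;       /* MTX_CORE_CODE_MEM */
            const uint32_t ram_bank_size = 4096;  /* hypothetical */
            uint32_t address = 0;
            uint32_t cur_bank = (uint32_t)~0;
            unsigned int reprograms = 0, words = 3000;

            for (unsigned int i = 0; i < words; i++) {
                    uint32_t ram_id = data_mem + (address / ram_bank_size);

                    if (ram_id != cur_bank) {
                            reprograms++; /* would rewrite MSVDX_MTX_RAM_ACCESS_CONTROL */
                            cur_bank = ram_id;
                    }
                    address += 4; /* MTX_MCMAI auto-increments within a bank */
            }
            /* 3000 words = 12000 bytes, spanning banks 0..2, so 3 reprograms */
            printf("%u\n", reprograms);
            return 0;
    }
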
20297+
20298+static int psb_verify_fw(struct drm_psb_private *dev_priv,
20299+ const uint32_t ram_bank_size,
20300+ const uint32_t data_mem, uint32_t address,
20301+ const uint32_t words, const uint32_t * const data)
20302+{
20303+ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
20304+ uint32_t access_ctrl;
20305+ int ret = 0;
20306+
20307+ /* Save the access control register... */
20308+ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
20309+
20310+ /* Wait for MCMSTAT to become idle (1) */
20311+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
20312+ 1, /* Required Value */
20313+ 0xffffffff /* Enables */);
20314+
20315+ for (loop = 0; loop < words; loop++) {
20316+ uint32_t tmp;
20317+ ram_id = data_mem + (address / ram_bank_size);
20318+
20319+ if (ram_id != cur_bank) {
20320+ addr = address >> 2;
20321+ ctrl = 0;
20322+ REGIO_WRITE_FIELD_LITE(ctrl,
20323+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20324+ MTX_MCMID, ram_id);
20325+ REGIO_WRITE_FIELD_LITE(ctrl,
20326+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20327+ MTX_MCM_ADDR, addr);
20328+ REGIO_WRITE_FIELD_LITE(ctrl,
20329+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20330+ MTX_MCMAI, 1);
20331+ REGIO_WRITE_FIELD_LITE(ctrl,
20332+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20333+ MTX_MCMR, 1);
20334+
20335+ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
20336+
20337+ cur_bank = ram_id;
20338+ }
20339+ address += 4;
20340+
20341+ /* Wait for MCMSTAT to become idle (1) */
20342+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
20343+ 1, /* Required Value */
20344+ 0xffffffff /* Enables */);
20345+
20346+ tmp = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
20347+ if (data[loop] != tmp) {
20348+ DRM_ERROR("psb: Firmware validation failed"
20349+ " at index=%08x\n", loop);
20350+ ret = 1;
20351+ break;
20352+ }
20353+ }
20354+
20355+ /* Restore the access control register... */
20356+ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
20357+
20358+ return ret;
20359+}
20360+
20361+static uint32_t *msvdx_get_fw(struct drm_device *dev,
20362+ const struct firmware **raw, uint8_t *name)
20363+{
20364+ struct drm_psb_private *dev_priv = dev->dev_private;
20365+ int rc, fw_size;
20366+ int *ptr = NULL;
20367+
20368+ rc = request_firmware(raw, name, &dev->pdev->dev);
20369+ if (rc < 0) {
20370+ DRM_ERROR("MSVDX: %s request_firmware failed: Reason %d\n",
20371+ name, rc);
20372+ return NULL;
20373+ }
20374+
20375+ if ((*raw)->size < sizeof(struct msvdx_fw)) {
20376+ DRM_ERROR("MSVDX: %s is not the correct size (%zd)\n",
20377+ name, (*raw)->size);
20378+ return NULL;
20379+ }
20380+
20381+ ptr = (int *) ((*raw))->data;
20382+
20383+ if (!ptr) {
20384+ DRM_ERROR("MSVDX: Failed to load %s\n", name);
20385+ return NULL;
20386+ }
20387+
20388+ /* another sanity check... */
20389+ fw_size = sizeof(struct msvdx_fw) +
20390+ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
20391+ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size;
20392+ if ((*raw)->size != fw_size) {
20393+ DRM_ERROR("MSVDX: %s is not the correct size (%zd)\n",
20394+ name, (*raw)->size);
20395+ return NULL;
20396+ }
20397+ dev_priv->msvdx_fw = drm_calloc(1, fw_size, DRM_MEM_DRIVER);
20398+ if (dev_priv->msvdx_fw == NULL)
20399+ DRM_ERROR("MSVDX: allocate FW buffer failed\n");
20400+ else {
20401+ memcpy(dev_priv->msvdx_fw, ptr, fw_size);
20402+ dev_priv->msvdx_fw_size = fw_size;
20403+ }
20404+
20405+ PSB_DEBUG_GENERAL("MSVDX: releasing firmware resources\n");
20406+ release_firmware(*raw);
20407+
20408+ return dev_priv->msvdx_fw;
20409+}
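
msvdx_get_fw validates that the blob is exactly header + text + data (both section sizes counted in 32-bit words) before caching it in dev_priv->msvdx_fw. A standalone sketch of the same size check on an in-memory header; the struct mirrors msvdx_fw above and the sample numbers are made up:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Mirror of the msvdx_fw header parsed by msvdx_get_fw(). */
    struct msvdx_fw {
            uint32_t ver;
            uint32_t text_size;     /* in 32-bit words */
            uint32_t data_size;     /* in 32-bit words */
            uint32_t data_location;
    };

    /* Returns 0 when blob_size matches header + text + data, as in the driver. */
    static int fw_size_ok(const struct msvdx_fw *fw, size_t blob_size)
    {
            size_t expected = sizeof(struct msvdx_fw) +
                              sizeof(uint32_t) * fw->text_size +
                              sizeof(uint32_t) * fw->data_size;

            return blob_size == expected ? 0 : -1;
    }

    int main(void)
    {
            /* Hypothetical header: 8 text words and 4 data words. */
            struct msvdx_fw hdr = { .ver = 2, .text_size = 8, .data_size = 4,
                                    .data_location = 0x82885000 };

            printf("%d\n", fw_size_ok(&hdr, sizeof(hdr) + 12 * 4)); /* 0: OK */
            printf("%d\n", fw_size_ok(&hdr, 100));                  /* -1: reject */
            return 0;
    }
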
20410+
20411+int psb_setup_fw(struct drm_device *dev)
20412+{
20413+ struct drm_psb_private *dev_priv = dev->dev_private;
20414+ int ret = 0;
20415+
20416+ uint32_t ram_bank_size;
20417+ struct msvdx_fw *fw;
20418+ uint32_t *fw_ptr = NULL;
20419+ uint32_t *text_ptr = NULL;
20420+ uint32_t *data_ptr = NULL;
20421+ const struct firmware *raw = NULL;
20422+ /* TODO: assert the clock is on; if not, turn it on to upload the code */
20423+
20424+ PSB_DEBUG_GENERAL("MSVDX: psb_setup_fw\n");
20425+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
20426+
20427+ /* Reset MTX */
20428+ PSB_WMSVDX32(MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK,
20429+ MSVDX_MTX_SOFT_RESET);
20430+
20431+ /* Initialise the communication control area to 0 */
20432+ if (psb_rev_id >= POULSBO_D1) {
20433+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1"
20434+ " or later revision.\n");
20435+ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1,
20436+ MSVDX_COMMS_OFFSET_FLAGS);
20437+ } else {
20438+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0"
20439+ " or earlier revision.\n");
20440+ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0,
20441+ MSVDX_COMMS_OFFSET_FLAGS);
20442+ }
20443+
20444+ PSB_WMSVDX32(0, MSVDX_COMMS_MSG_COUNTER);
20445+ PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
20446+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_RD_INDEX);
20447+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
20448+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_RD_INDEX);
20449+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
20450+ PSB_WMSVDX32(0, MSVDX_COMMS_FW_STATUS);
20451+
20452+ /* read register bank size */
20453+ {
20454+ uint32_t bank_size, reg;
20455+ reg = PSB_RMSVDX32(MSVDX_MTX_RAM_BANK);
20456+ bank_size =
20457+ REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK,
20458+ CR_MTX_RAM_BANK_SIZE);
20459+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
20460+ }
20461+
20462+ PSB_DEBUG_GENERAL("MSVDX: RAM bank size = %d bytes\n",
20463+ ram_bank_size);
20464+
20465+ /* if FW already loaded from storage */
20466+ if (dev_priv->msvdx_fw)
20467+ fw_ptr = dev_priv->msvdx_fw;
20468+ else
20469+ fw_ptr = msvdx_get_fw(dev, &raw, "msvdx_fw.bin");
20470+
20471+ if (!fw_ptr) {
20472+ DRM_ERROR("psb: No valid msvdx_fw.bin firmware found.\n");
20473+ ret = 1;
20474+ goto out;
20475+ }
20476+
20477+ fw = (struct msvdx_fw *) fw_ptr;
20478+ if (fw->ver != 0x02) {
20479+ DRM_ERROR("psb: msvdx_fw.bin firmware version mismatch,"
20480+ " got version=%02x, expected version=%02x\n",
20481+ fw->ver, 0x02);
20482+ ret = 1;
20483+ goto out;
20484+ }
20485+
20486+ text_ptr =
20487+ (uint32_t *) ((uint8_t *) fw_ptr + sizeof(struct msvdx_fw));
20488+ data_ptr = text_ptr + fw->text_size;
20489+
20490+ PSB_DEBUG_GENERAL("MSVDX: Retrieved pointers for firmware\n");
20491+ PSB_DEBUG_GENERAL("MSVDX: text_size: %d\n", fw->text_size);
20492+ PSB_DEBUG_GENERAL("MSVDX: data_size: %d\n", fw->data_size);
20493+ PSB_DEBUG_GENERAL("MSVDX: data_location: 0x%x\n",
20494+ fw->data_location);
20495+ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of text: 0x%x\n",
20496+ *text_ptr);
20497+ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of data: 0x%x\n",
20498+ *data_ptr);
20499+
20500+ PSB_DEBUG_GENERAL("MSVDX: Uploading firmware\n");
20501+ psb_upload_fw(dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
20502+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size,
20503+ text_ptr);
20504+ psb_upload_fw(dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
20505+ fw->data_location - MTX_DATA_BASE, fw->data_size,
20506+ data_ptr);
20507+
20508+#if 0
20509+ /* TODO: verify the code upload, possibly only in debug builds */
20510+ ret = psb_verify_fw(dev_priv, ram_bank_size,
20511+ MTX_CORE_CODE_MEM,
20512+ PC_START_ADDRESS - MTX_CODE_BASE,
20513+ fw->text_size, text_ptr);
20514+ if (ret) {
20515+ /* Firmware code upload failed */
20516+ ret = 1;
20517+ goto out;
20518+ }
20519+
20520+ ret = psb_verify_fw(dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
20521+ fw->data_location - MTX_DATA_BASE,
20522+ fw->data_size, data_ptr);
20523+ if (ret) {
20524+ /* Firmware data upload failed */
20525+ ret = 1;
20526+ goto out;
20527+ }
20528+#else
20529+ (void)psb_verify_fw;
20530+#endif
20531+ /* -- Set starting PC address */
20532+ psb_write_mtx_core_reg(dev_priv, MTX_PC, PC_START_ADDRESS);
20533+
20534+ /* -- Turn on the thread */
20535+ PSB_WMSVDX32(MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
20536+
20537+ /* Wait for the signature value to be written back */
20538+ ret = psb_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE,
20539+ MSVDX_COMMS_SIGNATURE_VALUE, /*Required value*/
20540+ 0xffffffff /* Enabled bits */);
20541+ if (ret) {
20542+ DRM_ERROR("MSVDX: firmware failed to initialize.\n");
20543+ goto out;
20544+ }
20545+
20546+ PSB_DEBUG_GENERAL("MSVDX: MTX Initial indications OK\n");
20547+ PSB_DEBUG_GENERAL("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
20548+ MSVDX_COMMS_AREA_ADDR);
20549+#if 0
20550+
20551+ /* Send test message */
20552+ {
20553+ uint32_t msg_buf[FW_VA_DEBUG_TEST2_SIZE >> 2];
20554+
20555+ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_MSG_SIZE,
20556+ FW_VA_DEBUG_TEST2_SIZE);
20557+ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_ID,
20558+ VA_MSGID_TEST2);
20559+
20560+ ret = psb_mtx_send(dev_priv, msg_buf);
20561+ if (ret) {
20562+ DRM_ERROR("psb: MSVDX sending fails.\n");
20563+ goto out;
20564+ }
20565+
20566+ /* Wait for Mtx to ack this message */
20567+ psb_poll_mtx_irq(dev_priv);
20568+
20569+ }
20570+#endif
20571+out:
20572+
20573+ return ret;
20574+}
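
The two psb_upload_fw calls above place the text section at PC_START_ADDRESS - MTX_CODE_BASE in code RAM (which is 0, since both are 0x80900000) and the data section at data_location - MTX_DATA_BASE in data RAM. A quick arithmetic sketch of those offsets, with a hypothetical data_location since the real value comes from the firmware header:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint32_t mtx_code_base = 0x80900000; /* MTX_CODE_BASE */
            const uint32_t mtx_data_base = 0x82880000; /* MTX_DATA_BASE */
            const uint32_t pc_start      = 0x80900000; /* PC_START_ADDRESS */
            const uint32_t data_location = 0x82885000; /* hypothetical, from fw header */

            /* Offsets actually passed to psb_upload_fw() for text and data. */
            printf("text offset: 0x%x\n", pc_start - mtx_code_base);      /* 0x0 */
            printf("data offset: 0x%x\n", data_location - mtx_data_base); /* 0x5000 */
            return 0;
    }
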
20575+
20576+
20577+static void psb_free_ccb(struct ttm_buffer_object **ccb)
20578+{
20579+ ttm_bo_unref(ccb);
20580+ *ccb = NULL;
20581+}
20582+
20583+/**
20584+ * Reset chip and disable interrupts.
20585+ * Return 0 on success, 1 on failure.
20586+ */
20587+int psb_msvdx_reset(struct drm_psb_private *dev_priv)
20588+{
20589+ int ret = 0;
20590+
20591+ /* Issue software reset */
20592+ PSB_WMSVDX32(msvdx_sw_reset_all, MSVDX_CONTROL);
20593+
20594+ ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL, 0,
20595+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
20596+
20597+ if (!ret) {
20598+ /* Clear interrupt enabled flag */
20599+ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
20600+
20601+ /* Clear any pending interrupt flags */
20602+ PSB_WMSVDX32(0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
20603+ }
20604+
20605+ /* mutex_destroy(&dev_priv->msvdx_mutex); */
20606+
20607+ return ret;
20608+}
20609+
20610+static int psb_allocate_ccb(struct drm_device *dev,
20611+ struct ttm_buffer_object **ccb,
20612+ uint32_t *base_addr, int size)
20613+{
20614+ struct drm_psb_private *dev_priv = psb_priv(dev);
20615+ struct ttm_bo_device *bdev = &dev_priv->bdev;
20616+ int ret;
20617+ struct ttm_bo_kmap_obj tmp_kmap;
20618+ bool is_iomem;
20619+
20620+ PSB_DEBUG_INIT("MSVDX: allocate CCB\n");
20621+
20622+ ret = ttm_buffer_object_create(bdev, size,
20623+ ttm_bo_type_kernel,
20624+ DRM_PSB_FLAG_MEM_KERNEL |
20625+ TTM_PL_FLAG_NO_EVICT, 0, 0, 0,
20626+ NULL, ccb);
20627+ if (ret) {
20628+ DRM_ERROR("MSVDX: failed to allocate CCB.\n");
20629+ *ccb = NULL;
20630+ return 1;
20631+ }
20632+
20633+ ret = ttm_bo_kmap(*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
20634+ if (ret) {
20635+ PSB_DEBUG_GENERAL("ttm_bo_kmap failed ret: %d\n", ret);
20636+ ttm_bo_unref(ccb);
20637+ *ccb = NULL;
20638+ return 1;
20639+ }
20640+
20641+ memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0,
20642+ RENDEC_A_SIZE);
20643+ ttm_bo_kunmap(&tmp_kmap);
20644+
20645+ *base_addr = (*ccb)->offset;
20646+ return 0;
20647+}
20648+
20649+int psb_msvdx_init(struct drm_device *dev)
20650+{
20651+ struct drm_psb_private *dev_priv = dev->dev_private;
20652+ uint32_t cmd;
20653+ /* uint32_t clk_gate_ctrl = clk_enable_all; */
20654+ int ret;
20655+
20656+ if (!dev_priv->ccb0) { /* one for the first time */
20657+ /* Initialize MSVDX command queueing */
20658+ INIT_LIST_HEAD(&dev_priv->msvdx_queue);
20659+ mutex_init(&dev_priv->msvdx_mutex);
20660+ spin_lock_init(&dev_priv->msvdx_lock);
20661+ /* Figure out the stepping */
20662+ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &psb_rev_id);
20663+ }
20664+
20665+ dev_priv->msvdx_busy = 0;
20666+
20667+ /* Enable Clocks */
20668+ PSB_DEBUG_GENERAL("Enabling clocks\n");
20669+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
20670+
20671+ /* Enable MMU by removing all bypass bits */
20672+ PSB_WMSVDX32(0, MSVDX_MMU_CONTROL0);
20673+
20674+ /* Firmware loading is deferred until the first command buffer is received */
20675+
20676+ PSB_DEBUG_GENERAL("MSVDX: Setting up RENDEC, allocating CCB 0/1\n");
20677+ /* Allocate device virtual memory as required by rendec.... */
20678+ if (!dev_priv->ccb0) {
20679+ ret = psb_allocate_ccb(dev, &dev_priv->ccb0,
20680+ &dev_priv->base_addr0,
20681+ RENDEC_A_SIZE);
20682+ if (ret)
20683+ goto err_exit;
20684+ }
20685+
20686+ if (!dev_priv->ccb1) {
20687+ ret = psb_allocate_ccb(dev, &dev_priv->ccb1,
20688+ &dev_priv->base_addr1,
20689+ RENDEC_B_SIZE);
20690+ if (ret)
20691+ goto err_exit;
20692+ }
20693+
20694+
20695+ PSB_DEBUG_GENERAL("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
20696+ dev_priv->base_addr0, dev_priv->base_addr1);
20697+
20698+ PSB_WMSVDX32(dev_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
20699+ PSB_WMSVDX32(dev_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
20700+
20701+ cmd = 0;
20702+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
20703+ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
20704+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
20705+ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
20706+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_BUFFER_SIZE);
20707+
20708+ cmd = 0;
20709+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
20710+ RENDEC_DECODE_START_SIZE, 0);
20711+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
20712+ RENDEC_BURST_SIZE_W, 1);
20713+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
20714+ RENDEC_BURST_SIZE_R, 1);
20715+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
20716+ RENDEC_EXTERNAL_MEMORY, 1);
20717+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL1);
20718+
20719+ cmd = 0x00101010;
20720+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT0);
20721+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT1);
20722+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT2);
20723+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT3);
20724+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT4);
20725+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT5);
20726+
20727+ cmd = 0;
20728+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE,
20729+ 1);
20730+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL0);
20731+
20732+ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
20733+ PSB_DEBUG_INIT("MSVDX: deferring firmware loading until"
20734+ " user space commands are received\n");
20735+
20736+ dev_priv->msvdx_fw_loaded = 0; /* need to load firmware */
20737+
20738+ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
20739+
20740+#if 0
20741+ ret = psb_setup_fw(dev);
20742+ if (ret)
20743+ goto err_exit;
20744+ /* Send Initialisation message to firmware */
20745+ if (0) {
20746+ uint32_t msg_init[FW_VA_INIT_SIZE >> 2];
20747+ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_SIZE,
20748+ FW_VA_INIT_SIZE);
20749+ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_ID, VA_MSGID_INIT);
20750+
20751+ /* Need to set this for all but A0 */
20752+ MEMIO_WRITE_FIELD(msg_init, FW_VA_INIT_GLOBAL_PTD,
20753+ psb_get_default_pd_addr(dev_priv->mmu));
20754+
20755+ ret = psb_mtx_send(dev_priv, msg_init);
20756+ if (ret)
20757+ goto err_exit;
20758+
20759+ psb_poll_mtx_irq(dev_priv);
20760+ }
20761+#endif
20762+
20763+ return 0;
20764+
20765+err_exit:
20766+ DRM_ERROR("MSVDX: initialization failed\n");
20767+ if (dev_priv->ccb0)
20768+ psb_free_ccb(&dev_priv->ccb0);
20769+ if (dev_priv->ccb1)
20770+ psb_free_ccb(&dev_priv->ccb1);
20771+
20772+ return 1;
20773+}
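
psb_msvdx_init programs the two RENDEC buffer sizes in 4 KiB units packed into a single register, size0 in bits 0..15 and size1 in bits 16..31 (see the MSVDX_RENDEC_BUFFER_SIZE fields in psb_msvdx.h). With both buffers at 1 MiB, each field holds 256. A standalone sketch of the same REGIO-style packing, with the mask and shift values copied from the header:

    #include <stdint.h>
    #include <stdio.h>

    /* Field layout copied from MSVDX_RENDEC_BUFFER_SIZE in psb_msvdx.h. */
    #define SIZE0_MASK  0x0000FFFFu
    #define SIZE0_SHIFT 0
    #define SIZE1_MASK  0xFFFF0000u
    #define SIZE1_SHIFT 16

    /* Same read-modify-write pattern as REGIO_WRITE_FIELD. */
    #define WRITE_FIELD(reg, mask, shift, val) \
            ((reg) = ((reg) & ~(mask)) | (((uint32_t)(val) << (shift)) & (mask)))

    int main(void)
    {
            const uint32_t rendec_a = 1024 * 1024; /* RENDEC_A_SIZE */
            const uint32_t rendec_b = 1024 * 1024; /* RENDEC_B_SIZE */
            uint32_t cmd = 0;

            WRITE_FIELD(cmd, SIZE0_MASK, SIZE0_SHIFT, rendec_a / 4096);
            WRITE_FIELD(cmd, SIZE1_MASK, SIZE1_SHIFT, rendec_b / 4096);
            printf("0x%08x\n", cmd); /* 0x01000100: 256 pages in each field */
            return 0;
    }
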
20774+
20775+int psb_msvdx_uninit(struct drm_device *dev)
20776+{
20777+ struct drm_psb_private *dev_priv = dev->dev_private;
20778+
20779+ /* Reset MSVDX chip */
20780+ psb_msvdx_reset(dev_priv);
20781+
20782+ /* PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */
20783+ PSB_DEBUG_INIT("MSVDX: set the MSVDX clock to 0\n");
20784+ PSB_WMSVDX32(0, MSVDX_MAN_CLK_ENABLE);
20785+
20786+ if (dev_priv->ccb0)
20787+ psb_free_ccb(&dev_priv->ccb0);
20788+ if (dev_priv->ccb1)
20789+ psb_free_ccb(&dev_priv->ccb1);
20790+ if (dev_priv->msvdx_fw)
20791+ drm_free(dev_priv->msvdx_fw, dev_priv->msvdx_fw_size,
20792+ DRM_MEM_DRIVER);
20793+
20794+ return 0;
20795+}
20796diff -uNr a/drivers/gpu/drm/psb/psb_reg.h b/drivers/gpu/drm/psb/psb_reg.h
20797--- a/drivers/gpu/drm/psb/psb_reg.h 1969-12-31 16:00:00.000000000 -0800
20798+++ b/drivers/gpu/drm/psb/psb_reg.h 2009-04-07 13:28:38.000000000 -0700
20799@@ -0,0 +1,569 @@
20800+/**************************************************************************
20801+ *
20802+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
20803+ * Copyright (c) 2007, Intel Corporation.
20804+ * All Rights Reserved.
20805+ *
20806+ * This program is free software; you can redistribute it and/or modify it
20807+ * under the terms and conditions of the GNU General Public License,
20808+ * version 2, as published by the Free Software Foundation.
20809+ *
20810+ * This program is distributed in the hope it will be useful, but WITHOUT
20811+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
20812+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20813+ * more details.
20814+ *
20815+ * You should have received a copy of the GNU General Public License along with
20816+ * this program; if not, write to the Free Software Foundation, Inc.,
20817+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20818+ *
20819+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
20820+ * develop this driver.
20821+ *
20822+ **************************************************************************/
20823+/*
20824+ */
20825+#ifndef _PSB_REG_H_
20826+#define _PSB_REG_H_
20827+
20828+#define PSB_CR_CLKGATECTL 0x0000
20829+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
20830+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
20831+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
20832+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
20833+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
20834+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
20835+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
20836+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
20837+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
20838+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
20839+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
20840+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
20841+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
20842+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
20843+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
20844+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
20845+
20846+#define PSB_CR_CORE_ID 0x0010
20847+#define _PSB_CC_ID_ID_SHIFT (16)
20848+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
20849+#define _PSB_CC_ID_CONFIG_SHIFT (0)
20850+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
20851+
20852+#define PSB_CR_CORE_REVISION 0x0014
20853+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
20854+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
20855+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
20856+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
20857+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
20858+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
20859+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
20860+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
20861+
20862+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
20863+
20864+#define PSB_CR_SOFT_RESET 0x0080
20865+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
20866+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
20867+#define _PSB_CS_RESET_USE_RESET (1 << 4)
20868+#define _PSB_CS_RESET_TA_RESET (1 << 3)
20869+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
20870+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
20871+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
20872+
20873+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
20874+
20875+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
20876+
20877+#define PSB_CR_EVENT_STATUS2 0x0118
20878+
20879+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
20880+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
20881+
20882+#define PSB_CR_EVENT_STATUS 0x012C
20883+
20884+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
20885+
20886+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
20887+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
20888+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
20889+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
20890+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
20891+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
20892+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
20893+#define _PSB_CE_SW_EVENT (1 << 14)
20894+#define _PSB_CE_TA_FINISHED (1 << 13)
20895+#define _PSB_CE_TA_TERMINATE (1 << 12)
20896+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
20897+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
20898+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
20899+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
20900+
20901+
20902+#define PSB_USE_OFFSET_MASK 0x0007FFFF
20903+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
20904+#define PSB_CR_USE_CODE_BASE0 0x0A0C
20905+#define PSB_CR_USE_CODE_BASE1 0x0A10
20906+#define PSB_CR_USE_CODE_BASE2 0x0A14
20907+#define PSB_CR_USE_CODE_BASE3 0x0A18
20908+#define PSB_CR_USE_CODE_BASE4 0x0A1C
20909+#define PSB_CR_USE_CODE_BASE5 0x0A20
20910+#define PSB_CR_USE_CODE_BASE6 0x0A24
20911+#define PSB_CR_USE_CODE_BASE7 0x0A28
20912+#define PSB_CR_USE_CODE_BASE8 0x0A2C
20913+#define PSB_CR_USE_CODE_BASE9 0x0A30
20914+#define PSB_CR_USE_CODE_BASE10 0x0A34
20915+#define PSB_CR_USE_CODE_BASE11 0x0A38
20916+#define PSB_CR_USE_CODE_BASE12 0x0A3C
20917+#define PSB_CR_USE_CODE_BASE13 0x0A40
20918+#define PSB_CR_USE_CODE_BASE14 0x0A44
20919+#define PSB_CR_USE_CODE_BASE15 0x0A48
20920+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
20921+#define _PSB_CUC_BASE_DM_SHIFT (25)
20922+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
20923+#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
20924+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
20925+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
20926+#define _PSB_CUC_DM_VERTEX (0)
20927+#define _PSB_CUC_DM_PIXEL (1)
20928+#define _PSB_CUC_DM_RESERVED (2)
20929+#define _PSB_CUC_DM_EDM (3)
20930+
20931+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
20932+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
20933+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
20934+
20935+#define PSB_CR_EVENT_KICKER 0x0AC4
20936+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
20937+
20938+#define PSB_CR_EVENT_KICK 0x0AC8
20939+#define _PSB_CE_KICK_NOW (1 << 0)
20940+
20941+
20942+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
20943+
20944+#define PSB_CR_BIF_CTRL 0x0C00
20945+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
20946+#define _PSB_CB_CTRL_INVALDC (1 << 3)
20947+#define _PSB_CB_CTRL_FLUSH (1 << 2)
20948+
20949+#define PSB_CR_BIF_INT_STAT 0x0C04
20950+
20951+#define PSB_CR_BIF_FAULT 0x0C08
20952+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
20953+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
20954+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
20955+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
20956+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
20957+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
20958+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
20959+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
20960+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
20961+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
20962+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
20963+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
20964+
20965+#define PSB_CR_BIF_BANK0 0x0C78
20966+
20967+#define PSB_CR_BIF_BANK1 0x0C7C
20968+
20969+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
20970+
20971+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
20972+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
20973+
20974+#define PSB_CR_2D_SOCIF 0x0E18
20975+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
20976+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
20977+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
20978+
20979+#define PSB_CR_2D_BLIT_STATUS 0x0E04
20980+#define _PSB_C2B_STATUS_BUSY (1 << 24)
20981+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
20982+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
20983+
20984+/*
20985+ * 2D defs.
20986+ */
20987+
20988+/*
20989+ * 2D Slave Port Data : Block Header's Object Type
20990+ */
20991+
20992+#define PSB_2D_CLIP_BH (0x00000000)
20993+#define PSB_2D_PAT_BH (0x10000000)
20994+#define PSB_2D_CTRL_BH (0x20000000)
20995+#define PSB_2D_SRC_OFF_BH (0x30000000)
20996+#define PSB_2D_MASK_OFF_BH (0x40000000)
20997+#define PSB_2D_RESERVED1_BH (0x50000000)
20998+#define PSB_2D_RESERVED2_BH (0x60000000)
20999+#define PSB_2D_FENCE_BH (0x70000000)
21000+#define PSB_2D_BLIT_BH (0x80000000)
21001+#define PSB_2D_SRC_SURF_BH (0x90000000)
21002+#define PSB_2D_DST_SURF_BH (0xA0000000)
21003+#define PSB_2D_PAT_SURF_BH (0xB0000000)
21004+#define PSB_2D_SRC_PAL_BH (0xC0000000)
21005+#define PSB_2D_PAT_PAL_BH (0xD0000000)
21006+#define PSB_2D_MASK_SURF_BH (0xE0000000)
21007+#define PSB_2D_FLUSH_BH (0xF0000000)
21008+
21009+/*
21010+ * Clip Definition block (PSB_2D_CLIP_BH)
21011+ */
21012+#define PSB_2D_CLIPCOUNT_MAX (1)
21013+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
21014+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
21015+#define PSB_2D_CLIPCOUNT_SHIFT (0)
21016+/* clip rectangle min & max */
21017+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
21018+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
21019+#define PSB_2D_CLIP_XMAX_SHIFT (12)
21020+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
21021+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
21022+#define PSB_2D_CLIP_XMIN_SHIFT (0)
21023+/* clip rectangle offset */
21024+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
21025+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
21026+#define PSB_2D_CLIP_YMAX_SHIFT (12)
21027+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
21028+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
21029+#define PSB_2D_CLIP_YMIN_SHIFT (0)
21030+
21031+/*
21032+ * Pattern Control (PSB_2D_PAT_BH)
21033+ */
21034+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
21035+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
21036+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
21037+#define PSB_2D_PAT_WIDTH_SHIFT (5)
21038+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
21039+#define PSB_2D_PAT_YSTART_SHIFT (10)
21040+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
21041+#define PSB_2D_PAT_XSTART_SHIFT (15)
21042+
21043+/*
21044+ * 2D Control block (PSB_2D_CTRL_BH)
21045+ */
21046+/* Present Flags */
21047+#define PSB_2D_SRCCK_CTRL (0x00000001)
21048+#define PSB_2D_DSTCK_CTRL (0x00000002)
21049+#define PSB_2D_ALPHA_CTRL (0x00000004)
21050+/* Colour Key Colour (SRC/DST)*/
21051+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
21052+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
21053+#define PSB_2D_CK_COL_SHIFT (0)
21054+/* Colour Key Mask (SRC/DST)*/
21055+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
21056+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
21057+#define PSB_2D_CK_MASK_SHIFT (0)
21058+/* Alpha Control (Alpha/RGB)*/
21059+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
21060+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
21061+#define PSB_2D_GBLALPHA_SHIFT (12)
21062+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
21063+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
21064+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
21065+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
21066+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
21067+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
21068+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
21069+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
21070+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
21071+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
21072+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
21073+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
21074+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
21075+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
21076+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
21077+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
21078+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
21079+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
21080+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
21081+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
21082+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
21083+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
21084+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
21085+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
21086+
21087+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
21088+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
21089+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
21090+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
21091+
21092+/*
21093+ *Source Offset (PSB_2D_SRC_OFF_BH)
21094+ */
21095+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
21096+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
21097+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
21098+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
21099+
21100+/*
21101+ * Mask Offset (PSB_2D_MASK_OFF_BH)
21102+ */
21103+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
21104+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
21105+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
21106+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
21107+
21108+/*
21109+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
21110+ */
21111+
21112+/*
21113+ *Blit Rectangle (PSB_2D_BLIT_BH)
21114+ */
21115+
21116+#define PSB_2D_ROT_MASK (3<<25)
21117+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
21118+#define PSB_2D_ROT_NONE (0<<25)
21119+#define PSB_2D_ROT_90DEGS (1<<25)
21120+#define PSB_2D_ROT_180DEGS (2<<25)
21121+#define PSB_2D_ROT_270DEGS (3<<25)
21122+
21123+#define PSB_2D_COPYORDER_MASK (3<<23)
21124+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
21125+#define PSB_2D_COPYORDER_TL2BR (0<<23)
21126+#define PSB_2D_COPYORDER_BR2TL (1<<23)
21127+#define PSB_2D_COPYORDER_TR2BL (2<<23)
21128+#define PSB_2D_COPYORDER_BL2TR (3<<23)
21129+
21130+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
21131+#define PSB_2D_DSTCK_DISABLE (0x00000000)
21132+#define PSB_2D_DSTCK_PASS (0x00200000)
21133+#define PSB_2D_DSTCK_REJECT (0x00400000)
21134+
21135+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
21136+#define PSB_2D_SRCCK_DISABLE (0x00000000)
21137+#define PSB_2D_SRCCK_PASS (0x00080000)
21138+#define PSB_2D_SRCCK_REJECT (0x00100000)
21139+
21140+#define PSB_2D_CLIP_ENABLE (0x00040000)
21141+
21142+#define PSB_2D_ALPHA_ENABLE (0x00020000)
21143+
21144+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
21145+#define PSB_2D_PAT_MASK (0x00010000)
21146+#define PSB_2D_USE_PAT (0x00010000)
21147+#define PSB_2D_USE_FILL (0x00000000)
21148+/*
21149+ * Tungsten Graphics note on rop codes: If rop A and rop B are
21150+ * identical, the mask surface will not be read and need not be
21151+ * set up.
21152+ */
21153+
21154+#define PSB_2D_ROP3B_MASK (0x0000FF00)
21155+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
21156+#define PSB_2D_ROP3B_SHIFT (8)
21157+/* rop code A */
21158+#define PSB_2D_ROP3A_MASK (0x000000FF)
21159+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
21160+#define PSB_2D_ROP3A_SHIFT (0)
21161+
21162+#define PSB_2D_ROP4_MASK (0x0000FFFF)
21163+/*
21164+ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
21165+ * Fill Colour RGBA8888
21166+ */
21167+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
21168+#define PSB_2D_FILLCOLOUR_SHIFT (0)
21169+/*
21170+ * DWORD1: (Always Present)
21171+ * X Start (Dest)
21172+ * Y Start (Dest)
21173+ */
21174+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
21175+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
21176+#define PSB_2D_DST_XSTART_SHIFT (12)
21177+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
21178+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
21179+#define PSB_2D_DST_YSTART_SHIFT (0)
21180+/*
21181+ * DWORD2: (Always Present)
21182+ * X Size (Dest)
21183+ * Y Size (Dest)
21184+ */
21185+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
21186+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
21187+#define PSB_2D_DST_XSIZE_SHIFT (12)
21188+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
21189+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
21190+#define PSB_2D_DST_YSIZE_SHIFT (0)
21191+
21192+/*
21193+ * Source Surface (PSB_2D_SRC_SURF_BH)
21194+ */
21195+/*
21196+ * WORD 0
21197+ */
21198+
21199+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
21200+#define PSB_2D_SRC_1_PAL (0x00000000)
21201+#define PSB_2D_SRC_2_PAL (0x00008000)
21202+#define PSB_2D_SRC_4_PAL (0x00010000)
21203+#define PSB_2D_SRC_8_PAL (0x00018000)
21204+#define PSB_2D_SRC_8_ALPHA (0x00020000)
21205+#define PSB_2D_SRC_4_ALPHA (0x00028000)
21206+#define PSB_2D_SRC_332RGB (0x00030000)
21207+#define PSB_2D_SRC_4444ARGB (0x00038000)
21208+#define PSB_2D_SRC_555RGB (0x00040000)
21209+#define PSB_2D_SRC_1555ARGB (0x00048000)
21210+#define PSB_2D_SRC_565RGB (0x00050000)
21211+#define PSB_2D_SRC_0888ARGB (0x00058000)
21212+#define PSB_2D_SRC_8888ARGB (0x00060000)
21213+#define PSB_2D_SRC_8888UYVY (0x00068000)
21214+#define PSB_2D_SRC_RESERVED (0x00070000)
21215+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
21216+
21217+
21218+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
21219+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
21220+#define PSB_2D_SRC_STRIDE_SHIFT (0)
21221+/*
21222+ * WORD 1 - Base Address
21223+ */
21224+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
21225+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
21226+#define PSB_2D_SRC_ADDR_SHIFT (2)
21227+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
21228+
21229+/*
21230+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
21231+ */
21232+/*
21233+ * WORD 0
21234+ */
21235+
21236+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
21237+#define PSB_2D_PAT_1_PAL (0x00000000)
21238+#define PSB_2D_PAT_2_PAL (0x00008000)
21239+#define PSB_2D_PAT_4_PAL (0x00010000)
21240+#define PSB_2D_PAT_8_PAL (0x00018000)
21241+#define PSB_2D_PAT_8_ALPHA (0x00020000)
21242+#define PSB_2D_PAT_4_ALPHA (0x00028000)
21243+#define PSB_2D_PAT_332RGB (0x00030000)
21244+#define PSB_2D_PAT_4444ARGB (0x00038000)
21245+#define PSB_2D_PAT_555RGB (0x00040000)
21246+#define PSB_2D_PAT_1555ARGB (0x00048000)
21247+#define PSB_2D_PAT_565RGB (0x00050000)
21248+#define PSB_2D_PAT_0888ARGB (0x00058000)
21249+#define PSB_2D_PAT_8888ARGB (0x00060000)
21250+
21251+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
21252+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
21253+#define PSB_2D_PAT_STRIDE_SHIFT (0)
21254+/*
21255+ * WORD 1 - Base Address
21256+ */
21257+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
21258+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
21259+#define PSB_2D_PAT_ADDR_SHIFT (2)
21260+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
21261+
21262+/*
21263+ * Destination Surface (PSB_2D_DST_SURF_BH)
21264+ */
21265+/*
21266+ * WORD 0
21267+ */
21268+
21269+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
21270+#define PSB_2D_DST_332RGB (0x00030000)
21271+#define PSB_2D_DST_4444ARGB (0x00038000)
21272+#define PSB_2D_DST_555RGB (0x00040000)
21273+#define PSB_2D_DST_1555ARGB (0x00048000)
21274+#define PSB_2D_DST_565RGB (0x00050000)
21275+#define PSB_2D_DST_0888ARGB (0x00058000)
21276+#define PSB_2D_DST_8888ARGB (0x00060000)
21277+#define PSB_2D_DST_8888AYUV (0x00070000)
21278+
21279+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
21280+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
21281+#define PSB_2D_DST_STRIDE_SHIFT (0)
21282+/*
21283+ * WORD 1 - Base Address
21284+ */
21285+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
21286+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
21287+#define PSB_2D_DST_ADDR_SHIFT (2)
21288+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
21289+
21290+/*
21291+ * Mask Surface (PSB_2D_MASK_SURF_BH)
21292+ */
21293+/*
21294+ * WORD 0
21295+ */
21296+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
21297+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
21298+#define PSB_2D_MASK_STRIDE_SHIFT (0)
21299+/*
21300+ * WORD 1 - Base Address
21301+ */
21302+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
21303+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
21304+#define PSB_2D_MASK_ADDR_SHIFT (2)
21305+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
21306+
21307+/*
21308+ * Source Palette (PSB_2D_SRC_PAL_BH)
21309+ */
21310+
21311+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
21312+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
21313+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
21314+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
21315+
21316+/*
21317+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
21318+ */
21319+
21320+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
21321+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
21322+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
21323+#define PSB_2D_PATPAL_BYTEALIGN (1024)
21324+
21325+/*
21326+ * Rop3 Codes (2 LS bytes)
21327+ */
21328+
21329+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
21330+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
21331+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
21332+#define PSB_2D_ROP3_BLACKNESS (0x0000)
21333+#define PSB_2D_ROP3_SRC (0xCC)
21334+#define PSB_2D_ROP3_PAT (0xF0)
21335+#define PSB_2D_ROP3_DST (0xAA)
21336+
21337+
21338+/*
21339+ * Sizes.
21340+ */
21341+
21342+#define PSB_SCENE_HW_COOKIE_SIZE 16
21343+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
21344+
21345+/*
21346+ * Scene stuff.
21347+ */
21348+
21349+#define PSB_NUM_HW_SCENES 2
21350+
21351+/*
21352+ * Scheduler completion actions.
21353+ */
21354+
21355+#define PSB_RASTER_BLOCK 0
21356+#define PSB_RASTER 1
21357+#define PSB_RETURN 2
21358+#define PSB_TA 3
21359+
21360+
21361+/*Power management*/
21362+#define PSB_PUNIT_PORT 0x04
21363+#define PSB_PWRGT_CNT 0x60
21364+#define PSB_PWRGT_STS 0x61
21365+#define PSB_PWRGT_GFX_MASK 0x3
21366+#define PSB_PWRGT_VID_ENC_MASK 0x30
21367+#define PSB_PWRGT_VID_DEC_MASK 0xc
21368+#endif
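The register definitions above follow a consistent MASK / CLRMASK / SHIFT convention: CLRMASK clears a field out of an existing command DWORD, and the new value is then shifted into place and trimmed with MASK (each CLRMASK is simply the bitwise complement of its MASK, which is why the blit-rectangle fields use ~PSB_2D_ROT_MASK and friends directly). A minimal, self-contained sketch of that packing pattern follows, reusing the destination-start macros defined above; the helper psb_2d_pack_dst_start() and the values in main() are illustrative only and are not part of the driver.

/* Illustrative only: how the PSB_2D_DST_XSTART/YSTART macros above are
 * typically combined into a single command DWORD. The helper name
 * psb_2d_pack_dst_start() is hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define PSB_2D_DST_XSTART_MASK    (0x00FFF000)
#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
#define PSB_2D_DST_XSTART_SHIFT   (12)
#define PSB_2D_DST_YSTART_MASK    (0x00000FFF)
#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
#define PSB_2D_DST_YSTART_SHIFT   (0)

static uint32_t psb_2d_pack_dst_start(uint32_t dword, uint32_t x, uint32_t y)
{
	/* Clear both fields, then insert the shifted, masked values. */
	dword &= PSB_2D_DST_XSTART_CLRMASK & PSB_2D_DST_YSTART_CLRMASK;
	dword |= (x << PSB_2D_DST_XSTART_SHIFT) & PSB_2D_DST_XSTART_MASK;
	dword |= (y << PSB_2D_DST_YSTART_SHIFT) & PSB_2D_DST_YSTART_MASK;
	return dword;
}

int main(void)
{
	/* Pack a destination start of (100, 50) into an empty DWORD. */
	printf("0x%08x\n", (unsigned) psb_2d_pack_dst_start(0, 100, 50));
	return 0;
}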
21369diff -uNr a/drivers/gpu/drm/psb/psb_reset.c b/drivers/gpu/drm/psb/psb_reset.c
21370--- a/drivers/gpu/drm/psb/psb_reset.c 1969-12-31 16:00:00.000000000 -0800
21371+++ b/drivers/gpu/drm/psb/psb_reset.c 2009-04-07 13:28:38.000000000 -0700
21372@@ -0,0 +1,423 @@
21373+/**************************************************************************
21374+ * Copyright (c) 2007, Intel Corporation.
21375+ * All Rights Reserved.
21376+ *
21377+ * This program is free software; you can redistribute it and/or modify it
21378+ * under the terms and conditions of the GNU General Public License,
21379+ * version 2, as published by the Free Software Foundation.
21380+ *
21381+ * This program is distributed in the hope it will be useful, but WITHOUT
21382+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21383+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
21384+ * more details.
21385+ *
21386+ * You should have received a copy of the GNU General Public License along with
21387+ * this program; if not, write to the Free Software Foundation, Inc.,
21388+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21389+ *
21390+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
21391+ * develop this driver.
21392+ *
21393+ **************************************************************************/
21394+/*
21395+ * Authors:
21396+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
21397+ */
21398+
21399+#include <drm/drmP.h>
21400+#include "psb_drv.h"
21401+#include "psb_reg.h"
21402+#include "psb_scene.h"
21403+#include "psb_msvdx.h"
21404+#include "lnc_topaz.h"
21405+#include <linux/spinlock.h>
21406+#define PSB_2D_TIMEOUT_MSEC 100
21407+
21408+void psb_reset(struct drm_psb_private *dev_priv, int reset_2d)
21409+{
21410+ uint32_t val;
21411+
21412+ val = _PSB_CS_RESET_BIF_RESET |
21413+ _PSB_CS_RESET_DPM_RESET |
21414+ _PSB_CS_RESET_TA_RESET |
21415+ _PSB_CS_RESET_USE_RESET |
21416+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET;
21417+
21418+ if (reset_2d)
21419+ val |= _PSB_CS_RESET_TWOD_RESET;
21420+
21421+ PSB_WSGX32(val, PSB_CR_SOFT_RESET);
21422+ (void) PSB_RSGX32(PSB_CR_SOFT_RESET);
21423+
21424+ msleep(1);
21425+
21426+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
21427+ wmb();
21428+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
21429+ PSB_CR_BIF_CTRL);
21430+ wmb();
21431+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
21432+
21433+ msleep(1);
21434+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
21435+ PSB_CR_BIF_CTRL);
21436+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
21437+}
21438+
21439+void psb_print_pagefault(struct drm_psb_private *dev_priv)
21440+{
21441+ uint32_t val;
21442+ uint32_t addr;
21443+
21444+ val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
21445+ addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
21446+
21447+ if (val) {
21448+ if (val & _PSB_CBI_STAT_PF_N_RW)
21449+ DRM_ERROR("Poulsbo MMU page fault:\n");
21450+ else
21451+ DRM_ERROR("Poulsbo MMU read / write "
21452+ "protection fault:\n");
21453+
21454+ if (val & _PSB_CBI_STAT_FAULT_CACHE)
21455+ DRM_ERROR("\tCache requestor.\n");
21456+ if (val & _PSB_CBI_STAT_FAULT_TA)
21457+ DRM_ERROR("\tTA requestor.\n");
21458+ if (val & _PSB_CBI_STAT_FAULT_VDM)
21459+ DRM_ERROR("\tVDM requestor.\n");
21460+ if (val & _PSB_CBI_STAT_FAULT_2D)
21461+ DRM_ERROR("\t2D requestor.\n");
21462+ if (val & _PSB_CBI_STAT_FAULT_PBE)
21463+ DRM_ERROR("\tPBE requestor.\n");
21464+ if (val & _PSB_CBI_STAT_FAULT_TSP)
21465+ DRM_ERROR("\tTSP requestor.\n");
21466+ if (val & _PSB_CBI_STAT_FAULT_ISP)
21467+ DRM_ERROR("\tISP requestor.\n");
21468+ if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
21469+ DRM_ERROR("\tUSSEPDS requestor.\n");
21470+ if (val & _PSB_CBI_STAT_FAULT_HOST)
21471+ DRM_ERROR("\tHost requestor.\n");
21472+
21473+ DRM_ERROR("\tMMU failing address is 0x%08x.\n",
21474+ (unsigned) addr);
21475+ }
21476+}
21477+
21478+void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
21479+{
21480+ struct timer_list *wt = &dev_priv->watchdog_timer;
21481+ unsigned long irq_flags;
21482+
21483+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
21484+ if (dev_priv->timer_available && !timer_pending(wt)) {
21485+ wt->expires = jiffies + PSB_WATCHDOG_DELAY;
21486+ add_timer(wt);
21487+ }
21488+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
21489+}
21490+
21491+#if 0
21492+static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv,
21493+ unsigned int engine, int *lockup,
21494+ int *idle)
21495+{
21496+ uint32_t received_seq;
21497+
21498+ received_seq = dev_priv->comm[engine << 4];
21499+ spin_lock(&dev_priv->sequence_lock);
21500+ *idle = (received_seq == dev_priv->sequence[engine]);
21501+ spin_unlock(&dev_priv->sequence_lock);
21502+
21503+ if (*idle) {
21504+ dev_priv->idle[engine] = 1;
21505+ *lockup = 0;
21506+ return;
21507+ }
21508+
21509+ if (dev_priv->idle[engine]) {
21510+ dev_priv->idle[engine] = 0;
21511+ dev_priv->last_sequence[engine] = received_seq;
21512+ *lockup = 0;
21513+ return;
21514+ }
21515+
21516+ *lockup = (dev_priv->last_sequence[engine] == received_seq);
21517+}
21518+
21519+#endif
21520+static void psb_watchdog_func(unsigned long data)
21521+{
21522+ struct drm_psb_private *dev_priv = (struct drm_psb_private *) data;
21523+ struct drm_device *dev = dev_priv->dev;
21524+ int lockup;
21525+ int msvdx_lockup;
21526+ int msvdx_idle;
21527+ int lockup_2d;
21528+#if 0
21529+ int topaz_lockup = 0;
21530+ int topaz_idle = 0;
21531+#endif
21532+ int idle_2d;
21533+ int idle;
21534+ unsigned long irq_flags;
21535+
21536+ psb_scheduler_lockup(dev_priv, &lockup, &idle);
21537+ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
21538+
21539+#if 0
21540+ if (IS_MRST(dev))
21541+ lnc_topaz_lockup(dev_priv, &topaz_lockup, &topaz_idle);
21542+#endif
21543+
21544+#if 0
21545+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
21546+#else
21547+ lockup_2d = false;
21548+ idle_2d = true;
21549+#endif
21550+ if (lockup || msvdx_lockup || lockup_2d) {
21551+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
21552+ dev_priv->timer_available = 0;
21553+ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
21554+ irq_flags);
21555+ if (lockup) {
21556+ psb_print_pagefault(dev_priv);
21557+ schedule_work(&dev_priv->watchdog_wq);
21558+ }
21559+ if (msvdx_lockup)
21560+ schedule_work(&dev_priv->msvdx_watchdog_wq);
21561+#if 0
21562+ if (IS_MRST(dev) && (topaz_lockup))
21563+ schedule_work(&dev_priv->topaz_watchdog_wq);
21564+#else
21565+ (void) dev;
21566+#endif
21567+ }
21568+ if (!idle || !msvdx_idle || !idle_2d /* || !topaz_idle */)
21569+ psb_schedule_watchdog(dev_priv);
21570+}
21571+
21572+void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
21573+{
21574+ struct drm_psb_private *dev_priv = dev->dev_private;
21575+ struct psb_msvdx_cmd_queue *msvdx_cmd;
21576+ struct list_head *list, *next;
21577+ /*Flush the msvdx cmd queue and signal all fences in the queue */
21578+ list_for_each_safe(list, next, &dev_priv->msvdx_queue) {
21579+ msvdx_cmd =
21580+ list_entry(list, struct psb_msvdx_cmd_queue, head);
21581+ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
21582+ msvdx_cmd->sequence);
21583+ dev_priv->msvdx_current_sequence = msvdx_cmd->sequence;
21584+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
21585+ dev_priv->msvdx_current_sequence,
21586+ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
21587+ list_del(list);
21588+ kfree(msvdx_cmd->cmd);
21589+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
21590+ DRM_MEM_DRIVER);
21591+ }
21592+}
21593+
21594+static void psb_msvdx_reset_wq(struct work_struct *work)
21595+{
21596+ struct drm_psb_private *dev_priv =
21597+ container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
21598+
21599+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
21600+ unsigned long irq_flags;
21601+
21602+ mutex_lock(&dev_priv->msvdx_mutex);
21603+ dev_priv->msvdx_needs_reset = 1;
21604+ dev_priv->msvdx_current_sequence++;
21605+ PSB_DEBUG_GENERAL
21606+ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
21607+ dev_priv->msvdx_current_sequence);
21608+
21609+ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
21610+ dev_priv->msvdx_current_sequence,
21611+ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
21612+
21613+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
21614+ dev_priv->timer_available = 1;
21615+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
21616+
21617+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
21618+ psb_msvdx_flush_cmd_queue(scheduler->dev);
21619+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
21620+
21621+ psb_schedule_watchdog(dev_priv);
21622+ mutex_unlock(&dev_priv->msvdx_mutex);
21623+}
21624+
21625+static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
21626+{
21627+ struct psb_xhw_buf buf;
21628+ uint32_t bif_ctrl;
21629+
21630+ INIT_LIST_HEAD(&buf.head);
21631+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
21632+ bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
21633+ PSB_WSGX32(bif_ctrl |
21634+ _PSB_CB_CTRL_CLEAR_FAULT |
21635+ _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
21636+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
21637+ msleep(1);
21638+ PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
21639+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
21640+ return psb_xhw_reset_dpm(dev_priv, &buf);
21641+}
21642+
21643+/*
21644+ * Block command submission and reset hardware and schedulers.
21645+ */
21646+
21647+static void psb_reset_wq(struct work_struct *work)
21648+{
21649+ struct drm_psb_private *dev_priv =
21650+ container_of(work, struct drm_psb_private, watchdog_wq);
21651+ int lockup_2d;
21652+ int idle_2d;
21653+ unsigned long irq_flags;
21654+ int ret;
21655+ int reset_count = 0;
21656+ struct psb_xhw_buf buf;
21657+ uint32_t xhw_lockup;
21658+
21659+ /*
21660+ * Block command submission.
21661+ */
21662+ PSB_DEBUG_PM("ioctl: psb_pl_reference\n");
21663+
21664+ mutex_lock(&dev_priv->reset_mutex);
21665+
21666+ INIT_LIST_HEAD(&buf.head);
21667+ ret = psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup);
21668+ if (likely(ret == 0)) {
21669+ if (psb_extend_timeout(dev_priv, xhw_lockup) == 0) {
21670+ /*
21671+ * no lockup, just re-schedule
21672+ */
21673+ spin_lock_irqsave(&dev_priv->watchdog_lock,
21674+ irq_flags);
21675+ dev_priv->timer_available = 1;
21676+ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
21677+ irq_flags);
21678+ psb_schedule_watchdog(dev_priv);
21679+ mutex_unlock(&dev_priv->reset_mutex);
21680+ return;
21681+ }
21682+ } else {
21683+ DRM_ERROR("Check lockup returned %d\n", ret);
21684+ }
21685+#if 0
21686+ msleep(PSB_2D_TIMEOUT_MSEC);
21687+
21688+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
21689+
21690+ if (lockup_2d) {
21691+ uint32_t seq_2d;
21692+ spin_lock(&dev_priv->sequence_lock);
21693+ seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
21694+ spin_unlock(&dev_priv->sequence_lock);
21695+ psb_fence_error(dev_priv->scheduler.dev,
21696+ PSB_ENGINE_2D,
21697+ seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
21698+ DRM_INFO("Resetting 2D engine.\n");
21699+ }
21700+
21701+ psb_reset(dev_priv, lockup_2d);
21702+#else
21703+ (void) lockup_2d;
21704+ (void) idle_2d;
21705+ psb_reset(dev_priv, 0);
21706+#endif
21707+ (void) psb_xhw_mmu_reset(dev_priv);
21708+ DRM_INFO("Resetting scheduler.\n");
21709+ psb_scheduler_pause(dev_priv);
21710+ psb_scheduler_reset(dev_priv, -EBUSY);
21711+ psb_scheduler_ta_mem_check(dev_priv);
21712+
21713+ while (dev_priv->ta_mem &&
21714+ !dev_priv->force_ta_mem_load && ++reset_count < 10) {
21715+ struct ttm_fence_object *fence;
21716+
21717+ /*
21718+ * TA memory is currently fenced so offsets
21719+ * are valid. Reload offsets into the dpm now.
21720+ */
21721+
21722+ struct psb_xhw_buf buf;
21723+ INIT_LIST_HEAD(&buf.head);
21724+
21725+ msleep(100);
21726+
21727+ fence = dev_priv->ta_mem->ta_memory->sync_obj;
21728+
21729+ DRM_INFO("Reloading TA memory at offset "
21730+ "0x%08lx to 0x%08lx seq %d\n",
21731+ dev_priv->ta_mem->ta_memory->offset,
21732+ dev_priv->ta_mem->ta_memory->offset +
21733+ (dev_priv->ta_mem->ta_memory->num_pages << PAGE_SHIFT),
21734+ fence->sequence);
21735+
21736+ fence = dev_priv->ta_mem->hw_data->sync_obj;
21737+
21738+ DRM_INFO("Reloading TA HW memory at offset "
21739+ "0x%08lx to 0x%08lx seq %u\n",
21740+ dev_priv->ta_mem->hw_data->offset,
21741+ dev_priv->ta_mem->hw_data->offset +
21742+ (dev_priv->ta_mem->hw_data->num_pages << PAGE_SHIFT),
21743+ fence->sequence);
21744+
21745+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
21746+ PSB_TA_MEM_FLAG_TA |
21747+ PSB_TA_MEM_FLAG_RASTER |
21748+ PSB_TA_MEM_FLAG_HOSTA |
21749+ PSB_TA_MEM_FLAG_HOSTD |
21750+ PSB_TA_MEM_FLAG_INIT,
21751+ dev_priv->ta_mem->ta_memory->
21752+ offset,
21753+ dev_priv->ta_mem->hw_data->
21754+ offset,
21755+ dev_priv->ta_mem->hw_cookie);
21756+ if (!ret)
21757+ break;
21758+
21759+ DRM_INFO("Reloading TA memory failed. Retrying.\n");
21760+ psb_reset(dev_priv, 0);
21761+ (void) psb_xhw_mmu_reset(dev_priv);
21762+ }
21763+
21764+ psb_scheduler_restart(dev_priv);
21765+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
21766+ dev_priv->timer_available = 1;
21767+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
21768+ mutex_unlock(&dev_priv->reset_mutex);
21769+}
21770+
21771+void psb_watchdog_init(struct drm_psb_private *dev_priv)
21772+{
21773+ struct timer_list *wt = &dev_priv->watchdog_timer;
21774+ unsigned long irq_flags;
21775+
21776+ spin_lock_init(&dev_priv->watchdog_lock);
21777+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
21778+ init_timer(wt);
21779+ INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
21780+ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
21781+ wt->data = (unsigned long) dev_priv;
21782+ wt->function = &psb_watchdog_func;
21783+ dev_priv->timer_available = 1;
21784+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
21785+}
21786+
21787+void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
21788+{
21789+ unsigned long irq_flags;
21790+
21791+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
21792+ dev_priv->timer_available = 0;
21793+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
21794+ (void) del_timer_sync(&dev_priv->watchdog_timer);
21795+}
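psb_reset.c above uses a common watchdog split: the timer callback psb_watchdog_func() samples engine progress, and on a suspected lockup it clears timer_available and defers the actual recovery to a workqueue item (psb_reset_wq() or psb_msvdx_reset_wq()) running in process context, which re-arms the watchdog when it finishes. Keeping recovery in a workqueue matters because the reset path sleeps (msleep(), mutexes), which is not allowed in timer context. The fragment below is a stripped-down userspace illustration of that structure, assuming POSIX threads; watchdog_tick(), do_reset() and the lockup flag are stand-ins rather than driver code.

/* Illustrative only: a userspace sketch of the watchdog split used in
 * psb_reset.c -- a lightweight tick detects a lockup and defers the
 * expensive reset. The lockup flag, the timer_available guard and the
 * function names are stand-ins for the driver's equivalents. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t watchdog_lock = PTHREAD_MUTEX_INITIALIZER;
static bool timer_available = true;   /* mirrors dev_priv->timer_available */
static bool engine_locked_up = true;  /* pretend a lockup was detected */

static void do_reset(void)
{
	/* In the driver this is psb_reset_wq(): reset the hardware, reload
	 * TA memory, then re-enable the watchdog timer. */
	printf("resetting engines\n");
	pthread_mutex_lock(&watchdog_lock);
	timer_available = true;
	pthread_mutex_unlock(&watchdog_lock);
}

static void watchdog_tick(void)
{
	bool schedule_reset = false;

	/* Fast check; disable re-arming while a reset is pending. */
	pthread_mutex_lock(&watchdog_lock);
	if (timer_available && engine_locked_up) {
		timer_available = false;
		schedule_reset = true;
	}
	pthread_mutex_unlock(&watchdog_lock);

	if (schedule_reset)
		do_reset();  /* the driver queues this on a workqueue */
}

int main(void)
{
	watchdog_tick();
	return 0;
}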
21796diff -uNr a/drivers/gpu/drm/psb/psb_scene.c b/drivers/gpu/drm/psb/psb_scene.c
21797--- a/drivers/gpu/drm/psb/psb_scene.c 1969-12-31 16:00:00.000000000 -0800
21798+++ b/drivers/gpu/drm/psb/psb_scene.c 2009-04-07 13:28:38.000000000 -0700
21799@@ -0,0 +1,523 @@
21800+/**************************************************************************
21801+ * Copyright (c) 2007, Intel Corporation.
21802+ * All Rights Reserved.
21803+ *
21804+ * This program is free software; you can redistribute it and/or modify it
21805+ * under the terms and conditions of the GNU General Public License,
21806+ * version 2, as published by the Free Software Foundation.
21807+ *
21808+ * This program is distributed in the hope it will be useful, but WITHOUT
21809+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21810+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
21811+ * more details.
21812+ *
21813+ * You should have received a copy of the GNU General Public License along with
21814+ * this program; if not, write to the Free Software Foundation, Inc.,
21815+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21816+ *
21817+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
21818+ * develop this driver.
21819+ *
21820+ **************************************************************************/
21821+/*
21822+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
21823+ */
21824+
21825+#include <drm/drmP.h>
21826+#include "psb_drv.h"
21827+#include "psb_scene.h"
21828+
21829+void psb_clear_scene_atomic(struct psb_scene *scene)
21830+{
21831+ int i;
21832+ struct page *page;
21833+ void *v;
21834+
21835+ for (i = 0; i < scene->clear_num_pages; ++i) {
21836+ page = ttm_tt_get_page(scene->hw_data->ttm,
21837+ scene->clear_p_start + i);
21838+ if (in_irq())
21839+ v = kmap_atomic(page, KM_IRQ0);
21840+ else
21841+ v = kmap_atomic(page, KM_USER0);
21842+
21843+ memset(v, 0, PAGE_SIZE);
21844+
21845+ if (in_irq())
21846+ kunmap_atomic(v, KM_IRQ0);
21847+ else
21848+ kunmap_atomic(v, KM_USER0);
21849+ }
21850+}
21851+
21852+int psb_clear_scene(struct psb_scene *scene)
21853+{
21854+ struct ttm_bo_kmap_obj bmo;
21855+ bool is_iomem;
21856+ void *addr;
21857+
21858+ int ret = ttm_bo_kmap(scene->hw_data, scene->clear_p_start,
21859+ scene->clear_num_pages, &bmo);
21860+
21861+ PSB_DEBUG_RENDER("Scene clear.\n");
21862+ if (ret)
21863+ return ret;
21864+
21865+ addr = ttm_kmap_obj_virtual(&bmo, &is_iomem);
21866+ BUG_ON(is_iomem);
21867+ memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
21868+ ttm_bo_kunmap(&bmo);
21869+
21870+ return 0;
21871+}
21872+
21873+static void psb_destroy_scene(struct kref *kref)
21874+{
21875+ struct psb_scene *scene =
21876+ container_of(kref, struct psb_scene, kref);
21877+
21878+ PSB_DEBUG_RENDER("Scene destroy.\n");
21879+ psb_scheduler_remove_scene_refs(scene);
21880+ ttm_bo_unref(&scene->hw_data);
21881+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
21882+}
21883+
21884+void psb_scene_unref(struct psb_scene **p_scene)
21885+{
21886+ struct psb_scene *scene = *p_scene;
21887+
21888+ PSB_DEBUG_RENDER("Scene unref.\n");
21889+ *p_scene = NULL;
21890+ kref_put(&scene->kref, &psb_destroy_scene);
21891+}
21892+
21893+struct psb_scene *psb_scene_ref(struct psb_scene *src)
21894+{
21895+ PSB_DEBUG_RENDER("Scene ref.\n");
21896+ kref_get(&src->kref);
21897+ return src;
21898+}
21899+
21900+static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
21901+ uint32_t w, uint32_t h)
21902+{
21903+ struct drm_psb_private *dev_priv =
21904+ (struct drm_psb_private *) dev->dev_private;
21905+ struct ttm_bo_device *bdev = &dev_priv->bdev;
21906+ int ret = -EINVAL;
21907+ struct psb_scene *scene;
21908+ uint32_t bo_size;
21909+ struct psb_xhw_buf buf;
21910+
21911+ PSB_DEBUG_RENDER("Alloc scene w %u h %u msaa %u\n", w & 0xffff, h,
21912+ w >> 16);
21913+
21914+ scene = drm_calloc(1, sizeof(*scene), DRM_MEM_DRIVER);
21915+
21916+ if (!scene) {
21917+ DRM_ERROR("Out of memory allocating scene object.\n");
21918+ return NULL;
21919+ }
21920+
21921+ scene->dev = dev;
21922+ scene->w = w;
21923+ scene->h = h;
21924+ scene->hw_scene = NULL;
21925+ kref_init(&scene->kref);
21926+
21927+ INIT_LIST_HEAD(&buf.head);
21928+ ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
21929+ scene->hw_cookie, &bo_size,
21930+ &scene->clear_p_start,
21931+ &scene->clear_num_pages);
21932+ if (ret)
21933+ goto out_err;
21934+
21935+ ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel,
21936+ DRM_PSB_FLAG_MEM_MMU |
21937+ TTM_PL_FLAG_CACHED,
21938+ 0, 0, 1, NULL, &scene->hw_data);
21939+ if (ret)
21940+ goto out_err;
21941+
21942+ return scene;
21943+out_err:
21944+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
21945+ return NULL;
21946+}
21947+
21948+int psb_validate_scene_pool(struct psb_context *context,
21949+ struct psb_scene_pool *pool,
21950+ uint32_t w,
21951+ uint32_t h,
21952+ int final_pass, struct psb_scene **scene_p)
21953+{
21954+ struct drm_device *dev = pool->dev;
21955+ struct drm_psb_private *dev_priv =
21956+ (struct drm_psb_private *) dev->dev_private;
21957+ struct psb_scene *scene = pool->scenes[pool->cur_scene];
21958+ int ret;
21959+ unsigned long irq_flags;
21960+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
21961+ uint32_t bin_pt_offset;
21962+ uint32_t bin_param_offset;
21963+
21964+ PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n",
21965+ pool->cur_scene);
21966+
21967+ if (unlikely(!dev_priv->ta_mem)) {
21968+ dev_priv->ta_mem =
21969+ psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages);
21970+ if (!dev_priv->ta_mem)
21971+ return -ENOMEM;
21972+
21973+ bin_pt_offset = ~0;
21974+ bin_param_offset = ~0;
21975+ } else {
21976+ bin_pt_offset = dev_priv->ta_mem->hw_data->offset;
21977+ bin_param_offset = dev_priv->ta_mem->ta_memory->offset;
21978+ }
21979+
21980+ pool->w = w;
21981+ pool->h = h;
21982+ if (scene && (scene->w != pool->w || scene->h != pool->h)) {
21983+ spin_lock_irqsave(&scheduler->lock, irq_flags);
21984+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
21985+ spin_unlock_irqrestore(&scheduler->lock,
21986+ irq_flags);
21987+ DRM_ERROR("Trying to resize a dirty scene.\n");
21988+ return -EINVAL;
21989+ }
21990+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
21991+ psb_scene_unref(&pool->scenes[pool->cur_scene]);
21992+ scene = NULL;
21993+ }
21994+
21995+ if (!scene) {
21996+ pool->scenes[pool->cur_scene] = scene =
21997+ psb_alloc_scene(pool->dev, pool->w, pool->h);
21998+
21999+ if (!scene)
22000+ return -ENOMEM;
22001+
22002+ scene->flags = PSB_SCENE_FLAG_CLEARED;
22003+ }
22004+
22005+ ret = psb_validate_kernel_buffer(context, scene->hw_data,
22006+ PSB_ENGINE_TA,
22007+ PSB_BO_FLAG_SCENE |
22008+ PSB_GPU_ACCESS_READ |
22009+ PSB_GPU_ACCESS_WRITE, 0);
22010+ if (unlikely(ret != 0))
22011+ return ret;
22012+
22013+ /*
22014+ * FIXME: We need atomic bit manipulation here for the
22015+ * scheduler. For now use the spinlock.
22016+ */
22017+
22018+ spin_lock_irqsave(&scheduler->lock, irq_flags);
22019+ if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) {
22020+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
22021+ PSB_DEBUG_RENDER("Waiting to clear scene memory.\n");
22022+ mutex_lock(&scene->hw_data->mutex);
22023+
22024+ ret = ttm_bo_wait(scene->hw_data, 0, 1, 0);
22025+ mutex_unlock(&scene->hw_data->mutex);
22026+ if (ret)
22027+ return ret;
22028+
22029+ ret = psb_clear_scene(scene);
22030+
22031+ if (ret)
22032+ return ret;
22033+ spin_lock_irqsave(&scheduler->lock, irq_flags);
22034+ scene->flags |= PSB_SCENE_FLAG_CLEARED;
22035+ }
22036+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
22037+
22038+ ret = psb_validate_kernel_buffer(context, dev_priv->ta_mem->hw_data,
22039+ PSB_ENGINE_TA,
22040+ PSB_BO_FLAG_SCENE |
22041+ PSB_GPU_ACCESS_READ |
22042+ PSB_GPU_ACCESS_WRITE, 0);
22043+ if (unlikely(ret != 0))
22044+ return ret;
22045+
22046+ ret =
22047+ psb_validate_kernel_buffer(context,
22048+ dev_priv->ta_mem->ta_memory,
22049+ PSB_ENGINE_TA,
22050+ PSB_BO_FLAG_SCENE |
22051+ PSB_GPU_ACCESS_READ |
22052+ PSB_GPU_ACCESS_WRITE, 0);
22053+
22054+ if (unlikely(ret != 0))
22055+ return ret;
22056+
22057+ if (unlikely(bin_param_offset !=
22058+ dev_priv->ta_mem->ta_memory->offset ||
22059+ bin_pt_offset !=
22060+ dev_priv->ta_mem->hw_data->offset ||
22061+ dev_priv->force_ta_mem_load)) {
22062+
22063+ struct psb_xhw_buf buf;
22064+
22065+ INIT_LIST_HEAD(&buf.head);
22066+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
22067+ PSB_TA_MEM_FLAG_TA |
22068+ PSB_TA_MEM_FLAG_RASTER |
22069+ PSB_TA_MEM_FLAG_HOSTA |
22070+ PSB_TA_MEM_FLAG_HOSTD |
22071+ PSB_TA_MEM_FLAG_INIT,
22072+ dev_priv->ta_mem->ta_memory->
22073+ offset,
22074+ dev_priv->ta_mem->hw_data->
22075+ offset,
22076+ dev_priv->ta_mem->hw_cookie);
22077+ if (ret)
22078+ return ret;
22079+
22080+ dev_priv->force_ta_mem_load = 0;
22081+ }
22082+
22083+ if (final_pass) {
22084+
22085+ /*
22086+ * Clear the scene on next use. Advance the scene counter.
22087+ */
22088+
22089+ spin_lock_irqsave(&scheduler->lock, irq_flags);
22090+ scene->flags &= ~PSB_SCENE_FLAG_CLEARED;
22091+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
22092+ pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes;
22093+ }
22094+
22095+ *scene_p = psb_scene_ref(scene);
22096+ return 0;
22097+}
22098+
22099+static void psb_scene_pool_destroy(struct kref *kref)
22100+{
22101+ struct psb_scene_pool *pool =
22102+ container_of(kref, struct psb_scene_pool, kref);
22103+ int i;
22104+ PSB_DEBUG_RENDER("Scene pool destroy.\n");
22105+
22106+ for (i = 0; i < pool->num_scenes; ++i) {
22107+ PSB_DEBUG_RENDER("scenes %d is 0x%08lx\n", i,
22108+ (unsigned long) pool->scenes[i]);
22109+ if (pool->scenes[i])
22110+ psb_scene_unref(&pool->scenes[i]);
22111+ }
22112+
22113+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
22114+}
22115+
22116+void psb_scene_pool_unref(struct psb_scene_pool **p_pool)
22117+{
22118+ struct psb_scene_pool *pool = *p_pool;
22119+
22120+ PSB_DEBUG_RENDER("Scene pool unref\n");
22121+ *p_pool = NULL;
22122+ kref_put(&pool->kref, &psb_scene_pool_destroy);
22123+}
22124+
22125+struct psb_scene_pool *psb_scene_pool_ref(struct psb_scene_pool *src)
22126+{
22127+ kref_get(&src->kref);
22128+ return src;
22129+}
22130+
22131+/*
22132+ * Callback for base object manager.
22133+ */
22134+
22135+static void psb_scene_pool_release(struct ttm_base_object **p_base)
22136+{
22137+ struct ttm_base_object *base = *p_base;
22138+ struct psb_scene_pool *pool =
22139+ container_of(base, struct psb_scene_pool, base);
22140+ *p_base = NULL;
22141+
22142+ psb_scene_pool_unref(&pool);
22143+}
22144+
22145+struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file *file_priv,
22146+ uint32_t handle,
22147+ int check_owner)
22148+{
22149+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
22150+ struct ttm_base_object *base;
22151+ struct psb_scene_pool *pool;
22152+
22153+
22154+ base = ttm_base_object_lookup(tfile, handle);
22155+ if (!base || (base->object_type != PSB_USER_OBJECT_SCENE_POOL)) {
22156+ DRM_ERROR("Could not find scene pool object 0x%08x\n",
22157+ handle);
22158+ return NULL;
22159+ }
22160+
22161+ if (check_owner && tfile != base->tfile && !base->shareable) {
22162+ ttm_base_object_unref(&base);
22163+ return NULL;
22164+ }
22165+
22166+ pool = container_of(base, struct psb_scene_pool, base);
22167+ kref_get(&pool->kref);
22168+ ttm_base_object_unref(&base);
22169+ return pool;
22170+}
22171+
22172+struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *file_priv,
22173+ int shareable,
22174+ uint32_t num_scenes,
22175+ uint32_t w, uint32_t h)
22176+{
22177+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
22178+ struct drm_device *dev = file_priv->minor->dev;
22179+ struct psb_scene_pool *pool;
22180+ int ret;
22181+
22182+ PSB_DEBUG_RENDER("Scene pool alloc\n");
22183+ pool = drm_calloc(1, sizeof(*pool), DRM_MEM_DRIVER);
22184+ if (!pool) {
22185+ DRM_ERROR("Out of memory allocating scene pool object.\n");
22186+ return NULL;
22187+ }
22188+ pool->w = w;
22189+ pool->h = h;
22190+ pool->dev = dev;
22191+ pool->num_scenes = num_scenes;
22192+ kref_init(&pool->kref);
22193+
22194+ /*
22195+ * The base object holds a reference.
22196+ */
22197+
22198+ kref_get(&pool->kref);
22199+ ret = ttm_base_object_init(tfile, &pool->base, shareable,
22200+ PSB_USER_OBJECT_SCENE_POOL,
22201+ &psb_scene_pool_release, NULL);
22202+ if (unlikely(ret != 0))
22203+ goto out_err;
22204+
22205+ return pool;
22206+out_err:
22207+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
22208+ return NULL;
22209+}
22210+
22211+/*
22212+ * Code to support multiple ta memory buffers.
22213+ */
22214+
22215+static void psb_ta_mem_destroy(struct kref *kref)
22216+{
22217+ struct psb_ta_mem *ta_mem =
22218+ container_of(kref, struct psb_ta_mem, kref);
22219+
22220+ ttm_bo_unref(&ta_mem->hw_data);
22221+ ttm_bo_unref(&ta_mem->ta_memory);
22222+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
22223+}
22224+
22225+void psb_ta_mem_unref(struct psb_ta_mem **p_ta_mem)
22226+{
22227+ struct psb_ta_mem *ta_mem = *p_ta_mem;
22228+ *p_ta_mem = NULL;
22229+ kref_put(&ta_mem->kref, psb_ta_mem_destroy);
22230+}
22231+
22232+struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src)
22233+{
22234+ kref_get(&src->kref);
22235+ return src;
22236+}
22237+
22238+struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages)
22239+{
22240+ struct drm_psb_private *dev_priv =
22241+ (struct drm_psb_private *) dev->dev_private;
22242+ struct ttm_bo_device *bdev = &dev_priv->bdev;
22243+ int ret = -EINVAL;
22244+ struct psb_ta_mem *ta_mem;
22245+ uint32_t bo_size;
22246+ uint32_t ta_min_size;
22247+ struct psb_xhw_buf buf;
22248+
22249+ INIT_LIST_HEAD(&buf.head);
22250+
22251+ ta_mem = drm_calloc(1, sizeof(*ta_mem), DRM_MEM_DRIVER);
22252+
22253+ if (!ta_mem) {
22254+ DRM_ERROR("Out of memory allocating parameter memory.\n");
22255+ return NULL;
22256+ }
22257+
22258+ kref_init(&ta_mem->kref);
22259+ ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages,
22260+ ta_mem->hw_cookie,
22261+ &bo_size,
22262+ &ta_min_size);
22263+ if (ret == -ENOMEM) {
22264+ DRM_ERROR("Parameter memory size is too small.\n");
22265+ DRM_INFO("Attempted to use %u kiB of parameter memory.\n",
22266+ (unsigned int) (pages * (PAGE_SIZE / 1024)));
22267+ DRM_INFO("The Xpsb driver thinks this is too small and\n");
22268+ DRM_INFO("suggests %u kiB. Check the psb DRM\n",
22269+ (unsigned int)(ta_min_size / 1024));
22270+ DRM_INFO("\"ta_mem_size\" parameter!\n");
22271+ }
22272+ if (ret)
22273+ goto out_err0;
22274+
22275+ ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel,
22276+ DRM_PSB_FLAG_MEM_MMU,
22277+ 0, 0, 0, NULL,
22278+ &ta_mem->hw_data);
22279+ if (ret)
22280+ goto out_err0;
22281+
22282+ bo_size = pages * PAGE_SIZE;
22283+ ret =
22284+ ttm_buffer_object_create(bdev, bo_size,
22285+ ttm_bo_type_kernel,
22286+ DRM_PSB_FLAG_MEM_RASTGEOM,
22287+ 0,
22288+ 1024 * 1024 >> PAGE_SHIFT, 0,
22289+ NULL,
22290+ &ta_mem->ta_memory);
22291+ if (ret)
22292+ goto out_err1;
22293+
22294+ return ta_mem;
22295+out_err1:
22296+ ttm_bo_unref(&ta_mem->hw_data);
22297+out_err0:
22298+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
22299+ return NULL;
22300+}
22301+
22302+int drm_psb_scene_unref_ioctl(struct drm_device *dev,
22303+ void *data, struct drm_file *file_priv)
22304+{
22305+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
22306+ struct drm_psb_scene *scene = (struct drm_psb_scene *) data;
22307+ int ret = 0;
22308+ struct drm_psb_private *dev_priv = psb_priv(dev);
22309+ if (!scene->handle_valid)
22310+ return 0;
22311+ down_read(&dev_priv->sgx_sem);
22312+ psb_check_power_state(dev, PSB_DEVICE_SGX);
22313+
22314+ ret =
22315+ ttm_ref_object_base_unref(tfile, scene->handle, TTM_REF_USAGE);
22316+ if (unlikely(ret != 0))
22317+ DRM_ERROR("Could not unreference a scene object.\n");
22318+ up_read(&dev_priv->sgx_sem);
22319+ if (drm_psb_ospm && IS_MRST(dev))
22320+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
22321+ return ret;
22322+}
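The scene and scene-pool objects in psb_scene.c are lifetime-managed with the standard kref idiom: kref_init() at allocation, kref_get() for every additional holder (including the TTM base object registered in psb_scene_pool_alloc()), and kref_put() with a destroy callback that runs on the final reference. The sketch below shows the same idiom in plain C; struct demo_pool and its helpers are illustrative stand-ins, not the driver's types.

/* Illustrative only: the kref-style lifetime used by psb_scene_pool.
 * demo_pool, demo_pool_get() and demo_pool_put() are stand-ins for
 * the kernel's struct kref / kref_get() / kref_put(). */
#include <stdio.h>
#include <stdlib.h>

struct demo_pool {
	unsigned int refcount;
	unsigned int num_scenes;
};

static struct demo_pool *demo_pool_alloc(unsigned int num_scenes)
{
	struct demo_pool *pool = calloc(1, sizeof(*pool));

	if (!pool)
		return NULL;
	pool->refcount = 1;     /* kref_init() */
	pool->num_scenes = num_scenes;
	return pool;
}

static struct demo_pool *demo_pool_get(struct demo_pool *pool)
{
	pool->refcount++;       /* kref_get() */
	return pool;
}

static void demo_pool_put(struct demo_pool *pool)
{
	/* kref_put(): the release callback runs on the final reference. */
	if (--pool->refcount == 0) {
		printf("destroying pool with %u scenes\n", pool->num_scenes);
		free(pool);
	}
}

int main(void)
{
	struct demo_pool *pool = demo_pool_alloc(2);

	demo_pool_get(pool);    /* e.g. the base object's reference */
	demo_pool_put(pool);    /* base object released */
	demo_pool_put(pool);    /* last user: destroy */
	return 0;
}

In the driver the equivalent release path is psb_scene_pool_release(), the base-object callback that drops the reference held by the TTM base object, so user-space handles and kernel-side references can be torn down independently.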
22323diff -uNr a/drivers/gpu/drm/psb/psb_scene.h b/drivers/gpu/drm/psb/psb_scene.h
22324--- a/drivers/gpu/drm/psb/psb_scene.h 1969-12-31 16:00:00.000000000 -0800
22325+++ b/drivers/gpu/drm/psb/psb_scene.h 2009-04-07 13:28:38.000000000 -0700
22326@@ -0,0 +1,119 @@
22327+/**************************************************************************
22328+ * Copyright (c) 2007, Intel Corporation.
22329+ * All Rights Reserved.
22330+ *
22331+ * This program is free software; you can redistribute it and/or modify it
22332+ * under the terms and conditions of the GNU General Public License,
22333+ * version 2, as published by the Free Software Foundation.
22334+ *
22335+ * This program is distributed in the hope it will be useful, but WITHOUT
22336+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22337+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22338+ * more details.
22339+ *
22340+ * You should have received a copy of the GNU General Public License along with
22341+ * this program; if not, write to the Free Software Foundation, Inc.,
22342+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22343+ *
22344+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
22345+ * develop this driver.
22346+ *
22347+ **************************************************************************/
22348+/*
22349+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
22350+ */
22351+
22352+#ifndef _PSB_SCENE_H_
22353+#define _PSB_SCENE_H_
22354+
22355+#include "ttm/ttm_object.h"
22356+
22357+#define PSB_USER_OBJECT_SCENE_POOL ttm_driver_type0
22358+#define PSB_USER_OBJECT_TA_MEM ttm_driver_type1
22359+#define PSB_MAX_NUM_SCENES 8
22360+
22361+struct psb_hw_scene;
22362+struct psb_hw_ta_mem;
22363+
22364+struct psb_scene_pool {
22365+ struct ttm_base_object base;
22366+ struct drm_device *dev;
22367+ struct kref kref;
22368+ uint32_t w;
22369+ uint32_t h;
22370+ uint32_t cur_scene;
22371+ struct psb_scene *scenes[PSB_MAX_NUM_SCENES];
22372+ uint32_t num_scenes;
22373+};
22374+
22375+struct psb_scene {
22376+ struct drm_device *dev;
22377+ struct kref kref;
22378+ uint32_t hw_cookie[PSB_SCENE_HW_COOKIE_SIZE];
22379+ uint32_t bo_size;
22380+ uint32_t w;
22381+ uint32_t h;
22382+ struct psb_ta_mem *ta_mem;
22383+ struct psb_hw_scene *hw_scene;
22384+ struct ttm_buffer_object *hw_data;
22385+ uint32_t flags;
22386+ uint32_t clear_p_start;
22387+ uint32_t clear_num_pages;
22388+};
22389+
22390+#if 0
22391+struct psb_scene_entry {
22392+ struct list_head head;
22393+ struct psb_scene *scene;
22394+};
22395+
22396+struct psb_user_scene {
22397+ struct ttm_base_object base;
22398+ struct drm_device *dev;
22399+};
22400+
22401+#endif
22402+
22403+struct psb_ta_mem {
22404+ struct ttm_base_object base;
22405+ struct drm_device *dev;
22406+ struct kref kref;
22407+ uint32_t hw_cookie[PSB_TA_MEM_HW_COOKIE_SIZE];
22408+ uint32_t bo_size;
22409+ struct ttm_buffer_object *ta_memory;
22410+ struct ttm_buffer_object *hw_data;
22411+ int is_deallocating;
22412+ int deallocating_scheduled;
22413+};
22414+
22415+extern struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
22416+ int shareable,
22417+ uint32_t num_scenes,
22418+ uint32_t w, uint32_t h);
22419+extern void psb_scene_pool_unref(struct psb_scene_pool **pool);
22420+extern struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file
22421+ *priv,
22422+ uint32_t handle,
22423+ int check_owner);
22424+extern int psb_validate_scene_pool(struct psb_context *context,
22425+ struct psb_scene_pool *pool,
22426+ uint32_t w,
22427+ uint32_t h, int final_pass,
22428+ struct psb_scene **scene_p);
22429+extern void psb_scene_unref(struct psb_scene **scene);
22430+extern struct psb_scene *psb_scene_ref(struct psb_scene *src);
22431+extern int drm_psb_scene_unref_ioctl(struct drm_device *dev,
22432+ void *data,
22433+ struct drm_file *file_priv);
22434+
22435+static inline uint32_t psb_scene_pool_handle(struct psb_scene_pool *pool)
22436+{
22437+ return pool->base.hash.key;
22438+}
22439+
22440+extern struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev,
22441+ uint32_t pages);
22442+extern struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src);
22443+extern void psb_ta_mem_unref(struct psb_ta_mem **ta_mem);
22444+
22445+#endif
22446diff -uNr a/drivers/gpu/drm/psb/psb_schedule.c b/drivers/gpu/drm/psb/psb_schedule.c
22447--- a/drivers/gpu/drm/psb/psb_schedule.c 1969-12-31 16:00:00.000000000 -0800
22448+++ b/drivers/gpu/drm/psb/psb_schedule.c 2009-04-07 13:28:38.000000000 -0700
22449@@ -0,0 +1,1539 @@
22450+/**************************************************************************
22451+ * Copyright (c) 2007, Intel Corporation.
22452+ * All Rights Reserved.
22453+ *
22454+ * This program is free software; you can redistribute it and/or modify it
22455+ * under the terms and conditions of the GNU General Public License,
22456+ * version 2, as published by the Free Software Foundation.
22457+ *
22458+ * This program is distributed in the hope it will be useful, but WITHOUT
22459+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22460+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22461+ * more details.
22462+ *
22463+ * You should have received a copy of the GNU General Public License along with
22464+ * this program; if not, write to the Free Software Foundation, Inc.,
22465+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22466+ *
22467+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
22468+ * develop this driver.
22469+ *
22470+ **************************************************************************/
22471+/*
22472+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
22473+ */
22474+
22475+#include <drm/drmP.h>
22476+#include "psb_drm.h"
22477+#include "psb_drv.h"
22478+#include "psb_reg.h"
22479+#include "psb_scene.h"
22480+#include "ttm/ttm_execbuf_util.h"
22481+
22482+#define PSB_ALLOWED_RASTER_RUNTIME (DRM_HZ * 30)
22483+#define PSB_ALLOWED_TA_RUNTIME (DRM_HZ * 30)
22484+#define PSB_RASTER_TIMEOUT (DRM_HZ / 10)
22485+#define PSB_TA_TIMEOUT (DRM_HZ / 10)
22486+
22487+#undef PSB_SOFTWARE_WORKAHEAD
22488+
22489+#ifdef PSB_STABLE_SETTING
22490+
22491+/*
22492+ * Software blocks completely while the engines are working so there can be no
22493+ * overlap.
22494+ */
22495+
22496+#define PSB_WAIT_FOR_RASTER_COMPLETION
22497+#define PSB_WAIT_FOR_TA_COMPLETION
22498+
22499+#elif defined(PSB_PARANOID_SETTING)
22500+/*
22501+ * Software blocks "almost" completely while the engines are working, so
22502+ * there can be no overlap.
22503+ */
22504+
22505+#define PSB_WAIT_FOR_RASTER_COMPLETION
22506+#define PSB_WAIT_FOR_TA_COMPLETION
22507+#define PSB_BE_PARANOID
22508+
22509+#elif defined(PSB_SOME_OVERLAP_BUT_LOCKUP)
22510+/*
22511+ * Software leaps ahead while the rasterizer is running and prepares
22512+ * a new ta job that can be scheduled before the rasterizer has
22513+ * finished.
22514+ */
22515+
22516+#define PSB_WAIT_FOR_TA_COMPLETION
22517+
22518+#elif defined(PSB_SOFTWARE_WORKAHEAD)
22519+/*
22520+ * Don't sync, but allow software to work ahead and queue a number of jobs.
22521+ * But block overlapping in the scheduler.
22522+ */
22523+
22524+#define PSB_BLOCK_OVERLAP
22525+#define ONLY_ONE_JOB_IN_RASTER_QUEUE
22526+
22527+#endif
22528+
22529+/*
22530+ * Avoid pixelbe pagefaults on C0.
22531+ */
22532+#if 0
22533+#define PSB_BLOCK_OVERLAP
22534+#endif
22535+
22536+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
22537+ struct psb_scheduler *scheduler,
22538+ uint32_t reply_flag);
22539+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
22540+ struct psb_scheduler *scheduler,
22541+ uint32_t reply_flag);
22542+
22543+#ifdef FIX_TG_16
22544+
22545+void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
22546+static int psb_check_2d_idle(struct drm_psb_private *dev_priv);
22547+
22548+#endif
22549+
22550+void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
22551+ int *lockup, int *idle)
22552+{
22553+ unsigned long irq_flags;
22554+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
22555+
22556+ *lockup = 0;
22557+ *idle = 1;
22558+
22559+ spin_lock_irqsave(&scheduler->lock, irq_flags);
22560+
22561+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
22562+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
22563+ *lockup = 1;
22564+ }
22565+ if (!*lockup
22566+ && (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
22567+ && time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
22568+ *lockup = 1;
22569+ }
22570+ if (!*lockup)
22571+ *idle = scheduler->idle;
22572+
22573+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
22574+}
22575+
22576+static inline void psb_set_idle(struct psb_scheduler *scheduler)
22577+{
22578+ scheduler->idle =
22579+ (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] == NULL) &&
22580+ (scheduler->current_task[PSB_SCENE_ENGINE_TA] == NULL);
22581+ if (scheduler->idle)
22582+ wake_up(&scheduler->idle_queue);
22583+}
22584+
22585+/*
22586+ * Call with the scheduler spinlock held.
22587+ * Assigns a scene context to either the ta or the rasterizer,
22588+ * flushing out other scenes to memory if necessary.
22589+ */
22590+
22591+static int psb_set_scene_fire(struct psb_scheduler *scheduler,
22592+ struct psb_scene *scene,
22593+ int engine, struct psb_task *task)
22594+{
22595+ uint32_t flags = 0;
22596+ struct psb_hw_scene *hw_scene;
22597+ struct drm_device *dev = scene->dev;
22598+ struct drm_psb_private *dev_priv =
22599+ (struct drm_psb_private *) dev->dev_private;
22600+
22601+ hw_scene = scene->hw_scene;
22602+ if (hw_scene && hw_scene->last_scene == scene) {
22603+
22604+ /*
22605+ * Reuse the last hw scene context and delete it from the
22606+ * free list.
22607+ */
22608+
22609+ PSB_DEBUG_RENDER("Reusing hw scene %d.\n",
22610+ hw_scene->context_number);
22611+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
22612+
22613+ /*
22614+ * No hw context initialization to be done.
22615+ */
22616+
22617+ flags |= PSB_SCENE_FLAG_SETUP_ONLY;
22618+ }
22619+
22620+ list_del_init(&hw_scene->head);
22621+
22622+ } else {
22623+ struct list_head *list;
22624+ hw_scene = NULL;
22625+
22626+ /*
22627+ * Grab a new hw scene context.
22628+ */
22629+
22630+ list_for_each(list, &scheduler->hw_scenes) {
22631+ hw_scene =
22632+ list_entry(list, struct psb_hw_scene, head);
22633+ break;
22634+ }
22635+ BUG_ON(!hw_scene);
22636+ PSB_DEBUG_RENDER("New hw scene %d.\n",
22637+ hw_scene->context_number);
22638+
22639+ list_del_init(list);
22640+ }
22641+ scene->hw_scene = hw_scene;
22642+ hw_scene->last_scene = scene;
22643+
22644+ flags |= PSB_SCENE_FLAG_SETUP;
22645+
22646+ /*
22647+ * Switch context and setup the engine.
22648+ */
22649+
22650+ return psb_xhw_scene_bind_fire(dev_priv,
22651+ &task->buf,
22652+ task->flags,
22653+ hw_scene->context_number,
22654+ scene->hw_cookie,
22655+ task->oom_cmds,
22656+ task->oom_cmd_size,
22657+ scene->hw_data->offset,
22658+ engine, flags | scene->flags);
22659+}
22660+
22661+static inline void psb_report_fence(struct drm_psb_private *dev_priv,
22662+ struct psb_scheduler *scheduler,
22663+ uint32_t class,
22664+ uint32_t sequence,
22665+ uint32_t type, int call_handler)
22666+{
22667+ struct psb_scheduler_seq *seq = &scheduler->seq[type];
22668+ struct ttm_fence_device *fdev = &dev_priv->fdev;
22669+ struct ttm_fence_class_manager *fc = &fdev->fence_class[PSB_ENGINE_TA];
22670+ unsigned long irq_flags;
22671+
22672+ /**
22673+ * Block racing poll_ta calls, which take the lock in write mode.
22674+ */
22675+
22676+ read_lock_irqsave(&fc->lock, irq_flags);
22677+ seq->sequence = sequence;
22678+ seq->reported = 0;
22679+ read_unlock_irqrestore(&fc->lock, irq_flags);
22680+
22681+ if (call_handler)
22682+ psb_fence_handler(scheduler->dev, class);
22683+}
22684+
22685+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
22686+ struct psb_scheduler *scheduler);
22687+
22688+static void psb_schedule_ta(struct drm_psb_private *dev_priv,
22689+ struct psb_scheduler *scheduler)
22690+{
22691+ struct psb_task *task = NULL;
22692+ struct list_head *list, *next;
22693+ int pushed_raster_task = 0;
22694+
22695+ PSB_DEBUG_RENDER("schedule ta\n");
22696+
22697+ if (scheduler->idle_count != 0)
22698+ return;
22699+
22700+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL)
22701+ return;
22702+
22703+ if (scheduler->ta_state)
22704+ return;
22705+
22706+ /*
22707+ * Skip the ta stage for rasterization-only
22708+ * tasks. They arrive here to make sure we're rasterizing
22709+ * tasks in the correct order.
22710+ */
22711+
22712+ list_for_each_safe(list, next, &scheduler->ta_queue) {
22713+ task = list_entry(list, struct psb_task, head);
22714+ if (task->task_type != psb_raster_task)
22715+ break;
22716+
22717+ list_del_init(list);
22718+ list_add_tail(list, &scheduler->raster_queue);
22719+ psb_report_fence(dev_priv, scheduler, task->engine,
22720+ task->sequence,
22721+ _PSB_FENCE_TA_DONE_SHIFT, 1);
22722+ task = NULL;
22723+ pushed_raster_task = 1;
22724+ }
22725+
22726+ if (pushed_raster_task)
22727+ psb_schedule_raster(dev_priv, scheduler);
22728+
22729+ if (!task)
22730+ return;
22731+
22732+ /*
22733+ * Still waiting for a vistest?
22734+ */
22735+
22736+ if (scheduler->feedback_task == task)
22737+ return;
22738+
22739+#ifdef ONLY_ONE_JOB_IN_RASTER_QUEUE
22740+
22741+ /*
22742+ * Block ta from trying to use both hardware contexts
22743+ * without the rasterizer starting to render from one of them.
22744+ */
22745+
22746+ if (!list_empty(&scheduler->raster_queue))
22747+ return;
22748+
22749+#endif
22750+
22751+#ifdef PSB_BLOCK_OVERLAP
22752+ /*
22753+ * Make sure rasterizer isn't doing anything.
22754+ */
22755+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
22756+ return;
22757+#endif
22758+ if (list_empty(&scheduler->hw_scenes))
22759+ return;
22760+
22761+#ifdef FIX_TG_16
22762+ if (psb_check_2d_idle(dev_priv))
22763+ return;
22764+#endif
22765+
22766+ list_del_init(&task->head);
22767+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
22768+ scheduler->ta_state = 1;
22769+
22770+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = task;
22771+ scheduler->idle = 0;
22772+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
22773+ scheduler->total_ta_jiffies = 0;
22774+
22775+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
22776+ 0x00000000 : PSB_RF_FIRE_TA;
22777+
22778+ (void) psb_reg_submit(dev_priv, task->ta_cmds, task->ta_cmd_size);
22779+ psb_set_scene_fire(scheduler, task->scene, PSB_SCENE_ENGINE_TA,
22780+ task);
22781+ psb_schedule_watchdog(dev_priv);
22782+}
22783+
22784+static int psb_fire_raster(struct psb_scheduler *scheduler,
22785+ struct psb_task *task)
22786+{
22787+ struct drm_device *dev = scheduler->dev;
22788+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)
22789+ dev->dev_private;
22790+
22791+ PSB_DEBUG_RENDER("Fire raster %d\n", task->sequence);
22792+
22793+ return psb_xhw_fire_raster(dev_priv, &task->buf, task->flags);
22794+}
22795+
22796+/*
22797+ * Take the first rasterization task from the hp raster queue or from the
22798+ * raster queue and fire the rasterizer.
22799+ */
22800+
22801+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
22802+ struct psb_scheduler *scheduler)
22803+{
22804+ struct psb_task *task;
22805+ struct list_head *list;
22806+
22807+ if (scheduler->idle_count != 0)
22808+ return;
22809+
22810+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) {
22811+ PSB_DEBUG_RENDER("Raster busy.\n");
22812+ return;
22813+ }
22814+#ifdef PSB_BLOCK_OVERLAP
22815+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) {
22816+ PSB_DEBUG_RENDER("TA busy.\n");
22817+ return;
22818+ }
22819+#endif
22820+
22821+ if (!list_empty(&scheduler->hp_raster_queue))
22822+ list = scheduler->hp_raster_queue.next;
22823+ else if (!list_empty(&scheduler->raster_queue))
22824+ list = scheduler->raster_queue.next;
22825+ else {
22826+ PSB_DEBUG_RENDER("Nothing in list\n");
22827+ return;
22828+ }
22829+
22830+ task = list_entry(list, struct psb_task, head);
22831+
22832+ /*
22833+ * Sometimes changing ZLS format requires an ISP reset.
22834+ * Doesn't seem to consume too much time.
22835+ */
22836+
22837+ if (task->scene)
22838+ PSB_WSGX32(_PSB_CS_RESET_ISP_RESET, PSB_CR_SOFT_RESET);
22839+
22840+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = task;
22841+
22842+ list_del_init(list);
22843+ scheduler->idle = 0;
22844+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
22845+ scheduler->total_raster_jiffies = 0;
22846+
22847+ if (task->scene)
22848+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
22849+
22850+ (void) psb_reg_submit(dev_priv, task->raster_cmds,
22851+ task->raster_cmd_size);
22852+
22853+ if (task->scene) {
22854+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
22855+ 0x00000000 : PSB_RF_FIRE_RASTER;
22856+ psb_set_scene_fire(scheduler,
22857+ task->scene, PSB_SCENE_ENGINE_RASTER,
22858+ task);
22859+ } else {
22860+ task->reply_flags = PSB_RF_DEALLOC | PSB_RF_FIRE_RASTER;
22861+ psb_fire_raster(scheduler, task);
22862+ }
22863+ psb_schedule_watchdog(dev_priv);
22864+}
22865+
22866+int psb_extend_timeout(struct drm_psb_private *dev_priv,
22867+ uint32_t xhw_lockup)
22868+{
22869+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
22870+ unsigned long irq_flags;
22871+ int ret = -EBUSY;
22872+
22873+ spin_lock_irqsave(&scheduler->lock, irq_flags);
22874+
22875+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
22876+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
22877+ if (xhw_lockup & PSB_LOCKUP_TA) {
22878+ goto out_unlock;
22879+ } else {
22880+ scheduler->total_ta_jiffies +=
22881+ jiffies - scheduler->ta_end_jiffies +
22882+ PSB_TA_TIMEOUT;
22883+ if (scheduler->total_ta_jiffies >
22884+ PSB_ALLOWED_TA_RUNTIME)
22885+ goto out_unlock;
22886+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
22887+ }
22888+ }
22889+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL &&
22890+ time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
22891+ if (xhw_lockup & PSB_LOCKUP_RASTER) {
22892+ goto out_unlock;
22893+ } else {
22894+ scheduler->total_raster_jiffies +=
22895+ jiffies - scheduler->raster_end_jiffies +
22896+ PSB_RASTER_TIMEOUT;
22897+ if (scheduler->total_raster_jiffies >
22898+ PSB_ALLOWED_RASTER_RUNTIME)
22899+ goto out_unlock;
22900+ scheduler->raster_end_jiffies =
22901+ jiffies + PSB_RASTER_TIMEOUT;
22902+ }
22903+ }
22904+
22905+ ret = 0;
22906+
22907+out_unlock:
22908+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
22909+ return ret;
22910+}
22911+
22912+/*
22913+ * TA done handler.
22914+ */
22915+
22916+static void psb_ta_done(struct drm_psb_private *dev_priv,
22917+ struct psb_scheduler *scheduler)
22918+{
22919+ struct psb_task *task =
22920+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
22921+ struct psb_scene *scene = task->scene;
22922+
22923+ PSB_DEBUG_RENDER("TA done %u\n", task->sequence);
22924+
22925+ switch (task->ta_complete_action) {
22926+ case PSB_RASTER_BLOCK:
22927+ scheduler->ta_state = 1;
22928+ scene->flags |=
22929+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
22930+ list_add_tail(&task->head, &scheduler->raster_queue);
22931+ break;
22932+ case PSB_RASTER:
22933+ scene->flags |=
22934+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
22935+ list_add_tail(&task->head, &scheduler->raster_queue);
22936+ break;
22937+ case PSB_RETURN:
22938+ scheduler->ta_state = 0;
22939+ scene->flags |= PSB_SCENE_FLAG_DIRTY;
22940+ list_add_tail(&scene->hw_scene->head,
22941+ &scheduler->hw_scenes);
22942+
22943+ break;
22944+ }
22945+
22946+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
22947+
22948+#ifdef FIX_TG_16
22949+ psb_2d_atomic_unlock(dev_priv);
22950+#endif
22951+
22952+ if (task->ta_complete_action != PSB_RASTER_BLOCK)
22953+ psb_report_fence(dev_priv, scheduler, task->engine,
22954+ task->sequence,
22955+ _PSB_FENCE_TA_DONE_SHIFT, 1);
22956+
22957+ psb_schedule_raster(dev_priv, scheduler);
22958+ psb_schedule_ta(dev_priv, scheduler);
22959+ psb_set_idle(scheduler);
22960+
22961+ if (task->ta_complete_action != PSB_RETURN)
22962+ return;
22963+
22964+ list_add_tail(&task->head, &scheduler->task_done_queue);
22965+ schedule_delayed_work(&scheduler->wq, 1);
22966+}
22967+
22968+/*
22969+ * Rasterizer done handler.
22970+ */
22971+
22972+static void psb_raster_done(struct drm_psb_private *dev_priv,
22973+ struct psb_scheduler *scheduler)
22974+{
22975+ struct psb_task *task =
22976+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
22977+ struct psb_scene *scene = task->scene;
22978+ uint32_t complete_action = task->raster_complete_action;
22979+
22980+ PSB_DEBUG_RENDER("Raster done %u\n", task->sequence);
22981+
22982+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
22983+
22984+ if (complete_action != PSB_RASTER)
22985+ psb_schedule_raster(dev_priv, scheduler);
22986+
22987+ if (scene) {
22988+ if (task->feedback.page) {
22989+ if (unlikely(scheduler->feedback_task)) {
22990+ /*
22991+ * This should never happen, since the previous
22992+ * feedback query will return before the next
22993+ * raster task is fired.
22994+ */
22995+ DRM_ERROR("Feedback task busy.\n");
22996+ }
22997+ scheduler->feedback_task = task;
22998+ psb_xhw_vistest(dev_priv, &task->buf);
22999+ }
23000+ switch (complete_action) {
23001+ case PSB_RETURN:
23002+ scene->flags &=
23003+ ~(PSB_SCENE_FLAG_DIRTY |
23004+ PSB_SCENE_FLAG_COMPLETE);
23005+ list_add_tail(&scene->hw_scene->head,
23006+ &scheduler->hw_scenes);
23007+ psb_report_fence(dev_priv, scheduler, task->engine,
23008+ task->sequence,
23009+ _PSB_FENCE_SCENE_DONE_SHIFT, 1);
23010+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
23011+ scheduler->ta_state = 0;
23012+
23013+ break;
23014+ case PSB_RASTER:
23015+ list_add(&task->head, &scheduler->raster_queue);
23016+ task->raster_complete_action = PSB_RETURN;
23017+ psb_schedule_raster(dev_priv, scheduler);
23018+ break;
23019+ case PSB_TA:
23020+ list_add(&task->head, &scheduler->ta_queue);
23021+ scheduler->ta_state = 0;
23022+ task->raster_complete_action = PSB_RETURN;
23023+ task->ta_complete_action = PSB_RASTER;
23024+ break;
23025+
23026+ }
23027+ }
23028+ psb_schedule_ta(dev_priv, scheduler);
23029+ psb_set_idle(scheduler);
23030+
23031+ if (complete_action == PSB_RETURN) {
23032+ if (task->scene == NULL) {
23033+ psb_report_fence(dev_priv, scheduler, task->engine,
23034+ task->sequence,
23035+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
23036+ }
23037+ if (!task->feedback.page) {
23038+ list_add_tail(&task->head,
23039+ &scheduler->task_done_queue);
23040+ schedule_delayed_work(&scheduler->wq, 1);
23041+ }
23042+ }
23043+}
23044+
23045+void psb_scheduler_pause(struct drm_psb_private *dev_priv)
23046+{
23047+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23048+ unsigned long irq_flags;
23049+
23050+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23051+ scheduler->idle_count++;
23052+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23053+}
23054+
23055+void psb_scheduler_restart(struct drm_psb_private *dev_priv)
23056+{
23057+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23058+ unsigned long irq_flags;
23059+
23060+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23061+ if (--scheduler->idle_count == 0) {
23062+ psb_schedule_ta(dev_priv, scheduler);
23063+ psb_schedule_raster(dev_priv, scheduler);
23064+ }
23065+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23066+}
23067+
23068+int psb_scheduler_idle(struct drm_psb_private *dev_priv)
23069+{
23070+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23071+ unsigned long irq_flags;
23072+ int ret;
23073+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23074+ ret = scheduler->idle_count != 0 && scheduler->idle;
23075+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23076+ return ret;
23077+}
23078+
23079+int psb_scheduler_finished(struct drm_psb_private *dev_priv)
23080+{
23081+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23082+ unsigned long irq_flags;
23083+ int ret;
23084+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23085+ ret = (scheduler->idle &&
23086+ list_empty(&scheduler->raster_queue) &&
23087+ list_empty(&scheduler->ta_queue) &&
23088+ list_empty(&scheduler->hp_raster_queue));
23089+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23090+ return ret;
23091+}
23092+
23093+static void psb_ta_oom(struct drm_psb_private *dev_priv,
23094+ struct psb_scheduler *scheduler)
23095+{
23096+
23097+ struct psb_task *task =
23098+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
23099+ if (!task)
23100+ return;
23101+
23102+ if (task->aborting)
23103+ return;
23104+ task->aborting = 1;
23105+
23106+	DRM_INFO("TA out of parameter memory.\n");
23107+
23108+ (void) psb_xhw_ta_oom(dev_priv, &task->buf,
23109+ task->scene->hw_cookie);
23110+}
23111+
23112+static void psb_ta_oom_reply(struct drm_psb_private *dev_priv,
23113+ struct psb_scheduler *scheduler)
23114+{
23115+
23116+ struct psb_task *task =
23117+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
23118+ uint32_t flags;
23119+ if (!task)
23120+ return;
23121+
23122+ psb_xhw_ta_oom_reply(dev_priv, &task->buf,
23123+ task->scene->hw_cookie,
23124+ &task->ta_complete_action,
23125+ &task->raster_complete_action, &flags);
23126+ task->flags |= flags;
23127+ task->aborting = 0;
23128+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM_REPLY);
23129+}
23130+
23131+static void psb_ta_hw_scene_freed(struct drm_psb_private *dev_priv,
23132+ struct psb_scheduler *scheduler)
23133+{
23134+ DRM_ERROR("TA hw scene freed.\n");
23135+}
23136+
23137+static void psb_vistest_reply(struct drm_psb_private *dev_priv,
23138+ struct psb_scheduler *scheduler)
23139+{
23140+ struct psb_task *task = scheduler->feedback_task;
23141+ uint8_t *feedback_map;
23142+ uint32_t add;
23143+ uint32_t cur;
23144+ struct drm_psb_vistest *vistest;
23145+ int i;
23146+
23147+ scheduler->feedback_task = NULL;
23148+ if (!task) {
23149+ DRM_ERROR("No Poulsbo feedback task.\n");
23150+ return;
23151+ }
23152+ if (!task->feedback.page) {
23153+ DRM_ERROR("No Poulsbo feedback page.\n");
23154+ goto out;
23155+ }
23156+
23157+ if (in_irq())
23158+ feedback_map = kmap_atomic(task->feedback.page, KM_IRQ0);
23159+ else
23160+ feedback_map = kmap_atomic(task->feedback.page, KM_USER0);
23161+
23162+ /*
23163+ * Loop over all requested vistest components here.
23164+ * Only one (vistest) currently.
23165+ */
23166+
23167+ vistest = (struct drm_psb_vistest *)
23168+ (feedback_map + task->feedback.offset);
23169+
23170+ for (i = 0; i < PSB_HW_FEEDBACK_SIZE; ++i) {
23171+ add = task->buf.arg.arg.feedback[i];
23172+ cur = vistest->vt[i];
23173+
23174+ /*
23175+ * Vistest saturates.
23176+ */
23177+
23178+ vistest->vt[i] = (cur + add < cur) ? ~0 : cur + add;
23179+ }
23180+ if (in_irq())
23181+ kunmap_atomic(feedback_map, KM_IRQ0);
23182+ else
23183+ kunmap_atomic(feedback_map, KM_USER0);
23184+out:
23185+ psb_report_fence(dev_priv, scheduler, task->engine, task->sequence,
23186+ _PSB_FENCE_FEEDBACK_SHIFT, 1);
23187+
23188+ if (list_empty(&task->head)) {
23189+ list_add_tail(&task->head, &scheduler->task_done_queue);
23190+ schedule_delayed_work(&scheduler->wq, 1);
23191+ } else
23192+ psb_schedule_ta(dev_priv, scheduler);
23193+}
23194+
23195+static void psb_ta_fire_reply(struct drm_psb_private *dev_priv,
23196+ struct psb_scheduler *scheduler)
23197+{
23198+ struct psb_task *task =
23199+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
23200+
23201+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
23202+
23203+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_FIRE_TA);
23204+}
23205+
23206+static void psb_raster_fire_reply(struct drm_psb_private *dev_priv,
23207+ struct psb_scheduler *scheduler)
23208+{
23209+ struct psb_task *task =
23210+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
23211+ uint32_t reply_flags;
23212+
23213+ if (!task) {
23214+ DRM_ERROR("Null task.\n");
23215+ return;
23216+ }
23217+
23218+ task->raster_complete_action = task->buf.arg.arg.sb.rca;
23219+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
23220+
23221+ reply_flags = PSB_RF_FIRE_RASTER;
23222+ if (task->raster_complete_action == PSB_RASTER)
23223+ reply_flags |= PSB_RF_DEALLOC;
23224+
23225+ psb_dispatch_raster(dev_priv, scheduler, reply_flags);
23226+}
23227+
23228+static int psb_user_interrupt(struct drm_psb_private *dev_priv,
23229+ struct psb_scheduler *scheduler)
23230+{
23231+ uint32_t type;
23232+ int ret;
23233+ unsigned long irq_flags;
23234+
23235+ /*
23236+ * Xhw cannot write directly to the comm page, so
23237+ * do it here. Firmware would have written directly.
23238+ */
23239+
23240+ ret = psb_xhw_handler(dev_priv);
23241+ if (unlikely(ret))
23242+ return ret;
23243+
23244+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
23245+ type = dev_priv->comm[PSB_COMM_USER_IRQ];
23246+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
23247+ if (dev_priv->comm[PSB_COMM_USER_IRQ_LOST]) {
23248+ dev_priv->comm[PSB_COMM_USER_IRQ_LOST] = 0;
23249+ DRM_ERROR("Lost Poulsbo hardware event.\n");
23250+ }
23251+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
23252+
23253+ if (type == 0)
23254+ return 0;
23255+
23256+ switch (type) {
23257+ case PSB_UIRQ_VISTEST:
23258+ psb_vistest_reply(dev_priv, scheduler);
23259+ break;
23260+ case PSB_UIRQ_OOM_REPLY:
23261+ psb_ta_oom_reply(dev_priv, scheduler);
23262+ break;
23263+ case PSB_UIRQ_FIRE_TA_REPLY:
23264+ psb_ta_fire_reply(dev_priv, scheduler);
23265+ break;
23266+ case PSB_UIRQ_FIRE_RASTER_REPLY:
23267+ psb_raster_fire_reply(dev_priv, scheduler);
23268+ break;
23269+ default:
23270+		DRM_ERROR("Unknown Poulsbo hardware event: %d\n", type);
23271+ }
23272+ return 0;
23273+}
23274+
23275+int psb_forced_user_interrupt(struct drm_psb_private *dev_priv)
23276+{
23277+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23278+ unsigned long irq_flags;
23279+ int ret;
23280+
23281+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23282+ ret = psb_user_interrupt(dev_priv, scheduler);
23283+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23284+ return ret;
23285+}
23286+
23287+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
23288+ struct psb_scheduler *scheduler,
23289+ uint32_t reply_flag)
23290+{
23291+ struct psb_task *task =
23292+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
23293+ uint32_t flags;
23294+ uint32_t mask;
23295+
23296+ task->reply_flags |= reply_flag;
23297+ flags = task->reply_flags;
23298+ mask = PSB_RF_FIRE_TA;
23299+
23300+ if (!(flags & mask))
23301+ return;
23302+
23303+ mask = PSB_RF_TA_DONE;
23304+ if ((flags & mask) == mask) {
23305+ task->reply_flags &= ~mask;
23306+ psb_ta_done(dev_priv, scheduler);
23307+ }
23308+
23309+ mask = PSB_RF_OOM;
23310+ if ((flags & mask) == mask) {
23311+ task->reply_flags &= ~mask;
23312+ psb_ta_oom(dev_priv, scheduler);
23313+ }
23314+
23315+ mask = (PSB_RF_OOM_REPLY | PSB_RF_TERMINATE);
23316+ if ((flags & mask) == mask) {
23317+ task->reply_flags &= ~mask;
23318+ psb_ta_done(dev_priv, scheduler);
23319+ }
23320+}
23321+
23322+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
23323+ struct psb_scheduler *scheduler,
23324+ uint32_t reply_flag)
23325+{
23326+ struct psb_task *task =
23327+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
23328+ uint32_t flags;
23329+ uint32_t mask;
23330+
23331+ task->reply_flags |= reply_flag;
23332+ flags = task->reply_flags;
23333+ mask = PSB_RF_FIRE_RASTER;
23334+
23335+ if (!(flags & mask))
23336+ return;
23337+
23338+ /*
23339+ * For rasterizer-only tasks, don't report fence done here,
23340+ * as this is time consuming and the rasterizer wants a new
23341+ * task immediately. For other tasks, the hardware is probably
23342+ * still busy deallocating TA memory, so we can report
23343+ * fence done in parallel.
23344+ */
23345+
23346+ if (task->raster_complete_action == PSB_RETURN &&
23347+ (reply_flag & PSB_RF_RASTER_DONE) && task->scene != NULL) {
23348+ psb_report_fence(dev_priv, scheduler, task->engine,
23349+ task->sequence,
23350+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
23351+ }
23352+
23353+ mask = PSB_RF_RASTER_DONE | PSB_RF_DEALLOC;
23354+ if ((flags & mask) == mask) {
23355+ task->reply_flags &= ~mask;
23356+ psb_raster_done(dev_priv, scheduler);
23357+ }
23358+}
23359+
23360+void psb_scheduler_handler(struct drm_psb_private *dev_priv,
23361+ uint32_t status)
23362+{
23363+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23364+
23365+ spin_lock(&scheduler->lock);
23366+
23367+ if (status & _PSB_CE_PIXELBE_END_RENDER) {
23368+ psb_dispatch_raster(dev_priv, scheduler,
23369+ PSB_RF_RASTER_DONE);
23370+ }
23371+ if (status & _PSB_CE_DPM_3D_MEM_FREE)
23372+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_DEALLOC);
23373+
23374+ if (status & _PSB_CE_TA_FINISHED)
23375+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TA_DONE);
23376+
23377+ if (status & _PSB_CE_TA_TERMINATE)
23378+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TERMINATE);
23379+
23380+ if (status & (_PSB_CE_DPM_REACHED_MEM_THRESH |
23381+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
23382+ _PSB_CE_DPM_OUT_OF_MEMORY_MT)) {
23383+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM);
23384+ }
23385+ if (status & _PSB_CE_DPM_TA_MEM_FREE)
23386+ psb_ta_hw_scene_freed(dev_priv, scheduler);
23387+
23388+ if (status & _PSB_CE_SW_EVENT)
23389+ psb_user_interrupt(dev_priv, scheduler);
23390+
23391+ spin_unlock(&scheduler->lock);
23392+}
23393+
23394+static void psb_free_task_wq(struct work_struct *work)
23395+{
23396+ struct psb_scheduler *scheduler =
23397+ container_of(work, struct psb_scheduler, wq.work);
23398+
23399+ struct list_head *list, *next;
23400+ unsigned long irq_flags;
23401+ struct psb_task *task;
23402+
23403+ if (!mutex_trylock(&scheduler->task_wq_mutex))
23404+ return;
23405+
23406+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23407+ list_for_each_safe(list, next, &scheduler->task_done_queue) {
23408+ task = list_entry(list, struct psb_task, head);
23409+ list_del_init(list);
23410+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23411+
23412+ PSB_DEBUG_RENDER("Checking Task %d: Scene 0x%08lx, "
23413+ "Feedback bo 0x%08lx, done %d\n",
23414+ task->sequence,
23415+ (unsigned long) task->scene,
23416+ (unsigned long) task->feedback.bo,
23417+ atomic_read(&task->buf.done));
23418+
23419+ if (task->scene) {
23420+ PSB_DEBUG_RENDER("Unref scene %d\n",
23421+ task->sequence);
23422+ psb_scene_unref(&task->scene);
23423+ if (task->feedback.bo) {
23424+ PSB_DEBUG_RENDER("Unref feedback bo %d\n",
23425+ task->sequence);
23426+ ttm_bo_unref(&task->feedback.bo);
23427+ }
23428+ }
23429+
23430+ if (atomic_read(&task->buf.done)) {
23431+ PSB_DEBUG_RENDER("Deleting task %d\n",
23432+ task->sequence);
23433+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
23434+ task = NULL;
23435+ }
23436+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23437+ if (task != NULL)
23438+ list_add(list, &scheduler->task_done_queue);
23439+ }
23440+ if (!list_empty(&scheduler->task_done_queue)) {
23441+ PSB_DEBUG_RENDER("Rescheduling wq\n");
23442+ schedule_delayed_work(&scheduler->wq, 1);
23443+ }
23444+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23445+
23446+ if (list_empty(&scheduler->task_done_queue) &&
23447+ drm_psb_ospm && IS_MRST(scheduler->dev)) {
23448+ psb_try_power_down_sgx(scheduler->dev);
23449+ }
23450+ mutex_unlock(&scheduler->task_wq_mutex);
23451+}
23452+
23453+/*
23454+ * Check if any of the tasks in the queues is using a scene.
23455+ * In that case we know the TA memory buffer objects are
23456+ * fenced and will not be evicted until that fence is signaled.
23457+ */
23458+
23459+void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv)
23460+{
23461+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23462+ unsigned long irq_flags;
23463+ struct psb_task *task;
23464+ struct psb_task *next_task;
23465+
23466+ dev_priv->force_ta_mem_load = 1;
23467+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23468+ list_for_each_entry_safe(task, next_task, &scheduler->ta_queue,
23469+ head) {
23470+ if (task->scene) {
23471+ dev_priv->force_ta_mem_load = 0;
23472+ break;
23473+ }
23474+ }
23475+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
23476+ head) {
23477+ if (task->scene) {
23478+ dev_priv->force_ta_mem_load = 0;
23479+ break;
23480+ }
23481+ }
23482+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23483+}
23484+
23485+void psb_scheduler_reset(struct drm_psb_private *dev_priv,
23486+ int error_condition)
23487+{
23488+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23489+ unsigned long wait_jiffies;
23490+ unsigned long cur_jiffies;
23491+ struct psb_task *task;
23492+ struct psb_task *next_task;
23493+ unsigned long irq_flags;
23494+
23495+ psb_scheduler_pause(dev_priv);
23496+ if (!psb_scheduler_idle(dev_priv)) {
23497+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23498+
23499+ cur_jiffies = jiffies;
23500+ wait_jiffies = cur_jiffies;
23501+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] &&
23502+ time_after_eq(scheduler->ta_end_jiffies, wait_jiffies))
23503+ wait_jiffies = scheduler->ta_end_jiffies;
23504+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] &&
23505+ time_after_eq(scheduler->raster_end_jiffies,
23506+ wait_jiffies))
23507+ wait_jiffies = scheduler->raster_end_jiffies;
23508+
23509+ wait_jiffies -= cur_jiffies;
23510+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23511+
23512+ (void) wait_event_timeout(scheduler->idle_queue,
23513+ psb_scheduler_idle(dev_priv),
23514+ wait_jiffies);
23515+ }
23516+
23517+ if (!psb_scheduler_idle(dev_priv)) {
23518+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23519+ task = scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
23520+ if (task) {
23521+ DRM_ERROR("Detected Poulsbo rasterizer lockup.\n");
23522+ if (task->engine == PSB_ENGINE_HPRAST) {
23523+ psb_fence_error(scheduler->dev,
23524+ PSB_ENGINE_HPRAST,
23525+ task->sequence,
23526+ _PSB_FENCE_TYPE_RASTER_DONE,
23527+ error_condition);
23528+
23529+ list_del(&task->head);
23530+ psb_xhw_clean_buf(dev_priv, &task->buf);
23531+ list_add_tail(&task->head,
23532+ &scheduler->task_done_queue);
23533+ } else {
23534+ list_add(&task->head,
23535+ &scheduler->raster_queue);
23536+ }
23537+ }
23538+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
23539+ task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
23540+ if (task) {
23541+			DRM_ERROR("Detected Poulsbo TA lockup.\n");
23542+ list_add_tail(&task->head,
23543+ &scheduler->raster_queue);
23544+#ifdef FIX_TG_16
23545+ psb_2d_atomic_unlock(dev_priv);
23546+#endif
23547+ }
23548+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
23549+ scheduler->ta_state = 0;
23550+
23551+#ifdef FIX_TG_16
23552+ atomic_set(&dev_priv->ta_wait_2d, 0);
23553+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
23554+ wake_up(&dev_priv->queue_2d);
23555+#endif
23556+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23557+ }
23558+
23559+ /*
23560+ * Empty raster queue.
23561+ */
23562+
23563+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23564+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
23565+ head) {
23566+ struct psb_scene *scene = task->scene;
23567+
23568+ DRM_INFO("Signaling fence sequence %u\n",
23569+ task->sequence);
23570+
23571+ psb_fence_error(scheduler->dev,
23572+ task->engine,
23573+ task->sequence,
23574+ _PSB_FENCE_TYPE_TA_DONE |
23575+ _PSB_FENCE_TYPE_RASTER_DONE |
23576+ _PSB_FENCE_TYPE_SCENE_DONE |
23577+ _PSB_FENCE_TYPE_FEEDBACK, error_condition);
23578+ if (scene) {
23579+ scene->flags = 0;
23580+ if (scene->hw_scene) {
23581+ list_add_tail(&scene->hw_scene->head,
23582+ &scheduler->hw_scenes);
23583+ scene->hw_scene = NULL;
23584+ }
23585+ }
23586+
23587+ psb_xhw_clean_buf(dev_priv, &task->buf);
23588+ list_del(&task->head);
23589+ list_add_tail(&task->head, &scheduler->task_done_queue);
23590+ }
23591+
23592+ schedule_delayed_work(&scheduler->wq, 1);
23593+ scheduler->idle = 1;
23594+ wake_up(&scheduler->idle_queue);
23595+
23596+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23597+ psb_scheduler_restart(dev_priv);
23598+
23599+}
23600+
23601+int psb_scheduler_init(struct drm_device *dev,
23602+ struct psb_scheduler *scheduler)
23603+{
23604+ struct psb_hw_scene *hw_scene;
23605+ int i;
23606+
23607+ memset(scheduler, 0, sizeof(*scheduler));
23608+ scheduler->dev = dev;
23609+ mutex_init(&scheduler->task_wq_mutex);
23610+ spin_lock_init(&scheduler->lock);
23611+ scheduler->idle = 1;
23612+
23613+ INIT_LIST_HEAD(&scheduler->ta_queue);
23614+ INIT_LIST_HEAD(&scheduler->raster_queue);
23615+ INIT_LIST_HEAD(&scheduler->hp_raster_queue);
23616+ INIT_LIST_HEAD(&scheduler->hw_scenes);
23617+ INIT_LIST_HEAD(&scheduler->task_done_queue);
23618+ INIT_DELAYED_WORK(&scheduler->wq, &psb_free_task_wq);
23619+ init_waitqueue_head(&scheduler->idle_queue);
23620+
23621+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
23622+ hw_scene = &scheduler->hs[i];
23623+ hw_scene->context_number = i;
23624+ list_add_tail(&hw_scene->head, &scheduler->hw_scenes);
23625+ }
23626+
23627+ for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i)
23628+ scheduler->seq[i].reported = 0;
23629+ return 0;
23630+}
23631+
23632+/*
23633+ * Scene references maintained by the scheduler are not refcounted.
23634+ * Remove all references to a particular scene here.
23635+ */
23636+
23637+void psb_scheduler_remove_scene_refs(struct psb_scene *scene)
23638+{
23639+ struct drm_psb_private *dev_priv =
23640+ (struct drm_psb_private *) scene->dev->dev_private;
23641+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23642+ struct psb_hw_scene *hw_scene;
23643+ unsigned long irq_flags;
23644+ unsigned int i;
23645+
23646+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23647+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
23648+ hw_scene = &scheduler->hs[i];
23649+ if (hw_scene->last_scene == scene) {
23650+ BUG_ON(list_empty(&hw_scene->head));
23651+ hw_scene->last_scene = NULL;
23652+ }
23653+ }
23654+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23655+}
23656+
23657+void psb_scheduler_takedown(struct psb_scheduler *scheduler)
23658+{
23659+ flush_scheduled_work();
23660+}
23661+
23662+static int psb_setup_task(struct drm_device *dev,
23663+ struct drm_psb_cmdbuf_arg *arg,
23664+ struct ttm_buffer_object *raster_cmd_buffer,
23665+ struct ttm_buffer_object *ta_cmd_buffer,
23666+ struct ttm_buffer_object *oom_cmd_buffer,
23667+ struct psb_scene *scene,
23668+ enum psb_task_type task_type,
23669+ uint32_t engine,
23670+ uint32_t flags, struct psb_task **task_p)
23671+{
23672+ struct psb_task *task;
23673+ int ret;
23674+
23675+ if (ta_cmd_buffer && arg->ta_size > PSB_MAX_TA_CMDS) {
23676+ DRM_ERROR("Too many ta cmds %d.\n", arg->ta_size);
23677+ return -EINVAL;
23678+ }
23679+ if (raster_cmd_buffer && arg->cmdbuf_size > PSB_MAX_RASTER_CMDS) {
23680+ DRM_ERROR("Too many raster cmds %d.\n", arg->cmdbuf_size);
23681+ return -EINVAL;
23682+ }
23683+ if (oom_cmd_buffer && arg->oom_size > PSB_MAX_OOM_CMDS) {
23684+ DRM_ERROR("Too many oom cmds %d.\n", arg->oom_size);
23685+ return -EINVAL;
23686+ }
23687+
23688+ task = drm_calloc(1, sizeof(*task), DRM_MEM_DRIVER);
23689+ if (!task)
23690+ return -ENOMEM;
23691+
23692+ atomic_set(&task->buf.done, 1);
23693+ task->engine = engine;
23694+ INIT_LIST_HEAD(&task->head);
23695+ INIT_LIST_HEAD(&task->buf.head);
23696+ if (ta_cmd_buffer && arg->ta_size != 0) {
23697+ task->ta_cmd_size = arg->ta_size;
23698+ ret = psb_submit_copy_cmdbuf(dev, ta_cmd_buffer,
23699+ arg->ta_offset,
23700+ arg->ta_size,
23701+ PSB_ENGINE_TA, task->ta_cmds);
23702+ if (ret)
23703+ goto out_err;
23704+ }
23705+ if (raster_cmd_buffer) {
23706+ task->raster_cmd_size = arg->cmdbuf_size;
23707+ ret = psb_submit_copy_cmdbuf(dev, raster_cmd_buffer,
23708+ arg->cmdbuf_offset,
23709+ arg->cmdbuf_size,
23710+ PSB_ENGINE_TA,
23711+ task->raster_cmds);
23712+ if (ret)
23713+ goto out_err;
23714+ }
23715+ if (oom_cmd_buffer && arg->oom_size != 0) {
23716+ task->oom_cmd_size = arg->oom_size;
23717+ ret = psb_submit_copy_cmdbuf(dev, oom_cmd_buffer,
23718+ arg->oom_offset,
23719+ arg->oom_size,
23720+ PSB_ENGINE_TA,
23721+ task->oom_cmds);
23722+ if (ret)
23723+ goto out_err;
23724+ }
23725+ task->task_type = task_type;
23726+ task->flags = flags;
23727+ if (scene)
23728+ task->scene = psb_scene_ref(scene);
23729+
23730+ *task_p = task;
23731+ return 0;
23732+out_err:
23733+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
23734+ *task_p = NULL;
23735+ return ret;
23736+}
23737+
23738+int psb_cmdbuf_ta(struct drm_file *priv,
23739+ struct psb_context *context,
23740+ struct drm_psb_cmdbuf_arg *arg,
23741+ struct ttm_buffer_object *cmd_buffer,
23742+ struct ttm_buffer_object *ta_buffer,
23743+ struct ttm_buffer_object *oom_buffer,
23744+ struct psb_scene *scene,
23745+ struct psb_feedback_info *feedback,
23746+ struct psb_ttm_fence_rep *fence_arg)
23747+{
23748+ struct drm_device *dev = priv->minor->dev;
23749+ struct drm_psb_private *dev_priv = dev->dev_private;
23750+ struct ttm_fence_object *fence = NULL;
23751+ struct psb_task *task = NULL;
23752+ int ret;
23753+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23754+ uint32_t sequence;
23755+
23756+ PSB_DEBUG_RENDER("Cmdbuf ta\n");
23757+
23758+ ret = psb_setup_task(dev, arg, cmd_buffer, ta_buffer,
23759+ oom_buffer, scene,
23760+ psb_ta_task, PSB_ENGINE_TA,
23761+ PSB_FIRE_FLAG_RASTER_DEALLOC, &task);
23762+
23763+ if (ret)
23764+ goto out_err;
23765+
23766+ task->feedback = *feedback;
23767+ mutex_lock(&dev_priv->reset_mutex);
23768+
23769+ /*
23770+ * Hand the task over to the scheduler.
23771+ */
23772+
23773+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
23774+
23775+ task->ta_complete_action = PSB_RASTER;
23776+ task->raster_complete_action = PSB_RETURN;
23777+ sequence = task->sequence;
23778+
23779+ spin_lock_irq(&scheduler->lock);
23780+
23781+ list_add_tail(&task->head, &scheduler->ta_queue);
23782+ PSB_DEBUG_RENDER("queued ta %u\n", task->sequence);
23783+
23784+ psb_schedule_ta(dev_priv, scheduler);
23785+
23786+	/*
23787+ * From this point we may no longer dereference task,
23788+ * as the object it points to may be freed by another thread.
23789+ */
23790+
23791+ task = NULL;
23792+ spin_unlock_irq(&scheduler->lock);
23793+ mutex_unlock(&dev_priv->reset_mutex);
23794+
23795+ psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types,
23796+ arg->fence_flags,
23797+ &context->validate_list, fence_arg, &fence);
23798+ ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence);
23799+
23800+ if (fence) {
23801+ spin_lock_irq(&scheduler->lock);
23802+ psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA,
23803+ sequence, _PSB_FENCE_EXE_SHIFT, 1);
23804+ spin_unlock_irq(&scheduler->lock);
23805+ fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE;
23806+ }
23807+
23808+out_err:
23809+ if (ret && ret != -ERESTART)
23810+ DRM_ERROR("TA task queue job failed.\n");
23811+
23812+ if (fence) {
23813+#ifdef PSB_WAIT_FOR_TA_COMPLETION
23814+ ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
23815+ _PSB_FENCE_TYPE_TA_DONE);
23816+#ifdef PSB_BE_PARANOID
23817+ ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
23818+ _PSB_FENCE_TYPE_SCENE_DONE);
23819+#endif
23820+#endif
23821+ ttm_fence_object_unref(&fence);
23822+ }
23823+ return ret;
23824+}
23825+
23826+int psb_cmdbuf_raster(struct drm_file *priv,
23827+ struct psb_context *context,
23828+ struct drm_psb_cmdbuf_arg *arg,
23829+ struct ttm_buffer_object *cmd_buffer,
23830+ struct psb_ttm_fence_rep *fence_arg)
23831+{
23832+ struct drm_device *dev = priv->minor->dev;
23833+ struct drm_psb_private *dev_priv = dev->dev_private;
23834+ struct ttm_fence_object *fence = NULL;
23835+ struct psb_task *task = NULL;
23836+ int ret;
23837+ uint32_t sequence;
23838+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23839+
23840+ PSB_DEBUG_RENDER("Cmdbuf Raster\n");
23841+
23842+ ret = psb_setup_task(dev, arg, cmd_buffer, NULL, NULL,
23843+ NULL, psb_raster_task,
23844+ PSB_ENGINE_TA, 0, &task);
23845+
23846+ if (ret)
23847+ goto out_err;
23848+
23849+ /*
23850+ * Hand the task over to the scheduler.
23851+ */
23852+
23853+ mutex_lock(&dev_priv->reset_mutex);
23854+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
23855+ task->ta_complete_action = PSB_RASTER;
23856+ task->raster_complete_action = PSB_RETURN;
23857+ sequence = task->sequence;
23858+
23859+ spin_lock_irq(&scheduler->lock);
23860+ list_add_tail(&task->head, &scheduler->ta_queue);
23861+ PSB_DEBUG_RENDER("queued raster %u\n", task->sequence);
23862+ psb_schedule_ta(dev_priv, scheduler);
23863+
23864+	/*
23865+ * From this point we may no longer dereference task,
23866+ * as the object it points to may be freed by another thread.
23867+ */
23868+
23869+ task = NULL;
23870+ spin_unlock_irq(&scheduler->lock);
23871+ mutex_unlock(&dev_priv->reset_mutex);
23872+
23873+ psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types,
23874+ arg->fence_flags,
23875+ &context->validate_list, fence_arg, &fence);
23876+
23877+ ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence);
23878+ if (fence) {
23879+ spin_lock_irq(&scheduler->lock);
23880+ psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA, sequence,
23881+ _PSB_FENCE_EXE_SHIFT, 1);
23882+ spin_unlock_irq(&scheduler->lock);
23883+ fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE;
23884+ }
23885+out_err:
23886+ if (ret && ret != -ERESTART)
23887+ DRM_ERROR("Raster task queue job failed.\n");
23888+
23889+ if (fence) {
23890+#ifdef PSB_WAIT_FOR_RASTER_COMPLETION
23891+ ttm_fence_object_wait(fence, 1, 1, fence->type);
23892+#endif
23893+ ttm_fence_object_unref(&fence);
23894+ }
23895+
23896+ return ret;
23897+}
23898+
23899+#ifdef FIX_TG_16
23900+
23901+static int psb_check_2d_idle(struct drm_psb_private *dev_priv)
23902+{
23903+ if (psb_2d_trylock(dev_priv)) {
23904+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
23905+ !((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
23906+ _PSB_C2B_STATUS_BUSY))) {
23907+ return 0;
23908+ }
23909+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 0, 1) == 0)
23910+ psb_2D_irq_on(dev_priv);
23911+
23912+ PSB_WSGX32(PSB_2D_FENCE_BH, PSB_SGX_2D_SLAVE_PORT);
23913+ PSB_WSGX32(PSB_2D_FLUSH_BH, PSB_SGX_2D_SLAVE_PORT);
23914+ (void) PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT);
23915+
23916+ psb_2d_atomic_unlock(dev_priv);
23917+ }
23918+
23919+ atomic_set(&dev_priv->ta_wait_2d, 1);
23920+ return -EBUSY;
23921+}
23922+
23923+static void psb_atomic_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
23924+{
23925+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23926+
23927+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d, 1, 0) == 1) {
23928+ psb_schedule_ta(dev_priv, scheduler);
23929+ if (atomic_read(&dev_priv->waiters_2d) != 0)
23930+ wake_up(&dev_priv->queue_2d);
23931+ }
23932+}
23933+
23934+void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
23935+{
23936+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23937+ unsigned long irq_flags;
23938+
23939+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23940+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 1, 0) == 1) {
23941+ atomic_set(&dev_priv->ta_wait_2d, 0);
23942+ psb_2D_irq_off(dev_priv);
23943+ psb_schedule_ta(dev_priv, scheduler);
23944+ if (atomic_read(&dev_priv->waiters_2d) != 0)
23945+ wake_up(&dev_priv->queue_2d);
23946+ }
23947+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23948+}
23949+
23950+/*
23951+ * 2D locking functions. Can't use a mutex since the trylock() and
23952+ * unlock() methods need to be accessible from interrupt context.
23953+ */
23954+
23955+int psb_2d_trylock(struct drm_psb_private *dev_priv)
23956+{
23957+ return atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0;
23958+}
23959+
23960+void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv)
23961+{
23962+ atomic_set(&dev_priv->lock_2d, 0);
23963+ if (atomic_read(&dev_priv->waiters_2d) != 0)
23964+ wake_up(&dev_priv->queue_2d);
23965+}
23966+
23967+void psb_2d_unlock(struct drm_psb_private *dev_priv)
23968+{
23969+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23970+ unsigned long irq_flags;
23971+
23972+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23973+ psb_2d_atomic_unlock(dev_priv);
23974+ if (atomic_read(&dev_priv->ta_wait_2d) != 0)
23975+ psb_atomic_resume_ta_2d_idle(dev_priv);
23976+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23977+}
23978+
23979+void psb_2d_lock(struct drm_psb_private *dev_priv)
23980+{
23981+ atomic_inc(&dev_priv->waiters_2d);
23982+ wait_event(dev_priv->queue_2d,
23983+ atomic_read(&dev_priv->ta_wait_2d) == 0);
23984+ wait_event(dev_priv->queue_2d, psb_2d_trylock(dev_priv));
23985+ atomic_dec(&dev_priv->waiters_2d);
23986+}
23987+
23988+#endif
23989diff -uNr a/drivers/gpu/drm/psb/psb_schedule.h b/drivers/gpu/drm/psb/psb_schedule.h
23990--- a/drivers/gpu/drm/psb/psb_schedule.h 1969-12-31 16:00:00.000000000 -0800
23991+++ b/drivers/gpu/drm/psb/psb_schedule.h 2009-04-07 13:28:38.000000000 -0700
23992@@ -0,0 +1,176 @@
23993+/**************************************************************************
23994+ * Copyright (c) 2007, Intel Corporation.
23995+ * All Rights Reserved.
23996+ *
23997+ * This program is free software; you can redistribute it and/or modify it
23998+ * under the terms and conditions of the GNU General Public License,
23999+ * version 2, as published by the Free Software Foundation.
24000+ *
24001+ * This program is distributed in the hope it will be useful, but WITHOUT
24002+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24003+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
24004+ * more details.
24005+ *
24006+ * You should have received a copy of the GNU General Public License along with
24007+ * this program; if not, write to the Free Software Foundation, Inc.,
24008+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24009+ *
24010+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
24011+ * develop this driver.
24012+ *
24013+ **************************************************************************/
24014+/*
24015+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
24016+ */
24017+
24018+#ifndef _PSB_SCHEDULE_H_
24019+#define _PSB_SCHEDULE_H_
24020+
24021+#include <drm/drmP.h>
24022+
24023+struct psb_context;
24024+
24025+enum psb_task_type {
24026+ psb_ta_midscene_task,
24027+ psb_ta_task,
24028+ psb_raster_task,
24029+ psb_freescene_task
24030+};
24031+
24032+#define PSB_MAX_TA_CMDS 60
24033+#define PSB_MAX_RASTER_CMDS 60
24034+#define PSB_MAX_OOM_CMDS (DRM_PSB_NUM_RASTER_USE_REG * 2 + 6)
24035+
24036+struct psb_xhw_buf {
24037+ struct list_head head;
24038+ int copy_back;
24039+ atomic_t done;
24040+ struct drm_psb_xhw_arg arg;
24041+
24042+};
24043+
24044+struct psb_feedback_info {
24045+ struct ttm_buffer_object *bo;
24046+ struct page *page;
24047+ uint32_t offset;
24048+};
24049+
24050+struct psb_task {
24051+ struct list_head head;
24052+ struct psb_scene *scene;
24053+ struct psb_feedback_info feedback;
24054+ enum psb_task_type task_type;
24055+ uint32_t engine;
24056+ uint32_t sequence;
24057+ uint32_t ta_cmds[PSB_MAX_TA_CMDS];
24058+ uint32_t raster_cmds[PSB_MAX_RASTER_CMDS];
24059+ uint32_t oom_cmds[PSB_MAX_OOM_CMDS];
24060+ uint32_t ta_cmd_size;
24061+ uint32_t raster_cmd_size;
24062+ uint32_t oom_cmd_size;
24063+ uint32_t feedback_offset;
24064+ uint32_t ta_complete_action;
24065+ uint32_t raster_complete_action;
24066+ uint32_t hw_cookie;
24067+ uint32_t flags;
24068+ uint32_t reply_flags;
24069+ uint32_t aborting;
24070+ struct psb_xhw_buf buf;
24071+};
24072+
24073+struct psb_hw_scene {
24074+ struct list_head head;
24075+ uint32_t context_number;
24076+
24077+ /*
24078+ * This pointer does not refcount the last_scene_buffer,
24079+ * so we must make sure it is set to NULL before destroying
24080+ * the corresponding task.
24081+ */
24082+
24083+ struct psb_scene *last_scene;
24084+};
24085+
24086+struct psb_scene;
24087+struct drm_psb_private;
24088+
24089+struct psb_scheduler_seq {
24090+ uint32_t sequence;
24091+ int reported;
24092+};
24093+
24094+struct psb_scheduler {
24095+ struct drm_device *dev;
24096+ struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
24097+ struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
24098+ struct mutex task_wq_mutex;
24099+ spinlock_t lock;
24100+ struct list_head hw_scenes;
24101+ struct list_head ta_queue;
24102+ struct list_head raster_queue;
24103+ struct list_head hp_raster_queue;
24104+ struct list_head task_done_queue;
24105+ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
24106+ struct psb_task *feedback_task;
24107+ int ta_state;
24108+ struct psb_hw_scene *pending_hw_scene;
24109+ uint32_t pending_hw_scene_seq;
24110+ struct delayed_work wq;
24111+ struct psb_scene_pool *pool;
24112+ uint32_t idle_count;
24113+ int idle;
24114+ wait_queue_head_t idle_queue;
24115+ unsigned long ta_end_jiffies;
24116+ unsigned long total_ta_jiffies;
24117+ unsigned long raster_end_jiffies;
24118+ unsigned long total_raster_jiffies;
24119+};
24120+
24121+#define PSB_RF_FIRE_TA (1 << 0)
24122+#define PSB_RF_OOM (1 << 1)
24123+#define PSB_RF_OOM_REPLY (1 << 2)
24124+#define PSB_RF_TERMINATE (1 << 3)
24125+#define PSB_RF_TA_DONE (1 << 4)
24126+#define PSB_RF_FIRE_RASTER (1 << 5)
24127+#define PSB_RF_RASTER_DONE (1 << 6)
24128+#define PSB_RF_DEALLOC (1 << 7)
24129+
24130+extern struct psb_scene_pool *psb_alloc_scene_pool(struct drm_file *priv,
24131+ int shareable,
24132+ uint32_t w, uint32_t h);
24133+extern uint32_t psb_scene_handle(struct psb_scene *scene);
24134+extern int psb_scheduler_init(struct drm_device *dev,
24135+ struct psb_scheduler *scheduler);
24136+extern void psb_scheduler_takedown(struct psb_scheduler *scheduler);
24137+extern int psb_cmdbuf_ta(struct drm_file *priv,
24138+ struct psb_context *context,
24139+ struct drm_psb_cmdbuf_arg *arg,
24140+ struct ttm_buffer_object *cmd_buffer,
24141+ struct ttm_buffer_object *ta_buffer,
24142+ struct ttm_buffer_object *oom_buffer,
24143+ struct psb_scene *scene,
24144+ struct psb_feedback_info *feedback,
24145+ struct psb_ttm_fence_rep *fence_arg);
24146+extern int psb_cmdbuf_raster(struct drm_file *priv,
24147+ struct psb_context *context,
24148+ struct drm_psb_cmdbuf_arg *arg,
24149+ struct ttm_buffer_object *cmd_buffer,
24150+ struct psb_ttm_fence_rep *fence_arg);
24151+extern void psb_scheduler_handler(struct drm_psb_private *dev_priv,
24152+ uint32_t status);
24153+extern void psb_scheduler_pause(struct drm_psb_private *dev_priv);
24154+extern void psb_scheduler_restart(struct drm_psb_private *dev_priv);
24155+extern int psb_scheduler_idle(struct drm_psb_private *dev_priv);
24156+extern int psb_scheduler_finished(struct drm_psb_private *dev_priv);
24157+
24158+extern void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
24159+ int *lockup, int *idle);
24160+extern void psb_scheduler_reset(struct drm_psb_private *dev_priv,
24161+ int error_condition);
24162+extern int psb_forced_user_interrupt(struct drm_psb_private *dev_priv);
24163+extern void psb_scheduler_remove_scene_refs(struct psb_scene *scene);
24164+extern void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv);
24165+extern int psb_extend_timeout(struct drm_psb_private *dev_priv,
24166+ uint32_t xhw_lockup);
24167+
24168+#endif
24169diff -uNr a/drivers/gpu/drm/psb/psb_setup.c b/drivers/gpu/drm/psb/psb_setup.c
24170--- a/drivers/gpu/drm/psb/psb_setup.c 1969-12-31 16:00:00.000000000 -0800
24171+++ b/drivers/gpu/drm/psb/psb_setup.c 2009-04-07 13:28:38.000000000 -0700
24172@@ -0,0 +1,18 @@
24173+#include <drm/drmP.h>
24174+#include <drm/drm.h>
24175+#include <drm/drm_crtc.h>
24176+#include <drm/drm_edid.h>
24177+#include "psb_intel_drv.h"
24178+#include "psb_drv.h"
24179+#include "psb_intel_reg.h"
24180+
24181+/* Fixed name */
24182+#define ACPI_EDID_LCD "\\_SB_.PCI0.GFX0.DD04._DDC"
24183+#define ACPI_DOD "\\_SB_.PCI0.GFX0._DOD"
24184+
24185+#include "psb_intel_i2c.c"
24186+#include "psb_intel_sdvo.c"
24187+#include "psb_intel_modes.c"
24188+#include "psb_intel_lvds.c"
24189+#include "psb_intel_dsi.c"
24190+#include "psb_intel_display.c"
24191diff -uNr a/drivers/gpu/drm/psb/psb_sgx.c b/drivers/gpu/drm/psb/psb_sgx.c
24192--- a/drivers/gpu/drm/psb/psb_sgx.c 1969-12-31 16:00:00.000000000 -0800
24193+++ b/drivers/gpu/drm/psb/psb_sgx.c 2009-04-07 13:28:38.000000000 -0700
24194@@ -0,0 +1,1869 @@
24195+/**************************************************************************
24196+ * Copyright (c) 2007, Intel Corporation.
24197+ * All Rights Reserved.
24198+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX. USA.
24199+ * All Rights Reserved.
24200+ *
24201+ * This program is free software; you can redistribute it and/or modify it
24202+ * under the terms and conditions of the GNU General Public License,
24203+ * version 2, as published by the Free Software Foundation.
24204+ *
24205+ * This program is distributed in the hope it will be useful, but WITHOUT
24206+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24207+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
24208+ * more details.
24209+ *
24210+ * You should have received a copy of the GNU General Public License along with
24211+ * this program; if not, write to the Free Software Foundation, Inc.,
24212+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24213+ *
24214+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
24215+ * develop this driver.
24216+ *
24217+ **************************************************************************/
24218+/*
24219+ */
24220+
24221+#include <drm/drmP.h>
24222+#include "psb_drv.h"
24223+#include "psb_drm.h"
24224+#include "psb_reg.h"
24225+#include "psb_scene.h"
24226+#include "psb_msvdx.h"
24227+#include "lnc_topaz.h"
24228+#include "ttm/ttm_bo_api.h"
24229+#include "ttm/ttm_execbuf_util.h"
24230+#include "ttm/ttm_userobj_api.h"
24231+#include "ttm/ttm_placement_common.h"
24232+#include "psb_sgx.h"
24233+
24234+static inline int psb_same_page(unsigned long offset,
24235+ unsigned long offset2)
24236+{
24237+ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
24238+}
24239+
24240+static inline unsigned long psb_offset_end(unsigned long offset,
24241+ unsigned long end)
24242+{
24243+ offset = (offset + PAGE_SIZE) & PAGE_MASK;
24244+ return (end < offset) ? end : offset;
24245+}
24246+
24247+static void psb_idle_engine(struct drm_device *dev, int engine);
24248+
24249+struct psb_dstbuf_cache {
24250+ unsigned int dst;
24251+ struct ttm_buffer_object *dst_buf;
24252+ unsigned long dst_offset;
24253+ uint32_t *dst_page;
24254+ unsigned int dst_page_offset;
24255+ struct ttm_bo_kmap_obj dst_kmap;
24256+ bool dst_is_iomem;
24257+};
24258+
24259+struct psb_validate_buffer {
24260+ struct ttm_validate_buffer base;
24261+ struct psb_validate_req req;
24262+ int ret;
24263+ struct psb_validate_arg __user *user_val_arg;
24264+ uint32_t flags;
24265+ uint32_t offset;
24266+ int po_correct;
24267+};
24268+
24269+
24270+
24271+#define PSB_REG_GRAN_SHIFT 2
24272+#define PSB_REG_GRANULARITY (1 << PSB_REG_GRAN_SHIFT)
24273+#define PSB_MAX_REG 0x1000
24274+
24275+static const uint32_t disallowed_ranges[][2] = {
24276+ {0x0000, 0x0200},
24277+ {0x0208, 0x0214},
24278+ {0x021C, 0x0224},
24279+ {0x0230, 0x0234},
24280+ {0x0248, 0x024C},
24281+ {0x0254, 0x0358},
24282+ {0x0428, 0x0428},
24283+ {0x0430, 0x043C},
24284+ {0x0498, 0x04B4},
24285+ {0x04CC, 0x04D8},
24286+ {0x04E0, 0x07FC},
24287+ {0x0804, 0x0A14},
24288+ {0x0A4C, 0x0A58},
24289+ {0x0A68, 0x0A80},
24290+ {0x0AA0, 0x0B1C},
24291+ {0x0B2C, 0x0CAC},
24292+ {0x0CB4, PSB_MAX_REG - PSB_REG_GRANULARITY}
24293+};
24294+
24295+static uint32_t psb_disallowed_regs[PSB_MAX_REG /
24296+ (PSB_REG_GRANULARITY *
24297+ (sizeof(uint32_t) << 3))];
24298+
24299+static inline int psb_disallowed(uint32_t reg)
24300+{
24301+ reg >>= PSB_REG_GRAN_SHIFT;
24302+ return (psb_disallowed_regs[reg >> 5] & (1 << (reg & 31))) != 0;
24303+}
24304+
24305+void psb_init_disallowed(void)
24306+{
24307+ int i;
24308+ uint32_t reg, tmp;
24309+ static int initialized;
24310+
24311+ if (initialized)
24312+ return;
24313+
24314+ initialized = 1;
24315+ memset(psb_disallowed_regs, 0, sizeof(psb_disallowed_regs));
24316+
24317+ for (i = 0;
24318+ i < (sizeof(disallowed_ranges) / (2 * sizeof(uint32_t)));
24319+ ++i) {
24320+ for (reg = disallowed_ranges[i][0];
24321+ reg <= disallowed_ranges[i][1]; reg += 4) {
24322+ tmp = reg >> 2;
24323+ psb_disallowed_regs[tmp >> 5] |= (1 << (tmp & 31));
24324+ }
24325+ }
24326+}
24327+
24328+static int psb_memcpy_check(uint32_t *dst, const uint32_t *src,
24329+ uint32_t size)
24330+{
24331+ size >>= 3;
24332+ while (size--) {
24333+ if (unlikely((*src >= 0x1000) || psb_disallowed(*src))) {
24334+ DRM_ERROR("Forbidden SGX register access: "
24335+ "0x%04x.\n", *src);
24336+ return -EPERM;
24337+ }
24338+ *dst++ = *src++;
24339+ *dst++ = *src++;
24340+ }
24341+ return 0;
24342+}
24343+
24344+int psb_2d_wait_available(struct drm_psb_private *dev_priv,
24345+ unsigned size)
24346+{
24347+ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
24348+ int ret = 0;
24349+
24350+retry:
24351+ if (avail < size) {
24352+#if 0
24353+ /* We'd ideally
24354+ * like to have an IRQ-driven event here.
24355+ */
24356+
24357+ psb_2D_irq_on(dev_priv);
24358+ DRM_WAIT_ON(ret, dev_priv->event_2d_queue, DRM_HZ,
24359+ ((avail =
24360+ PSB_RSGX32(PSB_CR_2D_SOCIF)) >= size));
24361+ psb_2D_irq_off(dev_priv);
24362+ if (ret == 0)
24363+ return 0;
24364+ if (ret == -EINTR) {
24365+ ret = 0;
24366+ goto retry;
24367+ }
24368+#else
24369+ avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
24370+ goto retry;
24371+#endif
24372+ }
24373+ return ret;
24374+}
24375+
24376+int psb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
24377+ unsigned size)
24378+{
24379+ int ret = 0;
24380+ int i;
24381+ unsigned submit_size;
24382+
24383+ while (size > 0) {
24384+ submit_size = (size < 0x60) ? size : 0x60;
24385+ size -= submit_size;
24386+ ret = psb_2d_wait_available(dev_priv, submit_size);
24387+ if (ret)
24388+ return ret;
24389+
24390+ submit_size <<= 2;
24391+ mutex_lock(&dev_priv->reset_mutex);
24392+ for (i = 0; i < submit_size; i += 4) {
24393+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
24394+ }
24395+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
24396+ mutex_unlock(&dev_priv->reset_mutex);
24397+ }
24398+ return 0;
24399+}
24400+
24401+int psb_blit_sequence(struct drm_psb_private *dev_priv, uint32_t sequence)
24402+{
24403+ uint32_t buffer[8];
24404+ uint32_t *bufp = buffer;
24405+ int ret;
24406+
24407+ *bufp++ = PSB_2D_FENCE_BH;
24408+
24409+ *bufp++ = PSB_2D_DST_SURF_BH |
24410+ PSB_2D_DST_8888ARGB | (4 << PSB_2D_DST_STRIDE_SHIFT);
24411+ *bufp++ = dev_priv->comm_mmu_offset - dev_priv->mmu_2d_offset;
24412+
24413+ *bufp++ = PSB_2D_BLIT_BH |
24414+ PSB_2D_ROT_NONE |
24415+ PSB_2D_COPYORDER_TL2BR |
24416+ PSB_2D_DSTCK_DISABLE |
24417+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
24418+
24419+ *bufp++ = sequence << PSB_2D_FILLCOLOUR_SHIFT;
24420+ *bufp++ = (0 << PSB_2D_DST_XSTART_SHIFT) |
24421+ (0 << PSB_2D_DST_YSTART_SHIFT);
24422+ *bufp++ =
24423+ (1 << PSB_2D_DST_XSIZE_SHIFT) | (1 << PSB_2D_DST_YSIZE_SHIFT);
24424+
24425+ *bufp++ = PSB_2D_FLUSH_BH;
24426+
24427+ psb_2d_lock(dev_priv);
24428+ ret = psb_2d_submit(dev_priv, buffer, bufp - buffer);
24429+ psb_2d_unlock(dev_priv);
24430+
24431+ if (!ret)
24432+ psb_schedule_watchdog(dev_priv);
24433+ return ret;
24434+}
24435+
24436+int psb_emit_2d_copy_blit(struct drm_device *dev,
24437+ uint32_t src_offset,
24438+ uint32_t dst_offset, uint32_t pages,
24439+ int direction)
24440+{
24441+ uint32_t cur_pages;
24442+ struct drm_psb_private *dev_priv = dev->dev_private;
24443+ uint32_t buf[10];
24444+ uint32_t *bufp;
24445+ uint32_t xstart;
24446+ uint32_t ystart;
24447+ uint32_t blit_cmd;
24448+ uint32_t pg_add;
24449+ int ret = 0;
24450+
24451+ if (!dev_priv)
24452+ return 0;
24453+
24454+ if (direction) {
24455+ pg_add = (pages - 1) << PAGE_SHIFT;
24456+ src_offset += pg_add;
24457+ dst_offset += pg_add;
24458+ }
24459+
24460+ blit_cmd = PSB_2D_BLIT_BH |
24461+ PSB_2D_ROT_NONE |
24462+ PSB_2D_DSTCK_DISABLE |
24463+ PSB_2D_SRCCK_DISABLE |
24464+ PSB_2D_USE_PAT |
24465+ PSB_2D_ROP3_SRCCOPY |
24466+ (direction ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TL2BR);
24467+ xstart = (direction) ? ((PAGE_SIZE - 1) >> 2) : 0;
24468+
24469+ psb_2d_lock(dev_priv);
24470+ while (pages > 0) {
24471+ cur_pages = pages;
24472+ if (cur_pages > 2048)
24473+ cur_pages = 2048;
24474+ pages -= cur_pages;
24475+ ystart = (direction) ? cur_pages - 1 : 0;
24476+
24477+ bufp = buf;
24478+ *bufp++ = PSB_2D_FENCE_BH;
24479+
24480+ *bufp++ = PSB_2D_DST_SURF_BH | PSB_2D_DST_8888ARGB |
24481+ (PAGE_SIZE << PSB_2D_DST_STRIDE_SHIFT);
24482+ *bufp++ = dst_offset;
24483+ *bufp++ = PSB_2D_SRC_SURF_BH | PSB_2D_SRC_8888ARGB |
24484+ (PAGE_SIZE << PSB_2D_SRC_STRIDE_SHIFT);
24485+ *bufp++ = src_offset;
24486+ *bufp++ =
24487+ PSB_2D_SRC_OFF_BH | (xstart <<
24488+ PSB_2D_SRCOFF_XSTART_SHIFT) |
24489+ (ystart << PSB_2D_SRCOFF_YSTART_SHIFT);
24490+ *bufp++ = blit_cmd;
24491+ *bufp++ = (xstart << PSB_2D_DST_XSTART_SHIFT) |
24492+ (ystart << PSB_2D_DST_YSTART_SHIFT);
24493+ *bufp++ = ((PAGE_SIZE >> 2) << PSB_2D_DST_XSIZE_SHIFT) |
24494+ (cur_pages << PSB_2D_DST_YSIZE_SHIFT);
24495+
24496+ ret = psb_2d_submit(dev_priv, buf, bufp - buf);
24497+ if (ret)
24498+ goto out;
24499+ pg_add =
24500+ (cur_pages << PAGE_SHIFT) * ((direction) ? -1 : 1);
24501+ src_offset += pg_add;
24502+ dst_offset += pg_add;
24503+ }
24504+out:
24505+ psb_2d_unlock(dev_priv);
24506+ return ret;
24507+}
24508+
24509+void psb_init_2d(struct drm_psb_private *dev_priv)
24510+{
24511+ spin_lock_init(&dev_priv->sequence_lock);
24512+ psb_reset(dev_priv, 1);
24513+ dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
24514+ PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
24515+ (void) PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
24516+}
24517+
24518+int psb_idle_2d(struct drm_device *dev)
24519+{
24520+ struct drm_psb_private *dev_priv = dev->dev_private;
24521+ unsigned long _end = jiffies + DRM_HZ;
24522+ int busy = 0;
24523+
24524+ /*
24525+ * First idle the 2D engine.
24526+ */
24527+
24528+ if (dev_priv->engine_lockup_2d)
24529+ return -EBUSY;
24530+
24531+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
24532+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) ==
24533+ 0))
24534+ goto out;
24535+
24536+ do {
24537+ busy =
24538+ (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
24539+ } while (busy && !time_after_eq(jiffies, _end));
24540+
24541+ if (busy)
24542+ busy =
24543+ (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
24544+ if (busy)
24545+ goto out;
24546+
24547+ do {
24548+ busy =
24549+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
24550+ _PSB_C2B_STATUS_BUSY)
24551+ != 0);
24552+ } while (busy && !time_after_eq(jiffies, _end));
24553+ if (busy)
24554+ busy =
24555+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
24556+ _PSB_C2B_STATUS_BUSY)
24557+ != 0);
24558+
24559+out:
24560+ if (busy)
24561+ dev_priv->engine_lockup_2d = 1;
24562+
24563+ return (busy) ? -EBUSY : 0;
24564+}
24565+
24566+int psb_idle_3d(struct drm_device *dev)
24567+{
24568+ struct drm_psb_private *dev_priv = dev->dev_private;
24569+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
24570+ int ret;
24571+
24572+ ret = wait_event_timeout(scheduler->idle_queue,
24573+ psb_scheduler_finished(dev_priv),
24574+ DRM_HZ * 10);
24575+
24576+ return (ret < 1) ? -EBUSY : 0;
24577+}
24578+
24579+static int psb_check_presumed(struct psb_validate_req *req,
24580+ struct ttm_buffer_object *bo,
24581+ struct psb_validate_arg __user *data,
24582+ int *presumed_ok)
24583+{
24584+ struct psb_validate_req __user *user_req = &(data->d.req);
24585+
24586+ *presumed_ok = 0;
24587+
24588+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
24589+ *presumed_ok = 1;
24590+ return 0;
24591+ }
24592+
24593+ if (unlikely(!(req->presumed_flags & PSB_USE_PRESUMED)))
24594+ return 0;
24595+
24596+ if (bo->offset == req->presumed_gpu_offset) {
24597+ *presumed_ok = 1;
24598+ return 0;
24599+ }
24600+
24601+ return __put_user(req->presumed_flags & ~PSB_USE_PRESUMED,
24602+ &user_req->presumed_flags);
24603+}
24604+
24605+
24606+static void psb_unreference_buffers(struct psb_context *context)
24607+{
24608+ struct ttm_validate_buffer *entry, *next;
24609+ struct psb_validate_buffer *vbuf;
24610+ struct list_head *list = &context->validate_list;
24611+
24612+ list_for_each_entry_safe(entry, next, list, head) {
24613+ vbuf =
24614+ container_of(entry, struct psb_validate_buffer, base);
24615+ list_del(&entry->head);
24616+ ttm_bo_unref(&entry->bo);
24617+ }
24618+
24619+ list = &context->kern_validate_list;
24620+
24621+ list_for_each_entry_safe(entry, next, list, head) {
24622+ vbuf =
24623+ container_of(entry, struct psb_validate_buffer, base);
24624+ list_del(&entry->head);
24625+ ttm_bo_unref(&entry->bo);
24626+ }
24627+}
24628+
24629+
24630+static int psb_lookup_validate_buffer(struct drm_file *file_priv,
24631+ uint64_t data,
24632+ struct psb_validate_buffer *item)
24633+{
24634+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
24635+
24636+ item->user_val_arg =
24637+ (struct psb_validate_arg __user *) (unsigned long) data;
24638+
24639+ if (unlikely(copy_from_user(&item->req, &item->user_val_arg->d.req,
24640+ sizeof(item->req)) != 0)) {
24641+ DRM_ERROR("Lookup copy fault.\n");
24642+ return -EFAULT;
24643+ }
24644+
24645+ item->base.bo =
24646+ ttm_buffer_object_lookup(tfile, item->req.buffer_handle);
24647+
24648+ if (unlikely(item->base.bo == NULL)) {
24649+ DRM_ERROR("Bo lookup fault.\n");
24650+ return -EINVAL;
24651+ }
24652+
24653+ return 0;
24654+}
24655+
24656+static int psb_reference_buffers(struct drm_file *file_priv,
24657+ uint64_t data,
24658+ struct psb_context *context)
24659+{
24660+ struct psb_validate_buffer *item;
24661+ int ret;
24662+
24663+ while (likely(data != 0)) {
24664+ if (unlikely(context->used_buffers >=
24665+ PSB_NUM_VALIDATE_BUFFERS)) {
24666+ DRM_ERROR("Too many buffers "
24667+ "on validate list.\n");
24668+ ret = -EINVAL;
24669+ goto out_err0;
24670+ }
24671+
24672+ item = &context->buffers[context->used_buffers];
24673+
24674+ ret = psb_lookup_validate_buffer(file_priv, data, item);
24675+ if (unlikely(ret != 0))
24676+ goto out_err0;
24677+
24678+ item->base.reserved = 0;
24679+ list_add_tail(&item->base.head, &context->validate_list);
24680+ context->used_buffers++;
24681+ data = item->req.next;
24682+ }
24683+ return 0;
24684+
24685+out_err0:
24686+ psb_unreference_buffers(context);
24687+ return ret;
24688+}
24689+
24690+static int
24691+psb_placement_fence_type(struct ttm_buffer_object *bo,
24692+ uint64_t set_val_flags,
24693+ uint64_t clr_val_flags,
24694+ uint32_t new_fence_class,
24695+ uint32_t *new_fence_type)
24696+{
24697+ int ret;
24698+ uint32_t n_fence_type;
24699+ uint32_t set_flags = set_val_flags & 0xFFFFFFFF;
24700+ uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF;
24701+ struct ttm_fence_object *old_fence;
24702+ uint32_t old_fence_type;
24703+
24704+ if (unlikely
24705+ (!(set_val_flags &
24706+ (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) {
24707+ DRM_ERROR
24708+ ("GPU access type (read / write) is not indicated.\n");
24709+ return -EINVAL;
24710+ }
24711+
24712+ ret = ttm_bo_check_placement(bo, set_flags, clr_flags);
24713+ if (unlikely(ret != 0))
24714+ return ret;
24715+
24716+ switch (new_fence_class) {
24717+ case PSB_ENGINE_TA:
24718+ n_fence_type = _PSB_FENCE_TYPE_EXE |
24719+ _PSB_FENCE_TYPE_TA_DONE | _PSB_FENCE_TYPE_RASTER_DONE;
24720+ if (set_val_flags & PSB_BO_FLAG_TA)
24721+ n_fence_type &= ~_PSB_FENCE_TYPE_RASTER_DONE;
24722+ if (set_val_flags & PSB_BO_FLAG_COMMAND)
24723+ n_fence_type &=
24724+ ~(_PSB_FENCE_TYPE_RASTER_DONE |
24725+ _PSB_FENCE_TYPE_TA_DONE);
24726+ if (set_val_flags & PSB_BO_FLAG_SCENE)
24727+ n_fence_type |= _PSB_FENCE_TYPE_SCENE_DONE;
24728+ if (set_val_flags & PSB_BO_FLAG_FEEDBACK)
24729+ n_fence_type |= _PSB_FENCE_TYPE_FEEDBACK;
24730+ break;
24731+ default:
24732+ n_fence_type = _PSB_FENCE_TYPE_EXE;
24733+ }
24734+
24735+ *new_fence_type = n_fence_type;
24736+ old_fence = (struct ttm_fence_object *) bo->sync_obj;
24737+ old_fence_type = (uint32_t) (unsigned long) bo->sync_obj_arg;
24738+
24739+ if (old_fence && ((new_fence_class != old_fence->fence_class) ||
24740+ ((n_fence_type ^ old_fence_type) &
24741+ old_fence_type))) {
24742+ ret = ttm_bo_wait(bo, 0, 1, 0);
24743+ if (unlikely(ret != 0))
24744+ return ret;
24745+ }
24746+
24747+ bo->proposed_flags = (bo->proposed_flags | set_flags)
24748+ & ~clr_flags & TTM_PL_MASK_MEMTYPE;
24749+
24750+ return 0;
24751+}
24752+
24753+int psb_validate_kernel_buffer(struct psb_context *context,
24754+ struct ttm_buffer_object *bo,
24755+ uint32_t fence_class,
24756+ uint64_t set_flags, uint64_t clr_flags)
24757+{
24758+ struct psb_validate_buffer *item;
24759+ uint32_t cur_fence_type;
24760+ int ret;
24761+
24762+ if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) {
24763+ DRM_ERROR("Out of free validation buffer entries for "
24764+ "kernel buffer validation.\n");
24765+ return -ENOMEM;
24766+ }
24767+
24768+ item = &context->buffers[context->used_buffers];
24769+ item->user_val_arg = NULL;
24770+ item->base.reserved = 0;
24771+
24772+ ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq);
24773+ if (unlikely(ret != 0))
24774+ goto out_unlock;
24775+
24776+ mutex_lock(&bo->mutex);
24777+ ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class,
24778+ &cur_fence_type);
24779+ if (unlikely(ret != 0)) {
24780+ ttm_bo_unreserve(bo);
24781+ goto out_unlock;
24782+ }
24783+
24784+ item->base.bo = ttm_bo_reference(bo);
24785+ item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type;
24786+ item->base.reserved = 1;
24787+
24788+ list_add_tail(&item->base.head, &context->kern_validate_list);
24789+ context->used_buffers++;
24790+
24791+ ret = ttm_buffer_object_validate(bo, 1, 0);
24792+ if (unlikely(ret != 0))
24793+ goto out_unlock;
24794+
24795+ item->offset = bo->offset;
24796+ item->flags = bo->mem.flags;
24797+ context->fence_types |= cur_fence_type;
24798+
24799+out_unlock:
24800+ mutex_unlock(&bo->mutex);
24801+ return ret;
24802+}
24803+
24804+
24805+static int psb_validate_buffer_list(struct drm_file *file_priv,
24806+ uint32_t fence_class,
24807+ struct psb_context *context,
24808+ int *po_correct)
24809+{
24810+ struct psb_validate_buffer *item;
24811+ struct ttm_buffer_object *bo;
24812+ int ret;
24813+ struct psb_validate_req *req;
24814+ uint32_t fence_types = 0;
24815+ uint32_t cur_fence_type;
24816+ struct ttm_validate_buffer *entry;
24817+ struct list_head *list = &context->validate_list;
24818+
24819+ *po_correct = 1;
24820+
24821+ list_for_each_entry(entry, list, head) {
24822+ item =
24823+ container_of(entry, struct psb_validate_buffer, base);
24824+ bo = entry->bo;
24825+ item->ret = 0;
24826+ req = &item->req;
24827+
24828+ mutex_lock(&bo->mutex);
24829+ ret = psb_placement_fence_type(bo,
24830+ req->set_flags,
24831+ req->clear_flags,
24832+ fence_class,
24833+ &cur_fence_type);
24834+ if (unlikely(ret != 0))
24835+ goto out_err;
24836+
24837+ ret = ttm_buffer_object_validate(bo, 1, 0);
24838+
24839+ if (unlikely(ret != 0))
24840+ goto out_err;
24841+
24842+ fence_types |= cur_fence_type;
24843+ entry->new_sync_obj_arg = (void *)
24844+ (unsigned long) cur_fence_type;
24845+
24846+ item->offset = bo->offset;
24847+ item->flags = bo->mem.flags;
24848+ mutex_unlock(&bo->mutex);
24849+
24850+ ret =
24851+ psb_check_presumed(&item->req, bo, item->user_val_arg,
24852+ &item->po_correct);
24853+ if (unlikely(ret != 0))
24854+ goto out_err;
24855+
24856+ if (unlikely(!item->po_correct))
24857+ *po_correct = 0;
24858+
24859+ item++;
24860+ }
24861+
24862+ context->fence_types |= fence_types;
24863+
24864+ return 0;
24865+out_err:
24866+ mutex_unlock(&bo->mutex);
24867+ item->ret = ret;
24868+ return ret;
24869+}
24870+
24871+
24872+int
24873+psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t *regs,
24874+ unsigned int cmds)
24875+{
24876+ int i;
24877+
24878+ /*
24879+ * cmds counts 32-bit words; each register write takes an (offset, value) pair, so halve it.
24880+ */
24881+
24882+ cmds >>= 1;
24883+ for (i = 0; i < cmds; ++i) {
24884+ PSB_WSGX32(regs[1], regs[0]);
24885+ regs += 2;
24886+ }
24887+ wmb();
24888+ return 0;
24889+}
24890+
24891+/*
24892+ * Security: Block user-space writes to the MMU mapping registers.
24893+ * This is important for security and brings Poulsbo DRM
24894+ * up to par with the other DRM drivers. With this in place,
24895+ * user-space cannot map arbitrary memory pages into graphics
24896+ * memory; however, all user-space processes still have access,
24897+ * in effect, to every buffer object mapped to graphics
24898+ * memory.
24899+ */
24900+
24901+int
24902+psb_submit_copy_cmdbuf(struct drm_device *dev,
24903+ struct ttm_buffer_object *cmd_buffer,
24904+ unsigned long cmd_offset,
24905+ unsigned long cmd_size,
24906+ int engine, uint32_t *copy_buffer)
24907+{
24908+ unsigned long cmd_end = cmd_offset + (cmd_size << 2);
24909+ struct drm_psb_private *dev_priv = dev->dev_private;
24910+ unsigned long cmd_page_offset =
24911+ cmd_offset - (cmd_offset & PAGE_MASK);
24912+ unsigned long cmd_next;
24913+ struct ttm_bo_kmap_obj cmd_kmap;
24914+ uint32_t *cmd_page;
24915+ unsigned cmds;
24916+ bool is_iomem;
24917+ int ret = 0;
24918+
24919+ if (cmd_size == 0)
24920+ return 0;
24921+
24922+ if (engine == PSB_ENGINE_2D)
24923+ psb_2d_lock(dev_priv);
24924+
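+ /*
+ * Walk the command buffer one page at a time: kmap the page, then
+ * either feed 2D commands straight to the hardware or copy (and
+ * sanity-check) TA/raster commands into copy_buffer.
+ */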
24925+ do {
24926+ cmd_next = psb_offset_end(cmd_offset, cmd_end);
24927+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT,
24928+ 1, &cmd_kmap);
24929+
24930+ if (ret) {
24931+ if (engine == PSB_ENGINE_2D)
24932+ psb_2d_unlock(dev_priv);
24933+ return ret;
24934+ }
24935+ cmd_page = ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem);
24936+ cmd_page_offset = (cmd_offset & ~PAGE_MASK) >> 2;
24937+ cmds = (cmd_next - cmd_offset) >> 2;
24938+
24939+ switch (engine) {
24940+ case PSB_ENGINE_2D:
24941+ ret =
24942+ psb_2d_submit(dev_priv,
24943+ cmd_page + cmd_page_offset,
24944+ cmds);
24945+ break;
24946+ case PSB_ENGINE_RASTERIZER:
24947+ case PSB_ENGINE_TA:
24948+ case PSB_ENGINE_HPRAST:
24949+ PSB_DEBUG_GENERAL("Reg copy.\n");
24950+ ret = psb_memcpy_check(copy_buffer,
24951+ cmd_page + cmd_page_offset,
24952+ cmds * sizeof(uint32_t));
24953+ copy_buffer += cmds;
24954+ break;
24955+ default:
24956+ ret = -EINVAL;
24957+ }
24958+ ttm_bo_kunmap(&cmd_kmap);
24959+ if (ret)
24960+ break;
24961+ } while (cmd_offset = cmd_next, cmd_offset != cmd_end);
24962+
24963+ if (engine == PSB_ENGINE_2D)
24964+ psb_2d_unlock(dev_priv);
24965+
24966+ return ret;
24967+}
24968+
24969+static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
24970+{
24971+ if (dst_cache->dst_page) {
24972+ ttm_bo_kunmap(&dst_cache->dst_kmap);
24973+ dst_cache->dst_page = NULL;
24974+ }
24975+ dst_cache->dst_buf = NULL;
24976+ dst_cache->dst = ~0;
24977+}
24978+
24979+static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
24980+ struct psb_validate_buffer *buffers,
24981+ unsigned int dst,
24982+ unsigned long dst_offset)
24983+{
24984+ int ret;
24985+
24986+ PSB_DEBUG_GENERAL("Destination buffer is %d.\n", dst);
24987+
24988+ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
24989+ psb_clear_dstbuf_cache(dst_cache);
24990+ dst_cache->dst = dst;
24991+ dst_cache->dst_buf = buffers[dst].base.bo;
24992+ }
24993+
24994+ if (unlikely
24995+ (dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
24996+ DRM_ERROR("Relocation destination out of bounds.\n");
24997+ return -EINVAL;
24998+ }
24999+
25000+ if (!psb_same_page(dst_cache->dst_offset, dst_offset) ||
25001+ NULL == dst_cache->dst_page) {
25002+ if (NULL != dst_cache->dst_page) {
25003+ ttm_bo_kunmap(&dst_cache->dst_kmap);
25004+ dst_cache->dst_page = NULL;
25005+ }
25006+
25007+ ret =
25008+ ttm_bo_kmap(dst_cache->dst_buf,
25009+ dst_offset >> PAGE_SHIFT, 1,
25010+ &dst_cache->dst_kmap);
25011+ if (ret) {
25012+ DRM_ERROR("Could not map destination buffer for "
25013+ "relocation.\n");
25014+ return ret;
25015+ }
25016+
25017+ dst_cache->dst_page =
25018+ ttm_kmap_obj_virtual(&dst_cache->dst_kmap,
25019+ &dst_cache->dst_is_iomem);
25020+ dst_cache->dst_offset = dst_offset & PAGE_MASK;
25021+ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
25022+ }
25023+ return 0;
25024+}
25025+
25026+static int psb_apply_reloc(struct drm_psb_private *dev_priv,
25027+ uint32_t fence_class,
25028+ const struct drm_psb_reloc *reloc,
25029+ struct psb_validate_buffer *buffers,
25030+ int num_buffers,
25031+ struct psb_dstbuf_cache *dst_cache,
25032+ int no_wait, int interruptible)
25033+{
25034+ uint32_t val;
25035+ uint32_t background;
25036+ unsigned int index;
25037+ int ret;
25038+ unsigned int shift;
25039+ unsigned int align_shift;
25040+ struct ttm_buffer_object *reloc_bo;
25041+
25042+
25043+ PSB_DEBUG_GENERAL("Reloc type %d\n"
25044+ "\t where 0x%04x\n"
25045+ "\t buffer 0x%04x\n"
25046+ "\t mask 0x%08x\n"
25047+ "\t shift 0x%08x\n"
25048+ "\t pre_add 0x%08x\n"
25049+ "\t background 0x%08x\n"
25050+ "\t dst_buffer 0x%08x\n"
25051+ "\t arg0 0x%08x\n"
25052+ "\t arg1 0x%08x\n",
25053+ reloc->reloc_op,
25054+ reloc->where,
25055+ reloc->buffer,
25056+ reloc->mask,
25057+ reloc->shift,
25058+ reloc->pre_add,
25059+ reloc->background,
25060+ reloc->dst_buffer, reloc->arg0, reloc->arg1);
25061+
25062+ if (unlikely(reloc->buffer >= num_buffers)) {
25063+ DRM_ERROR("Illegal relocation buffer %d.\n",
25064+ reloc->buffer);
25065+ return -EINVAL;
25066+ }
25067+
25068+ if (buffers[reloc->buffer].po_correct)
25069+ return 0;
25070+
25071+ if (unlikely(reloc->dst_buffer >= num_buffers)) {
25072+ DRM_ERROR
25073+ ("Illegal destination buffer for relocation %d.\n",
25074+ reloc->dst_buffer);
25075+ return -EINVAL;
25076+ }
25077+
25078+ ret =
25079+ psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
25080+ reloc->where << 2);
25081+ if (ret)
25082+ return ret;
25083+
25084+ reloc_bo = buffers[reloc->buffer].base.bo;
25085+
25086+ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
25087+ DRM_ERROR("Illegal relocation offset add.\n");
25088+ return -EINVAL;
25089+ }
25090+
25091+ switch (reloc->reloc_op) {
25092+ case PSB_RELOC_OP_OFFSET:
25093+ val = reloc_bo->offset + reloc->pre_add;
25094+ break;
25095+ case PSB_RELOC_OP_2D_OFFSET:
25096+ val = reloc_bo->offset + reloc->pre_add -
25097+ dev_priv->mmu_2d_offset;
25098+ if (unlikely(val >= PSB_2D_SIZE)) {
25099+ DRM_ERROR("2D relocation out of bounds\n");
25100+ return -EINVAL;
25101+ }
25102+ break;
25103+ case PSB_RELOC_OP_PDS_OFFSET:
25104+ val =
25105+ reloc_bo->offset + reloc->pre_add - PSB_MEM_PDS_START;
25106+ if (unlikely
25107+ (val >= (PSB_MEM_MMU_START - PSB_MEM_PDS_START))) {
25108+ DRM_ERROR("PDS relocation out of bounds\n");
25109+ return -EINVAL;
25110+ }
25111+ break;
25112+ default:
25113+ DRM_ERROR("Unimplemented relocation.\n");
25114+ return -EINVAL;
25115+ }
25116+
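+ /*
+ * Scale the GPU offset as requested, then merge it into the
+ * background value under the relocation mask and write it to
+ * the mapped destination page.
+ */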
25117+ shift =
25118+ (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
25119+ align_shift =
25120+ (reloc->
25121+ shift & PSB_RELOC_ALSHIFT_MASK) >> PSB_RELOC_ALSHIFT_SHIFT;
25122+
25123+ val = ((val >> align_shift) << shift);
25124+ index = reloc->where - dst_cache->dst_page_offset;
25125+
25126+ background = reloc->background;
25127+ val = (background & ~reloc->mask) | (val & reloc->mask);
25128+ dst_cache->dst_page[index] = val;
25129+
25130+ PSB_DEBUG_GENERAL("Reloc buffer %d index 0x%08x, value 0x%08x\n",
25131+ reloc->dst_buffer, index,
25132+ dst_cache->dst_page[index]);
25133+
25134+ return 0;
25135+}
25136+
25137+static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
25138+ unsigned int num_pages)
25139+{
25140+ int ret = 0;
25141+
25142+ spin_lock(&dev_priv->reloc_lock);
25143+ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
25144+ dev_priv->rel_mapped_pages += num_pages;
25145+ ret = 1;
25146+ }
25147+ spin_unlock(&dev_priv->reloc_lock);
25148+ return ret;
25149+}
25150+
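+/*
+ * Map the user's relocation buffer (which must already be on the
+ * validate list and therefore reserved) and apply each relocation to
+ * its destination buffer. Concurrent mappings are throttled through
+ * rel_mapped_pages / PSB_MAX_RELOC_PAGES.
+ */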
25151+static int psb_fixup_relocs(struct drm_file *file_priv,
25152+ uint32_t fence_class,
25153+ unsigned int num_relocs,
25154+ unsigned int reloc_offset,
25155+ uint32_t reloc_handle,
25156+ struct psb_context *context,
25157+ int no_wait, int interruptible)
25158+{
25159+ struct drm_device *dev = file_priv->minor->dev;
25160+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
25161+ struct drm_psb_private *dev_priv =
25162+ (struct drm_psb_private *) dev->dev_private;
25163+ struct ttm_buffer_object *reloc_buffer = NULL;
25164+ unsigned int reloc_num_pages;
25165+ unsigned int reloc_first_page;
25166+ unsigned int reloc_last_page;
25167+ struct psb_dstbuf_cache dst_cache;
25168+ struct drm_psb_reloc *reloc;
25169+ struct ttm_bo_kmap_obj reloc_kmap;
25170+ bool reloc_is_iomem;
25171+ int count;
25172+ int ret = 0;
25173+ int registered = 0;
25174+ uint32_t num_buffers = context->used_buffers;
25175+
25176+ if (num_relocs == 0)
25177+ return 0;
25178+
25179+ memset(&dst_cache, 0, sizeof(dst_cache));
25180+ memset(&reloc_kmap, 0, sizeof(reloc_kmap));
25181+
25182+ reloc_buffer = ttm_buffer_object_lookup(tfile, reloc_handle);
25183+ if (!reloc_buffer)
25184+ goto out;
25185+
25186+ if (unlikely(atomic_read(&reloc_buffer->reserved) != 1)) {
25187+ DRM_ERROR("Relocation buffer was not on validate list.\n");
25188+ ret = -EINVAL;
25189+ goto out;
25190+ }
25191+
25192+ reloc_first_page = reloc_offset >> PAGE_SHIFT;
25193+ reloc_last_page =
25194+ (reloc_offset +
25195+ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
25196+ reloc_num_pages = reloc_last_page - reloc_first_page + 1;
25197+ reloc_offset &= ~PAGE_MASK;
25198+
25199+ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
25200+ DRM_ERROR("Relocation buffer is too large\n");
25201+ ret = -EINVAL;
25202+ goto out;
25203+ }
25204+
25205+ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
25206+ (registered =
25207+ psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
25208+
25209+ if (ret == -EINTR) {
25210+ ret = -ERESTART;
25211+ goto out;
25212+ }
25213+ if (ret) {
25214+ DRM_ERROR("Error waiting for space to map "
25215+ "relocation buffer.\n");
25216+ goto out;
25217+ }
25218+
25219+ ret = ttm_bo_kmap(reloc_buffer, reloc_first_page,
25220+ reloc_num_pages, &reloc_kmap);
25221+
25222+ if (ret) {
25223+ DRM_ERROR("Could not map relocation buffer.\n"
25224+ "\tReloc buffer id 0x%08x.\n"
25225+ "\tReloc first page %d.\n"
25226+ "\tReloc num pages %d.\n",
25227+ reloc_handle, reloc_first_page, reloc_num_pages);
25228+ goto out;
25229+ }
25230+
25231+ reloc = (struct drm_psb_reloc *)
25232+ ((unsigned long)
25233+ ttm_kmap_obj_virtual(&reloc_kmap,
25234+ &reloc_is_iomem) + reloc_offset);
25235+
25236+ for (count = 0; count < num_relocs; ++count) {
25237+ ret = psb_apply_reloc(dev_priv, fence_class,
25238+ reloc, context->buffers,
25239+ num_buffers, &dst_cache,
25240+ no_wait, interruptible);
25241+ if (ret)
25242+ goto out1;
25243+ reloc++;
25244+ }
25245+
25246+out1:
25247+ ttm_bo_kunmap(&reloc_kmap);
25248+out:
25249+ if (registered) {
25250+ spin_lock(&dev_priv->reloc_lock);
25251+ dev_priv->rel_mapped_pages -= reloc_num_pages;
25252+ spin_unlock(&dev_priv->reloc_lock);
25253+ DRM_WAKEUP(&dev_priv->rel_mapped_queue);
25254+ }
25255+
25256+ psb_clear_dstbuf_cache(&dst_cache);
25257+ if (reloc_buffer)
25258+ ttm_bo_unref(&reloc_buffer);
25259+ return ret;
25260+}
25261+
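+/*
+ * Create a fence for the given engine and fence types and attach it to
+ * the buffers on the validate list. If fence creation fails, idle the
+ * engine and back off the reservations so the submission still
+ * completes synchronously.
+ */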
25262+void psb_fence_or_sync(struct drm_file *file_priv,
25263+ uint32_t engine,
25264+ uint32_t fence_types,
25265+ uint32_t fence_flags,
25266+ struct list_head *list,
25267+ struct psb_ttm_fence_rep *fence_arg,
25268+ struct ttm_fence_object **fence_p)
25269+{
25270+ struct drm_device *dev = file_priv->minor->dev;
25271+ struct drm_psb_private *dev_priv = psb_priv(dev);
25272+ struct ttm_fence_device *fdev = &dev_priv->fdev;
25273+ int ret;
25274+ struct ttm_fence_object *fence;
25275+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
25276+ uint32_t handle;
25277+
25278+ ret = ttm_fence_user_create(fdev, tfile,
25279+ engine, fence_types,
25280+ TTM_FENCE_FLAG_EMIT, &fence, &handle);
25281+ if (ret) {
25282+
25283+ /*
25284+ * Fence creation failed.
25285+ * Fall back to synchronous operation and idle the engine.
25286+ */
25287+
25288+ psb_idle_engine(dev, engine);
25289+ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
25290+
25291+ /*
25292+ * Communicate to user-space that
25293+ * fence creation has failed and that
25294+ * the engine is idle.
25295+ */
25296+
25297+ fence_arg->handle = ~0;
25298+ fence_arg->error = ret;
25299+ }
25300+
25301+ ttm_eu_backoff_reservation(list);
25302+ if (fence_p)
25303+ *fence_p = NULL;
25304+ return;
25305+ }
25306+
25307+ ttm_eu_fence_buffer_objects(list, fence);
25308+ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
25309+ struct ttm_fence_info info = ttm_fence_get_info(fence);
25310+ fence_arg->handle = handle;
25311+ fence_arg->fence_class = ttm_fence_class(fence);
25312+ fence_arg->fence_type = ttm_fence_types(fence);
25313+ fence_arg->signaled_types = info.signaled_types;
25314+ fence_arg->error = 0;
25315+ } else {
25316+ ret =
25317+ ttm_ref_object_base_unref(tfile, handle,
25318+ ttm_fence_type);
25319+ BUG_ON(ret);
25320+ }
25321+
25322+ if (fence_p)
25323+ *fence_p = fence;
25324+ else if (fence)
25325+ ttm_fence_object_unref(&fence);
25326+}
25327+
25328+
25329+
25330+static int psb_cmdbuf_2d(struct drm_file *priv,
25331+ struct list_head *validate_list,
25332+ uint32_t fence_type,
25333+ struct drm_psb_cmdbuf_arg *arg,
25334+ struct ttm_buffer_object *cmd_buffer,
25335+ struct psb_ttm_fence_rep *fence_arg)
25336+{
25337+ struct drm_device *dev = priv->minor->dev;
25338+ int ret;
25339+
25340+ ret = psb_submit_copy_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
25341+ arg->cmdbuf_size, PSB_ENGINE_2D,
25342+ NULL);
25343+ if (ret)
25344+ goto out_unlock;
25345+
25346+ psb_fence_or_sync(priv, PSB_ENGINE_2D, fence_type,
25347+ arg->fence_flags, validate_list, fence_arg,
25348+ NULL);
25349+
25350+ mutex_lock(&cmd_buffer->mutex);
25351+ if (cmd_buffer->sync_obj != NULL)
25352+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
25353+ mutex_unlock(&cmd_buffer->mutex);
25354+out_unlock:
25355+ return ret;
25356+}
25357+
25358+#if 0
25359+static int psb_dump_page(struct ttm_buffer_object *bo,
25360+ unsigned int page_offset, unsigned int num)
25361+{
25362+ struct ttm_bo_kmap_obj kmobj;
25363+ int is_iomem;
25364+ uint32_t *p;
25365+ int ret;
25366+ unsigned int i;
25367+
25368+ ret = ttm_bo_kmap(bo, page_offset, 1, &kmobj);
25369+ if (ret)
25370+ return ret;
25371+
25372+ p = ttm_kmap_obj_virtual(&kmobj, &is_iomem);
25373+ for (i = 0; i < num; ++i)
25374+ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
25375+
25376+ ttm_bo_kunmap(&kmobj);
25377+ return 0;
25378+}
25379+#endif
25380+
25381+static void psb_idle_engine(struct drm_device *dev, int engine)
25382+{
25383+ struct drm_psb_private *dev_priv =
25384+ (struct drm_psb_private *) dev->dev_private;
25385+ uint32_t dummy;
25386+ unsigned long dummy2;
25387+
25388+ switch (engine) {
25389+ case PSB_ENGINE_2D:
25390+
25391+ /*
25392+ * Make sure we flush 2D properly using a dummy
25393+ * fence sequence emit.
25394+ */
25395+
25396+ (void) psb_fence_emit_sequence(&dev_priv->fdev,
25397+ PSB_ENGINE_2D, 0,
25398+ &dummy, &dummy2);
25399+ psb_2d_lock(dev_priv);
25400+ (void) psb_idle_2d(dev);
25401+ psb_2d_unlock(dev_priv);
25402+ break;
25403+ case PSB_ENGINE_TA:
25404+ case PSB_ENGINE_RASTERIZER:
25405+ case PSB_ENGINE_HPRAST:
25406+ (void) psb_idle_3d(dev);
25407+ break;
25408+ default:
25409+
25410+ /*
25411+ * FIXME: Insert video engine idle command here.
25412+ */
25413+
25414+ break;
25415+ }
25416+}
25417+
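+/*
+ * Copy per-buffer validation results (GPU offset, placement and fence
+ * type mask) back to user space. On error, back off all reservations
+ * first; results are still copied back unless the submission needs to
+ * be restarted.
+ */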
25418+static int psb_handle_copyback(struct drm_device *dev,
25419+ struct psb_context *context,
25420+ int ret)
25421+{
25422+ int err = ret;
25423+ struct ttm_validate_buffer *entry;
25424+ struct psb_validate_arg arg;
25425+ struct list_head *list = &context->validate_list;
25426+
25427+ if (ret) {
25428+ ttm_eu_backoff_reservation(list);
25429+ ttm_eu_backoff_reservation(&context->kern_validate_list);
25430+ }
25431+
25432+
25433+ if (ret != -EAGAIN && ret != -EINTR && ret != -ERESTART) {
25434+ list_for_each_entry(entry, list, head) {
25435+ struct psb_validate_buffer *vbuf =
25436+ container_of(entry, struct psb_validate_buffer,
25437+ base);
25438+ arg.handled = 1;
25439+ arg.ret = vbuf->ret;
25440+ if (!arg.ret) {
25441+ struct ttm_buffer_object *bo = entry->bo;
25442+ mutex_lock(&bo->mutex);
25443+ arg.d.rep.gpu_offset = bo->offset;
25444+ arg.d.rep.placement = bo->mem.flags;
25445+ arg.d.rep.fence_type_mask =
25446+ (uint32_t) (unsigned long)
25447+ entry->new_sync_obj_arg;
25448+ mutex_unlock(&bo->mutex);
25449+ }
25450+
25451+ if (__copy_to_user(vbuf->user_val_arg,
25452+ &arg, sizeof(arg)))
25453+ err = -EFAULT;
25454+
25455+ if (arg.ret)
25456+ break;
25457+ }
25458+ }
25459+
25460+ return err;
25461+}
25462+
25463+
25464+static int psb_cmdbuf_video(struct drm_file *priv,
25465+ struct list_head *validate_list,
25466+ uint32_t fence_type,
25467+ struct drm_psb_cmdbuf_arg *arg,
25468+ struct ttm_buffer_object *cmd_buffer,
25469+ struct psb_ttm_fence_rep *fence_arg)
25470+{
25471+ struct drm_device *dev = priv->minor->dev;
25472+ struct ttm_fence_object *fence;
25473+ int ret;
25474+
25475+ /*
25476+ * FIXME: This doesn't look right. Fencing should be done AFTER
25477+ * command submission, and drm_psb_idle should idle the MSVDX completely.
25478+ */
25479+ ret =
25480+ psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
25481+ arg->cmdbuf_size, NULL);
25482+ if (ret)
25483+ return ret;
25484+
25485+
25486+ /* DRM_ERROR("Intel: Fix video fencing!!\n"); */
25487+ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, fence_type,
25488+ arg->fence_flags, validate_list, fence_arg,
25489+ &fence);
25490+
25491+
25492+ ttm_fence_object_unref(&fence);
25493+ mutex_lock(&cmd_buffer->mutex);
25494+ if (cmd_buffer->sync_obj != NULL)
25495+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
25496+ mutex_unlock(&cmd_buffer->mutex);
25497+ return 0;
25498+}
25499+
25500+static int psb_feedback_buf(struct ttm_object_file *tfile,
25501+ struct psb_context *context,
25502+ uint32_t feedback_ops,
25503+ uint32_t handle,
25504+ uint32_t offset,
25505+ uint32_t feedback_breakpoints,
25506+ uint32_t feedback_size,
25507+ struct psb_feedback_info *feedback)
25508+{
25509+ struct ttm_buffer_object *bo;
25510+ struct page *page;
25511+ uint32_t page_no;
25512+ uint32_t page_offset;
25513+ int ret;
25514+
25515+ if (feedback_ops & ~PSB_FEEDBACK_OP_VISTEST) {
25516+ DRM_ERROR("Illegal feedback op.\n");
25517+ return -EINVAL;
25518+ }
25519+
25520+ if (feedback_breakpoints != 0) {
25521+ DRM_ERROR("Feedback breakpoints not implemented yet.\n");
25522+ return -EINVAL;
25523+ }
25524+
25525+ if (feedback_size < PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) {
25526+ DRM_ERROR("Feedback buffer size too small.\n");
25527+ return -EINVAL;
25528+ }
25529+
25530+ page_offset = offset & ~PAGE_MASK;
25531+ if ((PAGE_SIZE - PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t))
25532+ < page_offset) {
25533+ DRM_ERROR("Illegal feedback buffer alignment.\n");
25534+ return -EINVAL;
25535+ }
25536+
25537+ bo = ttm_buffer_object_lookup(tfile, handle);
25538+ if (unlikely(bo == NULL)) {
25539+ DRM_ERROR("Failed looking up feedback buffer.\n");
25540+ return -EINVAL;
25541+ }
25542+
25543+
25544+ ret = psb_validate_kernel_buffer(context, bo,
25545+ PSB_ENGINE_TA,
25546+ TTM_PL_FLAG_SYSTEM |
25547+ TTM_PL_FLAG_CACHED |
25548+ PSB_GPU_ACCESS_WRITE |
25549+ PSB_BO_FLAG_FEEDBACK,
25550+ TTM_PL_MASK_MEM &
25551+ ~(TTM_PL_FLAG_SYSTEM |
25552+ TTM_PL_FLAG_CACHED));
25553+ if (unlikely(ret != 0))
25554+ goto out_unref;
25555+
25556+ page_no = offset >> PAGE_SHIFT;
25557+ if (unlikely(page_no >= bo->num_pages)) {
25558+ ret = -EINVAL;
25559+ DRM_ERROR("Illegal feedback buffer offset.\n");
25560+ goto out_unref;
25561+ }
25562+
25563+ if (unlikely(bo->ttm == NULL)) {
25564+ ret = -EINVAL;
25565+ DRM_ERROR("Vistest buffer without TTM.\n");
25566+ goto out_unref;
25567+ }
25568+
25569+ page = ttm_tt_get_page(bo->ttm, page_no);
25570+ if (unlikely(page == NULL)) {
25571+ ret = -ENOMEM;
25572+ goto out_unref;
25573+ }
25574+
25575+ feedback->page = page;
25576+ feedback->offset = page_offset;
25577+
25578+ /*
25579+ * Note: bo reference transferred.
25580+ */
25581+
25582+ feedback->bo = bo;
25583+ return 0;
25584+
25585+out_unref:
25586+ ttm_bo_unref(&bo);
25587+ return ret;
25588+}
25589+
25590+void psb_down_island_power(struct drm_device *dev, int islands)
25591+{
25592+ u32 pwr_cnt = 0;
25593+ pwr_cnt = MSG_READ32(PSB_PUNIT_PORT, PSB_PWRGT_CNT);
25594+ if (islands & PSB_GRAPHICS_ISLAND)
25595+ pwr_cnt |= 0x3;
25596+ if (islands & PSB_VIDEO_ENC_ISLAND)
25597+ pwr_cnt |= 0x30;
25598+ if (islands & PSB_VIDEO_DEC_ISLAND)
25599+ pwr_cnt |= 0xc;
25600+ MSG_WRITE32(PSB_PUNIT_PORT, PSB_PWRGT_CNT, pwr_cnt);
25601+}
25602+void psb_up_island_power(struct drm_device *dev, int islands)
25603+{
25604+ u32 pwr_cnt = 0;
25605+ u32 count = 5;
25606+ u32 pwr_sts = 0;
25607+ u32 pwr_mask = 0;
25608+ pwr_cnt = MSG_READ32(PSB_PUNIT_PORT, PSB_PWRGT_CNT);
25609+ if (islands & PSB_GRAPHICS_ISLAND) {
25610+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
25611+ pwr_mask |= PSB_PWRGT_GFX_MASK;
25612+ }
25613+ if (islands & PSB_VIDEO_ENC_ISLAND) {
25614+ pwr_cnt &= ~PSB_PWRGT_VID_ENC_MASK;
25615+ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
25616+ }
25617+ if (islands & PSB_VIDEO_DEC_ISLAND) {
25618+ pwr_cnt &= ~PSB_PWRGT_VID_DEC_MASK;
25619+ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
25620+ }
25621+ MSG_WRITE32(PSB_PUNIT_PORT, PSB_PWRGT_CNT, pwr_cnt);
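+ /*
+ * Poll the power gate status until the requested islands report
+ * powered up (at most five 10 us polls).
+ */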
25622+ while (count--) {
25623+ pwr_sts = MSG_READ32(PSB_PUNIT_PORT, PSB_PWRGT_STS);
25624+ if ((pwr_sts & pwr_mask) == 0)
25625+ break;
25626+ else
25627+ udelay(10);
25628+ }
25629+}
25630+
25631+static int psb_power_down_sgx(struct drm_device *dev)
25632+{
25633+ struct drm_psb_private *dev_priv =
25634+ (struct drm_psb_private *)dev->dev_private;
25635+
25636+ PSB_DEBUG_PM("power down sgx \n");
25637+
25638+#ifdef OSPM_STAT
25639+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)
25640+ dev_priv->gfx_d0i0_time += jiffies - dev_priv->gfx_last_mode_change;
25641+ else
25642+ PSB_DEBUG_PM("power down:illegal previous power state\n");
25643+ dev_priv->gfx_last_mode_change = jiffies;
25644+ dev_priv->gfx_d0i3_cnt++;
25645+#endif
25646+
25647+ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
25648+ dev_priv->graphics_state = PSB_PWR_STATE_D0i3;
25649+ psb_down_island_power(dev, PSB_GRAPHICS_ISLAND);
25650+ return 0;
25651+}
25652+static int psb_power_up_sgx(struct drm_device *dev)
25653+{
25654+ struct drm_psb_private *dev_priv =
25655+ (struct drm_psb_private *)dev->dev_private;
25656+ if ((dev_priv->graphics_state & PSB_PWR_STATE_MASK) !=
25657+ PSB_PWR_STATE_D0i3)
25658+ return -EINVAL;
25659+
25660+ PSB_DEBUG_PM("power up sgx \n");
25661+ if (unlikely(PSB_D_PM & drm_psb_debug))
25662+ dump_stack();
25663+ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
25664+
25665+ psb_up_island_power(dev, PSB_GRAPHICS_ISLAND);
25666+
25667+ /*
25668+ * The SGX loses its register contents.
25669+ * Restore BIF registers. The MMU page tables are
25670+ * "normal" pages, so their contents should be kept.
25671+ */
25672+
25673+ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
25674+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
25675+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
25676+ PSB_RSGX32(PSB_CR_BIF_BANK1);
25677+
25678+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
25679+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
25680+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
25681+
25682+ /*
25683+ * 2D base registers.
25684+ */
25685+ psb_init_2d(dev_priv);
25686+ /*
25687+ * Persistent 3D base registers and USSE base registers.
25688+ */
25689+
25690+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
25691+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
25692+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
25693+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
25694+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
25695+ /*
25696+ * Now, re-initialize the 3D engine.
25697+ */
25698+ if (dev_priv->xhw_on)
25699+ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
25700+
25701+ psb_scheduler_ta_mem_check(dev_priv);
25702+ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
25703+ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
25704+ PSB_TA_MEM_FLAG_TA |
25705+ PSB_TA_MEM_FLAG_RASTER |
25706+ PSB_TA_MEM_FLAG_HOSTA |
25707+ PSB_TA_MEM_FLAG_HOSTD |
25708+ PSB_TA_MEM_FLAG_INIT,
25709+ dev_priv->ta_mem->ta_memory->offset,
25710+ dev_priv->ta_mem->hw_data->offset,
25711+ dev_priv->ta_mem->hw_cookie);
25712+ }
25713+
25714+#ifdef OSPM_STAT
25715+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
25716+ dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
25717+ else
25718+ PSB_DEBUG_PM("power up:illegal previous power state\n");
25719+ dev_priv->gfx_last_mode_change = jiffies;
25720+ dev_priv->gfx_d0i0_cnt++;
25721+#endif
25722+
25723+ dev_priv->graphics_state = PSB_PWR_STATE_D0i0;
25724+
25725+ return 0;
25726+}
25727+
25728+int psb_try_power_down_sgx(struct drm_device *dev)
25729+{
25730+ struct drm_psb_private *dev_priv =
25731+ (struct drm_psb_private *)dev->dev_private;
25732+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
25733+ int ret;
25734+ if (!down_write_trylock(&dev_priv->sgx_sem))
25735+ return -EBUSY;
25736+ /* Try to lock the 2D engine, because the FB driver usually uses it. */
25737+ if (!psb_2d_trylock(dev_priv)) {
25738+ ret = -EBUSY;
25739+ goto out_err0;
25740+ }
25741+ if ((dev_priv->graphics_state & PSB_PWR_STATE_MASK) !=
25742+ PSB_PWR_STATE_D0i0) {
25743+ ret = -EINVAL;
25744+ goto out_err1;
25745+ }
25746+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY) ||
25747+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) != 0)) {
25748+ ret = -EBUSY;
25749+ goto out_err1;
25750+ }
25751+ if (!scheduler->idle ||
25752+ !list_empty(&scheduler->raster_queue) ||
25753+ !list_empty(&scheduler->ta_queue) ||
25754+ !list_empty(&scheduler->hp_raster_queue)) {
25755+ ret = -EBUSY;
25756+ goto out_err1;
25757+ }
25758+ /*flush_scheduled_work();*/
25759+ ret = psb_power_down_sgx(dev);
25760+out_err1:
25761+ psb_2d_atomic_unlock(dev_priv);
25762+out_err0:
25763+ up_write(&dev_priv->sgx_sem);
25764+ return ret;
25765+}
25766+/* Check power state; if the device is asleep, wake it up. */
25767+void psb_check_power_state(struct drm_device *dev, int devices)
25768+{
25769+ struct pci_dev *pdev = dev->pdev;
25770+ struct drm_psb_private *dev_priv = dev->dev_private;
25771+ down(&dev_priv->pm_sem);
25772+ switch (pdev->current_state) {
25773+ case PCI_D3hot:
25774+ dev->driver->pci_driver.resume(pdev);
25775+ break;
25776+ default:
25777+
25778+ if (devices & PSB_DEVICE_SGX) {
25779+ if ((dev_priv->graphics_state & PSB_PWR_STATE_MASK) ==
25780+ PSB_PWR_STATE_D0i3) {
25781+ /*power up sgx*/
25782+ psb_power_up_sgx(dev);
25783+ }
25784+ } else if (devices & PSB_DEVICE_MSVDX) {
25785+ if ((dev_priv->msvdx_state & PSB_PWR_STATE_MASK) ==
25786+ PSB_PWR_STATE_D0i3) {
25787+ psb_power_up_msvdx(dev);
25788+ } else {
25789+ dev_priv->msvdx_last_action = jiffies;
25790+ }
25791+ }
25792+ break;
25793+ }
25794+ up(&dev_priv->pm_sem);
25795+}
25796+
25797+void psb_init_ospm(struct drm_psb_private *dev_priv)
25798+{
25799+ static int init;
25800+ if (!init) {
25801+ dev_priv->graphics_state = PSB_PWR_STATE_D0i0;
25802+ init_rwsem(&dev_priv->sgx_sem);
25803+ sema_init(&dev_priv->pm_sem, 1);
25804+#ifdef OSPM_STAT
25805+ dev_priv->gfx_last_mode_change = jiffies;
25806+ dev_priv->gfx_d0i0_time = 0;
25807+ dev_priv->gfx_d0i3_time = 0;
25808+ dev_priv->gfx_d3_time = 0;
25809+#endif
25810+ init = 1;
25811+ }
25812+}
25813+
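+/*
+ * Main command submission ioctl: reference and validate the user's
+ * buffer list, patch relocations if the presumed offsets were wrong,
+ * hand the command buffer to the engine-specific submit path, and
+ * fence (or synchronously idle) the engine afterwards.
+ */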
25814+int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
25815+ struct drm_file *file_priv)
25816+{
25817+ struct drm_psb_cmdbuf_arg *arg = data;
25818+ int ret = 0;
25819+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
25820+ struct ttm_buffer_object *cmd_buffer = NULL;
25821+ struct ttm_buffer_object *ta_buffer = NULL;
25822+ struct ttm_buffer_object *oom_buffer = NULL;
25823+ struct psb_ttm_fence_rep fence_arg;
25824+ struct drm_psb_scene user_scene;
25825+ struct psb_scene_pool *pool = NULL;
25826+ struct psb_scene *scene = NULL;
25827+ struct drm_psb_private *dev_priv =
25828+ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
25829+ int engine;
25830+ struct psb_feedback_info feedback;
25831+ int po_correct;
25832+ struct psb_context *context;
25833+ unsigned num_buffers;
25834+
25835+ num_buffers = PSB_NUM_VALIDATE_BUFFERS;
25836+
25837+ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
25838+ if (unlikely(ret != 0))
25839+ return ret;
25840+
25841+ if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA)
25842+ || (arg->engine == PSB_ENGINE_RASTERIZER)) {
25843+ down_read(&dev_priv->sgx_sem);
25844+ psb_check_power_state(dev, PSB_DEVICE_SGX);
25845+ }
25846+
25847+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
25848+ if (unlikely(ret != 0))
25849+ goto out_err0;
25850+
25851+
25852+ context = &dev_priv->context;
25853+ context->used_buffers = 0;
25854+ context->fence_types = 0;
25855+ BUG_ON(!list_empty(&context->validate_list));
25856+ BUG_ON(!list_empty(&context->kern_validate_list));
25857+
25858+ if (unlikely(context->buffers == NULL)) {
25859+ context->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
25860+ sizeof(*context->buffers));
25861+ if (unlikely(context->buffers == NULL)) {
25862+ ret = -ENOMEM;
25863+ goto out_err1;
25864+ }
25865+ }
25866+
25867+ ret = psb_reference_buffers(file_priv,
25868+ arg->buffer_list,
25869+ context);
25870+
25871+ if (unlikely(ret != 0))
25872+ goto out_err1;
25873+
25874+ context->val_seq = atomic_add_return(1, &dev_priv->val_seq);
25875+
25876+ ret = ttm_eu_reserve_buffers(&context->validate_list,
25877+ context->val_seq);
25878+ if (unlikely(ret != 0)) {
25879+ goto out_err2;
25880+ }
25881+
25882+ engine = (arg->engine == PSB_ENGINE_RASTERIZER) ?
25883+ PSB_ENGINE_TA : arg->engine;
25884+
25885+ ret = psb_validate_buffer_list(file_priv, engine,
25886+ context, &po_correct);
25887+ if (unlikely(ret != 0))
25888+ goto out_err3;
25889+
25890+ if (!po_correct) {
25891+ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
25892+ arg->reloc_offset,
25893+ arg->reloc_handle, context, 0, 1);
25894+ if (unlikely(ret != 0))
25895+ goto out_err3;
25896+
25897+ }
25898+
25899+ cmd_buffer = ttm_buffer_object_lookup(tfile, arg->cmdbuf_handle);
25900+ if (unlikely(cmd_buffer == NULL)) {
25901+ ret = -EINVAL;
25902+ goto out_err4;
25903+ }
25904+
25905+ switch (arg->engine) {
25906+ case PSB_ENGINE_2D:
25907+ ret = psb_cmdbuf_2d(file_priv, &context->validate_list,
25908+ context->fence_types, arg, cmd_buffer,
25909+ &fence_arg);
25910+ if (unlikely(ret != 0))
25911+ goto out_err4;
25912+ break;
25913+ case PSB_ENGINE_VIDEO:
25914+ psb_check_power_state(dev, PSB_DEVICE_MSVDX);
25915+ ret = psb_cmdbuf_video(file_priv, &context->validate_list,
25916+ context->fence_types, arg,
25917+ cmd_buffer, &fence_arg);
25918+
25919+ if (unlikely(ret != 0))
25920+ goto out_err4;
25921+ break;
25922+ case LNC_ENGINE_ENCODE:
25923+ psb_check_power_state(dev, PSB_DEVICE_TOPAZ);
25924+ ret = lnc_cmdbuf_video(file_priv, &context->validate_list,
25925+ context->fence_types, arg,
25926+ cmd_buffer, &fence_arg);
25927+ if (unlikely(ret != 0))
25928+ goto out_err4;
25929+ break;
25930+ case PSB_ENGINE_RASTERIZER:
25931+ ret = psb_cmdbuf_raster(file_priv, context,
25932+ arg, cmd_buffer, &fence_arg);
25933+ if (unlikely(ret != 0))
25934+ goto out_err4;
25935+ break;
25936+ case PSB_ENGINE_TA:
25937+ if (arg->ta_handle == arg->cmdbuf_handle) {
25938+ ta_buffer = ttm_bo_reference(cmd_buffer);
25939+ } else {
25940+ ta_buffer =
25941+ ttm_buffer_object_lookup(tfile,
25942+ arg->ta_handle);
25943+ if (!ta_buffer) {
25944+ ret = -EINVAL;
25945+ goto out_err4;
25946+ }
25947+ }
25948+ if (arg->oom_size != 0) {
25949+ if (arg->oom_handle == arg->cmdbuf_handle) {
25950+ oom_buffer = ttm_bo_reference(cmd_buffer);
25951+ } else {
25952+ oom_buffer =
25953+ ttm_buffer_object_lookup(tfile,
25954+ arg->
25955+ oom_handle);
25956+ if (!oom_buffer) {
25957+ ret = -EINVAL;
25958+ goto out_err4;
25959+ }
25960+ }
25961+ }
25962+
25963+ ret = copy_from_user(&user_scene, (void __user *)
25964+ ((unsigned long) arg->scene_arg),
25965+ sizeof(user_scene));
25966+ if (ret)
25967+ goto out_err4;
25968+
25969+ if (!user_scene.handle_valid) {
25970+ pool = psb_scene_pool_alloc(file_priv, 0,
25971+ user_scene.num_buffers,
25972+ user_scene.w,
25973+ user_scene.h);
25974+ if (!pool) {
25975+ ret = -ENOMEM;
25976+ goto out_err0;
25977+ }
25978+
25979+ user_scene.handle = psb_scene_pool_handle(pool);
25980+ user_scene.handle_valid = 1;
25981+ ret = copy_to_user((void __user *)
25982+ ((unsigned long) arg->
25983+ scene_arg), &user_scene,
25984+ sizeof(user_scene));
25985+
25986+ if (ret)
25987+ goto out_err4;
25988+ } else {
25989+ pool =
25990+ psb_scene_pool_lookup(file_priv,
25991+ user_scene.handle, 1);
25992+ if (!pool) {
25993+ ret = -EINVAL;
25994+ goto out_err4;
25995+ }
25996+ }
25997+
25998+ ret = psb_validate_scene_pool(context, pool,
25999+ user_scene.w,
26000+ user_scene.h,
26001+ arg->ta_flags &
26002+ PSB_TA_FLAG_LASTPASS, &scene);
26003+ if (ret)
26004+ goto out_err4;
26005+
26006+ memset(&feedback, 0, sizeof(feedback));
26007+ if (arg->feedback_ops) {
26008+ ret = psb_feedback_buf(tfile,
26009+ context,
26010+ arg->feedback_ops,
26011+ arg->feedback_handle,
26012+ arg->feedback_offset,
26013+ arg->feedback_breakpoints,
26014+ arg->feedback_size,
26015+ &feedback);
26016+ if (ret)
26017+ goto out_err4;
26018+ }
26019+ ret = psb_cmdbuf_ta(file_priv, context,
26020+ arg, cmd_buffer, ta_buffer,
26021+ oom_buffer, scene, &feedback,
26022+ &fence_arg);
26023+ if (ret)
26024+ goto out_err4;
26025+ break;
26026+ default:
26027+ DRM_ERROR
26028+ ("Unimplemented command submission mechanism (%x).\n",
26029+ arg->engine);
26030+ ret = -EINVAL;
26031+ goto out_err4;
26032+ }
26033+
26034+ if (!(arg->fence_flags & DRM_PSB_FENCE_NO_USER)) {
26035+ ret = copy_to_user((void __user *)
26036+ ((unsigned long) arg->fence_arg),
26037+ &fence_arg, sizeof(fence_arg));
26038+ }
26039+
26040+out_err4:
26041+ if (scene)
26042+ psb_scene_unref(&scene);
26043+ if (pool)
26044+ psb_scene_pool_unref(&pool);
26045+ if (cmd_buffer)
26046+ ttm_bo_unref(&cmd_buffer);
26047+ if (ta_buffer)
26048+ ttm_bo_unref(&ta_buffer);
26049+ if (oom_buffer)
26050+ ttm_bo_unref(&oom_buffer);
26051+out_err3:
26052+ ret = psb_handle_copyback(dev, context, ret);
26053+out_err2:
26054+ psb_unreference_buffers(context);
26055+out_err1:
26056+ mutex_unlock(&dev_priv->cmdbuf_mutex);
26057+out_err0:
26058+ ttm_read_unlock(&dev_priv->ttm_lock);
26059+ if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA)
26060+ || (arg->engine == PSB_ENGINE_RASTERIZER))
26061+ up_read(&dev_priv->sgx_sem);
26062+ return ret;
26063+}
26064diff -uNr a/drivers/gpu/drm/psb/psb_sgx.h b/drivers/gpu/drm/psb/psb_sgx.h
26065--- a/drivers/gpu/drm/psb/psb_sgx.h 1969-12-31 16:00:00.000000000 -0800
26066+++ b/drivers/gpu/drm/psb/psb_sgx.h 2009-04-07 13:28:38.000000000 -0700
26067@@ -0,0 +1,41 @@
26068+/*
26069+ * Copyright (c) 2008, Intel Corporation
26070+ *
26071+ * Permission is hereby granted, free of charge, to any person obtaining a
26072+ * copy of this software and associated documentation files (the "Software"),
26073+ * to deal in the Software without restriction, including without limitation
26074+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
26075+ * and/or sell copies of the Software, and to permit persons to whom the
26076+ * Software is furnished to do so, subject to the following conditions:
26077+ *
26078+ * The above copyright notice and this permission notice (including the next
26079+ * paragraph) shall be included in all copies or substantial portions of the
26080+ * Software.
26081+ *
26082+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26083+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26084+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26085+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26086+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26087+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26088+ * SOFTWARE.
26089+ *
26090+ * Authors:
26091+ * Eric Anholt <eric@anholt.net>
26092+ *
26093+ **/
26094+#ifndef _PSB_SGX_H_
26095+#define _PSB_SGX_H_
26096+
26097+extern int psb_submit_video_cmdbuf(struct drm_device *dev,
26098+ struct ttm_buffer_object *cmd_buffer,
26099+ unsigned long cmd_offset,
26100+ unsigned long cmd_size,
26101+ struct ttm_fence_object *fence);
26102+
26103+extern int psb_2d_wait_available(struct drm_psb_private *dev_priv,
26104+ unsigned size);
26105+extern int drm_idle_check_interval;
26106+extern int drm_psb_ospm;
26107+
26108+#endif
26109diff -uNr a/drivers/gpu/drm/psb/psb_ttm_glue.c b/drivers/gpu/drm/psb/psb_ttm_glue.c
26110--- a/drivers/gpu/drm/psb/psb_ttm_glue.c 1969-12-31 16:00:00.000000000 -0800
26111+++ b/drivers/gpu/drm/psb/psb_ttm_glue.c 2009-04-07 13:28:38.000000000 -0700
26112@@ -0,0 +1,345 @@
26113+/**************************************************************************
26114+ * Copyright (c) 2008, Intel Corporation.
26115+ * All Rights Reserved.
26116+ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
26117+ * All Rights Reserved.
26118+ *
26119+ * This program is free software; you can redistribute it and/or modify it
26120+ * under the terms and conditions of the GNU General Public License,
26121+ * version 2, as published by the Free Software Foundation.
26122+ *
26123+ * This program is distributed in the hope it will be useful, but WITHOUT
26124+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26125+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
26126+ * more details.
26127+ *
26128+ * You should have received a copy of the GNU General Public License along with
26129+ * this program; if not, write to the Free Software Foundation, Inc.,
26130+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
26131+ *
26132+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
26133+ * develop this driver.
26134+ *
26135+ **************************************************************************/
26136+/*
26137+ */
26138+
26139+#include <drm/drmP.h>
26140+#include "psb_drv.h"
26141+#include "ttm/ttm_userobj_api.h"
26142+
26143+static struct vm_operations_struct psb_ttm_vm_ops;
26144+
26145+int psb_open(struct inode *inode, struct file *filp)
26146+{
26147+ struct drm_file *file_priv;
26148+ struct drm_psb_private *dev_priv;
26149+ struct psb_fpriv *psb_fp;
26150+ int ret;
26151+
26152+ ret = drm_open(inode, filp);
26153+ if (unlikely(ret))
26154+ return ret;
26155+
26156+ psb_fp = drm_calloc(1, sizeof(*psb_fp), DRM_MEM_FILES);
26157+
26158+ if (unlikely(psb_fp == NULL))
26159+ goto out_err0;
26160+
26161+ file_priv = (struct drm_file *) filp->private_data;
26162+ dev_priv = psb_priv(file_priv->minor->dev);
26163+
26164+
26165+ psb_fp->tfile = ttm_object_file_init(dev_priv->tdev,
26166+ PSB_FILE_OBJECT_HASH_ORDER);
26167+ if (unlikely(psb_fp->tfile == NULL))
26168+ goto out_err1;
26169+
26170+ file_priv->driver_priv = psb_fp;
26171+
26172+ if (unlikely(dev_priv->bdev.dev_mapping == NULL))
26173+ dev_priv->bdev.dev_mapping = dev_priv->dev->dev_mapping;
26174+
26175+ return 0;
26176+
26177+out_err1:
26178+ drm_free(psb_fp, sizeof(*psb_fp), DRM_MEM_FILES);
26179+out_err0:
26180+ (void) drm_release(inode, filp);
26181+ return ret;
26182+}
26183+
26184+int psb_release(struct inode *inode, struct file *filp)
26185+{
26186+ struct drm_file *file_priv;
26187+ struct psb_fpriv *psb_fp;
26188+ struct drm_psb_private *dev_priv;
26189+ int ret;
26190+
26191+ file_priv = (struct drm_file *) filp->private_data;
26192+ psb_fp = psb_fpriv(file_priv);
26193+ dev_priv = psb_priv(file_priv->minor->dev);
26194+
26195+ down_read(&dev_priv->sgx_sem);
26196+ psb_check_power_state(file_priv->minor->dev, PSB_DEVICE_SGX);
26197+
26198+ ttm_object_file_release(&psb_fp->tfile);
26199+ drm_free(psb_fp, sizeof(*psb_fp), DRM_MEM_FILES);
26200+
26201+ if (dev_priv && dev_priv->xhw_file)
26202+ psb_xhw_init_takedown(dev_priv, file_priv, 1);
26203+
26204+ ret = drm_release(inode, filp);
26205+ up_read(&dev_priv->sgx_sem);
26206+ if (drm_psb_ospm && IS_MRST(dev_priv->dev))
26207+ schedule_delayed_work(&dev_priv->scheduler.wq, 0);
26208+ return ret;
26209+}
26210+
26211+int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
26212+ struct drm_file *file_priv)
26213+{
26214+ int ret;
26215+ struct drm_psb_private *dev_priv = psb_priv(dev);
26216+ down_read(&dev_priv->sgx_sem);
26217+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26218+ ret = ttm_fence_signaled_ioctl(psb_fpriv(file_priv)->tfile, data);
26219+ up_read(&dev_priv->sgx_sem);
26220+ if (drm_psb_ospm && IS_MRST(dev))
26221+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26222+ return ret;
26223+}
26224+
26225+int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
26226+ struct drm_file *file_priv)
26227+{
26228+ int ret;
26229+ struct drm_psb_private *dev_priv = psb_priv(dev);
26230+ down_read(&dev_priv->sgx_sem);
26231+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26232+ ret = ttm_fence_finish_ioctl(psb_fpriv(file_priv)->tfile, data);
26233+ up_read(&dev_priv->sgx_sem);
26234+ if (drm_psb_ospm && IS_MRST(dev))
26235+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26236+ return ret;
26237+}
26238+
26239+int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
26240+ struct drm_file *file_priv)
26241+{
26242+ int ret;
26243+ struct drm_psb_private *dev_priv = psb_priv(dev);
26244+ down_read(&dev_priv->sgx_sem);
26245+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26246+ ret = ttm_fence_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
26247+ up_read(&dev_priv->sgx_sem);
26248+ if (drm_psb_ospm && IS_MRST(dev))
26249+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26250+ return ret;
26251+}
26252+
26253+int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
26254+ struct drm_file *file_priv)
26255+{
26256+ PSB_DEBUG_PM("ioctl: psb_pl_reference\n");
26257+ return ttm_pl_waitidle_ioctl(psb_fpriv(file_priv)->tfile, data);
26258+}
26259+
26260+int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
26261+ struct drm_file *file_priv)
26262+{
26263+ int ret;
26264+ struct drm_psb_private *dev_priv = psb_priv(dev);
26265+ down_read(&dev_priv->sgx_sem);
26266+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26267+ ret = ttm_pl_setstatus_ioctl(psb_fpriv(file_priv)->tfile,
26268+ &psb_priv(dev)->ttm_lock, data);
26269+ up_read(&dev_priv->sgx_sem);
26270+ if (drm_psb_ospm && IS_MRST(dev))
26271+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26272+ return ret;
26273+}
26274+
26275+int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
26276+ struct drm_file *file_priv)
26277+{
26278+ int ret;
26279+ struct drm_psb_private *dev_priv = psb_priv(dev);
26280+ down_read(&dev_priv->sgx_sem);
26281+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26282+ ret = ttm_pl_synccpu_ioctl(psb_fpriv(file_priv)->tfile, data);
26283+ up_read(&dev_priv->sgx_sem);
26284+ if (drm_psb_ospm && IS_MRST(dev))
26285+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26286+ return ret;
26287+}
26288+
26289+int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
26290+ struct drm_file *file_priv)
26291+{
26292+ struct drm_psb_private *dev_priv = psb_priv(dev);
26293+ int ret;
26294+ down_read(&dev_priv->sgx_sem);
26295+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26296+ ret = ttm_pl_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
26297+ up_read(&dev_priv->sgx_sem);
26298+ if (drm_psb_ospm && IS_MRST(dev))
26299+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26300+ return ret;
26301+}
26302+
26303+int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
26304+ struct drm_file *file_priv)
26305+{
26306+ struct drm_psb_private *dev_priv = psb_priv(dev);
26307+ int ret;
26308+ down_read(&dev_priv->sgx_sem);
26309+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26310+ ret = ttm_pl_reference_ioctl(psb_fpriv(file_priv)->tfile, data);
26311+ up_read(&dev_priv->sgx_sem);
26312+ if (drm_psb_ospm && IS_MRST(dev))
26313+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26314+ return ret;
26315+}
26316+
26317+int psb_pl_create_ioctl(struct drm_device *dev, void *data,
26318+ struct drm_file *file_priv)
26319+{
26320+ struct drm_psb_private *dev_priv = psb_priv(dev);
26321+ int ret;
26322+ down_read(&dev_priv->sgx_sem);
26323+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26324+ ret = ttm_pl_create_ioctl(psb_fpriv(file_priv)->tfile,
26325+ &dev_priv->bdev, &dev_priv->ttm_lock, data);
26326+ up_read(&dev_priv->sgx_sem);
26327+ if (drm_psb_ospm && IS_MRST(dev))
26328+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26329+ return ret;
26330+}
26331+
26332+/**
26333+ * psb_ttm_fault - Wrapper around the ttm fault method.
26334+ *
26335+ * @vma: The struct vm_area_struct as in the vm fault() method.
26336+ * @vmf: The struct vm_fault as in the vm fault() method.
26337+ *
26338+ * Since ttm_fault() will reserve buffers while faulting,
26339+ * we need to take the ttm read lock around it, as this driver
26340+ * relies on the ttm_lock in write mode to exclude all threads from
26341+ * reserving and thus validating buffers in aperture- and memory shortage
26342+ * situations.
26343+ */
26344+
26345+static int psb_ttm_fault(struct vm_area_struct *vma,
26346+ struct vm_fault *vmf)
26347+{
26348+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
26349+ vma->vm_private_data;
26350+ struct drm_psb_private *dev_priv =
26351+ container_of(bo->bdev, struct drm_psb_private, bdev);
26352+ int ret;
26353+
26354+ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
26355+ if (unlikely(ret != 0))
26356+ return VM_FAULT_NOPAGE;
26357+
26358+ ret = dev_priv->ttm_vm_ops->fault(vma, vmf);
26359+
26360+ ttm_read_unlock(&dev_priv->ttm_lock);
26361+ return ret;
26362+}
26363+
26364+
26365+int psb_mmap(struct file *filp, struct vm_area_struct *vma)
26366+{
26367+ struct drm_file *file_priv;
26368+ struct drm_psb_private *dev_priv;
26369+ int ret;
26370+
26371+ if (unlikely(vma->vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET))
26372+ return drm_mmap(filp, vma);
26373+
26374+ file_priv = (struct drm_file *) filp->private_data;
26375+ dev_priv = psb_priv(file_priv->minor->dev);
26376+
26377+ ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
26378+ if (unlikely(ret != 0))
26379+ return ret;
26380+
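+ /*
+ * On the first mapping, capture TTM's vm_ops and substitute a copy
+ * whose fault handler wraps the original with the ttm read lock
+ * (see psb_ttm_fault above).
+ */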
26381+ if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
26382+ dev_priv->ttm_vm_ops = vma->vm_ops;
26383+ psb_ttm_vm_ops = *vma->vm_ops;
26384+ psb_ttm_vm_ops.fault = &psb_ttm_fault;
26385+ }
26386+
26387+ vma->vm_ops = &psb_ttm_vm_ops;
26388+
26389+ return 0;
26390+}
26391+
26392+ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
26393+ size_t count, loff_t *f_pos)
26394+{
26395+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
26396+ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
26397+
26398+ return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1);
26399+}
26400+
26401+ssize_t psb_ttm_read(struct file *filp, char __user *buf,
26402+ size_t count, loff_t *f_pos)
26403+{
26404+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
26405+ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
26406+
26407+ return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1);
26408+}
26409+
26410+int psb_verify_access(struct ttm_buffer_object *bo,
26411+ struct file *filp)
26412+{
26413+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
26414+
26415+ if (capable(CAP_SYS_ADMIN))
26416+ return 0;
26417+
26418+ if (unlikely(!file_priv->authenticated))
26419+ return -EPERM;
26420+
26421+ return ttm_pl_verify_access(bo, psb_fpriv(file_priv)->tfile);
26422+}
26423+
26424+static int psb_ttm_mem_global_init(struct drm_global_reference *ref)
26425+{
26426+ return ttm_mem_global_init(ref->object);
26427+}
26428+
26429+static void psb_ttm_mem_global_release(struct drm_global_reference *ref)
26430+{
26431+ ttm_mem_global_release(ref->object);
26432+}
26433+
26434+int psb_ttm_global_init(struct drm_psb_private *dev_priv)
26435+{
26436+ struct drm_global_reference *global_ref;
26437+ int ret;
26438+
26439+ global_ref = &dev_priv->mem_global_ref;
26440+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
26441+ global_ref->size = sizeof(struct ttm_mem_global);
26442+ global_ref->init = &psb_ttm_mem_global_init;
26443+ global_ref->release = &psb_ttm_mem_global_release;
26444+
26445+ ret = drm_global_item_ref(global_ref);
26446+ if (unlikely(ret != 0)) {
26447+ DRM_ERROR("Failed referencing a global TTM memory object.\n");
26448+ return ret;
26449+ }
26450+
26451+ return 0;
26452+}
26453+
26454+void psb_ttm_global_release(struct drm_psb_private *dev_priv)
26455+{
26456+ drm_global_item_unref(&dev_priv->mem_global_ref);
26457+}
26458diff -uNr a/drivers/gpu/drm/psb/psb_xhw.c b/drivers/gpu/drm/psb/psb_xhw.c
26459--- a/drivers/gpu/drm/psb/psb_xhw.c 1969-12-31 16:00:00.000000000 -0800
26460+++ b/drivers/gpu/drm/psb/psb_xhw.c 2009-04-07 13:28:38.000000000 -0700
26461@@ -0,0 +1,629 @@
26462+/**************************************************************************
26463+ *Copyright (c) 2007-2008, Intel Corporation.
26464+ *All Rights Reserved.
26465+ *
26466+ *This program is free software; you can redistribute it and/or modify it
26467+ *under the terms and conditions of the GNU General Public License,
26468+ *version 2, as published by the Free Software Foundation.
26469+ *
26470+ *This program is distributed in the hope it will be useful, but WITHOUT
26471+ *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26472+ *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
26473+ *more details.
26474+ *
26475+ *You should have received a copy of the GNU General Public License along with
26476+ *this program; if not, write to the Free Software Foundation, Inc.,
26477+ *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
26478+ *
26479+ *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
26480+ *develop this driver.
26481+ *
26482+ **************************************************************************/
26483+/*
26484+ *Make calls into closed source X server code.
26485+ */
26486+
26487+#include <drm/drmP.h>
26488+#include "psb_drv.h"
26489+#include "ttm/ttm_userobj_api.h"
26490+
26491+void
26492+psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
26493+ struct psb_xhw_buf *buf)
26494+{
26495+ unsigned long irq_flags;
26496+
26497+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26498+ list_del_init(&buf->head);
26499+ if (dev_priv->xhw_cur_buf == buf)
26500+ dev_priv->xhw_cur_buf = NULL;
26501+ atomic_set(&buf->done, 1);
26502+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26503+}
26504+
26505+static inline int psb_xhw_add(struct drm_psb_private *dev_priv,
26506+ struct psb_xhw_buf *buf)
26507+{
26508+ unsigned long irq_flags;
26509+
26510+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26511+ atomic_set(&buf->done, 0);
26512+ if (unlikely(!dev_priv->xhw_submit_ok)) {
26513+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26514+ DRM_ERROR("No Xpsb 3D extension available.\n");
26515+ return -EINVAL;
26516+ }
26517+ if (!list_empty(&buf->head)) {
26518+ DRM_ERROR("Recursive list adding.\n");
26519+ goto out;
26520+ }
26521+ list_add_tail(&buf->head, &dev_priv->xhw_in);
26522+ wake_up_interruptible(&dev_priv->xhw_queue);
26523+out:
26524+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26525+ return 0;
26526+}
26527+
26528+int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
26529+ struct psb_xhw_buf *buf,
26530+ uint32_t w,
26531+ uint32_t h,
26532+ uint32_t *hw_cookie,
26533+ uint32_t *bo_size,
26534+ uint32_t *clear_p_start,
26535+ uint32_t *clear_num_pages)
26536+{
26537+ struct drm_psb_xhw_arg *xa = &buf->arg;
26538+ int ret;
26539+
26540+ buf->copy_back = 1;
26541+ xa->op = PSB_XHW_SCENE_INFO;
26542+ xa->irq_op = 0;
26543+ xa->issue_irq = 0;
26544+ xa->arg.si.w = w;
26545+ xa->arg.si.h = h;
26546+
26547+ ret = psb_xhw_add(dev_priv, buf);
26548+ if (ret)
26549+ return ret;
26550+
26551+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
26552+ atomic_read(&buf->done), DRM_HZ);
26553+
26554+ if (!atomic_read(&buf->done)) {
26555+ psb_xhw_clean_buf(dev_priv, buf);
26556+ return -EBUSY;
26557+ }
26558+
26559+ if (!xa->ret) {
26560+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
26561+ *bo_size = xa->arg.si.size;
26562+ *clear_p_start = xa->arg.si.clear_p_start;
26563+ *clear_num_pages = xa->arg.si.clear_num_pages;
26564+ }
26565+ return xa->ret;
26566+}
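Every synchronous xhw call in this file follows the same round trip: fill in the argument block, queue the buffer with psb_xhw_add(), sleep on xhw_caller_queue until the reply marks buf->done, then read the copy-back fields only when xa->ret is zero. A hedged caller sketch, editorial and not part of the patch (the function name and the cookie array size are assumptions; only the psb_xhw_scene_info() signature above is taken from the patch):

static int example_xhw_scene_query(struct drm_psb_private *dev_priv)
{
	struct psb_xhw_buf buf;
	uint32_t cookie[8];		/* assumed to be >= sizeof(xa->cookie) */
	uint32_t bo_size, clear_start, clear_pages;

	/* The list head must be initialised before the first submit. */
	INIT_LIST_HEAD(&buf.head);

	/* Returns -EINVAL with no Xpsb extension, -EBUSY on reply timeout,
	 * or the status code carried in the reply itself. */
	return psb_xhw_scene_info(dev_priv, &buf, 1024, 768, cookie,
				  &bo_size, &clear_start, &clear_pages);
}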
26567+
26568+int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
26569+ struct psb_xhw_buf *buf, uint32_t fire_flags)
26570+{
26571+ struct drm_psb_xhw_arg *xa = &buf->arg;
26572+
26573+ buf->copy_back = 0;
26574+ xa->op = PSB_XHW_FIRE_RASTER;
26575+ xa->issue_irq = 0;
26576+ xa->arg.sb.fire_flags = 0;
26577+
26578+ return psb_xhw_add(dev_priv, buf);
26579+}
26580+
26581+int psb_xhw_vistest(struct drm_psb_private *dev_priv,
26582+ struct psb_xhw_buf *buf)
26583+{
26584+ struct drm_psb_xhw_arg *xa = &buf->arg;
26585+
26586+ buf->copy_back = 1;
26587+ xa->op = PSB_XHW_VISTEST;
26588+ /*
26589+ *Could perhaps decrease latency somewhat by
26590+ *issuing an irq in this case.
26591+ */
26592+ xa->issue_irq = 0;
26593+ xa->irq_op = PSB_UIRQ_VISTEST;
26594+ return psb_xhw_add(dev_priv, buf);
26595+}
26596+
26597+int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
26598+ struct psb_xhw_buf *buf,
26599+ uint32_t fire_flags,
26600+ uint32_t hw_context,
26601+ uint32_t *cookie,
26602+ uint32_t *oom_cmds,
26603+ uint32_t num_oom_cmds,
26604+ uint32_t offset, uint32_t engine,
26605+ uint32_t flags)
26606+{
26607+ struct drm_psb_xhw_arg *xa = &buf->arg;
26608+
26609+ buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM);
26610+ xa->op = PSB_XHW_SCENE_BIND_FIRE;
26611+ xa->issue_irq = (buf->copy_back) ? 1 : 0;
26612+ if (unlikely(buf->copy_back))
26613+ xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
26614+ PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY;
26615+ else
26616+ xa->irq_op = 0;
26617+ xa->arg.sb.fire_flags = fire_flags;
26618+ xa->arg.sb.hw_context = hw_context;
26619+ xa->arg.sb.offset = offset;
26620+ xa->arg.sb.engine = engine;
26621+ xa->arg.sb.flags = flags;
26622+ xa->arg.sb.num_oom_cmds = num_oom_cmds;
26623+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
26624+ if (num_oom_cmds)
26625+ memcpy(xa->arg.sb.oom_cmds, oom_cmds,
26626+ sizeof(uint32_t) * num_oom_cmds);
26627+ return psb_xhw_add(dev_priv, buf);
26628+}
26629+
26630+int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
26631+ struct psb_xhw_buf *buf)
26632+{
26633+ struct drm_psb_xhw_arg *xa = &buf->arg;
26634+ int ret;
26635+
26636+ buf->copy_back = 1;
26637+ xa->op = PSB_XHW_RESET_DPM;
26638+ xa->issue_irq = 0;
26639+ xa->irq_op = 0;
26640+
26641+ ret = psb_xhw_add(dev_priv, buf);
26642+ if (ret)
26643+ return ret;
26644+
26645+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
26646+ atomic_read(&buf->done), 3 * DRM_HZ);
26647+
26648+ if (!atomic_read(&buf->done)) {
26649+ psb_xhw_clean_buf(dev_priv, buf);
26650+ return -EBUSY;
26651+ }
26652+
26653+ return xa->ret;
26654+}
26655+
26656+int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
26657+ struct psb_xhw_buf *buf, uint32_t *value)
26658+{
26659+ struct drm_psb_xhw_arg *xa = &buf->arg;
26660+ int ret;
26661+
26662+ *value = 0;
26663+
26664+ buf->copy_back = 1;
26665+ xa->op = PSB_XHW_CHECK_LOCKUP;
26666+ xa->issue_irq = 0;
26667+ xa->irq_op = 0;
26668+
26669+ ret = psb_xhw_add(dev_priv, buf);
26670+ if (ret)
26671+ return ret;
26672+
26673+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
26674+ atomic_read(&buf->done), DRM_HZ * 3);
26675+
26676+ if (!atomic_read(&buf->done)) {
26677+ psb_xhw_clean_buf(dev_priv, buf);
26678+ return -EBUSY;
26679+ }
26680+
26681+ if (!xa->ret)
26682+ *value = xa->arg.cl.value;
26683+
26684+ return xa->ret;
26685+}
26686+
26687+static int psb_xhw_terminate(struct drm_psb_private *dev_priv,
26688+ struct psb_xhw_buf *buf)
26689+{
26690+ struct drm_psb_xhw_arg *xa = &buf->arg;
26691+ unsigned long irq_flags;
26692+
26693+ buf->copy_back = 0;
26694+ xa->op = PSB_XHW_TERMINATE;
26695+ xa->issue_irq = 0;
26696+
26697+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26698+ dev_priv->xhw_submit_ok = 0;
26699+ atomic_set(&buf->done, 0);
26700+ if (!list_empty(&buf->head)) {
26701+ DRM_ERROR("Recursive list adding.\n");
26702+ goto out;
26703+ }
26704+ list_add_tail(&buf->head, &dev_priv->xhw_in);
26705+out:
26706+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26707+ wake_up_interruptible(&dev_priv->xhw_queue);
26708+
26709+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
26710+ atomic_read(&buf->done), DRM_HZ / 10);
26711+
26712+ if (!atomic_read(&buf->done)) {
26713+ DRM_ERROR("Xpsb terminate timeout.\n");
26714+ psb_xhw_clean_buf(dev_priv, buf);
26715+ return -EBUSY;
26716+ }
26717+
26718+ return 0;
26719+}
26720+
26721+int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
26722+ struct psb_xhw_buf *buf,
26723+ uint32_t pages, uint32_t * hw_cookie,
26724+ uint32_t * size,
26725+ uint32_t * ta_min_size)
26726+{
26727+ struct drm_psb_xhw_arg *xa = &buf->arg;
26728+ int ret;
26729+
26730+ buf->copy_back = 1;
26731+ xa->op = PSB_XHW_TA_MEM_INFO;
26732+ xa->issue_irq = 0;
26733+ xa->irq_op = 0;
26734+ xa->arg.bi.pages = pages;
26735+
26736+ ret = psb_xhw_add(dev_priv, buf);
26737+ if (ret)
26738+ return ret;
26739+
26740+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
26741+ atomic_read(&buf->done), DRM_HZ);
26742+
26743+ if (!atomic_read(&buf->done)) {
26744+ psb_xhw_clean_buf(dev_priv, buf);
26745+ return -EBUSY;
26746+ }
26747+
26748+ if (!xa->ret)
26749+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
26750+
26751+ *size = xa->arg.bi.size;
26752+ *ta_min_size = xa->arg.bi.ta_min_size;
26753+ return xa->ret;
26754+}
26755+
26756+int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
26757+ struct psb_xhw_buf *buf,
26758+ uint32_t flags,
26759+ uint32_t param_offset,
26760+ uint32_t pt_offset, uint32_t *hw_cookie)
26761+{
26762+ struct drm_psb_xhw_arg *xa = &buf->arg;
26763+ int ret;
26764+
26765+ buf->copy_back = 1;
26766+ xa->op = PSB_XHW_TA_MEM_LOAD;
26767+ xa->issue_irq = 0;
26768+ xa->irq_op = 0;
26769+ xa->arg.bl.flags = flags;
26770+ xa->arg.bl.param_offset = param_offset;
26771+ xa->arg.bl.pt_offset = pt_offset;
26772+ memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie));
26773+
26774+ ret = psb_xhw_add(dev_priv, buf);
26775+ if (ret)
26776+ return ret;
26777+
26778+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
26779+ atomic_read(&buf->done), 3 * DRM_HZ);
26780+
26781+ if (!atomic_read(&buf->done)) {
26782+ psb_xhw_clean_buf(dev_priv, buf);
26783+ return -EBUSY;
26784+ }
26785+
26786+ if (!xa->ret)
26787+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
26788+
26789+ return xa->ret;
26790+}
26791+
26792+int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
26793+ struct psb_xhw_buf *buf, uint32_t *cookie)
26794+{
26795+ struct drm_psb_xhw_arg *xa = &buf->arg;
26796+
26797+ /*
26798+ *This calls the extensive closed source
26799+ *OOM handler, which resolves the condition and
26800+ *sends a reply telling the scheduler what to do
26801+ *with the task.
26802+ */
26803+
26804+ buf->copy_back = 1;
26805+ xa->op = PSB_XHW_OOM;
26806+ xa->issue_irq = 1;
26807+ xa->irq_op = PSB_UIRQ_OOM_REPLY;
26808+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
26809+
26810+ return psb_xhw_add(dev_priv, buf);
26811+}
26812+
26813+void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
26814+ struct psb_xhw_buf *buf,
26815+ uint32_t *cookie,
26816+ uint32_t *bca, uint32_t *rca, uint32_t *flags)
26817+{
26818+ struct drm_psb_xhw_arg *xa = &buf->arg;
26819+
26820+ /*
26821+ *Get info about how to schedule an OOM task.
26822+ */
26823+
26824+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
26825+ *bca = xa->arg.oom.bca;
26826+ *rca = xa->arg.oom.rca;
26827+ *flags = xa->arg.oom.flags;
26828+}
26829+
26830+void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
26831+ struct psb_xhw_buf *buf, uint32_t *cookie)
26832+{
26833+ struct drm_psb_xhw_arg *xa = &buf->arg;
26834+
26835+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
26836+}
26837+
26838+int psb_xhw_resume(struct drm_psb_private *dev_priv,
26839+ struct psb_xhw_buf *buf)
26840+{
26841+ struct drm_psb_xhw_arg *xa = &buf->arg;
26842+
26843+ buf->copy_back = 0;
26844+ xa->op = PSB_XHW_RESUME;
26845+ xa->issue_irq = 0;
26846+ xa->irq_op = 0;
26847+ return psb_xhw_add(dev_priv, buf);
26848+}
26849+
26850+void psb_xhw_takedown(struct drm_psb_private *dev_priv)
26851+{
26852+}
26853+
26854+int psb_xhw_init(struct drm_device *dev)
26855+{
26856+ struct drm_psb_private *dev_priv =
26857+ (struct drm_psb_private *) dev->dev_private;
26858+ unsigned long irq_flags;
26859+
26860+ INIT_LIST_HEAD(&dev_priv->xhw_in);
26861+ spin_lock_init(&dev_priv->xhw_lock);
26862+ atomic_set(&dev_priv->xhw_client, 0);
26863+ init_waitqueue_head(&dev_priv->xhw_queue);
26864+ init_waitqueue_head(&dev_priv->xhw_caller_queue);
26865+ mutex_init(&dev_priv->xhw_mutex);
26866+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26867+ dev_priv->xhw_on = 0;
26868+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26869+
26870+ return 0;
26871+}
26872+
26873+static int psb_xhw_init_init(struct drm_device *dev,
26874+ struct drm_file *file_priv,
26875+ struct drm_psb_xhw_init_arg *arg)
26876+{
26877+ struct drm_psb_private *dev_priv =
26878+ (struct drm_psb_private *) dev->dev_private;
26879+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
26880+ int ret;
26881+ bool is_iomem;
26882+
26883+ if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) {
26884+ unsigned long irq_flags;
26885+
26886+ dev_priv->xhw_bo =
26887+ ttm_buffer_object_lookup(tfile, arg->buffer_handle);
26888+ if (!dev_priv->xhw_bo) {
26889+ ret = -EINVAL;
26890+ goto out_err;
26891+ }
26892+ ret = ttm_bo_kmap(dev_priv->xhw_bo, 0,
26893+ dev_priv->xhw_bo->num_pages,
26894+ &dev_priv->xhw_kmap);
26895+ if (ret) {
26896+ DRM_ERROR("Failed mapping X server "
26897+ "communications buffer.\n");
26898+ goto out_err0;
26899+ }
26900+ dev_priv->xhw =
26901+ ttm_kmap_obj_virtual(&dev_priv->xhw_kmap, &is_iomem);
26902+ if (is_iomem) {
26903+ DRM_ERROR("X server communications buffer "
26904+ "is in device memory.\n");

26905+ ret = -EINVAL;
26906+ goto out_err1;
26907+ }
26908+ dev_priv->xhw_file = file_priv;
26909+
26910+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26911+ dev_priv->xhw_on = 1;
26912+ dev_priv->xhw_submit_ok = 1;
26913+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26914+ return 0;
26915+ } else {
26916+ DRM_ERROR("Xhw is already initialized.\n");
26917+ return -EBUSY;
26918+ }
26919+out_err1:
26920+ dev_priv->xhw = NULL;
26921+ ttm_bo_kunmap(&dev_priv->xhw_kmap);
26922+out_err0:
26923+ ttm_bo_unref(&dev_priv->xhw_bo);
26924+out_err:
26925+ atomic_dec(&dev_priv->xhw_client);
26926+ return ret;
26927+}
26928+
26929+static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv)
26930+{
26931+ struct psb_xhw_buf *cur_buf, *next;
26932+ unsigned long irq_flags;
26933+
26934+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26935+ dev_priv->xhw_submit_ok = 0;
26936+
26937+ list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) {
26938+ list_del_init(&cur_buf->head);
26939+ if (cur_buf->copy_back)
26940+ cur_buf->arg.ret = -EINVAL;
26941+ atomic_set(&cur_buf->done, 1);
26942+ }
26943+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26944+ wake_up(&dev_priv->xhw_caller_queue);
26945+}
26946+
26947+void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
26948+ struct drm_file *file_priv, int closing)
26949+{
26950+
26951+ if (dev_priv->xhw_file == file_priv &&
26952+ atomic_add_unless(&dev_priv->xhw_client, -1, 0)) {
26953+
26954+ if (closing)
26955+ psb_xhw_queue_empty(dev_priv);
26956+ else {
26957+ struct psb_xhw_buf buf;
26958+ INIT_LIST_HEAD(&buf.head);
26959+
26960+ psb_xhw_terminate(dev_priv, &buf);
26961+ psb_xhw_queue_empty(dev_priv);
26962+ }
26963+
26964+ dev_priv->xhw = NULL;
26965+ ttm_bo_kunmap(&dev_priv->xhw_kmap);
26966+ ttm_bo_unref(&dev_priv->xhw_bo);
26967+ dev_priv->xhw_file = NULL;
26968+ }
26969+}
26970+
26971+int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
26972+ struct drm_file *file_priv)
26973+{
26974+ struct drm_psb_xhw_init_arg *arg =
26975+ (struct drm_psb_xhw_init_arg *) data;
26976+ struct drm_psb_private *dev_priv =
26977+ (struct drm_psb_private *) dev->dev_private;
26978+ int ret = 0;
26979+ down_read(&dev_priv->sgx_sem);
26980+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26981+ switch (arg->operation) {
26982+ case PSB_XHW_INIT:
26983+ ret = psb_xhw_init_init(dev, file_priv, arg);
26984+ break;
26985+ case PSB_XHW_TAKEDOWN:
26986+ psb_xhw_init_takedown(dev_priv, file_priv, 0);
26987+ break;
26988+ }
26989+ up_read(&dev_priv->sgx_sem);
26990+ return ret;
26991+}
26992+
26993+static int psb_xhw_in_empty(struct drm_psb_private *dev_priv)
26994+{
26995+ int empty;
26996+ unsigned long irq_flags;
26997+
26998+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26999+ empty = list_empty(&dev_priv->xhw_in);
27000+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
27001+ return empty;
27002+}
27003+
27004+int psb_xhw_handler(struct drm_psb_private *dev_priv)
27005+{
27006+ unsigned long irq_flags;
27007+ struct drm_psb_xhw_arg *xa;
27008+ struct psb_xhw_buf *buf;
27009+
27010+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
27011+
27012+ if (!dev_priv->xhw_on) {
27013+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
27014+ return -EINVAL;
27015+ }
27016+
27017+ buf = dev_priv->xhw_cur_buf;
27018+ if (buf && buf->copy_back) {
27019+ xa = &buf->arg;
27020+ memcpy(xa, dev_priv->xhw, sizeof(*xa));
27021+ dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op;
27022+ atomic_set(&buf->done, 1);
27023+ wake_up(&dev_priv->xhw_caller_queue);
27024+ } else
27025+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
27026+
27027+ dev_priv->xhw_cur_buf = NULL;
27028+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
27029+ return 0;
27030+}
27031+
27032+int psb_xhw_ioctl(struct drm_device *dev, void *data,
27033+ struct drm_file *file_priv)
27034+{
27035+ struct drm_psb_private *dev_priv =
27036+ (struct drm_psb_private *) dev->dev_private;
27037+ unsigned long irq_flags;
27038+ struct drm_psb_xhw_arg *xa;
27039+ int ret;
27040+ struct list_head *list;
27041+ struct psb_xhw_buf *buf;
27042+
27043+ if (!dev_priv)
27044+ return -EINVAL;
27045+
27046+ if (mutex_lock_interruptible(&dev_priv->xhw_mutex))
27047+ return -ERESTART;
27048+
27049+ if (psb_forced_user_interrupt(dev_priv)) {
27050+ mutex_unlock(&dev_priv->xhw_mutex);
27051+ return -EINVAL;
27052+ }
27053+
27054+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
27055+ while (list_empty(&dev_priv->xhw_in)) {
27056+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
27057+ ret = wait_event_interruptible_timeout(dev_priv->xhw_queue,
27058+ !psb_xhw_in_empty
27059+ (dev_priv), DRM_HZ);
27060+ if (ret == -ERESTARTSYS || ret == 0) {
27061+ mutex_unlock(&dev_priv->xhw_mutex);
27062+ return -ERESTART;
27063+ }
27064+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
27065+ }
27066+
27067+ list = dev_priv->xhw_in.next;
27068+ list_del_init(list);
27069+
27070+ buf = list_entry(list, struct psb_xhw_buf, head);
27071+ xa = &buf->arg;
27072+ memcpy(dev_priv->xhw, xa, sizeof(*xa));
27073+
27074+ if (unlikely(buf->copy_back))
27075+ dev_priv->xhw_cur_buf = buf;
27076+ else {
27077+ atomic_set(&buf->done, 1);
27078+ dev_priv->xhw_cur_buf = NULL;
27079+ }
27080+
27081+ if (xa->op == PSB_XHW_TERMINATE) {
27082+ dev_priv->xhw_on = 0;
27083+ wake_up(&dev_priv->xhw_caller_queue);
27084+ }
27085+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
27086+
27087+ mutex_unlock(&dev_priv->xhw_mutex);
27088+
27089+ return 0;
27090+}
27091diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c
27092--- a/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c 1969-12-31 16:00:00.000000000 -0800
27093+++ b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c 2009-04-07 13:28:38.000000000 -0700
27094@@ -0,0 +1,149 @@
27095+/**************************************************************************
27096+ *
27097+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
27098+ * All Rights Reserved.
27099+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
27100+ * All Rights Reserved.
27101+ *
27102+ * Permission is hereby granted, free of charge, to any person obtaining a
27103+ * copy of this software and associated documentation files (the
27104+ * "Software"), to deal in the Software without restriction, including
27105+ * without limitation the rights to use, copy, modify, merge, publish,
27106+ * distribute, sub license, and/or sell copies of the Software, and to
27107+ * permit persons to whom the Software is furnished to do so, subject to
27108+ * the following conditions:
27109+ *
27110+ * The above copyright notice and this permission notice (including the
27111+ * next paragraph) shall be included in all copies or substantial portions
27112+ * of the Software.
27113+ *
27114+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27115+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27116+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
27117+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
27118+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
27119+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
27120+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
27121+ *
27122+ **************************************************************************/
27123+/*
27124+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
27125+ * Keith Packard.
27126+ */
27127+
27128+#include "ttm/ttm_bo_driver.h"
27129+#ifdef TTM_HAS_AGP
27130+#include "ttm/ttm_placement_common.h"
27131+#include <linux/agp_backend.h>
27132+#include <asm/agp.h>
27133+#include <asm/io.h>
27134+
27135+struct ttm_agp_backend {
27136+ struct ttm_backend backend;
27137+ struct agp_memory *mem;
27138+ struct agp_bridge_data *bridge;
27139+};
27140+
27141+static int ttm_agp_populate(struct ttm_backend *backend,
27142+ unsigned long num_pages, struct page **pages,
27143+ struct page *dummy_read_page)
27144+{
27145+ struct ttm_agp_backend *agp_be =
27146+ container_of(backend, struct ttm_agp_backend, backend);
27147+ struct page **cur_page, **last_page = pages + num_pages;
27148+ struct agp_memory *mem;
27149+
27150+ mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
27151+ if (unlikely(mem == NULL))
27152+ return -ENOMEM;
27153+
27154+ mem->page_count = 0;
27155+ for (cur_page = pages; cur_page < last_page; ++cur_page) {
27156+ struct page *page = *cur_page;
27157+ if (!page) {
27158+ page = dummy_read_page;
27159+ }
27160+ mem->memory[mem->page_count++] =
27161+ phys_to_gart(page_to_phys(page));
27162+ }
27163+ agp_be->mem = mem;
27164+ return 0;
27165+}
27166+
27167+static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
27168+{
27169+ struct ttm_agp_backend *agp_be =
27170+ container_of(backend, struct ttm_agp_backend, backend);
27171+ struct agp_memory *mem = agp_be->mem;
27172+ int cached = (bo_mem->flags & TTM_PL_FLAG_CACHED);
27173+ int ret;
27174+
27175+ mem->is_flushed = 1;
27176+ mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
27177+
27178+ ret = agp_bind_memory(mem, bo_mem->mm_node->start);
27179+ if (ret)
27180+ printk(KERN_ERR "AGP Bind memory failed.\n");
27181+
27182+ return ret;
27183+}
27184+
27185+static int ttm_agp_unbind(struct ttm_backend *backend)
27186+{
27187+ struct ttm_agp_backend *agp_be =
27188+ container_of(backend, struct ttm_agp_backend, backend);
27189+
27190+ if (agp_be->mem->is_bound)
27191+ return agp_unbind_memory(agp_be->mem);
27192+ else
27193+ return 0;
27194+}
27195+
27196+static void ttm_agp_clear(struct ttm_backend *backend)
27197+{
27198+ struct ttm_agp_backend *agp_be =
27199+ container_of(backend, struct ttm_agp_backend, backend);
27200+ struct agp_memory *mem = agp_be->mem;
27201+
27202+ if (mem) {
27203+ ttm_agp_unbind(backend);
27204+ agp_free_memory(mem);
27205+ }
27206+ agp_be->mem = NULL;
27207+}
27208+
27209+static void ttm_agp_destroy(struct ttm_backend *backend)
27210+{
27211+ struct ttm_agp_backend *agp_be =
27212+ container_of(backend, struct ttm_agp_backend, backend);
27213+
27214+ if (agp_be->mem)
27215+ ttm_agp_clear(backend);
27216+ kfree(agp_be);
27217+}
27218+
27219+static struct ttm_backend_func ttm_agp_func = {
27220+ .populate = ttm_agp_populate,
27221+ .clear = ttm_agp_clear,
27222+ .bind = ttm_agp_bind,
27223+ .unbind = ttm_agp_unbind,
27224+ .destroy = ttm_agp_destroy,
27225+};
27226+
27227+struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
27228+ struct agp_bridge_data *bridge)
27229+{
27230+ struct ttm_agp_backend *agp_be;
27231+
27232+ agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
27233+ if (!agp_be)
27234+ return NULL;
27235+
27236+ agp_be->mem = NULL;
27237+ agp_be->bridge = bridge;
27238+ agp_be->backend.func = &ttm_agp_func;
27239+ agp_be->backend.bdev = bdev;
27240+ return &agp_be->backend;
27241+}
27242+
27243+#endif
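The five callbacks above make up the whole backend life cycle TTM drives for AGP-backed ttm_tt objects: populate fills an agp_memory with the page array, bind/unbind attach it to or detach it from the GART, clear releases the pages, destroy frees the backend. A driver only has to construct the backend; a minimal, hedged wrapper (editorial, not in the patch; the function name is illustrative and where the agp_bridge_data comes from is left to the caller):

static struct ttm_backend *example_agp_backend(struct ttm_bo_device *bdev,
					       struct agp_bridge_data *bridge)
{
	/* Returns NULL if the kmalloc inside ttm_agp_backend_init() fails. */
	return ttm_agp_backend_init(bdev, bridge);
}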
27244diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_api.h b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h
27245--- a/drivers/gpu/drm/psb/ttm/ttm_bo_api.h 1969-12-31 16:00:00.000000000 -0800
27246+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h 2009-04-07 13:28:38.000000000 -0700
27247@@ -0,0 +1,578 @@
27248+/**************************************************************************
27249+ *
27250+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
27251+ * All Rights Reserved.
27252+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
27253+ * All Rights Reserved.
27254+ *
27255+ * Permission is hereby granted, free of charge, to any person obtaining a
27256+ * copy of this software and associated documentation files (the
27257+ * "Software"), to deal in the Software without restriction, including
27258+ * without limitation the rights to use, copy, modify, merge, publish,
27259+ * distribute, sub license, and/or sell copies of the Software, and to
27260+ * permit persons to whom the Software is furnished to do so, subject to
27261+ * the following conditions:
27262+ *
27263+ * The above copyright notice and this permission notice (including the
27264+ * next paragraph) shall be included in all copies or substantial portions
27265+ * of the Software.
27266+ *
27267+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27268+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27269+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
27270+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
27271+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
27272+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
27273+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
27274+ *
27275+ **************************************************************************/
27276+/*
27277+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
27278+ */
27279+
27280+#ifndef _TTM_BO_API_H_
27281+#define _TTM_BO_API_H_
27282+
27283+#include <drm/drm_hashtab.h>
27284+#include <linux/kref.h>
27285+#include <linux/list.h>
27286+#include <linux/wait.h>
27287+#include <linux/mutex.h>
27288+#include <linux/mm.h>
27289+#include <linux/rbtree.h>
27290+
27291+struct ttm_bo_device;
27292+
27293+struct drm_mm_node;
27294+
27295+/**
27296+ * struct ttm_mem_reg
27297+ *
27298+ * @mm_node: Memory manager node.
27299+ * @size: Requested size of memory region.
27300+ * @num_pages: Actual size of memory region in pages.
27301+ * @page_alignment: Page alignment.
27302+ * @flags: Placement flags.
27303+ * @proposed_flags: Proposed placement flags.
27304+ *
27305+ * Structure indicating the placement and space resources used by a
27306+ * buffer object.
27307+ */
27308+
27309+struct ttm_mem_reg {
27310+ struct drm_mm_node *mm_node;
27311+ unsigned long size;
27312+ unsigned long num_pages;
27313+ uint32_t page_alignment;
27314+ uint32_t mem_type;
27315+ uint32_t flags;
27316+ uint32_t proposed_flags;
27317+};
27318+
27319+/**
27320+ * enum ttm_bo_type
27321+ *
27322+ * @ttm_bo_type_device: These are 'normal' buffers that can
27323+ * be mmapped by user space. Each of these bos occupy a slot in the
27324+ * device address space, that can be used for normal vm operations.
27325+ *
27326+ * @ttm_bo_type_user: These are user-space memory areas that are made
27327+ * available to the GPU by mapping the buffer pages into the GPU aperture
27328+ * space. These buffers cannot be mmapped from the device address space.
27329+ *
27330+ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
27331+ * but they cannot be accessed from user-space. For kernel-only use.
27332+ */
27333+
27334+enum ttm_bo_type {
27335+ ttm_bo_type_device,
27336+ ttm_bo_type_user,
27337+ ttm_bo_type_kernel
27338+};
27339+
27340+struct ttm_tt;
27341+
27342+/**
27343+ * struct ttm_buffer_object
27344+ *
27345+ * @bdev: Pointer to the buffer object device structure.
27346+ * @kref: Reference count of this buffer object. When this refcount reaches
27347+ * zero, the object is put on the delayed delete list.
27348+ * @list_kref: List reference count of this buffer object. This member is
27349+ * used to avoid destruction while the buffer object is still on a list.
27350+ * Lru lists may keep one refcount, the delayed delete list, and kref != 0
27351+ * keeps one refcount. When this refcount reaches zero,
27352+ * the object is destroyed.
27353+ * @proposed_flags: Proposed placement for the buffer. Changed only by the
27354+ * creator prior to validation as opposed to bo->mem.proposed_flags which is
27355+ * changed by the implementation prior to a buffer move if it wants to outsmart
27356+ * the buffer creator / user. This latter happens, for example, at eviction.
27357+ * @buffer_start: The virtual user-space start address of ttm_bo_type_user
27358+ * buffers.
27359+ * @type: The bo type.
27360+ * @offset: The current GPU offset, which can have different meanings
27361+ * depending on the memory type. For SYSTEM type memory, it should be 0.
27362+ * @mem: structure describing current placement.
27363+ * @val_seq: Sequence of the validation holding the @reserved lock.
27364+ * Used to avoid starvation when many processes compete to validate the
27365+ * buffer. This member is protected by the bo_device::lru_lock.
27366+ * @seq_valid: The value of @val_seq is valid. This value is protected by
27367+ * the bo_device::lru_lock.
27368+ * @lru: List head for the lru list.
27369+ * @ddestroy: List head for the delayed destroy list.
27370+ * @swap: List head for swap LRU list.
27371+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
27372+ * pinned in physical memory. If this behaviour is not desired, this member
27373+ * holds a pointer to a persistent shmem object.
27374+ * @destroy: Destruction function. If NULL, kfree is used.
27375+ * @sync_obj_arg: Opaque argument to synchronization object function.
27376+ * @sync_obj: Pointer to a synchronization object.
27377+ * @priv_flags: Flags describing buffer object internal state.
27378+ * @event_queue: Queue for processes waiting on buffer object status change.
27379+ * @mutex: Lock protecting all members with the exception of constant members
27380+ * and list heads. We should really use a spinlock here.
27381+ * @num_pages: Actual number of pages.
27382+ * @ttm: TTM structure holding system pages.
27383+ * @vm_hash: Hash item for fast address space lookup. Need to change to a
27384+ * rb-tree node.
27385+ * @vm_node: Address space manager node.
27386+ * @addr_space_offset: Address space offset.
27387+ * @cpu_writes: For synchronization. Number of cpu writers.
27388+ * @reserved: Deadlock-free lock used for synchronization state transitions.
27389+ * @acc_size: Accounted size for this object.
27390+ *
27391+ * Base class for TTM buffer object, that deals with data placement and CPU
27392+ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
27393+ * the driver can usually use the placement offset @offset directly as the
27394+ * GPU virtual address. For drivers implementing multiple
27395+ * GPU memory manager contexts, the driver should manage the address space
27396+ * in these contexts separately and use these objects to get the correct
27397+ * placement and caching for these GPU maps. This makes it possible to use
27398+ * these objects for even quite elaborate memory management schemes.
27399+ * The destroy member and the API visibility of this object make it possible
27400+ * to derive driver-specific types.
27401+ */
27402+
27403+struct ttm_buffer_object {
27404+ struct ttm_bo_device *bdev;
27405+ struct kref kref;
27406+ struct kref list_kref;
27407+
27408+ /*
27409+ * If there is a possibility that the usage variable is zero,
27410+ * then dev->struct_mutex should be locked before incrementing it.
27411+ */
27412+
27413+ uint32_t proposed_flags;
27414+ unsigned long buffer_start;
27415+ enum ttm_bo_type type;
27416+ unsigned long offset;
27417+ struct ttm_mem_reg mem;
27418+ uint32_t val_seq;
27419+ bool seq_valid;
27420+
27421+ struct list_head lru;
27422+ struct list_head ddestroy;
27423+ struct list_head swap;
27424+
27425+ struct file *persistant_swap_storage;
27426+
27427+ void (*destroy) (struct ttm_buffer_object *);
27428+
27429+ void *sync_obj_arg;
27430+ void *sync_obj;
27431+
27432+ uint32_t priv_flags;
27433+ wait_queue_head_t event_queue;
27434+ struct mutex mutex;
27435+ unsigned long num_pages;
27436+
27437+ struct ttm_tt *ttm;
27438+ struct rb_node vm_rb;
27439+ struct drm_mm_node *vm_node;
27440+ uint64_t addr_space_offset;
27441+
27442+ atomic_t cpu_writers;
27443+ atomic_t reserved;
27444+
27445+ size_t acc_size;
27446+};
27447+
27448+/**
27449+ * struct ttm_bo_kmap_obj
27450+ *
27451+ * @virtual: The current kernel virtual address.
27452+ * @page: The page when kmap'ing a single page.
27453+ * @bo_kmap_type: Type of bo_kmap.
27454+ *
27455+ * Object describing a kernel mapping. Since a TTM bo may be located
27456+ * in various memory types with various caching policies, the
27457+ * mapping can either be an ioremap, a vmap, a kmap or part of a
27458+ * premapped region.
27459+ */
27460+
27461+struct ttm_bo_kmap_obj {
27462+ void *virtual;
27463+ struct page *page;
27464+ enum {
27465+ ttm_bo_map_iomap,
27466+ ttm_bo_map_vmap,
27467+ ttm_bo_map_kmap,
27468+ ttm_bo_map_premapped,
27469+ } bo_kmap_type;
27470+};
27471+
27472+/**
27473+ * ttm_bo_reference - reference a struct ttm_buffer_object
27474+ *
27475+ * @bo: The buffer object.
27476+ *
27477+ * Returns a refcounted pointer to a buffer object.
27478+ */
27479+
27480+static inline struct ttm_buffer_object *ttm_bo_reference(struct
27481+ ttm_buffer_object *bo)
27482+{
27483+ kref_get(&bo->kref);
27484+ return bo;
27485+}
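A short, hedged usage sketch for the reference/unreference pair (ttm_bo_unref() is declared further down in this header; the wrapper name is illustrative and not part of the patch):

static void example_bo_hold(struct ttm_buffer_object *bo)
{
	/* Take an extra reference so the object outlives the caller's pointer. */
	struct ttm_buffer_object *keep = ttm_bo_reference(bo);

	/* ... use keep ... */

	ttm_bo_unref(&keep);	/* drops the reference and clears the pointer */
}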
27486+
27487+/**
27488+ * ttm_bo_wait - wait for buffer idle.
27489+ *
27490+ * @bo: The buffer object.
27491+ * @interruptible: Use interruptible wait.
27492+ * @no_wait: Return immediately if buffer is busy.
27493+ *
27494+ * This function must be called with the bo::mutex held, and makes
27495+ * sure any previous rendering to the buffer is completed.
27496+ * Note: It might be necessary to block validations before the
27497+ * wait by reserving the buffer.
27498+ * Returns -EBUSY if no_wait is true and the buffer is busy.
27499+ * Returns -ERESTART if interrupted by a signal.
27500+ */
27501+extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
27502+ bool interruptible, bool no_wait);
27503+/**
27504+ * ttm_buffer_object_validate
27505+ *
27506+ * @bo: The buffer object.
27507+ * @interruptible: Sleep interruptible if sleeping.
27508+ * @no_wait: Return immediately if the buffer is busy.
27509+ *
27510+ * Changes placement and caching policy of the buffer object
27511+ * according to bo::proposed_flags.
27512+ * Returns
27513+ * -EINVAL on invalid proposed_flags.
27514+ * -ENOMEM on out-of-memory condition.
27515+ * -EBUSY if no_wait is true and buffer busy.
27516+ * -ERESTART if interrupted by a signal.
27517+ */
27518+extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
27519+ bool interruptible, bool no_wait);
27520+/**
27521+ * ttm_bo_unref
27522+ *
27523+ * @bo: The buffer object.
27524+ *
27525+ * Unreference and clear a pointer to a buffer object.
27526+ */
27527+extern void ttm_bo_unref(struct ttm_buffer_object **bo);
27528+
27529+/**
27530+ * ttm_bo_synccpu_write_grab
27531+ *
27532+ * @bo: The buffer object:
27533+ * @no_wait: Return immediately if buffer is busy.
27534+ *
27535+ * Synchronizes a buffer object for CPU RW access. This means
27536+ * blocking command submission that affects the buffer and
27537+ * waiting for buffer idle. This lock is recursive.
27538+ * Returns
27539+ * -EBUSY if the buffer is busy and no_wait is true.
27540+ * -ERESTART if interrupted by a signal.
27541+ */
27542+
27543+extern int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
27544+/**
27545+ * ttm_bo_synccpu_write_release:
27546+ *
27547+ * @bo : The buffer object.
27548+ *
27549+ * Releases a synccpu lock.
27550+ */
27551+extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
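The grab/release pair brackets any CPU access that must not race with the GPU; a hedged sketch of the intended pattern (editorial, function name assumed):

static int example_cpu_access(struct ttm_buffer_object *bo)
{
	int ret;

	/* Blocks command submission against bo and waits for it to go idle. */
	ret = ttm_bo_synccpu_write_grab(bo, false);
	if (ret)
		return ret;	/* -ERESTART on signal (-EBUSY only with no_wait) */

	/* ... CPU reads and writes of the buffer contents go here ... */

	ttm_bo_synccpu_write_release(bo);
	return 0;
}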
27552+
27553+/**
27554+ * ttm_buffer_object_init
27555+ *
27556+ * @bdev: Pointer to a ttm_bo_device struct.
27557+ * @bo: Pointer to a ttm_buffer_object to be initialized.
27558+ * @size: Requested size of buffer object.
27559+ * @type: Requested type of buffer object.
27560+ * @flags: Initial placement flags.
27561+ * @page_alignment: Data alignment in pages.
27562+ * @buffer_start: Virtual address of user space data backing a
27563+ * user buffer object.
27564+ * @interruptible: If needing to sleep to wait for GPU resources,
27565+ * sleep interruptible.
27566+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
27567+ * pinned in physical memory. If this behaviour is not desired, this member
27568+ * holds a pointer to a persistent shmem object. Typically, this would
27569+ * point to the shmem object backing a GEM object if TTM is used to back a
27570+ * GEM user interface.
27571+ * @acc_size: Accounted size for this object.
27572+ * @destroy: Destroy function. Use NULL for kfree().
27573+ *
27574+ * This function initializes a pre-allocated struct ttm_buffer_object.
27575+ * As this object may be part of a larger structure, this function,
27576+ * together with the @destroy function,
27577+ * enables driver-specific objects derived from a ttm_buffer_object.
27578+ * On successful return, the object kref and list_kref are set to 1.
27579+ * Returns
27580+ * -ENOMEM: Out of memory.
27581+ * -EINVAL: Invalid placement flags.
27582+ * -ERESTART: Interrupted by signal while sleeping waiting for resources.
27583+ */
27584+
27585+extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
27586+ struct ttm_buffer_object *bo,
27587+ unsigned long size,
27588+ enum ttm_bo_type type,
27589+ uint32_t flags,
27590+ uint32_t page_alignment,
27591+ unsigned long buffer_start,
27592+ bool interruptible,
27593+ struct file *persistant_swap_storage,
27594+ size_t acc_size,
27595+ void (*destroy) (struct ttm_buffer_object *));
27596+/**
27597+ * ttm_buffer_object_create
27598+ *
27599+ * @bdev: Pointer to a ttm_bo_device struct.
27600+ * @bo: Pointer to a ttm_buffer_object to be initialized.
27601+ * @size: Requested size of buffer object.
27602+ * @type: Requested type of buffer object.
27603+ * @flags: Initial placement flags.
27604+ * @page_alignment: Data alignment in pages.
27605+ * @buffer_start: Virtual address of user space data backing a
27606+ * user buffer object.
27607+ * @interruptible: If needing to sleep while waiting for GPU resources,
27608+ * sleep interruptible.
27609+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
27610+ * pinned in physical memory. If this behaviour is not desired, this member
27611+ * holds a pointer to a persistent shmem object. Typically, this would
27612+ * point to the shmem object backing a GEM object if TTM is used to back a
27613+ * GEM user interface.
27614+ * @p_bo: On successful completion *p_bo points to the created object.
27615+ *
27616+ * This function allocates a ttm_buffer_object, and then calls
27617+ * ttm_buffer_object_init on that object.
27618+ * The destroy function is set to kfree().
27619+ * Returns
27620+ * -ENOMEM: Out of memory.
27621+ * -EINVAL: Invalid placement flags.
27622+ * -ERESTART: Interrupted by signal while waiting for resources.
27623+ */
27624+
27625+extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
27626+ unsigned long size,
27627+ enum ttm_bo_type type,
27628+ uint32_t flags,
27629+ uint32_t page_alignment,
27630+ unsigned long buffer_start,
27631+ bool interruptible,
27632+ struct file *persistant_swap_storage,
27633+ struct ttm_buffer_object **p_bo);
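A hedged example of creating a kernel-internal buffer object with this helper; editorial, the wrapper name is illustrative, and the placement flags are left to the caller since their definitions live in ttm_placement_common.h, which is not quoted here:

static int example_bo_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t placement_flags,
			     struct ttm_buffer_object **p_bo)
{
	/* Kernel-only bo: default page alignment, no user backing store,
	 * no persistent swap storage, interruptible waits for resources. */
	return ttm_buffer_object_create(bdev, size, ttm_bo_type_kernel,
					placement_flags,
					0 /* page_alignment */,
					0 /* buffer_start */,
					true /* interruptible */,
					NULL /* persistant_swap_storage */,
					p_bo);
}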
27634+
27635+/**
27636+ * ttm_bo_check_placement
27637+ *
27638+ * @bo: the buffer object.
27639+ * @set_flags: placement flags to set.
27640+ * @clr_flags: placement flags to clear.
27641+ *
27642+ * Performs minimal validity checking on an intended change of
27643+ * placement flags.
27644+ * Returns
27645+ * -EINVAL: Intended change is invalid or not allowed.
27646+ */
27647+
27648+extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
27649+ uint32_t set_flags, uint32_t clr_flags);
27650+
27651+/**
27652+ * ttm_bo_init_mm
27653+ *
27654+ * @bdev: Pointer to a ttm_bo_device struct.
27655+ * @mem_type: The memory type.
27656+ * @p_offset: offset for managed area in pages.
27657+ * @p_size: size of the managed area in pages.
27658+ *
27659+ * Initialize a manager for a given memory type.
27660+ * Note: if part of driver firstopen, it must be protected from a
27661+ * potentially racing lastclose.
27662+ * Returns:
27663+ * -EINVAL: invalid size or memory type.
27664+ * -ENOMEM: Not enough memory.
27665+ * May also return driver-specified errors.
27666+ */
27667+
27668+extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
27669+ unsigned long p_offset, unsigned long p_size);
27670+/**
27671+ * ttm_bo_clean_mm
27672+ *
27673+ * @bdev: Pointer to a ttm_bo_device struct.
27674+ * @mem_type: The memory type.
27675+ *
27676+ * Take down a manager for a given memory type after first walking
27677+ * the LRU list to evict any buffers left alive.
27678+ *
27679+ * Normally, this function is part of lastclose() or unload(), and at that
27680+ * point there shouldn't be any buffers left created by user-space, since
27681+ * they should have been removed by the file descriptor release() method.
27682+ * However, before this function is run, make sure to signal all sync objects,
27683+ * and verify that the delayed delete queue is empty. The driver must also
27684+ * make sure that there are no NO_EVICT buffers present in this memory type
27685+ * when the call is made.
27686+ *
27687+ * If this function is part of a VT switch, the caller must make sure that
27688+ * there are no applications currently validating buffers before this
27689+ * function is called. The caller can do that by first taking the
27690+ * struct ttm_bo_device::ttm_lock in write mode.
27691+ *
27692+ * Returns:
27693+ * -EINVAL: invalid or uninitialized memory type.
27694+ * -EBUSY: There are still buffers left in this memory type.
27695+ */
27696+
27697+extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
27698+
27699+/**
27700+ * ttm_bo_evict_mm
27701+ *
27702+ * @bdev: Pointer to a ttm_bo_device struct.
27703+ * @mem_type: The memory type.
27704+ *
27705+ * Evicts all buffers on the lru list of the memory type.
27706+ * This is normally part of a VT switch or an
27707+ * out-of-memory-space-due-to-fragmentation handler.
27708+ * The caller must make sure that there are no other processes
27709+ * currently validating buffers, and can do that by taking the
27710+ * struct ttm_bo_device::ttm_lock in write mode.
27711+ *
27712+ * Returns:
27713+ * -EINVAL: Invalid or uninitialized memory type.
27714+ * -ERESTART: The call was interrupted by a signal while waiting to
27715+ * evict a buffer.
27716+ */
27717+
27718+extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
27719+
27720+/**
27721+ * ttm_kmap_obj_virtual
27722+ *
27723+ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
27724+ * @is_iomem: Pointer to an integer that on return indicates 1 if the
27725+ * virtual map is io memory, 0 if normal memory.
27726+ *
27727+ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
27728+ * If *is_iomem is 1 on return, the virtual address points to an io memory area,
27729+ * that should strictly be accessed by the iowriteXX() and similar functions.
27730+ */
27731+
27732+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
27733+ bool *is_iomem)
27734+{
27735+ *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
27736+ map->bo_kmap_type == ttm_bo_map_premapped);
27737+ return map->virtual;
27738+}
27739+
27740+/**
27741+ * ttm_bo_kmap
27742+ *
27743+ * @bo: The buffer object.
27744+ * @start_page: The first page to map.
27745+ * @num_pages: Number of pages to map.
27746+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
27747+ *
27748+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
27749+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
27750+ * used to obtain a virtual address to the data.
27751+ *
27752+ * Returns
27753+ * -ENOMEM: Out of memory.
27754+ * -EINVAL: Invalid range.
27755+ */
27756+
27757+extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
27758+ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
27759+
27760+/**
27761+ * ttm_bo_kunmap
27762+ *
27763+ * @map: Object describing the map to unmap.
27764+ *
27765+ * Unmaps a kernel map set up by ttm_bo_kmap.
27766+ */
27767+
27768+extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
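Taken together, ttm_bo_kmap(), ttm_kmap_obj_virtual() and ttm_bo_kunmap() give the usual map/access/unmap bracket; a hedged sketch that clears a whole buffer (editorial; the function name is illustrative and the iomem branch assumes the standard kernel memset_io() helper):

static int example_bo_clear(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret)
		return ret;	/* -ENOMEM or -EINVAL */

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		/* io memory must go through the io accessors */
		memset_io((void __iomem *)virtual, 0,
			  bo->num_pages << PAGE_SHIFT);
	else
		memset(virtual, 0, bo->num_pages << PAGE_SHIFT);

	ttm_bo_kunmap(&map);
	return 0;
}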
27769+
27770+#if 0
27771+#endif
27772+
27773+/**
27774+ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
27775+ *
27776+ * @vma: vma as input from the fbdev mmap method.
27777+ * @bo: The bo backing the address space. The address space will
27778+ * have the same size as the bo, and start at offset 0.
27779+ *
27780+ * This function is intended to be called by the fbdev mmap method
27781+ * if the fbdev address space is to be backed by a bo.
27782+ */
27783+
27784+extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
27785+ struct ttm_buffer_object *bo);
27786+
27787+/**
27788+ * ttm_bo_mmap - mmap out of the ttm device address space.
27789+ *
27790+ * @filp: filp as input from the mmap method.
27791+ * @vma: vma as input from the mmap method.
27792+ * @bdev: Pointer to the ttm_bo_device with the address space manager.
27793+ *
27794+ * This function is intended to be called by the device mmap method
27795+ * if the device address space is to be backed by the bo manager.
27796+ */
27797+
27798+extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
27799+ struct ttm_bo_device *bdev);
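A hedged sketch of a device mmap method backed by the bo manager; editorial, and how dev_priv is recovered from the file pointer is an assumption for illustration (the cast pattern mirrors the psb code earlier in this patch):

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = (struct drm_file *)filp->private_data;
	struct drm_psb_private *dev_priv =
	    (struct drm_psb_private *)file_priv->minor->dev->dev_private;

	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}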
27800+
27801+/**
27802+ * ttm_bo_io
27803+ *
27804+ * @bdev: Pointer to the struct ttm_bo_device.
27805+ * @filp: Pointer to the struct file attempting to read / write.
27806+ * @wbuf: User-space pointer to address of buffer to write. NULL on read.
27807+ * @rbuf: User-space pointer to address of buffer to read into. Null on write.
27808+ * @count: Number of bytes to read / write.
27809+ * @f_pos: Pointer to current file position.
27810+ * @write: 1 for read, 0 for write.
27811+ *
27812+ * This function implements read / write into ttm buffer objects, and is intended to
27813+ * be called from the fops::read and fops::write method.
27814+ * Returns:
27815+ * See man (2) write, man(2) read. In particular, the function may return -EINTR if
27816+ * interrupted by a signal.
27817+ */
27818+
27819+extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
27820+ const char __user * wbuf, char __user * rbuf,
27821+ size_t count, loff_t * f_pos, bool write);
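Note the inverted sense of the final argument as documented above (1 for read, 0 for write), which matches the read-side call near the top of this patch that passes NULL/buf/1. A hedged write-side counterpart (editorial; the wrapper name and how bdev is obtained are illustrative):

static ssize_t example_bo_write(struct ttm_bo_device *bdev, struct file *filp,
				const char __user *buf, size_t count,
				loff_t *f_pos)
{
	/* wbuf supplied, rbuf NULL, final flag 0 per the comment above. */
	return ttm_bo_io(bdev, filp, buf, NULL, count, f_pos, 0);
}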
27822+
27823+extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
27824+
27825+#endif
27826diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo.c b/drivers/gpu/drm/psb/ttm/ttm_bo.c
27827--- a/drivers/gpu/drm/psb/ttm/ttm_bo.c 1969-12-31 16:00:00.000000000 -0800
27828+++ b/drivers/gpu/drm/psb/ttm/ttm_bo.c 2009-04-07 13:28:38.000000000 -0700
27829@@ -0,0 +1,1716 @@
27830+/**************************************************************************
27831+ *
27832+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
27833+ * All Rights Reserved.
27834+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
27835+ * All Rights Reserved.
27836+ *
27837+ * Permission is hereby granted, free of charge, to any person obtaining a
27838+ * copy of this software and associated documentation files (the
27839+ * "Software"), to deal in the Software without restriction, including
27840+ * without limitation the rights to use, copy, modify, merge, publish,
27841+ * distribute, sub license, and/or sell copies of the Software, and to
27842+ * permit persons to whom the Software is furnished to do so, subject to
27843+ * the following conditions:
27844+ *
27845+ * The above copyright notice and this permission notice (including the
27846+ * next paragraph) shall be included in all copies or substantial portions
27847+ * of the Software.
27848+ *
27849+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27850+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27851+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
27852+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
27853+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
27854+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
27855+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
27856+ *
27857+ **************************************************************************/
27858+/*
27859+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
27860+ */
27861+
27862+#include "ttm/ttm_bo_driver.h"
27863+#include "ttm/ttm_placement_common.h"
27864+#include <linux/jiffies.h>
27865+#include <linux/slab.h>
27866+#include <linux/sched.h>
27867+#include <linux/mm.h>
27868+#include <linux/file.h>
27869+
27870+#define TTM_ASSERT_LOCKED(param)
27871+#define TTM_DEBUG(fmt, arg...)
27872+#define TTM_BO_HASH_ORDER 13
27873+
27874+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
27875+static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
27876+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
27877+
27878+static inline uint32_t ttm_bo_type_flags(unsigned type)
27879+{
27880+ return (1 << (type));
27881+}
27882+
27883+static void ttm_bo_release_list(struct kref *list_kref)
27884+{
27885+ struct ttm_buffer_object *bo =
27886+ container_of(list_kref, struct ttm_buffer_object, list_kref);
27887+ struct ttm_bo_device *bdev = bo->bdev;
27888+
27889+ BUG_ON(atomic_read(&bo->list_kref.refcount));
27890+ BUG_ON(atomic_read(&bo->kref.refcount));
27891+ BUG_ON(atomic_read(&bo->cpu_writers));
27892+ BUG_ON(bo->sync_obj != NULL);
27893+ BUG_ON(bo->mem.mm_node != NULL);
27894+ BUG_ON(!list_empty(&bo->lru));
27895+ BUG_ON(!list_empty(&bo->ddestroy));
27896+
27897+ if (bo->ttm)
27898+ ttm_tt_destroy(bo->ttm);
27899+ if (bo->destroy)
27900+ bo->destroy(bo);
27901+ else {
27902+ ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
27903+ kfree(bo);
27904+ }
27905+}
27906+
27907+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
27908+{
27909+
27910+ if (interruptible) {
27911+ int ret = 0;
27912+
27913+ ret = wait_event_interruptible(bo->event_queue,
27914+ atomic_read(&bo->reserved) == 0);
27915+ if (unlikely(ret != 0))
27916+ return -ERESTART;
27917+ } else {
27918+ wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
27919+ }
27920+ return 0;
27921+}
27922+
27923+static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
27924+{
27925+ struct ttm_bo_device *bdev = bo->bdev;
27926+ struct ttm_mem_type_manager *man;
27927+
27928+ BUG_ON(!atomic_read(&bo->reserved));
27929+
27930+ if (!(bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
27931+
27932+ BUG_ON(!list_empty(&bo->lru));
27933+
27934+ man = &bdev->man[bo->mem.mem_type];
27935+ list_add_tail(&bo->lru, &man->lru);
27936+ kref_get(&bo->list_kref);
27937+
27938+ if (bo->ttm != NULL) {
27939+ list_add_tail(&bo->swap, &bdev->swap_lru);
27940+ kref_get(&bo->list_kref);
27941+ }
27942+ }
27943+}
27944+
27945+/*
27946+ * Call with bdev->lru_lock and bdev->global->swap_lock held.
27947+ */
27948+
27949+static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
27950+{
27951+ int put_count = 0;
27952+
27953+ if (!list_empty(&bo->swap)) {
27954+ list_del_init(&bo->swap);
27955+ ++put_count;
27956+ }
27957+ if (!list_empty(&bo->lru)) {
27958+ list_del_init(&bo->lru);
27959+ ++put_count;
27960+ }
27961+
27962+ /*
27963+ * TODO: Add a driver hook to delete from
27964+ * driver-specific LRU's here.
27965+ */
27966+
27967+ return put_count;
27968+}
27969+
27970+int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
27971+ bool interruptible,
27972+ bool no_wait, bool use_sequence, uint32_t sequence)
27973+{
27974+ struct ttm_bo_device *bdev = bo->bdev;
27975+ int ret;
27976+
27977+ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
27978+ if (use_sequence && bo->seq_valid &&
27979+ (sequence - bo->val_seq < (1 << 31))) {
27980+ return -EAGAIN;
27981+ }
27982+
27983+ if (no_wait)
27984+ return -EBUSY;
27985+
27986+ spin_unlock(&bdev->lru_lock);
27987+ ret = ttm_bo_wait_unreserved(bo, interruptible);
27988+ spin_lock(&bdev->lru_lock);
27989+
27990+ if (unlikely(ret))
27991+ return ret;
27992+ }
27993+
27994+ if (use_sequence) {
27995+ bo->val_seq = sequence;
27996+ bo->seq_valid = true;
27997+ } else {
27998+ bo->seq_valid = false;
27999+ }
28000+
28001+ return 0;
28002+}
28003+
28004+static void ttm_bo_ref_bug(struct kref *list_kref)
28005+{
28006+ BUG();
28007+}
28008+
28009+int ttm_bo_reserve(struct ttm_buffer_object *bo,
28010+ bool interruptible,
28011+ bool no_wait, bool use_sequence, uint32_t sequence)
28012+{
28013+ struct ttm_bo_device *bdev = bo->bdev;
28014+ int put_count = 0;
28015+ int ret;
28016+
28017+ spin_lock(&bdev->lru_lock);
28018+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
28019+ sequence);
28020+ if (likely(ret == 0))
28021+ put_count = ttm_bo_del_from_lru(bo);
28022+ spin_unlock(&bdev->lru_lock);
28023+
28024+ while (put_count--)
28025+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
28026+
28027+ return ret;
28028+}
28029+
28030+void ttm_bo_unreserve(struct ttm_buffer_object *bo)
28031+{
28032+ struct ttm_bo_device *bdev = bo->bdev;
28033+
28034+ spin_lock(&bdev->lru_lock);
28035+ ttm_bo_add_to_lru(bo);
28036+ atomic_set(&bo->reserved, 0);
28037+ wake_up_all(&bo->event_queue);
28038+ spin_unlock(&bdev->lru_lock);
28039+}
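ttm_bo_reserve()/ttm_bo_unreserve() bracket any state change that must not race with eviction or validation from other threads: reserving also takes the bo off its LRU lists, and unreserving puts it back and wakes waiters. A hedged single-buffer sketch (editorial; no sequence number is needed when only one buffer is reserved at a time, and the function name is illustrative):

static int example_with_reservation(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_reserve(bo, true /* interruptible */,
			     false /* no_wait */,
			     false /* use_sequence */, 0);
	if (ret)
		return ret;	/* -ERESTART on signal (-EBUSY/-EAGAIN with
				 * no_wait or a sequence number) */

	/* ... validate, move or otherwise manipulate the reserved bo ... */

	ttm_bo_unreserve(bo);	/* back on the LRU lists, waiters woken */
	return 0;
}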
28040+
28041+/*
28042+ * Call bo->mutex locked.
28043+ */
28044+
28045+static int ttm_bo_add_ttm(struct ttm_buffer_object *bo)
28046+{
28047+ struct ttm_bo_device *bdev = bo->bdev;
28048+ int ret = 0;
28049+ uint32_t page_flags = 0;
28050+
28051+ TTM_ASSERT_LOCKED(&bo->mutex);
28052+ bo->ttm = NULL;
28053+
28054+ switch (bo->type) {
28055+ case ttm_bo_type_device:
28056+ case ttm_bo_type_kernel:
28057+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
28058+ page_flags, bdev->dummy_read_page);
28059+ if (unlikely(bo->ttm == NULL))
28060+ ret = -ENOMEM;
28061+ break;
28062+ case ttm_bo_type_user:
28063+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
28064+ page_flags | TTM_PAGE_FLAG_USER,
28065+ bdev->dummy_read_page);
28066+ if (unlikely(bo->ttm == NULL))
28067+ ret = -ENOMEM;
28068+ break;
28069+
28070+ ret = ttm_tt_set_user(bo->ttm, current,
28071+ bo->buffer_start, bo->num_pages);
28072+ if (unlikely(ret != 0))
28073+ ttm_tt_destroy(bo->ttm);
28074+ break;
28075+ default:
28076+ printk(KERN_ERR "Illegal buffer object type\n");
28077+ ret = -EINVAL;
28078+ break;
28079+ }
28080+
28081+ return ret;
28082+}
28083+
28084+static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
28085+ struct ttm_mem_reg *mem,
28086+ bool evict, bool interruptible, bool no_wait)
28087+{
28088+ struct ttm_bo_device *bdev = bo->bdev;
28089+ bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
28090+ bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
28091+ struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
28092+ struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
28093+ int ret = 0;
28094+
28095+ if (old_is_pci || new_is_pci ||
28096+ ((mem->flags & bo->mem.flags & TTM_PL_MASK_CACHING) == 0))
28097+ ttm_bo_unmap_virtual(bo);
28098+
28099+ /*
28100+ * Create and bind a ttm if required.
28101+ */
28102+
28103+ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
28104+ ret = ttm_bo_add_ttm(bo);
28105+ if (ret)
28106+ goto out_err;
28107+
28108+ ret = ttm_tt_set_placement_caching(bo->ttm, mem->flags);
28109+ if (ret)
28110+ return ret;
28111+
28112+ if (mem->mem_type != TTM_PL_SYSTEM) {
28113+ ret = ttm_tt_bind(bo->ttm, mem);
28114+ if (ret)
28115+ goto out_err;
28116+ }
28117+
28118+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
28119+
28120+ struct ttm_mem_reg *old_mem = &bo->mem;
28121+ uint32_t save_flags = old_mem->flags;
28122+ uint32_t save_proposed_flags = old_mem->proposed_flags;
28123+
28124+ *old_mem = *mem;
28125+ mem->mm_node = NULL;
28126+ old_mem->proposed_flags = save_proposed_flags;
28127+ ttm_flag_masked(&save_flags, mem->flags,
28128+ TTM_PL_MASK_MEMTYPE);
28129+ goto moved;
28130+ }
28131+
28132+ }
28133+
28134+ if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
28135+ !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
28136+ ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
28137+ else if (bdev->driver->move)
28138+ ret = bdev->driver->move(bo, evict, interruptible,
28139+ no_wait, mem);
28140+ else
28141+ ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
28142+
28143+ if (ret)
28144+ goto out_err;
28145+
28146+ moved:
28147+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_EVICTED) {
28148+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.flags);
28149+ if (ret)
28150+ printk(KERN_ERR "Can not flush read caches\n");
28151+ }
28152+
28153+ ttm_flag_masked(&bo->priv_flags,
28154+ (evict) ? TTM_BO_PRIV_FLAG_EVICTED : 0,
28155+ TTM_BO_PRIV_FLAG_EVICTED);
28156+
28157+ if (bo->mem.mm_node)
28158+ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
28159+ bdev->man[bo->mem.mem_type].gpu_offset;
28160+
28161+ return 0;
28162+
28163+ out_err:
28164+ new_man = &bdev->man[bo->mem.mem_type];
28165+ if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
28166+ ttm_tt_unbind(bo->ttm);
28167+ ttm_tt_destroy(bo->ttm);
28168+ bo->ttm = NULL;
28169+ }
28170+
28171+ return ret;
28172+}
28173+
28174+static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo,
28175+ bool allow_errors)
28176+{
28177+ struct ttm_bo_device *bdev = bo->bdev;
28178+ struct ttm_bo_driver *driver = bdev->driver;
28179+
28180+ if (bo->sync_obj) {
28181+ if (bdev->nice_mode) {
28182+ unsigned long _end = jiffies + 3 * HZ;
28183+ int ret;
28184+ do {
28185+ ret = ttm_bo_wait(bo, false, false, false);
28186+ if (ret && allow_errors)
28187+ return ret;
28188+
28189+ } while (ret && !time_after_eq(jiffies, _end));
28190+
28191+ if (bo->sync_obj) {
28192+ bdev->nice_mode = false;
28193+ printk(KERN_ERR "Detected probable GPU lockup. "
28194+ "Evicting buffer.\n");
28195+ }
28196+ }
28197+ if (bo->sync_obj) {
28198+ driver->sync_obj_unref(&bo->sync_obj);
28199+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
28200+ }
28201+ }
28202+ return 0;
28203+}
28204+
28205+/**
28206+ * If bo idle, remove from delayed- and lru lists, and unref.
28207+ * If not idle, and already on delayed list, do nothing.
28208+ * If not idle, and not on delayed list, put on delayed list,
28209+ * up the list_kref and schedule a delayed list check.
28210+ */
28211+
28212+static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
28213+{
28214+ struct ttm_bo_device *bdev = bo->bdev;
28215+ struct ttm_bo_driver *driver = bdev->driver;
28216+
28217+ mutex_lock(&bo->mutex);
28218+
28219+ if (bo->sync_obj && driver->sync_obj_signaled(bo->sync_obj,
28220+ bo->sync_obj_arg)) {
28221+ driver->sync_obj_unref(&bo->sync_obj);
28222+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
28223+ }
28224+
28225+ if (bo->sync_obj && remove_all)
28226+ (void)ttm_bo_expire_sync_obj(bo, false);
28227+
28228+ if (!bo->sync_obj) {
28229+ int put_count;
28230+
28231+ if (bo->ttm)
28232+ ttm_tt_unbind(bo->ttm);
28233+ spin_lock(&bdev->lru_lock);
28234+ if (!list_empty(&bo->ddestroy)) {
28235+ list_del_init(&bo->ddestroy);
28236+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
28237+ }
28238+ if (bo->mem.mm_node) {
28239+ drm_mm_put_block(bo->mem.mm_node);
28240+ bo->mem.mm_node = NULL;
28241+ }
28242+ put_count = ttm_bo_del_from_lru(bo);
28243+ spin_unlock(&bdev->lru_lock);
28244+ mutex_unlock(&bo->mutex);
28245+ while (put_count--)
28246+ kref_put(&bo->list_kref, ttm_bo_release_list);
28247+
28248+ return;
28249+ }
28250+
28251+ spin_lock(&bdev->lru_lock);
28252+ if (list_empty(&bo->ddestroy)) {
28253+ spin_unlock(&bdev->lru_lock);
28254+ driver->sync_obj_flush(bo->sync_obj, bo->sync_obj_arg);
28255+ spin_lock(&bdev->lru_lock);
28256+ if (list_empty(&bo->ddestroy)) {
28257+ kref_get(&bo->list_kref);
28258+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
28259+ }
28260+ spin_unlock(&bdev->lru_lock);
28261+ schedule_delayed_work(&bdev->wq,
28262+ ((HZ / 100) < 1) ? 1 : HZ / 100);
28263+ } else
28264+ spin_unlock(&bdev->lru_lock);
28265+
28266+ mutex_unlock(&bo->mutex);
28267+ return;
28268+}
28269+
28270+/**
28271+ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
28272+ * encountered buffers.
28273+ */
28274+
28275+static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
28276+{
28277+ struct ttm_buffer_object *entry, *nentry;
28278+ struct list_head *list, *next;
28279+ int ret;
28280+
28281+ spin_lock(&bdev->lru_lock);
28282+ list_for_each_safe(list, next, &bdev->ddestroy) {
28283+ entry = list_entry(list, struct ttm_buffer_object, ddestroy);
28284+ nentry = NULL;
28285+
28286+ /*
28287+ * Protect the next list entry from destruction while we
28288+ * unlock the lru_lock.
28289+ */
28290+
28291+ if (next != &bdev->ddestroy) {
28292+ nentry = list_entry(next, struct ttm_buffer_object,
28293+ ddestroy);
28294+ kref_get(&nentry->list_kref);
28295+ }
28296+ kref_get(&entry->list_kref);
28297+
28298+ spin_unlock(&bdev->lru_lock);
28299+ ttm_bo_cleanup_refs(entry, remove_all);
28300+ kref_put(&entry->list_kref, ttm_bo_release_list);
28301+ spin_lock(&bdev->lru_lock);
28302+
28303+ if (nentry) {
28304+ bool next_onlist = !list_empty(next);
28305+ kref_put(&nentry->list_kref, ttm_bo_release_list);
28306+
28307+ /*
28308+ * Someone might have raced us and removed the
28309+ * next entry from the list. We don't bother restarting
28310+ * list traversal.
28311+ */
28312+
28313+ if (!next_onlist)
28314+ break;
28315+ }
28316+ }
28317+ ret = !list_empty(&bdev->ddestroy);
28318+ spin_unlock(&bdev->lru_lock);
28319+
28320+ return ret;
28321+}
28322+
28323+static void ttm_bo_delayed_workqueue(struct work_struct *work)
28324+{
28325+ struct ttm_bo_device *bdev =
28326+ container_of(work, struct ttm_bo_device, wq.work);
28327+
28328+ if (ttm_bo_delayed_delete(bdev, false)) {
28329+ schedule_delayed_work(&bdev->wq,
28330+ ((HZ / 100) < 1) ? 1 : HZ / 100);
28331+ }
28332+}
28333+
28334+static void ttm_bo_release(struct kref *kref)
28335+{
28336+ struct ttm_buffer_object *bo =
28337+ container_of(kref, struct ttm_buffer_object, kref);
28338+ struct ttm_bo_device *bdev = bo->bdev;
28339+
28340+ if (likely(bo->vm_node != NULL)) {
28341+ rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
28342+ drm_mm_put_block(bo->vm_node);
28343+ }
28344+ write_unlock(&bdev->vm_lock);
28345+ ttm_bo_cleanup_refs(bo, false);
28346+ kref_put(&bo->list_kref, ttm_bo_release_list);
28347+ write_lock(&bdev->vm_lock);
28348+}
28349+
28350+void ttm_bo_unref(struct ttm_buffer_object **p_bo)
28351+{
28352+ struct ttm_buffer_object *bo = *p_bo;
28353+ struct ttm_bo_device *bdev = bo->bdev;
28354+
28355+ *p_bo = NULL;
28356+ write_lock(&bdev->vm_lock);
28357+ kref_put(&bo->kref, ttm_bo_release);
28358+ write_unlock(&bdev->vm_lock);
28359+}
28360+
28361+static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
28362+ bool interruptible, bool no_wait)
28363+{
28364+ int ret = 0;
28365+ struct ttm_bo_device *bdev = bo->bdev;
28366+ struct ttm_mem_reg evict_mem;
28367+
28368+ if (bo->mem.mem_type != mem_type)
28369+ goto out;
28370+
28371+ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
28372+ if (ret && ret != -ERESTART) {
28373+ printk(KERN_ERR "Failed to expire sync object before "
28374+ "buffer eviction.\n");
28375+ goto out;
28376+ }
28377+
28378+ BUG_ON(!atomic_read(&bo->reserved));
28379+
28380+ evict_mem = bo->mem;
28381+ evict_mem.mm_node = NULL;
28382+
28383+ evict_mem.proposed_flags = bdev->driver->evict_flags(bo);
28384+ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
28385+
28386+ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
28387+ if (unlikely(ret != 0 && ret != -ERESTART)) {
28388+ evict_mem.proposed_flags = TTM_PL_FLAG_SYSTEM;
28389+ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
28390+ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
28391+ }
28392+
28393+ if (ret) {
28394+ if (ret != -ERESTART)
28395+ printk(KERN_ERR "Failed to find memory space for "
28396+ "buffer 0x%p eviction.\n", bo);
28397+ goto out;
28398+ }
28399+
28400+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, no_wait);
28401+ if (ret) {
28402+ if (ret != -ERESTART)
28403+ printk(KERN_ERR "Buffer eviction failed\n");
28404+ goto out;
28405+ }
28406+
28407+ spin_lock(&bdev->lru_lock);
28408+ if (evict_mem.mm_node) {
28409+ drm_mm_put_block(evict_mem.mm_node);
28410+ evict_mem.mm_node = NULL;
28411+ }
28412+ spin_unlock(&bdev->lru_lock);
28413+
28414+ ttm_flag_masked(&bo->priv_flags, TTM_BO_PRIV_FLAG_EVICTED,
28415+ TTM_BO_PRIV_FLAG_EVICTED);
28416+
28417+ out:
28418+ return ret;
28419+}
28420+
28421+/**
28422+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
28423+ * space, or we've evicted everything and there isn't enough space.
28424+ */
28425+static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
28426+ struct ttm_mem_reg *mem,
28427+ uint32_t mem_type,
28428+ bool interruptible, bool no_wait)
28429+{
28430+ struct drm_mm_node *node;
28431+ struct ttm_buffer_object *entry;
28432+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
28433+ struct list_head *lru;
28434+ unsigned long num_pages = mem->num_pages;
28435+ int put_count = 0;
28436+ int ret;
28437+
28438+ retry_pre_get:
28439+ ret = drm_mm_pre_get(&man->manager);
28440+ if (unlikely(ret != 0))
28441+ return ret;
28442+
28443+ spin_lock(&bdev->lru_lock);
28444+ do {
28445+ node = drm_mm_search_free(&man->manager, num_pages,
28446+ mem->page_alignment, 1);
28447+ if (node)
28448+ break;
28449+
28450+ lru = &man->lru;
28451+ if (list_empty(lru))
28452+ break;
28453+
28454+ entry = list_first_entry(lru, struct ttm_buffer_object, lru);
28455+ kref_get(&entry->list_kref);
28456+
28457+ ret = ttm_bo_reserve_locked(entry, interruptible, no_wait, false, 0);
28459+
28460+ if (likely(ret == 0))
28461+ put_count = ttm_bo_del_from_lru(entry);
28462+
28463+ spin_unlock(&bdev->lru_lock);
28464+
28465+ if (unlikely(ret != 0))
28466+ return ret;
28467+
28468+ while (put_count--)
28469+ kref_put(&entry->list_kref, ttm_bo_ref_bug);
28470+
28471+ mutex_lock(&entry->mutex);
28472+ ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
28473+ mutex_unlock(&entry->mutex);
28474+
28475+ ttm_bo_unreserve(entry);
28476+
28477+ kref_put(&entry->list_kref, ttm_bo_release_list);
28478+ if (ret)
28479+ return ret;
28480+
28481+ spin_lock(&bdev->lru_lock);
28482+ } while (1);
28483+
28484+ if (!node) {
28485+ spin_unlock(&bdev->lru_lock);
28486+ return -ENOMEM;
28487+ }
28488+
28489+ node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
28490+ if (unlikely(!node)) {
28491+ spin_unlock(&bdev->lru_lock);
28492+ goto retry_pre_get;
28493+ }
28494+
28495+ spin_unlock(&bdev->lru_lock);
28496+ mem->mm_node = node;
28497+ mem->mem_type = mem_type;
28498+ return 0;
28499+}
28500+
28501+static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
28502+ bool disallow_fixed,
28503+ uint32_t mem_type,
28504+ uint32_t mask, uint32_t * res_mask)
28505+{
28506+ uint32_t cur_flags = ttm_bo_type_flags(mem_type);
28507+
28508+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
28509+ return false;
28510+
28511+ if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
28512+ return false;
28513+
28514+ if ((mask & man->available_caching) == 0)
28515+ return false;
28516+ if (mask & man->default_caching)
28517+ cur_flags |= man->default_caching;
28518+ else if (mask & TTM_PL_FLAG_CACHED)
28519+ cur_flags |= TTM_PL_FLAG_CACHED;
28520+ else if (mask & TTM_PL_FLAG_WC)
28521+ cur_flags |= TTM_PL_FLAG_WC;
28522+ else
28523+ cur_flags |= TTM_PL_FLAG_UNCACHED;
28524+
28525+ *res_mask = cur_flags;
28526+ return true;
28527+}
28528+
28529+/**
28530+ * Creates space for memory region @mem according to its type.
28531+ *
28532+ * This function first searches for free space in compatible memory types in
28533+ * the priority order defined by the driver. If free space isn't found, then
28534+ * ttm_bo_mem_force_space is attempted in priority order to evict and find
28535+ * space.
28536+ */
28537+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
28538+ struct ttm_mem_reg *mem, bool interruptible, bool no_wait)
28539+{
28540+ struct ttm_bo_device *bdev = bo->bdev;
28541+ struct ttm_mem_type_manager *man;
28542+
28543+ uint32_t num_prios = bdev->driver->num_mem_type_prio;
28544+ const uint32_t *prios = bdev->driver->mem_type_prio;
28545+ uint32_t i;
28546+ uint32_t mem_type = TTM_PL_SYSTEM;
28547+ uint32_t cur_flags = 0;
28548+ bool type_found = false;
28549+ bool type_ok = false;
28550+ bool has_eagain = false;
28551+ struct drm_mm_node *node = NULL;
28552+ int ret;
28553+
28554+ mem->mm_node = NULL;
28555+ for (i = 0; i < num_prios; ++i) {
28556+ mem_type = prios[i];
28557+ man = &bdev->man[mem_type];
28558+
28559+ type_ok = ttm_bo_mt_compatible(man,
28560+ bo->type == ttm_bo_type_user,
28561+ mem_type, mem->proposed_flags,
28562+ &cur_flags);
28563+
28564+ if (!type_ok)
28565+ continue;
28566+
28567+ if (mem_type == TTM_PL_SYSTEM)
28568+ break;
28569+
28570+ if (man->has_type && man->use_type) {
28571+ type_found = true;
28572+ do {
28573+ ret = drm_mm_pre_get(&man->manager);
28574+ if (unlikely(ret))
28575+ return ret;
28576+
28577+ spin_lock(&bdev->lru_lock);
28578+ node = drm_mm_search_free(&man->manager,
28579+ mem->num_pages,
28580+ mem->page_alignment,
28581+ 1);
28582+ if (unlikely(!node)) {
28583+ spin_unlock(&bdev->lru_lock);
28584+ break;
28585+ }
28586+ node = drm_mm_get_block_atomic(node,
28587+ mem->num_pages,
28588+ mem->page_alignment);
28590+ spin_unlock(&bdev->lru_lock);
28591+ } while (!node);
28592+ }
28593+ if (node)
28594+ break;
28595+ }
28596+
28597+ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
28598+ mem->mm_node = node;
28599+ mem->mem_type = mem_type;
28600+ mem->flags = cur_flags;
28601+ return 0;
28602+ }
28603+
28604+ if (!type_found)
28605+ return -EINVAL;
28606+
28607+ num_prios = bdev->driver->num_mem_busy_prio;
28608+ prios = bdev->driver->mem_busy_prio;
28609+
28610+ for (i = 0; i < num_prios; ++i) {
28611+ mem_type = prios[i];
28612+ man = &bdev->man[mem_type];
28613+
28614+ if (!man->has_type)
28615+ continue;
28616+
28617+ if (!ttm_bo_mt_compatible(man,
28618+ bo->type == ttm_bo_type_user,
28619+ mem_type,
28620+ mem->proposed_flags, &cur_flags))
28621+ continue;
28622+
28623+ ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
28624+ interruptible, no_wait);
28625+
28626+ if (ret == 0 && mem->mm_node) {
28627+ mem->flags = cur_flags;
28628+ return 0;
28629+ }
28630+
28631+ if (ret == -ERESTART)
28632+ has_eagain = true;
28633+ }
28634+
28635+ ret = (has_eagain) ? -ERESTART : -ENOMEM;
28636+ return ret;
28637+}
28638+
28639+/*
28640+ * Call bo->mutex locked.
28641+ * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
28642+ */
28643+
28644+static int ttm_bo_busy(struct ttm_buffer_object *bo)
28645+{
28646+ void *sync_obj = bo->sync_obj;
28647+ struct ttm_bo_driver *driver = bo->bdev->driver;
28648+
28649+ if (sync_obj) {
28650+ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
28651+ driver->sync_obj_unref(&bo->sync_obj);
28652+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
28653+ return 0;
28654+ }
28655+ driver->sync_obj_flush(sync_obj, bo->sync_obj_arg);
28656+ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
28657+ driver->sync_obj_unref(&bo->sync_obj);
28658+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
28659+ return 0;
28660+ }
28661+ return 1;
28662+ }
28663+ return 0;
28664+}
28665+
28666+int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
28667+{
28668+ int ret = 0;
28669+
28670+ if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
28671+ return -EBUSY;
28672+
28673+ ret = wait_event_interruptible(bo->event_queue,
28674+ atomic_read(&bo->cpu_writers) == 0);
28675+
28676+ if (ret == -ERESTARTSYS)
28677+ ret = -ERESTART;
28678+
28679+ return ret;
28680+}
28681+
28682+/*
28683+ * bo->mutex locked.
28684+ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
28685+ */
28686+
28687+int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t new_mem_flags,
28688+ bool interruptible, bool no_wait)
28689+{
28690+ struct ttm_bo_device *bdev = bo->bdev;
28691+ int ret = 0;
28692+ struct ttm_mem_reg mem;
28693+
28694+ BUG_ON(!atomic_read(&bo->reserved));
28695+
28696+ /*
28697+ * FIXME: It's possible to pipeline buffer moves.
28698+ * Have the driver move function wait for idle when necessary,
28699+ * instead of doing it here.
28700+ */
28701+
28702+ ttm_bo_busy(bo);
28703+ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
28704+ if (ret)
28705+ return ret;
28706+
28707+ mem.num_pages = bo->num_pages;
28708+ mem.size = mem.num_pages << PAGE_SHIFT;
28709+ mem.proposed_flags = new_mem_flags;
28710+ mem.page_alignment = bo->mem.page_alignment;
28711+
28712+ /*
28713+ * Determine where to move the buffer.
28714+ */
28715+
28716+ ret = ttm_bo_mem_space(bo, &mem, interruptible, no_wait);
28717+ if (ret)
28718+ goto out_unlock;
28719+
28720+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
28721+
28722+ out_unlock:
28723+ if (ret && mem.mm_node) {
28724+ spin_lock(&bdev->lru_lock);
28725+ drm_mm_put_block(mem.mm_node);
28726+ spin_unlock(&bdev->lru_lock);
28727+ }
28728+ return ret;
28729+}
28730+
28731+static int ttm_bo_mem_compat(struct ttm_mem_reg *mem)
28732+{
28733+ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_MEM) == 0)
28734+ return 0;
28735+ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_CACHING) == 0)
28736+ return 0;
28737+
28738+ return 1;
28739+}
28740+
28741+int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
28742+ bool interruptible, bool no_wait)
28743+{
28744+ int ret;
28745+
28746+ BUG_ON(!atomic_read(&bo->reserved));
28747+ bo->mem.proposed_flags = bo->proposed_flags;
28748+
28749+ TTM_DEBUG("Proposed flags 0x%08lx, Old flags 0x%08lx\n",
28750+ (unsigned long)bo->mem.proposed_flags,
28751+ (unsigned long)bo->mem.flags);
28752+
28753+ /*
28754+ * Check whether we need to move buffer.
28755+ */
28756+
28757+ if (!ttm_bo_mem_compat(&bo->mem)) {
28758+ ret = ttm_bo_move_buffer(bo, bo->mem.proposed_flags,
28759+ interruptible, no_wait);
28760+ if (ret) {
28761+ if (ret != -ERESTART)
28762+ printk(KERN_ERR "Failed moving buffer. "
28763+ "Proposed placement 0x%08x\n",
28764+ bo->mem.proposed_flags);
28765+ if (ret == -ENOMEM)
28766+ printk(KERN_ERR "Out of aperture space or "
28767+ "DRM memory quota.\n");
28768+ return ret;
28769+ }
28770+ }
28771+
28772+ /*
28773+ * We might need to add a TTM.
28774+ */
28775+
28776+ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
28777+ ret = ttm_bo_add_ttm(bo);
28778+ if (ret)
28779+ return ret;
28780+ }
28781+ /*
28782+ * Validation has succeeded, move the access and other
28783+ * non-mapping-related flag bits from the proposed flags to
28784+ * the active flags
28785+ */
28786+
28787+ ttm_flag_masked(&bo->mem.flags, bo->proposed_flags,
28788+ ~TTM_PL_MASK_MEMTYPE);
28789+
28790+ return 0;
28791+}
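/*
 * Editor's illustrative sketch, not part of the original patch: a driver
 * revalidates a reserved buffer by updating bo->proposed_flags and calling
 * ttm_buffer_object_validate(). The placement flags are the ones already
 * used elsewhere in this file.
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (likely(ret == 0)) {
 *		bo->proposed_flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
 *		ret = ttm_buffer_object_validate(bo, true, false);
 *		ttm_bo_unreserve(bo);
 *	}
 */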
28792+
28793+int
28794+ttm_bo_check_placement(struct ttm_buffer_object *bo,
28795+ uint32_t set_flags, uint32_t clr_flags)
28796+{
28797+ uint32_t new_mask = set_flags | clr_flags;
28798+
28799+ if ((bo->type == ttm_bo_type_user) && (clr_flags & TTM_PL_FLAG_CACHED)) {
28800+ printk(KERN_ERR
28801+ "User buffers require cache-coherent memory.\n");
28802+ return -EINVAL;
28803+ }
28804+
28805+ if (!capable(CAP_SYS_ADMIN)) {
28806+ if (new_mask & TTM_PL_FLAG_NO_EVICT) {
28807+ printk(KERN_ERR "Need to be root to modify"
28808+ " NO_EVICT status.\n");
28809+ return -EINVAL;
28810+ }
28811+
28812+ if ((clr_flags & bo->mem.flags & TTM_PL_MASK_MEMTYPE) &&
28813+ (bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
28814+ printk(KERN_ERR "Incompatible memory specification"
28815+ " for NO_EVICT buffer.\n");
28816+ return -EINVAL;
28817+ }
28818+ }
28819+ return 0;
28820+}
28821+
28822+int ttm_buffer_object_init(struct ttm_bo_device *bdev,
28823+ struct ttm_buffer_object *bo,
28824+ unsigned long size,
28825+ enum ttm_bo_type type,
28826+ uint32_t flags,
28827+ uint32_t page_alignment,
28828+ unsigned long buffer_start,
28829+ bool interruptible,
28830+ struct file *persistant_swap_storage,
28831+ size_t acc_size,
28832+ void (*destroy) (struct ttm_buffer_object *))
28833+{
28834+ int ret = 0;
28835+ unsigned long num_pages;
28836+
28837+ size += buffer_start & ~PAGE_MASK;
28838+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
28839+ if (num_pages == 0) {
28840+ printk(KERN_ERR "Illegal buffer object size.\n");
28841+ return -EINVAL;
28842+ }
28843+ bo->destroy = destroy;
28844+
28845+ mutex_init(&bo->mutex);
28846+ mutex_lock(&bo->mutex);
28847+ kref_init(&bo->kref);
28848+ kref_init(&bo->list_kref);
28849+ atomic_set(&bo->cpu_writers, 0);
28850+ atomic_set(&bo->reserved, 1);
28851+ init_waitqueue_head(&bo->event_queue);
28852+ INIT_LIST_HEAD(&bo->lru);
28853+ INIT_LIST_HEAD(&bo->ddestroy);
28854+ INIT_LIST_HEAD(&bo->swap);
28855+ bo->bdev = bdev;
28856+ bo->type = type;
28857+ bo->num_pages = num_pages;
28858+ bo->mem.mem_type = TTM_PL_SYSTEM;
28859+ bo->mem.num_pages = bo->num_pages;
28860+ bo->mem.mm_node = NULL;
28861+ bo->mem.page_alignment = page_alignment;
28862+ bo->buffer_start = buffer_start & PAGE_MASK;
28863+ bo->priv_flags = 0;
28864+ bo->mem.flags = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
28865+ bo->seq_valid = false;
28866+ bo->persistant_swap_storage = persistant_swap_storage;
28867+ bo->acc_size = acc_size;
28868+
28869+ ret = ttm_bo_check_placement(bo, flags, 0ULL);
28870+ if (unlikely(ret != 0))
28871+ goto out_err;
28872+
28873+ /*
28874+ * If no caching attributes are set, accept any form of caching.
28875+ */
28876+
28877+ if ((flags & TTM_PL_MASK_CACHING) == 0)
28878+ flags |= TTM_PL_MASK_CACHING;
28879+
28880+ bo->proposed_flags = flags;
28881+ bo->mem.proposed_flags = flags;
28882+
28883+ /*
28884+ * For ttm_bo_type_device buffers, allocate
28885+ * address space from the device.
28886+ */
28887+
28888+ if (bo->type == ttm_bo_type_device) {
28889+ ret = ttm_bo_setup_vm(bo);
28890+ if (ret)
28891+ goto out_err;
28892+ }
28893+
28894+ ret = ttm_buffer_object_validate(bo, interruptible, false);
28895+ if (ret)
28896+ goto out_err;
28897+
28898+ mutex_unlock(&bo->mutex);
28899+ ttm_bo_unreserve(bo);
28900+ return 0;
28901+
28902+ out_err:
28903+ mutex_unlock(&bo->mutex);
28904+ ttm_bo_unreserve(bo);
28905+ ttm_bo_unref(&bo);
28906+
28907+ return ret;
28908+}
28909+
28910+static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
28911+ unsigned long num_pages)
28912+{
28913+ size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
28914+ PAGE_MASK;
28915+
28916+ return bdev->ttm_bo_size + 2 * page_array_size;
28917+}
28918+
28919+int ttm_buffer_object_create(struct ttm_bo_device *bdev,
28920+ unsigned long size,
28921+ enum ttm_bo_type type,
28922+ uint32_t flags,
28923+ uint32_t page_alignment,
28924+ unsigned long buffer_start,
28925+ bool interruptible,
28926+ struct file *persistant_swap_storage,
28927+ struct ttm_buffer_object **p_bo)
28928+{
28929+ struct ttm_buffer_object *bo;
28930+ int ret;
28931+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
28932+
28933+ size_t acc_size =
28934+ ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
28935+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
28936+ if (unlikely(ret != 0))
28937+ return ret;
28938+
28939+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
28940+
28941+ if (unlikely(bo == NULL)) {
28942+ ttm_mem_global_free(mem_glob, acc_size, false);
28943+ return -ENOMEM;
28944+ }
28945+
28946+ ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
28947+ page_alignment, buffer_start,
28948+ interruptible,
28949+ persistant_swap_storage, acc_size, NULL);
28950+ if (likely(ret == 0))
28951+ *p_bo = bo;
28952+
28953+ return ret;
28954+}
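/*
 * Editor's illustrative sketch, not part of the original patch: creating and
 * releasing a cacheable system-memory buffer. ttm_bo_type_kernel is assumed
 * to be one of the enum ttm_bo_type values from ttm_bo_api.h.
 *
 *	struct ttm_buffer_object *bo;
 *	int ret = ttm_buffer_object_create(bdev, 64 * 1024,
 *					   ttm_bo_type_kernel,
 *					   TTM_PL_FLAG_SYSTEM |
 *					   TTM_PL_FLAG_CACHED,
 *					   0, 0, false, NULL, &bo);
 *	if (ret == 0)
 *		ttm_bo_unref(&bo);
 */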
28955+
28956+static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
28957+ uint32_t mem_type, bool allow_errors)
28958+{
28959+ int ret;
28960+
28961+ mutex_lock(&bo->mutex);
28962+
28963+ ret = ttm_bo_expire_sync_obj(bo, allow_errors);
28964+ if (ret)
28965+ goto out;
28966+
28967+ if (bo->mem.mem_type == mem_type)
28968+ ret = ttm_bo_evict(bo, mem_type, false, false);
28969+
28970+ if (ret) {
28971+ if (allow_errors) {
28972+ goto out;
28973+ } else {
28974+ ret = 0;
28975+ printk(KERN_ERR "Cleanup eviction failed\n");
28976+ }
28977+ }
28978+
28979+ out:
28980+ mutex_unlock(&bo->mutex);
28981+ return ret;
28982+}
28983+
28984+static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
28985+ struct list_head *head,
28986+ unsigned mem_type, bool allow_errors)
28987+{
28988+ struct ttm_buffer_object *entry;
28989+ int ret;
28990+ int put_count;
28991+
28992+ /*
28993+ * Can't use standard list traversal since we're unlocking.
28994+ */
28995+
28996+ spin_lock(&bdev->lru_lock);
28997+
28998+ while (!list_empty(head)) {
28999+ entry = list_first_entry(head, struct ttm_buffer_object, lru);
29000+ kref_get(&entry->list_kref);
29001+ ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
29002+ put_count = ttm_bo_del_from_lru(entry);
29003+ spin_unlock(&bdev->lru_lock);
29004+ while (put_count--)
29005+ kref_put(&entry->list_kref, ttm_bo_ref_bug);
29006+ BUG_ON(ret);
29007+ ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
29008+ ttm_bo_unreserve(entry);
29009+ kref_put(&entry->list_kref, ttm_bo_release_list);
29010+ spin_lock(&bdev->lru_lock);
29011+ }
29012+
29013+ spin_unlock(&bdev->lru_lock);
29014+
29015+ return 0;
29016+}
29017+
29018+int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
29019+{
29020+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
29021+ int ret = -EINVAL;
29022+
29023+ if (mem_type >= TTM_NUM_MEM_TYPES) {
29024+ printk(KERN_ERR "Illegal memory type %d\n", mem_type);
29025+ return ret;
29026+ }
29027+
29028+ if (!man->has_type) {
29029+ printk(KERN_ERR "Trying to take down uninitialized "
29030+ "memory manager type %u\n", mem_type);
29031+ return ret;
29032+ }
29033+
29034+ man->use_type = false;
29035+ man->has_type = false;
29036+
29037+ ret = 0;
29038+ if (mem_type > 0) {
29039+ ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
29040+
29041+ spin_lock(&bdev->lru_lock);
29042+ if (drm_mm_clean(&man->manager)) {
29043+ drm_mm_takedown(&man->manager);
29044+ } else {
29045+ ret = -EBUSY;
29046+ }
29047+ spin_unlock(&bdev->lru_lock);
29048+ }
29049+
29050+ return ret;
29051+}
29052+
29053+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
29054+{
29055+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
29056+
29057+ if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
29058+ printk(KERN_ERR "Illegal memory manager memory type %u.\n",
29059+ mem_type);
29060+ return -EINVAL;
29061+ }
29062+
29063+ if (!man->has_type) {
29064+ printk(KERN_ERR "Memory type %u has not been initialized.\n",
29065+ mem_type);
29066+ return 0;
29067+ }
29068+
29069+ return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
29070+}
29071+
29072+int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
29073+ unsigned long p_offset, unsigned long p_size)
29074+{
29075+ int ret = -EINVAL;
29076+ struct ttm_mem_type_manager *man;
29077+
29078+ if (type >= TTM_NUM_MEM_TYPES) {
29079+ printk(KERN_ERR "Illegal memory type %d\n", type);
29080+ return ret;
29081+ }
29082+
29083+ man = &bdev->man[type];
29084+ if (man->has_type) {
29085+ printk(KERN_ERR
29086+ "Memory manager already initialized for type %d\n",
29087+ type);
29088+ return ret;
29089+ }
29090+
29091+ ret = bdev->driver->init_mem_type(bdev, type, man);
29092+ if (ret)
29093+ return ret;
29094+
29095+ ret = 0;
29096+ if (type != TTM_PL_SYSTEM) {
29097+ if (!p_size) {
29098+ printk(KERN_ERR "Zero size memory manager type %d\n",
29099+ type);
29100+ return ret;
29101+ }
29102+ ret = drm_mm_init(&man->manager, p_offset, p_size);
29103+ if (ret)
29104+ return ret;
29105+ }
29106+ man->has_type = true;
29107+ man->use_type = true;
29108+ man->size = p_size;
29109+
29110+ INIT_LIST_HEAD(&man->lru);
29111+
29112+ return 0;
29113+}
29114+
29115+int ttm_bo_device_release(struct ttm_bo_device *bdev)
29116+{
29117+ int ret = 0;
29118+ unsigned i = TTM_NUM_MEM_TYPES;
29119+ struct ttm_mem_type_manager *man;
29120+
29121+ while (i--) {
29122+ man = &bdev->man[i];
29123+ if (man->has_type) {
29124+ man->use_type = false;
29125+ if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
29126+ ret = -EBUSY;
29127+ printk(KERN_ERR "DRM memory manager type %d "
29128+ "is not clean.\n", i);
29129+ }
29130+ man->has_type = false;
29131+ }
29132+ }
29133+
29134+ if (!cancel_delayed_work(&bdev->wq))
29135+ flush_scheduled_work();
29136+
29137+ while (ttm_bo_delayed_delete(bdev, true)) ;
29138+
29139+ spin_lock(&bdev->lru_lock);
29140+ if (list_empty(&bdev->ddestroy))
29141+ TTM_DEBUG("Delayed destroy list was clean\n");
29142+
29143+ if (list_empty(&bdev->man[0].lru))
29144+ TTM_DEBUG("Swap list was clean\n");
29145+ spin_unlock(&bdev->lru_lock);
29146+
29147+ ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
29148+ BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
29149+ write_lock(&bdev->vm_lock);
29150+ drm_mm_takedown(&bdev->addr_space_mm);
29151+ write_unlock(&bdev->vm_lock);
29152+
29153+ __free_page(bdev->dummy_read_page);
29154+ return ret;
29155+}
29156+
29157+/*
29158+ * This function is intended to be called on drm driver load.
29159+ * If you decide to call it from firstopen, you must protect the call
29160+ * from a potentially racing ttm_bo_driver_finish in lastclose.
29161+ * (This may happen on X server restart).
29162+ */
29163+
29164+int ttm_bo_device_init(struct ttm_bo_device *bdev,
29165+ struct ttm_mem_global *mem_glob,
29166+ struct ttm_bo_driver *driver, uint64_t file_page_offset)
29167+{
29168+ int ret = -EINVAL;
29169+
29170+ bdev->dummy_read_page = NULL;
29171+ rwlock_init(&bdev->vm_lock);
29172+ spin_lock_init(&bdev->lru_lock);
29173+
29174+ bdev->driver = driver;
29175+ bdev->mem_glob = mem_glob;
29176+
29177+ memset(bdev->man, 0, sizeof(bdev->man));
29178+
29179+ bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
29180+ if (unlikely(bdev->dummy_read_page == NULL)) {
29181+ ret = -ENOMEM;
29182+ goto out_err0;
29183+ }
29184+
29185+ /*
29186+ * Initialize the system memory buffer type.
29187+ * Other types need to be driver / IOCTL initialized.
29188+ */
29189+ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
29190+ if (unlikely(ret != 0))
29191+ goto out_err1;
29192+
29193+ bdev->addr_space_rb = RB_ROOT;
29194+ ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
29195+ if (unlikely(ret != 0))
29196+ goto out_err2;
29197+
29198+ INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
29199+ bdev->nice_mode = true;
29200+ INIT_LIST_HEAD(&bdev->ddestroy);
29201+ INIT_LIST_HEAD(&bdev->swap_lru);
29202+ bdev->dev_mapping = NULL;
29203+ ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
29204+ ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
29205+ if (unlikely(ret != 0)) {
29206+ printk(KERN_ERR "Could not register buffer object swapout.\n");
29207+ goto out_err2;
29208+ }
29209+ return 0;
29210+ out_err2:
29211+ ttm_bo_clean_mm(bdev, 0);
29212+ out_err1:
29213+ __free_page(bdev->dummy_read_page);
29214+ out_err0:
29215+ return ret;
29216+}
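/*
 * Editor's illustrative sketch, not part of the original patch: typical
 * driver-load ordering. ttm_bo_device_init() only sets up TTM_PL_SYSTEM;
 * aperture types are added afterwards with ttm_bo_init_mm(). my_bo_driver,
 * MY_FILE_PAGE_OFFSET, TTM_PL_TT and aperture_pages are hypothetical
 * driver-side names.
 *
 *	ret = ttm_bo_device_init(bdev, mem_glob, &my_bo_driver,
 *				 MY_FILE_PAGE_OFFSET);
 *	if (ret == 0)
 *		ret = ttm_bo_init_mm(bdev, TTM_PL_TT, 0, aperture_pages);
 */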
29217+
29218+/*
29219+ * buffer object vm functions.
29220+ */
29221+
29222+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
29223+{
29224+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
29225+
29226+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
29227+ if (mem->mem_type == TTM_PL_SYSTEM)
29228+ return false;
29229+
29230+ if (man->flags & TTM_MEMTYPE_FLAG_CMA)
29231+ return false;
29232+
29233+ if (mem->flags & TTM_PL_FLAG_CACHED)
29234+ return false;
29235+ }
29236+ return true;
29237+}
29238+
29239+int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
29240+ struct ttm_mem_reg *mem,
29241+ unsigned long *bus_base,
29242+ unsigned long *bus_offset, unsigned long *bus_size)
29243+{
29244+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
29245+
29246+ *bus_size = 0;
29247+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
29248+ return -EINVAL;
29249+
29250+ if (ttm_mem_reg_is_pci(bdev, mem)) {
29251+ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
29252+ *bus_size = mem->num_pages << PAGE_SHIFT;
29253+ *bus_base = man->io_offset;
29254+ }
29255+
29256+ return 0;
29257+}
29258+
29259+/**
29260+ * \c Kill all user-space virtual mappings of this buffer object.
29261+ *
29262+ * \param bo The buffer object.
29263+ *
29264+ * Call bo->mutex locked.
29265+ */
29266+
29267+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
29268+{
29269+ struct ttm_bo_device *bdev = bo->bdev;
29270+ loff_t offset = (loff_t) bo->addr_space_offset;
29271+ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
29272+
29273+ if (!bdev->dev_mapping)
29274+ return;
29275+
29276+ unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
29277+}
29278+
29279+static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
29280+{
29281+ struct ttm_bo_device *bdev = bo->bdev;
29282+ struct rb_node **cur = &bdev->addr_space_rb.rb_node;
29283+ struct rb_node *parent = NULL;
29284+ struct ttm_buffer_object *cur_bo;
29285+ unsigned long offset = bo->vm_node->start;
29286+ unsigned long cur_offset;
29287+
29288+ while (*cur) {
29289+ parent = *cur;
29290+ cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
29291+ cur_offset = cur_bo->vm_node->start;
29292+ if (offset < cur_offset)
29293+ cur = &parent->rb_left;
29294+ else if (offset > cur_offset)
29295+ cur = &parent->rb_right;
29296+ else
29297+ BUG();
29298+ }
29299+
29300+ rb_link_node(&bo->vm_rb, parent, cur);
29301+ rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
29302+}
29303+
29304+/**
29305+ * ttm_bo_setup_vm:
29306+ *
29307+ * @bo: the buffer to allocate address space for
29308+ *
29309+ * Allocate address space in the drm device so that applications
29310+ * can mmap the buffer and access the contents. This only
29311+ * applies to ttm_bo_type_device objects as others are not
29312+ * placed in the drm device address space.
29313+ */
29314+
29315+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
29316+{
29317+ struct ttm_bo_device *bdev = bo->bdev;
29318+ int ret;
29319+
29320+ retry_pre_get:
29321+ ret = drm_mm_pre_get(&bdev->addr_space_mm);
29322+ if (unlikely(ret != 0))
29323+ return ret;
29324+
29325+ write_lock(&bdev->vm_lock);
29326+ bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
29327+ bo->mem.num_pages, 0, 0);
29328+
29329+ if (unlikely(bo->vm_node == NULL)) {
29330+ ret = -ENOMEM;
29331+ goto out_unlock;
29332+ }
29333+
29334+ bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
29335+ bo->mem.num_pages, 0);
29336+
29337+ if (unlikely(bo->vm_node == NULL)) {
29338+ write_unlock(&bdev->vm_lock);
29339+ goto retry_pre_get;
29340+ }
29341+
29342+ ttm_bo_vm_insert_rb(bo);
29343+ write_unlock(&bdev->vm_lock);
29344+ bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
29345+
29346+ return 0;
29347+ out_unlock:
29348+ write_unlock(&bdev->vm_lock);
29349+ return ret;
29350+}
29351+
29352+int ttm_bo_wait(struct ttm_buffer_object *bo,
29353+ bool lazy, bool interruptible, bool no_wait)
29354+{
29355+ struct ttm_bo_driver *driver = bo->bdev->driver;
29356+ void *sync_obj;
29357+ void *sync_obj_arg;
29358+ int ret = 0;
29359+
29360+ while (bo->sync_obj) {
29361+ if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
29362+ driver->sync_obj_unref(&bo->sync_obj);
29363+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
29364+ goto out;
29365+ }
29366+ if (no_wait) {
29367+ ret = -EBUSY;
29368+ goto out;
29369+ }
29370+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
29371+ sync_obj_arg = bo->sync_obj_arg;
29372+ mutex_unlock(&bo->mutex);
29373+ ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
29374+ lazy, interruptible);
29375+
29376+ mutex_lock(&bo->mutex);
29377+ if (unlikely(ret != 0)) {
29378+ driver->sync_obj_unref(&sync_obj);
29379+ return ret;
29380+ }
29381+
29382+ if (bo->sync_obj == sync_obj) {
29383+ driver->sync_obj_unref(&bo->sync_obj);
29384+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
29385+ }
29386+ driver->sync_obj_unref(&sync_obj);
29387+ }
29388+ out:
29389+ return ret;
29390+}
29391+
29392+void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
29393+{
29394+ atomic_set(&bo->reserved, 0);
29395+ wake_up_all(&bo->event_queue);
29396+}
29397+
29398+int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
29399+ bool no_wait)
29400+{
29401+ int ret;
29402+
29403+ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
29404+ if (no_wait)
29405+ return -EBUSY;
29406+ else if (interruptible) {
29407+ ret = wait_event_interruptible
29408+ (bo->event_queue, atomic_read(&bo->reserved) == 0);
29409+ if (unlikely(ret != 0))
29410+ return -ERESTART;
29411+ } else {
29412+ wait_event(bo->event_queue,
29413+ atomic_read(&bo->reserved) == 0);
29414+ }
29415+ }
29416+ return 0;
29417+}
29418+
29419+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
29420+{
29421+ int ret = 0;
29422+
29423+ /*
29424+ * Using ttm_bo_reserve instead of ttm_bo_block_reservation
29425+ * makes sure the lru lists are updated.
29426+ */
29427+
29428+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
29429+ if (unlikely(ret != 0))
29430+ return ret;
29431+ mutex_lock(&bo->mutex);
29432+ ret = ttm_bo_wait(bo, false, true, no_wait);
29433+ if (unlikely(ret != 0))
29434+ goto out_err0;
29435+ atomic_inc(&bo->cpu_writers);
29436+ out_err0:
29437+ mutex_unlock(&bo->mutex);
29438+ ttm_bo_unreserve(bo);
29439+ return ret;
29440+}
29441+
29442+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
29443+{
29444+ if (atomic_dec_and_test(&bo->cpu_writers))
29445+ wake_up_all(&bo->event_queue);
29446+}
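/*
 * Editor's illustrative sketch, not part of the original patch: bracketing
 * CPU writes so that swapout and eviction wait for the writer to finish.
 *
 *	if (ttm_bo_synccpu_write_grab(bo, false) == 0) {
 *		(write through a CPU mapping of bo here)
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 */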
29447+
29448+/**
29449+ * A buffer object shrink method that tries to swap out the first
29450+ * buffer object on the ttm_bo_device::swap_lru list.
29451+ */
29452+
29453+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
29454+{
29455+ struct ttm_bo_device *bdev =
29456+ container_of(shrink, struct ttm_bo_device, shrink);
29457+ struct ttm_buffer_object *bo;
29458+ int ret = -EBUSY;
29459+ int put_count;
29460+ uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
29461+
29462+ spin_lock(&bdev->lru_lock);
29463+ while (ret == -EBUSY) {
29464+ if (unlikely(list_empty(&bdev->swap_lru))) {
29465+ spin_unlock(&bdev->lru_lock);
29466+ return -EBUSY;
29467+ }
29468+
29469+ bo = list_first_entry(&bdev->swap_lru,
29470+ struct ttm_buffer_object, swap);
29471+ kref_get(&bo->list_kref);
29472+
29473+ /**
29474+ * Reserve buffer. Since we unlock while sleeping, we need
29475+ * to re-check that nobody removed us from the swap-list while
29476+ * we slept.
29477+ */
29478+
29479+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
29480+ if (unlikely(ret == -EBUSY)) {
29481+ spin_unlock(&bdev->lru_lock);
29482+ ttm_bo_wait_unreserved(bo, false);
29483+ kref_put(&bo->list_kref, ttm_bo_release_list);
29484+ spin_lock(&bdev->lru_lock);
29485+ }
29486+ }
29487+
29488+ BUG_ON(ret != 0);
29489+ put_count = ttm_bo_del_from_lru(bo);
29490+ spin_unlock(&bdev->lru_lock);
29491+
29492+ while (put_count--)
29493+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
29494+
29495+ /**
29496+ * Wait for GPU, then move to system cached.
29497+ */
29498+
29499+ mutex_lock(&bo->mutex);
29500+ ret = ttm_bo_wait(bo, false, false, false);
29501+ if (unlikely(ret != 0))
29502+ goto out;
29503+
29504+ if ((bo->mem.flags & swap_placement) != swap_placement) {
29505+ struct ttm_mem_reg evict_mem;
29506+
29507+ evict_mem = bo->mem;
29508+ evict_mem.mm_node = NULL;
29509+ evict_mem.proposed_flags =
29510+ TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
29511+ evict_mem.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
29512+ evict_mem.mem_type = TTM_PL_SYSTEM;
29513+
29514+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, false, false);
29515+ if (unlikely(ret != 0))
29516+ goto out;
29517+ }
29518+
29519+ ttm_bo_unmap_virtual(bo);
29520+
29521+ /**
29522+ * Swap out. Buffer will be swapped in again as soon as
29523+ * anyone tries to access a ttm page.
29524+ */
29525+
29526+ ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
29527+ out:
29528+ mutex_unlock(&bo->mutex);
29529+
29530+ /**
29531+ *
29532+ * Unreserve without putting on LRU to avoid swapping out an
29533+ * already swapped buffer.
29534+ */
29535+
29536+ atomic_set(&bo->reserved, 0);
29537+ wake_up_all(&bo->event_queue);
29538+ kref_put(&bo->list_kref, ttm_bo_release_list);
29539+ return ret;
29540+}
29541+
29542+void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
29543+{
29544+ while (ttm_bo_swapout(&bdev->shrink) == 0) ;
29545+}
29546diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h
29547--- a/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h 1969-12-31 16:00:00.000000000 -0800
29548+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h 2009-04-07 13:28:38.000000000 -0700
29549@@ -0,0 +1,859 @@
29550+/**************************************************************************
29551+ *
29552+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
29553+ * All Rights Reserved.
29554+ * Copyright (c) 2009 Vmware, Inc., Palo Alto, CA., USA
29555+ * All Rights Reserved.
29556+ *
29557+ * Permission is hereby granted, free of charge, to any person obtaining a
29558+ * copy of this software and associated documentation files (the
29559+ * "Software"), to deal in the Software without restriction, including
29560+ * without limitation the rights to use, copy, modify, merge, publish,
29561+ * distribute, sub license, and/or sell copies of the Software, and to
29562+ * permit persons to whom the Software is furnished to do so, subject to
29563+ * the following conditions:
29564+ *
29565+ * The above copyright notice and this permission notice (including the
29566+ * next paragraph) shall be included in all copies or substantial portions
29567+ * of the Software.
29568+ *
29569+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29570+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29571+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
29572+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
29573+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
29574+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
29575+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
29576+ *
29577+ **************************************************************************/
29578+/*
29579+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
29580+ */
29581+#ifndef _TTM_BO_DRIVER_H_
29582+#define _TTM_BO_DRIVER_H_
29583+
29584+#include "ttm/ttm_bo_api.h"
29585+#include "ttm/ttm_memory.h"
29586+#include <drm/drm_mm.h>
29587+#include "linux/workqueue.h"
29588+#include "linux/fs.h"
29589+#include "linux/spinlock.h"
29590+
29591+struct ttm_backend;
29592+
29593+struct ttm_backend_func {
29594+ /**
29595+ * struct ttm_backend_func member populate
29596+ *
29597+ * @backend: Pointer to a struct ttm_backend.
29598+ * @num_pages: Number of pages to populate.
29599+ * @pages: Array of pointers to ttm pages.
29600+ * @dummy_read_page: Page to be used instead of NULL pages in the
29601+ * array @pages.
29602+ *
29603+ * Populate the backend with ttm pages. Depending on the backend,
29604+ * it may or may not copy the @pages array.
29605+ */
29606+ int (*populate) (struct ttm_backend * backend,
29607+ unsigned long num_pages, struct page ** pages,
29608+ struct page * dummy_read_page);
29609+ /**
29610+ * struct ttm_backend_func member clear
29611+ *
29612+ * @backend: Pointer to a struct ttm_backend.
29613+ *
29614+ * This is an "unpopulate" function. Release all resources
29615+ * allocated with populate.
29616+ */
29617+ void (*clear) (struct ttm_backend * backend);
29618+
29619+ /**
29620+ * struct ttm_backend_func member bind
29621+ *
29622+ * @backend: Pointer to a struct ttm_backend.
29623+ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
29624+ * memory type and location for binding.
29625+ *
29626+ * Bind the backend pages into the aperture in the location
29627+ * indicated by @bo_mem. This function should be able to handle
29628+ * differences between aperture- and system page sizes.
29629+ */
29630+ int (*bind) (struct ttm_backend * backend, struct ttm_mem_reg * bo_mem);
29631+
29632+ /**
29633+ * struct ttm_backend_func member unbind
29634+ *
29635+ * @backend: Pointer to a struct ttm_backend.
29636+ *
29637+ * Unbind previously bound backend pages. This function should be
29638+ * able to handle differences between aperture- and system page sizes.
29639+ */
29640+ int (*unbind) (struct ttm_backend * backend);
29641+
29642+ /**
29643+ * struct ttm_backend_func member destroy
29644+ *
29645+ * @backend: Pointer to a struct ttm_backend.
29646+ *
29647+ * Destroy the backend.
29648+ */
29649+ void (*destroy) (struct ttm_backend * backend);
29650+};
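/*
 * Editor's illustrative sketch, not part of the original patch: a driver
 * normally provides one static ttm_backend_func and returns backends that
 * use it from its create_ttm_backend_entry() hook. The my_* callbacks are
 * hypothetical.
 *
 *	static struct ttm_backend_func my_backend_func = {
 *		.populate = my_populate,
 *		.clear	  = my_clear,
 *		.bind	  = my_bind,
 *		.unbind	  = my_unbind,
 *		.destroy  = my_destroy,
 *	};
 */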
29651+
29652+/**
29653+ * struct ttm_backend
29654+ *
29655+ * @bdev: Pointer to a struct ttm_bo_device.
29656+ * @flags: For driver use.
29657+ * @func: Pointer to a struct ttm_backend_func that describes
29658+ * the backend methods.
29659+ *
29660+ */
29661+
29662+struct ttm_backend {
29663+ struct ttm_bo_device *bdev;
29664+ uint32_t flags;
29665+ struct ttm_backend_func *func;
29666+};
29667+
29668+#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
29669+#define TTM_PAGE_FLAG_USER (1 << 1)
29670+#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
29671+#define TTM_PAGE_FLAG_WRITE (1 << 3)
29672+#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
29673+#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
29674+
29675+enum ttm_caching_state {
29676+ tt_uncached,
29677+ tt_wc,
29678+ tt_cached
29679+};
29680+
29681+/**
29682+ * struct ttm_tt
29683+ *
29684+ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
29685+ * pointer.
29686+ * @pages: Array of pages backing the data.
29687+ * @first_himem_page: Himem pages are put last in the page array, which
29688+ * enables us to run caching attribute changes on only the first part
29689+ * of the page array containing lomem pages. This is the index of the
29690+ * first himem page.
29691+ * @last_lomem_page: Index of the last lomem page in the page array.
29692+ * @num_pages: Number of pages in the page array.
29693+ * @bdev: Pointer to the current struct ttm_bo_device.
29694+ * @be: Pointer to the ttm backend.
29695+ * @tsk: The task for user ttm.
29696+ * @start: virtual address for user ttm.
29697+ * @swap_storage: Pointer to shmem struct file for swap storage.
29698+ * @caching_state: The current caching state of the pages.
29699+ * @state: The current binding state of the pages.
29700+ *
29701+ * This is a structure holding the pages, caching- and aperture binding
29702+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
29703+ * memory.
29704+ */
29705+
29706+struct ttm_tt {
29707+ struct page *dummy_read_page;
29708+ struct page **pages;
29709+ long first_himem_page;
29710+ long last_lomem_page;
29711+ uint32_t page_flags;
29712+ unsigned long num_pages;
29713+ struct ttm_bo_device *bdev;
29714+ struct ttm_backend *be;
29715+ struct task_struct *tsk;
29716+ unsigned long start;
29717+ struct file *swap_storage;
29718+ enum ttm_caching_state caching_state;
29719+ enum {
29720+ tt_bound,
29721+ tt_unbound,
29722+ tt_unpopulated,
29723+ } state;
29724+};
29725+
29726+#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
29727+#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
29728+#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
29729+ before kernel access. */
29730+#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
29731+
29732+/**
29733+ * struct ttm_mem_type_manager
29734+ *
29735+ * @has_type: The memory type has been initialized.
29736+ * @use_type: The memory type is enabled.
29737+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
29738+ * managed by this memory type.
29739+ * @gpu_offset: If used, the GPU offset of the first managed page of
29740+ * fixed memory or the first managed location in an aperture.
29741+ * @io_offset: The io_offset of the first managed page of IO memory or
29742+ * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
29743+ * memory, this should be set to 0.
29744+ * @io_size: The size of a managed IO region (fixed memory or aperture).
29745+ * @io_addr: Virtual kernel address if the io region is pre-mapped. For
29746+ * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
29747+ * @io_addr should be set to NULL.
29748+ * @size: Size of the managed region.
29749+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
29750+ * as defined in ttm_placement_common.h
29751+ * @default_caching: The default caching policy used for a buffer object
29752+ * placed in this memory type if the user doesn't provide one.
29753+ * @manager: The range manager used for this memory type. FIXME: If the aperture
29754+ * has a page size different from the underlying system, the granularity
29755+ * of this manager should take care of this. But the range allocating code
29756+ * in ttm_bo.c needs to be modified for this.
29757+ * @lru: The lru list for this memory type.
29758+ *
29759+ * This structure is used to identify and manage memory types for a device.
29760+ * It's set up by the ttm_bo_driver::init_mem_type method.
29761+ */
29762+
29763+struct ttm_mem_type_manager {
29764+
29765+ /*
29766+ * No protection. Constant from start.
29767+ */
29768+
29769+ bool has_type;
29770+ bool use_type;
29771+ uint32_t flags;
29772+ unsigned long gpu_offset;
29773+ unsigned long io_offset;
29774+ unsigned long io_size;
29775+ void *io_addr;
29776+ uint64_t size;
29777+ uint32_t available_caching;
29778+ uint32_t default_caching;
29779+
29780+ /*
29781+ * Protected by the bdev->lru_lock.
29782+ * TODO: Consider one lru_lock per ttm_mem_type_manager.
29783+ * Plays ill with list removal, though.
29784+ */
29785+
29786+ struct drm_mm manager;
29787+ struct list_head lru;
29788+};
29789+
29790+/**
29791+ * struct ttm_bo_driver
29792+ *
29793+ * @mem_type_prio: Priority array of memory types to place a buffer object in
29794+ * if it fits without evicting buffers from any of these memory types.
29795+ * @mem_busy_prio: Priority array of memory types to place a buffer object in
29796+ * if it needs to evict buffers to make room.
29797+ * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
29798+ * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
29799+ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
29800+ * @invalidate_caches: Callback to invalidate read caches when a buffer object
29801+ * has been evicted.
29802+ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager structure.
29803+ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
29804+ * @move: Callback for a driver to hook in accelerated functions to move a buffer.
29805+ * If set to NULL, a potentially slow memcpy() move is used.
29806+ * @sync_obj_signaled: See ttm_fence_api.h
29807+ * @sync_obj_wait: See ttm_fence_api.h
29808+ * @sync_obj_flush: See ttm_fence_api.h
29809+ * @sync_obj_unref: See ttm_fence_api.h
29810+ * @sync_obj_ref: See ttm_fence_api.h
29811+ */
29812+
29813+struct ttm_bo_driver {
29814+ const uint32_t *mem_type_prio;
29815+ const uint32_t *mem_busy_prio;
29816+ uint32_t num_mem_type_prio;
29817+ uint32_t num_mem_busy_prio;
29818+
29819+ /**
29820+ * struct ttm_bo_driver member create_ttm_backend_entry
29821+ *
29822+ * @bdev: The buffer object device.
29823+ *
29824+ * Create a driver specific struct ttm_backend.
29825+ */
29826+
29827+ struct ttm_backend *(*create_ttm_backend_entry)
29828+ (struct ttm_bo_device * bdev);
29829+
29830+ /**
29831+ * struct ttm_bo_driver member invalidate_caches
29832+ *
29833+ * @bdev: the buffer object device.
29834+ * @flags: new placement of the rebound buffer object.
29835+ *
29836+ * A previously evicted buffer has been rebound in a
29837+ * potentially new location. Tell the driver that it might
29838+ * consider invalidating read (texture) caches on the next command
29839+ * submission as a consequence.
29840+ */
29841+
29842+ int (*invalidate_caches) (struct ttm_bo_device * bdev, uint32_t flags);
29843+ int (*init_mem_type) (struct ttm_bo_device * bdev, uint32_t type,
29844+ struct ttm_mem_type_manager * man);
29845+ /**
29846+ * struct ttm_bo_driver member evict_flags:
29847+ *
29848+ * @bo: the buffer object to be evicted
29849+ *
29850+ * Return the bo flags for a buffer which is not mapped to the hardware.
29851+ * These will be placed in proposed_flags so that when the move is
29852+ * finished, they'll end up in bo->mem.flags
29853+ */
29854+
29855+ uint32_t(*evict_flags) (struct ttm_buffer_object * bo);
29856+ /**
29857+ * struct ttm_bo_driver member move:
29858+ *
29859+ * @bo: the buffer to move
29860+ * @evict: whether this motion is evicting the buffer from
29861+ * the graphics address space
29862+ * @interruptible: Use interruptible sleeps if possible when sleeping.
29863+ * @no_wait: whether this should give up and return -EBUSY
29864+ * if this move would require sleeping
29865+ * @new_mem: the new memory region receiving the buffer
29866+ *
29867+ * Move a buffer between two memory regions.
29868+ */
29869+ int (*move) (struct ttm_buffer_object * bo,
29870+ bool evict, bool interruptible,
29871+ bool no_wait, struct ttm_mem_reg * new_mem);
29872+
29873+ /**
29874+ * struct ttm_bo_driver_member verify_access
29875+ *
29876+ * @bo: Pointer to a buffer object.
29877+ * @filp: Pointer to a struct file trying to access the object.
29878+ *
29879+ * Called from the map / write / read methods to verify that the
29880+ * caller is permitted to access the buffer object.
29881+ * This member may be set to NULL, which will refuse this kind of
29882+ * access for all buffer objects.
29883+ * This function should return 0 if access is granted, -EPERM otherwise.
29884+ */
29885+ int (*verify_access) (struct ttm_buffer_object * bo,
29886+ struct file * filp);
29887+
29888+ /**
29889+ * In case a driver writer dislikes the TTM fence objects,
29890+ * the driver writer can replace those with sync objects of
29891+ * his / her own. If it turns out that no driver writer is
29892+ * using these, I suggest we remove these hooks and plug in
29893+ * fences directly. For the functionality the bo driver needs,
29894+ * see the corresponding functions in the fence object API
29895+ * documentation.
29896+ */
29897+
29898+ bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
29899+ int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
29900+ bool lazy, bool interruptible);
29901+ int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
29902+ void (*sync_obj_unref) (void **sync_obj);
29903+ void *(*sync_obj_ref) (void *sync_obj);
29904+};
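/*
 * Editor's illustrative sketch, not part of the original patch: the priority
 * arrays consumed by ttm_bo_mem_space() in ttm_bo.c. my_prios, my_bo_driver
 * and TTM_PL_TT are assumed driver-side / placement names.
 *
 *	static const uint32_t my_prios[] = { TTM_PL_TT, TTM_PL_SYSTEM };
 *
 *	static struct ttm_bo_driver my_bo_driver = {
 *		.mem_type_prio	   = my_prios,
 *		.mem_busy_prio	   = my_prios,
 *		.num_mem_type_prio = ARRAY_SIZE(my_prios),
 *		.num_mem_busy_prio = ARRAY_SIZE(my_prios),
 *	};
 */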
29905+
29906+#define TTM_NUM_MEM_TYPES 10
29907+
29908+#define TTM_BO_PRIV_FLAG_EVICTED (1 << 0) /* Buffer object is evicted. */
29909+#define TTM_BO_PRIV_FLAG_MOVING (1 << 1) /* Buffer object is moving and needs
29910+ idling before CPU mapping */
29911+/**
29912+ * struct ttm_bo_device - Buffer object driver device-specific data.
29913+ *
29914+ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
29915+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
29916+ * @count: Current number of buffer objects.
29917+ * @pages: Current number of pinned pages.
29918+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
29919+ * of unpopulated pages.
29920+ * @shrink: A shrink callback object used for buffer object swap.
29921+ * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
29922+ * used by a buffer object. This is excluding page arrays and backing pages.
29923+ * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
29924+ * @man: An array of mem_type_managers.
29925+ * @addr_space_mm: Range manager for the device address space.
29926+ * @lru_lock: Spinlock that protects the buffer+device lru lists and
29927+ * ddestroy lists.
29928+ * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
29929+ * If a GPU lockup has been detected, this is forced to 0.
29930+ * @dev_mapping: A pointer to the struct address_space representing the
29931+ * device address space.
29932+ * @wq: Work queue structure for the delayed delete workqueue.
29933+ *
29934+ */
29935+
29936+struct ttm_bo_device {
29937+
29938+ /*
29939+ * Constant after bo device init / atomic.
29940+ */
29941+
29942+ struct ttm_mem_global *mem_glob;
29943+ struct ttm_bo_driver *driver;
29944+ struct page *dummy_read_page;
29945+ struct ttm_mem_shrink shrink;
29946+
29947+ size_t ttm_bo_extra_size;
29948+ size_t ttm_bo_size;
29949+
29950+ rwlock_t vm_lock;
29951+ /*
29952+ * Protected by the vm lock.
29953+ */
29954+ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
29955+ struct rb_root addr_space_rb;
29956+ struct drm_mm addr_space_mm;
29957+
29958+ /*
29959+ * Might want to change this to one lock per manager.
29960+ */
29961+ spinlock_t lru_lock;
29962+ /*
29963+ * Protected by the lru lock.
29964+ */
29965+ struct list_head ddestroy;
29966+ struct list_head swap_lru;
29967+
29968+ /*
29969+ * Protected by load / firstopen / lastclose /unload sync.
29970+ */
29971+
29972+ bool nice_mode;
29973+ struct address_space *dev_mapping;
29974+
29975+ /*
29976+ * Internal protection.
29977+ */
29978+
29979+ struct delayed_work wq;
29980+};
29981+
29982+/**
29983+ * ttm_flag_masked
29984+ *
29985+ * @old: Pointer to the result and original value.
29986+ * @new: New value of bits.
29987+ * @mask: Mask of bits to change.
29988+ *
29989+ * Convenience function to change a number of bits identified by a mask.
29990+ */
29991+
29992+static inline uint32_t
29993+ttm_flag_masked(uint32_t * old, uint32_t new, uint32_t mask)
29994+{
29995+ *old ^= (*old ^ new) & mask;
29996+ return *old;
29997+}
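/*
 * Editor's illustrative note, not part of the original patch: only the bits
 * selected by @mask change, e.g. switching a buffer's caching bits while
 * leaving its memory-type bits untouched:
 *
 *	uint32_t flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
 *
 *	ttm_flag_masked(&flags, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
 *	(flags now holds TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_WC)
 */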
29998+
29999+/**
30000+ * ttm_tt_create
30001+ *
30002+ * @bdev: Pointer to a struct ttm_bo_device.
30003+ * @size: Size of the data that needs backing.
30004+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
30005+ * @dummy_read_page: See struct ttm_bo_device.
30006+ *
30007+ * Create a struct ttm_tt to back data with system memory pages.
30008+ * No pages are actually allocated.
30009+ * Returns:
30010+ * NULL: Out of memory.
30011+ */
30012+extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
30013+ unsigned long size,
30014+ uint32_t page_flags,
30015+ struct page *dummy_read_page);
30016+
30017+/**
30018+ * ttm_tt_set_user:
30019+ *
30020+ * @ttm: The struct ttm_tt to populate.
30021+ * @tsk: A struct task_struct for which @start is a valid user-space address.
30022+ * @start: A valid user-space address.
30023+ * @num_pages: Size in pages of the user memory area.
30024+ *
30025+ * Populate a struct ttm_tt with a user-space memory area after first pinning
30026+ * the pages backing it.
30027+ * Returns:
30028+ * !0: Error.
30029+ */
30030+
30031+extern int ttm_tt_set_user(struct ttm_tt *ttm,
30032+ struct task_struct *tsk,
30033+ unsigned long start, unsigned long num_pages);
30034+
30035+/**
30036+ * ttm_tt_bind:
30037+ *
30038+ * @ttm: The struct ttm_tt containing backing pages.
30039+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
30040+ *
30041+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
30042+ */
30043+extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
30044+
30045+/**
30046+ * ttm_tt_destroy:
30047+ *
30048+ * @ttm: The struct ttm_tt.
30049+ *
30050+ * Unbind, unpopulate and destroy a struct ttm_tt.
30051+ */
30052+extern void ttm_tt_destroy(struct ttm_tt *ttm);
30053+
30054+/**
30055+ * ttm_tt_unbind:
30056+ *
30057+ * @ttm: The struct ttm_tt.
30058+ *
30059+ * Unbind a struct ttm_tt.
30060+ */
30061+extern void ttm_tt_unbind(struct ttm_tt *ttm);
30062+
30063+/**
30064+ * ttm_tt_get_page:
30065+ *
30066+ * @ttm: The struct ttm_tt.
30067+ * @index: Index of the desired page.
30068+ *
30069+ * Return a pointer to the struct page backing @ttm at page
30070+ * index @index. If the page is unpopulated, one will be allocated to
30071+ * populate that index.
30072+ *
30073+ * Returns:
30074+ * NULL on OOM.
30075+ */
30076+extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
30077+
30078+/**
30079+ * ttm_tt_cache_flush:
30080+ *
30081+ * @pages: An array of pointers to the struct pages to flush.
30082+ * @num_pages: Number of pages to flush.
30083+ *
30084+ * Flush the data of the indicated pages from the cpu caches.
30085+ * This is used when changing caching attributes of the pages from
30086+ * cache-coherent.
30087+ */
30088+extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
30089+
30090+/**
30091+ * ttm_tt_set_placement_caching:
30092+ *
30093+ * @ttm: A struct ttm_tt whose backing pages will change caching policy.
30094+ * @placement: Flag indicating the desired caching policy.
30095+ *
30096+ * This function will change caching policy of any default kernel mappings of
30097+ * the pages backing @ttm. If changing from cached to uncached or write-combined,
30098+ * all CPU caches will first be flushed to make sure the data of the pages
30099+ * hit RAM. This function may be very costly as it involves global TLB
30100+ * and cache flushes and potential page splitting / combining.
30101+ */
30102+extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
30103+extern int ttm_tt_swapout(struct ttm_tt *ttm,
30104+ struct file *persistant_swap_storage);
30105+
30106+/*
30107+ * ttm_bo.c
30108+ */
30109+
30110+/**
30111+ * ttm_mem_reg_is_pci
30112+ *
30113+ * @bdev: Pointer to a struct ttm_bo_device.
30114+ * @mem: A valid struct ttm_mem_reg.
30115+ *
30116+ * Returns true if the memory described by @mem is PCI memory,
30117+ * false otherwise.
30118+ */
30119+extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
30120+ struct ttm_mem_reg *mem);
30121+
30122+/**
30123+ * ttm_bo_mem_space
30124+ *
30125+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
30126+ * we want to allocate space for.
30127+ * @mem: A struct ttm_mem_reg with the struct ttm_mem_reg::proposed_flags set
30128+ * up.
30129+ * @interruptible: Sleep interruptibly when waiting for space.
30130+ * @no_wait: Don't sleep waiting for space to become available.
30131+ *
30132+ * Allocate memory space for the buffer object pointed to by @bo, using
30133+ * the placement flags in @mem, potentially evicting other idle buffer objects.
30134+ * This function may sleep while waiting for space to become available.
30135+ * Returns:
30136+ * -EBUSY: No space available (only if no_wait == 1).
30137+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
30138+ * fragmentation or concurrent allocators.
30139+ * -ERESTART: An interruptible sleep was interrupted by a signal.
30140+ */
30141+extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
30142+ struct ttm_mem_reg *mem,
30143+ bool interruptible, bool no_wait);
30144+/**
30145+ * ttm_bo_wait_for_cpu
30146+ *
30147+ * @bo: Pointer to a struct ttm_buffer_object.
30148+ * @no_wait: Don't sleep while waiting.
30149+ *
30150+ * Wait until a buffer object is no longer sync'ed for CPU access.
30151+ * Returns:
30152+ * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
30153+ * -ERESTART: An interruptible sleep was interrupted by a signal.
30154+ */
30155+
30156+extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
30157+
30158+/**
30159+ * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
30160+ *
30161+ * @bdev: Pointer to the struct ttm_bo_device.
30162+ * @mem: The struct ttm_mem_reg whose placement to query.
30163+ * @bus_base: On return, the base of the PCI region.
30164+ * @bus_offset: On return, the byte offset into the PCI region.
30165+ * @bus_size: On return, the byte size of the buffer object, or zero if the buffer object memory is not accessible through a PCI region.
30166+ *
30167+ * Returns:
30168+ * -EINVAL if the buffer object is currently not mappable.
30169+ * 0 otherwise.
30170+ */
30171+
30172+extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
30173+ struct ttm_mem_reg *mem,
30174+ unsigned long *bus_base,
30175+ unsigned long *bus_offset,
30176+ unsigned long *bus_size);
30177+
30178+extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
30179+
30180+/**
30181+ * ttm_bo_device_init
30182+ *
30183+ * @bdev: A pointer to a struct ttm_bo_device to initialize.
30184+ * @mem_global: A pointer to an initialized struct ttm_mem_global.
30185+ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
30186+ * @file_page_offset: Offset into the device address space that is available
30187+ * for buffer data. This ensures compatibility with other users of the
30188+ * address space.
30189+ *
30190+ * Initializes a struct ttm_bo_device.
30191+ * Returns:
30192+ * !0: Failure.
30193+ */
30194+extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
30195+ struct ttm_mem_global *mem_glob,
30196+ struct ttm_bo_driver *driver,
30197+ uint64_t file_page_offset);
30198+
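A minimal initialization sketch, assuming the caller already has an initialized struct ttm_mem_global and a filled-in struct ttm_bo_driver; the file_page_offset value below is an arbitrary placeholder for the part of the device address space set aside for buffer data.

#include "ttm/ttm_bo_driver.h"

static struct ttm_bo_driver example_bo_driver; /* driver callbacks omitted */

static int example_bo_device_setup(struct ttm_bo_device *bdev,
                                   struct ttm_mem_global *glob)
{
        /* 0x10000000 is a made-up offset; a real driver picks a value
         * that does not collide with other users of the mapping space. */
        return ttm_bo_device_init(bdev, glob, &example_bo_driver,
                                  0x10000000ULL);
}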
30199+/**
30200+ * ttm_bo_reserve:
30201+ *
30202+ * @bo: A pointer to a struct ttm_buffer_object.
30203+ * @interruptible: Sleep interruptible if waiting.
30204+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
30205+ * @use_sequence: If @bo is already reserved, only sleep waiting for
30206+ * it to become unreserved if @sequence < (@bo)->sequence.
30207+ *
30208+ * Locks a buffer object for validation (or prevents other processes from
30209+ * locking it for validation) and removes it from the lru lists, while taking
30210+ * a number of measures to prevent deadlocks.
30211+ *
30212+ * Deadlocks may occur when two processes try to reserve multiple buffers in
30213+ * different orders, either deliberately or as a result of a buffer being evicted
30214+ * to make room for a buffer already reserved. (Buffers are reserved before
30215+ * they are evicted.) The following algorithm prevents such deadlocks from
30216+ * occurring:
30217+ * 1) Buffers are reserved with the lru spinlock held. Upon successful
30218+ * reservation they are removed from the lru list. This stops a reserved buffer
30219+ * from being evicted. However the lru spinlock is released between the time
30220+ * a buffer is selected for eviction and the time it is reserved.
30221+ * Therefore a check is made when a buffer is reserved for eviction, that it
30222+ * is still the first buffer in the lru list, before it is removed from the
30223+ * list. @check_lru == 1 forces this check. If it fails, the function returns
30224+ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
30225+ * the procedure.
30226+ * 2) Processes attempting to reserve multiple buffers other than for eviction,
30227+ * (typically execbuf), should first obtain a unique 32-bit
30228+ * validation sequence number,
30229+ * and call this function with @use_sequence == 1 and @sequence == the unique
30230+ * sequence number. If upon call of this function, the buffer object is already
30231+ * reserved, the validation sequence is checked against the validation
30232+ * sequence of the process currently reserving the buffer,
30233+ * and if the current validation sequence is greater than that of the process
30234+ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
30235+ * waiting for the buffer to become unreserved, after which it retries reserving.
30236+ * The caller should, when receiving an -EAGAIN error,
30237+ * release all its buffer reservations, wait for @bo to become unreserved, and
30238+ * then rerun the validation with the same validation sequence. This procedure
30239+ * will always guarantee that the process with the lowest validation sequence
30240+ * will eventually succeed, preventing both deadlocks and starvation.
30241+ *
30242+ * Returns:
30243+ * -EAGAIN: The reservation may cause a deadlock. Release all buffer reservations,
30244+ * wait for @bo to become unreserved and try again. (only if use_sequence == 1).
30245+ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
30246+ * a signal. Release all buffer reservations and return to user-space.
30247+ */
30248+extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
30249+ bool interruptible,
30250+ bool no_wait, bool use_sequence, uint32_t sequence);
30251+
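The retry discipline described above might look roughly like the sketch below for a caller adding one buffer to a validation set. The function name is hypothetical, and a real caller would also drop its other reservations before waiting, as ttm_eu_reserve_buffers() later in this patch does.

#include "ttm/ttm_bo_driver.h"

static int example_reserve_with_backoff(struct ttm_buffer_object *bo,
                                        uint32_t val_seq)
{
        int ret;

        for (;;) {
                ret = ttm_bo_reserve(bo, true, false, true, val_seq);
                if (ret != -EAGAIN)
                        return ret; /* 0 on success, -ERESTART on a signal */

                /* Deadlock avoidance kicked in: back off, wait for the
                 * buffer to become unreserved, then retry with the same
                 * validation sequence number. */
                ret = ttm_bo_wait_unreserved(bo, true);
                if (unlikely(ret != 0))
                        return ret;
        }
}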
30252+/**
30253+ * ttm_bo_unreserve
30254+ *
30255+ * @bo: A pointer to a struct ttm_buffer_object.
30256+ *
30257+ * Unreserve a previous reservation of @bo.
30258+ */
30259+extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
30260+
30261+/**
30262+ * ttm_bo_wait_unreserved
30263+ *
30264+ * @bo: A pointer to a struct ttm_buffer_object.
30265+ *
30266+ * Wait for a struct ttm_buffer_object to become unreserved.
30267+ * This is typically used in the execbuf code to relax cpu usage when
30268+ * backing off from a potential deadlock condition.
30269+ */
30270+extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
30271+ bool interruptible);
30272+
30273+/**
30274+ * ttm_bo_block_reservation
30275+ *
30276+ * @bo: A pointer to a struct ttm_buffer_object.
30277+ * @interruptible: Use interruptible sleep when waiting.
30278+ * @no_wait: Don't sleep, but rather return -EBUSY.
30279+ *
30280+ * Block reservation for validation by simply reserving the buffer. This is intended
30281+ * for single buffer use only without eviction, and thus needs no deadlock protection.
30282+ *
30283+ * Returns:
30284+ * -EBUSY: If no_wait == 1 and the buffer is already reserved.
30285+ * -ERESTART: If interruptible == 1 and the process received a signal while sleeping.
30286+ */
30287+extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
30288+ bool interruptible, bool no_wait);
30289+
30290+/**
30291+ * ttm_bo_unblock_reservation
30292+ *
30293+ * @bo: A pointer to a struct ttm_buffer_object.
30294+ *
30295+ * Unblocks reservation leaving lru lists untouched.
30296+ */
30297+extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
30298+
30299+/*
30300+ * ttm_bo_util.c
30301+ */
30302+
30303+/**
30304+ * ttm_bo_move_ttm
30305+ *
30306+ * @bo: A pointer to a struct ttm_buffer_object.
30307+ * @evict: 1: This is an eviction. Don't try to pipeline.
30308+ * @no_wait: Never sleep, but rather return with -EBUSY.
30309+ * @new_mem: struct ttm_mem_reg indicating where to move.
30310+ *
30311+ * Optimized move function for a buffer object with both old and
30312+ * new placement backed by a TTM. The function will, if successful,
30313+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
30314+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
30315+ * data remains untouched, and it's up to the caller to free the
30316+ * memory space indicated by @new_mem.
30317+ * Returns:
30318+ * !0: Failure.
30319+ */
30320+
30321+extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
30322+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem);
30323+
30324+/**
30325+ * ttm_bo_move_memcpy
30326+ *
30327+ * @bo: A pointer to a struct ttm_buffer_object.
30328+ * @evict: 1: This is an eviction. Don't try to pipeline.
30329+ * @no_wait: Never sleep, but rather return with -EBUSY.
30330+ * @new_mem: struct ttm_mem_reg indicating where to move.
30331+ *
30332+ * Fallback move function for a mappable buffer object in mappable memory.
30333+ * The function will, if successful,
30334+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
30335+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
30336+ * data remains untouched, and it's up to the caller to free the
30337+ * memory space indicated by @new_mem.
30338+ * Returns:
30339+ * !0: Failure.
30340+ */
30341+
30342+extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
30343+ bool evict,
30344+ bool no_wait, struct ttm_mem_reg *new_mem);
30345+
30346+/**
30347+ * ttm_bo_free_old_node
30348+ *
30349+ * @bo: A pointer to a struct ttm_buffer_object.
30350+ *
30351+ * Utility function to free an old placement after a successful move.
30352+ */
30353+extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
30354+
30355+/**
30356+ * ttm_bo_move_accel_cleanup.
30357+ *
30358+ * @bo: A pointer to a struct ttm_buffer_object.
30359+ * @sync_obj: A sync object that signals when moving is complete.
30360+ * @sync_obj_arg: An argument to pass to the sync object idle / wait
30361+ * functions.
30362+ * @evict: This is an evict move. Don't return until the buffer is idle.
30363+ * @no_wait: Never sleep, but rather return with -EBUSY.
30364+ * @new_mem: struct ttm_mem_reg indicating where to move.
30365+ *
30366+ * Accelerated move function to be called when an accelerated move
30367+ * has been scheduled. The function will create a new temporary buffer object
30368+ * representing the old placement, and put the sync object on both buffer
30369+ * objects. After that the newly created buffer object is unref'd to be
30370+ * destroyed when the move is complete. This will help pipeline
30371+ * buffer moves.
30372+ */
30373+
30374+extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
30375+ void *sync_obj,
30376+ void *sync_obj_arg,
30377+ bool evict, bool no_wait,
30378+ struct ttm_mem_reg *new_mem);
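For context, a driver-side move callback might use this roughly as sketched below after queueing a hardware blit; example_schedule_blit() is hypothetical and stands in for whatever driver code emits the copy and returns its sync object.

#include "ttm/ttm_bo_driver.h"

/* Hypothetical driver helper: emits the copy and returns a sync object
 * that signals when the copy has finished. */
extern void *example_schedule_blit(struct ttm_buffer_object *bo,
                                   struct ttm_mem_reg *new_mem);

static int example_move_blit(struct ttm_buffer_object *bo,
                             bool evict, bool no_wait,
                             struct ttm_mem_reg *new_mem)
{
        void *fence = example_schedule_blit(bo, new_mem);

        return ttm_bo_move_accel_cleanup(bo, fence, NULL,
                                         evict, no_wait, new_mem);
}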
30379+/**
30380+ * ttm_io_prot
30381+ *
30382+ * @caching_flags: Placement caching flags (TTM_PL_FLAG_XX).
30383+ * @tmp: Page protection flag for a normal, cached mapping.
30384+ *
30385+ * Utility function that returns the pgprot_t that should be used for
30386+ * setting up a PTE with the caching model indicated by @caching_flags.
30387+ */
30388+extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
30389+
30390+#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
30391+#define TTM_HAS_AGP
30392+#include <linux/agp_backend.h>
30393+
30394+/**
30395+ * ttm_agp_backend_init
30396+ *
30397+ * @bdev: Pointer to a struct ttm_bo_device.
30398+ * @bridge: The agp bridge this device is sitting on.
30399+ *
30400+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
30401+ * for TT memory. This function uses the linux agpgart interface to
30402+ * bind and unbind memory backing a ttm_tt.
30403+ */
30404+extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
30405+ struct agp_bridge_data *bridge);
30406+#endif
30407+
30408+#endif
30409diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_util.c b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c
30410--- a/drivers/gpu/drm/psb/ttm/ttm_bo_util.c 1969-12-31 16:00:00.000000000 -0800
30411+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c 2009-04-07 13:28:38.000000000 -0700
30412@@ -0,0 +1,529 @@
30413+/**************************************************************************
30414+ *
30415+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
30416+ * All Rights Reserved.
30417+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
30418+ * All Rights Reserved.
30419+ *
30420+ * Permission is hereby granted, free of charge, to any person obtaining a
30421+ * copy of this software and associated documentation files (the
30422+ * "Software"), to deal in the Software without restriction, including
30423+ * without limitation the rights to use, copy, modify, merge, publish,
30424+ * distribute, sub license, and/or sell copies of the Software, and to
30425+ * permit persons to whom the Software is furnished to do so, subject to
30426+ * the following conditions:
30427+ *
30428+ * The above copyright notice and this permission notice (including the
30429+ * next paragraph) shall be included in all copies or substantial portions
30430+ * of the Software.
30431+ *
30432+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30433+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30434+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
30435+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
30436+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
30437+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
30438+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
30439+ *
30440+ **************************************************************************/
30441+/*
30442+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
30443+ */
30444+
30445+#include "ttm/ttm_bo_driver.h"
30446+#include "ttm/ttm_placement_common.h"
30447+#include "ttm/ttm_pat_compat.h"
30448+#include <linux/io.h>
30449+#include <linux/highmem.h>
30450+#include <linux/wait.h>
30451+#include <linux/version.h>
30452+
30453+void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
30454+{
30455+ struct ttm_mem_reg *old_mem = &bo->mem;
30456+
30457+ if (old_mem->mm_node) {
30458+ spin_lock(&bo->bdev->lru_lock);
30459+ drm_mm_put_block(old_mem->mm_node);
30460+ spin_unlock(&bo->bdev->lru_lock);
30461+ }
30462+ old_mem->mm_node = NULL;
30463+}
30464+
30465+int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
30466+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
30467+{
30468+ struct ttm_tt *ttm = bo->ttm;
30469+ struct ttm_mem_reg *old_mem = &bo->mem;
30470+ uint32_t save_flags = old_mem->flags;
30471+ uint32_t save_proposed_flags = old_mem->proposed_flags;
30472+ int ret;
30473+
30474+ if (old_mem->mem_type != TTM_PL_SYSTEM) {
30475+ ttm_tt_unbind(ttm);
30476+ ttm_bo_free_old_node(bo);
30477+ ttm_flag_masked(&old_mem->flags, TTM_PL_FLAG_SYSTEM,
30478+ TTM_PL_MASK_MEM);
30479+ old_mem->mem_type = TTM_PL_SYSTEM;
30480+ save_flags = old_mem->flags;
30481+ }
30482+
30483+ ret = ttm_tt_set_placement_caching(ttm, new_mem->flags);
30484+ if (unlikely(ret != 0))
30485+ return ret;
30486+
30487+ if (new_mem->mem_type != TTM_PL_SYSTEM) {
30488+ ret = ttm_tt_bind(ttm, new_mem);
30489+ if (unlikely(ret != 0))
30490+ return ret;
30491+ }
30492+
30493+ *old_mem = *new_mem;
30494+ new_mem->mm_node = NULL;
30495+ old_mem->proposed_flags = save_proposed_flags;
30496+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
30497+ return 0;
30498+}
30499+
30500+int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
30501+ void **virtual)
30502+{
30503+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
30504+ unsigned long bus_offset;
30505+ unsigned long bus_size;
30506+ unsigned long bus_base;
30507+ int ret;
30508+ void *addr;
30509+
30510+ *virtual = NULL;
30511+ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
30512+ if (ret || bus_size == 0)
30513+ return ret;
30514+
30515+ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
30516+ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
30517+ else {
30518+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
30519+ if (mem->flags & TTM_PL_FLAG_WC)
30520+ addr = ioremap_wc(bus_base + bus_offset, bus_size);
30521+ else
30522+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
30523+#else
30524+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
30525+#endif
30526+ if (!addr)
30527+ return -ENOMEM;
30528+ }
30529+ *virtual = addr;
30530+ return 0;
30531+}
30532+
30533+void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
30534+ void *virtual)
30535+{
30536+ struct ttm_mem_type_manager *man;
30537+
30538+ man = &bdev->man[mem->mem_type];
30539+
30540+ if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
30541+ iounmap(virtual);
30542+}
30543+
30544+static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
30545+{
30546+ uint32_t *dstP =
30547+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
30548+ uint32_t *srcP =
30549+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
30550+
30551+ int i;
30552+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
30553+ iowrite32(ioread32(srcP++), dstP++);
30554+ return 0;
30555+}
30556+
30557+static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
30558+ unsigned long page)
30559+{
30560+ struct page *d = ttm_tt_get_page(ttm, page);
30561+ void *dst;
30562+
30563+ if (!d)
30564+ return -ENOMEM;
30565+
30566+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
30567+ dst = kmap(d);
30568+ if (!dst)
30569+ return -ENOMEM;
30570+
30571+ memcpy_fromio(dst, src, PAGE_SIZE);
30572+ kunmap(d);
30573+ return 0;
30574+}
30575+
30576+static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
30577+ unsigned long page)
30578+{
30579+ struct page *s = ttm_tt_get_page(ttm, page);
30580+ void *src;
30581+
30582+ if (!s)
30583+ return -ENOMEM;
30584+
30585+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
30586+ src = kmap(s);
30587+ if (!src)
30588+ return -ENOMEM;
30589+
30590+ memcpy_toio(dst, src, PAGE_SIZE);
30591+ kunmap(s);
30592+ return 0;
30593+}
30594+
30595+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
30596+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
30597+{
30598+ struct ttm_bo_device *bdev = bo->bdev;
30599+ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
30600+ struct ttm_tt *ttm = bo->ttm;
30601+ struct ttm_mem_reg *old_mem = &bo->mem;
30602+ struct ttm_mem_reg old_copy = *old_mem;
30603+ void *old_iomap;
30604+ void *new_iomap;
30605+ int ret;
30606+ uint32_t save_flags = old_mem->flags;
30607+ uint32_t save_proposed_flags = old_mem->proposed_flags;
30608+ unsigned long i;
30609+ unsigned long page;
30610+ unsigned long add = 0;
30611+ int dir;
30612+
30613+ ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
30614+ if (ret)
30615+ return ret;
30616+ ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
30617+ if (ret)
30618+ goto out;
30619+
30620+ if (old_iomap == NULL && new_iomap == NULL)
30621+ goto out2;
30622+ if (old_iomap == NULL && ttm == NULL)
30623+ goto out2;
30624+
30625+ add = 0;
30626+ dir = 1;
30627+
30628+ if ((old_mem->mem_type == new_mem->mem_type) &&
30629+ (new_mem->mm_node->start <
30630+ old_mem->mm_node->start + old_mem->mm_node->size)) {
30631+ dir = -1;
30632+ add = new_mem->num_pages - 1;
30633+ }
30634+
30635+ for (i = 0; i < new_mem->num_pages; ++i) {
30636+ page = i * dir + add;
30637+ if (old_iomap == NULL)
30638+ ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
30639+ else if (new_iomap == NULL)
30640+ ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
30641+ else
30642+ ret = ttm_copy_io_page(new_iomap, old_iomap, page);
30643+ if (ret)
30644+ goto out1;
30645+ }
30646+ mb();
30647+ out2:
30648+ ttm_bo_free_old_node(bo);
30649+
30650+ *old_mem = *new_mem;
30651+ new_mem->mm_node = NULL;
30652+ old_mem->proposed_flags = save_proposed_flags;
30653+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
30654+
30655+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
30656+ ttm_tt_unbind(ttm);
30657+ ttm_tt_destroy(ttm);
30658+ bo->ttm = NULL;
30659+ }
30660+
30661+ out1:
30662+ ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
30663+ out:
30664+ ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
30665+ return ret;
30666+}
30667+
30668+/**
30669+ * ttm_buffer_object_transfer
30670+ *
30671+ * @bo: A pointer to a struct ttm_buffer_object.
30672+ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
30673+ * holding the data of @bo with the old placement.
30674+ *
30675+ * This is a utility function that may be called after an accelerated move
30676+ * has been scheduled. A new buffer object is created as a placeholder for
30677+ * the old data while it's being copied. When that buffer object is idle,
30678+ * it can be destroyed, releasing the space of the old placement.
30679+ * Returns:
30680+ * !0: Failure.
30681+ */
30682+
30683+static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
30684+ struct ttm_buffer_object **new_obj)
30685+{
30686+ struct ttm_buffer_object *fbo;
30687+ struct ttm_bo_device *bdev = bo->bdev;
30688+ struct ttm_bo_driver *driver = bdev->driver;
30689+
30690+ fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
30691+ if (!fbo)
30692+ return -ENOMEM;
30693+
30694+ *fbo = *bo;
30695+ mutex_init(&fbo->mutex);
30696+ mutex_lock(&fbo->mutex);
30697+
30698+ init_waitqueue_head(&fbo->event_queue);
30699+ INIT_LIST_HEAD(&fbo->ddestroy);
30700+ INIT_LIST_HEAD(&fbo->lru);
30701+
30702+ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
30703+ if (fbo->mem.mm_node)
30704+ fbo->mem.mm_node->private = (void *)fbo;
30705+ kref_init(&fbo->list_kref);
30706+ kref_init(&fbo->kref);
30707+
30708+ mutex_unlock(&fbo->mutex);
30709+
30710+ *new_obj = fbo;
30711+ return 0;
30712+}
30713+
30714+pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
30715+{
30716+#if defined(__i386__) || defined(__x86_64__)
30717+ if (caching_flags & TTM_PL_FLAG_WC) {
30718+ tmp = pgprot_ttm_x86_wc(tmp);
30719+ } else if (boot_cpu_data.x86 > 3) {
30720+ tmp = pgprot_noncached(tmp);
30721+ }
30722+#elif defined(__powerpc__)
30723+ if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
30724+ pgprot_val(tmp) |= _PAGE_NO_CACHE;
30725+ if (caching_flags & TTM_PL_FLAG_UNCACHED)
30726+ pgprot_val(tmp) |= _PAGE_GUARDED;
30727+ }
30728+#endif
30729+#if defined(__ia64__)
30730+ if (caching_flags & TTM_PL_FLAG_WC)
30731+ tmp = pgprot_writecombine(tmp);
30732+ else
30733+ tmp = pgprot_noncached(tmp);
30734+#endif
30735+#if defined(__sparc__)
30736+ if (!(caching_flags & TTM_PL_FLAG_CACHED))
30737+ tmp = pgprot_noncached(tmp);
30738+#endif
30739+ return tmp;
30740+}
30741+
30742+static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
30743+ unsigned long bus_base,
30744+ unsigned long bus_offset,
30745+ unsigned long bus_size,
30746+ struct ttm_bo_kmap_obj *map)
30747+{
30748+ struct ttm_bo_device * bdev = bo->bdev;
30749+ struct ttm_mem_reg * mem = &bo->mem;
30750+ struct ttm_mem_type_manager * man = &bdev->man[mem->mem_type];
30751+
30752+ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
30753+ map->bo_kmap_type = ttm_bo_map_premapped;
30754+ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);} else {
30755+ map->bo_kmap_type = ttm_bo_map_iomap;
30756+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
30757+ if (mem->flags & TTM_PL_FLAG_WC)
30758+ map->virtual = ioremap_wc(bus_base + bus_offset, bus_size);
30759+ else
30760+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
30761+#else
30762+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
30763+#endif
30764+ }
30765+ return (!map->virtual) ? -ENOMEM : 0;
30766+}
30767+
30768+static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
30769+ unsigned long start_page,
30770+ unsigned long num_pages,
30771+ struct ttm_bo_kmap_obj *map)
30772+{
30773+ struct ttm_mem_reg * mem = &bo->mem; pgprot_t prot;
30774+ struct ttm_tt * ttm = bo->ttm;
30775+ struct page * d;
30776+ int i;
30777+ BUG_ON(!ttm);
30778+ if (num_pages == 1 && (mem->flags & TTM_PL_FLAG_CACHED)) {
30779+ /*
30780+ * We're mapping a single page, and the desired
30781+ * page protection is consistent with the bo.
30782+ */
30783+ map->bo_kmap_type = ttm_bo_map_kmap;
30784+ map->page = ttm_tt_get_page(ttm, start_page);
30785+ map->virtual = kmap(map->page);
30786+ } else {
30787+ /*
30788+ * Populate the part we're mapping;
30789+ */
30790+ for (i = start_page; i < start_page + num_pages; ++i) {
30791+ d = ttm_tt_get_page(ttm, i); if (!d)
30792+ return -ENOMEM;
30793+ }
30794+
30795+ /*
30796+ * We need to use vmap to get the desired page protection
30797+ * or to make the buffer object look contiguous.
30798+ */
30799+ prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
30800+ PAGE_KERNEL :
30801+ ttm_io_prot(mem->flags, PAGE_KERNEL);
30802+ map->bo_kmap_type = ttm_bo_map_vmap;
30803+ map->virtual = vmap(ttm->pages + start_page, num_pages, 0, prot);
30804+ }
30805+ return (!map->virtual) ? -ENOMEM : 0;
30806+}
30807+
30808+int ttm_bo_kmap(struct ttm_buffer_object *bo,
30809+ unsigned long start_page, unsigned long num_pages,
30810+ struct ttm_bo_kmap_obj *map)
30811+{
30812+ int ret;
30813+ unsigned long bus_base;
30814+ unsigned long bus_offset;
30815+ unsigned long bus_size;
30816+ BUG_ON(!list_empty(&bo->swap));
30817+ map->virtual = NULL;
30818+ if (num_pages > bo->num_pages)
30819+ return -EINVAL;
30820+ if (start_page > bo->num_pages)
30821+ return -EINVAL;
30822+#if 0
30823+ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
30824+ return -EPERM;
30825+#endif
30826+ ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
30827+ &bus_offset, &bus_size);
30828+ if (ret)
30829+ return ret;
30830+ if (bus_size == 0) {
30831+ return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
30832+ } else {
30833+ bus_offset += start_page << PAGE_SHIFT;
30834+ bus_size = num_pages << PAGE_SHIFT;
30835+ return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
30836+ }
30837+}
30838+
30839+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
30840+{
30841+ if (!map->virtual)
30842+ return;
30843+ switch (map->bo_kmap_type) {
30844+ case ttm_bo_map_iomap:
30845+ iounmap(map->virtual);
30846+ break;
30847+ case ttm_bo_map_vmap:
30848+ vunmap(map->virtual);
30849+ break;
30850+ case ttm_bo_map_kmap:
30851+ kunmap(map->page);
30852+ break;
30853+ case ttm_bo_map_premapped:
30854+ break;
30855+ default:
30856+ BUG();
30857+ }
30858+ map->virtual = NULL;
30859+ map->page = NULL;
30860+}
30861+
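Taken together, ttm_bo_kmap() and ttm_bo_kunmap() are typically used as in the sketch below. The buffer object is assumed to be reserved by the caller, and the bool out-parameter of ttm_kmap_obj_virtual() is assumed to report an I/O mapping, in which case a real driver would use memset_io() instead of memset().

#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_bo_driver.h"

static int example_clear_first_page(struct ttm_buffer_object *bo)
{
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        void *virtual;
        int ret;

        ret = ttm_bo_kmap(bo, 0, 1, &map);
        if (unlikely(ret != 0))
                return ret;

        virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
        memset(virtual, 0, PAGE_SIZE); /* memset_io() if is_iomem is set */
        ttm_bo_kunmap(&map);
        return 0;
}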
30862+int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
30863+ unsigned long dst_offset,
30864+ unsigned long *pfn, pgprot_t * prot)
30865+{
30866+ struct ttm_mem_reg * mem = &bo->mem;
30867+ struct ttm_bo_device * bdev = bo->bdev;
30868+ unsigned long bus_offset;
30869+ unsigned long bus_size;
30870+ unsigned long bus_base;
30871+ int ret;
30872+ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
30873+ &bus_size);
30874+ if (ret)
30875+ return -EINVAL;
30876+ if (bus_size != 0)
30877+ * pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
30878+ else
30879+ if (!bo->ttm)
30880+ return -EINVAL;
30881+ else
30882+ *pfn =
30883+ page_to_pfn(ttm_tt_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));
30884+ *prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
30885+ PAGE_KERNEL : ttm_io_prot(mem->flags, PAGE_KERNEL);
30888+ return 0;
30889+}
30890+
30891+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
30892+ void *sync_obj,
30893+ void *sync_obj_arg,
30894+ bool evict, bool no_wait,
30895+ struct ttm_mem_reg *new_mem)
30896+{
30897+ struct ttm_bo_device * bdev = bo->bdev;
30898+ struct ttm_bo_driver * driver = bdev->driver;
30899+ struct ttm_mem_type_manager * man = &bdev->man[new_mem->mem_type];
30900+ struct ttm_mem_reg * old_mem = &bo->mem;
30901+ int ret;
30902+ uint32_t save_flags = old_mem->flags;
30903+ uint32_t save_proposed_flags = old_mem->proposed_flags;
30904+ struct ttm_buffer_object * old_obj;
30905+ if (bo->sync_obj)
30906+ driver->sync_obj_unref(&bo->sync_obj);
30907+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
30908+ bo->sync_obj_arg = sync_obj_arg;
30909+ if (evict) {
30910+ ret = ttm_bo_wait(bo, false, false, false);
30911+ if (ret)
30912+ return ret;
30913+ ttm_bo_free_old_node(bo);
30914+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm != NULL)) {
30915+ ttm_tt_unbind(bo->ttm); ttm_tt_destroy(bo->ttm); bo->ttm = NULL;
30916+ }
30917+ } else {
30918+
30919+ /* This should help pipeline ordinary buffer moves.
30920+ *
30921+ * Hang old buffer memory on a new buffer object,
30922+ * and leave it to be released when the GPU
30923+ * operation has completed.
30924+ */
30925+ ret = ttm_buffer_object_transfer(bo, &old_obj);
30926+ if (ret)
30927+ return ret;
30928+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
30929+ old_obj->ttm = NULL;
30930+ else
30931+ bo->ttm = NULL;
30932+ bo->priv_flags |= TTM_BO_PRIV_FLAG_MOVING;
30933+ ttm_bo_unreserve(old_obj);
30934+ }
30935+
30936+ *old_mem = *new_mem;
30937+ new_mem->mm_node = NULL;
30938+ old_mem->proposed_flags = save_proposed_flags;
30939+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
30940+ return 0;
30941+}
30942diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c
30943--- a/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c 1969-12-31 16:00:00.000000000 -0800
30944+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c 2009-04-07 13:28:38.000000000 -0700
30945@@ -0,0 +1,596 @@
30946+/**************************************************************************
30947+ *
30948+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
30949+ * All Rights Reserved.
30950+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
30951+ * All Rights Reserved.
30952+ *
30953+ * Permission is hereby granted, free of charge, to any person obtaining a
30954+ * copy of this software and associated documentation files (the
30955+ * "Software"), to deal in the Software without restriction, including
30956+ * without limitation the rights to use, copy, modify, merge, publish,
30957+ * distribute, sub license, and/or sell copies of the Software, and to
30958+ * permit persons to whom the Software is furnished to do so, subject to
30959+ * the following conditions:
30960+ *
30961+ * The above copyright notice and this permission notice (including the
30962+ * next paragraph) shall be included in all copies or substantial portions
30963+ * of the Software.
30964+ *
30965+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30966+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30967+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
30968+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
30969+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
30970+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
30971+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
30972+ *
30973+ **************************************************************************/
30974+/*
30975+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30976+ */
30977+
30978+
30979+#include "ttm/ttm_bo_driver.h"
30980+#include "ttm/ttm_placement_common.h"
30981+#include <linux/mm.h>
30982+#include <linux/version.h>
30983+#include <linux/rbtree.h>
30984+#include <asm/uaccess.h>
30985+
30986+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
30987+#error "TTM doesn't build on kernel versions below 2.6.25."
30988+#endif
30989+
30990+#define TTM_BO_VM_NUM_PREFAULT 16
30991+
30992+static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
30993+ unsigned long page_start,
30994+ unsigned long num_pages)
30995+{
30996+ struct rb_node *cur = bdev->addr_space_rb.rb_node;
30997+ unsigned long cur_offset;
30998+ struct ttm_buffer_object *bo;
30999+ struct ttm_buffer_object *best_bo = NULL;
31000+
31001+ while (likely(cur != NULL)) {
31002+ bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
31003+ cur_offset = bo->vm_node->start;
31004+ if (page_start >= cur_offset) {
31005+ cur = cur->rb_right;
31006+ best_bo = bo;
31007+ if (page_start == cur_offset)
31008+ break;
31009+ } else
31010+ cur = cur->rb_left;
31011+ }
31012+
31013+ if (unlikely(best_bo == NULL))
31014+ return NULL;
31015+
31016+ if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
31017+ (page_start + num_pages)))
31018+ return NULL;
31019+
31020+ return best_bo;
31021+}
31022+
31023+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
31024+static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
31025+{
31026+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
31027+ vma->vm_private_data;
31028+ struct ttm_bo_device *bdev = bo->bdev;
31029+ unsigned long bus_base;
31030+ unsigned long bus_offset;
31031+ unsigned long bus_size;
31032+ unsigned long page_offset;
31033+ unsigned long page_last;
31034+ unsigned long pfn;
31035+ struct ttm_tt *ttm = NULL;
31036+ struct page *page;
31037+ int ret;
31038+ int i;
31039+ bool is_iomem;
31040+ unsigned long address = (unsigned long)vmf->virtual_address;
31041+ int retval = VM_FAULT_NOPAGE;
31042+
31043+ ret = ttm_bo_reserve(bo, true, false, false, 0);
31044+ if (unlikely(ret != 0))
31045+ return VM_FAULT_NOPAGE;
31046+
31047+ mutex_lock(&bo->mutex);
31048+
31049+ /*
31050+ * Wait for buffer data in transit, due to a pipelined
31051+ * move.
31052+ */
31053+
31054+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
31055+ ret = ttm_bo_wait(bo, false, true, false);
31056+ if (unlikely(ret != 0)) {
31057+ retval = (ret != -ERESTART) ?
31058+ VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
31059+ goto out_unlock;
31060+ }
31061+ }
31062+
31063+ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
31064+ &bus_size);
31065+ if (unlikely(ret != 0)) {
31066+ retval = VM_FAULT_SIGBUS;
31067+ goto out_unlock;
31068+ }
31069+
31070+ is_iomem = (bus_size != 0);
31071+
31072+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
31073+ bo->vm_node->start - vma->vm_pgoff;
31074+ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
31075+ bo->vm_node->start - vma->vm_pgoff;
31076+
31077+ if (unlikely(page_offset >= bo->num_pages)) {
31078+ retval = VM_FAULT_SIGBUS;
31079+ goto out_unlock;
31080+ }
31081+
31082+ /*
31083+ * Strictly, we're not allowed to modify vma->vm_page_prot here,
31084+ * since the mmap_sem is only held in read mode. However, we
31085+ * modify only the caching bits of vma->vm_page_prot and
31086+ * consider those bits protected by
31087+ * the bo->mutex, as we should be the only writers.
31088+ * There shouldn't really be any readers of these bits except
31089+ * within vm_insert_mixed()? fork?
31090+ *
31091+ * TODO: Add a list of vmas to the bo, and change the
31092+ * vma->vm_page_prot when the object changes caching policy, with
31093+ * the correct locks held.
31094+ */
31095+
31096+ if (is_iomem) {
31097+ vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
31098+ vma->vm_page_prot);
31099+ } else {
31100+ ttm = bo->ttm;
31101+ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
31102+ vm_get_page_prot(vma->vm_flags) :
31103+ ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
31104+ }
31105+
31106+ /*
31107+ * Speculatively prefault a number of pages. Only error on
31108+ * first page.
31109+ */
31110+
31111+ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
31112+
31113+ if (is_iomem)
31114+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
31115+ page_offset;
31116+ else {
31117+ page = ttm_tt_get_page(ttm, page_offset);
31118+ if (unlikely(!page && i == 0)) {
31119+ retval = VM_FAULT_OOM;
31120+ goto out_unlock;
31121+ } else if (unlikely(!page)) {
31122+ break;
31123+ }
31124+ pfn = page_to_pfn(page);
31125+ }
31126+
31127+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
31128+ ret = vm_insert_mixed(vma, address, pfn);
31129+#else
31130+ ret = vm_insert_pfn(vma, address, pfn);
31131+#endif
31132+ /*
31133+ * Somebody beat us to this PTE or prefaulting to
31134+ * an already populated PTE, or prefaulting error.
31135+ */
31136+
31137+ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
31138+ break;
31139+ else if (unlikely(ret != 0)) {
31140+ retval =
31141+ (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
31142+ goto out_unlock;
31143+
31144+ }
31145+
31146+ address += PAGE_SIZE;
31147+ if (unlikely(++page_offset >= page_last))
31148+ break;
31149+ }
31150+
31151+ out_unlock:
31152+ mutex_unlock(&bo->mutex);
31153+ ttm_bo_unreserve(bo);
31154+ return retval;
31155+}
31156+
31157+#else
31158+
31159+static unsigned long ttm_bo_vm_nopfn(struct vm_area_struct *vma,
31160+ unsigned long address)
31161+{
31162+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
31163+ vma->vm_private_data;
31164+ struct ttm_bo_device *bdev = bo->bdev;
31165+ unsigned long bus_base;
31166+ unsigned long bus_offset;
31167+ unsigned long bus_size;
31168+ unsigned long page_offset;
31169+ unsigned long page_last;
31170+ unsigned long pfn;
31171+ struct ttm_tt *ttm = NULL;
31172+ struct page *page;
31173+ int ret;
31174+ int i;
31175+ bool is_iomem;
31176+ unsigned long retval = NOPFN_REFAULT;
31177+
31178+ ret = ttm_bo_reserve(bo, true, false, false, 0);
31179+ if (unlikely(ret != 0))
31180+ return NOPFN_REFAULT;
31181+
31182+ mutex_lock(&bo->mutex);
31183+
31184+ /*
31185+ * Wait for buffer data in transit, due to a pipelined
31186+ * move.
31187+ */
31188+
31189+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
31190+ ret = ttm_bo_wait(bo, false, true, false);
31191+ if (unlikely(ret != 0)) {
31192+ retval = (ret != -ERESTART) ?
31193+ NOPFN_SIGBUS : NOPFN_REFAULT;
31194+ goto out_unlock;
31195+ }
31196+ }
31197+
31198+ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
31199+ &bus_size);
31200+ if (unlikely(ret != 0)) {
31201+ printk(KERN_ERR "Attempted buffer object access "
31202+ "of unmappable object.\n");
31203+ retval = NOPFN_SIGBUS;
31204+ goto out_unlock;
31205+ }
31206+
31207+ is_iomem = (bus_size != 0);
31208+
31209+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
31210+ bo->vm_node->start - vma->vm_pgoff;
31211+
31212+ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
31213+ bo->vm_node->start - vma->vm_pgoff;
31214+
31215+ if (unlikely(page_offset >= bo->num_pages)) {
31216+ printk(KERN_ERR "Attempted buffer object access "
31217+ "outside object.\n");
31218+ retval = NOPFN_SIGBUS;
31219+ goto out_unlock;
31220+ }
31221+
31222+ /*
31223+ * Strictly, we're not allowed to modify vma->vm_page_prot here,
31224+ * since the mmap_sem is only held in read mode. However, we
31225+ * modify only the caching bits of vma->vm_page_prot and
31226+ * consider those bits protected by
31227+ * the bo->mutex, as we should be the only writers.
31228+ * There shouldn't really be any readers of these bits except
31229+ * within vm_insert_mixed()? fork?
31230+ *
31231+ * TODO: Add a list of vmas to the bo, and change the
31232+ * vma->vm_page_prot when the object changes caching policy, with
31233+ * the correct locks held.
31234+ */
31235+
31236+ if (is_iomem) {
31237+ vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
31238+ vma->vm_page_prot);
31239+ } else {
31240+ ttm = bo->ttm;
31241+ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
31242+ vm_get_page_prot(vma->vm_flags) :
31243+ ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
31244+ }
31245+
31246+ /*
31247+ * Speculatively prefault a number of pages. Only error on
31248+ * first page.
31249+ */
31250+
31251+ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
31252+
31253+ if (is_iomem)
31254+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
31255+ page_offset;
31256+ else {
31257+ page = ttm_tt_get_page(ttm, page_offset);
31258+ if (unlikely(!page && i == 0)) {
31259+ retval = NOPFN_OOM;
31260+ goto out_unlock;
31261+ } else if (unlikely(!page)) {
31262+ break;
31263+ }
31264+ pfn = page_to_pfn(page);
31265+ }
31266+
31267+ ret = vm_insert_pfn(vma, address, pfn);
31270+
31271+ /*
31272+ * Somebody beat us to this PTE or prefaulting to
31273+ * an already populated PTE, or prefaulting error.
31274+ */
31275+
31276+ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
31277+ break;
31278+ else if (unlikely(ret != 0)) {
31279+ retval =
31280+ (ret == -ENOMEM) ? NOPFN_OOM : NOPFN_SIGBUS;
31281+ goto out_unlock;
31282+ }
31283+
31284+ address += PAGE_SIZE;
31285+ if (unlikely(++page_offset >= page_last))
31286+ break;
31287+ }
31288+
31289+ out_unlock:
31290+ mutex_unlock(&bo->mutex);
31291+ ttm_bo_unreserve(bo);
31292+ return retval;
31293+}
31294+#endif
31295+
31296+static void ttm_bo_vm_open(struct vm_area_struct *vma)
31297+{
31298+ struct ttm_buffer_object *bo =
31299+ (struct ttm_buffer_object *)vma->vm_private_data;
31300+
31301+ (void)ttm_bo_reference(bo);
31302+}
31303+
31304+static void ttm_bo_vm_close(struct vm_area_struct *vma)
31305+{
31306+ struct ttm_buffer_object *bo =
31307+ (struct ttm_buffer_object *)vma->vm_private_data;
31308+
31309+ ttm_bo_unref(&bo);
31310+ vma->vm_private_data = NULL;
31311+}
31312+
31313+static struct vm_operations_struct ttm_bo_vm_ops = {
31314+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
31315+ .fault = ttm_bo_vm_fault,
31316+#else
31317+ .nopfn = ttm_bo_vm_nopfn,
31318+#endif
31319+ .open = ttm_bo_vm_open,
31320+ .close = ttm_bo_vm_close
31321+};
31322+
31323+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
31324+ struct ttm_bo_device *bdev)
31325+{
31326+ struct ttm_bo_driver *driver;
31327+ struct ttm_buffer_object *bo;
31328+ int ret;
31329+
31330+ read_lock(&bdev->vm_lock);
31331+ bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
31332+ (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
31333+ if (likely(bo != NULL))
31334+ ttm_bo_reference(bo);
31335+ read_unlock(&bdev->vm_lock);
31336+
31337+ if (unlikely(bo == NULL)) {
31338+ printk(KERN_ERR "Could not find buffer object to map.\n");
31339+ ret = -EINVAL;
31340+ goto out_unref;
31341+ }
31342+
31343+ driver = bo->bdev->driver;
31344+ if (unlikely(!driver->verify_access)) {
31345+ ret = -EPERM;
31346+ goto out_unref;
31347+ }
31348+ ret = driver->verify_access(bo, filp);
31349+ if (unlikely(ret != 0))
31350+ goto out_unref;
31351+
31352+ vma->vm_ops = &ttm_bo_vm_ops;
31353+
31354+ /*
31355+ * Note: We're transferring the bo reference to
31356+ * vma->vm_private_data here.
31357+ */
31358+
31359+ vma->vm_private_data = bo;
31360+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
31361+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
31362+#else
31363+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
31364+#endif
31365+ return 0;
31366+ out_unref:
31367+ ttm_bo_unref(&bo);
31368+ return ret;
31369+}
31370+
31371+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
31372+{
31373+ if (vma->vm_pgoff != 0)
31374+ return -EACCES;
31375+
31376+ vma->vm_ops = &ttm_bo_vm_ops;
31377+ vma->vm_private_data = ttm_bo_reference(bo);
31378+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
31379+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
31380+#else
31381+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
31382+#endif
31383+ return 0;
31384+}
31385+
31386+ssize_t ttm_bo_io(struct ttm_bo_device * bdev, struct file * filp,
31387+ const char __user * wbuf, char __user * rbuf, size_t count,
31388+ loff_t * f_pos, bool write)
31389+{
31390+ struct ttm_buffer_object *bo;
31391+ struct ttm_bo_driver *driver;
31392+ struct ttm_bo_kmap_obj map;
31393+ unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
31394+ unsigned long kmap_offset;
31395+ unsigned long kmap_end;
31396+ unsigned long kmap_num;
31397+ size_t io_size;
31398+ unsigned int page_offset;
31399+ char *virtual;
31400+ int ret;
31401+ bool no_wait = false;
31402+ bool dummy;
31403+
31405+ read_lock(&bdev->vm_lock);
31406+ bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
31407+ if (likely(bo != NULL))
31408+ ttm_bo_reference(bo);
31409+ read_unlock(&bdev->vm_lock);
31410+
31411+ if (unlikely(bo == NULL))
31412+ return -EFAULT;
31413+
31414+ driver = bo->bdev->driver;
31415+ if (unlikely(!driver->verify_access)) { ret = -EPERM; goto out_unref; }
31416+
31417+ ret = driver->verify_access(bo, filp);
31418+ if (unlikely(ret != 0))
31419+ goto out_unref;
31420+
31421+ kmap_offset = dev_offset - bo->vm_node->start;
31422+ if (unlikely(kmap_offset >= bo->num_pages)) {
31423+ ret = -EFBIG;
31424+ goto out_unref;
31425+ }
31426+
31427+ page_offset = *f_pos & ~PAGE_MASK;
31428+ io_size = bo->num_pages - kmap_offset;
31429+ io_size = (io_size << PAGE_SHIFT) - page_offset;
31430+ if (count < io_size)
31431+ io_size = count;
31432+
31433+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
31434+ kmap_num = kmap_end - kmap_offset + 1;
31435+
31436+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
31437+
31438+ switch (ret) {
31439+ case 0:
31440+ break;
31441+ case -ERESTART:
31442+ ret = -EINTR;
31443+ goto out_unref;
31444+ case -EBUSY:
31445+ ret = -EAGAIN;
31446+ goto out_unref;
31447+ default:
31448+ goto out_unref;
31449+ }
31450+
31451+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
31452+ if (unlikely(ret != 0))
31453+ goto out_unref;
31454+
31455+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
31456+ virtual += page_offset;
31457+
31458+ if (write)
31459+ ret = copy_from_user(virtual, wbuf, io_size);
31460+ else
31461+ ret = copy_to_user(rbuf, virtual, io_size);
31462+
31463+ ttm_bo_kunmap(&map);
31464+ ttm_bo_unreserve(bo);
31465+ ttm_bo_unref(&bo);
31466+
31467+ if (unlikely(ret != 0))
31468+ return -EFAULT;
31469+
31470+ *f_pos += io_size;
31471+
31472+ return io_size;
31473+ out_unref:
31474+ ttm_bo_unref(&bo);
31475+ return ret;
31476+}
31477+
31478+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object * bo, const char __user * wbuf,
31479+ char __user * rbuf, size_t count, loff_t * f_pos,
31480+ bool write)
31481+{
31482+ struct ttm_bo_kmap_obj map;
31483+ unsigned long kmap_offset;
31484+ unsigned long kmap_end;
31485+ unsigned long kmap_num;
31486+ size_t io_size;
31487+ unsigned int page_offset;
31488+ char *virtual;
31489+ int ret;
31490+ bool no_wait = false;
31491+ bool dummy;
31492+
31493+ kmap_offset = (*f_pos >> PAGE_SHIFT);
31494+ if (unlikely(kmap_offset >= bo->num_pages))
31495+ return -EFBIG;
31496+
31497+ page_offset = *f_pos & ~PAGE_MASK;
31498+ io_size = bo->num_pages - kmap_offset;
31499+ io_size = (io_size << PAGE_SHIFT) - page_offset;
31500+ if (count < io_size)
31501+ io_size = count;
31502+
31503+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
31504+ kmap_num = kmap_end - kmap_offset + 1;
31505+
31506+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
31507+
31508+ switch (ret) {
31509+ case 0:
31510+ break;
31511+ case -ERESTART:
31512+ return -EINTR;
31513+ case -EBUSY:
31514+ return -EAGAIN;
31515+ default:
31516+ return ret;
31517+ }
31518+
31519+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
31520+ if (unlikely(ret != 0))
31521+ return ret;
31522+
31523+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
31524+ virtual += page_offset;
31525+
31526+ if (write)
31527+ ret = copy_from_user(virtual, wbuf, io_size);
31528+ else
31529+ ret = copy_to_user(rbuf, virtual, io_size);
31530+
31531+ ttm_bo_kunmap(&map);
31532+ ttm_bo_unreserve(bo);
31533+ ttm_bo_unref(&bo);
31534+
31535+ if (unlikely(ret != 0))
31536+ return -EFAULT;
31537+
31538+ *f_pos += io_size;
31539+
31540+ return io_size;
31541+}
31542diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c
31543--- a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c 1969-12-31 16:00:00.000000000 -0800
31544+++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c 2009-04-07 13:28:38.000000000 -0700
31545@@ -0,0 +1,115 @@
31546+/**************************************************************************
31547+ *
31548+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
31549+ * All Rights Reserved.
31550+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
31551+ * All Rights Reserved.
31552+ *
31553+ * Permission is hereby granted, free of charge, to any person obtaining a
31554+ * copy of this software and associated documentation files (the
31555+ * "Software"), to deal in the Software without restriction, including
31556+ * without limitation the rights to use, copy, modify, merge, publish,
31557+ * distribute, sub license, and/or sell copies of the Software, and to
31558+ * permit persons to whom the Software is furnished to do so, subject to
31559+ * the following conditions:
31560+ *
31561+ * The above copyright notice and this permission notice (including the
31562+ * next paragraph) shall be included in all copies or substantial portions
31563+ * of the Software.
31564+ *
31565+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
31566+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31567+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
31568+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
31569+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
31570+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
31571+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
31572+ *
31573+ **************************************************************************/
31574+
31575+#include "ttm/ttm_execbuf_util.h"
31576+#include "ttm/ttm_bo_driver.h"
31577+#include "ttm/ttm_placement_common.h"
31578+#include <linux/wait.h>
31579+#include <linux/sched.h>
31580+
31581+void ttm_eu_backoff_reservation(struct list_head *list)
31582+{
31583+ struct ttm_validate_buffer *entry;
31584+
31585+ list_for_each_entry(entry, list, head) {
31586+ struct ttm_buffer_object *bo = entry->bo;
31587+ if (!entry->reserved)
31588+ continue;
31589+
31590+ entry->reserved = false;
31591+ ttm_bo_unreserve(bo);
31592+ }
31593+}
31594+
31595+/*
31596+ * Reserve buffers for validation.
31597+ *
31598+ * If a buffer in the list is marked for CPU access, we back off and
31599+ * wait for that buffer to become free for GPU access.
31600+ *
31601+ * If a buffer is reserved for another validation, the validator with
31602+ * the highest validation sequence backs off and waits for that buffer
31603+ * to become unreserved. This prevents deadlocks when validating multiple
31604+ * buffers in different orders.
31605+ */
31606+
31607+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
31608+{
31609+ struct ttm_validate_buffer *entry;
31610+ int ret;
31611+
31612+ retry:
31613+ list_for_each_entry(entry, list, head) {
31614+ struct ttm_buffer_object *bo = entry->bo;
31615+
31616+ entry->reserved = false;
31617+ ret = ttm_bo_reserve(bo, true, false, true, val_seq);
31618+ if (ret != 0) {
31619+ ttm_eu_backoff_reservation(list);
31620+ if (ret == -EAGAIN) {
31621+ ret = ttm_bo_wait_unreserved(bo, true);
31622+ if (unlikely(ret != 0))
31623+ return ret;
31624+ goto retry;
31625+ } else
31626+ return ret;
31627+ }
31628+
31629+ entry->reserved = true;
31630+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
31631+ ttm_eu_backoff_reservation(list);
31632+ ret = ttm_bo_wait_cpu(bo, false);
31633+ if (ret)
31634+ return ret;
31635+ goto retry;
31636+ }
31637+ }
31638+ return 0;
31639+}
31640+
31641+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
31642+{
31643+ struct ttm_validate_buffer *entry;
31644+
31645+ list_for_each_entry(entry, list, head) {
31646+ struct ttm_buffer_object *bo = entry->bo;
31647+ struct ttm_bo_driver *driver = bo->bdev->driver;
31648+ void *old_sync_obj;
31649+
31650+ mutex_lock(&bo->mutex);
31651+ old_sync_obj = bo->sync_obj;
31652+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
31653+ bo->sync_obj_arg = entry->new_sync_obj_arg;
31654+ mutex_unlock(&bo->mutex);
31655+ ttm_bo_unreserve(bo);
31656+ entry->reserved = false;
31657+ if (old_sync_obj)
31658+ driver->sync_obj_unref(&old_sync_obj);
31659+ }
31660+}
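A command-submission path would normally drive these two helpers as in the sketch below: every referenced buffer sits on a thread-private list of struct ttm_validate_buffer entries (with new_sync_obj_arg already set), and fence_obj is whatever sync object the driver created for the submission. Names prefixed example_ are hypothetical.

#include <linux/list.h>
#include "ttm/ttm_execbuf_util.h"

static int example_submit(struct list_head *val_list, uint32_t val_seq,
                          void *fence_obj)
{
        int ret;

        ret = ttm_eu_reserve_buffers(val_list, val_seq);
        if (unlikely(ret != 0))
                return ret; /* nothing is left reserved on failure */

        /* ... validate placements and emit GPU commands here ... */

        ttm_eu_fence_buffer_objects(val_list, fence_obj);
        return 0;
}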
31661diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h
31662--- a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h 1969-12-31 16:00:00.000000000 -0800
31663+++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h 2009-04-07 13:28:38.000000000 -0700
31664@@ -0,0 +1,110 @@
31665+/**************************************************************************
31666+ *
31667+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
31668+ * All Rights Reserved.
31669+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
31670+ * All Rights Reserved.
31671+ *
31672+ * Permission is hereby granted, free of charge, to any person obtaining a
31673+ * copy of this software and associated documentation files (the
31674+ * "Software"), to deal in the Software without restriction, including
31675+ * without limitation the rights to use, copy, modify, merge, publish,
31676+ * distribute, sub license, and/or sell copies of the Software, and to
31677+ * permit persons to whom the Software is furnished to do so, subject to
31678+ * the following conditions:
31679+ *
31680+ * The above copyright notice and this permission notice (including the
31681+ * next paragraph) shall be included in all copies or substantial portions
31682+ * of the Software.
31683+ *
31684+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
31685+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31686+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
31687+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
31688+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
31689+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
31690+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
31691+ *
31692+ **************************************************************************/
31693+/*
31694+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
31695+ */
31696+
31697+#ifndef _TTM_EXECBUF_UTIL_H_
31698+#define _TTM_EXECBUF_UTIL_H_
31699+
31700+#include "ttm/ttm_bo_api.h"
31701+#include "ttm/ttm_fence_api.h"
31702+#include <linux/list.h>
31703+
31704+/**
31705+ * struct ttm_validate_buffer
31706+ *
31707+ * @head: list head for thread-private list.
31708+ * @bo: refcounted buffer object pointer.
31709+ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
31710+ * adding a new sync object.
31711+ * @reserved: Indicates whether @bo has been reserved for validation.
31712+ */
31713+
31714+struct ttm_validate_buffer {
31715+ struct list_head head;
31716+ struct ttm_buffer_object *bo;
31717+ void *new_sync_obj_arg;
31718+ bool reserved;
31719+};
31720+
31721+/**
31722+ * function ttm_eu_backoff_reservation
31723+ *
31724+ * @list: thread private list of ttm_validate_buffer structs.
31725+ *
31726+ * Undoes all buffer validation reservations for bos pointed to by
31727+ * the list entries.
31728+ */
31729+
31730+extern void ttm_eu_backoff_reservation(struct list_head *list);
31731+
31732+/**
31733+ * function ttm_eu_reserve_buffers
31734+ *
31735+ * @list: thread private list of ttm_validate_buffer structs.
31736+ * @val_seq: A unique sequence number.
31737+ *
31738+ * Tries to reserve bos pointed to by the list entries for validation.
31739+ * If the function returns 0, all buffers are marked as "unfenced",
31740+ * taken off the lru lists and are not synced for write CPU usage.
31741+ *
31742+ * If the function detects a deadlock due to multiple threads trying to
31743+ * reserve the same buffers in reverse order, all threads except one will
31744+ * back off and retry. This function may sleep while waiting for
31745+ * CPU write reservations to be cleared, and for other threads to
31746+ * unreserve their buffers.
31747+ *
31748+ * This function may return -ERESTART or -EAGAIN if the calling process
31749+ * receives a signal while waiting. In that case, no buffers on the list
31750+ * will be reserved upon return.
31751+ *
31752+ * Buffers reserved by this function should be unreserved by
31753+ * a call to either ttm_eu_backoff_reservation() or
31754+ * ttm_eu_fence_buffer_objects() when command submission is complete or
31755+ * has failed.
31756+ */
31757+
31758+extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
31759+
31760+/**
31761+ * function ttm_eu_fence_buffer_objects.
31762+ *
31763+ * @list: thread private list of ttm_validate_buffer structs.
31764+ * @sync_obj: The new sync object for the buffers.
31765+ *
31766+ * This function should be called when command submission is complete, and
31767+ * it will add a new sync object to bos pointed to by entries on @list.
31768+ * It also unreserves all buffers, putting them on lru lists.
31769+ *
31770+ */
31771+
31772+extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
31773+
31774+#endif
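
/*
 * [Editor's sketch, not part of the original patch.] Building the
 * thread-private validation list that the ttm_eu_* helpers above operate
 * on (assumes this header is included). The "my_" names and the choice of
 * TTM_FENCE_TYPE_EXE as the new_sync_obj_arg are illustrative assumptions.
 */
static void my_build_validate_list(struct ttm_validate_buffer *entries,
				   struct ttm_buffer_object **bos,
				   unsigned int num_bos,
				   struct list_head *val_list)
{
	unsigned int i;

	INIT_LIST_HEAD(val_list);
	for (i = 0; i < num_bos; ++i) {
		entries[i].bo = bos[i];
		/* Fence-type mask consumed by the ttm_fence sync-object glue. */
		entries[i].new_sync_obj_arg =
			(void *)(unsigned long)TTM_FENCE_TYPE_EXE;
		entries[i].reserved = false;
		list_add_tail(&entries[i].head, val_list);
	}
}
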
31775diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_api.h b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h
31776--- a/drivers/gpu/drm/psb/ttm/ttm_fence_api.h 1969-12-31 16:00:00.000000000 -0800
31777+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h 2009-04-07 13:28:38.000000000 -0700
31778@@ -0,0 +1,277 @@
31779+/**************************************************************************
31780+ *
31781+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
31782+ * All Rights Reserved.
31783+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
31784+ * All Rights Reserved.
31785+ *
31786+ * Permission is hereby granted, free of charge, to any person obtaining a
31787+ * copy of this software and associated documentation files (the
31788+ * "Software"), to deal in the Software without restriction, including
31789+ * without limitation the rights to use, copy, modify, merge, publish,
31790+ * distribute, sub license, and/or sell copies of the Software, and to
31791+ * permit persons to whom the Software is furnished to do so, subject to
31792+ * the following conditions:
31793+ *
31794+ * The above copyright notice and this permission notice (including the
31795+ * next paragraph) shall be included in all copies or substantial portions
31796+ * of the Software.
31797+ *
31798+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
31799+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31800+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
31801+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
31802+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
31803+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
31804+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
31805+ *
31806+ **************************************************************************/
31807+/*
31808+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
31809+ */
31810+#ifndef _TTM_FENCE_API_H_
31811+#define _TTM_FENCE_API_H_
31812+
31813+#include <linux/list.h>
31814+#include <linux/kref.h>
31815+
31816+#define TTM_FENCE_FLAG_EMIT (1 << 0)
31817+#define TTM_FENCE_TYPE_EXE (1 << 0)
31818+
31819+struct ttm_fence_device;
31820+
31821+/**
31822+ * struct ttm_fence_info
31823+ *
31824+ * @fence_class: The fence class.
31825+ * @fence_type: Bitfield indicating types for this fence.
31826+ * @signaled_types: Bitfield indicating which types are signaled.
31827+ * @error: Last error reported from the device.
31828+ *
31829+ * Used as output from the ttm_fence_get_info() function.
31830+ */
31831+
31832+struct ttm_fence_info {
31833+ uint32_t signaled_types;
31834+ uint32_t error;
31835+};
31836+
31837+/**
31838+ * struct ttm_fence_object
31839+ *
31840+ * @fdev: Pointer to the fence device struct.
31841+ * @kref: Holds the reference count of this fence object.
31842+ * @ring: List head used for the circular list of not-completely
31843+ * signaled fences.
31844+ * @info: Data for fast retrieval using the ttm_fence_get_info()
31845+ * function.
31846+ * @timeout_jiffies: Absolute jiffies value indicating when this fence
31847+ * object times out and, if waited on, calls ttm_fence_lockup
31848+ * to check for and resolve a GPU lockup.
31849+ * @sequence: Fence sequence number.
31850+ * @waiting_types: Types currently waited on.
31851+ * @destroy: Called to free the fence object, when its refcount has
31852+ * reached zero. If NULL, kfree is used.
31853+ *
31854+ * This struct is provided in the driver interface so that drivers can
31855+ * derive from it and create their own fence implementation. All members
31856+ * are private to the fence implementation and the fence driver callbacks.
31857+ * Otherwise a driver may access the derived object using container_of().
31858+ */
31859+
31860+struct ttm_fence_object {
31861+ struct ttm_fence_device *fdev;
31862+ struct kref kref;
31863+ uint32_t fence_class;
31864+ uint32_t fence_type;
31865+
31866+ /*
31867+ * The below fields are protected by the fence class
31868+ * manager spinlock.
31869+ */
31870+
31871+ struct list_head ring;
31872+ struct ttm_fence_info info;
31873+ unsigned long timeout_jiffies;
31874+ uint32_t sequence;
31875+ uint32_t waiting_types;
31876+ void (*destroy) (struct ttm_fence_object *);
31877+};
31878+
31879+/**
31880+ * ttm_fence_object_init
31881+ *
31882+ * @fdev: Pointer to a struct ttm_fence_device.
31883+ * @fence_class: Fence class for this fence.
31884+ * @type: Fence type for this fence.
31885+ * @create_flags: Flags indicating various actions at init time. At this point
31886+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
31887+ * the command stream.
31888+ * @destroy: Destroy function. If NULL, kfree() is used.
31889+ * @fence: The struct ttm_fence_object to initialize.
31890+ *
31891+ * Initialize a pre-allocated fence object. This function, together with the
31892+ * destroy function makes it possible to derive driver-specific fence objects.
31893+ */
31894+
31895+extern int
31896+ttm_fence_object_init(struct ttm_fence_device *fdev,
31897+ uint32_t fence_class,
31898+ uint32_t type,
31899+ uint32_t create_flags,
31900+ void (*destroy) (struct ttm_fence_object * fence),
31901+ struct ttm_fence_object *fence);
31902+
31903+/**
31904+ * ttm_fence_object_create
31905+ *
31906+ * @fdev: Pointer to a struct ttm_fence_device.
31907+ * @fence_class: Fence class for this fence.
31908+ * @type: Fence type for this fence.
31909+ * @create_flags: Flags indicating various actions at init time. At this point
31910+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
31911+ * the command stream.
31912+ * @c_fence: On successful termination, *(@c_fence) will point to the created
31913+ * fence object.
31914+ *
31915+ * Create and initialize a struct ttm_fence_object. The destroy function will
31916+ * be set to kfree().
31917+ */
31918+
31919+extern int
31920+ttm_fence_object_create(struct ttm_fence_device *fdev,
31921+ uint32_t fence_class,
31922+ uint32_t type,
31923+ uint32_t create_flags,
31924+ struct ttm_fence_object **c_fence);
31925+
31926+/**
31927+ * ttm_fence_object_wait
31928+ *
31929+ * @fence: The fence object to wait on.
31930+ * @lazy: Allow sleeps to reduce the cpu-usage if polling.
31931+ * @interruptible: Sleep interruptible when waiting.
31932+ * @type_mask: Wait for the given type_mask to signal.
31933+ *
31934+ * Wait for a fence to signal the given type_mask. The function will
31935+ * perform a fence_flush using type_mask. (See ttm_fence_object_flush).
31936+ *
31937+ * Returns
31938+ * -ERESTART if interrupted by a signal.
31939+ * May return driver-specific error codes if timed-out.
31940+ */
31941+
31942+extern int
31943+ttm_fence_object_wait(struct ttm_fence_object *fence,
31944+ bool lazy, bool interruptible, uint32_t type_mask);
31945+
31946+/**
31947+ * ttm_fence_object_flush
31948+ *
31949+ * @fence: The fence object to flush.
31950+ * @flush_mask: Fence types to flush.
31951+ *
31952+ * Make sure that the given fence eventually signals the
31953+ * types indicated by @flush_mask. Note that this may or may not
31954+ * map to a CPU or GPU flush.
31955+ */
31956+
31957+extern int
31958+ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t flush_mask);
31959+
31960+/**
31961+ * ttm_fence_get_info
31962+ *
31963+ * @fence: The fence object.
31964+ *
31965+ * Copy the info block from the fence while holding relevant locks.
31966+ */
31967+
31968+struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence);
31969+
31970+/**
31971+ * ttm_fence_object_ref
31972+ *
31973+ * @fence: The fence object.
31974+ *
31975+ * Return a ref-counted pointer to the fence object indicated by @fence.
31976+ */
31977+
31978+static inline struct ttm_fence_object *ttm_fence_object_ref(struct
31979+ ttm_fence_object
31980+ *fence)
31981+{
31982+ kref_get(&fence->kref);
31983+ return fence;
31984+}
31985+
31986+/**
31987+ * ttm_fence_object_unref
31988+ *
31989+ * @p_fence: Pointer to a ref-counted pointer to a struct ttm_fence_object.
31990+ *
31991+ * Unreference the fence object pointed to by *(@p_fence), clearing
31992+ * *(p_fence).
31993+ */
31994+
31995+extern void ttm_fence_object_unref(struct ttm_fence_object **p_fence);
31996+
31997+/**
31998+ * ttm_fence_object_signaled
31999+ *
32000+ * @fence: Pointer to the struct ttm_fence_object.
32001+ * @mask: Type mask to check whether signaled.
32002+ *
32003+ * This function checks (without waiting) whether the fence object
32004+ * pointed to by @fence has signaled the types indicated by @mask,
32005+ * and returns 1 if true, 0 if false. This function does NOT perform
32006+ * an implicit fence flush.
32007+ */
32008+
32009+extern bool
32010+ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask);
32011+
32012+/**
32013+ * ttm_fence_class
32014+ *
32015+ * @fence: Pointer to the struct ttm_fence_object.
32016+ *
32017+ * Convenience function that returns the fence class of a struct ttm_fence_object.
32018+ */
32019+
32020+static inline uint32_t ttm_fence_class(const struct ttm_fence_object *fence)
32021+{
32022+ return fence->fence_class;
32023+}
32024+
32025+/**
32026+ * ttm_fence_types
32027+ *
32028+ * @fence: Pointer to the struct ttm_fence_object.
32029+ *
32030+ * Convenience function that returns the fence types of a struct ttm_fence_object.
32031+ */
32032+
32033+static inline uint32_t ttm_fence_types(const struct ttm_fence_object *fence)
32034+{
32035+ return fence->fence_type;
32036+}
32037+
32038+/*
32039+ * The functions below are wrappers to the above functions, with
32040+ * similar names but with sync_obj omitted. These wrappers are intended
32041+ * to be plugged directly into the buffer object driver's sync object
32042+ * API, if the driver chooses to use ttm_fence_objects as buffer object
32043+ * sync objects. In the prototypes below, a sync_obj is cast to a
32044+ * struct ttm_fence_object, whereas a sync_arg is cast to a uint32_t representing
32045+ * a fence_type argument.
32046+ */
32047+
32048+extern bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg);
32049+extern int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
32050+ bool lazy, bool interruptible);
32051+extern int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg);
32052+extern void ttm_fence_sync_obj_unref(void **sync_obj);
32053+extern void *ttm_fence_sync_obj_ref(void *sync_obj);
32054+
32055+#endif
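
/*
 * [Editor's sketch, not part of the original patch.] Typical use of the
 * fence API declared above: a non-blocking check followed by an
 * interruptible, lazy wait for the EXE type. The error-handling policy is
 * a hypothetical example.
 */
static int my_wait_for_exe(struct ttm_fence_object *fence)
{
	/* Cheap check first; does not imply a flush. */
	if (ttm_fence_object_signaled(fence, TTM_FENCE_TYPE_EXE))
		return 0;

	/*
	 * Sleeping, interruptible wait. ttm_fence_object_wait() flushes the
	 * requested types itself and may return -ERESTART on signals.
	 */
	return ttm_fence_object_wait(fence, true, true, TTM_FENCE_TYPE_EXE);
}
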
32056diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence.c b/drivers/gpu/drm/psb/ttm/ttm_fence.c
32057--- a/drivers/gpu/drm/psb/ttm/ttm_fence.c 1969-12-31 16:00:00.000000000 -0800
32058+++ b/drivers/gpu/drm/psb/ttm/ttm_fence.c 2009-04-07 13:28:38.000000000 -0700
32059@@ -0,0 +1,607 @@
32060+/**************************************************************************
32061+ *
32062+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
32063+ * All Rights Reserved.
32064+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
32065+ * All Rights Reserved.
32066+ *
32067+ * Permission is hereby granted, free of charge, to any person obtaining a
32068+ * copy of this software and associated documentation files (the
32069+ * "Software"), to deal in the Software without restriction, including
32070+ * without limitation the rights to use, copy, modify, merge, publish,
32071+ * distribute, sub license, and/or sell copies of the Software, and to
32072+ * permit persons to whom the Software is furnished to do so, subject to
32073+ * the following conditions:
32074+ *
32075+ * The above copyright notice and this permission notice (including the
32076+ * next paragraph) shall be included in all copies or substantial portions
32077+ * of the Software.
32078+ *
32079+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32080+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
32081+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
32082+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
32083+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
32084+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
32085+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
32086+ *
32087+ **************************************************************************/
32088+/*
32089+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
32090+ */
32091+
32092+#include "ttm/ttm_fence_api.h"
32093+#include "ttm/ttm_fence_driver.h"
32094+#include <linux/wait.h>
32095+#include <linux/sched.h>
32096+
32097+#include <drm/drmP.h>
32098+
32099+/*
32100+ * Simple implementation for now.
32101+ */
32102+
32103+static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask)
32104+{
32105+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32106+
32107+ printk(KERN_ERR "GPU lockup detected on engine %u "
32108+ "fence type 0x%08x\n",
32109+ (unsigned int)fence->fence_class, (unsigned int)mask);
32110+ /*
32111+ * Give engines some time to idle?
32112+ */
32113+
32114+ write_lock(&fc->lock);
32115+ ttm_fence_handler(fence->fdev, fence->fence_class,
32116+ fence->sequence, mask, -EBUSY);
32117+ write_unlock(&fc->lock);
32118+}
32119+
32120+/*
32121+ * Convenience function to be called by fence::wait methods that
32122+ * need polling.
32123+ */
32124+
32125+int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy,
32126+ bool interruptible, uint32_t mask)
32127+{
32128+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32129+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
32130+ uint32_t count = 0;
32131+ int ret;
32132+ unsigned long end_jiffies = fence->timeout_jiffies;
32133+
32134+ DECLARE_WAITQUEUE(entry, current);
32135+ add_wait_queue(&fc->fence_queue, &entry);
32136+
32137+ ret = 0;
32138+
32139+ for (;;) {
32140+ __set_current_state((interruptible) ?
32141+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
32142+ if (ttm_fence_object_signaled(fence, mask))
32143+ break;
32144+ if (time_after_eq(jiffies, end_jiffies)) {
32145+ if (driver->lockup)
32146+ driver->lockup(fence, mask);
32147+ else
32148+ ttm_fence_lockup(fence, mask);
32149+ continue;
32150+ }
32151+ if (lazy)
32152+ schedule_timeout(1);
32153+ else if ((++count & 0x0F) == 0) {
32154+ __set_current_state(TASK_RUNNING);
32155+ schedule();
32156+ __set_current_state((interruptible) ?
32157+ TASK_INTERRUPTIBLE :
32158+ TASK_UNINTERRUPTIBLE);
32159+ }
32160+ if (interruptible && signal_pending(current)) {
32161+ ret = -ERESTART;
32162+ break;
32163+ }
32164+ }
32165+ __set_current_state(TASK_RUNNING);
32166+ remove_wait_queue(&fc->fence_queue, &entry);
32167+ return ret;
32168+}
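
/*
 * [Editor's sketch, not part of the original patch.] A driver's
 * ttm_fence_driver::wait callback can simply defer to the polling helper
 * above when no completion interrupt is available for the fence class.
 */
static int my_fence_wait(struct ttm_fence_object *fence, bool lazy,
			 bool interruptible, uint32_t mask)
{
	return ttm_fence_wait_polling(fence, lazy, interruptible, mask);
}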
32169+
32170+/*
32171+ * Typically called by the IRQ handler.
32172+ */
32173+
32174+void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class,
32175+ uint32_t sequence, uint32_t type, uint32_t error)
32176+{
32177+ int wake = 0;
32178+ uint32_t diff;
32179+ uint32_t relevant_type;
32180+ uint32_t new_type;
32181+ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
32182+ const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev);
32183+ struct list_head *head;
32184+ struct ttm_fence_object *fence, *next;
32185+ bool found = false;
32186+
32187+ if (list_empty(&fc->ring))
32188+ return;
32189+
32190+ list_for_each_entry(fence, &fc->ring, ring) {
32191+ diff = (sequence - fence->sequence) & fc->sequence_mask;
32192+ if (diff > fc->wrap_diff) {
32193+ found = true;
32194+ break;
32195+ }
32196+ }
32197+
32198+ fc->waiting_types &= ~type;
32199+ head = (found) ? &fence->ring : &fc->ring;
32200+
32201+ list_for_each_entry_safe_reverse(fence, next, head, ring) {
32202+ if (&fence->ring == &fc->ring)
32203+ break;
32204+
32205+ DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n",
32206+ (unsigned long)fence, fence->sequence,
32207+ fence->fence_type);
32208+
32209+ if (error) {
32210+ fence->info.error = error;
32211+ fence->info.signaled_types = fence->fence_type;
32212+ list_del_init(&fence->ring);
32213+ wake = 1;
32214+ break;
32215+ }
32216+
32217+ relevant_type = type & fence->fence_type;
32218+ new_type = (fence->info.signaled_types | relevant_type) ^
32219+ fence->info.signaled_types;
32220+
32221+ if (new_type) {
32222+ fence->info.signaled_types |= new_type;
32223+ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
32224+ (unsigned long)fence,
32225+ fence->info.signaled_types);
32226+
32227+ if (unlikely(driver->signaled))
32228+ driver->signaled(fence);
32229+
32230+ if (driver->needed_flush)
32231+ fc->pending_flush |=
32232+ driver->needed_flush(fence);
32233+
32234+ if (new_type & fence->waiting_types)
32235+ wake = 1;
32236+ }
32237+
32238+ fc->waiting_types |=
32239+ fence->waiting_types & ~fence->info.signaled_types;
32240+
32241+ if (!(fence->fence_type & ~fence->info.signaled_types)) {
32242+ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
32243+ (unsigned long)fence);
32244+ list_del_init(&fence->ring);
32245+ }
32246+ }
32247+
32248+ /*
32249+ * Reinstate lost waiting types.
32250+ */
32251+
32252+ if ((fc->waiting_types & type) != type) {
32253+ head = head->prev;
32254+ list_for_each_entry(fence, head, ring) {
32255+ if (&fence->ring == &fc->ring)
32256+ break;
32257+ diff =
32258+ (fc->highest_waiting_sequence -
32259+ fence->sequence) & fc->sequence_mask;
32260+ if (diff > fc->wrap_diff)
32261+ break;
32262+
32263+ fc->waiting_types |=
32264+ fence->waiting_types & ~fence->info.signaled_types;
32265+ }
32266+ }
32267+
32268+ if (wake)
32269+ wake_up_all(&fc->fence_queue);
32270+}
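
/*
 * [Editor's sketch, not part of the original patch.] A driver interrupt
 * path reporting a completed sequence to ttm_fence_handler() above, with
 * the class-manager lock held in write mode as required. The hardware
 * sequence read and the "my_" names are assumptions.
 */
static void my_fence_irq(struct ttm_fence_device *fdev,
			 uint32_t fence_class, uint32_t hw_sequence)
{
	struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
	unsigned long irq_flags;

	write_lock_irqsave(&fc->lock, irq_flags);
	ttm_fence_handler(fdev, fence_class, hw_sequence,
			  TTM_FENCE_TYPE_EXE, 0);
	write_unlock_irqrestore(&fc->lock, irq_flags);
}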
32271+
32272+static void ttm_fence_unring(struct ttm_fence_object *fence)
32273+{
32274+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32275+ unsigned long irq_flags;
32276+
32277+ write_lock_irqsave(&fc->lock, irq_flags);
32278+ list_del_init(&fence->ring);
32279+ write_unlock_irqrestore(&fc->lock, irq_flags);
32280+}
32281+
32282+bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask)
32283+{
32284+ unsigned long flags;
32285+ bool signaled;
32286+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
32287+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32288+
32289+ mask &= fence->fence_type;
32290+ read_lock_irqsave(&fc->lock, flags);
32291+ signaled = (mask & fence->info.signaled_types) == mask;
32292+ read_unlock_irqrestore(&fc->lock, flags);
32293+ if (!signaled && driver->poll) {
32294+ write_lock_irqsave(&fc->lock, flags);
32295+ driver->poll(fence->fdev, fence->fence_class, mask);
32296+ signaled = (mask & fence->info.signaled_types) == mask;
32297+ write_unlock_irqrestore(&fc->lock, flags);
32298+ }
32299+ return signaled;
32300+}
32301+
32302+int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type)
32303+{
32304+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
32305+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32306+ unsigned long irq_flags;
32307+ uint32_t saved_pending_flush;
32308+ uint32_t diff;
32309+ bool call_flush;
32310+
32311+ if (type & ~fence->fence_type) {
32312+ DRM_ERROR("Flush trying to extend fence type, "
32313+ "0x%x, 0x%x\n", type, fence->fence_type);
32314+ return -EINVAL;
32315+ }
32316+
32317+ write_lock_irqsave(&fc->lock, irq_flags);
32318+ fence->waiting_types |= type;
32319+ fc->waiting_types |= fence->waiting_types;
32320+ diff = (fence->sequence - fc->highest_waiting_sequence) &
32321+ fc->sequence_mask;
32322+
32323+ if (diff < fc->wrap_diff)
32324+ fc->highest_waiting_sequence = fence->sequence;
32325+
32326+ /*
32327+ * fence->waiting_types has changed. Determine whether
32328+ * we need to initiate some kind of flush as a result of this.
32329+ */
32330+
32331+ saved_pending_flush = fc->pending_flush;
32332+ if (driver->needed_flush)
32333+ fc->pending_flush |= driver->needed_flush(fence);
32334+
32335+ if (driver->poll)
32336+ driver->poll(fence->fdev, fence->fence_class,
32337+ fence->waiting_types);
32338+
32339+ call_flush = (fc->pending_flush != 0);
32340+ write_unlock_irqrestore(&fc->lock, irq_flags);
32341+
32342+ if (call_flush && driver->flush)
32343+ driver->flush(fence->fdev, fence->fence_class);
32344+
32345+ return 0;
32346+}
32347+
32348+/*
32349+ * Make sure old fence objects are signaled before their fence sequences are
32350+ * wrapped around and reused.
32351+ */
32352+
32353+void ttm_fence_flush_old(struct ttm_fence_device *fdev,
32354+ uint32_t fence_class, uint32_t sequence)
32355+{
32356+ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
32357+ struct ttm_fence_object *fence;
32358+ unsigned long irq_flags;
32359+ const struct ttm_fence_driver *driver = fdev->driver;
32360+ bool call_flush;
32361+
32362+ uint32_t diff;
32363+
32364+ write_lock_irqsave(&fc->lock, irq_flags);
32365+
32366+ list_for_each_entry_reverse(fence, &fc->ring, ring) {
32367+ diff = (sequence - fence->sequence) & fc->sequence_mask;
32368+ if (diff <= fc->flush_diff)
32369+ break;
32370+
32371+ fence->waiting_types = fence->fence_type;
32372+ fc->waiting_types |= fence->fence_type;
32373+
32374+ if (driver->needed_flush)
32375+ fc->pending_flush |= driver->needed_flush(fence);
32376+ }
32377+
32378+ if (driver->poll)
32379+ driver->poll(fdev, fence_class, fc->waiting_types);
32380+
32381+ call_flush = (fc->pending_flush != 0);
32382+ write_unlock_irqrestore(&fc->lock, irq_flags);
32383+
32384+ if (call_flush && driver->flush)
32385+ driver->flush(fdev, fence->fence_class);
32386+
32387+ /*
32388+ * FIXME: Should we implement a wait here for really old fences?
32389+ */
32390+
32391+}
32392+
32393+int ttm_fence_object_wait(struct ttm_fence_object *fence,
32394+ bool lazy, bool interruptible, uint32_t mask)
32395+{
32396+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
32397+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32398+ int ret = 0;
32399+ unsigned long timeout;
32400+ unsigned long cur_jiffies;
32401+ unsigned long to_jiffies;
32402+
32403+ if (mask & ~fence->fence_type) {
32404+ DRM_ERROR("Wait trying to extend fence type"
32405+ " 0x%08x 0x%08x\n", mask, fence->fence_type);
32406+ BUG();
32407+ return -EINVAL;
32408+ }
32409+
32410+ if (driver->wait)
32411+ return driver->wait(fence, lazy, interruptible, mask);
32412+
32413+ ttm_fence_object_flush(fence, mask);
32414+ retry:
32415+ if (!driver->has_irq ||
32416+ driver->has_irq(fence->fdev, fence->fence_class, mask)) {
32417+
32418+ cur_jiffies = jiffies;
32419+ to_jiffies = fence->timeout_jiffies;
32420+
32421+ timeout = (time_after(to_jiffies, cur_jiffies)) ?
32422+ to_jiffies - cur_jiffies : 1;
32423+
32424+ if (interruptible)
32425+ ret = wait_event_interruptible_timeout
32426+ (fc->fence_queue,
32427+ ttm_fence_object_signaled(fence, mask), timeout);
32428+ else
32429+ ret = wait_event_timeout
32430+ (fc->fence_queue,
32431+ ttm_fence_object_signaled(fence, mask), timeout);
32432+
32433+ if (unlikely(ret == -ERESTARTSYS))
32434+ return -ERESTART;
32435+
32436+ if (unlikely(ret == 0)) {
32437+ if (driver->lockup)
32438+ driver->lockup(fence, mask);
32439+ else
32440+ ttm_fence_lockup(fence, mask);
32441+ goto retry;
32442+ }
32443+
32444+ return 0;
32445+ }
32446+
32447+ return ttm_fence_wait_polling(fence, lazy, interruptible, mask);
32448+}
32449+
32450+int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags,
32451+ uint32_t fence_class, uint32_t type)
32452+{
32453+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
32454+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32455+ unsigned long flags;
32456+ uint32_t sequence;
32457+ unsigned long timeout;
32458+ int ret;
32459+
32460+ ttm_fence_unring(fence);
32461+ ret = driver->emit(fence->fdev,
32462+ fence_class, fence_flags, &sequence, &timeout);
32463+ if (ret)
32464+ return ret;
32465+
32466+ write_lock_irqsave(&fc->lock, flags);
32467+ fence->fence_class = fence_class;
32468+ fence->fence_type = type;
32469+ fence->waiting_types = 0;
32470+ fence->info.signaled_types = 0;
32471+ fence->info.error = 0;
32472+ fence->sequence = sequence;
32473+ fence->timeout_jiffies = timeout;
32474+ if (list_empty(&fc->ring))
32475+ fc->highest_waiting_sequence = sequence - 1;
32476+ list_add_tail(&fence->ring, &fc->ring);
32477+ fc->latest_queued_sequence = sequence;
32478+ write_unlock_irqrestore(&fc->lock, flags);
32479+ return 0;
32480+}
32481+
32482+int ttm_fence_object_init(struct ttm_fence_device *fdev,
32483+ uint32_t fence_class,
32484+ uint32_t type,
32485+ uint32_t create_flags,
32486+ void (*destroy) (struct ttm_fence_object *),
32487+ struct ttm_fence_object *fence)
32488+{
32489+ int ret = 0;
32490+
32491+ kref_init(&fence->kref);
32492+ fence->fence_class = fence_class;
32493+ fence->fence_type = type;
32494+ fence->info.signaled_types = 0;
32495+ fence->waiting_types = 0;
32496+ fence->sequence = 0;
32497+ fence->info.error = 0;
32498+ fence->fdev = fdev;
32499+ fence->destroy = destroy;
32500+ INIT_LIST_HEAD(&fence->ring);
32501+ atomic_inc(&fdev->count);
32502+
32503+ if (create_flags & TTM_FENCE_FLAG_EMIT) {
32504+ ret = ttm_fence_object_emit(fence, create_flags,
32505+ fence->fence_class, type);
32506+ }
32507+
32508+ return ret;
32509+}
32510+
32511+int ttm_fence_object_create(struct ttm_fence_device *fdev,
32512+ uint32_t fence_class,
32513+ uint32_t type,
32514+ uint32_t create_flags,
32515+ struct ttm_fence_object **c_fence)
32516+{
32517+ struct ttm_fence_object *fence;
32518+ int ret;
32519+
32520+ ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*fence), false, false, false);
32521+ if (unlikely(ret != 0)) {
32522+ printk(KERN_ERR "Out of memory creating fence object\n");
32523+ return ret;
32524+ }
32525+
32526+ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
32527+ if (!fence) {
32528+ printk(KERN_ERR "Out of memory creating fence object\n");
32529+ ttm_mem_global_free(fdev->mem_glob, sizeof(*fence), false);
32530+ return -ENOMEM;
32531+ }
32532+
32533+ ret = ttm_fence_object_init(fdev, fence_class, type,
32534+ create_flags, NULL, fence);
32535+ if (ret) {
32536+ ttm_fence_object_unref(&fence);
32537+ return ret;
32538+ }
32539+ *c_fence = fence;
32540+
32541+ return 0;
32542+}
32543+
32544+static void ttm_fence_object_destroy(struct kref *kref)
32545+{
32546+ struct ttm_fence_object *fence =
32547+ container_of(kref, struct ttm_fence_object, kref);
32548+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32549+ unsigned long irq_flags;
32550+
32551+ write_lock_irqsave(&fc->lock, irq_flags);
32552+ list_del_init(&fence->ring);
32553+ write_unlock_irqrestore(&fc->lock, irq_flags);
32554+
32555+ atomic_dec(&fence->fdev->count);
32556+ if (fence->destroy)
32557+ fence->destroy(fence);
32558+ else {
32559+ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*fence), false);
32560+ kfree(fence);
32561+ }
32562+}
32563+
32564+void ttm_fence_device_release(struct ttm_fence_device *fdev)
32565+{
32566+ kfree(fdev->fence_class);
32567+}
32568+
32569+int
32570+ttm_fence_device_init(int num_classes,
32571+ struct ttm_mem_global *mem_glob,
32572+ struct ttm_fence_device *fdev,
32573+ const struct ttm_fence_class_init *init,
32574+ bool replicate_init, const struct ttm_fence_driver *driver)
32575+{
32576+ struct ttm_fence_class_manager *fc;
32577+ const struct ttm_fence_class_init *fci;
32578+ int i;
32579+
32580+ fdev->mem_glob = mem_glob;
32581+ fdev->fence_class = kzalloc(num_classes *
32582+ sizeof(*fdev->fence_class), GFP_KERNEL);
32583+
32584+ if (unlikely(!fdev->fence_class))
32585+ return -ENOMEM;
32586+
32587+ fdev->num_classes = num_classes;
32588+ atomic_set(&fdev->count, 0);
32589+ fdev->driver = driver;
32590+
32591+ for (i = 0; i < fdev->num_classes; ++i) {
32592+ fc = &fdev->fence_class[i];
32593+ fci = &init[(replicate_init) ? 0 : i];
32594+
32595+ fc->wrap_diff = fci->wrap_diff;
32596+ fc->flush_diff = fci->flush_diff;
32597+ fc->sequence_mask = fci->sequence_mask;
32598+
32599+ rwlock_init(&fc->lock);
32600+ INIT_LIST_HEAD(&fc->ring);
32601+ init_waitqueue_head(&fc->fence_queue);
32602+ }
32603+
32604+ return 0;
32605+}
32606+
32607+struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence)
32608+{
32609+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32610+ struct ttm_fence_info tmp;
32611+ unsigned long irq_flags;
32612+
32613+ read_lock_irqsave(&fc->lock, irq_flags);
32614+ tmp = fence->info;
32615+ read_unlock_irqrestore(&fc->lock, irq_flags);
32616+
32617+ return tmp;
32618+}
32619+
32620+void ttm_fence_object_unref(struct ttm_fence_object **p_fence)
32621+{
32622+ struct ttm_fence_object *fence = *p_fence;
32623+
32624+ *p_fence = NULL;
32625+ (void)kref_put(&fence->kref, &ttm_fence_object_destroy);
32626+}
32627+
32628+/*
32629+ * Placement / BO sync object glue.
32630+ */
32631+
32632+bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg)
32633+{
32634+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
32635+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
32636+
32637+ return ttm_fence_object_signaled(fence, fence_types);
32638+}
32639+
32640+int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
32641+ bool lazy, bool interruptible)
32642+{
32643+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
32644+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
32645+
32646+ return ttm_fence_object_wait(fence, lazy, interruptible, fence_types);
32647+}
32648+
32649+int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg)
32650+{
32651+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
32652+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
32653+
32654+ return ttm_fence_object_flush(fence, fence_types);
32655+}
32656+
32657+void ttm_fence_sync_obj_unref(void **sync_obj)
32658+{
32659+ ttm_fence_object_unref((struct ttm_fence_object **)sync_obj);
32660+}
32661+
32662+void *ttm_fence_sync_obj_ref(void *sync_obj)
32663+{
32664+ return (void *)
32665+ ttm_fence_object_ref((struct ttm_fence_object *)sync_obj);
32666+}
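
/*
 * [Editor's sketch, not part of the original patch.] Plugging the
 * sync-object wrappers above into a buffer-object driver, as suggested by
 * the comment preceding them. The exact ttm_bo_driver member names are
 * taken from this psb TTM variant and should be treated as assumptions.
 */
static struct ttm_bo_driver my_bo_driver = {
	/* ... other callbacks omitted ... */
	.sync_obj_signaled = ttm_fence_sync_obj_signaled,
	.sync_obj_wait = ttm_fence_sync_obj_wait,
	.sync_obj_flush = ttm_fence_sync_obj_flush,
	.sync_obj_unref = ttm_fence_sync_obj_unref,
	.sync_obj_ref = ttm_fence_sync_obj_ref,
};
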
32667diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h
32668--- a/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h 1969-12-31 16:00:00.000000000 -0800
32669+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h 2009-04-07 13:28:38.000000000 -0700
32670@@ -0,0 +1,309 @@
32671+/**************************************************************************
32672+ *
32673+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
32674+ * All Rights Reserved.
32675+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
32676+ * All Rights Reserved.
32677+ *
32678+ * Permission is hereby granted, free of charge, to any person obtaining a
32679+ * copy of this software and associated documentation files (the
32680+ * "Software"), to deal in the Software without restriction, including
32681+ * without limitation the rights to use, copy, modify, merge, publish,
32682+ * distribute, sub license, and/or sell copies of the Software, and to
32683+ * permit persons to whom the Software is furnished to do so, subject to
32684+ * the following conditions:
32685+ *
32686+ * The above copyright notice and this permission notice (including the
32687+ * next paragraph) shall be included in all copies or substantial portions
32688+ * of the Software.
32689+ *
32690+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32691+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
32692+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
32693+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
32694+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
32695+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
32696+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
32697+ *
32698+ **************************************************************************/
32699+/*
32700+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
32701+ */
32702+#ifndef _TTM_FENCE_DRIVER_H_
32703+#define _TTM_FENCE_DRIVER_H_
32704+
32705+#include <linux/kref.h>
32706+#include <linux/spinlock.h>
32707+#include <linux/wait.h>
32708+#include "ttm_fence_api.h"
32709+#include "ttm_memory.h"
32710+
32711+/** @file ttm_fence_driver.h
32712+ *
32713+ * Definitions needed for a driver implementing the
32714+ * ttm_fence subsystem.
32715+ */
32716+
32717+/**
32718+ * struct ttm_fence_class_manager:
32719+ *
32720+ * @wrap_diff: Sequence difference to catch 32-bit wrapping.
32721+ * if (seqa - seqb) > @wrap_diff, then seqa < seqb.
32722+ * @flush_diff: Sequence difference to trigger fence flush.
32723+ * if (cur_seq - seqa) > @flush_diff, then consider fence object with
32724+ * seqa as old and needing a flush.
32725+ * @sequence_mask: Mask of valid bits in a fence sequence.
32726+ * @lock: Lock protecting this struct as well as fence objects
32727+ * associated with this struct.
32728+ * @ring: Circular sequence-ordered list of fence objects.
32729+ * @pending_flush: Fence types currently needing a flush.
32730+ * @waiting_types: Fence types that are currently waited for.
32731+ * @fence_queue: Queue of waiters on fences belonging to this fence class.
32732+ * @highest_waiting_sequence: Sequence number of the fence with highest sequence
32733+ * number and that is waited for.
32734+ * @latest_queued_sequence: Sequence number of the fence latest queued on the ring.
32735+ */
32736+
32737+struct ttm_fence_class_manager {
32738+
32739+ /*
32740+ * Unprotected constant members.
32741+ */
32742+
32743+ uint32_t wrap_diff;
32744+ uint32_t flush_diff;
32745+ uint32_t sequence_mask;
32746+
32747+ /*
32748+ * The rwlock protects this structure as well as
32749+ * the data in all fence objects belonging to this
32750+ * class. This should be OK as most fence objects are
32751+ * only read from once they're created.
32752+ */
32753+
32754+ rwlock_t lock;
32755+ struct list_head ring;
32756+ uint32_t pending_flush;
32757+ uint32_t waiting_types;
32758+ wait_queue_head_t fence_queue;
32759+ uint32_t highest_waiting_sequence;
32760+ uint32_t latest_queued_sequence;
32761+};
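
/*
 * [Editor's sketch, not part of the original patch.] The wrap-around
 * ordering rule documented above, written out as a helper: with sequences
 * confined to @sequence_mask, seqa is considered older than seqb when the
 * masked difference exceeds @wrap_diff.
 */
static inline bool my_seq_older(uint32_t seqa, uint32_t seqb,
				uint32_t sequence_mask, uint32_t wrap_diff)
{
	return ((seqa - seqb) & sequence_mask) > wrap_diff;
}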
32762+
32763+/**
32764+ * struct ttm_fence_device
32765+ *
32766+ * @fence_class: Array of fence class managers.
32767+ * @num_classes: Array dimension of @fence_class.
32768+ * @count: Current number of fence objects for statistics.
32769+ * @driver: Driver struct.
32770+ *
32771+ * Provided in the driver interface so that the driver can derive
32772+ * from this struct for its driver_private, and accordingly
32773+ * access the driver_private from the fence driver callbacks.
32774+ *
32775+ * All members except "count" are initialized at creation and
32776+ * never touched after that. No protection needed.
32777+ *
32778+ * This struct is private to the fence implementation and to the fence
32779+ * driver callbacks, and may otherwise be used by drivers only to
32780+ * obtain the derived device_private object using container_of().
32781+ */
32782+
32783+struct ttm_fence_device {
32784+ struct ttm_mem_global *mem_glob;
32785+ struct ttm_fence_class_manager *fence_class;
32786+ uint32_t num_classes;
32787+ atomic_t count;
32788+ const struct ttm_fence_driver *driver;
32789+};
32790+
32791+/**
32792+ * struct ttm_fence_class_init
32793+ *
32794+ * @wrap_diff: Fence sequence number wrap indicator. If
32795+ * (sequence1 - sequence2) > @wrap_diff, then sequence1 is
32796+ * considered to be older than sequence2.
32797+ * @flush_diff: Fence sequence number flush indicator.
32798+ * If a non-completely-signaled fence has a fence sequence number
32799+ * sequence1 and (sequence1 - current_emit_sequence) > @flush_diff,
32800+ * the fence is considered too old and it will be flushed upon the
32801+ * next call of ttm_fence_flush_old(), to make sure no fences with
32802+ * stale sequence numbers remains unsignaled. @flush_diff should
32803+ * be sufficiently less than @wrap_diff.
32804+ * @sequence_mask: Mask with valid bits of the fence sequence
32805+ * number set to 1.
32806+ *
32807+ * This struct is used as input to ttm_fence_device_init.
32808+ */
32809+
32810+struct ttm_fence_class_init {
32811+ uint32_t wrap_diff;
32812+ uint32_t flush_diff;
32813+ uint32_t sequence_mask;
32814+};
32815+
32816+/**
32817+ * struct ttm_fence_driver
32818+ *
32819+ * @has_irq: Called by a potential waiter. Should return 1 if a
32820+ * fence object with indicated parameters is expected to signal
32821+ * automatically, and 0 if the fence implementation needs to
32822+ * repeatedly call @poll to make it signal.
32823+ * @emit: Make sure a fence with the given parameters is
32824+ * present in the indicated command stream. Return its sequence number
32825+ * in "breadcrumb".
32826+ * @poll: Check and report sequences of the given "fence_class"
32827+ * that have signaled "types".
32828+ * @flush: Make sure that the types indicated by the bitfield
32829+ * ttm_fence_class_manager::pending_flush will eventually
32830+ * signal. These bits have been put together using the
32831+ * result from the needed_flush function described below.
32832+ * @needed_flush: Given the fence_class and fence_types indicated by
32833+ * "fence", and the last received fence sequence of this
32834+ * fence class, indicate what types need a fence flush to
32835+ * signal. Return as a bitfield.
32836+ * @wait: Set to non-NULL if the driver wants to override the fence
32837+ * wait implementation. Return 0 on success, -EBUSY on failure,
32838+ * and -ERESTART if interruptible and a signal is pending.
32839+ * @signaled: Driver callback that is called whenever a
32840+ * ttm_fence_object::signaled_types has changed status.
32841+ * This function is called from atomic context,
32842+ * with the ttm_fence_class_manager::lock held in write mode.
32843+ * @lockup: Driver callback that is called whenever a wait has exceeded
32844+ * the lifetime of a fence object.
32845+ * If there is a GPU lockup,
32846+ * this function should, if possible, reset the GPU,
32847+ * call the ttm_fence_handler with an error status, and
32848+ * return. If no lockup was detected, simply extend the
32849+ * fence timeout_jiffies and return. The driver might
32850+ * want to protect the lockup check with a mutex and cache a
32851+ * non-locked-up status for a while to avoid an excessive
32852+ * amount of lockup checks from every waiting thread.
32853+ */
32854+
32855+struct ttm_fence_driver {
32856+ bool (*has_irq) (struct ttm_fence_device * fdev,
32857+ uint32_t fence_class, uint32_t flags);
32858+ int (*emit) (struct ttm_fence_device * fdev,
32859+ uint32_t fence_class,
32860+ uint32_t flags,
32861+ uint32_t * breadcrumb, unsigned long *timeout_jiffies);
32862+ void (*flush) (struct ttm_fence_device * fdev, uint32_t fence_class);
32863+ void (*poll) (struct ttm_fence_device * fdev,
32864+ uint32_t fence_class, uint32_t types);
32865+ uint32_t(*needed_flush)
32866+ (struct ttm_fence_object * fence);
32867+ int (*wait) (struct ttm_fence_object * fence, bool lazy,
32868+ bool interruptible, uint32_t mask);
32869+ void (*signaled) (struct ttm_fence_object * fence);
32870+ void (*lockup) (struct ttm_fence_object * fence, uint32_t fence_types);
32871+};
32872+
32873+/**
32874+ * function ttm_fence_device_init
32875+ *
32876+ * @num_classes: Number of fence classes for this fence implementation.
32877+ * @mem_global: Pointer to the global memory accounting info.
32878+ * @fdev: Pointer to an uninitialised struct ttm_fence_device.
32879+ * @init: Array of initialization info for each fence class.
32880+ * @replicate_init: Use the first @init initialization info for all classes.
32881+ * @driver: Driver callbacks.
32882+ *
32883+ * Initialize a struct ttm_fence_device structure. Returns -ENOMEM if
32884+ * out-of-memory. Otherwise returns 0.
32885+ */
32886+extern int
32887+ttm_fence_device_init(int num_classes,
32888+ struct ttm_mem_global *mem_glob,
32889+ struct ttm_fence_device *fdev,
32890+ const struct ttm_fence_class_init *init,
32891+ bool replicate_init,
32892+ const struct ttm_fence_driver *driver);
32893+
32894+/**
32895+ * function ttm_fence_device_release
32896+ *
32897+ * @fdev: Pointer to the fence device.
32898+ *
32899+ * Release all resources held by a fence device. Note that before
32900+ * this function is called, the caller must have made sure all fence
32901+ * objects belonging to this fence device are completely signaled.
32902+ */
32903+
32904+extern void ttm_fence_device_release(struct ttm_fence_device *fdev);
32905+
32906+/**
32907+ * ttm_fence_handler - the fence handler.
32908+ *
32909+ * @fdev: Pointer to the fence device.
32910+ * @fence_class: Fence class that signals.
32911+ * @sequence: Signaled sequence.
32912+ * @type: Types that signal.
32913+ * @error: Error from the engine.
32914+ *
32915+ * This function signals all fences with a sequence previous to the
32916+ * @sequence argument, and belonging to @fence_class. The signaled fence
32917+ * types are provided in @type. If error is non-zero, the error member
32918+ * of the fence with sequence = @sequence is set to @error. This value
32919+ * may be reported back to user-space, indicating, for example, an illegal
32920+ * 3D command or illegal mpeg data.
32921+ *
32922+ * This function is typically called from the driver::poll method when the
32923+ * command sequence preceding the fence marker has executed. It should be
32924+ * called with the ttm_fence_class_manager::lock held in write mode and
32925+ * may be called from interrupt context.
32926+ */
32927+
32928+extern void
32929+ttm_fence_handler(struct ttm_fence_device *fdev,
32930+ uint32_t fence_class,
32931+ uint32_t sequence, uint32_t type, uint32_t error);
32932+
32933+/**
32934+ * ttm_fence_driver_from_dev
32935+ *
32936+ * @fdev: The ttm fence device.
32937+ *
32938+ * Returns a pointer to the fence driver struct.
32939+ */
32940+
32941+static inline const struct ttm_fence_driver *ttm_fence_driver_from_dev(struct
32942+ ttm_fence_device
32943+ *fdev)
32944+{
32945+ return fdev->driver;
32946+}
32947+
32948+/**
32949+ * ttm_fence_driver
32950+ *
32951+ * @fence: Pointer to a ttm fence object.
32952+ *
32953+ * Returns a pointer to the fence driver struct.
32954+ */
32955+
32956+static inline const struct ttm_fence_driver *ttm_fence_driver(struct
32957+ ttm_fence_object
32958+ *fence)
32959+{
32960+ return ttm_fence_driver_from_dev(fence->fdev);
32961+}
32962+
32963+/**
32964+ * ttm_fence_fc
32965+ *
32966+ * @fence: Pointer to a ttm fence object.
32967+ *
32968+ * Returns a pointer to the struct ttm_fence_class_manager for the
32969+ * fence class of @fence.
32970+ */
32971+
32972+static inline struct ttm_fence_class_manager *ttm_fence_fc(struct
32973+ ttm_fence_object
32974+ *fence)
32975+{
32976+ return &fence->fdev->fence_class[fence->fence_class];
32977+}
32978+
32979+#endif
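
/*
 * [Editor's sketch, not part of the original patch.] Initializing a
 * single-class fence device with the interface above. The 24-bit sequence
 * space, the flush margin and "my_fence_driver" are illustrative
 * assumptions; only the ttm_fence_device_init() signature comes from this
 * header.
 */
static const struct ttm_fence_class_init my_fence_class_init = {
	.wrap_diff = (1 << 23),		/* Half of a 24-bit sequence space. */
	.flush_diff = (1 << 22),	/* Flush well before wrap_diff is hit. */
	.sequence_mask = 0x00ffffff,
};

static int my_fence_device_setup(struct ttm_fence_device *fdev,
				 struct ttm_mem_global *mem_glob,
				 const struct ttm_fence_driver *my_fence_driver)
{
	/* One class, so the single init entry is simply replicated. */
	return ttm_fence_device_init(1, mem_glob, fdev,
				     &my_fence_class_init, true,
				     my_fence_driver);
}
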
32980diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_user.c b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c
32981--- a/drivers/gpu/drm/psb/ttm/ttm_fence_user.c 1969-12-31 16:00:00.000000000 -0800
32982+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c 2009-04-07 13:28:38.000000000 -0700
32983@@ -0,0 +1,242 @@
32984+/**************************************************************************
32985+ *
32986+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
32987+ * All Rights Reserved.
32988+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
32989+ * All Rights Reserved.
32990+ *
32991+ * Permission is hereby granted, free of charge, to any person obtaining a
32992+ * copy of this software and associated documentation files (the
32993+ * "Software"), to deal in the Software without restriction, including
32994+ * without limitation the rights to use, copy, modify, merge, publish,
32995+ * distribute, sub license, and/or sell copies of the Software, and to
32996+ * permit persons to whom the Software is furnished to do so, subject to
32997+ * the following conditions:
32998+ *
32999+ * The above copyright notice and this permission notice (including the
33000+ * next paragraph) shall be included in all copies or substantial portions
33001+ * of the Software.
33002+ *
33003+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33004+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33005+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33006+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33007+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33008+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33009+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33010+ *
33011+ **************************************************************************/
33012+/*
33013+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
33014+ */
33015+
33016+#include <drm/drmP.h>
33017+#include "ttm/ttm_fence_user.h"
33018+#include "ttm/ttm_object.h"
33019+#include "ttm/ttm_fence_driver.h"
33020+#include "ttm/ttm_userobj_api.h"
33021+
33022+/**
33023+ * struct ttm_fence_user_object
33024+ *
33025+ * @base: The base object used for user-space visibility and refcounting.
33026+ *
33027+ * @fence: The fence object itself.
33028+ *
33029+ */
33030+
33031+struct ttm_fence_user_object {
33032+ struct ttm_base_object base;
33033+ struct ttm_fence_object fence;
33034+};
33035+
33036+static struct ttm_fence_user_object *ttm_fence_user_object_lookup(struct
33037+ ttm_object_file
33038+ *tfile,
33039+ uint32_t
33040+ handle)
33041+{
33042+ struct ttm_base_object *base;
33043+
33044+ base = ttm_base_object_lookup(tfile, handle);
33045+ if (unlikely(base == NULL)) {
33046+ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
33047+ (unsigned long)handle);
33048+ return NULL;
33049+ }
33050+
33051+ if (unlikely(base->object_type != ttm_fence_type)) {
33052+ ttm_base_object_unref(&base);
33053+ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
33054+ (unsigned long)handle);
33055+ return NULL;
33056+ }
33057+
33058+ return container_of(base, struct ttm_fence_user_object, base);
33059+}
33060+
33061+/*
33062+ * The fence object destructor.
33063+ */
33064+
33065+static void ttm_fence_user_destroy(struct ttm_fence_object *fence)
33066+{
33067+ struct ttm_fence_user_object *ufence =
33068+ container_of(fence, struct ttm_fence_user_object, fence);
33069+
33070+ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence), false);
33071+ kfree(ufence);
33072+}
33073+
33074+/*
33075+ * The base object destructor. We basically only unreference the
33076+ * attached fence object.
33077+ */
33078+
33079+static void ttm_fence_user_release(struct ttm_base_object **p_base)
33080+{
33081+ struct ttm_fence_user_object *ufence;
33082+ struct ttm_base_object *base = *p_base;
33083+ struct ttm_fence_object *fence;
33084+
33085+ *p_base = NULL;
33086+
33087+ if (unlikely(base == NULL))
33088+ return;
33089+
33090+ ufence = container_of(base, struct ttm_fence_user_object, base);
33091+ fence = &ufence->fence;
33092+ ttm_fence_object_unref(&fence);
33093+}
33094+
33095+int
33096+ttm_fence_user_create(struct ttm_fence_device *fdev,
33097+ struct ttm_object_file *tfile,
33098+ uint32_t fence_class,
33099+ uint32_t fence_types,
33100+ uint32_t create_flags,
33101+ struct ttm_fence_object **fence, uint32_t * user_handle)
33102+{
33103+ int ret;
33104+ struct ttm_fence_object *tmp;
33105+ struct ttm_fence_user_object *ufence;
33106+
33107+ ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*ufence), false, false, false);
33108+ if (unlikely(ret != 0))
33109+ return -ENOMEM;
33110+
33111+ ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
33112+ if (unlikely(ufence == NULL)) {
33113+ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
33114+ return -ENOMEM;
33115+ }
33116+
33117+ ret = ttm_fence_object_init(fdev,
33118+ fence_class,
33119+ fence_types, create_flags,
33120+ &ttm_fence_user_destroy, &ufence->fence);
33121+
33122+ if (unlikely(ret != 0))
33123+ goto out_err0;
33124+
33125+ /*
33126+ * One fence ref is held by the fence ptr we return.
33127+ * The other one by the base object. Need to up the
33128+ * fence refcount before we publish this object to
33129+ * user-space.
33130+ */
33131+
33132+ tmp = ttm_fence_object_ref(&ufence->fence);
33133+ ret = ttm_base_object_init(tfile, &ufence->base,
33134+ false, ttm_fence_type,
33135+ &ttm_fence_user_release, NULL);
33136+
33137+ if (unlikely(ret != 0))
33138+ goto out_err1;
33139+
33140+ *fence = &ufence->fence;
33141+ *user_handle = ufence->base.hash.key;
33142+
33143+ return 0;
33144+ out_err1:
33145+ ttm_fence_object_unref(&tmp);
33146+ tmp = &ufence->fence;
33147+ ttm_fence_object_unref(&tmp);
33148+ return ret;
33149+ out_err0:
33150+ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
33151+ kfree(ufence);
33152+ return ret;
33153+}
33154+
33155+int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data)
33156+{
33157+ int ret;
33158+ union ttm_fence_signaled_arg *arg = data;
33159+ struct ttm_fence_object *fence;
33160+ struct ttm_fence_info info;
33161+ struct ttm_fence_user_object *ufence;
33162+ struct ttm_base_object *base;
33163+ ret = 0;
33164+
33165+ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
33166+ if (unlikely(ufence == NULL))
33167+ return -EINVAL;
33168+
33169+ fence = &ufence->fence;
33170+
33171+ if (arg->req.flush) {
33172+ ret = ttm_fence_object_flush(fence, arg->req.fence_type);
33173+ if (unlikely(ret != 0))
33174+ goto out;
33175+ }
33176+
33177+ info = ttm_fence_get_info(fence);
33178+ arg->rep.signaled_types = info.signaled_types;
33179+ arg->rep.fence_error = info.error;
33180+
33181+ out:
33182+ base = &ufence->base;
33183+ ttm_base_object_unref(&base);
33184+ return ret;
33185+}
33186+
33187+int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data)
33188+{
33189+ int ret;
33190+ union ttm_fence_finish_arg *arg = data;
33191+ struct ttm_fence_user_object *ufence;
33192+ struct ttm_base_object *base;
33193+ struct ttm_fence_object *fence;
33194+ ret = 0;
33195+
33196+ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
33197+ if (unlikely(ufence == NULL))
33198+ return -EINVAL;
33199+
33200+ fence = &ufence->fence;
33201+
33202+ ret = ttm_fence_object_wait(fence,
33203+ arg->req.mode & TTM_FENCE_FINISH_MODE_LAZY,
33204+ true, arg->req.fence_type);
33205+ if (likely(ret == 0)) {
33206+ struct ttm_fence_info info = ttm_fence_get_info(fence);
33207+
33208+ arg->rep.signaled_types = info.signaled_types;
33209+ arg->rep.fence_error = info.error;
33210+ }
33211+
33212+ base = &ufence->base;
33213+ ttm_base_object_unref(&base);
33214+
33215+ return ret;
33216+}
33217+
33218+int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data)
33219+{
33220+ struct ttm_fence_unref_arg *arg = data;
33221+ int ret = 0;
33222+
33223+ ret = ttm_ref_object_base_unref(tfile, arg->handle, ttm_fence_type);
33224+ return ret;
33225+}
33226diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_user.h b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h
33227--- a/drivers/gpu/drm/psb/ttm/ttm_fence_user.h 1969-12-31 16:00:00.000000000 -0800
33228+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h 2009-04-07 13:28:38.000000000 -0700
33229@@ -0,0 +1,147 @@
33230+/**************************************************************************
33231+ *
33232+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
33233+ * All Rights Reserved.
33234+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
33235+ * All Rights Reserved.
33236+ *
33237+ * Permission is hereby granted, free of charge, to any person obtaining a
33238+ * copy of this software and associated documentation files (the
33239+ * "Software"), to deal in the Software without restriction, including
33240+ * without limitation the rights to use, copy, modify, merge, publish,
33241+ * distribute, sub license, and/or sell copies of the Software, and to
33242+ * permit persons to whom the Software is furnished to do so, subject to
33243+ * the following conditions:
33244+ *
33245+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33246+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33247+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33248+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33249+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33250+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33251+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33252+ *
33253+ * The above copyright notice and this permission notice (including the
33254+ * next paragraph) shall be included in all copies or substantial portions
33255+ * of the Software.
33256+ *
33257+ **************************************************************************/
33258+/*
33259+ * Authors
33260+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
33261+ */
33262+
33263+#ifndef TTM_FENCE_USER_H
33264+#define TTM_FENCE_USER_H
33265+
33266+#if !defined(__KERNEL__) && !defined(_KERNEL)
33267+#include <stdint.h>
33268+#endif
33269+
33270+#define TTM_FENCE_MAJOR 0
33271+#define TTM_FENCE_MINOR 1
33272+#define TTM_FENCE_PL 0
33273+#define TTM_FENCE_DATE "080819"
33274+
33275+/**
33276+ * struct ttm_fence_signaled_req
33277+ *
33278+ * @handle: Handle to the fence object. Input.
33279+ *
33280+ * @fence_type: Fence types we want to flush. Input.
33281+ *
33282+ * @flush: Boolean. Flush the indicated fence_types. Input.
33283+ *
33284+ * Argument to the TTM_FENCE_SIGNALED ioctl.
33285+ */
33286+
33287+struct ttm_fence_signaled_req {
33288+ uint32_t handle;
33289+ uint32_t fence_type;
33290+ int32_t flush;
33291+ uint32_t pad64;
33292+};
33293+
33294+/**
33295+ * struct ttm_fence_rep
33296+ *
33297+ * @signaled_types: Fence types that have signaled.
33298+ *
33299+ * @fence_error: Command execution error.
33300+ * Hardware errors that are consequences of the execution
33301+ * of the command stream preceding the fence are reported
33302+ * here.
33303+ *
33304+ * Output argument to the TTM_FENCE_SIGNALED and
33305+ * TTM_FENCE_FINISH ioctls.
33306+ */
33307+
33308+struct ttm_fence_rep {
33309+ uint32_t signaled_types;
33310+ uint32_t fence_error;
33311+};
33312+
33313+union ttm_fence_signaled_arg {
33314+ struct ttm_fence_signaled_req req;
33315+ struct ttm_fence_rep rep;
33316+};
33317+
33318+/*
33319+ * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
33320+ *
33321+ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
33322+ * wait.
33323+ *
33324+ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
33325+ * but return -EBUSY if the buffer is busy.
33326+ */
33327+
33328+#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
33329+#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
33330+
33331+/**
33332+ * struct ttm_fence_finish_req
33333+ *
33334+ * @handle: Handle to the fence object. Input.
33335+ *
33336+ * @fence_type: Fence types we want to finish.
33337+ *
33338+ * @mode: Wait mode.
33339+ *
33340+ * Input to the TTM_FENCE_FINISH ioctl.
33341+ */
33342+
33343+struct ttm_fence_finish_req {
33344+ uint32_t handle;
33345+ uint32_t fence_type;
33346+ uint32_t mode;
33347+ uint32_t pad64;
33348+};
33349+
33350+union ttm_fence_finish_arg {
33351+ struct ttm_fence_finish_req req;
33352+ struct ttm_fence_rep rep;
33353+};
33354+
33355+/**
33356+ * struct ttm_fence_unref_arg
33357+ *
33358+ * @handle: Handle to the fence object.
33359+ *
33360+ * Argument to the TTM_FENCE_UNREF ioctl.
33361+ */
33362+
33363+struct ttm_fence_unref_arg {
33364+ uint32_t handle;
33365+ uint32_t pad64;
33366+};
33367+
33368+/*
33369+ * Ioctl offsets from extension start.
33370+ */
33371+
33372+#define TTM_FENCE_SIGNALED 0x01
33373+#define TTM_FENCE_FINISH 0x02
33374+#define TTM_FENCE_UNREF 0x03
33375+
33376+#endif
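Editor's note: for orientation, below is a minimal user-space sketch of waiting on a fence through the TTM_FENCE_FINISH interface declared in the header above. It is not part of the patch: the header only defines offsets from the extension start, so the full ioctl request code (PSB_TTM_FENCE_FINISH) and the drm_psb_ioctl() wrapper are hypothetical placeholders, and the include path is assumed.

#include <stdint.h>
#include <string.h>
#include "ttm_fence_user.h"              /* the header added above; path assumed */

/* Hypothetical wrapper and request code; the real value is formed by the
 * driver's extension mechanism from the TTM_FENCE_FINISH offset (0x02). */
extern int drm_psb_ioctl(int fd, unsigned long request, void *arg);
#define PSB_TTM_FENCE_FINISH 0x02        /* placeholder */

/* Lazily wait for the given fence types on a fence handle to signal. */
static int example_fence_finish(int fd, uint32_t handle, uint32_t fence_type)
{
	union ttm_fence_finish_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req.handle = handle;
	arg.req.fence_type = fence_type;
	arg.req.mode = TTM_FENCE_FINISH_MODE_LAZY;

	ret = drm_psb_ioctl(fd, PSB_TTM_FENCE_FINISH, &arg);
	if (ret)
		return ret;

	/* On success the union now holds the reply. */
	return (int)arg.rep.fence_error;
}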
33377diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_lock.c b/drivers/gpu/drm/psb/ttm/ttm_lock.c
33378--- a/drivers/gpu/drm/psb/ttm/ttm_lock.c 1969-12-31 16:00:00.000000000 -0800
33379+++ b/drivers/gpu/drm/psb/ttm/ttm_lock.c 2009-04-07 13:28:38.000000000 -0700
33380@@ -0,0 +1,162 @@
33381+/**************************************************************************
33382+ *
33383+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
33384+ * All Rights Reserved.
33385+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
33386+ * All Rights Reserved.
33387+ *
33388+ * Permission is hereby granted, free of charge, to any person obtaining a
33389+ * copy of this software and associated documentation files (the
33390+ * "Software"), to deal in the Software without restriction, including
33391+ * without limitation the rights to use, copy, modify, merge, publish,
33392+ * distribute, sub license, and/or sell copies of the Software, and to
33393+ * permit persons to whom the Software is furnished to do so, subject to
33394+ * the following conditions:
33395+ *
33396+ * The above copyright notice and this permission notice (including the
33397+ * next paragraph) shall be included in all copies or substantial portions
33398+ * of the Software.
33399+ *
33400+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33401+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33402+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33403+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33404+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33405+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33406+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33407+ *
33408+ **************************************************************************/
33409+/*
33410+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
33411+ */
33412+
33413+#include "ttm/ttm_lock.h"
33414+#include <asm/atomic.h>
33415+#include <linux/errno.h>
33416+#include <linux/wait.h>
33417+#include <linux/sched.h>
33418+
33419+void ttm_lock_init(struct ttm_lock *lock)
33420+{
33421+ init_waitqueue_head(&lock->queue);
33422+ atomic_set(&lock->write_lock_pending, 0);
33423+ atomic_set(&lock->readers, 0);
33424+ lock->kill_takers = false;
33425+ lock->signal = SIGKILL;
33426+}
33427+
33428+void ttm_read_unlock(struct ttm_lock *lock)
33429+{
33430+ if (atomic_dec_and_test(&lock->readers))
33431+ wake_up_all(&lock->queue);
33432+}
33433+
33434+int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
33435+{
33436+ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
33437+ int ret;
33438+
33439+ if (!interruptible) {
33440+ wait_event(lock->queue,
33441+ atomic_read(&lock->write_lock_pending) == 0);
33442+ continue;
33443+ }
33444+ ret = wait_event_interruptible
33445+ (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
33446+ if (ret)
33447+ return -ERESTART;
33448+ }
33449+
33450+ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
33451+ int ret;
33452+ if (!interruptible) {
33453+ wait_event(lock->queue,
33454+ atomic_read(&lock->readers) != -1);
33455+ continue;
33456+ }
33457+ ret = wait_event_interruptible
33458+ (lock->queue, atomic_read(&lock->readers) != -1);
33459+ if (ret)
33460+ return -ERESTART;
33461+ }
33462+
33463+ if (unlikely(lock->kill_takers)) {
33464+ send_sig(lock->signal, current, 0);
33465+ ttm_read_unlock(lock);
33466+ return -ERESTART;
33467+ }
33468+
33469+ return 0;
33470+}
33471+
33472+static int __ttm_write_unlock(struct ttm_lock *lock)
33473+{
33474+ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
33475+ return -EINVAL;
33476+ wake_up_all(&lock->queue);
33477+ return 0;
33478+}
33479+
33480+static void ttm_write_lock_remove(struct ttm_base_object **p_base)
33481+{
33482+ struct ttm_base_object *base = *p_base;
33483+ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
33484+ int ret;
33485+
33486+ *p_base = NULL;
33487+ ret = __ttm_write_unlock(lock);
33488+ BUG_ON(ret != 0);
33489+}
33490+
33491+int ttm_write_lock(struct ttm_lock *lock,
33492+ bool interruptible,
33493+ struct ttm_object_file *tfile)
33494+{
33495+ int ret = 0;
33496+
33497+ atomic_inc(&lock->write_lock_pending);
33498+
33499+ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
33500+ if (!interruptible) {
33501+ wait_event(lock->queue,
33502+ atomic_read(&lock->readers) == 0);
33503+ continue;
33504+ }
33505+ ret = wait_event_interruptible
33506+ (lock->queue, atomic_read(&lock->readers) == 0);
33507+
33508+ if (ret) {
33509+ if (atomic_dec_and_test(&lock->write_lock_pending))
33510+ wake_up_all(&lock->queue);
33511+ return -ERESTART;
33512+ }
33513+ }
33514+
33515+ if (atomic_dec_and_test(&lock->write_lock_pending))
33516+ wake_up_all(&lock->queue);
33517+
33518+ if (unlikely(lock->kill_takers)) {
33519+ send_sig(lock->signal, current, 0);
33520+ __ttm_write_unlock(lock);
33521+ return -ERESTART;
33522+ }
33523+
33524+ /*
33525+ * Add a base-object, the destructor of which will
33526+ * make sure the lock is released if the client dies
33527+ * while holding it.
33528+ */
33529+
33530+ ret = ttm_base_object_init(tfile, &lock->base, false,
33531+ ttm_lock_type, &ttm_write_lock_remove, NULL);
33532+ if (ret)
33533+ (void)__ttm_write_unlock(lock);
33534+
33535+ return ret;
33536+}
33537+
33538+int ttm_write_unlock(struct ttm_lock *lock, struct ttm_object_file *tfile)
33539+{
33540+ return ttm_ref_object_base_unref(tfile,
33541+ lock->base.hash.key, TTM_REF_USAGE);
33542+}
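Editor's note: as a rough illustration of the intended calling pattern for the lock implemented above (not code from this patch), a buffer-validation ioctl would bracket its work with the read lock and honour the -ERESTART contract. The psb_validate_buffers() helper named here is hypothetical.

#include "ttm/ttm_lock.h"

/* Sketch only: take the read lock around an operation that may allocate or
 * validate buffer memory, and release it before returning to user space. */
static int example_validate_ioctl(struct ttm_lock *lock,
				  struct ttm_object_file *tfile, void *data)
{
	int ret;

	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0))
		return ret;	/* -ERESTART: let the ioctl be restarted */

	ret = psb_validate_buffers(tfile, data);	/* hypothetical helper */

	ttm_read_unlock(lock);
	return ret;
}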
33543diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_lock.h b/drivers/gpu/drm/psb/ttm/ttm_lock.h
33544--- a/drivers/gpu/drm/psb/ttm/ttm_lock.h 1969-12-31 16:00:00.000000000 -0800
33545+++ b/drivers/gpu/drm/psb/ttm/ttm_lock.h 2009-04-07 13:28:38.000000000 -0700
33546@@ -0,0 +1,181 @@
33547+/**************************************************************************
33548+ *
33549+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
33550+ * All Rights Reserved.
33551+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
33552+ * All Rights Reserved.
33553+ *
33554+ * Permission is hereby granted, free of charge, to any person obtaining a
33555+ * copy of this software and associated documentation files (the
33556+ * "Software"), to deal in the Software without restriction, including
33557+ * without limitation the rights to use, copy, modify, merge, publish,
33558+ * distribute, sub license, and/or sell copies of the Software, and to
33559+ * permit persons to whom the Software is furnished to do so, subject to
33560+ * the following conditions:
33561+ *
33562+ * The above copyright notice and this permission notice (including the
33563+ * next paragraph) shall be included in all copies or substantial portions
33564+ * of the Software.
33565+ *
33566+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33567+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33568+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33569+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33570+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33571+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33572+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33573+ *
33574+ **************************************************************************/
33575+/*
33576+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
33577+ */
33578+
33579+/** @file ttm_lock.h
33580+ * This file implements a simple replacement for the buffer manager use
33581+ * of the DRM heavyweight hardware lock.
33582+ * The lock is a read-write lock. Taking it in read mode is fast, and
33583+ * intended for in-kernel use only.
33584+ * Taking it in write mode is slow.
33585+ *
33586+ * The write mode is used only when there is a need to block all
33587+ * user-space processes from validating buffers.
33588+ * It's allowed to leave kernel space with the write lock held.
33589+ * If a user-space process dies while having the write-lock,
33590+ * it will be released during the file descriptor release.
33591+ *
33592+ * The read lock is typically placed at the start of an IOCTL- or
33593+ * user-space callable function that may end up allocating a memory area.
33594+ * This includes setstatus, super-ioctls and faults; the latter may move
33595+ * unmappable regions to mappable. It's a bug to leave kernel space with the
33596+ * read lock held.
33597+ *
33598+ * Both read- and write lock taking is interruptible for low signal-delivery
33599+ * latency. The locking functions will return -ERESTART if interrupted by a
33600+ * signal.
33601+ *
33602+ * Locking order: The lock should be taken BEFORE any TTM mutexes
33603+ * or spinlocks.
33604+ *
33605+ * Typical usages:
33606+ * a) VT-switching, when we want to clean VRAM and perhaps AGP. The lock
33607+ * stops it from being repopulated.
33608+ * b) out-of-VRAM or out-of-aperture space, in which case the process
33609+ * receiving the out-of-space notification may take the lock in write mode
33610+ * and evict all buffers prior to start validating its own buffers.
33611+ */
33612+
33613+#ifndef _TTM_LOCK_H_
33614+#define _TTM_LOCK_H_
33615+
33616+#include "ttm_object.h"
33617+#include <linux/wait.h>
33618+#include <asm/atomic.h>
33619+
33620+/**
33621+ * struct ttm_lock
33622+ *
33623+ * @base: ttm base object used solely to release the lock if the client
33624+ * holding the lock dies.
33625+ * @queue: Queue for processes waiting for lock change-of-status.
33626+ * @write_lock_pending: Flag indicating that a write-lock is pending. Avoids
33627+ * write lock starvation.
33628+ * @readers: The lock status: A negative number indicates that a write lock is
33629+ * held. Positive values indicate number of concurrent readers.
33630+ */
33631+
33632+struct ttm_lock {
33633+ struct ttm_base_object base;
33634+ wait_queue_head_t queue;
33635+ atomic_t write_lock_pending;
33636+ atomic_t readers;
33637+ bool kill_takers;
33638+ int signal;
33639+};
33640+
33641+/**
33642+ * ttm_lock_init
33643+ *
33644+ * @lock: Pointer to a struct ttm_lock
33645+ * Initializes the lock.
33646+ */
33647+extern void ttm_lock_init(struct ttm_lock *lock);
33648+
33649+/**
33650+ * ttm_read_unlock
33651+ *
33652+ * @lock: Pointer to a struct ttm_lock
33653+ *
33654+ * Releases a read lock.
33655+ */
33656+
33657+extern void ttm_read_unlock(struct ttm_lock *lock);
33658+
33659+/**
33660+ * ttm_read_lock
33661+ *
33662+ * @lock: Pointer to a struct ttm_lock
33663+ * @interruptible: Interruptible sleeping while waiting for a lock.
33664+ *
33665+ * Takes the lock in read mode.
33666+ * Returns:
33667+ * -ERESTART If interrupted by a signal and interruptible is true.
33668+ */
33669+
33670+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
33671+
33672+/**
33673+ * ttm_write_lock
33674+ *
33675+ * @lock: Pointer to a struct ttm_lock
33676+ * @interruptible: Interruptible sleeping while waiting for a lock.
33677+ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
33678+ * application taking the lock.
33679+ *
33680+ * Takes the lock in write mode.
33681+ * Returns:
33682+ * -ERESTART If interrupted by a signal and interruptible is true.
33683+ * -ENOMEM: Out of memory when locking.
33684+ */
33685+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible,
33686+ struct ttm_object_file *tfile);
33687+
33688+/**
33689+ * ttm_write_unlock
33690+ *
33691+ * @lock: Pointer to a struct ttm_lock
33692+ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
33693+ * application taking the lock.
33694+ *
33695+ * Releases a write lock.
33696+ * Returns:
33697+ * -EINVAL If the lock was not held.
33698+ */
33699+extern int ttm_write_unlock(struct ttm_lock *lock,
33700+ struct ttm_object_file *tfile);
33701+
33702+/**
33703+ * ttm_lock_set_kill
33704+ *
33705+ * @lock: Pointer to a struct ttm_lock
33706+ * @val: Boolean whether to kill processes taking the lock.
33707+ * @signal: Signal to send to the process taking the lock.
33708+ *
33709+ * The kill-when-taking-lock functionality is used to kill processes that keep
33710+ * on using the TTM functionality when its resources have been taken down, for
33711+ * example when the X server exits. A typical sequence would look like this:
33712+ * - X server takes lock in write mode.
33713+ * - ttm_lock_set_kill() is called with @val set to true.
33714+ * - As part of X server exit, TTM resources are taken down.
33715+ * - X server releases the lock on file release.
33716+ * - Another dri client wants to render, takes the lock and is killed.
33717+ *
33718+ */
33719+
33720+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, int signal)
33721+{
33722+ lock->kill_takers = val;
33723+ if (val)
33724+ lock->signal = signal;
33725+}
33726+
33727+#endif
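Editor's note: the kill-when-taking-lock sequence described in the header above can be condensed into a sketch like the following (illustrative only, not part of the patch); psb_take_down_ttm_resources() is a hypothetical stand-in for the driver teardown, and SIGTERM is an arbitrary example signal.

#include "ttm/ttm_lock.h"

/* Sketch: block all validators, mark the lock lethal, then tear down TTM
 * resources. The write lock itself is released through the base object
 * when the holder's file descriptor is closed. */
static int example_master_drop(struct ttm_lock *lock,
			       struct ttm_object_file *tfile)
{
	int ret;

	ret = ttm_write_lock(lock, true, tfile);
	if (ret)
		return ret;

	ttm_lock_set_kill(lock, true, SIGTERM);
	psb_take_down_ttm_resources();		/* hypothetical teardown */

	return 0;
}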
33728diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_memory.c b/drivers/gpu/drm/psb/ttm/ttm_memory.c
33729--- a/drivers/gpu/drm/psb/ttm/ttm_memory.c 1969-12-31 16:00:00.000000000 -0800
33730+++ b/drivers/gpu/drm/psb/ttm/ttm_memory.c 2009-04-07 13:28:38.000000000 -0700
33731@@ -0,0 +1,232 @@
33732+/**************************************************************************
33733+ *
33734+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
33735+ * All Rights Reserved.
33736+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
33737+ * All Rights Reserved.
33738+ *
33739+ * Permission is hereby granted, free of charge, to any person obtaining a
33740+ * copy of this software and associated documentation files (the
33741+ * "Software"), to deal in the Software without restriction, including
33742+ * without limitation the rights to use, copy, modify, merge, publish,
33743+ * distribute, sub license, and/or sell copies of the Software, and to
33744+ * permit persons to whom the Software is furnished to do so, subject to
33745+ * the following conditions:
33746+ *
33747+ * The above copyright notice and this permission notice (including the
33748+ * next paragraph) shall be included in all copies or substantial portions
33749+ * of the Software.
33750+ *
33751+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33752+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33753+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33754+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33755+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33756+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33757+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33758+ *
33759+ **************************************************************************/
33760+
33761+#include "ttm/ttm_memory.h"
33762+#include <linux/spinlock.h>
33763+#include <linux/sched.h>
33764+#include <linux/wait.h>
33765+#include <linux/mm.h>
33766+
33767+#define TTM_MEMORY_ALLOC_RETRIES 4
33768+
33769+/**
33770+ * At this point we only support a single shrink callback.
33771+ * Extend this if needed, perhaps using a linked list of callbacks.
33772+ * Note that this function is reentrant:
33773+ * many threads may try to swap out at any given time.
33774+ */
33775+
33776+static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
33777+ uint64_t extra)
33778+{
33779+ int ret;
33780+ struct ttm_mem_shrink *shrink;
33781+ uint64_t target;
33782+ uint64_t total_target;
33783+
33784+ spin_lock(&glob->lock);
33785+ if (glob->shrink == NULL)
33786+ goto out;
33787+
33788+ if (from_workqueue) {
33789+ target = glob->swap_limit;
33790+ total_target = glob->total_memory_swap_limit;
33791+ } else if (capable(CAP_SYS_ADMIN)) {
33792+ total_target = glob->emer_total_memory;
33793+ target = glob->emer_memory;
33794+ } else {
33795+ total_target = glob->max_total_memory;
33796+ target = glob->max_memory;
33797+ }
33798+
33799+ total_target = (extra >= total_target) ? 0: total_target - extra;
33800+ target = (extra >= target) ? 0: target - extra;
33801+
33802+ while (glob->used_memory > target ||
33803+ glob->used_total_memory > total_target) {
33804+ shrink = glob->shrink;
33805+ spin_unlock(&glob->lock);
33806+ ret = shrink->do_shrink(shrink);
33807+ spin_lock(&glob->lock);
33808+ if (unlikely(ret != 0))
33809+ goto out;
33810+ }
33811+ out:
33812+ spin_unlock(&glob->lock);
33813+}
33814+
33815+static void ttm_shrink_work(struct work_struct *work)
33816+{
33817+ struct ttm_mem_global *glob =
33818+ container_of(work, struct ttm_mem_global, work);
33819+
33820+ ttm_shrink(glob, true, 0ULL);
33821+}
33822+
33823+int ttm_mem_global_init(struct ttm_mem_global *glob)
33824+{
33825+ struct sysinfo si;
33826+ uint64_t mem;
33827+
33828+ spin_lock_init(&glob->lock);
33829+ glob->swap_queue = create_singlethread_workqueue("ttm_swap");
33830+ INIT_WORK(&glob->work, ttm_shrink_work);
33831+ init_waitqueue_head(&glob->queue);
33832+
33833+ si_meminfo(&si);
33834+
33835+ mem = si.totalram - si.totalhigh;
33836+ mem *= si.mem_unit;
33837+
33838+ glob->max_memory = mem >> 1;
33839+ glob->emer_memory = glob->max_memory + (mem >> 2);
33840+ glob->swap_limit = glob->max_memory - (mem >> 5);
33841+ glob->used_memory = 0;
33842+ glob->used_total_memory = 0;
33843+ glob->shrink = NULL;
33844+
33845+ mem = si.totalram;
33846+ mem *= si.mem_unit;
33847+
33848+ glob->max_total_memory = mem >> 1;
33849+ glob->emer_total_memory = glob->max_total_memory + (mem >> 2);
33850+ glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 5);
33851+
33852+ printk(KERN_INFO "TTM available graphics memory: %llu MiB\n",
33853+ glob->max_total_memory >> 20);
33854+ printk(KERN_INFO "TTM available object memory: %llu MiB\n",
33855+ glob->max_memory >> 20);
33856+ printk(KERN_INFO "TTM available swap breakpoint: %llu MiB\n",
33857+ glob->swap_limit >> 20);
33858+
33859+ return 0;
33860+}
33861+
33862+void ttm_mem_global_release(struct ttm_mem_global *glob)
33863+{
33864+ printk(KERN_INFO "Used total memory is %llu bytes.\n",
33865+ (unsigned long long)glob->used_total_memory);
33866+ flush_workqueue(glob->swap_queue);
33867+ destroy_workqueue(glob->swap_queue);
33868+ glob->swap_queue = NULL;
33869+}
33870+
33871+static inline void ttm_check_swapping(struct ttm_mem_global *glob)
33872+{
33873+ bool needs_swapping;
33874+
33875+ spin_lock(&glob->lock);
33876+ needs_swapping = (glob->used_memory > glob->swap_limit ||
33877+ glob->used_total_memory >
33878+ glob->total_memory_swap_limit);
33879+ spin_unlock(&glob->lock);
33880+
33881+ if (unlikely(needs_swapping))
33882+ (void)queue_work(glob->swap_queue, &glob->work);
33883+
33884+}
33885+
33886+void ttm_mem_global_free(struct ttm_mem_global *glob,
33887+ uint64_t amount, bool himem)
33888+{
33889+ spin_lock(&glob->lock);
33890+ glob->used_total_memory -= amount;
33891+ if (!himem)
33892+ glob->used_memory -= amount;
33893+ wake_up_all(&glob->queue);
33894+ spin_unlock(&glob->lock);
33895+}
33896+
33897+static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
33898+ uint64_t amount, bool himem, bool reserve)
33899+{
33900+ uint64_t limit;
33901+ uint64_t lomem_limit;
33902+ int ret = -ENOMEM;
33903+
33904+ spin_lock(&glob->lock);
33905+
33906+ if (capable(CAP_SYS_ADMIN)) {
33907+ limit = glob->emer_total_memory;
33908+ lomem_limit = glob->emer_memory;
33909+ } else {
33910+ limit = glob->max_total_memory;
33911+ lomem_limit = glob->max_memory;
33912+ }
33913+
33914+ if (unlikely(glob->used_total_memory + amount > limit))
33915+ goto out_unlock;
33916+ if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
33917+ goto out_unlock;
33918+
33919+ if (reserve) {
33920+ glob->used_total_memory += amount;
33921+ if (!himem)
33922+ glob->used_memory += amount;
33923+ }
33924+ ret = 0;
33925+ out_unlock:
33926+ spin_unlock(&glob->lock);
33927+ ttm_check_swapping(glob);
33928+
33929+ return ret;
33930+}
33931+
33932+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
33933+ bool no_wait, bool interruptible, bool himem)
33934+{
33935+ int count = TTM_MEMORY_ALLOC_RETRIES;
33936+
33937+ while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true) != 0)) {
33938+ if (no_wait)
33939+ return -ENOMEM;
33940+ if (unlikely(count-- == 0))
33941+ return -ENOMEM;
33942+ ttm_shrink(glob, false, memory + (memory >> 2) + 16);
33943+ }
33944+
33945+ return 0;
33946+}
33947+
33948+size_t ttm_round_pot(size_t size)
33949+{
33950+ if ((size & (size - 1)) == 0)
33951+ return size;
33952+ else if (size > PAGE_SIZE)
33953+ return PAGE_ALIGN(size);
33954+ else {
33955+ size_t tmp_size = 4;
33956+
33957+ while (tmp_size < size)
33958+ tmp_size <<= 1;
33959+
33960+ return tmp_size;
33961+ }
33962+ return 0;
33963+}
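Editor's note: a typical accounted-allocation pattern against the API above would look roughly like the sketch below (not part of the patch). It mirrors what the user-fence ioctl code earlier in this patch does: reserve accounting first, then allocate, and give the accounting back on failure. The object type is made up for illustration.

#include <linux/slab.h>
#include "ttm/ttm_memory.h"

struct example_obj {
	int dummy;				/* illustrative payload */
};

/* Sketch: account the (power-of-two rounded) size before kmalloc and
 * release the account if the allocation fails. */
static struct example_obj *example_obj_alloc(struct ttm_mem_global *glob)
{
	size_t size = ttm_round_pot(sizeof(struct example_obj));
	struct example_obj *obj;

	if (ttm_mem_global_alloc(glob, size, false, false, false) != 0)
		return NULL;

	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL) {
		ttm_mem_global_free(glob, size, false);
		return NULL;
	}
	return obj;
}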
33964diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_memory.h b/drivers/gpu/drm/psb/ttm/ttm_memory.h
33965--- a/drivers/gpu/drm/psb/ttm/ttm_memory.h 1969-12-31 16:00:00.000000000 -0800
33966+++ b/drivers/gpu/drm/psb/ttm/ttm_memory.h 2009-04-07 13:28:38.000000000 -0700
33967@@ -0,0 +1,154 @@
33968+/**************************************************************************
33969+ *
33970+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
33971+ * All Rights Reserved.
33972+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
33973+ * All Rights Reserved.
33974+ *
33975+ * Permission is hereby granted, free of charge, to any person obtaining a
33976+ * copy of this software and associated documentation files (the
33977+ * "Software"), to deal in the Software without restriction, including
33978+ * without limitation the rights to use, copy, modify, merge, publish,
33979+ * distribute, sub license, and/or sell copies of the Software, and to
33980+ * permit persons to whom the Software is furnished to do so, subject to
33981+ * the following conditions:
33982+ *
33983+ * The above copyright notice and this permission notice (including the
33984+ * next paragraph) shall be included in all copies or substantial portions
33985+ * of the Software.
33986+ *
33987+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33988+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33989+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33990+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33991+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33992+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33993+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33994+ *
33995+ **************************************************************************/
33996+
33997+#ifndef TTM_MEMORY_H
33998+#define TTM_MEMORY_H
33999+
34000+#include <linux/workqueue.h>
34001+#include <linux/spinlock.h>
34002+#include <linux/wait.h>
34003+
34004+/**
34005+ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
34006+ *
34007+ * @do_shrink: The callback function.
34008+ *
34009+ * Arguments to the do_shrink functions are intended to be passed using
34010+ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
34011+ * and can be accessed using container_of().
34012+ */
34013+
34014+struct ttm_mem_shrink {
34015+ int (*do_shrink) (struct ttm_mem_shrink *);
34016+};
34017+
34018+/**
34019+ * struct ttm_mem_global - Global memory accounting structure.
34020+ *
34021+ * @shrink: A single callback to shrink TTM memory usage. Extend this
34022+ * to a linked list to be able to handle multiple callbacks when needed.
34023+ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
34024+ * need a separate workqueue since it will spend a lot of time waiting
34025+ * for the GPU, and this will otherwise block other workqueue tasks(?)
34026+ * At this point we use only a single-threaded workqueue.
34027+ * @work: The workqueue callback for the shrink queue.
34028+ * @queue: Wait queue for processes suspended waiting for memory.
34029+ * @lock: Lock to protect the @shrink - and the memory accounting members,
34030+ * that is, essentially the whole structure with some exceptions.
34031+ * @emer_memory: Lowmem memory limit available for root.
34032+ * @max_memory: Lowmem memory limit available for non-root.
34033+ * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
34034+ * @used_memory: Currently used lowmem memory.
34035+ * @used_total_memory: Currently used total (lowmem + highmem) memory.
34036+ * @total_memory_swap_limit: Total memory limit where the shrink workqueue
34037+ * kicks in.
34038+ * @max_total_memory: Total memory available to non-root processes.
34039+ * @emer_total_memory: Total memory available to root processes.
34040+ *
34041+ * Note that this structure is not per device. It should be global for all
34042+ * graphics devices.
34043+ */
34044+
34045+struct ttm_mem_global {
34046+ struct ttm_mem_shrink *shrink;
34047+ struct workqueue_struct *swap_queue;
34048+ struct work_struct work;
34049+ wait_queue_head_t queue;
34050+ spinlock_t lock;
34051+ uint64_t emer_memory;
34052+ uint64_t max_memory;
34053+ uint64_t swap_limit;
34054+ uint64_t used_memory;
34055+ uint64_t used_total_memory;
34056+ uint64_t total_memory_swap_limit;
34057+ uint64_t max_total_memory;
34058+ uint64_t emer_total_memory;
34059+};
34060+
34061+/**
34062+ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
34063+ *
34064+ * @shrink: The object to initialize.
34065+ * @func: The callback function.
34066+ */
34067+
34068+static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
34069+ int (*func) (struct ttm_mem_shrink *))
34070+{
34071+ shrink->do_shrink = func;
34072+}
34073+
34074+/**
34075+ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
34076+ *
34077+ * @glob: The struct ttm_mem_global object to register with.
34078+ * @shrink: An initialized struct ttm_mem_shrink object to register.
34079+ *
34080+ * Returns:
34081+ * -EBUSY: There's already a callback registered. (May change).
34082+ */
34083+
34084+static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
34085+ struct ttm_mem_shrink *shrink)
34086+{
34087+ spin_lock(&glob->lock);
34088+ if (glob->shrink != NULL) {
34089+ spin_unlock(&glob->lock);
34090+ return -EBUSY;
34091+ }
34092+ glob->shrink = shrink;
34093+ spin_unlock(&glob->lock);
34094+ return 0;
34095+}
34096+
34097+/**
34098+ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
34099+ *
34100+ * @glob: The struct ttm_mem_global object to unregister from.
34101+ * @shrink: A previously registered struct ttm_mem_shrink object.
34102+ *
34103+ */
34104+
34105+static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
34106+ struct ttm_mem_shrink *shrink)
34107+{
34108+ spin_lock(&glob->lock);
34109+ BUG_ON(glob->shrink != shrink);
34110+ glob->shrink = NULL;
34111+ spin_unlock(&glob->lock);
34112+}
34113+
34114+extern int ttm_mem_global_init(struct ttm_mem_global *glob);
34115+extern void ttm_mem_global_release(struct ttm_mem_global *glob);
34116+extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
34117+ bool no_wait, bool interruptible, bool himem);
34118+extern void ttm_mem_global_free(struct ttm_mem_global *glob,
34119+ uint64_t amount, bool himem);
34120+extern size_t ttm_round_pot(size_t size);
34121+#endif
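Editor's note: the inheritance convention for the shrink callback documented above can be sketched as follows (illustrative, not from the patch). The driver state and the eviction helper example_evict_something() are hypothetical.

#include <linux/kernel.h>
#include "ttm/ttm_memory.h"

/* Sketch: a driver-private shrink object embedding struct ttm_mem_shrink,
 * recovered with container_of() inside the callback. */
struct example_shrink {
	struct ttm_mem_shrink shrink;
	struct example_driver *driver;		/* hypothetical driver state */
};

static int example_do_shrink(struct ttm_mem_shrink *shrink)
{
	struct example_shrink *es =
	    container_of(shrink, struct example_shrink, shrink);

	/* Swap out or evict one unit of GPU memory owned by es->driver. */
	return example_evict_something(es->driver);	/* hypothetical */
}

static int example_register(struct ttm_mem_global *glob,
			    struct example_shrink *es)
{
	ttm_mem_init_shrink(&es->shrink, example_do_shrink);
	return ttm_mem_register_shrink(glob, &es->shrink);
}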
34122diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_object.c b/drivers/gpu/drm/psb/ttm/ttm_object.c
34123--- a/drivers/gpu/drm/psb/ttm/ttm_object.c 1969-12-31 16:00:00.000000000 -0800
34124+++ b/drivers/gpu/drm/psb/ttm/ttm_object.c 2009-04-07 13:28:38.000000000 -0700
34125@@ -0,0 +1,444 @@
34126+/**************************************************************************
34127+ *
34128+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
34129+ * All Rights Reserved.
34130+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
34131+ * All Rights Reserved.
34132+ *
34133+ * Permission is hereby granted, free of charge, to any person obtaining a
34134+ * copy of this software and associated documentation files (the
34135+ * "Software"), to deal in the Software without restriction, including
34136+ * without limitation the rights to use, copy, modify, merge, publish,
34137+ * distribute, sub license, and/or sell copies of the Software, and to
34138+ * permit persons to whom the Software is furnished to do so, subject to
34139+ * the following conditions:
34140+ *
34141+ * The above copyright notice and this permission notice (including the
34142+ * next paragraph) shall be included in all copies or substantial portions
34143+ * of the Software.
34144+ *
34145+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
34146+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34147+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
34148+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
34149+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
34150+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
34151+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
34152+ *
34153+ **************************************************************************/
34154+/*
34155+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
34156+ */
34157+/** @file ttm_ref_object.c
34158+ *
34159+ * Base- and reference object implementation for the various
34160+ * ttm objects. Implements reference counting, minimal security checks
34161+ * and release on file close.
34162+ */
34163+
34164+/**
34165+ * struct ttm_object_file
34166+ *
34167+ * @tdev: Pointer to the ttm_object_device.
34168+ *
34169+ * @lock: Lock that protects the ref_list list and the
34170+ * ref_hash hash tables.
34171+ *
34172+ * @ref_list: List of ttm_ref_objects to be destroyed at
34173+ * file release.
34174+ *
34175+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
34176+ * for fast lookup of ref objects given a base object.
34177+ */
34178+
34179+#include "ttm/ttm_object.h"
34180+#include <linux/list.h>
34181+#include <linux/spinlock.h>
34182+#include <linux/slab.h>
34183+#include <asm/atomic.h>
34184+
34185+struct ttm_object_file {
34186+ struct ttm_object_device *tdev;
34187+ rwlock_t lock;
34188+ struct list_head ref_list;
34189+ struct drm_open_hash ref_hash[TTM_REF_NUM];
34190+ struct kref refcount;
34191+};
34192+
34193+/**
34194+ * struct ttm_object_device
34195+ *
34196+ * @object_lock: lock that protects the object_hash hash table.
34197+ *
34198+ * @object_hash: hash table for fast lookup of object global names.
34199+ *
34200+ * @object_count: Per device object count.
34201+ *
34202+ * This is the per-device data structure needed for ttm object management.
34203+ */
34204+
34205+struct ttm_object_device {
34206+ rwlock_t object_lock;
34207+ struct drm_open_hash object_hash;
34208+ atomic_t object_count;
34209+ struct ttm_mem_global *mem_glob;
34210+};
34211+
34212+/**
34213+ * struct ttm_ref_object
34214+ *
34215+ * @hash: Hash entry for the per-file object reference hash.
34216+ *
34217+ * @head: List entry for the per-file list of ref-objects.
34218+ *
34219+ * @kref: Ref count.
34220+ *
34221+ * @obj: Base object this ref object is referencing.
34222+ *
34223+ * @ref_type: Type of ref object.
34224+ *
34225+ * This is similar to an idr object, but it also has a hash table entry
34226+ * that allows lookup with a pointer to the referenced object as a key. In
34227+ * that way, one can easily detect whether a base object is referenced by
34228+ * a particular ttm_object_file. It also carries a ref count to avoid creating
34229+ * multiple ref objects if a ttm_object_file references the same base object more
34230+ * than once.
34231+ */
34232+
34233+struct ttm_ref_object {
34234+ struct drm_hash_item hash;
34235+ struct list_head head;
34236+ struct kref kref;
34237+ struct ttm_base_object *obj;
34238+ enum ttm_ref_type ref_type;
34239+ struct ttm_object_file *tfile;
34240+};
34241+
34242+static inline struct ttm_object_file *
34243+ttm_object_file_ref(struct ttm_object_file *tfile)
34244+{
34245+ kref_get(&tfile->refcount);
34246+ return tfile;
34247+}
34248+
34249+static void ttm_object_file_destroy(struct kref *kref)
34250+{
34251+ struct ttm_object_file *tfile =
34252+ container_of(kref, struct ttm_object_file, refcount);
34253+
34254+// printk(KERN_INFO "Freeing 0x%08lx\n", (unsigned long) tfile);
34255+ kfree(tfile);
34256+}
34257+
34258+
34259+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
34260+{
34261+ struct ttm_object_file *tfile = *p_tfile;
34262+
34263+ *p_tfile = NULL;
34264+ kref_put(&tfile->refcount, ttm_object_file_destroy);
34265+}
34266+
34267+
34268+int ttm_base_object_init(struct ttm_object_file *tfile,
34269+ struct ttm_base_object *base,
34270+ bool shareable,
34271+ enum ttm_object_type object_type,
34272+ void (*refcount_release) (struct ttm_base_object **),
34273+ void (*ref_obj_release) (struct ttm_base_object *,
34274+ enum ttm_ref_type ref_type))
34275+{
34276+ struct ttm_object_device *tdev = tfile->tdev;
34277+ int ret;
34278+
34279+ base->shareable = shareable;
34280+ base->tfile = ttm_object_file_ref(tfile);
34281+ base->refcount_release = refcount_release;
34282+ base->ref_obj_release = ref_obj_release;
34283+ base->object_type = object_type;
34284+ write_lock(&tdev->object_lock);
34285+ kref_init(&base->refcount);
34286+ ret = drm_ht_just_insert_please(&tdev->object_hash,
34287+ &base->hash,
34288+ (unsigned long)base, 31, 0, 0);
34289+ write_unlock(&tdev->object_lock);
34290+ if (unlikely(ret != 0))
34291+ goto out_err0;
34292+
34293+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
34294+ if (unlikely(ret != 0))
34295+ goto out_err1;
34296+
34297+ ttm_base_object_unref(&base);
34298+
34299+ return 0;
34300+ out_err1:
34301+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
34302+ out_err0:
34303+ return ret;
34304+}
34305+
34306+static void ttm_release_base(struct kref *kref)
34307+{
34308+ struct ttm_base_object *base =
34309+ container_of(kref, struct ttm_base_object, refcount);
34310+ struct ttm_object_device *tdev = base->tfile->tdev;
34311+
34312+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
34313+ write_unlock(&tdev->object_lock);
34314+ if (base->refcount_release) {
34315+ ttm_object_file_unref(&base->tfile);
34316+ base->refcount_release(&base);
34317+ }
34318+ write_lock(&tdev->object_lock);
34319+}
34320+
34321+void ttm_base_object_unref(struct ttm_base_object **p_base)
34322+{
34323+ struct ttm_base_object *base = *p_base;
34324+ struct ttm_object_device *tdev = base->tfile->tdev;
34325+
34326+ // printk(KERN_INFO "TTM base object unref.\n");
34327+ *p_base = NULL;
34328+
34329+ /*
34330+ * Need to take the lock here to avoid racing with
34331+ * users trying to look up the object.
34332+ */
34333+
34334+ write_lock(&tdev->object_lock);
34335+ (void)kref_put(&base->refcount, &ttm_release_base);
34336+ write_unlock(&tdev->object_lock);
34337+}
34338+
34339+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
34340+ uint32_t key)
34341+{
34342+ struct ttm_object_device *tdev = tfile->tdev;
34343+ struct ttm_base_object *base;
34344+ struct drm_hash_item *hash;
34345+ int ret;
34346+
34347+ read_lock(&tdev->object_lock);
34348+ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
34349+
34350+ if (likely(ret == 0)) {
34351+ base = drm_hash_entry(hash, struct ttm_base_object, hash);
34352+ kref_get(&base->refcount);
34353+ }
34354+ read_unlock(&tdev->object_lock);
34355+
34356+ if (unlikely(ret != 0))
34357+ return NULL;
34358+
34359+ if (tfile != base->tfile && !base->shareable) {
34360+ printk(KERN_ERR "Attempted access of non-shareable object.\n");
34361+ ttm_base_object_unref(&base);
34362+ return NULL;
34363+ }
34364+
34365+ return base;
34366+}
34367+
34368+int ttm_ref_object_add(struct ttm_object_file *tfile,
34369+ struct ttm_base_object *base,
34370+ enum ttm_ref_type ref_type, bool *existed)
34371+{
34372+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
34373+ struct ttm_ref_object *ref;
34374+ struct drm_hash_item *hash;
34375+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
34376+ int ret = -EINVAL;
34377+
34378+ if (existed != NULL)
34379+ *existed = true;
34380+
34381+ while (ret == -EINVAL) {
34382+ read_lock(&tfile->lock);
34383+ ret = drm_ht_find_item(ht, base->hash.key, &hash);
34384+
34385+ if (ret == 0) {
34386+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
34387+ kref_get(&ref->kref);
34388+ read_unlock(&tfile->lock);
34389+ break;
34390+ }
34391+
34392+ read_unlock(&tfile->lock);
34393+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false, false, false);
34394+ if (unlikely(ret != 0))
34395+ return ret;
34396+ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
34397+ if (unlikely(ref == NULL)) {
34398+ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
34399+ return -ENOMEM;
34400+ }
34401+
34402+ ref->hash.key = base->hash.key;
34403+ ref->obj = base;
34404+ ref->tfile = tfile;
34405+ ref->ref_type = ref_type;
34406+ kref_init(&ref->kref);
34407+
34408+ write_lock(&tfile->lock);
34409+ ret = drm_ht_insert_item(ht, &ref->hash);
34410+
34411+ if (likely(ret == 0)) {
34412+ list_add_tail(&ref->head, &tfile->ref_list);
34413+ kref_get(&base->refcount);
34414+ write_unlock(&tfile->lock);
34415+ if (existed != NULL)
34416+ *existed = false;
34417+ break;
34418+ }
34419+
34420+ write_unlock(&tfile->lock);
34421+ BUG_ON(ret != -EINVAL);
34422+
34423+ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
34424+ kfree(ref);
34425+ }
34426+
34427+ return ret;
34428+}
34429+
34430+static void ttm_ref_object_release(struct kref *kref)
34431+{
34432+ struct ttm_ref_object *ref =
34433+ container_of(kref, struct ttm_ref_object, kref);
34434+ struct ttm_base_object *base = ref->obj;
34435+ struct ttm_object_file *tfile = ref->tfile;
34436+ struct drm_open_hash *ht;
34437+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
34438+
34439+ ht = &tfile->ref_hash[ref->ref_type];
34440+ (void)drm_ht_remove_item(ht, &ref->hash);
34441+ list_del(&ref->head);
34442+ write_unlock(&tfile->lock);
34443+
34444+ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
34445+ base->ref_obj_release(base, ref->ref_type);
34446+
34447+ ttm_base_object_unref(&ref->obj);
34448+ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
34449+ kfree(ref);
34450+ write_lock(&tfile->lock);
34451+}
34452+
34453+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
34454+ unsigned long key, enum ttm_ref_type ref_type)
34455+{
34456+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
34457+ struct ttm_ref_object *ref;
34458+ struct drm_hash_item *hash;
34459+ int ret;
34460+
34461+ write_lock(&tfile->lock);
34462+ ret = drm_ht_find_item(ht, key, &hash);
34463+ if (unlikely(ret != 0)) {
34464+ write_unlock(&tfile->lock);
34465+ return -EINVAL;
34466+ }
34467+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
34468+ kref_put(&ref->kref, ttm_ref_object_release);
34469+ write_unlock(&tfile->lock);
34470+ return 0;
34471+}
34472+
34473+void ttm_object_file_release(struct ttm_object_file **p_tfile)
34474+{
34475+ struct ttm_ref_object *ref;
34476+ struct list_head *list;
34477+ unsigned int i;
34478+ struct ttm_object_file *tfile = *p_tfile;
34479+
34480+ *p_tfile = NULL;
34481+ write_lock(&tfile->lock);
34482+
34483+ /*
34484+ * Since we release the lock within the loop, we have to
34485+ * restart it from the beginning each time.
34486+ */
34487+
34488+ while (!list_empty(&tfile->ref_list)) {
34489+ list = tfile->ref_list.next;
34490+ ref = list_entry(list, struct ttm_ref_object, head);
34491+ ttm_ref_object_release(&ref->kref);
34492+ }
34493+
34494+ for (i = 0; i < TTM_REF_NUM; ++i) {
34495+ drm_ht_remove(&tfile->ref_hash[i]);
34496+ }
34497+
34498+ write_unlock(&tfile->lock);
34499+ ttm_object_file_unref(&tfile);
34500+}
34501+
34502+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
34503+ unsigned int hash_order)
34504+{
34505+ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
34506+ unsigned int i;
34507+ unsigned int j = 0;
34508+ int ret;
34509+
34510+ if (unlikely(tfile == NULL))
34511+ return NULL;
34512+
34513+ rwlock_init(&tfile->lock);
34514+ tfile->tdev = tdev;
34515+ kref_init(&tfile->refcount);
34516+ INIT_LIST_HEAD(&tfile->ref_list);
34517+
34518+ for (i = 0; i < TTM_REF_NUM; ++i) {
34519+ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
34520+ if (ret) {
34521+ j = i;
34522+ goto out_err;
34523+ }
34524+ }
34525+
34526+ return tfile;
34527+ out_err:
34528+ for (i = 0; i < j; ++i) {
34529+ drm_ht_remove(&tfile->ref_hash[i]);
34530+ }
34531+ kfree(tfile);
34532+
34533+ return NULL;
34534+}
34535+
34536+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
34537+ *mem_glob,
34538+ unsigned int hash_order)
34539+{
34540+ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
34541+ int ret;
34542+
34543+ if (unlikely(tdev == NULL))
34544+ return NULL;
34545+
34546+ tdev->mem_glob = mem_glob;
34547+ rwlock_init(&tdev->object_lock);
34548+ atomic_set(&tdev->object_count, 0);
34549+ ret = drm_ht_create(&tdev->object_hash, hash_order);
34550+
34551+ if (likely(ret == 0))
34552+ return tdev;
34553+
34554+ kfree(tdev);
34555+ return NULL;
34556+}
34557+
34558+void ttm_object_device_release(struct ttm_object_device **p_tdev)
34559+{
34560+ struct ttm_object_device *tdev = *p_tdev;
34561+
34562+ *p_tdev = NULL;
34563+
34564+ write_lock(&tdev->object_lock);
34565+ drm_ht_remove(&tdev->object_hash);
34566+ write_unlock(&tdev->object_lock);
34567+
34568+ kfree(tdev);
34569+}
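Editor's note: tying the pieces of the file above together, the per-device and per-file setup would typically be wired up as in this sketch (simplified load/open/release hooks, not part of the patch); the hash orders 12 and 10 are arbitrary example values.

#include "ttm/ttm_object.h"

/* Sketch: create the object device at driver load and one object file per
 * opened file descriptor; release them in the opposite order. */
static int example_driver_load(struct ttm_mem_global *mem_glob,
			       struct ttm_object_device **p_tdev)
{
	*p_tdev = ttm_object_device_init(mem_glob, 12);
	return (*p_tdev != NULL) ? 0 : -ENOMEM;
}

static int example_open(struct ttm_object_device *tdev,
			struct ttm_object_file **p_tfile)
{
	*p_tfile = ttm_object_file_init(tdev, 10);
	return (*p_tfile != NULL) ? 0 : -ENOMEM;
}

static void example_release(struct ttm_object_file **p_tfile,
			    struct ttm_object_device **p_tdev)
{
	ttm_object_file_release(p_tfile);
	ttm_object_device_release(p_tdev);
}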
34570diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_object.h b/drivers/gpu/drm/psb/ttm/ttm_object.h
34571--- a/drivers/gpu/drm/psb/ttm/ttm_object.h 1969-12-31 16:00:00.000000000 -0800
34572+++ b/drivers/gpu/drm/psb/ttm/ttm_object.h 2009-04-07 13:28:38.000000000 -0700
34573@@ -0,0 +1,269 @@
34574+/**************************************************************************
34575+ *
34576+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
34577+ * All Rights Reserved.
34578+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
34579+ * All Rights Reserved.
34580+ *
34581+ * Permission is hereby granted, free of charge, to any person obtaining a
34582+ * copy of this software and associated documentation files (the
34583+ * "Software"), to deal in the Software without restriction, including
34584+ * without limitation the rights to use, copy, modify, merge, publish,
34585+ * distribute, sub license, and/or sell copies of the Software, and to
34586+ * permit persons to whom the Software is furnished to do so, subject to
34587+ * the following conditions:
34588+ *
34589+ * The above copyright notice and this permission notice (including the
34590+ * next paragraph) shall be included in all copies or substantial portions
34591+ * of the Software.
34592+ *
34593+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
34594+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34595+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
34596+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
34597+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
34598+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
34599+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
34600+ *
34601+ **************************************************************************/
34602+/*
34603+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
34604+ */
34605+/** @file ttm_ref_object.h
34606+ *
34607+ * Base- and reference object implementation for the various
34608+ * ttm objects. Implements reference counting, minimal security checks
34609+ * and release on file close.
34610+ */
34611+
34612+#ifndef _TTM_OBJECT_H_
34613+#define _TTM_OBJECT_H_
34614+
34615+#include <linux/list.h>
34616+#include <drm/drm_hashtab.h>
34617+#include <linux/kref.h>
34618+#include <ttm/ttm_memory.h>
34619+
34620+/**
34621+ * enum ttm_ref_type
34622+ *
34623+ * Describes what type of reference a ref object holds.
34624+ *
34625+ * TTM_REF_USAGE is a simple refcount on a base object.
34626+ *
34627+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
34628+ * buffer object.
34629+ *
34630+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
34631+ * buffer object.
34632+ *
34633+ */
34634+
34635+enum ttm_ref_type {
34636+ TTM_REF_USAGE,
34637+ TTM_REF_SYNCCPU_READ,
34638+ TTM_REF_SYNCCPU_WRITE,
34639+ TTM_REF_NUM
34640+};
34641+
34642+/**
34643+ * enum ttm_object_type
34644+ *
34645+ * One entry per ttm object type.
34646+ * Device-specific types should use the
34647+ * ttm_driver_typex types.
34648+ */
34649+
34650+enum ttm_object_type {
34651+ ttm_fence_type,
34652+ ttm_buffer_type,
34653+ ttm_lock_type,
34654+ ttm_driver_type0 = 256,
34655+ ttm_driver_type1
34656+};
34657+
34658+struct ttm_object_file;
34659+struct ttm_object_device;
34660+
34661+/**
34662+ * struct ttm_base_object
34663+ *
34664+ * @hash: hash entry for the per-device object hash.
34665+ * @type: derived type this object is base class for.
34666+ * @shareable: Other ttm_object_files can access this object.
34667+ *
34668+ * @tfile: Pointer to ttm_object_file of the creator.
34669+ * NULL if the object was not created by a user request.
34670+ * (kernel object).
34671+ *
34672+ * @refcount: Number of references to this object, not
34673+ * including the hash entry. A reference to a base object can
34674+ * only be held by a ref object.
34675+ *
34676+ * @refcount_release: A function to be called when there are
34677+ * no more references to this object. This function should
34678+ * destroy the object (or make sure destruction eventually happens),
34679+ * and when it is called, the object has
34680+ * already been taken out of the per-device hash. The parameter
34681+ * "base" should be set to NULL by the function.
34682+ *
34683+ * @ref_obj_release: A function to be called when a reference object
34684+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
34685+ * this function may, for example, release a lock held by a user-space
34686+ * process.
34687+ *
34688+ * This struct is intended to be used as a base struct for objects that
34689+ * are visible to user-space. It provides a global name, race-safe
34690+ * access and refcounting, minimal access control and hooks for unref actions.
34691+ */
34692+
34693+struct ttm_base_object {
34694+ struct drm_hash_item hash;
34695+ enum ttm_object_type object_type;
34696+ bool shareable;
34697+ struct ttm_object_file *tfile;
34698+ struct kref refcount;
34699+ void (*refcount_release) (struct ttm_base_object ** base);
34700+ void (*ref_obj_release) (struct ttm_base_object * base,
34701+ enum ttm_ref_type ref_type);
34702+};
34703+
34704+/**
34705+ * ttm_base_object_init
34706+ *
34707+ * @tfile: Pointer to a struct ttm_object_file.
34708+ * @base: The struct ttm_base_object to initialize.
34709+ * @shareable: This object is shareable with other applications.
34710+ * (different @tfile pointers.)
34711+ * @type: The object type.
34712+ * @refcount_release: See the struct ttm_base_object description.
34713+ * @ref_obj_release: See the struct ttm_base_object description.
34714+ *
34715+ * Initializes a struct ttm_base_object.
34716+ */
34717+
34718+extern int ttm_base_object_init(struct ttm_object_file *tfile,
34719+ struct ttm_base_object *base,
34720+ bool shareable,
34721+ enum ttm_object_type type,
34722+ void (*refcount_release) (struct ttm_base_object
34723+ **),
34724+ void (*ref_obj_release) (struct ttm_base_object
34725+ *,
34726+ enum ttm_ref_type
34727+ ref_type));
34728+
34729+/**
34730+ * ttm_base_object_lookup
34731+ *
34732+ * @tfile: Pointer to a struct ttm_object_file.
34733+ * @key: Hash key
34734+ *
34735+ * Looks up a struct ttm_base_object with the key @key.
34736+ * Also verifies that the object is visible to the application, by
34737+ * comparing the @tfile argument and checking the object shareable flag.
34738+ */
34739+
34740+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
34741+ *tfile, uint32_t key);
34742+
34743+/**
34744+ * ttm_base_object_unref
34745+ *
34746+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
34747+ *
34748+ * Decrements the base object refcount and clears the pointer pointed to by
34749+ * p_base.
34750+ */
34751+
34752+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
34753+
34754+/**
34755+ * ttm_ref_object_add.
34756+ *
34757+ * @tfile: A struct ttm_object_file representing the application owning the
34758+ * ref_object.
34759+ * @base: The base object to reference.
34760+ * @ref_type: The type of reference.
34761+ * @existed: Upon completion, indicates that an identical reference object
34762+ * already existed, and the refcount was upped on that object instead.
34763+ *
34764+ * Adding a ref object to a base object is basically like referencing the
34765+ * base object, but a user-space application holds the reference. When the
34766+ * file corresponding to @tfile is closed, all its reference objects are
34767+ * deleted. A reference object can have different types depending on what
34768+ * it's intended for. It can be refcounting to prevent object destruction.
34769+ * When user-space takes a lock, it can add a ref object to that lock to
34770+ * make sure the lock is released if the application dies. A ref object
34771+ * will hold a single reference on a base object.
34772+ */
34773+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
34774+ struct ttm_base_object *base,
34775+ enum ttm_ref_type ref_type, bool *existed);
34776+/**
34777+ * ttm_ref_object_base_unref
34778+ *
34779+ * @key: Key representing the base object.
34780+ * @ref_type: Ref type of the ref object to be dereferenced.
34781+ *
34782+ * Unreference a ref object with type @ref_type
34783+ * on the base object identified by @key. If there are no duplicate
34784+ * references, the ref object will be destroyed and the base object
34785+ * will be unreferenced.
34786+ */
34787+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
34788+ unsigned long key,
34789+ enum ttm_ref_type ref_type);
34790+
34791+/**
34792+ * ttm_object_file_init - initialize a struct ttm_object file
34793+ *
34794+ * @tdev: A struct ttm_object device this file is initialized on.
34795+ * @hash_order: Order of the hash table used to hold the reference objects.
34796+ *
34797+ * This is typically called by the file_ops::open function.
34798+ */
34799+
34800+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
34801+ *tdev,
34802+ unsigned int hash_order);
34803+
34804+/**
34805+ * ttm_object_file_release - release data held by a ttm_object_file
34806+ *
34807+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
34808+ * *p_tfile will be set to NULL by this function.
34809+ *
34810+ * Releases all data associated by a ttm_object_file.
34811+ * Typically called from file_ops::release. The caller must
34812+ * ensure that there are no concurrent users of tfile.
34813+ */
34814+
34815+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
34816+
34817+/**
34818+ * ttm_object_device_init - initialize a struct ttm_object_device
34819+ *
34820+ * @hash_order: Order of hash table used to hash the base objects.
34821+ *
34822+ * This function is typically called on device initialization to prepare
34823+ * data structures needed for ttm base and ref objects.
34824+ */
34825+
34826+extern struct ttm_object_device *ttm_object_device_init
34827+ (struct ttm_mem_global *mem_glob, unsigned int hash_order);
34828+
34829+/**
34830+ * ttm_object_device_release - release data held by a ttm_object_device
34831+ *
34832+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
34833+ * *p_tdev will be set to NULL by this function.
34834+ *
34835+ * Releases all data associated by a ttm_object_device.
34836+ * Typically called from driver::unload before the destruction of the
34837+ * device private data structure.
34838+ */
34839+
34840+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
34841+
34842+#endif
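Editor's note: to show how a concrete type builds on the header above (the patch's own example is the user fence in ttm_fence_user.c earlier in this diff), a derived object embeds struct ttm_base_object and frees itself from the refcount_release hook. This is a purely illustrative sketch, not part of the patch.

#include <linux/slab.h>
#include "ttm/ttm_object.h"

/* Sketch: a user-visible object type derived from ttm_base_object. */
struct example_user_obj {
	struct ttm_base_object base;
	/* driver-specific payload would follow */
};

static void example_user_obj_release(struct ttm_base_object **p_base)
{
	struct example_user_obj *obj =
	    container_of(*p_base, struct example_user_obj, base);

	*p_base = NULL;
	kfree(obj);
}

static int example_user_obj_init(struct ttm_object_file *tfile,
				 struct example_user_obj *obj, bool shareable)
{
	/* ttm_driver_type0 is the first device-specific type slot. */
	return ttm_base_object_init(tfile, &obj->base, shareable,
				    ttm_driver_type0,
				    &example_user_obj_release, NULL);
}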
34843diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c
34844--- a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c 1969-12-31 16:00:00.000000000 -0800
34845+++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c 2009-04-07 13:28:38.000000000 -0700
34846@@ -0,0 +1,178 @@
34847+/**************************************************************************
34848+ *
34849+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
34850+ * All Rights Reserved.
34851+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
34852+ * All Rights Reserved.
34853+ *
34854+ * Permission is hereby granted, free of charge, to any person obtaining a
34855+ * copy of this software and associated documentation files (the
34856+ * "Software"), to deal in the Software without restriction, including
34857+ * without limitation the rights to use, copy, modify, merge, publish,
34858+ * distribute, sub license, and/or sell copies of the Software, and to
34859+ * permit persons to whom the Software is furnished to do so, subject to
34860+ * the following conditions:
34861+ *
34862+ * The above copyright notice and this permission notice (including the
34863+ * next paragraph) shall be included in all copies or substantial portions
34864+ * of the Software.
34865+ *
34866+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
34867+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34868+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
34869+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
34870+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
34871+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
34872+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
34873+ *
34874+ **************************************************************************/
34875+/*
34876+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
34877+ */
34878+
34879+#include "ttm/ttm_pat_compat.h"
34880+#include <linux/version.h>
34881+
34882+#include <linux/spinlock.h>
34883+#include <asm/pgtable.h>
34884+
34885+#if (defined(CONFIG_X86) && !defined(CONFIG_X86_PAT))
34886+#include <asm/tlbflush.h>
34887+#include <asm/msr.h>
34888+#include <asm/system.h>
34889+#include <linux/notifier.h>
34890+#include <linux/cpu.h>
34891+
34892+#ifndef MSR_IA32_CR_PAT
34893+#define MSR_IA32_CR_PAT 0x0277
34894+#endif
34895+
34896+#ifndef _PAGE_PAT
34897+#define _PAGE_PAT 0x080
34898+#endif
34899+
34900+static int ttm_has_pat = 0;
34901+
34902+/*
34903+ * Used at resume time when CPUs are brought back up.
34904+ */
34905+
34906+static void ttm_pat_ipi_handler(void *notused)
34907+{
34908+ u32 v1, v2;
34909+
34910+ rdmsr(MSR_IA32_CR_PAT, v1, v2);
34911+ v2 &= 0xFFFFFFF8;
34912+ v2 |= 0x00000001;
34913+ wbinvd();
34914+ wrmsr(MSR_IA32_CR_PAT, v1, v2);
34915+ wbinvd();
34916+ __flush_tlb_all();
34917+}
34918+
34919+static void ttm_pat_enable(void)
34920+{
34921+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27))
34922+ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1, 1) != 0) {
34923+#else
34924+ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1) != 0) {
34925+#endif
34926+ printk(KERN_ERR "Timed out setting up CPU PAT.\n");
34927+ }
34928+}
34929+
34930+void ttm_pat_resume(void)
34931+{
34932+ if (unlikely(!ttm_has_pat))
34933+ return;
34934+
34935+ ttm_pat_enable();
34936+}
34937+
34938+static int psb_cpu_callback(struct notifier_block *nfb,
34939+ unsigned long action, void *hcpu)
34940+{
34941+ if (action == CPU_ONLINE) {
34942+ ttm_pat_resume();
34943+ }
34944+
34945+ return 0;
34946+}
34947+
34948+static struct notifier_block psb_nb = {
34949+ .notifier_call = psb_cpu_callback,
34950+ .priority = 1
34951+};
34952+
34953+/*
34954+ * Set i386 PAT entry PAT4 to Write-combining memory type on all processors.
34955+ */
34956+
34957+void ttm_pat_init(void)
34958+{
34959+ if (likely(ttm_has_pat))
34960+ return;
34961+
34962+ if (!boot_cpu_has(X86_FEATURE_PAT)) {
34963+ return;
34964+ }
34965+
34966+ ttm_pat_enable();
34967+
34968+ if (num_present_cpus() > 1)
34969+ register_cpu_notifier(&psb_nb);
34970+
34971+ ttm_has_pat = 1;
34972+}
34973+
34974+void ttm_pat_takedown(void)
34975+{
34976+ if (unlikely(!ttm_has_pat))
34977+ return;
34978+
34979+ if (num_present_cpus() > 1)
34980+ unregister_cpu_notifier(&psb_nb);
34981+
34982+ ttm_has_pat = 0;
34983+}
34984+
34985+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
34986+{
34987+ if (likely(ttm_has_pat)) {
34988+ pgprot_val(prot) |= _PAGE_PAT;
34989+ return prot;
34990+ } else {
34991+ return pgprot_noncached(prot);
34992+ }
34993+}
34994+
34995+#else
34996+
34997+void ttm_pat_init(void)
34998+{
34999+}
35000+
35001+void ttm_pat_takedown(void)
35002+{
35003+}
35004+
35005+void ttm_pat_resume(void)
35006+{
35007+}
35008+
35009+#ifdef CONFIG_X86
35010+#include <asm/pat.h>
35011+
35012+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
35013+{
35014+ uint32_t cache_bits = ((1) ? _PAGE_CACHE_WC : _PAGE_CACHE_UC_MINUS);
35015+
35016+ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | cache_bits);
35017+}
35018+#else
35019+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
35020+{
35021+ BUG();
35022+}
35023+#endif
35024+#endif
35025diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h
35026--- a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h 1969-12-31 16:00:00.000000000 -0800
35027+++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h 2009-04-07 13:28:38.000000000 -0700
35028@@ -0,0 +1,41 @@
35029+/**************************************************************************
35030+ *
35031+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
35032+ * All Rights Reserved.
35033+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
35034+ * All Rights Reserved.
35035+ *
35036+ * Permission is hereby granted, free of charge, to any person obtaining a
35037+ * copy of this software and associated documentation files (the
35038+ * "Software"), to deal in the Software without restriction, including
35039+ * without limitation the rights to use, copy, modify, merge, publish,
35040+ * distribute, sub license, and/or sell copies of the Software, and to
35041+ * permit persons to whom the Software is furnished to do so, subject to
35042+ * the following conditions:
35043+ *
35044+ * The above copyright notice and this permission notice (including the
35045+ * next paragraph) shall be included in all copies or substantial portions
35046+ * of the Software.
35047+ *
35048+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35049+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
35050+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35051+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
35052+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
35053+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
35054+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
35055+ *
35056+ **************************************************************************/
35057+/*
35058+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
35059+ */
35060+
35061+#ifndef _TTM_PAT_COMPAT_
35062+#define _TTM_PAT_COMPAT_
35063+#include <asm/page.h>
35064+
35065+extern void ttm_pat_init(void);
35066+extern void ttm_pat_takedown(void);
35067+extern void ttm_pat_resume(void);
35068+extern pgprot_t pgprot_ttm_x86_wc(pgprot_t prot);
35069+#endif
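The header collects everything a driver needs from the PAT shim: ttm_pat_init()/ttm_pat_takedown() bracket module lifetime, ttm_pat_resume() belongs in the power-management resume path, and pgprot_ttm_x86_wc() converts a protection value to write-combining (falling back to uncached when PAT is unavailable). A minimal sketch, with hypothetical example_* hooks and the mapping assumed to be set up on a vma:

#include <linux/init.h>
#include <linux/mm.h>
#include "ttm/ttm_pat_compat.h"

static int __init example_module_init(void)
{
	ttm_pat_init();
	return 0;
}

static void __exit example_module_exit(void)
{
	ttm_pat_takedown();
}

static void example_pm_resume(void)
{
	/* Re-program the PAT MSR on all online CPUs after suspend. */
	ttm_pat_resume();
}

static void example_map_wc(struct vm_area_struct *vma)
{
	/* Request write-combined CPU mappings for the BO pages. */
	vma->vm_page_prot = pgprot_ttm_x86_wc(vma->vm_page_prot);
}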
35070diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_placement_common.h b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h
35071--- a/drivers/gpu/drm/psb/ttm/ttm_placement_common.h 1969-12-31 16:00:00.000000000 -0800
35072+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h 2009-04-07 13:28:38.000000000 -0700
35073@@ -0,0 +1,96 @@
35074+/**************************************************************************
35075+ *
35076+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
35077+ * All Rights Reserved.
35078+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
35079+ * All Rights Reserved.
35080+ *
35081+ * Permission is hereby granted, free of charge, to any person obtaining a
35082+ * copy of this software and associated documentation files (the
35083+ * "Software"), to deal in the Software without restriction, including
35084+ * without limitation the rights to use, copy, modify, merge, publish,
35085+ * distribute, sub license, and/or sell copies of the Software, and to
35086+ * permit persons to whom the Software is furnished to do so, subject to
35087+ * the following conditions:
35088+ *
35089+ * The above copyright notice and this permission notice (including the
35090+ * next paragraph) shall be included in all copies or substantial portions
35091+ * of the Software.
35092+ *
35093+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35094+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
35095+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35096+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
35097+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
35098+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
35099+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
35100+ *
35101+ **************************************************************************/
35102+/*
35103+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
35104+ */
35105+
35106+#ifndef _TTM_PL_COMMON_H_
35107+#define _TTM_PL_COMMON_H_
35108+/*
35109+ * Memory regions for data placement.
35110+ */
35111+
35112+#define TTM_PL_SYSTEM 0
35113+#define TTM_PL_TT 1
35114+#define TTM_PL_VRAM 2
35115+#define TTM_PL_PRIV0 3
35116+#define TTM_PL_PRIV1 4
35117+#define TTM_PL_PRIV2 5
35118+#define TTM_PL_PRIV3 6
35119+#define TTM_PL_PRIV4 7
35120+#define TTM_PL_PRIV5 8
35121+#define TTM_PL_CI 9
35122+#define TTM_PL_SWAPPED 15
35123+
35124+#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
35125+#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
35126+#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
35127+#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
35128+#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
35129+#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
35130+#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
35131+#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
35132+#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
35133+#define TTM_PL_FLAG_CI (1 << TTM_PL_CI)
35134+#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
35135+#define TTM_PL_MASK_MEM 0x0000FFFF
35136+
35137+/*
35138+ * Other flags that affect data placement.
35139+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
35140+ * if available.
35141+ * TTM_PL_FLAG_SHARED means that another application may
35142+ * reference the buffer.
35143+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
35144+ * be evicted to make room for other buffers.
35145+ */
35146+
35147+#define TTM_PL_FLAG_CACHED (1 << 16)
35148+#define TTM_PL_FLAG_UNCACHED (1 << 17)
35149+#define TTM_PL_FLAG_WC (1 << 18)
35150+#define TTM_PL_FLAG_SHARED (1 << 20)
35151+#define TTM_PL_FLAG_NO_EVICT (1 << 21)
35152+
35153+#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
35154+ TTM_PL_FLAG_UNCACHED | \
35155+ TTM_PL_FLAG_WC)
35156+
35157+#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
35158+
35159+/*
35160+ * Access flags to be used for CPU and GPU mappings.
35161+ * The idea is that the TTM synchronization mechanism will
35162+ * allow concurrent READ access and exclusive WRITE access.
35163+ * Currently GPU and CPU accesses are mutually exclusive.
35164+ */
35165+
35166+#define TTM_ACCESS_READ (1 << 0)
35167+#define TTM_ACCESS_WRITE (1 << 1)
35168+
35169+#endif
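The flag layout above splits into a memory-region part (the low 16 bits, TTM_PL_MASK_MEM) and a caching/behaviour part (TTM_PL_MASK_CACHING and the remaining bits). A short sketch of how a placement proposal is composed and decomposed; the particular flag combination is only an example:

#include <linux/types.h>
#include "ttm/ttm_placement_common.h"

static void example_placement_flags(void)
{
	/* GTT-backed, cache-coherent, and never evicted. */
	uint32_t proposed = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED |
			    TTM_PL_FLAG_NO_EVICT;

	uint32_t mem_flags = proposed & TTM_PL_MASK_MEM;	/* region bits */
	uint32_t caching = proposed & TTM_PL_MASK_CACHING;	/* caching bits */

	(void)mem_flags;
	(void)caching;
}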
35170diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_placement_user.c b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c
35171--- a/drivers/gpu/drm/psb/ttm/ttm_placement_user.c 1969-12-31 16:00:00.000000000 -0800
35172+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c 2009-04-07 13:28:38.000000000 -0700
35173@@ -0,0 +1,468 @@
35174+/**************************************************************************
35175+ *
35176+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
35177+ * All Rights Reserved.
35178+ *
35179+ * Permission is hereby granted, free of charge, to any person obtaining a
35180+ * copy of this software and associated documentation files (the
35181+ * "Software"), to deal in the Software without restriction, including
35182+ * without limitation the rights to use, copy, modify, merge, publish,
35183+ * distribute, sub license, and/or sell copies of the Software, and to
35184+ * permit persons to whom the Software is furnished to do so, subject to
35185+ * the following conditions:
35186+ *
35187+ * The above copyright notice and this permission notice (including the
35188+ * next paragraph) shall be included in all copies or substantial portions
35189+ * of the Software.
35190+ *
35191+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35192+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
35193+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35194+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
35195+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
35196+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
35197+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
35198+ *
35199+ **************************************************************************/
35200+/*
35201+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
35202+ */
35203+
35204+#include "ttm/ttm_placement_user.h"
35205+#include "ttm/ttm_bo_driver.h"
35206+#include "ttm/ttm_object.h"
35207+#include "ttm/ttm_userobj_api.h"
35208+#include "ttm/ttm_lock.h"
35209+
35210+struct ttm_bo_user_object {
35211+ struct ttm_base_object base;
35212+ struct ttm_buffer_object bo;
35213+};
35214+
35215+static size_t pl_bo_size = 0;
35216+
35217+static size_t ttm_pl_size(struct ttm_bo_device *bdev, unsigned long num_pages)
35218+{
35219+ size_t page_array_size =
35220+ (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
35221+
35222+ if (unlikely(pl_bo_size == 0)) {
35223+ pl_bo_size = bdev->ttm_bo_extra_size +
35224+ ttm_round_pot(sizeof(struct ttm_bo_user_object));
35225+ }
35226+
35227+ return bdev->ttm_bo_size + 2 * page_array_size;
35228+}
35229+
35230+static struct ttm_bo_user_object *ttm_bo_user_lookup(struct ttm_object_file
35231+ *tfile, uint32_t handle)
35232+{
35233+ struct ttm_base_object *base;
35234+
35235+ base = ttm_base_object_lookup(tfile, handle);
35236+ if (unlikely(base == NULL)) {
35237+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
35238+ (unsigned long)handle);
35239+ return NULL;
35240+ }
35241+
35242+ if (unlikely(base->object_type != ttm_buffer_type)) {
35243+ ttm_base_object_unref(&base);
35244+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
35245+ (unsigned long)handle);
35246+ return NULL;
35247+ }
35248+
35249+ return container_of(base, struct ttm_bo_user_object, base);
35250+}
35251+
35252+struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
35253+ *tfile, uint32_t handle)
35254+{
35255+ struct ttm_bo_user_object *user_bo;
35256+ struct ttm_base_object *base;
35257+
35258+ user_bo = ttm_bo_user_lookup(tfile, handle);
35259+ if (unlikely(user_bo == NULL))
35260+ return NULL;
35261+
35262+ (void)ttm_bo_reference(&user_bo->bo);
35263+ base = &user_bo->base;
35264+ ttm_base_object_unref(&base);
35265+ return &user_bo->bo;
35266+}
35267+
35268+static void ttm_bo_user_destroy(struct ttm_buffer_object *bo)
35269+{
35270+ struct ttm_bo_user_object *user_bo =
35271+ container_of(bo, struct ttm_bo_user_object, bo);
35272+
35273+ ttm_mem_global_free(bo->bdev->mem_glob, bo->acc_size, false);
35274+ kfree(user_bo);
35275+}
35276+
35277+static void ttm_bo_user_release(struct ttm_base_object **p_base)
35278+{
35279+ struct ttm_bo_user_object *user_bo;
35280+ struct ttm_base_object *base = *p_base;
35281+ struct ttm_buffer_object *bo;
35282+
35283+ *p_base = NULL;
35284+
35285+ if (unlikely(base == NULL))
35286+ return;
35287+
35288+ user_bo = container_of(base, struct ttm_bo_user_object, base);
35289+ bo = &user_bo->bo;
35290+ ttm_bo_unref(&bo);
35291+}
35292+
35293+static void ttm_bo_user_ref_release(struct ttm_base_object *base,
35294+ enum ttm_ref_type ref_type)
35295+{
35296+ struct ttm_bo_user_object *user_bo =
35297+ container_of(base, struct ttm_bo_user_object, base);
35298+ struct ttm_buffer_object *bo = &user_bo->bo;
35299+
35300+ switch (ref_type) {
35301+ case TTM_REF_SYNCCPU_WRITE:
35302+ ttm_bo_synccpu_write_release(bo);
35303+ break;
35304+ default:
35305+ BUG();
35306+ }
35307+}
35308+
35309+static void ttm_pl_fill_rep(struct ttm_buffer_object *bo,
35310+ struct ttm_pl_rep *rep)
35311+{
35312+ struct ttm_bo_user_object *user_bo =
35313+ container_of(bo, struct ttm_bo_user_object, bo);
35314+
35315+ rep->gpu_offset = bo->offset;
35316+ rep->bo_size = bo->num_pages << PAGE_SHIFT;
35317+ rep->map_handle = bo->addr_space_offset;
35318+ rep->placement = bo->mem.flags;
35319+ rep->handle = user_bo->base.hash.key;
35320+ rep->sync_object_arg = (uint32_t) (unsigned long)bo->sync_obj_arg;
35321+}
35322+
35323+int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
35324+ struct ttm_bo_device *bdev,
35325+ struct ttm_lock *lock, void *data)
35326+{
35327+ union ttm_pl_create_arg *arg = data;
35328+ struct ttm_pl_create_req *req = &arg->req;
35329+ struct ttm_pl_rep *rep = &arg->rep;
35330+ struct ttm_buffer_object *bo;
35331+ struct ttm_buffer_object *tmp;
35332+ struct ttm_bo_user_object *user_bo;
35333+ uint32_t flags;
35334+ int ret = 0;
35335+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
35336+ size_t acc_size =
35337+ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
35338+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
35339+ if (unlikely(ret != 0))
35340+ return ret;
35341+
35342+ flags = req->placement;
35343+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
35344+ if (unlikely(user_bo == NULL)) {
35345+ ttm_mem_global_free(mem_glob, acc_size, false);
35346+ return -ENOMEM;
35347+ }
35348+
35349+ bo = &user_bo->bo;
35350+ ret = ttm_read_lock(lock, true);
35351+ if (unlikely(ret != 0)) {
35352+ ttm_mem_global_free(mem_glob, acc_size, false);
35353+ kfree(user_bo);
35354+ return ret;
35355+ }
35356+
35357+ ret = ttm_buffer_object_init(bdev, bo, req->size,
35358+ ttm_bo_type_device, flags,
35359+ req->page_alignment, 0, true,
35360+ NULL, acc_size, &ttm_bo_user_destroy);
35361+ ttm_read_unlock(lock);
35362+
35363+	/*
35364+	 * Note that ttm_buffer_object_init() already calls the destroy
35365+	 * callback on failure, so no further cleanup of bo is needed here.
35366+	 */
35367+
35368+ if (unlikely(ret != 0))
35369+ goto out;
35370+
35371+ tmp = ttm_bo_reference(bo);
35372+ ret = ttm_base_object_init(tfile, &user_bo->base,
35373+ flags & TTM_PL_FLAG_SHARED,
35374+ ttm_buffer_type,
35375+ &ttm_bo_user_release,
35376+ &ttm_bo_user_ref_release);
35377+ if (unlikely(ret != 0))
35378+ goto out_err;
35379+
35380+ mutex_lock(&bo->mutex);
35381+ ttm_pl_fill_rep(bo, rep);
35382+ mutex_unlock(&bo->mutex);
35383+ ttm_bo_unref(&bo);
35384+ out:
35385+ return 0;
35386+ out_err:
35387+ ttm_bo_unref(&tmp);
35388+ ttm_bo_unref(&bo);
35389+ return ret;
35390+}
35391+
35392+int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
35393+ struct ttm_bo_device *bdev,
35394+ struct ttm_lock *lock, void *data)
35395+{
35396+ union ttm_pl_create_ub_arg *arg = data;
35397+ struct ttm_pl_create_ub_req *req = &arg->req;
35398+ struct ttm_pl_rep *rep = &arg->rep;
35399+ struct ttm_buffer_object *bo;
35400+ struct ttm_buffer_object *tmp;
35401+ struct ttm_bo_user_object *user_bo;
35402+ uint32_t flags;
35403+ int ret = 0;
35404+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
35405+ size_t acc_size =
35406+ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
35407+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
35408+ if (unlikely(ret != 0))
35409+ return ret;
35410+
35411+ flags = req->placement;
35412+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
35413+ if (unlikely(user_bo == NULL)) {
35414+ ttm_mem_global_free(mem_glob, acc_size, false);
35415+ return -ENOMEM;
35416+ }
35417+ ret = ttm_read_lock(lock, true);
35418+ if (unlikely(ret != 0)) {
35419+ ttm_mem_global_free(mem_glob, acc_size, false);
35420+ kfree(user_bo);
35421+ return ret;
35422+ }
35423+ bo = &user_bo->bo;
35424+ ret = ttm_buffer_object_init(bdev, bo, req->size,
35425+ ttm_bo_type_user, flags,
35426+ req->page_alignment, req->user_address,
35427+ true, NULL, acc_size, &ttm_bo_user_destroy);
35428+
35429+	/*
35430+	 * Note that ttm_buffer_object_init() already calls the destroy
35431+	 * callback on failure, so no further cleanup of bo is needed here.
35432+	 */
35433+ ttm_read_unlock(lock);
35434+ if (unlikely(ret != 0))
35435+ goto out;
35436+
35437+ tmp = ttm_bo_reference(bo);
35438+ ret = ttm_base_object_init(tfile, &user_bo->base,
35439+ flags & TTM_PL_FLAG_SHARED,
35440+ ttm_buffer_type,
35441+ &ttm_bo_user_release,
35442+ &ttm_bo_user_ref_release);
35443+ if (unlikely(ret != 0))
35444+ goto out_err;
35445+
35446+ mutex_lock(&bo->mutex);
35447+ ttm_pl_fill_rep(bo, rep);
35448+ mutex_unlock(&bo->mutex);
35449+ ttm_bo_unref(&bo);
35450+ out:
35451+ return 0;
35452+ out_err:
35453+ ttm_bo_unref(&tmp);
35454+ ttm_bo_unref(&bo);
35455+ return ret;
35456+}
35457+
35458+int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data)
35459+{
35460+ union ttm_pl_reference_arg *arg = data;
35461+ struct ttm_pl_rep *rep = &arg->rep;
35462+ struct ttm_bo_user_object *user_bo;
35463+ struct ttm_buffer_object *bo;
35464+ struct ttm_base_object *base;
35465+ int ret;
35466+
35467+ user_bo = ttm_bo_user_lookup(tfile, arg->req.handle);
35468+ if (unlikely(user_bo == NULL)) {
35469+ printk(KERN_ERR "Could not reference buffer object.\n");
35470+ return -EINVAL;
35471+ }
35472+
35473+ bo = &user_bo->bo;
35474+ ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
35475+ if (unlikely(ret != 0)) {
35476+ printk(KERN_ERR
35477+ "Could not add a reference to buffer object.\n");
35478+ goto out;
35479+ }
35480+
35481+ mutex_lock(&bo->mutex);
35482+ ttm_pl_fill_rep(bo, rep);
35483+ mutex_unlock(&bo->mutex);
35484+
35485+ out:
35486+ base = &user_bo->base;
35487+ ttm_base_object_unref(&base);
35488+ return ret;
35489+}
35490+
35491+int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data)
35492+{
35493+ struct ttm_pl_reference_req *arg = data;
35494+
35495+ return ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE);
35496+}
35497+
35498+int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data)
35499+{
35500+ struct ttm_pl_synccpu_arg *arg = data;
35501+ struct ttm_bo_user_object *user_bo;
35502+ struct ttm_buffer_object *bo;
35503+ struct ttm_base_object *base;
35504+ bool existed;
35505+ int ret;
35506+
35507+ switch (arg->op) {
35508+ case TTM_PL_SYNCCPU_OP_GRAB:
35509+ user_bo = ttm_bo_user_lookup(tfile, arg->handle);
35510+ if (unlikely(user_bo == NULL)) {
35511+ printk(KERN_ERR
35512+ "Could not find buffer object for synccpu.\n");
35513+ return -EINVAL;
35514+ }
35515+ bo = &user_bo->bo;
35516+ base = &user_bo->base;
35517+ ret = ttm_bo_synccpu_write_grab(bo,
35518+ arg->access_mode &
35519+ TTM_PL_SYNCCPU_MODE_NO_BLOCK);
35520+ if (unlikely(ret != 0)) {
35521+ ttm_base_object_unref(&base);
35522+ goto out;
35523+ }
35524+ ret = ttm_ref_object_add(tfile, &user_bo->base,
35525+ TTM_REF_SYNCCPU_WRITE, &existed);
35526+ if (existed || ret != 0)
35527+ ttm_bo_synccpu_write_release(bo);
35528+ ttm_base_object_unref(&base);
35529+ break;
35530+ case TTM_PL_SYNCCPU_OP_RELEASE:
35531+ ret = ttm_ref_object_base_unref(tfile, arg->handle,
35532+ TTM_REF_SYNCCPU_WRITE);
35533+ break;
35534+ default:
35535+ ret = -EINVAL;
35536+ break;
35537+ }
35538+ out:
35539+ return ret;
35540+}
35541+
35542+int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
35543+ struct ttm_lock *lock, void *data)
35544+{
35545+ union ttm_pl_setstatus_arg *arg = data;
35546+ struct ttm_pl_setstatus_req *req = &arg->req;
35547+ struct ttm_pl_rep *rep = &arg->rep;
35548+ struct ttm_buffer_object *bo;
35549+ struct ttm_bo_device *bdev;
35550+ int ret;
35551+
35552+ bo = ttm_buffer_object_lookup(tfile, req->handle);
35553+ if (unlikely(bo == NULL)) {
35554+ printk(KERN_ERR
35555+ "Could not find buffer object for setstatus.\n");
35556+ return -EINVAL;
35557+ }
35558+
35559+ bdev = bo->bdev;
35560+
35561+ ret = ttm_read_lock(lock, true);
35562+ if (unlikely(ret != 0))
35563+ goto out_err0;
35564+
35565+ ret = ttm_bo_reserve(bo, true, false, false, 0);
35566+ if (unlikely(ret != 0))
35567+ goto out_err1;
35568+
35569+ ret = ttm_bo_wait_cpu(bo, false);
35570+ if (unlikely(ret != 0))
35571+ goto out_err2;
35572+
35573+ mutex_lock(&bo->mutex);
35574+ ret = ttm_bo_check_placement(bo, req->set_placement,
35575+ req->clr_placement);
35576+ if (unlikely(ret != 0))
35577+ goto out_err2;
35578+
35579+ bo->proposed_flags = (bo->proposed_flags | req->set_placement)
35580+ & ~req->clr_placement;
35581+ ret = ttm_buffer_object_validate(bo, true, false);
35582+ if (unlikely(ret != 0))
35583+ goto out_err2;
35584+
35585+ ttm_pl_fill_rep(bo, rep);
35586+ out_err2:
35587+ mutex_unlock(&bo->mutex);
35588+ ttm_bo_unreserve(bo);
35589+ out_err1:
35590+ ttm_read_unlock(lock);
35591+ out_err0:
35592+ ttm_bo_unref(&bo);
35593+ return ret;
35594+}
35595+
35596+int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
35597+{
35598+ struct ttm_pl_waitidle_arg *arg = data;
35599+ struct ttm_buffer_object *bo;
35600+ int ret;
35601+
35602+ bo = ttm_buffer_object_lookup(tfile, arg->handle);
35603+ if (unlikely(bo == NULL)) {
35604+ printk(KERN_ERR "Could not find buffer object for waitidle.\n");
35605+ return -EINVAL;
35606+ }
35607+
35608+ ret =
35609+ ttm_bo_block_reservation(bo, true,
35610+ arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
35611+ if (unlikely(ret != 0))
35612+ goto out;
35613+ mutex_lock(&bo->mutex);
35614+ ret = ttm_bo_wait(bo,
35615+ arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
35616+ true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
35617+ mutex_unlock(&bo->mutex);
35618+ ttm_bo_unblock_reservation(bo);
35619+ out:
35620+ ttm_bo_unref(&bo);
35621+ return ret;
35622+}
35623+
35624+int ttm_pl_verify_access(struct ttm_buffer_object *bo,
35625+ struct ttm_object_file *tfile)
35626+{
35627+ struct ttm_bo_user_object *ubo;
35628+
35629+ /*
35630+ * Check bo subclass.
35631+ */
35632+
35633+ if (unlikely(bo->destroy != &ttm_bo_user_destroy))
35634+ return -EPERM;
35635+
35636+ ubo = container_of(bo, struct ttm_bo_user_object, bo);
35637+ if (likely(ubo->base.shareable || ubo->base.tfile == tfile))
35638+ return 0;
35639+
35640+ return -EPERM;
35641+}
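Each ioctl helper in this file expects the caller to have resolved the per-file ttm_object_file, the device's ttm_bo_device and the client lock, and to hand in an argument block already copied from user space. A hedged sketch of the driver-side wrapper; example_dev_priv and example_file_priv stand in for the real driver's private structures, and the header paths are those used elsewhere in this patch:

#include <drm/drmP.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_lock.h"
#include "ttm/ttm_userobj_api.h"

struct example_dev_priv {
	struct ttm_bo_device *bdev;
	struct ttm_lock *client_lock;
};

struct example_file_priv {
	struct ttm_object_file *tfile;
};

static int example_pl_create_wrap(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct example_dev_priv *dev_priv = dev->dev_private;
	struct example_file_priv *fpriv = file_priv->driver_priv;

	/* data points at a union ttm_pl_create_arg copied in by the
	 * DRM ioctl dispatcher. */
	return ttm_pl_create_ioctl(fpriv->tfile, dev_priv->bdev,
				   dev_priv->client_lock, data);
}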
35642diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_placement_user.h b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h
35643--- a/drivers/gpu/drm/psb/ttm/ttm_placement_user.h 1969-12-31 16:00:00.000000000 -0800
35644+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h 2009-04-07 13:28:38.000000000 -0700
35645@@ -0,0 +1,259 @@
35646+/**************************************************************************
35647+ *
35648+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
35649+ * All Rights Reserved.
35650+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
35651+ * All Rights Reserved.
35652+ *
35653+ * Permission is hereby granted, free of charge, to any person obtaining a
35654+ * copy of this software and associated documentation files (the
35655+ * "Software"), to deal in the Software without restriction, including
35656+ * without limitation the rights to use, copy, modify, merge, publish,
35657+ * distribute, sub license, and/or sell copies of the Software, and to
35658+ * permit persons to whom the Software is furnished to do so, subject to
35659+ * the following conditions:
35660+ *
35661+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35662+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
35663+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35664+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
35665+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
35666+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
35667+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
35668+ *
35669+ * The above copyright notice and this permission notice (including the
35670+ * next paragraph) shall be included in all copies or substantial portions
35671+ * of the Software.
35672+ *
35673+ **************************************************************************/
35674+/*
35675+ * Authors
35676+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
35677+ */
35678+
35679+#ifndef _TTM_PLACEMENT_USER_H_
35680+#define _TTM_PLACEMENT_USER_H_
35681+
35682+#if !defined(__KERNEL__) && !defined(_KERNEL)
35683+#include <stdint.h>
35684+#else
35685+#include <linux/kernel.h>
35686+#endif
35687+
35688+#include "ttm/ttm_placement_common.h"
35689+
35690+#define TTM_PLACEMENT_MAJOR 0
35691+#define TTM_PLACEMENT_MINOR 1
35692+#define TTM_PLACEMENT_PL 0
35693+#define TTM_PLACEMENT_DATE "080819"
35694+
35695+/**
35696+ * struct ttm_pl_create_req
35697+ *
35698+ * @size: The buffer object size.
35699+ * @placement: Flags that indicate initial acceptable
35700+ * placement.
35701+ * @page_alignment: Required alignment in pages.
35702+ *
35703+ * Input to the TTM_BO_CREATE ioctl.
35704+ */
35705+
35706+struct ttm_pl_create_req {
35707+ uint64_t size;
35708+ uint32_t placement;
35709+ uint32_t page_alignment;
35710+};
35711+
35712+/**
35713+ * struct ttm_pl_create_ub_req
35714+ *
35715+ * @size: The buffer object size.
35716+ * @user_address: User-space address of the memory area that
35717+ * should be used to back the buffer object, cast to a 64-bit integer.
35718+ * @placement: Flags that indicate initial acceptable
35719+ * placement.
35720+ * @page_alignment: Required alignment in pages.
35721+ *
35722+ * Input to the TTM_BO_CREATE_UB ioctl.
35723+ */
35724+
35725+struct ttm_pl_create_ub_req {
35726+ uint64_t size;
35727+ uint64_t user_address;
35728+ uint32_t placement;
35729+ uint32_t page_alignment;
35730+};
35731+
35732+/**
35733+ * struct ttm_pl_rep
35734+ *
35735+ * @gpu_offset: The current offset into the memory region used.
35736+ * This can be used directly by the GPU if there are no
35737+ * additional GPU mapping procedures used by the driver.
35738+ *
35739+ * @bo_size: Actual buffer object size.
35740+ *
35741+ * @map_handle: Offset into the device address space.
35742+ * Used for map, seek, read, write. This will never change
35743+ * during the lifetime of an object.
35744+ *
35745+ * @placement: Flag indicating the placement status of
35746+ * the buffer object using the TTM_PL flags above.
35747+ *
35748+ * @sync_object_arg: Used for user-space synchronization and
35749+ * depends on the synchronization model used. If fences are
35750+ * used, this is the buffer_object::fence_type_mask.
35751+ *
35752+ * Output from the TTM_PL_CREATE, TTM_PL_REFERENCE and
35753+ * TTM_PL_SETSTATUS ioctls.
35754+ */
35755+
35756+struct ttm_pl_rep {
35757+ uint64_t gpu_offset;
35758+ uint64_t bo_size;
35759+ uint64_t map_handle;
35760+ uint32_t placement;
35761+ uint32_t handle;
35762+ uint32_t sync_object_arg;
35763+ uint32_t pad64;
35764+};
35765+
35766+/**
35767+ * struct ttm_pl_setstatus_req
35768+ *
35769+ * @set_placement: Placement flags to set.
35770+ *
35771+ * @clr_placement: Placement flags to clear.
35772+ *
35773+ * @handle: The object handle.
35774+ *
35775+ * Input to the TTM_PL_SETSTATUS ioctl.
35776+ */
35777+
35778+struct ttm_pl_setstatus_req {
35779+ uint32_t set_placement;
35780+ uint32_t clr_placement;
35781+ uint32_t handle;
35782+ uint32_t pad64;
35783+};
35784+
35785+/**
35786+ * struct ttm_pl_reference_req
35787+ *
35788+ * @handle: The object to put a reference on.
35789+ *
35790+ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREFERENCE ioctls.
35791+ */
35792+
35793+struct ttm_pl_reference_req {
35794+ uint32_t handle;
35795+ uint32_t pad64;
35796+};
35797+
35798+/*
35799+ * Access mode flags for SYNCCPU.
35800+ *
35801+ * TTM_PL_SYNCCPU_MODE_READ guarantees that the GPU is not
35802+ * writing to the buffer.
35803+ *
35804+ * TTM_PL_SYNCCPU_MODE_WRITE guarantees that the GPU is not
35805+ * accessing the buffer.
35806+ *
35807+ * TTM_PL_SYNCCPU_MODE_NO_BLOCK makes the call return -EBUSY
35808+ * instead of waiting for GPU accesses to finish.
35809+ *
35810+ * TTM_PL_SYNCCPU_MODE_TRYCACHED tries to place the buffer in
35811+ * cacheable memory while it is synchronized for CPU access.
35812+ */
35813+
35814+#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ
35815+#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE
35816+#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2)
35817+#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)
35818+
35819+/**
35820+ * struct ttm_pl_synccpu_arg
35821+ *
35822+ * @handle: The object to synchronize.
35823+ *
35824+ * @access_mode: access mode indicated by the
35825+ * TTM_PL_SYNCCPU_MODE flags.
35826+ *
35827+ * @op: indicates whether to grab or release the
35828+ * buffer for cpu usage.
35829+ *
35830+ * Input to the TTM_PL_SYNCCPU ioctl.
35831+ */
35832+
35833+struct ttm_pl_synccpu_arg {
35834+ uint32_t handle;
35835+ uint32_t access_mode;
35836+ enum {
35837+ TTM_PL_SYNCCPU_OP_GRAB,
35838+ TTM_PL_SYNCCPU_OP_RELEASE
35839+ } op;
35840+ uint32_t pad64;
35841+};
35842+
35843+/*
35844+ * Waiting mode flags for the TTM_BO_WAITIDLE ioctl.
35845+ *
35846+ * TTM_PL_WAITIDLE_MODE_LAZY: Allow for sleeps during polling
35847+ * wait.
35848+ *
35849+ * TTM_PL_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for the GPU,
35850+ * but return -EBUSY if the buffer is busy.
35851+ */
35852+
35853+#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0)
35854+#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)
35855+
35856+/**
35857+ * struct ttm_pl_waitidle_arg
35858+ *
35859+ * @handle: The object to synchronize.
35860+ *
35861+ * @mode: wait mode indicated by the
35862+ * TTM_PL_WAITIDLE_MODE flags.
35863+ *
35864+ * Argument to the TTM_BO_WAITIDLE ioctl.
35865+ */
35866+
35867+struct ttm_pl_waitidle_arg {
35868+ uint32_t handle;
35869+ uint32_t mode;
35870+};
35871+
35872+union ttm_pl_create_arg {
35873+ struct ttm_pl_create_req req;
35874+ struct ttm_pl_rep rep;
35875+};
35876+
35877+union ttm_pl_reference_arg {
35878+ struct ttm_pl_reference_req req;
35879+ struct ttm_pl_rep rep;
35880+};
35881+
35882+union ttm_pl_setstatus_arg {
35883+ struct ttm_pl_setstatus_req req;
35884+ struct ttm_pl_rep rep;
35885+};
35886+
35887+union ttm_pl_create_ub_arg {
35888+ struct ttm_pl_create_ub_req req;
35889+ struct ttm_pl_rep rep;
35890+};
35891+
35892+/*
35893+ * Ioctl offsets.
35894+ */
35895+
35896+#define TTM_PL_CREATE 0x00
35897+#define TTM_PL_REFERENCE 0x01
35898+#define TTM_PL_UNREF 0x02
35899+#define TTM_PL_SYNCCPU 0x03
35900+#define TTM_PL_WAITIDLE 0x04
35901+#define TTM_PL_SETSTATUS 0x05
35902+#define TTM_PL_CREATE_UB 0x06
35903+
35904+#endif
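From user space, each ioctl takes one of the unions above: the req half is filled in, the call is made, and the rep half is read back. A hedged sketch using libdrm's drmCommandWriteRead(); the driver command offset (example_driver_base) and the installed header path are assumptions, since the real values come from the driver's user-space library:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "ttm/ttm_placement_user.h"

static int example_bo_create(int fd, unsigned long example_driver_base,
			     uint64_t size, uint32_t *handle_out)
{
	union ttm_pl_create_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req.size = size;
	arg.req.placement = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED;
	arg.req.page_alignment = 0;

	ret = drmCommandWriteRead(fd, example_driver_base + TTM_PL_CREATE,
				  &arg, sizeof(arg));
	if (ret)
		return ret;

	/* On success the kernel has overwritten the union with the reply. */
	*handle_out = arg.rep.handle;
	return 0;
}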
35905diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_regman.h b/drivers/gpu/drm/psb/ttm/ttm_regman.h
35906--- a/drivers/gpu/drm/psb/ttm/ttm_regman.h 1969-12-31 16:00:00.000000000 -0800
35907+++ b/drivers/gpu/drm/psb/ttm/ttm_regman.h 2009-04-07 13:28:38.000000000 -0700
35908@@ -0,0 +1,74 @@
35909+/**************************************************************************
35910+ *
35911+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
35912+ * All Rights Reserved.
35913+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
35914+ * All Rights Reserved.
35915+ *
35916+ * Permission is hereby granted, free of charge, to any person obtaining a
35917+ * copy of this software and associated documentation files (the
35918+ * "Software"), to deal in the Software without restriction, including
35919+ * without limitation the rights to use, copy, modify, merge, publish,
35920+ * distribute, sub license, and/or sell copies of the Software, and to
35921+ * permit persons to whom the Software is furnished to do so, subject to
35922+ * the following conditions:
35923+ *
35924+ * The above copyright notice and this permission notice (including the
35925+ * next paragraph) shall be included in all copies or substantial portions
35926+ * of the Software.
35927+ *
35928+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35929+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
35930+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35931+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
35932+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
35933+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
35934+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
35935+ *
35936+ **************************************************************************/
35937+/*
35938+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
35939+ */
35940+
35941+#ifndef _TTM_REGMAN_H_
35942+#define _TTM_REGMAN_H_
35943+
35944+#include <linux/list.h>
35945+
35946+struct ttm_fence_object;
35947+
35948+struct ttm_reg {
35949+ struct list_head head;
35950+ struct ttm_fence_object *fence;
35951+ uint32_t fence_type;
35952+ uint32_t new_fence_type;
35953+};
35954+
35955+struct ttm_reg_manager {
35956+ struct list_head free;
35957+ struct list_head lru;
35958+ struct list_head unfenced;
35959+
35960+ int (*reg_reusable)(const struct ttm_reg *reg, const void *data);
35961+ void (*reg_destroy)(struct ttm_reg *reg);
35962+};
35963+
35964+extern int ttm_regs_alloc(struct ttm_reg_manager *manager,
35965+ const void *data,
35966+ uint32_t fence_class,
35967+ uint32_t fence_type,
35968+ int interruptible,
35969+ int no_wait,
35970+ struct ttm_reg **reg);
35971+
35972+extern void ttm_regs_fence(struct ttm_reg_manager *regs,
35973+ struct ttm_fence_object *fence);
35974+
35975+extern void ttm_regs_free(struct ttm_reg_manager *manager);
35976+extern void ttm_regs_add(struct ttm_reg_manager *manager, struct ttm_reg *reg);
35977+extern void ttm_regs_init(struct ttm_reg_manager *manager,
35978+ int (*reg_reusable)(const struct ttm_reg *,
35979+ const void *),
35980+ void (*reg_destroy)(struct ttm_reg *));
35981+
35982+#endif
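The register manager is driven entirely through the two callbacks passed to ttm_regs_init(): reg_reusable() decides whether an already-allocated register can be recycled for a new request, and reg_destroy() frees the driver's wrapper. A minimal sketch, assuming a hypothetical example_hw_reg wrapper keyed on a format value:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "ttm/ttm_regman.h"

struct example_hw_reg {
	struct ttm_reg reg;	/* embedded list/fence bookkeeping */
	uint32_t format;	/* hypothetical hardware format key */
};

static int example_reg_reusable(const struct ttm_reg *reg, const void *data)
{
	const struct example_hw_reg *hw_reg =
	    container_of(reg, const struct example_hw_reg, reg);
	const uint32_t *wanted_format = data;

	return hw_reg->format == *wanted_format;
}

static void example_reg_destroy(struct ttm_reg *reg)
{
	kfree(container_of(reg, struct example_hw_reg, reg));
}

static void example_regman_setup(struct ttm_reg_manager *manager)
{
	ttm_regs_init(manager, example_reg_reusable, example_reg_destroy);
}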
35983diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_tt.c b/drivers/gpu/drm/psb/ttm/ttm_tt.c
35984--- a/drivers/gpu/drm/psb/ttm/ttm_tt.c 1969-12-31 16:00:00.000000000 -0800
35985+++ b/drivers/gpu/drm/psb/ttm/ttm_tt.c 2009-04-07 13:28:38.000000000 -0700
35986@@ -0,0 +1,655 @@
35987+/**************************************************************************
35988+ *
35989+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
35990+ * All Rights Reserved.
35991+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
35992+ * All Rights Reserved.
35993+ *
35994+ * Permission is hereby granted, free of charge, to any person obtaining a
35995+ * copy of this software and associated documentation files (the
35996+ * "Software"), to deal in the Software without restriction, including
35997+ * without limitation the rights to use, copy, modify, merge, publish,
35998+ * distribute, sub license, and/or sell copies of the Software, and to
35999+ * permit persons to whom the Software is furnished to do so, subject to
36000+ * the following conditions:
36001+ *
36002+ * The above copyright notice and this permission notice (including the
36003+ * next paragraph) shall be included in all copies or substantial portions
36004+ * of the Software.
36005+ *
36006+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36007+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36008+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
36009+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
36010+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
36011+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
36012+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
36013+ *
36014+ **************************************************************************/
36015+/*
36016+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
36017+ */
36018+
36019+#include <linux/version.h>
36020+#include <linux/vmalloc.h>
36021+#include <linux/sched.h>
36022+#include <linux/highmem.h>
36023+#include <linux/pagemap.h>
36024+#include <linux/file.h>
36025+#include <linux/swap.h>
36026+#include "ttm/ttm_bo_driver.h"
36027+#include "ttm/ttm_placement_common.h"
36028+
36029+static int ttm_tt_swapin(struct ttm_tt *ttm);
36030+
36031+#if defined( CONFIG_X86 )
36032+static void ttm_tt_clflush_page(struct page *page)
36033+{
36034+ uint8_t *page_virtual;
36035+ unsigned int i;
36036+
36037+ if (unlikely(page == NULL))
36038+ return;
36039+
36040+ page_virtual = kmap_atomic(page, KM_USER0);
36041+
36042+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
36043+ clflush(page_virtual + i);
36044+
36045+ kunmap_atomic(page_virtual, KM_USER0);
36046+}
36047+
36048+static void ttm_tt_cache_flush_clflush(struct page *pages[],
36049+ unsigned long num_pages)
36050+{
36051+ unsigned long i;
36052+
36053+ mb();
36054+ for (i = 0; i < num_pages; ++i)
36055+ ttm_tt_clflush_page(*pages++);
36056+ mb();
36057+}
36058+#else
36059+static void ttm_tt_ipi_handler(void *null)
36060+{
36061+ ;
36062+}
36063+#endif
36064+
36065+void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
36066+{
36067+
36068+#if defined( CONFIG_X86 )
36069+ if (cpu_has_clflush) {
36070+ ttm_tt_cache_flush_clflush(pages, num_pages);
36071+ return;
36072+ }
36073+#else
36074+ if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1, 1) != 0)
36075+ printk(KERN_ERR "Timed out waiting for drm cache flush.\n");
36076+#endif
36077+}
36078+
36079+/**
36080+ * Allocates storage for pointers to the pages that back the ttm.
36081+ *
36082+ * Uses kmalloc if possible. Otherwise falls back to vmalloc.
36083+ */
36084+static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
36085+{
36086+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
36087+ ttm->pages = NULL;
36088+
36089+ if (size <= PAGE_SIZE)
36090+ ttm->pages = kzalloc(size, GFP_KERNEL);
36091+
36092+ if (!ttm->pages) {
36093+ ttm->pages = vmalloc_user(size);
36094+ if (ttm->pages)
36095+ ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
36096+ }
36097+}
36098+
36099+static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
36100+{
36101+ if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
36102+ vfree(ttm->pages);
36103+ ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
36104+ } else {
36105+ kfree(ttm->pages);
36106+ }
36107+ ttm->pages = NULL;
36108+}
36109+
36110+static struct page *ttm_tt_alloc_page(void)
36111+{
36112+ return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
36113+}
36114+
36115+static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
36116+{
36117+ int write;
36118+ int dirty;
36119+ struct page *page;
36120+ int i;
36121+ struct ttm_backend *be = ttm->be;
36122+
36123+ BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
36124+ write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
36125+ dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
36126+
36127+ if (be)
36128+ be->func->clear(be);
36129+
36130+ for (i = 0; i < ttm->num_pages; ++i) {
36131+ page = ttm->pages[i];
36132+ if (page == NULL)
36133+ continue;
36134+
36135+ if (page == ttm->dummy_read_page) {
36136+ BUG_ON(write);
36137+ continue;
36138+ }
36139+
36140+ if (write && dirty && !PageReserved(page))
36141+ set_page_dirty_lock(page);
36142+
36143+ ttm->pages[i] = NULL;
36144+ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
36145+ put_page(page);
36146+ }
36147+ ttm->state = tt_unpopulated;
36148+ ttm->first_himem_page = ttm->num_pages;
36149+ ttm->last_lomem_page = -1;
36150+}
36151+
36152+static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
36153+{
36154+ struct page *p;
36155+ struct ttm_bo_device *bdev = ttm->bdev;
36156+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
36157+ int ret;
36158+
36159+ while (NULL == (p = ttm->pages[index])) {
36160+ p = ttm_tt_alloc_page();
36161+
36162+ if (!p)
36163+ return NULL;
36164+
36165+ if (PageHighMem(p)) {
36166+ ret =
36167+ ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, true);
36168+ if (unlikely(ret != 0))
36169+ goto out_err;
36170+ ttm->pages[--ttm->first_himem_page] = p;
36171+ } else {
36172+ ret =
36173+ ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, false);
36174+ if (unlikely(ret != 0))
36175+ goto out_err;
36176+ ttm->pages[++ttm->last_lomem_page] = p;
36177+ }
36178+ }
36179+ return p;
36180+ out_err:
36181+ put_page(p);
36182+ return NULL;
36183+}
36184+
36185+struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
36186+{
36187+ int ret;
36188+
36189+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
36190+ ret = ttm_tt_swapin(ttm);
36191+ if (unlikely(ret != 0))
36192+ return NULL;
36193+ }
36194+ return __ttm_tt_get_page(ttm, index);
36195+}
36196+
36197+int ttm_tt_populate(struct ttm_tt *ttm)
36198+{
36199+ struct page *page;
36200+ unsigned long i;
36201+ struct ttm_backend *be;
36202+ int ret;
36203+
36204+ if (ttm->state != tt_unpopulated)
36205+ return 0;
36206+
36207+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
36208+ ret = ttm_tt_swapin(ttm);
36209+ if (unlikely(ret != 0))
36210+ return ret;
36211+ }
36212+
36213+ be = ttm->be;
36214+
36215+ for (i = 0; i < ttm->num_pages; ++i) {
36216+ page = __ttm_tt_get_page(ttm, i);
36217+ if (!page)
36218+ return -ENOMEM;
36219+ }
36220+
36221+ be->func->populate(be, ttm->num_pages, ttm->pages,
36222+ ttm->dummy_read_page);
36223+ ttm->state = tt_unbound;
36224+ return 0;
36225+}
36226+
36227+#ifdef CONFIG_X86
36228+static inline int ttm_tt_set_page_caching(struct page *p,
36229+ enum ttm_caching_state c_state)
36230+{
36231+ if (PageHighMem(p))
36232+ return 0;
36233+
36234+ switch (c_state) {
36235+ case tt_cached:
36236+ return set_pages_wb(p, 1);
36237+ case tt_wc:
36238+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
36239+ return set_memory_wc((unsigned long) page_address(p), 1);
36240+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) */
36241+ default:
36242+ return set_pages_uc(p, 1);
36243+ }
36244+}
36245+#else /* CONFIG_X86 */
36246+static inline int ttm_tt_set_page_caching(struct page *p,
36247+ enum ttm_caching_state c_state)
36248+{
36249+ return 0;
36250+}
36251+#endif /* CONFIG_X86 */
36252+
36253+/*
36254+ * Change caching policy for the linear kernel map
36255+ * for range of pages in a ttm.
36256+ */
36257+
36258+static int ttm_tt_set_caching(struct ttm_tt *ttm,
36259+ enum ttm_caching_state c_state)
36260+{
36261+ int i, j;
36262+ struct page *cur_page;
36263+ int ret;
36264+
36265+ if (ttm->caching_state == c_state)
36266+ return 0;
36267+
36268+ if (c_state != tt_cached) {
36269+ ret = ttm_tt_populate(ttm);
36270+ if (unlikely(ret != 0))
36271+ return ret;
36272+ }
36273+
36274+ if (ttm->caching_state == tt_cached)
36275+ ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
36276+
36277+ for (i = 0; i < ttm->num_pages; ++i) {
36278+ cur_page = ttm->pages[i];
36279+ if (likely(cur_page != NULL)) {
36280+ ret = ttm_tt_set_page_caching(cur_page, c_state);
36281+ if (unlikely(ret != 0))
36282+ goto out_err;
36283+ }
36284+ }
36285+
36286+ ttm->caching_state = c_state;
36287+
36288+ return 0;
36289+
36290+ out_err:
36291+ for (j = 0; j < i; ++j) {
36292+ cur_page = ttm->pages[j];
36293+ if (likely(cur_page != NULL)) {
36294+ (void)ttm_tt_set_page_caching(cur_page,
36295+ ttm->caching_state);
36296+ }
36297+ }
36298+
36299+ return ret;
36300+}
36301+
36302+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
36303+{
36304+ enum ttm_caching_state state;
36305+
36306+ if (placement & TTM_PL_FLAG_WC)
36307+ state = tt_wc;
36308+ else if (placement & TTM_PL_FLAG_UNCACHED)
36309+ state = tt_uncached;
36310+ else
36311+ state = tt_cached;
36312+
36313+ return ttm_tt_set_caching(ttm, state);
36314+}
36315+
36316+static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
36317+{
36318+ int i;
36319+ struct page *cur_page;
36320+ struct ttm_backend *be = ttm->be;
36321+
36322+ if (be)
36323+ be->func->clear(be);
36324+ (void)ttm_tt_set_caching(ttm, tt_cached);
36325+ for (i = 0; i < ttm->num_pages; ++i) {
36326+ cur_page = ttm->pages[i];
36327+ ttm->pages[i] = NULL;
36328+ if (cur_page) {
36329+ if (page_count(cur_page) != 1)
36330+ printk(KERN_ERR
36331+ "Erroneous page count. Leaking pages.\n");
36332+ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
36333+ PageHighMem(cur_page));
36334+ __free_page(cur_page);
36335+ }
36336+ }
36337+ ttm->state = tt_unpopulated;
36338+ ttm->first_himem_page = ttm->num_pages;
36339+ ttm->last_lomem_page = -1;
36340+}
36341+
36342+void ttm_tt_destroy(struct ttm_tt *ttm)
36343+{
36344+ struct ttm_backend *be;
36345+
36346+ if (unlikely(ttm == NULL))
36347+ return;
36348+
36349+ be = ttm->be;
36350+ if (likely(be != NULL)) {
36351+ be->func->destroy(be);
36352+ ttm->be = NULL;
36353+ }
36354+
36355+ if (likely(ttm->pages != NULL)) {
36356+ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
36357+ ttm_tt_free_user_pages(ttm);
36358+ else
36359+ ttm_tt_free_alloced_pages(ttm);
36360+
36361+ ttm_tt_free_page_directory(ttm);
36362+ }
36363+
36364+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
36365+ ttm->swap_storage)
36366+ fput(ttm->swap_storage);
36367+
36368+ kfree(ttm);
36369+}
36370+
36371+int ttm_tt_set_user(struct ttm_tt *ttm,
36372+ struct task_struct *tsk,
36373+ unsigned long start, unsigned long num_pages)
36374+{
36375+ struct mm_struct *mm = tsk->mm;
36376+ int ret;
36377+ int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
36378+ struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
36379+
36380+ BUG_ON(num_pages != ttm->num_pages);
36381+ BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
36382+
36383+ /**
36384+ * Account user pages as lowmem pages for now.
36385+ */
36386+
36387+ ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE, false, false, false);
36388+ if (unlikely(ret != 0))
36389+ return ret;
36390+
36391+ down_read(&mm->mmap_sem);
36392+ ret = get_user_pages(tsk, mm, start, num_pages,
36393+ write, 0, ttm->pages, NULL);
36394+ up_read(&mm->mmap_sem);
36395+
36396+ if (ret != num_pages && write) {
36397+ ttm_tt_free_user_pages(ttm);
36398+ ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
36399+ return -ENOMEM;
36400+ }
36401+
36402+ ttm->tsk = tsk;
36403+ ttm->start = start;
36404+ ttm->state = tt_unbound;
36405+
36406+ return 0;
36407+}
36408+
36409+struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
36410+ uint32_t page_flags, struct page *dummy_read_page)
36411+{
36412+ struct ttm_bo_driver *bo_driver = bdev->driver;
36413+ struct ttm_tt *ttm;
36414+
36415+ if (!bo_driver)
36416+ return NULL;
36417+
36418+ ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
36419+ if (!ttm)
36420+ return NULL;
36421+
36422+ ttm->bdev = bdev;
36423+
36424+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
36425+ ttm->first_himem_page = ttm->num_pages;
36426+ ttm->last_lomem_page = -1;
36427+ ttm->caching_state = tt_cached;
36428+ ttm->page_flags = page_flags;
36429+
36430+ ttm->dummy_read_page = dummy_read_page;
36431+
36432+ ttm_tt_alloc_page_directory(ttm);
36433+ if (!ttm->pages) {
36434+ ttm_tt_destroy(ttm);
36435+ printk(KERN_ERR "Failed allocating page table\n");
36436+ return NULL;
36437+ }
36438+ ttm->be = bo_driver->create_ttm_backend_entry(bdev);
36439+ if (!ttm->be) {
36440+ ttm_tt_destroy(ttm);
36441+ printk(KERN_ERR "Failed creating ttm backend entry\n");
36442+ return NULL;
36443+ }
36444+ ttm->state = tt_unpopulated;
36445+ return ttm;
36446+}
36447+
36448+/**
36449+ * ttm_tt_unbind:
36450+ *
36451+ * @ttm: the object to unbind from the graphics device
36452+ *
36453+ * Unbind an object from the aperture. This removes the mappings
36454+ * from the graphics device and flushes caches if necessary.
36455+ */
36456+void ttm_tt_unbind(struct ttm_tt *ttm)
36457+{
36458+ int ret;
36459+ struct ttm_backend *be = ttm->be;
36460+
36461+ if (ttm->state == tt_bound) {
36462+ ret = be->func->unbind(be);
36463+ BUG_ON(ret);
36464+ }
36465+ ttm->state = tt_unbound;
36466+}
36467+
36468+/**
36469+ * ttm_tt_bind:
36470+ *
36471+ * @ttm: the ttm object to bind to the graphics device
36472+ *
36473+ * @bo_mem: the aperture memory region which will hold the object
36474+ *
36475+ * Bind a ttm object to the aperture. This ensures that the necessary
36476+ * pages are allocated, flushes CPU caches as needed and marks the
36477+ * ttm as TTM_PAGE_FLAG_USER_DIRTY to indicate that it may have been
36478+ * modified by the GPU.
36479+ */
36480+
36481+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
36482+{
36483+ int ret = 0;
36484+ struct ttm_backend *be;
36485+
36486+ if (!ttm)
36487+ return -EINVAL;
36488+
36489+ if (ttm->state == tt_bound)
36490+ return 0;
36491+
36492+ be = ttm->be;
36493+
36494+ ret = ttm_tt_populate(ttm);
36495+ if (ret)
36496+ return ret;
36497+
36498+ ret = be->func->bind(be, bo_mem);
36499+ if (ret) {
36500+ printk(KERN_ERR "Couldn't bind backend.\n");
36501+ return ret;
36502+ }
36503+
36504+ ttm->state = tt_bound;
36505+
36506+ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
36507+ ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
36508+ return 0;
36509+}
36510+
36511+static int ttm_tt_swapin(struct ttm_tt *ttm)
36512+{
36513+ struct address_space *swap_space;
36514+ struct file *swap_storage;
36515+ struct page *from_page;
36516+ struct page *to_page;
36517+ void *from_virtual;
36518+ void *to_virtual;
36519+ int i;
36520+ int ret;
36521+
36522+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
36523+ ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
36524+ ttm->num_pages);
36525+ if (unlikely(ret != 0))
36526+ return ret;
36527+
36528+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
36529+ return 0;
36530+ }
36531+
36532+ swap_storage = ttm->swap_storage;
36533+ BUG_ON(swap_storage == NULL);
36534+
36535+ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
36536+
36537+ for (i = 0; i < ttm->num_pages; ++i) {
36538+ from_page = read_mapping_page(swap_space, i, NULL);
36539+ if (IS_ERR(from_page))
36540+ goto out_err;
36541+ to_page = __ttm_tt_get_page(ttm, i);
36542+ if (unlikely(to_page == NULL))
36543+ goto out_err;
36544+
36545+ preempt_disable();
36546+ from_virtual = kmap_atomic(from_page, KM_USER0);
36547+ to_virtual = kmap_atomic(to_page, KM_USER1);
36548+ memcpy(to_virtual, from_virtual, PAGE_SIZE);
36549+ kunmap_atomic(to_virtual, KM_USER1);
36550+ kunmap_atomic(from_virtual, KM_USER0);
36551+ preempt_enable();
36552+ page_cache_release(from_page);
36553+ }
36554+
36555+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
36556+ fput(swap_storage);
36557+ ttm->swap_storage = NULL;
36558+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
36559+
36560+ return 0;
36561+ out_err:
36562+ ttm_tt_free_alloced_pages(ttm);
36563+ return -ENOMEM;
36564+}
36565+
36566+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
36567+{
36568+ struct address_space *swap_space;
36569+ struct file *swap_storage;
36570+ struct page *from_page;
36571+ struct page *to_page;
36572+ void *from_virtual;
36573+ void *to_virtual;
36574+ int i;
36575+
36576+ BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
36577+ BUG_ON(ttm->caching_state != tt_cached);
36578+
36579+ /*
36580+ * For user buffers, just unpin the pages, as there should be
36581+ * vma references.
36582+ */
36583+
36584+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
36585+ ttm_tt_free_user_pages(ttm);
36586+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
36587+ ttm->swap_storage = NULL;
36588+ return 0;
36589+ }
36590+
36591+ if (!persistant_swap_storage) {
36592+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
36593+ swap_storage = shmem_file_setup("ttm swap",
36594+ ttm->num_pages << PAGE_SHIFT,
36595+ 0);
36596+ if (unlikely(IS_ERR(swap_storage))) {
36597+ printk(KERN_ERR "Failed allocating swap storage.\n");
36598+ return -ENOMEM;
36599+ }
36600+#else
36601+ return -ENOMEM;
36602+#endif
36603+ } else
36604+ swap_storage = persistant_swap_storage;
36605+
36606+ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
36607+
36608+ for (i = 0; i < ttm->num_pages; ++i) {
36609+ from_page = ttm->pages[i];
36610+ if (unlikely(from_page == NULL))
36611+ continue;
36612+ to_page = read_mapping_page(swap_space, i, NULL);
36613+ if (unlikely(to_page == NULL))
36614+ goto out_err;
36615+
36616+ preempt_disable();
36617+ from_virtual = kmap_atomic(from_page, KM_USER0);
36618+ to_virtual = kmap_atomic(to_page, KM_USER1);
36619+ memcpy(to_virtual, from_virtual, PAGE_SIZE);
36620+ kunmap_atomic(to_virtual, KM_USER1);
36621+ kunmap_atomic(from_virtual, KM_USER0);
36622+ preempt_enable();
36623+ set_page_dirty(to_page);
36624+ mark_page_accessed(to_page);
36625+// unlock_page(to_page);
36626+ page_cache_release(to_page);
36627+ }
36628+
36629+ ttm_tt_free_alloced_pages(ttm);
36630+ ttm->swap_storage = swap_storage;
36631+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
36632+ if (persistant_swap_storage)
36633+ ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
36634+
36635+ return 0;
36636+ out_err:
36637+ if (!persistant_swap_storage)
36638+ fput(swap_storage);
36639+
36640+ return -ENOMEM;
36641+}
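
The swapin/swapout paths above both follow the same per-page copy pattern: walk the object page by page, map the source and destination pages, memcpy() one PAGE_SIZE chunk, then drop the mappings and release the source page. A minimal, hypothetical user-space sketch of that pattern (PAGE_SIZE, the page count and the fill value are illustrative assumptions, not part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096
#define NUM_PAGES 4

/* Copy a buffer one page at a time, mirroring the kmap/memcpy/kunmap
 * loop in ttm_tt_swapin()/ttm_tt_swapout(). */
static void copy_pagewise(unsigned char *dst, const unsigned char *src,
                          size_t num_pages)
{
        size_t i;

        for (i = 0; i < num_pages; ++i) {
                const unsigned char *from = src + i * PAGE_SIZE; /* "mapped" source page */
                unsigned char *to = dst + i * PAGE_SIZE;         /* "mapped" destination page */

                memcpy(to, from, PAGE_SIZE);
                /* in the kernel path the atomic mappings are dropped here
                 * and the source page reference is released */
        }
}

int main(void)
{
        unsigned char *src = malloc(NUM_PAGES * PAGE_SIZE);
        unsigned char *dst = malloc(NUM_PAGES * PAGE_SIZE);

        if (!src || !dst)
                return 1;
        memset(src, 0xab, NUM_PAGES * PAGE_SIZE);
        copy_pagewise(dst, src, NUM_PAGES);
        printf("copied %d pages, first byte: 0x%02x\n", NUM_PAGES, dst[0]);
        free(src);
        free(dst);
        return 0;
}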
36642diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h
36643--- a/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h 1969-12-31 16:00:00.000000000 -0800
36644+++ b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h 2009-04-07 13:28:38.000000000 -0700
36645@@ -0,0 +1,79 @@
36646+/**************************************************************************
36647+ *
36648+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
36649+ * All Rights Reserved.
36650+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
36651+ * All Rights Reserved.
36652+ *
36653+ * Permission is hereby granted, free of charge, to any person obtaining a
36654+ * copy of this software and associated documentation files (the
36655+ * "Software"), to deal in the Software without restriction, including
36656+ * without limitation the rights to use, copy, modify, merge, publish,
36657+ * distribute, sub license, and/or sell copies of the Software, and to
36658+ * permit persons to whom the Software is furnished to do so, subject to
36659+ * the following conditions:
36660+ *
36661+ * The above copyright notice and this permission notice (including the
36662+ * next paragraph) shall be included in all copies or substantial portions
36663+ * of the Software.
36664+ *
36665+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36666+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36667+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
36668+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
36669+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
36670+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
36671+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
36672+ *
36673+ **************************************************************************/
36674+/*
36675+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
36676+ */
36677+
36678+#ifndef _TTM_USEROBJ_API_H_
36679+#define _TTM_USEROBJ_API_H_
36680+
36681+#include "ttm/ttm_placement_user.h"
36682+#include "ttm/ttm_fence_user.h"
36683+#include "ttm/ttm_object.h"
36684+#include "ttm/ttm_fence_api.h"
36685+#include "ttm/ttm_bo_api.h"
36686+
36687+struct ttm_lock;
36688+
36689+/*
36690+ * User ioctls.
36691+ */
36692+
36693+extern int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
36694+ struct ttm_bo_device *bdev,
36695+ struct ttm_lock *lock, void *data);
36696+extern int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
36697+ struct ttm_bo_device *bdev,
36698+ struct ttm_lock *lock, void *data);
36699+extern int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data);
36700+extern int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data);
36701+extern int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data);
36702+extern int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
36703+ struct ttm_lock *lock, void *data);
36704+extern int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data);
36705+extern int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data);
36706+extern int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data);
36707+extern int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data);
36708+
36709+extern int
36710+ttm_fence_user_create(struct ttm_fence_device *fdev,
36711+ struct ttm_object_file *tfile,
36712+ uint32_t fence_class,
36713+ uint32_t fence_types,
36714+ uint32_t create_flags,
36715+ struct ttm_fence_object **fence, uint32_t * user_handle);
36716+
36717+extern struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
36718+ *tfile,
36719+ uint32_t handle);
36720+
36721+extern int
36722+ttm_pl_verify_access(struct ttm_buffer_object *bo,
36723+ struct ttm_object_file *tfile);
36724+#endif
36725diff -uNr a/include/drm/drm_compat.h b/include/drm/drm_compat.h
36726--- a/include/drm/drm_compat.h 1969-12-31 16:00:00.000000000 -0800
36727+++ b/include/drm/drm_compat.h 2009-04-07 13:28:38.000000000 -0700
36728@@ -0,0 +1,238 @@
36729+/**
36730+ * \file drm_compat.h
36731+ * Backward compatibility definitions for Direct Rendering Manager
36732+ *
36733+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
36734+ * \author Gareth Hughes <gareth@valinux.com>
36735+ */
36736+
36737+/*
36738+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
36739+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
36740+ * All rights reserved.
36741+ *
36742+ * Permission is hereby granted, free of charge, to any person obtaining a
36743+ * copy of this software and associated documentation files (the "Software"),
36744+ * to deal in the Software without restriction, including without limitation
36745+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
36746+ * and/or sell copies of the Software, and to permit persons to whom the
36747+ * Software is furnished to do so, subject to the following conditions:
36748+ *
36749+ * The above copyright notice and this permission notice (including the next
36750+ * paragraph) shall be included in all copies or substantial portions of the
36751+ * Software.
36752+ *
36753+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36754+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36755+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
36756+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
36757+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
36758+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
36759+ * OTHER DEALINGS IN THE SOFTWARE.
36760+ */
36761+
36762+#ifndef _DRM_COMPAT_H_
36763+#define _DRM_COMPAT_H_
36764+
36765+#ifndef minor
36766+#define minor(x) MINOR((x))
36767+#endif
36768+
36769+#ifndef MODULE_LICENSE
36770+#define MODULE_LICENSE(x)
36771+#endif
36772+
36773+#ifndef preempt_disable
36774+#define preempt_disable()
36775+#define preempt_enable()
36776+#endif
36777+
36778+#ifndef pte_offset_map
36779+#define pte_offset_map pte_offset
36780+#define pte_unmap(pte)
36781+#endif
36782+
36783+#ifndef module_param
36784+#define module_param(name, type, perm)
36785+#endif
36786+
36787+/* older kernels had different irq args */
36788+
36789+#ifndef list_for_each_safe
36790+#define list_for_each_safe(pos, n, head) \
36791+ for (pos = (head)->next, n = pos->next; pos != (head); \
36792+ pos = n, n = pos->next)
36793+#endif
36794+
36795+#ifndef list_for_each_entry
36796+#define list_for_each_entry(pos, head, member) \
36797+ for (pos = list_entry((head)->next, typeof(*pos), member), \
36798+ prefetch(pos->member.next); \
36799+ &pos->member != (head); \
36800+ pos = list_entry(pos->member.next, typeof(*pos), member), \
36801+ prefetch(pos->member.next))
36802+#endif
36803+
36804+#ifndef list_for_each_entry_safe
36805+#define list_for_each_entry_safe(pos, n, head, member) \
36806+ for (pos = list_entry((head)->next, typeof(*pos), member), \
36807+ n = list_entry(pos->member.next, typeof(*pos), member); \
36808+ &pos->member != (head); \
36809+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
36810+#endif
36811+
36812+#ifndef __user
36813+#define __user
36814+#endif
36815+
36816+#if !defined(__put_page)
36817+#define __put_page(p) atomic_dec(&(p)->count)
36818+#endif
36819+
36820+#if !defined(__GFP_COMP)
36821+#define __GFP_COMP 0
36822+#endif
36823+
36824+#if !defined(IRQF_SHARED)
36825+#define IRQF_SHARED SA_SHIRQ
36826+#endif
36827+
36828+
36829+
36830+#ifndef DEFINE_SPINLOCK
36831+#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
36832+#endif
36833+
36834+/* old architectures */
36835+#ifdef __AMD64__
36836+#define __x86_64__
36837+#endif
36838+
36839+/* sysfs __ATTR macro */
36840+#ifndef __ATTR
36841+#define __ATTR(_name,_mode,_show,_store) { \
36842+ .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \
36843+ .show = _show, \
36844+ .store = _store, \
36845+}
36846+#endif
36847+
36848+
36849+#ifndef list_for_each_entry_safe_reverse
36850+#define list_for_each_entry_safe_reverse(pos, n, head, member) \
36851+ for (pos = list_entry((head)->prev, typeof(*pos), member), \
36852+ n = list_entry(pos->member.prev, typeof(*pos), member); \
36853+ &pos->member != (head); \
36854+ pos = n, n = list_entry(n->member.prev, typeof(*n), member))
36855+#endif
36856+
36857+#include <linux/mm.h>
36858+#include <asm/page.h>
36859+
36860+
36861+#define DRM_FULL_MM_COMPAT
36862+
36863+
36864+/*
36865+ * Flush relevant caches and clear a VMA structure so that page references
36866+ * will cause a page fault. Don't flush tlbs.
36867+ */
36868+
36869+extern void drm_clear_vma(struct vm_area_struct *vma,
36870+ unsigned long addr, unsigned long end);
36871+
36872+/*
36873+ * Return the PTE protection map entries for the VMA flags given by
36874+ * flags. This is a functional interface to the kernel's protection map.
36875+ */
36876+
36877+extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
36878+
36879+#ifndef GFP_DMA32
36880+#define GFP_DMA32 GFP_KERNEL
36881+#endif
36882+#ifndef __GFP_DMA32
36883+#define __GFP_DMA32 GFP_KERNEL
36884+#endif
36885+
36886+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
36887+
36888+/*
36889+ * These are too slow in earlier kernels.
36890+ */
36891+
36892+extern int drm_unmap_page_from_agp(struct page *page);
36893+extern int drm_map_page_into_agp(struct page *page);
36894+
36895+#define map_page_into_agp drm_map_page_into_agp
36896+#define unmap_page_from_agp drm_unmap_page_from_agp
36897+#endif
36898+
36899+
36900+
36901+
36902+
36903+/* fixme when functions are upstreamed - upstreamed for 2.6.23 */
36904+#ifdef DRM_IDR_COMPAT_FN
36905+int idr_for_each(struct idr *idp,
36906+ int (*fn)(int id, void *p, void *data), void *data);
36907+void idr_remove_all(struct idr *idp);
36908+#endif
36909+
36910+
36911+
36912+
36913+
36914+
36915+#ifndef PM_EVENT_PRETHAW
36916+#define PM_EVENT_PRETHAW 3
36917+#endif
36918+
36919+
36920+#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM) && \
36921+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)))
36922+#define DRM_KMAP_ATOMIC_PROT_PFN
36923+extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
36924+ pgprot_t protection);
36925+#endif
36926+
36927+#if !defined(flush_agp_mappings)
36928+#define flush_agp_mappings() do {} while(0)
36929+#endif
36930+
36931+#ifndef DMA_BIT_MASK
36932+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
36933+#endif
36934+
36935+#ifndef VM_CAN_NONLINEAR
36936+#define DRM_VM_NOPAGE 1
36937+#endif
36938+
36939+#ifdef DRM_VM_NOPAGE
36940+
36941+extern struct page *drm_vm_nopage(struct vm_area_struct *vma,
36942+ unsigned long address, int *type);
36943+
36944+extern struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
36945+ unsigned long address, int *type);
36946+
36947+extern struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
36948+ unsigned long address, int *type);
36949+
36950+extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
36951+ unsigned long address, int *type);
36952+#endif
36953+
36954+#define drm_on_each_cpu(handler, data, wait) \
36955+ on_each_cpu(handler, data, wait)
36956+
36957+
36958+#ifndef OS_HAS_GEM
36959+#define OS_HAS_GEM 1
36960+#endif
36961+
36962+#ifndef current_euid
36963+#define current_euid() (current->euid)
36964+#endif
36965+
36966+#endif
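
drm_compat.h relies throughout on the #ifndef-then-#define fallback pattern, so the same driver source builds against kernels that lack newer helpers while still using the kernel's own definition when one exists. A small standalone sketch of that pattern, reusing the DMA_BIT_MASK() fallback defined above (hypothetical user-space C, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Same fallback as drm_compat.h: only define the helper when the
 * surrounding headers have not already provided it. */
#ifndef DMA_BIT_MASK
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL << (n)) - 1)
#endif

int main(void)
{
        printf("DMA_BIT_MASK(32) = 0x%llx\n",
               (unsigned long long)DMA_BIT_MASK(32));
        printf("DMA_BIT_MASK(64) = 0x%llx\n",
               (unsigned long long)DMA_BIT_MASK(64));
        return 0;
}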
36967diff -uNr a/include/drm/drm_internal.h b/include/drm/drm_internal.h
36968--- a/include/drm/drm_internal.h 1969-12-31 16:00:00.000000000 -0800
36969+++ b/include/drm/drm_internal.h 2009-04-07 13:28:38.000000000 -0700
36970@@ -0,0 +1,40 @@
36971+/*
36972+ * Copyright 2007 Red Hat, Inc
36973+ * All rights reserved.
36974+ *
36975+ * Permission is hereby granted, free of charge, to any person obtaining a
36976+ * copy of this software and associated documentation files (the "Software"),
36977+ * to deal in the Software without restriction, including without limitation
36978+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
36979+ * and/or sell copies of the Software, and to permit persons to whom the
36980+ * Software is furnished to do so, subject to the following conditions:
36981+ *
36982+ * The above copyright notice and this permission notice (including the next
36983+ * paragraph) shall be included in all copies or substantial portions of the
36984+ * Software.
36985+ *
36986+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36987+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36988+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
36989+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
36990+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
36991+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
36992+ * OTHER DEALINGS IN THE SOFTWARE.
36993+ */
36994+
36995+/* This header file holds function prototypes and data types that are
36996+ * internal to the drm (not exported to user space) but shared across
36997+ * drivers and platforms */
36998+
36999+#ifndef __DRM_INTERNAL_H__
37000+#define __DRM_INTERNAL_H__
37001+
37002+/**
37003+ * Drawable information.
37004+ */
37005+struct drm_drawable_info {
37006+ unsigned int num_rects;
37007+ struct drm_clip_rect *rects;
37008+};
37009+
37010+#endif
37011diff -uNr a/include/drm/ttm/ttm_fence_user.h b/include/drm/ttm/ttm_fence_user.h
37012--- a/include/drm/ttm/ttm_fence_user.h 1969-12-31 16:00:00.000000000 -0800
37013+++ b/include/drm/ttm/ttm_fence_user.h 2009-04-07 13:28:38.000000000 -0700
37014@@ -0,0 +1,147 @@
37015+/**************************************************************************
37016+ *
37017+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
37018+ * All Rights Reserved.
37019+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
37020+ * All Rights Reserved.
37021+ *
37022+ * Permission is hereby granted, free of charge, to any person obtaining a
37023+ * copy of this software and associated documentation files (the
37024+ * "Software"), to deal in the Software without restriction, including
37025+ * without limitation the rights to use, copy, modify, merge, publish,
37026+ * distribute, sub license, and/or sell copies of the Software, and to
37027+ * permit persons to whom the Software is furnished to do so, subject to
37028+ * the following conditions:
37029+ *
37030+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37031+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37032+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
37033+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
37034+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37035+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
37036+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
37037+ *
37038+ * The above copyright notice and this permission notice (including the
37039+ * next paragraph) shall be included in all copies or substantial portions
37040+ * of the Software.
37041+ *
37042+ **************************************************************************/
37043+/*
37044+ * Authors
37045+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
37046+ */
37047+
37048+#ifndef TTM_FENCE_USER_H
37049+#define TTM_FENCE_USER_H
37050+
37051+#if !defined(__KERNEL__) && !defined(_KERNEL)
37052+#include <stdint.h>
37053+#endif
37054+
37055+#define TTM_FENCE_MAJOR 0
37056+#define TTM_FENCE_MINOR 1
37057+#define TTM_FENCE_PL 0
37058+#define TTM_FENCE_DATE "080819"
37059+
37060+/**
37061+ * struct ttm_fence_signaled_req
37062+ *
37063+ * @handle: Handle to the fence object. Input.
37064+ *
37065+ * @fence_type: Fence types we want to flush. Input.
37066+ *
37067+ * @flush: Boolean. Flush the indicated fence_types. Input.
37068+ *
37069+ * Argument to the TTM_FENCE_SIGNALED ioctl.
37070+ */
37071+
37072+struct ttm_fence_signaled_req {
37073+ uint32_t handle;
37074+ uint32_t fence_type;
37075+ int32_t flush;
37076+ uint32_t pad64;
37077+};
37078+
37079+/**
37080+ * struct ttm_fence_rep
37081+ *
37082+ * @signaled_types: Fence type that has signaled.
37083+ *
37084+ * @fence_error: Command execution error.
37085+ * Hardware errors that are consequences of the execution
37086+ * of the command stream preceding the fence are reported
37087+ * here.
37088+ *
37089+ * Output argument to the TTM_FENCE_SIGNALED and
37090+ * TTM_FENCE_FINISH ioctls.
37091+ */
37092+
37093+struct ttm_fence_rep {
37094+ uint32_t signaled_types;
37095+ uint32_t fence_error;
37096+};
37097+
37098+union ttm_fence_signaled_arg {
37099+ struct ttm_fence_signaled_req req;
37100+ struct ttm_fence_rep rep;
37101+};
37102+
37103+/*
37104+ * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
37105+ *
37106+ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
37107+ * wait.
37108+ *
37109+ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
37110+ * but return -EBUSY if the buffer is busy.
37111+ */
37112+
37113+#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
37114+#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
37115+
37116+/**
37117+ * struct ttm_fence_finish_req
37118+ *
37119+ * @handle: Handle to the fence object. Input.
37120+ *
37121+ * @fence_type: Fence types we want to finish.
37122+ *
37123+ * @mode: Wait mode.
37124+ *
37125+ * Input to the TTM_FENCE_FINISH ioctl.
37126+ */
37127+
37128+struct ttm_fence_finish_req {
37129+ uint32_t handle;
37130+ uint32_t fence_type;
37131+ uint32_t mode;
37132+ uint32_t pad64;
37133+};
37134+
37135+union ttm_fence_finish_arg {
37136+ struct ttm_fence_finish_req req;
37137+ struct ttm_fence_rep rep;
37138+};
37139+
37140+/**
37141+ * struct ttm_fence_unref_arg
37142+ *
37143+ * @handle: Handle to the fence object.
37144+ *
37145+ * Argument to the TTM_FENCE_UNREF ioctl.
37146+ */
37147+
37148+struct ttm_fence_unref_arg {
37149+ uint32_t handle;
37150+ uint32_t pad64;
37151+};
37152+
37153+/*
37154+ * Ioctl offsets from extension start.
37155+ */
37156+
37157+#define TTM_FENCE_SIGNALED 0x01
37158+#define TTM_FENCE_FINISH 0x02
37159+#define TTM_FENCE_UNREF 0x03
37160+
37161+#endif
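
All of the fence ioctls above use the same convention: a union whose req member is filled in by user space and whose rep member is overwritten by the kernel on return. A hedged sketch of how a client might drive TTM_FENCE_SIGNALED under that convention; the device node path, the ioctl encoding macro and the handle value are assumptions for illustration, and the real driver maps the 0x01 offset into its own ioctl space:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

/* Layout mirrors ttm_fence_signaled_req / ttm_fence_rep above. */
struct fence_signaled_req { uint32_t handle; uint32_t fence_type; int32_t flush; uint32_t pad64; };
struct fence_rep { uint32_t signaled_types; uint32_t fence_error; };
union fence_signaled_arg { struct fence_signaled_req req; struct fence_rep rep; };

/* Hypothetical encoding: 'T' and the request number are made up here. */
#define EXAMPLE_FENCE_SIGNALED _IOWR('T', 0x01, union fence_signaled_arg)

int main(void)
{
        union fence_signaled_arg arg;
        int fd = open("/dev/dri/card0", O_RDWR);   /* assumed device node */

        if (fd < 0) { perror("open"); return 1; }

        memset(&arg, 0, sizeof(arg));
        arg.req.handle = 42;        /* hypothetical fence handle */
        arg.req.fence_type = 1;
        arg.req.flush = 1;          /* flush the indicated fence types */

        if (ioctl(fd, EXAMPLE_FENCE_SIGNALED, &arg) == 0)
                printf("signaled_types=0x%x fence_error=%u\n",
                       arg.rep.signaled_types, arg.rep.fence_error);
        else
                perror("ioctl");

        close(fd);
        return 0;
}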
37162diff -uNr a/include/drm/ttm/ttm_placement_common.h b/include/drm/ttm/ttm_placement_common.h
37163--- a/include/drm/ttm/ttm_placement_common.h 1969-12-31 16:00:00.000000000 -0800
37164+++ b/include/drm/ttm/ttm_placement_common.h 2009-04-07 13:28:38.000000000 -0700
37165@@ -0,0 +1,96 @@
37166+/**************************************************************************
37167+ *
37168+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
37169+ * All Rights Reserved.
37170+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
37171+ * All Rights Reserved.
37172+ *
37173+ * Permission is hereby granted, free of charge, to any person obtaining a
37174+ * copy of this software and associated documentation files (the
37175+ * "Software"), to deal in the Software without restriction, including
37176+ * without limitation the rights to use, copy, modify, merge, publish,
37177+ * distribute, sub license, and/or sell copies of the Software, and to
37178+ * permit persons to whom the Software is furnished to do so, subject to
37179+ * the following conditions:
37180+ *
37181+ * The above copyright notice and this permission notice (including the
37182+ * next paragraph) shall be included in all copies or substantial portions
37183+ * of the Software.
37184+ *
37185+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37186+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37187+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
37188+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
37189+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37190+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
37191+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
37192+ *
37193+ **************************************************************************/
37194+/*
37195+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
37196+ */
37197+
37198+#ifndef _TTM_PL_COMMON_H_
37199+#define _TTM_PL_COMMON_H_
37200+/*
37201+ * Memory regions for data placement.
37202+ */
37203+
37204+#define TTM_PL_SYSTEM 0
37205+#define TTM_PL_TT 1
37206+#define TTM_PL_VRAM 2
37207+#define TTM_PL_PRIV0 3
37208+#define TTM_PL_PRIV1 4
37209+#define TTM_PL_PRIV2 5
37210+#define TTM_PL_PRIV3 6
37211+#define TTM_PL_PRIV4 7
37212+#define TTM_PL_PRIV5 8
37213+#define TTM_PL_CI 9
37214+#define TTM_PL_SWAPPED 15
37215+
37216+#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
37217+#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
37218+#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
37219+#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
37220+#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
37221+#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
37222+#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
37223+#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
37224+#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
37225+#define TTM_PL_FLAG_CI (1 << TTM_PL_CI)
37226+#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
37227+#define TTM_PL_MASK_MEM 0x0000FFFF
37228+
37229+/*
37230+ * Other flags that affects data placement.
37231+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
37232+ * if available.
37233+ * TTM_PL_FLAG_SHARED means that another application may
37234+ * reference the buffer.
37235+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
37236+ * be evicted to make room for other buffers.
37237+ */
37238+
37239+#define TTM_PL_FLAG_CACHED (1 << 16)
37240+#define TTM_PL_FLAG_UNCACHED (1 << 17)
37241+#define TTM_PL_FLAG_WC (1 << 18)
37242+#define TTM_PL_FLAG_SHARED (1 << 20)
37243+#define TTM_PL_FLAG_NO_EVICT (1 << 21)
37244+
37245+#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
37246+ TTM_PL_FLAG_UNCACHED | \
37247+ TTM_PL_FLAG_WC)
37248+
37249+#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
37250+
37251+/*
37252+ * Access flags to be used for CPU and GPU mappings.
37253+ * The idea is that the TTM synchronization mechanism will
37254+ * allow concurrent READ access and exclusive write access.
37255+ * Currently GPU and CPU accesses are exclusive.
37256+ */
37257+
37258+#define TTM_ACCESS_READ (1 << 0)
37259+#define TTM_ACCESS_WRITE (1 << 1)
37260+
37261+#endif
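
The placement defines above are plain bit masks: the low 16 bits select a memory region, the upper bits carry caching and behaviour hints, and TTM_PL_MASK_MEMTYPE combines both. A standalone sketch of how such a flags word is built and queried, mirroring a subset of the defines above (user-space C, illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Subset of the defines from ttm_placement_common.h */
#define TTM_PL_TT              1
#define TTM_PL_VRAM            2
#define TTM_PL_FLAG_TT         (1 << TTM_PL_TT)
#define TTM_PL_FLAG_VRAM       (1 << TTM_PL_VRAM)
#define TTM_PL_FLAG_CACHED     (1 << 16)
#define TTM_PL_FLAG_UNCACHED   (1 << 17)
#define TTM_PL_FLAG_WC         (1 << 18)
#define TTM_PL_FLAG_NO_EVICT   (1 << 21)
#define TTM_PL_MASK_MEM        0x0000FFFF
#define TTM_PL_MASK_CACHING    (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC)

int main(void)
{
        /* "VRAM, write-combined, never evict" placement request */
        uint32_t placement = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_NO_EVICT;

        printf("memory regions: 0x%04x\n", placement & TTM_PL_MASK_MEM);
        printf("caching bits:   0x%08x\n", placement & TTM_PL_MASK_CACHING);
        printf("TT allowed?     %s\n",
               (placement & TTM_PL_FLAG_TT) ? "yes" : "no");
        return 0;
}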
37262diff -uNr a/include/drm/ttm/ttm_placement_user.h b/include/drm/ttm/ttm_placement_user.h
37263--- a/include/drm/ttm/ttm_placement_user.h 1969-12-31 16:00:00.000000000 -0800
37264+++ b/include/drm/ttm/ttm_placement_user.h 2009-04-07 13:28:38.000000000 -0700
37265@@ -0,0 +1,259 @@
37266+/**************************************************************************
37267+ *
37268+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
37269+ * All Rights Reserved.
37270+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
37271+ * All Rights Reserved.
37272+ *
37273+ * Permission is hereby granted, free of charge, to any person obtaining a
37274+ * copy of this software and associated documentation files (the
37275+ * "Software"), to deal in the Software without restriction, including
37276+ * without limitation the rights to use, copy, modify, merge, publish,
37277+ * distribute, sub license, and/or sell copies of the Software, and to
37278+ * permit persons to whom the Software is furnished to do so, subject to
37279+ * the following conditions:
37280+ *
37281+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37282+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37283+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
37284+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
37285+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37286+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
37287+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
37288+ *
37289+ * The above copyright notice and this permission notice (including the
37290+ * next paragraph) shall be included in all copies or substantial portions
37291+ * of the Software.
37292+ *
37293+ **************************************************************************/
37294+/*
37295+ * Authors
37296+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
37297+ */
37298+
37299+#ifndef _TTM_PLACEMENT_USER_H_
37300+#define _TTM_PLACEMENT_USER_H_
37301+
37302+#if !defined(__KERNEL__) && !defined(_KERNEL)
37303+#include <stdint.h>
37304+#else
37305+#include <linux/kernel.h>
37306+#endif
37307+
37308+#include "ttm/ttm_placement_common.h"
37309+
37310+#define TTM_PLACEMENT_MAJOR 0
37311+#define TTM_PLACEMENT_MINOR 1
37312+#define TTM_PLACEMENT_PL 0
37313+#define TTM_PLACEMENT_DATE "080819"
37314+
37315+/**
37316+ * struct ttm_pl_create_req
37317+ *
37318+ * @size: The buffer object size.
37319+ * @placement: Flags that indicate initial acceptable
37320+ * placement.
37321+ * @page_alignment: Required alignment in pages.
37322+ *
37323+ * Input to the TTM_BO_CREATE ioctl.
37324+ */
37325+
37326+struct ttm_pl_create_req {
37327+ uint64_t size;
37328+ uint32_t placement;
37329+ uint32_t page_alignment;
37330+};
37331+
37332+/**
37333+ * struct ttm_pl_create_ub_req
37334+ *
37335+ * @size: The buffer object size.
37336+ * @user_address: User-space address of the memory area that
37337+ * should be used to back the buffer object cast to 64-bit.
37338+ * @placement: Flags that indicate initial acceptable
37339+ * placement.
37340+ * @page_alignment: Required alignment in pages.
37341+ *
37342+ * Input to the TTM_BO_CREATE_UB ioctl.
37343+ */
37344+
37345+struct ttm_pl_create_ub_req {
37346+ uint64_t size;
37347+ uint64_t user_address;
37348+ uint32_t placement;
37349+ uint32_t page_alignment;
37350+};
37351+
37352+/**
37353+ * struct ttm_pl_rep
37354+ *
37355+ * @gpu_offset: The current offset into the memory region used.
37356+ * This can be used directly by the GPU if there are no
37357+ * additional GPU mapping procedures used by the driver.
37358+ *
37359+ * @bo_size: Actual buffer object size.
37360+ *
37361+ * @map_handle: Offset into the device address space.
37362+ * Used for map, seek, read, write. This will never change
37363+ * during the lifetime of an object.
37364+ *
37365+ * @placement: Flag indicating the placement status of
37366+ * the buffer object using the TTM_PL flags above.
37367+ *
37368+ * @sync_object_arg: Used for user-space synchronization and
37369+ * depends on the synchronization model used. If fences are
37370+ * used, this is the buffer_object::fence_type_mask
37371+ *
37372+ * Output from the TTM_PL_CREATE and TTM_PL_REFERENCE, and
37373+ * TTM_PL_SETSTATUS ioctls.
37374+ */
37375+
37376+struct ttm_pl_rep {
37377+ uint64_t gpu_offset;
37378+ uint64_t bo_size;
37379+ uint64_t map_handle;
37380+ uint32_t placement;
37381+ uint32_t handle;
37382+ uint32_t sync_object_arg;
37383+ uint32_t pad64;
37384+};
37385+
37386+/**
37387+ * struct ttm_pl_setstatus_req
37388+ *
37389+ * @set_placement: Placement flags to set.
37390+ *
37391+ * @clr_placement: Placement flags to clear.
37392+ *
37393+ * @handle: The object handle
37394+ *
37395+ * Input to the TTM_PL_SETSTATUS ioctl.
37396+ */
37397+
37398+struct ttm_pl_setstatus_req {
37399+ uint32_t set_placement;
37400+ uint32_t clr_placement;
37401+ uint32_t handle;
37402+ uint32_t pad64;
37403+};
37404+
37405+/**
37406+ * struct ttm_pl_reference_req
37407+ *
37408+ * @handle: The object to put a reference on.
37409+ *
37410+ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREFERENCE ioctls.
37411+ */
37412+
37413+struct ttm_pl_reference_req {
37414+ uint32_t handle;
37415+ uint32_t pad64;
37416+};
37417+
37418+/*
37419+ * ACCESS mode flags for SYNCCPU.
37420+ *
37421+ * TTM_SYNCCPU_MODE_READ will guarantee that the GPU is not
37422+ * writing to the buffer.
37423+ *
37424+ * TTM_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
37425+ * accessing the buffer.
37426+ *
37427+ * TTM_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
37428+ * for GPU accesses to finish but returns -EBUSY instead.
37429+ *
37430+ * TTM_SYNCCPU_MODE_TRYCACHED tries to place the buffer in cacheable
37431+ * memory while it is synchronized for CPU access.
37432+ */
37433+
37434+#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ
37435+#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE
37436+#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2)
37437+#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)
37438+
37439+/**
37440+ * struct ttm_pl_synccpu_arg
37441+ *
37442+ * @handle: The object to synchronize.
37443+ *
37444+ * @access_mode: access mode indicated by the
37445+ * TTM_SYNCCPU_MODE flags.
37446+ *
37447+ * @op: indicates whether to grab or release the
37448+ * buffer for cpu usage.
37449+ *
37450+ * Input to the TTM_PL_SYNCCPU ioctl.
37451+ */
37452+
37453+struct ttm_pl_synccpu_arg {
37454+ uint32_t handle;
37455+ uint32_t access_mode;
37456+ enum {
37457+ TTM_PL_SYNCCPU_OP_GRAB,
37458+ TTM_PL_SYNCCPU_OP_RELEASE
37459+ } op;
37460+ uint32_t pad64;
37461+};
37462+
37463+/*
37464+ * Waiting mode flags for the TTM_BO_WAITIDLE ioctl.
37465+ *
37466+ * TTM_WAITIDLE_MODE_LAZY: Allow for sleeps during polling
37467+ * wait.
37468+ *
37469+ * TTM_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for GPU,
37470+ * but return -EBUSY if the buffer is busy.
37471+ */
37472+
37473+#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0)
37474+#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)
37475+
37476+/**
37477+ * struct ttm_pl_waitidle_arg
37478+ *
37479+ * @handle: The object to synchronize.
37480+ *
37481+ * @mode: wait mode indicated by the
37482+ * TTM_PL_WAITIDLE_MODE flags.
37483+ *
37484+ * Argument to the TTM_BO_WAITIDLE ioctl.
37485+ */
37486+
37487+struct ttm_pl_waitidle_arg {
37488+ uint32_t handle;
37489+ uint32_t mode;
37490+};
37491+
37492+union ttm_pl_create_arg {
37493+ struct ttm_pl_create_req req;
37494+ struct ttm_pl_rep rep;
37495+};
37496+
37497+union ttm_pl_reference_arg {
37498+ struct ttm_pl_reference_req req;
37499+ struct ttm_pl_rep rep;
37500+};
37501+
37502+union ttm_pl_setstatus_arg {
37503+ struct ttm_pl_setstatus_req req;
37504+ struct ttm_pl_rep rep;
37505+};
37506+
37507+union ttm_pl_create_ub_arg {
37508+ struct ttm_pl_create_ub_req req;
37509+ struct ttm_pl_rep rep;
37510+};
37511+
37512+/*
37513+ * Ioctl offsets.
37514+ */
37515+
37516+#define TTM_PL_CREATE 0x00
37517+#define TTM_PL_REFERENCE 0x01
37518+#define TTM_PL_UNREF 0x02
37519+#define TTM_PL_SYNCCPU 0x03
37520+#define TTM_PL_WAITIDLE 0x04
37521+#define TTM_PL_SETSTATUS 0x05
37522+#define TTM_PL_CREATE_UB 0x06
37523+
37524+#endif
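
ttm_pl_synccpu_arg above is used in pairs: grab the buffer with the access mode you need, touch it from the CPU, then release it. A hedged sketch of that sequence; only the struct layout and the access-mode flags come from the headers above, while the device node, the ioctl encoding and the handle value are illustrative assumptions:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define TTM_ACCESS_READ  (1 << 0)
#define TTM_ACCESS_WRITE (1 << 1)

/* Layout mirrors struct ttm_pl_synccpu_arg above. */
struct pl_synccpu_arg {
        uint32_t handle;
        uint32_t access_mode;
        enum { PL_SYNCCPU_OP_GRAB, PL_SYNCCPU_OP_RELEASE } op;
        uint32_t pad64;
};

/* Hypothetical encoding: the real driver maps the TTM_PL_SYNCCPU (0x03)
 * offset into its own ioctl space. */
#define EXAMPLE_PL_SYNCCPU _IOWR('T', 0x03, struct pl_synccpu_arg)

static int synccpu(int fd, uint32_t handle, uint32_t mode, int grab)
{
        struct pl_synccpu_arg arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;
        arg.access_mode = mode;
        arg.op = grab ? PL_SYNCCPU_OP_GRAB : PL_SYNCCPU_OP_RELEASE;
        return ioctl(fd, EXAMPLE_PL_SYNCCPU, &arg);
}

int main(void)
{
        int fd = open("/dev/dri/card0", O_RDWR);   /* assumed device node */
        uint32_t handle = 7;                       /* hypothetical buffer handle */

        if (fd < 0) { perror("open"); return 1; }

        if (synccpu(fd, handle, TTM_ACCESS_WRITE, 1) == 0) {
                /* ... CPU writes into the buffer mapping would go here ... */
                synccpu(fd, handle, TTM_ACCESS_WRITE, 0);
        } else {
                perror("TTM_PL_SYNCCPU grab");
        }

        close(fd);
        return 0;
}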
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-retry-root-mount.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-retry-root-mount.patch
deleted file mode 100644
index 8f34a0f3f4..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-retry-root-mount.patch
+++ /dev/null
@@ -1,61 +0,0 @@
1From 0384d086e31092628596af98b1e33fad586cef0a Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 20 Jul 2008 13:01:28 -0700
4Subject: [PATCH] fastboot: retry mounting the root fs if we can't find init
5
6currently we wait until all device init is done before trying to mount
7the root fs and, consequently, to execute init.
8
9In preparation for relaxing the first delay, this patch adds a retry
10attempt in case /sbin/init is not found. Before retrying, the code
11will wait for all device init to complete.
12
13While this patch by itself doesn't gain boot time yet (it needs follow-on
14patches), the alternative is already to panic()...
15
16Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
17---
18--- a/init/main.c 2009-01-07 18:29:11.000000000 -0800
19+++ b/init/main.c 2009-01-07 18:32:08.000000000 -0800
20@@ -837,6 +837,7 @@ static void run_init_process(char *init_
21 */
22 static noinline int init_post(void)
23 {
24+ int retry_count = 1;
25 /* need to finish all async __init code before freeing the memory */
26 async_synchronize_full();
27 free_initmem();
28@@ -859,6 +860,8 @@ static noinline int init_post(void)
29 ramdisk_execute_command);
30 }
31
32+retry:
33+
34 /*
35 * We try each of these until one succeeds.
36 *
37@@ -871,6 +874,23 @@ static noinline int init_post(void)
38 "defaults...\n", execute_command);
39 }
40 run_init_process("/sbin/init");
41+
42+ if (retry_count > 0) {
43+ retry_count--;
44+ /*
45+ * We haven't found init yet... potentially because the device
46+ * is still being probed. We need to
47+ * - flush keventd and friends
48+ * - wait for the known devices to complete their probing
49+ * - try to mount the root fs again
50+ */
51+ flush_scheduled_work();
52+ while (driver_probe_done() != 0)
53+ msleep(100);
54+ prepare_namespace();
55+ goto retry;
56+ }
57+
58 run_init_process("/etc/init");
59 run_init_process("/bin/init");
60 run_init_process("/bin/sh");
61
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-silence-acer-message.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-silence-acer-message.patch
deleted file mode 100644
index 7bf897ab57..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-silence-acer-message.patch
+++ /dev/null
@@ -1,17 +0,0 @@
1From: Arjan van de Ven <arjan@linux.intel.com>
2Date: Fri, 23 Jan 2009
3
4Small fix changing error msg to info msg in acer wmi driver
5---
6diff -durp a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
7--- a/drivers/platform/x86/acer-wmi.c 2009-01-23 13:58:36.000000000 -0800
8+++ b/drivers/platform/x86/acer-wmi.c 2009-01-23 14:00:12.000000000 -0800
9@@ -1290,7 +1290,7 @@ static int __init acer_wmi_init(void)
10 AMW0_find_mailled();
11
12 if (!interface) {
13- printk(ACER_ERR "No or unsupported WMI interface, unable to "
14+ printk(ACER_INFO "No or unsupported WMI interface, unable to "
15 "load\n");
16 return -ENODEV;
17 }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-sreadahead.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-sreadahead.patch
deleted file mode 100644
index e4e2001104..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-sreadahead.patch
+++ /dev/null
@@ -1,66 +0,0 @@
1From be9df3282d24a7326bba2eea986c79d822f0e998 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 21 Sep 2008 11:58:27 -0700
4Subject: [PATCH] superreadahead patch
5
6---
7 fs/ext3/ioctl.c | 3 +++
8 fs/ext3/super.c | 1 +
9 include/linux/ext3_fs.h | 1 +
10 include/linux/fs.h | 2 ++
11 4 files changed, 7 insertions(+), 0 deletions(-)
12
13diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
14index b7394d0..c2e7f23 100644
15--- a/fs/ext3/ioctl.c
16+++ b/fs/ext3/ioctl.c
17@@ -290,6 +290,9 @@ group_add_out:
18 mnt_drop_write(filp->f_path.mnt);
19 return err;
20 }
21+ case EXT3_IOC_INODE_JIFFIES: {
22+ return inode->created_when;
23+ }
24
25
26 default:
27diff --git a/fs/ext3/super.c b/fs/ext3/super.c
28index f6c94f2..268dd1d 100644
29--- a/fs/ext3/super.c
30+++ b/fs/ext3/super.c
31@@ -461,6 +461,7 @@ static struct inode *ext3_alloc_inode(struct super_block *sb)
32 #endif
33 ei->i_block_alloc_info = NULL;
34 ei->vfs_inode.i_version = 1;
35+ ei->vfs_inode.created_when = jiffies;
36 return &ei->vfs_inode;
37 }
38
39diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
40index d14f029..fff5510 100644
41--- a/include/linux/ext3_fs.h
42+++ b/include/linux/ext3_fs.h
43@@ -225,6 +225,7 @@ struct ext3_new_group_data {
44 #endif
45 #define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
46 #define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
47+#define EXT3_IOC_INODE_JIFFIES _IOR('f', 19, long)
48
49 /*
50 * ioctl commands in 32 bit emulation
51diff --git a/include/linux/fs.h b/include/linux/fs.h
52index 4a853ef..c346136 100644
53--- a/include/linux/fs.h
54+++ b/include/linux/fs.h
55@@ -685,6 +685,8 @@ struct inode {
56 void *i_security;
57 #endif
58 void *i_private; /* fs or device private pointer */
59+
60+ unsigned long created_when; /* jiffies of creation time */
61 };
62
63 /*
64--
651.5.5.1
66
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-timberdale.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-timberdale.patch
deleted file mode 100644
index c36e5ba4ad..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-timberdale.patch
+++ /dev/null
@@ -1,6095 +0,0 @@
1Patch provided by Mocean in order to enable the timberdale subsystem of the Russelville board.
2
3Signed-off-by: Joel Clark <joel.clark@intel.com>
4Acked-by: Arjan van de Ven <arjan@infradead.org>
5Signed-off-by: Todd Brandt todd.e.brandt@intel.com
6
7
8diff -uNr linux-2.6.29-clean/drivers/gpio/Kconfig linux-2.6.29/drivers/gpio/Kconfig
9--- linux-2.6.29-clean/drivers/gpio/Kconfig 2009-04-01 09:20:23.000000000 -0700
10+++ linux-2.6.29/drivers/gpio/Kconfig 2009-04-06 13:51:47.000000000 -0700
11@@ -161,6 +161,12 @@
12
13 If unsure, say N.
14
15+config GPIO_TIMBERDALE
16+ tristate "Support for timberdale GPIO"
17+ depends on MFD_TIMBERDALE && GPIOLIB
18+ ---help---
19+ Add support for GPIO usage of some pins of the timberdale FPGA.
20+
21 comment "SPI GPIO expanders:"
22
23 config GPIO_MAX7301
24diff -uNr linux-2.6.29-clean/drivers/gpio/Makefile linux-2.6.29/drivers/gpio/Makefile
25--- linux-2.6.29-clean/drivers/gpio/Makefile 2009-04-01 09:20:23.000000000 -0700
26+++ linux-2.6.29/drivers/gpio/Makefile 2009-04-06 13:51:47.000000000 -0700
27@@ -12,3 +12,4 @@
28 obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
29 obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
30 obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
31+obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
32diff -uNr linux-2.6.29-clean/drivers/gpio/timbgpio.c linux-2.6.29/drivers/gpio/timbgpio.c
33--- linux-2.6.29-clean/drivers/gpio/timbgpio.c 1969-12-31 16:00:00.000000000 -0800
34+++ linux-2.6.29/drivers/gpio/timbgpio.c 2009-04-06 13:51:47.000000000 -0700
35@@ -0,0 +1,275 @@
36+/*
37+ * timbgpio.c timberdale FPGA GPIO driver
38+ * Copyright (c) 2009 Intel Corporation
39+ *
40+ * This program is free software; you can redistribute it and/or modify
41+ * it under the terms of the GNU General Public License version 2 as
42+ * published by the Free Software Foundation.
43+ *
44+ * This program is distributed in the hope that it will be useful,
45+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
46+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
47+ * GNU General Public License for more details.
48+ *
49+ * You should have received a copy of the GNU General Public License
50+ * along with this program; if not, write to the Free Software
51+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
52+ */
53+
54+/* Supports:
55+ * Timberdale FPGA GPIO
56+ */
57+
58+#include <linux/module.h>
59+#include <linux/gpio.h>
60+#include <linux/pci.h>
61+#include <linux/platform_device.h>
62+#include <linux/interrupt.h>
63+
64+#include "timbgpio.h"
65+
66+static u32 timbgpio_configure(struct gpio_chip *gpio, unsigned nr,
67+ unsigned off, unsigned val)
68+{
69+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
70+
71+ u32 config, oldconfig, wconfig;
72+
73+ mutex_lock(&tgpio->lock);
74+ config = ioread32(tgpio->membase + off);
75+ oldconfig = config;
76+
77+ if (val)
78+ config |= (1 << nr);
79+ else
80+ config &= ~(1 << nr);
81+
82+ iowrite32(config, tgpio->membase + off);
83+ wconfig = ioread32(tgpio->membase + off);
84+ mutex_unlock(&tgpio->lock);
85+
86+ return oldconfig;
87+}
88+
89+static int timbgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
90+{
91+ timbgpio_configure(gpio, nr, TGPIODIR, 1);
92+ return 0;
93+}
94+
95+static int timbgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
96+{
97+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
98+ u32 value;
99+
100+ value = ioread32(tgpio->membase + TGPIOVAL);
101+ return (value & (1 << nr)) ? 1 : 0;
102+}
103+
104+static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
105+ unsigned nr, int val)
106+{
107+ timbgpio_configure(gpio, nr, TGPIODIR, 0);
108+ return 0;
109+}
110+
111+
112+
113+static void timbgpio_gpio_set(struct gpio_chip *gpio,
114+ unsigned nr, int val)
115+{
116+ timbgpio_configure(gpio, nr, TGPIOVAL, val);
117+}
118+
119+/*
120+ * Function to control flank- or level-triggered GPIO pin
121+ * @nr - pin
122+ * @val - 1: flank, 0: level
123+ *
124+ */
125+static void timbgpio_gpio_flnk_lvl_ctrl(struct gpio_chip *gpio,
126+ unsigned nr, int val)
127+{
128+ timbgpio_configure(gpio, nr, TGPIOFLK, val);
129+}
130+EXPORT_SYMBOL(timbgpio_gpio_flnk_lvl_ctrl);
131+
132+/*
133+ * Enable or disable interrupt
134+ *
135+ */
136+static void timbgpio_gpio_int_ctrl(struct gpio_chip *gpio,
137+ unsigned nr, int val)
138+{
139+ timbgpio_configure(gpio, nr, TGPIOINT, val);
140+}
141+EXPORT_SYMBOL(timbgpio_gpio_int_ctrl);
142+
143+/*
144+ * @val - 1: Asserted high or on positive flank, 0: Asserted low or on negative flank
145+ *
146+ */
147+static void timbgpio_gpio_lvl_ctrl(struct gpio_chip *gpio,
148+ unsigned nr, int val)
149+{
150+ timbgpio_configure(gpio, nr, TGPIOLVL, val);
151+}
152+EXPORT_SYMBOL(timbgpio_gpio_lvl_ctrl);
153+
154+static void timbgpio_gpio_int_clr(struct gpio_chip *gpio,
155+ unsigned nr, int val)
156+{
157+ timbgpio_configure(gpio, nr, TGPIOINT_CLR, val);
158+}
159+EXPORT_SYMBOL(timbgpio_gpio_int_clr);
160+
161+
162+static irqreturn_t timbgpio_handleinterrupt(int irq, void *devid)
163+{
164+ struct timbgpio *tgpio = (struct timbgpio *)devid;
165+
166+ iowrite32(0xffffffff, tgpio->membase + TGPIOINT_CLR);
167+
168+ return IRQ_HANDLED;
169+}
170+
171+static int timbgpio_probe(struct platform_device *dev)
172+{
173+ int err, irq;
174+ struct gpio_chip *gc;
175+ struct timbgpio *tgpio;
176+ struct resource *iomem, *rscr;
177+
178+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
179+ if (!iomem) {
180+ err = -EINVAL;
181+ goto err_mem;
182+ }
183+
184+ tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL);
185+ if (!tgpio) {
186+ err = -EINVAL;
187+ goto err_mem;
188+ }
189+
190+ mutex_init(&tgpio->lock);
191+
192+ rscr = &tgpio->rscr;
193+ rscr->name = "timb-gpio";
194+ rscr->start = iomem->start;
195+ rscr->end = iomem->end;
196+ rscr->flags = IORESOURCE_MEM;
197+
198+ err = request_resource(iomem, rscr);
199+ if (err)
200+ goto err_request;
201+
202+ tgpio->membase = ioremap(rscr->start, resource_size(rscr));
203+ if (!tgpio->membase) {
204+ err = -ENOMEM;
205+ goto err_ioremap;
206+ }
207+
208+ gc = &tgpio->gpio;
209+
210+ gc->label = "timbgpio";
211+ gc->owner = THIS_MODULE;
212+ gc->direction_input = timbgpio_gpio_direction_input;
213+ gc->get = timbgpio_gpio_get;
214+ gc->direction_output = timbgpio_gpio_direction_output;
215+ gc->set = timbgpio_gpio_set;
216+ gc->dbg_show = NULL;
217+ gc->base = 0;
218+ gc->ngpio = TIMB_NR_GPIOS;
219+ gc->can_sleep = 0;
220+
221+ err = gpiochip_add(gc);
222+ if (err)
223+ goto err_chipadd;
224+
225+ platform_set_drvdata(dev, tgpio);
226+
227+ /* register interrupt */
228+ irq = platform_get_irq(dev, 0);
229+ if (irq < 0)
230+ goto err_get_irq;
231+
232+ /* clear pending interrupts */
233+ iowrite32(0xffffffff, tgpio->membase + TGPIOINT_CLR);
234+ iowrite32(0x0, tgpio->membase + TGPIOINT);
235+
236+ /* request IRQ */
237+ err = request_irq(irq, timbgpio_handleinterrupt, IRQF_SHARED,
238+ "timb-gpio", tgpio);
239+ if (err) {
240+ printk(KERN_ERR "timbgpio: Failed to request IRQ\n");
241+ goto err_get_irq;
242+ }
243+
244+ return err;
245+
246+err_get_irq:
247+ err = gpiochip_remove(&tgpio->gpio);
248+ if (err)
249+ printk(KERN_ERR "timbgpio: failed to remove gpio_chip\n");
250+err_chipadd:
251+ iounmap(tgpio->membase);
252+err_ioremap:
253+ release_resource(&tgpio->rscr);
254+err_request:
255+ kfree(tgpio);
256+err_mem:
257+ printk(KERN_ERR "timberdale: Failed to register GPIOs: %d\n", err);
258+
259+ return err;
260+}
261+
262+static int timbgpio_remove(struct platform_device *dev)
263+{
264+ int err;
265+ struct timbgpio *tgpio = platform_get_drvdata(dev);
266+
267+ /* disable interrupts */
268+ iowrite32(0x0, tgpio->membase + TGPIOINT);
269+
270+ free_irq(platform_get_irq(dev, 0), tgpio);
271+ err = gpiochip_remove(&tgpio->gpio);
272+ if (err)
273+ printk(KERN_ERR "timbgpio: failed to remove gpio_chip\n");
274+
275+ iounmap(tgpio->membase);
276+ release_resource(&tgpio->rscr);
277+ kfree(tgpio);
278+
279+ return 0;
280+}
281+
282+static struct platform_driver timbgpio_platform_driver = {
283+ .driver = {
284+ .name = "timb-gpio",
285+ .owner = THIS_MODULE,
286+ },
287+ .probe = timbgpio_probe,
288+ .remove = timbgpio_remove,
289+};
290+
291+/*--------------------------------------------------------------------------*/
292+
293+static int __init timbgpio_init(void)
294+{
295+ return platform_driver_register(&timbgpio_platform_driver);
296+}
297+
298+static void __exit timbgpio_exit(void)
299+{
300+ platform_driver_unregister(&timbgpio_platform_driver);
301+}
302+
303+module_init(timbgpio_init);
304+module_exit(timbgpio_exit);
305+
306+MODULE_DESCRIPTION("Timberdale GPIO driver");
307+MODULE_LICENSE("GPL v2");
308+MODULE_AUTHOR("Mocean Laboratories");
309+MODULE_ALIAS("platform:timb-gpio");
310+
311diff -uNr linux-2.6.29-clean/drivers/gpio/timbgpio.h linux-2.6.29/drivers/gpio/timbgpio.h
312--- linux-2.6.29-clean/drivers/gpio/timbgpio.h 1969-12-31 16:00:00.000000000 -0800
313+++ linux-2.6.29/drivers/gpio/timbgpio.h 2009-04-06 13:51:47.000000000 -0700
314@@ -0,0 +1,48 @@
315+/*
316+ * timbgpio.h timberdale FPGA GPIO driver defines
317+ * Copyright (c) 2009 Intel Corporation
318+ *
319+ * This program is free software; you can redistribute it and/or modify
320+ * it under the terms of the GNU General Public License version 2 as
321+ * published by the Free Software Foundation.
322+ *
323+ * This program is distributed in the hope that it will be useful,
324+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
325+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
326+ * GNU General Public License for more details.
327+ *
328+ * You should have received a copy of the GNU General Public License
329+ * along with this program; if not, write to the Free Software
330+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
331+ */
332+
333+/* Supports:
334+ * Timberdale FPGA GPIO
335+ */
336+
337+#ifndef _TIMBGPIO_H_
338+#define _TIMBGPIO_H_
339+
340+#include <linux/mutex.h>
341+#include <linux/gpio.h>
342+
343+#define TIMB_NR_GPIOS 16
344+
345+#define TGPIOVAL 0
346+#define TGPIODIR 0x04
347+#define TGPIOINT 0x08
348+#define TGPIOINT_STATUS 0x0c
349+#define TGPIOINT_PENDING 0x10
350+#define TGPIOINT_CLR 0x14
351+#define TGPIOFLK 0x18
352+#define TGPIOLVL 0x1c
353+
354+struct timbgpio {
355+ void __iomem *membase;
356+ struct resource rscr;
357+ struct mutex lock; /* mutual exclusion */
358+ struct pci_dev *pdev;
359+ struct gpio_chip gpio;
360+};
361+
362+#endif
363diff -uNr linux-2.6.29-clean/drivers/i2c/busses/i2c-ocores.c linux-2.6.29/drivers/i2c/busses/i2c-ocores.c
364--- linux-2.6.29-clean/drivers/i2c/busses/i2c-ocores.c 2009-04-01 09:20:24.000000000 -0700
365+++ linux-2.6.29/drivers/i2c/busses/i2c-ocores.c 2009-04-06 13:51:47.000000000 -0700
366@@ -216,6 +216,7 @@
367 struct ocores_i2c_platform_data *pdata;
368 struct resource *res, *res2;
369 int ret;
370+ u8 i;
371
372 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
373 if (!res)
374@@ -271,6 +272,10 @@
375 goto add_adapter_failed;
376 }
377
378+ /* add in known devices to the bus */
379+ for (i = 0; i < pdata->num_devices; i++)
380+ i2c_new_device(&i2c->adap, pdata->devices + i);
381+
382 return 0;
383
384 add_adapter_failed:
385diff -uNr linux-2.6.29-clean/drivers/input/touchscreen/Kconfig linux-2.6.29/drivers/input/touchscreen/Kconfig
386--- linux-2.6.29-clean/drivers/input/touchscreen/Kconfig 2009-04-01 09:20:23.000000000 -0700
387+++ linux-2.6.29/drivers/input/touchscreen/Kconfig 2009-04-06 13:51:47.000000000 -0700
388@@ -397,6 +397,17 @@
389 To compile this driver as a module, choose M here: the
390 module will be called touchit213.
391
392+config TOUCHSCREEN_TSC2003
393+ tristate "TSC2003 based touchscreens"
394+ depends on I2C
395+ help
396+ Say Y here if you have a TSC2003 based touchscreen.
397+
398+ If unsure, say N.
399+
400+ To compile this driver as a module, choose M here: the
401+ module will be called tsc2003.
402+
403 config TOUCHSCREEN_TSC2007
404 tristate "TSC2007 based touchscreens"
405 depends on I2C
406diff -uNr linux-2.6.29-clean/drivers/input/touchscreen/Makefile linux-2.6.29/drivers/input/touchscreen/Makefile
407--- linux-2.6.29-clean/drivers/input/touchscreen/Makefile 2009-04-01 09:20:23.000000000 -0700
408+++ linux-2.6.29/drivers/input/touchscreen/Makefile 2009-04-06 13:51:47.000000000 -0700
409@@ -25,6 +25,7 @@
410 obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
411 obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
412 obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
413+obj-$(CONFIG_TOUCHSCREEN_TSC2003) += tsc2003.o
414 obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o
415 obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o
416 obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001) += wacom_w8001.o
417diff -uNr linux-2.6.29-clean/drivers/input/touchscreen/tsc2003.c linux-2.6.29/drivers/input/touchscreen/tsc2003.c
418--- linux-2.6.29-clean/drivers/input/touchscreen/tsc2003.c 1969-12-31 16:00:00.000000000 -0800
419+++ linux-2.6.29/drivers/input/touchscreen/tsc2003.c 2009-04-06 13:51:47.000000000 -0700
420@@ -0,0 +1,387 @@
421+/*
422+ * tsc2003.c Driver for TI TSC2003 touch screen controller
423+ * Copyright (c) 2009 Intel Corporation
424+ *
425+ * This program is free software; you can redistribute it and/or modify
426+ * it under the terms of the GNU General Public License version 2 as
427+ * published by the Free Software Foundation.
428+ *
429+ * This program is distributed in the hope that it will be useful,
430+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
431+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
432+ * GNU General Public License for more details.
433+ *
434+ * You should have received a copy of the GNU General Public License
435+ * along with this program; if not, write to the Free Software
436+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
437+ */
438+
439+/* Supports:
440+ * TI TSC2003
441+ *
442+ * Inspired by tsc2007, Copyright (c) 2008 MtekVision Co., Ltd.
443+ */
444+#include <linux/module.h>
445+#include <linux/input.h>
446+#include <linux/interrupt.h>
447+#include <linux/i2c.h>
448+#include <linux/i2c/tsc2007.h>
449+#include <linux/kthread.h>
450+#include <linux/semaphore.h>
451+
452+#define TSC2003_DRIVER_NAME "tsc2003"
453+
454+#define TS_POLL_PERIOD 20 /* ms delay between samples */
455+
456+#define TSC2003_MEASURE_TEMP0 (0x0 << 4)
457+#define TSC2003_MEASURE_AUX (0x2 << 4)
458+#define TSC2003_MEASURE_TEMP1 (0x4 << 4)
459+#define TSC2003_ACTIVATE_XN (0x8 << 4)
460+#define TSC2003_ACTIVATE_YN (0x9 << 4)
461+#define TSC2003_ACTIVATE_YP_XN (0xa << 4)
462+#define TSC2003_SETUP (0xb << 4)
463+#define TSC2003_MEASURE_X (0xc << 4)
464+#define TSC2003_MEASURE_Y (0xd << 4)
465+#define TSC2003_MEASURE_Z1 (0xe << 4)
466+#define TSC2003_MEASURE_Z2 (0xf << 4)
467+
468+#define TSC2003_POWER_OFF_IRQ_EN (0x0 << 2)
469+#define TSC2003_ADC_ON_IRQ_DIS0 (0x1 << 2)
470+#define TSC2003_ADC_OFF_IRQ_EN (0x2 << 2)
471+#define TSC2003_ADC_ON_IRQ_DIS1 (0x3 << 2)
472+
473+#define TSC2003_12BIT (0x0 << 1)
474+#define TSC2003_8BIT (0x1 << 1)
475+
476+#define MAX_12BIT ((1 << 12) - 1)
477+
478+#define ADC_ON_12BIT (TSC2003_12BIT | TSC2003_ADC_ON_IRQ_DIS0)
479+
480+#define READ_Y (ADC_ON_12BIT | TSC2003_MEASURE_Y)
481+#define READ_Z1 (ADC_ON_12BIT | TSC2003_MEASURE_Z1)
482+#define READ_Z2 (ADC_ON_12BIT | TSC2003_MEASURE_Z2)
483+#define READ_X (ADC_ON_12BIT | TSC2003_MEASURE_X)
484+#define PWRDOWN (TSC2003_12BIT | TSC2003_POWER_OFF_IRQ_EN)
485+
486+struct ts_event {
487+ int x;
488+ int y;
489+ int z1, z2;
490+};
491+
492+struct tsc2003 {
493+ struct input_dev *input;
494+ char phys[32];
495+ struct task_struct *task;
496+ struct ts_event tc;
497+ struct completion penirq_completion;
498+
499+ struct i2c_client *client;
500+
501+ u16 model;
502+ u16 x_plate_ohms;
503+
504+ unsigned pendown;
505+};
506+
507+static inline int tsc2003_xfer(struct tsc2003 *tsc, u8 cmd)
508+{
509+ s32 data;
510+ u16 val;
511+
512+ data = i2c_smbus_read_word_data(tsc->client, cmd);
513+ if (data < 0) {
514+ dev_err(&tsc->client->dev, "i2c io error: %d\n", data);
515+ return data;
516+ }
517+
518+ /* The protocol and raw data format from i2c interface:
519+ * S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
520+ * Where DataLow has [D11-D4], DataHigh has [D3-D0 << 4 | Dummy 4bit].
521+ */
522+ val = swab16(data) >> 4;
523+
524+ dev_dbg(&tsc->client->dev, "data: 0x%x, val: 0x%x\n", data, val);
525+
526+ return val;
527+}
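
tsc2003_xfer() above turns the raw SMBus word into a 12-bit sample by byte-swapping it and dropping the four dummy bits, exactly as the protocol comment describes. A tiny standalone sketch of that conversion; the raw value 0x40ab is made up for illustration:

#include <stdio.h>
#include <stdint.h>

/* Same conversion as tsc2003_xfer(): the bus delivers DataLow first,
 * so swap the bytes, then shift out the 4 dummy bits. */
static uint16_t tsc_word_to_sample(uint16_t smbus_word)
{
        uint16_t swapped = (uint16_t)((smbus_word << 8) | (smbus_word >> 8));

        return swapped >> 4;
}

int main(void)
{
        uint16_t raw = 0x40ab;   /* hypothetical i2c_smbus_read_word_data() result */

        printf("raw=0x%04x sample=0x%03x (%u)\n",
               raw, tsc_word_to_sample(raw), tsc_word_to_sample(raw));
        return 0;
}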
528+
529+static void tsc2003_send_event(void *tsc)
530+{
531+ struct tsc2003 *ts = tsc;
532+ struct input_dev *input = ts->input;
533+ u32 rt = 0;
534+ u16 x, y, z1, z2;
535+
536+ x = ts->tc.x;
537+ y = ts->tc.y;
538+ z1 = ts->tc.z1;
539+ z2 = ts->tc.z2;
540+
541+ /* range filtering */
542+ if (x == MAX_12BIT)
543+ x = 0;
544+
545+ if (likely(x && z1)) {
546+ /* compute touch pressure resistance using equation #1 */
547+ rt = z2;
548+ rt -= z1;
549+ rt *= x;
550+ rt *= ts->x_plate_ohms;
551+ rt /= z1;
552+ rt = (rt + 2047) >> 12;
553+ }
554+
555+ /* Sample found inconsistent by debouncing or pressure is beyond
556+ * the maximum. Don't report it to user space; repeat the
557+ * measurement at least once more.
558+ */
559+ if (rt > MAX_12BIT)
560+ return;
561+
562+ /* NOTE: We can't rely on the pressure to determine the pen down
563+ * state, even though this controller has a pressure sensor. The pressure
564+ * value can fluctuate for quite a while after lifting the pen and
565+ * in some cases may not even settle at the expected value.
566+ *
567+ * The only safe way to check for the pen up condition is in the
568+ * timer by reading the pen signal state (it's a GPIO _and_ IRQ).
569+ */
570+ if (rt) {
571+ if (!ts->pendown) {
572+ dev_dbg(&ts->client->dev, "DOWN\n");
573+
574+ input_report_key(input, BTN_TOUCH, 1);
575+ ts->pendown = 1;
576+ }
577+
578+ input_report_abs(input, ABS_X, x);
579+ input_report_abs(input, ABS_Y, y);
580+ input_report_abs(input, ABS_PRESSURE, rt);
581+
582+ input_sync(input);
583+
584+ dev_dbg(&ts->client->dev, "point(%4d,%4d), pressure (%4u)\n",
585+ x, y, rt);
586+ } else if (ts->pendown) {
587+ /* pen up */
588+ dev_dbg(&ts->client->dev, "UP\n");
589+ input_report_key(input, BTN_TOUCH, 0);
590+ input_report_abs(input, ABS_PRESSURE, 0);
591+ input_sync(input);
592+
593+ ts->pendown = 0;
594+ }
595+}
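
tsc2003_send_event() above computes the touch resistance with the usual "equation #1" for 4-wire resistive panels, Rt = Rx_plate * (X / 4096) * (Z2 - Z1) / Z1, carried out in fixed point and scaled back down by 12 bits with rounding. A standalone sketch with made-up sample values (the 12-bit readings and plate resistance are assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

/* Fixed-point version of "equation #1", as in tsc2003_send_event(). */
static uint32_t touch_resistance(uint32_t x, uint32_t z1, uint32_t z2,
                                 uint32_t x_plate_ohms)
{
        uint32_t rt;

        if (!x || !z1)
                return 0;
        rt = z2 - z1;
        rt *= x;
        rt *= x_plate_ohms;
        rt /= z1;
        return (rt + 2047) >> 12;    /* round and drop the 12-bit scale */
}

int main(void)
{
        /* hypothetical 12-bit samples and plate resistance */
        printf("Rt = %u ohms\n", touch_resistance(2000, 400, 900, 250));
        return 0;
}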
596+
597+static int tsc2003_power_off_irq_en(struct tsc2003 *tsc)
598+{
599+ /* power down */
600+ return tsc2003_xfer(tsc, PWRDOWN);
601+}
602+
603+static int tsc2003_read_values(struct tsc2003 *tsc)
604+{
605+ /* y- still on; turn on only y+ (and ADC) */
606+ tsc->tc.y = tsc2003_xfer(tsc, READ_Y);
607+ if (tsc->tc.y < 0)
608+ return tsc->tc.y;
609+
610+ /* turn y- off, x+ on, then leave in lowpower */
611+ tsc->tc.x = tsc2003_xfer(tsc, READ_X);
612+ if (tsc->tc.x < 0)
613+ return tsc->tc.x;
614+
615+ /* turn y+ off, x- on; we'll use formula #1 */
616+ tsc->tc.z1 = tsc2003_xfer(tsc, READ_Z1);
617+ if (tsc->tc.z1 < 0)
618+ return tsc->tc.z1;
619+
620+ tsc->tc.z2 = tsc2003_xfer(tsc, READ_Z2);
621+ if (tsc->tc.z2 < 0)
622+ return tsc->tc.z2;
623+
624+ return 0;
625+}
626+
627+
628+static irqreturn_t tsc2003_irq(int irq, void *handle)
629+{
630+ struct tsc2003 *ts = handle;
631+
632+ /* do not call the synced version -> deadlock */
633+ disable_irq_nosync(irq);
634+ /* signal the thread to continue */
635+ complete(&ts->penirq_completion);
636+
637+ return IRQ_HANDLED;
638+}
639+
640+static int tsc2003_thread(void *d)
641+{
642+ struct tsc2003 *ts = (struct tsc2003 *)d;
643+ int ret;
644+
645+ allow_signal(SIGKILL);
646+
647+ while (!signal_pending(current)) {
648+ /* power down and wait for interrupt */
649+ do {
650+ /* loop because the I2C bus might be busy */
651+ ret = msleep_interruptible(TS_POLL_PERIOD);
652+ if (!ret)
653+ ret = tsc2003_power_off_irq_en(ts);
654+ } while (ret == -EAGAIN && !signal_pending(current));
655+
656+ if (signal_pending(current))
657+ break;
658+
659+ ret = wait_for_completion_interruptible(&ts->penirq_completion);
660+ if (!ret) {
661+ int first = 1;
662+ /* got IRQ, start poll, until pen is up */
663+ while (!ret && !signal_pending(current)
664+ && (first || ts->pendown)) {
665+ ret = tsc2003_read_values(ts);
666+ if (!ret)
667+ tsc2003_send_event(ts);
668+ ret = msleep_interruptible(TS_POLL_PERIOD);
669+ first = 0;
670+ }
671+
672+ /* re-enable the interrupt */
673+ if (!signal_pending(current))
674+ enable_irq(ts->client->irq);
675+ }
676+ }
677+
678+ return 0;
679+}
680+
681+static int tsc2003_probe(struct i2c_client *client,
682+ const struct i2c_device_id *id)
683+{
684+ struct tsc2003 *ts;
685+ struct tsc2007_platform_data *pdata = client->dev.platform_data;
686+ struct input_dev *input_dev;
687+ int err;
688+
689+ if (!pdata) {
690+ dev_err(&client->dev, "platform data is required!\n");
691+ return -EINVAL;
692+ }
693+
694+ if (!i2c_check_functionality(client->adapter,
695+ I2C_FUNC_SMBUS_READ_WORD_DATA))
696+ return -EIO;
697+
698+ ts = kzalloc(sizeof(struct tsc2003), GFP_KERNEL);
699+ input_dev = input_allocate_device();
700+ if (!ts || !input_dev) {
701+ err = -ENOMEM;
702+ goto err_free_mem;
703+ }
704+
705+ ts->client = client;
706+ i2c_set_clientdata(client, ts);
707+
708+ ts->input = input_dev;
709+
710+ ts->model = pdata->model;
711+ ts->x_plate_ohms = pdata->x_plate_ohms;
712+
713+ snprintf(ts->phys, sizeof(ts->phys),
714+ "%s/input0", dev_name(&client->dev));
715+
716+ input_dev->name = TSC2003_DRIVER_NAME" Touchscreen";
717+ input_dev->phys = ts->phys;
718+ input_dev->id.bustype = BUS_I2C;
719+
720+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
721+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
722+
723+ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
724+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
725+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
726+
727+ init_completion(&ts->penirq_completion);
728+
729+ ts->task = kthread_run(tsc2003_thread, ts, TSC2003_DRIVER_NAME);
730+ if (IS_ERR(ts->task)) {
731+ err = PTR_ERR(ts->task);
732+ goto err_free_mem;
733+ }
734+
735+ err = request_irq(client->irq, tsc2003_irq, 0,
736+ client->dev.driver->name, ts);
737+ if (err < 0) {
738+ dev_err(&client->dev, "irq %d busy?\n", client->irq);
739+ goto err_free_thread;
740+ }
741+
742+ err = input_register_device(input_dev);
743+ if (err)
744+ goto err_free_irq;
745+
746+ dev_info(&client->dev, "registered with irq (%d)\n", client->irq);
747+
748+ return 0;
749+
750+ err_free_irq:
751+ free_irq(client->irq, ts);
752+ err_free_thread:
753+ kthread_stop(ts->task);
754+ err_free_mem:
755+ input_free_device(input_dev);
756+ kfree(ts);
757+ return err;
758+}
759+
760+static int tsc2003_remove(struct i2c_client *client)
761+{
762+ struct tsc2003 *ts = i2c_get_clientdata(client);
763+
764+ free_irq(client->irq, ts);
765+ send_sig(SIGKILL, ts->task, 1);
766+ kthread_stop(ts->task);
767+ input_unregister_device(ts->input);
768+ kfree(ts);
769+
770+ return 0;
771+}
772+
773+static struct i2c_device_id tsc2003_idtable[] = {
774+ { TSC2003_DRIVER_NAME, 0 },
775+ { }
776+};
777+
778+MODULE_DEVICE_TABLE(i2c, tsc2003_idtable);
779+
780+static struct i2c_driver tsc2003_driver = {
781+ .driver = {
782+ .owner = THIS_MODULE,
783+ .name = TSC2003_DRIVER_NAME,
784+ .bus = &i2c_bus_type,
785+ },
786+ .id_table = tsc2003_idtable,
787+ .probe = tsc2003_probe,
788+ .remove = tsc2003_remove,
789+};
790+
791+static int __init tsc2003_init(void)
792+{
793+ return i2c_add_driver(&tsc2003_driver);
794+}
795+
796+static void __exit tsc2003_exit(void)
797+{
798+ i2c_del_driver(&tsc2003_driver);
799+}
800+
801+module_init(tsc2003_init);
802+module_exit(tsc2003_exit);
803+
804+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
805+MODULE_DESCRIPTION("TSC2003 TouchScreen Driver");
806+MODULE_LICENSE("GPL v2");
807+
808diff -uNr linux-2.6.29-clean/drivers/media/video/adv7180.c linux-2.6.29/drivers/media/video/adv7180.c
809--- linux-2.6.29-clean/drivers/media/video/adv7180.c 1969-12-31 16:00:00.000000000 -0800
810+++ linux-2.6.29/drivers/media/video/adv7180.c 2009-04-06 13:51:47.000000000 -0700
811@@ -0,0 +1,361 @@
812+/*
813+ * adv7180.c Analog Devices ADV7180 video decoder driver
814+ * Copyright (c) 2009 Intel Corporation
815+ *
816+ * This program is free software; you can redistribute it and/or modify
817+ * it under the terms of the GNU General Public License version 2 as
818+ * published by the Free Software Foundation.
819+ *
820+ * This program is distributed in the hope that it will be useful,
821+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
822+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
823+ * GNU General Public License for more details.
824+ *
825+ * You should have received a copy of the GNU General Public License
826+ * along with this program; if not, write to the Free Software
827+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
828+ */
829+
830+#include <linux/module.h>
831+#include <linux/init.h>
832+#include <linux/interrupt.h>
833+#include <linux/delay.h>
834+#include <linux/errno.h>
835+#include <linux/fs.h>
836+#include <linux/kernel.h>
837+#include <linux/major.h>
838+#include <linux/slab.h>
839+#include <linux/mm.h>
840+#include <linux/signal.h>
841+#include <linux/types.h>
842+#include <linux/io.h>
843+#include <asm/pgtable.h>
844+#include <asm/page.h>
845+#include <linux/uaccess.h>
846+
847+#include <linux/i2c-ocores.h>
848+#include <linux/platform_device.h>
849+#include <linux/i2c.h>
850+#include <linux/i2c-id.h>
851+#include <linux/videodev.h>
852+#include <linux/video_decoder.h>
853+#include <media/v4l2-ioctl.h>
854+#include <media/adv7180.h>
855+
856+
857+MODULE_DESCRIPTION("Analog Devices ADV7180 video decoder driver");
858+MODULE_AUTHOR("Mocean Laboratories");
859+MODULE_LICENSE("GPL v2");
860+
861+static inline int adv7180_write(struct i2c_client *client,
862+ u8 reg, u8 value)
863+{
864+ struct adv7180 *decoder = i2c_get_clientdata(client);
865+
866+ decoder->reg[reg] = value;
867+ return i2c_smbus_write_byte_data(client, reg, value);
868+}
869+
870+static inline int adv7180_read(struct i2c_client *client, u8 reg)
871+{
872+ return i2c_smbus_read_byte_data(client, reg);
873+}
874+
875+static int adv7180_write_block(struct i2c_client *client,
876+ const u8 *data, unsigned int len)
877+{
878+ int ret = -1;
879+ u8 reg;
880+
881+ /* the adv7180 has an autoincrement function, use it if
882+ * the adapter understands raw I2C */
883+ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
884+ /* do raw I2C, not smbus compatible */
885+ struct adv7180 *decoder = i2c_get_clientdata(client);
886+ u8 block_data[32];
887+ int block_len;
888+
889+ while (len >= 2) {
890+ block_len = 0;
891+ reg = data[0];
892+ block_data[block_len++] = reg;
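+ /* gather values for consecutive registers into one autoincrement transfer (max 32 bytes) */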
893+ do {
894+ block_data[block_len++] =
895+ decoder->reg[reg++] = data[1];
896+ len -= 2;
897+ data += 2;
898+ } while (len >= 2 && data[0] == reg &&
899+ block_len < 32);
900+
901+ ret = i2c_master_send(client, block_data, block_len);
902+ if (ret < 0)
903+ break;
904+ }
905+ } else {
906+ /* fall back to writing one register at a time over SMBus */
907+ while (len >= 2) {
908+ reg = *data++;
909+ ret = adv7180_write(client, reg, *data++);
910+ if (ret < 0)
911+ break;
912+
913+ len -= 2;
914+ }
915+ }
916+
917+ return ret;
918+}
919+#ifdef CONFIG_MFD_TIMBERDALE
920+static irqreturn_t adv7180_irq(int irq, void *dev_id)
921+{
922+ struct adv7180 *decoder = (struct adv7180 *) dev_id;
923+
924+ /* Activate access to sub-regs */
925+ adv7180_write(decoder->client, ADV7180_ADI_CTRL, ADI_ENABLE);
926+
927+ /* TODO: implement a real interrupt handler;
928+ * for now just clear all four
929+ * interrupt registers
930+ */
931+ adv7180_write_block(decoder->client, reset_icr, sizeof(reset_icr));
932+
933+ return IRQ_HANDLED;
934+}
935+#endif
936+static int adv7180_command(struct i2c_client *client,
937+ unsigned int cmd, void *arg)
938+{
939+ struct adv7180 *decoder = i2c_get_clientdata(client);
940+ int *iarg = (int *)arg;
941+ int status;
942+
943+ switch (cmd) {
944+
945+ case DECODER_INIT:
946+ adv7180_write(client, 0x0f, 0x80); /* Reset */
947+ break;
948+
949+ case DECODER_GET_CAPABILITIES:
950+ {
951+ struct video_decoder_capability *cap = arg;
952+ cap->flags = VIDEO_DECODER_PAL |
953+ VIDEO_DECODER_NTSC |
954+ VIDEO_DECODER_SECAM |
955+ VIDEO_DECODER_AUTO;
956+ cap->inputs = 3;
957+ cap->outputs = 1;
958+ }
959+ break;
960+
961+ case DECODER_GET_STATUS:
962+ {
963+ *iarg = 0;
964+ status = adv7180_read(client, ADV7180_SR);
965+ if ((status & ADV7180_STATUS_PAL))
966+ *iarg = (*iarg | DECODER_STATUS_PAL);
967+
968+ if ((status & ADV7180_STATUS_NTSC))
969+ *iarg = (*iarg | DECODER_STATUS_NTSC);
970+
971+ if ((status & ADV7180_STATUS_SECAM))
972+ *iarg = (*iarg | DECODER_STATUS_SECAM);
973+ }
974+ break;
975+
976+ case DECODER_SET_NORM:
977+ {
978+ int v = *(int *) arg;
979+ if (decoder->norm != v) {
980+ decoder->norm = v;
981+ switch (v) {
982+ case VIDEO_MODE_NTSC:
983+ adv7180_write(client, ADV7180_IN_CTRL, 0x40);
984+ break;
985+ case VIDEO_MODE_PAL:
986+ adv7180_write(client, ADV7180_IN_CTRL, 0x70);
987+ break;
988+ case VIDEO_MODE_SECAM:
989+ adv7180_write(client, ADV7180_IN_CTRL, 0x90);
990+ break;
991+ case VIDEO_MODE_AUTO:
992+ adv7180_write(client, ADV7180_IN_CTRL, 0x00);
993+ break;
994+ default:
995+ return -EPERM;
996+ }
997+ }
998+ }
999+ break;
1000+
1001+ case DECODER_SET_INPUT:
1002+ {
1003+ int v = *(int *) arg;
1004+ if (decoder->input != v) {
1005+ decoder->input = v;
1006+
1007+ switch (v) {
1008+ case CVBS:
1009+ adv7180_write_block(client, init_cvbs_64,
1010+ sizeof(init_cvbs_64));
1011+ break;
1012+ case SVIDEO:
1013+ adv7180_write_block(client, init_svideo_64,
1014+ sizeof(init_svideo_64));
1015+ break;
1016+ case YPbPr:
1017+ adv7180_write_block(client, init_ypbpr_64,
1018+ sizeof(init_ypbpr_64));
1019+ break;
1020+ default:
1021+ return -EINVAL;
1022+ }
1023+ }
1024+ }
1025+ break;
1026+
1027+ case DECODER_SET_OUTPUT:
1028+ {
1029+ }
1030+ break;
1031+
1032+ case DECODER_ENABLE_OUTPUT:
1033+ {
1034+ }
1035+ break;
1036+
1037+ case DECODER_SET_PICTURE:
1038+ {
1039+ }
1040+ break;
1041+
1042+ case DECODER_DUMP:
1043+ {
1044+ adv7180_write(client, 1, 0x88);
1045+ }
1046+ break;
1047+
1048+ default:
1049+ return -EINVAL;
1050+ }
1051+ return 0;
1052+}
1053+
1054+/* ----------------------------------------------------------------------- */
1055+
1056+/*
1057+ * Generic i2c probe
1058+ * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
1059+ */
1060+static unsigned short normal_i2c[] = {
1061+ 0x40 >> 1, 0x41 >> 1,
1062+ I2C_ADV7180 >> 1, 0x43 >> 1,
1063+ I2C_CLIENT_END
1064+};
1065+
1066+I2C_CLIENT_INSMOD;
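+/* the macro above generates the addr_data table referenced by .address_data below */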
1067+
1068+static int adv7180_detect(struct i2c_client *client, int kind,
1069+ struct i2c_board_info *info)
1070+{
1071+ struct i2c_adapter *adapter = client->adapter;
1072+
1073+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE
1074+ | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
1075+ return -ENODEV;
1076+
1077+ /* Is chip alive ? */
1078+ if (adv7180_read(client, 0x11) != 0x1b)
1079+ return -ENODEV;
1080+
1081+ strlcpy(info->type, DRIVER_NAME, I2C_NAME_SIZE);
1082+
1083+ return 0;
1084+}
1085+
1086+static int adv7180_probe(struct i2c_client *client,
1087+ const struct i2c_device_id *id)
1088+{
1089+ int err = 0;
1090+ struct adv7180 *decoder;
1091+
1092+ printk(KERN_INFO DRIVER_NAME" chip found @ 0x%x (%s)\n",
1093+ client->addr << 1, client->adapter->name);
1094+
1095+ decoder = kzalloc(sizeof(struct adv7180), GFP_KERNEL);
1096+ if (decoder == NULL)
1097+ return -ENOMEM;
1098+
1099+ decoder->norm = VIDEO_MODE_PAL | VIDEO_MODE_NTSC |
1100+ VIDEO_MODE_SECAM |
1101+ VIDEO_MODE_AUTO;
1102+ decoder->input = CVBS;
1103+ decoder->enable = 1;
1104+ decoder->client = client;
1105+ i2c_set_clientdata(client, decoder);
1106+#ifdef CONFIG_MFD_TIMBERDALE
1107+ err = request_irq(client->irq, adv7180_irq, 0,
1108+ client->dev.driver->name, decoder);
1109+ if (err < 0) {
1110+ dev_err(&client->dev, "irq %d busy?\n", client->irq);
1111+ goto err_free_dec;
1112+ }
1113+ dev_info(&client->dev, "registered with irq (%d)\n", client->irq);
1114+#endif
1115+ adv7180_command(client, DECODER_INIT, NULL); /* Reset */
1116+
1117+ return 0;
1118+#ifdef CONFIG_MFD_TIMBERDALE
1119+err_free_dec:
1120+ kfree(decoder);
1121+
1122+ return err;
1123+#endif
1124+}
1125+
1126+static int adv7180_remove(struct i2c_client *client)
1127+{
1128+ struct adv7180 *decoder = i2c_get_clientdata(client);
1129+#ifdef CONFIG_MFD_TIMBERDALE
1130+ free_irq(client->irq, decoder);
1131+#endif
1132+ kfree(decoder);
1133+ return 0;
1134+}
1135+
1136+/* ----------------------------------------------------------------------- */
1137+static const struct i2c_device_id adv7180_id[] = {
1138+ { DRIVER_NAME, 0 },
1139+ { }
1140+};
1141+MODULE_DEVICE_TABLE(i2c, adv7180_id);
1142+
1143+static struct i2c_driver i2c_driver_adv7180 = {
1144+ .driver = {
1145+ .owner = THIS_MODULE,
1146+ .name = DRIVER_NAME,
1147+ .bus = &i2c_bus_type,
1148+ },
1149+
1150+ .id_table = adv7180_id,
1151+ .probe = adv7180_probe,
1152+ .remove = adv7180_remove,
1153+
1154+ .class = 0xffffffff,
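+ /* match any adapter class so adv7180_detect() is tried on all busses */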
1155+ .detect = adv7180_detect,
1156+ .address_data = &addr_data,
1157+
1158+ .command = adv7180_command,
1159+};
1160+
1161+static int __init adv7180_init(void)
1162+{
1163+ return i2c_add_driver(&i2c_driver_adv7180);
1164+}
1165+
1166+static void __exit adv7180_exit(void)
1167+{
1168+ i2c_del_driver(&i2c_driver_adv7180);
1169+}
1170+
1171+module_init(adv7180_init);
1172+module_exit(adv7180_exit);
1173diff -uNr linux-2.6.29-clean/drivers/media/video/Kconfig linux-2.6.29/drivers/media/video/Kconfig
1174--- linux-2.6.29-clean/drivers/media/video/Kconfig 2009-04-01 09:20:24.000000000 -0700
1175+++ linux-2.6.29/drivers/media/video/Kconfig 2009-04-06 13:51:47.000000000 -0700
1176@@ -251,6 +251,15 @@
1177
1178 comment "Video decoders"
1179
1180+config VIDEO_ADV7180
1181+ tristate "Analog Devices ADV7180 decoder"
1182+ depends on VIDEO_V4L1 && I2C
1183+ ---help---
1184+ Support for the Analog Devices ADV7180 video decoder.
1185+
1186+ To compile this driver as a module, choose M here: the
1187+ module will be called adv7180.
1188+
1189 config VIDEO_BT819
1190 tristate "BT819A VideoStream decoder"
1191 depends on VIDEO_V4L1 && I2C
1192@@ -800,6 +809,12 @@
1193 ---help---
1194 This is a v4l2 driver for the TI OMAP2 camera capture interface
1195
1196+config VIDEO_TIMBERDALE
1197+ tristate "Support for timberdale Video In/LogiWIN"
1198+ depends on VIDEO_V4L2 && MFD_TIMBERDALE_DMA
1199+ ---help---
1200+ Add support for the Video In peripheral of the timberdale FPGA.
1201+
1202 #
1203 # USB Multimedia device configuration
1204 #
1205diff -uNr linux-2.6.29-clean/drivers/media/video/Makefile linux-2.6.29/drivers/media/video/Makefile
1206--- linux-2.6.29-clean/drivers/media/video/Makefile 2009-04-01 09:20:24.000000000 -0700
1207+++ linux-2.6.29/drivers/media/video/Makefile 2009-04-06 13:51:47.000000000 -0700
1208@@ -52,6 +52,7 @@
1209 obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
1210 obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
1211 obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
1212+obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
1213 obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
1214 obj-$(CONFIG_VIDEO_BT819) += bt819.o
1215 obj-$(CONFIG_VIDEO_BT856) += bt856.o
1216@@ -148,6 +149,8 @@
1217
1218 obj-$(CONFIG_VIDEO_AU0828) += au0828/
1219
1220+obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o
1221+
1222 obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
1223
1224 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
1225diff -uNr linux-2.6.29-clean/drivers/media/video/timblogiw.c linux-2.6.29/drivers/media/video/timblogiw.c
1226--- linux-2.6.29-clean/drivers/media/video/timblogiw.c 1969-12-31 16:00:00.000000000 -0800
1227+++ linux-2.6.29/drivers/media/video/timblogiw.c 2009-04-06 13:51:47.000000000 -0700
1228@@ -0,0 +1,930 @@
1229+/*
1230+ * timblogiw.c timberdale FPGA LogiWin Video In driver
1231+ * Copyright (c) 2009 Intel Corporation
1232+ *
1233+ * This program is free software; you can redistribute it and/or modify
1234+ * it under the terms of the GNU General Public License version 2 as
1235+ * published by the Free Software Foundation.
1236+ *
1237+ * This program is distributed in the hope that it will be useful,
1238+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
1239+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1240+ * GNU General Public License for more details.
1241+ *
1242+ * You should have received a copy of the GNU General Public License
1243+ * along with this program; if not, write to the Free Software
1244+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
1245+ */
1246+
1247+/* Supports:
1248+ * Timberdale FPGA LogiWin Video In
1249+ */
1250+
1251+#include <linux/list.h>
1252+#include <linux/version.h>
1253+#include <linux/module.h>
1254+#include <linux/pci.h>
1255+#include <linux/dma-mapping.h>
1256+#include <media/v4l2-common.h>
1257+#include <media/v4l2-ioctl.h>
1258+#include <linux/platform_device.h>
1259+#include <linux/interrupt.h>
1260+#include "timblogiw.h"
1261+#include <linux/mfd/timbdma.h>
1262+
1263+
1264+#define TIMBLOGIW_CTRL 0x40
1265+
1266+#define TIMBLOGIW_H_SCALE 0x20
1267+#define TIMBLOGIW_V_SCALE 0x28
1268+
1269+#define TIMBLOGIW_X_CROP 0x58
1270+#define TIMBLOGIW_Y_CROP 0x60
1271+
1272+#define TIMBLOGIW_W_CROP 0x00
1273+#define TIMBLOGIW_H_CROP 0x08
1274+
1275+#define TIMBLOGIW_VERSION_CODE 0x02
1276+
1277+#define TIMBLOGIW_FRAME 0x10
1278+#define TIMBLOGIW_DROP 0x20
1279+
1280+#define TIMBLOGIW_BUF 0x04
1281+#define TIMBLOGIW_TBI 0x2c
1282+#define TIMBLOGIW_BPL 0x30
1283+
1284+#define dbg(...)
1285+
1286+const struct timblogiw_tvnorm timblogiw_tvnorms[] = {
1287+ {
1288+ .v4l2_id = V4L2_STD_PAL,
1289+ .name = "PAL",
1290+ .swidth = 720,
1291+ .sheight = 576
1292+ },
1293+ {
1294+ .v4l2_id = V4L2_STD_NTSC_M,
1295+ .name = "NTSC",
1296+ .swidth = 720,
1297+ .sheight = 480
1298+ }
1299+};
1300+
1301+static void timblogiw_handleframe(unsigned long arg)
1302+{
1303+ struct timblogiw_frame *f;
1304+ struct timblogiw *lw = (struct timblogiw *)arg;
1305+
1306+ spin_lock_bh(&lw->queue_lock);
1307+ if (!list_empty(&lw->inqueue)) {
1308+ /* put the entry in the outqueue */
1309+ f = list_entry(lw->inqueue.next, struct timblogiw_frame, frame);
1310+
1311+ /* copy data from the DMA buffer */
1312+ memcpy(f->bufmem, lw->dma.filled->buf, f->buf.length);
1313+ /* buffer consumed */
1314+ lw->dma.filled = NULL;
1315+
1316+ do_gettimeofday(&f->buf.timestamp);
1317+ f->buf.sequence = ++lw->frame_count;
1318+ f->buf.field = V4L2_FIELD_NONE;
1319+ f->state = F_DONE;
1320+ f->buf.bytesused = lw->frame_size;
1321+ list_move_tail(&f->frame, &lw->outqueue);
1322+ /* wake up any waiter */
1323+ wake_up(&lw->wait_frame);
1324+ }
1325+ spin_unlock_bh(&lw->queue_lock);
1326+}
1327+
1328+static int timblogiw_isr(u32 flag, void *pdev)
1329+{
1330+ struct timblogiw *lw = (struct timblogiw *)pdev;
1331+
1332+ if (!lw->dma.filled) {
1333+ /* no stored transfer so far, store this, and flip to next */
1334+ lw->dma.filled = lw->dma.transfer + lw->dma.curr;
1335+ lw->dma.curr = !lw->dma.curr;
1336+ }
1337+
1338+ if (lw->stream == STREAM_ON)
1339+ timb_start_dma(DMA_IRQ_VIDEO_RX,
1340+ lw->dma.transfer[lw->dma.curr].handle, lw->frame_size,
1341+ lw->bytesperline);
1342+
1343+ if (flag & DMA_IRQ_VIDEO_DROP)
1344+ dbg("%s: frame dropped\n", __func__);
1345+ if (flag & DMA_IRQ_VIDEO_RX) {
1346+ dbg("%s: frame RX\n", __func__);
1347+ tasklet_schedule(&lw->tasklet);
1348+ }
1349+ return 0;
1350+}
1351+
1352+static void timblogiw_empty_framequeues(struct timblogiw *lw)
1353+{
1354+ u32 i;
1355+
1356+ dbg("%s\n", __func__);
1357+
1358+ INIT_LIST_HEAD(&lw->inqueue);
1359+ INIT_LIST_HEAD(&lw->outqueue);
1360+
1361+ for (i = 0; i < lw->num_frames; i++) {
1362+ lw->frame[i].state = F_UNUSED;
1363+ lw->frame[i].buf.bytesused = 0;
1364+ }
1365+}
1366+
1367+u32 timblogiw_request_buffers(struct timblogiw *lw, u32 count)
1368+{
369+ /* needs to be page aligned because the
370+ * buffers can be mapped individually */
1371+ const size_t imagesize = PAGE_ALIGN(lw->frame_size);
1372+ void *buff = NULL;
1373+ u32 i;
1374+
1375+ dbg("%s - request of %i buffers of size %zi\n",
1376+ __func__, count, lw->frame_size);
1377+
1378+ lw->dma.transfer[0].buf = pci_alloc_consistent(lw->dev, imagesize,
1379+ &lw->dma.transfer[0].handle);
1380+ lw->dma.transfer[1].buf = pci_alloc_consistent(lw->dev, imagesize,
1381+ &lw->dma.transfer[1].handle);
1382+ if ((lw->dma.transfer[0].buf == NULL) ||
1383+ (lw->dma.transfer[1].buf == NULL)) {
1384+ printk(KERN_ALERT "alloc failed\n");
1385+ if (lw->dma.transfer[0].buf != NULL)
1386+ pci_free_consistent(lw->dev, imagesize,
1387+ lw->dma.transfer[0].buf,
1388+ lw->dma.transfer[0].handle);
1389+ if (lw->dma.transfer[1].buf != NULL)
1390+ pci_free_consistent(lw->dev, imagesize,
1391+ lw->dma.transfer[1].buf,
1392+ lw->dma.transfer[1].handle);
1393+ return 0;
1394+ }
1395+
1396+ if (count > TIMBLOGIW_NUM_FRAMES)
1397+ count = TIMBLOGIW_NUM_FRAMES;
1398+
1399+ lw->num_frames = count;
1400+ while (lw->num_frames > 0) {
1401+ buff = vmalloc_32(lw->num_frames * imagesize);
1402+ if (buff) {
1403+ memset(buff, 0, lw->num_frames * imagesize);
1404+ break;
1405+ }
1406+ lw->num_frames--;
1407+ }
1408+
1409+ for (i = 0; i < lw->num_frames; i++) {
1410+ lw->frame[i].bufmem = buff + i * imagesize;
1411+ lw->frame[i].buf.index = i;
1412+ lw->frame[i].buf.m.offset = i * imagesize;
1413+ lw->frame[i].buf.length = lw->frame_size;
1414+ lw->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1415+ lw->frame[i].buf.sequence = 0;
1416+ lw->frame[i].buf.field = V4L2_FIELD_NONE;
1417+ lw->frame[i].buf.memory = V4L2_MEMORY_MMAP;
1418+ lw->frame[i].buf.flags = 0;
1419+ }
1420+
1421+ lw->dma.curr = 0;
1422+ lw->dma.filled = NULL;
1423+ return lw->num_frames;
1424+}
1425+
1426+void timblogiw_release_buffers(struct timblogiw *lw)
1427+{
1428+ dbg("%s\n", __func__);
1429+
1430+ if (lw->frame[0].bufmem != NULL) {
1431+ vfree(lw->frame[0].bufmem);
1432+ lw->frame[0].bufmem = NULL;
1433+ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
1434+ pci_free_consistent(lw->dev, lw->frame_size,
1435+ lw->dma.transfer[0].buf, lw->dma.transfer[0].handle);
1436+ pci_free_consistent(lw->dev, lw->frame_size,
1437+ lw->dma.transfer[1].buf, lw->dma.transfer[1].handle);
1438+ }
1439+}
1440+
1441+/* IOCTL functions */
1442+
1443+static int timblogiw_g_fmt(struct timblogiw *lw, struct v4l2_format *format)
1444+{
1445+ dbg("%s -\n", __func__);
1446+
1447+ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1448+ return -EINVAL;
1449+
1450+ format->fmt.pix.width = lw->width;
1451+ format->fmt.pix.height = lw->height;
1452+ format->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
1453+ format->fmt.pix.bytesperline = lw->bytesperline;
1454+ format->fmt.pix.sizeimage = lw->frame_size;
1455+ format->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
1456+ format->fmt.pix.field = V4L2_FIELD_NONE;
1457+ return 0;
1458+}
1459+
1460+static int timblogiw_s_fmt(struct timblogiw *lw, struct v4l2_format *format)
1461+{
1462+ struct v4l2_pix_format *pix = &format->fmt.pix;
1463+ dbg("%s - type: %d\n", __func__, format->type);
1464+
1465+ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1466+ return -EINVAL;
1467+
1468+ if ((lw->height != pix->height) || (lw->width != pix->width))
1469+ return -EINVAL;
1470+
1471+ if (format->fmt.pix.field != V4L2_FIELD_NONE)
1472+ return -EINVAL;
1473+
1474+ dbg("%s - width=%d, height=%d, pixelformat=%d, field=%d\n"
1475+ "bytes per line %d, size image: %d, colorspace: %d\n",
1476+ __func__,
1477+ pix->width, pix->height, pix->pixelformat, pix->field,
1478+ pix->bytesperline, pix->sizeimage, pix->colorspace);
1479+
1480+ return 0;
1481+}
1482+
1483+static int timblogiw_querycap(struct timblogiw *lw,
1484+ struct v4l2_capability *cap)
1485+{
1486+ memset(cap, 0, sizeof(*cap));
1487+ strncpy(cap->card, "Timberdale Video", sizeof(cap->card)-1);
1488+ strncpy(cap->driver, "Timblogiw", sizeof(cap->driver)-1);
1489+ cap->version = TIMBLOGIW_VERSION_CODE;
1490+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
1491+ V4L2_CAP_STREAMING;
1492+
1493+ return 0;
1494+}
1495+
1496+static int timblogiw_enum_fmt(struct timblogiw *lw, struct v4l2_fmtdesc *fmt)
1497+{
1498+ dbg("%s - VIDIOC_ENUM_FMT\n", __func__);
1499+
1500+ if (fmt->index != 0)
1501+ return -EINVAL;
1502+ memset(fmt, 0, sizeof(*fmt));
1503+ fmt->index = 0;
1504+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1505+ strncpy(fmt->description, "4:2:2, packed, YUYV",
1506+ sizeof(fmt->description)-1);
1507+ fmt->pixelformat = V4L2_PIX_FMT_YUYV;
1508+ memset(fmt->reserved, 0, sizeof(fmt->reserved));
1509+
1510+ return 0;
1511+}
1512+
1513+static int timblogiw_reqbufs(struct timblogiw *lw,
1514+ struct v4l2_requestbuffers *rb)
1515+{
1516+ if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1517+ rb->memory != V4L2_MEMORY_MMAP)
1518+ return -EINVAL;
1519+
1520+ timblogiw_empty_framequeues(lw);
1521+
1522+ timblogiw_release_buffers(lw);
1523+ if (rb->count)
1524+ rb->count = timblogiw_request_buffers(lw, rb->count);
1525+
1526+ dbg("%s - VIDIOC_REQBUFS: io method is mmap. num bufs %i\n",
1527+ __func__, rb->count);
1528+
1529+ return 0;
1530+}
1531+
1532+static int timblogiw_querybuf(struct timblogiw *lw, struct v4l2_buffer *b)
1533+{
1534+ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1535+ b->index >= lw->num_frames)
1536+ return -EINVAL;
1537+
1538+ memcpy(b, &lw->frame[b->index].buf, sizeof(*b));
1539+
1540+ if (lw->frame[b->index].vma_use_count)
1541+ b->flags |= V4L2_BUF_FLAG_MAPPED;
1542+
1543+ if (lw->frame[b->index].state == F_DONE)
1544+ b->flags |= V4L2_BUF_FLAG_DONE;
1545+ else if (lw->frame[b->index].state != F_UNUSED)
1546+ b->flags |= V4L2_BUF_FLAG_QUEUED;
1547+
1548+ return 0;
1549+}
1550+
1551+static int timblogiw_qbuf(struct timblogiw *lw, struct v4l2_buffer *b)
1552+{
1553+ unsigned long lock_flags;
1554+
1555+ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1556+ b->index >= lw->num_frames)
1557+ return -EINVAL;
1558+
1559+ if (lw->frame[b->index].state != F_UNUSED)
1560+ return -EAGAIN;
1561+
1562+ if (b->memory != V4L2_MEMORY_MMAP)
1563+ return -EINVAL;
1564+
1565+ lw->frame[b->index].state = F_QUEUED;
1566+
1567+ spin_lock_irqsave(&lw->queue_lock, lock_flags);
1568+ list_add_tail(&lw->frame[b->index].frame, &lw->inqueue);
1569+ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
1570+
1571+ return 0;
1572+}
1573+
1574+static int timblogiw_dqbuf(struct timblogiw *lw, struct file *file,
1575+ struct v4l2_buffer *b)
1576+{
1577+ struct timblogiw_frame *f;
1578+ unsigned long lock_flags;
1579+ int ret = 0;
1580+
1581+ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
1582+ dbg("%s - VIDIOC_DQBUF, illegal buf type!\n",
1583+ __func__);
1584+ return -EINVAL;
1585+ }
1586+
1587+ if (list_empty(&lw->outqueue)) {
1588+ if (file->f_flags & O_NONBLOCK)
1589+ return -EAGAIN;
1590+
1591+ ret = wait_event_interruptible(lw->wait_frame,
1592+ !list_empty(&lw->outqueue));
1593+ if (ret)
1594+ return ret;
1595+ }
1596+
1597+ spin_lock_irqsave(&lw->queue_lock, lock_flags);
1598+ f = list_entry(lw->outqueue.next,
1599+ struct timblogiw_frame, frame);
1600+ list_del(lw->outqueue.next);
1601+ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
1602+
1603+ f->state = F_UNUSED;
1604+ memcpy(b, &f->buf, sizeof(*b));
1605+
1606+ if (f->vma_use_count)
1607+ b->flags |= V4L2_BUF_FLAG_MAPPED;
1608+
1609+ return 0;
1610+}
1611+
1612+static int timblogiw_enumstd(struct timblogiw *lw, struct v4l2_standard *std)
1613+{
1614+ if (std->index != 0)
1615+ return -EINVAL;
1616+
1617+ memset(std, 0, sizeof(*std));
1618+ std->index = 0;
1619+
1620+ std->id = V4L2_STD_PAL;
1621+ strncpy(std->name, "PAL", sizeof(std->name)-1);
1622+
1623+ return 0;
1624+}
1625+
1626+static int timblogiw_g_std(struct timblogiw *lw, v4l2_std_id *std)
1627+{
1628+ *std = V4L2_STD_PAL;
1629+ return 0;
1630+}
1631+
1632+static int timblogiw_s_std(struct timblogiw *lw, v4l2_std_id *std)
1633+{
1634+ if (!(*std & V4L2_STD_PAL))
1635+ return -EINVAL;
1636+ return 0;
1637+}
1638+
1639+static int timblogiw_enuminput(struct timblogiw *lw, struct v4l2_input *inp)
1640+{
1641+ if (inp->index != 0)
1642+ return -EINVAL;
1643+
1644+ memset(inp, 0, sizeof(*inp));
1645+ inp->index = 0;
1646+
1647+ strncpy(inp->name, "Timb input 1", sizeof(inp->name) - 1);
1648+ inp->type = V4L2_INPUT_TYPE_CAMERA;
1649+ inp->std = V4L2_STD_ALL;
1650+
1651+ return 0;
1652+}
1653+
1654+static int timblogiw_g_input(struct timblogiw *lw, int *input)
1655+{
1656+ *input = 0;
1657+
1658+ return 0;
1659+}
1660+
1661+static int timblogiw_s_input(struct timblogiw *lw, int *input)
1662+{
1663+ if (*input != 0)
1664+ return -EINVAL;
1665+ return 0;
1666+}
1667+
1668+static int timblogiw_streamon(struct timblogiw *lw, int *type)
1669+{
1670+ struct timblogiw_frame *f;
1671+
1672+ if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
1673+ dbg("%s - unsupported buffer type\n", __func__);
1674+ return -EINVAL;
1675+ }
1676+
1677+ if (list_empty(&lw->inqueue)) {
1678+ dbg("%s - inqueue is empty\n", __func__);
1679+ return -EINVAL;
1680+ }
1681+
1682+ if (lw->stream == STREAM_ON)
1683+ return 0;
1684+
1685+ lw->stream = STREAM_ON;
1686+
1687+ f = list_entry(lw->inqueue.next,
1688+ struct timblogiw_frame, frame);
1689+
1690+ dbg("%s - f size: %d, bpr: %d, dma addr: %x\n", __func__,
1691+ lw->frame_size, lw->bytesperline,
1692+ (unsigned int)lw->dma.transfer[lw->dma.curr].handle);
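+ /* kick off the first transfer; the ISR re-arms the DMA for each following frame */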
1693+ timb_start_dma(DMA_IRQ_VIDEO_RX,
1694+ lw->dma.transfer[lw->dma.curr].handle,
1695+ lw->frame_size, lw->bytesperline);
1696+
1697+ return 0;
1698+}
1699+
1700+static int timblogiw_streamoff(struct timblogiw *lw, int *type)
1701+{
1702+ if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1703+ return -EINVAL;
1704+
1705+ if (lw->stream == STREAM_ON) {
1706+ unsigned long lock_flags;
1707+ spin_lock_irqsave(&lw->queue_lock, lock_flags);
1708+ timb_stop_dma(DMA_IRQ_VIDEO_RX);
1709+ lw->stream = STREAM_OFF;
1710+ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
1711+ }
1712+ timblogiw_empty_framequeues(lw);
1713+
1714+ return 0;
1715+}
1716+
1717+static int timblogiw_querystd(struct timblogiw *lw, v4l2_std_id *std)
1718+{
1719+ /* TODO: Ask encoder */
1720+ *std = V4L2_STD_PAL;
1721+ return 0;
1722+}
1723+
1724+static int timblogiw_enum_framsizes(struct timblogiw *lw,
1725+ struct v4l2_frmsizeenum *fsize)
1726+{
1727+ if ((fsize->index != 0) ||
1728+ (fsize->pixel_format != V4L2_PIX_FMT_YUYV))
1729+ return -EINVAL;
1730+
1731+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1732+ fsize->discrete.width = lw->width;
1733+ fsize->discrete.height = lw->height;
1734+
1735+ return 0;
1736+}
1737+
1738+static int timblogiw_g_parm(struct timblogiw *lw, struct v4l2_streamparm *sp)
1739+{
1740+ if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1741+ return -EINVAL;
1742+
1743+ sp->parm.capture.extendedmode = 0;
1744+ sp->parm.capture.readbuffers = lw->num_frames;
1745+ return 0;
1746+}
1747+
1748+/*******************************
1749+ * Device Operations functions *
1750+ *******************************/
1751+
1752+static int timblogiw_open(struct file *file)
1753+{
1754+ struct video_device *vdev = video_devdata(file);
1755+ struct timblogiw *lw = video_get_drvdata(vdev);
1756+
1757+ dbg("%s -\n", __func__);
1758+
1759+ mutex_init(&lw->fileop_lock);
1760+ spin_lock_init(&lw->queue_lock);
1761+ init_waitqueue_head(&lw->wait_frame);
1762+
1763+ mutex_lock(&lw->lock);
1764+
1765+ lw->width = 720; /* TODO: Should depend on tv norm */
1766+ lw->height = 576;
1767+ lw->frame_size = lw->width * lw->height * 2;
1768+ lw->bytesperline = lw->width * 2;
1769+
1770+ file->private_data = lw;
1771+ lw->stream = STREAM_OFF;
1772+ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
1773+
1774+ timblogiw_empty_framequeues(lw);
1775+
1776+ timb_set_dma_interruptcb(DMA_IRQ_VIDEO_RX | DMA_IRQ_VIDEO_DROP,
1777+ timblogiw_isr, (void *)lw);
1778+
1779+ mutex_unlock(&lw->lock);
1780+
1781+ return 0;
1782+}
1783+
1784+static int timblogiw_close(struct file *file)
1785+{
1786+ struct timblogiw *lw = file->private_data;
1787+
1788+ dbg("%s - entry\n", __func__);
1789+
1790+ mutex_lock(&lw->lock);
1791+
1792+ timb_stop_dma(DMA_IRQ_VIDEO_RX);
1793+ timb_set_dma_interruptcb(DMA_IRQ_VIDEO_RX | DMA_IRQ_VIDEO_DROP, NULL,
1794+ NULL);
1795+ timblogiw_release_buffers(lw);
1796+
1797+ mutex_unlock(&lw->lock);
1798+ return 0;
1799+}
1800+
1801+static ssize_t timblogiw_read(struct file *file, char __user *data,
1802+ size_t count, loff_t *ppos)
1803+{
1804+ dbg("%s - read request\n", __func__);
1805+ return -EINVAL;
1806+}
1807+
1808+static void timblogiw_vm_open(struct vm_area_struct *vma)
1809+{
1810+ struct timblogiw_frame *f = vma->vm_private_data;
1811+ f->vma_use_count++;
1812+}
1813+
1814+static void timblogiw_vm_close(struct vm_area_struct *vma)
1815+{
1816+ struct timblogiw_frame *f = vma->vm_private_data;
1817+ f->vma_use_count--;
1818+}
1819+
1820+static struct vm_operations_struct timblogiw_vm_ops = {
1821+ .open = timblogiw_vm_open,
1822+ .close = timblogiw_vm_close,
1823+};
1824+
1825+static int timblogiw_mmap(struct file *filp, struct vm_area_struct *vma)
1826+{
1827+ unsigned long size = vma->vm_end - vma->vm_start, start = vma->vm_start;
1828+ void *pos;
1829+ u32 i;
1830+ int ret = -EINVAL;
1831+
1832+ struct timblogiw *lw = filp->private_data;
1833+ dbg("%s\n", __func__);
1834+
1835+ if (mutex_lock_interruptible(&lw->fileop_lock))
1836+ return -ERESTARTSYS;
1837+
1838+ if (!(vma->vm_flags & VM_WRITE) ||
1839+ size != PAGE_ALIGN(lw->frame[0].buf.length))
1840+ goto error_unlock;
1841+
1842+ for (i = 0; i < lw->num_frames; i++)
1843+ if ((lw->frame[i].buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
1844+ break;
1845+
1846+ if (i == lw->num_frames) {
1847+ dbg("%s - user supplied mapping address is out of range\n",
1848+ __func__);
1849+ goto error_unlock;
1850+ }
1851+
1852+ vma->vm_flags |= VM_IO;
1853+ vma->vm_flags |= VM_RESERVED; /* Do not swap out this VMA */
1854+
1855+ pos = lw->frame[i].bufmem;
1856+ while (size > 0) { /* size is page-aligned */
1857+ if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
1858+ dbg("%s - vm_insert_page failed\n", __func__);
1859+ ret = -EAGAIN;
1860+ goto error_unlock;
1861+ }
1862+ start += PAGE_SIZE;
1863+ pos += PAGE_SIZE;
1864+ size -= PAGE_SIZE;
1865+ }
1866+
1867+ vma->vm_ops = &timblogiw_vm_ops;
1868+ vma->vm_private_data = &lw->frame[i];
1869+ timblogiw_vm_open(vma);
1870+ ret = 0;
1871+
1872+error_unlock:
1873+ mutex_unlock(&lw->fileop_lock);
1874+ return ret;
1875+}
1876+
1877+static long
1878+timblogiw_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1879+{
1880+ struct timblogiw *lw = file->private_data;
1881+
1882+ switch (cmd) {
1883+
1884+ case VIDIOC_QUERYCAP:
1885+ {
1886+ dbg("%s - VIDIOC_QUERYCAP\n", __func__);
1887+ return timblogiw_querycap(lw, (struct v4l2_capability *)arg);
1888+ }
1889+
1890+ case VIDIOC_ENUM_FMT:
1891+ {
1892+ dbg("%s - VIDIOC_ENUM_FMT\n", __func__);
1893+ return timblogiw_enum_fmt(lw, (struct v4l2_fmtdesc *)arg);
1894+ }
1895+
1896+ case VIDIOC_G_FMT:
1897+ {
1898+ dbg("%s - VIDIOC_G_FMT\n", __func__);
1899+ return timblogiw_g_fmt(lw, (struct v4l2_format *) arg);
1900+ }
1901+
1902+ case VIDIOC_TRY_FMT:
1903+ case VIDIOC_S_FMT:
1904+ {
1905+ dbg("%s - VIDIOC_S_FMT\n", __func__);
1906+ return timblogiw_s_fmt(lw, (struct v4l2_format *)arg);
1907+ }
1908+
1909+ case VIDIOC_REQBUFS:
1910+ {
1911+ dbg("%s - VIDIOC_REQBUFS\n", __func__);
1912+ return timblogiw_reqbufs(lw, (struct v4l2_requestbuffers *)arg);
1913+ }
1914+
1915+ case VIDIOC_QUERYBUF:
1916+ {
1917+ dbg("%s - VIDIOC_QUERYBUF\n", __func__);
1918+ return timblogiw_querybuf(lw, (struct v4l2_buffer *)arg);
1919+ }
1920+
1921+ case VIDIOC_QBUF:
1922+ {
1923+ return timblogiw_qbuf(lw, (struct v4l2_buffer *)arg);
1924+ }
1925+
1926+ case VIDIOC_DQBUF:
1927+ {
1928+ return timblogiw_dqbuf(lw, file, (struct v4l2_buffer *)arg);
1929+ }
1930+
1931+ case VIDIOC_ENUMSTD:
1932+ {
1933+ dbg("%s - VIDIOC_ENUMSTD\n", __func__);
1934+ return timblogiw_enumstd(lw, (struct v4l2_standard *)arg);
1935+ }
1936+
1937+ case VIDIOC_G_STD:
1938+ {
1939+ dbg("%s - VIDIOC_G_STD\n", __func__);
1940+ return timblogiw_g_std(lw, (v4l2_std_id *)arg);
1941+ }
1942+
1943+ case VIDIOC_S_STD:
1944+ {
1945+ dbg("%s - VIDIOC_S_STD\n", __func__);
1946+ return timblogiw_s_std(lw, (v4l2_std_id *)arg);
1947+ }
1948+
1949+ case VIDIOC_ENUMINPUT:
1950+ {
1951+ dbg("%s - VIDIOC_ENUMINPUT\n", __func__);
1952+ return timblogiw_enuminput(lw, (struct v4l2_input *)arg);
1953+ }
1954+
1955+ case VIDIOC_G_INPUT:
1956+ {
1957+ dbg("%s - VIDIOC_G_INPUT\n", __func__);
1958+ return timblogiw_g_input(lw, (int *)arg);
1959+ }
1960+
1961+ case VIDIOC_S_INPUT:
1962+ {
1963+ dbg("%s - VIDIOC_S_INPUT\n", __func__);
1964+ return timblogiw_s_input(lw, (int *)arg);
1965+ }
1966+
1967+ case VIDIOC_STREAMON:
1968+ {
1969+ dbg("%s - VIDIOC_STREAMON\n", __func__);
1970+ return timblogiw_streamon(lw, (int *)arg);
1971+ }
1972+
1973+ case VIDIOC_STREAMOFF:
1974+ {
1975+ dbg("%s - VIDIOC_STREAMOFF\n", __func__);
1976+ return timblogiw_streamoff(lw, (int *)arg);
1977+ }
1978+
1979+ case VIDIOC_QUERYSTD:
1980+ {
1981+ dbg("%s - VIDIOC_QUERYSTD\n", __func__);
1982+ return timblogiw_querystd(lw, (v4l2_std_id *)arg);
1983+ }
1984+
1985+ case VIDIOC_ENUM_FRAMESIZES:
1986+ {
1987+ dbg("%s - VIDIOC_ENUM_FRAMESIZES\n", __func__);
1988+ return timblogiw_enum_framsizes(lw,
1989+ (struct v4l2_frmsizeenum *)arg);
1990+ }
1991+
1992+ case VIDIOC_G_PARM:
1993+ {
1994+ dbg("%s - VIDIOC_G_PARM\n", __func__);
1995+ return timblogiw_g_parm(lw, (struct v4l2_streamparm *)arg);
1996+ }
1997+
1998+ default:
1999+ {
2000+ dbg("%s Unknown command, dir: %x, type: %x, nr: %x, size: %x\n",
2001+ __func__,
2002+ _IOC_DIR(cmd),
2003+ _IOC_TYPE(cmd),
2004+ _IOC_NR(cmd),
2005+ _IOC_SIZE(cmd));
2006+ break;
2007+ }
2008+ }
2009+
2010+ return -EINVAL;
2011+}
2012+
2013+void timblogiw_vdev_release(struct video_device *vdev)
2014+{
2015+ kfree(vdev);
2016+}
2017+
2018+static const struct v4l2_file_operations timblogiw_fops = {
2019+ .owner = THIS_MODULE,
2020+ .open = timblogiw_open,
2021+ .release = timblogiw_close,
2022+ .ioctl = timblogiw_ioctl,
2023+ .mmap = timblogiw_mmap,
2024+ .read = timblogiw_read,
2025+};
2026+
2027+static const struct video_device timblogiw_template = {
2028+ .name = TIMBLOGIWIN_NAME,
2029+ .fops = &timblogiw_fops,
2030+ .release = &timblogiw_vdev_release,
2031+ .minor = -1
2032+};
2033+
2034+static int timblogiw_probe(struct platform_device *dev)
2035+{
2036+ int err;
2037+ struct timblogiw *lw;
2038+ struct resource *iomem;
2039+
2040+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
2041+ if (!iomem) {
2042+ err = -EINVAL;
2043+ goto err_mem;
2044+ }
2045+
2046+ lw = kzalloc(sizeof(*lw), GFP_KERNEL);
2047+ if (!lw) {
2048+ err = -EINVAL;
2049+ goto err_mem;
2050+ }
2051+
2052+ /* find the PCI device from the parent... */
2053+ if (!dev->dev.parent) {
2054+ printk(KERN_ERR "timblogiw: No parent device found\n");
2055+ err = -ENODEV;
2056+ goto err_mem;
2057+ }
2058+
2059+ lw->dev = container_of(dev->dev.parent, struct pci_dev, dev);
2060+
2061+ mutex_init(&lw->lock);
2062+
2063+ lw->video_dev = video_device_alloc();
2064+ if (!lw->video_dev) {
2065+ err = -ENOMEM;
2066+ goto err_video_req;
2067+ }
2068+ *lw->video_dev = timblogiw_template;
2069+
2070+ err = video_register_device(lw->video_dev, VFL_TYPE_GRABBER, 0);
2071+ if (err) {
2072+ video_device_release(lw->video_dev);
2073+ printk(KERN_ALERT "Error reg video\n");
2074+ goto err_video_req;
2075+ }
2076+
2077+ tasklet_init(&lw->tasklet, timblogiw_handleframe, (unsigned long)lw);
2078+
2079+ if (!request_mem_region(iomem->start, resource_size(iomem),
2080+ "timb-video")) {
2081+ err = -EBUSY;
2082+ goto err_request;
2083+ }
2084+
2085+ lw->membase = ioremap(iomem->start, resource_size(iomem));
2086+ if (!lw->membase) {
2087+ err = -ENOMEM;
2088+ goto err_ioremap;
2089+ }
2090+
2091+ platform_set_drvdata(dev, lw);
2092+ video_set_drvdata(lw->video_dev, lw);
2093+
2094+ return 0;
2095+
2096+err_ioremap:
2097+ release_mem_region(iomem->start, resource_size(iomem));
2098+err_request:
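+ /* a registered device (minor != -1) must be unregistered, not just released */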
2099+ if (-1 != lw->video_dev->minor)
2100+ video_unregister_device(lw->video_dev);
2101+ else
2102+ video_device_release(lw->video_dev);
2103+err_video_req:
2104+ kfree(lw);
2105+err_mem:
2106+ printk(KERN_ERR
2107+ "timberdale: Failed to register Timberdale Video In: %d\n",
2108+ err);
2109+
2110+ return err;
2111+}
2112+
2113+static int timblogiw_remove(struct platform_device *dev)
2114+{
2115+ struct timblogiw *lw = platform_get_drvdata(dev);
2116+ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
2117+
2118+ if (-1 != lw->video_dev->minor)
2119+ video_unregister_device(lw->video_dev);
2120+ else
2121+ video_device_release(lw->video_dev);
2122+
2123+ tasklet_kill(&lw->tasklet);
2124+ iounmap(lw->membase);
2125+ release_mem_region(iomem->start, resource_size(iomem));
2126+ kfree(lw);
2127+
2128+ return 0;
2129+}
2130+
2131+static struct platform_driver timblogiw_platform_driver = {
2132+ .driver = {
2133+ .name = "timb-video",
2134+ .owner = THIS_MODULE,
2135+ },
2136+ .probe = timblogiw_probe,
2137+ .remove = timblogiw_remove,
2138+};
2139+
2140+/*--------------------------------------------------------------------------*/
2141+
2142+static int __init timblogiw_init(void)
2143+{
2144+ return platform_driver_register(&timblogiw_platform_driver);
2145+}
2146+
2147+static void __exit timblogiw_exit(void)
2148+{
2149+ platform_driver_unregister(&timblogiw_platform_driver);
2150+}
2151+
2152+module_init(timblogiw_init);
2153+module_exit(timblogiw_exit);
2154+
2155+MODULE_DESCRIPTION("Timberdale Video In driver");
2156+MODULE_LICENSE("GPL v2");
2157+MODULE_ALIAS("platform:timb-video");
2158+
2159diff -uNr linux-2.6.29-clean/drivers/media/video/timblogiw.h linux-2.6.29/drivers/media/video/timblogiw.h
2160--- linux-2.6.29-clean/drivers/media/video/timblogiw.h 1969-12-31 16:00:00.000000000 -0800
2161+++ linux-2.6.29/drivers/media/video/timblogiw.h 2009-04-06 13:51:47.000000000 -0700
2162@@ -0,0 +1,95 @@
2163+/*
2164+ * timblogiw.h timberdale FPGA LogiWin Video In driver defines
2165+ * Copyright (c) 2009 Intel Corporation
2166+ *
2167+ * This program is free software; you can redistribute it and/or modify
2168+ * it under the terms of the GNU General Public License version 2 as
2169+ * published by the Free Software Foundation.
2170+ *
2171+ * This program is distributed in the hope that it will be useful,
2172+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
2173+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2174+ * GNU General Public License for more details.
2175+ *
2176+ * You should have received a copy of the GNU General Public License
2177+ * along with this program; if not, write to the Free Software
2178+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
2179+ */
2180+
2181+/* Supports:
2182+ * Timberdale FPGA LogiWin Video In
2183+ */
2184+
2185+#ifndef _TIMBLOGIW_H
2186+#define _TIMBLOGIW_H
2187+
2188+#include <linux/interrupt.h>
2189+
2190+#define TIMBLOGIWIN_NAME "Timberdale Video-In"
2191+
2192+#define TIMBLOGIW_NUM_FRAMES 10
2193+
2194+
2195+enum timblogiw_stream_state {
2196+ STREAM_OFF,
2197+ STREAM_ON,
2198+};
2199+
2200+enum timblogiw_frame_state {
2201+ F_UNUSED = 0,
2202+ F_QUEUED,
2203+ F_GRABBING,
2204+ F_DONE,
2205+ F_ERROR,
2206+};
2207+
2208+struct timblogiw_frame {
2209+ void *bufmem;
2210+ struct v4l2_buffer buf;
2211+ enum timblogiw_frame_state state;
2212+ struct list_head frame;
2213+ unsigned long vma_use_count;
2214+};
2215+
2216+struct timblogiw_tvnorm {
2217+ int v4l2_id;
2218+ char *name;
2219+ u16 swidth;
2220+ u16 sheight;
2221+};
2222+
2223+
2224+struct timbdma_transfer {
2225+ dma_addr_t handle;
2226+ void *buf;
2227+};
2228+
2229+struct timbdma_control {
2230+ struct timbdma_transfer transfer[2];
2231+ struct timbdma_transfer *filled;
2232+ int curr;
2233+};
2234+
2235+struct timblogiw {
2236+ struct i2c_client *decoder;
2237+ struct timblogiw_frame frame[TIMBLOGIW_NUM_FRAMES];
2238+ int num_frames;
2239+ unsigned int frame_count;
2240+ struct list_head inqueue, outqueue;
2241+ spinlock_t queue_lock; /* mutual exclusion */
2242+ enum timblogiw_stream_state stream;
2243+ struct video_device *video_dev;
2244+ struct mutex lock, fileop_lock;
2245+ wait_queue_head_t wait_frame;
2246+ int width;
2247+ int height;
2248+ u32 frame_size;
2249+ int bytesperline;
2250+ struct pci_dev *dev;
2251+ struct timbdma_control dma;
2252+ void __iomem *membase;
2253+ struct tasklet_struct tasklet;
2254+};
2255+
2256+#endif /* _TIMBLOGIW_H */
2257+
2258diff -uNr linux-2.6.29-clean/drivers/mfd/Kconfig linux-2.6.29/drivers/mfd/Kconfig
2259--- linux-2.6.29-clean/drivers/mfd/Kconfig 2009-04-01 09:20:24.000000000 -0700
2260+++ linux-2.6.29/drivers/mfd/Kconfig 2009-04-06 13:51:47.000000000 -0700
2261@@ -240,6 +240,27 @@
2262 Say yes here if you want to include support GPIO for pins on
2263 the PCF50633 chip.
2264
2265+config MFD_TIMBERDALE
2266+ bool "Support for Timberdale"
2267+ select MFD_CORE
2268+ ---help---
2269+ This is the core driver for the timberdale FPGA, a
2270+ multifunctional device which may provide numerous interfaces.
2271+
2272+config MFD_TIMBERDALE_DMA
2273+ tristate "Support for timberdale DMA"
2274+ depends on MFD_TIMBERDALE
2275+ ---help---
2276+ Add support for the DMA block inside the timberdale FPGA. This makes
2277+ it possible to do DMA transfers directly to some of the blocks inside the FPGA.
2278+
2279+config MFD_TIMBERDALE_I2S
2280+ tristate "Support for timberdale I2S bus"
2281+ depends on MFD_TIMBERDALE
2282+ ---help---
2283+ Add support for the I2S bus handled by the timberdale FPGA.
2284+ I2S RX and TX instances are then available for other devices to use.
2285+
2286 endmenu
2287
2288 menu "Multimedia Capabilities Port drivers"
2289diff -uNr linux-2.6.29-clean/drivers/mfd/Makefile linux-2.6.29/drivers/mfd/Makefile
2290--- linux-2.6.29-clean/drivers/mfd/Makefile 2009-04-01 09:20:24.000000000 -0700
2291+++ linux-2.6.29/drivers/mfd/Makefile 2009-04-06 13:51:47.000000000 -0700
2292@@ -40,4 +40,8 @@
2293
2294 obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o
2295 obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
2296-obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
2297\ No newline at end of file
2298+obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
2299+
2300+obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
2301+obj-$(CONFIG_MFD_TIMBERDALE_DMA) += timbdma.o
2302+obj-$(CONFIG_MFD_TIMBERDALE_I2S) += timbi2s.o
2303diff -uNr linux-2.6.29-clean/drivers/mfd/timbdma.c linux-2.6.29/drivers/mfd/timbdma.c
2304--- linux-2.6.29-clean/drivers/mfd/timbdma.c 1969-12-31 16:00:00.000000000 -0800
2305+++ linux-2.6.29/drivers/mfd/timbdma.c 2009-04-06 13:51:47.000000000 -0700
2306@@ -0,0 +1,301 @@
2307+/*
2308+ * timbdma.c timberdale FPGA DMA driver
2309+ * Copyright (c) 2009 Intel Corporation
2310+ *
2311+ * This program is free software; you can redistribute it and/or modify
2312+ * it under the terms of the GNU General Public License version 2 as
2313+ * published by the Free Software Foundation.
2314+ *
2315+ * This program is distributed in the hope that it will be useful,
2316+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
2317+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2318+ * GNU General Public License for more details.
2319+ *
2320+ * You should have received a copy of the GNU General Public License
2321+ * along with this program; if not, write to the Free Software
2322+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
2323+ */
2324+
2325+/* Supports:
2326+ * Timberdale FPGA DMA engine
2327+ */
2328+
2329+#include <linux/version.h>
2330+#include <linux/module.h>
2331+#include <linux/pci.h>
2332+#include <linux/interrupt.h>
2333+#include <linux/platform_device.h>
2334+
2335+#include <linux/mfd/timbdma.h>
2336+
2337+static struct timbdma_dev *self_g;
2338+
2339+static irqreturn_t timbdma_handleinterrupt(int irq, void *devid)
2340+{
2341+ struct timbdma_dev *dev = (struct timbdma_dev *)devid;
2342+ int ipr;
2343+ int i;
2344+
2345+ ipr = ioread32(dev->membase + timbdma_ctrlmap_TIMBPEND);
2346+
2347+ /* ack */
2348+ iowrite32(ipr, dev->membase + timbdma_ctrlmap_TIMBSTATUS);
2349+
2350+ /* call the callbacks */
2351+ for (i = 0; i < DMA_IRQS; i++) {
2352+ int mask = 1 << i;
2353+ if ((ipr & mask) && dev->callbacks[i])
2354+ dev->callbacks[i](mask, dev->callback_data[i]);
2355+ }
2356+
2357+ if (ipr)
2358+ return IRQ_HANDLED;
2359+ else
2360+ return IRQ_NONE;
2361+}
2362+
2363+
2364+void timb_start_dma(u32 flag, unsigned long buf, int len, int bytes_per_row)
2365+{
2366+ int i;
2367+ unsigned long irqflags;
2368+ struct timbdma_dev *dev = self_g;
2369+
2370+ spin_lock_irqsave(&dev->lock, irqflags);
2371+
2372+ /* now enable the DMA transfer */
2373+ for (i = 0; i < DMA_IRQS; i++)
2374+ if (flag & (1 << i)) {
2375+ u32 offset = i / 2 * 0x40;
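+ /* even flag bits select RX channels, odd bits TX; each RX/TX pair shares a 0x40 byte register window */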
2376+
2377+ if (!(i % 2)) {
2378+ /* RX */
2379+ /* bytes per row */
2380+ iowrite32(bytes_per_row, dev->membase + offset +
2381+ timbdma_dmacfg_BPERROW);
2382+ /* address high */
2383+ iowrite32(0, dev->membase + offset +
2384+ timbdma_dmacfg_RXSTARTH);
2385+ /* address low */
2386+ iowrite32(buf, dev->membase + offset +
2387+ timbdma_dmacfg_RXSTARTL);
2388+ /* Length */
2389+ iowrite32(len, dev->membase + offset +
2390+ timbdma_dmacfg_RXLENGTH);
2391+ /* Clear rx sw read pointer */
2392+ iowrite32(0, dev->membase + offset +
2393+ timbdma_dmacfg_RXSWRP);
2394+ /* enable the transfer */
2395+ iowrite32(1, dev->membase + offset +
2396+ timbdma_dmacfg_RXENABLE);
2397+ } else {
2398+ /* TX */
2399+ /* address high */
2400+ iowrite32(0, dev->membase + offset +
2401+ timbdma_dmacfg_TXSTARTH);
2402+ /* address low */
2403+ iowrite32(buf, dev->membase + offset +
2404+ timbdma_dmacfg_TXSTARTL);
2405+ /* Length */
2406+ iowrite32(len, dev->membase + offset +
2407+ timbdma_dmacfg_TXLENGTH);
2408+ /* Set tx sw write pointer */
2409+ iowrite32(len, dev->membase + offset +
2410+ timbdma_dmacfg_TXSWWP);
2411+ }
2412+
2413+ /* only allow one bit in the flag field */
2414+ break;
2415+ }
2416+ spin_unlock_irqrestore(&dev->lock, irqflags);
2417+}
2418+EXPORT_SYMBOL(timb_start_dma);
2419+
2420+void *timb_stop_dma(u32 flags)
2421+{
2422+ int i;
2423+ unsigned long irqflags;
2424+ struct timbdma_dev *dev = self_g;
2425+ void *result = 0;
2426+
2427+ spin_lock_irqsave(&dev->lock, irqflags);
2428+
2429+ /* now disable the DMA transfers */
2430+ for (i = 0; i < DMA_IRQS; i++)
2431+ if (flags & (1 << i)) {
2432+ /*
2433+ RX enable registers are located at:
2434+ 0x14
2435+ 0x54
2436+ 0x94
2437+
2438+ TX SW pointer registers are located at:
2439+ 0x24
2440+ 0x64
2441+ */
2442+ u32 offset = i / 2 * 0x40;
2443+ u32 result_offset = offset;
2444+ if (!(i % 2)) {
2445+ /* even -> RX enable */
2446+ offset += timbdma_dmacfg_RXENABLE;
2447+ result_offset += timbdma_dmacfg_RXFPGAWP;
2448+ } else {
2449+ /* odd -> TX SW pointer reg */
2450+ offset += timbdma_dmacfg_TXSWWP;
2451+ result_offset += timbdma_dmacfg_TXFPGARP;
2452+ }
2453+
2454+ iowrite32(0, dev->membase + offset);
2455+ /* check how far the FPGA has written/read */
2456+ result = (void *)ioread32(dev->membase + result_offset);
2457+ }
2458+
2459+ /* ack any pending IRQs */
2460+ iowrite32(flags, dev->membase + timbdma_ctrlmap_TIMBSTATUS);
2461+
2462+ spin_unlock_irqrestore(&dev->lock, irqflags);
2463+
2464+ return result;
2465+}
2466+EXPORT_SYMBOL(timb_stop_dma);
2467+
2468+void timb_set_dma_interruptcb(u32 flags, timbdma_interruptcb icb, void *data)
2469+{
2470+ int i;
2471+ unsigned long irqflags;
2472+ struct timbdma_dev *dev = self_g;
2473+ u32 ier;
2474+
2475+ spin_lock_irqsave(&dev->lock, irqflags);
2476+
2477+ for (i = 0; i < DMA_IRQS; i++)
2478+ if (flags & (1 << i)) {
2479+ dev->callbacks[i] = icb;
2480+ dev->callback_data[i] = data;
2481+ }
2482+
2483+ /* Ack any pending IRQ */
2484+ iowrite32(flags, dev->membase + timbdma_ctrlmap_TIMBSTATUS);
2485+
2486+ /* if a null callback is given -> clear interrupt, else -> enable */
2487+ ier = ioread32(dev->membase + timbdma_ctrlmap_TIMBENABLE);
2488+ if (icb != NULL)
2489+ ier |= flags;
2490+ else
2491+ ier &= ~flags;
2492+ iowrite32(ier, dev->membase + timbdma_ctrlmap_TIMBENABLE);
2493+
2494+ spin_unlock_irqrestore(&dev->lock, irqflags);
2495+}
2496+EXPORT_SYMBOL(timb_set_dma_interruptcb);
2497+
2498+static int timbdma_probe(struct platform_device *dev)
2499+{
2500+ int err, irq;
2501+ struct timbdma_dev *self;
2502+ struct resource *iomem;
2503+
2504+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
2505+ if (!iomem) {
2506+ err = -EINVAL;
2507+ goto err_mem;
2508+ }
2509+
2510+ self = kzalloc(sizeof(*self), GFP_KERNEL);
2511+ if (!self) {
2512+ err = -EINVAL;
2513+ goto err_mem;
2514+ }
2515+
2516+ spin_lock_init(&self->lock);
2517+
2518+ if (!request_mem_region(iomem->start,
2519+ resource_size(iomem), "timb-dma")) {
2520+ err = -EBUSY;
2521+ goto err_request;
2522+ }
2523+
2524+ self->membase = ioremap(iomem->start, resource_size(iomem));
2525+ if (!self->membase) {
2526+ printk(KERN_ERR "timbdma: Failed to remap I/O memory\n");
2527+ err = -ENOMEM;
2528+ goto err_ioremap;
2529+ }
2530+
2531+ /* register interrupt */
2532+ irq = platform_get_irq(dev, 0);
2533+ if (irq < 0) {
2534+ err = irq;
2535+ goto err_get_irq;
2536+ }
2537+
2538+ /* request IRQ */
2539+ err = request_irq(irq, timbdma_handleinterrupt, IRQF_SHARED,
2540+ "timb-dma", self);
2541+ if (err) {
2542+ printk(KERN_ERR "timbdma: Failed to request IRQ\n");
2543+ goto err_get_irq;
2544+ }
2545+
2546+ platform_set_drvdata(dev, self);
2547+
2548+ /* assign the global pointer */
2549+ self_g = self;
2550+
2551+ return 0;
2552+
2553+err_get_irq:
2554+ iounmap(self->membase);
2555+err_ioremap:
2556+ release_mem_region(iomem->start, resource_size(iomem));
2557+err_request:
2558+ kfree(self);
2559+err_mem:
2560+ printk(KERN_ERR "timberdale: Failed to register Timberdale DMA: %d\n",
2561+ err);
2562+
2563+ return err;
2564+}
2565+
2566+static int timbdma_remove(struct platform_device *dev)
2567+{
2568+ struct timbdma_dev *self = platform_get_drvdata(dev);
2569+ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
2570+
2571+ free_irq(platform_get_irq(dev, 0), self);
2572+ iounmap(self->membase);
2573+ release_mem_region(iomem->start, resource_size(iomem));
2574+ kfree(self);
2575+ self_g = NULL;
2576+ return 0;
2577+}
2578+
2579+static struct platform_driver timbdma_platform_driver = {
2580+ .driver = {
2581+ .name = "timb-dma",
2582+ .owner = THIS_MODULE,
2583+ },
2584+ .probe = timbdma_probe,
2585+ .remove = timbdma_remove,
2586+};
2587+
2588+/*--------------------------------------------------------------------------*/
2589+
2590+static int __init timbdma_init(void)
2591+{
2592+ self_g = NULL;
2593+ return platform_driver_register(&timbdma_platform_driver);
2594+}
2595+
2596+static void __exit timbdma_exit(void)
2597+{
2598+ platform_driver_unregister(&timbdma_platform_driver);
2599+}
2600+
2601+module_init(timbdma_init);
2602+module_exit(timbdma_exit);
2603+
2604+MODULE_DESCRIPTION("Timberdale DMA driver");
2605+MODULE_LICENSE("GPL v2");
2606+MODULE_ALIAS("platform:timb-dma");
2607+
2608diff -uNr linux-2.6.29-clean/drivers/mfd/timberdale.c linux-2.6.29/drivers/mfd/timberdale.c
2609--- linux-2.6.29-clean/drivers/mfd/timberdale.c 1969-12-31 16:00:00.000000000 -0800
2610+++ linux-2.6.29/drivers/mfd/timberdale.c 2009-04-06 13:51:47.000000000 -0700
2611@@ -0,0 +1,599 @@
2612+/*
2613+ * timberdale.c timberdale FPGA mfd shim driver
2614+ * Copyright (c) 2009 Intel Corporation
2615+ *
2616+ * This program is free software; you can redistribute it and/or modify
2617+ * it under the terms of the GNU General Public License version 2 as
2618+ * published by the Free Software Foundation.
2619+ *
2620+ * This program is distributed in the hope that it will be useful,
2621+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
2622+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2623+ * GNU General Public License for more details.
2624+ *
2625+ * You should have received a copy of the GNU General Public License
2626+ * along with this program; if not, write to the Free Software
2627+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
2628+ */
2629+
2630+/* Supports:
2631+ * Timberdale FPGA
2632+ */
2633+
2634+#include <linux/kernel.h>
2635+#include <linux/module.h>
2636+#include <linux/pci.h>
2637+#include <linux/msi.h>
2638+#include <linux/init.h>
2639+#include <linux/interrupt.h>
2640+#include <linux/platform_device.h>
2641+#include <linux/mfd/core.h>
2642+#include <linux/irq.h>
2643+
2644+#include <linux/i2c.h>
2645+#include <linux/i2c-ocores.h>
2646+#include <linux/i2c/tsc2007.h>
2647+#include <linux/spi/xilinx_spi.h>
2648+#include "timberdale.h"
2649+
2650+struct timberdale_device {
2651+ resource_size_t intc_mapbase;
2652+ resource_size_t ctl_mapbase;
2653+ unsigned char __iomem *intc_membase;
2654+ unsigned char __iomem *ctl_membase;
2655+ int irq_base;
2656+ u32 irq_ack_mask;
2657+	/* locking from interrupts while modifying registers */
2658+ spinlock_t lock;
2659+};
2660+
2661+/*--------------------------------------------------------------------------*/
2662+
2663+struct tsc2007_platform_data timberdale_tsc2007_platform_data = {
2664+ .model = 2003,
2665+ .x_plate_ohms = 100
2666+};
2667+
2668+struct i2c_board_info timberdale_i2c_board_info[] = {
2669+ {
2670+ I2C_BOARD_INFO("tsc2003", 0x48),
2671+ .platform_data = &timberdale_tsc2007_platform_data,
2672+ .irq = IRQ_TIMBERDALE_TSC_INT
2673+ },
2674+ {
2675+ I2C_BOARD_INFO("adv7180", 0x42 >> 1),
2676+ .irq = IRQ_TIMBERDALE_ADV7180
2677+ }
2678+};
2679+
2680+static __devinitdata struct ocores_i2c_platform_data
2681+timberdale_i2c_platform_data = {
2682+ .regstep = 4,
2683+ .clock_khz = 62500,
2684+ .devices = timberdale_i2c_board_info,
2685+ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
2686+};
2687+
2688+static const __devinitconst struct resource timberdale_i2c_resources[] = {
2689+ {
2690+ .start = I2COFFSET,
2691+ .end = I2CEND,
2692+ .flags = IORESOURCE_MEM,
2693+ },
2694+ {
2695+ .start = IRQ_TIMBERDALE_I2C,
2696+ .end = IRQ_TIMBERDALE_I2C,
2697+ .flags = IORESOURCE_IRQ,
2698+ },
2699+};
2700+
2701+static __devinitdata struct xspi_platform_data timberdale_xspi_platform_data = {
2702+	.bus_num = -1,
2703+	/* according to the spec we can have up to 32 slaves; however,
2704+	 * as of the current (2009-03-06) revision of
2705+	 * Timberdale we can only handle 3 right now
2706+	 */
2707+ .num_chipselect = 3,
2708+ .speed_hz = 1953125, /* hardcoded value in IP, for now */
2709+ .cr_offset = 0x60,
2710+ .sr_offset = 0x64,
2711+ .txd_offset = 0x68,
2712+ .rxd_offset = 0x6c,
2713+ .ssr_offset = 0x70
2714+};
2715+
2716+static const __devinitconst struct resource timberdale_spi_resources[] = {
2717+ {
2718+ .start = SPIOFFSET,
2719+ .end = SPIEND,
2720+ .flags = IORESOURCE_MEM,
2721+ },
2722+ {
2723+ .start = IRQ_TIMBERDALE_SPI,
2724+ .end = IRQ_TIMBERDALE_SPI,
2725+ .flags = IORESOURCE_IRQ,
2726+ },
2727+};
2728+
2729+static const __devinitconst struct resource timberdale_eth_resources[] = {
2730+ {
2731+ .start = ETHOFFSET,
2732+ .end = ETHEND,
2733+ .flags = IORESOURCE_MEM,
2734+ },
2735+ {
2736+ .start = IRQ_TIMBERDALE_ETHSW_IF,
2737+ .end = IRQ_TIMBERDALE_ETHSW_IF,
2738+ .flags = IORESOURCE_IRQ,
2739+ },
2740+};
2741+
2742+static const __devinitconst struct resource timberdale_gpio_resources[] = {
2743+ {
2744+ .start = GPIOOFFSET,
2745+ .end = GPIOEND,
2746+ .flags = IORESOURCE_MEM,
2747+ },
2748+ {
2749+ .start = IRQ_TIMBERDALE_GPIO,
2750+ .end = IRQ_TIMBERDALE_GPIO,
2751+ .flags = IORESOURCE_IRQ,
2752+ },
2753+};
2754+
2755+
2756+static const __devinitconst struct resource timberdale_most_resources[] = {
2757+ {
2758+ .start = MOSTOFFSET,
2759+ .end = MOSTEND,
2760+ .flags = IORESOURCE_MEM,
2761+ },
2762+ {
2763+ .start = IRQ_TIMBERDALE_MLB,
2764+ .end = IRQ_TIMBERDALE_MLB,
2765+ .flags = IORESOURCE_IRQ,
2766+ },
2767+};
2768+
2769+static const __devinitconst struct resource timberdale_uart_resources[] = {
2770+ {
2771+ .start = UARTOFFSET,
2772+ .end = UARTEND,
2773+ .flags = IORESOURCE_MEM,
2774+ },
2775+ {
2776+ .start = IRQ_TIMBERDALE_UART,
2777+ .end = IRQ_TIMBERDALE_UART,
2778+ .flags = IORESOURCE_IRQ,
2779+ },
2780+};
2781+
2782+static const __devinitconst struct resource timberdale_i2s_resources[] = {
2783+ {
2784+ .start = I2SOFFSET,
2785+ .end = I2SEND,
2786+ .flags = IORESOURCE_MEM,
2787+ },
2788+ {
2789+ .start = IRQ_TIMBERDALE_I2S,
2790+ .end = IRQ_TIMBERDALE_I2S,
2791+ .flags = IORESOURCE_IRQ,
2792+ },
2793+};
2794+
2795+static const __devinitconst struct resource timberdale_video_resources[] = {
2796+ {
2797+ .start = LOGIWOFFSET,
2798+ .end = LOGIWEND,
2799+ .flags = IORESOURCE_MEM,
2800+ },
2801+	/*
2802+	 * Note that the "frame buffer" is located in the DMA area
2803+	 * starting at 0x1200000.
2804+	 */
2805+};
2806+
2807+static const __devinitconst struct resource timberdale_dma_resources[] = {
2808+ {
2809+ .start = DMAOFFSET,
2810+ .end = DMAEND,
2811+ .flags = IORESOURCE_MEM,
2812+ },
2813+ {
2814+ .start = IRQ_TIMBERDALE_DMA,
2815+ .end = IRQ_TIMBERDALE_DMA,
2816+ .flags = IORESOURCE_IRQ,
2817+ },
2818+};
2819+
2820+static __devinitdata struct mfd_cell timberdale_cells_bar0[] = {
2821+ {
2822+ .name = "timb-uart",
2823+ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
2824+ .resources = timberdale_uart_resources,
2825+ },
2826+ {
2827+ .name = "ocores-i2c",
2828+ .num_resources = ARRAY_SIZE(timberdale_i2c_resources),
2829+ .resources = timberdale_i2c_resources,
2830+ .platform_data = &timberdale_i2c_platform_data,
2831+ .data_size = sizeof(timberdale_i2c_platform_data),
2832+ },
2833+ {
2834+ .name = "timb-gpio",
2835+ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
2836+ .resources = timberdale_gpio_resources,
2837+ },
2838+ {
2839+ .name = "timb-i2s",
2840+ .num_resources = ARRAY_SIZE(timberdale_i2s_resources),
2841+ .resources = timberdale_i2s_resources,
2842+ },
2843+ {
2844+ .name = "timb-most",
2845+ .num_resources = ARRAY_SIZE(timberdale_most_resources),
2846+ .resources = timberdale_most_resources,
2847+ },
2848+ {
2849+ .name = "timb-video",
2850+ .num_resources = ARRAY_SIZE(timberdale_video_resources),
2851+ .resources = timberdale_video_resources,
2852+ },
2853+ {
2854+ .name = "xilinx_spi",
2855+ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
2856+ .resources = timberdale_spi_resources,
2857+		.platform_data = &timberdale_xspi_platform_data,
2858+		.data_size = sizeof(timberdale_xspi_platform_data),
2859+ },
2860+ {
2861+ .name = "ks884x",
2862+ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
2863+ .resources = timberdale_eth_resources,
2864+ },
2865+ {
2866+ .name = "timb-dma",
2867+ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
2868+ .resources = timberdale_dma_resources,
2869+ },
2870+};
2871+
2872+static const __devinitconst struct resource timberdale_sdhc_resources_bar1[] = {
2873+ {
2874+ .start = SDHC0OFFSET,
2875+ .end = SDHC0END,
2876+ .flags = IORESOURCE_MEM,
2877+ },
2878+ {
2879+ .start = IRQ_TIMBERDALE_SDHC,
2880+ .end = IRQ_TIMBERDALE_SDHC,
2881+ .flags = IORESOURCE_IRQ,
2882+ },
2883+};
2884+
2885+static __devinitdata struct mfd_cell timberdale_cells_bar1[] = {
2886+ {
2887+ .name = "sdhci",
2888+ .num_resources = ARRAY_SIZE(timberdale_sdhc_resources_bar1),
2889+ .resources = timberdale_sdhc_resources_bar1,
2890+ },
2891+};
2892+
2893+/*--------------------------------------------------------------------------*/
2894+
2895+
2896+/* Handle the timberdale interrupt mux */
2897+static void timberdale_irq(unsigned int irq, struct irq_desc *desc)
2898+{
2899+ struct timberdale_device *priv = get_irq_data(irq);
2900+ unsigned int i, ipr;
2901+
2902+ desc->chip->ack(irq);
2903+
2904+ while ((ipr = ioread32(priv->intc_membase + IPR))) {
2905+ priv->irq_ack_mask = 0;
2906+ for (i = 0; i < TIMBERDALE_NR_IRQS; i++)
2907+ if (ipr & (1 << i))
2908+ generic_handle_irq(priv->irq_base + i);
2909+ if (priv->irq_ack_mask)
2910+ iowrite32(priv->irq_ack_mask, priv->intc_membase + IAR);
2911+ }
2912+}
2913+
2914+static void timberdale_irq_mask(unsigned int irq)
2915+{
2916+ struct timberdale_device *priv = get_irq_chip_data(irq);
2917+ unsigned long flags;
2918+
2919+ spin_lock_irqsave(&priv->lock, flags);
2920+ iowrite32(1 << (irq - priv->irq_base), priv->intc_membase + CIE);
2921+ spin_unlock_irqrestore(&priv->lock, flags);
2922+}
2923+
2924+static void timberdale_irq_unmask(unsigned int irq)
2925+{
2926+ struct timberdale_device *priv = get_irq_chip_data(irq);
2927+ unsigned long flags;
2928+
2929+ spin_lock_irqsave(&priv->lock, flags);
2930+ iowrite32(1 << (irq - priv->irq_base), priv->intc_membase + SIE);
2931+ spin_unlock_irqrestore(&priv->lock, flags);
2932+}
2933+
2934+static void timberdale_irq_ack(unsigned int irq)
2935+{
2936+ struct timberdale_device *priv = get_irq_chip_data(irq);
2937+ unsigned long flags;
2938+ u32 ack_mask = 1 << (irq - priv->irq_base);
2939+
2940+ spin_lock_irqsave(&priv->lock, flags);
2941+	/* if edge triggered, ack directly. Otherwise ack at the end of
2942+	 * the IRQ handler
2943+	 */
2944+ if (ack_mask & IRQ_TIMBERDALE_EDGE_MASK)
2945+ iowrite32(ack_mask, priv->intc_membase + IAR);
2946+ else
2947+ priv->irq_ack_mask |= ack_mask;
2948+ spin_unlock_irqrestore(&priv->lock, flags);
2949+}
2950+
2951+static struct irq_chip timberdale_chip = {
2952+ .name = "timberdale",
2953+ .ack = timberdale_irq_ack,
2954+ .mask = timberdale_irq_mask,
2955+ .unmask = timberdale_irq_unmask,
2956+ .disable = timberdale_irq_mask,
2957+ .enable = timberdale_irq_unmask,
2958+};
2959+
2960+/*--------------------------------------------------------------------------*/
2961+
2962+/* Install the IRQ handler */
2963+static void timberdale_attach_irq(struct pci_dev *dev)
2964+{
2965+ struct timberdale_device *priv = pci_get_drvdata(dev);
2966+ unsigned int irq, irq_base;
2967+
2968+ irq_base = priv->irq_base;
2969+ for (irq = irq_base; irq < irq_base + TIMBERDALE_NR_IRQS; irq++) {
2970+ set_irq_chip_and_handler_name(irq, &timberdale_chip,
2971+ handle_edge_irq, "mux");
2972+
2973+ set_irq_chip_data(irq, priv);
2974+
2975+#ifdef CONFIG_ARM
2976+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
2977+#endif
2978+ }
2979+
2980+ set_irq_data(dev->irq, priv);
2981+ set_irq_chained_handler(dev->irq, timberdale_irq);
2982+}
2983+
2984+static void timberdale_detach_irq(struct pci_dev *dev)
2985+{
2986+ struct timberdale_device *priv = pci_get_drvdata(dev);
2987+ unsigned int irq, irq_base;
2988+
2989+ irq_base = priv->irq_base;
2990+
2991+ set_irq_chained_handler(dev->irq, NULL);
2992+ set_irq_data(dev->irq, NULL);
2993+
2994+ for (irq = irq_base; irq < irq_base + TIMBERDALE_NR_IRQS; irq++) {
2995+#ifdef CONFIG_ARM
2996+ set_irq_flags(irq, 0);
2997+#endif
2998+ set_irq_chip(irq, NULL);
2999+ set_irq_chip_data(irq, NULL);
3000+ }
3001+}
3002+
3003+static int __devinit timb_probe(struct pci_dev *dev,
3004+ const struct pci_device_id *id)
3005+{
3006+ struct timberdale_device *priv;
3007+ int err, i;
3008+ u16 ver;
3009+ resource_size_t mapbase;
3010+
3011+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
3012+ if (!priv)
3013+ return -ENOMEM;
3014+
3015+ spin_lock_init(&priv->lock);
3016+ pci_set_drvdata(dev, priv);
3017+
3018+ err = pci_enable_device(dev);
3019+ if (err)
3020+ goto err_enable;
3021+
3022+ mapbase = pci_resource_start(dev, 0);
3023+ if (!mapbase) {
3024+ printk(KERN_ERR "timberdale: No resource\n");
3025+ goto err_start;
3026+ }
3027+
3028+ /* create a resource for the Interrupt controller registers */
3029+ priv->intc_mapbase = mapbase + INTCOFFSET;
3030+ if (!request_mem_region(priv->intc_mapbase, INTCSIZE, "timb-intc")) {
3031+ printk(KERN_ERR "timberdale: Failed to request intc mem\n");
3032+ goto err_request;
3033+ }
3034+
3035+ /* create a resource for the PCI master register */
3036+ priv->ctl_mapbase = mapbase + CHIPCTLOFFSET;
3037+	if (!request_mem_region(priv->ctl_mapbase, CHIPCTLSIZE, "timb-ctl")) {
3038+ printk(KERN_ERR "timberdale: Failed to request ctl mem\n");
3039+ goto err_request_ctl;
3040+ }
3041+
3042+ priv->intc_membase = ioremap(priv->intc_mapbase, INTCSIZE);
3043+ if (!priv->intc_membase) {
3044+ printk(KERN_ALERT "timberdale: Map error, intc\n");
3045+ goto err_ioremap;
3046+ }
3047+
3048+ priv->ctl_membase = ioremap(priv->ctl_mapbase, CHIPCTLSIZE);
3049+ if (!priv->ctl_membase) {
3050+ printk(KERN_ALERT "timberdale: Map error, ctl\n");
3051+ goto err_ioremap_ctl;
3052+ }
3053+
3054+ err = pci_enable_msi(dev);
3055+ if (err) {
3056+ printk(KERN_WARNING "timberdale: MSI init failed: %d\n", err);
3057+ goto err_msi;
3058+ }
3059+
3060+ /* Reset all FPGA PLB peripherals */
3061+ iowrite32(0x1, priv->ctl_membase + MAYSVILLERST);
3062+
3063+	/* At this stage the FPGA does not generate a unique
3064+	 * interrupt per function; to emulate real interrupts we
3065+	 * assign each function a fake interrupt, issued from the
3066+	 * chained handler. For now just hard-code a base number.
3067+	 */
3068+ priv->irq_base = NR_IRQS - TIMBERDALE_NR_IRQS - 1;
3069+ if (priv->irq_base < dev->irq)
3070+		/* oops, the device itself got the IRQ in the end... */
3071+ priv->irq_base = 400;
3072+
3073+ timberdale_attach_irq(dev);
3074+
3075+ /* update IRQ offsets in I2C board info */
3076+ for (i = 0; i < ARRAY_SIZE(timberdale_i2c_board_info); i++)
3077+ timberdale_i2c_board_info[i].irq += priv->irq_base;
3078+
3079+ /* don't leave platform_data empty on any device */
3080+ for (i = 0; i < ARRAY_SIZE(timberdale_cells_bar0); i++)
3081+ if (timberdale_cells_bar0[i].platform_data == NULL) {
3082+ timberdale_cells_bar0[i].platform_data =
3083+ timberdale_cells_bar0 + i;
3084+ timberdale_cells_bar0[i].data_size =
3085+ sizeof(timberdale_cells_bar0[i]);
3086+ }
3087+
3088+ err = mfd_add_devices(&dev->dev, -1,
3089+ timberdale_cells_bar0, ARRAY_SIZE(timberdale_cells_bar0),
3090+ &dev->resource[0], priv->irq_base);
3091+ if (err)
3092+ printk(KERN_WARNING
3093+ "timberdale: mfd_add_devices failed: %d\n", err);
3094+ else {
3095+ err = mfd_add_devices(&dev->dev, -1,
3096+ timberdale_cells_bar1,
3097+ ARRAY_SIZE(timberdale_cells_bar1),
3098+ &dev->resource[1], priv->irq_base);
3099+
3100+ if (err)
3101+ printk(KERN_WARNING
3102+ "timberdale: timb_add_sdhci failed: %d\n", err);
3103+ }
3104+
3105+ if (err)
3106+ goto err_mfd;
3107+
3108+ ver = ioread16(priv->ctl_membase + TIMB_REV);
3109+
3110+ printk(KERN_INFO "Found Maysville Card. Rev: %d\n", ver);
3111+
3112+ /* Enable interrupts and wire the hardware interrupts */
3113+ iowrite32(0x3, priv->intc_membase + MER);
3114+
3115+ return 0;
3116+err_mfd:
3117+ timberdale_detach_irq(dev);
3118+ pci_disable_msi(dev);
3119+err_msi:
3120+ iounmap(priv->ctl_membase);
3121+err_ioremap_ctl:
3122+ iounmap(priv->intc_membase);
3123+err_ioremap:
3124+ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
3125+err_request_ctl:
3126+ release_mem_region(priv->intc_mapbase, INTCSIZE);
3127+err_request:
3128+ pci_set_drvdata(dev, NULL);
3129+err_start:
3130+ pci_disable_device(dev);
3131+err_enable:
3132+ kfree(priv);
3133+ pci_set_drvdata(dev, NULL);
3134+ return -ENODEV;
3135+}
3136+
3137+static void __devexit timb_remove(struct pci_dev *dev)
3138+{
3139+ /* clean up any allocated resources and stuff here.
3140+ * like call release_region();
3141+ */
3142+ struct timberdale_device *priv;
3143+
3144+ priv = pci_get_drvdata(dev);
3145+
3146+ mfd_remove_devices(&dev->dev);
3147+
3148+ timberdale_detach_irq(dev);
3149+
3150+ iowrite32(0xffffffff, priv->intc_membase + IAR);
3151+ iowrite32(0, priv->intc_membase + MER);
3152+ iowrite32(0, priv->intc_membase + IER);
3153+
3154+ iounmap(priv->ctl_membase);
3155+ iounmap(priv->intc_membase);
3156+ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
3157+ release_mem_region(priv->intc_mapbase, INTCSIZE);
3158+
3159+ pci_disable_msi(dev);
3160+ pci_disable_device(dev);
3161+ pci_set_drvdata(dev, NULL);
3162+ kfree(priv);
3163+}
3164+
3165+static struct pci_device_id timberdale_pci_tbl[] = {
3166+ { PCI_DEVICE(PCI_VENDOR_ID_TIMB, PCI_DEVICE_ID_TIMB) },
3167+ { 0 }
3168+};
3169+MODULE_DEVICE_TABLE(pci, timberdale_pci_tbl);
3170+
3171+static struct pci_driver timberdale_pci_driver = {
3172+ .name = "timberdale",
3173+ .id_table = timberdale_pci_tbl,
3174+ .probe = timb_probe,
3175+ .remove = timb_remove,
3176+};
3177+
3178+static int __init timberdale_init(void)
3179+{
3180+ int err;
3181+
3182+ err = pci_register_driver(&timberdale_pci_driver);
3183+ if (err < 0) {
3184+ printk(KERN_ERR
3185+ "Failed to register PCI driver for %s device.\n",
3186+ timberdale_pci_driver.name);
3187+ return -ENODEV;
3188+ }
3189+
3190+ printk(KERN_INFO "Driver for %s has been successfully registered.\n",
3191+ timberdale_pci_driver.name);
3192+
3193+ return 0;
3194+}
3195+
3196+static void __exit timberdale_exit(void)
3197+{
3198+ pci_unregister_driver(&timberdale_pci_driver);
3199+
3200+ printk(KERN_INFO "Driver for %s has been successfully unregistered.\n",
3201+ timberdale_pci_driver.name);
3202+}
3203+
3204+MODULE_LICENSE("GPL v2");
3205+MODULE_VERSION(DRV_VERSION);
3206+MODULE_AUTHOR("Richard Rojfors");
3207+
3208+module_init(timberdale_init);
3209+module_exit(timberdale_exit);
3210+
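timberdale_irq() above demultiplexes the single PCI interrupt by reading the pending register (IPR) and handing each set bit to its own virtual interrupt. The standalone sketch below shows only that bit-scan, with a hard-coded pending value and a hypothetical handle_sub_irq(); it is illustrative, not the kernel implementation.

/* Scan a pending-bit word and dispatch one handler per set bit,
 * mirroring the loop over TIMBERDALE_NR_IRQS in timberdale_irq(). */
#include <stdio.h>

#define NR_SUB_IRQS 12

static void handle_sub_irq(int nr)
{
	printf("dispatch sub-interrupt %d\n", nr);
}

int main(void)
{
	unsigned int ipr = 0x00000412;	/* pretend bits 1, 4 and 10 are pending */
	int i;

	for (i = 0; i < NR_SUB_IRQS; i++)
		if (ipr & (1u << i))
			handle_sub_irq(i);
	return 0;
}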
3211diff -uNr linux-2.6.29-clean/drivers/mfd/timberdale.h linux-2.6.29/drivers/mfd/timberdale.h
3212--- linux-2.6.29-clean/drivers/mfd/timberdale.h 1969-12-31 16:00:00.000000000 -0800
3213+++ linux-2.6.29/drivers/mfd/timberdale.h 2009-04-06 13:51:47.000000000 -0700
3214@@ -0,0 +1,114 @@
3215+/*
3216+ * timberdale.h timberdale FPGA mfd shim driver defines
3217+ * Copyright (c) 2009 Intel Corporation
3218+ *
3219+ * This program is free software; you can redistribute it and/or modify
3220+ * it under the terms of the GNU General Public License version 2 as
3221+ * published by the Free Software Foundation.
3222+ *
3223+ * This program is distributed in the hope that it will be useful,
3224+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
3225+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3226+ * GNU General Public License for more details.
3227+ *
3228+ * You should have received a copy of the GNU General Public License
3229+ * along with this program; if not, write to the Free Software
3230+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
3231+ */
3232+
3233+/* Supports:
3234+ * Timberdale FPGA
3235+ */
3236+
3237+#ifndef MFD_TIMBERDALE_H
3238+#define MFD_TIMBERDALE_H
3239+
3240+/* Registers of the interrupt controller */
3241+#define ISR 0x00
3242+#define IPR 0x04
3243+#define IER 0x08
3244+#define IAR 0x0c
3245+#define SIE 0x10
3246+#define CIE 0x14
3247+#define MER 0x1c
3248+
3249+/* Registers of the control area */
3250+#define TIMB_REV 0x00
3251+#define MAYSVILLERST 0x40
3252+
3253+
3254+#define I2COFFSET 0x0
3255+#define I2CEND 0x1f
3256+
3257+#define SPIOFFSET 0x80
3258+#define SPIEND 0xff
3259+
3260+#define ETHOFFSET 0x300
3261+#define ETHEND 0x30f
3262+
3263+#define GPIOOFFSET 0x400
3264+#define GPIOEND 0x7ff
3265+
3266+#define CHIPCTLOFFSET 0x800
3267+#define CHIPCTLEND 0x8ff
3268+#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET)
3269+
3270+#define INTCOFFSET 0xc00
3271+#define INTCEND 0xfff
3272+#define INTCSIZE (INTCEND - INTCOFFSET)
3273+
3274+#define MOSTOFFSET 0x1000
3275+#define MOSTEND 0x13ff
3276+
3277+#define UARTOFFSET 0x1400
3278+#define UARTEND 0x17ff
3279+
3280+#define I2SOFFSET 0x1C00
3281+#define I2SEND 0x1fff
3282+
3283+#define LOGIWOFFSET 0x30000
3284+#define LOGIWEND 0x37fff
3285+
3286+#define DMAOFFSET 0x01000000
3287+#define DMAEND 0x013fffff
3288+
3289+/* SDHC0 is placed in PCI bar 1 */
3290+#define SDHC0OFFSET 0x00
3291+#define SDHC0END 0xff
3292+
3293+/* SDHC1 is placed in PCI bar 2 */
3294+#define SDHC1OFFSET 0x00
3295+#define SDHC1END 0xff
3296+
3297+#define PCI_VENDOR_ID_TIMB 0x10ee
3298+#define PCI_DEVICE_ID_TIMB 0xa123
3299+#define DRV_VERSION "0.1"
3300+
3301+
3302+#define IRQ_TIMBERDALE_INIC 0
3303+#define IRQ_TIMBERDALE_MLB 1
3304+#define IRQ_TIMBERDALE_GPIO 2
3305+#define IRQ_TIMBERDALE_I2C 3
3306+#define IRQ_TIMBERDALE_UART 4
3307+#define IRQ_TIMBERDALE_DMA 5
3308+#define IRQ_TIMBERDALE_I2S 6
3309+#define IRQ_TIMBERDALE_TSC_INT 7
3310+#define IRQ_TIMBERDALE_SDHC 8
3311+#define IRQ_TIMBERDALE_ADV7180 9
3312+#define IRQ_TIMBERDALE_ETHSW_IF 10
3313+#define IRQ_TIMBERDALE_SPI 11
3314+
3315+#define TIMBERDALE_NR_IRQS 12
3316+
3317+/* Some of the interrupts are level triggered, some are edge triggered */
3318+#define IRQ_TIMBERDALE_EDGE_MASK ((1 << IRQ_TIMBERDALE_ADV7180) | \
3319+ (1 << IRQ_TIMBERDALE_TSC_INT) | (1 << IRQ_TIMBERDALE_DMA) | \
3320+ (1 << IRQ_TIMBERDALE_MLB) | (1 << IRQ_TIMBERDALE_INIC))
3321+
3322+#define IRQ_TIMBERDALE_LEVEL_MASK ((1 << IRQ_TIMBERDALE_SPI) | \
3323+ (1 << IRQ_TIMBERDALE_ETHSW_IF) | (1 << IRQ_TIMBERDALE_SDHC) | \
3324+ (1 << IRQ_TIMBERDALE_I2S) | (1 << IRQ_TIMBERDALE_UART) | \
3325+ (1 << IRQ_TIMBERDALE_I2C) | (1 << IRQ_TIMBERDALE_GPIO))
3326+
3327+#endif
3328+
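The OFFSET/END pairs above describe where each peripheral sits inside PCI BAR0; mfd_add_devices() in timberdale.c passes &dev->resource[0] as the base, so every cell resource becomes base + OFFSET. A small standalone sketch of that address arithmetic, reusing a few offsets from the header and a made-up BAR0 base, is shown below; it is illustrative only.

/* Print the absolute MMIO address of a few Timberdale peripherals for a
 * hypothetical BAR0 base; the offsets are copied from timberdale.h. */
#include <stdio.h>

#define I2COFFSET  0x0
#define SPIOFFSET  0x80
#define GPIOOFFSET 0x400
#define UARTOFFSET 0x1400
#define I2SOFFSET  0x1C00
#define DMAOFFSET  0x01000000

int main(void)
{
	unsigned long bar0 = 0xd0000000UL;	/* made-up example base */

	printf("i2c  @ 0x%08lx\n", bar0 + I2COFFSET);
	printf("spi  @ 0x%08lx\n", bar0 + SPIOFFSET);
	printf("gpio @ 0x%08lx\n", bar0 + GPIOOFFSET);
	printf("uart @ 0x%08lx\n", bar0 + UARTOFFSET);
	printf("i2s  @ 0x%08lx\n", bar0 + I2SOFFSET);
	printf("dma  @ 0x%08lx\n", bar0 + DMAOFFSET);
	return 0;
}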
3329diff -uNr linux-2.6.29-clean/drivers/mfd/timbi2s.c linux-2.6.29/drivers/mfd/timbi2s.c
3330--- linux-2.6.29-clean/drivers/mfd/timbi2s.c 1969-12-31 16:00:00.000000000 -0800
3331+++ linux-2.6.29/drivers/mfd/timbi2s.c 2009-04-06 13:51:47.000000000 -0700
3332@@ -0,0 +1,597 @@
3333+/*
3334+ * timbi2s.c timberdale FPGA I2S driver
3335+ * Copyright (c) 2009 Intel Corporation
3336+ *
3337+ * This program is free software; you can redistribute it and/or modify
3338+ * it under the terms of the GNU General Public License version 2 as
3339+ * published by the Free Software Foundation.
3340+ *
3341+ * This program is distributed in the hope that it will be useful,
3342+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
3343+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3344+ * GNU General Public License for more details.
3345+ *
3346+ * You should have received a copy of the GNU General Public License
3347+ * along with this program; if not, write to the Free Software
3348+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
3349+ */
3350+
3351+/* Supports:
3352+ * Timberdale FPGA I2S
3353+ *
3354+ * As of 2009-03-23 I2S instances
3355+ * are not configured as masters
3356+ *
3357+ * TODO: implement switching between master and slave
3358+ */
3359+
3360+#include <linux/io.h>
3361+#include <linux/fs.h>
3362+#include <linux/module.h>
3363+#include <linux/circ_buf.h>
3364+#include <linux/spinlock.h>
3365+#include <linux/workqueue.h>
3366+#include <linux/interrupt.h>
3367+#include <linux/platform_device.h>
3368+
3369+#include <linux/mfd/timbi2s.h>
3370+
3371+#define DRIVER_NAME "timb-i2s"
3372+
3373+#define I2S_CLOCK_SPEED	62500000 /* 62.5 MHz */
3374+
3375+#define FIFO_FILL_SIZE 127
3376+#define I2S_BUFFER_SIZE PAGE_SIZE
3377+
3378+#define ALMOST_FULL 170
3379+#define ALMOST_EMPTY 85
3380+
3381+/* As of 2009-03-16, the IP can instantiate max. 4 RX and 4 TX */
3382+#define MAX_TX_NR 4
3383+#define MAX_RX_NR 4
3384+/* and actually only 4 are up and running:
3385+ * 1 TX and 3 RX
3386+ */
3387+#define IP_I2S_NR 4
3388+#define REGSTEP 0x04
3389+
3390+#define VERSION 0x00
3391+#define I2S_UIR 0x04 /* Unit Interrupt Register */
3392+
3393+/* Registers for all possible I2S IP instances
3394+ * are the same as for first one (from 0x08 to 0x20)
3395+ */
3396+#define I2S_PRESCALE 0x08 /* Holds prescale value, if clock master */
3397+#define I2S_ICR 0x0c /* Interrupt Clear Register */
3398+# define ICR_F 0x01 /* Full */
3399+# define ICR_AF 0x02 /* Almost full */
3400+# define ICR_AE 0x04 /* Almost empty */
3401+# define ICR_RX_D 0x08 /* Data present, RX only */
3402+# define ICR_TX_E	0x08 /* Empty, TX only */
3403+
3404+#define I2S_IPR 0x10 /* Interrupt Pending Register */
3405+#define I2S_ISR 0x14 /* Interrupt Status Register */
3406+
3407+#define I2S_IER 0x18 /* Interrupt Enable Register */
3408+# define IER_FF 0x01 /* RX/TX FIFO Full */
3409+# define IER_FAF 0x02 /* RX/TX FIFO Almost Full */
3410+# define IER_FAE 0x04 /* RX/TX FIFO Almost Empty */
3411+# define IER_RX_DATA 0x08 /* RX. Data Present */
3412+# define IER_TX_FE 0x08 /* TX. FIFO Empty */
3413+
3414+#define I2S_CTRL 0x1c /* Control Register */
3415+# define CTRL_TX_ENABLE 0x01 /* Enable TX */
3416+# define CTRL_RX_ENABLE 0x02 /* Enable RX */
3417+# define CTRL_NONE 0x04 /* Not used */
3418+# define CTRL_FIFO_CLR 0x08 /* FIFO Clear */
3419+# define CTRL_SWR 0x10 /* Soft reset */
3420+# define CTRL_CLKMASTER 0x1000 /* IP I2S instance is master */
3421+# define CTRL_IS_TX	0x40000000 /* IP I2S is a TX instance */
3422+# define CTRL_IS_RX	0x20000000 /* IP I2S is an RX instance */
3423+
3424+#define I2S_FIFO 0x20 /* read/write FIFO */
3425+
3426+#define INC_HEAD(buf, size) \
3427+ (buf->head = (buf->head + 1) & (size-1))
3428+
3429+#define INC_TAIL(buf, size) \
3430+ (buf->tail = (buf->tail + 1) & (size-1))
3431+
3432+
3433+/* circular buffer */
3434+static struct circ_buf *timbi2s_buf_alloc(void);
3435+static void timbi2s_buf_free(struct circ_buf *cb);
3436+static void timbi2s_buf_clear(struct circ_buf *cb);
3437+
3438+static int timbi2s_fifo_read(struct circ_buf *cb, ssize_t count, long add);
3439+static int timbi2s_fifo_write(struct circ_buf *cb, ssize_t count, long add);
3440+
3441+static int timbi2s_ioctrl(struct timbi2s_dev *);
3442+
3443+static struct timbi2s_bus *bus_p;
3444+
3445+static int timbi2s_is_tx(struct timbi2s_dev *i2sdev)
3446+{
3447+ return (ioread32(i2sdev->membase + i2sdev->ctrl_offset)
3448+ & CTRL_IS_TX) ? 1 : 0;
3449+}
3450+
3451+static int timbi2s_is_rx(struct timbi2s_dev *i2sdev)
3452+{
3453+ return (ioread32(i2sdev->membase + i2sdev->ctrl_offset)
3454+ & CTRL_IS_RX) ? 1 : 0;
3455+}
3456+
3457+/* Return unused TX-instance */
3458+static struct timbi2s_dev *timbi2s_get_tx(void)
3459+{
3460+ struct timbi2s_dev *tdev, *tmp;
3461+
3462+ if (bus_p == NULL)
3463+ return NULL;
3464+
3465+ list_for_each_entry_safe(tdev, tmp, &bus_p->control->list, item) {
3466+ if (!tdev->in_use && timbi2s_is_tx(tdev)) {
3467+ tdev->in_use = 1;
3468+ return tdev;
3469+ }
3470+
3471+ }
3472+ return NULL;
3473+}
3474+EXPORT_SYMBOL_GPL(timbi2s_get_tx);
3475+
3476+/* Return unused RX-instance */
3477+static struct timbi2s_dev *timbi2s_get_rx(void)
3478+{
3479+ struct timbi2s_dev *tdev, *tmp;
3480+
3481+ if (bus_p == NULL)
3482+ return NULL;
3483+
3484+ list_for_each_entry_safe(tdev, tmp, &bus_p->control->list, item) {
3485+ if (!tdev->in_use && timbi2s_is_rx(tdev)) {
3486+ tdev->in_use = 1;
3487+ return tdev;
3488+ }
3489+
3490+ }
3491+ return NULL;
3492+}
3493+EXPORT_SYMBOL_GPL(timbi2s_get_rx);
3494+
3495+/* Flag TX/RX as unused and reset it */
3496+static void timbi2s_put(struct timbi2s_dev *tdev)
3497+{
3498+ if (tdev->in_use) {
3499+ tdev->in_use = 0;
3500+ timbi2s_ioctrl(tdev);
3501+ }
3502+}
3503+EXPORT_SYMBOL_GPL(timbi2s_put);
3504+
3505+/*
3506+ * Write data to the FIFO
3507+ */
3508+static void timbi2s_tx_handler(struct timbi2s_dev *i2sdev)
3509+{
3510+ u32 pend;
3511+
3512+ pend = ioread32(i2sdev->membase + i2sdev->ipr_offset);
3513+
3514+ if (pend & IER_FAE) {
3515+ timbi2s_fifo_write(i2sdev->buffer,
3516+ ALMOST_FULL - ALMOST_EMPTY,
3517+ (unsigned long)i2sdev->membase +
3518+ i2sdev->fifo);
3519+ /* clear interrupt */
3520+ iowrite32(ICR_AE, i2sdev->membase + i2sdev->icr_offset);
3521+ }
3522+}
3523+
3524+/*
3525+ * Read data from the FIFO
3526+ */
3527+static void timbi2s_rx_handler(struct timbi2s_dev *i2sdev)
3528+{
3529+ u32 pend;
3530+ pend = ioread32(i2sdev->membase + i2sdev->ipr_offset);
3531+
3532+ if (pend & IER_FAE) {
3533+ timbi2s_fifo_read(i2sdev->buffer,
3534+ ALMOST_EMPTY,
3535+ (unsigned long)i2sdev->membase +
3536+ i2sdev->fifo);
3537+
3538+ /* clear interrupt */
3539+ iowrite32(ICR_AE | ICR_AF,
3540+ i2sdev->membase + i2sdev->icr_offset);
3541+ }
3542+}
3543+
3544+void timbi2s_int_handler(struct work_struct *workp)
3545+{
3546+ u32 pend, stat, i2stype;
3547+ unsigned long flags;
3548+ struct timbi2s_dev *i2sdev = container_of(workp,
3549+ struct timbi2s_dev,
3550+ work);
3551+
3552+ pend = ioread32(i2sdev->membase + i2sdev->ipr_offset);
3553+ stat = ioread32(i2sdev->membase + i2sdev->isr_offset);
3554+ i2stype = ioread32(i2sdev->membase + i2sdev->ctrl_offset);
3555+
3556+ spin_lock_irqsave(&i2sdev->lock, flags);
3557+
3558+ if (i2stype & CTRL_IS_RX) {
3559+ /* Enable Almost Empty Almost Full interrupt */
3560+ iowrite32(IER_FAE | IER_FAF,
3561+ i2sdev->membase + i2sdev->ier_offset);
3562+ /* Enable RX */
3563+ iowrite32(CTRL_RX_ENABLE,
3564+ i2sdev->membase + i2sdev->ctrl_offset);
3565+ timbi2s_rx_handler(i2sdev);
3566+ } else if (i2stype & CTRL_IS_TX) {
3567+ /* Enable Almost Empty interrupt */
3568+ iowrite32(IER_FAE, i2sdev->membase + i2sdev->ier_offset);
3569+ /* Enable TX */
3570+ iowrite32(CTRL_TX_ENABLE,
3571+ i2sdev->membase + i2sdev->ctrl_offset);
3572+ timbi2s_tx_handler(i2sdev);
3573+ }
3574+
3575+ spin_unlock_irqrestore(&i2sdev->lock, flags);
3576+}
3577+
3578+static int timbi2s_ioctrl(struct timbi2s_dev *i2sdev)
3579+{
3580+ u32 i2stype;
3581+
3582+ /* Reset */
3583+ iowrite8(CTRL_SWR, i2sdev->membase + i2sdev->ctrl_offset);
3584+ /* Clear IER */
3585+ iowrite32(0x00000000, i2sdev->membase + i2sdev->ier_offset);
3586+ /* Clear ICR */
3587+ iowrite32(0xffffffff, i2sdev->membase + i2sdev->icr_offset);
3588+
3589+ i2stype = ioread32(i2sdev->membase + i2sdev->ctrl_offset);
3590+
3591+ if (i2stype & CTRL_IS_TX)
3592+ printk(KERN_INFO DRIVER_NAME": found active I2S Transmitter\n");
3593+ else if (i2stype & CTRL_IS_RX)
3594+ printk(KERN_INFO DRIVER_NAME": found active I2S Receiver\n");
3595+
3596+ return 1;
3597+}
3598+EXPORT_SYMBOL_GPL(timbi2s_ioctrl);
3599+
3600+static struct circ_buf *timbi2s_buf_alloc(void)
3601+{
3602+ struct circ_buf *cb;
3603+
3604+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
3605+ if (cb == NULL)
3606+ return NULL;
3607+
3608+ cb->buf = kzalloc(I2S_BUFFER_SIZE, GFP_KERNEL);
3609+ if (cb->buf == NULL) {
3610+ kfree(cb);
3611+ return NULL;
3612+ }
3613+
3614+ timbi2s_buf_clear(cb);
3615+
3616+ return cb;
3617+}
3618+
3619+static void timbi2s_buf_free(struct circ_buf *cb)
3620+{
3621+ kfree(cb->buf);
3622+ kfree(cb);
3623+}
3624+
3625+static void timbi2s_buf_clear(struct circ_buf *cb)
3626+{
3627+ cb->head = 0;
3628+ cb->tail = cb->head;
3629+}
3630+
3631+/*
3632+ * Read data from the FIFO and write it to the given circular buffer
3633+ */
3634+static int timbi2s_fifo_read(struct circ_buf *cb, ssize_t count, long add)
3635+{
3636+ int c, ret = 0;
3637+
3638+ unsigned char *hi = (unsigned char *)ioread32((void *)(add >> 16));
3639+ unsigned char *lo = (unsigned char *)ioread32((void *)(add & 0xFFFF));
3640+
3641+ c = CIRC_SPACE_TO_END(cb->head, cb->tail, I2S_BUFFER_SIZE);
3642+ if (count < c)
3643+ c = count;
3644+
3645+ if (c <= 0)
3646+ return 1;
3647+
3648+	while (c > 0) {
3649+		memcpy(cb->buf + cb->head, hi, 2);
3650+		INC_HEAD(cb, I2S_BUFFER_SIZE);
3651+
3652+		memcpy(cb->buf + cb->head, lo, 2);
3653+		INC_HEAD(cb, I2S_BUFFER_SIZE);
3654+		c -= 4;
3655+	}
3656+ return ret;
3657+}
3658+
3659+/*
3660+ * Get data from the circular buffer and write it to the given FIFO address
3661+ */
3662+static int timbi2s_fifo_write(struct circ_buf *cb, ssize_t count, long add)
3663+{
3664+ int c, ret = 0;
3665+
3666+ c = CIRC_CNT_TO_END(cb->head, cb->tail, I2S_BUFFER_SIZE);
3667+ if (count < c)
3668+ c = count;
3669+
3670+ if (c <= 0)
3671+ return 1;
3672+
3673+	while (c > 0) {
3674+		iowrite32(*(s16 *)(cb->buf + cb->tail), (void *)(add >> 16));
3675+		INC_TAIL(cb, I2S_BUFFER_SIZE);
3676+
3677+		iowrite32(*(s16 *)(cb->buf + cb->tail), (void *)(add & 0xFFFF));
3678+		INC_TAIL(cb, I2S_BUFFER_SIZE);
3679+		c -= 4;
3680+	}
3681+
3682+ return ret;
3683+}
3684+
3685+static void timbi2s_control_destroy(struct timbi2s_bus_control *control)
3686+{
3687+ kfree(control);
3688+ control = NULL;
3689+}
3690+
3691+static void timbi2s_control_add_dev(struct timbi2s_dev *i2sdev)
3692+{
3693+ list_add(&i2sdev->item, &i2sdev->bus->control->list);
3694+}
3695+
3696+static void timbi2s_control_del_dev(struct timbi2s_dev *i2sdev)
3697+{
3698+ list_del(&i2sdev->item);
3699+ if (list_empty(&i2sdev->bus->control->list))
3700+ timbi2s_control_destroy(i2sdev->bus->control);
3701+}
3702+
3703+static irqreturn_t timbi2s_irq(int irq, void *dev_id)
3704+{
3705+ u8 pend;
3706+ u32 iunit;
3707+ int i;
3708+
3709+ struct timbi2s_bus *tbus = dev_id;
3710+ queue_work(tbus->workqueue, &tbus->work);
3711+
3712+ iunit = ioread32(tbus->membase + I2S_UIR);
3713+ /* Find out which I2S instance is interrupting */
3714+ for (i = 0; i < 32; i++) {
3715+ if ((1 << i) & iunit) {
3716+ pend = ioread8(tbus->membase +
3717+ (I2S_IPR + (i * REGSTEP * 7)));
3718+ iowrite8(pend, tbus->membase +
3719+ (I2S_ICR + (i * REGSTEP * 7)));
3720+ }
3721+ }
3722+
3723+ return IRQ_HANDLED;
3724+}
3725+
3726+static int __init timbi2s_probe(struct platform_device *dev)
3727+{
3728+ int err = 0;
3729+ struct timbi2s_dev *tdev, *tmp;
3730+ struct timbi2s_bus *tbus;
3731+ struct resource *iomem;
3732+ int i;
3733+
3734+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
3735+ if (!iomem) {
3736+ err = -EINVAL;
3737+ goto err_mem;
3738+ }
3739+
3740+ tbus = kzalloc(sizeof(*tbus), GFP_KERNEL);
3741+ if (!tbus) {
3742+ err = -EINVAL;
3743+ goto err_mem;
3744+ }
3745+
3746+ /* Init bus_control */
3747+ tbus->control = kzalloc(sizeof(struct timbi2s_bus_control), GFP_KERNEL);
3748+ if (!tbus->control) {
3749+ printk(KERN_ERR DRIVER_NAME
3750+ ": Failed to allocate timbi2s_bus_control.\n");
3751+ err = -ENOMEM;
3752+ goto err_free;
3753+ }
3754+ INIT_LIST_HEAD(&tbus->control->list);
3755+
3756+ /* Init workqueue */
3757+ tbus->workqueue = create_singlethread_workqueue("timbi2s");
3758+ if (tbus->workqueue == NULL) {
3759+ printk(KERN_ERR DRIVER_NAME
3760+ ": unable to create workqueue\n");
3761+ err = -ENOMEM;
3762+ goto err_control;
3763+ }
3764+ INIT_WORK(&tbus->work, timbi2s_int_handler);
3765+
3766+ if (!request_mem_region(iomem->start,
3767+ resource_size(iomem), DRIVER_NAME)) {
3768+ printk(KERN_EMERG DRIVER_NAME
3769+ ": Mem region is already in use\n");
3770+ err = -ENXIO;
3771+ goto err_control;
3772+ }
3773+
3774+ tbus->membase = ioremap(iomem->start, resource_size(iomem));
3775+ if (tbus->membase == NULL) {
3776+ err = -ENOMEM;
3777+ goto err_request;
3778+ }
3779+
3780+ bus_p = tbus;
3781+
3782+
3783+
3784+	/* For now we have only 4 I2S instances in the IP: 3 RX and 1 TX */
3785+	/* Note: TX instances are always on top */
3786+ /* TODO: auto-check how many are alive and bring them into control */
3787+ for (i = 0; i < IP_I2S_NR; i++) {
3788+ tdev = kzalloc(sizeof(*tdev), GFP_KERNEL);
3789+ if (!tdev) {
3790+ err = -EINVAL;
3791+ goto clean_list;
3792+ }
3793+
3794+ /* Allocate circ_buf */
3795+ tdev->buffer = timbi2s_buf_alloc();
3796+ if (tdev->buffer == NULL) {
3797+ printk(KERN_ERR "timbi2s: unable to allocate buffer\n");
3798+ goto clean_list;
3799+ }
3800+
3801+ INIT_LIST_HEAD(&tdev->item);
3802+ spin_lock_init(&tdev->lock);
3803+
3804+ /* set up offsets for each instance of I2S */
3805+ tdev->bus = tbus; /* ptr to our bus */
3806+ tdev->membase = tbus->membase;
3807+ tdev->in_use = 0;
3808+ tdev->pscale_offset = I2S_PRESCALE + (i * REGSTEP * 7);
3809+ tdev->icr_offset = I2S_ICR + (i * REGSTEP * 7);
3810+ tdev->isr_offset = I2S_ISR + (i * REGSTEP * 7);
3811+ tdev->ipr_offset = I2S_IPR + (i * REGSTEP * 7);
3812+ tdev->ier_offset = I2S_IER + (i * REGSTEP * 7);
3813+ tdev->ctrl_offset = I2S_CTRL + (i * REGSTEP * 7);
3814+ tdev->fifo = I2S_FIFO + (i * REGSTEP * 7);
3815+
3816+ /* Try to check and reset hardware */
3817+ if (timbi2s_ioctrl(tdev))
3818+ timbi2s_control_add_dev(tdev);
3819+
3820+ tdev = NULL;
3821+ }
3822+
3823+ tbus->irq = platform_get_irq(dev, 0);
3824+ if (tbus->irq < 0) {
3825+ err = -EINVAL;
3826+ goto clean_list;
3827+ }
3828+
3829+ err = request_irq(tbus->irq, timbi2s_irq, 0, DRIVER_NAME, tbus);
3830+ if (err != 0)
3831+ goto clean_list;
3832+
3833+ platform_set_drvdata(dev, tbus);
3834+
3835+ dev_info(&dev->dev, "Driver for Timberdale I2S (ver: %d)"
3836+ " has been successfully registered.\n",
3837+ ioread32(tbus->membase + 0x00));
3838+ return 0;
3839+
3840+clean_list:
3841+ list_for_each_entry_safe(tdev, tmp, &tbus->control->list, item) {
3842+ if (tdev->workqueue != NULL) {
3843+ flush_workqueue(tdev->workqueue);
3844+ destroy_workqueue(tdev->workqueue);
3845+ }
3846+
3847+ if (tdev->buffer != NULL)
3848+ timbi2s_buf_free(tdev->buffer);
3849+
3850+ timbi2s_control_del_dev(tdev);
3851+ kfree(tdev);
3852+ }
3853+ free_irq(tbus->irq, tbus);
3854+ iounmap(tbus->membase);
3855+err_request:
3856+ release_mem_region(iomem->start, resource_size(iomem));
3857+err_control:
3858+ if (tbus->control != NULL)
3859+ timbi2s_control_destroy(tbus->control);
3860+err_free:
3861+ kfree(tbus);
3862+err_mem:
3863+ printk(KERN_ERR
3864+ DRIVER_NAME": Failed to register Timberdale I2S: %d\n", err);
3865+
3866+ return err;
3867+}
3868+
3869+static int __devexit timbi2s_remove(struct platform_device *dev)
3870+{
3871+ struct timbi2s_bus *tbus;
3872+ struct timbi2s_dev *tdev, *tmp;
3873+ struct resource *r;
3874+
3875+ tbus = platform_get_drvdata(dev);
3876+ free_irq(tbus->irq, tbus);
3877+
3878+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
3879+
3880+ list_for_each_entry_safe(tdev, tmp, &tbus->control->list, item) {
3881+ if (tdev->workqueue != NULL) {
3882+ flush_workqueue(tdev->workqueue);
3883+ destroy_workqueue(tdev->workqueue);
3884+ }
3885+
3886+ if (tdev->buffer != NULL)
3887+ timbi2s_buf_free(tdev->buffer);
3888+
3889+ kfree(tdev);
3890+ }
3891+
3892+	iounmap(tbus->membase);
3893+ if (r)
3894+ release_mem_region(r->start, resource_size(r));
3895+
3896+ dev_info(&dev->dev, "Driver for Timberdale I2S has been"
3897+ " successfully unregistered.\n");
3898+
3899+	platform_set_drvdata(dev, NULL);
3900+ return 0;
3901+}
3902+
3903+static struct platform_driver timbi2s_platform_driver = {
3904+ .driver = {
3905+ .name = DRIVER_NAME,
3906+ .owner = THIS_MODULE,
3907+ },
3908+ .probe = timbi2s_probe,
3909+ .remove = __devexit_p(timbi2s_remove),
3910+};
3911+
3912+/*--------------------------------------------------------------------------*/
3913+
3914+static int __init timbi2s_init(void)
3915+{
3916+ return platform_driver_register(&timbi2s_platform_driver);
3917+}
3918+
3919+static void __exit timbi2s_exit(void)
3920+{
3921+ platform_driver_unregister(&timbi2s_platform_driver);
3922+}
3923+
3924+module_init(timbi2s_init);
3925+module_exit(timbi2s_exit);
3926+
3927+MODULE_AUTHOR("Mocean Laboratories");
3928+MODULE_DESCRIPTION("Timberdale I2S bus driver");
3929+MODULE_LICENSE("GPL v2");
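The INC_HEAD/INC_TAIL macros in timbi2s.c rely on I2S_BUFFER_SIZE being a power of two, so that masking with (size - 1) wraps the index with no modulo or branch. A standalone sketch of that wrapping, using a made-up 8-entry buffer, is shown below; it is illustrative only, not part of the driver.

/* Demonstrate power-of-two index wrapping: (index + 1) & (size - 1)
 * counts 0..7 and then wraps back to 0, as INC_HEAD/INC_TAIL do. */
#include <stdio.h>

#define BUF_SIZE 8	/* must be a power of two, like I2S_BUFFER_SIZE */

int main(void)
{
	unsigned int head = 0;
	int i;

	for (i = 0; i < 12; i++) {
		printf("step %2d -> head %u\n", i, head);
		head = (head + 1) & (BUF_SIZE - 1);
	}
	return 0;
}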
3930diff -uNr linux-2.6.29-clean/drivers/mmc/host/Kconfig linux-2.6.29/drivers/mmc/host/Kconfig
3931--- linux-2.6.29-clean/drivers/mmc/host/Kconfig 2009-04-01 09:20:24.000000000 -0700
3932+++ linux-2.6.29/drivers/mmc/host/Kconfig 2009-04-06 13:51:47.000000000 -0700
3933@@ -65,6 +65,16 @@
3934
3935 If unsure, say Y.
3936
3937+config MMC_SDHCI_PLTFM
3938+ tristate "SDHCI support on platform devices"
3939+ depends on MMC_SDHCI
3940+ help
3941+	  This selects the Secure Digital Host Controller Interface for platform devices.
3942+
3943+ If you have a controller with this interface, say Y or M here.
3944+
3945+ If unsure, say N.
3946+
3947 config MMC_OMAP
3948 tristate "TI OMAP Multimedia Card Interface support"
3949 depends on ARCH_OMAP
3950diff -uNr linux-2.6.29-clean/drivers/mmc/host/Makefile linux-2.6.29/drivers/mmc/host/Makefile
3951--- linux-2.6.29-clean/drivers/mmc/host/Makefile 2009-04-01 09:20:24.000000000 -0700
3952+++ linux-2.6.29/drivers/mmc/host/Makefile 2009-04-06 13:51:47.000000000 -0700
3953@@ -13,6 +13,7 @@
3954 obj-$(CONFIG_MMC_SDHCI) += sdhci.o
3955 obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
3956 obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
3957+obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
3958 obj-$(CONFIG_MMC_WBSD) += wbsd.o
3959 obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
3960 obj-$(CONFIG_MMC_OMAP) += omap.o
3961diff -uNr linux-2.6.29-clean/drivers/mmc/host/sdhci-pltfm.c linux-2.6.29/drivers/mmc/host/sdhci-pltfm.c
3962--- linux-2.6.29-clean/drivers/mmc/host/sdhci-pltfm.c 1969-12-31 16:00:00.000000000 -0800
3963+++ linux-2.6.29/drivers/mmc/host/sdhci-pltfm.c 2009-04-06 13:51:47.000000000 -0700
3964@@ -0,0 +1,262 @@
3965+/*
3966+ * sdhci-pltfm.c Support for SDHCI platform devices
3967+ * Copyright (c) 2009 Intel Corporation
3968+ *
3969+ * This program is free software; you can redistribute it and/or modify
3970+ * it under the terms of the GNU General Public License version 2 as
3971+ * published by the Free Software Foundation.
3972+ *
3973+ * This program is distributed in the hope that it will be useful,
3974+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
3975+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3976+ * GNU General Public License for more details.
3977+ *
3978+ * You should have received a copy of the GNU General Public License
3979+ * along with this program; if not, write to the Free Software
3980+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
3981+ */
3982+
3983+/* Supports:
3984+ * SDHCI platform devices
3985+ *
3986+ * Inspired by sdhci-pci.c, by Pierre Ossman
3987+ */
3988+
3989+#include <linux/delay.h>
3990+#include <linux/highmem.h>
3991+#include <linux/platform_device.h>
3992+
3993+#include <linux/mmc/host.h>
3994+
3995+#include <linux/io.h>
3996+
3997+#include "sdhci.h"
3998+
3999+
4000+#define MAX_SLOTS 8
4001+
4002+struct sdhci_pltfm_chip;
4003+
4004+struct sdhci_pltfm_slot {
4005+ struct sdhci_pltfm_chip *chip;
4006+ struct sdhci_host *host;
4007+
4008+ int pltfm_resource;
4009+};
4010+
4011+struct sdhci_pltfm_chip {
4012+ struct platform_device *pdev;
4013+
4014+ unsigned int quirks;
4015+
4016+ int num_slots; /* Slots on controller */
4017+ struct sdhci_pltfm_slot *slots[MAX_SLOTS]; /* Pointers to host slots */
4018+};
4019+
4020+
4021+/*****************************************************************************\
4022+ * *
4023+ * SDHCI core callbacks *
4024+ * *
4025+\*****************************************************************************/
4026+
4027+static struct sdhci_ops sdhci_pltfm_ops = {
4028+};
4029+
4030+/*****************************************************************************\
4031+ * *
4032+ * Device probing/removal *
4033+ * *
4034+\*****************************************************************************/
4035+
4036+
4037+static struct sdhci_pltfm_slot * __devinit sdhci_pltfm_probe_slot(
4038+ struct platform_device *pdev, struct sdhci_pltfm_chip *chip,
4039+ int resource)
4040+{
4041+ struct sdhci_pltfm_slot *slot;
4042+ struct sdhci_host *host;
4043+ struct resource *iomem;
4044+	int ret = -ENOMEM; /* default for error paths that do not set ret */
4045+
4046+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, resource);
4047+ if (!iomem)
4048+ return ERR_PTR(-ENODEV);
4049+
4050+ if (resource_size(iomem) != 0x100) {
4051+ dev_err(&pdev->dev, "Invalid iomem size. You may "
4052+ "experience problems.\n");
4053+ }
4054+
4055+ if (!pdev->dev.parent) {
4056+ dev_err(&pdev->dev, "The parent device be a PCI device\n");
4057+ return ERR_PTR(-ENODEV);
4058+ }
4059+
4060+ host = sdhci_alloc_host(pdev->dev.parent,
4061+ sizeof(struct sdhci_pltfm_slot));
4062+ if (IS_ERR(host))
4063+ return ERR_PTR(PTR_ERR(host));
4064+
4065+ slot = sdhci_priv(host);
4066+
4067+ slot->chip = chip;
4068+ slot->host = host;
4069+ slot->pltfm_resource = resource;
4070+
4071+ host->hw_name = "PLTFM";
4072+ host->ops = &sdhci_pltfm_ops;
4073+ host->quirks = chip->quirks;
4074+
4075+ host->irq = platform_get_irq(pdev, 0);
4076+
4077+ if (!request_mem_region(iomem->start, resource_size(iomem),
4078+ mmc_hostname(host->mmc))) {
4079+ dev_err(&pdev->dev, "cannot request region\n");
4080+ ret = -EBUSY;
4081+ goto free;
4082+ }
4083+
4084+ host->ioaddr = ioremap(iomem->start, resource_size(iomem));
4085+ if (!host->ioaddr) {
4086+ dev_err(&pdev->dev, "failed to remap registers\n");
4087+ goto release;
4088+ }
4089+
4090+ ret = sdhci_add_host(host);
4091+ if (ret)
4092+ goto unmap;
4093+
4094+ return slot;
4095+
4096+unmap:
4097+ iounmap(host->ioaddr);
4098+release:
4099+ release_mem_region(iomem->start, resource_size(iomem));
4100+free:
4101+ sdhci_free_host(host);
4102+
4103+ return ERR_PTR(ret);
4104+}
4105+
4106+static void sdhci_pltfm_remove_slot(struct sdhci_pltfm_slot *slot)
4107+{
4108+ int dead;
4109+ u32 scratch;
4110+ struct resource *iomem;
4111+
4112+ dead = 0;
4113+ scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
4114+ if (scratch == (u32)-1)
4115+ dead = 1;
4116+
4117+ sdhci_remove_host(slot->host, dead);
4118+
4119+ iounmap(slot->host->ioaddr);
4120+
4121+ iomem = platform_get_resource(slot->chip->pdev, IORESOURCE_MEM,
4122+ slot->pltfm_resource);
4123+ release_mem_region(iomem->start, resource_size(iomem));
4124+
4125+ sdhci_free_host(slot->host);
4126+}
4127+
4128+static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
4129+{
4130+ struct sdhci_pltfm_chip *chip;
4131+ struct sdhci_pltfm_slot *slot;
4132+ u8 slots;
4133+ int ret, i;
4134+
4135+ BUG_ON(pdev == NULL);
4136+
4137+ for (slots = 0; slots <= MAX_SLOTS; slots++)
4138+ if (!platform_get_resource(pdev, IORESOURCE_MEM, slots))
4139+ break;
4140+
4141+ BUG_ON(slots > MAX_SLOTS || slots == 0);
4142+
4143+ chip = kzalloc(sizeof(struct sdhci_pltfm_chip), GFP_KERNEL);
4144+ if (!chip) {
4145+ ret = -ENOMEM;
4146+ goto err;
4147+ }
4148+
4149+ chip->pdev = pdev;
4150+ chip->num_slots = slots;
4151+ platform_set_drvdata(pdev, chip);
4152+
4153+ for (i = 0; i < slots; i++) {
4154+ slot = sdhci_pltfm_probe_slot(pdev, chip, i);
4155+ if (IS_ERR(slot)) {
4156+ for (i--; i >= 0; i--)
4157+ sdhci_pltfm_remove_slot(chip->slots[i]);
4158+ ret = PTR_ERR(slot);
4159+ goto free;
4160+ }
4161+
4162+ chip->slots[i] = slot;
4163+ }
4164+
4165+ return 0;
4166+
4167+free:
4168+ platform_set_drvdata(pdev, NULL);
4169+ kfree(chip);
4170+
4171+err:
4172+ printk(KERN_ERR"Probing of sdhci-pltfm failed: %d\n", ret);
4173+ return ret;
4174+}
4175+
4176+static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
4177+{
4178+ int i;
4179+ struct sdhci_pltfm_chip *chip;
4180+
4181+ chip = platform_get_drvdata(pdev);
4182+
4183+ if (chip) {
4184+ for (i = 0; i < chip->num_slots; i++)
4185+ sdhci_pltfm_remove_slot(chip->slots[i]);
4186+
4187+ platform_set_drvdata(pdev, NULL);
4188+ kfree(chip);
4189+ }
4190+
4191+ return 0;
4192+}
4193+
4194+static struct platform_driver sdhci_pltfm_driver = {
4195+ .driver = {
4196+ .name = "sdhci",
4197+ .owner = THIS_MODULE,
4198+ },
4199+ .probe = sdhci_pltfm_probe,
4200+ .remove = __devexit_p(sdhci_pltfm_remove),
4201+};
4202+
4203+/*****************************************************************************\
4204+ * *
4205+ * Driver init/exit *
4206+ * *
4207+\*****************************************************************************/
4208+
4209+static int __init sdhci_drv_init(void)
4210+{
4211+ return platform_driver_register(&sdhci_pltfm_driver);
4212+}
4213+
4214+static void __exit sdhci_drv_exit(void)
4215+{
4216+ platform_driver_unregister(&sdhci_pltfm_driver);
4217+}
4218+
4219+module_init(sdhci_drv_init);
4220+module_exit(sdhci_drv_exit);
4221+
4222+MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
4223+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
4224+MODULE_LICENSE("GPL v2");
4225+MODULE_ALIAS("platform:sdhci");
4226+
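sdhci_pltfm_probe_slot() above returns either a valid slot pointer or an error encoded with ERR_PTR(), which its caller unpacks via IS_ERR()/PTR_ERR(). The standalone sketch below mimics that convention with hypothetical helpers (err_ptr, ptr_err, is_err, make_slot) to show how one return value carries both cases; it is illustrative only, not the kernel implementation.

/* Encode small negative error codes in the pointer value itself, the way
 * the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() macros do. */
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)      { return (void *)err; }
static long  ptr_err(const void *p) { return (long)p; }
static int   is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *make_slot(int fail)
{
	static int slot = 42;			/* stand-in for a real allocation */
	return fail ? err_ptr(-12 /* ENOMEM */) : (void *)&slot;
}

int main(void)
{
	void *ok = make_slot(0), *bad = make_slot(1);

	printf("ok:  is_err=%d\n", is_err(ok));
	printf("bad: is_err=%d err=%ld\n", is_err(bad), ptr_err(bad));
	return 0;
}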
4227diff -uNr linux-2.6.29-clean/drivers/serial/Kconfig linux-2.6.29/drivers/serial/Kconfig
4228--- linux-2.6.29-clean/drivers/serial/Kconfig 2009-04-01 09:20:24.000000000 -0700
4229+++ linux-2.6.29/drivers/serial/Kconfig 2009-04-06 13:51:47.000000000 -0700
4230@@ -1412,4 +1412,11 @@
4231 default 19200 if (SERIAL_SPORT_BAUD_RATE_19200)
4232 default 9600 if (SERIAL_SPORT_BAUD_RATE_9600)
4233
4234+config SERIAL_TIMBERDALE
4235+ tristate "Support for timberdale UART"
4236+ depends on MFD_TIMBERDALE
4237+ select SERIAL_CORE
4238+ ---help---
4239+	  Add support for the UART controller on the Timberdale FPGA.
4240+
4241 endmenu
4242diff -uNr linux-2.6.29-clean/drivers/serial/Makefile linux-2.6.29/drivers/serial/Makefile
4243--- linux-2.6.29-clean/drivers/serial/Makefile 2009-04-01 09:20:24.000000000 -0700
4244+++ linux-2.6.29/drivers/serial/Makefile 2009-04-06 13:51:47.000000000 -0700
4245@@ -76,3 +76,4 @@
4246 obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
4247 obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
4248 obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
4249+obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
4250diff -uNr linux-2.6.29-clean/drivers/serial/timbuart.c linux-2.6.29/drivers/serial/timbuart.c
4251--- linux-2.6.29-clean/drivers/serial/timbuart.c 1969-12-31 16:00:00.000000000 -0800
4252+++ linux-2.6.29/drivers/serial/timbuart.c 2009-04-06 13:51:47.000000000 -0700
4253@@ -0,0 +1,519 @@
4254+/*
4255+ * timbuart.c timberdale FPGA UART driver
4256+ * Copyright (c) 2009 Intel Corporation
4257+ *
4258+ * This program is free software; you can redistribute it and/or modify
4259+ * it under the terms of the GNU General Public License version 2 as
4260+ * published by the Free Software Foundation.
4261+ *
4262+ * This program is distributed in the hope that it will be useful,
4263+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
4264+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4265+ * GNU General Public License for more details.
4266+ *
4267+ * You should have received a copy of the GNU General Public License
4268+ * along with this program; if not, write to the Free Software
4269+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
4270+ */
4271+
4272+/* Supports:
4273+ * Timberdale FPGA UART
4274+ */
4275+
4276+#include <linux/pci.h>
4277+#include <linux/interrupt.h>
4278+#include <linux/serial_core.h>
4279+#include <linux/kernel.h>
4280+#include <linux/platform_device.h>
4281+#include <linux/ioport.h>
4282+
4283+#include "timbuart.h"
4284+
4285+struct timbuart_port {
4286+ struct uart_port port;
4287+ struct tasklet_struct tasklet;
4288+ int usedma;
4289+ u8 last_ier;
4290+ struct platform_device *dev;
4291+};
4292+
4293+static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800,
4294+ 921600, 1843200, 3250000};
4295+
4296+static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier);
4297+
4298+static irqreturn_t timbuart_handleinterrupt(int irq, void *devid);
4299+
4300+static void timbuart_stop_rx(struct uart_port *port)
4301+{
4302+ /* spin lock held by upper layer, disable all RX interrupts */
4303+ u8 ier = ioread8(port->membase + TIMBUART_IER) & ~RXFLAGS;
4304+ iowrite8(ier, port->membase + TIMBUART_IER);
4305+}
4306+
4307+static void timbuart_stop_tx(struct uart_port *port)
4308+{
4309+ /* spinlock held by upper layer, disable TX interrupt */
4310+ u8 ier = ioread8(port->membase + TIMBUART_IER) & ~TXBAE;
4311+ iowrite8(ier, port->membase + TIMBUART_IER);
4312+}
4313+
4314+static void timbuart_start_tx(struct uart_port *port)
4315+{
4316+ struct timbuart_port *uart =
4317+ container_of(port, struct timbuart_port, port);
4318+
4319+ /* do not transfer anything here -> fire off the tasklet */
4320+ tasklet_schedule(&uart->tasklet);
4321+}
4322+
4323+static void timbuart_flush_buffer(struct uart_port *port)
4324+{
4325+ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
4326+
4327+ iowrite8(ctl, port->membase + TIMBUART_CTRL);
4328+ iowrite8(TXBF, port->membase + TIMBUART_ISR);
4329+}
4330+
4331+static void timbuart_rx_chars(struct uart_port *port)
4332+{
4333+ struct tty_struct *tty = port->info->port.tty;
4334+
4335+ while (ioread8(port->membase + TIMBUART_ISR) & RXDP) {
4336+ u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
4337+ /* ack */
4338+ iowrite8(RXDP, port->membase + TIMBUART_ISR);
4339+ port->icount.rx++;
4340+ tty_insert_flip_char(tty, ch, TTY_NORMAL);
4341+ }
4342+
4343+ spin_unlock(&port->lock);
4344+ tty_flip_buffer_push(port->info->port.tty);
4345+ spin_lock(&port->lock);
4346+
4347+ dev_dbg(port->dev, "%s - total read %d bytes\n",
4348+ __func__, port->icount.rx);
4349+}
4350+
4351+static void timbuart_tx_chars(struct uart_port *port)
4352+{
4353+ struct circ_buf *xmit = &port->info->xmit;
4354+
4355+ while (!(ioread8(port->membase + TIMBUART_ISR) & TXBF) &&
4356+ !uart_circ_empty(xmit)) {
4357+ iowrite8(xmit->buf[xmit->tail],
4358+ port->membase + TIMBUART_TXFIFO);
4359+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
4360+ port->icount.tx++;
4361+ }
4362+
4363+ dev_dbg(port->dev,
4364+ "%s - total written %d bytes, CTL: %x, RTS: %x, baud: %x\n",
4365+ __func__,
4366+ port->icount.tx,
4367+ ioread8(port->membase + TIMBUART_CTRL),
4368+ port->mctrl & TIOCM_RTS,
4369+ ioread8(port->membase + TIMBUART_BAUDRATE));
4370+}
4371+
4372+static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
4373+{
4374+ struct timbuart_port *uart =
4375+ container_of(port, struct timbuart_port, port);
4376+ struct circ_buf *xmit = &port->info->xmit;
4377+
4378+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
4379+ return;
4380+
4381+ if (port->x_char)
4382+ return;
4383+
4384+ if (isr & TXFLAGS) {
4385+ timbuart_tx_chars(port);
4386+ /* clear all TX interrupts */
4387+ iowrite8(TXFLAGS, port->membase + TIMBUART_ISR);
4388+
4389+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
4390+ uart_write_wakeup(port);
4391+ } else
4392+ /* Re-enable any tx interrupt */
4393+ *ier |= uart->last_ier & TXFLAGS;
4394+
4395+	/* Enable interrupts if there are chars in the transmit buffer,
4396+	 * or if we delivered some bytes and want the almost-empty
4397+	 * interrupt; we wake the upper layer later, when we get that
4398+	 * interrupt, to give the data some time to go out...
4399+	 */
4400+ if (!uart_circ_empty(xmit))
4401+ *ier |= TXBAE;
4402+
4403+ dev_dbg(port->dev, "%s - leaving\n", __func__);
4404+}
4405+
4406+void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
4407+{
4408+ if (isr & RXFLAGS) {
4409+ /* Some RX status is set */
4410+ if (isr & RXBF) {
4411+ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
4412+ TIMBUART_CTRL_FLSHRX;
4413+ iowrite8(ctl, port->membase + TIMBUART_CTRL);
4414+ port->icount.overrun++;
4415+ } else if (isr & (RXDP))
4416+ timbuart_rx_chars(port);
4417+
4418+ /* ack all RX interrupts */
4419+ iowrite8(RXFLAGS, port->membase + TIMBUART_ISR);
4420+ }
4421+
4422+ /* always have the RX interrupts enabled */
4423+ *ier |= RXBAF | RXBF | RXTT;
4424+
4425+ dev_dbg(port->dev, "%s - leaving\n", __func__);
4426+}
4427+
4428+void timbuart_tasklet(unsigned long arg)
4429+{
4430+ struct timbuart_port *uart = (struct timbuart_port *)arg;
4431+ u8 isr, ier = 0;
4432+
4433+ spin_lock(&uart->port.lock);
4434+
4435+ isr = ioread8(uart->port.membase + TIMBUART_ISR);
4436+ dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
4437+
4438+ if (!uart->usedma)
4439+ timbuart_handle_tx_port(&uart->port, isr, &ier);
4440+
4441+ timbuart_mctrl_check(&uart->port, isr, &ier);
4442+
4443+ if (!uart->usedma)
4444+ timbuart_handle_rx_port(&uart->port, isr, &ier);
4445+
4446+ iowrite8(ier, uart->port.membase + TIMBUART_IER);
4447+
4448+ spin_unlock(&uart->port.lock);
4449+ dev_dbg(uart->port.dev, "%s leaving\n", __func__);
4450+}
4451+
4452+static unsigned int timbuart_tx_empty(struct uart_port *port)
4453+{
4454+ u8 isr = ioread8(port->membase + TIMBUART_ISR);
4455+
4456+ return (isr & TXBAE) ? TIOCSER_TEMT : 0;
4457+}
4458+
4459+static unsigned int timbuart_get_mctrl(struct uart_port *port)
4460+{
4461+ u8 cts = ioread8(port->membase + TIMBUART_CTRL);
4462+ dev_dbg(port->dev, "%s - cts %x\n", __func__, cts);
4463+
4464+ if (cts & TIMBUART_CTRL_CTS)
4465+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
4466+ else
4467+ return TIOCM_DSR | TIOCM_CAR;
4468+}
4469+
4470+static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
4471+{
4472+ dev_dbg(port->dev, "%s - %x\n", __func__, mctrl);
4473+
4474+ if (mctrl & TIOCM_RTS)
4475+ iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
4476+ else
4477+		iowrite8(0, port->membase + TIMBUART_CTRL);
4478+}
4479+
4480+static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier)
4481+{
4482+ unsigned int cts;
4483+
4484+ if (isr & CTS_DELTA) {
4485+ /* ack */
4486+ iowrite8(CTS_DELTA, port->membase + TIMBUART_ISR);
4487+ cts = timbuart_get_mctrl(port);
4488+ uart_handle_cts_change(port, cts & TIOCM_CTS);
4489+ wake_up_interruptible(&port->info->delta_msr_wait);
4490+ }
4491+
4492+ *ier |= CTS_DELTA;
4493+}
4494+
4495+static void timbuart_enable_ms(struct uart_port *port)
4496+{
4497+ /* N/A */
4498+}
4499+
4500+static void timbuart_break_ctl(struct uart_port *port, int ctl)
4501+{
4502+ /* N/A */
4503+}
4504+
4505+static int timbuart_startup(struct uart_port *port)
4506+{
4507+ struct timbuart_port *uart =
4508+ container_of(port, struct timbuart_port, port);
4509+
4510+ dev_dbg(port->dev, "%s\n", __func__);
4511+
4512+ iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL);
4513+ iowrite8(0xff, port->membase + TIMBUART_ISR);
4514+ /* Enable all but TX interrupts */
4515+ iowrite8(RXBAF | RXBF | RXTT | CTS_DELTA,
4516+ port->membase + TIMBUART_IER);
4517+
4518+ return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED,
4519+ "timb-uart", uart);
4520+}
4521+
4522+static void timbuart_shutdown(struct uart_port *port)
4523+{
4524+ struct timbuart_port *uart =
4525+ container_of(port, struct timbuart_port, port);
4526+ dev_dbg(port->dev, "%s\n", __func__);
4527+ free_irq(port->irq, uart);
4528+ iowrite8(0, port->membase + TIMBUART_IER);
4529+}
4530+
4531+static int get_bindex(int baud)
4532+{
4533+ int i;
4534+
4535+ for (i = 0; i < ARRAY_SIZE(baudrates); i++)
4536+ if (baud == baudrates[i])
4537+ return i;
4538+
4539+ return -1;
4540+}
4541+
4542+static void timbuart_set_termios(struct uart_port *port,
4543+ struct ktermios *termios,
4544+ struct ktermios *old)
4545+{
4546+ unsigned int baud;
4547+ short bindex;
4548+ unsigned long flags;
4549+
4550+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
4551+ bindex = get_bindex(baud);
4552+ dev_dbg(port->dev, "%s - bindex %d\n", __func__, bindex);
4553+
4554+ if (bindex < 0) {
4555+ printk(KERN_ALERT "timbuart: Unsupported baud rate\n");
4556+ } else {
4557+ spin_lock_irqsave(&port->lock, flags);
4558+ iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
4559+ uart_update_timeout(port, termios->c_cflag, baud);
4560+ spin_unlock_irqrestore(&port->lock, flags);
4561+ }
4562+}
4563+
4564+static const char *timbuart_type(struct uart_port *port)
4565+{
4566+ return port->type == PORT_UNKNOWN ? "timbuart" : NULL;
4567+}
4568+
4569+/* We do not request/release mappings of the registers here,
4570+ * currently it's done in the probe function.
4571+ */
4572+static void timbuart_release_port(struct uart_port *port)
4573+{
4574+ struct platform_device *pdev = to_platform_device(port->dev);
4575+ int size =
4576+ resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
4577+
4578+ if (port->flags & UPF_IOREMAP) {
4579+ iounmap(port->membase);
4580+ port->membase = NULL;
4581+ }
4582+
4583+ release_mem_region(port->mapbase, size);
4584+}
4585+
4586+static int timbuart_request_port(struct uart_port *port)
4587+{
4588+ struct platform_device *pdev = to_platform_device(port->dev);
4589+ int size =
4590+ resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
4591+
4592+ if (!request_mem_region(port->mapbase, size, "timb-uart"))
4593+ return -EBUSY;
4594+
4595+ if (port->flags & UPF_IOREMAP) {
4596+ port->membase = ioremap(port->mapbase, size);
4597+ if (port->membase == NULL) {
4598+ release_mem_region(port->mapbase, size);
4599+ return -ENOMEM;
4600+ }
4601+ }
4602+
4603+ return 0;
4604+}
4605+
4606+static irqreturn_t timbuart_handleinterrupt(int irq, void *devid)
4607+{
4608+ struct timbuart_port *uart = (struct timbuart_port *)devid;
4609+
4610+ uart->last_ier = ioread8(uart->port.membase + TIMBUART_IER);
4611+
4612+ /* disable interrupts, let the tasklet enable them again if needed */
4613+ iowrite8(0, uart->port.membase + TIMBUART_IER);
4614+
4615+ /* fire off bottom half */
4616+ tasklet_schedule(&uart->tasklet);
4617+
4618+ return IRQ_HANDLED;
4619+}
4620+
4621+/*
4622+ * Configure/autoconfigure the port.
4623+ */
4624+static void timbuart_config_port(struct uart_port *port, int flags)
4625+{
4626+ if (flags & UART_CONFIG_TYPE) {
4627+ port->type = PORT_TIMBUART;
4628+ timbuart_request_port(port);
4629+ }
4630+}
4631+
4632+static int timbuart_verify_port(struct uart_port *port,
4633+ struct serial_struct *ser)
4634+{
4635+ /* we don't want the core code to modify any port params */
4636+ return -EINVAL;
4637+}
4638+
4639+static struct uart_ops timbuart_ops = {
4640+ .tx_empty = timbuart_tx_empty,
4641+ .set_mctrl = timbuart_set_mctrl,
4642+ .get_mctrl = timbuart_get_mctrl,
4643+ .stop_tx = timbuart_stop_tx,
4644+ .start_tx = timbuart_start_tx,
4645+ .flush_buffer = timbuart_flush_buffer,
4646+ .stop_rx = timbuart_stop_rx,
4647+ .enable_ms = timbuart_enable_ms,
4648+ .break_ctl = timbuart_break_ctl,
4649+ .startup = timbuart_startup,
4650+ .shutdown = timbuart_shutdown,
4651+ .set_termios = timbuart_set_termios,
4652+ .type = timbuart_type,
4653+ .release_port = timbuart_release_port,
4654+ .request_port = timbuart_request_port,
4655+ .config_port = timbuart_config_port,
4656+ .verify_port = timbuart_verify_port
4657+};
4658+
4659+static struct uart_driver timbuart_driver = {
4660+ .owner = THIS_MODULE,
4661+ .driver_name = "timberdale_uart",
4662+ .dev_name = "ttyTU",
4663+ .major = TIMBUART_MAJOR,
4664+ .minor = TIMBUART_MINOR,
4665+ .nr = 1
4666+};
4667+
4668+static int timbuart_probe(struct platform_device *dev)
4669+{
4670+ int err;
4671+ struct timbuart_port *uart;
4672+ struct resource *iomem;
4673+
4674+ dev_dbg(&dev->dev, "%s\n", __func__);
4675+
4676+ uart = kzalloc(sizeof(*uart), GFP_KERNEL);
4677+ if (!uart) {
4678+ err = -EINVAL;
4679+ goto err_mem;
4680+ }
4681+
4682+ uart->usedma = 0;
4683+
4684+ uart->port.uartclk = 3250000 * 16;
4685+ uart->port.fifosize = TIMBUART_FIFO_SIZE;
4686+ uart->port.regshift = 2;
4687+ uart->port.iotype = UPIO_MEM;
4688+ uart->port.ops = &timbuart_ops;
4689+ uart->port.irq = 0;
4690+ uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
4691+ uart->port.line = 0;
4692+ uart->port.dev = &dev->dev;
4693+
4694+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
4695+ if (!iomem) {
4696+ err = -ENOMEM;
4697+ goto err_register;
4698+ }
4699+ uart->port.mapbase = iomem->start;
4700+ uart->port.membase = NULL;
4701+
4702+ uart->port.irq = platform_get_irq(dev, 0);
4703+ if (uart->port.irq < 0) {
4704+ err = -EINVAL;
4705+ goto err_register;
4706+ }
4707+
4708+ tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
4709+
4710+ err = uart_register_driver(&timbuart_driver);
4711+ if (err)
4712+ goto err_register;
4713+
4714+ err = uart_add_one_port(&timbuart_driver, &uart->port);
4715+ if (err)
4716+ goto err_add_port;
4717+
4718+ platform_set_drvdata(dev, uart);
4719+
4720+ return 0;
4721+
4722+err_add_port:
4723+ uart_unregister_driver(&timbuart_driver);
4724+err_register:
4725+ kfree(uart);
4726+err_mem:
4727+ printk(KERN_ERR "timberdale: Failed to register Timberdale UART: %d\n",
4728+ err);
4729+
4730+ return err;
4731+}
4732+
4733+static int timbuart_remove(struct platform_device *dev)
4734+{
4735+ struct timbuart_port *uart = platform_get_drvdata(dev);
4736+
4737+ tasklet_kill(&uart->tasklet);
4738+ uart_remove_one_port(&timbuart_driver, &uart->port);
4739+ uart_unregister_driver(&timbuart_driver);
4740+ kfree(uart);
4741+
4742+ return 0;
4743+}
4744+
4745+static struct platform_driver timbuart_platform_driver = {
4746+ .driver = {
4747+ .name = "timb-uart",
4748+ .owner = THIS_MODULE,
4749+ },
4750+ .probe = timbuart_probe,
4751+ .remove = timbuart_remove,
4752+};
4753+
4754+/*--------------------------------------------------------------------------*/
4755+
4756+static int __init timbuart_init(void)
4757+{
4758+ return platform_driver_register(&timbuart_platform_driver);
4759+}
4760+
4761+static void __exit timbuart_exit(void)
4762+{
4763+ platform_driver_unregister(&timbuart_platform_driver);
4764+}
4765+
4766+module_init(timbuart_init);
4767+module_exit(timbuart_exit);
4768+
4769+MODULE_DESCRIPTION("Timberdale UART driver");
4770+MODULE_LICENSE("GPL v2");
4771+MODULE_ALIAS("platform:timb-uart");
4772+
4773diff -uNr linux-2.6.29-clean/drivers/serial/timbuart.h linux-2.6.29/drivers/serial/timbuart.h
4774--- linux-2.6.29-clean/drivers/serial/timbuart.h 1969-12-31 16:00:00.000000000 -0800
4775+++ linux-2.6.29/drivers/serial/timbuart.h 2009-04-06 13:51:47.000000000 -0700
4776@@ -0,0 +1,57 @@
4777+/*
4778+ * timbuart.h timberdale FPGA UART driver
4779+ * Copyright (c) 2009 Intel Corporation
4780+ *
4781+ * This program is free software; you can redistribute it and/or modify
4782+ * it under the terms of the GNU General Public License version 2 as
4783+ * published by the Free Software Foundation.
4784+ *
4785+ * This program is distributed in the hope that it will be useful,
4786+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
4787+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4788+ * GNU General Public License for more details.
4789+ *
4790+ * You should have received a copy of the GNU General Public License
4791+ * along with this program; if not, write to the Free Software
4792+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
4793+ */
4794+
4795+/* Supports:
4796+ * Timberdale FPGA UART
4797+ */
4798+
4799+#ifndef _TIMBUART_H
4800+#define _TIMBUART_H
4801+
4802+#define TIMBUART_FIFO_SIZE 2048
4803+
4804+#define TIMBUART_RXFIFO 0x08
4805+#define TIMBUART_TXFIFO 0x0c
4806+#define TIMBUART_IER 0x10
4807+#define TIMBUART_IPR 0x14
4808+#define TIMBUART_ISR 0x18
4809+#define TIMBUART_CTRL 0x1c
4810+#define TIMBUART_BAUDRATE 0x20
4811+
4812+#define TIMBUART_CTRL_RTS 0x01
4813+#define TIMBUART_CTRL_CTS 0x02
4814+#define TIMBUART_CTRL_FLSHTX 0x40
4815+#define TIMBUART_CTRL_FLSHRX 0x80
4816+
4817+#define TXBF 0x01
4818+#define TXBAE 0x02
4819+#define CTS_DELTA 0x04
4820+#define RXDP 0x08
4821+#define RXBAF 0x10
4822+#define RXBF 0x20
4823+#define RXTT 0x40
4824+#define RXBNAE 0x80
4825+
4826+#define RXFLAGS (RXDP | RXBAF | RXBF | RXTT | RXBNAE)
4827+#define TXFLAGS (TXBF | TXBAE)
4828+
4829+#define TIMBUART_MAJOR 204
4830+#define TIMBUART_MINOR 192
4831+
4832+#endif /* _TIMBUART_H */
4833+
4834diff -uNr linux-2.6.29-clean/drivers/spi/Kconfig linux-2.6.29/drivers/spi/Kconfig
4835--- linux-2.6.29-clean/drivers/spi/Kconfig 2009-04-01 09:20:25.000000000 -0700
4836+++ linux-2.6.29/drivers/spi/Kconfig 2009-04-06 13:51:47.000000000 -0700
4837@@ -211,8 +211,8 @@
4838 SPI driver for Toshiba TXx9 MIPS SoCs
4839
4840 config SPI_XILINX
4841- tristate "Xilinx SPI controller"
4842- depends on XILINX_VIRTEX && EXPERIMENTAL
4843+ tristate "Xilinx SPI controller common module"
4844+ depends on EXPERIMENTAL
4845 select SPI_BITBANG
4846 help
4847 This exposes the SPI controller IP from the Xilinx EDK.
4848@@ -220,6 +220,25 @@
4849 See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
4850 Product Specification document (DS464) for hardware details.
4851
4852+config SPI_XILINX_OF
4853+ tristate "Xilinx SPI controller OF device"
4854+ depends on SPI_XILINX && XILINX_VIRTEX
4855+ help
4856+ This exposes the SPI controller IP from the Xilinx EDK.
4857+
4858+ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
4859+ Product Specification document (DS464) for hardware details.
4860+
4861+config SPI_XILINX_PLTFM
4862+ tristate "Xilinx SPI controller platform device"
4863+ depends on SPI_XILINX
4864+ help
4865+ This exposes the SPI controller IP from the Xilinx EDK.
4866+
4867+ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
4868+ Product Specification document (DS464) for hardware details.
4869+
4870+
4871 #
4872 # Add new SPI master controllers in alphabetical order above this line
4873 #
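The new SPI_XILINX_PLTFM option above targets boards (such as the Timberdale FPGA) that instantiate the Xilinx SPI core as a plain platform device rather than through the device tree. As a rough sketch of how board code might describe such a device for the new driver — the base address, IRQ number, clock rate and "example_" names below are invented; only the "xilinx_spi" driver name and the xspi_platform_data fields added later in this series come from the patch — registration could look like this:

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/spi/xilinx_spi.h>

/* Hypothetical register window and interrupt line of the SPI IP instance */
static struct resource example_xspi_resources[] = {
	{
		.start	= 0xc8000000,
		.end	= 0xc8000000 + 0xff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 12,
		.end	= 12,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct xspi_platform_data example_xspi_pdata = {
	.bus_num	= 0,
	.num_chipselect	= 1,
	.speed_hz	= 62500000,	/* fixed SCK rate of this IP instance */
	/* register offsets left at 0: the driver keeps its default offsets */
};

static struct platform_device example_xspi_device = {
	.name		= "xilinx_spi",		/* matches XILINX_SPI_NAME */
	.id		= 0,
	.resource	= example_xspi_resources,
	.num_resources	= ARRAY_SIZE(example_xspi_resources),
	.dev		= {
		.platform_data	= &example_xspi_pdata,
	},
};

static int __init example_board_init(void)
{
	return platform_device_register(&example_xspi_device);
}
device_initcall(example_board_init);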
4874diff -uNr linux-2.6.29-clean/drivers/spi/Makefile linux-2.6.29/drivers/spi/Makefile
4875--- linux-2.6.29-clean/drivers/spi/Makefile 2009-04-01 09:20:25.000000000 -0700
4876+++ linux-2.6.29/drivers/spi/Makefile 2009-04-06 13:51:47.000000000 -0700
4877@@ -29,6 +29,8 @@
4878 obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
4879 obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
4880 obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
4881+obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
4882+obj-$(CONFIG_SPI_XILINX_PLTFM) += xilinx_spi_pltfm.o
4883 obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
4884 # ... add above this line ...
4885
4886diff -uNr linux-2.6.29-clean/drivers/spi/xilinx_spi.c linux-2.6.29/drivers/spi/xilinx_spi.c
4887--- linux-2.6.29-clean/drivers/spi/xilinx_spi.c 2009-04-01 09:20:25.000000000 -0700
4888+++ linux-2.6.29/drivers/spi/xilinx_spi.c 2009-04-06 13:51:47.000000000 -0700
4889@@ -14,22 +14,28 @@
4890 #include <linux/module.h>
4891 #include <linux/init.h>
4892 #include <linux/interrupt.h>
4893-#include <linux/platform_device.h>
4894-
4895-#include <linux/of_platform.h>
4896-#include <linux/of_device.h>
4897-#include <linux/of_spi.h>
4898
4899 #include <linux/spi/spi.h>
4900 #include <linux/spi/spi_bitbang.h>
4901 #include <linux/io.h>
4902
4903-#define XILINX_SPI_NAME "xilinx_spi"
4904+#include "xilinx_spi.h"
4905+
4906+#ifndef CONFIG_PPC
4907+#define in_8(addr) ioread8(addr)
4908+#define in_be16(addr) ioread16(addr)
4909+#define in_be32(addr) ioread32(addr)
4910+
4911+#define out_8(addr, b) iowrite8(b, addr)
4912+#define out_be16(addr, w) iowrite16(w, addr)
4913+#define out_be32(addr, l) iowrite32(l, addr)
4914+#endif
4915+
4916
4917 /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
4918 * Product Specification", DS464
4919 */
4920-#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */
4921+#define XSPI_CR_OFFSET_DEF 0x62 /* 16-bit Control Register */
4922
4923 #define XSPI_CR_ENABLE 0x02
4924 #define XSPI_CR_MASTER_MODE 0x04
4925@@ -41,7 +47,7 @@
4926 #define XSPI_CR_MANUAL_SSELECT 0x80
4927 #define XSPI_CR_TRANS_INHIBIT 0x100
4928
4929-#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */
4930+#define XSPI_SR_OFFSET_DEF 0x67 /* 8-bit Status Register */
4931
4932 #define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
4933 #define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
4934@@ -49,10 +55,10 @@
4935 #define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
4936 #define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
4937
4938-#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */
4939-#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */
4940+#define XSPI_TXD_OFFSET_DEF 0x6b /* 8-bit Data Transmit Register */
4941+#define XSPI_RXD_OFFSET_DEF 0x6f /* 8-bit Data Receive Register */
4942
4943-#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
4944+#define XSPI_SSR_OFFSET_DEF 0x70 /* 32-bit Slave Select Register */
4945
4946 /* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414
4947 * IPIF registers are 32 bit
4948@@ -74,24 +80,10 @@
4949 #define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
4950 #define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
4951
4952-struct xilinx_spi {
4953- /* bitbang has to be first */
4954- struct spi_bitbang bitbang;
4955- struct completion done;
4956-
4957- void __iomem *regs; /* virt. address of the control registers */
4958-
4959- u32 irq;
4960-
4961- u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
4962-
4963- u8 *rx_ptr; /* pointer in the Tx buffer */
4964- const u8 *tx_ptr; /* pointer in the Rx buffer */
4965- int remaining_bytes; /* the number of bytes left to transfer */
4966-};
4967
4968-static void xspi_init_hw(void __iomem *regs_base)
4969+void xspi_init_hw(struct xilinx_spi *xspi)
4970 {
4971+ void __iomem *regs_base = xspi->regs;
4972 /* Reset the SPI device */
4973 out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET,
4974 XIPIF_V123B_RESET_MASK);
4975@@ -101,30 +93,31 @@
4976 out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET,
4977 XIPIF_V123B_GINTR_ENABLE);
4978 /* Deselect the slave on the SPI bus */
4979- out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff);
4980+ out_be32(regs_base + xspi->ssr_offset, 0xffff);
4981 /* Disable the transmitter, enable Manual Slave Select Assertion,
4982 * put SPI controller into master mode, and enable it */
4983- out_be16(regs_base + XSPI_CR_OFFSET,
4984+ out_be16(regs_base + xspi->cr_offset,
4985 XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT
4986 | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE);
4987 }
4988+EXPORT_SYMBOL(xspi_init_hw);
4989
4990-static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
4991+void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
4992 {
4993 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
4994
4995 if (is_on == BITBANG_CS_INACTIVE) {
4996 /* Deselect the slave on the SPI bus */
4997- out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff);
4998+ out_be32(xspi->regs + xspi->ssr_offset, 0xffff);
4999 } else if (is_on == BITBANG_CS_ACTIVE) {
5000 /* Set the SPI clock phase and polarity */
5001- u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET)
5002+ u16 cr = in_be16(xspi->regs + xspi->cr_offset)
5003 & ~XSPI_CR_MODE_MASK;
5004 if (spi->mode & SPI_CPHA)
5005 cr |= XSPI_CR_CPHA;
5006 if (spi->mode & SPI_CPOL)
5007 cr |= XSPI_CR_CPOL;
5008- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
5009+ out_be16(xspi->regs + xspi->cr_offset, cr);
5010
5011 /* We do not check spi->max_speed_hz here as the SPI clock
5012 * frequency is not software programmable (the IP block design
5013@@ -132,10 +125,11 @@
5014 */
5015
5016 /* Activate the chip select */
5017- out_be32(xspi->regs + XSPI_SSR_OFFSET,
5018+ out_be32(xspi->regs + xspi->ssr_offset,
5019 ~(0x0001 << spi->chip_select));
5020 }
5021 }
5022+EXPORT_SYMBOL(xilinx_spi_chipselect);
5023
5024 /* spi_bitbang requires custom setup_transfer() to be defined if there is a
5025 * custom txrx_bufs(). We have nothing to setup here as the SPI IP block
5026@@ -143,8 +137,7 @@
5027 * Check for 8 bits per word. Chip select delay calculations could be
5028 * added here as soon as bitbang_work() can be made aware of the delay value.
5029 */
5030-static int xilinx_spi_setup_transfer(struct spi_device *spi,
5031- struct spi_transfer *t)
5032+int xilinx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
5033 {
5034 u8 bits_per_word;
5035
5036@@ -157,11 +150,12 @@
5037
5038 return 0;
5039 }
5040+EXPORT_SYMBOL(xilinx_spi_setup_transfer);
5041
5042 /* the spi->mode bits understood by this driver: */
5043 #define MODEBITS (SPI_CPOL | SPI_CPHA)
5044
5045-static int xilinx_spi_setup(struct spi_device *spi)
5046+int xilinx_spi_setup(struct spi_device *spi)
5047 {
5048 struct spi_bitbang *bitbang;
5049 struct xilinx_spi *xspi;
5050@@ -188,25 +182,25 @@
5051
5052 return 0;
5053 }
5054+EXPORT_SYMBOL(xilinx_spi_setup);
5055
5056 static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
5057 {
5058 u8 sr;
5059
5060 /* Fill the Tx FIFO with as many bytes as possible */
5061- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
5062+ sr = in_8(xspi->regs + xspi->sr_offset);
5063 while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
5064- if (xspi->tx_ptr) {
5065- out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++);
5066- } else {
5067- out_8(xspi->regs + XSPI_TXD_OFFSET, 0);
5068- }
5069+ if (xspi->tx_ptr)
5070+ out_8(xspi->regs + xspi->txd_offset, *xspi->tx_ptr++);
5071+ else
5072+ out_8(xspi->regs + xspi->txd_offset, 0);
5073 xspi->remaining_bytes--;
5074- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
5075+ sr = in_8(xspi->regs + xspi->sr_offset);
5076 }
5077 }
5078
5079-static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
5080+int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
5081 {
5082 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
5083 u32 ipif_ier;
5084@@ -229,8 +223,8 @@
5085 ipif_ier | XSPI_INTR_TX_EMPTY);
5086
5087 /* Start the transfer by not inhibiting the transmitter any longer */
5088- cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
5089- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
5090+ cr = in_be16(xspi->regs + xspi->cr_offset) & ~XSPI_CR_TRANS_INHIBIT;
5091+ out_be16(xspi->regs + xspi->cr_offset, cr);
5092
5093 wait_for_completion(&xspi->done);
5094
5095@@ -239,14 +233,14 @@
5096
5097 return t->len - xspi->remaining_bytes;
5098 }
5099-
5100+EXPORT_SYMBOL(xilinx_spi_txrx_bufs);
5101
5102 /* This driver supports single master mode only. Hence Tx FIFO Empty
5103 * is the only interrupt we care about.
5104 * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
5105 * Fault are not to happen.
5106 */
5107-static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
5108+irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
5109 {
5110 struct xilinx_spi *xspi = dev_id;
5111 u32 ipif_isr;
5112@@ -264,20 +258,19 @@
5113 * transmitter while the Isr refills the transmit register/FIFO,
5114 * or make sure it is stopped if we're done.
5115 */
5116- cr = in_be16(xspi->regs + XSPI_CR_OFFSET);
5117- out_be16(xspi->regs + XSPI_CR_OFFSET,
5118+ cr = in_be16(xspi->regs + xspi->cr_offset);
5119+ out_be16(xspi->regs + xspi->cr_offset,
5120 cr | XSPI_CR_TRANS_INHIBIT);
5121
5122 /* Read out all the data from the Rx FIFO */
5123- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
5124+ sr = in_8(xspi->regs + xspi->sr_offset);
5125 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
5126 u8 data;
5127
5128- data = in_8(xspi->regs + XSPI_RXD_OFFSET);
5129- if (xspi->rx_ptr) {
5130+ data = in_8(xspi->regs + xspi->rxd_offset);
5131+ if (xspi->rx_ptr)
5132 *xspi->rx_ptr++ = data;
5133- }
5134- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
5135+ sr = in_8(xspi->regs + xspi->sr_offset);
5136 }
5137
5138 /* See if there is more data to send */
5139@@ -286,7 +279,7 @@
5140 /* Start the transfer by not inhibiting the
5141 * transmitter any longer
5142 */
5143- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
5144+ out_be16(xspi->regs + xspi->cr_offset, cr);
5145 } else {
5146 /* No more data to send.
5147 * Indicate the transfer is completed.
5148@@ -297,167 +290,18 @@
5149
5150 return IRQ_HANDLED;
5151 }
5152+EXPORT_SYMBOL(xilinx_spi_irq);
5153
5154-static int __init xilinx_spi_of_probe(struct of_device *ofdev,
5155- const struct of_device_id *match)
5156-{
5157- struct spi_master *master;
5158- struct xilinx_spi *xspi;
5159- struct resource r_irq_struct;
5160- struct resource r_mem_struct;
5161-
5162- struct resource *r_irq = &r_irq_struct;
5163- struct resource *r_mem = &r_mem_struct;
5164- int rc = 0;
5165- const u32 *prop;
5166- int len;
5167-
5168- /* Get resources(memory, IRQ) associated with the device */
5169- master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi));
5170-
5171- if (master == NULL) {
5172- return -ENOMEM;
5173- }
5174-
5175- dev_set_drvdata(&ofdev->dev, master);
5176-
5177- rc = of_address_to_resource(ofdev->node, 0, r_mem);
5178- if (rc) {
5179- dev_warn(&ofdev->dev, "invalid address\n");
5180- goto put_master;
5181- }
5182-
5183- rc = of_irq_to_resource(ofdev->node, 0, r_irq);
5184- if (rc == NO_IRQ) {
5185- dev_warn(&ofdev->dev, "no IRQ found\n");
5186- goto put_master;
5187- }
5188-
5189- xspi = spi_master_get_devdata(master);
5190- xspi->bitbang.master = spi_master_get(master);
5191- xspi->bitbang.chipselect = xilinx_spi_chipselect;
5192- xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
5193- xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
5194- xspi->bitbang.master->setup = xilinx_spi_setup;
5195- init_completion(&xspi->done);
5196-
5197- xspi->irq = r_irq->start;
5198-
5199- if (!request_mem_region(r_mem->start,
5200- r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) {
5201- rc = -ENXIO;
5202- dev_warn(&ofdev->dev, "memory request failure\n");
5203- goto put_master;
5204- }
5205-
5206- xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
5207- if (xspi->regs == NULL) {
5208- rc = -ENOMEM;
5209- dev_warn(&ofdev->dev, "ioremap failure\n");
5210- goto put_master;
5211- }
5212- xspi->irq = r_irq->start;
5213-
5214- /* dynamic bus assignment */
5215- master->bus_num = -1;
5216-
5217- /* number of slave select bits is required */
5218- prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
5219- if (!prop || len < sizeof(*prop)) {
5220- dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
5221- goto put_master;
5222- }
5223- master->num_chipselect = *prop;
5224-
5225- /* SPI controller initializations */
5226- xspi_init_hw(xspi->regs);
5227-
5228- /* Register for SPI Interrupt */
5229- rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
5230- if (rc != 0) {
5231- dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq);
5232- goto unmap_io;
5233- }
5234-
5235- rc = spi_bitbang_start(&xspi->bitbang);
5236- if (rc != 0) {
5237- dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n");
5238- goto free_irq;
5239- }
5240-
5241- dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
5242- (unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq);
5243-
5244- /* Add any subnodes on the SPI bus */
5245- of_register_spi_devices(master, ofdev->node);
5246-
5247- return rc;
5248-
5249-free_irq:
5250- free_irq(xspi->irq, xspi);
5251-unmap_io:
5252- iounmap(xspi->regs);
5253-put_master:
5254- spi_master_put(master);
5255- return rc;
5256-}
5257-
5258-static int __devexit xilinx_spi_remove(struct of_device *ofdev)
5259+void xilinx_spi_set_default_reg_offsets(struct xilinx_spi *xspi)
5260 {
5261- struct xilinx_spi *xspi;
5262- struct spi_master *master;
5263-
5264- master = platform_get_drvdata(ofdev);
5265- xspi = spi_master_get_devdata(master);
5266-
5267- spi_bitbang_stop(&xspi->bitbang);
5268- free_irq(xspi->irq, xspi);
5269- iounmap(xspi->regs);
5270- dev_set_drvdata(&ofdev->dev, 0);
5271- spi_master_put(xspi->bitbang.master);
5272-
5273- return 0;
5274-}
5275-
5276-/* work with hotplug and coldplug */
5277-MODULE_ALIAS("platform:" XILINX_SPI_NAME);
5278-
5279-static int __exit xilinx_spi_of_remove(struct of_device *op)
5280-{
5281- return xilinx_spi_remove(op);
5282+ xspi->cr_offset = XSPI_CR_OFFSET_DEF;
5283+ xspi->sr_offset = XSPI_SR_OFFSET_DEF;
5284+ xspi->txd_offset = XSPI_TXD_OFFSET_DEF;
5285+ xspi->rxd_offset = XSPI_RXD_OFFSET_DEF;
5286+ xspi->ssr_offset = XSPI_SSR_OFFSET_DEF;
5287 }
5288+EXPORT_SYMBOL(xilinx_spi_set_default_reg_offsets);
5289
5290-static struct of_device_id xilinx_spi_of_match[] = {
5291- { .compatible = "xlnx,xps-spi-2.00.a", },
5292- { .compatible = "xlnx,xps-spi-2.00.b", },
5293- {}
5294-};
5295-
5296-MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
5297-
5298-static struct of_platform_driver xilinx_spi_of_driver = {
5299- .owner = THIS_MODULE,
5300- .name = "xilinx-xps-spi",
5301- .match_table = xilinx_spi_of_match,
5302- .probe = xilinx_spi_of_probe,
5303- .remove = __exit_p(xilinx_spi_of_remove),
5304- .driver = {
5305- .name = "xilinx-xps-spi",
5306- .owner = THIS_MODULE,
5307- },
5308-};
5309-
5310-static int __init xilinx_spi_init(void)
5311-{
5312- return of_register_platform_driver(&xilinx_spi_of_driver);
5313-}
5314-module_init(xilinx_spi_init);
5315-
5316-static void __exit xilinx_spi_exit(void)
5317-{
5318- of_unregister_platform_driver(&xilinx_spi_of_driver);
5319-}
5320-module_exit(xilinx_spi_exit);
5321 MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
5322 MODULE_DESCRIPTION("Xilinx SPI driver");
5323 MODULE_LICENSE("GPL");
5324diff -uNr linux-2.6.29-clean/drivers/spi/xilinx_spi.h linux-2.6.29/drivers/spi/xilinx_spi.h
5325--- linux-2.6.29-clean/drivers/spi/xilinx_spi.h 1969-12-31 16:00:00.000000000 -0800
5326+++ linux-2.6.29/drivers/spi/xilinx_spi.h 2009-04-06 13:51:47.000000000 -0700
5327@@ -0,0 +1,52 @@
5328+/*
5329+ * xilinx_spi.h
5330+ *
5331+ * Xilinx SPI controller driver (master mode only)
5332+ *
5333+ * Author: MontaVista Software, Inc.
5334+ * source@mvista.com
5335+ *
5336+ * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the
5337+ * terms of the GNU General Public License version 2. This program is licensed
5338+ * "as is" without any warranty of any kind, whether express or implied.
5339+ */
5340+
5341+#ifndef _XILINX_SPI_H_
5342+#define _XILINX_SPI_H_ 1
5343+
5344+#include <linux/spi/spi.h>
5345+#include <linux/spi/spi_bitbang.h>
5346+
5347+#define XILINX_SPI_NAME "xilinx_spi"
5348+
5349+
5350+struct xilinx_spi {
5351+ /* bitbang has to be first */
5352+ struct spi_bitbang bitbang;
5353+ struct completion done;
5354+
5355+ void __iomem *regs; /* virt. address of the control registers */
5356+
5357+ u32 irq;
5358+
5359+ u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
5360+
5361+ u8 *rx_ptr; /* pointer in the Tx buffer */
5362+ const u8 *tx_ptr; /* pointer in the Rx buffer */
5363+ int remaining_bytes; /* the number of bytes left to transfer */
5364+ /* offset to the XSPI regs, these might vary... */
5365+ u8 cr_offset;
5366+ u8 sr_offset;
5367+ u8 txd_offset;
5368+ u8 rxd_offset;
5369+ u8 ssr_offset;
5370+};
5371+
5372+void xspi_init_hw(struct xilinx_spi *xspi);
5373+void xilinx_spi_set_default_reg_offsets(struct xilinx_spi *xspi);
5374+void xilinx_spi_chipselect(struct spi_device *spi, int is_on);
5375+int xilinx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t);
5376+int xilinx_spi_setup(struct spi_device *spi);
5377+int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t);
5378+irqreturn_t xilinx_spi_irq(int irq, void *dev_id);
5379+#endif
5380diff -uNr linux-2.6.29-clean/drivers/spi/xilinx_spi_of.c linux-2.6.29/drivers/spi/xilinx_spi_of.c
5381--- linux-2.6.29-clean/drivers/spi/xilinx_spi_of.c 1969-12-31 16:00:00.000000000 -0800
5382+++ linux-2.6.29/drivers/spi/xilinx_spi_of.c 2009-04-06 13:51:47.000000000 -0700
5383@@ -0,0 +1,193 @@
5384+/*
5385+ * xilinx_spi_of.c
5386+ *
5387+ * Xilinx SPI controller driver (master mode only)
5388+ *
5389+ * Author: MontaVista Software, Inc.
5390+ * source@mvista.com
5391+ *
5392+ * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the
5393+ * terms of the GNU General Public License version 2. This program is licensed
5394+ * "as is" without any warranty of any kind, whether express or implied.
5395+ */
5396+
5397+#include <linux/module.h>
5398+#include <linux/init.h>
5399+#include <linux/interrupt.h>
5400+#include <linux/io.h>
5401+#include <linux/platform_device.h>
5402+
5403+#include <linux/of_platform.h>
5404+#include <linux/of_device.h>
5405+#include <linux/of_spi.h>
5406+
5407+#include <linux/spi/spi.h>
5408+#include <linux/spi/spi_bitbang.h>
5409+
5410+#include "xilinx_spi.h"
5411+
5412+
5413+static int __init xilinx_spi_of_probe(struct of_device *ofdev,
5414+ const struct of_device_id *match)
5415+{
5416+ struct spi_master *master;
5417+ struct xilinx_spi *xspi;
5418+ struct resource r_irq_struct;
5419+ struct resource r_mem_struct;
5420+
5421+ struct resource *r_irq = &r_irq_struct;
5422+ struct resource *r_mem = &r_mem_struct;
5423+ int rc = 0;
5424+ const u32 *prop;
5425+ int len;
5426+
5427+ /* Get resources(memory, IRQ) associated with the device */
5428+ master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi));
5429+
5430+ if (master == NULL)
5431+ return -ENOMEM;
5432+
5433+ dev_set_drvdata(&ofdev->dev, master);
5434+
5435+ rc = of_address_to_resource(ofdev->node, 0, r_mem);
5436+ if (rc) {
5437+ dev_warn(&ofdev->dev, "invalid address\n");
5438+ goto put_master;
5439+ }
5440+
5441+ rc = of_irq_to_resource(ofdev->node, 0, r_irq);
5442+ if (rc == NO_IRQ) {
5443+ dev_warn(&ofdev->dev, "no IRQ found\n");
5444+ goto put_master;
5445+ }
5446+
5447+ xspi = spi_master_get_devdata(master);
5448+ xspi->bitbang.master = spi_master_get(master);
5449+ xspi->bitbang.chipselect = xilinx_spi_chipselect;
5450+ xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
5451+ xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
5452+ xspi->bitbang.master->setup = xilinx_spi_setup;
5453+ init_completion(&xspi->done);
5454+
5455+ xspi->irq = r_irq->start;
5456+
5457+ if (!request_mem_region(r_mem->start,
5458+ r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) {
5459+ rc = -ENXIO;
5460+ dev_warn(&ofdev->dev, "memory request failure\n");
5461+ goto put_master;
5462+ }
5463+
5464+ xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
5465+ if (xspi->regs == NULL) {
5466+ rc = -ENOMEM;
5467+ dev_warn(&ofdev->dev, "ioremap failure\n");
5468+ goto put_master;
5469+ }
5470+ xspi->irq = r_irq->start;
5471+
5472+ /* dynamic bus assignment */
5473+ master->bus_num = -1;
5474+
5475+ /* number of slave select bits is required */
5476+ prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
5477+ if (!prop || len < sizeof(*prop)) {
5478+ dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
5479+ goto put_master;
5480+ }
5481+ master->num_chipselect = *prop;
5482+
5483+ xilinx_spi_set_default_reg_offsets(xspi);
5484+
5485+ /* SPI controller initializations */
5486+	xspi_init_hw(xspi);
5487+
5488+ /* Register for SPI Interrupt */
5489+ rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
5490+ if (rc != 0) {
5491+ dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq);
5492+ goto unmap_io;
5493+ }
5494+
5495+ rc = spi_bitbang_start(&xspi->bitbang);
5496+ if (rc != 0) {
5497+ dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n");
5498+ goto free_irq;
5499+ }
5500+
5501+ dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
5502+ (unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq);
5503+
5504+ /* Add any subnodes on the SPI bus */
5505+ of_register_spi_devices(master, ofdev->node);
5506+
5507+ return rc;
5508+
5509+free_irq:
5510+ free_irq(xspi->irq, xspi);
5511+unmap_io:
5512+ iounmap(xspi->regs);
5513+put_master:
5514+ spi_master_put(master);
5515+ return rc;
5516+}
5517+
5518+static int __devexit xilinx_spi_remove(struct of_device *ofdev)
5519+{
5520+ struct xilinx_spi *xspi;
5521+ struct spi_master *master;
5522+
5523+ master = platform_get_drvdata(ofdev);
5524+ xspi = spi_master_get_devdata(master);
5525+
5526+ spi_bitbang_stop(&xspi->bitbang);
5527+ free_irq(xspi->irq, xspi);
5528+ iounmap(xspi->regs);
5529+ dev_set_drvdata(&ofdev->dev, 0);
5530+ spi_master_put(xspi->bitbang.master);
5531+
5532+ return 0;
5533+}
5534+
5535+/* work with hotplug and coldplug */
5536+MODULE_ALIAS("platform:" XILINX_SPI_NAME);
5537+
5538+static int __exit xilinx_spi_of_remove(struct of_device *op)
5539+{
5540+ return xilinx_spi_remove(op);
5541+}
5542+
5543+static struct of_device_id xilinx_spi_of_match[] = {
5544+ { .compatible = "xlnx,xps-spi-2.00.a", },
5545+ { .compatible = "xlnx,xps-spi-2.00.b", },
5546+ {}
5547+};
5548+
5549+MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
5550+
5551+static struct of_platform_driver xilinx_spi_of_driver = {
5552+ .owner = THIS_MODULE,
5553+ .name = "xilinx-xps-spi",
5554+ .match_table = xilinx_spi_of_match,
5555+ .probe = xilinx_spi_of_probe,
5556+ .remove = __exit_p(xilinx_spi_of_remove),
5557+ .driver = {
5558+ .name = "xilinx-xps-spi",
5559+ .owner = THIS_MODULE,
5560+ },
5561+};
5562+
5563+static int __init xilinx_spi_init(void)
5564+{
5565+ return of_register_platform_driver(&xilinx_spi_of_driver);
5566+}
5567+module_init(xilinx_spi_init);
5568+
5569+static void __exit xilinx_spi_exit(void)
5570+{
5571+ of_unregister_platform_driver(&xilinx_spi_of_driver);
5572+}
5573+module_exit(xilinx_spi_exit);
5574+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
5575+MODULE_DESCRIPTION("Xilinx SPI driver");
5576+MODULE_LICENSE("GPL");
5577diff -uNr linux-2.6.29-clean/drivers/spi/xilinx_spi_pltfm.c linux-2.6.29/drivers/spi/xilinx_spi_pltfm.c
5578--- linux-2.6.29-clean/drivers/spi/xilinx_spi_pltfm.c 1969-12-31 16:00:00.000000000 -0800
5579+++ linux-2.6.29/drivers/spi/xilinx_spi_pltfm.c 2009-04-06 13:51:47.000000000 -0700
5580@@ -0,0 +1,184 @@
5581+/*
5582+ * xilinx_spi_pltfm.c Support for Xilinx SPI platform devices
5583+ * Copyright (c) 2009 Intel Corporation
5584+ *
5585+ * This program is free software; you can redistribute it and/or modify
5586+ * it under the terms of the GNU General Public License version 2 as
5587+ * published by the Free Software Foundation.
5588+ *
5589+ * This program is distributed in the hope that it will be useful,
5590+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
5591+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5592+ * GNU General Public License for more details.
5593+ *
5594+ * You should have received a copy of the GNU General Public License
5595+ * along with this program; if not, write to the Free Software
5596+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
5597+ */
5598+
5599+/* Supports:
5600+ * Xilinx SPI devices as platform devices
5601+ *
5602+ * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
5603+ */
5604+
5605+#include <linux/module.h>
5606+#include <linux/init.h>
5607+#include <linux/interrupt.h>
5608+#include <linux/io.h>
5609+#include <linux/platform_device.h>
5610+
5611+#include <linux/spi/spi.h>
5612+#include <linux/spi/spi_bitbang.h>
5613+#include <linux/spi/xilinx_spi.h>
5614+
5615+#include "xilinx_spi.h"
5616+
5617+static int __init xilinx_spi_probe(struct platform_device *dev)
5618+{
5619+ int ret = 0;
5620+ struct spi_master *master;
5621+ struct xilinx_spi *xspi;
5622+ struct xspi_platform_data *pdata;
5623+ struct resource *r;
5624+
5625+ master = spi_alloc_master(&dev->dev, sizeof(struct xilinx_spi));
5626+
5627+ if (master == NULL)
5628+ return -ENOMEM;
5629+
5630+
5631+ platform_set_drvdata(dev, master);
5632+ pdata = dev->dev.platform_data;
5633+ if (pdata == NULL) {
5634+ ret = -ENODEV;
5635+ goto put_master;
5636+ }
5637+
5638+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
5639+ if (r == NULL) {
5640+ ret = -ENODEV;
5641+ goto put_master;
5642+ }
5643+
5644+ xspi = spi_master_get_devdata(master);
5645+ xspi->bitbang.master = spi_master_get(master);
5646+ xspi->bitbang.chipselect = xilinx_spi_chipselect;
5647+ xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
5648+ xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
5649+ xspi->bitbang.master->setup = xilinx_spi_setup;
5650+ init_completion(&xspi->done);
5651+
5652+ if (!request_mem_region(r->start, resource_size(r), XILINX_SPI_NAME)) {
5653+ ret = -ENXIO;
5654+ goto put_master;
5655+ }
5656+
5657+ xspi->regs = ioremap(r->start, resource_size(r));
5658+ if (xspi->regs == NULL) {
5659+ ret = -ENOMEM;
5660+ goto map_failed;
5661+ }
5662+
5663+ ret = platform_get_irq(dev, 0);
5664+ if (ret < 0) {
5665+ ret = -ENXIO;
5666+ goto unmap_io;
5667+ }
5668+ xspi->irq = ret;
5669+
5670+ master->bus_num = pdata->bus_num;
5671+ master->num_chipselect = pdata->num_chipselect;
5672+ xspi->speed_hz = pdata->speed_hz;
5673+ xilinx_spi_set_default_reg_offsets(xspi);
5674+ if (pdata->cr_offset)
5675+ xspi->cr_offset = pdata->cr_offset;
5676+ if (pdata->sr_offset)
5677+ xspi->sr_offset = pdata->sr_offset;
5678+ if (pdata->txd_offset)
5679+ xspi->txd_offset = pdata->txd_offset;
5680+ if (pdata->rxd_offset)
5681+ xspi->rxd_offset = pdata->rxd_offset;
5682+ if (pdata->ssr_offset)
5683+ xspi->ssr_offset = pdata->ssr_offset;
5684+
5685+ /* SPI controller initializations */
5686+ xspi_init_hw(xspi);
5687+
5688+ /* Register for SPI Interrupt */
5689+ ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
5690+ if (ret != 0)
5691+ goto unmap_io;
5692+
5693+ ret = spi_bitbang_start(&xspi->bitbang);
5694+ if (ret != 0) {
5695+ dev_err(&dev->dev, "spi_bitbang_start FAILED\n");
5696+ goto free_irq;
5697+ }
5698+
5699+ dev_info(&dev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
5700+ (u32)r->start, (u32)xspi->regs, xspi->irq);
5701+ return ret;
5702+
5703+free_irq:
5704+ free_irq(xspi->irq, xspi);
5705+unmap_io:
5706+ iounmap(xspi->regs);
5707+map_failed:
5708+ release_mem_region(r->start, resource_size(r));
5709+put_master:
5710+ spi_master_put(master);
5711+ return ret;
5712+}
5713+
5714+static int __devexit xilinx_spi_remove(struct platform_device *dev)
5715+{
5716+ struct xilinx_spi *xspi;
5717+ struct spi_master *master;
5718+ struct resource *r;
5719+
5720+ master = platform_get_drvdata(dev);
5721+ xspi = spi_master_get_devdata(master);
5722+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
5723+
5724+ spi_bitbang_stop(&xspi->bitbang);
5725+ free_irq(xspi->irq, xspi);
5726+ iounmap(xspi->regs);
5727+
5728+ if (r)
5729+ release_mem_region(r->start, resource_size(r));
5730+
5731+ platform_set_drvdata(dev, 0);
5732+ spi_master_put(xspi->bitbang.master);
5733+
5734+ return 0;
5735+}
5736+
5737+/* work with hotplug and coldplug */
5738+MODULE_ALIAS("platform:" XILINX_SPI_NAME);
5739+
5740+static struct platform_driver xilinx_spi_driver = {
5741+ .probe = xilinx_spi_probe,
5742+ .remove = __devexit_p(xilinx_spi_remove),
5743+ .driver = {
5744+ .name = XILINX_SPI_NAME,
5745+ .owner = THIS_MODULE,
5746+ },
5747+};
5748+
5749+static int __init xilinx_spi_init(void)
5750+{
5751+ return platform_driver_register(&xilinx_spi_driver);
5752+}
5753+module_init(xilinx_spi_init);
5754+
5755+static void __exit xilinx_spi_exit(void)
5756+{
5757+ platform_driver_unregister(&xilinx_spi_driver);
5758+}
5759+module_exit(xilinx_spi_exit);
5760+
5761+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
5762+MODULE_DESCRIPTION("Xilinx SPI platform driver");
5763+MODULE_LICENSE("GPL v2");
5764+
5765diff -uNr linux-2.6.29-clean/include/linux/i2c-ocores.h linux-2.6.29/include/linux/i2c-ocores.h
5766--- linux-2.6.29-clean/include/linux/i2c-ocores.h 2009-04-01 09:20:20.000000000 -0700
5767+++ linux-2.6.29/include/linux/i2c-ocores.h 2009-04-06 13:51:47.000000000 -0700
5768@@ -14,6 +14,8 @@
5769 struct ocores_i2c_platform_data {
5770 u32 regstep; /* distance between registers */
5771 u32 clock_khz; /* input clock in kHz */
5772+ u8 num_devices; /* number of devices in the devices list */
5773+ struct i2c_board_info const *devices; /* devices connected to the bus */
5774 };
5775
5776 #endif /* _LINUX_I2C_OCORES_H */
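The two fields added above let whoever registers the OpenCores I2C controller also list the slave devices wired to that bus, so the bus driver can instantiate them when it probes (the consuming code is not part of this hunk). A minimal sketch of filling the structure in — the slave name, address, regstep and clock values are placeholders, not taken from the patch — might be:

#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/i2c-ocores.h>

/* Illustrative slave entry; name and 7-bit address are placeholders */
static struct i2c_board_info const example_ocores_i2c_devices[] = {
	{ I2C_BOARD_INFO("adv7180", 0x21) },
};

static struct ocores_i2c_platform_data example_ocores_i2c_data = {
	.regstep	= 4,		/* registers laid out on 32-bit boundaries */
	.clock_khz	= 62500,	/* input clock of the IP core, in kHz */
	.num_devices	= ARRAY_SIZE(example_ocores_i2c_devices),
	.devices	= example_ocores_i2c_devices,
};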
5777diff -uNr linux-2.6.29-clean/include/linux/mfd/timbdma.h linux-2.6.29/include/linux/mfd/timbdma.h
5778--- linux-2.6.29-clean/include/linux/mfd/timbdma.h 1969-12-31 16:00:00.000000000 -0800
5779+++ linux-2.6.29/include/linux/mfd/timbdma.h 2009-04-06 13:51:47.000000000 -0700
5780@@ -0,0 +1,80 @@
5781+/*
5782+ * timbdma.h timberdale FPGA DMA driver defines
5783+ * Copyright (c) 2009 Intel Corporation
5784+ *
5785+ * This program is free software; you can redistribute it and/or modify
5786+ * it under the terms of the GNU General Public License version 2 as
5787+ * published by the Free Software Foundation.
5788+ *
5789+ * This program is distributed in the hope that it will be useful,
5790+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
5791+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5792+ * GNU General Public License for more details.
5793+ *
5794+ * You should have received a copy of the GNU General Public License
5795+ * along with this program; if not, write to the Free Software
5796+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
5797+ */
5798+
5799+/* Supports:
5800+ * Timberdale FPGA DMA engine
5801+ */
5802+
5803+#ifndef _TIMBDMA_H
5804+#define _TIMBDMA_H
5805+
5806+#include <linux/spinlock.h>
5807+
5808+
5809+#define DMA_IRQ_UART_RX 0x01
5810+#define DMA_IRQ_UART_TX 0x02
5811+#define DMA_IRQ_MLB_RX 0x04
5812+#define DMA_IRQ_MLB_TX 0x08
5813+#define DMA_IRQ_VIDEO_RX 0x10
5814+#define DMA_IRQ_VIDEO_DROP 0x20
5815+#define DMA_IRQS 6
5816+
5817+
5818+typedef int (*timbdma_interruptcb)(u32 flag, void *data);
5819+
5820+enum timbdma_ctrlmap {
5821+ timbdma_ctrlmap_DMACFGBTUART = 0x000000,
5822+ timbdma_ctrlmap_DMACFGMLBSY = 0x000040,
5823+ timbdma_ctrlmap_DMACFGVIDEO = 0x000080,
5824+ timbdma_ctrlmap_TIMBSTATUS = 0x080000,
5825+ timbdma_ctrlmap_TIMBPEND = 0x080004,
5826+ timbdma_ctrlmap_TIMBENABLE = 0x080008,
5827+ timbdma_ctrlmap_VIDEOBUFFER = 0x200000
5828+};
5829+
5830+enum timbdma_dmacfg {
5831+ timbdma_dmacfg_RXSTARTH = 0x00,
5832+ timbdma_dmacfg_RXSTARTL = 0x04,
5833+ timbdma_dmacfg_RXLENGTH = 0x08,
5834+ timbdma_dmacfg_RXFPGAWP = 0x0C,
5835+ timbdma_dmacfg_RXSWRP = 0x10,
5836+ timbdma_dmacfg_RXENABLE = 0x14,
5837+ timbdma_dmacfg_TXSTARTH = 0x18,
5838+ timbdma_dmacfg_TXSTARTL = 0x1C,
5839+ timbdma_dmacfg_TXLENGTH = 0x20,
5840+ timbdma_dmacfg_TXSWWP = 0x24,
5841+ timbdma_dmacfg_TXFPGARP = 0x28,
5842+ timbdma_dmacfg_TXBEFINT = 0x2C,
5843+ timbdma_dmacfg_BPERROW = 0x30
5844+};
5845+
5846+struct timbdma_dev {
5847+ void __iomem *membase;
5848+ timbdma_interruptcb callbacks[DMA_IRQS];
5849+ void *callback_data[DMA_IRQS];
5850+ spinlock_t lock; /* mutual exclusion */
5851+};
5852+
5853+void timb_start_dma(u32 flag, unsigned long buf, int len, int bytes_per_row);
5854+
5855+void *timb_stop_dma(u32 flags);
5856+
5857+void timb_set_dma_interruptcb(u32 flags, timbdma_interruptcb icb, void *data);
5858+
5859+#endif /* _TIMBDMA_H */
5860+
5861diff -uNr linux-2.6.29-clean/include/linux/mfd/timbi2s.h linux-2.6.29/include/linux/mfd/timbi2s.h
5862--- linux-2.6.29-clean/include/linux/mfd/timbi2s.h 1969-12-31 16:00:00.000000000 -0800
5863+++ linux-2.6.29/include/linux/mfd/timbi2s.h 2009-04-06 13:51:47.000000000 -0700
5864@@ -0,0 +1,66 @@
5865+/*
5866+ * timbi2s.h timberdale FPGA I2S driver
5867+ * Copyright (c) 2009 Intel Corporation
5868+ *
5869+ * This program is free software; you can redistribute it and/or modify
5870+ * it under the terms of the GNU General Public License version 2 as
5871+ * published by the Free Software Foundation.
5872+ *
5873+ * This program is distributed in the hope that it will be useful,
5874+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
5875+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5876+ * GNU General Public License for more details.
5877+ *
5878+ * You should have received a copy of the GNU General Public License
5879+ * along with this program; if not, write to the Free Software
5880+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
5881+ */
5882+
5883+/* Supports:
5884+ * Timberdale FPGA I2S
5885+ */
5886+
5887+struct timbi2s_bus_control {
5888+ struct list_head list;
5889+};
5890+
5891+struct timbi2s_bus {
5892+ void __iomem *membase;
5893+ u32 irq;
5894+ struct timbi2s_bus_control *control;
5895+ struct workqueue_struct *workqueue;
5896+ struct work_struct work;
5897+};
5898+
5899+struct timbi2s_dev {
5900+ void __iomem *membase;
5901+ u32 irq;
5902+ struct timbi2s_bus *bus;
5903+ struct workqueue_struct *workqueue;
5904+ struct work_struct work;
5905+ u32 ioctrl;
5906+ u32 devid;
5907+ u8 timbi2s_rx;
5908+ u8 timbi2s_tx;
5909+ struct circ_buf *buffer;
5910+ /* Register access */
5911+ spinlock_t lock;
5912+
5913+ int in_use;
5914+ u8 pscale_offset; /* Prescale */
5915+ u8 icr_offset; /* Clear register */
5916+ u8 isr_offset; /* Status */
5917+ u8 ipr_offset; /* Pending register */
5918+ u8 ier_offset; /* Interrupt Enable register */
5919+ u8 ctrl_offset;
5920+ u8 fifo;
5921+
5922+ struct list_head item;
5923+};
5924+
5925+static struct timbi2s_dev *timbi2s_get_tx(void);
5926+static struct timbi2s_dev *timbi2s_get_rx(void);
5927+static void timbi2s_put(struct timbi2s_dev *tdev);
5928+
5929+static int timbi2s_ioctrl(struct timbi2s_dev *i2sdev);
5930+
5931diff -uNr linux-2.6.29-clean/include/linux/serial_core.h linux-2.6.29/include/linux/serial_core.h
5932--- linux-2.6.29-clean/include/linux/serial_core.h 2009-04-01 09:20:20.000000000 -0700
5933+++ linux-2.6.29/include/linux/serial_core.h 2009-04-06 13:51:47.000000000 -0700
5934@@ -164,6 +164,9 @@
5935 /* NWPSERIAL */
5936 #define PORT_NWPSERIAL 85
5937
5938+/* Timberdale UART */
5939+#define PORT_TIMBUART 86
5940+
5941 #ifdef __KERNEL__
5942
5943 #include <linux/compiler.h>
5944diff -uNr linux-2.6.29-clean/include/linux/spi/xilinx_spi.h linux-2.6.29/include/linux/spi/xilinx_spi.h
5945--- linux-2.6.29-clean/include/linux/spi/xilinx_spi.h 1969-12-31 16:00:00.000000000 -0800
5946+++ linux-2.6.29/include/linux/spi/xilinx_spi.h 2009-04-06 13:51:47.000000000 -0700
5947@@ -0,0 +1,17 @@
5948+#ifndef __LINUX_SPI_XILINX_SPI_H
5949+#define __LINUX_SPI_XILINX_SPI_H
5950+
5951+/* SPI Controller IP */
5952+struct xspi_platform_data {
5953+ s16 bus_num;
5954+ u16 num_chipselect;
5955+ u32 speed_hz;
5956+ u8 cr_offset;
5957+ u8 sr_offset;
5958+ u8 txd_offset;
5959+ u8 rxd_offset;
5960+ u8 ssr_offset;
5961+};
5962+
5963+#endif /* __LINUX_SPI_XILINX_SPI_H */
5964+
5965diff -uNr linux-2.6.29-clean/include/media/adv7180.h linux-2.6.29/include/media/adv7180.h
5966--- linux-2.6.29-clean/include/media/adv7180.h 1969-12-31 16:00:00.000000000 -0800
5967+++ linux-2.6.29/include/media/adv7180.h 2009-04-06 13:51:47.000000000 -0700
5968@@ -0,0 +1,127 @@
5969+/*
5970+ * adv7180.h Analog Devices ADV7180 video decoder driver defines
5971+ * Copyright (c) 2009 Intel Corporation
5972+ *
5973+ * This program is free software; you can redistribute it and/or modify
5974+ * it under the terms of the GNU General Public License version 2 as
5975+ * published by the Free Software Foundation.
5976+ *
5977+ * This program is distributed in the hope that it will be useful,
5978+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
5979+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5980+ * GNU General Public License for more details.
5981+ *
5982+ * You should have received a copy of the GNU General Public License
5983+ * along with this program; if not, write to the Free Software
5984+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
5985+ */
5986+
5987+#define DRIVER_NAME "adv7180"
5988+
5989+#define I2C_ADV7180 0x42
5990+#define ADV7180_NR_REG 0xfc
5991+
5992+#define ADV7180_IN_CTRL 0x00 /* Input CR */
5993+#define ADV7180_OUT_CTRL 0x03 /* Output CR */
5994+#define ADV7180_EXT_OUT_CTRL 0x04 /* Extended Output CR */
5995+
5996+#define ADV7180_ADI_CTRL 0x0e /* ADI CR */
5997+# define ADI_ENABLE 0x20 /* Enable access to sub-regs */
5998+
5999+#define ADV7180_SR_1 0x10 /* Status Register 1 */
6000+#define ADV7180_SR_2 0x12
6001+#define ADV7180_SR_3 0x13
6002+
6003+/* Interrupt and VDP sub-registers */
6004+#define ADV7180_ISR_1 0x42 /* Interrupt Status Register 1 */
6005+#define ADV7180_ICR_1 0x43 /* Interrupt Clear Register 1 */
6006+
6007+#define ADV7180_ISR_2 0x46
6008+#define ADV7180_ICR_2 0x47
6009+
6010+#define ADV7180_ISR_3 0x4a
6011+#define ADV7180_ICR_3 0x4b
6012+
6013+#define ADV7180_ISR_4 0x4e
6014+#define ADV7180_ICR_4 0x4f
6015+/* */
6016+
6017+#define ADV7180_SR 0x10
6018+#define ADV7180_STATUS_NTSM 0x00 /* NTSC M/J */
6019+#define ADV7180_STATUS_NTSC 0x10 /* NTSC 4.43 */
6020+#define ADV7180_STATUS_PAL_M 0x20 /* PAL M */
6021+#define ADV7180_STATUS_PAL_60 0x30 /* PAL 60 */
6022+#define ADV7180_STATUS_PAL 0x40 /* PAL B/G/H/I/D */
6023+#define ADV7180_STATUS_SECAM 0x50 /* SECAM */
6024+#define ADV7180_STATUS_PAL_N 0x60 /* PAL Combination N */
6025+#define ADV7180_STATUS_SECAM_525 0x70 /* SECAM 525 */
6026+
6027+enum input_mode {
6028+ CVBS, /* Composite */
6029+ SVIDEO, /* S-video */
6030+ YPbPr, /* Component */
6031+};
6032+
6033+struct adv7180 {
6034+ unsigned char reg[ADV7180_NR_REG];
6035+ int norm;
6036+ enum input_mode input;
6037+ int enable;
6038+ struct i2c_client *client;
6039+};
6040+
6041+static const unsigned char reset_icr[] = {
6042+ ADV7180_ICR_1, 0x00,
6043+ ADV7180_ICR_2, 0x00,
6044+ ADV7180_ICR_3, 0x00,
6045+ ADV7180_ICR_4, 0x00,
6046+};
6047+
6048+/* ADV7180 LQFP-64. ADV7180.pdf, page 104 */
6049+static const unsigned char init_cvbs_64[] = {
6050+ 0x00, 0x01, /* INSEL = CVBS in on Ain2 */
6051+ 0x04, 0x57, /* Enable SFL */
6052+ 0x17, 0x41, /* Select SH1 */
6053+
6054+ 0x31, 0x02, /* Clear NEWAV_MODE, SAV/EAV to
6055+ * suit ADV video encoders
6056+ */
6057+ 0x3d, 0xa2, /* MWE enable manual window,
6058+ * color kill threshold to 2
6059+ */
6060+ 0x3e, 0x6a, /* BLM optimization */
6061+ 0x3f, 0xa0, /* BGB optimization */
6062+ 0x0e, 0x80, /* Hidden space */
6063+ 0x55, 0x81, /* ADC configuration */
6064+ 0x0e, 0x00, /* User space */
6065+};
6066+
6067+static const unsigned char init_svideo_64[] = {
6068+ 0x00, 0x08, /* Insel = Y/C, Y = AIN3, C = AIN6 */
6069+ 0x04, 0x57, /* Enable SFL */
6070+ 0x31, 0x02, /* Clear NEWAV_MODE, SAV/EAV to
6071+ * suit ADV video encoders
6072+ */
6073+ 0x3d, 0xa2, /* MWE enable manual window,
6074+ * color kill threshold to 2
6075+ */
6076+ 0x3e, 0x6a, /* BLM optimization */
6077+ 0x3f, 0xa0, /* BGB optimization */
6078+ 0x58, 0x04, /* Mandatory write. This must be
6079+ * performed for correct operation.
6080+ */
6081+ 0x0e, 0x80, /* Hidden space */
6082+ 0x55, 0x81, /* ADC configuration */
6083+ 0x0e, 0x00, /* User space */
6084+};
6085+
6086+static const unsigned char init_ypbpr_64[] = {
6087+ 0x00, 0x09, /* INSEL = YPrPb, Y = AIN1, Pr = AIN4, Pb = AIN5 */
6088+ 0x31, 0x02, /* Clear NEWAV_MODE, SAV/EAV to suit ADV video encoders */
6089+ 0x3d, 0xa2, /* MWE enable manual window */
6090+ 0x3e, 0x6a, /* BLM optimization */
6091+ 0x3f, 0xa0, /* ADI recommended */
6092+ 0x0e, 0x80, /* Hidden space */
6093+ 0x55, 0x81, /* ADC configuration */
6094+ 0x0e, 0x00, /* User space */
6095+};
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-touchkit.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-touchkit.patch
deleted file mode 100644
index 92e71fa31b..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-touchkit.patch
+++ /dev/null
@@ -1,130 +0,0 @@
1diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
2index f8f86de..5d4cea2 100644
3--- a/drivers/input/mouse/psmouse-base.c
4+++ b/drivers/input/mouse/psmouse-base.c
5@@ -676,6 +676,9 @@ static int psmouse_extensions(struct psmouse *psmouse,
6
7 if (touchkit_ps2_detect(psmouse, set_properties) == 0)
8 return PSMOUSE_TOUCHKIT_PS2;
9+
10+ if (elftouch_ps2_detect(psmouse, set_properties) == 0)
11+ return PSMOUSE_ELFTOUCH_PS2;
12 }
13
14 /*
15@@ -786,6 +789,12 @@ static const struct psmouse_protocol psmouse_protocols[] = {
16 .alias = "trackpoint",
17 .detect = trackpoint_detect,
18 },
19+ {
20+ .type = PSMOUSE_ELFTOUCH_PS2,
21+ .name = "elftouchPS2",
22+ .alias = "elftouch",
23+ .detect = elftouch_ps2_detect,
24+ },
25 #endif
26 #ifdef CONFIG_MOUSE_PS2_TOUCHKIT
27 {
28diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
29index 54ed267..8d1ba79 100644
30--- a/drivers/input/mouse/psmouse.h
31+++ b/drivers/input/mouse/psmouse.h
32@@ -89,6 +89,7 @@ enum psmouse_type {
33 PSMOUSE_TRACKPOINT,
34 PSMOUSE_TOUCHKIT_PS2,
35 PSMOUSE_CORTRON,
36+ PSMOUSE_ELFTOUCH_PS2,
37 PSMOUSE_HGPK,
38 PSMOUSE_ELANTECH,
39 PSMOUSE_AUTO /* This one should always be last */
40diff --git a/drivers/input/mouse/touchkit_ps2.c b/drivers/input/mouse/touchkit_ps2.c
41index 3fadb2a..e9c27f1 100644
42--- a/drivers/input/mouse/touchkit_ps2.c
43+++ b/drivers/input/mouse/touchkit_ps2.c
44@@ -51,6 +51,11 @@
45 #define TOUCHKIT_GET_X(packet) (((packet)[1] << 7) | (packet)[2])
46 #define TOUCHKIT_GET_Y(packet) (((packet)[3] << 7) | (packet)[4])
47
48+#define ELFTOUCH_MAX_XC 0x0fff
49+#define ELFTOUCH_MAX_YC 0x0fff
50+#define ELFTOUCH_GET_X(packet) (((packet)[3] << 7) | (packet)[4])
51+#define ELFTOUCH_GET_Y(packet) (((packet)[1] << 7) | (packet)[2])
52+
53 static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse)
54 {
55 unsigned char *packet = psmouse->packet;
56@@ -59,9 +64,15 @@ static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse)
57 if (psmouse->pktcnt != 5)
58 return PSMOUSE_GOOD_DATA;
59
60- input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet));
61- input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet));
62+ if(psmouse->type==PSMOUSE_ELFTOUCH_PS2) {
63+ input_report_abs(dev, ABS_X, ELFTOUCH_GET_X(packet));
64+ input_report_abs(dev, ABS_Y, ELFTOUCH_GET_Y(packet));
65+ } else {
66+ input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet));
67+ input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet));
68+ }
69 input_report_key(dev, BTN_TOUCH, TOUCHKIT_GET_TOUCHED(packet));
70+
71 input_sync(dev);
72
73 return PSMOUSE_FULL_PACKET;
74@@ -98,3 +109,33 @@ int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties)
75
76 return 0;
77 }
78+
79+int elftouch_ps2_detect(struct psmouse *psmouse, int set_properties)
80+{
81+ struct input_dev *dev = psmouse->dev;
82+ unsigned char param[16];
83+ int command, res;
84+
85+ param[0]=0x0f4;
86+ command = TOUCHKIT_SEND_PARMS(1, 0, TOUCHKIT_CMD);
87+ res=ps2_command(&psmouse->ps2dev, param, command);
88+ if(res) { return -ENODEV; }
89+
90+ param[0]=0x0b0;
91+ command = TOUCHKIT_SEND_PARMS(1, 1, TOUCHKIT_CMD);
92+ res=ps2_command(&psmouse->ps2dev, param, command);
93+ if(res) { return -ENODEV; }
94+
95+ if (set_properties) {
96+ dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
97+ set_bit(BTN_TOUCH, dev->keybit);
98+ input_set_abs_params(dev, ABS_X, 0, ELFTOUCH_MAX_XC, 0, 0);
99+ input_set_abs_params(dev, ABS_Y, 0, ELFTOUCH_MAX_YC, 0, 0);
100+
101+ psmouse->vendor = "ElfTouch";
102+ psmouse->name = "Touchscreen";
103+ psmouse->protocol_handler = touchkit_ps2_process_byte;
104+ psmouse->pktsize = 5;
105+ }
106+ return 0;
107+}
108diff --git a/drivers/input/mouse/touchkit_ps2.h b/drivers/input/mouse/touchkit_ps2.h
109index 8a0dd35..f32ef4c 100644
110--- a/drivers/input/mouse/touchkit_ps2.h
111+++ b/drivers/input/mouse/touchkit_ps2.h
112@@ -14,12 +14,18 @@
113
114 #ifdef CONFIG_MOUSE_PS2_TOUCHKIT
115 int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties);
116+int elftouch_ps2_detect(struct psmouse *psmouse, int set_properties);
117 #else
118 static inline int touchkit_ps2_detect(struct psmouse *psmouse,
119 int set_properties)
120 {
121 return -ENOSYS;
122 }
123+static inline int elftouch_ps2_detect(struct psmouse *psmouse,
124+ int set_properties)
125+{
126+ return -ENOSYS;
127+}
128 #endif /* CONFIG_MOUSE_PS2_TOUCHKIT */
129
130 #endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-async.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-async.patch
deleted file mode 100644
index a489339cbd..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-async.patch
+++ /dev/null
@@ -1,69 +0,0 @@
1
2Gitweb: http://git.kernel.org/linus/d6de2c80e9d758d2e36c21699117db6178c0f517
3Commit: d6de2c80e9d758d2e36c21699117db6178c0f517
4Parent: 7933a3cfba017330ebb25f9820cb25ec9cdd67cc
5Author: Linus Torvalds <torvalds@linux-foundation.org>
6AuthorDate: Fri Apr 10 12:17:41 2009 -0700
7Committer: Linus Torvalds <torvalds@linux-foundation.org>
8CommitDate: Sat Apr 11 12:44:49 2009 -0700
9
10 async: Fix module loading async-work regression
11
12 Several drivers use asynchronous work to do device discovery, and we
13 synchronize with them in the compiled-in case before we actually try to
14 mount root filesystems etc.
15
16 However, when compiled as modules, that synchronization is missing - the
17 module loading completes, but the driver hasn't actually finished
18 probing for devices, and that means that any user mode that expects to
19 use the devices after the 'insmod' is now potentially broken.
20
21 We already saw one case of a similar issue in the ACPI battery code,
22 where the kernel itself expected the module to be all done, and unmapped
23 the init memory - but the async device discovery was still running.
24 That got hacked around by just removing the "__init" (see commit
25 5d38258ec026921a7b266f4047ebeaa75db358e5 "ACPI battery: fix async boot
26 oops"), but the real fix is to just make the module loading wait for all
27 async work to be completed.
28
29 It will slow down module loading, but since common devices should be
30 built in anyway, and since the bug is really annoying and hard to handle
31 from user space (and caused several S3 resume regressions), the simple
32 fix to wait is the right one.
33
34 This fixes at least
35
36 http://bugzilla.kernel.org/show_bug.cgi?id=13063
37
38 but probably a few other bugzilla entries too (12936, for example), and
39 is confirmed to fix Rafael's storage driver breakage after resume bug
40 report (no bugzilla entry).
41
42 We should also be able to now revert that ACPI battery fix.
43
44 Reported-and-tested-by: Rafael J. Wysocki <rjw@suse.com>
45 Tested-by: Heinz Diehl <htd@fancy-poultry.org>
46 Acked-by: Arjan van de Ven <arjan@linux.intel.com>
47 Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
48---
49 kernel/module.c | 3 +++
50 1 files changed, 3 insertions(+), 0 deletions(-)
51
52diff --git a/kernel/module.c b/kernel/module.c
53index 05f014e..e797812 100644
54--- a/kernel/module.c
55+++ b/kernel/module.c
56@@ -2388,6 +2388,9 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
57 blocking_notifier_call_chain(&module_notify_list,
58 MODULE_STATE_LIVE, mod);
59
60+ /* We need to finish all async code before the module init sequence is done */
61+ async_synchronize_full();
62+
63 mutex_lock(&module_mutex);
64 /* Drop initial reference. */
65 module_put(mod);
66--
67To unsubscribe from this list: send the line "unsubscribe git-commits-head" in
68the body of a message to majordomo@vger.kernel.org
69More majordomo info at http://vger.kernel.org/majordomo-info.html
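The fix above is small, but the ordering it enforces matters for any driver that probes from async context: init_module() now flushes the async queue, so by the time insmod returns, the devices the module discovered are actually usable. A minimal, hypothetical module sketch (not taken from this patch) showing the kind of async probing the commit message describes:

#include <linux/module.h>
#include <linux/async.h>
#include <linux/delay.h>

/* stand-in for slow device discovery done off the init path */
static void example_probe_one(void *data, async_cookie_t cookie)
{
	msleep(2000);
	pr_info("example: async probe finished\n");
}

static int __init example_init(void)
{
	async_schedule(example_probe_one, NULL);
	/* returns immediately; with the fix, init_module() waits for the
	 * scheduled work via async_synchronize_full() before insmod returns */
	return 0;
}

static void __exit example_exit(void)
{
	async_synchronize_full();	/* never unload with probes in flight */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");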
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-suspend.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-suspend.patch
deleted file mode 100644
index 3932a51ae0..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-suspend.patch
+++ /dev/null
@@ -1,139 +0,0 @@
1From: Rafael J. Wysocki <rjw@suse.com>
2Organization: SUSE
3To: Arjan van de Ven <arjan@linux.intel.com>
4CC: Linus Torvalds <torvalds@linux-foundation.org>
5
6
7OK, updated patch follows, with a changelog.
8
9I've added this check to user.c too, because that code can be called
10independently of the one in disk.c . Also, if resume is user space-driven,
11it's a good idea to wait for all of the device probes to complete before
12continuing.
13
14Thanks,
15Rafael
16
17---
18From: Rafael J. Wysocki <rjw@sisk.pl>
19Subject: PM/Hibernate: Wait for SCSI devices scan to complete during resume
20
21There is a race between resume from hibernation and the asynchronous
22scanning of SCSI devices and to prevent it from happening we need to
23call scsi_complete_async_scans() during resume from hibernation.
24
25In addition, if the resume from hibernation is userland-driven, it's
26better to wait for all device probes in the kernel to complete before
27attempting to open the resume device.
28
29Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
30---
31 drivers/scsi/scsi_priv.h | 3 ---
32 drivers/scsi/scsi_wait_scan.c | 2 +-
33 include/scsi/scsi_scan.h | 11 +++++++++++
34 kernel/power/disk.c | 8 ++++++++
35 kernel/power/user.c | 9 +++++++++
36 5 files changed, 29 insertions(+), 4 deletions(-)
37
38Index: linux-2.6/include/scsi/scsi_scan.h
39===================================================================
40--- /dev/null
41+++ linux-2.6/include/scsi/scsi_scan.h
42@@ -0,0 +1,11 @@
43+#ifndef _SCSI_SCSI_SCAN_H
44+#define _SCSI_SCSI_SCAN_H
45+
46+#ifdef CONFIG_SCSI
47+/* drivers/scsi/scsi_scan.c */
48+extern int scsi_complete_async_scans(void);
49+#else
50+static inline int scsi_complete_async_scans(void) { return 0; }
51+#endif
52+
53+#endif /* _SCSI_SCSI_SCAN_H */
54Index: linux-2.6/drivers/scsi/scsi_priv.h
55===================================================================
56--- linux-2.6.orig/drivers/scsi/scsi_priv.h
57+++ linux-2.6/drivers/scsi/scsi_priv.h
58@@ -38,9 +38,6 @@ static inline void scsi_log_completion(s
59 { };
60 #endif
61
62-/* scsi_scan.c */
63-int scsi_complete_async_scans(void);
64-
65 /* scsi_devinfo.c */
66 extern int scsi_get_device_flags(struct scsi_device *sdev,
67 const unsigned char *vendor,
68Index: linux-2.6/drivers/scsi/scsi_wait_scan.c
69===================================================================
70--- linux-2.6.orig/drivers/scsi/scsi_wait_scan.c
71+++ linux-2.6/drivers/scsi/scsi_wait_scan.c
72@@ -11,7 +11,7 @@
73 */
74
75 #include <linux/module.h>
76-#include "scsi_priv.h"
77+#include <scsi/scsi_scan.h>
78
79 static int __init wait_scan_init(void)
80 {
81Index: linux-2.6/kernel/power/disk.c
82===================================================================
83--- linux-2.6.orig/kernel/power/disk.c
84+++ linux-2.6/kernel/power/disk.c
85@@ -22,5 +22,6 @@
86 #include <linux/console.h>
87 #include <linux/cpu.h>
88 #include <linux/freezer.h>
89+#include <scsi/scsi_scan.h>
90
91 #include "power.h"
92@@ -645,6 +646,13 @@ static int software_resume(void)
93 return 0;
94
95 /*
96+ * We can't depend on SCSI devices being available after loading one of
97+ * their modules if scsi_complete_async_scans() is not called and the
98+ * resume device usually is a SCSI one.
99+ */
100+ scsi_complete_async_scans();
101+
102+ /*
103 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
104 * is configured into the kernel. Since the regular hibernate
105 * trigger path is via sysfs which takes a buffer mutex before
106Index: linux-2.6/kernel/power/user.c
107===================================================================
108--- linux-2.6.orig/kernel/power/user.c
109+++ linux-2.6/kernel/power/user.c
110@@ -24,6 +24,7 @@
111 #include <linux/cpu.h>
112 #include <linux/freezer.h>
113 #include <linux/smp_lock.h>
114+#include <scsi/scsi_scan.h>
115
116 #include <asm/uaccess.h>
117
118@@ -92,6 +93,7 @@ static int snapshot_open(struct inode *i
119 filp->private_data = data;
120 memset(&data->handle, 0, sizeof(struct snapshot_handle));
121 if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
122+ /* Hibernating. The image device should be accessible. */
123 data->swap = swsusp_resume_device ?
124 swap_type_of(swsusp_resume_device, 0, NULL) : -1;
125 data->mode = O_RDONLY;
126@@ -99,6 +101,13 @@ static int snapshot_open(struct inode *i
127 if (error)
128 pm_notifier_call_chain(PM_POST_HIBERNATION);
129 } else {
130+ /*
131+ * Resuming. We may need to wait for the image device to
132+ * appear.
133+ */
134+ wait_for_device_probe();
135+ scsi_complete_async_scans();
136+
137 data->swap = -1;
138 data->mode = O_WRONLY;
139 error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
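The change above waits for asynchronous SCSI scanning (and, on the userland-driven path, for device probing in general) before the resume image device is opened. A hypothetical helper (not in the patch) condensing that ordering; scsi_complete_async_scans() is declared in the <scsi/scsi_scan.h> header the patch adds:

#include <linux/device.h>
#include <scsi/scsi_scan.h>

static void example_wait_for_resume_device(void)
{
	/* flush device probes started by built-in drivers or a just-loaded module */
	wait_for_device_probe();
	/* SCSI host scans run asynchronously on top of that; wait for them too,
	 * otherwise the resume partition may not have a block device yet */
	scsi_complete_async_scans();
}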
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/MRST-GFX-driver-consolidated.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/MRST-GFX-driver-consolidated.patch
deleted file mode 100644
index e7676e3725..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/MRST-GFX-driver-consolidated.patch
+++ /dev/null
@@ -1,44328 +0,0 @@
1From 42e6f8da6d694e77678b7ffd8a32a5e9ab56efe3 Mon Sep 17 00:00:00 2001
2From: Alan Olsen <alan.r.olsen@intel.com>
3Date: Thu, 15 Oct 2009 10:42:37 -0700
4Subject: [PATCH] Moorestown graphics consolidation patch v2.10
5
6Includes all patches through v2.10 of the PSB drivers as well as
7Alpha2-2.9-mrst-GFX-driver-incremental-restore-MSIreg-in-PCIx.patch.
8
9Signed-off-by: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
10Signed-off-by: Sophia (Chia-Hung) Kuo <chia-hung.s.kuo@intel.com>
11Signed-off-by: Alan Olsen <alan.r.olsen@intel.com>
12---
13 drivers/gpu/drm/Kconfig | 12 +
14 drivers/gpu/drm/Makefile | 3 +-
15 drivers/gpu/drm/drm_crtc.c | 130 ++
16 drivers/gpu/drm/drm_drv.c | 13 +-
17 drivers/gpu/drm/drm_global.c | 107 +
18 drivers/gpu/drm/drm_irq.c | 30 +
19 drivers/gpu/drm/psb/Makefile | 19 +
20 drivers/gpu/drm/psb/lnc_topaz.c | 676 +++++++
21 drivers/gpu/drm/psb/lnc_topaz.h | 902 +++++++++
22 drivers/gpu/drm/psb/lnc_topazinit.c | 2058 ++++++++++++++++++++
23 drivers/gpu/drm/psb/psb_bl.c | 232 +++
24 drivers/gpu/drm/psb/psb_buffer.c | 519 +++++
25 drivers/gpu/drm/psb/psb_dpst.c | 208 ++
26 drivers/gpu/drm/psb/psb_dpst.h | 90 +
27 drivers/gpu/drm/psb/psb_drm.h | 716 +++++++
28 drivers/gpu/drm/psb/psb_drv.c | 2239 +++++++++++++++++++++
29 drivers/gpu/drm/psb/psb_drv.h | 1224 ++++++++++++
30 drivers/gpu/drm/psb/psb_fb.c | 1833 +++++++++++++++++
31 drivers/gpu/drm/psb/psb_fb.h | 47 +
32 drivers/gpu/drm/psb/psb_fence.c | 359 ++++
33 drivers/gpu/drm/psb/psb_gtt.c | 278 +++
34 drivers/gpu/drm/psb/psb_hotplug.c | 427 ++++
35 drivers/gpu/drm/psb/psb_hotplug.h | 96 +
36 drivers/gpu/drm/psb/psb_intel_bios.c | 309 +++
37 drivers/gpu/drm/psb/psb_intel_bios.h | 436 +++++
38 drivers/gpu/drm/psb/psb_intel_display.c | 2484 ++++++++++++++++++++++++
39 drivers/gpu/drm/psb/psb_intel_display.h | 31 +
40 drivers/gpu/drm/psb/psb_intel_drv.h | 246 +++
41 drivers/gpu/drm/psb/psb_intel_dsi.c | 1798 +++++++++++++++++
42 drivers/gpu/drm/psb/psb_intel_i2c.c | 179 ++
43 drivers/gpu/drm/psb/psb_intel_lvds.c | 1343 +++++++++++++
44 drivers/gpu/drm/psb/psb_intel_modes.c | 64 +
45 drivers/gpu/drm/psb/psb_intel_reg.h | 1015 ++++++++++
46 drivers/gpu/drm/psb/psb_intel_sdvo.c | 1350 +++++++++++++
47 drivers/gpu/drm/psb/psb_intel_sdvo_regs.h | 345 ++++
48 drivers/gpu/drm/psb/psb_irq.c | 621 ++++++
49 drivers/gpu/drm/psb/psb_mmu.c | 1073 ++++++++++
50 drivers/gpu/drm/psb/psb_msvdx.c | 855 ++++++++
51 drivers/gpu/drm/psb/psb_msvdx.h | 527 +++++
52 drivers/gpu/drm/psb/psb_msvdxinit.c | 747 +++++++
53 drivers/gpu/drm/psb/psb_powermgmt.c | 1146 +++++++++++
54 drivers/gpu/drm/psb/psb_powermgmt.h | 73 +
55 drivers/gpu/drm/psb/psb_reg.h | 574 ++++++
56 drivers/gpu/drm/psb/psb_reset.c | 484 +++++
57 drivers/gpu/drm/psb/psb_scene.c | 523 +++++
58 drivers/gpu/drm/psb/psb_scene.h | 119 ++
59 drivers/gpu/drm/psb/psb_schedule.c | 1593 +++++++++++++++
60 drivers/gpu/drm/psb/psb_schedule.h | 181 ++
61 drivers/gpu/drm/psb/psb_setup.c | 18 +
62 drivers/gpu/drm/psb/psb_sgx.c | 1784 +++++++++++++++++
63 drivers/gpu/drm/psb/psb_sgx.h | 41 +
64 drivers/gpu/drm/psb/psb_socket.c | 340 ++++
65 drivers/gpu/drm/psb/psb_ttm_glue.c | 342 ++++
66 drivers/gpu/drm/psb/psb_umevents.c | 490 +++++
67 drivers/gpu/drm/psb/psb_umevents.h | 150 ++
68 drivers/gpu/drm/psb/psb_xhw.c | 652 +++++++
69 drivers/gpu/drm/psb/ttm/ttm_agp_backend.c | 149 ++
70 drivers/gpu/drm/psb/ttm/ttm_bo.c | 1716 ++++++++++++++++
71 drivers/gpu/drm/psb/ttm/ttm_bo_api.h | 578 ++++++
72 drivers/gpu/drm/psb/ttm/ttm_bo_driver.h | 859 ++++++++
73 drivers/gpu/drm/psb/ttm/ttm_bo_util.c | 536 +++++
74 drivers/gpu/drm/psb/ttm/ttm_bo_vm.c | 596 ++++++
75 drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c | 115 ++
76 drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h | 110 ++
77 drivers/gpu/drm/psb/ttm/ttm_fence.c | 607 ++++++
78 drivers/gpu/drm/psb/ttm/ttm_fence_api.h | 277 +++
79 drivers/gpu/drm/psb/ttm/ttm_fence_driver.h | 309 +++
80 drivers/gpu/drm/psb/ttm/ttm_fence_user.c | 242 +++
81 drivers/gpu/drm/psb/ttm/ttm_fence_user.h | 147 ++
82 drivers/gpu/drm/psb/ttm/ttm_lock.c | 162 ++
83 drivers/gpu/drm/psb/ttm/ttm_lock.h | 181 ++
84 drivers/gpu/drm/psb/ttm/ttm_memory.c | 232 +++
85 drivers/gpu/drm/psb/ttm/ttm_memory.h | 154 ++
86 drivers/gpu/drm/psb/ttm/ttm_object.c | 444 +++++
87 drivers/gpu/drm/psb/ttm/ttm_object.h | 269 +++
88 drivers/gpu/drm/psb/ttm/ttm_pat_compat.c | 178 ++
89 drivers/gpu/drm/psb/ttm/ttm_pat_compat.h | 41 +
90 drivers/gpu/drm/psb/ttm/ttm_placement_common.h | 98 +
91 drivers/gpu/drm/psb/ttm/ttm_placement_user.c | 468 +++++
92 drivers/gpu/drm/psb/ttm/ttm_placement_user.h | 259 +++
93 drivers/gpu/drm/psb/ttm/ttm_regman.h | 74 +
94 drivers/gpu/drm/psb/ttm/ttm_tt.c | 655 +++++++
95 drivers/gpu/drm/psb/ttm/ttm_userobj_api.h | 79 +
96 include/drm/drm.h | 1 +
97 include/drm/drmP.h | 30 +
98 include/drm/drm_crtc.h | 12 +
99 include/drm/drm_mode.h | 18 +
100 include/linux/backlight.h | 3 +
101 89 files changed, 43758 insertions(+), 2 deletions(-)
102 create mode 100644 drivers/gpu/drm/drm_global.c
103 create mode 100644 drivers/gpu/drm/psb/Makefile
104 create mode 100644 drivers/gpu/drm/psb/lnc_topaz.c
105 create mode 100644 drivers/gpu/drm/psb/lnc_topaz.h
106 create mode 100644 drivers/gpu/drm/psb/lnc_topazinit.c
107 create mode 100644 drivers/gpu/drm/psb/psb_bl.c
108 create mode 100644 drivers/gpu/drm/psb/psb_buffer.c
109 create mode 100644 drivers/gpu/drm/psb/psb_dpst.c
110 create mode 100644 drivers/gpu/drm/psb/psb_dpst.h
111 create mode 100644 drivers/gpu/drm/psb/psb_drm.h
112 create mode 100644 drivers/gpu/drm/psb/psb_drv.c
113 create mode 100644 drivers/gpu/drm/psb/psb_drv.h
114 create mode 100644 drivers/gpu/drm/psb/psb_fb.c
115 create mode 100644 drivers/gpu/drm/psb/psb_fb.h
116 create mode 100644 drivers/gpu/drm/psb/psb_fence.c
117 create mode 100644 drivers/gpu/drm/psb/psb_gtt.c
118 create mode 100644 drivers/gpu/drm/psb/psb_hotplug.c
119 create mode 100644 drivers/gpu/drm/psb/psb_hotplug.h
120 create mode 100644 drivers/gpu/drm/psb/psb_intel_bios.c
121 create mode 100644 drivers/gpu/drm/psb/psb_intel_bios.h
122 create mode 100644 drivers/gpu/drm/psb/psb_intel_display.c
123 create mode 100644 drivers/gpu/drm/psb/psb_intel_display.h
124 create mode 100644 drivers/gpu/drm/psb/psb_intel_drv.h
125 create mode 100644 drivers/gpu/drm/psb/psb_intel_dsi.c
126 create mode 100644 drivers/gpu/drm/psb/psb_intel_i2c.c
127 create mode 100644 drivers/gpu/drm/psb/psb_intel_lvds.c
128 create mode 100644 drivers/gpu/drm/psb/psb_intel_modes.c
129 create mode 100644 drivers/gpu/drm/psb/psb_intel_reg.h
130 create mode 100644 drivers/gpu/drm/psb/psb_intel_sdvo.c
131 create mode 100644 drivers/gpu/drm/psb/psb_intel_sdvo_regs.h
132 create mode 100644 drivers/gpu/drm/psb/psb_irq.c
133 create mode 100644 drivers/gpu/drm/psb/psb_mmu.c
134 create mode 100644 drivers/gpu/drm/psb/psb_msvdx.c
135 create mode 100644 drivers/gpu/drm/psb/psb_msvdx.h
136 create mode 100644 drivers/gpu/drm/psb/psb_msvdxinit.c
137 create mode 100644 drivers/gpu/drm/psb/psb_powermgmt.c
138 create mode 100644 drivers/gpu/drm/psb/psb_powermgmt.h
139 create mode 100644 drivers/gpu/drm/psb/psb_reg.h
140 create mode 100644 drivers/gpu/drm/psb/psb_reset.c
141 create mode 100644 drivers/gpu/drm/psb/psb_scene.c
142 create mode 100644 drivers/gpu/drm/psb/psb_scene.h
143 create mode 100644 drivers/gpu/drm/psb/psb_schedule.c
144 create mode 100644 drivers/gpu/drm/psb/psb_schedule.h
145 create mode 100644 drivers/gpu/drm/psb/psb_setup.c
146 create mode 100644 drivers/gpu/drm/psb/psb_sgx.c
147 create mode 100644 drivers/gpu/drm/psb/psb_sgx.h
148 create mode 100644 drivers/gpu/drm/psb/psb_socket.c
149 create mode 100644 drivers/gpu/drm/psb/psb_ttm_glue.c
150 create mode 100644 drivers/gpu/drm/psb/psb_umevents.c
151 create mode 100644 drivers/gpu/drm/psb/psb_umevents.h
152 create mode 100644 drivers/gpu/drm/psb/psb_xhw.c
153 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_agp_backend.c
154 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo.c
155 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo_api.h
156 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo_driver.h
157 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo_util.c
158 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo_vm.c
159 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c
160 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h
161 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence.c
162 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence_api.h
163 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence_driver.h
164 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence_user.c
165 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence_user.h
166 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_lock.c
167 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_lock.h
168 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_memory.c
169 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_memory.h
170 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_object.c
171 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_object.h
172 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_pat_compat.c
173 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_pat_compat.h
174 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_placement_common.h
175 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_placement_user.c
176 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_placement_user.h
177 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_regman.h
178 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_tt.c
179 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_userobj_api.h
180
181diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
182index 39b393d..9bd8ca1 100644
183--- a/drivers/gpu/drm/Kconfig
184+++ b/drivers/gpu/drm/Kconfig
185@@ -143,3 +143,15 @@ config DRM_SAVAGE
186 help
187 Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
188 chipset. If M is selected the module will be called savage.
189+
190+config DRM_PSB
191+ tristate "Intel Poulsbo/Moorestown"
192+ depends on DRM && PCI
193+ select FB_CFB_COPYAREA
194+ select FB_CFB_FILLRECT
195+ select FB_CFB_IMAGEBLIT
196+ select MRST_RAR_HANDLER
197+ help
198+ Choose this option if you have a Poulsbo or Moorestown platform.
199+ If M is selected the module will be called psb.
200+
201diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
202index d76f167..4989b1e 100644
203--- a/drivers/gpu/drm/Makefile
204+++ b/drivers/gpu/drm/Makefile
205@@ -15,12 +15,13 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
206 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
207 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
208 drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
209- drm_info.o drm_debugfs.o
210+ drm_info.o drm_debugfs.o drm_global.o
211
212 drm-$(CONFIG_COMPAT) += drm_ioc32.o
213
214 obj-$(CONFIG_DRM) += drm.o
215 obj-$(CONFIG_DRM_TTM) += ttm/
216+obj-$(CONFIG_DRM_PSB) +=psb/
217 obj-$(CONFIG_DRM_TDFX) += tdfx/
218 obj-$(CONFIG_DRM_R128) += r128/
219 obj-$(CONFIG_DRM_RADEON)+= radeon/
220diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
221index 2f631c7..11cd2e8 100644
222--- a/drivers/gpu/drm/drm_crtc.c
223+++ b/drivers/gpu/drm/drm_crtc.c
224@@ -146,6 +146,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
225 { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
226 { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
227 { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
228+ { DRM_MODE_CONNECTOR_MIPI, "MIPI", 0 },
229 };
230
231 static struct drm_prop_enum_list drm_encoder_enum_list[] =
232@@ -154,6 +155,7 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] =
233 { DRM_MODE_ENCODER_TMDS, "TMDS" },
234 { DRM_MODE_ENCODER_LVDS, "LVDS" },
235 { DRM_MODE_ENCODER_TVDAC, "TV" },
236+ { DRM_MODE_ENCODER_MIPI, "MIPI" },
237 };
238
239 char *drm_get_encoder_name(struct drm_encoder *encoder)
240diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
241index b7f3a41..81195a7 100644
242--- a/drivers/gpu/drm/drm_drv.c
243+++ b/drivers/gpu/drm/drm_drv.c
244@@ -344,6 +345,8 @@ static int __init drm_core_init(void)
245
246 DRM_INFO("Initialized %s %d.%d.%d %s\n",
247 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
248+ drm_global_init();
249+
250 return 0;
251 err_p3:
252 drm_sysfs_destroy();
253@@ -357,6 +360,7 @@ err_p1:
254
255 static void __exit drm_core_exit(void)
256 {
257+ drm_global_release();
258 remove_proc_entry("dri", NULL);
259 debugfs_remove(drm_debugfs_root);
260 drm_sysfs_destroy();
261@@ -408,9 +412,16 @@ static int drm_version(struct drm_device *dev, void *data,
262 * Looks up the ioctl function in the ::ioctls table, checking for root
263 * privileges if so required, and dispatches to the respective function.
264 */
265+
266 int drm_ioctl(struct inode *inode, struct file *filp,
267 unsigned int cmd, unsigned long arg)
268 {
269+ return drm_unlocked_ioctl(filp, cmd, arg);
270+}
271+EXPORT_SYMBOL(drm_ioctl);
272+
273+long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
274+{
275 struct drm_file *file_priv = filp->private_data;
276 struct drm_device *dev = file_priv->minor->dev;
277 struct drm_ioctl_desc *ioctl;
278@@ -493,7 +504,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
279 return retcode;
280 }
281
282-EXPORT_SYMBOL(drm_ioctl);
283+EXPORT_SYMBOL(drm_unlocked_ioctl);
284
285 struct drm_local_map *drm_getsarea(struct drm_device *dev)
286 {
287diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
288new file mode 100644
289index 0000000..e054c4f
290--- /dev/null
291+++ b/drivers/gpu/drm/drm_global.c
292@@ -0,0 +1,107 @@
293+/**************************************************************************
294+ *
295+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
296+ * All Rights Reserved.
297+ *
298+ * Permission is hereby granted, free of charge, to any person obtaining a
299+ * copy of this software and associated documentation files (the
300+ * "Software"), to deal in the Software without restriction, including
301+ * without limitation the rights to use, copy, modify, merge, publish,
302+ * distribute, sub license, and/or sell copies of the Software, and to
303+ * permit persons to whom the Software is furnished to do so, subject to
304+ * the following conditions:
305+ *
306+ * The above copyright notice and this permission notice (including the
307+ * next paragraph) shall be included in all copies or substantial portions
308+ * of the Software.
309+ *
310+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
311+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
312+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
313+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
314+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
315+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
316+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
317+ *
318+ **************************************************************************/
319+#include <drmP.h>
320+struct drm_global_item {
321+ struct mutex mutex;
322+ void *object;
323+ int refcount;
324+};
325+
326+static struct drm_global_item glob[DRM_GLOBAL_NUM];
327+
328+void drm_global_init(void)
329+{
330+ int i;
331+
332+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
333+ struct drm_global_item *item = &glob[i];
334+ mutex_init(&item->mutex);
335+ item->object = NULL;
336+ item->refcount = 0;
337+ }
338+}
339+
340+void drm_global_release(void)
341+{
342+ int i;
343+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
344+ struct drm_global_item *item = &glob[i];
345+ BUG_ON(item->object != NULL);
346+ BUG_ON(item->refcount != 0);
347+ }
348+}
349+
350+int drm_global_item_ref(struct drm_global_reference *ref)
351+{
352+ int ret;
353+ struct drm_global_item *item = &glob[ref->global_type];
354+ void *object;
355+
356+ mutex_lock(&item->mutex);
357+ if (item->refcount == 0) {
358+ item->object = kmalloc(ref->size, GFP_KERNEL);
359+ if (unlikely(item->object == NULL)) {
360+ ret = -ENOMEM;
361+ goto out_err;
362+ }
363+
364+ ref->object = item->object;
365+ ret = ref->init(ref);
366+ if (unlikely(ret != 0))
367+ goto out_err;
368+	}
369+
370+	++item->refcount;
371+ ref->object = item->object;
372+ object = item->object;
373+ mutex_unlock(&item->mutex);
374+ return 0;
375+ out_err:
376+ kfree(item->object);
377+ mutex_unlock(&item->mutex);
378+ item->object = NULL;
379+ return ret;
380+}
381+
382+EXPORT_SYMBOL(drm_global_item_ref);
383+
384+void drm_global_item_unref(struct drm_global_reference *ref)
385+{
386+ struct drm_global_item *item = &glob[ref->global_type];
387+
388+ mutex_lock(&item->mutex);
389+ BUG_ON(item->refcount == 0);
390+ BUG_ON(ref->object != item->object);
391+ if (--item->refcount == 0) {
392+ ref->release(ref);
393+ kfree(item->object);
394+ item->object = NULL;
395+ }
396+ mutex_unlock(&item->mutex);
397+}
398+
399+EXPORT_SYMBOL(drm_global_item_unref);
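drm_global_item_ref() above lazily allocates one shared object per global type, runs the caller-supplied init on first use, and frees the object again when the last drm_global_item_unref() drops the count. A hypothetical caller sketch: the drm_global_reference field names (global_type, size, init, release, object) are inferred from the code above, while DRM_GLOBAL_TTM_MEM and struct example_shared are made-up stand-ins for whatever this patch declares in drmP.h:

struct example_shared {
	int something;
};

static int example_shared_init(struct drm_global_reference *ref)
{
	struct example_shared *shared = ref->object;

	shared->something = 0;	/* runs only for the first reference holder */
	return 0;
}

static void example_shared_release(struct drm_global_reference *ref)
{
	/* runs when the last reference is dropped, just before the kfree() */
}

static struct drm_global_reference example_ref = {
	.global_type = DRM_GLOBAL_TTM_MEM,	/* assumed enum value */
	.size = sizeof(struct example_shared),
	.init = &example_shared_init,
	.release = &example_shared_release,
};

/* first drm_global_item_ref(&example_ref) allocates and inits the object;
 * later calls just take another reference; drm_global_item_unref(&example_ref)
 * tears it down when the last user goes away */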
400diff --git a/drivers/gpu/drm/psb/Makefile b/drivers/gpu/drm/psb/Makefile
401new file mode 100644
402index 0000000..67319ba
403--- /dev/null
404+++ b/drivers/gpu/drm/psb/Makefile
405@@ -0,0 +1,19 @@
406+#
407+# Makefile for the drm device driver. This driver provides support for the
408+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
409+
410+ccflags-y := -Idrivers/gpu/drm/psb -Iinclude/drm -Iinclude/linux
411+
412+psb-y := psb_drv.o psb_mmu.o psb_sgx.o psb_irq.o psb_fence.o \
413+ psb_buffer.o psb_gtt.o psb_schedule.o psb_scene.o \
414+ psb_reset.o psb_xhw.o psb_msvdx.o psb_bl.o psb_intel_bios.o\
415+ psb_umevents.o psb_hotplug.o psb_socket.o psb_dpst.o \
416+ psb_powermgmt.o lnc_topaz.o lnc_topazinit.o \
417+ psb_msvdxinit.o psb_ttm_glue.o psb_fb.o psb_setup.o \
418+ ttm/ttm_object.o ttm/ttm_lock.o ttm/ttm_fence_user.o \
419+ ttm/ttm_fence.o ttm/ttm_tt.o ttm/ttm_execbuf_util.o \
420+ ttm/ttm_bo.o ttm/ttm_bo_util.o ttm/ttm_placement_user.o \
421+ ttm/ttm_bo_vm.o ttm/ttm_pat_compat.o ttm/ttm_memory.o
422+
423+obj-$(CONFIG_DRM_PSB) += psb.o
424+
425diff --git a/drivers/gpu/drm/psb/lnc_topaz.c b/drivers/gpu/drm/psb/lnc_topaz.c
426new file mode 100644
427index 0000000..adabac5
428--- /dev/null
429+++ b/drivers/gpu/drm/psb/lnc_topaz.c
430@@ -0,0 +1,676 @@
431+/**
432+ * file lnc_topaz.c
433+ * TOPAZ I/O operations and IRQ handling
434+ *
435+ */
436+
437+/**************************************************************************
438+ *
439+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
440+ * Copyright (c) Imagination Technologies Limited, UK
441+ * All Rights Reserved.
442+ *
443+ * Permission is hereby granted, free of charge, to any person obtaining a
444+ * copy of this software and associated documentation files (the
445+ * "Software"), to deal in the Software without restriction, including
446+ * without limitation the rights to use, copy, modify, merge, publish,
447+ * distribute, sub license, and/or sell copies of the Software, and to
448+ * permit persons to whom the Software is furnished to do so, subject to
449+ * the following conditions:
450+ *
451+ * The above copyright notice and this permission notice (including the
452+ * next paragraph) shall be included in all copies or substantial portions
453+ * of the Software.
454+ *
455+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
456+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
457+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
458+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
459+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
460+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
461+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
462+ *
463+ **************************************************************************/
464+
465+/* include headers */
466+/* #define DRM_DEBUG_CODE 2 */
467+
468+#include <drm/drmP.h>
469+#include <drm/drm_os_linux.h>
470+
471+#include "psb_drv.h"
472+#include "psb_drm.h"
473+#include "lnc_topaz.h"
474+#include "psb_powermgmt.h"
475+
476+#include <linux/io.h>
477+#include <linux/delay.h>
478+
479+
480+/* static function define */
481+static int lnc_topaz_deliver_command(struct drm_device *dev,
482+ struct ttm_buffer_object *cmd_buffer,
483+ unsigned long cmd_offset,
484+ unsigned long cmd_size,
485+ void **topaz_cmd, uint32_t sequence,
486+ int copy_cmd);
487+static int lnc_topaz_send(struct drm_device *dev, void *cmd,
488+ unsigned long cmd_size, uint32_t sync_seq);
489+static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd);
490+static int lnc_topaz_dequeue_send(struct drm_device *dev);
491+static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
492+ unsigned long cmd_size, uint32_t sequence);
493+
494+void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat)
495+{
496+ struct drm_psb_private *dev_priv =
497+ (struct drm_psb_private *)dev->dev_private;
498+ uint32_t clr_flag = lnc_topaz_queryirq(dev);
499+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
500+ uint32_t cur_seq;
501+
502+ lnc_topaz_clearirq(dev, clr_flag);
503+
504+ /* ignore non-SYNC interrupts */
505+ if ((CCB_CTRL_SEQ(dev_priv) & 0x8000) == 0)
506+ return;
507+
508+ cur_seq = *(uint32_t *)topaz_priv->topaz_sync_addr;
509+
510+ PSB_DEBUG_IRQ("TOPAZ:Got SYNC IRQ,sync seq:0x%08x (MTX) vs 0x%08x\n",
511+ cur_seq, dev_priv->sequence[LNC_ENGINE_ENCODE]);
512+
513+ psb_fence_handler(dev, LNC_ENGINE_ENCODE);
514+
515+ /* save frame skip flag for query */
516+ topaz_priv->frame_skip = CCB_CTRL_FRAMESKIP(dev_priv);
517+
518+ topaz_priv->topaz_busy = 1;
519+ lnc_topaz_dequeue_send(dev);
520+
521+ if (drm_topaz_pmpolicy == PSB_PMPOLICY_POWERDOWN)
522+ schedule_delayed_work(&dev_priv->scheduler.topaz_suspend_wq, 0);
523+}
524+
525+static int lnc_submit_encode_cmdbuf(struct drm_device *dev,
526+ struct ttm_buffer_object *cmd_buffer,
527+ unsigned long cmd_offset, unsigned long cmd_size,
528+ struct ttm_fence_object *fence)
529+{
530+ struct drm_psb_private *dev_priv = dev->dev_private;
531+ unsigned long irq_flags;
532+ int ret = 0;
533+ void *cmd;
534+ uint32_t sequence = dev_priv->sequence[LNC_ENGINE_ENCODE];
535+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
536+
537+ PSB_DEBUG_GENERAL("TOPAZ: command submit\n");
538+
539+ PSB_DEBUG_GENERAL("TOPAZ: topaz busy = %d\n", topaz_priv->topaz_busy);
540+
541+ if (topaz_priv->topaz_fw_loaded == 0) {
542+ /* #.# load fw to driver */
543+ PSB_DEBUG_INIT("TOPAZ: load /lib/firmware/topaz_fw.bin\n");
544+ ret = topaz_init_fw(dev);
545+ if (ret != 0) {
546+ /* FIXME: find a proper return value */
547+			DRM_ERROR("TOPAZ: load /lib/firmware/topaz_fw.bin fail,"
548+ "ensure udevd is configured correctly!\n");
549+
550+ return -EFAULT;
551+ }
552+ topaz_priv->topaz_fw_loaded = 1;
553+ }
554+
555+ /* # schedule watchdog */
556+ /* psb_schedule_watchdog(dev_priv); */
557+
558+ /* # spin lock irq save [msvdx_lock] */
559+ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
560+
561+ /* # if topaz need to reset, reset it */
562+ if (topaz_priv->topaz_needs_reset) {
563+ /* #.# reset it */
564+ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
565+ PSB_DEBUG_GENERAL("TOPAZ: needs reset.\n");
566+
567+ if (lnc_topaz_reset(dev_priv)) {
568+ ret = -EBUSY;
569+ DRM_ERROR("TOPAZ: reset failed.\n");
570+ return ret;
571+ }
572+
573+ PSB_DEBUG_GENERAL("TOPAZ: reset ok.\n");
574+
575+ /* #.# upload firmware */
576+ if (topaz_setup_fw(dev, topaz_priv->topaz_cur_codec)) {
577+ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
578+ return -EBUSY;
579+ }
580+
581+ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
582+ }
583+
584+ if (!topaz_priv->topaz_busy) {
585+ /* # direct map topaz command if topaz is free */
586+ PSB_DEBUG_GENERAL("TOPAZ:direct send command,sequence %08x \n",
587+ sequence);
588+
589+ topaz_priv->topaz_busy = 1;
590+ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
591+
592+ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
593+ cmd_size, NULL, sequence, 0);
594+
595+ if (ret) {
596+ DRM_ERROR("TOPAZ: failed to extract cmd...\n");
597+ return ret;
598+ }
599+ } else {
600+ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence %08x \n",
601+ sequence);
602+ cmd = NULL;
603+
604+ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
605+
606+ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
607+ cmd_size, &cmd, sequence, 1);
608+ if (cmd == NULL || ret) {
609+			DRM_ERROR("TOPAZ: map command for save failed\n");
610+ return ret;
611+ }
612+
613+ ret = lnc_topaz_save_command(dev, cmd, cmd_size, sequence);
614+ if (ret)
615+ DRM_ERROR("TOPAZ: save command failed\n");
616+ }
617+
618+ return ret;
619+}
620+
621+static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
622+ unsigned long cmd_size, uint32_t sequence)
623+{
624+ struct drm_psb_private *dev_priv = dev->dev_private;
625+ struct lnc_topaz_cmd_queue *topaz_cmd;
626+ unsigned long irq_flags;
627+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
628+
629+ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence: %08x..\n",
630+ sequence);
631+
632+ topaz_cmd = kzalloc(sizeof(struct lnc_topaz_cmd_queue),
633+ GFP_KERNEL);
634+ if (topaz_cmd == NULL) {
635+ mutex_unlock(&topaz_priv->topaz_mutex);
636+ DRM_ERROR("TOPAZ: out of memory....\n");
637+ return -ENOMEM;
638+ }
639+
640+ topaz_cmd->cmd = cmd;
641+ topaz_cmd->cmd_size = cmd_size;
642+ topaz_cmd->sequence = sequence;
643+
644+ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
645+ list_add_tail(&topaz_cmd->head, &topaz_priv->topaz_queue);
646+ if (!topaz_priv->topaz_busy) {
647+ /* topaz_priv->topaz_busy = 1; */
648+ PSB_DEBUG_GENERAL("TOPAZ: need immediate dequeue...\n");
649+ lnc_topaz_dequeue_send(dev);
650+ PSB_DEBUG_GENERAL("TOPAZ: after dequeue command\n");
651+ }
652+
653+ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
654+
655+ return 0;
656+}
657+
658+
659+int lnc_cmdbuf_video(struct drm_file *priv,
660+ struct list_head *validate_list,
661+ uint32_t fence_type,
662+ struct drm_psb_cmdbuf_arg *arg,
663+ struct ttm_buffer_object *cmd_buffer,
664+ struct psb_ttm_fence_rep *fence_arg)
665+{
666+ struct drm_device *dev = priv->minor->dev;
667+ struct ttm_fence_object *fence = NULL;
668+ int ret;
669+
670+ ret = lnc_submit_encode_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
671+ arg->cmdbuf_size, fence);
672+ if (ret)
673+ return ret;
674+
675+ /* workaround for interrupt issue */
676+ psb_fence_or_sync(priv, LNC_ENGINE_ENCODE, fence_type, arg->fence_flags,
677+ validate_list, fence_arg, &fence);
678+
679+ if (fence)
680+ ttm_fence_object_unref(&fence);
681+
682+ mutex_lock(&cmd_buffer->mutex);
683+ if (cmd_buffer->sync_obj != NULL)
684+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
685+ mutex_unlock(&cmd_buffer->mutex);
686+
687+ return 0;
688+}
689+
690+static int lnc_topaz_sync(struct drm_device *dev, uint32_t sync_seq)
691+{
692+ struct drm_psb_private *dev_priv = dev->dev_private;
693+ uint32_t sync_cmd[3];
694+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
695+
696+#if 0
697+ struct ttm_fence_device *fdev = &dev_priv->fdev;
698+ struct ttm_fence_class_manager *fc =
699+ &fdev->fence_class[LNC_ENGINE_ENCODE];
700+ unsigned long irq_flags;
701+#endif
702+#if LNC_TOPAZ_NO_IRQ
703+ uint32_t *sync_p = (uint32_t *)topaz_priv->topaz_sync_addr;
704+ int count = 10000;
705+ uint32_t cur_seq;
706+#endif
707+
708+ /* insert a SYNC command here */
709+ topaz_priv->topaz_sync_cmd_seq = (1 << 15) |
710+ topaz_priv->topaz_cmd_seq++;
711+ sync_cmd[0] = 1 | (MTX_CMDID_SYNC << 1) | (3 << 8) |
712+ (topaz_priv->topaz_sync_cmd_seq << 16);
713+ sync_cmd[1] = topaz_priv->topaz_sync_offset;
714+ sync_cmd[2] = sync_seq;
715+
716+ PSB_DEBUG_GENERAL("TOPAZ:MTX_CMDID_SYNC: size(3),cmd seq (0x%04x),"
717+ "sync_seq (0x%08x)\n",
718+ topaz_priv->topaz_sync_cmd_seq, sync_seq);
719+
720+ lnc_mtx_send(dev_priv, sync_cmd);
721+
722+#if LNC_TOPAZ_NO_IRQ /* workaround for interrupt issue */
723+ /* # poll topaz register for certain times */
724+ while (count && *sync_p != sync_seq) {
725+ DRM_UDELAY(100);
726+ --count;
727+ }
728+ if ((count == 0) && (*sync_p != sync_seq)) {
729+		DRM_ERROR("TOPAZ: wait sync timeout (0x%08x), actual 0x%08x\n",
730+ sync_seq, *sync_p);
731+ return -EBUSY;
732+ }
733+ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
734+
735+ topaz_priv->topaz_busy = 0;
736+
737+ /* XXX: check psb_fence_handler is suitable for topaz */
738+ cur_seq = *sync_p;
739+#if 0
740+ write_lock_irqsave(&fc->lock, irq_flags);
741+ ttm_fence_handler(fdev, LNC_ENGINE_ENCODE,
742+ cur_seq,
743+ _PSB_FENCE_TYPE_EXE, 0);
744+ write_unlock_irqrestore(&fc->lock, irq_flags);
745+#endif
746+#endif
747+ return 0;
748+}
749+
750+int
751+lnc_topaz_deliver_command(struct drm_device *dev,
752+ struct ttm_buffer_object *cmd_buffer,
753+ unsigned long cmd_offset, unsigned long cmd_size,
754+ void **topaz_cmd, uint32_t sequence,
755+ int copy_cmd)
756+{
757+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
758+ struct ttm_bo_kmap_obj cmd_kmap;
759+ bool is_iomem;
760+ int ret;
761+ unsigned char *cmd_start, *tmp;
762+
763+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2,
764+ &cmd_kmap);
765+ if (ret) {
766+ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", ret);
767+ return ret;
768+ }
769+ cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap,
770+ &is_iomem) + cmd_page_offset;
771+
772+ if (copy_cmd) {
773+ PSB_DEBUG_GENERAL("TOPAZ: queue commands\n");
774+ tmp = kzalloc(cmd_size, GFP_KERNEL);
775+ if (tmp == NULL) {
776+ ret = -ENOMEM;
777+ goto out;
778+ }
779+ memcpy(tmp, cmd_start, cmd_size);
780+ *topaz_cmd = tmp;
781+ } else {
782+ PSB_DEBUG_GENERAL("TOPAZ: directly send the command\n");
783+ ret = lnc_topaz_send(dev, cmd_start, cmd_size, sequence);
784+ if (ret) {
785+ DRM_ERROR("TOPAZ: commit commands failed.\n");
786+ ret = -EINVAL;
787+ }
788+ }
789+
790+out:
791+ PSB_DEBUG_GENERAL("TOPAZ:cmd_size(%ld), sequence(%d) copy_cmd(%d)\n",
792+ cmd_size, sequence, copy_cmd);
793+
794+ ttm_bo_kunmap(&cmd_kmap);
795+
796+ return ret;
797+}
798+
799+int
800+lnc_topaz_send(struct drm_device *dev, void *cmd,
801+ unsigned long cmd_size, uint32_t sync_seq)
802+{
803+ struct drm_psb_private *dev_priv = dev->dev_private;
804+ int ret = 0;
805+ unsigned char *command = (unsigned char *) cmd;
806+ struct topaz_cmd_header *cur_cmd_header;
807+ uint32_t cur_cmd_size, cur_cmd_id;
808+ uint32_t codec;
809+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
810+
811+ PSB_DEBUG_GENERAL("TOPAZ: send the command in the buffer one by one\n");
812+
813+ while (cmd_size > 0) {
814+ cur_cmd_header = (struct topaz_cmd_header *) command;
815+ cur_cmd_size = cur_cmd_header->size * 4;
816+ cur_cmd_id = cur_cmd_header->id;
817+
818+ switch (cur_cmd_id) {
819+ case MTX_CMDID_SW_NEW_CODEC:
820+ codec = *((uint32_t *) cmd + 1);
821+
822+ PSB_DEBUG_GENERAL("TOPAZ: setup new codec %s (%d)\n",
823+ codec_to_string(codec), codec);
824+ if (topaz_setup_fw(dev, codec)) {
825+ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
826+ return -EBUSY;
827+ }
828+
829+ topaz_priv->topaz_cur_codec = codec;
830+ break;
831+
832+ case MTX_CMDID_SW_ENTER_LOWPOWER:
833+ PSB_DEBUG_GENERAL("TOPAZ: enter lowpower.... \n");
834+ PSB_DEBUG_GENERAL("XXX: implement it\n");
835+ break;
836+
837+ case MTX_CMDID_SW_LEAVE_LOWPOWER:
838+ PSB_DEBUG_GENERAL("TOPAZ: leave lowpower... \n");
839+ PSB_DEBUG_GENERAL("XXX: implement it\n");
840+ break;
841+
842+		/* ordinary command */
843+ case MTX_CMDID_START_PIC:
844+ /* XXX: specially handle START_PIC hw command */
845+ CCB_CTRL_SET_QP(dev_priv,
846+ *(command + cur_cmd_size - 4));
847+ /* strip the QP parameter (it's software arg) */
848+ cur_cmd_header->size--;
849+ default:
850+ cur_cmd_header->seq = 0x7fff &
851+ topaz_priv->topaz_cmd_seq++;
852+
853+ PSB_DEBUG_GENERAL("TOPAZ: %s: size(%d),"
854+ " seq (0x%04x)\n",
855+ cmd_to_string(cur_cmd_id),
856+ cur_cmd_size, cur_cmd_header->seq);
857+ ret = lnc_mtx_send(dev_priv, command);
858+ if (ret) {
859+ DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret);
860+ goto out;
861+ }
862+ break;
863+ }
864+
865+ command += cur_cmd_size;
866+ cmd_size -= cur_cmd_size;
867+ }
868+ lnc_topaz_sync(dev, sync_seq);
869+out:
870+ return ret;
871+}
872+
873+static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd)
874+{
875+ struct topaz_cmd_header *cur_cmd_header =
876+ (struct topaz_cmd_header *) cmd;
877+ uint32_t cmd_size = cur_cmd_header->size;
878+ uint32_t read_index, write_index;
879+ const uint32_t *cmd_pointer = (uint32_t *) cmd;
880+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
881+
882+ int ret = 0;
883+
884+ /* <msvdx does> # enable all clock */
885+
886+ write_index = topaz_priv->topaz_cmd_windex;
887+ if (write_index + cmd_size + 1 > topaz_priv->topaz_ccb_size) {
888+ int free_space = topaz_priv->topaz_ccb_size - write_index;
889+
890+ PSB_DEBUG_GENERAL("TOPAZ: -------will wrap CCB write point.\n");
891+ if (free_space > 0) {
892+ struct topaz_cmd_header pad_cmd;
893+
894+ pad_cmd.id = MTX_CMDID_NULL;
895+ pad_cmd.size = free_space;
896+ pad_cmd.seq = 0x7fff & topaz_priv->topaz_cmd_seq;
897+
898+ PSB_DEBUG_GENERAL("TOPAZ: MTX_CMDID_NULL:"
899+ " size(%d),seq (0x%04x)\n",
900+ pad_cmd.size, pad_cmd.seq);
901+
902+ TOPAZ_BEGIN_CCB(dev_priv);
903+ TOPAZ_OUT_CCB(dev_priv, pad_cmd.val);
904+ TOPAZ_END_CCB(dev_priv, 1);
905+
906+ POLL_WB_SEQ(dev_priv, pad_cmd.seq);
907+ ++topaz_priv->topaz_cmd_seq;
908+ }
909+ POLL_WB_RINDEX(dev_priv, 0);
910+ if (ret == 0)
911+ topaz_priv->topaz_cmd_windex = 0;
912+ else {
913+ DRM_ERROR("TOPAZ: poll rindex timeout\n");
914+ return ret; /* HW may hang, need reset */
915+ }
916+ PSB_DEBUG_GENERAL("TOPAZ: -------wrap CCB was done.\n");
917+ }
918+
919+	read_index = CCB_CTRL_RINDEX(dev_priv); /* temporarily use CCB CTRL */
920+ write_index = topaz_priv->topaz_cmd_windex;
921+
922+ PSB_DEBUG_GENERAL("TOPAZ: write index(%d), read index(%d,WB=%d)\n",
923+ write_index, read_index, WB_CCB_CTRL_RINDEX(dev_priv));
924+ TOPAZ_BEGIN_CCB(dev_priv);
925+ while (cmd_size > 0) {
926+ TOPAZ_OUT_CCB(dev_priv, *cmd_pointer++);
927+ --cmd_size;
928+ }
929+ TOPAZ_END_CCB(dev_priv, 1);
930+
931+#if 0
932+ DRM_UDELAY(1000);
933+ lnc_topaz_clearirq(dev,
934+ lnc_topaz_queryirq(dev));
935+ LNC_TRACEL("TOPAZ: after clear, query again\n");
936+ lnc_topaz_queryirq(dev_priv);
937+#endif
938+
939+ return ret;
940+}
941+
942+int lnc_topaz_dequeue_send(struct drm_device *dev)
943+{
944+ struct drm_psb_private *dev_priv = dev->dev_private;
945+ struct lnc_topaz_cmd_queue *topaz_cmd = NULL;
946+ int ret;
947+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
948+
949+ PSB_DEBUG_GENERAL("TOPAZ: dequeue command and send it to topaz\n");
950+
951+ if (list_empty(&topaz_priv->topaz_queue)) {
952+ topaz_priv->topaz_busy = 0;
953+ return 0;
954+ }
955+
956+ topaz_cmd = list_first_entry(&topaz_priv->topaz_queue,
957+ struct lnc_topaz_cmd_queue, head);
958+
959+ PSB_DEBUG_GENERAL("TOPAZ: queue has id %08x\n", topaz_cmd->sequence);
960+ ret = lnc_topaz_send(dev, topaz_cmd->cmd, topaz_cmd->cmd_size,
961+ topaz_cmd->sequence);
962+ if (ret) {
963+ DRM_ERROR("TOPAZ: lnc_topaz_send failed.\n");
964+ ret = -EINVAL;
965+ }
966+
967+ list_del(&topaz_cmd->head);
968+ kfree(topaz_cmd->cmd);
969+ kfree(topaz_cmd
970+ );
971+
972+ return ret;
973+}
974+
975+void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_count)
976+{
977+ PSB_DEBUG_GENERAL("TOPAZ: kick mtx count(%d).\n", kick_count);
978+ MTX_WRITE32(MTX_CR_MTX_KICK, kick_count);
979+}
980+
981+int lnc_check_topaz_idle(struct drm_device *dev)
982+{
983+ struct drm_psb_private *dev_priv =
984+ (struct drm_psb_private *)dev->dev_private;
985+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
986+ uint32_t reg_val;
987+
988+ if (topaz_priv->topaz_busy)
989+ return -EBUSY;
990+
991+ MVEA_READ32(MVEA_CR_MVEA_BUSY, &reg_val);
992+ if (reg_val != 0)
993+ return -EBUSY;
994+
995+ MVEA_READ32(MVEA_CR_MVEA_DMACMDFIFO_WAIT, &reg_val);
996+ if (reg_val != 0)
997+ return -EBUSY;
998+
999+ MVEA_READ32(MVEA_CR_MVEA_DMACMDFIFO_STATUS, &reg_val);
1000+ if ((reg_val & (1 << 8)) == 0)
1001+ return -EBUSY;
1002+
1003+ return 0; /* we think it is idle */
1004+}
1005+
1006+int lnc_wait_topaz_idle(struct drm_device *dev)
1007+{
1008+ struct drm_psb_private *dev_priv =
1009+ (struct drm_psb_private *)dev->dev_private;
1010+ struct ttm_fence_device *fdev = &dev_priv->fdev;
1011+ struct ttm_fence_class_manager *fc =
1012+ &fdev->fence_class[LNC_ENGINE_ENCODE];
1013+ struct ttm_fence_object *fence, *next;
1014+ unsigned long _end = jiffies + 5 * DRM_HZ;
1015+ int signaled = 0;
1016+ int ret = 0;
1017+
1018+ /* Ensure that all pending IRQs are serviced, */
1019+ /*
1020+ * Save the last MSVDX fence in dev_priv instead!!!
1021+ * Need to be fc->write_locked while accessing a fence from the ring.
1022+ */
1023+ list_for_each_entry_safe(fence, next, &fc->ring, ring) {
1024+ do {
1025+ signaled = ttm_fence_object_signaled(fence,
1026+ _PSB_FENCE_TYPE_EXE);
1027+ if (signaled)
1028+ break;
1029+ if (time_after_eq(jiffies, _end)) {
1030+ PSB_DEBUG_PM("TOPAZIDLE: fence 0x%x didn't get"
1031+				     "signaled for 5 secs\n",
1032+ (unsigned int) fence);
1033+ break;
1034+ }
1035+ DRM_UDELAY(1000);
1036+ } while (1);
1037+ }
1038+
1039+ do {
1040+ ret = lnc_check_topaz_idle(dev);
1041+ if (ret == 0)
1042+ break;
1043+
1044+ if (time_after_eq(jiffies, _end)) {
1045+ PSB_DEBUG_PM("TOPAZIDLE: wait HW idle time out\n");
1046+ break;
1047+ }
1048+ DRM_UDELAY(1000);
1049+ } while (1);
1050+
1051+ return ret;
1052+}
1053+
1054+int lnc_video_frameskip(struct drm_device *dev, uint64_t user_pointer)
1055+{
1056+ struct drm_psb_private *dev_priv =
1057+ (struct drm_psb_private *)dev->dev_private;
1058+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
1059+ int ret;
1060+
1061+ ret = copy_to_user((void __user *) ((unsigned long)user_pointer),
1062+ &topaz_priv->frame_skip, sizeof(topaz_priv->frame_skip));
1063+
1064+ if (ret)
1065+ return -EFAULT;
1066+
1067+ return 0;
1068+}
1069+
1070+static void lnc_topaz_flush_cmd_queue(struct topaz_private *topaz_priv)
1071+{
1072+ struct lnc_topaz_cmd_queue *entry, *next;
1073+
1074+ /* remind to reset topaz */
1075+ topaz_priv->topaz_needs_reset = 1;
1076+
1077+ if (list_empty(&topaz_priv->topaz_queue)) {
1078+ topaz_priv->topaz_busy = 0;
1079+ return;
1080+ }
1081+
1082+ /* flush all command in queue */
1083+ list_for_each_entry_safe(entry, next,
1084+ &topaz_priv->topaz_queue,
1085+ head) {
1086+ list_del(&entry->head);
1087+ kfree(entry->cmd);
1088+ kfree(entry);
1089+ }
1090+
1091+ return;
1092+}
1093+
1094+void lnc_topaz_handle_timeout(struct ttm_fence_device *fdev)
1095+{
1096+ struct drm_psb_private *dev_priv =
1097+ container_of(fdev, struct drm_psb_private, fdev);
1098+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
1099+
1100+ lnc_topaz_flush_cmd_queue(topaz_priv);
1101+}
1102+
1103+inline int psb_try_power_down_topaz(struct drm_device *dev)
1104+{
1105+ return powermgmt_suspend_islands(dev->pdev, PSB_VIDEO_ENC_ISLAND, false);
1106+}
1107diff --git a/drivers/gpu/drm/psb/lnc_topaz.h b/drivers/gpu/drm/psb/lnc_topaz.h
1108new file mode 100644
1109index 0000000..c48cab0
1110--- /dev/null
1111+++ b/drivers/gpu/drm/psb/lnc_topaz.h
1112@@ -0,0 +1,902 @@
1113+/**************************************************************************
1114+ *
1115+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
1116+ * Copyright (c) Imagination Technologies Limited, UK
1117+ * All Rights Reserved.
1118+ *
1119+ * Permission is hereby granted, free of charge, to any person obtaining a
1120+ * copy of this software and associated documentation files (the
1121+ * "Software"), to deal in the Software without restriction, including
1122+ * without limitation the rights to use, copy, modify, merge, publish,
1123+ * distribute, sub license, and/or sell copies of the Software, and to
1124+ * permit persons to whom the Software is furnished to do so, subject to
1125+ * the following conditions:
1126+ *
1127+ * The above copyright notice and this permission notice (including the
1128+ * next paragraph) shall be included in all copies or substantial portions
1129+ * of the Software.
1130+ *
1131+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1132+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1133+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
1134+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
1135+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
1136+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
1137+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
1138+ *
1139+ **************************************************************************/
1140+
1141+#ifndef _LNC_TOPAZ_H_
1142+#define _LNC_TOPAZ_H_
1143+
1144+#include "psb_drv.h"
1145+
1146+#define LNC_TOPAZ_NO_IRQ 0
1147+#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4)
1148+
1149+extern int drm_topaz_pmpolicy;
1150+
1151+/*
1152+ * MACROS to insert values into fields within a word. The basename of the
1153+ * field must have MASK_BASENAME and SHIFT_BASENAME constants.
1154+ */
1155+#define MM_WRITE32(base, offset, value) \
1156+do { \
1157+ *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg) \
1158+ + base + offset)) = value; \
1159+} while (0)
1160+
1161+#define MM_READ32(base, offset, pointer) \
1162+do { \
1163+ *(pointer) = *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg)\
1164+ + base + offset)); \
1165+} while (0)
1166+
1167+#define F_MASK(basename) (MASK_##basename)
1168+#define F_SHIFT(basename) (SHIFT_##basename)
1169+
1170+#define F_ENCODE(val, basename) \
1171+ (((val) << (F_SHIFT(basename))) & (F_MASK(basename)))
1172+
1173+/* MVEA macro */
1174+#define MVEA_START 0x03000
1175+
1176+#define MVEA_WRITE32(offset, value) MM_WRITE32(MVEA_START, offset, value)
1177+#define MVEA_READ32(offset, pointer) MM_READ32(MVEA_START, offset, pointer);
1178+
1179+#define F_MASK_MVEA(basename) (MASK_MVEA_##basename) /* MVEA */
1180+#define F_SHIFT_MVEA(basename) (SHIFT_MVEA_##basename) /* MVEA */
1181+#define F_ENCODE_MVEA(val, basename) \
1182+ (((val)<<(F_SHIFT_MVEA(basename)))&(F_MASK_MVEA(basename)))
1183+
1184+/* VLC macro */
1185+#define TOPAZ_VLC_START 0x05000
1186+
1187+/* TOPAZ macro */
1188+#define TOPAZ_START 0x02000
1189+
1190+#define TOPAZ_WRITE32(offset, value) MM_WRITE32(TOPAZ_START, offset, value)
1191+#define TOPAZ_READ32(offset, pointer) MM_READ32(TOPAZ_START, offset, pointer)
1192+
1193+#define F_MASK_TOPAZ(basename) (MASK_TOPAZ_##basename)
1194+#define F_SHIFT_TOPAZ(basename) (SHIFT_TOPAZ_##basename)
1195+#define F_ENCODE_TOPAZ(val, basename) \
1196+ (((val)<<(F_SHIFT_TOPAZ(basename)))&(F_MASK_TOPAZ(basename)))
1197+
1198+/* MTX macro */
1199+#define MTX_START 0x0
1200+
1201+#define MTX_WRITE32(offset, value) MM_WRITE32(MTX_START, offset, value)
1202+#define MTX_READ32(offset, pointer) MM_READ32(MTX_START, offset, pointer)
1203+
1204+/* DMAC macro */
1205+#define DMAC_START 0x0f000
1206+
1207+#define DMAC_WRITE32(offset, value) MM_WRITE32(DMAC_START, offset, value)
1208+#define DMAC_READ32(offset, pointer) MM_READ32(DMAC_START, offset, pointer)
1209+
1210+#define F_MASK_DMAC(basename) (MASK_DMAC_##basename)
1211+#define F_SHIFT_DMAC(basename) (SHIFT_DMAC_##basename)
1212+#define F_ENCODE_DMAC(val, basename) \
1213+ (((val)<<(F_SHIFT_DMAC(basename)))&(F_MASK_DMAC(basename)))
1214+
1215+
1216+/* Register CR_IMG_TOPAZ_INTENAB */
1217+#define TOPAZ_CR_IMG_TOPAZ_INTENAB 0x0008
1218+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x00000001
1219+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0
1220+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x0008
1221+
1222+#define MASK_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x80000000
1223+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 31
1224+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x0008
1225+
1226+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x00000008
1227+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 3
1228+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x0008
1229+
1230+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x00000002
1231+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 1
1232+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x0008
1233+
1234+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x00000004
1235+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 2
1236+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x0008
1237+
1238+#define TOPAZ_CR_IMG_TOPAZ_INTCLEAR 0x000C
1239+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x00000001
1240+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0
1241+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x000C
1242+
1243+#define TOPAZ_CR_IMG_TOPAZ_INTSTAT 0x0004
1244+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x00000001
1245+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0
1246+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x0004
1247+
1248+#define MTX_CCBCTRL_ROFF 0
1249+#define MTX_CCBCTRL_COMPLETE 4
1250+#define MTX_CCBCTRL_CCBSIZE 8
1251+#define MTX_CCBCTRL_QP 12
1252+#define MTX_CCBCTRL_FRAMESKIP 20
1253+#define MTX_CCBCTRL_INITQP 24
1254+
1255+#define TOPAZ_CR_MMU_STATUS 0x001C
1256+#define MASK_TOPAZ_CR_MMU_PF_N_RW 0x00000001
1257+#define SHIFT_TOPAZ_CR_MMU_PF_N_RW 0
1258+#define REGNUM_TOPAZ_CR_MMU_PF_N_RW 0x001C
1259+
1260+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x00000008
1261+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 3
1262+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x000C
1263+
1264+#define TOPAZ_CR_MMU_MEM_REQ 0x0020
1265+#define MASK_TOPAZ_CR_MEM_REQ_STAT_READS 0x000000FF
1266+#define SHIFT_TOPAZ_CR_MEM_REQ_STAT_READS 0
1267+#define REGNUM_TOPAZ_CR_MEM_REQ_STAT_READS 0x0020
1268+
1269+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x00000002
1270+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 1
1271+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x000C
1272+
1273+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x00000004
1274+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 2
1275+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x000C
1276+
1277+#define MTX_CR_MTX_KICK 0x0080
1278+#define MASK_MTX_MTX_KICK 0x0000FFFF
1279+#define SHIFT_MTX_MTX_KICK 0
1280+#define REGNUM_MTX_MTX_KICK 0x0080
1281+
1282+#define MTX_DATA_MEM_BASE 0x82880000
1283+
1284+#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108
1285+#define MASK_MTX_MTX_MCMR 0x00000001
1286+#define SHIFT_MTX_MTX_MCMR 0
1287+#define REGNUM_MTX_MTX_MCMR 0x0108
1288+
1289+#define MASK_MTX_MTX_MCMID 0x0FF00000
1290+#define SHIFT_MTX_MTX_MCMID 20
1291+#define REGNUM_MTX_MTX_MCMID 0x0108
1292+
1293+#define MASK_MTX_MTX_MCM_ADDR 0x000FFFFC
1294+#define SHIFT_MTX_MTX_MCM_ADDR 2
1295+#define REGNUM_MTX_MTX_MCM_ADDR 0x0108
1296+
1297+#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C
1298+#define MASK_MTX_MTX_MTX_MCM_STAT 0x00000001
1299+#define SHIFT_MTX_MTX_MTX_MCM_STAT 0
1300+#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C
1301+
1302+#define MASK_MTX_MTX_MCMAI 0x00000002
1303+#define SHIFT_MTX_MTX_MCMAI 1
1304+#define REGNUM_MTX_MTX_MCMAI 0x0108
1305+
1306+#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104
1307+
1308+#define MVEA_CR_MVEA_BUSY 0x0018
1309+#define MVEA_CR_MVEA_DMACMDFIFO_WAIT 0x001C
1310+#define MVEA_CR_MVEA_DMACMDFIFO_STATUS 0x0020
1311+
1312+#define MVEA_CR_IMG_MVEA_SRST 0x0000
1313+#define MASK_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x00000001
1314+#define SHIFT_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0
1315+#define REGNUM_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x0000
1316+
1317+#define MASK_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x00000002
1318+#define SHIFT_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 1
1319+#define REGNUM_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x0000
1320+
1321+#define MASK_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x00000004
1322+#define SHIFT_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 2
1323+#define REGNUM_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x0000
1324+
1325+#define MASK_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x00000008
1326+#define SHIFT_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 3
1327+#define REGNUM_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x0000
1328+
1329+#define MASK_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x00000010
1330+#define SHIFT_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 4
1331+#define REGNUM_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x0000
1332+
1333+#define MASK_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x00000020
1334+#define SHIFT_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 5
1335+#define REGNUM_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x0000
1336+
1337+#define TOPAZ_CR_IMG_TOPAZ_CORE_ID 0x03C0
1338+#define TOPAZ_CR_IMG_TOPAZ_CORE_REV 0x03D0
1339+
1340+#define TOPAZ_MTX_PC (0x00000005)
1341+#define PC_START_ADDRESS (0x80900000)
1342+
1343+#define TOPAZ_CR_TOPAZ_AUTO_CLK_GATE 0x0014
1344+#define MASK_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x00000001
1345+#define SHIFT_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0
1346+#define REGNUM_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x0014
1347+
1348+#define MASK_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x00000002
1349+#define SHIFT_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 1
1350+#define REGNUM_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x0014
1351+
1352+#define MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002
1353+#define SHIFT_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 1
1354+#define REGNUM_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0010
1355+
1356+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET 0x000000F8
1357+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET 0x000000FC
1358+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000
1359+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000
1360+
1361+#define TOPAZ_CORE_CR_MTX_DEBUG_OFFSET 0x0000003C
1362+
1363+#define MASK_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x00000004
1364+#define SHIFT_TOPAZ_CR_MTX_DBG_IS_SLAVE 2
1365+#define REGNUM_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x003C
1366+
1367+#define MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x00000018
1368+#define SHIFT_TOPAZ_CR_MTX_DBG_GPIO_OUT 3
1369+#define REGNUM_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x003C
1370+
1371+#define MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET 0x00000108
1372+
1373+#define TOPAZ_CR_MMU_CONTROL0 0x0024
1374+#define MASK_TOPAZ_CR_MMU_BYPASS 0x00000800
1375+#define SHIFT_TOPAZ_CR_MMU_BYPASS 11
1376+#define REGNUM_TOPAZ_CR_MMU_BYPASS 0x0024
1377+
1378+#define TOPAZ_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X)))
1379+#define MASK_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFF000
1380+#define SHIFT_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 12
1381+#define REGNUM_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0x0030
1382+
1383+#define MASK_TOPAZ_CR_MMU_INVALDC 0x00000008
1384+#define SHIFT_TOPAZ_CR_MMU_INVALDC 3
1385+#define REGNUM_TOPAZ_CR_MMU_INVALDC 0x0024
1386+
1387+#define MASK_TOPAZ_CR_MMU_FLUSH 0x00000004
1388+#define SHIFT_TOPAZ_CR_MMU_FLUSH 2
1389+#define REGNUM_TOPAZ_CR_MMU_FLUSH 0x0024
1390+
1391+#define TOPAZ_CR_MMU_BANK_INDEX 0x0038
1392+#define MASK_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (0x00000003 << (8 + ((i) * 2)))
1393+#define SHIFT_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (8 + ((i) * 2))
1394+#define REGNUM_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) 0x0038
1395+
1396+#define TOPAZ_CR_TOPAZ_MAN_CLK_GATE 0x0010
1397+#define MASK_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x00000001
1398+#define SHIFT_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0
1399+#define REGNUM_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x0010
1400+
1401+#define MTX_CORE_CR_MTX_TXRPT_OFFSET 0x0000000c
1402+#define TXRPT_WAITONKICK_VALUE 0x8ade0000
1403+
1404+#define MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK 0x00000002
1405+
1406+#define MTX_CORE_CR_MTX_ENABLE_OFFSET 0x00000000
1407+#define MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK 0x00000001
1408+
1409+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x00000002
1410+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 1
1411+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x0004
1412+
1413+#define MTX_CORE_CR_MTX_SOFT_RESET_OFFSET 0x00000200
1414+#define MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001
1415+
1416+#define MTX_CR_MTX_SYSC_CDMAA 0x0344
1417+#define MASK_MTX_CDMAA_ADDRESS 0x03FFFFFC
1418+#define SHIFT_MTX_CDMAA_ADDRESS 2
1419+#define REGNUM_MTX_CDMAA_ADDRESS 0x0344
1420+
1421+#define MTX_CR_MTX_SYSC_CDMAC 0x0340
1422+#define MASK_MTX_LENGTH 0x0000FFFF
1423+#define SHIFT_MTX_LENGTH 0
1424+#define REGNUM_MTX_LENGTH 0x0340
1425+
1426+#define MASK_MTX_BURSTSIZE 0x07000000
1427+#define SHIFT_MTX_BURSTSIZE 24
1428+#define REGNUM_MTX_BURSTSIZE 0x0340
1429+
1430+#define MASK_MTX_RNW 0x00020000
1431+#define SHIFT_MTX_RNW 17
1432+#define REGNUM_MTX_RNW 0x0340
1433+
1434+#define MASK_MTX_ENABLE 0x00010000
1435+#define SHIFT_MTX_ENABLE 16
1436+#define REGNUM_MTX_ENABLE 0x0340
1437+
1442+#define TOPAZ_CR_IMG_TOPAZ_SRST 0x0000
1443+#define MASK_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x00000001
1444+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0
1445+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x0000
1446+
1447+#define MASK_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x00000008
1448+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 3
1449+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x0000
1450+
1451+#define MASK_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000002
1452+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 1
1453+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000
1454+
1455+#define MVEA_CR_MVEA_AUTO_CLOCK_GATING 0x0024
1456+#define MASK_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x00000001
1457+#define SHIFT_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0
1458+#define REGNUM_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x0024
1459+
1460+#define MASK_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x00000002
1461+#define SHIFT_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 1
1462+#define REGNUM_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x0024
1463+
1464+#define MASK_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x00000004
1465+#define SHIFT_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 2
1466+#define REGNUM_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x0024
1467+
1468+#define MASK_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x00000008
1469+#define SHIFT_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 3
1470+#define REGNUM_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x0024
1471+
1472+#define TOPAZ_CR_IMG_TOPAZ_DMAC_MODE 0x0040
1473+#define MASK_TOPAZ_CR_DMAC_MASTER_MODE 0x00000001
1474+#define SHIFT_TOPAZ_CR_DMAC_MASTER_MODE 0
1475+#define REGNUM_TOPAZ_CR_DMAC_MASTER_MODE 0x0040
1476+
1477+#define MTX_CR_MTX_SYSC_CDMAT 0x0350
1478+#define MASK_MTX_TRANSFERDATA 0xFFFFFFFF
1479+#define SHIFT_MTX_TRANSFERDATA 0
1480+#define REGNUM_MTX_TRANSFERDATA 0x0350
1481+
1482+#define IMG_SOC_DMAC_IRQ_STAT(X) (0x000C + (32 * (X)))
1483+#define MASK_IMG_SOC_TRANSFER_FIN 0x00020000
1484+#define SHIFT_IMG_SOC_TRANSFER_FIN 17
1485+#define REGNUM_IMG_SOC_TRANSFER_FIN 0x000C
1486+
1487+#define IMG_SOC_DMAC_COUNT(X) (0x0004 + (32 * (X)))
1488+#define MASK_IMG_SOC_CNT 0x0000FFFF
1489+#define SHIFT_IMG_SOC_CNT 0
1490+#define REGNUM_IMG_SOC_CNT 0x0004
1491+
1492+#define MASK_IMG_SOC_EN 0x00010000
1493+#define SHIFT_IMG_SOC_EN 16
1494+#define REGNUM_IMG_SOC_EN 0x0004
1495+
1496+#define MASK_IMG_SOC_LIST_EN 0x00040000
1497+#define SHIFT_IMG_SOC_LIST_EN 18
1498+#define REGNUM_IMG_SOC_LIST_EN 0x0004
1499+
1500+#define IMG_SOC_DMAC_PER_HOLD(X) (0x0018 + (32 * (X)))
1501+#define MASK_IMG_SOC_PER_HOLD 0x0000007F
1502+#define SHIFT_IMG_SOC_PER_HOLD 0
1503+#define REGNUM_IMG_SOC_PER_HOLD 0x0018
1504+
1505+#define IMG_SOC_DMAC_SETUP(X) (0x0000 + (32 * (X)))
1506+#define MASK_IMG_SOC_START_ADDRESS 0xFFFFFFF
1507+#define SHIFT_IMG_SOC_START_ADDRESS 0
1508+#define REGNUM_IMG_SOC_START_ADDRESS 0x0000
1509+
1510+#define MASK_IMG_SOC_BSWAP 0x40000000
1511+#define SHIFT_IMG_SOC_BSWAP 30
1512+#define REGNUM_IMG_SOC_BSWAP 0x0004
1513+
1514+#define MASK_IMG_SOC_PW 0x18000000
1515+#define SHIFT_IMG_SOC_PW 27
1516+#define REGNUM_IMG_SOC_PW 0x0004
1517+
1518+#define MASK_IMG_SOC_DIR 0x04000000
1519+#define SHIFT_IMG_SOC_DIR 26
1520+#define REGNUM_IMG_SOC_DIR 0x0004
1521+
1522+#define MASK_IMG_SOC_PI 0x03000000
1523+#define SHIFT_IMG_SOC_PI 24
1524+#define REGNUM_IMG_SOC_PI 0x0004
1525+#define IMG_SOC_PI_1 0x00000002
1526+#define IMG_SOC_PI_2 0x00000001
1527+#define IMG_SOC_PI_4 0x00000000
1528+
1529+#define MASK_IMG_SOC_TRANSFER_IEN 0x20000000
1530+#define SHIFT_IMG_SOC_TRANSFER_IEN 29
1531+#define REGNUM_IMG_SOC_TRANSFER_IEN 0x0004
1532+
1533+#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) \
1534+ ((((BSWAP) << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP)| \
1535+ (((PW) << SHIFT_IMG_SOC_PW) & MASK_IMG_SOC_PW)| \
1536+ (((DIR) << SHIFT_IMG_SOC_DIR) & MASK_IMG_SOC_DIR)| \
1537+ (((PERIPH_INCR) << SHIFT_IMG_SOC_PI) & MASK_IMG_SOC_PI)| \
1538+ (((COUNT) << SHIFT_IMG_SOC_CNT) & MASK_IMG_SOC_CNT))
1539+
1540+#define IMG_SOC_DMAC_PERIPH(X) (0x0008 + (32 * (X)))
1541+#define MASK_IMG_SOC_EXT_SA 0x0000000F
1542+#define SHIFT_IMG_SOC_EXT_SA 0
1543+#define REGNUM_IMG_SOC_EXT_SA 0x0008
1544+
1545+#define MASK_IMG_SOC_ACC_DEL 0xE0000000
1546+#define SHIFT_IMG_SOC_ACC_DEL 29
1547+#define REGNUM_IMG_SOC_ACC_DEL 0x0008
1548+
1549+#define MASK_IMG_SOC_INCR 0x08000000
1550+#define SHIFT_IMG_SOC_INCR 27
1551+#define REGNUM_IMG_SOC_INCR 0x0008
1552+
1553+#define MASK_IMG_SOC_BURST 0x07000000
1554+#define SHIFT_IMG_SOC_BURST 24
1555+#define REGNUM_IMG_SOC_BURST 0x0008
1556+
1557+#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST) \
1558+((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL)| \
1559+(((INCR) << SHIFT_IMG_SOC_INCR) & MASK_IMG_SOC_INCR)| \
1560+(((BURST) << SHIFT_IMG_SOC_BURST) & MASK_IMG_SOC_BURST))
1561+
1562+#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X)))
1563+#define MASK_IMG_SOC_ADDR 0x007FFFFF
1564+#define SHIFT_IMG_SOC_ADDR 0
1565+#define REGNUM_IMG_SOC_ADDR 0x0014
1566+
1567+/* **************** DMAC define **************** */
1568+enum DMAC_eBSwap {
1569+ DMAC_BSWAP_NO_SWAP = 0x0,/* !< No byte swapping will be performed. */
1570+ DMAC_BSWAP_REVERSE = 0x1,/* !< Byte order will be reversed. */
1571+};
1572+
1573+enum DMAC_ePW {
1574+ DMAC_PWIDTH_32_BIT = 0x0,/* !< Peripheral width 32-bit. */
1575+ DMAC_PWIDTH_16_BIT = 0x1,/* !< Peripheral width 16-bit. */
1576+ DMAC_PWIDTH_8_BIT = 0x2,/* !< Peripheral width 8-bit. */
1577+};
1578+
1579+enum DMAC_eAccDel {
1580+ DMAC_ACC_DEL_0 = 0x0, /* !< Access delay zero clock cycles */
1581+ DMAC_ACC_DEL_256 = 0x1, /* !< Access delay 256 clock cycles */
1582+ DMAC_ACC_DEL_512 = 0x2, /* !< Access delay 512 clock cycles */
1583+ DMAC_ACC_DEL_768 = 0x3, /* !< Access delay 768 clock cycles */
1584+ DMAC_ACC_DEL_1024 = 0x4,/* !< Access delay 1024 clock cycles */
1585+ DMAC_ACC_DEL_1280 = 0x5,/* !< Access delay 1280 clock cycles */
1586+ DMAC_ACC_DEL_1536 = 0x6,/* !< Access delay 1536 clock cycles */
1587+ DMAC_ACC_DEL_1792 = 0x7,/* !< Access delay 1792 clock cycles */
1588+};
1589+
1590+enum DMAC_eBurst {
1591+ DMAC_BURST_0 = 0x0, /* !< burst size of 0 */
1592+ DMAC_BURST_1 = 0x1, /* !< burst size of 1 */
1593+ DMAC_BURST_2 = 0x2, /* !< burst size of 2 */
1594+ DMAC_BURST_3 = 0x3, /* !< burst size of 3 */
1595+ DMAC_BURST_4 = 0x4, /* !< burst size of 4 */
1596+ DMAC_BURST_5 = 0x5, /* !< burst size of 5 */
1597+ DMAC_BURST_6 = 0x6, /* !< burst size of 6 */
1598+ DMAC_BURST_7 = 0x7, /* !< burst size of 7 */
1599+};
1600+
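+/*
+ * A minimal sketch (channel number and sizes chosen purely for illustration)
+ * of how these enums combine with the DMAC_VALUE_* macros above when one DMA
+ * channel is programmed; DMAC_WRITE32() is assumed to exist alongside the
+ * MTX_WRITE32()/TOPAZ_WRITE32() accessors used elsewhere in this driver:
+ *
+ *	uint32_t cnt = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
+ *					0, IMG_SOC_PI_1, size_in_words);
+ *	uint32_t per = DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, 1, DMAC_BURST_2);
+ *
+ *	DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(ch), per);
+ *	DMAC_WRITE32(IMG_SOC_DMAC_COUNT(ch), cnt);
+ */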
1601+/* commands for topaz, shared with the user space driver */
1602+enum drm_lnc_topaz_cmd {
1603+ MTX_CMDID_NULL = 0,
1604+ MTX_CMDID_DO_HEADER = 1,
1605+ MTX_CMDID_ENCODE_SLICE = 2,
1606+ MTX_CMDID_WRITEREG = 3,
1607+ MTX_CMDID_START_PIC = 4,
1608+ MTX_CMDID_END_PIC = 5,
1609+ MTX_CMDID_SYNC = 6,
1610+ MTX_CMDID_ENCODE_ONE_ROW = 7,
1611+ MTX_CMDID_FLUSH = 8,
1612+ MTX_CMDID_SW_LEAVE_LOWPOWER = 0x7c,
1613+ MTX_CMDID_SW_ENTER_LOWPOWER = 0x7e,
1614+ MTX_CMDID_SW_NEW_CODEC = 0x7f
1615+};
1616+
1617+/* codecs topaz supports, shared with the user space driver */
1618+enum drm_lnc_topaz_codec {
1619+ IMG_CODEC_JPEG = 0,
1620+ IMG_CODEC_H264_NO_RC,
1621+ IMG_CODEC_H264_VBR,
1622+ IMG_CODEC_H264_CBR,
1623+ IMG_CODEC_H263_NO_RC,
1624+ IMG_CODEC_H263_VBR,
1625+ IMG_CODEC_H263_CBR,
1626+ IMG_CODEC_MPEG4_NO_RC,
1627+ IMG_CODEC_MPEG4_VBR,
1628+ IMG_CODEC_MPEG4_CBR,
1629+ IMG_CODEC_NUM
1630+};
1631+
1632+/* XXX: copied from the msvdx command queue; does it need any changes? */
1633+struct lnc_topaz_cmd_queue {
1634+ struct list_head head;
1635+ void *cmd;
1636+ unsigned long cmd_size;
1637+ uint32_t sequence;
1638+};
1639+
1640+
1641+struct topaz_cmd_header {
1642+ union {
1643+ struct {
1644+ unsigned long enable_interrupt:1;
1645+ unsigned long id:7;
1646+ unsigned long size:8;
1647+ unsigned long seq:16;
1648+ };
1649+ uint32_t val;
1650+ };
1651+};
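+/*
+ * A minimal sketch of packing a command header word via this union (field
+ * values are illustrative only; whether additional payload words follow
+ * depends on the command):
+ *
+ *	struct topaz_cmd_header header;
+ *
+ *	header.val = 0;
+ *	header.id = MTX_CMDID_ENCODE_SLICE;
+ *	header.size = cmd_size_in_words;
+ *	header.seq = sequence;
+ *	header.enable_interrupt = 1;
+ */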
1652+
1653+/* structure definitions */
1654+/* firmware file's info header */
1655+struct topaz_fwinfo {
1656+ unsigned int ver:16;
1657+ unsigned int codec:16;
1658+
1659+ unsigned int text_size;
1660+ unsigned int data_size;
1661+ unsigned int data_location;
1662+};
1663+
1664+/* firmware data array definition */
1665+struct topaz_codec_fw {
1666+ uint32_t ver;
1667+ uint32_t codec;
1668+
1669+ uint32_t text_size;
1670+ uint32_t data_size;
1671+ uint32_t data_location;
1672+
1673+ struct ttm_buffer_object *text;
1674+ struct ttm_buffer_object *data;
1675+};
1676+
1677+struct topaz_private {
1678+ /* current video task */
1679+ unsigned int pmstate;
1680+ struct sysfs_dirent *sysfs_pmstate;
1681+ int frame_skip;
1682+
1683+ void *topaz_mtx_reg_state;
1684+ struct ttm_buffer_object *topaz_mtx_data_mem;
1685+ uint32_t topaz_cur_codec;
1686+ uint32_t cur_mtx_data_size;
1687+ int topaz_needs_reset;
1688+
1689+ /*
1690+ * topaz command queue
1691+ */
1692+ spinlock_t topaz_lock;
1693+ struct mutex topaz_mutex;
1694+ struct list_head topaz_queue;
1695+ int topaz_busy; /* 0 means topaz is free */
1696+ int topaz_fw_loaded;
1697+
1698+ /* topaz ccb data */
1699+ /* XXX: should these addresses be stored as 32 bits? Is there a more portable way? */
1700+ uint32_t topaz_ccb_buffer_addr;
1701+ uint32_t topaz_ccb_ctrl_addr;
1702+ uint32_t topaz_ccb_size;
1703+ uint32_t topaz_cmd_windex;
1704+ uint16_t topaz_cmd_seq;
1705+
1706+ uint32_t stored_initial_qp;
1707+ uint32_t topaz_dash_access_ctrl;
1708+
1709+ struct ttm_buffer_object *topaz_bo; /* 4K->2K/2K for writeback/sync */
1710+ struct ttm_bo_kmap_obj topaz_bo_kmap;
1711+ void *topaz_ccb_wb;
1712+ uint32_t topaz_wb_offset;
1713+ uint32_t *topaz_sync_addr;
1714+ uint32_t topaz_sync_offset;
1715+ uint32_t topaz_sync_cmd_seq;
1716+ uint32_t topaz_mtx_saved;
1717+
1718+ /* firmware */
1719+ struct topaz_codec_fw topaz_fw[IMG_CODEC_NUM];
1720+};
1721+
1722+/* external function declarations */
1723+/* lnc_topazinit.c */
1724+int lnc_topaz_init(struct drm_device *dev);
1725+int lnc_topaz_uninit(struct drm_device *dev);
1726+int lnc_topaz_reset(struct drm_psb_private *dev_priv);
1727+int topaz_init_fw(struct drm_device *dev);
1728+int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec);
1729+int topaz_wait_for_register(struct drm_psb_private *dev_priv,
1730+ uint32_t addr, uint32_t value,
1731+ uint32_t enable);
1732+void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
1733+ uint32_t byte_addr, uint32_t val);
1734+uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
1735+ uint32_t byte_addr);
1736+void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
1737+ uint32_t addr);
1738+void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
1739+ uint32_t val);
1740+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
1741+int lnc_topaz_save_mtx_state(struct drm_device *dev);
1742+int lnc_topaz_restore_mtx_state(struct drm_device *dev);
1743+
1744+/* lnc_topaz.c */
1745+void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat);
1746+
1747+int lnc_cmdbuf_video(struct drm_file *priv,
1748+ struct list_head *validate_list,
1749+ uint32_t fence_type,
1750+ struct drm_psb_cmdbuf_arg *arg,
1751+ struct ttm_buffer_object *cmd_buffer,
1752+ struct psb_ttm_fence_rep *fence_arg);
1753+
1754+void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_count);
1755+void lnc_topaz_handle_timeout(struct ttm_fence_device *fdev);
1756+
1757+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
1758+int lnc_wait_topaz_idle(struct drm_device *dev);
1759+int lnc_check_topaz_idle(struct drm_device *dev);
1760+
1761+/* macros to get/set CCB control data */
1762+#define WB_CCB_CTRL_RINDEX(dev_priv) \
1763+(*((uint32_t *)((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_wb))
1764+
1765+#define WB_CCB_CTRL_SEQ(dev_priv) \
1766+(*((uint32_t *)((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_wb\
1767+ + 1))
1768+
1769+#define POLL_WB_RINDEX(dev_priv, value) \
1770+do { \
1771+ int i; \
1772+ for (i = 0; i < 10000; i++) { \
1773+ if (WB_CCB_CTRL_RINDEX(dev_priv) == value) \
1774+ break; \
1775+ else \
1776+ DRM_UDELAY(100); \
1777+ } \
1778+ if (WB_CCB_CTRL_RINDEX(dev_priv) != value) { \
1779+ DRM_ERROR("TOPAZ: poll rindex timeout\n"); \
1780+ ret = -EBUSY; \
1781+ } \
1782+} while (0)
1783+
1784+#define POLL_WB_SEQ(dev_priv, value) \
1785+do { \
1786+ int i; \
1787+ for (i = 0; i < 10000; i++) { \
1788+ if (CCB_CTRL_SEQ(dev_priv) == value) \
1789+ break; \
1790+ else \
1791+ DRM_UDELAY(1000); \
1792+ } \
1793+ if (CCB_CTRL_SEQ(dev_priv) != value) { \
1794+ DRM_ERROR("TOPAZ:poll mtxseq timeout,0x%08x(mtx) vs 0x%08x\n",\
1795+ WB_CCB_CTRL_SEQ(dev_priv), value); \
1796+ ret = -EBUSY; \
1797+ } \
1798+} while (0)
1799+
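+/*
+ * Note that POLL_WB_RINDEX() and POLL_WB_SEQ() assign to a local variable
+ * named "ret" which must already exist in the calling scope.  Minimal usage
+ * sketch:
+ *
+ *	int ret = 0;
+ *
+ *	POLL_WB_SEQ(dev_priv, cmd_seq);
+ *	if (ret == -EBUSY)
+ *		return ret;
+ */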
1800+#define CCB_CTRL_RINDEX(dev_priv) \
1801+ topaz_read_mtx_mem(dev_priv, \
1802+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
1803+ + MTX_CCBCTRL_ROFF)
1804+
1810+#define CCB_CTRL_QP(dev_priv) \
1811+ topaz_read_mtx_mem(dev_priv, \
1812+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
1813+ + MTX_CCBCTRL_QP)
1814+
1815+#define CCB_CTRL_SEQ(dev_priv) \
1816+ topaz_read_mtx_mem(dev_priv, \
1817+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
1818+ + MTX_CCBCTRL_COMPLETE)
1819+
1820+#define CCB_CTRL_FRAMESKIP(dev_priv) \
1821+ topaz_read_mtx_mem(dev_priv, \
1822+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
1823+ + MTX_CCBCTRL_FRAMESKIP)
1824+
1825+#define CCB_CTRL_SET_QP(dev_priv, qp) \
1826+ topaz_write_mtx_mem(dev_priv, \
1827+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
1828+ + MTX_CCBCTRL_QP, qp)
1829+
1830+#define CCB_CTRL_SET_INITIALQP(dev_priv, qp) \
1831+ topaz_write_mtx_mem(dev_priv, \
1832+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
1833+ + MTX_CCBCTRL_INITQP, qp)
1834+
1835+
1836+#define TOPAZ_BEGIN_CCB(dev_priv) \
1837+ topaz_write_mtx_mem_multiple_setup(dev_priv, \
1838+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_buffer_addr + \
1839+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_cmd_windex * 4)
1840+
1841+#define TOPAZ_OUT_CCB(dev_priv, cmd) \
1842+do { \
1843+ topaz_write_mtx_mem_multiple(dev_priv, cmd); \
1844+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_cmd_windex++; \
1845+} while (0)
1846+
1847+#define TOPAZ_END_CCB(dev_priv, kick_count) \
1848+ topaz_mtx_kick(dev_priv, 1);
1849+
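+/*
+ * A minimal sketch of how the three macros above queue one command into the
+ * circular command buffer (the words written are illustrative only):
+ *
+ *	TOPAZ_BEGIN_CCB(dev_priv);
+ *	TOPAZ_OUT_CCB(dev_priv, cmd_header.val);
+ *	TOPAZ_OUT_CCB(dev_priv, cmd_param);
+ *	TOPAZ_END_CCB(dev_priv, 1);
+ *
+ * TOPAZ_BEGIN_CCB() sets up an auto-incrementing MTX RAM write at the current
+ * write index, each TOPAZ_OUT_CCB() streams one 32-bit word and advances the
+ * index, and TOPAZ_END_CCB() kicks the MTX (note that its kick_count argument
+ * is currently ignored; the macro always kicks once).
+ */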
1850+static inline char *cmd_to_string(int cmd_id)
1851+{
1852+ switch (cmd_id) {
1853+ case MTX_CMDID_START_PIC:
1854+ return "MTX_CMDID_START_PIC";
1855+ case MTX_CMDID_END_PIC:
1856+ return "MTX_CMDID_END_PIC";
1857+ case MTX_CMDID_DO_HEADER:
1858+ return "MTX_CMDID_DO_HEADER";
1859+ case MTX_CMDID_ENCODE_SLICE:
1860+ return "MTX_CMDID_ENCODE_SLICE";
1861+ case MTX_CMDID_SYNC:
1862+ return "MTX_CMDID_SYNC";
1863+
1864+ default:
1865+ return "Undefined command";
1866+
1867+ }
1868+}
1869+
1870+static inline char *codec_to_string(int codec)
1871+{
1872+ switch (codec) {
1873+ case IMG_CODEC_H264_NO_RC:
1874+ return "H264_NO_RC";
1875+ case IMG_CODEC_H264_VBR:
1876+ return "H264_VBR";
1877+ case IMG_CODEC_H264_CBR:
1878+ return "H264_CBR";
1879+ case IMG_CODEC_H263_NO_RC:
1880+ return "H263_NO_RC";
1881+ case IMG_CODEC_H263_VBR:
1882+ return "H263_VBR";
1883+ case IMG_CODEC_H263_CBR:
1884+ return "H263_CBR";
1885+ case IMG_CODEC_MPEG4_NO_RC:
1886+ return "MPEG4_NO_RC";
1887+ case IMG_CODEC_MPEG4_VBR:
1888+ return "MPEG4_VBR";
1889+ case IMG_CODEC_MPEG4_CBR:
1890+ return "MPEG4_CBR";
1891+ default:
1892+ return "Undefined codec";
1893+ }
1894+}
1895+
1896+
1897+static inline void lnc_topaz_enableirq(struct drm_device *dev)
1898+{
1899+ struct drm_psb_private *dev_priv = dev->dev_private;
1900+ /* uint32_t ier = dev_priv->vdc_irq_mask | _LNC_IRQ_TOPAZ_FLAG; */
1901+
1902+ PSB_DEBUG_IRQ("TOPAZ: enable IRQ\n");
1903+
1904+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB,
1905+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MAS_INTEN) |
1906+ /* F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA) | */
1907+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT) |
1908+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX) |
1909+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT));
1910+
1911+ /* write in psb_irq.c */
1912+ /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
1913+}
1914+
1915+static inline void lnc_topaz_disableirq(struct drm_device *dev)
1916+{
1917+
1918+ struct drm_psb_private *dev_priv = dev->dev_private;
1919+ /* uint32_t ier = dev_priv->vdc_irq_mask & (~_LNC_IRQ_TOPAZ_FLAG); */
1920+
1921+ PSB_DEBUG_INIT("TOPAZ: disable IRQ\n");
1922+
1923+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0);
1924+
1925+ /* write in psb_irq.c */
1926+ /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
1927+}
1928+
1929+
1930+static inline void lnc_topaz_clearirq(struct drm_device *dev,
1931+ uint32_t clear_topaz)
1932+{
1933+ struct drm_psb_private *dev_priv = dev->dev_private;
1934+
1935+ PSB_DEBUG_INIT("TOPAZ: clear IRQ\n");
1936+ if (clear_topaz != 0)
1937+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, clear_topaz);
1938+
1939+ /* PSB_WVDC32(_LNC_IRQ_TOPAZ_FLAG, PSB_INT_IDENTITY_R); */
1940+}
1941+
1942+static inline uint32_t lnc_topaz_queryirq(struct drm_device *dev)
1943+{
1944+ struct drm_psb_private *dev_priv = dev->dev_private;
1945+ uint32_t val, /* iir, */ clear = 0;
1946+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
1947+
1948+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &val);
1949+ /* iir = PSB_RVDC32(PSB_INT_IDENTITY_R); */
1950+
1951+ (void) topaz_priv;
1952+
1953+ if ((val == 0) /* && (iir == 0) */) {/* no interrupt */
1954+ PSB_DEBUG_GENERAL("TOPAZ: no interrupt,IIR=TOPAZ_INTSTAT=0\n");
1955+ return 0;
1956+ }
1957+
1958+ PSB_DEBUG_IRQ("TOPAZ:TOPAZ_INTSTAT=0x%08x\n", val);
1959+
1960+ if (val & (1<<31))
1961+ PSB_DEBUG_IRQ("TOPAZ:IRQ pin activated,cmd seq=0x%04x,"
1962+ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
1963+ CCB_CTRL_SEQ(dev_priv),
1964+ dev_priv->sequence[LNC_ENGINE_ENCODE],
1965+ *(uint32_t *)topaz_priv->topaz_sync_addr);
1966+ else
1967+ PSB_DEBUG_IRQ("TOPAZ:IRQ pin not activated,cmd seq=0x%04x,"
1968+ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
1969+ CCB_CTRL_SEQ(dev_priv),
1970+ dev_priv->sequence[LNC_ENGINE_ENCODE],
1971+ *(uint32_t *)topaz_priv->topaz_sync_addr);
1972+
1973+ if (val & 0x8) {
1974+ uint32_t mmu_status, mmu_req;
1975+
1976+ TOPAZ_READ32(TOPAZ_CR_MMU_STATUS, &mmu_status);
1977+ TOPAZ_READ32(TOPAZ_CR_MMU_MEM_REQ, &mmu_req);
1978+
1979+ PSB_DEBUG_IRQ("TOPAZ: detect a page fault interrupt, "
1980+ "address=0x%08x,mem req=0x%08x\n",
1981+ mmu_status, mmu_req);
1982+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT);
1983+ }
1984+
1985+ if (val & 0x4) {
1986+ PSB_DEBUG_IRQ("TOPAZ: detect a MTX_HALT interrupt\n");
1987+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT);
1988+ }
1989+
1990+ if (val & 0x2) {
1991+ PSB_DEBUG_IRQ("TOPAZ: detect a MTX interrupt\n");
1992+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX);
1993+ }
1994+
1995+ if (val & 0x1) {
1996+ PSB_DEBUG_IRQ("TOPAZ: detect a MVEA interrupt\n");
1997+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA);
1998+ }
1999+
2000+ return clear;
2001+}
2002+
2003+
2004+#define TOPAZ_NEW_PMSTATE(drm_dev, topaz_priv, new_state) \
2005+do { \
2006+ topaz_priv->pmstate = new_state; \
2007+ sysfs_notify_dirent(topaz_priv->sysfs_pmstate); \
2008+ PSB_DEBUG_PM("TOPAZ: %s\n", \
2009+ (new_state == PSB_PMSTATE_POWERUP) ? "powerup" \
2010+ : ((new_state == PSB_PMSTATE_POWERDOWN) ? "powerdown" \
2011+ : "clockgated")); \
2012+} while (0)
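+
+/*
+ * Minimal usage sketch, e.g. when powering the encode island up:
+ *
+ *	TOPAZ_NEW_PMSTATE(dev, topaz_priv, PSB_PMSTATE_POWERUP);
+ */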
2013+
2014+#endif /* _LNC_TOPAZ_H_ */
2015diff --git a/drivers/gpu/drm/psb/lnc_topazinit.c b/drivers/gpu/drm/psb/lnc_topazinit.c
2016new file mode 100644
2017index 0000000..2e8365c
2018--- /dev/null
2019+++ b/drivers/gpu/drm/psb/lnc_topazinit.c
2020@@ -0,0 +1,2058 @@
2021+/**
2022+ * file lnc_topazinit.c
2023+ * TOPAZ initialization and mtx-firmware upload
2024+ *
2025+ */
2026+
2027+/**************************************************************************
2028+ *
2029+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
2030+ * Copyright (c) Imagination Technologies Limited, UK
2031+ * All Rights Reserved.
2032+ *
2033+ * Permission is hereby granted, free of charge, to any person obtaining a
2034+ * copy of this software and associated documentation files (the
2035+ * "Software"), to deal in the Software without restriction, including
2036+ * without limitation the rights to use, copy, modify, merge, publish,
2037+ * distribute, sub license, and/or sell copies of the Software, and to
2038+ * permit persons to whom the Software is furnished to do so, subject to
2039+ * the following conditions:
2040+ *
2041+ * The above copyright notice and this permission notice (including the
2042+ * next paragraph) shall be included in all copies or substantial portions
2043+ * of the Software.
2044+ *
2045+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2046+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2047+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
2048+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
2049+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
2050+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
2051+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
2052+ *
2053+ **************************************************************************/
2054+
2055+/* NOTE (read before refining this code):
2056+ * 1. The firmware size is measured in bytes, but the size passed to the
2057+ *    DMAC must be measured in 32-bit words.
2058+ */
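+/*
+ * A minimal sketch of that conversion, assuming size_in_bytes is the byte
+ * count of the section being transferred:
+ *
+ *	uint32_t size_in_words = (size_in_bytes + 3) / 4;
+ */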
2062+
2063+/* include headers */
2064+
2065+/* #define DRM_DEBUG_CODE 2 */
2066+
2067+#include <linux/firmware.h>
2068+
2069+#include <drm/drmP.h>
2070+#include <drm/drm.h>
2071+
2072+#include "psb_drv.h"
2073+#include "lnc_topaz.h"
2074+#include "psb_powermgmt.h"
2075+
2076+/* WARNING: this define is very important: topaz_setup_fw() reads the CCB
+ * and writeback addresses from the last words of this RAM_SIZE region. */
2077+#define RAM_SIZE (1024 * 24)
2078+
2079+/* register default values
2080+ * THIS HEADER IS ONLY INCLUDED ONCE */
2081+static unsigned long topaz_default_regs[183][3] = {
2082+ {MVEA_START, 0x00000000, 0x00000000},
2083+ {MVEA_START, 0x00000004, 0x00000400},
2084+ {MVEA_START, 0x00000008, 0x00000000},
2085+ {MVEA_START, 0x0000000C, 0x00000000},
2086+ {MVEA_START, 0x00000010, 0x00000000},
2087+ {MVEA_START, 0x00000014, 0x00000000},
2088+ {MVEA_START, 0x00000018, 0x00000000},
2089+ {MVEA_START, 0x0000001C, 0x00000000},
2090+ {MVEA_START, 0x00000020, 0x00000120},
2091+ {MVEA_START, 0x00000024, 0x00000000},
2092+ {MVEA_START, 0x00000028, 0x00000000},
2093+ {MVEA_START, 0x00000100, 0x00000000},
2094+ {MVEA_START, 0x00000104, 0x00000000},
2095+ {MVEA_START, 0x00000108, 0x00000000},
2096+ {MVEA_START, 0x0000010C, 0x00000000},
2097+ {MVEA_START, 0x0000011C, 0x00000001},
2098+ {MVEA_START, 0x0000012C, 0x00000000},
2099+ {MVEA_START, 0x00000180, 0x00000000},
2100+ {MVEA_START, 0x00000184, 0x00000000},
2101+ {MVEA_START, 0x00000188, 0x00000000},
2102+ {MVEA_START, 0x0000018C, 0x00000000},
2103+ {MVEA_START, 0x00000190, 0x00000000},
2104+ {MVEA_START, 0x00000194, 0x00000000},
2105+ {MVEA_START, 0x00000198, 0x00000000},
2106+ {MVEA_START, 0x0000019C, 0x00000000},
2107+ {MVEA_START, 0x000001A0, 0x00000000},
2108+ {MVEA_START, 0x000001A4, 0x00000000},
2109+ {MVEA_START, 0x000001A8, 0x00000000},
2110+ {MVEA_START, 0x000001AC, 0x00000000},
2111+ {MVEA_START, 0x000001B0, 0x00000000},
2112+ {MVEA_START, 0x000001B4, 0x00000000},
2113+ {MVEA_START, 0x000001B8, 0x00000000},
2114+ {MVEA_START, 0x000001BC, 0x00000000},
2115+ {MVEA_START, 0x000001F8, 0x00000000},
2116+ {MVEA_START, 0x000001FC, 0x00000000},
2117+ {MVEA_START, 0x00000200, 0x00000000},
2118+ {MVEA_START, 0x00000204, 0x00000000},
2119+ {MVEA_START, 0x00000208, 0x00000000},
2120+ {MVEA_START, 0x0000020C, 0x00000000},
2121+ {MVEA_START, 0x00000210, 0x00000000},
2122+ {MVEA_START, 0x00000220, 0x00000001},
2123+ {MVEA_START, 0x00000224, 0x0000001F},
2124+ {MVEA_START, 0x00000228, 0x00000100},
2125+ {MVEA_START, 0x0000022C, 0x00001F00},
2126+ {MVEA_START, 0x00000230, 0x00000101},
2127+ {MVEA_START, 0x00000234, 0x00001F1F},
2128+ {MVEA_START, 0x00000238, 0x00001F01},
2129+ {MVEA_START, 0x0000023C, 0x0000011F},
2130+ {MVEA_START, 0x00000240, 0x00000200},
2131+ {MVEA_START, 0x00000244, 0x00001E00},
2132+ {MVEA_START, 0x00000248, 0x00000002},
2133+ {MVEA_START, 0x0000024C, 0x0000001E},
2134+ {MVEA_START, 0x00000250, 0x00000003},
2135+ {MVEA_START, 0x00000254, 0x0000001D},
2136+ {MVEA_START, 0x00000258, 0x00001F02},
2137+ {MVEA_START, 0x0000025C, 0x00000102},
2138+ {MVEA_START, 0x00000260, 0x0000011E},
2139+ {MVEA_START, 0x00000264, 0x00000000},
2140+ {MVEA_START, 0x00000268, 0x00000000},
2141+ {MVEA_START, 0x0000026C, 0x00000000},
2142+ {MVEA_START, 0x00000270, 0x00000000},
2143+ {MVEA_START, 0x00000274, 0x00000000},
2144+ {MVEA_START, 0x00000278, 0x00000000},
2145+ {MVEA_START, 0x00000280, 0x00008000},
2146+ {MVEA_START, 0x00000284, 0x00000000},
2147+ {MVEA_START, 0x00000288, 0x00000000},
2148+ {MVEA_START, 0x0000028C, 0x00000000},
2149+ {MVEA_START, 0x00000314, 0x00000000},
2150+ {MVEA_START, 0x00000318, 0x00000000},
2151+ {MVEA_START, 0x0000031C, 0x00000000},
2152+ {MVEA_START, 0x00000320, 0x00000000},
2153+ {MVEA_START, 0x00000324, 0x00000000},
2154+ {MVEA_START, 0x00000348, 0x00000000},
2155+ {MVEA_START, 0x00000380, 0x00000000},
2156+ {MVEA_START, 0x00000384, 0x00000000},
2157+ {MVEA_START, 0x00000388, 0x00000000},
2158+ {MVEA_START, 0x0000038C, 0x00000000},
2159+ {MVEA_START, 0x00000390, 0x00000000},
2160+ {MVEA_START, 0x00000394, 0x00000000},
2161+ {MVEA_START, 0x00000398, 0x00000000},
2162+ {MVEA_START, 0x0000039C, 0x00000000},
2163+ {MVEA_START, 0x000003A0, 0x00000000},
2164+ {MVEA_START, 0x000003A4, 0x00000000},
2165+ {MVEA_START, 0x000003A8, 0x00000000},
2166+ {MVEA_START, 0x000003B0, 0x00000000},
2167+ {MVEA_START, 0x000003B4, 0x00000000},
2168+ {MVEA_START, 0x000003B8, 0x00000000},
2169+ {MVEA_START, 0x000003BC, 0x00000000},
2170+ {MVEA_START, 0x000003D4, 0x00000000},
2171+ {MVEA_START, 0x000003D8, 0x00000000},
2172+ {MVEA_START, 0x000003DC, 0x00000000},
2173+ {MVEA_START, 0x000003E0, 0x00000000},
2174+ {MVEA_START, 0x000003E4, 0x00000000},
2175+ {MVEA_START, 0x000003EC, 0x00000000},
2176+ {MVEA_START, 0x000002D0, 0x00000000},
2177+ {MVEA_START, 0x000002D4, 0x00000000},
2178+ {MVEA_START, 0x000002D8, 0x00000000},
2179+ {MVEA_START, 0x000002DC, 0x00000000},
2180+ {MVEA_START, 0x000002E0, 0x00000000},
2181+ {MVEA_START, 0x000002E4, 0x00000000},
2182+ {MVEA_START, 0x000002E8, 0x00000000},
2183+ {MVEA_START, 0x000002EC, 0x00000000},
2184+ {MVEA_START, 0x000002F0, 0x00000000},
2185+ {MVEA_START, 0x000002F4, 0x00000000},
2186+ {MVEA_START, 0x000002F8, 0x00000000},
2187+ {MVEA_START, 0x000002FC, 0x00000000},
2188+ {MVEA_START, 0x00000300, 0x00000000},
2189+ {MVEA_START, 0x00000304, 0x00000000},
2190+ {MVEA_START, 0x00000308, 0x00000000},
2191+ {MVEA_START, 0x0000030C, 0x00000000},
2192+ {MVEA_START, 0x00000290, 0x00000000},
2193+ {MVEA_START, 0x00000294, 0x00000000},
2194+ {MVEA_START, 0x00000298, 0x00000000},
2195+ {MVEA_START, 0x0000029C, 0x00000000},
2196+ {MVEA_START, 0x000002A0, 0x00000000},
2197+ {MVEA_START, 0x000002A4, 0x00000000},
2198+ {MVEA_START, 0x000002A8, 0x00000000},
2199+ {MVEA_START, 0x000002AC, 0x00000000},
2200+ {MVEA_START, 0x000002B0, 0x00000000},
2201+ {MVEA_START, 0x000002B4, 0x00000000},
2202+ {MVEA_START, 0x000002B8, 0x00000000},
2203+ {MVEA_START, 0x000002BC, 0x00000000},
2204+ {MVEA_START, 0x000002C0, 0x00000000},
2205+ {MVEA_START, 0x000002C4, 0x00000000},
2206+ {MVEA_START, 0x000002C8, 0x00000000},
2207+ {MVEA_START, 0x000002CC, 0x00000000},
2208+ {MVEA_START, 0x00000080, 0x00000000},
2209+ {MVEA_START, 0x00000084, 0x80705700},
2210+ {MVEA_START, 0x00000088, 0x00000000},
2211+ {MVEA_START, 0x0000008C, 0x00000000},
2212+ {MVEA_START, 0x00000090, 0x00000000},
2213+ {MVEA_START, 0x00000094, 0x00000000},
2214+ {MVEA_START, 0x00000098, 0x00000000},
2215+ {MVEA_START, 0x0000009C, 0x00000000},
2216+ {MVEA_START, 0x000000A0, 0x00000000},
2217+ {MVEA_START, 0x000000A4, 0x00000000},
2218+ {MVEA_START, 0x000000A8, 0x00000000},
2219+ {MVEA_START, 0x000000AC, 0x00000000},
2220+ {MVEA_START, 0x000000B0, 0x00000000},
2221+ {MVEA_START, 0x000000B4, 0x00000000},
2222+ {MVEA_START, 0x000000B8, 0x00000000},
2223+ {MVEA_START, 0x000000BC, 0x00000000},
2224+ {MVEA_START, 0x000000C0, 0x00000000},
2225+ {MVEA_START, 0x000000C4, 0x00000000},
2226+ {MVEA_START, 0x000000C8, 0x00000000},
2227+ {MVEA_START, 0x000000CC, 0x00000000},
2228+ {MVEA_START, 0x000000D0, 0x00000000},
2229+ {MVEA_START, 0x000000D4, 0x00000000},
2230+ {MVEA_START, 0x000000D8, 0x00000000},
2231+ {MVEA_START, 0x000000DC, 0x00000000},
2232+ {MVEA_START, 0x000000E0, 0x00000000},
2233+ {MVEA_START, 0x000000E4, 0x00000000},
2234+ {MVEA_START, 0x000000E8, 0x00000000},
2235+ {MVEA_START, 0x000000EC, 0x00000000},
2236+ {MVEA_START, 0x000000F0, 0x00000000},
2237+ {MVEA_START, 0x000000F4, 0x00000000},
2238+ {MVEA_START, 0x000000F8, 0x00000000},
2239+ {MVEA_START, 0x000000FC, 0x00000000},
2240+ {TOPAZ_VLC_START, 0x00000000, 0x00000000},
2241+ {TOPAZ_VLC_START, 0x00000004, 0x00000000},
2242+ {TOPAZ_VLC_START, 0x00000008, 0x00000000},
2243+ {TOPAZ_VLC_START, 0x0000000C, 0x00000000},
2244+ {TOPAZ_VLC_START, 0x00000010, 0x00000000},
2245+ {TOPAZ_VLC_START, 0x00000014, 0x00000000},
2246+ {TOPAZ_VLC_START, 0x0000001C, 0x00000000},
2247+ {TOPAZ_VLC_START, 0x00000020, 0x00000000},
2248+ {TOPAZ_VLC_START, 0x00000024, 0x00000000},
2249+ {TOPAZ_VLC_START, 0x0000002C, 0x00000000},
2250+ {TOPAZ_VLC_START, 0x00000034, 0x00000000},
2251+ {TOPAZ_VLC_START, 0x00000038, 0x00000000},
2252+ {TOPAZ_VLC_START, 0x0000003C, 0x00000000},
2253+ {TOPAZ_VLC_START, 0x00000040, 0x00000000},
2254+ {TOPAZ_VLC_START, 0x00000044, 0x00000000},
2255+ {TOPAZ_VLC_START, 0x00000048, 0x00000000},
2256+ {TOPAZ_VLC_START, 0x0000004C, 0x00000000},
2257+ {TOPAZ_VLC_START, 0x00000050, 0x00000000},
2258+ {TOPAZ_VLC_START, 0x00000054, 0x00000000},
2259+ {TOPAZ_VLC_START, 0x00000058, 0x00000000},
2260+ {TOPAZ_VLC_START, 0x0000005C, 0x00000000},
2261+ {TOPAZ_VLC_START, 0x00000060, 0x00000000},
2262+ {TOPAZ_VLC_START, 0x00000064, 0x00000000},
2263+ {TOPAZ_VLC_START, 0x00000068, 0x00000000},
2264+ {TOPAZ_VLC_START, 0x0000006C, 0x00000000}
2265+};
2266+
2267+#define FIRMWARE_NAME "topaz_fw.bin"
2268+
2269+/* static function declarations */
2270+static int topaz_upload_fw(struct drm_device *dev,
2271+ enum drm_lnc_topaz_codec codec);
2272+static inline void topaz_set_default_regs(struct drm_psb_private
2273+ *dev_priv);
2274+
2275+#define UPLOAD_FW_BY_DMA 1
2276+
2277+#if UPLOAD_FW_BY_DMA
2278+static void topaz_dma_transfer(struct drm_psb_private *dev_priv,
2279+ uint32_t channel, uint32_t src_phy_addr,
2280+ uint32_t offset, uint32_t dst_addr,
2281+ uint32_t byte_num, uint32_t is_increment,
2282+ uint32_t is_write);
2283+#else
2284+static void topaz_mtx_upload_by_register(struct drm_device *dev,
2285+ uint32_t mtx_mem, uint32_t addr,
2286+ uint32_t size,
2287+ struct ttm_buffer_object *buf);
2288+#endif
2289+
2290+static void topaz_write_core_reg(struct drm_psb_private *dev_priv,
2291+ uint32_t reg, const uint32_t val);
2292+static void topaz_read_core_reg(struct drm_psb_private *dev_priv,
2293+ uint32_t reg, uint32_t *ret_val);
2294+static void get_mtx_control_from_dash(struct drm_psb_private *dev_priv);
2295+static void release_mtx_control_from_dash(struct drm_psb_private
2296+ *dev_priv);
2297+static void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv);
2298+static void mtx_dma_read(struct drm_device *dev, uint32_t source_addr,
2299+ uint32_t size);
2300+static void mtx_dma_write(struct drm_device *dev);
2301+
2302+
2303+#define DEBUG_FUNCTION 0
2304+
2305+#if DEBUG_FUNCTION
2306+static int topaz_test_null(struct drm_device *dev, uint32_t seq);
2307+static int topaz_test_sync(struct drm_device *dev, uint32_t seq,
2308+ uint32_t sync_seq);
2309+static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value);
2310+static void topaz_save_default_regs(struct drm_psb_private *dev_priv,
2311+ uint32_t *data);
2312+static void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
2313+ uint32_t *data);
2314+static int topaz_test_sync_manual_alloc_page(struct drm_device *dev,
2315+ uint32_t seq,
2316+ uint32_t sync_seq,
2317+ uint32_t offset);
2318+static int topaz_test_sync_tt_test(struct drm_device *dev,
2319+ uint32_t seq,
2320+ uint32_t sync_seq);
2321+#endif
2322+
2323+uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
2324+ uint32_t byte_addr)
2325+{
2326+ uint32_t read_val;
2327+ uint32_t reg, bank_size, ram_bank_size, ram_id;
2328+
2329+ TOPAZ_READ32(0x3c, &reg);
2330+ reg = 0x0a0a0606;
2331+ bank_size = (reg & 0xF0000) >> 16;
2332+
2333+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
2334+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
2335+
2336+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
2337+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
2338+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR) |
2339+ F_ENCODE(1, MTX_MTX_MCMR));
2340+
2341+ /* ?? poll this reg? */
2342+ topaz_wait_for_register(dev_priv,
2343+ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
2344+ 1, 1);
2345+
2346+ MTX_READ32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, &read_val);
2347+
2348+ return read_val;
2349+}
2350+
2351+void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
2352+ uint32_t byte_addr, uint32_t val)
2353+{
2354+ uint32_t ram_id = 0;
2355+ uint32_t reg, bank_size, ram_bank_size;
2356+
2357+ TOPAZ_READ32(0x3c, &reg);
2358+
2359+ /* PSB_DEBUG_GENERAL ("TOPAZ: DEBUG REG(%x)\n", reg); */
2360+ reg = 0x0a0a0606;
2361+
2362+ bank_size = (reg & 0xF0000) >> 16;
2363+
2364+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
2365+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
2366+
2367+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
2368+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
2369+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
2370+
2371+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
2372+
2373+ /* ?? poll this reg? */
2374+ topaz_wait_for_register(dev_priv,
2375+ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
2376+ 1, 1);
2377+
2378+ return;
2379+}
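+
+/*
+ * Both accessors above follow the same indirect access protocol: program the
+ * RAM bank id and word address into MTX_CR_MTX_RAM_ACCESS_CONTROL, move the
+ * data through MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, then poll
+ * MTX_CR_MTX_RAM_ACCESS_STATUS until its MCM_STAT bit signals completion.
+ */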
2380+
2381+void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
2382+ uint32_t byte_addr)
2383+{
2384+ uint32_t ram_id = 0;
2385+ uint32_t reg, bank_size, ram_bank_size;
2386+
2387+ TOPAZ_READ32(0x3c, &reg);
2388+
2389+ reg = 0x0a0a0606;
2390+
2391+ bank_size = (reg & 0xF0000) >> 16;
2392+
2393+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
2394+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
2395+
2396+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
2397+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
2398+ F_ENCODE(1, MTX_MTX_MCMAI) |
2399+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
2400+}
2401+
2402+void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
2403+ uint32_t val)
2404+{
2405+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
2406+}
2407+
2408+
2409+int topaz_wait_for_register(struct drm_psb_private *dev_priv,
2410+ uint32_t addr, uint32_t value, uint32_t mask)
2411+{
2412+ uint32_t tmp;
2413+ uint32_t count = 10000;
2414+
2415+ /* # poll the topaz register a bounded number of times */
2416+ while (count) {
2417+ /* #.# read */
2418+ MM_READ32(addr, 0, &tmp);
2419+
2420+ if (value == (tmp & mask))
2421+ return 0;
2422+
2423+ /* #.# delay and loop */
2424+ DRM_UDELAY(100);
2425+ --count;
2426+ }
2427+
2428+ /* # the wait timed out; return an error to indicate failure */
2429+ /* XXX: the test suite uses a timeout count of 10000 */
2430+
2431+ DRM_ERROR("TOPAZ:time out to poll addr(0x%x) expected value(0x%08x), "
2432+ "actual 0x%08x (0x%08x & 0x%08x)\n",
2433+ addr, value, tmp & mask, tmp, mask);
2434+
2435+ return -EBUSY;
2436+
2437+}
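+
+/*
+ * Typical usage, mirroring the call made later in topaz_setup_fw() to wait
+ * for the firmware's MTX interrupt (sketch only):
+ *
+ *	ret = topaz_wait_for_register(dev_priv,
+ *			TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT,
+ *			F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTS_MTX),
+ *			F_MASK(TOPAZ_CR_IMG_TOPAZ_INTS_MTX));
+ */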
2438+
2439+static ssize_t psb_topaz_pmstate_show(struct device *dev,
2440+ struct device_attribute *attr, char *buf)
2441+{
2442+ struct drm_device *drm_dev = dev_get_drvdata(dev);
2443+ struct drm_psb_private *dev_priv;
2444+ struct topaz_private *topaz_priv;
2445+ unsigned int pmstate;
2446+ unsigned long flags;
2447+ int ret = -EINVAL;
2448+
2449+ if (drm_dev == NULL)
2450+ return 0;
2451+
2452+ dev_priv = drm_dev->dev_private;
2453+ topaz_priv = dev_priv->topaz_private;
2454+ pmstate = topaz_priv->pmstate;
2455+
2457+ spin_lock_irqsave(&topaz_priv->topaz_lock, flags);
2458+ ret = sprintf(buf, "%s\n",
2459+ (pmstate == PSB_PMSTATE_POWERUP) ? "powerup"
2460+ : ((pmstate == PSB_PMSTATE_POWERDOWN) ? "powerdown"
2461+ : "clockgated"));
2462+ spin_unlock_irqrestore(&topaz_priv->topaz_lock, flags);
2463+
2464+ return ret;
2465+}
2466+
2467+static DEVICE_ATTR(topaz_pmstate, 0444, psb_topaz_pmstate_show, NULL);
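+
+/*
+ * The attribute above appears as "topaz_pmstate" in the PCI device's sysfs
+ * directory; reading it returns one of "powerup", "powerdown" or
+ * "clockgated" (the exact path depends on where the device is enumerated).
+ */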
2468+
2469+
2470+/* this function finishes the first part of initialization; the rest
2471+ * should be done in topaz_setup_fw()
2472+ */
2473+int lnc_topaz_init(struct drm_device *dev)
2474+{
2475+ struct drm_psb_private *dev_priv = dev->dev_private;
2476+ struct ttm_bo_device *bdev = &dev_priv->bdev;
2477+ uint32_t core_id, core_rev;
2478+ int ret = 0, n;
2479+ bool is_iomem;
2480+ struct topaz_private *topaz_priv;
2481+ void *topaz_bo_virt;
2482+
2483+ PSB_DEBUG_GENERAL("TOPAZ: init topaz data structures\n");
2484+ topaz_priv = kmalloc(sizeof(struct topaz_private), GFP_KERNEL);
2485+ if (topaz_priv == NULL)
2486+ return -1;
2487+
2488+ dev_priv->topaz_private = topaz_priv;
2489+ memset(topaz_priv, 0, sizeof(struct topaz_private));
2490+
2491+ /* get device --> drm_device --> drm_psb_private --> topaz_priv
2492+ * for psb_topaz_pmstate_show: topaz_pmpolicy
2493+ * if not pci_set_drvdata, can't get drm_device from device
2494+ */
2495+ pci_set_drvdata(dev->pdev, dev);
2496+ if (device_create_file(&dev->pdev->dev,
2497+ &dev_attr_topaz_pmstate))
2498+ DRM_ERROR("TOPAZ: could not create sysfs file\n");
2499+ topaz_priv->sysfs_pmstate = sysfs_get_dirent(
2500+ dev->pdev->dev.kobj.sd, "topaz_pmstate");
2501+
2502+ topaz_priv = dev_priv->topaz_private;
2503+
2504+ /* # initialize the topaz command queue [msvdx_queue] */
2505+ INIT_LIST_HEAD(&topaz_priv->topaz_queue);
2506+ /* # init mutex? CHECK: mutex usage [msvdx_mutex] */
2507+ mutex_init(&topaz_priv->topaz_mutex);
2508+ /* # spin lock init? CHECK spin lock usage [msvdx_lock] */
2509+ spin_lock_init(&topaz_priv->topaz_lock);
2510+
2511+ /* # topaz status init. [msvdx_busy] */
2512+ topaz_priv->topaz_busy = 0;
2513+ topaz_priv->topaz_cmd_seq = 0;
2514+ topaz_priv->topaz_fw_loaded = 0;
2515+ /* FIXME: workaround since JPEG firmware is not ready */
2516+ topaz_priv->topaz_cur_codec = 1;
2517+ topaz_priv->cur_mtx_data_size = 0;
2518+
2519+ topaz_priv->topaz_mtx_reg_state = kmalloc(TOPAZ_MTX_REG_SIZE,
2520+ GFP_KERNEL);
2521+ if (topaz_priv->topaz_mtx_reg_state == NULL) {
2522+ DRM_ERROR("TOPAZ: failed to allocate space "
2523+ "for mtx register\n");
2524+ return -1;
2525+ }
2526+
2527+ /* # allocate the write-back structure; we may only need 32 + 4 DWORDs */
2528+ ret = ttm_buffer_object_create(bdev, 4096,
2529+ ttm_bo_type_kernel,
2530+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
2531+ 0, 0, 0, NULL, &(topaz_priv->topaz_bo));
2532+ if (ret != 0) {
2533+ DRM_ERROR("TOPAZ: failed to allocate topaz BO.\n");
2534+ return ret;
2535+ }
2536+
2537+ ret = ttm_bo_kmap(topaz_priv->topaz_bo, 0,
2538+ topaz_priv->topaz_bo->num_pages,
2539+ &topaz_priv->topaz_bo_kmap);
2540+ if (ret) {
2541+ DRM_ERROR("TOPAZ: map topaz BO bo failed......\n");
2542+ ttm_bo_unref(&topaz_priv->topaz_bo);
2543+ return ret;
2544+ }
2545+
2546+ topaz_bo_virt = ttm_kmap_obj_virtual(&topaz_priv->topaz_bo_kmap,
2547+ &is_iomem);
2548+ topaz_priv->topaz_ccb_wb = (void *) topaz_bo_virt;
2549+ topaz_priv->topaz_wb_offset = topaz_priv->topaz_bo->offset;
2550+ topaz_priv->topaz_sync_addr = (uint32_t *) (topaz_bo_virt
2551+ + 2048);
2552+ topaz_priv->topaz_sync_offset = topaz_priv->topaz_wb_offset
2553+ + 2048;
2554+ PSB_DEBUG_GENERAL("TOPAZ: alloc BO for WriteBack and SYNC\n");
2555+ PSB_DEBUG_GENERAL("TOPAZ: WB offset=0x%08x\n",
2556+ topaz_priv->topaz_wb_offset);
2557+ PSB_DEBUG_GENERAL("TOPAZ: SYNC offset=0x%08x\n",
2558+ topaz_priv->topaz_sync_offset);
2559+
2560+ *(topaz_priv->topaz_sync_addr) = ~0; /* reset sync seq */
2561+
2562+ /* # reset topaz */
2563+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2564+ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2565+ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2566+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2567+ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2568+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2569+ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2570+
2571+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2572+ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2573+ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2574+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2575+ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2576+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2577+ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2578+
2579+ /* # set up MMU */
2580+ topaz_mmu_hwsetup(dev_priv);
2581+
2582+ PSB_DEBUG_GENERAL("TOPAZ: defer firmware loading to the place"
2583+ "when receiving user space commands\n");
2584+
2585+#if 0 /* can't load FW here */
2586+ /* #.# load fw to driver */
2587+ PSB_DEBUG_GENERAL("TOPAZ: will init firmware\n");
2588+ ret = topaz_init_fw(dev);
2589+ if (ret != 0)
2590+ return -1;
2591+
2592+ topaz_setup_fw(dev, IMG_CODEC_MPEG4_NO_RC);/* just for test */
2593+#endif
2594+ /* <msvdx does> # minimal clock */
2595+
2596+ /* <msvdx does> # return 0 */
2597+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_ID, &core_id);
2598+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_REV, &core_rev);
2599+
2600+ PSB_DEBUG_GENERAL("TOPAZ: core_id(%x) core_rev(%x)\n",
2601+ core_id, core_rev);
2602+
2603+ /* create firmware storage */
2604+ for (n = 1; n < IMG_CODEC_NUM; ++n) {
2605+ /* #.# malloc DRM object for fw storage */
2606+ ret = ttm_buffer_object_create(bdev, 12 * 4096,
2607+ ttm_bo_type_kernel,
2608+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
2609+ 0, 0, 0, NULL, &topaz_priv->topaz_fw[n].text);
2610+ if (ret) {
2611+ DRM_ERROR("Failed to allocate firmware.\n");
2612+ goto out;
2613+ }
2614+
2615+ /* #.# malloc DRM object for fw storage */
2616+ ret = ttm_buffer_object_create(bdev, 12 * 4096,
2617+ ttm_bo_type_kernel,
2618+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
2619+ 0, 0, 0, NULL, &topaz_priv->topaz_fw[n].data);
2620+ if (ret) {
2621+ DRM_ERROR("Failed to allocate firmware.\n");
2622+ goto out;
2623+ }
2624+ }
2625+
2626+ ret = ttm_buffer_object_create(bdev,
2627+ 12 * 4096,
2628+ ttm_bo_type_kernel,
2629+ DRM_PSB_FLAG_MEM_MMU |
2630+ TTM_PL_FLAG_NO_EVICT,
2631+ 0, 0, 0, NULL,
2632+ &topaz_priv->topaz_mtx_data_mem);
2633+ if (ret) {
2634+ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
2635+ "mtx data save\n");
2636+ goto out;
2637+ }
2638+ topaz_priv->cur_mtx_data_size = 0;
2639+
2640+ PSB_DEBUG_INIT("TOPAZ:old clock gating disable = 0x%08x\n",
2641+ PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
2642+ PSB_DEBUG_INIT("TOPAZ:rest MSDVX to disable clock gating\n");
2643+
2644+ PSB_WVDC32(0x00011fff, PSB_TOPAZ_CLOCKGATING);
2645+
2646+ PSB_DEBUG_INIT("MSDVX:new clock gating disable = 0x%08x\n",
2647+ PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
2648+
2649+ return 0;
2650+
2651+out:
2652+ for (n = 1; n < IMG_CODEC_NUM; ++n) {
2653+ if (topaz_priv->topaz_fw[n].text != NULL)
2654+ ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
2655+ if (topaz_priv->topaz_fw[n].data != NULL)
2656+ ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
2657+ }
2658+
2659+ if (topaz_priv->topaz_mtx_data_mem != NULL)
2660+ ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem);
2661+
2662+ return ret;
2663+}
2664+
2665+int lnc_topaz_uninit(struct drm_device *dev)
2666+{
2667+ struct drm_psb_private *dev_priv = dev->dev_private;
2668+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
2669+ int n;
2670+
2671+ /* flush MMU */
2672+ PSB_DEBUG_GENERAL("XXX: need to flush mmu cache here??\n");
2673+ /* topaz_mmu_flushcache (dev_priv); */
2674+
2675+ /* # reset TOPAZ chip */
2676+ lnc_topaz_reset(dev_priv);
2677+
2678+ /* release resources */
2679+ /* # release write back memory */
2680+ topaz_priv->topaz_ccb_wb = NULL;
2681+
2682+ /* release mtx register save space */
2683+ kfree(topaz_priv->topaz_mtx_reg_state);
2684+
2685+ /* release mtx data memory save space */
2686+ if (topaz_priv->topaz_mtx_data_mem)
2687+ ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem);
2688+
2689+ /* # release firmware storage */
2690+ for (n = 1; n < IMG_CODEC_NUM; ++n) {
2691+ if (topaz_priv->topaz_fw[n].text != NULL)
2692+ ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
2693+ if (topaz_priv->topaz_fw[n].data != NULL)
2694+ ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
2695+ }
2696+
2697+ ttm_bo_kunmap(&topaz_priv->topaz_bo_kmap);
2698+ ttm_bo_unref(&topaz_priv->topaz_bo);
2699+
2700+ if (topaz_priv) {
2701+ pci_set_drvdata(dev->pdev, NULL);
2702+ device_remove_file(&dev->pdev->dev, &dev_attr_topaz_pmstate);
2703+ sysfs_put(topaz_priv->sysfs_pmstate);
2704+ topaz_priv->sysfs_pmstate = NULL;
2705+
2706+ kfree(topaz_priv);
2707+ dev_priv->topaz_private = NULL;
2708+ }
2709+
2710+ return 0;
2711+}
2712+
2713+int lnc_topaz_reset(struct drm_psb_private *dev_priv)
2714+{
2715+ struct topaz_private *topaz_priv;
2716+
2717+ topaz_priv = dev_priv->topaz_private;
2718+ topaz_priv->topaz_busy = 0;
2719+ topaz_priv->topaz_cmd_seq = 0;
2720+ topaz_priv->cur_mtx_data_size = 0;
2721+ topaz_priv->topaz_cmd_windex = 0;
2722+ topaz_priv->topaz_needs_reset = 0;
2723+
2724+ /* # reset topaz */
2725+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2726+ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2727+ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2728+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2729+ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2730+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2731+ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2732+
2733+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2734+ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2735+ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2736+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2737+ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2738+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2739+ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2740+
2741+ /* # set up MMU */
2742+ topaz_mmu_hwsetup(dev_priv);
2743+
2744+ return 0;
2745+}
2746+
2747+/* read the firmware bin file and load all of its data into the driver */
2748+int topaz_init_fw(struct drm_device *dev)
2749+{
2750+ struct drm_psb_private *dev_priv = dev->dev_private;
2751+ const struct firmware *raw = NULL;
2752+ unsigned char *ptr;
2753+ int ret = 0;
2754+ int n;
2755+ struct topaz_fwinfo *cur_fw;
2756+ int cur_size;
2757+ struct topaz_codec_fw *cur_codec;
2758+ struct ttm_buffer_object **cur_drm_obj;
2759+ struct ttm_bo_kmap_obj tmp_kmap;
2760+ bool is_iomem;
2761+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
2762+
2763+ topaz_priv->stored_initial_qp = 0;
2764+
2765+ /* # get firmware */
2766+ ret = request_firmware(&raw, FIRMWARE_NAME, &dev->pdev->dev);
2767+ if (ret != 0) {
2768+ DRM_ERROR("TOPAZ: request_firmware failed: %d\n", ret);
2769+ return ret;
2770+ }
2771+
2772+ PSB_DEBUG_GENERAL("TOPAZ: opened firmware\n");
2773+
2774+ if (raw && (raw->size < sizeof(struct topaz_fwinfo))) {
2775+ DRM_ERROR("TOPAZ: firmware file is not correct size.\n");
2776+ goto out;
2777+ }
2778+
2779+ ptr = (unsigned char *) raw->data;
2780+
2781+ if (!ptr) {
2782+ DRM_ERROR("TOPAZ: failed to load firmware.\n");
2783+ goto out;
2784+ }
2785+
2786+ /* # load fw from file */
2787+ PSB_DEBUG_GENERAL("TOPAZ: load firmware.....\n");
2788+ cur_fw = NULL;
2789+ /* the first element (index 0, IMG_CODEC_JPEG) is not used */
2790+ for (n = 1; n < IMG_CODEC_NUM; ++n) {
2791+ cur_fw = (struct topaz_fwinfo *) ptr;
2792+
2793+ cur_codec = &topaz_priv->topaz_fw[cur_fw->codec];
2794+ cur_codec->ver = cur_fw->ver;
2795+ cur_codec->codec = cur_fw->codec;
2796+ cur_codec->text_size = cur_fw->text_size;
2797+ cur_codec->data_size = cur_fw->data_size;
2798+ cur_codec->data_location = cur_fw->data_location;
2799+
2800+ PSB_DEBUG_GENERAL("TOPAZ: load firemware %s.\n",
2801+ codec_to_string(cur_fw->codec));
2802+
2803+ /* #.# handle text section */
2804+ ptr += sizeof(struct topaz_fwinfo);
2805+ cur_drm_obj = &cur_codec->text;
2806+ cur_size = cur_fw->text_size;
2807+
2808+ /* #.# fill DRM object with firmware data */
2809+ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
2810+ &tmp_kmap);
2811+ if (ret) {
2812+ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
2813+ ttm_bo_unref(cur_drm_obj);
2814+ *cur_drm_obj = NULL;
2815+ goto out;
2816+ }
2817+
2818+ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
2819+ cur_size);
2820+
2821+ ttm_bo_kunmap(&tmp_kmap);
2822+
2823+ /* #.# handle data section */
2824+ ptr += cur_fw->text_size;
2825+ cur_drm_obj = &cur_codec->data;
2826+ cur_size = cur_fw->data_size;
2827+
2828+ /* #.# fill DRM object with firmware data */
2829+ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
2830+ &tmp_kmap);
2831+ if (ret) {
2832+ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
2833+ ttm_bo_unref(cur_drm_obj);
2834+ *cur_drm_obj = NULL;
2835+ goto out;
2836+ }
2837+
2838+ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
2839+ cur_size);
2840+
2841+ ttm_bo_kunmap(&tmp_kmap);
2842+
2843+ /* #.# validate firmware */
2844+
2845+ /* #.# update ptr */
2846+ ptr += cur_fw->data_size;
2847+ }
2848+
2849+ release_firmware(raw);
2850+
2851+ PSB_DEBUG_GENERAL("TOPAZ: return from firmware init\n");
2852+
2853+ return 0;
2854+
2855+out:
2856+ if (raw) {
2857+ PSB_DEBUG_GENERAL("release firmware....\n");
2858+ release_firmware(raw);
2859+ }
2860+
2861+ return -1;
2862+}
2863+
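+/*
+ * The parser in topaz_init_fw() above expects topaz_fw.bin to be a plain
+ * concatenation of per-codec records, each a struct topaz_fwinfo header
+ * followed immediately by text_size bytes of text and data_size bytes of
+ * data:
+ *
+ *	[fwinfo][text][data][fwinfo][text][data]...
+ */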
2864+/* set up the firmware when starting a new context */
2865+int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
2866+{
2867+ struct drm_psb_private *dev_priv = dev->dev_private;
2868+ uint32_t mem_size = RAM_SIZE; /* follow DDK */
2869+ uint32_t verify_pc;
2870+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
2871+
2872+#if 0
2873+ if (codec == topaz_priv->topaz_current_codec) {
2874+ LNC_TRACEL("TOPAZ: reuse previous codec\n");
2875+ return 0;
2876+ }
2877+#endif
2878+
2879+ /* XXX: need to reset topaz? */
2880+ PSB_DEBUG_GENERAL("XXX: should reset topaz when the context changes?\n");
2881+
2882+ /* XXX: interrupts shouldn't be enabled here; this function is called
2883+ * while interrupts are enabled, but we have no choice since we have
2884+ * to call setup_fw manually */
2886+ /* # upload firmware, clear interrupts and start the firmware
2887+ * -- from hostutils.c in the test suite */
2888+
2889+ /* # reset MVEA */
2890+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2891+ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2892+ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2893+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2894+ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2895+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2896+ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2897+
2898+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2899+ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2900+ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2901+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2902+ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2903+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2904+ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2905+
2906+
2907+ topaz_mmu_hwsetup(dev_priv);
2908+
2909+#if !LNC_TOPAZ_NO_IRQ
2910+ psb_irq_uninstall_islands(dev, PSB_VIDEO_ENC_ISLAND);
2911+#endif
2912+
2913+ PSB_DEBUG_GENERAL("TOPAZ: will setup firmware....\n");
2914+
2915+ topaz_set_default_regs(dev_priv);
2916+
2917+ /* # reset mtx */
2918+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
2919+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET) |
2920+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
2921+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET));
2922+
2923+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST, 0x0);
2924+
2925+ /* # upload fw by drm */
2926+ PSB_DEBUG_GENERAL("TOPAZ: will upload firmware\n");
2927+
2928+ topaz_upload_fw(dev, codec);
2929+#if 0
2930+ /* allocate the space for context save & restore if needed */
2931+ if (topaz_priv->topaz_mtx_data_mem == NULL) {
2932+ ret = ttm_buffer_object_create(bdev,
2933+ topaz_priv->cur_mtx_data_size * 4,
2934+ ttm_bo_type_kernel,
2935+ DRM_PSB_FLAG_MEM_MMU |
2936+ TTM_PL_FLAG_NO_EVICT,
2937+ 0, 0, 0, NULL,
2938+ &topaz_priv->topaz_mtx_data_mem);
2939+ if (ret) {
2940+ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
2941+ "mtx data save\n");
2942+ return -1;
2943+ }
2944+ }
2945+ PSB_DEBUG_GENERAL("TOPAZ: after upload fw ....\n");
2946+#endif
2947+
2948+ /* XXX: in power save mode we need to save the complete data memory
2949+ * and restore it; MTX_FWIF.c records the data size */
2950+ PSB_DEBUG_GENERAL("TOPAZ:in power save mode need to save memory?\n");
2951+
2952+ PSB_DEBUG_GENERAL("TOPAZ: setting up pc address\n");
2953+ topaz_write_core_reg(dev_priv, TOPAZ_MTX_PC, PC_START_ADDRESS);
2954+
2955+ PSB_DEBUG_GENERAL("TOPAZ: verify pc address\n");
2956+
2957+ topaz_read_core_reg(dev_priv, TOPAZ_MTX_PC, &verify_pc);
2958+
2959+	/* enabling auto clock gating is essential for this driver */
2960+ TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_AUTO_CLK_GATE,
2961+ F_ENCODE(1, TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE) |
2962+ F_ENCODE(1, TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE));
2963+ MVEA_WRITE32(MVEA_CR_MVEA_AUTO_CLOCK_GATING,
2964+ F_ENCODE(1, MVEA_CR_MVEA_IPE_AUTO_CLK_GATE) |
2965+ F_ENCODE(1, MVEA_CR_MVEA_SPE_AUTO_CLK_GATE) |
2966+ F_ENCODE(1, MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE) |
2967+ F_ENCODE(1, MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE));
2968+
2969+ PSB_DEBUG_GENERAL("TOPAZ: current pc(%08X) vs %08X\n",
2970+ verify_pc, PC_START_ADDRESS);
2971+
2972+ /* # turn on MTX */
2973+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
2974+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
2975+
2976+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
2977+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
2978+
2979+ /* # poll on the interrupt which the firmware will generate */
2980+ topaz_wait_for_register(dev_priv,
2981+ TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT,
2982+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTS_MTX),
2983+ F_MASK(TOPAZ_CR_IMG_TOPAZ_INTS_MTX));
2984+
2985+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
2986+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
2987+
2988+ PSB_DEBUG_GENERAL("TOPAZ: after topaz mtx setup ....\n");
2989+
2990+ /* # get ccb buffer addr -- file hostutils.c */
2991+ topaz_priv->topaz_ccb_buffer_addr =
2992+ topaz_read_mtx_mem(dev_priv,
2993+ MTX_DATA_MEM_BASE + mem_size - 4);
2994+ topaz_priv->topaz_ccb_ctrl_addr =
2995+ topaz_read_mtx_mem(dev_priv,
2996+ MTX_DATA_MEM_BASE + mem_size - 8);
2997+ topaz_priv->topaz_ccb_size =
2998+ topaz_read_mtx_mem(dev_priv,
2999+ topaz_priv->topaz_ccb_ctrl_addr +
3000+ MTX_CCBCTRL_CCBSIZE);
3001+
3002+ topaz_priv->topaz_cmd_windex = 0;
3003+
3004+ PSB_DEBUG_GENERAL("TOPAZ:ccb_buffer_addr(%x),ctrl_addr(%x) size(%d)\n",
3005+ topaz_priv->topaz_ccb_buffer_addr,
3006+ topaz_priv->topaz_ccb_ctrl_addr,
3007+ topaz_priv->topaz_ccb_size);
3008+
3009+ /* # write back the initial QP Value */
3010+ topaz_write_mtx_mem(dev_priv,
3011+ topaz_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP,
3012+ topaz_priv->stored_initial_qp);
3013+
3014+ PSB_DEBUG_GENERAL("TOPAZ: write WB mem address 0x%08x\n",
3015+ topaz_priv->topaz_wb_offset);
3016+ topaz_write_mtx_mem(dev_priv, MTX_DATA_MEM_BASE + mem_size - 12,
3017+ topaz_priv->topaz_wb_offset);
3018+
3019+ /* this kick is essential for mtx.... */
3020+ *((uint32_t *) topaz_priv->topaz_ccb_wb) = 0x01020304;
3021+ topaz_mtx_kick(dev_priv, 1);
3022+ DRM_UDELAY(1000);
3023+ PSB_DEBUG_GENERAL("TOPAZ: DDK expected 0x12345678 in WB memory,"
3024+ " and here it is 0x%08x\n",
3025+ *((uint32_t *) topaz_priv->topaz_ccb_wb));
3026+
3027+ *((uint32_t *) topaz_priv->topaz_ccb_wb) = 0x0;/* reset it to 0 */
3028+ PSB_DEBUG_GENERAL("TOPAZ: firmware uploaded.\n");
3029+
3030+	/* XXX: is there any need to record the next cmd num??
3031+	 * we use the fence sequence number to record it
3032+	 */
3033+ topaz_priv->topaz_busy = 0;
3034+ topaz_priv->topaz_cmd_seq = 0;
3035+
3036+#if !LNC_TOPAZ_NO_IRQ
3037+ psb_irq_preinstall_islands(dev, PSB_VIDEO_ENC_ISLAND);
3038+ psb_irq_postinstall_islands(dev, PSB_VIDEO_ENC_ISLAND);
3039+ lnc_topaz_enableirq(dev);
3040+#endif
3041+
3042+#if 0
3043+ topaz_mmu_flushcache(dev_priv);
3044+ topaz_test_null(dev, 0xe1e1);
3045+ topaz_test_null(dev, 0xe2e2);
3046+ topaz_test_sync(dev, 0xe2e2, 0x87654321);
3047+
3048+ topaz_mmu_test(dev, 0x12345678);
3049+ topaz_test_null(dev, 0xe3e3);
3050+ topaz_mmu_test(dev, 0x8764321);
3051+
3052+ topaz_test_null(dev, 0xe4e4);
3053+ topaz_test_null(dev, 0xf3f3);
3054+#endif
3055+
3056+ return 0;
3057+}
3058+
3059+#if UPLOAD_FW_BY_DMA
3060+int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
3061+{
3062+ struct drm_psb_private *dev_priv = dev->dev_private;
3063+ const struct topaz_codec_fw *cur_codec_fw;
3064+ uint32_t text_size, data_size;
3065+ uint32_t data_location;
3066+ uint32_t cur_mtx_data_size;
3067+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3068+
3069+ /* # refer HLD document */
3070+
3071+ /* # MTX reset */
3072+ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
3073+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
3074+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
3075+
3076+ DRM_UDELAY(6000);
3077+
3078+ /* # upload the firmware by DMA */
3079+ cur_codec_fw = &topaz_priv->topaz_fw[codec];
3080+
3081+ PSB_DEBUG_GENERAL("Topaz:upload codec %s(%d) text sz=%d data sz=%d"
3082+ " data location(%d)\n", codec_to_string(codec), codec,
3083+ cur_codec_fw->text_size, cur_codec_fw->data_size,
3084+ cur_codec_fw->data_location);
3085+
3086+ /* # upload text */
3087+ text_size = cur_codec_fw->text_size / 4;
3088+
3089+	/* setup the MTX to start receiving data:
3090+ use a register for the transfer which will point to the source
3091+ (MTX_CR_MTX_SYSC_CDMAT) */
3092+ /* #.# fill the dst addr */
3093+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
3094+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
3095+ F_ENCODE(2, MTX_BURSTSIZE) |
3096+ F_ENCODE(0, MTX_RNW) |
3097+ F_ENCODE(1, MTX_ENABLE) |
3098+ F_ENCODE(text_size, MTX_LENGTH));
3099+
3100+ /* #.# set DMAC access to host memory via BIF */
3101+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
3102+
3103+ /* #.# transfer the codec */
3104+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
3105+ MTX_CR_MTX_SYSC_CDMAT, text_size, 0, 0);
3106+
3107+ /* #.# wait dma finish */
3108+ topaz_wait_for_register(dev_priv,
3109+ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
3110+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
3111+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
3112+
3113+ /* #.# clear interrupt */
3114+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
3115+
3116+ /* # return access to topaz core */
3117+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
3118+
3119+ /* # upload data */
3120+ data_size = cur_codec_fw->data_size / 4;
3121+ data_location = cur_codec_fw->data_location;
3122+
3123+ /* #.# fill the dst addr */
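+	/* NOTE (assumption): the firmware data segment appears to be linked
+	 * at 0x82880000, so subtracting that base rebases data_location into
+	 * the MTX data window starting at 0x80900000; mtx_dma_write() uses
+	 * the same arithmetic. */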
3124+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA,
3125+ 0x80900000 + (data_location - 0x82880000));
3126+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
3127+ F_ENCODE(2, MTX_BURSTSIZE) |
3128+ F_ENCODE(0, MTX_RNW) |
3129+ F_ENCODE(1, MTX_ENABLE) |
3130+ F_ENCODE(data_size, MTX_LENGTH));
3131+
3132+ /* #.# set DMAC access to host memory via BIF */
3133+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
3134+
3135+ /* #.# transfer the codec */
3136+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->data->offset, 0,
3137+ MTX_CR_MTX_SYSC_CDMAT, data_size, 0, 0);
3138+
3139+ /* #.# wait dma finish */
3140+ topaz_wait_for_register(dev_priv,
3141+ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
3142+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
3143+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
3144+
3145+ /* #.# clear interrupt */
3146+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
3147+
3148+ /* # return access to topaz core */
3149+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
3150+
3151+ /* record this codec's mtx data size for
3152+ * context save & restore */
3153+	/* FIXME: since the non-root sighting was fixed by pre-allocation,
3154+	 * we only need to correct the buffer size
3155+	 */
3156+ cur_mtx_data_size = data_size;
3157+ if (topaz_priv->cur_mtx_data_size != cur_mtx_data_size)
3158+ topaz_priv->cur_mtx_data_size = cur_mtx_data_size;
3159+
3160+ return 0;
3161+}
3162+
3163+#else
3164+
3165+void topaz_mtx_upload_by_register(struct drm_device *dev, uint32_t mtx_mem,
3166+ uint32_t addr, uint32_t size,
3167+ struct ttm_buffer_object *buf)
3168+{
3169+ struct drm_psb_private *dev_priv = dev->dev_private;
3170+ uint32_t *buf_p;
3171+ uint32_t debug_reg, bank_size, bank_ram_size, bank_count;
3172+ uint32_t cur_ram_id, ram_addr , ram_id;
3173+ int map_ret, lp;
3174+ struct ttm_bo_kmap_obj bo_kmap;
3175+ bool is_iomem;
3176+ uint32_t cur_addr;
3177+
3178+ get_mtx_control_from_dash(dev_priv);
3179+
3180+ map_ret = ttm_bo_kmap(buf, 0, buf->num_pages, &bo_kmap);
3181+ if (map_ret) {
3182+		DRM_ERROR("TOPAZ: ttm_bo_kmap failed: %d\n", map_ret);
3183+ return;
3184+ }
3185+ buf_p = (uint32_t *) ttm_kmap_obj_virtual(&bo_kmap, &is_iomem);
3186+
3187+
3188+ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, &debug_reg);
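+	/* NOTE: the value just read from the debug register is overridden
+	 * with a hardcoded constant below; the decode that follows treats
+	 * bits [19:16] as the per-bank RAM size exponent (1 << (n + 2)) and
+	 * bits [11:8] as the bank count. */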
3189+ debug_reg = 0x0a0a0606;
3190+ bank_size = (debug_reg & 0xf0000) >> 16;
3191+ bank_ram_size = 1 << (bank_size + 2);
3192+
3193+ bank_count = (debug_reg & 0xf00) >> 8;
3194+
3195+ topaz_wait_for_register(dev_priv,
3196+ MTX_START+MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET,
3197+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
3198+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
3199+
3200+ cur_ram_id = -1;
3201+ cur_addr = addr;
3202+ for (lp = 0; lp < size / 4; ++lp) {
3203+ ram_id = mtx_mem + (cur_addr / bank_ram_size);
3204+
3205+ if (cur_ram_id != ram_id) {
3206+ ram_addr = cur_addr >> 2;
3207+
3208+ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
3209+ F_ENCODE(ram_id, MTX_MTX_MCMID) |
3210+ F_ENCODE(ram_addr, MTX_MTX_MCM_ADDR) |
3211+ F_ENCODE(1, MTX_MTX_MCMAI));
3212+
3213+ cur_ram_id = ram_id;
3214+ }
3215+ cur_addr += 4;
3216+
3217+ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET,
3218+ *(buf_p + lp));
3219+
3220+ topaz_wait_for_register(dev_priv,
3221+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET + MTX_START,
3222+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
3223+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
3224+ }
3225+
3226+ ttm_bo_kunmap(&bo_kmap);
3227+
3228+ PSB_DEBUG_GENERAL("TOPAZ: register data upload done\n");
3229+ return;
3230+}
3231+
3232+int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
3233+{
3234+ struct drm_psb_private *dev_priv = dev->dev_private;
3235+ const struct topaz_codec_fw *cur_codec_fw;
3236+ uint32_t text_size, data_size;
3237+	uint32_t data_location;
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
3238+
3239+ /* # refer HLD document */
3240+ /* # MTX reset */
3241+ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
3242+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
3243+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
3244+
3245+ DRM_UDELAY(6000);
3246+
3247+	/* # upload the firmware by register writes */
3248+ cur_codec_fw = &topaz_priv->topaz_fw[codec];
3249+
3250+ PSB_DEBUG_GENERAL("Topaz: upload codec %s text size(%d) data size(%d)"
3251+ " data location(0x%08x)\n", codec_to_string(codec),
3252+ cur_codec_fw->text_size, cur_codec_fw->data_size,
3253+ cur_codec_fw->data_location);
3254+
3255+ /* # upload text */
3256+ text_size = cur_codec_fw->text_size;
3257+
3258+ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_CODE_MEM,
3259+ PC_START_ADDRESS - MTX_MEMORY_BASE,
3260+ text_size, cur_codec_fw->text);
3261+
3262+ /* # upload data */
3263+ data_size = cur_codec_fw->data_size;
3264+ data_location = cur_codec_fw->data_location;
3265+
3266+ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_DATA_MEM,
3267+ data_location - 0x82880000, data_size,
3268+ cur_codec_fw->data);
3269+
3270+ return 0;
3271+}
3272+
3273+#endif /* UPLOAD_FW_BY_DMA */
3274+
3275+void
3276+topaz_dma_transfer(struct drm_psb_private *dev_priv, uint32_t channel,
3277+ uint32_t src_phy_addr, uint32_t offset,
3278+ uint32_t soc_addr, uint32_t byte_num,
3279+ uint32_t is_increment, uint32_t is_write)
3280+{
3281+ uint32_t dmac_count;
3282+ uint32_t irq_stat;
3283+ uint32_t count;
3284+
3285+ PSB_DEBUG_GENERAL("TOPAZ: using dma to transfer firmware\n");
3286+	/* # check that no transfer is currently in progress and no
3287+	   interrupts are outstanding (?? why do we care about interrupts) */
3288+ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &dmac_count);
3289+ if (0 != (dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)))
3290+		DRM_ERROR("TOPAZ: there is a transfer in progress\n");
3291+
3292+ /* assert(0==(dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)));*/
3293+
3294+ /* no hold off period */
3295+ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
3296+ /* clear previous interrupts */
3297+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
3298+ /* check irq status */
3299+ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_stat);
3300+ /* assert(0 == irq_stat); */
3301+ if (0 != irq_stat)
3302+		DRM_ERROR("TOPAZ: there is a pending interrupt that could not be cleared\n");
3303+
3304+ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel),
3305+ (src_phy_addr + offset));
3306+ count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
3307+ is_write, DMAC_PWIDTH_32_BIT, byte_num);
3308+ /* generate an interrupt at the end of transfer */
3309+ count |= MASK_IMG_SOC_TRANSFER_IEN;
3310+ count |= F_ENCODE(is_write, IMG_SOC_DIR);
3311+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count);
3312+
3313+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
3314+ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0,
3315+ is_increment, DMAC_BURST_2));
3316+
3317+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
3318+
3319+ /* Finally, rewrite the count register with
3320+ * the enable bit set to kick off the transfer
3321+ */
3322+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count | MASK_IMG_SOC_EN);
3323+
3324+ PSB_DEBUG_GENERAL("TOPAZ: dma transfer started.\n");
3325+
3326+ return;
3327+}
3328+
3329+void topaz_set_default_regs(struct drm_psb_private *dev_priv)
3330+{
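+	/* topaz_default_regs is assumed to be a table of { base, offset,
+	 * value } triples (hence the division by 3 * sizeof(unsigned long));
+	 * each entry is written out through MM_WRITE32. */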
3331+ int n;
3332+ int count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
3333+
3334+ for (n = 0; n < count; n++)
3335+ MM_WRITE32(topaz_default_regs[n][0],
3336+ topaz_default_regs[n][1],
3337+ topaz_default_regs[n][2]);
3338+
3339+}
3340+
3341+void topaz_write_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
3342+ const uint32_t val)
3343+{
3344+ uint32_t tmp;
3345+ get_mtx_control_from_dash(dev_priv);
3346+
3347+ /* put data into MTX_RW_DATA */
3348+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET, val);
3349+
3350+ /* request a write */
3351+ tmp = reg &
3352+ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK;
3353+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, tmp);
3354+
3355+	/* wait for the operation to finish */
3356+ topaz_wait_for_register(dev_priv,
3357+ MTX_START +
3358+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
3359+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
3360+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
3361+
3362+ release_mtx_control_from_dash(dev_priv);
3363+}
3364+
3365+void topaz_read_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
3366+ uint32_t *ret_val)
3367+{
3368+ uint32_t tmp;
3369+
3370+ get_mtx_control_from_dash(dev_priv);
3371+
3372+	/* request a read */
3373+ tmp = (reg &
3374+ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
3375+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
3376+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK | tmp);
3377+
3378+	/* wait for the operation to finish */
3379+ topaz_wait_for_register(dev_priv,
3380+ MTX_START +
3381+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
3382+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
3383+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
3384+
3385+ /* read */
3386+ MTX_READ32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET,
3387+ ret_val);
3388+
3389+ release_mtx_control_from_dash(dev_priv);
3390+}
3391+
3392+void get_mtx_control_from_dash(struct drm_psb_private *dev_priv)
3393+{
3394+ int debug_reg_slave_val;
3395+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3396+
3397+ /* GetMTXControlFromDash */
3398+ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
3399+ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE) |
3400+ F_ENCODE(2, TOPAZ_CR_MTX_DBG_GPIO_OUT));
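+	/* busy-wait until bits 4:3 of the debug register read back as zero,
+	 * which is taken here to mean the dash has been granted control of
+	 * the MTX RAM access port */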
3401+ do {
3402+ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
3403+ &debug_reg_slave_val);
3404+ } while ((debug_reg_slave_val & 0x18) != 0);
3405+
3406+ /* save access control */
3407+ TOPAZ_READ32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
3408+ &topaz_priv->topaz_dash_access_ctrl);
3409+}
3410+
3411+void release_mtx_control_from_dash(struct drm_psb_private *dev_priv)
3412+{
3413+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3414+
3415+ /* restore access control */
3416+ TOPAZ_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
3417+ topaz_priv->topaz_dash_access_ctrl);
3418+
3419+ /* release bus */
3420+ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
3421+ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE));
3422+}
3423+
3424+void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv)
3425+{
3426+ uint32_t pd_addr = psb_get_default_pd_addr(dev_priv->mmu);
3427+
3428+	/* bypass all requests while the MMU is being configured */
3429+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
3430+ F_ENCODE(1, TOPAZ_CR_MMU_BYPASS));
3431+
3432+ /* set MMU hardware at the page table directory */
3433+ PSB_DEBUG_GENERAL("TOPAZ: write PD phyaddr=0x%08x "
3434+ "into MMU_DIR_LIST0/1\n", pd_addr);
3435+ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(0), pd_addr);
3436+ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(1), 0);
3437+
3438+ /* setup index register, all pointing to directory bank 0 */
3439+ TOPAZ_WRITE32(TOPAZ_CR_MMU_BANK_INDEX, 0);
3440+
3441+ /* now enable MMU access for all requestors */
3442+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, 0);
3443+}
3444+
3445+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv)
3446+{
3447+ uint32_t mmu_control;
3448+
3449+ if (!powermgmt_is_hw_on(dev_priv->dev->pdev, PSB_VIDEO_ENC_ISLAND))
3450+ return;
3451+
3452+#if 0
3453+ PSB_DEBUG_GENERAL("XXX: Only one PTD/PTE cache"
3454+ " so flush using the master core\n");
3455+#endif
3456+ /* XXX: disable interrupt */
3457+
3458+ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &mmu_control);
3459+ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_INVALDC);
3460+ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_FLUSH);
3461+
3462+#if 0
3463+ PSB_DEBUG_GENERAL("Set Invalid flag (this causes a flush with MMU\n"
3464+ "still operating afterwards even if not cleared,\n"
3465+ "but may want to replace with MMU_FLUSH?\n");
3466+#endif
3467+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
3468+
3469+ /* clear it */
3470+ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_INVALDC));
3471+ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_FLUSH));
3472+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
3473+}
3474+
3475+#if DEBUG_FUNCTION
3476+
3477+static int topaz_test_sync(struct drm_device *dev, uint32_t seq,
3478+ uint32_t sync_seq)
3479+{
3480+ struct drm_psb_private *dev_priv = dev->dev_private;
3481+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3482+ uint32_t sync_cmd[3];
3483+ struct topaz_cmd_header *cmd_hdr;
3484+ uint32_t *sync_p = (uint32_t *)topaz_priv->topaz_sync_addr;
3485+ int count = 1000;
3486+ uint32_t clr_flag;
3487+
3488+ cmd_hdr = (struct topaz_cmd_header *)&sync_cmd[0];
3489+
3490+ /* reset sync area */
3491+ *sync_p = 0;
3492+
3493+ /* insert a SYNC command here */
3494+ cmd_hdr->id = MTX_CMDID_SYNC;
3495+ cmd_hdr->size = 3;
3496+ cmd_hdr->seq = seq;
3497+
3498+ sync_cmd[1] = topaz_priv->topaz_sync_offset;
3499+ sync_cmd[2] = sync_seq;
3500+
3501+ TOPAZ_BEGIN_CCB(dev_priv);
3502+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
3503+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
3504+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
3505+ TOPAZ_END_CCB(dev_priv, 1);
3506+
3507+ PSB_DEBUG_GENERAL("Topaz: Sent SYNC with cmd seq=0x%08x,"
3508+ "sync_seq=0x%08x\n", seq, sync_seq);
3509+
3510+ while (count && *sync_p != sync_seq) {
3511+ DRM_UDELAY(100);
3512+ --count;
3513+ }
3514+ if ((count == 0) && (*sync_p != sync_seq)) {
3515+		DRM_ERROR("TOPAZ: wait sync timeout, expected sync seq 0x%08x,"
3516+ "actual 0x%08x\n", sync_seq, *sync_p);
3517+ }
3518+	PSB_DEBUG_GENERAL("TOPAZ: SYNC succeeded, sync seq=0x%08x\n", *sync_p);
3519+ PSB_DEBUG_GENERAL("Topaz: after SYNC test, query IRQ and clear it\n");
3520+
3521+ clr_flag = lnc_topaz_queryirq(dev);
3522+ lnc_topaz_clearirq(dev, clr_flag);
3523+
3524+ return 0;
3525+}
3526+static int topaz_test_sync_tt_test(struct drm_device *dev,
3527+ uint32_t seq,
3528+ uint32_t sync_seq)
3529+{
3530+ struct drm_psb_private *dev_priv = dev->dev_private;
3531+ struct ttm_bo_device *bdev = &dev_priv->bdev;
3532+ int ret;
3533+ bool is_iomem;
3534+ struct ttm_buffer_object *test_obj;
3535+ struct ttm_bo_kmap_obj test_kmap;
3536+ unsigned int *test_adr;
3537+ uint32_t sync_cmd[3];
3538+ int count = 1000;
3539+ unsigned long pfn;
3540+
3541+ ret = ttm_buffer_object_create(bdev, 4096,
3542+ ttm_bo_type_kernel,
3543+ TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT,
3544+ 0, 0, 0, NULL, &test_obj);
3545+ if (ret) {
3546+		DRM_ERROR("failed to create test object buffer\n");
3547+ return -1;
3548+ }
3549+
3550+ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
3551+ test_obj->offset, &pfn);
3552+ if (ret) {
3553+ DRM_ERROR("failed to get pfn from virtual\n");
3554+ return -1;
3555+ }
3556+
3557+ PSB_DEBUG_GENERAL("Topaz:offset %lx, pfn %lx\n", test_obj->offset, pfn);
3558+
3559+ ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages,
3560+ &test_kmap);
3561+ if (ret) {
3562+		DRM_ERROR("failed to map buffer\n");
3563+ return -1;
3564+ }
3565+ test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem);
3566+ *test_adr = 0xff55;
3567+ ttm_bo_kunmap(&test_kmap);
3568+
3569+ /* insert a SYNC command here */
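+	/* the command word below appears to pack the command id in the low
+	 * bits, the command size at bit 8 and the sequence number in the
+	 * upper half, mirroring struct topaz_cmd_header as used in
+	 * topaz_test_sync() */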
3570+ sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) |
3571+ (seq << 16);
3572+ sync_cmd[1] = test_obj->offset;
3573+ sync_cmd[2] = sync_seq;
3574+
3575+ TOPAZ_BEGIN_CCB(dev_priv);
3576+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
3577+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
3578+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
3579+ TOPAZ_END_CCB(dev_priv, 1);
3580+
3581+ ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages,
3582+ &test_kmap);
3583+ if (ret) {
3584+		DRM_ERROR("failed to map buffer\n");
3585+ return -1;
3586+ }
3587+ test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem);
3588+
3589+ while (count && *test_adr != sync_seq) {
3590+ DRM_UDELAY(100);
3591+ --count;
3592+ }
3593+ if ((count == 0) && (*test_adr != sync_seq)) {
3594+		DRM_ERROR("TOPAZ: wait sync timeout (expected 0x%08x),"
3595+ "actual 0x%08x\n",
3596+ sync_seq, *test_adr);
3597+ }
3598+ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *test_adr);
3599+ ttm_bo_kunmap(&test_kmap);
3600+ ttm_bo_unref(&test_obj);
3601+
3602+ return 0;
3603+}
3604+
3605+static int topaz_test_sync_manual_alloc_page(struct drm_device *dev,
3606+ uint32_t seq,
3607+ uint32_t sync_seq,
3608+ uint32_t offset)
3609+{
3610+ struct drm_psb_private *dev_priv = dev->dev_private;
3611+ int ret;
3612+ uint32_t sync_cmd[3];
3613+ int count = 1000;
3614+ unsigned long pfn;
3615+
3616+ struct page *p;
3617+ uint32_t *v;
3618+/* uint32_t offset = 0xd0000000; */
3619+
3620+ p = alloc_page(GFP_DMA32);
3621+ if (!p) {
3622+ DRM_ERROR("Topaz:Failed allocating page\n");
3623+ return -1;
3624+ }
3625+
3626+ v = kmap(p);
3627+ memset(v, 0x67, PAGE_SIZE);
3628+ pfn = (offset >> PAGE_SHIFT);
3629+ kunmap(p);
3630+
3631+ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
3632+ &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
3633+ if (ret) {
3634+ DRM_ERROR("Topaz:Failed inserting mmu page\n");
3635+ return -1;
3636+ }
3637+
3638+ /* insert a SYNC command here */
3639+ sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) |
3640+ (0x5b << 16);
3641+ sync_cmd[1] = pfn << PAGE_SHIFT;
3642+ sync_cmd[2] = seq;
3643+
3644+ TOPAZ_BEGIN_CCB(dev_priv);
3645+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
3646+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
3647+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
3648+ TOPAZ_END_CCB(dev_priv, 1);
3649+
3650+ v = kmap(p);
3651+ while (count && *v != sync_seq) {
3652+ DRM_UDELAY(100);
3653+ --count;
3654+ }
3655+ if ((count == 0) && (*v != sync_seq)) {
3656+		DRM_ERROR("TOPAZ: wait sync timeout (expected 0x%08x),"
3657+ "actual 0x%08x\n",
3658+ sync_seq, *v);
3659+ }
3660+ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *v);
3661+ kunmap(p);
3662+
3663+ return 0;
3664+}
3665+
3666+static int topaz_test_null(struct drm_device *dev, uint32_t seq)
3667+{
3668+ struct drm_psb_private *dev_priv = dev->dev_private;
3669+ struct topaz_cmd_header null_cmd;
3670+ uint32_t clr_flag;
3671+
3672+	/* XXX: firmware setup is finished here....
3673+	 * send a NULL command to verify that
3674+	 * the firmware is working correctly
3675+	 */
3676+
3677+ null_cmd.id = MTX_CMDID_NULL;
3678+ null_cmd.size = 1;
3679+ null_cmd.seq = seq;
3680+
3681+ TOPAZ_BEGIN_CCB(dev_priv);
3682+ TOPAZ_OUT_CCB(dev_priv, *((uint32_t *)&null_cmd));
3683+ TOPAZ_END_CCB(dev_priv, 1);
3684+
3685+ DRM_UDELAY(1000); /* wait to finish */
3686+
3687+ PSB_DEBUG_GENERAL("Topaz: Sent NULL with sequence=0x%08x,"
3688+ " got sequence=0x%08x (WB_seq=0x%08x,WB_roff=%d)\n",
3689+ seq, CCB_CTRL_SEQ(dev_priv), WB_CCB_CTRL_SEQ(dev_priv),
3690+ WB_CCB_CTRL_RINDEX(dev_priv));
3691+
3692+ PSB_DEBUG_GENERAL("Topaz: after NULL test, query IRQ and clear it\n");
3693+
3694+ clr_flag = lnc_topaz_queryirq(dev);
3695+ lnc_topaz_clearirq(dev, clr_flag);
3696+
3697+ return 0;
3698+}
3699+
3700+
3701+/*
3702+ * this function tests whether the mmu is set up correctly:
3703+ * it gets a drm_buffer_object and uses CMD_SYNC to write
3704+ * a certain value into this buffer.
3705+ */
3706+static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value)
3707+{
3708+ struct drm_psb_private *dev_priv = dev->dev_private;
3709+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3710+ unsigned long real_pfn;
3711+ int ret;
3712+
3713+ /* topaz_mmu_flush(dev); */
3714+ topaz_test_sync(dev, 0x55, sync_value);
3715+
3716+ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
3717+ topaz_priv->topaz_sync_offset, &real_pfn);
3718+ if (ret != 0) {
3719+		PSB_DEBUG_GENERAL("psb_mmu_virtual_to_pfn failed, exit\n");
3720+ return;
3721+ }
3722+ PSB_DEBUG_GENERAL("TOPAZ: issued SYNC command, "
3723+		"BO offset=0x%08x (pfn=%lu), sync value=0x%08x\n",
3724+ topaz_priv->topaz_sync_offset, real_pfn, sync_value);
3725+}
3726+
3727+void topaz_save_default_regs(struct drm_psb_private *dev_priv, uint32_t *data)
3728+{
3729+ int n;
3730+ int count;
3731+
3732+ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
3733+ for (n = 0; n < count; n++, ++data)
3734+ MM_READ32(topaz_default_regs[n][0],
3735+ topaz_default_regs[n][1],
3736+ data);
3737+
3738+}
3739+
3740+void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
3741+ uint32_t *data)
3742+{
3743+ int n;
3744+ int count;
3745+
3746+ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
3747+ for (n = 0; n < count; n++, ++data)
3748+ MM_WRITE32(topaz_default_regs[n][0],
3749+ topaz_default_regs[n][1],
3750+ *data);
3751+
3752+}
3753+
3754+#endif
3755+
3756+int lnc_topaz_restore_mtx_state(struct drm_device *dev)
3757+{
3758+ struct drm_psb_private *dev_priv =
3759+ (struct drm_psb_private *)dev->dev_private;
3760+ uint32_t reg_val;
3761+ uint32_t *mtx_reg_state;
3762+ int i;
3763+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3764+
3765+ if (!topaz_priv->topaz_mtx_saved)
3766+ return -1;
3767+
3768+ if (topaz_priv->topaz_mtx_data_mem == NULL) {
3769+		PSB_DEBUG_GENERAL("TOPAZ: trying to restore context without "
3770+			"space allocated, returning without restoring\n");
3771+ return -1;
3772+ }
3773+
3774+ /* turn on mtx clocks */
3775+ MTX_READ32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, &reg_val);
3776+ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
3777+ reg_val & (~MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE));
3778+
3779+ /* reset mtx */
3780+ /* FIXME: should use core_write??? */
3781+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
3782+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
3783+ DRM_UDELAY(6000);
3784+
3785+ topaz_mmu_hwsetup(dev_priv);
3786+ /* upload code, restore mtx data */
3787+ mtx_dma_write(dev);
3788+
3789+ mtx_reg_state = topaz_priv->topaz_mtx_reg_state;
3790+ /* restore register */
3791+	/* FIXME: consider putting read/write into one function */
3792+	/* Restore the 8 registers of the D0 bank */
3793+ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
3794+ for (i = 0; i < 8; i++) {
3795+ topaz_write_core_reg(dev_priv, 0x1 | (i<<4),
3796+ *mtx_reg_state);
3797+ mtx_reg_state++;
3798+ }
3799+	/* Restore the 8 registers of the D1 bank */
3800+ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
3801+ for (i = 0; i < 8; i++) {
3802+ topaz_write_core_reg(dev_priv, 0x2 | (i<<4),
3803+ *mtx_reg_state);
3804+ mtx_reg_state++;
3805+ }
3806+	/* Restore the 4 registers of the A0 bank */
3807+ /* A0StP, A0FrP, A0.2 and A0.3 */
3808+ for (i = 0; i < 4; i++) {
3809+ topaz_write_core_reg(dev_priv, 0x3 | (i<<4),
3810+ *mtx_reg_state);
3811+ mtx_reg_state++;
3812+ }
3813+	/* Restore the 4 registers of the A1 bank */
3814+ /* A1GbP, A1LbP, A1.2 and A1.3 */
3815+ for (i = 0; i < 4; i++) {
3816+ topaz_write_core_reg(dev_priv, 0x4 | (i<<4),
3817+ *mtx_reg_state);
3818+ mtx_reg_state++;
3819+ }
3820+	/* Restore PC and PCX */
3821+ for (i = 0; i < 2; i++) {
3822+ topaz_write_core_reg(dev_priv, 0x5 | (i<<4),
3823+ *mtx_reg_state);
3824+ mtx_reg_state++;
3825+ }
3826+	/* Restore the 8 control registers */
3827+ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
3828+ * TXGPIOO */
3829+ for (i = 0; i < 8; i++) {
3830+ topaz_write_core_reg(dev_priv, 0x7 | (i<<4),
3831+ *mtx_reg_state);
3832+ mtx_reg_state++;
3833+ }
3834+
3835+ /* turn on MTX */
3836+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
3837+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
3838+
3839+ topaz_priv->topaz_mtx_saved = 0;
3840+
3841+ return 0;
3842+}
3843+
3844+int lnc_topaz_save_mtx_state(struct drm_device *dev)
3845+{
3846+ struct drm_psb_private *dev_priv =
3847+ (struct drm_psb_private *)dev->dev_private;
3848+ uint32_t *mtx_reg_state;
3849+ int i;
3850+ struct topaz_codec_fw *cur_codec_fw;
3851+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3852+
3853+ /* FIXME: make sure the topaz_mtx_data_mem is allocated */
3854+ if (topaz_priv->topaz_mtx_data_mem == NULL) {
3855+		PSB_DEBUG_GENERAL("TOPAZ: trying to save context without space "
3856+			"allocated, returning without saving\n");
3857+ return -1;
3858+ }
3859+ if (topaz_priv->topaz_fw_loaded == 0) {
3860+		PSB_DEBUG_GENERAL("TOPAZ: trying to save context without firmware "
3861+ "uploaded\n");
3862+ return -1;
3863+ }
3864+
3865+ topaz_wait_for_register(dev_priv,
3866+ MTX_START + MTX_CORE_CR_MTX_TXRPT_OFFSET,
3867+ TXRPT_WAITONKICK_VALUE,
3868+ 0xffffffff);
3869+
3870+ /* stop mtx */
3871+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
3872+ MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK);
3873+
3874+ mtx_reg_state = topaz_priv->topaz_mtx_reg_state;
3875+
3876+	/* FIXME: consider putting read/write into one function */
3877+ /* Saves 8 Registers of D0 Bank */
3878+ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
3879+ for (i = 0; i < 8; i++) {
3880+ topaz_read_core_reg(dev_priv, 0x1 | (i<<4),
3881+ mtx_reg_state);
3882+ mtx_reg_state++;
3883+ }
3884+ /* Saves 8 Registers of D1 Bank */
3885+ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
3886+ for (i = 0; i < 8; i++) {
3887+ topaz_read_core_reg(dev_priv, 0x2 | (i<<4),
3888+ mtx_reg_state);
3889+ mtx_reg_state++;
3890+ }
3891+ /* Saves 4 Registers of A0 Bank */
3892+ /* A0StP, A0FrP, A0.2 and A0.3 */
3893+ for (i = 0; i < 4; i++) {
3894+ topaz_read_core_reg(dev_priv, 0x3 | (i<<4),
3895+ mtx_reg_state);
3896+ mtx_reg_state++;
3897+ }
3898+ /* Saves 4 Registers of A1 Bank */
3899+ /* A1GbP, A1LbP, A1.2 and A1.3 */
3900+ for (i = 0; i < 4; i++) {
3901+ topaz_read_core_reg(dev_priv, 0x4 | (i<<4),
3902+ mtx_reg_state);
3903+ mtx_reg_state++;
3904+ }
3905+ /* Saves PC and PCX */
3906+ for (i = 0; i < 2; i++) {
3907+ topaz_read_core_reg(dev_priv, 0x5 | (i<<4),
3908+ mtx_reg_state);
3909+ mtx_reg_state++;
3910+ }
3911+ /* Saves 8 Control Registers */
3912+ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
3913+ * TXGPIOO */
3914+ for (i = 0; i < 8; i++) {
3915+ topaz_read_core_reg(dev_priv, 0x7 | (i<<4),
3916+ mtx_reg_state);
3917+ mtx_reg_state++;
3918+ }
3919+
3920+ /* save mtx data memory */
3921+ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec];
3922+
3923+ mtx_dma_read(dev, cur_codec_fw->data_location + 0x80900000 - 0x82880000,
3924+ topaz_priv->cur_mtx_data_size);
3925+
3926+ /* turn off mtx clocks */
3927+ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
3928+ MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE);
3929+
3930+ topaz_priv->topaz_mtx_saved = 1;
3931+
3932+ return 0;
3933+}
3934+
3935+void mtx_dma_read(struct drm_device *dev, uint32_t source_addr, uint32_t size)
3936+{
3937+ struct drm_psb_private *dev_priv =
3938+ (struct drm_psb_private *)dev->dev_private;
3939+ struct ttm_buffer_object *target;
3940+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3941+
3942+ /* setup mtx DMAC registers to do transfer */
3943+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, source_addr);
3944+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
3945+ F_ENCODE(2, MTX_BURSTSIZE) |
3946+ F_ENCODE(1, MTX_RNW) |
3947+ F_ENCODE(1, MTX_ENABLE) |
3948+ F_ENCODE(size, MTX_LENGTH));
3949+
3950+ /* give the DMAC access to the host memory via BIF */
3951+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
3952+
3953+ target = topaz_priv->topaz_mtx_data_mem;
3954+	/* transfer the data */
3955+	/* FIXME: is size measured in bytes? */
3956+ topaz_dma_transfer(dev_priv, 0, target->offset, 0,
3957+ MTX_CR_MTX_SYSC_CDMAT,
3958+ size, 0, 1);
3959+
3960+	/* wait for the transfer to finish */
3961+ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
3962+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
3963+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
3964+ /* clear interrupt */
3965+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
3966+ /* give access back to topaz core */
3967+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
3968+}
3969+
3970+void dmac_transfer(struct drm_device *dev, uint32_t channel, uint32_t dst_addr,
3971+ uint32_t soc_addr, uint32_t bytes_num,
3972+ int increment, int rnw)
3973+{
3974+ struct drm_psb_private *dev_priv =
3975+ (struct drm_psb_private *)dev->dev_private;
3976+ uint32_t count_reg;
3977+ uint32_t irq_state;
3978+
3979+ /* check no transfer is in progress */
3980+ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &count_reg);
3981+ if (0 != (count_reg & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) {
3982+		DRM_ERROR("TOPAZ: there's a transfer in progress while trying "
3983+			"to save mtx data\n");
3984+ /* FIXME: how to handle this error */
3985+ return;
3986+ }
3987+
3988+ /* no hold off period */
3989+ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
3990+	/* clear irq state */
3991+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
3992+ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_state);
3993+ if (0 != irq_state) {
3994+		DRM_ERROR("TOPAZ: there's an irq that can't be cleared\n");
3995+ return;
3996+ }
3997+
3998+ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel), dst_addr);
3999+ count_reg = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP,
4000+ DMAC_PWIDTH_32_BIT, rnw,
4001+ DMAC_PWIDTH_32_BIT, bytes_num);
4002+ /* generate an interrupt at end of transfer */
4003+ count_reg |= MASK_IMG_SOC_TRANSFER_IEN;
4004+ count_reg |= F_ENCODE(rnw, IMG_SOC_DIR);
4005+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count_reg);
4006+
4007+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
4008+ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, increment,
4009+ DMAC_BURST_2));
4010+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
4011+
4012+ /* Finally, rewrite the count register with the enable
4013+ * bit set to kick off the transfer */
4014+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel),
4015+ count_reg | MASK_IMG_SOC_EN);
4016+}
4017+
4018+void mtx_dma_write(struct drm_device *dev)
4019+{
4020+ struct topaz_codec_fw *cur_codec_fw;
4021+ struct drm_psb_private *dev_priv =
4022+ (struct drm_psb_private *)dev->dev_private;
4023+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
4024+
4025+ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec];
4026+
4027+ /* upload code */
4028+	/* setup mtx DMAC registers to receive the transfer */
4029+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
4030+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
4031+ F_ENCODE(2, MTX_BURSTSIZE) |
4032+ F_ENCODE(0, MTX_RNW) |
4033+ F_ENCODE(1, MTX_ENABLE) |
4034+ F_ENCODE(cur_codec_fw->text_size / 4, MTX_LENGTH));
4035+
4036+ /* give DMAC access to host memory */
4037+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
4038+
4039+ /* transfer code */
4040+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
4041+ MTX_CR_MTX_SYSC_CDMAT, cur_codec_fw->text_size / 4,
4042+ 0, 0);
4043+ /* wait finished */
4044+ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
4045+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
4046+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
4047+ /* clear interrupt */
4048+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
4049+
4050+	/* set up mtx to start receiving data */
4051+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000 +
4052+ (cur_codec_fw->data_location) - 0x82880000);
4053+
4054+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
4055+ F_ENCODE(2, MTX_BURSTSIZE) |
4056+ F_ENCODE(0, MTX_RNW) |
4057+ F_ENCODE(1, MTX_ENABLE) |
4058+ F_ENCODE(topaz_priv->cur_mtx_data_size, MTX_LENGTH));
4059+
4060+ /* give DMAC access to host memory */
4061+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
4062+
4063+ /* transfer data */
4064+ topaz_dma_transfer(dev_priv, 0, topaz_priv->topaz_mtx_data_mem->offset,
4065+ 0, MTX_CR_MTX_SYSC_CDMAT,
4066+ topaz_priv->cur_mtx_data_size,
4067+ 0, 0);
4068+ /* wait finished */
4069+ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
4070+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
4071+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
4072+ /* clear interrupt */
4073+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
4074+
4075+ /* give access back to Topaz Core */
4076+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
4077+}
4078+
4079diff --git a/drivers/gpu/drm/psb/psb_bl.c b/drivers/gpu/drm/psb/psb_bl.c
4080new file mode 100644
4081index 0000000..2c723f4
4082--- /dev/null
4083+++ b/drivers/gpu/drm/psb/psb_bl.c
4084@@ -0,0 +1,232 @@
4085+/*
4086+ * psb backlight using HAL
4087+ *
4088+ * Copyright (c) 2009 Eric Knopp
4089+ *
4090+ * This program is free software; you can redistribute it and/or modify
4091+ * it under the terms of the GNU General Public License version 2 as
4092+ * published by the Free Software Foundation.
4093+ */
4094+
4095+#include <linux/backlight.h>
4096+#include "psb_drv.h"
4097+#include "psb_intel_reg.h"
4098+#include "psb_intel_drv.h"
4099+#include "psb_intel_bios.h"
4100+#include "psb_powermgmt.h"
4101+
4102+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
4103+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
4104+#define BLC_PWM_FREQ_CALC_CONSTANT 32
4105+#define MHz 1000000
4106+#define BRIGHTNESS_MIN_LEVEL 1
4107+#define BRIGHTNESS_MAX_LEVEL 100
4108+#define BRIGHTNESS_MASK 0xFF
4109+#define BLC_POLARITY_NORMAL 0
4110+#define BLC_POLARITY_INVERSE 1
4111+#define BLC_ADJUSTMENT_MAX 100
4112+
4113+#define PSB_BLC_PWM_PRECISION_FACTOR 10
4114+#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
4115+#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
4116+
4117+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
4118+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
4119+
4120+static int psb_brightness;
4121+static int blc_pol;
4122+static struct backlight_device *psb_backlight_device;
4123+static u8 blc_brightnesscmd;
4124+static u8 blc_type;
4125+
4126+int psb_set_brightness(struct backlight_device *bd)
4127+{
4128+ u32 blc_pwm_ctl;
4129+ u32 max_pwm_blc;
4130+
4131+ struct drm_device *dev =
4132+ (struct drm_device *)psb_backlight_device->priv;
4133+ struct drm_psb_private *dev_priv =
4134+ (struct drm_psb_private *) dev->dev_private;
4135+
4136+ int level = bd->props.brightness;
4137+
4138+ DRM_DEBUG("backlight level set to %d\n", level);
4139+
4140+ /* Perform value bounds checking */
4141+ if (level < BRIGHTNESS_MIN_LEVEL)
4142+ level = BRIGHTNESS_MIN_LEVEL;
4143+
4144+ if(IS_POULSBO(dev)) {
4145+ psb_intel_lvds_set_brightness(dev, level);
4146+ psb_brightness = level;
4147+ return 0;
4148+ }
4149+
4150+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
4151+ /* Calculate and set the brightness value */
4152+ max_pwm_blc = REG_READ(BLC_PWM_CTL) >>
4153+ MRST_BACKLIGHT_MODULATION_FREQ_SHIFT;
4154+ blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
4155+
4156+ /* Adjust the backlight level with the percent in
4157+ * dev_priv->blc_adj1;
4158+ */
4159+ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
4160+ blc_pwm_ctl = blc_pwm_ctl / BLC_ADJUSTMENT_MAX;
4161+
4162+ if (blc_pol == BLC_POLARITY_INVERSE)
4163+ blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
4164+
4165+ /* force PWM bit on */
4166+ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
4167+ REG_WRITE(BLC_PWM_CTL,
4168+ (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) |
4169+ blc_pwm_ctl);
4170+
4171+ /* printk("***backlight brightness = %i\n", level); */
4172+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
4173+ }
4174+
4175+ /* cache the brightness for later use */
4176+ psb_brightness = level;
4177+ return 0;
4178+}
4179+
4180+int psb_get_brightness(struct backlight_device *bd)
4181+{
4182+ /* return locally cached var instead of HW read (due to DPST etc.) */
4183+ return psb_brightness;
4184+}
4185+
4186+struct backlight_ops psb_ops = {
4187+ .get_brightness = psb_get_brightness,
4188+ .update_status = psb_set_brightness,
4189+};
4190+
4191+int psb_backlight_init(struct drm_device *dev)
4192+{
4193+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
4194+ unsigned long CoreClock;
4195+ // u32 bl_max_freq;
4196+ // unsigned long value;
4197+ u16 bl_max_freq;
4198+ uint32_t value;
4199+ uint32_t clock;
4200+ uint32_t blc_pwm_precision_factor;
4201+
4202+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
4203+
4204+ struct drm_psb_private *dev_priv =
4205+ (struct drm_psb_private *) dev->dev_private;
4206+
4207+ psb_backlight_device = backlight_device_register("psb-bl",
4208+ NULL, NULL, &psb_ops);
4209+ if (IS_ERR(psb_backlight_device))
4210+ return PTR_ERR(psb_backlight_device);
4211+
4212+ psb_backlight_device->priv = dev;
4213+
4214+ if(IS_MRST(dev)) {
4215+ /* HACK HACK HACK */
4216+ dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
4217+
4218+ bl_max_freq = 256; /* this needs to come from VBT when available */
4219+ blc_pol = BLC_POLARITY_NORMAL; /* this needs to be set elsewhere */
4220+ blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
4221+
4222+ if (dev_priv->sku_83)
4223+ CoreClock = 166;
4224+ else if (dev_priv->sku_100)
4225+ CoreClock = 200;
4226+ else if (dev_priv->sku_100L)
4227+ CoreClock = 100;
4228+ else
4229+ return 1;
4230+ } else {
4231+ /* get bl_max_freq and pol from dev_priv*/
4232+ if(!dev_priv->lvds_bl){
4233+ DRM_ERROR("Has no valid LVDS backlight info\n");
4234+ return 1;
4235+ }
4236+ bl_max_freq = dev_priv->lvds_bl->freq;
4237+ blc_pol = dev_priv->lvds_bl->pol;
4238+ blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
4239+ blc_brightnesscmd = dev_priv->lvds_bl->brightnesscmd;
4240+ blc_type = dev_priv->lvds_bl->type;
4241+
4242+ //pci_write_config_dword(pci_root, 0xD4, 0x00C32004);
4243+ //pci_write_config_dword(pci_root, 0xD0, 0xE0033000);
4244+
4245+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
4246+ pci_read_config_dword(pci_root, 0xD4, &clock);
4247+
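+		/* the low three bits of the value read back select the core
+		 * clock frequency in MHz used for the PWM calculation below */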
4248+ switch(clock & 0x07) {
4249+ case 0:
4250+ CoreClock = 100;
4251+ break;
4252+ case 1:
4253+ CoreClock = 133;
4254+ break;
4255+ case 2:
4256+ CoreClock = 150;
4257+ break;
4258+ case 3:
4259+ CoreClock = 178;
4260+ break;
4261+ case 4:
4262+ CoreClock = 200;
4263+ break;
4264+ case 5:
4265+ case 6:
4266+ case 7:
4267+				CoreClock = 266;
+				break;
4268+			default:
4269+ return 1;
4270+ }
4271+ }/*end if(IS_MRST(dev))*/
4272+
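+	/* derive the PWM modulation frequency register value from the core
+	 * clock and the maximum backlight frequency; the precision factor is
+	 * multiplied in and divided back out, so it only affects integer
+	 * truncation during the intermediate division */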
4273+ value = (CoreClock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
4274+ value *= blc_pwm_precision_factor;
4275+ value /= bl_max_freq;
4276+ value /= blc_pwm_precision_factor;
4277+
4278+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
4279+ if(IS_MRST(dev)) {
4280+ if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
4281+ return 2;
4282+ else {
4283+ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
4284+ REG_WRITE(BLC_PWM_CTL, value |
4285+ (value << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT));
4286+ }
4287+ } else {
4288+ if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
4289+ value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
4290+ return 2;
4291+ else {
4292+ value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
4293+ REG_WRITE(BLC_PWM_CTL,
4294+ (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
4295+ (value));
4296+ }
4297+ } /*end if(IS_MRST(dev))*/
4298+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
4299+ }
4300+
4301+ psb_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL;
4302+ psb_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
4303+ backlight_update_status(psb_backlight_device);
4304+#endif
4305+ return 0;
4306+}
4307+
4308+void psb_backlight_exit(void)
4309+{
4310+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
4311+ psb_backlight_device->props.brightness = 0;
4312+ backlight_update_status(psb_backlight_device);
4313+ backlight_device_unregister(psb_backlight_device);
4314+#endif
4315+ return;
4316+}
4317diff --git a/drivers/gpu/drm/psb/psb_buffer.c b/drivers/gpu/drm/psb/psb_buffer.c
4318new file mode 100644
4319index 0000000..cb25bde
4320--- /dev/null
4321+++ b/drivers/gpu/drm/psb/psb_buffer.c
4322@@ -0,0 +1,519 @@
4323+/**************************************************************************
4324+ * Copyright (c) 2007, Intel Corporation.
4325+ * All Rights Reserved.
4326+ *
4327+ * This program is free software; you can redistribute it and/or modify it
4328+ * under the terms and conditions of the GNU General Public License,
4329+ * version 2, as published by the Free Software Foundation.
4330+ *
4331+ * This program is distributed in the hope it will be useful, but WITHOUT
4332+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4333+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4334+ * more details.
4335+ *
4336+ * You should have received a copy of the GNU General Public License along with
4337+ * this program; if not, write to the Free Software Foundation, Inc.,
4338+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4339+ *
4340+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4341+ * develop this driver.
4342+ *
4343+ **************************************************************************/
4344+/*
4345+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
4346+ */
4347+#include "ttm/ttm_placement_common.h"
4348+#include "ttm/ttm_execbuf_util.h"
4349+#include "ttm/ttm_fence_api.h"
4350+#include <drm/drmP.h>
4351+#include "psb_drv.h"
4352+#include "psb_schedule.h"
4353+
4354+#define DRM_MEM_TTM 26
4355+
4356+struct drm_psb_ttm_backend {
4357+ struct ttm_backend base;
4358+ struct page **pages;
4359+ unsigned int desired_tile_stride;
4360+ unsigned int hw_tile_stride;
4361+ int mem_type;
4362+ unsigned long offset;
4363+ unsigned long num_pages;
4364+};
4365+
4366+/*
4367+ * Poulsbo GPU virtual space looks like this
4368+ * (We currently use only one MMU context).
4369+ *
4370+ * gatt_start = Start of GATT aperture in bus space.
4371+ * stolen_end = End of GATT populated by stolen memory in bus space.
4372+ * gatt_end = End of GATT
4373+ * twod_end = MIN(gatt_start + 256_MEM, gatt_end)
4374+ *
4375+ * 0x00000000 -> 0x10000000 Temporary mapping space for tiling-
4376+ * and copy operations.
4377+ * This space is not managed and is protected by the
4378+ * temp_mem mutex.
4379+ *
4380+ * 0x10000000 -> 0x20000000 DRM_PSB_MEM_KERNEL For kernel buffers.
4381+ *
4382+ * 0x20000000 -> gatt_start DRM_PSB_MEM_MMU For generic MMU-only use.
4383+ *
4384+ * gatt_start -> stolen_end TTM_PL_VRAM Pre-populated GATT pages.
4385+ *
4386+ * stolen_end -> twod_end TTM_PL_TT GATT memory usable by 2D engine.
4387+ *
4388+ * twod_end -> gatt_end DRM_BO_MEM_APER GATT memory not
4389+ * usable by 2D engine.
4390+ *
4391+ * gatt_end -> 0xffffffff Currently unused.
4392+ */
4393+
4394+static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
4395+ struct ttm_mem_type_manager *man)
4396+{
4397+
4398+ struct drm_psb_private *dev_priv =
4399+ container_of(bdev, struct drm_psb_private, bdev);
4400+ struct psb_gtt *pg = dev_priv->pg;
4401+
4402+ switch (type) {
4403+ case TTM_PL_SYSTEM:
4404+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
4405+ man->available_caching = TTM_PL_FLAG_CACHED |
4406+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4407+ man->default_caching = TTM_PL_FLAG_CACHED;
4408+ break;
4409+ case DRM_PSB_MEM_KERNEL:
4410+ man->io_offset = 0x00000000;
4411+ man->io_size = 0x00000000;
4412+ man->io_addr = NULL;
4413+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4414+ TTM_MEMTYPE_FLAG_CMA;
4415+ man->gpu_offset = PSB_MEM_KERNEL_START;
4416+ man->available_caching = TTM_PL_FLAG_CACHED |
4417+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4418+ man->default_caching = TTM_PL_FLAG_WC;
4419+ break;
4420+ case DRM_PSB_MEM_MMU:
4421+ man->io_offset = 0x00000000;
4422+ man->io_size = 0x00000000;
4423+ man->io_addr = NULL;
4424+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4425+ TTM_MEMTYPE_FLAG_CMA;
4426+ man->gpu_offset = PSB_MEM_MMU_START;
4427+ man->available_caching = TTM_PL_FLAG_CACHED |
4428+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4429+ man->default_caching = TTM_PL_FLAG_WC;
4430+ break;
4431+ case DRM_PSB_MEM_PDS:
4432+ man->io_offset = 0x00000000;
4433+ man->io_size = 0x00000000;
4434+ man->io_addr = NULL;
4435+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4436+ TTM_MEMTYPE_FLAG_CMA;
4437+ man->gpu_offset = PSB_MEM_PDS_START;
4438+ man->available_caching = TTM_PL_FLAG_CACHED |
4439+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4440+ man->default_caching = TTM_PL_FLAG_WC;
4441+ break;
4442+ case DRM_PSB_MEM_RASTGEOM:
4443+ man->io_offset = 0x00000000;
4444+ man->io_size = 0x00000000;
4445+ man->io_addr = NULL;
4446+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4447+ TTM_MEMTYPE_FLAG_CMA;
4448+ man->gpu_offset = PSB_MEM_RASTGEOM_START;
4449+ man->available_caching = TTM_PL_FLAG_CACHED |
4450+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4451+ man->default_caching = TTM_PL_FLAG_WC;
4452+ break;
4453+ case TTM_PL_VRAM:
4454+ man->io_addr = NULL;
4455+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4456+ TTM_MEMTYPE_FLAG_FIXED |
4457+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
4458+#ifdef PSB_WORKING_HOST_MMU_ACCESS
4459+ man->io_offset = pg->gatt_start;
4460+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
4461+#else
4462+ man->io_offset = pg->stolen_base;
4463+ man->io_size = pg->vram_stolen_size;
4464+#endif
4465+ man->gpu_offset = pg->gatt_start;
4466+ man->available_caching = TTM_PL_FLAG_UNCACHED |
4467+ TTM_PL_FLAG_WC;
4468+ man->default_caching = TTM_PL_FLAG_WC;
4469+ break;
4470+ case TTM_PL_CI:
4471+ man->io_addr = NULL;
4472+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4473+ TTM_MEMTYPE_FLAG_FIXED |
4474+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
4475+ man->io_offset = dev_priv->ci_region_start;
4476+ man->io_size = pg->ci_stolen_size;
4477+ man->gpu_offset = pg->gatt_start - pg->ci_stolen_size;
4478+ man->available_caching = TTM_PL_FLAG_UNCACHED;
4479+ man->default_caching = TTM_PL_FLAG_UNCACHED;
4480+ break;
4481+ case TTM_PL_RAR: /* Unmappable RAR memory */
4482+ man->io_offset = dev_priv->rar_region_start;
4483+ man->io_size = pg->rar_stolen_size;
4484+ man->io_addr = NULL;
4485+ man->flags = TTM_MEMTYPE_FLAG_FIXED;
4486+ man->available_caching = TTM_PL_FLAG_UNCACHED;
4487+ man->default_caching = TTM_PL_FLAG_UNCACHED;
4488+ man->gpu_offset = pg->gatt_start + pg->vram_stolen_size;
4489+ break;
4490+ case TTM_PL_TT: /* Mappable GATT memory */
4491+ man->io_offset = pg->gatt_start;
4492+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
4493+ man->io_addr = NULL;
4494+#ifdef PSB_WORKING_HOST_MMU_ACCESS
4495+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4496+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
4497+#else
4498+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4499+ TTM_MEMTYPE_FLAG_CMA;
4500+#endif
4501+ man->available_caching = TTM_PL_FLAG_CACHED |
4502+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4503+ man->default_caching = TTM_PL_FLAG_WC;
4504+ man->gpu_offset = pg->gatt_start;
4505+ break;
4506+ case DRM_PSB_MEM_APER: /*MMU memory. Mappable. Not usable for 2D. */
4507+ man->io_offset = pg->gatt_start;
4508+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
4509+ man->io_addr = NULL;
4510+#ifdef PSB_WORKING_HOST_MMU_ACCESS
4511+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4512+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
4513+#else
4514+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4515+ TTM_MEMTYPE_FLAG_CMA;
4516+#endif
4517+ man->gpu_offset = pg->gatt_start;
4518+ man->available_caching = TTM_PL_FLAG_CACHED |
4519+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4520+ man->default_caching = TTM_PL_FLAG_WC;
4521+ break;
4522+ default:
4523+ DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
4524+ return -EINVAL;
4525+ }
4526+ return 0;
4527+}
4528+
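+/* When a buffer is evicted from VRAM, prefer a TT placement if the
+ * proposed flags allow it; every other case falls back to system memory. */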
4529+static uint32_t psb_evict_mask(struct ttm_buffer_object *bo)
4530+{
4531+ uint32_t cur_placement = bo->mem.flags & ~TTM_PL_MASK_MEM;
4532+
4533+
4534+ switch (bo->mem.mem_type) {
4535+ case TTM_PL_VRAM:
4536+ if (bo->mem.proposed_flags & TTM_PL_FLAG_TT)
4537+ return cur_placement | TTM_PL_FLAG_TT;
4538+ else
4539+ return cur_placement | TTM_PL_FLAG_SYSTEM;
4540+ default:
4541+ return cur_placement | TTM_PL_FLAG_SYSTEM;
4542+ }
4543+}
4544+
4545+static int psb_invalidate_caches(struct ttm_bo_device *bdev,
4546+ uint32_t placement)
4547+{
4548+ return 0;
4549+}
4550+
4551+static int psb_move_blit(struct ttm_buffer_object *bo,
4552+ bool evict, bool no_wait,
4553+ struct ttm_mem_reg *new_mem)
4554+{
4555+ struct drm_psb_private *dev_priv =
4556+ container_of(bo->bdev, struct drm_psb_private, bdev);
4557+ struct drm_device *dev = dev_priv->dev;
4558+ struct ttm_mem_reg *old_mem = &bo->mem;
4559+ struct ttm_fence_object *fence;
4560+ int dir = 0;
4561+ int ret;
4562+
4563+ if ((old_mem->mem_type == new_mem->mem_type) &&
4564+ (new_mem->mm_node->start <
4565+ old_mem->mm_node->start + old_mem->mm_node->size)) {
4566+ dir = 1;
4567+ }
4568+
4569+ psb_emit_2d_copy_blit(dev,
4570+ old_mem->mm_node->start << PAGE_SHIFT,
4571+ new_mem->mm_node->start << PAGE_SHIFT,
4572+ new_mem->num_pages, dir);
4573+
4574+ ret = ttm_fence_object_create(&dev_priv->fdev, 0,
4575+ _PSB_FENCE_TYPE_EXE,
4576+ TTM_FENCE_FLAG_EMIT,
4577+ &fence);
4578+ if (unlikely(ret != 0)) {
4579+ psb_idle_2d(dev);
4580+ if (fence)
4581+ ttm_fence_object_unref(&fence);
4582+ }
4583+
4584+ ret = ttm_bo_move_accel_cleanup(bo, (void *) fence,
4585+ (void *) (unsigned long)
4586+ _PSB_FENCE_TYPE_EXE,
4587+ evict, no_wait, new_mem);
4588+ if (fence)
4589+ ttm_fence_object_unref(&fence);
4590+ return ret;
4591+}
4592+
4593+/*
4594+ * Flip destination ttm into GATT,
4595+ * then blit and subsequently move out again.
4596+ */
4597+
4598+static int psb_move_flip(struct ttm_buffer_object *bo,
4599+ bool evict, bool interruptible, bool no_wait,
4600+ struct ttm_mem_reg *new_mem)
4601+{
4602+ struct ttm_bo_device *bdev = bo->bdev;
4603+ struct ttm_mem_reg tmp_mem;
4604+ int ret;
4605+
4606+ tmp_mem = *new_mem;
4607+ tmp_mem.mm_node = NULL;
4608+ tmp_mem.proposed_flags = TTM_PL_FLAG_TT;
4609+
4610+ ret = ttm_bo_mem_space(bo, &tmp_mem, interruptible, no_wait);
4611+ if (ret)
4612+ return ret;
4613+ ret = ttm_tt_bind(bo->ttm, &tmp_mem);
4614+ if (ret)
4615+ goto out_cleanup;
4616+ ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
4617+ if (ret)
4618+ goto out_cleanup;
4619+
4620+ ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
4621+out_cleanup:
4622+ if (tmp_mem.mm_node) {
4623+ spin_lock(&bdev->lru_lock);
4624+ drm_mm_put_block(tmp_mem.mm_node);
4625+ tmp_mem.mm_node = NULL;
4626+ spin_unlock(&bdev->lru_lock);
4627+ }
4628+ return ret;
4629+}
4630+
4631+static int psb_move(struct ttm_buffer_object *bo,
4632+ bool evict, bool interruptible,
4633+ bool no_wait, struct ttm_mem_reg *new_mem)
4634+{
4635+ struct ttm_mem_reg *old_mem = &bo->mem;
4636+
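+	/* Moves involving the unmappable RAR pool are node-only (no data is
+	 * copied); moves out of system memory use memcpy, and the remaining
+	 * cases go through the 2D blit path (directly, or via a temporary TT
+	 * placement), falling back to memcpy on failure. */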
4637+ if ((old_mem->mem_type == TTM_PL_RAR) ||
4638+ (new_mem->mem_type == TTM_PL_RAR)) {
4639+ ttm_bo_free_old_node(bo);
4640+ *old_mem = *new_mem;
4641+ } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
4642+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
4643+ } else if (new_mem->mem_type == TTM_PL_SYSTEM) {
4644+ int ret = psb_move_flip(bo, evict, interruptible,
4645+ no_wait, new_mem);
4646+ if (unlikely(ret != 0)) {
4647+ if (ret == -ERESTART)
4648+ return ret;
4649+ else
4650+ return ttm_bo_move_memcpy(bo, evict, no_wait,
4651+ new_mem);
4652+ }
4653+ } else {
4654+ if (psb_move_blit(bo, evict, no_wait, new_mem))
4655+ return ttm_bo_move_memcpy(bo, evict, no_wait,
4656+ new_mem);
4657+ }
4658+ return 0;
4659+}
4660+
4661+static int drm_psb_tbe_populate(struct ttm_backend *backend,
4662+ unsigned long num_pages,
4663+ struct page **pages,
4664+ struct page *dummy_read_page)
4665+{
4666+ struct drm_psb_ttm_backend *psb_be =
4667+ container_of(backend, struct drm_psb_ttm_backend, base);
4668+
4669+ psb_be->pages = pages;
4670+ return 0;
4671+}
4672+
4673+static int drm_psb_tbe_unbind(struct ttm_backend *backend)
4674+{
4675+ struct ttm_bo_device *bdev = backend->bdev;
4676+ struct drm_psb_private *dev_priv =
4677+ container_of(bdev, struct drm_psb_private, bdev);
4678+ struct drm_psb_ttm_backend *psb_be =
4679+ container_of(backend, struct drm_psb_ttm_backend, base);
4680+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
4681+ struct ttm_mem_type_manager *man = &bdev->man[psb_be->mem_type];
4682+
4683+ PSB_DEBUG_RENDER("MMU unbind.\n");
4684+
4685+ if (psb_be->mem_type == TTM_PL_TT) {
4686+ uint32_t gatt_p_offset =
4687+ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
4688+
4689+ (void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
4690+ psb_be->num_pages,
4691+ psb_be->desired_tile_stride,
4692+ psb_be->hw_tile_stride);
4693+ }
4694+
4695+ psb_mmu_remove_pages(pd, psb_be->offset,
4696+ psb_be->num_pages,
4697+ psb_be->desired_tile_stride,
4698+ psb_be->hw_tile_stride);
4699+
4700+ return 0;
4701+}
4702+
4703+static int drm_psb_tbe_bind(struct ttm_backend *backend,
4704+ struct ttm_mem_reg *bo_mem)
4705+{
4706+ struct ttm_bo_device *bdev = backend->bdev;
4707+ struct drm_psb_private *dev_priv =
4708+ container_of(bdev, struct drm_psb_private, bdev);
4709+ struct drm_psb_ttm_backend *psb_be =
4710+ container_of(backend, struct drm_psb_ttm_backend, base);
4711+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
4712+ struct ttm_mem_type_manager *man = &bdev->man[bo_mem->mem_type];
4713+ int type;
4714+ int ret = 0;
4715+
4716+ psb_be->mem_type = bo_mem->mem_type;
4717+ psb_be->num_pages = bo_mem->num_pages;
4718+ psb_be->desired_tile_stride = 0;
4719+ psb_be->hw_tile_stride = 0;
4720+ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
4721+ man->gpu_offset;
4722+
4723+ type = (bo_mem->flags & TTM_PL_FLAG_CACHED) ?
4724+  PSB_MMU_CACHED_MEMORY : 0;
4726+
4727+ PSB_DEBUG_RENDER("MMU bind.\n");
4728+ if (psb_be->mem_type == TTM_PL_TT) {
4729+ uint32_t gatt_p_offset =
4730+ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
4731+
4732+ ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
4733+ gatt_p_offset,
4734+ psb_be->num_pages,
4735+ psb_be->desired_tile_stride,
4736+ psb_be->hw_tile_stride, type);
4737+ }
4738+
4739+ ret = psb_mmu_insert_pages(pd, psb_be->pages,
4740+ psb_be->offset, psb_be->num_pages,
4741+ psb_be->desired_tile_stride,
4742+ psb_be->hw_tile_stride, type);
4743+ if (ret)
4744+ goto out_err;
4745+
4746+ return 0;
4747+out_err:
4748+ drm_psb_tbe_unbind(backend);
4749+ return ret;
4750+
4751+}
4752+
4753+static void drm_psb_tbe_clear(struct ttm_backend *backend)
4754+{
4755+ struct drm_psb_ttm_backend *psb_be =
4756+ container_of(backend, struct drm_psb_ttm_backend, base);
4757+
4758+ psb_be->pages = NULL;
4759+ return;
4760+}
4761+
4762+static void drm_psb_tbe_destroy(struct ttm_backend *backend)
4763+{
4764+ struct drm_psb_ttm_backend *psb_be =
4765+ container_of(backend, struct drm_psb_ttm_backend, base);
4766+
4767+ if (backend)
4768+ kfree(psb_be);
4769+}
4770+
4771+static struct ttm_backend_func psb_ttm_backend = {
4772+ .populate = drm_psb_tbe_populate,
4773+ .clear = drm_psb_tbe_clear,
4774+ .bind = drm_psb_tbe_bind,
4775+ .unbind = drm_psb_tbe_unbind,
4776+ .destroy = drm_psb_tbe_destroy,
4777+};
4778+
4779+static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev)
4780+{
4781+ struct drm_psb_ttm_backend *psb_be;
4782+
4783+ psb_be = kzalloc(sizeof(*psb_be), GFP_KERNEL);
4784+ if (!psb_be)
4785+ return NULL;
4786+ psb_be->pages = NULL;
4787+ psb_be->base.func = &psb_ttm_backend;
4788+ psb_be->base.bdev = bdev;
4789+ return &psb_be->base;
4790+}
4791+
4792+/*
4793+ * Use this memory type priority if no eviction is needed.
4794+ */
4795+static uint32_t psb_mem_prios[] = {
4796+ TTM_PL_CI,
4797+ TTM_PL_RAR,
4798+ TTM_PL_VRAM,
4799+ TTM_PL_TT,
4800+ DRM_PSB_MEM_KERNEL,
4801+ DRM_PSB_MEM_MMU,
4802+ DRM_PSB_MEM_RASTGEOM,
4803+ DRM_PSB_MEM_PDS,
4804+ DRM_PSB_MEM_APER,
4805+ TTM_PL_SYSTEM
4806+};
4807+
4808+/*
4809+ * Use this memory type priority if need to evict.
4810+ */
4811+static uint32_t psb_busy_prios[] = {
4812+ TTM_PL_TT,
4813+ TTM_PL_VRAM,
4814+ TTM_PL_CI,
4815+ TTM_PL_RAR,
4816+ DRM_PSB_MEM_KERNEL,
4817+ DRM_PSB_MEM_MMU,
4818+ DRM_PSB_MEM_RASTGEOM,
4819+ DRM_PSB_MEM_PDS,
4820+ DRM_PSB_MEM_APER,
4821+ TTM_PL_SYSTEM
4822+};
4823+
4824+
4825+struct ttm_bo_driver psb_ttm_bo_driver = {
4826+ .mem_type_prio = psb_mem_prios,
4827+ .mem_busy_prio = psb_busy_prios,
4828+ .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
4829+ .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
4830+ .create_ttm_backend_entry = &drm_psb_tbe_init,
4831+ .invalidate_caches = &psb_invalidate_caches,
4832+ .init_mem_type = &psb_init_mem_type,
4833+ .evict_flags = &psb_evict_mask,
4834+ .move = &psb_move,
4835+ .verify_access = &psb_verify_access,
4836+ .sync_obj_signaled = &ttm_fence_sync_obj_signaled,
4837+ .sync_obj_wait = &ttm_fence_sync_obj_wait,
4838+ .sync_obj_flush = &ttm_fence_sync_obj_flush,
4839+ .sync_obj_unref = &ttm_fence_sync_obj_unref,
4840+ .sync_obj_ref = &ttm_fence_sync_obj_ref
4841+};
4842diff --git a/drivers/gpu/drm/psb/psb_dpst.c b/drivers/gpu/drm/psb/psb_dpst.c
4843new file mode 100644
4844index 0000000..435e53b
4845--- /dev/null
4846+++ b/drivers/gpu/drm/psb/psb_dpst.c
4847@@ -0,0 +1,208 @@
4848+/*
4849+ * Copyright © 2009 Intel Corporation
4850+ *
4851+ * Permission is hereby granted, free of charge, to any person obtaining a
4852+ * copy of this software and associated documentation files (the "Software"),
4853+ * to deal in the Software without restriction, including without limitation
4854+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
4855+ * and/or sell copies of the Software, and to permit persons to whom the
4856+ * Software is furnished to do so, subject to the following conditions:
4857+ *
4858+ * The above copyright notice and this permission notice (including the next
4859+ * paragraph) shall be included in all copies or substantial portions of the
4860+ * Software.
4861+ *
4862+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4863+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4864+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
4865+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4866+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4867+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
4868+ * IN THE SOFTWARE.
4869+ *
4870+ * Authors:
4871+ * James C. Gualario <james.c.gualario@intel.com>
4872+ *
4873+ */
4874+#include "psb_umevents.h"
4875+#include "psb_dpst.h"
4876+/**
4877+ * Declare the deferred work item and bind it to its handler function.
4878+ *
4879+ */
4880+DECLARE_WORK(dpst_dev_change_work, &psb_dpst_dev_change_wq);
4881+/**
4882+ * psb_dpst_notify_change_um - notify user mode of hotplug changes
4883+ *
4884+ * @event: umevent object to notify user mode about
4885+ * @state: dpst state struct to get workqueue from
4886+ *
4887+ */
4888+int psb_dpst_notify_change_um(struct umevent_obj *event,
4889+ struct dpst_state *state)
4890+{
4891+ state->dpst_change_wq_data.dev_name_arry_rw_status
4892+ [state->dpst_change_wq_data.dev_name_write] =
4893+ DRM_DPST_READY_TO_READ;
4894+ state->dpst_change_wq_data.dev_umevent_arry
4895+ [state->dpst_change_wq_data.dev_name_write] =
4896+ event;
4897+ if (state->dpst_change_wq_data.dev_name_read_write_wrap_ack == 1)
4898+ state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0;
4899+ state->dpst_change_wq_data.dev_name_write++;
4900+ if (state->dpst_change_wq_data.dev_name_write ==
4901+ state->dpst_change_wq_data.dev_name_read) {
4902+ state->dpst_change_wq_data.dev_name_write--;
4903+ return IRQ_NONE;
4904+ }
4905+ if (state->dpst_change_wq_data.dev_name_write >
4906+ DRM_DPST_RING_DEPTH_MAX) {
4907+ state->dpst_change_wq_data.dev_name_write = 0;
4908+ state->dpst_change_wq_data.dev_name_write_wrap = 1;
4909+ }
4910+ state->dpst_change_wq_data.hotplug_dev_list = state->list;
4911+ queue_work(state->dpst_wq, &(state->dpst_change_wq_data.work));
4912+ return IRQ_HANDLED;
4913+}
4914+EXPORT_SYMBOL(psb_dpst_notify_change_um);
4915+/**
4916+ *
4917+ * psb_dpst_create_and_notify_um - create and notify user mode of new dev
4918+ *
4919+ * @name: name to give for new event / device
4920+ * @state: dpst state instance to associate event with
4921+ *
4922+ */
4923+struct umevent_obj *psb_dpst_create_and_notify_um(const char *name,
4924+ struct dpst_state *state)
4925+{
4926+ return psb_create_umevent_obj(name, state->list);
4927+
4928+}
4929+EXPORT_SYMBOL(psb_dpst_create_and_notify_um);
4930+/**
4931+ * psb_dpst_device_pool_create_and_init - make new hotplug device pool
4932+ *
4933+ * @parent_kobj - parent kobject to associate dpst kset with
4934+ * @state - dpst state instance to associate list with
4935+ *
4936+ */
4937+struct umevent_list *psb_dpst_device_pool_create_and_init(
4938+ struct kobject *parent_kobj,
4939+ struct dpst_state *state)
4940+{
4941+
4942+ struct umevent_list *new_hotplug_dev_list = NULL;
4943+ new_hotplug_dev_list = psb_umevent_create_list();
4944+ if (new_hotplug_dev_list)
4945+ psb_umevent_init(parent_kobj, new_hotplug_dev_list,
4946+ "psb_dpst");
4947+
4948+ state->dpst_wq = create_singlethread_workqueue("dpst-wq");
4949+
4950+ if (!state->dpst_wq)
4951+ return NULL;
4952+
4953+ INIT_WORK(&state->dpst_change_wq_data.work, psb_dpst_dev_change_wq);
4954+
4955+ state->dpst_change_wq_data.dev_name_read = 0;
4956+ state->dpst_change_wq_data.dev_name_write = 0;
4957+ state->dpst_change_wq_data.dev_name_write_wrap = 0;
4958+ state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0;
4959+
4960+ memset(&(state->dpst_change_wq_data.dev_name_arry_rw_status[0]),
4961+ 0, sizeof(int)*DRM_DPST_RING_DEPTH);
4962+
4963+ return new_hotplug_dev_list;
4964+}
4965+EXPORT_SYMBOL(psb_dpst_device_pool_create_and_init);
4966+/**
4967+ * psb_dpst_init - init dpst subsystem
4968+ * @parent_kobj - parent kobject to associate dpst state with
4969+ *
4970+ */
4971+struct dpst_state *psb_dpst_init(struct kobject *parent_kobj)
4972+{
4973+ struct dpst_state *state;
4974+ state = kzalloc(sizeof(struct dpst_state), GFP_KERNEL);
4975+ if (!state)
4976+  return NULL;
4977+ state->list = psb_dpst_device_pool_create_and_init(
4978+ parent_kobj,
4979+ state);
4980+ return state;
4981+}
4982+EXPORT_SYMBOL(psb_dpst_init);
4983+/**
4984+ * psb_dpst_device_pool_destroy - destroy all dpst related resources
4985+ *
4986+ * @state: dpst state instance to destroy
4987+ *
4988+ */
4989+void psb_dpst_device_pool_destroy(struct dpst_state *state)
4990+{
4991+ flush_workqueue(state->dpst_wq);
4992+ destroy_workqueue(state->dpst_wq);
4993+ psb_umevent_cleanup(state->list);
4994+ kfree(state);
4995+}
4996+EXPORT_SYMBOL(psb_dpst_device_pool_destroy);
4997+/**
4998+ * psb_dpst_dev_change_wq - workqueue handler for dpst device change events
4999+ *
5000+ * @work: work struct to use for kernel scheduling
5001+ *
5002+ */
5003+void psb_dpst_dev_change_wq(struct work_struct *work)
5004+{
5005+ struct dpst_disp_workqueue_data *wq_data;
5006+ wq_data = to_dpst_disp_workqueue_data(work);
5007+ if (wq_data->dev_name_write_wrap == 1) {
5008+ wq_data->dev_name_read_write_wrap_ack = 1;
5009+ wq_data->dev_name_write_wrap = 0;
5010+ while (wq_data->dev_name_read != DRM_DPST_RING_DEPTH_MAX) {
5011+ if (wq_data->dev_name_arry_rw_status
5012+ [wq_data->dev_name_read] ==
5013+ DRM_DPST_READY_TO_READ) {
5014+ wq_data->dev_name_arry_rw_status
5015+ [wq_data->dev_name_read] =
5016+ DRM_DPST_READ_COMPLETE;
5017+ psb_umevent_notify_change_gfxsock
5018+ (wq_data->dev_umevent_arry
5019+ [wq_data->dev_name_read]);
5020+ }
5021+ wq_data->dev_name_read++;
5022+ }
5023+ wq_data->dev_name_read = 0;
5024+ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
5025+ if (wq_data->dev_name_arry_rw_status
5026+ [wq_data->dev_name_read] ==
5027+ DRM_DPST_READY_TO_READ) {
5028+ wq_data->dev_name_arry_rw_status
5029+ [wq_data->dev_name_read] =
5030+ DRM_DPST_READ_COMPLETE;
5031+ psb_umevent_notify_change_gfxsock
5032+ (wq_data->dev_umevent_arry
5033+ [wq_data->dev_name_read]);
5034+ }
5035+ wq_data->dev_name_read++;
5036+ }
5037+ } else {
5038+ while (wq_data->dev_name_read < wq_data->dev_name_write) {
5039+ if (wq_data->dev_name_arry_rw_status
5040+ [wq_data->dev_name_read] ==
5041+ DRM_DPST_READY_TO_READ) {
5042+ wq_data->dev_name_arry_rw_status
5043+ [wq_data->dev_name_read] =
5044+ DRM_DPST_READ_COMPLETE;
5045+ psb_umevent_notify_change_gfxsock
5046+ (wq_data->dev_umevent_arry
5047+ [wq_data->dev_name_read]);
5048+ }
5049+ wq_data->dev_name_read++;
5050+ }
5051+ }
5052+ if (wq_data->dev_name_read > DRM_DPST_RING_DEPTH_MAX)
5053+ wq_data->dev_name_read = 0;
5054+}
5055+EXPORT_SYMBOL(psb_dpst_dev_change_wq);
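/*
 * A minimal, illustrative sketch of the ring-index handshake used by
 * psb_dpst_notify_change_um() (producer) and psb_dpst_dev_change_wq()
 * (consumer) above.  The ring_push()/ring_drain() names are hypothetical
 * and only mirror the write/read index, wrap flag and wrap-ack exchange,
 * assuming the same 256-slot ring depth.
 */
#include <stdio.h>

#define RING_DEPTH      256
#define RING_DEPTH_MAX  (RING_DEPTH - 1)

struct ring {
	int write;      /* next slot the producer fills  */
	int read;       /* next slot the consumer drains */
	int write_wrap; /* producer wrapped past the end */
	int wrap_ack;   /* consumer acknowledged a wrap  */
	int slot[RING_DEPTH];
};

/* Producer side: mirrors the index handling in psb_dpst_notify_change_um(). */
static int ring_push(struct ring *r, int value)
{
	r->slot[r->write] = value;
	if (r->wrap_ack)
		r->wrap_ack = 0;
	r->write++;
	if (r->write == r->read) {  /* ring full: back off one slot */
		r->write--;
		return -1;
	}
	if (r->write > RING_DEPTH_MAX) {
		r->write = 0;
		r->write_wrap = 1;
	}
	return 0;
}

/* Consumer side: mirrors the two-phase drain in psb_dpst_dev_change_wq(). */
static void ring_drain(struct ring *r)
{
	if (r->write_wrap) {
		r->wrap_ack = 1;
		r->write_wrap = 0;
		for (; r->read != RING_DEPTH_MAX; r->read++)
			printf("slot %d: %d\n", r->read, r->slot[r->read]);
		r->read = 0;
	}
	for (; r->read < r->write; r->read++)
		printf("slot %d: %d\n", r->read, r->slot[r->read]);
	if (r->read > RING_DEPTH_MAX)
		r->read = 0;
}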
5056diff --git a/drivers/gpu/drm/psb/psb_dpst.h b/drivers/gpu/drm/psb/psb_dpst.h
5057new file mode 100644
5058index 0000000..43d3128
5059--- /dev/null
5060+++ b/drivers/gpu/drm/psb/psb_dpst.h
5061@@ -0,0 +1,90 @@
5062+/*
5063+ * Copyright © 2009 Intel Corporation
5064+ *
5065+ * Permission is hereby granted, free of charge, to any person obtaining a
5066+ * copy of this software and associated documentation files (the "Software"),
5067+ * to deal in the Software without restriction, including without limitation
5068+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
5069+ * and/or sell copies of the Software, and to permit persons to whom the
5070+ * Software is furnished to do so, subject to the following conditions:
5071+ *
5072+ * The above copyright notice and this permission notice (including the next
5073+ * paragraph) shall be included in all copies or substantial portions of the
5074+ * Software.
5075+ *
5076+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
5077+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
5078+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
5079+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
5080+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
5081+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
5082+ * IN THE SOFTWARE.
5083+ *
5084+ * Authors:
5085+ * James C. Gualario <james.c.gualario@intel.com>
5086+ *
5087+ */
5088+#ifndef _PSB_DPST_H_
5089+#define _PSB_DPST_H_
5090+/**
5091+ * required includes
5092+ *
5093+ */
5094+#include "psb_umevents.h"
5095+/**
5096+ * dpst specific defines
5097+ *
5098+ */
5099+#define DRM_DPST_RING_DEPTH 256
5100+#define DRM_DPST_RING_DEPTH_MAX (DRM_DPST_RING_DEPTH-1)
5101+#define DRM_DPST_READY_TO_READ 1
5102+#define DRM_DPST_READ_COMPLETE 2
5103+/**
5104+ * dpst workqueue data struct.
5105+ */
5106+struct dpst_disp_workqueue_data {
5107+ struct work_struct work;
5108+ const char *dev_name;
5109+ int dev_name_write;
5110+ int dev_name_read;
5111+ int dev_name_write_wrap;
5112+ int dev_name_read_write_wrap_ack;
5113+ struct umevent_obj *dev_umevent_arry[DRM_DPST_RING_DEPTH];
5114+ int dev_name_arry_rw_status[DRM_DPST_RING_DEPTH];
5115+ struct umevent_list *hotplug_dev_list;
5116+};
5117+/**
5118+ * dpst state structure
5119+ *
5120+ */
5121+struct dpst_state {
5122+ struct workqueue_struct *dpst_wq;
5123+ struct dpst_disp_workqueue_data dpst_change_wq_data;
5124+ struct umevent_list *list;
5125+};
5126+/**
5127+ * main interface function prototypes for dpst support.
5128+ *
5129+ */
5130+extern struct dpst_state *psb_dpst_init(struct kobject *parent_kobj);
5131+extern int psb_dpst_notify_change_um(struct umevent_obj *event,
5132+ struct dpst_state *state);
5133+extern struct umevent_obj *psb_dpst_create_and_notify_um(const char *name,
5134+ struct dpst_state *state);
5135+extern struct umevent_list *psb_dpst_device_pool_create_and_init(
5136+ struct kobject *parent_kobj,
5137+ struct dpst_state *state);
5138+extern void psb_dpst_device_pool_destroy(struct dpst_state *state);
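/*
 * Illustrative call sequence for the interface above, roughly as a driver
 * might use it during init and teardown.  The wrapper function and the
 * "hotplug" event name are hypothetical; only the psb_dpst_* calls are
 * taken from the prototypes above, and the usual kernel headers are
 * assumed for -ENOMEM.
 */
static int example_dpst_setup(struct kobject *parent_kobj)
{
	struct dpst_state *state;
	struct umevent_obj *ev;

	state = psb_dpst_init(parent_kobj);
	if (!state)
		return -ENOMEM;

	/* Register a named event, then signal user mode when it fires. */
	ev = psb_dpst_create_and_notify_um("hotplug", state);
	if (ev)
		psb_dpst_notify_change_um(ev, state);

	/* On teardown, release the workqueue and event list. */
	psb_dpst_device_pool_destroy(state);
	return 0;
}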
5139+/**
5140+ * to go back and forth between work struct and workqueue data
5141+ *
5142+ */
5143+#define to_dpst_disp_workqueue_data(x) \
5144+ container_of(x, struct dpst_disp_workqueue_data, work)
5145+
5146+/**
5147+ * function prototypes for workqueue implementation
5148+ *
5149+ */
5150+extern void psb_dpst_dev_change_wq(struct work_struct *work);
5151+#endif
5152diff --git a/drivers/gpu/drm/psb/psb_drm.h b/drivers/gpu/drm/psb/psb_drm.h
5153new file mode 100644
5154index 0000000..596a9f0
5155--- /dev/null
5156+++ b/drivers/gpu/drm/psb/psb_drm.h
5157@@ -0,0 +1,716 @@
5158+/**************************************************************************
5159+ * Copyright (c) 2007, Intel Corporation.
5160+ * All Rights Reserved.
5161+ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
5162+ * All Rights Reserved.
5163+ *
5164+ * This program is free software; you can redistribute it and/or modify it
5165+ * under the terms and conditions of the GNU General Public License,
5166+ * version 2, as published by the Free Software Foundation.
5167+ *
5168+ * This program is distributed in the hope it will be useful, but WITHOUT
5169+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
5170+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
5171+ * more details.
5172+ *
5173+ * You should have received a copy of the GNU General Public License along with
5174+ * this program; if not, write to the Free Software Foundation, Inc.,
5175+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5176+ *
5177+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
5178+ * develop this driver.
5179+ *
5180+ **************************************************************************/
5181+/*
5182+ */
5183+
5184+#ifndef _PSB_DRM_H_
5185+#define _PSB_DRM_H_
5186+
5187+#if defined(__linux__) && !defined(__KERNEL__)
5188+#include <stdint.h>
5189+#include "drm_mode.h"
5190+#endif
5191+
5192+#include "ttm/ttm_fence_user.h"
5193+#include "ttm/ttm_placement_user.h"
5194+
5195+/*
5196+ * Menlow/MRST graphics driver package version
5197+ * a.b.c.xxxx
5198+ * a - Product Family: 5 - Linux
5199+ * b - Major Release Version: 0 - non-Gallium (Ubuntu);
5200+ * 1 - Gallium (Moblin2)
5201+ * c - Hotfix Release
5202+ * xxxx - Graphics internal build #
5203+ */
5204+#define PSB_PACKAGE_VERSION "5.1.0.32L.0124"
5205+
5206+#define DRM_PSB_SAREA_MAJOR 0
5207+#define DRM_PSB_SAREA_MINOR 2
5208+#define PSB_FIXED_SHIFT 16
5209+
5210+#define DRM_PSB_FIRST_TA_USE_REG 3
5211+#define DRM_PSB_NUM_TA_USE_REG 5
5212+#define DRM_PSB_FIRST_RASTER_USE_REG 8
5213+#define DRM_PSB_NUM_RASTER_USE_REG 7
5214+
5215+#define PSB_NUM_PIPE 2
5216+
5217+/*
5218+ * Public memory types.
5219+ */
5220+
5221+#define DRM_PSB_MEM_MMU TTM_PL_PRIV1
5222+#define DRM_PSB_FLAG_MEM_MMU TTM_PL_FLAG_PRIV1
5223+#define DRM_PSB_MEM_PDS TTM_PL_PRIV2
5224+#define DRM_PSB_FLAG_MEM_PDS TTM_PL_FLAG_PRIV2
5225+#define DRM_PSB_MEM_APER TTM_PL_PRIV3
5226+#define DRM_PSB_FLAG_MEM_APER TTM_PL_FLAG_PRIV3
5227+#define DRM_PSB_MEM_RASTGEOM TTM_PL_PRIV4
5228+#define DRM_PSB_FLAG_MEM_RASTGEOM TTM_PL_FLAG_PRIV4
5229+#define PSB_MEM_RASTGEOM_START 0x30000000
5230+
5231+typedef int32_t psb_fixed;
5232+typedef uint32_t psb_ufixed;
5233+
5234+static inline int32_t psb_int_to_fixed(int a)
5235+{
5236+ return a * (1 << PSB_FIXED_SHIFT);
5237+}
5238+
5239+static inline uint32_t psb_unsigned_to_ufixed(unsigned int a)
5240+{
5241+ return a << PSB_FIXED_SHIFT;
5242+}
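/*
 * The psb_fixed/psb_ufixed types above are 16.16 fixed-point numbers
 * (PSB_FIXED_SHIFT == 16).  A small illustrative sketch; the multiply
 * helper is hypothetical and not part of this interface.
 */
static inline int32_t psb_fixed_mul_example(int32_t a, int32_t b)
{
	/* Widen to 64 bits so the intermediate product cannot overflow,
	 * then drop the extra 16 fractional bits. */
	return (int32_t)(((int64_t)a * b) >> PSB_FIXED_SHIFT);
}

/*
 * Example values:
 *   psb_int_to_fixed(3)       == 0x00030000   (3.0)
 *   psb_unsigned_to_ufixed(5) == 0x00050000   (5.0)
 *   psb_fixed_mul_example(psb_int_to_fixed(3),
 *                         psb_int_to_fixed(5)) == psb_int_to_fixed(15)
 */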
5243+
5244+/*Status of the command sent to the gfx device.*/
5245+typedef enum {
5246+ DRM_CMD_SUCCESS,
5247+ DRM_CMD_FAILED,
5248+ DRM_CMD_HANG
5249+} drm_cmd_status_t;
5250+
5251+struct drm_psb_scanout {
5252+ uint32_t buffer_id; /* DRM buffer object ID */
5253+ uint32_t rotation; /* Rotation as in RR_rotation definitions */
5254+ uint32_t stride; /* Buffer stride in bytes */
5255+ uint32_t depth; /* Buffer depth in bits (NOT bpp) */
5256+ uint32_t width; /* Buffer width in pixels */
5257+ uint32_t height; /* Buffer height in lines */
5258+ int32_t transform[3][3]; /* Buffer composite transform */
5259+ /* (scaling, rot, reflect) */
5260+};
5261+
5262+#define DRM_PSB_SAREA_OWNERS 16
5263+#define DRM_PSB_SAREA_OWNER_2D 0
5264+#define DRM_PSB_SAREA_OWNER_3D 1
5265+
5266+#define DRM_PSB_SAREA_SCANOUTS 3
5267+
5268+struct drm_psb_sarea {
5269+ /* Track changes of this data structure */
5270+
5271+ uint32_t major;
5272+ uint32_t minor;
5273+
5274+ /* Last context to touch part of hw */
5275+ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
5276+
5277+ /* Definition of front- and rotated buffers */
5278+ uint32_t num_scanouts;
5279+ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
5280+
5281+ int planeA_x;
5282+ int planeA_y;
5283+ int planeA_w;
5284+ int planeA_h;
5285+ int planeB_x;
5286+ int planeB_y;
5287+ int planeB_w;
5288+ int planeB_h;
5289+ /* Number of active scanouts */
5290+ uint32_t num_active_scanouts;
5291+};
5292+
5293+#define PSB_RELOC_MAGIC 0x67676767
5294+#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
5295+#define PSB_RELOC_SHIFT_SHIFT 0
5296+#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
5297+#define PSB_RELOC_ALSHIFT_SHIFT 16
5298+
5299+#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
5300+ * buffer
5301+ */
5302+#define PSB_RELOC_OP_2D_OFFSET 1 /* Offset of the indicated
5303+ * buffer, relative to 2D
5304+ * base address
5305+ */
5306+#define PSB_RELOC_OP_PDS_OFFSET 2 /* Offset of the indicated buffer,
5307+ * relative to PDS base address
5308+ */
5309+#define PSB_RELOC_OP_STRIDE 3 /* Stride of the indicated
5310+ * buffer (for tiling)
5311+ */
5312+#define PSB_RELOC_OP_USE_OFFSET 4 /* Offset of USE buffer
5313+ * relative to base reg
5314+ */
5315+#define PSB_RELOC_OP_USE_REG 5 /* Base reg of USE buffer */
5316+
5317+struct drm_psb_reloc {
5318+ uint32_t reloc_op;
5319+ uint32_t where; /* offset in destination buffer */
5320+ uint32_t buffer; /* Buffer reloc applies to */
5321+ uint32_t mask; /* Destination format: */
5322+ uint32_t shift; /* Destination format: */
5323+ uint32_t pre_add; /* Destination format: */
5324+ uint32_t background; /* Destination add */
5325+ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
5326+ uint32_t arg0; /* Reloc-op dependent */
5327+ uint32_t arg1;
5328+};
5329+
5330+
5331+#define PSB_GPU_ACCESS_READ (1ULL << 32)
5332+#define PSB_GPU_ACCESS_WRITE (1ULL << 33)
5333+#define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
5334+
5335+#define PSB_BO_FLAG_TA (1ULL << 48)
5336+#define PSB_BO_FLAG_SCENE (1ULL << 49)
5337+#define PSB_BO_FLAG_FEEDBACK (1ULL << 50)
5338+#define PSB_BO_FLAG_USSE (1ULL << 51)
5339+#define PSB_BO_FLAG_COMMAND (1ULL << 52)
5340+
5341+#define PSB_ENGINE_2D 0
5342+#define PSB_ENGINE_VIDEO 1
5343+#define PSB_ENGINE_RASTERIZER 2
5344+#define PSB_ENGINE_TA 3
5345+#define PSB_ENGINE_HPRAST 4
5346+#define LNC_ENGINE_ENCODE 5
5347+
5348+/*
5349+ * For this fence class we have a couple of
5350+ * fence types.
5351+ */
5352+
5353+#define _PSB_FENCE_EXE_SHIFT 0
5354+#define _PSB_FENCE_TA_DONE_SHIFT 1
5355+#define _PSB_FENCE_RASTER_DONE_SHIFT 2
5356+#define _PSB_FENCE_SCENE_DONE_SHIFT 3
5357+#define _PSB_FENCE_FEEDBACK_SHIFT 4
5358+
5359+#define _PSB_ENGINE_TA_FENCE_TYPES 5
5360+#define _PSB_FENCE_TYPE_EXE (1 << _PSB_FENCE_EXE_SHIFT)
5361+#define _PSB_FENCE_TYPE_TA_DONE (1 << _PSB_FENCE_TA_DONE_SHIFT)
5362+#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT)
5363+#define _PSB_FENCE_TYPE_SCENE_DONE (1 << _PSB_FENCE_SCENE_DONE_SHIFT)
5364+#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
5365+
5366+#define PSB_ENGINE_HPRAST 4
5367+#define PSB_NUM_ENGINES 6
5368+
5369+#define PSB_TA_FLAG_FIRSTPASS (1 << 0)
5370+#define PSB_TA_FLAG_LASTPASS (1 << 1)
5371+
5372+#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
5373+
5374+struct drm_psb_extension_rep {
5375+ int32_t exists;
5376+ uint32_t driver_ioctl_offset;
5377+ uint32_t sarea_offset;
5378+ uint32_t major;
5379+ uint32_t minor;
5380+ uint32_t pl;
5381+};
5382+
5383+#define DRM_PSB_EXT_NAME_LEN 128
5384+
5385+union drm_psb_extension_arg {
5386+ char extension[DRM_PSB_EXT_NAME_LEN];
5387+ struct drm_psb_extension_rep rep;
5388+};
5389+
5390+struct psb_validate_req {
5391+ uint64_t set_flags;
5392+ uint64_t clear_flags;
5393+ uint64_t next;
5394+ uint64_t presumed_gpu_offset;
5395+ uint32_t buffer_handle;
5396+ uint32_t presumed_flags;
5397+ uint32_t group;
5398+ uint32_t pad64;
5399+};
5400+
5401+struct psb_validate_rep {
5402+ uint64_t gpu_offset;
5403+ uint32_t placement;
5404+ uint32_t fence_type_mask;
5405+};
5406+
5407+#define PSB_USE_PRESUMED (1 << 0)
5408+
5409+struct psb_validate_arg {
5410+ int handled;
5411+ int ret;
5412+ union {
5413+ struct psb_validate_req req;
5414+ struct psb_validate_rep rep;
5415+ } d;
5416+};
5417+
5418+struct drm_psb_scene {
5419+ int handle_valid;
5420+ uint32_t handle;
5421+ uint32_t w; /* also contains msaa info */
5422+ uint32_t h;
5423+ uint32_t num_buffers;
5424+};
5425+
5426+#define DRM_PSB_FENCE_NO_USER (1 << 0)
5427+
5428+struct psb_ttm_fence_rep {
5429+ uint32_t handle;
5430+ uint32_t fence_class;
5431+ uint32_t fence_type;
5432+ uint32_t signaled_types;
5433+ uint32_t error;
5434+};
5435+
5436+typedef struct drm_psb_cmdbuf_arg {
5437+ uint64_t buffer_list; /* List of buffers to validate */
5438+ uint64_t clip_rects; /* See i915 counterpart */
5439+ uint64_t scene_arg;
5440+ uint64_t fence_arg;
5441+
5442+ uint32_t ta_flags;
5443+
5444+ uint32_t ta_handle; /* TA reg-value pairs */
5445+ uint32_t ta_offset;
5446+ uint32_t ta_size;
5447+
5448+ uint32_t oom_handle;
5449+ uint32_t oom_offset;
5450+ uint32_t oom_size;
5451+
5452+ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
5453+ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
5454+ uint32_t cmdbuf_size;
5455+
5456+ uint32_t reloc_handle; /* Reloc buffer object */
5457+ uint32_t reloc_offset;
5458+ uint32_t num_relocs;
5459+
5460+ int32_t damage; /* Damage front buffer with cliprects */
5461+ /* Not implemented yet */
5462+ uint32_t fence_flags;
5463+ uint32_t engine;
5464+
5465+ /*
5466+ * Feedback;
5467+ */
5468+
5469+ uint32_t feedback_ops;
5470+ uint32_t feedback_handle;
5471+ uint32_t feedback_offset;
5472+ uint32_t feedback_breakpoints;
5473+ uint32_t feedback_size;
5474+} drm_psb_cmdbuf_arg_t;
5475+
5476+typedef struct drm_psb_pageflip_arg {
5477+ uint32_t flip_offset;
5478+ uint32_t stride;
5479+} drm_psb_pageflip_arg_t;
5480+
5481+typedef enum {
5482+ LNC_VIDEO_FRAME_SKIP,
5483+ LNC_VIDEO_GETPARAM_RAR_REGION_SIZE,
5484+ LNC_VIDEO_GETPARAM_RAR_HANDLER_OFFSET
5485+} lnc_getparam_key_t;
5486+
5487+struct drm_lnc_video_getparam_arg {
5488+ lnc_getparam_key_t key;
5489+ uint64_t arg; /* argument pointer */
5490+ uint64_t value; /* feed back pointer */
5491+};
5492+
5493+struct drm_psb_xhw_init_arg {
5494+ uint32_t operation;
5495+ uint32_t buffer_handle;
5496+};
5497+
5498+/*
5499+ * Feedback components:
5500+ */
5501+
5502+/*
5503+ * Vistest component. The number of these in the feedback buffer
5504+ * equals the number of vistest breakpoints + 1.
5505+ * This is currently the only feedback component.
5506+ */
5507+
5508+struct drm_psb_vistest {
5509+ uint32_t vt[8];
5510+};
5511+
5512+struct drm_psb_sizes_arg {
5513+ uint32_t ta_mem_size;
5514+ uint32_t mmu_size;
5515+ uint32_t pds_size;
5516+ uint32_t rastgeom_size;
5517+ uint32_t tt_size;
5518+ uint32_t vram_size;
5519+};
5520+
5521+struct mrst_timing_info {
5522+ uint16_t pixel_clock;
5523+ uint8_t hactive_lo;
5524+ uint8_t hblank_lo;
5525+ uint8_t hblank_hi:4;
5526+ uint8_t hactive_hi:4;
5527+ uint8_t vactive_lo;
5528+ uint8_t vblank_lo;
5529+ uint8_t vblank_hi:4;
5530+ uint8_t vactive_hi:4;
5531+ uint8_t hsync_offset_lo;
5532+ uint8_t hsync_pulse_width_lo;
5533+ uint8_t vsync_pulse_width_lo:4;
5534+ uint8_t vsync_offset_lo:4;
5535+ uint8_t vsync_pulse_width_hi:2;
5536+ uint8_t vsync_offset_hi:2;
5537+ uint8_t hsync_pulse_width_hi:2;
5538+ uint8_t hsync_offset_hi:2;
5539+ uint8_t width_mm_lo;
5540+ uint8_t height_mm_lo;
5541+ uint8_t height_mm_hi:4;
5542+ uint8_t width_mm_hi:4;
5543+ uint8_t hborder;
5544+ uint8_t vborder;
5545+ uint8_t unknown0:1;
5546+ uint8_t hsync_positive:1;
5547+ uint8_t vsync_positive:1;
5548+ uint8_t separate_sync:2;
5549+ uint8_t stereo:1;
5550+ uint8_t unknown6:1;
5551+ uint8_t interlaced:1;
5552+} __attribute__((packed));
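/*
 * The mrst_timing_info fields above follow the usual EDID detailed-timing
 * split of each value into a low byte plus a high nibble.  A sketch of how
 * a full value would be reconstructed; the helper names are hypothetical
 * and the lo/hi layout is assumed from the field widths above.
 */
static inline uint32_t mrst_timing_hactive(const struct mrst_timing_info *t)
{
	return ((uint32_t)t->hactive_hi << 8) | t->hactive_lo;
}

static inline uint32_t mrst_timing_vactive(const struct mrst_timing_info *t)
{
	return ((uint32_t)t->vactive_hi << 8) | t->vactive_lo;
}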
5553+
5554+struct mrst_panel_descriptor_v1{
5555+ uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
5556+ /* 0x61190 if MIPI */
5557+ uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
5558+ uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
5559+ uint32_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
5560+ /* Register 0x61210 */
5561+ struct mrst_timing_info DTD;/*18 bytes, Standard definition */
5562+ uint16_t Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
5563+ /* Bit 0, Frequency, 15 bits,0 - 32767Hz */
5564+ /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
5565+ uint16_t Panel_MIPI_Display_Descriptor;
5566+ /*16 bits, Defined as follows: */
5567+ /* if MIPI, 0x0000 if LVDS */
5568+ /* Bit 0, Type, 2 bits, */
5569+ /* 0: Type-1, */
5570+ /* 1: Type-2, */
5571+ /* 2: Type-3, */
5572+ /* 3: Type-4 */
5573+ /* Bit 2, Pixel Format, 4 bits */
5574+ /* Bit0: 16bpp (not supported in LNC), */
5575+ /* Bit1: 18bpp loosely packed, */
5576+ /* Bit2: 18bpp packed, */
5577+ /* Bit3: 24bpp */
5578+ /* Bit 6, Reserved, 2 bits, 00b */
5579+ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
5580+ /* Bit 14, Reserved, 2 bits, 00b */
5581+} __attribute__ ((packed));
5582+
5583+struct mrst_panel_descriptor_v2{
5584+ uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
5585+ /* 0x61190 if MIPI */
5586+ uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
5587+ uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
5588+ uint8_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
5589+ /* Register 0x61210 */
5590+ struct mrst_timing_info DTD;/*18 bytes, Standard definition */
5591+ uint16_t Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
5592+ /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
5593+ uint8_t Panel_Initial_Brightness;/* [7:0] 0 - 100% */
5594+ /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
5595+ uint16_t Panel_MIPI_Display_Descriptor;
5596+ /*16 bits, Defined as follows: */
5597+ /* if MIPI, 0x0000 if LVDS */
5598+ /* Bit 0, Type, 2 bits, */
5599+ /* 0: Type-1, */
5600+ /* 1: Type-2, */
5601+ /* 2: Type-3, */
5602+ /* 3: Type-4 */
5603+ /* Bit 2, Pixel Format, 4 bits */
5604+ /* Bit0: 16bpp (not supported in LNC), */
5605+ /* Bit1: 18bpp loosely packed, */
5606+ /* Bit2: 18bpp packed, */
5607+ /* Bit3: 24bpp */
5608+ /* Bit 6, Reserved, 2 bits, 00b */
5609+ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
5610+ /* Bit 14, Reserved, 2 bits, 00b */
5611+} __attribute__ ((packed));
5612+
5613+union mrst_panel_rx{
5614+ struct{
5615+ uint16_t NumberOfLanes :2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
5616+ /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
5617+ uint16_t MaxLaneFreq :3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
5618+ /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
5619+ uint16_t SupportedVideoTransferMode :2; /*0: Non-burst only */
5620+ /* 1: Burst and non-burst */
5621+ /* 2/3: Reserved */
5622+ uint16_t HSClkBehavior :1; /*0: Continuous, 1: Non-continuous*/
5623+ uint16_t DuoDisplaySupport :1; /*1 bit,0: No, 1: Yes*/
5624+ uint16_t ECC_ChecksumCapabilities :1;/*1 bit,0: No, 1: Yes*/
5625+ uint16_t BidirectionalCommunication :1;/*1 bit,0: No, 1: Yes */
5626+ uint16_t Rsvd :5;/*5 bits,00000b */
5627+ }panelrx;
5628+ uint16_t panel_receiver;
5629+} __attribute__ ((packed));
5630+
5631+struct gct_ioctl_arg{
5632+ uint8_t bpi; /* boot panel index, number of panel used during boot */
5633+ uint8_t pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
5634+ struct mrst_timing_info DTD; /* timing info for the selected panel */
5635+ uint32_t Panel_Port_Control;
5636+ uint32_t PP_On_Sequencing;/*1 dword,Register 0x61208,*/
5637+ uint32_t PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
5638+ uint32_t PP_Cycle_Delay;
5639+ uint16_t Panel_Backlight_Inverter_Descriptor;
5640+} __attribute__ ((packed));
5641+
5642+struct mrst_vbt{
5643+ char Signature[4]; /*4 bytes,"$GCT" */
5644+ uint8_t Revision; /*1 byte */
5645+ uint8_t Size; /*1 byte */
5646+ uint8_t Checksum; /*1 byte,Calculated*/
5647+ void *mrst_gct;
5648+} __attribute__ ((packed));
5649+
5650+struct mrst_gct_v1{ /* expect this table to change per customer request*/
5651+ union{ /*8 bits,Defined as follows: */
5652+ struct{
5653+ uint8_t PanelType :4; /*4 bits, Bit field for panels*/
5654+ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
5655+ uint8_t BootPanelIndex :2;/*2 bits,Specifies which of the*/
5656+ /* 4 panels to use by default*/
5657+ uint8_t BootMIPI_DSI_RxIndex :2;/*Specifies which of*/
5658+ /* the 4 MIPI DSI receivers to use*/
5659+ }PD;
5660+ uint8_t PanelDescriptor;
5661+ };
5662+ struct mrst_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
5663+ union mrst_panel_rx panelrx[4]; /* panel receivers*/
5664+} __attribute__ ((packed));
5665+
5666+struct mrst_gct_v2{ /* expect this table to change per customer request*/
5667+ union{ /*8 bits,Defined as follows: */
5668+ struct{
5669+ uint8_t PanelType :4; /*4 bits, Bit field for panels*/
5670+ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
5671+ uint8_t BootPanelIndex :2;/*2 bits,Specifies which of the*/
5672+ /* 4 panels to use by default*/
5673+ uint8_t BootMIPI_DSI_RxIndex :2;/*Specifies which of*/
5674+ /* the 4 MIPI DSI receivers to use*/
5675+ }PD;
5676+ uint8_t PanelDescriptor;
5677+ };
5678+ struct mrst_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
5679+ union mrst_panel_rx panelrx[4]; /* panel receivers*/
5680+} __attribute__ ((packed));
5681+
5682+#define PSB_DC_CRTC_SAVE 0x01
5683+#define PSB_DC_CRTC_RESTORE 0x02
5684+#define PSB_DC_OUTPUT_SAVE 0x04
5685+#define PSB_DC_OUTPUT_RESTORE 0x08
5686+#define PSB_DC_CRTC_MASK 0x03
5687+#define PSB_DC_OUTPUT_MASK 0x0C
5688+
5689+struct drm_psb_dc_state_arg {
5690+ uint32_t flags;
5691+ uint32_t obj_id;
5692+};
5693+
5694+struct drm_psb_mode_operation_arg {
5695+ uint32_t obj_id;
5696+ uint16_t operation;
5697+ struct drm_mode_modeinfo mode;
5698+ void * data;
5699+};
5700+
5701+struct drm_psb_stolen_memory_arg {
5702+ uint32_t base;
5703+ uint32_t size;
5704+};
5705+
5706+/*Display Register Bits*/
5707+#define REGRWBITS_PFIT_CONTROLS (1 << 0)
5708+#define REGRWBITS_PFIT_AUTOSCALE_RATIOS (1 << 1)
5709+#define REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS (1 << 2)
5710+#define REGRWBITS_PIPEASRC (1 << 3)
5711+#define REGRWBITS_PIPEBSRC (1 << 4)
5712+#define REGRWBITS_VTOTAL_A (1 << 5)
5713+#define REGRWBITS_VTOTAL_B (1 << 6)
5714+
5715+/*Overlay Register Bits*/
5716+#define OV_REGRWBITS_OVADD (1 << 0)
5717+#define OV_REGRWBITS_OGAM_ALL (1 << 1)
5718+
5719+struct drm_psb_register_rw_arg {
5720+ uint32_t b_force_hw_on;
5721+
5722+ uint32_t display_read_mask;
5723+ uint32_t display_write_mask;
5724+
5725+ struct {
5726+ uint32_t pfit_controls;
5727+ uint32_t pfit_autoscale_ratios;
5728+ uint32_t pfit_programmed_scale_ratios;
5729+ uint32_t pipeasrc;
5730+ uint32_t pipebsrc;
5731+ uint32_t vtotal_a;
5732+ uint32_t vtotal_b;
5733+ } display;
5734+
5735+ uint32_t overlay_read_mask;
5736+ uint32_t overlay_write_mask;
5737+
5738+ struct {
5739+ uint32_t OVADD;
5740+ uint32_t OGAMC0;
5741+ uint32_t OGAMC1;
5742+ uint32_t OGAMC2;
5743+ uint32_t OGAMC3;
5744+ uint32_t OGAMC4;
5745+ uint32_t OGAMC5;
5746+ } overlay;
5747+};
5748+
5749+#define PSB_HW_COOKIE_SIZE 16
5750+#define PSB_HW_FEEDBACK_SIZE 8
5751+#define PSB_HW_OOM_CMD_SIZE (6 + DRM_PSB_NUM_RASTER_USE_REG * 2)
5752+
5753+struct drm_psb_xhw_arg {
5754+ uint32_t op;
5755+ int ret;
5756+ uint32_t irq_op;
5757+ uint32_t issue_irq;
5758+ uint32_t cookie[PSB_HW_COOKIE_SIZE];
5759+ union {
5760+ struct {
5761+ uint32_t w; /* also contains msaa info */
5762+ uint32_t h;
5763+ uint32_t size;
5764+ uint32_t clear_p_start;
5765+ uint32_t clear_num_pages;
5766+ } si;
5767+ struct {
5768+ uint32_t fire_flags;
5769+ uint32_t hw_context;
5770+ uint32_t offset;
5771+ uint32_t engine;
5772+ uint32_t flags;
5773+ uint32_t rca;
5774+ uint32_t num_oom_cmds;
5775+ uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE];
5776+ } sb;
5777+ struct {
5778+ uint32_t pages;
5779+ uint32_t size;
5780+ uint32_t ta_min_size;
5781+ } bi;
5782+ struct {
5783+ uint32_t bca;
5784+ uint32_t rca;
5785+ uint32_t flags;
5786+ } oom;
5787+ struct {
5788+ uint32_t pt_offset;
5789+ uint32_t param_offset;
5790+ uint32_t flags;
5791+ } bl;
5792+ struct {
5793+ uint32_t value;
5794+ } cl;
5795+ uint32_t feedback[PSB_HW_FEEDBACK_SIZE];
5796+ } arg;
5797+};
5798+
5799+/* Controlling the kernel modesetting buffers */
5800+
5801+#define DRM_PSB_KMS_OFF 0x00
5802+#define DRM_PSB_KMS_ON 0x01
5803+#define DRM_PSB_VT_LEAVE 0x02
5804+#define DRM_PSB_VT_ENTER 0x03
5805+#define DRM_PSB_XHW_INIT 0x04
5806+#define DRM_PSB_XHW 0x05
5807+#define DRM_PSB_EXTENSION 0x06
5808+#define DRM_PSB_SIZES 0x07
5809+#define DRM_PSB_FUSE_REG 0x08
5810+#define DRM_PSB_VBT 0x09
5811+#define DRM_PSB_DC_STATE 0x0A
5812+#define DRM_PSB_ADB 0x0B
5813+#define DRM_PSB_MODE_OPERATION 0x0C
5814+#define DRM_PSB_STOLEN_MEMORY 0x0D
5815+#define DRM_PSB_REGISTER_RW 0x0E
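/*
 * Illustrative user-space invocation of one of the command numbers above,
 * assuming libdrm is available and this header has been included.  The
 * sizes query is read-only from the driver's point of view, so
 * drmCommandRead() is used; the device node path is an assumption.
 */
#include <xf86drm.h>
#include <fcntl.h>
#include <stdio.h>

static int psb_query_sizes_example(void)
{
	struct drm_psb_sizes_arg sizes = {0};
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return -1;
	if (drmCommandRead(fd, DRM_PSB_SIZES, &sizes, sizeof(sizes)) == 0)
		printf("tt %u vram %u\n", sizes.tt_size, sizes.vram_size);
	drmClose(fd);
	return 0;
}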
5816+
5817+/*
5818+ * Xhw commands.
5819+ */
5820+
5821+#define PSB_XHW_INIT 0x00
5822+#define PSB_XHW_TAKEDOWN 0x01
5823+
5824+#define PSB_XHW_FIRE_RASTER 0x00
5825+#define PSB_XHW_SCENE_INFO 0x01
5826+#define PSB_XHW_SCENE_BIND_FIRE 0x02
5827+#define PSB_XHW_TA_MEM_INFO 0x03
5828+#define PSB_XHW_RESET_DPM 0x04
5829+#define PSB_XHW_OOM 0x05
5830+#define PSB_XHW_TERMINATE 0x06
5831+#define PSB_XHW_VISTEST 0x07
5832+#define PSB_XHW_RESUME 0x08
5833+#define PSB_XHW_TA_MEM_LOAD 0x09
5834+#define PSB_XHW_CHECK_LOCKUP 0x0a
5835+
5836+#define PSB_SCENE_FLAG_DIRTY (1 << 0)
5837+#define PSB_SCENE_FLAG_COMPLETE (1 << 1)
5838+#define PSB_SCENE_FLAG_SETUP (1 << 2)
5839+#define PSB_SCENE_FLAG_SETUP_ONLY (1 << 3)
5840+#define PSB_SCENE_FLAG_CLEARED (1 << 4)
5841+
5842+#define PSB_TA_MEM_FLAG_TA (1 << 0)
5843+#define PSB_TA_MEM_FLAG_RASTER (1 << 1)
5844+#define PSB_TA_MEM_FLAG_HOSTA (1 << 2)
5845+#define PSB_TA_MEM_FLAG_HOSTD (1 << 3)
5846+#define PSB_TA_MEM_FLAG_INIT (1 << 4)
5847+#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5)
5848+
5849+/*Raster fire will deallocate memory */
5850+#define PSB_FIRE_FLAG_RASTER_DEALLOC (1 << 0)
5851+/*Isp reset needed due to change in ZLS format */
5852+#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1)
5853+/*These are set by Xpsb. */
5854+#define PSB_FIRE_FLAG_XHW_MASK 0xff000000
5855+/*The task has had at least one OOM and Xpsb will
5856+ send back messages on each fire. */
5857+#define PSB_FIRE_FLAG_XHW_OOM (1 << 24)
5858+
5859+#define PSB_SCENE_ENGINE_TA 0
5860+#define PSB_SCENE_ENGINE_RASTER 1
5861+#define PSB_SCENE_NUM_ENGINES 2
5862+
5863+#define PSB_LOCKUP_RASTER (1 << 0)
5864+#define PSB_LOCKUP_TA (1 << 1)
5865+
5866+struct drm_psb_dev_info_arg {
5867+ uint32_t num_use_attribute_registers;
5868+};
5869+#define DRM_PSB_DEVINFO 0x01
5870+
5871+#define PSB_MODE_OPERATION_MODE_VALID 0x01
5872+
5873+#endif
5874diff --git a/drivers/gpu/drm/psb/psb_drv.c b/drivers/gpu/drm/psb/psb_drv.c
5875new file mode 100644
5876index 0000000..7019b73
5877--- /dev/null
5878+++ b/drivers/gpu/drm/psb/psb_drv.c
5879@@ -0,0 +1,2239 @@
5880+/**************************************************************************
5881+ * Copyright (c) 2007, Intel Corporation.
5882+ * All Rights Reserved.
5883+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
5884+ * All Rights Reserved.
5885+ *
5886+ * This program is free software; you can redistribute it and/or modify it
5887+ * under the terms and conditions of the GNU General Public License,
5888+ * version 2, as published by the Free Software Foundation.
5889+ *
5890+ * This program is distributed in the hope it will be useful, but WITHOUT
5891+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
5892+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
5893+ * more details.
5894+ *
5895+ * You should have received a copy of the GNU General Public License along with
5896+ * this program; if not, write to the Free Software Foundation, Inc.,
5897+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5898+ *
5899+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
5900+ * develop this driver.
5901+ *
5902+ **************************************************************************/
5903+/*
5904+ */
5905+
5906+#include <drm/drmP.h>
5907+#include <drm/drm.h>
5908+#include "psb_drm.h"
5909+#include "psb_drv.h"
5910+#include "psb_reg.h"
5911+#include "psb_intel_reg.h"
5912+#include "psb_intel_bios.h"
5913+#include "psb_msvdx.h"
5914+#include "lnc_topaz.h"
5915+#include <drm/drm_pciids.h>
5916+#include "psb_scene.h"
5917+#include "psb_powermgmt.h"
5918+#include <linux/cpu.h>
5919+#include <linux/notifier.h>
5920+#include <linux/spinlock.h>
5921+
5922+int drm_psb_debug;
5923+EXPORT_SYMBOL(drm_psb_debug);
5924+static int drm_psb_trap_pagefaults;
5925+static int drm_psb_clock_gating = 2;
5926+static int drm_psb_ta_mem_size = 32 * 1024;
5927+
5928+int drm_psb_disable_vsync = 1;
5929+int drm_psb_no_fb;
5930+int drm_psb_force_pipeb;
5931+int drm_idle_check_interval = 5;
5932+int drm_psb_ospm = 0;
5933+int drm_msvdx_pmpolicy = PSB_PMPOLICY_NOPM;
5934+int drm_topaz_pmpolicy = PSB_PMPOLICY_NOPM;
5935+
5936+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
5937+
5938+MODULE_PARM_DESC(debug, "Enable debug output");
5939+MODULE_PARM_DESC(clock_gating, "clock gating (1 = disabled, 2 = automatic, other = leave untouched)");
5940+MODULE_PARM_DESC(no_fb, "Disable FBdev");
5941+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
5942+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
5943+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
5944+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
5945+MODULE_PARM_DESC(ospm, "switch for ospm support");
5946+MODULE_PARM_DESC(msvdx_pmpolicy, "msvdx power management policy between frames");
5947+MODULE_PARM_DESC(topaz_pmpolicy, "topaz power management policy between frames");
5948+module_param_named(debug, drm_psb_debug, int, 0600);
5949+module_param_named(clock_gating, drm_psb_clock_gating, int, 0600);
5950+module_param_named(no_fb, drm_psb_no_fb, int, 0600);
5951+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
5952+module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600);
5953+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
5954+module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600);
5955+module_param_named(ospm, drm_psb_ospm, int, 0600);
5956+module_param_named(msvdx_pmpolicy, drm_msvdx_pmpolicy, int, 0600);
5957+module_param_named(topaz_pmpolicy, drm_topaz_pmpolicy, int, 0600);
5958+
5959+#ifndef CONFIG_X86_PAT
5960+#warning "Don't build this driver without PAT support!!!"
5961+#endif
5962+
5963+#define psb_PCI_IDS \
5964+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
5965+ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
5966+ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5967+ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5968+ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5969+ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5970+ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5971+ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5972+ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5973+ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5974+ {0, 0, 0}
5975+
5976+static struct pci_device_id pciidlist[] = {
5977+ psb_PCI_IDS
5978+};
5979+
5980+/*
5981+ * Standard IOCTLs.
5982+ */
5983+
5984+#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
5985+#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
5986+#define DRM_IOCTL_PSB_VT_LEAVE DRM_IO(DRM_PSB_VT_LEAVE + DRM_COMMAND_BASE)
5987+#define DRM_IOCTL_PSB_VT_ENTER DRM_IO(DRM_PSB_VT_ENTER + DRM_COMMAND_BASE)
5988+#define DRM_IOCTL_PSB_XHW_INIT DRM_IOW(DRM_PSB_XHW_INIT + DRM_COMMAND_BASE, \
5989+ struct drm_psb_xhw_init_arg)
5990+#define DRM_IOCTL_PSB_XHW DRM_IO(DRM_PSB_XHW + DRM_COMMAND_BASE)
5991+#define DRM_IOCTL_PSB_EXTENSION DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
5992+ union drm_psb_extension_arg)
5993+#define DRM_IOCTL_PSB_SIZES DRM_IOR(DRM_PSB_SIZES + DRM_COMMAND_BASE, \
5994+ struct drm_psb_sizes_arg)
5995+#define DRM_IOCTL_PSB_FUSE_REG DRM_IOWR(DRM_PSB_FUSE_REG + DRM_COMMAND_BASE, \
5996+ uint32_t)
5997+#define DRM_IOCTL_PSB_VBT DRM_IOWR(DRM_PSB_VBT + DRM_COMMAND_BASE, \
5998+ struct gct_ioctl_arg)
5999+#define DRM_IOCTL_PSB_DC_STATE DRM_IOW(DRM_PSB_DC_STATE + DRM_COMMAND_BASE, \
6000+ struct drm_psb_dc_state_arg)
6001+#define DRM_IOCTL_PSB_ADB DRM_IOWR(DRM_PSB_ADB + DRM_COMMAND_BASE, \
6002+ uint32_t)
6003+#define DRM_IOCTL_PSB_MODE_OPERATION DRM_IOWR(DRM_PSB_MODE_OPERATION + DRM_COMMAND_BASE, \
6004+ struct drm_psb_mode_operation_arg)
6005+#define DRM_IOCTL_PSB_STOLEN_MEMORY DRM_IOWR(DRM_PSB_STOLEN_MEMORY + DRM_COMMAND_BASE, \
6006+ struct drm_psb_stolen_memory_arg)
6007+#define DRM_IOCTL_PSB_REGISTER_RW DRM_IOWR(DRM_PSB_REGISTER_RW + DRM_COMMAND_BASE, \
6008+ struct drm_psb_register_rw_arg)
6009+
6010+/*
6011+ * TTM execbuf extension.
6012+ */
6013+
6014+#define DRM_PSB_CMDBUF (DRM_PSB_REGISTER_RW + 1)
6015+#define DRM_PSB_SCENE_UNREF (DRM_PSB_CMDBUF + 1)
6016+#define DRM_IOCTL_PSB_CMDBUF DRM_IOW(DRM_PSB_CMDBUF + DRM_COMMAND_BASE, \
6017+ struct drm_psb_cmdbuf_arg)
6018+#define DRM_IOCTL_PSB_SCENE_UNREF DRM_IOW(DRM_PSB_SCENE_UNREF + DRM_COMMAND_BASE, \
6019+ struct drm_psb_scene)
6020+#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
6021+#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
6022+#define DRM_IOCTL_PSB_EXTENSION DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
6023+ union drm_psb_extension_arg)
6024+/*
6025+ * TTM placement user extension.
6026+ */
6027+
6028+#define DRM_PSB_PLACEMENT_OFFSET (DRM_PSB_SCENE_UNREF + 1)
6029+
6030+#define DRM_PSB_TTM_PL_CREATE (TTM_PL_CREATE + DRM_PSB_PLACEMENT_OFFSET)
6031+#define DRM_PSB_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_PSB_PLACEMENT_OFFSET)
6032+#define DRM_PSB_TTM_PL_UNREF (TTM_PL_UNREF + DRM_PSB_PLACEMENT_OFFSET)
6033+#define DRM_PSB_TTM_PL_SYNCCPU (TTM_PL_SYNCCPU + DRM_PSB_PLACEMENT_OFFSET)
6034+#define DRM_PSB_TTM_PL_WAITIDLE (TTM_PL_WAITIDLE + DRM_PSB_PLACEMENT_OFFSET)
6035+#define DRM_PSB_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_PSB_PLACEMENT_OFFSET)
6036+
6037+/*
6038+ * TTM fence extension.
6039+ */
6040+
6041+#define DRM_PSB_FENCE_OFFSET (DRM_PSB_TTM_PL_SETSTATUS + 1)
6042+#define DRM_PSB_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_PSB_FENCE_OFFSET)
6043+#define DRM_PSB_TTM_FENCE_FINISH (TTM_FENCE_FINISH + DRM_PSB_FENCE_OFFSET)
6044+#define DRM_PSB_TTM_FENCE_UNREF (TTM_FENCE_UNREF + DRM_PSB_FENCE_OFFSET)
6045+
6046+#define DRM_PSB_FLIP (DRM_PSB_TTM_FENCE_UNREF + 1) /* 20 */
6047+/* PSB video extension */
6048+#define DRM_LNC_VIDEO_GETPARAM (DRM_PSB_FLIP + 1)
6049+
6050+#define DRM_IOCTL_PSB_TTM_PL_CREATE \
6051+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE,\
6052+ union ttm_pl_create_arg)
6053+#define DRM_IOCTL_PSB_TTM_PL_REFERENCE \
6054+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_REFERENCE,\
6055+ union ttm_pl_reference_arg)
6056+#define DRM_IOCTL_PSB_TTM_PL_UNREF \
6057+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_UNREF,\
6058+ struct ttm_pl_reference_req)
6059+#define DRM_IOCTL_PSB_TTM_PL_SYNCCPU \
6060+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SYNCCPU,\
6061+ struct ttm_pl_synccpu_arg)
6062+#define DRM_IOCTL_PSB_TTM_PL_WAITIDLE \
6063+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_WAITIDLE,\
6064+ struct ttm_pl_waitidle_arg)
6065+#define DRM_IOCTL_PSB_TTM_PL_SETSTATUS \
6066+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SETSTATUS,\
6067+ union ttm_pl_setstatus_arg)
6068+#define DRM_IOCTL_PSB_TTM_FENCE_SIGNALED \
6069+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_SIGNALED, \
6070+ union ttm_fence_signaled_arg)
6071+#define DRM_IOCTL_PSB_TTM_FENCE_FINISH \
6072+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_FINISH, \
6073+ union ttm_fence_finish_arg)
6074+#define DRM_IOCTL_PSB_TTM_FENCE_UNREF \
6075+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_UNREF, \
6076+ struct ttm_fence_unref_arg)
6077+#define DRM_IOCTL_PSB_FLIP \
6078+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_FLIP, \
6079+ struct drm_psb_pageflip_arg)
6080+#define DRM_IOCTL_LNC_VIDEO_GETPARAM \
6081+ DRM_IOWR(DRM_COMMAND_BASE + DRM_LNC_VIDEO_GETPARAM, \
6082+ struct drm_lnc_video_getparam_arg)
6083+
6084+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
6085+ struct drm_file *file_priv);
6086+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
6087+ struct drm_file *file_priv);
6088+static int psb_sizes_ioctl(struct drm_device *dev, void *data,
6089+ struct drm_file *file_priv);
6090+static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
6091+ struct drm_file *file_priv);
6092+static int psb_vbt_ioctl(struct drm_device *dev, void *data,
6093+ struct drm_file *file_priv);
6094+static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
6095+ struct drm_file *file_priv);
6096+static int psb_adb_ioctl(struct drm_device *dev, void *data,
6097+ struct drm_file *file_priv);
6098+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
6099+ struct drm_file *file_priv);
6100+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
6101+ struct drm_file *file_priv);
6102+static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
6103+ struct drm_file *file_priv);
6104+
6105+#define PSB_IOCTL_DEF(ioctl, func, flags) \
6106+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
6107+
6108+static struct drm_ioctl_desc psb_ioctls[] = {
6109+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl,
6110+ DRM_ROOT_ONLY),
6111+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON, psbfb_kms_on_ioctl, DRM_ROOT_ONLY),
6112+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_LEAVE, psb_vt_leave_ioctl,
6113+ DRM_ROOT_ONLY),
6114+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_ENTER, psb_vt_enter_ioctl, DRM_ROOT_ONLY),
6115+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW_INIT, psb_xhw_init_ioctl,
6116+ DRM_ROOT_ONLY),
6117+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW, psb_xhw_ioctl, DRM_ROOT_ONLY),
6118+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_EXTENSION, psb_extension_ioctl, DRM_AUTH),
6119+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_SIZES, psb_sizes_ioctl, DRM_AUTH),
6120+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_FUSE_REG, psb_fuse_reg_ioctl, DRM_AUTH),
6121+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VBT, psb_vbt_ioctl, DRM_AUTH),
6122+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_DC_STATE, psb_dc_state_ioctl, DRM_AUTH),
6123+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
6124+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
6125+ DRM_AUTH),
6126+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
6127+ DRM_AUTH),
6128+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_REGISTER_RW, psb_register_rw_ioctl,
6129+ DRM_AUTH),
6130+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_CMDBUF, psb_cmdbuf_ioctl, DRM_AUTH),
6131+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_SCENE_UNREF, drm_psb_scene_unref_ioctl,
6132+ DRM_AUTH),
6133+
6134+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE, psb_pl_create_ioctl,
6135+ DRM_AUTH),
6136+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_REFERENCE, psb_pl_reference_ioctl,
6137+ DRM_AUTH),
6138+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_UNREF, psb_pl_unref_ioctl,
6139+ DRM_AUTH),
6140+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SYNCCPU, psb_pl_synccpu_ioctl,
6141+ DRM_AUTH),
6142+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_WAITIDLE, psb_pl_waitidle_ioctl,
6143+ DRM_AUTH),
6144+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SETSTATUS, psb_pl_setstatus_ioctl,
6145+ DRM_AUTH),
6146+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_SIGNALED,
6147+ psb_fence_signaled_ioctl, DRM_AUTH),
6148+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_FINISH, psb_fence_finish_ioctl,
6149+ DRM_AUTH),
6150+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_UNREF, psb_fence_unref_ioctl,
6151+ DRM_AUTH),
6152+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_FLIP, psb_page_flip, DRM_AUTH),
6153+ PSB_IOCTL_DEF(DRM_IOCTL_LNC_VIDEO_GETPARAM, lnc_video_getparam, DRM_AUTH)
6154+};
6155+
6156+static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
6157+
6158+static void get_ci_info(struct drm_psb_private *dev_priv)
6159+{
6160+ struct pci_dev *pdev;
6161+
6162+ pdev = pci_get_subsys(0x8086, 0x080b, 0, 0, NULL);
6163+ if (pdev == NULL) {
6164+ /* If no PCI device is found, set size & addr to 0 so no CI
6165+ * share buffer can be created */
6166+ dev_priv->ci_region_start = 0;
6167+ dev_priv->ci_region_size = 0;
6168+ printk(KERN_ERR "can't find CI device, no ci share buffer\n");
6169+ return;
6170+ }
6171+
6172+ dev_priv->ci_region_start = pci_resource_start(pdev, 1);
6173+ dev_priv->ci_region_size = pci_resource_len(pdev, 1);
6174+
6175+ printk(KERN_INFO "ci_region_start %x ci_region_size %d\n",
6176+ dev_priv->ci_region_start, dev_priv->ci_region_size);
6177+
6178+ pci_dev_put(pdev);
6179+
6180+ return;
6181+}
6182+
6183+static void get_rar_info(struct drm_psb_private *dev_priv)
6184+{
6185+ struct pci_dev *pdev;
6186+ const uint32_t msg_opcode = 0xD0;
6187+ const uint32_t bunit_port = 0x3;
6188+ const uint32_t start_addr_reg_offset = 0x10;
6189+ const uint32_t end_addr_reg_offset = 0x11;
6190+ const uint32_t msg_byte_write_enable = 0xf;
6191+ const uint32_t vendor_id = 0x8086;
6192+ const uint32_t device_id = 0x4110;
6193+ const uint32_t lnc_mcr_offset = 0xd0;
6194+ const uint32_t lnc_mdr_offset = 0xd4;
6195+ uint32_t start_addr_msg, end_addr_msg, start_addr, end_addr;
6196+
6197+ pdev = pci_get_subsys(vendor_id, device_id, 0, 0, NULL);
6198+ if (pdev == NULL) {
6199+ dev_priv->rar_region_start = 0;
6200+ dev_priv->rar_region_size = 0;
6201+ goto out;
6202+ }
6203+
6204+ /* get the start msg */
6205+ start_addr_msg = (msg_opcode << 24) |
6206+ (bunit_port << 16) |
6207+ (start_addr_reg_offset << 8) |
6208+ (msg_byte_write_enable << 4);
6209+
6210+ /* config-space writes/reads are assumed to always succeed */
6211+ pci_write_config_dword(pdev,
6212+ lnc_mcr_offset,
6213+ start_addr_msg);
6214+ pci_read_config_dword(pdev,
6215+ lnc_mdr_offset,
6216+ &start_addr);
6217+
6218+ start_addr &= 0xfffffc00u;
6219+
6220+ /* get the end msg */
6221+ end_addr_msg = (msg_opcode << 24) |
6222+ (bunit_port << 16) |
6223+ (end_addr_reg_offset << 8) |
6224+ (msg_byte_write_enable << 4);
6225+
6226+ pci_write_config_dword(pdev,
6227+ lnc_mcr_offset,
6228+ end_addr_msg);
6229+ pci_read_config_dword(pdev,
6230+ lnc_mdr_offset,
6231+ &end_addr);
6232+
6233+ end_addr |= 0x3ffu;
6234+
6235+ dev_priv->rar_region_start = start_addr;
6236+ dev_priv->rar_region_size = end_addr - start_addr + 1;
6237+
6238+ printk(KERN_INFO "rar for video region [0x%x, 0x%x], size %d\n",
6239+ start_addr, end_addr, dev_priv->rar_region_size);
6240+out:
6241+ if (pdev != NULL)
6242+ pci_dev_put(pdev);
6243+
6244+ return;
6245+}
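/*
 * get_rar_info() above talks to the RAR unit through a message port: a
 * command word is written to the MCR config register and the reply is read
 * back from MDR.  A sketch of that pattern factored into a helper; the
 * function name is hypothetical, and the register offsets and message
 * field layout are taken from the constants used above.
 */
static uint32_t lnc_msg_port_read_example(struct pci_dev *pdev,
					  uint32_t opcode, uint32_t port,
					  uint32_t reg, uint32_t byte_en)
{
	const uint32_t lnc_mcr_offset = 0xd0; /* message control register */
	const uint32_t lnc_mdr_offset = 0xd4; /* message data register    */
	uint32_t msg, data;

	msg = (opcode << 24) | (port << 16) | (reg << 8) | (byte_en << 4);
	pci_write_config_dword(pdev, lnc_mcr_offset, msg);
	pci_read_config_dword(pdev, lnc_mdr_offset, &data);
	return data;
}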
6246+
6247+static void psb_set_uopt(struct drm_psb_uopt *uopt)
6248+{
6249+ uopt->clock_gating = drm_psb_clock_gating;
6250+}
6251+
6252+static void psb_lastclose(struct drm_device *dev)
6253+{
6254+ struct drm_psb_private *dev_priv =
6255+ (struct drm_psb_private *) dev->dev_private;
6256+
6257+ if (!dev->dev_private)
6258+ return;
6259+
6260+ if (dev_priv->ta_mem)
6261+ psb_ta_mem_unref(&dev_priv->ta_mem);
6262+ mutex_lock(&dev_priv->cmdbuf_mutex);
6263+ if (dev_priv->context.buffers) {
6264+ vfree(dev_priv->context.buffers);
6265+ dev_priv->context.buffers = NULL;
6266+ }
6267+ mutex_unlock(&dev_priv->cmdbuf_mutex);
6268+}
6269+
6270+static void psb_do_takedown(struct drm_device *dev)
6271+{
6272+ struct drm_psb_private *dev_priv =
6273+ (struct drm_psb_private *) dev->dev_private;
6274+ struct ttm_bo_device *bdev = &dev_priv->bdev;
6275+
6276+
6277+ if (dev_priv->have_mem_rastgeom) {
6278+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_RASTGEOM);
6279+ dev_priv->have_mem_rastgeom = 0;
6280+ }
6281+ if (dev_priv->have_mem_mmu) {
6282+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_MMU);
6283+ dev_priv->have_mem_mmu = 0;
6284+ }
6285+ if (dev_priv->have_mem_aper) {
6286+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_APER);
6287+ dev_priv->have_mem_aper = 0;
6288+ }
6289+ if (dev_priv->have_tt) {
6290+ ttm_bo_clean_mm(bdev, TTM_PL_TT);
6291+ dev_priv->have_tt = 0;
6292+ }
6293+ if (dev_priv->have_vram) {
6294+ ttm_bo_clean_mm(bdev, TTM_PL_VRAM);
6295+ dev_priv->have_vram = 0;
6296+ }
6297+ if (dev_priv->have_camera) {
6298+ ttm_bo_clean_mm(bdev, TTM_PL_CI);
6299+ dev_priv->have_camera = 0;
6300+ }
6301+ if (dev_priv->have_rar) {
6302+ ttm_bo_clean_mm(bdev, TTM_PL_RAR);
6303+ dev_priv->have_rar = 0;
6304+ }
6305+
6306+ psb_msvdx_uninit(dev);
6307+
6308+ if (IS_MRST(dev))
6309+ lnc_topaz_uninit(dev);
6310+
6311+ if (dev_priv->comm) {
6312+ kunmap(dev_priv->comm_page);
6313+ dev_priv->comm = NULL;
6314+ }
6315+ if (dev_priv->comm_page) {
6316+ __free_page(dev_priv->comm_page);
6317+ dev_priv->comm_page = NULL;
6318+ }
6319+}
6320+
6321+void psb_clockgating(struct drm_psb_private *dev_priv)
6322+{
6323+ uint32_t clock_gating;
6324+
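+ /* uopt.clock_gating (the drm_psb_clock_gating option): 1 disables
+  * clock gating for every SGX unit, 2 selects automatic gating, and
+  * any other value keeps the setting currently programmed in hardware. */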
6325+ if (dev_priv->uopt.clock_gating == 1) {
6326+ PSB_DEBUG_INIT("Disabling clock gating.\n");
6327+
6328+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6329+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
6330+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6331+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
6332+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6333+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
6334+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6335+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
6336+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6337+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
6338+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6339+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
6340+
6341+ } else if (dev_priv->uopt.clock_gating == 2) {
6342+ PSB_DEBUG_INIT("Enabling clock gating.\n");
6343+
6344+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO <<
6345+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
6346+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
6347+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
6348+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
6349+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
6350+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
6351+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
6352+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
6353+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
6354+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
6355+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
6356+ } else
6357+ clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
6358+
6359+#ifdef FIX_TG_2D_CLOCKGATE
6360+ clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK;
6361+ clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6362+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT);
6363+#endif
6364+ PSB_WSGX32(clock_gating, PSB_CR_CLKGATECTL);
6365+ (void) PSB_RSGX32(PSB_CR_CLKGATECTL);
6366+}
6367+
6368+#define FB_REG06 0xD0810600
6369+#define FB_MIPI_DISABLE BIT11
6370+#define FB_REG09 0xD0810900
6371+#define FB_SKU_MASK (BIT12|BIT13|BIT14)
6372+#define FB_SKU_SHIFT 12
6373+#define FB_SKU_100 0
6374+#define FB_SKU_100L 1
6375+#define FB_SKU_83 2
6376+#if 1 /* FIXME remove it after PO */
6377+#define FB_GFX_CLK_DIVIDE_MASK (BIT20|BIT21|BIT22)
6378+#define FB_GFX_CLK_DIVIDE_SHIFT 20
6379+#define FB_VED_CLK_DIVIDE_MASK (BIT23|BIT24)
6380+#define FB_VED_CLK_DIVIDE_SHIFT 23
6381+#define FB_VEC_CLK_DIVIDE_MASK (BIT25|BIT26)
6382+#define FB_VEC_CLK_DIVIDE_SHIFT 25
6383+#endif /* FIXME remove it after PO */
6384+
6385+
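+/*
+ * Fuse/SKU settings are read through the message bus on the PCI root
+ * (B0:D0:F0): the register selector is written to config offset 0xD0
+ * (MCR) and the fuse word is read back from offset 0xD4 (MDR).
+ */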
6386+void mrst_get_fuse_settings(struct drm_psb_private *dev_priv)
6387+{
6388+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
6389+ uint32_t fuse_value = 0;
6390+ uint32_t fuse_value_tmp = 0;
6391+
6392+ pci_write_config_dword(pci_root, 0xD0, FB_REG06);
6393+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
6394+
6395+ dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
6396+
6397+ DRM_INFO("internal display is %s\n",
6398+ dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
6399+
6400+ pci_write_config_dword(pci_root, 0xD0, FB_REG09);
6401+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
6402+
6403+ DRM_INFO("SKU values is 0x%x. \n", fuse_value);
6404+ fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
6405+
6406+ dev_priv->fuse_reg_value = fuse_value;
6407+
6408+ switch (fuse_value_tmp) {
6409+ case FB_SKU_100:
6410+ DRM_INFO("SKU values is SKU_100. LNC core clock is 200MHz. \n");
6411+ dev_priv->sku_100 = true;
6412+ break;
6413+ case FB_SKU_100L:
6414+ DRM_INFO("SKU values is SKU_100L. LNC core clock is 100MHz. \n");
6415+ dev_priv->sku_100L = true;
6416+ break;
6417+ case FB_SKU_83:
6418+ DRM_INFO("SKU values is SKU_83. LNC core clock is 166MHz. \n");
6419+ dev_priv->sku_83 = true;
6420+ break;
6421+ default:
6422+ DRM_ERROR("Invalid SKU values, SKU value = 0x%08x\n",
6423+ fuse_value_tmp);
6424+ }
6425+
6426+#if 1 /* FIXME remove it after PO */
6427+ fuse_value_tmp = (fuse_value & FB_GFX_CLK_DIVIDE_MASK) >> FB_GFX_CLK_DIVIDE_SHIFT;
6428+
6429+ switch (fuse_value_tmp) {
6430+ case 0:
6431+ DRM_INFO("Gfx clk : core clk = 1:1. \n");
6432+ break;
6433+ case 1:
6434+ DRM_INFO("Gfx clk : core clk = 4:3. \n");
6435+ break;
6436+ case 2:
6437+ DRM_INFO("Gfx clk : core clk = 8:5. \n");
6438+ break;
6439+ case 3:
6440+ DRM_INFO("Gfx clk : core clk = 2:1. \n");
6441+ break;
6442+ case 4:
6443+ DRM_INFO("Gfx clk : core clk = 16:7. \n");
6444+ break;
6445+ case 5:
6446+ DRM_INFO("Gfx clk : core clk = 8:3. \n");
6447+ break;
6448+ case 6:
6449+ DRM_INFO("Gfx clk : core clk = 16:5. \n");
6450+ break;
6451+ case 7:
6452+ DRM_INFO("Gfx clk : core clk = 4:1. \n");
6453+ break;
6454+ default:
6455+ DRM_ERROR("Invalid GFX CLK DIVIDE values, value = 0x%08x\n",
6456+ fuse_value_tmp);
6457+ }
6458+
6459+ fuse_value_tmp = (fuse_value & FB_VED_CLK_DIVIDE_MASK) >> FB_VED_CLK_DIVIDE_SHIFT;
6460+
6461+ switch (fuse_value_tmp) {
6462+ case 0:
6463+ DRM_INFO("Ved clk : core clk = 1:1. \n");
6464+ break;
6465+ case 1:
6466+ DRM_INFO("Ved clk : core clk = 4:3. \n");
6467+ break;
6468+ case 2:
6469+ DRM_INFO("Ved clk : core clk = 8:5. \n");
6470+ break;
6471+ case 3:
6472+ DRM_INFO("Ved clk : core clk = 2:1. \n");
6473+ break;
6474+ default:
6475+ DRM_ERROR("Invalid VED CLK DIVIDE values, value = 0x%08x\n",
6476+ fuse_value_tmp);
6477+ }
6478+
6479+ fuse_value_tmp = (fuse_value & FB_VEC_CLK_DIVIDE_MASK) >> FB_VEC_CLK_DIVIDE_SHIFT;
6480+
6481+ switch (fuse_value_tmp) {
6482+ case 0:
6483+ DRM_INFO("Vec clk : core clk = 1:1. \n");
6484+ break;
6485+ case 1:
6486+ DRM_INFO("Vec clk : core clk = 4:3. \n");
6487+ break;
6488+ case 2:
6489+ DRM_INFO("Vec clk : core clk = 8:5. \n");
6490+ break;
6491+ case 3:
6492+ DRM_INFO("Vec clk : core clk = 2:1. \n");
6493+ break;
6494+ default:
6495+ DRM_ERROR("Invalid VEC CLK DIVIDE values, value = 0x%08x\n",
6496+ fuse_value_tmp);
6497+ }
6498+#endif /* FIXME remove it after PO */
6499+
6500+ return;
6501+}
6502+
6503+bool mrst_get_vbt_data(struct drm_psb_private *dev_priv)
6504+{
6505+ struct mrst_vbt *pVBT = &dev_priv->vbt_data;
6506+ u32 platform_config_address;
6507+ u8 *pVBT_virtual;
6508+ u8 bpi;
6509+ void *pGCT;
6510+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0,PCI_DEVFN(2,0));
6511+
6512+ /*get the address of the platform config vbt, B0:D2:F0;0xFC */
6513+ pci_read_config_dword(pci_gfx_root,0xFC,&platform_config_address);
6514+ DRM_INFO("drm platform config address is %x\n",platform_config_address);
6515+
6516+ /* check for platform config address == 0. */
6517+ /* this means fw doesn't support vbt */
6518+
6519+ if(platform_config_address == 0) {
6520+ pVBT->Size = 0;
6521+ return false;
6522+ }
6523+
6524+ /* get the virtual address of the vbt */
6525+ pVBT_virtual = ioremap(platform_config_address, sizeof(*pVBT));
6526+
6527+ memcpy(pVBT, pVBT_virtual, sizeof(*pVBT));
6528+ iounmap(pVBT_virtual); /* Free virtual address space */
6529+
6530+ printk(KERN_ALERT "GCT Revision is %x\n",pVBT->Revision);
6531+ pVBT->mrst_gct = NULL;
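+ /* Map the rest of the platform config table (the GCT panel data),
+  * sized from the VBT header copied above. */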
6532+ pVBT->mrst_gct = ioremap(platform_config_address + sizeof(*pVBT) - 4,
6533+ pVBT->Size - sizeof(*pVBT) + 4);
6534+ pGCT = pVBT->mrst_gct;
6535+
6536+ switch (pVBT->Revision) {
6537+ case 0:
6538+ bpi = ((struct mrst_gct_v1 *)pGCT)->PD.BootPanelIndex;
6539+ dev_priv->gct_data.bpi = bpi;
6540+ dev_priv->gct_data.pt =
6541+ ((struct mrst_gct_v1 *)pGCT)->PD.PanelType;
6542+ memcpy(&dev_priv->gct_data.DTD,
6543+ &((struct mrst_gct_v1 *)pGCT)->panel[bpi].DTD,
6544+ sizeof(struct mrst_timing_info));
6545+ dev_priv->gct_data.Panel_Port_Control =
6546+ ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
6547+ break;
6548+ case 1:
6549+ bpi = ((struct mrst_gct_v2 *)pGCT)->PD.BootPanelIndex;
6550+ dev_priv->gct_data.bpi = bpi;
6551+ dev_priv->gct_data.pt =
6552+ ((struct mrst_gct_v2 *)pGCT)->PD.PanelType;
6553+ memcpy(&dev_priv->gct_data.DTD,
6554+ &((struct mrst_gct_v2 *)pGCT)->panel[bpi].DTD,
6555+ sizeof(struct mrst_timing_info));
6556+ dev_priv->gct_data.Panel_Port_Control =
6557+ ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
6558+ break;
6559+ default:
6560+ printk(KERN_ALERT "Unknown revision of GCT!\n");
6561+ pVBT->Size = 0;
6562+ return false;
6563+ }
6564+
6565+ return true;
6566+}
6567+
6568+int mrst_get_ospm_io(struct drm_psb_private *dev_priv)
6569+{
6570+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
6571+ uint32_t ospm_base = 0;
6572+
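+ /* Same MCR/MDR message-bus access as mrst_get_fuse_settings():
+  * 0xd0047800 selects the OSPM base register, and only the low 16 bits
+  * of the reply carry the I/O base. */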
6573+ pci_write_config_dword(pci_root, 0xD0, 0xd0047800);
6574+ pci_read_config_dword(pci_root, 0xD4, &ospm_base);
6575+
6576+ dev_priv->ospm_base = ospm_base & 0x0ffff;
6577+
6578+ DRM_INFO("ospm base is %x\n", dev_priv->ospm_base);
6579+
6580+ return 0;
6581+}
6582+
6583+static int psb_do_init(struct drm_device *dev)
6584+{
6585+ struct drm_psb_private *dev_priv =
6586+ (struct drm_psb_private *) dev->dev_private;
6587+ struct ttm_bo_device *bdev = &dev_priv->bdev;
6588+ struct psb_gtt *pg = dev_priv->pg;
6589+
6590+ uint32_t stolen_gtt;
6591+ uint32_t tt_start;
6592+ uint32_t tt_pages;
6593+
6594+ int ret = -ENOMEM;
6595+
6596+ dev_priv->ta_mem_pages =
6597+ PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024,
6598+ PAGE_SIZE) >> PAGE_SHIFT;
6599+ dev_priv->comm_page = alloc_page(GFP_KERNEL);
6600+ if (!dev_priv->comm_page)
6601+ goto out_err;
6602+
6603+ dev_priv->comm = kmap(dev_priv->comm_page);
6604+ memset((void *) dev_priv->comm, 0, PAGE_SIZE);
6605+
6606+ set_pages_uc(dev_priv->comm_page, 1);
6607+
6608+ /*
6609+ * Initialize sequence numbers for the different command
6610+ * submission mechanisms.
6611+ */
6612+
6613+ dev_priv->sequence[PSB_ENGINE_2D] = 0;
6614+ dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0;
6615+ dev_priv->sequence[PSB_ENGINE_TA] = 0;
6616+ dev_priv->sequence[PSB_ENGINE_HPRAST] = 0;
6617+
6618+ if (pg->gatt_start & 0x0FFFFFFF) {
6619+ DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
6620+ ret = -EINVAL;
6621+ goto out_err;
6622+ }
6623+
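+ /* Pages of GTT entries needed to map the stolen area (one 4-byte PTE
+  * per page, rounded up and capped at the GTT size). Each such page
+  * maps 1024 aperture pages, so the first free GATT offset is placed
+  * just past the range they cover. */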
6624+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
6625+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
6626+ stolen_gtt =
6627+ (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
6628+
6629+ dev_priv->gatt_free_offset = pg->gatt_start +
6630+ (stolen_gtt << PAGE_SHIFT) * 1024;
6631+
6632+ /*
6633+ * Insert a cache-coherent communications page in mmu space
6634+ * just after the stolen area. Will be used for fencing etc.
6635+ */
6636+
6637+ dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset;
6638+ dev_priv->gatt_free_offset += PAGE_SIZE;
6639+
6640+ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
6641+ &dev_priv->comm_page,
6642+ dev_priv->comm_mmu_offset, 1, 0, 0, 0);
6643+
6644+ if (ret)
6645+ goto out_err;
6646+
6647+ if (1 || drm_debug) {
6648+ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
6649+ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
6650+ DRM_INFO("SGX core id = 0x%08x\n", core_id);
6651+ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
6652+ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
6653+ _PSB_CC_REVISION_MAJOR_SHIFT,
6654+ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
6655+ _PSB_CC_REVISION_MINOR_SHIFT);
6656+ DRM_INFO
6657+ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
6658+ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
6659+ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
6660+ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
6661+ _PSB_CC_REVISION_DESIGNER_SHIFT);
6662+ }
6663+
6664+ spin_lock_init(&dev_priv->irqmask_lock);
6665+ dev_priv->fence0_irq_on = 0;
6666+
6667+ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
6668+ pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
6669+ tt_start = dev_priv->gatt_free_offset - pg->gatt_start;
6670+ tt_pages -= tt_start >> PAGE_SHIFT;
6671+
6672+ dev_priv->sizes.ta_mem_size = drm_psb_ta_mem_size / 1024;
6673+
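+ /*
+  * Register the TTM memory ranges this chip actually has: stolen VRAM,
+  * the CI and RAR regions on Moorestown, the TT aperture and the
+  * MMU-only ranges. Each have_* flag records what was set up so that
+  * psb_do_takedown() only cleans up the successful ones.
+  */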
6674+ if (!ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0,
6675+ pg->vram_stolen_size >> PAGE_SHIFT)) {
6676+ dev_priv->have_vram = 1;
6677+ dev_priv->sizes.vram_size =
6678+ pg->vram_stolen_size / (1024 * 1024);
6679+ }
6680+
6681+ if (IS_MRST(dev) &&
6682+ (dev_priv->ci_region_size != 0) &&
6683+ !ttm_bo_init_mm(bdev, TTM_PL_CI, 0,
6684+ dev_priv->ci_region_size >> PAGE_SHIFT)) {
6685+ dev_priv->have_camera = 1;
6686+ }
6687+
6688+ /* a RAR region is always reserved for video on Moorestown, so this is safe */
6689+ if (IS_MRST(dev) &&
6690+ (dev_priv->rar_region_size != 0) &&
6691+ !ttm_bo_init_mm(bdev, TTM_PL_RAR, 0,
6692+ dev_priv->rar_region_size >> PAGE_SHIFT)) {
6693+ dev_priv->have_rar = 1;
6694+ }
6695+
6696+ if (!ttm_bo_init_mm(bdev, TTM_PL_TT, tt_start >> PAGE_SHIFT,
6697+ tt_pages)) {
6698+ dev_priv->have_tt = 1;
6699+ dev_priv->sizes.tt_size =
6700+ (tt_pages << PAGE_SHIFT) / (1024 * 1024);
6701+ }
6702+
6703+ if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_MMU, 0x00000000,
6704+ (pg->gatt_start - PSB_MEM_MMU_START -
6705+ pg->ci_stolen_size) >> PAGE_SHIFT)) {
6706+ dev_priv->have_mem_mmu = 1;
6707+ dev_priv->sizes.mmu_size =
6708+ (pg->gatt_start - PSB_MEM_MMU_START - pg->ci_stolen_size) /
6709+ (1024*1024);
6710+ }
6711+
6712+ if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_RASTGEOM, 0x00000000,
6713+ (PSB_MEM_MMU_START -
6714+ PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) {
6715+ dev_priv->have_mem_rastgeom = 1;
6716+ dev_priv->sizes.rastgeom_size =
6717+ (PSB_MEM_MMU_START - PSB_MEM_RASTGEOM_START) /
6718+ (1024 * 1024);
6719+ }
6720+#if 0
6721+ if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) {
6722+ if (!ttm_bo_init_mm
6723+ (bdev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT,
6724+ pg->gatt_pages - PSB_TT_PRIV0_PLIMIT, 1)) {
6725+ dev_priv->have_mem_aper = 1;
6726+ }
6727+ }
6728+#endif
6729+
6730+ PSB_DEBUG_INIT("Init MSVDX\n");
6731+ psb_msvdx_init(dev);
6732+
6733+ if (IS_MRST(dev)) {
6734+ PSB_DEBUG_INIT("Init Topaz\n");
6735+ lnc_topaz_init(dev);
6736+ }
6737+
6738+ return 0;
6739+out_err:
6740+ psb_do_takedown(dev);
6741+ return ret;
6742+}
6743+
6744+static int psb_intel_opregion_init(struct drm_device *dev)
6745+{
6746+ struct drm_psb_private * dev_priv = dev->dev_private;
6747+ /*struct psb_intel_opregion * opregion = &dev_priv->opregion;*/
6748+ u32 opregion_phy;
6749+ void * base;
6750+ u32 * lid_state;
6751+
6752+ dev_priv->lid_state = NULL;
6753+
6754+ pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
6755+ if(opregion_phy == 0) {
6756+ DRM_DEBUG("Opregion not supported, won't support lid-switch\n");
6757+ return -ENOTSUPP;
6758+ }
6759+ DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
6760+
6761+ base = ioremap(opregion_phy, 8*1024);
6762+ if(!base) {
6763+ return -ENOMEM;
6764+ }
6765+
6766+ lid_state = base + 0x01ac;
6767+
6768+ DRM_DEBUG("Lid switch state 0x%08x\n", *lid_state);
6769+
6770+ dev_priv->lid_state = lid_state;
6771+ dev_priv->lid_last_state = *lid_state;
6772+ return 0;
6773+}
6774+
6775+#if 0
6776+/**
6777+ * Get a section from BDB by section id, port from i915 driver
6778+ */
6779+static void * psb_intel_vbt_find_section(struct bdb_header * bdb, int section_id)
6780+{
6781+ u8 * base = (u8 *)bdb;
6782+ int index = 0;
6783+ u16 total, current_size;
6784+ u8 current_id;
6785+
6786+ index += bdb->header_size;
6787+ total = bdb->bdb_size;
6788+
6789+ while(index < total) {
6790+ current_id = *(base + index);
6791+ index++;
6792+ current_size = *((u16 *)(base + index));
6793+ index += 2;
6794+ if(current_id == section_id)
6795+ return base + index;
6796+ index += current_size;
6797+ }
6798+
6799+ return NULL;
6800+}
6801+
6802+static void psb_intel_vbt_parse_backlight_data(struct drm_psb_private * dev_priv, struct bdb_header * bdb)
6803+{
6804+ struct bdb_lvds_backlight * lvds_bl = NULL;
6805+ u8 p_type = 0;
6806+ void * bl_start = NULL;
6807+ struct bdb_lvds_options * lvds_opts
6808+ = psb_intel_vbt_find_section(bdb, BDB_LVDS_OPTIONS);
6809+
6810+ if(lvds_opts) {
6811+ DRM_DEBUG("lvds_options found at %p\n", lvds_opts);
6812+ p_type = lvds_opts->panel_type;
6813+ } else {
6814+ DRM_DEBUG("no lvds_options\n");
6815+ }
6816+
6817+ bl_start = psb_intel_vbt_find_section(bdb, BDB_LVDS_BACKLIGHT);
6818+ lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
6819+
6820+ dev_priv->lvds_bl = lvds_bl;
6821+}
6822+
6823+/**
6824+ * Retrieve VBT and blc data. port from i915 driver
6825+ */
6826+static int psb_get_vbt_data(struct drm_device * dev)
6827+{
6828+ struct drm_psb_private * dev_priv = dev->dev_private;
6829+ struct pci_dev * pdev = dev->pdev;
6830+ struct vbt_header * vbt = NULL;
6831+ struct bdb_header * bdb;
6832+ u8 __iomem * bios;
6833+
6834+ size_t size;
6835+ int i;
6836+
6837+ /*FIXME: unmap it when driver exit*/
6838+ bios = pci_map_rom(pdev, &size);
6839+ if(!bios)
6840+ return -1;
6841+
6842+ for(i=0; i + 4 < size; i++) {
6843+ if(!memcmp(bios + i, "$VBT", 4)) {
6844+ vbt = (struct vbt_header *)(bios + i);
6845+ break;
6846+ }
6847+ }
6848+
6849+ if(!vbt) {
6850+ DRM_ERROR("VBT sigature missing\n");
6851+ pci_unmap_rom(pdev, bios);
6852+ return -1;
6853+ }
6854+
6855+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
6856+
6857+ psb_intel_vbt_parse_backlight_data(dev_priv, bdb);
6858+
6859+ DRM_INFO("BIOS Data Block found at %p\n", bdb);
6860+
6861+ return 0;
6862+}
6863+#endif
6864+
6865+static int psb_driver_unload(struct drm_device *dev)
6866+{
6867+ struct drm_psb_private *dev_priv =
6868+ (struct drm_psb_private *) dev->dev_private;
6869+
6870+ psb_backlight_exit(); /*writes minimum value to backlight HW reg */
6871+
6872+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
6873+ drm_irq_uninstall(dev);
6874+ }
6875+
6876+ if (drm_psb_no_fb == 0)
6877+ psb_modeset_cleanup(dev);
6878+
6879+ if (dev_priv) {
6880+ struct ttm_bo_device *bdev = &dev_priv->bdev;
6881+
6882+ if(IS_POULSBO(dev))
6883+ psb_lid_timer_takedown(dev_priv);
6884+
6885+ psb_watchdog_takedown(dev_priv);
6886+ psb_do_takedown(dev);
6887+ psb_xhw_takedown(dev_priv);
6888+ psb_scheduler_takedown(&dev_priv->scheduler);
6889+
6890+ if (dev_priv->have_mem_pds) {
6891+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_PDS);
6892+ dev_priv->have_mem_pds = 0;
6893+ }
6894+ if (dev_priv->have_mem_kernel) {
6895+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_KERNEL);
6896+ dev_priv->have_mem_kernel = 0;
6897+ }
6898+
6899+ if (dev_priv->pf_pd) {
6900+ psb_mmu_free_pagedir(dev_priv->pf_pd);
6901+ dev_priv->pf_pd = NULL;
6902+ }
6903+ if (dev_priv->mmu) {
6904+ struct psb_gtt *pg = dev_priv->pg;
6905+
6906+ down_read(&pg->sem);
6907+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
6908+ (dev_priv->mmu),
6909+ pg->gatt_start,
6910+ pg->vram_stolen_size >>
6911+ PAGE_SHIFT);
6912+ if (pg->ci_stolen_size != 0)
6913+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
6914+ (dev_priv->mmu),
6915+ pg->gatt_start - pg->ci_stolen_size,
6916+ pg->ci_stolen_size >>
6917+ PAGE_SHIFT);
6918+ if (pg->rar_stolen_size != 0)
6919+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
6920+ (dev_priv->mmu),
6921+ pg->gatt_start + pg->vram_stolen_size,
6922+ pg->rar_stolen_size >>
6923+ PAGE_SHIFT);
6924+ up_read(&pg->sem);
6925+ psb_mmu_driver_takedown(dev_priv->mmu);
6926+ dev_priv->mmu = NULL;
6927+ }
6928+ psb_gtt_takedown(dev_priv->pg, 1);
6929+ if (dev_priv->scratch_page) {
6930+ __free_page(dev_priv->scratch_page);
6931+ dev_priv->scratch_page = NULL;
6932+ }
6933+ if (dev_priv->has_bo_device) {
6934+ ttm_bo_device_release(&dev_priv->bdev);
6935+ dev_priv->has_bo_device = 0;
6936+ }
6937+ if (dev_priv->has_fence_device) {
6938+ ttm_fence_device_release(&dev_priv->fdev);
6939+ dev_priv->has_fence_device = 0;
6940+ }
6941+ if (dev_priv->vdc_reg) {
6942+ iounmap(dev_priv->vdc_reg);
6943+ dev_priv->vdc_reg = NULL;
6944+ }
6945+ if (dev_priv->sgx_reg) {
6946+ iounmap(dev_priv->sgx_reg);
6947+ dev_priv->sgx_reg = NULL;
6948+ }
6949+ if (dev_priv->msvdx_reg) {
6950+ iounmap(dev_priv->msvdx_reg);
6951+ dev_priv->msvdx_reg = NULL;
6952+ }
6953+
6954+ if (IS_MRST(dev)) {
6955+ if (dev_priv->topaz_reg) {
6956+ iounmap(dev_priv->topaz_reg);
6957+ dev_priv->topaz_reg = NULL;
6958+ }
6959+ }
6960+
6961+ if (dev_priv->tdev)
6962+ ttm_object_device_release(&dev_priv->tdev);
6963+
6964+ if (dev_priv->has_global)
6965+ psb_ttm_global_release(dev_priv);
6966+
6967+ kfree(dev_priv);
6968+ dev->dev_private = NULL;
6969+
6970+ /* destroy VBT data */
6971+ if(IS_POULSBO(dev))
6972+ psb_intel_destory_bios(dev);
6973+ }
6974+
6975+ powermgmt_shutdown();
6976+
6977+ return 0;
6978+}
6979+
6980+
6981+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
6982+{
6983+ struct drm_psb_private *dev_priv;
6984+ struct ttm_bo_device *bdev;
6985+ unsigned long resource_start;
6986+ struct psb_gtt *pg;
6987+ unsigned long irqflags;
6988+ int ret = -ENOMEM;
6989+
6990+ DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
6991+
6992+ if (IS_MRST(dev))
6993+ DRM_INFO("Run drivers on Moorestown platform!\n");
6994+ else
6995+ DRM_INFO("Run drivers on Poulsbo platform!\n");
6996+
6997+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
6998+ if (dev_priv == NULL)
6999+ return -ENOMEM;
7000+
7001+ dev_priv->dev = dev;
7002+ bdev = &dev_priv->bdev;
7003+
7004+ psb_init_ospm(dev_priv);
7005+
7006+ ret = psb_ttm_global_init(dev_priv);
7007+ if (unlikely(ret != 0))
7008+ goto out_err;
7009+ dev_priv->has_global = 1;
7010+
7011+ dev_priv->tdev = ttm_object_device_init
7012+ (dev_priv->mem_global_ref.object, PSB_OBJECT_HASH_ORDER);
7013+ if (unlikely(dev_priv->tdev == NULL))
7014+ goto out_err;
7015+
7016+ powermgmt_init();
7017+
7018+ mutex_init(&dev_priv->temp_mem);
7019+ mutex_init(&dev_priv->cmdbuf_mutex);
7020+ mutex_init(&dev_priv->reset_mutex);
7021+ INIT_LIST_HEAD(&dev_priv->context.validate_list);
7022+ INIT_LIST_HEAD(&dev_priv->context.kern_validate_list);
7023+ psb_init_disallowed();
7024+
7025+#ifdef FIX_TG_16
7026+ atomic_set(&dev_priv->lock_2d, 0);
7027+ atomic_set(&dev_priv->ta_wait_2d, 0);
7028+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
7029+ atomic_set(&dev_priv->waiters_2d, 0);;
7030+ DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
7031+#else
7032+ mutex_init(&dev_priv->mutex_2d);
7033+#endif
7034+
7035+ spin_lock_init(&dev_priv->reloc_lock);
7036+
7037+ DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
7038+ DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
7039+
7040+ dev->dev_private = (void *) dev_priv;
7041+ dev_priv->chipset = chipset;
7042+ psb_set_uopt(&dev_priv->uopt);
7043+
7044+ PSB_DEBUG_GENERAL("Init watchdog and scheduler\n");
7045+ psb_watchdog_init(dev_priv);
7046+ psb_scheduler_init(dev, &dev_priv->scheduler);
7047+
7048+
7049+ PSB_DEBUG_INIT("Mapping MMIO\n");
7050+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
7051+
7052+ if (IS_MRST(dev))
7053+ dev_priv->msvdx_reg =
7054+ ioremap(resource_start + MRST_MSVDX_OFFSET,
7055+ PSB_MSVDX_SIZE);
7056+ else
7057+ dev_priv->msvdx_reg =
7058+ ioremap(resource_start + PSB_MSVDX_OFFSET,
7059+ PSB_MSVDX_SIZE);
7060+
7061+ if (!dev_priv->msvdx_reg)
7062+ goto out_err;
7063+
7064+ if (IS_MRST(dev)) {
7065+ dev_priv->topaz_reg =
7066+ ioremap(resource_start + LNC_TOPAZ_OFFSET,
7067+ LNC_TOPAZ_SIZE);
7068+ if (!dev_priv->topaz_reg)
7069+ goto out_err;
7070+ }
7071+
7072+ dev_priv->vdc_reg =
7073+ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
7074+ if (!dev_priv->vdc_reg)
7075+ goto out_err;
7076+
7077+ if (IS_MRST(dev))
7078+ dev_priv->sgx_reg =
7079+ ioremap(resource_start + MRST_SGX_OFFSET,
7080+ PSB_SGX_SIZE);
7081+ else
7082+ dev_priv->sgx_reg =
7083+ ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
7084+
7085+ if (!dev_priv->sgx_reg)
7086+ goto out_err;
7087+
7088+ if (IS_MRST(dev)){
7089+ mrst_get_fuse_settings(dev_priv);
7090+ mrst_get_vbt_data(dev_priv);
7091+ } else {
7092+ psb_intel_opregion_init(dev);
7093+ psb_intel_init_bios(dev);
7094+ }
7095+
7096+ PSB_DEBUG_INIT("Init TTM fence and BO driver\n");
7097+
7098+ if (IS_MRST(dev))
7099+ mrst_get_ospm_io(dev_priv);
7100+
7101+ if (IS_MRST(dev)) {
7102+ get_ci_info(dev_priv);
7103+ get_rar_info(dev_priv);
7104+ }
7105+
7106+ psb_clockgating(dev_priv);
7107+
7108+ ret = psb_ttm_fence_device_init(&dev_priv->fdev);
7109+ if (unlikely(ret != 0))
7110+ goto out_err;
7111+
7112+ dev_priv->has_fence_device = 1;
7113+ ret = ttm_bo_device_init(bdev,
7114+ dev_priv->mem_global_ref.object,
7115+ &psb_ttm_bo_driver,
7116+ DRM_PSB_FILE_PAGE_OFFSET);
7117+ if (unlikely(ret != 0))
7118+ goto out_err;
7119+ dev_priv->has_bo_device = 1;
7120+ ttm_lock_init(&dev_priv->ttm_lock);
7121+
7122+ ret = -ENOMEM;
7123+
7124+ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
7125+ if (!dev_priv->scratch_page)
7126+ goto out_err;
7127+
7128+ set_pages_uc(dev_priv->scratch_page, 1);
7129+
7130+ dev_priv->pg = psb_gtt_alloc(dev);
7131+ if (!dev_priv->pg)
7132+ goto out_err;
7133+
7134+ ret = psb_gtt_init(dev_priv->pg, 0);
7135+ if (ret)
7136+ goto out_err;
7137+
7138+ dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg,
7139+ drm_psb_trap_pagefaults, 0,
7140+ dev_priv);
7141+ if (!dev_priv->mmu)
7142+ goto out_err;
7143+
7144+ pg = dev_priv->pg;
7145+
7146+ /*
7147+ * Make sgx MMU aware of the stolen memory area we call VRAM.
7148+ */
7149+
7150+ down_read(&pg->sem);
7151+ ret =
7152+ psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
7153+ (dev_priv->mmu),
7154+ pg->stolen_base >> PAGE_SHIFT,
7155+ pg->gatt_start,
7156+ pg->vram_stolen_size >> PAGE_SHIFT, 0);
7157+ up_read(&pg->sem);
7158+ if (ret)
7159+ goto out_err;
7160+
7161+ /*
7162+ * Make sgx MMU aware of the CI stolen memory area.
7163+ */
7164+ if (dev_priv->pg->ci_stolen_size != 0) {
7165+ down_read(&pg->sem);
7166+ ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
7167+ (dev_priv->mmu),
7168+ dev_priv->ci_region_start >> PAGE_SHIFT,
7169+ pg->gatt_start - pg->ci_stolen_size,
7170+ pg->ci_stolen_size >> PAGE_SHIFT, 0);
7171+ up_read(&pg->sem);
7172+ if (ret)
7173+ goto out_err;
7174+ }
7175+
7176+ /*
7177+ * Make sgx MMU aware of the rar stolen memory area.
7178+ */
7179+ if (dev_priv->pg->rar_stolen_size != 0) {
7180+ down_read(&pg->sem);
7181+ ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
7182+ dev_priv->rar_region_start >> PAGE_SHIFT,
7183+ pg->gatt_start + pg->vram_stolen_size,
7184+ pg->rar_stolen_size >> PAGE_SHIFT, 0);
7185+ up_read(&pg->sem);
7186+ if (ret)
7187+ goto out_err;
7188+ }
7189+
7190+ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
7191+ if (!dev_priv->pf_pd)
7192+ goto out_err;
7193+
7194+ /*
7195+ * Make all presumably unused requestors page-fault by making them
7196+ * use context 1 which does not have any valid mappings.
7197+ */
7198+
7199+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
7200+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
7201+ PSB_RSGX32(PSB_CR_BIF_BANK1);
7202+
7203+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
7204+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
7205+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
7206+
7207+ psb_init_2d(dev_priv);
7208+
7209+ ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_KERNEL, 0x00000000,
7210+ (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START)
7211+ >> PAGE_SHIFT);
7212+ if (ret)
7213+ goto out_err;
7214+ dev_priv->have_mem_kernel = 1;
7215+
7216+ ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_PDS, 0x00000000,
7217+ (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START)
7218+ >> PAGE_SHIFT);
7219+ if (ret)
7220+ goto out_err;
7221+ dev_priv->have_mem_pds = 1;
7222+ dev_priv->sizes.pds_size =
7223+ (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START) / (1024 * 1024);
7224+ PSB_DEBUG_INIT("Begin to init SGX/MSVDX/Topaz\n");
7225+
7226+ ret = psb_do_init(dev);
7227+ if (ret)
7228+ return ret;
7229+
7230+ ret = psb_xhw_init(dev);
7231+ if (ret)
7232+ return ret;
7233+
7234+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
7235+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
7236+
7237+ /**
7238+ * Init lid switch timer.
7239+ * NOTE: must do this after psb_intel_opregion_init
7240+ * and psb_backlight_init
7241+ */
7242+ if(IS_POULSBO(dev) && dev_priv->lid_state) {
7243+ psb_lid_timer_init(dev_priv);
7244+ }
7245+
7246+ /*initialize the MSI for MRST*/
7247+ if (IS_MRST(dev)) {
7248+ if (pci_enable_msi(dev->pdev)) {
7249+ DRM_ERROR("Enable MSI for MRST failed!\n");
7250+ } else {
7251+ PSB_DEBUG_INIT("Enabled MSI IRQ (%d)\n",
7252+ dev->pdev->irq);
7253+ /* pci_write_config_word(pdev, 0x04, 0x07); */
7254+ }
7255+ }
7256+
7257+ /* Init the DRM vblank module. Must be done before calling drm_irq_install(). */
7258+ ret = drm_vblank_init(dev, PSB_NUM_PIPE);
7259+ if (ret)
7260+ goto out_err;
7261+
7262+ /*
7263+ * Install interrupt handlers prior to powering off SGX or else we will
7264+ * crash.
7265+ */
7266+ dev_priv->vdc_irq_mask = 0;
7267+ dev_priv->sgx_irq_mask = 0;
7268+ dev_priv->sgx2_irq_mask = 0;
7269+ dev_priv->pipestat[0] = 0;
7270+ dev_priv->pipestat[1] = 0;
7271+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
7272+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
7273+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
7274+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
7275+ if (drm_core_check_feature(dev, DRIVER_MODESET))
7276+ drm_irq_install(dev);
7277+#if 0
7278+ /*set SGX in low power mode*/
7279+ if (drm_psb_ospm && IS_MRST(dev))
7280+ if (psb_try_power_down_sgx(dev))
7281+ PSB_DEBUG_PM("initialize SGX to low power failed\n");
7282+ if (IS_MRST(dev))
7283+ if (psb_try_power_down_msvdx(dev))
7284+ PSB_DEBUG_PM("Initialize MSVDX to low power failed\n");
7285+ if (IS_MRST(dev)) {
7286+ if (psb_try_power_down_topaz(dev))
7287+ PSB_DEBUG_PM("Initialize TOPAZ to low power failed\n");
7288+ }
7289+#endif
7290+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
7291+
7292+ dev->driver->get_vblank_counter = psb_get_vblank_counter;
7293+
7294+
7295+
7296+ if (drm_psb_no_fb == 0) {
7297+ psb_modeset_init(dev);
7298+ drm_helper_initial_config(dev);
7299+ }
7300+
7301+ /*must be after mrst_get_fuse_settings()*/
7302+ ret = psb_backlight_init(dev);
7303+ if (ret)
7304+ return ret;
7305+
7306+ /*dri_page_flipping is set when flipping is enabled*/
7307+ dev_priv->dri_page_flipping = 0;
7308+
7309+ return 0;
7310+out_err:
7311+ psb_driver_unload(dev);
7312+ return ret;
7313+}
7314+
7315+int psb_driver_device_is_agp(struct drm_device *dev)
7316+{
7317+ return 0;
7318+}
7319+
7320+int psb_extension_ioctl(struct drm_device *dev, void *data,
7321+ struct drm_file *file_priv)
7322+{
7323+ union drm_psb_extension_arg *arg = data;
7324+ struct drm_psb_extension_rep *rep = &arg->rep;
7325+
7326+ /* tricky fix for SGX HW access from user space when XPSB is loaded */
7327+ static int firsttime = 1;
7328+ if (firsttime) {
7329+ firsttime = 0;
7330+ powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true);
7331+ }
7332+ if (strcmp(arg->extension, "psb_ttm_placement_alphadrop") == 0) {
7333+ rep->exists = 1;
7334+ rep->driver_ioctl_offset = DRM_PSB_PLACEMENT_OFFSET;
7335+ rep->sarea_offset = 0;
7336+ rep->major = 1;
7337+ rep->minor = 0;
7338+ rep->pl = 0;
7339+ return 0;
7340+ }
7341+ if (strcmp(arg->extension, "psb_ttm_fence_alphadrop") == 0) {
7342+ rep->exists = 1;
7343+ rep->driver_ioctl_offset = DRM_PSB_FENCE_OFFSET;
7344+ rep->sarea_offset = 0;
7345+ rep->major = 1;
7346+ rep->minor = 0;
7347+ rep->pl = 0;
7348+ return 0;
7349+ }
7350+ if (strcmp(arg->extension, "psb_ttm_execbuf_alphadrop") == 0) {
7351+ rep->exists = 1;
7352+ rep->driver_ioctl_offset = DRM_PSB_CMDBUF;
7353+ rep->sarea_offset = 0;
7354+ rep->major = 1;
7355+ rep->minor = 0;
7356+ rep->pl = 0;
7357+ return 0;
7358+ }
7359+
7360+ /*return the page flipping ioctl offset*/
7361+ if (strcmp(arg->extension, "psb_page_flipping_alphadrop") == 0) {
7362+ rep->exists = 1;
7363+ rep->driver_ioctl_offset = DRM_PSB_FLIP;
7364+ rep->sarea_offset = 0;
7365+ rep->major = 1;
7366+ rep->minor = 0;
7367+ rep->pl = 0;
7368+ return 0;
7369+ }
7370+
7371+ /* return the video rar offset */
7372+ if (strcmp(arg->extension, "lnc_video_getparam") == 0) {
7373+ rep->exists = 1;
7374+ rep->driver_ioctl_offset = DRM_LNC_VIDEO_GETPARAM;
7375+ rep->sarea_offset = 0;
7376+ rep->major = 1;
7377+ rep->minor = 0;
7378+ rep->pl = 0;
7379+ return 0;
7380+ }
7381+
7382+ rep->exists = 0;
7383+ return 0;
7384+}
7385+
7386+/*keep following code*/
7387+#if 0
7388+static void psb_display_states_restore(struct drm_device * dev)
7389+{
7390+ struct drm_crtc * crtc = NULL;
7391+ struct drm_connector * connector = NULL;
7392+ struct drm_crtc_helper_funcs * crtc_helper_funcs = NULL;
7393+ struct drm_encoder * encoder = NULL;
7394+ struct drm_encoder_helper_funcs * encoder_helper_funcs = NULL;
7395+ struct drm_psb_private * dev_priv =
7396+ (struct drm_psb_private *)dev->dev_private;
7397+
7398+ mutex_lock(&dev->mode_config.mutex);
7399+#if 0
7400+ /*Output dpms off*/
7401+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
7402+ encoder_helper_funcs =
7403+ (struct drm_encoder_helper_funcs *)encoder->helper_private;
7404+ if(encoder_helper_funcs && encoder_helper_funcs->dpms)
7405+ encoder_helper_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
7406+ }
7407+
7408+ psb_intel_wait_for_vblank(dev);
7409+
7410+ /*CRTC dpms off*/
7411+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7412+ crtc_helper_funcs =
7413+ (struct drm_crtc_helper_funcs *)crtc->helper_private;
7414+ //if(crtc_helper_funcs && crtc_helper_funcs->dpms)
7415+ if(drm_helper_crtc_in_use(crtc))
7416+ crtc_helper_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
7417+ }
7418+
7419+ /*Restore CRTC states*/
7420+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7421+ //if(crtc->funcs && crtc->funcs->restore)
7422+ if(drm_helper_crtc_in_use(crtc))
7423+ crtc->funcs->restore(crtc);
7424+ }
7425+#endif
7426+
7427+ /*Restore outputs states*/
7428+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
7429+ if(connector->funcs && connector->funcs->restore)
7430+ connector->funcs->restore(connector);
7431+ }
7432+
7433+
7434+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7435+ if(drm_helper_crtc_in_use(crtc))
7436+ crtc->funcs->restore(crtc);
7437+ }
7438+
7439+ mutex_unlock(&dev->mode_config.mutex);
7440+
7441+ if(IS_MRST(dev))
7442+ return;
7443+
7444+ REG_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
7445+ REG_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
7446+ REG_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
7447+ REG_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
7448+
7449+ /*TODO: SWF registers restore*/
7450+}
7451+
7452+static void psb_display_states_save(struct drm_device * dev)
7453+{
7454+ struct drm_crtc * crtc = NULL;
7455+ struct drm_connector * connector = NULL;
7456+ struct drm_psb_private * dev_priv =
7457+ (struct drm_psb_private *)dev->dev_private;
7458+
7459+ mutex_lock(&dev->mode_config.mutex);
7460+ /*Save output states*/
7461+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
7462+ if(connector->funcs && connector->funcs->save)
7463+ connector->funcs->save(connector);
7464+ }
7465+
7466+#if 1
7467+ /*Restore CRTC states*/
7468+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7469+ //if(crtc->funcs && crtc->funcs->save)
7470+ if(drm_helper_crtc_in_use(crtc))
7471+ crtc->funcs->save(crtc);
7472+ }
7473+#endif
7474+
7475+ mutex_unlock(&dev->mode_config.mutex);
7476+
7477+ if(IS_MRST(dev))
7478+ return;
7479+
7480+ dev_priv->saveVCLK_DIVISOR_VGA0 = REG_READ(VCLK_DIVISOR_VGA0);
7481+ dev_priv->saveVCLK_DIVISOR_VGA1 = REG_READ(VCLK_DIVISOR_VGA1);
7482+ dev_priv->saveVCLK_POST_DIV = REG_READ(VCLK_POST_DIV);
7483+ dev_priv->saveVGACNTRL = REG_READ(VGACNTRL);
7484+
7485+ /*TODO: save SWF here if necessary*/
7486+}
7487+#endif
7488+
7489+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
7490+ struct drm_file *file_priv)
7491+{
7492+ struct drm_psb_private *dev_priv = psb_priv(dev);
7493+ struct ttm_bo_device *bdev = &dev_priv->bdev;
7494+ struct ttm_mem_type_manager *man;
7495+ int clean;
7496+ int ret;
7497+
7498+ ret = ttm_write_lock(&dev_priv->ttm_lock, 1,
7499+ psb_fpriv(file_priv)->tfile);
7500+ if (unlikely(ret != 0))
7501+ return ret;
7502+
7503+ powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true);
7504+
7505+ /*
7506+ * Clean VRAM and TT for fbdev.
7507+ */
7508+
7509+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
7510+ if (unlikely(ret != 0))
7511+ goto out_unlock;
7512+
7513+ man = &bdev->man[TTM_PL_VRAM];
7514+ spin_lock(&bdev->lru_lock);
7515+ clean = drm_mm_clean(&man->manager);
7516+ spin_unlock(&bdev->lru_lock);
7517+ if (unlikely(!clean))
7518+ DRM_INFO("Notice: VRAM was not clean after VT switch, if you are running fbdev please ignore.\n");
7519+
7520+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_TT);
7521+ if (unlikely(ret != 0))
7522+ goto out_unlock;
7523+
7524+ man = &bdev->man[TTM_PL_TT];
7525+ spin_lock(&bdev->lru_lock);
7526+ clean = drm_mm_clean(&man->manager);
7527+ spin_unlock(&bdev->lru_lock);
7528+ if (unlikely(!clean))
7529+ DRM_INFO("Warning: GATT was not clean after VT switch.\n");
7530+
7531+ ttm_bo_swapout_all(&dev_priv->bdev);
7532+
7533+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
7534+ if (drm_psb_ospm && IS_MRST(dev))
7535+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
7536+ return 0;
7537+out_unlock:
7538+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
7539+ if (drm_psb_ospm && IS_MRST(dev))
7540+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
7541+ (void) ttm_write_unlock(&dev_priv->ttm_lock,
7542+ psb_fpriv(file_priv)->tfile);
7543+ return ret;
7544+}
7545+
7546+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
7547+ struct drm_file *file_priv)
7548+{
7549+ struct drm_psb_private *dev_priv = psb_priv(dev);
7550+ return ttm_write_unlock(&dev_priv->ttm_lock,
7551+ psb_fpriv(file_priv)->tfile);
7552+}
7553+
7554+static int psb_sizes_ioctl(struct drm_device *dev, void *data,
7555+ struct drm_file *file_priv)
7556+{
7557+ struct drm_psb_private *dev_priv = psb_priv(dev);
7558+ struct drm_psb_sizes_arg *arg =
7559+ (struct drm_psb_sizes_arg *) data;
7560+
7561+ *arg = dev_priv->sizes;
7562+ return 0;
7563+}
7564+
7565+static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
7566+ struct drm_file *file_priv)
7567+{
7568+ struct drm_psb_private *dev_priv = psb_priv(dev);
7569+ uint32_t *arg = data;
7570+
7571+ *arg = dev_priv->fuse_reg_value;
7572+ return 0;
7573+}
7574+static int psb_vbt_ioctl(struct drm_device *dev, void *data,
7575+ struct drm_file *file_priv)
7576+{
7577+ struct drm_psb_private *dev_priv = psb_priv(dev);
7578+ struct gct_ioctl_arg *pGCT = data;
7579+
7580+ memcpy(pGCT, &dev_priv->gct_data, sizeof(*pGCT));
7581+
7582+ return 0;
7583+}
7584+
7585+static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
7586+ struct drm_file *file_priv)
7587+{
7588+ uint32_t flags;
7589+ uint32_t obj_id;
7590+ struct drm_mode_object * obj;
7591+ struct drm_connector * connector;
7592+ struct drm_crtc * crtc;
7593+ struct drm_psb_dc_state_arg * arg =
7594+ (struct drm_psb_dc_state_arg *)data;
7595+
7596+ if(IS_MRST(dev))
7597+ return 0;
7598+
7599+ flags = arg->flags;
7600+ obj_id = arg->obj_id;
7601+
7602+ if(flags & PSB_DC_CRTC_MASK) {
7603+ obj = drm_mode_object_find(dev, obj_id,
7604+ DRM_MODE_OBJECT_CRTC);
7605+ if(! obj) {
7606+ DRM_DEBUG("Invalid CRTC object.\n");
7607+ return -EINVAL;
7608+ }
7609+
7610+ crtc = obj_to_crtc(obj);
7611+
7612+ mutex_lock(&dev->mode_config.mutex);
7613+ if(drm_helper_crtc_in_use(crtc)) {
7614+ if(flags & PSB_DC_CRTC_SAVE)
7615+ crtc->funcs->save(crtc);
7616+ else
7617+ crtc->funcs->restore(crtc);
7618+ }
7619+ mutex_unlock(&dev->mode_config.mutex);
7620+
7621+ return 0;
7622+ } else if (flags & PSB_DC_OUTPUT_MASK) {
7623+ obj = drm_mode_object_find(dev, obj_id,
7624+ DRM_MODE_OBJECT_CONNECTOR);
7625+ if(! obj) {
7626+ DRM_DEBUG("Invalid connector id.\n");
7627+ return -EINVAL;
7628+ }
7629+
7630+ connector = obj_to_connector(obj);
7631+ if(flags & PSB_DC_OUTPUT_SAVE)
7632+ connector->funcs->save(connector);
7633+ else
7634+ connector->funcs->restore(connector);
7635+
7636+ return 0;
7637+ }
7638+
7639+ DRM_DEBUG("Bad flags 0x%x\n", flags);
7640+ return -EINVAL;
7641+}
7642+
7643+static int psb_adb_ioctl(struct drm_device *dev, void *data,
7644+ struct drm_file *file_priv)
7645+{
7646+ struct drm_psb_private *dev_priv = psb_priv(dev);
7647+ uint32_t *arg = data;
7648+ struct backlight_device bd;
7649+ dev_priv->blc_adj1 = *arg;
7650+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
7651+ bd.props.brightness = psb_get_brightness(&bd);
7652+ psb_set_brightness(&bd);
7653+#endif
7654+ return 0;
7655+}
7656+
7657+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
7658+ struct drm_file * file_priv)
7659+{
7660+ uint32_t obj_id;
7661+ uint16_t op;
7662+ struct drm_mode_modeinfo * umode;
7663+ struct drm_display_mode * mode;
7664+ struct drm_psb_mode_operation_arg * arg;
7665+ struct drm_mode_object * obj;
7666+ struct drm_connector * connector;
7667+ struct drm_connector_helper_funcs * connector_funcs;
7668+ int ret = 0;
7669+ int resp = MODE_OK;
7670+
7671+ if (IS_MRST(dev))
7672+ return 0;
7673+
7674+ arg = (struct drm_psb_mode_operation_arg *)data;
7675+ obj_id = arg->obj_id;
7676+ op = arg->operation;
7677+ umode = &arg->mode;
7678+
7679+ mutex_lock(&dev->mode_config.mutex);
7680+
7681+ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
7682+ if(!obj) {
7683+ ret = - EINVAL;
7684+ goto mode_op_out;
7685+ }
7686+
7687+ connector = obj_to_connector(obj);
7688+
7689+ mode = drm_mode_create(dev);
7690+ if(!mode) {
7691+ ret = -ENOMEM;
7692+ goto mode_op_out;
7693+ }
7694+
7695+ //drm_crtc_convert_umode(mode, umode);
7696+ {
7697+ mode->clock = umode->clock;
7698+ mode->hdisplay = umode->hdisplay;
7699+ mode->hsync_start = umode->hsync_start;
7700+ mode->hsync_end = umode->hsync_end;
7701+ mode->htotal = umode->htotal;
7702+ mode->hskew = umode->hskew;
7703+ mode->vdisplay = umode->vdisplay;
7704+ mode->vsync_start = umode->vsync_start;
7705+ mode->vsync_end = umode->vsync_end;
7706+ mode->vtotal = umode->vtotal;
7707+ mode->vscan = umode->vscan;
7708+ mode->vrefresh = umode->vrefresh;
7709+ mode->flags = umode->flags;
7710+ mode->type = umode->type;
7711+ strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
7712+ mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
7713+ }
7714+
7715+ connector_funcs = (struct drm_connector_helper_funcs *)
7716+ connector->helper_private;
7717+
7718+ switch(op) {
7719+ case PSB_MODE_OPERATION_MODE_VALID:
7720+ if(connector_funcs->mode_valid) {
7721+ resp = connector_funcs->mode_valid(connector, mode);
7722+ arg->data = (void *)resp;
7723+ }
7724+ break;
7725+ default:
7726+ DRM_DEBUG("Unsupported psb mode operation");
7727+ ret = -EOPNOTSUPP;
7728+ goto mode_op_err;
7729+ }
7730+
7731+mode_op_err:
7732+ drm_mode_destroy(dev, mode);
7733+mode_op_out:
7734+ mutex_unlock(&dev->mode_config.mutex);
7735+ return ret;
7736+}
7737+
7738+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
7739+ struct drm_file *file_priv)
7740+{
7741+ struct drm_psb_private *dev_priv = psb_priv(dev);
7742+ struct drm_psb_stolen_memory_arg *arg = data;
7743+
7744+ arg->base = dev_priv->pg->stolen_base;
7745+ arg->size = dev_priv->pg->vram_stolen_size;
7746+
7747+ return 0;
7748+}
7749+
7750+static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
7751+ struct drm_file *file_priv)
7752+{
7753+ struct drm_psb_private *dev_priv = psb_priv(dev);
7754+ struct drm_psb_register_rw_arg *arg = data;
7755+
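+ /*
+  * For each register group below: if the display island can be powered
+  * up, access the hardware registers directly; otherwise read or write
+  * the saved register copies kept in dev_priv.
+  */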
7756+ if (arg->display_write_mask != 0) {
7757+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, arg->b_force_hw_on)) {
7758+ if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
7759+ PSB_WVDC32(arg->display.pfit_controls, PFIT_CONTROL);
7760+ if (arg->display_write_mask & REGRWBITS_PFIT_AUTOSCALE_RATIOS)
7761+ PSB_WVDC32(arg->display.pfit_autoscale_ratios, PFIT_AUTO_RATIOS);
7762+ if (arg->display_write_mask & REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
7763+ PSB_WVDC32(arg->display.pfit_programmed_scale_ratios, PFIT_PGM_RATIOS);
7764+ if (arg->display_write_mask & REGRWBITS_PIPEASRC)
7765+ PSB_WVDC32(arg->display.pipeasrc, PIPEASRC);
7766+ if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
7767+ PSB_WVDC32(arg->display.pipebsrc, PIPEBSRC);
7768+ if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
7769+ PSB_WVDC32(arg->display.vtotal_a, VTOTAL_A);
7770+ if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
7771+ PSB_WVDC32(arg->display.vtotal_b, VTOTAL_B);
7772+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
7773+ } else {
7774+ if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
7775+ dev_priv->savePFIT_CONTROL = arg->display.pfit_controls;
7776+ if (arg->display_write_mask & REGRWBITS_PFIT_AUTOSCALE_RATIOS)
7777+ dev_priv->savePFIT_AUTO_RATIOS = arg->display.pfit_autoscale_ratios;
7778+ if (arg->display_write_mask & REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
7779+ dev_priv->savePFIT_PGM_RATIOS = arg->display.pfit_programmed_scale_ratios;
7780+ if (arg->display_write_mask & REGRWBITS_PIPEASRC)
7781+ dev_priv->savePIPEASRC = arg->display.pipeasrc;
7782+ if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
7783+ dev_priv->savePIPEBSRC = arg->display.pipebsrc;
7784+ if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
7785+ dev_priv->saveVTOTAL_A = arg->display.vtotal_a;
7786+ if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
7787+ dev_priv->saveVTOTAL_B = arg->display.vtotal_b;
7788+ }
7789+ }
7790+
7791+ if (arg->display_read_mask != 0) {
7792+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, arg->b_force_hw_on)) {
7793+ if (arg->display_read_mask & REGRWBITS_PFIT_CONTROLS)
7794+ arg->display.pfit_controls = PSB_RVDC32(PFIT_CONTROL);
7795+ if (arg->display_read_mask & REGRWBITS_PFIT_AUTOSCALE_RATIOS)
7796+ arg->display.pfit_autoscale_ratios = PSB_RVDC32(PFIT_AUTO_RATIOS);
7797+ if (arg->display_read_mask & REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
7798+ arg->display.pfit_programmed_scale_ratios = PSB_RVDC32(PFIT_PGM_RATIOS);
7799+ if (arg->display_read_mask & REGRWBITS_PIPEASRC)
7800+ arg->display.pipeasrc = PSB_RVDC32(PIPEASRC);
7801+ if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
7802+ arg->display.pipebsrc = PSB_RVDC32(PIPEBSRC);
7803+ if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
7804+ arg->display.vtotal_a = PSB_RVDC32(VTOTAL_A);
7805+ if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
7806+ arg->display.vtotal_b = PSB_RVDC32(VTOTAL_B);
7807+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
7808+ } else {
7809+ if (arg->display_read_mask & REGRWBITS_PFIT_CONTROLS)
7810+ arg->display.pfit_controls = dev_priv->savePFIT_CONTROL;
7811+ if (arg->display_read_mask & REGRWBITS_PFIT_AUTOSCALE_RATIOS)
7812+ arg->display.pfit_autoscale_ratios = dev_priv->savePFIT_AUTO_RATIOS;
7813+ if (arg->display_read_mask & REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
7814+ arg->display.pfit_programmed_scale_ratios = dev_priv->savePFIT_PGM_RATIOS;
7815+ if (arg->display_read_mask & REGRWBITS_PIPEASRC)
7816+ arg->display.pipeasrc = dev_priv->savePIPEASRC;
7817+ if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
7818+ arg->display.pipebsrc = dev_priv->savePIPEBSRC;
7819+ if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
7820+ arg->display.vtotal_a = dev_priv->saveVTOTAL_A;
7821+ if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
7822+ arg->display.vtotal_b = dev_priv->saveVTOTAL_B;
7823+ }
7824+ }
7825+
7826+ if (arg->overlay_write_mask != 0) {
7827+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, arg->b_force_hw_on)) {
7828+ if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
7829+ PSB_WVDC32(arg->overlay.OGAMC5, OV_OGAMC5);
7830+ PSB_WVDC32(arg->overlay.OGAMC4, OV_OGAMC4);
7831+ PSB_WVDC32(arg->overlay.OGAMC3, OV_OGAMC3);
7832+ PSB_WVDC32(arg->overlay.OGAMC2, OV_OGAMC2);
7833+ PSB_WVDC32(arg->overlay.OGAMC1, OV_OGAMC1);
7834+ PSB_WVDC32(arg->overlay.OGAMC0, OV_OGAMC0);
7835+ }
7836+ if (arg->overlay_write_mask & OV_REGRWBITS_OVADD) {
7837+ PSB_WVDC32(arg->overlay.OVADD, OV_OVADD);
7838+ }
7839+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
7840+ } else {
7841+ if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
7842+ dev_priv->saveOV_OGAMC5 = arg->overlay.OGAMC5;
7843+ dev_priv->saveOV_OGAMC4 = arg->overlay.OGAMC4;
7844+ dev_priv->saveOV_OGAMC3 = arg->overlay.OGAMC3;
7845+ dev_priv->saveOV_OGAMC2 = arg->overlay.OGAMC2;
7846+ dev_priv->saveOV_OGAMC1 = arg->overlay.OGAMC1;
7847+ dev_priv->saveOV_OGAMC0 = arg->overlay.OGAMC0;
7848+ }
7849+ if (arg->overlay_write_mask & OV_REGRWBITS_OVADD)
7850+ dev_priv->saveOV_OVADD = arg->overlay.OVADD;
7851+ }
7852+ }
7853+
7854+ if (arg->overlay_read_mask != 0) {
7855+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, arg->b_force_hw_on)) {
7856+ if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
7857+ arg->overlay.OGAMC5 = PSB_RVDC32(OV_OGAMC5);
7858+ arg->overlay.OGAMC4 = PSB_RVDC32(OV_OGAMC4);
7859+ arg->overlay.OGAMC3 = PSB_RVDC32(OV_OGAMC3);
7860+ arg->overlay.OGAMC2 = PSB_RVDC32(OV_OGAMC2);
7861+ arg->overlay.OGAMC1 = PSB_RVDC32(OV_OGAMC1);
7862+ arg->overlay.OGAMC0 = PSB_RVDC32(OV_OGAMC0);
7863+ }
7864+ if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
7865+ arg->overlay.OVADD = PSB_RVDC32(OV_OVADD);
7866+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
7867+ } else {
7868+ if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
7869+ arg->overlay.OGAMC5 = dev_priv->saveOV_OGAMC5;
7870+ arg->overlay.OGAMC4 = dev_priv->saveOV_OGAMC4;
7871+ arg->overlay.OGAMC3 = dev_priv->saveOV_OGAMC3;
7872+ arg->overlay.OGAMC2 = dev_priv->saveOV_OGAMC2;
7873+ arg->overlay.OGAMC1 = dev_priv->saveOV_OGAMC1;
7874+ arg->overlay.OGAMC0 = dev_priv->saveOV_OGAMC0;
7875+ }
7876+ if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
7877+ arg->overlay.OVADD = dev_priv->saveOV_OVADD;
7878+ }
7879+ }
7880+
7881+ return 0;
7882+}
7883+
7884+/* always available as we are SIGIO'd */
7885+static unsigned int psb_poll(struct file *filp,
7886+ struct poll_table_struct *wait)
7887+{
7888+ return POLLIN | POLLRDNORM;
7889+}
7890+
7891+int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
7892+{
7893+ return 0;
7894+}
7895+
7896+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
7897+ unsigned long arg)
7898+{
7899+ struct drm_file *file_priv = filp->private_data;
7900+ struct drm_device *dev = file_priv->minor->dev;
7901+ unsigned int nr = DRM_IOCTL_NR(cmd);
7902+ long ret;
7903+
7904+ /*
7905+ * The driver private ioctls and TTM ioctls should be
7906+ * thread-safe.
7907+ */
7908+
7909+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
7910+ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
7911+ struct drm_ioctl_desc *ioctl = &psb_ioctls[nr - DRM_COMMAND_BASE];
7912+
7913+ if (unlikely(ioctl->cmd != cmd)) {
7914+ DRM_ERROR("Invalid drm command %d\n",
7915+ nr - DRM_COMMAND_BASE);
7916+ return -EINVAL;
7917+ }
7918+
7919+ return drm_unlocked_ioctl(filp, cmd, arg);
7920+ }
7921+ /*
7922+ * Not all old drm ioctls are thread-safe.
7923+ */
7924+
7925+ lock_kernel();
7926+ ret = drm_unlocked_ioctl(filp, cmd, arg);
7927+ unlock_kernel();
7928+ return ret;
7929+}
7930+
7931+static int psb_ospm_read(char *buf, char **start, off_t offset, int request,
7932+ int *eof, void *data)
7933+{
7934+ struct drm_minor *minor = (struct drm_minor *) data;
7935+ struct drm_device *dev = minor->dev;
7936+ struct drm_psb_private *dev_priv =
7937+ (struct drm_psb_private *) dev->dev_private;
7938+ int len = 0;
7939+#ifdef OSPM_STAT
7940+ unsigned long d0 = 0;
7941+ unsigned long d0i3 = 0;
7942+ unsigned long d3 = 0;
7943+#endif
7944+
7945+ *start = &buf[offset];
7946+ *eof = 0;
7947+ DRM_PROC_PRINT("D0i3:%s ", drm_psb_ospm ? "enabled" : "disabled");
7948+
7949+#ifdef OSPM_STAT
7950+ switch (dev_priv->graphics_state) {
7951+ case PSB_PWR_STATE_D0:
7952+ DRM_PROC_PRINT("GFX:%s\n", "D0");
7953+ break;
7954+ case PSB_PWR_STATE_D0i3:
7955+ DRM_PROC_PRINT("GFX:%s\n", "D0i3");
7956+ break;
7957+ case PSB_PWR_STATE_D3:
7958+ DRM_PROC_PRINT("GFX:%s\n", "D3");
7959+ break;
7960+ default:
7961+ DRM_PROC_PRINT("GFX:%s\n", "unknown");
7962+ }
7963+
7964+ d0 = dev_priv->gfx_d0_time * 1000 / HZ;
7965+ d0i3 = dev_priv->gfx_d0i3_time * 1000 / HZ;
7966+ d3 = dev_priv->gfx_d3_time * 1000 / HZ;
7967+ switch (dev_priv->graphics_state) {
7968+ case PSB_PWR_STATE_D0:
7969+ d0 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
7970+ break;
7971+ case PSB_PWR_STATE_D0i3:
7972+ d0i3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
7973+ break;
7974+ case PSB_PWR_STATE_D3:
7975+ d3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
7976+ break;
7977+ }
7978+ DRM_PROC_PRINT("GFX(cnt/ms):\n");
7979+ DRM_PROC_PRINT("D0:%lu/%lu, D0i3:%lu/%lu, D3:%lu/%lu \n",
7980+ dev_priv->gfx_d0_cnt, d0, dev_priv->gfx_d0i3_cnt, d0i3,
7981+ dev_priv->gfx_d3_cnt, d3);
7982+#endif
7983+ if (len > request + offset)
7984+ return request;
7985+ *eof = 1;
7986+ return len - offset;
7987+}
7988+
7989+/* When a client dies:
7990+ * - Check for and clean up flipped page state
7991+ */
7992+void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
7993+{
7994+ unsigned long irqflags;
7995+ int pipe, i;
7996+ if (dev->dev_private) {
7997+ struct drm_psb_private *dev_priv = dev->dev_private;
7998+ if (dev_priv->dri_page_flipping && dev_priv->current_page == 1) {
7999+ for (pipe=0; pipe<2; pipe++) {
8000+ if (dev_priv->pipe_active[pipe] == 1) {
8001+ dev_priv->flip_start[pipe] = dev_priv->saved_start[pipe];
8002+ dev_priv->flip_offset[pipe] = dev_priv->saved_offset[pipe];
8003+ dev_priv->flip_stride[pipe] = dev_priv->saved_stride[pipe];
8004+ psb_flip_set_base(dev_priv, pipe);
8005+ }
8006+ }
8007+ dev_priv->dri_page_flipping = 0;
8008+ dev_priv->current_page = 0;
8009+ }
8010+
8011+ drm_psb_disable_vsync = 1;
8012+ dev_priv->vdc_irq_mask &= ~(_PSB_VSYNC_PIPEA_FLAG | _PSB_VSYNC_PIPEB_FLAG);
8013+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
8014+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
8015+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
8016+
8017+ for (i = 0; i < dev->num_crtcs; i++) {
8018+ if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
8019+ dev->vblank_enabled[i]) {
8020+ DRM_DEBUG("disabling vblank on crtc %d\n", i);
8021+ dev->last_vblank[i] =
8022+ dev->driver->get_vblank_counter(dev, i);
8023+ dev->vblank_enabled[i] = 0;
8024+ }
8025+ }
8026+ }
8027+}
8028+
8029+static void psb_remove(struct pci_dev *pdev)
8030+{
8031+ struct drm_device *dev = pci_get_drvdata(pdev);
8032+ drm_put_dev(dev);
8033+}
8034+
8035+static int psb_proc_init(struct drm_minor *minor)
8036+{
8037+ struct proc_dir_entry *ent;
8038+ ent = create_proc_read_entry(OSPM_PROC_ENTRY, 0, minor->proc_root,
8039+ psb_ospm_read, minor);
8040+ if (ent)
8041+ return 0;
8042+ else
8043+ return -1;
8044+}
8045+
8046+static void psb_proc_cleanup(struct drm_minor *minor)
8047+{
8048+ remove_proc_entry(OSPM_PROC_ENTRY, minor->proc_root);
8049+ return;
8050+}
8051+
8052+static struct drm_driver driver = {
8053+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL | DRIVER_MODESET,
8054+ .load = psb_driver_load,
8055+ .unload = psb_driver_unload,
8056+
8057+ .get_reg_ofs = drm_core_get_reg_ofs,
8058+ .ioctls = psb_ioctls,
8059+ .device_is_agp = psb_driver_device_is_agp,
8060+ .irq_preinstall = psb_irq_preinstall,
8061+ .irq_postinstall = psb_irq_postinstall,
8062+ .irq_uninstall = psb_irq_uninstall,
8063+ .irq_handler = psb_irq_handler,
8064+ .enable_vblank = psb_enable_vblank,
8065+ .disable_vblank = psb_disable_vblank,
8066+ .firstopen = NULL,
8067+ .lastclose = psb_lastclose,
8068+ .open = psb_driver_open,
8069+ .proc_init = psb_proc_init,
8070+ .proc_cleanup = psb_proc_cleanup,
8071+ .preclose = psb_driver_preclose,
8072+ .fops = {
8073+ .owner = THIS_MODULE,
8074+ .open = psb_open,
8075+ .release = psb_release,
8076+ .unlocked_ioctl = psb_unlocked_ioctl,
8077+ .mmap = psb_mmap,
8078+ .poll = psb_poll,
8079+ .fasync = drm_fasync,
8080+ },
8081+ .pci_driver = {
8082+ .name = DRIVER_NAME,
8083+ .id_table = pciidlist,
8084+ .resume = powermgmt_resume,
8085+ .suspend = powermgmt_suspend,
8086+ .probe = psb_probe,
8087+ .remove = psb_remove,
8088+ },
8089+ .name = DRIVER_NAME,
8090+ .desc = DRIVER_DESC,
8091+ .date = PSB_DRM_DRIVER_DATE,
8092+ .major = PSB_DRM_DRIVER_MAJOR,
8093+ .minor = PSB_DRM_DRIVER_MINOR,
8094+ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
8095+};
8096+
8097+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8098+{
8099+ return drm_get_dev(pdev, ent, &driver);
8100+}
8101+
8102+static int __init psb_init(void)
8103+{
8104+ driver.num_ioctls = psb_max_ioctl;
8105+ return drm_init(&driver);
8106+}
8107+
8108+static void __exit psb_exit(void)
8109+{
8110+ drm_exit(&driver);
8111+}
8112+
8113+late_initcall(psb_init);
8114+module_exit(psb_exit);
8115+
8116+MODULE_AUTHOR(DRIVER_AUTHOR);
8117+MODULE_DESCRIPTION(DRIVER_DESC);
8118+MODULE_LICENSE("GPL");
8119diff --git a/drivers/gpu/drm/psb/psb_drv.h b/drivers/gpu/drm/psb/psb_drv.h
8120new file mode 100644
8121index 0000000..9b2c4e1
8122--- /dev/null
8123+++ b/drivers/gpu/drm/psb/psb_drv.h
8124@@ -0,0 +1,1224 @@
8125+/**************************************************************************
8126+ *Copyright (c) 2007-2008, Intel Corporation.
8127+ *All Rights Reserved.
8128+ *
8129+ *This program is free software; you can redistribute it and/or modify it
8130+ *under the terms and conditions of the GNU General Public License,
8131+ *version 2, as published by the Free Software Foundation.
8132+ *
8133+ *This program is distributed in the hope it will be useful, but WITHOUT
8134+ *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8135+ *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8136+ *more details.
8137+ *
8138+ *You should have received a copy of the GNU General Public License along with
8139+ *this program; if not, write to the Free Software Foundation, Inc.,
8140+ *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8141+ *
8142+ *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
8143+ *develop this driver.
8144+ *
8145+ **************************************************************************/
8148+#ifndef _PSB_DRV_H_
8149+#define _PSB_DRV_H_
8150+
8151+#include <drm/drmP.h>
8152+#include "psb_drm.h"
8153+#include "psb_reg.h"
8154+#include "psb_schedule.h"
8155+#include "psb_intel_drv.h"
8156+#include "psb_hotplug.h"
8157+#include "psb_dpst.h"
8158+#include "ttm/ttm_object.h"
8159+#include "ttm/ttm_fence_driver.h"
8160+#include "ttm/ttm_bo_driver.h"
8161+#include "ttm/ttm_lock.h"
8162+
8163+extern struct ttm_bo_driver psb_ttm_bo_driver;
8164+
8165+enum {
8166+ CHIP_PSB_8108 = 0,
8167+ CHIP_PSB_8109 = 1,
8168+ CHIP_MRST_4100 = 2
8169+};
8170+
8171+/*
8172+ *Hardware bugfixes
8173+ */
8174+
8175+#define FIX_TG_16
8176+#define FIX_TG_2D_CLOCKGATE
8177+#define OSPM_STAT
8178+
8179+#define DRIVER_NAME "psb"
8180+#define DRIVER_DESC "drm driver for the Intel GMA500"
8181+#define DRIVER_AUTHOR "Tungsten Graphics Inc."
8182+#define OSPM_PROC_ENTRY "ospm"
8183+
8184+#define PSB_DRM_DRIVER_DATE "2009-03-10"
8185+#define PSB_DRM_DRIVER_MAJOR 8
8186+#define PSB_DRM_DRIVER_MINOR 1
8187+#define PSB_DRM_DRIVER_PATCHLEVEL 0
8188+
8189+/*
8190+ *TTM driver private offsets.
8191+ */
8192+
8193+#define DRM_PSB_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
8194+
8195+#define PSB_OBJECT_HASH_ORDER 13
8196+#define PSB_FILE_OBJECT_HASH_ORDER 12
8197+#define PSB_BO_HASH_ORDER 12
8198+
8199+#define PSB_VDC_OFFSET 0x00000000
8200+#define PSB_VDC_SIZE 0x000080000
8201+#define MRST_MMIO_SIZE 0x0000C0000
8202+#define PSB_SGX_SIZE 0x8000
8203+#define PSB_SGX_OFFSET 0x00040000
8204+#define MRST_SGX_OFFSET 0x00080000
8205+#define PSB_MMIO_RESOURCE 0
8206+#define PSB_GATT_RESOURCE 2
8207+#define PSB_GTT_RESOURCE 3
8208+#define PSB_GMCH_CTRL 0x52
8209+#define PSB_BSM 0x5C
8210+#define _PSB_GMCH_ENABLED 0x4
8211+#define PSB_PGETBL_CTL 0x2020
8212+#define _PSB_PGETBL_ENABLED 0x00000001
8213+#define PSB_SGX_2D_SLAVE_PORT 0x4000
8214+#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
8215+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
8216+#define PSB_NUM_VALIDATE_BUFFERS 2048
8217+#define PSB_MEM_KERNEL_START 0x10000000
8218+#define PSB_MEM_PDS_START 0x20000000
8219+#define PSB_MEM_MMU_START 0x40000000
8220+
8221+#define DRM_PSB_MEM_KERNEL TTM_PL_PRIV0
8222+#define DRM_PSB_FLAG_MEM_KERNEL TTM_PL_FLAG_PRIV0
8223+
8224+/*
8225+ *Flags for external memory type field.
8226+ */
8227+
8228+#define MRST_MSVDX_OFFSET 0x90000 /*MSVDX Base offset */
8229+#define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */
8230+/* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
8231+#define PSB_MSVDX_SIZE 0x10000
8232+
8233+#define LNC_TOPAZ_OFFSET 0xA0000
8234+#define LNC_TOPAZ_SIZE 0x10000
8235+
8236+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
8237+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
8238+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
8239+
8240+/*
8241+ *PTE's and PDE's
8242+ */
8243+
8244+#define PSB_PDE_MASK 0x003FFFFF
8245+#define PSB_PDE_SHIFT 22
8246+#define PSB_PTE_SHIFT 12
8247+
8248+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
8249+#define PSB_PTE_WO 0x0002 /* Write only */
8250+#define PSB_PTE_RO 0x0004 /* Read only */
8251+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
8252+
8253+/*
8254+ *VDC registers and bits
8255+ */
8256+#define PSB_MSVDX_CLOCKGATING 0x2064
8257+#define PSB_TOPAZ_CLOCKGATING 0x2068
8258+#define PSB_HWSTAM 0x2098
8259+#define PSB_INSTPM 0x20C0
8260+#define PSB_INT_IDENTITY_R 0x20A4
8261+#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
8262+#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
8263+#define _PSB_IRQ_SGX_FLAG (1<<18)
8264+#define _PSB_IRQ_MSVDX_FLAG (1<<19)
8265+#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
8266+#define PSB_INT_MASK_R 0x20A8
8267+#define PSB_INT_ENABLE_R 0x20A0
8268+
8269+#define _PSB_MMU_ER_MASK 0x0001FF00
8270+#define _PSB_MMU_ER_HOST (1 << 16)
8271+#define GPIOA 0x5010
8272+#define GPIOB 0x5014
8273+#define GPIOC 0x5018
8274+#define GPIOD 0x501c
8275+#define GPIOE 0x5020
8276+#define GPIOF 0x5024
8277+#define GPIOG 0x5028
8278+#define GPIOH 0x502c
8279+#define GPIO_CLOCK_DIR_MASK (1 << 0)
8280+#define GPIO_CLOCK_DIR_IN (0 << 1)
8281+#define GPIO_CLOCK_DIR_OUT (1 << 1)
8282+#define GPIO_CLOCK_VAL_MASK (1 << 2)
8283+#define GPIO_CLOCK_VAL_OUT (1 << 3)
8284+#define GPIO_CLOCK_VAL_IN (1 << 4)
8285+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
8286+#define GPIO_DATA_DIR_MASK (1 << 8)
8287+#define GPIO_DATA_DIR_IN (0 << 9)
8288+#define GPIO_DATA_DIR_OUT (1 << 9)
8289+#define GPIO_DATA_VAL_MASK (1 << 10)
8290+#define GPIO_DATA_VAL_OUT (1 << 11)
8291+#define GPIO_DATA_VAL_IN (1 << 12)
8292+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
8293+
8294+#define VCLK_DIVISOR_VGA0 0x6000
8295+#define VCLK_DIVISOR_VGA1 0x6004
8296+#define VCLK_POST_DIV 0x6010
8297+
8298+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
8299+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
8300+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
8301+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
8302+#define PSB_COMM_USER_IRQ (1024 >> 2)
8303+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
8304+#define PSB_COMM_FW (2048 >> 2)
8305+
8306+#define PSB_UIRQ_VISTEST 1
8307+#define PSB_UIRQ_OOM_REPLY 2
8308+#define PSB_UIRQ_FIRE_TA_REPLY 3
8309+#define PSB_UIRQ_FIRE_RASTER_REPLY 4
8310+
8311+#define PSB_2D_SIZE (256*1024*1024)
8312+#define PSB_MAX_RELOC_PAGES 1024
8313+
8314+#define PSB_LOW_REG_OFFS 0x0204
8315+#define PSB_HIGH_REG_OFFS 0x0600
8316+
8317+#define PSB_NUM_VBLANKS 2
8318+
8332+#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
8333+#define PSB_LID_DELAY (DRM_HZ / 10)
8334+
8335+#define PSB_PWR_STATE_D0 1
8336+#define PSB_PWR_STATE_D0i3 2
8337+#define PSB_PWR_STATE_D3 3
8338+
8339+#define PSB_PMPOLICY_NOPM 0
8340+#define PSB_PMPOLICY_CLOCKGATING 1
8341+#define PSB_PMPOLICY_POWERDOWN 2
8342+
8343+#define PSB_PMSTATE_POWERUP 0
8344+#define PSB_PMSTATE_CLOCKGATED 1
8345+#define PSB_PMSTATE_POWERDOWN 2
8346+
8347+/* Graphics MSI address and data region in PCIx */
8348+#define PSB_PCIx_MSI_ADDR_LOC 0x94
8349+#define PSB_PCIx_MSI_DATA_LOC 0x98
8350+
8351+
8352+/*
8353+ *User options.
8354+ */
8355+
8356+struct drm_psb_uopt {
8357+ int clock_gating;
8358+};
8359+
8360+/**
8361+ *struct psb_context
8362+ *
8363+ *@buffers: array of pre-allocated validate buffers.
8364+ *@used_buffers: number of buffers in @buffers array currently in use.
8365+ *@validate_list: list of buffers validated from user-space.
8366+ *@kern_validate_list: list of buffers validated from kernel-space.
8367+ *@fence_types: fence types to be used for fence creation.
8368+ *
8369+ *This structure is used during execbuf validation.
8370+ */
8371+
8372+struct psb_context {
8373+ struct psb_validate_buffer *buffers;
8374+ uint32_t used_buffers;
8375+ struct list_head validate_list;
8376+ struct list_head kern_validate_list;
8377+ uint32_t fence_types;
8378+ uint32_t val_seq;
8379+};
8380+
8381+struct psb_gtt {
8382+ struct drm_device *dev;
8383+ int initialized;
8384+ uint32_t gatt_start;
8385+ uint32_t gtt_start;
8386+ uint32_t gtt_phys_start;
8387+ unsigned gtt_pages;
8388+ unsigned gatt_pages;
8389+ uint32_t stolen_base;
8390+ uint32_t pge_ctl;
8391+ u16 gmch_ctrl;
8392+ unsigned long stolen_size;
8393+ unsigned long vram_stolen_size;
8394+ unsigned long ci_stolen_size;
8395+ unsigned long rar_stolen_size;
8396+ uint32_t *gtt_map;
8397+ struct rw_semaphore sem;
8398+};
8399+
8400+struct psb_use_base {
8401+ struct list_head head;
8402+ struct ttm_fence_object *fence;
8403+ unsigned int reg;
8404+ unsigned long offset;
8405+ unsigned int dm;
8406+};
8407+
8408+struct psb_validate_buffer;
8409+
8410+struct psb_msvdx_cmd_queue {
8411+ struct list_head head;
8412+ void *cmd;
8413+ unsigned long cmd_size;
8414+ uint32_t sequence;
8415+};
8416+
8417+struct drm_psb_private {
8418+
8419+ /*
8420+ *TTM Glue.
8421+ */
8422+
8423+ struct drm_global_reference mem_global_ref;
8424+ int has_global;
8425+
8426+ struct drm_device *dev;
8427+ struct ttm_object_device *tdev;
8428+ struct ttm_fence_device fdev;
8429+ struct ttm_bo_device bdev;
8430+ struct ttm_lock ttm_lock;
8431+ struct vm_operations_struct *ttm_vm_ops;
8432+ int has_fence_device;
8433+ int has_bo_device;
8434+
8435+ unsigned long chipset;
8436+
8437+ struct psb_xhw_buf resume_buf;
8438+ struct drm_psb_dev_info_arg dev_info;
8439+ struct drm_psb_uopt uopt;
8440+
8441+ struct psb_gtt *pg;
8442+
8443+ struct page *scratch_page;
8444+ struct page *comm_page;
8445+	/* Dropped the 'volatile' qualifier here; its use is not recommended. */
8446+ uint32_t *comm;
8447+ uint32_t comm_mmu_offset;
8448+ uint32_t mmu_2d_offset;
8449+ uint32_t sequence[PSB_NUM_ENGINES];
8450+ uint32_t last_sequence[PSB_NUM_ENGINES];
8451+ int idle[PSB_NUM_ENGINES];
8452+ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
8453+ int engine_lockup_2d;
8454+
8455+ struct psb_mmu_driver *mmu;
8456+ struct psb_mmu_pd *pf_pd;
8457+
8458+ uint8_t *sgx_reg;
8459+ uint8_t *vdc_reg;
8460+ uint32_t gatt_free_offset;
8461+
8462+ /*
8463+ *MSVDX
8464+ */
8465+ uint8_t *msvdx_reg;
8466+ atomic_t msvdx_mmu_invaldc;
8467+ void *msvdx_private;
8468+
8469+ /*
8470+ *TOPAZ
8471+ */
8472+ uint8_t *topaz_reg;
8473+ void *topaz_private;
8474+
8475+ /*
8476+ *Fencing / irq.
8477+ */
8478+
8479+ uint32_t sgx_irq_mask;
8480+ uint32_t sgx2_irq_mask;
8481+ uint32_t vdc_irq_mask;
8482+ u32 pipestat[2];
8483+
8484+ spinlock_t irqmask_lock;
8485+ spinlock_t sequence_lock;
8486+ int fence0_irq_on;
8487+ int irq_enabled;
8488+ unsigned int irqen_count_2d;
8489+ wait_queue_head_t event_2d_queue;
8490+
8491+#ifdef FIX_TG_16
8492+ wait_queue_head_t queue_2d;
8493+ atomic_t lock_2d;
8494+ atomic_t ta_wait_2d;
8495+ atomic_t ta_wait_2d_irq;
8496+ atomic_t waiters_2d;
8497+#else
8498+ struct mutex mutex_2d;
8499+#endif
8500+ int fence2_irq_on;
8501+
8502+ /*
8503+ *Modesetting
8504+ */
8505+ struct psb_intel_mode_device mode_dev;
8506+
8507+
8508+ /*
8509+ * CI share buffer
8510+ */
8511+ unsigned int ci_region_start;
8512+ unsigned int ci_region_size;
8513+
8514+ /*
8515+	 * RAR share buffer
8516+ */
8517+ unsigned int rar_region_start;
8518+ unsigned int rar_region_size;
8519+
8520+ /*
8521+ *Memory managers
8522+ */
8523+
8524+ int have_vram;
8525+ int have_camera;
8526+ int have_rar;
8527+ int have_tt;
8528+ int have_mem_mmu;
8529+ int have_mem_aper;
8530+ int have_mem_kernel;
8531+ int have_mem_pds;
8532+ int have_mem_rastgeom;
8533+ struct mutex temp_mem;
8534+
8535+ /*
8536+ *Relocation buffer mapping.
8537+ */
8538+
8539+ spinlock_t reloc_lock;
8540+ unsigned int rel_mapped_pages;
8541+ wait_queue_head_t rel_mapped_queue;
8542+
8543+ /*
8544+ *SAREA
8545+ */
8546+ struct drm_psb_sarea *sarea_priv;
8547+
8548+ /*
8549+ *OSPM info
8550+ */
8551+ uint32_t ospm_base;
8552+
8553+ /*
8554+ * Sizes info
8555+ */
8556+
8557+ struct drm_psb_sizes_arg sizes;
8558+
8559+ uint32_t fuse_reg_value;
8560+
8561+ /* vbt (gct) header information*/
8562+ struct mrst_vbt vbt_data;
8563+ /* info that is stored from the gct */
8564+ struct gct_ioctl_arg gct_data;
8565+
8566+ /*
8567+ *LVDS info
8568+ */
8569+ int backlight_duty_cycle; /* restore backlight to this value */
8570+ bool panel_wants_dither;
8571+ struct drm_display_mode *panel_fixed_mode;
8572+ struct drm_display_mode *lfp_lvds_vbt_mode;
8573+ struct drm_display_mode *sdvo_lvds_vbt_mode;
8574+
8575+	struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
8576+	struct psb_intel_i2c_chan *lvds_i2c_bus;
8577+
8578+ /* Feature bits from the VBIOS*/
8579+ unsigned int int_tv_support:1;
8580+ unsigned int lvds_dither:1;
8581+ unsigned int lvds_vbt:1;
8582+ unsigned int int_crt_support:1;
8583+ unsigned int lvds_use_ssc:1;
8584+ int lvds_ssc_freq;
8585+
8586+/* MRST private data start */
8587+/*FIXME JLIU7 need to revisit */
8588+ bool sku_83;
8589+ bool sku_100;
8590+ bool sku_100L;
8591+ bool sku_bypass;
8592+ uint32_t iLVDS_enable;
8593+
8594+ /* pipe config register value */
8595+ uint32_t pipeconf;
8596+
8597+ /* plane control register value */
8598+ uint32_t dspcntr;
8599+
8600+/* MRST_DSI private data start */
8601+ /*
8602+ *MRST DSI info
8603+ */
8604+ /* The DSI device ready */
8605+ bool dsi_device_ready;
8606+
8607+ /* The DPI panel power on */
8608+ bool dpi_panel_on;
8609+
8610+ /* The DBI panel power on */
8611+ bool dbi_panel_on;
8612+
8613+ /* The DPI display */
8614+ bool dpi;
8615+
8616+ /* status */
8617+ uint32_t videoModeFormat:2;
8618+ uint32_t laneCount:3;
8619+ uint32_t status_reserved:27;
8620+
8621+ /* dual display - DPI & DBI */
8622+ bool dual_display;
8623+
8624+ /* HS or LP transmission */
8625+ bool lp_transmission;
8626+
8627+ /* configuration phase */
8628+ bool config_phase;
8629+
8630+ /* DSI clock */
8631+ uint32_t RRate;
8632+ uint32_t DDR_Clock;
8633+ uint32_t DDR_Clock_Calculated;
8634+ uint32_t ClockBits;
8635+
8636+ /* DBI Buffer pointer */
8637+ u8 *p_DBI_commandBuffer_orig;
8638+ u8 *p_DBI_commandBuffer;
8639+ uint32_t DBI_CB_pointer;
8640+ u8 *p_DBI_dataBuffer_orig;
8641+ u8 *p_DBI_dataBuffer;
8642+ uint32_t DBI_DB_pointer;
8643+
8644+ /* DPI panel spec */
8645+ uint32_t pixelClock;
8646+ uint32_t HsyncWidth;
8647+ uint32_t HbackPorch;
8648+ uint32_t HfrontPorch;
8649+ uint32_t HactiveArea;
8650+ uint32_t VsyncWidth;
8651+ uint32_t VbackPorch;
8652+ uint32_t VfrontPorch;
8653+ uint32_t VactiveArea;
8654+ uint32_t bpp:5;
8655+ uint32_t Reserved:27;
8656+
8657+ /* DBI panel spec */
8658+ uint32_t dbi_pixelClock;
8659+ uint32_t dbi_HsyncWidth;
8660+ uint32_t dbi_HbackPorch;
8661+ uint32_t dbi_HfrontPorch;
8662+ uint32_t dbi_HactiveArea;
8663+ uint32_t dbi_VsyncWidth;
8664+ uint32_t dbi_VbackPorch;
8665+ uint32_t dbi_VfrontPorch;
8666+ uint32_t dbi_VactiveArea;
8667+ uint32_t dbi_bpp:5;
8668+ uint32_t dbi_Reserved:27;
8669+
8670+/* MRST_DSI private data end */
8671+
8672+ /*
8673+ *Register state
8674+ */
8675+ uint32_t saveDSPACNTR;
8676+ uint32_t saveDSPBCNTR;
8677+ uint32_t savePIPEACONF;
8678+ uint32_t savePIPEBCONF;
8679+ uint32_t savePIPEASRC;
8680+ uint32_t savePIPEBSRC;
8681+ uint32_t saveFPA0;
8682+ uint32_t saveFPA1;
8683+ uint32_t saveDPLL_A;
8684+ uint32_t saveDPLL_A_MD;
8685+ uint32_t saveHTOTAL_A;
8686+ uint32_t saveHBLANK_A;
8687+ uint32_t saveHSYNC_A;
8688+ uint32_t saveVTOTAL_A;
8689+ uint32_t saveVBLANK_A;
8690+ uint32_t saveVSYNC_A;
8691+ uint32_t saveDSPASTRIDE;
8692+ uint32_t saveDSPASIZE;
8693+ uint32_t saveDSPAPOS;
8694+ uint32_t saveDSPABASE;
8695+ uint32_t saveDSPASURF;
8696+ uint32_t saveFPB0;
8697+ uint32_t saveFPB1;
8698+ uint32_t saveDPLL_B;
8699+ uint32_t saveDPLL_B_MD;
8700+ uint32_t saveHTOTAL_B;
8701+ uint32_t saveHBLANK_B;
8702+ uint32_t saveHSYNC_B;
8703+ uint32_t saveVTOTAL_B;
8704+ uint32_t saveVBLANK_B;
8705+ uint32_t saveVSYNC_B;
8706+ uint32_t saveDSPBSTRIDE;
8707+ uint32_t saveDSPBSIZE;
8708+ uint32_t saveDSPBPOS;
8709+ uint32_t saveDSPBBASE;
8710+ uint32_t saveDSPBSURF;
8711+ uint32_t saveVCLK_DIVISOR_VGA0;
8712+ uint32_t saveVCLK_DIVISOR_VGA1;
8713+ uint32_t saveVCLK_POST_DIV;
8714+ uint32_t saveVGACNTRL;
8715+ uint32_t saveADPA;
8716+ uint32_t saveLVDS;
8717+ uint32_t saveDVOA;
8718+ uint32_t saveDVOB;
8719+ uint32_t saveDVOC;
8720+ uint32_t savePP_ON;
8721+ uint32_t savePP_OFF;
8722+ uint32_t savePP_CONTROL;
8723+ uint32_t savePP_CYCLE;
8724+ uint32_t savePFIT_CONTROL;
8725+ uint32_t savePaletteA[256];
8726+ uint32_t savePaletteB[256];
8727+ uint32_t saveBLC_PWM_CTL2;
8728+ uint32_t saveBLC_PWM_CTL;
8729+ uint32_t saveCLOCKGATING;
8730+ uint32_t saveDSPARB;
8731+ uint32_t saveDSPATILEOFF;
8732+ uint32_t saveDSPBTILEOFF;
8733+ uint32_t saveDSPAADDR;
8734+ uint32_t saveDSPBADDR;
8735+ uint32_t savePFIT_AUTO_RATIOS;
8736+ uint32_t savePFIT_PGM_RATIOS;
8737+ uint32_t savePP_ON_DELAYS;
8738+ uint32_t savePP_OFF_DELAYS;
8739+ uint32_t savePP_DIVISOR;
8740+ uint32_t saveBSM;
8741+ uint32_t saveVBT;
8742+ uint32_t saveBCLRPAT_A;
8743+ uint32_t saveBCLRPAT_B;
8744+ uint32_t saveDSPALINOFF;
8745+ uint32_t saveDSPBLINOFF;
8746+ uint32_t savePERF_MODE;
8747+ uint32_t saveDSPFW1;
8748+ uint32_t saveDSPFW2;
8749+ uint32_t saveDSPFW3;
8750+ uint32_t saveDSPFW4;
8751+ uint32_t saveDSPFW5;
8752+ uint32_t saveDSPFW6;
8753+ uint32_t saveCHICKENBIT;
8754+ uint32_t saveDSPACURSOR_CTRL;
8755+ uint32_t saveDSPBCURSOR_CTRL;
8756+ uint32_t saveDSPACURSOR_BASE;
8757+ uint32_t saveDSPBCURSOR_BASE;
8758+ uint32_t saveDSPACURSOR_POS;
8759+ uint32_t saveDSPBCURSOR_POS;
8760+ uint32_t save_palette_a[256];
8761+ uint32_t save_palette_b[256];
8762+ uint32_t saveOV_OVADD;
8763+ uint32_t saveOV_OGAMC0;
8764+ uint32_t saveOV_OGAMC1;
8765+ uint32_t saveOV_OGAMC2;
8766+ uint32_t saveOV_OGAMC3;
8767+ uint32_t saveOV_OGAMC4;
8768+ uint32_t saveOV_OGAMC5;
8769+
8770+ /* MSI reg save */
8771+ uint32_t msi_addr;
8772+ uint32_t msi_data;
8773+
8774+ /*
8775+ *Xhw
8776+ */
8777+
8778+ uint32_t *xhw;
8779+ struct ttm_buffer_object *xhw_bo;
8780+ struct ttm_bo_kmap_obj xhw_kmap;
8781+ struct list_head xhw_in;
8782+ spinlock_t xhw_lock;
8783+ atomic_t xhw_client;
8784+ struct drm_file *xhw_file;
8785+ wait_queue_head_t xhw_queue;
8786+ wait_queue_head_t xhw_caller_queue;
8787+ struct mutex xhw_mutex;
8788+ struct psb_xhw_buf *xhw_cur_buf;
8789+ int xhw_submit_ok;
8790+ int xhw_on;
8791+
8792+ /*
8793+ *Scheduling.
8794+ */
8795+
8796+ struct mutex reset_mutex;
8797+ struct psb_scheduler scheduler;
8798+ struct mutex cmdbuf_mutex;
8799+ uint32_t ta_mem_pages;
8800+ struct psb_ta_mem *ta_mem;
8801+ int force_ta_mem_load;
8802+ atomic_t val_seq;
8803+
8804+ /*
8805+ *TODO: change this to be per drm-context.
8806+ */
8807+
8808+ struct psb_context context;
8809+
8810+ /*
8811+ * LID-Switch
8812+ */
8813+ spinlock_t lid_lock;
8814+ struct timer_list lid_timer;
8815+ struct psb_intel_opregion opregion;
8816+	u32 *lid_state;
8817+ u32 lid_last_state;
8818+
8819+ /*
8820+ *Watchdog
8821+ */
8822+
8823+ spinlock_t watchdog_lock;
8824+ struct timer_list watchdog_timer;
8825+ struct work_struct watchdog_wq;
8826+ struct work_struct msvdx_watchdog_wq;
8827+ struct work_struct topaz_watchdog_wq;
8828+ int timer_available;
8829+
8830+ uint32_t apm_reg;
8831+ uint16_t apm_base;
8832+#ifdef OSPM_STAT
8833+ unsigned char graphics_state;
8834+ unsigned long gfx_d0i3_time;
8835+ unsigned long gfx_d0_time;
8836+ unsigned long gfx_d3_time;
8837+ unsigned long gfx_last_mode_change;
8838+ unsigned long gfx_d0_cnt;
8839+ unsigned long gfx_d0i3_cnt;
8840+ unsigned long gfx_d3_cnt;
8841+#endif
8842+
8843+ int dri_page_flipping;
8844+ int current_page;
8845+ int pipe_active[2];
8846+ int saved_start[2];
8847+ int saved_offset[2];
8848+ int saved_stride[2];
8849+
8850+ int flip_start[2];
8851+ int flip_offset[2];
8852+ int flip_stride[2];
8853+
8854+
8855+ /*
8856+ *Used for modifying backlight from xrandr -- consider removing and using HAL instead
8857+ */
8858+ struct drm_property *backlight_property;
8859+ uint32_t blc_adj1;
8860+
8861+ /*
8862+ * DPST and Hotplug state
8863+ */
8864+
8865+ struct dpst_state *psb_dpst_state;
8866+ struct hotplug_state *psb_hotplug_state;
8867+
8868+};
8869+
8870+struct psb_fpriv {
8871+ struct ttm_object_file *tfile;
8872+};
8873+
8874+struct psb_mmu_driver;
8875+
8876+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
8877+extern int drm_pick_crtcs(struct drm_device *dev);
8878+
8879+
8880+static inline struct psb_fpriv *psb_fpriv(struct drm_file *file_priv)
8881+{
8882+ return (struct psb_fpriv *) file_priv->driver_priv;
8883+}
8884+
8885+static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
8886+{
8887+ return (struct drm_psb_private *) dev->dev_private;
8888+}
8889+
8890+/*
8891+ *TTM glue. psb_ttm_glue.c
8892+ */
8893+
8894+extern int psb_open(struct inode *inode, struct file *filp);
8895+extern int psb_release(struct inode *inode, struct file *filp);
8896+extern int psb_mmap(struct file *filp, struct vm_area_struct *vma);
8897+
8898+extern int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
8899+ struct drm_file *file_priv);
8900+extern int psb_verify_access(struct ttm_buffer_object *bo,
8901+ struct file *filp);
8902+extern ssize_t psb_ttm_read(struct file *filp, char __user *buf,
8903+ size_t count, loff_t *f_pos);
8904+extern ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
8905+ size_t count, loff_t *f_pos);
8906+extern int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
8907+ struct drm_file *file_priv);
8908+extern int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
8909+ struct drm_file *file_priv);
8910+extern int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
8911+ struct drm_file *file_priv);
8912+extern int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
8913+ struct drm_file *file_priv);
8914+extern int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
8915+ struct drm_file *file_priv);
8916+extern int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
8917+ struct drm_file *file_priv);
8918+extern int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
8919+ struct drm_file *file_priv);
8920+extern int psb_pl_create_ioctl(struct drm_device *dev, void *data,
8921+ struct drm_file *file_priv);
8922+extern int psb_extension_ioctl(struct drm_device *dev, void *data,
8923+ struct drm_file *file_priv);
8924+extern int psb_ttm_global_init(struct drm_psb_private *dev_priv);
8925+extern void psb_ttm_global_release(struct drm_psb_private *dev_priv);
8926+/*
8927+ *MMU stuff.
8928+ */
8929+
8930+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
8931+ int trap_pagefaults,
8932+ int invalid_type,
8933+ struct drm_psb_private *dev_priv);
8934+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
8935+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
8936+ *driver);
8937+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
8938+ uint32_t gtt_start, uint32_t gtt_pages);
8939+extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset);
8940+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
8941+ int trap_pagefaults,
8942+ int invalid_type);
8943+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
8944+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
8945+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
8946+ unsigned long address,
8947+ uint32_t num_pages);
8948+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
8949+ uint32_t start_pfn,
8950+ unsigned long address,
8951+ uint32_t num_pages, int type);
8952+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
8953+ unsigned long *pfn);
8954+
8955+/*
8956+ *Enable / disable MMU for different requestors.
8957+ */
8958+
8959+extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver,
8960+ uint32_t mask);
8961+extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
8962+ uint32_t mask);
8963+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
8964+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
8965+ unsigned long address, uint32_t num_pages,
8966+ uint32_t desired_tile_stride,
8967+ uint32_t hw_tile_stride, int type);
8968+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
8969+ unsigned long address, uint32_t num_pages,
8970+ uint32_t desired_tile_stride,
8971+ uint32_t hw_tile_stride);
8972+/*
8973+ *psb_sgx.c
8974+ */
8975+
8976+extern int psb_blit_sequence(struct drm_psb_private *dev_priv,
8977+ uint32_t sequence);
8978+extern void psb_init_2d(struct drm_psb_private *dev_priv);
8979+extern int psb_idle_2d(struct drm_device *dev);
8980+extern int psb_idle_3d(struct drm_device *dev);
8981+extern int psb_emit_2d_copy_blit(struct drm_device *dev,
8982+ uint32_t src_offset,
8983+ uint32_t dst_offset, uint32_t pages,
8984+ int direction);
8985+extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
8986+ struct drm_file *file_priv);
8987+extern int psb_reg_submit(struct drm_psb_private *dev_priv,
8988+ uint32_t *regs, unsigned int cmds);
8989+extern int psb_submit_copy_cmdbuf(struct drm_device *dev,
8990+ struct ttm_buffer_object *cmd_buffer,
8991+ unsigned long cmd_offset,
8992+ unsigned long cmd_size, int engine,
8993+ uint32_t *copy_buffer);
8994+
8995+extern void psb_init_disallowed(void);
8996+extern void psb_fence_or_sync(struct drm_file *file_priv,
8997+ uint32_t engine,
8998+ uint32_t fence_types,
8999+ uint32_t fence_flags,
9000+ struct list_head *list,
9001+ struct psb_ttm_fence_rep *fence_arg,
9002+ struct ttm_fence_object **fence_p);
9003+extern int psb_validate_kernel_buffer(struct psb_context *context,
9004+ struct ttm_buffer_object *bo,
9005+ uint32_t fence_class,
9006+ uint64_t set_flags,
9007+ uint64_t clr_flags);
9008+extern void psb_init_ospm(struct drm_psb_private *dev_priv);
9009+extern int psb_try_power_down_sgx(struct drm_device *dev);
9010+extern int psb_page_flip(struct drm_device *dev, void *data,
9011+ struct drm_file *file_priv);
9012+extern int psb_flip_set_base(struct drm_psb_private *dev_priv, int pipe);
9013+
9014+/*
9015+ *psb_irq.c
9016+ */
9017+
9018+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
9019+extern void psb_irq_preinstall(struct drm_device *dev);
9020+extern int psb_irq_postinstall(struct drm_device *dev);
9021+extern void psb_irq_uninstall(struct drm_device *dev);
9022+extern void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands);
9023+extern int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands);
9024+extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
9025+extern int psb_vblank_wait2(struct drm_device *dev,
9026+ unsigned int *sequence);
9027+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
9028+
9029+extern int psb_enable_vblank(struct drm_device *dev, int crtc);
9030+extern void psb_disable_vblank(struct drm_device *dev, int crtc);
9031+void
9032+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
9033+
9034+void
9035+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
9036+
9037+extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
9038+/*
9039+ *psb_fence.c
9040+ */
9041+
9042+extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
9043+extern void psb_2D_irq_off(struct drm_psb_private *dev_priv);
9044+extern void psb_2D_irq_on(struct drm_psb_private *dev_priv);
9045+extern uint32_t psb_fence_advance_sequence(struct drm_device *dev,
9046+ uint32_t class);
9047+extern int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
9048+ uint32_t fence_class,
9049+ uint32_t flags, uint32_t *sequence,
9050+ unsigned long *timeout_jiffies);
9051+extern void psb_fence_error(struct drm_device *dev,
9052+ uint32_t class,
9053+ uint32_t sequence, uint32_t type, int error);
9054+extern int psb_ttm_fence_device_init(struct ttm_fence_device *fdev);
9055+
9056+/* MSVDX/Topaz stuff */
9057+extern int lnc_video_frameskip(struct drm_device *dev,
9058+ uint64_t user_pointer);
9059+extern int lnc_video_getparam(struct drm_device *dev, void *data,
9060+ struct drm_file *file_priv);
9061+extern int psb_try_power_down_topaz(struct drm_device *dev);
9062+extern int psb_try_power_down_msvdx(struct drm_device *dev);
9063+
9064+/*
9065+ *psb_gtt.c
9066+ */
9067+extern int psb_gtt_init(struct psb_gtt *pg, int resume);
9068+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
9069+ unsigned offset_pages, unsigned num_pages,
9070+ unsigned desired_tile_stride,
9071+ unsigned hw_tile_stride, int type);
9072+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
9073+ unsigned num_pages,
9074+ unsigned desired_tile_stride,
9075+ unsigned hw_tile_stride);
9076+
9077+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
9078+extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
9079+
9080+/*
9081+ *psb_fb.c
9082+ */
9083+extern int psbfb_probed(struct drm_device *dev);
9084+extern int psbfb_remove(struct drm_device *dev,
9085+ struct drm_framebuffer *fb);
9086+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
9087+ struct drm_file *file_priv);
9088+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
9089+ struct drm_file *file_priv);
9090+
9091+/*
9092+ *psb_reset.c
9093+ */
9094+
9095+extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d);
9096+extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
9097+extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
9098+extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
9099+extern void psb_lid_timer_init(struct drm_psb_private * dev_priv);
9100+extern void psb_lid_timer_takedown(struct drm_psb_private * dev_priv);
9101+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
9102+
9103+/*
9104+ *psb_xhw.c
9105+ */
9106+
9107+extern int psb_xhw_ioctl(struct drm_device *dev, void *data,
9108+ struct drm_file *file_priv);
9109+extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
9110+ struct drm_file *file_priv);
9111+extern int psb_xhw_init(struct drm_device *dev);
9112+extern void psb_xhw_takedown(struct drm_psb_private *dev_priv);
9113+extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
9114+ struct drm_file *file_priv, int closing);
9115+extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
9116+ struct psb_xhw_buf *buf,
9117+ uint32_t fire_flags,
9118+ uint32_t hw_context,
9119+ uint32_t *cookie,
9120+ uint32_t *oom_cmds,
9121+ uint32_t num_oom_cmds,
9122+ uint32_t offset,
9123+ uint32_t engine, uint32_t flags);
9124+extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
9125+ struct psb_xhw_buf *buf,
9126+ uint32_t fire_flags);
9127+extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
9128+ struct psb_xhw_buf *buf, uint32_t w,
9129+ uint32_t h, uint32_t *hw_cookie,
9130+ uint32_t *bo_size, uint32_t *clear_p_start,
9131+ uint32_t *clear_num_pages);
9132+
9133+extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
9134+ struct psb_xhw_buf *buf);
9135+extern int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
9136+ struct psb_xhw_buf *buf, uint32_t *value);
9137+extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
9138+ struct psb_xhw_buf *buf,
9139+ uint32_t pages,
9140+ uint32_t * hw_cookie,
9141+ uint32_t * size,
9142+ uint32_t * ta_min_size);
9143+extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
9144+ struct psb_xhw_buf *buf, uint32_t *cookie);
9145+extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
9146+ struct psb_xhw_buf *buf,
9147+ uint32_t *cookie,
9148+ uint32_t *bca,
9149+ uint32_t *rca, uint32_t *flags);
9150+extern int psb_xhw_vistest(struct drm_psb_private *dev_priv,
9151+ struct psb_xhw_buf *buf);
9152+extern int psb_xhw_handler(struct drm_psb_private *dev_priv);
9153+extern int psb_xhw_resume(struct drm_psb_private *dev_priv,
9154+ struct psb_xhw_buf *buf);
9155+extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
9156+ struct psb_xhw_buf *buf, uint32_t *cookie);
9157+extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
9158+ struct psb_xhw_buf *buf,
9159+ uint32_t flags,
9160+ uint32_t param_offset,
9161+ uint32_t pt_offset, uint32_t *hw_cookie);
9162+extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
9163+ struct psb_xhw_buf *buf);
9164+
9165+/*
9166+ *psb_schedule.c: HW bug fixing.
9167+ */
9168+
9169+#ifdef FIX_TG_16
9170+
9171+extern void psb_2d_unlock(struct drm_psb_private *dev_priv);
9172+extern void psb_2d_lock(struct drm_psb_private *dev_priv);
9173+extern int psb_2d_trylock(struct drm_psb_private *dev_priv);
9174+extern void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv);
9176+extern void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
9177+#else
9178+
9179+#define psb_2d_lock(_dev_priv) mutex_lock(&(_dev_priv)->mutex_2d)
9180+#define psb_2d_unlock(_dev_priv) mutex_unlock(&(_dev_priv)->mutex_2d)
9181+
9182+#endif
9183+
9184+/* modesetting */
9185+extern void psb_modeset_init(struct drm_device *dev);
9186+extern void psb_modeset_cleanup(struct drm_device *dev);
9187+
9188+/* psb_bl.c */
9189+int psb_backlight_init(struct drm_device *dev);
9190+void psb_backlight_exit(void);
9191+int psb_set_brightness(struct backlight_device *bd);
9192+int psb_get_brightness(struct backlight_device *bd);
9193+
9194+/*
9195+ *Utilities
9196+ */
9197+#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
9198+
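+/*
+ * Message-bus helpers: MSG_READ32/MSG_WRITE32 issue sideband register
+ * accesses through the legacy PCI configuration ports (0xCF8 address,
+ * 0xCFC data), encoding the opcode, port and register offset into a
+ * message control value.
+ */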
9199+static inline u32 MSG_READ32(uint port, uint offset)
9200+{
9201+ int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
9202+ outl(0x800000D0, 0xCF8);
9203+ outl(mcr, 0xCFC);
9204+ outl(0x800000D4, 0xCF8);
9205+ return inl(0xcfc);
9206+}
9207+static inline void MSG_WRITE32(uint port, uint offset, u32 value)
9208+{
9209+ int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
9210+ outl(0x800000D4, 0xCF8);
9211+ outl(value, 0xcfc);
9212+ outl(0x800000D0, 0xCF8);
9213+ outl(mcr, 0xCFC);
9214+}
9215+
9216+static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
9217+{
9218+ struct drm_psb_private *dev_priv = dev->dev_private;
9219+
9220+ return ioread32(dev_priv->vdc_reg + (reg));
9221+}
9222+
9223+#define REG_READ(reg) REGISTER_READ(dev, (reg))
9224+static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
9225+ uint32_t val)
9226+{
9227+ struct drm_psb_private *dev_priv = dev->dev_private;
9228+
9229+ iowrite32((val), dev_priv->vdc_reg + (reg));
9230+}
9231+
9232+#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
9233+
9234+static inline void REGISTER_WRITE16(struct drm_device *dev,
9235+ uint32_t reg, uint32_t val)
9236+{
9237+ struct drm_psb_private *dev_priv = dev->dev_private;
9238+
9239+ iowrite16((val), dev_priv->vdc_reg + (reg));
9240+}
9241+
9242+#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val))
9243+
9244+static inline void REGISTER_WRITE8(struct drm_device *dev,
9245+ uint32_t reg, uint32_t val)
9246+{
9247+ struct drm_psb_private *dev_priv = dev->dev_private;
9248+
9249+ iowrite8((val), dev_priv->vdc_reg + (reg));
9250+}
9251+
9252+#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
9253+
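+/*
+ * PSB_ALIGN_TO rounds _val up to the next multiple of _align (a power of
+ * two); the PSB_WVDC32/PSB_RVDC32 helpers access the display controller
+ * (VDC) MMIO space via dev_priv->vdc_reg.
+ */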
9254+#define PSB_ALIGN_TO(_val, _align) \
9255+ (((_val) + ((_align) - 1)) & ~((_align) - 1))
9256+#define PSB_WVDC32(_val, _offs) \
9257+ iowrite32(_val, dev_priv->vdc_reg + (_offs))
9258+#define PSB_RVDC32(_offs) \
9259+ ioread32(dev_priv->vdc_reg + (_offs))
9260+
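+/*
+ * Optional debug aid: with TRAP_SGX_PM_FAULT defined, every SGX register
+ * access first checks the APM power status and logs an error (plus a long
+ * delay) if the SGX island is powered off.
+ */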
9261+/* #define TRAP_SGX_PM_FAULT 1 */
9262+#ifdef TRAP_SGX_PM_FAULT
9263+#define PSB_WSGX32(_val, _offs) \
9264+{ \
9265+ if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
9266+ printk(KERN_ERR "access sgx when it's off!!(WRITE) %s, %d\n", \
9267+ __FILE__, __LINE__); \
9268+ mdelay(1000); \
9269+ } \
9270+ iowrite32(_val, dev_priv->sgx_reg + (_offs)); \
9271+}
9272+#define PSB_RSGX32(_offs) \
9273+({ \
9274+ if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
9275+ printk(KERN_ERR "access sgx when it's off!! (READ) %s, %d\n", \
9276+ __FILE__, __LINE__); \
9277+ mdelay(1000); \
9278+ } \
9279+ ioread32(dev_priv->sgx_reg + (_offs)); \
9280+})
9281+#else
9282+#define PSB_WSGX32(_val, _offs) \
9283+ iowrite32(_val, dev_priv->sgx_reg + (_offs))
9284+#define PSB_RSGX32(_offs) \
9285+ ioread32(dev_priv->sgx_reg + (_offs))
9286+#endif
9287+
9288+#define PSB_WMSVDX32(_val, _offs) \
9289+ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
9290+#define PSB_RMSVDX32(_offs) \
9291+ ioread32(dev_priv->msvdx_reg + (_offs))
9292+
9293+#define PSB_ALPL(_val, _base) \
9294+ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
9295+#define PSB_ALPLM(_val, _base) \
9296+ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
9297+
9298+#define PSB_D_RENDER (1 << 16)
9299+
9300+#define PSB_D_GENERAL (1 << 0)
9301+#define PSB_D_INIT (1 << 1)
9302+#define PSB_D_IRQ (1 << 2)
9303+#define PSB_D_FW (1 << 3)
9304+#define PSB_D_PERF (1 << 4)
9305+#define PSB_D_TMP (1 << 5)
9306+#define PSB_D_PM (1 << 6)
9307+
9308+extern int drm_psb_debug;
9309+extern int drm_psb_no_fb;
9310+extern int drm_psb_disable_vsync;
9311+extern int drm_idle_check_interval;
9312+extern int drm_psb_ospm;
9313+
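+/*
+ * The PSB_DEBUG_* wrappers map to PSB_DEBUG() with one of the PSB_D_*
+ * category bits; output is only emitted when that bit is set in the
+ * drm_psb_debug mask and DRM_DEBUG_CODE is enabled.
+ */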
9314+#define PSB_DEBUG_FW(_fmt, _arg...) \
9315+ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
9316+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
9317+ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
9318+#define PSB_DEBUG_INIT(_fmt, _arg...) \
9319+ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
9320+#define PSB_DEBUG_IRQ(_fmt, _arg...) \
9321+ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
9322+#define PSB_DEBUG_RENDER(_fmt, _arg...) \
9323+ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
9324+#define PSB_DEBUG_PERF(_fmt, _arg...) \
9325+ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
9326+#define PSB_DEBUG_TMP(_fmt, _arg...) \
9327+ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
9328+#define PSB_DEBUG_PM(_fmt, _arg...) \
9329+ PSB_DEBUG(PSB_D_PM, _fmt, ##_arg)
9330+
9331+#if DRM_DEBUG_CODE
9332+#define PSB_DEBUG(_flag, _fmt, _arg...) \
9333+ do { \
9334+ if (unlikely((_flag) & drm_psb_debug)) \
9335+ printk(KERN_DEBUG \
9336+ "[psb:0x%02x:%s] " _fmt , _flag, \
9337+ __func__ , ##_arg); \
9338+ } while (0)
9339+#else
9340+#define PSB_DEBUG(_flag, _fmt, _arg...) do { } while (0)
9341+#endif
9342+
9343+#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
9344+ ((dev)->pci_device == 0x8109))
9345+
9346+#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
9347+
9348+#endif
9349diff --git a/drivers/gpu/drm/psb/psb_fb.c b/drivers/gpu/drm/psb/psb_fb.c
9350new file mode 100644
9351index 0000000..a29694e
9352--- /dev/null
9353+++ b/drivers/gpu/drm/psb/psb_fb.c
9354@@ -0,0 +1,1833 @@
9355+/**************************************************************************
9356+ * Copyright (c) 2007, Intel Corporation.
9357+ * All Rights Reserved.
9358+ *
9359+ * This program is free software; you can redistribute it and/or modify it
9360+ * under the terms and conditions of the GNU General Public License,
9361+ * version 2, as published by the Free Software Foundation.
9362+ *
9363+ * This program is distributed in the hope it will be useful, but WITHOUT
9364+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9365+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9366+ * more details.
9367+ *
9368+ * You should have received a copy of the GNU General Public License along with
9369+ * this program; if not, write to the Free Software Foundation, Inc.,
9370+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9371+ *
9372+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
9373+ * develop this driver.
9374+ *
9375+ **************************************************************************/
9376+
9377+#include <linux/module.h>
9378+#include <linux/kernel.h>
9379+#include <linux/errno.h>
9380+#include <linux/string.h>
9381+#include <linux/mm.h>
9382+#include <linux/tty.h>
9383+#include <linux/slab.h>
9384+#include <linux/delay.h>
9385+#include <linux/fb.h>
9386+#include <linux/init.h>
9387+#include <linux/console.h>
9388+
9389+#include <drm/drmP.h>
9390+#include <drm/drm.h>
9391+#include <drm/drm_crtc.h>
9392+
9393+#include "psb_drv.h"
9394+#include "psb_intel_reg.h"
9395+#include "psb_intel_drv.h"
9396+#include "ttm/ttm_userobj_api.h"
9397+#include "psb_fb.h"
9398+#include "psb_sgx.h"
9399+#include "psb_powermgmt.h"
9400+
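+/*
+ * Fill in the RGBA bitfield layout of @var for the given colour depth;
+ * returns -EINVAL for depths the driver does not handle.
+ */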
9401+static int fill_fb_bitfield(struct fb_var_screeninfo *var, int depth)
9402+{
9403+ switch (depth) {
9404+ case 8:
9405+ var->red.offset = 0;
9406+ var->green.offset = 0;
9407+ var->blue.offset = 0;
9408+ var->red.length = 8;
9409+ var->green.length = 8;
9410+ var->blue.length = 8;
9411+ var->transp.length = 0;
9412+ var->transp.offset = 0;
9413+ break;
9414+ case 15:
9415+ var->red.offset = 10;
9416+ var->green.offset = 5;
9417+ var->blue.offset = 0;
9418+ var->red.length = 5;
9419+ var->green.length = 5;
9420+ var->blue.length = 5;
9421+ var->transp.length = 1;
9422+ var->transp.offset = 15;
9423+ break;
9424+ case 16:
9425+ var->red.offset = 11;
9426+ var->green.offset = 5;
9427+ var->blue.offset = 0;
9428+ var->red.length = 5;
9429+ var->green.length = 6;
9430+ var->blue.length = 5;
9431+ var->transp.length = 0;
9432+ var->transp.offset = 0;
9433+ break;
9434+ case 24:
9435+ var->red.offset = 16;
9436+ var->green.offset = 8;
9437+ var->blue.offset = 0;
9438+ var->red.length = 8;
9439+ var->green.length = 8;
9440+ var->blue.length = 8;
9441+ var->transp.length = 0;
9442+ var->transp.offset = 0;
9443+ break;
9444+ case 32:
9445+ var->red.offset = 16;
9446+ var->green.offset = 8;
9447+ var->blue.offset = 0;
9448+ var->red.length = 8;
9449+ var->green.length = 8;
9450+ var->blue.length = 8;
9451+ var->transp.length = 8;
9452+ var->transp.offset = 24;
9453+ break;
9454+ default:
9455+ return -EINVAL;
9456+ }
9457+
9458+ return 0;
9459+}
9460+
9461+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
9462+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
9463+ struct drm_file *file_priv,
9464+ unsigned int *handle);
9465+
9466+static const struct drm_framebuffer_funcs psb_fb_funcs = {
9467+ .destroy = psb_user_framebuffer_destroy,
9468+ .create_handle = psb_user_framebuffer_create_handle,
9469+};
9470+
9471+struct psbfb_par {
9472+ struct drm_device *dev;
9473+ struct psb_framebuffer *psbfb;
9474+
9475+ int dpms_state;
9476+
9477+ int crtc_count;
9478+ /* crtc currently bound to this */
9479+ uint32_t crtc_ids[2];
9480+};
9481+
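+/* Scale a 16-bit colormap component down to a _width-bit hardware value. */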
9482+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
9483+
9484+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
9485+ unsigned blue, unsigned transp,
9486+ struct fb_info *info)
9487+{
9488+ struct psbfb_par *par = info->par;
9489+ struct drm_framebuffer *fb = &par->psbfb->base;
9490+ uint32_t v;
9491+
9492+ if (!fb)
9493+ return -ENOMEM;
9494+
9495+ if (regno > 255)
9496+ return 1;
9497+
9498+#if 0 /* JB: do not drop; check that this works */
9499+ if (fb->bits_per_pixel == 8) {
9500+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9501+ head) {
9502+ for (i = 0; i < par->crtc_count; i++)
9503+ if (crtc->base.id == par->crtc_ids[i])
9504+ break;
9505+
9506+ if (i == par->crtc_count)
9507+ continue;
9508+
9509+ if (crtc->funcs->gamma_set)
9510+ crtc->funcs->gamma_set(crtc, red, green,
9511+ blue, regno);
9512+ }
9513+ return 0;
9514+ }
9515+#endif
9516+
9517+ red = CMAP_TOHW(red, info->var.red.length);
9518+ blue = CMAP_TOHW(blue, info->var.blue.length);
9519+ green = CMAP_TOHW(green, info->var.green.length);
9520+ transp = CMAP_TOHW(transp, info->var.transp.length);
9521+
9522+ v = (red << info->var.red.offset) |
9523+ (green << info->var.green.offset) |
9524+ (blue << info->var.blue.offset) |
9525+ (transp << info->var.transp.offset);
9526+
9527+ if (regno < 16) {
9528+ switch (fb->bits_per_pixel) {
9529+ case 16:
9530+ ((uint32_t *) info->pseudo_palette)[regno] = v;
9531+ break;
9532+ case 24:
9533+ case 32:
9534+ ((uint32_t *) info->pseudo_palette)[regno] = v;
9535+ break;
9536+ }
9537+ }
9538+
9539+ return 0;
9540+}
9541+
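+/*
+ * Find a mode on the connector currently driving @crtc that matches the
+ * requested resolution; on Poulsbo the connector's preferred mode is
+ * returned if present, otherwise the last match.
+ */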
9542+static struct drm_display_mode *psbfb_find_first_mode(struct
9543+ fb_var_screeninfo
9544+ *var,
9545+ struct fb_info *info,
9546+ struct drm_crtc
9547+ *crtc)
9548+{
9549+ struct psbfb_par *par = info->par;
9550+ struct drm_device *dev = par->dev;
9551+ struct drm_display_mode *drm_mode;
9552+ struct drm_display_mode *preferred_mode = NULL;
9553+ struct drm_display_mode *last_mode = NULL;
9554+ struct drm_connector *connector;
9555+ int found;
9556+
9557+ found = 0;
9558+ list_for_each_entry(connector, &dev->mode_config.connector_list,
9559+ head) {
9560+ if (connector->encoder && connector->encoder->crtc == crtc) {
9561+ found = 1;
9562+ break;
9563+ }
9564+ }
9565+
9566+ /* found no connector, bail */
9567+ if (!found)
9568+ return NULL;
9569+
9570+ found = 0;
9571+ list_for_each_entry(drm_mode, &connector->modes, head) {
9572+ if (drm_mode->hdisplay == var->xres &&
9573+ drm_mode->vdisplay == var->yres
9574+ && drm_mode->clock != 0) {
9575+ found = 1;
9576+ last_mode = drm_mode;
9577+ if(IS_POULSBO(dev)) {
9578+ if(last_mode->type & DRM_MODE_TYPE_PREFERRED) {
9579+ preferred_mode = last_mode;
9580+ }
9581+ }
9582+ }
9583+ }
9584+
9585+ /* No mode matching mode found */
9586+ if (!found)
9587+ return NULL;
9588+
9589+ if(IS_POULSBO(dev)) {
9590+ if(preferred_mode)
9591+ return preferred_mode;
9592+ else
9593+ return last_mode;
9594+ } else {
9595+ return last_mode;
9596+ }
9597+}
9598+
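+/*
+ * Validate the requested video mode: reject virtual resolutions larger than
+ * the visible ones, unsupported depths and sizes larger than the backing
+ * buffer object, and check that each bound CRTC has a matching output mode.
+ */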
9599+static int psbfb_check_var(struct fb_var_screeninfo *var,
9600+ struct fb_info *info)
9601+{
9602+ struct psbfb_par *par = info->par;
9603+ struct psb_framebuffer *psbfb = par->psbfb;
9604+ struct drm_device *dev = par->dev;
9605+ int ret;
9606+ int depth;
9607+ int pitch;
9608+ int bpp = var->bits_per_pixel;
9609+
9610+ if (!psbfb)
9611+ return -ENOMEM;
9612+
9613+ if (!var->pixclock)
9614+ return -EINVAL;
9615+
9616+ /* don't support virtuals for now */
9617+ if (var->xres_virtual > var->xres)
9618+ return -EINVAL;
9619+
9620+ if (var->yres_virtual > var->yres)
9621+ return -EINVAL;
9622+
9623+ switch (bpp) {
9624+#if 0 /* JB: for now only support true color */
9625+ case 8:
9626+ depth = 8;
9627+ break;
9628+#endif
9629+ case 16:
9630+ depth = (var->green.length == 6) ? 16 : 15;
9631+ break;
9632+ case 24: /* assume this is 32bpp / depth 24 */
9633+ bpp = 32;
9634+ /* fallthrough */
9635+ case 32:
9636+ depth = (var->transp.length > 0) ? 32 : 24;
9637+ break;
9638+ default:
9639+ return -EINVAL;
9640+ }
9641+
9642+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
9643+
9644+ /* Check that we can resize */
9645+ if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) {
9646+#if 1
9647+ /* Need to resize the fb object.
9648+ * But the generic fbdev code doesn't really understand
9649+ * that we can do this. So disable for now.
9650+ */
9651+ DRM_INFO("Can't support requested size, too big!\n");
9652+ return -EINVAL;
9653+#else
9654+ struct drm_psb_private *dev_priv = psb_priv(dev);
9655+ struct ttm_bo_device *bdev = &dev_priv->bdev;
9656+ struct ttm_buffer_object *fbo = NULL;
9657+ struct ttm_bo_kmap_obj tmp_kmap;
9658+
9659+ /* a temporary BO to check if we could resize in setpar.
9660+ * Therefore no need to set NO_EVICT.
9661+ */
9662+ ret = ttm_buffer_object_create(bdev,
9663+ pitch * var->yres,
9664+ ttm_bo_type_kernel,
9665+ TTM_PL_FLAG_TT |
9666+ TTM_PL_FLAG_VRAM |
9667+ TTM_PL_FLAG_NO_EVICT,
9668+ 0, 0, &fbo);
9669+ if (ret || !fbo)
9670+ return -ENOMEM;
9671+
9672+ ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
9673+ if (ret) {
9674+ ttm_bo_usage_deref_unlocked(&fbo);
9675+ return -EINVAL;
9676+ }
9677+
9678+ ttm_bo_kunmap(&tmp_kmap);
9679+ /* destroy our current fbo! */
9680+ ttm_bo_usage_deref_unlocked(&fbo);
9681+#endif
9682+ }
9683+
9684+ ret = fill_fb_bitfield(var, depth);
9685+ if (ret)
9686+ return ret;
9687+
9688+#if 1
9689+	/* Walk the output mode list and look for a matching mode; if none is
9690+	 * found, bail out. The alternative (the disabled #else branch below)
9691+	 * skips this check and lets the set_par code build a mode from the
9692+	 * incoming parameters instead, which may not be desired by some.
9693+	 */
9694+ {
9695+ struct drm_crtc *crtc;
9696+ int i;
9697+
9698+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9699+ head) {
9700+ struct psb_intel_crtc *psb_intel_crtc =
9701+ to_psb_intel_crtc(crtc);
9702+
9703+ for (i = 0; i < par->crtc_count; i++)
9704+ if (crtc->base.id == par->crtc_ids[i])
9705+ break;
9706+
9707+ if (i == par->crtc_count)
9708+ continue;
9709+
9710+ if (psb_intel_crtc->mode_set.num_connectors == 0)
9711+ continue;
9712+
9713+ if (!psbfb_find_first_mode(&info->var, info, crtc))
9714+ return -EINVAL;
9715+ }
9716+ }
9717+#else
9718+ (void) i;
9719+ (void) dev; /* silence warnings */
9720+ (void) crtc;
9721+ (void) drm_mode;
9722+ (void) connector;
9723+#endif
9724+
9725+ return 0;
9726+}
9727+
9728+/* this will let fbcon do the mode init */
9729+static int psbfb_set_par(struct fb_info *info)
9730+{
9731+ struct psbfb_par *par = info->par;
9732+ struct psb_framebuffer *psbfb = par->psbfb;
9733+ struct drm_framebuffer *fb = &psbfb->base;
9734+ struct drm_device *dev = par->dev;
9735+ struct fb_var_screeninfo *var = &info->var;
9736+ struct drm_psb_private *dev_priv = dev->dev_private;
9737+ struct drm_display_mode *drm_mode;
9738+ int pitch;
9739+ int depth;
9740+ int bpp = var->bits_per_pixel;
9741+
9742+ if (!fb)
9743+ return -ENOMEM;
9744+
9745+ switch (bpp) {
9746+ case 8:
9747+ depth = 8;
9748+ break;
9749+ case 16:
9750+ depth = (var->green.length == 6) ? 16 : 15;
9751+ break;
9752+ case 24: /* assume this is 32bpp / depth 24 */
9753+ bpp = 32;
9754+ /* fallthrough */
9755+ case 32:
9756+ depth = (var->transp.length > 0) ? 32 : 24;
9757+ break;
9758+ default:
9759+ DRM_ERROR("Illegal BPP\n");
9760+ return -EINVAL;
9761+ }
9762+
9763+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
9764+
9765+ if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) {
9766+#if 1
9767+ /* Need to resize the fb object.
9768+ * But the generic fbdev code doesn't really understand
9769+ * that we can do this. So disable for now.
9770+ */
9771+ DRM_INFO("Can't support requested size, too big!\n");
9772+ return -EINVAL;
9773+#else
9774+ int ret;
9775+ struct ttm_buffer_object *fbo = NULL, *tfbo;
9776+ struct ttm_bo_kmap_obj tmp_kmap, tkmap;
9777+
9778+ ret = ttm_buffer_object_create(bdev,
9779+ pitch * var->yres,
9780+ ttm_bo_type_kernel,
9781+ TTM_PL_FLAG_MEM_TT |
9782+ TTM_PL_FLAG_MEM_VRAM |
9783+ TTM_PL_FLAG_NO_EVICT,
9784+ 0, 0, &fbo);
9785+ if (ret || !fbo) {
9786+ DRM_ERROR
9787+ ("failed to allocate new resized framebuffer\n");
9788+ return -ENOMEM;
9789+ }
9790+
9791+ ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
9792+ if (ret) {
9793+ DRM_ERROR("failed to kmap framebuffer.\n");
9794+ ttm_bo_usage_deref_unlocked(&fbo);
9795+ return -EINVAL;
9796+ }
9797+
9798+ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n",
9799+ fb->width, fb->height, fb->offset, fbo);
9800+
9801+ /* set new screen base */
9802+ info->screen_base = tmp_kmap.virtual;
9803+
9804+ tkmap = fb->kmap;
9805+ fb->kmap = tmp_kmap;
9806+ ttm_bo_kunmap(&tkmap);
9807+
9808+ tfbo = fb->bo;
9809+ fb->bo = fbo;
9810+ ttm_bo_usage_deref_unlocked(&tfbo);
9811+#endif
9812+ }
9813+
9814+ psbfb->offset = psbfb->bo->offset - dev_priv->pg->gatt_start;
9815+ fb->width = var->xres;
9816+ fb->height = var->yres;
9817+ fb->bits_per_pixel = bpp;
9818+ fb->pitch = pitch;
9819+ fb->depth = depth;
9820+
9821+ info->fix.line_length = psbfb->base.pitch;
9822+ info->fix.visual =
9823+ (psbfb->base.depth ==
9824+ 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
9825+
9826+ /* some fbdev's apps don't want these to change */
9827+ info->fix.smem_start = dev->mode_config.fb_base + psbfb->offset;
9828+
9829+#if 0
9830+ /* relates to resize - disable */
9831+ info->fix.smem_len = info->fix.line_length * var->yres;
9832+ info->screen_size = info->fix.smem_len; /* ??? */
9833+#endif
9834+
9835+	/* Should we walk the output's mode list or just create our own?
9836+	 * For now a matching mode is looked up from the output list in the
9837+	 * loop below (psbfb_find_first_mode) and then applied to each bound
9838+	 * CRTC via set_config.
9839+	 */
9840+#if 1
9841+	/* This code is now in the for loop further down. */
9842+#endif
9843+
9844+ {
9845+ struct drm_crtc *crtc;
9846+ int ret;
9847+ int i;
9848+
9849+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9850+ head) {
9851+ struct psb_intel_crtc *psb_intel_crtc =
9852+ to_psb_intel_crtc(crtc);
9853+
9854+ for (i = 0; i < par->crtc_count; i++)
9855+ if (crtc->base.id == par->crtc_ids[i])
9856+ break;
9857+
9858+ if (i == par->crtc_count)
9859+ continue;
9860+
9861+ if (psb_intel_crtc->mode_set.num_connectors == 0)
9862+ continue;
9863+
9864+#if 1
9865+ drm_mode =
9866+ psbfb_find_first_mode(&info->var, info, crtc);
9867+ if (!drm_mode)
9868+ DRM_ERROR("No matching mode found\n");
9869+ psb_intel_crtc->mode_set.mode = drm_mode;
9870+#endif
9871+
9872+#if 0 /* FIXME: TH */
9873+ if (crtc->fb == psb_intel_crtc->mode_set.fb) {
9874+#endif
9875+ DRM_DEBUG
9876+ ("setting mode on crtc %p with id %u\n",
9877+ crtc, crtc->base.id);
9878+ ret =
9879+ crtc->funcs->
9880+ set_config(&psb_intel_crtc->mode_set);
9881+ if (ret) {
9882+ DRM_ERROR("Failed setting mode\n");
9883+ return ret;
9884+ }
9885+#if 0
9886+ }
9887+#endif
9888+ }
9889+ DRM_DEBUG("Set par returned OK.\n");
9890+ return 0;
9891+ }
9892+
9893+ return 0;
9894+}
9895+#if 0
9896+static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
9897+ unsigned size)
9898+{
9899+ int ret = 0;
9900+ int i;
9901+ unsigned submit_size;
9902+
9903+ while (size > 0) {
9904+ submit_size = (size < 0x60) ? size : 0x60;
9905+ size -= submit_size;
9906+ ret = psb_2d_wait_available(dev_priv, submit_size);
9907+ if (ret)
9908+ return ret;
9909+
9910+ submit_size <<= 2;
9911+ for (i = 0; i < submit_size; i += 4) {
9912+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
9913+ }
9914+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
9915+ }
9916+ return 0;
9917+}
9918+
9919+static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
9920+ uint32_t dst_offset, uint32_t dst_stride,
9921+ uint32_t dst_format, uint16_t dst_x,
9922+ uint16_t dst_y, uint16_t size_x,
9923+ uint16_t size_y, uint32_t fill)
9924+{
9925+ uint32_t buffer[10];
9926+ uint32_t *buf;
9927+
9928+ buf = buffer;
9929+
9930+ *buf++ = PSB_2D_FENCE_BH;
9931+
9932+ *buf++ =
9933+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
9934+ PSB_2D_DST_STRIDE_SHIFT);
9935+ *buf++ = dst_offset;
9936+
9937+ *buf++ =
9938+ PSB_2D_BLIT_BH |
9939+ PSB_2D_ROT_NONE |
9940+ PSB_2D_COPYORDER_TL2BR |
9941+ PSB_2D_DSTCK_DISABLE |
9942+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
9943+
9944+ *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
9945+ *buf++ =
9946+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
9947+ PSB_2D_DST_YSTART_SHIFT);
9948+ *buf++ =
9949+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
9950+ PSB_2D_DST_YSIZE_SHIFT);
9951+ *buf++ = PSB_2D_FLUSH_BH;
9952+
9953+ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
9954+}
9955+
9956+static void psbfb_fillrect_accel(struct fb_info *info,
9957+ const struct fb_fillrect *r)
9958+{
9959+ struct psbfb_par *par = info->par;
9960+ struct psb_framebuffer *psbfb = par->psbfb;
9961+ struct drm_framebuffer *fb = &psbfb->base;
9962+ struct drm_psb_private *dev_priv = par->dev->dev_private;
9963+ uint32_t offset;
9964+ uint32_t stride;
9965+ uint32_t format;
9966+
9967+ if (!fb)
9968+ return;
9969+
9970+ offset = psbfb->offset;
9971+ stride = fb->pitch;
9972+
9973+ switch (fb->depth) {
9974+ case 8:
9975+ format = PSB_2D_DST_332RGB;
9976+ break;
9977+ case 15:
9978+ format = PSB_2D_DST_555RGB;
9979+ break;
9980+ case 16:
9981+ format = PSB_2D_DST_565RGB;
9982+ break;
9983+ case 24:
9984+ case 32:
9985+		/* this is wrong, but since we don't do blending it's okay */
9986+ format = PSB_2D_DST_8888ARGB;
9987+ break;
9988+ default:
9989+ /* software fallback */
9990+ cfb_fillrect(info, r);
9991+ return;
9992+ }
9993+
9994+ psb_accel_2d_fillrect(dev_priv,
9995+ offset, stride, format,
9996+ r->dx, r->dy, r->width, r->height, r->color);
9997+}
9998+
9999+static void psbfb_fillrect(struct fb_info *info,
10000+ const struct fb_fillrect *rect)
10001+{
10002+ struct psbfb_par *par = info->par;
10003+ struct drm_device *dev = par->dev;
10004+ struct drm_psb_private *dev_priv = dev->dev_private;
10005+
10006+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
10007+ return;
10008+
10009+ if (info->flags & FBINFO_HWACCEL_DISABLED)
10010+ return cfb_fillrect(info, rect);
10011+ /*
10012+ * psbfb_fillrect is atomic, so we must do an instantaneous
10013+ * check that the hardware is powered on
10014+ */
10015+ if (powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND) || powermgmt_is_resume_in_progress(PSB_GRAPHICS_ISLAND) ||
10016+ !powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND))
10017+ return cfb_fillrect(info, rect);
10018+ if (psb_2d_trylock(dev_priv)) {
10019+ psbfb_fillrect_accel(info, rect);
10020+ psb_2d_unlock(dev_priv);
10021+ if (drm_psb_ospm && IS_MRST(dev))
10022+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
10023+ } else
10024+ cfb_fillrect(info, rect);
10025+}
10026+
10027+uint32_t psb_accel_2d_copy_direction(int xdir, int ydir)
10028+{
10029+ if (xdir < 0)
10030+ return (ydir <
10031+ 0) ? PSB_2D_COPYORDER_BR2TL :
10032+ PSB_2D_COPYORDER_TR2BL;
10033+ else
10034+ return (ydir <
10035+ 0) ? PSB_2D_COPYORDER_BL2TR :
10036+ PSB_2D_COPYORDER_TL2BR;
10037+}
10038+
10039+/*
10040+ * @srcOffset in bytes
10041+ * @srcStride in bytes
10042+ * @srcFormat psb 2D format defines
10043+ * @dstOffset in bytes
10044+ * @dstStride in bytes
10045+ * @dstFormat psb 2D format defines
10046+ * @srcX offset in pixels
10047+ * @srcY offset in pixels
10048+ * @dstX offset in pixels
10049+ * @dstY offset in pixels
10050+ * @sizeX of the copied area
10051+ * @sizeY of the copied area
10052+ */
10053+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
10054+ uint32_t src_offset, uint32_t src_stride,
10055+ uint32_t src_format, uint32_t dst_offset,
10056+ uint32_t dst_stride, uint32_t dst_format,
10057+ uint16_t src_x, uint16_t src_y,
10058+ uint16_t dst_x, uint16_t dst_y,
10059+ uint16_t size_x, uint16_t size_y)
10060+{
10061+ uint32_t blit_cmd;
10062+ uint32_t buffer[10];
10063+ uint32_t *buf;
10064+ uint32_t direction;
10065+
10066+ buf = buffer;
10067+
10068+ direction =
10069+ psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
10070+
10071+ if (direction == PSB_2D_COPYORDER_BR2TL ||
10072+ direction == PSB_2D_COPYORDER_TR2BL) {
10073+ src_x += size_x - 1;
10074+ dst_x += size_x - 1;
10075+ }
10076+ if (direction == PSB_2D_COPYORDER_BR2TL ||
10077+ direction == PSB_2D_COPYORDER_BL2TR) {
10078+ src_y += size_y - 1;
10079+ dst_y += size_y - 1;
10080+ }
10081+
10082+ blit_cmd =
10083+ PSB_2D_BLIT_BH |
10084+ PSB_2D_ROT_NONE |
10085+ PSB_2D_DSTCK_DISABLE |
10086+ PSB_2D_SRCCK_DISABLE |
10087+ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
10088+
10089+ *buf++ = PSB_2D_FENCE_BH;
10090+ *buf++ =
10091+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
10092+ PSB_2D_DST_STRIDE_SHIFT);
10093+ *buf++ = dst_offset;
10094+ *buf++ =
10095+ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
10096+ PSB_2D_SRC_STRIDE_SHIFT);
10097+ *buf++ = src_offset;
10098+ *buf++ =
10099+ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
10100+ (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
10101+ *buf++ = blit_cmd;
10102+ *buf++ =
10103+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
10104+ PSB_2D_DST_YSTART_SHIFT);
10105+ *buf++ =
10106+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
10107+ PSB_2D_DST_YSIZE_SHIFT);
10108+ *buf++ = PSB_2D_FLUSH_BH;
10109+
10110+ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
10111+}
10112+
10113+static void psbfb_copyarea_accel(struct fb_info *info,
10114+ const struct fb_copyarea *a)
10115+{
10116+ struct psbfb_par *par = info->par;
10117+ struct psb_framebuffer *psbfb = par->psbfb;
10118+ struct drm_framebuffer *fb = &psbfb->base;
10119+ struct drm_psb_private *dev_priv = par->dev->dev_private;
10120+ uint32_t offset;
10121+ uint32_t stride;
10122+ uint32_t src_format;
10123+ uint32_t dst_format;
10124+
10125+ if (!fb)
10126+ return;
10127+
10128+ offset = psbfb->offset;
10129+ stride = fb->pitch;
10130+
10131+ switch (fb->depth) {
10132+ case 8:
10133+ src_format = PSB_2D_SRC_332RGB;
10134+ dst_format = PSB_2D_DST_332RGB;
10135+ break;
10136+ case 15:
10137+ src_format = PSB_2D_SRC_555RGB;
10138+ dst_format = PSB_2D_DST_555RGB;
10139+ break;
10140+ case 16:
10141+ src_format = PSB_2D_SRC_565RGB;
10142+ dst_format = PSB_2D_DST_565RGB;
10143+ break;
10144+ case 24:
10145+ case 32:
10146+ /* this is wrong, but since we don't do blending it's okay */
10147+ src_format = PSB_2D_SRC_8888ARGB;
10148+ dst_format = PSB_2D_DST_8888ARGB;
10149+ break;
10150+ default:
10151+ /* software fallback */
10152+ cfb_copyarea(info, a);
10153+ return;
10154+ }
10155+
10156+ psb_accel_2d_copy(dev_priv,
10157+ offset, stride, src_format,
10158+ offset, stride, dst_format,
10159+ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
10160+}
10161+
10162+static void psbfb_copyarea(struct fb_info *info,
10163+ const struct fb_copyarea *region)
10164+{
10165+ struct psbfb_par *par = info->par;
10166+ struct drm_device *dev = par->dev;
10167+ struct drm_psb_private *dev_priv = dev->dev_private;
10168+
10169+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
10170+ return;
10171+
10172+ if (info->flags & FBINFO_HWACCEL_DISABLED)
10173+ return cfb_copyarea(info, region);
10174+ /*
10175+ * psbfb_copyarea is atomic, so we must do an instantaneous
10176+ * check that the hardware is powered on
10177+ */
10178+ if (powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND) || powermgmt_is_resume_in_progress(PSB_GRAPHICS_ISLAND) ||
10179+ !powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND))
10180+ return cfb_copyarea(info, region);
10181+
10182+ if (psb_2d_trylock(dev_priv)) {
10183+ psbfb_copyarea_accel(info, region);
10184+ psb_2d_unlock(dev_priv);
10185+ if (drm_psb_ospm && IS_MRST(dev))
10186+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
10187+ } else
10188+ cfb_copyarea(info, region);
10189+}
10190+#endif
10191+void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
10192+{
10193+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
10194+ return;
10195+
10196+ cfb_imageblit(info, image);
10197+}
10198+
10199+static void psbfb_onoff(struct fb_info *info, int dpms_mode)
10200+{
10201+ struct psbfb_par *par = info->par;
10202+ struct drm_device *dev = par->dev;
10203+ struct drm_crtc *crtc;
10204+ struct drm_encoder *encoder;
10205+ int i;
10206+
10207+ /*
10208+ * For each CRTC in this fb, find all associated encoders
10209+ * and turn them off, then turn off the CRTC.
10210+ */
10211+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10212+ struct drm_crtc_helper_funcs *crtc_funcs =
10213+ crtc->helper_private;
10214+
10215+ for (i = 0; i < par->crtc_count; i++)
10216+ if (crtc->base.id == par->crtc_ids[i])
10217+ break;
10218+
10219+ if (i == par->crtc_count)
10220+ continue;
10221+
10222+ if (dpms_mode == DRM_MODE_DPMS_ON)
10223+ crtc_funcs->dpms(crtc, dpms_mode);
10224+
10225+ /* Found a CRTC on this fb, now find encoders */
10226+ list_for_each_entry(encoder,
10227+ &dev->mode_config.encoder_list, head) {
10228+ if (encoder->crtc == crtc) {
10229+ struct drm_encoder_helper_funcs
10230+ *encoder_funcs;
10231+ encoder_funcs = encoder->helper_private;
10232+ encoder_funcs->dpms(encoder, dpms_mode);
10233+ }
10234+ }
10235+
10236+ if (dpms_mode == DRM_MODE_DPMS_OFF)
10237+ crtc_funcs->dpms(crtc, dpms_mode);
10238+ }
10239+}
10240+
10241+static int psbfb_blank(int blank_mode, struct fb_info *info)
10242+{
10243+ struct psbfb_par *par = info->par;
10244+
10245+ par->dpms_state = blank_mode;
10246+ PSB_DEBUG_PM("psbfb_blank \n");
10247+ switch (blank_mode) {
10248+ case FB_BLANK_UNBLANK:
10249+ psbfb_onoff(info, DRM_MODE_DPMS_ON);
10250+ break;
10251+ case FB_BLANK_NORMAL:
10252+ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
10253+ break;
10254+ case FB_BLANK_HSYNC_SUSPEND:
10255+ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
10256+ break;
10257+ case FB_BLANK_VSYNC_SUSPEND:
10258+ psbfb_onoff(info, DRM_MODE_DPMS_SUSPEND);
10259+ break;
10260+ case FB_BLANK_POWERDOWN:
10261+ psbfb_onoff(info, DRM_MODE_DPMS_OFF);
10262+ break;
10263+ }
10264+
10265+ return 0;
10266+}
10267+
10268+
10269+static int psbfb_kms_off(struct drm_device *dev, int suspend)
10270+{
10271+ struct drm_framebuffer *fb = 0;
10272+ DRM_DEBUG("psbfb_kms_off_ioctl\n");
10273+
10274+ mutex_lock(&dev->mode_config.mutex);
10275+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
10276+ struct fb_info *info = fb->fbdev;
10277+
10278+ if (suspend) {
10279+ fb_set_suspend(info, 1);
10280+ psbfb_blank(FB_BLANK_POWERDOWN, info);
10281+ }
10282+ }
10283+ mutex_unlock(&dev->mode_config.mutex);
10284+ return 0;
10285+}
10286+
10287+int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
10288+ struct drm_file *file_priv)
10289+{
10290+ int ret;
10291+
10292+ if (drm_psb_no_fb)
10293+ return 0;
10294+ acquire_console_sem();
10295+ ret = psbfb_kms_off(dev, 0);
10296+ release_console_sem();
10297+
10298+ return ret;
10299+}
10300+
10301+static int psbfb_kms_on(struct drm_device *dev, int resume)
10302+{
10303+ struct drm_framebuffer *fb = 0;
10304+
10305+ DRM_DEBUG("psbfb_kms_on_ioctl\n");
10306+
10307+ mutex_lock(&dev->mode_config.mutex);
10308+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
10309+ struct fb_info *info = fb->fbdev;
10310+
10311+ if (resume) {
10312+ fb_set_suspend(info, 0);
10313+ psbfb_blank(FB_BLANK_UNBLANK, info);
10314+ }
10315+
10316+ }
10317+ mutex_unlock(&dev->mode_config.mutex);
10318+
10319+ return 0;
10320+}
10321+
10322+int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
10323+ struct drm_file *file_priv)
10324+{
10325+ int ret;
10326+
10327+ if (drm_psb_no_fb)
10328+ return 0;
10329+ acquire_console_sem();
10330+ ret = psbfb_kms_on(dev, 0);
10331+ release_console_sem();
10332+ drm_helper_disable_unused_functions(dev);
10333+ return ret;
10334+}
10335+
10336+void psbfb_suspend(struct drm_device *dev)
10337+{
10338+ acquire_console_sem();
10339+ psbfb_kms_off(dev, 1);
10340+ release_console_sem();
10341+}
10342+
10343+void psbfb_resume(struct drm_device *dev)
10344+{
10345+ acquire_console_sem();
10346+ psbfb_kms_on(dev, 1);
10347+ release_console_sem();
10348+ drm_helper_disable_unused_functions(dev);
10349+}
10350+
10351+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
10352+{
10353+ struct psbfb_par *par = info->par;
10354+ struct psb_framebuffer *psbfb = par->psbfb;
10355+ struct ttm_buffer_object *bo = psbfb->bo;
10356+ unsigned long size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
10357+ unsigned long offset = vma->vm_pgoff;
10358+
10359+ if (vma->vm_pgoff != 0)
10360+ return -EINVAL;
10361+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
10362+ return -EINVAL;
10363+ if (offset + size > bo->num_pages)
10364+ return -EINVAL;
10365+
10366+ mutex_lock(&bo->mutex);
10367+ if (!psbfb->addr_space)
10368+ psbfb->addr_space = vma->vm_file->f_mapping;
10369+ mutex_unlock(&bo->mutex);
10370+
10371+ return ttm_fbdev_mmap(vma, bo);
10372+}
10373+
10374+int psbfb_sync(struct fb_info *info)
10375+{
10376+ struct psbfb_par *par = info->par;
10377+ struct drm_psb_private *dev_priv = par->dev->dev_private;
10378+
10379+ if (psb_2d_trylock(dev_priv)) {
10380+ /*
10381+ * psbfb_sync is atomic, so we must do an instantaneous
10382+ * check that the hardware is powered on
10383+ */
10384+ if (!powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND) &&
10385+ !powermgmt_is_resume_in_progress(PSB_GRAPHICS_ISLAND) &&
10386+ powermgmt_is_hw_on(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND))
10387+ psb_idle_2d(par->dev);
10388+
10389+ psb_2d_unlock(dev_priv);
10390+ } else
10391+ udelay(5);
10392+
10393+ return 0;
10394+}
10395+
10396+static struct fb_ops psbfb_ops = {
10397+ .owner = THIS_MODULE,
10398+ .fb_check_var = psbfb_check_var,
10399+ .fb_set_par = psbfb_set_par,
10400+ .fb_setcolreg = psbfb_setcolreg,
10401+ .fb_fillrect = cfb_fillrect,
10402+ .fb_copyarea = cfb_copyarea,
10403+ .fb_imageblit = cfb_imageblit,
10404+ .fb_mmap = psbfb_mmap,
10405+ /*.fb_sync = psbfb_sync,*/
10406+ .fb_blank = psbfb_blank,
10407+};
10408+
10409+static struct drm_mode_set panic_mode;
10410+
10411+int psbfb_panic(struct notifier_block *n, unsigned long unused,
10412+ void *panic_str)
10413+{
10414+ DRM_ERROR("panic occurred, switching back to text console\n");
10415+ drm_crtc_helper_set_config(&panic_mode);
10416+
10417+ return 0;
10418+}
10419+EXPORT_SYMBOL(psbfb_panic);
10420+
10421+static struct notifier_block paniced = {
10422+ .notifier_call = psbfb_panic,
10423+};
10424+
10425+
10426+static struct drm_framebuffer *psb_framebuffer_create
10427+ (struct drm_device *dev, struct drm_mode_fb_cmd *r,
10428+ void *mm_private)
10429+{
10430+ struct psb_framebuffer *fb;
10431+ int ret;
10432+
10433+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
10434+ if (!fb)
10435+ return NULL;
10436+
10437+ ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
10438+
10439+ if (ret)
10440+ goto err;
10441+
10442+ drm_helper_mode_fill_fb_struct(&fb->base, r);
10443+
10444+ fb->bo = mm_private;
10445+
10446+ return &fb->base;
10447+
10448+err:
10449+ kfree(fb);
10450+ return NULL;
10451+}
10452+
10453+static struct drm_framebuffer *psb_user_framebuffer_create
10454+ (struct drm_device *dev, struct drm_file *filp,
10455+ struct drm_mode_fb_cmd *r)
10456+{
10457+ struct psb_framebuffer *psbfb;
10458+ struct ttm_buffer_object *bo = NULL;
10459+ struct drm_framebuffer *fb;
10460+ struct fb_info *info;
10461+ struct ttm_bo_kmap_obj tmp_kmap;
10462+ bool is_iomem;
10463+ uint64_t size;
10464+
10465+ bo = ttm_buffer_object_lookup(psb_fpriv(filp)->tfile, r->handle);
10466+ if (!bo)
10467+ return NULL;
10468+ /* the buffer is used as an fb, so it should not be put on the swap list */
10469+ list_del_init(&bo->swap);
10470+
10471+ /* JB: TODO not drop, make smarter */
10472+ size = ((uint64_t) bo->num_pages) << PAGE_SHIFT;
10473+ if (size < r->height * r->pitch)
10474+ return NULL;
10475+
10476+ /* JB: TODO not drop, refcount buffer */
10477+// return psb_framebuffer_create(dev, r, bo);
10478+
10479+ fb = psb_framebuffer_create(dev, r, bo);
10480+ if (!fb) {
10481+ DRM_ERROR("failed to allocate fb.\n");
10482+ return NULL;
10483+ }
10484+
10485+ psbfb = to_psb_fb(fb);
10486+ psbfb->bo = bo;
10487+
10488+ info = framebuffer_alloc(sizeof(struct psbfb_par), &dev->pdev->dev);
10489+ if (!info) {
10490+ return NULL;
10491+ }
10492+
10493+ strcpy(info->fix.id, "psbfb");
10494+ info->fix.type = FB_TYPE_PACKED_PIXELS;
10495+ info->fix.visual = FB_VISUAL_TRUECOLOR;
10496+ info->fix.type_aux = 0;
10497+ info->fix.xpanstep = 1; /* doing it in hw */
10498+ info->fix.ypanstep = 1; /* doing it in hw */
10499+ info->fix.ywrapstep = 0;
10500+ info->fix.accel = FB_ACCEL_I830;
10501+ info->fix.type_aux = 0;
10502+
10503+ info->flags = FBINFO_DEFAULT;
10504+
10505+ info->fbops = &psbfb_ops;
10506+
10507+ info->fix.line_length = fb->pitch;
10508+ info->fix.smem_start =
10509+ dev->mode_config.fb_base + psbfb->bo->offset;
10510+ info->fix.smem_len = size;
10511+
10512+ info->flags = FBINFO_DEFAULT;
10513+
10514+ if (ttm_bo_kmap(psbfb->bo, 0, psbfb->bo->num_pages, &tmp_kmap) != 0) {
10515+ DRM_ERROR("error mapping fb\n");
10516+ return NULL;
10517+ }
10518+
10519+ psbfb->kmap = tmp_kmap;
10520+
10521+ info->screen_base = ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem);
10522+ info->screen_size = size;
10523+
10524+/* called for a kms flip; the back buffer has already been rendered, so do not clear it */
10525+#if 0
10526+ if (is_iomem)
10527+ memset_io(info->screen_base, 0, size);
10528+ else
10529+ memset(info->screen_base, 0, size);
10530+#endif
10531+ info->pseudo_palette = fb->pseudo_palette;
10532+ info->var.xres_virtual = fb->width;
10533+ info->var.yres_virtual = fb->height;
10534+ info->var.bits_per_pixel = fb->bits_per_pixel;
10535+ info->var.xoffset = 0;
10536+ info->var.yoffset = 0;
10537+ info->var.activate = FB_ACTIVATE_NOW;
10538+ info->var.height = -1;
10539+ info->var.width = -1;
10540+
10541+ info->var.xres = r->width;
10542+ info->var.yres = r->height;
10543+
10544+ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
10545+ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
10546+
10547+ info->pixmap.size = 64 * 1024;
10548+ info->pixmap.buf_align = 8;
10549+ info->pixmap.access_align = 32;
10550+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
10551+ info->pixmap.scan_align = 1;
10552+
10553+ fill_fb_bitfield(&info->var, fb->depth);
10554+
10555+ register_framebuffer(info);
10556+
10557+ fb->fbdev = info;
10558+
10559+ return fb;
10560+}
10561+
10562+int psbfb_create(struct drm_device *dev, uint32_t fb_width,
10563+ uint32_t fb_height, uint32_t surface_width,
10564+ uint32_t surface_height, struct psb_framebuffer **psbfb_p)
10565+{
10566+ struct fb_info *info;
10567+ struct psbfb_par *par;
10568+ struct drm_framebuffer *fb;
10569+ struct psb_framebuffer *psbfb;
10570+ struct ttm_bo_kmap_obj tmp_kmap;
10571+ struct drm_mode_fb_cmd mode_cmd;
10572+ struct device *device = &dev->pdev->dev;
10573+ struct ttm_bo_device *bdev = &psb_priv(dev)->bdev;
10574+ struct ttm_buffer_object *fbo = NULL;
10575+ int size, aligned_size, ret;
10576+ bool is_iomem;
10577+
10578+ mode_cmd.width = surface_width; /* crtc->desired_mode->hdisplay; */
10579+ mode_cmd.height = surface_height; /* crtc->desired_mode->vdisplay; */
10580+
10581+ mode_cmd.bpp = 32;
10582+ mode_cmd.pitch = mode_cmd.width * ((mode_cmd.bpp + 1) / 8);
10583+ mode_cmd.depth = 24;
10584+
10585+ size = mode_cmd.pitch * mode_cmd.height;
10586+ aligned_size = ALIGN(size, PAGE_SIZE);
10587+ ret = ttm_buffer_object_create(bdev,
10588+ aligned_size,
10589+ ttm_bo_type_kernel,
10590+ TTM_PL_FLAG_TT |
10591+ TTM_PL_FLAG_VRAM |
10592+ TTM_PL_FLAG_NO_EVICT,
10593+ 0, 0, 0, NULL, &fbo);
10594+
10595+ if (unlikely(ret != 0)) {
10596+ DRM_ERROR("failed to allocate framebuffer.\n");
10597+ return -ENOMEM;
10598+ }
10599+
10600+ mutex_lock(&dev->struct_mutex);
10601+ fb = psb_framebuffer_create(dev, &mode_cmd, fbo);
10602+ if (!fb) {
10603+ DRM_ERROR("failed to allocate fb.\n");
10604+ ret = -ENOMEM;
10605+ goto out_err0;
10606+ }
10607+ psbfb = to_psb_fb(fb);
10608+ psbfb->bo = fbo;
10609+
10610+ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
10611+ info = framebuffer_alloc(sizeof(struct psbfb_par), device);
10612+ if (!info) {
10613+ ret = -ENOMEM;
10614+ goto out_err1;
10615+ }
10616+
10617+ par = info->par;
10618+ par->psbfb = psbfb;
10619+
10620+ strcpy(info->fix.id, "psbfb");
10621+ info->fix.type = FB_TYPE_PACKED_PIXELS;
10622+ info->fix.visual = FB_VISUAL_TRUECOLOR;
10623+ info->fix.type_aux = 0;
10624+ info->fix.xpanstep = 1; /* doing it in hw */
10625+ info->fix.ypanstep = 1; /* doing it in hw */
10626+ info->fix.ywrapstep = 0;
10627+ info->fix.accel = FB_ACCEL_I830;
10628+ info->fix.type_aux = 0;
10629+
10630+ info->flags = FBINFO_DEFAULT;
10631+
10632+ info->fbops = &psbfb_ops;
10633+
10634+ info->fix.line_length = fb->pitch;
10635+ info->fix.smem_start =
10636+ dev->mode_config.fb_base + psbfb->bo->offset;
10637+ info->fix.smem_len = size;
10638+
10639+ info->flags = FBINFO_DEFAULT;
10640+
10641+ ret = ttm_bo_kmap(psbfb->bo, 0, psbfb->bo->num_pages, &tmp_kmap);
10642+ if (ret) {
10643+ DRM_ERROR("error mapping fb: %d\n", ret);
10644+ goto out_err2;
10645+ }
10646+
10647+
10648+ info->screen_base = ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem);
10649+ info->screen_size = size;
10650+
10651+ if (is_iomem)
10652+ memset_io(info->screen_base, 0, size);
10653+ else
10654+ memset(info->screen_base, 0, size);
10655+
10656+ info->pseudo_palette = fb->pseudo_palette;
10657+ info->var.xres_virtual = fb->width;
10658+ info->var.yres_virtual = fb->height;
10659+ info->var.bits_per_pixel = fb->bits_per_pixel;
10660+ info->var.xoffset = 0;
10661+ info->var.yoffset = 0;
10662+ info->var.activate = FB_ACTIVATE_NOW;
10663+ info->var.height = -1;
10664+ info->var.width = -1;
10665+
10666+ info->var.xres = fb_width;
10667+ info->var.yres = fb_height;
10668+
10669+ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
10670+ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
10671+
10672+ info->pixmap.size = 64 * 1024;
10673+ info->pixmap.buf_align = 8;
10674+ info->pixmap.access_align = 32;
10675+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
10676+ info->pixmap.scan_align = 1;
10677+
10678+ DRM_DEBUG("fb depth is %d\n", fb->depth);
10679+ DRM_DEBUG(" pitch is %d\n", fb->pitch);
10680+ fill_fb_bitfield(&info->var, fb->depth);
10681+
10682+ fb->fbdev = info;
10683+
10684+ par->dev = dev;
10685+
10686+ /* To allow resizing without swapping buffers */
10687+ printk(KERN_INFO"allocated %dx%d fb: 0x%08lx, bo %p\n",
10688+ psbfb->base.width,
10689+ psbfb->base.height, psbfb->bo->offset, psbfb->bo);
10690+
10691+ if (psbfb_p)
10692+ *psbfb_p = psbfb;
10693+
10694+ mutex_unlock(&dev->struct_mutex);
10695+
10696+ return 0;
10697+out_err2:
10698+ unregister_framebuffer(info);
10699+out_err1:
10700+ fb->funcs->destroy(fb);
10701+out_err0:
10702+ mutex_unlock(&dev->struct_mutex);
10703+ ttm_bo_unref(&fbo);
10704+ return ret;
10705+}
10706+
10707+static int psbfb_multi_fb_probe_crtc(struct drm_device *dev,
10708+ struct drm_crtc *crtc)
10709+{
10710+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10711+ struct drm_framebuffer *fb = crtc->fb;
10712+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
10713+ struct drm_connector *connector;
10714+ struct fb_info *info;
10715+ struct psbfb_par *par;
10716+ struct drm_mode_set *modeset;
10717+ unsigned int width, height;
10718+ int new_fb = 0;
10719+ int ret, i, conn_count;
10720+
10721+ if (!drm_helper_crtc_in_use(crtc))
10722+ return 0;
10723+
10724+ if (!crtc->desired_mode)
10725+ return 0;
10726+
10727+ width = crtc->desired_mode->hdisplay;
10728+ height = crtc->desired_mode->vdisplay;
10729+
10730+ /* is there an fb bound to this crtc already */
10731+ if (!psb_intel_crtc->mode_set.fb) {
10732+ ret =
10733+ psbfb_create(dev, width, height, width, height,
10734+ &psbfb);
10735+ if (ret)
10736+ return -EINVAL;
10737+ new_fb = 1;
10738+ } else {
10739+ fb = psb_intel_crtc->mode_set.fb;
10740+ if ((fb->width < width) || (fb->height < height))
10741+ return -EINVAL;
10742+ }
10743+
10744+ info = fb->fbdev;
10745+ par = info->par;
10746+
10747+ modeset = &psb_intel_crtc->mode_set;
10748+ modeset->fb = fb;
10749+ conn_count = 0;
10750+ list_for_each_entry(connector, &dev->mode_config.connector_list,
10751+ head) {
10752+ if (connector->encoder)
10753+ if (connector->encoder->crtc == modeset->crtc) {
10754+ modeset->connectors[conn_count] =
10755+ connector;
10756+ conn_count++;
10757+ if (conn_count > INTELFB_CONN_LIMIT)
10758+ BUG();
10759+ }
10760+ }
10761+
10762+ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
10763+ modeset->connectors[i] = NULL;
10764+
10765+ par->crtc_ids[0] = crtc->base.id;
10766+
10767+ modeset->num_connectors = conn_count;
10768+ if (modeset->mode != modeset->crtc->desired_mode)
10769+ modeset->mode = modeset->crtc->desired_mode;
10770+
10771+ par->crtc_count = 1;
10772+
10773+ if (new_fb) {
10774+ info->var.pixclock = -1;
10775+ if (register_framebuffer(info) < 0)
10776+ return -EINVAL;
10777+ } else
10778+ psbfb_set_par(info);
10779+
10780+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
10781+ info->fix.id);
10782+
10783+ /* Switch back to kernel console on panic */
10784+ panic_mode = *modeset;
10785+ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
10786+ printk(KERN_INFO "registered panic notifier\n");
10787+
10788+ return 0;
10789+}
10790+
10791+static int psbfb_multi_fb_probe(struct drm_device *dev)
10792+{
10793+
10794+ struct drm_crtc *crtc;
10795+ int ret = 0;
10796+
10797+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10798+ ret = psbfb_multi_fb_probe_crtc(dev, crtc);
10799+ if (ret)
10800+ return ret;
10801+ }
10802+ return ret;
10803+}
10804+
10805+static int psbfb_single_fb_probe(struct drm_device *dev)
10806+{
10807+ struct drm_crtc *crtc;
10808+ struct drm_connector *connector;
10809+ unsigned int fb_width = (unsigned) -1, fb_height = (unsigned) -1;
10810+ unsigned int surface_width = 0, surface_height = 0;
10811+ int new_fb = 0;
10812+ int crtc_count = 0;
10813+ int ret, i, conn_count = 0;
10814+ struct fb_info *info;
10815+ struct psbfb_par *par;
10816+ struct drm_mode_set *modeset = NULL;
10817+ struct drm_framebuffer *fb = NULL;
10818+ struct psb_framebuffer *psbfb = NULL;
10819+
10820+ /* first up get a count of crtcs now in use and
10821+ * new min/maxes width/heights */
10822+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10823+ if (drm_helper_crtc_in_use(crtc)) {
10824+ if (crtc->desired_mode) {
10825+ fb = crtc->fb;
10826+ if (crtc->desired_mode->hdisplay <
10827+ fb_width)
10828+ fb_width =
10829+ crtc->desired_mode->hdisplay;
10830+
10831+ if (crtc->desired_mode->vdisplay <
10832+ fb_height)
10833+ fb_height =
10834+ crtc->desired_mode->vdisplay;
10835+
10836+ if (crtc->desired_mode->hdisplay >
10837+ surface_width)
10838+ surface_width =
10839+ crtc->desired_mode->hdisplay;
10840+
10841+ if (crtc->desired_mode->vdisplay >
10842+ surface_height)
10843+ surface_height =
10844+ crtc->desired_mode->vdisplay;
10845+
10846+ }
10847+ crtc_count++;
10848+ }
10849+ }
10850+
10851+ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
10852+ /* hmm everyone went away - assume VGA cable just fell out
10853+ and will come back later. */
10854+ return 0;
10855+ }
10856+
10857+ /* do we have an fb already? */
10858+ if (list_empty(&dev->mode_config.fb_kernel_list)) {
10859+ /* create an fb if we don't have one */
10860+ ret =
10861+ psbfb_create(dev, fb_width, fb_height, surface_width,
10862+ surface_height, &psbfb);
10863+ if (ret)
10864+ return -EINVAL;
10865+ new_fb = 1;
10866+ fb = &psbfb->base;
10867+ } else {
10868+ fb = list_first_entry(&dev->mode_config.fb_kernel_list,
10869+ struct drm_framebuffer, filp_head);
10870+
10871+ /* if someone hotplugs something bigger than we have already
10872+ * allocated, we are pwned. As really we can't resize an
10873+ * fbdev that is in the wild currently due to fbdev not really
10874+ * being designed for the lower layers moving stuff around
10875+ * under it. - so in the grand style of things - punt. */
10876+ if ((fb->width < surface_width)
10877+ || (fb->height < surface_height)) {
10878+ DRM_ERROR
10879+ ("Framebuffer not large enough to scale"
10880+ " console onto.\n");
10881+ return -EINVAL;
10882+ }
10883+ }
10884+
10885+ info = fb->fbdev;
10886+ par = info->par;
10887+
10888+ crtc_count = 0;
10889+ /* okay we need to setup new connector sets in the crtcs */
10890+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10891+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10892+ modeset = &psb_intel_crtc->mode_set;
10893+ modeset->fb = fb;
10894+ conn_count = 0;
10895+ list_for_each_entry(connector,
10896+ &dev->mode_config.connector_list,
10897+ head) {
10898+ if (connector->encoder)
10899+ if (connector->encoder->crtc ==
10900+ modeset->crtc) {
10901+ modeset->connectors[conn_count] =
10902+ connector;
10903+ conn_count++;
10904+ if (conn_count >
10905+ INTELFB_CONN_LIMIT)
10906+ BUG();
10907+ }
10908+ }
10909+
10910+ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
10911+ modeset->connectors[i] = NULL;
10912+
10913+ par->crtc_ids[crtc_count++] = crtc->base.id;
10914+
10915+ modeset->num_connectors = conn_count;
10916+ if (modeset->mode != modeset->crtc->desired_mode)
10917+ modeset->mode = modeset->crtc->desired_mode;
10918+ }
10919+ par->crtc_count = crtc_count;
10920+
10921+ if (new_fb) {
10922+ info->var.pixclock = -1;
10923+ if (register_framebuffer(info) < 0)
10924+ return -EINVAL;
10925+ } else
10926+ psbfb_set_par(info);
10927+
10928+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
10929+ info->fix.id);
10930+
10931+ /* Switch back to kernel console on panic */
10932+ panic_mode = *modeset;
10933+ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
10934+ printk(KERN_INFO "registered panic notifier\n");
10935+
10936+ return 0;
10937+}
10938+
10939+int psbfb_probe(struct drm_device *dev)
10940+{
10941+ int ret = 0;
10942+
10943+ DRM_DEBUG("\n");
10944+
10945+ /* something has changed in the lower levels of hell - deal with it
10946+ here */
10947+
10948+ /* two modes : a) 1 fb to rule all crtcs.
10949+ b) one fb per crtc.
10950+ two actions 1) new connected device
10951+ 2) device removed.
10952+ case a/1 : if the fb surface isn't big enough -
10953+ resize the surface fb.
10954+ if the fb size isn't big enough - resize fb into surface.
10955+ if everything big enough configure the new crtc/etc.
10956+ case a/2 : undo the configuration
10957+ possibly resize down the fb to fit the new configuration.
10958+ case b/1 : see if it is on a new crtc - setup a new fb and add it.
10959+ case b/2 : teardown the new fb.
10960+ */
10961+
10962+ /* mode a first */
10963+ /* search for an fb */
10964+ if (0 /*i915_fbpercrtc == 1 */)
10965+ ret = psbfb_multi_fb_probe(dev);
10966+ else
10967+ ret = psbfb_single_fb_probe(dev);
10968+
10969+ return ret;
10970+}
10971+EXPORT_SYMBOL(psbfb_probe);
10972+
10973+int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
10974+{
10975+ struct fb_info *info;
10976+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
10977+
10978+ if (drm_psb_no_fb)
10979+ return 0;
10980+
10981+ info = fb->fbdev;
10982+
10983+ if (info) {
10984+ unregister_framebuffer(info);
10985+ ttm_bo_kunmap(&psbfb->kmap);
10986+ ttm_bo_unref(&psbfb->bo);
10987+ framebuffer_release(info);
10988+ }
10989+
10990+ atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
10991+ memset(&panic_mode, 0, sizeof(struct drm_mode_set));
10992+ return 0;
10993+}
10994+EXPORT_SYMBOL(psbfb_remove);
10995+
10996+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
10997+ struct drm_file *file_priv,
10998+ unsigned int *handle)
10999+{
11000+ /* JB: TODO currently we can't go from a bo to a handle with ttm */
11001+ (void) file_priv;
11002+ *handle = 0;
11003+ return 0;
11004+}
11005+
11006+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
11007+{
11008+ struct drm_device *dev = fb->dev;
11009+ if (fb->fbdev)
11010+ psbfb_remove(dev, fb);
11011+
11012+ /* JB: TODO not drop, refcount buffer */
11013+ drm_framebuffer_cleanup(fb);
11014+
11015+ kfree(fb);
11016+}
11017+
11018+static const struct drm_mode_config_funcs psb_mode_funcs = {
11019+ .fb_create = psb_user_framebuffer_create,
11020+ .fb_changed = psbfb_probe,
11021+};
11022+
11023+static int psb_create_backlight_property(struct drm_device *dev)
11024+{
11025+ struct drm_psb_private *dev_priv = (struct drm_psb_private *) dev->dev_private;
11026+ struct drm_property *backlight;
11027+
11028+ if (dev_priv->backlight_property)
11029+ return 0;
11030+
11031+ backlight = drm_property_create(dev, DRM_MODE_PROP_RANGE, "backlight", 2);
11032+ backlight->values[0] = 0;
11033+ backlight->values[1] = 100;
11034+
11035+ dev_priv->backlight_property = backlight;
11036+
11037+ return 0;
11038+}
11039+
11040+static void psb_setup_outputs(struct drm_device *dev)
11041+{
11042+ struct drm_psb_private *dev_priv =
11043+ (struct drm_psb_private *) dev->dev_private;
11044+ struct drm_connector *connector;
11045+
11046+ drm_mode_create_scaling_mode_property(dev);
11047+
11048+ psb_create_backlight_property(dev);
11049+
11050+ if (IS_MRST(dev)) {
11051+ if (dev_priv->iLVDS_enable)
11052+ /* Set up integrated LVDS for MRST */
11053+ mrst_lvds_init(dev, &dev_priv->mode_dev);
11054+ else {
11055+ /* Set up integrated MIPI for MRST */
11056+ mrst_dsi_init(dev, &dev_priv->mode_dev);
11057+ }
11058+ } else {
11059+ psb_intel_lvds_init(dev, &dev_priv->mode_dev);
11060+ psb_intel_sdvo_init(dev, SDVOB);
11061+ }
11062+
11063+ list_for_each_entry(connector, &dev->mode_config.connector_list,
11064+ head) {
11065+ struct psb_intel_output *psb_intel_output =
11066+ to_psb_intel_output(connector);
11067+ struct drm_encoder *encoder = &psb_intel_output->enc;
11068+ int crtc_mask = 0, clone_mask = 0;
11069+
11070+ /* valid crtcs */
11071+ switch (psb_intel_output->type) {
11072+ case INTEL_OUTPUT_SDVO:
11073+ crtc_mask = ((1 << 0) | (1 << 1));
11074+ clone_mask = (1 << INTEL_OUTPUT_SDVO);
11075+ break;
11076+ case INTEL_OUTPUT_LVDS:
11077+ if (IS_MRST(dev))
11078+ crtc_mask = (1 << 0);
11079+ else
11080+ crtc_mask = (1 << 1);
11081+
11082+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
11083+ break;
11084+ case INTEL_OUTPUT_MIPI:
11085+ crtc_mask = (1 << 0);
11086+ clone_mask = (1 << INTEL_OUTPUT_MIPI);
11087+ break;
11088+ }
11089+ encoder->possible_crtcs = crtc_mask;
11090+ encoder->possible_clones =
11091+ psb_intel_connector_clones(dev, clone_mask);
11092+ }
11093+}
11094+
11095+static void *psb_bo_from_handle(struct drm_device *dev,
11096+ struct drm_file *file_priv,
11097+ unsigned int handle)
11098+{
11099+ return ttm_buffer_object_lookup(psb_fpriv(file_priv)->tfile,
11100+ handle);
11101+}
11102+
11103+static size_t psb_bo_size(struct drm_device *dev, void *bof)
11104+{
11105+ struct ttm_buffer_object *bo = bof;
11106+ return bo->num_pages << PAGE_SHIFT;
11107+}
11108+
11109+static size_t psb_bo_offset(struct drm_device *dev, void *bof)
11110+{
11111+ struct drm_psb_private *dev_priv =
11112+ (struct drm_psb_private *) dev->dev_private;
11113+ struct ttm_buffer_object *bo = bof;
11114+
11115+ size_t offset = bo->offset - dev_priv->pg->gatt_start;
11116+ DRM_DEBUG("Offset %u\n", offset);
11117+ return offset;
11118+}
11119+
11120+static int psb_bo_pin_for_scanout(struct drm_device *dev, void *bo)
11121+{
11122+#if 0 /* JB: Not used for the drop */
11123+ struct ttm_buffer_object *bo = bof;
11124+ We should do things like check that
11125+ the buffer is in a scanout-able
11126+ place, and make sure that it is pinned.
11127+#endif
11128+ return 0;
11129+ }
11130+
11131+ static int psb_bo_unpin_for_scanout(struct drm_device *dev,
11132+ void *bo) {
11133+#if 0 /* JB: Not used for the drop */
11134+ struct ttm_buffer_object *bo = bof;
11135+#endif
11136+ return 0;
11137+ }
11138+
11139+ void psb_modeset_init(struct drm_device *dev)
11140+ {
11141+ struct drm_psb_private *dev_priv =
11142+ (struct drm_psb_private *) dev->dev_private;
11143+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
11144+ int i;
11145+ int num_pipe;
11146+
11147+ /* Init mm functions */
11148+ mode_dev->bo_from_handle = psb_bo_from_handle;
11149+ mode_dev->bo_size = psb_bo_size;
11150+ mode_dev->bo_offset = psb_bo_offset;
11151+ mode_dev->bo_pin_for_scanout = psb_bo_pin_for_scanout;
11152+ mode_dev->bo_unpin_for_scanout = psb_bo_unpin_for_scanout;
11153+
11154+ drm_mode_config_init(dev);
11155+
11156+ dev->mode_config.min_width = 0;
11157+ dev->mode_config.min_height = 0;
11158+
11159+ dev->mode_config.funcs = (void *) &psb_mode_funcs;
11160+
11161+ dev->mode_config.max_width = 2048;
11162+ dev->mode_config.max_height = 2048;
11163+
11164+ /* set memory base */
11165+ /* MRST and PSB should use BAR 2*/
11166+ dev->mode_config.fb_base =
11167+ pci_resource_start(dev->pdev, 2);
11168+
11169+ if (IS_MRST(dev))
11170+ num_pipe = 1;
11171+ else
11172+ num_pipe = 2;
11173+
11174+
11175+ for (i = 0; i < num_pipe; i++)
11176+ psb_intel_crtc_init(dev, i, mode_dev);
11177+
11178+ psb_setup_outputs(dev);
11179+
11180+ /* setup fbs */
11181+ /* drm_initial_config(dev); */
11182+ }
11183+
11184+ void psb_modeset_cleanup(struct drm_device *dev)
11185+ {
11186+ drm_mode_config_cleanup(dev);
11187+ }
11188diff --git a/drivers/gpu/drm/psb/psb_fb.h b/drivers/gpu/drm/psb/psb_fb.h
11189new file mode 100644
11190index 0000000..aa0b23c
11191--- /dev/null
11192+++ b/drivers/gpu/drm/psb/psb_fb.h
11193@@ -0,0 +1,47 @@
11194+/*
11195+ * Copyright (c) 2008, Intel Corporation
11196+ *
11197+ * Permission is hereby granted, free of charge, to any person obtaining a
11198+ * copy of this software and associated documentation files (the "Software"),
11199+ * to deal in the Software without restriction, including without limitation
11200+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11201+ * and/or sell copies of the Software, and to permit persons to whom the
11202+ * Software is furnished to do so, subject to the following conditions:
11203+ *
11204+ * The above copyright notice and this permission notice (including the next
11205+ * paragraph) shall be included in all copies or substantial portions of the
11206+ * Software.
11207+ *
11208+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
11209+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11210+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
11211+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
11212+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
11213+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
11214+ * SOFTWARE.
11215+ *
11216+ * Authors:
11217+ * Eric Anholt <eric@anholt.net>
11218+ *
11219+ **/
11220+
11221+#ifndef _PSB_FB_H_
11222+#define _PSB_FB_H_
11223+
11224+struct psb_framebuffer {
11225+ struct drm_framebuffer base;
11226+ struct address_space *addr_space;
11227+ struct ttm_buffer_object *bo;
11228+ struct ttm_bo_kmap_obj kmap;
11229+ uint64_t offset;
11230+};
11231+
11232+#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
11233+
11234+
11235+extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
11236+
11237+extern int psb_2d_submit(struct drm_psb_private *, uint32_t *, uint32_t);
11238+
11239+#endif
11240+
11241diff --git a/drivers/gpu/drm/psb/psb_fence.c b/drivers/gpu/drm/psb/psb_fence.c
11242new file mode 100644
11243index 0000000..b8c64b0
11244--- /dev/null
11245+++ b/drivers/gpu/drm/psb/psb_fence.c
11246@@ -0,0 +1,359 @@
11247+/**************************************************************************
11248+ * Copyright (c) 2007, Intel Corporation.
11249+ * All Rights Reserved.
11250+ *
11251+ * This program is free software; you can redistribute it and/or modify it
11252+ * under the terms and conditions of the GNU General Public License,
11253+ * version 2, as published by the Free Software Foundation.
11254+ *
11255+ * This program is distributed in the hope it will be useful, but WITHOUT
11256+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11257+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11258+ * more details.
11259+ *
11260+ * You should have received a copy of the GNU General Public License along with
11261+ * this program; if not, write to the Free Software Foundation, Inc.,
11262+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
11263+ *
11264+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
11265+ * develop this driver.
11266+ *
11267+ **************************************************************************/
11268+/*
11269+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
11270+ */
11271+
11272+#include <drm/drmP.h>
11273+#include "psb_drv.h"
11274+#include "psb_msvdx.h"
11275+#include "lnc_topaz.h"
11276+
11277+static void psb_print_ta_fence_status(struct ttm_fence_device *fdev)
11278+{
11279+ struct drm_psb_private *dev_priv =
11280+ container_of(fdev, struct drm_psb_private, fdev);
11281+ struct psb_scheduler_seq *seq = dev_priv->scheduler.seq;
11282+ int i;
11283+
11284+ for (i=0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) {
11285+ DRM_INFO("Type 0x%02x, sequence %lu, reported %d\n",
11286+ (1 << i),
11287+ (unsigned long) seq->sequence,
11288+ seq->reported);
11289+ seq++;
11290+ }
11291+}
11292+
11293+static void psb_poll_ta(struct ttm_fence_device *fdev,
11294+ uint32_t waiting_types)
11295+{
11296+ struct drm_psb_private *dev_priv =
11297+ container_of(fdev, struct drm_psb_private, fdev);
11298+ uint32_t cur_flag = 1;
11299+ uint32_t flags = 0;
11300+ uint32_t sequence = 0;
11301+ uint32_t remaining = 0xFFFFFFFF;
11302+ uint32_t diff;
11303+
11304+ struct psb_scheduler *scheduler;
11305+ struct psb_scheduler_seq *seq;
11306+ struct ttm_fence_class_manager *fc =
11307+ &fdev->fence_class[PSB_ENGINE_TA];
11308+
11309+ scheduler = &dev_priv->scheduler;
11310+ seq = scheduler->seq;
11311+
11312+ while (likely(waiting_types & remaining)) {
11313+ if (!(waiting_types & cur_flag))
11314+ goto skip;
11315+ if (seq->reported)
11316+ goto skip;
11317+ if (flags == 0)
11318+ sequence = seq->sequence;
11319+ else if (sequence != seq->sequence) {
11320+ ttm_fence_handler(fdev, PSB_ENGINE_TA,
11321+ sequence, flags, 0);
11322+ sequence = seq->sequence;
11323+ flags = 0;
11324+ }
11325+ flags |= cur_flag;
11326+
11327+ /*
11328+ * Sequence may not have ended up on the ring yet.
11329+ * In that case, report it but don't mark it as
11330+ * reported. A subsequent poll will report it again.
11331+ */
11332+
11333+ diff = (fc->latest_queued_sequence - sequence) &
11334+ fc->sequence_mask;
11335+ if (diff < fc->wrap_diff)
11336+ seq->reported = 1;
11337+
11338+skip:
11339+ cur_flag <<= 1;
11340+ remaining <<= 1;
11341+ seq++;
11342+ }
11343+
11344+ if (flags)
11345+ ttm_fence_handler(fdev, PSB_ENGINE_TA, sequence, flags, 0);
11346+
11347+}
11348+
11349+static void psb_poll_other(struct ttm_fence_device *fdev,
11350+ uint32_t fence_class, uint32_t waiting_types)
11351+{
11352+ struct drm_psb_private *dev_priv =
11353+ container_of(fdev, struct drm_psb_private, fdev);
11354+ struct ttm_fence_class_manager *fc =
11355+ &fdev->fence_class[fence_class];
11356+ uint32_t sequence;
11357+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
11358+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
11359+
11360+ if (unlikely(!dev_priv))
11361+ return;
11362+
11363+ if (waiting_types) {
11364+ switch (fence_class) {
11365+ case PSB_ENGINE_VIDEO:
11366+ sequence = msvdx_priv->msvdx_current_sequence;
11367+ break;
11368+ case LNC_ENGINE_ENCODE:
11369+ sequence = *((uint32_t *)topaz_priv->topaz_sync_addr);
11370+ break;
11371+ default:
11372+ sequence = dev_priv->comm[fence_class << 4];
11373+ break;
11374+ }
11375+
11376+ ttm_fence_handler(fdev, fence_class, sequence,
11377+ _PSB_FENCE_TYPE_EXE, 0);
11378+
11379+ switch (fence_class) {
11380+ case PSB_ENGINE_2D:
11381+ if (dev_priv->fence0_irq_on && !fc->waiting_types) {
11382+ psb_2D_irq_off(dev_priv);
11383+ dev_priv->fence0_irq_on = 0;
11384+ } else if (!dev_priv->fence0_irq_on
11385+ && fc->waiting_types) {
11386+ psb_2D_irq_on(dev_priv);
11387+ dev_priv->fence0_irq_on = 1;
11388+ }
11389+ break;
11390+#if 0
11391+ /*
11392+ * FIXME: MSVDX irq switching
11393+ */
11394+
11395+ case PSB_ENGINE_VIDEO:
11396+ if (dev_priv->fence2_irq_on && !fc->waiting_types) {
11397+ psb_msvdx_irq_off(dev_priv);
11398+ dev_priv->fence2_irq_on = 0;
11399+ } else if (!dev_priv->fence2_irq_on
11400+ && fc->pending_exe_flush) {
11401+ psb_msvdx_irq_on(dev_priv);
11402+ dev_priv->fence2_irq_on = 1;
11403+ }
11404+ break;
11405+#endif
11406+ default:
11407+ return;
11408+ }
11409+ }
11410+}
11411+
11412+static void psb_fence_poll(struct ttm_fence_device *fdev,
11413+ uint32_t fence_class, uint32_t waiting_types)
11414+{
11415+ if (unlikely((PSB_D_PM & drm_psb_debug) && (fence_class == 0)))
11416+ PSB_DEBUG_PM("psb_fence_poll: %d\n", fence_class);
11417+ switch (fence_class) {
11418+ case PSB_ENGINE_TA:
11419+ psb_poll_ta(fdev, waiting_types);
11420+ break;
11421+ default:
11422+ psb_poll_other(fdev, fence_class, waiting_types);
11423+ break;
11424+ }
11425+}
11426+
11427+void psb_fence_error(struct drm_device *dev,
11428+ uint32_t fence_class,
11429+ uint32_t sequence, uint32_t type, int error)
11430+{
11431+ struct drm_psb_private *dev_priv = psb_priv(dev);
11432+ struct ttm_fence_device *fdev = &dev_priv->fdev;
11433+ unsigned long irq_flags;
11434+ struct ttm_fence_class_manager *fc =
11435+ &fdev->fence_class[fence_class];
11436+
11437+ BUG_ON(fence_class >= PSB_NUM_ENGINES);
11438+ write_lock_irqsave(&fc->lock, irq_flags);
11439+ ttm_fence_handler(fdev, fence_class, sequence, type, error);
11440+ write_unlock_irqrestore(&fc->lock, irq_flags);
11441+}
11442+
11443+int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
11444+ uint32_t fence_class,
11445+ uint32_t flags, uint32_t *sequence,
11446+ unsigned long *timeout_jiffies)
11447+{
11448+ struct drm_psb_private *dev_priv =
11449+ container_of(fdev, struct drm_psb_private, fdev);
11450+ uint32_t seq = 0;
11451+ int ret;
11452+
11453+ if (!dev_priv)
11454+ return -EINVAL;
11455+
11456+ if (fence_class >= PSB_NUM_ENGINES)
11457+ return -EINVAL;
11458+
11459+ switch (fence_class) {
11460+ case PSB_ENGINE_2D:
11461+ spin_lock(&dev_priv->sequence_lock);
11462+ seq = ++dev_priv->sequence[fence_class];
11463+ spin_unlock(&dev_priv->sequence_lock);
11464+ ret = psb_blit_sequence(dev_priv, seq);
11465+ if (ret)
11466+ return ret;
11467+ break;
11468+ case PSB_ENGINE_VIDEO:
11469+ spin_lock(&dev_priv->sequence_lock);
11470+ seq = dev_priv->sequence[fence_class]++;
11471+ spin_unlock(&dev_priv->sequence_lock);
11472+ break;
11473+ case LNC_ENGINE_ENCODE:
11474+ spin_lock(&dev_priv->sequence_lock);
11475+ seq = dev_priv->sequence[fence_class]++;
11476+ spin_unlock(&dev_priv->sequence_lock);
11477+ break;
11478+ default:
11479+ spin_lock(&dev_priv->sequence_lock);
11480+ seq = dev_priv->sequence[fence_class];
11481+ spin_unlock(&dev_priv->sequence_lock);
11482+ }
11483+
11484+ *sequence = seq;
11485+
11486+ if (fence_class == PSB_ENGINE_TA)
11487+ *timeout_jiffies = jiffies + DRM_HZ / 2;
11488+ else
11489+ *timeout_jiffies = jiffies + DRM_HZ * 3;
11490+
11491+ return 0;
11492+}
11493+
11494+uint32_t psb_fence_advance_sequence(struct drm_device *dev,
11495+ uint32_t fence_class)
11496+{
11497+ struct drm_psb_private *dev_priv =
11498+ (struct drm_psb_private *) dev->dev_private;
11499+ uint32_t sequence;
11500+
11501+ spin_lock(&dev_priv->sequence_lock);
11502+ sequence = ++dev_priv->sequence[fence_class];
11503+ spin_unlock(&dev_priv->sequence_lock);
11504+
11505+ return sequence;
11506+}
11507+
11508+static void psb_fence_lockup(struct ttm_fence_object *fence,
11509+ uint32_t fence_types)
11510+{
11511+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
11512+
11513+ if (fence->fence_class == PSB_ENGINE_TA) {
11514+
11515+ /*
11516+ * The 3D engine has its own lockup detection.
11517+ * Just extend the fence expiry time.
11518+ */
11519+
11520+ DRM_INFO("Extending 3D fence timeout.\n");
11521+ write_lock(&fc->lock);
11522+
11523+ DRM_INFO("Sequence %lu, types 0x%08x signaled 0x%08x\n",
11524+ (unsigned long) fence->sequence, fence_types,
11525+ fence->info.signaled_types);
11526+
11527+ if (time_after_eq(jiffies, fence->timeout_jiffies))
11528+ fence->timeout_jiffies = jiffies + DRM_HZ / 2;
11529+
11530+ psb_print_ta_fence_status(fence->fdev);
11531+ write_unlock(&fc->lock);
11532+ } else if (fence->fence_class == LNC_ENGINE_ENCODE) {
11533+ DRM_ERROR
11534+ ("TOPAZ timeout (probable lockup) detected on engine %u "
11535+ "fence type 0x%08x\n",
11536+ (unsigned int) fence->fence_class,
11537+ (unsigned int) fence_types);
11538+
11539+ write_lock(&fc->lock);
11540+ lnc_topaz_handle_timeout(fence->fdev);
11541+ ttm_fence_handler(fence->fdev, fence->fence_class,
11542+ fence->sequence, fence_types, -EBUSY);
11543+ write_unlock(&fc->lock);
11544+ } else {
11545+ DRM_ERROR
11546+ ("GPU timeout (probable lockup) detected on engine %u "
11547+ "fence type 0x%08x\n",
11548+ (unsigned int) fence->fence_class,
11549+ (unsigned int) fence_types);
11550+ write_lock(&fc->lock);
11551+ ttm_fence_handler(fence->fdev, fence->fence_class,
11552+ fence->sequence, fence_types, -EBUSY);
11553+ write_unlock(&fc->lock);
11554+ }
11555+}
11556+
11557+void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
11558+{
11559+ struct drm_psb_private *dev_priv = psb_priv(dev);
11560+ struct ttm_fence_device *fdev = &dev_priv->fdev;
11561+ struct ttm_fence_class_manager *fc =
11562+ &fdev->fence_class[fence_class];
11563+ unsigned long irq_flags;
11564+
11565+#ifdef FIX_TG_16
11566+ if (fence_class == PSB_ENGINE_2D) {
11567+
11568+ if ((atomic_read(&dev_priv->ta_wait_2d_irq) == 1) &&
11569+ (PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
11570+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
11571+ _PSB_C2B_STATUS_BUSY) == 0))
11572+ psb_resume_ta_2d_idle(dev_priv);
11573+ }
11574+#endif
11575+ write_lock_irqsave(&fc->lock, irq_flags);
11576+ psb_fence_poll(fdev, fence_class, fc->waiting_types);
11577+ write_unlock_irqrestore(&fc->lock, irq_flags);
11578+}
11579+
11580+
11581+static struct ttm_fence_driver psb_ttm_fence_driver = {
11582+ .has_irq = NULL,
11583+ .emit = psb_fence_emit_sequence,
11584+ .flush = NULL,
11585+ .poll = psb_fence_poll,
11586+ .needed_flush = NULL,
11587+ .wait = NULL,
11588+ .signaled = NULL,
11589+ .lockup = psb_fence_lockup,
11590+};
11591+
11592+int psb_ttm_fence_device_init(struct ttm_fence_device *fdev)
11593+{
11594+ struct drm_psb_private *dev_priv =
11595+ container_of(fdev, struct drm_psb_private, fdev);
11596+ struct ttm_fence_class_init fci = {.wrap_diff = (1 << 30),
11597+ .flush_diff = (1 << 29),
11598+ .sequence_mask = 0xFFFFFFFF
11599+ };
11600+
11601+ return ttm_fence_device_init(PSB_NUM_ENGINES,
11602+ dev_priv->mem_global_ref.object,
11603+ fdev, &fci, 1,
11604+ &psb_ttm_fence_driver);
11605+}
11606diff --git a/drivers/gpu/drm/psb/psb_gtt.c b/drivers/gpu/drm/psb/psb_gtt.c
11607new file mode 100644
11608index 0000000..7cb5a3d
11609--- /dev/null
11610+++ b/drivers/gpu/drm/psb/psb_gtt.c
11611@@ -0,0 +1,278 @@
11612+/**************************************************************************
11613+ * Copyright (c) 2007, Intel Corporation.
11614+ * All Rights Reserved.
11615+ *
11616+ * This program is free software; you can redistribute it and/or modify it
11617+ * under the terms and conditions of the GNU General Public License,
11618+ * version 2, as published by the Free Software Foundation.
11619+ *
11620+ * This program is distributed in the hope it will be useful, but WITHOUT
11621+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11622+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11623+ * more details.
11624+ *
11625+ * You should have received a copy of the GNU General Public License along with
11626+ * this program; if not, write to the Free Software Foundation, Inc.,
11627+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
11628+ *
11629+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
11630+ * develop this driver.
11631+ *
11632+ **************************************************************************/
11633+/*
11634+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
11635+ */
11636+#include <drm/drmP.h>
11637+#include "psb_drv.h"
11638+
11639+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
11640+{
11641+ uint32_t mask = PSB_PTE_VALID;
11642+
11643+ if (type & PSB_MMU_CACHED_MEMORY)
11644+ mask |= PSB_PTE_CACHED;
11645+ if (type & PSB_MMU_RO_MEMORY)
11646+ mask |= PSB_PTE_RO;
11647+ if (type & PSB_MMU_WO_MEMORY)
11648+ mask |= PSB_PTE_WO;
11649+
11650+ return (pfn << PAGE_SHIFT) | mask;
11651+}
11652+
11653+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
11654+{
11655+ struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
11656+
11657+ if (!tmp)
11658+ return NULL;
11659+
11660+ init_rwsem(&tmp->sem);
11661+ tmp->dev = dev;
11662+
11663+ return tmp;
11664+}
11665+
11666+void psb_gtt_takedown(struct psb_gtt *pg, int free)
11667+{
11668+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
11669+
11670+ if (!pg)
11671+ return;
11672+
11673+ if (pg->gtt_map) {
11674+ iounmap(pg->gtt_map);
11675+ pg->gtt_map = NULL;
11676+ }
11677+ if (pg->initialized) {
11678+ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
11679+ pg->gmch_ctrl);
11680+ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
11681+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
11682+ }
11683+ if (free)
11684+ kfree(pg);
11685+}
11686+
11687+int psb_gtt_init(struct psb_gtt *pg, int resume)
11688+{
11689+ struct drm_device *dev = pg->dev;
11690+ struct drm_psb_private *dev_priv = dev->dev_private;
11691+ unsigned gtt_pages;
11692+ unsigned long stolen_size, vram_stolen_size, ci_stolen_size;
11693+ unsigned long rar_stolen_size;
11694+ unsigned i, num_pages;
11695+ unsigned pfn_base;
11696+ uint32_t vram_pages;
11697+
11698+ int ret = 0;
11699+ uint32_t pte;
11700+
11701+ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
11702+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
11703+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
11704+
11705+ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
11706+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
11707+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
11708+
11709+ pg->initialized = 1;
11710+
11711+ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
11712+
11713+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
11714+ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
11715+ gtt_pages =
11716+ pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
11717+ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
11718+ >> PAGE_SHIFT;
11719+
11720+ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
11721+ vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
11722+
11723+ /* CI is not included in the stolen size because of the TOPAZ MMU bug */
11724+ ci_stolen_size = dev_priv->ci_region_size;
11725+ /* add CI & RAR share buffer space to stolen_size */
11726+ /* stolen_size = vram_stolen_size + ci_stolen_size; */
11727+ stolen_size = vram_stolen_size;
11728+
11729+ rar_stolen_size = dev_priv->rar_region_size;
11730+ stolen_size += rar_stolen_size;
11731+
11732+ PSB_DEBUG_INIT("GTT phys start: 0x%08x.\n", pg->gtt_phys_start);
11733+ PSB_DEBUG_INIT("GTT start: 0x%08x.\n", pg->gtt_start);
11734+ PSB_DEBUG_INIT("GATT start: 0x%08x.\n", pg->gatt_start);
11735+ PSB_DEBUG_INIT("GTT pages: %u\n", gtt_pages);
11736+ PSB_DEBUG_INIT("Stolen size: %lu kiB\n", stolen_size / 1024);
11737+
11738+ if (resume && (gtt_pages != pg->gtt_pages) &&
11739+ (stolen_size != pg->stolen_size)) {
11740+ DRM_ERROR("GTT resume error.\n");
11741+ ret = -EINVAL;
11742+ goto out_err;
11743+ }
11744+
11745+ pg->gtt_pages = gtt_pages;
11746+ pg->stolen_size = stolen_size;
11747+ pg->vram_stolen_size = vram_stolen_size;
11748+ pg->ci_stolen_size = ci_stolen_size;
11749+ pg->rar_stolen_size = rar_stolen_size;
11750+ pg->gtt_map =
11751+ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
11752+ if (!pg->gtt_map) {
11753+ DRM_ERROR("Failure to map gtt.\n");
11754+ ret = -ENOMEM;
11755+ goto out_err;
11756+ }
11757+
11758+ /*
11759+ * insert vram stolen pages.
11760+ */
11761+
11762+ pfn_base = pg->stolen_base >> PAGE_SHIFT;
11763+ vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
11764+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
11765+ num_pages, pfn_base);
11766+ for (i = 0; i < num_pages; ++i) {
11767+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
11768+ iowrite32(pte, pg->gtt_map + i);
11769+ }
11770+#if 0
11771+ /*
11772+ * insert CI stolen pages
11773+ */
11774+
11775+ pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT;
11776+ num_pages = ci_stolen_size >> PAGE_SHIFT;
11777+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
11778+ num_pages, pfn_base);
11779+ for (; i < num_pages; ++i) {
11780+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
11781+ iowrite32(pte, pg->gtt_map + i);
11782+ }
11783+#endif
11784+
11785+ /*
11786+ * insert RAR stolen pages
11787+ */
11788+ if (rar_stolen_size != 0) {
11789+ pfn_base = dev_priv->rar_region_start >> PAGE_SHIFT;
11790+ num_pages = rar_stolen_size >> PAGE_SHIFT;
11791+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
11792+ num_pages, pfn_base);
11793+ for (; i < num_pages + vram_pages ; ++i) {
11794+ pte = psb_gtt_mask_pte(pfn_base + i - vram_pages, 0);
11795+ iowrite32(pte, pg->gtt_map + i);
11796+ }
11797+ }
11798+ /*
11799+ * Init rest of gtt.
11800+ */
11801+
11802+ pfn_base = page_to_pfn(dev_priv->scratch_page);
11803+ pte = psb_gtt_mask_pte(pfn_base, 0);
11804+ PSB_DEBUG_INIT("Initializing the rest of a total "
11805+ "of %d gtt pages.\n", pg->gatt_pages);
11806+
11807+ for (; i < pg->gatt_pages; ++i)
11808+ iowrite32(pte, pg->gtt_map + i);
11809+ (void) ioread32(pg->gtt_map + i - 1);
11810+
11811+ return 0;
11812+
11813+out_err:
11814+ psb_gtt_takedown(pg, 0);
11815+ return ret;
11816+}
11817+
11818+int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
11819+ unsigned offset_pages, unsigned num_pages,
11820+ unsigned desired_tile_stride,
11821+ unsigned hw_tile_stride, int type)
11822+{
11823+ unsigned rows = 1;
11824+ unsigned add;
11825+ unsigned row_add;
11826+ unsigned i;
11827+ unsigned j;
11828+ uint32_t *cur_page = NULL;
11829+ uint32_t pte;
11830+
11831+ if (hw_tile_stride)
11832+ rows = num_pages / desired_tile_stride;
11833+ else
11834+ desired_tile_stride = num_pages;
11835+
11836+ add = desired_tile_stride;
11837+ row_add = hw_tile_stride;
11838+
11839+ down_read(&pg->sem);
11840+ for (i = 0; i < rows; ++i) {
11841+ cur_page = pg->gtt_map + offset_pages;
11842+ for (j = 0; j < desired_tile_stride; ++j) {
11843+ pte =
11844+ psb_gtt_mask_pte(page_to_pfn(*pages++), type);
11845+ iowrite32(pte, cur_page++);
11846+ }
11847+ offset_pages += add;
11848+ }
11849+ (void) ioread32(cur_page - 1);
11850+ up_read(&pg->sem);
11851+
11852+ return 0;
11853+}
11854+
11855+int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
11856+ unsigned num_pages, unsigned desired_tile_stride,
11857+ unsigned hw_tile_stride)
11858+{
11859+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
11860+ unsigned rows = 1;
11861+ unsigned add;
11862+ unsigned row_add;
11863+ unsigned i;
11864+ unsigned j;
11865+ uint32_t *cur_page = NULL;
11866+ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
11867+ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
11868+
11869+ if (hw_tile_stride)
11870+ rows = num_pages / desired_tile_stride;
11871+ else
11872+ desired_tile_stride = num_pages;
11873+
11874+ add = desired_tile_stride;
11875+ row_add = hw_tile_stride;
11876+
11877+ down_read(&pg->sem);
11878+ for (i = 0; i < rows; ++i) {
11879+ cur_page = pg->gtt_map + offset_pages;
11880+ for (j = 0; j < desired_tile_stride; ++j)
11881+ iowrite32(pte, cur_page++);
11882+
11883+ offset_pages += add;
11884+ }
11885+ (void) ioread32(cur_page - 1);
11886+ up_read(&pg->sem);
11887+
11888+ return 0;
11889+}
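psb_gtt_insert_pages() and psb_gtt_remove_pages() above walk the GTT in tile rows: with a hardware tile stride the page count is split into rows of desired_tile_stride entries, otherwise the whole range is treated as a single row. The short standalone sketch below replays that walk with made-up numbers so the write pattern is visible.

/*
 * Standalone sketch of the row/stride walk used by psb_gtt_insert_pages()
 * and psb_gtt_remove_pages(); the numbers are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned num_pages = 8, desired_tile_stride = 4, hw_tile_stride = 4;
	unsigned offset_pages = 16;
	unsigned rows, i, j;

	if (hw_tile_stride) {
		rows = num_pages / desired_tile_stride;
	} else {
		rows = 1;
		desired_tile_stride = num_pages;
	}

	for (i = 0; i < rows; i++) {
		for (j = 0; j < desired_tile_stride; j++)
			printf("write PTE at gtt index %u\n", offset_pages + j);
		offset_pages += desired_tile_stride;	/* advance one tile row */
	}
	return 0;
}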
11890diff --git a/drivers/gpu/drm/psb/psb_hotplug.c b/drivers/gpu/drm/psb/psb_hotplug.c
11891new file mode 100644
11892index 0000000..38e1f35
11893--- /dev/null
11894+++ b/drivers/gpu/drm/psb/psb_hotplug.c
11895@@ -0,0 +1,427 @@
11896+/*
11897+ * Copyright © 2009 Intel Corporation
11898+ *
11899+ * Permission is hereby granted, free of charge, to any person obtaining a
11900+ * copy of this software and associated documentation files (the "Software"),
11901+ * to deal in the Software without restriction, including without limitation
11902+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11903+ * and/or sell copies of the Software, and to permit persons to whom the
11904+ * Software is furnished to do so, subject to the following conditions:
11905+ *
11906+ * The above copyright notice and this permission notice (including the next
11907+ * paragraph) shall be included in all copies or substantial portions of the
11908+ * Software.
11909+ *
11910+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
11911+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11912+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
11913+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
11914+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
11915+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
11916+ * IN THE SOFTWARE.
11917+ *
11918+ * Authors:
11919+ * James C. Gualario <james.c.gualario@intel.com>
11920+ *
11921+ */
11922+#include "psb_umevents.h"
11923+#include "psb_hotplug.h"
11924+/**
11925+ * inform the kernel of the work to be performed and the related handler functions.
11926+ *
11927+ */
11928+DECLARE_WORK(hotplug_dev_create_work, &psb_hotplug_dev_create_wq);
11929+DECLARE_WORK(hotplug_dev_remove_work, &psb_hotplug_dev_remove_wq);
11930+DECLARE_WORK(hotplug_dev_change_work, &psb_hotplug_dev_change_wq);
11931+/**
11932+ * psb_hotplug_notify_change_um - notify user mode of hotplug changes
11933+ *
11934+ * @name: name of event to notify user mode of change to
11935+ * @state: hotplug state to search for event object in
11936+ *
11937+ */
11938+int psb_hotplug_notify_change_um(const char *name,
11939+ struct hotplug_state *state)
11940+{
11941+ strcpy(&(state->hotplug_change_wq_data.dev_name_arry
11942+ [state->hotplug_change_wq_data.dev_name_write][0]), name);
11943+ state->hotplug_change_wq_data.dev_name_arry_rw_status
11944+ [state->hotplug_change_wq_data.dev_name_write] =
11945+ DRM_HOTPLUG_READY_TO_READ;
11946+ if (state->hotplug_change_wq_data.dev_name_read_write_wrap_ack == 1)
11947+ state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0;
11948+ state->hotplug_change_wq_data.dev_name_write++;
11949+ if (state->hotplug_change_wq_data.dev_name_write ==
11950+ state->hotplug_change_wq_data.dev_name_read) {
11951+ state->hotplug_change_wq_data.dev_name_write--;
11952+ return IRQ_NONE;
11953+ }
11954+ if (state->hotplug_change_wq_data.dev_name_write >
11955+ DRM_HOTPLUG_RING_DEPTH_MAX) {
11956+ state->hotplug_change_wq_data.dev_name_write = 0;
11957+ state->hotplug_change_wq_data.dev_name_write_wrap = 1;
11958+ }
11959+ state->hotplug_change_wq_data.hotplug_dev_list = state->list;
11960+ queue_work(state->hotplug_wq, &(state->hotplug_change_wq_data.work));
11961+ return IRQ_HANDLED;
11962+}
11963+/**
11964+ *
11965+ * psb_hotplug_create_and_notify_um - create and notify user mode of new dev
11966+ *
11967+ * @name: name to give for new event / device
11968+ * @state: hotplug state to track new event /device in
11969+ *
11970+ */
11971+int psb_hotplug_create_and_notify_um(const char *name,
11972+ struct hotplug_state *state)
11973+{
11974+ strcpy(&(state->hotplug_create_wq_data.dev_name_arry
11975+ [state->hotplug_create_wq_data.dev_name_write][0]), name);
11976+ state->hotplug_create_wq_data.dev_name_arry_rw_status
11977+ [state->hotplug_create_wq_data.dev_name_write] =
11978+ DRM_HOTPLUG_READY_TO_READ;
11979+ if (state->hotplug_create_wq_data.dev_name_read_write_wrap_ack == 1)
11980+ state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0;
11981+ state->hotplug_create_wq_data.dev_name_write++;
11982+ if (state->hotplug_create_wq_data.dev_name_write ==
11983+ state->hotplug_create_wq_data.dev_name_read) {
11984+ state->hotplug_create_wq_data.dev_name_write--;
11985+ return IRQ_NONE;
11986+ }
11987+ if (state->hotplug_create_wq_data.dev_name_write >
11988+ DRM_HOTPLUG_RING_DEPTH_MAX) {
11989+ state->hotplug_create_wq_data.dev_name_write = 0;
11990+ state->hotplug_create_wq_data.dev_name_write_wrap = 1;
11991+ }
11992+ state->hotplug_create_wq_data.hotplug_dev_list = state->list;
11993+ queue_work(state->hotplug_wq, &(state->hotplug_create_wq_data.work));
11994+ return IRQ_HANDLED;
11995+}
11996+EXPORT_SYMBOL(psb_hotplug_create_and_notify_um);
11997+/**
11998+ * psb_hotplug_remove_and_notify_um - remove device and notify user mode
11999+ *
12000+ * @name: name of event / device to remove
12001+ * @state: hotplug state to remove event / device from
12002+ *
12003+ */
12004+int psb_hotplug_remove_and_notify_um(const char *name,
12005+ struct hotplug_state *state)
12006+{
12007+ strcpy(&(state->hotplug_remove_wq_data.dev_name_arry
12008+ [state->hotplug_remove_wq_data.dev_name_write][0]), name);
12009+ state->hotplug_remove_wq_data.dev_name_arry_rw_status
12010+ [state->hotplug_remove_wq_data.dev_name_write] =
12011+ DRM_HOTPLUG_READY_TO_READ;
12012+ if (state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack == 1)
12013+ state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
12014+ state->hotplug_remove_wq_data.dev_name_write++;
12015+ if (state->hotplug_remove_wq_data.dev_name_write ==
12016+ state->hotplug_remove_wq_data.dev_name_read) {
12017+ state->hotplug_remove_wq_data.dev_name_write--;
12018+ return IRQ_NONE;
12019+ }
12020+ if (state->hotplug_remove_wq_data.dev_name_write >
12021+ DRM_HOTPLUG_RING_DEPTH_MAX) {
12022+ state->hotplug_remove_wq_data.dev_name_write = 0;
12023+ state->hotplug_remove_wq_data.dev_name_write_wrap = 1;
12024+ }
12025+ state->hotplug_remove_wq_data.hotplug_dev_list = state->list;
12026+ queue_work(state->hotplug_wq, &(state->hotplug_remove_wq_data.work));
12027+ return IRQ_HANDLED;
12028+}
12029+EXPORT_SYMBOL(psb_hotplug_remove_and_notify_um);
12030+/**
12031+ * psb_hotplug_device_pool_create_and_init - make new hotplug device pool
12032+ *
12033+ * @parent_kobj: parent kobject to associate hotplug kset with
12034+ * @state: hotplug state to associate workqueues with
12035+ *
12036+ */
12037+struct umevent_list *psb_hotplug_device_pool_create_and_init(
12038+ struct kobject *parent_kobj,
12039+ struct hotplug_state *state)
12040+{
12041+ struct umevent_list *new_hotplug_dev_list = NULL;
12042+
12043+ new_hotplug_dev_list = psb_umevent_create_list();
12044+ if (new_hotplug_dev_list)
12045+ psb_umevent_init(parent_kobj, new_hotplug_dev_list,
12046+ "psb_hotplug");
12047+
12048+ state->hotplug_wq = create_singlethread_workqueue("hotplug-wq");
12049+ if (!state->hotplug_wq)
12050+ return NULL;
12051+
12052+ INIT_WORK(&state->hotplug_create_wq_data.work,
12053+ psb_hotplug_dev_create_wq);
12054+ INIT_WORK(&state->hotplug_remove_wq_data.work,
12055+ psb_hotplug_dev_remove_wq);
12056+ INIT_WORK(&state->hotplug_change_wq_data.work,
12057+ psb_hotplug_dev_change_wq);
12058+
12059+ state->hotplug_create_wq_data.dev_name_read = 0;
12060+ state->hotplug_create_wq_data.dev_name_write = 0;
12061+ state->hotplug_create_wq_data.dev_name_write_wrap = 0;
12062+ state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0;
12063+ memset(&(state->hotplug_create_wq_data.dev_name_arry_rw_status[0]),
12064+ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
12065+
12066+ state->hotplug_remove_wq_data.dev_name_read = 0;
12067+ state->hotplug_remove_wq_data.dev_name_write = 0;
12068+ state->hotplug_remove_wq_data.dev_name_write_wrap = 0;
12069+ state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
12070+ memset(&(state->hotplug_remove_wq_data.dev_name_arry_rw_status[0]),
12071+ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
12072+
12073+ state->hotplug_change_wq_data.dev_name_read = 0;
12074+ state->hotplug_change_wq_data.dev_name_write = 0;
12075+ state->hotplug_change_wq_data.dev_name_write_wrap = 0;
12076+ state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0;
12077+ memset(&(state->hotplug_change_wq_data.dev_name_arry_rw_status[0]),
12078+ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
12079+
12080+ return new_hotplug_dev_list;
12081+}
12082+EXPORT_SYMBOL(psb_hotplug_device_pool_create_and_init);
12083+/**
12084+ *
12085+ * psb_hotplug_init - init hotplug subsystem
12086+ *
12087+ * @parent_kobj: parent kobject to associate hotplug state with
12088+ *
12089+ */
12090+struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj)
12091+{
12092+ struct hotplug_state *state;
12093+ state = kzalloc(sizeof(struct hotplug_state), GFP_KERNEL);
12094+ state->list = NULL;
12095+ state->list = psb_hotplug_device_pool_create_and_init(
12096+ parent_kobj,
12097+ state);
12098+ return state;
12099+}
12100+/**
12101+ * psb_hotplug_device_pool_destroy - destroy all hotplug related resources
12102+ *
12103+ * @state: hotplug state to destroy
12104+ *
12105+ */
12106+void psb_hotplug_device_pool_destroy(struct hotplug_state *state)
12107+{
12108+ flush_workqueue(state->hotplug_wq);
12109+ destroy_workqueue(state->hotplug_wq);
12110+ psb_umevent_cleanup(state->list);
12111+ kfree(state);
12112+}
12113+EXPORT_SYMBOL(psb_hotplug_device_pool_destroy);
12114+/**
12115+ * psb_hotplug_dev_create_wq - create workqueue implementation
12116+ *
12117+ * @work: work struct to use for kernel scheduling
12118+ *
12119+ */
12120+void psb_hotplug_dev_create_wq(struct work_struct *work)
12121+{
12122+ struct hotplug_disp_workqueue_data *wq_data;
12123+ struct umevent_obj *wq_working_hotplug_disp_obj;
12124+ wq_data = to_hotplug_disp_workqueue_data(work);
12125+ if (wq_data->dev_name_write_wrap == 1) {
12126+ wq_data->dev_name_read_write_wrap_ack = 1;
12127+ wq_data->dev_name_write_wrap = 0;
12128+ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
12129+ if (wq_data->dev_name_arry_rw_status
12130+ [wq_data->dev_name_read] ==
12131+ DRM_HOTPLUG_READY_TO_READ) {
12132+ wq_working_hotplug_disp_obj =
12133+ psb_create_umevent_obj(
12134+ &wq_data->dev_name_arry
12135+ [wq_data->dev_name_read][0],
12136+ wq_data->hotplug_dev_list);
12137+ wq_data->dev_name_arry_rw_status
12138+ [wq_data->dev_name_read] =
12139+ DRM_HOTPLUG_READ_COMPLETE;
12140+ psb_umevent_notify
12141+ (wq_working_hotplug_disp_obj);
12142+ }
12143+ wq_data->dev_name_read++;
12144+ }
12145+ wq_data->dev_name_read = 0;
12146+ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
12147+ if (wq_data->dev_name_arry_rw_status
12148+ [wq_data->dev_name_read] ==
12149+ DRM_HOTPLUG_READY_TO_READ) {
12150+ wq_working_hotplug_disp_obj =
12151+ psb_create_umevent_obj(
12152+ &wq_data->dev_name_arry
12153+ [wq_data->dev_name_read][0],
12154+ wq_data->hotplug_dev_list);
12155+ wq_data->dev_name_arry_rw_status
12156+ [wq_data->dev_name_read] =
12157+ DRM_HOTPLUG_READ_COMPLETE;
12158+ psb_umevent_notify
12159+ (wq_working_hotplug_disp_obj);
12160+ }
12161+ wq_data->dev_name_read++;
12162+ }
12163+ } else {
12164+ while (wq_data->dev_name_read < wq_data->dev_name_write) {
12165+ if (wq_data->dev_name_arry_rw_status
12166+ [wq_data->dev_name_read] ==
12167+ DRM_HOTPLUG_READY_TO_READ) {
12168+ wq_working_hotplug_disp_obj =
12169+ psb_create_umevent_obj(
12170+ &wq_data->dev_name_arry
12171+ [wq_data->dev_name_read][0],
12172+ wq_data->hotplug_dev_list);
12173+ wq_data->dev_name_arry_rw_status
12174+ [wq_data->dev_name_read] =
12175+ DRM_HOTPLUG_READ_COMPLETE;
12176+ psb_umevent_notify
12177+ (wq_working_hotplug_disp_obj);
12178+ }
12179+ wq_data->dev_name_read++;
12180+ }
12181+ }
12182+ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
12183+ wq_data->dev_name_read = 0;
12184+}
12185+EXPORT_SYMBOL(psb_hotplug_dev_create_wq);
12186+/**
12187+ * psb_hotplug_dev_remove_wq - remove workqueue implementation
12188+ *
12189+ * @work: work struct to use for kernel scheduling
12190+ *
12191+ */
12192+void psb_hotplug_dev_remove_wq(struct work_struct *work)
12193+{
12194+ struct hotplug_disp_workqueue_data *wq_data;
12195+ wq_data = to_hotplug_disp_workqueue_data(work);
12196+ if (wq_data->dev_name_write_wrap == 1) {
12197+ wq_data->dev_name_read_write_wrap_ack = 1;
12198+ wq_data->dev_name_write_wrap = 0;
12199+ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
12200+ if (wq_data->dev_name_arry_rw_status
12201+ [wq_data->dev_name_read] ==
12202+ DRM_HOTPLUG_READY_TO_READ) {
12203+ psb_umevent_remove_from_list(
12204+ wq_data->hotplug_dev_list,
12205+ &wq_data->dev_name_arry
12206+ [wq_data->dev_name_read][0]);
12207+ wq_data->dev_name_arry_rw_status
12208+ [wq_data->dev_name_read] =
12209+ DRM_HOTPLUG_READ_COMPLETE;
12210+ }
12211+ wq_data->dev_name_read++;
12212+ }
12213+ wq_data->dev_name_read = 0;
12214+ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
12215+ if (wq_data->dev_name_arry_rw_status
12216+ [wq_data->dev_name_read] ==
12217+ DRM_HOTPLUG_READY_TO_READ) {
12218+ psb_umevent_remove_from_list(
12219+ wq_data->hotplug_dev_list,
12220+ &wq_data->dev_name_arry
12221+ [wq_data->dev_name_read][0]);
12222+ wq_data->dev_name_arry_rw_status
12223+ [wq_data->dev_name_read] =
12224+ DRM_HOTPLUG_READ_COMPLETE;
12225+ }
12226+ wq_data->dev_name_read++;
12227+ }
12228+ } else {
12229+ while (wq_data->dev_name_read < wq_data->dev_name_write) {
12230+ if (wq_data->dev_name_arry_rw_status
12231+ [wq_data->dev_name_read] ==
12232+ DRM_HOTPLUG_READY_TO_READ) {
12233+ psb_umevent_remove_from_list(
12234+ wq_data->hotplug_dev_list,
12235+ &wq_data->dev_name_arry
12236+ [wq_data->dev_name_read][0]);
12237+ wq_data->dev_name_arry_rw_status
12238+ [wq_data->dev_name_read] =
12239+ DRM_HOTPLUG_READ_COMPLETE;
12240+ }
12241+ wq_data->dev_name_read++;
12242+ }
12243+ }
12244+ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
12245+ wq_data->dev_name_read = 0;
12246+}
12247+EXPORT_SYMBOL(psb_hotplug_dev_remove_wq);
12248+/**
12249+ * psb_hotplug_dev_change_wq - change workqueue implementation
12250+ *
12251+ * @work: work struct to use for kernel scheduling
12252+ *
12253+ */
12254+void psb_hotplug_dev_change_wq(struct work_struct *work)
12255+{
12256+ struct hotplug_disp_workqueue_data *wq_data;
12257+ struct umevent_obj *wq_working_hotplug_disp_obj;
12258+ wq_data = to_hotplug_disp_workqueue_data(work);
12259+ if (wq_data->dev_name_write_wrap == 1) {
12260+ wq_data->dev_name_read_write_wrap_ack = 1;
12261+ wq_data->dev_name_write_wrap = 0;
12262+ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
12263+ if (wq_data->dev_name_arry_rw_status
12264+ [wq_data->dev_name_read] ==
12265+ DRM_HOTPLUG_READY_TO_READ) {
12266+ wq_data->dev_name_arry_rw_status
12267+ [wq_data->dev_name_read] =
12268+ DRM_HOTPLUG_READ_COMPLETE;
12269+
12270+ wq_working_hotplug_disp_obj =
12271+ psb_umevent_find_obj(
12272+ &wq_data->dev_name_arry
12273+ [wq_data->dev_name_read][0],
12274+ wq_data->hotplug_dev_list);
12275+ psb_umevent_notify_change_gfxsock
12276+ (wq_working_hotplug_disp_obj);
12277+ }
12278+ wq_data->dev_name_read++;
12279+ }
12280+ wq_data->dev_name_read = 0;
12281+ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
12282+ if (wq_data->dev_name_arry_rw_status
12283+ [wq_data->dev_name_read] ==
12284+ DRM_HOTPLUG_READY_TO_READ) {
12285+ wq_data->dev_name_arry_rw_status
12286+ [wq_data->dev_name_read] =
12287+ DRM_HOTPLUG_READ_COMPLETE;
12288+
12289+ wq_working_hotplug_disp_obj =
12290+ psb_umevent_find_obj(
12291+ &wq_data->dev_name_arry
12292+ [wq_data->dev_name_read][0],
12293+ wq_data->hotplug_dev_list);
12294+ psb_umevent_notify_change_gfxsock
12295+ (wq_working_hotplug_disp_obj);
12296+ }
12297+ wq_data->dev_name_read++;
12298+ }
12299+ } else {
12300+ while (wq_data->dev_name_read < wq_data->dev_name_write) {
12301+ if (wq_data->dev_name_arry_rw_status
12302+ [wq_data->dev_name_read] ==
12303+ DRM_HOTPLUG_READY_TO_READ) {
12304+ wq_data->dev_name_arry_rw_status
12305+ [wq_data->dev_name_read] =
12306+ DRM_HOTPLUG_READ_COMPLETE;
12307+
12308+ wq_working_hotplug_disp_obj =
12309+ psb_umevent_find_obj(
12310+ &wq_data->dev_name_arry
12311+ [wq_data->dev_name_read][0],
12312+ wq_data->hotplug_dev_list);
12313+ psb_umevent_notify_change_gfxsock
12314+ (wq_working_hotplug_disp_obj);
12315+ }
12316+ wq_data->dev_name_read++;
12317+ }
12318+ }
12319+ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
12320+ wq_data->dev_name_read = 0;
12321+}
12322+EXPORT_SYMBOL(psb_hotplug_dev_change_wq);
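All three workqueue handlers above drain the same structure: a fixed-depth ring of device names in which the notify functions act as producer (write index, READY_TO_READ flag, explicit wrap at DRM_HOTPLUG_RING_DEPTH_MAX) and the handler acts as consumer, marking each slot READ_COMPLETE once it has been reported to user mode. Below is a reduced standalone sketch of that producer/consumer flow with invented device names and a smaller depth; the driver wraps explicitly and drains in two passes rather than using modulo arithmetic.

/* Reduced sketch of the hotplug name ring, user-space only. */
#include <stdio.h>
#include <string.h>

#define DEPTH 4
#define READY_TO_READ 1
#define READ_COMPLETE 2

static char name[DEPTH][24];
static int status[DEPTH];
static int rd, wr;

static void produce(const char *dev)
{
	strcpy(name[wr], dev);
	status[wr] = READY_TO_READ;	/* publish the slot to the consumer */
	wr = (wr + 1) % DEPTH;
}

static void consume(void)
{
	while (rd != wr) {
		if (status[rd] == READY_TO_READ) {
			printf("notify user mode: %s\n", name[rd]);
			status[rd] = READ_COMPLETE;
		}
		rd = (rd + 1) % DEPTH;
	}
}

int main(void)
{
	produce("card0-HDMI-1");	/* device names are invented */
	produce("card0-LVDS-1");
	consume();
	return 0;
}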
12323diff --git a/drivers/gpu/drm/psb/psb_hotplug.h b/drivers/gpu/drm/psb/psb_hotplug.h
12324new file mode 100644
12325index 0000000..8a63efc
12326--- /dev/null
12327+++ b/drivers/gpu/drm/psb/psb_hotplug.h
12328@@ -0,0 +1,96 @@
12329+/*
12330+ * Copyright © 2009 Intel Corporation
12331+ *
12332+ * Permission is hereby granted, free of charge, to any person obtaining a
12333+ * copy of this software and associated documentation files (the "Software"),
12334+ * to deal in the Software without restriction, including without limitation
12335+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12336+ * and/or sell copies of the Software, and to permit persons to whom the
12337+ * Software is furnished to do so, subject to the following conditions:
12338+ *
12339+ * The above copyright notice and this permission notice (including the next
12340+ * paragraph) shall be included in all copies or substantial portions of the
12341+ * Software.
12342+ *
12343+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12344+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12345+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
12346+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
12347+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
12348+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
12349+ * IN THE SOFTWARE.
12350+ *
12351+ * Authors:
12352+ * James C. Gualario <james.c.gualario@intel.com>
12353+ *
12354+ */
12355+#ifndef _PSB_HOTPLUG_H_
12356+#define _PSB_HOTPLUG_H_
12357+/**
12358+ * required includes
12359+ *
12360+ */
12361+#include "psb_umevents.h"
12362+/**
12363+ * hotplug specific defines
12364+ *
12365+ */
12366+#define DRM_HOTPLUG_RING_DEPTH 256
12367+#define DRM_HOTPLUG_RING_DEPTH_MAX (DRM_HOTPLUG_RING_DEPTH-1)
12368+#define DRM_HOTPLUG_READY_TO_READ 1
12369+#define DRM_HOTPLUG_READ_COMPLETE 2
12370+/**
12371+ * hotplug workqueue data struct.
12372+ */
12373+struct hotplug_disp_workqueue_data {
12374+ struct work_struct work;
12375+ const char *dev_name;
12376+ int dev_name_write;
12377+ int dev_name_read;
12378+ int dev_name_write_wrap;
12379+ int dev_name_read_write_wrap_ack;
12380+ char dev_name_arry[DRM_HOTPLUG_RING_DEPTH][24];
12381+ int dev_name_arry_rw_status[DRM_HOTPLUG_RING_DEPTH];
12382+ struct umevent_list *hotplug_dev_list;
12383+};
12384+/**
12385+ * hotplug state structure
12386+ *
12387+ */
12388+struct hotplug_state {
12389+ struct workqueue_struct *hotplug_wq;
12390+ struct hotplug_disp_workqueue_data hotplug_remove_wq_data;
12391+ struct hotplug_disp_workqueue_data hotplug_create_wq_data;
12392+ struct hotplug_disp_workqueue_data hotplug_change_wq_data;
12393+ struct umevent_list *list;
12394+};
12395+/**
12396+ * main interface function prototypes for hotplug support.
12397+ *
12398+ */
12399+struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj);
12400+extern int psb_hotplug_notify_change_um(const char *name,
12401+ struct hotplug_state *state);
12402+extern int psb_hotplug_create_and_notify_um(const char *name,
12403+ struct hotplug_state *state);
12404+extern int psb_hotplug_remove_and_notify_um(const char *name,
12405+ struct hotplug_state *state);
12406+extern struct umevent_list *psb_hotplug_device_pool_create_and_init(
12407+ struct kobject *parent_kobj,
12408+ struct hotplug_state *state);
12409+extern void psb_hotplug_device_pool_destroy(struct hotplug_state *state);
12410+/**
12411+ * to go back and forth between work struct and workqueue data
12412+ *
12413+ */
12414+#define to_hotplug_disp_workqueue_data(x) \
12415+ container_of(x, struct hotplug_disp_workqueue_data, work)
12416+
12417+/**
12418+ * function prototypes for workqueue implementation
12419+ *
12420+ */
12421+extern void psb_hotplug_dev_create_wq(struct work_struct *work);
12422+extern void psb_hotplug_dev_remove_wq(struct work_struct *work);
12423+extern void psb_hotplug_dev_change_wq(struct work_struct *work);
12424+#endif
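to_hotplug_disp_workqueue_data() above is the usual container_of idiom: the workqueue only hands back a pointer to the embedded work_struct, and the macro subtracts the member offset to recover the enclosing hotplug_disp_workqueue_data. A self-contained sketch of the same pattern follows, with simplified structures that are not the kernel definitions.

/* Sketch of the container_of pattern behind to_hotplug_disp_workqueue_data(). */
#include <stdio.h>
#include <stddef.h>

struct work_struct { int pending; };

struct wq_data {
	struct work_struct work;	/* embedded member handed to the workqueue */
	int dev_name_write;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct wq_data d = { .dev_name_write = 7 };
	struct work_struct *w = &d.work;	/* what the workqueue hands back */
	struct wq_data *back = container_of(w, struct wq_data, work);

	printf("dev_name_write = %d\n", back->dev_name_write);
	return 0;
}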
12425diff --git a/drivers/gpu/drm/psb/psb_intel_bios.c b/drivers/gpu/drm/psb/psb_intel_bios.c
12426new file mode 100644
12427index 0000000..02e4e27
12428--- /dev/null
12429+++ b/drivers/gpu/drm/psb/psb_intel_bios.c
12430@@ -0,0 +1,309 @@
12431+/*
12432+ * Copyright © 2006 Intel Corporation
12433+ *
12434+ * Permission is hereby granted, free of charge, to any person obtaining a
12435+ * copy of this software and associated documentation files (the "Software"),
12436+ * to deal in the Software without restriction, including without limitation
12437+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12438+ * and/or sell copies of the Software, and to permit persons to whom the
12439+ * Software is furnished to do so, subject to the following conditions:
12440+ *
12441+ * The above copyright notice and this permission notice (including the next
12442+ * paragraph) shall be included in all copies or substantial portions of the
12443+ * Software.
12444+ *
12445+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12446+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12447+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
12448+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
12449+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
12450+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
12451+ * SOFTWARE.
12452+ *
12453+ * Authors:
12454+ * Eric Anholt <eric@anholt.net>
12455+ *
12456+ */
12457+#include "drmP.h"
12458+#include "drm.h"
12459+#include "psb_drm.h"
12460+#include "psb_drv.h"
12461+#include "psb_intel_drv.h"
12462+#include "psb_intel_reg.h"
12463+#include "psb_intel_bios.h"
12464+
12465+
12466+static void * find_section(struct bdb_header *bdb, int section_id)
12467+{
12468+ u8 *base = (u8 *)bdb;
12469+ int index = 0;
12470+ u16 total, current_size;
12471+ u8 current_id;
12472+
12473+ /* skip to first section */
12474+ index += bdb->header_size;
12475+ total = bdb->bdb_size;
12476+
12477+ /* walk the sections looking for section_id */
12478+ while (index < total) {
12479+ current_id = *(base + index);
12480+ index++;
12481+ current_size = *((u16 *)(base + index));
12482+ index += 2;
12483+ if (current_id == section_id)
12484+ return base + index;
12485+ index += current_size;
12486+ }
12487+
12488+ return NULL;
12489+}
12490+
12491+static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
12492+ struct lvds_dvo_timing *dvo_timing)
12493+{
12494+ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
12495+ dvo_timing->hactive_lo;
12496+ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
12497+ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
12498+ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
12499+ dvo_timing->hsync_pulse_width;
12500+ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
12501+ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
12502+
12503+ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
12504+ dvo_timing->vactive_lo;
12505+ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
12506+ dvo_timing->vsync_off;
12507+ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
12508+ dvo_timing->vsync_pulse_width;
12509+ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
12510+ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
12511+ panel_fixed_mode->clock = dvo_timing->clock * 10;
12512+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
12513+
12514+ /* Some VBTs have bogus h/vtotal values */
12515+ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
12516+ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
12517+ if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
12518+ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
12519+
12520+ drm_mode_set_name(panel_fixed_mode);
12521+}
12522+
12523+static void parse_backlight_data(struct drm_psb_private * dev_priv,
12524+ struct bdb_header *bdb)
12525+{
12526+ struct bdb_lvds_backlight * vbt_lvds_bl = NULL;
12527+ struct bdb_lvds_backlight * lvds_bl;
12528+ u8 p_type = 0;
12529+ void * bl_start = NULL;
12530+ struct bdb_lvds_options * lvds_opts
12531+ = find_section(bdb, BDB_LVDS_OPTIONS);
12532+
12533+ dev_priv->lvds_bl = NULL;
12534+
12535+ if(lvds_opts) {
12536+ DRM_DEBUG("lvds_options found at %p\n", lvds_opts);
12537+ p_type = lvds_opts->panel_type;
12538+ } else {
12539+ DRM_DEBUG("no lvds_options\n");
12540+ return;
12541+ }
12542+
12543+ bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
12544+ vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
12545+
12546+ lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
12547+ if(!lvds_bl) {
12548+ DRM_DEBUG("No memory\n");
12549+ return;
12550+ }
12551+
12552+ memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
12553+
12554+ dev_priv->lvds_bl = lvds_bl;
12555+}
12556+
12557+/* Try to find integrated panel data */
12558+static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
12559+ struct bdb_header *bdb)
12560+{
12561+ struct bdb_lvds_options *lvds_options;
12562+ struct bdb_lvds_lfp_data *lvds_lfp_data;
12563+ struct bdb_lvds_lfp_data_entry *entry;
12564+ struct lvds_dvo_timing *dvo_timing;
12565+ struct drm_display_mode *panel_fixed_mode;
12566+
12567+ /* Defaults if we can't find VBT info */
12568+ dev_priv->lvds_dither = 0;
12569+ dev_priv->lvds_vbt = 0;
12570+
12571+ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
12572+ if (!lvds_options)
12573+ return;
12574+
12575+ dev_priv->lvds_dither = lvds_options->pixel_dither;
12576+ if (lvds_options->panel_type == 0xff)
12577+ return;
12578+
12579+ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
12580+ if (!lvds_lfp_data)
12581+ return;
12582+
12583+ dev_priv->lvds_vbt = 1;
12584+
12585+ entry = &lvds_lfp_data->data[lvds_options->panel_type];
12586+ dvo_timing = &entry->dvo_timing;
12587+
12588+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
12589+ GFP_KERNEL);
12590+
12591+ fill_detail_timing_data(panel_fixed_mode, dvo_timing);
12592+
12593+ dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
12594+
12595+ DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
12596+ drm_mode_debug_printmodeline(panel_fixed_mode);
12597+
12598+ return;
12599+}
12600+
12601+/* Try to find sdvo panel data */
12602+static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
12603+ struct bdb_header *bdb)
12604+{
12605+ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
12606+ struct lvds_dvo_timing *dvo_timing;
12607+ struct drm_display_mode *panel_fixed_mode;
12608+
12609+ dev_priv->sdvo_lvds_vbt_mode = NULL;
12610+
12611+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
12612+ if (!sdvo_lvds_options)
12613+ return;
12614+
12615+ dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
12616+ if (!dvo_timing)
12617+ return;
12618+
12619+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
12620+
12621+ if (!panel_fixed_mode)
12622+ return;
12623+
12624+ fill_detail_timing_data(panel_fixed_mode,
12625+ dvo_timing + sdvo_lvds_options->panel_type);
12626+
12627+ dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
12628+
12629+ return;
12630+}
12631+
12632+static void parse_general_features(struct drm_psb_private *dev_priv,
12633+ struct bdb_header *bdb)
12634+{
12635+ struct bdb_general_features *general;
12636+
12637+ /* Set sensible defaults in case we can't find the general block */
12638+ dev_priv->int_tv_support = 1;
12639+ dev_priv->int_crt_support = 1;
12640+
12641+ general = find_section(bdb, BDB_GENERAL_FEATURES);
12642+ if (general) {
12643+ dev_priv->int_tv_support = general->int_tv_support;
12644+ dev_priv->int_crt_support = general->int_crt_support;
12645+ dev_priv->lvds_use_ssc = general->enable_ssc;
12646+
12647+ if (dev_priv->lvds_use_ssc) {
12648+ if (IS_I855(dev_priv->dev))
12649+ dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
12650+ else
12651+ dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
12652+ }
12653+ }
12654+}
12655+
12656+/**
12657+ * psb_intel_init_bios - initialize VBIOS settings & find VBT
12658+ * @dev: DRM device
12659+ *
12660+ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
12661+ * to appropriate values.
12662+ *
12663+ * VBT existence is a sanity check that is relied on by other i830_bios.c code.
12664+ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
12665+ * feed an updated VBT back through that, compared to what we'll fetch using
12666+ * this method of groping around in the BIOS data.
12667+ *
12668+ * Returns 0 on success, nonzero on failure.
12669+ */
12670+bool psb_intel_init_bios(struct drm_device *dev)
12671+{
12672+ struct drm_psb_private *dev_priv = dev->dev_private;
12673+ struct pci_dev *pdev = dev->pdev;
12674+ struct vbt_header *vbt = NULL;
12675+ struct bdb_header *bdb;
12676+ u8 __iomem *bios;
12677+ size_t size;
12678+ int i;
12679+
12680+ bios = pci_map_rom(pdev, &size);
12681+ if (!bios)
12682+ return -1;
12683+
12684+ /* Scour memory looking for the VBT signature */
12685+ for (i = 0; i + 4 < size; i++) {
12686+ if (!memcmp(bios + i, "$VBT", 4)) {
12687+ vbt = (struct vbt_header *)(bios + i);
12688+ break;
12689+ }
12690+ }
12691+
12692+ if (!vbt) {
12693+ DRM_ERROR("VBT signature missing\n");
12694+ pci_unmap_rom(pdev, bios);
12695+ return -1;
12696+ }
12697+
12698+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
12699+
12700+ /* Grab useful general definitions */
12701+ parse_general_features(dev_priv, bdb);
12702+ parse_lfp_panel_data(dev_priv, bdb);
12703+ parse_sdvo_panel_data(dev_priv, bdb);
12704+ parse_backlight_data(dev_priv, bdb);
12705+
12706+ pci_unmap_rom(pdev, bios);
12707+
12708+ return 0;
12709+}
12710+
12711+/**
12712+ * Destroy and free VBT data
12713+ */
12714+void psb_intel_destory_bios(struct drm_device * dev)
12715+{
12716+ struct drm_psb_private *dev_priv = dev->dev_private;
12717+ struct drm_display_mode * sdvo_lvds_vbt_mode =
12718+ dev_priv->sdvo_lvds_vbt_mode;
12719+ struct drm_display_mode * lfp_lvds_vbt_mode =
12720+ dev_priv->lfp_lvds_vbt_mode;
12721+ struct bdb_lvds_backlight * lvds_bl =
12722+ dev_priv->lvds_bl;
12723+
12724+ /*free sdvo panel mode*/
12725+ if(sdvo_lvds_vbt_mode) {
12726+ dev_priv->sdvo_lvds_vbt_mode = NULL;
12727+ kfree(sdvo_lvds_vbt_mode);
12728+ }
12729+
12730+ if(lfp_lvds_vbt_mode) {
12731+ dev_priv->lfp_lvds_vbt_mode = NULL;
12732+ kfree(lfp_lvds_vbt_mode);
12733+ }
12734+
12735+ if(lvds_bl) {
12736+ dev_priv->lvds_bl = NULL;
12737+ kfree(lvds_bl);
12738+ }
12739+}
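find_section() above relies on the BDB layout: after the BDB header, blocks follow back to back as a 1-byte id, a 2-byte little-endian size, and then the payload. The standalone sketch below walks a fabricated blob the same way; the bounds check and the helper name find_block are additions for the sketch, not part of the driver.

/* Sketch of the BDB block walk done by find_section(); blob is fabricated. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static const uint8_t *find_block(const uint8_t *bdb, size_t total,
				 size_t header_size, int id)
{
	size_t index = header_size;

	while (index + 3 <= total) {
		int cur_id = bdb[index++];
		uint16_t size;

		memcpy(&size, bdb + index, 2);	/* little-endian size field */
		index += 2;
		if (cur_id == id)
			return bdb + index;
		index += size;			/* skip payload of other blocks */
	}
	return NULL;
}

int main(void)
{
	/* fake header (4 bytes) | block 40, size 2 | block 42, size 1 */
	const uint8_t blob[] = { 0, 0, 0, 0,
				 40, 2, 0, 0xAA, 0xBB,
				 42, 1, 0, 0xCC };
	const uint8_t *lvds = find_block(blob, sizeof(blob), 4, 42);

	printf("block 42 payload byte: 0x%02X\n", lvds ? *lvds : 0);
	return 0;
}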
12740diff --git a/drivers/gpu/drm/psb/psb_intel_bios.h b/drivers/gpu/drm/psb/psb_intel_bios.h
12741new file mode 100644
12742index 0000000..1b0251d
12743--- /dev/null
12744+++ b/drivers/gpu/drm/psb/psb_intel_bios.h
12745@@ -0,0 +1,436 @@
12746+/*
12747+ * Copyright © 2006 Intel Corporation
12748+ *
12749+ * Permission is hereby granted, free of charge, to any person obtaining a
12750+ * copy of this software and associated documentation files (the "Software"),
12751+ * to deal in the Software without restriction, including without limitation
12752+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12753+ * and/or sell copies of the Software, and to permit persons to whom the
12754+ * Software is furnished to do so, subject to the following conditions:
12755+ *
12756+ * The above copyright notice and this permission notice (including the next
12757+ * paragraph) shall be included in all copies or substantial portions of the
12758+ * Software.
12759+ *
12760+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12761+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12762+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
12763+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
12764+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
12765+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
12766+ * SOFTWARE.
12767+ *
12768+ * Authors:
12769+ * Eric Anholt <eric@anholt.net>
12770+ *
12771+ */
12772+
12773+#ifndef _I830_BIOS_H_
12774+#define _I830_BIOS_H_
12775+
12776+#include "drmP.h"
12777+
12778+struct vbt_header {
12779+	u8 signature[20];		/**< Always starts with '$VBT' */
12780+ u16 version; /**< decimal */
12781+ u16 header_size; /**< in bytes */
12782+ u16 vbt_size; /**< in bytes */
12783+ u8 vbt_checksum;
12784+ u8 reserved0;
12785+ u32 bdb_offset; /**< from beginning of VBT */
12786+ u32 aim_offset[4]; /**< from beginning of VBT */
12787+} __attribute__((packed));
12788+
12789+
12790+struct bdb_header {
12791+ u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
12792+ u16 version; /**< decimal */
12793+ u16 header_size; /**< in bytes */
12794+ u16 bdb_size; /**< in bytes */
12795+};
12796+
12797+/* strictly speaking, this is a "skip" block, but it has interesting info */
12798+struct vbios_data {
12799+ u8 type; /* 0 == desktop, 1 == mobile */
12800+ u8 relstage;
12801+ u8 chipset;
12802+ u8 lvds_present:1;
12803+ u8 tv_present:1;
12804+ u8 rsvd2:6; /* finish byte */
12805+ u8 rsvd3[4];
12806+ u8 signon[155];
12807+ u8 copyright[61];
12808+ u16 code_segment;
12809+ u8 dos_boot_mode;
12810+ u8 bandwidth_percent;
12811+ u8 rsvd4; /* popup memory size */
12812+ u8 resize_pci_bios;
12813+ u8 rsvd5; /* is crt already on ddc2 */
12814+} __attribute__((packed));
12815+
12816+/*
12817+ * There are several types of BIOS data blocks (BDBs), each block has
12818+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
12819+ * Known types are listed below.
12820+ */
12821+#define BDB_GENERAL_FEATURES 1
12822+#define BDB_GENERAL_DEFINITIONS 2
12823+#define BDB_OLD_TOGGLE_LIST 3
12824+#define BDB_MODE_SUPPORT_LIST 4
12825+#define BDB_GENERIC_MODE_TABLE 5
12826+#define BDB_EXT_MMIO_REGS 6
12827+#define BDB_SWF_IO 7
12828+#define BDB_SWF_MMIO 8
12829+#define BDB_DOT_CLOCK_TABLE 9
12830+#define BDB_MODE_REMOVAL_TABLE 10
12831+#define BDB_CHILD_DEVICE_TABLE 11
12832+#define BDB_DRIVER_FEATURES 12
12833+#define BDB_DRIVER_PERSISTENCE 13
12834+#define BDB_EXT_TABLE_PTRS 14
12835+#define BDB_DOT_CLOCK_OVERRIDE 15
12836+#define BDB_DISPLAY_SELECT 16
12837+/* 17 rsvd */
12838+#define BDB_DRIVER_ROTATION 18
12839+#define BDB_DISPLAY_REMOVE 19
12840+#define BDB_OEM_CUSTOM 20
12841+#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
12842+#define BDB_SDVO_LVDS_OPTIONS 22
12843+#define BDB_SDVO_PANEL_DTDS 23
12844+#define BDB_SDVO_LVDS_PNP_IDS 24
12845+#define BDB_SDVO_LVDS_POWER_SEQ 25
12846+#define BDB_TV_OPTIONS 26
12847+#define BDB_LVDS_OPTIONS 40
12848+#define BDB_LVDS_LFP_DATA_PTRS 41
12849+#define BDB_LVDS_LFP_DATA 42
12850+#define BDB_LVDS_BACKLIGHT 43
12851+#define BDB_LVDS_POWER 44
12852+#define BDB_SKIP 254 /* VBIOS private block, ignore */
12853+
12854+struct bdb_general_features {
12855+ /* bits 1 */
12856+ u8 panel_fitting:2;
12857+ u8 flexaim:1;
12858+ u8 msg_enable:1;
12859+ u8 clear_screen:3;
12860+ u8 color_flip:1;
12861+
12862+ /* bits 2 */
12863+ u8 download_ext_vbt:1;
12864+ u8 enable_ssc:1;
12865+ u8 ssc_freq:1;
12866+ u8 enable_lfp_on_override:1;
12867+ u8 disable_ssc_ddt:1;
12868+ u8 rsvd8:3; /* finish byte */
12869+
12870+ /* bits 3 */
12871+ u8 disable_smooth_vision:1;
12872+ u8 single_dvi:1;
12873+ u8 rsvd9:6; /* finish byte */
12874+
12875+ /* bits 4 */
12876+ u8 legacy_monitor_detect;
12877+
12878+ /* bits 5 */
12879+ u8 int_crt_support:1;
12880+ u8 int_tv_support:1;
12881+ u8 rsvd11:6; /* finish byte */
12882+} __attribute__((packed));
12883+
12884+struct bdb_general_definitions {
12885+ /* DDC GPIO */
12886+ u8 crt_ddc_gmbus_pin;
12887+
12888+ /* DPMS bits */
12889+ u8 dpms_acpi:1;
12890+ u8 skip_boot_crt_detect:1;
12891+ u8 dpms_aim:1;
12892+ u8 rsvd1:5; /* finish byte */
12893+
12894+ /* boot device bits */
12895+ u8 boot_display[2];
12896+ u8 child_dev_size;
12897+
12898+ /* device info */
12899+ u8 tv_or_lvds_info[33];
12900+ u8 dev1[33];
12901+ u8 dev2[33];
12902+ u8 dev3[33];
12903+ u8 dev4[33];
12904+ /* may be another device block here on some platforms */
12905+};
12906+
12907+struct bdb_lvds_options {
12908+ u8 panel_type;
12909+ u8 rsvd1;
12910+ /* LVDS capabilities, stored in a dword */
12911+ u8 pfit_mode:2;
12912+ u8 pfit_text_mode_enhanced:1;
12913+ u8 pfit_gfx_mode_enhanced:1;
12914+ u8 pfit_ratio_auto:1;
12915+ u8 pixel_dither:1;
12916+ u8 lvds_edid:1;
12917+ u8 rsvd2:1;
12918+ u8 rsvd4;
12919+} __attribute__((packed));
12920+
12921+struct bdb_lvds_backlight {
12922+ u8 type:2;
12923+ u8 pol:1;
12924+ u8 gpio:3;
12925+ u8 gmbus:2;
12926+ u16 freq;
12927+ u8 minbrightness;
12928+ u8 i2caddr;
12929+ u8 brightnesscmd;
12930+ /*FIXME: more...*/
12931+}__attribute__((packed));
12932+
12933+/* LFP pointer table contains entries to the struct below */
12934+struct bdb_lvds_lfp_data_ptr {
12935+ u16 fp_timing_offset; /* offsets are from start of bdb */
12936+ u8 fp_table_size;
12937+ u16 dvo_timing_offset;
12938+ u8 dvo_table_size;
12939+ u16 panel_pnp_id_offset;
12940+ u8 pnp_table_size;
12941+} __attribute__((packed));
12942+
12943+struct bdb_lvds_lfp_data_ptrs {
12944+ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
12945+ struct bdb_lvds_lfp_data_ptr ptr[16];
12946+} __attribute__((packed));
12947+
12948+/* LFP data has 3 blocks per entry */
12949+struct lvds_fp_timing {
12950+ u16 x_res;
12951+ u16 y_res;
12952+ u32 lvds_reg;
12953+ u32 lvds_reg_val;
12954+ u32 pp_on_reg;
12955+ u32 pp_on_reg_val;
12956+ u32 pp_off_reg;
12957+ u32 pp_off_reg_val;
12958+ u32 pp_cycle_reg;
12959+ u32 pp_cycle_reg_val;
12960+ u32 pfit_reg;
12961+ u32 pfit_reg_val;
12962+ u16 terminator;
12963+} __attribute__((packed));
12964+
12965+struct lvds_dvo_timing {
12966+	u16 clock;		/**< In 10 kHz units */
12967+ u8 hactive_lo;
12968+ u8 hblank_lo;
12969+ u8 hblank_hi:4;
12970+ u8 hactive_hi:4;
12971+ u8 vactive_lo;
12972+ u8 vblank_lo;
12973+ u8 vblank_hi:4;
12974+ u8 vactive_hi:4;
12975+ u8 hsync_off_lo;
12976+ u8 hsync_pulse_width;
12977+ u8 vsync_pulse_width:4;
12978+ u8 vsync_off:4;
12979+ u8 rsvd0:6;
12980+ u8 hsync_off_hi:2;
12981+ u8 h_image;
12982+ u8 v_image;
12983+ u8 max_hv;
12984+ u8 h_border;
12985+ u8 v_border;
12986+ u8 rsvd1:3;
12987+ u8 digital:2;
12988+ u8 vsync_positive:1;
12989+ u8 hsync_positive:1;
12990+ u8 rsvd2:1;
12991+} __attribute__((packed));
12992+
12993+struct lvds_pnp_id {
12994+ u16 mfg_name;
12995+ u16 product_code;
12996+ u32 serial;
12997+ u8 mfg_week;
12998+ u8 mfg_year;
12999+} __attribute__((packed));
13000+
13001+struct bdb_lvds_lfp_data_entry {
13002+ struct lvds_fp_timing fp_timing;
13003+ struct lvds_dvo_timing dvo_timing;
13004+ struct lvds_pnp_id pnp_id;
13005+} __attribute__((packed));
13006+
13007+struct bdb_lvds_lfp_data {
13008+ struct bdb_lvds_lfp_data_entry data[16];
13009+} __attribute__((packed));
13010+
13011+struct aimdb_header {
13012+ char signature[16];
13013+ char oem_device[20];
13014+ u16 aimdb_version;
13015+ u16 aimdb_header_size;
13016+ u16 aimdb_size;
13017+} __attribute__((packed));
13018+
13019+struct aimdb_block {
13020+ u8 aimdb_id;
13021+ u16 aimdb_size;
13022+} __attribute__((packed));
13023+
13024+struct vch_panel_data {
13025+ u16 fp_timing_offset;
13026+ u8 fp_timing_size;
13027+ u16 dvo_timing_offset;
13028+ u8 dvo_timing_size;
13029+ u16 text_fitting_offset;
13030+ u8 text_fitting_size;
13031+ u16 graphics_fitting_offset;
13032+ u8 graphics_fitting_size;
13033+} __attribute__((packed));
13034+
13035+struct vch_bdb_22 {
13036+ struct aimdb_block aimdb_block;
13037+ struct vch_panel_data panels[16];
13038+} __attribute__((packed));
13039+
13040+struct bdb_sdvo_lvds_options {
13041+ u8 panel_backlight;
13042+ u8 h40_set_panel_type;
13043+ u8 panel_type;
13044+ u8 ssc_clk_freq;
13045+ u16 als_low_trip;
13046+ u16 als_high_trip;
13047+ u8 sclalarcoeff_tab_row_num;
13048+ u8 sclalarcoeff_tab_row_size;
13049+ u8 coefficient[8];
13050+ u8 panel_misc_bits_1;
13051+ u8 panel_misc_bits_2;
13052+ u8 panel_misc_bits_3;
13053+ u8 panel_misc_bits_4;
13054+} __attribute__((packed));
13055+
13056+
13057+extern bool psb_intel_init_bios(struct drm_device *dev);
13058+extern void psb_intel_destory_bios(struct drm_device * dev);
13059+
13060+/*
13061+ * Driver<->VBIOS interaction occurs through scratch bits in
13062+ * GR18 & SWF*.
13063+ */
13064+
13065+/* GR18 bits are set on display switch and hotkey events */
13066+#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
13067+#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
13068+#define GR18_HK_NONE (0x0<<3)
13069+#define GR18_HK_LFP_STRETCH (0x1<<3)
13070+#define GR18_HK_TOGGLE_DISP (0x2<<3)
13071+#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
13072+#define GR18_HK_POPUP_DISABLED (0x6<<3)
13073+#define GR18_HK_POPUP_ENABLED (0x7<<3)
13074+#define GR18_HK_PFIT (0x8<<3)
13075+#define GR18_HK_APM_CHANGE (0xa<<3)
13076+#define GR18_HK_MULTIPLE (0xc<<3)
13077+#define GR18_USER_INT_EN (1<<2)
13078+#define GR18_A0000_FLUSH_EN (1<<1)
13079+#define GR18_SMM_EN (1<<0)
13080+
13081+/* Set by driver, cleared by VBIOS */
13082+#define SWF00_YRES_SHIFT 16
13083+#define SWF00_XRES_SHIFT 0
13084+#define SWF00_RES_MASK 0xffff
13085+
13086+/* Set by VBIOS at boot time and driver at runtime */
13087+#define SWF01_TV2_FORMAT_SHIFT 8
13088+#define SWF01_TV1_FORMAT_SHIFT 0
13089+#define SWF01_TV_FORMAT_MASK 0xffff
13090+
13091+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
13092+#define SWF10_GTT_OVERRIDE_EN (1<<28)
13093+#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
13094+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
13095+#define SWF10_OLD_TOGGLE 0x0
13096+#define SWF10_TOGGLE_LIST_1 0x1
13097+#define SWF10_TOGGLE_LIST_2 0x2
13098+#define SWF10_TOGGLE_LIST_3 0x3
13099+#define SWF10_TOGGLE_LIST_4 0x4
13100+#define SWF10_PANNING_EN (1<<23)
13101+#define SWF10_DRIVER_LOADED (1<<22)
13102+#define SWF10_EXTENDED_DESKTOP (1<<21)
13103+#define SWF10_EXCLUSIVE_MODE (1<<20)
13104+#define SWF10_OVERLAY_EN (1<<19)
13105+#define SWF10_PLANEB_HOLDOFF (1<<18)
13106+#define SWF10_PLANEA_HOLDOFF (1<<17)
13107+#define SWF10_VGA_HOLDOFF (1<<16)
13108+#define SWF10_ACTIVE_DISP_MASK 0xffff
13109+#define SWF10_PIPEB_LFP2 (1<<15)
13110+#define SWF10_PIPEB_EFP2 (1<<14)
13111+#define SWF10_PIPEB_TV2 (1<<13)
13112+#define SWF10_PIPEB_CRT2 (1<<12)
13113+#define SWF10_PIPEB_LFP (1<<11)
13114+#define SWF10_PIPEB_EFP (1<<10)
13115+#define SWF10_PIPEB_TV (1<<9)
13116+#define SWF10_PIPEB_CRT (1<<8)
13117+#define SWF10_PIPEA_LFP2 (1<<7)
13118+#define SWF10_PIPEA_EFP2 (1<<6)
13119+#define SWF10_PIPEA_TV2 (1<<5)
13120+#define SWF10_PIPEA_CRT2 (1<<4)
13121+#define SWF10_PIPEA_LFP (1<<3)
13122+#define SWF10_PIPEA_EFP (1<<2)
13123+#define SWF10_PIPEA_TV (1<<1)
13124+#define SWF10_PIPEA_CRT (1<<0)
13125+
13126+#define SWF11_MEMORY_SIZE_SHIFT 16
13127+#define SWF11_SV_TEST_EN (1<<15)
13128+#define SWF11_IS_AGP (1<<14)
13129+#define SWF11_DISPLAY_HOLDOFF (1<<13)
13130+#define SWF11_DPMS_REDUCED (1<<12)
13131+#define SWF11_IS_VBE_MODE (1<<11)
13132+#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
13133+#define SWF11_DPMS_MASK 0x07
13134+#define SWF11_DPMS_OFF (1<<2)
13135+#define SWF11_DPMS_SUSPEND (1<<1)
13136+#define SWF11_DPMS_STANDBY (1<<0)
13137+#define SWF11_DPMS_ON 0
13138+
13139+#define SWF14_GFX_PFIT_EN (1<<31)
13140+#define SWF14_TEXT_PFIT_EN (1<<30)
13141+#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
13142+#define SWF14_POPUP_EN (1<<28)
13143+#define SWF14_DISPLAY_HOLDOFF (1<<27)
13144+#define SWF14_DISP_DETECT_EN (1<<26)
13145+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
13146+#define SWF14_DRIVER_STATUS (1<<24)
13147+#define SWF14_OS_TYPE_WIN9X (1<<23)
13148+#define SWF14_OS_TYPE_WINNT (1<<22)
13149+/* 21:19 rsvd */
13150+#define SWF14_PM_TYPE_MASK 0x00070000
13151+#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
13152+#define SWF14_PM_ACPI (0x3 << 16)
13153+#define SWF14_PM_APM_12 (0x2 << 16)
13154+#define SWF14_PM_APM_11 (0x1 << 16)
13155+#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
13156+ /* if GR18 indicates a display switch */
13157+#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
13158+#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
13159+#define SWF14_DS_PIPEB_TV2_EN (1<<13)
13160+#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
13161+#define SWF14_DS_PIPEB_LFP_EN (1<<11)
13162+#define SWF14_DS_PIPEB_EFP_EN (1<<10)
13163+#define SWF14_DS_PIPEB_TV_EN (1<<9)
13164+#define SWF14_DS_PIPEB_CRT_EN (1<<8)
13165+#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
13166+#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
13167+#define SWF14_DS_PIPEA_TV2_EN (1<<5)
13168+#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
13169+#define SWF14_DS_PIPEA_LFP_EN (1<<3)
13170+#define SWF14_DS_PIPEA_EFP_EN (1<<2)
13171+#define SWF14_DS_PIPEA_TV_EN (1<<1)
13172+#define SWF14_DS_PIPEA_CRT_EN (1<<0)
13173+ /* if GR18 indicates a panel fitting request */
13174+#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
13175+ /* if GR18 indicates an APM change request */
13176+#define SWF14_APM_HIBERNATE 0x4
13177+#define SWF14_APM_SUSPEND 0x3
13178+#define SWF14_APM_STANDBY 0x1
13179+#define SWF14_APM_RESTORE 0x0
13180+
13181+#endif /* _I830_BIOS_H_ */
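The lvds_dvo_timing layout above stores each timing as split hi/lo byte fields, which fill_detail_timing_data() in psb_intel_bios.c recombines, with the clock field held in 10 kHz units. A small standalone example of that unpacking, using the standard 1024x768 panel timing as illustrative input:

/* Unpacking sketch for lvds_dvo_timing hi/lo fields; values are the common
 * 1024x768 timing, used here purely as an example. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t hactive_lo = 0x00, hactive_hi = 0x4;	/* 0x400 = 1024 */
	uint8_t hblank_lo = 0x40, hblank_hi = 0x1;	/* 0x140 = 320  */
	uint16_t clock = 6500;				/* 6500 * 10 kHz */

	int hdisplay = (hactive_hi << 8) | hactive_lo;
	int htotal = hdisplay + ((hblank_hi << 8) | hblank_lo);

	printf("hdisplay=%d htotal=%d pixel clock=%d kHz\n",
	       hdisplay, htotal, clock * 10);
	return 0;
}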
13182diff --git a/drivers/gpu/drm/psb/psb_intel_display.c b/drivers/gpu/drm/psb/psb_intel_display.c
13183new file mode 100644
13184index 0000000..9cc0ec1
13185--- /dev/null
13186+++ b/drivers/gpu/drm/psb/psb_intel_display.c
13187@@ -0,0 +1,2484 @@
13188+/*
13189+ * Copyright © 2006-2007 Intel Corporation
13190+ *
13191+ * Permission is hereby granted, free of charge, to any person obtaining a
13192+ * copy of this software and associated documentation files (the "Software"),
13193+ * to deal in the Software without restriction, including without limitation
13194+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13195+ * and/or sell copies of the Software, and to permit persons to whom the
13196+ * Software is furnished to do so, subject to the following conditions:
13197+ *
13198+ * The above copyright notice and this permission notice (including the next
13199+ * paragraph) shall be included in all copies or substantial portions of the
13200+ * Software.
13201+ *
13202+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13203+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13204+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
13205+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
13206+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
13207+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
13208+ * DEALINGS IN THE SOFTWARE.
13209+ *
13210+ * Authors:
13211+ * Eric Anholt <eric@anholt.net>
13212+ */
13213+
13214+#include <linux/i2c.h>
13215+
13216+#include <drm/drm_crtc_helper.h>
13217+#include "psb_fb.h"
13218+#include "psb_intel_display.h"
13219+#include "psb_powermgmt.h"
13220+
13221+struct psb_intel_clock_t {
13222+ /* given values */
13223+ int n;
13224+ int m1, m2;
13225+ int p1, p2;
13226+ /* derived values */
13227+ int dot;
13228+ int vco;
13229+ int m;
13230+ int p;
13231+};
13232+
13233+struct psb_intel_range_t {
13234+ int min, max;
13235+};
13236+
13237+struct psb_intel_p2_t {
13238+ int dot_limit;
13239+ int p2_slow, p2_fast;
13240+};
13241+
13242+#define INTEL_P2_NUM 2
13243+
13244+struct psb_intel_limit_t {
13245+ struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
13246+ struct psb_intel_p2_t p2;
13247+};
13248+
13249+#define I8XX_DOT_MIN 25000
13250+#define I8XX_DOT_MAX 350000
13251+#define I8XX_VCO_MIN 930000
13252+#define I8XX_VCO_MAX 1400000
13253+#define I8XX_N_MIN 3
13254+#define I8XX_N_MAX 16
13255+#define I8XX_M_MIN 96
13256+#define I8XX_M_MAX 140
13257+#define I8XX_M1_MIN 18
13258+#define I8XX_M1_MAX 26
13259+#define I8XX_M2_MIN 6
13260+#define I8XX_M2_MAX 16
13261+#define I8XX_P_MIN 4
13262+#define I8XX_P_MAX 128
13263+#define I8XX_P1_MIN 2
13264+#define I8XX_P1_MAX 33
13265+#define I8XX_P1_LVDS_MIN 1
13266+#define I8XX_P1_LVDS_MAX 6
13267+#define I8XX_P2_SLOW 4
13268+#define I8XX_P2_FAST 2
13269+#define I8XX_P2_LVDS_SLOW 14
13270+#define I8XX_P2_LVDS_FAST 14 /* No fast option */
13271+#define I8XX_P2_SLOW_LIMIT 165000
13272+
13273+#define I9XX_DOT_MIN 20000
13274+#define I9XX_DOT_MAX 400000
13275+#define I9XX_VCO_MIN 1400000
13276+#define I9XX_VCO_MAX 2800000
13277+#define I9XX_N_MIN 3
13278+#define I9XX_N_MAX 8
13279+#define I9XX_M_MIN 70
13280+#define I9XX_M_MAX 120
13281+#define I9XX_M1_MIN 10
13282+#define I9XX_M1_MAX 20
13283+#define I9XX_M2_MIN 5
13284+#define I9XX_M2_MAX 9
13285+#define I9XX_P_SDVO_DAC_MIN 5
13286+#define I9XX_P_SDVO_DAC_MAX 80
13287+#define I9XX_P_LVDS_MIN 7
13288+#define I9XX_P_LVDS_MAX 98
13289+#define I9XX_P1_MIN 1
13290+#define I9XX_P1_MAX 8
13291+#define I9XX_P2_SDVO_DAC_SLOW 10
13292+#define I9XX_P2_SDVO_DAC_FAST 5
13293+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
13294+#define I9XX_P2_LVDS_SLOW 14
13295+#define I9XX_P2_LVDS_FAST 7
13296+#define I9XX_P2_LVDS_SLOW_LIMIT 112000
13297+
13298+#define INTEL_LIMIT_I8XX_DVO_DAC 0
13299+#define INTEL_LIMIT_I8XX_LVDS 1
13300+#define INTEL_LIMIT_I9XX_SDVO_DAC 2
13301+#define INTEL_LIMIT_I9XX_LVDS 3
13302+
13303+static const struct psb_intel_limit_t psb_intel_limits[] = {
13304+ { /* INTEL_LIMIT_I8XX_DVO_DAC */
13305+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
13306+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
13307+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
13308+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
13309+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
13310+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
13311+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
13312+ .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
13313+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
13314+ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
13315+ },
13316+ { /* INTEL_LIMIT_I8XX_LVDS */
13317+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
13318+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
13319+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
13320+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
13321+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
13322+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
13323+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
13324+ .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
13325+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
13326+ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
13327+ },
13328+ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
13329+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
13330+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
13331+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
13332+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
13333+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
13334+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
13335+ .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
13336+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
13337+ .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
13338+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
13339+ I9XX_P2_SDVO_DAC_FAST},
13340+ },
13341+ { /* INTEL_LIMIT_I9XX_LVDS */
13342+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
13343+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
13344+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
13345+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
13346+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
13347+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
13348+ .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
13349+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
13350+	/* The single-channel range is 25-112 MHz, and dual-channel
13351+	 * is 80-224 MHz. Prefer single channel as much as possible.
13352+ */
13353+ .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
13354+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
13355+ },
13356+};
13357+
13358+static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
13359+{
13360+ struct drm_device *dev = crtc->dev;
13361+ const struct psb_intel_limit_t *limit;
13362+
13363+ if (IS_I9XX(dev)) {
13364+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
13365+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
13366+ else
13367+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
13368+ } else {
13369+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
13370+ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_LVDS];
13371+ else
13372+ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
13373+ }
13374+ return limit;
13375+}
13376+
13377+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
13378+
13379+static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
13380+{
13381+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
13382+ clock->p = clock->p1 * clock->p2;
13383+ clock->vco = refclk * clock->m / (clock->n + 2);
13384+ clock->dot = clock->vco / clock->p;
13385+}
13386+
13387+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
13388+
13389+static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
13390+{
13391+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
13392+ clock->p = clock->p1 * clock->p2;
13393+ clock->vco = refclk * clock->m / (clock->n + 2);
13394+ clock->dot = clock->vco / clock->p;
13395+}
13396+
13397+static void psb_intel_clock(struct drm_device *dev, int refclk,
13398+ struct psb_intel_clock_t *clock)
13399+{
13400+ if (IS_I9XX(dev))
13401+ return i9xx_clock(refclk, clock);
13402+ else
13403+ return i8xx_clock(refclk, clock);
13404+}
13405+
13406+/**
13407+ * Returns whether any output on the specified pipe is of the specified type
13408+ */
13409+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
13410+{
13411+ struct drm_device *dev = crtc->dev;
13412+ struct drm_mode_config *mode_config = &dev->mode_config;
13413+ struct drm_connector *l_entry;
13414+
13415+ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
13416+ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
13417+ struct psb_intel_output *psb_intel_output =
13418+ to_psb_intel_output(l_entry);
13419+ if (psb_intel_output->type == type)
13420+ return true;
13421+ }
13422+ }
13423+ return false;
13424+}
13425+
13426+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
13427+/**
13428+ * Returns whether the given set of divisors are valid for a given refclk with
13429+ * the given connectors.
13430+ */
13431+
13432+static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
13433+ struct psb_intel_clock_t *clock)
13434+{
13435+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
13436+
13437+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
13438+ INTELPllInvalid("p1 out of range\n");
13439+ if (clock->p < limit->p.min || limit->p.max < clock->p)
13440+ INTELPllInvalid("p out of range\n");
13441+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
13442+ INTELPllInvalid("m2 out of range\n");
13443+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
13444+ INTELPllInvalid("m1 out of range\n");
13445+ if (clock->m1 <= clock->m2)
13446+ INTELPllInvalid("m1 <= m2\n");
13447+ if (clock->m < limit->m.min || limit->m.max < clock->m)
13448+ INTELPllInvalid("m out of range\n");
13449+ if (clock->n < limit->n.min || limit->n.max < clock->n)
13450+ INTELPllInvalid("n out of range\n");
13451+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
13452+ INTELPllInvalid("vco out of range\n");
13453+ /* XXX: We may need to be checking "Dot clock"
13454+ * depending on the multiplier, connector, etc.,
13455+ * rather than just a single range.
13456+ */
13457+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
13458+ INTELPllInvalid("dot out of range\n");
13459+
13460+ return true;
13461+}
13462+
13463+/**
13464+ * Returns a set of divisors for the desired target clock with the given
13465+ * refclk, or FALSE. The returned values represent the clock equation:
13466+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
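+ * For example, with refclk = 96000 kHz, m1 = m2 = 10, n = 2, p1 = 2 and
+ * p2 = 10 (values chosen purely for illustration), this gives
+ * dot = 96000 * (5 * 12 + 12) / 4 / 2 / 10 = 86400 kHz.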
13467+ */
13468+static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
13469+ int refclk,
13470+ struct psb_intel_clock_t *best_clock)
13471+{
13472+ struct drm_device *dev = crtc->dev;
13473+ struct psb_intel_clock_t clock;
13474+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
13475+ int err = target;
13476+
13477+ if (IS_I9XX(dev) && psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
13478+ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
13479+ /*
13480+ * For LVDS, if the panel is on, just rely on its current
13481+ * settings for dual-channel. We haven't figured out how to
13482+ * reliably set up different single/dual channel state, if we
13483+ * even can.
13484+ */
13485+ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
13486+ LVDS_CLKB_POWER_UP)
13487+ clock.p2 = limit->p2.p2_fast;
13488+ else
13489+ clock.p2 = limit->p2.p2_slow;
13490+ } else {
13491+ if (target < limit->p2.dot_limit)
13492+ clock.p2 = limit->p2.p2_slow;
13493+ else
13494+ clock.p2 = limit->p2.p2_fast;
13495+ }
13496+
13497+ memset(best_clock, 0, sizeof(*best_clock));
13498+
13499+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
13500+ clock.m1++) {
13501+ for (clock.m2 = limit->m2.min;
13502+ clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
13503+ clock.m2++) {
13504+ for (clock.n = limit->n.min;
13505+ clock.n <= limit->n.max; clock.n++) {
13506+ for (clock.p1 = limit->p1.min;
13507+ clock.p1 <= limit->p1.max;
13508+ clock.p1++) {
13509+ int this_err;
13510+
13511+ psb_intel_clock(dev, refclk, &clock);
13512+
13513+ if (!psb_intel_PLL_is_valid
13514+ (crtc, &clock))
13515+ continue;
13516+
13517+ this_err = abs(clock.dot - target);
13518+ if (this_err < err) {
13519+ *best_clock = clock;
13520+ err = this_err;
13521+ }
13522+ }
13523+ }
13524+ }
13525+ }
13526+
13527+ return err != target;
13528+}
13529+
13530+void psb_intel_wait_for_vblank(struct drm_device *dev)
13531+{
13532+ /* Wait for 20ms, i.e. one cycle at 50Hz. */
13533+ udelay(20000);
13534+}
13535+
13536+int psb_intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb)
13537+{
13538+ struct drm_device *dev = crtc->dev;
13539+ /* struct drm_i915_master_private *master_priv; */
13540+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
13541+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
13542+ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
13543+ int pipe = psb_intel_crtc->pipe;
13544+ unsigned long Start, Offset;
13545+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
13546+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
13547+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
13548+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
13549+ u32 dspcntr;
13550+ int ret = 0;
13551+
13552+ /* no fb bound */
13553+ if (!crtc->fb) {
13554+ DRM_DEBUG("No FB bound\n");
13555+ return 0;
13556+ }
13557+
13558+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
13559+
13560+ if (IS_MRST(dev) && (pipe == 0))
13561+ dspbase = MRST_DSPABASE;
13562+
13563+ Start = mode_dev->bo_offset(dev, psbfb->bo);
13564+ Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
13565+
13566+ REG_WRITE(dspstride, crtc->fb->pitch);
13567+
13568+ dspcntr = REG_READ(dspcntr_reg);
13569+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
13570+
13571+ switch (crtc->fb->bits_per_pixel) {
13572+ case 8:
13573+ dspcntr |= DISPPLANE_8BPP;
13574+ break;
13575+ case 16:
13576+ if (crtc->fb->depth == 15)
13577+ dspcntr |= DISPPLANE_15_16BPP;
13578+ else
13579+ dspcntr |= DISPPLANE_16BPP;
13580+ break;
13581+ case 24:
13582+ case 32:
13583+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
13584+ break;
13585+ default:
13586+ DRM_ERROR("Unknown color depth\n");
13587+ ret = -EINVAL;
13588+ goto psb_intel_pipe_set_base_exit;
13589+ }
13590+ REG_WRITE(dspcntr_reg, dspcntr);
13591+
13592+ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
13593+ if (IS_I965G(dev) || IS_MRST(dev)) {
13594+ REG_WRITE(dspbase, Offset);
13595+ REG_READ(dspbase);
13596+ REG_WRITE(dspsurf, Start);
13597+ REG_READ(dspsurf);
13598+ } else {
13599+ REG_WRITE(dspbase, Start + Offset);
13600+ REG_READ(dspbase);
13601+ }
13602+
13603+psb_intel_pipe_set_base_exit:
13604+
13605+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
13606+
13607+ return ret;
13608+}
13609+
13610+int psb_kms_flip_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb)
13611+{
13612+ struct drm_device *dev = crtc->dev;
13613+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
13614+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
13615+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
13616+ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
13617+ int pipe = psb_intel_crtc->pipe;
13618+
13619+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
13620+ struct psb_task *task = NULL;
13621+ task = kzalloc(sizeof(*task), GFP_KERNEL);
13622+ if (!task)
13623+ return -ENOMEM;
13624+ INIT_LIST_HEAD(&task->head);
13625+ INIT_LIST_HEAD(&task->buf.head);
13626+ task->task_type = psb_flip_task;
13627+
13628+ spin_lock_irq(&scheduler->lock);
13629+ list_add_tail(&task->head, &scheduler->ta_queue);
13630+ /**
13631+ * From this point we may no longer dereference task,
13632+ * as the object it points to may be freed by another thread.
13633+ */
13634+
13635+ task = NULL;
13636+ spin_unlock_irq(&scheduler->lock);
13637+
13638+ /* no fb bound */
13639+ if (!crtc->fb) {
13640+ DRM_DEBUG("No FB bound\n");
13641+ return 0;
13642+ }
13643+
13644+ dev_priv->flip_start[pipe] = mode_dev->bo_offset(dev, psbfb->bo);
13645+ dev_priv->flip_offset[pipe] = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
13646+ dev_priv->flip_stride[pipe] = crtc->fb->pitch;
13647+ dev_priv->pipe_active[pipe] = 1;
13648+ dev_priv->pipe_active[1-pipe] = 0;
13649+
13650+ return 0;
13651+}
13652+
13653+/**
13654+ * Sets the power management mode of the pipe and plane.
13655+ *
13656+ * This code should probably grow support for turning the cursor off and back
13657+ * on appropriately at the same time as we're turning the pipe off/on.
13658+ */
13659+static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
13660+{
13661+ struct drm_device *dev = crtc->dev;
13662+ /* struct drm_i915_master_private *master_priv; */
13663+ /* struct drm_i915_private *dev_priv = dev->dev_private; */
13664+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
13665+ int pipe = psb_intel_crtc->pipe;
13666+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
13667+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
13668+ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
13669+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
13670+ u32 temp;
13671+ bool enabled;
13672+
13673+ /* XXX: When our outputs are all unaware of DPMS modes other than off
13674+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
13675+ */
13676+ switch (mode) {
13677+ case DRM_MODE_DPMS_ON:
13678+ case DRM_MODE_DPMS_STANDBY:
13679+ case DRM_MODE_DPMS_SUSPEND:
13680+ /* Enable the DPLL */
13681+ temp = REG_READ(dpll_reg);
13682+ if ((temp & DPLL_VCO_ENABLE) == 0) {
13683+ REG_WRITE(dpll_reg, temp);
13684+ REG_READ(dpll_reg);
13685+ /* Wait for the clocks to stabilize. */
13686+ udelay(150);
13687+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
13688+ REG_READ(dpll_reg);
13689+ /* Wait for the clocks to stabilize. */
13690+ udelay(150);
13691+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
13692+ REG_READ(dpll_reg);
13693+ /* Wait for the clocks to stabilize. */
13694+ udelay(150);
13695+ }
13696+
13697+ /* Enable the pipe */
13698+ temp = REG_READ(pipeconf_reg);
13699+ if ((temp & PIPEACONF_ENABLE) == 0)
13700+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
13701+
13702+ /* Enable the plane */
13703+ temp = REG_READ(dspcntr_reg);
13704+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
13705+ REG_WRITE(dspcntr_reg,
13706+ temp | DISPLAY_PLANE_ENABLE);
13707+ /* Flush the plane changes */
13708+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
13709+ }
13710+
13711+ psb_intel_crtc_load_lut(crtc);
13712+
13713+ /* Give the overlay scaler a chance to enable
13714+ * if it's on this pipe */
13715+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
13716+ break;
13717+ case DRM_MODE_DPMS_OFF:
13718+ /* Give the overlay scaler a chance to disable
13719+ * if it's on this pipe */
13720+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
13721+
13722+ /* Disable the VGA plane that we never use */
13723+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
13724+
13725+ /* Disable display plane */
13726+ temp = REG_READ(dspcntr_reg);
13727+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
13728+ REG_WRITE(dspcntr_reg,
13729+ temp & ~DISPLAY_PLANE_ENABLE);
13730+ /* Flush the plane changes */
13731+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
13732+ REG_READ(dspbase_reg);
13733+ }
13734+
13735+ if (!IS_I9XX(dev)) {
13736+ /* Wait for vblank for the disable to take effect */
13737+ psb_intel_wait_for_vblank(dev);
13738+ }
13739+
13740+ /* Next, disable display pipes */
13741+ temp = REG_READ(pipeconf_reg);
13742+ if ((temp & PIPEACONF_ENABLE) != 0) {
13743+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
13744+ REG_READ(pipeconf_reg);
13745+ }
13746+
13747+ /* Wait for vblank for the disable to take effect. */
13748+ psb_intel_wait_for_vblank(dev);
13749+
13750+ temp = REG_READ(dpll_reg);
13751+ if ((temp & DPLL_VCO_ENABLE) != 0) {
13752+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
13753+ REG_READ(dpll_reg);
13754+ }
13755+
13756+ /* Wait for the clocks to turn off. */
13757+ udelay(150);
13758+ break;
13759+ }
13760+
13761+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
13762+
13763+#if 0 /* JB: Add vblank support later */
13764+ if (enabled)
13765+ dev_priv->vblank_pipe |= (1 << pipe);
13766+ else
13767+ dev_priv->vblank_pipe &= ~(1 << pipe);
13768+#endif
13769+
13770+ psb_intel_crtc->dpms_mode = mode;
13771+
13772+#if 0 /* JB: Add sarea support later */
13773+ if (!dev->primary->master)
13774+ return 0;
13775+
13776+ master_priv = dev->primary->master->driver_priv;
13777+ if (!master_priv->sarea_priv)
13778+ return 0;
13779+
13780+ switch (pipe) {
13781+ case 0:
13782+ master_priv->sarea_priv->planeA_w =
13783+ enabled ? crtc->mode.hdisplay : 0;
13784+ master_priv->sarea_priv->planeA_h =
13785+ enabled ? crtc->mode.vdisplay : 0;
13786+ break;
13787+ case 1:
13788+ master_priv->sarea_priv->planeB_w =
13789+ enabled ? crtc->mode.hdisplay : 0;
13790+ master_priv->sarea_priv->planeB_h =
13791+ enabled ? crtc->mode.vdisplay : 0;
13792+ break;
13793+ default:
13794+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
13795+ break;
13796+ }
13797+#endif
13798+
13799+ /* Set FIFO watermarks */
13800+ REG_WRITE(DSPARB, 0x3F3E);
13801+}
13802+
13803+static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
13804+{
13805+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
13806+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
13807+}
13808+
13809+static void psb_intel_crtc_commit(struct drm_crtc *crtc)
13810+{
13811+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
13812+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
13813+}
13814+
13815+void psb_intel_encoder_prepare(struct drm_encoder *encoder)
13816+{
13817+ struct drm_encoder_helper_funcs *encoder_funcs =
13818+ encoder->helper_private;
13819+ /* LVDS has its own version of prepare; see psb_intel_lvds_prepare */
13820+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
13821+}
13822+
13823+void psb_intel_encoder_commit(struct drm_encoder *encoder)
13824+{
13825+ struct drm_encoder_helper_funcs *encoder_funcs =
13826+ encoder->helper_private;
13827+ /* LVDS has its own version of commit; see psb_intel_lvds_commit */
13828+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
13829+}
13830+
13831+static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
13832+ struct drm_display_mode *mode,
13833+ struct drm_display_mode *adjusted_mode)
13834+{
13835+ return true;
13836+}
13837+
13838+
13839+/**
13840+ * Return the pipe currently connected to the panel fitter,
13841+ * or -1 if the panel fitter is not present or not in use
13842+ */
13843+static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
13844+{
13845+ u32 pfit_control;
13846+
13847+ /* i830 doesn't have a panel fitter */
13848+ if (IS_I830(dev))
13849+ return -1;
13850+
13851+ pfit_control = REG_READ(PFIT_CONTROL);
13852+
13853+ /* See if the panel fitter is in use */
13854+ if ((pfit_control & PFIT_ENABLE) == 0)
13855+ return -1;
13856+
13857+ /* 965 can place panel fitter on either pipe */
13858+ if (IS_I965G(dev) || IS_MRST(dev))
13859+ return (pfit_control >> 29) & 0x3;
13860+
13861+ /* older chips can only use pipe 1 */
13862+ return 1;
13863+}
13864+
13865+static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
13866+ struct drm_display_mode *mode,
13867+ struct drm_display_mode *adjusted_mode,
13868+ int x, int y,
13869+ struct drm_framebuffer *old_fb)
13870+{
13871+ struct drm_device *dev = crtc->dev;
13872+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
13873+ int pipe = psb_intel_crtc->pipe;
13874+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
13875+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
13876+ int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
13877+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
13878+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
13879+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
13880+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
13881+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
13882+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
13883+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
13884+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
13885+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
13886+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
13887+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
13888+ int refclk;
13889+ struct psb_intel_clock_t clock;
13890+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
13891+ bool ok, is_sdvo = false, is_dvo = false;
13892+ bool is_crt = false, is_lvds = false, is_tv = false;
13893+ struct drm_mode_config *mode_config = &dev->mode_config;
13894+ struct drm_connector *connector;
13895+
13896+ list_for_each_entry(connector, &mode_config->connector_list, head) {
13897+ struct psb_intel_output *psb_intel_output =
13898+ to_psb_intel_output(connector);
13899+
13900+ if (!connector->encoder
13901+ || connector->encoder->crtc != crtc)
13902+ continue;
13903+
13904+ switch (psb_intel_output->type) {
13905+ case INTEL_OUTPUT_LVDS:
13906+ is_lvds = true;
13907+ break;
13908+ case INTEL_OUTPUT_SDVO:
13909+ is_sdvo = true;
13910+ break;
13911+ case INTEL_OUTPUT_DVO:
13912+ is_dvo = true;
13913+ break;
13914+ case INTEL_OUTPUT_TVOUT:
13915+ is_tv = true;
13916+ break;
13917+ case INTEL_OUTPUT_ANALOG:
13918+ is_crt = true;
13919+ break;
13920+ }
13921+ }
13922+
13923+ if (IS_I9XX(dev))
13924+ refclk = 96000;
13925+ else
13926+ refclk = 48000;
13927+
13928+ ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
13929+ &clock);
13930+ if (!ok) {
13931+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
13932+ return 0;
13933+ }
13934+
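+ /* Pack the N, M1 and M2 divisor values into the FP register layout. */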
13935+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
13936+
13937+ dpll = DPLL_VGA_MODE_DIS;
13938+ if (IS_I9XX(dev)) {
13939+ if (is_lvds) {
13940+ dpll |= DPLLB_MODE_LVDS;
13941+ if (IS_POULSBO(dev))
13942+ dpll |= DPLL_DVO_HIGH_SPEED;
13943+ } else
13944+ dpll |= DPLLB_MODE_DAC_SERIAL;
13945+ if (is_sdvo) {
13946+ dpll |= DPLL_DVO_HIGH_SPEED;
13947+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_POULSBO(dev)) {
13948+ int sdvo_pixel_multiply =
13949+ adjusted_mode->clock / mode->clock;
13950+ dpll |=
13951+ (sdvo_pixel_multiply -
13952+ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
13953+ }
13954+ }
13955+
13956+ /* compute bitmask from p1 value */
13957+ dpll |= (1 << (clock.p1 - 1)) << 16;
13958+ switch (clock.p2) {
13959+ case 5:
13960+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
13961+ break;
13962+ case 7:
13963+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
13964+ break;
13965+ case 10:
13966+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
13967+ break;
13968+ case 14:
13969+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
13970+ break;
13971+ }
13972+ if (IS_I965G(dev))
13973+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
13974+ } else {
13975+ if (is_lvds) {
13976+ dpll |=
13977+ (1 << (clock.p1 - 1)) <<
13978+ DPLL_FPA01_P1_POST_DIV_SHIFT;
13979+ } else {
13980+ if (clock.p1 == 2)
13981+ dpll |= PLL_P1_DIVIDE_BY_TWO;
13982+ else
13983+ dpll |=
13984+ (clock.p1 -
13985+ 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
13986+ if (clock.p2 == 4)
13987+ dpll |= PLL_P2_DIVIDE_BY_4;
13988+ }
13989+ }
13990+
13991+ if (is_tv) {
13992+ /* XXX: just matching BIOS for now */
13993+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
13994+ dpll |= 3;
13995+ }
13996+#if 0
13997+ else if (is_lvds)
13998+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
13999+#endif
14000+ else
14001+ dpll |= PLL_REF_INPUT_DREFCLK;
14002+
14003+ /* setup pipeconf */
14004+ pipeconf = REG_READ(pipeconf_reg);
14005+
14006+ /* Set up the display plane register */
14007+ dspcntr = DISPPLANE_GAMMA_ENABLE;
14008+
14009+ if (pipe == 0)
14010+ dspcntr |= DISPPLANE_SEL_PIPE_A;
14011+ else
14012+ dspcntr |= DISPPLANE_SEL_PIPE_B;
14013+
14014+ dspcntr |= DISPLAY_PLANE_ENABLE;
14015+ pipeconf |= PIPEACONF_ENABLE;
14016+ dpll |= DPLL_VCO_ENABLE;
14017+
14018+
14019+ /* Disable the panel fitter if it was on our pipe */
14020+ if (psb_intel_panel_fitter_pipe(dev) == pipe)
14021+ REG_WRITE(PFIT_CONTROL, 0);
14022+
14023+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
14024+ drm_mode_debug_printmodeline(mode);
14025+
14026+ if (dpll & DPLL_VCO_ENABLE) {
14027+ REG_WRITE(fp_reg, fp);
14028+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
14029+ REG_READ(dpll_reg);
14030+ udelay(150);
14031+ }
14032+
14033+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
14034+ * This is an exception to the general rule that mode_set doesn't turn
14035+ * things on.
14036+ */
14037+ if (is_lvds) {
14038+ u32 lvds = REG_READ(LVDS);
14039+
14040+ lvds |=
14041+ LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
14042+ LVDS_PIPEB_SELECT;
14043+ /* Set the B0-B3 data pairs corresponding to
14044+ * whether we're going to
14045+ * set the DPLLs for dual-channel mode or not.
14046+ */
14047+ if (clock.p2 == 7)
14048+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
14049+ else
14050+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
14051+
14052+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
14053+ * appropriately here, but we need to look more
14054+ * thoroughly into how panels behave in the two modes.
14055+ */
14056+
14057+ REG_WRITE(LVDS, lvds);
14058+ REG_READ(LVDS);
14059+ }
14060+
14061+ REG_WRITE(fp_reg, fp);
14062+ REG_WRITE(dpll_reg, dpll);
14063+ REG_READ(dpll_reg);
14064+ /* Wait for the clocks to stabilize. */
14065+ udelay(150);
14066+
14067+ if (IS_I965G(dev)) {
14068+ int sdvo_pixel_multiply =
14069+ adjusted_mode->clock / mode->clock;
14070+ REG_WRITE(dpll_md_reg,
14071+ (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
14072+ ((sdvo_pixel_multiply -
14073+ 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
14074+ } else {
14075+ /* write it again -- the BIOS does, after all */
14076+ REG_WRITE(dpll_reg, dpll);
14077+ }
14078+ REG_READ(dpll_reg);
14079+ /* Wait for the clocks to stabilize. */
14080+ udelay(150);
14081+
14082+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
14083+ ((adjusted_mode->crtc_htotal - 1) << 16));
14084+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
14085+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
14086+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
14087+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
14088+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
14089+ ((adjusted_mode->crtc_vtotal - 1) << 16));
14090+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
14091+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
14092+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
14093+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
14094+ /* pipesrc and dspsize control the size that is scaled from,
14095+ * which should always be the user's requested size.
14096+ */
14097+ REG_WRITE(dspsize_reg,
14098+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
14099+ REG_WRITE(dsppos_reg, 0);
14100+ REG_WRITE(pipesrc_reg,
14101+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
14102+ REG_WRITE(pipeconf_reg, pipeconf);
14103+ REG_READ(pipeconf_reg);
14104+
14105+ psb_intel_wait_for_vblank(dev);
14106+
14107+ REG_WRITE(dspcntr_reg, dspcntr);
14108+
14109+ /* Flush the plane changes */
14110+ {
14111+ struct drm_crtc_helper_funcs *crtc_funcs =
14112+ crtc->helper_private;
14113+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
14114+ }
14115+
14116+ psb_intel_wait_for_vblank(dev);
14117+
14118+ return 0;
14119+}
14120+
14121+/** Loads the palette/gamma unit for the CRTC with the prepared values */
14122+void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
14123+{
14124+ struct drm_device *dev = crtc->dev;
14125+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14126+ int palreg = (psb_intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
14127+ int i;
14128+
14129+ /* The clocks have to be on to load the palette. */
14130+ if (!crtc->enabled)
14131+ return;
14132+
14133+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
14134+ for (i = 0; i < 256; i++) {
14135+ REG_WRITE(palreg + 4 * i,
14136+ (psb_intel_crtc->lut_r[i] << 16) |
14137+ (psb_intel_crtc->lut_g[i] << 8) |
14138+ psb_intel_crtc->lut_b[i]);
14139+ }
14140+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
14141+ }
14142+}
14143+
14144+#ifndef CONFIG_MRST
14145+/**
14146+ * Save the HW state of the given CRTC
14147+ */
14148+static void psb_intel_crtc_save(struct drm_crtc * crtc)
14149+{
14150+ struct drm_device * dev = crtc->dev;
14151+ // struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
14152+ struct psb_intel_crtc * psb_intel_crtc = to_psb_intel_crtc(crtc);
14153+ struct psb_intel_crtc_state * crtc_state = psb_intel_crtc->crtc_state;
14154+ int pipeA = (psb_intel_crtc->pipe == 0);
14155+ uint32_t paletteReg;
14156+ int i;
14157+
14158+ DRM_DEBUG("\n");
14159+
14160+ if(!crtc_state) {
14161+ DRM_DEBUG("No CRTC state found\n");
14162+ return;
14163+ }
14164+
14165+ crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
14166+ crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
14167+ crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
14168+ crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
14169+ crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
14170+ crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
14171+ crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
14172+ crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
14173+ crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
14174+ crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
14175+ crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
14176+ crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
14177+ crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
14178+
14179+ /* NOTE: DSPSIZE and DSPPOS exist only on PSB */
14180+ crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
14181+ crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
14182+
14183+ crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
14184+
14185+ DRM_DEBUG("(%x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x)\n",
14186+ crtc_state->saveDSPCNTR,
14187+ crtc_state->savePIPECONF,
14188+ crtc_state->savePIPESRC,
14189+ crtc_state->saveFP0,
14190+ crtc_state->saveFP1,
14191+ crtc_state->saveDPLL,
14192+ crtc_state->saveHTOTAL,
14193+ crtc_state->saveHBLANK,
14194+ crtc_state->saveHSYNC,
14195+ crtc_state->saveVTOTAL,
14196+ crtc_state->saveVBLANK,
14197+ crtc_state->saveVSYNC,
14198+ crtc_state->saveDSPSTRIDE,
14199+ crtc_state->saveDSPSIZE,
14200+ crtc_state->saveDSPPOS,
14201+ crtc_state->saveDSPBASE
14202+ );
14203+
14204+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
14205+ for(i=0; i<256; ++i) {
14206+ crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
14207+ }
14208+}
14209+
14210+/**
14211+ * Restore the HW state of the given CRTC
14212+ */
14213+static void psb_intel_crtc_restore(struct drm_crtc * crtc)
14214+{
14215+ struct drm_device * dev = crtc->dev;
14216+ // struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
14217+ struct psb_intel_crtc * psb_intel_crtc = to_psb_intel_crtc(crtc);
14218+ struct psb_intel_crtc_state * crtc_state = psb_intel_crtc->crtc_state;
14219+ // struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private;
14220+ int pipeA = (psb_intel_crtc->pipe == 0);
14221+ uint32_t paletteReg;
14222+ int i;
14223+
14224+ DRM_DEBUG("\n");
14225+
14226+ if(!crtc_state) {
14227+ DRM_DEBUG("No crtc state\n");
14228+ return;
14229+ }
14230+
14231+ DRM_DEBUG("current: (%x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x)\n",
14232+ REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
14233+ REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
14234+ REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
14235+ REG_READ(pipeA ? FPA0 : FPB0),
14236+ REG_READ(pipeA ? FPA1 : FPB1),
14237+ REG_READ(pipeA ? DPLL_A : DPLL_B),
14238+ REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
14239+ REG_READ(pipeA ? HBLANK_A : HBLANK_B),
14240+ REG_READ(pipeA ? HSYNC_A : HSYNC_B),
14241+ REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
14242+ REG_READ(pipeA ? VBLANK_A : VBLANK_B),
14243+ REG_READ(pipeA ? VSYNC_A : VSYNC_B),
14244+ REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
14245+ REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
14246+ REG_READ(pipeA ? DSPAPOS : DSPBPOS),
14247+ REG_READ(pipeA ? DSPABASE : DSPBBASE)
14248+ );
14249+
14250+ DRM_DEBUG("saved: (%x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x)\n",
14251+ crtc_state->saveDSPCNTR,
14252+ crtc_state->savePIPECONF,
14253+ crtc_state->savePIPESRC,
14254+ crtc_state->saveFP0,
14255+ crtc_state->saveFP1,
14256+ crtc_state->saveDPLL,
14257+ crtc_state->saveHTOTAL,
14258+ crtc_state->saveHBLANK,
14259+ crtc_state->saveHSYNC,
14260+ crtc_state->saveVTOTAL,
14261+ crtc_state->saveVBLANK,
14262+ crtc_state->saveVSYNC,
14263+ crtc_state->saveDSPSTRIDE,
14264+ crtc_state->saveDSPSIZE,
14265+ crtc_state->saveDSPPOS,
14266+ crtc_state->saveDSPBASE
14267+ );
14268+
14269+
14270+#if 0
14271+ if(drm_helper_crtc_in_use(crtc))
14272+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
14273+
14274+
14275+ if(psb_intel_panel_fitter_pipe(dev) == psb_intel_crtc->pipe) {
14276+ REG_WRITE(PFIT_CONTROL, crtc_state->savePFITCTRL);
14277+ DRM_DEBUG("write pfit_control: %x\n", REG_READ(PFIT_CONTROL));
14278+ }
14279+#endif
14280+
14281+ if(crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
14282+ REG_WRITE(pipeA ? DPLL_A : DPLL_B,
14283+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
14284+ REG_READ(pipeA ? DPLL_A : DPLL_B);
14285+ DRM_DEBUG("write dpll: %x\n", REG_READ(pipeA ? DPLL_A : DPLL_B));
14286+ udelay(150);
14287+ }
14288+
14289+ REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
14290+ REG_READ(pipeA ? FPA0 : FPB0);
14291+
14292+ REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
14293+ REG_READ(pipeA ? FPA1 : FPB1);
14294+
14295+ REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
14296+ REG_READ(pipeA ? DPLL_A : DPLL_B);
14297+ udelay(150);
14298+
14299+ REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
14300+ REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
14301+ REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
14302+ REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
14303+ REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
14304+ REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
14305+ REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
14306+
14307+ REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
14308+ REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
14309+
14310+ REG_WRITE(pipeA ? PIPEASRC :PIPEBSRC, crtc_state->savePIPESRC);
14311+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
14312+ REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
14313+
14314+ psb_intel_wait_for_vblank(dev);
14315+
14316+ REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
14317+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
14318+
14319+ psb_intel_wait_for_vblank(dev);
14320+
14321+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
14322+ for(i=0; i<256; ++i) {
14323+ REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
14324+ }
14325+}
14326+#endif
14327+
14328+static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
14329+ struct drm_file *file_priv,
14330+ uint32_t handle,
14331+ uint32_t width, uint32_t height)
14332+{
14333+ struct drm_device *dev = crtc->dev;
14334+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
14335+ struct psb_gtt * pg = dev_priv->pg;
14336+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14337+ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
14338+ int pipe = psb_intel_crtc->pipe;
14339+ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
14340+ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
14341+ uint32_t temp;
14342+ size_t addr = 0;
14343+ size_t size;
14344+ void *bo;
14345+ int ret;
14346+
14347+ DRM_DEBUG("\n");
14348+
14349+ /* if we want to turn off the cursor, ignore width and height */
14350+ if (!handle) {
14351+ DRM_DEBUG("cursor off\n");
14352+ /* turn off the cursor */
14353+ temp = 0;
14354+ temp |= CURSOR_MODE_DISABLE;
14355+
14356+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
14357+ REG_WRITE(control, temp);
14358+ REG_WRITE(base, 0);
14359+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
14360+ }
14361+
14362+ /* unpin the old bo */
14363+ if (psb_intel_crtc->cursor_bo) {
14364+ mode_dev->bo_unpin_for_scanout(dev,
14365+ psb_intel_crtc->
14366+ cursor_bo);
14367+ psb_intel_crtc->cursor_bo = NULL;
14368+ }
14369+
14370+ return 0;
14371+ }
14372+
14373+ /* Currently we only support 64x64 cursors */
14374+ if (width != 64 || height != 64) {
14375+ DRM_ERROR("we currently only support 64x64 cursors\n");
14376+ return -EINVAL;
14377+ }
14378+
14379+ bo = mode_dev->bo_from_handle(dev, file_priv, handle);
14380+ if (!bo)
14381+ return -ENOENT;
14382+ ret = mode_dev->bo_pin_for_scanout(dev, bo);
14383+ if (ret)
14384+ return ret;
14385+ size = mode_dev->bo_size(dev, bo);
14386+ if (size < width * height * 4) {
14387+ DRM_ERROR("buffer is too small\n");
14388+ return -ENOMEM;
14389+ }
14390+ addr = mode_dev->bo_offset(dev, bo);
14391+ if(IS_POULSBO(dev)) {
14392+ addr += pg->stolen_base;
14393+ }
14394+
14395+ psb_intel_crtc->cursor_addr = addr;
14396+
14397+ temp = 0;
14398+ /* set the pipe for the cursor */
14399+ temp |= (pipe << 28);
14400+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
14401+
14402+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
14403+ REG_WRITE(control, temp);
14404+ REG_WRITE(base, addr);
14405+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
14406+ }
14407+
14408+ /* unpin the old bo */
14409+ if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo) {
14410+ mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo);
14411+ psb_intel_crtc->cursor_bo = bo;
14412+ }
14413+
14414+ return 0;
14415+}
14416+
14417+static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
14418+{
14419+ struct drm_device *dev = crtc->dev;
14420+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14421+ int pipe = psb_intel_crtc->pipe;
14422+ uint32_t temp = 0;
14423+ uint32_t adder;
14424+
14425+ if (x < 0) {
14426+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
14427+ x = -x;
14428+ }
14429+ if (y < 0) {
14430+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
14431+ y = -y;
14432+ }
14433+
14434+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
14435+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
14436+
14437+ adder = psb_intel_crtc->cursor_addr;
14438+
14439+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
14440+ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
14441+ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
14442+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
14443+ }
14444+ return 0;
14445+}
14446+
14447+static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
14448+ u16 *green, u16 *blue, uint32_t size)
14449+{
14450+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14451+ int i;
14452+
14453+ if (size != 256)
14454+ return;
14455+
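+ /* The hardware palette stores 8 bits per channel, so drop the low
+ * byte of each 16-bit DRM gamma value. */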
14456+ for (i = 0; i < 256; i++) {
14457+ psb_intel_crtc->lut_r[i] = red[i] >> 8;
14458+ psb_intel_crtc->lut_g[i] = green[i] >> 8;
14459+ psb_intel_crtc->lut_b[i] = blue[i] >> 8;
14460+ }
14461+
14462+ psb_intel_crtc_load_lut(crtc);
14463+}
14464+
14465+/* Returns the clock of the currently programmed mode of the given pipe. */
14466+static int psb_intel_crtc_clock_get(struct drm_device *dev,
14467+ struct drm_crtc *crtc)
14468+{
14469+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14470+ int pipe = psb_intel_crtc->pipe;
14471+ u32 dpll;
14472+ u32 fp;
14473+ struct psb_intel_clock_t clock;
14474+ bool is_lvds;
14475+ struct drm_psb_private *dev_priv = dev->dev_private;
14476+
14477+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
14478+ dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
14479+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
14480+ fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
14481+ else
14482+ fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
14483+ is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
14484+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
14485+ } else {
14486+ dpll = (pipe == 0) ? dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
14487+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
14488+ fp = (pipe == 0) ? dev_priv->saveFPA0 : dev_priv->saveFPB0;
14489+ else
14490+ fp = (pipe == 0) ? dev_priv->saveFPA1 : dev_priv->saveFPB1;
14491+ is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
14492+ }
14493+
14494+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
14495+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
14496+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
14497+
14498+ if (is_lvds) {
14499+ clock.p1 =
14500+ ffs((dpll &
14501+ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
14502+ DPLL_FPA01_P1_POST_DIV_SHIFT);
14503+ clock.p2 = 14;
14504+
14505+ if ((dpll & PLL_REF_INPUT_MASK) ==
14506+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
14507+ /* XXX: might not be 66MHz */
14508+ i8xx_clock(66000, &clock);
14509+ } else
14510+ i8xx_clock(48000, &clock);
14511+ } else {
14512+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
14513+ clock.p1 = 2;
14514+ else {
14515+ clock.p1 =
14516+ ((dpll &
14517+ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
14518+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
14519+ }
14520+ if (dpll & PLL_P2_DIVIDE_BY_4)
14521+ clock.p2 = 4;
14522+ else
14523+ clock.p2 = 2;
14524+
14525+ i8xx_clock(48000, &clock);
14526+ }
14527+
14528+ /* XXX: It would be nice to validate the clocks, but we can't reuse
14529+ * i830PllIsValid() because it relies on the xf86_config connector
14530+ * configuration being accurate, which it isn't necessarily.
14531+ */
14532+
14533+ return clock.dot;
14534+}
14535+
14536+/** Returns the currently programmed mode of the given pipe. */
14537+struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
14538+ struct drm_crtc *crtc)
14539+{
14540+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14541+ int pipe = psb_intel_crtc->pipe;
14542+ struct drm_display_mode *mode;
14543+ int htot;
14544+ int hsync;
14545+ int vtot;
14546+ int vsync;
14547+ struct drm_psb_private *dev_priv = dev->dev_private;
14548+
14549+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
14550+ htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
14551+ hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
14552+ vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
14553+ vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
14554+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
14555+ } else {
14556+ htot = (pipe == 0) ? dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
14557+ hsync = (pipe == 0) ? dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
14558+ vtot = (pipe == 0) ? dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
14559+ vsync = (pipe == 0) ? dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
14560+ }
14561+
14562+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
14563+ if (!mode)
14564+ return NULL;
14565+
14566+ mode->clock = psb_intel_crtc_clock_get(dev, crtc);
14567+ mode->hdisplay = (htot & 0xffff) + 1;
14568+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
14569+ mode->hsync_start = (hsync & 0xffff) + 1;
14570+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
14571+ mode->vdisplay = (vtot & 0xffff) + 1;
14572+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
14573+ mode->vsync_start = (vsync & 0xffff) + 1;
14574+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
14575+
14576+ drm_mode_set_name(mode);
14577+ drm_mode_set_crtcinfo(mode, 0);
14578+
14579+ return mode;
14580+}
14581+
14582+static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
14583+{
14584+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14585+
14586+#ifndef CONFIG_MRST
14587+ if(psb_intel_crtc->crtc_state)
14588+ kfree(psb_intel_crtc->crtc_state);
14589+#endif
14590+ drm_crtc_cleanup(crtc);
14591+ kfree(psb_intel_crtc);
14592+}
14593+
14594+static const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
14595+ .dpms = psb_intel_crtc_dpms,
14596+ .mode_fixup = psb_intel_crtc_mode_fixup,
14597+ .mode_set = psb_intel_crtc_mode_set,
14598+ .mode_set_base = psb_intel_pipe_set_base,
14599+ .prepare = psb_intel_crtc_prepare,
14600+ .commit = psb_intel_crtc_commit,
14601+};
14602+
14603+static const struct drm_crtc_helper_funcs mrst_helper_funcs;
14604+
14605+const struct drm_crtc_funcs psb_intel_crtc_funcs = {
14606+#ifndef CONFIG_MRST
14607+ .save = psb_intel_crtc_save,
14608+ .restore = psb_intel_crtc_restore,
14609+#endif
14610+ .cursor_set = psb_intel_crtc_cursor_set,
14611+ .cursor_move = psb_intel_crtc_cursor_move,
14612+ .gamma_set = psb_intel_crtc_gamma_set,
14613+ .set_config = drm_crtc_helper_set_config,
14614+ .destroy = psb_intel_crtc_destroy,
14615+ .set_base = psb_kms_flip_set_base,
14616+};
14617+
14618+
14619+void psb_intel_crtc_init(struct drm_device *dev, int pipe,
14620+ struct psb_intel_mode_device *mode_dev)
14621+{
14622+ struct psb_intel_crtc *psb_intel_crtc;
14623+ int i;
14624+ uint16_t *r_base, *g_base, *b_base;
14625+
14626+#if PRINT_JLIU7
14627+ DRM_INFO("JLIU7 enter psb_intel_crtc_init \n");
14628+#endif /* PRINT_JLIU7 */
14629+
14630+ /* We allocate an extra array of drm_connector pointers
14631+ * for fbdev after the crtc */
14632+ psb_intel_crtc =
14633+ kzalloc(sizeof(struct psb_intel_crtc) +
14634+ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
14635+ GFP_KERNEL);
14636+ if (psb_intel_crtc == NULL)
14637+ return;
14638+
14639+#ifndef CONFIG_MRST
14640+ psb_intel_crtc->crtc_state = kzalloc(sizeof(struct psb_intel_crtc_state),
14641+ GFP_KERNEL);
14642+ if(!psb_intel_crtc->crtc_state) {
14643+ DRM_INFO("Crtc state error: No memory\n");
14644+ kfree(psb_intel_crtc);
14645+ return;
14646+ }
14647+#endif
14648+
14649+ drm_crtc_init(dev, &psb_intel_crtc->base, &psb_intel_crtc_funcs);
14650+
14651+ drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
14652+ psb_intel_crtc->pipe = pipe;
14653+
14654+ r_base = psb_intel_crtc->base.gamma_store;
14655+ g_base = r_base + 256;
14656+ b_base = g_base + 256;
14657+ for (i = 0; i < 256; i++) {
14658+ psb_intel_crtc->lut_r[i] = i;
14659+ psb_intel_crtc->lut_g[i] = i;
14660+ psb_intel_crtc->lut_b[i] = i;
14661+ r_base[i] = i << 8;
14662+ g_base[i] = i << 8;
14663+ b_base[i] = i << 8;
14664+ }
14665+
14666+ psb_intel_crtc->mode_dev = mode_dev;
14667+ psb_intel_crtc->cursor_addr = 0;
14668+ psb_intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
14669+
14670+ if (IS_MRST(dev)) {
14671+ drm_crtc_helper_add(&psb_intel_crtc->base, &mrst_helper_funcs);
14672+ } else {
14673+ drm_crtc_helper_add(&psb_intel_crtc->base,
14674+ &psb_intel_helper_funcs);
14675+ }
14676+
14677+ /* Set up the array of drm_connector pointers */
14678+ psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
14679+ psb_intel_crtc->mode_set.connectors =
14680+ (struct drm_connector **) (psb_intel_crtc + 1);
14681+ psb_intel_crtc->mode_set.num_connectors = 0;
14682+
14683+#if 0 /* JB: do not drop. What should go in here? */
14684+ if (i915_fbpercrtc)
14685+#endif
14686+}
14687+
14688+struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
14689+{
14690+ struct drm_crtc *crtc = NULL;
14691+
14692+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
14693+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14694+ if (psb_intel_crtc->pipe == pipe)
14695+ break;
14696+ }
14697+ return crtc;
14698+}
14699+
14700+int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
14701+{
14702+ int index_mask = 0;
14703+ struct drm_connector *connector;
14704+ int entry = 0;
14705+
14706+ list_for_each_entry(connector, &dev->mode_config.connector_list,
14707+ head) {
14708+ struct psb_intel_output *psb_intel_output =
14709+ to_psb_intel_output(connector);
14710+ if (type_mask & (1 << psb_intel_output->type))
14711+ index_mask |= (1 << entry);
14712+ entry++;
14713+ }
14714+ return index_mask;
14715+}
14716+
14717+#if 0 /* JB: Should be per device */
14718+static void psb_intel_setup_outputs(struct drm_device *dev)
14719+{
14720+ struct drm_connector *connector;
14721+
14722+ psb_intel_crt_init(dev);
14723+
14724+ /* Set up integrated LVDS */
14725+ if (IS_MOBILE(dev) && !IS_I830(dev))
14726+ psb_intel_lvds_init(dev);
14727+
14728+ if (IS_I9XX(dev)) {
14729+ psb_intel_sdvo_init(dev, SDVOB);
14730+ psb_intel_sdvo_init(dev, SDVOC);
14731+ } else
14732+ psb_intel_dvo_init(dev);
14733+
14734+ if (IS_I9XX(dev) && !IS_I915G(dev))
14735+ psb_intel_tv_init(dev);
14736+
14737+ list_for_each_entry(connector, &dev->mode_config.connector_list,
14738+ head) {
14739+ struct psb_intel_output *psb_intel_output =
14740+ to_psb_intel_output(connector);
14741+ struct drm_encoder *encoder = &psb_intel_output->enc;
14742+ int crtc_mask = 0, clone_mask = 0;
14743+
14744+ /* valid crtcs */
14745+ switch (psb_intel_output->type) {
14746+ case INTEL_OUTPUT_DVO:
14747+ case INTEL_OUTPUT_SDVO:
14748+ crtc_mask = ((1 << 0) | (1 << 1));
14749+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
14750+ (1 << INTEL_OUTPUT_DVO) |
14751+ (1 << INTEL_OUTPUT_SDVO));
14752+ break;
14753+ case INTEL_OUTPUT_ANALOG:
14754+ crtc_mask = ((1 << 0) | (1 << 1));
14755+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
14756+ (1 << INTEL_OUTPUT_DVO) |
14757+ (1 << INTEL_OUTPUT_SDVO));
14758+ break;
14759+ case INTEL_OUTPUT_LVDS:
14760+ crtc_mask = (1 << 1);
14761+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
14762+ break;
14763+ case INTEL_OUTPUT_TVOUT:
14764+ crtc_mask = ((1 << 0) | (1 << 1));
14765+ clone_mask = (1 << INTEL_OUTPUT_TVOUT);
14766+ break;
14767+ }
14768+ encoder->possible_crtcs = crtc_mask;
14769+ encoder->possible_clones =
14770+ psb_intel_connector_clones(dev, clone_mask);
14771+ }
14772+}
14773+#endif
14774+
14775+#if 0 /* JB: Rework framebuffer code into something non-device-specific */
14776+static void psb_intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14777+{
14778+ struct psb_intel_framebuffer *psb_intel_fb = to_psb_intel_framebuffer(fb);
14779+ struct drm_device *dev = fb->dev;
14780+
14781+ if (fb->fbdev)
14782+ intelfb_remove(dev, fb);
14783+
14784+ drm_framebuffer_cleanup(fb);
14785+ drm_gem_object_unreference(fb->mm_private);
14786+
14787+ kfree(psb_intel_fb);
14788+}
14789+
14790+static int psb_intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14791+ struct drm_file *file_priv,
14792+ unsigned int *handle)
14793+{
14794+ struct drm_gem_object *object = fb->mm_private;
14795+
14796+ return drm_gem_handle_create(file_priv, object, handle);
14797+}
14798+
14799+static const struct drm_framebuffer_funcs psb_intel_fb_funcs = {
14800+ .destroy = psb_intel_user_framebuffer_destroy,
14801+ .create_handle = psb_intel_user_framebuffer_create_handle,
14802+};
14803+
14804+struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device *dev,
14805+ struct drm_mode_fb_cmd
14806+ *mode_cmd,
14807+ void *mm_private)
14808+{
14809+ struct psb_intel_framebuffer *psb_intel_fb;
14810+
14811+ psb_intel_fb = kzalloc(sizeof(*psb_intel_fb), GFP_KERNEL);
14812+ if (!psb_intel_fb)
14813+ return NULL;
14814+
14815+ if (!drm_framebuffer_init(dev, &psb_intel_fb->base, &psb_intel_fb_funcs))
14816+ return NULL;
14817+
14818+ drm_helper_mode_fill_fb_struct(&psb_intel_fb->base, mode_cmd);
14819+
14820+ return &psb_intel_fb->base;
14821+}
14822+
14823+
14824+static struct drm_framebuffer *psb_intel_user_framebuffer_create(struct
14825+ drm_device
14826+ *dev,
14827+ struct
14828+ drm_file
14829+ *filp,
14830+ struct
14831+ drm_mode_fb_cmd
14832+ *mode_cmd)
14833+{
14834+ struct drm_gem_object *obj;
14835+
14836+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
14837+ if (!obj)
14838+ return NULL;
14839+
14840+ return psb_intel_framebuffer_create(dev, mode_cmd, obj);
14841+}
14842+
14843+static int psb_intel_insert_new_fb(struct drm_device *dev,
14844+ struct drm_file *file_priv,
14845+ struct drm_framebuffer *fb,
14846+ struct drm_mode_fb_cmd *mode_cmd)
14847+{
14848+ struct psb_intel_framebuffer *psb_intel_fb;
14849+ struct drm_gem_object *obj;
14850+ struct drm_crtc *crtc;
14851+
14852+ psb_intel_fb = to_psb_intel_framebuffer(fb);
14853+
14854+ mutex_lock(&dev->struct_mutex);
14855+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
14856+
14857+ if (!obj) {
14858+ mutex_unlock(&dev->struct_mutex);
14859+ return -EINVAL;
14860+ }
14861+ drm_gem_object_unreference(psb_intel_fb->base.mm_private);
14862+ drm_helper_mode_fill_fb_struct(fb, mode_cmd, obj);
14863+ mutex_unlock(&dev->struct_mutex);
14864+
14865+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
14866+ if (crtc->fb == fb) {
14867+ struct drm_crtc_helper_funcs *crtc_funcs =
14868+ crtc->helper_private;
14869+ crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y);
14870+ }
14871+ }
14872+ return 0;
14873+}
14874+
14875+static const struct drm_mode_config_funcs psb_intel_mode_funcs = {
14876+ .resize_fb = psb_intel_insert_new_fb,
14877+ .fb_create = psb_intel_user_framebuffer_create,
14878+ .fb_changed = intelfb_probe,
14879+};
14880+#endif
14881+
14882+#if 0 /* Should be per device */
14883+void psb_intel_modeset_init(struct drm_device *dev)
14884+{
14885+ int num_pipe;
14886+ int i;
14887+
14888+ drm_mode_config_init(dev);
14889+
14890+ dev->mode_config.min_width = 0;
14891+ dev->mode_config.min_height = 0;
14892+
14893+ dev->mode_config.funcs = (void *) &psb_intel_mode_funcs;
14894+
14895+ if (IS_I965G(dev)) {
14896+ dev->mode_config.max_width = 8192;
14897+ dev->mode_config.max_height = 8192;
14898+ } else {
14899+ dev->mode_config.max_width = 2048;
14900+ dev->mode_config.max_height = 2048;
14901+ }
14902+
14903+ /* set memory base */
14904+ /* MRST and PSB should use BAR 2*/
14905+ dev->mode_config.fb_base =
14906+ pci_resource_start(dev->pdev, 2);
14907+
14908+ if (IS_MOBILE(dev) || IS_I9XX(dev))
14909+ num_pipe = 2;
14910+ else
14911+ num_pipe = 1;
14912+ DRM_DEBUG("%d display pipe%s available.\n",
14913+ num_pipe, num_pipe > 1 ? "s" : "");
14914+
14915+ for (i = 0; i < num_pipe; i++)
14916+ psb_intel_crtc_init(dev, i);
14917+
14918+ psb_intel_setup_outputs(dev);
14919+
14920+ /* setup fbs */
14921+ /* drm_initial_config(dev); */
14922+}
14923+#endif
14924+
14925+void psb_intel_modeset_cleanup(struct drm_device *dev)
14926+{
14927+ drm_mode_config_cleanup(dev);
14928+}
14929+
14930+
14931+/* The current Intel driver doesn't take advantage of encoders;
14932+ always give back the encoder for the connector.
14933+*/
14934+struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
14935+{
14936+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
14937+
14938+ return &psb_intel_output->enc;
14939+}
14940+
14941+/* MRST_PLATFORM start */
14942+
14943+#if DUMP_REGISTER
14944+void dump_dc_registers(struct drm_device *dev)
14945+{
14946+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
14947+ unsigned int i = 0;
14948+
14949+ DRM_INFO("jliu7 dump_dc_registers\n");
14950+
14951+
14952+ if (0x80000000 & REG_READ(0x70008)) {
14953+ for (i = 0x20a0; i < 0x20af; i += 4) {
14954+ DRM_INFO("jliu7 interrupt register=0x%x, value=%x\n", i, (unsigned int) REG_READ(i));
14955+ }
14956+
14957+ for (i = 0xf014; i < 0xf047; i += 4) {
14958+ DRM_INFO
14959+ ("jliu7 pipe A dpll register=0x%x, value=%x\n",
14960+ i, (unsigned int) REG_READ(i));
14961+ }
14962+
14963+ for (i = 0x60000; i < 0x6005f; i += 4) {
14964+ DRM_INFO
14965+ ("jliu7 pipe A timing register=0x%x, value=%x\n",
14966+ i, (unsigned int) REG_READ(i));
14967+ }
14968+
14969+ for (i = 0x61140; i < 0x61143; i += 4) {
14970+ DRM_INFO("jliu7 SDBOB register=0x%x, value=%x\n",
14971+ i, (unsigned int) REG_READ(i));
14972+ }
14973+
14974+ for (i = 0x61180; i < 0x6123F; i += 4) {
14975+ DRM_INFO
14976+ ("jliu7 LVDS PORT register=0x%x, value=%x\n",
14977+ i, (unsigned int) REG_READ(i));
14978+ }
14979+
14980+ for (i = 0x61254; i < 0x612AB; i += 4) {
14981+ DRM_INFO("jliu7 BLC register=0x%x, value=%x\n",
14982+ i, (unsigned int) REG_READ(i));
14983+ }
14984+
14985+ for (i = 0x70000; i < 0x70047; i += 4) {
14986+ DRM_INFO
14987+ ("jliu7 PIPE A control register=0x%x, value=%x\n",
14988+ i, (unsigned int) REG_READ(i));
14989+ }
14990+
14991+ for (i = 0x70180; i < 0x7020b; i += 4) {
14992+ DRM_INFO("jliu7 display A control register=0x%x,"
14993+ "value=%x\n", i,
14994+ (unsigned int) REG_READ(i));
14995+ }
14996+
14997+ for (i = 0x71400; i < 0x71403; i += 4) {
14998+ DRM_INFO
14999+ ("jliu7 VGA Display Plane Control register=0x%x,"
15000+ "value=%x\n", i, (unsigned int) REG_READ(i));
15001+ }
15002+ }
15003+
15004+ if (0x80000000 & REG_READ(0x71008)) {
15005+ for (i = 0x61000; i < 0x6105f; i += 4) {
15006+ DRM_INFO
15007+ ("jliu7 pipe B timing register=0x%x, value=%x\n",
15008+ i, (unsigned int) REG_READ(i));
15009+ }
15010+
15011+ for (i = 0x71000; i < 0x71047; i += 4) {
15012+ DRM_INFO
15013+ ("jliu7 PIPE B control register=0x%x, value=%x\n",
15014+ i, (unsigned int) REG_READ(i));
15015+ }
15016+
15017+ for (i = 0x71180; i < 0x7120b; i += 4) {
15018+ DRM_INFO("jliu7 display B control register=0x%x,"
15019+ "value=%x\n", i,
15020+ (unsigned int) REG_READ(i));
15021+ }
15022+ }
15023+#if 0
15024+ for (i = 0x70080; i < 0x700df; i += 4) {
15025+ DRM_INFO("jliu7 cursor A & B register=0x%x, value=%x\n",
15026+ i, (unsigned int) REG_READ(i));
15027+ }
15028+#endif
15029+
15030+}
15031+
15032+void dump_dsi_registers(struct drm_device *dev)
15033+{
15034+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
15035+ unsigned int i = 0;
15036+
15037+ DRM_INFO("jliu7 dump_dsi_registers\n");
15038+
15039+ for (i = 0xb000; i < 0xb064; i += 4) {
15040+ DRM_INFO("jliu7 MIPI IP register=0x%x, value=%x\n", i,
15041+ (unsigned int) REG_READ(i));
15042+ }
15043+
15044+ i = 0xb104;
15045+ DRM_INFO("jliu7 MIPI control register=0x%x, value=%x\n",
15046+ i, (unsigned int) REG_READ(i));
15047+}
15048+#endif /* DUMP_REGISTER */
15049+
15050+
15051+struct mrst_limit_t {
15052+ struct psb_intel_range_t dot, m, p1;
15053+};
15054+
15055+struct mrst_clock_t {
15056+ /* derived values */
15057+ int dot;
15058+ int m;
15059+ int p1;
15060+};
15061+
15062+#define MRST_LIMIT_LVDS_100L 0
15063+#define MRST_LIMIT_LVDS_83 1
15064+#define MRST_LIMIT_LVDS_100 2
15065+
15066+#define MRST_DOT_MIN 19750
15067+#define MRST_DOT_MAX 120000
15068+#define MRST_M_MIN_100L 20
15069+#define MRST_M_MIN_100 10
15070+#define MRST_M_MIN_83 12
15071+#define MRST_M_MAX_100L 34
15072+#define MRST_M_MAX_100 17
15073+#define MRST_M_MAX_83 20
15074+#define MRST_P1_MIN 2
15075+#define MRST_P1_MAX_0 7
15076+#define MRST_P1_MAX_1 8
15077+
15078+static const struct mrst_limit_t mrst_limits[] = {
15079+ { /* MRST_LIMIT_LVDS_100L */
15080+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
15081+ .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
15082+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
15083+ },
15084+ { /* MRST_LIMIT_LVDS_83 */
15085+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
15086+ .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
15087+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
15088+ },
15089+ { /* MRST_LIMIT_LVDS_100 */
15090+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
15091+ .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
15092+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
15093+ },
15094+};
15095+
15096+#define MRST_M_MIN 10
15097+static const u32 mrst_m_converts[] = {
15098+ 0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
15099+ 0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
15100+ 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
15101+};
15102+
15103+#define COUNT_MAX 0x10000000
15104+void mrstWaitForPipeDisable(struct drm_device *dev)
15105+{
15106+ int count, temp;
15107+
15108+ /* FIXME JLIU7_PO */
15109+ psb_intel_wait_for_vblank(dev);
15110+ return;
15111+
15112+ /* Wait for the pipe disable to take effect. */
15113+ for (count = 0; count < COUNT_MAX; count++) {
15114+ temp = REG_READ(PIPEACONF);
15115+ if ((temp & PIPEACONF_PIPE_STATE) == 0)
15116+ break;
15117+ }
15118+
15119+ if (count == COUNT_MAX) {
15120+#if PRINT_JLIU7
15121+ DRM_INFO("JLIU7 mrstWaitForPipeDisable timed out.\n");
15122+#endif /* PRINT_JLIU7 */
15123+ } else {
15124+#if PRINT_JLIU7
15125+ DRM_INFO("JLIU7 mrstWaitForPipeDisable count = %d.\n",
15126+ count);
15127+#endif /* PRINT_JLIU7 */
15128+ }
15129+}
15130+
15131+void mrstWaitForPipeEnable(struct drm_device *dev)
15132+{
15133+ int count, temp;
15134+
15135+ /* FIXME JLIU7_PO */
15136+ psb_intel_wait_for_vblank(dev);
15137+ return;
15138+
15139+ /* Wait for the pipe enable to take effect. */
15140+ for (count = 0; count < COUNT_MAX; count++) {
15141+ temp = REG_READ(PIPEACONF);
15142+ if ((temp & PIPEACONF_PIPE_STATE) != 0)
15143+ break;
15144+ }
15145+
15146+ if (count == COUNT_MAX) {
15147+#if PRINT_JLIU7
15148+ DRM_INFO("JLIU7 mrstWaitForPipeEnable timed out.\n");
15149+#endif /* PRINT_JLIU7 */
15150+ } else {
15151+#if PRINT_JLIU7
15152+ DRM_INFO("JLIU7 mrstWaitForPipeEnable count = %d.\n",
15153+ count);
15154+#endif /* PRINT_JLIU7 */
15155+ }
15156+}
15157+
15158+static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
15159+{
15160+ const struct mrst_limit_t *limit;
15161+ struct drm_device *dev = crtc->dev;
15162+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
15163+
15164+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
15165+ || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
15166+ if (dev_priv->sku_100L)
15167+ limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
15168+ if (dev_priv->sku_83)
15169+ limit = &mrst_limits[MRST_LIMIT_LVDS_83];
15170+ if (dev_priv->sku_100)
15171+ limit = &mrst_limits[MRST_LIMIT_LVDS_100];
15172+ } else {
15173+ limit = NULL;
15174+#if PRINT_JLIU7
15175+ DRM_INFO("JLIU7 mrst_limit: wrong display type.\n");
15176+#endif /* PRINT_JLIU7 */
15177+ }
15178+
15179+ return limit;
15180+}
15181+
15182+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
15183+static void mrst_clock(int refclk, struct mrst_clock_t *clock)
15184+{
15185+ clock->dot = (refclk * clock->m) / (14 * clock->p1);
15186+}
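
mrst_clock() encodes a single relation: the dot clock is the reference clock scaled by m and divided by a fixed 14 * p1. Here is a stand-alone sketch of the same arithmetic, assuming the 100000 kHz refclk that the sku_100L path in mrst_crtc_mode_set() later selects; the m and p1 values are arbitrary in-range picks for illustration.

#include <stdio.h>

/* Same relation as mrst_clock() above: pixel clock (in kHz, like refclk)
 * derived from the m and p1 divisors with a fixed /14 post divider. */
static int mrst_dot_clock(int refclk, int m, int p1)
{
	return (refclk * m) / (14 * p1);
}

int main(void)
{
	/* refclk = 100000 kHz matches the sku_100L case; m = 23 and p1 = 2
	 * are example divisors from the LVDS_100L ranges. */
	printf("dot = %d kHz\n", mrst_dot_clock(100000, 23, 2));
	return 0;
}
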
15187+
15188+void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
15189+{
15190+#if PRINT_JLIU7
15191+ DRM_INFO
15192+ ("JLIU7 mrstPrintPll %s: dotclock = %d, m = %d, p1 = %d. \n",
15193+ prefix, clock->dot, clock->m, clock->p1);
15194+#endif /* PRINT_JLIU7 */
15195+}
15196+
15197+/**
15198+ * Returns a set of divisors for the desired target clock with the given refclk,
15199+ * or false if none is found. Divisor values are the actual divisors, not register encodings.
15200+ */
15201+static bool
15202+mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
15203+ struct mrst_clock_t *best_clock)
15204+{
15205+ struct mrst_clock_t clock;
15206+ const struct mrst_limit_t *limit = mrst_limit(crtc);
15207+ int err = target;
15208+
15209+ memset(best_clock, 0, sizeof(*best_clock));
15210+
15211+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
15212+ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
15213+ clock.p1++) {
15214+ int this_err;
15215+
15216+ mrst_clock(refclk, &clock);
15217+
15218+ this_err = abs(clock.dot - target);
15219+ if (this_err < err) {
15220+ *best_clock = clock;
15221+ err = this_err;
15222+ }
15223+ }
15224+ }
15225+ DRM_DEBUG("mrstFindBestPLL err = %d.\n", err);
15226+
15227+ return err != target;
15228+}
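
The search above is a plain brute force over the m and p1 ranges of the active limit, keeping whichever pair lands closest to the target clock. A self-contained sketch of the same loop, using the MRST_LIMIT_LVDS_100L ranges (m 20..34, p1 2..8) from earlier in this patch, a 100000 kHz refclk, and a hypothetical 65000 kHz panel clock as the target:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const int refclk = 100000, target = 65000;	/* example values only */
	int best_m = 0, best_p1 = 0, best_err = target;

	for (int m = 20; m <= 34; m++) {
		for (int p1 = 2; p1 <= 8; p1++) {
			int dot = (refclk * m) / (14 * p1);	/* same formula as mrst_clock() */
			int err = abs(dot - target);
			if (err < best_err) {
				best_err = err;
				best_m = m;
				best_p1 = p1;
			}
		}
	}
	printf("best m=%d p1=%d err=%d kHz\n", best_m, best_p1, best_err);
	return 0;
}
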
15229+
15230+/**
15231+ * Sets the power management mode of the pipe and plane.
15232+ *
15233+ * This code should probably grow support for turning the cursor off and back
15234+ * on appropriately at the same time as we're turning the pipe off/on.
15235+ */
15236+static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
15237+{
15238+ struct drm_device *dev = crtc->dev;
15239+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
15240+ int pipe = psb_intel_crtc->pipe;
15241+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
15242+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
15243+ int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
15244+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
15245+ u32 temp;
15246+ bool enabled;
15247+
15248+#if PRINT_JLIU7
15249+ DRM_INFO("JLIU7 enter mrst_crtc_dpms, mode = %d, pipe = %d \n",
15250+ mode, pipe);
15251+#endif /* PRINT_JLIU7 */
15252+
15253+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
15254+
15255+ /* XXX: When our outputs are all unaware of DPMS modes other than off
15256+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
15257+ */
15258+ switch (mode) {
15259+ case DRM_MODE_DPMS_ON:
15260+ case DRM_MODE_DPMS_STANDBY:
15261+ case DRM_MODE_DPMS_SUSPEND:
15262+ /* Enable the DPLL */
15263+ temp = REG_READ(dpll_reg);
15264+ if ((temp & DPLL_VCO_ENABLE) == 0) {
15265+ REG_WRITE(dpll_reg, temp);
15266+ REG_READ(dpll_reg);
15267+ /* Wait for the clocks to stabilize. */
15268+ udelay(150);
15269+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
15270+ REG_READ(dpll_reg);
15271+ /* Wait for the clocks to stabilize. */
15272+ udelay(150);
15273+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
15274+ REG_READ(dpll_reg);
15275+ /* Wait for the clocks to stabilize. */
15276+ udelay(150);
15277+ }
15278+
15279+ /* Enable the pipe */
15280+ temp = REG_READ(pipeconf_reg);
15281+ if ((temp & PIPEACONF_ENABLE) == 0)
15282+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
15283+
15284+ /* Enable the plane */
15285+ temp = REG_READ(dspcntr_reg);
15286+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
15287+ REG_WRITE(dspcntr_reg,
15288+ temp | DISPLAY_PLANE_ENABLE);
15289+ /* Flush the plane changes */
15290+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
15291+ }
15292+
15293+ psb_intel_crtc_load_lut(crtc);
15294+
15295+ /* Give the overlay scaler a chance to enable
15296+ if it's on this pipe */
15297+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
15298+ break;
15299+ case DRM_MODE_DPMS_OFF:
15300+ /* Give the overlay scaler a chance to disable
15301+ * if it's on this pipe */
15302+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
15303+
15304+ /* Disable the VGA plane that we never use */
15305+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
15306+
15307+ /* Disable display plane */
15308+ temp = REG_READ(dspcntr_reg);
15309+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
15310+ REG_WRITE(dspcntr_reg,
15311+ temp & ~DISPLAY_PLANE_ENABLE);
15312+ /* Flush the plane changes */
15313+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
15314+ REG_READ(dspbase_reg);
15315+ }
15316+
15317+ if (!IS_I9XX(dev)) {
15318+ /* Wait for vblank for the disable to take effect */
15319+ psb_intel_wait_for_vblank(dev);
15320+ }
15321+
15322+ /* Next, disable display pipes */
15323+ temp = REG_READ(pipeconf_reg);
15324+ if ((temp & PIPEACONF_ENABLE) != 0) {
15325+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
15326+ REG_READ(pipeconf_reg);
15327+ }
15328+
15329+ /* Wait for the pipe disable to take effect. */
15330+ mrstWaitForPipeDisable(dev);
15331+
15332+ temp = REG_READ(dpll_reg);
15333+ if ((temp & DPLL_VCO_ENABLE) != 0) {
15334+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
15335+ REG_READ(dpll_reg);
15336+ }
15337+
15338+ /* Wait for the clocks to turn off. */
15339+ udelay(150);
15340+ break;
15341+ }
15342+
15343+#if DUMP_REGISTER
15344+ dump_dc_registers(dev);
15345+#endif /* DUMP_REGISTER */
15346+
15347+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
15348+
15349+#if 0 /* JB: Add vblank support later */
15350+ if (enabled)
15351+ dev_priv->vblank_pipe |= (1 << pipe);
15352+ else
15353+ dev_priv->vblank_pipe &= ~(1 << pipe);
15354+#endif
15355+
15356+ psb_intel_crtc->dpms_mode = mode;
15357+
15358+#if 0 /* JB: Add sarea support later */
15359+ if (!dev->primary->master)
15360+ return;
15361+
15362+ master_priv = dev->primary->master->driver_priv;
15363+ if (!master_priv->sarea_priv)
15364+ return;
15365+
15366+ switch (pipe) {
15367+ case 0:
15368+ master_priv->sarea_priv->planeA_w =
15369+ enabled ? crtc->mode.hdisplay : 0;
15370+ master_priv->sarea_priv->planeA_h =
15371+ enabled ? crtc->mode.vdisplay : 0;
15372+ break;
15373+ case 1:
15374+ master_priv->sarea_priv->planeB_w =
15375+ enabled ? crtc->mode.hdisplay : 0;
15376+ master_priv->sarea_priv->planeB_h =
15377+ enabled ? crtc->mode.vdisplay : 0;
15378+ break;
15379+ default:
15380+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
15381+ break;
15382+ }
15383+#endif
15384+
15385+ /* Set FIFO watermarks */
15386+ REG_WRITE(DSPARB, 0x3FFF);
15387+ REG_WRITE(DSPFW1, 0x3F88080A);
15388+ REG_WRITE(DSPFW2, 0x0b060808);
15389+ REG_WRITE(DSPFW3, 0x0);
15390+ REG_WRITE(DSPFW4, 0x08030404);
15391+ REG_WRITE(DSPFW5, 0x04040404);
15392+ REG_WRITE(DSPFW6, 0x78);
15393+ REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
15394+ /* Must write Bit 14 of the Chicken Bit Register */
15395+
15396+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
15397+}
15398+
15399+static int mrst_crtc_mode_set(struct drm_crtc *crtc,
15400+ struct drm_display_mode *mode,
15401+ struct drm_display_mode *adjusted_mode,
15402+ int x, int y,
15403+ struct drm_framebuffer *old_fb)
15404+{
15405+ struct drm_device *dev = crtc->dev;
15406+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
15407+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
15408+ int pipe = psb_intel_crtc->pipe;
15409+ int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
15410+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
15411+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
15412+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
15413+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
15414+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
15415+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
15416+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
15417+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
15418+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
15419+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
15420+ int refclk = 0;
15421+ struct mrst_clock_t clock;
15422+ u32 dpll = 0, fp = 0, dspcntr, pipeconf, lvdsport;
15423+ bool ok, is_sdvo = false;
15424+ bool is_crt = false, is_lvds = false, is_tv = false;
15425+ bool is_mipi = false;
15426+ struct drm_mode_config *mode_config = &dev->mode_config;
15427+ struct psb_intel_output *psb_intel_output = NULL;
15428+ uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
15429+ struct drm_encoder *encoder;
15430+
15431+#if PRINT_JLIU7
15432+ DRM_INFO("JLIU7 enter mrst_crtc_mode_set \n");
15433+#endif /* PRINT_JLIU7 */
15434+
15435+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
15436+
15437+ memcpy(&psb_intel_crtc->saved_mode, mode, sizeof(struct drm_display_mode));
15438+ memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode, sizeof(struct drm_display_mode));
15439+
15440+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
15441+
15442+ if (encoder->crtc != crtc)
15443+ continue;
15444+
15445+ psb_intel_output = enc_to_psb_intel_output(encoder);
15446+ switch (psb_intel_output->type) {
15447+ case INTEL_OUTPUT_LVDS:
15448+ is_lvds = true;
15449+ break;
15450+ case INTEL_OUTPUT_SDVO:
15451+ is_sdvo = true;
15452+ break;
15453+ case INTEL_OUTPUT_TVOUT:
15454+ is_tv = true;
15455+ break;
15456+ case INTEL_OUTPUT_ANALOG:
15457+ is_crt = true;
15458+ break;
15459+ case INTEL_OUTPUT_MIPI:
15460+ is_mipi = true;
15461+ break;
15462+ }
15463+ }
15464+
15465+ if (is_lvds | is_mipi) {
15466+ /*FIXME JLIU7 Get panel power delay parameters from
15467+ config data */
15468+ REG_WRITE(0x61208, 0x25807d0);
15469+ REG_WRITE(0x6120c, 0x1f407d0);
15470+ REG_WRITE(0x61210, 0x270f04);
15471+ }
15472+
15473+ /* Disable the VGA plane that we never use */
15474+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
15475+
15476+ /* Disable the panel fitter if it was on our pipe */
15477+ if (psb_intel_panel_fitter_pipe(dev) == pipe)
15478+ REG_WRITE(PFIT_CONTROL, 0);
15479+
15480+ REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
15481+
15482+ if (psb_intel_output)
15483+ drm_connector_property_get_value(&psb_intel_output->base,
15484+ dev->mode_config.scaling_mode_property, &scalingType);
15485+
15486+ if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
15487+ /*Moorestown doesn't have register support for centering so we need to
15488+ mess with the h/vblank and h/vsync start and ends to get centering*/
15489+ int offsetX = 0, offsetY = 0;
15490+
15491+ offsetX = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
15492+ offsetY = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
15493+
15494+ REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
15495+ ((adjusted_mode->crtc_htotal - 1) << 16));
15496+ REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
15497+ ((adjusted_mode->crtc_vtotal - 1) << 16));
15498+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - offsetX - 1) |
15499+ ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
15500+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - offsetX - 1) |
15501+ ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
15502+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - offsetY - 1) |
15503+ ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
15504+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - offsetY - 1) |
15505+ ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
15506+ } else {
15507+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
15508+ ((adjusted_mode->crtc_htotal - 1) << 16));
15509+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
15510+ ((adjusted_mode->crtc_vtotal - 1) << 16));
15511+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
15512+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
15513+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
15514+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
15515+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
15516+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
15517+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
15518+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
15519+ }
15520+
15521+ /* Flush the plane changes */
15522+ {
15523+ struct drm_crtc_helper_funcs *crtc_funcs =
15524+ crtc->helper_private;
15525+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
15526+ }
15527+
15528+ /* setup pipeconf */
15529+ pipeconf = REG_READ(pipeconf_reg);
15530+
15531+ /* Set up the display plane register */
15532+ dspcntr = REG_READ(dspcntr_reg);
15533+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
15534+
15535+ if (pipe == 0)
15536+ dspcntr |= DISPPLANE_SEL_PIPE_A;
15537+ else
15538+ dspcntr |= DISPPLANE_SEL_PIPE_B;
15539+
15540+ dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
15541+ dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
15542+
15543+ if (is_mipi)
15544+ goto mrst_crtc_mode_set_exit;
15545+
15546+ if (dev_priv->sku_100L)
15547+ refclk = 100000;
15548+ else if (dev_priv->sku_83)
15549+ refclk = 166000;
15550+ else if (dev_priv->sku_100)
15551+ refclk = 200000;
15552+
15553+ dpll = 0; /*BIT16 = 0 for 100MHz reference */
15554+
15555+ ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
15556+
15557+ if (!ok) {
15558+#if PRINT_JLIU7
15559+ DRM_INFO
15560+ ("JLIU7 mrstFindBestPLL fail in mrst_crtc_mode_set. \n");
15561+#endif /* PRINT_JLIU7 */
15562+ } else {
15563+#if PRINT_JLIU7
15564+ DRM_INFO("JLIU7 mrst_crtc_mode_set pixel clock = %d,"
15565+ "m = %x, p1 = %x. \n", clock.dot, clock.m,
15566+ clock.p1);
15567+#endif /* PRINT_JLIU7 */
15568+ }
15569+
15570+ fp = mrst_m_converts[(clock.m - MRST_M_MIN)] << 8;
15571+
15572+ dpll |= DPLL_VGA_MODE_DIS;
15573+
15574+
15575+ dpll |= DPLL_VCO_ENABLE;
15576+
15577+ if (is_lvds)
15578+ dpll |= DPLLA_MODE_LVDS;
15579+ else
15580+ dpll |= DPLLB_MODE_DAC_SERIAL;
15581+
15582+ if (is_sdvo) {
15583+ int sdvo_pixel_multiply =
15584+ adjusted_mode->clock / mode->clock;
15585+
15586+ dpll |= DPLL_DVO_HIGH_SPEED;
15587+ dpll |=
15588+ (sdvo_pixel_multiply -
15589+ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
15590+ }
15591+
15592+
15593+ /* compute bitmask from p1 value */
15594+ dpll |= (1 << (clock.p1 - 2)) << 17;
15595+
15596+ dpll |= DPLL_VCO_ENABLE;
15597+
15598+#if PRINT_JLIU7
15599+ mrstPrintPll("chosen", &clock);
15600+#endif /* PRINT_JLIU7 */
15601+
15602+#if 0
15603+ if (!xf86ModesEqual(mode, adjusted_mode)) {
15604+ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
15605+ "Adjusted mode for pipe %c:\n",
15606+ pipe == 0 ? 'A' : 'B');
15607+ xf86PrintModeline(pScrn->scrnIndex, mode);
15608+ }
15609+ i830PrintPll("chosen", &clock);
15610+#endif
15611+
15612+ if (dpll & DPLL_VCO_ENABLE) {
15613+ REG_WRITE(fp_reg, fp);
15614+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
15615+ REG_READ(dpll_reg);
15616+/* FIXME jliu7 check the DPLLA lock bit PIPEACONF[29] */
15617+ udelay(150);
15618+ }
15619+
15620+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
15621+ * This is an exception to the general rule that mode_set doesn't turn
15622+ * things on.
15623+ */
15624+ if (is_lvds) {
15625+
15626+ /*lvdsport = 0x803003c0;*/
15627+ /*lvdsport = 0x813003c0;*/
15628+ lvdsport = dev_priv->gct_data.Panel_Port_Control;
15629+
15630+ REG_WRITE(LVDS, lvdsport);
15631+ }
15632+
15633+ REG_WRITE(fp_reg, fp);
15634+ REG_WRITE(dpll_reg, dpll);
15635+ REG_READ(dpll_reg);
15636+ /* Wait for the clocks to stabilize. */
15637+ udelay(150);
15638+
15639+ /* write it again -- the BIOS does, after all */
15640+ REG_WRITE(dpll_reg, dpll);
15641+ REG_READ(dpll_reg);
15642+ /* Wait for the clocks to stabilize. */
15643+ udelay(150);
15644+
15645+ REG_WRITE(pipeconf_reg, pipeconf);
15646+ REG_READ(pipeconf_reg);
15647+
15648+ /* Wait for the pipe enable to take effect. */
15649+ mrstWaitForPipeEnable(dev);
15650+
15651+ REG_WRITE(dspcntr_reg, dspcntr);
15652+ psb_intel_wait_for_vblank(dev);
15653+
15654+mrst_crtc_mode_set_exit:
15655+
15656+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
15657+
15658+ return 0;
15659+}
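
One detail worth calling out from the function above is the "compute bitmask from p1 value" step: the p1 divisor is not written as a number but as a one-hot field starting at bit 17 of the DPLL word. A small sketch that prints the encoding for the p1 range used by the mrst_limits table earlier in this patch (2..8); the bit positions simply mirror the expression in the code above:

#include <stdio.h>

int main(void)
{
	for (int p1 = 2; p1 <= 8; p1++) {
		unsigned int bits = (1u << (p1 - 2)) << 17;	/* same encoding as the driver */
		printf("p1=%d -> DPLL p1 field 0x%08x\n", p1, bits);
	}
	return 0;
}
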
15660+
15661+
15662+static const struct drm_crtc_helper_funcs mrst_helper_funcs = {
15663+ .dpms = mrst_crtc_dpms,
15664+ .mode_fixup = psb_intel_crtc_mode_fixup,
15665+ .mode_set = mrst_crtc_mode_set,
15666+ .mode_set_base = psb_intel_pipe_set_base,
15667+ .prepare = psb_intel_crtc_prepare,
15668+ .commit = psb_intel_crtc_commit,
15669+};
15670+
15671+/* MRST_PLATFORM end */
15672diff --git a/drivers/gpu/drm/psb/psb_intel_display.h b/drivers/gpu/drm/psb/psb_intel_display.h
15673new file mode 100644
15674index 0000000..dcb79d4
15675--- /dev/null
15676+++ b/drivers/gpu/drm/psb/psb_intel_display.h
15677@@ -0,0 +1,31 @@
15678+
15679+/* Copyright (c) 2008, Intel Corporation
15680+ * Permission is hereby granted, free of charge, to any person obtaining a
15681+ * copy of this software and associated documentation files (the "Software"),
15682+ * to deal in the Software without restriction, including without limitation
15683+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15684+ * and/or sell copies of the Software, and to permit persons to whom the
15685+ * Software is furnished to do so, subject to the following conditions:
15686+ *
15687+ * The above copyright notice and this permission notice (including the next
15688+ * paragraph) shall be included in all copies or substantial portions of the
15689+ * Software.
15690+ *
15691+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15692+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15693+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
15694+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
15695+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
15696+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
15697+ * DEALINGS IN THE SOFTWARE.
15698+ *
15699+ * Authors:
15700+ * Eric Anholt <eric@anholt.net>
15701+ */
15702+
15703+#ifndef _INTEL_DISPLAY_H_
15704+#define _INTEL_DISPLAY_H_
15705+
15706+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
15707+
15708+#endif
15709diff --git a/drivers/gpu/drm/psb/psb_intel_drv.h b/drivers/gpu/drm/psb/psb_intel_drv.h
15710new file mode 100644
15711index 0000000..a64ce59
15712--- /dev/null
15713+++ b/drivers/gpu/drm/psb/psb_intel_drv.h
15714@@ -0,0 +1,246 @@
15715+/*
15716+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
15717+ * Copyright (c) 2007 Intel Corporation
15718+ * Jesse Barnes <jesse.barnes@intel.com>
15719+ */
15720+#ifndef __INTEL_DRV_H__
15721+#define __INTEL_DRV_H__
15722+
15723+#include <linux/i2c.h>
15724+#include <linux/i2c-id.h>
15725+#include <linux/i2c-algo-bit.h>
15726+#include <drm/drm_crtc.h>
15727+
15728+#include <drm/drm_crtc_helper.h>
15729+
15730+/*
15731+ * MOORESTOWN defines
15732+ */
15733+#define MRST_I2C 0
15734+
15735+#define DUMP_REGISTER 0
15736+#define MRST_24BIT_LVDS 1
15737+#define MRST_24BIT_DOT_1 0
15738+#define MRST_24BIT_WA 0
15739+
15740+#define PRINT_JLIU7 0
15741+#define DELAY_TIME1 2000 /* 1000 = 1ms */
15742+
15743+/*
15744+ * Display related stuff
15745+ */
15746+
15747+/* store information about an Ixxx DVO */
15748+/* The i830->i865 use multiple DVOs with multiple i2cs */
15749+/* the i915, i945 have a single sDVO i2c bus - which is different */
15750+#define MAX_OUTPUTS 6
15751+/* maximum connectors per crtcs in the mode set */
15752+#define INTELFB_CONN_LIMIT 4
15753+
15754+#define INTEL_I2C_BUS_DVO 1
15755+#define INTEL_I2C_BUS_SDVO 2
15756+
15757+/* these are outputs from the chip - integrated only
15758+ * external chips are via DVO or SDVO output */
15759+#define INTEL_OUTPUT_UNUSED 0
15760+#define INTEL_OUTPUT_ANALOG 1
15761+#define INTEL_OUTPUT_DVO 2
15762+#define INTEL_OUTPUT_SDVO 3
15763+#define INTEL_OUTPUT_LVDS 4
15764+#define INTEL_OUTPUT_TVOUT 5
15765+#define INTEL_OUTPUT_MIPI 6
15766+
15767+#define INTEL_DVO_CHIP_NONE 0
15768+#define INTEL_DVO_CHIP_LVDS 1
15769+#define INTEL_DVO_CHIP_TMDS 2
15770+#define INTEL_DVO_CHIP_TVOUT 4
15771+
15772+struct opregion_header {
15773+ u8 signature[16];
15774+ u32 size;
15775+ u32 opregion_ver;
15776+ u8 bios_ver[32];
15777+ u8 vbios_ver[16];
15778+ u8 driver_ver[16];
15779+ u32 mboxes;
15780+ u8 reserved[164];
15781+}__attribute__((packed));
15782+
15783+struct opregion_apci {
15784+ /*FIXME: add it later*/
15785+}__attribute__((packed));
15786+
15787+struct opregion_swsci {
15788+ /*FIXME: add it later*/
15789+}__attribute__((packed));
15790+
15791+struct opregion_acpi {
15792+ /*FIXME: add it later*/
15793+}__attribute__((packed));
15794+
15795+struct psb_intel_opregion {
15796+ struct opregion_header * header;
15797+ struct opregion_acpi * acpi;
15798+ struct opregion_swsci * swsci;
15799+ struct opregion_asle * asle;
15800+ int enabled;
15801+};
15802+
15803+/**
15804+ * Holds information usually kept in the device driver's private data,
15805+ * since it needs to be shared across multiple drivers' private structures.
15806+ */
15807+struct psb_intel_mode_device {
15808+
15809+ /*
15810+ * Abstracted memory manager operations
15811+ */
15812+ void *(*bo_from_handle) (struct drm_device *dev,
15813+ struct drm_file *file_priv,
15814+ unsigned int handle);
15815+ size_t(*bo_size) (struct drm_device *dev, void *bo);
15816+ size_t(*bo_offset) (struct drm_device *dev, void *bo);
15817+ int (*bo_pin_for_scanout) (struct drm_device *dev, void *bo);
15818+ int (*bo_unpin_for_scanout) (struct drm_device *dev, void *bo);
15819+
15820+ /*
15821+ * Cursor
15822+ */
15823+ int cursor_needs_physical;
15824+
15825+ /*
15826+ * LVDS info
15827+ */
15828+ int backlight_duty_cycle; /* restore backlight to this value */
15829+ bool panel_wants_dither;
15830+ struct drm_display_mode *panel_fixed_mode;
15831+ struct drm_display_mode *vbt_mode; /* if any */
15832+
15833+ uint32_t saveBLC_PWM_CTL;
15834+};
15835+
15836+struct psb_intel_i2c_chan {
15837+ /* for getting at dev. private (mmio etc.) */
15838+ struct drm_device *drm_dev;
15839+ u32 reg; /* GPIO reg */
15840+ struct i2c_adapter adapter;
15841+ struct i2c_algo_bit_data algo;
15842+ u8 slave_addr;
15843+};
15844+
15845+struct psb_intel_output {
15846+ struct drm_connector base;
15847+
15848+ struct drm_encoder enc;
15849+ int type;
15850+ struct psb_intel_i2c_chan *i2c_bus; /* for control functions */
15851+ struct psb_intel_i2c_chan *ddc_bus; /* for DDC only stuff */
15852+ bool load_detect_temp;
15853+ void *dev_priv;
15854+
15855+ struct psb_intel_mode_device *mode_dev;
15856+
15857+};
15858+
15859+struct psb_intel_crtc_state {
15860+ uint32_t saveDSPCNTR;
15861+ uint32_t savePIPECONF;
15862+ uint32_t savePIPESRC;
15863+ uint32_t saveDPLL;
15864+ uint32_t saveFP0;
15865+ uint32_t saveFP1;
15866+ uint32_t saveHTOTAL;
15867+ uint32_t saveHBLANK;
15868+ uint32_t saveHSYNC;
15869+ uint32_t saveVTOTAL;
15870+ uint32_t saveVBLANK;
15871+ uint32_t saveVSYNC;
15872+ uint32_t saveDSPSTRIDE;
15873+ uint32_t saveDSPSIZE;
15874+ uint32_t saveDSPPOS;
15875+ uint32_t saveDSPBASE;
15876+ uint32_t savePalette[256];
15877+};
15878+
15879+struct psb_intel_crtc {
15880+ struct drm_crtc base;
15881+ int pipe;
15882+ int plane;
15883+ uint32_t cursor_addr;
15884+ u8 lut_r[256], lut_g[256], lut_b[256];
15885+ int dpms_mode;
15886+ struct psb_intel_framebuffer *fbdev_fb;
15887+ /* a mode_set for fbdev users on this crtc */
15888+ struct drm_mode_set mode_set;
15889+
15890+ /* current bo we scanout from */
15891+ void *scanout_bo;
15892+
15893+ /* current bo we cursor from */
15894+ void *cursor_bo;
15895+
15896+ struct drm_display_mode saved_mode;
15897+ struct drm_display_mode saved_adjusted_mode;
15898+
15899+ struct psb_intel_mode_device *mode_dev;
15900+
15901+/*FIXME: Workaround to avoid MRST block.*/
15902+#ifndef CONFIG_MRST
15903+ /**
15904+ * Saved Crtc HW states
15905+ */
15906+ struct psb_intel_crtc_state * crtc_state;
15907+#endif
15908+};
15909+
15910+#define to_psb_intel_crtc(x) container_of(x, struct psb_intel_crtc, base)
15911+#define to_psb_intel_output(x) container_of(x, struct psb_intel_output, base)
15912+#define enc_to_psb_intel_output(x) container_of(x, struct psb_intel_output, enc)
15913+#define to_psb_intel_framebuffer(x) container_of(x, struct psb_intel_framebuffer, base)
15914+
15915+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
15916+ const u32 reg, const char *name);
15917+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
15918+int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output);
15919+extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output);
15920+
15921+extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
15922+ struct psb_intel_mode_device *mode_dev);
15923+extern void psb_intel_crt_init(struct drm_device *dev);
15924+extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device);
15925+extern void psb_intel_dvo_init(struct drm_device *dev);
15926+extern void psb_intel_tv_init(struct drm_device *dev);
15927+extern void psb_intel_lvds_init(struct drm_device *dev,
15928+ struct psb_intel_mode_device *mode_dev);
15929+extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
15930+extern void mrst_lvds_init(struct drm_device *dev,
15931+ struct psb_intel_mode_device *mode_dev);
15932+extern void mrst_dsi_init(struct drm_device *dev,
15933+ struct psb_intel_mode_device *mode_dev);
15934+
15935+extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
15936+extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
15937+extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
15938+
15939+extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
15940+ *connector);
15941+
15942+extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
15943+ struct drm_crtc *crtc);
15944+extern void psb_intel_wait_for_vblank(struct drm_device *dev);
15945+extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
15946+ int pipe);
15947+extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
15948+ int sdvoB);
15949+extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
15950+extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
15951+ int enable);
15952+extern int intelfb_probe(struct drm_device *dev);
15953+extern int intelfb_remove(struct drm_device *dev,
15954+ struct drm_framebuffer *fb);
15955+extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
15956+ *dev, struct
15957+ drm_mode_fb_cmd
15958+ *mode_cmd,
15959+ void *mm_private);
15960+#endif /* __INTEL_DRV_H__ */
15961diff --git a/drivers/gpu/drm/psb/psb_intel_dsi.c b/drivers/gpu/drm/psb/psb_intel_dsi.c
15962new file mode 100644
15963index 0000000..bcfee62
15964--- /dev/null
15965+++ b/drivers/gpu/drm/psb/psb_intel_dsi.c
15966@@ -0,0 +1,1798 @@
15967+/*
15968+ * Copyright © 2006-2007 Intel Corporation
15969+ *
15970+ * Permission is hereby granted, free of charge, to any person obtaining a
15971+ * copy of this software and associated documentation files (the "Software"),
15972+ * to deal in the Software without restriction, including without limitation
15973+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15974+ * and/or sell copies of the Software, and to permit persons to whom the
15975+ * Software is furnished to do so, subject to the following conditions:
15976+ *
15977+ * The above copyright notice and this permission notice (including the next
15978+ * paragraph) shall be included in all copies or substantial portions of the
15979+ * Software.
15980+ *
15981+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15982+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15983+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
15984+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
15985+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
15986+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
15987+ * DEALINGS IN THE SOFTWARE.
15988+ *
15989+ * Authors:
15990+ * jim liu <jim.liu@intel.com>
15991+ */
15992+
15993+#include <linux/backlight.h>
15994+#include <drm/drm_crtc.h>
15995+#include <drm/drm_edid.h>
15996+
15997+#define DRM_MODE_ENCODER_MIPI 5
15998+#define DRM_MODE_CONNECTOR_MIPI 13
15999+
16000+#if DUMP_REGISTER
16001+extern void dump_dsi_registers(struct drm_device *dev);
16002+#endif /* DUMP_REGISTER */
16003+
16004+int dsi_backlight; /* restore backlight to this value */
16005+
16006+/**
16007+ * Returns the maximum level of the backlight duty cycle field.
16008+ */
16009+static u32 mrst_dsi_get_max_backlight(struct drm_device *dev)
16010+{
16011+#if PRINT_JLIU7
16012+ DRM_INFO("JLIU7 enter mrst_dsi_get_max_backlight \n");
16013+#endif /* PRINT_JLIU7 */
16014+
16015+ return BRIGHTNESS_MAX_LEVEL;
16016+
16017+/* FIXME jliu7 need to revisit */
16018+}
16019+
16020+/**
16021+ * Sets the power state for the panel.
16022+ */
16023+static void mrst_dsi_set_power(struct drm_device *dev,
16024+ struct psb_intel_output *output, bool on)
16025+{
16026+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
16027+ u32 pp_status;
16028+
16029+#if PRINT_JLIU7
16030+ DRM_INFO("JLIU7 enter mrst_dsi_set_power \n");
16031+#endif /* PRINT_JLIU7 */
16032+ /*
16033+ * The DSI device must be ready before we can change power state.
16034+ */
16035+ if (!dev_priv->dsi_device_ready)
16036+ {
16037+ return;
16038+ }
16039+
16040+ /*
16041+ * We don't support dual DSI yet. It may be in the POR in the future.
16042+ */
16043+ if (dev_priv->dual_display)
16044+ {
16045+ return;
16046+ }
16047+
16048+
16049+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
16050+
16051+ if (on) {
16052+ if (dev_priv->dpi & (!dev_priv->dpi_panel_on))
16053+ {
16054+
16055+#if PRINT_JLIU7
16056+ DRM_INFO("JLIU7 mrst_dsi_set_power dpi = on \n");
16057+#endif /* PRINT_JLIU7 */
16058+ REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
16059+#if 0 /*FIXME JLIU7 */
16060+ REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_ON_DATA);
16061+ REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_ON);
16062+#endif /*FIXME JLIU7 */
16063+
16064+ dev_priv->dpi_panel_on = true;
16065+
16066+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
16067+ POWER_TARGET_ON);
16068+ do {
16069+ pp_status = REG_READ(PP_STATUS);
16070+ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
16071+ }
16072+ else if ((!dev_priv->dpi) & (!dev_priv->dbi_panel_on))
16073+ {
16074+#if PRINT_JLIU7
16075+ DRM_INFO("JLIU7 mrst_dsi_set_power dbi = on \n");
16076+#endif /* PRINT_JLIU7 */
16077+
16078+ dev_priv->DBI_CB_pointer = 0;
16079+ /* exit sleep mode */
16080+ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = exit_sleep_mode;
16081+
16082+#if 0 /*FIXME JLIU7 */
16083+ /* Check MIPI adapter command registers */
16084+ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
16085+#endif /*FIXME JLIU7 */
16086+
16087+ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
16088+ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
16089+ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
16090+
16091+ /* The host processor must wait five milliseconds after sending exit_sleep_mode command before sending another
16092+ command. This delay allows the supply voltages and clock circuits to stabilize */
16093+ udelay(5000);
16094+
16095+ dev_priv->DBI_CB_pointer = 0;
16096+
16097+ /* set display on */
16098+ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = set_display_on ;
16099+
16100+#if 0 /*FIXME JLIU7 */
16101+ /* Check MIPI adapter command registers */
16102+ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
16103+#endif /*FIXME JLIU7 */
16104+
16105+ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
16106+ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
16107+ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
16108+
16109+ dev_priv->dbi_panel_on = true;
16110+ }
16111+/*FIXME JLIU7 */
16112+/* Need to figure out how to control the MIPI panel power on sequence*/
16113+
16114+ }
16115+ else
16116+ {
16117+/*FIXME JLIU7 */
16118+/* Need to figure out how to control the MIPI panel power down sequence*/
16119+ /*
16120+ * Only save the current backlight value if we're going from
16121+ * on to off.
16122+ */
16123+ if (dev_priv->dpi & dev_priv->dpi_panel_on)
16124+ {
16125+#if PRINT_JLIU7
16126+ DRM_INFO("JLIU7 mrst_dsi_set_power dpi = off \n");
16127+#endif /* PRINT_JLIU7 */
16128+
16129+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
16130+ ~POWER_TARGET_ON);
16131+ do {
16132+ pp_status = REG_READ(PP_STATUS);
16133+ } while (pp_status & PP_ON);
16134+
16135+#if 0 /*FIXME JLIU7 */
16136+ REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_OFF_DATA);
16137+ REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_OFF);
16138+#endif /*FIXME JLIU7 */
16139+ REG_WRITE(DPI_CONTROL_REG, DPI_SHUT_DOWN);
16140+ dev_priv->dpi_panel_on = false;
16141+ }
16142+ else if ((!dev_priv->dpi) & dev_priv->dbi_panel_on)
16143+ {
16144+#if PRINT_JLIU7
16145+ DRM_INFO("JLIU7 mrst_dsi_set_power dbi = off \n");
16146+#endif /* PRINT_JLIU7 */
16147+ dev_priv->DBI_CB_pointer = 0;
16148+ /* enter sleep mode */
16149+ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = enter_sleep_mode;
16150+
16151+ /* Check MIPI adapter command registers */
16152+ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
16153+
16154+ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
16155+ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
16156+ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
16157+ dev_priv->dbi_panel_on = false;
16158+ }
16159+ }
16160+
16161+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16162+}
16163+
16164+static void mrst_dsi_dpms(struct drm_encoder *encoder, int mode)
16165+{
16166+ struct drm_device *dev = encoder->dev;
16167+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
16168+
16169+#if PRINT_JLIU7
16170+ DRM_INFO("JLIU7 enter mrst_dsi_dpms \n");
16171+#endif /* PRINT_JLIU7 */
16172+
16173+ if (mode == DRM_MODE_DPMS_ON)
16174+ mrst_dsi_set_power(dev, output, true);
16175+ else
16176+ mrst_dsi_set_power(dev, output, false);
16177+
16178+ /* XXX: We never power down the DSI pairs. */
16179+}
16180+
16181+static void mrst_dsi_save(struct drm_connector *connector)
16182+{
16183+#if 0 /* JB: Disable for drop */
16184+ struct drm_device *dev = connector->dev;
16185+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
16186+
16187+#if PRINT_JLIU7
16188+ DRM_INFO("JLIU7 enter mrst_dsi_save \n");
16189+#endif /* PRINT_JLIU7 */
16190+
16191+ dev_priv->savePP_ON = REG_READ(LVDSPP_ON);
16192+ dev_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
16193+ dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
16194+ dev_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
16195+ dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
16196+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
16197+ BACKLIGHT_DUTY_CYCLE_MASK);
16198+
16199+ /*
16200+ * make backlight to full brightness
16201+ */
16202+ dsi_backlight = mrst_dsi_get_max_backlight(dev);
16203+#endif
16204+}
16205+
16206+static void mrst_dsi_restore(struct drm_connector *connector)
16207+{
16208+#if 0 /* JB: Disable for drop */
16209+ struct drm_device *dev = connector->dev;
16210+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
16211+
16212+#if PRINT_JLIU7
16213+ DRM_INFO("JLIU7 enter mrst_dsi_restore \n");
16214+#endif /* PRINT_JLIU7 */
16215+
16216+ REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
16217+ REG_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
16218+ REG_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
16219+ REG_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
16220+ REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
16221+ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
16222+ mrst_dsi_set_power(dev, true);
16223+ else
16224+ mrst_dsi_set_power(dev, false);
16225+#endif
16226+}
16227+
16228+static void mrst_dsi_prepare(struct drm_encoder *encoder)
16229+{
16230+ struct drm_device *dev = encoder->dev;
16231+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
16232+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
16233+
16234+#if PRINT_JLIU7
16235+ DRM_INFO("JLIU7 enter mrst_dsi_prepare \n");
16236+#endif /* PRINT_JLIU7 */
16237+
16238+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
16239+
16240+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
16241+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
16242+ BACKLIGHT_DUTY_CYCLE_MASK);
16243+
16244+ mrst_dsi_set_power(dev, output, false);
16245+
16246+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16247+}
16248+
16249+static void mrst_dsi_commit( struct drm_encoder *encoder)
16250+{
16251+ struct drm_device *dev = encoder->dev;
16252+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
16253+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
16254+
16255+#if PRINT_JLIU7
16256+ DRM_INFO("JLIU7 enter mrst_dsi_commit \n");
16257+#endif /* PRINT_JLIU7 */
16258+
16259+ if (mode_dev->backlight_duty_cycle == 0)
16260+ mode_dev->backlight_duty_cycle =
16261+ mrst_dsi_get_max_backlight(dev);
16262+
16263+ mrst_dsi_set_power(dev, output, true);
16264+
16265+#if DUMP_REGISTER
16266+ dump_dsi_registers(dev);
16267+#endif /* DUMP_REGISTER */
16268+}
16269+
16270+#if 0
16271+/* ************************************************************************* *\
16272+FUNCTION: GetHS_TX_timeoutCount
16273+
16274+DESCRIPTION: In burst mode, use a value greater than one DPI line time in byte
16275+ clocks (txbyteclkhs); programming one more than that value is recommended so the timer can time out.
16276+
16277+ In non-burst mode, use a value greater than one DPI frame time in byte clocks
16278+ (txbyteclkhs); again, one more than that value is recommended.
16279+
16280+\* ************************************************************************* */
16281+static u32 GetHS_TX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
16282+{
16283+
16284+ u32 timeoutCount = 0, HTOT_count = 0, VTOT_count = 0, HTotalPixel = 0;
16285+
16286+ /* Total pixels need to be transfer per line*/
16287+ HTotalPixel = (dev_priv->HsyncWidth + dev_priv->HbackPorch + dev_priv->HfrontPorch) * dev_priv->laneCount + dev_priv->HactiveArea;
16288+
16289+ /* byte count = (pixel count * bits per pixel) / 8 */
16290+ HTOT_count = (HTotalPixel * dev_priv->bpp) / 8;
16291+
16292+ if (dev_priv->videoModeFormat == BURST_MODE)
16293+ {
16294+ timeoutCount = HTOT_count + 1;
16295+#if 1 /*FIXME remove it after power-on */
16296+ VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch + dev_priv->VfrontPorch
16297+ + dev_priv->VsyncWidth;
16298+ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
16299+ timeoutCount = (HTOT_count * VTOT_count) + 1;
16300+#endif
16301+ }
16302+ else
16303+ {
16304+ VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch + dev_priv->VfrontPorch
16305+ + dev_priv->VsyncWidth;
16306+ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
16307+ timeoutCount = (HTOT_count * VTOT_count) + 1;
16308+ }
16309+
16310+ return timeoutCount & 0xFFFF;
16311+}
16312+
16313+/* ************************************************************************* *\
16314+FUNCTION: GetLP_RX_timeoutCount
16315+
16316+DESCRIPTION: The timeout value is protocol specific. Time out value is calculated
16317+ from txclkesc(50ns).
16318+
16319+ Minimum value =
16320+ Time to send one Trigger message = 4 X txclkesc [Escape mode entry sequence]
16321+ + 8-bit trigger message (2x8xtxclkesc)
16322+ +1 txclksesc [stop_state]
16323+ = 21 X txclkesc [ 15h]
16324+
16325+ Maximum Value =
16326+ Time to send a long packet with maximum payload data
16327+ = 4 X txclkesc [Escape mode entry sequence]
16328+ + 8-bit Low power data transmission Command (2x8xtxclkesc)
16329+ + packet header [ 4X8X2X txclkesc]
16330+ +payload [ nX8X2Xtxclkesc]
16331+ +CRC[2X8X2txclkesc]
16332+ +1 txclksesc [stop_state]
16333+ = 117 txclkesc +n[payload in terms of bytes]X16txclkesc.
16334+
16335+\* ************************************************************************* */
16336+static u32 GetLP_RX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
16337+{
16338+
16339+ u32 timeoutCount = 0;
16340+
16341+ if (dev_priv->config_phase)
16342+ {
16343+ /* Assuming 256 byte DDB data.*/
16344+ timeoutCount = 117 + 256 * 16;
16345+ }
16346+ else
16347+ {
16348+ /* For DPI video only mode use the minimum value.*/
16349+ timeoutCount = 0x15;
16350+#if 1 /*FIXME remove it after power-on */
16351+ /* Assuming 256 byte DDB data.*/
16352+ timeoutCount = 117 + 256 * 16;
16353+#endif
16354+ }
16355+
16356+ return timeoutCount;
16357+}
16358+#endif // #if 0 - to avoid warnings
16359+
16360+/* ************************************************************************* *\
16361+FUNCTION: GetHSA_Count
16362+
16363+DESCRIPTION: Shows the horizontal sync value in terms of byte clock
16364+ (txbyteclkhs)
16365+ Minimum HSA period should be sufficient to transmit a hsync start short
16366+ packet(4 bytes)
16367+ i) For Non-burst Mode with sync pulse, Min value - 4 in decimal [plus
16368+ an optional 6 bytes for a zero payload blanking packet]. But if
16369+ the value is less than 10 but more than 4, then this count will
16370+ be added to the HBP's count for one lane.
16371+ ii) For Non-Burst Sync Event & Burst Mode, there is no HSA, so you
16372+ can program this to zero. If you program this register, these
16373+ byte values will be added to HBP.
16374+ iii) For Burst mode of operation, normally the values programmed in
16375+ terms of byte clock are based on the principle - time for transferring
16376+ HSA in Burst mode is the same as in non-burst mode.
16377+\* ************************************************************************* */
16378+static u32 GetHSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16379+{
16380+ u32 HSA_count;
16381+ u32 HSA_countX8;
16382+
16383+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
16384+ /*HSA_countX8 = dev_priv->HsyncWidth * dev_priv->bpp;
16385+
16386+ if (dev_priv->videoModeFormat == BURST_MODE)
16387+ {
16388+ HSA_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
16389+ }
16390+
16391+ HSA_count = HSA_countX8 / 8;*/
16392+
16393+ /* since mode_set already computed Display Controller timings,
16394+ * read the register and compute mipi timings.
16395+ */
16396+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16397+ HSA_countX8 = REG_READ(HSYNC_A);
16398+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16399+ } else
16400+ HSA_countX8 = dev_priv->saveHSYNC_A;
16401+
16402+ /* Get the hsync pulse width */
16403+ HSA_count = ((HSA_countX8 & 0xffff0000)>>16) - (HSA_countX8 & 0xffff);
16404+ /* compute HSA according to equation:
16405+ (hsync_end - hsync_start) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
16406+ HSA_count = (HSA_count * dev_priv->bpp)/(2 * 8 * 2);
16407+ if (HSA_count < 4) /* minimum value of 4 */
16408+ HSA_count = 4;
16409+
16410+ return HSA_count;
16411+}
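
The conversion in GetHSA_Count() boils down to (hsync_end - hsync_start) * bpp / (2 * 8 * 2), i.e. pixels to bytes and then split across the two lanes the driver assumes, with a floor of 4. A stand-alone sketch of that arithmetic; the 24 bpp figure matches the driver's equation comment, while the 32-pixel sync width is only an example and not taken from any panel in this patch:

#include <stdio.h>

/* Same conversion as GetHSA_Count() above: sync width in pixels ->
 * txbyteclkhs count, assuming 24 bpp and 2 lanes (the 2 * 8 * 2 divisor). */
static unsigned int hsa_byteclk_count(unsigned int hsync_start,
				      unsigned int hsync_end,
				      unsigned int bpp)
{
	unsigned int count = ((hsync_end - hsync_start) * bpp) / (2 * 8 * 2);

	return count < 4 ? 4 : count;	/* same minimum of 4 as the driver */
}

int main(void)
{
	/* Example timings only: a 32-pixel hsync pulse at 24 bpp. */
	printf("HSA count = %u txbyteclkhs\n", hsa_byteclk_count(0, 32, 24));
	return 0;
}
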
16412+
16413+/* ************************************************************************* *\
16414+FUNCTION: GetHBP_Count
16415+
16416+DESCRIPTION: Shows the horizontal back porch value in terms of txbyteclkhs.
16417+ Minimum HBP period should be sufficient to transmit a "hsync end short
16418+ packet (4 bytes) + blanking packet overhead (6 bytes) + RGB packet header (4 bytes)".
16419+ For Burst mode of operation, normally the values programmed in terms of
16420+ byte clock are based on the principle - time for transferring HBP
16421+ in Burst mode is the same as in non-burst mode.
16422+
16423+ Min value - 14 in decimal [accounted with zero payload for blanking packet] for one lane.
16424+ Max value - any value greater than 14 based on DPI resolution
16425+\* ************************************************************************* */
16426+static u32 GetHBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16427+{
16428+ u32 HBP_count;
16429+ u32 HBE, HSE;
16430+
16431+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
16432+ /*HBP_countX8 = dev_priv->HbackPorch * dev_priv->bpp;
16433+
16434+ if (dev_priv->videoModeFormat == BURST_MODE)
16435+ {
16436+ HBP_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
16437+ }
16438+
16439+ HBP_count = HBP_countX8 / 8;*/
16440+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16441+ HBE = (REG_READ(HBLANK_A) & 0xffff0000) >> 16;
16442+ HSE = (REG_READ(HSYNC_A) & 0xffff0000) >> 16;
16443+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16444+ } else {
16445+ HBE = (dev_priv->saveHBLANK_A & 0xffff0000) >> 16;
16446+ HSE = (dev_priv->saveHSYNC_A & 0xffff0000) >> 16;
16447+ }
16448+
16449+ /* Get the horizontal back porch */
16450+ HBP_count = HBE - HSE;
16451+ /* compute HBP according to equation:
16452+ * (hblank_end - hsync_end) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
16453+ HBP_count = (HBP_count * dev_priv->bpp)/(2 * 8 * 2);
16454+ if (HBP_count < 8) /* minimum value of 8 */
16455+ HBP_count = 8;
16456+
16457+ return HBP_count;
16458+}
16459+
16460+/* ************************************************************************* *\
16461+FUNCTION: GetHFP_Count
16462+
16463+DESCRIPTION: Shows the horizontal front porch value in terms of txbyteclkhs.
16464+ Minimum HFP period should be sufficient to transmit "RGB data packet
16465+ footer (2 bytes) + blanking packet overhead (6 bytes)" for non-burst mode.
16466+
16467+ For burst mode, the minimum HFP period should be sufficient to transmit
16468+ blanking packet overhead (6 bytes).
16469+
16470+ For Burst mode of operation, normally the values programmed in terms of
16471+ byte clock are based on the principle - time for transferring HFP
16472+ in Burst mode is the same as in non-burst mode.
16473+
16474+ Min value - 8 in decimal for non-burst mode [accounted with zero payload
16475+ for blanking packet] for one lane.
16476+ Min value - 6 in decimal for burst mode for one lane.
16477+
16478+ Max value - any value greater than the minimum value based on DPI resolution
16479+\* ************************************************************************* */
16480+static u32 GetHFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16481+{
16482+ u32 HFP_count;
16483+ u32 HBS, HSS;
16484+
16485+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
16486+ /*HFP_countX8 = dev_priv->HfrontPorch * dev_priv->bpp;
16487+
16488+ if (dev_priv->videoModeFormat == BURST_MODE)
16489+ {
16490+ HFP_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
16491+ }
16492+
16493+ HFP_count = HFP_countX8 / 8;*/
16494+
16495+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16496+ HBS = REG_READ(HBLANK_A) & 0xffff;
16497+ HSS = REG_READ(HSYNC_A) & 0xffff;
16498+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16499+ } else {
16500+ HBS = dev_priv->saveHBLANK_A & 0xffff;
16501+ HSS = dev_priv->saveHSYNC_A & 0xffff;
16502+ }
16503+
16504+ /* Get the horizontal front porch */
16505+ HFP_count = HSS - HBS;
16506+ /* compute HFP according to equation:
16507+ * (hsync_start - hblank_start) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
16508+ HFP_count = (HFP_count * dev_priv->bpp)/(2 * 8 * 2);
16509+ if (HFP_count < 8) /* minimum value of 8 */
16510+ HFP_count = 8;
16511+
16512+ return HFP_count;
16513+}
16514+
16515+/* ************************************************************************* *\
16516+FUNCTION: GetHAdr_Count
16517+
16518+DESCRIPTION: Shows the horizontal active area value in terms of txbyteclkhs.
16519+ In Non Burst Mode, the count equals the RGB word count value.
16520+
16521+ In Burst Mode, RGB pixel packets are time-compressed, leaving more time
16522+ during a scan line for LP mode (saving power) or for multiplexing
16523+ other transmissions onto the DSI link. Hence, the count equals the
16524+ time in txbyteclkhs for sending time compressed RGB pixels plus
16525+ the time needed for moving to power save mode or the time needed
16526+ for secondary channel to use the DSI link.
16527+
16528+ But if the left out time for moving to low power mode is less than
16529+ 8 txbyteclkhs [2txbyteclkhs for RGB data packet footer and
16530+ 6txbyteclkhs for a blanking packet with zero payload], then
16531+ this count will be added to the HFP's count for one lane.
16532+
16533+ Min value - 8 in decimal for non-burst mode [accounted with zero payload
16534+ for blanking packet] for one lane.
16535+ Min value - 6 in decimal for burst mode for one lane.
16536+
16537+ Max value - any value greater than the minimum value based on DPI resolution
16538+\* ************************************************************************* */
16539+static u32 GetHAdr_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16540+{
16541+ u32 HAdr_count;
16542+ u32 Hactive;
16543+
16544+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
16545+ /*HAdr_countX8 = dev_priv->HactiveArea * dev_priv->bpp;
16546+
16547+ if (dev_priv->videoModeFormat == BURST_MODE)
16548+ {
16549+ HAdr_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
16550+ }
16551+
16552+ HAdr_count = HAdr_countX8 / 8;*/
16553+
16554+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16555+ Hactive = (REG_READ(HTOTAL_A) & 0x0fff) + 1;
16556+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16557+ } else
16558+ Hactive = (dev_priv->saveHTOTAL_A & 0x0fff) + 1;
16559+
16560+ /* compute HAdr according to equation:
16561+ * (hactive * 24 bpp / 8) / 2 lanes */
16562+
16563+ HAdr_count = (Hactive * dev_priv->bpp/8) / 2;
16564+
16565+ return HAdr_count;
16566+}
16567+
16568+/* ************************************************************************* *\
16569+FUNCTION: GetVSA_Count
16570+
16571+DESCRIPTION: Shows the vertical sync value in terms of lines
16572+
16573+\* ************************************************************************* */
16574+static u32 GetVSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16575+{
16576+ u32 VSA_count;
16577+ u32 VSA_countX8;
16578+
16579+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16580+ VSA_countX8 = REG_READ(VSYNC_A);
16581+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16582+ } else
16583+ VSA_countX8 = dev_priv->saveVSYNC_A;
16584+
16585+ /* Get the vsync pulse width */
16586+ VSA_count = ((VSA_countX8 & 0xffff0000)>>16) - (VSA_countX8 & 0xffff);
16587+
16588+ if (VSA_count < 2) /* minimum value of 2 */
16589+ VSA_count = 2;
16590+
16591+ return VSA_count;
16592+}
16593+
16594+/* ************************************************************************* *\
16595+ * FUNCTION: GetVBP_Count
16596+ *
16597+ * DESCRIPTION: Shows the vertical back porch value in lines.
16598+ *
16599+\* ************************************************************************* */
16600+static u32 GetVBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16601+{
16602+ u32 VBP_count;
16603+ u32 VBE, VSE;
16604+
16605+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16606+ VBE = (REG_READ(VBLANK_A) & 0xffff0000) >> 16;
16607+ VSE = (REG_READ(VSYNC_A) & 0xffff0000) >> 16;
16608+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16609+ } else {
16610+ VBE = (dev_priv->saveVBLANK_A & 0xffff0000) >> 16;
16611+ VSE = (dev_priv->saveVSYNC_A & 0xffff0000) >> 16;
16612+ }
16613+
16614+ /* Get the vertical back porch */
16615+ VBP_count = VBE - VSE;
16616+
16617+ if (VBP_count < 2) /* minimum value of 2 */
16618+ VBP_count = 2;
16619+
16620+ return VBP_count;
16621+}
16622+/* ************************************************************************* *\
16623+ * FUNCTION: GetVFP_Count
16624+ *
16625+ * DESCRIPTION: Shows the vertical front porch value in terms of lines.
16626+ *
16627+\* ************************************************************************* */
16628+static u32 GetVFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16629+{
16630+ u32 VFP_count;
16631+ u32 VBS, VSS;
16632+
16633+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16634+ VBS = REG_READ(VBLANK_A) & 0xffff;
16635+ VSS = REG_READ(VSYNC_A) & 0xffff;
16636+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16637+ } else {
16638+ VBS = dev_priv->saveVBLANK_A & 0xffff;
16639+ VSS = dev_priv->saveVSYNC_A & 0xffff;
16640+ }
16641+
16642+ /* Get the vertical front porch */
16643+ VFP_count = VSS - VBS;
16644+
16645+ if (VFP_count < 2) /* minimum value of 2 */
16646+ VFP_count = 2;
16647+
16648+ return VFP_count;
16649+}
16650+
16651+#if 0
16652+/* ************************************************************************* *\
16653+FUNCTION: GetHighLowSwitchCount
16654+
16655+DESCRIPTION: High speed to low power or Low power to high speed switching time
16656+ in terms of byte clock (txbyteclkhs). This value is based on the
16657+ byte clock (txbyteclkhs) and low power clock frequency (txclkesc)
16658+
16659+ Typical value - Number of byte clocks required to switch from low power mode
16660+ to high speed mode after "txrequesths" is asserted.
16661+
16662+ The worst count value among the low to high or high to low switching time
16663+ in terms of txbyteclkhs has to be programmed in this register.
16664+
16665+ Useful formulae:
16666+ DDR clock period = 2 times UI
16667+ txbyteclkhs clock = 8 times UI
16668+ Tlpx = 1 / txclkesc
16669+ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE (from Standard D-PHY spec)
16670+ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] + 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
16671+ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE (from Standard D-PHY spec)
16672+ Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
16673+\* ************************************************************************* */
16674+static u32 GetHighLowSwitchCount(DRM_DRIVER_PRIVATE_T *dev_priv)
16675+{
16676+ u32 HighLowSwitchCount, HighToLowSwitchCount, LowToHighSwitchCount;
16677+
16678+/* ************************************************************************* *\
16679+ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE (from Standard D-PHY spec)
16680+ Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
16681+
16682+ Tlpx = 50 ns, Using max txclkesc (20MHz)
16683+
16684+ txbyteclkhs_period = 4000 / dev_priv->DDR_Clock; in ns
16685+ UI_period = 500 / dev_priv->DDR_Clock; in ns
16686+
16687+ HS_to_LP = Ths-trail = 18 * UI_period + 4 * Tlpx
16688+ = 9000 / dev_priv->DDR_Clock + 200;
16689+
16690+ HighToLowSwitchCount = HS_to_LP / txbyteclkhs_period
16691+ = (9000 / dev_priv->DDR_Clock + 200) / (4000 / dev_priv->DDR_Clock)
16692+ = (9000 + (200 * dev_priv->DDR_Clock)) / 4000
16693+
16694+\* ************************************************************************* */
16695+ HighToLowSwitchCount = (9000 + (200 * dev_priv->DDR_Clock)) / 4000 + 1;
16696+
16697+/* ************************************************************************* *\
16698+ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE (from Standard D-PHY spec)
16699+ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] + 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
16700+
16701+ LP_to_HS = 10 * UI_period + 5 * Tlpx =
16702+ = 5000 / dev_priv->DDR_Clock + 250;
16703+
16704+ LowToHighSwitchCount = LP_to_HS / txbyteclkhs_period
16705+ = (5000 / dev_priv->DDR_Clock + 250) / (4000 / dev_priv->DDR_Clock)
16706+ = (5000 + (250 * dev_priv->DDR_Clock)) / 4000
16707+
16708+\* ************************************************************************* */
16709+ LowToHighSwitchCount = (5000 + (250 * dev_priv->DDR_Clock)) / 4000 + 1;
16710+
16711+ if (HighToLowSwitchCount > LowToHighSwitchCount)
16712+ {
16713+ HighLowSwitchCount = HighToLowSwitchCount;
16714+ }
16715+ else
16716+ {
16717+ HighLowSwitchCount = LowToHighSwitchCount;
16718+ }
16719+
16720+
16721+ /* FIXME jliu need to fine tune the above formulae and remove the following after power on */
16722+ if (HighLowSwitchCount < 0x1f)
16723+ HighLowSwitchCount = 0x1f;
16724+
16725+ return HighLowSwitchCount;
16726+}
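
The function above reduces to the two closed-form counts derived in its comments: HS-to-LP = (9000 + 200 * DDR_Clock) / 4000 + 1 and LP-to-HS = (5000 + 250 * DDR_Clock) / 4000 + 1, with the larger of the two clamped to at least 0x1f until the formula is tuned after power-on. A stand-alone sketch of that arithmetic, assuming DDR_Clock is in MHz as the derivation implies; the 200 MHz value is only an example:

#include <stdio.h>

int main(void)
{
	unsigned int ddr_clock = 200;	/* example DDR clock in MHz */
	unsigned int hs_to_lp = (9000 + 200 * ddr_clock) / 4000 + 1;
	unsigned int lp_to_hs = (5000 + 250 * ddr_clock) / 4000 + 1;
	unsigned int count = hs_to_lp > lp_to_hs ? hs_to_lp : lp_to_hs;

	if (count < 0x1f)	/* same pre-power-on floor the driver applies */
		count = 0x1f;

	printf("high/low switch count = 0x%x\n", count);
	return 0;
}
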
16727+
16728+/* ************************************************************************* *\
16729+FUNCTION: mrst_gen_long_write
16730+
16731+DESCRIPTION:
16732+
16733+\* ************************************************************************* */
16734+static void mrst_gen_long_write(struct drm_device *dev, u32 *data, u16 wc,u8 vc)
16735+{
16736+ u32 gen_data_reg = HS_GEN_DATA_REG;
16737+ u32 gen_ctrl_reg = HS_GEN_CTRL_REG;
16738+ u32 date_full_bit = HS_DATA_FIFO_FULL;
16739+ u32 control_full_bit = HS_CTRL_FIFO_FULL;
16740+ u16 wc_saved = wc;
16741+
16742+#if PRINT_JLIU7
16743+ DRM_INFO("JLIU7 enter mrst_gen_long_write \n");
16744+#endif /* PRINT_JLIU7 */
16745+
16746+ /* sanity check */
16747+ if (vc > 4)
16748+ {
16749+ DRM_ERROR("MIPI virtual channel cannot be greater than 4.\n");
16750+ return;
16751+ }
16752+
16753+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
16754+
16755+ if (0) /* FIXME JLIU7 check if it is in LP*/
16756+ {
16757+ gen_data_reg = LP_GEN_DATA_REG;
16758+ gen_ctrl_reg = LP_GEN_CTRL_REG;
16759+ date_full_bit = LP_DATA_FIFO_FULL;
16760+ control_full_bit = LP_CTRL_FIFO_FULL;
16761+ }
16762+
16763+ while (wc >= 4)
16764+ {
16765+ /* Check if MIPI IP generic data fifo is not full */
16766+ while ((REG_READ(GEN_FIFO_STAT_REG) & date_full_bit) == date_full_bit);
16767+
16768+ /* write to data buffer */
16769+ REG_WRITE(gen_data_reg, *data);
16770+
16771+ wc -= 4;
16772+ data ++;
16773+ }
16774+
16775+ switch (wc)
16776+ {
16777+ case 1:
16778+ REG_WRITE8(gen_data_reg, *((u8 *)data));
16779+ break;
16780+ case 2:
16781+ REG_WRITE16(gen_data_reg, *((u16 *)data));
16782+ break;
16783+ case 3:
16784+ REG_WRITE16(gen_data_reg, *((u16 *)data));
16785+ data = (u32*)((u8*) data + 2);
16786+ REG_WRITE8(gen_data_reg, *((u8 *)data));
16787+ break;
16788+ }
16789+
16790+ /* Check if MIPI IP generic control fifo is not full */
16791+ while ((REG_READ(GEN_FIFO_STAT_REG) & control_full_bit) == control_full_bit);
16792+ /* write to control buffer */
16793+ REG_WRITE(gen_ctrl_reg, 0x29 | (wc_saved << 8) | (vc << 6));
16794+
16795+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16796+}
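/*
 * Sketch of the control word written above, assuming 0x29 is the DSI
 * "generic long write" data type: for a 5-byte payload on virtual channel 0
 * (as used by the bridge init below),
 *
 *	0x29 | (5 << 8) | (0 << 6) = 0x529
 *
 * i.e. data type in bits 5:0, virtual channel in bits 7:6 and the word
 * count starting at bit 8.
 */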
16797+
16798+/* ************************************************************************* *\
16799+FUNCTION: mrst_init_HIMAX_MIPI_bridge
16800+
16801+DESCRIPTION: Initializes the HIMAX MIPI bridge via DSI generic long writes:
16802+ panel timings, FRC and RGBW settings for an 800X480 test pattern.
16803+\* ************************************************************************* */
16804+static void mrst_init_HIMAX_MIPI_bridge(struct drm_device *dev)
16805+{
16806+ u32 gen_data[2];
16807+ u16 wc = 0;
16808+ u8 vc =0;
16809+ u32 gen_data_intel = 0x200105;
16810+
16811+#if PRINT_JLIU7
16812+ DRM_INFO("JLIU7 enter mrst_init_HIMAX_MIPI_bridge \n");
16813+#endif /* PRINT_JLIU7 */
16814+
16815+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
16816+
16817+ /* exit sleep mode */
16818+ wc = 0x5;
16819+ gen_data[0] = gen_data_intel | (0x11 << 24);
16820+ gen_data[1] = 0;
16821+ mrst_gen_long_write(dev, gen_data, wc, vc);
16822+
16823+ /* set_pixel_format */
16824+ gen_data[0] = gen_data_intel | (0x3A << 24);
16825+ gen_data[1] = 0x77;
16826+ mrst_gen_long_write(dev, gen_data, wc, vc);
16827+
16828+ /* Set resolution for (800X480) */
16829+ wc = 0x8;
16830+ gen_data[0] = gen_data_intel | (0x2A << 24);
16831+ gen_data[1] = 0x1F030000;
16832+ mrst_gen_long_write(dev, gen_data, wc, vc);
16833+ gen_data[0] = gen_data_intel | (0x2B << 24);
16834+ gen_data[1] = 0xDF010000;
16835+ mrst_gen_long_write(dev, gen_data, wc, vc);
16836+
16837+ /* System control */
16838+ wc = 0x6;
16839+ gen_data[0] = gen_data_intel | (0xEE << 24);
16840+ gen_data[1] = 0x10FA;
16841+ mrst_gen_long_write(dev, gen_data, wc, vc);
16842+
16843+ /* INPUT TIMING FOR TEST PATTERN(800X480) */
16844+ /* H-size */
16845+ gen_data[1] = 0x2000;
16846+ mrst_gen_long_write(dev, gen_data, wc, vc);
16847+ gen_data[1] = 0x0301;
16848+ mrst_gen_long_write(dev, gen_data, wc, vc);
16849+
16850+ /* V-size */
16851+ gen_data[1] = 0xE002;
16852+ mrst_gen_long_write(dev, gen_data, wc, vc);
16853+ gen_data[1] = 0x0103;
16854+ mrst_gen_long_write(dev, gen_data, wc, vc);
16855+
16856+ /* H-total */
16857+ gen_data[1] = 0x2004;
16858+ mrst_gen_long_write(dev, gen_data, wc, vc);
16859+ gen_data[1] = 0x0405;
16860+ mrst_gen_long_write(dev, gen_data, wc, vc);
16861+
16862+ /* V-total */
16863+ gen_data[1] = 0x0d06;
16864+ mrst_gen_long_write(dev, gen_data, wc, vc);
16865+ gen_data[1] = 0x0207;
16866+ mrst_gen_long_write(dev, gen_data, wc, vc);
16867+
16868+ /* H-blank */
16869+ gen_data[1] = 0x0308;
16870+ mrst_gen_long_write(dev, gen_data, wc, vc);
16871+ gen_data[1] = 0x0009;
16872+ mrst_gen_long_write(dev, gen_data, wc, vc);
16873+
16874+ /* V-blank */
16875+ gen_data[1] = 0x030A;
16876+ mrst_gen_long_write(dev, gen_data, wc, vc);
16877+ gen_data[1] = 0x000B;
16878+ mrst_gen_long_write(dev, gen_data, wc, vc);
16879+
16880+ /* H-start */
16881+ gen_data[1] = 0xD80C;
16882+ mrst_gen_long_write(dev, gen_data, wc, vc);
16883+ gen_data[1] = 0x000D;
16884+ mrst_gen_long_write(dev, gen_data, wc, vc);
16885+
16886+ /* V-start */
16887+ gen_data[1] = 0x230E;
16888+ mrst_gen_long_write(dev, gen_data, wc, vc);
16889+ gen_data[1] = 0x000F;
16890+ mrst_gen_long_write(dev, gen_data, wc, vc);
16891+
16892+ /* RGB domain */
16893+ gen_data[1] = 0x0027;
16894+ mrst_gen_long_write(dev, gen_data, wc, vc);
16895+
16896+ /* INP_FORM Setting */
16897+ /* set_1 */
16898+ gen_data[1] = 0x1C10;
16899+ mrst_gen_long_write(dev, gen_data, wc, vc);
16900+
16901+ /* set_2 */
16902+ gen_data[1] = 0x0711;
16903+ mrst_gen_long_write(dev, gen_data, wc, vc);
16904+
16905+ /* set_3 */
16906+ gen_data[1] = 0x0012;
16907+ mrst_gen_long_write(dev, gen_data, wc, vc);
16908+
16909+ /* set_4 */
16910+ gen_data[1] = 0x0013;
16911+ mrst_gen_long_write(dev, gen_data, wc, vc);
16912+
16913+ /* set_5 */
16914+ gen_data[1] = 0x2314;
16915+ mrst_gen_long_write(dev, gen_data, wc, vc);
16916+
16917+ /* set_6 */
16918+ gen_data[1] = 0x0015;
16919+ mrst_gen_long_write(dev, gen_data, wc, vc);
16920+
16921+ /* set_7 */
16922+ gen_data[1] = 0x2316;
16923+ mrst_gen_long_write(dev, gen_data, wc, vc);
16924+
16925+ /* set_8 */
16926+ gen_data[1] = 0x0017;
16927+ mrst_gen_long_write(dev, gen_data, wc, vc);
16928+
16929+ /* set_1 */
16930+ gen_data[1] = 0x0330;
16931+ mrst_gen_long_write(dev, gen_data, wc, vc);
16932+
16933+ /* FRC Setting */
16934+ /* FRC_set_2 */
16935+ gen_data[1] = 0x237A;
16936+ mrst_gen_long_write(dev, gen_data, wc, vc);
16937+
16938+ /* FRC_set_3 */
16939+ gen_data[1] = 0x4C7B;
16940+ mrst_gen_long_write(dev, gen_data, wc, vc);
16941+
16942+ /* FRC_set_4 */
16943+ gen_data[1] = 0x037C;
16944+ mrst_gen_long_write(dev, gen_data, wc, vc);
16945+
16946+ /* FRC_set_5 */
16947+ gen_data[1] = 0x3482;
16948+ mrst_gen_long_write(dev, gen_data, wc, vc);
16949+
16950+ /* FRC_set_7 */
16951+ gen_data[1] = 0x1785;
16952+ mrst_gen_long_write(dev, gen_data, wc, vc);
16953+
16954+#if 0
16955+ /* FRC_set_8 */
16956+ gen_data[1] = 0xD08F;
16957+ mrst_gen_long_write(dev, gen_data, wc, vc);
16958+#endif
16959+
16960+ /* OUTPUT TIMING FOR TEST PATTERN (800X480) */
16961+ /* out_htotal */
16962+ gen_data[1] = 0x2090;
16963+ mrst_gen_long_write(dev, gen_data, wc, vc);
16964+ gen_data[1] = 0x0491;
16965+ mrst_gen_long_write(dev, gen_data, wc, vc);
16966+
16967+ /* out_hsync */
16968+ gen_data[1] = 0x0392;
16969+ mrst_gen_long_write(dev, gen_data, wc, vc);
16970+ gen_data[1] = 0x0093;
16971+ mrst_gen_long_write(dev, gen_data, wc, vc);
16972+
16973+ /* out_hstart */
16974+ gen_data[1] = 0xD894;
16975+ mrst_gen_long_write(dev, gen_data, wc, vc);
16976+ gen_data[1] = 0x0095;
16977+ mrst_gen_long_write(dev, gen_data, wc, vc);
16978+
16979+ /* out_hsize */
16980+ gen_data[1] = 0x2096;
16981+ mrst_gen_long_write(dev, gen_data, wc, vc);
16982+ gen_data[1] = 0x0397;
16983+ mrst_gen_long_write(dev, gen_data, wc, vc);
16984+
16985+ /* out_vtotal */
16986+ gen_data[1] = 0x0D98;
16987+ mrst_gen_long_write(dev, gen_data, wc, vc);
16988+ gen_data[1] = 0x0299;
16989+ mrst_gen_long_write(dev, gen_data, wc, vc);
16990+
16991+ /* out_vsync */
16992+ gen_data[1] = 0x039A;
16993+ mrst_gen_long_write(dev, gen_data, wc, vc);
16994+ gen_data[1] = 0x009B;
16995+ mrst_gen_long_write(dev, gen_data, wc, vc);
16996+
16997+ /* out_vstart */
16998+ gen_data[1] = 0x239C;
16999+ mrst_gen_long_write(dev, gen_data, wc, vc);
17000+ gen_data[1] = 0x009D;
17001+ mrst_gen_long_write(dev, gen_data, wc, vc);
17002+
17003+ /* out_vsize */
17004+ gen_data[1] = 0xE09E;
17005+ mrst_gen_long_write(dev, gen_data, wc, vc);
17006+ gen_data[1] = 0x019F;
17007+ mrst_gen_long_write(dev, gen_data, wc, vc);
17008+
17009+ /* FRC_set_6 */
17010+ gen_data[1] = 0x9084;
17011+ mrst_gen_long_write(dev, gen_data, wc, vc);
17012+
17013+ /* Other setting */
17014+ gen_data[1] = 0x0526;
17015+ mrst_gen_long_write(dev, gen_data, wc, vc);
17016+
17017+ /* RGB domain */
17018+ gen_data[1] = 0x1177;
17019+ mrst_gen_long_write(dev, gen_data, wc, vc);
17020+
17021+ /* rgbw */
17022+ /* set_1 */
17023+ gen_data[1] = 0xD28F;
17024+ mrst_gen_long_write(dev, gen_data, wc, vc);
17025+
17026+ /* set_2 */
17027+ gen_data[1] = 0x02D0;
17028+ mrst_gen_long_write(dev, gen_data, wc, vc);
17029+
17030+ /* set_3 */
17031+ gen_data[1] = 0x08D1;
17032+ mrst_gen_long_write(dev, gen_data, wc, vc);
17033+
17034+ /* set_4 */
17035+ gen_data[1] = 0x05D2;
17036+ mrst_gen_long_write(dev, gen_data, wc, vc);
17037+
17038+ /* set_5 */
17039+ gen_data[1] = 0x24D4;
17040+ mrst_gen_long_write(dev, gen_data, wc, vc);
17041+
17042+ /* set_6 */
17043+ gen_data[1] = 0x00D5;
17044+ mrst_gen_long_write(dev, gen_data, wc, vc);
17045+ gen_data[1] = 0x02D7;
17046+ mrst_gen_long_write(dev, gen_data, wc, vc);
17047+ gen_data[1] = 0x00D8;
17048+ mrst_gen_long_write(dev, gen_data, wc, vc);
17049+
17050+ gen_data[1] = 0x48F3;
17051+ mrst_gen_long_write(dev, gen_data, wc, vc);
17052+ gen_data[1] = 0xD4F2;
17053+ mrst_gen_long_write(dev, gen_data, wc, vc);
17054+ gen_data[1] = 0x3D8E;
17055+ mrst_gen_long_write(dev, gen_data, wc, vc);
17056+ gen_data[1] = 0x60FD;
17057+ mrst_gen_long_write(dev, gen_data, wc, vc);
17058+ gen_data[1] = 0x00B5;
17059+ mrst_gen_long_write(dev, gen_data, wc, vc);
17060+ gen_data[1] = 0x48F4;
17061+ mrst_gen_long_write(dev, gen_data, wc, vc);
17062+
17063+ /* inside pattern */
17064+ gen_data[1] = 0x0060;
17065+ mrst_gen_long_write(dev, gen_data, wc, vc);
17066+
17067+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
17068+}
17069+#endif
17070+static void mrst_wait_for_LP_CTRL_FIFO(struct drm_device *dev)
17071+{
17072+ while(REG_READ(GEN_FIFO_STAT_REG) & LP_CTRL_FIFO_FULL);
17073+}
17074+
17075+/* ************************************************************************* *\
17076+FUNCTION: mrst_init_NSC_MIPI_bridge
17077+
17078+DESCRIPTION: Programs the NSC MIPI bridge for 50MHz DSI, Non-Burst mode with
17079+ sync events, using 1 or 2 data lanes.
17080+\* ************************************************************************* */
17081+static void mrst_init_NSC_MIPI_bridge(struct drm_device *dev)
17082+{
17083+
17084+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
17085+#if PRINT_JLIU7
17086+ DRM_INFO("JLIU7 enter mrst_init_NSC_MIPI_bridge.\n");
17087+#endif /* PRINT_JLIU7 */
17088+ /* Program MIPI IP to 50MHz DSI, Non-Burst mode with sync event,
17089+ 1 or 2 Data Lanes */
17090+
17091+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
17092+
17093+ mrst_wait_for_LP_CTRL_FIFO(dev);
17094+ /* enable RGB24*/
17095+ REG_WRITE(LP_GEN_CTRL_REG, 0x003205e3);
17096+
17097+ mrst_wait_for_LP_CTRL_FIFO(dev);
17098+ /* enable all error reporting*/
17099+ REG_WRITE(LP_GEN_CTRL_REG, 0x000040e3);
17100+ mrst_wait_for_LP_CTRL_FIFO(dev);
17101+ REG_WRITE(LP_GEN_CTRL_REG, 0x000041e3);
17102+
17103+ mrst_wait_for_LP_CTRL_FIFO(dev);
17104+ /* enable 2 data lane; video shaping & error reporting */
17105+ REG_WRITE(LP_GEN_CTRL_REG, 0x00a842e3); /* 0x006842e3 for 1 data lane */
17106+
17107+ mrst_wait_for_LP_CTRL_FIFO(dev);
17108+ /* HS timeout */
17109+ REG_WRITE(LP_GEN_CTRL_REG, 0x009243e3);
17110+
17111+ mrst_wait_for_LP_CTRL_FIFO(dev);
17112+ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
17113+ REG_WRITE(LP_GEN_CTRL_REG, 0x00e645e3);
17114+
17115+ mrst_wait_for_LP_CTRL_FIFO(dev);
17116+ /* enable all virtual channels */
17117+ REG_WRITE(LP_GEN_CTRL_REG, 0x000f46e3);
17118+
17119+ mrst_wait_for_LP_CTRL_FIFO(dev);
17120+ /* set output strength to low-drive */
17121+ REG_WRITE(LP_GEN_CTRL_REG, 0x00007de3);
17122+
17123+ mrst_wait_for_LP_CTRL_FIFO(dev);
17124+ if (dev_priv->sku_83)
17125+ {
17126+ /* set escape clock to divide by 8 */
17127+ REG_WRITE(LP_GEN_CTRL_REG, 0x000044e3);
17128+ }
17129+ else if(dev_priv->sku_100L)
17130+ {
17131+ /* set escape clock to divide by 16 */
17132+ REG_WRITE(LP_GEN_CTRL_REG, 0x001044e3);
17133+ }
17134+ else if(dev_priv->sku_100)
17135+ {
17136+ /* set escape clock to divide by 32 */
17137+ REG_WRITE(LP_GEN_CTRL_REG, 0x003044e3);
17138+
17139+ mrst_wait_for_LP_CTRL_FIFO(dev);
17140+ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
17141+ REG_WRITE(LP_GEN_CTRL_REG, 0x00ec45e3);
17142+ }
17143+
17144+ mrst_wait_for_LP_CTRL_FIFO(dev);
17145+ /* CFG_VALID=1; RGB_CLK_EN=1. */
17146+ REG_WRITE(LP_GEN_CTRL_REG, 0x00057fe3);
17147+
17148+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
17149+}
17150+
17151+static void mrst_dsi_mode_set(struct drm_encoder *encoder,
17152+ struct drm_display_mode *mode,
17153+ struct drm_display_mode *adjusted_mode)
17154+{
17155+ struct drm_device *dev = encoder->dev;
17156+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
17157+ u32 dsiFuncPrgValue = 0;
17158+ u32 SupportedFormat = 0;
17159+ u32 channelNumber = 0;
17160+ u32 DBI_dataWidth = 0;
17161+ u32 resolution = 0;
17162+ u32 mipiport = 0;
17163+ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
17164+
17165+#if PRINT_JLIU7
17166+ DRM_INFO("JLIU7 enter mrst_dsi_mode_set \n");
17167+#endif /* PRINT_JLIU7 */
17168+
17169+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
17170+
17171+ switch (dev_priv->bpp)
17172+ {
17173+ case 16:
17174+ SupportedFormat = RGB_565_FMT;
17175+ break;
17176+ case 18:
17177+ SupportedFormat = RGB_666_FMT;
17178+ break;
17179+ case 24:
17180+ SupportedFormat = RGB_888_FMT;
17181+ break;
17182+ default:
17183+ DRM_INFO("mrst_dsi_mode_set, invalid bpp \n");
17184+ break;
17185+ }
17186+
17187+ resolution = dev_priv->HactiveArea | (dev_priv->VactiveArea << RES_V_POS);
17188+
17189+ if (dev_priv->dpi)
17190+ {
17191+ drm_connector_property_get_value(&enc_to_psb_intel_output(encoder)->base, dev->mode_config.scaling_mode_property, &curValue);
17192+
17193+ if (curValue == DRM_MODE_SCALE_NO_SCALE)
17194+ REG_WRITE(PFIT_CONTROL, 0);
17195+ else if (curValue == DRM_MODE_SCALE_ASPECT) {
17196+ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) || (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
17197+ if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) == (mode->hdisplay * adjusted_mode->crtc_vdisplay))
17198+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
17199+ else if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) > (mode->hdisplay * adjusted_mode->crtc_vdisplay))
17200+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_PILLARBOX);
17201+ else
17202+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_LETTERBOX);
17203+ } else
17204+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
17205+ } else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/
17206+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
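 /*
  * Aspect-ratio example for the comparison above (illustrative numbers,
  * not taken from this patch): with a native 1024x600 panel and a
  * requested 800x600 mode,
  *
  *	crtc_hdisplay * vdisplay = 1024 * 600 = 614400
  *	hdisplay * crtc_vdisplay =  800 * 600 = 480000
  *
  * so the source is narrower than the panel and the PILLARBOX mode
  * (borders on the sides) would be selected.
  */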
17207+
17208+ /* Enable MIPI Port */
17209+ mipiport = MIPI_PORT_EN | MIPI_BORDER_EN;
17210+ REG_WRITE(MIPI, mipiport);
17211+
17212+ /* JLIU7_FIXME set MIPI clock ratio to 1:1 for NSC init */
17213+ REG_WRITE(MIPI_CONTROL_REG, 0x00000018);
17214+
17215+ /* Enable all the error interrupts */
17216+ REG_WRITE(INTR_EN_REG, 0xffffffff);
17217+ REG_WRITE(TURN_AROUND_TIMEOUT_REG, 0x0000000A);
17218+ REG_WRITE(DEVICE_RESET_REG, 0x000000ff); /* old value = 0x00000015 may depend on the DSI RX device */
17219+ REG_WRITE(INIT_COUNT_REG, 0x00000fff); /* Minimum value = 0x000007d0 */
17220+
17221+ SupportedFormat <<= FMT_DPI_POS;
17222+ dsiFuncPrgValue = dev_priv->laneCount | SupportedFormat;
17223+ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
17224+
17225+ REG_WRITE(DPI_RESOLUTION_REG, resolution);
17226+ /*REG_WRITE(DBI_RESOLUTION_REG, 0x00000000);*/
17227+
17228+ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, GetVSA_Count(dev, dev_priv));
17229+ REG_WRITE(VERT_BACK_PORCH_COUNT_REG,
17230+ GetVBP_Count(dev, dev_priv));
17231+ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG,
17232+ GetVFP_Count(dev, dev_priv));
17233+
17234+ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG,
17235+ GetHSA_Count(dev, dev_priv));
17236+ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG,
17237+ GetHBP_Count(dev, dev_priv));
17238+ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG,
17239+ GetHFP_Count(dev, dev_priv));
17240+ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG,
17241+ GetHAdr_Count(dev, dev_priv));
17242+
17243+ REG_WRITE(VIDEO_FMT_REG, dev_priv->videoModeFormat);
17244+ }
17245+ else
17246+ {
17247+ /* JLIU7 FIXME VIRTUAL_CHANNEL_NUMBER_1 or VIRTUAL_CHANNEL_NUMBER_0*/
17248+ channelNumber = VIRTUAL_CHANNEL_NUMBER_1 << DBI_CHANNEL_NUMBER_POS;
17249+ DBI_dataWidth = DBI_DATA_WIDTH_16BIT << DBI_DATA_WIDTH_POS;
17250+ dsiFuncPrgValue = dev_priv->laneCount | channelNumber | DBI_dataWidth;
17251+ /* JLIU7 FIXME */
17252+ SupportedFormat <<= FMT_DBI_POS;
17253+ dsiFuncPrgValue |= SupportedFormat;
17254+ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
17255+
17256+ REG_WRITE(DPI_RESOLUTION_REG, 0x00000000);
17257+ REG_WRITE(DBI_RESOLUTION_REG, resolution);
17258+ }
17259+
17260+#if 1 /*JLIU7_PO hard code for NSC PO */
17261+ REG_WRITE(HS_TX_TIMEOUT_REG, 0x90000);
17262+ REG_WRITE(LP_RX_TIMEOUT_REG, 0xffff);
17263+
17264+ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, 0x46);
17265+#else /*JLIU7_PO hard code for NSC PO */
17266+ REG_WRITE(HS_TX_TIMEOUT_REG, GetHS_TX_timeoutCount(dev_priv));
17267+ REG_WRITE(LP_RX_TIMEOUT_REG, GetLP_RX_timeoutCount(dev_priv));
17268+
17269+ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, GetHighLowSwitchCount(dev_priv));
17270+#endif /*JLIU7_PO hard code for NSC PO */
17271+
17272+
17273+ REG_WRITE(EOT_DISABLE_REG, 0x00000000);
17274+
17275+ /* FIXME JLIU7 for NSC PO */
17276+ REG_WRITE(LP_BYTECLK_REG, 0x00000004);
17277+
17278+ REG_WRITE(DEVICE_READY_REG, 0x00000001);
17279+ REG_WRITE(DPI_CONTROL_REG, 0x00000002); /* Turn On */
17280+
17281+ dev_priv->dsi_device_ready = true;
17282+
17283+#if 0 /*JLIU7_PO */
17284+ mrst_init_HIMAX_MIPI_bridge(dev);
17285+#endif /*JLIU7_PO */
17286+ mrst_init_NSC_MIPI_bridge(dev);
17287+
17288+ if (dev_priv->sku_100L)
17289+ /* Set DSI link to 100MHz; 2:1 clock ratio */
17290+ REG_WRITE(MIPI_CONTROL_REG, 0x00000009);
17291+
17292+ REG_WRITE(PIPEACONF, dev_priv->pipeconf);
17293+ REG_READ(PIPEACONF);
17294+
17295+ /* Wait for 20ms for the pipe enable to take effect. */
17296+ udelay(20000);
17297+
17298+ REG_WRITE(DSPACNTR, dev_priv->dspcntr);
17299+
17300+ /* Wait for 20ms for the plane enable to take effect. */
17301+ udelay(20000);
17302+
17303+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
17304+}
17305+
17306+/**
17307+ * Detect the MIPI connection.
17308+ *
17309+ * This always returns CONNECTOR_STATUS_CONNECTED.
17310+ * This connector should only have
17311+ * been set up if the MIPI was actually connected anyway.
17312+ */
17313+static enum drm_connector_status mrst_dsi_detect(struct drm_connector
17314+ *connector)
17315+{
17316+#if PRINT_JLIU7
17317+ DRM_INFO("JLIU7 enter mrst_dsi_detect \n");
17318+#endif /* PRINT_JLIU7 */
17319+
17320+ return connector_status_connected;
17321+}
17322+
17323+/**
17324+ * Return the list of MIPI DDB modes if available.
17325+ */
17326+static int mrst_dsi_get_modes(struct drm_connector *connector)
17327+{
17328+ struct drm_device *dev = connector->dev;
17329+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
17330+ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
17331+
17332+/* FIXME get the MIPI DDB modes */
17333+
17334+ /* Didn't get a DDB, so
17335+ * Set wide sync ranges so we get all modes
17336+ * handed to valid_mode for checking
17337+ */
17338+ connector->display_info.min_vfreq = 0;
17339+ connector->display_info.max_vfreq = 200;
17340+ connector->display_info.min_hfreq = 0;
17341+ connector->display_info.max_hfreq = 200;
17342+
17343+ if (mode_dev->panel_fixed_mode != NULL) {
17344+ struct drm_display_mode *mode =
17345+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
17346+ drm_mode_probed_add(connector, mode);
17347+ return 1;
17348+ }
17349+
17350+ return 0;
17351+}
17352+
17353+static const struct drm_encoder_helper_funcs mrst_dsi_helper_funcs = {
17354+ .dpms = mrst_dsi_dpms,
17355+ .mode_fixup = psb_intel_lvds_mode_fixup,
17356+ .prepare = mrst_dsi_prepare,
17357+ .mode_set = mrst_dsi_mode_set,
17358+ .commit = mrst_dsi_commit,
17359+};
17360+
17361+static const struct drm_connector_helper_funcs
17362+ mrst_dsi_connector_helper_funcs = {
17363+ .get_modes = mrst_dsi_get_modes,
17364+ .mode_valid = psb_intel_lvds_mode_valid,
17365+ .best_encoder = psb_intel_best_encoder,
17366+};
17367+
17368+static const struct drm_connector_funcs mrst_dsi_connector_funcs = {
17369+ .dpms = psb_intel_lvds_connector_dpms,
17370+ .save = mrst_dsi_save,
17371+ .restore = mrst_dsi_restore,
17372+ .detect = mrst_dsi_detect,
17373+ .fill_modes = drm_helper_probe_single_connector_modes,
17374+ .set_property = psb_intel_lvds_set_property,
17375+ .destroy = psb_intel_lvds_destroy,
17376+};
17377+
17378+/** Returns the panel fixed mode from configuration. */
17379+/** FIXME JLIU7 need to revisit it. */
17380+struct drm_display_mode *mrst_dsi_get_configuration_mode(struct drm_device *dev)
17381+{
17382+ struct drm_display_mode *mode;
17383+ struct drm_psb_private *dev_priv =
17384+ (struct drm_psb_private *) dev->dev_private;
17385+ u8 panel_index = dev_priv->gct_data.bpi;
17386+ u8 panel_type = dev_priv->gct_data.pt;
17387+ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
17388+ bool use_gct = false;
17389+
17390+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
17391+ if (!mode)
17392+ return NULL;
17393+
17394+ if (dev_priv->vbt_data.Size != 0x00) /*if non-zero, vbt is present*/
17395+ if ((1<<panel_index) & panel_type) /* if non-zero,*/
17396+ use_gct = true; /*then mipi panel.*/
17397+
17398+ if (use_gct) {
17399+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
17400+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
17401+ mode->hsync_start = mode->hdisplay + \
17402+ ((ti->hsync_offset_hi << 8) | \
17403+ ti->hsync_offset_lo);
17404+ mode->hsync_end = mode->hsync_start + \
17405+ ((ti->hsync_pulse_width_hi << 8) | \
17406+ ti->hsync_pulse_width_lo);
17407+ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
17408+ ti->hblank_lo);
17409+ mode->vsync_start = \
17410+ mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
17411+ ti->vsync_offset_lo);
17412+ mode->vsync_end = \
17413+ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
17414+ ti->vsync_pulse_width_lo);
17415+ mode->vtotal = mode->vdisplay + \
17416+ ((ti->vblank_hi << 8) | ti->vblank_lo);
17417+ mode->clock = ti->pixel_clock * 10;
17418+#if 1
17419+ printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
17420+ printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
17421+ printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
17422+ printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
17423+ printk(KERN_INFO "htotal is %d\n", mode->htotal);
17424+ printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
17425+ printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
17426+ printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
17427+ printk(KERN_INFO "clock is %d\n", mode->clock);
17428+#endif
17429+
17430+ } else {
17431+#if 1 /*FIXME jliu7 remove it later */
17432+ /* copy from SV - hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */
17433+ mode->hdisplay = 800;
17434+ mode->vdisplay = 480;
17435+ mode->hsync_start = 808;
17436+ mode->hsync_end = 848;
17437+ mode->htotal = 880;
17438+ mode->vsync_start = 482;
17439+ mode->vsync_end = 483;
17440+ mode->vtotal = 486;
17441+ mode->clock = 33264;
17442+#endif /*FIXME jliu7 remove it later */
17443+
17444+#if 0 /*FIXME jliu7 remove it later */
17445+ /* hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */
17446+ mode->hdisplay = 800;
17447+ mode->vdisplay = 480;
17448+ mode->hsync_start = 836;
17449+ mode->hsync_end = 846;
17450+ mode->htotal = 1056;
17451+ mode->vsync_start = 489;
17452+ mode->vsync_end = 491;
17453+ mode->vtotal = 525;
17454+ mode->clock = 33264;
17455+#endif /*FIXME jliu7 remove it later */
17456+
17457+#if 0 /*FIXME jliu7 remove it later */
17458+ /* hard coded fixed mode for LVDS 800x480 */
17459+ mode->hdisplay = 800;
17460+ mode->vdisplay = 480;
17461+ mode->hsync_start = 801;
17462+ mode->hsync_end = 802;
17463+ mode->htotal = 1024;
17464+ mode->vsync_start = 481;
17465+ mode->vsync_end = 482;
17466+ mode->vtotal = 525;
17467+ mode->clock = 30994;
17468+#endif /*FIXME jliu7 remove it later */
17469+
17470+#if 0 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */
17471+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
17472+ mode->hdisplay = 1024;
17473+ mode->vdisplay = 600;
17474+ mode->hsync_start = 1072;
17475+ mode->hsync_end = 1104;
17476+ mode->htotal = 1184;
17477+ mode->vsync_start = 603;
17478+ mode->vsync_end = 604;
17479+ mode->vtotal = 608;
17480+ mode->clock = 53990;
17481+#endif /*FIXME jliu7 remove it later */
17482+
17483+#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
17484+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
17485+ mode->hdisplay = 1024;
17486+ mode->vdisplay = 600;
17487+ mode->hsync_start = 1104;
17488+ mode->hsync_end = 1136;
17489+ mode->htotal = 1184;
17490+ mode->vsync_start = 603;
17491+ mode->vsync_end = 604;
17492+ mode->vtotal = 608;
17493+ mode->clock = 53990;
17494+#endif /*FIXME jliu7 remove it later */
17495+
17496+#if 0 /*FIXME jliu7 remove it later */
17497+ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
17498+ mode->hdisplay = 1024;
17499+ mode->vdisplay = 600;
17500+ mode->hsync_start = 1124;
17501+ mode->hsync_end = 1204;
17502+ mode->htotal = 1312;
17503+ mode->vsync_start = 607;
17504+ mode->vsync_end = 610;
17505+ mode->vtotal = 621;
17506+ mode->clock = 48885;
17507+#endif /*FIXME jliu7 remove it later */
17508+
17509+#if 0 /*FIXME jliu7 remove it later */
17510+ /* hard coded fixed mode for LVDS 1024x768 */
17511+ mode->hdisplay = 1024;
17512+ mode->vdisplay = 768;
17513+ mode->hsync_start = 1048;
17514+ mode->hsync_end = 1184;
17515+ mode->htotal = 1344;
17516+ mode->vsync_start = 771;
17517+ mode->vsync_end = 777;
17518+ mode->vtotal = 806;
17519+ mode->clock = 65000;
17520+#endif /*FIXME jliu7 remove it later */
17521+
17522+#if 0 /*FIXME jliu7 remove it later */
17523+ /* hard coded fixed mode for LVDS 1366x768 */
17524+ mode->hdisplay = 1366;
17525+ mode->vdisplay = 768;
17526+ mode->hsync_start = 1430;
17527+ mode->hsync_end = 1558;
17528+ mode->htotal = 1664;
17529+ mode->vsync_start = 769;
17530+ mode->vsync_end = 770;
17531+ mode->vtotal = 776;
17532+ mode->clock = 77500;
17533+#endif /*FIXME jliu7 remove it later */
17534+ }
17535+ drm_mode_set_name(mode);
17536+ drm_mode_set_crtcinfo(mode, 0);
17537+
17538+ return mode;
17539+}
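/*
 * Sketch of the GCT/DTD decoding above (hypothetical field values, not taken
 * from this patch): for an 800x480 panel the GCT would carry
 *
 *	hactive_hi = 0x03, hactive_lo = 0x20  ->  (0x03 << 8) | 0x20 = 800
 *	vactive_hi = 0x01, vactive_lo = 0xE0  ->  (0x01 << 8) | 0xE0 = 480
 *
 * and, since mode->clock is in kHz, pixel_clock appears to be stored in
 * 10 kHz units: pixel_clock = 3326 would give mode->clock = 33260.
 */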
17540+
17541+/* ************************************************************************* *\
17542+FUNCTION: mrstDSI_clockInit
17543+
17544+DESCRIPTION: Derives the panel refresh rate and MIPI DDR clock from the
17545+ (currently hard-coded) DPI timings and picks the nearest supported 2x clock.
17546+\* ************************************************************************* */
17547+static u32 sku_83_mipi_2xclk[4] = {166667, 333333, 444444, 666667};
17548+static u32 sku_100_mipi_2xclk[4] = {200000, 400000, 533333, 800000};
17549+static u32 sku_100L_mipi_2xclk[4] = {100000, 200000, 266667, 400000};
17550+#define MIPI_2XCLK_COUNT 0x04
17551+
17552+static bool mrstDSI_clockInit(DRM_DRIVER_PRIVATE_T *dev_priv)
17553+{
17554+ u32 Htotal = 0, Vtotal = 0, RRate = 0, mipi_2xclk = 0;
17555+ u32 i = 0;
17556+ u32 *p_mipi_2xclk = NULL;
17557+
17558+#if 0 /* JLIU7_PO old values */
17559+ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
17560+ dev_priv->pixelClock = 33264; /*KHz*/
17561+ dev_priv->HsyncWidth = 10;
17562+ dev_priv->HbackPorch = 210;
17563+ dev_priv->HfrontPorch = 36;
17564+ dev_priv->HactiveArea = 800;
17565+ dev_priv->VsyncWidth = 2;
17566+ dev_priv->VbackPorch = 34;
17567+ dev_priv->VfrontPorch = 9;
17568+ dev_priv->VactiveArea = 480;
17569+ dev_priv->bpp = 24;
17570+
17571+ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
17572+ dev_priv->dbi_pixelClock = 33264; /*KHz*/
17573+ dev_priv->dbi_HsyncWidth = 10;
17574+ dev_priv->dbi_HbackPorch = 210;
17575+ dev_priv->dbi_HfrontPorch = 36;
17576+ dev_priv->dbi_HactiveArea = 800;
17577+ dev_priv->dbi_VsyncWidth = 2;
17578+ dev_priv->dbi_VbackPorch = 34;
17579+ dev_priv->dbi_VfrontPorch = 9;
17580+ dev_priv->dbi_VactiveArea = 480;
17581+ dev_priv->dbi_bpp = 24;
17582+#else /* JLIU7_PO old values */
17583+ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
17584+ /* FIXME Pre-Si value, 1 or 2 lanes; 50MHz; Non-Burst w/ sync event */
17585+ dev_priv->pixelClock = 33264; /*KHz*/
17586+ dev_priv->HsyncWidth = 10;
17587+ dev_priv->HbackPorch = 8;
17588+ dev_priv->HfrontPorch = 3;
17589+ dev_priv->HactiveArea = 800;
17590+ dev_priv->VsyncWidth = 2;
17591+ dev_priv->VbackPorch = 3;
17592+ dev_priv->VfrontPorch = 2;
17593+ dev_priv->VactiveArea = 480;
17594+ dev_priv->bpp = 24;
17595+
17596+ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
17597+ dev_priv->dbi_pixelClock = 33264; /*KHz*/
17598+ dev_priv->dbi_HsyncWidth = 10;
17599+ dev_priv->dbi_HbackPorch = 8;
17600+ dev_priv->dbi_HfrontPorch = 3;
17601+ dev_priv->dbi_HactiveArea = 800;
17602+ dev_priv->dbi_VsyncWidth = 2;
17603+ dev_priv->dbi_VbackPorch = 3;
17604+ dev_priv->dbi_VfrontPorch = 2;
17605+ dev_priv->dbi_VactiveArea = 480;
17606+ dev_priv->dbi_bpp = 24;
17607+#endif /* JLIU7_PO old values */
17608+
17609+ Htotal = dev_priv->HsyncWidth + dev_priv->HbackPorch + dev_priv->HfrontPorch + dev_priv->HactiveArea;
17610+ Vtotal = dev_priv->VsyncWidth + dev_priv->VbackPorch + dev_priv->VfrontPorch + dev_priv->VactiveArea;
17611+
17612+ RRate = ((dev_priv->pixelClock * 1000) / (Htotal * Vtotal)) + 1;
17613+
17614+ dev_priv->RRate = RRate;
17615+
17616+ /* ddr clock frequency = (pixel clock frequency * bits per pixel) / (2 * lane count) */
17617+ mipi_2xclk = (dev_priv->pixelClock * dev_priv->bpp) / dev_priv->laneCount; /* KHz */
17618+ dev_priv->DDR_Clock_Calculated = mipi_2xclk / 2; /* KHz */
17619+
17620+ DRM_DEBUG("mrstDSI_clockInit RRate = %d, mipi_2xclk = %d. \n", RRate, mipi_2xclk);
17621+
17622+ if (dev_priv->sku_100)
17623+ {
17624+ p_mipi_2xclk = sku_100_mipi_2xclk;
17625+ }
17626+ else if (dev_priv->sku_100L)
17627+ {
17628+ p_mipi_2xclk = sku_100L_mipi_2xclk;
17629+ }
17630+ else
17631+ {
17632+ p_mipi_2xclk = sku_83_mipi_2xclk;
17633+ }
17634+
17635+ for (; i < MIPI_2XCLK_COUNT; i++)
17636+ {
17637+ if ((dev_priv->DDR_Clock_Calculated * 2) < p_mipi_2xclk[i])
17638+ break;
17639+ }
17640+
17641+ if (i == MIPI_2XCLK_COUNT)
17642+ {
17643+ DRM_DEBUG("mrstDSI_clockInit: the DDR clock is too high, DDR_Clock_Calculated = %d\n", dev_priv->DDR_Clock_Calculated);
17644+ return false;
17645+ }
17646+
17647+ dev_priv->DDR_Clock = p_mipi_2xclk[i] / 2;
17648+ dev_priv->ClockBits = i;
17649+
17650+#if 1 /* FIXME remove it after power on*/
17651+ DRM_DEBUG("mrstDSI_clockInit, mipi_2x_clock_divider = 0x%x, DDR_Clock_Calculated = %d\n", i, dev_priv->DDR_Clock_Calculated);
17652+#endif /* FIXME remove it after power on*/
17653+
17654+ return true;
17655+}
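/*
 * Worked example with the hard-coded DPI timings above (pixelClock 33264 kHz,
 * 24 bpp, and assuming laneCount = 2 as set in mrst_dsi_init):
 *
 *	Htotal = 10 + 8 + 3 + 800 = 821,  Vtotal = 2 + 3 + 2 + 480 = 487
 *	RRate  = 33264000 / (821 * 487) + 1 = 83 + 1 = 84 Hz
 *
 *	mipi_2xclk           = 33264 * 24 / 2 = 399168 kHz
 *	DDR_Clock_Calculated = 399168 / 2     = 199584 kHz
 *
 * On a sku_100 part the first table entry above 2 * 199584 = 399168 is
 * 400000, so DDR_Clock = 200000 kHz and ClockBits = 1.
 */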
17656+
17657+/**
17658+ * mrst_dsi_init - setup MIPI connectors on this device
17659+ * @dev: drm device
17660+ *
17661+ * Create the connector, try to figure out what
17662+ * modes we can display on the MIPI panel (if present).
17663+ */
17664+void mrst_dsi_init(struct drm_device *dev,
17665+ struct psb_intel_mode_device *mode_dev)
17666+{
17667+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
17668+ struct psb_intel_output *psb_intel_output;
17669+ struct drm_connector *connector;
17670+ struct drm_encoder *encoder;
17671+
17672+#if PRINT_JLIU7
17673+ DRM_INFO("JLIU7 enter mrst_dsi_init \n");
17674+#endif /* PRINT_JLIU7 */
17675+
17676+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
17677+ if (!psb_intel_output)
17678+ return;
17679+
17680+ psb_intel_output->mode_dev = mode_dev;
17681+ connector = &psb_intel_output->base;
17682+ encoder = &psb_intel_output->enc;
17683+ drm_connector_init(dev, &psb_intel_output->base,
17684+ &mrst_dsi_connector_funcs,
17685+ DRM_MODE_CONNECTOR_MIPI);
17686+
17687+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
17688+ DRM_MODE_ENCODER_MIPI);
17689+
17690+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
17691+ &psb_intel_output->enc);
17692+ psb_intel_output->type = INTEL_OUTPUT_MIPI;
17693+
17694+ drm_encoder_helper_add(encoder, &mrst_dsi_helper_funcs);
17695+ drm_connector_helper_add(connector,
17696+ &mrst_dsi_connector_helper_funcs);
17697+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
17698+ connector->interlace_allowed = false;
17699+ connector->doublescan_allowed = false;
17700+
17701+ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
17702+ drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL);
17703+
17704+ dsi_backlight = BRIGHTNESS_MAX_LEVEL;
17705+ blc_pol = BLC_POLARITY_NORMAL;
17706+ blc_freq = 0xc8;
17707+
17708+ /*
17709+ * MIPI discovery:
17710+ * 1) check for DDB data
17711+ * 2) check for VBT data
17712+ * 3) make sure lid is open
17713+ * if closed, act like it's not there for now
17714+ */
17715+
17716+ /* FIXME jliu7 we only support DPI */
17717+ dev_priv->dpi = true;
17718+
17719+ /* FIXME hard coded 4 lanes for Himax HX8858-A, 2 lanes for NSC LM2550 */
17720+ dev_priv->laneCount = 2;
17721+
17722+ /* FIXME hard coded for NSC PO. */
17723+ /* FIXME: only NON_BURST_MODE_SYNC_EVENTS is supported for now */
17724+ dev_priv->videoModeFormat = NON_BURST_MODE_SYNC_EVENTS; /* BURST_MODE */
17725+ /* FIXME change it to true if GET_DDB works */
17726+ dev_priv->config_phase = false;
17727+
17728+ if (!mrstDSI_clockInit(dev_priv))
17729+ {
17730+ DRM_DEBUG("Can't initialize MRST DSI clock.\n");
17731+#if 0 /* FIXME JLIU7 */
17732+ goto failed_find;
17733+#endif /* FIXME JLIU7 */
17734+ }
17735+
17736+ /*
17737+ * If we didn't get DDB data, try getting panel timing
17738+ * from configuration data
17739+ */
17740+ mode_dev->panel_fixed_mode = mrst_dsi_get_configuration_mode(dev);
17741+
17742+ if (mode_dev->panel_fixed_mode) {
17743+ mode_dev->panel_fixed_mode->type |=
17744+ DRM_MODE_TYPE_PREFERRED;
17745+ goto out; /* FIXME: check for quirks */
17746+ }
17747+
17748+ /* If we still don't have a mode after all that, give up. */
17749+ if (!mode_dev->panel_fixed_mode) {
17750+ DRM_DEBUG
17751+ ("Found no modes for the MIPI panel, ignoring it\n");
17752+ goto failed_find;
17753+ }
17754+
17755+out:
17756+ drm_sysfs_connector_add(connector);
17757+ return;
17758+
17759+failed_find:
17760+ DRM_DEBUG("No MIPI modes found, disabling.\n");
17761+ drm_encoder_cleanup(encoder);
17762+ drm_connector_cleanup(connector);
17763+ kfree(connector);
17764+}
17765diff --git a/drivers/gpu/drm/psb/psb_intel_i2c.c b/drivers/gpu/drm/psb/psb_intel_i2c.c
17766new file mode 100644
17767index 0000000..60165fd
17768--- /dev/null
17769+++ b/drivers/gpu/drm/psb/psb_intel_i2c.c
17770@@ -0,0 +1,179 @@
17771+/*
17772+ * Copyright © 2006-2007 Intel Corporation
17773+ *
17774+ * Permission is hereby granted, free of charge, to any person obtaining a
17775+ * copy of this software and associated documentation files (the "Software"),
17776+ * to deal in the Software without restriction, including without limitation
17777+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17778+ * and/or sell copies of the Software, and to permit persons to whom the
17779+ * Software is furnished to do so, subject to the following conditions:
17780+ *
17781+ * The above copyright notice and this permission notice (including the next
17782+ * paragraph) shall be included in all copies or substantial portions of the
17783+ * Software.
17784+ *
17785+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17786+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17787+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17788+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17789+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17790+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
17791+ * DEALINGS IN THE SOFTWARE.
17792+ *
17793+ * Authors:
17794+ * Eric Anholt <eric@anholt.net>
17795+ */
17796+/*
17797+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
17798+ * Jesse Barnes <jesse.barnes@intel.com>
17799+ */
17800+
17801+#include <linux/i2c.h>
17802+#include <linux/i2c-id.h>
17803+#include <linux/i2c-algo-bit.h>
17804+
17805+/*
17806+ * Intel GPIO access functions
17807+ */
17808+
17809+#define I2C_RISEFALL_TIME 20
17810+
17811+static int get_clock(void *data)
17812+{
17813+ struct psb_intel_i2c_chan *chan = data;
17814+ struct drm_device *dev = chan->drm_dev;
17815+ u32 val;
17816+
17817+ val = REG_READ(chan->reg);
17818+ return (val & GPIO_CLOCK_VAL_IN) != 0;
17819+}
17820+
17821+static int get_data(void *data)
17822+{
17823+ struct psb_intel_i2c_chan *chan = data;
17824+ struct drm_device *dev = chan->drm_dev;
17825+ u32 val;
17826+
17827+ val = REG_READ(chan->reg);
17828+ return (val & GPIO_DATA_VAL_IN) != 0;
17829+}
17830+
17831+static void set_clock(void *data, int state_high)
17832+{
17833+ struct psb_intel_i2c_chan *chan = data;
17834+ struct drm_device *dev = chan->drm_dev;
17835+ u32 reserved = 0, clock_bits;
17836+
17837+ /* On most chips, these bits must be preserved in software. */
17838+ if (!IS_I830(dev) && !IS_845G(dev))
17839+ reserved =
17840+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
17841+ GPIO_CLOCK_PULLUP_DISABLE);
17842+
17843+ if (state_high)
17844+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
17845+ else
17846+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
17847+ GPIO_CLOCK_VAL_MASK;
17848+ REG_WRITE(chan->reg, reserved | clock_bits);
17849+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
17850+}
17851+
17852+static void set_data(void *data, int state_high)
17853+{
17854+ struct psb_intel_i2c_chan *chan = data;
17855+ struct drm_device *dev = chan->drm_dev;
17856+ u32 reserved = 0, data_bits;
17857+
17858+ /* On most chips, these bits must be preserved in software. */
17859+ if (!IS_I830(dev) && !IS_845G(dev))
17860+ reserved =
17861+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
17862+ GPIO_CLOCK_PULLUP_DISABLE);
17863+
17864+ if (state_high)
17865+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
17866+ else
17867+ data_bits =
17868+ GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
17869+ GPIO_DATA_VAL_MASK;
17870+
17871+ REG_WRITE(chan->reg, reserved | data_bits);
17872+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
17873+}
17874+
17875+/**
17876+ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
17877+ * @dev: DRM device
17878+ * @output: driver specific output device
17879+ * @reg: GPIO reg to use
17880+ * @name: name for this bus
17881+ *
17882+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
17883+ * in output probing and control (e.g. DDC or SDVO control functions).
17884+ *
17885+ * Possible values for @reg include:
17886+ * %GPIOA
17887+ * %GPIOB
17888+ * %GPIOC
17889+ * %GPIOD
17890+ * %GPIOE
17891+ * %GPIOF
17892+ * %GPIOG
17893+ * %GPIOH
17894+ * see PRM for details on how these different busses are used.
17895+ */
17896+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
17897+ const u32 reg, const char *name)
17898+{
17899+ struct psb_intel_i2c_chan *chan;
17900+
17901+ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
17902+ if (!chan)
17903+ goto out_free;
17904+
17905+ chan->drm_dev = dev;
17906+ chan->reg = reg;
17907+ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
17908+ chan->adapter.owner = THIS_MODULE;
17909+ chan->adapter.algo_data = &chan->algo;
17910+ chan->adapter.dev.parent = &dev->pdev->dev;
17911+ chan->algo.setsda = set_data;
17912+ chan->algo.setscl = set_clock;
17913+ chan->algo.getsda = get_data;
17914+ chan->algo.getscl = get_clock;
17915+ chan->algo.udelay = 20;
17916+ chan->algo.timeout = usecs_to_jiffies(2200);
17917+ chan->algo.data = chan;
17918+
17919+ i2c_set_adapdata(&chan->adapter, chan);
17920+
17921+ if (i2c_bit_add_bus(&chan->adapter))
17922+ goto out_free;
17923+
17924+ /* JJJ: raise SCL and SDA? */
17925+ set_data(chan, 1);
17926+ set_clock(chan, 1);
17927+ udelay(20);
17928+
17929+ return chan;
17930+
17931+out_free:
17932+ kfree(chan);
17933+ return NULL;
17934+}
17935+
17936+/**
17937+ * psb_intel_i2c_destroy - unregister and free i2c bus resources
17938+ * @output: channel to free
17939+ *
17940+ * Unregister the adapter from the i2c layer, then free the structure.
17941+ */
17942+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
17943+{
17944+ if (!chan)
17945+ return;
17946+
17947+ i2c_del_adapter(&chan->adapter);
17948+ kfree(chan);
17949+}
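/*
 * Usage sketch (illustrative only; the GPIO register and bus name below are
 * assumptions, not taken from this patch):
 *
 *	struct psb_intel_i2c_chan *ddc;
 *
 *	ddc = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
 *	if (ddc) {
 *		... probe DDC/EDID over &ddc->adapter ...
 *		psb_intel_i2c_destroy(ddc);
 *	}
 */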
17950diff --git a/drivers/gpu/drm/psb/psb_intel_lvds.c b/drivers/gpu/drm/psb/psb_intel_lvds.c
17951new file mode 100644
17952index 0000000..4fa29f8
17953--- /dev/null
17954+++ b/drivers/gpu/drm/psb/psb_intel_lvds.c
17955@@ -0,0 +1,1343 @@
17956+/*
17957+ * Copyright © 2006-2007 Intel Corporation
17958+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
17959+ *
17960+ * Permission is hereby granted, free of charge, to any person obtaining a
17961+ * copy of this software and associated documentation files (the "Software"),
17962+ * to deal in the Software without restriction, including without limitation
17963+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17964+ * and/or sell copies of the Software, and to permit persons to whom the
17965+ * Software is furnished to do so, subject to the following conditions:
17966+ *
17967+ * The above copyright notice and this permission notice (including the next
17968+ * paragraph) shall be included in all copies or substantial portions of the
17969+ * Software.
17970+ *
17971+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17972+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17973+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17974+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17975+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17976+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
17977+ * DEALINGS IN THE SOFTWARE.
17978+ *
17979+ * Authors:
17980+ * Eric Anholt <eric@anholt.net>
17981+ * Dave Airlie <airlied@linux.ie>
17982+ * Jesse Barnes <jesse.barnes@intel.com>
17983+ */
17984+
17985+#include <linux/i2c.h>
17986+#include <drm/drm_crtc.h>
17987+#include <drm/drm_edid.h>
17988+
17989+#include "psb_intel_bios.h"
17990+#include "psb_powermgmt.h"
17991+
17992+/* MRST defines start */
17993+uint8_t blc_type;
17994+uint8_t blc_pol;
17995+uint8_t blc_freq;
17996+uint8_t blc_minbrightness;
17997+uint8_t blc_i2caddr;
17998+uint8_t blc_brightnesscmd;
17999+int lvds_backlight; /* restore backlight to this value */
18000+
18001+u32 CoreClock;
18002+u32 PWMControlRegFreq;
18003+
18004+/**
18005+ * LVDS I2C backlight control macros
18006+ */
18007+#define BRIGHTNESS_MAX_LEVEL 100
18008+#define BRIGHTNESS_MASK 0xFF
18009+#define BLC_I2C_TYPE 0x01
18010+#define BLC_PWM_TYPT 0x02
18011+
18012+#define BLC_POLARITY_NORMAL 0
18013+#define BLC_POLARITY_INVERSE 1
18014+
18015+#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
18016+#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
18017+#define PSB_BLC_PWM_PRECISION_FACTOR (10)
18018+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
18019+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
18020+
18021+struct psb_intel_lvds_priv {
18022+ /**
18023+ * Saved LVDS output states
18024+ */
18025+ uint32_t savePP_ON;
18026+ uint32_t savePP_OFF;
18027+ uint32_t saveLVDS;
18028+ uint32_t savePP_CONTROL;
18029+ uint32_t savePP_CYCLE;
18030+ uint32_t savePFIT_CONTROL;
18031+ uint32_t savePFIT_PGM_RATIOS;
18032+ uint32_t saveBLC_PWM_CTL;
18033+};
18034+
18035+/* MRST defines end */
18036+
18037+/**
18038+ * Returns the maximum level of the backlight duty cycle field.
18039+ */
18040+static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
18041+{
18042+ struct drm_psb_private *dev_priv = dev->dev_private;
18043+ u32 retVal;
18044+
18045+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
18046+ retVal = ((REG_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
18047+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
18048+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
18049+ } else
18050+ retVal = ((dev_priv->saveBLC_PWM_CTL & BACKLIGHT_MODULATION_FREQ_MASK) >>
18051+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
18052+
18053+ return retVal;
18054+}
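/*
 * Example of the decode above (hypothetical register contents): if the
 * backlight modulation frequency field of BLC_PWM_CTL reads 0xc8 (200),
 * the maximum backlight value reported is 200 * 2 = 400.
 */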
18055+
18056+/**
18057+ * Set LVDS backlight level by I2C command
18058+ */
18059+static int psb_lvds_i2c_set_brightness(struct drm_device * dev, unsigned int level)
18060+ {
18061+ struct drm_psb_private * dev_priv =
18062+ (struct drm_psb_private*)dev->dev_private;
18063+
18064+ struct psb_intel_i2c_chan * lvds_i2c_bus = dev_priv->lvds_i2c_bus;
18065+ u8 out_buf[2];
18066+ unsigned int blc_i2c_brightness;
18067+
18068+ struct i2c_msg msgs[] = {
18069+ {
18070+ .addr = lvds_i2c_bus->slave_addr,
18071+ .flags = 0,
18072+ .len = 2,
18073+ .buf = out_buf,
18074+ }
18075+ };
18076+
18077+ blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
18078+ BRIGHTNESS_MASK /
18079+ BRIGHTNESS_MAX_LEVEL);
18080+
18081+ if(dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE) {
18082+ blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
18083+ }
18084+
18085+
18086+ out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
18087+ out_buf[1] = (u8)blc_i2c_brightness;
18088+
18089+ if(i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
18090+ DRM_DEBUG("I2C set brightness done. (command, value) = (%d, %d)\n", out_buf[0], blc_i2c_brightness);
18091+ return 0;
18092+ }
18093+
18094+ DRM_ERROR("I2C transfer error\n");
18095+ return -1;
18096+}
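/*
 * Worked example of the scaling above: for level = 50 out of
 * BRIGHTNESS_MAX_LEVEL = 100,
 *
 *	blc_i2c_brightness = 0xFF & (50 * 255 / 100) = 127
 *
 * and with inverse polarity this becomes 255 - 127 = 128.
 */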
18097+
18098+
18099+static int psb_lvds_pwm_set_brightness(struct drm_device * dev, int level)
18100+{
18101+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
18102+
18103+ u32 max_pwm_blc;
18104+ u32 blc_pwm_duty_cycle;
18105+
18106+ max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
18107+
18108+ /* BLC_PWM_CTL should be initialized during backlight device init */
18109+ BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ ) == 0);
18110+
18111+ blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
18112+
18113+ if(dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE){
18114+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
18115+ }
18116+
18117+ blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
18118+ REG_WRITE(BLC_PWM_CTL,
18119+ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
18120+ (blc_pwm_duty_cycle));
18121+
18122+ return 0;
18123+}
18124+
18125+/**
18126+ * Set LVDS backlight level either by I2C or PWM
18127+ */
18128+void psb_intel_lvds_set_brightness(struct drm_device * dev, int level)
18129+{
18130+ /*u32 blc_pwm_ctl;*/
18131+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
18132+
18133+ DRM_DEBUG("backlight level is %d\n", level);
18134+
18135+ if(!dev_priv->lvds_bl) {
18136+ DRM_ERROR("NO LVDS Backlight Info\n");
18137+ return;
18138+ }
18139+
18140+ if(IS_MRST(dev)) {
18141+ DRM_ERROR("psb_intel_lvds_set_brightness called from MRST...not expected\n");
18142+ return;
18143+ }
18144+
18145+ if(dev_priv->lvds_bl->type == BLC_I2C_TYPE) {
18146+ psb_lvds_i2c_set_brightness(dev, level);
18147+ } else {
18148+ psb_lvds_pwm_set_brightness(dev, level);
18149+ }
18150+}
18151+
18152+/**
18153+ * Sets the backlight level.
18154+ *
18155+ * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight().
18156+ */
18157+static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
18158+{
18159+ struct drm_psb_private *dev_priv = dev->dev_private;
18160+ u32 blc_pwm_ctl;
18161+
18162+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false) ) {
18163+ blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
18164+ REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
18165+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
18166+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
18167+ } else {
18168+ blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL & ~BACKLIGHT_DUTY_CYCLE_MASK;
18169+ dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
18170+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
18171+ }
18172+}
18173+
18174+/**
18175+ * Sets the power state for the panel.
18176+ */
18177+static void psb_intel_lvds_set_power(struct drm_device *dev,
18178+ struct psb_intel_output *output, bool on)
18179+{
18180+ u32 pp_status;
18181+
18182+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
18183+
18184+ if (on) {
18185+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
18186+ POWER_TARGET_ON);
18187+ do {
18188+ pp_status = REG_READ(PP_STATUS);
18189+ } while ((pp_status & PP_ON) == 0);
18190+
18191+ psb_intel_lvds_set_backlight(dev,
18192+ output->
18193+ mode_dev->backlight_duty_cycle);
18194+ } else {
18195+ psb_intel_lvds_set_backlight(dev, 0);
18196+
18197+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
18198+ ~POWER_TARGET_ON);
18199+ do {
18200+ pp_status = REG_READ(PP_STATUS);
18201+ } while (pp_status & PP_ON);
18202+ }
18203+
18204+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
18205+}
18206+
18207+static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
18208+{
18209+ struct drm_device *dev = encoder->dev;
18210+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
18211+
18212+ if (mode == DRM_MODE_DPMS_ON)
18213+ psb_intel_lvds_set_power(dev, output, true);
18214+ else
18215+ psb_intel_lvds_set_power(dev, output, false);
18216+
18217+ /* XXX: We never power down the LVDS pairs. */
18218+}
18219+
18220+static void psb_intel_lvds_save(struct drm_connector *connector)
18221+{
18222+ struct drm_device *dev = connector->dev;
18223+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
18224+ struct psb_intel_output * psb_intel_output = to_psb_intel_output(connector);
18225+ struct psb_intel_lvds_priv * lvds_priv =
18226+ (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
18227+
18228+ if(IS_POULSBO(dev)) {
18229+ lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
18230+ lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
18231+ lvds_priv->saveLVDS = REG_READ(LVDS);
18232+ lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
18233+ lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
18234+ /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
18235+ lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
18236+ lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
18237+ lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
18238+
18239+ /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
18240+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
18241+ BACKLIGHT_DUTY_CYCLE_MASK);
18242+
18243+ /*
18244+ * If the light is off at server startup, just make it full brightness
18245+ */
18246+ if (dev_priv->backlight_duty_cycle == 0)
18247+ dev_priv->backlight_duty_cycle =
18248+ psb_intel_lvds_get_max_backlight(dev);
18249+
18250+ DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", lvds_priv->savePP_ON,
18251+ lvds_priv->savePP_OFF,
18252+ lvds_priv->saveLVDS,
18253+ lvds_priv->savePP_CONTROL,
18254+ lvds_priv->savePP_CYCLE,
18255+ lvds_priv->saveBLC_PWM_CTL);
18256+ }
18257+}
18258+
18259+static void psb_intel_lvds_restore(struct drm_connector *connector)
18260+{
18261+ struct drm_device *dev = connector->dev;
18262+ u32 pp_status;
18263+
18264+ /*struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;*/
18265+ struct psb_intel_output * psb_intel_output = to_psb_intel_output(connector);
18266+ struct psb_intel_lvds_priv * lvds_priv =
18267+ (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
18268+
18269+ if(IS_POULSBO(dev)) {
18270+ DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", lvds_priv->savePP_ON,
18271+ lvds_priv->savePP_OFF,
18272+ lvds_priv->saveLVDS,
18273+ lvds_priv->savePP_CONTROL,
18274+ lvds_priv->savePP_CYCLE,
18275+ lvds_priv->saveBLC_PWM_CTL);
18276+
18277+ REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
18278+ REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
18279+ REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
18280+ REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
18281+ REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
18282+ /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
18283+ REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
18284+ REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
18285+ REG_WRITE(LVDS, lvds_priv->saveLVDS);
18286+
18287+ if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
18288+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
18289+ POWER_TARGET_ON);
18290+ do {
18291+ pp_status = REG_READ(PP_STATUS);
18292+ } while((pp_status & PP_ON) == 0);
18293+ } else {
18294+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
18295+ ~POWER_TARGET_ON);
18296+ do {
18297+ pp_status = REG_READ(PP_STATUS);
18298+ }while(pp_status & PP_ON);
18299+ }
18300+ }
18301+}
18302+
18303+static int psb_intel_lvds_mode_valid(struct drm_connector *connector,
18304+ struct drm_display_mode *mode)
18305+{
18306+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
18307+ struct drm_display_mode *fixed_mode =
18308+ psb_intel_output->mode_dev->panel_fixed_mode;
18309+
18310+#if PRINT_JLIU7
18311+ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_valid \n");
18312+#endif /* PRINT_JLIU7 */
18313+
18314+ /* just in case */
18315+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
18316+ return MODE_NO_DBLESCAN;
18317+
18318+ /* just in case */
18319+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
18320+ return MODE_NO_INTERLACE;
18321+
18322+ if (fixed_mode) {
18323+ if (mode->hdisplay > fixed_mode->hdisplay)
18324+ return MODE_PANEL;
18325+ if (mode->vdisplay > fixed_mode->vdisplay)
18326+ return MODE_PANEL;
18327+ }
18328+ return MODE_OK;
18329+}
18330+
18331+static bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
18332+ struct drm_display_mode *mode,
18333+ struct drm_display_mode *adjusted_mode)
18334+{
18335+ struct psb_intel_mode_device *mode_dev =
18336+ enc_to_psb_intel_output(encoder)->mode_dev;
18337+ struct drm_device *dev = encoder->dev;
18338+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc);
18339+ struct drm_encoder *tmp_encoder;
18340+
18341+#if PRINT_JLIU7
18342+ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_fixup \n");
18343+#endif /* PRINT_JLIU7 */
18344+
18345+ /* Should never happen!! */
18346+ if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
18347+ printk(KERN_ERR
18348+ "Can't support LVDS/MIPI on pipe B on MRST\n");
18349+ return false;
18350+ } else if (!IS_MRST(dev) && !IS_I965G(dev)
18351+ && psb_intel_crtc->pipe == 0) {
18352+ printk(KERN_ERR "Can't support LVDS on pipe A\n");
18353+ return false;
18354+ }
18355+ /* Should never happen!! */
18356+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
18357+ head) {
18358+ if (tmp_encoder != encoder
18359+ && tmp_encoder->crtc == encoder->crtc) {
18360+ printk(KERN_ERR "Can't enable LVDS and another "
18361+ "encoder on the same pipe\n");
18362+ return false;
18363+ }
18364+ }
18365+
18366+ /*
18367+ * If we have timings from the BIOS for the panel, put them in
18368+ * to the adjusted mode. The CRTC will be set up for this mode,
18369+ * with the panel scaling set up to source from the H/VDisplay
18370+ * of the original mode.
18371+ */
18372+ if (mode_dev->panel_fixed_mode != NULL) {
18373+ adjusted_mode->hdisplay =
18374+ mode_dev->panel_fixed_mode->hdisplay;
18375+ adjusted_mode->hsync_start =
18376+ mode_dev->panel_fixed_mode->hsync_start;
18377+ adjusted_mode->hsync_end =
18378+ mode_dev->panel_fixed_mode->hsync_end;
18379+ adjusted_mode->htotal = mode_dev->panel_fixed_mode->htotal;
18380+ adjusted_mode->vdisplay =
18381+ mode_dev->panel_fixed_mode->vdisplay;
18382+ adjusted_mode->vsync_start =
18383+ mode_dev->panel_fixed_mode->vsync_start;
18384+ adjusted_mode->vsync_end =
18385+ mode_dev->panel_fixed_mode->vsync_end;
18386+ adjusted_mode->vtotal = mode_dev->panel_fixed_mode->vtotal;
18387+ adjusted_mode->clock = mode_dev->panel_fixed_mode->clock;
18388+ drm_mode_set_crtcinfo(adjusted_mode,
18389+ CRTC_INTERLACE_HALVE_V);
18390+ }
18391+
18392+ /*
18393+ * XXX: It would be nice to support lower refresh rates on the
18394+ * panels to reduce power consumption, and perhaps match the
18395+ * user's requested refresh rate.
18396+ */
18397+
18398+ return true;
18399+}
18400+
18401+static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
18402+{
18403+ struct drm_device *dev = encoder->dev;
18404+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
18405+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
18406+
18407+#if PRINT_JLIU7
18408+ DRM_INFO("JLIU7 enter psb_intel_lvds_prepare \n");
18409+#endif /* PRINT_JLIU7 */
18410+
18411+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
18412+
18413+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
18414+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
18415+ BACKLIGHT_DUTY_CYCLE_MASK);
18416+
18417+ psb_intel_lvds_set_power(dev, output, false);
18418+
18419+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
18420+}
18421+
18422+static void psb_intel_lvds_commit(struct drm_encoder *encoder)
18423+{
18424+ struct drm_device *dev = encoder->dev;
18425+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
18426+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
18427+
18428+#if PRINT_JLIU7
18429+ DRM_INFO("JLIU7 enter psb_intel_lvds_commit \n");
18430+#endif /* PRINT_JLIU7 */
18431+
18432+ if (mode_dev->backlight_duty_cycle == 0)
18433+ mode_dev->backlight_duty_cycle =
18434+ psb_intel_lvds_get_max_backlight(dev);
18435+
18436+ psb_intel_lvds_set_power(dev, output, true);
18437+}
18438+
18439+static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
18440+ struct drm_display_mode *mode,
18441+ struct drm_display_mode *adjusted_mode)
18442+{
18443+ struct psb_intel_mode_device *mode_dev =
18444+ enc_to_psb_intel_output(encoder)->mode_dev;
18445+ struct drm_device *dev = encoder->dev;
18446+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc);
18447+ u32 pfit_control;
18448+
18449+ /*
18450+ * The LVDS pin pair will already have been turned on in the
18451+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
18452+ * settings.
18453+ */
18454+
18455+ /*
18456+ * Enable automatic panel scaling so that non-native modes fill the
18457+ * screen. Should be enabled before the pipe is enabled, according to
18458+ * register description and PRM.
18459+ */
18460+ if (mode->hdisplay != adjusted_mode->hdisplay ||
18461+ mode->vdisplay != adjusted_mode->vdisplay)
18462+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
18463+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
18464+ HORIZ_INTERP_BILINEAR);
18465+ else
18466+ pfit_control = 0;
18467+
18468+ if (!IS_I965G(dev)) {
18469+ if (mode_dev->panel_wants_dither)
18470+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
18471+ } else
18472+ pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
18473+
18474+ REG_WRITE(PFIT_CONTROL, pfit_control);
18475+}
18476+
18477+/**
18478+ * Detect the LVDS connection.
18479+ *
18480+ * This always returns connector_status_connected.
18481+ * The connector should only have
18482+ * been set up if an LVDS panel was actually connected.
18483+ */
18484+static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
18485+ *connector)
18486+{
18487+ return connector_status_connected;
18488+}
18489+
18490+/**
18491+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
18492+ */
18493+static int psb_intel_lvds_get_modes(struct drm_connector *connector)
18494+{
18495+ struct drm_device *dev = connector->dev;
18496+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
18497+ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
18498+ int ret = 0;
18499+
18500+ if (!IS_MRST(dev))
18501+ ret = psb_intel_ddc_get_modes(psb_intel_output);
18502+
18503+ if (ret)
18504+ return ret;
18505+
18506+ /* Didn't get an EDID, so set wide sync
18507+ * ranges so that every mode gets
18508+ * handed to valid_mode for checking.
18509+ */
18510+ connector->display_info.min_vfreq = 0;
18511+ connector->display_info.max_vfreq = 200;
18512+ connector->display_info.min_hfreq = 0;
18513+ connector->display_info.max_hfreq = 200;
18514+
18515+ if (mode_dev->panel_fixed_mode != NULL) {
18516+ struct drm_display_mode *mode =
18517+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
18518+ drm_mode_probed_add(connector, mode);
18519+ return 1;
18520+ }
18521+
18522+ return 0;
18523+}
18524+
18525+/**
18526+ * psb_intel_lvds_destroy - unregister and free LVDS structures
18527+ * @connector: connector to free
18528+ *
18529+ * Unregister the DDC bus for this connector then free the driver private
18530+ * structure.
18531+ */
18532+static void psb_intel_lvds_destroy(struct drm_connector *connector)
18533+{
18534+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
18535+
18536+ if (psb_intel_output->ddc_bus)
18537+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
18538+ drm_sysfs_connector_remove(connector);
18539+ drm_connector_cleanup(connector);
18540+ kfree(connector);
18541+}
18542+
18543+static void psb_intel_lvds_connector_dpms(struct drm_connector *connector, int mode)
18544+{
18545+ struct drm_encoder *pEncoder = connector->encoder;
18546+ struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private;
18547+ struct drm_crtc_helper_funcs *pCrtcHFuncs = pEncoder->crtc->helper_private;
18548+ struct drm_device * dev = connector->dev;
18549+ pEncHFuncs->dpms(pEncoder, mode);
18550+ /*FIXME: crtc dpms will crash kernel on menlow*/
18551+ if (IS_MRST(dev))
18552+ pCrtcHFuncs->dpms(pEncoder->crtc, mode);
18553+}
18554+
18555+static int psb_intel_lvds_set_property(struct drm_connector *connector,
18556+ struct drm_property *property,
18557+ uint64_t value)
18558+{
18559+ struct drm_encoder *pEncoder = connector->encoder;
18560+
18561+ if (!strcmp(property->name, "scaling mode") && pEncoder) {
18562+ struct psb_intel_crtc *pPsbCrtc = to_psb_intel_crtc(pEncoder->crtc);
18563+ bool bTransitionFromToCentered;
18564+ uint64_t curValue;
18565+
18566+ if (!pPsbCrtc)
18567+ goto set_prop_error;
18568+
18569+ switch (value) {
18570+ case DRM_MODE_SCALE_FULLSCREEN:
18571+ break;
18572+ case DRM_MODE_SCALE_NO_SCALE:
18573+ break;
18574+ case DRM_MODE_SCALE_ASPECT:
18575+ break;
18576+ default:
18577+ goto set_prop_error;
18578+ }
18579+
18580+ if (drm_connector_property_get_value(connector, property, &curValue))
18581+ goto set_prop_error;
18582+
18583+ if (curValue == value)
18584+ goto set_prop_done;
18585+
18586+ if (drm_connector_property_set_value(connector, property, value))
18587+ goto set_prop_error;
18588+
18589+ bTransitionFromToCentered = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
18590+ (value == DRM_MODE_SCALE_NO_SCALE);
18591+
18592+ if (pPsbCrtc->saved_mode.hdisplay != 0 &&
18593+ pPsbCrtc->saved_mode.vdisplay != 0) {
18594+ if (bTransitionFromToCentered) {
18595+ if (!drm_crtc_helper_set_mode(pEncoder->crtc, &pPsbCrtc->saved_mode,
18596+ pEncoder->crtc->x, pEncoder->crtc->y, pEncoder->crtc->fb))
18597+ goto set_prop_error;
18598+ } else {
18599+ struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private;
18600+ pEncHFuncs->mode_set(pEncoder, &pPsbCrtc->saved_mode,
18601+ &pPsbCrtc->saved_adjusted_mode);
18602+ }
18603+ }
18604+ } else if (!strcmp(property->name, "backlight") && pEncoder) {
18605+ if (drm_connector_property_set_value(connector, property, value))
18606+ goto set_prop_error;
18607+ else {
18608+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
18609+ struct backlight_device bd;
18610+ bd.props.brightness = value;
18611+ psb_set_brightness(&bd);
18612+#endif
18613+ }
18614+ } else if (!strcmp(property->name, "DPMS") && pEncoder) {
18615+ struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private;
18616+ /*struct drm_crtc_helper_funcs *pCrtcHFuncs = pEncoder->crtc->helper_private;*/
18617+ pEncHFuncs->dpms(pEncoder, value);
18618+ /*pCrtcHFuncs->dpms(pEncoder->crtc, value);*/
18619+ }
18620+
18621+set_prop_done:
18622+ return 0;
18623+set_prop_error:
18624+ return -1;
18625+}
18626+
18627+static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
18628+ .dpms = psb_intel_lvds_encoder_dpms,
18629+ .mode_fixup = psb_intel_lvds_mode_fixup,
18630+ .prepare = psb_intel_lvds_prepare,
18631+ .mode_set = psb_intel_lvds_mode_set,
18632+ .commit = psb_intel_lvds_commit,
18633+};
18634+
18635+static const struct drm_connector_helper_funcs
18636+ psb_intel_lvds_connector_helper_funcs = {
18637+ .get_modes = psb_intel_lvds_get_modes,
18638+ .mode_valid = psb_intel_lvds_mode_valid,
18639+ .best_encoder = psb_intel_best_encoder,
18640+};
18641+
18642+static const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
18643+ .dpms = psb_intel_lvds_connector_dpms,
18644+ .save = psb_intel_lvds_save,
18645+ .restore = psb_intel_lvds_restore,
18646+ .detect = psb_intel_lvds_detect,
18647+ .fill_modes = drm_helper_probe_single_connector_modes,
18648+ .set_property = psb_intel_lvds_set_property,
18649+ .destroy = psb_intel_lvds_destroy,
18650+};
18651+
18652+
18653+static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
18654+{
18655+ drm_encoder_cleanup(encoder);
18656+}
18657+
18658+static const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
18659+ .destroy = psb_intel_lvds_enc_destroy,
18660+};
18661+
18662+
18663+
18664+/**
18665+ * psb_intel_lvds_init - setup LVDS connectors on this device
18666+ * @dev: drm device
18667+ *
18668+ * Create the connector, register the LVDS DDC bus, and try to figure out what
18669+ * modes we can display on the LVDS panel (if present).
18670+ */
18671+void psb_intel_lvds_init(struct drm_device *dev,
18672+ struct psb_intel_mode_device *mode_dev)
18673+{
18674+ struct psb_intel_output *psb_intel_output;
18675+ struct psb_intel_lvds_priv * lvds_priv;
18676+ struct drm_connector *connector;
18677+ struct drm_encoder *encoder;
18678+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
18679+ struct drm_crtc *crtc;
18680+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
18681+ u32 lvds;
18682+ int pipe;
18683+
18684+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
18685+ if (!psb_intel_output)
18686+ return;
18687+
18688+ lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
18689+ if(!lvds_priv) {
18690+ kfree(psb_intel_output);
18691+ DRM_DEBUG("LVDS private allocation error\n");
18692+ return;
18693+ }
18694+
18695+ psb_intel_output->dev_priv = lvds_priv;
18696+
18697+ psb_intel_output->mode_dev = mode_dev;
18698+ connector = &psb_intel_output->base;
18699+ encoder = &psb_intel_output->enc;
18700+ drm_connector_init(dev, &psb_intel_output->base,
18701+ &psb_intel_lvds_connector_funcs,
18702+ DRM_MODE_CONNECTOR_LVDS);
18703+
18704+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
18705+ DRM_MODE_ENCODER_LVDS);
18706+
18707+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
18708+ &psb_intel_output->enc);
18709+ psb_intel_output->type = INTEL_OUTPUT_LVDS;
18710+
18711+ drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
18712+ drm_connector_helper_add(connector,
18713+ &psb_intel_lvds_connector_helper_funcs);
18714+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
18715+ connector->interlace_allowed = false;
18716+ connector->doublescan_allowed = false;
18717+
18718+ /*Attach connector properties*/
18719+ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
18720+ drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL);
18721+
18722+ /**
18723+ * Set up I2C bus
18724+ * FIXME: destroy i2c_bus on exit
18725+ */
18726+ psb_intel_output->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
18727+ if(!psb_intel_output->i2c_bus) {
18728+ dev_printk(KERN_ERR,
18729+ &dev->pdev->dev, "I2C bus registration failed.\n");
18730+ goto failed_blc_i2c;
18731+ }
18732+ psb_intel_output->i2c_bus->slave_addr = 0x2C;
18733+ dev_priv->lvds_i2c_bus = psb_intel_output->i2c_bus;
18734+
18735+ /*
18736+ * LVDS discovery:
18737+ * 1) check for EDID on DDC
18738+ * 2) check for VBT data
18739+ * 3) check to see if LVDS is already on
18740+ * if none of the above, no panel
18741+ * 4) make sure lid is open
18742+ * if closed, act like it's not there for now
18743+ */
18744+
18745+ /* Set up the DDC bus. */
18746+ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
18747+ if (!psb_intel_output->ddc_bus) {
18748+ dev_printk(KERN_ERR, &dev->pdev->dev,
18749+ "DDC bus registration " "failed.\n");
18750+ goto failed_ddc;
18751+ }
18752+
18753+ /*
18754+ * Attempt to get the fixed panel mode from DDC. Assume that the
18755+ * preferred mode is the right one.
18756+ */
18757+ psb_intel_ddc_get_modes(psb_intel_output);
18758+ list_for_each_entry(scan, &connector->probed_modes, head) {
18759+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
18760+ mode_dev->panel_fixed_mode =
18761+ drm_mode_duplicate(dev, scan);
18762+ goto out; /* FIXME: check for quirks */
18763+ }
18764+ }
18765+
18766+ /* Failed to get EDID, what about VBT? do we need this?*/
18767+ if (mode_dev->vbt_mode)
18768+ mode_dev->panel_fixed_mode =
18769+ drm_mode_duplicate(dev, mode_dev->vbt_mode);
18770+
18771+ if(!mode_dev->panel_fixed_mode)
18772+ if (dev_priv->lfp_lvds_vbt_mode)
18773+ mode_dev->panel_fixed_mode =
18774+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
18775+
18776+ /*
18777+ * If we didn't get EDID, try checking if the panel is already turned
18778+ * on. If so, assume that whatever is currently programmed is the
18779+ * correct mode.
18780+ */
18781+ lvds = REG_READ(LVDS);
18782+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
18783+ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
18784+
18785+ if (crtc && (lvds & LVDS_PORT_EN)) {
18786+ mode_dev->panel_fixed_mode =
18787+ psb_intel_crtc_mode_get(dev, crtc);
18788+ if (mode_dev->panel_fixed_mode) {
18789+ mode_dev->panel_fixed_mode->type |=
18790+ DRM_MODE_TYPE_PREFERRED;
18791+ goto out; /* FIXME: check for quirks */
18792+ }
18793+ }
18794+
18795+ /* If we still don't have a mode after all that, give up. */
18796+ if (!mode_dev->panel_fixed_mode) {
18797+ DRM_DEBUG
18798+ ("Found no modes on the lvds, ignoring the LVDS\n");
18799+ goto failed_find;
18800+ }
18801+
18802+ /* FIXME: detect aopen & mac mini type stuff automatically? */
18803+ /*
18804+ * Blacklist machines with BIOSes that list an LVDS panel without
18805+ * actually having one.
18806+ */
18807+ if (IS_I945GM(dev)) {
18808+ /* aopen mini pc */
18809+ if (dev->pdev->subsystem_vendor == 0xa0a0) {
18810+ DRM_DEBUG
18811+ ("Suspected AOpen Mini PC, ignoring the LVDS\n");
18812+ goto failed_find;
18813+ }
18814+
18815+ if ((dev->pdev->subsystem_vendor == 0x8086) &&
18816+ (dev->pdev->subsystem_device == 0x7270)) {
18817+ /* It's a Mac Mini or Macbook Pro. */
18818+
18819+ if (mode_dev->panel_fixed_mode != NULL &&
18820+ mode_dev->panel_fixed_mode->hdisplay == 800 &&
18821+ mode_dev->panel_fixed_mode->vdisplay == 600) {
18822+ DRM_DEBUG
18823+ ("Suspected Mac Mini, ignoring the LVDS\n");
18824+ goto failed_find;
18825+ }
18826+ }
18827+ }
18828+
18829+out:
18830+ drm_sysfs_connector_add(connector);
18831+
18832+#if PRINT_JLIU7
18833+ DRM_INFO("PRINT_JLIU7 hdisplay = %d\n",
18834+ mode_dev->panel_fixed_mode->hdisplay);
18835+ DRM_INFO("PRINT_JLIU7 vdisplay = %d\n",
18836+ mode_dev->panel_fixed_mode->vdisplay);
18837+ DRM_INFO("PRINT_JLIU7 hsync_start = %d\n",
18838+ mode_dev->panel_fixed_mode->hsync_start);
18839+ DRM_INFO("PRINT_JLIU7 hsync_end = %d\n",
18840+ mode_dev->panel_fixed_mode->hsync_end);
18841+ DRM_INFO("PRINT_JLIU7 htotal = %d\n",
18842+ mode_dev->panel_fixed_mode->htotal);
18843+ DRM_INFO("PRINT_JLIU7 vsync_start = %d\n",
18844+ mode_dev->panel_fixed_mode->vsync_start);
18845+ DRM_INFO("PRINT_JLIU7 vsync_end = %d\n",
18846+ mode_dev->panel_fixed_mode->vsync_end);
18847+ DRM_INFO("PRINT_JLIU7 vtotal = %d\n",
18848+ mode_dev->panel_fixed_mode->vtotal);
18849+ DRM_INFO("PRINT_JLIU7 clock = %d\n",
18850+ mode_dev->panel_fixed_mode->clock);
18851+#endif /* PRINT_JLIU7 */
18852+ return;
18853+
18854+failed_find:
18855+ if (psb_intel_output->ddc_bus)
18856+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
18857+failed_ddc:
18858+ if (psb_intel_output->i2c_bus)
18859+ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
18860+failed_blc_i2c:
18861+ drm_encoder_cleanup(encoder);
18862+ drm_connector_cleanup(connector);
18863+ kfree(connector);
18864+}
18865+
18866+/* MRST platform start */
18867+
18868+/*
18869+ * FIXME need to move to register define head file
18870+ */
18871+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
18872+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
18873+
18874+/* The max/min PWM frequency in BPCR[31:17]: */
18875+/* the smallest number that can fit in the
18876+ * 15-bit field of the register is 1 (not 0); */
18877+/* it is then shifted to the left by one bit to get the
18878+ * actual 16-bit value that the 15 bits correspond to. */
18879+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
18880+
18881+#define BRIGHTNESS_MAX_LEVEL 100
18882+#define BLC_PWM_PRECISION_FACTOR 10 /* 10000000 */
18883+#define BLC_PWM_FREQ_CALC_CONSTANT 32
18884+#define MHz 1000000
18885+#define BLC_POLARITY_NORMAL 0
18886+#define BLC_POLARITY_INVERSE 1
18887+
18888+/**
18889+ * Calculate PWM control register value.
18890+ */
18891+#if 0
18892+static bool mrstLVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
18893+{
18894+ unsigned long value = 0;
18895+ if (blc_freq == 0) {
18896+ /* DRM_ERROR(KERN_ERR "mrstLVDSCalculatePWMCtrlRegFreq:
18897+ * Frequency Requested is 0.\n"); */
18898+ return false;
18899+ }
18900+
18901+ value = (CoreClock * MHz);
18902+ value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
18903+ value = (value * BLC_PWM_PRECISION_FACTOR);
18904+ value = (value / blc_freq);
18905+ value = (value / BLC_PWM_PRECISION_FACTOR);
18906+
18907+ if (value > (unsigned long) MRST_BLC_MAX_PWM_REG_FREQ) {
18908+ return 0;
18909+ } else {
18910+ PWMControlRegFreq = (u32) value;
18911+ return 1;
18912+ }
18913+}
18914+#endif
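/*
 * A minimal sketch of the calculation done by the disabled
 * mrstLVDSCalculatePWMCtrlRegFreq() above, using local parameters in place of
 * the driver-wide CoreClock, blc_freq and PWMControlRegFreq values that are
 * defined elsewhere in this driver.  The precision factor is multiplied in and
 * divided back out, so the result effectively reduces to
 * (core clock in Hz) / (BLC_PWM_FREQ_CALC_CONSTANT * PWM frequency).  For
 * example, a 200 MHz core clock and a 200 Hz backlight PWM give
 * 200000000 / (32 * 200) = 31250, which fits in the 16-bit frequency field.
 * The helper name and parameters below are illustrative only.
 */
static unsigned long mrst_pwm_ctrl_reg_freq_sketch(unsigned long core_clock_mhz,
						   unsigned long pwm_freq_hz)
{
	unsigned long value;

	if (pwm_freq_hz == 0)
		return 0;			/* invalid request */

	value = core_clock_mhz * MHz;		/* core clock in Hz */
	value /= BLC_PWM_FREQ_CALC_CONSTANT;
	value /= pwm_freq_hz;			/* cycles per PWM period */

	/* Reject anything that does not fit in the 16-bit frequency field. */
	return (value > MRST_BLC_MAX_PWM_REG_FREQ) ? 0 : value;
}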
18915+/**
18916+ * Sets the power state for the panel.
18917+ */
18918+static void mrst_lvds_set_power(struct drm_device *dev,
18919+ struct psb_intel_output *output, bool on)
18920+{
18921+ u32 pp_status;
18922+
18923+#if PRINT_JLIU7
18924+ DRM_INFO("JLIU7 enter mrst_lvds_set_power \n");
18925+#endif /* PRINT_JLIU7 */
18926+
18927+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
18928+
18929+ if (on) {
18930+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
18931+ POWER_TARGET_ON);
18932+ do {
18933+ pp_status = REG_READ(PP_STATUS);
18934+ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
18935+ } else {
18936+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
18937+ ~POWER_TARGET_ON);
18938+ do {
18939+ pp_status = REG_READ(PP_STATUS);
18940+ } while (pp_status & PP_ON);
18941+ }
18942+
18943+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
18944+}
18945+
18946+static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode)
18947+{
18948+ struct drm_device *dev = encoder->dev;
18949+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
18950+
18951+#if PRINT_JLIU7
18952+ DRM_INFO("JLIU7 enter mrst_lvds_dpms \n");
18953+#endif /* PRINT_JLIU7 */
18954+
18955+ if (mode == DRM_MODE_DPMS_ON)
18956+ mrst_lvds_set_power(dev, output, true);
18957+ else
18958+ mrst_lvds_set_power(dev, output, false);
18959+
18960+ /* XXX: We never power down the LVDS pairs. */
18961+}
18962+
18963+static void mrst_lvds_mode_set(struct drm_encoder *encoder,
18964+ struct drm_display_mode *mode,
18965+ struct drm_display_mode *adjusted_mode)
18966+{
18967+ struct psb_intel_mode_device *mode_dev = enc_to_psb_intel_output(encoder)->mode_dev;
18968+ struct drm_device *dev = encoder->dev;
18969+ u32 lvds_port;
18970+ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
18971+
18972+#if PRINT_JLIU7
18973+ DRM_INFO("JLIU7 enter mrst_lvds_mode_set \n");
18974+#endif /* PRINT_JLIU7 */
18975+
18976+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
18977+
18978+ /*
18979+ * The LVDS pin pair will already have been turned on in the
18980+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
18981+ * settings.
18982+ */
18983+ /*FIXME JLIU7 Get panel power delay parameters from config data */
18984+ REG_WRITE(0x61208, 0x25807d0);
18985+ REG_WRITE(0x6120c, 0x1f407d0);
18986+ REG_WRITE(0x61210, 0x270f04);
18987+
18988+ lvds_port = (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN | LVDS_BORDER_EN;
18989+
18990+ if (mode_dev->panel_wants_dither)
18991+ lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
18992+
18993+ REG_WRITE(LVDS, lvds_port);
18994+
18995+ drm_connector_property_get_value(&enc_to_psb_intel_output(encoder)->base,
18996+ dev->mode_config.scaling_mode_property, &curValue);
18997+
18998+ if (curValue == DRM_MODE_SCALE_NO_SCALE)
18999+ REG_WRITE(PFIT_CONTROL, 0);
19000+ else if (curValue == DRM_MODE_SCALE_ASPECT) {
19001+ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) || (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
19002+ if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) == (mode->hdisplay * adjusted_mode->crtc_vdisplay))
19003+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
19004+ else if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) > (mode->hdisplay * adjusted_mode->crtc_vdisplay))
19005+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_PILLARBOX);
19006+ else
19007+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_LETTERBOX);
19008+ } else
19009+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
19010+ } else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/
19011+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
19012+
19013+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
19014+}
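/*
 * Worked example of the DRM_MODE_SCALE_ASPECT branch in mrst_lvds_mode_set()
 * above (the numbers are illustrative, not taken from any particular panel):
 * with a native panel timing of 1024x600 (adjusted_mode->crtc_hdisplay x
 * adjusted_mode->crtc_vdisplay) and a requested mode of 800x600,
 *
 *	crtc_hdisplay * vdisplay = 1024 * 600 = 614400
 *	hdisplay * crtc_vdisplay =  800 * 600 = 480000
 *
 * The first product is larger, i.e. the panel is wider than the requested
 * mode, so the panel fitter is programmed with PFIT_ENABLE |
 * PFIT_SCALING_MODE_PILLARBOX and borders are added left and right.  Equal
 * products mean the aspect ratios match and plain PFIT_ENABLE is enough;
 * a smaller first product means the mode is wider than the panel and the
 * letterbox mode is used instead.
 */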
19015+
19016+
19017+static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = {
19018+ .dpms = mrst_lvds_dpms,
19019+ .mode_fixup = psb_intel_lvds_mode_fixup,
19020+ .prepare = psb_intel_lvds_prepare,
19021+ .mode_set = mrst_lvds_mode_set,
19022+ .commit = psb_intel_lvds_commit,
19023+};
19024+
19025+/** Returns the panel fixed mode from configuration. */
19026+/** FIXME JLIU7 need to revisit it. */
19027+struct drm_display_mode *mrst_lvds_get_configuration_mode(struct drm_device
19028+ *dev)
19029+{
19030+ struct drm_display_mode *mode;
19031+ struct drm_psb_private *dev_priv =
19032+ (struct drm_psb_private *) dev->dev_private;
19033+ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
19034+
19035+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
19036+ if (!mode)
19037+ return NULL;
19038+
19039+ if (dev_priv->vbt_data.Size != 0x00) { /*if non-zero, then use vbt*/
19040+
19041+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
19042+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
19043+ mode->hsync_start = mode->hdisplay + \
19044+ ((ti->hsync_offset_hi << 8) | \
19045+ ti->hsync_offset_lo);
19046+ mode->hsync_end = mode->hsync_start + \
19047+ ((ti->hsync_pulse_width_hi << 8) | \
19048+ ti->hsync_pulse_width_lo);
19049+ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
19050+ ti->hblank_lo);
19051+ mode->vsync_start = \
19052+ mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
19053+ ti->vsync_offset_lo);
19054+ mode->vsync_end = \
19055+ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
19056+ ti->vsync_pulse_width_lo);
19057+ mode->vtotal = mode->vdisplay + \
19058+ ((ti->vblank_hi << 8) | ti->vblank_lo);
19059+ mode->clock = ti->pixel_clock * 10;
19060+#if 0
19061+ printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
19062+ printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
19063+ printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
19064+ printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
19065+ printk(KERN_INFO "htotal is %d\n", mode->htotal);
19066+ printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
19067+ printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
19068+ printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
19069+ printk(KERN_INFO "clock is %d\n", mode->clock);
19070+#endif
19071+ }
19072+ else {
19073+
19074+#if 0 /*FIXME jliu7 remove it later */
19075+ /* hard coded fixed mode for TPO LTPS LPJ040K001A */
19076+ mode->hdisplay = 800;
19077+ mode->vdisplay = 480;
19078+ mode->hsync_start = 836;
19079+ mode->hsync_end = 846;
19080+ mode->htotal = 1056;
19081+ mode->vsync_start = 489;
19082+ mode->vsync_end = 491;
19083+ mode->vtotal = 525;
19084+ mode->clock = 33264;
19085+#endif /*FIXME jliu7 remove it later */
19086+
19087+#if 0 /*FIXME jliu7 remove it later */
19088+ /* hard coded fixed mode for LVDS 800x480 */
19089+ mode->hdisplay = 800;
19090+ mode->vdisplay = 480;
19091+ mode->hsync_start = 801;
19092+ mode->hsync_end = 802;
19093+ mode->htotal = 1024;
19094+ mode->vsync_start = 481;
19095+ mode->vsync_end = 482;
19096+ mode->vtotal = 525;
19097+ mode->clock = 30994;
19098+#endif /*FIXME jliu7 remove it later */
19099+
19100+#if 1 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */
19101+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
19102+ mode->hdisplay = 1024;
19103+ mode->vdisplay = 600;
19104+ mode->hsync_start = 1072;
19105+ mode->hsync_end = 1104;
19106+ mode->htotal = 1184;
19107+ mode->vsync_start = 603;
19108+ mode->vsync_end = 604;
19109+ mode->vtotal = 608;
19110+ mode->clock = 53990;
19111+#endif /*FIXME jliu7 remove it later */
19112+
19113+#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
19114+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
19115+ mode->hdisplay = 1024;
19116+ mode->vdisplay = 600;
19117+ mode->hsync_start = 1104;
19118+ mode->hsync_end = 1136;
19119+ mode->htotal = 1184;
19120+ mode->vsync_start = 603;
19121+ mode->vsync_end = 604;
19122+ mode->vtotal = 608;
19123+ mode->clock = 53990;
19124+#endif /*FIXME jliu7 remove it later */
19125+
19126+#if 0 /*FIXME jliu7 remove it later */
19127+ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
19128+ mode->hdisplay = 1024;
19129+ mode->vdisplay = 600;
19130+ mode->hsync_start = 1124;
19131+ mode->hsync_end = 1204;
19132+ mode->htotal = 1312;
19133+ mode->vsync_start = 607;
19134+ mode->vsync_end = 610;
19135+ mode->vtotal = 621;
19136+ mode->clock = 48885;
19137+#endif /*FIXME jliu7 remove it later */
19138+
19139+#if 0 /*FIXME jliu7 remove it later */
19140+ /* hard coded fixed mode for LVDS 1024x768 */
19141+ mode->hdisplay = 1024;
19142+ mode->vdisplay = 768;
19143+ mode->hsync_start = 1048;
19144+ mode->hsync_end = 1184;
19145+ mode->htotal = 1344;
19146+ mode->vsync_start = 771;
19147+ mode->vsync_end = 777;
19148+ mode->vtotal = 806;
19149+ mode->clock = 65000;
19150+#endif /*FIXME jliu7 remove it later */
19151+
19152+#if 0 /*FIXME jliu7 remove it later */
19153+ /* hard coded fixed mode for LVDS 1366x768 */
19154+ mode->hdisplay = 1366;
19155+ mode->vdisplay = 768;
19156+ mode->hsync_start = 1430;
19157+ mode->hsync_end = 1558;
19158+ mode->htotal = 1664;
19159+ mode->vsync_start = 769;
19160+ mode->vsync_end = 770;
19161+ mode->vtotal = 776;
19162+ mode->clock = 77500;
19163+#endif /*FIXME jliu7 remove it later */
19164+ }
19165+ drm_mode_set_name(mode);
19166+ drm_mode_set_crtcinfo(mode, 0);
19167+
19168+ return mode;
19169+}
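/*
 * The GCT DTD stores each timing value split into low and high bytes, and the
 * (hi << 8) | lo expressions above simply reassemble them.  As an illustrative
 * example, a 1024-pixel active width would be stored as hactive_hi = 0x04 and
 * hactive_lo = 0x00, giving (0x04 << 8) | 0x00 = 1024.  The pixel clock is
 * multiplied by 10 because drm_display_mode.clock is expressed in kHz, which
 * suggests the GCT field is stored in units of 10 kHz.
 */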
19170+
19171+/**
19172+ * mrst_lvds_init - setup LVDS connectors on this device
19173+ * @dev: drm device
19174+ *
19175+ * Create the connector, register the LVDS DDC bus, and try to figure out what
19176+ * modes we can display on the LVDS panel (if present).
19177+ */
19178+void mrst_lvds_init(struct drm_device *dev,
19179+ struct psb_intel_mode_device *mode_dev)
19180+{
19181+ struct psb_intel_output *psb_intel_output;
19182+ struct drm_connector *connector;
19183+ struct drm_encoder *encoder;
19184+ struct drm_psb_private *dev_priv = (struct drm_psb_private *) dev->dev_private;
19185+ struct edid *edid;
19186+ int ret = 0;
19187+ struct i2c_adapter *i2c_adap;
19188+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
19189+
19190+#if PRINT_JLIU7
19191+ DRM_INFO("JLIU7 enter mrst_lvds_init \n");
19192+#endif /* PRINT_JLIU7 */
19193+
19194+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
19195+ if (!psb_intel_output)
19196+ return;
19197+
19198+ psb_intel_output->mode_dev = mode_dev;
19199+ connector = &psb_intel_output->base;
19200+ encoder = &psb_intel_output->enc;
19201+ drm_connector_init(dev, &psb_intel_output->base,
19202+ &psb_intel_lvds_connector_funcs,
19203+ DRM_MODE_CONNECTOR_LVDS);
19204+
19205+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
19206+ DRM_MODE_ENCODER_LVDS);
19207+
19208+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
19209+ &psb_intel_output->enc);
19210+ psb_intel_output->type = INTEL_OUTPUT_LVDS;
19211+
19212+ drm_encoder_helper_add(encoder, &mrst_lvds_helper_funcs);
19213+ drm_connector_helper_add(connector,
19214+ &psb_intel_lvds_connector_helper_funcs);
19215+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
19216+ connector->interlace_allowed = false;
19217+ connector->doublescan_allowed = false;
19218+
19219+ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
19220+ drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL);
19221+
19222+ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
19223+
19224+ /*
19225+ * LVDS discovery:
19226+ * 1) check for EDID on DDC
19227+ * 2) check for VBT data
19228+ * 3) check to see if LVDS is already on
19229+ * if none of the above, no panel
19230+ * 4) make sure lid is open
19231+ * if closed, act like it's not there for now
19232+ */
19233+ i2c_adap = i2c_get_adapter(2);
19234+ if (i2c_adap == NULL)
19235+ printk(KERN_ALERT "No ddc adapter available!\n");
19236+ /* Set up the DDC bus. */
19237+/* psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
19238+ if (!psb_intel_output->ddc_bus) {
19239+ dev_printk(KERN_ERR, &dev->pdev->dev,
19240+ "DDC bus registration " "failed.\n");
19241+ goto failed_ddc;
19242+ }*/
19243+
19244+ /*
19245+ * Attempt to get the fixed panel mode from DDC. Assume that the
19246+ * preferred mode is the right one.
19247+ */
19248+ edid = drm_get_edid(connector, i2c_adap);
19249+ if (edid) {
19250+ drm_mode_connector_update_edid_property(connector, edid);
19251+ ret = drm_add_edid_modes(connector, edid);
19252+ kfree(edid);
19253+ }
19254+
19255+ list_for_each_entry(scan, &connector->probed_modes, head) {
19256+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
19257+ mode_dev->panel_fixed_mode =
19258+ drm_mode_duplicate(dev, scan);
19259+ goto out; /* FIXME: check for quirks */
19260+ }
19261+ }
19262+
19263+ /*
19264+ * If we didn't get EDID, try getting panel timing
19265+ * from configuration data
19266+ */
19267+ mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
19268+
19269+ if (mode_dev->panel_fixed_mode) {
19270+ mode_dev->panel_fixed_mode->type |=
19271+ DRM_MODE_TYPE_PREFERRED;
19272+ goto out; /* FIXME: check for quirks */
19273+ }
19274+
19275+ /* If we still don't have a mode after all that, give up. */
19276+ if (!mode_dev->panel_fixed_mode) {
19277+ DRM_DEBUG
19278+ ("Found no modes on the lvds, ignoring the LVDS\n");
19279+ goto failed_find;
19280+ }
19281+
19282+out:
19283+ drm_sysfs_connector_add(connector);
19284+ return;
19285+
19286+failed_find:
19287+ DRM_DEBUG("No LVDS modes found, disabling.\n");
19288+ if (psb_intel_output->ddc_bus)
19289+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
19290+
19291+failed_ddc:
19292+
19293+ drm_encoder_cleanup(encoder);
19294+ drm_connector_cleanup(connector);
19295+ kfree(connector);
19296+}
19297+
19298+/* MRST platform end */
19299diff --git a/drivers/gpu/drm/psb/psb_intel_modes.c b/drivers/gpu/drm/psb/psb_intel_modes.c
19300new file mode 100644
19301index 0000000..54abe86
19302--- /dev/null
19303+++ b/drivers/gpu/drm/psb/psb_intel_modes.c
19304@@ -0,0 +1,64 @@
19305+/*
19306+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
19307+ * Copyright (c) 2007 Intel Corporation
19308+ * Jesse Barnes <jesse.barnes@intel.com>
19309+ */
19310+
19311+#include <linux/i2c.h>
19312+#include <linux/fb.h>
19313+#include <drm/drmP.h>
19314+#include "psb_intel_drv.h"
19315+
19316+/**
19317+ * psb_intel_ddc_probe - check for an EDID device on the output's DDC bus
19318+ * Returns true if a slave responds at the standard EDID address (0x50).
19319+ */
19320+bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
19321+{
19322+ u8 out_buf[] = { 0x0, 0x0 };
19323+ u8 buf[2];
19324+ int ret;
19325+ struct i2c_msg msgs[] = {
19326+ {
19327+ .addr = 0x50,
19328+ .flags = 0,
19329+ .len = 1,
19330+ .buf = out_buf,
19331+ },
19332+ {
19333+ .addr = 0x50,
19334+ .flags = I2C_M_RD,
19335+ .len = 1,
19336+ .buf = buf,
19337+ }
19338+ };
19339+
19340+ ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2);
19341+ if (ret == 2)
19342+ return true;
19343+
19344+ return false;
19345+}
19346+
19347+/**
19348+ * psb_intel_ddc_get_modes - get modelist from monitor
19349+ * @psb_intel_output: psb_intel output to query for modes
19350+ *
19351+ * Fetch the EDID information from @psb_intel_output using its DDC bus.
19352+ */
19353+int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output)
19354+{
19355+ struct edid *edid;
19356+ int ret = 0;
19357+
19358+ edid =
19359+ drm_get_edid(&psb_intel_output->base,
19360+ &psb_intel_output->ddc_bus->adapter);
19361+ if (edid) {
19362+ drm_mode_connector_update_edid_property(&psb_intel_output->
19363+ base, edid);
19364+ ret = drm_add_edid_modes(&psb_intel_output->base, edid);
19365+ kfree(edid);
19366+ }
19367+ return ret;
19368+}
19369diff --git a/drivers/gpu/drm/psb/psb_intel_reg.h b/drivers/gpu/drm/psb/psb_intel_reg.h
19370new file mode 100644
19371index 0000000..7e22463
19372--- /dev/null
19373+++ b/drivers/gpu/drm/psb/psb_intel_reg.h
19374@@ -0,0 +1,1015 @@
19375+#define BLC_PWM_CTL 0x61254
19376+#define BLC_PWM_CTL2 0x61250
19377+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
19378+/**
19379+ * This is the most significant 15 bits of the number of backlight cycles in a
19380+ * complete cycle of the modulated backlight control.
19381+ *
19382+ * The actual value is this field multiplied by two.
19383+ */
19384+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
19385+#define BLM_LEGACY_MODE (1 << 16)
19386+/**
19387+ * This is the number of cycles out of the backlight modulation cycle for which
19388+ * the backlight is on.
19389+ *
19390+ * This field must be no greater than the number of cycles in the complete
19391+ * backlight modulation cycle.
19392+ */
19393+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
19394+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
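/*
 * Minimal illustration of how the two BLC_PWM_CTL fields above combine, using
 * made-up numbers: for a hypothetical modulation period of 0x4000 backlight
 * cycles at roughly 50% brightness,
 *
 *	freq_field = 0x4000 / 2 = 0x2000	(the field holds cycles / 2)
 *	duty       = 0x2000			(must not exceed 0x4000)
 *	value      = (freq_field << BACKLIGHT_MODULATION_FREQ_SHIFT) |
 *		     (duty & BACKLIGHT_DUTY_CYCLE_MASK);
 *
 * psb_intel_lvds_prepare() earlier in this patch saves the whole register and
 * extracts only the duty-cycle portion (saveBLC_PWM_CTL &
 * BACKLIGHT_DUTY_CYCLE_MASK) as the current backlight setting.
 */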
19395+
19396+#define I915_GCFGC 0xf0
19397+#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
19398+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
19399+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
19400+#define I915_DISPLAY_CLOCK_MASK (7 << 4)
19401+
19402+#define I855_HPLLCC 0xc0
19403+#define I855_CLOCK_CONTROL_MASK (3 << 0)
19404+#define I855_CLOCK_133_200 (0 << 0)
19405+#define I855_CLOCK_100_200 (1 << 0)
19406+#define I855_CLOCK_100_133 (2 << 0)
19407+#define I855_CLOCK_166_250 (3 << 0)
19408+
19409+/* I830 CRTC registers */
19410+#define HTOTAL_A 0x60000
19411+#define HBLANK_A 0x60004
19412+#define HSYNC_A 0x60008
19413+#define VTOTAL_A 0x6000c
19414+#define VBLANK_A 0x60010
19415+#define VSYNC_A 0x60014
19416+#define PIPEASRC 0x6001c
19417+#define BCLRPAT_A 0x60020
19418+#define VSYNCSHIFT_A 0x60028
19419+
19420+#define HTOTAL_B 0x61000
19421+#define HBLANK_B 0x61004
19422+#define HSYNC_B 0x61008
19423+#define VTOTAL_B 0x6100c
19424+#define VBLANK_B 0x61010
19425+#define VSYNC_B 0x61014
19426+#define PIPEBSRC 0x6101c
19427+#define BCLRPAT_B 0x61020
19428+#define VSYNCSHIFT_B 0x61028
19429+
19430+#define PP_STATUS 0x61200
19431+# define PP_ON (1 << 31)
19432+/**
19433+ * Indicates that all dependencies of the panel are on:
19434+ *
19435+ * - PLL enabled
19436+ * - pipe enabled
19437+ * - LVDS/DVOB/DVOC on
19438+ */
19439+# define PP_READY (1 << 30)
19440+# define PP_SEQUENCE_NONE (0 << 28)
19441+# define PP_SEQUENCE_ON (1 << 28)
19442+# define PP_SEQUENCE_OFF (2 << 28)
19443+# define PP_SEQUENCE_MASK 0x30000000
19444+#define PP_CONTROL 0x61204
19445+# define POWER_TARGET_ON (1 << 0)
19446+
19447+#define LVDSPP_ON 0x61208
19448+#define LVDSPP_OFF 0x6120c
19449+#define PP_CYCLE 0x61210
19450+
19451+#define PFIT_CONTROL 0x61230
19452+# define PFIT_ENABLE (1 << 31)
19453+# define PFIT_PIPE_MASK (3 << 29)
19454+# define PFIT_PIPE_SHIFT 29
19455+# define PFIT_SCALING_MODE_PILLARBOX (1 << 27)
19456+# define PFIT_SCALING_MODE_LETTERBOX (3 << 26)
19457+# define VERT_INTERP_DISABLE (0 << 10)
19458+# define VERT_INTERP_BILINEAR (1 << 10)
19459+# define VERT_INTERP_MASK (3 << 10)
19460+# define VERT_AUTO_SCALE (1 << 9)
19461+# define HORIZ_INTERP_DISABLE (0 << 6)
19462+# define HORIZ_INTERP_BILINEAR (1 << 6)
19463+# define HORIZ_INTERP_MASK (3 << 6)
19464+# define HORIZ_AUTO_SCALE (1 << 5)
19465+# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
19466+
19467+#define PFIT_PGM_RATIOS 0x61234
19468+# define PFIT_VERT_SCALE_MASK 0xfff00000
19469+# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
19470+
19471+#define PFIT_AUTO_RATIOS 0x61238
19472+
19473+
19474+#define DPLL_A 0x06014
19475+#define DPLL_B 0x06018
19476+# define DPLL_VCO_ENABLE (1 << 31)
19477+# define DPLL_DVO_HIGH_SPEED (1 << 30)
19478+# define DPLL_SYNCLOCK_ENABLE (1 << 29)
19479+# define DPLL_VGA_MODE_DIS (1 << 28)
19480+# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
19481+# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
19482+# define DPLL_MODE_MASK (3 << 26)
19483+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
19484+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
19485+# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
19486+# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
19487+# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
19488+# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
19489+/**
19490+ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
19491+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
19492+ */
19493+# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
19494+/**
19495+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
19496+ * this field (only one bit may be set).
19497+ */
19498+# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
19499+# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
19500+# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
19501+ * in DVO non-gang */
19502+# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
19503+# define PLL_REF_INPUT_DREFCLK (0 << 13)
19504+# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
19505+# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
19506+ * TVCLKIN */
19507+# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
19508+# define PLL_REF_INPUT_MASK (3 << 13)
19509+# define PLL_LOAD_PULSE_PHASE_SHIFT 9
19510+/*
19511+ * Parallel to Serial Load Pulse phase selection.
19512+ * Selects the phase for the 10X DPLL clock for the PCIe
19513+ * digital display port. The range is 4 to 13; 10 or more
19514+ * is just a flip delay. The default is 6
19515+ */
19516+# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
19517+# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
19518+
19519+/**
19520+ * SDVO multiplier for 945G/GM. Not used on 965.
19521+ *
19522+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
19523+ */
19524+# define SDVO_MULTIPLIER_MASK 0x000000ff
19525+# define SDVO_MULTIPLIER_SHIFT_HIRES 4
19526+# define SDVO_MULTIPLIER_SHIFT_VGA 0
19527+
19528+/** @defgroup DPLL_MD
19529+ * @{
19530+ */
19531+/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
19532+#define DPLL_A_MD 0x0601c
19533+/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
19534+#define DPLL_B_MD 0x06020
19535+/**
19536+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
19537+ *
19538+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
19539+ */
19540+# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
19541+# define DPLL_MD_UDI_DIVIDER_SHIFT 24
19542+/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
19543+# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
19544+# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
19545+/**
19546+ * SDVO/UDI pixel multiplier.
19547+ *
19548+ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
19549+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
19550+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
19551+ * dummy bytes in the datastream at an increased clock rate, with both sides of
19552+ * the link knowing how many bytes are fill.
19553+ *
19554+ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
19555+ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
19556+ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
19557+ * through an SDVO command.
19558+ *
19559+ * This register field has values of multiplication factor minus 1, with
19560+ * a maximum multiplier of 5 for SDVO.
19561+ */
19562+# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
19563+# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
19564+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
19565+ * This is best set to the default value (3) or the CRT won't work. No,
19566+ * I don't entirely understand what this does...
19567+ */
19568+# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
19569+# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
19570+/** @} */
19571+
19572+#define DPLL_TEST 0x606c
19573+# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
19574+# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
19575+# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
19576+# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
19577+# define DPLLB_TEST_N_BYPASS (1 << 19)
19578+# define DPLLB_TEST_M_BYPASS (1 << 18)
19579+# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
19580+# define DPLLA_TEST_N_BYPASS (1 << 3)
19581+# define DPLLA_TEST_M_BYPASS (1 << 2)
19582+# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
19583+
19584+#define ADPA 0x61100
19585+#define ADPA_DAC_ENABLE (1<<31)
19586+#define ADPA_DAC_DISABLE 0
19587+#define ADPA_PIPE_SELECT_MASK (1<<30)
19588+#define ADPA_PIPE_A_SELECT 0
19589+#define ADPA_PIPE_B_SELECT (1<<30)
19590+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
19591+#define ADPA_SETS_HVPOLARITY 0
19592+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
19593+#define ADPA_VSYNC_CNTL_ENABLE 0
19594+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
19595+#define ADPA_HSYNC_CNTL_ENABLE 0
19596+#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
19597+#define ADPA_VSYNC_ACTIVE_LOW 0
19598+#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
19599+#define ADPA_HSYNC_ACTIVE_LOW 0
19600+
19601+#define FPA0 0x06040
19602+#define FPA1 0x06044
19603+#define FPB0 0x06048
19604+#define FPB1 0x0604c
19605+# define FP_N_DIV_MASK 0x003f0000
19606+# define FP_N_DIV_SHIFT 16
19607+# define FP_M1_DIV_MASK 0x00003f00
19608+# define FP_M1_DIV_SHIFT 8
19609+# define FP_M2_DIV_MASK 0x0000003f
19610+# define FP_M2_DIV_SHIFT 0
19611+
19612+
19613+#define PORT_HOTPLUG_EN 0x61110
19614+# define SDVOB_HOTPLUG_INT_EN (1 << 26)
19615+# define SDVOC_HOTPLUG_INT_EN (1 << 25)
19616+# define TV_HOTPLUG_INT_EN (1 << 18)
19617+# define CRT_HOTPLUG_INT_EN (1 << 9)
19618+# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
19619+
19620+#define PORT_HOTPLUG_STAT 0x61114
19621+# define CRT_HOTPLUG_INT_STATUS (1 << 11)
19622+# define TV_HOTPLUG_INT_STATUS (1 << 10)
19623+# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
19624+# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
19625+# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
19626+# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
19627+# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
19628+# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
19629+
19630+#define SDVOB 0x61140
19631+#define SDVOC 0x61160
19632+#define SDVO_ENABLE (1 << 31)
19633+#define SDVO_PIPE_B_SELECT (1 << 30)
19634+#define SDVO_STALL_SELECT (1 << 29)
19635+#define SDVO_INTERRUPT_ENABLE (1 << 26)
19636+/**
19637+ * 915G/GM SDVO pixel multiplier.
19638+ *
19639+ * Programmed value is multiplier - 1, up to 5x.
19640+ *
19641+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
19642+ */
19643+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
19644+#define SDVO_PORT_MULTIPLY_SHIFT 23
19645+#define SDVO_PHASE_SELECT_MASK (15 << 19)
19646+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
19647+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
19648+#define SDVOC_GANG_MODE (1 << 16)
19649+#define SDVO_BORDER_ENABLE (1 << 7)
19650+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
19651+#define SDVO_DETECTED (1 << 2)
19652+/* Bits to be preserved when writing */
19653+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
19654+#define SDVOC_PRESERVE_MASK (1 << 17)
19655+
19656+/** @defgroup LVDS
19657+ * @{
19658+ */
19659+/**
19660+ * This register controls the LVDS output enable, pipe selection, and data
19661+ * format selection.
19662+ *
19663+ * All of the clock/data pairs are force powered down by power sequencing.
19664+ */
19665+#define LVDS 0x61180
19666+/**
19667+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
19668+ * the DPLL semantics change when the LVDS is assigned to that pipe.
19669+ */
19670+# define LVDS_PORT_EN (1 << 31)
19671+/** Selects pipe B for LVDS data. Must be set on pre-965. */
19672+# define LVDS_PIPEB_SELECT (1 << 30)
19673+
19674+/** Turns on border drawing to allow centered display. */
19675+# define LVDS_BORDER_EN (1 << 15)
19676+
19677+/**
19678+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
19679+ * pixel.
19680+ */
19681+# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
19682+# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
19683+# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
19684+/**
19685+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
19686+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
19687+ * on.
19688+ */
19689+# define LVDS_A3_POWER_MASK (3 << 6)
19690+# define LVDS_A3_POWER_DOWN (0 << 6)
19691+# define LVDS_A3_POWER_UP (3 << 6)
19692+/**
19693+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
19694+ * is set.
19695+ */
19696+# define LVDS_CLKB_POWER_MASK (3 << 4)
19697+# define LVDS_CLKB_POWER_DOWN (0 << 4)
19698+# define LVDS_CLKB_POWER_UP (3 << 4)
19699+
19700+/**
19701+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
19702+ * setting for whether we are in dual-channel mode. The B3 pair will
19703+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
19704+ */
19705+# define LVDS_B0B3_POWER_MASK (3 << 2)
19706+# define LVDS_B0B3_POWER_DOWN (0 << 2)
19707+# define LVDS_B0B3_POWER_UP (3 << 2)
19708+
19709+#define PIPEACONF 0x70008
19710+#define PIPEACONF_ENABLE (1<<31)
19711+#define PIPEACONF_DISABLE 0
19712+#define PIPEACONF_DOUBLE_WIDE (1<<30)
19713+#define I965_PIPECONF_ACTIVE (1<<30)
19714+#define PIPEACONF_SINGLE_WIDE 0
19715+#define PIPEACONF_PIPE_UNLOCKED 0
19716+#define PIPEACONF_PIPE_LOCKED (1<<25)
19717+#define PIPEACONF_PALETTE 0
19718+#define PIPEACONF_GAMMA (1<<24)
19719+#define PIPECONF_FORCE_BORDER (1<<25)
19720+#define PIPECONF_PROGRESSIVE (0 << 21)
19721+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
19722+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
19723+
19724+#define PIPEBCONF 0x71008
19725+#define PIPEBCONF_ENABLE (1<<31)
19726+#define PIPEBCONF_DISABLE 0
19727+#define PIPEBCONF_DOUBLE_WIDE (1<<30)
19728+#define PIPEBCONF_DISABLE 0
19729+#define PIPEBCONF_GAMMA (1<<24)
19730+#define PIPEBCONF_PALETTE 0
19731+
19732+#define PIPEBGCMAXRED 0x71010
19733+#define PIPEBGCMAXGREEN 0x71014
19734+#define PIPEBGCMAXBLUE 0x71018
19735+
19736+#define PIPEASTAT 0x70024
19737+#define PIPEBSTAT 0x71024
19738+#define PIPE_VBLANK_CLEAR (1 << 1)
19739+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18)
19740+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
19741+
19742+#define PIPEAFRAMEHIGH 0x70040
19743+#define PIPEAFRAMEPIXEL 0x70044
19744+#define PIPEBFRAMEHIGH 0x71040
19745+#define PIPEBFRAMEPIXEL 0x71044
19746+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
19747+#define PIPE_FRAME_HIGH_SHIFT 0
19748+#define PIPE_FRAME_LOW_MASK 0xff000000
19749+#define PIPE_FRAME_LOW_SHIFT 24
19750+#define PIPE_PIXEL_MASK 0x00ffffff
19751+#define PIPE_PIXEL_SHIFT 0
19752+
19753+#define DSPARB 0x70030
19754+#define DSPFW1 0x70034
19755+#define DSPFW2 0x70038
19756+#define DSPFW3 0x7003c
19757+#define DSPFW4 0x70050
19758+#define DSPFW5 0x70054
19759+#define DSPFW6 0x70058
19760+#define DSPCHICKENBIT 0x70400
19761+#define DSPACNTR 0x70180
19762+#define DSPBCNTR 0x71180
19763+#define DISPLAY_PLANE_ENABLE (1<<31)
19764+#define DISPLAY_PLANE_DISABLE 0
19765+#define DISPPLANE_GAMMA_ENABLE (1<<30)
19766+#define DISPPLANE_GAMMA_DISABLE 0
19767+#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
19768+#define DISPPLANE_8BPP (0x2<<26)
19769+#define DISPPLANE_15_16BPP (0x4<<26)
19770+#define DISPPLANE_16BPP (0x5<<26)
19771+#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
19772+#define DISPPLANE_32BPP (0x7<<26)
19773+#define DISPPLANE_STEREO_ENABLE (1<<25)
19774+#define DISPPLANE_STEREO_DISABLE 0
19775+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
19776+#define DISPPLANE_SEL_PIPE_A 0
19777+#define DISPPLANE_SEL_PIPE_B (1<<24)
19778+#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
19779+#define DISPPLANE_SRC_KEY_DISABLE 0
19780+#define DISPPLANE_LINE_DOUBLE (1<<20)
19781+#define DISPPLANE_NO_LINE_DOUBLE 0
19782+#define DISPPLANE_STEREO_POLARITY_FIRST 0
19783+#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
19784+/* plane B only */
19785+#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
19786+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
19787+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
19788+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
19789+
19790+#define DSPABASE 0x70184
19791+#define DSPALINOFF 0x70184
19792+#define DSPASTRIDE 0x70188
19793+
19794+#define DSPBBASE 0x71184
19795+#define DSPBLINOFF 0X71184
19796+#define DSPBADDR DSPBBASE
19797+#define DSPBSTRIDE 0x71188
19798+
19799+#define DSPAKEYVAL 0x70194
19800+#define DSPAKEYMASK 0x70198
19801+
19802+#define DSPAPOS 0x7018C /* reserved */
19803+#define DSPASIZE 0x70190
19804+#define DSPBPOS 0x7118C
19805+#define DSPBSIZE 0x71190
19806+
19807+#define DSPASURF 0x7019C
19808+#define DSPATILEOFF 0x701A4
19809+
19810+#define DSPBSURF 0x7119C
19811+#define DSPBTILEOFF 0x711A4
19812+
19813+#define VGACNTRL 0x71400
19814+# define VGA_DISP_DISABLE (1 << 31)
19815+# define VGA_2X_MODE (1 << 30)
19816+# define VGA_PIPE_B_SELECT (1 << 29)
19817+
19818+/*
19819+ * Overlay registers
19820+ */
19821+#define OV_OVADD 0x30000
19822+#define OV_OGAMC5 0x30010
19823+#define OV_OGAMC4 0x30014
19824+#define OV_OGAMC3 0x30018
19825+#define OV_OGAMC2 0x3001C
19826+#define OV_OGAMC1 0x30020
19827+#define OV_OGAMC0 0x30024
19828+
19829+/*
19830+ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
19831+ * of video memory available to the BIOS in SWF1.
19832+ */
19833+
19834+#define SWF0 0x71410
19835+#define SWF1 0x71414
19836+#define SWF2 0x71418
19837+#define SWF3 0x7141c
19838+#define SWF4 0x71420
19839+#define SWF5 0x71424
19840+#define SWF6 0x71428
19841+
19842+/*
19843+ * 855 scratch registers.
19844+ */
19845+#define SWF00 0x70410
19846+#define SWF01 0x70414
19847+#define SWF02 0x70418
19848+#define SWF03 0x7041c
19849+#define SWF04 0x70420
19850+#define SWF05 0x70424
19851+#define SWF06 0x70428
19852+
19853+#define SWF10 SWF0
19854+#define SWF11 SWF1
19855+#define SWF12 SWF2
19856+#define SWF13 SWF3
19857+#define SWF14 SWF4
19858+#define SWF15 SWF5
19859+#define SWF16 SWF6
19860+
19861+#define SWF30 0x72414
19862+#define SWF31 0x72418
19863+#define SWF32 0x7241c
19864+
19865+
19866+/*
19867+ * Palette registers
19868+ */
19869+#define PALETTE_A 0x0a000
19870+#define PALETTE_B 0x0a800
19871+
19872+#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
19873+#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
19874+#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
19875+#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
19876+#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
19877+
19878+
19879+/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G) */
19880+#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)
19881+#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
19882+#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
19883+#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
19884+
19885+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
19886+ (dev)->pci_device == 0x2982 || \
19887+ (dev)->pci_device == 0x2992 || \
19888+ (dev)->pci_device == 0x29A2 || \
19889+ (dev)->pci_device == 0x2A02 || \
19890+ (dev)->pci_device == 0x2A12)
19891+
19892+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
19893+
19894+#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
19895+ (dev)->pci_device == 0x29B2 || \
19896+ (dev)->pci_device == 0x29D2)
19897+
19898+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
19899+ IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev) || \
19900+ IS_MRST(dev))
19901+
19902+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
19903+ IS_I945GM(dev) || IS_I965GM(dev) || \
19904+ IS_POULSBO(dev) || IS_MRST(dev))
19905+
19906+/* Cursor A & B regs */
19907+#define CURACNTR 0x70080
19908+#define CURSOR_MODE_DISABLE 0x00
19909+#define CURSOR_MODE_64_32B_AX 0x07
19910+#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
19911+#define MCURSOR_GAMMA_ENABLE (1 << 26)
19912+#define CURABASE 0x70084
19913+#define CURAPOS 0x70088
19914+#define CURSOR_POS_MASK 0x007FF
19915+#define CURSOR_POS_SIGN 0x8000
19916+#define CURSOR_X_SHIFT 0
19917+#define CURSOR_Y_SHIFT 16
19918+#define CURBCNTR 0x700c0
19919+#define CURBBASE 0x700c4
19920+#define CURBPOS 0x700c8
19921+
19922+/*
19923+ * Interrupt Registers
19924+ */
19925+#define IER 0x020a0
19926+#define IIR 0x020a4
19927+#define IMR 0x020a8
19928+#define ISR 0x020ac
19929+
19930+/*
19931+ * MOORESTOWN delta registers
19932+ */
19933+#define MRST_DPLL_A 0x0f014
19934+#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
19935+#define MRST_FPA0 0x0f040
19936+#define MRST_FPA1 0x0f044
19937+#define MRST_PERF_MODE 0x020f4
19938+
19939+/* #define LVDS 0x61180 */
19940+# define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
19941+# define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
19942+# define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
19943+
19944+#define MIPI 0x61190
19945+# define MIPI_PORT_EN (1 << 31)
19946+/** Turns on border drawing to allow centered display. */
19947+# define MIPI_BORDER_EN (1 << 15)
19948+
19949+/* #define PP_CONTROL 0x61204 */
19950+# define POWER_DOWN_ON_RESET (1 << 1)
19951+
19952+/* #define PFIT_CONTROL 0x61230 */
19953+# define PFIT_PIPE_SELECT (3 << 29)
19954+# define PFIT_PIPE_SELECT_SHIFT (29)
19955+
19956+/* #define BLC_PWM_CTL 0x61254 */
19957+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
19958+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
19959+
19960+/* #define PIPEACONF 0x70008 */
19961+#define PIPEACONF_PIPE_STATE (1<<30)
19962+/* #define DSPACNTR 0x70180 */
19963+#if 0 /*FIXME JLIU7 need to define the following */
19964+1000 = 32-bit RGBX (10:10:10:2) pixel format. Ignore alpha.
19965+1010 = BGRX (10:10:10:2)
19966+1100 = 64-bit RGBX (16:16:16:16) 16-bit floating point
19967+       pixel format. Ignore alpha.
19968+1110 = 32-bit RGBX (8:8:8:8) pixel format.
19969+       Ignore alpha.
19970+#endif /*FIXME JLIU7 need to define the following */
19971+
19972+#define MRST_DSPABASE 0x7019c
19973+
19974+/*
19975+ * MOORESTOWN reserved registers
19976+ */
19977+#if 0
19978+#define DSPAPOS 0x7018C /* reserved */
19979+#define DSPASIZE 0x70190
19980+#endif
19981+/*
19982+ * Moorestown registers.
19983+ */
19984+/*===========================================================================
19985+; General Constants
19986+;--------------------------------------------------------------------------*/
19987+#define BIT0 0x00000001
19988+#define BIT1 0x00000002
19989+#define BIT2 0x00000004
19990+#define BIT3 0x00000008
19991+#define BIT4 0x00000010
19992+#define BIT5 0x00000020
19993+#define BIT6 0x00000040
19994+#define BIT7 0x00000080
19995+#define BIT8 0x00000100
19996+#define BIT9 0x00000200
19997+#define BIT10 0x00000400
19998+#define BIT11 0x00000800
19999+#define BIT12 0x00001000
20000+#define BIT13 0x00002000
20001+#define BIT14 0x00004000
20002+#define BIT15 0x00008000
20003+#define BIT16 0x00010000
20004+#define BIT17 0x00020000
20005+#define BIT18 0x00040000
20006+#define BIT19 0x00080000
20007+#define BIT20 0x00100000
20008+#define BIT21 0x00200000
20009+#define BIT22 0x00400000
20010+#define BIT23 0x00800000
20011+#define BIT24 0x01000000
20012+#define BIT25 0x02000000
20013+#define BIT26 0x04000000
20014+#define BIT27 0x08000000
20015+#define BIT28 0x10000000
20016+#define BIT29 0x20000000
20017+#define BIT30 0x40000000
20018+#define BIT31 0x80000000
20019+/*===========================================================================
20020+; MIPI IP registers
20021+;--------------------------------------------------------------------------*/
20022+#define DEVICE_READY_REG 0xb000
20023+#define INTR_STAT_REG 0xb004
20024+#define RX_SOT_ERROR BIT0
20025+#define RX_SOT_SYNC_ERROR BIT1
20026+#define RX_ESCAPE_MODE_ENTRY_ERROR BIT3
20027+#define RX_LP_TX_SYNC_ERROR BIT4
20028+#define RX_HS_RECEIVE_TIMEOUT_ERROR BIT5
20029+#define RX_FALSE_CONTROL_ERROR BIT6
20030+#define RX_ECC_SINGLE_BIT_ERROR BIT7
20031+#define RX_ECC_MULTI_BIT_ERROR BIT8
20032+#define RX_CHECKSUM_ERROR BIT9
20033+#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT10
20034+#define RX_DSI_VC_ID_INVALID BIT11
20035+#define TX_FALSE_CONTROL_ERROR BIT12
20036+#define TX_ECC_SINGLE_BIT_ERROR BIT13
20037+#define TX_ECC_MULTI_BIT_ERROR BIT14
20038+#define TX_CHECKSUM_ERROR BIT15
20039+#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT16
20040+#define TX_DSI_VC_ID_INVALID BIT17
20041+#define HIGH_CONTENTION BIT18
20042+#define LOW_CONTENTION BIT19
20043+#define DPI_FIFO_UNDER_RUN BIT20
20044+#define HS_TX_TIMEOUT BIT21
20045+#define LP_RX_TIMEOUT BIT22
20046+#define TURN_AROUND_ACK_TIMEOUT BIT23
20047+#define ACK_WITH_NO_ERROR BIT24
20048+#define INTR_EN_REG 0xb008
20049+#define DSI_FUNC_PRG_REG 0xb00c
20050+#define DPI_CHANNEL_NUMBER_POS 0x03
20051+#define DBI_CHANNEL_NUMBER_POS 0x05
20052+#define FMT_DPI_POS 0x07
20053+#define FMT_DBI_POS 0x0A
20054+#define DBI_DATA_WIDTH_POS 0x0D
20055+#define HS_TX_TIMEOUT_REG 0xb010
20056+#define LP_RX_TIMEOUT_REG 0xb014
20057+#define TURN_AROUND_TIMEOUT_REG 0xb018
20058+#define DEVICE_RESET_REG 0xb01C
20059+#define DPI_RESOLUTION_REG 0xb020
20060+#define RES_V_POS 0x10
20061+#define DBI_RESOLUTION_REG 0xb024
20062+#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
20063+#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
20064+#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
20065+#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
20066+#define VERT_SYNC_PAD_COUNT_REG 0xb038
20067+#define VERT_BACK_PORCH_COUNT_REG 0xb03c
20068+#define VERT_FRONT_PORCH_COUNT_REG 0xb040
20069+#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
20070+#define DPI_CONTROL_REG 0xb048
20071+#define DPI_SHUT_DOWN BIT0
20072+#define DPI_TURN_ON BIT1
20073+#define DPI_COLOR_MODE_ON BIT2
20074+#define DPI_COLOR_MODE_OFF BIT3
20075+#define DPI_BACK_LIGHT_ON BIT4
20076+#define DPI_BACK_LIGHT_OFF BIT5
20077+#define DPI_LP BIT6
20078+#define DPI_DATA_REG 0xb04c
20079+#define DPI_BACK_LIGHT_ON_DATA 0x07
20080+#define DPI_BACK_LIGHT_OFF_DATA 0x17
20081+#define INIT_COUNT_REG 0xb050
20082+#define MAX_RET_PAK_REG 0xb054
20083+#define VIDEO_FMT_REG 0xb058
20084+#define EOT_DISABLE_REG 0xb05c
20085+#define LP_BYTECLK_REG 0xb060
20086+#define LP_GEN_DATA_REG 0xb064
20087+#define HS_GEN_DATA_REG 0xb068
20088+#define LP_GEN_CTRL_REG 0xb06C
20089+#define HS_GEN_CTRL_REG 0xb070
20090+#define GEN_FIFO_STAT_REG 0xb074
20091+#define HS_DATA_FIFO_FULL BIT0
20092+#define HS_DATA_FIFO_HALF_EMPTY BIT1
20093+#define HS_DATA_FIFO_EMPTY BIT2
20094+#define LP_DATA_FIFO_FULL BIT8
20095+#define LP_DATA_FIFO_HALF_EMPTY BIT9
20096+#define LP_DATA_FIFO_EMPTY BIT10
20097+#define HS_CTRL_FIFO_FULL BIT16
20098+#define HS_CTRL_FIFO_HALF_EMPTY BIT17
20099+#define HS_CTRL_FIFO_EMPTY BIT18
20100+#define LP_CTRL_FIFO_FULL BIT24
20101+#define LP_CTRL_FIFO_HALF_EMPTY BIT25
20102+#define LP_CTRL_FIFO_EMPTY BIT26
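The GEN_FIFO_STAT_REG bits above report the fill level of the generic high-speed and low-power data/control FIFOs; a DSI transmit path would normally poll them before queueing another packet. That transmit code is not part of this header, so the following is only a self-contained sketch of such a poll loop, with reg_read() standing in for a real MMIO accessor:

#include <stdint.h>
#include <stdio.h>

#define GEN_FIFO_STAT_REG	0xb074
#define HS_DATA_FIFO_FULL	0x00000001	/* BIT0 */

/* Stand-in for a real MMIO read; here it simply pretends the FIFO has room. */
static uint32_t reg_read(uint32_t reg)
{
	(void)reg;
	return 0;
}

/* Returns 0 once the HS data FIFO has room, -1 after too many polls. */
static int wait_for_hs_data_fifo(void)
{
	int retries = 1000;

	while (retries--) {
		if (!(reg_read(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL))
			return 0;
	}
	return -1;
}

int main(void)
{
	printf("HS data FIFO wait: %d\n", wait_for_hs_data_fifo());
	return 0;
}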
20103+/*===========================================================================
20104+; MIPI Adapter registers
20105+;--------------------------------------------------------------------------*/
20106+#define MIPI_CONTROL_REG 0xb104
20107+#define MIPI_2X_CLOCK_BITS (BIT0 | BIT1)
20108+#define MIPI_DATA_ADDRESS_REG 0xb108
20109+#define MIPI_DATA_LENGTH_REG 0xb10C
20110+#define MIPI_COMMAND_ADDRESS_REG 0xb110
20111+#define MIPI_COMMAND_LENGTH_REG 0xb114
20112+#define MIPI_READ_DATA_RETURN_REG0 0xb118
20113+#define MIPI_READ_DATA_RETURN_REG1 0xb11C
20114+#define MIPI_READ_DATA_RETURN_REG2 0xb120
20115+#define MIPI_READ_DATA_RETURN_REG3 0xb124
20116+#define MIPI_READ_DATA_RETURN_REG4 0xb128
20117+#define MIPI_READ_DATA_RETURN_REG5 0xb12C
20118+#define MIPI_READ_DATA_RETURN_REG6 0xb130
20119+#define MIPI_READ_DATA_RETURN_REG7 0xb134
20120+#define MIPI_READ_DATA_VALID_REG 0xb138
20121+/* DBI COMMANDS */
20122+#define soft_reset 0x01
20123+/* ************************************************************************* *\
20124+The display module performs a software reset.
20125+Registers are written with their SW Reset default values.
20126+\* ************************************************************************* */
20127+#define get_power_mode 0x0a
20128+/* ************************************************************************* *\
20129+The display module returns the current power mode
20130+\* ************************************************************************* */
20131+#define get_address_mode 0x0b
20132+/* ************************************************************************* *\
20133+The display module returns the current address mode.
20134+\* ************************************************************************* */
20135+#define get_pixel_format 0x0c
20136+/* ************************************************************************* *\
20137+This command gets the pixel format for the RGB image data
20138+used by the interface.
20139+\* ************************************************************************* */
20140+#define get_display_mode 0x0d
20141+/* ************************************************************************* *\
20142+The display module returns the Display Image Mode status.
20143+\* ************************************************************************* */
20144+#define get_signal_mode 0x0e
20145+/* ************************************************************************* *\
20146+The display module returns the Display Signal Mode.
20147+\* ************************************************************************* */
20148+#define get_diagnostic_result 0x0f
20149+/* ************************************************************************* *\
20150+The display module returns the self-diagnostic results following
20151+a Sleep Out command.
20152+\* ************************************************************************* */
20153+#define enter_sleep_mode 0x10
20154+/* ************************************************************************* *\
20155+This command causes the display module to enter the Sleep mode.
20156+In this mode, all unnecessary blocks inside the display module are disabled
20157+except interface communication. This is the lowest power mode
20158+the display module supports.
20159+\* ************************************************************************* */
20160+#define exit_sleep_mode 0x11
20161+/* ************************************************************************* *\
20162+This command causes the display module to exit Sleep mode.
20163+All blocks inside the display module are enabled.
20164+\* ************************************************************************* */
20165+#define enter_partial_mode 0x12
20166+/* ************************************************************************* *\
20167+This command causes the display module to enter the Partial Display Mode.
20168+The Partial Display Mode window is described by the set_partial_area command.
20169+\* ************************************************************************* */
20170+#define enter_normal_mode 0x13
20171+/* ************************************************************************* *\
20172+This command causes the display module to enter the Normal mode.
20173+Normal Mode is the mode in which Partial Display mode and Scroll mode are both off.
20174+\* ************************************************************************* */
20175+#define exit_invert_mode 0x20
20176+/* ************************************************************************* *\
20177+This command causes the display module to stop inverting the image data on
20178+the display device. The frame memory contents remain unchanged.
20179+No status bits are changed.
20180+\* ************************************************************************* */
20181+#define enter_invert_mode 0x21
20182+/* ************************************************************************* *\
20183+This command causes the display module to invert the image data only on
20184+the display device. The frame memory contents remain unchanged.
20185+No status bits are changed.
20186+\* ************************************************************************* */
20187+#define set_gamma_curve 0x26
20188+/* ************************************************************************* *\
20189+This command selects the desired gamma curve for the display device.
20190+Four fixed gamma curves are defined in the DCS specification.
20191+\* ************************************************************************* */
20192+#define set_display_off 0x28
20193+/* ************************************************************************* *\
20194+This command causes the display module to stop displaying the image data
20195+on the display device. The frame memory contents remain unchanged.
20196+No status bits are changed.
20197+\* ************************************************************************* */
20198+#define set_display_on 0x29
20199+/* ************************************************************************* *\
20200+This command causes the display module to start displaying the image data
20201+on the display device. The frame memory contents remain unchanged.
20202+No status bits are changed.
20203+\* ************************************************************************* */
20204+#define set_column_address 0x2a
20205+/* ************************************************************************* *\
20206+This command defines the column extent of the frame memory accessed by the
20207+host processor with the read_memory_continue and write_memory_continue commands.
20208+No status bits are changed.
20209+\* ************************************************************************* */
20210+#define set_page_address 0x2b
20211+/* ************************************************************************* *\
20212+This command defines the page extent of the frame memory accessed by the host
20213+processor with the write_memory_continue and read_memory_continue commands.
20214+No status bits are changed.
20215+\* ************************************************************************* */
20216+#define write_mem_start 0x2c
20217+/* ************************************************************************* *\
20218+This command transfers image data from the host processor to the display
20219+module's frame memory starting at the pixel location specified by
20220+preceding set_column_address and set_page_address commands.
20221+\* ************************************************************************* */
20222+#define set_partial_area 0x30
20223+/* ************************************************************************* *\
20224+This command defines the Partial Display mode's display area.
20225+There are two parameters associated with
20226+this command, the first defines the Start Row (SR) and the second the End Row
20227+(ER). SR and ER refer to the Frame Memory Line Pointer.
20228+\* ************************************************************************* */
20229+#define set_scroll_area 0x33
20230+/* ************************************************************************* *\
20231+This command defines the display module's Vertical Scrolling Area.
20232+\* ************************************************************************* */
20233+#define set_tear_off 0x34
20234+/* ************************************************************************* *\
20235+This command turns off the display module's Tearing Effect output signal on
20236+the TE signal line.
20237+\* ************************************************************************* */
20238+#define set_tear_on 0x35
20239+/* ************************************************************************* *\
20240+This command turns on the display module's Tearing Effect output signal
20241+on the TE signal line.
20242+\* ************************************************************************* */
20243+#define set_address_mode 0x36
20244+/* ************************************************************************* *\
20245+This command sets the data order for transfers from the host processor to
20246+the display module's frame memory, bits B[7:5] and B3, and from the display
20247+module's frame memory to the display device, bits B[2:0] and B4.
20248+\* ************************************************************************* */
20249+#define set_scroll_start 0x37
20250+/* ************************************************************************* *\
20251+This command sets the start of the vertical scrolling area in the frame memory.
20252+The vertical scrolling area is fully defined when this command is used with
20253+the set_scroll_area command. The set_scroll_start command has one parameter,
20254+the Vertical Scroll Pointer. The VSP defines the line in the frame memory
20255+that is written to the display device as the first line of the vertical
20256+scroll area.
20257+\* ************************************************************************* */
20258+#define exit_idle_mode 0x38
20259+/* ************************************************************************* *\
20260+This command causes the display module to exit Idle mode.
20261+\* ************************************************************************* */
20262+#define enter_idle_mode 0x39
20263+/* ************************************************************************* *\
20264+This command causes the display module to enter Idle Mode.
20265+In Idle Mode, color expression is reduced. Colors are shown on the display
20266+device using the MSB of each of the R, G and B color components in the frame
20267+memory.
20268+\* ************************************************************************* */
20269+#define set_pixel_format 0x3a
20270+/* ************************************************************************* *\
20271+This command sets the pixel format for the RGB image data used by the interface.
20272+Bits D[6:4] DPI Pixel Format Definition
20273+Bits D[2:0] DBI Pixel Format Definition
20274+Bits D7 and D3 are not used.
20275+\* ************************************************************************* */
20276+#define write_mem_cont 0x3c
20277+/* ************************************************************************* *\
20278+This command transfers image data from the host processor to the display
20279+module's frame memory continuing from the pixel location following the
20280+previous write_memory_continue or write_memory_start command.
20281+\* ************************************************************************* */
20282+#define set_tear_scanline 0x44
20283+/* ************************************************************************* *\
20284+This command turns on the display module's Tearing Effect output signal on the
20285+TE signal line when the display module reaches line N.
20286+\* ************************************************************************* */
20287+#define get_scanline 0x45
20288+/* ************************************************************************* *\
20289+The display module returns the current scanline, N, used to update the
20290+display device. The total number of scanlines on a display device is
20291+defined as VSYNC + VBP + VACT + VFP. The first scanline is defined as
20292+the first line of V Sync and is denoted as Line 0.
20293+When in Sleep Mode, the value returned by get_scanline is undefined.
20294+\* ************************************************************************* */
20295+/* DCS Interface Pixel Formats */
20296+#define DCS_PIXEL_FORMAT_3BPP 0x1
20297+#define DCS_PIXEL_FORMAT_8BPP 0x2
20298+#define DCS_PIXEL_FORMAT_12BPP 0x3
20299+#define DCS_PIXEL_FORMAT_16BPP 0x5
20300+#define DCS_PIXEL_FORMAT_18BPP 0x6
20301+#define DCS_PIXEL_FORMAT_24BPP 0x7
20302+/* ONE PARAMETER READ DATA */
20303+#define addr_mode_data 0xfc
20304+#define diag_res_data 0x00
20305+#define disp_mode_data 0x23
20306+#define pxl_fmt_data 0x77
20307+#define pwr_mode_data 0x74
20308+#define sig_mode_data 0x00
20309+/* TWO PARAMETERS READ DATA */
20310+#define scanline_data1 0xff
20311+#define scanline_data2 0xff
20312+/* DPI PIXEL FORMATS */
20313+#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
20314+#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
20315+#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
20316+ * 666 FORMAT
20317+ */
20318+#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
20319+#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
20320+ * with Sync Pulse
20321+ */
20322+#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode
20323+ * with Sync events
20324+ */
20325+#define BURST_MODE 0x03 /* Burst Mode */
20326+#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
20327+#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
20328+#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
20329+#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
20330+#define DBI_NOT_SUPPORTED 0x00 /* command mode
20331+ * is not supported
20332+ */
20333+#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
20334+#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
20335+#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
20336+#define DBI_COMMAND_BUFFER_SIZE 0x120 /* Allocate at least
20337+ * 0x100 Byte with 32
20338+ * byte alignment
20339+ */
20340+#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least
20341+ * 0x100 Byte with 32
20342+ * byte alignment
20343+ */
20344+#define ALIGNMENT_32BYTE_MASK (~(BIT0|BIT1|BIT2|BIT3|BIT4))
20345+#define SKU_83 0x01
20346+#define SKU_100 0x02
20347+#define SKU_100L 0x04
20348+#define SKU_BYPASS 0x08
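The lower-case opcodes above (soft_reset through get_scanline) follow the MIPI DCS convention of a one-byte command optionally followed by parameter bytes; set_column_address and set_page_address, for instance, each carry two 16-bit values sent most-significant byte first. A standalone sketch of packing such a command (the flat buffer layout is illustrative only, not the adapter's actual DBI packet format):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define set_column_address 0x2a

/* Pack opcode + start/end column, 16-bit values MSB first per the DCS convention. */
static size_t pack_set_column_address(uint8_t *buf, uint16_t sc, uint16_t ec)
{
	buf[0] = set_column_address;
	buf[1] = sc >> 8;
	buf[2] = sc & 0xff;
	buf[3] = ec >> 8;
	buf[4] = ec & 0xff;
	return 5;
}

int main(void)
{
	uint8_t buf[5];
	size_t i, n = pack_set_column_address(buf, 0, 479);

	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");	/* prints: 2a 00 00 01 df */
	return 0;
}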
20349+#if 0
20350+/* ************************************************************************* *\
20351+DSI command data structure
20352+\* ************************************************************************* */
20353+union DSI_LONG_PACKET_HEADER {
20354+ u32 DSI_longPacketHeader;
20355+ struct {
20356+ u8 dataID;
20357+ u16 wordCount;
20358+ u8 ECC;
20359+ };
20360+#if 0 /*FIXME JLIU7 */
20361+ struct {
20362+ u8 DT:6;
20363+ u8 VC:2;
20364+ };
20365+#endif /*FIXME JLIU7 */
20366+};
20367+
20368+union MIPI_ADPT_CMD_LNG_REG {
20369+	u32 commandLengthReg;
20370+ struct {
20371+ u8 command0;
20372+ u8 command1;
20373+ u8 command2;
20374+ u8 command3;
20375+ };
20376+};
20377+
20378+struct SET_COLUMN_ADDRESS_DATA {
20379+ u8 command;
20380+ u16 SC; /* Start Column */
20381+ u16 EC; /* End Column */
20382+};
20383+
20384+struct SET_PAGE_ADDRESS_DATA {
20385+ u8 command;
20386+ u16 SP; /* Start Page */
20387+ u16 EP; /* End Page */
20388+};
20389+#endif
20390diff --git a/drivers/gpu/drm/psb/psb_intel_sdvo.c b/drivers/gpu/drm/psb/psb_intel_sdvo.c
20391new file mode 100644
20392index 0000000..9f68d8d
20393--- /dev/null
20394+++ b/drivers/gpu/drm/psb/psb_intel_sdvo.c
20395@@ -0,0 +1,1350 @@
20396+/*
20397+ * Copyright © 2006-2007 Intel Corporation
20398+ *
20399+ * Permission is hereby granted, free of charge, to any person obtaining a
20400+ * copy of this software and associated documentation files (the "Software"),
20401+ * to deal in the Software without restriction, including without limitation
20402+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20403+ * and/or sell copies of the Software, and to permit persons to whom the
20404+ * Software is furnished to do so, subject to the following conditions:
20405+ *
20406+ * The above copyright notice and this permission notice (including the next
20407+ * paragraph) shall be included in all copies or substantial portions of the
20408+ * Software.
20409+ *
20410+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20411+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20412+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20413+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20414+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20415+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20416+ * DEALINGS IN THE SOFTWARE.
20417+ *
20418+ * Authors:
20419+ * Eric Anholt <eric@anholt.net>
20420+ */
20421+/*
20422+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
20423+ * Jesse Barnes <jesse.barnes@intel.com>
20424+ */
20425+
20426+#include <linux/i2c.h>
20427+#include <linux/delay.h>
20428+#include <drm/drm_crtc.h>
20429+#include "psb_intel_sdvo_regs.h"
20430+
20431+struct psb_intel_sdvo_priv {
20432+ struct psb_intel_i2c_chan *i2c_bus;
20433+ int slaveaddr;
20434+ int output_device;
20435+
20436+ u16 active_outputs;
20437+
20438+ struct psb_intel_sdvo_caps caps;
20439+ int pixel_clock_min, pixel_clock_max;
20440+
20441+ int save_sdvo_mult;
20442+ u16 save_active_outputs;
20443+ struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
20444+ struct psb_intel_sdvo_dtd save_output_dtd[16];
20445+ u32 save_SDVOX;
20446+ u8 in_out_map[4];
20447+
20448+ u8 by_input_wiring;
20449+ u32 active_device;
20450+};
20451+
20452+/**
20453+ * Writes the SDVOB or SDVOC with the given value, but always writes both
20454+ * SDVOB and SDVOC to work around apparent hardware issues (according to
20455+ * comments in the BIOS).
20456+ */
20457+void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output, u32 val)
20458+{
20459+ struct drm_device *dev = psb_intel_output->base.dev;
20460+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
20461+ u32 bval = val, cval = val;
20462+ int i;
20463+
20464+ if (sdvo_priv->output_device == SDVOB)
20465+ cval = REG_READ(SDVOC);
20466+ else
20467+ bval = REG_READ(SDVOB);
20468+ /*
20469+ * Write the registers twice for luck. Sometimes,
20470+ * writing them only once doesn't appear to 'stick'.
20471+ * The BIOS does this too. Yay, magic
20472+ */
20473+ for (i = 0; i < 2; i++) {
20474+ REG_WRITE(SDVOB, bval);
20475+ REG_READ(SDVOB);
20476+ REG_WRITE(SDVOC, cval);
20477+ REG_READ(SDVOC);
20478+ }
20479+}
20480+
20481+static bool psb_intel_sdvo_read_byte(struct psb_intel_output *psb_intel_output,
20482+ u8 addr, u8 *ch)
20483+{
20484+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
20485+ u8 out_buf[2];
20486+ u8 buf[2];
20487+ int ret;
20488+
20489+ struct i2c_msg msgs[] = {
20490+ {
20491+ .addr = sdvo_priv->i2c_bus->slave_addr,
20492+ .flags = 0,
20493+ .len = 1,
20494+ .buf = out_buf,
20495+ },
20496+ {
20497+ .addr = sdvo_priv->i2c_bus->slave_addr,
20498+ .flags = I2C_M_RD,
20499+ .len = 1,
20500+ .buf = buf,
20501+ }
20502+ };
20503+
20504+ out_buf[0] = addr;
20505+ out_buf[1] = 0;
20506+
20507+ ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
20508+ if (ret == 2) {
20509+ /* DRM_DEBUG("got back from addr %02X = %02x\n",
20510+ * out_buf[0], buf[0]);
20511+ */
20512+ *ch = buf[0];
20513+ return true;
20514+ }
20515+
20516+ DRM_DEBUG("i2c transfer returned %d\n", ret);
20517+ return false;
20518+}
20519+
20520+static bool psb_intel_sdvo_write_byte(struct psb_intel_output *psb_intel_output,
20521+ int addr, u8 ch)
20522+{
20523+ u8 out_buf[2];
20524+ struct i2c_msg msgs[] = {
20525+ {
20526+ .addr = psb_intel_output->i2c_bus->slave_addr,
20527+ .flags = 0,
20528+ .len = 2,
20529+ .buf = out_buf,
20530+ }
20531+ };
20532+
20533+ out_buf[0] = addr;
20534+ out_buf[1] = ch;
20535+
20536+ if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1)
20537+ return true;
20538+ return false;
20539+}
20540+
20541+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
20542+/** Mapping of command numbers to names, for debug output */
20543+const static struct _sdvo_cmd_name {
20544+ u8 cmd;
20545+ char *name;
20546+} sdvo_cmd_names[] = {
20547+SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
20548+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
20549+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
20550+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
20551+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
20552+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
20553+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
20554+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
20555+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
20556+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
20557+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
20558+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
20559+ SDVO_CMD_NAME_ENTRY
20560+ (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
20561+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
20562+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
20563+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
20564+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
20565+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
20566+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
20567+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
20568+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
20569+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
20570+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
20571+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
20572+ SDVO_CMD_NAME_ENTRY
20573+ (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
20574+ SDVO_CMD_NAME_ENTRY
20575+ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
20576+ SDVO_CMD_NAME_ENTRY
20577+ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
20578+ SDVO_CMD_NAME_ENTRY
20579+ (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
20580+ SDVO_CMD_NAME_ENTRY
20581+ (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
20582+ SDVO_CMD_NAME_ENTRY
20583+ (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
20584+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
20585+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
20586+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
20587+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
20588+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
20589+ SDVO_CMD_NAME_ENTRY
20590+ (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
20591+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),};
20592+
20593+#define SDVO_NAME(dev_priv) \
20594+ ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
20595+#define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv)
20596+
20597+static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output, u8 cmd,
20598+ void *args, int args_len)
20599+{
20600+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
20601+ int i;
20602+
20603+ if (1) {
20604+ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
20605+ for (i = 0; i < args_len; i++)
20606+ printk(KERN_INFO"%02X ", ((u8 *) args)[i]);
20607+ for (; i < 8; i++)
20608+ printk(" ");
20609+ for (i = 0;
20610+ i <
20611+ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]);
20612+ i++) {
20613+ if (cmd == sdvo_cmd_names[i].cmd) {
20614+ printk("(%s)", sdvo_cmd_names[i].name);
20615+ break;
20616+ }
20617+ }
20618+ if (i ==
20619+ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
20620+ printk("(%02X)", cmd);
20621+ printk("\n");
20622+ }
20623+
20624+ for (i = 0; i < args_len; i++) {
20625+ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_ARG_0 - i,
20626+ ((u8 *) args)[i]);
20627+ }
20628+
20629+ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd);
20630+}
20631+
20632+static const char *cmd_status_names[] = {
20633+ "Power on",
20634+ "Success",
20635+ "Not supported",
20636+ "Invalid arg",
20637+ "Pending",
20638+ "Target not specified",
20639+ "Scaling not supported"
20640+};
20641+
20642+static u8 psb_intel_sdvo_read_response(struct psb_intel_output *psb_intel_output,
20643+ void *response, int response_len)
20644+{
20645+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
20646+ int i;
20647+ u8 status;
20648+ u8 retry = 50;
20649+
20650+ while (retry--) {
20651+ /* Read the command response */
20652+ for (i = 0; i < response_len; i++) {
20653+ psb_intel_sdvo_read_byte(psb_intel_output,
20654+ SDVO_I2C_RETURN_0 + i,
20655+ &((u8 *) response)[i]);
20656+ }
20657+
20658+ /* read the return status */
20659+ psb_intel_sdvo_read_byte(psb_intel_output, SDVO_I2C_CMD_STATUS,
20660+ &status);
20661+
20662+ if (1) {
20663+ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
20664+ for (i = 0; i < response_len; i++)
20665+ printk(KERN_INFO"%02X ", ((u8 *) response)[i]);
20666+ for (; i < 8; i++)
20667+ printk(" ");
20668+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
20669+ printk(KERN_INFO"(%s)",
20670+ cmd_status_names[status]);
20671+ else
20672+ printk(KERN_INFO"(??? %d)", status);
20673+ printk("\n");
20674+ }
20675+
20676+ if (status != SDVO_CMD_STATUS_PENDING)
20677+ return status;
20678+
20679+ mdelay(50);
20680+ }
20681+
20682+ return status;
20683+}
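psb_intel_sdvo_write_cmd() and psb_intel_sdvo_read_response() together form the usual SDVO command round-trip: write the argument bytes and the opcode over I2C, then poll the status register (retrying while it still reports pending) and fetch the return bytes. The rest of the file repeats that pattern by hand; a combined wrapper in the same style would look roughly like the sketch below (illustrative only, not part of the original patch):

/* Illustrative helper, not in the original driver: issue one SDVO command
 * and report whether the device answered with a success status. */
static bool psb_intel_sdvo_simple_cmd(struct psb_intel_output *output, u8 cmd,
				      void *args, int args_len,
				      void *resp, int resp_len)
{
	u8 status;

	psb_intel_sdvo_write_cmd(output, cmd, args, args_len);
	status = psb_intel_sdvo_read_response(output, resp, resp_len);

	return status == SDVO_CMD_STATUS_SUCCESS;
}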
20684+
20685+int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
20686+{
20687+ if (mode->clock >= 100000)
20688+ return 1;
20689+ else if (mode->clock >= 50000)
20690+ return 2;
20691+ else
20692+ return 4;
20693+}
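In other words, psb_intel_sdvo_get_pixel_multiplier() picks the smallest multiplier that lifts the SDVO link clock to at least 100 MHz (mode->clock is in kHz): a 148,500 kHz mode gets 1x, a 65,000 kHz mode gets 2x (130 MHz on the link), and a 25,175 kHz mode gets 4x (about 100.7 MHz). psb_intel_sdvo_mode_fixup() below scales adjusted_mode->clock by this factor so the CRTC is programmed with the multiplied clock, and psb_intel_sdvo_mode_set() then writes the matching SDVO_CLOCK_RATE_MULT_*X value to the device.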
20694+
20695+/**
20696+ * Don't check status code from this as it switches the bus back to the
20697+ * SDVO chips which defeats the purpose of doing a bus switch in the first
20698+ * place.
20699+ */
20700+void psb_intel_sdvo_set_control_bus_switch(struct psb_intel_output *psb_intel_output,
20701+ u8 target)
20702+{
20703+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
20704+ &target, 1);
20705+}
20706+
20707+static bool psb_intel_sdvo_set_target_input(struct psb_intel_output *psb_intel_output,
20708+ bool target_0, bool target_1)
20709+{
20710+ struct psb_intel_sdvo_set_target_input_args targets = { 0 };
20711+ u8 status;
20712+
20713+ if (target_0 && target_1)
20714+ return SDVO_CMD_STATUS_NOTSUPP;
20715+
20716+ if (target_1)
20717+ targets.target_1 = 1;
20718+
20719+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT,
20720+ &targets, sizeof(targets));
20721+
20722+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20723+
20724+ return status == SDVO_CMD_STATUS_SUCCESS;
20725+}
20726+
20727+/**
20728+ * Return whether each input is trained.
20729+ *
20730+ * This function is making an assumption about the layout of the response,
20731+ * which should be checked against the docs.
20732+ */
20733+static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output
20734+ *psb_intel_output, bool *input_1,
20735+ bool *input_2)
20736+{
20737+ struct psb_intel_sdvo_get_trained_inputs_response response;
20738+ u8 status;
20739+
20740+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS,
20741+ NULL, 0);
20742+ status =
20743+ psb_intel_sdvo_read_response(psb_intel_output, &response,
20744+ sizeof(response));
20745+ if (status != SDVO_CMD_STATUS_SUCCESS)
20746+ return false;
20747+
20748+ *input_1 = response.input0_trained;
20749+ *input_2 = response.input1_trained;
20750+ return true;
20751+}
20752+
20753+static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output
20754+ *psb_intel_output, u16 *outputs)
20755+{
20756+ u8 status;
20757+
20758+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS,
20759+ NULL, 0);
20760+ status =
20761+ psb_intel_sdvo_read_response(psb_intel_output, outputs,
20762+ sizeof(*outputs));
20763+
20764+ return status == SDVO_CMD_STATUS_SUCCESS;
20765+}
20766+
20767+static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output
20768+ *psb_intel_output, u16 outputs)
20769+{
20770+ u8 status;
20771+
20772+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS,
20773+ &outputs, sizeof(outputs));
20774+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20775+ return status == SDVO_CMD_STATUS_SUCCESS;
20776+}
20777+
20778+static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output
20779+ *psb_intel_output, int mode)
20780+{
20781+ u8 status, state = SDVO_ENCODER_STATE_ON;
20782+
20783+ switch (mode) {
20784+ case DRM_MODE_DPMS_ON:
20785+ state = SDVO_ENCODER_STATE_ON;
20786+ break;
20787+ case DRM_MODE_DPMS_STANDBY:
20788+ state = SDVO_ENCODER_STATE_STANDBY;
20789+ break;
20790+ case DRM_MODE_DPMS_SUSPEND:
20791+ state = SDVO_ENCODER_STATE_SUSPEND;
20792+ break;
20793+ case DRM_MODE_DPMS_OFF:
20794+ state = SDVO_ENCODER_STATE_OFF;
20795+ break;
20796+ }
20797+
20798+ psb_intel_sdvo_write_cmd(psb_intel_output,
20799+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
20800+ sizeof(state));
20801+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20802+
20803+ return status == SDVO_CMD_STATUS_SUCCESS;
20804+}
20805+
20806+static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output
20807+ *psb_intel_output,
20808+ int *clock_min,
20809+ int *clock_max)
20810+{
20811+ struct psb_intel_sdvo_pixel_clock_range clocks;
20812+ u8 status;
20813+
20814+ psb_intel_sdvo_write_cmd(psb_intel_output,
20815+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL,
20816+ 0);
20817+
20818+ status =
20819+ psb_intel_sdvo_read_response(psb_intel_output, &clocks,
20820+ sizeof(clocks));
20821+
20822+ if (status != SDVO_CMD_STATUS_SUCCESS)
20823+ return false;
20824+
20825+ /* Convert the values from units of 10 kHz to kHz. */
20826+ *clock_min = clocks.min * 10;
20827+ *clock_max = clocks.max * 10;
20828+
20829+ return true;
20830+}
20831+
20832+static bool psb_intel_sdvo_set_target_output(struct psb_intel_output *psb_intel_output,
20833+ u16 outputs)
20834+{
20835+ u8 status;
20836+
20837+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT,
20838+ &outputs, sizeof(outputs));
20839+
20840+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20841+ return status == SDVO_CMD_STATUS_SUCCESS;
20842+}
20843+
20844+static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output,
20845+ u8 cmd, struct psb_intel_sdvo_dtd *dtd)
20846+{
20847+ u8 status;
20848+
20849+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0);
20850+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
20851+ sizeof(dtd->part1));
20852+ if (status != SDVO_CMD_STATUS_SUCCESS)
20853+ return false;
20854+
20855+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0);
20856+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
20857+ sizeof(dtd->part2));
20858+ if (status != SDVO_CMD_STATUS_SUCCESS)
20859+ return false;
20860+
20861+ return true;
20862+}
20863+
20864+static bool psb_intel_sdvo_get_input_timing(struct psb_intel_output *psb_intel_output,
20865+ struct psb_intel_sdvo_dtd *dtd)
20866+{
20867+ return psb_intel_sdvo_get_timing(psb_intel_output,
20868+ SDVO_CMD_GET_INPUT_TIMINGS_PART1,
20869+ dtd);
20870+}
20871+#if 0
20872+static bool psb_intel_sdvo_get_output_timing(struct psb_intel_output *psb_intel_output,
20873+ struct psb_intel_sdvo_dtd *dtd)
20874+{
20875+ return psb_intel_sdvo_get_timing(psb_intel_output,
20876+ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1,
20877+ dtd);
20878+}
20879+#endif
20880+static bool psb_intel_sdvo_set_timing(struct psb_intel_output *psb_intel_output,
20881+ u8 cmd, struct psb_intel_sdvo_dtd *dtd)
20882+{
20883+ u8 status;
20884+
20885+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1,
20886+ sizeof(dtd->part1));
20887+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20888+ if (status != SDVO_CMD_STATUS_SUCCESS)
20889+ return false;
20890+
20891+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2,
20892+ sizeof(dtd->part2));
20893+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20894+ if (status != SDVO_CMD_STATUS_SUCCESS)
20895+ return false;
20896+
20897+ return true;
20898+}
20899+
20900+static bool psb_intel_sdvo_set_input_timing(struct psb_intel_output *psb_intel_output,
20901+ struct psb_intel_sdvo_dtd *dtd)
20902+{
20903+ return psb_intel_sdvo_set_timing(psb_intel_output,
20904+ SDVO_CMD_SET_INPUT_TIMINGS_PART1,
20905+ dtd);
20906+}
20907+
20908+static bool psb_intel_sdvo_set_output_timing(struct psb_intel_output *psb_intel_output,
20909+ struct psb_intel_sdvo_dtd *dtd)
20910+{
20911+ return psb_intel_sdvo_set_timing(psb_intel_output,
20912+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1,
20913+ dtd);
20914+}
20915+
20916+#if 0
20917+static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_output
20918+ *psb_intel_output,
20919+ struct psb_intel_sdvo_dtd
20920+ *dtd)
20921+{
20922+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
20923+ u8 status;
20924+
20925+ psb_intel_sdvo_write_cmd(psb_intel_output,
20926+ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
20927+ NULL, 0);
20928+
20929+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
20930+ sizeof(dtd->part1));
20931+ if (status != SDVO_CMD_STATUS_SUCCESS)
20932+ return false;
20933+
20934+ psb_intel_sdvo_write_cmd(psb_intel_output,
20935+ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
20936+ NULL, 0);
20937+ status =
20938+ psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
20939+ sizeof(dtd->part2));
20940+ if (status != SDVO_CMD_STATUS_SUCCESS)
20941+ return false;
20942+
20943+ return true;
20944+}
20945+#endif
20946+
20947+static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output
20948+ *psb_intel_output)
20949+{
20950+ u8 response, status;
20951+
20952+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT,
20953+ NULL, 0);
20954+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1);
20955+
20956+ if (status != SDVO_CMD_STATUS_SUCCESS) {
20957+ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
20958+ return SDVO_CLOCK_RATE_MULT_1X;
20959+ } else {
20960+ DRM_DEBUG("Current clock rate multiplier: %d\n", response);
20961+ }
20962+
20963+ return response;
20964+}
20965+
20966+static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output
20967+ *psb_intel_output, u8 val)
20968+{
20969+ u8 status;
20970+
20971+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT,
20972+ &val, 1);
20973+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20974+ if (status != SDVO_CMD_STATUS_SUCCESS)
20975+ return false;
20976+
20977+ return true;
20978+}
20979+
20980+static bool psb_sdvo_set_current_inoutmap(struct psb_intel_output * output, u32 in0outputmask,
20981+ u32 in1outputmask)
20982+{
20983+ u8 byArgs[4];
20984+ u8 status;
20985+ int i;
20986+ struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
20987+
20988+	/* Zero all fields of the args/ret buffer */
20989+ memset(byArgs, 0, sizeof(byArgs));
20990+
20991+	/* Fill up the argument values */
20992+ byArgs[0] = (u8) (in0outputmask & 0xFF);
20993+ byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
20994+ byArgs[2] = (u8) (in1outputmask & 0xFF);
20995+ byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
20996+
20997+
20998+	/* Save the inoutmap args here for later restore */
20999+	for (i = 0; i < 4; i++) {
21000+		sdvo_priv->in_out_map[i] = byArgs[i];
21001+	}
21002+
21003+
21004+ psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
21005+ status = psb_intel_sdvo_read_response(output, NULL, 0);
21006+
21007+ if (status != SDVO_CMD_STATUS_SUCCESS)
21008+ return false;
21009+ return true;
21010+}
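For reference, psb_sdvo_set_current_inoutmap() simply splits the two 16-bit output masks into the four little-endian argument bytes of SDVO_CMD_SET_IN_OUT_MAP: with invented example values, in0outputmask = 0x0104 and in1outputmask = 0x0000 become byArgs[] = { 0x04, 0x01, 0x00, 0x00 }. The same bytes are cached in in_out_map[] so that psb_intel_sdvo_restore() can replay the mapping.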
21011+
21012+
21013+static void psb_intel_sdvo_set_iomap(struct psb_intel_output * output)
21014+{
21015+ u32 dwCurrentSDVOIn0 = 0;
21016+ u32 dwCurrentSDVOIn1 = 0;
21017+ u32 dwDevMask = 0;
21018+
21019+
21020+ struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
21021+
21022+ /* Please DO NOT change the following code. */
21023+ /* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
21024+ /* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
21025+ if (sdvo_priv->by_input_wiring & (SDVOB_IN0 | SDVOC_IN0)) {
21026+ switch (sdvo_priv->active_device) {
21027+ case SDVO_DEVICE_LVDS:
21028+ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
21029+ break;
21030+ case SDVO_DEVICE_TMDS:
21031+ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
21032+ break;
21033+ case SDVO_DEVICE_TV:
21034+ dwDevMask =
21035+ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 |
21036+ SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
21037+ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
21038+ break;
21039+ case SDVO_DEVICE_CRT:
21040+ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
21041+ break;
21042+ }
21043+ dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
21044+ } else if (sdvo_priv->by_input_wiring & (SDVOB_IN1 | SDVOC_IN1)) {
21045+ switch (sdvo_priv->active_device) {
21046+ case SDVO_DEVICE_LVDS:
21047+ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
21048+ break;
21049+ case SDVO_DEVICE_TMDS:
21050+ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
21051+ break;
21052+ case SDVO_DEVICE_TV:
21053+ dwDevMask =
21054+ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 |
21055+ SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
21056+ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
21057+ break;
21058+ case SDVO_DEVICE_CRT:
21059+ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
21060+ break;
21061+ }
21062+ dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
21063+ }
21064+
21065+ psb_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
21066+ dwCurrentSDVOIn1);
21067+}
21068+
21069+
21070+static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
21071+ struct drm_display_mode *mode,
21072+ struct drm_display_mode *adjusted_mode)
21073+{
21074+ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
21075+ * device will be told of the multiplier during mode_set.
21076+ */
21077+ adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
21078+ return true;
21079+}
21080+
21081+static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
21082+ struct drm_display_mode *mode,
21083+ struct drm_display_mode *adjusted_mode)
21084+{
21085+ struct drm_device *dev = encoder->dev;
21086+ struct drm_crtc *crtc = encoder->crtc;
21087+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
21088+ struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder);
21089+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
21090+ u16 width, height;
21091+ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
21092+ u16 h_sync_offset, v_sync_offset;
21093+ u32 sdvox;
21094+ struct psb_intel_sdvo_dtd output_dtd;
21095+ int sdvo_pixel_multiply;
21096+
21097+ if (!mode)
21098+ return;
21099+
21100+ psb_intel_sdvo_set_target_output(psb_intel_output, 0);
21101+
21102+ width = mode->crtc_hdisplay;
21103+ height = mode->crtc_vdisplay;
21104+
21105+ /* do some mode translations */
21106+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
21107+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
21108+
21109+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
21110+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
21111+
21112+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
21113+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
21114+
21115+ output_dtd.part1.clock = mode->clock / 10;
21116+ output_dtd.part1.h_active = width & 0xff;
21117+ output_dtd.part1.h_blank = h_blank_len & 0xff;
21118+ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
21119+ ((h_blank_len >> 8) & 0xf);
21120+ output_dtd.part1.v_active = height & 0xff;
21121+ output_dtd.part1.v_blank = v_blank_len & 0xff;
21122+ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
21123+ ((v_blank_len >> 8) & 0xf);
21124+
21125+ output_dtd.part2.h_sync_off = h_sync_offset;
21126+ output_dtd.part2.h_sync_width = h_sync_len & 0xff;
21127+ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
21128+ (v_sync_len & 0xf);
21129+ output_dtd.part2.sync_off_width_high =
21130+ ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) |
21131+ ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4);
21132+
21133+ output_dtd.part2.dtd_flags = 0x18;
21134+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
21135+ output_dtd.part2.dtd_flags |= 0x2;
21136+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
21137+ output_dtd.part2.dtd_flags |= 0x4;
21138+
21139+ output_dtd.part2.sdvo_flags = 0;
21140+ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
21141+ output_dtd.part2.reserved = 0;
21142+
21143+ /* Set the output timing to the screen */
21144+ psb_intel_sdvo_set_target_output(psb_intel_output,
21145+ sdvo_priv->active_outputs);
21146+
21147+ /* Set the input timing to the screen. Assume always input 0. */
21148+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
21149+
21150+ psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd);
21151+
21152+ /* We would like to use i830_sdvo_create_preferred_input_timing() to
21153+ * provide the device with a timing it can support, if it supports that
21154+ * feature. However, presumably we would need to adjust the CRTC to
21155+ * output the preferred timing, and we don't support that currently.
21156+ */
21157+#if 0
21158+ success =
21159+ psb_intel_sdvo_create_preferred_input_timing(psb_intel_output, clock,
21160+ width, height);
21161+ if (success) {
21162+ struct psb_intel_sdvo_dtd *input_dtd;
21163+
21164+ psb_intel_sdvo_get_preferred_input_timing(psb_intel_output,
21165+ &input_dtd);
21166+ psb_intel_sdvo_set_input_timing(psb_intel_output, &input_dtd);
21167+ }
21168+#else
21169+ psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd);
21170+#endif
21171+
21172+ switch (psb_intel_sdvo_get_pixel_multiplier(mode)) {
21173+ case 1:
21174+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
21175+ SDVO_CLOCK_RATE_MULT_1X);
21176+ break;
21177+ case 2:
21178+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
21179+ SDVO_CLOCK_RATE_MULT_2X);
21180+ break;
21181+ case 4:
21182+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
21183+ SDVO_CLOCK_RATE_MULT_4X);
21184+ break;
21185+ }
21186+
21187+ /* Set the SDVO control regs. */
21188+ if (0 /*IS_I965GM(dev) */) {
21189+ sdvox = SDVO_BORDER_ENABLE;
21190+ } else {
21191+ sdvox = REG_READ(sdvo_priv->output_device);
21192+ switch (sdvo_priv->output_device) {
21193+ case SDVOB:
21194+ sdvox &= SDVOB_PRESERVE_MASK;
21195+ break;
21196+ case SDVOC:
21197+ sdvox &= SDVOC_PRESERVE_MASK;
21198+ break;
21199+ }
21200+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
21201+ }
21202+ if (psb_intel_crtc->pipe == 1)
21203+ sdvox |= SDVO_PIPE_B_SELECT;
21204+
21205+ sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode);
21206+
21207+#if 0
21208+ if (IS_I965G(dev)) {
21209+ /* done in crtc_mode_set as the dpll_md reg must be written
21210+ * early */
21211+ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
21212+ /* done in crtc_mode_set as it lives inside the
21213+ * dpll register */
21214+ } else {
21215+ sdvox |=
21216+ (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
21217+ }
21218+#endif
21219+
21220+ psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox);
21221+
21222+ psb_intel_sdvo_set_iomap(psb_intel_output);
21223+}
21224+
21225+static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
21226+{
21227+ struct drm_device *dev = encoder->dev;
21228+ struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder);
21229+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
21230+ u32 temp;
21231+
21232+ if (mode != DRM_MODE_DPMS_ON) {
21233+ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
21234+ if (0)
21235+ psb_intel_sdvo_set_encoder_power_state(psb_intel_output,
21236+ mode);
21237+
21238+ if (mode == DRM_MODE_DPMS_OFF) {
21239+ temp = REG_READ(sdvo_priv->output_device);
21240+ if ((temp & SDVO_ENABLE) != 0) {
21241+ psb_intel_sdvo_write_sdvox(psb_intel_output,
21242+ temp &
21243+ ~SDVO_ENABLE);
21244+ }
21245+ }
21246+ } else {
21247+ bool input1, input2;
21248+ int i;
21249+ u8 status;
21250+
21251+ temp = REG_READ(sdvo_priv->output_device);
21252+ if ((temp & SDVO_ENABLE) == 0)
21253+ psb_intel_sdvo_write_sdvox(psb_intel_output,
21254+ temp | SDVO_ENABLE);
21255+ for (i = 0; i < 2; i++)
21256+ psb_intel_wait_for_vblank(dev);
21257+
21258+ status =
21259+ psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1,
21260+ &input2);
21261+
21262+
21263+ /* Warn if the device reported failure to sync.
21264+ * A lot of SDVO devices fail to notify of sync, but it's
21265+	 * a given that if the status is a success, we succeeded.
21266+ */
21267+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
21268+ DRM_DEBUG
21269+ ("First %s output reported failure to sync\n",
21270+ SDVO_NAME(sdvo_priv));
21271+ }
21272+
21273+ if (0)
21274+ psb_intel_sdvo_set_encoder_power_state(psb_intel_output,
21275+ mode);
21276+ psb_intel_sdvo_set_active_outputs(psb_intel_output,
21277+ sdvo_priv->active_outputs);
21278+ }
21279+ return;
21280+}
21281+
21282+static void psb_intel_sdvo_save(struct drm_connector *connector)
21283+{
21284+ struct drm_device *dev = connector->dev;
21285+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21286+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
21287+ /*int o;*/
21288+
21289+ sdvo_priv->save_sdvo_mult =
21290+ psb_intel_sdvo_get_clock_rate_mult(psb_intel_output);
21291+ psb_intel_sdvo_get_active_outputs(psb_intel_output,
21292+ &sdvo_priv->save_active_outputs);
21293+
21294+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
21295+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
21296+ psb_intel_sdvo_get_input_timing(psb_intel_output,
21297+ &sdvo_priv->save_input_dtd_1);
21298+ }
21299+
21300+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
21301+ psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
21302+ psb_intel_sdvo_get_input_timing(psb_intel_output,
21303+ &sdvo_priv->save_input_dtd_2);
21304+ }
21305+
21306+#if 0
21307+ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
21308+ u16 this_output = (1 << o);
21309+ if (sdvo_priv->caps.output_flags & this_output) {
21310+ psb_intel_sdvo_set_target_output(psb_intel_output,
21311+ this_output);
21312+ psb_intel_sdvo_get_output_timing(psb_intel_output,
21313+ &sdvo_priv->
21314+ save_output_dtd[o]);
21315+ }
21316+ }
21317+#endif
21318+
21319+ sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device);
21320+
21321+ /*TODO: save the in_out_map state*/
21322+}
21323+
21324+static void psb_intel_sdvo_restore(struct drm_connector *connector)
21325+{
21326+ struct drm_device *dev = connector->dev;
21327+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21328+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
21329+ /*int o;*/
21330+ int i;
21331+ bool input1, input2;
21332+ u8 status;
21333+
21334+ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
21335+
21336+#if 0
21337+ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
21338+ u16 this_output = (1 << o);
21339+ if (sdvo_priv->caps.output_flags & this_output) {
21340+ psb_intel_sdvo_set_target_output(psb_intel_output,
21341+ this_output);
21342+ psb_intel_sdvo_set_output_timing(psb_intel_output,
21343+ &sdvo_priv->
21344+ save_output_dtd[o]);
21345+ }
21346+ }
21347+#endif
21348+
21349+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
21350+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
21351+ psb_intel_sdvo_set_input_timing(psb_intel_output,
21352+ &sdvo_priv->save_input_dtd_1);
21353+ }
21354+
21355+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
21356+ psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
21357+ psb_intel_sdvo_set_input_timing(psb_intel_output,
21358+ &sdvo_priv->save_input_dtd_2);
21359+ }
21360+
21361+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
21362+ sdvo_priv->save_sdvo_mult);
21363+
21364+ REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
21365+
21366+ if (sdvo_priv->save_SDVOX & SDVO_ENABLE) {
21367+ for (i = 0; i < 2; i++)
21368+ psb_intel_wait_for_vblank(dev);
21369+ status =
21370+ psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1,
21371+ &input2);
21372+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
21373+ DRM_DEBUG
21374+ ("First %s output reported failure to sync\n",
21375+ SDVO_NAME(sdvo_priv));
21376+ }
21377+
21378+ psb_intel_sdvo_set_active_outputs(psb_intel_output,
21379+ sdvo_priv->save_active_outputs);
21380+
21381+ /*TODO: restore in_out_map*/
21382+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_IN_OUT_MAP, sdvo_priv->in_out_map, 4);
21383+ psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
21384+}
21385+
21386+static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
21387+ struct drm_display_mode *mode)
21388+{
21389+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21390+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
21391+
21392+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
21393+ return MODE_NO_DBLESCAN;
21394+
21395+ if (sdvo_priv->pixel_clock_min > mode->clock)
21396+ return MODE_CLOCK_LOW;
21397+
21398+ if (sdvo_priv->pixel_clock_max < mode->clock)
21399+ return MODE_CLOCK_HIGH;
21400+
21401+ return MODE_OK;
21402+}
21403+
21404+static bool psb_intel_sdvo_get_capabilities(struct psb_intel_output *psb_intel_output,
21405+ struct psb_intel_sdvo_caps *caps)
21406+{
21407+ u8 status;
21408+
21409+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL,
21410+ 0);
21411+ status =
21412+ psb_intel_sdvo_read_response(psb_intel_output, caps, sizeof(*caps));
21413+ if (status != SDVO_CMD_STATUS_SUCCESS)
21414+ return false;
21415+
21416+ return true;
21417+}
21418+
21419+struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
21420+{
21421+ struct drm_connector *connector = NULL;
21422+ struct psb_intel_output *iout = NULL;
21423+ struct psb_intel_sdvo_priv *sdvo;
21424+
21425+ /* find the sdvo connector */
21426+ list_for_each_entry(connector, &dev->mode_config.connector_list,
21427+ head) {
21428+ iout = to_psb_intel_output(connector);
21429+
21430+ if (iout->type != INTEL_OUTPUT_SDVO)
21431+ continue;
21432+
21433+ sdvo = iout->dev_priv;
21434+
21435+ if (sdvo->output_device == SDVOB && sdvoB)
21436+ return connector;
21437+
21438+ if (sdvo->output_device == SDVOC && !sdvoB)
21439+ return connector;
21440+
21441+ }
21442+
21443+ return NULL;
21444+}
21445+
21446+int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
21447+{
21448+ u8 response[2];
21449+ u8 status;
21450+ struct psb_intel_output *psb_intel_output;
21451+ DRM_DEBUG("\n");
21452+
21453+ if (!connector)
21454+ return 0;
21455+
21456+ psb_intel_output = to_psb_intel_output(connector);
21457+
21458+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
21459+ NULL, 0);
21460+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
21461+
21462+ if (response[0] != 0)
21463+ return 1;
21464+
21465+ return 0;
21466+}
21467+
21468+void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
21469+{
21470+ u8 response[2];
21471+ u8 status;
21472+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21473+
21474+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
21475+ NULL, 0);
21476+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
21477+
21478+ if (on) {
21479+ psb_intel_sdvo_write_cmd(psb_intel_output,
21480+ SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL,
21481+ 0);
21482+ status =
21483+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
21484+
21485+ psb_intel_sdvo_write_cmd(psb_intel_output,
21486+ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
21487+ &response, 2);
21488+ } else {
21489+ response[0] = 0;
21490+ response[1] = 0;
21491+ psb_intel_sdvo_write_cmd(psb_intel_output,
21492+ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
21493+ &response, 2);
21494+ }
21495+
21496+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
21497+ NULL, 0);
21498+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
21499+}
21500+
21501+static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector
21502+ *connector)
21503+{
21504+ u8 response[2];
21505+ u8 status;
21506+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21507+
21508+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS,
21509+ NULL, 0);
21510+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
21511+
21512+ DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
21513+ if ((response[0] != 0) || (response[1] != 0))
21514+ return connector_status_connected;
21515+ else
21516+ return connector_status_disconnected;
21517+}
21518+
21519+static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
21520+{
21521+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21522+
21523+ /* set the bus switch and get the modes */
21524+ psb_intel_sdvo_set_control_bus_switch(psb_intel_output,
21525+ SDVO_CONTROL_BUS_DDC2);
21526+ psb_intel_ddc_get_modes(psb_intel_output);
21527+
21528+ if (list_empty(&connector->probed_modes))
21529+ return 0;
21530+ return 1;
21531+#if 0
21532+ /* Mac mini hack. On this device, I get DDC through the analog, which
21533+ * load-detects as disconnected. I fail to DDC through the SDVO DDC,
21534+ * but it does load-detect as connected. So, just steal the DDC bits
21535+ * from analog when we fail at finding it the right way.
21536+ */
21537+ /* TODO */
21538+ return NULL;
21539+
21540+ return NULL;
21541+#endif
21542+}
21543+
21544+static void psb_intel_sdvo_destroy(struct drm_connector *connector)
21545+{
21546+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21547+
21548+ if (psb_intel_output->i2c_bus)
21549+ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
21550+ drm_sysfs_connector_remove(connector);
21551+ drm_connector_cleanup(connector);
21552+ kfree(psb_intel_output);
21553+}
21554+
21555+static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
21556+ .dpms = psb_intel_sdvo_dpms,
21557+ .mode_fixup = psb_intel_sdvo_mode_fixup,
21558+ .prepare = psb_intel_encoder_prepare,
21559+ .mode_set = psb_intel_sdvo_mode_set,
21560+ .commit = psb_intel_encoder_commit,
21561+};
21562+
21563+static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
21564+ .dpms = drm_helper_connector_dpms,
21565+ .save = psb_intel_sdvo_save,
21566+ .restore = psb_intel_sdvo_restore,
21567+ .detect = psb_intel_sdvo_detect,
21568+ .fill_modes = drm_helper_probe_single_connector_modes,
21569+ .destroy = psb_intel_sdvo_destroy,
21570+};
21571+
21572+static const struct drm_connector_helper_funcs
21573+ psb_intel_sdvo_connector_helper_funcs = {
21574+ .get_modes = psb_intel_sdvo_get_modes,
21575+ .mode_valid = psb_intel_sdvo_mode_valid,
21576+ .best_encoder = psb_intel_best_encoder,
21577+};
21578+
21579+void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
21580+{
21581+ drm_encoder_cleanup(encoder);
21582+}
21583+
21584+static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
21585+ .destroy = psb_intel_sdvo_enc_destroy,
21586+};
21587+
21588+
21589+void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
21590+{
21591+ struct drm_connector *connector;
21592+ struct psb_intel_output *psb_intel_output;
21593+ struct psb_intel_sdvo_priv *sdvo_priv;
21594+ struct psb_intel_i2c_chan *i2cbus = NULL;
21595+ int connector_type;
21596+ u8 ch[0x40];
21597+ int i;
21598+ int encoder_type, output_id;
21599+
21600+ psb_intel_output =
21601+ kcalloc(sizeof(struct psb_intel_output) +
21602+ sizeof(struct psb_intel_sdvo_priv), 1, GFP_KERNEL);
21603+ if (!psb_intel_output)
21604+ return;
21605+
21606+ connector = &psb_intel_output->base;
21607+
21608+ drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs,
21609+ DRM_MODE_CONNECTOR_Unknown);
21610+ drm_connector_helper_add(connector,
21611+ &psb_intel_sdvo_connector_helper_funcs);
21612+ sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1);
21613+ psb_intel_output->type = INTEL_OUTPUT_SDVO;
21614+
21615+ connector->interlace_allowed = 0;
21616+ connector->doublescan_allowed = 0;
21617+
21618+ /* setup the DDC bus. */
21619+ if (output_device == SDVOB)
21620+ i2cbus =
21621+ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
21622+ else
21623+ i2cbus =
21624+ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
21625+
21626+ if (!i2cbus)
21627+ goto err_connector;
21628+
21629+ sdvo_priv->i2c_bus = i2cbus;
21630+
21631+ if (output_device == SDVOB) {
21632+ output_id = 1;
21633+ sdvo_priv->by_input_wiring = SDVOB_IN0;
21634+ sdvo_priv->i2c_bus->slave_addr = 0x38;
21635+ } else {
21636+ output_id = 2;
21637+ sdvo_priv->i2c_bus->slave_addr = 0x39;
21638+ }
21639+
21640+ sdvo_priv->output_device = output_device;
21641+ psb_intel_output->i2c_bus = i2cbus;
21642+ psb_intel_output->dev_priv = sdvo_priv;
21643+
21644+
21645+ /* Read the regs to test if we can talk to the device */
21646+ for (i = 0; i < 0x40; i++) {
21647+ if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
21648+ DRM_DEBUG("No SDVO device found on SDVO%c\n",
21649+ output_device == SDVOB ? 'B' : 'C');
21650+ goto err_i2c;
21651+ }
21652+ }
21653+
21654+ psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps);
21655+
21656+ memset(&sdvo_priv->active_outputs, 0,
21657+ sizeof(sdvo_priv->active_outputs));
21658+
21659+ /* TODO, CVBS, SVID, YPRPB & SCART outputs. */
21660+ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
21661+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
21662+ sdvo_priv->active_device = SDVO_DEVICE_CRT;
21663+ connector->display_info.subpixel_order =
21664+ SubPixelHorizontalRGB;
21665+ encoder_type = DRM_MODE_ENCODER_DAC;
21666+ connector_type = DRM_MODE_CONNECTOR_VGA;
21667+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
21668+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
21669+ sdvo_priv->active_device = SDVO_DEVICE_CRT;
21670+ connector->display_info.subpixel_order =
21671+ SubPixelHorizontalRGB;
21672+ encoder_type = DRM_MODE_ENCODER_DAC;
21673+ connector_type = DRM_MODE_CONNECTOR_VGA;
21674+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
21675+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
21676+ sdvo_priv->active_device = SDVO_DEVICE_TMDS;
21677+ connector->display_info.subpixel_order =
21678+ SubPixelHorizontalRGB;
21679+ encoder_type = DRM_MODE_ENCODER_TMDS;
21680+ connector_type = DRM_MODE_CONNECTOR_DVID;
21681+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
21682+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
21683+ sdvo_priv->active_device = SDVO_DEVICE_TMDS;
21684+ connector->display_info.subpixel_order =
21685+ SubPixelHorizontalRGB;
21686+ encoder_type = DRM_MODE_ENCODER_TMDS;
21687+ connector_type = DRM_MODE_CONNECTOR_DVID;
21688+ } else {
21689+ unsigned char bytes[2];
21690+
21691+ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
21692+ DRM_DEBUG
21693+ ("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
21694+ SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
21695+ goto err_i2c;
21696+ }
21697+
21698+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs,
21699+ encoder_type);
21700+ drm_encoder_helper_add(&psb_intel_output->enc,
21701+ &psb_intel_sdvo_helper_funcs);
21702+ connector->connector_type = connector_type;
21703+
21704+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
21705+ &psb_intel_output->enc);
21706+ drm_sysfs_connector_add(connector);
21707+
21708+ /* Set the input timing to the screen. Assume always input 0. */
21709+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
21710+
21711+ psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output,
21712+ &sdvo_priv->pixel_clock_min,
21713+ &sdvo_priv->
21714+ pixel_clock_max);
21715+
21716+
21717+ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
21718+ "clock range %dMHz - %dMHz, "
21719+ "input 1: %c, input 2: %c, "
21720+ "output 1: %c, output 2: %c\n",
21721+ SDVO_NAME(sdvo_priv),
21722+ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
21723+ sdvo_priv->caps.device_rev_id,
21724+ sdvo_priv->pixel_clock_min / 1000,
21725+ sdvo_priv->pixel_clock_max / 1000,
21726+ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
21727+ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
21728+ /* check currently supported outputs */
21729+ sdvo_priv->caps.output_flags &
21730+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
21731+ sdvo_priv->caps.output_flags &
21732+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
21733+
21734+ psb_intel_output->ddc_bus = i2cbus;
21735+
21736+ return;
21737+
21738+err_i2c:
21739+ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
21740+err_connector:
21741+ drm_connector_cleanup(connector);
21742+ kfree(psb_intel_output);
21743+
21744+ return;
21745+}
21746diff --git a/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h
21747new file mode 100644
21748index 0000000..bf3d72e
21749--- /dev/null
21750+++ b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h
21751@@ -0,0 +1,345 @@
21752+/*
21753+ * Copyright (c) 2008, Intel Corporation
21754+ *
21755+ * Permission is hereby granted, free of charge, to any person obtaining a
21756+ * copy of this software and associated documentation files (the "Software"),
21757+ * to deal in the Software without restriction, including without limitation
21758+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
21759+ * and/or sell copies of the Software, and to permit persons to whom the
21760+ * Software is furnished to do so, subject to the following conditions:
21761+ *
21762+ * The above copyright notice and this permission notice (including the next
21763+ * paragraph) shall be included in all copies or substantial portions of the
21764+ * Software.
21765+ *
21766+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21767+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21768+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21769+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21770+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21771+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21772+ * DEALINGS IN THE SOFTWARE.
21773+ *
21774+ * Authors:
21775+ * Eric Anholt <eric@anholt.net>
21776+ */
21777+
21778+/**
21779+ * @file SDVO command definitions and structures.
21780+ */
21781+
21782+#define SDVO_OUTPUT_FIRST (0)
21783+#define SDVO_OUTPUT_TMDS0 (1 << 0)
21784+#define SDVO_OUTPUT_RGB0 (1 << 1)
21785+#define SDVO_OUTPUT_CVBS0 (1 << 2)
21786+#define SDVO_OUTPUT_SVID0 (1 << 3)
21787+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
21788+#define SDVO_OUTPUT_SCART0 (1 << 5)
21789+#define SDVO_OUTPUT_LVDS0 (1 << 6)
21790+#define SDVO_OUTPUT_TMDS1 (1 << 8)
21791+#define SDVO_OUTPUT_RGB1 (1 << 9)
21792+#define SDVO_OUTPUT_CVBS1 (1 << 10)
21793+#define SDVO_OUTPUT_SVID1 (1 << 11)
21794+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
21795+#define SDVO_OUTPUT_SCART1 (1 << 13)
21796+#define SDVO_OUTPUT_LVDS1 (1 << 14)
21797+#define SDVO_OUTPUT_LAST (14)
21798+
21799+struct psb_intel_sdvo_caps {
21800+ u8 vendor_id;
21801+ u8 device_id;
21802+ u8 device_rev_id;
21803+ u8 sdvo_version_major;
21804+ u8 sdvo_version_minor;
21805+ unsigned int sdvo_inputs_mask:2;
21806+ unsigned int smooth_scaling:1;
21807+ unsigned int sharp_scaling:1;
21808+ unsigned int up_scaling:1;
21809+ unsigned int down_scaling:1;
21810+ unsigned int stall_support:1;
21811+ unsigned int pad:1;
21812+ u16 output_flags;
21813+} __attribute__ ((packed));
21814+
21815+/** This matches the EDID DTD structure, more or less */
21816+struct psb_intel_sdvo_dtd {
21817+ struct {
21818+ u16 clock; /**< pixel clock, in 10kHz units */
21819+ u8 h_active; /**< lower 8 bits (pixels) */
21820+ u8 h_blank; /**< lower 8 bits (pixels) */
21821+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
21822+ u8 v_active; /**< lower 8 bits (lines) */
21823+ u8 v_blank; /**< lower 8 bits (lines) */
21824+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
21825+ } part1;
21826+
21827+ struct {
21828+ u8 h_sync_off;
21829+ /**< lower 8 bits, from hblank start */
21830+ u8 h_sync_width;/**< lower 8 bits (pixels) */
21831+ /** lower 4 bits each vsync offset, vsync width */
21832+ u8 v_sync_off_width;
21833+ /**
21834+ * 2 high bits of hsync offset, 2 high bits of hsync width,
21835+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
21836+ */
21837+ u8 sync_off_width_high;
21838+ u8 dtd_flags;
21839+ u8 sdvo_flags;
21840+ /** bits 6-7 of vsync offset at bits 6-7 */
21841+ u8 v_sync_off_high;
21842+ u8 reserved;
21843+ } part2;
21844+} __attribute__ ((packed));
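/*
 * Minimal sketch of how the split fields above recombine, assuming the
 * usual EDID DTD packing (upper nibble of h_high extends h_active, lower
 * nibble extends h_blank); the helper names are illustrative only.
 */
static inline u16 psb_sdvo_dtd_h_active(const struct psb_intel_sdvo_dtd *dtd)
{
	/* bits 11:8 come from h_high[7:4], bits 7:0 from h_active */
	return ((u16)(dtd->part1.h_high & 0xf0) << 4) | dtd->part1.h_active;
}

static inline u16 psb_sdvo_dtd_h_blank(const struct psb_intel_sdvo_dtd *dtd)
{
	/* bits 11:8 come from h_high[3:0], bits 7:0 from h_blank */
	return ((u16)(dtd->part1.h_high & 0x0f) << 8) | dtd->part1.h_blank;
}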
21845+
21846+struct psb_intel_sdvo_pixel_clock_range {
21847+ u16 min; /**< pixel clock, in 10kHz units */
21848+ u16 max; /**< pixel clock, in 10kHz units */
21849+} __attribute__ ((packed));
21850+
21851+struct psb_intel_sdvo_preferred_input_timing_args {
21852+ u16 clock;
21853+ u16 width;
21854+ u16 height;
21855+} __attribute__ ((packed));
21856+
21857+/* I2C registers for SDVO */
21858+#define SDVO_I2C_ARG_0 0x07
21859+#define SDVO_I2C_ARG_1 0x06
21860+#define SDVO_I2C_ARG_2 0x05
21861+#define SDVO_I2C_ARG_3 0x04
21862+#define SDVO_I2C_ARG_4 0x03
21863+#define SDVO_I2C_ARG_5 0x02
21864+#define SDVO_I2C_ARG_6 0x01
21865+#define SDVO_I2C_ARG_7 0x00
21866+#define SDVO_I2C_OPCODE 0x08
21867+#define SDVO_I2C_CMD_STATUS 0x09
21868+#define SDVO_I2C_RETURN_0 0x0a
21869+#define SDVO_I2C_RETURN_1 0x0b
21870+#define SDVO_I2C_RETURN_2 0x0c
21871+#define SDVO_I2C_RETURN_3 0x0d
21872+#define SDVO_I2C_RETURN_4 0x0e
21873+#define SDVO_I2C_RETURN_5 0x0f
21874+#define SDVO_I2C_RETURN_6 0x10
21875+#define SDVO_I2C_RETURN_7 0x11
21876+#define SDVO_I2C_VENDOR_BEGIN 0x20
21877+
21878+/* Status results */
21879+#define SDVO_CMD_STATUS_POWER_ON 0x0
21880+#define SDVO_CMD_STATUS_SUCCESS 0x1
21881+#define SDVO_CMD_STATUS_NOTSUPP 0x2
21882+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
21883+#define SDVO_CMD_STATUS_PENDING 0x4
21884+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
21885+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
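/*
 * Minimal sketch of a command transaction over the register map above:
 * arguments go into SDVO_I2C_ARG_0 downwards, the opcode into
 * SDVO_I2C_OPCODE, SDVO_I2C_CMD_STATUS is polled until it leaves
 * SDVO_CMD_STATUS_PENDING, and results come back from SDVO_I2C_RETURN_0
 * upwards.  sdvo_reg_read()/sdvo_reg_write() are hypothetical one-byte
 * accessors standing in for the driver's byte-level I2C helpers.
 */
extern u8 sdvo_reg_read(u8 reg);		/* hypothetical */
extern void sdvo_reg_write(u8 reg, u8 val);	/* hypothetical */

static bool sdvo_run_cmd(u8 opcode, const u8 *args, int nargs,
			 u8 *ret, int nret)
{
	u8 status;
	int i;

	for (i = 0; i < nargs; i++)
		sdvo_reg_write(SDVO_I2C_ARG_0 - i, args[i]);
	sdvo_reg_write(SDVO_I2C_OPCODE, opcode);

	do {
		status = sdvo_reg_read(SDVO_I2C_CMD_STATUS);
	} while (status == SDVO_CMD_STATUS_PENDING);

	if (status != SDVO_CMD_STATUS_SUCCESS)
		return false;

	for (i = 0; i < nret; i++)
		ret[i] = sdvo_reg_read(SDVO_I2C_RETURN_0 + i);
	return true;
}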
21886+
21887+/* SDVO commands, argument/result registers */
21888+
21889+#define SDVO_CMD_RESET 0x01
21890+
21891+/** Returns a struct psb_intel_sdvo_caps */
21892+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
21893+
21894+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
21895+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
21896+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
21897+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
21898+
21899+/**
21900+ * Reports which inputs are trained (managed to sync).
21901+ *
21902+ * Devices must have trained within 2 vsyncs of a mode change.
21903+ */
21904+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
21905+struct psb_intel_sdvo_get_trained_inputs_response {
21906+ unsigned int input0_trained:1;
21907+ unsigned int input1_trained:1;
21908+ unsigned int pad:6;
21909+} __attribute__ ((packed));
21910+
21911+/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */
21912+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
21913+
21914+/**
21915+ * Sets the current set of active outputs.
21916+ *
21917+ * Takes a struct psb_intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
21918+ * on multi-output devices.
21919+ */
21920+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
21921+
21922+/**
21923+ * Returns the current mapping of SDVO inputs to outputs on the device.
21924+ *
21925+ * Returns two struct psb_intel_sdvo_output_flags structures.
21926+ */
21927+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
21928+
21929+/**
21930+ * Sets the current mapping of SDVO inputs to outputs on the device.
21931+ *
21932+ * Takes two struct psb_intel_sdvo_output_flags structures.
21933+ */
21934+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
21935+
21936+/**
21937+ * Returns a struct psb_intel_sdvo_output_flags of attached displays.
21938+ */
21939+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
21940+
21941+/**
21942+ * Returns a struct psb_intel_sdvo_output_flags of displays supporting hot plugging.
21943+ */
21944+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
21945+
21946+/**
21947+ * Takes a struct psb_intel_sdvo_output_flags.
21948+ */
21949+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
21950+
21951+/**
21952+ * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug
21953+ * interrupts enabled.
21954+ */
21955+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
21956+
21957+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
21958+struct psb_intel_sdvo_get_interrupt_event_source_response {
21959+ u16 interrupt_status;
21960+ unsigned int ambient_light_interrupt:1;
21961+ unsigned int pad:7;
21962+} __attribute__ ((packed));
21963+
21964+/**
21965+ * Selects which input is affected by future input commands.
21966+ *
21967+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
21968+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
21969+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
21970+ */
21971+#define SDVO_CMD_SET_TARGET_INPUT 0x10
21972+struct psb_intel_sdvo_set_target_input_args {
21973+ unsigned int target_1:1;
21974+ unsigned int pad:7;
21975+} __attribute__ ((packed));
21976+
21977+/**
21978+ * Takes a struct psb_intel_sdvo_output_flags of which outputs are targeted by
21979+ * future output commands.
21980+ *
21981+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
21982+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
21983+ */
21984+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
21985+
21986+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
21987+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
21988+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
21989+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
21990+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
21991+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
21992+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
21993+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
21994+/* Part 1 */
21995+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
21996+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
21997+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
21998+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
21999+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
22000+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
22001+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
22002+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
22003+/* Part 2 */
22004+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
22005+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
22006+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
22007+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
22008+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
22009+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
22010+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
22011+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
22012+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
22013+# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
22014+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
22015+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
22016+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
22017+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
22018+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
22019+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
22020+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
22021+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
22022+
22023+/**
22024+ * Generates a DTD based on the given width, height, and flags.
22025+ *
22026+ * This will be supported by any device supporting scaling or interlaced
22027+ * modes.
22028+ */
22029+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
22030+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
22031+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
22032+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
22033+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
22034+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
22035+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
22036+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
22037+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
22038+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
22039+
22040+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
22041+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
22042+
22043+/** Returns a struct psb_intel_sdvo_pixel_clock_range */
22044+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
22045+/** Returns a struct psb_intel_sdvo_pixel_clock_range */
22046+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
22047+
22048+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
22049+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
22050+
22051+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
22052+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
22053+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
22054+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
22055+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
22056+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
22057+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
22058+
22059+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
22060+
22061+#define SDVO_CMD_GET_TV_FORMAT 0x28
22062+
22063+#define SDVO_CMD_SET_TV_FORMAT 0x29
22064+
22065+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
22066+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
22067+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
22068+# define SDVO_ENCODER_STATE_ON (1 << 0)
22069+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
22070+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
22071+# define SDVO_ENCODER_STATE_OFF (1 << 3)
22072+
22073+#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
22074+
22075+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
22076+# define SDVO_CONTROL_BUS_PROM 0x0
22077+# define SDVO_CONTROL_BUS_DDC1 0x1
22078+# define SDVO_CONTROL_BUS_DDC2 0x2
22079+# define SDVO_CONTROL_BUS_DDC3 0x3
22080+
22081+/* SDVO Bus & SDVO Inputs wiring details */
22082+/* Bit 0: Is SDVOB connected to In0 (1 = yes, 0 = no) */
22083+/* Bit 1: Is SDVOB connected to In1 (1 = yes, 0 = no) */
22084+/* Bit 2: Is SDVOC connected to In0 (1 = yes, 0 = no) */
22085+/* Bit 3: Is SDVOC connected to In1 (1 = yes, 0 = no) */
22086+#define SDVOB_IN0 0x01
22087+#define SDVOB_IN1 0x02
22088+#define SDVOC_IN0 0x04
22089+#define SDVOC_IN1 0x08
22090+
22091+#define SDVO_DEVICE_NONE 0x00
22092+#define SDVO_DEVICE_CRT 0x01
22093+#define SDVO_DEVICE_TV 0x02
22094+#define SDVO_DEVICE_LVDS 0x04
22095+#define SDVO_DEVICE_TMDS 0x08
22096+
22097diff --git a/drivers/gpu/drm/psb/psb_irq.c b/drivers/gpu/drm/psb/psb_irq.c
22098new file mode 100644
22099index 0000000..983e2ad
22100--- /dev/null
22101+++ b/drivers/gpu/drm/psb/psb_irq.c
22102@@ -0,0 +1,621 @@
22103+/**************************************************************************
22104+ * Copyright (c) 2007, Intel Corporation.
22105+ * All Rights Reserved.
22106+ *
22107+ * This program is free software; you can redistribute it and/or modify it
22108+ * under the terms and conditions of the GNU General Public License,
22109+ * version 2, as published by the Free Software Foundation.
22110+ *
22111+ * This program is distributed in the hope it will be useful, but WITHOUT
22112+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22113+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22114+ * more details.
22115+ *
22116+ * You should have received a copy of the GNU General Public License along with
22117+ * this program; if not, write to the Free Software Foundation, Inc.,
22118+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22119+ *
22120+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
22121+ * develop this driver.
22122+ *
22123+ **************************************************************************/
22124+/*
22125+ */
22126+
22127+#include <drm/drmP.h>
22128+#include "psb_drv.h"
22129+#include "psb_reg.h"
22130+#include "psb_msvdx.h"
22131+#include "lnc_topaz.h"
22132+#include "psb_intel_reg.h"
22133+#include "psb_powermgmt.h"
22134+
22135+/*
22136+ * Video display controller interrupt.
22137+ */
22138+
22139+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
22140+{
22141+ struct drm_psb_private *dev_priv =
22142+ (struct drm_psb_private *) dev->dev_private;
22143+
22144+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
22145+#ifdef PSB_FIXME
22146+ atomic_inc(&dev->vbl_received);
22147+#endif
22148+ PSB_WVDC32(PIPE_VBLANK_INTERRUPT_ENABLE |
22149+ PIPE_VBLANK_CLEAR, PIPEASTAT);
22150+ drm_handle_vblank(dev, 0);
22151+ }
22152+
22153+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
22154+#ifdef PSB_FIXME
22155+ atomic_inc(&dev->vbl_received2);
22156+#endif
22157+ PSB_WVDC32(PIPE_VBLANK_INTERRUPT_ENABLE |
22158+ PIPE_VBLANK_CLEAR, PIPEBSTAT);
22159+ drm_handle_vblank(dev, 1);
22160+ }
22161+}
22162+
22163+/*
22164+ * SGX interrupt source 1.
22165+ */
22166+
22167+static void psb_sgx_interrupt(struct drm_device *dev, uint32_t sgx_stat,
22168+ uint32_t sgx_stat2)
22169+{
22170+ struct drm_psb_private *dev_priv =
22171+ (struct drm_psb_private *) dev->dev_private;
22172+
22173+ if (sgx_stat & _PSB_CE_TWOD_COMPLETE) {
22174+ DRM_WAKEUP(&dev_priv->event_2d_queue);
22175+ psb_fence_handler(dev, PSB_ENGINE_2D);
22176+ }
22177+
22178+ if (unlikely(sgx_stat2 & _PSB_CE2_BIF_REQUESTER_FAULT))
22179+ psb_print_pagefault(dev_priv);
22180+
22181+ psb_scheduler_handler(dev_priv, sgx_stat);
22182+}
22183+
22184+
22185+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
22186+{
22187+ struct drm_device *dev = (struct drm_device *) arg;
22188+ struct drm_psb_private *dev_priv =
22189+ (struct drm_psb_private *) dev->dev_private;
22190+
22191+ uint32_t vdc_stat, msvdx_int = 0, topaz_int = 0;
22192+ uint32_t sgx_stat = 0;
22193+ uint32_t sgx_stat2 = 0;
22194+ uint32_t sgx_int = 0;
22195+ int handled = 0;
22196+
22197+ spin_lock(&dev_priv->irqmask_lock);
22198+
22199+ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
22200+
22201+ if (vdc_stat & _PSB_IRQ_SGX_FLAG) {
22202+ PSB_DEBUG_IRQ("Got SGX interrupt\n");
22203+ sgx_int = 1;
22204+ }
22205+ if (vdc_stat & _PSB_IRQ_MSVDX_FLAG) {
22206+ PSB_DEBUG_IRQ("Got MSVDX interrupt\n");
22207+ msvdx_int = 1;
22208+ }
22209+
22210+ if (vdc_stat & _LNC_IRQ_TOPAZ_FLAG) {
22211+ PSB_DEBUG_IRQ("Got TOPAX interrupt\n");
22212+ topaz_int = 1;
22213+ }
22214+ if (sgx_int && powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22215+ sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
22216+ sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
22217+
22218+ sgx_stat2 &= dev_priv->sgx2_irq_mask;
22219+ sgx_stat &= dev_priv->sgx_irq_mask;
22220+ PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
22221+ PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
22222+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
22223+ } else if (unlikely(PSB_D_PM & drm_psb_debug)) {
22224+ if (sgx_int)
22225+ PSB_DEBUG_PM("sgx int in down mode\n");
22226+ }
22227+ vdc_stat &= dev_priv->vdc_irq_mask;
22228+ spin_unlock(&dev_priv->irqmask_lock);
22229+
22230+ if (msvdx_int &&
22231+ powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND)) {
22232+ uint32_t msvdx_stat = 0;
22233+
22234+ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
22235+ psb_msvdx_interrupt(dev, msvdx_stat);
22236+ handled = 1;
22237+ }
22238+
22239+ if (IS_MRST(dev) && topaz_int &&
22240+ powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_ENC_ISLAND)) {
22241+ /* sometimes, even when topaz is powered down, the IIR
22242+ * may still have the topaz bit set
22243+ */
22244+ uint32_t topaz_stat = 0;
22245+
22246+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &topaz_stat);
22247+ lnc_topaz_interrupt(dev, topaz_stat);
22248+ handled = 1;
22249+ }
22250+
22251+ if (vdc_stat && powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)) {
22252+ psb_vdc_interrupt(dev, vdc_stat);
22253+ handled = 1;
22254+ }
22255+
22256+ if (sgx_stat || sgx_stat2) {
22257+ psb_sgx_interrupt(dev, sgx_stat, sgx_stat2);
22258+ handled = 1;
22259+ }
22260+
22261+ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
22262+ (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
22263+ DRM_READMEMORYBARRIER();
22264+
22265+ if (!handled)
22266+ return IRQ_NONE;
22267+
22268+
22269+ return IRQ_HANDLED;
22270+}
22271+
22272+void psb_irq_preinstall(struct drm_device *dev)
22273+{
22274+ psb_irq_preinstall_islands(dev, PSB_ALL_ISLANDS);
22275+}
22276+
22277+void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands)
22278+{
22279+ struct drm_psb_private *dev_priv =
22280+ (struct drm_psb_private *) dev->dev_private;
22281+ unsigned long irqflags;
22282+
22283+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22284+
22285+ if (hw_islands & PSB_DISPLAY_ISLAND) {
22286+ if (powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)) {
22287+ if (IS_POULSBO(dev))
22288+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
22289+ if (dev->vblank_enabled[0])
22290+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
22291+ if (dev->vblank_enabled[1])
22292+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
22293+ }
22294+ }
22295+
22296+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
22297+ if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22298+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
22299+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
22300+
22301+ dev_priv->sgx_irq_mask = _PSB_CE_PIXELBE_END_RENDER |
22302+ _PSB_CE_DPM_3D_MEM_FREE |
22303+ _PSB_CE_TA_FINISHED |
22304+ _PSB_CE_DPM_REACHED_MEM_THRESH |
22305+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
22306+ _PSB_CE_DPM_OUT_OF_MEMORY_MT |
22307+ _PSB_CE_TA_TERMINATE | _PSB_CE_SW_EVENT;
22308+
22309+ dev_priv->sgx2_irq_mask = _PSB_CE2_BIF_REQUESTER_FAULT;
22310+ dev_priv->vdc_irq_mask |= _PSB_IRQ_SGX_FLAG;
22311+ }
22312+ }
22313+
22314+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
22315+ if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND))
22316+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
22317+
22318+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
22319+ if (IS_MRST(dev) && powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_ENC_ISLAND))
22320+ dev_priv->vdc_irq_mask |= _LNC_IRQ_TOPAZ_FLAG;
22321+
22322+ /*This register is safe even if display island is off*/
22323+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
22324+
22325+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22326+}
22327+
22328+int psb_irq_postinstall(struct drm_device *dev)
22329+{
22330+ return psb_irq_postinstall_islands(dev, PSB_ALL_ISLANDS);
22331+}
22332+
22333+int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands)
22334+{
22335+ struct drm_psb_private *dev_priv =
22336+ (struct drm_psb_private *) dev->dev_private;
22337+ unsigned long irqflags;
22338+
22339+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22340+
22341+ /*This register is safe even if display island is off*/
22342+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
22343+
22344+ if (hw_islands & PSB_DISPLAY_ISLAND) {
22345+ if (powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)) {
22346+ if (IS_POULSBO(dev))
22347+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
22348+ if (dev->vblank_enabled[0]) {
22349+ if (IS_MRST(dev))
22350+ psb_enable_pipestat(dev_priv, 0,
22351+ PIPE_START_VBLANK_INTERRUPT_ENABLE |
22352+ PIPE_VBLANK_INTERRUPT_ENABLE);
22353+ else
22354+ psb_enable_pipestat(dev_priv, 0,
22355+ PIPE_VBLANK_INTERRUPT_ENABLE);
22356+ } else
22357+ psb_disable_pipestat(dev_priv, 0,
22358+ PIPE_VBLANK_INTERRUPT_ENABLE |
22359+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
22360+
22361+ if (dev->vblank_enabled[1]) {
22362+ if (IS_MRST(dev))
22363+ psb_enable_pipestat(dev_priv, 1,
22364+ PIPE_START_VBLANK_INTERRUPT_ENABLE |
22365+ PIPE_VBLANK_INTERRUPT_ENABLE);
22366+ else
22367+ psb_enable_pipestat(dev_priv, 1,
22368+ PIPE_VBLANK_INTERRUPT_ENABLE);
22369+ } else
22370+ psb_disable_pipestat(dev_priv, 1,
22371+ PIPE_VBLANK_INTERRUPT_ENABLE |
22372+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
22373+ }
22374+ }
22375+
22376+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
22377+ if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22378+ PSB_WSGX32(dev_priv->sgx2_irq_mask,
22379+ PSB_CR_EVENT_HOST_ENABLE2);
22380+ PSB_WSGX32(dev_priv->sgx_irq_mask,
22381+ PSB_CR_EVENT_HOST_ENABLE);
22382+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
22383+ }
22384+ }
22385+
22386+ if (IS_MRST(dev))
22387+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
22388+ if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_ENC_ISLAND))
22389+ lnc_topaz_enableirq(dev);
22390+
22391+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
22392+ if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND))
22393+ psb_msvdx_enableirq(dev);
22394+
22395+ if (hw_islands == PSB_ALL_ISLANDS)
22396+ dev_priv->irq_enabled = 1;
22397+
22398+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22399+
22400+ return 0;
22401+}
22402+
22403+void psb_irq_uninstall(struct drm_device *dev)
22404+{
22405+ psb_irq_uninstall_islands(dev, PSB_ALL_ISLANDS);
22406+}
22407+
22408+void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands)
22409+{
22410+ struct drm_psb_private *dev_priv =
22411+ (struct drm_psb_private *) dev->dev_private;
22412+ unsigned long irqflags;
22413+
22414+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22415+
22416+ if (hw_islands & PSB_DISPLAY_ISLAND) {
22417+ if (powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)) {
22418+ if (IS_POULSBO(dev))
22419+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
22420+ if (dev->vblank_enabled[0])
22421+ psb_disable_pipestat(dev_priv, 0,
22422+ PIPE_VBLANK_INTERRUPT_ENABLE |
22423+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
22424+ if (dev->vblank_enabled[1])
22425+ psb_disable_pipestat(dev_priv, 1,
22426+ PIPE_VBLANK_INTERRUPT_ENABLE |
22427+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
22428+ }
22429+ dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
22430+ _PSB_IRQ_MSVDX_FLAG |
22431+ _LNC_IRQ_TOPAZ_FLAG;
22432+ }
22433+
22434+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
22435+ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_SGX_FLAG;
22436+ if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22437+ dev_priv->sgx_irq_mask = 0x00000000;
22438+ dev_priv->sgx2_irq_mask = 0x00000000;
22439+ PSB_WSGX32(dev_priv->sgx_irq_mask,
22440+ PSB_CR_EVENT_HOST_ENABLE);
22441+ PSB_WSGX32(dev_priv->sgx2_irq_mask,
22442+ PSB_CR_EVENT_HOST_ENABLE2);
22443+ }
22444+ }
22445+
22446+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
22447+ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
22448+
22449+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
22450+ dev_priv->vdc_irq_mask &= ~_LNC_IRQ_TOPAZ_FLAG;
22451+
22452+ /*These two registers are safe even if display island is off*/
22453+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
22454+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
22455+
22456+ wmb();
22457+
22458+ /*This register is safe even if display island is off*/
22459+ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
22460+
22461+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
22462+ if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22463+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS),
22464+ PSB_CR_EVENT_HOST_CLEAR);
22465+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2),
22466+ PSB_CR_EVENT_HOST_CLEAR2);
22467+ }
22468+ }
22469+
22470+ if (IS_MRST(dev))
22471+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
22472+ if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_ENC_ISLAND))
22473+ lnc_topaz_disableirq(dev);
22474+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
22475+ if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND))
22476+ psb_msvdx_disableirq(dev);
22477+
22478+
22479+ if (hw_islands == PSB_ALL_ISLANDS)
22480+ dev_priv->irq_enabled = 0;
22481+
22482+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22483+}
22484+
22485+void psb_2D_irq_off(struct drm_psb_private *dev_priv)
22486+{
22487+ unsigned long irqflags;
22488+ uint32_t old_mask;
22489+ uint32_t cleared_mask;
22490+ struct drm_device *dev;
22491+
22492+ dev = container_of((void *) dev_priv, struct drm_device, dev_private);
22493+
22494+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22495+ --dev_priv->irqen_count_2d;
22496+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
22497+
22498+ old_mask = dev_priv->sgx_irq_mask;
22499+ dev_priv->sgx_irq_mask &= ~_PSB_CE_TWOD_COMPLETE;
22500+ if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22501+ PSB_WSGX32(dev_priv->sgx_irq_mask,
22502+ PSB_CR_EVENT_HOST_ENABLE);
22503+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
22504+
22505+ cleared_mask =
22506+ (old_mask ^ dev_priv->sgx_irq_mask) & old_mask;
22507+ PSB_WSGX32(cleared_mask, PSB_CR_EVENT_HOST_CLEAR);
22508+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
22509+ }
22510+ }
22511+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22512+}
22513+
22514+void psb_2D_irq_on(struct drm_psb_private *dev_priv)
22515+{
22516+ unsigned long irqflags;
22517+ struct drm_device *dev;
22518+
22519+ dev = container_of((void *) dev_priv, struct drm_device, dev_private);
22520+
22521+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22522+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
22523+ dev_priv->sgx_irq_mask |= _PSB_CE_TWOD_COMPLETE;
22524+ if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22525+ PSB_WSGX32(dev_priv->sgx_irq_mask,
22526+ PSB_CR_EVENT_HOST_ENABLE);
22527+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
22528+ }
22529+ }
22530+ ++dev_priv->irqen_count_2d;
22531+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22532+}
22533+
22534+#ifdef PSB_FIXME
22535+static int psb_vblank_do_wait(struct drm_device *dev,
22536+ unsigned int *sequence, atomic_t *counter)
22537+{
22538+ unsigned int cur_vblank;
22539+ int ret = 0;
22540+ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
22541+ (((cur_vblank = atomic_read(counter))
22542+ - *sequence) <= (1 << 23)));
22543+ *sequence = cur_vblank;
22544+
22545+ return ret;
22546+}
22547+#endif
22548+
22549+
22550+/* Called from drm generic code, passed 'crtc' which
22551+ * we use as a pipe index
22552+ */
22553+int psb_enable_vblank(struct drm_device *dev, int pipe)
22554+{
22555+ struct drm_psb_private *dev_priv =
22556+ (struct drm_psb_private *) dev->dev_private;
22557+ unsigned long irqflags;
22558+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
22559+ u32 pipeconf = 0;
22560+
22561+ if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) {
22562+ pipeconf = REG_READ(pipeconf_reg);
22563+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22564+ }
22565+ if (!(pipeconf & PIPEACONF_ENABLE))
22566+ return -EINVAL;
22567+
22568+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22569+ if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) {
22570+ drm_psb_disable_vsync = 0;
22571+ if (pipe == 0)
22572+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
22573+ else
22574+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
22575+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
22576+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
22577+ if (IS_MRST(dev)) {
22578+ psb_enable_pipestat(dev_priv, pipe,
22579+ PIPE_START_VBLANK_INTERRUPT_ENABLE |
22580+ PIPE_VBLANK_INTERRUPT_ENABLE);
22581+ } else
22582+ psb_enable_pipestat(dev_priv, pipe,
22583+ PIPE_VBLANK_INTERRUPT_ENABLE);
22584+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22585+ }
22586+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22587+
22588+ return 0;
22589+}
22590+
22591+/* Called from drm generic code, passed 'crtc' which
22592+ * we use as a pipe index
22593+ */
22594+void psb_disable_vblank(struct drm_device *dev, int pipe)
22595+{
22596+ struct drm_psb_private *dev_priv =
22597+ (struct drm_psb_private *) dev->dev_private;
22598+ unsigned long irqflags;
22599+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22600+ if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) {
22601+ if (pipe == 0)
22602+ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
22603+ else
22604+ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
22605+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
22606+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
22607+ psb_disable_pipestat(dev_priv, pipe,
22608+ PIPE_VBLANK_INTERRUPT_ENABLE |
22609+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
22610+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22611+ }
22612+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22613+}
22614+
22615+static inline u32
22616+psb_pipestat(int pipe)
22617+{
22618+ if (pipe == 0)
22619+ return PIPEASTAT;
22620+ if (pipe == 1)
22621+ return PIPEBSTAT;
22622+ BUG();
22623+}
22624+
22625+void
22626+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
22627+{
22628+ if ((dev_priv->pipestat[pipe] & mask) != mask) {
22629+ u32 reg = psb_pipestat(pipe);
22630+ dev_priv->pipestat[pipe] |= mask;
22631+ /* Enable the interrupt, clear any pending status */
22632+ if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) {
22633+ u32 writeVal = PSB_RVDC32(reg);
22634+ writeVal |= (mask | (mask >> 16));
22635+ PSB_WVDC32(writeVal, reg);
22636+ (void) PSB_RVDC32(reg);
22637+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22638+ }
22639+ }
22640+}
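/*
 * Minimal sketch of the mask | (mask >> 16) idiom above, assuming the
 * usual PIPExSTAT layout: enable bits sit in the upper 16 bits and the
 * matching write-1-to-clear status bits sit 16 bits lower, so a single
 * write both enables the interrupt and clears anything already pending.
 * The helper name is illustrative only.
 */
static inline u32 psb_pipestat_enable_value(u32 current_val, u32 enable_mask)
{
	return current_val | enable_mask | (enable_mask >> 16);
}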
22641+
22642+void
22643+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
22644+{
22645+ if ((dev_priv->pipestat[pipe] & mask) != 0) {
22646+ u32 reg = psb_pipestat(pipe);
22647+ dev_priv->pipestat[pipe] &= ~mask;
22648+ if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) {
22649+ u32 writeVal = PSB_RVDC32(reg);
22650+ writeVal &= ~mask;
22651+ PSB_WVDC32(writeVal, reg);
22652+ (void) PSB_RVDC32(reg);
22653+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22654+ }
22655+ }
22656+}
22657+
22658+/**
22659+ * psb_pipe_enabled - check if a pipe is enabled
22660+ * @dev: DRM device
22661+ * @pipe: pipe to check
22662+ *
22663+ * Reading certain registers when the pipe is disabled can hang the chip.
22664+ * Use this routine to make sure the PLL is running and the pipe is active
22665+ * before reading such registers if unsure.
22666+ */
22667+static int
22668+psb_pipe_enabled(struct drm_device *dev, int pipe)
22669+{
22670+ unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
22671+ int ret = 0;
22672+
22673+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
22674+ ret = (REG_READ(pipeconf) & PIPEACONF_ENABLE);
22675+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22676+ }
22677+
22678+ return ret;
22679+}
22680+
22681+/* Called from drm generic code, passed a 'crtc', which
22682+ * we use as a pipe index
22683+ */
22684+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
22685+{
22686+ unsigned long high_frame;
22687+ unsigned long low_frame;
22688+ u32 high1, high2, low;
22689+ u32 count = 0;
22690+
22691+ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
22692+ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
22693+
22694+ if (!powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false))
22695+ return 0;
22696+
22697+ if (!psb_pipe_enabled(dev, pipe)) {
22698+ DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
22699+ goto psb_get_vblank_counter_exit;
22700+ }
22701+
22702+ /*
22703+ * High & low register fields aren't synchronized, so make sure
22704+ * we get a low value that's stable across two reads of the high
22705+ * register.
22706+ */
22707+ do {
22708+ high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
22709+ PIPE_FRAME_HIGH_SHIFT);
22710+ low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
22711+ PIPE_FRAME_LOW_SHIFT);
22712+ high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
22713+ PIPE_FRAME_HIGH_SHIFT);
22714+ } while (high1 != high2);
22715+
22716+ count = (high1 << 8) | low;
22717+
22718+psb_get_vblank_counter_exit:
22719+
22720+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22721+
22722+ return count;
22723+}
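/*
 * Minimal sketch of the read-high/read-low/re-read-high pattern above,
 * assuming a 16-bit high field and an 8-bit low field (as the shift by
 * 8 suggests); read_reg() is a hypothetical accessor standing in for
 * REG_READ() plus the mask/shift macros.
 */
static u32 psb_read_split_frame_count(u32 (*read_reg)(unsigned long),
				      unsigned long high_reg,
				      unsigned long low_reg)
{
	u32 high1, high2, low;

	do {
		high1 = read_reg(high_reg) & 0xffff;
		low = read_reg(low_reg) & 0xff;
		high2 = read_reg(high_reg) & 0xffff;
	} while (high1 != high2); /* low is only trusted once the high word is stable */

	return (high1 << 8) | low;
}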
22724diff --git a/drivers/gpu/drm/psb/psb_mmu.c b/drivers/gpu/drm/psb/psb_mmu.c
22725new file mode 100644
22726index 0000000..d3ff8e0
22727--- /dev/null
22728+++ b/drivers/gpu/drm/psb/psb_mmu.c
22729@@ -0,0 +1,1073 @@
22730+/**************************************************************************
22731+ * Copyright (c) 2007, Intel Corporation.
22732+ * All Rights Reserved.
22733+ *
22734+ * This program is free software; you can redistribute it and/or modify it
22735+ * under the terms and conditions of the GNU General Public License,
22736+ * version 2, as published by the Free Software Foundation.
22737+ *
22738+ * This program is distributed in the hope it will be useful, but WITHOUT
22739+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22740+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22741+ * more details.
22742+ *
22743+ * You should have received a copy of the GNU General Public License along with
22744+ * this program; if not, write to the Free Software Foundation, Inc.,
22745+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22746+ *
22747+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
22748+ * develop this driver.
22749+ *
22750+ **************************************************************************/
22751+#include <drm/drmP.h>
22752+#include "psb_drv.h"
22753+#include "psb_reg.h"
22754+#include "psb_powermgmt.h"
22755+
22756+/*
22757+ * Code for the SGX MMU:
22758+ */
22759+
22760+/*
22761+ * clflush on one processor only:
22762+ * clflush should apparently flush the cache line on all processors in an
22763+ * SMP system.
22764+ */
22765+
22766+/*
22767+ * kmap atomic:
22768+ * The usage of the slots must be completely encapsulated within a spinlock, and
22769+ * no other functions that may be using the locks for other purposes may be
22770+ * called from within the locked region.
22771+ * Since the slots are per processor, this will guarantee that we are the only
22772+ * user.
22773+ */
22774+
22775+/*
22776+ * TODO: Inserting ptes from an interrupt handler:
22777+ * This may be desirable for some SGX functionality where the GPU can fault in
22778+ * needed pages. For that, we need to make an atomic insert_pages function, that
22779+ * may fail.
22780+ * If it fails, the caller needs to insert the page using a workqueue function,
22781+ * but on average it should be fast.
22782+ */
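/*
 * Minimal sketch of the rule above, mirroring the pattern the page table
 * code below follows: the atomic kmap is taken and released entirely
 * inside the page-table spinlock, so nothing else can touch the per-CPU
 * mapping slot while the mapping is live.  The helper name is
 * illustrative only.
 */
static inline void psb_mmu_write_word_locked(spinlock_t *lock,
					     struct page *p,
					     uint32_t index, uint32_t val)
{
	uint32_t *v;

	spin_lock(lock);
	v = kmap_atomic(p, KM_USER0);
	v[index] = val;
	kunmap_atomic(v, KM_USER0);
	spin_unlock(lock);
}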
22783+
22784+struct psb_mmu_driver {
22785+ /* protects driver- and pd structures. Always take in read mode
22786+ * before taking the page table spinlock.
22787+ */
22788+ struct rw_semaphore sem;
22789+
22790+ /* protects page tables, directory tables,
22791+ * and pt structures.
22792+ */
22793+ spinlock_t lock;
22794+
22795+ atomic_t needs_tlbflush;
22796+
22797+ uint8_t __iomem *register_map;
22798+ struct psb_mmu_pd *default_pd;
22799+ uint32_t bif_ctrl;
22800+ int has_clflush;
22801+ int clflush_add;
22802+ unsigned long clflush_mask;
22803+
22804+ struct drm_psb_private *dev_priv;
22805+};
22806+
22807+struct psb_mmu_pd;
22808+
22809+struct psb_mmu_pt {
22810+ struct psb_mmu_pd *pd;
22811+ uint32_t index;
22812+ uint32_t count;
22813+ struct page *p;
22814+ uint32_t *v;
22815+};
22816+
22817+struct psb_mmu_pd {
22818+ struct psb_mmu_driver *driver;
22819+ int hw_context;
22820+ struct psb_mmu_pt **tables;
22821+ struct page *p;
22822+ struct page *dummy_pt;
22823+ struct page *dummy_page;
22824+ uint32_t pd_mask;
22825+ uint32_t invalid_pde;
22826+ uint32_t invalid_pte;
22827+};
22828+
22829+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
22830+
22831+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
22832+{
22833+ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
22834+}
22835+
22836+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
22837+{
22838+ return offset >> PSB_PDE_SHIFT;
22839+}
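/*
 * Minimal sketch of the two-level split these helpers feed, assuming
 * 4 KiB pages and 1024-entry tables (i.e. PSB_PTE_SHIFT == 12 and
 * PSB_PDE_SHIFT == 22); the helper name is illustrative only.
 */
static inline void psb_mmu_split_offset(uint32_t offset, uint32_t *pde,
					uint32_t *pte, uint32_t *byte_off)
{
	*pde = psb_mmu_pd_index(offset);	/* selects one of 1024 page tables */
	*pte = psb_mmu_pt_index(offset);	/* selects one of 1024 PTEs in it */
	*byte_off = offset & (PAGE_SIZE - 1);	/* offset inside the 4 KiB page */
}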
22840+
22841+#if defined(CONFIG_X86)
22842+static inline void psb_clflush(void *addr)
22843+{
22844+ __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
22845+}
22846+
22847+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
22848+ void *addr)
22849+{
22850+ if (!driver->has_clflush)
22851+ return;
22852+
22853+ mb();
22854+ psb_clflush(addr);
22855+ mb();
22856+}
22857+#else
22858+
22859+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
22860+ void *addr)
22861+{
22862+}
22863+
22864+#endif
22865+
22866+static inline void psb_iowrite32(const struct psb_mmu_driver *d,
22867+ uint32_t val, uint32_t offset)
22868+{
22869+ iowrite32(val, d->register_map + offset);
22870+}
22871+
22872+static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d,
22873+ uint32_t offset)
22874+{
22875+ return ioread32(d->register_map + offset);
22876+}
22877+
22878+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
22879+ int force)
22880+{
22881+ if (atomic_read(&driver->needs_tlbflush) || force) {
22882+ uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
22883+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
22884+ PSB_CR_BIF_CTRL);
22885+ wmb();
22886+ psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC,
22887+ PSB_CR_BIF_CTRL);
22888+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
22889+ if (driver->dev_priv) {
22890+ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
22891+ if (IS_MRST(driver->dev_priv->dev))
22892+ topaz_mmu_flushcache(driver->dev_priv);
22893+ }
22894+ }
22895+ atomic_set(&driver->needs_tlbflush, 0);
22896+}
22897+
22898+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
22899+{
22900+ down_write(&driver->sem);
22901+ psb_mmu_flush_pd_locked(driver, force);
22902+ up_write(&driver->sem);
22903+}
22904+
22905+void psb_mmu_flush(struct psb_mmu_driver *driver)
22906+{
22907+ uint32_t val;
22908+
22909+ if (powermgmt_using_hw_begin(driver->dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, false)) {
22910+ down_write(&driver->sem);
22911+ val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
22912+ if (atomic_read(&driver->needs_tlbflush))
22913+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
22914+ PSB_CR_BIF_CTRL);
22915+ else
22916+ psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH,
22917+ PSB_CR_BIF_CTRL);
22918+ wmb();
22919+ psb_iowrite32(driver,
22920+ val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
22921+ PSB_CR_BIF_CTRL);
22922+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
22923+ atomic_set(&driver->needs_tlbflush, 0);
22924+ up_write(&driver->sem);
22925+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
22926+ } else {
22927+ PSB_DEBUG_PM("mmu flush when down\n");
22928+ }
22929+
22930+ down_write(&driver->sem);
22931+ if (driver->dev_priv) {
22932+ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
22933+ if (IS_MRST(driver->dev_priv->dev))
22934+ topaz_mmu_flushcache(driver->dev_priv);
22935+ }
22936+
22937+ up_write(&driver->sem);
22938+}
22939+
22940+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
22941+{
22942+ uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
22943+ PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
22944+
22945+ ttm_tt_cache_flush(&pd->p, 1);
22946+ down_write(&pd->driver->sem);
22947+ psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT),
22948+ offset);
22949+ wmb();
22950+ psb_mmu_flush_pd_locked(pd->driver, 1);
22951+ pd->hw_context = hw_context;
22952+ up_write(&pd->driver->sem);
22953+
22954+}
22955+
22956+static inline unsigned long psb_pd_addr_end(unsigned long addr,
22957+ unsigned long end)
22958+{
22959+
22960+ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
22961+ return (addr < end) ? addr : end;
22962+}
22963+
22964+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
22965+{
22966+ uint32_t mask = PSB_PTE_VALID;
22967+
22968+ if (type & PSB_MMU_CACHED_MEMORY)
22969+ mask |= PSB_PTE_CACHED;
22970+ if (type & PSB_MMU_RO_MEMORY)
22971+ mask |= PSB_PTE_RO;
22972+ if (type & PSB_MMU_WO_MEMORY)
22973+ mask |= PSB_PTE_WO;
22974+
22975+ return (pfn << PAGE_SHIFT) | mask;
22976+}
22977+
22978+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
22979+ int trap_pagefaults, int invalid_type)
22980+{
22981+ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
22982+ uint32_t *v;
22983+ int i;
22984+
22985+ if (!pd)
22986+ return NULL;
22987+
22988+ pd->p = alloc_page(GFP_DMA32);
22989+ if (!pd->p)
22990+ goto out_err1;
22991+ pd->dummy_pt = alloc_page(GFP_DMA32);
22992+ if (!pd->dummy_pt)
22993+ goto out_err2;
22994+ pd->dummy_page = alloc_page(GFP_DMA32);
22995+ if (!pd->dummy_page)
22996+ goto out_err3;
22997+
22998+ if (!trap_pagefaults) {
22999+ pd->invalid_pde =
23000+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
23001+ invalid_type);
23002+ pd->invalid_pte =
23003+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
23004+ invalid_type);
23005+ } else {
23006+ pd->invalid_pde = 0;
23007+ pd->invalid_pte = 0;
23008+ }
23009+
23010+ v = kmap(pd->dummy_pt);
23011+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
23012+ v[i] = pd->invalid_pte;
23013+
23014+ kunmap(pd->dummy_pt);
23015+
23016+ v = kmap(pd->p);
23017+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
23018+ v[i] = pd->invalid_pde;
23019+
23020+ kunmap(pd->p);
23021+
23022+ clear_page(kmap(pd->dummy_page));
23023+ kunmap(pd->dummy_page);
23024+
23025+ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
23026+ if (!pd->tables)
23027+ goto out_err4;
23028+
23029+ pd->hw_context = -1;
23030+ pd->pd_mask = PSB_PTE_VALID;
23031+ pd->driver = driver;
23032+
23033+ return pd;
23034+
23035+out_err4:
23036+ __free_page(pd->dummy_page);
23037+out_err3:
23038+ __free_page(pd->dummy_pt);
23039+out_err2:
23040+ __free_page(pd->p);
23041+out_err1:
23042+ kfree(pd);
23043+ return NULL;
23044+}
23045+
23046+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
23047+{
23048+ __free_page(pt->p);
23049+ kfree(pt);
23050+}
23051+
23052+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
23053+{
23054+ struct psb_mmu_driver *driver = pd->driver;
23055+ struct psb_mmu_pt *pt;
23056+ int i;
23057+
23058+ down_write(&driver->sem);
23059+ if (pd->hw_context != -1) {
23060+ psb_iowrite32(driver, 0,
23061+ PSB_CR_BIF_DIR_LIST_BASE0 +
23062+ pd->hw_context * 4);
23063+ psb_mmu_flush_pd_locked(driver, 1);
23064+ }
23065+
23066+ /* Should take the spinlock here, but we don't need to do that
23067+ since we have the semaphore in write mode. */
23068+
23069+ for (i = 0; i < 1024; ++i) {
23070+ pt = pd->tables[i];
23071+ if (pt)
23072+ psb_mmu_free_pt(pt);
23073+ }
23074+
23075+ vfree(pd->tables);
23076+ __free_page(pd->dummy_page);
23077+ __free_page(pd->dummy_pt);
23078+ __free_page(pd->p);
23079+ kfree(pd);
23080+ up_write(&driver->sem);
23081+}
23082+
23083+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
23084+{
23085+ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
23086+ void *v;
23087+ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
23088+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
23089+ spinlock_t *lock = &pd->driver->lock;
23090+ uint8_t *clf;
23091+ uint32_t *ptes;
23092+ int i;
23093+
23094+ if (!pt)
23095+ return NULL;
23096+
23097+ pt->p = alloc_page(GFP_DMA32);
23098+ if (!pt->p) {
23099+ kfree(pt);
23100+ return NULL;
23101+ }
23102+
23103+ spin_lock(lock);
23104+
23105+ v = kmap_atomic(pt->p, KM_USER0);
23106+ clf = (uint8_t *) v;
23107+ ptes = (uint32_t *) v;
23108+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
23109+ *ptes++ = pd->invalid_pte;
23110+
23111+
23112+#if defined(CONFIG_X86)
23113+ if (pd->driver->has_clflush && pd->hw_context != -1) {
23114+ mb();
23115+ for (i = 0; i < clflush_count; ++i) {
23116+ psb_clflush(clf);
23117+ clf += clflush_add;
23118+ }
23119+ mb();
23120+ }
23121+#endif
23122+ kunmap_atomic(v, KM_USER0);
23123+ spin_unlock(lock);
23124+
23125+ pt->count = 0;
23126+ pt->pd = pd;
23127+ pt->index = 0;
23128+
23129+ return pt;
23130+}
23131+
23132+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
23133+ unsigned long addr)
23134+{
23135+ uint32_t index = psb_mmu_pd_index(addr);
23136+ struct psb_mmu_pt *pt;
23137+ uint32_t *v;
23138+ spinlock_t *lock = &pd->driver->lock;
23139+
23140+ spin_lock(lock);
23141+ pt = pd->tables[index];
23142+ while (!pt) {
23143+ spin_unlock(lock);
23144+ pt = psb_mmu_alloc_pt(pd);
23145+ if (!pt)
23146+ return NULL;
23147+ spin_lock(lock);
23148+
23149+ if (pd->tables[index]) {
23150+ spin_unlock(lock);
23151+ psb_mmu_free_pt(pt);
23152+ spin_lock(lock);
23153+ pt = pd->tables[index];
23154+ continue;
23155+ }
23156+
23157+ v = kmap_atomic(pd->p, KM_USER0);
23158+ pd->tables[index] = pt;
23159+ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
23160+ pt->index = index;
23161+ kunmap_atomic((void *) v, KM_USER0);
23162+
23163+ if (pd->hw_context != -1) {
23164+ psb_mmu_clflush(pd->driver, (void *) &v[index]);
23165+ atomic_set(&pd->driver->needs_tlbflush, 1);
23166+ }
23167+ }
23168+ pt->v = kmap_atomic(pt->p, KM_USER0);
23169+ return pt;
23170+}
23171+
23172+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
23173+ unsigned long addr)
23174+{
23175+ uint32_t index = psb_mmu_pd_index(addr);
23176+ struct psb_mmu_pt *pt;
23177+ spinlock_t *lock = &pd->driver->lock;
23178+
23179+ spin_lock(lock);
23180+ pt = pd->tables[index];
23181+ if (!pt) {
23182+ spin_unlock(lock);
23183+ return NULL;
23184+ }
23185+ pt->v = kmap_atomic(pt->p, KM_USER0);
23186+ return pt;
23187+}
23188+
23189+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
23190+{
23191+ struct psb_mmu_pd *pd = pt->pd;
23192+ uint32_t *v;
23193+
23194+ kunmap_atomic(pt->v, KM_USER0);
23195+ if (pt->count == 0) {
23196+ v = kmap_atomic(pd->p, KM_USER0);
23197+ v[pt->index] = pd->invalid_pde;
23198+ pd->tables[pt->index] = NULL;
23199+
23200+ if (pd->hw_context != -1) {
23201+ psb_mmu_clflush(pd->driver,
23202+ (void *) &v[pt->index]);
23203+ atomic_set(&pd->driver->needs_tlbflush, 1);
23204+ }
23205+ kunmap_atomic(v, KM_USER0);
23206+ spin_unlock(&pd->driver->lock);
23207+ psb_mmu_free_pt(pt);
23208+ return;
23209+ }
23210+ spin_unlock(&pd->driver->lock);
23211+}
23212+
23213+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
23214+ unsigned long addr, uint32_t pte)
23215+{
23216+ pt->v[psb_mmu_pt_index(addr)] = pte;
23217+}
23218+
23219+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
23220+ unsigned long addr)
23221+{
23222+ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
23223+}
23224+
23225+#if 0
23226+static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
23227+ uint32_t mmu_offset)
23228+{
23229+ uint32_t *v;
23230+ uint32_t pfn;
23231+
23232+ v = kmap_atomic(pd->p, KM_USER0);
23233+ if (!v) {
23234+ printk(KERN_INFO "Could not kmap pde page.\n");
23235+ return 0;
23236+ }
23237+ pfn = v[psb_mmu_pd_index(mmu_offset)];
23238+ /* printk(KERN_INFO "pde is 0x%08x\n",pfn); */
23239+ kunmap_atomic(v, KM_USER0);
23240+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
23241+ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
23242+ mmu_offset, pfn);
23243+ }
23244+ v = ioremap(pfn & 0xFFFFF000, 4096);
23245+ if (!v) {
23246+ printk(KERN_INFO "Could not kmap pte page.\n");
23247+ return 0;
23248+ }
23249+ pfn = v[psb_mmu_pt_index(mmu_offset)];
23250+ /* printk(KERN_INFO "pte is 0x%08x\n",pfn); */
23251+ iounmap(v);
23252+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
23253+ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
23254+ mmu_offset, pfn);
23255+ }
23256+ return pfn >> PAGE_SHIFT;
23257+}
23258+
23259+static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
23260+ uint32_t mmu_offset,
23261+ uint32_t gtt_pages)
23262+{
23263+ uint32_t start;
23264+ uint32_t next;
23265+
23266+ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
23267+ mmu_offset, gtt_pages);
23268+ down_read(&pd->driver->sem);
23269+ start = psb_mmu_check_pte_locked(pd, mmu_offset);
23270+ mmu_offset += PAGE_SIZE;
23271+ gtt_pages -= 1;
23272+ while (gtt_pages--) {
23273+ next = psb_mmu_check_pte_locked(pd, mmu_offset);
23274+ if (next != start + 1) {
23275+ printk(KERN_INFO
23276+ "Ptes out of order: 0x%08x, 0x%08x.\n",
23277+ start, next);
23278+ }
23279+ start = next;
23280+ mmu_offset += PAGE_SIZE;
23281+ }
23282+ up_read(&pd->driver->sem);
23283+}
23284+
23285+#endif
23286+
23287+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
23288+ uint32_t mmu_offset, uint32_t gtt_start,
23289+ uint32_t gtt_pages)
23290+{
23291+ uint32_t *v;
23292+ uint32_t start = psb_mmu_pd_index(mmu_offset);
23293+ struct psb_mmu_driver *driver = pd->driver;
23294+ int num_pages = gtt_pages;
23295+
23296+ down_read(&driver->sem);
23297+ spin_lock(&driver->lock);
23298+
23299+ v = kmap_atomic(pd->p, KM_USER0);
23300+ v += start;
23301+
23302+ while (gtt_pages--) {
23303+ *v++ = gtt_start | pd->pd_mask;
23304+ gtt_start += PAGE_SIZE;
23305+ }
23306+
23307+ ttm_tt_cache_flush(&pd->p, num_pages);
23308+ kunmap_atomic(v, KM_USER0);
23309+ spin_unlock(&driver->lock);
23310+
23311+ if (pd->hw_context != -1)
23312+ atomic_set(&pd->driver->needs_tlbflush, 1);
23313+
23314+ up_read(&pd->driver->sem);
23315+ psb_mmu_flush_pd(pd->driver, 0);
23316+}
23317+
23318+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
23319+{
23320+ struct psb_mmu_pd *pd;
23321+
23322+ down_read(&driver->sem);
23323+ pd = driver->default_pd;
23324+ up_read(&driver->sem);
23325+
23326+ return pd;
23327+}
23328+
23329+/* Returns the physical address of the PD shared by sgx/msvdx */
23330+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
23331+{
23332+ struct psb_mmu_pd *pd;
23333+
23334+ pd = psb_mmu_get_default_pd(driver);
23335+ return page_to_pfn(pd->p) << PAGE_SHIFT;
23336+}
23337+
23338+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
23339+{
23340+ psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL);
23341+ psb_mmu_free_pagedir(driver->default_pd);
23342+ kfree(driver);
23343+}
23344+
23345+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
23346+ int trap_pagefaults,
23347+ int invalid_type,
23348+ struct drm_psb_private *dev_priv)
23349+{
23350+ struct psb_mmu_driver *driver;
23351+
23352+ driver = kmalloc(sizeof(*driver), GFP_KERNEL);
23353+
23354+ if (!driver)
23355+ return NULL;
23356+ driver->dev_priv = dev_priv;
23357+
23358+ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
23359+ invalid_type);
23360+ if (!driver->default_pd)
23361+ goto out_err1;
23362+
23363+ spin_lock_init(&driver->lock);
23364+ init_rwsem(&driver->sem);
23365+ down_write(&driver->sem);
23366+ driver->register_map = registers;
23367+ atomic_set(&driver->needs_tlbflush, 1);
23368+
23369+ driver->bif_ctrl = psb_ioread32(driver, PSB_CR_BIF_CTRL);
23370+ psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
23371+ PSB_CR_BIF_CTRL);
23372+ psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
23373+ PSB_CR_BIF_CTRL);
23374+
23375+ driver->has_clflush = 0;
23376+
23377+#if defined(CONFIG_X86)
23378+ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
23379+ uint32_t tfms, misc, cap0, cap4, clflush_size;
23380+
23381+ /*
23382+ * clflush size is determined at kernel setup for x86_64
23383+ * but not for i386. We have to do it here.
23384+ */
23385+
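		/*
		 * CPUID leaf 1 returns the CLFLUSH line size in EBX bits 15:8,
		 * in 8-byte units, so the usual raw value of 8 means 64-byte
		 * lines. clflush_add below is the span of GPU-virtual address
		 * space covered by one cache line of 32-bit PTEs (with 64-byte
		 * lines: 64 / 4 = 16 PTEs * 4 KiB = 64 KiB), and clflush_mask
		 * rounds addresses down to that granularity.
		 */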
23386+ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
23387+ clflush_size = ((misc >> 8) & 0xff) * 8;
23388+ driver->has_clflush = 1;
23389+ driver->clflush_add =
23390+ PAGE_SIZE * clflush_size / sizeof(uint32_t);
23391+ driver->clflush_mask = driver->clflush_add - 1;
23392+ driver->clflush_mask = ~driver->clflush_mask;
23393+ }
23394+#endif
23395+
23396+ up_write(&driver->sem);
23397+ return driver;
23398+
23399+out_err1:
23400+ kfree(driver);
23401+ return NULL;
23402+}
23403+
23404+#if defined(CONFIG_X86)
23405+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
23406+ unsigned long address, uint32_t num_pages,
23407+ uint32_t desired_tile_stride,
23408+ uint32_t hw_tile_stride)
23409+{
23410+ struct psb_mmu_pt *pt;
23411+ uint32_t rows = 1;
23412+ uint32_t i;
23413+ unsigned long addr;
23414+ unsigned long end;
23415+ unsigned long next;
23416+ unsigned long add;
23417+ unsigned long row_add;
23418+ unsigned long clflush_add = pd->driver->clflush_add;
23419+ unsigned long clflush_mask = pd->driver->clflush_mask;
23420+
23421+ if (!pd->driver->has_clflush) {
23422+ ttm_tt_cache_flush(&pd->p, num_pages);
23423+ return;
23424+ }
23425+
23426+ if (hw_tile_stride)
23427+ rows = num_pages / desired_tile_stride;
23428+ else
23429+ desired_tile_stride = num_pages;
23430+
23431+ add = desired_tile_stride << PAGE_SHIFT;
23432+ row_add = hw_tile_stride << PAGE_SHIFT;
23433+ mb();
23434+ for (i = 0; i < rows; ++i) {
23435+
23436+ addr = address;
23437+ end = addr + add;
23438+
23439+ do {
23440+ next = psb_pd_addr_end(addr, end);
23441+ pt = psb_mmu_pt_map_lock(pd, addr);
23442+ if (!pt)
23443+ continue;
23444+ do {
23445+ psb_clflush(&pt->v
23446+ [psb_mmu_pt_index(addr)]);
23447+ } while (addr +=
23448+ clflush_add,
23449+ (addr & clflush_mask) < next);
23450+
23451+ psb_mmu_pt_unmap_unlock(pt);
23452+ } while (addr = next, next != end);
23453+ address += row_add;
23454+ }
23455+ mb();
23456+}
23457+#else
23458+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
23459+ unsigned long address, uint32_t num_pages,
23460+ uint32_t desired_tile_stride,
23461+ uint32_t hw_tile_stride)
23462+{
23463+ drm_ttm_cache_flush(&pd->p, num_pages);
23464+}
23465+#endif
23466+
23467+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
23468+ unsigned long address, uint32_t num_pages)
23469+{
23470+ struct psb_mmu_pt *pt;
23471+ unsigned long addr;
23472+ unsigned long end;
23473+ unsigned long next;
23474+ unsigned long f_address = address;
23475+
23476+ down_read(&pd->driver->sem);
23477+
23478+ addr = address;
23479+ end = addr + (num_pages << PAGE_SHIFT);
23480+
23481+ do {
23482+ next = psb_pd_addr_end(addr, end);
23483+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
23484+ if (!pt)
23485+ goto out;
23486+ do {
23487+ psb_mmu_invalidate_pte(pt, addr);
23488+ --pt->count;
23489+ } while (addr += PAGE_SIZE, addr < next);
23490+ psb_mmu_pt_unmap_unlock(pt);
23491+
23492+ } while (addr = next, next != end);
23493+
23494+out:
23495+ if (pd->hw_context != -1)
23496+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
23497+
23498+ up_read(&pd->driver->sem);
23499+
23500+ if (pd->hw_context != -1)
23501+ psb_mmu_flush(pd->driver);
23502+
23503+ return;
23504+}
23505+
23506+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
23507+ uint32_t num_pages, uint32_t desired_tile_stride,
23508+ uint32_t hw_tile_stride)
23509+{
23510+ struct psb_mmu_pt *pt;
23511+ uint32_t rows = 1;
23512+ uint32_t i;
23513+ unsigned long addr;
23514+ unsigned long end;
23515+ unsigned long next;
23516+ unsigned long add;
23517+ unsigned long row_add;
23518+ unsigned long f_address = address;
23519+
23520+ if (hw_tile_stride)
23521+ rows = num_pages / desired_tile_stride;
23522+ else
23523+ desired_tile_stride = num_pages;
23524+
23525+ add = desired_tile_stride << PAGE_SHIFT;
23526+ row_add = hw_tile_stride << PAGE_SHIFT;
23527+
23528+ down_read(&pd->driver->sem);
23529+
23530+ /* Make sure we only need to flush this processor's cache */
23531+
23532+ for (i = 0; i < rows; ++i) {
23533+
23534+ addr = address;
23535+ end = addr + add;
23536+
23537+ do {
23538+ next = psb_pd_addr_end(addr, end);
23539+ pt = psb_mmu_pt_map_lock(pd, addr);
23540+ if (!pt)
23541+ continue;
23542+ do {
23543+ psb_mmu_invalidate_pte(pt, addr);
23544+ --pt->count;
23545+
23546+ } while (addr += PAGE_SIZE, addr < next);
23547+ psb_mmu_pt_unmap_unlock(pt);
23548+
23549+ } while (addr = next, next != end);
23550+ address += row_add;
23551+ }
23552+ if (pd->hw_context != -1)
23553+ psb_mmu_flush_ptes(pd, f_address, num_pages,
23554+ desired_tile_stride, hw_tile_stride);
23555+
23556+ up_read(&pd->driver->sem);
23557+
23558+ if (pd->hw_context != -1)
23559+ psb_mmu_flush(pd->driver);
23560+}
23561+
23562+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
23563+ unsigned long address, uint32_t num_pages,
23564+ int type)
23565+{
23566+ struct psb_mmu_pt *pt;
23567+ uint32_t pte;
23568+ unsigned long addr;
23569+ unsigned long end;
23570+ unsigned long next;
23571+ unsigned long f_address = address;
23572+ int ret = 0;
23573+
23574+ down_read(&pd->driver->sem);
23575+
23576+ addr = address;
23577+ end = addr + (num_pages << PAGE_SHIFT);
23578+
23579+ do {
23580+ next = psb_pd_addr_end(addr, end);
23581+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
23582+ if (!pt) {
23583+ ret = -ENOMEM;
23584+ goto out;
23585+ }
23586+ do {
23587+ pte = psb_mmu_mask_pte(start_pfn++, type);
23588+ psb_mmu_set_pte(pt, addr, pte);
23589+ pt->count++;
23590+ } while (addr += PAGE_SIZE, addr < next);
23591+ psb_mmu_pt_unmap_unlock(pt);
23592+
23593+ } while (addr = next, next != end);
23594+
23595+out:
23596+ if (pd->hw_context != -1)
23597+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
23598+
23599+ up_read(&pd->driver->sem);
23600+
23601+ if (pd->hw_context != -1)
23602+ psb_mmu_flush(pd->driver);
23603+
23604+ return ret;
23605+}
23606+
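/*
 * Map @num_pages pages into the GPU MMU starting at @address. With a
 * non-zero hw_tile_stride the mapping is laid out as
 * num_pages / desired_tile_stride rows of desired_tile_stride pages each,
 * consecutive rows starting hw_tile_stride pages apart in MMU address
 * space; otherwise the pages form one contiguous run.
 */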
23607+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
23608+ unsigned long address, uint32_t num_pages,
23609+ uint32_t desired_tile_stride,
23610+ uint32_t hw_tile_stride, int type)
23611+{
23612+ struct psb_mmu_pt *pt;
23613+ uint32_t rows = 1;
23614+ uint32_t i;
23615+ uint32_t pte;
23616+ unsigned long addr;
23617+ unsigned long end;
23618+ unsigned long next;
23619+ unsigned long add;
23620+ unsigned long row_add;
23621+ unsigned long f_address = address;
23622+ int ret = 0;
23623+
23624+ if (hw_tile_stride) {
23625+ if (num_pages % desired_tile_stride != 0)
23626+ return -EINVAL;
23627+ rows = num_pages / desired_tile_stride;
23628+ } else {
23629+ desired_tile_stride = num_pages;
23630+ }
23631+
23632+ add = desired_tile_stride << PAGE_SHIFT;
23633+ row_add = hw_tile_stride << PAGE_SHIFT;
23634+
23635+ down_read(&pd->driver->sem);
23636+
23637+ for (i = 0; i < rows; ++i) {
23638+
23639+ addr = address;
23640+ end = addr + add;
23641+
23642+ do {
23643+ next = psb_pd_addr_end(addr, end);
23644+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
23645+ if (!pt) {
23646+ ret = -ENOMEM;
23647+ goto out;
23648+ }
23649+ do {
23650+ pte =
23651+ psb_mmu_mask_pte(page_to_pfn(*pages++),
23652+ type);
23653+ psb_mmu_set_pte(pt, addr, pte);
23654+ pt->count++;
23655+ } while (addr += PAGE_SIZE, addr < next);
23656+ psb_mmu_pt_unmap_unlock(pt);
23657+
23658+ } while (addr = next, next != end);
23659+
23660+ address += row_add;
23661+ }
23662+out:
23663+ if (pd->hw_context != -1)
23664+ psb_mmu_flush_ptes(pd, f_address, num_pages,
23665+ desired_tile_stride, hw_tile_stride);
23666+
23667+ up_read(&pd->driver->sem);
23668+
23669+ if (pd->hw_context != -1)
23670+ psb_mmu_flush(pd->driver);
23671+
23672+ return ret;
23673+}
23674+
23675+void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
23676+{
23677+ mask &= _PSB_MMU_ER_MASK;
23678+ psb_iowrite32(driver,
23679+ psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
23680+ PSB_CR_BIF_CTRL);
23681+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
23682+}
23683+
23684+void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
23685+ uint32_t mask)
23686+{
23687+ mask &= _PSB_MMU_ER_MASK;
23688+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
23689+ PSB_CR_BIF_CTRL);
23690+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
23691+}
23692+
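/*
 * Translate a GPU-virtual address into the pfn currently mapped there.
 * When no page table exists for the address, the page-directory slot is
 * inspected instead: if it still holds the invalid PDE and the invalid
 * PTE is marked valid, the pfn encoded in that invalid PTE (typically the
 * dummy page) is reported; anything else yields -EINVAL.
 */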
23693+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
23694+ unsigned long *pfn)
23695+{
23696+ int ret;
23697+ struct psb_mmu_pt *pt;
23698+ uint32_t tmp;
23699+ spinlock_t *lock = &pd->driver->lock;
23700+
23701+ down_read(&pd->driver->sem);
23702+ pt = psb_mmu_pt_map_lock(pd, virtual);
23703+ if (!pt) {
23704+ uint32_t *v;
23705+
23706+ spin_lock(lock);
23707+ v = kmap_atomic(pd->p, KM_USER0);
23708+ tmp = v[psb_mmu_pd_index(virtual)];
23709+ kunmap_atomic(v, KM_USER0);
23710+ spin_unlock(lock);
23711+
23712+ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
23713+ !(pd->invalid_pte & PSB_PTE_VALID)) {
23714+ ret = -EINVAL;
23715+ goto out;
23716+ }
23717+ ret = 0;
23718+ *pfn = pd->invalid_pte >> PAGE_SHIFT;
23719+ goto out;
23720+ }
23721+ tmp = pt->v[psb_mmu_pt_index(virtual)];
23722+ if (!(tmp & PSB_PTE_VALID)) {
23723+ ret = -EINVAL;
23724+ } else {
23725+ ret = 0;
23726+ *pfn = tmp >> PAGE_SHIFT;
23727+ }
23728+ psb_mmu_pt_unmap_unlock(pt);
23729+out:
23730+ up_read(&pd->driver->sem);
23731+ return ret;
23732+}
23733+
23734+void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
23735+{
23736+ struct page *p;
23737+ unsigned long pfn;
23738+ int ret = 0;
23739+ struct psb_mmu_pd *pd;
23740+ uint32_t *v;
23741+ uint32_t *vmmu;
23742+
23743+ pd = driver->default_pd;
23744+ if (!pd)
23745+ printk(KERN_WARNING "Could not get default pd\n");
23746+
23747+
23748+ p = alloc_page(GFP_DMA32);
23749+
23750+ if (!p) {
23751+ printk(KERN_WARNING "Failed allocating page\n");
23752+ return;
23753+ }
23754+
23755+ v = kmap(p);
23756+ memset(v, 0x67, PAGE_SIZE);
23757+
23758+ pfn = (offset >> PAGE_SHIFT);
23759+
23760+ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
23761+ if (ret) {
23762+ printk(KERN_WARNING "Failed inserting mmu page\n");
23763+ goto out_err1;
23764+ }
23765+
23766+ /* Ioremap the page through the GART aperture */
23767+
23768+ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
23769+ if (!vmmu) {
23770+ printk(KERN_WARNING "Failed ioremapping page\n");
23771+ goto out_err2;
23772+ }
23773+
23774+ /* Read from the page with mmu disabled. */
23775+ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
23776+
23777+ /* Enable the mmu for host accesses and read again. */
23778+ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
23779+
23780+ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
23781+ ioread32(vmmu));
23782+ *v = 0x15243705;
23783+ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
23784+ ioread32(vmmu));
23785+ iowrite32(0x16243355, vmmu);
23786+ (void) ioread32(vmmu);
23787+ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
23788+
23789+ printk(KERN_INFO "Int stat is 0x%08x\n",
23790+ psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
23791+ printk(KERN_INFO "Fault is 0x%08x\n",
23792+ psb_ioread32(driver, PSB_CR_BIF_FAULT));
23793+
23794+ /* Disable MMU for host accesses and clear page fault register */
23795+ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
23796+ iounmap(vmmu);
23797+out_err2:
23798+ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
23799+out_err1:
23800+ kunmap(p);
23801+ __free_page(p);
23802+}
23803diff --git a/drivers/gpu/drm/psb/psb_msvdx.c b/drivers/gpu/drm/psb/psb_msvdx.c
23804new file mode 100644
23805index 0000000..6930880
23806--- /dev/null
23807+++ b/drivers/gpu/drm/psb/psb_msvdx.c
23808@@ -0,0 +1,855 @@
23809+/**
23810+ * file psb_msvdx.c
23811+ * MSVDX I/O operations and IRQ handling
23812+ *
23813+ */
23814+
23815+/**************************************************************************
23816+ *
23817+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
23818+ * Copyright (c) Imagination Technologies Limited, UK
23819+ * All Rights Reserved.
23820+ *
23821+ * Permission is hereby granted, free of charge, to any person obtaining a
23822+ * copy of this software and associated documentation files (the
23823+ * "Software"), to deal in the Software without restriction, including
23824+ * without limitation the rights to use, copy, modify, merge, publish,
23825+ * distribute, sub license, and/or sell copies of the Software, and to
23826+ * permit persons to whom the Software is furnished to do so, subject to
23827+ * the following conditions:
23828+ *
23829+ * The above copyright notice and this permission notice (including the
23830+ * next paragraph) shall be included in all copies or substantial portions
23831+ * of the Software.
23832+ *
23833+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23834+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23835+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
23836+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23837+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23838+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23839+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
23840+ *
23841+ **************************************************************************/
23842+
23843+#include <drm/drmP.h>
23844+#include <drm/drm_os_linux.h>
23845+#include "psb_drv.h"
23846+#include "psb_drm.h"
23847+#include "psb_msvdx.h"
23848+#include "lnc_topaz.h"
23849+#include "psb_powermgmt.h"
23850+#include <linux/io.h>
23851+#include <linux/delay.h>
23852+
23853+#ifndef list_first_entry
23854+#define list_first_entry(ptr, type, member) \
23855+ list_entry((ptr)->next, type, member)
23856+#endif
23857+
23858+
23859+static int psb_msvdx_send(struct drm_device *dev, void *cmd,
23860+ unsigned long cmd_size);
23861+
23862+static int psb_msvdx_dequeue_send(struct drm_device *dev)
23863+{
23864+ struct drm_psb_private *dev_priv = dev->dev_private;
23865+ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
23866+ int ret = 0;
23867+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
23868+
23869+ if (list_empty(&msvdx_priv->msvdx_queue)) {
23870+ PSB_DEBUG_GENERAL("MSVDXQUE: msvdx list empty.\n");
23871+ msvdx_priv->msvdx_busy = 0;
23872+ return -EINVAL;
23873+ }
23874+ msvdx_cmd = list_first_entry(&msvdx_priv->msvdx_queue,
23875+ struct psb_msvdx_cmd_queue, head);
23876+ PSB_DEBUG_GENERAL("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
23877+ ret = psb_msvdx_send(dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
23878+ if (ret) {
23879+ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
23880+ ret = -EINVAL;
23881+ }
23882+ list_del(&msvdx_cmd->head);
23883+ kfree(msvdx_cmd->cmd);
23884+ kfree(msvdx_cmd);
23885+
23886+ return ret;
23887+}
23888+
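/*
 * Walk the command buffer one firmware message at a time, validating the
 * size and id of each and patching the fence value and MMU page-directory
 * address into VA_MSGID_RENDER messages. Depending on @copy_cmd the result
 * is either copied into a freshly allocated buffer for later queueing or
 * handed straight to psb_msvdx_send().
 */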
23889+static int psb_msvdx_map_command(struct drm_device *dev,
23890+ struct ttm_buffer_object *cmd_buffer,
23891+ unsigned long cmd_offset, unsigned long cmd_size,
23892+ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
23893+{
23894+ struct drm_psb_private *dev_priv = dev->dev_private;
23895+ int ret = 0;
23896+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
23897+ unsigned long cmd_size_remaining;
23898+ struct ttm_bo_kmap_obj cmd_kmap;
23899+ void *cmd, *tmp, *cmd_start;
23900+ bool is_iomem;
23901+
23902+ /* command buffers may not exceed page boundary */
23903+ if (cmd_size + cmd_page_offset > PAGE_SIZE)
23904+ return -EINVAL;
23905+
23906+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 1, &cmd_kmap);
23907+ if (ret) {
23908+ DRM_ERROR("MSVDXQUE:ret:%d\n", ret);
23909+ return ret;
23910+ }
23911+
23912+ cmd_start = (void *)ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem)
23913+ + cmd_page_offset;
23914+ cmd = cmd_start;
23915+ cmd_size_remaining = cmd_size;
23916+
23917+ while (cmd_size_remaining > 0) {
23918+ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
23919+ uint32_t cur_cmd_id = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_ID);
23920+ uint32_t mmu_ptd = 0, tmp = 0;
23921+
23922+ PSB_DEBUG_GENERAL("cmd start at %08x cur_cmd_size = %d"
23923+ " cur_cmd_id = %02x fence = %08x\n",
23924+ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
23925+ if ((cur_cmd_size % sizeof(uint32_t))
23926+ || (cur_cmd_size > cmd_size_remaining)) {
23927+ ret = -EINVAL;
23928+ DRM_ERROR("MSVDX: ret:%d\n", ret);
23929+ goto out;
23930+ }
23931+
23932+ switch (cur_cmd_id) {
23933+ case VA_MSGID_RENDER:
23934+ /* Fence ID */
23935+ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_FENCE_VALUE,
23936+ sequence);
23937+ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
23938+ tmp = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
23939+ 1, 0);
23940+ if (tmp == 1) {
23941+ mmu_ptd |= 1;
23942+ PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
23943+ }
23944+
23945+ /* PTD */
23946+ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
23947+ break;
23948+
23949+ default:
23950+ /* Msg not supported */
23951+ ret = -EINVAL;
23952+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
23953+ goto out;
23954+ }
23955+
23956+ cmd += cur_cmd_size;
23957+ cmd_size_remaining -= cur_cmd_size;
23958+ }
23959+
23960+ if (copy_cmd) {
23961+ PSB_DEBUG_GENERAL("MSVDXQUE:copying command\n");
23962+
23963+ tmp = kzalloc(cmd_size, GFP_KERNEL);
23964+ if (tmp == NULL) {
23965+ ret = -ENOMEM;
23966+ DRM_ERROR("MSVDX: failed to allocate command copy, ret:%d\n", ret);
23967+ goto out;
23968+ }
23969+ memcpy(tmp, cmd_start, cmd_size);
23970+ *msvdx_cmd = tmp;
23971+ } else {
23972+ PSB_DEBUG_GENERAL("MSVDXQUE:did NOT copy command\n");
23973+ ret = psb_msvdx_send(dev, cmd_start, cmd_size);
23974+ if (ret) {
23975+ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
23976+ ret = -EINVAL;
23977+ }
23978+ }
23979+
23980+out:
23981+ ttm_bo_kunmap(&cmd_kmap);
23982+
23983+ return ret;
23984+}
23985+
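/*
 * Submission path: reset the core and/or reload the MTX firmware if needed,
 * then either push the command buffer straight to the hardware when MSVDX is
 * idle, or copy it and queue it on msvdx_queue so that the completion
 * interrupt can dequeue it later via psb_msvdx_dequeue_send().
 */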
23986+int psb_submit_video_cmdbuf(struct drm_device *dev,
23987+ struct ttm_buffer_object *cmd_buffer,
23988+ unsigned long cmd_offset, unsigned long cmd_size,
23989+ struct ttm_fence_object *fence)
23990+{
23991+ struct drm_psb_private *dev_priv = dev->dev_private;
23992+ uint32_t sequence = dev_priv->sequence[PSB_ENGINE_VIDEO];
23993+ unsigned long irq_flags;
23994+ int ret = 0;
23995+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
23996+
23997+ psb_schedule_watchdog(dev_priv);
23998+
23999+ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
24000+ if (msvdx_priv->msvdx_needs_reset) {
24001+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
24002+ PSB_DEBUG_GENERAL("MSVDX: will reset msvdx\n");
24003+ if (psb_msvdx_reset(dev_priv)) {
24004+ ret = -EBUSY;
24005+ DRM_ERROR("MSVDX: Reset failed\n");
24006+ return ret;
24007+ }
24008+ msvdx_priv->msvdx_needs_reset = 0;
24009+ msvdx_priv->msvdx_busy = 0;
24010+
24011+ psb_msvdx_init(dev);
24012+ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
24013+ }
24014+
24015+ if (!msvdx_priv->msvdx_fw_loaded) {
24016+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
24017+ PSB_DEBUG_GENERAL("MSVDX:reload FW to MTX\n");
24018+
24019+ ret = psb_setup_fw(dev);
24020+ if (ret) {
24021+ DRM_ERROR("MSVDX:fail to load FW\n");
24022+ /* FIXME: find a proper return value */
24023+ return -EFAULT;
24024+ }
24025+ msvdx_priv->msvdx_fw_loaded = 1;
24026+
24027+ PSB_DEBUG_GENERAL("MSVDX: load firmware successfully\n");
24028+ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
24029+ }
24030+
24031+ if (!msvdx_priv->msvdx_busy) {
24032+ msvdx_priv->msvdx_busy = 1;
24033+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
24034+ PSB_DEBUG_GENERAL("MSVDX: commit command to HW,seq=0x%08x\n",
24035+ sequence);
24036+ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
24037+ cmd_size, NULL, sequence, 0);
24038+ if (ret) {
24039+ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
24040+ return ret;
24041+ }
24042+ } else {
24043+ struct psb_msvdx_cmd_queue *msvdx_cmd;
24044+ void *cmd = NULL;
24045+
24046+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
24047+ /* queue the command to be sent when the h/w is ready */
24048+ PSB_DEBUG_GENERAL("MSVDXQUE: queueing sequence:%08x..\n",
24049+ sequence);
24050+ msvdx_cmd = kzalloc(sizeof(struct psb_msvdx_cmd_queue),
24051+ GFP_KERNEL);
24052+ if (msvdx_cmd == NULL) {
24053+ DRM_ERROR("MSVDXQUE: Out of memory...\n");
24054+ return -ENOMEM;
24055+ }
24056+
24057+ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
24058+ cmd_size, &cmd, sequence, 1);
24059+ if (ret) {
24060+ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
24061+ kfree(msvdx_cmd);
24063+ return ret;
24064+ }
24065+ msvdx_cmd->cmd = cmd;
24066+ msvdx_cmd->cmd_size = cmd_size;
24067+ msvdx_cmd->sequence = sequence;
24068+ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
24069+ list_add_tail(&msvdx_cmd->head, &msvdx_priv->msvdx_queue);
24070+ if (!msvdx_priv->msvdx_busy) {
24071+ msvdx_priv->msvdx_busy = 1;
24072+ PSB_DEBUG_GENERAL("MSVDXQUE: Need immediate dequeue\n");
24073+ psb_msvdx_dequeue_send(dev);
24074+ }
24075+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
24076+ }
24077+
24078+ return ret;
24079+}
24080+
24081+int psb_cmdbuf_video(struct drm_file *priv,
24082+ struct list_head *validate_list,
24083+ uint32_t fence_type,
24084+ struct drm_psb_cmdbuf_arg *arg,
24085+ struct ttm_buffer_object *cmd_buffer,
24086+ struct psb_ttm_fence_rep *fence_arg)
24087+{
24088+ struct drm_device *dev = priv->minor->dev;
24089+ struct ttm_fence_object *fence;
24090+ int ret;
24091+
24092+ /*
24093+ * Check this. Doesn't seem right. Have fencing done AFTER command
24094+ * submission and make sure drm_psb_idle idles the MSVDX completely.
24095+ */
24096+ ret =
24097+ psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
24098+ arg->cmdbuf_size, NULL);
24099+ if (ret)
24100+ return ret;
24101+
24102+
24103+ /* DRM_ERROR("Intel: Fix video fencing!!\n"); */
24104+ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, fence_type,
24105+ arg->fence_flags, validate_list, fence_arg,
24106+ &fence);
24107+
24108+ ttm_fence_object_unref(&fence);
24109+ mutex_lock(&cmd_buffer->mutex);
24110+ if (cmd_buffer->sync_obj != NULL)
24111+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
24112+ mutex_unlock(&cmd_buffer->mutex);
24113+
24114+ return 0;
24115+}
24116+
24117+
24118+static int psb_msvdx_send(struct drm_device *dev, void *cmd,
24119+ unsigned long cmd_size)
24120+{
24121+ int ret = 0;
24122+ struct drm_psb_private *dev_priv = dev->dev_private;
24123+
24124+ while (cmd_size > 0) {
24125+ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
24126+ if (cur_cmd_size > cmd_size) {
24127+ ret = -EINVAL;
24128+ DRM_ERROR("MSVDX:cmd_size %lu cur_cmd_size %lu\n",
24129+ cmd_size, (unsigned long)cur_cmd_size);
24130+ goto out;
24131+ }
24132+
24133+ /* Send the message to h/w */
24134+ ret = psb_mtx_send(dev_priv, cmd);
24135+ if (ret) {
24136+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
24137+ goto out;
24138+ }
24139+ cmd += cur_cmd_size;
24140+ cmd_size -= cur_cmd_size;
24141+ }
24142+
24143+out:
24144+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
24145+ return ret;
24146+}
24147+
24148+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *msg)
24149+{
24150+ static uint32_t pad_msg[FWRK_PADMSG_SIZE];
24151+ const uint32_t *p_msg = (uint32_t *) msg;
24152+ uint32_t msg_num, words_free, ridx, widx;
24153+ int ret = 0;
24154+
24155+ PSB_DEBUG_GENERAL("MSVDX: psb_mtx_send\n");
24156+
24157+ /* we need clocks enabled before we touch VEC local ram */
24158+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
24159+
24160+ msg_num = (MEMIO_READ_FIELD(msg, FWRK_GENMSG_SIZE) + 3) / 4;
24161+ if (msg_num > NUM_WORDS_MTX_BUF) {
24162+ ret = -EINVAL;
24163+ DRM_ERROR("MSVDX: message exceed maximum,ret:%d\n", ret);
24164+ goto out;
24165+ }
24166+
24167+ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
24168+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
24169+
24170+ /* message would wrap, need to send a pad message */
24171+ if (widx + msg_num > NUM_WORDS_MTX_BUF) {
24172+ /* Shouldn't happen for a PAD message itself */
24173+ BUG_ON(MEMIO_READ_FIELD(msg, FWRK_GENMSG_ID)
24174+ == FWRK_MSGID_PADDING);
24175+
24176+ /* If the read pointer is at zero then we must wait for it to
24177+ * change; otherwise the write pointer would equal the read
24178+ * pointer, which should only happen when the buffer is empty.
24179+ *
24180+ * This will only happen if we try to overfill the queue;
24181+ * queue management should make sure this never
24182+ * happens in the first place.
24183+ */
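		/*
		 * Worked example (values chosen for illustration): with
		 * NUM_WORDS_MTX_BUF == 100, widx == 98 and a 4-word message,
		 * 98 + 4 > 100, so a padding message of (100 - 98) << 2 == 8
		 * bytes is written into words 98..99 first. That recursive
		 * psb_mtx_send() call advances widx to 100, which wraps it to
		 * 0, and the real message then starts at word 0.
		 */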
24184+ BUG_ON(0 == ridx);
24185+ if (0 == ridx) {
24186+ ret = -EINVAL;
24187+ DRM_ERROR("MSVDX: RIndex=0, ret:%d\n", ret);
24188+ goto out;
24189+ }
24190+
24191+ /* Send a pad message */
24192+ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_SIZE,
24193+ (NUM_WORDS_MTX_BUF - widx) << 2);
24194+ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_ID,
24195+ FWRK_MSGID_PADDING);
24196+ psb_mtx_send(dev_priv, pad_msg);
24197+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
24198+ }
24199+
24200+ if (widx >= ridx)
24201+ words_free = NUM_WORDS_MTX_BUF - (widx - ridx);
24202+ else
24203+ words_free = ridx - widx;
24204+
24205+ BUG_ON(msg_num > words_free);
24206+ if (msg_num > words_free) {
24207+ ret = -EINVAL;
24208+ DRM_ERROR("MSVDX: msg_num > words_free, ret:%d\n", ret);
24209+ goto out;
24210+ }
24211+ while (msg_num > 0) {
24212+ PSB_WMSVDX32(*p_msg++, MSVDX_COMMS_TO_MTX_BUF + (widx << 2));
24213+ msg_num--;
24214+ widx++;
24215+ if (NUM_WORDS_MTX_BUF == widx)
24216+ widx = 0;
24217+ }
24218+ PSB_WMSVDX32(widx, MSVDX_COMMS_TO_MTX_WRT_INDEX);
24219+
24220+ /* Make sure clocks are enabled before we kick */
24221+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
24222+
24223+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
24224+
24225+ /* signal an interrupt to let the mtx know there is a new message */
24226+ PSB_WMSVDX32(1, MSVDX_MTX_KICKI);
24227+
24228+out:
24229+ return ret;
24230+}
24231+
24232+/*
24233+ * MSVDX MTX interrupt
24234+ */
24235+static void psb_msvdx_mtx_interrupt(struct drm_device *dev)
24236+{
24237+ struct drm_psb_private *dev_priv =
24238+ (struct drm_psb_private *)dev->dev_private;
24239+ static uint32_t buf[128]; /* message buffer */
24240+ uint32_t ridx, widx;
24241+ uint32_t num, ofs; /* message num and offset */
24242+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
24243+
24244+ PSB_DEBUG_GENERAL("MSVDX:Got a MSVDX MTX interrupt\n");
24245+
24246+ /* Are clocks enabled - If not enable before
24247+ * attempting to read from VLR
24248+ */
24249+ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all)) {
24250+ PSB_DEBUG_GENERAL("MSVDX:Clocks disabled when Interrupt set\n");
24251+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
24252+ }
24253+
24254+loop: /* just for coding style check */
24255+ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_RD_INDEX);
24256+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_WRT_INDEX);
24257+
24258+ /* Get out of here if nothing */
24259+ if (ridx == widx)
24260+ goto done;
24261+
24262+ ofs = 0;
24263+ buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2));
24264+
24265+ /* round to nearest word */
24266+ num = (MEMIO_READ_FIELD(buf, FWRK_GENMSG_SIZE) + 3) / 4;
24267+
24268+ /* ASSERT(num <= sizeof(buf) / sizeof(uint32_t)); */
24269+
24270+ if (++ridx >= NUM_WORDS_HOST_BUF)
24271+ ridx = 0;
24272+
24273+ for (ofs++; ofs < num; ofs++) {
24274+ buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2));
24275+
24276+ if (++ridx >= NUM_WORDS_HOST_BUF)
24277+ ridx = 0;
24278+ }
24279+
24280+ /* Update the Read index */
24281+ PSB_WMSVDX32(ridx, MSVDX_COMMS_TO_HOST_RD_INDEX);
24282+
24283+ if (msvdx_priv->msvdx_needs_reset)
24284+ goto loop;
24285+
24286+ switch (MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID)) {
24287+ case VA_MSGID_CMD_HW_PANIC:
24288+ case VA_MSGID_CMD_FAILED: {
24289+ uint32_t fence = MEMIO_READ_FIELD(buf,
24290+ FW_VA_CMD_FAILED_FENCE_VALUE);
24291+ uint32_t fault = MEMIO_READ_FIELD(buf,
24292+ FW_VA_CMD_FAILED_IRQSTATUS);
24293+ uint32_t msg_id = MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID);
24294+ uint32_t diff = 0;
24295+
24296+ (void) fault;
24297+ if (msg_id == VA_MSGID_CMD_HW_PANIC)
24298+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_HW_PANIC:"
24299+ "Fault detected"
24300+ " - Fence: %08x, Status: %08x"
24301+ " - resetting and ignoring error\n",
24302+ fence, fault);
24303+ else
24304+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_FAILED:"
24305+ "Fault detected"
24306+ " - Fence: %08x, Status: %08x"
24307+ " - resetting and ignoring error\n",
24308+ fence, fault);
24309+
24310+ msvdx_priv->msvdx_needs_reset = 1;
24311+
24312+ if (msg_id == VA_MSGID_CMD_HW_PANIC) {
24313+ diff = msvdx_priv->msvdx_current_sequence
24314+ - dev_priv->sequence[PSB_ENGINE_VIDEO];
24315+
24316+ if (diff > 0x0FFFFFFF)
24317+ msvdx_priv->msvdx_current_sequence++;
24318+
24319+ PSB_DEBUG_GENERAL("MSVDX: Fence ID missing, "
24320+ "assuming %08x\n",
24321+ msvdx_priv->msvdx_current_sequence);
24322+ } else {
24323+ msvdx_priv->msvdx_current_sequence = fence;
24324+ }
24325+
24326+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
24327+ msvdx_priv->msvdx_current_sequence,
24328+ _PSB_FENCE_TYPE_EXE, DRM_CMD_FAILED);
24329+
24330+ /* Flush the command queue */
24331+ psb_msvdx_flush_cmd_queue(dev);
24332+
24333+ goto done;
24334+ }
24335+ case VA_MSGID_CMD_COMPLETED: {
24336+ uint32_t fence = MEMIO_READ_FIELD(buf,
24337+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
24338+ uint32_t flags = MEMIO_READ_FIELD(buf,
24339+ FW_VA_CMD_COMPLETED_FLAGS);
24340+
24341+ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED: "
24342+ "FenceID: %08x, flags: 0x%x\n",
24343+ fence, flags);
24344+
24345+ msvdx_priv->msvdx_current_sequence = fence;
24346+
24347+ psb_fence_handler(dev, PSB_ENGINE_VIDEO);
24348+
24349+ if (flags & FW_VA_RENDER_HOST_INT) {
24350+ /*Now send the next command from the msvdx cmd queue */
24351+ psb_msvdx_dequeue_send(dev);
24352+ goto done;
24353+ }
24354+
24355+ break;
24356+ }
24357+ case VA_MSGID_CMD_COMPLETED_BATCH: {
24358+ uint32_t fence = MEMIO_READ_FIELD(buf,
24359+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
24360+ uint32_t tickcnt = MEMIO_READ_FIELD(buf,
24361+ FW_VA_CMD_COMPLETED_NO_TICKS);
24362+ (void)tickcnt;
24363+ /* we have the fence value in the message */
24364+ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED_BATCH:"
24365+ " FenceID: %08x, TickCount: %08x\n",
24366+ fence, tickcnt);
24367+ msvdx_priv->msvdx_current_sequence = fence;
24368+
24369+ break;
24370+ }
24371+ case VA_MSGID_ACK:
24372+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_ACK\n");
24373+ break;
24374+
24375+ case VA_MSGID_TEST1:
24376+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST1\n");
24377+ break;
24378+
24379+ case VA_MSGID_TEST2:
24380+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST2\n");
24381+ break;
24382+ /* Don't need to do anything with these messages */
24383+
24384+ case VA_MSGID_DEBLOCK_REQUIRED: {
24385+ uint32_t ctxid = MEMIO_READ_FIELD(buf,
24386+ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
24387+ (void) ctxid;
24388+ /* The BE will now be locked. */
24389+ /* Unblock rendec by reading the mtx2mtx end of slice */
24390+ (void) PSB_RMSVDX32(MSVDX_RENDEC_READ_DATA);
24391+
24392+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_DEBLOCK_REQUIRED"
24393+ " Context=%08x\n", ctxid);
24394+ goto done;
24395+ }
24396+ default:
24397+ DRM_ERROR("ERROR: msvdx Unknown message from MTX\n");
24398+ goto done;
24399+ }
24400+
24401+done:
24402+ /* we get a frame/slice done, try to save some power*/
24403+ if (drm_msvdx_pmpolicy == PSB_PMPOLICY_POWERDOWN)
24404+ schedule_delayed_work(&dev_priv->scheduler.msvdx_suspend_wq, 0);
24405+
24406+ DRM_MEMORYBARRIER(); /* TBD check this... */
24407+}
24408+
24409+
24410+/*
24411+ * MSVDX interrupt.
24412+ */
24413+void psb_msvdx_interrupt(struct drm_device *dev,
24414+ uint32_t msvdx_stat)
24415+{
24416+ struct drm_psb_private *dev_priv =
24417+ (struct drm_psb_private *) dev->dev_private;
24418+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
24419+
24420+ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
24421+ /* Ideally we should never get here */
24422+ PSB_DEBUG_IRQ("MSVDX:MMU Fault:0x%x fence2_irq_on=%d\n",
24423+ msvdx_stat, dev_priv->fence2_irq_on);
24424+
24425+ /* Pause MMU */
24426+ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
24427+ MSVDX_MMU_CONTROL0);
24428+ DRM_WRITEMEMORYBARRIER();
24429+
24430+ /* Clear this interrupt bit only */
24431+ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
24432+ MSVDX_INTERRUPT_CLEAR);
24433+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
24434+ DRM_READMEMORYBARRIER();
24435+
24436+ msvdx_priv->msvdx_needs_reset = 1;
24437+ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
24438+ PSB_DEBUG_IRQ
24439+ ("MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d(MTX)\n",
24440+ msvdx_stat, dev_priv->fence2_irq_on);
24441+
24442+ /* Clear all interrupt bits */
24443+ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
24444+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
24445+ DRM_READMEMORYBARRIER();
24446+
24447+ psb_msvdx_mtx_interrupt(dev);
24448+ }
24449+}
24450+
24451+
24452+void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
24453+ int *msvdx_lockup, int *msvdx_idle)
24454+{
24455+ int tmp;
24456+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
24457+
24458+ *msvdx_lockup = 0;
24459+ *msvdx_idle = 1;
24460+
24461+#if 0
24462+ PSB_DEBUG_GENERAL("MSVDXTimer: current_sequence:%d "
24463+ "last_sequence:%d and last_submitted_sequence :%d\n",
24464+ msvdx_priv->msvdx_current_sequence,
24465+ msvdx_priv->msvdx_last_sequence,
24466+ dev_priv->sequence[PSB_ENGINE_VIDEO]);
24467+#endif
24468+
24469+ tmp = msvdx_priv->msvdx_current_sequence -
24470+ dev_priv->sequence[PSB_ENGINE_VIDEO];
24471+
24472+ if (tmp > 0x0FFFFFFF) {
24473+ if (msvdx_priv->msvdx_current_sequence ==
24474+ msvdx_priv->msvdx_last_sequence) {
24475+ DRM_ERROR("MSVDXTimer:locked-up for sequence:%d\n",
24476+ msvdx_priv->msvdx_current_sequence);
24477+ *msvdx_lockup = 1;
24478+ } else {
24479+ PSB_DEBUG_GENERAL("MSVDXTimer: "
24480+ "msvdx responded fine so far\n");
24481+ msvdx_priv->msvdx_last_sequence =
24482+ msvdx_priv->msvdx_current_sequence;
24483+ *msvdx_idle = 0;
24484+ }
24485+ }
24486+}
24487+
24488+int psb_check_msvdx_idle(struct drm_device *dev)
24489+{
24490+ struct drm_psb_private *dev_priv =
24491+ (struct drm_psb_private *)dev->dev_private;
24492+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
24493+ uint32_t fs_status, ccb_roff, ccb_woff;
24494+
24495+ if (msvdx_priv->msvdx_busy) {
24496+ PSB_DEBUG_PM("MSVDX: psb_check_msvdx_idle returns busy\n");
24497+ return -EBUSY;
24498+ }
24499+
24500+ /* check that clocks are enabled before reading VLR */
24501+ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
24502+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
24503+
24504+ fs_status = PSB_RMSVDX32(MSVDX_COMMS_FW_STATUS);
24505+ ccb_roff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
24506+ ccb_woff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
24507+
24508+ /* If the firmware says the hardware is idle
24509+ * and the CCB is empty then we can say it is IDLE
24510+ */
24511+ if ((fs_status & MSVDX_FW_STATUS_HW_IDLE) && (ccb_roff == ccb_woff)) {
24512+ PSB_DEBUG_PM("MSVDXIDLE: FW indicate IDLE\n");
24513+ return 0;
24514+ }
24515+
24516+ return -EBUSY; /* we do not check fences, the CCB, etc. here */
24517+}
24518+
24519+int psb_wait_msvdx_idle(struct drm_device *dev)
24520+{
24521+ struct drm_psb_private *dev_priv =
24522+ (struct drm_psb_private *)dev->dev_private;
24523+ struct ttm_fence_device *fdev = &dev_priv->fdev;
24524+ struct ttm_fence_class_manager *fc =
24525+ &fdev->fence_class[PSB_ENGINE_VIDEO];
24526+ struct ttm_fence_object *fence, *next;
24527+ int signaled = 0;
24528+ unsigned long _end = jiffies + 5 * DRM_HZ;
24529+ int ret = 0;
24530+
24531+ /* Ensure that all pending IRQs are serviced. */
24532+
24533+ /*
24534+ * Save the last MSVDX fence in dev_priv instead!!!
24535+ * Need to be fc->write_locked while accessing a fence from the ring.
24536+ */
24537+ list_for_each_entry_safe(fence, next, &fc->ring, ring) {
24538+ do {
24539+ signaled = ttm_fence_object_signaled(fence,
24540+ _PSB_FENCE_TYPE_EXE);
24541+ if (signaled) {
24542+ PSB_DEBUG_PM("MSVDXIDLE:wait_fence success\n");
24543+ break;
24544+ }
24545+ if (time_after_eq(jiffies, _end)) {
24546+ PSB_DEBUG_PM("MSVDXIDLE: fence 0x%x didn't get "
24547+ "signaled for 5 secs\n",
24548+ (unsigned int) fence);
24549+ break;
24550+ }
24551+ DRM_UDELAY(1000);
24552+ } while (1);
24553+ }
24554+ do {
24555+ ret = psb_check_msvdx_idle(dev);
24556+ if (ret == 0) {
24557+ PSB_DEBUG_PM("MSVDXIDLE: check_idle succeeded!\n");
24558+ break;
24559+ }
24560+
24561+ if (time_after_eq(jiffies, _end)) {
24562+ PSB_DEBUG_PM("MSVDXIDLE: wait HW idle time out\n");
24563+ break;
24564+ }
24565+ DRM_UDELAY(1000);
24566+ } while (1);
24567+
24568+ return ret;
24569+}
24570+
24571+#if 0
24572+static int psb_power_gated_msvdx(struct drm_device *dev)
24573+{
24574+ struct drm_psb_private *dev_priv =
24575+ (struct drm_psb_private *)dev->dev_private;
24576+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
24577+
24578+ PSB_DEBUG_PM("MSVDX: Setting clock to minimal\n");
24579+ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
24580+
24581+ MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_CLOCKGATED);
24582+
24583+ return 0;
24584+}
24585+
24586+static int psb_power_ungated_msvdx(struct drm_device *dev)
24587+{
24588+ struct drm_psb_private *dev_priv =
24589+ (struct drm_psb_private *)dev->dev_private;
24590+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
24591+
24592+ MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_POWERUP);
24593+
24594+ return 0;
24595+}
24596+#endif
24597+
24598+int lnc_video_getparam(struct drm_device *dev, void *data,
24599+ struct drm_file *file_priv)
24600+{
24601+ struct drm_lnc_video_getparam_arg *arg = data;
24602+ int ret = 0;
24603+ struct drm_psb_private *dev_priv =
24604+ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
24605+#if defined(CONFIG_MRST_RAR_HANDLER)
24606+ struct RAR_buffer rar_buf;
24607+ size_t rar_status;
24608+#endif
24609+ void *rar_handler;
24610+ uint32_t offset = 0;
24611+
24612+ switch (arg->key) {
24613+ case LNC_VIDEO_GETPARAM_RAR_REGION_SIZE:
24614+ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
24615+ &dev_priv->rar_region_size,
24616+ sizeof(dev_priv->rar_region_size));
24617+ break;
24618+ case LNC_VIDEO_GETPARAM_RAR_HANDLER_OFFSET:
24619+ ret = copy_from_user(&rar_handler,
24620+ (void __user *)((unsigned long)arg->arg),
24621+ sizeof(rar_handler));
24622+ if (ret)
24623+ break;
24624+
24625+#if defined(CONFIG_MRST_RAR_HANDLER)
24626+ rar_buf.info.handle = rar_handler;
24627+ rar_buf.bus_address = dev_priv->rar_region_start;
24628+ rar_status = 1;
24629+
24630+ rar_status = rar_handle_to_bus(&rar_buf, 1);
24631+ if (rar_status != 1) {
24632+ DRM_ERROR("MSVDX:rar_handle_to_bus failed\n");
24633+ ret = -1;
24634+ break;
24635+ }
24636+
24637+ offset = rar_buf.bus_address - dev_priv->rar_region_start;
24638+ PSB_DEBUG_GENERAL("MSVDX:RAR handler %p, bus address=0x%08x,"
24639+ "RAR region=0x%08x\n", rar_handler,
24640+ rar_buf.bus_address, dev_priv->rar_region_start);
24641+#endif
24642+ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
24643+ &offset,
24644+ sizeof(offset));
24645+ break;
24646+ case LNC_VIDEO_FRAME_SKIP:
24647+ ret = lnc_video_frameskip(dev, arg->value);
24648+ break;
24649+ default:
24650+ ret = -EFAULT;
24651+ break;
24652+ }
24653+
24654+ if (ret)
24655+ return -EFAULT;
24656+
24657+ return 0;
24658+}
24659+
24660+inline int psb_try_power_down_msvdx(struct drm_device *dev)
24661+{
24662+ return powermgmt_suspend_islands(dev->pdev, PSB_VIDEO_DEC_ISLAND, false);
24663+}
24664diff --git a/drivers/gpu/drm/psb/psb_msvdx.h b/drivers/gpu/drm/psb/psb_msvdx.h
24665new file mode 100644
24666index 0000000..8d8d8b5
24667--- /dev/null
24668+++ b/drivers/gpu/drm/psb/psb_msvdx.h
24669@@ -0,0 +1,527 @@
24670+/**************************************************************************
24671+ *
24672+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
24673+ * Copyright (c) Imagination Technologies Limited, UK
24674+ * All Rights Reserved.
24675+ *
24676+ * Permission is hereby granted, free of charge, to any person obtaining a
24677+ * copy of this software and associated documentation files (the
24678+ * "Software"), to deal in the Software without restriction, including
24679+ * without limitation the rights to use, copy, modify, merge, publish,
24680+ * distribute, sub license, and/or sell copies of the Software, and to
24681+ * permit persons to whom the Software is furnished to do so, subject to
24682+ * the following conditions:
24683+ *
24684+ * The above copyright notice and this permission notice (including the
24685+ * next paragraph) shall be included in all copies or substantial portions
24686+ * of the Software.
24687+ *
24688+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24689+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24690+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
24691+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
24692+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24693+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24694+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
24695+ *
24696+ **************************************************************************/
24697+
24698+#ifndef _PSB_MSVDX_H_
24699+#define _PSB_MSVDX_H_
24700+
24701+#include "psb_drv.h"
24702+
24703+#if defined(CONFIG_MRST_RAR_HANDLER)
24704+#include "rar/memrar.h"
24705+#endif
24706+
24707+extern int drm_msvdx_pmpolicy;
24708+
24709+void psb_msvdx_interrupt(struct drm_device *dev,
24710+ uint32_t msvdx_stat);
24711+
24712+int psb_msvdx_init(struct drm_device *dev);
24713+int psb_msvdx_uninit(struct drm_device *dev);
24714+int psb_msvdx_reset(struct drm_psb_private *dev_priv);
24715+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
24716+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *pvMsg);
24717+void psb_msvdx_flush_cmd_queue(struct drm_device *dev);
24718+void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
24719+ int *msvdx_lockup, int *msvdx_idle);
24720+int psb_setup_fw(struct drm_device *dev);
24721+int psb_check_msvdx_idle(struct drm_device *dev);
24722+int psb_wait_msvdx_idle(struct drm_device *dev);
24723+int psb_cmdbuf_video(struct drm_file *priv,
24724+ struct list_head *validate_list,
24725+ uint32_t fence_type,
24726+ struct drm_psb_cmdbuf_arg *arg,
24727+ struct ttm_buffer_object *cmd_buffer,
24728+ struct psb_ttm_fence_rep *fence_arg);
24729+
24730+/* Non-Optimal Invalidation is not default */
24731+#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2
24732+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
24733+
24734+#define FW_VA_RENDER_HOST_INT 0x00004000
24735+#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020
24736+
24737+/* There is no work currently underway on the hardware */
24738+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
24739+#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200
24740+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 \
24741+ (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | \
24742+ MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
24743+ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
24744+
24745+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 \
24746+ (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
24747+ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
24748+
24749+#define POULSBO_D0 0x5
24750+#define POULSBO_D1 0x6
24751+#define PSB_REVID_OFFSET 0x8
24752+
24753+#define MTX_CODE_BASE (0x80900000)
24754+#define MTX_DATA_BASE (0x82880000)
24755+#define PC_START_ADDRESS (0x80900000)
24756+
24757+#define MTX_CORE_CODE_MEM (0x10)
24758+#define MTX_CORE_DATA_MEM (0x18)
24759+
24760+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
24761+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
24762+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK \
24763+ (0x00010000)
24764+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK \
24765+ (0x00100000)
24766+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK \
24767+ (0x01000000)
24768+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK \
24769+ (0x10000000)
24770+
24771+#define clk_enable_all \
24772+(MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
24773+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
24774+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
24775+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
24776+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
24777+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
24778+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
24779+
24780+#define clk_enable_minimal \
24781+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
24782+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
24783+
24784+#define clk_enable_auto \
24785+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
24786+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \
24787+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \
24788+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \
24789+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \
24790+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
24791+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
24792+
24793+#define msvdx_sw_reset_all \
24794+(MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \
24795+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \
24796+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \
24797+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
24798+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK)
24799+
24800+#define MTX_INTERNAL_REG(R_SPECIFIER , U_SPECIFIER) \
24801+ (((R_SPECIFIER)<<4) | (U_SPECIFIER))
24802+#define MTX_PC MTX_INTERNAL_REG(0, 5)
24803+
24804+#define RENDEC_A_SIZE (1024 * 1024)
24805+#define RENDEC_B_SIZE (1024 * 1024)
24806+
24807+#define MEMIO_READ_FIELD(vpMem, field) \
24808+ ((uint32_t)(((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
24809+ & field##_MASK) >> field##_SHIFT))
24810+
24811+#define MEMIO_WRITE_FIELD(vpMem, field, value) \
24812+ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
24813+ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
24814+ & (field##_TYPE)~field##_MASK) | \
24815+ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT) & field##_MASK);
24816+
24817+#define MEMIO_WRITE_FIELD_LITE(vpMem, field, value) \
24818+ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
24819+ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) | \
24820+ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT)));
24821+
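/*
 * Usage sketch (FOO is a hypothetical field, not defined in this header):
 * a message field is described by four companion macros, e.g.
 *
 *   #define FOO_TYPE   uint32_t
 *   #define FOO_OFFSET 4            (byte offset within the message)
 *   #define FOO_MASK   0x0000FFFF
 *   #define FOO_SHIFT  0
 *
 * after which MEMIO_READ_FIELD(msg, FOO) extracts the field from a message
 * buffer and MEMIO_WRITE_FIELD(msg, FOO, val) read-modify-writes it, just as
 * FWRK_GENMSG_SIZE and FW_VA_RENDER_FENCE_VALUE are used in psb_msvdx.c.
 * The REGIO_* variants below do the same for a register value already read
 * into reg_val, keyed by reg##_##field##_MASK and _SHIFT definitions.
 */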
24822+#define REGIO_READ_FIELD(reg_val, reg, field) \
24823+ ((reg_val & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
24824+
24825+#define REGIO_WRITE_FIELD(reg_val, reg, field, value) \
24826+ (reg_val) = \
24827+ ((reg_val) & ~(reg##_##field##_MASK)) | \
24828+ (((value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
24829+
24830+#define REGIO_WRITE_FIELD_LITE(reg_val, reg, field, value) \
24831+ (reg_val) = \
24832+ ((reg_val) | ((value) << (reg##_##field##_SHIFT)));
24833+
24834+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK \
24835+ (0x00000001)
24836+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK \
24837+ (0x00000002)
24838+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK \
24839+ (0x00000004)
24840+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK \
24841+ (0x00000008)
24842+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK \
24843+ (0x00000010)
24844+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK \
24845+ (0x00000020)
24846+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK \
24847+ (0x00000040)
24848+
24849+#define clk_enable_all \
24850+ (MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
24851+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
24852+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
24853+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
24854+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
24855+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
24856+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
24857+
24858+#define clk_enable_minimal \
24859+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
24860+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
24861+
24862+/* MTX registers */
24863+#define MSVDX_MTX_ENABLE (0x0000)
24864+#define MSVDX_MTX_KICKI (0x0088)
24865+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC)
24866+#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8)
24867+#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104)
24868+#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108)
24869+#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C)
24870+#define MSVDX_MTX_SOFT_RESET (0x0200)
24871+
24872+/* MSVDX registers */
24873+#define MSVDX_CONTROL (0x0600)
24874+#define MSVDX_INTERRUPT_CLEAR (0x060C)
24875+#define MSVDX_INTERRUPT_STATUS (0x0608)
24876+#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610)
24877+#define MSVDX_MMU_CONTROL0 (0x0680)
24878+#define MSVDX_MTX_RAM_BANK (0x06F0)
24879+#define MSVDX_MAN_CLK_ENABLE (0x0620)
24880+
24881+/* RENDEC registers */
24882+#define MSVDX_RENDEC_CONTROL0 (0x0868)
24883+#define MSVDX_RENDEC_CONTROL1 (0x086C)
24884+#define MSVDX_RENDEC_BUFFER_SIZE (0x0870)
24885+#define MSVDX_RENDEC_BASE_ADDR0 (0x0874)
24886+#define MSVDX_RENDEC_BASE_ADDR1 (0x0878)
24887+#define MSVDX_RENDEC_READ_DATA (0x0898)
24888+#define MSVDX_RENDEC_CONTEXT0 (0x0950)
24889+#define MSVDX_RENDEC_CONTEXT1 (0x0954)
24890+#define MSVDX_RENDEC_CONTEXT2 (0x0958)
24891+#define MSVDX_RENDEC_CONTEXT3 (0x095C)
24892+#define MSVDX_RENDEC_CONTEXT4 (0x0960)
24893+#define MSVDX_RENDEC_CONTEXT5 (0x0964)
24894+
24895+/*
24896+ * This defines the MSVDX communication buffer
24897+ */
24898+#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */
24899+/*!< Host buffer size (in 32-bit words) */
24900+#define NUM_WORDS_HOST_BUF (100)
24901+/*!< MTX buffer size (in 32-bit words) */
24902+#define NUM_WORDS_MTX_BUF (100)
24903+
24904+/* There is no work currently underway on the hardware */
24905+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
24906+
24907+#define MSVDX_COMMS_AREA_ADDR (0x02cc0)
24908+
24909+#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18)
24910+#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04)
24911+#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10)
24912+#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00)
24913+#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04)
24914+#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08)
24915+#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C)
24916+#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10)
24917+#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14)
24918+#define MSVDX_COMMS_TO_MTX_CB_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x18)
24919+#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C)
24920+#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20)
24921+#define MSVDX_COMMS_TO_MTX_BUF \
24922+ (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
24923+
24924+#define MSVDX_COMMS_AREA_END \
24925+ (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
24926+
24927+#if (MSVDX_COMMS_AREA_END != 0x03000)
24928+#error "MSVDX_COMMS_AREA_END must be 0x03000"
24929+#endif
24930+
24931+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000)
24932+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31)
24933+
24934+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000)
24935+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16)
24936+
24937+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000)
24938+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20)
24939+
24940+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC)
24941+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2)
24942+
24943+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002)
24944+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1)
24945+
24946+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001)
24947+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0)
24948+
24949+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001)
24950+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0)
24951+
24952+#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001)
24953+#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0)
24954+
24955+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
24956+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
24957+
24958+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00)
24959+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8)
24960+
24961+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000)
24962+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14)
24963+
24964+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002)
24965+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1)
24966+
24967+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000)
24968+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16)
24969+
24970+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF)
24971+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0)
24972+
24973+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000)
24974+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16)
24975+
24976+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF)
24977+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0)
24978+
24979+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000)
24980+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18)
24981+
24982+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000)
24983+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16)
24984+
24985+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000)
24986+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24)
24987+
24988+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001)
24989+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0)
24990+
24991+/* Start of parser specific Host->MTX messages. */
24992+#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80)
24993+
24994+/* Start of parser specific MTX->Host messages. */
24995+#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0)
24996+
24997+#define FWRK_MSGID_PADDING (0)
24998+
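+/*
+ * Every message in the rings starts with a two-byte generic header:
+ * byte 0 carries the total message size in bytes and byte 1 the message
+ * ID, so the smallest (padding) message is FWRK_PADMSG_SIZE == 2 bytes.
+ */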
24999+#define FWRK_GENMSG_SIZE_TYPE uint8_t
25000+#define FWRK_GENMSG_SIZE_MASK (0xFF)
25001+#define FWRK_GENMSG_SIZE_SHIFT (0)
25002+#define FWRK_GENMSG_SIZE_OFFSET (0x0000)
25003+#define FWRK_GENMSG_ID_TYPE uint8_t
25004+#define FWRK_GENMSG_ID_MASK (0xFF)
25005+#define FWRK_GENMSG_ID_SHIFT (0)
25006+#define FWRK_GENMSG_ID_OFFSET (0x0001)
25007+#define FWRK_PADMSG_SIZE (2)
25008+
25009+/* This type defines the framework specified message ids */
25010+enum {
25011+ /*! Sent by the DXVA driver on the host to the mtx firmware.
25012+ */
25013+ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
25014+ VA_MSGID_RENDER,
25015+ VA_MSGID_DEBLOCK,
25016+ VA_MSGID_BUBBLE,
25017+
25018+ /* Test Messages */
25019+ VA_MSGID_TEST1,
25020+ VA_MSGID_TEST2,
25021+
25022+ /*! Sent by the mtx firmware to itself.
25023+ */
25024+ VA_MSGID_RENDER_MC_INTERRUPT,
25025+
25026+ /*! Sent by the DXVA firmware on the MTX to the host.
25027+ */
25028+ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
25029+ VA_MSGID_CMD_COMPLETED_BATCH,
25030+ VA_MSGID_DEBLOCK_REQUIRED,
25031+ VA_MSGID_TEST_RESPONCE,
25032+ VA_MSGID_ACK,
25033+
25034+ VA_MSGID_CMD_FAILED,
25035+ VA_MSGID_CMD_UNSUPPORTED,
25036+ VA_MSGID_CMD_HW_PANIC,
25037+};
25038+
25039+/* MSVDX private structure */
25040+struct msvdx_private {
25041+ int msvdx_needs_reset;
25042+
25043+ unsigned int pmstate;
25044+
25045+ struct sysfs_dirent *sysfs_pmstate;
25046+
25047+ uint32_t msvdx_current_sequence;
25048+ uint32_t msvdx_last_sequence;
25049+
25050+ /*
25051+ *MSVDX Rendec Memory
25052+ */
25053+ struct ttm_buffer_object *ccb0;
25054+ uint32_t base_addr0;
25055+ struct ttm_buffer_object *ccb1;
25056+ uint32_t base_addr1;
25057+
25058+ /*
25059+ *msvdx command queue
25060+ */
25061+ spinlock_t msvdx_lock;
25062+ struct mutex msvdx_mutex;
25063+ struct list_head msvdx_queue;
25064+ int msvdx_busy;
25065+ int msvdx_fw_loaded;
25066+ void *msvdx_fw;
25067+ int msvdx_fw_size;
25068+};
25069+
25070+/* MSVDX Firmware interface */
25071+#define FW_VA_INIT_SIZE (8)
25072+#define FW_VA_DEBUG_TEST2_SIZE (4)
25073+
25074+/* FW_VA_DEBUG_TEST2 MSG_SIZE */
25075+#define FW_VA_DEBUG_TEST2_MSG_SIZE_TYPE uint8_t
25076+#define FW_VA_DEBUG_TEST2_MSG_SIZE_MASK (0xFF)
25077+#define FW_VA_DEBUG_TEST2_MSG_SIZE_OFFSET (0x0000)
25078+#define FW_VA_DEBUG_TEST2_MSG_SIZE_SHIFT (0)
25079+
25080+/* FW_VA_DEBUG_TEST2 ID */
25081+#define FW_VA_DEBUG_TEST2_ID_TYPE uint8_t
25082+#define FW_VA_DEBUG_TEST2_ID_MASK (0xFF)
25083+#define FW_VA_DEBUG_TEST2_ID_OFFSET (0x0001)
25084+#define FW_VA_DEBUG_TEST2_ID_SHIFT (0)
25085+
25086+/* FW_VA_CMD_FAILED FENCE_VALUE */
25087+#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t
25088+#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF)
25089+#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004)
25090+#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0)
25091+
25092+/* FW_VA_CMD_FAILED IRQSTATUS */
25093+#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t
25094+#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF)
25095+#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008)
25096+#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0)
25097+
25098+/* FW_VA_CMD_COMPLETED FENCE_VALUE */
25099+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t
25100+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF)
25101+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004)
25102+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0)
25103+
25104+/* FW_VA_CMD_COMPLETED FLAGS */
25105+#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4)
25106+#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t
25107+#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF)
25108+#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF)
25109+#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008)
25110+#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0)
25111+
25112+/* FW_VA_CMD_COMPLETED NO_TICKS */
25113+#define FW_VA_CMD_COMPLETED_NO_TICKS_TYPE uint16_t
25114+#define FW_VA_CMD_COMPLETED_NO_TICKS_MASK (0xFFFF)
25115+#define FW_VA_CMD_COMPLETED_NO_TICKS_OFFSET (0x0002)
25116+#define FW_VA_CMD_COMPLETED_NO_TICKS_SHIFT (0)
25117+
25118+/* FW_VA_DEBLOCK_REQUIRED CONTEXT */
25119+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t
25120+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF)
25121+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004)
25122+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0)
25123+
25124+/* FW_VA_INIT GLOBAL_PTD */
25125+#define FW_VA_INIT_GLOBAL_PTD_TYPE uint32_t
25126+#define FW_VA_INIT_GLOBAL_PTD_MASK (0xFFFFFFFF)
25127+#define FW_VA_INIT_GLOBAL_PTD_OFFSET (0x0004)
25128+#define FW_VA_INIT_GLOBAL_PTD_SHIFT (0)
25129+
25130+/* FW_VA_RENDER FENCE_VALUE */
25131+#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t
25132+#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF)
25133+#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010)
25134+#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0)
25135+
25136+/* FW_VA_RENDER MMUPTD */
25137+#define FW_VA_RENDER_MMUPTD_TYPE uint32_t
25138+#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF)
25139+#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004)
25140+#define FW_VA_RENDER_MMUPTD_SHIFT (0)
25141+
25142+/* FW_VA_RENDER BUFFER_ADDRESS */
25143+#define FW_VA_RENDER_BUFFER_ADDRESS_TYPE uint32_t
25144+#define FW_VA_RENDER_BUFFER_ADDRESS_MASK (0xFFFFFFFF)
25145+#define FW_VA_RENDER_BUFFER_ADDRESS_OFFSET (0x0008)
25146+#define FW_VA_RENDER_BUFFER_ADDRESS_SHIFT (0)
25147+
25148+/* FW_VA_RENDER BUFFER_SIZE */
25149+#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t
25150+#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF)
25151+#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002)
25152+#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0)
25153+
25154+
25155+static inline void psb_msvdx_clearirq(struct drm_device *dev)
25156+{
25157+ struct drm_psb_private *dev_priv = dev->dev_private;
25158+ unsigned long mtx_int = 0;
25159+
25160+ PSB_DEBUG_IRQ("MSVDX: clear IRQ\n");
25161+
25162+ /* Clear MTX interrupt */
25163+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
25164+ 1);
25165+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
25166+}
25167+
25168+
25169+static inline void psb_msvdx_disableirq(struct drm_device *dev)
25170+{
25171+ /* nothing */
25172+}
25173+
25174+
25175+static inline void psb_msvdx_enableirq(struct drm_device *dev)
25176+{
25177+ struct drm_psb_private *dev_priv = dev->dev_private;
25178+ unsigned long enables = 0;
25179+
25180+ PSB_DEBUG_IRQ("MSVDX: enable MSVDX MTX IRQ\n");
25181+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
25182+ 1);
25183+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
25184+}
25185+
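+/*
+ * Record the new power-management state in the MSVDX private data and
+ * notify any sysfs watchers of the msvdx_pmstate attribute.
+ */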
25186+#define MSVDX_NEW_PMSTATE(drm_dev, msvdx_priv, new_state) \
25187+do { \
25188+ msvdx_priv->pmstate = new_state; \
25189+ sysfs_notify_dirent(msvdx_priv->sysfs_pmstate); \
25190+ PSB_DEBUG_PM("MSVDX: %s\n", \
25191+ (new_state == PSB_PMSTATE_POWERUP) ? "powerup" \
25192+ : ((new_state == PSB_PMSTATE_POWERDOWN) ? "powerdown" \
25193+ : "clockgated")); \
25194+} while (0)
25195+
25196+#endif
25197diff --git a/drivers/gpu/drm/psb/psb_msvdxinit.c b/drivers/gpu/drm/psb/psb_msvdxinit.c
25198new file mode 100644
25199index 0000000..49c5041
25200--- /dev/null
25201+++ b/drivers/gpu/drm/psb/psb_msvdxinit.c
25202@@ -0,0 +1,747 @@
25203+/**
25204+ * file psb_msvdxinit.c
25205+ * MSVDX initialization and mtx-firmware upload
25206+ *
25207+ */
25208+
25209+/**************************************************************************
25210+ *
25211+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
25212+ * Copyright (c) Imagination Technologies Limited, UK
25213+ * All Rights Reserved.
25214+ *
25215+ * Permission is hereby granted, free of charge, to any person obtaining a
25216+ * copy of this software and associated documentation files (the
25217+ * "Software"), to deal in the Software without restriction, including
25218+ * without limitation the rights to use, copy, modify, merge, publish,
25219+ * distribute, sub license, and/or sell copies of the Software, and to
25220+ * permit persons to whom the Software is furnished to do so, subject to
25221+ * the following conditions:
25222+ *
25223+ * The above copyright notice and this permission notice (including the
25224+ * next paragraph) shall be included in all copies or substantial portions
25225+ * of the Software.
25226+ *
25227+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25228+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25229+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
25230+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
25231+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25232+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25233+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
25234+ *
25235+ **************************************************************************/
25236+
25237+#include <drm/drmP.h>
25238+#include <drm/drm.h>
25239+#include "psb_drv.h"
25240+#include "psb_msvdx.h"
25241+#include <linux/firmware.h>
25242+
25243+#define MSVDX_REG (dev_priv->msvdx_reg)
25244+uint8_t psb_rev_id;
25245+/*MSVDX FW header*/
25246+struct msvdx_fw {
25247+ uint32_t ver;
25248+ uint32_t text_size;
25249+ uint32_t data_size;
25250+ uint32_t data_location;
25251+};
25252+
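+/*
+ * Poll the MSVDX register at 'offset' until the bits selected by 'enable'
+ * read back as 'value'.  Polls up to 10000 times with a ~1 ms delay
+ * (roughly ten seconds) and returns 0 on success, 1 on timeout.
+ */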
25253+int psb_wait_for_register(struct drm_psb_private *dev_priv,
25254+ uint32_t offset, uint32_t value, uint32_t enable)
25255+{
25256+ uint32_t tmp;
25257+ uint32_t poll_cnt = 10000;
25258+ while (poll_cnt) {
25259+ tmp = PSB_RMSVDX32(offset);
25260+ if (value == (tmp & enable)) /* All the bits are reset */
25261+ return 0; /* So exit */
25262+
25263+ /* Wait a bit */
25264+ DRM_UDELAY(1000);
25265+ poll_cnt--;
25266+ }
25267+ DRM_ERROR("MSVDX: Timeout while waiting for register %08x:"
25268+ " expecting %08x (mask %08x), got %08x\n",
25269+ offset, value, enable, tmp);
25270+
25271+ return 1;
25272+}
25273+
25274+int psb_poll_mtx_irq(struct drm_psb_private *dev_priv)
25275+{
25276+ int ret = 0;
25277+ uint32_t mtx_int = 0;
25278+
25279+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
25280+ 1);
25281+
25282+ ret = psb_wait_for_register(dev_priv, MSVDX_INTERRUPT_STATUS,
25283+ /* Required value */
25284+ mtx_int,
25285+ /* Enabled bits */
25286+ mtx_int);
25287+
25288+ if (ret) {
25289+ DRM_ERROR("MSVDX: Error Mtx did not return"
25290+ " int within a resonable time\n");
25291+ return ret;
25292+ }
25293+
25294+ PSB_DEBUG_IRQ("MSVDX: Got MTX Int\n");
25295+
25296+ /* Got it so clear the bit */
25297+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
25298+
25299+ return ret;
25300+}
25301+
25302+void psb_write_mtx_core_reg(struct drm_psb_private *dev_priv,
25303+ const uint32_t core_reg, const uint32_t val)
25304+{
25305+ uint32_t reg = 0;
25306+
25307+ /* Put data in MTX_RW_DATA */
25308+ PSB_WMSVDX32(val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
25309+
25310+ /* DREADY is set to 0 and request a write */
25311+ reg = core_reg;
25312+ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
25313+ MTX_RNW, 0);
25314+ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
25315+ MTX_DREADY, 0);
25316+ PSB_WMSVDX32(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
25317+
25318+ psb_wait_for_register(dev_priv,
25319+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
25320+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
25321+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
25322+}
25323+
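+/*
+ * Copy 'words' 32-bit words into MTX code or data RAM through the
+ * indirect RAM-access window, reprogramming the access-control register
+ * whenever the destination address crosses into a new RAM bank.
+ */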
25324+void psb_upload_fw(struct drm_psb_private *dev_priv,
25325+ const uint32_t data_mem, uint32_t ram_bank_size,
25326+ uint32_t address, const unsigned int words,
25327+ const uint32_t * const data)
25328+{
25329+ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
25330+ uint32_t access_ctrl;
25331+
25332+ /* Save the access control register... */
25333+ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
25334+
25335+ /* Wait for MCMSTAT to read back as idle (1) */
25336+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
25337+ 1, /* Required Value */
25338+ 0xffffffff /* Enables */);
25339+
25340+ for (loop = 0; loop < words; loop++) {
25341+ ram_id = data_mem + (address / ram_bank_size);
25342+ if (ram_id != cur_bank) {
25343+ addr = address >> 2;
25344+ ctrl = 0;
25345+ REGIO_WRITE_FIELD_LITE(ctrl,
25346+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25347+ MTX_MCMID, ram_id);
25348+ REGIO_WRITE_FIELD_LITE(ctrl,
25349+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25350+ MTX_MCM_ADDR, addr);
25351+ REGIO_WRITE_FIELD_LITE(ctrl,
25352+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25353+ MTX_MCMAI, 1);
25354+ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
25355+ cur_bank = ram_id;
25356+ }
25357+ address += 4;
25358+
25359+ PSB_WMSVDX32(data[loop],
25360+ MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
25361+
25362+ /* Wait for MCMSTAT to read back as idle (1) */
25363+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
25364+ 1, /* Required Value */
25365+ 0xffffffff /* Enables */);
25366+ }
25367+ PSB_DEBUG_GENERAL("MSVDX: Upload done\n");
25368+
25369+ /* Restore the access control register... */
25370+ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
25371+}
25372+
25373+static int psb_verify_fw(struct drm_psb_private *dev_priv,
25374+ const uint32_t ram_bank_size,
25375+ const uint32_t data_mem, uint32_t address,
25376+ const uint32_t words, const uint32_t * const data)
25377+{
25378+ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
25379+ uint32_t access_ctrl;
25380+ int ret = 0;
25381+
25382+ /* Save the access control register... */
25383+ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
25384+
25385+ /* Wait for MCMSTAT to read back as idle (1) */
25386+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
25387+ 1, /* Required Value */
25388+ 0xffffffff /* Enables */);
25389+
25390+ for (loop = 0; loop < words; loop++) {
25391+ uint32_t tmp;
25392+ ram_id = data_mem + (address / ram_bank_size);
25393+
25394+ if (ram_id != cur_bank) {
25395+ addr = address >> 2;
25396+ ctrl = 0;
25397+ REGIO_WRITE_FIELD_LITE(ctrl,
25398+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25399+ MTX_MCMID, ram_id);
25400+ REGIO_WRITE_FIELD_LITE(ctrl,
25401+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25402+ MTX_MCM_ADDR, addr);
25403+ REGIO_WRITE_FIELD_LITE(ctrl,
25404+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25405+ MTX_MCMAI, 1);
25406+ REGIO_WRITE_FIELD_LITE(ctrl,
25407+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25408+ MTX_MCMR, 1);
25409+
25410+ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
25411+
25412+ cur_bank = ram_id;
25413+ }
25414+ address += 4;
25415+
25416+ /* Wait for MCMSTAT to read back as idle (1) */
25417+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
25418+ 1, /* Required Value */
25419+ 0xffffffff /* Enables */);
25420+
25421+ tmp = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
25422+ if (data[loop] != tmp) {
25423+ DRM_ERROR("psb: Firmware validation fails"
25424+ " at index=%08x\n", loop);
25425+ ret = 1;
25426+ break;
25427+ }
25428+ }
25429+
25430+ /* Restore the access control register... */
25431+ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
25432+
25433+ return ret;
25434+}
25435+
25436+static uint32_t *msvdx_get_fw(struct drm_device *dev,
25437+ const struct firmware **raw, uint8_t *name)
25438+{
25439+ struct drm_psb_private *dev_priv = dev->dev_private;
25440+ int rc, fw_size;
25441+ int *ptr = NULL;
25442+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
25443+
25444+ rc = request_firmware(raw, name, &dev->pdev->dev);
25445+ if (rc < 0) {
25446+ DRM_ERROR("MSVDX: %s request_firmware failed: Reason %d\n",
25447+ name, rc);
25448+ return NULL;
25449+ }
25450+
25451+ if ((*raw)->size < sizeof(struct msvdx_fw)) {
25452+ DRM_ERROR("MSVDX: %s is not the correct size(%zd)\n",
25453+ name, (*raw)->size);
+ release_firmware(*raw); /* don't leak the firmware blob on error */
25454+ return NULL;
25455+ }
25456+
25457+ ptr = (int *) ((*raw))->data;
25458+
25459+ if (!ptr) {
25460+ DRM_ERROR("MSVDX: Failed to load %s\n", name);
25461+ return NULL;
25462+ }
25463+
25464+ /* another sanity check... */
25465+ fw_size = sizeof(struct msvdx_fw) +
25466+ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
25467+ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size;
25468+ if ((*raw)->size != fw_size) {
25469+ DRM_ERROR("MSVDX: %s is not the correct size(%zd)\n",
25470+ name, (*raw)->size);
+ release_firmware(*raw); /* don't leak the firmware blob on error */
25471+ return NULL;
25472+ }
25473+ msvdx_priv->msvdx_fw = kzalloc(fw_size, GFP_KERNEL);
25474+ if (msvdx_priv->msvdx_fw == NULL)
25475+ DRM_ERROR("MSVDX: allocate FW buffer failed\n");
25476+ else {
25477+ memcpy(msvdx_priv->msvdx_fw, ptr, fw_size);
25478+ msvdx_priv->msvdx_fw_size = fw_size;
25479+ }
25480+
25481+ PSB_DEBUG_GENERAL("MSVDX: releasing firmware resources\n");
25482+ release_firmware(*raw);
25483+
25484+ return msvdx_priv->msvdx_fw;
25485+}
25486+
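+/*
+ * Load and start the MTX firmware: reset the MTX, initialise the
+ * communication area, upload the firmware text and data sections into
+ * MTX RAM, start the MTX thread, then wait for the firmware to write the
+ * signature value back into the communication area.
+ */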
25487+int psb_setup_fw(struct drm_device *dev)
25488+{
25489+ struct drm_psb_private *dev_priv = dev->dev_private;
25490+ int ret = 0;
25491+
25492+ uint32_t ram_bank_size;
25493+ struct msvdx_fw *fw;
25494+ uint32_t *fw_ptr = NULL;
25495+ uint32_t *text_ptr = NULL;
25496+ uint32_t *data_ptr = NULL;
25497+ const struct firmware *raw = NULL;
25498+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
25499+
25500+ /* TODO: assert the clock is on; if not, turn it on before uploading code */
25501+ PSB_DEBUG_GENERAL("MSVDX: psb_setup_fw\n");
25502+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
25503+
25504+ /* Reset MTX */
25505+ PSB_WMSVDX32(MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK,
25506+ MSVDX_MTX_SOFT_RESET);
25507+
25508+ /* Initialise the communication control area to 0 */
25509+ if (psb_rev_id >= POULSBO_D1) {
25510+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1"
25511+ " or later revision.\n");
25512+ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1,
25513+ MSVDX_COMMS_OFFSET_FLAGS);
25514+ } else {
25515+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0"
25516+ " or earlier revision.\n");
25517+ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0,
25518+ MSVDX_COMMS_OFFSET_FLAGS);
25519+ }
25520+
25521+ PSB_WMSVDX32(0, MSVDX_COMMS_MSG_COUNTER);
25522+ PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
25523+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_RD_INDEX);
25524+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
25525+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_RD_INDEX);
25526+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
25527+ PSB_WMSVDX32(0, MSVDX_COMMS_FW_STATUS);
25528+
25529+ /* read register bank size */
25530+ {
25531+ uint32_t bank_size, reg;
25532+ reg = PSB_RMSVDX32(MSVDX_MTX_RAM_BANK);
25533+ bank_size =
25534+ REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK,
25535+ CR_MTX_RAM_BANK_SIZE);
25536+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
25537+ }
25538+
25539+ PSB_DEBUG_GENERAL("MSVDX: RAM bank size = %d bytes\n",
25540+ ram_bank_size);
25541+
25542+ /* if FW already loaded from storage */
25543+ if (msvdx_priv->msvdx_fw)
25544+ fw_ptr = msvdx_priv->msvdx_fw;
25545+ else {
25546+ PSB_DEBUG_GENERAL("MSVDX: loading msvdx_fw.bin via udevd\n");
25547+ fw_ptr = msvdx_get_fw(dev, &raw, "msvdx_fw.bin");
25548+ }
25549+
25550+ if (!fw_ptr) {
25551+ DRM_ERROR("MSVDX: loading msvdx_fw.bin failed; is udevd running?\n");
25552+ ret = 1;
25553+ goto out;
25554+ }
25555+
25556+ fw = (struct msvdx_fw *) fw_ptr;
25557+ if (fw->ver != 0x02) {
25558+ DRM_ERROR("psb: msvdx_fw.bin firmware version mismatch, "
25559+ "got version=%02x, expected version=%02x\n",
25560+ fw->ver, 0x02);
25561+ ret = 1;
25562+ goto out;
25563+ }
25564+
25565+ text_ptr =
25566+ (uint32_t *) ((uint8_t *) fw_ptr + sizeof(struct msvdx_fw));
25567+ data_ptr = text_ptr + fw->text_size;
25568+
25569+ PSB_DEBUG_GENERAL("MSVDX: Retrieved pointers for firmware\n");
25570+ PSB_DEBUG_GENERAL("MSVDX: text_size: %d\n", fw->text_size);
25571+ PSB_DEBUG_GENERAL("MSVDX: data_size: %d\n", fw->data_size);
25572+ PSB_DEBUG_GENERAL("MSVDX: data_location: 0x%x\n",
25573+ fw->data_location);
25574+ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of text: 0x%x\n",
25575+ *text_ptr);
25576+ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of data: 0x%x\n",
25577+ *data_ptr);
25578+
25579+ PSB_DEBUG_GENERAL("MSVDX: Uploading firmware\n");
25580+ psb_upload_fw(dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
25581+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size,
25582+ text_ptr);
25583+ psb_upload_fw(dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
25584+ fw->data_location - MTX_DATA_BASE, fw->data_size,
25585+ data_ptr);
25586+
25587+#if 0
25588+ /* todo : Verify code upload possibly only in debug */
25589+ ret = psb_verify_fw(dev_priv, ram_bank_size,
25590+ MTX_CORE_CODE_MEM,
25591+ PC_START_ADDRESS - MTX_CODE_BASE,
25592+ fw->text_size, text_ptr);
25593+ if (ret) {
25594+ /* Firmware code upload failed */
25595+ ret = 1;
25596+ goto out;
25597+ }
25598+
25599+ ret = psb_verify_fw(dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
25600+ fw->data_location - MTX_DATA_BASE,
25601+ fw->data_size, data_ptr);
25602+ if (ret) {
25603+ /* Firmware data upload failed */
25604+ ret = 1;
25605+ goto out;
25606+ }
25607+#else
25608+ (void)psb_verify_fw;
25609+#endif
25610+ /* -- Set starting PC address */
25611+ psb_write_mtx_core_reg(dev_priv, MTX_PC, PC_START_ADDRESS);
25612+
25613+ /* -- Turn on the thread */
25614+ PSB_WMSVDX32(MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
25615+
25616+ /* Wait for the signature value to be written back */
25617+ ret = psb_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE,
25618+ MSVDX_COMMS_SIGNATURE_VALUE, /*Required value*/
25619+ 0xffffffff /* Enabled bits */);
25620+ if (ret) {
25621+ DRM_ERROR("MSVDX: firmware failed to initialize.\n");
25622+ goto out;
25623+ }
25624+
25625+ PSB_DEBUG_GENERAL("MSVDX: MTX Initial indications OK\n");
25626+ PSB_DEBUG_GENERAL("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
25627+ MSVDX_COMMS_AREA_ADDR);
25628+#if 0
25629+
25630+ /* Send test message */
25631+ {
25632+ uint32_t msg_buf[FW_VA_DEBUG_TEST2_SIZE >> 2];
25633+
25634+ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_MSG_SIZE,
25635+ FW_VA_DEBUG_TEST2_SIZE);
25636+ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_ID,
25637+ VA_MSGID_TEST2);
25638+
25639+ ret = psb_mtx_send(dev_priv, msg_buf);
25640+ if (ret) {
25641+ DRM_ERROR("psb: MSVDX sending fails.\n");
25642+ goto out;
25643+ }
25644+
25645+ /* Wait for Mtx to ack this message */
25646+ psb_poll_mtx_irq(dev_priv);
25647+
25648+ }
25649+#endif
25650+out:
25651+
25652+ return ret;
25653+}
25654+
25655+
25656+static void psb_free_ccb(struct ttm_buffer_object **ccb)
25657+{
25658+ ttm_bo_unref(ccb);
25659+ *ccb = NULL;
25660+}
25661+
25662+/**
25663+ * Reset chip and disable interrupts.
25664+ * Returns 0 on success, 1 on failure.
25665+ */
25666+int psb_msvdx_reset(struct drm_psb_private *dev_priv)
25667+{
25668+ int ret = 0;
25669+
25670+ /* Issue software reset */
25671+ PSB_WMSVDX32(msvdx_sw_reset_all, MSVDX_CONTROL);
25672+
25673+ ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL, 0,
25674+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
25675+
25676+ if (!ret) {
25677+ /* Clear interrupt enabled flag */
25678+ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
25679+
25680+ /* Clear any pending interrupt flags */
25681+ PSB_WMSVDX32(0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
25682+ }
25683+
25684+ /* mutex_destroy(&msvdx_priv->msvdx_mutex); */
25685+
25686+ return ret;
25687+}
25688+
25689+static int psb_allocate_ccb(struct drm_device *dev,
25690+ struct ttm_buffer_object **ccb,
25691+ uint32_t *base_addr, int size)
25692+{
25693+ struct drm_psb_private *dev_priv = psb_priv(dev);
25694+ struct ttm_bo_device *bdev = &dev_priv->bdev;
25695+ int ret;
25696+ struct ttm_bo_kmap_obj tmp_kmap;
25697+ bool is_iomem;
25698+
25699+ PSB_DEBUG_INIT("MSVDX: allocate CCB\n");
25700+
25701+ ret = ttm_buffer_object_create(bdev, size,
25702+ ttm_bo_type_kernel,
25703+ DRM_PSB_FLAG_MEM_KERNEL |
25704+ TTM_PL_FLAG_NO_EVICT, 0, 0, 0,
25705+ NULL, ccb);
25706+ if (ret) {
25707+ DRM_ERROR("MSVDX:failed to allocate CCB.\n");
25708+ *ccb = NULL;
25709+ return 1;
25710+ }
25711+
25712+ ret = ttm_bo_kmap(*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
25713+ if (ret) {
25714+ PSB_DEBUG_GENERAL("ttm_bo_kmap failed ret: %d\n", ret);
25715+ ttm_bo_unref(ccb);
25716+ *ccb = NULL;
25717+ return 1;
25718+ }
25719+
25720+ /* clear exactly the buffer that was allocated; clearing a fixed
+ * RENDEC_A_SIZE here regardless of the requested size looks wrong */
25721+ memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0, size);
25722+ ttm_bo_kunmap(&tmp_kmap);
25723+
25724+ *base_addr = (*ccb)->offset;
25725+ return 0;
25726+}
25727+
25728+static ssize_t psb_msvdx_pmstate_show(struct device *dev,
25729+ struct device_attribute *attr, char *buf)
25730+{
25731+ struct drm_device *drm_dev = dev_get_drvdata(dev);
25732+ struct drm_psb_private *dev_priv;
25733+ struct msvdx_private *msvdx_priv;
25734+ unsigned int pmstate;
25735+ unsigned long flags;
25736+ int ret = -EINVAL;
25737+
25738+ if (drm_dev == NULL)
25739+ return 0;
25740+
25741+ dev_priv = drm_dev->dev_private;
25742+ msvdx_priv = dev_priv->msvdx_private;
25743+ pmstate = msvdx_priv->pmstate;
25744+
25745+ spin_lock_irqsave(&msvdx_priv->msvdx_lock, flags);
25746+ ret = sprintf(buf, "%s\n",
25747+ (pmstate == PSB_PMSTATE_POWERUP) ? "powerup"
25748+ : ((pmstate == PSB_PMSTATE_POWERDOWN) ? "powerdown"
25749+ : "clockgated"));
25750+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, flags);
25751+
25752+ return ret;
25753+}
25754+
25755+static DEVICE_ATTR(msvdx_pmstate, 0444, psb_msvdx_pmstate_show, NULL);
25756+
25757+
25758+int psb_msvdx_init(struct drm_device *dev)
25759+{
25760+ struct drm_psb_private *dev_priv = dev->dev_private;
25761+ /* uint32_t clk_gate_ctrl = clk_enable_all; */
25762+ uint32_t cmd;
25763+ int ret;
25764+ struct msvdx_private *msvdx_priv;
25765+
25766+ if (!dev_priv->msvdx_private) {
25767+ msvdx_priv = kmalloc(sizeof(struct msvdx_private), GFP_KERNEL);
25768+ if (msvdx_priv == NULL)
25769+ goto err_exit;
25770+
25771+ dev_priv->msvdx_private = msvdx_priv;
25772+ memset(msvdx_priv, 0, sizeof(struct msvdx_private));
25773+
25774+ /* psb_msvdx_pmstate_show (msvdx_pmpolicy) needs to walk device -->
25775+ * drm_device --> drm_psb_private --> msvdx_priv; without
25776+ * pci_set_drvdata() the drm_device cannot be recovered from the device.
25777+ */
25778+ /* pci_set_drvdata(dev->pdev, dev); */
25779+ if (device_create_file(&dev->pdev->dev,
25780+ &dev_attr_msvdx_pmstate))
25781+ DRM_ERROR("MSVDX: could not create sysfs file\n");
25782+ msvdx_priv->sysfs_pmstate = sysfs_get_dirent(
25783+ dev->pdev->dev.kobj.sd, "msvdx_pmstate");
25784+ }
25785+
25786+ msvdx_priv = dev_priv->msvdx_private;
25787+ if (!msvdx_priv->ccb0) { /* one for the first time */
25788+ /* Initialize the MSVDX command queue */
25789+ INIT_LIST_HEAD(&msvdx_priv->msvdx_queue);
25790+ mutex_init(&msvdx_priv->msvdx_mutex);
25791+ spin_lock_init(&msvdx_priv->msvdx_lock);
25792+ /*figure out the stepping */
25793+ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &psb_rev_id);
25794+ }
25795+
25796+ msvdx_priv->msvdx_busy = 0;
25797+
25798+ /* Enable Clocks */
25799+ PSB_DEBUG_GENERAL("Enabling clocks\n");
25800+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
25801+
25802+ /* Enable MMU by removing all bypass bits */
25803+ PSB_WMSVDX32(0, MSVDX_MMU_CONTROL0);
25804+
25805+ /* firmware loading is deferred until the first command buffer is received */
25806+
25807+ PSB_DEBUG_GENERAL("MSVDX: Setting up RENDEC,allocate CCB 0/1\n");
25808+ /* Allocate device virtual memory as required by rendec.... */
25809+ if (!msvdx_priv->ccb0) {
25810+ ret = psb_allocate_ccb(dev, &msvdx_priv->ccb0,
25811+ &msvdx_priv->base_addr0,
25812+ RENDEC_A_SIZE);
25813+ if (ret)
25814+ goto err_exit;
25815+ }
25816+
25817+ if (!msvdx_priv->ccb1) {
25818+ ret = psb_allocate_ccb(dev, &msvdx_priv->ccb1,
25819+ &msvdx_priv->base_addr1,
25820+ RENDEC_B_SIZE);
25821+ if (ret)
25822+ goto err_exit;
25823+ }
25824+
25825+
25826+ PSB_DEBUG_GENERAL("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
25827+ msvdx_priv->base_addr0, msvdx_priv->base_addr1);
25828+
25829+ PSB_WMSVDX32(msvdx_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
25830+ PSB_WMSVDX32(msvdx_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
25831+
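+ /* RENDEC buffer sizes appear to be programmed in units of 4 KB,
+ * hence the division by 4096 below. */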
25832+ cmd = 0;
25833+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
25834+ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
25835+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
25836+ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
25837+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_BUFFER_SIZE);
25838+
25839+ cmd = 0;
25840+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
25841+ RENDEC_DECODE_START_SIZE, 0);
25842+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
25843+ RENDEC_BURST_SIZE_W, 1);
25844+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
25845+ RENDEC_BURST_SIZE_R, 1);
25846+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
25847+ RENDEC_EXTERNAL_MEMORY, 1);
25848+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL1);
25849+
25850+ cmd = 0x00101010;
25851+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT0);
25852+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT1);
25853+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT2);
25854+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT3);
25855+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT4);
25856+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT5);
25857+
25858+ cmd = 0;
25859+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE,
25860+ 1);
25861+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL0);
25862+
25863+ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
25864+ PSB_DEBUG_INIT("MSVDX: deferring firmware loading until"
25865+ " user space commands are received\n");
25866+
25867+ msvdx_priv->msvdx_fw_loaded = 0; /* firmware still needs to be loaded */
25868+
25869+ psb_msvdx_clearirq(dev);
25870+ psb_msvdx_enableirq(dev);
25871+
25872+ if (IS_MRST(dev)) {
25873+ PSB_DEBUG_INIT("MSVDX: old clock gating disable = 0x%08x\n",
25874+ PSB_RVDC32(PSB_MSVDX_CLOCKGATING));
25875+ PSB_DEBUG_INIT("MSVDX: reset MSVDX to disable clock gating\n");
25876+
25877+ PSB_WVDC32(0x000101ff, PSB_MSVDX_CLOCKGATING);
25878+
25879+ PSB_DEBUG_INIT("MSVDX: new clock gating disable = 0x%08x\n",
25880+ PSB_RVDC32(PSB_MSVDX_CLOCKGATING));
25881+ }
25882+
25883+#if 0
25884+ ret = psb_setup_fw(dev);
25885+ if (ret)
25886+ goto err_exit;
25887+ /* Send Initialisation message to firmware */
25888+ if (0) {
25889+ uint32_t msg_init[FW_VA_INIT_SIZE >> 2];
25890+ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_SIZE,
25891+ FW_VA_INIT_SIZE);
25892+ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_ID, VA_MSGID_INIT);
25893+
25894+ /* Need to set this for all but A0 */
25895+ MEMIO_WRITE_FIELD(msg_init, FW_VA_INIT_GLOBAL_PTD,
25896+ psb_get_default_pd_addr(dev_priv->mmu));
25897+
25898+ ret = psb_mtx_send(dev_priv, msg_init);
25899+ if (ret)
25900+ goto err_exit;
25901+
25902+ psb_poll_mtx_irq(dev_priv);
25903+ }
25904+#endif
25905+
25906+ return 0;
25907+
25908+err_exit:
25909+ DRM_ERROR("MSVDX: initialization failed\n");
25910+ /* msvdx_priv is NULL if its allocation failed above */
25911+ if (msvdx_priv && msvdx_priv->ccb0)
25912+ psb_free_ccb(&msvdx_priv->ccb0);
25913+ if (msvdx_priv && msvdx_priv->ccb1)
25914+ psb_free_ccb(&msvdx_priv->ccb1);
+ kfree(dev_priv->msvdx_private);
+ dev_priv->msvdx_private = NULL;
25915+
25916+ return 1;
25917+}
25918+
25919+int psb_msvdx_uninit(struct drm_device *dev)
25920+{
25921+ struct drm_psb_private *dev_priv = dev->dev_private;
25922+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
25923+
25924+ /* Reset MSVDX chip */
25925+ psb_msvdx_reset(dev_priv);
25926+
25927+ /* PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */
25928+ PSB_DEBUG_INIT("MSVDX:set the msvdx clock to 0\n");
25929+ PSB_WMSVDX32(0, MSVDX_MAN_CLK_ENABLE);
25930+
25931+ if (msvdx_priv) {
25932+ if (msvdx_priv->ccb0)
25933+ psb_free_ccb(&msvdx_priv->ccb0);
25934+ if (msvdx_priv->ccb1)
25935+ psb_free_ccb(&msvdx_priv->ccb1);
25936+ kfree(msvdx_priv->msvdx_fw);
25939+ /* pci_set_drvdata(dev->pdev, NULL); */
25940+ device_remove_file(&dev->pdev->dev, &dev_attr_msvdx_pmstate);
25941+ sysfs_put(msvdx_priv->sysfs_pmstate);
25942+ msvdx_priv->sysfs_pmstate = NULL;
25943+
25944+ kfree(msvdx_priv);
25945+ dev_priv->msvdx_private = NULL;
25946+ }
25947+
25948+ return 0;
25949+}
25950diff --git a/drivers/gpu/drm/psb/psb_powermgmt.c b/drivers/gpu/drm/psb/psb_powermgmt.c
25951new file mode 100644
25952index 0000000..c59a701
25953--- /dev/null
25954+++ b/drivers/gpu/drm/psb/psb_powermgmt.c
25955@@ -0,0 +1,1146 @@
25956+/**************************************************************************
25957+ * Copyright (c) 2009, Intel Corporation.
25958+ * All Rights Reserved.
25959+
25960+ * Permission is hereby granted, free of charge, to any person obtaining a
25961+ * copy of this software and associated documentation files (the "Software"),
25962+ * to deal in the Software without restriction, including without limitation
25963+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
25964+ * and/or sell copies of the Software, and to permit persons to whom the
25965+ * Software is furnished to do so, subject to the following conditions:
25966+ *
25967+ * The above copyright notice and this permission notice (including the next
25968+ * paragraph) shall be included in all copies or substantial portions of the
25969+ * Software.
25970+ *
25971+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25972+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25973+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25974+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25975+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25976+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25977+ * SOFTWARE.
25978+ *
25979+ * Authors:
25980+ * Benjamin Defnet <benjamin.r.defnet@intel.com>
25981+ *
25982+ */
25983+#include "psb_powermgmt.h"
25984+#include "psb_drv.h"
25985+#include "psb_intel_reg.h"
25986+#include "psb_scene.h"
25987+#include "lnc_topaz.h"
25988+#include "psb_msvdx.h"
25989+
25990+#include <linux/mutex.h>
25991+
25992+static struct mutex g_state_change_mutex;
25993+static int g_hw_power_status_mask;
25994+static int g_pci_power_status;
25995+static atomic_t g_display_access_count;
25996+static atomic_t g_graphics_access_count;
25997+static atomic_t g_videoenc_access_count;
25998+static atomic_t g_videodec_access_count;
25999+static bool g_suspend_in_progress;
26000+static bool g_resume_in_progress;
26001+static int g_suspend_mask;
26002+static int g_resume_mask;
26003+static bool g_forcing_resume;
26004+static atomic_t g_pm_waiters;
26005+
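+/*
+ * Module-wide power-management state: g_hw_power_status_mask tracks
+ * which power islands are currently powered, the access counters track
+ * outstanding users of each island, and the suspend/resume flags and
+ * masks describe any state transition currently in progress.
+ */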
26006+/*#define PWRMGMT_DEBUG*/
26007+#ifdef PWRMGMT_DEBUG
26008+ #define PWR_PRINT(_fmt, _arg...) \
26009+ printk(KERN_INFO _fmt, ##_arg)
26010+#else
26011+ #define PWR_PRINT(_fmt, _arg...) do { } while (0)
26012+#endif
26013+
26014+/*
26015+ * powermgmt_init
26016+ *
26017+ * Description: Initialize this power management module
26018+ */
26019+void powermgmt_init(void)
26020+{
26021+ mutex_init(&g_state_change_mutex);
26022+ g_hw_power_status_mask = PSB_ALL_ISLANDS;
26023+ g_pci_power_status = 1;
26024+ atomic_set(&g_display_access_count, 0);
26025+ atomic_set(&g_graphics_access_count, 0);
26026+ atomic_set(&g_videoenc_access_count, 0);
26027+ atomic_set(&g_videodec_access_count, 0);
26028+ atomic_set(&g_pm_waiters, 0);
26029+}
26030+
26031+/*
26032+ * powermgmt_shutdown
26033+ *
26034+ * Description: Shut down this power management module
26035+ */
26036+void powermgmt_shutdown(void)
26037+{
26038+ mutex_destroy(&g_state_change_mutex);
26039+}
26040+
26041+/*
26042+ * powermgmt_down_island_power
26043+ *
26044+ * Description: Cut power to the specified island (powergating)
26045+ */
26046+void powermgmt_down_island_power(struct drm_device *dev, int islands)
26047+{
26048+ u32 pwr_cnt = 0;
26049+ u32 pwr_mask = 0;
26050+ u32 pwr_sts;
26051+
26052+ struct drm_psb_private *dev_priv =
26053+ (struct drm_psb_private *) dev->dev_private;
26054+
26055+ PWR_PRINT("BEN_KERNEL_OSPM************DOWN ISLAND POWER %d\n", islands);
26056+
26057+ if (!IS_MRST(dev)) {
26058+ g_hw_power_status_mask &= ~islands;
26059+ return;
26060+ }
26061+
26062+ g_hw_power_status_mask &= ~islands;
26063+
26064+ if (islands & PSB_GRAPHICS_ISLAND) {
26065+ pwr_cnt |= PSB_PWRGT_GFX_MASK;
26066+ pwr_mask |= PSB_PWRGT_GFX_MASK;
26067+ }
26068+ if (islands & PSB_VIDEO_ENC_ISLAND) {
26069+ pwr_cnt |= PSB_PWRGT_VID_ENC_MASK;
26070+ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
26071+ }
26072+ if (islands & PSB_VIDEO_DEC_ISLAND) {
26073+ pwr_cnt |= PSB_PWRGT_VID_DEC_MASK;
26074+ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
26075+ }
26076+ if (pwr_cnt) {
26077+ pwr_cnt |= inl(dev_priv->apm_base);
26078+ outl(pwr_cnt, dev_priv->apm_base);
26079+ while (true) {
26080+ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
26081+ if ((pwr_sts & pwr_mask) == pwr_mask)
26082+ break;
26083+ else
26084+ udelay(10);
26085+ }
26086+ }
26087+
26088+ if (islands & PSB_DISPLAY_ISLAND) {
26089+ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
26090+ outl(PSB_PWRGT_DISPLAY_MASK, (dev_priv->ospm_base + PSB_PM_SSC));
26091+ while (true) {
26092+ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
26093+ if ((pwr_sts & pwr_mask) == pwr_mask)
26094+ break;
26095+ else
26096+ udelay(10);
26097+ }
26098+ }
26099+}
26100+
26101+/*
26102+ * powermgmt_up_island_power
26103+ *
26104+ * Description: Restore power to the specified island (powergating)
26105+ */
26106+void powermgmt_up_island_power(struct drm_device *dev, int islands)
26107+{
26108+ u32 pwr_cnt;
26109+ u32 pwr_sts;
26110+ u32 pwr_mask;
26111+ u32 count;
26112+ struct drm_psb_private *dev_priv =
26113+ (struct drm_psb_private *) dev->dev_private;
26114+
26115+ PWR_PRINT("BEN_KERNEL_OSPM************UP ISLAND POWER %d\n", islands);
26116+
26117+ if (!IS_MRST(dev)) {
26118+ g_hw_power_status_mask |= islands;
26119+ return;
26120+ }
26121+
26122+ if (islands & (PSB_GRAPHICS_ISLAND | PSB_VIDEO_ENC_ISLAND |
26123+ PSB_VIDEO_DEC_ISLAND)) {
26124+ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
26125+ pwr_mask = 0;
26126+ if (islands & PSB_GRAPHICS_ISLAND) {
26127+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
26128+ pwr_mask |= PSB_PWRGT_GFX_MASK;
26129+ }
26130+ if (islands & PSB_VIDEO_ENC_ISLAND) {
26131+ pwr_cnt &= ~PSB_PWRGT_VID_ENC_MASK;
26132+ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
26133+ }
26134+ if (islands & PSB_VIDEO_DEC_ISLAND) {
26135+ pwr_cnt &= ~PSB_PWRGT_VID_DEC_MASK;
26136+ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
26137+ }
26138+
26139+ if (pwr_mask) {
26140+ count = 5;
26141+ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
26142+ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
26143+ while (true) {
26144+ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
26145+ if ((pwr_sts & pwr_mask) == 0)
26146+ break;
26147+ else
26148+ udelay(10);
26149+ }
26150+ }
26151+ }
26152+
26153+ if (islands & PSB_DISPLAY_ISLAND) {
26154+ count = 5;
26155+ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
26156+ pwr_cnt &= ~PSB_PWRGT_DISPLAY_MASK;
26157+ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
26158+ outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
26159+ while (true) {
26160+ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
26161+ if ((pwr_sts & pwr_mask) == 0)
26162+ break;
26163+ else
26164+ udelay(10);
26165+ }
26166+ }
26167+
26168+ g_hw_power_status_mask |= islands;
26169+}
26170+
26171+/*
26172+ * save_display_registers
26173+ *
26174+ * Description: We are going to suspend so save current display
26175+ * register state.
26176+ */
26177+static int save_display_registers(struct drm_device *dev)
26178+{
26179+ struct drm_psb_private *dev_priv = dev->dev_private;
26180+ struct drm_crtc * crtc;
26181+ struct drm_connector * connector;
26182+ int i;
26183+
26184+ /* Display arbitration control + watermarks */
26185+ dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
26186+ dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
26187+ dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
26188+ dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
26189+ dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
26190+ dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
26191+ dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
26192+ dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
26193+
26194+ if (IS_MRST(dev)) {
26195+ /* Pipe & plane A info */
26196+ dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
26197+ dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
26198+ dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
26199+ dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
26200+ dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
26201+ dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
26202+ dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
26203+ dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
26204+ dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
26205+ dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
26206+ dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
26207+ dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
26208+ dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
26209+ dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
26210+ dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
26211+ dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
26212+ dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
26213+ dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
26214+
26215+ /*save cursor regs*/
26216+ dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
26217+ dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
26218+ dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
26219+
26220+ /*save palette (gamma) */
26221+ for (i = 0; i < 256; i++)
26222+ dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i<<2));
26223+
26224+ /*save performance state*/
26225+ dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
26226+
26227+ /* LVDS state */
26228+ dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
26229+ dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
26230+ dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
26231+ dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
26232+ dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
26233+ dev_priv->saveLVDS = PSB_RVDC32(LVDS);
26234+ dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
26235+ dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
26236+ dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
26237+ dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
26238+
26239+ /* HW overlay */
26240+ dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
26241+ dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
26242+ dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
26243+ dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
26244+ dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
26245+ dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
26246+ dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
26247+
26248+ } else { /*PSB*/
26249+ /*save crtc and output state*/
26250+ mutex_lock(&dev->mode_config.mutex);
26251+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
26252+ if(drm_helper_crtc_in_use(crtc)) {
26253+ crtc->funcs->save(crtc);
26254+ }
26255+ }
26256+
26257+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
26258+ connector->funcs->save(connector);
26259+ }
26260+ mutex_unlock(&dev->mode_config.mutex);
26261+ }
26262+
26263+ /* Interrupt state */
26264+ /*
26265+ * Handled in psb_irq.c
26266+ */
26267+
26268+ return 0;
26269+}
26270+
26271+/*
26272+ * restore_display_registers
26273+ *
26274+ * Description: We are going to resume so restore display register state.
26275+ */
26276+static int restore_display_registers(struct drm_device *dev)
26277+{
26278+ struct drm_psb_private *dev_priv = dev->dev_private;
26279+ struct drm_crtc * crtc;
26280+ struct drm_connector * connector;
26281+ unsigned long i, pp_stat;
26282+
26283+ /* Display arbitration + watermarks */
26284+ PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
26285+ PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
26286+ PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
26287+ PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
26288+ PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
26289+ PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
26290+ PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
26291+ PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
26292+
26293+ /*make sure VGA plane is off. it initializes to on after reset!*/
26294+ PSB_WVDC32(0x80000000, VGACNTRL);
26295+
26296+ if (IS_MRST(dev)) {
26297+ /* set the plls */
26298+ PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
26299+ PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
26300+ /* Actually enable it */
26301+ PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
26302+ DRM_UDELAY(150);
26303+
26304+ /* Restore mode */
26305+ PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
26306+ PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
26307+ PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
26308+ PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
26309+ PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
26310+ PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
26311+ PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
26312+ PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
26313+
26314+ /*restore performance mode*/
26315+ PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
26316+
26317+ /*enable the pipe*/
26318+ PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
26319+
26320+ /*set up the plane*/
26321+ PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
26322+ PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
26323+ PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
26324+
26325+ /* Enable the plane */
26326+ PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
26327+ PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
26328+
26329+ /*Enable Cursor A*/
26330+ PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
26331+ PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
26332+ PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
26333+
26334+ /* restore palette (gamma) */
26335+ /*DRM_UDELAY(50000); */
26336+ for (i = 0; i < 256; i++)
26337+ PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i<<2));
26338+
26339+ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
26340+ PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
26341+ PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
26342+ PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
26343+ PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
26344+ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
26345+ PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
26346+ PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
26347+ PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
26348+ PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
26349+
26350+ /*wait for cycle delay*/
26351+ do {
26352+ pp_stat = PSB_RVDC32(PP_STATUS);
26353+ } while (pp_stat & 0x08000000);
26354+
26355+ DRM_UDELAY(999);
26356+ /*wait for panel power up*/
26357+ do {
26358+ pp_stat = PSB_RVDC32(PP_STATUS);
26359+ } while (pp_stat & 0x10000000);
26360+
26361+ /* restore HW overlay */
26362+ PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
26363+ PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
26364+ PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
26365+ PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
26366+ PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
26367+ PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
26368+ PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
26369+
26370+ } else { /*PSB*/
26371+ mutex_lock(&dev->mode_config.mutex);
26372+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
26373+ if(drm_helper_crtc_in_use(crtc))
26374+ crtc->funcs->restore(crtc);
26375+ }
26376+
26377+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
26378+ connector->funcs->restore(connector);
26379+ }
26380+ mutex_unlock(&dev->mode_config.mutex);
26381+ }
26382+
26383+
26384+ /*Interrupt state*/
26385+ /*
26386+ * Handled in psb_irq.c
26387+ */
26388+
26389+ return 0;
26390+}
26391+
26392+/*
26393+ * powermgmt_suspend_graphics
26394+ *
26395+ * Description: Suspend the graphics hardware saving state and disabling
26396+ * as necessary.
26397+ */
26398+static void powermgmt_suspend_graphics(struct drm_device *dev, bool b_initiated_by_ospm)
26399+{
26400+ struct drm_psb_private *dev_priv = dev->dev_private;
26401+
26402+ if (!(g_hw_power_status_mask & PSB_GRAPHICS_ISLAND))
26403+ return;
26404+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_graphics\n");
26405+
26406+ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
26407+ if (b_initiated_by_ospm) {
26408+ int ret = -EBUSY;
26409+ ret = psb_idle_3d(dev);
26410+ if (ret == -EBUSY) {
26411+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_graphics ***3d BUSY!!!!!!\n");
26412+ return;
26413+ }
26414+
26415+ ret = psb_idle_2d(dev);
26416+ if (ret == -EBUSY) {
26417+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_graphics ***2d BUSY!!!!!!\n");
26418+ return;
26419+ }
26422+ if (IS_POULSBO(dev))
26423+ flush_scheduled_work();
26424+ }
26425+ psb_irq_uninstall_islands(dev, PSB_GRAPHICS_ISLAND);
26426+ powermgmt_down_island_power(dev, PSB_GRAPHICS_ISLAND);
26427+}
26428+
26429+/*
26430+ * powermgmt_resume_graphics
26431+ *
26432+ * Description: Resume the graphics hardware restoring state and enabling
26433+ * as necessary.
26434+ */
26435+static void powermgmt_resume_graphics(struct drm_device *dev)
26436+{
26437+ struct drm_psb_private *dev_priv = dev->dev_private;
26438+
26439+ if (g_hw_power_status_mask & PSB_GRAPHICS_ISLAND)
26440+ return;
26441+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_graphics\n");
26442+
26443+ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
26444+
26445+ powermgmt_up_island_power(dev, PSB_GRAPHICS_ISLAND);
26446+
26447+ /*
26448+ * The SGX loses its register contents.
26449+ * Restore BIF registers. The MMU page tables are
26450+ * "normal" pages, so their contents should be kept.
26451+ */
26452+ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
26453+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
26454+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
26455+ PSB_RSGX32(PSB_CR_BIF_BANK1);
26456+
26457+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
26458+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
26459+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
26460+
26461+ if (IS_POULSBO(dev))
26462+ psb_reset(dev_priv, 1);
26463+
26464+ dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
26465+ PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
26466+ (void) PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
26467+
26468+ /*
26469+ * Persistent 3D base registers and USSE base registers.
26470+ */
26471+
26472+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
26473+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
26474+
26475+ /*
26476+ * Now, re-initialize the 3D engine.
26477+ */
26478+
26479+ if (dev_priv->xhw_on)
26480+ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
26481+
26482+ psb_scheduler_ta_mem_check(dev_priv);
26483+ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
26484+ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
26485+ PSB_TA_MEM_FLAG_TA |
26486+ PSB_TA_MEM_FLAG_RASTER |
26487+ PSB_TA_MEM_FLAG_HOSTA |
26488+ PSB_TA_MEM_FLAG_HOSTD |
26489+ PSB_TA_MEM_FLAG_INIT,
26490+ dev_priv->ta_mem->ta_memory->offset,
26491+ dev_priv->ta_mem->hw_data->offset,
26492+ dev_priv->ta_mem->hw_cookie);
26493+ }
26494+}
26495+
26496+/*
26497+ * powermgmt_suspend_videodec
26498+ *
26499+ * Description: Suspend the video decode hardware saving state and disabling
26500+ * as necessary.
26501+ */
26502+static void powermgmt_suspend_videodec(struct drm_device *dev, bool b_initiated_by_ospm)
26503+{
26504+ struct drm_psb_private *dev_priv =
26505+ (struct drm_psb_private *)dev->dev_private;
26506+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
26507+
26508+ if (!(g_hw_power_status_mask & PSB_VIDEO_DEC_ISLAND))
26509+ return;
26510+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_videodec\n");
26511+
26512+ if (b_initiated_by_ospm)
26513+ psb_wait_msvdx_idle(dev);
26514+ else {
26515+ /* return without power off for D0i3/APM */
26516+ if (psb_check_msvdx_idle(dev))
26517+ return;
26518+ }
26519+
26520+ psb_irq_uninstall_islands(dev, PSB_VIDEO_DEC_ISLAND);
26521+ /* UGLY: this reaches into the MSVDX private structure directly. It
26522+ * should live in a save_context() helper, but since no matching
26523+ * restore_context is needed it is done inline here; consider wrapping
26524+ * it in a function.
26525+ */
26526+ msvdx_priv->msvdx_needs_reset = 1;
26527+ powermgmt_down_island_power(dev, PSB_VIDEO_DEC_ISLAND);
26528+
26529+ MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_POWERDOWN);
26530+}
26531+
26532+/*
26533+ * powermgmt_resume_videodec
26534+ *
26535+ * Description: Resume the video decode hardware restoring state and enabling
26536+ * as necessary.
26537+ */
26538+static void powermgmt_resume_videodec(struct drm_device *dev)
26539+{
26540+ struct drm_psb_private *dev_priv =
26541+ (struct drm_psb_private *)dev->dev_private;
26542+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
26543+
26544+ if (g_hw_power_status_mask & PSB_VIDEO_DEC_ISLAND)
26545+ return;
26546+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_videodec\n");
26547+
26548+ powermgmt_up_island_power(dev, PSB_VIDEO_DEC_ISLAND);
26549+ MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_POWERUP);
26550+}
26551+
26552+/*
26553+ * powermgmt_suspend_videoenc
26554+ *
26555+ * Description: Suspend the video encode hardware saving state and disabling
26556+ * as necessary.
26557+ */
26558+static void powermgmt_suspend_videoenc(struct drm_device *dev, bool b_initiated_by_ospm)
26559+{
26560+ struct drm_psb_private *dev_priv =
26561+ (struct drm_psb_private *)dev->dev_private;
26562+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
26563+
26564+ if (!(g_hw_power_status_mask & PSB_VIDEO_ENC_ISLAND))
26565+ return;
26566+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_videoenc\n");
26567+
26568+ if (b_initiated_by_ospm)
26569+ lnc_wait_topaz_idle(dev);
26570+ else {
26571+ /* return without power off for D0i3/APM */
26572+ if (lnc_check_topaz_idle(dev))
26573+ return;
26574+ }
26575+
26576+ psb_irq_uninstall_islands(dev, PSB_VIDEO_ENC_ISLAND);
26577+ lnc_topaz_save_mtx_state(dev);
26578+ powermgmt_down_island_power(dev, PSB_VIDEO_ENC_ISLAND);
26579+
26580+ TOPAZ_NEW_PMSTATE(dev, topaz_priv, PSB_PMSTATE_POWERDOWN);
26581+}
26582+
26583+/*
26584+ * powermgmt_resume_videoenc
26585+ *
26586+ * Description: Resume the video encode hardware restoring state and enabling
26587+ * as necessary.
26588+ */
26589+static void powermgmt_resume_videoenc(struct drm_device *dev)
26590+{
26591+ struct drm_psb_private *dev_priv =
26592+ (struct drm_psb_private *)dev->dev_private;
26593+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
26594+
26595+ if (g_hw_power_status_mask & PSB_VIDEO_ENC_ISLAND)
26596+ return;
26597+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_videoenc\n");
26598+
26599+ powermgmt_up_island_power(dev, PSB_VIDEO_ENC_ISLAND);
26600+ lnc_topaz_restore_mtx_state(dev);
26601+
26602+ TOPAZ_NEW_PMSTATE(dev, topaz_priv, PSB_PMSTATE_POWERUP);
26603+}
26604+
26605+/*
26606+ * powermgmt_suspend_display
26607+ *
26608+ * Description: Suspend the display hardware saving state and disabling
26609+ * as necessary.
26610+ */
26611+static void powermgmt_suspend_display(struct drm_device *dev)
26612+{
26613+ struct drm_psb_private *dev_priv = dev->dev_private;
26614+ int pp_stat; unsigned long jb;
26615+
26616+ if (!(g_hw_power_status_mask & PSB_DISPLAY_ISLAND))
26617+ return;
26618+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_display\n");
26619+
26620+ save_display_registers(dev);
26621+
26622+ /*shutdown the panel*/
26623+ PSB_WVDC32(0, PP_CONTROL);
26624+
26625+ do {
26626+ pp_stat = PSB_RVDC32(PP_STATUS);
26627+ } while (pp_stat & 0x80000000);
26628+
26629+ /*turn off the plane*/
26630+ PSB_WVDC32(0x58000000, DSPACNTR);
26631+ PSB_WVDC32(0, DSPASURF);/*trigger the plane disable*/
26632+ jb = jiffies + 4; /*wait 4 ticks*/
26633+ while (time_before(jiffies, jb))
26634+ schedule();
26635+
26636+ /*turn off pipe*/
26637+ PSB_WVDC32(0x0, PIPEACONF);
26638+ jb = jiffies + 8; /*wait 8 ticks*/
26639+ while (time_before(jiffies, jb))
26640+ schedule();
26641+
26642+ /*turn off PLLs*/
26643+ PSB_WVDC32(0, MRST_DPLL_A);
26644+
26645+ powermgmt_down_island_power(dev, PSB_DISPLAY_ISLAND);
26646+}
26647+
26648+/*
26649+ * powermgmt_resume_display
26650+ *
26651+ * Description: Resume the display hardware restoring state and enabling
26652+ * as necessary.
26653+ */
26654+static void powermgmt_resume_display(struct pci_dev *pdev)
26655+{
26656+ struct drm_device *dev = pci_get_drvdata(pdev);
26657+ struct drm_psb_private *dev_priv = dev->dev_private;
26658+ struct psb_gtt *pg = dev_priv->pg;
26659+
26660+ if (g_hw_power_status_mask & PSB_DISPLAY_ISLAND)
26661+ return;
26662+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_display\n");
26663+
26664+ /* turn on the display power island */
26665+ powermgmt_up_island_power(dev, PSB_DISPLAY_ISLAND);
26666+
26667+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
26668+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
26669+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
26670+
26671+ /* Don't reinitialize the GTT as it is unnecessary. The gtt is
26672+ * stored in memory so it will automatically be restored. All
26673+ * we need to do is restore the PGETBL_CTL which we already do
26674+ * above.
26675+ */
26676+ /*psb_gtt_init(dev_priv->pg, 1);*/
26677+
26678+ restore_display_registers(dev);
26679+}
26680+
26681+/*
26682+ * powermgmt_suspend_pci
26683+ *
26684+ * Description: Suspend the pci device saving state and disabling
26685+ * as necessary.
26686+ */
26687+static void powermgmt_suspend_pci(struct pci_dev *pdev)
26688+{
26689+ struct drm_device *dev = pci_get_drvdata(pdev);
26690+ struct drm_psb_private *dev_priv = dev->dev_private;
26691+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
26692+ int bsm, vbt;
26693+
26694+ if (!g_pci_power_status)
26695+ return;
26696+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_pci\n");
26697+
26698+ pci_save_state(pdev);
26699+ pci_read_config_dword(pci_gfx_root, 0x5C, &bsm);
26700+ dev_priv->saveBSM = bsm;
26701+ pci_read_config_dword(pci_gfx_root, 0xFC, &vbt);
26702+ dev_priv->saveVBT = vbt;
26703+ pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
26704+ pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
26705+
26706+ pci_disable_device(pdev);
26707+ pci_set_power_state(pdev, PCI_D3hot);
26708+
26709+ g_pci_power_status = 0;
26710+}
26711+
26712+/*
26713+ * powermgmt_resume_pci
26714+ *
26715+ * Description: Resume the pci device restoring state and enabling
26716+ * as necessary.
26717+ */
26718+static int powermgmt_resume_pci(struct pci_dev *pdev)
26719+{
26720+ struct drm_device *dev = pci_get_drvdata(pdev);
26721+ struct drm_psb_private *dev_priv = dev->dev_private;
26722+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
26723+ int ret = 0;
26724+
26725+ if (g_pci_power_status)
26726+ return ret;
26727+
26728+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_pci\n");
26729+
26730+ pci_set_power_state(pdev, PCI_D0);
26731+ pci_restore_state(pdev);
26732+ pci_write_config_dword(pci_gfx_root, 0x5c, dev_priv->saveBSM);
26733+ pci_write_config_dword(pci_gfx_root, 0xFC, dev_priv->saveVBT);
26734+ /* restoring MSI address and data in PCIx space */
26735+ pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
26736+ pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
26737+ ret = pci_enable_device(pdev);
26738+
26739+ g_pci_power_status = 1;
26740+
26741+ return ret;
26742+}
26743+
26744+/*
26745+ * powermgmt_suspend
26746+ *
26747+ * Description: OSPM is telling our driver to suspend to save state
26748+ * and power down all hardware.
26749+ */
26750+int powermgmt_suspend(struct pci_dev *pdev, pm_message_t state)
26751+{
26752+ int ret;
26753+ ret = powermgmt_suspend_islands(pdev, PSB_ALL_ISLANDS, true);
26754+ if (ret == -EBUSY)
26755+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend***BUSY!!!!!!\n");
26756+
26757+ return ret;
26758+}
26759+
26760+/*
26761+ * powermgmt_suspend_islands
26762+ *
26763+ * Description: Suspend the specified island by saving state
26764+ * and power down the hardware.
26765+ */
26766+int powermgmt_suspend_islands(struct pci_dev *pdev, int hw_islands, bool b_initiated_by_ospm)
26767+{
26768+ struct drm_device *dev = pci_get_drvdata(pdev);
26769+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
26770+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
26771+ unsigned long irq_flags;
26772+ int ret = 0;
26773+
26774+ if (in_interrupt()) {
26775+ /*
26776+ * If an interrupt handler calls powermgmt_suspend_islands(), we can't call mutex_lock.
26777+ * Right now, only video enc/dec calls us from an interrupt handler, and it should be
26778+ * safe to just proceed since the only code that resumes video enc/dec is internal to
26779+ * our driver and should be written in such a way that it doesn't cause any issues. If
26780+ * we are already in the middle of an OSPM-initiated suspend, just return since that
26781+ * will take care of powering off video enc/dec for us. Also, don't set g_suspend_mask
26782+ * and g_suspend_in_progress: this function runs atomically since we are in an
26783+ * interrupt handler, so no outside parties will get the chance to look at them, and we
26784+ * don't want to overwrite any pending suspend operation that we interrupted.
26785+ */
26786+ if (b_initiated_by_ospm)
26787+ return ret;
26788+ }
26789+ else {
26790+ mutex_lock(&g_state_change_mutex);
26791+
26792+ g_suspend_mask = hw_islands;
26793+ g_suspend_in_progress = true;
26794+ }
26795+ atomic_inc(&g_pm_waiters);
26796+
26797+ if (g_hw_power_status_mask & PSB_GRAPHICS_ISLAND) {
26798+ if (atomic_read(&g_graphics_access_count))
26799+ ret = -EBUSY;
26800+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) !=
26801+ _PSB_C2_SOCIF_EMPTY) ||
26802+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
26803+ _PSB_C2B_STATUS_BUSY) != 0)) {
26804+ ret = -EBUSY;
26805+ }
26806+ spin_lock_irqsave(&scheduler->lock, irq_flags);
26807+ if (!scheduler->idle ||
26808+ !list_empty(&scheduler->raster_queue) ||
26809+ !list_empty(&scheduler->ta_queue) ||
26810+ !list_empty(&scheduler->hp_raster_queue) ||
26811+ scheduler->feedback_task) {
26812+ ret = -EBUSY;
26813+ }
26814+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
26815+ }
26816+ if ((hw_islands & PSB_VIDEO_DEC_ISLAND) &&
26817+ atomic_read(&g_videodec_access_count))
26818+ ret = -EBUSY;
26819+ if ((hw_islands & PSB_VIDEO_ENC_ISLAND) &&
26820+ atomic_read(&g_videoenc_access_count))
26821+ ret = -EBUSY;
26822+ if ((hw_islands & PSB_DISPLAY_ISLAND) &&
26823+ atomic_read(&g_display_access_count))
26824+ ret = -EBUSY;
26825+
26826+ atomic_dec(&g_pm_waiters);
26827+
26828+ if (!ret) {
26829+ /*disable gfx interrupt later when sgx is idle*/
26830+ psb_irq_uninstall_islands(dev, hw_islands & ~PSB_GRAPHICS_ISLAND &
26831+ ~PSB_VIDEO_ENC_ISLAND & ~PSB_VIDEO_DEC_ISLAND);
26832+
26833+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
26834+ powermgmt_suspend_videodec(dev, b_initiated_by_ospm);
26835+ if(IS_MRST(dev)) {
26836+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
26837+ powermgmt_suspend_videoenc(dev, b_initiated_by_ospm);
26838+ }
26839+ if (hw_islands & PSB_GRAPHICS_ISLAND)
26840+ powermgmt_suspend_graphics(dev, b_initiated_by_ospm);
26841+ if (hw_islands & PSB_DISPLAY_ISLAND)
26842+ powermgmt_suspend_display(dev);
26843+ if (g_hw_power_status_mask == 0) {
26844+ if (drm_core_check_feature(dev, DRIVER_MODESET))
26845+ drm_irq_uninstall(dev);
26846+ powermgmt_suspend_pci(pdev);
26847+ }
26848+ }
26849+
26850+#ifdef OSPM_STAT
26851+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
26852+ bool b_change = true;
26853+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0)
26854+ dev_priv->gfx_d0_time += jiffies - dev_priv->gfx_last_mode_change;
26855+ else if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
26856+ dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
26857+ else
26858+ b_change = false;
26859+ if (b_change) {
26860+ dev_priv->gfx_last_mode_change = jiffies;
26861+ if (g_hw_power_status_mask & PSB_DISPLAY_ISLAND) {
26862+ dev_priv->graphics_state = PSB_PWR_STATE_D0i3;
26863+ dev_priv->gfx_d0i3_cnt++;
26864+ } else {
26865+ dev_priv->graphics_state = PSB_PWR_STATE_D3;
26866+ dev_priv->gfx_d3_cnt++;
26867+ }
26868+ }
26869+ }
26870+#endif
26871+
26872+ if (!in_interrupt()) {
26873+ g_suspend_in_progress = false;
26874+ mutex_unlock(&g_state_change_mutex);
26875+ }
26876+
26877+ return ret;
26878+}
26879+
26880+/*
26881+ * powermgmt_resume
26882+ *
26883+ * Description: OSPM is telling our driver to resume so restore state
26884+ * and power up display. Leave graphics and video powered off as they
26885+ * will be powered up once needed.
26886+ */
26887+int powermgmt_resume(struct pci_dev *pdev)
26888+{
26889+ return 0;
26890+ //return powermgmt_resume_islands(pdev, PSB_DISPLAY_ISLAND);
26891+}
26892+
26893+/*
26894+ * powermgmt_resume_islands
26895+ *
26896+ * Description: Resume the specified islands by restoring state
26897+ * and power things up.
26898+ */
26899+int powermgmt_resume_islands(struct pci_dev *pdev, int hw_islands)
26900+{
26901+ struct drm_device *dev = pci_get_drvdata(pdev);
26902+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
26903+ bool b_install_irq = false;
26904+ int ret = 0;
26905+
26906+ if (!g_forcing_resume)
26907+ mutex_lock(&g_state_change_mutex);
26908+
26909+ g_resume_mask = hw_islands;
26910+ g_resume_in_progress = true;
26911+
26912+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_islands\n");
26913+
26914+ if (g_hw_power_status_mask == 0) {
26915+ if (powermgmt_resume_pci(pdev))
26916+ goto resume_exit;
26917+ b_install_irq = drm_core_check_feature(dev, DRIVER_MODESET);
26918+ }
26919+
26920+ if (hw_islands & PSB_DISPLAY_ISLAND)
26921+ powermgmt_resume_display(pdev);
26922+ if (IS_MRST(dev)) {
26923+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
26924+ powermgmt_resume_videoenc(dev);
26925+ }
26926+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
26927+ powermgmt_resume_videodec(dev);
26928+ if (hw_islands & PSB_GRAPHICS_ISLAND)
26929+ powermgmt_resume_graphics(dev);
26930+ if (b_install_irq)
26931+ drm_irq_install(dev);
26932+ else {
26933+ psb_irq_preinstall_islands(dev, hw_islands);
26934+ psb_irq_postinstall_islands(dev, hw_islands);
26935+ }
26936+
26937+#ifdef OSPM_STAT
26938+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
26939+ bool b_change = true;
26940+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
26941+ dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
26942+ else if (dev_priv->graphics_state == PSB_PWR_STATE_D3)
26943+ dev_priv->gfx_d3_time += jiffies - dev_priv->gfx_last_mode_change;
26944+ else
26945+ b_change = false;
26946+
26947+ if (b_change) {
26948+ dev_priv->gfx_last_mode_change = jiffies;
26949+ dev_priv->graphics_state = PSB_PWR_STATE_D0;
26950+ dev_priv->gfx_d0_cnt++;
26951+ }
26952+ }
26953+#endif
26954+
26955+ g_resume_in_progress = false;
26956+
26957+resume_exit:
26958+ if (!g_forcing_resume)
26959+ mutex_unlock(&g_state_change_mutex);
26960+ return ret;
26961+}
26962+
26963+/*
26964+ * powermgmt_using_hw_begin
26965+ *
26966+ * Description: Notify PowerMgmt module that you will be accessing the
26967+ * specified islands' hw so don't power it off. If force_on is true,
26968+ * this will power on any of the specified islands which are off.
26969+ * Otherwise, if any of them are off, this will return false and the
26970+ * caller must not access the hw.
26971+ *
26972+ * NOTE *** If this is called from an interrupt handler or other atomic
26973+ * context, then it will return false if we are in the middle of a
26974+ * power state transition and the caller will be expected to handle that
26975+ * even if force_on is set to true.
26976+ */
26977+bool powermgmt_using_hw_begin(struct pci_dev *pdev, int hw_islands, bool force_on)
26978+{
26979+ bool ret = true;
26980+ int off_islands = 0;
26981+ bool b_atomic = (in_interrupt() || in_atomic());
26982+
26983+ if (!b_atomic)
26984+ mutex_lock(&g_state_change_mutex);
26985+
26986+ if (b_atomic &&
26987+ (powermgmt_is_suspend_in_progress(hw_islands) ||
26988+ powermgmt_is_resume_in_progress(hw_islands))) {
26989+ if (force_on)
26990+ printk(KERN_WARNING "!!!WARNING!!! powermgmt_using_hw_begin - force_on failed - be sure to check return value !!!WARNING!!!\n");
26991+ ret = false;
26992+ } else {
26993+ off_islands = hw_islands & (PSB_ALL_ISLANDS & ~g_hw_power_status_mask);
26994+ if (off_islands) {
26995+ if (force_on) {
26996+ g_forcing_resume = true;
26997+ powermgmt_resume_islands(pdev, off_islands);
26998+ g_forcing_resume = false;
26999+ } else {
27000+ ret = false;
27001+ }
27002+ }
27003+ }
27004+
27005+ if (ret) {
27006+ if (hw_islands & PSB_GRAPHICS_ISLAND)
27007+ atomic_inc(&g_graphics_access_count);
27008+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
27009+ atomic_inc(&g_videoenc_access_count);
27010+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
27011+ atomic_inc(&g_videodec_access_count);
27012+ if (hw_islands & PSB_DISPLAY_ISLAND)
27013+ atomic_inc(&g_display_access_count);
27014+ }
27015+
27016+ if (!b_atomic)
27017+ mutex_unlock(&g_state_change_mutex);
27018+
27019+ return ret;
27020+}
27021+
27022+/*
27023+ * powermgmt_using_hw_end
27024+ *
27025+ * Description: Notify PowerMgmt module that you are done accessing the
27026+ * specified islands' hw so feel free to power it off. Note that this
27027+ * function doesn't actually power off the islands. The caller should
27028+ * call psb_suspend(hw_islands) if it wishes to proactively power them
27029+ * down.
27030+ */
27031+void powermgmt_using_hw_end(int hw_islands)
27032+{
27033+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
27034+ atomic_dec(&g_graphics_access_count);
27035+ }
27036+ if (hw_islands & PSB_VIDEO_ENC_ISLAND){
27037+ atomic_dec(&g_videoenc_access_count);
27038+ }
27039+ if (hw_islands & PSB_VIDEO_DEC_ISLAND){
27040+ atomic_dec(&g_videodec_access_count);
27041+ }
27042+ if (hw_islands & PSB_DISPLAY_ISLAND){
27043+ atomic_dec(&g_display_access_count);
27044+ }
27045+
27046+ if(!atomic_read(&g_graphics_access_count) &&
27047+ !atomic_read(&g_videoenc_access_count) &&
27048+ !atomic_read(&g_videodec_access_count) &&
27049+ !atomic_read(&g_display_access_count) &&
27050+ atomic_read(&g_pm_waiters))
27051+
27052+ WARN_ON(atomic_read(&g_graphics_access_count) < 0);
27053+ WARN_ON(atomic_read(&g_videoenc_access_count) < 0);
27054+ WARN_ON(atomic_read(&g_videodec_access_count) < 0);
27055+ WARN_ON(atomic_read(&g_display_access_count) < 0);
27056+}
27057+
27058+/*
27059+ * powermgmt_is_hw_on
27060+ *
27061+ * Description: Do an instantaneous check of whether the specified islands
27062+ * are on. Only use this in cases where you know the g_state_change_mutex
27063+ * is already held, such as in irq install/uninstall. Otherwise, use
27064+ * powermgmt_using_hw_begin().
27065+ */
27066+bool powermgmt_is_hw_on(struct pci_dev *pdev, int hw_islands)
27067+{
27068+ return ((g_hw_power_status_mask & hw_islands) == hw_islands);
27069+}
27070+
27071+/*
27072+ * powermgmt_is_suspend_in_progress
27073+ *
27074+ * Description: Are we in the middle of suspending any of the
27075+ * specified hardware?
27076+ */
27077+bool powermgmt_is_suspend_in_progress(int hw_islands)
27078+{
27079+ return (g_suspend_in_progress) ? ((g_suspend_mask & hw_islands) ? true : false) : false;
27080+}
27081+
27082+/*
27083+ * powermgmt_is_resume_in_progress
27084+ *
27085+ * Description: Are we in the middle of resuming any of the
27086+ * specified hardware?
27087+ */
27088+bool powermgmt_is_resume_in_progress(int hw_islands)
27089+{
27090+ return (g_resume_in_progress) ? ((g_resume_mask & hw_islands) ? true : false) : false;
27091+}
27092+/*
27093+ * powermgmt_is_gfx_busy
27094+ *
27095+ * Description: Is someone using the GFX HW currently?
27096+ *
27097+ */
27098+bool powermgmt_is_gfx_busy(void)
27099+{
27100+ return (atomic_read(&g_graphics_access_count) ? true : false);
27101+}
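As a usage illustration only (not part of this patch), here is a minimal sketch of the internally driven D0i3 path described above: a caller passes an island mask and treats -EBUSY as "hardware still in use". The helper name example_try_power_gate_videodec() and the retry policy are assumptions.

/* Illustrative sketch only -- not part of this patch. */
static void example_try_power_gate_videodec(struct pci_dev *pdev)
{
	/* false => internally initiated (D0i3/APM), not an OSPM suspend */
	int ret = powermgmt_suspend_islands(pdev, PSB_VIDEO_DEC_ISLAND, false);

	if (ret == -EBUSY) {
		/*
		 * An access count is still held or the engine is not idle;
		 * the island stays powered and the caller simply retries
		 * from a later idle check.
		 */
	}
}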
27102diff --git a/drivers/gpu/drm/psb/psb_powermgmt.h b/drivers/gpu/drm/psb/psb_powermgmt.h
27103new file mode 100644
27104index 0000000..5b40495
27105--- /dev/null
27106+++ b/drivers/gpu/drm/psb/psb_powermgmt.h
27107@@ -0,0 +1,73 @@
27108+/**************************************************************************
27109+ * Copyright (c) 2009, Intel Corporation.
27110+ * All Rights Reserved.
27111+
27112+ * Permission is hereby granted, free of charge, to any person obtaining a
27113+ * copy of this software and associated documentation files (the "Software"),
27114+ * to deal in the Software without restriction, including without limitation
27115+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
27116+ * and/or sell copies of the Software, and to permit persons to whom the
27117+ * Software is furnished to do so, subject to the following conditions:
27118+ *
27119+ * The above copyright notice and this permission notice (including the next
27120+ * paragraph) shall be included in all copies or substantial portions of the
27121+ * Software.
27122+ *
27123+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27124+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27125+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
27126+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27127+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27128+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27129+ * SOFTWARE.
27130+ *
27131+ * Authors:
27132+ * Benjamin Defnet <benjamin.r.defnet@intel.com>
27133+ *
27134+ */
27135+#ifndef _PSB_POWERMGMT_H_
27136+#define _PSB_POWERMGMT_H_
27137+
27138+#include <linux/pci.h>
27139+
27140+#define PSB_GRAPHICS_ISLAND 0x1
27141+#define PSB_VIDEO_ENC_ISLAND 0x2
27142+#define PSB_VIDEO_DEC_ISLAND 0x4
27143+#define PSB_DISPLAY_ISLAND 0x8
27144+#define PSB_ALL_ISLANDS 0xf
27145+
27146+void powermgmt_init(void);
27147+void powermgmt_shutdown(void);
27148+
27149+/*
27150+ * OSPM will call these functions
27151+ */
27152+int powermgmt_suspend(struct pci_dev *pdev, pm_message_t state);
27153+int powermgmt_resume(struct pci_dev *pdev);
27154+
27155+/*
27156+ * These are the functions the driver should call to do internally driven
27157+ * power gating (D0i3)
27158+ */
27159+int powermgmt_suspend_islands(struct pci_dev *pdev, int hw_islands, bool b_initiated_by_ospm);
27160+int powermgmt_resume_islands(struct pci_dev *pdev, int hw_islands);
27161+
27162+/*
27163+ * These are the functions the driver should use to wrap all hw access
27164+ * (i.e. register reads and writes)
27165+ */
27166+bool powermgmt_using_hw_begin(struct pci_dev *pdev, int hw_islands, bool force_on);
27167+void powermgmt_using_hw_end(int hw_islands);
27168+
27169+/*
27170+ * Use this function to do an instantaneous check of whether the hw is on.
27171+ * Only use this in cases where you know the g_state_change_mutex
27172+ * is already held such as in irq install/uninstall and you need to
27173+ * prevent a deadlock situation. Otherwise use powermgmt_using_hw_begin().
27174+ */
27175+bool powermgmt_is_hw_on(struct pci_dev *pdev, int hw_islands);
27176+
27177+bool powermgmt_is_suspend_in_progress(int hw_islands);
27178+bool powermgmt_is_resume_in_progress(int hw_islands);
27179+bool powermgmt_is_gfx_busy(void);
27180+#endif /*_PSB_POWERMGMT_H_*/
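For illustration, a minimal sketch of the begin/end wrapping convention the header above asks for, assuming the register macros defined elsewhere in this patch; the helper name example_read_2d_status() is hypothetical and not part of this patch.

/* Illustrative sketch only -- not part of this patch. */
static int example_read_2d_status(struct drm_device *dev)
{
	uint32_t status;

	/*
	 * Take an access reference on the graphics island, powering it up
	 * if needed.  Even with force_on this can fail in atomic context,
	 * so the return value must always be checked.
	 */
	if (!powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true))
		return -EBUSY;

	status = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);

	/* Drop the reference so the island may be power gated again. */
	powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);

	return (status & _PSB_C2B_STATUS_BUSY) ? -EBUSY : 0;
}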
27181diff --git a/drivers/gpu/drm/psb/psb_reg.h b/drivers/gpu/drm/psb/psb_reg.h
27182new file mode 100644
27183index 0000000..4974689
27184--- /dev/null
27185+++ b/drivers/gpu/drm/psb/psb_reg.h
27186@@ -0,0 +1,574 @@
27187+/**************************************************************************
27188+ *
27189+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
27190+ * Copyright (c) 2007, Intel Corporation.
27191+ * All Rights Reserved.
27192+ *
27193+ * This program is free software; you can redistribute it and/or modify it
27194+ * under the terms and conditions of the GNU General Public License,
27195+ * version 2, as published by the Free Software Foundation.
27196+ *
27197+ * This program is distributed in the hope it will be useful, but WITHOUT
27198+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
27199+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
27200+ * more details.
27201+ *
27202+ * You should have received a copy of the GNU General Public License along with
27203+ * this program; if not, write to the Free Software Foundation, Inc.,
27204+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
27205+ *
27206+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
27207+ * develop this driver.
27208+ *
27209+ **************************************************************************/
27210+/*
27211+ */
27212+#ifndef _PSB_REG_H_
27213+#define _PSB_REG_H_
27214+
27215+#define PSB_CR_CLKGATECTL 0x0000
27216+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
27217+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
27218+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
27219+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
27220+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
27221+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
27222+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
27223+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
27224+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
27225+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
27226+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
27227+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
27228+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
27229+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
27230+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
27231+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
27232+
27233+#define PSB_CR_CORE_ID 0x0010
27234+#define _PSB_CC_ID_ID_SHIFT (16)
27235+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
27236+#define _PSB_CC_ID_CONFIG_SHIFT (0)
27237+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
27238+
27239+#define PSB_CR_CORE_REVISION 0x0014
27240+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
27241+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
27242+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
27243+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
27244+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
27245+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
27246+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
27247+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
27248+
27249+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
27250+
27251+#define PSB_CR_SOFT_RESET 0x0080
27252+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
27253+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
27254+#define _PSB_CS_RESET_USE_RESET (1 << 4)
27255+#define _PSB_CS_RESET_TA_RESET (1 << 3)
27256+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
27257+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
27258+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
27259+
27260+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
27261+
27262+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
27263+
27264+#define PSB_CR_EVENT_STATUS2 0x0118
27265+
27266+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
27267+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
27268+
27269+#define PSB_CR_EVENT_STATUS 0x012C
27270+
27271+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
27272+
27273+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
27274+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
27275+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
27276+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
27277+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
27278+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
27279+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
27280+#define _PSB_CE_SW_EVENT (1 << 14)
27281+#define _PSB_CE_TA_FINISHED (1 << 13)
27282+#define _PSB_CE_TA_TERMINATE (1 << 12)
27283+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
27284+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
27285+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
27286+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
27287+
27288+
27289+#define PSB_USE_OFFSET_MASK 0x0007FFFF
27290+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
27291+#define PSB_CR_USE_CODE_BASE0 0x0A0C
27292+#define PSB_CR_USE_CODE_BASE1 0x0A10
27293+#define PSB_CR_USE_CODE_BASE2 0x0A14
27294+#define PSB_CR_USE_CODE_BASE3 0x0A18
27295+#define PSB_CR_USE_CODE_BASE4 0x0A1C
27296+#define PSB_CR_USE_CODE_BASE5 0x0A20
27297+#define PSB_CR_USE_CODE_BASE6 0x0A24
27298+#define PSB_CR_USE_CODE_BASE7 0x0A28
27299+#define PSB_CR_USE_CODE_BASE8 0x0A2C
27300+#define PSB_CR_USE_CODE_BASE9 0x0A30
27301+#define PSB_CR_USE_CODE_BASE10 0x0A34
27302+#define PSB_CR_USE_CODE_BASE11 0x0A38
27303+#define PSB_CR_USE_CODE_BASE12 0x0A3C
27304+#define PSB_CR_USE_CODE_BASE13 0x0A40
27305+#define PSB_CR_USE_CODE_BASE14 0x0A44
27306+#define PSB_CR_USE_CODE_BASE15 0x0A48
27307+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
27308+#define _PSB_CUC_BASE_DM_SHIFT (25)
27309+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
27310+#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
27311+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
27312+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
27313+#define _PSB_CUC_DM_VERTEX (0)
27314+#define _PSB_CUC_DM_PIXEL (1)
27315+#define _PSB_CUC_DM_RESERVED (2)
27316+#define _PSB_CUC_DM_EDM (3)
27317+
27318+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
27319+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
27320+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
27321+
27322+#define PSB_CR_EVENT_KICKER 0x0AC4
27323+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
27324+
27325+#define PSB_CR_EVENT_KICK 0x0AC8
27326+#define _PSB_CE_KICK_NOW (1 << 0)
27327+
27328+
27329+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
27330+
27331+#define PSB_CR_BIF_CTRL 0x0C00
27332+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
27333+#define _PSB_CB_CTRL_INVALDC (1 << 3)
27334+#define _PSB_CB_CTRL_FLUSH (1 << 2)
27335+
27336+#define PSB_CR_BIF_INT_STAT 0x0C04
27337+
27338+#define PSB_CR_BIF_FAULT 0x0C08
27339+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
27340+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
27341+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
27342+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
27343+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
27344+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
27345+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
27346+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
27347+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
27348+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
27349+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
27350+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
27351+
27352+#define PSB_CR_BIF_BANK0 0x0C78
27353+
27354+#define PSB_CR_BIF_BANK1 0x0C7C
27355+
27356+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
27357+
27358+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
27359+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
27360+
27361+#define PSB_CR_2D_SOCIF 0x0E18
27362+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
27363+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
27364+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
27365+
27366+#define PSB_CR_2D_BLIT_STATUS 0x0E04
27367+#define _PSB_C2B_STATUS_BUSY (1 << 24)
27368+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
27369+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
27370+
27371+/*
27372+ * 2D defs.
27373+ */
27374+
27375+/*
27376+ * 2D Slave Port Data : Block Header's Object Type
27377+ */
27378+
27379+#define PSB_2D_CLIP_BH (0x00000000)
27380+#define PSB_2D_PAT_BH (0x10000000)
27381+#define PSB_2D_CTRL_BH (0x20000000)
27382+#define PSB_2D_SRC_OFF_BH (0x30000000)
27383+#define PSB_2D_MASK_OFF_BH (0x40000000)
27384+#define PSB_2D_RESERVED1_BH (0x50000000)
27385+#define PSB_2D_RESERVED2_BH (0x60000000)
27386+#define PSB_2D_FENCE_BH (0x70000000)
27387+#define PSB_2D_BLIT_BH (0x80000000)
27388+#define PSB_2D_SRC_SURF_BH (0x90000000)
27389+#define PSB_2D_DST_SURF_BH (0xA0000000)
27390+#define PSB_2D_PAT_SURF_BH (0xB0000000)
27391+#define PSB_2D_SRC_PAL_BH (0xC0000000)
27392+#define PSB_2D_PAT_PAL_BH (0xD0000000)
27393+#define PSB_2D_MASK_SURF_BH (0xE0000000)
27394+#define PSB_2D_FLUSH_BH (0xF0000000)
27395+
27396+/*
27397+ * Clip Definition block (PSB_2D_CLIP_BH)
27398+ */
27399+#define PSB_2D_CLIPCOUNT_MAX (1)
27400+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
27401+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
27402+#define PSB_2D_CLIPCOUNT_SHIFT (0)
27403+/* clip rectangle min & max */
27404+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
27405+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
27406+#define PSB_2D_CLIP_XMAX_SHIFT (12)
27407+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
27408+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
27409+#define PSB_2D_CLIP_XMIN_SHIFT (0)
27410+/* clip rectangle offset */
27411+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
27412+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
27413+#define PSB_2D_CLIP_YMAX_SHIFT (12)
27414+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
27415+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
27416+#define PSB_2D_CLIP_YMIN_SHIFT (0)
27417+
27418+/*
27419+ * Pattern Control (PSB_2D_PAT_BH)
27420+ */
27421+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
27422+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
27423+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
27424+#define PSB_2D_PAT_WIDTH_SHIFT (5)
27425+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
27426+#define PSB_2D_PAT_YSTART_SHIFT (10)
27427+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
27428+#define PSB_2D_PAT_XSTART_SHIFT (15)
27429+
27430+/*
27431+ * 2D Control block (PSB_2D_CTRL_BH)
27432+ */
27433+/* Present Flags */
27434+#define PSB_2D_SRCCK_CTRL (0x00000001)
27435+#define PSB_2D_DSTCK_CTRL (0x00000002)
27436+#define PSB_2D_ALPHA_CTRL (0x00000004)
27437+/* Colour Key Colour (SRC/DST)*/
27438+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
27439+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
27440+#define PSB_2D_CK_COL_SHIFT (0)
27441+/* Colour Key Mask (SRC/DST)*/
27442+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
27443+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
27444+#define PSB_2D_CK_MASK_SHIFT (0)
27445+/* Alpha Control (Alpha/RGB)*/
27446+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
27447+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
27448+#define PSB_2D_GBLALPHA_SHIFT (12)
27449+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
27450+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
27451+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
27452+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
27453+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
27454+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
27455+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
27456+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
27457+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
27458+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
27459+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
27460+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
27461+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
27462+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
27463+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
27464+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
27465+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
27466+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
27467+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
27468+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
27469+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
27470+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
27471+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
27472+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
27473+
27474+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
27475+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
27476+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
27477+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
27478+
27479+/*
27480+ *Source Offset (PSB_2D_SRC_OFF_BH)
27481+ */
27482+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
27483+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
27484+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
27485+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
27486+
27487+/*
27488+ * Mask Offset (PSB_2D_MASK_OFF_BH)
27489+ */
27490+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
27491+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
27492+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
27493+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
27494+
27495+/*
27496+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
27497+ */
27498+
27499+/*
27500+ *Blit Rectangle (PSB_2D_BLIT_BH)
27501+ */
27502+
27503+#define PSB_2D_ROT_MASK (3<<25)
27504+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
27505+#define PSB_2D_ROT_NONE (0<<25)
27506+#define PSB_2D_ROT_90DEGS (1<<25)
27507+#define PSB_2D_ROT_180DEGS (2<<25)
27508+#define PSB_2D_ROT_270DEGS (3<<25)
27509+
27510+#define PSB_2D_COPYORDER_MASK (3<<23)
27511+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
27512+#define PSB_2D_COPYORDER_TL2BR (0<<23)
27513+#define PSB_2D_COPYORDER_BR2TL (1<<23)
27514+#define PSB_2D_COPYORDER_TR2BL (2<<23)
27515+#define PSB_2D_COPYORDER_BL2TR (3<<23)
27516+
27517+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
27518+#define PSB_2D_DSTCK_DISABLE (0x00000000)
27519+#define PSB_2D_DSTCK_PASS (0x00200000)
27520+#define PSB_2D_DSTCK_REJECT (0x00400000)
27521+
27522+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
27523+#define PSB_2D_SRCCK_DISABLE (0x00000000)
27524+#define PSB_2D_SRCCK_PASS (0x00080000)
27525+#define PSB_2D_SRCCK_REJECT (0x00100000)
27526+
27527+#define PSB_2D_CLIP_ENABLE (0x00040000)
27528+
27529+#define PSB_2D_ALPHA_ENABLE (0x00020000)
27530+
27531+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
27532+#define PSB_2D_PAT_MASK (0x00010000)
27533+#define PSB_2D_USE_PAT (0x00010000)
27534+#define PSB_2D_USE_FILL (0x00000000)
27535+/*
27536+ * Tungsten Graphics note on rop codes: If rop A and rop B are
27537+ * identical, the mask surface will not be read and need not be
27538+ * set up.
27539+ */
27540+
27541+#define PSB_2D_ROP3B_MASK (0x0000FF00)
27542+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
27543+#define PSB_2D_ROP3B_SHIFT (8)
27544+/* rop code A */
27545+#define PSB_2D_ROP3A_MASK (0x000000FF)
27546+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
27547+#define PSB_2D_ROP3A_SHIFT (0)
27548+
27549+#define PSB_2D_ROP4_MASK (0x0000FFFF)
27550+/*
27551+ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
27552+ * Fill Colour RGBA8888
27553+ */
27554+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
27555+#define PSB_2D_FILLCOLOUR_SHIFT (0)
27556+/*
27557+ * DWORD1: (Always Present)
27558+ * X Start (Dest)
27559+ * Y Start (Dest)
27560+ */
27561+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
27562+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
27563+#define PSB_2D_DST_XSTART_SHIFT (12)
27564+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
27565+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
27566+#define PSB_2D_DST_YSTART_SHIFT (0)
27567+/*
27568+ * DWORD2: (Always Present)
27569+ * X Size (Dest)
27570+ * Y Size (Dest)
27571+ */
27572+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
27573+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
27574+#define PSB_2D_DST_XSIZE_SHIFT (12)
27575+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
27576+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
27577+#define PSB_2D_DST_YSIZE_SHIFT (0)
27578+
27579+/*
27580+ * Source Surface (PSB_2D_SRC_SURF_BH)
27581+ */
27582+/*
27583+ * WORD 0
27584+ */
27585+
27586+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
27587+#define PSB_2D_SRC_1_PAL (0x00000000)
27588+#define PSB_2D_SRC_2_PAL (0x00008000)
27589+#define PSB_2D_SRC_4_PAL (0x00010000)
27590+#define PSB_2D_SRC_8_PAL (0x00018000)
27591+#define PSB_2D_SRC_8_ALPHA (0x00020000)
27592+#define PSB_2D_SRC_4_ALPHA (0x00028000)
27593+#define PSB_2D_SRC_332RGB (0x00030000)
27594+#define PSB_2D_SRC_4444ARGB (0x00038000)
27595+#define PSB_2D_SRC_555RGB (0x00040000)
27596+#define PSB_2D_SRC_1555ARGB (0x00048000)
27597+#define PSB_2D_SRC_565RGB (0x00050000)
27598+#define PSB_2D_SRC_0888ARGB (0x00058000)
27599+#define PSB_2D_SRC_8888ARGB (0x00060000)
27600+#define PSB_2D_SRC_8888UYVY (0x00068000)
27601+#define PSB_2D_SRC_RESERVED (0x00070000)
27602+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
27603+
27604+
27605+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
27606+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
27607+#define PSB_2D_SRC_STRIDE_SHIFT (0)
27608+/*
27609+ * WORD 1 - Base Address
27610+ */
27611+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
27612+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
27613+#define PSB_2D_SRC_ADDR_SHIFT (2)
27614+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
27615+
27616+/*
27617+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
27618+ */
27619+/*
27620+ * WORD 0
27621+ */
27622+
27623+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
27624+#define PSB_2D_PAT_1_PAL (0x00000000)
27625+#define PSB_2D_PAT_2_PAL (0x00008000)
27626+#define PSB_2D_PAT_4_PAL (0x00010000)
27627+#define PSB_2D_PAT_8_PAL (0x00018000)
27628+#define PSB_2D_PAT_8_ALPHA (0x00020000)
27629+#define PSB_2D_PAT_4_ALPHA (0x00028000)
27630+#define PSB_2D_PAT_332RGB (0x00030000)
27631+#define PSB_2D_PAT_4444ARGB (0x00038000)
27632+#define PSB_2D_PAT_555RGB (0x00040000)
27633+#define PSB_2D_PAT_1555ARGB (0x00048000)
27634+#define PSB_2D_PAT_565RGB (0x00050000)
27635+#define PSB_2D_PAT_0888ARGB (0x00058000)
27636+#define PSB_2D_PAT_8888ARGB (0x00060000)
27637+
27638+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
27639+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
27640+#define PSB_2D_PAT_STRIDE_SHIFT (0)
27641+/*
27642+ * WORD 1 - Base Address
27643+ */
27644+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
27645+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
27646+#define PSB_2D_PAT_ADDR_SHIFT (2)
27647+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
27648+
27649+/*
27650+ * Destination Surface (PSB_2D_DST_SURF_BH)
27651+ */
27652+/*
27653+ * WORD 0
27654+ */
27655+
27656+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
27657+#define PSB_2D_DST_332RGB (0x00030000)
27658+#define PSB_2D_DST_4444ARGB (0x00038000)
27659+#define PSB_2D_DST_555RGB (0x00040000)
27660+#define PSB_2D_DST_1555ARGB (0x00048000)
27661+#define PSB_2D_DST_565RGB (0x00050000)
27662+#define PSB_2D_DST_0888ARGB (0x00058000)
27663+#define PSB_2D_DST_8888ARGB (0x00060000)
27664+#define PSB_2D_DST_8888AYUV (0x00070000)
27665+
27666+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
27667+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
27668+#define PSB_2D_DST_STRIDE_SHIFT (0)
27669+/*
27670+ * WORD 1 - Base Address
27671+ */
27672+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
27673+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
27674+#define PSB_2D_DST_ADDR_SHIFT (2)
27675+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
27676+
27677+/*
27678+ * Mask Surface (PSB_2D_MASK_SURF_BH)
27679+ */
27680+/*
27681+ * WORD 0
27682+ */
27683+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
27684+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
27685+#define PSB_2D_MASK_STRIDE_SHIFT (0)
27686+/*
27687+ * WORD 1 - Base Address
27688+ */
27689+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
27690+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
27691+#define PSB_2D_MASK_ADDR_SHIFT (2)
27692+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
27693+
27694+/*
27695+ * Source Palette (PSB_2D_SRC_PAL_BH)
27696+ */
27697+
27698+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
27699+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
27700+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
27701+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
27702+
27703+/*
27704+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
27705+ */
27706+
27707+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
27708+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
27709+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
27710+#define PSB_2D_PATPAL_BYTEALIGN (1024)
27711+
27712+/*
27713+ * Rop3 Codes (2 LS bytes)
27714+ */
27715+
27716+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
27717+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
27718+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
27719+#define PSB_2D_ROP3_BLACKNESS (0x0000)
27720+#define PSB_2D_ROP3_SRC (0xCC)
27721+#define PSB_2D_ROP3_PAT (0xF0)
27722+#define PSB_2D_ROP3_DST (0xAA)
27723+
27724+
27725+/*
27726+ * Sizes.
27727+ */
27728+
27729+#define PSB_SCENE_HW_COOKIE_SIZE 16
27730+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
27731+
27732+/*
27733+ * Scene stuff.
27734+ */
27735+
27736+#define PSB_NUM_HW_SCENES 2
27737+
27738+/*
27739+ * Scheduler completion actions.
27740+ */
27741+
27742+#define PSB_RASTER_BLOCK 0
27743+#define PSB_RASTER 1
27744+#define PSB_RETURN 2
27745+#define PSB_TA 3
27746+
27747+
27748+/*Power management*/
27749+#define PSB_PUNIT_PORT 0x04
27750+#define PSB_APMBA 0x7a
27751+#define PSB_APM_CMD 0x0
27752+#define PSB_APM_STS 0x04
27753+#define PSB_PWRGT_GFX_MASK 0x3
27754+#define PSB_PWRGT_VID_ENC_MASK 0x30
27755+#define PSB_PWRGT_VID_DEC_MASK 0xc
27756+
27757+#define PSB_PM_SSC 0x20
27758+#define PSB_PM_SSS 0x30
27759+#define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/
27760+#endif
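For illustration, a sketch (not part of this patch) of how the 2D block headers and the MASK/SHIFT pairs above combine into a single command word; the solid-fill blit choice and the helper name are assumptions.

/* Illustrative sketch only -- not part of this patch. */
static uint32_t example_fill_blit_header(void)
{
	uint32_t blit = PSB_2D_BLIT_BH;		/* blit rectangle block header */

	blit |= PSB_2D_ROT_NONE | PSB_2D_COPYORDER_TL2BR;	/* zero-valued defaults */
	blit |= PSB_2D_USE_FILL;		/* solid fill, no pattern surface */

	/*
	 * Identical rop A and rop B (see the Tungsten Graphics note above)
	 * means the mask surface is never read and need not be set up.
	 */
	blit |= (PSB_2D_ROP3_PAT << PSB_2D_ROP3B_SHIFT) & PSB_2D_ROP3B_MASK;
	blit |= (PSB_2D_ROP3_PAT << PSB_2D_ROP3A_SHIFT) & PSB_2D_ROP3A_MASK;

	/* DWORD0 of the block then carries the RGBA8888 fill colour. */
	return blit;
}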
27761diff --git a/drivers/gpu/drm/psb/psb_reset.c b/drivers/gpu/drm/psb/psb_reset.c
27762new file mode 100644
27763index 0000000..04c9378
27764--- /dev/null
27765+++ b/drivers/gpu/drm/psb/psb_reset.c
27766@@ -0,0 +1,484 @@
27767+/**************************************************************************
27768+ * Copyright (c) 2007, Intel Corporation.
27769+ * All Rights Reserved.
27770+ *
27771+ * This program is free software; you can redistribute it and/or modify it
27772+ * under the terms and conditions of the GNU General Public License,
27773+ * version 2, as published by the Free Software Foundation.
27774+ *
27775+ * This program is distributed in the hope it will be useful, but WITHOUT
27776+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
27777+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
27778+ * more details.
27779+ *
27780+ * You should have received a copy of the GNU General Public License along with
27781+ * this program; if not, write to the Free Software Foundation, Inc.,
27782+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
27783+ *
27784+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
27785+ * develop this driver.
27786+ *
27787+ **************************************************************************/
27788+/*
27789+ * Authors:
27790+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
27791+ */
27792+
27793+#include <drm/drmP.h>
27794+#include "psb_drv.h"
27795+#include "psb_reg.h"
27796+#include "psb_intel_reg.h"
27797+#include "psb_scene.h"
27798+#include "psb_msvdx.h"
27799+#include "lnc_topaz.h"
27800+#include <linux/spinlock.h>
27801+#include "psb_powermgmt.h"
27802+#define PSB_2D_TIMEOUT_MSEC 100
27803+
27804+void psb_reset(struct drm_psb_private *dev_priv, int reset_2d)
27805+{
27806+ uint32_t val;
27807+
27808+ val = _PSB_CS_RESET_BIF_RESET |
27809+ _PSB_CS_RESET_DPM_RESET |
27810+ _PSB_CS_RESET_TA_RESET |
27811+ _PSB_CS_RESET_USE_RESET |
27812+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET;
27813+
27814+ if (reset_2d)
27815+ val |= _PSB_CS_RESET_TWOD_RESET;
27816+
27817+ PSB_WSGX32(val, PSB_CR_SOFT_RESET);
27818+ (void) PSB_RSGX32(PSB_CR_SOFT_RESET);
27819+
27820+ udelay(100);
27821+
27822+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
27823+ wmb();
27824+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
27825+ PSB_CR_BIF_CTRL);
27826+ wmb();
27827+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
27828+
27829+ udelay(100);
27830+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
27831+ PSB_CR_BIF_CTRL);
27832+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
27833+}
27834+
27835+void psb_print_pagefault(struct drm_psb_private *dev_priv)
27836+{
27837+ uint32_t val;
27838+ uint32_t addr;
27839+
27840+ val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
27841+ addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
27842+
27843+ if (val) {
27844+ if (val & _PSB_CBI_STAT_PF_N_RW)
27845+ DRM_ERROR("Poulsbo MMU page fault:\n");
27846+ else
27847+ DRM_ERROR("Poulsbo MMU read / write "
27848+ "protection fault:\n");
27849+
27850+ if (val & _PSB_CBI_STAT_FAULT_CACHE)
27851+ DRM_ERROR("\tCache requestor.\n");
27852+ if (val & _PSB_CBI_STAT_FAULT_TA)
27853+ DRM_ERROR("\tTA requestor.\n");
27854+ if (val & _PSB_CBI_STAT_FAULT_VDM)
27855+ DRM_ERROR("\tVDM requestor.\n");
27856+ if (val & _PSB_CBI_STAT_FAULT_2D)
27857+ DRM_ERROR("\t2D requestor.\n");
27858+ if (val & _PSB_CBI_STAT_FAULT_PBE)
27859+ DRM_ERROR("\tPBE requestor.\n");
27860+ if (val & _PSB_CBI_STAT_FAULT_TSP)
27861+ DRM_ERROR("\tTSP requestor.\n");
27862+ if (val & _PSB_CBI_STAT_FAULT_ISP)
27863+ DRM_ERROR("\tISP requestor.\n");
27864+ if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
27865+ DRM_ERROR("\tUSSEPDS requestor.\n");
27866+ if (val & _PSB_CBI_STAT_FAULT_HOST)
27867+ DRM_ERROR("\tHost requestor.\n");
27868+
27869+ DRM_ERROR("\tMMU failing address is 0x%08x.\n",
27870+ (unsigned) addr);
27871+ }
27872+}
27873+
27874+void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
27875+{
27876+ struct timer_list *wt = &dev_priv->watchdog_timer;
27877+ unsigned long irq_flags;
27878+
27879+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
27880+ if (dev_priv->timer_available && !timer_pending(wt)) {
27881+ wt->expires = jiffies + PSB_WATCHDOG_DELAY;
27882+ add_timer(wt);
27883+ }
27884+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
27885+}
27886+
27887+#if 0
27888+static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv,
27889+ unsigned int engine, int *lockup,
27890+ int *idle)
27891+{
27892+ uint32_t received_seq;
27893+
27894+ received_seq = dev_priv->comm[engine << 4];
27895+ spin_lock(&dev_priv->sequence_lock);
27896+ *idle = (received_seq == dev_priv->sequence[engine]);
27897+ spin_unlock(&dev_priv->sequence_lock);
27898+
27899+ if (*idle) {
27900+ dev_priv->idle[engine] = 1;
27901+ *lockup = 0;
27902+ return;
27903+ }
27904+
27905+ if (dev_priv->idle[engine]) {
27906+ dev_priv->idle[engine] = 0;
27907+ dev_priv->last_sequence[engine] = received_seq;
27908+ *lockup = 0;
27909+ return;
27910+ }
27911+
27912+ *lockup = (dev_priv->last_sequence[engine] == received_seq);
27913+}
27914+
27915+#endif
27916+static void psb_watchdog_func(unsigned long data)
27917+{
27918+ struct drm_psb_private *dev_priv = (struct drm_psb_private *) data;
27919+ int lockup;
27920+ int msvdx_lockup;
27921+ int msvdx_idle;
27922+ int lockup_2d;
27923+ int idle_2d;
27924+ int idle;
27925+ unsigned long irq_flags;
27926+
27927+ psb_scheduler_lockup(dev_priv, &lockup, &idle);
27928+ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
27929+
27930+#if 0
27931+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
27932+#else
27933+ lockup_2d = false;
27934+ idle_2d = true;
27935+#endif
27936+ if (lockup || msvdx_lockup || lockup_2d) {
27937+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
27938+ dev_priv->timer_available = 0;
27939+ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
27940+ irq_flags);
27941+ if (lockup) {
27942+ /*commented out to avoid an illegal access under ospm*/
27943+ /*psb_print_pagefault(dev_priv);*/
27944+ schedule_work(&dev_priv->watchdog_wq);
27945+ }
27946+ if (msvdx_lockup)
27947+ schedule_work(&dev_priv->msvdx_watchdog_wq);
27948+ }
27949+ if (!idle || !msvdx_idle || !idle_2d)
27950+ psb_schedule_watchdog(dev_priv);
27951+}
27952+
27953+void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
27954+{
27955+ struct drm_psb_private *dev_priv = dev->dev_private;
27956+ struct psb_msvdx_cmd_queue *msvdx_cmd;
27957+ struct list_head *list, *next;
27958+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
27959+
27960+ /*Flush the msvdx cmd queue and signal all fences in the queue */
27961+ list_for_each_safe(list, next, &msvdx_priv->msvdx_queue) {
27962+ msvdx_cmd =
27963+ list_entry(list, struct psb_msvdx_cmd_queue, head);
27964+ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
27965+ msvdx_cmd->sequence);
27966+ msvdx_priv->msvdx_current_sequence = msvdx_cmd->sequence;
27967+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
27968+ msvdx_priv->msvdx_current_sequence,
27969+ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
27970+ list_del(list);
27971+ kfree(msvdx_cmd->cmd);
27972+ kfree(msvdx_cmd);
27974+ }
27975+}
27976+
27977+static void psb_msvdx_reset_wq(struct work_struct *work)
27978+{
27979+ struct drm_psb_private *dev_priv =
27980+ container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
27981+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
27982+
27983+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
27984+ unsigned long irq_flags;
27985+
27986+ mutex_lock(&msvdx_priv->msvdx_mutex);
27987+ msvdx_priv->msvdx_needs_reset = 1;
27988+ msvdx_priv->msvdx_current_sequence++;
27989+ PSB_DEBUG_GENERAL
27990+ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
27991+ msvdx_priv->msvdx_current_sequence);
27992+
27993+ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
27994+ msvdx_priv->msvdx_current_sequence,
27995+ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
27996+
27997+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
27998+ dev_priv->timer_available = 1;
27999+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28000+
28001+ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
28002+ psb_msvdx_flush_cmd_queue(scheduler->dev);
28003+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
28004+
28005+ psb_schedule_watchdog(dev_priv);
28006+ mutex_unlock(&msvdx_priv->msvdx_mutex);
28007+}
28008+
28009+static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
28010+{
28011+ struct psb_xhw_buf buf;
28012+ uint32_t bif_ctrl;
28013+
28014+ INIT_LIST_HEAD(&buf.head);
28015+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
28016+ bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
28017+ PSB_WSGX32(bif_ctrl |
28018+ _PSB_CB_CTRL_CLEAR_FAULT |
28019+ _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
28020+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
28021+ udelay(100);
28022+ PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
28023+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
28024+ return psb_xhw_reset_dpm(dev_priv, &buf);
28025+}
28026+
28027+/*
28028+ * Block command submission and reset hardware and schedulers.
28029+ */
28030+
28031+static void psb_reset_wq(struct work_struct *work)
28032+{
28033+ struct drm_psb_private *dev_priv =
28034+ container_of(work, struct drm_psb_private, watchdog_wq);
28035+ int lockup_2d;
28036+ int idle_2d;
28037+ unsigned long irq_flags;
28038+ int ret;
28039+ int reset_count = 0;
28040+ struct psb_xhw_buf buf;
28041+ uint32_t xhw_lockup;
28042+
28043+ /*
28044+ * Block command submission.
28045+ */
28046+ PSB_DEBUG_PM("psb_reset_wq\n");
28047+
28048+ if (!powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, false)) {
28049+ DRM_ERROR("lock up hapeens when island off \n");
28050+ return;
28051+ }
28052+ mutex_lock(&dev_priv->reset_mutex);
28053+
28054+ INIT_LIST_HEAD(&buf.head);
28055+ ret = psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup);
28056+ if (likely(ret == 0)) {
28057+ if (psb_extend_timeout(dev_priv, xhw_lockup) == 0) {
28058+ /*
28059+ * no lockup, just re-schedule
28060+ */
28061+ spin_lock_irqsave(&dev_priv->watchdog_lock,
28062+ irq_flags);
28063+ dev_priv->timer_available = 1;
28064+ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
28065+ irq_flags);
28066+ psb_schedule_watchdog(dev_priv);
28067+ mutex_unlock(&dev_priv->reset_mutex);
28068+ return;
28069+ }
28070+ } else {
28071+ DRM_ERROR("Check lockup returned %d\n", ret);
28072+ }
28073+#if 0
28074+ mdelay(PSB_2D_TIMEOUT_MSEC);
28075+
28076+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
28077+
28078+ if (lockup_2d) {
28079+ uint32_t seq_2d;
28080+ spin_lock(&dev_priv->sequence_lock);
28081+ seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
28082+ spin_unlock(&dev_priv->sequence_lock);
28083+ psb_fence_error(dev_priv->scheduler.dev,
28084+ PSB_ENGINE_2D,
28085+ seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
28086+ DRM_INFO("Resetting 2D engine.\n");
28087+ }
28088+
28089+ psb_reset(dev_priv, lockup_2d);
28090+#else
28091+ (void) lockup_2d;
28092+ (void) idle_2d;
28093+ psb_reset(dev_priv, 0);
28094+#endif
28095+ (void) psb_xhw_mmu_reset(dev_priv);
28096+ DRM_INFO("Resetting scheduler.\n");
28097+ psb_scheduler_pause(dev_priv);
28098+ psb_scheduler_reset(dev_priv, -EBUSY);
28099+ psb_scheduler_ta_mem_check(dev_priv);
28100+
28101+ while (dev_priv->ta_mem &&
28102+ !dev_priv->force_ta_mem_load && ++reset_count < 10) {
28103+ struct ttm_fence_object *fence;
28104+
28105+ /*
28106+ * TA memory is currently fenced so offsets
28107+ * are valid. Reload offsets into the dpm now.
28108+ */
28109+
28110+ struct psb_xhw_buf buf;
28111+ INIT_LIST_HEAD(&buf.head);
28112+
28113+ mdelay(100);
28114+
28115+ fence = dev_priv->ta_mem->ta_memory->sync_obj;
28116+
28117+ DRM_INFO("Reloading TA memory at offset "
28118+ "0x%08lx to 0x%08lx seq %d\n",
28119+ dev_priv->ta_mem->ta_memory->offset,
28120+ dev_priv->ta_mem->ta_memory->offset +
28121+ (dev_priv->ta_mem->ta_memory->num_pages << PAGE_SHIFT),
28122+ fence->sequence);
28123+
28124+ fence = dev_priv->ta_mem->hw_data->sync_obj;
28125+
28126+ DRM_INFO("Reloading TA HW memory at offset "
28127+ "0x%08lx to 0x%08lx seq %u\n",
28128+ dev_priv->ta_mem->hw_data->offset,
28129+ dev_priv->ta_mem->hw_data->offset +
28130+ (dev_priv->ta_mem->hw_data->num_pages << PAGE_SHIFT),
28131+ fence->sequence);
28132+
28133+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
28134+ PSB_TA_MEM_FLAG_TA |
28135+ PSB_TA_MEM_FLAG_RASTER |
28136+ PSB_TA_MEM_FLAG_HOSTA |
28137+ PSB_TA_MEM_FLAG_HOSTD |
28138+ PSB_TA_MEM_FLAG_INIT,
28139+ dev_priv->ta_mem->ta_memory->
28140+ offset,
28141+ dev_priv->ta_mem->hw_data->
28142+ offset,
28143+ dev_priv->ta_mem->hw_cookie);
28144+ if (!ret)
28145+ break;
28146+
28147+ DRM_INFO("Reloading TA memory failed. Retrying.\n");
28148+ psb_reset(dev_priv, 0);
28149+ (void) psb_xhw_mmu_reset(dev_priv);
28150+ }
28151+
28152+ psb_scheduler_restart(dev_priv);
28153+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28154+ dev_priv->timer_available = 1;
28155+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28156+ mutex_unlock(&dev_priv->reset_mutex);
28157+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
28158+}
28159+
28160+void psb_watchdog_init(struct drm_psb_private *dev_priv)
28161+{
28162+ struct timer_list *wt = &dev_priv->watchdog_timer;
28163+ unsigned long irq_flags;
28164+
28165+ spin_lock_init(&dev_priv->watchdog_lock);
28166+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28167+ init_timer(wt);
28168+ INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
28169+ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
28170+ wt->data = (unsigned long) dev_priv;
28171+ wt->function = &psb_watchdog_func;
28172+ dev_priv->timer_available = 1;
28173+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28174+}
28175+
28176+void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
28177+{
28178+ unsigned long irq_flags;
28179+
28180+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28181+ dev_priv->timer_available = 0;
28182+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28183+ (void) del_timer_sync(&dev_priv->watchdog_timer);
28184+}
28185+
28186+static void psb_lid_timer_func(unsigned long data)
28187+{
28188+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
28189+ struct drm_device * dev = (struct drm_device *)dev_priv->dev;
28190+ struct timer_list * lid_timer = &dev_priv->lid_timer;
28191+ unsigned long irq_flags;
28192+ u32 * lid_state = dev_priv->lid_state;
28193+ u32 pp_status;
28194+
28195+ if(*lid_state == dev_priv->lid_last_state)
28196+ goto lid_timer_schedule;
28197+
28198+ if((*lid_state) & 0x01) {
28199+		/* lid state is open */
28200+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
28201+ do {
28202+ pp_status = REG_READ(PP_STATUS);
28203+ } while((pp_status & PP_ON) == 0);
28204+
28205+		/* FIXME: should restore the previous backlight level here */
28206+ psb_intel_lvds_set_brightness(dev, 100);
28207+ } else {
28208+ psb_intel_lvds_set_brightness(dev, 0);
28209+
28210+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
28211+ do {
28212+ pp_status = REG_READ(PP_STATUS);
28213+ } while((pp_status & PP_ON) == 0);
28214+ }
28215+ //printk(KERN_INFO"%s: lid: closed\n", __FUNCTION__);
28216+
28217+ dev_priv->lid_last_state = *lid_state;
28218+
28219+lid_timer_schedule:
28220+ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
28221+ if(!timer_pending(lid_timer)){
28222+ lid_timer->expires = jiffies + PSB_LID_DELAY;
28223+ add_timer(lid_timer);
28224+ }
28225+ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
28226+}
28227+
28228+void psb_lid_timer_init(struct drm_psb_private *dev_priv)
28229+{
28230+ struct timer_list * lid_timer = &dev_priv->lid_timer;
28231+ unsigned long irq_flags;
28232+
28233+ spin_lock_init(&dev_priv->lid_lock);
28234+ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
28235+
28236+ init_timer(lid_timer);
28237+
28238+ lid_timer->data = (unsigned long)dev_priv;
28239+ lid_timer->function = psb_lid_timer_func;
28240+ lid_timer->expires = jiffies + PSB_LID_DELAY;
28241+
28242+ add_timer(lid_timer);
28243+ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
28244+}
28245+
28246+void psb_lid_timer_takedown(struct drm_psb_private * dev_priv)
28247+{
28248+ del_timer_sync(&dev_priv->lid_timer);
28249+}
28250+
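For context: the watchdog and lid-switch code above relies on the pre-timer_setup() kernel timer API of that era (init_timer(), hand-assigned ->data/->function, add_timer(), and del_timer_sync() on teardown). A minimal, illustrative sketch of that idiom follows; the names (demo_ctx, demo_timer_func) are hypothetical and not part of the patch.

/*
 * Illustrative sketch only (not part of the patch): the legacy timer_list
 * idiom used by psb_watchdog_init() and psb_lid_timer_init() above.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_ctx {			/* hypothetical stand-in for drm_psb_private */
	struct timer_list timer;
};

static void demo_timer_func(unsigned long data)
{
	struct demo_ctx *ctx = (struct demo_ctx *)data;

	/* ... periodic work goes here ... */

	mod_timer(&ctx->timer, jiffies + HZ / 10);	/* re-arm the timer */
}

static void demo_timer_start(struct demo_ctx *ctx)
{
	init_timer(&ctx->timer);
	ctx->timer.data = (unsigned long)ctx;
	ctx->timer.function = demo_timer_func;
	ctx->timer.expires = jiffies + HZ / 10;
	add_timer(&ctx->timer);
}

static void demo_timer_stop(struct demo_ctx *ctx)
{
	del_timer_sync(&ctx->timer);	/* waits for a running handler to finish */
}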
28251diff --git a/drivers/gpu/drm/psb/psb_scene.c b/drivers/gpu/drm/psb/psb_scene.c
28252new file mode 100644
28253index 0000000..42b823d
28254--- /dev/null
28255+++ b/drivers/gpu/drm/psb/psb_scene.c
28256@@ -0,0 +1,523 @@
28257+/**************************************************************************
28258+ * Copyright (c) 2007, Intel Corporation.
28259+ * All Rights Reserved.
28260+ *
28261+ * This program is free software; you can redistribute it and/or modify it
28262+ * under the terms and conditions of the GNU General Public License,
28263+ * version 2, as published by the Free Software Foundation.
28264+ *
28265+ * This program is distributed in the hope it will be useful, but WITHOUT
28266+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28267+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
28268+ * more details.
28269+ *
28270+ * You should have received a copy of the GNU General Public License along with
28271+ * this program; if not, write to the Free Software Foundation, Inc.,
28272+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28273+ *
28274+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
28275+ * develop this driver.
28276+ *
28277+ **************************************************************************/
28278+/*
28279+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
28280+ */
28281+
28282+#include <drm/drmP.h>
28283+#include "psb_drv.h"
28284+#include "psb_scene.h"
28285+#include "psb_powermgmt.h"
28286+
28287+void psb_clear_scene_atomic(struct psb_scene *scene)
28288+{
28289+ int i;
28290+ struct page *page;
28291+ void *v;
28292+
28293+ for (i = 0; i < scene->clear_num_pages; ++i) {
28294+ page = ttm_tt_get_page(scene->hw_data->ttm,
28295+ scene->clear_p_start + i);
28296+ if (in_irq())
28297+ v = kmap_atomic(page, KM_IRQ0);
28298+ else
28299+ v = kmap_atomic(page, KM_USER0);
28300+
28301+ memset(v, 0, PAGE_SIZE);
28302+
28303+ if (in_irq())
28304+ kunmap_atomic(v, KM_IRQ0);
28305+ else
28306+ kunmap_atomic(v, KM_USER0);
28307+ }
28308+}
28309+
28310+int psb_clear_scene(struct psb_scene *scene)
28311+{
28312+ struct ttm_bo_kmap_obj bmo;
28313+ bool is_iomem;
28314+ void *addr;
28315+
28316+ int ret = ttm_bo_kmap(scene->hw_data, scene->clear_p_start,
28317+ scene->clear_num_pages, &bmo);
28318+
28319+ PSB_DEBUG_RENDER("Scene clear.\n");
28320+ if (ret)
28321+ return ret;
28322+
28323+ addr = ttm_kmap_obj_virtual(&bmo, &is_iomem);
28324+ BUG_ON(is_iomem);
28325+ memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
28326+ ttm_bo_kunmap(&bmo);
28327+
28328+ return 0;
28329+}
28330+
28331+static void psb_destroy_scene(struct kref *kref)
28332+{
28333+ struct psb_scene *scene =
28334+ container_of(kref, struct psb_scene, kref);
28335+
28336+ PSB_DEBUG_RENDER("Scene destroy.\n");
28337+ psb_scheduler_remove_scene_refs(scene);
28338+ ttm_bo_unref(&scene->hw_data);
28339+ kfree(scene);
28340+}
28341+
28342+void psb_scene_unref(struct psb_scene **p_scene)
28343+{
28344+ struct psb_scene *scene = *p_scene;
28345+
28346+ PSB_DEBUG_RENDER("Scene unref.\n");
28347+ *p_scene = NULL;
28348+ kref_put(&scene->kref, &psb_destroy_scene);
28349+}
28350+
28351+struct psb_scene *psb_scene_ref(struct psb_scene *src)
28352+{
28353+ PSB_DEBUG_RENDER("Scene ref.\n");
28354+ kref_get(&src->kref);
28355+ return src;
28356+}
28357+
28358+static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
28359+ uint32_t w, uint32_t h)
28360+{
28361+ struct drm_psb_private *dev_priv =
28362+ (struct drm_psb_private *) dev->dev_private;
28363+ struct ttm_bo_device *bdev = &dev_priv->bdev;
28364+ int ret = -EINVAL;
28365+ struct psb_scene *scene;
28366+ uint32_t bo_size;
28367+ struct psb_xhw_buf buf;
28368+
28369+ PSB_DEBUG_RENDER("Alloc scene w %u h %u msaa %u\n", w & 0xffff, h,
28370+ w >> 16);
28371+
28372+ scene = kzalloc(sizeof(*scene), GFP_KERNEL);
28373+
28374+ if (!scene) {
28375+ DRM_ERROR("Out of memory allocating scene object.\n");
28376+ return NULL;
28377+ }
28378+
28379+ scene->dev = dev;
28380+ scene->w = w;
28381+ scene->h = h;
28382+ scene->hw_scene = NULL;
28383+ kref_init(&scene->kref);
28384+
28385+ INIT_LIST_HEAD(&buf.head);
28386+ ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
28387+ scene->hw_cookie, &bo_size,
28388+ &scene->clear_p_start,
28389+ &scene->clear_num_pages);
28390+ if (ret)
28391+ goto out_err;
28392+
28393+ ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel,
28394+ DRM_PSB_FLAG_MEM_MMU |
28395+ TTM_PL_FLAG_CACHED,
28396+ 0, 0, 1, NULL, &scene->hw_data);
28397+ if (ret)
28398+ goto out_err;
28399+
28400+ return scene;
28401+out_err:
28402+ kfree(scene);
28403+ return NULL;
28404+}
28405+
28406+int psb_validate_scene_pool(struct psb_context *context,
28407+ struct psb_scene_pool *pool,
28408+ uint32_t w,
28409+ uint32_t h,
28410+ int final_pass, struct psb_scene **scene_p)
28411+{
28412+ struct drm_device *dev = pool->dev;
28413+ struct drm_psb_private *dev_priv =
28414+ (struct drm_psb_private *) dev->dev_private;
28415+ struct psb_scene *scene = pool->scenes[pool->cur_scene];
28416+ int ret;
28417+ unsigned long irq_flags;
28418+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
28419+ uint32_t bin_pt_offset;
28420+ uint32_t bin_param_offset;
28421+
28422+ PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n",
28423+ pool->cur_scene);
28424+
28425+ if (unlikely(!dev_priv->ta_mem)) {
28426+ dev_priv->ta_mem =
28427+ psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages);
28428+ if (!dev_priv->ta_mem)
28429+ return -ENOMEM;
28430+
28431+ bin_pt_offset = ~0;
28432+ bin_param_offset = ~0;
28433+ } else {
28434+ bin_pt_offset = dev_priv->ta_mem->hw_data->offset;
28435+ bin_param_offset = dev_priv->ta_mem->ta_memory->offset;
28436+ }
28437+
28438+ pool->w = w;
28439+ pool->h = h;
28440+ if (scene && (scene->w != pool->w || scene->h != pool->h)) {
28441+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28442+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
28443+ spin_unlock_irqrestore(&scheduler->lock,
28444+ irq_flags);
28445+ DRM_ERROR("Trying to resize a dirty scene.\n");
28446+ return -EINVAL;
28447+ }
28448+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28449+ psb_scene_unref(&pool->scenes[pool->cur_scene]);
28450+ scene = NULL;
28451+ }
28452+
28453+ if (!scene) {
28454+ pool->scenes[pool->cur_scene] = scene =
28455+ psb_alloc_scene(pool->dev, pool->w, pool->h);
28456+
28457+ if (!scene)
28458+ return -ENOMEM;
28459+
28460+ scene->flags = PSB_SCENE_FLAG_CLEARED;
28461+ }
28462+
28463+ ret = psb_validate_kernel_buffer(context, scene->hw_data,
28464+ PSB_ENGINE_TA,
28465+ PSB_BO_FLAG_SCENE |
28466+ PSB_GPU_ACCESS_READ |
28467+ PSB_GPU_ACCESS_WRITE, 0);
28468+ if (unlikely(ret != 0))
28469+ return ret;
28470+
28471+ /*
28472+ * FIXME: We need atomic bit manipulation here for the
28473+ * scheduler. For now use the spinlock.
28474+ */
28475+
28476+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28477+ if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) {
28478+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28479+ PSB_DEBUG_RENDER("Waiting to clear scene memory.\n");
28480+ mutex_lock(&scene->hw_data->mutex);
28481+
28482+ ret = ttm_bo_wait(scene->hw_data, 0, 1, 0);
28483+ mutex_unlock(&scene->hw_data->mutex);
28484+ if (ret)
28485+ return ret;
28486+
28487+ ret = psb_clear_scene(scene);
28488+
28489+ if (ret)
28490+ return ret;
28491+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28492+ scene->flags |= PSB_SCENE_FLAG_CLEARED;
28493+ }
28494+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28495+
28496+ ret = psb_validate_kernel_buffer(context, dev_priv->ta_mem->hw_data,
28497+ PSB_ENGINE_TA,
28498+ PSB_BO_FLAG_SCENE |
28499+ PSB_GPU_ACCESS_READ |
28500+ PSB_GPU_ACCESS_WRITE, 0);
28501+ if (unlikely(ret != 0))
28502+ return ret;
28503+
28504+ ret =
28505+ psb_validate_kernel_buffer(context,
28506+ dev_priv->ta_mem->ta_memory,
28507+ PSB_ENGINE_TA,
28508+ PSB_BO_FLAG_SCENE |
28509+ PSB_GPU_ACCESS_READ |
28510+ PSB_GPU_ACCESS_WRITE, 0);
28511+
28512+ if (unlikely(ret != 0))
28513+ return ret;
28514+
28515+ if (unlikely(bin_param_offset !=
28516+ dev_priv->ta_mem->ta_memory->offset ||
28517+ bin_pt_offset !=
28518+ dev_priv->ta_mem->hw_data->offset ||
28519+ dev_priv->force_ta_mem_load)) {
28520+
28521+ struct psb_xhw_buf buf;
28522+
28523+ INIT_LIST_HEAD(&buf.head);
28524+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
28525+ PSB_TA_MEM_FLAG_TA |
28526+ PSB_TA_MEM_FLAG_RASTER |
28527+ PSB_TA_MEM_FLAG_HOSTA |
28528+ PSB_TA_MEM_FLAG_HOSTD |
28529+ PSB_TA_MEM_FLAG_INIT,
28530+ dev_priv->ta_mem->ta_memory->
28531+ offset,
28532+ dev_priv->ta_mem->hw_data->
28533+ offset,
28534+ dev_priv->ta_mem->hw_cookie);
28535+ if (ret)
28536+ return ret;
28537+
28538+ dev_priv->force_ta_mem_load = 0;
28539+ }
28540+
28541+ if (final_pass) {
28542+
28543+ /*
28544+ * Clear the scene on next use. Advance the scene counter.
28545+ */
28546+
28547+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28548+ scene->flags &= ~PSB_SCENE_FLAG_CLEARED;
28549+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28550+ pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes;
28551+ }
28552+
28553+ *scene_p = psb_scene_ref(scene);
28554+ return 0;
28555+}
28556+
28557+static void psb_scene_pool_destroy(struct kref *kref)
28558+{
28559+ struct psb_scene_pool *pool =
28560+ container_of(kref, struct psb_scene_pool, kref);
28561+ int i;
28562+ PSB_DEBUG_RENDER("Scene pool destroy.\n");
28563+
28564+ for (i = 0; i < pool->num_scenes; ++i) {
28565+ PSB_DEBUG_RENDER("scenes %d is 0x%08lx\n", i,
28566+ (unsigned long) pool->scenes[i]);
28567+ if (pool->scenes[i])
28568+ psb_scene_unref(&pool->scenes[i]);
28569+ }
28570+
28571+ kfree(pool);
28572+}
28573+
28574+void psb_scene_pool_unref(struct psb_scene_pool **p_pool)
28575+{
28576+ struct psb_scene_pool *pool = *p_pool;
28577+
28578+ PSB_DEBUG_RENDER("Scene pool unref\n");
28579+ *p_pool = NULL;
28580+ kref_put(&pool->kref, &psb_scene_pool_destroy);
28581+}
28582+
28583+struct psb_scene_pool *psb_scene_pool_ref(struct psb_scene_pool *src)
28584+{
28585+ kref_get(&src->kref);
28586+ return src;
28587+}
28588+
28589+/*
28590+ * Callback for base object manager.
28591+ */
28592+
28593+static void psb_scene_pool_release(struct ttm_base_object **p_base)
28594+{
28595+ struct ttm_base_object *base = *p_base;
28596+ struct psb_scene_pool *pool =
28597+ container_of(base, struct psb_scene_pool, base);
28598+ *p_base = NULL;
28599+
28600+ psb_scene_pool_unref(&pool);
28601+}
28602+
28603+struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file *file_priv,
28604+ uint32_t handle,
28605+ int check_owner)
28606+{
28607+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
28608+ struct ttm_base_object *base;
28609+ struct psb_scene_pool *pool;
28610+
28611+
28612+ base = ttm_base_object_lookup(tfile, handle);
28613+ if (!base || (base->object_type != PSB_USER_OBJECT_SCENE_POOL)) {
28614+ DRM_ERROR("Could not find scene pool object 0x%08x\n",
28615+ handle);
28616+ return NULL;
28617+ }
28618+
28619+ if (check_owner && tfile != base->tfile && !base->shareable) {
28620+ ttm_base_object_unref(&base);
28621+ return NULL;
28622+ }
28623+
28624+ pool = container_of(base, struct psb_scene_pool, base);
28625+ kref_get(&pool->kref);
28626+ ttm_base_object_unref(&base);
28627+ return pool;
28628+}
28629+
28630+struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *file_priv,
28631+ int shareable,
28632+ uint32_t num_scenes,
28633+ uint32_t w, uint32_t h)
28634+{
28635+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
28636+ struct drm_device *dev = file_priv->minor->dev;
28637+ struct psb_scene_pool *pool;
28638+ int ret;
28639+
28640+ PSB_DEBUG_RENDER("Scene pool alloc\n");
28641+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
28642+ if (!pool) {
28643+ DRM_ERROR("Out of memory allocating scene pool object.\n");
28644+ return NULL;
28645+ }
28646+ pool->w = w;
28647+ pool->h = h;
28648+ pool->dev = dev;
28649+ pool->num_scenes = num_scenes;
28650+ kref_init(&pool->kref);
28651+
28652+ /*
28653+ * The base object holds a reference.
28654+ */
28655+
28656+ kref_get(&pool->kref);
28657+ ret = ttm_base_object_init(tfile, &pool->base, shareable,
28658+ PSB_USER_OBJECT_SCENE_POOL,
28659+ &psb_scene_pool_release, NULL);
28660+ if (unlikely(ret != 0))
28661+ goto out_err;
28662+
28663+ return pool;
28664+out_err:
28665+ kfree(pool);
28666+ return NULL;
28667+}
28668+
28669+/*
28670+ * Code to support multiple ta memory buffers.
28671+ */
28672+
28673+static void psb_ta_mem_destroy(struct kref *kref)
28674+{
28675+ struct psb_ta_mem *ta_mem =
28676+ container_of(kref, struct psb_ta_mem, kref);
28677+
28678+ ttm_bo_unref(&ta_mem->hw_data);
28679+ ttm_bo_unref(&ta_mem->ta_memory);
28680+ kfree(ta_mem);
28681+}
28682+
28683+void psb_ta_mem_unref(struct psb_ta_mem **p_ta_mem)
28684+{
28685+ struct psb_ta_mem *ta_mem = *p_ta_mem;
28686+ *p_ta_mem = NULL;
28687+ kref_put(&ta_mem->kref, psb_ta_mem_destroy);
28688+}
28689+
28690+struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src)
28691+{
28692+ kref_get(&src->kref);
28693+ return src;
28694+}
28695+
28696+struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages)
28697+{
28698+ struct drm_psb_private *dev_priv =
28699+ (struct drm_psb_private *) dev->dev_private;
28700+ struct ttm_bo_device *bdev = &dev_priv->bdev;
28701+ int ret = -EINVAL;
28702+ struct psb_ta_mem *ta_mem;
28703+ uint32_t bo_size;
28704+ uint32_t ta_min_size;
28705+ struct psb_xhw_buf buf;
28706+
28707+ INIT_LIST_HEAD(&buf.head);
28708+
28709+ ta_mem = kzalloc(sizeof(*ta_mem), GFP_KERNEL);
28710+
28711+ if (!ta_mem) {
28712+ DRM_ERROR("Out of memory allocating parameter memory.\n");
28713+ return NULL;
28714+ }
28715+
28716+ kref_init(&ta_mem->kref);
28717+ ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages,
28718+ ta_mem->hw_cookie,
28719+ &bo_size,
28720+ &ta_min_size);
28721+ if (ret == -ENOMEM) {
28722+ DRM_ERROR("Parameter memory size is too small.\n");
28723+ DRM_INFO("Attempted to use %u kiB of parameter memory.\n",
28724+ (unsigned int) (pages * (PAGE_SIZE / 1024)));
28725+ DRM_INFO("The Xpsb driver thinks this is too small and\n");
28726+ DRM_INFO("suggests %u kiB. Check the psb DRM\n",
28727+ (unsigned int)(ta_min_size / 1024));
28728+ DRM_INFO("\"ta_mem_size\" parameter!\n");
28729+ }
28730+ if (ret)
28731+ goto out_err0;
28732+
28733+ ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel,
28734+ DRM_PSB_FLAG_MEM_MMU,
28735+ 0, 0, 0, NULL,
28736+ &ta_mem->hw_data);
28737+ if (ret)
28738+ goto out_err0;
28739+
28740+ bo_size = pages * PAGE_SIZE;
28741+ ret =
28742+ ttm_buffer_object_create(bdev, bo_size,
28743+ ttm_bo_type_kernel,
28744+ DRM_PSB_FLAG_MEM_RASTGEOM,
28745+ 0,
28746+ 1024 * 1024 >> PAGE_SHIFT, 0,
28747+ NULL,
28748+ &ta_mem->ta_memory);
28749+ if (ret)
28750+ goto out_err1;
28751+
28752+ return ta_mem;
28753+out_err1:
28754+ ttm_bo_unref(&ta_mem->hw_data);
28755+out_err0:
28756+ kfree(ta_mem);
28757+ return NULL;
28758+}
28759+
28760+int drm_psb_scene_unref_ioctl(struct drm_device *dev,
28761+ void *data, struct drm_file *file_priv)
28762+{
28763+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
28764+ struct drm_psb_scene *scene = (struct drm_psb_scene *) data;
28765+ int ret = 0;
28766+ struct drm_psb_private *dev_priv = psb_priv(dev);
28767+ if (!scene->handle_valid)
28768+ return 0;
28769+ powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true);
28770+
28771+ ret =
28772+ ttm_ref_object_base_unref(tfile, scene->handle, TTM_REF_USAGE);
28773+ if (unlikely(ret != 0))
28774+ DRM_ERROR("Could not unreference a scene object.\n");
28775+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
28776+ if (drm_psb_ospm && IS_MRST(dev))
28777+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
28778+ return ret;
28779+}
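The scene, scene-pool and TA-memory objects introduced in psb_scene.c above are all lifetime-managed with a struct kref plus a destroy callback (kref_init()/kref_get()/kref_put(), with the unref helpers clearing the caller's pointer). A minimal sketch of that pattern, using hypothetical names (demo_obj, demo_obj_unref) rather than the driver's:

/* Illustrative sketch only: the kref lifetime pattern used by
 * psb_scene / psb_scene_pool / psb_ta_mem above. */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref kref;
	/* ... payload ... */
};

static void demo_obj_destroy(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

	kfree(obj);
}

static struct demo_obj *demo_obj_alloc(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->kref);	/* refcount starts at 1 */
	return obj;
}

static struct demo_obj *demo_obj_ref(struct demo_obj *obj)
{
	kref_get(&obj->kref);
	return obj;
}

static void demo_obj_unref(struct demo_obj **p_obj)
{
	struct demo_obj *obj = *p_obj;

	*p_obj = NULL;			/* clear the caller's pointer, as psb_scene_unref() does */
	kref_put(&obj->kref, demo_obj_destroy);
}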
28780diff --git a/drivers/gpu/drm/psb/psb_scene.h b/drivers/gpu/drm/psb/psb_scene.h
28781new file mode 100644
28782index 0000000..2a4f8bc
28783--- /dev/null
28784+++ b/drivers/gpu/drm/psb/psb_scene.h
28785@@ -0,0 +1,119 @@
28786+/**************************************************************************
28787+ * Copyright (c) 2007, Intel Corporation.
28788+ * All Rights Reserved.
28789+ *
28790+ * This program is free software; you can redistribute it and/or modify it
28791+ * under the terms and conditions of the GNU General Public License,
28792+ * version 2, as published by the Free Software Foundation.
28793+ *
28794+ * This program is distributed in the hope it will be useful, but WITHOUT
28795+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28796+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
28797+ * more details.
28798+ *
28799+ * You should have received a copy of the GNU General Public License along with
28800+ * this program; if not, write to the Free Software Foundation, Inc.,
28801+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28802+ *
28803+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
28804+ * develop this driver.
28805+ *
28806+ **************************************************************************/
28807+/*
28808+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
28809+ */
28810+
28811+#ifndef _PSB_SCENE_H_
28812+#define _PSB_SCENE_H_
28813+
28814+#include "ttm/ttm_object.h"
28815+
28816+#define PSB_USER_OBJECT_SCENE_POOL ttm_driver_type0
28817+#define PSB_USER_OBJECT_TA_MEM ttm_driver_type1
28818+#define PSB_MAX_NUM_SCENES 8
28819+
28820+struct psb_hw_scene;
28821+struct psb_hw_ta_mem;
28822+
28823+struct psb_scene_pool {
28824+ struct ttm_base_object base;
28825+ struct drm_device *dev;
28826+ struct kref kref;
28827+ uint32_t w;
28828+ uint32_t h;
28829+ uint32_t cur_scene;
28830+ struct psb_scene *scenes[PSB_MAX_NUM_SCENES];
28831+ uint32_t num_scenes;
28832+};
28833+
28834+struct psb_scene {
28835+ struct drm_device *dev;
28836+ struct kref kref;
28837+ uint32_t hw_cookie[PSB_SCENE_HW_COOKIE_SIZE];
28838+ uint32_t bo_size;
28839+ uint32_t w;
28840+ uint32_t h;
28841+ struct psb_ta_mem *ta_mem;
28842+ struct psb_hw_scene *hw_scene;
28843+ struct ttm_buffer_object *hw_data;
28844+ uint32_t flags;
28845+ uint32_t clear_p_start;
28846+ uint32_t clear_num_pages;
28847+};
28848+
28849+#if 0
28850+struct psb_scene_entry {
28851+ struct list_head head;
28852+ struct psb_scene *scene;
28853+};
28854+
28855+struct psb_user_scene {
28856+ struct ttm_base_object base;
28857+ struct drm_device *dev;
28858+};
28859+
28860+#endif
28861+
28862+struct psb_ta_mem {
28863+ struct ttm_base_object base;
28864+ struct drm_device *dev;
28865+ struct kref kref;
28866+ uint32_t hw_cookie[PSB_TA_MEM_HW_COOKIE_SIZE];
28867+ uint32_t bo_size;
28868+ struct ttm_buffer_object *ta_memory;
28869+ struct ttm_buffer_object *hw_data;
28870+ int is_deallocating;
28871+ int deallocating_scheduled;
28872+};
28873+
28874+extern struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
28875+ int shareable,
28876+ uint32_t num_scenes,
28877+ uint32_t w, uint32_t h);
28878+extern void psb_scene_pool_unref(struct psb_scene_pool **pool);
28879+extern struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file
28880+ *priv,
28881+ uint32_t handle,
28882+ int check_owner);
28883+extern int psb_validate_scene_pool(struct psb_context *context,
28884+ struct psb_scene_pool *pool,
28885+ uint32_t w,
28886+ uint32_t h, int final_pass,
28887+ struct psb_scene **scene_p);
28888+extern void psb_scene_unref(struct psb_scene **scene);
28889+extern struct psb_scene *psb_scene_ref(struct psb_scene *src);
28890+extern int drm_psb_scene_unref_ioctl(struct drm_device *dev,
28891+ void *data,
28892+ struct drm_file *file_priv);
28893+
28894+static inline uint32_t psb_scene_pool_handle(struct psb_scene_pool *pool)
28895+{
28896+ return pool->base.hash.key;
28897+}
28898+
28899+extern struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev,
28900+ uint32_t pages);
28901+extern struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src);
28902+extern void psb_ta_mem_unref(struct psb_ta_mem **ta_mem);
28903+
28904+#endif
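A hedged usage sketch of the interface declared above, assuming the caller already holds a drm_file and a psb_context and that the psb driver headers are included; demo_use_scene_pool and the chosen parameters are illustrative, not taken from the driver:

/* Illustrative only; assumes the psb driver headers (psb_drv.h, psb_scene.h). */
static int demo_use_scene_pool(struct drm_file *file_priv,
			       struct psb_context *context,
			       uint32_t w, uint32_t h)
{
	struct psb_scene_pool *pool;
	struct psb_scene *scene;
	int ret;

	/* Pool of two scenes, not shareable between clients. */
	pool = psb_scene_pool_alloc(file_priv, 0, 2, w, h);
	if (!pool)
		return -ENOMEM;

	/* Validate (and lazily allocate) a scene of the requested size. */
	ret = psb_validate_scene_pool(context, pool, w, h, 1 /* final_pass */, &scene);
	if (ret == 0)
		psb_scene_unref(&scene);	/* drop the reference handed back to us */

	psb_scene_pool_unref(&pool);		/* drop our local pool reference */
	return ret;
}

If a user-space handle for the pool is needed, psb_scene_pool_handle() above exposes the base object's hash key for that purpose.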
28905diff --git a/drivers/gpu/drm/psb/psb_schedule.c b/drivers/gpu/drm/psb/psb_schedule.c
28906new file mode 100644
28907index 0000000..9c4e2cd
28908--- /dev/null
28909+++ b/drivers/gpu/drm/psb/psb_schedule.c
28910@@ -0,0 +1,1593 @@
28911+/**************************************************************************
28912+ * Copyright (c) 2007, Intel Corporation.
28913+ * All Rights Reserved.
28914+ *
28915+ * This program is free software; you can redistribute it and/or modify it
28916+ * under the terms and conditions of the GNU General Public License,
28917+ * version 2, as published by the Free Software Foundation.
28918+ *
28919+ * This program is distributed in the hope it will be useful, but WITHOUT
28920+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28921+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
28922+ * more details.
28923+ *
28924+ * You should have received a copy of the GNU General Public License along with
28925+ * this program; if not, write to the Free Software Foundation, Inc.,
28926+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28927+ *
28928+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
28929+ * develop this driver.
28930+ *
28931+ **************************************************************************/
28932+/*
28933+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
28934+ */
28935+
28936+#include <drm/drmP.h>
28937+#include "psb_drm.h"
28938+#include "psb_drv.h"
28939+#include "psb_reg.h"
28940+#include "psb_scene.h"
28941+#include "ttm/ttm_execbuf_util.h"
28942+
28943+#define PSB_ALLOWED_RASTER_RUNTIME (DRM_HZ * 20)
28944+#define PSB_ALLOWED_TA_RUNTIME (DRM_HZ * 20)
28945+#define PSB_RASTER_TIMEOUT (DRM_HZ / 10)
28946+#define PSB_TA_TIMEOUT (DRM_HZ / 10)
28947+
28948+#undef PSB_SOFTWARE_WORKAHEAD
28949+
28950+#ifdef PSB_STABLE_SETTING
28951+
28952+/*
28953+ * Software blocks completely while the engines are working so there can be no
28954+ * overlap.
28955+ */
28956+
28957+#define PSB_WAIT_FOR_RASTER_COMPLETION
28958+#define PSB_WAIT_FOR_TA_COMPLETION
28959+
28960+#elif defined(PSB_PARANOID_SETTING)
28961+/*
28962+ * Software blocks "almost" completely while the engines are working so there
28963+ * can be no overlap.
28964+ */
28965+
28966+#define PSB_WAIT_FOR_RASTER_COMPLETION
28967+#define PSB_WAIT_FOR_TA_COMPLETION
28968+#define PSB_BE_PARANOID
28969+
28970+#elif defined(PSB_SOME_OVERLAP_BUT_LOCKUP)
28971+/*
28972+ * Software leaps ahead while the rasterizer is running and prepares
28973+ * a new ta job that can be scheduled before the rasterizer has
28974+ * finished.
28975+ */
28976+
28977+#define PSB_WAIT_FOR_TA_COMPLETION
28978+
28979+#elif defined(PSB_SOFTWARE_WORKAHEAD)
28980+/*
28981+ * Don't sync, but allow software to work ahead and queue a number of jobs,
28982+ * while still blocking overlap in the scheduler.
28983+ */
28984+
28985+#define PSB_BLOCK_OVERLAP
28986+#define ONLY_ONE_JOB_IN_RASTER_QUEUE
28987+
28988+#endif
28989+
28990+/*
28991+ * Avoid pixelbe pagefaults on C0.
28992+ */
28993+#if 0
28994+#define PSB_BLOCK_OVERLAP
28995+#endif
28996+
28997+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
28998+ struct psb_scheduler *scheduler,
28999+ uint32_t reply_flag);
29000+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
29001+ struct psb_scheduler *scheduler,
29002+ uint32_t reply_flag);
29003+
29004+#ifdef FIX_TG_16
29005+
29006+void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
29007+static int psb_check_2d_idle(struct drm_psb_private *dev_priv);
29008+
29009+#endif
29010+
29011+void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
29012+ int *lockup, int *idle)
29013+{
29014+ unsigned long irq_flags;
29015+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29016+
29017+ *lockup = 0;
29018+ *idle = 1;
29019+
29020+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29021+
29022+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
29023+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
29024+ *lockup = 1;
29025+ }
29026+ if (!*lockup
29027+ && (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
29028+ && time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
29029+ *lockup = 1;
29030+ }
29031+ if (!*lockup)
29032+ *idle = scheduler->idle;
29033+
29034+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29035+}
29036+
29037+static inline void psb_set_idle(struct psb_scheduler *scheduler)
29038+{
29039+ scheduler->idle =
29040+ (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] == NULL) &&
29041+ (scheduler->current_task[PSB_SCENE_ENGINE_TA] == NULL);
29042+ if (scheduler->idle)
29043+ wake_up(&scheduler->idle_queue);
29044+}
29045+
29046+/*
29047+ * Call with the scheduler spinlock held.
29048+ * Assigns a scene context to either the ta or the rasterizer,
29049+ * flushing out other scenes to memory if necessary.
29050+ */
29051+
29052+static int psb_set_scene_fire(struct psb_scheduler *scheduler,
29053+ struct psb_scene *scene,
29054+ int engine, struct psb_task *task)
29055+{
29056+ uint32_t flags = 0;
29057+ struct psb_hw_scene *hw_scene;
29058+ struct drm_device *dev = scene->dev;
29059+ struct drm_psb_private *dev_priv =
29060+ (struct drm_psb_private *) dev->dev_private;
29061+
29062+ hw_scene = scene->hw_scene;
29063+ if (hw_scene && hw_scene->last_scene == scene) {
29064+
29065+ /*
29066+ * Reuse the last hw scene context and delete it from the
29067+ * free list.
29068+ */
29069+
29070+ PSB_DEBUG_RENDER("Reusing hw scene %d.\n",
29071+ hw_scene->context_number);
29072+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
29073+
29074+ /*
29075+ * No hw context initialization to be done.
29076+ */
29077+
29078+ flags |= PSB_SCENE_FLAG_SETUP_ONLY;
29079+ }
29080+
29081+ list_del_init(&hw_scene->head);
29082+
29083+ } else {
29084+ struct list_head *list;
29085+ hw_scene = NULL;
29086+
29087+ /*
29088+ * Grab a new hw scene context.
29089+ */
29090+
29091+ list_for_each(list, &scheduler->hw_scenes) {
29092+ hw_scene =
29093+ list_entry(list, struct psb_hw_scene, head);
29094+ break;
29095+ }
29096+ BUG_ON(!hw_scene);
29097+ PSB_DEBUG_RENDER("New hw scene %d.\n",
29098+ hw_scene->context_number);
29099+
29100+ list_del_init(list);
29101+ }
29102+ scene->hw_scene = hw_scene;
29103+ hw_scene->last_scene = scene;
29104+
29105+ flags |= PSB_SCENE_FLAG_SETUP;
29106+
29107+ /*
29108+ * Switch context and setup the engine.
29109+ */
29110+
29111+ return psb_xhw_scene_bind_fire(dev_priv,
29112+ &task->buf,
29113+ task->flags,
29114+ hw_scene->context_number,
29115+ scene->hw_cookie,
29116+ task->oom_cmds,
29117+ task->oom_cmd_size,
29118+ scene->hw_data->offset,
29119+ engine, flags | scene->flags);
29120+}
29121+
29122+static inline void psb_report_fence(struct drm_psb_private *dev_priv,
29123+ struct psb_scheduler *scheduler,
29124+ uint32_t class,
29125+ uint32_t sequence,
29126+ uint32_t type, int call_handler)
29127+{
29128+ struct psb_scheduler_seq *seq = &scheduler->seq[type];
29129+ struct ttm_fence_device *fdev = &dev_priv->fdev;
29130+ struct ttm_fence_class_manager *fc = &fdev->fence_class[PSB_ENGINE_TA];
29131+ unsigned long irq_flags;
29132+
29133+	/*
29134+	 * Block racing poll_ta calls that take the lock in write mode.
29135+ */
29136+
29137+ read_lock_irqsave(&fc->lock, irq_flags);
29138+ seq->sequence = sequence;
29139+ seq->reported = 0;
29140+ read_unlock_irqrestore(&fc->lock, irq_flags);
29141+
29142+ if (call_handler)
29143+ psb_fence_handler(scheduler->dev, class);
29144+}
29145+
29146+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
29147+ struct psb_scheduler *scheduler);
29148+
29149+static void psb_schedule_ta(struct drm_psb_private *dev_priv,
29150+ struct psb_scheduler *scheduler)
29151+{
29152+ struct psb_task *task = NULL;
29153+ struct list_head *list, *next;
29154+ int pushed_raster_task = 0;
29155+
29156+ PSB_DEBUG_RENDER("schedule ta\n");
29157+
29158+ if (scheduler->idle_count != 0)
29159+ return;
29160+
29161+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL)
29162+ return;
29163+
29164+ if (scheduler->ta_state)
29165+ return;
29166+
29167+ /*
29168+ * Skip the ta stage for rasterization-only
29169+ * tasks. They arrive here to make sure we're rasterizing
29170+ * tasks in the correct order.
29171+ */
29172+
29173+ list_for_each_safe(list, next, &scheduler->ta_queue) {
29174+ task = list_entry(list, struct psb_task, head);
29175+ if (task->task_type != psb_raster_task && task->task_type != psb_flip_task)
29176+ break;
29177+
29178+ if (task->task_type == psb_flip_task) {
29179+ list_del_init(list);
29180+ list_add_tail(list, &scheduler->raster_queue);
29181+ task = NULL;
29182+ }
29183+ else {
29184+ list_del_init(list);
29185+ list_add_tail(list, &scheduler->raster_queue);
29186+ psb_report_fence(dev_priv, scheduler, task->engine,
29187+ task->sequence,
29188+ _PSB_FENCE_TA_DONE_SHIFT, 1);
29189+ task = NULL;
29190+ pushed_raster_task = 1;
29191+ }
29192+ }
29193+
29194+ if (pushed_raster_task)
29195+ psb_schedule_raster(dev_priv, scheduler);
29196+
29197+ if (!task)
29198+ return;
29199+
29200+ /*
29201+ * Still waiting for a vistest?
29202+ */
29203+
29204+ if (scheduler->feedback_task == task)
29205+ return;
29206+
29207+#ifdef ONLY_ONE_JOB_IN_RASTER_QUEUE
29208+
29209+ /*
29210+ * Block ta from trying to use both hardware contexts
29211+ * without the rasterizer starting to render from one of them.
29212+ */
29213+
29214+ if (!list_empty(&scheduler->raster_queue))
29215+ return;
29216+
29217+#endif
29218+
29219+#ifdef PSB_BLOCK_OVERLAP
29220+ /*
29221+ * Make sure rasterizer isn't doing anything.
29222+ */
29223+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
29224+ return;
29225+#endif
29226+ if (list_empty(&scheduler->hw_scenes))
29227+ return;
29228+
29229+#ifdef FIX_TG_16
29230+ if (psb_check_2d_idle(dev_priv))
29231+ return;
29232+#endif
29233+
29234+ list_del_init(&task->head);
29235+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
29236+ scheduler->ta_state = 1;
29237+
29238+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = task;
29239+ scheduler->idle = 0;
29240+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
29241+ scheduler->total_ta_jiffies = 0;
29242+
29243+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
29244+ 0x00000000 : PSB_RF_FIRE_TA;
29245+
29246+ (void) psb_reg_submit(dev_priv, task->ta_cmds, task->ta_cmd_size);
29247+ psb_set_scene_fire(scheduler, task->scene, PSB_SCENE_ENGINE_TA,
29248+ task);
29249+ psb_schedule_watchdog(dev_priv);
29250+}
29251+
29252+static int psb_fire_raster(struct psb_scheduler *scheduler,
29253+ struct psb_task *task)
29254+{
29255+ struct drm_device *dev = scheduler->dev;
29256+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)
29257+ dev->dev_private;
29258+
29259+ PSB_DEBUG_RENDER("Fire raster %d\n", task->sequence);
29260+
29261+ return psb_xhw_fire_raster(dev_priv, &task->buf, task->flags);
29262+}
29263+
29264+/*
29265+ * Take the first rasterization task from the hp raster queue or from the
29266+ * raster queue and fire the rasterizer.
29267+ */
29268+
29269+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
29270+ struct psb_scheduler *scheduler)
29271+{
29272+ struct psb_task *task;
29273+ struct list_head *list;
29274+ int pipe;
29275+
29276+ if (scheduler->idle_count != 0)
29277+ return;
29278+
29279+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) {
29280+ PSB_DEBUG_RENDER("Raster busy.\n");
29281+ return;
29282+ }
29283+#ifdef PSB_BLOCK_OVERLAP
29284+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) {
29285+ PSB_DEBUG_RENDER("TA busy.\n");
29286+ return;
29287+ }
29288+#endif
29289+
29290+ if (!list_empty(&scheduler->hp_raster_queue))
29291+ list = scheduler->hp_raster_queue.next;
29292+ else if (!list_empty(&scheduler->raster_queue))
29293+ list = scheduler->raster_queue.next;
29294+ else {
29295+ PSB_DEBUG_RENDER("Nothing in list\n");
29296+ return;
29297+ }
29298+
29299+ task = list_entry(list, struct psb_task, head);
29300+
29301+ if (task->task_type == psb_flip_task) {
29302+ for (pipe=0; pipe<2; pipe++) {
29303+ if (dev_priv->pipe_active[pipe] == 1)
29304+ psb_flip_set_base(dev_priv, pipe);
29305+ }
29306+ list_del_init(list);
29307+ task = NULL;
29308+ psb_schedule_raster(dev_priv, scheduler);
29309+ return;
29310+ }
29311+
29312+ /*
29313+ * Sometimes changing ZLS format requires an ISP reset.
29314+ * Doesn't seem to consume too much time.
29315+ */
29316+
29317+ if (task->scene)
29318+ PSB_WSGX32(_PSB_CS_RESET_ISP_RESET, PSB_CR_SOFT_RESET);
29319+
29320+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = task;
29321+
29322+ list_del_init(list);
29323+ scheduler->idle = 0;
29324+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
29325+ scheduler->total_raster_jiffies = 0;
29326+
29327+ if (task->scene)
29328+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
29329+
29330+ (void) psb_reg_submit(dev_priv, task->raster_cmds,
29331+ task->raster_cmd_size);
29332+
29333+ if (task->scene) {
29334+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
29335+ 0x00000000 : PSB_RF_FIRE_RASTER;
29336+ psb_set_scene_fire(scheduler,
29337+ task->scene, PSB_SCENE_ENGINE_RASTER,
29338+ task);
29339+ } else {
29340+ task->reply_flags = PSB_RF_DEALLOC | PSB_RF_FIRE_RASTER;
29341+ psb_fire_raster(scheduler, task);
29342+ }
29343+ psb_schedule_watchdog(dev_priv);
29344+}
29345+
29346+int psb_extend_timeout(struct drm_psb_private *dev_priv,
29347+ uint32_t xhw_lockup)
29348+{
29349+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29350+ unsigned long irq_flags;
29351+ int ret = -EBUSY;
29352+
29353+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29354+
29355+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
29356+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
29357+ if (xhw_lockup & PSB_LOCKUP_TA) {
29358+ goto out_unlock;
29359+ } else {
29360+ scheduler->total_ta_jiffies +=
29361+ jiffies - scheduler->ta_end_jiffies +
29362+ PSB_TA_TIMEOUT;
29363+ if (scheduler->total_ta_jiffies >
29364+ PSB_ALLOWED_TA_RUNTIME)
29365+ goto out_unlock;
29366+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
29367+ }
29368+ }
29369+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL &&
29370+ time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
29371+ if (xhw_lockup & PSB_LOCKUP_RASTER) {
29372+ goto out_unlock;
29373+ } else {
29374+ scheduler->total_raster_jiffies +=
29375+ jiffies - scheduler->raster_end_jiffies +
29376+ PSB_RASTER_TIMEOUT;
29377+ if (scheduler->total_raster_jiffies >
29378+ PSB_ALLOWED_RASTER_RUNTIME)
29379+ goto out_unlock;
29380+ scheduler->raster_end_jiffies =
29381+ jiffies + PSB_RASTER_TIMEOUT;
29382+ }
29383+ }
29384+
29385+ ret = 0;
29386+
29387+out_unlock:
29388+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29389+ return ret;
29390+}
29391+
29392+/*
29393+ * TA done handler.
29394+ */
29395+
29396+static void psb_ta_done(struct drm_psb_private *dev_priv,
29397+ struct psb_scheduler *scheduler)
29398+{
29399+ struct psb_task *task =
29400+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
29401+ struct psb_scene *scene = task->scene;
29402+
29403+ PSB_DEBUG_RENDER("TA done %u\n", task->sequence);
29404+
29405+ switch (task->ta_complete_action) {
29406+ case PSB_RASTER_BLOCK:
29407+ scheduler->ta_state = 1;
29408+ scene->flags |=
29409+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
29410+ list_add_tail(&task->head, &scheduler->raster_queue);
29411+ break;
29412+ case PSB_RASTER:
29413+ scene->flags |=
29414+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
29415+ list_add_tail(&task->head, &scheduler->raster_queue);
29416+ break;
29417+ case PSB_RETURN:
29418+ scheduler->ta_state = 0;
29419+ scene->flags |= PSB_SCENE_FLAG_DIRTY;
29420+ list_add_tail(&scene->hw_scene->head,
29421+ &scheduler->hw_scenes);
29422+
29423+ break;
29424+ }
29425+
29426+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
29427+
29428+#ifdef FIX_TG_16
29429+ psb_2d_atomic_unlock(dev_priv);
29430+#endif
29431+
29432+ if (task->ta_complete_action != PSB_RASTER_BLOCK)
29433+ psb_report_fence(dev_priv, scheduler, task->engine,
29434+ task->sequence,
29435+ _PSB_FENCE_TA_DONE_SHIFT, 1);
29436+
29437+ psb_schedule_raster(dev_priv, scheduler);
29438+ psb_schedule_ta(dev_priv, scheduler);
29439+ psb_set_idle(scheduler);
29440+
29441+ if (task->ta_complete_action != PSB_RETURN)
29442+ return;
29443+
29444+ list_add_tail(&task->head, &scheduler->task_done_queue);
29445+ schedule_delayed_work(&scheduler->wq, 0);
29446+}
29447+
29448+/*
29449+ * Rasterizer done handler.
29450+ */
29451+
29452+static void psb_raster_done(struct drm_psb_private *dev_priv,
29453+ struct psb_scheduler *scheduler)
29454+{
29455+ struct psb_task *task =
29456+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
29457+ struct psb_scene *scene = task->scene;
29458+ uint32_t complete_action = task->raster_complete_action;
29459+
29460+ PSB_DEBUG_RENDER("Raster done %u\n", task->sequence);
29461+
29462+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
29463+
29464+ if (complete_action != PSB_RASTER)
29465+ psb_schedule_raster(dev_priv, scheduler);
29466+
29467+ if (scene) {
29468+ if (task->feedback.page) {
29469+ if (unlikely(scheduler->feedback_task)) {
29470+ /*
29471+ * This should never happen, since the previous
29472+ * feedback query will return before the next
29473+ * raster task is fired.
29474+ */
29475+ DRM_ERROR("Feedback task busy.\n");
29476+ }
29477+ scheduler->feedback_task = task;
29478+ psb_xhw_vistest(dev_priv, &task->buf);
29479+ }
29480+ switch (complete_action) {
29481+ case PSB_RETURN:
29482+ scene->flags &=
29483+ ~(PSB_SCENE_FLAG_DIRTY |
29484+ PSB_SCENE_FLAG_COMPLETE);
29485+ list_add_tail(&scene->hw_scene->head,
29486+ &scheduler->hw_scenes);
29487+ psb_report_fence(dev_priv, scheduler, task->engine,
29488+ task->sequence,
29489+ _PSB_FENCE_SCENE_DONE_SHIFT, 1);
29490+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
29491+ scheduler->ta_state = 0;
29492+
29493+ break;
29494+ case PSB_RASTER:
29495+ list_add(&task->head, &scheduler->raster_queue);
29496+ task->raster_complete_action = PSB_RETURN;
29497+ psb_schedule_raster(dev_priv, scheduler);
29498+ break;
29499+ case PSB_TA:
29500+ list_add(&task->head, &scheduler->ta_queue);
29501+ scheduler->ta_state = 0;
29502+ task->raster_complete_action = PSB_RETURN;
29503+ task->ta_complete_action = PSB_RASTER;
29504+ break;
29505+
29506+ }
29507+ }
29508+ psb_schedule_ta(dev_priv, scheduler);
29509+ psb_set_idle(scheduler);
29510+
29511+ if (complete_action == PSB_RETURN) {
29512+ if (task->scene == NULL) {
29513+ psb_report_fence(dev_priv, scheduler, task->engine,
29514+ task->sequence,
29515+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
29516+ }
29517+ if (!task->feedback.page) {
29518+ list_add_tail(&task->head,
29519+ &scheduler->task_done_queue);
29520+ schedule_delayed_work(&scheduler->wq, 0);
29521+ }
29522+ }
29523+}
29524+
29525+void psb_scheduler_pause(struct drm_psb_private *dev_priv)
29526+{
29527+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29528+ unsigned long irq_flags;
29529+
29530+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29531+ scheduler->idle_count++;
29532+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29533+}
29534+
29535+void psb_scheduler_restart(struct drm_psb_private *dev_priv)
29536+{
29537+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29538+ unsigned long irq_flags;
29539+
29540+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29541+ if (--scheduler->idle_count == 0) {
29542+ psb_schedule_ta(dev_priv, scheduler);
29543+ psb_schedule_raster(dev_priv, scheduler);
29544+ }
29545+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29546+}
29547+
29548+int psb_scheduler_idle(struct drm_psb_private *dev_priv)
29549+{
29550+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29551+ unsigned long irq_flags;
29552+ int ret;
29553+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29554+ ret = scheduler->idle_count != 0 && scheduler->idle;
29555+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29556+ return ret;
29557+}
29558+
29559+int psb_scheduler_finished(struct drm_psb_private *dev_priv)
29560+{
29561+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29562+ unsigned long irq_flags;
29563+ int ret;
29564+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29565+ ret = (scheduler->idle &&
29566+ list_empty(&scheduler->raster_queue) &&
29567+ list_empty(&scheduler->ta_queue) &&
29568+ list_empty(&scheduler->hp_raster_queue));
29569+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29570+ return ret;
29571+}
29572+
29573+static void psb_ta_oom(struct drm_psb_private *dev_priv,
29574+ struct psb_scheduler *scheduler)
29575+{
29576+
29577+ struct psb_task *task =
29578+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
29579+ if (!task)
29580+ return;
29581+
29582+ if (task->aborting)
29583+ return;
29584+ task->aborting = 1;
29585+
29586+ DRM_INFO("Info: TA out of parameter memory.\n");
29587+
29588+ (void) psb_xhw_ta_oom(dev_priv, &task->buf,
29589+ task->scene->hw_cookie);
29590+}
29591+
29592+static void psb_ta_oom_reply(struct drm_psb_private *dev_priv,
29593+ struct psb_scheduler *scheduler)
29594+{
29595+
29596+ struct psb_task *task =
29597+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
29598+ uint32_t flags;
29599+ if (!task)
29600+ return;
29601+
29602+ psb_xhw_ta_oom_reply(dev_priv, &task->buf,
29603+ task->scene->hw_cookie,
29604+ &task->ta_complete_action,
29605+ &task->raster_complete_action, &flags);
29606+ task->flags |= flags;
29607+ task->aborting = 0;
29608+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM_REPLY);
29609+}
29610+
29611+static void psb_ta_hw_scene_freed(struct drm_psb_private *dev_priv,
29612+ struct psb_scheduler *scheduler)
29613+{
29614+ DRM_ERROR("TA hw scene freed.\n");
29615+}
29616+
29617+static void psb_vistest_reply(struct drm_psb_private *dev_priv,
29618+ struct psb_scheduler *scheduler)
29619+{
29620+ struct psb_task *task = scheduler->feedback_task;
29621+ uint8_t *feedback_map;
29622+ uint32_t add;
29623+ uint32_t cur;
29624+ struct drm_psb_vistest *vistest;
29625+ int i;
29626+
29627+ scheduler->feedback_task = NULL;
29628+ if (!task) {
29629+ DRM_ERROR("No Poulsbo feedback task.\n");
29630+ return;
29631+ }
29632+ if (!task->feedback.page) {
29633+ DRM_ERROR("No Poulsbo feedback page.\n");
29634+ goto out;
29635+ }
29636+
29637+ if (in_irq())
29638+ feedback_map = kmap_atomic(task->feedback.page, KM_IRQ0);
29639+ else
29640+ feedback_map = kmap_atomic(task->feedback.page, KM_USER0);
29641+
29642+ /*
29643+ * Loop over all requested vistest components here.
29644+ * Only one (vistest) currently.
29645+ */
29646+
29647+ vistest = (struct drm_psb_vistest *)
29648+ (feedback_map + task->feedback.offset);
29649+
29650+ for (i = 0; i < PSB_HW_FEEDBACK_SIZE; ++i) {
29651+ add = task->buf.arg.arg.feedback[i];
29652+ cur = vistest->vt[i];
29653+
29654+ /*
29655+ * Vistest saturates.
29656+ */
29657+
29658+ vistest->vt[i] = (cur + add < cur) ? ~0 : cur + add;
29659+ }
29660+ if (in_irq())
29661+ kunmap_atomic(feedback_map, KM_IRQ0);
29662+ else
29663+ kunmap_atomic(feedback_map, KM_USER0);
29664+out:
29665+ psb_report_fence(dev_priv, scheduler, task->engine, task->sequence,
29666+ _PSB_FENCE_FEEDBACK_SHIFT, 1);
29667+
29668+ if (list_empty(&task->head)) {
29669+ list_add_tail(&task->head, &scheduler->task_done_queue);
29670+ schedule_delayed_work(&scheduler->wq, 0);
29671+ } else
29672+ psb_schedule_ta(dev_priv, scheduler);
29673+}
29674+
29675+static void psb_ta_fire_reply(struct drm_psb_private *dev_priv,
29676+ struct psb_scheduler *scheduler)
29677+{
29678+ struct psb_task *task =
29679+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
29680+
29681+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
29682+
29683+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_FIRE_TA);
29684+}
29685+
29686+static void psb_raster_fire_reply(struct drm_psb_private *dev_priv,
29687+ struct psb_scheduler *scheduler)
29688+{
29689+ struct psb_task *task =
29690+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
29691+ uint32_t reply_flags;
29692+
29693+ if (!task) {
29694+ DRM_ERROR("Null task.\n");
29695+ return;
29696+ }
29697+
29698+ task->raster_complete_action = task->buf.arg.arg.sb.rca;
29699+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
29700+
29701+ reply_flags = PSB_RF_FIRE_RASTER;
29702+ if (task->raster_complete_action == PSB_RASTER)
29703+ reply_flags |= PSB_RF_DEALLOC;
29704+
29705+ psb_dispatch_raster(dev_priv, scheduler, reply_flags);
29706+}
29707+
29708+static int psb_user_interrupt(struct drm_psb_private *dev_priv,
29709+ struct psb_scheduler *scheduler)
29710+{
29711+ uint32_t type;
29712+ int ret;
29713+ unsigned long irq_flags;
29714+
29715+ /*
29716+ * Xhw cannot write directly to the comm page, so
29717+ * do it here. Firmware would have written directly.
29718+ */
29719+
29720+ ret = psb_xhw_handler(dev_priv);
29721+ if (unlikely(ret))
29722+ return ret;
29723+
29724+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
29725+ type = dev_priv->comm[PSB_COMM_USER_IRQ];
29726+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
29727+ if (dev_priv->comm[PSB_COMM_USER_IRQ_LOST]) {
29728+ dev_priv->comm[PSB_COMM_USER_IRQ_LOST] = 0;
29729+ DRM_ERROR("Lost Poulsbo hardware event.\n");
29730+ }
29731+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
29732+
29733+ if (type == 0)
29734+ return 0;
29735+
29736+ switch (type) {
29737+ case PSB_UIRQ_VISTEST:
29738+ psb_vistest_reply(dev_priv, scheduler);
29739+ break;
29740+ case PSB_UIRQ_OOM_REPLY:
29741+ psb_ta_oom_reply(dev_priv, scheduler);
29742+ break;
29743+ case PSB_UIRQ_FIRE_TA_REPLY:
29744+ psb_ta_fire_reply(dev_priv, scheduler);
29745+ break;
29746+ case PSB_UIRQ_FIRE_RASTER_REPLY:
29747+ psb_raster_fire_reply(dev_priv, scheduler);
29748+ break;
29749+ default:
29750+ DRM_ERROR("Unknown Poulsbo hardware event. %d\n", type);
29751+ }
29752+ return 0;
29753+}
29754+
29755+int psb_forced_user_interrupt(struct drm_psb_private *dev_priv)
29756+{
29757+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29758+ unsigned long irq_flags;
29759+ int ret;
29760+
29761+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29762+ ret = psb_user_interrupt(dev_priv, scheduler);
29763+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29764+ return ret;
29765+}
29766+
29767+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
29768+ struct psb_scheduler *scheduler,
29769+ uint32_t reply_flag)
29770+{
29771+ struct psb_task *task =
29772+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
29773+ uint32_t flags;
29774+ uint32_t mask;
29775+
29776+ if (unlikely(!task))
29777+ return;
29778+
29779+ task->reply_flags |= reply_flag;
29780+ flags = task->reply_flags;
29781+ mask = PSB_RF_FIRE_TA;
29782+
29783+ if (!(flags & mask))
29784+ return;
29785+
29786+ mask = PSB_RF_TA_DONE;
29787+ if ((flags & mask) == mask) {
29788+ task->reply_flags &= ~mask;
29789+ psb_ta_done(dev_priv, scheduler);
29790+ }
29791+
29792+ mask = PSB_RF_OOM;
29793+ if ((flags & mask) == mask) {
29794+ task->reply_flags &= ~mask;
29795+ psb_ta_oom(dev_priv, scheduler);
29796+ }
29797+
29798+ mask = (PSB_RF_OOM_REPLY | PSB_RF_TERMINATE);
29799+ if ((flags & mask) == mask) {
29800+ task->reply_flags &= ~mask;
29801+ psb_ta_done(dev_priv, scheduler);
29802+ }
29803+}
29804+
29805+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
29806+ struct psb_scheduler *scheduler,
29807+ uint32_t reply_flag)
29808+{
29809+ struct psb_task *task =
29810+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
29811+ uint32_t flags;
29812+ uint32_t mask;
29813+
29814+ if (unlikely(!task))
29815+ return;
29816+
29817+ task->reply_flags |= reply_flag;
29818+ flags = task->reply_flags;
29819+ mask = PSB_RF_FIRE_RASTER;
29820+
29821+ if (!(flags & mask))
29822+ return;
29823+
29824+ /*
29825+ * For rasterizer-only tasks, don't report fence done here,
29826+ * as this is time consuming and the rasterizer wants a new
29827+ * task immediately. For other tasks, the hardware is probably
29828+ * still busy deallocating TA memory, so we can report
29829+ * fence done in parallel.
29830+ */
29831+
29832+ if (task->raster_complete_action == PSB_RETURN &&
29833+ (reply_flag & PSB_RF_RASTER_DONE) && task->scene != NULL) {
29834+ psb_report_fence(dev_priv, scheduler, task->engine,
29835+ task->sequence,
29836+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
29837+ }
29838+
29839+ mask = PSB_RF_RASTER_DONE | PSB_RF_DEALLOC;
29840+ if ((flags & mask) == mask) {
29841+ task->reply_flags &= ~mask;
29842+ psb_raster_done(dev_priv, scheduler);
29843+ }
29844+}
29845+
29846+void psb_scheduler_handler(struct drm_psb_private *dev_priv,
29847+ uint32_t status)
29848+{
29849+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29850+
29851+ spin_lock(&scheduler->lock);
29852+
29853+ if (status & _PSB_CE_PIXELBE_END_RENDER) {
29854+ psb_dispatch_raster(dev_priv, scheduler,
29855+ PSB_RF_RASTER_DONE);
29856+ }
29857+ if (status & _PSB_CE_DPM_3D_MEM_FREE)
29858+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_DEALLOC);
29859+
29860+ if (status & _PSB_CE_TA_FINISHED)
29861+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TA_DONE);
29862+
29863+ if (status & _PSB_CE_TA_TERMINATE)
29864+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TERMINATE);
29865+
29866+ if (status & (_PSB_CE_DPM_REACHED_MEM_THRESH |
29867+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
29868+ _PSB_CE_DPM_OUT_OF_MEMORY_MT)) {
29869+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM);
29870+ }
29871+ if (status & _PSB_CE_DPM_TA_MEM_FREE)
29872+ psb_ta_hw_scene_freed(dev_priv, scheduler);
29873+
29874+ if (status & _PSB_CE_SW_EVENT)
29875+ psb_user_interrupt(dev_priv, scheduler);
29876+
29877+ spin_unlock(&scheduler->lock);
29878+}
29879+
29880+static void psb_free_task_wq(struct work_struct *work)
29881+{
29882+ struct psb_scheduler *scheduler =
29883+ container_of(work, struct psb_scheduler, wq.work);
29884+
29885+ struct list_head *list, *next;
29886+ unsigned long irq_flags;
29887+ struct psb_task *task;
29888+
29889+ if (!mutex_trylock(&scheduler->task_wq_mutex))
29890+ return;
29891+
29892+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29893+ list_for_each_safe(list, next, &scheduler->task_done_queue) {
29894+ task = list_entry(list, struct psb_task, head);
29895+ list_del_init(list);
29896+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29897+
29898+ PSB_DEBUG_RENDER("Checking Task %d: Scene 0x%08lx, "
29899+ "Feedback bo 0x%08lx, done %d\n",
29900+ task->sequence,
29901+ (unsigned long) task->scene,
29902+ (unsigned long) task->feedback.bo,
29903+ atomic_read(&task->buf.done));
29904+
29905+ if (task->scene) {
29906+ PSB_DEBUG_RENDER("Unref scene %d\n",
29907+ task->sequence);
29908+ psb_scene_unref(&task->scene);
29909+ if (task->feedback.bo) {
29910+ PSB_DEBUG_RENDER("Unref feedback bo %d\n",
29911+ task->sequence);
29912+ ttm_bo_unref(&task->feedback.bo);
29913+ }
29914+ }
29915+
29916+ if (atomic_read(&task->buf.done)) {
29917+ PSB_DEBUG_RENDER("Deleting task %d\n",
29918+ task->sequence);
29919+ kfree(task);
29920+ task = NULL;
29921+ }
29922+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29923+ if (task != NULL)
29924+ list_add(list, &scheduler->task_done_queue);
29925+ }
29926+ if (!list_empty(&scheduler->task_done_queue)) {
29927+ PSB_DEBUG_RENDER("Rescheduling wq\n");
29928+ schedule_delayed_work(&scheduler->wq, 1);
29929+ }
29930+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29931+ if (list_empty(&scheduler->task_done_queue) &&
29932+ drm_psb_ospm && IS_MRST(scheduler->dev)) {
29933+ psb_try_power_down_sgx(scheduler->dev);
29934+ }
29935+ mutex_unlock(&scheduler->task_wq_mutex);
29936+}
29937+
29938+static void psb_powerdown_topaz(struct work_struct *work)
29939+{
29940+ struct psb_scheduler *scheduler =
29941+ container_of(work, struct psb_scheduler, topaz_suspend_wq.work);
29942+
29943+ if (!mutex_trylock(&scheduler->topaz_power_mutex))
29944+ return;
29945+
29946+ psb_try_power_down_topaz(scheduler->dev);
29947+ mutex_unlock(&scheduler->topaz_power_mutex);
29948+}
29949+
29950+static void psb_powerdown_msvdx(struct work_struct *work)
29951+{
29952+ struct psb_scheduler *scheduler =
29953+ container_of(work, struct psb_scheduler, msvdx_suspend_wq.work);
29954+
29955+ if (!mutex_trylock(&scheduler->msvdx_power_mutex))
29956+ return;
29957+
29958+ psb_try_power_down_msvdx(scheduler->dev);
29959+ mutex_unlock(&scheduler->msvdx_power_mutex);
29960+}
29961+
29962+/*
29963+ * Check if any of the tasks in the queues is using a scene.
29964+ * In that case we know the TA memory buffer objects are
29965+ * fenced and will not be evicted until that fence is signaled.
29966+ */
29967+
29968+void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv)
29969+{
29970+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29971+ unsigned long irq_flags;
29972+ struct psb_task *task;
29973+ struct psb_task *next_task;
29974+
29975+ dev_priv->force_ta_mem_load = 1;
29976+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29977+ list_for_each_entry_safe(task, next_task, &scheduler->ta_queue,
29978+ head) {
29979+ if (task->scene) {
29980+ dev_priv->force_ta_mem_load = 0;
29981+ break;
29982+ }
29983+ }
29984+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
29985+ head) {
29986+ if (task->scene) {
29987+ dev_priv->force_ta_mem_load = 0;
29988+ break;
29989+ }
29990+ }
29991+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29992+}
29993+
29994+void psb_scheduler_reset(struct drm_psb_private *dev_priv,
29995+ int error_condition)
29996+{
29997+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29998+ unsigned long wait_jiffies;
29999+ unsigned long cur_jiffies;
30000+ struct psb_task *task;
30001+ struct psb_task *next_task;
30002+ unsigned long irq_flags;
30003+
30004+ psb_scheduler_pause(dev_priv);
30005+ if (!psb_scheduler_idle(dev_priv)) {
30006+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30007+
30008+ cur_jiffies = jiffies;
30009+ wait_jiffies = cur_jiffies;
30010+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] &&
30011+ time_after_eq(scheduler->ta_end_jiffies, wait_jiffies))
30012+ wait_jiffies = scheduler->ta_end_jiffies;
30013+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] &&
30014+ time_after_eq(scheduler->raster_end_jiffies,
30015+ wait_jiffies))
30016+ wait_jiffies = scheduler->raster_end_jiffies;
30017+
30018+ wait_jiffies -= cur_jiffies;
30019+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30020+
30021+ (void) wait_event_timeout(scheduler->idle_queue,
30022+ psb_scheduler_idle(dev_priv),
30023+ wait_jiffies);
30024+ }
30025+
30026+ if (!psb_scheduler_idle(dev_priv)) {
30027+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30028+ task = scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
30029+ if (task) {
30030+ DRM_ERROR("Detected Poulsbo rasterizer lockup.\n");
30031+ if (task->engine == PSB_ENGINE_HPRAST) {
30032+ psb_fence_error(scheduler->dev,
30033+ PSB_ENGINE_HPRAST,
30034+ task->sequence,
30035+ _PSB_FENCE_TYPE_RASTER_DONE,
30036+ error_condition);
30037+
30038+ list_del(&task->head);
30039+ psb_xhw_clean_buf(dev_priv, &task->buf);
30040+ list_add_tail(&task->head,
30041+ &scheduler->task_done_queue);
30042+ } else {
30043+ list_add(&task->head,
30044+ &scheduler->raster_queue);
30045+ }
30046+ }
30047+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
30048+ task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
30049+ if (task) {
30050+ DRM_ERROR("Detected Poulsbo ta lockup.\n");
30051+ list_add_tail(&task->head,
30052+ &scheduler->raster_queue);
30053+#ifdef FIX_TG_16
30054+ psb_2d_atomic_unlock(dev_priv);
30055+#endif
30056+ }
30057+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
30058+ scheduler->ta_state = 0;
30059+
30060+#ifdef FIX_TG_16
30061+ atomic_set(&dev_priv->ta_wait_2d, 0);
30062+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
30063+ wake_up(&dev_priv->queue_2d);
30064+#endif
30065+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30066+ }
30067+
30068+ /*
30069+ * Empty raster queue.
30070+ */
30071+
30072+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30073+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
30074+ head) {
30075+ struct psb_scene *scene = task->scene;
30076+
30077+ DRM_INFO("Signaling fence sequence %u\n",
30078+ task->sequence);
30079+
30080+ psb_fence_error(scheduler->dev,
30081+ task->engine,
30082+ task->sequence,
30083+ _PSB_FENCE_TYPE_TA_DONE |
30084+ _PSB_FENCE_TYPE_RASTER_DONE |
30085+ _PSB_FENCE_TYPE_SCENE_DONE |
30086+ _PSB_FENCE_TYPE_FEEDBACK, error_condition);
30087+ if (scene) {
30088+ scene->flags = 0;
30089+ if (scene->hw_scene) {
30090+ list_add_tail(&scene->hw_scene->head,
30091+ &scheduler->hw_scenes);
30092+ scene->hw_scene = NULL;
30093+ }
30094+ }
30095+
30096+ psb_xhw_clean_buf(dev_priv, &task->buf);
30097+ list_del(&task->head);
30098+ list_add_tail(&task->head, &scheduler->task_done_queue);
30099+ }
30100+
30101+ schedule_delayed_work(&scheduler->wq, 1);
30102+ scheduler->idle = 1;
30103+ wake_up(&scheduler->idle_queue);
30104+
30105+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30106+ psb_scheduler_restart(dev_priv);
30107+
30108+}
30109+
30110+int psb_scheduler_init(struct drm_device *dev,
30111+ struct psb_scheduler *scheduler)
30112+{
30113+ struct psb_hw_scene *hw_scene;
30114+ int i;
30115+
30116+ memset(scheduler, 0, sizeof(*scheduler));
30117+ scheduler->dev = dev;
30118+ mutex_init(&scheduler->task_wq_mutex);
30119+ mutex_init(&scheduler->topaz_power_mutex);
30120+ mutex_init(&scheduler->msvdx_power_mutex);
30121+ spin_lock_init(&scheduler->lock);
30122+ scheduler->idle = 1;
30123+
30124+ INIT_LIST_HEAD(&scheduler->ta_queue);
30125+ INIT_LIST_HEAD(&scheduler->raster_queue);
30126+ INIT_LIST_HEAD(&scheduler->hp_raster_queue);
30127+ INIT_LIST_HEAD(&scheduler->hw_scenes);
30128+ INIT_LIST_HEAD(&scheduler->task_done_queue);
30129+ INIT_DELAYED_WORK(&scheduler->wq, &psb_free_task_wq);
30130+ INIT_DELAYED_WORK(&scheduler->topaz_suspend_wq,
30131+ &psb_powerdown_topaz);
30132+ INIT_DELAYED_WORK(&scheduler->msvdx_suspend_wq,
30133+ &psb_powerdown_msvdx);
30134+ init_waitqueue_head(&scheduler->idle_queue);
30135+
30136+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
30137+ hw_scene = &scheduler->hs[i];
30138+ hw_scene->context_number = i;
30139+ list_add_tail(&hw_scene->head, &scheduler->hw_scenes);
30140+ }
30141+
30142+ for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i)
30143+ scheduler->seq[i].reported = 0;
30144+ return 0;
30145+}
30146+
30147+/*
30148+ * Scene references maintained by the scheduler are not refcounted.
30149+ * Remove all references to a particular scene here.
30150+ */
30151+
30152+void psb_scheduler_remove_scene_refs(struct psb_scene *scene)
30153+{
30154+ struct drm_psb_private *dev_priv =
30155+ (struct drm_psb_private *) scene->dev->dev_private;
30156+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30157+ struct psb_hw_scene *hw_scene;
30158+ unsigned long irq_flags;
30159+ unsigned int i;
30160+
30161+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30162+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
30163+ hw_scene = &scheduler->hs[i];
30164+ if (hw_scene->last_scene == scene) {
30165+ BUG_ON(list_empty(&hw_scene->head));
30166+ hw_scene->last_scene = NULL;
30167+ }
30168+ }
30169+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30170+}
30171+
30172+void psb_scheduler_takedown(struct psb_scheduler *scheduler)
30173+{
30174+ flush_scheduled_work();
30175+}
30176+
30177+static int psb_setup_task(struct drm_device *dev,
30178+ struct drm_psb_cmdbuf_arg *arg,
30179+ struct ttm_buffer_object *raster_cmd_buffer,
30180+ struct ttm_buffer_object *ta_cmd_buffer,
30181+ struct ttm_buffer_object *oom_cmd_buffer,
30182+ struct psb_scene *scene,
30183+ enum psb_task_type task_type,
30184+ uint32_t engine,
30185+ uint32_t flags, struct psb_task **task_p)
30186+{
30187+ struct psb_task *task;
30188+ int ret;
30189+
30190+ if (ta_cmd_buffer && arg->ta_size > PSB_MAX_TA_CMDS) {
30191+ DRM_ERROR("Too many ta cmds %d.\n", arg->ta_size);
30192+ return -EINVAL;
30193+ }
30194+ if (raster_cmd_buffer && arg->cmdbuf_size > PSB_MAX_RASTER_CMDS) {
30195+ DRM_ERROR("Too many raster cmds %d.\n", arg->cmdbuf_size);
30196+ return -EINVAL;
30197+ }
30198+ if (oom_cmd_buffer && arg->oom_size > PSB_MAX_OOM_CMDS) {
30199+ DRM_ERROR("Too many oom cmds %d.\n", arg->oom_size);
30200+ return -EINVAL;
30201+ }
30202+
30203+ task = kzalloc(sizeof(*task), GFP_KERNEL);
30204+ if (!task)
30205+ return -ENOMEM;
30206+
30207+ atomic_set(&task->buf.done, 1);
30208+ task->engine = engine;
30209+ INIT_LIST_HEAD(&task->head);
30210+ INIT_LIST_HEAD(&task->buf.head);
30211+ if (ta_cmd_buffer && arg->ta_size != 0) {
30212+ task->ta_cmd_size = arg->ta_size;
30213+ ret = psb_submit_copy_cmdbuf(dev, ta_cmd_buffer,
30214+ arg->ta_offset,
30215+ arg->ta_size,
30216+ PSB_ENGINE_TA, task->ta_cmds);
30217+ if (ret)
30218+ goto out_err;
30219+ }
30220+ if (raster_cmd_buffer) {
30221+ task->raster_cmd_size = arg->cmdbuf_size;
30222+ ret = psb_submit_copy_cmdbuf(dev, raster_cmd_buffer,
30223+ arg->cmdbuf_offset,
30224+ arg->cmdbuf_size,
30225+ PSB_ENGINE_TA,
30226+ task->raster_cmds);
30227+ if (ret)
30228+ goto out_err;
30229+ }
30230+ if (oom_cmd_buffer && arg->oom_size != 0) {
30231+ task->oom_cmd_size = arg->oom_size;
30232+ ret = psb_submit_copy_cmdbuf(dev, oom_cmd_buffer,
30233+ arg->oom_offset,
30234+ arg->oom_size,
30235+ PSB_ENGINE_TA,
30236+ task->oom_cmds);
30237+ if (ret)
30238+ goto out_err;
30239+ }
30240+ task->task_type = task_type;
30241+ task->flags = flags;
30242+ if (scene)
30243+ task->scene = psb_scene_ref(scene);
30244+
30245+ *task_p = task;
30246+ return 0;
30247+out_err:
30248+ kfree(task);
30249+ *task_p = NULL;
30250+ return ret;
30251+}
30252+
30253+int psb_cmdbuf_ta(struct drm_file *priv,
30254+ struct psb_context *context,
30255+ struct drm_psb_cmdbuf_arg *arg,
30256+ struct ttm_buffer_object *cmd_buffer,
30257+ struct ttm_buffer_object *ta_buffer,
30258+ struct ttm_buffer_object *oom_buffer,
30259+ struct psb_scene *scene,
30260+ struct psb_feedback_info *feedback,
30261+ struct psb_ttm_fence_rep *fence_arg)
30262+{
30263+ struct drm_device *dev = priv->minor->dev;
30264+ struct drm_psb_private *dev_priv = dev->dev_private;
30265+ struct ttm_fence_object *fence = NULL;
30266+ struct psb_task *task = NULL;
30267+ int ret;
30268+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30269+ uint32_t sequence;
30270+
30271+ PSB_DEBUG_RENDER("Cmdbuf ta\n");
30272+
30273+ ret = psb_setup_task(dev, arg, cmd_buffer, ta_buffer,
30274+ oom_buffer, scene,
30275+ psb_ta_task, PSB_ENGINE_TA,
30276+ PSB_FIRE_FLAG_RASTER_DEALLOC, &task);
30277+
30278+ if (ret)
30279+ goto out_err;
30280+
30281+ task->feedback = *feedback;
30282+ mutex_lock(&dev_priv->reset_mutex);
30283+
30284+ /*
30285+ * Hand the task over to the scheduler.
30286+ */
30287+
30288+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
30289+
30290+ task->ta_complete_action = PSB_RASTER;
30291+ task->raster_complete_action = PSB_RETURN;
30292+ sequence = task->sequence;
30293+
30294+ spin_lock_irq(&scheduler->lock);
30295+
30296+ list_add_tail(&task->head, &scheduler->ta_queue);
30297+ PSB_DEBUG_RENDER("queued ta %u\n", task->sequence);
30298+
30299+ psb_schedule_ta(dev_priv, scheduler);
30300+
30301+ /*
30302+ * From this point we may no longer dereference task,
30303+ * as the object it points to may be freed by another thread.
30304+ */
30305+
30306+ task = NULL;
30307+ spin_unlock_irq(&scheduler->lock);
30308+ mutex_unlock(&dev_priv->reset_mutex);
30309+
30310+ psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types,
30311+ arg->fence_flags,
30312+ &context->validate_list, fence_arg, &fence);
30313+ ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence);
30314+
30315+ if (fence) {
30316+ spin_lock_irq(&scheduler->lock);
30317+ psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA,
30318+ sequence, _PSB_FENCE_EXE_SHIFT, 1);
30319+ spin_unlock_irq(&scheduler->lock);
30320+ fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE;
30321+ }
30322+
30323+out_err:
30324+ if (ret && ret != -ERESTART)
30325+ DRM_ERROR("TA task queue job failed.\n");
30326+
30327+ if (fence) {
30328+#ifdef PSB_WAIT_FOR_TA_COMPLETION
30329+ ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
30330+ _PSB_FENCE_TYPE_TA_DONE);
30331+#ifdef PSB_BE_PARANOID
30332+ ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
30333+ _PSB_FENCE_TYPE_SCENE_DONE);
30334+#endif
30335+#endif
30336+ ttm_fence_object_unref(&fence);
30337+ }
30338+ return ret;
30339+}
30340+
30341+int psb_cmdbuf_raster(struct drm_file *priv,
30342+ struct psb_context *context,
30343+ struct drm_psb_cmdbuf_arg *arg,
30344+ struct ttm_buffer_object *cmd_buffer,
30345+ struct psb_ttm_fence_rep *fence_arg)
30346+{
30347+ struct drm_device *dev = priv->minor->dev;
30348+ struct drm_psb_private *dev_priv = dev->dev_private;
30349+ struct ttm_fence_object *fence = NULL;
30350+ struct psb_task *task = NULL;
30351+ int ret;
30352+ uint32_t sequence;
30353+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30354+
30355+ PSB_DEBUG_RENDER("Cmdbuf Raster\n");
30356+
30357+ ret = psb_setup_task(dev, arg, cmd_buffer, NULL, NULL,
30358+ NULL, psb_raster_task,
30359+ PSB_ENGINE_TA, 0, &task);
30360+
30361+ if (ret)
30362+ goto out_err;
30363+
30364+ /*
30365+ * Hand the task over to the scheduler.
30366+ */
30367+
30368+ mutex_lock(&dev_priv->reset_mutex);
30369+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
30370+ task->ta_complete_action = PSB_RASTER;
30371+ task->raster_complete_action = PSB_RETURN;
30372+ sequence = task->sequence;
30373+
30374+ spin_lock_irq(&scheduler->lock);
30375+ list_add_tail(&task->head, &scheduler->ta_queue);
30376+ PSB_DEBUG_RENDER("queued raster %u\n", task->sequence);
30377+ psb_schedule_ta(dev_priv, scheduler);
30378+
30379+ /*
30380+ * From this point we may no longer dereference task,
30381+ * as the object it points to may be freed by another thread.
30382+ */
30383+
30384+ task = NULL;
30385+ spin_unlock_irq(&scheduler->lock);
30386+ mutex_unlock(&dev_priv->reset_mutex);
30387+
30388+ psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types,
30389+ arg->fence_flags,
30390+ &context->validate_list, fence_arg, &fence);
30391+
30392+ ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence);
30393+ if (fence) {
30394+ spin_lock_irq(&scheduler->lock);
30395+ psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA, sequence,
30396+ _PSB_FENCE_EXE_SHIFT, 1);
30397+ spin_unlock_irq(&scheduler->lock);
30398+ fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE;
30399+ }
30400+out_err:
30401+ if (ret && ret != -ERESTART)
30402+ DRM_ERROR("Raster task queue job failed.\n");
30403+
30404+ if (fence) {
30405+#ifdef PSB_WAIT_FOR_RASTER_COMPLETION
30406+ ttm_fence_object_wait(fence, 1, 1, fence->type);
30407+#endif
30408+ ttm_fence_object_unref(&fence);
30409+ }
30410+
30411+ return ret;
30412+}
30413+
30414+#ifdef FIX_TG_16
30415+
30416+static int psb_check_2d_idle(struct drm_psb_private *dev_priv)
30417+{
30418+ if (psb_2d_trylock(dev_priv)) {
30419+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
30420+ !((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
30421+ _PSB_C2B_STATUS_BUSY))) {
30422+ return 0;
30423+ }
30424+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 0, 1) == 0)
30425+ psb_2D_irq_on(dev_priv);
30426+
30427+ PSB_WSGX32(PSB_2D_FENCE_BH, PSB_SGX_2D_SLAVE_PORT);
30428+ PSB_WSGX32(PSB_2D_FLUSH_BH, PSB_SGX_2D_SLAVE_PORT);
30429+ (void) PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT);
30430+
30431+ psb_2d_atomic_unlock(dev_priv);
30432+ }
30433+
30434+ atomic_set(&dev_priv->ta_wait_2d, 1);
30435+ return -EBUSY;
30436+}
30437+
30438+static void psb_atomic_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
30439+{
30440+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30441+
30442+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d, 1, 0) == 1) {
30443+ psb_schedule_ta(dev_priv, scheduler);
30444+ if (atomic_read(&dev_priv->waiters_2d) != 0)
30445+ wake_up(&dev_priv->queue_2d);
30446+ }
30447+}
30448+
30449+void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
30450+{
30451+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30452+ unsigned long irq_flags;
30453+
30454+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30455+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 1, 0) == 1) {
30456+ atomic_set(&dev_priv->ta_wait_2d, 0);
30457+ psb_2D_irq_off(dev_priv);
30458+ psb_schedule_ta(dev_priv, scheduler);
30459+ if (atomic_read(&dev_priv->waiters_2d) != 0)
30460+ wake_up(&dev_priv->queue_2d);
30461+ }
30462+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30463+}
30464+
30465+/*
30466+ * 2D locking functions. Can't use a mutex since the trylock() and
30467+ * unlock() methods need to be accessible from interrupt context.
30468+ */
30469+
30470+int psb_2d_trylock(struct drm_psb_private *dev_priv)
30471+{
30472+ return atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0;
30473+}
30474+
30475+void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv)
30476+{
30477+ atomic_set(&dev_priv->lock_2d, 0);
30478+ if (atomic_read(&dev_priv->waiters_2d) != 0)
30479+ wake_up(&dev_priv->queue_2d);
30480+}
30481+
30482+void psb_2d_unlock(struct drm_psb_private *dev_priv)
30483+{
30484+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30485+ unsigned long irq_flags;
30486+
30487+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30488+ psb_2d_atomic_unlock(dev_priv);
30489+ if (atomic_read(&dev_priv->ta_wait_2d) != 0)
30490+ psb_atomic_resume_ta_2d_idle(dev_priv);
30491+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30492+}
30493+
30494+void psb_2d_lock(struct drm_psb_private *dev_priv)
30495+{
30496+ atomic_inc(&dev_priv->waiters_2d);
30497+ wait_event(dev_priv->queue_2d,
30498+ atomic_read(&dev_priv->ta_wait_2d) == 0);
30499+ wait_event(dev_priv->queue_2d, psb_2d_trylock(dev_priv));
30500+ atomic_dec(&dev_priv->waiters_2d);
30501+}
30502+
30503+#endif
30504diff --git a/drivers/gpu/drm/psb/psb_schedule.h b/drivers/gpu/drm/psb/psb_schedule.h
30505new file mode 100644
30506index 0000000..01c27b0
30507--- /dev/null
30508+++ b/drivers/gpu/drm/psb/psb_schedule.h
30509@@ -0,0 +1,181 @@
30510+/**************************************************************************
30511+ * Copyright (c) 2007, Intel Corporation.
30512+ * All Rights Reserved.
30513+ *
30514+ * This program is free software; you can redistribute it and/or modify it
30515+ * under the terms and conditions of the GNU General Public License,
30516+ * version 2, as published by the Free Software Foundation.
30517+ *
30518+ * This program is distributed in the hope it will be useful, but WITHOUT
30519+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
30520+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
30521+ * more details.
30522+ *
30523+ * You should have received a copy of the GNU General Public License along with
30524+ * this program; if not, write to the Free Software Foundation, Inc.,
30525+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
30526+ *
30527+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
30528+ * develop this driver.
30529+ *
30530+ **************************************************************************/
30531+/*
30532+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
30533+ */
30534+
30535+#ifndef _PSB_SCHEDULE_H_
30536+#define _PSB_SCHEDULE_H_
30537+
30538+#include <drm/drmP.h>
30539+
30540+struct psb_context;
30541+
30542+enum psb_task_type {
30543+ psb_ta_midscene_task,
30544+ psb_ta_task,
30545+ psb_raster_task,
30546+ psb_freescene_task,
30547+ psb_flip_task
30548+};
30549+
30550+#define PSB_MAX_TA_CMDS 60
30551+#define PSB_MAX_RASTER_CMDS 66
30552+#define PSB_MAX_OOM_CMDS (DRM_PSB_NUM_RASTER_USE_REG * 2 + 6)
30553+
30554+struct psb_xhw_buf {
30555+ struct list_head head;
30556+ int copy_back;
30557+ atomic_t done;
30558+ struct drm_psb_xhw_arg arg;
30559+
30560+};
30561+
30562+struct psb_feedback_info {
30563+ struct ttm_buffer_object *bo;
30564+ struct page *page;
30565+ uint32_t offset;
30566+};
30567+
30568+struct psb_task {
30569+ struct list_head head;
30570+ struct psb_scene *scene;
30571+ struct psb_feedback_info feedback;
30572+ enum psb_task_type task_type;
30573+ uint32_t engine;
30574+ uint32_t sequence;
30575+ uint32_t ta_cmds[PSB_MAX_TA_CMDS];
30576+ uint32_t raster_cmds[PSB_MAX_RASTER_CMDS];
30577+ uint32_t oom_cmds[PSB_MAX_OOM_CMDS];
30578+ uint32_t ta_cmd_size;
30579+ uint32_t raster_cmd_size;
30580+ uint32_t oom_cmd_size;
30581+ uint32_t feedback_offset;
30582+ uint32_t ta_complete_action;
30583+ uint32_t raster_complete_action;
30584+ uint32_t hw_cookie;
30585+ uint32_t flags;
30586+ uint32_t reply_flags;
30587+ uint32_t aborting;
30588+ struct psb_xhw_buf buf;
30589+};
30590+
30591+struct psb_hw_scene {
30592+ struct list_head head;
30593+ uint32_t context_number;
30594+
30595+ /*
30596+ * This pointer does not refcount the last_scene_buffer,
30597+ * so we must make sure it is set to NULL before destroying
30598+ * the corresponding task.
30599+ */
30600+
30601+ struct psb_scene *last_scene;
30602+};
30603+
30604+struct psb_scene;
30605+struct drm_psb_private;
30606+
30607+struct psb_scheduler_seq {
30608+ uint32_t sequence;
30609+ int reported;
30610+};
30611+
30612+struct psb_scheduler {
30613+ struct drm_device *dev;
30614+ struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
30615+ struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
30616+ struct mutex task_wq_mutex;
30617+ struct mutex topaz_power_mutex;
30618+ struct mutex msvdx_power_mutex;
30619+ spinlock_t lock;
30620+ struct list_head hw_scenes;
30621+ struct list_head ta_queue;
30622+ struct list_head raster_queue;
30623+ struct list_head hp_raster_queue;
30624+ struct list_head task_done_queue;
30625+ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
30626+ struct psb_task *feedback_task;
30627+ int ta_state;
30628+ struct psb_hw_scene *pending_hw_scene;
30629+ uint32_t pending_hw_scene_seq;
30630+ struct delayed_work wq;
30631+ struct delayed_work topaz_suspend_wq;
30632+ struct delayed_work msvdx_suspend_wq;
30633+ struct psb_scene_pool *pool;
30634+ uint32_t idle_count;
30635+ int idle;
30636+ wait_queue_head_t idle_queue;
30637+ unsigned long ta_end_jiffies;
30638+ unsigned long total_ta_jiffies;
30639+ unsigned long raster_end_jiffies;
30640+ unsigned long total_raster_jiffies;
30641+};
30642+
30643+#define PSB_RF_FIRE_TA (1 << 0)
30644+#define PSB_RF_OOM (1 << 1)
30645+#define PSB_RF_OOM_REPLY (1 << 2)
30646+#define PSB_RF_TERMINATE (1 << 3)
30647+#define PSB_RF_TA_DONE (1 << 4)
30648+#define PSB_RF_FIRE_RASTER (1 << 5)
30649+#define PSB_RF_RASTER_DONE (1 << 6)
30650+#define PSB_RF_DEALLOC (1 << 7)
30651+
30652+extern struct psb_scene_pool *psb_alloc_scene_pool(struct drm_file *priv,
30653+ int shareable,
30654+ uint32_t w, uint32_t h);
30655+extern uint32_t psb_scene_handle(struct psb_scene *scene);
30656+extern int psb_scheduler_init(struct drm_device *dev,
30657+ struct psb_scheduler *scheduler);
30658+extern void psb_scheduler_takedown(struct psb_scheduler *scheduler);
30659+extern int psb_cmdbuf_ta(struct drm_file *priv,
30660+ struct psb_context *context,
30661+ struct drm_psb_cmdbuf_arg *arg,
30662+ struct ttm_buffer_object *cmd_buffer,
30663+ struct ttm_buffer_object *ta_buffer,
30664+ struct ttm_buffer_object *oom_buffer,
30665+ struct psb_scene *scene,
30666+ struct psb_feedback_info *feedback,
30667+ struct psb_ttm_fence_rep *fence_arg);
30668+extern int psb_cmdbuf_raster(struct drm_file *priv,
30669+ struct psb_context *context,
30670+ struct drm_psb_cmdbuf_arg *arg,
30671+ struct ttm_buffer_object *cmd_buffer,
30672+ struct psb_ttm_fence_rep *fence_arg);
30673+extern void psb_scheduler_handler(struct drm_psb_private *dev_priv,
30674+ uint32_t status);
30675+extern void psb_scheduler_pause(struct drm_psb_private *dev_priv);
30676+extern void psb_scheduler_restart(struct drm_psb_private *dev_priv);
30677+extern int psb_scheduler_idle(struct drm_psb_private *dev_priv);
30678+extern int psb_scheduler_finished(struct drm_psb_private *dev_priv);
30679+
30680+extern void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
30681+ int *lockup, int *idle);
30682+extern void psb_scheduler_reset(struct drm_psb_private *dev_priv,
30683+ int error_condition);
30684+extern int psb_forced_user_interrupt(struct drm_psb_private *dev_priv);
30685+extern void psb_scheduler_remove_scene_refs(struct psb_scene *scene);
30686+extern void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv);
30687+extern int psb_extend_timeout(struct drm_psb_private *dev_priv,
30688+ uint32_t xhw_lockup);
30689+
30690+#endif
30691diff --git a/drivers/gpu/drm/psb/psb_setup.c b/drivers/gpu/drm/psb/psb_setup.c
30692new file mode 100644
30693index 0000000..134ff08
30694--- /dev/null
30695+++ b/drivers/gpu/drm/psb/psb_setup.c
30696@@ -0,0 +1,18 @@
30697+#include <drm/drmP.h>
30698+#include <drm/drm.h>
30699+#include <drm/drm_crtc.h>
30700+#include <drm/drm_edid.h>
30701+#include "psb_intel_drv.h"
30702+#include "psb_drv.h"
30703+#include "psb_intel_reg.h"
30704+
30705+/* Fixed name */
30706+#define ACPI_EDID_LCD "\\_SB_.PCI0.GFX0.DD04._DDC"
30707+#define ACPI_DOD "\\_SB_.PCI0.GFX0._DOD"
30708+
30709+#include "psb_intel_i2c.c"
30710+#include "psb_intel_sdvo.c"
30711+#include "psb_intel_modes.c"
30712+#include "psb_intel_lvds.c"
30713+#include "psb_intel_dsi.c"
30714+#include "psb_intel_display.c"
30715diff --git a/drivers/gpu/drm/psb/psb_sgx.c b/drivers/gpu/drm/psb/psb_sgx.c
30716new file mode 100644
30717index 0000000..2c1f1a4
30718--- /dev/null
30719+++ b/drivers/gpu/drm/psb/psb_sgx.c
30720@@ -0,0 +1,1784 @@
30721+/**************************************************************************
30722+ * Copyright (c) 2007, Intel Corporation.
30723+ * All Rights Reserved.
30724+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX. USA.
30725+ * All Rights Reserved.
30726+ *
30727+ * This program is free software; you can redistribute it and/or modify it
30728+ * under the terms and conditions of the GNU General Public License,
30729+ * version 2, as published by the Free Software Foundation.
30730+ *
30731+ * This program is distributed in the hope it will be useful, but WITHOUT
30732+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
30733+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
30734+ * more details.
30735+ *
30736+ * You should have received a copy of the GNU General Public License along with
30737+ * this program; if not, write to the Free Software Foundation, Inc.,
30738+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
30739+ *
30740+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
30741+ * develop this driver.
30742+ *
30743+ **************************************************************************/
30744+/*
30745+ */
30746+
30747+#include <drm/drmP.h>
30748+#include "psb_drv.h"
30749+#include "psb_drm.h"
30750+#include "psb_reg.h"
30751+#include "psb_scene.h"
30752+#include "psb_msvdx.h"
30753+#include "lnc_topaz.h"
30754+#include "ttm/ttm_bo_api.h"
30755+#include "ttm/ttm_execbuf_util.h"
30756+#include "ttm/ttm_userobj_api.h"
30757+#include "ttm/ttm_placement_common.h"
30758+#include "psb_sgx.h"
30759+#include "psb_intel_reg.h"
30760+#include "psb_powermgmt.h"
30761+
30762+
30763+static inline int psb_same_page(unsigned long offset,
30764+ unsigned long offset2)
30765+{
30766+ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
30767+}
30768+
30769+static inline unsigned long psb_offset_end(unsigned long offset,
30770+ unsigned long end)
30771+{
30772+ offset = (offset + PAGE_SIZE) & PAGE_MASK;
30773+ return (end < offset) ? end : offset;
30774+}
30775+
30776+static void psb_idle_engine(struct drm_device *dev, int engine);
30777+
30778+struct psb_dstbuf_cache {
30779+ unsigned int dst;
30780+ struct ttm_buffer_object *dst_buf;
30781+ unsigned long dst_offset;
30782+ uint32_t *dst_page;
30783+ unsigned int dst_page_offset;
30784+ struct ttm_bo_kmap_obj dst_kmap;
30785+ bool dst_is_iomem;
30786+};
30787+
30788+struct psb_validate_buffer {
30789+ struct ttm_validate_buffer base;
30790+ struct psb_validate_req req;
30791+ int ret;
30792+ struct psb_validate_arg __user *user_val_arg;
30793+ uint32_t flags;
30794+ uint32_t offset;
30795+ int po_correct;
30796+};
30797+
30798+
30799+
30800+#define PSB_REG_GRAN_SHIFT 2
30801+#define PSB_REG_GRANULARITY (1 << PSB_REG_GRAN_SHIFT)
30802+#define PSB_MAX_REG 0x1000
30803+
30804+static const uint32_t disallowed_ranges[][2] = {
30805+ {0x0000, 0x0200},
30806+ {0x0208, 0x0214},
30807+ {0x021C, 0x0224},
30808+ {0x0230, 0x0234},
30809+ {0x0248, 0x024C},
30810+ {0x0254, 0x0358},
30811+ {0x0428, 0x0428},
30812+ {0x0430, 0x043C},
30813+ {0x0498, 0x04B4},
30814+ {0x04CC, 0x04D8},
30815+ {0x04E0, 0x07FC},
30816+ {0x0804, 0x0A14},
30817+ {0x0A4C, 0x0A58},
30818+ {0x0A68, 0x0A80},
30819+ {0x0AA0, 0x0B1C},
30820+ {0x0B2C, 0x0CAC},
30821+ {0x0CB4, PSB_MAX_REG - PSB_REG_GRANULARITY}
30822+};
30823+
30824+static uint32_t psb_disallowed_regs[PSB_MAX_REG /
30825+ (PSB_REG_GRANULARITY *
30826+ (sizeof(uint32_t) << 3))];
30827+
30828+static inline int psb_disallowed(uint32_t reg)
30829+{
30830+ reg >>= PSB_REG_GRAN_SHIFT;
30831+ return (psb_disallowed_regs[reg >> 5] & (1 << (reg & 31))) != 0;
30832+}
30833+
30834+void psb_init_disallowed(void)
30835+{
30836+ int i;
30837+ uint32_t reg, tmp;
30838+ static int initialized;
30839+
30840+ if (initialized)
30841+ return;
30842+
30843+ initialized = 1;
30844+ memset(psb_disallowed_regs, 0, sizeof(psb_disallowed_regs));
30845+
30846+ for (i = 0;
30847+ i < (sizeof(disallowed_ranges) / (2 * sizeof(uint32_t)));
30848+ ++i) {
30849+ for (reg = disallowed_ranges[i][0];
30850+ reg <= disallowed_ranges[i][1]; reg += 4) {
30851+ tmp = reg >> 2;
30852+ psb_disallowed_regs[tmp >> 5] |= (1 << (tmp & 31));
30853+ }
30854+ }
30855+}
30856+
30857+static int psb_memcpy_check(uint32_t *dst, const uint32_t *src,
30858+ uint32_t size)
30859+{
30860+ size >>= 3;
30861+ while (size--) {
30862+ if (unlikely((*src >= 0x1000) || psb_disallowed(*src))) {
30863+ DRM_ERROR("Forbidden SGX register access: "
30864+ "0x%04x.\n", *src);
30865+ return -EPERM;
30866+ }
30867+ *dst++ = *src++;
30868+ *dst++ = *src++;
30869+ }
30870+ return 0;
30871+}
30872+
30873+int psb_2d_wait_available(struct drm_psb_private *dev_priv,
30874+ unsigned size)
30875+{
30876+ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
30877+ int ret = 0;
30878+
30879+retry:
30880+ if (avail < size) {
30881+#if 0
30882+ /*
30883+ * Ideally, we'd like to have an IRQ-driven event here.
30884+ */
30885+
30886+ psb_2D_irq_on(dev_priv);
30887+ DRM_WAIT_ON(ret, dev_priv->event_2d_queue, DRM_HZ,
30888+ ((avail =
30889+ PSB_RSGX32(PSB_CR_2D_SOCIF)) >= size));
30890+ psb_2D_irq_off(dev_priv);
30891+ if (ret == 0)
30892+ return 0;
30893+ if (ret == -EINTR) {
30894+ ret = 0;
30895+ goto retry;
30896+ }
30897+#else
30898+ avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
30899+ goto retry;
30900+#endif
30901+ }
30902+ return ret;
30903+}
30904+
30905+int psb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
30906+ unsigned size)
30907+{
30908+ int ret = 0;
30909+ int i;
30910+ unsigned submit_size;
30911+
30912+ while (size > 0) {
30913+ submit_size = (size < 0x60) ? size : 0x60;
30914+ size -= submit_size;
30915+ ret = psb_2d_wait_available(dev_priv, submit_size);
30916+ if (ret)
30917+ return ret;
30918+
30919+ submit_size <<= 2;
30920+ mutex_lock(&dev_priv->reset_mutex);
30921+ for (i = 0; i < submit_size; i += 4) {
30922+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
30923+ }
30924+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
30925+ mutex_unlock(&dev_priv->reset_mutex);
30926+ }
30927+ return 0;
30928+}
30929+
30930+int psb_blit_sequence(struct drm_psb_private *dev_priv, uint32_t sequence)
30931+{
30932+ uint32_t buffer[8];
30933+ uint32_t *bufp = buffer;
30934+ int ret;
30935+
30936+ *bufp++ = PSB_2D_FENCE_BH;
30937+
30938+ *bufp++ = PSB_2D_DST_SURF_BH |
30939+ PSB_2D_DST_8888ARGB | (4 << PSB_2D_DST_STRIDE_SHIFT);
30940+ *bufp++ = dev_priv->comm_mmu_offset - dev_priv->mmu_2d_offset;
30941+
30942+ *bufp++ = PSB_2D_BLIT_BH |
30943+ PSB_2D_ROT_NONE |
30944+ PSB_2D_COPYORDER_TL2BR |
30945+ PSB_2D_DSTCK_DISABLE |
30946+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
30947+
30948+ *bufp++ = sequence << PSB_2D_FILLCOLOUR_SHIFT;
30949+ *bufp++ = (0 << PSB_2D_DST_XSTART_SHIFT) |
30950+ (0 << PSB_2D_DST_YSTART_SHIFT);
30951+ *bufp++ =
30952+ (1 << PSB_2D_DST_XSIZE_SHIFT) | (1 << PSB_2D_DST_YSIZE_SHIFT);
30953+
30954+ *bufp++ = PSB_2D_FLUSH_BH;
30955+
30956+ psb_2d_lock(dev_priv);
30957+ ret = psb_2d_submit(dev_priv, buffer, bufp - buffer);
30958+ psb_2d_unlock(dev_priv);
30959+
30960+ if (!ret)
30961+ psb_schedule_watchdog(dev_priv);
30962+ return ret;
30963+}
30964+
30965+int psb_emit_2d_copy_blit(struct drm_device *dev,
30966+ uint32_t src_offset,
30967+ uint32_t dst_offset, uint32_t pages,
30968+ int direction)
30969+{
30970+ uint32_t cur_pages;
30971+ struct drm_psb_private *dev_priv = dev->dev_private;
30972+ uint32_t buf[10];
30973+ uint32_t *bufp;
30974+ uint32_t xstart;
30975+ uint32_t ystart;
30976+ uint32_t blit_cmd;
30977+ uint32_t pg_add;
30978+ int ret = 0;
30979+
30980+ if (!dev_priv)
30981+ return 0;
30982+
30983+ if (direction) {
30984+ pg_add = (pages - 1) << PAGE_SHIFT;
30985+ src_offset += pg_add;
30986+ dst_offset += pg_add;
30987+ }
30988+
30989+ blit_cmd = PSB_2D_BLIT_BH |
30990+ PSB_2D_ROT_NONE |
30991+ PSB_2D_DSTCK_DISABLE |
30992+ PSB_2D_SRCCK_DISABLE |
30993+ PSB_2D_USE_PAT |
30994+ PSB_2D_ROP3_SRCCOPY |
30995+ (direction ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TL2BR);
30996+ xstart = (direction) ? ((PAGE_SIZE - 1) >> 2) : 0;
30997+
30998+ psb_2d_lock(dev_priv);
30999+ while (pages > 0) {
31000+ cur_pages = pages;
31001+ if (cur_pages > 2048)
31002+ cur_pages = 2048;
31003+ pages -= cur_pages;
31004+ ystart = (direction) ? cur_pages - 1 : 0;
31005+
31006+ bufp = buf;
31007+ *bufp++ = PSB_2D_FENCE_BH;
31008+
31009+ *bufp++ = PSB_2D_DST_SURF_BH | PSB_2D_DST_8888ARGB |
31010+ (PAGE_SIZE << PSB_2D_DST_STRIDE_SHIFT);
31011+ *bufp++ = dst_offset;
31012+ *bufp++ = PSB_2D_SRC_SURF_BH | PSB_2D_SRC_8888ARGB |
31013+ (PAGE_SIZE << PSB_2D_SRC_STRIDE_SHIFT);
31014+ *bufp++ = src_offset;
31015+ *bufp++ =
31016+ PSB_2D_SRC_OFF_BH | (xstart <<
31017+ PSB_2D_SRCOFF_XSTART_SHIFT) |
31018+ (ystart << PSB_2D_SRCOFF_YSTART_SHIFT);
31019+ *bufp++ = blit_cmd;
31020+ *bufp++ = (xstart << PSB_2D_DST_XSTART_SHIFT) |
31021+ (ystart << PSB_2D_DST_YSTART_SHIFT);
31022+ *bufp++ = ((PAGE_SIZE >> 2) << PSB_2D_DST_XSIZE_SHIFT) |
31023+ (cur_pages << PSB_2D_DST_YSIZE_SHIFT);
31024+
31025+ ret = psb_2d_submit(dev_priv, buf, bufp - buf);
31026+ if (ret)
31027+ goto out;
31028+ pg_add =
31029+ (cur_pages << PAGE_SHIFT) * ((direction) ? -1 : 1);
31030+ src_offset += pg_add;
31031+ dst_offset += pg_add;
31032+ }
31033+out:
31034+ psb_2d_unlock(dev_priv);
31035+ return ret;
31036+}
31037+
31038+void psb_init_2d(struct drm_psb_private *dev_priv)
31039+{
31040+ spin_lock_init(&dev_priv->sequence_lock);
31041+ psb_reset(dev_priv, 1);
31042+ dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
31043+ PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
31044+ (void) PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
31045+}
31046+
31047+int psb_idle_2d(struct drm_device *dev)
31048+{
31049+ struct drm_psb_private *dev_priv = dev->dev_private;
31050+ unsigned long _end = jiffies + DRM_HZ;
31051+ int busy = 0;
31052+ bool b_need_release = false;
31053+
31054+ if (!powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND)) {
31055+ if (!powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, false))
31056+ return 0;
31057+ else
31058+ b_need_release = true;
31059+ }
31060+
31061+ /*
31062+ * First idle the 2D engine.
31063+ */
31064+
31065+ if (dev_priv->engine_lockup_2d) {
31066+ busy = -EBUSY;
31067+ goto out;
31068+ }
31069+
31070+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
31071+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) ==
31072+ 0))
31073+ goto out;
31074+
31075+ do {
31076+ busy =
31077+ (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
31078+ } while (busy && !time_after_eq(jiffies, _end));
31079+
31080+ if (busy)
31081+ busy =
31082+ (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
31083+ if (busy)
31084+ goto out;
31085+
31086+ do {
31087+ busy =
31088+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
31089+ _PSB_C2B_STATUS_BUSY)
31090+ != 0);
31091+ } while (busy && !time_after_eq(jiffies, _end));
31092+ if (busy)
31093+ busy =
31094+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
31095+ _PSB_C2B_STATUS_BUSY)
31096+ != 0);
31097+
31098+out:
31099+ if (busy)
31100+ dev_priv->engine_lockup_2d = 1;
31101+
31102+ if (b_need_release)
31103+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
31104+
31105+ return (busy) ? -EBUSY : 0;
31106+}
31107+
31108+int psb_idle_3d(struct drm_device *dev)
31109+{
31110+ struct drm_psb_private *dev_priv = dev->dev_private;
31111+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
31112+ int ret;
31113+
31114+ ret = wait_event_timeout(scheduler->idle_queue,
31115+ psb_scheduler_finished(dev_priv),
31116+ DRM_HZ * 10);
31117+
31118+ /*
31119+ * wait_event_timeout() returns 0 if the timeout elapsed, and the
31120+ * number of remaining jiffies if the condition evaluated to true
31121+ * before the timeout elapsed.
31122+ */
31123+
31124+ if (ret == 0)
31125+ DRM_ERROR("Timed out waiting for the scheduler work queue to idle.\n");
31126+
31127+ return (ret < 1) ? -EBUSY : 0;
31128+}
31129+
31130+static int psb_check_presumed(struct psb_validate_req *req,
31131+ struct ttm_buffer_object *bo,
31132+ struct psb_validate_arg __user *data,
31133+ int *presumed_ok)
31134+{
31135+ struct psb_validate_req __user *user_req = &(data->d.req);
31136+
31137+ *presumed_ok = 0;
31138+
31139+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
31140+ *presumed_ok = 1;
31141+ return 0;
31142+ }
31143+
31144+ if (unlikely(!(req->presumed_flags & PSB_USE_PRESUMED)))
31145+ return 0;
31146+
31147+ if (bo->offset == req->presumed_gpu_offset) {
31148+ *presumed_ok = 1;
31149+ return 0;
31150+ }
31151+
31152+ return __put_user(req->presumed_flags & ~PSB_USE_PRESUMED,
31153+ &user_req->presumed_flags);
31154+}
31155+
31156+
31157+static void psb_unreference_buffers(struct psb_context *context)
31158+{
31159+ struct ttm_validate_buffer *entry, *next;
31160+ struct psb_validate_buffer *vbuf;
31161+ struct list_head *list = &context->validate_list;
31162+
31163+ list_for_each_entry_safe(entry, next, list, head) {
31164+ vbuf =
31165+ container_of(entry, struct psb_validate_buffer, base);
31166+ list_del(&entry->head);
31167+ ttm_bo_unref(&entry->bo);
31168+ }
31169+
31170+ list = &context->kern_validate_list;
31171+
31172+ list_for_each_entry_safe(entry, next, list, head) {
31173+ vbuf =
31174+ container_of(entry, struct psb_validate_buffer, base);
31175+ list_del(&entry->head);
31176+ ttm_bo_unref(&entry->bo);
31177+ }
31178+}
31179+
31180+
31181+static int psb_lookup_validate_buffer(struct drm_file *file_priv,
31182+ uint64_t data,
31183+ struct psb_validate_buffer *item)
31184+{
31185+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
31186+
31187+ item->user_val_arg =
31188+ (struct psb_validate_arg __user *) (unsigned long) data;
31189+
31190+ if (unlikely(copy_from_user(&item->req, &item->user_val_arg->d.req,
31191+ sizeof(item->req)) != 0)) {
31192+ DRM_ERROR("Lookup copy fault.\n");
31193+ return -EFAULT;
31194+ }
31195+
31196+ item->base.bo =
31197+ ttm_buffer_object_lookup(tfile, item->req.buffer_handle);
31198+
31199+ if (unlikely(item->base.bo == NULL)) {
31200+ DRM_ERROR("Bo lookup fault.\n");
31201+ return -EINVAL;
31202+ }
31203+
31204+ return 0;
31205+}
31206+
31207+static int psb_reference_buffers(struct drm_file *file_priv,
31208+ uint64_t data,
31209+ struct psb_context *context)
31210+{
31211+ struct psb_validate_buffer *item;
31212+ int ret;
31213+
31214+ while (likely(data != 0)) {
31215+ if (unlikely(context->used_buffers >=
31216+ PSB_NUM_VALIDATE_BUFFERS)) {
31217+ DRM_ERROR("Too many buffers "
31218+ "on validate list.\n");
31219+ ret = -EINVAL;
31220+ goto out_err0;
31221+ }
31222+
31223+ item = &context->buffers[context->used_buffers];
31224+
31225+ ret = psb_lookup_validate_buffer(file_priv, data, item);
31226+ if (unlikely(ret != 0))
31227+ goto out_err0;
31228+
31229+ item->base.reserved = 0;
31230+ list_add_tail(&item->base.head, &context->validate_list);
31231+ context->used_buffers++;
31232+ data = item->req.next;
31233+ }
31234+ return 0;
31235+
31236+out_err0:
31237+ psb_unreference_buffers(context);
31238+ return ret;
31239+}
31240+
31241+static int
31242+psb_placement_fence_type(struct ttm_buffer_object *bo,
31243+ uint64_t set_val_flags,
31244+ uint64_t clr_val_flags,
31245+ uint32_t new_fence_class,
31246+ uint32_t *new_fence_type)
31247+{
31248+ int ret;
31249+ uint32_t n_fence_type;
31250+ uint32_t set_flags = set_val_flags & 0xFFFFFFFF;
31251+ uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF;
31252+ struct ttm_fence_object *old_fence;
31253+ uint32_t old_fence_type;
31254+
31255+ if (unlikely
31256+ (!(set_val_flags &
31257+ (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) {
31258+ DRM_ERROR
31259+ ("GPU access type (read / write) is not indicated.\n");
31260+ return -EINVAL;
31261+ }
31262+
31263+ ret = ttm_bo_check_placement(bo, set_flags, clr_flags);
31264+ if (unlikely(ret != 0))
31265+ return ret;
31266+
31267+ switch (new_fence_class) {
31268+ case PSB_ENGINE_TA:
31269+ n_fence_type = _PSB_FENCE_TYPE_EXE |
31270+ _PSB_FENCE_TYPE_TA_DONE | _PSB_FENCE_TYPE_RASTER_DONE;
31271+ if (set_val_flags & PSB_BO_FLAG_TA)
31272+ n_fence_type &= ~_PSB_FENCE_TYPE_RASTER_DONE;
31273+ if (set_val_flags & PSB_BO_FLAG_COMMAND)
31274+ n_fence_type &=
31275+ ~(_PSB_FENCE_TYPE_RASTER_DONE |
31276+ _PSB_FENCE_TYPE_TA_DONE);
31277+ if (set_val_flags & PSB_BO_FLAG_SCENE)
31278+ n_fence_type |= _PSB_FENCE_TYPE_SCENE_DONE;
31279+ if (set_val_flags & PSB_BO_FLAG_FEEDBACK)
31280+ n_fence_type |= _PSB_FENCE_TYPE_FEEDBACK;
31281+ break;
31282+ default:
31283+ n_fence_type = _PSB_FENCE_TYPE_EXE;
31284+ }
31285+
31286+ *new_fence_type = n_fence_type;
31287+ old_fence = (struct ttm_fence_object *) bo->sync_obj;
31288+ old_fence_type = (uint32_t) (unsigned long) bo->sync_obj_arg;
31289+
31290+ if (old_fence && ((new_fence_class != old_fence->fence_class) ||
31291+ ((n_fence_type ^ old_fence_type) &
31292+ old_fence_type))) {
31293+ ret = ttm_bo_wait(bo, 0, 1, 0);
31294+ if (unlikely(ret != 0))
31295+ return ret;
31296+ }
31297+
31298+ bo->proposed_flags = (bo->proposed_flags | set_flags)
31299+ & ~clr_flags & TTM_PL_MASK_MEMTYPE;
31300+
31301+ return 0;
31302+}
31303+
31304+int psb_validate_kernel_buffer(struct psb_context *context,
31305+ struct ttm_buffer_object *bo,
31306+ uint32_t fence_class,
31307+ uint64_t set_flags, uint64_t clr_flags)
31308+{
31309+ struct psb_validate_buffer *item;
31310+ uint32_t cur_fence_type;
31311+ int ret;
31312+
31313+ if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) {
31314+ DRM_ERROR("Out of free validation buffer entries for "
31315+ "kernel buffer validation.\n");
31316+ return -ENOMEM;
31317+ }
31318+
31319+ item = &context->buffers[context->used_buffers];
31320+ item->user_val_arg = NULL;
31321+ item->base.reserved = 0;
31322+
31323+ ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq);
31324+ if (unlikely(ret != 0))
31325+ goto out_unlock;
31326+
31327+ mutex_lock(&bo->mutex);
31328+ ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class,
31329+ &cur_fence_type);
31330+ if (unlikely(ret != 0)) {
31331+ ttm_bo_unreserve(bo);
31332+ goto out_unlock;
31333+ }
31334+
31335+ item->base.bo = ttm_bo_reference(bo);
31336+ item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type;
31337+ item->base.reserved = 1;
31338+
31339+ list_add_tail(&item->base.head, &context->kern_validate_list);
31340+ context->used_buffers++;
31341+
31342+ ret = ttm_buffer_object_validate(bo, 1, 0);
31343+ if (unlikely(ret != 0))
31344+ goto out_unlock;
31345+
31346+ item->offset = bo->offset;
31347+ item->flags = bo->mem.flags;
31348+ context->fence_types |= cur_fence_type;
31349+
31350+out_unlock:
31351+ mutex_unlock(&bo->mutex);
31352+ return ret;
31353+}
31354+
31355+
31356+static int psb_validate_buffer_list(struct drm_file *file_priv,
31357+ uint32_t fence_class,
31358+ struct psb_context *context,
31359+ int *po_correct)
31360+{
31361+ struct psb_validate_buffer *item;
31362+ struct ttm_buffer_object *bo;
31363+ int ret;
31364+ struct psb_validate_req *req;
31365+ uint32_t fence_types = 0;
31366+ uint32_t cur_fence_type;
31367+ struct ttm_validate_buffer *entry;
31368+ struct list_head *list = &context->validate_list;
31369+
31370+ *po_correct = 1;
31371+
31372+ list_for_each_entry(entry, list, head) {
31373+ item =
31374+ container_of(entry, struct psb_validate_buffer, base);
31375+ bo = entry->bo;
31376+ item->ret = 0;
31377+ req = &item->req;
31378+
31379+ mutex_lock(&bo->mutex);
31380+ ret = psb_placement_fence_type(bo,
31381+ req->set_flags,
31382+ req->clear_flags,
31383+ fence_class,
31384+ &cur_fence_type);
31385+ if (unlikely(ret != 0))
31386+ goto out_err;
31387+
31388+ ret = ttm_buffer_object_validate(bo, 1, 0);
31389+
31390+ if (unlikely(ret != 0))
31391+ goto out_err;
31392+
31393+ fence_types |= cur_fence_type;
31394+ entry->new_sync_obj_arg = (void *)
31395+ (unsigned long) cur_fence_type;
31396+
31397+ item->offset = bo->offset;
31398+ item->flags = bo->mem.flags;
31399+ mutex_unlock(&bo->mutex);
31400+
31401+ ret =
31402+ psb_check_presumed(&item->req, bo, item->user_val_arg,
31403+ &item->po_correct);
31404+ if (unlikely(ret != 0))
31405+ goto out_err;
31406+
31407+ if (unlikely(!item->po_correct))
31408+ *po_correct = 0;
31409+
31410+ item++;
31411+ }
31412+
31413+ context->fence_types |= fence_types;
31414+
31415+ return 0;
31416+out_err:
31417+ mutex_unlock(&bo->mutex);
31418+ item->ret = ret;
31419+ return ret;
31420+}
31421+
31422+
31423+int
31424+psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t *regs,
31425+ unsigned int cmds)
31426+{
31427+ int i;
31428+
31429+ /*
31430+ * cmds counts 32-bit words; each register write consumes two (offset, value).
31431+ */
31432+
31433+ cmds >>= 1;
31434+ for (i = 0; i < cmds; ++i) {
31435+ PSB_WSGX32(regs[1], regs[0]);
31436+ regs += 2;
31437+ }
31438+ wmb();
31439+ return 0;
31440+}
31441+
31442+/*
31443+ * Security: Block user-space writes to the MMU mapping registers.
31444+ * This is important for security and brings Poulsbo DRM
31445+ * up to par with the other DRM drivers. With this in place,
31446+ * user-space cannot map arbitrary memory pages into graphics
31447+ * memory. Note, however, that all user-space processes still
31448+ * have access to every buffer object that is mapped into
31449+ * graphics memory.
31450+ */
31451+
31452+int
31453+psb_submit_copy_cmdbuf(struct drm_device *dev,
31454+ struct ttm_buffer_object *cmd_buffer,
31455+ unsigned long cmd_offset,
31456+ unsigned long cmd_size,
31457+ int engine, uint32_t *copy_buffer)
31458+{
31459+ unsigned long cmd_end = cmd_offset + (cmd_size << 2);
31460+ struct drm_psb_private *dev_priv = dev->dev_private;
31461+ unsigned long cmd_page_offset =
31462+ cmd_offset - (cmd_offset & PAGE_MASK);
31463+ unsigned long cmd_next;
31464+ struct ttm_bo_kmap_obj cmd_kmap;
31465+ uint32_t *cmd_page;
31466+ unsigned cmds;
31467+ bool is_iomem;
31468+ int ret = 0;
31469+
31470+ if (cmd_size == 0)
31471+ return 0;
31472+
31473+ if (engine == PSB_ENGINE_2D)
31474+ psb_2d_lock(dev_priv);
31475+
31476+ do {
31477+ cmd_next = psb_offset_end(cmd_offset, cmd_end);
31478+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT,
31479+ 1, &cmd_kmap);
31480+
31481+ if (ret) {
31482+ if (engine == PSB_ENGINE_2D)
31483+ psb_2d_unlock(dev_priv);
31484+ return ret;
31485+ }
31486+ cmd_page = ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem);
31487+ cmd_page_offset = (cmd_offset & ~PAGE_MASK) >> 2;
31488+ cmds = (cmd_next - cmd_offset) >> 2;
31489+
31490+ switch (engine) {
31491+ case PSB_ENGINE_2D:
31492+ ret =
31493+ psb_2d_submit(dev_priv,
31494+ cmd_page + cmd_page_offset,
31495+ cmds);
31496+ break;
31497+ case PSB_ENGINE_RASTERIZER:
31498+ case PSB_ENGINE_TA:
31499+ case PSB_ENGINE_HPRAST:
31500+ PSB_DEBUG_GENERAL("Reg copy.\n");
31501+ ret = psb_memcpy_check(copy_buffer,
31502+ cmd_page + cmd_page_offset,
31503+ cmds * sizeof(uint32_t));
31504+ copy_buffer += cmds;
31505+ break;
31506+ default:
31507+ ret = -EINVAL;
31508+ }
31509+ ttm_bo_kunmap(&cmd_kmap);
31510+ if (ret)
31511+ break;
31512+ } while (cmd_offset = cmd_next, cmd_offset != cmd_end);
31513+
31514+ if (engine == PSB_ENGINE_2D)
31515+ psb_2d_unlock(dev_priv);
31516+
31517+ return ret;
31518+}
31519+
31520+static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
31521+{
31522+ if (dst_cache->dst_page) {
31523+ ttm_bo_kunmap(&dst_cache->dst_kmap);
31524+ dst_cache->dst_page = NULL;
31525+ }
31526+ dst_cache->dst_buf = NULL;
31527+ dst_cache->dst = ~0;
31528+}
31529+
31530+static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
31531+ struct psb_validate_buffer *buffers,
31532+ unsigned int dst,
31533+ unsigned long dst_offset)
31534+{
31535+ int ret;
31536+
31537+ PSB_DEBUG_GENERAL("Destination buffer is %d.\n", dst);
31538+
31539+ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
31540+ psb_clear_dstbuf_cache(dst_cache);
31541+ dst_cache->dst = dst;
31542+ dst_cache->dst_buf = buffers[dst].base.bo;
31543+ }
31544+
31545+ if (unlikely
31546+ (dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
31547+ DRM_ERROR("Relocation destination out of bounds.\n");
31548+ return -EINVAL;
31549+ }
31550+
31551+ if (!psb_same_page(dst_cache->dst_offset, dst_offset) ||
31552+ NULL == dst_cache->dst_page) {
31553+ if (NULL != dst_cache->dst_page) {
31554+ ttm_bo_kunmap(&dst_cache->dst_kmap);
31555+ dst_cache->dst_page = NULL;
31556+ }
31557+
31558+ ret =
31559+ ttm_bo_kmap(dst_cache->dst_buf,
31560+ dst_offset >> PAGE_SHIFT, 1,
31561+ &dst_cache->dst_kmap);
31562+ if (ret) {
31563+ DRM_ERROR("Could not map destination buffer for "
31564+ "relocation.\n");
31565+ return ret;
31566+ }
31567+
31568+ dst_cache->dst_page =
31569+ ttm_kmap_obj_virtual(&dst_cache->dst_kmap,
31570+ &dst_cache->dst_is_iomem);
31571+ dst_cache->dst_offset = dst_offset & PAGE_MASK;
31572+ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
31573+ }
31574+ return 0;
31575+}
31576+
31577+static int psb_apply_reloc(struct drm_psb_private *dev_priv,
31578+ uint32_t fence_class,
31579+ const struct drm_psb_reloc *reloc,
31580+ struct psb_validate_buffer *buffers,
31581+ int num_buffers,
31582+ struct psb_dstbuf_cache *dst_cache,
31583+ int no_wait, int interruptible)
31584+{
31585+ uint32_t val;
31586+ uint32_t background;
31587+ unsigned int index;
31588+ int ret;
31589+ unsigned int shift;
31590+ unsigned int align_shift;
31591+ struct ttm_buffer_object *reloc_bo;
31592+
31593+
31594+ PSB_DEBUG_GENERAL("Reloc type %d\n"
31595+ "\t where 0x%04x\n"
31596+ "\t buffer 0x%04x\n"
31597+ "\t mask 0x%08x\n"
31598+ "\t shift 0x%08x\n"
31599+ "\t pre_add 0x%08x\n"
31600+ "\t background 0x%08x\n"
31601+ "\t dst_buffer 0x%08x\n"
31602+ "\t arg0 0x%08x\n"
31603+ "\t arg1 0x%08x\n",
31604+ reloc->reloc_op,
31605+ reloc->where,
31606+ reloc->buffer,
31607+ reloc->mask,
31608+ reloc->shift,
31609+ reloc->pre_add,
31610+ reloc->background,
31611+ reloc->dst_buffer, reloc->arg0, reloc->arg1);
31612+
31613+ if (unlikely(reloc->buffer >= num_buffers)) {
31614+ DRM_ERROR("Illegal relocation buffer %d.\n",
31615+ reloc->buffer);
31616+ return -EINVAL;
31617+ }
31618+
31619+ if (buffers[reloc->buffer].po_correct)
31620+ return 0;
31621+
31622+ if (unlikely(reloc->dst_buffer >= num_buffers)) {
31623+ DRM_ERROR
31624+ ("Illegal destination buffer for relocation %d.\n",
31625+ reloc->dst_buffer);
31626+ return -EINVAL;
31627+ }
31628+
31629+ ret =
31630+ psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
31631+ reloc->where << 2);
31632+ if (ret)
31633+ return ret;
31634+
31635+ reloc_bo = buffers[reloc->buffer].base.bo;
31636+
31637+ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
31638+ DRM_ERROR("Illegal relocation offset add.\n");
31639+ return -EINVAL;
31640+ }
31641+
31642+ switch (reloc->reloc_op) {
31643+ case PSB_RELOC_OP_OFFSET:
31644+ val = reloc_bo->offset + reloc->pre_add;
31645+ break;
31646+ case PSB_RELOC_OP_2D_OFFSET:
31647+ val = reloc_bo->offset + reloc->pre_add -
31648+ dev_priv->mmu_2d_offset;
31649+ if (unlikely(val >= PSB_2D_SIZE)) {
31650+ DRM_ERROR("2D relocation out of bounds\n");
31651+ return -EINVAL;
31652+ }
31653+ break;
31654+ case PSB_RELOC_OP_PDS_OFFSET:
31655+ val =
31656+ reloc_bo->offset + reloc->pre_add - PSB_MEM_PDS_START;
31657+ if (unlikely
31658+ (val >= (PSB_MEM_MMU_START - PSB_MEM_PDS_START))) {
31659+ DRM_ERROR("PDS relocation out of bounds\n");
31660+ return -EINVAL;
31661+ }
31662+ break;
31663+ default:
31664+ DRM_ERROR("Unimplemented relocation.\n");
31665+ return -EINVAL;
31666+ }
31667+
31668+ shift =
31669+ (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
31670+ align_shift =
31671+ (reloc->
31672+ shift & PSB_RELOC_ALSHIFT_MASK) >> PSB_RELOC_ALSHIFT_SHIFT;
31673+
31674+ val = ((val >> align_shift) << shift);
31675+ index = reloc->where - dst_cache->dst_page_offset;
31676+
31677+ background = reloc->background;
31678+ val = (background & ~reloc->mask) | (val & reloc->mask);
31679+ dst_cache->dst_page[index] = val;
31680+
31681+ PSB_DEBUG_GENERAL("Reloc buffer %d index 0x%08x, value 0x%08x\n",
31682+ reloc->dst_buffer, index,
31683+ dst_cache->dst_page[index]);
31684+
31685+ return 0;
31686+}
31687+
31688+static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
31689+ unsigned int num_pages)
31690+{
31691+ int ret = 0;
31692+
31693+ spin_lock(&dev_priv->reloc_lock);
31694+ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
31695+ dev_priv->rel_mapped_pages += num_pages;
31696+ ret = 1;
31697+ }
31698+ spin_unlock(&dev_priv->reloc_lock);
31699+ return ret;
31700+}
31701+
31702+static int psb_fixup_relocs(struct drm_file *file_priv,
31703+ uint32_t fence_class,
31704+ unsigned int num_relocs,
31705+ unsigned int reloc_offset,
31706+ uint32_t reloc_handle,
31707+ struct psb_context *context,
31708+ int no_wait, int interruptible)
31709+{
31710+ struct drm_device *dev = file_priv->minor->dev;
31711+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
31712+ struct drm_psb_private *dev_priv =
31713+ (struct drm_psb_private *) dev->dev_private;
31714+ struct ttm_buffer_object *reloc_buffer = NULL;
31715+ unsigned int reloc_num_pages;
31716+ unsigned int reloc_first_page;
31717+ unsigned int reloc_last_page;
31718+ struct psb_dstbuf_cache dst_cache;
31719+ struct drm_psb_reloc *reloc;
31720+ struct ttm_bo_kmap_obj reloc_kmap;
31721+ bool reloc_is_iomem;
31722+ int count;
31723+ int ret = 0;
31724+ int registered = 0;
31725+ uint32_t num_buffers = context->used_buffers;
31726+
31727+ if (num_relocs == 0)
31728+ return 0;
31729+
31730+ memset(&dst_cache, 0, sizeof(dst_cache));
31731+ memset(&reloc_kmap, 0, sizeof(reloc_kmap));
31732+
31733+ reloc_buffer = ttm_buffer_object_lookup(tfile, reloc_handle);
31734+ if (!reloc_buffer)
31735+ goto out;
31736+
31737+ if (unlikely(atomic_read(&reloc_buffer->reserved) != 1)) {
31738+ DRM_ERROR("Relocation buffer was not on validate list.\n");
31739+ ret = -EINVAL;
31740+ goto out;
31741+ }
31742+
31743+ reloc_first_page = reloc_offset >> PAGE_SHIFT;
31744+ reloc_last_page =
31745+ (reloc_offset +
31746+ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
31747+ reloc_num_pages = reloc_last_page - reloc_first_page + 1;
31748+ reloc_offset &= ~PAGE_MASK;
31749+
31750+ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
31751+ DRM_ERROR("Relocation buffer is too large\n");
31752+ ret = -EINVAL;
31753+ goto out;
31754+ }
31755+
31756+ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
31757+ (registered =
31758+ psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
31759+
31760+ if (ret == -EINTR) {
31761+ ret = -ERESTART;
31762+ goto out;
31763+ }
31764+ if (ret) {
31765+ DRM_ERROR("Error waiting for space to map "
31766+ "relocation buffer.\n");
31767+ goto out;
31768+ }
31769+
31770+ ret = ttm_bo_kmap(reloc_buffer, reloc_first_page,
31771+ reloc_num_pages, &reloc_kmap);
31772+
31773+ if (ret) {
31774+ DRM_ERROR("Could not map relocation buffer.\n"
31775+ "\tReloc buffer id 0x%08x.\n"
31776+ "\tReloc first page %d.\n"
31777+ "\tReloc num pages %d.\n",
31778+ reloc_handle, reloc_first_page, reloc_num_pages);
31779+ goto out;
31780+ }
31781+
31782+ reloc = (struct drm_psb_reloc *)
31783+ ((unsigned long)
31784+ ttm_kmap_obj_virtual(&reloc_kmap,
31785+ &reloc_is_iomem) + reloc_offset);
31786+
31787+ for (count = 0; count < num_relocs; ++count) {
31788+ ret = psb_apply_reloc(dev_priv, fence_class,
31789+ reloc, context->buffers,
31790+ num_buffers, &dst_cache,
31791+ no_wait, interruptible);
31792+ if (ret)
31793+ goto out1;
31794+ reloc++;
31795+ }
31796+
31797+out1:
31798+ ttm_bo_kunmap(&reloc_kmap);
31799+out:
31800+ if (registered) {
31801+ spin_lock(&dev_priv->reloc_lock);
31802+ dev_priv->rel_mapped_pages -= reloc_num_pages;
31803+ spin_unlock(&dev_priv->reloc_lock);
31804+ DRM_WAKEUP(&dev_priv->rel_mapped_queue);
31805+ }
31806+
31807+ psb_clear_dstbuf_cache(&dst_cache);
31808+ if (reloc_buffer)
31809+ ttm_bo_unref(&reloc_buffer);
31810+ return ret;
31811+}
31812+
31813+void psb_fence_or_sync(struct drm_file *file_priv,
31814+ uint32_t engine,
31815+ uint32_t fence_types,
31816+ uint32_t fence_flags,
31817+ struct list_head *list,
31818+ struct psb_ttm_fence_rep *fence_arg,
31819+ struct ttm_fence_object **fence_p)
31820+{
31821+ struct drm_device *dev = file_priv->minor->dev;
31822+ struct drm_psb_private *dev_priv = psb_priv(dev);
31823+ struct ttm_fence_device *fdev = &dev_priv->fdev;
31824+ int ret;
31825+ struct ttm_fence_object *fence;
31826+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
31827+ uint32_t handle;
31828+
31829+ ret = ttm_fence_user_create(fdev, tfile,
31830+ engine, fence_types,
31831+ TTM_FENCE_FLAG_EMIT, &fence, &handle);
31832+ if (ret) {
31833+
31834+ /*
31835+ * Fence creation failed.
31836+ * Fall back to synchronous operation and idle the engine.
31837+ */
31838+
31839+ psb_idle_engine(dev, engine);
31840+ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
31841+
31842+ /*
31843+ * Communicate to user-space that
31844+ * fence creation has failed and that
31845+ * the engine is idle.
31846+ */
31847+
31848+ fence_arg->handle = ~0;
31849+ fence_arg->error = ret;
31850+ }
31851+
31852+ ttm_eu_backoff_reservation(list);
31853+ if (fence_p)
31854+ *fence_p = NULL;
31855+ return;
31856+ }
31857+
31858+ ttm_eu_fence_buffer_objects(list, fence);
31859+ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
31860+ struct ttm_fence_info info = ttm_fence_get_info(fence);
31861+ fence_arg->handle = handle;
31862+ fence_arg->fence_class = ttm_fence_class(fence);
31863+ fence_arg->fence_type = ttm_fence_types(fence);
31864+ fence_arg->signaled_types = info.signaled_types;
31865+ fence_arg->error = 0;
31866+ } else {
31867+ ret =
31868+ ttm_ref_object_base_unref(tfile, handle,
31869+ ttm_fence_type);
31870+ BUG_ON(ret);
31871+ }
31872+
31873+ if (fence_p)
31874+ *fence_p = fence;
31875+ else if (fence)
31876+ ttm_fence_object_unref(&fence);
31877+}
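+
+/*
+ * Illustrative usage sketch (the local names below are hypothetical): a
+ * submission path that has already reserved its validate list would call
+ *
+ *	struct psb_ttm_fence_rep rep;
+ *	psb_fence_or_sync(file_priv, PSB_ENGINE_2D, fence_types,
+ *			  arg->fence_flags, &context->validate_list,
+ *			  &rep, NULL);
+ *
+ * On fence-creation failure the engine is idled, the reservation is backed
+ * off and rep.handle is set to ~0 so user space can detect the synchronous
+ * fallback; psb_cmdbuf_2d() below is the simplest real caller.
+ */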
31878+
31879+
31880+
31881+static int psb_cmdbuf_2d(struct drm_file *priv,
31882+ struct list_head *validate_list,
31883+ uint32_t fence_type,
31884+ struct drm_psb_cmdbuf_arg *arg,
31885+ struct ttm_buffer_object *cmd_buffer,
31886+ struct psb_ttm_fence_rep *fence_arg)
31887+{
31888+ struct drm_device *dev = priv->minor->dev;
31889+ int ret;
31890+
31891+ ret = psb_submit_copy_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
31892+ arg->cmdbuf_size, PSB_ENGINE_2D,
31893+ NULL);
31894+ if (ret)
31895+ goto out_unlock;
31896+
31897+ psb_fence_or_sync(priv, PSB_ENGINE_2D, fence_type,
31898+ arg->fence_flags, validate_list, fence_arg,
31899+ NULL);
31900+
31901+ mutex_lock(&cmd_buffer->mutex);
31902+ if (cmd_buffer->sync_obj != NULL)
31903+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
31904+ mutex_unlock(&cmd_buffer->mutex);
31905+out_unlock:
31906+ return ret;
31907+}
31908+
31909+#if 0
31910+static int psb_dump_page(struct ttm_buffer_object *bo,
31911+ unsigned int page_offset, unsigned int num)
31912+{
31913+ struct ttm_bo_kmap_obj kmobj;
31914+ int is_iomem;
31915+ uint32_t *p;
31916+ int ret;
31917+ unsigned int i;
31918+
31919+ ret = ttm_bo_kmap(bo, page_offset, 1, &kmobj);
31920+ if (ret)
31921+ return ret;
31922+
31923+ p = ttm_kmap_obj_virtual(&kmobj, &is_iomem);
31924+ for (i = 0; i < num; ++i)
31925+ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
31926+
31927+ ttm_bo_kunmap(&kmobj);
31928+ return 0;
31929+}
31930+#endif
31931+
31932+static void psb_idle_engine(struct drm_device *dev, int engine)
31933+{
31934+ struct drm_psb_private *dev_priv =
31935+ (struct drm_psb_private *) dev->dev_private;
31936+ uint32_t dummy;
31937+ unsigned long dummy2;
31938+
31939+ switch (engine) {
31940+ case PSB_ENGINE_2D:
31941+
31942+ /*
31943+ * Make sure we flush 2D properly using a dummy
31944+ * fence sequence emit.
31945+ */
31946+
31947+ (void) psb_fence_emit_sequence(&dev_priv->fdev,
31948+ PSB_ENGINE_2D, 0,
31949+ &dummy, &dummy2);
31950+ psb_2d_lock(dev_priv);
31951+ (void) psb_idle_2d(dev);
31952+ psb_2d_unlock(dev_priv);
31953+ break;
31954+ case PSB_ENGINE_TA:
31955+ case PSB_ENGINE_RASTERIZER:
31956+ case PSB_ENGINE_HPRAST:
31957+ (void) psb_idle_3d(dev);
31958+ break;
31959+ default:
31960+
31961+ /*
31962+ * FIXME: Insert video engine idle command here.
31963+ */
31964+
31965+ break;
31966+ }
31967+}
31968+
31969+static int psb_handle_copyback(struct drm_device *dev,
31970+ struct psb_context *context,
31971+ int ret)
31972+{
31973+ int err = ret;
31974+ struct ttm_validate_buffer *entry;
31975+ struct psb_validate_arg arg;
31976+ struct list_head *list = &context->validate_list;
31977+
31978+ if (ret) {
31979+ ttm_eu_backoff_reservation(list);
31980+ ttm_eu_backoff_reservation(&context->kern_validate_list);
31981+ }
31982+
31983+
31984+ if (ret != -EAGAIN && ret != -EINTR && ret != -ERESTART) {
31985+ list_for_each_entry(entry, list, head) {
31986+ struct psb_validate_buffer *vbuf =
31987+ container_of(entry, struct psb_validate_buffer,
31988+ base);
31989+ arg.handled = 1;
31990+ arg.ret = vbuf->ret;
31991+ if (!arg.ret) {
31992+ struct ttm_buffer_object *bo = entry->bo;
31993+ mutex_lock(&bo->mutex);
31994+ arg.d.rep.gpu_offset = bo->offset;
31995+ arg.d.rep.placement = bo->mem.flags;
31996+ arg.d.rep.fence_type_mask =
31997+ (uint32_t) (unsigned long)
31998+ entry->new_sync_obj_arg;
31999+ mutex_unlock(&bo->mutex);
32000+ }
32001+
32002+ if (__copy_to_user(vbuf->user_val_arg,
32003+ &arg, sizeof(arg)))
32004+ err = -EFAULT;
32005+
32006+ if (arg.ret)
32007+ break;
32008+ }
32009+ }
32010+
32011+ return err;
32012+}
32013+
32014+
32015+
32016+static int psb_feedback_buf(struct ttm_object_file *tfile,
32017+ struct psb_context *context,
32018+ uint32_t feedback_ops,
32019+ uint32_t handle,
32020+ uint32_t offset,
32021+ uint32_t feedback_breakpoints,
32022+ uint32_t feedback_size,
32023+ struct psb_feedback_info *feedback)
32024+{
32025+ struct ttm_buffer_object *bo;
32026+ struct page *page;
32027+ uint32_t page_no;
32028+ uint32_t page_offset;
32029+ int ret;
32030+
32031+ if (feedback_ops & ~PSB_FEEDBACK_OP_VISTEST) {
32032+ DRM_ERROR("Illegal feedback op.\n");
32033+ return -EINVAL;
32034+ }
32035+
32036+ if (feedback_breakpoints != 0) {
32037+ DRM_ERROR("Feedback breakpoints not implemented yet.\n");
32038+ return -EINVAL;
32039+ }
32040+
32041+ if (feedback_size < PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) {
32042+ DRM_ERROR("Feedback buffer size too small.\n");
32043+ return -EINVAL;
32044+ }
32045+
32046+ page_offset = offset & ~PAGE_MASK;
32047+ if ((PAGE_SIZE - PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t))
32048+ < page_offset) {
32049+ DRM_ERROR("Illegal feedback buffer alignment.\n");
32050+ return -EINVAL;
32051+ }
32052+
32053+ bo = ttm_buffer_object_lookup(tfile, handle);
32054+ if (unlikely(bo == NULL)) {
32055+ DRM_ERROR("Failed looking up feedback buffer.\n");
32056+ return -EINVAL;
32057+ }
32058+
32059+
32060+ ret = psb_validate_kernel_buffer(context, bo,
32061+ PSB_ENGINE_TA,
32062+ TTM_PL_FLAG_SYSTEM |
32063+ TTM_PL_FLAG_CACHED |
32064+ PSB_GPU_ACCESS_WRITE |
32065+ PSB_BO_FLAG_FEEDBACK,
32066+ TTM_PL_MASK_MEM &
32067+ ~(TTM_PL_FLAG_SYSTEM |
32068+ TTM_PL_FLAG_CACHED));
32069+ if (unlikely(ret != 0))
32070+ goto out_unref;
32071+
32072+ page_no = offset >> PAGE_SHIFT;
32073+ if (unlikely(page_no >= bo->num_pages)) {
32074+ ret = -EINVAL;
32075+ DRM_ERROR("Illegal feedback buffer offset.\n");
32076+ goto out_unref;
32077+ }
32078+
32079+ if (unlikely(bo->ttm == NULL)) {
32080+ ret = -EINVAL;
32081+ DRM_ERROR("Vistest buffer without TTM.\n");
32082+ goto out_unref;
32083+ }
32084+
32085+ page = ttm_tt_get_page(bo->ttm, page_no);
32086+ if (unlikely(page == NULL)) {
32087+ ret = -ENOMEM;
32088+ goto out_unref;
32089+ }
32090+
32091+ feedback->page = page;
32092+ feedback->offset = page_offset;
32093+
32094+ /*
32095+ * Note: bo reference transferred.
32096+ */
32097+
32098+ feedback->bo = bo;
32099+ return 0;
32100+
32101+out_unref:
32102+ ttm_bo_unref(&bo);
32103+ return ret;
32104+}
32105+
32106+inline int psb_try_power_down_sgx(struct drm_device *dev)
32107+{
32108+ if (powermgmt_is_gfx_busy()) {
32109+ return 0;
32110+ }
32111+
32112+ return powermgmt_suspend_islands(dev->pdev, PSB_GRAPHICS_ISLAND, false);
32113+}
32114+
32115+void psb_init_ospm(struct drm_psb_private *dev_priv)
32116+{
32117+ static int init;
32118+ if (!init) {
32119+ dev_priv->apm_reg = MSG_READ32(PSB_PUNIT_PORT, PSB_APMBA);
32120+ dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
32121+ PSB_DEBUG_PM("apm_reg:%x\n", dev_priv->apm_reg);
32122+#ifdef OSPM_STAT
32123+ dev_priv->graphics_state = PSB_PWR_STATE_D0;
32124+ dev_priv->gfx_last_mode_change = jiffies;
32125+ dev_priv->gfx_d0_time = 0;
32126+ dev_priv->gfx_d0i3_time = 0;
32127+ dev_priv->gfx_d3_time = 0;
32128+#endif
32129+ init = 1;
32130+ }
32131+}
32132+
32133+int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
32134+ struct drm_file *file_priv)
32135+{
32136+ struct drm_psb_cmdbuf_arg *arg = data;
32137+ int ret = 0;
32138+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
32139+ struct ttm_buffer_object *cmd_buffer = NULL;
32140+ struct ttm_buffer_object *ta_buffer = NULL;
32141+ struct ttm_buffer_object *oom_buffer = NULL;
32142+ struct psb_ttm_fence_rep fence_arg;
32143+ struct drm_psb_scene user_scene;
32144+ struct psb_scene_pool *pool = NULL;
32145+ struct psb_scene *scene = NULL;
32146+ struct drm_psb_private *dev_priv =
32147+ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
32148+ int engine;
32149+ struct psb_feedback_info feedback;
32150+ int po_correct;
32151+ struct psb_context *context;
32152+ unsigned num_buffers;
32153+
32154+ num_buffers = PSB_NUM_VALIDATE_BUFFERS;
32155+
32156+ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
32157+ if (unlikely(ret != 0))
32158+ return ret;
32159+
32160+ if (arg->engine == PSB_ENGINE_VIDEO)
32161+ powermgmt_using_hw_begin(dev->pdev, PSB_VIDEO_DEC_ISLAND, true);
32162+
32163+ if (arg->engine == LNC_ENGINE_ENCODE)
32164+ powermgmt_using_hw_begin(dev->pdev, PSB_VIDEO_ENC_ISLAND, true);
32165+
32166+ if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA) ||
32167+ (arg->engine == PSB_ENGINE_RASTERIZER))
32168+ powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true);
32169+
32170+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
32171+ if (unlikely(ret != 0))
32172+ goto out_err0;
32173+
32174+
32175+ context = &dev_priv->context;
32176+ context->used_buffers = 0;
32177+ context->fence_types = 0;
32178+ BUG_ON(!list_empty(&context->validate_list));
32179+ BUG_ON(!list_empty(&context->kern_validate_list));
32180+
32181+ if (unlikely(context->buffers == NULL)) {
32182+ context->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
32183+ sizeof(*context->buffers));
32184+ if (unlikely(context->buffers == NULL)) {
32185+ ret = -ENOMEM;
32186+ goto out_err1;
32187+ }
32188+ }
32189+
32190+ ret = psb_reference_buffers(file_priv,
32191+ arg->buffer_list,
32192+ context);
32193+
32194+ if (unlikely(ret != 0))
32195+ goto out_err1;
32196+
32197+ context->val_seq = atomic_add_return(1, &dev_priv->val_seq);
32198+
32199+ ret = ttm_eu_reserve_buffers(&context->validate_list,
32200+ context->val_seq);
32201+ if (unlikely(ret != 0)) {
32202+ goto out_err2;
32203+ }
32204+
32205+ engine = (arg->engine == PSB_ENGINE_RASTERIZER) ?
32206+ PSB_ENGINE_TA : arg->engine;
32207+
32208+ ret = psb_validate_buffer_list(file_priv, engine,
32209+ context, &po_correct);
32210+ if (unlikely(ret != 0))
32211+ goto out_err3;
32212+
32213+ if (!po_correct) {
32214+ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
32215+ arg->reloc_offset,
32216+ arg->reloc_handle, context, 0, 1);
32217+ if (unlikely(ret != 0))
32218+ goto out_err3;
32219+
32220+ }
32221+
32222+ cmd_buffer = ttm_buffer_object_lookup(tfile, arg->cmdbuf_handle);
32223+ if (unlikely(cmd_buffer == NULL)) {
32224+ ret = -EINVAL;
32225+ goto out_err4;
32226+ }
32227+
32228+ switch (arg->engine) {
32229+ case PSB_ENGINE_2D:
32230+ ret = psb_cmdbuf_2d(file_priv, &context->validate_list,
32231+ context->fence_types, arg, cmd_buffer,
32232+ &fence_arg);
32233+ if (unlikely(ret != 0))
32234+ goto out_err4;
32235+ break;
32236+ case PSB_ENGINE_VIDEO:
32237+ ret = psb_cmdbuf_video(file_priv, &context->validate_list,
32238+ context->fence_types, arg,
32239+ cmd_buffer, &fence_arg);
32240+
32241+ if (unlikely(ret != 0))
32242+ goto out_err4;
32243+ break;
32244+ case LNC_ENGINE_ENCODE:
32245+ ret = lnc_cmdbuf_video(file_priv, &context->validate_list,
32246+ context->fence_types, arg,
32247+ cmd_buffer, &fence_arg);
32248+ if (unlikely(ret != 0))
32249+ goto out_err4;
32250+ break;
32251+ case PSB_ENGINE_RASTERIZER:
32252+ ret = psb_cmdbuf_raster(file_priv, context,
32253+ arg, cmd_buffer, &fence_arg);
32254+ if (unlikely(ret != 0))
32255+ goto out_err4;
32256+ break;
32257+ case PSB_ENGINE_TA:
32258+ if (arg->ta_handle == arg->cmdbuf_handle) {
32259+ ta_buffer = ttm_bo_reference(cmd_buffer);
32260+ } else {
32261+ ta_buffer =
32262+ ttm_buffer_object_lookup(tfile,
32263+ arg->ta_handle);
32264+ if (!ta_buffer) {
32265+ ret = -EINVAL;
32266+ goto out_err4;
32267+ }
32268+ }
32269+ if (arg->oom_size != 0) {
32270+ if (arg->oom_handle == arg->cmdbuf_handle) {
32271+ oom_buffer = ttm_bo_reference(cmd_buffer);
32272+ } else {
32273+ oom_buffer =
32274+ ttm_buffer_object_lookup(tfile,
32275+ arg->
32276+ oom_handle);
32277+ if (!oom_buffer) {
32278+ ret = -EINVAL;
32279+ goto out_err4;
32280+ }
32281+ }
32282+ }
32283+
32284+ ret = copy_from_user(&user_scene, (void __user *)
32285+ ((unsigned long) arg->scene_arg),
32286+ sizeof(user_scene));
32287+ if (ret)
32288+ goto out_err4;
32289+
32290+ if (!user_scene.handle_valid) {
32291+ pool = psb_scene_pool_alloc(file_priv, 0,
32292+ user_scene.num_buffers,
32293+ user_scene.w,
32294+ user_scene.h);
32295+ if (!pool) {
32296+ ret = -ENOMEM;
32297+ goto out_err4;
32298+ }
32299+
32300+ user_scene.handle = psb_scene_pool_handle(pool);
32301+ user_scene.handle_valid = 1;
32302+ ret = copy_to_user((void __user *)
32303+ ((unsigned long) arg->
32304+ scene_arg), &user_scene,
32305+ sizeof(user_scene));
32306+
32307+ if (ret)
32308+ goto out_err4;
32309+ } else {
32310+ pool =
32311+ psb_scene_pool_lookup(file_priv,
32312+ user_scene.handle, 1);
32313+ if (!pool) {
32314+ ret = -EINVAL;
32315+ goto out_err4;
32316+ }
32317+ }
32318+
32319+ ret = psb_validate_scene_pool(context, pool,
32320+ user_scene.w,
32321+ user_scene.h,
32322+ arg->ta_flags &
32323+ PSB_TA_FLAG_LASTPASS, &scene);
32324+ if (ret)
32325+ goto out_err4;
32326+
32327+ memset(&feedback, 0, sizeof(feedback));
32328+ if (arg->feedback_ops) {
32329+ ret = psb_feedback_buf(tfile,
32330+ context,
32331+ arg->feedback_ops,
32332+ arg->feedback_handle,
32333+ arg->feedback_offset,
32334+ arg->feedback_breakpoints,
32335+ arg->feedback_size,
32336+ &feedback);
32337+ if (ret)
32338+ goto out_err4;
32339+ }
32340+ ret = psb_cmdbuf_ta(file_priv, context,
32341+ arg, cmd_buffer, ta_buffer,
32342+ oom_buffer, scene, &feedback,
32343+ &fence_arg);
32344+ if (ret)
32345+ goto out_err4;
32346+ break;
32347+ default:
32348+ DRM_ERROR
32349+ ("Unimplemented command submission mechanism (%x).\n",
32350+ arg->engine);
32351+ ret = -EINVAL;
32352+ goto out_err4;
32353+ }
32354+
32355+ if (!(arg->fence_flags & DRM_PSB_FENCE_NO_USER)) {
32356+ ret = copy_to_user((void __user *)
32357+ ((unsigned long) arg->fence_arg),
32358+ &fence_arg, sizeof(fence_arg));
32359+ }
32360+
32361+out_err4:
32362+ if (scene)
32363+ psb_scene_unref(&scene);
32364+ if (pool)
32365+ psb_scene_pool_unref(&pool);
32366+ if (cmd_buffer)
32367+ ttm_bo_unref(&cmd_buffer);
32368+ if (ta_buffer)
32369+ ttm_bo_unref(&ta_buffer);
32370+ if (oom_buffer)
32371+ ttm_bo_unref(&oom_buffer);
32372+out_err3:
32373+ ret = psb_handle_copyback(dev, context, ret);
32374+out_err2:
32375+ psb_unreference_buffers(context);
32376+out_err1:
32377+ mutex_unlock(&dev_priv->cmdbuf_mutex);
32378+out_err0:
32379+ ttm_read_unlock(&dev_priv->ttm_lock);
32380+
32381+ if (arg->engine == PSB_ENGINE_VIDEO)
32382+ powermgmt_using_hw_end(PSB_VIDEO_DEC_ISLAND);
32383+
32384+ if (arg->engine == LNC_ENGINE_ENCODE)
32385+ powermgmt_using_hw_end(PSB_VIDEO_ENC_ISLAND);
32386+
32387+ if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA)
32388+ || (arg->engine == PSB_ENGINE_RASTERIZER))
32389+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
32390+ return ret;
32391+}
32392+
32393+static int psb_do_init_pageflip(struct drm_device * dev)
32394+{
32395+ struct drm_psb_private *dev_priv = dev->dev_private;
32396+ u32 pipe_status[2];
32397+ int pipe, dspbase;
32398+
32399+ if (!powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false))
32400+ return -1;
32401+
32402+ dev_priv->dri_page_flipping = 1;
32403+ dev_priv->current_page = 0;
32404+ for (pipe = 0; pipe < 2; pipe++){
32405+ pipe_status[pipe] = REG_READ(pipe == 0 ? PIPEACONF : PIPEBCONF);
32406+ if (pipe_status[pipe] & PIPEACONF_ENABLE){
32407+ dev_priv->pipe_active[pipe] = 1;
32408+ dev_priv->saved_stride[pipe] = REG_READ((pipe == 0) ? DSPASTRIDE : DSPBSTRIDE);
32409+ dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
32410+ if (IS_MRST(dev) && (pipe == 0))
32411+ dspbase = MRST_DSPABASE;
32412+ if (IS_MRST(dev)) {
32413+ dev_priv->saved_start[pipe] = REG_READ(pipe == 0 ? DSPASURF : DSPBSURF);
32414+ dev_priv->saved_offset[pipe] = REG_READ(dspbase);
32415+ } else {
32416+ dev_priv->saved_start[pipe] = REG_READ(pipe == 0 ? DSPABASE : DSPBBASE);
32417+ dev_priv->saved_offset[pipe] = 0;
32418+ }
32419+ }
32420+ else
32421+ dev_priv->pipe_active[pipe] = 0;
32422+ }
32423+
32424+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
32425+
32426+ return 0;
32427+}
32428+
32429+int psb_page_flip(struct drm_device *dev, void *data,
32430+ struct drm_file *file_priv)
32431+{
32432+ struct drm_psb_pageflip_arg *arg = data;
32433+ int pipe;
32434+
32435+ struct drm_psb_private *dev_priv =
32436+ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
32437+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
32438+ struct psb_task *task = NULL;
32439+
32440+ if (!dev_priv->dri_page_flipping)
32441+ if (psb_do_init_pageflip(dev))
32442+ return 0;
32443+
32444+ task = kzalloc(sizeof(*task), GFP_KERNEL);
32445+ if (!task)
32446+ return -ENOMEM;
32447+ INIT_LIST_HEAD(&task->head);
32448+ INIT_LIST_HEAD(&task->buf.head);
32449+ task->task_type = psb_flip_task;
32450+
32451+ spin_lock_irq(&scheduler->lock);
32452+ list_add_tail(&task->head, &scheduler->ta_queue);
32453+ /**
32454+ * From this point we may no longer dereference task,
32455+ * as the object it points to may be freed by another thread.
32456+ */
32457+
32458+ task = NULL;
32459+ spin_unlock_irq(&scheduler->lock);
32460+ for (pipe = 0; pipe < 2; pipe++) {
32461+ if (dev_priv->pipe_active[pipe] == 1) {
32462+ dev_priv->flip_start[pipe] = arg->flip_offset;
32463+ dev_priv->flip_offset[pipe] = dev_priv->saved_offset[pipe];
32464+ dev_priv->flip_stride[pipe] = dev_priv->saved_stride[pipe];
32465+ }
32466+ }
32467+ return 0;
32468+}
32469+
32470+int psb_flip_set_base(struct drm_psb_private *dev_priv, int pipe)
32471+{
32472+ struct drm_device *dev = dev_priv->dev;
32473+
32474+ unsigned long Start, Offset, Stride;
32475+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
32476+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
32477+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
32478+
32479+ if (IS_MRST(dev) && (pipe == 0))
32480+ dspbase = MRST_DSPABASE;
32481+
32482+ Start = dev_priv->flip_start[pipe];
32483+ Offset = dev_priv->flip_offset[pipe];
32484+ Stride = dev_priv->flip_stride[pipe];
32485+
32486+ REG_WRITE(dspstride, Stride);
32487+
32488+ DRM_DEBUG("Writing base: %08lX Offset: %08lX Stride: %08lX\n", Start, Offset, Stride);
32489+ if (IS_MRST(dev)) {
32490+ REG_WRITE(dspbase, Offset);
32491+ REG_READ(dspbase);
32492+ REG_WRITE(dspsurf, Start);
32493+ REG_READ(dspsurf);
32494+ } else {
32495+ REG_WRITE(dspbase, Start + Offset);
32496+ REG_READ(dspbase);
32497+ }
32498+
32499+ if (dev_priv->dri_page_flipping == 1)
32500+ dev_priv->current_page = 1 - dev_priv->current_page;
32501+
32502+ return 0;
32503+}
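+
+/*
+ * Note on the sequence above: on Moorestown (IS_MRST) the offset is written
+ * to the base register and the new surface address to DSPASURF/DSPBSURF,
+ * while on Poulsbo the single base register takes Start + Offset. The
+ * REG_READ() after each REG_WRITE() is the usual posting read that flushes
+ * the write before the flip is considered programmed.
+ */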
32504+
32505diff --git a/drivers/gpu/drm/psb/psb_sgx.h b/drivers/gpu/drm/psb/psb_sgx.h
32506new file mode 100644
32507index 0000000..9321b98
32508--- /dev/null
32509+++ b/drivers/gpu/drm/psb/psb_sgx.h
32510@@ -0,0 +1,41 @@
32511+/*
32512+ * Copyright (c) 2008, Intel Corporation
32513+ *
32514+ * Permission is hereby granted, free of charge, to any person obtaining a
32515+ * copy of this software and associated documentation files (the "Software"),
32516+ * to deal in the Software without restriction, including without limitation
32517+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
32518+ * and/or sell copies of the Software, and to permit persons to whom the
32519+ * Software is furnished to do so, subject to the following conditions:
32520+ *
32521+ * The above copyright notice and this permission notice (including the next
32522+ * paragraph) shall be included in all copies or substantial portions of the
32523+ * Software.
32524+ *
32525+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32526+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
32527+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
32528+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32529+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
32530+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32531+ * SOFTWARE.
32532+ *
32533+ * Authors:
32534+ * Eric Anholt <eric@anholt.net>
32535+ *
32536+ **/
32537+#ifndef _PSB_SGX_H_
32538+#define _PSB_SGX_H_
32539+
32540+extern int psb_submit_video_cmdbuf(struct drm_device *dev,
32541+ struct ttm_buffer_object *cmd_buffer,
32542+ unsigned long cmd_offset,
32543+ unsigned long cmd_size,
32544+ struct ttm_fence_object *fence);
32545+
32546+extern int psb_2d_wait_available(struct drm_psb_private *dev_priv,
32547+ unsigned size);
32548+extern int drm_idle_check_interval;
32549+extern int drm_psb_ospm;
32550+
32551+#endif
32552diff --git a/drivers/gpu/drm/psb/psb_socket.c b/drivers/gpu/drm/psb/psb_socket.c
32553new file mode 100644
32554index 0000000..4814e55
32555--- /dev/null
32556+++ b/drivers/gpu/drm/psb/psb_socket.c
32557@@ -0,0 +1,340 @@
32558+/*
32559+ * kernel userspace event delivery
32560+ *
32561+ * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
32562+ * Copyright (C) 2004 Novell, Inc. All rights reserved.
32563+ * Copyright (C) 2004 IBM, Inc. All rights reserved.
32564+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
32565+ *
32566+ * Licensed under the GNU GPL v2.
32567+ *
32568+ * Authors:
32569+ * Robert Love <rml@novell.com>
32570+ * Kay Sievers <kay.sievers@vrfy.org>
32571+ * Arjan van de Ven <arjanv@redhat.com>
32572+ * Greg Kroah-Hartman <greg@kroah.com>
32573+ * James C. Gualario <james.c.gualario@intel.com>
32574+ *
32575+ */
32576+
32577+#include <linux/spinlock.h>
32578+#include <linux/string.h>
32579+#include <linux/kobject.h>
32580+#include <linux/module.h>
32581+#include <linux/socket.h>
32582+#include <linux/skbuff.h>
32583+#include <linux/netlink.h>
32584+#include <net/sock.h>
32585+
32586+#define NETLINK_PSB_KOBJECT_UEVENT 31
32587+
32588+u64 psb_uevent_seqnum;
32589+char psb_uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
32590+static DEFINE_SPINLOCK(sequence_lock);
32591+#if defined(CONFIG_NET)
32592+static struct sock *uevent_sock;
32593+#endif
32594+
32595+/* the strings here must match the enum in include/linux/kobject.h */
32596+static const char *psb_kobject_actions[] = {
32597+ [KOBJ_ADD] = "add",
32598+ [KOBJ_REMOVE] = "remove",
32599+ [KOBJ_CHANGE] = "change",
32600+ [KOBJ_MOVE] = "move",
32601+ [KOBJ_ONLINE] = "online",
32602+ [KOBJ_OFFLINE] = "offline",
32603+};
32604+
32605+/**
32606+ * psb_kobject_action_type - translate action string to numeric type
32607+ *
32608+ * @buf: buffer containing the action string, newline is ignored
32609+ * @count: length of buffer
32610+ * @type: pointer to the location to store the action type
32611+ *
32612+ * Returns 0 if the action string was recognized.
32613+ */
32614+int psb_kobject_action_type(const char *buf, size_t count,
32615+ enum kobject_action *type)
32616+{
32617+ enum kobject_action action;
32618+ int ret = -EINVAL;
32619+
32620+ if (count && (buf[count-1] == '\n' || buf[count-1] == '\0'))
32621+ count--;
32622+
32623+ if (!count)
32624+ goto out;
32625+
32626+ for (action = 0; action < ARRAY_SIZE(psb_kobject_actions); action++) {
32627+ if (strncmp(psb_kobject_actions[action], buf, count) != 0)
32628+ continue;
32629+ if (psb_kobject_actions[action][count] != '\0')
32630+ continue;
32631+ *type = action;
32632+ ret = 0;
32633+ break;
32634+ }
32635+out:
32636+ return ret;
32637+}
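+
+/*
+ * Illustrative sketch of the mapping above: a sysfs write of "change\n"
+ * (count == 7) has its trailing newline stripped and matches the "change"
+ * entry of psb_kobject_actions[] exactly, so
+ *
+ *	enum kobject_action type;
+ *	int err = psb_kobject_action_type("change\n", 7, &type);
+ *
+ * leaves err == 0 and type == KOBJ_CHANGE, while an unknown or truncated
+ * string such as "chan" leaves err == -EINVAL.
+ */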
32638+
32639+/**
32640+ * psb_kobject_uevent_env - send an uevent with environmental data
32641+ *
32642+ * @action: action that is happening
32643+ * @kobj: struct kobject that the action is happening to
32644+ * @envp_ext: pointer to environmental data
32645+ *
32646+ * Returns 0 if psb_kobject_uevent_env() is completed with success or the
32647+ * corresponding error when it fails.
32648+ */
32649+int psb_kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
32650+ char *envp_ext[])
32651+{
32652+ struct kobj_uevent_env *env;
32653+ const char *action_string = psb_kobject_actions[action];
32654+ const char *devpath = NULL;
32655+ const char *subsystem;
32656+ struct kobject *top_kobj;
32657+ struct kset *kset;
32658+ struct kset_uevent_ops *uevent_ops;
32659+ u64 seq;
32660+ int i = 0;
32661+ int retval = 0;
32662+
32663+ pr_debug("kobject: '%s' (%p): %s\n",
32664+ kobject_name(kobj), kobj, __func__);
32665+
32666+ /* search the kset we belong to */
32667+ top_kobj = kobj;
32668+ while (!top_kobj->kset && top_kobj->parent)
32669+ top_kobj = top_kobj->parent;
32670+
32671+ if (!top_kobj->kset) {
32672+ pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
32673+ "without kset!\n", kobject_name(kobj), kobj,
32674+ __func__);
32675+ return -EINVAL;
32676+ }
32677+
32678+ kset = top_kobj->kset;
32679+ uevent_ops = kset->uevent_ops;
32680+
32681+ /* skip the event, if uevent_suppress is set*/
32682+ if (kobj->uevent_suppress) {
32683+ pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
32684+ "caused the event to drop!\n",
32685+ kobject_name(kobj), kobj, __func__);
32686+ return 0;
32687+ }
32688+ /* skip the event, if the filter returns zero. */
32689+ if (uevent_ops && uevent_ops->filter)
32690+ if (!uevent_ops->filter(kset, kobj)) {
32691+ pr_debug("kobject: '%s' (%p): %s: filter function "
32692+ "caused the event to drop!\n",
32693+ kobject_name(kobj), kobj, __func__);
32694+ return 0;
32695+ }
32696+
32697+ /* originating subsystem */
32698+ if (uevent_ops && uevent_ops->name)
32699+ subsystem = uevent_ops->name(kset, kobj);
32700+ else
32701+ subsystem = kobject_name(&kset->kobj);
32702+ if (!subsystem) {
32703+ pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
32704+ "event to drop!\n", kobject_name(kobj), kobj,
32705+ __func__);
32706+ return 0;
32707+ }
32708+
32709+ /* environment buffer */
32710+ env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
32711+ if (!env)
32712+ return -ENOMEM;
32713+
32714+ /* complete object path */
32715+ devpath = kobject_get_path(kobj, GFP_KERNEL);
32716+ if (!devpath) {
32717+ retval = -ENOENT;
32718+ goto exit;
32719+ }
32720+
32721+ /* default keys */
32722+ retval = add_uevent_var(env, "ACTION=%s", action_string);
32723+ if (retval)
32724+ goto exit;
32725+ retval = add_uevent_var(env, "DEVPATH=%s", devpath);
32726+ if (retval)
32727+ goto exit;
32728+ retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem);
32729+ if (retval)
32730+ goto exit;
32731+
32732+ /* keys passed in from the caller */
32733+ if (envp_ext) {
32734+ for (i = 0; envp_ext[i]; i++) {
32735+ retval = add_uevent_var(env, "%s", envp_ext[i]);
32736+ if (retval)
32737+ goto exit;
32738+ }
32739+ }
32740+
32741+ /* let the kset specific function add its stuff */
32742+ if (uevent_ops && uevent_ops->uevent) {
32743+ retval = uevent_ops->uevent(kset, kobj, env);
32744+ if (retval) {
32745+ pr_debug("kobject: '%s' (%p): %s: uevent() returned "
32746+ "%d\n", kobject_name(kobj), kobj,
32747+ __func__, retval);
32748+ goto exit;
32749+ }
32750+ }
32751+
32752+ /*
32753+ * Mark "add" and "remove" events in the object to ensure proper
32754+ * events to userspace during automatic cleanup. If the object did
32755+ * send an "add" event, "remove" will automatically generated by
32756+ * the core, if not already done by the caller.
32757+ */
32758+ if (action == KOBJ_ADD)
32759+ kobj->state_add_uevent_sent = 1;
32760+ else if (action == KOBJ_REMOVE)
32761+ kobj->state_remove_uevent_sent = 1;
32762+
32763+ /* we will send an event, so request a new sequence number */
32764+ spin_lock(&sequence_lock);
32765+ seq = ++psb_uevent_seqnum;
32766+ spin_unlock(&sequence_lock);
32767+ retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
32768+ if (retval)
32769+ goto exit;
32770+
32771+#if defined(CONFIG_NET)
32772+ /* send netlink message */
32773+ if (uevent_sock) {
32774+ struct sk_buff *skb;
32775+ size_t len;
32776+
32777+ /* allocate message with the maximum possible size */
32778+ len = strlen(action_string) + strlen(devpath) + 2;
32779+ skb = alloc_skb(len + env->buflen, GFP_KERNEL);
32780+ if (skb) {
32781+ char *scratch;
32782+
32783+ /* add header */
32784+ scratch = skb_put(skb, len);
32785+ sprintf(scratch, "%s@%s", action_string, devpath);
32786+
32787+ /* copy keys to our continuous event payload buffer */
32788+ for (i = 0; i < env->envp_idx; i++) {
32789+ len = strlen(env->envp[i]) + 1;
32790+ scratch = skb_put(skb, len);
32791+ strcpy(scratch, env->envp[i]);
32792+ }
32793+
32794+ NETLINK_CB(skb).dst_group = 1;
32795+ retval = netlink_broadcast(uevent_sock, skb, 0, 1,
32796+ GFP_KERNEL);
32797+ /* ENOBUFS should be handled in userspace */
32798+ if (retval == -ENOBUFS)
32799+ retval = 0;
32800+ } else
32801+ retval = -ENOMEM;
32802+ }
32803+#endif
32804+
32805+ /* call psb_uevent_helper, usually only enabled during early boot */
32806+ if (psb_uevent_helper[0]) {
32807+ char *argv[3];
32808+
32809+ argv[0] = psb_uevent_helper;
32810+ argv[1] = (char *)subsystem;
32811+ argv[2] = NULL;
32812+ retval = add_uevent_var(env, "HOME=/");
32813+ if (retval)
32814+ goto exit;
32815+ retval = add_uevent_var(env,
32816+ "PATH=/sbin:/bin:/usr/sbin:/usr/bin");
32817+ if (retval)
32818+ goto exit;
32819+
32820+ retval = call_usermodehelper(argv[0], argv,
32821+ env->envp, UMH_WAIT_EXEC);
32822+ }
32823+
32824+exit:
32825+ kfree(devpath);
32826+ kfree(env);
32827+ return retval;
32828+}
32829+EXPORT_SYMBOL_GPL(psb_kobject_uevent_env);
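+
+/*
+ * Payload sketch for the netlink path above (CONFIG_NET case, contents
+ * abbreviated): the message starts with the "ACTION@DEVPATH" header and is
+ * followed by every environment string, all NUL-separated, e.g.
+ *
+ *	change@/devpath\0ACTION=change\0DEVPATH=/devpath\0SUBSYSTEM=...\0
+ *	...\0SEQNUM=<n>\0
+ *
+ * i.e. the same wire format as the stock kobject_uevent(), only broadcast
+ * on the private NETLINK_PSB_KOBJECT_UEVENT protocol (31) so a dedicated
+ * listener can receive it separately from the regular uevent socket.
+ */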
32830+
32831+/**
32832+ * psb_kobject_uevent - notify userspace by sending an uevent
32833+ *
32834+ * @action: action that is happening
32835+ * @kobj: struct kobject that the action is happening to
32836+ *
32837+ * Returns 0 if psb_kobject_uevent() is completed with success or the
32838+ * corresponding error when it fails.
32839+ */
32840+int psb_kobject_uevent(struct kobject *kobj, enum kobject_action action)
32841+{
32842+ return psb_kobject_uevent_env(kobj, action, NULL);
32843+}
32844+EXPORT_SYMBOL_GPL(psb_kobject_uevent);
32845+
32846+/**
32847+ * psb_add_uevent_var - add key value string to the environment buffer
32848+ * @env: environment buffer structure
32849+ * @format: printf format for the key=value pair
32850+ *
32851+ * Returns 0 if environment variable was added successfully or -ENOMEM
32852+ * if no space was available.
32853+ */
32854+int psb_add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
32855+{
32856+ va_list args;
32857+ int len;
32858+
32859+ if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
32860+ WARN(1, KERN_ERR "psb_add_uevent_var: too many keys\n");
32861+ return -ENOMEM;
32862+ }
32863+
32864+ va_start(args, format);
32865+ len = vsnprintf(&env->buf[env->buflen],
32866+ sizeof(env->buf) - env->buflen,
32867+ format, args);
32868+ va_end(args);
32869+
32870+ if (len >= (sizeof(env->buf) - env->buflen)) {
32871+ WARN(1,
32872+ KERN_ERR "psb_add_uevent_var: buffer size too small\n");
32873+ return -ENOMEM;
32874+ }
32875+
32876+ env->envp[env->envp_idx++] = &env->buf[env->buflen];
32877+ env->buflen += len + 1;
32878+ return 0;
32879+}
32880+EXPORT_SYMBOL_GPL(psb_add_uevent_var);
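+
+/*
+ * Buffer accounting sketch for the helper above: each successful call
+ * appends one NUL-terminated "KEY=value" string to env->buf and stores a
+ * pointer to it in env->envp[], so for example
+ *
+ *	psb_add_uevent_var(env, "ACTION=%s", "change");
+
+ * consumes strlen("ACTION=change") + 1 bytes of env->buf and one envp slot;
+ * -ENOMEM is returned once either the slot array or the buffer runs out.
+ */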
32881+
32882+#if defined(CONFIG_NET)
32883+static int __init psb_kobject_uevent_init(void)
32884+{
32885+ uevent_sock = netlink_kernel_create(&init_net,
32886+ NETLINK_PSB_KOBJECT_UEVENT,
32887+ 1, NULL, NULL, THIS_MODULE);
32888+ if (!uevent_sock) {
32889+ printk(KERN_ERR "psb_kobject_uevent: failed create socket!\n");
32890+ return -ENODEV;
32891+ }
32892+ netlink_set_nonroot(NETLINK_PSB_KOBJECT_UEVENT, NL_NONROOT_RECV);
32893+ return 0;
32894+}
32895+
32896+postcore_initcall(psb_kobject_uevent_init);
32897+#endif
32898diff --git a/drivers/gpu/drm/psb/psb_ttm_glue.c b/drivers/gpu/drm/psb/psb_ttm_glue.c
32899new file mode 100644
32900index 0000000..cada0d9
32901--- /dev/null
32902+++ b/drivers/gpu/drm/psb/psb_ttm_glue.c
32903@@ -0,0 +1,342 @@
32904+/**************************************************************************
32905+ * Copyright (c) 2008, Intel Corporation.
32906+ * All Rights Reserved.
32907+ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
32908+ * All Rights Reserved.
32909+ *
32910+ * This program is free software; you can redistribute it and/or modify it
32911+ * under the terms and conditions of the GNU General Public License,
32912+ * version 2, as published by the Free Software Foundation.
32913+ *
32914+ * This program is distributed in the hope it will be useful, but WITHOUT
32915+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
32916+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
32917+ * more details.
32918+ *
32919+ * You should have received a copy of the GNU General Public License along with
32920+ * this program; if not, write to the Free Software Foundation, Inc.,
32921+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
32922+ *
32923+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
32924+ * develop this driver.
32925+ *
32926+ **************************************************************************/
32927+/*
32928+ */
32929+
32930+#include <drm/drmP.h>
32931+#include "psb_drv.h"
32932+#include "ttm/ttm_userobj_api.h"
32933+#include "psb_powermgmt.h"
32934+
32935+static struct vm_operations_struct psb_ttm_vm_ops;
32936+
32937+int psb_open(struct inode *inode, struct file *filp)
32938+{
32939+ struct drm_file *file_priv;
32940+ struct drm_psb_private *dev_priv;
32941+ struct psb_fpriv *psb_fp;
32942+ int ret;
32943+
32944+ ret = drm_open(inode, filp);
32945+ if (unlikely(ret))
32946+ return ret;
32947+
32948+ psb_fp = kzalloc(sizeof(*psb_fp), GFP_KERNEL);
32949+
32950+ if (unlikely(psb_fp == NULL))
32951+ goto out_err0;
32952+
32953+ file_priv = (struct drm_file *) filp->private_data;
32954+ dev_priv = psb_priv(file_priv->minor->dev);
32955+
32956+
32957+ psb_fp->tfile = ttm_object_file_init(dev_priv->tdev,
32958+ PSB_FILE_OBJECT_HASH_ORDER);
32959+ if (unlikely(psb_fp->tfile == NULL))
32960+ goto out_err1;
32961+
32962+ file_priv->driver_priv = psb_fp;
32963+
32964+ if (unlikely(dev_priv->bdev.dev_mapping == NULL))
32965+ dev_priv->bdev.dev_mapping = dev_priv->dev->dev_mapping;
32966+
32967+ return 0;
32968+
32969+out_err1:
32970+ kfree(psb_fp);
32971+out_err0:
32972+ (void) drm_release(inode, filp);
32973+ return -ENOMEM;
32974+}
32975+
32976+int psb_release(struct inode *inode, struct file *filp)
32977+{
32978+ struct drm_file *file_priv;
32979+ struct psb_fpriv *psb_fp;
32980+ struct drm_psb_private *dev_priv;
32981+ int ret;
32982+
32983+ file_priv = (struct drm_file *) filp->private_data;
32984+ psb_fp = psb_fpriv(file_priv);
32985+ dev_priv = psb_priv(file_priv->minor->dev);
32986+
32987+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND|PSB_DISPLAY_ISLAND, true);
32988+
32989+ ttm_object_file_release(&psb_fp->tfile);
32990+ kfree(psb_fp);
32991+
32992+ if (dev_priv && dev_priv->xhw_file)
32993+ psb_xhw_init_takedown(dev_priv, file_priv, 1);
32994+
32995+ ret = drm_release(inode, filp);
32996+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND|PSB_DISPLAY_ISLAND);
32997+ if (drm_psb_ospm && IS_MRST(dev_priv->dev))
32998+ schedule_delayed_work(&dev_priv->scheduler.wq, 0);
32999+
33000+ if (IS_MRST(dev_priv->dev))
33001+ schedule_delayed_work(&dev_priv->scheduler.topaz_suspend_wq, 0);
33002+ if (IS_MRST(dev_priv->dev))
33003+ schedule_delayed_work(&dev_priv->scheduler.msvdx_suspend_wq, 0);
33004+
33005+ return ret;
33006+}
33007+
33008+int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
33009+ struct drm_file *file_priv)
33010+{
33011+ int ret;
33012+ struct drm_psb_private *dev_priv = psb_priv(dev);
33013+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33014+ ret = ttm_fence_signaled_ioctl(psb_fpriv(file_priv)->tfile, data);
33015+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33016+ if (drm_psb_ospm && IS_MRST(dev))
33017+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33018+ return ret;
33019+}
33020+
33021+int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
33022+ struct drm_file *file_priv)
33023+{
33024+ int ret;
33025+ struct drm_psb_private *dev_priv = psb_priv(dev);
33026+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33027+ ret = ttm_fence_finish_ioctl(psb_fpriv(file_priv)->tfile, data);
33028+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33029+ if (drm_psb_ospm && IS_MRST(dev))
33030+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33031+ return ret;
33032+}
33033+
33034+int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
33035+ struct drm_file *file_priv)
33036+{
33037+ int ret;
33038+ struct drm_psb_private *dev_priv = psb_priv(dev);
33039+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33040+ ret = ttm_fence_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
33041+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33042+ if (drm_psb_ospm && IS_MRST(dev))
33043+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33044+ return ret;
33045+}
33046+
33047+int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
33048+ struct drm_file *file_priv)
33049+{
33050+ return ttm_pl_waitidle_ioctl(psb_fpriv(file_priv)->tfile, data);
33051+}
33052+
33053+int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
33054+ struct drm_file *file_priv)
33055+{
33056+ int ret;
33057+ struct drm_psb_private *dev_priv = psb_priv(dev);
33058+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33059+ ret = ttm_pl_setstatus_ioctl(psb_fpriv(file_priv)->tfile,
33060+ &psb_priv(dev)->ttm_lock, data);
33061+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33062+ if (drm_psb_ospm && IS_MRST(dev))
33063+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33064+ return ret;
33065+}
33066+
33067+int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
33068+ struct drm_file *file_priv)
33069+{
33070+ int ret;
33071+ struct drm_psb_private *dev_priv = psb_priv(dev);
33072+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33073+ ret = ttm_pl_synccpu_ioctl(psb_fpriv(file_priv)->tfile, data);
33074+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33075+ if (drm_psb_ospm && IS_MRST(dev))
33076+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33077+ return ret;
33078+}
33079+
33080+int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
33081+ struct drm_file *file_priv)
33082+{
33083+ struct drm_psb_private *dev_priv = psb_priv(dev);
33084+ int ret;
33085+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33086+ ret = ttm_pl_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
33087+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33088+ if (drm_psb_ospm && IS_MRST(dev))
33089+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33090+ return ret;
33091+}
33092+
33093+int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
33094+ struct drm_file *file_priv)
33095+{
33096+ struct drm_psb_private *dev_priv = psb_priv(dev);
33097+ int ret;
33098+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33099+ ret = ttm_pl_reference_ioctl(psb_fpriv(file_priv)->tfile, data);
33100+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33101+ if (drm_psb_ospm && IS_MRST(dev))
33102+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33103+ return ret;
33104+}
33105+
33106+int psb_pl_create_ioctl(struct drm_device *dev, void *data,
33107+ struct drm_file *file_priv)
33108+{
33109+ struct drm_psb_private *dev_priv = psb_priv(dev);
33110+ int ret;
33111+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33112+ ret = ttm_pl_create_ioctl(psb_fpriv(file_priv)->tfile,
33113+ &dev_priv->bdev, &dev_priv->ttm_lock, data);
33114+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33115+ if (drm_psb_ospm && IS_MRST(dev))
33116+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33117+ return ret;
33118+}
33119+
33120+/**
33121+ * psb_ttm_fault - Wrapper around the ttm fault method.
33122+ *
33123+ * @vma: The struct vm_area_struct as in the vm fault() method.
33124+ * @vmf: The struct vm_fault as in the vm fault() method.
33125+ *
33126+ * Since ttm_fault() will reserve buffers while faulting,
33127+ * we need to take the ttm read lock around it, as this driver
33128+ * relies on the ttm_lock in write mode to exclude all threads from
33129+ * reserving and thus validating buffers in aperture- and memory shortage
33130+ * situations.
33131+ */
33132+
33133+static int psb_ttm_fault(struct vm_area_struct *vma,
33134+ struct vm_fault *vmf)
33135+{
33136+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33137+ vma->vm_private_data;
33138+ struct drm_psb_private *dev_priv =
33139+ container_of(bo->bdev, struct drm_psb_private, bdev);
33140+ int ret;
33141+
33142+ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
33143+ if (unlikely(ret != 0))
33144+ return VM_FAULT_NOPAGE;
33145+
33146+ ret = dev_priv->ttm_vm_ops->fault(vma, vmf);
33147+
33148+ ttm_read_unlock(&dev_priv->ttm_lock);
33149+ return ret;
33150+}
33151+
33152+
33153+int psb_mmap(struct file *filp, struct vm_area_struct *vma)
33154+{
33155+ struct drm_file *file_priv;
33156+ struct drm_psb_private *dev_priv;
33157+ int ret;
33158+
33159+ if (unlikely(vma->vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET))
33160+ return drm_mmap(filp, vma);
33161+
33162+ file_priv = (struct drm_file *) filp->private_data;
33163+ dev_priv = psb_priv(file_priv->minor->dev);
33164+
33165+ ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
33166+ if (unlikely(ret != 0))
33167+ return ret;
33168+
33169+ if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
33170+ dev_priv->ttm_vm_ops = vma->vm_ops;
33171+ psb_ttm_vm_ops = *vma->vm_ops;
33172+ psb_ttm_vm_ops.fault = &psb_ttm_fault;
33173+ }
33174+
33175+ vma->vm_ops = &psb_ttm_vm_ops;
33176+
33177+ return 0;
33178+}
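+
+/*
+ * The first successful mmap above captures TTM's vm_ops, copies them into
+ * psb_ttm_vm_ops and overrides only .fault with psb_ttm_fault(), so every
+ * later page fault on a PSB buffer mapping takes the ttm_lock read lock
+ * before falling through to the original TTM fault handler.
+ */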
33179+
33180+ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
33181+ size_t count, loff_t *f_pos)
33182+{
33183+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
33184+ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
33185+
33186+ return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1);
33187+}
33188+
33189+ssize_t psb_ttm_read(struct file *filp, char __user *buf,
33190+ size_t count, loff_t *f_pos)
33191+{
33192+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
33193+ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
33194+
33195+ return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1);
33196+}
33197+
33198+int psb_verify_access(struct ttm_buffer_object *bo,
33199+ struct file *filp)
33200+{
33201+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
33202+
33203+ if (capable(CAP_SYS_ADMIN))
33204+ return 0;
33205+
33206+ if (unlikely(!file_priv->authenticated))
33207+ return -EPERM;
33208+
33209+ return ttm_pl_verify_access(bo, psb_fpriv(file_priv)->tfile);
33210+}
33211+
33212+static int psb_ttm_mem_global_init(struct drm_global_reference *ref)
33213+{
33214+ return ttm_mem_global_init(ref->object);
33215+}
33216+
33217+static void psb_ttm_mem_global_release(struct drm_global_reference *ref)
33218+{
33219+ ttm_mem_global_release(ref->object);
33220+}
33221+
33222+int psb_ttm_global_init(struct drm_psb_private *dev_priv)
33223+{
33224+ struct drm_global_reference *global_ref;
33225+ int ret;
33226+
33227+ global_ref = &dev_priv->mem_global_ref;
33228+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
33229+ global_ref->size = sizeof(struct ttm_mem_global);
33230+ global_ref->init = &psb_ttm_mem_global_init;
33231+ global_ref->release = &psb_ttm_mem_global_release;
33232+
33233+ ret = drm_global_item_ref(global_ref);
33234+ if (unlikely(ret != 0)) {
33235+ DRM_ERROR("Failed referencing a global TTM memory object.\n");
33236+ return ret;
33237+ }
33238+
33239+ return 0;
33240+}
33241+
33242+void psb_ttm_global_release(struct drm_psb_private *dev_priv)
33243+{
33244+ drm_global_item_unref(&dev_priv->mem_global_ref);
33245+}
33246diff --git a/drivers/gpu/drm/psb/psb_umevents.c b/drivers/gpu/drm/psb/psb_umevents.c
33247new file mode 100644
33248index 0000000..90b91c1
33249--- /dev/null
33250+++ b/drivers/gpu/drm/psb/psb_umevents.c
33251@@ -0,0 +1,490 @@
33252+/*
33253+ * Copyright © 2009 Intel Corporation
33254+ *
33255+ * Permission is hereby granted, free of charge, to any person obtaining a
33256+ * copy of this software and associated documentation files (the "Software"),
33257+ * to deal in the Software without restriction, including without limitation
33258+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
33259+ * and/or sell copies of the Software, and to permit persons to whom the
33260+ * Software is furnished to do so, subject to the following conditions:
33261+ *
33262+ * The above copyright notice and this permission notice (including the next
33263+ * paragraph) shall be included in all copies or substantial portions of the
33264+ * Software.
33265+ *
33266+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33267+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33268+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
33269+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33270+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33271+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
33272+ * IN THE SOFTWARE.
33273+ *
33274+ * Authors:
33275+ * James C. Gualario <james.c.gualario@intel.com>
33276+ *
33277+ */
33278+#include "psb_umevents.h"
33279+/**
33280+ * define sysfs operations supported by umevent objects.
33281+ *
33282+ */
33283+static struct sysfs_ops umevent_obj_sysfs_ops = {
33284+ .show = psb_umevent_attr_show,
33285+ .store = psb_umevent_attr_store,
33286+};
33287+/**
33288+ * define the data attributes we will expose through sysfs.
33289+ *
33290+ */
33291+static struct umevent_attribute data_0 =
33292+ __ATTR(data_0_val, 0666, psb_umevent_attr_show_imp,
33293+ psb_umevent_attr_store_imp);
33294+static struct umevent_attribute data_1 =
33295+ __ATTR(data_1_val, 0666, psb_umevent_attr_show_imp,
33296+ psb_umevent_attr_store_imp);
33297+static struct umevent_attribute data_2 =
33298+ __ATTR(data_2_val, 0666, psb_umevent_attr_show_imp,
33299+ psb_umevent_attr_store_imp);
33300+static struct umevent_attribute data_3 =
33301+ __ATTR(data_3_val, 0666, psb_umevent_attr_show_imp,
33302+ psb_umevent_attr_store_imp);
33303+static struct umevent_attribute data_4 =
33304+ __ATTR(data_4_val, 0666, psb_umevent_attr_show_imp,
33305+ psb_umevent_attr_store_imp);
33306+static struct umevent_attribute data_5 =
33307+ __ATTR(data_5_val, 0666, psb_umevent_attr_show_imp,
33308+ psb_umevent_attr_store_imp);
33309+static struct umevent_attribute data_6 =
33310+ __ATTR(data_6_val, 0666, psb_umevent_attr_show_imp,
33311+ psb_umevent_attr_store_imp);
33312+static struct umevent_attribute data_7 =
33313+ __ATTR(data_7_val, 0666, psb_umevent_attr_show_imp,
33314+ psb_umevent_attr_store_imp);
33315+/**
33316+ * define the structure used to seed our ktype.
33317+ *
33318+ */
33319+static struct attribute *umevent_obj_default_attrs[] = {
33320+ &data_0.attr,
33321+ &data_1.attr,
33322+ &data_2.attr,
33323+ &data_3.attr,
33324+ &data_4.attr,
33325+ &data_5.attr,
33326+ &data_6.attr,
33327+ &data_7.attr,
33328+ NULL, /* need to NULL terminate the list of attributes */
33329+};
33330+/**
33331+ * specify the ktype for our kobjects.
33332+ *
33333+ */
33334+static struct kobj_type umevent_obj_ktype = {
33335+ .sysfs_ops = &umevent_obj_sysfs_ops,
33336+ .release = psb_umevent_obj_release,
33337+ .default_attrs = umevent_obj_default_attrs,
33338+};
33339+/**
33340+ * psb_umevent_attr_show - default kobject show function
33341+ *
33342+ * @kobj: kobject associated with the show operation
33343+ * @attr: attribute being requested
33344+ * @buf: pointer to the return buffer
33345+ *
33346+ */
33347+ssize_t psb_umevent_attr_show(struct kobject *kobj,
33348+ struct attribute *attr,
33349+ char *buf)
33350+{
33351+ struct umevent_attribute *attribute;
33352+ struct umevent_obj *any_umevent_obj;
33353+ attribute = to_umevent_attr(attr);
33354+ any_umevent_obj = to_umevent_obj(kobj);
33355+ if (!attribute->show)
33356+ return -EIO;
33357+
33358+ return attribute->show(any_umevent_obj, attribute, buf);
33359+}
33360+/**
33361+ * psb_umevent_attr_store - default kobject store function
33362+ *
33363+ * @kobj: kobject associated with the store operation
33364+ * @attr: attribute being requested
33365+ * @buf: input data to write to attribute
33366+ * @len: character count
33367+ *
33368+ */
33369+ssize_t psb_umevent_attr_store(struct kobject *kobj,
33370+ struct attribute *attr,
33371+ const char *buf, size_t len)
33372+{
33373+ struct umevent_attribute *attribute;
33374+ struct umevent_obj *any_umevent_obj;
33375+ attribute = to_umevent_attr(attr);
33376+ any_umevent_obj = to_umevent_obj(kobj);
33377+ if (!attribute->store)
33378+ return -EIO;
33379+
33380+ return attribute->store(any_umevent_obj, attribute, buf, len);
33381+}
33382+/**
33386+ * psb_umevent_obj_release - kobject release function
33384+ *
33385+ * @kobj: kobject to be released.
33386+ */
33387+void psb_umevent_obj_release(struct kobject *kobj)
33388+{
33389+ struct umevent_obj *any_umevent_obj;
33390+ any_umevent_obj = to_umevent_obj(kobj);
33391+ kfree(any_umevent_obj);
33392+}
33393+/**
33394+ * psb_umevent_attr_show_imp - attribute show implementation
33395+ *
33396+ * @any_umevent_obj: kobject managed data to read from
33397+ * @attr: attribute being requested
33398+ * @buf: pointer to the return buffer
33399+ *
33400+ */
33401+ssize_t psb_umevent_attr_show_imp(struct umevent_obj
33402+ *any_umevent_obj,
33403+ struct umevent_attribute *attr,
33404+ char *buf)
33405+{
33406+ int var;
33407+
33408+ if (strcmp(attr->attr.name, "data_0_val") == 0)
33409+ var = any_umevent_obj->data_0_val;
33410+ else if (strcmp(attr->attr.name, "data_1_val") == 0)
33411+ var = any_umevent_obj->data_1_val;
33412+ else if (strcmp(attr->attr.name, "data_2_val") == 0)
33413+ var = any_umevent_obj->data_2_val;
33414+ else if (strcmp(attr->attr.name, "data_3_val") == 0)
33415+ var = any_umevent_obj->data_3_val;
33416+ else if (strcmp(attr->attr.name, "data_4_val") == 0)
33417+ var = any_umevent_obj->data_4_val;
33418+ else if (strcmp(attr->attr.name, "data_5_val") == 0)
33419+ var = any_umevent_obj->data_5_val;
33420+ else if (strcmp(attr->attr.name, "data_6_val") == 0)
33421+ var = any_umevent_obj->data_6_val;
33422+ else
33423+ var = any_umevent_obj->data_7_val;
33424+
33425+ return sprintf(buf, "%d\n", var);
33426+}
33427+/**
33428+ * psb_umevent_attr_store_imp - attribute store implementation
33429+ *
33430+ * @any_umevent_obj: kobject managed data to write to
33431+ * @attr: attribute being requested
33432+ * @buf: input data to write to attribute
33433+ * @count: character count
33434+ *
33435+ */
33436+ssize_t psb_umevent_attr_store_imp(struct umevent_obj
33437+ *any_umevent_obj,
33438+ struct umevent_attribute *attr,
33439+ const char *buf, size_t count)
33440+{
33441+ int var;
33442+
33443+ sscanf(buf, "%d", &var);
33444+ if (strcmp(attr->attr.name, "data_0_val") == 0)
33445+ any_umevent_obj->data_0_val = var;
33446+ else if (strcmp(attr->attr.name, "data_1_val") == 0)
33447+ any_umevent_obj->data_1_val = var;
33448+ else if (strcmp(attr->attr.name, "data_2_val") == 0)
33449+ any_umevent_obj->data_2_val = var;
33450+ else if (strcmp(attr->attr.name, "data_3_val") == 0)
33451+ any_umevent_obj->data_3_val = var;
33452+ else if (strcmp(attr->attr.name, "data_4_val") == 0)
33453+ any_umevent_obj->data_4_val = var;
33454+ else if (strcmp(attr->attr.name, "data_5_val") == 0)
33455+ any_umevent_obj->data_5_val = var;
33456+ else if (strcmp(attr->attr.name, "data_6_val") == 0)
33457+ any_umevent_obj->data_6_val = var;
33458+ else
33459+ any_umevent_obj->data_7_val = var;
33460+ return count;
33461+}
33462+/**
33463+ * psb_create_umevent_obj - create and track new event objects
33464+ *
33465+ * @name: name to give to new sysfs / kobject entry
33466+ * @list: event object list to track the kobject in
33467+ */
33468+struct umevent_obj *psb_create_umevent_obj(const char *name,
33469+ struct umevent_list
33470+ *list)
33471+{
33472+ struct umevent_obj *new_umevent_obj;
33473+ int retval;
33474+ new_umevent_obj = kzalloc(sizeof(*new_umevent_obj),
33475+ GFP_KERNEL);
33476+ if (!new_umevent_obj)
33477+ return NULL;
33478+
33479+ new_umevent_obj->kobj.kset = list->umevent_disp_pool;
33480+ retval = kobject_init_and_add(&new_umevent_obj->kobj,
33481+ &umevent_obj_ktype, NULL,
33482+ "%s", name);
33483+ if (retval) {
33484+ kobject_put(&new_umevent_obj->kobj);
33485+ return NULL;
33486+ }
33487+ psb_umevent_add_to_list(list, new_umevent_obj);
33488+ return new_umevent_obj;
33489+}
33490+EXPORT_SYMBOL(psb_create_umevent_obj);
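+
+/*
+ * Illustrative usage sketch ("crtc0" and the list pointer are hypothetical):
+ *
+ *	struct umevent_obj *ev =
+ *		psb_create_umevent_obj("crtc0", umevent_list);
+ *	if (ev)
+ *		psb_umevent_notify(ev);
+ *
+ * This creates a sysfs object named crtc0 inside the list's kset, exposing
+ * the data_*_val attributes defined above, and emits a KOBJ_ADD uevent.
+ */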
33491+/**
33492+ * psb_umevent_notify - inform user mode of a new device
33493+ *
33494+ * @notify_disp_obj: event object to perform notification for
33495+ *
33496+ */
33497+void psb_umevent_notify(struct umevent_obj *notify_disp_obj)
33498+{
33499+ kobject_uevent(&notify_disp_obj->kobj, KOBJ_ADD);
33500+}
33501+EXPORT_SYMBOL(psb_umevent_notify);
33502+/**
33503+ * psb_umevent_notify_change - notify user mode of a change to a device
33504+ *
33505+ * @notify_disp_obj: event object to perform notification for
33506+ *
33507+ */
33508+void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj)
33509+{
33510+ kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE);
33511+}
33512+EXPORT_SYMBOL(psb_umevent_notify_change);
33513+/**
33514+ * psb_umevent_notify_change_gfxsock - notify user mode of a change to a device via the psb netlink socket
33515+ *
33516+ * @notify_disp_obj: event object to perform notification for
33517+ *
33518+ */
33519+void psb_umevent_notify_change_gfxsock(struct umevent_obj *notify_disp_obj)
33520+{
33521+ psb_kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE);
33522+}
33523+EXPORT_SYMBOL(psb_umevent_notify_change_gfxsock);
33524+/**
33525+ * psb_destroy_umevent_obj - decrement ref count on event so kernel can kill it
33526+ *
33527+ * @any_umevent_obj: event object to destroy
33528+ *
33529+ */
33530+void psb_destroy_umevent_obj(struct umevent_obj
33531+ *any_umevent_obj)
33532+{
33533+ kobject_put(&any_umevent_obj->kobj);
33534+}
33535+/**
33536+ *
33537+ * psb_umevent_init - init the event pool
33538+ *
33539+ * @parent_kobj: parent kobject to associate new kset with
33540+ * @new_umevent_list: event list to associate kset with
33541+ * @name: name to give to new sysfs entry
33542+ *
33543+ */
33544+int psb_umevent_init(struct kobject *parent_kobj,
33545+ struct umevent_list *new_umevent_list,
33546+ const char *name)
33547+{
33548+ psb_umevent_init_list(new_umevent_list);
33549+ new_umevent_list->umevent_disp_pool = kset_create_and_add(name, NULL,
33550+ parent_kobj);
33551+ if (!new_umevent_list->umevent_disp_pool)
33552+ return -ENOMEM;
33553+
33554+ return 0;
33555+}
33556+EXPORT_SYMBOL(psb_umevent_init);
33557+/**
33558+ *
33559+ * psb_umevent_cleanup - cleanup all event objects
33560+ *
33561+ * @kill_list: list of events to destroy
33562+ *
33563+ */
33564+void psb_umevent_cleanup(struct umevent_list *kill_list)
33565+{
33566+ psb_umevent_destroy_list(kill_list);
33567+}
33568+EXPORT_SYMBOL(psb_umevent_cleanup);
33569+/**
33570+ * psb_umevent_add_to_list - add an event to the event list
33571+ *
33572+ * @list: list to add the event to
33573+ * @umevent_obj_to_add: event to add
33574+ *
33575+ */
33576+void psb_umevent_add_to_list(struct umevent_list *list,
33577+ struct umevent_obj *umevent_obj_to_add)
33578+{
33579+ unsigned long flags;
33580+ spin_lock_irqsave(&list->list_lock, flags);
33581+ list_add(&umevent_obj_to_add->head, &list->head);
33582+ spin_unlock_irqrestore(&list->list_lock, flags);
33583+}
33584+/**
33585+ * psb_umevent_init_list - initialize event list
33586+ *
33587+ * @list: list to initialize
33588+ *
33589+ */
33590+void psb_umevent_init_list(struct umevent_list *list)
33591+{
33592+ spin_lock_init(&list->list_lock);
33593+ INIT_LIST_HEAD(&list->head);
33594+}
33595+/**
33596+ * psb_umevent_create_list - allocate an event list
33597+ *
33598+ */
33599+struct umevent_list *psb_umevent_create_list(void)
33600+{
33601+ struct umevent_list *new_umevent_list;
33602+ new_umevent_list = NULL;
33603+ new_umevent_list = kmalloc(sizeof(struct umevent_list),
33604+ GFP_ATOMIC);
33605+ return new_umevent_list;
33606+}
33607+EXPORT_SYMBOL(psb_umevent_create_list);
33608+/**
33609+ * psb_umevent_destroy_list - destroy a list and clean up all mem
33610+ *
33611+ * @list: list to destroy and clean up after
33612+ *
33613+ */
33614+void psb_umevent_destroy_list(struct umevent_list *list)
33615+{
33616+ struct umevent_obj *umevent_obj_curr;
33617+ struct list_head *node;
33618+ struct list_head *node_kill;
33619+ int i;
33620+ i = 0;
33621+ node = NULL;
33622+ node_kill = NULL;
33623+ node = list->head.next;
33624+ while (node != (&list->head)) {
33625+ umevent_obj_curr = list_entry(node,
33626+ struct umevent_obj,
33627+ head);
33628+ node_kill = node;
33629+ node = umevent_obj_curr->head.next;
33630+ psb_destroy_umevent_obj(umevent_obj_curr);
33631+ umevent_obj_curr = NULL;
33632+ list_del(node_kill);
33633+ i++;
33634+ }
33635+ kset_unregister(list->umevent_disp_pool);
33636+ kfree(list);
33637+}
33638+/**
33639+ * psb_umevent_remove_from_list - remove an event from tracking list
33640+ *
33641+ * @list: list to remove the event from
33642+ * @disp_to_remove: name of event to remove.
33643+ *
33644+ */
33645+void psb_umevent_remove_from_list(struct umevent_list *list,
33646+ const char *disp_to_remove)
33647+{
33648+ struct umevent_obj *umevent_obj_curr = NULL;
33649+ struct list_head *node = NULL;
33650+ struct list_head *node_kill = NULL;
33651+ int i = 0;
33652+ int found_match = 0;
33653+ i = 0;
33654+ node = NULL;
33655+ node_kill = NULL;
33656+ node = list->head.next;
33657+ while (node != (&list->head)) {
33658+ umevent_obj_curr = list_entry(node,
33659+ struct umevent_obj, head);
33660+ if (strcmp(umevent_obj_curr->kobj.name,
33661+ disp_to_remove) == 0) {
33662+ found_match = 1;
33663+ break;
33664+ }
33665+ node = NULL;
33666+ node = umevent_obj_curr->head.next;
33667+ i++;
33668+ }
33669+ if (found_match == 1) {
33670+ node_kill = node;
33671+ node = umevent_obj_curr->head.next;
33672+ psb_destroy_umevent_obj(umevent_obj_curr);
33673+ umevent_obj_curr = NULL;
33674+ list_del(node_kill);
33675+ }
33676+}
33677+EXPORT_SYMBOL(psb_umevent_remove_from_list);
33678+/**
33679+ * psb_umevent_find_obj - find an event in a tracking list
33680+ *
33681+ * @name: name of the event to find
33682+ * @list: list to find the event in
33683+ *
33684+ */
33685+struct umevent_obj *psb_umevent_find_obj(const char *name,
33686+ struct umevent_list *list)
33687+{
33688+ struct umevent_obj *umevent_obj_curr = NULL;
33689+ struct list_head *node = NULL;
33690+ struct list_head *node_find = NULL;
33691+ int i = 0;
33692+ int found_match = 0;
33693+ i = 0;
33694+ node = NULL;
33695+ node_find = NULL;
33696+ node = list->head.next;
33697+ while (node != (&list->head)) {
33698+ umevent_obj_curr = list_entry(node,
33699+ struct umevent_obj, head);
33700+ if (strcmp(umevent_obj_curr->kobj.name,
33701+ name) == 0) {
33702+ found_match = 1;
33703+ break;
33704+ }
33705+ node = NULL;
33706+ node = umevent_obj_curr->head.next;
33707+ i++;
33708+ }
33709+ if (found_match == 1)
33710+ return umevent_obj_curr;
33711+
33712+ return NULL;
33713+}
33714+EXPORT_SYMBOL(psb_umevent_find_obj);
33715+/**
33716+ * psb_umevent_debug_dump_list - debug list dump
33717+ *
33718+ * @list: list to dump
33719+ *
33720+ */
33721+void psb_umevent_debug_dump_list(struct umevent_list *list)
33722+{
33723+ struct umevent_obj *umevent_obj_curr;
33724+ unsigned long flags;
33725+ struct list_head *node;
33726+ int i;
33727+ spin_lock_irqsave(&list->list_lock, flags);
33728+ i = 0;
33729+ node = NULL;
33730+ node = list->head.next;
33731+ while (node != (&list->head)) {
33732+ umevent_obj_curr = list_entry(node,
33733+ struct umevent_obj,
33734+ head);
33735+ /*TBD: DUMP ANY REQUIRED VALUES WITH PRINTK*/
33736+ node = NULL;
33737+ node = umevent_obj_curr->head.next;
33738+ i++;
33739+ }
33740+ spin_unlock_irqrestore(&list->list_lock, flags);
33741+}
33742diff --git a/drivers/gpu/drm/psb/psb_umevents.h b/drivers/gpu/drm/psb/psb_umevents.h
33743new file mode 100644
33744index 0000000..05dbc8b
33745--- /dev/null
33746+++ b/drivers/gpu/drm/psb/psb_umevents.h
33747@@ -0,0 +1,150 @@
33748+/*
33749+ * Copyright © 2009 Intel Corporation
33750+ *
33751+ * Permission is hereby granted, free of charge, to any person obtaining a
33752+ * copy of this software and associated documentation files (the "Software"),
33753+ * to deal in the Software without restriction, including without limitation
33754+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
33755+ * and/or sell copies of the Software, and to permit persons to whom the
33756+ * Software is furnished to do so, subject to the following conditions:
33757+ *
33758+ * The above copyright notice and this permission notice (including the next
33759+ * paragraph) shall be included in all copies or substantial portions of the
33760+ * Software.
33761+ *
33762+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33763+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33764+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
33765+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33766+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33767+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
33768+ * IN THE SOFTWARE.
33769+ *
33770+ * Authors:
33771+ * James C. Gualario <james.c.gualario@intel.com>
33772+ *
33773+ */
33774+#ifndef _PSB_UMEVENT_H_
33775+#define _PSB_UMEVENT_H_
33776+/**
33777+ * required includes
33778+ *
33779+ */
33780+#include <linux/init.h>
33781+#include <linux/module.h>
33782+#include <linux/slab.h>
33783+#include <drm/drmP.h>
33784+#include <drm/drm_core.h>
33785+#include <drm/drm_pciids.h>
33786+#include <linux/spinlock.h>
33787+/**
33788+ * event structure managed by kobjects
33789+ *
33790+ */
33791+struct umevent_obj {
33792+ struct kobject kobj;
33793+ struct list_head head;
33794+ int data_0_val;
33795+ int data_1_val;
33796+ int data_2_val;
33797+ int data_3_val;
33798+ int data_4_val;
33799+ int data_5_val;
33800+ int data_6_val;
33801+ int data_7_val;
33802+};
33803+/**
33804+ * event tracking list element
33805+ *
33806+ */
33807+struct umevent_list{
33808+ struct list_head head;
33809+ struct kset *umevent_disp_pool;
33810+ spinlock_t list_lock;
33811+};
33812+/**
33813+ * to go back and forth between kobjects and their main container
33814+ *
33815+ */
33816+#define to_umevent_obj(x) \
33817+ container_of(x, struct umevent_obj, kobj)
33818+
33819+/**
33820+ * event attributes exposed via sysfs
33821+ *
33822+ */
33823+struct umevent_attribute {
33824+ struct attribute attr;
33825+ ssize_t (*show)(struct umevent_obj *any_umevent_obj,
33826+ struct umevent_attribute *attr, char *buf);
33827+ ssize_t (*store)(struct umevent_obj *any_umevent_obj,
33828+ struct umevent_attribute *attr,
33829+ const char *buf, size_t count);
33830+};
33831+/**
33832+ * to go back and forth between the attribute passed to us by the OS
33833+ * and the umevent_attribute
33834+ *
33835+ */
33836+#define to_umevent_attr(x) \
33837+ container_of(x, struct umevent_attribute, \
33838+ attr)
33839+
33840+/**
33841+ * umevent function prototypes
33842+ *
33843+ */
33844+extern struct umevent_obj *psb_create_umevent_obj(const char *name,
33845+ struct umevent_list
33846+ *list);
33847+extern ssize_t psb_umevent_attr_show(struct kobject *kobj,
33848+ struct attribute *attr, char *buf);
33849+extern ssize_t psb_umevent_attr_store(struct kobject *kobj,
33850+ struct attribute *attr,
33851+ const char *buf, size_t len);
33852+extern ssize_t psb_umevent_attr_show_imp(struct umevent_obj
33853+ *any_umevent_obj,
33854+ struct umevent_attribute *attr,
33855+ char *buf);
33856+extern ssize_t psb_umevent_attr_store_imp(struct umevent_obj
33857+ *any_umevent_obj,
33858+ struct umevent_attribute *attr,
33859+ const char *buf, size_t count);
33860+extern void psb_umevent_cleanup(struct umevent_list *kill_list);
33861+extern int psb_umevent_init(struct kobject *parent_kobj,
33862+ struct umevent_list *new_umevent_list,
33863+ const char *name);
33864+extern void psb_umevent_init_list(struct umevent_list *list);
33865+extern void psb_umevent_debug_dump_list(struct umevent_list *list);
33866+extern void psb_umevent_add_to_list(struct umevent_list *list,
33867+ struct umevent_obj
33868+ *umevent_obj_to_add);
33869+extern void psb_umevent_destroy_list(struct umevent_list *list);
33870+extern struct umevent_list *psb_umevent_create_list(void);
33871+extern void psb_umevent_notify(struct umevent_obj *notify_disp_obj);
33872+extern void psb_umevent_obj_release(struct kobject *kobj);
33873+extern void psb_umevent_remove_from_list(struct umevent_list *list,
33874+ const char *disp_to_remove);
33875+extern void psb_umevent_workqueue_dispatch(int work_type, const char *name,
33876+ struct umevent_list *list);
33877+extern void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj);
33878+extern void psb_umevent_notify_change_gfxsock(struct umevent_obj
33879+ *notify_disp_obj);
33880+extern struct umevent_obj *psb_umevent_find_obj(const char *name,
33881+ struct umevent_list
33882+ *list);
33883+/**
33884+ * socket function prototypes
33885+ *
33886+ */
33887+extern int psb_kobject_uevent(struct kobject *kobj,
33888+ enum kobject_action action);
33889+extern int psb_kobject_uevent_env(struct kobject *kobj,
33890+ enum kobject_action action,
33891+ char *envp[]);
33892+int psb_add_uevent_var(struct kobj_uevent_env *env,
33893+ const char *format, ...)
33894+ __attribute__((format (printf, 2, 3)));
33895+int psb_kobject_action_type(const char *buf,
33896+ size_t count, enum kobject_action *type);
33897+#endif
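
A minimal, hypothetical usage sketch (not part of this patch) of the umevent interface declared above: it only calls functions whose prototypes appear in psb_umevents.h, while the list name, object name and error handling are illustrative assumptions.

    /* Sketch only: hypothetical driver-side wiring of the umevent API. */
    #include "psb_umevents.h"

    static struct umevent_list *hotplug_list; /* assumed driver-global */

    static int example_umevent_setup(struct kobject *parent_kobj)
    {
    	struct umevent_obj *disp_obj;

    	hotplug_list = psb_umevent_create_list();
    	if (!hotplug_list)
    		return -ENOMEM;

    	/* creates the backing kset / sysfs entry named "hotplug" */
    	if (psb_umevent_init(parent_kobj, hotplug_list, "hotplug")) {
    		kfree(hotplug_list); /* kset was never created */
    		return -ENOMEM;
    	}

    	/* one kobject-backed event object, tracked on the list */
    	disp_obj = psb_create_umevent_obj("display0", hotplug_list);
    	if (!disp_obj)
    		return -ENOMEM;

    	/* tell user space the object exists */
    	psb_umevent_notify(disp_obj);
    	return 0;
    }

    static void example_umevent_teardown(void)
    {
    	/* drops each object's refcount and unregisters the kset */
    	psb_umevent_cleanup(hotplug_list);
    }
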
33898diff --git a/drivers/gpu/drm/psb/psb_xhw.c b/drivers/gpu/drm/psb/psb_xhw.c
33899new file mode 100644
33900index 0000000..58ce493
33901--- /dev/null
33902+++ b/drivers/gpu/drm/psb/psb_xhw.c
33903@@ -0,0 +1,652 @@
33904+/**************************************************************************
33905+ *Copyright (c) 2007-2008, Intel Corporation.
33906+ *All Rights Reserved.
33907+ *
33908+ *This program is free software; you can redistribute it and/or modify it
33909+ *under the terms and conditions of the GNU General Public License,
33910+ *version 2, as published by the Free Software Foundation.
33911+ *
33912+ *This program is distributed in the hope it will be useful, but WITHOUT
33913+ *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
33914+ *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
33915+ *more details.
33916+ *
33917+ *You should have received a copy of the GNU General Public License along with
33918+ *this program; if not, write to the Free Software Foundation, Inc.,
33919+ *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
33920+ *
33921+ *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
33922+ *develop this driver.
33923+ *
33924+ **************************************************************************/
33925+/*
33926+ *Make calls into closed source X server code.
33927+ */
33928+
33929+#include <drm/drmP.h>
33930+#include "psb_drv.h"
33931+#include "ttm/ttm_userobj_api.h"
33932+#include "psb_powermgmt.h"
33933+
33934+void
33935+psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
33936+ struct psb_xhw_buf *buf)
33937+{
33938+ unsigned long irq_flags;
33939+
33940+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
33941+ list_del_init(&buf->head);
33942+ if (dev_priv->xhw_cur_buf == buf)
33943+ dev_priv->xhw_cur_buf = NULL;
33944+ atomic_set(&buf->done, 1);
33945+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
33946+}
33947+
33948+static inline int psb_xhw_add(struct drm_psb_private *dev_priv,
33949+ struct psb_xhw_buf *buf)
33950+{
33951+ unsigned long irq_flags;
33952+
33953+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
33954+ atomic_set(&buf->done, 0);
33955+ if (unlikely(!dev_priv->xhw_submit_ok)) {
33956+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
33957+ DRM_ERROR("No Xpsb 3D extension available.\n");
33958+ return -EINVAL;
33959+ }
33960+ if (!list_empty(&buf->head)) {
33961+ DRM_ERROR("Recursive list adding.\n");
33962+ goto out;
33963+ }
33964+ list_add_tail(&buf->head, &dev_priv->xhw_in);
33965+ wake_up_interruptible(&dev_priv->xhw_queue);
33966+out:
33967+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
33968+ return 0;
33969+}
33970+
33971+int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
33972+ struct psb_xhw_buf *buf,
33973+ uint32_t w,
33974+ uint32_t h,
33975+ uint32_t *hw_cookie,
33976+ uint32_t *bo_size,
33977+ uint32_t *clear_p_start,
33978+ uint32_t *clear_num_pages)
33979+{
33980+ struct drm_psb_xhw_arg *xa = &buf->arg;
33981+ int ret;
33982+
33983+ buf->copy_back = 1;
33984+ xa->op = PSB_XHW_SCENE_INFO;
33985+ xa->irq_op = 0;
33986+ xa->issue_irq = 0;
33987+ xa->arg.si.w = w;
33988+ xa->arg.si.h = h;
33989+
33990+ ret = psb_xhw_add(dev_priv, buf);
33991+ if (ret)
33992+ return ret;
33993+
33994+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
33995+ atomic_read(&buf->done), DRM_HZ);
33996+
33997+ if (!atomic_read(&buf->done)) {
33998+ psb_xhw_clean_buf(dev_priv, buf);
33999+ return -EBUSY;
34000+ }
34001+
34002+ if (!xa->ret) {
34003+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
34004+ *bo_size = xa->arg.si.size;
34005+ *clear_p_start = xa->arg.si.clear_p_start;
34006+ *clear_num_pages = xa->arg.si.clear_num_pages;
34007+ }
34008+ return xa->ret;
34009+}
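
The function above follows the synchronous request pattern repeated throughout psb_xhw.c: fill in the argument block, queue it for the X server, wait for completion, then read the reply. A condensed sketch of that pattern is shown below as a hypothetical helper; the fields and calls it uses come from this file, but the helper itself is not part of the patch.

    /*
     * Sketch only: the caller is assumed to have already filled xa->op,
     * xa->issue_irq/irq_op and the op-specific arguments in xa->arg.
     */
    static int example_xhw_submit_sync(struct drm_psb_private *dev_priv,
    				   struct psb_xhw_buf *buf,
    				   unsigned long timeout)
    {
    	struct drm_psb_xhw_arg *xa = &buf->arg;
    	int ret;

    	buf->copy_back = 1;		/* we want the reply copied back */

    	ret = psb_xhw_add(dev_priv, buf);	/* queue for the X server */
    	if (ret)
    		return ret;

    	/*
    	 * The X server picks the request up through psb_xhw_ioctl(), and
    	 * psb_xhw_handler() marks buf->done when the reply arrives.
    	 */
    	(void) wait_event_timeout(dev_priv->xhw_caller_queue,
    				  atomic_read(&buf->done), timeout);

    	if (!atomic_read(&buf->done)) {
    		psb_xhw_clean_buf(dev_priv, buf);
    		return -EBUSY;
    	}

    	return xa->ret;
    }
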
34010+
34011+int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
34012+ struct psb_xhw_buf *buf, uint32_t fire_flags)
34013+{
34014+ struct drm_psb_xhw_arg *xa = &buf->arg;
34015+
34016+ buf->copy_back = 0;
34017+ xa->op = PSB_XHW_FIRE_RASTER;
34018+ xa->issue_irq = 0;
34019+ xa->arg.sb.fire_flags = 0;
34020+
34021+ return psb_xhw_add(dev_priv, buf);
34022+}
34023+
34024+int psb_xhw_vistest(struct drm_psb_private *dev_priv,
34025+ struct psb_xhw_buf *buf)
34026+{
34027+ struct drm_psb_xhw_arg *xa = &buf->arg;
34028+
34029+ buf->copy_back = 1;
34030+ xa->op = PSB_XHW_VISTEST;
34031+ /*
34032+ *Could perhaps decrease latency somewhat by
34033+ *issuing an irq in this case.
34034+ */
34035+ xa->issue_irq = 0;
34036+ xa->irq_op = PSB_UIRQ_VISTEST;
34037+ return psb_xhw_add(dev_priv, buf);
34038+}
34039+
34040+int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
34041+ struct psb_xhw_buf *buf,
34042+ uint32_t fire_flags,
34043+ uint32_t hw_context,
34044+ uint32_t *cookie,
34045+ uint32_t *oom_cmds,
34046+ uint32_t num_oom_cmds,
34047+ uint32_t offset, uint32_t engine,
34048+ uint32_t flags)
34049+{
34050+ struct drm_psb_xhw_arg *xa = &buf->arg;
34051+
34052+ buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM);
34053+ xa->op = PSB_XHW_SCENE_BIND_FIRE;
34054+ xa->issue_irq = (buf->copy_back) ? 1 : 0;
34055+ if (unlikely(buf->copy_back))
34056+ xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
34057+ PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY;
34058+ else
34059+ xa->irq_op = 0;
34060+ xa->arg.sb.fire_flags = fire_flags;
34061+ xa->arg.sb.hw_context = hw_context;
34062+ xa->arg.sb.offset = offset;
34063+ xa->arg.sb.engine = engine;
34064+ xa->arg.sb.flags = flags;
34065+ xa->arg.sb.num_oom_cmds = num_oom_cmds;
34066+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
34067+ if (num_oom_cmds)
34068+ memcpy(xa->arg.sb.oom_cmds, oom_cmds,
34069+ sizeof(uint32_t) * num_oom_cmds);
34070+ return psb_xhw_add(dev_priv, buf);
34071+}
34072+
34073+int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
34074+ struct psb_xhw_buf *buf)
34075+{
34076+ struct drm_psb_xhw_arg *xa = &buf->arg;
34077+ int ret;
34078+
34079+ buf->copy_back = 1;
34080+ xa->op = PSB_XHW_RESET_DPM;
34081+ xa->issue_irq = 0;
34082+ xa->irq_op = 0;
34083+
34084+ ret = psb_xhw_add(dev_priv, buf);
34085+ if (ret)
34086+ return ret;
34087+
34088+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
34089+ atomic_read(&buf->done), 3 * DRM_HZ);
34090+
34091+ if (!atomic_read(&buf->done)) {
34092+ psb_xhw_clean_buf(dev_priv, buf);
34093+ return -EBUSY;
34094+ }
34095+
34096+ return xa->ret;
34097+}
34098+
34099+int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
34100+ struct psb_xhw_buf *buf, uint32_t *value)
34101+{
34102+ struct drm_psb_xhw_arg *xa = &buf->arg;
34103+ int ret;
34104+
34105+ *value = 0;
34106+
34107+ buf->copy_back = 1;
34108+ xa->op = PSB_XHW_CHECK_LOCKUP;
34109+ xa->issue_irq = 0;
34110+ xa->irq_op = 0;
34111+
34112+ ret = psb_xhw_add(dev_priv, buf);
34113+ if (ret)
34114+ return ret;
34115+
34116+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
34117+ atomic_read(&buf->done), DRM_HZ * 3);
34118+
34119+ if (!atomic_read(&buf->done)) {
34120+ psb_xhw_clean_buf(dev_priv, buf);
34121+ return -EBUSY;
34122+ }
34123+
34124+ if (!xa->ret)
34125+ *value = xa->arg.cl.value;
34126+
34127+ return xa->ret;
34128+}
34129+
34130+static int psb_xhw_terminate(struct drm_psb_private *dev_priv,
34131+ struct psb_xhw_buf *buf)
34132+{
34133+ struct drm_psb_xhw_arg *xa = &buf->arg;
34134+ unsigned long irq_flags;
34135+
34136+ buf->copy_back = 0;
34137+ xa->op = PSB_XHW_TERMINATE;
34138+ xa->issue_irq = 0;
34139+
34140+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34141+ dev_priv->xhw_submit_ok = 0;
34142+ atomic_set(&buf->done, 0);
34143+ if (!list_empty(&buf->head)) {
34144+ DRM_ERROR("Recursive list adding.\n");
34145+ goto out;
34146+ }
34147+ list_add_tail(&buf->head, &dev_priv->xhw_in);
34148+out:
34149+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34150+ wake_up_interruptible(&dev_priv->xhw_queue);
34151+
34152+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
34153+ atomic_read(&buf->done), DRM_HZ / 10);
34154+
34155+ if (!atomic_read(&buf->done)) {
34156+ DRM_ERROR("Xpsb terminate timeout.\n");
34157+ psb_xhw_clean_buf(dev_priv, buf);
34158+ return -EBUSY;
34159+ }
34160+
34161+ return 0;
34162+}
34163+
34164+int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
34165+ struct psb_xhw_buf *buf,
34166+ uint32_t pages, uint32_t *hw_cookie,
34167+ uint32_t *size,
34168+ uint32_t *ta_min_size)
34169+{
34170+ struct drm_psb_xhw_arg *xa = &buf->arg;
34171+ int ret;
34172+
34173+ buf->copy_back = 1;
34174+ xa->op = PSB_XHW_TA_MEM_INFO;
34175+ xa->issue_irq = 0;
34176+ xa->irq_op = 0;
34177+ xa->arg.bi.pages = pages;
34178+
34179+ ret = psb_xhw_add(dev_priv, buf);
34180+ if (ret)
34181+ return ret;
34182+
34183+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
34184+ atomic_read(&buf->done), DRM_HZ);
34185+
34186+ if (!atomic_read(&buf->done)) {
34187+ psb_xhw_clean_buf(dev_priv, buf);
34188+ return -EBUSY;
34189+ }
34190+
34191+ if (!xa->ret)
34192+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
34193+
34194+ *size = xa->arg.bi.size;
34195+ *ta_min_size = xa->arg.bi.ta_min_size;
34196+ return xa->ret;
34197+}
34198+
34199+int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
34200+ struct psb_xhw_buf *buf,
34201+ uint32_t flags,
34202+ uint32_t param_offset,
34203+ uint32_t pt_offset, uint32_t *hw_cookie)
34204+{
34205+ struct drm_psb_xhw_arg *xa = &buf->arg;
34206+ int ret;
34207+
34208+ buf->copy_back = 1;
34209+ xa->op = PSB_XHW_TA_MEM_LOAD;
34210+ xa->issue_irq = 0;
34211+ xa->irq_op = 0;
34212+ xa->arg.bl.flags = flags;
34213+ xa->arg.bl.param_offset = param_offset;
34214+ xa->arg.bl.pt_offset = pt_offset;
34215+ memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie));
34216+
34217+ ret = psb_xhw_add(dev_priv, buf);
34218+ if (ret)
34219+ return ret;
34220+
34221+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
34222+ atomic_read(&buf->done), 3 * DRM_HZ);
34223+
34224+ if (!atomic_read(&buf->done)) {
34225+ psb_xhw_clean_buf(dev_priv, buf);
34226+ return -EBUSY;
34227+ }
34228+
34229+ if (!xa->ret)
34230+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
34231+
34232+ return xa->ret;
34233+}
34234+
34235+int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
34236+ struct psb_xhw_buf *buf, uint32_t *cookie)
34237+{
34238+ struct drm_psb_xhw_arg *xa = &buf->arg;
34239+
34240+ /*
34241+ *This calls the extensive closed source
34242+ *OOM handler, which resolves the condition and
34243+ *sends a reply telling the scheduler what to do
34244+ *with the task.
34245+ */
34246+
34247+ buf->copy_back = 1;
34248+ xa->op = PSB_XHW_OOM;
34249+ xa->issue_irq = 1;
34250+ xa->irq_op = PSB_UIRQ_OOM_REPLY;
34251+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
34252+
34253+ return psb_xhw_add(dev_priv, buf);
34254+}
34255+
34256+void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
34257+ struct psb_xhw_buf *buf,
34258+ uint32_t *cookie,
34259+ uint32_t *bca, uint32_t *rca, uint32_t *flags)
34260+{
34261+ struct drm_psb_xhw_arg *xa = &buf->arg;
34262+
34263+ /*
34264+ *Get info about how to schedule an OOM task.
34265+ */
34266+
34267+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
34268+ *bca = xa->arg.oom.bca;
34269+ *rca = xa->arg.oom.rca;
34270+ *flags = xa->arg.oom.flags;
34271+}
34272+
34273+void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
34274+ struct psb_xhw_buf *buf, uint32_t *cookie)
34275+{
34276+ struct drm_psb_xhw_arg *xa = &buf->arg;
34277+
34278+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
34279+}
34280+
34281+int psb_xhw_resume(struct drm_psb_private *dev_priv,
34282+ struct psb_xhw_buf *buf)
34283+{
34284+ struct drm_psb_xhw_arg *xa = &buf->arg;
34285+ int ret;
34286+ /*
34287+ *For D0i3, force resume to complete
34288+ */
34289+ buf->copy_back = 1;
34290+ xa->op = PSB_XHW_RESUME;
34291+ xa->issue_irq = 0;
34292+ xa->irq_op = 0;
34293+ ret = psb_xhw_add(dev_priv, buf);
34294+ if (ret)
34295+ return ret;
34296+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
34297+ atomic_read(&buf->done), 3 * DRM_HZ);
34298+
34299+ if (!atomic_read(&buf->done)) {
34300+ psb_xhw_clean_buf(dev_priv, buf);
34301+ DRM_ERROR("Xpsb resume fail\n");
34302+ return -EBUSY;
34303+ }
34304+ return ret;
34305+}
34306+
34307+void psb_xhw_takedown(struct drm_psb_private *dev_priv)
34308+{
34309+}
34310+
34311+int psb_xhw_init(struct drm_device *dev)
34312+{
34313+ struct drm_psb_private *dev_priv =
34314+ (struct drm_psb_private *) dev->dev_private;
34315+ unsigned long irq_flags;
34316+
34317+ INIT_LIST_HEAD(&dev_priv->xhw_in);
34318+ spin_lock_init(&dev_priv->xhw_lock);
34319+ atomic_set(&dev_priv->xhw_client, 0);
34320+ init_waitqueue_head(&dev_priv->xhw_queue);
34321+ init_waitqueue_head(&dev_priv->xhw_caller_queue);
34322+ mutex_init(&dev_priv->xhw_mutex);
34323+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34324+ dev_priv->xhw_on = 0;
34325+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34326+
34327+ return 0;
34328+}
34329+
34330+static int psb_xhw_init_init(struct drm_device *dev,
34331+ struct drm_file *file_priv,
34332+ struct drm_psb_xhw_init_arg *arg)
34333+{
34334+ struct drm_psb_private *dev_priv =
34335+ (struct drm_psb_private *) dev->dev_private;
34336+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
34337+ int ret;
34338+ bool is_iomem;
34339+
34340+ if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) {
34341+ unsigned long irq_flags;
34342+
34343+ dev_priv->xhw_bo =
34344+ ttm_buffer_object_lookup(tfile, arg->buffer_handle);
34345+ if (!dev_priv->xhw_bo) {
34346+ ret = -EINVAL;
34347+ goto out_err;
34348+ }
34349+ ret = ttm_bo_kmap(dev_priv->xhw_bo, 0,
34350+ dev_priv->xhw_bo->num_pages,
34351+ &dev_priv->xhw_kmap);
34352+ if (ret) {
34353+ DRM_ERROR("Failed mapping X server "
34354+ "communications buffer.\n");
34355+ goto out_err0;
34356+ }
34357+ dev_priv->xhw =
34358+ ttm_kmap_obj_virtual(&dev_priv->xhw_kmap, &is_iomem);
34359+ if (is_iomem) {
34360+ DRM_ERROR("X server communications buffer"
34361+ "is in device memory.\n");
34362+ ret = -EINVAL;
34363+ goto out_err1;
34364+ }
34365+ dev_priv->xhw_file = file_priv;
34366+
34367+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34368+ dev_priv->xhw_on = 1;
34369+ dev_priv->xhw_submit_ok = 1;
34370+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34371+ return 0;
34372+ } else {
34373+ DRM_ERROR("Xhw is already initialized.\n");
34374+ return -EBUSY;
34375+ }
34376+out_err1:
34377+ dev_priv->xhw = NULL;
34378+ ttm_bo_kunmap(&dev_priv->xhw_kmap);
34379+out_err0:
34380+ ttm_bo_unref(&dev_priv->xhw_bo);
34381+out_err:
34382+ atomic_dec(&dev_priv->xhw_client);
34383+ return ret;
34384+}
34385+
34386+static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv)
34387+{
34388+ struct psb_xhw_buf *cur_buf, *next;
34389+ unsigned long irq_flags;
34390+
34391+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34392+ dev_priv->xhw_submit_ok = 0;
34393+
34394+ list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) {
34395+ list_del_init(&cur_buf->head);
34396+ if (cur_buf->copy_back)
34397+ cur_buf->arg.ret = -EINVAL;
34398+ atomic_set(&cur_buf->done, 1);
34399+ }
34400+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34401+ wake_up(&dev_priv->xhw_caller_queue);
34402+}
34403+
34404+void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
34405+ struct drm_file *file_priv, int closing)
34406+{
34407+
34408+ if (dev_priv->xhw_file == file_priv &&
34409+ atomic_add_unless(&dev_priv->xhw_client, -1, 0)) {
34410+
34411+ if (closing)
34412+ psb_xhw_queue_empty(dev_priv);
34413+ else {
34414+ struct psb_xhw_buf buf;
34415+ INIT_LIST_HEAD(&buf.head);
34416+
34417+ psb_xhw_terminate(dev_priv, &buf);
34418+ psb_xhw_queue_empty(dev_priv);
34419+ }
34420+
34421+ dev_priv->xhw = NULL;
34422+ ttm_bo_kunmap(&dev_priv->xhw_kmap);
34423+ ttm_bo_unref(&dev_priv->xhw_bo);
34424+ dev_priv->xhw_file = NULL;
34425+ }
34426+}
34427+
34428+int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
34429+ struct drm_file *file_priv)
34430+{
34431+ struct drm_psb_xhw_init_arg *arg =
34432+ (struct drm_psb_xhw_init_arg *) data;
34433+ struct drm_psb_private *dev_priv =
34434+ (struct drm_psb_private *) dev->dev_private;
34435+ int ret = 0;
34436+ powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true);
34437+ switch (arg->operation) {
34438+ case PSB_XHW_INIT:
34439+ ret = psb_xhw_init_init(dev, file_priv, arg);
34440+ break;
34441+ case PSB_XHW_TAKEDOWN:
34442+ psb_xhw_init_takedown(dev_priv, file_priv, 0);
34443+ break;
34444+ }
34445+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
34446+ return ret;
34447+}
34448+
34449+static int psb_xhw_in_empty(struct drm_psb_private *dev_priv)
34450+{
34451+ int empty;
34452+ unsigned long irq_flags;
34453+
34454+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34455+ empty = list_empty(&dev_priv->xhw_in);
34456+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34457+ return empty;
34458+}
34459+
34460+int psb_xhw_handler(struct drm_psb_private *dev_priv)
34461+{
34462+ unsigned long irq_flags;
34463+ struct drm_psb_xhw_arg *xa;
34464+ struct psb_xhw_buf *buf;
34465+
34466+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34467+
34468+ if (!dev_priv->xhw_on) {
34469+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34470+ return -EINVAL;
34471+ }
34472+
34473+ buf = dev_priv->xhw_cur_buf;
34474+ if (buf && buf->copy_back) {
34475+ xa = &buf->arg;
34476+ /* w/a for resume: skip this memcpy for performance */
34477+ if (xa->op != PSB_XHW_RESUME)
34478+ memcpy(xa, dev_priv->xhw, sizeof(*xa));
34479+ dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op;
34480+ atomic_set(&buf->done, 1);
34481+ wake_up(&dev_priv->xhw_caller_queue);
34482+ } else
34483+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
34484+
34485+ dev_priv->xhw_cur_buf = NULL;
34486+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34487+ return 0;
34488+}
34489+
34490+int psb_xhw_ioctl(struct drm_device *dev, void *data,
34491+ struct drm_file *file_priv)
34492+{
34493+ struct drm_psb_private *dev_priv =
34494+ (struct drm_psb_private *) dev->dev_private;
34495+ unsigned long irq_flags;
34496+ struct drm_psb_xhw_arg *xa;
34497+ int ret;
34498+ struct list_head *list;
34499+ struct psb_xhw_buf *buf;
34500+ static int firsttime = 1;
34501+
34502+ if (!dev_priv)
34503+ return -EINVAL;
34504+
34505+ /* tricky fix for SGX HW access from user space when XPSB is loaded */
34506+ if(firsttime) {
34507+ firsttime = 0;
34508+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
34509+ }
34510+
34511+ if (mutex_lock_interruptible(&dev_priv->xhw_mutex))
34512+ return -ERESTART;
34513+
34514+ if (psb_forced_user_interrupt(dev_priv)) {
34515+ mutex_unlock(&dev_priv->xhw_mutex);
34516+ return -EINVAL;
34517+ }
34518+
34519+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34520+ while (list_empty(&dev_priv->xhw_in)) {
34521+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34522+ ret = wait_event_interruptible_timeout(dev_priv->xhw_queue,
34523+ !psb_xhw_in_empty
34524+ (dev_priv), DRM_HZ);
34525+ if (ret == -ERESTARTSYS || ret == 0) {
34526+ mutex_unlock(&dev_priv->xhw_mutex);
34527+ return -ERESTART;
34528+ }
34529+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34530+ }
34531+
34532+ list = dev_priv->xhw_in.next;
34533+ list_del_init(list);
34534+
34535+ buf = list_entry(list, struct psb_xhw_buf, head);
34536+ xa = &buf->arg;
34537+ memcpy(dev_priv->xhw, xa, sizeof(*xa));
34538+
34539+ if (unlikely(buf->copy_back))
34540+ dev_priv->xhw_cur_buf = buf;
34541+ else {
34542+ atomic_set(&buf->done, 1);
34543+ dev_priv->xhw_cur_buf = NULL;
34544+ }
34545+
34546+ if (xa->op == PSB_XHW_TERMINATE) {
34547+ dev_priv->xhw_on = 0;
34548+ wake_up(&dev_priv->xhw_caller_queue);
34549+ }
34550+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34551+
34552+ mutex_unlock(&dev_priv->xhw_mutex);
34553+
34554+ return 0;
34555+}
34556diff --git a/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c
34557new file mode 100644
34558index 0000000..28fbe3b
34559--- /dev/null
34560+++ b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c
34561@@ -0,0 +1,149 @@
34562+/**************************************************************************
34563+ *
34564+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
34565+ * All Rights Reserved.
34566+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
34567+ * All Rights Reserved.
34568+ *
34569+ * Permission is hereby granted, free of charge, to any person obtaining a
34570+ * copy of this software and associated documentation files (the
34571+ * "Software"), to deal in the Software without restriction, including
34572+ * without limitation the rights to use, copy, modify, merge, publish,
34573+ * distribute, sub license, and/or sell copies of the Software, and to
34574+ * permit persons to whom the Software is furnished to do so, subject to
34575+ * the following conditions:
34576+ *
34577+ * The above copyright notice and this permission notice (including the
34578+ * next paragraph) shall be included in all copies or substantial portions
34579+ * of the Software.
34580+ *
34581+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
34582+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34583+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
34584+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
34585+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
34586+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
34587+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
34588+ *
34589+ **************************************************************************/
34590+/*
34591+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
34592+ * Keith Packard.
34593+ */
34594+
34595+#include "ttm/ttm_bo_driver.h"
34596+#ifdef TTM_HAS_AGP
34597+#include "ttm/ttm_placement_common.h"
34598+#include <linux/agp_backend.h>
34599+#include <asm/agp.h>
34600+#include <asm/io.h>
34601+
34602+struct ttm_agp_backend {
34603+ struct ttm_backend backend;
34604+ struct agp_memory *mem;
34605+ struct agp_bridge_data *bridge;
34606+};
34607+
34608+static int ttm_agp_populate(struct ttm_backend *backend,
34609+ unsigned long num_pages, struct page **pages,
34610+ struct page *dummy_read_page)
34611+{
34612+ struct ttm_agp_backend *agp_be =
34613+ container_of(backend, struct ttm_agp_backend, backend);
34614+ struct page **cur_page, **last_page = pages + num_pages;
34615+ struct agp_memory *mem;
34616+
34617+ mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
34618+ if (unlikely(mem == NULL))
34619+ return -ENOMEM;
34620+
34621+ mem->page_count = 0;
34622+ for (cur_page = pages; cur_page < last_page; ++cur_page) {
34623+ struct page *page = *cur_page;
34624+ if (!page) {
34625+ page = dummy_read_page;
34626+ }
34627+ mem->memory[mem->page_count++] =
34628+ phys_to_gart(page_to_phys(page));
34629+ }
34630+ agp_be->mem = mem;
34631+ return 0;
34632+}
34633+
34634+static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
34635+{
34636+ struct ttm_agp_backend *agp_be =
34637+ container_of(backend, struct ttm_agp_backend, backend);
34638+ struct agp_memory *mem = agp_be->mem;
34639+ int cached = (bo_mem->flags & TTM_PL_FLAG_CACHED);
34640+ int ret;
34641+
34642+ mem->is_flushed = 1;
34643+ mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
34644+
34645+ ret = agp_bind_memory(mem, bo_mem->mm_node->start);
34646+ if (ret)
34647+ printk(KERN_ERR "AGP Bind memory failed.\n");
34648+
34649+ return ret;
34650+}
34651+
34652+static int ttm_agp_unbind(struct ttm_backend *backend)
34653+{
34654+ struct ttm_agp_backend *agp_be =
34655+ container_of(backend, struct ttm_agp_backend, backend);
34656+
34657+ if (agp_be->mem->is_bound)
34658+ return agp_unbind_memory(agp_be->mem);
34659+ else
34660+ return 0;
34661+}
34662+
34663+static void ttm_agp_clear(struct ttm_backend *backend)
34664+{
34665+ struct ttm_agp_backend *agp_be =
34666+ container_of(backend, struct ttm_agp_backend, backend);
34667+ struct agp_memory *mem = agp_be->mem;
34668+
34669+ if (mem) {
34670+ ttm_agp_unbind(backend);
34671+ agp_free_memory(mem);
34672+ }
34673+ agp_be->mem = NULL;
34674+}
34675+
34676+static void ttm_agp_destroy(struct ttm_backend *backend)
34677+{
34678+ struct ttm_agp_backend *agp_be =
34679+ container_of(backend, struct ttm_agp_backend, backend);
34680+
34681+ if (agp_be->mem)
34682+ ttm_agp_clear(backend);
34683+ kfree(agp_be);
34684+}
34685+
34686+static struct ttm_backend_func ttm_agp_func = {
34687+ .populate = ttm_agp_populate,
34688+ .clear = ttm_agp_clear,
34689+ .bind = ttm_agp_bind,
34690+ .unbind = ttm_agp_unbind,
34691+ .destroy = ttm_agp_destroy,
34692+};
34693+
34694+struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
34695+ struct agp_bridge_data *bridge)
34696+{
34697+ struct ttm_agp_backend *agp_be;
34698+
34699+ agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
34700+ if (!agp_be)
34701+ return NULL;
34702+
34703+ agp_be->mem = NULL;
34704+ agp_be->bridge = bridge;
34705+ agp_be->backend.func = &ttm_agp_func;
34706+ agp_be->backend.bdev = bdev;
34707+ return &agp_be->backend;
34708+}
34709+
34710+#endif
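
For illustration only, a hypothetical driver hook that returns the backend created above; where the agp_bridge_data comes from (DRM AGP init, driver private data, ...) is driver-specific and assumed here, and only ttm_agp_backend_init() and ttm_agp_func are actually defined in this file.

    /* Sketch only: not part of the patch. */
    static struct ttm_backend *example_create_ttm_backend(struct ttm_bo_device *bdev,
    						       struct agp_bridge_data *bridge)
    {
    	/*
    	 * Allocates a struct ttm_agp_backend and points backend.func at
    	 * ttm_agp_func, so populate/bind/unbind/clear/destroy route to
    	 * the AGP implementations above.
    	 */
    	return ttm_agp_backend_init(bdev, bridge);
    }
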
34711diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo.c b/drivers/gpu/drm/psb/ttm/ttm_bo.c
34712new file mode 100644
34713index 0000000..7cdbd45
34714--- /dev/null
34715+++ b/drivers/gpu/drm/psb/ttm/ttm_bo.c
34716@@ -0,0 +1,1716 @@
34717+/**************************************************************************
34718+ *
34719+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
34720+ * All Rights Reserved.
34721+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
34722+ * All Rights Reserved.
34723+ *
34724+ * Permission is hereby granted, free of charge, to any person obtaining a
34725+ * copy of this software and associated documentation files (the
34726+ * "Software"), to deal in the Software without restriction, including
34727+ * without limitation the rights to use, copy, modify, merge, publish,
34728+ * distribute, sub license, and/or sell copies of the Software, and to
34729+ * permit persons to whom the Software is furnished to do so, subject to
34730+ * the following conditions:
34731+ *
34732+ * The above copyright notice and this permission notice (including the
34733+ * next paragraph) shall be included in all copies or substantial portions
34734+ * of the Software.
34735+ *
34736+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
34737+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34738+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
34739+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
34740+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
34741+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
34742+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
34743+ *
34744+ **************************************************************************/
34745+/*
34746+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34747+ */
34748+
34749+#include "ttm/ttm_bo_driver.h"
34750+#include "ttm/ttm_placement_common.h"
34751+#include <linux/jiffies.h>
34752+#include <linux/slab.h>
34753+#include <linux/sched.h>
34754+#include <linux/mm.h>
34755+#include <linux/file.h>
34756+
34757+#define TTM_ASSERT_LOCKED(param)
34758+#define TTM_DEBUG(fmt, arg...)
34759+#define TTM_BO_HASH_ORDER 13
34760+
34761+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
34762+static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
34763+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
34764+
34765+static inline uint32_t ttm_bo_type_flags(unsigned type)
34766+{
34767+ return (1 << (type));
34768+}
34769+
34770+static void ttm_bo_release_list(struct kref *list_kref)
34771+{
34772+ struct ttm_buffer_object *bo =
34773+ container_of(list_kref, struct ttm_buffer_object, list_kref);
34774+ struct ttm_bo_device *bdev = bo->bdev;
34775+
34776+ BUG_ON(atomic_read(&bo->list_kref.refcount));
34777+ BUG_ON(atomic_read(&bo->kref.refcount));
34778+ BUG_ON(atomic_read(&bo->cpu_writers));
34779+ BUG_ON(bo->sync_obj != NULL);
34780+ BUG_ON(bo->mem.mm_node != NULL);
34781+ BUG_ON(!list_empty(&bo->lru));
34782+ BUG_ON(!list_empty(&bo->ddestroy));
34783+
34784+ if (bo->ttm)
34785+ ttm_tt_destroy(bo->ttm);
34786+ if (bo->destroy)
34787+ bo->destroy(bo);
34788+ else {
34789+ ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
34790+ kfree(bo);
34791+ }
34792+}
34793+
34794+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
34795+{
34796+
34797+ if (interruptible) {
34798+ int ret = 0;
34799+
34800+ ret = wait_event_interruptible(bo->event_queue,
34801+ atomic_read(&bo->reserved) == 0);
34802+ if (unlikely(ret != 0))
34803+ return -ERESTART;
34804+ } else {
34805+ wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
34806+ }
34807+ return 0;
34808+}
34809+
34810+static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
34811+{
34812+ struct ttm_bo_device *bdev = bo->bdev;
34813+ struct ttm_mem_type_manager *man;
34814+
34815+ BUG_ON(!atomic_read(&bo->reserved));
34816+
34817+ if (!(bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
34818+
34819+ BUG_ON(!list_empty(&bo->lru));
34820+
34821+ man = &bdev->man[bo->mem.mem_type];
34822+ list_add_tail(&bo->lru, &man->lru);
34823+ kref_get(&bo->list_kref);
34824+
34825+ if (bo->ttm != NULL) {
34826+ list_add_tail(&bo->swap, &bdev->swap_lru);
34827+ kref_get(&bo->list_kref);
34828+ }
34829+ }
34830+}
34831+
34832+/*
34833+ * Call with bdev->lru_lock and bdev->global->swap_lock held.
34834+ */
34835+
34836+static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
34837+{
34838+ int put_count = 0;
34839+
34840+ if (!list_empty(&bo->swap)) {
34841+ list_del_init(&bo->swap);
34842+ ++put_count;
34843+ }
34844+ if (!list_empty(&bo->lru)) {
34845+ list_del_init(&bo->lru);
34846+ ++put_count;
34847+ }
34848+
34849+ /*
34850+ * TODO: Add a driver hook to delete from
34851+ * driver-specific LRU's here.
34852+ */
34853+
34854+ return put_count;
34855+}
34856+
34857+int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
34858+ bool interruptible,
34859+ bool no_wait, bool use_sequence, uint32_t sequence)
34860+{
34861+ struct ttm_bo_device *bdev = bo->bdev;
34862+ int ret;
34863+
34864+ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
34865+ if (use_sequence && bo->seq_valid &&
34866+ (sequence - bo->val_seq < (1 << 31))) {
34867+ return -EAGAIN;
34868+ }
34869+
34870+ if (no_wait)
34871+ return -EBUSY;
34872+
34873+ spin_unlock(&bdev->lru_lock);
34874+ ret = ttm_bo_wait_unreserved(bo, interruptible);
34875+ spin_lock(&bdev->lru_lock);
34876+
34877+ if (unlikely(ret))
34878+ return ret;
34879+ }
34880+
34881+ if (use_sequence) {
34882+ bo->val_seq = sequence;
34883+ bo->seq_valid = true;
34884+ } else {
34885+ bo->seq_valid = false;
34886+ }
34887+
34888+ return 0;
34889+}
34890+
34891+static void ttm_bo_ref_bug(struct kref *list_kref)
34892+{
34893+ BUG();
34894+}
34895+
34896+int ttm_bo_reserve(struct ttm_buffer_object *bo,
34897+ bool interruptible,
34898+ bool no_wait, bool use_sequence, uint32_t sequence)
34899+{
34900+ struct ttm_bo_device *bdev = bo->bdev;
34901+ int put_count = 0;
34902+ int ret;
34903+
34904+ spin_lock(&bdev->lru_lock);
34905+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
34906+ sequence);
34907+ if (likely(ret == 0))
34908+ put_count = ttm_bo_del_from_lru(bo);
34909+ spin_unlock(&bdev->lru_lock);
34910+
34911+ while (put_count--)
34912+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
34913+
34914+ return ret;
34915+}
34916+
34917+void ttm_bo_unreserve(struct ttm_buffer_object *bo)
34918+{
34919+ struct ttm_bo_device *bdev = bo->bdev;
34920+
34921+ spin_lock(&bdev->lru_lock);
34922+ ttm_bo_add_to_lru(bo);
34923+ atomic_set(&bo->reserved, 0);
34924+ wake_up_all(&bo->event_queue);
34925+ spin_unlock(&bdev->lru_lock);
34926+}
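
A brief sketch (not in the patch) of the reserve/unreserve pattern the two functions above expect from callers; example_touch_bo() is a made-up caller, while the ttm_* calls and their argument order match the definitions in this file.

    static int example_touch_bo(struct ttm_buffer_object *bo)
    {
    	int ret;

    	/* take the reservation, interruptible sleep, no sequence */
    	ret = ttm_bo_reserve(bo, true, false, false, 0);
    	if (unlikely(ret != 0))
    		return ret;	/* -ERESTART, -EBUSY or -EAGAIN */

    	/*
    	 * ... validate, move or map the buffer here; the bo stays off
    	 * the LRU lists while it is reserved ...
    	 */

    	/* put the bo back on the LRU lists and wake waiters */
    	ttm_bo_unreserve(bo);
    	return 0;
    }
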
34927+
34928+/*
34929+ * Call bo->mutex locked.
34930+ */
34931+
34932+static int ttm_bo_add_ttm(struct ttm_buffer_object *bo)
34933+{
34934+ struct ttm_bo_device *bdev = bo->bdev;
34935+ int ret = 0;
34936+ uint32_t page_flags = 0;
34937+
34938+ TTM_ASSERT_LOCKED(&bo->mutex);
34939+ bo->ttm = NULL;
34940+
34941+ switch (bo->type) {
34942+ case ttm_bo_type_device:
34943+ case ttm_bo_type_kernel:
34944+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
34945+ page_flags, bdev->dummy_read_page);
34946+ if (unlikely(bo->ttm == NULL))
34947+ ret = -ENOMEM;
34948+ break;
34949+ case ttm_bo_type_user:
34950+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
34951+ page_flags | TTM_PAGE_FLAG_USER,
34952+ bdev->dummy_read_page);
34953+ if (unlikely(bo->ttm == NULL)) {
34954+ ret = -ENOMEM;
34955+ break;
34956+ }
34957+ ret = ttm_tt_set_user(bo->ttm, current,
34958+ bo->buffer_start, bo->num_pages);
34959+ if (unlikely(ret != 0))
34960+ ttm_tt_destroy(bo->ttm);
34961+ break;
34962+ default:
34963+ printk(KERN_ERR "Illegal buffer object type\n");
34964+ ret = -EINVAL;
34965+ break;
34966+ }
34967+
34968+ return ret;
34969+}
34970+
34971+static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
34972+ struct ttm_mem_reg *mem,
34973+ bool evict, bool interruptible, bool no_wait)
34974+{
34975+ struct ttm_bo_device *bdev = bo->bdev;
34976+ bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
34977+ bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
34978+ struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
34979+ struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
34980+ int ret = 0;
34981+
34982+ if (old_is_pci || new_is_pci ||
34983+ ((mem->flags & bo->mem.flags & TTM_PL_MASK_CACHING) == 0))
34984+ ttm_bo_unmap_virtual(bo);
34985+
34986+ /*
34987+ * Create and bind a ttm if required.
34988+ */
34989+
34990+ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
34991+ ret = ttm_bo_add_ttm(bo);
34992+ if (ret)
34993+ goto out_err;
34994+
34995+ ret = ttm_tt_set_placement_caching(bo->ttm, mem->flags);
34996+ if (ret)
34997+ return ret;
34998+
34999+ if (mem->mem_type != TTM_PL_SYSTEM) {
35000+ ret = ttm_tt_bind(bo->ttm, mem);
35001+ if (ret)
35002+ goto out_err;
35003+ }
35004+
35005+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
35006+
35007+ struct ttm_mem_reg *old_mem = &bo->mem;
35008+ uint32_t save_flags = old_mem->flags;
35009+ uint32_t save_proposed_flags = old_mem->proposed_flags;
35010+
35011+ *old_mem = *mem;
35012+ mem->mm_node = NULL;
35013+ old_mem->proposed_flags = save_proposed_flags;
35014+ ttm_flag_masked(&save_flags, mem->flags,
35015+ TTM_PL_MASK_MEMTYPE);
35016+ goto moved;
35017+ }
35018+
35019+ }
35020+
35021+ if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
35022+ !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
35023+ ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
35024+ else if (bdev->driver->move)
35025+ ret = bdev->driver->move(bo, evict, interruptible,
35026+ no_wait, mem);
35027+ else
35028+ ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
35029+
35030+ if (ret)
35031+ goto out_err;
35032+
35033+ moved:
35034+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_EVICTED) {
35035+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.flags);
35036+ if (ret)
35037+ printk(KERN_ERR "Can not flush read caches\n");
35038+ }
35039+
35040+ ttm_flag_masked(&bo->priv_flags,
35041+ (evict) ? TTM_BO_PRIV_FLAG_EVICTED : 0,
35042+ TTM_BO_PRIV_FLAG_EVICTED);
35043+
35044+ if (bo->mem.mm_node)
35045+ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
35046+ bdev->man[bo->mem.mem_type].gpu_offset;
35047+
35048+ return 0;
35049+
35050+ out_err:
35051+ new_man = &bdev->man[bo->mem.mem_type];
35052+ if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
35053+ ttm_tt_unbind(bo->ttm);
35054+ ttm_tt_destroy(bo->ttm);
35055+ bo->ttm = NULL;
35056+ }
35057+
35058+ return ret;
35059+}
35060+
35061+static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo,
35062+ bool allow_errors)
35063+{
35064+ struct ttm_bo_device *bdev = bo->bdev;
35065+ struct ttm_bo_driver *driver = bdev->driver;
35066+
35067+ if (bo->sync_obj) {
35068+ if (bdev->nice_mode) {
35069+ unsigned long _end = jiffies + 3 * HZ;
35070+ int ret;
35071+ do {
35072+ ret = ttm_bo_wait(bo, false, false, false);
35073+ if (ret && allow_errors)
35074+ return ret;
35075+
35076+ } while (ret && !time_after_eq(jiffies, _end));
35077+
35078+ if (bo->sync_obj) {
35079+ bdev->nice_mode = false;
35080+ printk(KERN_ERR "Detected probable GPU lockup. "
35081+ "Evicting buffer.\n");
35082+ }
35083+ }
35084+ if (bo->sync_obj) {
35085+ driver->sync_obj_unref(&bo->sync_obj);
35086+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
35087+ }
35088+ }
35089+ return 0;
35090+}
35091+
35092+/**
35093+ * If bo idle, remove from delayed- and lru lists, and unref.
35094+ * If not idle, and already on delayed list, do nothing.
35095+ * If not idle, and not on delayed list, put on delayed list,
35096+ * up the list_kref and schedule a delayed list check.
35097+ */
35098+
35099+static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
35100+{
35101+ struct ttm_bo_device *bdev = bo->bdev;
35102+ struct ttm_bo_driver *driver = bdev->driver;
35103+
35104+ mutex_lock(&bo->mutex);
35105+
35106+ if (bo->sync_obj && driver->sync_obj_signaled(bo->sync_obj,
35107+ bo->sync_obj_arg)) {
35108+ driver->sync_obj_unref(&bo->sync_obj);
35109+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
35110+ }
35111+
35112+ if (bo->sync_obj && remove_all)
35113+ (void)ttm_bo_expire_sync_obj(bo, false);
35114+
35115+ if (!bo->sync_obj) {
35116+ int put_count;
35117+
35118+ if (bo->ttm)
35119+ ttm_tt_unbind(bo->ttm);
35120+ spin_lock(&bdev->lru_lock);
35121+ if (!list_empty(&bo->ddestroy)) {
35122+ list_del_init(&bo->ddestroy);
35123+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
35124+ }
35125+ if (bo->mem.mm_node) {
35126+ drm_mm_put_block(bo->mem.mm_node);
35127+ bo->mem.mm_node = NULL;
35128+ }
35129+ put_count = ttm_bo_del_from_lru(bo);
35130+ spin_unlock(&bdev->lru_lock);
35131+ mutex_unlock(&bo->mutex);
35132+ while (put_count--)
35133+ kref_put(&bo->list_kref, ttm_bo_release_list);
35134+
35135+ return;
35136+ }
35137+
35138+ spin_lock(&bdev->lru_lock);
35139+ if (list_empty(&bo->ddestroy)) {
35140+ spin_unlock(&bdev->lru_lock);
35141+ driver->sync_obj_flush(bo->sync_obj, bo->sync_obj_arg);
35142+ spin_lock(&bdev->lru_lock);
35143+ if (list_empty(&bo->ddestroy)) {
35144+ kref_get(&bo->list_kref);
35145+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
35146+ }
35147+ spin_unlock(&bdev->lru_lock);
35148+ schedule_delayed_work(&bdev->wq,
35149+ ((HZ / 100) < 1) ? 1 : HZ / 100);
35150+ } else
35151+ spin_unlock(&bdev->lru_lock);
35152+
35153+ mutex_unlock(&bo->mutex);
35154+ return;
35155+}
35156+
35157+/**
35158+ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
35159+ * encountered buffers.
35160+ */
35161+
35162+static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
35163+{
35164+ struct ttm_buffer_object *entry, *nentry;
35165+ struct list_head *list, *next;
35166+ int ret;
35167+
35168+ spin_lock(&bdev->lru_lock);
35169+ list_for_each_safe(list, next, &bdev->ddestroy) {
35170+ entry = list_entry(list, struct ttm_buffer_object, ddestroy);
35171+ nentry = NULL;
35172+
35173+ /*
35174+ * Protect the next list entry from destruction while we
35175+ * unlock the lru_lock.
35176+ */
35177+
35178+ if (next != &bdev->ddestroy) {
35179+ nentry = list_entry(next, struct ttm_buffer_object,
35180+ ddestroy);
35181+ kref_get(&nentry->list_kref);
35182+ }
35183+ kref_get(&entry->list_kref);
35184+
35185+ spin_unlock(&bdev->lru_lock);
35186+ ttm_bo_cleanup_refs(entry, remove_all);
35187+ kref_put(&entry->list_kref, ttm_bo_release_list);
35188+ spin_lock(&bdev->lru_lock);
35189+
35190+ if (nentry) {
35191+ bool next_onlist = !list_empty(next);
35192+ kref_put(&nentry->list_kref, ttm_bo_release_list);
35193+
35194+ /*
35195+ * Someone might have raced us and removed the
35196+ * next entry from the list. We don't bother restarting
35197+ * list traversal.
35198+ */
35199+
35200+ if (!next_onlist)
35201+ break;
35202+ }
35203+ }
35204+ ret = !list_empty(&bdev->ddestroy);
35205+ spin_unlock(&bdev->lru_lock);
35206+
35207+ return ret;
35208+}
35209+
35210+static void ttm_bo_delayed_workqueue(struct work_struct *work)
35211+{
35212+ struct ttm_bo_device *bdev =
35213+ container_of(work, struct ttm_bo_device, wq.work);
35214+
35215+ if (ttm_bo_delayed_delete(bdev, false)) {
35216+ schedule_delayed_work(&bdev->wq,
35217+ ((HZ / 100) < 1) ? 1 : HZ / 100);
35218+ }
35219+}
35220+
35221+static void ttm_bo_release(struct kref *kref)
35222+{
35223+ struct ttm_buffer_object *bo =
35224+ container_of(kref, struct ttm_buffer_object, kref);
35225+ struct ttm_bo_device *bdev = bo->bdev;
35226+
35227+ if (likely(bo->vm_node != NULL)) {
35228+ rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
35229+ drm_mm_put_block(bo->vm_node);
35230+ }
35231+ write_unlock(&bdev->vm_lock);
35232+ ttm_bo_cleanup_refs(bo, false);
35233+ kref_put(&bo->list_kref, ttm_bo_release_list);
35234+ write_lock(&bdev->vm_lock);
35235+}
35236+
35237+void ttm_bo_unref(struct ttm_buffer_object **p_bo)
35238+{
35239+ struct ttm_buffer_object *bo = *p_bo;
35240+ struct ttm_bo_device *bdev = bo->bdev;
35241+
35242+ *p_bo = NULL;
35243+ write_lock(&bdev->vm_lock);
35244+ kref_put(&bo->kref, ttm_bo_release);
35245+ write_unlock(&bdev->vm_lock);
35246+}
35247+
35248+static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
35249+ bool interruptible, bool no_wait)
35250+{
35251+ int ret = 0;
35252+ struct ttm_bo_device *bdev = bo->bdev;
35253+ struct ttm_mem_reg evict_mem;
35254+
35255+ if (bo->mem.mem_type != mem_type)
35256+ goto out;
35257+
35258+ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
35259+ if (ret && ret != -ERESTART) {
35260+ printk(KERN_ERR "Failed to expire sync object before "
35261+ "buffer eviction.\n");
35262+ goto out;
35263+ }
35264+
35265+ BUG_ON(!atomic_read(&bo->reserved));
35266+
35267+ evict_mem = bo->mem;
35268+ evict_mem.mm_node = NULL;
35269+
35270+ evict_mem.proposed_flags = bdev->driver->evict_flags(bo);
35271+ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
35272+
35273+ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
35274+ if (unlikely(ret != 0 && ret != -ERESTART)) {
35275+ evict_mem.proposed_flags = TTM_PL_FLAG_SYSTEM;
35276+ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
35277+ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
35278+ }
35279+
35280+ if (ret) {
35281+ if (ret != -ERESTART)
35282+ printk(KERN_ERR "Failed to find memory space for "
35283+ "buffer 0x%p eviction.\n", bo);
35284+ goto out;
35285+ }
35286+
35287+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, no_wait);
35288+ if (ret) {
35289+ if (ret != -ERESTART)
35290+ printk(KERN_ERR "Buffer eviction failed\n");
35291+ goto out;
35292+ }
35293+
35294+ spin_lock(&bdev->lru_lock);
35295+ if (evict_mem.mm_node) {
35296+ drm_mm_put_block(evict_mem.mm_node);
35297+ evict_mem.mm_node = NULL;
35298+ }
35299+ spin_unlock(&bdev->lru_lock);
35300+
35301+ ttm_flag_masked(&bo->priv_flags, TTM_BO_PRIV_FLAG_EVICTED,
35302+ TTM_BO_PRIV_FLAG_EVICTED);
35303+
35304+ out:
35305+ return ret;
35306+}
35307+
35308+/**
35309+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
35310+ * space, or we've evicted everything and there isn't enough space.
35311+ */
35312+static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
35313+ struct ttm_mem_reg *mem,
35314+ uint32_t mem_type,
35315+ bool interruptible, bool no_wait)
35316+{
35317+ struct drm_mm_node *node;
35318+ struct ttm_buffer_object *entry;
35319+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
35320+ struct list_head *lru;
35321+ unsigned long num_pages = mem->num_pages;
35322+ int put_count = 0;
35323+ int ret;
35324+
35325+ retry_pre_get:
35326+ ret = drm_mm_pre_get(&man->manager);
35327+ if (unlikely(ret != 0))
35328+ return ret;
35329+
35330+ spin_lock(&bdev->lru_lock);
35331+ do {
35332+ node = drm_mm_search_free(&man->manager, num_pages,
35333+ mem->page_alignment, 1);
35334+ if (node)
35335+ break;
35336+
35337+ lru = &man->lru;
35338+ if (list_empty(lru))
35339+ break;
35340+
35341+ entry = list_first_entry(lru, struct ttm_buffer_object, lru);
35342+ kref_get(&entry->list_kref);
35343+
35344+ ret =
35345+ ttm_bo_reserve_locked(entry, interruptible, no_wait, false, 0);
35346+
35347+ if (likely(ret == 0))
35348+ put_count = ttm_bo_del_from_lru(entry);
35349+
35350+ spin_unlock(&bdev->lru_lock);
35351+
35352+ if (unlikely(ret != 0))
35353+ return ret;
35354+
35355+ while (put_count--)
35356+ kref_put(&entry->list_kref, ttm_bo_ref_bug);
35357+
35358+ mutex_lock(&entry->mutex);
35359+ ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
35360+ mutex_unlock(&entry->mutex);
35361+
35362+ ttm_bo_unreserve(entry);
35363+
35364+ kref_put(&entry->list_kref, ttm_bo_release_list);
35365+ if (ret)
35366+ return ret;
35367+
35368+ spin_lock(&bdev->lru_lock);
35369+ } while (1);
35370+
35371+ if (!node) {
35372+ spin_unlock(&bdev->lru_lock);
35373+ return -ENOMEM;
35374+ }
35375+
35376+ node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
35377+ if (unlikely(!node)) {
35378+ spin_unlock(&bdev->lru_lock);
35379+ goto retry_pre_get;
35380+ }
35381+
35382+ spin_unlock(&bdev->lru_lock);
35383+ mem->mm_node = node;
35384+ mem->mem_type = mem_type;
35385+ return 0;
35386+}
35387+
35388+static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
35389+ bool disallow_fixed,
35390+ uint32_t mem_type,
35391+ uint32_t mask, uint32_t * res_mask)
35392+{
35393+ uint32_t cur_flags = ttm_bo_type_flags(mem_type);
35394+
35395+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
35396+ return false;
35397+
35398+ if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
35399+ return false;
35400+
35401+ if ((mask & man->available_caching) == 0)
35402+ return false;
35403+ if (mask & man->default_caching)
35404+ cur_flags |= man->default_caching;
35405+ else if (mask & TTM_PL_FLAG_CACHED)
35406+ cur_flags |= TTM_PL_FLAG_CACHED;
35407+ else if (mask & TTM_PL_FLAG_WC)
35408+ cur_flags |= TTM_PL_FLAG_WC;
35409+ else
35410+ cur_flags |= TTM_PL_FLAG_UNCACHED;
35411+
35412+ *res_mask = cur_flags;
35413+ return true;
35414+}
35415+
35416+/**
35417+ * Creates space for memory region @mem according to its type.
35418+ *
35419+ * This function first searches for free space in compatible memory types in
35420+ * the priority order defined by the driver. If free space isn't found, then
35421+ * ttm_bo_mem_force_space is attempted in priority order to evict and find
35422+ * space.
35423+ */
35424+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
35425+ struct ttm_mem_reg *mem, bool interruptible, bool no_wait)
35426+{
35427+ struct ttm_bo_device *bdev = bo->bdev;
35428+ struct ttm_mem_type_manager *man;
35429+
35430+ uint32_t num_prios = bdev->driver->num_mem_type_prio;
35431+ const uint32_t *prios = bdev->driver->mem_type_prio;
35432+ uint32_t i;
35433+ uint32_t mem_type = TTM_PL_SYSTEM;
35434+ uint32_t cur_flags = 0;
35435+ bool type_found = false;
35436+ bool type_ok = false;
35437+ bool has_eagain = false;
35438+ struct drm_mm_node *node = NULL;
35439+ int ret;
35440+
35441+ mem->mm_node = NULL;
35442+ for (i = 0; i < num_prios; ++i) {
35443+ mem_type = prios[i];
35444+ man = &bdev->man[mem_type];
35445+
35446+ type_ok = ttm_bo_mt_compatible(man,
35447+ bo->type == ttm_bo_type_user,
35448+ mem_type, mem->proposed_flags,
35449+ &cur_flags);
35450+
35451+ if (!type_ok)
35452+ continue;
35453+
35454+ if (mem_type == TTM_PL_SYSTEM)
35455+ break;
35456+
35457+ if (man->has_type && man->use_type) {
35458+ type_found = true;
35459+ do {
35460+ ret = drm_mm_pre_get(&man->manager);
35461+ if (unlikely(ret))
35462+ return ret;
35463+
35464+ spin_lock(&bdev->lru_lock);
35465+ node = drm_mm_search_free(&man->manager,
35466+ mem->num_pages,
35467+ mem->page_alignment,
35468+ 1);
35469+ if (unlikely(!node)) {
35470+ spin_unlock(&bdev->lru_lock);
35471+ break;
35472+ }
35473+ node = drm_mm_get_block_atomic(node,
35474+ mem->num_pages,
35475+ mem->
35476+ page_alignment);
35477+ spin_unlock(&bdev->lru_lock);
35478+ } while (!node);
35479+ }
35480+ if (node)
35481+ break;
35482+ }
35483+
35484+ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
35485+ mem->mm_node = node;
35486+ mem->mem_type = mem_type;
35487+ mem->flags = cur_flags;
35488+ return 0;
35489+ }
35490+
35491+ if (!type_found)
35492+ return -EINVAL;
35493+
35494+ num_prios = bdev->driver->num_mem_busy_prio;
35495+ prios = bdev->driver->mem_busy_prio;
35496+
35497+ for (i = 0; i < num_prios; ++i) {
35498+ mem_type = prios[i];
35499+ man = &bdev->man[mem_type];
35500+
35501+ if (!man->has_type)
35502+ continue;
35503+
35504+ if (!ttm_bo_mt_compatible(man,
35505+ bo->type == ttm_bo_type_user,
35506+ mem_type,
35507+ mem->proposed_flags, &cur_flags))
35508+ continue;
35509+
35510+ ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
35511+ interruptible, no_wait);
35512+
35513+ if (ret == 0 && mem->mm_node) {
35514+ mem->flags = cur_flags;
35515+ return 0;
35516+ }
35517+
35518+ if (ret == -ERESTART)
35519+ has_eagain = true;
35520+ }
35521+
35522+ ret = (has_eagain) ? -ERESTART : -ENOMEM;
35523+ return ret;
35524+}
35525+
35526+/*
35527+ * Call bo->mutex locked.
35528+ * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
35529+ */
35530+
35531+static int ttm_bo_busy(struct ttm_buffer_object *bo)
35532+{
35533+ void *sync_obj = bo->sync_obj;
35534+ struct ttm_bo_driver *driver = bo->bdev->driver;
35535+
35536+ if (sync_obj) {
35537+ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
35538+ driver->sync_obj_unref(&bo->sync_obj);
35539+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
35540+ return 0;
35541+ }
35542+ driver->sync_obj_flush(sync_obj, bo->sync_obj_arg);
35543+ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
35544+ driver->sync_obj_unref(&bo->sync_obj);
35545+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
35546+ return 0;
35547+ }
35548+ return 1;
35549+ }
35550+ return 0;
35551+}
35552+
35553+int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
35554+{
35555+ int ret = 0;
35556+
35557+ if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
35558+ return -EBUSY;
35559+
35560+ ret = wait_event_interruptible(bo->event_queue,
35561+ atomic_read(&bo->cpu_writers) == 0);
35562+
35563+ if (ret == -ERESTARTSYS)
35564+ ret = -ERESTART;
35565+
35566+ return ret;
35567+}
35568+
35569+/*
35570+ * bo->mutex locked.
35571+ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
35572+ */
35573+
35574+int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t new_mem_flags,
35575+ bool interruptible, bool no_wait)
35576+{
35577+ struct ttm_bo_device *bdev = bo->bdev;
35578+ int ret = 0;
35579+ struct ttm_mem_reg mem;
35580+
35581+ BUG_ON(!atomic_read(&bo->reserved));
35582+
35583+ /*
35584+ * FIXME: It's possible to pipeline buffer moves.
35585+ * Have the driver move function wait for idle when necessary,
35586+ * instead of doing it here.
35587+ */
35588+
35589+ ttm_bo_busy(bo);
35590+ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
35591+ if (ret)
35592+ return ret;
35593+
35594+ mem.num_pages = bo->num_pages;
35595+ mem.size = mem.num_pages << PAGE_SHIFT;
35596+ mem.proposed_flags = new_mem_flags;
35597+ mem.page_alignment = bo->mem.page_alignment;
35598+
35599+ /*
35600+ * Determine where to move the buffer.
35601+ */
35602+
35603+ ret = ttm_bo_mem_space(bo, &mem, interruptible, no_wait);
35604+ if (ret)
35605+ goto out_unlock;
35606+
35607+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
35608+
35609+ out_unlock:
35610+ if (ret && mem.mm_node) {
35611+ spin_lock(&bdev->lru_lock);
35612+ drm_mm_put_block(mem.mm_node);
35613+ spin_unlock(&bdev->lru_lock);
35614+ }
35615+ return ret;
35616+}
35617+
35618+static int ttm_bo_mem_compat(struct ttm_mem_reg *mem)
35619+{
35620+ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_MEM) == 0)
35621+ return 0;
35622+ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_CACHING) == 0)
35623+ return 0;
35624+
35625+ return 1;
35626+}
35627+
35628+int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
35629+ bool interruptible, bool no_wait)
35630+{
35631+ int ret;
35632+
35633+ BUG_ON(!atomic_read(&bo->reserved));
35634+ bo->mem.proposed_flags = bo->proposed_flags;
35635+
35636+ TTM_DEBUG("Proposed flags 0x%08lx, Old flags 0x%08lx\n",
35637+ (unsigned long)bo->mem.proposed_flags,
35638+ (unsigned long)bo->mem.flags);
35639+
35640+ /*
35641+ * Check whether we need to move buffer.
35642+ */
35643+
35644+ if (!ttm_bo_mem_compat(&bo->mem)) {
35645+ ret = ttm_bo_move_buffer(bo, bo->mem.proposed_flags,
35646+ interruptible, no_wait);
35647+ if (ret) {
35648+ if (ret != -ERESTART)
35649+ printk(KERN_ERR "Failed moving buffer. "
35650+ "Proposed placement 0x%08x\n",
35651+ bo->mem.proposed_flags);
35652+ if (ret == -ENOMEM)
35653+ printk(KERN_ERR "Out of aperture space or "
35654+ "DRM memory quota.\n");
35655+ return ret;
35656+ }
35657+ }
35658+
35659+ /*
35660+ * We might need to add a TTM.
35661+ */
35662+
35663+ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
35664+ ret = ttm_bo_add_ttm(bo);
35665+ if (ret)
35666+ return ret;
35667+ }
35668+ /*
35669+ * Validation has succeeded, move the access and other
35670+ * non-mapping-related flag bits from the proposed flags to
35671+ * the active flags
35672+ */
35673+
35674+ ttm_flag_masked(&bo->mem.flags, bo->proposed_flags,
35675+ ~TTM_PL_MASK_MEMTYPE);
35676+
35677+ return 0;
35678+}
35679+
35680+int
35681+ttm_bo_check_placement(struct ttm_buffer_object *bo,
35682+ uint32_t set_flags, uint32_t clr_flags)
35683+{
35684+ uint32_t new_mask = set_flags | clr_flags;
35685+
35686+ if ((bo->type == ttm_bo_type_user) && (clr_flags & TTM_PL_FLAG_CACHED)) {
35687+ printk(KERN_ERR
35688+ "User buffers require cache-coherent memory.\n");
35689+ return -EINVAL;
35690+ }
35691+
35692+ if (!capable(CAP_SYS_ADMIN)) {
35693+ if (new_mask & TTM_PL_FLAG_NO_EVICT) {
35694+ printk(KERN_ERR "Need to be root to modify"
35695+ " NO_EVICT status.\n");
35696+ return -EINVAL;
35697+ }
35698+
35699+ if ((clr_flags & bo->mem.flags & TTM_PL_MASK_MEMTYPE) &&
35700+ (bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
35701+ printk(KERN_ERR "Incompatible memory specification"
35702+ " for NO_EVICT buffer.\n");
35703+ return -EINVAL;
35704+ }
35705+ }
35706+ return 0;
35707+}
35708+
35709+int ttm_buffer_object_init(struct ttm_bo_device *bdev,
35710+ struct ttm_buffer_object *bo,
35711+ unsigned long size,
35712+ enum ttm_bo_type type,
35713+ uint32_t flags,
35714+ uint32_t page_alignment,
35715+ unsigned long buffer_start,
35716+ bool interruptible,
35717+ struct file *persistant_swap_storage,
35718+ size_t acc_size,
35719+ void (*destroy) (struct ttm_buffer_object *))
35720+{
35721+ int ret = 0;
35722+ unsigned long num_pages;
35723+
35724+ size += buffer_start & ~PAGE_MASK;
35725+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
35726+ if (num_pages == 0) {
35727+ printk(KERN_ERR "Illegal buffer object size.\n");
35728+ return -EINVAL;
35729+ }
35730+ bo->destroy = destroy;
35731+
35732+ mutex_init(&bo->mutex);
35733+ mutex_lock(&bo->mutex);
35734+ kref_init(&bo->kref);
35735+ kref_init(&bo->list_kref);
35736+ atomic_set(&bo->cpu_writers, 0);
35737+ atomic_set(&bo->reserved, 1);
35738+ init_waitqueue_head(&bo->event_queue);
35739+ INIT_LIST_HEAD(&bo->lru);
35740+ INIT_LIST_HEAD(&bo->ddestroy);
35741+ INIT_LIST_HEAD(&bo->swap);
35742+ bo->bdev = bdev;
35743+ bo->type = type;
35744+ bo->num_pages = num_pages;
35745+ bo->mem.mem_type = TTM_PL_SYSTEM;
35746+ bo->mem.num_pages = bo->num_pages;
35747+ bo->mem.mm_node = NULL;
35748+ bo->mem.page_alignment = page_alignment;
35749+ bo->buffer_start = buffer_start & PAGE_MASK;
35750+ bo->priv_flags = 0;
35751+ bo->mem.flags = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
35752+ bo->seq_valid = false;
35753+ bo->persistant_swap_storage = persistant_swap_storage;
35754+ bo->acc_size = acc_size;
35755+
35756+ ret = ttm_bo_check_placement(bo, flags, 0ULL);
35757+ if (unlikely(ret != 0))
35758+ goto out_err;
35759+
35760+ /*
35761+ * If no caching attributes are set, accept any form of caching.
35762+ */
35763+
35764+ if ((flags & TTM_PL_MASK_CACHING) == 0)
35765+ flags |= TTM_PL_MASK_CACHING;
35766+
35767+ bo->proposed_flags = flags;
35768+ bo->mem.proposed_flags = flags;
35769+
35770+ /*
35771+ * For ttm_bo_type_device buffers, allocate
35772+ * address space from the device.
35773+ */
35774+
35775+ if (bo->type == ttm_bo_type_device) {
35776+ ret = ttm_bo_setup_vm(bo);
35777+ if (ret)
35778+ goto out_err;
35779+ }
35780+
35781+ ret = ttm_buffer_object_validate(bo, interruptible, false);
35782+ if (ret)
35783+ goto out_err;
35784+
35785+ mutex_unlock(&bo->mutex);
35786+ ttm_bo_unreserve(bo);
35787+ return 0;
35788+
35789+ out_err:
35790+ mutex_unlock(&bo->mutex);
35791+ ttm_bo_unreserve(bo);
35792+ ttm_bo_unref(&bo);
35793+
35794+ return ret;
35795+}
35796+
35797+static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
35798+ unsigned long num_pages)
35799+{
35800+ size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
35801+ PAGE_MASK;
35802+
35803+ return bdev->ttm_bo_size + 2 * page_array_size;
35804+}
35805+
35806+int ttm_buffer_object_create(struct ttm_bo_device *bdev,
35807+ unsigned long size,
35808+ enum ttm_bo_type type,
35809+ uint32_t flags,
35810+ uint32_t page_alignment,
35811+ unsigned long buffer_start,
35812+ bool interruptible,
35813+ struct file *persistant_swap_storage,
35814+ struct ttm_buffer_object **p_bo)
35815+{
35816+ struct ttm_buffer_object *bo;
35817+ int ret;
35818+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
35819+
35820+ size_t acc_size =
35821+ ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
35822+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
35823+ if (unlikely(ret != 0))
35824+ return ret;
35825+
35826+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
35827+
35828+ if (unlikely(bo == NULL)) {
35829+ ttm_mem_global_free(mem_glob, acc_size, false);
35830+ return -ENOMEM;
35831+ }
35832+
35833+ ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
35834+ page_alignment, buffer_start,
35835+ interruptible,
35836+ persistant_swap_storage, acc_size, NULL);
35837+ if (likely(ret == 0))
35838+ *p_bo = bo;
35839+
35840+ return ret;
35841+}
35842+
35843+static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
35844+ uint32_t mem_type, bool allow_errors)
35845+{
35846+ int ret;
35847+
35848+ mutex_lock(&bo->mutex);
35849+
35850+ ret = ttm_bo_expire_sync_obj(bo, allow_errors);
35851+ if (ret)
35852+ goto out;
35853+
35854+ if (bo->mem.mem_type == mem_type)
35855+ ret = ttm_bo_evict(bo, mem_type, false, false);
35856+
35857+ if (ret) {
35858+ if (allow_errors) {
35859+ goto out;
35860+ } else {
35861+ ret = 0;
35862+ printk(KERN_ERR "Cleanup eviction failed\n");
35863+ }
35864+ }
35865+
35866+ out:
35867+ mutex_unlock(&bo->mutex);
35868+ return ret;
35869+}
35870+
35871+static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
35872+ struct list_head *head,
35873+ unsigned mem_type, bool allow_errors)
35874+{
35875+ struct ttm_buffer_object *entry;
35876+ int ret;
35877+ int put_count;
35878+
35879+ /*
35880+ * Can't use standard list traversal since we're unlocking.
35881+ */
35882+
35883+ spin_lock(&bdev->lru_lock);
35884+
35885+ while (!list_empty(head)) {
35886+ entry = list_first_entry(head, struct ttm_buffer_object, lru);
35887+ kref_get(&entry->list_kref);
35888+ ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
35889+ put_count = ttm_bo_del_from_lru(entry);
35890+ spin_unlock(&bdev->lru_lock);
35891+ while (put_count--)
35892+ kref_put(&entry->list_kref, ttm_bo_ref_bug);
35893+ BUG_ON(ret);
35894+ ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
35895+ ttm_bo_unreserve(entry);
35896+ kref_put(&entry->list_kref, ttm_bo_release_list);
35897+ spin_lock(&bdev->lru_lock);
35898+ }
35899+
35900+ spin_unlock(&bdev->lru_lock);
35901+
35902+ return 0;
35903+}
35904+
35905+int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
35906+{
35907+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
35908+ int ret = -EINVAL;
35909+
35910+ if (mem_type >= TTM_NUM_MEM_TYPES) {
35911+ printk(KERN_ERR "Illegal memory type %d\n", mem_type);
35912+ return ret;
35913+ }
35914+
35915+ if (!man->has_type) {
35916+ printk(KERN_ERR "Trying to take down uninitialized "
35917+ "memory manager type %u\n", mem_type);
35918+ return ret;
35919+ }
35920+
35921+ man->use_type = false;
35922+ man->has_type = false;
35923+
35924+ ret = 0;
35925+ if (mem_type > 0) {
35926+ ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
35927+
35928+ spin_lock(&bdev->lru_lock);
35929+ if (drm_mm_clean(&man->manager)) {
35930+ drm_mm_takedown(&man->manager);
35931+ } else {
35932+ ret = -EBUSY;
35933+ }
35934+ spin_unlock(&bdev->lru_lock);
35935+ }
35936+
35937+ return ret;
35938+}
35939+
35940+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
35941+{
35942+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
35943+
35944+ if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
35945+ printk(KERN_ERR "Illegal memory manager memory type %u.\n",
35946+ mem_type);
35947+ return -EINVAL;
35948+ }
35949+
35950+ if (!man->has_type) {
35951+ printk(KERN_ERR "Memory type %u has not been initialized.\n",
35952+ mem_type);
35953+ return 0;
35954+ }
35955+
35956+ return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
35957+}
35958+
35959+int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
35960+ unsigned long p_offset, unsigned long p_size)
35961+{
35962+ int ret = -EINVAL;
35963+ struct ttm_mem_type_manager *man;
35964+
35965+ if (type >= TTM_NUM_MEM_TYPES) {
35966+ printk(KERN_ERR "Illegal memory type %d\n", type);
35967+ return ret;
35968+ }
35969+
35970+ man = &bdev->man[type];
35971+ if (man->has_type) {
35972+ printk(KERN_ERR
35973+ "Memory manager already initialized for type %d\n",
35974+ type);
35975+ return ret;
35976+ }
35977+
35978+ ret = bdev->driver->init_mem_type(bdev, type, man);
35979+ if (ret)
35980+ return ret;
35981+
35982+ ret = 0;
35983+ if (type != TTM_PL_SYSTEM) {
35984+ if (!p_size) {
35985+ printk(KERN_ERR "Zero size memory manager type %d\n",
35986+ type);
35987+ return ret;
35988+ }
35989+ ret = drm_mm_init(&man->manager, p_offset, p_size);
35990+ if (ret)
35991+ return ret;
35992+ }
35993+ man->has_type = true;
35994+ man->use_type = true;
35995+ man->size = p_size;
35996+
35997+ INIT_LIST_HEAD(&man->lru);
35998+
35999+ return 0;
36000+}
36001+
36002+int ttm_bo_device_release(struct ttm_bo_device *bdev)
36003+{
36004+ int ret = 0;
36005+ unsigned i = TTM_NUM_MEM_TYPES;
36006+ struct ttm_mem_type_manager *man;
36007+
36008+ while (i--) {
36009+ man = &bdev->man[i];
36010+ if (man->has_type) {
36011+ man->use_type = false;
36012+ if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
36013+ ret = -EBUSY;
36014+ printk(KERN_ERR "DRM memory manager type %d "
36015+ "is not clean.\n", i);
36016+ }
36017+ man->has_type = false;
36018+ }
36019+ }
36020+
36021+ if (!cancel_delayed_work(&bdev->wq))
36022+ flush_scheduled_work();
36023+
36024+ while (ttm_bo_delayed_delete(bdev, true)) ;
36025+
36026+ spin_lock(&bdev->lru_lock);
36027+ if (list_empty(&bdev->ddestroy))
36028+ TTM_DEBUG("Delayed destroy list was clean\n");
36029+
36030+ if (list_empty(&bdev->man[0].lru))
36031+ TTM_DEBUG("Swap list was clean\n");
36032+ spin_unlock(&bdev->lru_lock);
36033+
36034+ ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
36035+ BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
36036+ write_lock(&bdev->vm_lock);
36037+ drm_mm_takedown(&bdev->addr_space_mm);
36038+ write_unlock(&bdev->vm_lock);
36039+
36040+ __free_page(bdev->dummy_read_page);
36041+ return ret;
36042+}
36043+
36044+/*
36045+ * This function is intended to be called on drm driver load.
36046+ * If you decide to call it from firstopen, you must protect the call
36047+ * from a potentially racing ttm_bo_driver_finish in lastclose.
36048+ * (This may happen on X server restart).
36049+ */
36050+
36051+int ttm_bo_device_init(struct ttm_bo_device *bdev,
36052+ struct ttm_mem_global *mem_glob,
36053+ struct ttm_bo_driver *driver, uint64_t file_page_offset)
36054+{
36055+ int ret = -EINVAL;
36056+
36057+ bdev->dummy_read_page = NULL;
36058+ rwlock_init(&bdev->vm_lock);
36059+ spin_lock_init(&bdev->lru_lock);
36060+
36061+ bdev->driver = driver;
36062+ bdev->mem_glob = mem_glob;
36063+
36064+ memset(bdev->man, 0, sizeof(bdev->man));
36065+
36066+ bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
36067+ if (unlikely(bdev->dummy_read_page == NULL)) {
36068+ ret = -ENOMEM;
36069+ goto out_err0;
36070+ }
36071+
36072+ /*
36073+ * Initialize the system memory buffer type.
36074+ * Other types need to be driver / IOCTL initialized.
36075+ */
36076+ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
36077+ if (unlikely(ret != 0))
36078+ goto out_err1;
36079+
36080+ bdev->addr_space_rb = RB_ROOT;
36081+ ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
36082+ if (unlikely(ret != 0))
36083+ goto out_err2;
36084+
36085+ INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
36086+ bdev->nice_mode = true;
36087+ INIT_LIST_HEAD(&bdev->ddestroy);
36088+ INIT_LIST_HEAD(&bdev->swap_lru);
36089+ bdev->dev_mapping = NULL;
36090+ ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
36091+ ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
36092+ if (unlikely(ret != 0)) {
36093+ printk(KERN_ERR "Could not register buffer object swapout.\n");
36094+ goto out_err2;
36095+ }
36096+ return 0;
36097+ out_err2:
36098+ ttm_bo_clean_mm(bdev, 0);
36099+ out_err1:
36100+ __free_page(bdev->dummy_read_page);
36101+ out_err0:
36102+ return ret;
36103+}
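As a rough usage sketch of the two init entry points above (ttm_bo_device_init plus a per-type ttm_bo_init_mm call), a driver-load path could look like the following; MY_PL_VRAM, vram_pages and the my_ helper name are assumed placeholders and are not part of this patch:

/* Illustrative only: MY_PL_VRAM, vram_pages and my_drv_ttm_init are assumed
 * names; offsets and sizes are given in pages, as ttm_bo_init_mm expects. */
#define MY_PL_VRAM 2

static int my_drv_ttm_init(struct ttm_bo_device *bdev,
			   struct ttm_mem_global *mem_glob,
			   struct ttm_bo_driver *driver,
			   uint64_t file_page_offset,
			   unsigned long vram_pages)
{
	int ret;

	/* Sets up TTM_PL_SYSTEM and the mmap address space manager. */
	ret = ttm_bo_device_init(bdev, mem_glob, driver, file_page_offset);
	if (ret)
		return ret;

	/* Non-system memory types are initialized by the driver. */
	return ttm_bo_init_mm(bdev, MY_PL_VRAM, 0, vram_pages);
}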
36104+
36105+/*
36106+ * buffer object vm functions.
36107+ */
36108+
36109+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
36110+{
36111+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
36112+
36113+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
36114+ if (mem->mem_type == TTM_PL_SYSTEM)
36115+ return false;
36116+
36117+ if (man->flags & TTM_MEMTYPE_FLAG_CMA)
36118+ return false;
36119+
36120+ if (mem->flags & TTM_PL_FLAG_CACHED)
36121+ return false;
36122+ }
36123+ return true;
36124+}
36125+
36126+int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
36127+ struct ttm_mem_reg *mem,
36128+ unsigned long *bus_base,
36129+ unsigned long *bus_offset, unsigned long *bus_size)
36130+{
36131+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
36132+
36133+ *bus_size = 0;
36134+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
36135+ return -EINVAL;
36136+
36137+ if (ttm_mem_reg_is_pci(bdev, mem)) {
36138+ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
36139+ *bus_size = mem->num_pages << PAGE_SHIFT;
36140+ *bus_base = man->io_offset;
36141+ }
36142+
36143+ return 0;
36144+}
36145+
36146+/**
36147+ * \c Kill all user-space virtual mappings of this buffer object.
36148+ *
36149+ * \param bo The buffer object.
36150+ *
36151+ * Call bo->mutex locked.
36152+ */
36153+
36154+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
36155+{
36156+ struct ttm_bo_device *bdev = bo->bdev;
36157+ loff_t offset = (loff_t) bo->addr_space_offset;
36158+ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
36159+
36160+ if (!bdev->dev_mapping)
36161+ return;
36162+
36163+ unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
36164+}
36165+
36166+static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
36167+{
36168+ struct ttm_bo_device *bdev = bo->bdev;
36169+ struct rb_node **cur = &bdev->addr_space_rb.rb_node;
36170+ struct rb_node *parent = NULL;
36171+ struct ttm_buffer_object *cur_bo;
36172+ unsigned long offset = bo->vm_node->start;
36173+ unsigned long cur_offset;
36174+
36175+ while (*cur) {
36176+ parent = *cur;
36177+ cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
36178+ cur_offset = cur_bo->vm_node->start;
36179+ if (offset < cur_offset)
36180+ cur = &parent->rb_left;
36181+ else if (offset > cur_offset)
36182+ cur = &parent->rb_right;
36183+ else
36184+ BUG();
36185+ }
36186+
36187+ rb_link_node(&bo->vm_rb, parent, cur);
36188+ rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
36189+}
36190+
36191+/**
36192+ * ttm_bo_setup_vm:
36193+ *
36194+ * @bo: the buffer to allocate address space for
36195+ *
36196+ * Allocate address space in the drm device so that applications
36197+ * can mmap the buffer and access the contents. This only
36198+ * applies to ttm_bo_type_device objects as others are not
36199+ * placed in the drm device address space.
36200+ */
36201+
36202+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
36203+{
36204+ struct ttm_bo_device *bdev = bo->bdev;
36205+ int ret;
36206+
36207+ retry_pre_get:
36208+ ret = drm_mm_pre_get(&bdev->addr_space_mm);
36209+ if (unlikely(ret != 0))
36210+ return ret;
36211+
36212+ write_lock(&bdev->vm_lock);
36213+ bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
36214+ bo->mem.num_pages, 0, 0);
36215+
36216+ if (unlikely(bo->vm_node == NULL)) {
36217+ ret = -ENOMEM;
36218+ goto out_unlock;
36219+ }
36220+
36221+ bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
36222+ bo->mem.num_pages, 0);
36223+
36224+ if (unlikely(bo->vm_node == NULL)) {
36225+ write_unlock(&bdev->vm_lock);
36226+ goto retry_pre_get;
36227+ }
36228+
36229+ ttm_bo_vm_insert_rb(bo);
36230+ write_unlock(&bdev->vm_lock);
36231+ bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
36232+
36233+ return 0;
36234+ out_unlock:
36235+ write_unlock(&bdev->vm_lock);
36236+ return ret;
36237+}
36238+
36239+int ttm_bo_wait(struct ttm_buffer_object *bo,
36240+ bool lazy, bool interruptible, bool no_wait)
36241+{
36242+ struct ttm_bo_driver *driver = bo->bdev->driver;
36243+ void *sync_obj;
36244+ void *sync_obj_arg;
36245+ int ret = 0;
36246+
36247+ while (bo->sync_obj) {
36248+ if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
36249+ driver->sync_obj_unref(&bo->sync_obj);
36250+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
36251+ goto out;
36252+ }
36253+ if (no_wait) {
36254+ ret = -EBUSY;
36255+ goto out;
36256+ }
36257+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
36258+ sync_obj_arg = bo->sync_obj_arg;
36259+ mutex_unlock(&bo->mutex);
36260+ ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
36261+ lazy, interruptible);
36262+
36263+ mutex_lock(&bo->mutex);
36264+ if (unlikely(ret != 0)) {
36265+ driver->sync_obj_unref(&sync_obj);
36266+ return ret;
36267+ }
36268+
36269+ if (bo->sync_obj == sync_obj) {
36270+ driver->sync_obj_unref(&bo->sync_obj);
36271+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
36272+ }
36273+ driver->sync_obj_unref(&sync_obj);
36274+ }
36275+ out:
36276+ return 0;
36277+}
36278+
36279+void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
36280+{
36281+ atomic_set(&bo->reserved, 0);
36282+ wake_up_all(&bo->event_queue);
36283+}
36284+
36285+int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
36286+ bool no_wait)
36287+{
36288+ int ret;
36289+
36290+ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
36291+ if (no_wait)
36292+ return -EBUSY;
36293+ else if (interruptible) {
36294+ ret = wait_event_interruptible
36295+ (bo->event_queue, atomic_read(&bo->reserved) == 0);
36296+ if (unlikely(ret != 0))
36297+ return -ERESTART;
36298+ } else {
36299+ wait_event(bo->event_queue,
36300+ atomic_read(&bo->reserved) == 0);
36301+ }
36302+ }
36303+ return 0;
36304+}
36305+
36306+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
36307+{
36308+ int ret = 0;
36309+
36310+ /*
36311+ * Using ttm_bo_reserve instead of ttm_bo_block_reservation
36312+ * makes sure the lru lists are updated.
36313+ */
36314+
36315+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
36316+ if (unlikely(ret != 0))
36317+ return ret;
36318+ mutex_lock(&bo->mutex);
36319+ ret = ttm_bo_wait(bo, false, true, no_wait);
36320+ if (unlikely(ret != 0))
36321+ goto out_err0;
36322+ atomic_inc(&bo->cpu_writers);
36323+ out_err0:
36324+ mutex_unlock(&bo->mutex);
36325+ ttm_bo_unreserve(bo);
36326+ return ret;
36327+}
36328+
36329+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
36330+{
36331+ if (atomic_dec_and_test(&bo->cpu_writers))
36332+ wake_up_all(&bo->event_queue);
36333+}
36334+
36335+/**
36336+ * A buffer object shrink method that tries to swap out the first
36337+ * buffer object on the ttm_bo_device::swap_lru list.
36338+ */
36339+
36340+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
36341+{
36342+ struct ttm_bo_device *bdev =
36343+ container_of(shrink, struct ttm_bo_device, shrink);
36344+ struct ttm_buffer_object *bo;
36345+ int ret = -EBUSY;
36346+ int put_count;
36347+ uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
36348+
36349+ spin_lock(&bdev->lru_lock);
36350+ while (ret == -EBUSY) {
36351+ if (unlikely(list_empty(&bdev->swap_lru))) {
36352+ spin_unlock(&bdev->lru_lock);
36353+ return -EBUSY;
36354+ }
36355+
36356+ bo = list_first_entry(&bdev->swap_lru,
36357+ struct ttm_buffer_object, swap);
36358+ kref_get(&bo->list_kref);
36359+
36360+ /**
36361+ * Reserve buffer. Since we unlock while sleeping, we need
36362+ * to re-check that nobody removed us from the swap-list while
36363+ * we slept.
36364+ */
36365+
36366+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
36367+ if (unlikely(ret == -EBUSY)) {
36368+ spin_unlock(&bdev->lru_lock);
36369+ ttm_bo_wait_unreserved(bo, false);
36370+ kref_put(&bo->list_kref, ttm_bo_release_list);
36371+ spin_lock(&bdev->lru_lock);
36372+ }
36373+ }
36374+
36375+ BUG_ON(ret != 0);
36376+ put_count = ttm_bo_del_from_lru(bo);
36377+ spin_unlock(&bdev->lru_lock);
36378+
36379+ while (put_count--)
36380+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
36381+
36382+ /**
36383+ * Wait for GPU, then move to system cached.
36384+ */
36385+
36386+ mutex_lock(&bo->mutex);
36387+ ret = ttm_bo_wait(bo, false, false, false);
36388+ if (unlikely(ret != 0))
36389+ goto out;
36390+
36391+ if ((bo->mem.flags & swap_placement) != swap_placement) {
36392+ struct ttm_mem_reg evict_mem;
36393+
36394+ evict_mem = bo->mem;
36395+ evict_mem.mm_node = NULL;
36396+ evict_mem.proposed_flags =
36397+ TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
36398+ evict_mem.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
36399+ evict_mem.mem_type = TTM_PL_SYSTEM;
36400+
36401+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, false, false);
36402+ if (unlikely(ret != 0))
36403+ goto out;
36404+ }
36405+
36406+ ttm_bo_unmap_virtual(bo);
36407+
36408+ /**
36409+ * Swap out. Buffer will be swapped in again as soon as
36410+ * anyone tries to access a ttm page.
36411+ */
36412+
36413+ ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
36414+ out:
36415+ mutex_unlock(&bo->mutex);
36416+
36417+ /**
36418+ *
36419+ * Unreserve without putting on LRU to avoid swapping out an
36420+ * already swapped buffer.
36421+ */
36422+
36423+ atomic_set(&bo->reserved, 0);
36424+ wake_up_all(&bo->event_queue);
36425+ kref_put(&bo->list_kref, ttm_bo_release_list);
36426+ return ret;
36427+}
36428+
36429+void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
36430+{
36431+ while (ttm_bo_swapout(&bdev->shrink) == 0) ;
36432+}
36433diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo_api.h b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h
36434new file mode 100644
36435index 0000000..faf7475
36436--- /dev/null
36437+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h
36438@@ -0,0 +1,578 @@
36439+/**************************************************************************
36440+ *
36441+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
36442+ * All Rights Reserved.
36443+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
36444+ * All Rights Reserved.
36445+ *
36446+ * Permission is hereby granted, free of charge, to any person obtaining a
36447+ * copy of this software and associated documentation files (the
36448+ * "Software"), to deal in the Software without restriction, including
36449+ * without limitation the rights to use, copy, modify, merge, publish,
36450+ * distribute, sub license, and/or sell copies of the Software, and to
36451+ * permit persons to whom the Software is furnished to do so, subject to
36452+ * the following conditions:
36453+ *
36454+ * The above copyright notice and this permission notice (including the
36455+ * next paragraph) shall be included in all copies or substantial portions
36456+ * of the Software.
36457+ *
36458+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36459+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36460+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
36461+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
36462+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
36463+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
36464+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
36465+ *
36466+ **************************************************************************/
36467+/*
36468+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
36469+ */
36470+
36471+#ifndef _TTM_BO_API_H_
36472+#define _TTM_BO_API_H_
36473+
36474+#include <drm/drm_hashtab.h>
36475+#include <linux/kref.h>
36476+#include <linux/list.h>
36477+#include <linux/wait.h>
36478+#include <linux/mutex.h>
36479+#include <linux/mm.h>
36480+#include <linux/rbtree.h>
36481+
36482+struct ttm_bo_device;
36483+
36484+struct drm_mm_node;
36485+
36486+/**
36487+ * struct ttm_mem_reg
36488+ *
36489+ * @mm_node: Memory manager node.
36490+ * @size: Requested size of memory region.
36491+ * @num_pages: Actual size of memory region in pages.
36492+ * @page_alignment: Page alignment.
36493+ * @flags: Placement flags.
36494+ * @proposed_flags: Proposed placement flags.
36495+ *
36496+ * Structure indicating the placement and space resources used by a
36497+ * buffer object.
36498+ */
36499+
36500+struct ttm_mem_reg {
36501+ struct drm_mm_node *mm_node;
36502+ unsigned long size;
36503+ unsigned long num_pages;
36504+ uint32_t page_alignment;
36505+ uint32_t mem_type;
36506+ uint32_t flags;
36507+ uint32_t proposed_flags;
36508+};
36509+
36510+/**
36511+ * enum ttm_bo_type
36512+ *
36513+ * @ttm_bo_type_device: These are 'normal' buffers that can
36514+ * be mmapped by user space. Each of these bos occupy a slot in the
36515+ * device address space, that can be used for normal vm operations.
36516+ *
36517+ * @ttm_bo_type_user: These are user-space memory areas that are made
36518+ * available to the GPU by mapping the buffer pages into the GPU aperture
36519+ * space. These buffers cannot be mmaped from the device address space.
36520+ *
36521+ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
36522+ * but they cannot be accessed from user-space. For kernel-only use.
36523+ */
36524+
36525+enum ttm_bo_type {
36526+ ttm_bo_type_device,
36527+ ttm_bo_type_user,
36528+ ttm_bo_type_kernel
36529+};
36530+
36531+struct ttm_tt;
36532+
36533+/**
36534+ * struct ttm_buffer_object
36535+ *
36536+ * @bdev: Pointer to the buffer object device structure.
36537+ * @kref: Reference count of this buffer object. When this refcount reaches
36538+ * zero, the object is put on the delayed delete list.
36539+ * @list_kref: List reference count of this buffer object. This member is
36540+ * used to avoid destruction while the buffer object is still on a list.
36541+ * Lru lists may keep one refcount, the delayed delete list, and kref != 0
36542+ * keeps one refcount. When this refcount reaches zero,
36543+ * the object is destroyed.
36544+ * @proposed_flags: Proposed placement for the buffer. Changed only by the
36545+ * creator prior to validation, as opposed to bo->mem.proposed_flags, which is
36546+ * changed by the implementation prior to a buffer move if it wants to outsmart
36547+ * the buffer creator / user. The latter happens, for example, at eviction.
36548+ * @buffer_start: The virtual user-space start address of ttm_bo_type_user
36549+ * buffers.
36550+ * @type: The bo type.
36551+ * @offset: The current GPU offset, which can have different meanings
36552+ * depending on the memory type. For SYSTEM type memory, it should be 0.
36553+ * @mem: structure describing current placement.
36554+ * @val_seq: Sequence of the validation holding the @reserved lock.
36555+ * Used to avoid starvation when many processes compete to validate the
36556+ * buffer. This member is protected by the bo_device::lru_lock.
36557+ * @seq_valid: The value of @val_seq is valid. This value is protected by
36558+ * the bo_device::lru_lock.
36559+ * @lru: List head for the lru list.
36560+ * @ddestroy: List head for the delayed destroy list.
36561+ * @swap: List head for swap LRU list.
36562+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
36563+ * pinned in physical memory. If this behaviour is not desired, this member
36564+ * holds a pointer to a persistant shmem object.
36565+ * @destroy: Destruction function. If NULL, kfree is used.
36566+ * @sync_obj_arg: Opaque argument to synchronization object function.
36567+ * @sync_obj: Pointer to a synchronization object.
36568+ * @priv_flags: Flags describing buffer object internal state.
36569+ * @event_queue: Queue for processes waiting on buffer object status change.
36570+ * @mutex: Lock protecting all members with the exception of constant members
36571+ * and list heads. We should really use a spinlock here.
36572+ * @num_pages: Actual number of pages.
36573+ * @ttm: TTM structure holding system pages.
36574+ * @vm_hash: Hash item for fast address space lookup. Need to change to a
36575+ * rb-tree node.
36576+ * @vm_node: Address space manager node.
36577+ * @addr_space_offset: Address space offset.
36578+ * @cpu_writers: For synchronization. Number of cpu writers.
36579+ * @reserved: Deadlock-free lock used for synchronization state transitions.
36580+ * @acc_size: Accounted size for this object.
36581+ *
36582+ * Base class for TTM buffer object, that deals with data placement and CPU
36583+ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
36584+ * the driver can usually use the placement offset @offset directly as the
36585+ * GPU virtual address. For drivers implementing multiple
36586+ * GPU memory manager contexts, the driver should manage the address space
36587+ * in these contexts separately and use these objects to get the correct
36588+ * placement and caching for these GPU maps. This makes it possible to use
36589+ * these objects for even quite elaborate memory management schemes.
36590+ * The destroy member and the API visibility of this object make it possible
36591+ * to derive driver-specific types.
36592+ */
36593+
36594+struct ttm_buffer_object {
36595+ struct ttm_bo_device *bdev;
36596+ struct kref kref;
36597+ struct kref list_kref;
36598+
36599+ /*
36600+ * If there is a possibility that the usage variable is zero,
36601+ * then dev->struct_mutex should be locked before incrementing it.
36602+ */
36603+
36604+ uint32_t proposed_flags;
36605+ unsigned long buffer_start;
36606+ enum ttm_bo_type type;
36607+ unsigned long offset;
36608+ struct ttm_mem_reg mem;
36609+ uint32_t val_seq;
36610+ bool seq_valid;
36611+
36612+ struct list_head lru;
36613+ struct list_head ddestroy;
36614+ struct list_head swap;
36615+
36616+ struct file *persistant_swap_storage;
36617+
36618+ void (*destroy) (struct ttm_buffer_object *);
36619+
36620+ void *sync_obj_arg;
36621+ void *sync_obj;
36622+
36623+ uint32_t priv_flags;
36624+ wait_queue_head_t event_queue;
36625+ struct mutex mutex;
36626+ unsigned long num_pages;
36627+
36628+ struct ttm_tt *ttm;
36629+ struct rb_node vm_rb;
36630+ struct drm_mm_node *vm_node;
36631+ uint64_t addr_space_offset;
36632+
36633+ atomic_t cpu_writers;
36634+ atomic_t reserved;
36635+
36636+ size_t acc_size;
36637+};
36638+
36639+/**
36640+ * struct ttm_bo_kmap_obj
36641+ *
36642+ * @virtual: The current kernel virtual address.
36643+ * @page: The page when kmap'ing a single page.
36644+ * @bo_kmap_type: Type of bo_kmap.
36645+ *
36646+ * Object describing a kernel mapping. Since a TTM bo may be located
36647+ * in various memory types with various caching policies, the
36648+ * mapping can either be an ioremap, a vmap, a kmap or part of a
36649+ * premapped region.
36650+ */
36651+
36652+struct ttm_bo_kmap_obj {
36653+ void *virtual;
36654+ struct page *page;
36655+ enum {
36656+ ttm_bo_map_iomap,
36657+ ttm_bo_map_vmap,
36658+ ttm_bo_map_kmap,
36659+ ttm_bo_map_premapped,
36660+ } bo_kmap_type;
36661+};
36662+
36663+/**
36664+ * ttm_bo_reference - reference a struct ttm_buffer_object
36665+ *
36666+ * @bo: The buffer object.
36667+ *
36668+ * Returns a refcounted pointer to a buffer object.
36669+ */
36670+
36671+static inline struct ttm_buffer_object *ttm_bo_reference(struct
36672+ ttm_buffer_object *bo)
36673+{
36674+ kref_get(&bo->kref);
36675+ return bo;
36676+}
36677+
36678+/**
36679+ * ttm_bo_wait - wait for buffer idle.
36680+ *
36681+ * @bo: The buffer object.
36682+ * @interruptible: Use interruptible wait.
36683+ * @no_wait: Return immediately if buffer is busy.
36684+ *
36685+ * This function must be called with the bo::mutex held, and makes
36686+ * sure any previous rendering to the buffer is completed.
36687+ * Note: It might be necessary to block validations before the
36688+ * wait by reserving the buffer.
36689+ * Returns -EBUSY if no_wait is true and the buffer is busy.
36690+ * Returns -ERESTART if interrupted by a signal.
36691+ */
36692+extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
36693+ bool interruptible, bool no_wait);
36694+/**
36695+ * ttm_buffer_object_validate
36696+ *
36697+ * @bo: The buffer object.
36698+ * @interruptible: Sleep interruptible if sleeping.
36699+ * @no_wait: Return immediately if the buffer is busy.
36700+ *
36701+ * Changes placement and caching policy of the buffer object
36702+ * according to bo::proposed_flags.
36703+ * Returns
36704+ * -EINVAL on invalid proposed_flags.
36705+ * -ENOMEM on out-of-memory condition.
36706+ * -EBUSY if no_wait is true and buffer busy.
36707+ * -ERESTART if interrupted by a signal.
36708+ */
36709+extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
36710+ bool interruptible, bool no_wait);
36711+/**
36712+ * ttm_bo_unref
36713+ *
36714+ * @bo: The buffer object.
36715+ *
36716+ * Unreference and clear a pointer to a buffer object.
36717+ */
36718+extern void ttm_bo_unref(struct ttm_buffer_object **bo);
36719+
36720+/**
36721+ * ttm_bo_synccpu_write_grab
36722+ *
36723+ * @bo: The buffer object.
36724+ * @no_wait: Return immediately if buffer is busy.
36725+ *
36726+ * Synchronizes a buffer object for CPU RW access. This means
36727+ * blocking command submission that affects the buffer and
36728+ * waiting for buffer idle. This lock is recursive.
36729+ * Returns
36730+ * -EBUSY if the buffer is busy and no_wait is true.
36731+ * -ERESTART if interrupted by a signal.
36732+ */
36733+
36734+extern int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
36735+/**
36736+ * ttm_bo_synccpu_write_release:
36737+ *
36738+ * @bo: The buffer object.
36739+ *
36740+ * Releases a synccpu lock.
36741+ */
36742+extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
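A short sketch of the intended grab/release pairing around direct CPU access follows; my_cpu_fill and the elided data access are assumptions, only the two synccpu calls come from this header:

static int my_cpu_fill(struct ttm_buffer_object *bo)
{
	int ret;

	/* Blocks command submission affecting bo and waits for idle;
	 * fails with -EBUSY (no_wait) or -ERESTART (signal). */
	ret = ttm_bo_synccpu_write_grab(bo, false);
	if (ret)
		return ret;

	/* ... CPU access to the buffer contents goes here ... */

	ttm_bo_synccpu_write_release(bo);
	return 0;
}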
36743+
36744+/**
36745+ * ttm_buffer_object_init
36746+ *
36747+ * @bdev: Pointer to a ttm_bo_device struct.
36748+ * @bo: Pointer to a ttm_buffer_object to be initialized.
36749+ * @size: Requested size of buffer object.
36750+ * @type: Requested type of buffer object.
36751+ * @flags: Initial placement flags.
36752+ * @page_alignment: Data alignment in pages.
36753+ * @buffer_start: Virtual address of user space data backing a
36754+ * user buffer object.
36755+ * @interruptible: If needing to sleep to wait for GPU resources,
36756+ * sleep interruptible.
36757+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
36758+ * pinned in physical memory. If this behaviour is not desired, this member
36759+ * holds a pointer to a persistant shmem object. Typically, this would
36760+ * point to the shmem object backing a GEM object if TTM is used to back a
36761+ * GEM user interface.
36762+ * @acc_size: Accounted size for this object.
36763+ * @destroy: Destroy function. Use NULL for kfree().
36764+ *
36765+ * This function initializes a pre-allocated struct ttm_buffer_object.
36766+ * As this object may be part of a larger structure, this function,
36767+ * together with the @destroy function,
36768+ * enables driver-specific objects derived from a ttm_buffer_object.
36769+ * On successful return, the object kref and list_kref are set to 1.
36770+ * Returns
36771+ * -ENOMEM: Out of memory.
36772+ * -EINVAL: Invalid placement flags.
36773+ * -ERESTART: Interrupted by signal while sleeping waiting for resources.
36774+ */
36775+
36776+extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
36777+ struct ttm_buffer_object *bo,
36778+ unsigned long size,
36779+ enum ttm_bo_type type,
36780+ uint32_t flags,
36781+ uint32_t page_alignment,
36782+ unsigned long buffer_start,
36783+				  bool interruptible,
36784+ struct file *persistant_swap_storage,
36785+ size_t acc_size,
36786+ void (*destroy) (struct ttm_buffer_object *));
36787+/**
36788+ * ttm_buffer_object_create
36789+ *
36790+ * @bdev: Pointer to a ttm_bo_device struct.
36791+ * @bo: Pointer to a ttm_buffer_object to be initialized.
36792+ * @size: Requested size of buffer object.
36793+ * @type: Requested type of buffer object.
36794+ * @flags: Initial placement flags.
36795+ * @page_alignment: Data alignment in pages.
36796+ * @buffer_start: Virtual address of user space data backing a
36797+ * user buffer object.
36798+ * @interruptible: If needing to sleep while waiting for GPU resources,
36799+ * sleep interruptible.
36800+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
36801+ * pinned in physical memory. If this behaviour is not desired, this member
36802+ * holds a pointer to a persistant shmem object. Typically, this would
36803+ * point to the shmem object backing a GEM object if TTM is used to back a
36804+ * GEM user interface.
36805+ * @p_bo: On successful completion *p_bo points to the created object.
36806+ *
36807+ * This function allocates a ttm_buffer_object, and then calls
36808+ * ttm_buffer_object_init on that object.
36809+ * The destroy function is set to kfree().
36810+ * Returns
36811+ * -ENOMEM: Out of memory.
36812+ * -EINVAL: Invalid placement flags.
36813+ * -ERESTART: Interrupted by signal while waiting for resources.
36814+ */
36815+
36816+extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
36817+ unsigned long size,
36818+ enum ttm_bo_type type,
36819+ uint32_t flags,
36820+ uint32_t page_alignment,
36821+ unsigned long buffer_start,
36822+ bool interruptible,
36823+ struct file *persistant_swap_storage,
36824+ struct ttm_buffer_object **p_bo);
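For illustration, a minimal call of ttm_buffer_object_create for a one-page, cached, system-placed kernel buffer might look as below; the wrapper name and the choice of placement flags are assumptions:

static int my_create_kernel_bo(struct ttm_bo_device *bdev,
			       struct ttm_buffer_object **p_bo)
{
	return ttm_buffer_object_create(bdev, PAGE_SIZE, ttm_bo_type_kernel,
					TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
					0,	/* page_alignment */
					0,	/* buffer_start */
					true,	/* interruptible */
					NULL,	/* persistant_swap_storage */
					p_bo);
}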
36825+
36826+/**
36827+ * ttm_bo_check_placement
36828+ *
36829+ * @bo: the buffer object.
36830+ * @set_flags: placement flags to set.
36831+ * @clr_flags: placement flags to clear.
36832+ *
36833+ * Performs minimal validity checking on an intended change of
36834+ * placement flags.
36835+ * Returns
36836+ * -EINVAL: Intended change is invalid or not allowed.
36837+ */
36838+
36839+extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
36840+ uint32_t set_flags, uint32_t clr_flags);
36841+
36842+/**
36843+ * ttm_bo_init_mm
36844+ *
36845+ * @bdev: Pointer to a ttm_bo_device struct.
36846+ * @mem_type: The memory type.
36847+ * @p_offset: offset for managed area in pages.
36848+ * @p_size: size managed area in pages.
36849+ *
36850+ * Initialize a manager for a given memory type.
36851+ * Note: if part of driver firstopen, it must be protected from a
36852+ * potentially racing lastclose.
36853+ * Returns:
36854+ * -EINVAL: invalid size or memory type.
36855+ * -ENOMEM: Not enough memory.
36856+ * May also return driver-specified errors.
36857+ */
36858+
36859+extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
36860+ unsigned long p_offset, unsigned long p_size);
36861+/**
36862+ * ttm_bo_clean_mm
36863+ *
36864+ * @bdev: Pointer to a ttm_bo_device struct.
36865+ * @mem_type: The memory type.
36866+ *
36867+ * Take down a manager for a given memory type after first walking
36868+ * the LRU list to evict any buffers left alive.
36869+ *
36870+ * Normally, this function is part of lastclose() or unload(), and at that
36871+ * point there shouldn't be any buffers left created by user-space, since
36872+ * they should've been removed by the file descriptor release() method.
36873+ * However, before this function is run, make sure to signal all sync objects,
36874+ * and verify that the delayed delete queue is empty. The driver must also
36875+ * make sure that there are no NO_EVICT buffers present in this memory type
36876+ * when the call is made.
36877+ *
36878+ * If this function is part of a VT switch, the caller must make sure that
36879+ * there are no applications currently validating buffers before this
36880+ * function is called. The caller can do that by first taking the
36881+ * struct ttm_bo_device::ttm_lock in write mode.
36882+ *
36883+ * Returns:
36884+ * -EINVAL: invalid or uninitialized memory type.
36885+ * -EBUSY: There are still buffers left in this memory type.
36886+ */
36887+
36888+extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
36889+
36890+/**
36891+ * ttm_bo_evict_mm
36892+ *
36893+ * @bdev: Pointer to a ttm_bo_device struct.
36894+ * @mem_type: The memory type.
36895+ *
36896+ * Evicts all buffers on the lru list of the memory type.
36897+ * This is normally part of a VT switch or an
36898+ * out-of-memory-space-due-to-fragmentation handler.
36899+ * The caller must make sure that there are no other processes
36900+ * currently validating buffers, and can do that by taking the
36901+ * struct ttm_bo_device::ttm_lock in write mode.
36902+ *
36903+ * Returns:
36904+ * -EINVAL: Invalid or uninitialized memory type.
36905+ * -ERESTART: The call was interrupted by a signal while waiting to
36906+ * evict a buffer.
36907+ */
36908+
36909+extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
36910+
36911+/**
36912+ * ttm_kmap_obj_virtual
36913+ *
36914+ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
36915+ * @is_iomem: Pointer to an integer that on return indicates 1 if the
36916+ * virtual map is io memory, 0 if normal memory.
36917+ *
36918+ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
36919+ * If *is_iomem is 1 on return, the virtual address points to an io memory area
36920+ * that should strictly be accessed by the iowriteXX() and similar functions.
36921+ */
36922+
36923+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
36924+ bool *is_iomem)
36925+{
36926+ *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
36927+ map->bo_kmap_type == ttm_bo_map_premapped);
36928+ return map->virtual;
36929+}
36930+
36931+/**
36932+ * ttm_bo_kmap
36933+ *
36934+ * @bo: The buffer object.
36935+ * @start_page: The first page to map.
36936+ * @num_pages: Number of pages to map.
36937+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
36938+ *
36939+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
36940+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
36941+ * used to obtain a virtual address to the data.
36942+ *
36943+ * Returns
36944+ * -ENOMEM: Out of memory.
36945+ * -EINVAL: Invalid range.
36946+ */
36947+
36948+extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
36949+ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
36950+
36951+/**
36952+ * ttm_bo_kunmap
36953+ *
36954+ * @map: Object describing the map to unmap.
36955+ *
36956+ * Unmaps a kernel map set up by ttm_bo_kmap.
36957+ */
36958+
36959+extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
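A hedged sketch of the kmap/kunmap pattern described above, clearing the first page of a buffer object; the helper name is an assumption and the caller is assumed to hold whatever reservation and idle guarantees the driver requires:

static int my_clear_first_page(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virt;
	int ret;

	ret = ttm_bo_kmap(bo, 0, 1, &map);	/* map one page, starting at page 0 */
	if (ret)
		return ret;

	virt = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)virt, 0, PAGE_SIZE);
	else
		memset(virt, 0, PAGE_SIZE);

	ttm_bo_kunmap(&map);
	return 0;
}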
36960+
36961+#if 0
36962+#endif
36963+
36964+/**
36965+ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
36966+ *
36967+ * @vma: vma as input from the fbdev mmap method.
36968+ * @bo: The bo backing the address space. The address space will
36969+ * have the same size as the bo, and start at offset 0.
36970+ *
36971+ * This function is intended to be called by the fbdev mmap method
36972+ * if the fbdev address space is to be backed by a bo.
36973+ */
36974+
36975+extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
36976+ struct ttm_buffer_object *bo);
36977+
36978+/**
36979+ * ttm_bo_mmap - mmap out of the ttm device address space.
36980+ *
36981+ * @filp: filp as input from the mmap method.
36982+ * @vma: vma as input from the mmap method.
36983+ * @bdev: Pointer to the ttm_bo_device with the address space manager.
36984+ *
36985+ * This function is intended to be called by the device mmap method
36986+ * if the device address space is to be backed by the bo manager.
36987+ */
36988+
36989+extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
36990+ struct ttm_bo_device *bdev);
36991+
36992+/**
36993+ * ttm_bo_io
36994+ *
36995+ * @bdev: Pointer to the struct ttm_bo_device.
36996+ * @filp: Pointer to the struct file attempting to read / write.
36997+ * @wbuf: User-space pointer to address of buffer to write. NULL on read.
36998+ * @rbuf: User-space pointer to address of buffer to read into. Null on write.
36999+ * @count: Number of bytes to read / write.
37000+ * @f_pos: Pointer to current file position.
37001+ * @write: 1 for write, 0 for read.
37002+ *
37003+ * This function implements read / write into ttm buffer objects, and is intended to
37004+ * be called from the fops::read and fops::write methods.
37005+ * Returns:
37006+ * See man (2) write, man(2) read. In particular, the function may return -EINTR if
37007+ * interrupted by a signal.
37008+ */
37009+
37010+extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
37011+ const char __user * wbuf, char __user * rbuf,
37012+ size_t count, loff_t * f_pos, bool write);
37013+
37014+extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
37015+
37016+#endif
37017diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h
37018new file mode 100644
37019index 0000000..f7efb45
37020--- /dev/null
37021+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h
37022@@ -0,0 +1,859 @@
37023+/**************************************************************************
37024+ *
37025+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
37026+ * All Rights Reserved.
37027+ * Copyright (c) 2009 Vmware, Inc., Palo Alto, CA., USA
37028+ * All Rights Reserved.
37029+ *
37030+ * Permission is hereby granted, free of charge, to any person obtaining a
37031+ * copy of this software and associated documentation files (the
37032+ * "Software"), to deal in the Software without restriction, including
37033+ * without limitation the rights to use, copy, modify, merge, publish,
37034+ * distribute, sub license, and/or sell copies of the Software, and to
37035+ * permit persons to whom the Software is furnished to do so, subject to
37036+ * the following conditions:
37037+ *
37038+ * The above copyright notice and this permission notice (including the
37039+ * next paragraph) shall be included in all copies or substantial portions
37040+ * of the Software.
37041+ *
37042+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37043+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37044+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
37045+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
37046+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37047+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
37048+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
37049+ *
37050+ **************************************************************************/
37051+/*
37052+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
37053+ */
37054+#ifndef _TTM_BO_DRIVER_H_
37055+#define _TTM_BO_DRIVER_H_
37056+
37057+#include "ttm/ttm_bo_api.h"
37058+#include "ttm/ttm_memory.h"
37059+#include <drm/drm_mm.h>
37060+#include "linux/workqueue.h"
37061+#include "linux/fs.h"
37062+#include "linux/spinlock.h"
37063+
37064+struct ttm_backend;
37065+
37066+struct ttm_backend_func {
37067+ /**
37068+ * struct ttm_backend_func member populate
37069+ *
37070+ * @backend: Pointer to a struct ttm_backend.
37071+ * @num_pages: Number of pages to populate.
37072+ * @pages: Array of pointers to ttm pages.
37073+ * @dummy_read_page: Page to be used instead of NULL pages in the
37074+ * array @pages.
37075+ *
37076+ * Populate the backend with ttm pages. Depending on the backend,
37077+ * it may or may not copy the @pages array.
37078+ */
37079+ int (*populate) (struct ttm_backend * backend,
37080+ unsigned long num_pages, struct page ** pages,
37081+ struct page * dummy_read_page);
37082+ /**
37083+ * struct ttm_backend_func member clear
37084+ *
37085+ * @backend: Pointer to a struct ttm_backend.
37086+ *
37087+ * This is an "unpopulate" function. Release all resources
37088+ * allocated with populate.
37089+ */
37090+ void (*clear) (struct ttm_backend * backend);
37091+
37092+ /**
37093+ * struct ttm_backend_func member bind
37094+ *
37095+ * @backend: Pointer to a struct ttm_backend.
37096+ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
37097+ * memory type and location for binding.
37098+ *
37099+ * Bind the backend pages into the aperture in the location
37100+ * indicated by @bo_mem. This function should be able to handle
37101+ * differences between aperture- and system page sizes.
37102+ */
37103+ int (*bind) (struct ttm_backend * backend, struct ttm_mem_reg * bo_mem);
37104+
37105+ /**
37106+ * struct ttm_backend_func member unbind
37107+ *
37108+ * @backend: Pointer to a struct ttm_backend.
37109+ *
37110+ * Unbind previously bound backend pages. This function should be
37111+ * able to handle differences between aperture- and system page sizes.
37112+ */
37113+ int (*unbind) (struct ttm_backend * backend);
37114+
37115+ /**
37116+ * struct ttm_backend_func member destroy
37117+ *
37118+ * @backend: Pointer to a struct ttm_backend.
37119+ *
37120+ * Destroy the backend.
37121+ */
37122+ void (*destroy) (struct ttm_backend * backend);
37123+};
37124+
37125+/**
37126+ * struct ttm_backend
37127+ *
37128+ * @bdev: Pointer to a struct ttm_bo_device.
37129+ * @flags: For driver use.
37130+ * @func: Pointer to a struct ttm_backend_func that describes
37131+ * the backend methods.
37132+ *
37133+ */
37134+
37135+struct ttm_backend {
37136+ struct ttm_bo_device *bdev;
37137+ uint32_t flags;
37138+ struct ttm_backend_func *func;
37139+};
37140+
37141+#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
37142+#define TTM_PAGE_FLAG_USER (1 << 1)
37143+#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
37144+#define TTM_PAGE_FLAG_WRITE (1 << 3)
37145+#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
37146+#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
37147+
37148+enum ttm_caching_state {
37149+ tt_uncached,
37150+ tt_wc,
37151+ tt_cached
37152+};
37153+
37154+/**
37155+ * struct ttm_tt
37156+ *
37157+ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
37158+ * pointer.
37159+ * @pages: Array of pages backing the data.
37160+ * @first_himem_page: Himem pages are put last in the page array, which
37161+ * enables us to run caching attribute changes on only the first part
37162+ * of the page array containing lomem pages. This is the index of the
37163+ * first himem page.
37164+ * @last_lomem_page: Index of the last lomem page in the page array.
37165+ * @num_pages: Number of pages in the page array.
37166+ * @bdev: Pointer to the current struct ttm_bo_device.
37167+ * @be: Pointer to the ttm backend.
37168+ * @tsk: The task for user ttm.
37169+ * @start: virtual address for user ttm.
37170+ * @swap_storage: Pointer to shmem struct file for swap storage.
37171+ * @caching_state: The current caching state of the pages.
37172+ * @state: The current binding state of the pages.
37173+ *
37174+ * This is a structure holding the pages, caching- and aperture binding
37175+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
37176+ * memory.
37177+ */
37178+
37179+struct ttm_tt {
37180+ struct page *dummy_read_page;
37181+ struct page **pages;
37182+ long first_himem_page;
37183+ long last_lomem_page;
37184+ uint32_t page_flags;
37185+ unsigned long num_pages;
37186+ struct ttm_bo_device *bdev;
37187+ struct ttm_backend *be;
37188+ struct task_struct *tsk;
37189+ unsigned long start;
37190+ struct file *swap_storage;
37191+ enum ttm_caching_state caching_state;
37192+ enum {
37193+ tt_bound,
37194+ tt_unbound,
37195+ tt_unpopulated,
37196+ } state;
37197+};
37198+
37199+#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
37200+#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
37201+#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
37202+ before kernel access. */
37203+#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
37204+
37205+/**
37206+ * struct ttm_mem_type_manager
37207+ *
37208+ * @has_type: The memory type has been initialized.
37209+ * @use_type: The memory type is enabled.
37210+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
37211+ * managed by this memory type.
37212+ * @gpu_offset: If used, the GPU offset of the first managed page of
37213+ * fixed memory or the first managed location in an aperture.
37214+ * @io_offset: The io_offset of the first managed page of IO memory or
37215+ * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
37216+ * memory, this should be set to NULL.
37217+ * @io_size: The size of a managed IO region (fixed memory or aperture).
37218+ * @io_addr: Virtual kernel address if the io region is pre-mapped. For
37219+ * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
37220+ * @io_addr should be set to NULL.
37221+ * @size: Size of the managed region.
37222+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
37223+ * as defined in ttm_placement_common.h
37224+ * @default_caching: The default caching policy used for a buffer object
37225+ * placed in this memory type if the user doesn't provide one.
37226+ * @manager: The range manager used for this memory type. FIXME: If the aperture
37227+ * has a page size different from the underlying system, the granularity
37228+ * of this manager should take care of this. But the range allocating code
37229+ * in ttm_bo.c needs to be modified for this.
37230+ * @lru: The lru list for this memory type.
37231+ *
37232+ * This structure is used to identify and manage memory types for a device.
37233+ * It's set up by the ttm_bo_driver::init_mem_type method.
37234+ */
37235+
37236+struct ttm_mem_type_manager {
37237+
37238+ /*
37239+ * No protection. Constant from start.
37240+ */
37241+
37242+ bool has_type;
37243+ bool use_type;
37244+ uint32_t flags;
37245+ unsigned long gpu_offset;
37246+ unsigned long io_offset;
37247+ unsigned long io_size;
37248+ void *io_addr;
37249+ uint64_t size;
37250+ uint32_t available_caching;
37251+ uint32_t default_caching;
37252+
37253+ /*
37254+ * Protected by the bdev->lru_lock.
37255+ * TODO: Consider one lru_lock per ttm_mem_type_manager.
37256+ * Plays ill with list removal, though.
37257+ */
37258+
37259+ struct drm_mm manager;
37260+ struct list_head lru;
37261+};
37262+
37263+/**
37264+ * struct ttm_bo_driver
37265+ *
37266+ * @mem_type_prio: Priority array of memory types to place a buffer object in
37267+ * if it fits without evicting buffers from any of these memory types.
37268+ * @mem_busy_prio: Priority array of memory types to place a buffer object in
37269+ * if it needs to evict buffers to make room.
37270+ * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
37271+ * @num_mem_busy_prio: Number of elements in the @mem_busy_prio array.
37272+ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
37273+ * @invalidate_caches: Callback to invalidate read caches when a buffer object
37274+ * has been evicted.
37275+ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager structure.
37276+ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
37277+ * @move: Callback for a driver to hook in accelerated functions to move a buffer.
37278+ * If set to NULL, a potentially slow memcpy() move is used.
37279+ * @sync_obj_signaled: See ttm_fence_api.h
37280+ * @sync_obj_wait: See ttm_fence_api.h
37281+ * @sync_obj_flush: See ttm_fence_api.h
37282+ * @sync_obj_unref: See ttm_fence_api.h
37283+ * @sync_obj_ref: See ttm_fence_api.h
37284+ */
37285+
37286+struct ttm_bo_driver {
37287+ const uint32_t *mem_type_prio;
37288+ const uint32_t *mem_busy_prio;
37289+ uint32_t num_mem_type_prio;
37290+ uint32_t num_mem_busy_prio;
37291+
37292+ /**
37293+ * struct ttm_bo_driver member create_ttm_backend_entry
37294+ *
37295+ * @bdev: The buffer object device.
37296+ *
37297+ * Create a driver specific struct ttm_backend.
37298+ */
37299+
37300+ struct ttm_backend *(*create_ttm_backend_entry)
37301+ (struct ttm_bo_device * bdev);
37302+
37303+ /**
37304+ * struct ttm_bo_driver member invalidate_caches
37305+ *
37306+ * @bdev: the buffer object device.
37307+ * @flags: new placement of the rebound buffer object.
37308+ *
37309+ * A previously evicted buffer has been rebound in a
37310+ * potentially new location. Tell the driver that it might
37311+ * consider invalidating read (texture) caches on the next command
37312+ * submission as a consequence.
37313+ */
37314+
37315+ int (*invalidate_caches) (struct ttm_bo_device * bdev, uint32_t flags);
37316+ int (*init_mem_type) (struct ttm_bo_device * bdev, uint32_t type,
37317+ struct ttm_mem_type_manager * man);
37318+ /**
37319+ * struct ttm_bo_driver member evict_flags:
37320+ *
37321+ * @bo: the buffer object to be evicted
37322+ *
37323+ * Return the bo flags for a buffer which is not mapped to the hardware.
37324+ * These will be placed in proposed_flags so that when the move is
37325+ * finished, they'll end up in bo->mem.flags
37326+ */
37327+
37328+ uint32_t(*evict_flags) (struct ttm_buffer_object * bo);
37329+ /**
37330+ * struct ttm_bo_driver member move:
37331+ *
37332+ * @bo: the buffer to move
37333+ * @evict: whether this motion is evicting the buffer from
37334+ * the graphics address space
37335+ * @interruptible: Use interruptible sleeps if possible when sleeping.
37336+ * @no_wait: whether this should give up and return -EBUSY
37337+ * if this move would require sleeping
37338+ * @new_mem: the new memory region receiving the buffer
37339+ *
37340+ * Move a buffer between two memory regions.
37341+ */
37342+ int (*move) (struct ttm_buffer_object * bo,
37343+ bool evict, bool interruptible,
37344+ bool no_wait, struct ttm_mem_reg * new_mem);
37345+
37346+ /**
37347+ * struct ttm_bo_driver_member verify_access
37348+ *
37349+ * @bo: Pointer to a buffer object.
37350+ * @filp: Pointer to a struct file trying to access the object.
37351+ *
37352+ * Called from the map / write / read methods to verify that the
37353+ * caller is permitted to access the buffer object.
37354+ * This member may be set to NULL, which will refuse this kind of
37355+ * access for all buffer objects.
37356+ * This function should return 0 if access is granted, -EPERM otherwise.
37357+ */
37358+ int (*verify_access) (struct ttm_buffer_object * bo,
37359+ struct file * filp);
37360+
37361+ /**
37362+ * In case a driver writer dislikes the TTM fence objects,
37363+ * the driver writer can replace those with sync objects of
37364+ * his / her own. If it turns out that no driver writer is
37365+ * using these, I suggest we remove these hooks and plug in
37366+ * fences directly. The bo driver needs the following functionality:
37367+ * See the corresponding functions in the fence object API
37368+ * documentation.
37369+ */
37370+
37371+ bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
37372+ int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
37373+ bool lazy, bool interruptible);
37374+ int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
37375+ void (*sync_obj_unref) (void **sync_obj);
37376+ void *(*sync_obj_ref) (void *sync_obj);
37377+};
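/*
 * A hedged, illustrative sketch (not part of this patch) of a minimal
 * struct ttm_bo_driver. All "example_*" names are invented; a real driver
 * (such as the psb driver elsewhere in this series) supplies its own hooks
 * and normally also wires up the sync_obj_* members to its fencing code.
 * The value 0 below stands in for the system placement defined in
 * ttm_placement_common.h.
 */
static const uint32_t example_mem_prio[] = { 0 /* system placement */ };

static struct ttm_backend *example_create_backend(struct ttm_bo_device *bdev)
{
	/* A real driver allocates and returns its own struct ttm_backend. */
	return NULL;
}

static int example_invalidate_caches(struct ttm_bo_device *bdev,
				     uint32_t flags)
{
	/* Nothing to invalidate in this sketch. */
	return 0;
}

static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				 struct ttm_mem_type_manager *man)
{
	/* A real driver fills in @man (flags, caching, sizes) per @type. */
	return 0;
}

static uint32_t example_evict_flags(struct ttm_buffer_object *bo)
{
	/* A real driver returns TTM_PL_FLAG_* placement flags here. */
	return 0;
}

static struct ttm_bo_driver example_bo_driver = {
	.mem_type_prio = example_mem_prio,
	.mem_busy_prio = example_mem_prio,
	.num_mem_type_prio = 1,
	.num_mem_busy_prio = 1,
	.create_ttm_backend_entry = example_create_backend,
	.invalidate_caches = example_invalidate_caches,
	.init_mem_type = example_init_mem_type,
	.evict_flags = example_evict_flags,
	.move = NULL,		/* fall back to the memcpy move path */
	.verify_access = NULL,	/* refuse map/read/write access */
};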
37378+
37379+#define TTM_NUM_MEM_TYPES 11
37380+
37381+#define TTM_BO_PRIV_FLAG_EVICTED (1 << 0) /* Buffer object is evicted. */
37382+#define TTM_BO_PRIV_FLAG_MOVING (1 << 1) /* Buffer object is moving and needs
37383+ idling before CPU mapping */
37384+/**
37385+ * struct ttm_bo_device - Buffer object driver device-specific data.
37386+ *
37387+ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
37388+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
37389+ * @count: Current number of buffer objects.
37390+ * @pages: Current number of pinned pages.
37391+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
37392+ * of unpopulated pages.
37393+ * @shrink: A shrink callback object used for buffer object swap.
37394+ * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
37395+ * used by a buffer object. This is excluding page arrays and backing pages.
37396+ * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
37397+ * @man: An array of mem_type_managers.
37398+ * @addr_space_mm: Range manager for the device address space.
37399+ * @lru_lock: Spinlock that protects the buffer+device lru lists and
37400+ * ddestroy lists.
37401+ * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
37402+ * If a GPU lockup has been detected, this is forced to 0.
37403+ * @dev_mapping: A pointer to the struct address_space representing the
37404+ * device address space.
37405+ * @wq: Work queue structure for the delayed delete workqueue.
37406+ *
37407+ */
37408+
37409+struct ttm_bo_device {
37410+
37411+ /*
37412+ * Constant after bo device init / atomic.
37413+ */
37414+
37415+ struct ttm_mem_global *mem_glob;
37416+ struct ttm_bo_driver *driver;
37417+ struct page *dummy_read_page;
37418+ struct ttm_mem_shrink shrink;
37419+
37420+ size_t ttm_bo_extra_size;
37421+ size_t ttm_bo_size;
37422+
37423+ rwlock_t vm_lock;
37424+ /*
37425+ * Protected by the vm lock.
37426+ */
37427+ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
37428+ struct rb_root addr_space_rb;
37429+ struct drm_mm addr_space_mm;
37430+
37431+ /*
37432+ * Might want to change this to one lock per manager.
37433+ */
37434+ spinlock_t lru_lock;
37435+ /*
37436+ * Protected by the lru lock.
37437+ */
37438+ struct list_head ddestroy;
37439+ struct list_head swap_lru;
37440+
37441+ /*
37442+ * Protected by load / firstopen / lastclose /unload sync.
37443+ */
37444+
37445+ bool nice_mode;
37446+ struct address_space *dev_mapping;
37447+
37448+ /*
37449+ * Internal protection.
37450+ */
37451+
37452+ struct delayed_work wq;
37453+};
37454+
37455+/**
37456+ * ttm_flag_masked
37457+ *
37458+ * @old: Pointer to the result and original value.
37459+ * @new: New value of bits.
37460+ * @mask: Mask of bits to change.
37461+ *
37462+ * Convenience function to change a number of bits identified by a mask.
37463+ */
37464+
37465+static inline uint32_t
37466+ttm_flag_masked(uint32_t * old, uint32_t new, uint32_t mask)
37467+{
37468+ *old ^= (*old ^ new) & mask;
37469+ return *old;
37470+}
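/*
 * A hedged, illustrative example (not part of this patch) of what
 * ttm_flag_masked() computes. The numeric values are made up.
 */
static inline void ttm_flag_masked_example(void)
{
	uint32_t flags = 0x0f;

	/* Replace bits 4-7 with 0xa0; bits 0-3 are left untouched. */
	ttm_flag_masked(&flags, 0xa0, 0xf0);	/* flags == 0xaf */

	/* Clear bit 0 only: new value 0, mask selects just that bit. */
	ttm_flag_masked(&flags, 0x00, 0x01);	/* flags == 0xae */
}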
37471+
37472+/**
37473+ * ttm_tt_create
37474+ *
37475+ * @bdev: Pointer to a struct ttm_bo_device.
37476+ * @size: Size of the data that needs backing.
37477+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
37478+ * @dummy_read_page: See struct ttm_bo_device.
37479+ *
37480+ * Create a struct ttm_tt to back data with system memory pages.
37481+ * No pages are actually allocated.
37482+ * Returns:
37483+ * NULL: Out of memory.
37484+ */
37485+extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
37486+ unsigned long size,
37487+ uint32_t page_flags,
37488+ struct page *dummy_read_page);
37489+
37490+/**
37491+ * ttm_tt_set_user:
37492+ *
37493+ * @ttm: The struct ttm_tt to populate.
37494+ * @tsk: A struct task_struct for which @start is a valid user-space address.
37495+ * @start: A valid user-space address.
37496+ * @num_pages: Size in pages of the user memory area.
37497+ *
37498+ * Populate a struct ttm_tt with a user-space memory area after first pinning
37499+ * the pages backing it.
37500+ * Returns:
37501+ * !0: Error.
37502+ */
37503+
37504+extern int ttm_tt_set_user(struct ttm_tt *ttm,
37505+ struct task_struct *tsk,
37506+ unsigned long start, unsigned long num_pages);
37507+
37508+/**
37509+ * ttm_tt_bind:
37510+ *
37511+ * @ttm: The struct ttm_tt containing backing pages.
37512+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
37513+ *
37514+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
37515+ */
37516+extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
37517+
37518+/**
37519+ * ttm_tt_destroy:
37520+ *
37521+ * @ttm: The struct ttm_tt.
37522+ *
37523+ * Unbind, unpopulate and destroy a struct ttm_tt.
37524+ */
37525+extern void ttm_tt_destroy(struct ttm_tt *ttm);
37526+
37527+/**
37528+ * ttm_tt_unbind:
37529+ *
37530+ * @ttm: The struct ttm_tt.
37531+ *
37532+ * Unbind a struct ttm_tt.
37533+ */
37534+extern void ttm_tt_unbind(struct ttm_tt *ttm);
37535+
37536+/**
37537+ * ttm_tt_get_page:
37538+ *
37539+ * @ttm: The struct ttm_tt.
37540+ * @index: Index of the desired page.
37541+ *
37542+ * Return a pointer to the struct page backing @ttm at page
37543+ * index @index. If the page is unpopulated, one will be allocated to
37544+ * populate that index.
37545+ *
37546+ * Returns:
37547+ * NULL on OOM.
37548+ */
37549+extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
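/*
 * A hedged, illustrative sketch (not part of this patch) of the ttm_tt
 * lifecycle declared above: create, bind, unbind, destroy. The caller,
 * the @bo_mem region and the size are hypothetical.
 */
static inline int ttm_tt_lifecycle_sketch(struct ttm_bo_device *bdev,
					  struct ttm_mem_reg *bo_mem,
					  unsigned long size)
{
	struct ttm_tt *ttm;
	int ret;

	/* Only bookkeeping is set up here; pages are allocated on demand. */
	ttm = ttm_tt_create(bdev, size, 0, bdev->dummy_read_page);
	if (unlikely(ttm == NULL))
		return -ENOMEM;

	/* Bind the (lazily populated) pages into the aperture at @bo_mem. */
	ret = ttm_tt_bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		goto out_destroy;

	/* ... the bound pages are now usable by the GPU ... */

	ttm_tt_unbind(ttm);
out_destroy:
	ttm_tt_destroy(ttm);
	return ret;
}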
37550+
37551+/**
37552+ * ttm_tt_cache_flush:
37553+ *
37554+ * @pages: An array of pointers to struct page:s to flush.
37555+ * @num_pages: Number of pages to flush.
37556+ *
37557+ * Flush the data of the indicated pages from the cpu caches.
37558+ * This is used when changing caching attributes of the pages from
37559+ * cache-coherent.
37560+ */
37561+extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
37562+
37563+/**
37564+ * ttm_tt_set_placement_caching:
37565+ *
37566+ * @ttm: A struct ttm_tt whose backing pages will change caching policy.
37567+ * @placement: Flag indicating the desired caching policy.
37568+ *
37569+ * This function will change caching policy of any default kernel mappings of
37570+ * the pages backing @ttm. If changing from cached to uncached or write-combined,
37571+ * all CPU caches will first be flushed to make sure the data of the pages
37572+ * hit RAM. This function may be very costly as it involves global TLB
37573+ * and cache flushes and potential page splitting / combining.
37574+ */
37575+extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
37576+extern int ttm_tt_swapout(struct ttm_tt *ttm,
37577+ struct file *persistant_swap_storage);
37578+
37579+/*
37580+ * ttm_bo.c
37581+ */
37582+
37583+/**
37584+ * ttm_mem_reg_is_pci
37585+ *
37586+ * @bdev: Pointer to a struct ttm_bo_device.
37587+ * @mem: A valid struct ttm_mem_reg.
37588+ *
37589+ * Returns true if the memory described by @mem is PCI memory,
37590+ * false otherwise.
37591+ */
37592+extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
37593+ struct ttm_mem_reg *mem);
37594+
37595+/**
37596+ * ttm_bo_mem_space
37597+ *
37598+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
37599+ * we want to allocate space for.
37600+ * @mem: A struct ttm_mem_reg with the struct ttm_mem_reg::proposed_flags set
37601+ * up.
37602+ * @interruptible: Sleep interruptibly while waiting for space.
37603+ * @no_wait: Don't sleep waiting for space to become available.
37604+ *
37605+ * Allocate memory space for the buffer object pointed to by @bo, using
37606+ * the placement flags in @mem, potentially evicting other idle buffer objects.
37607+ * This function may sleep while waiting for space to become available.
37608+ * Returns:
37609+ * -EBUSY: No space available (only if no_wait == 1).
37610+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
37611+ * fragmentation or concurrent allocators.
37612+ * -ERESTART: An interruptible sleep was interrupted by a signal.
37613+ */
37614+extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
37615+ struct ttm_mem_reg *mem,
37616+ bool interruptible, bool no_wait);
37617+/**
37618+ * ttm_bo_wait_cpu
37619+ *
37620+ * @bo: Pointer to a struct ttm_buffer_object.
37621+ * @no_wait: Don't sleep while waiting.
37622+ *
37623+ * Wait until a buffer object is no longer sync'ed for CPU access.
37624+ * Returns:
37625+ * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
37626+ * -ERESTART: An interruptible sleep was interrupted by a signal.
37627+ */
37628+
37629+extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
37630+
37631+/**
37632+ * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
37633+ *
37634+ * @bdev: Pointer to a struct ttm_bo_device.
37635+ * @bus_base: On return, the base of the PCI region.
37636+ * @bus_offset: On return, the byte offset into the PCI region.
37637+ * @bus_size: On return, the byte size of the buffer object, or zero if
37638+ * the buffer object memory is not accessible through a PCI region.
37639+ *
37640+ * Returns:
37641+ * -EINVAL if the buffer object is currently not mappable.
37642+ * 0 otherwise.
37643+ */
37644+
37645+extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
37646+ struct ttm_mem_reg *mem,
37647+ unsigned long *bus_base,
37648+ unsigned long *bus_offset,
37649+ unsigned long *bus_size);
37650+
37651+extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
37652+
37653+/**
37654+ * ttm_bo_device_init
37655+ *
37656+ * @bdev: A pointer to a struct ttm_bo_device to initialize.
37657+ * @mem_global: A pointer to an initialized struct ttm_mem_global.
37658+ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
37659+ * @file_page_offset: Offset into the device address space that is available
37660+ * for buffer data. This ensures compatibility with other users of the
37661+ * address space.
37662+ *
37663+ * Initializes a struct ttm_bo_device.
37664+ * Returns:
37665+ * !0: Failure.
37666+ */
37667+extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
37668+ struct ttm_mem_global *mem_glob,
37669+ struct ttm_bo_driver *driver,
37670+ uint64_t file_page_offset);
37671+
37672+/**
37673+ * ttm_bo_reserve:
37674+ *
37675+ * @bo: A pointer to a struct ttm_buffer_object.
37676+ * @interruptible: Sleep interruptible if waiting.
37677+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
37678+ * @use_sequence: If @bo is already reserved, only sleep waiting for
37679+ * it to become unreserved if @sequence < (@bo)->sequence.
37680+ *
37681+ * Locks a buffer object for validation. (Or prevents other processes from
37682+ * locking it for validation) and removes it from lru lists, while taking
37683+ * a number of measures to prevent deadlocks.
37684+ *
37685+ * Deadlocks may occur when two processes try to reserve multiple buffers in
37686+ * different order, either by will or as a result of a buffer being evicted
37687+ * to make room for a buffer already reserved. (Buffers are reserved before
37688+ * they are evicted). The following algorithm prevents such deadlocks from
37689+ * occurring:
37690+ * 1) Buffers are reserved with the lru spinlock held. Upon successful
37691+ * reservation they are removed from the lru list. This stops a reserved buffer
37692+ * from being evicted. However the lru spinlock is released between the time
37693+ * a buffer is selected for eviction and the time it is reserved.
37694+ * Therefore a check is made when a buffer is reserved for eviction, that it
37695+ * is still the first buffer in the lru list, before it is removed from the
37696+ * list. @check_lru == 1 forces this check. If it fails, the function returns
37697+ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
37698+ * the procedure.
37699+ * 2) Processes attempting to reserve multiple buffers other than for eviction
37700+ * (typically execbuf) should first obtain a unique 32-bit
37701+ * validation sequence number,
37702+ * and call this function with @use_sequence == 1 and @sequence == the unique
37703+ * sequence number. If upon call of this function, the buffer object is already
37704+ * reserved, the validation sequence is checked against the validation
37705+ * sequence of the process currently reserving the buffer,
37706+ * and if the current validation sequence is greater than that of the process
37707+ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
37708+ * waiting for the buffer to become unreserved, after which it retries reserving.
37709+ * The caller should, when receiving an -EAGAIN error
37710+ * release all its buffer reservations, wait for @bo to become unreserved, and
37711+ * then rerun the validation with the same validation sequence. This procedure
37712+ * will always guarantee that the process with the lowest validation sequence
37713+ * will eventually succeed, preventing both deadlocks and starvation.
37714+ *
37715+ * Returns:
37716+ * -EAGAIN: The reservation may cause a deadlock. Release all buffer reservations,
37717+ * wait for @bo to become unreserved and try again. (only if use_sequence == 1).
37718+ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
37719+ * a signal. Release all buffer reservations and return to user-space.
37720+ */
37721+extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
37722+ bool interruptible,
37723+ bool no_wait, bool use_sequence, uint32_t sequence);
37724+
37725+/**
37726+ * ttm_bo_unreserve
37727+ *
37728+ * @bo: A pointer to a struct ttm_buffer_object.
37729+ *
37730+ * Unreserve a previous reservation of @bo.
37731+ */
37732+extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
37733+
37734+/**
37735+ * ttm_bo_wait_unreserved
37736+ *
37737+ * @bo: A pointer to a struct ttm_buffer_object.
37738+ *
37739+ * Wait for a struct ttm_buffer_object to become unreserved.
37740+ * This is typically used in the execbuf code to relax cpu usage during
37741+ * a potential deadlock condition backoff.
37742+ */
37743+extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
37744+ bool interruptible);
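/*
 * A hedged sketch (not part of this patch) of the -EAGAIN back-off scheme
 * described for ttm_bo_reserve() above. The @bos array, @num and @val_seq
 * are hypothetical; a real execbuf path must also handle -ERESTART by
 * returning to user-space.
 */
static inline int ttm_reserve_list_sketch(struct ttm_buffer_object **bos,
					  unsigned int num, uint32_t val_seq)
{
	unsigned int i, j;
	int ret;

retry:
	for (i = 0; i < num; ++i) {
		ret = ttm_bo_reserve(bos[i], true, false, true, val_seq);
		if (ret == 0)
			continue;

		/* Drop every reservation we hold before waiting or failing. */
		for (j = 0; j < i; ++j)
			ttm_bo_unreserve(bos[j]);

		if (ret == -EAGAIN) {
			/* A lower sequence holds bos[i]: wait, then retry. */
			ret = ttm_bo_wait_unreserved(bos[i], true);
			if (ret == 0)
				goto retry;
		}
		return ret;
	}
	return 0;
}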
37745+
37746+/**
37747+ * ttm_bo_block_reservation
37748+ *
37749+ * @bo: A pointer to a struct ttm_buffer_object.
37750+ * @interruptible: Use interruptible sleep when waiting.
37751+ * @no_wait: Don't sleep, but rather return -EBUSY.
37752+ *
37753+ * Block reservation for validation by simply reserving the buffer. This is intended
37754+ * for single buffer use only without eviction, and thus needs no deadlock protection.
37755+ *
37756+ * Returns:
37757+ * -EBUSY: If no_wait == 1 and the buffer is already reserved.
37758+ * -ERESTART: If interruptible == 1 and the process received a signal while sleeping.
37759+ */
37760+extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
37761+ bool interruptible, bool no_wait);
37762+
37763+/**
37764+ * ttm_bo_unblock_reservation
37765+ *
37766+ * @bo: A pointer to a struct ttm_buffer_object.
37767+ *
37768+ * Unblocks reservation leaving lru lists untouched.
37769+ */
37770+extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
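/*
 * A hedged sketch (not part of this patch) of the single-buffer pairing of
 * ttm_bo_block_reservation() and ttm_bo_unblock_reservation(). The caller
 * is hypothetical.
 */
static inline int ttm_bo_cpu_touch_sketch(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_block_reservation(bo, true, false);
	if (unlikely(ret != 0))
		return ret;	/* -ERESTART: interrupted by a signal */

	/* ... access the buffer without it being validated elsewhere ... */

	ttm_bo_unblock_reservation(bo);
	return 0;
}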
37771+
37772+/*
37773+ * ttm_bo_util.c
37774+ */
37775+
37776+/**
37777+ * ttm_bo_move_ttm
37778+ *
37779+ * @bo: A pointer to a struct ttm_buffer_object.
37780+ * @evict: 1: This is an eviction. Don't try to pipeline.
37781+ * @no_wait: Never sleep, but rather return with -EBUSY.
37782+ * @new_mem: struct ttm_mem_reg indicating where to move.
37783+ *
37784+ * Optimized move function for a buffer object with both old and
37785+ * new placement backed by a TTM. The function will, if successful,
37786+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
37787+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
37788+ * data remains untouched, and it's up to the caller to free the
37789+ * memory space indicated by @new_mem.
37790+ * Returns:
37791+ * !0: Failure.
37792+ */
37793+
37794+extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
37795+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem);
37796+
37797+/**
37798+ * ttm_bo_move_memcpy
37799+ *
37800+ * @bo: A pointer to a struct ttm_buffer_object.
37801+ * @evict: 1: This is an eviction. Don't try to pipeline.
37802+ * @no_wait: Never sleep, but rather return with -EBUSY.
37803+ * @new_mem: struct ttm_mem_reg indicating where to move.
37804+ *
37805+ * Fallback move function for a mappable buffer object in mappable memory.
37806+ * The function will, if successful,
37807+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
37808+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
37809+ * data remains untouched, and it's up to the caller to free the
37810+ * memory space indicated by @new_mem.
37811+ * Returns:
37812+ * !0: Failure.
37813+ */
37814+
37815+extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
37816+ bool evict,
37817+ bool no_wait, struct ttm_mem_reg *new_mem);
37818+
37819+/**
37820+ * ttm_bo_free_old_node
37821+ *
37822+ * @bo: A pointer to a struct ttm_buffer_object.
37823+ *
37824+ * Utility function to free an old placement after a successful move.
37825+ */
37826+extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
37827+
37828+/**
37829+ * ttm_bo_move_accel_cleanup.
37830+ *
37831+ * @bo: A pointer to a struct ttm_buffer_object.
37832+ * @sync_obj: A sync object that signals when moving is complete.
37833+ * @sync_obj_arg: An argument to pass to the sync object idle / wait
37834+ * functions.
37835+ * @evict: This is an evict move. Don't return until the buffer is idle.
37836+ * @no_wait: Never sleep, but rather return with -EBUSY.
37837+ * @new_mem: struct ttm_mem_reg indicating where to move.
37838+ *
37839+ * Accelerated move function to be called when an accelerated move
37840+ * has been scheduled. The function will create a new temporary buffer object
37841+ * representing the old placement, and put the sync object on both buffer
37842+ * objects. After that the newly created buffer object is unref'd to be
37843+ * destroyed when the move is complete. This will help pipeline
37844+ * buffer moves.
37845+ */
37846+
37847+extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
37848+ void *sync_obj,
37849+ void *sync_obj_arg,
37850+ bool evict, bool no_wait,
37851+ struct ttm_mem_reg *new_mem);
37852+/**
37853+ * ttm_io_prot
37854+ *
37855+ * @c_state: Caching state.
37856+ * @tmp: Page protection flag for a normal, cached mapping.
37857+ *
37858+ * Utility function that returns the pgprot_t that should be used for
37859+ * setting up a PTE with the caching model indicated by @c_state.
37860+ */
37861+extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
37862+
37863+#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
37864+#define TTM_HAS_AGP
37865+#include <linux/agp_backend.h>
37866+
37867+/**
37868+ * ttm_agp_backend_init
37869+ *
37870+ * @bdev: Pointer to a struct ttm_bo_device.
37871+ * @bridge: The agp bridge this device is sitting on.
37872+ *
37873+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
37874+ * for TT memory. This function uses the linux agpgart interface to
37875+ * bind and unbind memory backing a ttm_tt.
37876+ */
37877+extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
37878+ struct agp_bridge_data *bridge);
37879+#endif
37880+
37881+#endif
37882diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo_util.c b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c
37883new file mode 100644
37884index 0000000..6c92310
37885--- /dev/null
37886+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c
37887@@ -0,0 +1,536 @@
37888+/**************************************************************************
37889+ *
37890+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
37891+ * All Rights Reserved.
37892+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
37893+ * All Rights Reserved.
37894+ *
37895+ * Permission is hereby granted, free of charge, to any person obtaining a
37896+ * copy of this software and associated documentation files (the
37897+ * "Software"), to deal in the Software without restriction, including
37898+ * without limitation the rights to use, copy, modify, merge, publish,
37899+ * distribute, sub license, and/or sell copies of the Software, and to
37900+ * permit persons to whom the Software is furnished to do so, subject to
37901+ * the following conditions:
37902+ *
37903+ * The above copyright notice and this permission notice (including the
37904+ * next paragraph) shall be included in all copies or substantial portions
37905+ * of the Software.
37906+ *
37907+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37908+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37909+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
37910+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
37911+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37912+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
37913+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
37914+ *
37915+ **************************************************************************/
37916+/*
37917+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
37918+ */
37919+
37920+#include "ttm/ttm_bo_driver.h"
37921+#include "ttm/ttm_placement_common.h"
37922+#include "ttm/ttm_pat_compat.h"
37923+#include <linux/io.h>
37924+#include <linux/highmem.h>
37925+#include <linux/wait.h>
37926+#include <linux/version.h>
37927+
37928+void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
37929+{
37930+ struct ttm_mem_reg *old_mem = &bo->mem;
37931+
37932+ if (old_mem->mm_node) {
37933+ spin_lock(&bo->bdev->lru_lock);
37934+ drm_mm_put_block(old_mem->mm_node);
37935+ spin_unlock(&bo->bdev->lru_lock);
37936+ }
37937+ old_mem->mm_node = NULL;
37938+}
37939+
37940+int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
37941+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
37942+{
37943+ struct ttm_tt *ttm = bo->ttm;
37944+ struct ttm_mem_reg *old_mem = &bo->mem;
37945+ uint32_t save_flags = old_mem->flags;
37946+ uint32_t save_proposed_flags = old_mem->proposed_flags;
37947+ int ret;
37948+
37949+ if (old_mem->mem_type != TTM_PL_SYSTEM) {
37950+ ttm_tt_unbind(ttm);
37951+ ttm_bo_free_old_node(bo);
37952+ ttm_flag_masked(&old_mem->flags, TTM_PL_FLAG_SYSTEM,
37953+ TTM_PL_MASK_MEM);
37954+ old_mem->mem_type = TTM_PL_SYSTEM;
37955+ save_flags = old_mem->flags;
37956+ }
37957+
37958+ ret = ttm_tt_set_placement_caching(ttm, new_mem->flags);
37959+ if (unlikely(ret != 0))
37960+ return ret;
37961+
37962+ if (new_mem->mem_type != TTM_PL_SYSTEM) {
37963+ ret = ttm_tt_bind(ttm, new_mem);
37964+ if (unlikely(ret != 0))
37965+ return ret;
37966+ }
37967+
37968+ *old_mem = *new_mem;
37969+ new_mem->mm_node = NULL;
37970+ old_mem->proposed_flags = save_proposed_flags;
37971+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
37972+ return 0;
37973+}
37974+
37975+int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
37976+ void **virtual)
37977+{
37978+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
37979+ unsigned long bus_offset;
37980+ unsigned long bus_size;
37981+ unsigned long bus_base;
37982+ int ret;
37983+ void *addr;
37984+
37985+ *virtual = NULL;
37986+ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
37987+ if (ret || bus_size == 0)
37988+ return ret;
37989+
37990+ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
37991+ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
37992+ else {
37993+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
37994+ if (mem->flags & TTM_PL_FLAG_WC)
37995+ addr = ioremap_wc(bus_base + bus_offset, bus_size);
37996+ else
37997+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
37998+#else
37999+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
38000+#endif
38001+ if (!addr)
38002+ return -ENOMEM;
38003+ }
38004+ *virtual = addr;
38005+ return 0;
38006+}
38007+
38008+void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
38009+ void *virtual)
38010+{
38011+ struct ttm_mem_type_manager *man;
38012+
38013+ man = &bdev->man[mem->mem_type];
38014+
38015+ if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
38016+ iounmap(virtual);
38017+}
38018+
38019+static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
38020+{
38021+ uint32_t *dstP =
38022+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
38023+ uint32_t *srcP =
38024+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
38025+
38026+ int i;
38027+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
38028+ iowrite32(ioread32(srcP++), dstP++);
38029+ return 0;
38030+}
38031+
38032+static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
38033+ unsigned long page)
38034+{
38035+ struct page *d = ttm_tt_get_page(ttm, page);
38036+ void *dst;
38037+
38038+ if (!d)
38039+ return -ENOMEM;
38040+
38041+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
38042+ dst = kmap(d);
38043+ if (!dst)
38044+ return -ENOMEM;
38045+
38046+ memcpy_fromio(dst, src, PAGE_SIZE);
38047+ kunmap(d);
38048+ return 0;
38049+}
38050+
38051+static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
38052+ unsigned long page)
38053+{
38054+ struct page *s = ttm_tt_get_page(ttm, page);
38055+ void *src;
38056+
38057+ if (!s)
38058+ return -ENOMEM;
38059+
38060+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
38061+ src = kmap(s);
38062+ if (!src)
38063+ return -ENOMEM;
38064+
38065+ memcpy_toio(dst, src, PAGE_SIZE);
38066+ kunmap(s);
38067+ return 0;
38068+}
38069+
38070+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
38071+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
38072+{
38073+ struct ttm_bo_device *bdev = bo->bdev;
38074+ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
38075+ struct ttm_tt *ttm = bo->ttm;
38076+ struct ttm_mem_reg *old_mem = &bo->mem;
38077+ struct ttm_mem_reg old_copy = *old_mem;
38078+ void *old_iomap;
38079+ void *new_iomap;
38080+ int ret;
38081+ uint32_t save_flags = old_mem->flags;
38082+ uint32_t save_proposed_flags = old_mem->proposed_flags;
38083+ unsigned long i;
38084+ unsigned long page;
38085+ unsigned long add = 0;
38086+ int dir;
38087+
38088+ ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
38089+ if (ret)
38090+ return ret;
38091+ ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
38092+ if (ret)
38093+ goto out;
38094+
38095+ if (old_iomap == NULL && new_iomap == NULL)
38096+ goto out2;
38097+ if (old_iomap == NULL && ttm == NULL)
38098+ goto out2;
38099+
38100+ add = 0;
38101+ dir = 1;
38102+
38103+ if ((old_mem->mem_type == new_mem->mem_type) &&
38104+ (new_mem->mm_node->start <
38105+ old_mem->mm_node->start + old_mem->mm_node->size)) {
38106+ dir = -1;
38107+ add = new_mem->num_pages - 1;
38108+ }
38109+
38110+ for (i = 0; i < new_mem->num_pages; ++i) {
38111+ page = i * dir + add;
38112+ if (old_iomap == NULL)
38113+ ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
38114+ else if (new_iomap == NULL)
38115+ ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
38116+ else
38117+ ret = ttm_copy_io_page(new_iomap, old_iomap, page);
38118+ if (ret)
38119+ goto out1;
38120+ }
38121+ mb();
38122+ out2:
38123+ ttm_bo_free_old_node(bo);
38124+
38125+ *old_mem = *new_mem;
38126+ new_mem->mm_node = NULL;
38127+ old_mem->proposed_flags = save_proposed_flags;
38128+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
38129+
38130+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
38131+ ttm_tt_unbind(ttm);
38132+ ttm_tt_destroy(ttm);
38133+ bo->ttm = NULL;
38134+ }
38135+
38136+ out1:
38137+ ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
38138+ out:
38139+ ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
38140+ return ret;
38141+}
38142+
38143+/**
38144+ * ttm_buffer_object_transfer
38145+ *
38146+ * @bo: A pointer to a struct ttm_buffer_object.
38147+ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
38148+ * holding the data of @bo with the old placement.
38149+ *
38150+ * This is a utility function that may be called after an accelerated move
38151+ * has been scheduled. A new buffer object is created as a placeholder for
38152+ * the old data while it's being copied. When that buffer object is idle,
38153+ * it can be destroyed, releasing the space of the old placement.
38154+ * Returns:
38155+ * !0: Failure.
38156+ */
38157+
38158+static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
38159+ struct ttm_buffer_object **new_obj)
38160+{
38161+ struct ttm_buffer_object *fbo;
38162+ struct ttm_bo_device *bdev = bo->bdev;
38163+ struct ttm_bo_driver *driver = bdev->driver;
38164+
38165+ fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
38166+ if (!fbo)
38167+ return -ENOMEM;
38168+
38169+ *fbo = *bo;
38170+ mutex_init(&fbo->mutex);
38171+ mutex_lock(&fbo->mutex);
38172+
38173+ init_waitqueue_head(&fbo->event_queue);
38174+ INIT_LIST_HEAD(&fbo->ddestroy);
38175+ INIT_LIST_HEAD(&fbo->lru);
38176+
38177+ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
38178+ if (fbo->mem.mm_node)
38179+ fbo->mem.mm_node->private = (void *)fbo;
38180+ kref_init(&fbo->list_kref);
38181+ kref_init(&fbo->kref);
38182+
38183+ mutex_unlock(&fbo->mutex);
38184+
38185+ *new_obj = fbo;
38186+ return 0;
38187+}
38188+
38189+pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
38190+{
38191+#if defined(__i386__) || defined(__x86_64__)
38192+ if (caching_flags & TTM_PL_FLAG_WC) {
38193+ tmp = pgprot_ttm_x86_wc(tmp);
38194+ } else if (boot_cpu_data.x86 > 3 &&
38195+ (caching_flags & TTM_PL_FLAG_UNCACHED)) {
38196+ tmp = pgprot_noncached(tmp);
38197+ }
38198+#elif defined(__powerpc__)
38199+ if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
38200+ pgprot_val(tmp) |= _PAGE_NO_CACHE;
38201+ if (caching_flags & TTM_PL_FLAG_UNCACHED)
38202+ pgprot_val(tmp) |= _PAGE_GUARDED;
38203+ }
38204+#endif
38205+#if defined(__ia64__)
38206+ if (caching_flags & TTM_PL_FLAG_WC)
38207+ tmp = pgprot_writecombine(tmp);
38208+ else
38209+ tmp = pgprot_noncached(tmp);
38210+#endif
38211+#if defined(__sparc__)
38212+ if (!(caching_flags & TTM_PL_FLAG_CACHED))
38213+ tmp = pgprot_noncached(tmp);
38214+#endif
38215+ return tmp;
38216+}
38217+
38218+static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
38219+ unsigned long bus_base,
38220+ unsigned long bus_offset,
38221+ unsigned long bus_size,
38222+ struct ttm_bo_kmap_obj *map)
38223+{
38224+ struct ttm_bo_device * bdev = bo->bdev;
38225+ struct ttm_mem_reg * mem = &bo->mem;
38226+ struct ttm_mem_type_manager * man = &bdev->man[mem->mem_type];
38227+
38228+ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
38229+ map->bo_kmap_type = ttm_bo_map_premapped;
38230+ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);} else {
38231+ map->bo_kmap_type = ttm_bo_map_iomap;
38232+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
38233+ if (mem->flags & TTM_PL_FLAG_WC)
38234+ map->virtual = ioremap_wc(bus_base + bus_offset, bus_size);
38235+ else
38236+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
38237+#else
38238+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
38239+#endif
38240+ }
38241+ return (!map->virtual) ? -ENOMEM : 0;
38242+}
38243+
38244+static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
38245+ unsigned long start_page,
38246+ unsigned long num_pages,
38247+ struct ttm_bo_kmap_obj *map)
38248+{
38249+ struct ttm_mem_reg * mem = &bo->mem; pgprot_t prot;
38250+ struct ttm_tt * ttm = bo->ttm;
38251+ struct page * d;
38252+ bool do_kmap = false;
38253+ int i;
38254+ BUG_ON(!ttm);
38255+ if (num_pages == 1) {
38256+ map->page = ttm_tt_get_page(ttm, start_page);
38257+ do_kmap = (!PageHighMem(map->page) ||
38258+ (mem->flags & TTM_PL_FLAG_CACHED));
38259+ }
38260+
38261+ if (do_kmap) {
38262+ /*
38263+ * We're mapping a single page, and the desired
38264+ * page protection is consistent with the bo.
38265+ */
38266+ map->bo_kmap_type = ttm_bo_map_kmap;
38267+ map->virtual = kmap(map->page);
38268+ } else {
38269+ /*
38270+ * Populate the part we're mapping;
38271+ */
38272+ for (i = start_page; i < start_page + num_pages; ++i) {
38273+ d = ttm_tt_get_page(ttm, i); if (!d)
38274+ return -ENOMEM;
38275+ }
38276+
38277+ /*
38278+ * We need to use vmap to get the desired page protection
38279+	 * or to make the buffer object look contiguous.
38280+ */
38281+ prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
38282+ PAGE_KERNEL :
38283+ ttm_io_prot(mem->flags, PAGE_KERNEL);
38284+ map->bo_kmap_type = ttm_bo_map_vmap;
38285+ map->virtual = vmap(ttm->pages + start_page, num_pages, 0, prot);
38286+ }
38287+ return (!map->virtual) ? -ENOMEM : 0;
38288+}
38289+
38290+int ttm_bo_kmap(struct ttm_buffer_object *bo,
38291+ unsigned long start_page, unsigned long num_pages,
38292+ struct ttm_bo_kmap_obj *map)
38293+{
38294+ int ret;
38295+ unsigned long bus_base;
38296+ unsigned long bus_offset;
38297+ unsigned long bus_size;
38298+ BUG_ON(!list_empty(&bo->swap));
38299+ map->virtual = NULL;
38300+ if (num_pages > bo->num_pages)
38301+ return -EINVAL;
38302+ if (start_page > bo->num_pages)
38303+ return -EINVAL;
38304+#if 0
38305+ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
38306+ return -EPERM;
38307+#endif
38308+ ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
38309+ &bus_offset, &bus_size);
38310+ if (ret)
38311+ return ret;
38312+ if (bus_size == 0) {
38313+ return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
38314+ } else {
38315+ bus_offset += start_page << PAGE_SHIFT;
38316+ bus_size = num_pages << PAGE_SHIFT;
38317+ return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
38318+ }
38319+}
38320+
38321+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
38322+{
38323+ if (!map->virtual)
38324+ return;
38325+ switch (map->bo_kmap_type) {
38326+ case ttm_bo_map_iomap:
38327+ iounmap(map->virtual);
38328+ break;
38329+ case ttm_bo_map_vmap:
38330+ vunmap(map->virtual);
38331+ break;
38332+ case ttm_bo_map_kmap:
38333+ kunmap(map->page);
38334+ break;
38335+ case ttm_bo_map_premapped:
38336+ break;
38337+ default:
38338+ BUG();
38339+ }
38340+ map->virtual = NULL;
38341+ map->page = NULL;
38342+}
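/*
 * A hedged usage sketch (not part of this patch) of the ttm_bo_kmap() /
 * ttm_bo_kunmap() pairing above. The caller is hypothetical and @bo is
 * assumed to be reserved and not swapped out.
 */
static inline int ttm_bo_kmap_sketch(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	int ret;

	/* Map the first page; iomap, kmap or vmap is chosen internally. */
	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (unlikely(ret != 0))
		return ret;

	/* ... CPU access through map.virtual goes here ... */

	ttm_bo_kunmap(&map);
	return 0;
}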
38343+
38344+int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
38345+ unsigned long dst_offset,
38346+ unsigned long *pfn, pgprot_t * prot)
38347+{
38348+ struct ttm_mem_reg * mem = &bo->mem;
38349+ struct ttm_bo_device * bdev = bo->bdev;
38350+ unsigned long bus_offset;
38351+ unsigned long bus_size;
38352+ unsigned long bus_base;
38353+ int ret;
38354+ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
38355+ &bus_size);
38356+ if (ret)
38357+ return -EINVAL;
38358+ if (bus_size != 0)
38359+ * pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
38360+ else
38361+ if (!bo->ttm)
38362+ return -EINVAL;
38363+ else
38364+ *pfn =
38365+ page_to_pfn(ttm_tt_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));
38366+ *prot =
38367+ (mem->flags & TTM_PL_FLAG_CACHED) ? PAGE_KERNEL : ttm_io_prot(mem->
38368+ flags,
38369+ PAGE_KERNEL);
38370+ return 0;
38371+}
38372+
38373+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
38374+ void *sync_obj,
38375+ void *sync_obj_arg,
38376+ bool evict, bool no_wait,
38377+ struct ttm_mem_reg *new_mem)
38378+{
38379+ struct ttm_bo_device * bdev = bo->bdev;
38380+ struct ttm_bo_driver * driver = bdev->driver;
38381+ struct ttm_mem_type_manager * man = &bdev->man[new_mem->mem_type];
38382+ struct ttm_mem_reg * old_mem = &bo->mem;
38383+ int ret;
38384+ uint32_t save_flags = old_mem->flags;
38385+ uint32_t save_proposed_flags = old_mem->proposed_flags;
38386+ struct ttm_buffer_object * old_obj;
38387+ if (bo->sync_obj)
38388+ driver->sync_obj_unref(&bo->sync_obj);
38389+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
38390+ bo->sync_obj_arg = sync_obj_arg;
38391+ if (evict) {
38392+ ret = ttm_bo_wait(bo, false, false, false);
38393+ if (ret)
38394+ return ret;
38395+ ttm_bo_free_old_node(bo);
38396+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm != NULL)) {
38397+ ttm_tt_unbind(bo->ttm); ttm_tt_destroy(bo->ttm); bo->ttm = NULL;
38398+ }
38399+ } else {
38400+
38401+ /* This should help pipeline ordinary buffer moves.
38402+ *
38403+ * Hang old buffer memory on a new buffer object,
38404+ * and leave it to be released when the GPU
38405+ * operation has completed.
38406+ */
38407+ ret = ttm_buffer_object_transfer(bo, &old_obj);
38408+ if (ret)
38409+ return ret;
38410+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
38411+ old_obj->ttm = NULL;
38412+ else
38413+ bo->ttm = NULL;
38414+ bo->priv_flags |= TTM_BO_PRIV_FLAG_MOVING;
38415+ ttm_bo_unreserve(old_obj);
38416+ }
38417+
38418+ *old_mem = *new_mem;
38419+ new_mem->mm_node = NULL;
38420+ old_mem->proposed_flags = save_proposed_flags;
38421+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
38422+ return 0;
38423+}
38424diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c
38425new file mode 100644
38426index 0000000..4d950fc
38427--- /dev/null
38428+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c
38429@@ -0,0 +1,596 @@
38430+/**************************************************************************
38431+ *
38432+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
38433+ * All Rights Reserved.
38434+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
38435+ * All Rights Reserved.
38436+ *
38437+ * Permission is hereby granted, free of charge, to any person obtaining a
38438+ * copy of this software and associated documentation files (the
38439+ * "Software"), to deal in the Software without restriction, including
38440+ * without limitation the rights to use, copy, modify, merge, publish,
38441+ * distribute, sub license, and/or sell copies of the Software, and to
38442+ * permit persons to whom the Software is furnished to do so, subject to
38443+ * the following conditions:
38444+ *
38445+ * The above copyright notice and this permission notice (including the
38446+ * next paragraph) shall be included in all copies or substantial portions
38447+ * of the Software.
38448+ *
38449+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
38450+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
38451+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
38452+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
38453+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
38454+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
38455+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
38456+ *
38457+ **************************************************************************/
38458+/*
38459+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
38460+ */
38461+
38462+
38463+#include "ttm/ttm_bo_driver.h"
38464+#include "ttm/ttm_placement_common.h"
38465+#include <linux/mm.h>
38466+#include <linux/version.h>
38467+#include <linux/rbtree.h>
38468+#include <asm/uaccess.h>
38469+
38470+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
38471+#error "TTM doesn't build on kernel versions below 2.6.25."
38472+#endif
38473+
38474+#define TTM_BO_VM_NUM_PREFAULT 16
38475+
38476+static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
38477+ unsigned long page_start,
38478+ unsigned long num_pages)
38479+{
38480+ struct rb_node *cur = bdev->addr_space_rb.rb_node;
38481+ unsigned long cur_offset;
38482+ struct ttm_buffer_object *bo;
38483+ struct ttm_buffer_object *best_bo = NULL;
38484+
38485+ while (likely(cur != NULL)) {
38486+ bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
38487+ cur_offset = bo->vm_node->start;
38488+ if (page_start >= cur_offset) {
38489+ cur = cur->rb_right;
38490+ best_bo = bo;
38491+ if (page_start == cur_offset)
38492+ break;
38493+ } else
38494+ cur = cur->rb_left;
38495+ }
38496+
38497+ if (unlikely(best_bo == NULL))
38498+ return NULL;
38499+
38500+ if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
38501+ (page_start + num_pages)))
38502+ return NULL;
38503+
38504+ return best_bo;
38505+}
38506+
38507+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
38508+static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38509+{
38510+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
38511+ vma->vm_private_data;
38512+ struct ttm_bo_device *bdev = bo->bdev;
38513+ unsigned long bus_base;
38514+ unsigned long bus_offset;
38515+ unsigned long bus_size;
38516+ unsigned long page_offset;
38517+ unsigned long page_last;
38518+ unsigned long pfn;
38519+ struct ttm_tt *ttm = NULL;
38520+ struct page *page;
38521+ int ret;
38522+ int i;
38523+ bool is_iomem;
38524+ unsigned long address = (unsigned long)vmf->virtual_address;
38525+ int retval = VM_FAULT_NOPAGE;
38526+
38527+ ret = ttm_bo_reserve(bo, true, false, false, 0);
38528+ if (unlikely(ret != 0))
38529+ return VM_FAULT_NOPAGE;
38530+
38531+ mutex_lock(&bo->mutex);
38532+
38533+ /*
38534+ * Wait for buffer data in transit, due to a pipelined
38535+ * move.
38536+ */
38537+
38538+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
38539+ ret = ttm_bo_wait(bo, false, true, false);
38540+ if (unlikely(ret != 0)) {
38541+ retval = (ret != -ERESTART) ?
38542+ VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
38543+ goto out_unlock;
38544+ }
38545+ }
38546+
38547+ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
38548+ &bus_size);
38549+ if (unlikely(ret != 0)) {
38550+ retval = VM_FAULT_SIGBUS;
38551+ goto out_unlock;
38552+ }
38553+
38554+ is_iomem = (bus_size != 0);
38555+
38556+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
38557+ bo->vm_node->start - vma->vm_pgoff;
38558+ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
38559+ bo->vm_node->start - vma->vm_pgoff;
38560+
38561+ if (unlikely(page_offset >= bo->num_pages)) {
38562+ retval = VM_FAULT_SIGBUS;
38563+ goto out_unlock;
38564+ }
38565+
38566+ /*
38567+ * Strictly, we're not allowed to modify vma->vm_page_prot here,
38568+ * since the mmap_sem is only held in read mode. However, we
38569+ * modify only the caching bits of vma->vm_page_prot and
38570+ * consider those bits protected by
38571+ * the bo->mutex, as we should be the only writers.
38572+ * There shouldn't really be any readers of these bits except
38573+ * within vm_insert_mixed()? fork?
38574+ *
38575+ * TODO: Add a list of vmas to the bo, and change the
38576+ * vma->vm_page_prot when the object changes caching policy, with
38577+ * the correct locks held.
38578+ */
38579+
38580+ if (is_iomem) {
38581+ vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
38582+ vma->vm_page_prot);
38583+ } else {
38584+ ttm = bo->ttm;
38585+ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
38586+ vm_get_page_prot(vma->vm_flags) :
38587+ ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
38588+ }
38589+
38590+ /*
38591+ * Speculatively prefault a number of pages. Only error on
38592+ * first page.
38593+ */
38594+
38595+ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
38596+
38597+ if (is_iomem)
38598+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
38599+ page_offset;
38600+ else {
38601+ page = ttm_tt_get_page(ttm, page_offset);
38602+ if (unlikely(!page && i == 0)) {
38603+ retval = VM_FAULT_OOM;
38604+ goto out_unlock;
38605+ } else if (unlikely(!page)) {
38606+ break;
38607+ }
38608+ pfn = page_to_pfn(page);
38609+ }
38610+
38611+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
38612+ ret = vm_insert_mixed(vma, address, pfn);
38613+#else
38614+ ret = vm_insert_pfn(vma, address, pfn);
38615+#endif
38616+ /*
38617+ * Somebody beat us to this PTE or prefaulting to
38618+ * an already populated PTE, or prefaulting error.
38619+ */
38620+
38621+ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
38622+ break;
38623+ else if (unlikely(ret != 0)) {
38624+ retval =
38625+ (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
38626+ goto out_unlock;
38627+
38628+ }
38629+
38630+ address += PAGE_SIZE;
38631+ if (unlikely(++page_offset >= page_last))
38632+ break;
38633+ }
38634+
38635+ out_unlock:
38636+ mutex_unlock(&bo->mutex);
38637+ ttm_bo_unreserve(bo);
38638+ return retval;
38639+}
38640+
38641+#else
38642+
38643+static unsigned long ttm_bo_vm_nopfn(struct vm_area_struct *vma,
38644+ unsigned long address)
38645+{
38646+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
38647+ vma->vm_private_data;
38648+ struct ttm_bo_device *bdev = bo->bdev;
38649+ unsigned long bus_base;
38650+ unsigned long bus_offset;
38651+ unsigned long bus_size;
38652+ unsigned long page_offset;
38653+ unsigned long page_last;
38654+ unsigned long pfn;
38655+ struct ttm_tt *ttm = NULL;
38656+ struct page *page;
38657+ int ret;
38658+ int i;
38659+ bool is_iomem;
38660+ unsigned long retval = NOPFN_REFAULT;
38661+
38662+ ret = ttm_bo_reserve(bo, true, false, false, 0);
38663+ if (unlikely(ret != 0))
38664+ return NOPFN_REFAULT;
38665+
38666+ mutex_lock(&bo->mutex);
38667+
38668+ /*
38669+ * Wait for buffer data in transit, due to a pipelined
38670+ * move.
38671+ */
38672+
38673+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
38674+ ret = ttm_bo_wait(bo, false, true, false);
38675+ if (unlikely(ret != 0)) {
38676+ retval = (ret != -ERESTART) ?
38677+ NOPFN_SIGBUS : NOPFN_REFAULT;
38678+ goto out_unlock;
38679+ }
38680+ }
38681+
38682+ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
38683+ &bus_size);
38684+ if (unlikely(ret != 0)) {
38685+ printk(KERN_ERR "Attempted buffer object access "
38686+ "of unmappable object.\n");
38687+ retval = NOPFN_SIGBUS;
38688+ goto out_unlock;
38689+ }
38690+
38691+ is_iomem = (bus_size != 0);
38692+
38693+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
38694+ bo->vm_node->start - vma->vm_pgoff;
38695+
38696+ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
38697+ bo->vm_node->start - vma->vm_pgoff;
38698+
38699+ if (unlikely(page_offset >= bo->num_pages)) {
38700+ printk(KERN_ERR "Attempted buffer object access "
38701+ "outside object.\n");
38702+ retval = NOPFN_SIGBUS;
38703+ goto out_unlock;
38704+ }
38705+
38706+ /*
38707+ * Strictly, we're not allowed to modify vma->vm_page_prot here,
38708+ * since the mmap_sem is only held in read mode. However, we
38709+ * modify only the caching bits of vma->vm_page_prot and
38710+ * consider those bits protected by
38711+ * the bo->mutex, as we should be the only writers.
38712+ * There shouldn't really be any readers of these bits except
38713+ * within vm_insert_mixed()? fork?
38714+ *
38715+ * TODO: Add a list of vmas to the bo, and change the
38716+ * vma->vm_page_prot when the object changes caching policy, with
38717+ * the correct locks held.
38718+ */
38719+
38720+ if (is_iomem) {
38721+ vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
38722+ vma->vm_page_prot);
38723+ } else {
38724+ ttm = bo->ttm;
38725+ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
38726+ vm_get_page_prot(vma->vm_flags) :
38727+ ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
38728+ }
38729+
38730+ /*
38731+ * Speculatively prefault a number of pages. Only error on
38732+ * first page.
38733+ */
38734+
38735+ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
38736+
38737+ if (is_iomem)
38738+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
38739+ page_offset;
38740+ else {
38741+ page = ttm_tt_get_page(ttm, page_offset);
38742+ if (unlikely(!page && i == 0)) {
38743+ retval = NOPFN_OOM;
38744+ goto out_unlock;
38745+ } else if (unlikely(!page)) {
38746+ break;
38747+ }
38748+ pfn = page_to_pfn(page);
38749+ }
38750+
38751+ ret = vm_insert_pfn(vma, address, pfn);
38754+
38755+ /*
38756+ * Somebody beat us to this PTE or prefaulting to
38757+ * an already populated PTE, or prefaulting error.
38758+ */
38759+
38760+ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
38761+ break;
38762+ else if (unlikely(ret != 0)) {
38763+ retval =
38764+			    (ret == -ENOMEM) ? NOPFN_OOM : NOPFN_SIGBUS;
38765+ goto out_unlock;
38766+ }
38767+
38768+ address += PAGE_SIZE;
38769+ if (unlikely(++page_offset >= page_last))
38770+ break;
38771+ }
38772+
38773+ out_unlock:
38774+ mutex_unlock(&bo->mutex);
38775+ ttm_bo_unreserve(bo);
38776+ return retval;
38777+}
38778+#endif
38779+
38780+static void ttm_bo_vm_open(struct vm_area_struct *vma)
38781+{
38782+ struct ttm_buffer_object *bo =
38783+ (struct ttm_buffer_object *)vma->vm_private_data;
38784+
38785+ (void)ttm_bo_reference(bo);
38786+}
38787+
38788+static void ttm_bo_vm_close(struct vm_area_struct *vma)
38789+{
38790+ struct ttm_buffer_object *bo =
38791+ (struct ttm_buffer_object *)vma->vm_private_data;
38792+
38793+ ttm_bo_unref(&bo);
38794+ vma->vm_private_data = NULL;
38795+}
38796+
38797+static struct vm_operations_struct ttm_bo_vm_ops = {
38798+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
38799+ .fault = ttm_bo_vm_fault,
38800+#else
38801+ .nopfn = ttm_bo_vm_nopfn,
38802+#endif
38803+ .open = ttm_bo_vm_open,
38804+ .close = ttm_bo_vm_close
38805+};
38806+
38807+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
38808+ struct ttm_bo_device *bdev)
38809+{
38810+ struct ttm_bo_driver *driver;
38811+ struct ttm_buffer_object *bo;
38812+ int ret;
38813+
38814+ read_lock(&bdev->vm_lock);
38815+ bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
38816+ (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
38817+ if (likely(bo != NULL))
38818+ ttm_bo_reference(bo);
38819+ read_unlock(&bdev->vm_lock);
38820+
38821+ if (unlikely(bo == NULL)) {
38822+ printk(KERN_ERR "Could not find buffer object to map.\n");
38823+ ret = -EINVAL;
38824+ goto out_unref;
38825+ }
38826+
38827+ driver = bo->bdev->driver;
38828+ if (unlikely(!driver->verify_access)) {
38829+ ret = -EPERM;
38830+ goto out_unref;
38831+ }
38832+ ret = driver->verify_access(bo, filp);
38833+ if (unlikely(ret != 0))
38834+ goto out_unref;
38835+
38836+ vma->vm_ops = &ttm_bo_vm_ops;
38837+
38838+ /*
38839+ * Note: We're transferring the bo reference to
38840+ * vma->vm_private_data here.
38841+ */
38842+
38843+ vma->vm_private_data = bo;
38844+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
38845+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
38846+#else
38847+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
38848+#endif
38849+ return 0;
38850+ out_unref:
38851+ ttm_bo_unref(&bo);
38852+ return ret;
38853+}
38854+
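For context, a driver normally reaches ttm_bo_mmap() from its file_operations mmap hook. The following is only a hedged sketch: the example_drm_private structure, its bdev member and the private-data lookup are assumptions for illustration, not part of this patch.

/* Sketch only: example_drm_private and its bdev member are assumptions. */
struct example_drm_private {
	struct ttm_bo_device bdev;
	/* ... other driver state ... */
};

static int example_drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct example_drm_private *dev_priv =
	    file_priv->minor->dev->dev_private;

	/* Delegate VMA setup to TTM; it installs ttm_bo_vm_ops. */
	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}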
38855+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
38856+{
38857+ if (vma->vm_pgoff != 0)
38858+ return -EACCES;
38859+
38860+ vma->vm_ops = &ttm_bo_vm_ops;
38861+ vma->vm_private_data = ttm_bo_reference(bo);
38862+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
38863+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
38864+#else
38865+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
38866+#endif
38867+ return 0;
38868+}
38869+
38870+ssize_t ttm_bo_io(struct ttm_bo_device * bdev, struct file * filp,
38871+ const char __user * wbuf, char __user * rbuf, size_t count,
38872+ loff_t * f_pos, bool write)
38873+{
38874+ struct ttm_buffer_object *bo;
38875+ struct ttm_bo_driver *driver;
38876+ struct ttm_bo_kmap_obj map;
38877+ unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
38878+ unsigned long kmap_offset;
38879+ unsigned long kmap_end;
38880+ unsigned long kmap_num;
38881+ size_t io_size;
38882+ unsigned int page_offset;
38883+ char *virtual;
38884+ int ret;
38885+ bool no_wait = false;
38886+ bool dummy;
38887+
38888+ read_lock(&bdev->vm_lock);
38889+ bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
38890+ if (likely(bo != NULL))
38891+ ttm_bo_reference(bo);
38892+ read_unlock(&bdev->vm_lock);
38893+
38894+ if (unlikely(bo == NULL))
38895+ return -EFAULT;
38896+
38897+ driver = bo->bdev->driver;
38898+	if (unlikely(!driver->verify_access)) {
38899+		ret = -EPERM;
+		goto out_unref;
+	}
38900+
38901+ ret = driver->verify_access(bo, filp);
38902+ if (unlikely(ret != 0))
38903+ goto out_unref;
38904+
38905+ kmap_offset = dev_offset - bo->vm_node->start;
38906+	if (unlikely(kmap_offset >= bo->num_pages)) {
38907+ ret = -EFBIG;
38908+ goto out_unref;
38909+ }
38910+
38911+ page_offset = *f_pos & ~PAGE_MASK;
38912+ io_size = bo->num_pages - kmap_offset;
38913+ io_size = (io_size << PAGE_SHIFT) - page_offset;
38914+ if (count < io_size)
38915+ io_size = count;
38916+
38917+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
38918+ kmap_num = kmap_end - kmap_offset + 1;
38919+
38920+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
38921+
38922+ switch (ret) {
38923+ case 0:
38924+ break;
38925+ case -ERESTART:
38926+ ret = -EINTR;
38927+ goto out_unref;
38928+ case -EBUSY:
38929+ ret = -EAGAIN;
38930+ goto out_unref;
38931+ default:
38932+ goto out_unref;
38933+ }
38934+
38935+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
38936+ if (unlikely(ret != 0))
38937+ goto out_unref;
38938+
38939+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
38940+ virtual += page_offset;
38941+
38942+ if (write)
38943+ ret = copy_from_user(virtual, wbuf, io_size);
38944+ else
38945+ ret = copy_to_user(rbuf, virtual, io_size);
38946+
38947+ ttm_bo_kunmap(&map);
38948+ ttm_bo_unreserve(bo);
38949+ ttm_bo_unref(&bo);
38950+
38951+ if (unlikely(ret != 0))
38952+ return -EFBIG;
38953+
38954+ *f_pos += io_size;
38955+
38956+ return io_size;
38957+ out_unref:
38958+ ttm_bo_unref(&bo);
38959+ return ret;
38960+}
38961+
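ttm_bo_io() above is written to back a character-device style read/write pair. A hedged sketch of how a driver's fops could forward to it follows; the example_drm_private lookup and field names are assumptions, not part of this patch.

/* Sketch only: dev_priv lookup and field names are assumptions. */
static ssize_t example_drm_read(struct file *filp, char __user *buf,
				size_t count, loff_t *f_pos)
{
	struct drm_file *file_priv = filp->private_data;
	struct example_drm_private *dev_priv =
	    file_priv->minor->dev->dev_private;

	/* Read path: rbuf is the user buffer, no write buffer. */
	return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, false);
}

static ssize_t example_drm_write(struct file *filp, const char __user *buf,
				 size_t count, loff_t *f_pos)
{
	struct drm_file *file_priv = filp->private_data;
	struct example_drm_private *dev_priv =
	    file_priv->minor->dev->dev_private;

	/* Write path: wbuf is the user buffer, no read buffer. */
	return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, true);
}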
38962+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object * bo, const char __user * wbuf,
38963+ char __user * rbuf, size_t count, loff_t * f_pos,
38964+ bool write)
38965+{
38966+ struct ttm_bo_kmap_obj map;
38967+ unsigned long kmap_offset;
38968+ unsigned long kmap_end;
38969+ unsigned long kmap_num;
38970+ size_t io_size;
38971+ unsigned int page_offset;
38972+ char *virtual;
38973+ int ret;
38974+ bool no_wait = false;
38975+ bool dummy;
38976+
38977+ kmap_offset = (*f_pos >> PAGE_SHIFT);
38978+	if (unlikely(kmap_offset >= bo->num_pages))
38979+ return -EFBIG;
38980+
38981+ page_offset = *f_pos & ~PAGE_MASK;
38982+ io_size = bo->num_pages - kmap_offset;
38983+ io_size = (io_size << PAGE_SHIFT) - page_offset;
38984+ if (count < io_size)
38985+ io_size = count;
38986+
38987+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
38988+ kmap_num = kmap_end - kmap_offset + 1;
38989+
38990+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
38991+
38992+ switch (ret) {
38993+ case 0:
38994+ break;
38995+ case -ERESTART:
38996+ return -EINTR;
38997+ case -EBUSY:
38998+ return -EAGAIN;
38999+ default:
39000+ return ret;
39001+ }
39002+
39003+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
39004+ if (unlikely(ret != 0))
39005+ return ret;
39006+
39007+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
39008+ virtual += page_offset;
39009+
39010+ if (write)
39011+ ret = copy_from_user(virtual, wbuf, io_size);
39012+ else
39013+ ret = copy_to_user(rbuf, virtual, io_size);
39014+
39015+ ttm_bo_kunmap(&map);
39016+ ttm_bo_unreserve(bo);
39017+ ttm_bo_unref(&bo);
39018+
39019+ if (unlikely(ret != 0))
39020+ return ret;
39021+
39022+ *f_pos += io_size;
39023+
39024+ return io_size;
39025+}
39026diff --git a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c
39027new file mode 100644
39028index 0000000..4a34c18
39029--- /dev/null
39030+++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c
39031@@ -0,0 +1,115 @@
39032+/**************************************************************************
39033+ *
39034+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
39035+ * All Rights Reserved.
39036+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
39037+ * All Rights Reserved.
39038+ *
39039+ * Permission is hereby granted, free of charge, to any person obtaining a
39040+ * copy of this software and associated documentation files (the
39041+ * "Software"), to deal in the Software without restriction, including
39042+ * without limitation the rights to use, copy, modify, merge, publish,
39043+ * distribute, sub license, and/or sell copies of the Software, and to
39044+ * permit persons to whom the Software is furnished to do so, subject to
39045+ * the following conditions:
39046+ *
39047+ * The above copyright notice and this permission notice (including the
39048+ * next paragraph) shall be included in all copies or substantial portions
39049+ * of the Software.
39050+ *
39051+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
39052+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
39053+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
39054+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
39055+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
39056+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
39057+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
39058+ *
39059+ **************************************************************************/
39060+
39061+#include "ttm/ttm_execbuf_util.h"
39062+#include "ttm/ttm_bo_driver.h"
39063+#include "ttm/ttm_placement_common.h"
39064+#include <linux/wait.h>
39065+#include <linux/sched.h>
39066+
39067+void ttm_eu_backoff_reservation(struct list_head *list)
39068+{
39069+ struct ttm_validate_buffer *entry;
39070+
39071+ list_for_each_entry(entry, list, head) {
39072+ struct ttm_buffer_object *bo = entry->bo;
39073+ if (!entry->reserved)
39074+ continue;
39075+
39076+ entry->reserved = false;
39077+ ttm_bo_unreserve(bo);
39078+ }
39079+}
39080+
39081+/*
39082+ * Reserve buffers for validation.
39083+ *
39084+ * If a buffer in the list is marked for CPU access, we back off and
39085+ * wait for that buffer to become free for GPU access.
39086+ *
39087+ * If a buffer is reserved for another validation, the validator with
39088+ * the highest validation sequence backs off and waits for that buffer
39089+ * to become unreserved. This prevents deadlocks when validating multiple
39090+ * buffers in different orders.
39091+ */
39092+
39093+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
39094+{
39095+ struct ttm_validate_buffer *entry;
39096+ int ret;
39097+
39098+ retry:
39099+ list_for_each_entry(entry, list, head) {
39100+ struct ttm_buffer_object *bo = entry->bo;
39101+
39102+ entry->reserved = false;
39103+ ret = ttm_bo_reserve(bo, true, false, true, val_seq);
39104+ if (ret != 0) {
39105+ ttm_eu_backoff_reservation(list);
39106+ if (ret == -EAGAIN) {
39107+ ret = ttm_bo_wait_unreserved(bo, true);
39108+ if (unlikely(ret != 0))
39109+ return ret;
39110+ goto retry;
39111+ } else
39112+ return ret;
39113+ }
39114+
39115+ entry->reserved = true;
39116+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
39117+ ttm_eu_backoff_reservation(list);
39118+ ret = ttm_bo_wait_cpu(bo, false);
39119+ if (ret)
39120+ return ret;
39121+ goto retry;
39122+ }
39123+ }
39124+ return 0;
39125+}
39126+
39127+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
39128+{
39129+ struct ttm_validate_buffer *entry;
39130+
39131+ list_for_each_entry(entry, list, head) {
39132+ struct ttm_buffer_object *bo = entry->bo;
39133+ struct ttm_bo_driver *driver = bo->bdev->driver;
39134+ void *old_sync_obj;
39135+
39136+ mutex_lock(&bo->mutex);
39137+ old_sync_obj = bo->sync_obj;
39138+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
39139+ bo->sync_obj_arg = entry->new_sync_obj_arg;
39140+ mutex_unlock(&bo->mutex);
39141+ ttm_bo_unreserve(bo);
39142+ entry->reserved = false;
39143+ if (old_sync_obj)
39144+ driver->sync_obj_unref(&old_sync_obj);
39145+ }
39146+}
39147diff --git a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h
39148new file mode 100644
39149index 0000000..6577f63
39150--- /dev/null
39151+++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h
39152@@ -0,0 +1,110 @@
39153+/**************************************************************************
39154+ *
39155+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
39156+ * All Rights Reserved.
39157+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
39158+ * All Rights Reserved.
39159+ *
39160+ * Permission is hereby granted, free of charge, to any person obtaining a
39161+ * copy of this software and associated documentation files (the
39162+ * "Software"), to deal in the Software without restriction, including
39163+ * without limitation the rights to use, copy, modify, merge, publish,
39164+ * distribute, sub license, and/or sell copies of the Software, and to
39165+ * permit persons to whom the Software is furnished to do so, subject to
39166+ * the following conditions:
39167+ *
39168+ * The above copyright notice and this permission notice (including the
39169+ * next paragraph) shall be included in all copies or substantial portions
39170+ * of the Software.
39171+ *
39172+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
39173+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
39174+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
39175+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
39176+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
39177+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
39178+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
39179+ *
39180+ **************************************************************************/
39181+/*
39182+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
39183+ */
39184+
39185+#ifndef _TTM_EXECBUF_UTIL_H_
39186+#define _TTM_EXECBUF_UTIL_H_
39187+
39188+#include "ttm/ttm_bo_api.h"
39189+#include "ttm/ttm_fence_api.h"
39190+#include <linux/list.h>
39191+
39192+/**
39193+ * struct ttm_validate_buffer
39194+ *
39195+ * @head: list head for thread-private list.
39196+ * @bo: refcounted buffer object pointer.
39197+ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used when
39198+ * adding a new sync object.
39199+ * @reserved: Indicates whether @bo has been reserved for validation.
39200+ */
39201+
39202+struct ttm_validate_buffer {
39203+ struct list_head head;
39204+ struct ttm_buffer_object *bo;
39205+ void *new_sync_obj_arg;
39206+ bool reserved;
39207+};
39208+
39209+/**
39210+ * function ttm_eu_backoff_reservation
39211+ *
39212+ * @list: thread private list of ttm_validate_buffer structs.
39213+ *
39214+ * Undoes all buffer validation reservations for bos pointed to by
39215+ * the list entries.
39216+ */
39217+
39218+extern void ttm_eu_backoff_reservation(struct list_head *list);
39219+
39220+/**
39221+ * function ttm_eu_reserve_buffers
39222+ *
39223+ * @list: thread private list of ttm_validate_buffer structs.
39224+ * @val_seq: A unique sequence number.
39225+ *
39226+ * Tries to reserve bos pointed to by the list entries for validation.
39227+ * If the function returns 0, all buffers are marked as "unfenced",
39228+ * taken off the lru lists and are not synced for write CPU usage.
39229+ *
39230+ * If the function detects a deadlock due to multiple threads trying to
39231+ * reserve the same buffers in reverse order, all threads except one will
39232+ * back off and retry. This function may sleep while waiting for
39233+ * CPU write reservations to be cleared, and for other threads to
39234+ * unreserve their buffers.
39235+ *
39236+ * This function may return -ERESTART or -EAGAIN if the calling process
39237+ * receives a signal while waiting. In that case, no buffers on the list
39238+ * will be reserved upon return.
39239+ *
39240+ * Buffers reserved by this function should be unreserved by
39241+ * a call to either ttm_eu_backoff_reservation() or
39242+ * ttm_eu_fence_buffer_objects() when command submission is complete or
39243+ * has failed.
39244+ */
39245+
39246+extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
39247+
39248+/**
39249+ * function ttm_eu_fence_buffer_objects.
39250+ *
39251+ * @list: thread private list of ttm_validate_buffer structs.
39252+ * @sync_obj: The new sync object for the buffers.
39253+ *
39254+ * This function should be called when command submission is complete, and
39255+ * it will add a new sync object to bos pointed to by entries on @list.
39256+ * It also unreserves all buffers, putting them on lru lists.
39257+ *
39258+ */
39259+
39260+extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
39261+
39262+#endif
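To make the intended calling sequence of these helpers concrete, here is a hedged sketch of a validation/submission path. example_submit(), fence class 0, the EXE fence type and the per-buffer new_sync_obj_arg handling are placeholders for driver-specific code and are not part of this patch.

/* Sketch only: example_submit() and the fence parameters are placeholders. */
static int example_validate_and_submit(struct ttm_fence_device *fdev,
				       struct list_head *val_list,
				       uint32_t val_seq)
{
	struct ttm_fence_object *fence;
	int ret;

	/* Reserve every buffer on the list, backing off on contention. */
	ret = ttm_eu_reserve_buffers(val_list, val_seq);
	if (unlikely(ret != 0))
		return ret;

	/* Driver-specific command submission; each ttm_validate_buffer's
	 * new_sync_obj_arg is expected to be filled in by the driver. */
	ret = example_submit(val_list);
	if (unlikely(ret != 0))
		goto out_backoff;

	/* Emit a fence and attach it to all validated buffers. */
	ret = ttm_fence_object_create(fdev, 0, TTM_FENCE_TYPE_EXE,
				      TTM_FENCE_FLAG_EMIT, &fence);
	if (unlikely(ret != 0))
		goto out_backoff;

	ttm_eu_fence_buffer_objects(val_list, fence);
	ttm_fence_object_unref(&fence);
	return 0;

out_backoff:
	ttm_eu_backoff_reservation(val_list);
	return ret;
}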
39263diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence.c b/drivers/gpu/drm/psb/ttm/ttm_fence.c
39264new file mode 100644
39265index 0000000..115e7b7
39266--- /dev/null
39267+++ b/drivers/gpu/drm/psb/ttm/ttm_fence.c
39268@@ -0,0 +1,607 @@
39269+/**************************************************************************
39270+ *
39271+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
39272+ * All Rights Reserved.
39273+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
39274+ * All Rights Reserved.
39275+ *
39276+ * Permission is hereby granted, free of charge, to any person obtaining a
39277+ * copy of this software and associated documentation files (the
39278+ * "Software"), to deal in the Software without restriction, including
39279+ * without limitation the rights to use, copy, modify, merge, publish,
39280+ * distribute, sub license, and/or sell copies of the Software, and to
39281+ * permit persons to whom the Software is furnished to do so, subject to
39282+ * the following conditions:
39283+ *
39284+ * The above copyright notice and this permission notice (including the
39285+ * next paragraph) shall be included in all copies or substantial portions
39286+ * of the Software.
39287+ *
39288+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
39289+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
39290+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
39291+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
39292+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
39293+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
39294+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
39295+ *
39296+ **************************************************************************/
39297+/*
39298+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
39299+ */
39300+
39301+#include "ttm/ttm_fence_api.h"
39302+#include "ttm/ttm_fence_driver.h"
39303+#include <linux/wait.h>
39304+#include <linux/sched.h>
39305+
39306+#include <drm/drmP.h>
39307+
39308+/*
39309+ * Simple implementation for now.
39310+ */
39311+
39312+static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask)
39313+{
39314+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39315+
39316+	printk(KERN_ERR "GPU lockup detected on engine %u "
39317+ "fence type 0x%08x\n",
39318+ (unsigned int)fence->fence_class, (unsigned int)mask);
39319+ /*
39320+ * Give engines some time to idle?
39321+ */
39322+
39323+ write_lock(&fc->lock);
39324+ ttm_fence_handler(fence->fdev, fence->fence_class,
39325+ fence->sequence, mask, -EBUSY);
39326+ write_unlock(&fc->lock);
39327+}
39328+
39329+/*
39330+ * Convenience function to be called by fence::wait methods that
39331+ * need polling.
39332+ */
39333+
39334+int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy,
39335+ bool interruptible, uint32_t mask)
39336+{
39337+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39338+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
39339+ uint32_t count = 0;
39340+ int ret;
39341+ unsigned long end_jiffies = fence->timeout_jiffies;
39342+
39343+ DECLARE_WAITQUEUE(entry, current);
39344+ add_wait_queue(&fc->fence_queue, &entry);
39345+
39346+ ret = 0;
39347+
39348+ for (;;) {
39349+ __set_current_state((interruptible) ?
39350+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
39351+ if (ttm_fence_object_signaled(fence, mask))
39352+ break;
39353+ if (time_after_eq(jiffies, end_jiffies)) {
39354+ if (driver->lockup)
39355+ driver->lockup(fence, mask);
39356+ else
39357+ ttm_fence_lockup(fence, mask);
39358+ continue;
39359+ }
39360+ if (lazy)
39361+ schedule_timeout(1);
39362+ else if ((++count & 0x0F) == 0) {
39363+ __set_current_state(TASK_RUNNING);
39364+ schedule();
39365+ __set_current_state((interruptible) ?
39366+ TASK_INTERRUPTIBLE :
39367+ TASK_UNINTERRUPTIBLE);
39368+ }
39369+ if (interruptible && signal_pending(current)) {
39370+ ret = -ERESTART;
39371+ break;
39372+ }
39373+ }
39374+ __set_current_state(TASK_RUNNING);
39375+ remove_wait_queue(&fc->fence_queue, &entry);
39376+ return ret;
39377+}
39378+
39379+/*
39380+ * Typically called by the IRQ handler.
39381+ */
39382+
39383+void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class,
39384+ uint32_t sequence, uint32_t type, uint32_t error)
39385+{
39386+ int wake = 0;
39387+ uint32_t diff;
39388+ uint32_t relevant_type;
39389+ uint32_t new_type;
39390+ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
39391+ const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev);
39392+ struct list_head *head;
39393+ struct ttm_fence_object *fence, *next;
39394+ bool found = false;
39395+
39396+ if (list_empty(&fc->ring))
39397+ return;
39398+
39399+ list_for_each_entry(fence, &fc->ring, ring) {
39400+ diff = (sequence - fence->sequence) & fc->sequence_mask;
39401+ if (diff > fc->wrap_diff) {
39402+ found = true;
39403+ break;
39404+ }
39405+ }
39406+
39407+ fc->waiting_types &= ~type;
39408+ head = (found) ? &fence->ring : &fc->ring;
39409+
39410+ list_for_each_entry_safe_reverse(fence, next, head, ring) {
39411+ if (&fence->ring == &fc->ring)
39412+ break;
39413+
39414+ DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n",
39415+ (unsigned long)fence, fence->sequence,
39416+ fence->fence_type);
39417+
39418+ if (error) {
39419+ fence->info.error = error;
39420+ fence->info.signaled_types = fence->fence_type;
39421+ list_del_init(&fence->ring);
39422+ wake = 1;
39423+ break;
39424+ }
39425+
39426+ relevant_type = type & fence->fence_type;
39427+ new_type = (fence->info.signaled_types | relevant_type) ^
39428+ fence->info.signaled_types;
39429+
39430+ if (new_type) {
39431+ fence->info.signaled_types |= new_type;
39432+ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
39433+ (unsigned long)fence,
39434+ fence->info.signaled_types);
39435+
39436+ if (unlikely(driver->signaled))
39437+ driver->signaled(fence);
39438+
39439+ if (driver->needed_flush)
39440+ fc->pending_flush |=
39441+ driver->needed_flush(fence);
39442+
39443+ if (new_type & fence->waiting_types)
39444+ wake = 1;
39445+ }
39446+
39447+ fc->waiting_types |=
39448+ fence->waiting_types & ~fence->info.signaled_types;
39449+
39450+ if (!(fence->fence_type & ~fence->info.signaled_types)) {
39451+ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
39452+ (unsigned long)fence);
39453+ list_del_init(&fence->ring);
39454+ }
39455+ }
39456+
39457+ /*
39458+ * Reinstate lost waiting types.
39459+ */
39460+
39461+ if ((fc->waiting_types & type) != type) {
39462+ head = head->prev;
39463+ list_for_each_entry(fence, head, ring) {
39464+ if (&fence->ring == &fc->ring)
39465+ break;
39466+ diff =
39467+ (fc->highest_waiting_sequence -
39468+ fence->sequence) & fc->sequence_mask;
39469+ if (diff > fc->wrap_diff)
39470+ break;
39471+
39472+ fc->waiting_types |=
39473+ fence->waiting_types & ~fence->info.signaled_types;
39474+ }
39475+ }
39476+
39477+ if (wake)
39478+ wake_up_all(&fc->fence_queue);
39479+}
39480+
39481+static void ttm_fence_unring(struct ttm_fence_object *fence)
39482+{
39483+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39484+ unsigned long irq_flags;
39485+
39486+ write_lock_irqsave(&fc->lock, irq_flags);
39487+ list_del_init(&fence->ring);
39488+ write_unlock_irqrestore(&fc->lock, irq_flags);
39489+}
39490+
39491+bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask)
39492+{
39493+ unsigned long flags;
39494+ bool signaled;
39495+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
39496+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39497+
39498+ mask &= fence->fence_type;
39499+ read_lock_irqsave(&fc->lock, flags);
39500+ signaled = (mask & fence->info.signaled_types) == mask;
39501+ read_unlock_irqrestore(&fc->lock, flags);
39502+ if (!signaled && driver->poll) {
39503+ write_lock_irqsave(&fc->lock, flags);
39504+ driver->poll(fence->fdev, fence->fence_class, mask);
39505+ signaled = (mask & fence->info.signaled_types) == mask;
39506+ write_unlock_irqrestore(&fc->lock, flags);
39507+ }
39508+ return signaled;
39509+}
39510+
39511+int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type)
39512+{
39513+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
39514+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39515+ unsigned long irq_flags;
39516+ uint32_t saved_pending_flush;
39517+ uint32_t diff;
39518+ bool call_flush;
39519+
39520+ if (type & ~fence->fence_type) {
39521+ DRM_ERROR("Flush trying to extend fence type, "
39522+ "0x%x, 0x%x\n", type, fence->fence_type);
39523+ return -EINVAL;
39524+ }
39525+
39526+ write_lock_irqsave(&fc->lock, irq_flags);
39527+ fence->waiting_types |= type;
39528+ fc->waiting_types |= fence->waiting_types;
39529+ diff = (fence->sequence - fc->highest_waiting_sequence) &
39530+ fc->sequence_mask;
39531+
39532+ if (diff < fc->wrap_diff)
39533+ fc->highest_waiting_sequence = fence->sequence;
39534+
39535+ /*
39536+ * fence->waiting_types has changed. Determine whether
39537+ * we need to initiate some kind of flush as a result of this.
39538+ */
39539+
39540+ saved_pending_flush = fc->pending_flush;
39541+ if (driver->needed_flush)
39542+ fc->pending_flush |= driver->needed_flush(fence);
39543+
39544+ if (driver->poll)
39545+ driver->poll(fence->fdev, fence->fence_class,
39546+ fence->waiting_types);
39547+
39548+ call_flush = (fc->pending_flush != 0);
39549+ write_unlock_irqrestore(&fc->lock, irq_flags);
39550+
39551+ if (call_flush && driver->flush)
39552+ driver->flush(fence->fdev, fence->fence_class);
39553+
39554+ return 0;
39555+}
39556+
39557+/*
39558+ * Make sure old fence objects are signaled before their fence sequences are
39559+ * wrapped around and reused.
39560+ */
39561+
39562+void ttm_fence_flush_old(struct ttm_fence_device *fdev,
39563+ uint32_t fence_class, uint32_t sequence)
39564+{
39565+ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
39566+ struct ttm_fence_object *fence;
39567+ unsigned long irq_flags;
39568+ const struct ttm_fence_driver *driver = fdev->driver;
39569+ bool call_flush;
39570+
39571+ uint32_t diff;
39572+
39573+ write_lock_irqsave(&fc->lock, irq_flags);
39574+
39575+ list_for_each_entry_reverse(fence, &fc->ring, ring) {
39576+ diff = (sequence - fence->sequence) & fc->sequence_mask;
39577+ if (diff <= fc->flush_diff)
39578+ break;
39579+
39580+ fence->waiting_types = fence->fence_type;
39581+ fc->waiting_types |= fence->fence_type;
39582+
39583+ if (driver->needed_flush)
39584+ fc->pending_flush |= driver->needed_flush(fence);
39585+ }
39586+
39587+ if (driver->poll)
39588+ driver->poll(fdev, fence_class, fc->waiting_types);
39589+
39590+ call_flush = (fc->pending_flush != 0);
39591+ write_unlock_irqrestore(&fc->lock, irq_flags);
39592+
39593+ if (call_flush && driver->flush)
39594+		driver->flush(fdev, fence_class);
39595+
39596+ /*
39597+	 * FIXME: Should we implement a wait here for really old fences?
39598+ */
39599+
39600+}
39601+
39602+int ttm_fence_object_wait(struct ttm_fence_object *fence,
39603+ bool lazy, bool interruptible, uint32_t mask)
39604+{
39605+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
39606+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39607+ int ret = 0;
39608+ unsigned long timeout;
39609+ unsigned long cur_jiffies;
39610+ unsigned long to_jiffies;
39611+
39612+ if (mask & ~fence->fence_type) {
39613+ DRM_ERROR("Wait trying to extend fence type"
39614+ " 0x%08x 0x%08x\n", mask, fence->fence_type);
39615+ BUG();
39616+ return -EINVAL;
39617+ }
39618+
39619+ if (driver->wait)
39620+ return driver->wait(fence, lazy, interruptible, mask);
39621+
39622+ ttm_fence_object_flush(fence, mask);
39623+ retry:
39624+ if (!driver->has_irq ||
39625+ driver->has_irq(fence->fdev, fence->fence_class, mask)) {
39626+
39627+ cur_jiffies = jiffies;
39628+ to_jiffies = fence->timeout_jiffies;
39629+
39630+ timeout = (time_after(to_jiffies, cur_jiffies)) ?
39631+ to_jiffies - cur_jiffies : 1;
39632+
39633+ if (interruptible)
39634+ ret = wait_event_interruptible_timeout
39635+ (fc->fence_queue,
39636+ ttm_fence_object_signaled(fence, mask), timeout);
39637+ else
39638+ ret = wait_event_timeout
39639+ (fc->fence_queue,
39640+ ttm_fence_object_signaled(fence, mask), timeout);
39641+
39642+ if (unlikely(ret == -ERESTARTSYS))
39643+ return -ERESTART;
39644+
39645+ if (unlikely(ret == 0)) {
39646+ if (driver->lockup)
39647+ driver->lockup(fence, mask);
39648+ else
39649+ ttm_fence_lockup(fence, mask);
39650+ goto retry;
39651+ }
39652+
39653+ return 0;
39654+ }
39655+
39656+ return ttm_fence_wait_polling(fence, lazy, interruptible, mask);
39657+}
39658+
39659+int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags,
39660+ uint32_t fence_class, uint32_t type)
39661+{
39662+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
39663+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39664+ unsigned long flags;
39665+ uint32_t sequence;
39666+ unsigned long timeout;
39667+ int ret;
39668+
39669+ ttm_fence_unring(fence);
39670+ ret = driver->emit(fence->fdev,
39671+ fence_class, fence_flags, &sequence, &timeout);
39672+ if (ret)
39673+ return ret;
39674+
39675+ write_lock_irqsave(&fc->lock, flags);
39676+ fence->fence_class = fence_class;
39677+ fence->fence_type = type;
39678+ fence->waiting_types = 0;
39679+ fence->info.signaled_types = 0;
39680+ fence->info.error = 0;
39681+ fence->sequence = sequence;
39682+ fence->timeout_jiffies = timeout;
39683+ if (list_empty(&fc->ring))
39684+ fc->highest_waiting_sequence = sequence - 1;
39685+ list_add_tail(&fence->ring, &fc->ring);
39686+ fc->latest_queued_sequence = sequence;
39687+ write_unlock_irqrestore(&fc->lock, flags);
39688+ return 0;
39689+}
39690+
39691+int ttm_fence_object_init(struct ttm_fence_device *fdev,
39692+ uint32_t fence_class,
39693+ uint32_t type,
39694+ uint32_t create_flags,
39695+ void (*destroy) (struct ttm_fence_object *),
39696+ struct ttm_fence_object *fence)
39697+{
39698+ int ret = 0;
39699+
39700+ kref_init(&fence->kref);
39701+ fence->fence_class = fence_class;
39702+ fence->fence_type = type;
39703+ fence->info.signaled_types = 0;
39704+ fence->waiting_types = 0;
39705+ fence->sequence = 0;
39706+ fence->info.error = 0;
39707+ fence->fdev = fdev;
39708+ fence->destroy = destroy;
39709+ INIT_LIST_HEAD(&fence->ring);
39710+ atomic_inc(&fdev->count);
39711+
39712+ if (create_flags & TTM_FENCE_FLAG_EMIT) {
39713+ ret = ttm_fence_object_emit(fence, create_flags,
39714+ fence->fence_class, type);
39715+ }
39716+
39717+ return ret;
39718+}
39719+
39720+int ttm_fence_object_create(struct ttm_fence_device *fdev,
39721+ uint32_t fence_class,
39722+ uint32_t type,
39723+ uint32_t create_flags,
39724+ struct ttm_fence_object **c_fence)
39725+{
39726+ struct ttm_fence_object *fence;
39727+ int ret;
39728+
39729+ ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*fence), false, false, false);
39730+ if (unlikely(ret != 0)) {
39731+ printk(KERN_ERR "Out of memory creating fence object\n");
39732+ return ret;
39733+ }
39734+
39735+ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
39736+ if (!fence) {
39737+ printk(KERN_ERR "Out of memory creating fence object\n");
39738+ ttm_mem_global_free(fdev->mem_glob, sizeof(*fence), false);
39739+ return -ENOMEM;
39740+ }
39741+
39742+ ret = ttm_fence_object_init(fdev, fence_class, type,
39743+ create_flags, NULL, fence);
39744+ if (ret) {
39745+ ttm_fence_object_unref(&fence);
39746+ return ret;
39747+ }
39748+ *c_fence = fence;
39749+
39750+ return 0;
39751+}
39752+
39753+static void ttm_fence_object_destroy(struct kref *kref)
39754+{
39755+ struct ttm_fence_object *fence =
39756+ container_of(kref, struct ttm_fence_object, kref);
39757+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39758+ unsigned long irq_flags;
39759+
39760+ write_lock_irqsave(&fc->lock, irq_flags);
39761+ list_del_init(&fence->ring);
39762+ write_unlock_irqrestore(&fc->lock, irq_flags);
39763+
39764+ atomic_dec(&fence->fdev->count);
39765+ if (fence->destroy)
39766+ fence->destroy(fence);
39767+ else {
39768+ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*fence), false);
39769+ kfree(fence);
39770+ }
39771+}
39772+
39773+void ttm_fence_device_release(struct ttm_fence_device *fdev)
39774+{
39775+ kfree(fdev->fence_class);
39776+}
39777+
39778+int
39779+ttm_fence_device_init(int num_classes,
39780+ struct ttm_mem_global *mem_glob,
39781+ struct ttm_fence_device *fdev,
39782+ const struct ttm_fence_class_init *init,
39783+ bool replicate_init, const struct ttm_fence_driver *driver)
39784+{
39785+ struct ttm_fence_class_manager *fc;
39786+ const struct ttm_fence_class_init *fci;
39787+ int i;
39788+
39789+ fdev->mem_glob = mem_glob;
39790+ fdev->fence_class = kzalloc(num_classes *
39791+ sizeof(*fdev->fence_class), GFP_KERNEL);
39792+
39793+ if (unlikely(!fdev->fence_class))
39794+ return -ENOMEM;
39795+
39796+ fdev->num_classes = num_classes;
39797+ atomic_set(&fdev->count, 0);
39798+ fdev->driver = driver;
39799+
39800+ for (i = 0; i < fdev->num_classes; ++i) {
39801+ fc = &fdev->fence_class[i];
39802+ fci = &init[(replicate_init) ? 0 : i];
39803+
39804+ fc->wrap_diff = fci->wrap_diff;
39805+ fc->flush_diff = fci->flush_diff;
39806+ fc->sequence_mask = fci->sequence_mask;
39807+
39808+ rwlock_init(&fc->lock);
39809+ INIT_LIST_HEAD(&fc->ring);
39810+ init_waitqueue_head(&fc->fence_queue);
39811+ }
39812+
39813+ return 0;
39814+}
39815+
39816+struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence)
39817+{
39818+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39819+ struct ttm_fence_info tmp;
39820+ unsigned long irq_flags;
39821+
39822+ read_lock_irqsave(&fc->lock, irq_flags);
39823+ tmp = fence->info;
39824+ read_unlock_irqrestore(&fc->lock, irq_flags);
39825+
39826+ return tmp;
39827+}
39828+
39829+void ttm_fence_object_unref(struct ttm_fence_object **p_fence)
39830+{
39831+ struct ttm_fence_object *fence = *p_fence;
39832+
39833+ *p_fence = NULL;
39834+ (void)kref_put(&fence->kref, &ttm_fence_object_destroy);
39835+}
39836+
39837+/*
39838+ * Placement / BO sync object glue.
39839+ */
39840+
39841+bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg)
39842+{
39843+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
39844+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
39845+
39846+ return ttm_fence_object_signaled(fence, fence_types);
39847+}
39848+
39849+int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
39850+ bool lazy, bool interruptible)
39851+{
39852+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
39853+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
39854+
39855+ return ttm_fence_object_wait(fence, lazy, interruptible, fence_types);
39856+}
39857+
39858+int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg)
39859+{
39860+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
39861+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
39862+
39863+ return ttm_fence_object_flush(fence, fence_types);
39864+}
39865+
39866+void ttm_fence_sync_obj_unref(void **sync_obj)
39867+{
39868+ ttm_fence_object_unref((struct ttm_fence_object **)sync_obj);
39869+}
39870+
39871+void *ttm_fence_sync_obj_ref(void *sync_obj)
39872+{
39873+ return (void *)
39874+ ttm_fence_object_ref((struct ttm_fence_object *)sync_obj);
39875+}
39876diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence_api.h b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h
39877new file mode 100644
39878index 0000000..2a4e12b
39879--- /dev/null
39880+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h
39881@@ -0,0 +1,277 @@
39882+/**************************************************************************
39883+ *
39884+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
39885+ * All Rights Reserved.
39886+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
39887+ * All Rights Reserved.
39888+ *
39889+ * Permission is hereby granted, free of charge, to any person obtaining a
39890+ * copy of this software and associated documentation files (the
39891+ * "Software"), to deal in the Software without restriction, including
39892+ * without limitation the rights to use, copy, modify, merge, publish,
39893+ * distribute, sub license, and/or sell copies of the Software, and to
39894+ * permit persons to whom the Software is furnished to do so, subject to
39895+ * the following conditions:
39896+ *
39897+ * The above copyright notice and this permission notice (including the
39898+ * next paragraph) shall be included in all copies or substantial portions
39899+ * of the Software.
39900+ *
39901+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
39902+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
39903+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
39904+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
39905+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
39906+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
39907+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
39908+ *
39909+ **************************************************************************/
39910+/*
39911+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
39912+ */
39913+#ifndef _TTM_FENCE_API_H_
39914+#define _TTM_FENCE_API_H_
39915+
39916+#include <linux/list.h>
39917+#include <linux/kref.h>
39918+
39919+#define TTM_FENCE_FLAG_EMIT (1 << 0)
39920+#define TTM_FENCE_TYPE_EXE (1 << 0)
39921+
39922+struct ttm_fence_device;
39923+
39924+/**
39925+ * struct ttm_fence_info
39926+ *
39929+ * @signaled_types: Bitfield indicating which types are signaled.
39930+ * @error: Last error reported from the device.
39931+ *
39932+ * Used as output from the ttm_fence_get_info
39933+ */
39934+
39935+struct ttm_fence_info {
39936+ uint32_t signaled_types;
39937+ uint32_t error;
39938+};
39939+
39940+/**
39941+ * struct ttm_fence_object
39942+ *
39943+ * @fdev: Pointer to the fence device struct.
39944+ * @kref: Holds the reference count of this fence object.
39945+ * @ring: List head used for the circular list of not-completely
39946+ * signaled fences.
39947+ * @info: Data for fast retrieval using the ttm_fence_get_info()
39948+ * function.
39949+ * @timeout_jiffies: Absolute jiffies value indicating when this fence
39950+ * object times out and, if waited on, calls ttm_fence_lockup
39951+ * to check for and resolve a GPU lockup.
39952+ * @sequence: Fence sequence number.
39953+ * @waiting_types: Types currently waited on.
39954+ * @destroy: Called to free the fence object, when its refcount has
39955+ * reached zero. If NULL, kfree is used.
39956+ *
39957+ * This struct is provided in the driver interface so that drivers can
39958+ * derive from it and create their own fence implementation. All members
39959+ * are private to the fence implementation and the fence driver callbacks.
39960+ * Otherwise a driver may access the derived object using container_of().
39961+ */
39962+
39963+struct ttm_fence_object {
39964+ struct ttm_fence_device *fdev;
39965+ struct kref kref;
39966+ uint32_t fence_class;
39967+ uint32_t fence_type;
39968+
39969+ /*
39970+ * The below fields are protected by the fence class
39971+ * manager spinlock.
39972+ */
39973+
39974+ struct list_head ring;
39975+ struct ttm_fence_info info;
39976+ unsigned long timeout_jiffies;
39977+ uint32_t sequence;
39978+ uint32_t waiting_types;
39979+ void (*destroy) (struct ttm_fence_object *);
39980+};
39981+
39982+/**
39983+ * ttm_fence_object_init
39984+ *
39985+ * @fdev: Pointer to a struct ttm_fence_device.
39986+ * @fence_class: Fence class for this fence.
39987+ * @type: Fence type for this fence.
39988+ * @create_flags: Flags indicating various actions at init time. At this point
39989+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
39990+ * the command stream.
39991+ * @destroy: Destroy function. If NULL, kfree() is used.
39992+ * @fence: The struct ttm_fence_object to initialize.
39993+ *
39994+ * Initialize a pre-allocated fence object. This function, together with the
39995+ * destroy function makes it possible to derive driver-specific fence objects.
39996+ */
39997+
39998+extern int
39999+ttm_fence_object_init(struct ttm_fence_device *fdev,
40000+ uint32_t fence_class,
40001+ uint32_t type,
40002+ uint32_t create_flags,
40003+ void (*destroy) (struct ttm_fence_object * fence),
40004+ struct ttm_fence_object *fence);
40005+
40006+/**
40007+ * ttm_fence_object_create
40008+ *
40009+ * @fdev: Pointer to a struct ttm_fence_device.
40010+ * @fence_class: Fence class for this fence.
40011+ * @type: Fence type for this fence.
40012+ * @create_flags: Flags indicating various actions at init time. At this point
40013+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
40014+ * the command stream.
40015+ * @c_fence: On successful termination, *(@c_fence) will point to the created
40016+ * fence object.
40017+ *
40018+ * Create and initialize a struct ttm_fence_object. The destroy function will
40019+ * be set to kfree().
40020+ */
40021+
40022+extern int
40023+ttm_fence_object_create(struct ttm_fence_device *fdev,
40024+ uint32_t fence_class,
40025+ uint32_t type,
40026+ uint32_t create_flags,
40027+ struct ttm_fence_object **c_fence);
40028+
40029+/**
40030+ * ttm_fence_object_wait
40031+ *
40032+ * @fence: The fence object to wait on.
40033+ * @lazy: Allow sleeps to reduce the cpu-usage if polling.
40034+ * @interruptible: Sleep interruptible when waiting.
40035+ * @type_mask: Wait for the given type_mask to signal.
40036+ *
40037+ * Wait for a fence to signal the given type_mask. The function will
40038+ * perform a fence_flush using type_mask. (See ttm_fence_object_flush).
40039+ *
40040+ * Returns
40041+ * -ERESTART if interrupted by a signal.
40042+ * May return driver-specific error codes if timed-out.
40043+ */
40044+
40045+extern int
40046+ttm_fence_object_wait(struct ttm_fence_object *fence,
40047+ bool lazy, bool interruptible, uint32_t type_mask);
40048+
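A short, hedged sketch of the create/emit/wait/unref lifecycle described above. Fence class 0 and the EXE type are example values only.

/* Sketch only: fence class and type values are examples. */
static int example_sync_to_engine(struct ttm_fence_device *fdev)
{
	struct ttm_fence_object *fence;
	int ret;

	/* Create a fence and emit it into the command stream immediately. */
	ret = ttm_fence_object_create(fdev, 0, TTM_FENCE_TYPE_EXE,
				      TTM_FENCE_FLAG_EMIT, &fence);
	if (unlikely(ret != 0))
		return ret;

	/* Lazy, interruptible wait; -ERESTART indicates a caught signal. */
	ret = ttm_fence_object_wait(fence, true, true, TTM_FENCE_TYPE_EXE);
	ttm_fence_object_unref(&fence);
	return ret;
}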
40049+/**
40050+ * ttm_fence_object_flush
40051+ *
40052+ * @fence: The fence object to flush.
40053+ * @flush_mask: Fence types to flush.
40054+ *
40055+ * Make sure that the given fence eventually signals the
40056+ * types indicated by @flush_mask. Note that this may or may not
40057+ * map to a CPU or GPU flush.
40058+ */
40059+
40060+extern int
40061+ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t flush_mask);
40062+
40063+/**
40064+ * ttm_fence_get_info
40065+ *
40066+ * @fence: The fence object.
40067+ *
40068+ * Copy the info block from the fence while holding relevant locks.
40069+ */
40070+
40071+struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence);
40072+
40073+/**
40074+ * ttm_fence_object_ref
40075+ *
40076+ * @fence: The fence object.
40077+ *
40078+ * Return a ref-counted pointer to the fence object indicated by @fence.
40079+ */
40080+
40081+static inline struct ttm_fence_object *ttm_fence_object_ref(struct
40082+ ttm_fence_object
40083+ *fence)
40084+{
40085+ kref_get(&fence->kref);
40086+ return fence;
40087+}
40088+
40089+/**
40090+ * ttm_fence_object_unref
40091+ *
40092+ * @p_fence: Pointer to a ref-counted pointer to a struct ttm_fence_object.
40093+ *
40094+ * Unreference the fence object pointed to by *(@p_fence), clearing
40095+ * *(p_fence).
40096+ */
40097+
40098+extern void ttm_fence_object_unref(struct ttm_fence_object **p_fence);
40099+
40100+/**
40101+ * ttm_fence_object_signaled
40102+ *
40103+ * @fence: Pointer to the struct ttm_fence_object.
40104+ * @mask: Type mask to check whether signaled.
40105+ *
40106+ * This function checks (without waiting) whether the fence object
40107+ * pointed to by @fence has signaled the types indicated by @mask,
40108+ * and returns 1 if true, 0 if false. This function does NOT perform
40109+ * an implicit fence flush.
40110+ */
40111+
40112+extern bool
40113+ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask);
40114+
40115+/**
40116+ * ttm_fence_class
40117+ *
40118+ * @fence: Pointer to the struct ttm_fence_object.
40119+ *
40120+ * Convenience function that returns the fence class of a struct ttm_fence_object.
40121+ */
40122+
40123+static inline uint32_t ttm_fence_class(const struct ttm_fence_object *fence)
40124+{
40125+ return fence->fence_class;
40126+}
40127+
40128+/**
40129+ * ttm_fence_types
40130+ *
40131+ * @fence: Pointer to the struct ttm_fence_object.
40132+ *
40133+ * Convenience function that returns the fence types of a struct ttm_fence_object.
40134+ */
40135+
40136+static inline uint32_t ttm_fence_types(const struct ttm_fence_object *fence)
40137+{
40138+ return fence->fence_type;
40139+}
40140+
40141+/*
40142+ * The functions below are wrappers to the above functions, with
40143+ * similar names but with sync_obj omitted. These wrappers are intended
40144+ * to be plugged directly into the buffer object driver's sync object
40145+ * API, if the driver chooses to use ttm_fence_objects as buffer object
40146+ * sync objects. In the prototypes below, a sync_obj is cast to a
40147+ * struct ttm_fence_object, whereas a sync_arg is cast to a uint32_t representing
40148+ * a fence_type argument.
40149+ */
40150+
40151+extern bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg);
40152+extern int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
40153+ bool lazy, bool interruptible);
40154+extern int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg);
40155+extern void ttm_fence_sync_obj_unref(void **sync_obj);
40156+extern void *ttm_fence_sync_obj_ref(void *sync_obj);
40157+
40158+#endif
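The wrappers above are intended to be plugged into the buffer-object driver's sync-object hooks. The sketch below assumes the struct ttm_bo_driver in this tree exposes the usual sync_obj_* members; only sync_obj_ref/sync_obj_unref are visible elsewhere in this patch, so the remaining member names are assumptions.

/* Sketch only: the sync_obj_* member names are assumed. */
static struct ttm_bo_driver example_bo_driver = {
	/* ... placement, move and other hooks elided ... */
	.sync_obj_signaled = ttm_fence_sync_obj_signaled,
	.sync_obj_wait = ttm_fence_sync_obj_wait,
	.sync_obj_flush = ttm_fence_sync_obj_flush,
	.sync_obj_unref = ttm_fence_sync_obj_unref,
	.sync_obj_ref = ttm_fence_sync_obj_ref,
};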
40159diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h
40160new file mode 100644
40161index 0000000..2eca494
40162--- /dev/null
40163+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h
40164@@ -0,0 +1,309 @@
40165+/**************************************************************************
40166+ *
40167+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
40168+ * All Rights Reserved.
40169+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
40170+ * All Rights Reserved.
40171+ *
40172+ * Permission is hereby granted, free of charge, to any person obtaining a
40173+ * copy of this software and associated documentation files (the
40174+ * "Software"), to deal in the Software without restriction, including
40175+ * without limitation the rights to use, copy, modify, merge, publish,
40176+ * distribute, sub license, and/or sell copies of the Software, and to
40177+ * permit persons to whom the Software is furnished to do so, subject to
40178+ * the following conditions:
40179+ *
40180+ * The above copyright notice and this permission notice (including the
40181+ * next paragraph) shall be included in all copies or substantial portions
40182+ * of the Software.
40183+ *
40184+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
40185+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
40186+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
40187+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
40188+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
40189+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
40190+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
40191+ *
40192+ **************************************************************************/
40193+/*
40194+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
40195+ */
40196+#ifndef _TTM_FENCE_DRIVER_H_
40197+#define _TTM_FENCE_DRIVER_H_
40198+
40199+#include <linux/kref.h>
40200+#include <linux/spinlock.h>
40201+#include <linux/wait.h>
40202+#include "ttm_fence_api.h"
40203+#include "ttm_memory.h"
40204+
40205+/** @file ttm_fence_driver.h
40206+ *
40207+ * Definitions needed for a driver implementing the
40208+ * ttm_fence subsystem.
40209+ */
40210+
40211+/**
40212+ * struct ttm_fence_class_manager:
40213+ *
40214+ * @wrap_diff: Sequence difference to catch 32-bit wrapping.
40215+ * if (seqa - seqb) > @wrap_diff, then seqa < seqb.
40216+ * @flush_diff: Sequence difference to trigger fence flush.
40217+ * if (cur_seq - seqa) > @flush_diff, then consider fence object with
40218+ * seqa as old and needing a flush.
40219+ * @sequence_mask: Mask of valid bits in a fence sequence.
40220+ * @lock: Lock protecting this struct as well as fence objects
40221+ * associated with this struct.
40222+ * @ring: Circular sequence-ordered list of fence objects.
40223+ * @pending_flush: Fence types currently needing a flush.
40224+ * @waiting_types: Fence types that are currently waited for.
40225+ * @fence_queue: Queue of waiters on fences belonging to this fence class.
40226+ * @highest_waiting_sequence: Sequence number of the fence with highest sequence
40227+ * number and that is waited for.
40228+ * @latest_queued_sequence: Sequence number of the fence latest queued on the ring.
40229+ */
40230+
40231+struct ttm_fence_class_manager {
40232+
40233+ /*
40234+ * Unprotected constant members.
40235+ */
40236+
40237+ uint32_t wrap_diff;
40238+ uint32_t flush_diff;
40239+ uint32_t sequence_mask;
40240+
40241+ /*
40242+ * The rwlock protects this structure as well as
40243+ * the data in all fence objects belonging to this
40244+ * class. This should be OK as most fence objects are
40245+ * only read from once they're created.
40246+ */
40247+
40248+ rwlock_t lock;
40249+ struct list_head ring;
40250+ uint32_t pending_flush;
40251+ uint32_t waiting_types;
40252+ wait_queue_head_t fence_queue;
40253+ uint32_t highest_waiting_sequence;
40254+ uint32_t latest_queued_sequence;
40255+};
40256+
40257+/**
40258+ * struct ttm_fence_device
40259+ *
40260+ * @fence_class: Array of fence class managers.
40261+ * @num_classes: Array dimension of @fence_class.
40262+ * @count: Current number of fence objects for statistics.
40263+ * @driver: Driver struct.
40264+ *
40265+ * Provided in the driver interface so that the driver can derive
40266+ * from this struct for its driver_private, and accordingly
40267+ * access the driver_private from the fence driver callbacks.
40268+ *
40269+ * All members except "count" are initialized at creation and
40270+ * never touched after that. No protection needed.
40271+ *
40272+ * This struct is private to the fence implementation and to the fence
40273+ * driver callbacks, and may otherwise be used by drivers only to
40274+ * obtain the derived device_private object using container_of().
40275+ */
40276+
40277+struct ttm_fence_device {
40278+ struct ttm_mem_global *mem_glob;
40279+ struct ttm_fence_class_manager *fence_class;
40280+ uint32_t num_classes;
40281+ atomic_t count;
40282+ const struct ttm_fence_driver *driver;
40283+};
40284+
40285+/**
40286+ * struct ttm_fence_class_init
40287+ *
40288+ * @wrap_diff: Fence sequence number wrap indicator. If
40289+ * (sequence1 - sequence2) > @wrap_diff, then sequence1 is
40290+ * considered to be older than sequence2.
40291+ * @flush_diff: Fence sequence number flush indicator.
40292+ * If a non-completely-signaled fence has a fence sequence number
40293+ * sequence1 and (sequence1 - current_emit_sequence) > @flush_diff,
40294+ * the fence is considered too old and it will be flushed upon the
40295+ * next call of ttm_fence_flush_old(), to make sure no fences with
40296+ * stale sequence numbers remains unsignaled. @flush_diff should
40297+ * stale sequence numbers remain unsignaled. @flush_diff should
40298+ * @sequence_mask: Mask with valid bits of the fence sequence
40299+ * number set to 1.
40300+ *
40301+ * This struct is used as input to ttm_fence_device_init.
40302+ */
40303+
40304+struct ttm_fence_class_init {
40305+ uint32_t wrap_diff;
40306+ uint32_t flush_diff;
40307+ uint32_t sequence_mask;
40308+};
40309+
40310+/**
40311+ * struct ttm_fence_driver
40312+ *
40313+ * @has_irq: Called by a potential waiter. Should return 1 if a
40314+ * fence object with indicated parameters is expected to signal
40315+ * automatically, and 0 if the fence implementation needs to
40316+ * repeatedly call @poll to make it signal.
40317+ * @emit: Make sure a fence with the given parameters is
40318+ * present in the indicated command stream. Return its sequence number
40319+ * in "breadcrumb".
40320+ * @poll: Check and report sequences of the given "fence_class"
40321+ * that have signaled "types".
40322+ * @flush: Make sure that the types indicated by the bitfield
40323+ * ttm_fence_class_manager::pending_flush will eventually
40324+ * signal. These bits have been put together using the
40325+ * result from the needed_flush function described below.
40326+ * @needed_flush: Given the fence_class and fence_types indicated by
40327+ * "fence", and the last received fence sequence of this
40328+ * fence class, indicate what types need a fence flush to
40329+ * signal. Return as a bitfield.
40330+ * @wait: Set to non-NULL if the driver wants to override the fence
40331+ * wait implementation. Return 0 on success, -EBUSY on failure,
40332+ * and -ERESTART if interruptible and a signal is pending.
40333+ * @signaled: Driver callback that is called whenever a
40334+ * ttm_fence_object::signaled_types has changed status.
40335+ * This function is called from atomic context,
40336+ * with the ttm_fence_class_manager::lock held in write mode.
40337+ * @lockup: Driver callback that is called whenever a wait has exceeded
40338+ * the lifetime of a fence object.
40339+ * If there is a GPU lockup,
40340+ * this function should, if possible, reset the GPU,
40341+ * call the ttm_fence_handler with an error status, and
40342+ * return. If no lockup was detected, simply extend the
40343+ * fence timeout_jiffies and return. The driver might
40344+ * want to protect the lockup check with a mutex and cache a
40345+ * non-locked-up status for a while to avoid an excessive
40346+ * amount of lockup checks from every waiting thread.
40347+ */
40348+
40349+struct ttm_fence_driver {
40350+ bool (*has_irq) (struct ttm_fence_device * fdev,
40351+ uint32_t fence_class, uint32_t flags);
40352+ int (*emit) (struct ttm_fence_device * fdev,
40353+ uint32_t fence_class,
40354+ uint32_t flags,
40355+ uint32_t * breadcrumb, unsigned long *timeout_jiffies);
40356+ void (*flush) (struct ttm_fence_device * fdev, uint32_t fence_class);
40357+ void (*poll) (struct ttm_fence_device * fdev,
40358+ uint32_t fence_class, uint32_t types);
40359+ uint32_t(*needed_flush)
40360+ (struct ttm_fence_object * fence);
40361+ int (*wait) (struct ttm_fence_object * fence, bool lazy,
40362+ bool interruptible, uint32_t mask);
40363+ void (*signaled) (struct ttm_fence_object * fence);
40364+ void (*lockup) (struct ttm_fence_object * fence, uint32_t fence_types);
40365+};
40366+
40367+/**
40368+ * function ttm_fence_device_init
40369+ *
40370+ * @num_classes: Number of fence classes for this fence implementation.
40371+ * @mem_global: Pointer to the global memory accounting info.
40372+ * @fdev: Pointer to an uninitialised struct ttm_fence_device.
40373+ * @init: Array of initialization info for each fence class.
40374+ * @replicate_init: Use the first @init initialization info for all classes.
40375+ * @driver: Driver callbacks.
40376+ *
40377+ * Initialize a struct ttm_fence_driver structure. Returns -ENOMEM if
40378+ * out-of-memory. Otherwise returns 0.
40379+ */
40380+extern int
40381+ttm_fence_device_init(int num_classes,
40382+ struct ttm_mem_global *mem_glob,
40383+ struct ttm_fence_device *fdev,
40384+ const struct ttm_fence_class_init *init,
40385+ bool replicate_init,
40386+ const struct ttm_fence_driver *driver);
40387+
40388+/**
40389+ * function ttm_fence_device_release
40390+ *
40391+ * @fdev: Pointer to the fence device.
40392+ *
40393+ * Release all resources held by a fence device. Note that before
40394+ * this function is called, the caller must have made sure all fence
40395+ * objects belonging to this fence device are completely signaled.
40396+ */
40397+
40398+extern void ttm_fence_device_release(struct ttm_fence_device *fdev);
40399+
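/*
 * Illustrative sketch (not part of the original patch): intended lifecycle of
 * a ttm_fence_device, assuming the my_fence_driver table sketched above, a
 * single fence class and an already initialised struct ttm_mem_global. The
 * contents of struct ttm_fence_class_init are driver specific and are left
 * zero-initialised here purely for illustration.
 */
static int my_fence_setup(struct ttm_mem_global *mem_glob,
			  struct ttm_fence_device *fdev)
{
	struct ttm_fence_class_init init = {0};

	return ttm_fence_device_init(1 /* num_classes */, mem_glob, fdev,
				     &init, true /* replicate_init */,
				     &my_fence_driver);
}

static void my_fence_teardown(struct ttm_fence_device *fdev)
{
	/* All fence objects must be fully signaled by now (see above). */
	ttm_fence_device_release(fdev);
}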
40400+/**
40401+ * ttm_fence_handler - the fence handler.
40402+ *
40403+ * @fdev: Pointer to the fence device.
40404+ * @fence_class: Fence class that signals.
40405+ * @sequence: Signaled sequence.
40406+ * @type: Types that signal.
40407+ * @error: Error from the engine.
40408+ *
40409+ * This function signals all fences with a sequence previous to the
40410+ * @sequence argument, and belonging to @fence_class. The signaled fence
40411+ * types are provided in @type. If error is non-zero, the error member
40412+ * of the fence with sequence = @sequence is set to @error. This value
40413+ * may be reported back to user-space, indicating, for example, an illegal
40414+ * 3D command or illegal mpeg data.
40415+ *
40416+ * This function is typically called from the driver::poll method when the
40417+ * command sequence preceding the fence marker has executed. It should be
40418+ * called with the ttm_fence_class_manager::lock held in write mode and
40419+ * may be called from interrupt context.
40420+ */
40421+
40422+extern void
40423+ttm_fence_handler(struct ttm_fence_device *fdev,
40424+ uint32_t fence_class,
40425+ uint32_t sequence, uint32_t type, uint32_t error);
40426+
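/*
 * Illustrative sketch (not part of the original patch): reporting a completed
 * sequence from a driver's interrupt path. The caller is assumed to already
 * hold the relevant ttm_fence_class_manager::lock in write mode, as required
 * above; the single driver-defined fence-type bit is an assumption.
 */
static void my_report_completed_seq(struct ttm_fence_device *fdev,
				    uint32_t sequence)
{
	ttm_fence_handler(fdev, 0 /* fence_class */, sequence,
			  1 /* driver-defined "command executed" type bit */,
			  0 /* no engine error */);
}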
40427+/**
40428+ * ttm_fence_driver_from_dev
40429+ *
40430+ * @fdev: The ttm fence device.
40431+ *
40432+ * Returns a pointer to the fence driver struct.
40433+ */
40434+
40435+static inline const struct ttm_fence_driver *ttm_fence_driver_from_dev(struct
40436+ ttm_fence_device
40437+ *fdev)
40438+{
40439+ return fdev->driver;
40440+}
40441+
40442+/**
40443+ * ttm_fence_driver
40444+ *
40445+ * @fence: Pointer to a ttm fence object.
40446+ *
40447+ * Returns a pointer to the fence driver struct.
40448+ */
40449+
40450+static inline const struct ttm_fence_driver *ttm_fence_driver(struct
40451+ ttm_fence_object
40452+ *fence)
40453+{
40454+ return ttm_fence_driver_from_dev(fence->fdev);
40455+}
40456+
40457+/**
40458+ * ttm_fence_fc
40459+ *
40460+ * @fence: Pointer to a ttm fence object.
40461+ *
40462+ * Returns a pointer to the struct ttm_fence_class_manager for the
40463+ * fence class of @fence.
40464+ */
40465+
40466+static inline struct ttm_fence_class_manager *ttm_fence_fc(struct
40467+ ttm_fence_object
40468+ *fence)
40469+{
40470+ return &fence->fdev->fence_class[fence->fence_class];
40471+}
40472+
40473+#endif
40474diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence_user.c b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c
40475new file mode 100644
40476index 0000000..d9bb787
40477--- /dev/null
40478+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c
40479@@ -0,0 +1,242 @@
40480+/**************************************************************************
40481+ *
40482+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
40483+ * All Rights Reserved.
40484+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
40485+ * All Rights Reserved.
40486+ *
40487+ * Permission is hereby granted, free of charge, to any person obtaining a
40488+ * copy of this software and associated documentation files (the
40489+ * "Software"), to deal in the Software without restriction, including
40490+ * without limitation the rights to use, copy, modify, merge, publish,
40491+ * distribute, sub license, and/or sell copies of the Software, and to
40492+ * permit persons to whom the Software is furnished to do so, subject to
40493+ * the following conditions:
40494+ *
40495+ * The above copyright notice and this permission notice (including the
40496+ * next paragraph) shall be included in all copies or substantial portions
40497+ * of the Software.
40498+ *
40499+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
40500+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
40501+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
40502+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
40503+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
40504+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
40505+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
40506+ *
40507+ **************************************************************************/
40508+/*
40509+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
40510+ */
40511+
40512+#include <drm/drmP.h>
40513+#include "ttm/ttm_fence_user.h"
40514+#include "ttm/ttm_object.h"
40515+#include "ttm/ttm_fence_driver.h"
40516+#include "ttm/ttm_userobj_api.h"
40517+
40518+/**
40519+ * struct ttm_fence_user_object
40520+ *
40521+ * @base: The base object used for user-space visibility and refcounting.
40522+ *
40523+ * @fence: The fence object itself.
40524+ *
40525+ */
40526+
40527+struct ttm_fence_user_object {
40528+ struct ttm_base_object base;
40529+ struct ttm_fence_object fence;
40530+};
40531+
40532+static struct ttm_fence_user_object *ttm_fence_user_object_lookup(struct
40533+ ttm_object_file
40534+ *tfile,
40535+ uint32_t
40536+ handle)
40537+{
40538+ struct ttm_base_object *base;
40539+
40540+ base = ttm_base_object_lookup(tfile, handle);
40541+ if (unlikely(base == NULL)) {
40542+ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
40543+ (unsigned long)handle);
40544+ return NULL;
40545+ }
40546+
40547+ if (unlikely(base->object_type != ttm_fence_type)) {
40548+ ttm_base_object_unref(&base);
40549+ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
40550+ (unsigned long)handle);
40551+ return NULL;
40552+ }
40553+
40554+ return container_of(base, struct ttm_fence_user_object, base);
40555+}
40556+
40557+/*
40558+ * The fence object destructor.
40559+ */
40560+
40561+static void ttm_fence_user_destroy(struct ttm_fence_object *fence)
40562+{
40563+ struct ttm_fence_user_object *ufence =
40564+ container_of(fence, struct ttm_fence_user_object, fence);
40565+
40566+ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence), false);
40567+ kfree(ufence);
40568+}
40569+
40570+/*
40571+ * The base object destructor. We basically only unreference the
40572+ * attached fence object.
40573+ */
40574+
40575+static void ttm_fence_user_release(struct ttm_base_object **p_base)
40576+{
40577+ struct ttm_fence_user_object *ufence;
40578+ struct ttm_base_object *base = *p_base;
40579+ struct ttm_fence_object *fence;
40580+
40581+ *p_base = NULL;
40582+
40583+ if (unlikely(base == NULL))
40584+ return;
40585+
40586+ ufence = container_of(base, struct ttm_fence_user_object, base);
40587+ fence = &ufence->fence;
40588+ ttm_fence_object_unref(&fence);
40589+}
40590+
40591+int
40592+ttm_fence_user_create(struct ttm_fence_device *fdev,
40593+ struct ttm_object_file *tfile,
40594+ uint32_t fence_class,
40595+ uint32_t fence_types,
40596+ uint32_t create_flags,
40597+ struct ttm_fence_object **fence, uint32_t * user_handle)
40598+{
40599+ int ret;
40600+ struct ttm_fence_object *tmp;
40601+ struct ttm_fence_user_object *ufence;
40602+
40603+ ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*ufence), false, false, false);
40604+ if (unlikely(ret != 0))
40605+ return -ENOMEM;
40606+
40607+ ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
40608+ if (unlikely(ufence == NULL)) {
40609+ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
40610+ return -ENOMEM;
40611+ }
40612+
40613+ ret = ttm_fence_object_init(fdev,
40614+ fence_class,
40615+ fence_types, create_flags,
40616+ &ttm_fence_user_destroy, &ufence->fence);
40617+
40618+ if (unlikely(ret != 0))
40619+ goto out_err0;
40620+
40621+ /*
40622+ * One fence ref is held by the fence ptr we return.
40623+ * The other one by the base object. Need to up the
40624+ * fence refcount before we publish this object to
40625+ * user-space.
40626+ */
40627+
40628+ tmp = ttm_fence_object_ref(&ufence->fence);
40629+ ret = ttm_base_object_init(tfile, &ufence->base,
40630+ false, ttm_fence_type,
40631+ &ttm_fence_user_release, NULL);
40632+
40633+ if (unlikely(ret != 0))
40634+ goto out_err1;
40635+
40636+ *fence = &ufence->fence;
40637+ *user_handle = ufence->base.hash.key;
40638+
40639+ return 0;
40640+ out_err1:
40641+ ttm_fence_object_unref(&tmp);
40642+ tmp = &ufence->fence;
40643+ ttm_fence_object_unref(&tmp);
40644+ return ret;
40645+ out_err0:
40646+ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
40647+ kfree(ufence);
40648+ return ret;
40649+}
40650+
40651+int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data)
40652+{
40653+ int ret;
40654+ union ttm_fence_signaled_arg *arg = data;
40655+ struct ttm_fence_object *fence;
40656+ struct ttm_fence_info info;
40657+ struct ttm_fence_user_object *ufence;
40658+ struct ttm_base_object *base;
40659+ ret = 0;
40660+
40661+ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
40662+ if (unlikely(ufence == NULL))
40663+ return -EINVAL;
40664+
40665+ fence = &ufence->fence;
40666+
40667+ if (arg->req.flush) {
40668+ ret = ttm_fence_object_flush(fence, arg->req.fence_type);
40669+ if (unlikely(ret != 0))
40670+ goto out;
40671+ }
40672+
40673+ info = ttm_fence_get_info(fence);
40674+ arg->rep.signaled_types = info.signaled_types;
40675+ arg->rep.fence_error = info.error;
40676+
40677+ out:
40678+ base = &ufence->base;
40679+ ttm_base_object_unref(&base);
40680+ return ret;
40681+}
40682+
40683+int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data)
40684+{
40685+ int ret;
40686+ union ttm_fence_finish_arg *arg = data;
40687+ struct ttm_fence_user_object *ufence;
40688+ struct ttm_base_object *base;
40689+ struct ttm_fence_object *fence;
40690+ ret = 0;
40691+
40692+ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
40693+ if (unlikely(ufence == NULL))
40694+ return -EINVAL;
40695+
40696+ fence = &ufence->fence;
40697+
40698+ ret = ttm_fence_object_wait(fence,
40699+ arg->req.mode & TTM_FENCE_FINISH_MODE_LAZY,
40700+ true, arg->req.fence_type);
40701+ if (likely(ret == 0)) {
40702+ struct ttm_fence_info info = ttm_fence_get_info(fence);
40703+
40704+ arg->rep.signaled_types = info.signaled_types;
40705+ arg->rep.fence_error = info.error;
40706+ }
40707+
40708+ base = &ufence->base;
40709+ ttm_base_object_unref(&base);
40710+
40711+ return ret;
40712+}
40713+
40714+int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data)
40715+{
40716+ struct ttm_fence_unref_arg *arg = data;
40717+ int ret = 0;
40718+
40719+ ret = ttm_ref_object_base_unref(tfile, arg->handle, ttm_fence_type);
40720+ return ret;
40721+}
40722diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence_user.h b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h
40723new file mode 100644
40724index 0000000..0cad597
40725--- /dev/null
40726+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h
40727@@ -0,0 +1,147 @@
40728+/**************************************************************************
40729+ *
40730+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
40731+ * All Rights Reserved.
40732+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
40733+ * All Rights Reserved.
40734+ *
40735+ * Permission is hereby granted, free of charge, to any person obtaining a
40736+ * copy of this software and associated documentation files (the
40737+ * "Software"), to deal in the Software without restriction, including
40738+ * without limitation the rights to use, copy, modify, merge, publish,
40739+ * distribute, sub license, and/or sell copies of the Software, and to
40740+ * permit persons to whom the Software is furnished to do so, subject to
40741+ * the following conditions:
40742+ *
40743+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
40744+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
40745+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
40746+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
40747+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
40748+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
40749+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
40750+ *
40751+ * The above copyright notice and this permission notice (including the
40752+ * next paragraph) shall be included in all copies or substantial portions
40753+ * of the Software.
40754+ *
40755+ **************************************************************************/
40756+/*
40757+ * Authors:
40758+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
40759+ */
40760+
40761+#ifndef TTM_FENCE_USER_H
40762+#define TTM_FENCE_USER_H
40763+
40764+#if !defined(__KERNEL__) && !defined(_KERNEL)
40765+#include <stdint.h>
40766+#endif
40767+
40768+#define TTM_FENCE_MAJOR 0
40769+#define TTM_FENCE_MINOR 1
40770+#define TTM_FENCE_PL 0
40771+#define TTM_FENCE_DATE "080819"
40772+
40773+/**
40774+ * struct ttm_fence_signaled_req
40775+ *
40776+ * @handle: Handle to the fence object. Input.
40777+ *
40778+ * @fence_type: Fence types we want to flush. Input.
40779+ *
40780+ * @flush: Boolean. Flush the indicated fence_types. Input.
40781+ *
40782+ * Argument to the TTM_FENCE_SIGNALED ioctl.
40783+ */
40784+
40785+struct ttm_fence_signaled_req {
40786+ uint32_t handle;
40787+ uint32_t fence_type;
40788+ int32_t flush;
40789+ uint32_t pad64;
40790+};
40791+
40792+/**
40793+ * struct ttm_fence_rep
40794+ *
40795+ * @signaled_types: Fence type that has signaled.
40796+ *
40797+ * @fence_error: Command execution error.
40798+ * Hardware errors that are consequences of the execution
40799+ * of the command stream preceding the fence are reported
40800+ * here.
40801+ *
40802+ * Output argument to the TTM_FENCE_SIGNALED and
40803+ * TTM_FENCE_FINISH ioctls.
40804+ */
40805+
40806+struct ttm_fence_rep {
40807+ uint32_t signaled_types;
40808+ uint32_t fence_error;
40809+};
40810+
40811+union ttm_fence_signaled_arg {
40812+ struct ttm_fence_signaled_req req;
40813+ struct ttm_fence_rep rep;
40814+};
40815+
40816+/*
40817+ * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
40818+ *
40819+ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
40820+ * wait.
40821+ *
40822+ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
40823+ * but return -EBUSY if the buffer is busy.
40824+ */
40825+
40826+#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
40827+#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
40828+
40829+/**
40830+ * struct ttm_fence_finish_req
40831+ *
40832+ * @handle: Handle to the fence object. Input.
40833+ *
40834+ * @fence_type: Fence types we want to finish.
40835+ *
40836+ * @mode: Wait mode.
40837+ *
40838+ * Input to the TTM_FENCE_FINISH ioctl.
40839+ */
40840+
40841+struct ttm_fence_finish_req {
40842+ uint32_t handle;
40843+ uint32_t fence_type;
40844+ uint32_t mode;
40845+ uint32_t pad64;
40846+};
40847+
40848+union ttm_fence_finish_arg {
40849+ struct ttm_fence_finish_req req;
40850+ struct ttm_fence_rep rep;
40851+};
40852+
40853+/**
40854+ * struct ttm_fence_unref_arg
40855+ *
40856+ * @handle: Handle to the fence object.
40857+ *
40858+ * Argument to the TTM_FENCE_UNREF ioctl.
40859+ */
40860+
40861+struct ttm_fence_unref_arg {
40862+ uint32_t handle;
40863+ uint32_t pad64;
40864+};
40865+
40866+/*
40867+ * Ioctl offsets from extension start.
40868+ */
40869+
40870+#define TTM_FENCE_SIGNALED 0x01
40871+#define TTM_FENCE_FINISH 0x02
40872+#define TTM_FENCE_UNREF 0x03
40873+
40874+#endif
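/*
 * Illustrative sketch (not part of the original patch): filling in the
 * TTM_FENCE_FINISH argument union from user space. How the final request code
 * is composed from the extension start offset, and whether libdrm's
 * drmCommandWriteRead() is the right transport, depend on the psb DRM
 * extension mechanism and are assumptions here.
 */
#include <string.h>
#include <xf86drm.h>	/* drmCommandWriteRead(), from libdrm (assumed transport) */

static int my_fence_finish(int drm_fd, unsigned long fence_ext_base,
			   uint32_t handle, uint32_t fence_type,
			   uint32_t *signaled_types)
{
	union ttm_fence_finish_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req.handle = handle;
	arg.req.fence_type = fence_type;
	arg.req.mode = TTM_FENCE_FINISH_MODE_LAZY;

	ret = drmCommandWriteRead(drm_fd, fence_ext_base + TTM_FENCE_FINISH,
				  &arg, sizeof(arg));
	if (ret == 0)
		*signaled_types = arg.rep.signaled_types;
	return ret;
}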
40875diff --git a/drivers/gpu/drm/psb/ttm/ttm_lock.c b/drivers/gpu/drm/psb/ttm/ttm_lock.c
40876new file mode 100644
40877index 0000000..a3b503f
40878--- /dev/null
40879+++ b/drivers/gpu/drm/psb/ttm/ttm_lock.c
40880@@ -0,0 +1,162 @@
40881+/**************************************************************************
40882+ *
40883+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
40884+ * All Rights Reserved.
40885+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
40886+ * All Rights Reserved.
40887+ *
40888+ * Permission is hereby granted, free of charge, to any person obtaining a
40889+ * copy of this software and associated documentation files (the
40890+ * "Software"), to deal in the Software without restriction, including
40891+ * without limitation the rights to use, copy, modify, merge, publish,
40892+ * distribute, sub license, and/or sell copies of the Software, and to
40893+ * permit persons to whom the Software is furnished to do so, subject to
40894+ * the following conditions:
40895+ *
40896+ * The above copyright notice and this permission notice (including the
40897+ * next paragraph) shall be included in all copies or substantial portions
40898+ * of the Software.
40899+ *
40900+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
40901+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
40902+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
40903+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
40904+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
40905+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
40906+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
40907+ *
40908+ **************************************************************************/
40909+/*
40910+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
40911+ */
40912+
40913+#include "ttm/ttm_lock.h"
40914+#include <asm/atomic.h>
40915+#include <linux/errno.h>
40916+#include <linux/wait.h>
40917+#include <linux/sched.h>
40918+
40919+void ttm_lock_init(struct ttm_lock *lock)
40920+{
40921+ init_waitqueue_head(&lock->queue);
40922+ atomic_set(&lock->write_lock_pending, 0);
40923+ atomic_set(&lock->readers, 0);
40924+ lock->kill_takers = false;
40925+ lock->signal = SIGKILL;
40926+}
40927+
40928+void ttm_read_unlock(struct ttm_lock *lock)
40929+{
40930+ if (atomic_dec_and_test(&lock->readers))
40931+ wake_up_all(&lock->queue);
40932+}
40933+
40934+int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
40935+{
40936+ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
40937+ int ret;
40938+
40939+ if (!interruptible) {
40940+ wait_event(lock->queue,
40941+ atomic_read(&lock->write_lock_pending) == 0);
40942+ continue;
40943+ }
40944+ ret = wait_event_interruptible
40945+ (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
40946+ if (ret)
40947+ return -ERESTART;
40948+ }
40949+
40950+ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
40951+ int ret;
40952+ if (!interruptible) {
40953+ wait_event(lock->queue,
40954+ atomic_read(&lock->readers) != -1);
40955+ continue;
40956+ }
40957+ ret = wait_event_interruptible
40958+ (lock->queue, atomic_read(&lock->readers) != -1);
40959+ if (ret)
40960+ return -ERESTART;
40961+ }
40962+
40963+ if (unlikely(lock->kill_takers)) {
40964+ send_sig(lock->signal, current, 0);
40965+ ttm_read_unlock(lock);
40966+ return -ERESTART;
40967+ }
40968+
40969+ return 0;
40970+}
40971+
40972+static int __ttm_write_unlock(struct ttm_lock *lock)
40973+{
40974+ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
40975+ return -EINVAL;
40976+ wake_up_all(&lock->queue);
40977+ return 0;
40978+}
40979+
40980+static void ttm_write_lock_remove(struct ttm_base_object **p_base)
40981+{
40982+ struct ttm_base_object *base = *p_base;
40983+ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
40984+ int ret;
40985+
40986+ *p_base = NULL;
40987+ ret = __ttm_write_unlock(lock);
40988+ BUG_ON(ret != 0);
40989+}
40990+
40991+int ttm_write_lock(struct ttm_lock *lock,
40992+ bool interruptible,
40993+ struct ttm_object_file *tfile)
40994+{
40995+ int ret = 0;
40996+
40997+ atomic_inc(&lock->write_lock_pending);
40998+
40999+ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
41000+ if (!interruptible) {
41001+ wait_event(lock->queue,
41002+ atomic_read(&lock->readers) == 0);
41003+ continue;
41004+ }
41005+ ret = wait_event_interruptible
41006+ (lock->queue, atomic_read(&lock->readers) == 0);
41007+
41008+ if (ret) {
41009+ if (atomic_dec_and_test(&lock->write_lock_pending))
41010+ wake_up_all(&lock->queue);
41011+ return -ERESTART;
41012+ }
41013+ }
41014+
41015+ if (atomic_dec_and_test(&lock->write_lock_pending))
41016+ wake_up_all(&lock->queue);
41017+
41018+ if (unlikely(lock->kill_takers)) {
41019+ send_sig(lock->signal, current, 0);
41020+ __ttm_write_unlock(lock);
41021+ return -ERESTART;
41022+ }
41023+
41024+ /*
41025+ * Add a base-object, the destructor of which will
41026+ * make sure the lock is released if the client dies
41027+ * while holding it.
41028+ */
41029+
41030+ ret = ttm_base_object_init(tfile, &lock->base, false,
41031+ ttm_lock_type, &ttm_write_lock_remove, NULL);
41032+ if (ret)
41033+ (void)__ttm_write_unlock(lock);
41034+
41035+ return ret;
41036+}
41037+
41038+int ttm_write_unlock(struct ttm_lock *lock, struct ttm_object_file *tfile)
41039+{
41040+ return ttm_ref_object_base_unref(tfile,
41041+ lock->base.hash.key, TTM_REF_USAGE);
41042+}
41043diff --git a/drivers/gpu/drm/psb/ttm/ttm_lock.h b/drivers/gpu/drm/psb/ttm/ttm_lock.h
41044new file mode 100644
41045index 0000000..0169ad7
41046--- /dev/null
41047+++ b/drivers/gpu/drm/psb/ttm/ttm_lock.h
41048@@ -0,0 +1,181 @@
41049+/**************************************************************************
41050+ *
41051+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
41052+ * All Rights Reserved.
41053+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
41054+ * All Rights Reserved.
41055+ *
41056+ * Permission is hereby granted, free of charge, to any person obtaining a
41057+ * copy of this software and associated documentation files (the
41058+ * "Software"), to deal in the Software without restriction, including
41059+ * without limitation the rights to use, copy, modify, merge, publish,
41060+ * distribute, sub license, and/or sell copies of the Software, and to
41061+ * permit persons to whom the Software is furnished to do so, subject to
41062+ * the following conditions:
41063+ *
41064+ * The above copyright notice and this permission notice (including the
41065+ * next paragraph) shall be included in all copies or substantial portions
41066+ * of the Software.
41067+ *
41068+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41069+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
41070+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
41071+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
41072+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
41073+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
41074+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
41075+ *
41076+ **************************************************************************/
41077+/*
41078+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
41079+ */
41080+
41081+/** @file ttm_lock.h
41082+ * This file implements a simple replacement for the buffer manager use
41083+ * of the DRM heavyweight hardware lock.
41084+ * The lock is a read-write lock. Taking it in read mode is fast, and
41085+ * intended for in-kernel use only.
41086+ * Taking it in write mode is slow.
41087+ *
41088+ * The write mode is used only when there is a need to block all
41089+ * user-space processes from validating buffers.
41090+ * It's allowed to leave kernel space with the write lock held.
41091+ * If a user-space process dies while having the write-lock,
41092+ * it will be released during the file descriptor release.
41093+ *
41094+ * The read lock is typically placed at the start of an IOCTL- or
41095+ * user-space callable function that may end up allocating a memory area.
41096+ * This includes setstatus, super-ioctls and faults; the latter may move
41097+ * unmappable regions to mappable. It's a bug to leave kernel space with the
41098+ * read lock held.
41099+ *
41100+ * Both read- and write lock taking is interruptible for low signal-delivery
41101+ * latency. The locking functions will return -ERESTART if interrupted by a
41102+ * signal.
41103+ *
41104+ * Locking order: The lock should be taken BEFORE any TTM mutexes
41105+ * or spinlocks.
41106+ *
41107+ * Typical usages:
41108+ * a) VT-switching, when we want to clean VRAM and perhaps AGP. The lock
41109+ * stops it from being repopulated.
41110+ * b) out-of-VRAM or out-of-aperture space, in which case the process
41111+ * receiving the out-of-space notification may take the lock in write mode
41112+ * and evict all buffers prior to start validating its own buffers.
41113+ */
41114+
41115+#ifndef _TTM_LOCK_H_
41116+#define _TTM_LOCK_H_
41117+
41118+#include "ttm_object.h"
41119+#include <linux/wait.h>
41120+#include <asm/atomic.h>
41121+
41122+/**
41123+ * struct ttm_lock
41124+ *
41125+ * @base: ttm base object used solely to release the lock if the client
41126+ * holding the lock dies.
41127+ * @queue: Queue for processes waiting for lock change-of-status.
41128+ * @write_lock_pending: Flag indicating that a write-lock is pending. Avoids
41129+ * write lock starvation.
41130+ * @readers: The lock status: A negative number indicates that a write lock is
41131+ * held. Positive values indicate number of concurrent readers.
41132+ */
41133+
41134+struct ttm_lock {
41135+ struct ttm_base_object base;
41136+ wait_queue_head_t queue;
41137+ atomic_t write_lock_pending;
41138+ atomic_t readers;
41139+ bool kill_takers;
41140+ int signal;
41141+};
41142+
41143+/**
41144+ * ttm_lock_init
41145+ *
41146+ * @lock: Pointer to a struct ttm_lock
41147+ * Initializes the lock.
41148+ */
41149+extern void ttm_lock_init(struct ttm_lock *lock);
41150+
41151+/**
41152+ * ttm_read_unlock
41153+ *
41154+ * @lock: Pointer to a struct ttm_lock
41155+ *
41156+ * Releases a read lock.
41157+ */
41158+
41159+extern void ttm_read_unlock(struct ttm_lock *lock);
41160+
41161+/**
41162+ * ttm_read_lock
41163+ *
41164+ * @lock: Pointer to a struct ttm_lock
41165+ * @interruptible: Interruptible sleeping while waiting for a lock.
41166+ *
41167+ * Takes the lock in read mode.
41168+ * Returns:
41169+ * -ERESTART If interrupted by a signal and interruptible is true.
41170+ */
41171+
41172+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
41173+
41174+/**
41175+ * ttm_write_lock
41176+ *
41177+ * @lock: Pointer to a struct ttm_lock
41178+ * @interruptible: Interruptible sleeping while waiting for a lock.
41179+ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
41180+ * application taking the lock.
41181+ *
41182+ * Takes the lock in write mode.
41183+ * Returns:
41184+ * -ERESTART If interrupted by a signal and interruptible is true.
41185+ * -ENOMEM: Out of memory when locking.
41186+ */
41187+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible,
41188+ struct ttm_object_file *tfile);
41189+
41190+/**
41191+ * ttm_write_unlock
41192+ *
41193+ * @lock: Pointer to a struct ttm_lock
41194+ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
41195+ * application taking the lock.
41196+ *
41197+ * Releases a write lock.
41198+ * Returns:
41199+ * -EINVAL If the lock was not held.
41200+ */
41201+extern int ttm_write_unlock(struct ttm_lock *lock,
41202+ struct ttm_object_file *tfile);
41203+
41204+/**
41205+ * ttm_lock_set_kill
41206+ *
41207+ * @lock: Pointer to a struct ttm_lock
41208+ * @val: Boolean whether to kill processes taking the lock.
41209+ * @signal: Signal to send to the process taking the lock.
41210+ *
41211+ * The kill-when-taking-lock functionality is used to kill processes that keep
41212+ * on using the TTM functionality when its resources have been taken down, for
41213+ * example when the X server exits. A typical sequence would look like this:
41214+ * - X server takes lock in write mode.
41215+ * - ttm_lock_set_kill() is called with @val set to true.
41216+ * - As part of X server exit, TTM resources are taken down.
41217+ * - X server releases the lock on file release.
41218+ * - Another dri client wants to render, takes the lock and is killed.
41219+ *
41220+ */
41221+
41222+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, int signal)
41223+{
41224+ lock->kill_takers = val;
41225+ if (val)
41226+ lock->signal = signal;
41227+}
41228+
41229+#endif
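/*
 * Illustrative sketch (not part of the original patch): the locking pattern
 * described in the comment at the top of this file. The read lock wraps a
 * kernel path that may validate/allocate buffers; the write lock is taken on
 * behalf of a user-space client (identified by @tfile) when all validation
 * must be blocked. my_validate_buffers() and the eviction step are
 * hypothetical placeholders.
 */
extern int my_validate_buffers(void);	/* hypothetical */

static int my_validate_path(struct ttm_lock *lock)
{
	int ret;

	ret = ttm_read_lock(lock, true /* interruptible */);
	if (unlikely(ret != 0))
		return ret;	/* -ERESTART: a signal is pending */

	ret = my_validate_buffers();

	ttm_read_unlock(lock);
	return ret;
}

static int my_block_validation(struct ttm_lock *lock,
			       struct ttm_object_file *tfile)
{
	int ret;

	ret = ttm_write_lock(lock, true /* interruptible */, tfile);
	if (unlikely(ret != 0))
		return ret;

	/* ... clean VRAM / evict buffers here ... */

	return ttm_write_unlock(lock, tfile);
}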
41230diff --git a/drivers/gpu/drm/psb/ttm/ttm_memory.c b/drivers/gpu/drm/psb/ttm/ttm_memory.c
41231new file mode 100644
41232index 0000000..75df380
41233--- /dev/null
41234+++ b/drivers/gpu/drm/psb/ttm/ttm_memory.c
41235@@ -0,0 +1,232 @@
41236+/**************************************************************************
41237+ *
41238+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
41239+ * All Rights Reserved.
41240+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
41241+ * All Rights Reserved.
41242+ *
41243+ * Permission is hereby granted, free of charge, to any person obtaining a
41244+ * copy of this software and associated documentation files (the
41245+ * "Software"), to deal in the Software without restriction, including
41246+ * without limitation the rights to use, copy, modify, merge, publish,
41247+ * distribute, sub license, and/or sell copies of the Software, and to
41248+ * permit persons to whom the Software is furnished to do so, subject to
41249+ * the following conditions:
41250+ *
41251+ * The above copyright notice and this permission notice (including the
41252+ * next paragraph) shall be included in all copies or substantial portions
41253+ * of the Software.
41254+ *
41255+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41256+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
41257+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
41258+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
41259+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
41260+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
41261+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
41262+ *
41263+ **************************************************************************/
41264+
41265+#include "ttm/ttm_memory.h"
41266+#include <linux/spinlock.h>
41267+#include <linux/sched.h>
41268+#include <linux/wait.h>
41269+#include <linux/mm.h>
41270+
41271+#define TTM_MEMORY_ALLOC_RETRIES 4
41272+
41273+/**
41274+ * At this point we only support a single shrink callback.
41275+ * Extend this if needed, perhaps using a linked list of callbacks.
41276+ * Note that this function is reentrant:
41277+ * many threads may try to swap out at any given time.
41278+ */
41279+
41280+static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
41281+ uint64_t extra)
41282+{
41283+ int ret;
41284+ struct ttm_mem_shrink *shrink;
41285+ uint64_t target;
41286+ uint64_t total_target;
41287+
41288+ spin_lock(&glob->lock);
41289+ if (glob->shrink == NULL)
41290+ goto out;
41291+
41292+ if (from_workqueue) {
41293+ target = glob->swap_limit;
41294+ total_target = glob->total_memory_swap_limit;
41295+ } else if (capable(CAP_SYS_ADMIN)) {
41296+ total_target = glob->emer_total_memory;
41297+ target = glob->emer_memory;
41298+ } else {
41299+ total_target = glob->max_total_memory;
41300+ target = glob->max_memory;
41301+ }
41302+
41303+ total_target = (extra >= total_target) ? 0: total_target - extra;
41304+ target = (extra >= target) ? 0: target - extra;
41305+
41306+ while (glob->used_memory > target ||
41307+ glob->used_total_memory > total_target) {
41308+ shrink = glob->shrink;
41309+ spin_unlock(&glob->lock);
41310+ ret = shrink->do_shrink(shrink);
41311+ spin_lock(&glob->lock);
41312+ if (unlikely(ret != 0))
41313+ goto out;
41314+ }
41315+ out:
41316+ spin_unlock(&glob->lock);
41317+}
41318+
41319+static void ttm_shrink_work(struct work_struct *work)
41320+{
41321+ struct ttm_mem_global *glob =
41322+ container_of(work, struct ttm_mem_global, work);
41323+
41324+ ttm_shrink(glob, true, 0ULL);
41325+}
41326+
41327+int ttm_mem_global_init(struct ttm_mem_global *glob)
41328+{
41329+ struct sysinfo si;
41330+ uint64_t mem;
41331+
41332+ spin_lock_init(&glob->lock);
41333+ glob->swap_queue = create_singlethread_workqueue("ttm_swap");
41334+ INIT_WORK(&glob->work, ttm_shrink_work);
41335+ init_waitqueue_head(&glob->queue);
41336+
41337+ si_meminfo(&si);
41338+
41339+ mem = si.totalram - si.totalhigh;
41340+ mem *= si.mem_unit;
41341+
41342+ glob->max_memory = mem >> 1;
41343+ glob->emer_memory = glob->max_memory + (mem >> 2);
41344+ glob->swap_limit = glob->max_memory - (mem >> 5);
41345+ glob->used_memory = 0;
41346+ glob->used_total_memory = 0;
41347+ glob->shrink = NULL;
41348+
41349+ mem = si.totalram;
41350+ mem *= si.mem_unit;
41351+
41352+ glob->max_total_memory = mem >> 1;
41353+ glob->emer_total_memory = glob->max_total_memory + (mem >> 2);
41354+ glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 5);
41355+
41356+ printk(KERN_INFO "TTM available graphics memory: %llu MiB\n",
41357+ glob->max_total_memory >> 20);
41358+ printk(KERN_INFO "TTM available object memory: %llu MiB\n",
41359+ glob->max_memory >> 20);
41360+ printk(KERN_INFO "TTM available swap breakpoint: %llu MiB\n",
41361+ glob->swap_limit >> 20);
41362+
41363+ return 0;
41364+}
41365+
41366+void ttm_mem_global_release(struct ttm_mem_global *glob)
41367+{
41368+ printk(KERN_INFO "Used total memory is %llu bytes.\n",
41369+ (unsigned long long)glob->used_total_memory);
41370+ flush_workqueue(glob->swap_queue);
41371+ destroy_workqueue(glob->swap_queue);
41372+ glob->swap_queue = NULL;
41373+}
41374+
41375+static inline void ttm_check_swapping(struct ttm_mem_global *glob)
41376+{
41377+ bool needs_swapping;
41378+
41379+ spin_lock(&glob->lock);
41380+ needs_swapping = (glob->used_memory > glob->swap_limit ||
41381+ glob->used_total_memory >
41382+ glob->total_memory_swap_limit);
41383+ spin_unlock(&glob->lock);
41384+
41385+ if (unlikely(needs_swapping))
41386+ (void)queue_work(glob->swap_queue, &glob->work);
41387+
41388+}
41389+
41390+void ttm_mem_global_free(struct ttm_mem_global *glob,
41391+ uint64_t amount, bool himem)
41392+{
41393+ spin_lock(&glob->lock);
41394+ glob->used_total_memory -= amount;
41395+ if (!himem)
41396+ glob->used_memory -= amount;
41397+ wake_up_all(&glob->queue);
41398+ spin_unlock(&glob->lock);
41399+}
41400+
41401+static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
41402+ uint64_t amount, bool himem, bool reserve)
41403+{
41404+ uint64_t limit;
41405+ uint64_t lomem_limit;
41406+ int ret = -ENOMEM;
41407+
41408+ spin_lock(&glob->lock);
41409+
41410+ if (capable(CAP_SYS_ADMIN)) {
41411+ limit = glob->emer_total_memory;
41412+ lomem_limit = glob->emer_memory;
41413+ } else {
41414+ limit = glob->max_total_memory;
41415+ lomem_limit = glob->max_memory;
41416+ }
41417+
41418+ if (unlikely(glob->used_total_memory + amount > limit))
41419+ goto out_unlock;
41420+ if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
41421+ goto out_unlock;
41422+
41423+ if (reserve) {
41424+ glob->used_total_memory += amount;
41425+ if (!himem)
41426+ glob->used_memory += amount;
41427+ }
41428+ ret = 0;
41429+ out_unlock:
41430+ spin_unlock(&glob->lock);
41431+ ttm_check_swapping(glob);
41432+
41433+ return ret;
41434+}
41435+
41436+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
41437+ bool no_wait, bool interruptible, bool himem)
41438+{
41439+ int count = TTM_MEMORY_ALLOC_RETRIES;
41440+
41441+ while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true) != 0)) {
41442+ if (no_wait)
41443+ return -ENOMEM;
41444+ if (unlikely(count-- == 0))
41445+ return -ENOMEM;
41446+ ttm_shrink(glob, false, memory + (memory >> 2) + 16);
41447+ }
41448+
41449+ return 0;
41450+}
41451+
41452+size_t ttm_round_pot(size_t size)
41453+{
41454+ if ((size & (size - 1)) == 0)
41455+ return size;
41456+ else if (size > PAGE_SIZE)
41457+ return PAGE_ALIGN(size);
41458+ else {
41459+ size_t tmp_size = 4;
41460+
41461+ while (tmp_size < size)
41462+ tmp_size <<= 1;
41463+
41464+ return tmp_size;
41465+ }
41466+ return 0;
41467+}
41468diff --git a/drivers/gpu/drm/psb/ttm/ttm_memory.h b/drivers/gpu/drm/psb/ttm/ttm_memory.h
41469new file mode 100644
41470index 0000000..9bff60f
41471--- /dev/null
41472+++ b/drivers/gpu/drm/psb/ttm/ttm_memory.h
41473@@ -0,0 +1,154 @@
41474+/**************************************************************************
41475+ *
41476+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
41477+ * All Rights Reserved.
41478+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
41479+ * All Rights Reserved.
41480+ *
41481+ * Permission is hereby granted, free of charge, to any person obtaining a
41482+ * copy of this software and associated documentation files (the
41483+ * "Software"), to deal in the Software without restriction, including
41484+ * without limitation the rights to use, copy, modify, merge, publish,
41485+ * distribute, sub license, and/or sell copies of the Software, and to
41486+ * permit persons to whom the Software is furnished to do so, subject to
41487+ * the following conditions:
41488+ *
41489+ * The above copyright notice and this permission notice (including the
41490+ * next paragraph) shall be included in all copies or substantial portions
41491+ * of the Software.
41492+ *
41493+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41494+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
41495+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
41496+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
41497+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
41498+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
41499+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
41500+ *
41501+ **************************************************************************/
41502+
41503+#ifndef TTM_MEMORY_H
41504+#define TTM_MEMORY_H
41505+
41506+#include <linux/workqueue.h>
41507+#include <linux/spinlock.h>
41508+#include <linux/wait.h>
41509+
41510+/**
41511+ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
41512+ *
41513+ * @do_shrink: The callback function.
41514+ *
41515+ * Arguments to the do_shrink functions are intended to be passed using
41516+ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
41517+ * and can be accessed using container_of().
41518+ */
41519+
41520+struct ttm_mem_shrink {
41521+ int (*do_shrink) (struct ttm_mem_shrink *);
41522+};
41523+
41524+/**
41525+ * struct ttm_mem_global - Global memory accounting structure.
41526+ *
41527+ * @shrink: A single callback to shrink TTM memory usage. Extend this
41528+ * to a linked list to be able to handle multiple callbacks when needed.
41529+ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
41530+ * need a separate workqueue since it will spend a lot of time waiting
41531+ * for the GPU, and this might otherwise block other workqueue tasks.
41532+ * At this point we use only a single-threaded workqueue.
41533+ * @work: The workqueue callback for the shrink queue.
41534+ * @queue: Wait queue for processes suspended waiting for memory.
41535+ * @lock: Lock to protect the @shrink callback and the memory accounting members,
41536+ * that is, essentially the whole structure with some exceptions.
41537+ * @emer_memory: Lowmem memory limit available for root.
41538+ * @max_memory: Lowmem memory limit available for non-root.
41539+ * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
41540+ * @used_memory: Currently used lowmem memory.
41541+ * @used_total_memory: Currently used total (lowmem + highmem) memory.
41542+ * @total_memory_swap_limit: Total memory limit where the shrink workqueue
41543+ * kicks in.
41544+ * @max_total_memory: Total memory available to non-root processes.
41545+ * @emer_total_memory: Total memory available to root processes.
41546+ *
41547+ * Note that this structure is not per device. It should be global for all
41548+ * graphics devices.
41549+ */
41550+
41551+struct ttm_mem_global {
41552+ struct ttm_mem_shrink *shrink;
41553+ struct workqueue_struct *swap_queue;
41554+ struct work_struct work;
41555+ wait_queue_head_t queue;
41556+ spinlock_t lock;
41557+ uint64_t emer_memory;
41558+ uint64_t max_memory;
41559+ uint64_t swap_limit;
41560+ uint64_t used_memory;
41561+ uint64_t used_total_memory;
41562+ uint64_t total_memory_swap_limit;
41563+ uint64_t max_total_memory;
41564+ uint64_t emer_total_memory;
41565+};
41566+
41567+/**
41568+ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
41569+ *
41570+ * @shrink: The object to initialize.
41571+ * @func: The callback function.
41572+ */
41573+
41574+static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
41575+ int (*func) (struct ttm_mem_shrink *))
41576+{
41577+ shrink->do_shrink = func;
41578+}
41579+
41580+/**
41581+ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
41582+ *
41583+ * @glob: The struct ttm_mem_global object to register with.
41584+ * @shrink: An initialized struct ttm_mem_shrink object to register.
41585+ *
41586+ * Returns:
41587+ * -EBUSY: There's already a callback registered. (May change).
41588+ */
41589+
41590+static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
41591+ struct ttm_mem_shrink *shrink)
41592+{
41593+ spin_lock(&glob->lock);
41594+ if (glob->shrink != NULL) {
41595+ spin_unlock(&glob->lock);
41596+ return -EBUSY;
41597+ }
41598+ glob->shrink = shrink;
41599+ spin_unlock(&glob->lock);
41600+ return 0;
41601+}
41602+
41603+/**
41604+ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
41605+ *
41606+ * @glob: The struct ttm_mem_global object to unregister from.
41607+ * @shrink: A previously registered struct ttm_mem_shrink object.
41608+ *
41609+ */
41610+
41611+static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
41612+ struct ttm_mem_shrink *shrink)
41613+{
41614+ spin_lock(&glob->lock);
41615+ BUG_ON(glob->shrink != shrink);
41616+ glob->shrink = NULL;
41617+ spin_unlock(&glob->lock);
41618+}
41619+
41620+extern int ttm_mem_global_init(struct ttm_mem_global *glob);
41621+extern void ttm_mem_global_release(struct ttm_mem_global *glob);
41622+extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
41623+ bool no_wait, bool interruptible, bool himem);
41624+extern void ttm_mem_global_free(struct ttm_mem_global *glob,
41625+ uint64_t amount, bool himem);
41626+extern size_t ttm_round_pot(size_t size);
41627+#endif
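/*
 * Illustrative sketch (not part of the original patch): the accounting
 * pattern used throughout this driver. Account for an allocation with
 * ttm_mem_global_alloc() before kmalloc(), and return the accounting with
 * ttm_mem_global_free() when the object is destroyed. struct my_obj and the
 * use of <linux/slab.h> are assumptions for illustration.
 */
struct my_obj {
	int payload;
};

static struct my_obj *my_obj_create(struct ttm_mem_global *glob)
{
	struct my_obj *obj;
	int ret;

	ret = ttm_mem_global_alloc(glob, sizeof(*obj),
				   false /* no_wait */,
				   false /* interruptible */,
				   false /* himem */);
	if (unlikely(ret != 0))
		return NULL;

	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	if (unlikely(obj == NULL)) {
		ttm_mem_global_free(glob, sizeof(*obj), false /* himem */);
		return NULL;
	}
	return obj;
}

static void my_obj_destroy(struct ttm_mem_global *glob, struct my_obj *obj)
{
	kfree(obj);
	ttm_mem_global_free(glob, sizeof(*obj), false /* himem */);
}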
41628diff --git a/drivers/gpu/drm/psb/ttm/ttm_object.c b/drivers/gpu/drm/psb/ttm/ttm_object.c
41629new file mode 100644
41630index 0000000..294a795
41631--- /dev/null
41632+++ b/drivers/gpu/drm/psb/ttm/ttm_object.c
41633@@ -0,0 +1,444 @@
41634+/**************************************************************************
41635+ *
41636+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
41637+ * All Rights Reserved.
41638+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
41639+ * All Rights Reserved.
41640+ *
41641+ * Permission is hereby granted, free of charge, to any person obtaining a
41642+ * copy of this software and associated documentation files (the
41643+ * "Software"), to deal in the Software without restriction, including
41644+ * without limitation the rights to use, copy, modify, merge, publish,
41645+ * distribute, sub license, and/or sell copies of the Software, and to
41646+ * permit persons to whom the Software is furnished to do so, subject to
41647+ * the following conditions:
41648+ *
41649+ * The above copyright notice and this permission notice (including the
41650+ * next paragraph) shall be included in all copies or substantial portions
41651+ * of the Software.
41652+ *
41653+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41654+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
41655+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
41656+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
41657+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
41658+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
41659+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
41660+ *
41661+ **************************************************************************/
41662+/*
41663+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
41664+ */
41665+/** @file ttm_ref_object.c
41666+ *
41667+ * Base- and reference object implementation for the various
41668+ * ttm objects. Implements reference counting, minimal security checks
41669+ * and release on file close.
41670+ */
41671+
41672+/**
41673+ * struct ttm_object_file
41674+ *
41675+ * @tdev: Pointer to the ttm_object_device.
41676+ *
41677+ * @lock: Lock that protects the ref_list list and the
41678+ * ref_hash hash tables.
41679+ *
41680+ * @ref_list: List of ttm_ref_objects to be destroyed at
41681+ * file release.
41682+ *
41683+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
41684+ * for fast lookup of ref objects given a base object.
41685+ */
41686+
41687+#include "ttm/ttm_object.h"
41688+#include <linux/list.h>
41689+#include <linux/spinlock.h>
41690+#include <linux/slab.h>
41691+#include <asm/atomic.h>
41692+
41693+struct ttm_object_file {
41694+ struct ttm_object_device *tdev;
41695+ rwlock_t lock;
41696+ struct list_head ref_list;
41697+ struct drm_open_hash ref_hash[TTM_REF_NUM];
41698+ struct kref refcount;
41699+};
41700+
41701+/**
41702+ * struct ttm_object_device
41703+ *
41704+ * @object_lock: lock that protects the object_hash hash table.
41705+ *
41706+ * @object_hash: hash table for fast lookup of object global names.
41707+ *
41708+ * @object_count: Per device object count.
41709+ *
41710+ * This is the per-device data structure needed for ttm object management.
41711+ */
41712+
41713+struct ttm_object_device {
41714+ rwlock_t object_lock;
41715+ struct drm_open_hash object_hash;
41716+ atomic_t object_count;
41717+ struct ttm_mem_global *mem_glob;
41718+};
41719+
41720+/**
41721+ * struct ttm_ref_object
41722+ *
41723+ * @hash: Hash entry for the per-file object reference hash.
41724+ *
41725+ * @head: List entry for the per-file list of ref-objects.
41726+ *
41727+ * @kref: Ref count.
41728+ *
41729+ * @obj: Base object this ref object is referencing.
41730+ *
41731+ * @ref_type: Type of ref object.
41732+ *
41733+ * This is similar to an idr object, but it also has a hash table entry
41734+ * that allows lookup with a pointer to the referenced object as a key. In
41735+ * that way, one can easily detect whether a base object is referenced by
41736+ * a particular ttm_object_file. It also carries a ref count to avoid creating
41737+ * multiple ref objects if a ttm_object_file references the same base object more
41738+ * than once.
41739+ */
41740+
41741+struct ttm_ref_object {
41742+ struct drm_hash_item hash;
41743+ struct list_head head;
41744+ struct kref kref;
41745+ struct ttm_base_object *obj;
41746+ enum ttm_ref_type ref_type;
41747+ struct ttm_object_file *tfile;
41748+};
41749+
41750+static inline struct ttm_object_file *
41751+ttm_object_file_ref(struct ttm_object_file *tfile)
41752+{
41753+ kref_get(&tfile->refcount);
41754+ return tfile;
41755+}
41756+
41757+static void ttm_object_file_destroy(struct kref *kref)
41758+{
41759+ struct ttm_object_file *tfile =
41760+ container_of(kref, struct ttm_object_file, refcount);
41761+
41762+// printk(KERN_INFO "Freeing 0x%08lx\n", (unsigned long) tfile);
41763+ kfree(tfile);
41764+}
41765+
41766+
41767+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
41768+{
41769+ struct ttm_object_file *tfile = *p_tfile;
41770+
41771+ *p_tfile = NULL;
41772+ kref_put(&tfile->refcount, ttm_object_file_destroy);
41773+}
41774+
41775+
41776+int ttm_base_object_init(struct ttm_object_file *tfile,
41777+ struct ttm_base_object *base,
41778+ bool shareable,
41779+ enum ttm_object_type object_type,
41780+ void (*refcount_release) (struct ttm_base_object **),
41781+ void (*ref_obj_release) (struct ttm_base_object *,
41782+ enum ttm_ref_type ref_type))
41783+{
41784+ struct ttm_object_device *tdev = tfile->tdev;
41785+ int ret;
41786+
41787+ base->shareable = shareable;
41788+ base->tfile = ttm_object_file_ref(tfile);
41789+ base->refcount_release = refcount_release;
41790+ base->ref_obj_release = ref_obj_release;
41791+ base->object_type = object_type;
41792+ write_lock(&tdev->object_lock);
41793+ kref_init(&base->refcount);
41794+ ret = drm_ht_just_insert_please(&tdev->object_hash,
41795+ &base->hash,
41796+ (unsigned long)base, 31, 0, 0);
41797+ write_unlock(&tdev->object_lock);
41798+ if (unlikely(ret != 0))
41799+ goto out_err0;
41800+
41801+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
41802+ if (unlikely(ret != 0))
41803+ goto out_err1;
41804+
41805+ ttm_base_object_unref(&base);
41806+
41807+ return 0;
41808+ out_err1:
41809+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
41810+ out_err0:
41811+ return ret;
41812+}
41813+
41814+static void ttm_release_base(struct kref *kref)
41815+{
41816+ struct ttm_base_object *base =
41817+ container_of(kref, struct ttm_base_object, refcount);
41818+ struct ttm_object_device *tdev = base->tfile->tdev;
41819+
41820+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
41821+ write_unlock(&tdev->object_lock);
41822+ if (base->refcount_release) {
41823+ ttm_object_file_unref(&base->tfile);
41824+ base->refcount_release(&base);
41825+ }
41826+ write_lock(&tdev->object_lock);
41827+}
41828+
41829+void ttm_base_object_unref(struct ttm_base_object **p_base)
41830+{
41831+ struct ttm_base_object *base = *p_base;
41832+ struct ttm_object_device *tdev = base->tfile->tdev;
41833+
41834+ // printk(KERN_INFO "TTM base object unref.\n");
41835+ *p_base = NULL;
41836+
41837+ /*
41838+ * Need to take the lock here to avoid racing with
41839+ * users trying to look up the object.
41840+ */
41841+
41842+ write_lock(&tdev->object_lock);
41843+ (void)kref_put(&base->refcount, &ttm_release_base);
41844+ write_unlock(&tdev->object_lock);
41845+}
41846+
41847+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
41848+ uint32_t key)
41849+{
41850+ struct ttm_object_device *tdev = tfile->tdev;
41851+ struct ttm_base_object *base;
41852+ struct drm_hash_item *hash;
41853+ int ret;
41854+
41855+ read_lock(&tdev->object_lock);
41856+ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
41857+
41858+ if (likely(ret == 0)) {
41859+ base = drm_hash_entry(hash, struct ttm_base_object, hash);
41860+ kref_get(&base->refcount);
41861+ }
41862+ read_unlock(&tdev->object_lock);
41863+
41864+ if (unlikely(ret != 0))
41865+ return NULL;
41866+
41867+ if (tfile != base->tfile && !base->shareable) {
41868+ printk(KERN_ERR "Attempted access of non-shareable object.\n");
41869+ ttm_base_object_unref(&base);
41870+ return NULL;
41871+ }
41872+
41873+ return base;
41874+}
41875+
41876+int ttm_ref_object_add(struct ttm_object_file *tfile,
41877+ struct ttm_base_object *base,
41878+ enum ttm_ref_type ref_type, bool *existed)
41879+{
41880+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
41881+ struct ttm_ref_object *ref;
41882+ struct drm_hash_item *hash;
41883+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
41884+ int ret = -EINVAL;
41885+
41886+ if (existed != NULL)
41887+ *existed = true;
41888+
41889+ while (ret == -EINVAL) {
41890+ read_lock(&tfile->lock);
41891+ ret = drm_ht_find_item(ht, base->hash.key, &hash);
41892+
41893+ if (ret == 0) {
41894+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
41895+ kref_get(&ref->kref);
41896+ read_unlock(&tfile->lock);
41897+ break;
41898+ }
41899+
41900+ read_unlock(&tfile->lock);
41901+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false, false, false);
41902+ if (unlikely(ret != 0))
41903+ return ret;
41904+ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
41905+ if (unlikely(ref == NULL)) {
41906+ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
41907+ return -ENOMEM;
41908+ }
41909+
41910+ ref->hash.key = base->hash.key;
41911+ ref->obj = base;
41912+ ref->tfile = tfile;
41913+ ref->ref_type = ref_type;
41914+ kref_init(&ref->kref);
41915+
41916+ write_lock(&tfile->lock);
41917+ ret = drm_ht_insert_item(ht, &ref->hash);
41918+
41919+ if (likely(ret == 0)) {
41920+ list_add_tail(&ref->head, &tfile->ref_list);
41921+ kref_get(&base->refcount);
41922+ write_unlock(&tfile->lock);
41923+ if (existed != NULL)
41924+ *existed = false;
41925+ break;
41926+ }
41927+
41928+ write_unlock(&tfile->lock);
41929+ BUG_ON(ret != -EINVAL);
41930+
41931+ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
41932+ kfree(ref);
41933+ }
41934+
41935+ return ret;
41936+}
41937+
41938+static void ttm_ref_object_release(struct kref *kref)
41939+{
41940+ struct ttm_ref_object *ref =
41941+ container_of(kref, struct ttm_ref_object, kref);
41942+ struct ttm_base_object *base = ref->obj;
41943+ struct ttm_object_file *tfile = ref->tfile;
41944+ struct drm_open_hash *ht;
41945+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
41946+
41947+ ht = &tfile->ref_hash[ref->ref_type];
41948+ (void)drm_ht_remove_item(ht, &ref->hash);
41949+ list_del(&ref->head);
41950+ write_unlock(&tfile->lock);
41951+
41952+ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
41953+ base->ref_obj_release(base, ref->ref_type);
41954+
41955+ ttm_base_object_unref(&ref->obj);
41956+ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
41957+ kfree(ref);
41958+ write_lock(&tfile->lock);
41959+}
41960+
41961+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
41962+ unsigned long key, enum ttm_ref_type ref_type)
41963+{
41964+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
41965+ struct ttm_ref_object *ref;
41966+ struct drm_hash_item *hash;
41967+ int ret;
41968+
41969+ write_lock(&tfile->lock);
41970+ ret = drm_ht_find_item(ht, key, &hash);
41971+ if (unlikely(ret != 0)) {
41972+ write_unlock(&tfile->lock);
41973+ return -EINVAL;
41974+ }
41975+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
41976+ kref_put(&ref->kref, ttm_ref_object_release);
41977+ write_unlock(&tfile->lock);
41978+ return 0;
41979+}
41980+
41981+void ttm_object_file_release(struct ttm_object_file **p_tfile)
41982+{
41983+ struct ttm_ref_object *ref;
41984+ struct list_head *list;
41985+ unsigned int i;
41986+ struct ttm_object_file *tfile = *p_tfile;
41987+
41988+ *p_tfile = NULL;
41989+ write_lock(&tfile->lock);
41990+
41991+ /*
41992+ * Since we release the lock within the loop, we have to
41993+ * restart it from the beginning each time.
41994+ */
41995+
41996+ while (!list_empty(&tfile->ref_list)) {
41997+ list = tfile->ref_list.next;
41998+ ref = list_entry(list, struct ttm_ref_object, head);
41999+ ttm_ref_object_release(&ref->kref);
42000+ }
42001+
42002+ for (i = 0; i < TTM_REF_NUM; ++i) {
42003+ drm_ht_remove(&tfile->ref_hash[i]);
42004+ }
42005+
42006+ write_unlock(&tfile->lock);
42007+ ttm_object_file_unref(&tfile);
42008+}
42009+
42010+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
42011+ unsigned int hash_order)
42012+{
42013+ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
42014+ unsigned int i;
42015+ unsigned int j = 0;
42016+ int ret;
42017+
42018+ if (unlikely(tfile == NULL))
42019+ return NULL;
42020+
42021+ rwlock_init(&tfile->lock);
42022+ tfile->tdev = tdev;
42023+ kref_init(&tfile->refcount);
42024+ INIT_LIST_HEAD(&tfile->ref_list);
42025+
42026+ for (i = 0; i < TTM_REF_NUM; ++i) {
42027+ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
42028+ if (ret) {
42029+ j = i;
42030+ goto out_err;
42031+ }
42032+ }
42033+
42034+ return tfile;
42035+ out_err:
42036+ for (i = 0; i < j; ++i) {
42037+ drm_ht_remove(&tfile->ref_hash[i]);
42038+ }
42039+ kfree(tfile);
42040+
42041+ return NULL;
42042+}
42043+
42044+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
42045+ *mem_glob,
42046+ unsigned int hash_order)
42047+{
42048+ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
42049+ int ret;
42050+
42051+ if (unlikely(tdev == NULL))
42052+ return NULL;
42053+
42054+ tdev->mem_glob = mem_glob;
42055+ rwlock_init(&tdev->object_lock);
42056+ atomic_set(&tdev->object_count, 0);
42057+ ret = drm_ht_create(&tdev->object_hash, hash_order);
42058+
42059+ if (likely(ret == 0))
42060+ return tdev;
42061+
42062+ kfree(tdev);
42063+ return NULL;
42064+}
42065+
42066+void ttm_object_device_release(struct ttm_object_device **p_tdev)
42067+{
42068+ struct ttm_object_device *tdev = *p_tdev;
42069+
42070+ *p_tdev = NULL;
42071+
42072+ write_lock(&tdev->object_lock);
42073+ drm_ht_remove(&tdev->object_hash);
42074+ write_unlock(&tdev->object_lock);
42075+
42076+ kfree(tdev);
42077+}
42078diff --git a/drivers/gpu/drm/psb/ttm/ttm_object.h b/drivers/gpu/drm/psb/ttm/ttm_object.h
42079new file mode 100644
42080index 0000000..0925ac5
42081--- /dev/null
42082+++ b/drivers/gpu/drm/psb/ttm/ttm_object.h
42083@@ -0,0 +1,269 @@
42084+/**************************************************************************
42085+ *
42086+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
42087+ * All Rights Reserved.
42088+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
42089+ * All Rights Reserved.
42090+ *
42091+ * Permission is hereby granted, free of charge, to any person obtaining a
42092+ * copy of this software and associated documentation files (the
42093+ * "Software"), to deal in the Software without restriction, including
42094+ * without limitation the rights to use, copy, modify, merge, publish,
42095+ * distribute, sub license, and/or sell copies of the Software, and to
42096+ * permit persons to whom the Software is furnished to do so, subject to
42097+ * the following conditions:
42098+ *
42099+ * The above copyright notice and this permission notice (including the
42100+ * next paragraph) shall be included in all copies or substantial portions
42101+ * of the Software.
42102+ *
42103+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
42104+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42105+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
42106+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
42107+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
42108+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
42109+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
42110+ *
42111+ **************************************************************************/
42112+/*
42113+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
42114+ */
42115+/** @file ttm_ref_object.h
42116+ *
42117+ * Base- and reference object implementation for the various
42118+ * ttm objects. Implements reference counting, minimal security checks
42119+ * and release on file close.
42120+ */
42121+
42122+#ifndef _TTM_OBJECT_H_
42123+#define _TTM_OBJECT_H_
42124+
42125+#include <linux/list.h>
42126+#include <drm/drm_hashtab.h>
42127+#include <linux/kref.h>
42128+#include <ttm/ttm_memory.h>
42129+
42130+/**
42131+ * enum ttm_ref_type
42132+ *
42133+ * Describes what type of reference a ref object holds.
42134+ *
42135+ * TTM_REF_USAGE is a simple refcount on a base object.
42136+ *
42137+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
42138+ * buffer object.
42139+ *
42140+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
42141+ * buffer object.
42142+ *
42143+ */
42144+
42145+enum ttm_ref_type {
42146+ TTM_REF_USAGE,
42147+ TTM_REF_SYNCCPU_READ,
42148+ TTM_REF_SYNCCPU_WRITE,
42149+ TTM_REF_NUM
42150+};
42151+
42152+/**
42153+ * enum ttm_object_type
42154+ *
42155+ * One entry per ttm object type.
42156+ * Device-specific types should use the
42157+ * ttm_driver_typex types.
42158+ */
42159+
42160+enum ttm_object_type {
42161+ ttm_fence_type,
42162+ ttm_buffer_type,
42163+ ttm_lock_type,
42164+ ttm_driver_type0 = 256,
42165+ ttm_driver_type1
42166+};
42167+
42168+struct ttm_object_file;
42169+struct ttm_object_device;
42170+
42171+/**
42172+ * struct ttm_base_object
42173+ *
42174+ * @hash: hash entry for the per-device object hash.
42175+ * @object_type: derived type this object is base class for.
42176+ * @shareable: Other ttm_object_files can access this object.
42177+ *
42178+ * @tfile: Pointer to ttm_object_file of the creator.
42179+ * NULL if the object was not created by a user request.
42180+ * (kernel object).
42181+ *
42182+ * @refcount: Number of references to this object, not
42183+ * including the hash entry. A reference to a base object can
42184+ * only be held by a ref object.
42185+ *
42186+ * @refcount_release: A function to be called when there are
42187+ * no more references to this object. This function should
42188+ * destroy the object (or make sure destruction eventually happens),
42189+ * and when it is called, the object has
42190+ * already been taken out of the per-device hash. The parameter
42191+ * "base" should be set to NULL by the function.
42192+ *
42193+ * @ref_obj_release: A function to be called when a reference object
42194+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
42195+ * This function may, for example, release a lock held by a user-space
42196+ * process.
42197+ *
42198+ * This struct is intended to be used as a base struct for objects that
42199+ * are visible to user-space. It provides a global name, race-safe
42200+ * access and refcounting, minimal access control and hooks for unref actions.
42201+ */
42202+
42203+struct ttm_base_object {
42204+ struct drm_hash_item hash;
42205+ enum ttm_object_type object_type;
42206+ bool shareable;
42207+ struct ttm_object_file *tfile;
42208+ struct kref refcount;
42209+ void (*refcount_release) (struct ttm_base_object ** base);
42210+ void (*ref_obj_release) (struct ttm_base_object * base,
42211+ enum ttm_ref_type ref_type);
42212+};
42213+
42214+/**
42215+ * ttm_base_object_init
42216+ *
42217+ * @tfile: Pointer to a struct ttm_object_file.
42218+ * @base: The struct ttm_base_object to initialize.
42219+ * @shareable: This object is shareable with other applications.
42220+ * (different @tfile pointers.)
42221+ * @type: The object type.
42222+ * @refcount_release: See the struct ttm_base_object description.
42223+ * @ref_obj_release: See the struct ttm_base_object description.
42224+ *
42225+ * Initializes a struct ttm_base_object.
42226+ */
42227+
42228+extern int ttm_base_object_init(struct ttm_object_file *tfile,
42229+ struct ttm_base_object *base,
42230+ bool shareable,
42231+ enum ttm_object_type type,
42232+ void (*refcount_release) (struct ttm_base_object
42233+ **),
42234+ void (*ref_obj_release) (struct ttm_base_object
42235+ *,
42236+ enum ttm_ref_type
42237+ ref_type));
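/*
 * A minimal usage sketch, modelled on the buffer-object code added later in
 * this patch. "my_obj" and "my_obj_refcount_release" are hypothetical driver
 * names, not part of this API:
 *
 *	struct my_obj {
 *		struct ttm_base_object base;
 *	};
 *
 *	static void my_obj_refcount_release(struct ttm_base_object **p_base)
 *	{
 *		struct my_obj *obj =
 *			container_of(*p_base, struct my_obj, base);
 *
 *		*p_base = NULL;
 *		kfree(obj);
 *	}
 *
 *	ret = ttm_base_object_init(tfile, &obj->base, true, ttm_driver_type0,
 *				   &my_obj_refcount_release, NULL);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	handle = obj->base.hash.key;
 *
 * where obj->base.hash.key is the handle under which user space can look the
 * object up again.
 */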
42238+
42239+/**
42240+ * ttm_base_object_lookup
42241+ *
42242+ * @tfile: Pointer to a struct ttm_object_file.
42243+ * @key: Hash key
42244+ *
42245+ * Looks up a struct ttm_base_object with the key @key.
42246+ * Also verifies that the object is visible to the application, by
42247+ * comparing the @tfile argument and checking the object shareable flag.
42248+ */
42249+
42250+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
42251+ *tfile, uint32_t key);
42252+
42253+/**
42254+ * ttm_base_object_unref
42255+ *
42256+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
42257+ *
42258+ * Decrements the base object refcount and clears the pointer pointed to by
42259+ * p_base.
42260+ */
42261+
42262+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
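/*
 * Typical lookup pattern (sketch only, mirroring the buffer-object lookup
 * code added later in this patch): resolve a user-space handle to the base
 * object, use it, then drop the reference taken by the lookup.
 *
 *	struct ttm_base_object *base;
 *
 *	base = ttm_base_object_lookup(tfile, handle);
 *	if (unlikely(base == NULL))
 *		return -EINVAL;
 *	... use the object ...
 *	ttm_base_object_unref(&base);
 */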
42263+
42264+/**
42265+ * ttm_ref_object_add.
42266+ *
42267+ * @tfile: A struct ttm_object_file representing the application owning the
42268+ * ref_object.
42269+ * @base: The base object to reference.
42270+ * @ref_type: The type of reference.
42271+ * @existed: Upon completion, indicates that an identical reference object
42272+ * already existed, and the refcount was upped on that object instead.
42273+ *
42274+ * Adding a ref object to a base object is basically like referencing the
42275+ * base object, but a user-space application holds the reference. When the
42276+ * file corresponding to @tfile is closed, all its reference objects are
42277+ * deleted. A reference object can have different types depending on what
42278+ * it's intended for. It can, for example, hold a refcount to prevent object
42279+ * destruction. When user-space takes a lock, it can add a ref object to that
42280+ * lock to make sure the lock is released if the application dies. A ref object
42281+ * will hold a single reference on a base object.
42282+ */
42283+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
42284+ struct ttm_base_object *base,
42285+ enum ttm_ref_type ref_type, bool *existed);
42286+/**
42287+ * ttm_ref_object_base_unref
42288+ *
42289+ * @key: Key representing the base object.
42290+ * @ref_type: Ref type of the ref object to be dereferenced.
42291+ *
42292+ * Unreference a ref object with type @ref_type
42293+ * on the base object identified by @key. If there are no duplicate
42294+ * references, the ref object will be destroyed and the base object
42295+ * will be unreferenced.
42296+ */
42297+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
42298+ unsigned long key,
42299+ enum ttm_ref_type ref_type);
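/*
 * Sketch of how an ioctl handler might pin an object on behalf of the
 * calling application and later drop that reference again by handle
 * (modelled on the reference/unref ioctls added later in this patch):
 *
 *	bool existed;
 *
 *	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	...
 *	ret = ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
 */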
42300+
42301+/**
42302+ * ttm_object_file_init - initialize a struct ttm_object_file
42303+ *
42304+ * @tdev: A struct ttm_object_device this file is initialized on.
42305+ * @hash_order: Order of the hash table used to hold the reference objects.
42306+ *
42307+ * This is typically called by the file_ops::open function.
42308+ */
42309+
42310+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
42311+ *tdev,
42312+ unsigned int hash_order);
42313+
42314+/**
42315+ * ttm_object_file_release - release data held by a ttm_object_file
42316+ *
42317+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
42318+ * *p_tfile will be set to NULL by this function.
42319+ *
42320+ * Releases all data associated with a ttm_object_file.
42321+ * Typically called from file_ops::release. The caller must
42322+ * ensure that there are no concurrent users of tfile.
42323+ */
42324+
42325+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
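/*
 * Sketch of the per-file lifetime: ttm_object_file_init() is typically
 * called from the driver's open hook and ttm_object_file_release() from its
 * release hook. "priv" is a hypothetical per-file private structure and the
 * hash order of 10 is an arbitrary example value:
 *
 *	priv->tfile = ttm_object_file_init(dev_priv->tdev, 10);
 *	if (unlikely(priv->tfile == NULL))
 *		return -ENOMEM;
 *	...
 *	ttm_object_file_release(&priv->tfile);
 */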
42326+
42327+/**
42328+ * ttm_object_device_init - initialize a struct ttm_object_device
42329+ *
42330+ * @hash_order: Order of hash table used to hash the base objects.
42331+ *
42332+ * This function is typically called on device initialization to prepare
42333+ * data structures needed for ttm base and ref objects.
42334+ */
42335+
42336+extern struct ttm_object_device *ttm_object_device_init
42337+ (struct ttm_mem_global *mem_glob, unsigned int hash_order);
42338+
42339+/**
42340+ * ttm_object_device_release - release data held by a ttm_object_device
42341+ *
42342+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
42343+ * *p_tdev will be set to NULL by this function.
42344+ *
42345+ * Releases all data associated with a ttm_object_device.
42346+ * Typically called from driver::unload before the destruction of the
42347+ * device private data structure.
42348+ */
42349+
42350+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
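/*
 * Sketch of the device-wide lifetime; "dev_priv" is a hypothetical driver
 * private structure and the hash order of 12 is an arbitrary example value:
 *
 *	dev_priv->tdev = ttm_object_device_init(dev_priv->mem_glob, 12);
 *	if (unlikely(dev_priv->tdev == NULL))
 *		return -ENOMEM;
 *	...
 *	ttm_object_device_release(&dev_priv->tdev);
 */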
42351+
42352+#endif
42353diff --git a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c
42354new file mode 100644
42355index 0000000..701be0d
42356--- /dev/null
42357+++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c
42358@@ -0,0 +1,178 @@
42359+/**************************************************************************
42360+ *
42361+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
42362+ * All Rights Reserved.
42363+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
42364+ * All Rights Reserved.
42365+ *
42366+ * Permission is hereby granted, free of charge, to any person obtaining a
42367+ * copy of this software and associated documentation files (the
42368+ * "Software"), to deal in the Software without restriction, including
42369+ * without limitation the rights to use, copy, modify, merge, publish,
42370+ * distribute, sub license, and/or sell copies of the Software, and to
42371+ * permit persons to whom the Software is furnished to do so, subject to
42372+ * the following conditions:
42373+ *
42374+ * The above copyright notice and this permission notice (including the
42375+ * next paragraph) shall be included in all copies or substantial portions
42376+ * of the Software.
42377+ *
42378+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
42379+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42380+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
42381+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
42382+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
42383+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
42384+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
42385+ *
42386+ **************************************************************************/
42387+/*
42388+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
42389+ */
42390+
42391+#include "ttm/ttm_pat_compat.h"
42392+#include <linux/version.h>
42393+#include <asm/page.h>
42394+#include <linux/spinlock.h>
42395+#include <asm/pgtable.h>
42396+
42397+#if (defined(CONFIG_X86) && !defined(CONFIG_X86_PAT))
42398+#include <asm/tlbflush.h>
42399+#include <asm/msr.h>
42400+#include <asm/system.h>
42401+#include <linux/notifier.h>
42402+#include <linux/cpu.h>
42403+
42404+#ifndef MSR_IA32_CR_PAT
42405+#define MSR_IA32_CR_PAT 0x0277
42406+#endif
42407+
42408+#ifndef _PAGE_PAT
42409+#define _PAGE_PAT 0x080
42410+#endif
42411+
42412+static int ttm_has_pat = 0;
42413+
42414+/*
42415+ * Used at resume time and when CPUs are brought back online.
42416+ */
42417+
42418+static void ttm_pat_ipi_handler(void *notused)
42419+{
42420+ u32 v1, v2;
42421+
42422+ rdmsr(MSR_IA32_CR_PAT, v1, v2);
42423+ v2 &= 0xFFFFFFF8;
42424+ v2 |= 0x00000001;
42425+ wbinvd();
42426+ wrmsr(MSR_IA32_CR_PAT, v1, v2);
42427+ wbinvd();
42428+ __flush_tlb_all();
42429+}
42430+
42431+static void ttm_pat_enable(void)
42432+{
42433+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27))
42434+ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1, 1) != 0) {
42435+#else
42436+ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1) != 0) {
42437+#endif
42438+ printk(KERN_ERR "Timed out setting up CPU PAT.\n");
42439+ }
42440+}
42441+
42442+void ttm_pat_resume(void)
42443+{
42444+ if (unlikely(!ttm_has_pat))
42445+ return;
42446+
42447+ ttm_pat_enable();
42448+}
42449+
42450+static int psb_cpu_callback(struct notifier_block *nfb,
42451+ unsigned long action, void *hcpu)
42452+{
42453+ if (action == CPU_ONLINE) {
42454+ ttm_pat_resume();
42455+ }
42456+
42457+ return 0;
42458+}
42459+
42460+static struct notifier_block psb_nb = {
42461+ .notifier_call = psb_cpu_callback,
42462+ .priority = 1
42463+};
42464+
42465+/*
42466+ * Set i386 PAT entry PAT4 to Write-combining memory type on all processors.
42467+ */
42468+
42469+void ttm_pat_init(void)
42470+{
42471+ if (likely(ttm_has_pat))
42472+ return;
42473+
42474+ if (!boot_cpu_has(X86_FEATURE_PAT)) {
42475+ return;
42476+ }
42477+
42478+ ttm_pat_enable();
42479+
42480+ if (num_present_cpus() > 1)
42481+ register_cpu_notifier(&psb_nb);
42482+
42483+ ttm_has_pat = 1;
42484+}
42485+
42486+void ttm_pat_takedown(void)
42487+{
42488+ if (unlikely(!ttm_has_pat))
42489+ return;
42490+
42491+ if (num_present_cpus() > 1)
42492+ unregister_cpu_notifier(&psb_nb);
42493+
42494+ ttm_has_pat = 0;
42495+}
42496+
42497+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
42498+{
42499+ if (likely(ttm_has_pat)) {
42500+ pgprot_val(prot) |= _PAGE_PAT;
42501+ return prot;
42502+ } else {
42503+ return pgprot_noncached(prot);
42504+ }
42505+}
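/*
 * Intended use (hypothetical mapping code, sketch only): a driver that wants
 * a write-combined mapping filters its page protection through this helper,
 * so that on CPUs where PAT could not be set up the mapping degrades to
 * uncached rather than silently staying cached:
 *
 *	vma->vm_page_prot = pgprot_ttm_x86_wc(vma->vm_page_prot);
 */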
42506+
42507+#else
42508+
42509+void ttm_pat_init(void)
42510+{
42511+}
42512+
42513+void ttm_pat_takedown(void)
42514+{
42515+}
42516+
42517+void ttm_pat_resume(void)
42518+{
42519+}
42520+
42521+#ifdef CONFIG_X86
42522+#include <asm/pat.h>
42523+
42524+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
42525+{
42526+ uint32_t cache_bits = ((1) ? _PAGE_CACHE_WC : _PAGE_CACHE_UC_MINUS);
42527+
42528+ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | cache_bits);
42529+}
42530+#else
42531+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
42532+{
42533+ BUG();
42534+}
42535+#endif
42536+#endif
42537diff --git a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h
42538new file mode 100644
42539index 0000000..d767570
42540--- /dev/null
42541+++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h
42542@@ -0,0 +1,41 @@
42543+/**************************************************************************
42544+ *
42545+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
42546+ * All Rights Reserved.
42547+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
42548+ * All Rights Reserved.
42549+ *
42550+ * Permission is hereby granted, free of charge, to any person obtaining a
42551+ * copy of this software and associated documentation files (the
42552+ * "Software"), to deal in the Software without restriction, including
42553+ * without limitation the rights to use, copy, modify, merge, publish,
42554+ * distribute, sub license, and/or sell copies of the Software, and to
42555+ * permit persons to whom the Software is furnished to do so, subject to
42556+ * the following conditions:
42557+ *
42558+ * The above copyright notice and this permission notice (including the
42559+ * next paragraph) shall be included in all copies or substantial portions
42560+ * of the Software.
42561+ *
42562+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
42563+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42564+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
42565+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
42566+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
42567+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
42568+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
42569+ *
42570+ **************************************************************************/
42571+/*
42572+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
42573+ */
42574+
42575+#ifndef _TTM_PAT_COMPAT_
42576+#define _TTM_PAT_COMPAT_
42577+#include <asm/page.h>
42578+#include <asm/pgtable_types.h>
42579+extern void ttm_pat_init(void);
42580+extern void ttm_pat_takedown(void);
42581+extern void ttm_pat_resume(void);
42582+extern pgprot_t pgprot_ttm_x86_wc(pgprot_t prot);
42583+#endif
42584diff --git a/drivers/gpu/drm/psb/ttm/ttm_placement_common.h b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h
42585new file mode 100644
42586index 0000000..13f3861
42587--- /dev/null
42588+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h
42589@@ -0,0 +1,98 @@
42590+/**************************************************************************
42591+ *
42592+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
42593+ * All Rights Reserved.
42594+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
42595+ * All Rights Reserved.
42596+ *
42597+ * Permission is hereby granted, free of charge, to any person obtaining a
42598+ * copy of this software and associated documentation files (the
42599+ * "Software"), to deal in the Software without restriction, including
42600+ * without limitation the rights to use, copy, modify, merge, publish,
42601+ * distribute, sub license, and/or sell copies of the Software, and to
42602+ * permit persons to whom the Software is furnished to do so, subject to
42603+ * the following conditions:
42604+ *
42605+ * The above copyright notice and this permission notice (including the
42606+ * next paragraph) shall be included in all copies or substantial portions
42607+ * of the Software.
42608+ *
42609+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
42610+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42611+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
42612+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
42613+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
42614+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
42615+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
42616+ *
42617+ **************************************************************************/
42618+/*
42619+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
42620+ */
42621+
42622+#ifndef _TTM_PL_COMMON_H_
42623+#define _TTM_PL_COMMON_H_
42624+/*
42625+ * Memory regions for data placement.
42626+ */
42627+
42628+#define TTM_PL_SYSTEM 0
42629+#define TTM_PL_TT 1
42630+#define TTM_PL_VRAM 2
42631+#define TTM_PL_PRIV0 3
42632+#define TTM_PL_PRIV1 4
42633+#define TTM_PL_PRIV2 5
42634+#define TTM_PL_PRIV3 6
42635+#define TTM_PL_PRIV4 7
42636+#define TTM_PL_PRIV5 8
42637+#define TTM_PL_CI 9
42638+#define TTM_PL_RAR 10
42639+#define TTM_PL_SWAPPED 15
42640+
42641+#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
42642+#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
42643+#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
42644+#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
42645+#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
42646+#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
42647+#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
42648+#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
42649+#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
42650+#define TTM_PL_FLAG_CI (1 << TTM_PL_CI)
42651+#define TTM_PL_FLAG_RAR (1 << TTM_PL_RAR)
42652+#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
42653+#define TTM_PL_MASK_MEM 0x0000FFFF
42654+
42655+/*
42656+ * Other flags that affect data placement.
42657+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
42658+ * if available.
42659+ * TTM_PL_FLAG_SHARED means that another application may
42660+ * reference the buffer.
42661+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
42662+ * be evicted to make room for other buffers.
42663+ */
42664+
42665+#define TTM_PL_FLAG_CACHED (1 << 16)
42666+#define TTM_PL_FLAG_UNCACHED (1 << 17)
42667+#define TTM_PL_FLAG_WC (1 << 18)
42668+#define TTM_PL_FLAG_SHARED (1 << 20)
42669+#define TTM_PL_FLAG_NO_EVICT (1 << 21)
42670+
42671+#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
42672+ TTM_PL_FLAG_UNCACHED | \
42673+ TTM_PL_FLAG_WC)
42674+
42675+#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
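/*
 * The memory-type and caching flags above are meant to be OR-ed together
 * into a single placement mask. A typical (purely illustrative) request for
 * a pinned, write-combined VRAM buffer would be:
 *
 *	uint32_t placement = TTM_PL_FLAG_VRAM |
 *			     TTM_PL_FLAG_WC |
 *			     TTM_PL_FLAG_NO_EVICT;
 */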
42676+
42677+/*
42678+ * Access flags to be used for CPU- and GPU- mappings.
42679+ * The idea is that the TTM synchronization mechanism will
42680+ * allow concurrent READ access and exclusive write access.
42681+ * Currently GPU- and CPU accesses are exclusive.
42682+ */
42683+
42684+#define TTM_ACCESS_READ (1 << 0)
42685+#define TTM_ACCESS_WRITE (1 << 1)
42686+
42687+#endif
42688diff --git a/drivers/gpu/drm/psb/ttm/ttm_placement_user.c b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c
42689new file mode 100644
42690index 0000000..68cbb08
42691--- /dev/null
42692+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c
42693@@ -0,0 +1,468 @@
42694+/**************************************************************************
42695+ *
42696+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
42697+ * All Rights Reserved.
42698+ *
42699+ * Permission is hereby granted, free of charge, to any person obtaining a
42700+ * copy of this software and associated documentation files (the
42701+ * "Software"), to deal in the Software without restriction, including
42702+ * without limitation the rights to use, copy, modify, merge, publish,
42703+ * distribute, sub license, and/or sell copies of the Software, and to
42704+ * permit persons to whom the Software is furnished to do so, subject to
42705+ * the following conditions:
42706+ *
42707+ * The above copyright notice and this permission notice (including the
42708+ * next paragraph) shall be included in all copies or substantial portions
42709+ * of the Software.
42710+ *
42711+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
42712+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42713+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
42714+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
42715+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
42716+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
42717+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
42718+ *
42719+ **************************************************************************/
42720+/*
42721+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
42722+ */
42723+
42724+#include "ttm/ttm_placement_user.h"
42725+#include "ttm/ttm_bo_driver.h"
42726+#include "ttm/ttm_object.h"
42727+#include "ttm/ttm_userobj_api.h"
42728+#include "ttm/ttm_lock.h"
42729+
42730+struct ttm_bo_user_object {
42731+ struct ttm_base_object base;
42732+ struct ttm_buffer_object bo;
42733+};
42734+
42735+static size_t pl_bo_size = 0;
42736+
42737+static size_t ttm_pl_size(struct ttm_bo_device *bdev, unsigned long num_pages)
42738+{
42739+ size_t page_array_size =
42740+ (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
42741+
42742+ if (unlikely(pl_bo_size == 0)) {
42743+ pl_bo_size = bdev->ttm_bo_extra_size +
42744+ ttm_round_pot(sizeof(struct ttm_bo_user_object));
42745+ }
42746+
42747+ return bdev->ttm_bo_size + 2 * page_array_size;
42748+}
42749+
42750+static struct ttm_bo_user_object *ttm_bo_user_lookup(struct ttm_object_file
42751+ *tfile, uint32_t handle)
42752+{
42753+ struct ttm_base_object *base;
42754+
42755+ base = ttm_base_object_lookup(tfile, handle);
42756+ if (unlikely(base == NULL)) {
42757+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
42758+ (unsigned long)handle);
42759+ return NULL;
42760+ }
42761+
42762+ if (unlikely(base->object_type != ttm_buffer_type)) {
42763+ ttm_base_object_unref(&base);
42764+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
42765+ (unsigned long)handle);
42766+ return NULL;
42767+ }
42768+
42769+ return container_of(base, struct ttm_bo_user_object, base);
42770+}
42771+
42772+struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
42773+ *tfile, uint32_t handle)
42774+{
42775+ struct ttm_bo_user_object *user_bo;
42776+ struct ttm_base_object *base;
42777+
42778+ user_bo = ttm_bo_user_lookup(tfile, handle);
42779+ if (unlikely(user_bo == NULL))
42780+ return NULL;
42781+
42782+ (void)ttm_bo_reference(&user_bo->bo);
42783+ base = &user_bo->base;
42784+ ttm_base_object_unref(&base);
42785+ return &user_bo->bo;
42786+}
42787+
42788+static void ttm_bo_user_destroy(struct ttm_buffer_object *bo)
42789+{
42790+ struct ttm_bo_user_object *user_bo =
42791+ container_of(bo, struct ttm_bo_user_object, bo);
42792+
42793+ ttm_mem_global_free(bo->bdev->mem_glob, bo->acc_size, false);
42794+ kfree(user_bo);
42795+}
42796+
42797+static void ttm_bo_user_release(struct ttm_base_object **p_base)
42798+{
42799+ struct ttm_bo_user_object *user_bo;
42800+ struct ttm_base_object *base = *p_base;
42801+ struct ttm_buffer_object *bo;
42802+
42803+ *p_base = NULL;
42804+
42805+ if (unlikely(base == NULL))
42806+ return;
42807+
42808+ user_bo = container_of(base, struct ttm_bo_user_object, base);
42809+ bo = &user_bo->bo;
42810+ ttm_bo_unref(&bo);
42811+}
42812+
42813+static void ttm_bo_user_ref_release(struct ttm_base_object *base,
42814+ enum ttm_ref_type ref_type)
42815+{
42816+ struct ttm_bo_user_object *user_bo =
42817+ container_of(base, struct ttm_bo_user_object, base);
42818+ struct ttm_buffer_object *bo = &user_bo->bo;
42819+
42820+ switch (ref_type) {
42821+ case TTM_REF_SYNCCPU_WRITE:
42822+ ttm_bo_synccpu_write_release(bo);
42823+ break;
42824+ default:
42825+ BUG();
42826+ }
42827+}
42828+
42829+static void ttm_pl_fill_rep(struct ttm_buffer_object *bo,
42830+ struct ttm_pl_rep *rep)
42831+{
42832+ struct ttm_bo_user_object *user_bo =
42833+ container_of(bo, struct ttm_bo_user_object, bo);
42834+
42835+ rep->gpu_offset = bo->offset;
42836+ rep->bo_size = bo->num_pages << PAGE_SHIFT;
42837+ rep->map_handle = bo->addr_space_offset;
42838+ rep->placement = bo->mem.flags;
42839+ rep->handle = user_bo->base.hash.key;
42840+ rep->sync_object_arg = (uint32_t) (unsigned long)bo->sync_obj_arg;
42841+}
42842+
42843+int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
42844+ struct ttm_bo_device *bdev,
42845+ struct ttm_lock *lock, void *data)
42846+{
42847+ union ttm_pl_create_arg *arg = data;
42848+ struct ttm_pl_create_req *req = &arg->req;
42849+ struct ttm_pl_rep *rep = &arg->rep;
42850+ struct ttm_buffer_object *bo;
42851+ struct ttm_buffer_object *tmp;
42852+ struct ttm_bo_user_object *user_bo;
42853+ uint32_t flags;
42854+ int ret = 0;
42855+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
42856+ size_t acc_size =
42857+ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
42858+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
42859+ if (unlikely(ret != 0))
42860+ return ret;
42861+
42862+ flags = req->placement;
42863+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
42864+ if (unlikely(user_bo == NULL)) {
42865+ ttm_mem_global_free(mem_glob, acc_size, false);
42866+ return -ENOMEM;
42867+ }
42868+
42869+ bo = &user_bo->bo;
42870+ ret = ttm_read_lock(lock, true);
42871+ if (unlikely(ret != 0)) {
42872+ ttm_mem_global_free(mem_glob, acc_size, false);
42873+ kfree(user_bo);
42874+ return ret;
42875+ }
42876+
42877+ ret = ttm_buffer_object_init(bdev, bo, req->size,
42878+ ttm_bo_type_device, flags,
42879+ req->page_alignment, 0, true,
42880+ NULL, acc_size, &ttm_bo_user_destroy);
42881+ ttm_read_unlock(lock);
42882+
42883+ /*
42884+ * Note that the ttm_buffer_object_init function
42885+ * would've called the destroy function on failure!!
42886+ */
42887+
42888+ if (unlikely(ret != 0))
42889+ goto out;
42890+
42891+ tmp = ttm_bo_reference(bo);
42892+ ret = ttm_base_object_init(tfile, &user_bo->base,
42893+ flags & TTM_PL_FLAG_SHARED,
42894+ ttm_buffer_type,
42895+ &ttm_bo_user_release,
42896+ &ttm_bo_user_ref_release);
42897+ if (unlikely(ret != 0))
42898+ goto out_err;
42899+
42900+ mutex_lock(&bo->mutex);
42901+ ttm_pl_fill_rep(bo, rep);
42902+ mutex_unlock(&bo->mutex);
42903+ ttm_bo_unref(&bo);
42904+ out:
42905+ return 0;
42906+ out_err:
42907+ ttm_bo_unref(&tmp);
42908+ ttm_bo_unref(&bo);
42909+ return ret;
42910+}
42911+
42912+int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
42913+ struct ttm_bo_device *bdev,
42914+ struct ttm_lock *lock, void *data)
42915+{
42916+ union ttm_pl_create_ub_arg *arg = data;
42917+ struct ttm_pl_create_ub_req *req = &arg->req;
42918+ struct ttm_pl_rep *rep = &arg->rep;
42919+ struct ttm_buffer_object *bo;
42920+ struct ttm_buffer_object *tmp;
42921+ struct ttm_bo_user_object *user_bo;
42922+ uint32_t flags;
42923+ int ret = 0;
42924+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
42925+ size_t acc_size =
42926+ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
42927+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
42928+ if (unlikely(ret != 0))
42929+ return ret;
42930+
42931+ flags = req->placement;
42932+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
42933+ if (unlikely(user_bo == NULL)) {
42934+ ttm_mem_global_free(mem_glob, acc_size, false);
42935+ return -ENOMEM;
42936+ }
42937+ ret = ttm_read_lock(lock, true);
42938+ if (unlikely(ret != 0)) {
42939+ ttm_mem_global_free(mem_glob, acc_size, false);
42940+ kfree(user_bo);
42941+ return ret;
42942+ }
42943+ bo = &user_bo->bo;
42944+ ret = ttm_buffer_object_init(bdev, bo, req->size,
42945+ ttm_bo_type_user, flags,
42946+ req->page_alignment, req->user_address,
42947+ true, NULL, acc_size, &ttm_bo_user_destroy);
42948+
42949+ /*
42950+ * Note that the ttm_buffer_object_init function
42951+ * would've called the destroy function on failure!!
42952+ */
42953+ ttm_read_unlock(lock);
42954+ if (unlikely(ret != 0))
42955+ goto out;
42956+
42957+ tmp = ttm_bo_reference(bo);
42958+ ret = ttm_base_object_init(tfile, &user_bo->base,
42959+ flags & TTM_PL_FLAG_SHARED,
42960+ ttm_buffer_type,
42961+ &ttm_bo_user_release,
42962+ &ttm_bo_user_ref_release);
42963+ if (unlikely(ret != 0))
42964+ goto out_err;
42965+
42966+ mutex_lock(&bo->mutex);
42967+ ttm_pl_fill_rep(bo, rep);
42968+ mutex_unlock(&bo->mutex);
42969+ ttm_bo_unref(&bo);
42970+ out:
42971+ return 0;
42972+ out_err:
42973+ ttm_bo_unref(&tmp);
42974+ ttm_bo_unref(&bo);
42975+ return ret;
42976+}
42977+
42978+int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data)
42979+{
42980+ union ttm_pl_reference_arg *arg = data;
42981+ struct ttm_pl_rep *rep = &arg->rep;
42982+ struct ttm_bo_user_object *user_bo;
42983+ struct ttm_buffer_object *bo;
42984+ struct ttm_base_object *base;
42985+ int ret;
42986+
42987+ user_bo = ttm_bo_user_lookup(tfile, arg->req.handle);
42988+ if (unlikely(user_bo == NULL)) {
42989+ printk(KERN_ERR "Could not reference buffer object.\n");
42990+ return -EINVAL;
42991+ }
42992+
42993+ bo = &user_bo->bo;
42994+ ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
42995+ if (unlikely(ret != 0)) {
42996+ printk(KERN_ERR
42997+ "Could not add a reference to buffer object.\n");
42998+ goto out;
42999+ }
43000+
43001+ mutex_lock(&bo->mutex);
43002+ ttm_pl_fill_rep(bo, rep);
43003+ mutex_unlock(&bo->mutex);
43004+
43005+ out:
43006+ base = &user_bo->base;
43007+ ttm_base_object_unref(&base);
43008+ return ret;
43009+}
43010+
43011+int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data)
43012+{
43013+ struct ttm_pl_reference_req *arg = data;
43014+
43015+ return ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE);
43016+}
43017+
43018+int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data)
43019+{
43020+ struct ttm_pl_synccpu_arg *arg = data;
43021+ struct ttm_bo_user_object *user_bo;
43022+ struct ttm_buffer_object *bo;
43023+ struct ttm_base_object *base;
43024+ bool existed;
43025+ int ret;
43026+
43027+ switch (arg->op) {
43028+ case TTM_PL_SYNCCPU_OP_GRAB:
43029+ user_bo = ttm_bo_user_lookup(tfile, arg->handle);
43030+ if (unlikely(user_bo == NULL)) {
43031+ printk(KERN_ERR
43032+ "Could not find buffer object for synccpu.\n");
43033+ return -EINVAL;
43034+ }
43035+ bo = &user_bo->bo;
43036+ base = &user_bo->base;
43037+ ret = ttm_bo_synccpu_write_grab(bo,
43038+ arg->access_mode &
43039+ TTM_PL_SYNCCPU_MODE_NO_BLOCK);
43040+ if (unlikely(ret != 0)) {
43041+ ttm_base_object_unref(&base);
43042+ goto out;
43043+ }
43044+ ret = ttm_ref_object_add(tfile, &user_bo->base,
43045+ TTM_REF_SYNCCPU_WRITE, &existed);
43046+ if (existed || ret != 0)
43047+ ttm_bo_synccpu_write_release(bo);
43048+ ttm_base_object_unref(&base);
43049+ break;
43050+ case TTM_PL_SYNCCPU_OP_RELEASE:
43051+ ret = ttm_ref_object_base_unref(tfile, arg->handle,
43052+ TTM_REF_SYNCCPU_WRITE);
43053+ break;
43054+ default:
43055+ ret = -EINVAL;
43056+ break;
43057+ }
43058+ out:
43059+ return ret;
43060+}
43061+
43062+int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
43063+ struct ttm_lock *lock, void *data)
43064+{
43065+ union ttm_pl_setstatus_arg *arg = data;
43066+ struct ttm_pl_setstatus_req *req = &arg->req;
43067+ struct ttm_pl_rep *rep = &arg->rep;
43068+ struct ttm_buffer_object *bo;
43069+ struct ttm_bo_device *bdev;
43070+ int ret;
43071+
43072+ bo = ttm_buffer_object_lookup(tfile, req->handle);
43073+ if (unlikely(bo == NULL)) {
43074+ printk(KERN_ERR
43075+ "Could not find buffer object for setstatus.\n");
43076+ return -EINVAL;
43077+ }
43078+
43079+ bdev = bo->bdev;
43080+
43081+ ret = ttm_read_lock(lock, true);
43082+ if (unlikely(ret != 0))
43083+ goto out_err0;
43084+
43085+ ret = ttm_bo_reserve(bo, true, false, false, 0);
43086+ if (unlikely(ret != 0))
43087+ goto out_err1;
43088+
43089+ ret = ttm_bo_wait_cpu(bo, false);
43090+ if (unlikely(ret != 0))
43091+ goto out_err2;
43092+
43093+ mutex_lock(&bo->mutex);
43094+ ret = ttm_bo_check_placement(bo, req->set_placement,
43095+ req->clr_placement);
43096+ if (unlikely(ret != 0))
43097+ goto out_err2;
43098+
43099+ bo->proposed_flags = (bo->proposed_flags | req->set_placement)
43100+ & ~req->clr_placement;
43101+ ret = ttm_buffer_object_validate(bo, true, false);
43102+ if (unlikely(ret != 0))
43103+ goto out_err2;
43104+
43105+ ttm_pl_fill_rep(bo, rep);
43106+ out_err2:
43107+ mutex_unlock(&bo->mutex);
43108+ ttm_bo_unreserve(bo);
43109+ out_err1:
43110+ ttm_read_unlock(lock);
43111+ out_err0:
43112+ ttm_bo_unref(&bo);
43113+ return ret;
43114+}
43115+
43116+int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
43117+{
43118+ struct ttm_pl_waitidle_arg *arg = data;
43119+ struct ttm_buffer_object *bo;
43120+ int ret;
43121+
43122+ bo = ttm_buffer_object_lookup(tfile, arg->handle);
43123+ if (unlikely(bo == NULL)) {
43124+ printk(KERN_ERR "Could not find buffer object for waitidle.\n");
43125+ return -EINVAL;
43126+ }
43127+
43128+ ret =
43129+ ttm_bo_block_reservation(bo, true,
43130+ arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
43131+ if (unlikely(ret != 0))
43132+ goto out;
43133+ mutex_lock(&bo->mutex);
43134+ ret = ttm_bo_wait(bo,
43135+ arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
43136+ true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
43137+ mutex_unlock(&bo->mutex);
43138+ ttm_bo_unblock_reservation(bo);
43139+ out:
43140+ ttm_bo_unref(&bo);
43141+ return ret;
43142+}
43143+
43144+int ttm_pl_verify_access(struct ttm_buffer_object *bo,
43145+ struct ttm_object_file *tfile)
43146+{
43147+ struct ttm_bo_user_object *ubo;
43148+
43149+ /*
43150+ * Check bo subclass.
43151+ */
43152+
43153+ if (unlikely(bo->destroy != &ttm_bo_user_destroy))
43154+ return -EPERM;
43155+
43156+ ubo = container_of(bo, struct ttm_bo_user_object, bo);
43157+ if (likely(ubo->base.shareable || ubo->base.tfile == tfile))
43158+ return 0;
43159+
43160+ return -EPERM;
43161+}
43162diff --git a/drivers/gpu/drm/psb/ttm/ttm_placement_user.h b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h
43163new file mode 100644
43164index 0000000..9f69cdc
43165--- /dev/null
43166+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h
43167@@ -0,0 +1,259 @@
43168+/**************************************************************************
43169+ *
43170+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
43171+ * All Rights Reserved.
43172+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
43173+ * All Rights Reserved.
43174+ *
43175+ * Permission is hereby granted, free of charge, to any person obtaining a
43176+ * copy of this software and associated documentation files (the
43177+ * "Software"), to deal in the Software without restriction, including
43178+ * without limitation the rights to use, copy, modify, merge, publish,
43179+ * distribute, sub license, and/or sell copies of the Software, and to
43180+ * permit persons to whom the Software is furnished to do so, subject to
43181+ * the following conditions:
43182+ *
43183+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
43184+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
43185+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
43186+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
43187+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
43188+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
43189+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
43190+ *
43191+ * The above copyright notice and this permission notice (including the
43192+ * next paragraph) shall be included in all copies or substantial portions
43193+ * of the Software.
43194+ *
43195+ **************************************************************************/
43196+/*
43197+ * Authors
43198+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
43199+ */
43200+
43201+#ifndef _TTM_PLACEMENT_USER_H_
43202+#define _TTM_PLACEMENT_USER_H_
43203+
43204+#if !defined(__KERNEL__) && !defined(_KERNEL)
43205+#include <stdint.h>
43206+#else
43207+#include <linux/kernel.h>
43208+#endif
43209+
43210+#include "ttm/ttm_placement_common.h"
43211+
43212+#define TTM_PLACEMENT_MAJOR 0
43213+#define TTM_PLACEMENT_MINOR 1
43214+#define TTM_PLACEMENT_PL 0
43215+#define TTM_PLACEMENT_DATE "080819"
43216+
43217+/**
43218+ * struct ttm_pl_create_req
43219+ *
43220+ * @size: The buffer object size.
43221+ * @placement: Flags that indicate initial acceptable
43222+ * placement.
43223+ * @page_alignment: Required alignment in pages.
43224+ *
43225+ * Input to the TTM_BO_CREATE ioctl.
43226+ */
43227+
43228+struct ttm_pl_create_req {
43229+ uint64_t size;
43230+ uint32_t placement;
43231+ uint32_t page_alignment;
43232+};
43233+
43234+/**
43235+ * struct ttm_pl_create_ub_req
43236+ *
43237+ * @size: The buffer object size.
43238+ * @user_address: User-space address of the memory area that
43239+ * should be used to back the buffer object, cast to a 64-bit integer.
43240+ * @placement: Flags that indicate initial acceptable
43241+ * placement.
43242+ * @page_alignment: Required alignment in pages.
43243+ *
43244+ * Input to the TTM_BO_CREATE_UB ioctl.
43245+ */
43246+
43247+struct ttm_pl_create_ub_req {
43248+ uint64_t size;
43249+ uint64_t user_address;
43250+ uint32_t placement;
43251+ uint32_t page_alignment;
43252+};
43253+
43254+/**
43255+ * struct ttm_pl_rep
43256+ *
43257+ * @gpu_offset: The current offset into the memory region used.
43258+ * This can be used directly by the GPU if there are no
43259+ * additional GPU mapping procedures used by the driver.
43260+ *
43261+ * @bo_size: Actual buffer object size.
43262+ *
43263+ * @map_handle: Offset into the device address space.
43264+ * Used for map, seek, read, write. This will never change
43265+ * during the lifetime of an object.
43266+ *
43267+ * @placement: Flag indicating the placement status of
43268+ * the buffer object using the TTM_PL flags above.
43269+ *
43270+ * @sync_object_arg: Used for user-space synchronization and
43271+ * depends on the synchronization model used. If fences are
43272+ * used, this is the buffer_object::fence_type_mask
43273+ *
43274+ * Output from the TTM_PL_CREATE, TTM_PL_REFERENCE, and
43275+ * TTM_PL_SETSTATUS ioctls.
43276+ */
43277+
43278+struct ttm_pl_rep {
43279+ uint64_t gpu_offset;
43280+ uint64_t bo_size;
43281+ uint64_t map_handle;
43282+ uint32_t placement;
43283+ uint32_t handle;
43284+ uint32_t sync_object_arg;
43285+ uint32_t pad64;
43286+};
43287+
43288+/**
43289+ * struct ttm_pl_setstatus_req
43290+ *
43291+ * @set_placement: Placement flags to set.
43292+ *
43293+ * @clr_placement: Placement flags to clear.
43294+ *
43295+ * @handle: The object handle
43296+ *
43297+ * Input to the TTM_PL_SETSTATUS ioctl.
43298+ */
43299+
43300+struct ttm_pl_setstatus_req {
43301+ uint32_t set_placement;
43302+ uint32_t clr_placement;
43303+ uint32_t handle;
43304+ uint32_t pad64;
43305+};
43306+
43307+/**
43308+ * struct ttm_pl_reference_req
43309+ *
43310+ * @handle: The object to put a reference on.
43311+ *
43312+ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREFERENCE ioctls.
43313+ */
43314+
43315+struct ttm_pl_reference_req {
43316+ uint32_t handle;
43317+ uint32_t pad64;
43318+};
43319+
43320+/*
43321+ * ACCESS mode flags for SYNCCPU.
43322+ *
43323+ * TTM_PL_SYNCCPU_MODE_READ will guarantee that the GPU is not
43324+ * writing to the buffer.
43325+ *
43326+ * TTM_PL_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
43327+ * accessing the buffer.
43328+ *
43329+ * TTM_PL_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
43330+ * for GPU accesses to finish but returns -EBUSY instead.
43331+ *
43332+ * TTM_PL_SYNCCPU_MODE_TRYCACHED tries to place the buffer in cacheable
43333+ * memory while it is synchronized for the CPU.
43334+ */
43335+
43336+#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ
43337+#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE
43338+#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2)
43339+#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)
43340+
43341+/**
43342+ * struct ttm_pl_synccpu_arg
43343+ *
43344+ * @handle: The object to synchronize.
43345+ *
43346+ * @access_mode: access mode indicated by the
43347+ * TTM_PL_SYNCCPU_MODE flags.
43348+ *
43349+ * @op: indicates whether to grab or release the
43350+ * buffer for cpu usage.
43351+ *
43352+ * Input to the TTM_PL_SYNCCPU ioctl.
43353+ */
43354+
43355+struct ttm_pl_synccpu_arg {
43356+ uint32_t handle;
43357+ uint32_t access_mode;
43358+ enum {
43359+ TTM_PL_SYNCCPU_OP_GRAB,
43360+ TTM_PL_SYNCCPU_OP_RELEASE
43361+ } op;
43362+ uint32_t pad64;
43363+};
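/*
 * Illustrative user-space sequence for the synccpu ioctl. The actual ioctl
 * request number is driver-specific and derived from the TTM_PL_SYNCCPU
 * offset below; "fd" and DRV_IOCTL_PL_SYNCCPU are hypothetical names:
 *
 *	struct ttm_pl_synccpu_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.handle = handle;
 *	arg.access_mode = TTM_PL_SYNCCPU_MODE_WRITE;
 *	arg.op = TTM_PL_SYNCCPU_OP_GRAB;
 *	ioctl(fd, DRV_IOCTL_PL_SYNCCPU, &arg);
 *	... CPU writes to the buffer ...
 *	arg.op = TTM_PL_SYNCCPU_OP_RELEASE;
 *	ioctl(fd, DRV_IOCTL_PL_SYNCCPU, &arg);
 */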
43364+
43365+/*
43366+ * Waiting mode flags for the TTM_BO_WAITIDLE ioctl.
43367+ *
43368+ * TTM_PL_WAITIDLE_MODE_LAZY: Allow for sleeps during the polling
43369+ * wait.
43370+ *
43371+ * TTM_PL_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for the GPU,
43372+ * but return -EBUSY if the buffer is busy.
43373+ */
43374+
43375+#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0)
43376+#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)
43377+
43378+/**
43379+ * struct ttm_pl_waitidle_arg
43380+ *
43381+ * @handle: The object to synchronize.
43382+ *
43383+ * @mode: wait mode indicated by the
43384+ * TTM_PL_WAITIDLE_MODE flags.
43385+ *
43386+ * Argument to the TTM_BO_WAITIDLE ioctl.
43387+ */
43388+
43389+struct ttm_pl_waitidle_arg {
43390+ uint32_t handle;
43391+ uint32_t mode;
43392+};
43393+
43394+union ttm_pl_create_arg {
43395+ struct ttm_pl_create_req req;
43396+ struct ttm_pl_rep rep;
43397+};
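/*
 * Illustrative flow for the create ioctl: user space fills in the request
 * half of the union and the driver overwrites it with the reply. "fd" and
 * DRV_IOCTL_PL_CREATE are hypothetical names; the request number is derived
 * from the TTM_PL_CREATE offset below:
 *
 *	union ttm_pl_create_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.size = 1024 * 1024;
 *	arg.req.placement = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED;
 *	arg.req.page_alignment = 0;
 *	ioctl(fd, DRV_IOCTL_PL_CREATE, &arg);
 *	handle = arg.rep.handle;
 *	map_offset = arg.rep.map_handle;
 */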
43398+
43399+union ttm_pl_reference_arg {
43400+ struct ttm_pl_reference_req req;
43401+ struct ttm_pl_rep rep;
43402+};
43403+
43404+union ttm_pl_setstatus_arg {
43405+ struct ttm_pl_setstatus_req req;
43406+ struct ttm_pl_rep rep;
43407+};
43408+
43409+union ttm_pl_create_ub_arg {
43410+ struct ttm_pl_create_ub_req req;
43411+ struct ttm_pl_rep rep;
43412+};
43413+
43414+/*
43415+ * Ioctl offsets.
43416+ */
43417+
43418+#define TTM_PL_CREATE 0x00
43419+#define TTM_PL_REFERENCE 0x01
43420+#define TTM_PL_UNREF 0x02
43421+#define TTM_PL_SYNCCPU 0x03
43422+#define TTM_PL_WAITIDLE 0x04
43423+#define TTM_PL_SETSTATUS 0x05
43424+#define TTM_PL_CREATE_UB 0x06
43425+
43426+#endif
43427diff --git a/drivers/gpu/drm/psb/ttm/ttm_regman.h b/drivers/gpu/drm/psb/ttm/ttm_regman.h
43428new file mode 100644
43429index 0000000..5db5eda
43430--- /dev/null
43431+++ b/drivers/gpu/drm/psb/ttm/ttm_regman.h
43432@@ -0,0 +1,74 @@
43433+/**************************************************************************
43434+ *
43435+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
43436+ * All Rights Reserved.
43437+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
43438+ * All Rights Reserved.
43439+ *
43440+ * Permission is hereby granted, free of charge, to any person obtaining a
43441+ * copy of this software and associated documentation files (the
43442+ * "Software"), to deal in the Software without restriction, including
43443+ * without limitation the rights to use, copy, modify, merge, publish,
43444+ * distribute, sub license, and/or sell copies of the Software, and to
43445+ * permit persons to whom the Software is furnished to do so, subject to
43446+ * the following conditions:
43447+ *
43448+ * The above copyright notice and this permission notice (including the
43449+ * next paragraph) shall be included in all copies or substantial portions
43450+ * of the Software.
43451+ *
43452+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
43453+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
43454+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
43455+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
43456+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
43457+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
43458+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
43459+ *
43460+ **************************************************************************/
43461+/*
43462+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
43463+ */
43464+
43465+#ifndef _TTM_REGMAN_H_
43466+#define _TTM_REGMAN_H_
43467+
43468+#include <linux/list.h>
43469+
43470+struct ttm_fence_object;
43471+
43472+struct ttm_reg {
43473+ struct list_head head;
43474+ struct ttm_fence_object *fence;
43475+ uint32_t fence_type;
43476+ uint32_t new_fence_type;
43477+};
43478+
43479+struct ttm_reg_manager {
43480+ struct list_head free;
43481+ struct list_head lru;
43482+ struct list_head unfenced;
43483+
43484+ int (*reg_reusable)(const struct ttm_reg *reg, const void *data);
43485+ void (*reg_destroy)(struct ttm_reg *reg);
43486+};
43487+
43488+extern int ttm_regs_alloc(struct ttm_reg_manager *manager,
43489+ const void *data,
43490+ uint32_t fence_class,
43491+ uint32_t fence_type,
43492+ int interruptible,
43493+ int no_wait,
43494+ struct ttm_reg **reg);
43495+
43496+extern void ttm_regs_fence(struct ttm_reg_manager *regs,
43497+ struct ttm_fence_object *fence);
43498+
43499+extern void ttm_regs_free(struct ttm_reg_manager *manager);
43500+extern void ttm_regs_add(struct ttm_reg_manager *manager, struct ttm_reg *reg);
43501+extern void ttm_regs_init(struct ttm_reg_manager *manager,
43502+ int (*reg_reusable)(const struct ttm_reg *,
43503+ const void *),
43504+ void (*reg_destroy)(struct ttm_reg *));
43505+
43506+#endif
43507diff --git a/drivers/gpu/drm/psb/ttm/ttm_tt.c b/drivers/gpu/drm/psb/ttm/ttm_tt.c
43508new file mode 100644
43509index 0000000..5119aec
43510--- /dev/null
43511+++ b/drivers/gpu/drm/psb/ttm/ttm_tt.c
43512@@ -0,0 +1,655 @@
43513+/**************************************************************************
43514+ *
43515+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
43516+ * All Rights Reserved.
43517+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
43518+ * All Rights Reserved.
43519+ *
43520+ * Permission is hereby granted, free of charge, to any person obtaining a
43521+ * copy of this software and associated documentation files (the
43522+ * "Software"), to deal in the Software without restriction, including
43523+ * without limitation the rights to use, copy, modify, merge, publish,
43524+ * distribute, sub license, and/or sell copies of the Software, and to
43525+ * permit persons to whom the Software is furnished to do so, subject to
43526+ * the following conditions:
43527+ *
43528+ * The above copyright notice and this permission notice (including the
43529+ * next paragraph) shall be included in all copies or substantial portions
43530+ * of the Software.
43531+ *
43532+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
43533+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
43534+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
43535+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
43536+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
43537+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
43538+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
43539+ *
43540+ **************************************************************************/
43541+/*
43542+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
43543+ */
43544+
43545+#include <linux/version.h>
43546+#include <linux/vmalloc.h>
43547+#include <linux/sched.h>
43548+#include <linux/highmem.h>
43549+#include <linux/pagemap.h>
43550+#include <linux/file.h>
43551+#include <linux/swap.h>
43552+#include "ttm/ttm_bo_driver.h"
43553+#include "ttm/ttm_placement_common.h"
43554+
43555+static int ttm_tt_swapin(struct ttm_tt *ttm);
43556+
43557+#if defined( CONFIG_X86 )
43558+static void ttm_tt_clflush_page(struct page *page)
43559+{
43560+ uint8_t *page_virtual;
43561+ unsigned int i;
43562+
43563+ if (unlikely(page == NULL))
43564+ return;
43565+
43566+ page_virtual = kmap_atomic(page, KM_USER0);
43567+
43568+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
43569+ clflush(page_virtual + i);
43570+
43571+ kunmap_atomic(page_virtual, KM_USER0);
43572+}
43573+
43574+static void ttm_tt_cache_flush_clflush(struct page *pages[],
43575+ unsigned long num_pages)
43576+{
43577+ unsigned long i;
43578+
43579+ mb();
43580+ for (i = 0; i < num_pages; ++i)
43581+ ttm_tt_clflush_page(*pages++);
43582+ mb();
43583+}
43584+#else
43585+static void ttm_tt_ipi_handler(void *null)
43586+{
43587+ ;
43588+}
43589+#endif
43590+
43591+void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
43592+{
43593+
43594+#if defined( CONFIG_X86 )
43595+ if (cpu_has_clflush) {
43596+ ttm_tt_cache_flush_clflush(pages, num_pages);
43597+ return;
43598+ }
43599+#else
43600+ if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1, 1) != 0)
43601+ printk(KERN_ERR "Timed out waiting for drm cache flush.\n");
43602+#endif
43603+}
43604+
43605+/**
43606+ * Allocates storage for pointers to the pages that back the ttm.
43607+ *
43608+ * Uses kmalloc if possible. Otherwise falls back to vmalloc.
43609+ */
43610+static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
43611+{
43612+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
43613+ ttm->pages = NULL;
43614+
43615+ if (size <= PAGE_SIZE)
43616+ ttm->pages = kzalloc(size, GFP_KERNEL);
43617+
43618+ if (!ttm->pages) {
43619+ ttm->pages = vmalloc_user(size);
43620+ if (ttm->pages)
43621+ ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
43622+ }
43623+}
43624+
43625+static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
43626+{
43627+ if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
43628+ vfree(ttm->pages);
43629+ ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
43630+ } else {
43631+ kfree(ttm->pages);
43632+ }
43633+ ttm->pages = NULL;
43634+}
43635+
43636+static struct page *ttm_tt_alloc_page(void)
43637+{
43638+ return alloc_page(GFP_KERNEL | __GFP_ZERO);
43639+}
43640+
43641+static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
43642+{
43643+ int write;
43644+ int dirty;
43645+ struct page *page;
43646+ int i;
43647+ struct ttm_backend *be = ttm->be;
43648+
43649+ BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
43650+ write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
43651+ dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
43652+
43653+ if (be)
43654+ be->func->clear(be);
43655+
43656+ for (i = 0; i < ttm->num_pages; ++i) {
43657+ page = ttm->pages[i];
43658+ if (page == NULL)
43659+ continue;
43660+
43661+ if (page == ttm->dummy_read_page) {
43662+ BUG_ON(write);
43663+ continue;
43664+ }
43665+
43666+ if (write && dirty && !PageReserved(page))
43667+ set_page_dirty_lock(page);
43668+
43669+ ttm->pages[i] = NULL;
43670+ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
43671+ put_page(page);
43672+ }
43673+ ttm->state = tt_unpopulated;
43674+ ttm->first_himem_page = ttm->num_pages;
43675+ ttm->last_lomem_page = -1;
43676+}
43677+
43678+static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
43679+{
43680+ struct page *p;
43681+ struct ttm_bo_device *bdev = ttm->bdev;
43682+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
43683+ int ret;
43684+
43685+ while (NULL == (p = ttm->pages[index])) {
43686+ p = ttm_tt_alloc_page();
43687+
43688+ if (!p)
43689+ return NULL;
43690+
43691+ if (PageHighMem(p)) {
43692+ ret =
43693+ ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, true);
43694+ if (unlikely(ret != 0))
43695+ goto out_err;
43696+ ttm->pages[--ttm->first_himem_page] = p;
43697+ } else {
43698+ ret =
43699+ ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, false);
43700+ if (unlikely(ret != 0))
43701+ goto out_err;
43702+ ttm->pages[++ttm->last_lomem_page] = p;
43703+ }
43704+ }
43705+ return p;
43706+ out_err:
43707+ put_page(p);
43708+ return NULL;
43709+}
43710+
43711+struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
43712+{
43713+ int ret;
43714+
43715+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
43716+ ret = ttm_tt_swapin(ttm);
43717+ if (unlikely(ret != 0))
43718+ return NULL;
43719+ }
43720+ return __ttm_tt_get_page(ttm, index);
43721+}
43722+
43723+int ttm_tt_populate(struct ttm_tt *ttm)
43724+{
43725+ struct page *page;
43726+ unsigned long i;
43727+ struct ttm_backend *be;
43728+ int ret;
43729+
43730+ if (ttm->state != tt_unpopulated)
43731+ return 0;
43732+
43733+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
43734+ ret = ttm_tt_swapin(ttm);
43735+ if (unlikely(ret != 0))
43736+ return ret;
43737+ }
43738+
43739+ be = ttm->be;
43740+
43741+ for (i = 0; i < ttm->num_pages; ++i) {
43742+ page = __ttm_tt_get_page(ttm, i);
43743+ if (!page)
43744+ return -ENOMEM;
43745+ }
43746+
43747+ be->func->populate(be, ttm->num_pages, ttm->pages,
43748+ ttm->dummy_read_page);
43749+ ttm->state = tt_unbound;
43750+ return 0;
43751+}
43752+
43753+#ifdef CONFIG_X86
43754+static inline int ttm_tt_set_page_caching(struct page *p,
43755+ enum ttm_caching_state c_state)
43756+{
43757+ if (PageHighMem(p))
43758+ return 0;
43759+
43760+ switch (c_state) {
43761+ case tt_cached:
43762+ return set_pages_wb(p, 1);
43763+ case tt_wc:
43764+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
43765+ return set_memory_wc((unsigned long) page_address(p), 1);
43766+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) */
43767+ default:
43768+ return set_pages_uc(p, 1);
43769+ }
43770+}
43771+#else /* CONFIG_X86 */
43772+static inline int ttm_tt_set_page_caching(struct page *p,
43773+ enum ttm_caching_state c_state)
43774+{
43775+ return 0;
43776+}
43777+#endif /* CONFIG_X86 */
43778+
43779+/*
43780+ * Change caching policy for the linear kernel map
43781+ * for range of pages in a ttm.
43782+ */
43783+
43784+static int ttm_tt_set_caching(struct ttm_tt *ttm,
43785+ enum ttm_caching_state c_state)
43786+{
43787+ int i, j;
43788+ struct page *cur_page;
43789+ int ret;
43790+
43791+ if (ttm->caching_state == c_state)
43792+ return 0;
43793+
43794+ if (c_state != tt_cached) {
43795+ ret = ttm_tt_populate(ttm);
43796+ if (unlikely(ret != 0))
43797+ return ret;
43798+ }
43799+
43800+ if (ttm->caching_state == tt_cached)
43801+ ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
43802+
43803+ for (i = 0; i < ttm->num_pages; ++i) {
43804+ cur_page = ttm->pages[i];
43805+ if (likely(cur_page != NULL)) {
43806+ ret = ttm_tt_set_page_caching(cur_page, c_state);
43807+ if (unlikely(ret != 0))
43808+ goto out_err;
43809+ }
43810+ }
43811+
43812+ ttm->caching_state = c_state;
43813+
43814+ return 0;
43815+
43816+ out_err:
43817+ for (j = 0; j < i; ++j) {
43818+ cur_page = ttm->pages[j];
43819+ if (likely(cur_page != NULL)) {
43820+ (void)ttm_tt_set_page_caching(cur_page,
43821+ ttm->caching_state);
43822+ }
43823+ }
43824+
43825+ return ret;
43826+}
43827+
43828+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
43829+{
43830+ enum ttm_caching_state state;
43831+
43832+ if (placement & TTM_PL_FLAG_WC)
43833+ state = tt_wc;
43834+ else if (placement & TTM_PL_FLAG_UNCACHED)
43835+ state = tt_uncached;
43836+ else
43837+ state = tt_cached;
43838+
43839+ return ttm_tt_set_caching(ttm, state);
43840+}
43841+
43842+static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
43843+{
43844+ int i;
43845+ struct page *cur_page;
43846+ struct ttm_backend *be = ttm->be;
43847+
43848+ if (be)
43849+ be->func->clear(be);
43850+ (void)ttm_tt_set_caching(ttm, tt_cached);
43851+ for (i = 0; i < ttm->num_pages; ++i) {
43852+ cur_page = ttm->pages[i];
43853+ ttm->pages[i] = NULL;
43854+ if (cur_page) {
43855+ if (page_count(cur_page) != 1)
43856+ printk(KERN_ERR
43857+ "Erroneous page count. Leaking pages.\n");
43858+ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
43859+ PageHighMem(cur_page));
43860+ __free_page(cur_page);
43861+ }
43862+ }
43863+ ttm->state = tt_unpopulated;
43864+ ttm->first_himem_page = ttm->num_pages;
43865+ ttm->last_lomem_page = -1;
43866+}
43867+
43868+void ttm_tt_destroy(struct ttm_tt *ttm)
43869+{
43870+ struct ttm_backend *be;
43871+
43872+ if (unlikely(ttm == NULL))
43873+ return;
43874+
43875+ be = ttm->be;
43876+ if (likely(be != NULL)) {
43877+ be->func->destroy(be);
43878+ ttm->be = NULL;
43879+ }
43880+
43881+ if (likely(ttm->pages != NULL)) {
43882+ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
43883+ ttm_tt_free_user_pages(ttm);
43884+ else
43885+ ttm_tt_free_alloced_pages(ttm);
43886+
43887+ ttm_tt_free_page_directory(ttm);
43888+ }
43889+
43890+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
43891+ ttm->swap_storage)
43892+ fput(ttm->swap_storage);
43893+
43894+ kfree(ttm);
43895+}
43896+
43897+int ttm_tt_set_user(struct ttm_tt *ttm,
43898+ struct task_struct *tsk,
43899+ unsigned long start, unsigned long num_pages)
43900+{
43901+ struct mm_struct *mm = tsk->mm;
43902+ int ret;
43903+ int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
43904+ struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
43905+
43906+ BUG_ON(num_pages != ttm->num_pages);
43907+ BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
43908+
43909+ /**
43910+ * Account user pages as lowmem pages for now.
43911+ */
43912+
43913+ ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE, false, false, false);
43914+ if (unlikely(ret != 0))
43915+ return ret;
43916+
43917+ down_read(&mm->mmap_sem);
43918+ ret = get_user_pages(tsk, mm, start, num_pages,
43919+ write, 0, ttm->pages, NULL);
43920+ up_read(&mm->mmap_sem);
43921+
43922+ if (ret != num_pages && write) {
43923+ ttm_tt_free_user_pages(ttm);
43924+ ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
43925+ return -ENOMEM;
43926+ }
43927+
43928+ ttm->tsk = tsk;
43929+ ttm->start = start;
43930+ ttm->state = tt_unbound;
43931+
43932+ return 0;
43933+}
43934+
43935+struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
43936+ uint32_t page_flags, struct page *dummy_read_page)
43937+{
43938+ struct ttm_bo_driver *bo_driver = bdev->driver;
43939+ struct ttm_tt *ttm;
43940+
43941+ if (!bo_driver)
43942+ return NULL;
43943+
43944+ ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
43945+ if (!ttm)
43946+ return NULL;
43947+
43948+ ttm->bdev = bdev;
43949+
43950+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
43951+ ttm->first_himem_page = ttm->num_pages;
43952+ ttm->last_lomem_page = -1;
43953+ ttm->caching_state = tt_cached;
43954+ ttm->page_flags = page_flags;
43955+
43956+ ttm->dummy_read_page = dummy_read_page;
43957+
43958+ ttm_tt_alloc_page_directory(ttm);
43959+ if (!ttm->pages) {
43960+ ttm_tt_destroy(ttm);
43961+ printk(KERN_ERR "Failed allocating page table\n");
43962+ return NULL;
43963+ }
43964+ ttm->be = bo_driver->create_ttm_backend_entry(bdev);
43965+ if (!ttm->be) {
43966+ ttm_tt_destroy(ttm);
43967+ printk(KERN_ERR "Failed creating ttm backend entry\n");
43968+ return NULL;
43969+ }
43970+ ttm->state = tt_unpopulated;
43971+ return ttm;
43972+}
43973+
43974+/**
43975+ * ttm_tt_unbind:
43976+ *
43977+ * @ttm: the object to unbind from the graphics device
43978+ *
43979+ * Unbind an object from the aperture. This removes the mappings
43980+ * from the graphics device and flushes caches if necessary.
43981+ */
43982+void ttm_tt_unbind(struct ttm_tt *ttm)
43983+{
43984+ int ret;
43985+ struct ttm_backend *be = ttm->be;
43986+
43987+ if (ttm->state == tt_bound) {
43988+ ret = be->func->unbind(be);
43989+ BUG_ON(ret);
43990+ }
43991+ ttm->state = tt_unbound;
43992+}
43993+
43994+/**
43995+ * ttm_tt_bind:
43996+ *
43997+ * @ttm: the ttm object to bind to the graphics device
43998+ *
43999+ * @bo_mem: the aperture memory region which will hold the object
44000+ *
44001+ * Bind a ttm object to the aperture. This ensures that the necessary
44002+ * pages are allocated, flushes CPU caches as needed and marks the
44003+ * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been
44004+ * modified by the GPU
44005+ */
44006+
44007+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
44008+{
44009+ int ret = 0;
44010+ struct ttm_backend *be;
44011+
44012+ if (!ttm)
44013+ return -EINVAL;
44014+
44015+ if (ttm->state == tt_bound)
44016+ return 0;
44017+
44018+ be = ttm->be;
44019+
44020+ ret = ttm_tt_populate(ttm);
44021+ if (ret)
44022+ return ret;
44023+
44024+ ret = be->func->bind(be, bo_mem);
44025+ if (ret) {
44026+ printk(KERN_ERR "Couldn't bind backend.\n");
44027+ return ret;
44028+ }
44029+
44030+ ttm->state = tt_bound;
44031+
44032+ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
44033+ ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
44034+ return 0;
44035+}
44036+
44037+static int ttm_tt_swapin(struct ttm_tt *ttm)
44038+{
44039+ struct address_space *swap_space;
44040+ struct file *swap_storage;
44041+ struct page *from_page;
44042+ struct page *to_page;
44043+ void *from_virtual;
44044+ void *to_virtual;
44045+ int i;
44046+ int ret;
44047+
44048+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
44049+ ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
44050+ ttm->num_pages);
44051+ if (unlikely(ret != 0))
44052+ return ret;
44053+
44054+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
44055+ return 0;
44056+ }
44057+
44058+ swap_storage = ttm->swap_storage;
44059+ BUG_ON(swap_storage == NULL);
44060+
44061+ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
44062+
44063+ for (i = 0; i < ttm->num_pages; ++i) {
44064+ from_page = read_mapping_page(swap_space, i, NULL);
44065+ if (IS_ERR(from_page))
44066+ goto out_err;
44067+ to_page = __ttm_tt_get_page(ttm, i);
44068+ if (unlikely(to_page == NULL))
44069+ goto out_err;
44070+
44071+ preempt_disable();
44072+ from_virtual = kmap_atomic(from_page, KM_USER0);
44073+ to_virtual = kmap_atomic(to_page, KM_USER1);
44074+ memcpy(to_virtual, from_virtual, PAGE_SIZE);
44075+ kunmap_atomic(to_virtual, KM_USER1);
44076+ kunmap_atomic(from_virtual, KM_USER0);
44077+ preempt_enable();
44078+ page_cache_release(from_page);
44079+ }
44080+
44081+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
44082+ fput(swap_storage);
44083+ ttm->swap_storage = NULL;
44084+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
44085+
44086+ return 0;
44087+ out_err:
44088+ ttm_tt_free_alloced_pages(ttm);
44089+ return -ENOMEM;
44090+}
44091+
44092+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
44093+{
44094+ struct address_space *swap_space;
44095+ struct file *swap_storage;
44096+ struct page *from_page;
44097+ struct page *to_page;
44098+ void *from_virtual;
44099+ void *to_virtual;
44100+ int i;
44101+
44102+ BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
44103+ BUG_ON(ttm->caching_state != tt_cached);
44104+
44105+ /*
44106+ * For user buffers, just unpin the pages, as there should be
44107+ * vma references.
44108+ */
44109+
44110+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
44111+ ttm_tt_free_user_pages(ttm);
44112+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
44113+ ttm->swap_storage = NULL;
44114+ return 0;
44115+ }
44116+
44117+ if (!persistant_swap_storage) {
44118+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
44119+ swap_storage = shmem_file_setup("ttm swap",
44120+ ttm->num_pages << PAGE_SHIFT,
44121+ 0);
44122+ if (unlikely(IS_ERR(swap_storage))) {
44123+ printk(KERN_ERR "Failed allocating swap storage.\n");
44124+ return -ENOMEM;
44125+ }
44126+#else
44127+ return -ENOMEM;
44128+#endif
44129+ } else
44130+ swap_storage = persistant_swap_storage;
44131+
44132+ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
44133+
44134+ for (i = 0; i < ttm->num_pages; ++i) {
44135+ from_page = ttm->pages[i];
44136+ if (unlikely(from_page == NULL))
44137+ continue;
44138+ to_page = read_mapping_page(swap_space, i, NULL);
44139+ if (unlikely(to_page == NULL))
44140+ goto out_err;
44141+
44142+ preempt_disable();
44143+ from_virtual = kmap_atomic(from_page, KM_USER0);
44144+ to_virtual = kmap_atomic(to_page, KM_USER1);
44145+ memcpy(to_virtual, from_virtual, PAGE_SIZE);
44146+ kunmap_atomic(to_virtual, KM_USER1);
44147+ kunmap_atomic(from_virtual, KM_USER0);
44148+ preempt_enable();
44149+ set_page_dirty(to_page);
44150+ mark_page_accessed(to_page);
44151+// unlock_page(to_page);
44152+ page_cache_release(to_page);
44153+ }
44154+
44155+ ttm_tt_free_alloced_pages(ttm);
44156+ ttm->swap_storage = swap_storage;
44157+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
44158+ if (persistant_swap_storage)
44159+ ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
44160+
44161+ return 0;
44162+ out_err:
44163+ if (!persistant_swap_storage)
44164+ fput(swap_storage);
44165+
44166+ return -ENOMEM;
44167+}
44168diff --git a/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h
44169new file mode 100644
44170index 0000000..5309050
44171--- /dev/null
44172+++ b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h
44173@@ -0,0 +1,79 @@
44174+/**************************************************************************
44175+ *
44176+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
44177+ * All Rights Reserved.
44178+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
44179+ * All Rights Reserved.
44180+ *
44181+ * Permission is hereby granted, free of charge, to any person obtaining a
44182+ * copy of this software and associated documentation files (the
44183+ * "Software"), to deal in the Software without restriction, including
44184+ * without limitation the rights to use, copy, modify, merge, publish,
44185+ * distribute, sub license, and/or sell copies of the Software, and to
44186+ * permit persons to whom the Software is furnished to do so, subject to
44187+ * the following conditions:
44188+ *
44189+ * The above copyright notice and this permission notice (including the
44190+ * next paragraph) shall be included in all copies or substantial portions
44191+ * of the Software.
44192+ *
44193+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
44194+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
44195+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
44196+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
44197+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
44198+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
44199+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
44200+ *
44201+ **************************************************************************/
44202+/*
44203+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
44204+ */
44205+
44206+#ifndef _TTM_USEROBJ_API_H_
44207+#define _TTM_USEROBJ_API_H_
44208+
44209+#include "ttm/ttm_placement_user.h"
44210+#include "ttm/ttm_fence_user.h"
44211+#include "ttm/ttm_object.h"
44212+#include "ttm/ttm_fence_api.h"
44213+#include "ttm/ttm_bo_api.h"
44214+
44215+struct ttm_lock;
44216+
44217+/*
44218+ * User ioctls.
44219+ */
44220+
44221+extern int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
44222+ struct ttm_bo_device *bdev,
44223+ struct ttm_lock *lock, void *data);
44224+extern int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
44225+ struct ttm_bo_device *bdev,
44226+ struct ttm_lock *lock, void *data);
44227+extern int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data);
44228+extern int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data);
44229+extern int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data);
44230+extern int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
44231+ struct ttm_lock *lock, void *data);
44232+extern int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data);
44233+extern int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data);
44234+extern int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data);
44235+extern int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data);
44236+
44237+extern int
44238+ttm_fence_user_create(struct ttm_fence_device *fdev,
44239+ struct ttm_object_file *tfile,
44240+ uint32_t fence_class,
44241+ uint32_t fence_types,
44242+ uint32_t create_flags,
44243+ struct ttm_fence_object **fence, uint32_t * user_handle);
44244+
44245+extern struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
44246+ *tfile,
44247+ uint32_t handle);
44248+
44249+extern int
44250+ttm_pl_verify_access(struct ttm_buffer_object *bo,
44251+ struct ttm_object_file *tfile);
44252+#endif
44253diff --git a/include/drm/drmP.h b/include/drm/drmP.h
44254index 5575b9a..9c0b919 100644
44255--- a/include/drm/drmP.h
44256+++ b/include/drm/drmP.h
44257@@ -1101,6 +1109,8 @@ extern int drm_init(struct drm_driver *driver);
44258 extern void drm_exit(struct drm_driver *driver);
44259 extern int drm_ioctl(struct inode *inode, struct file *filp,
44260 unsigned int cmd, unsigned long arg);
44261+extern long drm_unlocked_ioctl(struct file *filp,
44262+ unsigned int cmd, unsigned long arg);
44263 extern long drm_compat_ioctl(struct file *filp,
44264 unsigned int cmd, unsigned long arg);
44265 extern int drm_lastclose(struct drm_device *dev);
44266@@ -1514,5 +1524,25 @@ static __inline void drm_free_large(void *ptr)
44267 }
44268 /*@}*/
44269
44270+enum drm_global_types {
44271+ DRM_GLOBAL_TTM_MEM = 0,
44272+ DRM_GLOBAL_TTM_BO,
44273+ DRM_GLOBAL_TTM_OBJECT,
44274+ DRM_GLOBAL_NUM
44275+};
44276+
44277+struct drm_global_reference {
44278+ enum drm_global_types global_type;
44279+ size_t size;
44280+ void *object;
44281+ int (*init) (struct drm_global_reference *);
44282+ void (*release) (struct drm_global_reference *);
44283+};
44284+
44285+extern void drm_global_init(void);
44286+extern void drm_global_release(void);
44287+extern int drm_global_item_ref(struct drm_global_reference *ref);
44288+extern void drm_global_item_unref(struct drm_global_reference *ref);
44289+
44290 #endif /* __KERNEL__ */
44291 #endif
44292diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
44293index ae304cc..43a62a8 100644
44294--- a/include/drm/drm_mode.h
44295+++ b/include/drm/drm_mode.h
44296@@ -121,6 +121,7 @@ struct drm_mode_crtc {
44297 #define DRM_MODE_ENCODER_TMDS 2
44298 #define DRM_MODE_ENCODER_LVDS 3
44299 #define DRM_MODE_ENCODER_TVDAC 4
44300+#define DRM_MODE_ENCODER_MIPI 5
44301
44302 struct drm_mode_get_encoder {
44303 __u32 encoder_id;
44304@@ -155,6 +156,7 @@ struct drm_mode_get_encoder {
44305 #define DRM_MODE_CONNECTOR_DisplayPort 10
44306 #define DRM_MODE_CONNECTOR_HDMIA 11
44307 #define DRM_MODE_CONNECTOR_HDMIB 12
44308+#define DRM_MODE_CONNECTOR_MIPI 13
44309
44310 struct drm_mode_get_connector {
44311
44312diff --git a/include/linux/backlight.h b/include/linux/backlight.h
44313index 79ca2da..00d7255 100644
44314--- a/include/linux/backlight.h
44315+++ b/include/linux/backlight.h
44316@@ -87,6 +87,9 @@ struct backlight_device {
44317 struct notifier_block fb_notif;
44318
44319 struct device dev;
44320+
44321+ /* Private Backlight Data */
44322+ void *priv;
44323 };
44324
44325 static inline void backlight_update_status(struct backlight_device *bd)
44326--
443271.6.0.6
44328
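The page-directory allocator in the dropped TTM code above (ttm_tt_alloc_page_directory) tries kmalloc for directories of up to one page and falls back to vmalloc for larger sizes or slab failures, recording which allocator was used in TTM_PAGE_FLAG_VMALLOC so the matching free can be chosen later. A minimal sketch of that pattern, not part of the patch and with hypothetical helper names, using is_vmalloc_addr() instead of a flag:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/*
 * Sketch of the kmalloc-first, vmalloc-fallback strategy used by
 * ttm_tt_alloc_page_directory() above. Helper names are hypothetical;
 * the original tracks the allocator via TTM_PAGE_FLAG_VMALLOC.
 */
static void *pagedir_alloc(size_t size)
{
	void *p = NULL;

	if (size <= PAGE_SIZE)			/* small: try the slab first */
		p = kzalloc(size, GFP_KERNEL);
	if (!p)					/* large, or slab allocation failed */
		p = vmalloc_user(size);		/* zeroed, page-aligned */
	return p;
}

static void pagedir_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}

Later mainline kernels expose this same fallback as kvmalloc()/kvfree(), so new code would normally not open-code it.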
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/close_debug_info_of_rt2860.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/close_debug_info_of_rt2860.patch
deleted file mode 100644
index 4aa8469042..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/close_debug_info_of_rt2860.patch
+++ /dev/null
@@ -1,38 +0,0 @@
1diff --git a/drivers/staging/rt2860/rt_linux.c b/drivers/staging/rt2860/rt_linux.c
2index 80176b2..fc682be 100644
3--- a/drivers/staging/rt2860/rt_linux.c
4+++ b/drivers/staging/rt2860/rt_linux.c
5@@ -27,7 +27,7 @@
6
7 #include "rt_config.h"
8
9-ULONG RTDebugLevel = RT_DEBUG_ERROR;
10+ULONG RTDebugLevel = RT_DEBUG_OFF;
11
12 BUILD_TIMER_FUNCTION(MlmePeriodicExec);
13 BUILD_TIMER_FUNCTION(AsicRxAntEvalTimeout);
14diff --git a/drivers/staging/rt2860/rt_linux.h b/drivers/staging/rt2860/rt_linux.h
15index 25b53ac..56ac9ac 100644
16--- a/drivers/staging/rt2860/rt_linux.h
17+++ b/drivers/staging/rt2860/rt_linux.h
18@@ -375,18 +375,9 @@ extern ULONG RTDebugLevel;
19
20 #define DBGPRINT(Level, Fmt) DBGPRINT_RAW(Level, Fmt)
21
22+#define DBGPRINT_ERR(Fmt)
23
24-#define DBGPRINT_ERR(Fmt) \
25-{ \
26- printk("ERROR!!! "); \
27- printk Fmt; \
28-}
29-
30-#define DBGPRINT_S(Status, Fmt) \
31-{ \
32- printk Fmt; \
33-}
34-
35+#define DBGPRINT_S(Status, Fmt)
36
37 #else
38 #define DBGPRINT(Level, Fmt)
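The rt2860 change above silences the staging driver's error and status output by lowering RTDebugLevel to RT_DEBUG_OFF and redefining DBGPRINT_ERR()/DBGPRINT_S() to empty macros. As a general point about compiling out printf-style macros of this shape, the conventional form for both the enabled and the disabled variant is a do { } while (0) body, so the macro always behaves as a single statement inside un-braced if/else. A small userspace sketch of that idiom (names are illustrative, not the driver's):

#include <stdio.h>

/* Enable the message by compiling with -DRT_DEBUG. The double-paren
 * calling convention mirrors the driver's "printk Fmt" style. */
#ifdef RT_DEBUG
#define DBG_ERR(Fmt)	do { printf("ERROR!!! "); printf Fmt; } while (0)
#else
#define DBG_ERR(Fmt)	do { } while (0)
#endif

int main(void)
{
	int status = -1;

	if (status < 0)
		DBG_ERR(("request failed, status=%d\n", status));
	else
		printf("ok\n");

	return 0;
}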
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/defconfig-netbook b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/defconfig-netbook
deleted file mode 100644
index f88e5becb9..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/defconfig-netbook
+++ /dev/null
@@ -1,3220 +0,0 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.rc7-4.netbook
4# Mon Dec 8 01:05:27 2008
5#
6# CONFIG_64BIT is not set
7CONFIG_X86_32=y
8# CONFIG_X86_64 is not set
9CONFIG_X86=y
10CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
11CONFIG_GENERIC_TIME=y
12CONFIG_GENERIC_CMOS_UPDATE=y
13CONFIG_CLOCKSOURCE_WATCHDOG=y
14CONFIG_GENERIC_CLOCKEVENTS=y
15CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
16CONFIG_LOCKDEP_SUPPORT=y
17CONFIG_STACKTRACE_SUPPORT=y
18CONFIG_HAVE_LATENCYTOP_SUPPORT=y
19CONFIG_FAST_CMPXCHG_LOCAL=y
20CONFIG_MMU=y
21CONFIG_ZONE_DMA=y
22CONFIG_GENERIC_ISA_DMA=y
23CONFIG_GENERIC_IOMAP=y
24CONFIG_GENERIC_BUG=y
25CONFIG_GENERIC_HWEIGHT=y
26CONFIG_ARCH_MAY_HAVE_PC_FDC=y
27# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
28CONFIG_RWSEM_XCHGADD_ALGORITHM=y
29CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
30CONFIG_GENERIC_CALIBRATE_DELAY=y
31# CONFIG_GENERIC_TIME_VSYSCALL is not set
32CONFIG_ARCH_HAS_CPU_RELAX=y
33CONFIG_ARCH_HAS_DEFAULT_IDLE=y
34CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
35CONFIG_HAVE_SETUP_PER_CPU_AREA=y
36# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
37CONFIG_ARCH_HIBERNATION_POSSIBLE=y
38CONFIG_ARCH_SUSPEND_POSSIBLE=y
39# CONFIG_ZONE_DMA32 is not set
40CONFIG_ARCH_POPULATES_NODE_MAP=y
41# CONFIG_AUDIT_ARCH is not set
42CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
43CONFIG_GENERIC_HARDIRQS=y
44CONFIG_GENERIC_IRQ_PROBE=y
45CONFIG_GENERIC_PENDING_IRQ=y
46CONFIG_X86_SMP=y
47CONFIG_USE_GENERIC_SMP_HELPERS=y
48CONFIG_X86_32_SMP=y
49CONFIG_X86_HT=y
50CONFIG_X86_BIOS_REBOOT=y
51CONFIG_X86_TRAMPOLINE=y
52CONFIG_KTIME_SCALAR=y
53CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
54
55#
56# General setup
57#
58CONFIG_EXPERIMENTAL=y
59CONFIG_LOCK_KERNEL=y
60CONFIG_INIT_ENV_ARG_LIMIT=32
61CONFIG_LOCALVERSION="-netbook"
62# CONFIG_LOCALVERSION_AUTO is not set
63CONFIG_SWAP=y
64CONFIG_SYSVIPC=y
65CONFIG_SYSVIPC_SYSCTL=y
66CONFIG_POSIX_MQUEUE=y
67CONFIG_BSD_PROCESS_ACCT=y
68CONFIG_BSD_PROCESS_ACCT_V3=y
69# CONFIG_TASKSTATS is not set
70# CONFIG_AUDIT is not set
71# CONFIG_IKCONFIG is not set
72CONFIG_LOG_BUF_SHIFT=17
73# CONFIG_CGROUPS is not set
74CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
75# CONFIG_GROUP_SCHED is not set
76# CONFIG_SYSFS_DEPRECATED_V2 is not set
77CONFIG_RELAY=y
78CONFIG_NAMESPACES=y
79# CONFIG_UTS_NS is not set
80# CONFIG_IPC_NS is not set
81# CONFIG_USER_NS is not set
82# CONFIG_PID_NS is not set
83CONFIG_BLK_DEV_INITRD=y
84CONFIG_INITRAMFS_SOURCE=""
85CONFIG_CC_OPTIMIZE_FOR_SIZE=y
86CONFIG_FASTBOOT=y
87CONFIG_SYSCTL=y
88# CONFIG_EMBEDDED is not set
89CONFIG_UID16=y
90CONFIG_SYSCTL_SYSCALL=y
91CONFIG_KALLSYMS=y
92CONFIG_KALLSYMS_ALL=y
93CONFIG_KALLSYMS_EXTRA_PASS=y
94CONFIG_KALLSYMS_STRIP_GENERATED=y
95CONFIG_HOTPLUG=y
96CONFIG_PRINTK=y
97CONFIG_BUG=y
98CONFIG_ELF_CORE=y
99CONFIG_PCSPKR_PLATFORM=y
100# CONFIG_COMPAT_BRK is not set
101CONFIG_BASE_FULL=y
102CONFIG_FUTEX=y
103CONFIG_ANON_INODES=y
104CONFIG_EPOLL=y
105CONFIG_SIGNALFD=y
106CONFIG_TIMERFD=y
107CONFIG_EVENTFD=y
108CONFIG_SHMEM=y
109CONFIG_AIO=y
110CONFIG_VM_EVENT_COUNTERS=y
111CONFIG_PCI_QUIRKS=y
112CONFIG_SLAB=y
113# CONFIG_SLUB is not set
114# CONFIG_SLOB is not set
115CONFIG_PROFILING=y
116# CONFIG_MARKERS is not set
117CONFIG_OPROFILE=y
118# CONFIG_OPROFILE_IBS is not set
119CONFIG_HAVE_OPROFILE=y
120# CONFIG_KPROBES is not set
121CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
122CONFIG_HAVE_IOREMAP_PROT=y
123CONFIG_HAVE_KPROBES=y
124CONFIG_HAVE_KRETPROBES=y
125CONFIG_HAVE_ARCH_TRACEHOOK=y
126CONFIG_HAVE_GENERIC_DMA_COHERENT=y
127CONFIG_SLABINFO=y
128CONFIG_RT_MUTEXES=y
129# CONFIG_TINY_SHMEM is not set
130CONFIG_BASE_SMALL=0
131CONFIG_MODULES=y
132# CONFIG_MODULE_FORCE_LOAD is not set
133CONFIG_MODULE_UNLOAD=y
134# CONFIG_MODULE_FORCE_UNLOAD is not set
135# CONFIG_MODVERSIONS is not set
136# CONFIG_MODULE_SRCVERSION_ALL is not set
137CONFIG_KMOD=y
138CONFIG_STOP_MACHINE=y
139CONFIG_BLOCK=y
140CONFIG_LBD=y
141CONFIG_BLK_DEV_IO_TRACE=y
142# CONFIG_LSF is not set
143CONFIG_BLK_DEV_BSG=y
144# CONFIG_BLK_DEV_INTEGRITY is not set
145
146#
147# IO Schedulers
148#
149CONFIG_IOSCHED_NOOP=y
150# CONFIG_IOSCHED_AS is not set
151# CONFIG_IOSCHED_DEADLINE is not set
152CONFIG_IOSCHED_CFQ=y
153# CONFIG_DEFAULT_AS is not set
154# CONFIG_DEFAULT_DEADLINE is not set
155CONFIG_DEFAULT_CFQ=y
156# CONFIG_DEFAULT_NOOP is not set
157CONFIG_DEFAULT_IOSCHED="cfq"
158CONFIG_CLASSIC_RCU=y
159CONFIG_FREEZER=y
160
161#
162# Processor type and features
163#
164CONFIG_TICK_ONESHOT=y
165CONFIG_NO_HZ=y
166CONFIG_HIGH_RES_TIMERS=y
167CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
168CONFIG_SMP=y
169# CONFIG_SPARSE_IRQ is not set
170CONFIG_X86_FIND_SMP_CONFIG=y
171CONFIG_X86_MPPARSE=y
172# CONFIG_X86_PC is not set
173# CONFIG_X86_ELAN is not set
174# CONFIG_X86_VOYAGER is not set
175CONFIG_X86_GENERICARCH=y
176# CONFIG_X86_NUMAQ is not set
177# CONFIG_X86_SUMMIT is not set
178# CONFIG_X86_ES7000 is not set
179# CONFIG_X86_BIGSMP is not set
180# CONFIG_X86_VSMP is not set
181# CONFIG_X86_RDC321X is not set
182CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
183# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
184# CONFIG_PARAVIRT_GUEST is not set
185# CONFIG_MEMTEST is not set
186CONFIG_X86_CYCLONE_TIMER=y
187# CONFIG_M386 is not set
188# CONFIG_M486 is not set
189# CONFIG_M586 is not set
190# CONFIG_M586TSC is not set
191# CONFIG_M586MMX is not set
192# CONFIG_M686 is not set
193# CONFIG_MPENTIUMII is not set
194# CONFIG_MPENTIUMIII is not set
195CONFIG_MPENTIUMM=y
196# CONFIG_MPENTIUM4 is not set
197# CONFIG_MK6 is not set
198# CONFIG_MK7 is not set
199# CONFIG_MK8 is not set
200# CONFIG_MCRUSOE is not set
201# CONFIG_MEFFICEON is not set
202# CONFIG_MWINCHIPC6 is not set
203# CONFIG_MWINCHIP3D is not set
204# CONFIG_MGEODEGX1 is not set
205# CONFIG_MGEODE_LX is not set
206# CONFIG_MCYRIXIII is not set
207# CONFIG_MVIAC3_2 is not set
208# CONFIG_MVIAC7 is not set
209# CONFIG_MPSC is not set
210# CONFIG_MCORE2 is not set
211# CONFIG_GENERIC_CPU is not set
212CONFIG_X86_GENERIC=y
213CONFIG_X86_CPU=y
214CONFIG_X86_CMPXCHG=y
215CONFIG_X86_L1_CACHE_SHIFT=7
216CONFIG_X86_XADD=y
217CONFIG_X86_WP_WORKS_OK=y
218CONFIG_X86_INVLPG=y
219CONFIG_X86_BSWAP=y
220CONFIG_X86_POPAD_OK=y
221CONFIG_X86_INTEL_USERCOPY=y
222CONFIG_X86_USE_PPRO_CHECKSUM=y
223CONFIG_X86_TSC=y
224CONFIG_X86_CMPXCHG64=y
225CONFIG_X86_CMOV=y
226CONFIG_X86_MINIMUM_CPU_FAMILY=4
227CONFIG_X86_DEBUGCTLMSR=y
228CONFIG_CPU_SUP_INTEL=y
229CONFIG_CPU_SUP_CYRIX_32=y
230CONFIG_CPU_SUP_AMD=y
231CONFIG_CPU_SUP_CENTAUR_32=y
232CONFIG_CPU_SUP_TRANSMETA_32=y
233CONFIG_CPU_SUP_UMC_32=y
234# CONFIG_X86_DS is not set
235# CONFIG_X86_PTRACE_BTS is not set
236CONFIG_HPET_TIMER=y
237CONFIG_HPET_EMULATE_RTC=y
238CONFIG_DMI=y
239# CONFIG_IOMMU_HELPER is not set
240CONFIG_NR_CPUS=8
241CONFIG_SCHED_SMT=y
242CONFIG_SCHED_MC=y
243# CONFIG_PREEMPT_NONE is not set
244# CONFIG_PREEMPT_VOLUNTARY is not set
245CONFIG_PREEMPT=y
246# CONFIG_DEBUG_PREEMPT is not set
247# CONFIG_PREEMPT_TRACER is not set
248CONFIG_X86_LOCAL_APIC=y
249CONFIG_X86_IO_APIC=y
250# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
251CONFIG_X86_MCE=y
252# CONFIG_X86_MCE_NONFATAL is not set
253CONFIG_X86_MCE_P4THERMAL=y
254CONFIG_VM86=y
255CONFIG_TOSHIBA=m
256CONFIG_I8K=m
257CONFIG_X86_REBOOTFIXUPS=y
258CONFIG_MICROCODE=y
259CONFIG_MICROCODE_INTEL=y
260# CONFIG_MICROCODE_AMD is not set
261CONFIG_MICROCODE_OLD_INTERFACE=y
262CONFIG_X86_MSR=y
263CONFIG_X86_CPUID=y
264# CONFIG_NOHIGHMEM is not set
265# CONFIG_HIGHMEM4G is not set
266CONFIG_HIGHMEM64G=y
267CONFIG_PAGE_OFFSET=0xC0000000
268CONFIG_HIGHMEM=y
269CONFIG_X86_PAE=y
270CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
271CONFIG_ARCH_FLATMEM_ENABLE=y
272CONFIG_ARCH_SPARSEMEM_ENABLE=y
273CONFIG_ARCH_SELECT_MEMORY_MODEL=y
274CONFIG_SELECT_MEMORY_MODEL=y
275CONFIG_FLATMEM_MANUAL=y
276# CONFIG_DISCONTIGMEM_MANUAL is not set
277# CONFIG_SPARSEMEM_MANUAL is not set
278CONFIG_FLATMEM=y
279CONFIG_FLAT_NODE_MEM_MAP=y
280CONFIG_SPARSEMEM_STATIC=y
281CONFIG_PAGEFLAGS_EXTENDED=y
282CONFIG_SPLIT_PTLOCK_CPUS=4
283CONFIG_RESOURCES_64BIT=y
284CONFIG_PHYS_ADDR_T_64BIT=y
285CONFIG_ZONE_DMA_FLAG=1
286CONFIG_BOUNCE=y
287CONFIG_VIRT_TO_BUS=y
288CONFIG_UNEVICTABLE_LRU=y
289CONFIG_HIGHPTE=y
290# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
291CONFIG_X86_RESERVE_LOW_64K=y
292# CONFIG_MATH_EMULATION is not set
293CONFIG_MTRR=y
294CONFIG_MTRR_SANITIZER=y
295CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
296CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
297CONFIG_X86_PAT=y
298CONFIG_EFI=y
299# CONFIG_SECCOMP is not set
300# CONFIG_HZ_100 is not set
301# CONFIG_HZ_250 is not set
302# CONFIG_HZ_300 is not set
303CONFIG_HZ_1000=y
304CONFIG_HZ=1000
305CONFIG_SCHED_HRTICK=y
306# CONFIG_KEXEC is not set
307# CONFIG_CRASH_DUMP is not set
308CONFIG_PHYSICAL_START=0x100000
309# CONFIG_RELOCATABLE is not set
310CONFIG_PHYSICAL_ALIGN=0x400000
311CONFIG_HOTPLUG_CPU=y
312# CONFIG_COMPAT_VDSO is not set
313# CONFIG_CMDLINE_BOOL is not set
314# CONFIG_CMDLINE is not set
315# CONFIG_CMDLINE_OVERRIDE is not set
316CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
317
318#
319# Power management and ACPI options
320#
321CONFIG_PM=y
322CONFIG_PM_DEBUG=y
323# CONFIG_PM_VERBOSE is not set
324CONFIG_CAN_PM_TRACE=y
325CONFIG_PM_TRACE=y
326CONFIG_PM_TRACE_RTC=y
327CONFIG_PM_SLEEP_SMP=y
328CONFIG_PM_SLEEP=y
329CONFIG_SUSPEND=y
330# CONFIG_PM_TEST_SUSPEND is not set
331CONFIG_SUSPEND_FREEZER=y
332CONFIG_HIBERNATION=y
333CONFIG_PM_STD_PARTITION=""
334CONFIG_ACPI=y
335CONFIG_ACPI_SLEEP=y
336CONFIG_ACPI_PROCFS=y
337CONFIG_ACPI_PROCFS_POWER=y
338CONFIG_ACPI_SYSFS_POWER=y
339CONFIG_ACPI_PROC_EVENT=y
340CONFIG_ACPI_AC=y
341CONFIG_ACPI_BATTERY=m
342CONFIG_ACPI_BUTTON=y
343CONFIG_ACPI_VIDEO=y
344CONFIG_ACPI_FAN=y
345CONFIG_ACPI_DOCK=y
346CONFIG_ACPI_PROCESSOR=y
347CONFIG_ACPI_HOTPLUG_CPU=y
348CONFIG_ACPI_THERMAL=y
349CONFIG_ACPI_WMI=y
350CONFIG_ACPI_ASUS=m
351CONFIG_ACPI_TOSHIBA=m
352# CONFIG_ACPI_CUSTOM_DSDT is not set
353CONFIG_ACPI_BLACKLIST_YEAR=1999
354# CONFIG_ACPI_DEBUG is not set
355# CONFIG_ACPI_PCI_SLOT is not set
356CONFIG_ACPI_SYSTEM=y
357CONFIG_X86_PM_TIMER=y
358CONFIG_ACPI_CONTAINER=y
359CONFIG_ACPI_SBS=m
360# CONFIG_APM is not set
361
362#
363# CPU Frequency scaling
364#
365CONFIG_CPU_FREQ=y
366CONFIG_CPU_FREQ_TABLE=y
367CONFIG_CPU_FREQ_DEBUG=y
368CONFIG_CPU_FREQ_STAT=y
369CONFIG_CPU_FREQ_STAT_DETAILS=y
370# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
371# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
372# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
373CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
374# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
375CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
376# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
377CONFIG_CPU_FREQ_GOV_USERSPACE=y
378CONFIG_CPU_FREQ_GOV_ONDEMAND=y
379# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
380
381#
382# CPUFreq processor drivers
383#
384CONFIG_X86_ACPI_CPUFREQ=y
385# CONFIG_X86_POWERNOW_K6 is not set
386# CONFIG_X86_POWERNOW_K7 is not set
387# CONFIG_X86_POWERNOW_K8 is not set
388# CONFIG_X86_GX_SUSPMOD is not set
389# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
390# CONFIG_X86_SPEEDSTEP_ICH is not set
391# CONFIG_X86_SPEEDSTEP_SMI is not set
392# CONFIG_X86_P4_CLOCKMOD is not set
393# CONFIG_X86_CPUFREQ_NFORCE2 is not set
394# CONFIG_X86_LONGRUN is not set
395# CONFIG_X86_LONGHAUL is not set
396# CONFIG_X86_E_POWERSAVER is not set
397
398#
399# shared options
400#
401# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
402# CONFIG_X86_SPEEDSTEP_LIB is not set
403CONFIG_CPU_IDLE=y
404CONFIG_CPU_IDLE_GOV_LADDER=y
405CONFIG_CPU_IDLE_GOV_MENU=y
406
407#
408# Bus options (PCI etc.)
409#
410CONFIG_PCI=y
411# CONFIG_PCI_GOBIOS is not set
412# CONFIG_PCI_GOMMCONFIG is not set
413# CONFIG_PCI_GODIRECT is not set
414# CONFIG_PCI_GOOLPC is not set
415CONFIG_PCI_GOANY=y
416CONFIG_PCI_BIOS=y
417CONFIG_PCI_DIRECT=y
418CONFIG_PCI_MMCONFIG=y
419CONFIG_PCI_DOMAINS=y
420CONFIG_PCIEPORTBUS=y
421# CONFIG_PCIEAER is not set
422# CONFIG_PCIEASPM is not set
423# CONFIG_PCIEASPM_DEBUG is not set
424CONFIG_ARCH_SUPPORTS_MSI=y
425CONFIG_PCI_MSI=y
426# CONFIG_PCI_LEGACY is not set
427# CONFIG_PCI_DEBUG is not set
428# CONFIG_PCI_STUB is not set
429# CONFIG_HT_IRQ is not set
430CONFIG_ISA_DMA_API=y
431CONFIG_ISA=y
432# CONFIG_EISA is not set
433# CONFIG_MCA is not set
434# CONFIG_SCx200 is not set
435# CONFIG_OLPC is not set
436CONFIG_PCCARD=y
437# CONFIG_PCMCIA_DEBUG is not set
438# CONFIG_PCMCIA is not set
439CONFIG_CARDBUS=y
440
441#
442# PC-card bridges
443#
444CONFIG_YENTA=y
445CONFIG_YENTA_O2=y
446CONFIG_YENTA_RICOH=y
447CONFIG_YENTA_TI=y
448CONFIG_YENTA_ENE_TUNE=y
449CONFIG_YENTA_TOSHIBA=y
450CONFIG_PCMCIA_PROBE=y
451CONFIG_PCCARD_NONSTATIC=y
452CONFIG_HOTPLUG_PCI=y
453# CONFIG_HOTPLUG_PCI_PCIE is not set
454# CONFIG_HOTPLUG_PCI_FAKE is not set
455# CONFIG_HOTPLUG_PCI_COMPAQ is not set
456# CONFIG_HOTPLUG_PCI_IBM is not set
457# CONFIG_HOTPLUG_PCI_ACPI is not set
458# CONFIG_HOTPLUG_PCI_CPCI is not set
459# CONFIG_HOTPLUG_PCI_SHPC is not set
460
461#
462# Executable file formats / Emulations
463#
464CONFIG_BINFMT_ELF=y
465# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
466CONFIG_HAVE_AOUT=y
467# CONFIG_BINFMT_AOUT is not set
468CONFIG_BINFMT_MISC=y
469CONFIG_HAVE_ATOMIC_IOMAP=y
470CONFIG_NET=y
471
472#
473# Networking options
474#
475# CONFIG_NET_NS is not set
476CONFIG_PACKET=y
477CONFIG_PACKET_MMAP=y
478CONFIG_UNIX=y
479CONFIG_XFRM=y
480CONFIG_XFRM_USER=y
481CONFIG_XFRM_SUB_POLICY=y
482CONFIG_XFRM_MIGRATE=y
483CONFIG_XFRM_STATISTICS=y
484CONFIG_XFRM_IPCOMP=m
485CONFIG_NET_KEY=m
486CONFIG_NET_KEY_MIGRATE=y
487CONFIG_INET=y
488CONFIG_IP_MULTICAST=y
489# CONFIG_IP_ADVANCED_ROUTER is not set
490CONFIG_IP_FIB_HASH=y
491# CONFIG_IP_PNP is not set
492# CONFIG_NET_IPIP is not set
493# CONFIG_NET_IPGRE is not set
494CONFIG_IP_MROUTE=y
495CONFIG_IP_PIMSM_V1=y
496CONFIG_IP_PIMSM_V2=y
497# CONFIG_ARPD is not set
498CONFIG_SYN_COOKIES=y
499CONFIG_INET_AH=m
500CONFIG_INET_ESP=m
501CONFIG_INET_IPCOMP=m
502CONFIG_INET_XFRM_TUNNEL=m
503CONFIG_INET_TUNNEL=m
504CONFIG_INET_XFRM_MODE_TRANSPORT=m
505CONFIG_INET_XFRM_MODE_TUNNEL=m
506CONFIG_INET_XFRM_MODE_BEET=m
507CONFIG_INET_LRO=y
508CONFIG_INET_DIAG=m
509CONFIG_INET_TCP_DIAG=m
510CONFIG_TCP_CONG_ADVANCED=y
511CONFIG_TCP_CONG_BIC=m
512CONFIG_TCP_CONG_CUBIC=y
513# CONFIG_TCP_CONG_WESTWOOD is not set
514# CONFIG_TCP_CONG_HTCP is not set
515# CONFIG_TCP_CONG_HSTCP is not set
516# CONFIG_TCP_CONG_HYBLA is not set
517# CONFIG_TCP_CONG_VEGAS is not set
518# CONFIG_TCP_CONG_SCALABLE is not set
519# CONFIG_TCP_CONG_LP is not set
520# CONFIG_TCP_CONG_VENO is not set
521# CONFIG_TCP_CONG_YEAH is not set
522# CONFIG_TCP_CONG_ILLINOIS is not set
523# CONFIG_DEFAULT_BIC is not set
524CONFIG_DEFAULT_CUBIC=y
525# CONFIG_DEFAULT_HTCP is not set
526# CONFIG_DEFAULT_VEGAS is not set
527# CONFIG_DEFAULT_WESTWOOD is not set
528# CONFIG_DEFAULT_RENO is not set
529CONFIG_DEFAULT_TCP_CONG="cubic"
530CONFIG_TCP_MD5SIG=y
531CONFIG_IPV6=y
532CONFIG_IPV6_PRIVACY=y
533CONFIG_IPV6_ROUTER_PREF=y
534CONFIG_IPV6_ROUTE_INFO=y
535CONFIG_IPV6_OPTIMISTIC_DAD=y
536CONFIG_INET6_AH=m
537CONFIG_INET6_ESP=m
538CONFIG_INET6_IPCOMP=m
539CONFIG_IPV6_MIP6=m
540CONFIG_INET6_XFRM_TUNNEL=m
541CONFIG_INET6_TUNNEL=m
542CONFIG_INET6_XFRM_MODE_TRANSPORT=m
543CONFIG_INET6_XFRM_MODE_TUNNEL=m
544CONFIG_INET6_XFRM_MODE_BEET=m
545CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
546CONFIG_IPV6_SIT=m
547CONFIG_IPV6_NDISC_NODETYPE=y
548CONFIG_IPV6_TUNNEL=m
549CONFIG_IPV6_MULTIPLE_TABLES=y
550CONFIG_IPV6_SUBTREES=y
551# CONFIG_IPV6_MROUTE is not set
552CONFIG_NETWORK_SECMARK=y
553CONFIG_NETFILTER=y
554# CONFIG_NETFILTER_DEBUG is not set
555CONFIG_NETFILTER_ADVANCED=y
556
557#
558# Core Netfilter Configuration
559#
560CONFIG_NETFILTER_NETLINK=m
561CONFIG_NETFILTER_NETLINK_QUEUE=m
562CONFIG_NETFILTER_NETLINK_LOG=m
563CONFIG_NF_CONNTRACK=y
564CONFIG_NF_CT_ACCT=y
565CONFIG_NF_CONNTRACK_MARK=y
566CONFIG_NF_CONNTRACK_SECMARK=y
567CONFIG_NF_CONNTRACK_EVENTS=y
568# CONFIG_NF_CT_PROTO_DCCP is not set
569CONFIG_NF_CT_PROTO_GRE=m
570CONFIG_NF_CT_PROTO_SCTP=m
571CONFIG_NF_CT_PROTO_UDPLITE=m
572CONFIG_NF_CONNTRACK_AMANDA=m
573CONFIG_NF_CONNTRACK_FTP=m
574CONFIG_NF_CONNTRACK_H323=m
575CONFIG_NF_CONNTRACK_IRC=m
576CONFIG_NF_CONNTRACK_NETBIOS_NS=m
577CONFIG_NF_CONNTRACK_PPTP=m
578CONFIG_NF_CONNTRACK_SANE=m
579CONFIG_NF_CONNTRACK_SIP=m
580CONFIG_NF_CONNTRACK_TFTP=m
581CONFIG_NF_CT_NETLINK=m
582# CONFIG_NETFILTER_TPROXY is not set
583CONFIG_NETFILTER_XTABLES=y
584CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
585CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
586CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
587CONFIG_NETFILTER_XT_TARGET_DSCP=m
588CONFIG_NETFILTER_XT_TARGET_MARK=m
589CONFIG_NETFILTER_XT_TARGET_NFLOG=m
590CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
591CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
592CONFIG_NETFILTER_XT_TARGET_RATEEST=m
593CONFIG_NETFILTER_XT_TARGET_TRACE=m
594CONFIG_NETFILTER_XT_TARGET_SECMARK=m
595CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
596CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
597CONFIG_NETFILTER_XT_MATCH_COMMENT=m
598CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
599CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
600CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
601CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
602# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
603CONFIG_NETFILTER_XT_MATCH_DSCP=m
604CONFIG_NETFILTER_XT_MATCH_ESP=m
605CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
606CONFIG_NETFILTER_XT_MATCH_HELPER=m
607CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
608CONFIG_NETFILTER_XT_MATCH_LENGTH=m
609CONFIG_NETFILTER_XT_MATCH_LIMIT=m
610CONFIG_NETFILTER_XT_MATCH_MAC=m
611CONFIG_NETFILTER_XT_MATCH_MARK=m
612CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
613CONFIG_NETFILTER_XT_MATCH_OWNER=m
614CONFIG_NETFILTER_XT_MATCH_POLICY=m
615CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
616CONFIG_NETFILTER_XT_MATCH_QUOTA=m
617CONFIG_NETFILTER_XT_MATCH_RATEEST=m
618CONFIG_NETFILTER_XT_MATCH_REALM=m
619# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
620CONFIG_NETFILTER_XT_MATCH_SCTP=m
621CONFIG_NETFILTER_XT_MATCH_STATE=y
622CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
623CONFIG_NETFILTER_XT_MATCH_STRING=m
624CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
625CONFIG_NETFILTER_XT_MATCH_TIME=m
626CONFIG_NETFILTER_XT_MATCH_U32=m
627# CONFIG_IP_VS is not set
628
629#
630# IP: Netfilter Configuration
631#
632CONFIG_NF_DEFRAG_IPV4=y
633CONFIG_NF_CONNTRACK_IPV4=y
634# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
635CONFIG_IP_NF_QUEUE=m
636CONFIG_IP_NF_IPTABLES=y
637CONFIG_IP_NF_MATCH_ADDRTYPE=m
638CONFIG_IP_NF_MATCH_AH=m
639CONFIG_IP_NF_MATCH_ECN=m
640CONFIG_IP_NF_MATCH_TTL=m
641CONFIG_IP_NF_FILTER=y
642CONFIG_IP_NF_TARGET_REJECT=y
643CONFIG_IP_NF_TARGET_LOG=m
644CONFIG_IP_NF_TARGET_ULOG=m
645CONFIG_NF_NAT=m
646CONFIG_NF_NAT_NEEDED=y
647CONFIG_IP_NF_TARGET_MASQUERADE=m
648CONFIG_IP_NF_TARGET_NETMAP=m
649CONFIG_IP_NF_TARGET_REDIRECT=m
650CONFIG_NF_NAT_SNMP_BASIC=m
651CONFIG_NF_NAT_PROTO_GRE=m
652CONFIG_NF_NAT_PROTO_UDPLITE=m
653CONFIG_NF_NAT_PROTO_SCTP=m
654CONFIG_NF_NAT_FTP=m
655CONFIG_NF_NAT_IRC=m
656CONFIG_NF_NAT_TFTP=m
657CONFIG_NF_NAT_AMANDA=m
658CONFIG_NF_NAT_PPTP=m
659CONFIG_NF_NAT_H323=m
660CONFIG_NF_NAT_SIP=m
661CONFIG_IP_NF_MANGLE=m
662CONFIG_IP_NF_TARGET_CLUSTERIP=m
663CONFIG_IP_NF_TARGET_ECN=m
664CONFIG_IP_NF_TARGET_TTL=m
665CONFIG_IP_NF_RAW=m
666CONFIG_IP_NF_ARPTABLES=m
667CONFIG_IP_NF_ARPFILTER=m
668CONFIG_IP_NF_ARP_MANGLE=m
669
670#
671# IPv6: Netfilter Configuration
672#
673CONFIG_NF_CONNTRACK_IPV6=y
674CONFIG_IP6_NF_QUEUE=m
675CONFIG_IP6_NF_IPTABLES=y
676CONFIG_IP6_NF_MATCH_AH=m
677CONFIG_IP6_NF_MATCH_EUI64=m
678CONFIG_IP6_NF_MATCH_FRAG=m
679CONFIG_IP6_NF_MATCH_OPTS=m
680CONFIG_IP6_NF_MATCH_HL=m
681CONFIG_IP6_NF_MATCH_IPV6HEADER=m
682CONFIG_IP6_NF_MATCH_MH=m
683CONFIG_IP6_NF_MATCH_RT=m
684CONFIG_IP6_NF_TARGET_LOG=m
685CONFIG_IP6_NF_FILTER=y
686CONFIG_IP6_NF_TARGET_REJECT=y
687CONFIG_IP6_NF_MANGLE=m
688CONFIG_IP6_NF_TARGET_HL=m
689CONFIG_IP6_NF_RAW=m
690# CONFIG_IP_DCCP is not set
691# CONFIG_IP_SCTP is not set
692# CONFIG_TIPC is not set
693# CONFIG_ATM is not set
694# CONFIG_BRIDGE is not set
695# CONFIG_NET_DSA is not set
696# CONFIG_VLAN_8021Q is not set
697# CONFIG_DECNET is not set
698# CONFIG_LLC2 is not set
699# CONFIG_IPX is not set
700# CONFIG_ATALK is not set
701# CONFIG_X25 is not set
702# CONFIG_LAPB is not set
703# CONFIG_ECONET is not set
704# CONFIG_WAN_ROUTER is not set
705# CONFIG_NET_SCHED is not set
706CONFIG_NET_CLS_ROUTE=y
707# CONFIG_DCB is not set
708
709#
710# Network testing
711#
712# CONFIG_NET_PKTGEN is not set
713# CONFIG_HAMRADIO is not set
714# CONFIG_CAN is not set
715# CONFIG_IRDA is not set
716CONFIG_BT=y
717CONFIG_BT_L2CAP=y
718CONFIG_BT_SCO=y
719CONFIG_BT_RFCOMM=y
720CONFIG_BT_RFCOMM_TTY=y
721CONFIG_BT_BNEP=y
722CONFIG_BT_BNEP_MC_FILTER=y
723CONFIG_BT_BNEP_PROTO_FILTER=y
724CONFIG_BT_HIDP=y
725
726#
727# Bluetooth device drivers
728#
729CONFIG_BT_HCIBTUSB=y
730CONFIG_BT_HCIBTSDIO=m
731CONFIG_BT_HCIUART=m
732CONFIG_BT_HCIUART_H4=y
733CONFIG_BT_HCIUART_BCSP=y
734CONFIG_BT_HCIUART_LL=y
735CONFIG_BT_HCIBCM203X=m
736CONFIG_BT_HCIBPA10X=m
737CONFIG_BT_HCIBFUSB=m
738CONFIG_BT_HCIVHCI=m
739# CONFIG_AF_RXRPC is not set
740# CONFIG_PHONET is not set
741CONFIG_FIB_RULES=y
742CONFIG_WIRELESS=y
743CONFIG_CFG80211=y
744# CONFIG_CFG80211_REG_DEBUG is not set
745CONFIG_NL80211=y
746CONFIG_WIRELESS_OLD_REGULATORY=y
747CONFIG_WIRELESS_EXT=y
748CONFIG_WIRELESS_EXT_SYSFS=y
749CONFIG_LIB80211=y
750CONFIG_LIB80211_CRYPT_WEP=m
751CONFIG_LIB80211_CRYPT_CCMP=m
752CONFIG_LIB80211_CRYPT_TKIP=m
753CONFIG_MAC80211=y
754
755#
756# Rate control algorithm selection
757#
758CONFIG_MAC80211_RC_PID=y
759# CONFIG_MAC80211_RC_MINSTREL is not set
760CONFIG_MAC80211_RC_DEFAULT_PID=y
761# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
762CONFIG_MAC80211_RC_DEFAULT="pid"
763CONFIG_MAC80211_MESH=y
764CONFIG_MAC80211_LEDS=y
765CONFIG_MAC80211_DEBUGFS=y
766# CONFIG_MAC80211_DEBUG_MENU is not set
767CONFIG_IEEE80211=m
768# CONFIG_IEEE80211_DEBUG is not set
769CONFIG_IEEE80211_CRYPT_WEP=m
770CONFIG_IEEE80211_CRYPT_CCMP=m
771CONFIG_IEEE80211_CRYPT_TKIP=m
772CONFIG_WIMAX=m
773CONFIG_WIMAX_DEBUG_LEVEL=8
774CONFIG_RFKILL=y
775CONFIG_RFKILL_INPUT=y
776CONFIG_RFKILL_LEDS=y
777# CONFIG_NET_9P is not set
778
779#
780# Device Drivers
781#
782
783#
784# Generic Driver Options
785#
786CONFIG_UEVENT_HELPER_PATH=""
787CONFIG_STANDALONE=y
788CONFIG_PREVENT_FIRMWARE_BUILD=y
789CONFIG_FW_LOADER=y
790CONFIG_FIRMWARE_IN_KERNEL=y
791CONFIG_EXTRA_FIRMWARE=""
792# CONFIG_DEBUG_DRIVER is not set
793CONFIG_DEBUG_DEVRES=y
794# CONFIG_SYS_HYPERVISOR is not set
795# CONFIG_CONNECTOR is not set
796# CONFIG_MTD is not set
797# CONFIG_PARPORT is not set
798CONFIG_PNP=y
799# CONFIG_PNP_DEBUG_MESSAGES is not set
800
801#
802# Protocols
803#
804# CONFIG_ISAPNP is not set
805# CONFIG_PNPBIOS is not set
806CONFIG_PNPACPI=y
807CONFIG_BLK_DEV=y
808# CONFIG_BLK_DEV_FD is not set
809# CONFIG_BLK_DEV_XD is not set
810# CONFIG_BLK_CPQ_DA is not set
811# CONFIG_BLK_CPQ_CISS_DA is not set
812# CONFIG_BLK_DEV_DAC960 is not set
813# CONFIG_BLK_DEV_UMEM is not set
814# CONFIG_BLK_DEV_COW_COMMON is not set
815CONFIG_BLK_DEV_LOOP=y
816CONFIG_BLK_DEV_CRYPTOLOOP=m
817# CONFIG_BLK_DEV_NBD is not set
818# CONFIG_BLK_DEV_SX8 is not set
819# CONFIG_BLK_DEV_UB is not set
820CONFIG_BLK_DEV_RAM=m
821CONFIG_BLK_DEV_RAM_COUNT=16
822CONFIG_BLK_DEV_RAM_SIZE=16384
823# CONFIG_BLK_DEV_XIP is not set
824CONFIG_CDROM_PKTCDVD=m
825CONFIG_CDROM_PKTCDVD_BUFFERS=8
826# CONFIG_CDROM_PKTCDVD_WCACHE is not set
827# CONFIG_ATA_OVER_ETH is not set
828# CONFIG_BLK_DEV_HD is not set
829CONFIG_MISC_DEVICES=y
830# CONFIG_IBM_ASM is not set
831# CONFIG_PHANTOM is not set
832CONFIG_EEPROM_93CX6=m
833# CONFIG_SGI_IOC4 is not set
834CONFIG_TIFM_CORE=m
835# CONFIG_TIFM_7XX1 is not set
836CONFIG_ACER_WMI=y
837CONFIG_ASUS_LAPTOP=m
838CONFIG_FUJITSU_LAPTOP=m
839# CONFIG_FUJITSU_LAPTOP_DEBUG is not set
840CONFIG_TC1100_WMI=m
841CONFIG_HP_WMI=m
842# CONFIG_ICS932S401 is not set
843CONFIG_MSI_LAPTOP=m
844CONFIG_PANASONIC_LAPTOP=m
845CONFIG_COMPAL_LAPTOP=m
846CONFIG_SONY_LAPTOP=m
847# CONFIG_SONYPI_COMPAT is not set
848CONFIG_THINKPAD_ACPI=m
849# CONFIG_THINKPAD_ACPI_DEBUG is not set
850CONFIG_THINKPAD_ACPI_BAY=y
851CONFIG_THINKPAD_ACPI_VIDEO=y
852CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
853# CONFIG_INTEL_MENLOW is not set
854CONFIG_EEEPC_LAPTOP=m
855# CONFIG_ENCLOSURE_SERVICES is not set
856# CONFIG_HP_ILO is not set
857# CONFIG_C2PORT is not set
858CONFIG_HAVE_IDE=y
859# CONFIG_IDE is not set
860
861#
862# SCSI device support
863#
864CONFIG_RAID_ATTRS=m
865CONFIG_SCSI=y
866CONFIG_SCSI_DMA=y
867# CONFIG_SCSI_TGT is not set
868# CONFIG_SCSI_NETLINK is not set
869CONFIG_SCSI_PROC_FS=y
870
871#
872# SCSI support type (disk, tape, CD-ROM)
873#
874CONFIG_BLK_DEV_SD=y
875# CONFIG_CHR_DEV_ST is not set
876# CONFIG_CHR_DEV_OSST is not set
877CONFIG_BLK_DEV_SR=y
878CONFIG_BLK_DEV_SR_VENDOR=y
879CONFIG_CHR_DEV_SG=y
880# CONFIG_CHR_DEV_SCH is not set
881
882#
883# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
884#
885CONFIG_SCSI_MULTI_LUN=y
886CONFIG_SCSI_CONSTANTS=y
887CONFIG_SCSI_LOGGING=y
888CONFIG_SCSI_SCAN_ASYNC=y
889CONFIG_SCSI_WAIT_SCAN=m
890
891#
892# SCSI Transports
893#
894# CONFIG_SCSI_SPI_ATTRS is not set
895# CONFIG_SCSI_FC_ATTRS is not set
896# CONFIG_SCSI_ISCSI_ATTRS is not set
897# CONFIG_SCSI_SAS_ATTRS is not set
898# CONFIG_SCSI_SAS_LIBSAS is not set
899# CONFIG_SCSI_SRP_ATTRS is not set
900CONFIG_SCSI_LOWLEVEL=y
901# CONFIG_ISCSI_TCP is not set
902# CONFIG_SCSI_CXGB3_ISCSI is not set
903# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
904# CONFIG_SCSI_3W_9XXX is not set
905# CONFIG_SCSI_7000FASST is not set
906# CONFIG_SCSI_ACARD is not set
907# CONFIG_SCSI_AHA152X is not set
908# CONFIG_SCSI_AHA1542 is not set
909# CONFIG_SCSI_AACRAID is not set
910# CONFIG_SCSI_AIC7XXX is not set
911# CONFIG_SCSI_AIC7XXX_OLD is not set
912# CONFIG_SCSI_AIC79XX is not set
913# CONFIG_SCSI_AIC94XX is not set
914# CONFIG_SCSI_DPT_I2O is not set
915# CONFIG_SCSI_ADVANSYS is not set
916# CONFIG_SCSI_IN2000 is not set
917# CONFIG_SCSI_ARCMSR is not set
918# CONFIG_MEGARAID_NEWGEN is not set
919# CONFIG_MEGARAID_LEGACY is not set
920# CONFIG_MEGARAID_SAS is not set
921# CONFIG_SCSI_HPTIOP is not set
922# CONFIG_SCSI_BUSLOGIC is not set
923# CONFIG_LIBFC is not set
924# CONFIG_FCOE is not set
925# CONFIG_FCOE_FNIC is not set
926# CONFIG_SCSI_DMX3191D is not set
927# CONFIG_SCSI_DTC3280 is not set
928# CONFIG_SCSI_EATA is not set
929# CONFIG_SCSI_FUTURE_DOMAIN is not set
930# CONFIG_SCSI_GDTH is not set
931# CONFIG_SCSI_GENERIC_NCR5380 is not set
932# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
933# CONFIG_SCSI_IPS is not set
934# CONFIG_SCSI_INITIO is not set
935# CONFIG_SCSI_INIA100 is not set
936# CONFIG_SCSI_MVSAS is not set
937# CONFIG_SCSI_NCR53C406A is not set
938# CONFIG_SCSI_STEX is not set
939# CONFIG_SCSI_SYM53C8XX_2 is not set
940# CONFIG_SCSI_IPR is not set
941# CONFIG_SCSI_PAS16 is not set
942# CONFIG_SCSI_QLOGIC_FAS is not set
943# CONFIG_SCSI_QLOGIC_1280 is not set
944# CONFIG_SCSI_QLA_FC is not set
945# CONFIG_SCSI_QLA_ISCSI is not set
946# CONFIG_SCSI_LPFC is not set
947# CONFIG_SCSI_SYM53C416 is not set
948# CONFIG_SCSI_DC395x is not set
949# CONFIG_SCSI_DC390T is not set
950# CONFIG_SCSI_T128 is not set
951# CONFIG_SCSI_U14_34F is not set
952# CONFIG_SCSI_ULTRASTOR is not set
953# CONFIG_SCSI_NSP32 is not set
954# CONFIG_SCSI_DEBUG is not set
955# CONFIG_SCSI_SRP is not set
956# CONFIG_SCSI_DH is not set
957CONFIG_ATA=y
958# CONFIG_ATA_NONSTANDARD is not set
959CONFIG_ATA_ACPI=y
960# CONFIG_SATA_PMP is not set
961CONFIG_SATA_AHCI=y
962CONFIG_SATA_SIL24=y
963CONFIG_ATA_SFF=y
964# CONFIG_SATA_SVW is not set
965CONFIG_ATA_PIIX=y
966# CONFIG_SATA_MV is not set
967# CONFIG_SATA_NV is not set
968# CONFIG_PDC_ADMA is not set
969# CONFIG_SATA_QSTOR is not set
970# CONFIG_SATA_PROMISE is not set
971# CONFIG_SATA_SX4 is not set
972CONFIG_SATA_SIL=y
973# CONFIG_SATA_SIS is not set
974# CONFIG_SATA_ULI is not set
975# CONFIG_SATA_VIA is not set
976# CONFIG_SATA_VITESSE is not set
977# CONFIG_SATA_INIC162X is not set
978# CONFIG_PATA_ACPI is not set
979# CONFIG_PATA_ALI is not set
980# CONFIG_PATA_AMD is not set
981# CONFIG_PATA_ARTOP is not set
982# CONFIG_PATA_ATIIXP is not set
983# CONFIG_PATA_CMD640_PCI is not set
984# CONFIG_PATA_CMD64X is not set
985# CONFIG_PATA_CS5520 is not set
986# CONFIG_PATA_CS5530 is not set
987# CONFIG_PATA_CS5535 is not set
988# CONFIG_PATA_CS5536 is not set
989# CONFIG_PATA_CYPRESS is not set
990# CONFIG_PATA_EFAR is not set
991CONFIG_ATA_GENERIC=y
992# CONFIG_PATA_HPT366 is not set
993# CONFIG_PATA_HPT37X is not set
994# CONFIG_PATA_HPT3X2N is not set
995# CONFIG_PATA_HPT3X3 is not set
996# CONFIG_PATA_ISAPNP is not set
997# CONFIG_PATA_IT821X is not set
998# CONFIG_PATA_IT8213 is not set
999# CONFIG_PATA_JMICRON is not set
1000# CONFIG_PATA_LEGACY is not set
1001# CONFIG_PATA_TRIFLEX is not set
1002# CONFIG_PATA_MARVELL is not set
1003CONFIG_PATA_MPIIX=y
1004# CONFIG_PATA_OLDPIIX is not set
1005# CONFIG_PATA_NETCELL is not set
1006# CONFIG_PATA_NINJA32 is not set
1007# CONFIG_PATA_NS87410 is not set
1008# CONFIG_PATA_NS87415 is not set
1009# CONFIG_PATA_OPTI is not set
1010# CONFIG_PATA_OPTIDMA is not set
1011# CONFIG_PATA_PDC_OLD is not set
1012# CONFIG_PATA_QDI is not set
1013# CONFIG_PATA_RADISYS is not set
1014# CONFIG_PATA_RZ1000 is not set
1015# CONFIG_PATA_SC1200 is not set
1016# CONFIG_PATA_SERVERWORKS is not set
1017# CONFIG_PATA_PDC2027X is not set
1018# CONFIG_PATA_SIL680 is not set
1019# CONFIG_PATA_SIS is not set
1020# CONFIG_PATA_VIA is not set
1021# CONFIG_PATA_WINBOND is not set
1022# CONFIG_PATA_WINBOND_VLB is not set
1023CONFIG_PATA_SCH=y
1024CONFIG_MD=y
1025# CONFIG_BLK_DEV_MD is not set
1026CONFIG_BLK_DEV_DM=m
1027CONFIG_DM_DEBUG=y
1028# CONFIG_DM_CRYPT is not set
1029CONFIG_DM_SNAPSHOT=m
1030CONFIG_DM_MIRROR=m
1031CONFIG_DM_ZERO=m
1032CONFIG_DM_MULTIPATH=m
1033CONFIG_DM_DELAY=m
1034# CONFIG_DM_UEVENT is not set
1035CONFIG_FUSION=y
1036CONFIG_FUSION_SPI=y
1037CONFIG_FUSION_FC=m
1038CONFIG_FUSION_SAS=m
1039CONFIG_FUSION_MAX_SGE=40
1040CONFIG_FUSION_CTL=m
1041CONFIG_FUSION_LAN=m
1042CONFIG_FUSION_LOGGING=y
1043
1044#
1045# IEEE 1394 (FireWire) support
1046#
1047
1048#
1049# Enable only one of the two stacks, unless you know what you are doing
1050#
1051# CONFIG_FIREWIRE is not set
1052# CONFIG_IEEE1394 is not set
1053# CONFIG_I2O is not set
1054# CONFIG_MACINTOSH_DRIVERS is not set
1055CONFIG_NETDEVICES=y
1056# CONFIG_DUMMY is not set
1057# CONFIG_BONDING is not set
1058CONFIG_MACVLAN=m
1059# CONFIG_EQUALIZER is not set
1060CONFIG_TUN=y
1061# CONFIG_VETH is not set
1062# CONFIG_NET_SB1000 is not set
1063# CONFIG_ARCNET is not set
1064CONFIG_PHYLIB=m
1065
1066#
1067# MII PHY device drivers
1068#
1069CONFIG_MARVELL_PHY=m
1070CONFIG_DAVICOM_PHY=m
1071CONFIG_QSEMI_PHY=m
1072CONFIG_LXT_PHY=m
1073CONFIG_CICADA_PHY=m
1074CONFIG_VITESSE_PHY=m
1075CONFIG_SMSC_PHY=m
1076CONFIG_BROADCOM_PHY=m
1077CONFIG_ICPLUS_PHY=m
1078CONFIG_REALTEK_PHY=m
1079CONFIG_MDIO_BITBANG=m
1080CONFIG_NET_ETHERNET=y
1081CONFIG_MII=y
1082# CONFIG_NATIONAL_PHY is not set
1083# CONFIG_STE10XP is not set
1084# CONFIG_LSI_ET1011C_PHY is not set
1085# CONFIG_HAPPYMEAL is not set
1086# CONFIG_SUNGEM is not set
1087# CONFIG_CASSINI is not set
1088# CONFIG_NET_VENDOR_3COM is not set
1089# CONFIG_LANCE is not set
1090# CONFIG_NET_VENDOR_SMC is not set
1091# CONFIG_NET_VENDOR_RACAL is not set
1092# CONFIG_NET_TULIP is not set
1093# CONFIG_AT1700 is not set
1094# CONFIG_DEPCA is not set
1095# CONFIG_HP100 is not set
1096# CONFIG_NET_ISA is not set
1097# CONFIG_IBM_NEW_EMAC_ZMII is not set
1098# CONFIG_IBM_NEW_EMAC_RGMII is not set
1099# CONFIG_IBM_NEW_EMAC_TAH is not set
1100# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
1101# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
1102# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
1103# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
1104CONFIG_NET_PCI=y
1105CONFIG_PCNET32=m
1106# CONFIG_AMD8111_ETH is not set
1107# CONFIG_ADAPTEC_STARFIRE is not set
1108# CONFIG_AC3200 is not set
1109# CONFIG_APRICOT is not set
1110# CONFIG_B44 is not set
1111# CONFIG_FORCEDETH is not set
1112# CONFIG_CS89x0 is not set
1113# CONFIG_EEPRO100 is not set
1114CONFIG_E100=y
1115# CONFIG_FEALNX is not set
1116# CONFIG_NATSEMI is not set
1117# CONFIG_NE2K_PCI is not set
1118CONFIG_8139CP=m
1119CONFIG_8139TOO=m
1120CONFIG_8139TOO_PIO=y
1121# CONFIG_8139TOO_TUNE_TWISTER is not set
1122# CONFIG_8139TOO_8129 is not set
1123# CONFIG_8139_OLD_RX_RESET is not set
1124# CONFIG_R6040 is not set
1125CONFIG_SIS900=m
1126# CONFIG_EPIC100 is not set
1127# CONFIG_SMSC9420 is not set
1128# CONFIG_SUNDANCE is not set
1129# CONFIG_TLAN is not set
1130# CONFIG_VIA_RHINE is not set
1131# CONFIG_SC92031 is not set
1132CONFIG_ATL2=m
1133CONFIG_NETDEV_1000=y
1134# CONFIG_ACENIC is not set
1135# CONFIG_DL2K is not set
1136CONFIG_E1000=y
1137CONFIG_E1000E=y
1138# CONFIG_IP1000 is not set
1139CONFIG_IGB=y
1140# CONFIG_IGB_LRO is not set
1141# CONFIG_NS83820 is not set
1142# CONFIG_HAMACHI is not set
1143# CONFIG_YELLOWFIN is not set
1144CONFIG_R8169=y
1145CONFIG_SIS190=m
1146# CONFIG_SKGE is not set
1147CONFIG_SKY2=m
1148# CONFIG_SKY2_DEBUG is not set
1149# CONFIG_VIA_VELOCITY is not set
1150CONFIG_TIGON3=m
1151CONFIG_BNX2=m
1152# CONFIG_QLA3XXX is not set
1153CONFIG_ATL1=m
1154CONFIG_ATL1E=y
1155# CONFIG_JME is not set
1156CONFIG_NETDEV_10000=y
1157# CONFIG_CHELSIO_T1 is not set
1158# CONFIG_CHELSIO_T3 is not set
1159# CONFIG_ENIC is not set
1160CONFIG_IXGBE=m
1161CONFIG_IXGB=m
1162# CONFIG_S2IO is not set
1163# CONFIG_MYRI10GE is not set
1164# CONFIG_NETXEN_NIC is not set
1165# CONFIG_NIU is not set
1166# CONFIG_MLX4_EN is not set
1167# CONFIG_MLX4_CORE is not set
1168# CONFIG_TEHUTI is not set
1169CONFIG_BNX2X=m
1170# CONFIG_QLGE is not set
1171# CONFIG_SFC is not set
1172# CONFIG_TR is not set
1173
1174#
1175# Wireless LAN
1176#
1177CONFIG_WLAN_PRE80211=y
1178# CONFIG_STRIP is not set
1179# CONFIG_ARLAN is not set
1180# CONFIG_WAVELAN is not set
1181CONFIG_WLAN_80211=y
1182CONFIG_IPW2100=m
1183# CONFIG_IPW2100_MONITOR is not set
1184# CONFIG_IPW2100_DEBUG is not set
1185CONFIG_IPW2200=m
1186# CONFIG_IPW2200_MONITOR is not set
1187CONFIG_IPW2200_QOS=y
1188# CONFIG_IPW2200_DEBUG is not set
1189# CONFIG_LIBIPW_DEBUG is not set
1190CONFIG_LIBERTAS=y
1191# CONFIG_LIBERTAS_THINFIRM is not set
1192CONFIG_LIBERTAS_USB=m
1193CONFIG_LIBERTAS_SDIO=y
1194CONFIG_LIBERTAS_SPI=y
1195# CONFIG_LIBERTAS_DEBUG is not set
1196# CONFIG_AIRO is not set
1197# CONFIG_HERMES is not set
1198# CONFIG_ATMEL is not set
1199# CONFIG_PRISM54 is not set
1200CONFIG_USB_ZD1201=m
1201CONFIG_USB_NET_RNDIS_WLAN=m
1202CONFIG_RTL8180=m
1203CONFIG_RTL8187=m
1204# CONFIG_ADM8211 is not set
1205# CONFIG_MAC80211_HWSIM is not set
1206CONFIG_P54_COMMON=m
1207CONFIG_P54_USB=m
1208CONFIG_P54_PCI=m
1209CONFIG_ATH5K=y
1210CONFIG_ATH9K=m
1211# CONFIG_ATH9K_DEBUG is not set
1212CONFIG_IWLWIFI=m
1213CONFIG_IWLCORE=m
1214# CONFIG_IWLWIFI_LEDS is not set
1215CONFIG_IWLWIFI_RFKILL=y
1216# CONFIG_IWLWIFI_DEBUG is not set
1217CONFIG_IWLAGN=m
1218# CONFIG_IWLAGN_SPECTRUM_MEASUREMENT is not set
1219# CONFIG_IWLAGN_LEDS is not set
1220CONFIG_IWL4965=y
1221CONFIG_IWL5000=y
1222CONFIG_IWL3945=m
1223CONFIG_IWL3945_RFKILL=y
1224# CONFIG_IWL3945_SPECTRUM_MEASUREMENT is not set
1225# CONFIG_IWL3945_LEDS is not set
1226# CONFIG_IWL3945_DEBUG is not set
1227# CONFIG_HOSTAP is not set
1228# CONFIG_B43 is not set
1229CONFIG_B43_PCI_AUTOSELECT=y
1230CONFIG_B43_PCICORE_AUTOSELECT=y
1231CONFIG_B43_LEDS=y
1232CONFIG_B43_RFKILL=y
1233# CONFIG_B43_DEBUG is not set
1234# CONFIG_B43LEGACY is not set
1235# CONFIG_ZD1211RW is not set
1236CONFIG_RT2X00=m
1237CONFIG_RT2400PCI=m
1238CONFIG_RT2500PCI=m
1239CONFIG_RT61PCI=m
1240CONFIG_RT2500USB=m
1241CONFIG_RT73USB=m
1242CONFIG_RT2X00_LIB_PCI=m
1243CONFIG_RT2X00_LIB_USB=m
1244CONFIG_RT2X00_LIB=m
1245CONFIG_RT2X00_LIB_FIRMWARE=y
1246CONFIG_RT2X00_LIB_CRYPTO=y
1247CONFIG_RT2X00_LIB_RFKILL=y
1248CONFIG_RT2X00_LIB_LEDS=y
1249# CONFIG_RT2X00_LIB_DEBUGFS is not set
1250# CONFIG_RT2X00_DEBUG is not set
1251
1252#
1253# WiMAX Wireless Broadband devices
1254#
1255CONFIG_WIMAX_I2400M_USB=m
1256CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
1257# CONFIG_WIMAX_I2400M_SDIO is not set
1258#
1259#
1260
1261#
1262# USB Network Adapters
1263#
1264CONFIG_USB_CATC=m
1265CONFIG_USB_KAWETH=m
1266CONFIG_USB_PEGASUS=m
1267CONFIG_USB_RTL8150=m
1268CONFIG_USB_USBNET=m
1269CONFIG_USB_NET_AX8817X=m
1270CONFIG_USB_NET_CDCETHER=m
1271CONFIG_USB_NET_CDC_EEM=m
1272CONFIG_USB_NET_DM9601=m
1273CONFIG_USB_NET_SMSC95XX=m
1274CONFIG_USB_NET_GL620A=m
1275CONFIG_USB_NET_NET1080=m
1276CONFIG_USB_NET_PLUSB=m
1277CONFIG_USB_NET_MCS7830=m
1278CONFIG_USB_NET_RNDIS_HOST=m
1279CONFIG_USB_NET_CDC_SUBSET=m
1280CONFIG_USB_ALI_M5632=y
1281CONFIG_USB_AN2720=y
1282CONFIG_USB_BELKIN=y
1283CONFIG_USB_ARMLINUX=y
1284CONFIG_USB_EPSON2888=y
1285CONFIG_USB_KC2190=y
1286CONFIG_USB_NET_ZAURUS=m
1287CONFIG_USB_HSO=n
1288# CONFIG_WAN is not set
1289# CONFIG_FDDI is not set
1290# CONFIG_HIPPI is not set
1291CONFIG_PPP=m
1292CONFIG_PPP_MULTILINK=y
1293CONFIG_PPP_FILTER=y
1294CONFIG_PPP_ASYNC=m
1295CONFIG_PPP_SYNC_TTY=m
1296CONFIG_PPP_DEFLATE=m
1297CONFIG_PPP_BSDCOMP=m
1298CONFIG_PPP_MPPE=m
1299CONFIG_PPPOE=m
1300CONFIG_PPPOL2TP=m
1301# CONFIG_SLIP is not set
1302CONFIG_SLHC=m
1303# CONFIG_NET_FC is not set
1304# CONFIG_NETCONSOLE is not set
1305# CONFIG_NETPOLL is not set
1306# CONFIG_NET_POLL_CONTROLLER is not set
1307# CONFIG_ISDN is not set
1308# CONFIG_PHONE is not set
1309
1310#
1311# Input device support
1312#
1313CONFIG_INPUT=y
1314CONFIG_INPUT_FF_MEMLESS=y
1315CONFIG_INPUT_POLLDEV=m
1316
1317#
1318# Userland interfaces
1319#
1320CONFIG_INPUT_MOUSEDEV=y
1321# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
1322CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
1323CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
1324CONFIG_INPUT_JOYDEV=m
1325CONFIG_INPUT_EVDEV=y
1326# CONFIG_INPUT_EVBUG is not set
1327
1328#
1329# Input Device Drivers
1330#
1331CONFIG_INPUT_KEYBOARD=y
1332CONFIG_KEYBOARD_ATKBD=y
1333# CONFIG_KEYBOARD_SUNKBD is not set
1334# CONFIG_KEYBOARD_LKKBD is not set
1335# CONFIG_KEYBOARD_XTKBD is not set
1336# CONFIG_KEYBOARD_NEWTON is not set
1337# CONFIG_KEYBOARD_STOWAWAY is not set
1338CONFIG_INPUT_MOUSE=y
1339CONFIG_MOUSE_PS2=y
1340CONFIG_MOUSE_PS2_ALPS=y
1341CONFIG_MOUSE_PS2_LOGIPS2PP=y
1342CONFIG_MOUSE_PS2_SYNAPTICS=y
1343CONFIG_MOUSE_PS2_LIFEBOOK=y
1344CONFIG_MOUSE_PS2_TRACKPOINT=y
1345# CONFIG_MOUSE_PS2_ELANTECH is not set
1346CONFIG_MOUSE_PS2_TOUCHKIT=y
1347CONFIG_MOUSE_SERIAL=m
1348# CONFIG_MOUSE_APPLETOUCH is not set
1349# CONFIG_MOUSE_BCM5974 is not set
1350# CONFIG_MOUSE_INPORT is not set
1351# CONFIG_MOUSE_LOGIBM is not set
1352# CONFIG_MOUSE_PC110PAD is not set
1353CONFIG_MOUSE_VSXXXAA=m
1354CONFIG_INPUT_JOYSTICK=y
1355# CONFIG_JOYSTICK_ANALOG is not set
1356# CONFIG_JOYSTICK_A3D is not set
1357# CONFIG_JOYSTICK_ADI is not set
1358# CONFIG_JOYSTICK_COBRA is not set
1359# CONFIG_JOYSTICK_GF2K is not set
1360# CONFIG_JOYSTICK_GRIP is not set
1361# CONFIG_JOYSTICK_GRIP_MP is not set
1362# CONFIG_JOYSTICK_GUILLEMOT is not set
1363# CONFIG_JOYSTICK_INTERACT is not set
1364# CONFIG_JOYSTICK_SIDEWINDER is not set
1365# CONFIG_JOYSTICK_TMDC is not set
1366# CONFIG_JOYSTICK_IFORCE is not set
1367# CONFIG_JOYSTICK_WARRIOR is not set
1368# CONFIG_JOYSTICK_MAGELLAN is not set
1369# CONFIG_JOYSTICK_SPACEORB is not set
1370# CONFIG_JOYSTICK_SPACEBALL is not set
1371# CONFIG_JOYSTICK_STINGER is not set
1372# CONFIG_JOYSTICK_TWIDJOY is not set
1373# CONFIG_JOYSTICK_ZHENHUA is not set
1374# CONFIG_JOYSTICK_JOYDUMP is not set
1375# CONFIG_JOYSTICK_XPAD is not set
1376# CONFIG_INPUT_TABLET is not set
1377CONFIG_INPUT_TOUCHSCREEN=y
1378CONFIG_TOUCHSCREEN_FUJITSU=m
1379CONFIG_TOUCHSCREEN_GUNZE=m
1380CONFIG_TOUCHSCREEN_ELO=m
1381# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
1382CONFIG_TOUCHSCREEN_MTOUCH=m
1383CONFIG_TOUCHSCREEN_INEXIO=m
1384CONFIG_TOUCHSCREEN_MK712=m
1385CONFIG_TOUCHSCREEN_HTCPEN=m
1386CONFIG_TOUCHSCREEN_PENMOUNT=m
1387CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
1388CONFIG_TOUCHSCREEN_TOUCHWIN=m
1389CONFIG_TOUCHSCREEN_WM97XX=m
1390CONFIG_TOUCHSCREEN_WM9705=y
1391CONFIG_TOUCHSCREEN_WM9712=y
1392CONFIG_TOUCHSCREEN_WM9713=y
1393CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
1394CONFIG_TOUCHSCREEN_USB_EGALAX=y
1395CONFIG_TOUCHSCREEN_USB_PANJIT=y
1396CONFIG_TOUCHSCREEN_USB_3M=y
1397CONFIG_TOUCHSCREEN_USB_ITM=y
1398CONFIG_TOUCHSCREEN_USB_ETURBO=y
1399CONFIG_TOUCHSCREEN_USB_GUNZE=y
1400CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
1401CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
1402CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
1403CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
1404CONFIG_TOUCHSCREEN_USB_GOTOP=y
1405CONFIG_TOUCHSCREEN_TOUCHIT213=m
1406# CONFIG_TOUCHSCREEN_TSC2007 is not set
1407CONFIG_INPUT_MISC=y
1408# CONFIG_INPUT_PCSPKR is not set
1409# CONFIG_INPUT_APANEL is not set
1410CONFIG_INPUT_WISTRON_BTNS=m
1411# CONFIG_INPUT_ATLAS_BTNS is not set
1412# CONFIG_INPUT_ATI_REMOTE is not set
1413# CONFIG_INPUT_ATI_REMOTE2 is not set
1414CONFIG_INPUT_KEYSPAN_REMOTE=m
1415CONFIG_INPUT_POWERMATE=m
1416CONFIG_INPUT_YEALINK=m
1417# CONFIG_INPUT_CM109 is not set
1418# CONFIG_INPUT_UINPUT is not set
1419
1420#
1421# Hardware I/O ports
1422#
1423CONFIG_SERIO=y
1424CONFIG_SERIO_I8042=y
1425CONFIG_SERIO_SERPORT=y
1426# CONFIG_SERIO_CT82C710 is not set
1427# CONFIG_SERIO_PCIPS2 is not set
1428CONFIG_SERIO_LIBPS2=y
1429CONFIG_SERIO_RAW=m
1430# CONFIG_GAMEPORT is not set
1431
1432#
1433# Character devices
1434#
1435CONFIG_VT=y
1436CONFIG_CONSOLE_TRANSLATIONS=y
1437CONFIG_VT_CONSOLE=y
1438CONFIG_HW_CONSOLE=y
1439CONFIG_VT_HW_CONSOLE_BINDING=y
1440# CONFIG_DEVKMEM is not set
1441# CONFIG_SERIAL_NONSTANDARD is not set
1442# CONFIG_NOZOMI is not set
1443
1444#
1445# Serial drivers
1446#
1447CONFIG_SERIAL_8250=y
1448CONFIG_SERIAL_8250_CONSOLE=y
1449CONFIG_FIX_EARLYCON_MEM=y
1450CONFIG_SERIAL_8250_PCI=y
1451CONFIG_SERIAL_8250_PNP=y
1452CONFIG_SERIAL_8250_NR_UARTS=4
1453CONFIG_SERIAL_8250_RUNTIME_UARTS=4
1454# CONFIG_SERIAL_8250_EXTENDED is not set
1455
1456#
1457# Non-8250 serial port support
1458#
1459# CONFIG_SERIAL_MAX3110 is not set
1460# CONFIG_MRST_MAX3110 is not set
1461# CONFIG_MRST_MAX3110_IRQ is not set
1462CONFIG_SERIAL_CORE=y
1463CONFIG_SERIAL_CORE_CONSOLE=y
1464# CONFIG_SERIAL_JSM is not set
1465CONFIG_UNIX98_PTYS=y
1466# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
1467# CONFIG_LEGACY_PTYS is not set
1468# CONFIG_IPMI_HANDLER is not set
1469CONFIG_HW_RANDOM=m
1470# CONFIG_HW_RANDOM_INTEL is not set
1471# CONFIG_HW_RANDOM_AMD is not set
1472# CONFIG_HW_RANDOM_GEODE is not set
1473# CONFIG_HW_RANDOM_VIA is not set
1474CONFIG_NVRAM=m
1475# CONFIG_DTLK is not set
1476# CONFIG_R3964 is not set
1477# CONFIG_APPLICOM is not set
1478# CONFIG_SONYPI is not set
1479# CONFIG_MWAVE is not set
1480# CONFIG_PC8736x_GPIO is not set
1481# CONFIG_NSC_GPIO is not set
1482# CONFIG_CS5535_GPIO is not set
1483# CONFIG_RAW_DRIVER is not set
1484CONFIG_HPET=y
1485# CONFIG_HPET_MMAP is not set
1486# CONFIG_HANGCHECK_TIMER is not set
1487# CONFIG_TCG_TPM is not set
1488# CONFIG_TELCLOCK is not set
1489CONFIG_DEVPORT=y
1490CONFIG_I2C=y
1491CONFIG_I2C_BOARDINFO=y
1492# CONFIG_I2C_CHARDEV is not set
1493# CONFIG_I2C_HELPER_AUTO is not set
1494
1495#
1496# I2C Algorithms
1497#
1498# CONFIG_I2C_ALGOBIT is not set
1499# CONFIG_I2C_ALGOPCF is not set
1500# CONFIG_I2C_ALGOPCA is not set
1501
1502#
1503# I2C Hardware Bus support
1504#
1505
1506#
1507# PC SMBus host controller drivers
1508#
1509# CONFIG_I2C_ALI1535 is not set
1510# CONFIG_I2C_ALI1563 is not set
1511# CONFIG_I2C_ALI15X3 is not set
1512# CONFIG_I2C_AMD756 is not set
1513# CONFIG_I2C_AMD8111 is not set
1514# CONFIG_I2C_I801 is not set
1515# CONFIG_I2C_ISCH is not set
1516# CONFIG_I2C_PIIX4 is not set
1517# CONFIG_I2C_NFORCE2 is not set
1518# CONFIG_I2C_SIS5595 is not set
1519# CONFIG_I2C_SIS630 is not set
1520# CONFIG_I2C_SIS96X is not set
1521# CONFIG_I2C_VIA is not set
1522# CONFIG_I2C_VIAPRO is not set
1523
1524#
1525# I2C system bus drivers (mostly embedded / system-on-chip)
1526#
1527# CONFIG_I2C_OCORES is not set
1528# CONFIG_I2C_SIMTEC is not set
1529
1530#
1531# External I2C/SMBus adapter drivers
1532#
1533# CONFIG_I2C_PARPORT_LIGHT is not set
1534# CONFIG_I2C_TAOS_EVM is not set
1535# CONFIG_I2C_TINY_USB is not set
1536
1537#
1538# Graphics adapter I2C/DDC channel drivers
1539#
1540# CONFIG_I2C_VOODOO3 is not set
1541
1542#
1543# Other I2C/SMBus bus drivers
1544#
1545# CONFIG_I2C_PCA_ISA is not set
1546# CONFIG_I2C_PCA_PLATFORM is not set
1547# CONFIG_I2C_STUB is not set
1548# CONFIG_SCx200_ACB is not set
1549
1550#
1551# Miscellaneous I2C Chip support
1552#
1553# CONFIG_DS1682 is not set
1554# CONFIG_AT24 is not set
1555# CONFIG_SENSORS_EEPROM is not set
1556# CONFIG_EEPROM_AT24 is not set
1557# CONFIG_EEPROM_LEGACY is not set
1558# CONFIG_SENSORS_PCF8574 is not set
1559# CONFIG_PCF8575 is not set
1560# CONFIG_SENSORS_PCA9539 is not set
1561# CONFIG_SENSORS_PCF8591 is not set
1562# CONFIG_SENSORS_MAX6875 is not set
1563# CONFIG_SENSORS_TSL2550 is not set
1564# CONFIG_I2C_DEBUG_CORE is not set
1565# CONFIG_I2C_DEBUG_ALGO is not set
1566# CONFIG_I2C_DEBUG_BUS is not set
1567# CONFIG_I2C_DEBUG_CHIP is not set
1568# CONFIG_SPI is not set
1569# CONFIG_SPI_MRST_SLAVE is not set
1570# CONFIG_SPI_MRST_SLAVE_DMA is not set
1571CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
1572# CONFIG_GPIOLIB is not set
1573# CONFIG_W1 is not set
1574CONFIG_POWER_SUPPLY=y
1575# CONFIG_POWER_SUPPLY_DEBUG is not set
1576# CONFIG_PDA_POWER is not set
1577# CONFIG_BATTERY_DS2760 is not set
1578# CONFIG_BATTERY_BQ27x00 is not set
1579CONFIG_HWMON=y
1580# CONFIG_HWMON_VID is not set
1581# CONFIG_SENSORS_ABITUGURU is not set
1582# CONFIG_SENSORS_ABITUGURU3 is not set
1583# CONFIG_SENSORS_AD7414 is not set
1584# CONFIG_SENSORS_AD7418 is not set
1585# CONFIG_SENSORS_ADM1021 is not set
1586# CONFIG_SENSORS_ADM1025 is not set
1587# CONFIG_SENSORS_ADM1026 is not set
1588# CONFIG_SENSORS_ADM1029 is not set
1589# CONFIG_SENSORS_ADM1031 is not set
1590# CONFIG_SENSORS_ADM9240 is not set
1591# CONFIG_SENSORS_ADT7462 is not set
1592# CONFIG_SENSORS_ADT7470 is not set
1593# CONFIG_SENSORS_ADT7473 is not set
1594# CONFIG_SENSORS_K8TEMP is not set
1595# CONFIG_SENSORS_ASB100 is not set
1596# CONFIG_SENSORS_ATXP1 is not set
1597# CONFIG_SENSORS_DS1621 is not set
1598# CONFIG_SENSORS_I5K_AMB is not set
1599# CONFIG_SENSORS_F71805F is not set
1600# CONFIG_SENSORS_F71882FG is not set
1601# CONFIG_SENSORS_F75375S is not set
1602# CONFIG_SENSORS_FSCHER is not set
1603# CONFIG_SENSORS_FSCPOS is not set
1604# CONFIG_SENSORS_FSCHMD is not set
1605# CONFIG_SENSORS_GL518SM is not set
1606# CONFIG_SENSORS_GL520SM is not set
1607# CONFIG_SENSORS_CORETEMP is not set
1608# CONFIG_SENSORS_IT87 is not set
1609# CONFIG_SENSORS_LM63 is not set
1610# CONFIG_SENSORS_LM75 is not set
1611# CONFIG_SENSORS_LM77 is not set
1612# CONFIG_SENSORS_LM78 is not set
1613# CONFIG_SENSORS_LM80 is not set
1614# CONFIG_SENSORS_LM83 is not set
1615# CONFIG_SENSORS_LM85 is not set
1616# CONFIG_SENSORS_LM87 is not set
1617# CONFIG_SENSORS_LM90 is not set
1618# CONFIG_SENSORS_LM92 is not set
1619# CONFIG_SENSORS_LM93 is not set
1620# CONFIG_SENSORS_LTC4245 is not set
1621# CONFIG_SENSORS_MAX1619 is not set
1622# CONFIG_SENSORS_MAX6650 is not set
1623# CONFIG_SENSORS_PC87360 is not set
1624# CONFIG_SENSORS_PC87427 is not set
1625# CONFIG_SENSORS_SIS5595 is not set
1626# CONFIG_SENSORS_DME1737 is not set
1627# CONFIG_SENSORS_SMSC47M1 is not set
1628# CONFIG_SENSORS_SMSC47M192 is not set
1629# CONFIG_SENSORS_SMSC47B397 is not set
1630# CONFIG_SENSORS_ADS7828 is not set
1631# CONFIG_SENSORS_THMC50 is not set
1632# CONFIG_SENSORS_VIA686A is not set
1633# CONFIG_SENSORS_VT1211 is not set
1634# CONFIG_SENSORS_VT8231 is not set
1635# CONFIG_SENSORS_W83781D is not set
1636# CONFIG_SENSORS_W83791D is not set
1637# CONFIG_SENSORS_W83792D is not set
1638# CONFIG_SENSORS_W83793 is not set
1639# CONFIG_SENSORS_W83L785TS is not set
1640# CONFIG_SENSORS_W83L786NG is not set
1641# CONFIG_SENSORS_W83627HF is not set
1642# CONFIG_SENSORS_W83627EHF is not set
1643# CONFIG_SENSORS_HDAPS is not set
1644# CONFIG_SENSORS_LIS3LV02D is not set
1645# CONFIG_SENSORS_APPLESMC is not set
1646# CONFIG_HWMON_DEBUG_CHIP is not set
1647CONFIG_THERMAL=y
1648CONFIG_THERMAL_HWMON=y
1649# CONFIG_WATCHDOG is not set
1650CONFIG_SSB_POSSIBLE=y
1651
1652#
1653# Sonics Silicon Backplane
1654#
1655CONFIG_SSB=m
1656CONFIG_SSB_SPROM=y
1657CONFIG_SSB_PCIHOST_POSSIBLE=y
1658CONFIG_SSB_PCIHOST=y
1659CONFIG_SSB_B43_PCI_BRIDGE=y
1660# CONFIG_SSB_DEBUG is not set
1661CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
1662CONFIG_SSB_DRIVER_PCICORE=y
1663
1664#
1665# Multifunction device drivers
1666#
1667# CONFIG_MFD_CORE is not set
1668# CONFIG_MFD_SM501 is not set
1669# CONFIG_HTC_PASIC3 is not set
1670# CONFIG_TWL4030_CORE is not set
1671# CONFIG_MFD_TMIO is not set
1672# CONFIG_PMIC_DA903X is not set
1673# CONFIG_MFD_WM8400 is not set
1674# CONFIG_MFD_WM8350_I2C is not set
1675# CONFIG_REGULATOR is not set
1676
1677#
1678# Multimedia devices
1679#
1680CONFIG_MEDIA_SUPPORT=y
1681
1682#
1683# Multimedia core support
1684#
1685CONFIG_VIDEO_DEV=y
1686CONFIG_VIDEO_V4L2_COMMON=y
1687# CONFIG_VIDEO_ALLOW_V4L1 is not set
1688CONFIG_VIDEO_V4L1_COMPAT=y
1689CONFIG_DVB_CORE=m
1690CONFIG_VIDEO_MEDIA=m
1691
1692#
1693# Multimedia drivers
1694#
1695CONFIG_MEDIA_ATTACH=y
1696CONFIG_MEDIA_TUNER=m
1697# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
1698CONFIG_MEDIA_TUNER_SIMPLE=m
1699CONFIG_MEDIA_TUNER_TDA8290=m
1700CONFIG_MEDIA_TUNER_TDA9887=m
1701CONFIG_MEDIA_TUNER_TEA5761=m
1702CONFIG_MEDIA_TUNER_TEA5767=m
1703CONFIG_MEDIA_TUNER_MT20XX=m
1704CONFIG_MEDIA_TUNER_XC2028=m
1705CONFIG_MEDIA_TUNER_XC5000=m
1706CONFIG_VIDEO_V4L2=y
1707CONFIG_VIDEOBUF_GEN=m
1708CONFIG_VIDEOBUF_VMALLOC=m
1709CONFIG_VIDEO_CAPTURE_DRIVERS=y
1710# CONFIG_VIDEO_ADV_DEBUG is not set
1711# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
1712CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
1713# CONFIG_VIDEO_VIVI is not set
1714# CONFIG_VIDEO_BT848 is not set
1715# CONFIG_VIDEO_SAA5246A is not set
1716# CONFIG_VIDEO_SAA5249 is not set
1717# CONFIG_VIDEO_SAA7134 is not set
1718# CONFIG_VIDEO_HEXIUM_ORION is not set
1719# CONFIG_VIDEO_HEXIUM_GEMINI is not set
1720# CONFIG_VIDEO_CX88 is not set
1721# CONFIG_VIDEO_CX23885 is not set
1722# CONFIG_VIDEO_AU0828 is not set
1723# CONFIG_VIDEO_IVTV is not set
1724# CONFIG_VIDEO_CX18 is not set
1725# CONFIG_VIDEO_CAFE_CCIC is not set
1726# CONFIG_SOC_CAMERA is not set
1727CONFIG_V4L_USB_DRIVERS=y
1728CONFIG_USB_VIDEO_CLASS=m
1729CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
1730CONFIG_USB_GSPCA=m
1731# CONFIG_USB_M5602 is not set
1732# CONFIG_USB_STV06XX is not set
1733# CONFIG_USB_GSPCA_CONEX is not set
1734# CONFIG_USB_GSPCA_ETOMS is not set
1735# CONFIG_USB_GSPCA_FINEPIX is not set
1736# CONFIG_USB_GSPCA_MARS is not set
1737# CONFIG_USB_GSPCA_OV519 is not set
1738# CONFIG_USB_GSPCA_OV534 is not set
1739# CONFIG_USB_GSPCA_PAC207 is not set
1740# CONFIG_USB_GSPCA_PAC7311 is not set
1741# CONFIG_USB_GSPCA_SONIXB is not set
1742# CONFIG_USB_GSPCA_SONIXJ is not set
1743# CONFIG_USB_GSPCA_SPCA500 is not set
1744# CONFIG_USB_GSPCA_SPCA501 is not set
1745# CONFIG_USB_GSPCA_SPCA505 is not set
1746# CONFIG_USB_GSPCA_SPCA506 is not set
1747# CONFIG_USB_GSPCA_SPCA508 is not set
1748# CONFIG_USB_GSPCA_SPCA561 is not set
1749# CONFIG_USB_GSPCA_STK014 is not set
1750# CONFIG_USB_GSPCA_SUNPLUS is not set
1751# CONFIG_USB_GSPCA_T613 is not set
1752# CONFIG_USB_GSPCA_TV8532 is not set
1753# CONFIG_USB_GSPCA_VC032X is not set
1754# CONFIG_USB_GSPCA_ZC3XX is not set
1755# CONFIG_VIDEO_PVRUSB2 is not set
1756# CONFIG_VIDEO_EM28XX is not set
1757# CONFIG_VIDEO_USBVISION is not set
1758CONFIG_USB_ET61X251=m
1759CONFIG_USB_SN9C102=m
1760CONFIG_USB_ZC0301=m
1761CONFIG_USB_ZR364XX=m
1762CONFIG_USB_STKWEBCAM=m
1763CONFIG_USB_S2255=m
1764# CONFIG_RADIO_ADAPTERS is not set
1765# CONFIG_DVB_DYNAMIC_MINORS is not set
1766# CONFIG_DVB_CAPTURE_DRIVERS is not set
1767# CONFIG_DAB is not set
1768
1769#
1770# Graphics support
1771#
1772CONFIG_AGP=y
1773# CONFIG_AGP_ALI is not set
1774# CONFIG_AGP_ATI is not set
1775# CONFIG_AGP_AMD is not set
1776# CONFIG_AGP_AMD64 is not set
1777CONFIG_AGP_INTEL=y
1778# CONFIG_AGP_NVIDIA is not set
1779# CONFIG_AGP_SIS is not set
1780# CONFIG_AGP_SWORKS is not set
1781# CONFIG_AGP_VIA is not set
1782# CONFIG_AGP_EFFICEON is not set
1783CONFIG_DRM=y
1784# CONFIG_DRM_TDFX is not set
1785# CONFIG_DRM_R128 is not set
1786# CONFIG_DRM_RADEON is not set
1787CONFIG_DRM_I810=y
1788# CONFIG_DRM_I830 is not set
1789CONFIG_DRM_I915=y
1790CONFIG_DRM_I915_KMS=y
1791# CONFIG_DRM_MGA is not set
1792# CONFIG_DRM_SIS is not set
1793# CONFIG_DRM_VIA is not set
1794# CONFIG_DRM_SAVAGE is not set
1795# CONFIG_VGASTATE is not set
1796# CONFIG_DRM_PSB is not set
1797CONFIG_VIDEO_OUTPUT_CONTROL=y
1798CONFIG_FB=y
1799# CONFIG_FIRMWARE_EDID is not set
1800# CONFIG_FB_TRIDENT_ACCEL is not set
1801# CONFIG_FB_ARK is not set
1802# CONFIG_FB_PM3 is not set
1803# CONFIG_FB_CARMINE is not set
1804# CONFIG_FB_GEODE is not set
1805# CONFIG_FB_VIRTUAL is not set
1806# CONFIG_FB_METRONOME is not set
1807# CONFIG_FB_MB862XX is not set
1808
1809
1810CONFIG_BACKLIGHT_LCD_SUPPORT=y
1811# CONFIG_LCD_CLASS_DEVICE is not set
1812CONFIG_BACKLIGHT_CLASS_DEVICE=y
1813CONFIG_BACKLIGHT_GENERIC=y
1814CONFIG_SAMSUNG_BACKLIGHT=m
1815# CONFIG_BACKLIGHT_CORGI is not set
1816# CONFIG_BACKLIGHT_PROGEAR is not set
1817# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
1818# CONFIG_BACKLIGHT_SAHARA is not set
1819
1820
1821#
1822# Frame buffer hardware drivers
1823#
1824# CONFIG_FB_TILEBLITTING is not set
1825# CONFIG_FB_FOREIGN_ENDIAN is not set
1826# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
1827# CONFIG_FB_3DFX_ACCEL is not set
1828# CONFIG_FB_CIRRUS is not set
1829# CONFIG_FB_PM2 is not set
1830# CONFIG_FB_CYBER2000 is not set
1831# CONFIG_FB_ARC is not set
1832# CONFIG_FB_ASILIANT is not set
1833# CONFIG_FB_IMSTT is not set
1834# CONFIG_FB_VGA16 is not set
1835# CONFIG_FB_VESA is not set
1836# CONFIG_FB_EFI is not set
1837# CONFIG_FB_N411 is not set
1838# CONFIG_FB_HGA is not set
1839# CONFIG_FB_S1D13XXX is not set
1840# CONFIG_FB_NVIDIA is not set
1841# CONFIG_FB_RIVA is not set
1842CONFIG_FB_I810=m
1843# CONFIG_FB_I810_GTF is not set
1844# CONFIG_FB_LE80578 is not set
1845# CONFIG_FB_CARILLO_RANCH is not set
1846# CONFIG_FB_INTEL is not set
1847# CONFIG_FB_INTEL_DEBUG is not set
1848# CONFIG_FB_INTEL_I2C is not set
1849# CONFIG_FB_MATROX is not set
1850# CONFIG_FB_RADEON is not set
1851CONFIG_FB_RADEON_I2C=y
1852# CONFIG_FB_RADEON_BACKLIGHT is not set
1853# CONFIG_FB_RADEON_DEBUG is not set
1854# CONFIG_FB_ATY128 is not set
1855# CONFIG_FB_ATY is not set
1856# CONFIG_FB_S3 is not set
1857# CONFIG_FB_SAVAGE is not set
1858# CONFIG_FB_SIS is not set
1859# CONFIG_FB_SIS_300 is not set
1860# CONFIG_FB_SIS_315 is not set
1861# CONFIG_FB_VIA is not set
1862# CONFIG_FB_NEOMAGIC is not set
1863# CONFIG_FB_KYRO is not set
1864# CONFIG_FB_3DFX is not set
1865# CONFIG_FB_VOODOO1 is not set
1866# CONFIG_FB_VT8623 is not set
1867# CONFIG_FB_CYBLA is not set
1868# CONFIG_FB_TRIDENT is not set
1869# CONFIG_FB_ARK is not set
1870# CONFIG_FB_PM3 is not set
1871# CONFIG_FB_CARMINE is not set
1872# CONFIG_FB_GEODE is not set
1873# CONFIG_FB_VIRTUAL is not set
1874# CONFIG_FB_METRONOME is not set
1875# CONFIG_FB_MB862XX is not set
1876
1877#
1878# Display device support
1879#
1880# CONFIG_DISPLAY_SUPPORT is not set
1881
1882#
1883# Console display driver support
1884#
1885CONFIG_VGA_CONSOLE=y
1886CONFIG_VGACON_SOFT_SCROLLBACK=y
1887CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
1888# CONFIG_MDA_CONSOLE is not set
1889CONFIG_DUMMY_CONSOLE=y
1890CONFIG_FRAMEBUFFER_CONSOLE=y
1891CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
1892# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
1893CONFIG_FONTS=y
1894CONFIG_FONT_8x16=y
1895# CONFIG_LOGO is not set
1896CONFIG_SOUND=y
1897# CONFIG_SOUND_OSS_CORE is not set
1898CONFIG_SND=y
1899CONFIG_SND_TIMER=y
1900CONFIG_SND_PCM=y
1901CONFIG_SND_HWDEP=y
1902CONFIG_SND_RAWMIDI=m
1903CONFIG_SND_SEQUENCER=y
1904CONFIG_SND_SEQ_DUMMY=y
1905# CONFIG_SND_OSSEMUL is not set
1906# CONFIG_SND_MIXER_OSS is not set
1907# CONFIG_SND_PCM_OSS is not set
1908# CONFIG_SND_SEQUENCER_OSS is not set
1909# CONFIG_SND_HRTIMER is not set
1910CONFIG_SND_DYNAMIC_MINORS=y
1911# CONFIG_SND_SUPPORT_OLD_API is not set
1912CONFIG_SND_VERBOSE_PROCFS=y
1913CONFIG_SND_VERBOSE_PRINTK=y
1914CONFIG_SND_DEBUG=y
1915# CONFIG_SND_DEBUG_VERBOSE is not set
1916CONFIG_SND_PCM_XRUN_DEBUG=y
1917CONFIG_SND_VMASTER=y
1918CONFIG_SND_AC97_CODEC=y
1919CONFIG_SND_DRIVERS=y
1920# CONFIG_SND_DUMMY is not set
1921# CONFIG_SND_VIRMIDI is not set
1922# CONFIG_SND_MTPAV is not set
1923# CONFIG_SND_SERIAL_U16550 is not set
1924# CONFIG_SND_MPU401 is not set
1925CONFIG_SND_AC97_POWER_SAVE=y
1926CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5
1927# CONFIG_SND_ISA is not set
1928CONFIG_SND_PCI=y
1929# CONFIG_SND_AD1889 is not set
1930# CONFIG_SND_ALS300 is not set
1931# CONFIG_SND_ALS4000 is not set
1932# CONFIG_SND_ALI5451 is not set
1933# CONFIG_SND_ATIIXP is not set
1934# CONFIG_SND_ATIIXP_MODEM is not set
1935# CONFIG_SND_AU8810 is not set
1936# CONFIG_SND_AU8820 is not set
1937# CONFIG_SND_AU8830 is not set
1938# CONFIG_SND_AW2 is not set
1939# CONFIG_SND_AZT3328 is not set
1940# CONFIG_SND_BT87X is not set
1941# CONFIG_SND_CA0106 is not set
1942# CONFIG_SND_CMIPCI is not set
1943# CONFIG_SND_OXYGEN is not set
1944# CONFIG_SND_CS4281 is not set
1945# CONFIG_SND_CS46XX is not set
1946# CONFIG_SND_CS5530 is not set
1947# CONFIG_SND_CS5535AUDIO is not set
1948# CONFIG_SND_DARLA20 is not set
1949# CONFIG_SND_GINA20 is not set
1950# CONFIG_SND_LAYLA20 is not set
1951# CONFIG_SND_DARLA24 is not set
1952# CONFIG_SND_GINA24 is not set
1953# CONFIG_SND_LAYLA24 is not set
1954# CONFIG_SND_MONA is not set
1955# CONFIG_SND_MIA is not set
1956# CONFIG_SND_ECHO3G is not set
1957# CONFIG_SND_INDIGO is not set
1958# CONFIG_SND_INDIGOIO is not set
1959# CONFIG_SND_INDIGODJ is not set
1960# CONFIG_SND_EMU10K1 is not set
1961# CONFIG_SND_EMU10K1X is not set
1962# CONFIG_SND_ENS1370 is not set
1963CONFIG_SND_ENS1371=m
1964# CONFIG_SND_ES1938 is not set
1965# CONFIG_SND_ES1968 is not set
1966# CONFIG_SND_FM801 is not set
1967CONFIG_SND_HDA_INTEL=y
1968CONFIG_SND_HDA_HWDEP=y
1969# CONFIG_SND_HDA_RECONFIG is not set
1970# CONFIG_SND_HDA_INPUT_BEEP is not set
1971CONFIG_SND_HDA_CODEC_REALTEK=y
1972CONFIG_SND_HDA_CODEC_ANALOG=y
1973CONFIG_SND_HDA_CODEC_SIGMATEL=y
1974CONFIG_SND_HDA_CODEC_VIA=y
1975CONFIG_SND_HDA_CODEC_ATIHDMI=y
1976CONFIG_SND_HDA_CODEC_NVHDMI=y
1977CONFIG_SND_HDA_CODEC_INTELHDMI=y
1978CONFIG_SND_HDA_CODEC_CONEXANT=y
1979CONFIG_SND_HDA_CODEC_CMEDIA=y
1980CONFIG_SND_HDA_CODEC_SI3054=y
1981CONFIG_SND_HDA_GENERIC=y
1982CONFIG_SND_HDA_POWER_SAVE=y
1983CONFIG_SND_HDA_POWER_SAVE_DEFAULT=5
1984# CONFIG_SND_HDSP is not set
1985# CONFIG_SND_HDSPM is not set
1986# CONFIG_SND_HIFIER is not set
1987# CONFIG_SND_ICE1712 is not set
1988# CONFIG_SND_ICE1724 is not set
1989CONFIG_SND_INTEL8X0=y
1990# CONFIG_SND_INTEL8X0M is not set
1991# CONFIG_SND_KORG1212 is not set
1992# CONFIG_SND_MAESTRO3 is not set
1993# CONFIG_SND_MIXART is not set
1994# CONFIG_SND_NM256 is not set
1995# CONFIG_SND_PCXHR is not set
1996# CONFIG_SND_RIPTIDE is not set
1997# CONFIG_SND_RME32 is not set
1998# CONFIG_SND_RME96 is not set
1999# CONFIG_SND_RME9652 is not set
2000# CONFIG_SND_SIS7019 is not set
2001# CONFIG_SND_SONICVIBES is not set
2002# CONFIG_SND_TRIDENT is not set
2003# CONFIG_SND_VIA82XX is not set
2004# CONFIG_SND_VIA82XX_MODEM is not set
2005# CONFIG_SND_VIRTUOSO is not set
2006# CONFIG_SND_VX222 is not set
2007# CONFIG_SND_YMFPCI is not set
2008CONFIG_SND_USB=y
2009CONFIG_SND_USB_AUDIO=m
2010CONFIG_SND_USB_USX2Y=m
2011CONFIG_SND_USB_CAIAQ=m
2012CONFIG_SND_USB_CAIAQ_INPUT=y
2013# CONFIG_SND_USB_US122L is not set
2014# CONFIG_SND_SOC is not set
2015# CONFIG_SOUND_PRIME is not set
2016CONFIG_AC97_BUS=y
2017CONFIG_HID_SUPPORT=y
2018CONFIG_HID=y
2019CONFIG_HID_DEBUG=y
2020CONFIG_HIDRAW=y
2021
2022#
2023# USB Input Devices
2024#
2025CONFIG_USB_HID=y
2026CONFIG_HID_PID=y
2027CONFIG_USB_HIDDEV=y
2028
2029#
2030# Special HID drivers
2031#
2032CONFIG_HID_COMPAT=y
2033CONFIG_HID_A4TECH=y
2034CONFIG_HID_APPLE=y
2035CONFIG_HID_BELKIN=y
2036CONFIG_HID_BRIGHT=y
2037CONFIG_HID_CHERRY=y
2038CONFIG_HID_CHICONY=y
2039CONFIG_HID_CYPRESS=y
2040CONFIG_HID_DELL=y
2041CONFIG_HID_EZKEY=y
2042CONFIG_HID_GYRATION=y
2043CONFIG_HID_LOGITECH=y
2044# CONFIG_LOGITECH_FF is not set
2045# CONFIG_LOGIRUMBLEPAD2_FF is not set
2046CONFIG_HID_MICROSOFT=y
2047CONFIG_HID_MONTEREY=y
2048CONFIG_HID_PANTHERLORD=y
2049# CONFIG_PANTHERLORD_FF is not set
2050CONFIG_HID_PETALYNX=y
2051CONFIG_HID_SAMSUNG=y
2052CONFIG_HID_SONY=y
2053CONFIG_HID_SUNPLUS=y
2054# CONFIG_GREENASIA_FF is not set
2055# CONFIG_THRUSTMASTER_FF is not set
2056# CONFIG_ZEROPLUS_FF is not set
2057CONFIG_USB_SUPPORT=y
2058CONFIG_USB_ARCH_HAS_HCD=y
2059CONFIG_USB_ARCH_HAS_OHCI=y
2060CONFIG_USB_ARCH_HAS_EHCI=y
2061CONFIG_USB=y
2062# CONFIG_USB_DEBUG is not set
2063CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
2064
2065#
2066# Miscellaneous USB options
2067#
2068CONFIG_USB_DEVICEFS=y
2069# CONFIG_USB_DEVICE_CLASS is not set
2070# CONFIG_USB_DYNAMIC_MINORS is not set
2071CONFIG_USB_SUSPEND=y
2072# CONFIG_USB_OTG is not set
2073CONFIG_USB_MON=y
2074CONFIG_USB_WUSB=m
2075# CONFIG_USB_WUSB_CBAF is not set
2076
2077#
2078# USB Host Controller Drivers
2079#
2080# CONFIG_USB_C67X00_HCD is not set
2081CONFIG_USB_EHCI_HCD=y
2082CONFIG_USB_EHCI_ROOT_HUB_TT=y
2083CONFIG_USB_EHCI_TT_NEWSCHED=y
2084# CONFIG_USB_OXU210HP_HCD is not set
2085CONFIG_USB_ISP116X_HCD=m
2086# CONFIG_USB_ISP1760_HCD is not set
2087CONFIG_USB_OHCI_HCD=y
2088# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
2089# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
2090CONFIG_USB_OHCI_LITTLE_ENDIAN=y
2091CONFIG_USB_UHCI_HCD=y
2092CONFIG_USB_U132_HCD=m
2093CONFIG_USB_SL811_HCD=m
2094# CONFIG_USB_R8A66597_HCD is not set
2095CONFIG_USB_WHCI_HCD=m
2096CONFIG_USB_HWA_HCD=m
2097# CONFIG_USB_GADGET_MUSB_HDRC is not set
2098
2099#
2100# USB Device Class drivers
2101#
2102CONFIG_USB_ACM=m
2103CONFIG_USB_PRINTER=m
2104CONFIG_USB_WDM=m
2105# CONFIG_USB_TMC is not set
2106
2107#
2108# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
2109#
2110
2111#
2112# see USB_STORAGE Help for more information
2113#
2114CONFIG_USB_STORAGE=y
2115# CONFIG_USB_STORAGE_DEBUG is not set
2116CONFIG_USB_STORAGE_DATAFAB=y
2117CONFIG_USB_STORAGE_FREECOM=y
2118CONFIG_USB_STORAGE_ISD200=y
2119CONFIG_USB_STORAGE_DPCM=y
2120CONFIG_USB_STORAGE_USBAT=y
2121CONFIG_USB_STORAGE_SDDR09=y
2122CONFIG_USB_STORAGE_SDDR55=y
2123CONFIG_USB_STORAGE_JUMPSHOT=y
2124CONFIG_USB_STORAGE_ALAUDA=y
2125# CONFIG_USB_STORAGE_ONETOUCH is not set
2126CONFIG_USB_STORAGE_KARMA=y
2127# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
2128CONFIG_USB_LIBUSUAL=y
2129
2130#
2131# USB Imaging devices
2132#
2133CONFIG_USB_MDC800=m
2134CONFIG_USB_MICROTEK=m
2135
2136#
2137# USB port drivers
2138#
2139CONFIG_USB_SERIAL=m
2140CONFIG_USB_EZUSB=y
2141CONFIG_USB_SERIAL_GENERIC=y
2142CONFIG_USB_SERIAL_AIRCABLE=m
2143CONFIG_USB_SERIAL_ARK3116=m
2144CONFIG_USB_SERIAL_BELKIN=m
2145CONFIG_USB_SERIAL_CH341=m
2146CONFIG_USB_SERIAL_WHITEHEAT=m
2147CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
2148CONFIG_USB_SERIAL_CP2101=m
2149CONFIG_USB_SERIAL_CYPRESS_M8=m
2150CONFIG_USB_SERIAL_EMPEG=m
2151CONFIG_USB_SERIAL_FTDI_SIO=m
2152CONFIG_USB_SERIAL_FUNSOFT=m
2153CONFIG_USB_SERIAL_VISOR=m
2154CONFIG_USB_SERIAL_IPAQ=m
2155CONFIG_USB_SERIAL_IR=m
2156CONFIG_USB_SERIAL_EDGEPORT=m
2157CONFIG_USB_SERIAL_EDGEPORT_TI=m
2158CONFIG_USB_SERIAL_GARMIN=m
2159CONFIG_USB_SERIAL_IPW=m
2160CONFIG_USB_SERIAL_IUU=m
2161CONFIG_USB_SERIAL_KEYSPAN_PDA=m
2162CONFIG_USB_SERIAL_KEYSPAN=m
2163CONFIG_USB_SERIAL_KEYSPAN_MPR=y
2164CONFIG_USB_SERIAL_KEYSPAN_USA28=y
2165CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
2166CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
2167CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
2168CONFIG_USB_SERIAL_KEYSPAN_USA19=y
2169CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
2170CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
2171CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
2172CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
2173CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
2174CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
2175CONFIG_USB_SERIAL_KLSI=m
2176CONFIG_USB_SERIAL_KOBIL_SCT=m
2177CONFIG_USB_SERIAL_MCT_U232=m
2178CONFIG_USB_SERIAL_MOS7720=m
2179CONFIG_USB_SERIAL_MOS7840=m
2180# CONFIG_USB_SERIAL_MOTOROLA is not set
2181CONFIG_USB_SERIAL_NAVMAN=m
2182CONFIG_USB_SERIAL_PL2303=m
2183CONFIG_USB_SERIAL_OTI6858=m
2184# CONFIG_USB_SERIAL_SPCP8X5 is not set
2185CONFIG_USB_SERIAL_HP4X=m
2186CONFIG_USB_SERIAL_SAFE=m
2187CONFIG_USB_SERIAL_SAFE_PADDED=y
2188# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
2189CONFIG_USB_SERIAL_SIERRAWIRELESS=m
2190CONFIG_USB_SERIAL_TI=m
2191CONFIG_USB_SERIAL_CYBERJACK=m
2192CONFIG_USB_SERIAL_XIRCOM=m
2193CONFIG_USB_SERIAL_OPTION=m
2194CONFIG_USB_SERIAL_OMNINET=m
2195# CONFIG_USB_SERIAL_OPTICON is not set
2196CONFIG_USB_SERIAL_DEBUG=m
2197
2198#
2199# USB Miscellaneous drivers
2200#
2201CONFIG_USB_EMI62=m
2202CONFIG_USB_EMI26=m
2203CONFIG_USB_ADUTUX=m
2204# CONFIG_USB_SEVSEG is not set
2205# CONFIG_USB_RIO500 is not set
2206CONFIG_USB_LEGOTOWER=m
2207CONFIG_USB_LCD=m
2208CONFIG_USB_BERRY_CHARGE=m
2209CONFIG_USB_LED=m
2210# CONFIG_USB_CYPRESS_CY7C63 is not set
2211# CONFIG_USB_CYTHERM is not set
2212CONFIG_USB_PHIDGET=m
2213CONFIG_USB_PHIDGETKIT=m
2214CONFIG_USB_PHIDGETMOTORCONTROL=m
2215CONFIG_USB_PHIDGETSERVO=m
2216CONFIG_USB_IDMOUSE=m
2217CONFIG_USB_FTDI_ELAN=m
2218CONFIG_USB_APPLEDISPLAY=m
2219CONFIG_USB_SISUSBVGA=m
2220CONFIG_USB_SISUSBVGA_CON=y
2221CONFIG_USB_LD=m
2222CONFIG_USB_TRANCEVIBRATOR=m
2223CONFIG_USB_IOWARRIOR=m
2224# CONFIG_USB_TEST is not set
2225# CONFIG_USB_ISIGHTFW is not set
2226# CONFIG_USB_VST is not set
2227# CONFIG_USB_GADGET is not set
2228# CONFIG_USB_GADGET_DEBUG is not set
2229# CONFIG_USB_GADGET_DEBUG_FILES is not set
2230# CONFIG_USB_GADGET_DEBUG_FS is not set
2231# CONFIG_USB_GADGET_VBUS_DRAW is not set
2232# CONFIG_USB_GADGET_SELECTED is not set
2233# CONFIG_USB_GADGET_AT91 is not set
2234# CONFIG_USB_GADGET_ATMEL_USBA is not set
2235# CONFIG_USB_GADGET_FSL_USB2 is not set
2236# CONFIG_USB_GADGET_LH7A40X is not set
2237# CONFIG_USB_GADGET_OMAP is not set
2238# CONFIG_USB_GADGET_PXA25X is not set
2239# CONFIG_USB_GADGET_PXA27X is not set
2240# CONFIG_USB_GADGET_S3C2410 is not set
2241# CONFIG_USB_GADGET_IMX is not set
2242# CONFIG_USB_GADGET_M66592 is not set
2243# CONFIG_USB_GADGET_AMD5536UDC is not set
2244# CONFIG_USB_GADGET_FSL_QE is not set
2245# CONFIG_USB_GADGET_CI13XXX is not set
2246# CONFIG_USB_GADGET_NET2280 is not set
2247# CONFIG_USB_GADGET_GOKU is not set
2248# CONFIG_USB_GADGET_LANGWELL is not set
2249# CONFIG_USB_LANGWELL is not set
2250# CONFIG_USB_GADGET_DUMMY_HCD is not set
2251# CONFIG_USB_GADGET_DUALSPEED is not set
2252# CONFIG_USB_ZERO is not set
2253# CONFIG_USB_ETH is not set
2254# CONFIG_USB_ETH_RNDIS is not set
2255# CONFIG_USB_GADGETFS is not set
2256# CONFIG_USB_FILE_STORAGE is not set
2257# CONFIG_USB_FILE_STORAGE_TEST is not set
2258# CONFIG_USB_G_SERIAL is not set
2259# CONFIG_USB_MIDI_GADGET is not set
2260# CONFIG_USB_G_PRINTER is not set
2261# CONFIG_USB_CDC_COMPOSITE is not set
2262# CONFIG_USB_STILL_IMAGE is not set
2263
2264CONFIG_UWB=m
2265CONFIG_UWB_HWA=m
2266CONFIG_UWB_WHCI=m
2267# CONFIG_UWB_WLP is not set
2268# CONFIG_UWB_I1480U is not set
2269CONFIG_MMC=y
2270# CONFIG_MMC_DEBUG is not set
2271# CONFIG_MMC_UNSAFE_RESUME is not set
2272# CONFIG_SDIO_SUSPEND is not set
2273# CONFIG_MMC_SDHCI_MRST_SDIO1 is not set
2274
2275#
2276# MMC/SD/SDIO Card Drivers
2277#
2278CONFIG_MMC_BLOCK=y
2279CONFIG_MMC_BLOCK_BOUNCE=y
2280CONFIG_SDIO_UART=m
2281# CONFIG_MMC_TEST is not set
2282
2283#
2284# MMC/SD/SDIO Host Controller Drivers
2285#
2286CONFIG_MMC_SDHCI=y
2287CONFIG_MMC_SDHCI_PCI=y
2288# CONFIG_MMC_RICOH_MMC is not set
2289CONFIG_MMC_WBSD=m
2290CONFIG_MMC_TIFM_SD=m
2291# CONFIG_MEMSTICK is not set
2292CONFIG_NEW_LEDS=y
2293CONFIG_LEDS_CLASS=y
2294# CONFIG_MMC_CEATA_WR is not set
2295# CONFIG_MMC_SPI is not set
2296
2297#
2298# LED drivers
2299#
2300# CONFIG_LEDS_ALIX2 is not set
2301# CONFIG_LEDS_PCA9532 is not set
2302# CONFIG_LEDS_HP_DISK is not set
2303# CONFIG_LEDS_CLEVO_MAIL is not set
2304# CONFIG_LEDS_PCA955X is not set
2305
2306#
2307# LED Triggers
2308#
2309CONFIG_LEDS_TRIGGERS=y
2310# CONFIG_LEDS_TRIGGER_TIMER is not set
2311# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
2312# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
2313# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
2314# CONFIG_ACCESSIBILITY is not set
2315# CONFIG_INFINIBAND is not set
2316# CONFIG_EDAC is not set
2317CONFIG_RTC_LIB=y
2318CONFIG_RTC_CLASS=y
2319# CONFIG_RTC_HCTOSYS is not set
2320# CONFIG_RTC_DEBUG is not set
2321
2322#
2323# RTC interfaces
2324#
2325CONFIG_RTC_INTF_SYSFS=y
2326CONFIG_RTC_INTF_PROC=y
2327CONFIG_RTC_INTF_DEV=y
2328# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
2329# CONFIG_RTC_DRV_TEST is not set
2330
2331#
2332# I2C RTC drivers
2333#
2334# CONFIG_RTC_DRV_DS1307 is not set
2335# CONFIG_RTC_DRV_DS1374 is not set
2336# CONFIG_RTC_DRV_DS1672 is not set
2337# CONFIG_RTC_DRV_MAX6900 is not set
2338# CONFIG_RTC_DRV_RS5C372 is not set
2339# CONFIG_RTC_DRV_ISL1208 is not set
2340# CONFIG_RTC_DRV_X1205 is not set
2341# CONFIG_RTC_DRV_PCF8563 is not set
2342# CONFIG_RTC_DRV_PCF8583 is not set
2343# CONFIG_RTC_DRV_M41T80 is not set
2344# CONFIG_RTC_DRV_S35390A is not set
2345# CONFIG_RTC_DRV_FM3130 is not set
2346# CONFIG_RTC_DRV_RX8581 is not set
2347
2348#
2349# SPI RTC drivers
2350#
2351
2352#
2353# Platform RTC drivers
2354#
2355CONFIG_RTC_DRV_CMOS=y
2356# CONFIG_RTC_DRV_DS1286 is not set
2357# CONFIG_RTC_DRV_DS1511 is not set
2358# CONFIG_RTC_DRV_DS1553 is not set
2359# CONFIG_RTC_DRV_DS1742 is not set
2360# CONFIG_RTC_DRV_STK17TA8 is not set
2361# CONFIG_RTC_DRV_M48T86 is not set
2362# CONFIG_RTC_DRV_M48T35 is not set
2363# CONFIG_RTC_DRV_M48T59 is not set
2364# CONFIG_RTC_DRV_BQ4802 is not set
2365# CONFIG_RTC_DRV_V3020 is not set
2366
2367#
2368# on-CPU RTC drivers
2369#
2370# CONFIG_DMADEVICES is not set
2371# CONFIG_UIO is not set
2372CONFIG_STAGING=y
2373# CONFIG_STAGING_EXCLUDE_BUILD is not set
2374# CONFIG_ET131X is not set
2375# CONFIG_SLICOSS is not set
2376# CONFIG_SXG is not set
2377# CONFIG_ME4000 is not set
2378# CONFIG_MEILHAUS is not set
2379# CONFIG_VIDEO_GO7007 is not set
2380CONFIG_USB_IP_COMMON=m
2381CONFIG_USB_IP_VHCI_HCD=m
2382CONFIG_USB_IP_HOST=m
2383# CONFIG_W35UND is not set
2384CONFIG_PRISM2_USB=m
2385# CONFIG_ECHO is not set
2386CONFIG_RT2860=m
2387CONFIG_RT2870=m
2388# CONFIG_BENET is not set
2389# CONFIG_COMEDI is not set
2390# CONFIG_ASUS_OLED is not set
2391# CONFIG_USB_ATMEL is not set
2392# CONFIG_AGNX is not set
2393# CONFIG_OTUS is not set
2394# CONFIG_ALTERA_PCIE_CHDMA is not set
2395CONFIG_RTL8187SE=m
2396# CONFIG_INPUT_MIMIO is not set
2397# CONFIG_TRANZPORT is not set
2398# CONFIG_EPL is not set
2399
2400#
2401# Android
2402#
2403# CONFIG_ANDROID is not set
2404# CONFIG_ANDROID_BINDER_IPC is not set
2405# CONFIG_ANDROID_LOGGER is not set
2406# CONFIG_ANDROID_RAM_CONSOLE is not set
2407# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set
2408CONFIG_X86_PLATFORM_DEVICES=y
2409
2410#
2411# Firmware Drivers
2412#
2413# CONFIG_EDD is not set
2414CONFIG_FIRMWARE_MEMMAP=y
2415CONFIG_EFI_VARS=m
2416# CONFIG_DELL_RBU is not set
2417# CONFIG_DCDBAS is not set
2418CONFIG_DMIID=y
2419# CONFIG_ISCSI_IBFT_FIND is not set
2420
2421#
2422# File systems
2423#
2424CONFIG_EXT2_FS=y
2425# CONFIG_EXT2_FS_XATTR is not set
2426# CONFIG_EXT2_FS_XIP is not set
2427CONFIG_EXT3_FS=y
2428CONFIG_EXT3_FS_XATTR=y
2429CONFIG_EXT3_FS_POSIX_ACL=y
2430CONFIG_EXT3_FS_SECURITY=y
2431# CONFIG_EXT4_FS is not set
2432CONFIG_JBD=y
2433# CONFIG_JBD_DEBUG is not set
2434CONFIG_FS_MBCACHE=y
2435# CONFIG_REISERFS_FS is not set
2436# CONFIG_JFS_FS is not set
2437CONFIG_FS_POSIX_ACL=y
2438CONFIG_FILE_LOCKING=y
2439# CONFIG_XFS_FS is not set
2440# CONFIG_GFS2_FS is not set
2441# CONFIG_OCFS2_FS is not set
2442CONFIG_BTRFS_FS=y
2443CONFIG_BTRFS_FS_POSIX_ACL=y
2444CONFIG_DNOTIFY=y
2445CONFIG_INOTIFY=y
2446CONFIG_INOTIFY_USER=y
2447# CONFIG_QUOTA is not set
2448# CONFIG_AUTOFS_FS is not set
2449# CONFIG_AUTOFS4_FS is not set
2450CONFIG_FUSE_FS=m
2451CONFIG_GENERIC_ACL=y
2452
2453#
2454# CD-ROM/DVD Filesystems
2455#
2456CONFIG_ISO9660_FS=y
2457CONFIG_JOLIET=y
2458CONFIG_ZISOFS=y
2459CONFIG_UDF_FS=m
2460CONFIG_UDF_NLS=y
2461
2462#
2463# DOS/FAT/NT Filesystems
2464#
2465CONFIG_FAT_FS=y
2466CONFIG_MSDOS_FS=y
2467CONFIG_VFAT_FS=y
2468CONFIG_FAT_DEFAULT_CODEPAGE=437
2469CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
2470# CONFIG_NTFS_FS is not set
2471
2472#
2473# Pseudo filesystems
2474#
2475CONFIG_PROC_FS=y
2476# CONFIG_PROC_KCORE is not set
2477CONFIG_PROC_SYSCTL=y
2478CONFIG_PROC_PAGE_MONITOR=y
2479CONFIG_SYSFS=y
2480CONFIG_TMPFS=y
2481CONFIG_TMPFS_POSIX_ACL=y
2482# CONFIG_HUGETLBFS is not set
2483# CONFIG_HUGETLB_PAGE is not set
2484CONFIG_CONFIGFS_FS=m
2485
2486#
2487# Miscellaneous filesystems
2488#
2489CONFIG_MISC_FILESYSTEMS=y
2490# CONFIG_ADFS_FS is not set
2491# CONFIG_AFFS_FS is not set
2492# CONFIG_HFS_FS is not set
2493# CONFIG_HFSPLUS_FS is not set
2494# CONFIG_BEFS_FS is not set
2495# CONFIG_BFS_FS is not set
2496# CONFIG_EFS_FS is not set
2497# CONFIG_CRAMFS is not set
2498CONFIG_SQUASHFS=y
2499# CONFIG_SQUASHFS_EMBEDDED is not set
2500CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
2501# CONFIG_VXFS_FS is not set
2502# CONFIG_MINIX_FS is not set
2503# CONFIG_OMFS_FS is not set
2504# CONFIG_HPFS_FS is not set
2505# CONFIG_QNX4FS_FS is not set
2506# CONFIG_ROMFS_FS is not set
2507# CONFIG_SYSV_FS is not set
2508# CONFIG_UFS_FS is not set
2509CONFIG_NETWORK_FILESYSTEMS=y
2510# CONFIG_NFS_FS is not set
2511# CONFIG_NFSD is not set
2512# CONFIG_SMB_FS is not set
2513CONFIG_CIFS=m
2514# CONFIG_CIFS_STATS is not set
2515CONFIG_CIFS_WEAK_PW_HASH=y
2516# CONFIG_CIFS_XATTR is not set
2517# CONFIG_CIFS_DEBUG2 is not set
2518# CONFIG_CIFS_EXPERIMENTAL is not set
2519# CONFIG_NCP_FS is not set
2520# CONFIG_CODA_FS is not set
2521# CONFIG_AFS_FS is not set
2522
2523#
2524# Partition Types
2525#
2526CONFIG_PARTITION_ADVANCED=y
2527# CONFIG_ACORN_PARTITION is not set
2528# CONFIG_OSF_PARTITION is not set
2529# CONFIG_AMIGA_PARTITION is not set
2530# CONFIG_ATARI_PARTITION is not set
2531# CONFIG_MAC_PARTITION is not set
2532CONFIG_MSDOS_PARTITION=y
2533CONFIG_BSD_DISKLABEL=y
2534# CONFIG_MINIX_SUBPARTITION is not set
2535# CONFIG_SOLARIS_X86_PARTITION is not set
2536# CONFIG_UNIXWARE_DISKLABEL is not set
2537CONFIG_LDM_PARTITION=y
2538# CONFIG_LDM_DEBUG is not set
2539# CONFIG_SGI_PARTITION is not set
2540# CONFIG_ULTRIX_PARTITION is not set
2541# CONFIG_SUN_PARTITION is not set
2542# CONFIG_KARMA_PARTITION is not set
2543CONFIG_EFI_PARTITION=y
2544# CONFIG_SYSV68_PARTITION is not set
2545CONFIG_NLS=y
2546CONFIG_NLS_DEFAULT="utf8"
2547CONFIG_NLS_CODEPAGE_437=y
2548CONFIG_NLS_CODEPAGE_737=m
2549CONFIG_NLS_CODEPAGE_775=m
2550CONFIG_NLS_CODEPAGE_850=m
2551CONFIG_NLS_CODEPAGE_852=m
2552CONFIG_NLS_CODEPAGE_855=m
2553CONFIG_NLS_CODEPAGE_857=m
2554CONFIG_NLS_CODEPAGE_860=m
2555CONFIG_NLS_CODEPAGE_861=m
2556CONFIG_NLS_CODEPAGE_862=m
2557CONFIG_NLS_CODEPAGE_863=m
2558CONFIG_NLS_CODEPAGE_864=m
2559CONFIG_NLS_CODEPAGE_865=m
2560CONFIG_NLS_CODEPAGE_866=m
2561CONFIG_NLS_CODEPAGE_869=m
2562CONFIG_NLS_CODEPAGE_936=m
2563CONFIG_NLS_CODEPAGE_950=m
2564CONFIG_NLS_CODEPAGE_932=m
2565CONFIG_NLS_CODEPAGE_949=m
2566CONFIG_NLS_CODEPAGE_874=m
2567CONFIG_NLS_ISO8859_8=m
2568CONFIG_NLS_CODEPAGE_1250=m
2569CONFIG_NLS_CODEPAGE_1251=m
2570CONFIG_NLS_ASCII=y
2571CONFIG_NLS_ISO8859_1=m
2572CONFIG_NLS_ISO8859_2=m
2573CONFIG_NLS_ISO8859_3=m
2574CONFIG_NLS_ISO8859_4=m
2575CONFIG_NLS_ISO8859_5=m
2576CONFIG_NLS_ISO8859_6=m
2577CONFIG_NLS_ISO8859_7=m
2578CONFIG_NLS_ISO8859_9=m
2579CONFIG_NLS_ISO8859_13=m
2580CONFIG_NLS_ISO8859_14=m
2581CONFIG_NLS_ISO8859_15=m
2582CONFIG_NLS_KOI8_R=m
2583CONFIG_NLS_KOI8_U=m
2584CONFIG_NLS_UTF8=m
2585# CONFIG_DLM is not set
2586
2587#
2588# Kernel hacking
2589#
2590CONFIG_TRACE_IRQFLAGS_SUPPORT=y
2591CONFIG_PRINTK_TIME=y
2592# CONFIG_ENABLE_WARN_DEPRECATED is not set
2593CONFIG_ENABLE_MUST_CHECK=y
2594CONFIG_FRAME_WARN=1024
2595CONFIG_MAGIC_SYSRQ=y
2596# CONFIG_UNUSED_SYMBOLS is not set
2597CONFIG_DEBUG_FS=y
2598# CONFIG_HEADERS_CHECK is not set
2599CONFIG_DEBUG_KERNEL=y
2600CONFIG_DEBUG_SHIRQ=y
2601CONFIG_DETECT_SOFTLOCKUP=y
2602# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
2603CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
2604# CONFIG_SCHED_DEBUG is not set
2605CONFIG_SCHEDSTATS=y
2606CONFIG_TIMER_STATS=y
2607# CONFIG_DEBUG_OBJECTS is not set
2608# CONFIG_DEBUG_SLAB is not set
2609# CONFIG_DEBUG_RT_MUTEXES is not set
2610# CONFIG_RT_MUTEX_TESTER is not set
2611CONFIG_DEBUG_SPINLOCK=y
2612# CONFIG_DEBUG_MUTEXES is not set
2613# CONFIG_DEBUG_LOCK_ALLOC is not set
2614# CONFIG_PROVE_LOCKING is not set
2615# CONFIG_DEBUG_LOCKDEP is not set
2616# CONFIG_LOCK_STAT is not set
2617CONFIG_DEBUG_SPINLOCK_SLEEP=y
2618# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
2619CONFIG_STACKTRACE=y
2620# CONFIG_DEBUG_KOBJECT is not set
2621CONFIG_DEBUG_HIGHMEM=y
2622CONFIG_DEBUG_BUGVERBOSE=y
2623CONFIG_DEBUG_INFO=y
2624# CONFIG_DEBUG_VM is not set
2625# CONFIG_DEBUG_VIRTUAL is not set
2626# CONFIG_DEBUG_WRITECOUNT is not set
2627# CONFIG_DEBUG_MEMORY_INIT is not set
2628CONFIG_DEBUG_LIST=y
2629# CONFIG_DEBUG_SG is not set
2630CONFIG_DEBUG_NOTIFIERS=y
2631CONFIG_FRAME_POINTER=y
2632CONFIG_BOOT_PRINTK_DELAY=y
2633# CONFIG_RCU_TORTURE_TEST is not set
2634# CONFIG_RCU_CPU_STALL_DETECTOR is not set
2635# CONFIG_BACKTRACE_SELF_TEST is not set
2636# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
2637# CONFIG_FAULT_INJECTION is not set
2638CONFIG_LATENCYTOP=y
2639CONFIG_SYSCTL_SYSCALL_CHECK=y
2640CONFIG_HAVE_FUNCTION_TRACER=y
2641CONFIG_HAVE_DYNAMIC_FTRACE=y
2642CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
2643
2644# CONFIG_X86_VISWS is not set
2645# CONFIG_FTRACE_STARTUP_TEST is not set
2646#
2647# Tracers
2648#
2649# CONFIG_FUNCTION_TRACER is not set
2650# CONFIG_IRQSOFF_TRACER is not set
2651CONFIG_SYSPROF_TRACER=y
2652CONFIG_SCHED_TRACER=y
2653CONFIG_CONTEXT_SWITCH_TRACER=y
2654CONFIG_OPEN_CLOSE_TRACER=y
2655# CONFIG_BOOT_TRACER is not set
2656CONFIG_POWER_TRACER=y
2657# CONFIG_TRACE_BRANCH_PROFILING is not set
2658# CONFIG_STACK_TRACER is not set
2659# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
2660# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
2661# CONFIG_SAMPLES is not set
2662CONFIG_HAVE_ARCH_KGDB=y
2663# CONFIG_KGDB is not set
2664CONFIG_STRICT_DEVMEM=y
2665CONFIG_X86_VERBOSE_BOOTUP=y
2666CONFIG_EARLY_PRINTK=y
2667# CONFIG_EARLY_PRINTK_DBGP is not set
2668CONFIG_DEBUG_STACKOVERFLOW=y
2669# CONFIG_DEBUG_STACK_USAGE is not set
2670# CONFIG_DEBUG_PAGEALLOC is not set
2671# CONFIG_DEBUG_PER_CPU_MAPS is not set
2672# CONFIG_X86_PTDUMP is not set
2673CONFIG_DEBUG_RODATA=y
2674# CONFIG_DEBUG_RODATA_TEST is not set
2675# CONFIG_DEBUG_NX_TEST is not set
2676# CONFIG_4KSTACKS is not set
2677CONFIG_DOUBLEFAULT=y
2678# CONFIG_MMIOTRACE is not set
2679CONFIG_IO_DELAY_TYPE_0X80=0
2680CONFIG_IO_DELAY_TYPE_0XED=1
2681CONFIG_IO_DELAY_TYPE_UDELAY=2
2682CONFIG_IO_DELAY_TYPE_NONE=3
2683CONFIG_IO_DELAY_0X80=y
2684# CONFIG_IO_DELAY_0XED is not set
2685# CONFIG_IO_DELAY_UDELAY is not set
2686# CONFIG_IO_DELAY_NONE is not set
2687CONFIG_DEFAULT_IO_DELAY_TYPE=0
2688CONFIG_DEBUG_BOOT_PARAMS=y
2689# CONFIG_CPA_DEBUG is not set
2690# CONFIG_OPTIMIZE_INLINING is not set
2691
2692#
2693# Security options
2694#
2695# CONFIG_KEYS is not set
2696# CONFIG_SECURITY is not set
2697# CONFIG_SECURITYFS is not set
2698# CONFIG_SECURITY_FILE_CAPABILITIES is not set
2699CONFIG_CRYPTO=y
2700
2701#
2702# Crypto core or helper
2703#
2704# CONFIG_CRYPTO_FIPS is not set
2705CONFIG_CRYPTO_ALGAPI=y
2706CONFIG_CRYPTO_AEAD=y
2707CONFIG_CRYPTO_BLKCIPHER=y
2708CONFIG_CRYPTO_HASH=y
2709CONFIG_CRYPTO_RNG=y
2710CONFIG_CRYPTO_MANAGER=y
2711CONFIG_CRYPTO_GF128MUL=m
2712CONFIG_CRYPTO_NULL=m
2713# CONFIG_CRYPTO_CRYPTD is not set
2714CONFIG_CRYPTO_AUTHENC=m
2715CONFIG_CRYPTO_TEST=m
2716
2717#
2718# Authenticated Encryption with Associated Data
2719#
2720CONFIG_CRYPTO_CCM=m
2721CONFIG_CRYPTO_GCM=m
2722CONFIG_CRYPTO_SEQIV=m
2723
2724#
2725# Block modes
2726#
2727CONFIG_CRYPTO_CBC=m
2728CONFIG_CRYPTO_CTR=m
2729# CONFIG_CRYPTO_CTS is not set
2730CONFIG_CRYPTO_ECB=y
2731CONFIG_CRYPTO_LRW=m
2732CONFIG_CRYPTO_PCBC=m
2733CONFIG_CRYPTO_XTS=m
2734
2735#
2736# Hash modes
2737#
2738CONFIG_CRYPTO_HMAC=y
2739CONFIG_CRYPTO_XCBC=m
2740
2741#
2742# Digest
2743#
2744CONFIG_CRYPTO_CRC32C=m
2745# CONFIG_CRYPTO_CRC32C_INTEL is not set
2746CONFIG_CRYPTO_MD4=m
2747CONFIG_CRYPTO_MD5=y
2748CONFIG_CRYPTO_MICHAEL_MIC=m
2749# CONFIG_CRYPTO_RMD128 is not set
2750# CONFIG_CRYPTO_RMD160 is not set
2751# CONFIG_CRYPTO_RMD256 is not set
2752# CONFIG_CRYPTO_RMD320 is not set
2753CONFIG_CRYPTO_SHA1=y
2754CONFIG_CRYPTO_SHA256=m
2755CONFIG_CRYPTO_SHA512=m
2756CONFIG_CRYPTO_TGR192=m
2757CONFIG_CRYPTO_WP512=m
2758
2759#
2760# Ciphers
2761#
2762CONFIG_CRYPTO_AES=y
2763CONFIG_CRYPTO_AES_586=m
2764CONFIG_CRYPTO_ANUBIS=m
2765CONFIG_CRYPTO_ARC4=y
2766CONFIG_CRYPTO_BLOWFISH=m
2767CONFIG_CRYPTO_CAMELLIA=m
2768CONFIG_CRYPTO_CAST5=m
2769CONFIG_CRYPTO_CAST6=m
2770CONFIG_CRYPTO_DES=m
2771CONFIG_CRYPTO_FCRYPT=m
2772CONFIG_CRYPTO_KHAZAD=m
2773CONFIG_CRYPTO_SALSA20=m
2774CONFIG_CRYPTO_SALSA20_586=m
2775CONFIG_CRYPTO_SEED=m
2776CONFIG_CRYPTO_SERPENT=m
2777CONFIG_CRYPTO_TEA=m
2778CONFIG_CRYPTO_TWOFISH=m
2779CONFIG_CRYPTO_TWOFISH_COMMON=m
2780CONFIG_CRYPTO_TWOFISH_586=m
2781
2782#
2783# Compression
2784#
2785CONFIG_CRYPTO_DEFLATE=m
2786# CONFIG_CRYPTO_LZO is not set
2787
2788#
2789# Random Number Generation
2790#
2791# CONFIG_CRYPTO_ANSI_CPRNG is not set
2792CONFIG_CRYPTO_HW=y
2793# CONFIG_CRYPTO_DEV_PADLOCK is not set
2794# CONFIG_CRYPTO_DEV_GEODE is not set
2795# CONFIG_CRYPTO_DEV_HIFN_795X is not set
2796CONFIG_HAVE_KVM=y
2797CONFIG_VIRTUALIZATION=y
2798
2799#
2800# Library routines
2801#
2802CONFIG_BITREVERSE=y
2803CONFIG_GENERIC_FIND_FIRST_BIT=y
2804CONFIG_GENERIC_FIND_NEXT_BIT=y
2805CONFIG_CRC_CCITT=m
2806CONFIG_CRC16=m
2807# CONFIG_CRC_T10DIF is not set
2808CONFIG_CRC_ITU_T=m
2809CONFIG_CRC32=y
2810# CONFIG_CRC7 is not set
2811CONFIG_LIBCRC32C=m
2812CONFIG_ZLIB_INFLATE=y
2813CONFIG_ZLIB_DEFLATE=m
2814CONFIG_TEXTSEARCH=y
2815CONFIG_TEXTSEARCH_KMP=m
2816CONFIG_TEXTSEARCH_BM=m
2817CONFIG_TEXTSEARCH_FSM=m
2818CONFIG_PLIST=y
2819CONFIG_HAS_IOMEM=y
2820CONFIG_HAS_IOPORT=y
2821CONFIG_HAS_DMA=y
2822CONFIG_CHECK_SIGNATURE=y
2823
2824
2825# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
2826# CONFIG_MFD_PCF50633 is not set
2827# CONFIG_SENSORS_ADT7475 is not set
2828# CONFIG_LIB80211_DEBUG is not set
2829# CONFIG_DNET is not set
2830# CONFIG_BE2NET is not set
2831
2832
2833
2834# CONFIG_LNW_IPC is not set
2835# CONFIG_MRST is not set
2836# CONFIG_SFI is not set
2837# CONFIG_MDIO_GPIO is not set
2838# CONFIG_KEYBOARD_GPIO is not set
2839# CONFIG_MOUSE_GPIO is not set
2840# CONFIG_I2C_GPIO is not set
2841# CONFIG_DEBUG_GPIO is not set
2842# CONFIG_GPIO_SYSFS is not set
2843# CONFIG_GPIO_LANGWELL is not set
2844# CONFIG_GPIO_MAX732X is not set
2845# CONFIG_GPIO_PCA953X is not set
2846# CONFIG_GPIO_PCF857X is not set
2847# CONFIG_GPIO_BT8XX is not set
2848# CONFIG_UCB1400_CORE is not set
2849# CONFIG_TPS65010 is not set
2850# CONFIG_USB_GPIO_VBUS is not set
2851# CONFIG_LEDS_GPIO is not set
2852# CONFIG_ANDROID_TIMED_GPIO is not set
2853# CONFIG_X86_MRST_EARLY_PRINTK is not set
2854
2855# CONFIG_APB_TIMER is not set
2856# CONFIG_MRST_SPI_UART_BOOT_MSG is not set
2857# CONFIG_SFI_DEBUG is not set
2858# CONFIG_SFI_PROCFS is not set
2859# CONFIG_TOUCHSCREEN_UCB1400 is not set
2860# CONFIG_GPIO_LNWPMIC is not set
2861# CONFIG_RTC_DRV_VRTC is not set
2862# CONFIG_MRST_NAND is not set
2863# CONFIG_MRST_NAND_HW is not set
2864# CONFIG_USB_LANGWELL_OTG is not set
2865# CONFIG_KEYBOARD_MRST is not set
2866# CONFIG_I2C_MRST is not set
2867# CONFIG_MRST_VIB is not set
2868# CONFIG_SENSORS_ISL29020 is not set
2869# CONFIG_SENSORS_HMC6352 is not set
2870# CONFIG_SENSORS_LIS331DL is not set
2871# CONFIG_SENSORS_EMC1403 is not set
2872# CONFIG_SENSORS_MRST_ANALOG_ACCEL is not set
2873# CONFIG_USB_OTG_WHITELIST is not set
2874# CONFIG_USB_OTG_BLACKLIST_HUB is not set
2875# CONFIG_SND_PCM_OSS_PLUGINS is not set
2876# CONFIG_SND_INTEL_SST is not set
2877# CONFIG_SST_IPC_NOT_INCLUDED is not set
2878# CONFIG_SND_INTELMID is not set
2879# CONFIG_TOUCHSCREEN_MRSTOUCH is not set
2880CONFIG_ATL1C=m
2881# CONFIG_MRST_MMC_WR is not set
2882
2883
2884# CONFIG_VIDEO_MRSTCI is not set
2885# CONFIG_VIDEO_MRST_ISP is not set
2886# CONFIG_VIDEO_MRST_SENSOR is not set
2887# CONFIG_VIDEO_MRST_OV2650 is not set
2888# CONFIG_VIDEO_MRST_OV5630 is not set
2889# CONFIG_VIDEO_MRST_OV9665 is not set
2890# CONFIG_SPI2_MRST is not set
2891
2892# CONFIG_SFI_PM is not set
2893# CONFIG_SFI_CPUIDLE is not set
2894# CONFIG_SFI_PROCESSOR_PM is not set
2895# CONFIG_X86_SFI_CPUFREQ is not set
2896# CONFIG_MSTWN_POWER_MGMT is not set
2897# CONFIG_USB_NET_MBM is not set
2898
2899# CONFIG_USB_GADGET_LANGWELL is not set
2900# CONFIG_USB_LANGWELL is not set
2901
2902# CONFIG_INTEL_LNW_DMAC1 is not set
2903# CONFIG_INTEL_LNW_DMAC2 is not set
2904# CONFIG_LNW_DMA_DEBUG is not set
2905# CONFIG_NET_DMA is not set
2906# CONFIG_DMATEST is not set
2907# CONFIG_8688_RC is not set
2908# CONFIG_SSB_SILENT is not set
2909
2910# CONFIG_TOUCHSCREEN_TSC2003 is not set
2911# CONFIG_MFD_TIMBERDALE is not set
2912# CONFIG_MMC_SDHCI_PLTFM is not set
2913# CONFIG_SPI_XILINX is not set
2914# CONFIG_SPI_MRST is not set
2915# CONFIG_GPE is not set
2916
2917# CONFIG_STRIP_ASM_SYMS is not set
2918# CONFIG_X86_EXTENDED_PLATFORM is not set
2919# CONFIG_X86_32_NON_STANDARD is not set
2920# CONFIG_X86_CPU_DEBUG is not set
2921# CONFIG_CC_STACKPROTECTOR is not set
2922# CONFIG_DMAR is not set
2923# CONFIG_PCI_IOV is not set
2924# CONFIG_NETFILTER_XT_TARGET_LED is not set
2925# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
2926# CONFIG_NETFILTER_XT_MATCH_HL is not set
2927# CONFIG_NET_DROP_MONITOR is not set
2928# CONFIG_ISL29003 is not set
2929# CONFIG_SCSI_MPT2SAS is not set
2930# CONFIG_LIBFCOE is not set
2931# CONFIG_SCSI_OSD_INITIATOR is not set
2932# CONFIG_ETHOC is not set
2933# CONFIG_IGBVF is not set
2934# CONFIG_VXGE is not set
2935CONFIG_AT76C50X_USB=m
2936# CONFIG_MWL8K is not set
2937CONFIG_P54_SPI=m
2938CONFIG_AR9170_USB=m
2939# CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT is not set
2940CONFIG_TOUCHSCREEN_AD7877=m
2941CONFIG_TOUCHSCREEN_AD7879_I2C=m
2942CONFIG_TOUCHSCREEN_AD7879_SPI=m
2943CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
2944CONFIG_SERIAL_MAX3100=m
2945# CONFIG_HW_RANDOM_TIMERIOMEM is not set
2946# CONFIG_SENSORS_ATK0110 is not set
2947# CONFIG_SENSORS_G760A is not set
2948# CONFIG_SENSORS_LTC4215 is not set
2949# CONFIG_SENSORS_LM95241 is not set
2950# CONFIG_SENSORS_SHT15 is not set
2951# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
2952# CONFIG_VIDEO_ZORAN is not set
2953CONFIG_USB_GSPCA_MR97310A=m
2954CONFIG_USB_GSPCA_SQ905=m
2955CONFIG_USB_GSPCA_SQ905C=m
2956# CONFIG_VIDEO_HDPVR is not set
2957# CONFIG_VIDEO_CX231XX is not set
2958# CONFIG_USB_PWC_INPUT_EVDEV is not set
2959# CONFIG_FB_BROADSHEET is not set
2960# CONFIG_SND_PCSP is not set
2961# CONFIG_SND_INDIGOIOX is not set
2962# CONFIG_SND_INDIGODJX is not set
2963# CONFIG_DRAGONRISE_FF is not set
2964CONFIG_USB_SERIAL_CP210X=m
2965CONFIG_USB_SERIAL_QUALCOMM=m
2966CONFIG_USB_SERIAL_SYMBOL=m
2967# CONFIG_NOP_USB_XCEIV is not set
2968# CONFIG_LEDS_LP5521 is not set
2969# CONFIG_LEDS_DAC124S085 is not set
2970# CONFIG_LEDS_BD2802 is not set
2971# CONFIG_LEDS_TRIGGER_GPIO is not set
2972# CONFIG_AUXDISPLAY is not set
2973CONFIG_RT3070=m
2974# CONFIG_DST is not set
2975# CONFIG_POHMELFS is not set
2976# CONFIG_STLC45XX is not set
2977CONFIG_USB_SERIAL_ATEN2011=m
2978# CONFIG_B3DFG is not set
2979# CONFIG_IDE_PHISON is not set
2980# CONFIG_PLAN9AUTH is not set
2981# CONFIG_HECI is not set
2982CONFIG_LINE6_USB=m
2983CONFIG_USB_SERIAL_QUATECH_ESU100=m
2984CONFIG_DELL_WMI=m
2985# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
2986# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
2987# CONFIG_FSCACHE is not set
2988# CONFIG_NILFS2_FS is not set
2989# CONFIG_DETECT_HUNG_TASK is not set
2990# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
2991CONFIG_EVENT_TRACER=y
2992# CONFIG_FTRACE_SYSCALLS is not set
2993# CONFIG_KMEMTRACE is not set
2994# CONFIG_WORKQUEUE_TRACER is not set
2995# CONFIG_DYNAMIC_DEBUG is not set
2996# CONFIG_DMA_API_DEBUG is not set
2997# CONFIG_IMA is not set
2998CONFIG_CRYPTO_ZLIB=y
2999# CONFIG_VXGE is not set
3000# CONFIG_COMPAT_NET_DEV_OPS is not set
3001CONFIG_RD_GZIP=y
3002# CONFIG_RD_BZIP2 is not set
3003# CONFIG_RD_LZMA is not set
3004CONFIG_SENSORS_LIS3_SPI=y
3005# CONFIG_VIDEO_SAA6588 is not set
3006# CONFIG_VIDEO_BT819 is not set
3007# CONFIG_VIDEO_BT856 is not set
3008# CONFIG_VIDEO_BT866 is not set
3009# CONFIG_VIDEO_KS0127 is not set
3010# CONFIG_VIDEO_SAA7110 is not set
3011# CONFIG_VIDEO_VPX3220 is not set
3012# CONFIG_VIDEO_SAA7185 is not set
3013# CONFIG_VIDEO_ADV7170 is not set
3014# CONFIG_VIDEO_ADV7175 is not set
3015# CONFIG_HID_KYE is not set
3016# CONFIG_HID_KENSINGTON is not set
3017# CONFIG_SECURITY_TOMOYO is not set
3018# CONFIG_RTC_DRV_M41T94 is not set
3019# CONFIG_RTC_DRV_DS1305 is not set
3020# CONFIG_RTC_DRV_DS1390 is not set
3021# CONFIG_RTC_DRV_MAX6902 is not set
3022# CONFIG_RTC_DRV_R9701 is not set
3023# CONFIG_RTC_DRV_RS5C348 is not set
3024# CONFIG_RTC_DRV_DS3234 is not set
3025
3026# CONFIG_KS8842 is not set
3027# CONFIG_VIDEO_ADV7180 is not set
3028# CONFIG_MOST is not set
3029
3030CONFIG_PERF_COUNTERS=y
3031CONFIG_EVENT_PROFILE=y
3032# CONFIG_GCOV_KERNEL is not set
3033CONFIG_LBDAF=y
3034# CONFIG_X86_OLD_MCE is not set
3035CONFIG_X86_MCE_INTEL=y
3036# CONFIG_X86_MCE_AMD is not set
3037# CONFIG_X86_ANCIENT_MCE is not set
3038# CONFIG_X86_MCE_INJECT is not set
3039CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
3040# CONFIG_NETFILTER_XT_MATCH_OSF is not set
3041# CONFIG_IEEE802154 is not set
3042# CONFIG_CFG80211_DEBUGFS is not set
3043CONFIG_MAC80211_DEFAULT_PS=y
3044# CONFIG_EEPROM_MAX6875 is not set
3045# CONFIG_CB710_CORE is not set
3046# CONFIG_SCSI_BNX2_ISCSI is not set
3047# CONFIG_DM_LOG_USERSPACE is not set
3048# CONFIG_DM_MULTIPATH_QL is not set
3049# CONFIG_DM_MULTIPATH_ST is not set
3050# CONFIG_CNIC is not set
3051# CONFIG_RT2800USB is not set
3052# CONFIG_WL12XX is not set
3053# CONFIG_IWM is not set
3054# CONFIG_USB_NET_INT51X1 is not set
3055# CONFIG_KEYBOARD_LM8323 is not set
3056# CONFIG_MOUSE_SYNAPTICS_I2C is not set
3057# CONFIG_TOUCHSCREEN_EETI is not set
3058# CONFIG_TOUCHSCREEN_W90X900 is not set
3059# CONFIG_I2C_DESIGNWARE is not set
3060# CONFIG_PPS is not set
3061# CONFIG_BATTERY_MAX17040 is not set
3062# CONFIG_SENSORS_TMP401 is not set
3063# CONFIG_AB3100_CORE is not set
3064# CONFIG_EZX_PCAP is not set
3065# CONFIG_SND_CTXFI is not set
3066# CONFIG_SND_HDA_INPUT_JACK is not set
3067CONFIG_SND_HDA_CODEC_CA0110=y
3068# CONFIG_SND_LX6464ES is not set
3069# CONFIG_SMARTJOYPLUS_FF is not set
3070# CONFIG_USB_XHCI_HCD is not set
3071# CONFIG_MMC_CB710 is not set
3072# CONFIG_MMC_VIA_SDMMC is not set
3073# CONFIG_RTC_DRV_RX8025 is not set
3074# CONFIG_USB_SERIAL_QUATECH2 is not set
3075# CONFIG_VT6655 is not set
3076# CONFIG_USB_CPC is not set
3077# CONFIG_RDC_17F3101X is not set
3078# CONFIG_FB_UDL is not set
3079CONFIG_ACERHDF=m
3080CONFIG_FSNOTIFY=y
3081# CONFIG_CUSE is not set
3082# CONFIG_DEBUG_KMEMLEAK is not set
3083CONFIG_FTRACE=y
3084# CONFIG_RING_BUFFER_BENCHMARK is not set
3085# CONFIG_IOMMU_STRESS is not set
3086# CONFIG_LGUEST is not set
3087# CONFIG_MRST_LNW_A1_WR is not set
3088# CONFIG_MRST_LNW_A2_WR is not set
3089# CONFIG_KEYBOARD_MATRIX is not set
3090# CONFIG_LEDS_LP3944 is not set
3091CONFIG_RTL8192SU=m
3092# CONFIG_KS8851 is not set
3093# CONFIG_BATTERY_DS2782 is not set
3094CONFIG_USB_GSPCA_SN9C20X=m
3095CONFIG_USB_GSPCA_SN9C20X_EVDEV=y
3096# CONFIG_I2C_XILINX is not set
3097# CONFIG_RADIO_SAA7706H is not set
3098# CONFIG_RADIO_TEF6862 is not set
3099# CONFIG_RAR_REGISTER is not set
3100# CONFIG_MRST_RAR_HANDLER is not set
3101# CONFIG_MRST_CEATA_SUPPORT is not set
3102# CONFIG_MRST_IPC_TEST is not set
3103# CONFIG_DX_SEP is not set
3104# CONFIG_BT_MRVL is not set
3105# CONFIG_BT_MRVL_SDIO is not set
3106# CONFIG_GPIO_LANGWELL_PMIC is not set
3107
3108
3109#
3110# MTD options -- subconfigs need to turn on MTD themselves
3111# for these to become active
3112#
3113# CONFIG_MTD_DEBUG is not set
3114CONFIG_MTD_CONCAT=y
3115CONFIG_MTD_PARTITIONS=y
3116# CONFIG_MTD_TESTS is not set
3117# CONFIG_MTD_BLOCK_RO is not set
3118# CONFIG_FTL is not set
3119# CONFIG_NFTL is not set
3120# CONFIG_INFTL is not set
3121# CONFIG_RFD_FTL is not set
3122# CONFIG_SSFDC is not set
3123# CONFIG_MTD_OOPS is not set
3124# CONFIG_MTD_CFI is not set
3125# CONFIG_MTD_JEDECPROBE is not set
3126# CONFIG_MTD_RAM is not set
3127# CONFIG_MTD_ROM is not set
3128# CONFIG_MTD_ABSENT is not set
3129# CONFIG_MTD_COMPLEX_MAPPINGS is not set
3130# CONFIG_MTD_TS5500 is not set
3131# CONFIG_MTD_INTEL_VR_NOR is not set
3132# CONFIG_MTD_PLATRAM is not set
3133# CONFIG_MTD_PMC551 is not set
3134# CONFIG_MTD_DATAFLASH is not set
3135# CONFIG_MTD_M25P80 is not set
3136# CONFIG_MTD_SLRAM is not set
3137# CONFIG_MTD_PHRAM is not set
3138# CONFIG_MTD_MTDRAM is not set
3139# CONFIG_MTD_BLOCK2MTD is not set
3140# CONFIG_MTD_DOC2000 is not set
3141# CONFIG_MTD_DOC2001 is not set
3142# CONFIG_MTD_DOC2001PLUS is not set
3143CONFIG_MTD_NAND=y
3144# CONFIG_MTD_ONENAND is not set
3145# CONFIG_MTD_LPDDR is not set
3146CONFIG_MTD_UBI=y
3147CONFIG_JFFS2_FS=y
3148# CONFIG_MTD_REDBOOT_PARTS is not set
3149# CONFIG_MTD_CMDLINE_PARTS is not set
3150# CONFIG_MTD_AR7_PARTS is not set
3151# CONFIG_MTD_ABSENT is not set
3152# CONFIG_MTD_COMPLEX_MAPPINGS is not set
3153# CONFIG_MTD_NAND_VERIFY_WRITE is not set
3154# CONFIG_MTD_NAND_ECC_SMC is not set
3155# CONFIG_MTD_NAND_MUSEUM_IDS is not set
3156# CONFIG_MTD_NAND_DISKONCHIP is not set
3157# CONFIG_MTD_NAND_CAFE is not set
3158# CONFIG_MTD_NAND_CS553X is not set
3159# CONFIG_MTD_NAND_NANDSIM is not set
3160# CONFIG_MTD_NAND_PLATFORM is not set
3161# CONFIG_MTD_ALAUDA is not set
3162# CONFIG_MTD_UBI_GLUEBI is not set
3163# CONFIG_MTD_UBI_DEBUG is not set
3164# CONFIG_JFFS2_FS_WRITEBUFFER is not set
3165# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
3166# CONFIG_JFFS2_SUMMARY is not set
3167CONFIG_JFFS2_FS_XATTR=y
3168# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
3169CONFIG_UBIFS_FS=m
3170
3171# CONFIG_MTD_CHAR is not set
3172# CONFIG_MTD_BLKDEVS is not set
3173# CONFIG_MTD_BLOCK is not set
3174# CONFIG_MTD_BLOCK_RO is not set
3175# CONFIG_FTL is not set
3176# CONFIG_NFTL is not set
3177# CONFIG_INFTL is not set
3178# CONFIG_RFD_FTL is not set
3179# CONFIG_SSFDC is not set
3180# CONFIG_MTD_OOPS is not set
3181CONFIG_JFFS2_FS=y
3182CONFIG_JFFS2_FS_DEBUG=0
3183# CONFIG_JFFS2_FS_WRITEBUFFER is not set
3184# CONFIG_JFFS2_SUMMARY is not set
3185CONFIG_JFFS2_FS_XATTR=y
3186CONFIG_JFFS2_FS_POSIX_ACL=y
3187CONFIG_JFFS2_FS_SECURITY=y
3188# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
3189CONFIG_JFFS2_ZLIB=y
3190# CONFIG_JFFS2_LZO is not set
3191CONFIG_JFFS2_RTIME=y
3192# CONFIG_JFFS2_RUBIN is not set
3193CONFIG_UBIFS_FS=m
3194# CONFIG_UBIFS_FS_XATTR is not set
3195# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
3196CONFIG_UBIFS_FS_LZO=y
3197CONFIG_UBIFS_FS_ZLIB=y
3198# CONFIG_UBIFS_FS_DEBUG is not set
3199CONFIG_MTD_UBI_WL_THRESHOLD=4096
3200CONFIG_MTD_UBI_BEB_RESERVE=1
3201# CONFIG_MTD_UBI_GLUEBI is not set
3202# CONFIG_IEGD is not set
3203# CONFIG_SERIAL_UARTLITE is not set
3204# CONFIG_R8169_VLAN is not set
3205# CONFIG_ATH5K_DEBUG is not set
3206CONFIG_FONT_8x8=y
3207CONFIG_FONT_6x11=y
3208CONFIG_FONT_7x14=y
3209# CONFIG_FONT_PEARL_8x8 is not set
3210# CONFIG_FONT_ACORN_8x8 is not set
3211# CONFIG_FONT_MINI_4x6 is not set
3212# CONFIG_FONT_SUN8x16 is not set
3213# CONFIG_FONT_SUN12x22 is not set
3214CONFIG_FONT_10x18=y
3215CONFIG_KVM=m
3216CONFIG_KVM_INTEL=m
3217# CONFIG_KVM_AMD is not set
3218# CONFIG_KVM_TRACE is not set
3219# CONFIG_VIRTIO_PCI is not set
3220# CONFIG_VIRTIO_BALLOON is not set
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-build-nonintconfig.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-build-nonintconfig.patch
deleted file mode 100644
index 38de047249..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-build-nonintconfig.patch
+++ /dev/null
@@ -1,142 +0,0 @@
1From e412ebbb8cea2aaf32f689ffc630b57cfe13bde5 Mon Sep 17 00:00:00 2001
2From: Alan Olsen <alanx.r.olsen@intel.com>
3Date: Tue, 21 Jul 2009 13:14:25 -0700
4Subject: [PATCH] linux-2.6-build-nonintconfig.patch
5
6Signed-off-by: Alan Olsen <alanx.r.olsen@intel.com>
7---
8 scripts/kconfig/Makefile | 5 +++++
9 scripts/kconfig/conf.c | 36 ++++++++++++++++++++++++++++++++----
10 2 files changed, 37 insertions(+), 4 deletions(-)
11
12diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
13index 5ddf8be..a4365db 100644
14--- a/scripts/kconfig/Makefile
15+++ b/scripts/kconfig/Makefile
16@@ -28,6 +28,11 @@ oldconfig: $(obj)/conf
17 silentoldconfig: $(obj)/conf
18 $< -s $(Kconfig)
19
20+nonint_oldconfig: $(obj)/conf
21+ $< -b $(Kconfig)
22+loose_nonint_oldconfig: $(obj)/conf
23+ $< -B $(Kconfig)
24+
25 # Create new linux.pot file
26 # Adjust charset to UTF-8 in .po file to accept UTF-8 in Kconfig files
27 # The symlink is used to repair a deficiency in arch/um
28diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
29index 3baaaec..2a81742 100644
30--- a/scripts/kconfig/conf.c
31+++ b/scripts/kconfig/conf.c
32@@ -23,6 +23,8 @@ enum {
33 ask_all,
34 ask_new,
35 ask_silent,
36+ dont_ask,
37+ dont_ask_dont_tell,
38 set_default,
39 set_yes,
40 set_mod,
41@@ -40,6 +42,8 @@ static struct menu *rootEntry;
42
43 static char nohelp_text[] = N_("Sorry, no help available for this option yet.\n");
44
45+static int return_value = 0;
46+
47 static const char *get_help(struct menu *menu)
48 {
49 if (menu_has_help(menu))
50@@ -360,7 +364,10 @@ static void conf(struct menu *menu)
51
52 switch (prop->type) {
53 case P_MENU:
54- if (input_mode == ask_silent && rootEntry != menu) {
55+ if ((input_mode == ask_silent ||
56+ input_mode == dont_ask ||
57+ input_mode == dont_ask_dont_tell) &&
58+ rootEntry != menu) {
59 check_conf(menu);
60 return;
61 }
62@@ -418,12 +425,21 @@ static void check_conf(struct menu *menu)
63 if (sym && !sym_has_value(sym)) {
64 if (sym_is_changable(sym) ||
65 (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)) {
66+ if (input_mode == dont_ask ||
67+ input_mode == dont_ask_dont_tell) {
68+ if (input_mode == dont_ask &&
69+ sym->name && !sym_is_choice_value(sym)) {
70+ fprintf(stderr,"CONFIG_%s\n",sym->name);
71+ ++return_value;
72+ }
73+ } else {
74 if (!conf_cnt++)
75 printf(_("*\n* Restart config...\n*\n"));
76 rootEntry = menu_get_parent_menu(menu);
77 conf(rootEntry);
78 }
79 }
80+ }
81
82 for (child = menu->list; child; child = child->next)
83 check_conf(child);
84@@ -439,7 +455,7 @@ int main(int ac, char **av)
85 bindtextdomain(PACKAGE, LOCALEDIR);
86 textdomain(PACKAGE);
87
88- while ((opt = getopt(ac, av, "osdD:nmyrh")) != -1) {
89+ while ((opt = getopt(ac, av, "osbBdD:nmyrh")) != -1) {
90 switch (opt) {
91 case 'o':
92 input_mode = ask_silent;
93@@ -448,6 +464,12 @@ int main(int ac, char **av)
94 input_mode = ask_silent;
95 sync_kconfig = 1;
96 break;
97+ case 'b':
98+ input_mode = dont_ask;
99+ break;
100+ case 'B':
101+ input_mode = dont_ask_dont_tell;
102+ break;
103 case 'd':
104 input_mode = set_default;
105 break;
106@@ -525,6 +547,8 @@ int main(int ac, char **av)
107 case ask_silent:
108 case ask_all:
109 case ask_new:
110+ case dont_ask:
111+ case dont_ask_dont_tell:
112 conf_read(NULL);
113 break;
114 case set_no:
115@@ -586,12 +610,16 @@ int main(int ac, char **av)
116 conf(&rootmenu);
117 input_mode = ask_silent;
118 /* fall through */
119+ case dont_ask:
120+ case dont_ask_dont_tell:
121 case ask_silent:
122 /* Update until a loop caused no more changes */
123 do {
124 conf_cnt = 0;
125 check_conf(&rootmenu);
126- } while (conf_cnt);
127+ } while (conf_cnt &&
128+ (input_mode != dont_ask &&
129+ input_mode != dont_ask_dont_tell));
130 break;
131 }
132
133@@ -613,5 +641,5 @@ int main(int ac, char **av)
134 exit(1);
135 }
136 }
137- return 0;
138+ return return_value;
139 }
140--
1411.6.0.6
142
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-driver-level-usb-autosuspend.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-driver-level-usb-autosuspend.patch
deleted file mode 100644
index 0073343d10..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-driver-level-usb-autosuspend.patch
+++ /dev/null
@@ -1,61 +0,0 @@
1commit 0f592e33934bf6108e33e34f00b425f98ee833ef
2Author: Matthew Garrett <mjg@redhat.com>
3Date: Wed Jul 8 19:04:23 2009 +0100
4
5 usb: Allow drivers to enable USB autosuspend on a per-device basis
6
7 USB autosuspend is currently only enabled by default for hubs. On other
8 hardware the decision is made by userspace. This is unnecessary in cases
9 where we know that the hardware supports autosuspend, so this patch adds
10 a function to allow drivers to enable it at probe time.
11
12 Signed-off-by: Matthew Garrett <mjg@redhat.com>
13
14diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
15index 69e5773..6e81caa 100644
16--- a/drivers/usb/core/driver.c
17+++ b/drivers/usb/core/driver.c
18@@ -1560,6 +1560,21 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
19 EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
20
21 /**
22+ * usb_device_autosuspend_enable - enable autosuspend on a device
23+ * @udev: the usb_device to be autosuspended
24+ *
25+ * This routine should be called by an interface driver when it knows that
26+ * the device in question supports USB autosuspend.
27+ *
28+ */
29+void usb_device_autosuspend_enable(struct usb_device *udev)
30+{
31+ udev->autosuspend_disabled = 0;
32+ udev->autoresume_disabled = 0;
33+}
34+EXPORT_SYMBOL_GPL(usb_device_autosuspend_enable);
35+
36+/**
37 * usb_autopm_get_interface - increment a USB interface's PM-usage counter
38 * @intf: the usb_interface whose counter should be incremented
39 *
40diff --git a/include/linux/usb.h b/include/linux/usb.h
41index b1e3c2f..61bddbe 100644
42--- a/include/linux/usb.h
43+++ b/include/linux/usb.h
44@@ -543,6 +543,7 @@ extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id);
45
46 /* USB autosuspend and autoresume */
47 #ifdef CONFIG_USB_SUSPEND
48+extern void usb_device_autosuspend_enable(struct usb_device *udev);
49 extern int usb_autopm_set_interface(struct usb_interface *intf);
50 extern int usb_autopm_get_interface(struct usb_interface *intf);
51 extern void usb_autopm_put_interface(struct usb_interface *intf);
52@@ -568,6 +569,9 @@ static inline void usb_mark_last_busy(struct usb_device *udev)
53
54 #else
55
56+static inline void usb_device_autosuspend_enable(struct usb_device *udev)
57+{ }
58+
59 static inline int usb_autopm_set_interface(struct usb_interface *intf)
60 { return 0; }
61
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-usb-uvc-autosuspend.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-usb-uvc-autosuspend.patch
deleted file mode 100644
index b7c7f6e0f4..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-usb-uvc-autosuspend.patch
+++ /dev/null
@@ -1,19 +0,0 @@
1commit 9d4c919bcfa794c054cc33155c7e3c53ac2c5684
2Author: Matthew Garrett <mjg@redhat.com>
3Date: Sun Jul 19 02:24:49 2009 +0100
4
5 Enable autosuspend on UVC by default
6
7diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
8index 89927b7..8de516b 100644
9--- a/drivers/media/video/uvc/uvc_driver.c
10+++ b/drivers/media/video/uvc/uvc_driver.c
11@@ -1647,6 +1647,8 @@ static int uvc_probe(struct usb_interface *intf,
12 "supported.\n", ret);
13 }
14
15+ usb_device_autosuspend_enable(udev);
16+
17 uvc_trace(UVC_TRACE_PROBE, "UVC device initialized.\n");
18 return 0;
19
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-dont-wait-for-mouse.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-dont-wait-for-mouse.patch
deleted file mode 100644
index 6b2d54ff5e..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-dont-wait-for-mouse.patch
+++ /dev/null
@@ -1,47 +0,0 @@
1From dce8113d033975f56630cf6d2a6a908cfb66059d Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 20 Jul 2008 13:12:16 -0700
4Subject: [PATCH] fastboot: remove "wait for all devices before mounting root" delay
5
6In the non-initrd case, we wait for all devices to finish their
7probing before we try to mount the rootfs.
8In practice, this means that we end up waiting 2 extra seconds for
9the PS/2 mouse probing even though the root holding device has been
10ready since a long time.
11
12The previous two patches in this series made the RAID autodetect code
13do it's own "wait for probing to be done" code, and added
14"wait and retry" functionality in case the root device isn't actually
15available.
16
17These two changes should make it safe to remove the delay itself,
18and this patch does this. On my test laptop, this reduces the boot time
19by 2 seconds (kernel time goes from 3.9 to 1.9 seconds).
20
21Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
22---
23---
24 init/do_mounts.c | 3 +++
25 1 file changed, 3 insertions(+)
26
27Index: linux-2.6.29/init/do_mounts.c
28===================================================================
29--- linux-2.6.29.orig/init/do_mounts.c
30+++ linux-2.6.29/init/do_mounts.c
31@@ -370,6 +370,7 @@ void __init prepare_namespace(void)
32 ssleep(root_delay);
33 }
34
35+#if 0
36 /*
37 * wait for the known devices to complete their probing
38 *
39@@ -378,6 +379,8 @@ void __init prepare_namespace(void)
40 * for the touchpad of a laptop to initialize.
41 */
42 wait_for_device_probe();
43+#endif
44+ async_synchronize_full();
45
46 md_run_setup();
47
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-jbd-longer-commit-interval.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-jbd-longer-commit-interval.patch
deleted file mode 100644
index 46a9e24a7e..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-jbd-longer-commit-interval.patch
+++ /dev/null
@@ -1,25 +0,0 @@
1From 0143f8eb8afcaccba5a78196fb3db4361e0097a7 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Mon, 9 Feb 2009 21:25:32 -0800
4Subject: [PATCH] jbd: longer commit interval
5
6... 5 seconds is rather harsh on ssd's..
7
8Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
9---
10 include/linux/jbd.h | 2 +-
11 1 file changed, 1 insertion(+), 1 deletion(-)
12
13Index: linux-2.6.29/include/linux/jbd.h
14===================================================================
15--- linux-2.6.29.orig/include/linux/jbd.h
16+++ linux-2.6.29/include/linux/jbd.h
17@@ -46,7 +46,7 @@
18 /*
19 * The default maximum commit age, in seconds.
20 */
21-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
22+#define JBD_DEFAULT_MAX_COMMIT_AGE 15
23
24 #ifdef CONFIG_JBD_DEBUG
25 /*
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-after-sata.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-after-sata.patch
deleted file mode 100644
index f635e2a88d..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-after-sata.patch
+++ /dev/null
@@ -1,38 +0,0 @@
1---
2 drivers/Makefile | 14 +++++++-------
3 1 file changed, 7 insertions(+), 7 deletions(-)
4
5Index: linux-2.6.29/drivers/Makefile
6===================================================================
7--- linux-2.6.29.orig/drivers/Makefile
8+++ linux-2.6.29/drivers/Makefile
9@@ -25,15 +25,8 @@ obj-$(CONFIG_REGULATOR) += regulator/
10 # default.
11 obj-y += char/
12
13-# gpu/ comes after char for AGP vs DRM startup
14-obj-y += gpu/
15-
16 obj-$(CONFIG_CONNECTOR) += connector/
17
18-# i810fb and intelfb depend on char/agp/
19-obj-$(CONFIG_FB_I810) += video/i810/
20-obj-$(CONFIG_FB_INTEL) += video/intelfb/
21-
22 obj-y += serial/
23 obj-$(CONFIG_PARPORT) += parport/
24 obj-y += base/ block/ misc/ mfd/ media/
25@@ -43,6 +36,13 @@ obj-$(CONFIG_IDE) += ide/
26 obj-$(CONFIG_SCSI) += scsi/
27 obj-$(CONFIG_ATA) += ata/
28 obj-y += net/
29+
30+# gpu/ comes after char for AGP vs DRM startup
31+obj-y += gpu/
32+# i810fb and intelfb depend on char/agp/
33+obj-$(CONFIG_FB_I810) += video/i810/
34+obj-$(CONFIG_FB_INTEL) += video/intelfb/
35+
36 obj-$(CONFIG_ATM) += atm/
37 obj-$(CONFIG_FUSION) += message/
38 obj-$(CONFIG_FIREWIRE) += firewire/
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-dont-blank-display.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-dont-blank-display.patch
deleted file mode 100644
index ad26326967..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-dont-blank-display.patch
+++ /dev/null
@@ -1,33 +0,0 @@
1--- vanilla-2.6.31-rc4/drivers/gpu/drm/i915/intel_lvds.c~ 2009-07-31 11:23:05.000000000 -0700
2+++ vanilla-2.6.31-rc4/drivers/gpu/drm/i915/intel_lvds.c 2009-07-31 11:23:05.000000000 -0700
3@@ -111,19 +111,12 @@ static void intel_lvds_set_power(struct
4 if (on) {
5 I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
6 POWER_TARGET_ON);
7- do {
8- pp_status = I915_READ(status_reg);
9- } while ((pp_status & PP_ON) == 0);
10-
11 intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
12 } else {
13 intel_lvds_set_backlight(dev, 0);
14
15 I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
16 ~POWER_TARGET_ON);
17- do {
18- pp_status = I915_READ(status_reg);
19- } while (pp_status & PP_ON);
20 }
21 }
22
23--- linux-2.6.31/drivers/gpu/drm/i915/intel_lvds.c~ 2009-10-11 10:13:38.000000000 -0700
24+++ linux-2.6.31/drivers/gpu/drm/i915/intel_lvds.c 2009-10-11 10:13:38.000000000 -0700
25@@ -98,7 +98,7 @@
26 static void intel_lvds_set_power(struct drm_device *dev, bool on)
27 {
28 struct drm_i915_private *dev_priv = dev->dev_private;
29- u32 pp_status, ctl_reg, status_reg;
30+ u32 ctl_reg, status_reg;
31
32 if (IS_IGDNG(dev)) {
33 ctl_reg = PCH_PP_CONTROL;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-edid-cache.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-edid-cache.patch
deleted file mode 100644
index 47e5b16a2a..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-edid-cache.patch
+++ /dev/null
@@ -1,58 +0,0 @@
1diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
2index 004541c..b218780 100644
3--- a/drivers/gpu/drm/i915/intel_drv.h
4+++ b/drivers/gpu/drm/i915/intel_drv.h
5@@ -81,6 +81,7 @@ struct intel_output {
6 int type;
7 struct i2c_adapter *i2c_bus;
8 struct i2c_adapter *ddc_bus;
9+ struct edid *edid;
10 bool load_detect_temp;
11 bool needs_tv_clock;
12 void *dev_priv;
13diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
14index 9ab38ef..9fba800 100644
15--- a/drivers/gpu/drm/i915/intel_lvds.c
16+++ b/drivers/gpu/drm/i915/intel_lvds.c
17@@ -657,6 +657,7 @@ static void intel_lvds_destroy(struct drm_connector *connector)
18 intel_i2c_destroy(intel_output->ddc_bus);
19 drm_sysfs_connector_remove(connector);
20 drm_connector_cleanup(connector);
21+ kfree(intel_output->edid);
22 kfree(connector);
23 }
24
25@@ -1017,5 +1018,6 @@ failed:
26 if (intel_output->ddc_bus)
27 intel_i2c_destroy(intel_output->ddc_bus);
28 drm_connector_cleanup(connector);
29+ kfree(intel_output->edid);
30 kfree(intel_output);
31 }
32diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
33index 67e2f46..5ac537f 100644
34--- a/drivers/gpu/drm/i915/intel_modes.c
35+++ b/drivers/gpu/drm/i915/intel_modes.c
36@@ -74,6 +74,10 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
37 int ret = 0;
38
39 intel_i2c_quirk_set(intel_output->base.dev, true);
40+ if (intel_output->edid && intel_output->type == INTEL_OUTPUT_LVDS) {
41+ printk(KERN_INFO "Skipping EDID probe due to cached edid\n");
42+ return ret;
43+ }
44 edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
45 intel_i2c_quirk_set(intel_output->base.dev, false);
46 if (edid) {
47@@ -81,7 +85,10 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
48 edid);
49 ret = drm_add_edid_modes(&intel_output->base, edid);
50 intel_output->base.display_info.raw_edid = NULL;
51- kfree(edid);
52+ if (intel_output->type == INTEL_OUTPUT_LVDS)
53+ intel_output->edid = edid;
54+ else
55+ kfree(edid);
56 }
57
58 return ret;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-run-async.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-run-async.patch
deleted file mode 100644
index eaef8eb6ce..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-run-async.patch
+++ /dev/null
@@ -1,118 +0,0 @@
1Index: b/drivers/gpu/drm/drm_crtc_helper.c
2===================================================================
3--- a/drivers/gpu/drm/drm_crtc_helper.c
4+++ b/drivers/gpu/drm/drm_crtc_helper.c
5@@ -29,6 +29,8 @@
6 * Jesse Barnes <jesse.barnes@intel.com>
7 */
8
9+#include <linux/async.h>
10+
11 #include "drmP.h"
12 #include "drm_crtc.h"
13 #include "drm_crtc_helper.h"
14@@ -62,6 +64,8 @@ static void drm_mode_validate_flag(struc
15 return;
16 }
17
18+LIST_HEAD(drm_async_list);
19+
20 /**
21 * drm_helper_probe_connector_modes - get complete set of display modes
22 * @dev: DRM device
23@@ -916,6 +920,7 @@ bool drm_helper_plugged_event(struct drm
24 /* FIXME: send hotplug event */
25 return true;
26 }
27+
28 /**
29 * drm_initial_config - setup a sane initial connector configuration
30 * @dev: DRM device
31@@ -953,13 +958,26 @@ bool drm_helper_initial_config(struct dr
32
33 drm_setup_crtcs(dev);
34
35- /* alert the driver fb layer */
36 dev->mode_config.funcs->fb_changed(dev);
37-
38 return 0;
39 }
40 EXPORT_SYMBOL(drm_helper_initial_config);
41
42+static void drm_helper_initial_config_helper(void *ptr, async_cookie_t cookie)
43+{
44+ struct drm_device *dev = ptr;
45+ drm_helper_initial_config(dev);
46+}
47+
48+void drm_helper_initial_config_async(struct drm_device *dev)
49+{
50+ async_schedule_domain(drm_helper_initial_config_helper,
51+ dev, &drm_async_list);
52+}
53+EXPORT_SYMBOL(drm_helper_initial_config_async);
54+
55+
56+
57 static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
58 {
59 int dpms = DRM_MODE_DPMS_OFF;
60Index: b/drivers/gpu/drm/drm_drv.c
61===================================================================
62--- a/drivers/gpu/drm/drm_drv.c
63+++ b/drivers/gpu/drm/drm_drv.c
64@@ -49,6 +49,7 @@
65 #include <linux/debugfs.h>
66 #include "drmP.h"
67 #include "drm_core.h"
68+#include <linux/async.h>
69
70
71 static int drm_version(struct drm_device *dev, void *data,
72@@ -290,6 +291,9 @@ void drm_exit(struct drm_driver *driver)
73 struct drm_device *dev, *tmp;
74 DRM_DEBUG("\n");
75
76+ /* make sure all async DRM operations are finished */
77+ async_synchronize_full_domain(&drm_async_list);
78+
79 if (driver->driver_features & DRIVER_MODESET) {
80 pci_unregister_driver(&driver->pci_driver);
81 } else {
82Index: b/include/drm/drmP.h
83===================================================================
84--- a/include/drm/drmP.h
85+++ b/include/drm/drmP.h
86@@ -328,6 +328,7 @@ struct drm_vma_entry {
87 pid_t pid;
88 };
89
90+extern struct list_head drm_async_list;
91 /**
92 * DMA buffer.
93 */
94Index: b/include/drm/drm_crtc_helper.h
95===================================================================
96--- a/include/drm/drm_crtc_helper.h
97+++ b/include/drm/drm_crtc_helper.h
98@@ -92,6 +92,7 @@ extern int drm_helper_probe_single_conne
99 extern void drm_helper_disable_unused_functions(struct drm_device *dev);
100 extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
101 extern bool drm_helper_initial_config(struct drm_device *dev);
102+extern void drm_helper_initial_config_async(struct drm_device *dev);
103 extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
104 extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
105 struct drm_display_mode *mode,
106Index: b/drivers/gpu/drm/i915/i915_dma.c
107===================================================================
108--- a/drivers/gpu/drm/i915/i915_dma.c
109+++ b/drivers/gpu/drm/i915/i915_dma.c
110@@ -1045,7 +1045,7 @@ static int i915_load_modeset_init(struct
111
112 intel_modeset_init(dev);
113
114- drm_helper_initial_config(dev);
115+ drm_helper_initial_config_async(dev);
116
117 return 0;
118
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-silence-acer-message.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-silence-acer-message.patch
deleted file mode 100644
index ff76f09de0..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-silence-acer-message.patch
+++ /dev/null
@@ -1,22 +0,0 @@
1From: Arjan van de Ven <arjan@linux.intel.com>
2Date: Fri, 23 Jan 2009
3
4Small fix changing error msg to info msg in acer wmi driver
5---
6---
7 drivers/platform/x86/acer-wmi.c | 2 +-
8 1 file changed, 1 insertion(+), 1 deletion(-)
9
10Index: linux-2.6.29/drivers/platform/x86/acer-wmi.c
11===================================================================
12--- linux-2.6.29.orig/drivers/platform/x86/acer-wmi.c
13+++ linux-2.6.29/drivers/platform/x86/acer-wmi.c
14@@ -1290,7 +1290,7 @@ static int __init acer_wmi_init(void)
15 AMW0_find_mailled();
16
17 if (!interface) {
18- printk(ACER_ERR "No or unsupported WMI interface, unable to "
19+ printk(ACER_INFO "No or unsupported WMI interface, unable to "
20 "load\n");
21 return -ENODEV;
22 }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-sreadahead.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-sreadahead.patch
deleted file mode 100644
index 7fb6a29643..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-sreadahead.patch
+++ /dev/null
@@ -1,66 +0,0 @@
1From 4d690855d6bdc15b753ac3c21bf507ad94d46aac Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 21 Sep 2008 11:58:27 -0700
4Subject: [PATCH] superreadahead patch
5
6---
7 fs/ext3/ioctl.c | 3 +++
8 fs/ext3/super.c | 1 +
9 include/linux/ext3_fs.h | 1 +
10 include/linux/fs.h | 2 ++
11 4 files changed, 7 insertions(+), 0 deletions(-)
12
13diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
14index 8897481..08f4854 100644
15--- a/fs/ext3/ioctl.c
16+++ b/fs/ext3/ioctl.c
17@@ -276,6 +276,9 @@ group_add_out:
18 mnt_drop_write(filp->f_path.mnt);
19 return err;
20 }
21+ case EXT3_IOC_INODE_JIFFIES: {
22+ return inode->created_when;
23+ }
24
25
26 default:
27diff --git a/fs/ext3/super.c b/fs/ext3/super.c
28index 524b349..e6e8514 100644
29--- a/fs/ext3/super.c
30+++ b/fs/ext3/super.c
31@@ -466,6 +466,7 @@ static struct inode *ext3_alloc_inode(struct super_block *sb)
32 return NULL;
33 ei->i_block_alloc_info = NULL;
34 ei->vfs_inode.i_version = 1;
35+ ei->vfs_inode.created_when = jiffies;
36 return &ei->vfs_inode;
37 }
38
39diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
40index 634a5e5..84d5394 100644
41--- a/include/linux/ext3_fs.h
42+++ b/include/linux/ext3_fs.h
43@@ -250,6 +250,7 @@ struct ext3_new_group_data {
44 #endif
45 #define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
46 #define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
47+#define EXT3_IOC_INODE_JIFFIES _IOR('f', 19, long)
48
49 /*
50 * ioctl commands in 32 bit emulation
51diff --git a/include/linux/fs.h b/include/linux/fs.h
52index 0872372..078e3fd 100644
53--- a/include/linux/fs.h
54+++ b/include/linux/fs.h
55@@ -781,6 +781,8 @@ struct inode {
56 struct posix_acl *i_default_acl;
57 #endif
58 void *i_private; /* fs or device private pointer */
59+
60+ unsigned long created_when; /* jiffies of creation time */
61 };
62
63 /*
64--
651.6.0.6
66
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-touchkit.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-touchkit.patch
deleted file mode 100644
index 5253404c29..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-touchkit.patch
+++ /dev/null
@@ -1,146 +0,0 @@
1From 3281da09528ca94f1b1fd39cae388f5b5423aa46 Mon Sep 17 00:00:00 2001
2From: Alan Olsen <alanx.r.olsen@intel.com>
3Date: Tue, 21 Jul 2009 13:26:58 -0700
4Subject: [PATCH] linux-2.6.29-touchkit.patch
5
6Signed-off-by: Alan Olsen <alanx.r.olsen@intel.com>
7---
8 drivers/input/mouse/psmouse-base.c | 9 +++++++
9 drivers/input/mouse/psmouse.h | 1 +
10 drivers/input/mouse/touchkit_ps2.c | 45 ++++++++++++++++++++++++++++++++++-
11 drivers/input/mouse/touchkit_ps2.h | 6 ++++
12 4 files changed, 59 insertions(+), 2 deletions(-)
13
14diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
15index b407b35..4c6b184 100644
16--- a/drivers/input/mouse/psmouse-base.c
17+++ b/drivers/input/mouse/psmouse-base.c
18@@ -678,6 +678,9 @@ static int psmouse_extensions(struct psmouse *psmouse,
19
20 if (touchkit_ps2_detect(psmouse, set_properties) == 0)
21 return PSMOUSE_TOUCHKIT_PS2;
22+
23+ if (elftouch_ps2_detect(psmouse, set_properties) == 0)
24+ return PSMOUSE_ELFTOUCH_PS2;
25 }
26
27 /*
28@@ -788,6 +791,12 @@ static const struct psmouse_protocol psmouse_protocols[] = {
29 .alias = "trackpoint",
30 .detect = trackpoint_detect,
31 },
32+ {
33+ .type = PSMOUSE_ELFTOUCH_PS2,
34+ .name = "elftouchPS2",
35+ .alias = "elftouch",
36+ .detect = elftouch_ps2_detect,
37+ },
38 #endif
39 #ifdef CONFIG_MOUSE_PS2_TOUCHKIT
40 {
41diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
42index 54ed267..8d1ba79 100644
43--- a/drivers/input/mouse/psmouse.h
44+++ b/drivers/input/mouse/psmouse.h
45@@ -89,6 +89,7 @@ enum psmouse_type {
46 PSMOUSE_TRACKPOINT,
47 PSMOUSE_TOUCHKIT_PS2,
48 PSMOUSE_CORTRON,
49+ PSMOUSE_ELFTOUCH_PS2,
50 PSMOUSE_HGPK,
51 PSMOUSE_ELANTECH,
52 PSMOUSE_AUTO /* This one should always be last */
53diff --git a/drivers/input/mouse/touchkit_ps2.c b/drivers/input/mouse/touchkit_ps2.c
54index 3fadb2a..e9c27f1 100644
55--- a/drivers/input/mouse/touchkit_ps2.c
56+++ b/drivers/input/mouse/touchkit_ps2.c
57@@ -51,6 +51,11 @@
58 #define TOUCHKIT_GET_X(packet) (((packet)[1] << 7) | (packet)[2])
59 #define TOUCHKIT_GET_Y(packet) (((packet)[3] << 7) | (packet)[4])
60
61+#define ELFTOUCH_MAX_XC 0x0fff
62+#define ELFTOUCH_MAX_YC 0x0fff
63+#define ELFTOUCH_GET_X(packet) (((packet)[3] << 7) | (packet)[4])
64+#define ELFTOUCH_GET_Y(packet) (((packet)[1] << 7) | (packet)[2])
65+
66 static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse)
67 {
68 unsigned char *packet = psmouse->packet;
69@@ -59,9 +64,15 @@ static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse)
70 if (psmouse->pktcnt != 5)
71 return PSMOUSE_GOOD_DATA;
72
73- input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet));
74- input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet));
75+ if(psmouse->type==PSMOUSE_ELFTOUCH_PS2) {
76+ input_report_abs(dev, ABS_X, ELFTOUCH_GET_X(packet));
77+ input_report_abs(dev, ABS_Y, ELFTOUCH_GET_Y(packet));
78+ } else {
79+ input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet));
80+ input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet));
81+ }
82 input_report_key(dev, BTN_TOUCH, TOUCHKIT_GET_TOUCHED(packet));
83+
84 input_sync(dev);
85
86 return PSMOUSE_FULL_PACKET;
87@@ -98,3 +109,33 @@ int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties)
88
89 return 0;
90 }
91+
92+int elftouch_ps2_detect(struct psmouse *psmouse, int set_properties)
93+{
94+ struct input_dev *dev = psmouse->dev;
95+ unsigned char param[16];
96+ int command, res;
97+
98+ param[0]=0x0f4;
99+ command = TOUCHKIT_SEND_PARMS(1, 0, TOUCHKIT_CMD);
100+ res=ps2_command(&psmouse->ps2dev, param, command);
101+ if(res) { return -ENODEV; }
102+
103+ param[0]=0x0b0;
104+ command = TOUCHKIT_SEND_PARMS(1, 1, TOUCHKIT_CMD);
105+ res=ps2_command(&psmouse->ps2dev, param, command);
106+ if(res) { return -ENODEV; }
107+
108+ if (set_properties) {
109+ dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
110+ set_bit(BTN_TOUCH, dev->keybit);
111+ input_set_abs_params(dev, ABS_X, 0, ELFTOUCH_MAX_XC, 0, 0);
112+ input_set_abs_params(dev, ABS_Y, 0, ELFTOUCH_MAX_YC, 0, 0);
113+
114+ psmouse->vendor = "ElfTouch";
115+ psmouse->name = "Touchscreen";
116+ psmouse->protocol_handler = touchkit_ps2_process_byte;
117+ psmouse->pktsize = 5;
118+ }
119+ return 0;
120+}
121diff --git a/drivers/input/mouse/touchkit_ps2.h b/drivers/input/mouse/touchkit_ps2.h
122index 8a0dd35..f32ef4c 100644
123--- a/drivers/input/mouse/touchkit_ps2.h
124+++ b/drivers/input/mouse/touchkit_ps2.h
125@@ -14,12 +14,18 @@
126
127 #ifdef CONFIG_MOUSE_PS2_TOUCHKIT
128 int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties);
129+int elftouch_ps2_detect(struct psmouse *psmouse, int set_properties);
130 #else
131 static inline int touchkit_ps2_detect(struct psmouse *psmouse,
132 int set_properties)
133 {
134 return -ENOSYS;
135 }
136+static inline int elftouch_ps2_detect(struct psmouse *psmouse,
137+ int set_properties)
138+{
139+ return -ENOSYS;
140+}
141 #endif /* CONFIG_MOUSE_PS2_TOUCHKIT */
142
143 #endif
144--
1451.6.0.6
146
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.30-non-root-X.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.30-non-root-X.patch
deleted file mode 100644
index 76dd36082a..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.30-non-root-X.patch
+++ /dev/null
@@ -1,32 +0,0 @@
1From a5a267593c15ac987f78cfc21cae0c8ef723f81e Mon Sep 17 00:00:00 2001
2From: Alan Olsen <alan.r.olsen@intel.com>
3Date: Mon, 21 Sep 2009 13:58:49 -0700
4Subject: [PATCH] linux-2.6.30-non-root-X.patch
5
6Signed-off-by: Alan Olsen <alan.r.olsen@intel.com>
7---
8 drivers/gpu/drm/drm_drv.c | 4 ++--
9 1 files changed, 2 insertions(+), 2 deletions(-)
10
11diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
12index 4678f8f..b7f3a41 100644
13--- a/drivers/gpu/drm/drm_drv.c
14+++ b/drivers/gpu/drm/drm_drv.c
15@@ -64,12 +64,12 @@ static struct drm_ioctl_desc drm_ioctls[] = {
16 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
17 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
18 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
19- DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
20+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
21
22 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
23 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
24 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
25- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
26+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
27
28 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
29 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
30--
311.6.0.6
32
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-1-2-timberdale.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-1-2-timberdale.patch
deleted file mode 100644
index 9db5b4ac72..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-1-2-timberdale.patch
+++ /dev/null
@@ -1,12910 +0,0 @@
1diff -uNr linux-2.6.31/drivers/gpio/Kconfig linux-2.6.31.new/drivers/gpio/Kconfig
2--- linux-2.6.31/drivers/gpio/Kconfig 2009-10-23 11:18:30.000000000 -0700
3+++ linux-2.6.31.new/drivers/gpio/Kconfig 2009-10-23 11:17:19.000000000 -0700
4@@ -173,6 +173,12 @@
5
6 If unsure, say N.
7
8+config GPIO_TIMBERDALE
9+ bool "Support for timberdale GPIO IP"
10+ depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
11+ ---help---
12+ Add support for the GPIO IP in the timberdale FPGA.
13+
14 comment "SPI GPIO expanders:"
15
16 config GPIO_MAX7301
17@@ -188,4 +194,11 @@
18 SPI driver for Microchip MCP23S08 I/O expander. This provides
19 a GPIO interface supporting inputs and outputs.
20
21+config GPIO_MC33880
22+ tristate "Freescale MC33880 high-side/low-side switch"
23+ depends on SPI_MASTER
24+ help
25+ SPI driver for Freescale MC33880 high-side/low-side switch.
26+ This provides GPIO interface supporting inputs and outputs.
27+
28 endif
29diff -uNr linux-2.6.31/drivers/gpio/Makefile linux-2.6.31.new/drivers/gpio/Makefile
30--- linux-2.6.31/drivers/gpio/Makefile 2009-10-23 11:18:30.000000000 -0700
31+++ linux-2.6.31.new/drivers/gpio/Makefile 2009-10-23 11:17:19.000000000 -0700
32@@ -14,3 +14,6 @@
33 obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
34 obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
35 obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
36+obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
37+obj-$(CONFIG_GPIO_MC33880) += mc33880.o
38+
39diff -uNr linux-2.6.31/drivers/gpio/mc33880.c linux-2.6.31.new/drivers/gpio/mc33880.c
40--- linux-2.6.31/drivers/gpio/mc33880.c 1969-12-31 16:00:00.000000000 -0800
41+++ linux-2.6.31.new/drivers/gpio/mc33880.c 2009-10-23 11:17:19.000000000 -0700
42@@ -0,0 +1,196 @@
43+/*
44+ * mc33880.c MC33880 high-side/low-side switch GPIO driver
45+ * Copyright (c) 2009 Intel Corporation
46+ *
47+ * This program is free software; you can redistribute it and/or modify
48+ * it under the terms of the GNU General Public License version 2 as
49+ * published by the Free Software Foundation.
50+ *
51+ * This program is distributed in the hope that it will be useful,
52+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
53+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
54+ * GNU General Public License for more details.
55+ *
56+ * You should have received a copy of the GNU General Public License
57+ * along with this program; if not, write to the Free Software
58+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
59+ */
60+
61+/* Supports:
62+ * Freescale MC33880 high-side/low-side switch
63+ */
64+
65+#include <linux/init.h>
66+#include <linux/mutex.h>
67+#include <linux/spi/spi.h>
68+#include <linux/spi/mc33880.h>
69+#include <linux/gpio.h>
70+
71+#define DRIVER_NAME "mc33880"
72+
73+/*
74+ * Pin configurations, see MAX7301 datasheet page 6
75+ */
76+#define PIN_CONFIG_MASK 0x03
77+#define PIN_CONFIG_IN_PULLUP 0x03
78+#define PIN_CONFIG_IN_WO_PULLUP 0x02
79+#define PIN_CONFIG_OUT 0x01
80+
81+#define PIN_NUMBER 8
82+
83+
84+/*
85+ * Some registers must be read back to modify.
86+ * To save time we cache them here in memory
87+ */
88+struct mc33880 {
89+ struct mutex lock; /* protect from simultanous accesses */
90+ u8 port_config;
91+ struct gpio_chip chip;
92+ struct spi_device *spi;
93+};
94+
95+static int mc33880_write_config(struct mc33880 *mc)
96+{
97+ return spi_write(mc->spi, &mc->port_config, sizeof(mc->port_config));
98+}
99+
100+
101+static int __mc33880_set(struct mc33880 *mc, unsigned offset, int value)
102+{
103+ if (value)
104+ mc->port_config |= 1 << offset;
105+ else
106+ mc->port_config &= ~(1 << offset);
107+
108+ return mc33880_write_config(mc);
109+}
110+
111+
112+static void mc33880_set(struct gpio_chip *chip, unsigned offset, int value)
113+{
114+ struct mc33880 *mc = container_of(chip, struct mc33880, chip);
115+
116+ mutex_lock(&mc->lock);
117+
118+ __mc33880_set(mc, offset, value);
119+
120+ mutex_unlock(&mc->lock);
121+}
122+
123+static int __devinit mc33880_probe(struct spi_device *spi)
124+{
125+ struct mc33880 *mc;
126+ struct mc33880_platform_data *pdata;
127+ int ret;
128+
129+ pdata = spi->dev.platform_data;
130+ if (!pdata || !pdata->base) {
131+ dev_dbg(&spi->dev, "incorrect or missing platform data\n");
132+ return -EINVAL;
133+ }
134+
135+ /*
136+ * bits_per_word cannot be configured in platform data
137+ */
138+ spi->bits_per_word = 8;
139+
140+ ret = spi_setup(spi);
141+ if (ret < 0)
142+ return ret;
143+
144+ mc = kzalloc(sizeof(struct mc33880), GFP_KERNEL);
145+ if (!mc)
146+ return -ENOMEM;
147+
148+ mutex_init(&mc->lock);
149+
150+ dev_set_drvdata(&spi->dev, mc);
151+
152+ mc->spi = spi;
153+
154+ mc->chip.label = DRIVER_NAME,
155+ mc->chip.set = mc33880_set;
156+ mc->chip.base = pdata->base;
157+ mc->chip.ngpio = PIN_NUMBER;
158+ mc->chip.can_sleep = 1;
159+ mc->chip.dev = &spi->dev;
160+ mc->chip.owner = THIS_MODULE;
161+
162+ mc->port_config = 0x00;
163+ /* write twice, because during initialisation the first setting
164+ * is just for testing SPI communication, and the second is the
165+ * "real" configuration
166+ */
167+ ret = mc33880_write_config(mc);
168+ mc->port_config = 0x00;
169+ if (!ret)
170+ ret = mc33880_write_config(mc);
171+
172+ if (ret) {
173+ printk(KERN_ERR "Failed writing to " DRIVER_NAME ": %d\n", ret);
174+ goto exit_destroy;
175+ }
176+
177+ ret = gpiochip_add(&mc->chip);
178+ if (ret)
179+ goto exit_destroy;
180+
181+ return ret;
182+
183+exit_destroy:
184+ dev_set_drvdata(&spi->dev, NULL);
185+ mutex_destroy(&mc->lock);
186+ kfree(mc);
187+ return ret;
188+}
189+
190+static int mc33880_remove(struct spi_device *spi)
191+{
192+ struct mc33880 *mc;
193+ int ret;
194+
195+ mc = dev_get_drvdata(&spi->dev);
196+ if (mc == NULL)
197+ return -ENODEV;
198+
199+ dev_set_drvdata(&spi->dev, NULL);
200+
201+ ret = gpiochip_remove(&mc->chip);
202+ if (!ret) {
203+ mutex_destroy(&mc->lock);
204+ kfree(mc);
205+ } else
206+ dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n",
207+ ret);
208+
209+ return ret;
210+}
211+
212+static struct spi_driver mc33880_driver = {
213+ .driver = {
214+ .name = DRIVER_NAME,
215+ .owner = THIS_MODULE,
216+ },
217+ .probe = mc33880_probe,
218+ .remove = __devexit_p(mc33880_remove),
219+};
220+
221+static int __init mc33880_init(void)
222+{
223+ return spi_register_driver(&mc33880_driver);
224+}
225+/* register after spi postcore initcall and before
226+ * subsys initcalls that may rely on these GPIOs
227+ */
228+subsys_initcall(mc33880_init);
229+
230+static void __exit mc33880_exit(void)
231+{
232+ spi_unregister_driver(&mc33880_driver);
233+}
234+module_exit(mc33880_exit);
235+
236+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
237+MODULE_LICENSE("GPL v2");
238+
239diff -uNr linux-2.6.31/drivers/gpio/timbgpio.c linux-2.6.31.new/drivers/gpio/timbgpio.c
240--- linux-2.6.31/drivers/gpio/timbgpio.c 1969-12-31 16:00:00.000000000 -0800
241+++ linux-2.6.31.new/drivers/gpio/timbgpio.c 2009-10-23 11:17:19.000000000 -0700
242@@ -0,0 +1,342 @@
243+/*
244+ * timbgpio.c timberdale FPGA GPIO driver
245+ * Copyright (c) 2009 Intel Corporation
246+ *
247+ * This program is free software; you can redistribute it and/or modify
248+ * it under the terms of the GNU General Public License version 2 as
249+ * published by the Free Software Foundation.
250+ *
251+ * This program is distributed in the hope that it will be useful,
252+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
253+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
254+ * GNU General Public License for more details.
255+ *
256+ * You should have received a copy of the GNU General Public License
257+ * along with this program; if not, write to the Free Software
258+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
259+ */
260+
261+/* Supports:
262+ * Timberdale FPGA GPIO
263+ */
264+
265+#include <linux/module.h>
266+#include <linux/gpio.h>
267+#include <linux/platform_device.h>
268+#include <linux/io.h>
269+#include <linux/timb_gpio.h>
270+#include <linux/interrupt.h>
271+
272+#define DRIVER_NAME "timb-gpio"
273+
274+#define TGPIOVAL 0x00
275+#define TGPIODIR 0x04
276+#define TGPIO_IER 0x08
277+#define TGPIO_ISR 0x0c
278+#define TGPIO_IPR 0x10
279+#define TGPIO_ICR 0x14
280+#define TGPIO_FLR 0x18
281+#define TGPIO_LVR 0x1c
282+
283+struct timbgpio {
284+ void __iomem *membase;
285+ spinlock_t lock; /* mutual exclusion */
286+ struct gpio_chip gpio;
287+ int irq_base;
288+};
289+
290+static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
291+ unsigned offset, bool enabled)
292+{
293+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
294+ u32 reg;
295+
296+ spin_lock(&tgpio->lock);
297+ reg = ioread32(tgpio->membase + offset);
298+
299+ if (enabled)
300+ reg |= (1 << index);
301+ else
302+ reg &= ~(1 << index);
303+
304+ iowrite32(reg, tgpio->membase + offset);
305+ spin_unlock(&tgpio->lock);
306+
307+ return 0;
308+}
309+
310+static int timbgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
311+{
312+ return timbgpio_update_bit(gpio, nr, TGPIODIR, true);
313+}
314+
315+static int timbgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
316+{
317+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
318+ u32 value;
319+
320+ value = ioread32(tgpio->membase + TGPIOVAL);
321+ return (value & (1 << nr)) ? 1 : 0;
322+}
323+
324+static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
325+ unsigned nr, int val)
326+{
327+ return timbgpio_update_bit(gpio, nr, TGPIODIR, false);
328+}
329+
330+static void timbgpio_gpio_set(struct gpio_chip *gpio,
331+ unsigned nr, int val)
332+{
333+ timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
334+}
335+
336+static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
337+{
338+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
339+
340+ if (tgpio->irq_base <= 0)
341+ return -EINVAL;
342+
343+ return tgpio->irq_base + offset;
344+}
345+
346+/*
347+ * GPIO IRQ
348+ */
349+static void timbgpio_irq_disable(unsigned irq)
350+{
351+ struct timbgpio *tgpio = get_irq_chip_data(irq);
352+ int offset = irq - tgpio->irq_base;
353+
354+ timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
355+}
356+
357+static void timbgpio_irq_enable(unsigned irq)
358+{
359+ struct timbgpio *tgpio = get_irq_chip_data(irq);
360+ int offset = irq - tgpio->irq_base;
361+
362+ timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
363+}
364+
365+static int timbgpio_irq_type(unsigned irq, unsigned trigger)
366+{
367+ struct timbgpio *tgpio = get_irq_chip_data(irq);
368+ int offset = irq - tgpio->irq_base;
369+ unsigned long flags;
370+ u32 lvr, flr;
371+
372+ if (offset < 0 || offset > tgpio->gpio.ngpio)
373+ return -EINVAL;
374+
375+ spin_lock_irqsave(&tgpio->lock, flags);
376+
377+ lvr = ioread32(tgpio->membase + TGPIO_LVR);
378+ flr = ioread32(tgpio->membase + TGPIO_FLR);
379+
380+ if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
381+ flr &= ~(1 << offset);
382+ if (trigger & IRQ_TYPE_LEVEL_HIGH)
383+ lvr |= 1 << offset;
384+ else
385+ lvr &= ~(1 << offset);
386+ }
387+
388+ if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
389+ return -EINVAL;
390+ else {
391+ flr |= 1 << offset;
392+ /* opposite compared to the datasheet, but it mirrors the
393+ * reality
394+ */
395+ if (trigger & IRQ_TYPE_EDGE_FALLING)
396+ lvr |= 1 << offset;
397+ else
398+ lvr &= ~(1 << offset);
399+ }
400+
401+ iowrite32(lvr, tgpio->membase + TGPIO_LVR);
402+ iowrite32(flr, tgpio->membase + TGPIO_FLR);
403+ iowrite32(1 << offset, tgpio->membase + TGPIO_ICR);
404+ spin_unlock_irqrestore(&tgpio->lock, flags);
405+
406+ return 0;
407+}
408+
409+static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
410+{
411+ struct timbgpio *tgpio = get_irq_data(irq);
412+ unsigned long ipr;
413+ int offset;
414+
415+ desc->chip->ack(irq);
416+ ipr = ioread32(tgpio->membase + TGPIO_IPR);
417+ iowrite32(ipr, tgpio->membase + TGPIO_ICR);
418+
419+ for_each_bit(offset, &ipr, tgpio->gpio.ngpio)
420+ generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
421+}
422+
423+static struct irq_chip timbgpio_irqchip = {
424+ .name = "GPIO",
425+ .enable = timbgpio_irq_enable,
426+ .disable = timbgpio_irq_disable,
427+ .set_type = timbgpio_irq_type,
428+};
429+
430+static int __devinit timbgpio_probe(struct platform_device *pdev)
431+{
432+ int err, i;
433+ struct gpio_chip *gc;
434+ struct timbgpio *tgpio;
435+ struct resource *iomem;
436+ struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
437+ int irq = platform_get_irq(pdev, 0);
438+
439+ if (!pdata || pdata->nr_pins > 32) {
440+ err = -EINVAL;
441+ goto err_mem;
442+ }
443+
444+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
445+ if (!iomem) {
446+ err = -EINVAL;
447+ goto err_mem;
448+ }
449+
450+ tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL);
451+ if (!tgpio) {
452+ err = -EINVAL;
453+ goto err_mem;
454+ }
455+ tgpio->irq_base = pdata->irq_base;
456+
457+ spin_lock_init(&tgpio->lock);
458+
459+ if (!request_mem_region(iomem->start, resource_size(iomem),
460+ DRIVER_NAME)) {
461+ err = -EBUSY;
462+ goto err_request;
463+ }
464+
465+ tgpio->membase = ioremap(iomem->start, resource_size(iomem));
466+ if (!tgpio->membase) {
467+ err = -ENOMEM;
468+ goto err_ioremap;
469+ }
470+
471+ gc = &tgpio->gpio;
472+
473+ gc->label = dev_name(&pdev->dev);
474+ gc->owner = THIS_MODULE;
475+ gc->dev = &pdev->dev;
476+ gc->direction_input = timbgpio_gpio_direction_input;
477+ gc->get = timbgpio_gpio_get;
478+ gc->direction_output = timbgpio_gpio_direction_output;
479+ gc->set = timbgpio_gpio_set;
480+ gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL;
481+ gc->dbg_show = NULL;
482+ gc->base = pdata->gpio_base;
483+ gc->ngpio = pdata->nr_pins;
484+ gc->can_sleep = 0;
485+
486+ err = gpiochip_add(gc);
487+ if (err)
488+ goto err_chipadd;
489+
490+ platform_set_drvdata(pdev, tgpio);
491+
492+ /* make sure to disable interrupts */
493+ iowrite32(0x0, tgpio->membase + TGPIO_IER);
494+
495+ if (irq < 0 || tgpio->irq_base <= 0)
496+ return 0;
497+
498+ for (i = 0; i < pdata->nr_pins; i++) {
499+ set_irq_chip_and_handler_name(tgpio->irq_base + i,
500+ &timbgpio_irqchip, handle_simple_irq, "mux");
501+ set_irq_chip_data(tgpio->irq_base + i, tgpio);
502+#ifdef CONFIG_ARM
503+ set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
504+#endif
505+ }
506+
507+ set_irq_data(irq, tgpio);
508+ set_irq_chained_handler(irq, timbgpio_irq);
509+
510+ return 0;
511+
512+err_chipadd:
513+ iounmap(tgpio->membase);
514+err_ioremap:
515+ release_mem_region(iomem->start, resource_size(iomem));
516+err_request:
517+ kfree(tgpio);
518+err_mem:
519+ printk(KERN_ERR DRIVER_NAME": Failed to register GPIOs: %d\n", err);
520+
521+ return err;
522+}
523+
524+static int __devexit timbgpio_remove(struct platform_device *pdev)
525+{
526+ int err;
527+ struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
528+ struct timbgpio *tgpio = platform_get_drvdata(pdev);
529+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
530+ int irq = platform_get_irq(pdev, 0);
531+
532+ if (irq >= 0 && tgpio->irq_base > 0) {
533+ int i;
534+ for (i = 0; i < pdata->nr_pins; i++) {
535+ set_irq_chip(tgpio->irq_base + i, NULL);
536+ set_irq_chip_data(tgpio->irq_base + i, NULL);
537+ }
538+
539+ set_irq_handler(irq, NULL);
540+ set_irq_data(irq, NULL);
541+ }
542+
543+ err = gpiochip_remove(&tgpio->gpio);
544+ if (err)
545+ printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n");
546+
547+ iounmap(tgpio->membase);
548+ release_mem_region(iomem->start, resource_size(iomem));
549+ kfree(tgpio);
550+
551+ platform_set_drvdata(pdev, NULL);
552+
553+ return 0;
554+}
555+
556+static struct platform_driver timbgpio_platform_driver = {
557+ .driver = {
558+ .name = DRIVER_NAME,
559+ .owner = THIS_MODULE,
560+ },
561+ .probe = timbgpio_probe,
562+ .remove = timbgpio_remove,
563+};
564+
565+/*--------------------------------------------------------------------------*/
566+
567+static int __init timbgpio_init(void)
568+{
569+ return platform_driver_register(&timbgpio_platform_driver);
570+}
571+
572+static void __exit timbgpio_exit(void)
573+{
574+ platform_driver_unregister(&timbgpio_platform_driver);
575+}
576+
577+module_init(timbgpio_init);
578+module_exit(timbgpio_exit);
579+
580+MODULE_DESCRIPTION("Timberdale GPIO driver");
581+MODULE_LICENSE("GPL v2");
582+MODULE_AUTHOR("Mocean Laboratories");
583+MODULE_ALIAS("platform:"DRIVER_NAME);
584+
585diff -uNr linux-2.6.31/drivers/i2c/busses/i2c-xiic.c linux-2.6.31.new/drivers/i2c/busses/i2c-xiic.c
586--- linux-2.6.31/drivers/i2c/busses/i2c-xiic.c 1969-12-31 16:00:00.000000000 -0800
587+++ linux-2.6.31.new/drivers/i2c/busses/i2c-xiic.c 2009-10-23 11:17:29.000000000 -0700
588@@ -0,0 +1,1132 @@
589+/*
590+ * i2c-xiic.c
591+ * Copyright (c) 2009 Intel Corporation
592+ *
593+ * This program is free software; you can redistribute it and/or modify
594+ * it under the terms of the GNU General Public License version 2 as
595+ * published by the Free Software Foundation.
596+ *
597+ * This program is distributed in the hope that it will be useful,
598+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
599+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
600+ * GNU General Public License for more details.
601+ *
602+ * You should have received a copy of the GNU General Public License
603+ * along with this program; if not, write to the Free Software
604+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
605+ */
606+
607+/* Supports:
608+ * Xilinx IIC
609+ */
610+#include <linux/kernel.h>
611+#include <linux/module.h>
612+#include <linux/init.h>
613+#include <linux/errno.h>
614+#include <linux/platform_device.h>
615+#include <linux/i2c.h>
616+#include <linux/interrupt.h>
617+#include <linux/wait.h>
618+#include <linux/i2c-xiic.h>
619+#include <linux/io.h>
620+
621+#define DRIVER_NAME "xiic-i2c"
622+
623+struct xiic_i2c {
624+ void __iomem *base;
625+ wait_queue_head_t wait;
626+ struct i2c_adapter adap;
627+ struct i2c_msg *tx_msg;
628+ spinlock_t lock; /* mutual exclusion */
629+ unsigned int tx_pos;
630+ unsigned int nmsgs;
631+ int state; /* see STATE_ */
632+
633+ struct i2c_msg *rx_msg; /* current RX message */
634+ int rx_pos;
635+};
636+
637+static inline void xiic_setreg8(struct xiic_i2c *i2c, int reg, u8 value);
638+
639+static inline u8 xiic_getreg8(struct xiic_i2c *i2c, int reg);
640+
641+static inline void xiic_setreg16(struct xiic_i2c *i2c, int reg, u16 value);
642+
643+static inline void xiic_setreg32(struct xiic_i2c *i2c, int reg, int value);
644+
645+static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg);
646+
647+static void xiic_start_xfer(struct xiic_i2c *i2c);
648+static void __xiic_start_xfer(struct xiic_i2c *i2c);
649+
650+/************************** Constant Definitions ****************************/
651+
652+#define STATE_DONE 0x00
653+#define STATE_ERROR 0x01
654+#define STATE_START 0x02
655+
656+#define XIIC_MSB_OFFSET 0
657+#define XIIC_REG_OFFSET (0x100+XIIC_MSB_OFFSET)
658+
659+/*
660+ * Register offsets in bytes from RegisterBase. Three is added to the
661+ * base offset to access LSB (IBM style) of the word
662+ */
663+#define XIIC_CR_REG_OFFSET (0x00+XIIC_REG_OFFSET) /* Control Register */
664+#define XIIC_SR_REG_OFFSET (0x04+XIIC_REG_OFFSET) /* Status Register */
665+#define XIIC_DTR_REG_OFFSET (0x08+XIIC_REG_OFFSET) /* Data Tx Register */
666+#define XIIC_DRR_REG_OFFSET (0x0C+XIIC_REG_OFFSET) /* Data Rx Register */
667+#define XIIC_ADR_REG_OFFSET (0x10+XIIC_REG_OFFSET) /* Address Register */
668+#define XIIC_TFO_REG_OFFSET (0x14+XIIC_REG_OFFSET) /* Tx FIFO Occupancy */
669+#define XIIC_RFO_REG_OFFSET (0x18+XIIC_REG_OFFSET) /* Rx FIFO Occupancy */
670+#define XIIC_TBA_REG_OFFSET (0x1C+XIIC_REG_OFFSET) /* 10 Bit Address reg */
671+#define XIIC_RFD_REG_OFFSET (0x20+XIIC_REG_OFFSET) /* Rx FIFO Depth reg */
672+#define XIIC_GPO_REG_OFFSET (0x24+XIIC_REG_OFFSET) /* Output Register */
673+
674+/* Control Register masks */
675+#define XIIC_CR_ENABLE_DEVICE_MASK 0x01 /* Device enable = 1 */
676+#define XIIC_CR_TX_FIFO_RESET_MASK 0x02 /* Transmit FIFO reset=1 */
677+#define XIIC_CR_MSMS_MASK 0x04 /* Master starts Txing=1 */
678+#define XIIC_CR_DIR_IS_TX_MASK 0x08 /* Dir of tx. Txing=1 */
679+#define XIIC_CR_NO_ACK_MASK 0x10 /* Tx Ack. NO ack = 1 */
680+#define XIIC_CR_REPEATED_START_MASK 0x20 /* Repeated start = 1 */
681+#define XIIC_CR_GENERAL_CALL_MASK 0x40 /* Gen Call enabled = 1 */
682+
683+/* Status Register masks */
684+#define XIIC_SR_GEN_CALL_MASK 0x01 /* 1=a mstr issued a GC */
685+#define XIIC_SR_ADDR_AS_SLAVE_MASK 0x02 /* 1=when addr as slave */
686+#define XIIC_SR_BUS_BUSY_MASK 0x04 /* 1 = bus is busy */
687+#define XIIC_SR_MSTR_RDING_SLAVE_MASK 0x08 /* 1=Dir: mstr <-- slave */
688+#define XIIC_SR_TX_FIFO_FULL_MASK 0x10 /* 1 = Tx FIFO full */
689+#define XIIC_SR_RX_FIFO_FULL_MASK 0x20 /* 1 = Rx FIFO full */
690+#define XIIC_SR_RX_FIFO_EMPTY_MASK 0x40 /* 1 = Rx FIFO empty */
691+#define XIIC_SR_TX_FIFO_EMPTY_MASK 0x80 /* 1 = Tx FIFO empty */
692+
693+/* Interrupt Status Register masks Interrupt occurs when... */
694+#define XIIC_INTR_ARB_LOST_MASK 0x01 /* 1 = arbitration lost */
695+#define XIIC_INTR_TX_ERROR_MASK 0x02 /* 1=Tx error/msg complete */
696+#define XIIC_INTR_TX_EMPTY_MASK 0x04 /* 1 = Tx FIFO/reg empty */
697+#define XIIC_INTR_RX_FULL_MASK 0x08 /* 1=Rx FIFO/reg=OCY level */
698+#define XIIC_INTR_BNB_MASK 0x10 /* 1 = Bus not busy */
699+#define XIIC_INTR_AAS_MASK 0x20 /* 1 = when addr as slave */
700+#define XIIC_INTR_NAAS_MASK 0x40 /* 1 = not addr as slave */
701+#define XIIC_INTR_TX_HALF_MASK 0x80 /* 1 = TX FIFO half empty */
702+
703+/* The following constants specify the depth of the FIFOs */
704+#define IIC_RX_FIFO_DEPTH 16 /* Rx fifo capacity */
705+#define IIC_TX_FIFO_DEPTH 16 /* Tx fifo capacity */
706+
707+/* The following constants specify groups of interrupts that are typically
708+ * enabled or disables at the same time
709+ */
710+#define XIIC_TX_INTERRUPTS \
711+(XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)
712+
713+#define XIIC_TX_RX_INTERRUPTS (XIIC_INTR_RX_FULL_MASK | XIIC_TX_INTERRUPTS)
714+
715+/* The following constants are used with the following macros to specify the
716+ * operation, a read or write operation.
717+ */
718+#define XIIC_READ_OPERATION 1
719+#define XIIC_WRITE_OPERATION 0
720+
721+/*
722+ * Tx Fifo upper bit masks.
723+ */
724+#define XIIC_TX_DYN_START_MASK 0x0100 /* 1 = Set dynamic start */
725+#define XIIC_TX_DYN_STOP_MASK 0x0200 /* 1 = Set dynamic stop */
726+
727+/*
728+ * The following constants define the register offsets for the Interrupt
729+ * registers. There are some holes in the memory map for reserved addresses
730+ * to allow other registers to be added and still match the memory map of the
731+ * interrupt controller registers
732+ */
733+#define XIIC_DGIER_OFFSET 0x1C /* Device Global Interrupt Enable Register */
734+#define XIIC_IISR_OFFSET 0x20 /* Interrupt Status Register */
735+#define XIIC_IIER_OFFSET 0x28 /* Interrupt Enable Register */
736+#define XIIC_RESETR_OFFSET 0x40 /* Reset Register */
737+
738+#define XIIC_RESET_MASK 0xAUL
739+
740+/*
741+ * The following constant is used for the device global interrupt enable
742+ * register, to enable all interrupts for the device, this is the only bit
743+ * in the register
744+ */
745+#define XIIC_GINTR_ENABLE_MASK 0x80000000UL
746+
747+/***************** Macros (Inline Functions) Definitions *********************/
748+
749+
750+/******************************************************************************
751+*
752+* This macro disables all interrupts for the device by writing to the Global
753+* interrupt enable register. This register provides the ability to disable
754+* interrupts without any modifications to the interrupt enable register such
755+* that it is minimal effort to restore the interrupts to the previous enabled
756+* state. The corresponding function, XIIC_GINTR_ENABLE, is provided to
757+* restore the interrupts to the previous enabled state. This function is
758+* designed to be used in critical sections of device drivers such that it is
759+* not necessary to disable other device interrupts.
760+*
761+* @param Instance local i2c instance
762+*
763+* @return None.
764+*
765+* @note C-Style signature:
766+* void XIIC_GINTR_DISABLE(i2c);
767+*
768+******************************************************************************/
769+#define XIIC_GINTR_DISABLE(Instance) \
770+ xiic_setreg32(Instance, XIIC_DGIER_OFFSET, 0)
771+
772+/******************************************************************************
773+*
774+* This macro writes to the global interrupt enable register to enable
775+* interrupts from the device. This register provides the ability to enable
776+* interrupts without any modifications to the interrupt enable register such
777+* that it is minimal effort to restore the interrupts to the previous enabled
778+* state. This function does not enable individual interrupts as the interrupt
779+* enable register must be set appropriately. This function is designed to be
780+* used in critical sections of device drivers such that it is not necessary to
781+* disable other device interrupts.
782+*
783+* @param Instance local I2C instance
784+*
785+* @return None.
786+*
787+* @note C-Style signature:
788+* void XIIC_GINTR_ENABLE(i2c);
789+*
790+******************************************************************************/
791+#define XIIC_GINTR_ENABLE(Instance) \
792+ xiic_setreg32(Instance, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK)
793+
794+/******************************************************************************
795+*
796+*
797+* This function sets the Interrupt status register to the specified value.
798+* This register indicates the status of interrupt sources for the device.
799+* The status is independent of whether interrupts are enabled such that
800+* the status register may also be polled when interrupts are not enabled.
801+*
802+* Each bit of the register correlates to a specific interrupt source within the
803+* IIC device. All bits of this register are latched. Setting a bit which is 0
804+* within this register causes an interrupt to be generated. The device global
805+* interrupt enable register and the device interrupt enable register must be set
806+* appropriately to allow an interrupt to be passed out of the device. The
807+* interrupt is cleared by writing to this register with the bits to be
808+* cleared set to a one and all others to zero. This register implements a
809+* toggle on write functionality meaning any bits which are set in the value
810+* written cause the bits in the register to change to the opposite state.
811+*
812+* This function writes only the specified value to the register such that
813+* some status bits may be set and others cleared. It is the caller's
814+* responsibility to get the value of the register prior to setting the value
815+* to prevent a destructive behavior.
816+*
817+* @param Instance local I2C instance
818+* @param Status contains the value to be written to the Interrupt
819+* status register.
820+*
821+* @return None.
822+*
823+* @note C-Style signature:
824+* void XIIC_WRITE_IISR(i2c, u32 Status);
825+*
826+******************************************************************************/
827+#define XIIC_WRITE_IISR(Instance, Status) \
828+ xiic_setreg32(Instance, XIIC_IISR_OFFSET, (Status))
829+
830+/******************************************************************************
831+*
832+*
833+* This function gets the contents of the Interrupt Status Register.
834+* This register indicates the status of interrupt sources for the device.
835+* The status is independent of whether interrupts are enabled such
836+* that the status register may also be polled when interrupts are not enabled.
837+*
838+* Each bit of the register correlates to a specific interrupt source within the
839+* device. All bits of this register are latched. Writing a 1 to a bit within
840+* this register causes an interrupt to be generated if enabled in the interrupt
841+* enable register and the global interrupt enable is set. Since the status is
842+* latched, each status bit must be acknowledged in order for the bit in the
843+* status register to be updated. Each bit can be acknowledged by writing a
844+* 0 to the bit in the status register.
845+*
846+* @param Instance local I2C instance
847+*
848+* @return A status which contains the value read from the Interrupt
849+* Status Register.
850+*
851+* @note C-Style signature:
852+* u32 XIIC_READ_IISR(i2c);
853+*
854+******************************************************************************/
855+#define XIIC_READ_IISR(Instance) \
856+ xiic_getreg32(Instance, XIIC_IISR_OFFSET)
857+
858+/******************************************************************************
859+*
860+* This function sets the contents of the Interrupt Enable Register. This
861+* register controls which interrupt sources of the IIC device are allowed to
862+* generate an interrupt. The global interrupt enable register and the device
863+* interrupt enable register must also be set appropriately for an interrupt
864+* to be passed out of the device.
865+*
866+* Each bit of the register correlates to a specific interrupt source within the
867+* device. Setting a bit in this register enables the interrupt source to generate
868+* an interrupt. Clearing a bit in this register disables interrupt generation
869+* for that interrupt source.
870+*
871+* This function writes only the specified value to the register such that
872+* some interrupt sources may be enabled and others disabled. It is the
873+* caller's responsibility to get the value of the interrupt enable register
874+* prior to setting the value to prevent a destructive behavior.
875+*
876+* @param Instance local I2C instance
877+* @param Enable contains the value to be written to the Interrupt Enable
878+* Register.
879+*
880+* @return None
881+*
882+* @note C-Style signature:
883+* void XIIC_WRITE_IIER(i2c, u32 Enable);
884+*
885+******************************************************************************/
886+#define XIIC_WRITE_IIER(Instance, Enable) \
887+ xiic_setreg32(Instance, XIIC_IIER_OFFSET, (Enable))
888+
889+/******************************************************************************
890+*
891+*
892+* This function gets the Interrupt enable register contents. This register
893+* controls which interrupt sources of the device are allowed to generate an
894+* interrupt. The global interrupt enable register and the device interrupt
895+* enable register must also be set appropriately for an interrupt to be
896+* passed out of the IIC device.
897+*
898+* Each bit of the register correlates to a specific interrupt source within the
899+* IIC device. Setting a bit in this register enables the interrupt source to
900+* generate an interrupt. Clearing a bit in this register disables interrupt
901+* generation for that interrupt source.
902+*
903+* @param Instance local I2C instance
904+*
905+* @return The contents read from the Interrupt Enable Register.
906+*
907+* @note C-Style signature:
908+* u32 XIIC_READ_IIER(i2c)
909+*
910+******************************************************************************/
911+#define XIIC_READ_IIER(Instance) \
912+ xiic_getreg32(Instance, XIIC_IIER_OFFSET)
913+
914+/************************** Function Prototypes ******************************/
915+
916+/******************************************************************************
917+*
918+* This macro disables the specified interrupts in the Interrupt enable
919+* register. It is non-destructive in that the register is read and only the
920+* interrupts specified are changed.
921+*
922+* @param BaseAddress is the base address of the IIC device.
923+* @param InterruptMask contains the interrupts to be disabled
924+*
925+* @return None.
926+*
927+* @note Signature:
928+* void XIic_mDisableIntr(u32 BaseAddress, u32 InterruptMask);
929+*
930+******************************************************************************/
931+#define XIic_mDisableIntr(Instance, InterruptMask) \
932+ XIIC_WRITE_IIER((Instance), XIIC_READ_IIER(Instance) & ~(InterruptMask))
933+
934+/******************************************************************************
935+*
936+* This macro enables the specified interrupts in the Interrupt enable
937+* register. It is non-destructive in that the register is read and only the
938+* interrupts specified are changed.
939+*
940+* @param BaseAddress is the base address of the IIC device.
941+* @param InterruptMask contains the interrupts to be enabled
942+*
943+* @return None.
944+*
945+* @note Signature:
946+* void XIic_mEnableIntr(u32 BaseAddress, u32 InterruptMask);
947+*
948+******************************************************************************/
949+#define XIic_mEnableIntr(Instance, InterruptMask) \
950+ XIIC_WRITE_IIER((Instance), XIIC_READ_IIER(Instance) | (InterruptMask))
951+
952+/******************************************************************************
953+*
954+* This macro clears the specified interrupt in the Interrupt status
955+* register. It is non-destructive in that the register is read and only the
956+* interrupt specified is cleared. Clearing an interrupt acknowledges it.
957+*
958+* @param BaseAddress is the base address of the IIC device.
959+* @param InterruptMask contains the interrupts to be cleared
960+*
961+* @return None.
962+*
963+* @note Signature:
964+* void XIic_mClearIntr(u32 BaseAddress, u32 InterruptMask);
965+*
966+******************************************************************************/
967+#define XIic_mClearIntr(Instance, InterruptMask) \
968+ XIIC_WRITE_IISR((Instance), XIIC_READ_IISR(Instance) & (InterruptMask))
969+
970+/******************************************************************************
971+*
972+* This macro clears and enables the specified interrupt in the Interrupt
973+* status and enable registers. It is non-destructive in that the registers are
974+* read and only the interrupt specified is modified.
975+* Clearing an interrupt acknowledges it.
976+*
977+* @param BaseAddress is the base address of the IIC device.
978+* @param InterruptMask contains the interrupts to be cleared and enabled
979+*
980+* @return None.
981+*
982+* @note Signature:
983+* void XIic_mClearEnableIntr(u32 BaseAddress, u32 InterruptMask);
984+*
985+******************************************************************************/
986+#define XIic_mClearEnableIntr(Instance, InterruptMask) { \
987+ XIIC_WRITE_IISR(Instance, \
988+ (XIIC_READ_IISR(Instance) & (InterruptMask))); \
989+ XIIC_WRITE_IIER(Instance, \
990+ (XIIC_READ_IIER(Instance) | (InterruptMask))); \
991+}
992+
993+
994+#define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
995+#define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos)
996+
997+static void xiic_clear_rx_fifo(struct xiic_i2c *i2c)
998+{
999+ u8 sr;
1000+ for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
1001+ !(sr & XIIC_SR_RX_FIFO_EMPTY_MASK);
1002+ sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET))
1003+ xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
1004+}
1005+
1006+/******************************************************************************
1007+ *
1008+ * Initialize the IIC core for Dynamic Functionality.
1009+ *
1010+ * @param i2c local I2C instance
1011+ *
1012+ * @return None.
1013+ *
1014+ * @note None.
1015+ *
1016+ ******************************************************************************/
1017+static void xiic_reinit(struct xiic_i2c *i2c)
1018+{
1019+ xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
1020+
1021+ /* Set receive Fifo depth to maximum (zero based). */
1022+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, IIC_RX_FIFO_DEPTH - 1);
1023+
1024+ /* Reset Tx Fifo. */
1025+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);
1026+
1027+ /* Enable IIC Device, remove Tx Fifo reset & disable general call. */
1028+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);
1029+
1030+ /* make sure RX fifo is empty */
1031+ xiic_clear_rx_fifo(i2c);
1032+
1033+ /* Enable interrupts */
1034+ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
1035+
1036+ XIic_mClearEnableIntr(i2c, XIIC_INTR_AAS_MASK |
1037+ XIIC_INTR_ARB_LOST_MASK);
1038+}
1039+
1040+/******************************************************************************
1041+ *
1042+ * De-Initialize the IIC core.
1043+ *
1044+ * @param i2c local I2C instance
1045+ *
1046+ * @return None.
1047+ *
1048+ * @note None.
1049+ *
1050+ ******************************************************************************/
1051+static void xiic_deinit(struct xiic_i2c *i2c)
1052+{
1053+ u8 cr;
1054+
1055+ xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
1056+
1057+ /* Disable IIC Device. */
1058+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
1059+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & ~XIIC_CR_ENABLE_DEVICE_MASK);
1060+}
1061+
1062+
1063+
1064+/*****************************************************************************
1065+ *
1066+ *
1067+ * This function is called when the receive register is full. The number
1068+ * of bytes received to cause the interrupt is adjustable using the Receive FIFO
1069+ * Depth register. The number of bytes in the register is read in the Receive
1070+ * FIFO occupancy register. Both these registers are zero based values (0-15)
1071+ * such that a value of zero indicates 1 byte.
1072+ *
1073+ * For a Master Receiver to properly signal the end of a message, the data must
1074+ * be read in up to the message length - 1, where control register bits will be
1075+ * set for bus controls to occur on reading of the last byte.
1076+ *
1077+ * @param InstancePtr is a pointer to the XIic instance to be worked on.
1078+ *
1079+ * @return None.
1080+ *
1081+ * @note None.
1082+ *
1083+ ******************************************************************************/
1084+static void xiic_read_rx(struct xiic_i2c *i2c)
1085+{
1086+ u8 bytes_in_fifo;
1087+ int i;
1088+
1089+ bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1;
1090+
1091+ dev_dbg(i2c->adap.dev.parent, "%s entry, bytes in fifo: %d, msg: %d"
1092+ ", SR: 0x%x, CR: 0x%x\n",
1093+ __func__, bytes_in_fifo, xiic_rx_space(i2c),
1094+ xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
1095+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
1096+
1097+ if (bytes_in_fifo > xiic_rx_space(i2c))
1098+ bytes_in_fifo = xiic_rx_space(i2c);
1099+
1100+ for (i = 0; i < bytes_in_fifo; i++)
1101+ i2c->rx_msg->buf[i2c->rx_pos++] =
1102+ xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
1103+
1104+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET,
1105+ (xiic_rx_space(i2c) > IIC_RX_FIFO_DEPTH) ?
1106+ IIC_RX_FIFO_DEPTH - 1 : xiic_rx_space(i2c) - 1);
1107+}
1108+
1109+/******************************************************************************
1110+ *
1111+ * This function fills the FIFO using the occupancy register to determine the
1112+ * available space to be filled. When the repeated start option is on, the last
1113+ * byte is withheld to allow the control register to be properly set on the last
1114+ * byte.
1115+ *
1116+ * @param InstancePtr is a pointer to the XIic instance to be worked on.
1117+ *
1118+ * @param Role indicates the role of this IIC device, a slave or a master, on
1119+ * the IIC bus (XIIC_SLAVE_ROLE or XIIC_MASTER_ROLE)
1120+ *
1121+ * @return
1122+ *
1123+ * None.
1124+ *
1125+ * @note
1126+ *
1127+ * None.
1128+ *
1129+ ******************************************************************************/
1130+static int xiic_tx_fifo_space(struct xiic_i2c *i2c)
1131+{
1132+ return IIC_TX_FIFO_DEPTH - xiic_getreg8(i2c, XIIC_TFO_REG_OFFSET) - 1;
1133+}
1134+
1135+static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
1136+{
1137+ u8 fifo_space = xiic_tx_fifo_space(i2c);
1138+ int len = xiic_tx_space(i2c);
1139+
1140+ len = (len > fifo_space) ? fifo_space : len;
1141+
1142+ dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
1143+ __func__, len, fifo_space);
1144+
1145+ while (len--) {
1146+ u16 data = i2c->tx_msg->buf[i2c->tx_pos++];
1147+ if ((xiic_tx_space(i2c) == 0) && (i2c->nmsgs == 1)) {
1148+ /* last message in transfer -> STOP */
1149+ data |= XIIC_TX_DYN_STOP_MASK;
1150+ dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
1151+
1152+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
1153+ } else
1154+ xiic_setreg8(i2c, XIIC_DTR_REG_OFFSET, data);
1155+ }
1156+}
1157+
1158+static void xiic_wakeup(struct xiic_i2c *i2c, int code)
1159+{
1160+ i2c->tx_msg = NULL;
1161+ i2c->rx_msg = NULL;
1162+ i2c->nmsgs = 0;
1163+ i2c->state = code;
1164+ wake_up(&i2c->wait);
1165+}
1166+
1167+static void xiic_process(struct xiic_i2c *i2c)
1168+{
1169+ u32 pend, isr, ier;
1170+ u32 Clear = 0;
1171+
1172+ /* Get the interrupt Status from the IPIF. There is no clearing of
1173+ * interrupts in the IPIF. Interrupts must be cleared at the source.
1174+ * To find which interrupts are pending; AND interrupts pending with
1175+ * interrupts masked.
1176+ */
1177+ isr = XIIC_READ_IISR(i2c);
1178+ ier = XIIC_READ_IIER(i2c);
1179+ pend = isr & ier;
1180+
1181+ dev_dbg(i2c->adap.dev.parent, "%s entry, IER: 0x%x, ISR: 0x%x, "
1182+ "pend: 0x%x, SR: 0x%x, msg: %p, nmsgs: %d\n",
1183+ __func__, ier, isr, pend, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
1184+ i2c->tx_msg, i2c->nmsgs);
1185+
1186+ /* Do not process a device's interrupts if the device has no
1187+ * interrupts pending
1188+ */
1189+ if (!pend)
1190+ return;
1191+
1192+ /* Service requesting interrupt */
1193+ if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
1194+ ((pend & XIIC_INTR_TX_ERROR_MASK) &&
1195+ !(pend & XIIC_INTR_RX_FULL_MASK))) {
1196+ /* bus arbitration lost, or...
1197+ * Transmit error _OR_ RX completed
1198+ * if this happens when RX_FULL is not set
1199+ * this is probably a TX error
1200+ */
1201+
1202+ dev_dbg(i2c->adap.dev.parent,
1203+ "%s error\n", __func__);
1204+
1205+ /* dynamic mode seems to suffer from problems if we just flush the
1206+ * fifos and the next message is a TX with len 0 (only addr);
1207+ * reset the IP instead of just flushing the fifos
1208+ */
1209+ xiic_reinit(i2c);
1210+
1211+ if (i2c->tx_msg)
1212+ xiic_wakeup(i2c, STATE_ERROR);
1213+
1214+ } else if (pend & XIIC_INTR_RX_FULL_MASK) {
1215+ /* Receive register/FIFO is full */
1216+
1217+ Clear = XIIC_INTR_RX_FULL_MASK;
1218+ if (!i2c->rx_msg) {
1219+ dev_dbg(i2c->adap.dev.parent,
1220+ "%s unexpected RX IRQ\n", __func__);
1221+ xiic_clear_rx_fifo(i2c);
1222+ goto out;
1223+ }
1224+
1225+ xiic_read_rx(i2c);
1226+ if (xiic_rx_space(i2c) == 0) {
1227+ /* this is the last part of the message */
1228+ i2c->rx_msg = NULL;
1229+
1230+ /* also clear TX error if there (RX complete) */
1231+ Clear |= (isr & XIIC_INTR_TX_ERROR_MASK);
1232+
1233+ dev_dbg(i2c->adap.dev.parent,
1234+ "%s end of message, nmsgs: %d\n",
1235+ __func__, i2c->nmsgs);
1236+
1237+ /* send next message if this wasn't the last,
1238+ * otherwise the transfer will be finalised when
1239+ * receiving the bus not busy interrupt
1240+ */
1241+ if (i2c->nmsgs > 1) {
1242+ i2c->nmsgs--;
1243+ i2c->tx_msg++;
1244+ dev_dbg(i2c->adap.dev.parent,
1245+ "%s will start next...\n", __func__);
1246+
1247+ __xiic_start_xfer(i2c);
1248+ }
1249+ }
1250+ } else if (pend & XIIC_INTR_BNB_MASK) {
1251+ /* IIC bus has transitioned to not busy */
1252+ Clear = XIIC_INTR_BNB_MASK;
1253+
1254+ /* The bus is not busy, disable BusNotBusy interrupt */
1255+ XIic_mDisableIntr(i2c, XIIC_INTR_BNB_MASK);
1256+
1257+ if (!i2c->tx_msg)
1258+ goto out;
1259+
1260+ if ((i2c->nmsgs == 1) && !i2c->rx_msg &&
1261+ xiic_tx_space(i2c) == 0)
1262+ xiic_wakeup(i2c, STATE_DONE);
1263+ else
1264+ xiic_wakeup(i2c, STATE_ERROR);
1265+
1266+ } else if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
1267+ /* Transmit register/FIFO is empty or ½ empty */
1268+
1269+ Clear = pend &
1270+ (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK);
1271+
1272+ if (!i2c->tx_msg) {
1273+ dev_dbg(i2c->adap.dev.parent,
1274+ "%s unexpected TX IRQ\n", __func__);
1275+ goto out;
1276+ }
1277+
1278+ xiic_fill_tx_fifo(i2c);
1279+
1280+ /* current message sent and there is space in the fifo */
1281+ if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
1282+ dev_dbg(i2c->adap.dev.parent,
1283+ "%s end of message sent, nmsgs: %d\n",
1284+ __func__, i2c->nmsgs);
1285+ if (i2c->nmsgs > 1) {
1286+ i2c->nmsgs--;
1287+ i2c->tx_msg++;
1288+ __xiic_start_xfer(i2c);
1289+ } else {
1290+ XIic_mDisableIntr(i2c, XIIC_INTR_TX_HALF_MASK);
1291+
1292+ dev_err(i2c->adap.dev.parent,
1293+ "%s Got TX IRQ but no more to do...\n",
1294+ __func__);
1295+ }
1296+ } else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1))
1297+ /* current frame is sent and is last,
1298+ * make sure to disable tx half
1299+ */
1300+ XIic_mDisableIntr(i2c, XIIC_INTR_TX_HALF_MASK);
1301+ } else {
1302+ /* got IRQ which is not acked */
1303+ dev_err(i2c->adap.dev.parent, "%s Got unexpected IRQ\n",
1304+ __func__);
1305+ Clear = pend;
1306+ }
1307+out:
1308+ dev_dbg(i2c->adap.dev.parent, "%s Clear: 0x%x\n", __func__, Clear);
1309+
1310+ XIIC_WRITE_IISR(i2c, Clear);
1311+}
1312+
1313+/******************************************************************************
1314+ *
1315+ * This function checks whether the IIC bus is busy by reading the bus busy
1316+ * bit in the status register. It does not itself enable the bus not busy
1317+ * interrupt; the caller polls it while waiting for the bus to become free.
1318+ *
1319+ * @param i2c local I2C instance
1320+ *
1321+ * @return 0 if the IIC bus is not busy, -EBUSY otherwise.
1322+ *
1323+ * @note None.
1324+ *
1325+ *
1326+ ******************************************************************************/
1327+static int xiic_bus_busy(struct xiic_i2c *i2c)
1328+{
1329+ u8 sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
1330+
1331+ return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0;
1332+}
1333+
1334+static int xiic_busy(struct xiic_i2c *i2c)
1335+{
1336+ int tries = 3;
1337+ int err;
1338+ if (i2c->tx_msg)
1339+ return -EBUSY;
1340+
1341+ /* for instance, if the previous transfer was terminated due to a TX error
1342+ * it might be that the bus is on its way to becoming available;
1343+ * give it at most 3 ms to wake
1344+ */
1345+ err = xiic_bus_busy(i2c);
1346+ while (err && tries--) {
1347+ mdelay(1);
1348+ err = xiic_bus_busy(i2c);
1349+ }
1350+
1351+ return err;
1352+}
1353+
1354+static void xiic_dump_regs(struct xiic_i2c *i2c, const char *caller)
1355+{
1356+ dev_dbg(i2c->adap.dev.parent, "%s msg: %p, nmsgs: %d, "
1357+ "ISR: 0x%x, CR: 0x%x, SR: 0x%x\n",
1358+ caller, i2c->tx_msg, i2c->nmsgs, XIIC_READ_IISR(i2c),
1359+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET),
1360+ xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
1361+}
1362+
1363+static void xiic_start_recv(struct xiic_i2c *i2c)
1364+{
1365+ u8 rx_watermark;
1366+ struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
1367+
1368+ xiic_dump_regs(i2c, __func__);
1369+
1370+ /* Clear and enable Rx full interrupt. */
1371+ XIic_mClearEnableIntr(i2c, XIIC_INTR_RX_FULL_MASK |
1372+ XIIC_INTR_TX_ERROR_MASK);
1373+
1374+ /* we want to get all but the last byte, because the TX_ERROR IRQ is used
1375+ * to indicate an error ACK on the address, and a negative ack on the last
1376+ * received byte; to not mix them up, receive all but the last.
1377+ * In the case where there is only one byte to receive
1378+ * we can check if ERROR and RX full are set at the same time
1379+ */
1380+ rx_watermark = msg->len;
1381+ if (rx_watermark > IIC_RX_FIFO_DEPTH)
1382+ rx_watermark = IIC_RX_FIFO_DEPTH;
1383+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
1384+
1385+ if (!(msg->flags & I2C_M_NOSTART))
1386+ /* write the address */
1387+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
1388+ (msg->addr << 1) | XIIC_READ_OPERATION |
1389+ XIIC_TX_DYN_START_MASK);
1390+
1391+ XIic_mClearEnableIntr(i2c,
1392+ XIIC_INTR_BNB_MASK);
1393+
1394+ xiic_dump_regs(i2c, "after address");
1395+
1396+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
1397+ msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
1398+ if (i2c->nmsgs == 1) {
1399+ /* very last, enable bus busy as well */
1400+ XIic_mClearEnableIntr(i2c, XIIC_INTR_BNB_MASK);
1401+ }
1402+
1403+ xiic_dump_regs(i2c, "xiic_start_recv exit");
1404+
1405+ /* the message has been transmitted */
1406+ i2c->tx_pos = msg->len;
1407+}
1408+
1409+static void xiic_start_send(struct xiic_i2c *i2c)
1410+{
1411+ struct i2c_msg *msg = i2c->tx_msg;
1412+
1413+ XIic_mClearIntr(i2c, XIIC_INTR_TX_ERROR_MASK);
1414+
1415+ dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d, "
1416+ "ISR: 0x%x, CR: 0x%x\n",
1417+ __func__, msg, msg->len, XIIC_READ_IISR(i2c),
1418+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
1419+
1420+ if (!(msg->flags & I2C_M_NOSTART)) {
1421+ /* write the address */
1422+ u16 data = ((msg->addr << 1) & 0xfe) | XIIC_WRITE_OPERATION |
1423+ XIIC_TX_DYN_START_MASK;
1424+ if ((i2c->nmsgs == 1) && msg->len == 0)
1425+ /* no data and last message -> add STOP */
1426+ data |= XIIC_TX_DYN_STOP_MASK;
1427+
1428+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
1429+ }
1430+
1431+ xiic_fill_tx_fifo(i2c);
1432+
1433+ /* Clear any pending Tx empty, Tx Error and then enable them. */
1434+ XIic_mClearEnableIntr(i2c,
1435+ XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_ERROR_MASK |
1436+ XIIC_INTR_BNB_MASK);
1437+}
1438+
1439+static inline void xiic_setreg8(struct xiic_i2c *i2c, int reg, u8 value)
1440+{
1441+ iowrite8(value, i2c->base + reg);
1442+}
1443+
1444+static inline u8 xiic_getreg8(struct xiic_i2c *i2c, int reg)
1445+{
1446+ return ioread8(i2c->base + reg);
1447+}
1448+
1449+static inline void xiic_setreg16(struct xiic_i2c *i2c, int reg, u16 value)
1450+{
1451+ iowrite16(value, i2c->base + reg);
1452+}
1453+
1454+static inline void xiic_setreg32(struct xiic_i2c *i2c, int reg, int value)
1455+{
1456+ iowrite32(value, i2c->base + reg);
1457+}
1458+
1459+static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg)
1460+{
1461+ return ioread32(i2c->base + reg);
1462+}
1463+
1464+static irqreturn_t xiic_isr(int irq, void *dev_id)
1465+{
1466+ struct xiic_i2c *i2c = dev_id;
1467+ spin_lock(&i2c->lock);
1468+ XIIC_GINTR_DISABLE(i2c);
1469+
1470+ dev_dbg(i2c->adap.dev.parent, "%s entry\n", __func__);
1471+
1472+ xiic_process(i2c);
1473+
1474+ XIIC_GINTR_ENABLE(i2c);
1475+
1476+ spin_unlock(&i2c->lock);
1477+
1478+ return IRQ_HANDLED;
1479+}
1480+
1481+static void __xiic_start_xfer(struct xiic_i2c *i2c)
1482+{
1483+ int first = 1;
1484+ int fifo_space = xiic_tx_fifo_space(i2c);
1485+ dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifo space: %d\n",
1486+ __func__, i2c->tx_msg, fifo_space);
1487+
1488+ if (!i2c->tx_msg)
1489+ return;
1490+
1491+ i2c->rx_pos = 0;
1492+ i2c->tx_pos = 0;
1493+ i2c->state = STATE_START;
1494+ while ((fifo_space >= 2) && (first || (i2c->nmsgs > 1))) {
1495+ if (!first) {
1496+ i2c->nmsgs--;
1497+ i2c->tx_msg++;
1498+ i2c->tx_pos = 0;
1499+ } else
1500+ first = 0;
1501+
1502+ if (i2c->tx_msg->flags & I2C_M_RD) {
1503+ /* we don't dare to put several reads in the FIFO */
1504+ xiic_start_recv(i2c);
1505+ return;
1506+ } else {
1507+ xiic_start_send(i2c);
1508+ if (xiic_tx_space(i2c) != 0) {
1509+ /* the message could not be completely sent */
1510+ break;
1511+ }
1512+ }
1513+
1514+ fifo_space = xiic_tx_fifo_space(i2c);
1515+ }
1516+
1517+ /* if there are more messages or the current one could not be completely
1518+ * put into the FIFO, also enable the half empty interrupt
1519+ */
1520+ if (i2c->nmsgs > 1 || xiic_tx_space(i2c))
1521+ XIic_mClearEnableIntr(i2c, XIIC_INTR_TX_HALF_MASK);
1522+
1523+}
1524+
1525+static void xiic_start_xfer(struct xiic_i2c *i2c)
1526+{
1527+ unsigned long flags;
1528+
1529+ spin_lock_irqsave(&i2c->lock, flags);
1530+ xiic_reinit(i2c);
1531+ XIIC_GINTR_DISABLE(i2c);
1532+ spin_unlock_irqrestore(&i2c->lock, flags);
1533+
1534+ __xiic_start_xfer(i2c);
1535+
1536+ XIIC_GINTR_ENABLE(i2c);
1537+}
1538+
1539+static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1540+{
1541+ struct xiic_i2c *i2c = i2c_get_adapdata(adap);
1542+ int err;
1543+
1544+ dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
1545+ xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
1546+
1547+ err = xiic_busy(i2c);
1548+ if (err) {
1549+ xiic_dump_regs(i2c, "bus busy");
1550+ return err;
1551+ }
1552+
1553+ i2c->tx_msg = msgs;
1554+ i2c->nmsgs = num;
1555+
1556+ xiic_start_xfer(i2c);
1557+
1558+ if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
1559+ (i2c->state == STATE_DONE), HZ))
1560+ return (i2c->state == STATE_DONE) ? num : -EIO;
1561+ else {
1562+ xiic_dump_regs(i2c, __func__);
1563+ i2c->tx_msg = NULL;
1564+ i2c->rx_msg = NULL;
1565+ i2c->nmsgs = 0;
1566+ return -ETIMEDOUT;
1567+ }
1568+}
1569+
1570+static u32 xiic_func(struct i2c_adapter *adap)
1571+{
1572+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1573+}
1574+
1575+static const struct i2c_algorithm xiic_algorithm = {
1576+ .master_xfer = xiic_xfer,
1577+ .functionality = xiic_func,
1578+};
1579+
1580+static struct i2c_adapter xiic_adapter = {
1581+ .owner = THIS_MODULE,
1582+ .name = DRIVER_NAME,
1583+ .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
1584+ .algo = &xiic_algorithm,
1585+};
1586+
1587+
1588+static int __devinit xiic_i2c_probe(struct platform_device *pdev)
1589+{
1590+ struct xiic_i2c *i2c;
1591+ struct xiic_i2c_platform_data *pdata;
1592+ struct resource *res;
1593+ int ret, irq;
1594+ u8 i;
1595+
1596+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1597+ if (!res)
1598+ return -ENODEV;
1599+
1600+ irq = platform_get_irq(pdev, 0);
1601+ if (irq < 0)
1602+ return -ENODEV;
1603+
1604+ pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
1605+ if (!pdata)
1606+ return -ENODEV;
1607+
1608+ i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
1609+ if (!i2c)
1610+ return -ENOMEM;
1611+
1612+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
1613+ dev_err(&pdev->dev, "Memory region busy\n");
1614+ ret = -EBUSY;
1615+ goto request_mem_failed;
1616+ }
1617+
1618+ i2c->base = ioremap(res->start, resource_size(res));
1619+ if (!i2c->base) {
1620+ dev_err(&pdev->dev, "Unable to map registers\n");
1621+ ret = -EIO;
1622+ goto map_failed;
1623+ }
1624+
1625+ /* hook up driver to tree */
1626+ platform_set_drvdata(pdev, i2c);
1627+ i2c->adap = xiic_adapter;
1628+ i2c_set_adapdata(&i2c->adap, i2c);
1629+ i2c->adap.dev.parent = &pdev->dev;
1630+
1631+ xiic_reinit(i2c);
1632+
1633+ spin_lock_init(&i2c->lock);
1634+ init_waitqueue_head(&i2c->wait);
1635+ ret = request_irq(irq, xiic_isr, 0, pdev->name, i2c);
1636+ if (ret) {
1637+ dev_err(&pdev->dev, "Cannot claim IRQ\n");
1638+ goto request_irq_failed;
1639+ }
1640+
1641+ /* add i2c adapter to i2c tree */
1642+ ret = i2c_add_adapter(&i2c->adap);
1643+ if (ret) {
1644+ dev_err(&pdev->dev, "Failed to add adapter\n");
1645+ goto add_adapter_failed;
1646+ }
1647+
1648+ /* add in known devices to the bus */
1649+ for (i = 0; i < pdata->num_devices; i++)
1650+ i2c_new_device(&i2c->adap, pdata->devices + i);
1651+
1652+ return 0;
1653+
1654+add_adapter_failed:
1655+ free_irq(irq, i2c);
1656+request_irq_failed:
1657+ xiic_deinit(i2c);
1658+ iounmap(i2c->base);
1659+map_failed:
1660+ release_mem_region(res->start, resource_size(res));
1661+request_mem_failed:
1662+ kfree(i2c);
1663+
1664+ return ret;
1665+}
1666+
1667+static int __devexit xiic_i2c_remove(struct platform_device* pdev)
1668+{
1669+ struct xiic_i2c *i2c = platform_get_drvdata(pdev);
1670+ struct resource *res;
1671+
1672+ /* remove adapter & data */
1673+ i2c_del_adapter(&i2c->adap);
1674+
1675+ xiic_deinit(i2c);
1676+
1677+ platform_set_drvdata(pdev, NULL);
1678+
1679+ free_irq(platform_get_irq(pdev, 0), i2c);
1680+
1681+ iounmap(i2c->base);
1682+
1683+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1684+ if (res)
1685+ release_mem_region(res->start, resource_size(res));
1686+
1687+ kfree(i2c);
1688+
1689+ return 0;
1690+}
1691+
1692+
1693+/* work with hotplug and coldplug */
1694+MODULE_ALIAS("platform:"DRIVER_NAME);
1695+
1696+static struct platform_driver xiic_i2c_driver = {
1697+ .probe = xiic_i2c_probe,
1698+ .remove = __devexit_p(xiic_i2c_remove),
1699+ .driver = {
1700+ .owner = THIS_MODULE,
1701+ .name = DRIVER_NAME,
1702+ },
1703+};
1704+
1705+static int __init xiic_i2c_init(void)
1706+{
1707+ return platform_driver_register(&xiic_i2c_driver);
1708+}
1709+
1710+static void __exit xiic_i2c_exit(void)
1711+{
1712+ platform_driver_unregister(&xiic_i2c_driver);
1713+}
1714+
1715+module_init(xiic_i2c_init);
1716+module_exit(xiic_i2c_exit);
1717+
1718+MODULE_AUTHOR("info@mocean-labs.com");
1719+MODULE_DESCRIPTION("Xilinx I2C bus driver");
1720+MODULE_LICENSE("GPL v2");
1721diff -uNr linux-2.6.31/drivers/i2c/busses/Kconfig linux-2.6.31.new/drivers/i2c/busses/Kconfig
1722--- linux-2.6.31/drivers/i2c/busses/Kconfig 2009-10-23 11:18:30.000000000 -0700
1723+++ linux-2.6.31.new/drivers/i2c/busses/Kconfig 2009-10-23 11:17:29.000000000 -0700
1724@@ -433,6 +433,16 @@
1725 This driver can also be built as a module. If so, the module
1726 will be called i2c-ocores.
1727
1728+config I2C_XILINX
1729+ tristate "Xilinx I2C Controller"
1730+ depends on EXPERIMENTAL && HAS_IOMEM
1731+ help
1732+ If you say yes to this option, support will be included for the
1733+ Xilinx I2C controller.
1734+
1735+ This driver can also be built as a module. If so, the module
1736+ will be called i2c-xiic.
1737+
1738 config I2C_OMAP
1739 tristate "OMAP I2C adapter"
1740 depends on ARCH_OMAP
1741diff -uNr linux-2.6.31/drivers/i2c/busses/Makefile linux-2.6.31.new/drivers/i2c/busses/Makefile
1742--- linux-2.6.31/drivers/i2c/busses/Makefile 2009-10-23 11:18:30.000000000 -0700
1743+++ linux-2.6.31.new/drivers/i2c/busses/Makefile 2009-10-23 11:17:29.000000000 -0700
1744@@ -40,6 +40,7 @@
1745 obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
1746 obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
1747 obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
1748+obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
1749 obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
1750 obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
1751 obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
1752diff -uNr linux-2.6.31/drivers/input/touchscreen/tsc2007.c linux-2.6.31.new/drivers/input/touchscreen/tsc2007.c
1753--- linux-2.6.31/drivers/input/touchscreen/tsc2007.c 2009-10-23 11:18:30.000000000 -0700
1754+++ linux-2.6.31.new/drivers/input/touchscreen/tsc2007.c 2009-10-23 11:17:19.000000000 -0700
1755@@ -21,15 +21,14 @@
1756 */
1757
1758 #include <linux/module.h>
1759-#include <linux/hrtimer.h>
1760 #include <linux/slab.h>
1761 #include <linux/input.h>
1762 #include <linux/interrupt.h>
1763 #include <linux/i2c.h>
1764 #include <linux/i2c/tsc2007.h>
1765
1766-#define TS_POLL_DELAY (10 * 1000) /* ns delay before the first sample */
1767-#define TS_POLL_PERIOD (5 * 1000) /* ns delay between samples */
1768+#define TS_POLL_DELAY 1 /* ms delay before the first sample */
1769+#define TS_POLL_PERIOD 1 /* ms delay between samples */
1770
1771 #define TSC2007_MEASURE_TEMP0 (0x0 << 4)
1772 #define TSC2007_MEASURE_AUX (0x2 << 4)
1773@@ -70,17 +69,15 @@
1774 struct tsc2007 {
1775 struct input_dev *input;
1776 char phys[32];
1777- struct hrtimer timer;
1778- struct ts_event tc;
1779+ struct delayed_work work;
1780
1781 struct i2c_client *client;
1782
1783- spinlock_t lock;
1784-
1785 u16 model;
1786 u16 x_plate_ohms;
1787
1788- unsigned pendown;
1789+ bool pendown;
1790+ bool ignore_first_irq;
1791 int irq;
1792
1793 int (*get_pendown_state)(void);
1794@@ -109,52 +106,96 @@
1795 return val;
1796 }
1797
1798-static void tsc2007_send_event(void *tsc)
1799+static void tsc2007_read_values(struct tsc2007 *tsc, struct ts_event *tc)
1800+{
1801+ /* y- still on; turn on only y+ (and ADC) */
1802+ tc->y = tsc2007_xfer(tsc, READ_Y);
1803+
1804+ /* turn y- off, x+ on, then leave in lowpower */
1805+ tc->x = tsc2007_xfer(tsc, READ_X);
1806+
1807+ /* turn y+ off, x- on; we'll use formula #1 */
1808+ tc->z1 = tsc2007_xfer(tsc, READ_Z1);
1809+ tc->z2 = tsc2007_xfer(tsc, READ_Z2);
1810+
1811+ /* Prepare for next touch reading - power down ADC, enable PENIRQ */
1812+ tsc2007_xfer(tsc, PWRDOWN);
1813+}
1814+
1815+static u32 tsc2007_calculate_pressure(struct tsc2007 *tsc, struct ts_event *tc)
1816 {
1817- struct tsc2007 *ts = tsc;
1818- u32 rt;
1819- u16 x, y, z1, z2;
1820-
1821- x = ts->tc.x;
1822- y = ts->tc.y;
1823- z1 = ts->tc.z1;
1824- z2 = ts->tc.z2;
1825+ u32 rt = 0;
1826
1827 /* range filtering */
1828- if (x == MAX_12BIT)
1829- x = 0;
1830+ if (tc->x == MAX_12BIT)
1831+ tc->x = 0;
1832
1833- if (likely(x && z1)) {
1834+ if (likely(tc->x && tc->z1)) {
1835 /* compute touch pressure resistance using equation #1 */
1836- rt = z2;
1837- rt -= z1;
1838- rt *= x;
1839- rt *= ts->x_plate_ohms;
1840- rt /= z1;
1841+ rt = tc->z2 - tc->z1;
1842+ rt *= tc->x;
1843+ rt *= tsc->x_plate_ohms;
1844+ rt /= tc->z1;
1845 rt = (rt + 2047) >> 12;
1846- } else
1847- rt = 0;
1848+ }
1849+
1850+ return rt;
1851+}
1852+
1853+static void tsc2007_send_up_event(struct tsc2007 *tsc)
1854+{
1855+ struct input_dev *input = tsc->input;
1856+
1857+ dev_dbg(&tsc->client->dev, "UP\n");
1858
1859- /* Sample found inconsistent by debouncing or pressure is beyond
1860- * the maximum. Don't report it to user space, repeat at least
1861- * once more the measurement
1862+ input_report_key(input, BTN_TOUCH, 0);
1863+ input_report_abs(input, ABS_PRESSURE, 0);
1864+ input_sync(input);
1865+}
1866+
1867+static void tsc2007_work(struct work_struct *work)
1868+{
1869+ struct tsc2007 *ts =
1870+ container_of(to_delayed_work(work), struct tsc2007, work);
1871+ struct ts_event tc;
1872+ u32 rt;
1873+
1874+ /*
1875+ * NOTE: We can't rely on the pressure to determine the pen down
1876+ * state, even though this controller has a pressure sensor.
1877+ * The pressure value can fluctuate for quite a while after
1878+ * lifting the pen and in some cases may not even settle at the
1879+ * expected value.
1880+ *
1881+ * The only safe way to check for the pen up condition is in the
1882+ * work function by reading the pen signal state (it's a GPIO
1883+ * and IRQ). Unfortunately such a callback is not always available;
1884+ * in that case we have to rely on the pressure anyway.
1885 */
1886+ if (ts->get_pendown_state) {
1887+ if (unlikely(!ts->get_pendown_state())) {
1888+ tsc2007_send_up_event(ts);
1889+ ts->pendown = false;
1890+ goto out;
1891+ }
1892+
1893+ dev_dbg(&ts->client->dev, "pen is still down\n");
1894+ }
1895+
1896+ tsc2007_read_values(ts, &tc);
1897+
1898+ rt = tsc2007_calculate_pressure(ts, &tc);
1899 if (rt > MAX_12BIT) {
1900+ /*
1901+ * Sample found inconsistent by debouncing or pressure is
1902+ * beyond the maximum. Don't report it to user space,
1903+ * repeat at least once more the measurement.
1904+ */
1905 dev_dbg(&ts->client->dev, "ignored pressure %d\n", rt);
1906+ goto out;
1907
1908- hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD),
1909- HRTIMER_MODE_REL);
1910- return;
1911 }
1912
1913- /* NOTE: We can't rely on the pressure to determine the pen down
1914- * state, even this controller has a pressure sensor. The pressure
1915- * value can fluctuate for quite a while after lifting the pen and
1916- * in some cases may not even settle at the expected value.
1917- *
1918- * The only safe way to check for the pen up condition is in the
1919- * timer by reading the pen signal state (it's a GPIO _and_ IRQ).
1920- */
1921 if (rt) {
1922 struct input_dev *input = ts->input;
1923
1924@@ -162,102 +203,82 @@
1925 dev_dbg(&ts->client->dev, "DOWN\n");
1926
1927 input_report_key(input, BTN_TOUCH, 1);
1928- ts->pendown = 1;
1929+ ts->pendown = true;
1930 }
1931
1932- input_report_abs(input, ABS_X, x);
1933- input_report_abs(input, ABS_Y, y);
1934+ input_report_abs(input, ABS_X, tc.x);
1935+ input_report_abs(input, ABS_Y, tc.y);
1936 input_report_abs(input, ABS_PRESSURE, rt);
1937
1938 input_sync(input);
1939
1940 dev_dbg(&ts->client->dev, "point(%4d,%4d), pressure (%4u)\n",
1941- x, y, rt);
1942- }
1943-
1944- hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD),
1945- HRTIMER_MODE_REL);
1946-}
1947-
1948-static int tsc2007_read_values(struct tsc2007 *tsc)
1949-{
1950- /* y- still on; turn on only y+ (and ADC) */
1951- tsc->tc.y = tsc2007_xfer(tsc, READ_Y);
1952-
1953- /* turn y- off, x+ on, then leave in lowpower */
1954- tsc->tc.x = tsc2007_xfer(tsc, READ_X);
1955-
1956- /* turn y+ off, x- on; we'll use formula #1 */
1957- tsc->tc.z1 = tsc2007_xfer(tsc, READ_Z1);
1958- tsc->tc.z2 = tsc2007_xfer(tsc, READ_Z2);
1959-
1960- /* power down */
1961- tsc2007_xfer(tsc, PWRDOWN);
1962-
1963- return 0;
1964-}
1965-
1966-static enum hrtimer_restart tsc2007_timer(struct hrtimer *handle)
1967-{
1968- struct tsc2007 *ts = container_of(handle, struct tsc2007, timer);
1969- unsigned long flags;
1970-
1971- spin_lock_irqsave(&ts->lock, flags);
1972-
1973- if (unlikely(!ts->get_pendown_state() && ts->pendown)) {
1974- struct input_dev *input = ts->input;
1975-
1976- dev_dbg(&ts->client->dev, "UP\n");
1977+ tc.x, tc.y, rt);
1978
1979- input_report_key(input, BTN_TOUCH, 0);
1980- input_report_abs(input, ABS_PRESSURE, 0);
1981- input_sync(input);
1982+ } else if (!ts->get_pendown_state && ts->pendown) {
1983+ /*
1984+ * We don't have a callback to check the pendown state, so we
1985+ * have to assume that since the reported pressure is 0 the
1986+ * pen was lifted up.
1987+ */
1988+ tsc2007_send_up_event(ts);
1989+ ts->pendown = false;
1990+ }
1991
1992- ts->pendown = 0;
1993+ out:
1994+ if (ts->pendown)
1995+ schedule_delayed_work(&ts->work,
1996+ msecs_to_jiffies(TS_POLL_PERIOD));
1997+ else {
1998+ if (!ts->get_pendown_state)
1999+ ts->ignore_first_irq = 1;
2000 enable_irq(ts->irq);
2001- } else {
2002- /* pen is still down, continue with the measurement */
2003- dev_dbg(&ts->client->dev, "pen is still down\n");
2004-
2005- tsc2007_read_values(ts);
2006- tsc2007_send_event(ts);
2007 }
2008-
2009- spin_unlock_irqrestore(&ts->lock, flags);
2010-
2011- return HRTIMER_NORESTART;
2012 }
2013
2014 static irqreturn_t tsc2007_irq(int irq, void *handle)
2015 {
2016 struct tsc2007 *ts = handle;
2017- unsigned long flags;
2018
2019- spin_lock_irqsave(&ts->lock, flags);
2020+ if (ts->ignore_first_irq) {
2021+ ts->ignore_first_irq = 0;
2022+ return IRQ_HANDLED;
2023+ }
2024
2025- if (likely(ts->get_pendown_state())) {
2026+ if (!ts->get_pendown_state || likely(ts->get_pendown_state())) {
2027 disable_irq_nosync(ts->irq);
2028- hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_DELAY),
2029- HRTIMER_MODE_REL);
2030+ schedule_delayed_work(&ts->work,
2031+ msecs_to_jiffies(TS_POLL_DELAY));
2032 }
2033
2034 if (ts->clear_penirq)
2035 ts->clear_penirq();
2036
2037- spin_unlock_irqrestore(&ts->lock, flags);
2038-
2039 return IRQ_HANDLED;
2040 }
2041
2042-static int tsc2007_probe(struct i2c_client *client,
2043- const struct i2c_device_id *id)
2044+static void tsc2007_free_irq(struct tsc2007 *ts)
2045+{
2046+ free_irq(ts->irq, ts);
2047+ if (cancel_delayed_work_sync(&ts->work)) {
2048+ /*
2049+ * Work was pending, therefore we need to enable
2050+ * IRQ here to balance the disable_irq() done in the
2051+ * interrupt handler.
2052+ */
2053+ enable_irq(ts->irq);
2054+ }
2055+}
2056+
2057+static int __devinit tsc2007_probe(struct i2c_client *client,
2058+ const struct i2c_device_id *id)
2059 {
2060 struct tsc2007 *ts;
2061 struct tsc2007_platform_data *pdata = pdata = client->dev.platform_data;
2062 struct input_dev *input_dev;
2063 int err;
2064
2065- if (!pdata || !pdata->get_pendown_state) {
2066+ if (!pdata) {
2067 dev_err(&client->dev, "platform data is required!\n");
2068 return -EINVAL;
2069 }
2070@@ -274,22 +295,15 @@
2071 }
2072
2073 ts->client = client;
2074- i2c_set_clientdata(client, ts);
2075-
2076+ ts->irq = client->irq;
2077 ts->input = input_dev;
2078-
2079- hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2080- ts->timer.function = tsc2007_timer;
2081-
2082- spin_lock_init(&ts->lock);
2083+ INIT_DELAYED_WORK(&ts->work, tsc2007_work);
2084
2085 ts->model = pdata->model;
2086 ts->x_plate_ohms = pdata->x_plate_ohms;
2087 ts->get_pendown_state = pdata->get_pendown_state;
2088 ts->clear_penirq = pdata->clear_penirq;
2089
2090- pdata->init_platform_hw();
2091-
2092 snprintf(ts->phys, sizeof(ts->phys),
2093 "%s/input0", dev_name(&client->dev));
2094
2095@@ -304,9 +318,8 @@
2096 input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
2097 input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
2098
2099- tsc2007_read_values(ts);
2100-
2101- ts->irq = client->irq;
2102+ if (pdata->init_platform_hw)
2103+ pdata->init_platform_hw();
2104
2105 err = request_irq(ts->irq, tsc2007_irq, 0,
2106 client->dev.driver->name, ts);
2107@@ -319,29 +332,37 @@
2108 if (err)
2109 goto err_free_irq;
2110
2111- dev_info(&client->dev, "registered with irq (%d)\n", ts->irq);
2112+ i2c_set_clientdata(client, ts);
2113+
2114+ /* Prepare for touch readings - power down ADC and enable PENIRQ */
2115+ err = tsc2007_xfer(ts, PWRDOWN);
2116+ if (err < 0)
2117+ goto err_unreg_dev;
2118
2119 return 0;
2120
2121+ err_unreg_dev:
2122+ input_unregister_device(ts->input);
2123 err_free_irq:
2124- free_irq(ts->irq, ts);
2125- hrtimer_cancel(&ts->timer);
2126+ tsc2007_free_irq(ts);
2127+ if (pdata->exit_platform_hw)
2128+ pdata->exit_platform_hw();
2129 err_free_mem:
2130 input_free_device(input_dev);
2131 kfree(ts);
2132 return err;
2133 }
2134
2135-static int tsc2007_remove(struct i2c_client *client)
2136+static int __devexit tsc2007_remove(struct i2c_client *client)
2137 {
2138 struct tsc2007 *ts = i2c_get_clientdata(client);
2139- struct tsc2007_platform_data *pdata;
2140+ struct tsc2007_platform_data *pdata = client->dev.platform_data;
2141
2142- pdata = client->dev.platform_data;
2143- pdata->exit_platform_hw();
2144+ tsc2007_free_irq(ts);
2145+
2146+ if (pdata->exit_platform_hw)
2147+ pdata->exit_platform_hw();
2148
2149- free_irq(ts->irq, ts);
2150- hrtimer_cancel(&ts->timer);
2151 input_unregister_device(ts->input);
2152 kfree(ts);
2153
2154@@ -362,7 +383,7 @@
2155 },
2156 .id_table = tsc2007_idtable,
2157 .probe = tsc2007_probe,
2158- .remove = tsc2007_remove,
2159+ .remove = __devexit_p(tsc2007_remove),
2160 };
2161
2162 static int __init tsc2007_init(void)
2163diff -uNr linux-2.6.31/drivers/media/radio/Kconfig linux-2.6.31.new/drivers/media/radio/Kconfig
2164--- linux-2.6.31/drivers/media/radio/Kconfig 2009-10-23 11:18:30.000000000 -0700
2165+++ linux-2.6.31.new/drivers/media/radio/Kconfig 2009-10-23 11:17:28.000000000 -0700
2166@@ -406,4 +406,38 @@
2167 Say Y here if TEA5764 have a 32768 Hz crystal in circuit, say N
2168 here if TEA5764 reference frequency is connected in FREQIN.
2169
2170+config RADIO_SAA7706H
2171+ tristate "SAA7706H Car Radio DSP"
2172+ depends on I2C && VIDEO_V4L2
2173+ ---help---
2174+ Say Y here if you want to use the SAA7706H Car radio Digital
2175+ Signal Processor, found for instance on the Russellville development
2176+ board. On the Russellville the device is connected to the internal
2177+ Timberdale I2C bus.
2178+
2179+ To compile this driver as a module, choose M here: the
2180+ module will be called SAA7706H.
2181+
2182+config RADIO_TEF6862
2183+ tristate "TEF6862 Car Radio Enhanced Selectivity Tuner"
2184+ depends on I2C && VIDEO_V4L2
2185+ ---help---
2186+ Say Y here if you want to use the TEF6862 Car Radio Enhanced
2187+ Selectivity Tuner, found for instance on the Russellville development
2188+ board. On the Russellville the device is connected to the internal
2189+ Timberdale I2C bus.
2190+
2191+ To compile this driver as a module, choose M here: the
2192+ module will be called TEF6862.
2193+
2194+config RADIO_TIMBERDALE
2195+ tristate "Enable the Timberdale radio driver"
2196+ depends on MFD_TIMBERDALE && VIDEO_V4L2 && HAS_IOMEM
2197+ select RADIO_TEF6862
2198+ select RADIO_SAA7706H
2199+ ---help---
2200+ This is a kind of umbrella driver for the Radio Tuner and DSP
2201+ found behind the Timberdale FPGA on the Russellville board.
2202+ Enabling this driver will automatically select the DSP and tuner.
2203+
2204 endif # RADIO_ADAPTERS
2205diff -uNr linux-2.6.31/drivers/media/radio/Makefile linux-2.6.31.new/drivers/media/radio/Makefile
2206--- linux-2.6.31/drivers/media/radio/Makefile 2009-10-23 11:18:30.000000000 -0700
2207+++ linux-2.6.31.new/drivers/media/radio/Makefile 2009-10-23 11:17:28.000000000 -0700
2208@@ -20,5 +20,8 @@
2209 obj-$(CONFIG_USB_SI470X) += radio-si470x.o
2210 obj-$(CONFIG_USB_MR800) += radio-mr800.o
2211 obj-$(CONFIG_RADIO_TEA5764) += radio-tea5764.o
2212+obj-$(CONFIG_RADIO_SAA7706H) += saa7706h.o
2213+obj-$(CONFIG_RADIO_TEF6862) += tef6862.o
2214+obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
2215
2216 EXTRA_CFLAGS += -Isound
2217diff -uNr linux-2.6.31/drivers/media/radio/radio-timb.c linux-2.6.31.new/drivers/media/radio/radio-timb.c
2218--- linux-2.6.31/drivers/media/radio/radio-timb.c 1969-12-31 16:00:00.000000000 -0800
2219+++ linux-2.6.31.new/drivers/media/radio/radio-timb.c 2009-10-23 11:17:28.000000000 -0700
2220@@ -0,0 +1,545 @@
2221+/*
2222+ * radio-timb.c Timberdale FPGA Radio driver
2223+ * Copyright (c) 2009 Intel Corporation
2224+ *
2225+ * This program is free software; you can redistribute it and/or modify
2226+ * it under the terms of the GNU General Public License version 2 as
2227+ * published by the Free Software Foundation.
2228+ *
2229+ * This program is distributed in the hope that it will be useful,
2230+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
2231+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2232+ * GNU General Public License for more details.
2233+ *
2234+ * You should have received a copy of the GNU General Public License
2235+ * along with this program; if not, write to the Free Software
2236+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
2237+ */
2238+
2239+#include <linux/list.h>
2240+#include <linux/version.h>
2241+#include <linux/module.h>
2242+#include <linux/io.h>
2243+#include <media/v4l2-common.h>
2244+#include <media/v4l2-ioctl.h>
2245+#include <media/v4l2-device.h>
2246+#include <linux/platform_device.h>
2247+#include <linux/interrupt.h>
2248+#include <linux/i2c.h>
2249+#include <media/timb_radio.h>
2250+
2251+#define DRIVER_NAME "timb-radio"
2252+
2253+#define RDS_BLOCK_SIZE 4
2254+#define RDS_BUFFER_SIZE (RDS_BLOCK_SIZE * 100)
2255+
2256+struct timbradio {
2257+ struct mutex lock; /* for mutual exclusion */
2258+ void __iomem *membase;
2259+ struct timb_radio_platform_data pdata;
2260+ struct v4l2_subdev *sd_tuner;
2261+ struct module *tuner_owner;
2262+ struct v4l2_subdev *sd_dsp;
2263+ struct module *dsp_owner;
2264+ struct video_device *video_dev;
2265+ /* RDS related */
2266+ int open_count;
2267+ int rds_irq;
2268+ wait_queue_head_t read_queue;
2269+ unsigned char buffer[RDS_BUFFER_SIZE];
2270+ unsigned int rd_index;
2271+ unsigned int wr_index;
2272+};
2273+
2274+
2275+static int timbradio_vidioc_querycap(struct file *file, void *priv,
2276+ struct v4l2_capability *v)
2277+{
2278+ strlcpy(v->driver, DRIVER_NAME, sizeof(v->driver));
2279+ strlcpy(v->card, "Timberdale Radio", sizeof(v->card));
2280+ snprintf(v->bus_info, sizeof(v->bus_info), "platform:"DRIVER_NAME);
2281+ v->version = KERNEL_VERSION(0, 0, 1);
2282+ v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
2283+ return 0;
2284+}
2285+
2286+static int timbradio_vidioc_g_tuner(struct file *file, void *priv,
2287+ struct v4l2_tuner *v)
2288+{
2289+ struct timbradio *tr = video_drvdata(file);
2290+ int ret;
2291+
2292+ mutex_lock(&tr->lock);
2293+ ret = v4l2_subdev_call(tr->sd_tuner, tuner, g_tuner, v);
2294+ mutex_unlock(&tr->lock);
2295+
2296+ return ret;
2297+}
2298+
2299+static int timbradio_vidioc_s_tuner(struct file *file, void *priv,
2300+ struct v4l2_tuner *v)
2301+{
2302+ struct timbradio *tr = video_drvdata(file);
2303+ int ret;
2304+
2305+ mutex_lock(&tr->lock);
2306+ ret = v4l2_subdev_call(tr->sd_tuner, tuner, s_tuner, v);
2307+ mutex_unlock(&tr->lock);
2308+
2309+ return ret;
2310+}
2311+
2312+static int timbradio_vidioc_g_input(struct file *filp, void *priv,
2313+ unsigned int *i)
2314+{
2315+ *i = 0;
2316+ return 0;
2317+}
2318+
2319+static int timbradio_vidioc_s_input(struct file *filp, void *priv,
2320+ unsigned int i)
2321+{
2322+ return i ? -EINVAL : 0;
2323+}
2324+
2325+static int timbradio_vidioc_g_audio(struct file *file, void *priv,
2326+ struct v4l2_audio *a)
2327+{
2328+ a->index = 0;
2329+ strlcpy(a->name, "Radio", sizeof(a->name));
2330+ a->capability = V4L2_AUDCAP_STEREO;
2331+ return 0;
2332+}
2333+
2334+
2335+static int timbradio_vidioc_s_audio(struct file *file, void *priv,
2336+ struct v4l2_audio *a)
2337+{
2338+ return a->index ? -EINVAL : 0;
2339+}
2340+
2341+static int timbradio_vidioc_s_frequency(struct file *file, void *priv,
2342+ struct v4l2_frequency *f)
2343+{
2344+ struct timbradio *tr = video_drvdata(file);
2345+ int ret;
2346+
2347+ mutex_lock(&tr->lock);
2348+ ret = v4l2_subdev_call(tr->sd_tuner, tuner, s_frequency, f);
2349+ mutex_unlock(&tr->lock);
2350+
2351+ return ret;
2352+}
2353+
2354+static int timbradio_vidioc_g_frequency(struct file *file, void *priv,
2355+ struct v4l2_frequency *f)
2356+{
2357+ struct timbradio *tr = video_drvdata(file);
2358+ int ret;
2359+
2360+ mutex_lock(&tr->lock);
2361+ ret = v4l2_subdev_call(tr->sd_tuner, tuner, g_frequency, f);
2362+ mutex_unlock(&tr->lock);
2363+
2364+ return ret;
2365+}
2366+
2367+static int timbradio_vidioc_queryctrl(struct file *file, void *priv,
2368+ struct v4l2_queryctrl *qc)
2369+{
2370+ struct timbradio *tr = video_drvdata(file);
2371+ int ret;
2372+
2373+ mutex_lock(&tr->lock);
2374+ ret = v4l2_subdev_call(tr->sd_dsp, core, queryctrl, qc);
2375+ mutex_unlock(&tr->lock);
2376+
2377+ return ret;
2378+}
2379+
2380+static int timbradio_vidioc_g_ctrl(struct file *file, void *priv,
2381+ struct v4l2_control *ctrl)
2382+{
2383+ struct timbradio *tr = video_drvdata(file);
2384+ int ret;
2385+
2386+ mutex_lock(&tr->lock);
2387+ ret = v4l2_subdev_call(tr->sd_dsp, core, g_ctrl, ctrl);
2388+ mutex_unlock(&tr->lock);
2389+
2390+ return ret;
2391+}
2392+
2393+static int timbradio_vidioc_s_ctrl(struct file *file, void *priv,
2394+ struct v4l2_control *ctrl)
2395+{
2396+ struct timbradio *tr = video_drvdata(file);
2397+ int ret;
2398+
2399+ mutex_lock(&tr->lock);
2400+ ret = v4l2_subdev_call(tr->sd_dsp, core, s_ctrl, ctrl);
2401+ mutex_unlock(&tr->lock);
2402+
2403+ return ret;
2404+}
2405+
2406+static const struct v4l2_ioctl_ops timbradio_ioctl_ops = {
2407+ .vidioc_querycap = timbradio_vidioc_querycap,
2408+ .vidioc_g_tuner = timbradio_vidioc_g_tuner,
2409+ .vidioc_s_tuner = timbradio_vidioc_s_tuner,
2410+ .vidioc_g_frequency = timbradio_vidioc_g_frequency,
2411+ .vidioc_s_frequency = timbradio_vidioc_s_frequency,
2412+ .vidioc_g_input = timbradio_vidioc_g_input,
2413+ .vidioc_s_input = timbradio_vidioc_s_input,
2414+ .vidioc_g_audio = timbradio_vidioc_g_audio,
2415+ .vidioc_s_audio = timbradio_vidioc_s_audio,
2416+ .vidioc_queryctrl = timbradio_vidioc_queryctrl,
2417+ .vidioc_g_ctrl = timbradio_vidioc_g_ctrl,
2418+ .vidioc_s_ctrl = timbradio_vidioc_s_ctrl
2419+};
2420+
2421+static irqreturn_t timbradio_irq(int irq, void *devid)
2422+{
2423+ struct timbradio *tr = devid;
2424+ u32 data = ioread32(tr->membase);
2425+
2426+ tr->buffer[tr->wr_index++] = data >> 24;
2427+ tr->buffer[tr->wr_index++] = data >> 16;
2428+ tr->buffer[tr->wr_index++] = data >> 8;
2429+ tr->buffer[tr->wr_index++] = data;
2430+ tr->wr_index %= RDS_BUFFER_SIZE;
2431+
2432+ wake_up(&tr->read_queue);
2433+
2434+ /* new RDS data received, read it */
2435+ return IRQ_HANDLED;
2436+}
2437+
2438+/**************************************************************************
2439+ * File Operations Interface
2440+ **************************************************************************/
2441+
2442+static ssize_t timbradio_rds_fops_read(struct file *file, char __user *buf,
2443+ size_t count, loff_t *ppos)
2444+{
2445+ struct timbradio *tr = video_drvdata(file);
2446+ int outblocks = 0;
2447+
2448+ /* block if no new data available */
2449+ while (tr->wr_index == tr->rd_index) {
2450+ if (file->f_flags & O_NONBLOCK)
2451+ return -EWOULDBLOCK;
2452+
2453+ if (wait_event_interruptible(tr->read_queue,
2454+ tr->wr_index != tr->rd_index))
2455+ return -EINTR;
2456+ }
2457+
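+	/*
+	 * Only whole RDS blocks are copied; a count smaller than
+	 * RDS_BLOCK_SIZE falls through the loop and returns 0.
+	 */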
2458+ count /= RDS_BLOCK_SIZE;
2459+	/* copy RDS blocks from the internal buffer to the user buffer */
2460+ mutex_lock(&tr->lock);
2461+ while (outblocks < count) {
2462+ if (tr->rd_index == tr->wr_index)
2463+ break;
2464+
2465+ if (copy_to_user(buf, tr->buffer + tr->rd_index,
2466+ RDS_BLOCK_SIZE))
2467+ break;
2468+ tr->rd_index += RDS_BLOCK_SIZE;
2469+ tr->rd_index %= RDS_BUFFER_SIZE;
2470+ outblocks++;
2471+ }
2472+ mutex_unlock(&tr->lock);
2473+
2474+	return outblocks * RDS_BLOCK_SIZE;
2475+}
2476+
2477+static unsigned int timbradio_rds_fops_poll(struct file *file,
2478+ struct poll_table_struct *pts)
2479+{
2480+ struct timbradio *tr = video_drvdata(file);
2481+
2482+ poll_wait(file, &tr->read_queue, pts);
2483+
2484+ if (tr->rd_index != tr->wr_index)
2485+ return POLLIN | POLLRDNORM;
2486+
2487+ return 0;
2488+}
2489+
2490+struct find_addr_arg {
2491+ char const *name;
2492+ struct i2c_client *client;
2493+};
2494+
2495+static int find_name(struct device *dev, void *argp)
2496+{
2497+ struct find_addr_arg *arg = (struct find_addr_arg *)argp;
2498+ struct i2c_client *client = i2c_verify_client(dev);
2499+
2500+ if (client && !strcmp(arg->name, client->name) && client->driver)
2501+ arg->client = client;
2502+
2503+ return 0;
2504+}
2505+
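+/*
+ * Walk the children of the I2C adapter and return the client whose name
+ * matches and whose driver is bound; used at open time to locate the
+ * tuner and DSP subdevices.
+ */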
2506+static struct i2c_client *find_client(struct i2c_adapter *adapt,
2507+ const char *name)
2508+{
2509+ struct find_addr_arg find_arg;
2510+	/* load the subdevice driver module if needed */
2511+#ifdef MODULE
2512+ request_module(name);
2513+#endif
2514+ /* code for finding the I2C child */
2515+ find_arg.name = name;
2516+ find_arg.client = NULL;
2517+ device_for_each_child(&adapt->dev, &find_arg, find_name);
2518+ return find_arg.client;
2519+}
2520+
2521+static int timbradio_rds_fops_open(struct file *file)
2522+{
2523+ struct timbradio *tr = video_drvdata(file);
2524+ int err = 0;
2525+
2526+ mutex_lock(&tr->lock);
2527+ if (tr->open_count == 0) {
2528+		/* device not currently open; if the DSP and tuner have
2529+		 * not been found yet, look them up now
2530+		 */
2531+ if (!tr->sd_tuner) {
2532+ struct i2c_adapter *adapt;
2533+ struct i2c_client *tuner;
2534+ struct i2c_client *dsp;
2535+
2536+ /* find the I2C bus */
2537+ adapt = i2c_get_adapter(tr->pdata.i2c_adapter);
2538+ if (!adapt) {
2539+ printk(KERN_ERR DRIVER_NAME": No I2C bus\n");
2540+ err = -ENODEV;
2541+ goto out;
2542+ }
2543+
2544+ /* now find the tuner and dsp */
2545+ tuner = find_client(adapt, tr->pdata.tuner);
2546+ dsp = find_client(adapt, tr->pdata.dsp);
2547+
2548+ i2c_put_adapter(adapt);
2549+
2550+ if (!tuner || !dsp) {
2551+ printk(KERN_ERR DRIVER_NAME
2552+ ": Failed to get tuner or DSP\n");
2553+ err = -ENODEV;
2554+ goto out;
2555+ }
2556+
2557+ tr->sd_tuner = i2c_get_clientdata(tuner);
2558+ tr->sd_dsp = i2c_get_clientdata(dsp);
2559+
2560+ tr->tuner_owner = tr->sd_tuner->owner;
2561+ tr->dsp_owner = tr->sd_dsp->owner;
2562+ /* Lock the modules */
2563+ if (!try_module_get(tr->tuner_owner)) {
2564+ err = -ENODEV;
2565+ goto err_get_tuner;
2566+ }
2567+
2568+ if (!try_module_get(tr->dsp_owner)) {
2569+ err = -ENODEV;
2570+ goto err_get_dsp;
2571+ }
2572+ }
2573+
2574+ /* enable the IRQ for receiving RDS data */
2575+ err = request_irq(tr->rds_irq, timbradio_irq, 0, DRIVER_NAME,
2576+ tr);
2577+ }
2578+ goto out;
2579+
2580+err_get_dsp:
2581+ module_put(tr->tuner_owner);
2582+err_get_tuner:
2583+ tr->sd_tuner = NULL;
2584+ tr->sd_dsp = NULL;
2585+out:
2586+ if (!err)
2587+ tr->open_count++;
2588+ mutex_unlock(&tr->lock);
2589+ return err;
2590+}
2591+
2592+static int timbradio_rds_fops_release(struct file *file)
2593+{
2594+ struct timbradio *tr = video_drvdata(file);
2595+
2596+ mutex_lock(&tr->lock);
2597+ tr->open_count--;
2598+ if (!tr->open_count) {
2599+ free_irq(tr->rds_irq, tr);
2600+
2601+ tr->wr_index = 0;
2602+ tr->rd_index = 0;
2603+
2604+ /* cancel read processes */
2605+ wake_up_interruptible(&tr->read_queue);
2606+ }
2607+ mutex_unlock(&tr->lock);
2608+
2609+ return 0;
2610+}
2611+
2612+
2613+static const struct v4l2_file_operations timbradio_fops = {
2614+ .owner = THIS_MODULE,
2615+ .ioctl = video_ioctl2,
2616+ .read = timbradio_rds_fops_read,
2617+ .poll = timbradio_rds_fops_poll,
2618+ .open = timbradio_rds_fops_open,
2619+ .release = timbradio_rds_fops_release,
2620+};
2621+
2622+static const struct video_device timbradio_template = {
2623+ .name = "Timberdale Radio",
2624+ .fops = &timbradio_fops,
2625+ .ioctl_ops = &timbradio_ioctl_ops,
2626+ .release = video_device_release_empty,
2627+ .minor = -1
2628+};
2629+
2630+
2631+
2632+static int timbradio_probe(struct platform_device *pdev)
2633+{
2634+ struct timb_radio_platform_data *pdata = pdev->dev.platform_data;
2635+ struct timbradio *tr;
2636+ struct resource *iomem;
2637+ int irq;
2638+ int err;
2639+
2640+ if (!pdata) {
2641+ printk(KERN_ERR DRIVER_NAME": Platform data missing\n");
2642+ err = -EINVAL;
2643+ goto err;
2644+ }
2645+
2646+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2647+ if (!iomem) {
2648+ err = -ENODEV;
2649+ goto err;
2650+ }
2651+
2652+ irq = platform_get_irq(pdev, 0);
2653+ if (irq < 0) {
2654+ err = -ENODEV;
2655+ goto err;
2656+ }
2657+
2658+ if (!request_mem_region(iomem->start, resource_size(iomem),
2659+ DRIVER_NAME)) {
2660+ err = -EBUSY;
2661+ goto err;
2662+ }
2663+
2664+ tr = kzalloc(sizeof(*tr), GFP_KERNEL);
2665+ if (!tr) {
2666+ err = -ENOMEM;
2667+ goto err_alloc;
2668+ }
2669+ mutex_init(&tr->lock);
2670+
2671+ tr->membase = ioremap(iomem->start, resource_size(iomem));
2672+ if (!tr->membase) {
2673+ err = -ENOMEM;
2674+ goto err_ioremap;
2675+ }
2676+
2677+ memcpy(&tr->pdata, pdata, sizeof(tr->pdata));
2678+
2679+ tr->video_dev = video_device_alloc();
2680+ if (!tr->video_dev) {
2681+ err = -ENOMEM;
2682+ goto err_video_req;
2683+ }
2684+ *tr->video_dev = timbradio_template;
2685+ tr->rds_irq = irq;
2686+ init_waitqueue_head(&tr->read_queue);
2687+
2688+ err = video_register_device(tr->video_dev, VFL_TYPE_RADIO, -1);
2689+ if (err) {
2690+		printk(KERN_ALERT DRIVER_NAME": Error registering video device\n");
2691+ goto err_video_req;
2692+ }
2693+
2694+ video_set_drvdata(tr->video_dev, tr);
2695+
2696+ platform_set_drvdata(pdev, tr);
2697+ return 0;
2698+
2699+err_video_req:
2700+	if (tr->video_dev && tr->video_dev->minor != -1)
2701+		video_unregister_device(tr->video_dev);
2702+	else if (tr->video_dev)
2703+		video_device_release(tr->video_dev);
2704+ iounmap(tr->membase);
2705+err_ioremap:
2706+ kfree(tr);
2707+err_alloc:
2708+ release_mem_region(iomem->start, resource_size(iomem));
2709+err:
2710+ printk(KERN_ERR DRIVER_NAME ": Failed to register: %d\n", err);
2711+
2712+ return err;
2713+}
2714+
2715+static int timbradio_remove(struct platform_device *pdev)
2716+{
2717+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2718+ struct timbradio *tr = platform_get_drvdata(pdev);
2719+
2720+ if (tr->video_dev->minor != -1)
2721+ video_unregister_device(tr->video_dev);
2722+ else
2723+ video_device_release(tr->video_dev);
2724+
2725+ if (tr->sd_tuner) {
2726+ module_put(tr->tuner_owner);
2727+ module_put(tr->dsp_owner);
2728+ }
2729+
2730+ iounmap(tr->membase);
2731+ release_mem_region(iomem->start, resource_size(iomem));
2732+ kfree(tr);
2733+
2734+ return 0;
2735+}
2736+
2737+static struct platform_driver timbradio_platform_driver = {
2738+ .driver = {
2739+ .name = DRIVER_NAME,
2740+ .owner = THIS_MODULE,
2741+ },
2742+ .probe = timbradio_probe,
2743+ .remove = timbradio_remove,
2744+};
2745+
2746+/*--------------------------------------------------------------------------*/
2747+
2748+static int __init timbradio_init(void)
2749+{
2750+ return platform_driver_register(&timbradio_platform_driver);
2751+}
2752+
2753+static void __exit timbradio_exit(void)
2754+{
2755+ platform_driver_unregister(&timbradio_platform_driver);
2756+}
2757+
2758+module_init(timbradio_init);
2759+module_exit(timbradio_exit);
2760+
2761+MODULE_DESCRIPTION("Timberdale Radio driver");
2762+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
2763+MODULE_LICENSE("GPL v2");
2764+MODULE_ALIAS("platform:"DRIVER_NAME);
2765+
2766diff -uNr linux-2.6.31/drivers/media/radio/saa7706h.c linux-2.6.31.new/drivers/media/radio/saa7706h.c
2767--- linux-2.6.31/drivers/media/radio/saa7706h.c 1969-12-31 16:00:00.000000000 -0800
2768+++ linux-2.6.31.new/drivers/media/radio/saa7706h.c 2009-10-23 11:17:28.000000000 -0700
2769@@ -0,0 +1,496 @@
2770+/*
2771+ * saa7706.c Philips SAA7706H Car Radio DSP driver
2772+ * Copyright (c) 2009 Intel Corporation
2773+ *
2774+ * This program is free software; you can redistribute it and/or modify
2775+ * it under the terms of the GNU General Public License version 2 as
2776+ * published by the Free Software Foundation.
2777+ *
2778+ * This program is distributed in the hope that it will be useful,
2779+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
2780+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2781+ * GNU General Public License for more details.
2782+ *
2783+ * You should have received a copy of the GNU General Public License
2784+ * along with this program; if not, write to the Free Software
2785+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
2786+ */
2787+
2788+#include <linux/module.h>
2789+#include <linux/init.h>
2790+#include <linux/errno.h>
2791+#include <linux/kernel.h>
2792+#include <linux/interrupt.h>
2793+#include <linux/i2c.h>
2794+#include <linux/i2c-id.h>
2795+#include <media/v4l2-ioctl.h>
2796+#include <media/v4l2-device.h>
2797+#include <media/v4l2-chip-ident.h>
2798+
2799+#define DRIVER_NAME "saa7706h"
2800+
2801+/* the I2C memory map looks like this
2802+
2803+ $1C00 - $FFFF Not Used
2804+ $2200 - $3FFF Reserved YRAM (DSP2) space
2805+ $2000 - $21FF YRAM (DSP2)
2806+ $1FF0 - $1FFF Hardware Registers
2807+ $1280 - $1FEF Reserved XRAM (DSP2) space
2808+ $1000 - $127F XRAM (DSP2)
2809+ $0FFF DSP CONTROL
2810+ $0A00 - $0FFE Reserved
2811+ $0980 - $09FF Reserved YRAM (DSP1) space
2812+ $0800 - $097F YRAM (DSP1)
2813+ $0200 - $07FF Not Used
2814+ $0180 - $01FF Reserved XRAM (DSP1) space
2815+ $0000 - $017F XRAM (DSP1)
2816+*/
2817+
2818+#define SAA7706H_REG_CTRL 0x0fff
2819+#define SAA7706H_CTRL_BYP_PLL 0x0001
2820+#define SAA7706H_CTRL_PLL_DIV_MASK 0x003e
2821+#define SAA7706H_CTRL_PLL3_62975MHZ 0x003e
2822+#define SAA7706H_CTRL_DSP_TURBO 0x0040
2823+#define SAA7706H_CTRL_PC_RESET_DSP1 0x0080
2824+#define SAA7706H_CTRL_PC_RESET_DSP2 0x0100
2825+#define SAA7706H_CTRL_DSP1_ROM_EN_MASK 0x0600
2826+#define SAA7706H_CTRL_DSP1_FUNC_PROM 0x0000
2827+#define SAA7706H_CTRL_DSP2_ROM_EN_MASK 0x1800
2828+#define SAA7706H_CTRL_DSP2_FUNC_PROM 0x0000
2829+#define SAA7706H_CTRL_DIG_SIL_INTERPOL 0x8000
2830+
2831+#define SAA7706H_REG_EVALUATION 0x1ff0
2832+#define SAA7706H_EVAL_DISABLE_CHARGE_PUMP 0x000001
2833+#define SAA7706H_EVAL_DCS_CLOCK 0x000002
2834+#define SAA7706H_EVAL_GNDRC1_ENABLE 0x000004
2835+#define SAA7706H_EVAL_GNDRC2_ENABLE 0x000008
2836+
2837+#define SAA7706H_REG_CL_GEN1 0x1ff3
2838+#define SAA7706H_CL_GEN1_MIN_LOOPGAIN_MASK 0x00000f
2839+#define SAA7706H_CL_GEN1_LOOPGAIN_MASK 0x0000f0
2840+#define SAA7706H_CL_GEN1_COARSE_RATION 0xffff00
2841+
2842+#define SAA7706H_REG_CL_GEN2 0x1ff4
2843+#define SAA7706H_CL_GEN2_WSEDGE_FALLING 0x000001
2844+#define SAA7706H_CL_GEN2_STOP_VCO 0x000002
2845+#define SAA7706H_CL_GEN2_FRERUN 0x000004
2846+#define SAA7706H_CL_GEN2_ADAPTIVE 0x000008
2847+#define SAA7706H_CL_GEN2_FINE_RATIO_MASK 0x0ffff0
2848+
2849+#define SAA7706H_REG_CL_GEN4 0x1ff6
2850+#define SAA7706H_CL_GEN4_BYPASS_PLL1 0x001000
2851+#define SAA7706H_CL_GEN4_PLL1_DIV_MASK 0x03e000
2852+#define SAA7706H_CL_GEN4_DSP1_TURBO 0x040000
2853+
2854+#define SAA7706H_REG_SEL 0x1ff7
2855+#define SAA7706H_SEL_DSP2_SRCA_MASK 0x000007
2856+#define SAA7706H_SEL_DSP2_FMTA_MASK 0x000031
2857+#define SAA7706H_SEL_DSP2_SRCB_MASK 0x0001c0
2858+#define SAA7706H_SEL_DSP2_FMTB_MASK 0x000e00
2859+#define SAA7706H_SEL_DSP1_SRC_MASK 0x003000
2860+#define SAA7706H_SEL_DSP1_FMT_MASK 0x01c003
2861+#define SAA7706H_SEL_SPDIF2 0x020000
2862+#define SAA7706H_SEL_HOST_IO_FMT_MASK 0x1c0000
2863+#define SAA7706H_SEL_EN_HOST_IO 0x200000
2864+
2865+#define SAA7706H_REG_IAC 0x1ff8
2866+#define SAA7706H_REG_CLK_SET 0x1ff9
2867+#define SAA7706H_REG_CLK_COEFF 0x1ffa
2868+#define SAA7706H_REG_INPUT_SENS 0x1ffb
2869+#define SAA7706H_INPUT_SENS_RDS_VOL_MASK 0x0003f
2870+#define SAA7706H_INPUT_SENS_FM_VOL_MASK 0x00fc0
2871+#define SAA7706H_INPUT_SENS_FM_MPX 0x01000
2872+#define SAA7706H_INPUT_SENS_OFF_FILTER_A_EN 0x02000
2873+#define SAA7706H_INPUT_SENS_OFF_FILTER_B_EN 0x04000
2874+#define SAA7706H_REG_PHONE_NAV_AUDIO 0x1ffc
2875+#define SAA7706H_REG_IO_CONF_DSP2 0x1ffd
2876+#define SAA7706H_REG_STATUS_DSP2 0x1ffe
2877+#define SAA7706H_REG_PC_DSP2 0x1fff
2878+
2879+#define SAA7706H_DSP1_MOD0 0x0800
2880+#define SAA7706H_DSP1_ROM_VER 0x097f
2881+#define SAA7706H_DSP2_MPTR0 0x1000
2882+
2883+#define SAA7706H_DSP1_MODPNTR 0x0000
2884+
2885+#define SAA7706H_DSP2_XMEM_CONTLLCW 0x113e
2886+#define SAA7706H_DSP2_XMEM_BUSAMP 0x114a
2887+#define SAA7706H_DSP2_XMEM_FDACPNTR 0x11f9
2888+#define SAA7706H_DSP2_XMEM_IIS1PNTR 0x11fb
2889+
2890+#define SAA7706H_DSP2_YMEM_PVGA 0x212a
2891+#define SAA7706H_DSP2_YMEM_PVAT1 0x212b
2892+#define SAA7706H_DSP2_YMEM_PVAT 0x212c
2893+#define SAA7706H_DSP2_YMEM_ROM_VER 0x21ff
2894+
2895+#define SUPPORTED_DSP1_ROM_VER 0x667
2896+
2897+struct saa7706h_state {
2898+ struct v4l2_subdev sd;
2899+ unsigned muted;
2900+};
2901+
2902+static inline struct saa7706h_state *to_state(struct v4l2_subdev *sd)
2903+{
2904+ return container_of(sd, struct saa7706h_state, sd);
2905+}
2906+
2907+static int saa7706h_i2c_send(struct i2c_client *client, const u8 *data, int len)
2908+{
2909+ int err = i2c_master_send(client, data, len);
2910+ if (err == len)
2911+ return 0;
2912+ else if (err > 0)
2913+ return -EIO;
2914+ return err;
2915+}
2916+
2917+static int saa7706h_i2c_transfer(struct i2c_client *client,
2918+ struct i2c_msg *msgs, int num)
2919+{
2920+ int err = i2c_transfer(client->adapter, msgs, num);
2921+ if (err == num)
2922+ return 0;
2923+ else if (err > 0)
2924+ return -EIO;
2925+ else
2926+ return err;
2927+}
2928+
2929+static int saa7706h_set_reg24(struct i2c_client *client, u16 reg, u32 val)
2930+{
2931+ u8 buf[5];
2932+ int pos = 0;
2933+
2934+ buf[pos++] = reg >> 8;
2935+ buf[pos++] = reg;
2936+ buf[pos++] = val >> 16;
2937+ buf[pos++] = val >> 8;
2938+ buf[pos++] = val;
2939+
2940+ return saa7706h_i2c_send(client, buf, pos);
2941+}
2942+
2943+static int saa7706h_set_reg16(struct i2c_client *client, u16 reg, u16 val)
2944+{
2945+ u8 buf[4];
2946+ int pos = 0;
2947+
2948+ buf[pos++] = reg >> 8;
2949+ buf[pos++] = reg;
2950+ buf[pos++] = val >> 8;
2951+ buf[pos++] = val;
2952+
2953+ return saa7706h_i2c_send(client, buf, pos);
2954+}
2955+
2956+static int saa7706h_get_reg16(struct i2c_client *client, u16 reg)
2957+{
2958+ u8 buf[2];
2959+ int err;
2960+ u8 regaddr[] = {reg >> 8, reg};
2961+ struct i2c_msg msg[] = { {client->addr, 0, sizeof(regaddr), regaddr},
2962+ {client->addr, I2C_M_RD, sizeof(buf), buf} };
2963+
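+	/*
+	 * One combined transfer: write the 16-bit register address,
+	 * then read the register contents back as two bytes, MSB first.
+	 */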
2964+ err = saa7706h_i2c_transfer(client, msg, ARRAY_SIZE(msg));
2965+ if (err)
2966+ return err;
2967+
2968+ return buf[0] << 8 | buf[1];
2969+}
2970+
2971+static int saa7706h_unmute(struct v4l2_subdev *sd)
2972+{
2973+ struct i2c_client *client = v4l2_get_subdevdata(sd);
2974+ struct saa7706h_state *state = to_state(sd);
2975+ int err;
2976+
2977+ err = saa7706h_set_reg16(client, SAA7706H_REG_CTRL,
2978+ SAA7706H_CTRL_PLL3_62975MHZ | SAA7706H_CTRL_PC_RESET_DSP1 |
2979+ SAA7706H_CTRL_PC_RESET_DSP2);
2980+ if (err)
2981+ goto out;
2982+
2983+	/* newer versions of the chip require a small sleep after reset */
2984+ msleep(1);
2985+
2986+ err = saa7706h_set_reg16(client, SAA7706H_REG_CTRL,
2987+ SAA7706H_CTRL_PLL3_62975MHZ);
2988+ if (err)
2989+ goto out;
2990+
2991+ err = saa7706h_set_reg24(client, SAA7706H_REG_EVALUATION, 0);
2992+ if (err)
2993+ goto out;
2994+
2995+ err = saa7706h_set_reg24(client, SAA7706H_REG_CL_GEN1, 0x040022);
2996+ if (err)
2997+ goto out;
2998+
2999+ err = saa7706h_set_reg24(client, SAA7706H_REG_CL_GEN2,
3000+ SAA7706H_CL_GEN2_WSEDGE_FALLING);
3001+ if (err)
3002+ goto out;
3003+
3004+ err = saa7706h_set_reg24(client, SAA7706H_REG_CL_GEN4, 0x024080);
3005+ if (err)
3006+ goto out;
3007+
3008+ err = saa7706h_set_reg24(client, SAA7706H_REG_SEL, 0x200080);
3009+ if (err)
3010+ goto out;
3011+
3012+ err = saa7706h_set_reg24(client, SAA7706H_REG_IAC, 0xf4caed);
3013+ if (err)
3014+ goto out;
3015+
3016+ err = saa7706h_set_reg24(client, SAA7706H_REG_CLK_SET, 0x124334);
3017+ if (err)
3018+ goto out;
3019+
3020+ err = saa7706h_set_reg24(client, SAA7706H_REG_CLK_COEFF, 0x004a1a);
3021+ if (err)
3022+ goto out;
3023+
3024+ err = saa7706h_set_reg24(client, SAA7706H_REG_INPUT_SENS, 0x0071c7);
3025+ if (err)
3026+ goto out;
3027+
3028+ err = saa7706h_set_reg24(client, SAA7706H_REG_PHONE_NAV_AUDIO,
3029+ 0x0e22ff);
3030+ if (err)
3031+ goto out;
3032+
3033+ err = saa7706h_set_reg24(client, SAA7706H_REG_IO_CONF_DSP2, 0x001ff8);
3034+ if (err)
3035+ goto out;
3036+
3037+ err = saa7706h_set_reg24(client, SAA7706H_REG_STATUS_DSP2, 0x080003);
3038+ if (err)
3039+ goto out;
3040+
3041+ err = saa7706h_set_reg24(client, SAA7706H_REG_PC_DSP2, 0x000004);
3042+ if (err)
3043+ goto out;
3044+
3045+ err = saa7706h_set_reg16(client, SAA7706H_DSP1_MOD0, 0x0c6c);
3046+ if (err)
3047+ goto out;
3048+
3049+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_MPTR0, 0x000b4b);
3050+ if (err)
3051+ goto out;
3052+
3053+ err = saa7706h_set_reg24(client, SAA7706H_DSP1_MODPNTR, 0x000600);
3054+ if (err)
3055+ goto out;
3056+
3057+ err = saa7706h_set_reg24(client, SAA7706H_DSP1_MODPNTR, 0x0000c0);
3058+ if (err)
3059+ goto out;
3060+
3061+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_XMEM_CONTLLCW, 0x000819);
3062+ if (err)
3063+ goto out;
3064+
3065+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_XMEM_CONTLLCW, 0x00085a);
3066+ if (err)
3067+ goto out;
3068+
3069+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_XMEM_BUSAMP, 0x7fffff);
3070+ if (err)
3071+ goto out;
3072+
3073+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_XMEM_FDACPNTR, 0x2000cb);
3074+ if (err)
3075+ goto out;
3076+
3077+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_XMEM_IIS1PNTR, 0x2000cb);
3078+ if (err)
3079+ goto out;
3080+
3081+ err = saa7706h_set_reg16(client, SAA7706H_DSP2_YMEM_PVGA, 0x0f80);
3082+ if (err)
3083+ goto out;
3084+
3085+ err = saa7706h_set_reg16(client, SAA7706H_DSP2_YMEM_PVAT1, 0x0800);
3086+ if (err)
3087+ goto out;
3088+
3089+ err = saa7706h_set_reg16(client, SAA7706H_DSP2_YMEM_PVAT, 0x0800);
3090+ if (err)
3091+ goto out;
3092+
3093+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_XMEM_CONTLLCW, 0x000905);
3094+ if (err)
3095+ goto out;
3096+
3097+ state->muted = 0;
3098+out:
3099+ return err;
3100+}
3101+
3102+static int saa7706h_mute(struct v4l2_subdev *sd)
3103+{
3104+ struct i2c_client *client = v4l2_get_subdevdata(sd);
3105+ struct saa7706h_state *state = to_state(sd);
3106+ int err;
3107+
3108+ err = saa7706h_set_reg16(client, SAA7706H_REG_CTRL,
3109+ SAA7706H_CTRL_PLL3_62975MHZ | SAA7706H_CTRL_PC_RESET_DSP1 |
3110+ SAA7706H_CTRL_PC_RESET_DSP2);
3111+ if (err)
3112+ goto out;
3113+
3114+ state->muted = 1;
3115+out:
3116+ return err;
3117+}
3118+
3119+static int saa7706h_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
3120+{
3121+ switch (qc->id) {
3122+ case V4L2_CID_AUDIO_MUTE:
3123+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
3124+ }
3125+ return -EINVAL;
3126+}
3127+
3128+static int saa7706h_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
3129+{
3130+ struct saa7706h_state *state = to_state(sd);
3131+
3132+ switch (ctrl->id) {
3133+ case V4L2_CID_AUDIO_MUTE:
3134+ ctrl->value = state->muted;
3135+ return 0;
3136+ }
3137+ return -EINVAL;
3138+}
3139+
3140+static int saa7706h_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
3141+{
3142+ switch (ctrl->id) {
3143+ case V4L2_CID_AUDIO_MUTE:
3144+ if (ctrl->value)
3145+ return saa7706h_mute(sd);
3146+ else
3147+ return saa7706h_unmute(sd);
3148+ }
3149+ return -EINVAL;
3150+}
3151+
3152+static int saa7706h_g_chip_ident(struct v4l2_subdev *sd,
3153+ struct v4l2_dbg_chip_ident *chip)
3154+{
3155+ struct i2c_client *client = v4l2_get_subdevdata(sd);
3156+
3157+ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_SAA7706H, 0);
3158+}
3159+
3160+static const struct v4l2_subdev_core_ops saa7706h_core_ops = {
3161+ .g_chip_ident = saa7706h_g_chip_ident,
3162+ .queryctrl = saa7706h_queryctrl,
3163+ .g_ctrl = saa7706h_g_ctrl,
3164+ .s_ctrl = saa7706h_s_ctrl,
3165+};
3166+
3167+static const struct v4l2_subdev_ops saa7706h_ops = {
3168+ .core = &saa7706h_core_ops,
3169+};
3170+
3171+/*
3172+ * Generic i2c probe
3173+ * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
3174+ */
3175+
3176+static int __devinit saa7706h_probe(struct i2c_client *client,
3177+ const struct i2c_device_id *id)
3178+{
3179+ struct saa7706h_state *state;
3180+ struct v4l2_subdev *sd;
3181+ int err;
3182+
3183+ /* Check if the adapter supports the needed features */
3184+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
3185+ return -EIO;
3186+
3187+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
3188+ client->addr << 1, client->adapter->name);
3189+
3190+ state = kmalloc(sizeof(struct saa7706h_state), GFP_KERNEL);
3191+ if (state == NULL)
3192+ return -ENOMEM;
3193+ sd = &state->sd;
3194+ v4l2_i2c_subdev_init(sd, client, &saa7706h_ops);
3195+
3196+ /* check the rom versions */
3197+ err = saa7706h_get_reg16(client, SAA7706H_DSP1_ROM_VER);
3198+ if (err < 0)
3199+ goto err;
3200+ if (err != SUPPORTED_DSP1_ROM_VER)
3201+ printk(KERN_WARNING DRIVER_NAME
3202+ ": Unknown DSP1 ROM code version: 0x%x\n", err);
3203+
3204+ state->muted = 1;
3205+
3206+ /* startup in a muted state */
3207+ err = saa7706h_mute(sd);
3208+ if (err)
3209+ goto err;
3210+
3211+ return 0;
3212+
3213+err:
3214+ v4l2_device_unregister_subdev(sd);
3215+ kfree(to_state(sd));
3216+
3217+ printk(KERN_ERR DRIVER_NAME ": Failed to probe: %d\n", err);
3218+
3219+ return err;
3220+}
3221+
3222+static int __devexit saa7706h_remove(struct i2c_client *client)
3223+{
3224+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
3225+
3226+ saa7706h_mute(sd);
3227+ v4l2_device_unregister_subdev(sd);
3228+ kfree(to_state(sd));
3229+ return 0;
3230+}
3231+
3232+static const struct i2c_device_id saa7706h_id[] = {
3233+ {DRIVER_NAME, 0},
3234+ {},
3235+};
3236+
3237+MODULE_DEVICE_TABLE(i2c, saa7706h_id);
3238+
3239+static struct i2c_driver saa7706h_driver = {
3240+ .driver = {
3241+ .owner = THIS_MODULE,
3242+ .name = DRIVER_NAME,
3243+ },
3244+ .probe = saa7706h_probe,
3245+ .remove = saa7706h_remove,
3246+ .id_table = saa7706h_id,
3247+};
3248+
3249+static __init int saa7706h_init(void)
3250+{
3251+ return i2c_add_driver(&saa7706h_driver);
3252+}
3253+
3254+static __exit void saa7706h_exit(void)
3255+{
3256+ i2c_del_driver(&saa7706h_driver);
3257+}
3258+
3259+module_init(saa7706h_init);
3260+module_exit(saa7706h_exit);
3261+
3262+MODULE_DESCRIPTION("SAA7706H Car Radio DSP driver");
3263+MODULE_AUTHOR("Mocean Laboratories");
3264+MODULE_LICENSE("GPL v2");
3265+
3266diff -uNr linux-2.6.31/drivers/media/radio/tef6862.c linux-2.6.31.new/drivers/media/radio/tef6862.c
3267--- linux-2.6.31/drivers/media/radio/tef6862.c 1969-12-31 16:00:00.000000000 -0800
3268+++ linux-2.6.31.new/drivers/media/radio/tef6862.c 2009-10-23 11:17:28.000000000 -0700
3269@@ -0,0 +1,232 @@
3270+/*
3271+ * tef6862.c Philips TEF6862 Car Radio Enhanced Selectivity Tuner
3272+ * Copyright (c) 2009 Intel Corporation
3273+ *
3274+ * This program is free software; you can redistribute it and/or modify
3275+ * it under the terms of the GNU General Public License version 2 as
3276+ * published by the Free Software Foundation.
3277+ *
3278+ * This program is distributed in the hope that it will be useful,
3279+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
3280+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3281+ * GNU General Public License for more details.
3282+ *
3283+ * You should have received a copy of the GNU General Public License
3284+ * along with this program; if not, write to the Free Software
3285+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
3286+ */
3287+
3288+#include <linux/module.h>
3289+#include <linux/init.h>
3290+#include <linux/errno.h>
3291+#include <linux/kernel.h>
3292+#include <linux/interrupt.h>
3293+#include <linux/i2c.h>
3294+#include <linux/i2c-id.h>
3295+#include <media/v4l2-ioctl.h>
3296+#include <media/v4l2-device.h>
3297+#include <media/v4l2-chip-ident.h>
3298+
3299+#define DRIVER_NAME "tef6862"
3300+
3301+#define FREQ_MUL 16000
3302+
3303+#define TEF6862_LO_FREQ (875 * FREQ_MUL / 10)
3304+#define TEF6862_HI_FREQ (108 * FREQ_MUL)
3305+
3306+/* Write mode sub addresses */
3307+#define WM_SUB_BANDWIDTH 0x0
3308+#define WM_SUB_PLLM 0x1
3309+#define WM_SUB_PLLL 0x2
3310+#define WM_SUB_DAA 0x3
3311+#define WM_SUB_AGC 0x4
3312+#define WM_SUB_BAND 0x5
3313+#define WM_SUB_CONTROL 0x6
3314+#define WM_SUB_LEVEL 0x7
3315+#define WM_SUB_IFCF 0x8
3316+#define WM_SUB_IFCAP 0x9
3317+#define WM_SUB_ACD 0xA
3318+#define WM_SUB_TEST 0xF
3319+
3320+/* Different modes of the MSA register */
3321+#define MODE_BUFFER 0x0
3322+#define MODE_PRESET 0x1
3323+#define MODE_SEARCH 0x2
3324+#define MODE_AF_UPDATE 0x3
3325+#define MODE_JUMP 0x4
3326+#define MODE_CHECK 0x5
3327+#define MODE_LOAD 0x6
3328+#define MODE_END 0x7
3329+#define MODE_SHIFT 5
3330+
3331+struct tef6862_state {
3332+ struct v4l2_subdev sd;
3333+ unsigned long freq;
3334+};
3335+
3336+static inline struct tef6862_state *to_state(struct v4l2_subdev *sd)
3337+{
3338+ return container_of(sd, struct tef6862_state, sd);
3339+}
3340+
3341+static u16 tef6862_sigstr(struct i2c_client *client)
3342+{
3343+ u8 buf[4];
3344+ int err = i2c_master_recv(client, buf, sizeof(buf));
3345+ if (err == sizeof(buf))
3346+ return buf[3] << 8;
3347+ return 0;
3348+}
3349+
3350+static int tef6862_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *v)
3351+{
3352+ if (v->index > 0)
3353+ return -EINVAL;
3354+
3355+ /* only support FM for now */
3356+ strlcpy(v->name, "FM", sizeof(v->name));
3357+ v->type = V4L2_TUNER_RADIO;
3358+ v->rangelow = TEF6862_LO_FREQ;
3359+ v->rangehigh = TEF6862_HI_FREQ;
3360+ v->rxsubchans = V4L2_TUNER_SUB_MONO;
3361+ v->capability = V4L2_TUNER_CAP_LOW;
3362+ v->audmode = V4L2_TUNER_MODE_STEREO;
3363+ v->signal = tef6862_sigstr(v4l2_get_subdevdata(sd));
3364+
3365+ return 0;
3366+}
3367+
3368+static int tef6862_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *v)
3369+{
3370+ return v->index ? -EINVAL : 0;
3371+}
3372+
3373+static int tef6862_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
3374+{
3375+ struct tef6862_state *state = to_state(sd);
3376+ struct i2c_client *client = v4l2_get_subdevdata(sd);
3377+ u16 pll;
3378+ u8 i2cmsg[3];
3379+ int err;
3380+
3381+ if (f->tuner != 0)
3382+ return -EINVAL;
3383+
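+	/*
+	 * Frequencies here are in units of 1/FREQ_MUL MHz, so the
+	 * expression below amounts to 20 PLL steps per MHz, i.e. a 50 kHz
+	 * tuning step, with 1964 presumably being the PLL word for the
+	 * 87.5 MHz band edge.
+	 */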
3384+ pll = 1964 + ((f->frequency - TEF6862_LO_FREQ) * 20) / FREQ_MUL;
3385+ i2cmsg[0] = (MODE_PRESET << MODE_SHIFT) | WM_SUB_PLLM;
3386+ i2cmsg[1] = (pll >> 8) & 0xff;
3387+ i2cmsg[2] = pll & 0xff;
3388+
3389+ err = i2c_master_send(client, i2cmsg, sizeof(i2cmsg));
3390+	if (err != sizeof(i2cmsg))
3391+		return err < 0 ? err : -EIO;
3392+	state->freq = f->frequency;
+	return 0;
3393+}
3394+
3395+static int tef6862_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
3396+{
3397+ struct tef6862_state *state = to_state(sd);
3398+
3399+ if (f->tuner != 0)
3400+ return -EINVAL;
3401+ f->type = V4L2_TUNER_RADIO;
3402+ f->frequency = state->freq;
3403+ return 0;
3404+}
3405+
3406+static int tef6862_g_chip_ident(struct v4l2_subdev *sd,
3407+ struct v4l2_dbg_chip_ident *chip)
3408+{
3409+ struct i2c_client *client = v4l2_get_subdevdata(sd);
3410+
3411+ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_TEF6862, 0);
3412+}
3413+
3414+static const struct v4l2_subdev_tuner_ops tef6862_tuner_ops = {
3415+ .g_tuner = tef6862_g_tuner,
3416+ .s_tuner = tef6862_s_tuner,
3417+ .s_frequency = tef6862_s_frequency,
3418+ .g_frequency = tef6862_g_frequency,
3419+};
3420+
3421+static const struct v4l2_subdev_core_ops tef6862_core_ops = {
3422+ .g_chip_ident = tef6862_g_chip_ident,
3423+};
3424+
3425+static const struct v4l2_subdev_ops tef6862_ops = {
3426+ .core = &tef6862_core_ops,
3427+ .tuner = &tef6862_tuner_ops,
3428+};
3429+
3430+/*
3431+ * Generic i2c probe
3432+ * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
3433+ */
3434+
3435+static int __devinit tef6862_probe(struct i2c_client *client,
3436+ const struct i2c_device_id *id)
3437+{
3438+ struct tef6862_state *state;
3439+ struct v4l2_subdev *sd;
3440+
3441+ /* Check if the adapter supports the needed features */
3442+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
3443+ return -EIO;
3444+
3445+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
3446+ client->addr << 1, client->adapter->name);
3447+
3448+ state = kmalloc(sizeof(struct tef6862_state), GFP_KERNEL);
3449+ if (state == NULL)
3450+ return -ENOMEM;
3451+ state->freq = TEF6862_LO_FREQ;
3452+
3453+ sd = &state->sd;
3454+ v4l2_i2c_subdev_init(sd, client, &tef6862_ops);
3455+
3456+ return 0;
3457+}
3458+
3459+static int __devexit tef6862_remove(struct i2c_client *client)
3460+{
3461+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
3462+
3463+ v4l2_device_unregister_subdev(sd);
3464+ kfree(to_state(sd));
3465+ return 0;
3466+}
3467+
3468+static const struct i2c_device_id tef6862_id[] = {
3469+ {DRIVER_NAME, 0},
3470+ {},
3471+};
3472+
3473+MODULE_DEVICE_TABLE(i2c, tef6862_id);
3474+
3475+static struct i2c_driver tef6862_driver = {
3476+ .driver = {
3477+ .owner = THIS_MODULE,
3478+ .name = DRIVER_NAME,
3479+ },
3480+ .probe = tef6862_probe,
3481+ .remove = tef6862_remove,
3482+ .id_table = tef6862_id,
3483+};
3484+
3485+static __init int tef6862_init(void)
3486+{
3487+ return i2c_add_driver(&tef6862_driver);
3488+}
3489+
3490+static __exit void tef6862_exit(void)
3491+{
3492+ i2c_del_driver(&tef6862_driver);
3493+}
3494+
3495+module_init(tef6862_init);
3496+module_exit(tef6862_exit);
3497+
3498+MODULE_DESCRIPTION("TEF6862 Car Radio Enhanced Selectivity Tuner");
3499+MODULE_AUTHOR("Mocean Laboratories");
3500+MODULE_LICENSE("GPL v2");
3501+
3502diff -uNr linux-2.6.31/drivers/media/video/adv7180.c linux-2.6.31.new/drivers/media/video/adv7180.c
3503--- linux-2.6.31/drivers/media/video/adv7180.c 1969-12-31 16:00:00.000000000 -0800
3504+++ linux-2.6.31.new/drivers/media/video/adv7180.c 2009-10-23 11:17:28.000000000 -0700
3505@@ -0,0 +1,475 @@
3506+/*
3507+ * adv7180.c Analog Devices ADV7180 video decoder driver
3508+ * Copyright (c) 2009 Intel Corporation
3509+ *
3510+ * This program is free software; you can redistribute it and/or modify
3511+ * it under the terms of the GNU General Public License version 2 as
3512+ * published by the Free Software Foundation.
3513+ *
3514+ * This program is distributed in the hope that it will be useful,
3515+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
3516+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3517+ * GNU General Public License for more details.
3518+ *
3519+ * You should have received a copy of the GNU General Public License
3520+ * along with this program; if not, write to the Free Software
3521+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
3522+ */
3523+
3524+#include <linux/module.h>
3525+#include <linux/init.h>
3526+#include <linux/errno.h>
3527+#include <linux/kernel.h>
3528+#include <linux/interrupt.h>
3529+#include <linux/i2c.h>
3530+#include <linux/i2c-id.h>
3531+#include <media/v4l2-ioctl.h>
3532+#include <linux/videodev2.h>
3533+#include <media/v4l2-device.h>
3534+#include <media/v4l2-chip-ident.h>
3535+#include <linux/mutex.h>
3536+
3537+#define DRIVER_NAME "adv7180"
3538+
3539+#define ADV7180_INPUT_CONTROL_REG 0x00
3540+#define ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM 0x00
3541+#define ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM_PED 0x10
3542+#define ADV7180_INPUT_CONTROL_AD_PAL_N_NTSC_J_SECAM 0x20
3543+#define ADV7180_INPUT_CONTROL_AD_PAL_N_NTSC_M_SECAM 0x30
3544+#define ADV7180_INPUT_CONTROL_NTSC_J 0x40
3545+#define ADV7180_INPUT_CONTROL_NTSC_M 0x50
3546+#define ADV7180_INPUT_CONTROL_PAL60 0x60
3547+#define ADV7180_INPUT_CONTROL_NTSC_443 0x70
3548+#define ADV7180_INPUT_CONTROL_PAL_BG 0x80
3549+#define ADV7180_INPUT_CONTROL_PAL_N 0x90
3550+#define ADV7180_INPUT_CONTROL_PAL_M 0xa0
3551+#define ADV7180_INPUT_CONTROL_PAL_M_PED 0xb0
3552+#define ADV7180_INPUT_CONTROL_PAL_COMB_N 0xc0
3553+#define ADV7180_INPUT_CONTROL_PAL_COMB_N_PED 0xd0
3554+#define ADV7180_INPUT_CONTROL_PAL_SECAM 0xe0
3555+#define ADV7180_INPUT_CONTROL_PAL_SECAM_PED 0xf0
3556+
3557+#define ADV7180_EXTENDED_OUTPUT_CONTROL_REG 0x04
3558+#define ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS 0xC5
3559+
3560+#define ADV7180_AUTODETECT_ENABLE_REG 0x07
3561+#define ADV7180_AUTODETECT_DEFAULT 0x7f
3562+
3563+#define ADV7180_ADI_CTRL_REG 0x0e
3564+#define ADV7180_ADI_CTRL_IRQ_SPACE 0x20
3565+
3566+#define ADV7180_STATUS1_REG 0x10
3567+#define ADV7180_STATUS1_IN_LOCK 0x01
3568+#define ADV7180_STATUS1_AUTOD_MASK 0x70
3569+#define ADV7180_STATUS1_AUTOD_NTSM_M_J 0x00
3570+#define ADV7180_STATUS1_AUTOD_NTSC_4_43 0x10
3571+#define ADV7180_STATUS1_AUTOD_PAL_M 0x20
3572+#define ADV7180_STATUS1_AUTOD_PAL_60 0x30
3573+#define ADV7180_STATUS1_AUTOD_PAL_B_G 0x40
3574+#define ADV7180_STATUS1_AUTOD_SECAM 0x50
3575+#define ADV7180_STATUS1_AUTOD_PAL_COMB 0x60
3576+#define ADV7180_STATUS1_AUTOD_SECAM_525 0x70
3577+
3578+#define ADV7180_IDENT_REG 0x11
3579+#define ADV7180_ID_7180 0x18
3580+
3581+#define ADV7180_ICONF1_ADI 0x40
3582+#define ADV7180_ICONF1_ACTIVE_LOW 0x01
3583+#define ADV7180_ICONF1_PSYNC_ONLY 0x10
3584+#define ADV7180_ICONF1_ACTIVE_TO_CLR 0xC0
3585+
3586+#define ADV7180_IRQ1_LOCK 0x01
3587+#define ADV7180_IRQ1_UNLOCK 0x02
3588+#define ADV7180_ISR1_ADI 0x42
3589+#define ADV7180_ICR1_ADI 0x43
3590+#define ADV7180_IMR1_ADI 0x44
3591+#define ADV7180_IMR2_ADI 0x48
3592+#define ADV7180_IRQ3_AD_CHANGE 0x08
3593+#define ADV7180_ISR3_ADI 0x4A
3594+#define ADV7180_ICR3_ADI 0x4B
3595+#define ADV7180_IMR3_ADI 0x4C
3596+#define ADV7180_IMR4_ADI 0x50
3597+
3598+#define ADV7180_NTSC_V_BIT_END_REG 0xE6
3599+#define ADV7180_NTSC_V_BIT_END_MANUAL_NVEND 0x4F
3600+
3601+struct adv7180_state {
3602+ struct v4l2_subdev sd;
3603+ struct work_struct work;
3604+ struct mutex mutex; /* mutual excl. when accessing chip */
3605+ int irq;
3606+ v4l2_std_id curr_norm;
3607+ bool autodetect;
3608+};
3609+
3610+static v4l2_std_id adv7180_std_to_v4l2(u8 status1)
3611+{
3612+ switch (status1 & ADV7180_STATUS1_AUTOD_MASK) {
3613+ case ADV7180_STATUS1_AUTOD_NTSM_M_J:
3614+ return V4L2_STD_NTSC;
3615+ case ADV7180_STATUS1_AUTOD_NTSC_4_43:
3616+ return V4L2_STD_NTSC_443;
3617+ case ADV7180_STATUS1_AUTOD_PAL_M:
3618+ return V4L2_STD_PAL_M;
3619+ case ADV7180_STATUS1_AUTOD_PAL_60:
3620+ return V4L2_STD_PAL_60;
3621+ case ADV7180_STATUS1_AUTOD_PAL_B_G:
3622+ return V4L2_STD_PAL;
3623+ case ADV7180_STATUS1_AUTOD_SECAM:
3624+ return V4L2_STD_SECAM;
3625+ case ADV7180_STATUS1_AUTOD_PAL_COMB:
3626+ return V4L2_STD_PAL_Nc | V4L2_STD_PAL_N;
3627+ case ADV7180_STATUS1_AUTOD_SECAM_525:
3628+ return V4L2_STD_SECAM;
3629+ default:
3630+ return V4L2_STD_UNKNOWN;
3631+ }
3632+}
3633+
3634+static int v4l2_std_to_adv7180(v4l2_std_id std)
3635+{
3636+ if (std == V4L2_STD_PAL_60)
3637+ return ADV7180_INPUT_CONTROL_PAL60;
3638+ if (std == V4L2_STD_NTSC_443)
3639+ return ADV7180_INPUT_CONTROL_NTSC_443;
3640+ if (std == V4L2_STD_PAL_N)
3641+ return ADV7180_INPUT_CONTROL_PAL_N;
3642+ if (std == V4L2_STD_PAL_M)
3643+ return ADV7180_INPUT_CONTROL_PAL_M;
3644+ if (std == V4L2_STD_PAL_Nc)
3645+ return ADV7180_INPUT_CONTROL_PAL_COMB_N;
3646+
3647+	/* PAL is a combination of several variants */
3648+ if (std & V4L2_STD_PAL)
3649+ return ADV7180_INPUT_CONTROL_PAL_BG;
3650+ if (std & V4L2_STD_NTSC)
3651+ return ADV7180_INPUT_CONTROL_NTSC_M;
3652+ if (std & V4L2_STD_SECAM)
3653+ return ADV7180_INPUT_CONTROL_PAL_SECAM;
3654+
3655+ return -EINVAL;
3656+}
3657+
3658+static u32 adv7180_status_to_v4l2(u8 status1)
3659+{
3660+ if (!(status1 & ADV7180_STATUS1_IN_LOCK))
3661+ return V4L2_IN_ST_NO_SIGNAL;
3662+
3663+ return 0;
3664+}
3665+
3666+static int __adv7180_status(struct i2c_client *client, u32 *status,
3667+ v4l2_std_id *std)
3668+{
3669+ int status1 = i2c_smbus_read_byte_data(client, ADV7180_STATUS1_REG);
3670+
3671+ if (status1 < 0)
3672+ return status1;
3673+
3674+ if (status)
3675+ *status = adv7180_status_to_v4l2(status1);
3676+ if (std)
3677+ *std = adv7180_std_to_v4l2(status1);
3678+
3679+ return 0;
3680+}
3681+
3682+static inline struct adv7180_state *to_state(struct v4l2_subdev *sd)
3683+{
3684+ return container_of(sd, struct adv7180_state, sd);
3685+}
3686+
3687+static int adv7180_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
3688+{
3689+ struct adv7180_state *state = to_state(sd);
3690+ int err = mutex_lock_interruptible(&state->mutex);
3691+ if (err)
3692+ return err;
3693+
3694+	/* if not autodetecting, or when interrupt driven, curr_norm is up to date */
3695+ if (!state->autodetect || state->irq > 0)
3696+ *std = state->curr_norm;
3697+ else
3698+ err = __adv7180_status(v4l2_get_subdevdata(sd), NULL, std);
3699+
3700+ mutex_unlock(&state->mutex);
3701+ return err;
3702+}
3703+
3704+static int adv7180_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
3705+{
3706+ struct adv7180_state *state = to_state(sd);
3707+ struct i2c_client *client = v4l2_get_subdevdata(sd);
3708+ int ret = mutex_lock_interruptible(&state->mutex);
3709+ if (ret)
3710+ return ret;
3711+
3712+ /* all standards -> autodetect */
3713+ if (std == V4L2_STD_ALL) {
3714+ ret = i2c_smbus_write_byte_data(client,
3715+ ADV7180_INPUT_CONTROL_REG,
3716+ ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM);
3717+ if (ret < 0)
3718+ goto out;
3719+
3720+ __adv7180_status(client, NULL, &state->curr_norm);
3721+ state->autodetect = true;
3722+ } else {
3723+ ret = v4l2_std_to_adv7180(std);
3724+ if (ret < 0)
3725+ goto out;
3726+
3727+ ret = i2c_smbus_write_byte_data(client,
3728+ ADV7180_INPUT_CONTROL_REG, ret);
3729+ if (ret < 0)
3730+ goto out;
3731+
3732+ state->curr_norm = std;
3733+ state->autodetect = false;
3734+ }
3735+ ret = 0;
3736+out:
3737+ mutex_unlock(&state->mutex);
3738+ return ret;
3739+}
3740+
3741+static int adv7180_g_input_status(struct v4l2_subdev *sd, u32 *status)
3742+{
3743+ struct adv7180_state *state = to_state(sd);
3744+ int ret = mutex_lock_interruptible(&state->mutex);
3745+ if (ret)
3746+ return ret;
3747+
3748+ ret = __adv7180_status(v4l2_get_subdevdata(sd), status, NULL);
3749+ mutex_unlock(&state->mutex);
3750+ return ret;
3751+}
3752+
3753+static int adv7180_g_chip_ident(struct v4l2_subdev *sd,
3754+ struct v4l2_dbg_chip_ident *chip)
3755+{
3756+ struct i2c_client *client = v4l2_get_subdevdata(sd);
3757+
3758+ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7180, 0);
3759+}
3760+
3761+static const struct v4l2_subdev_video_ops adv7180_video_ops = {
3762+ .querystd = adv7180_querystd,
3763+ .g_input_status = adv7180_g_input_status,
3764+};
3765+
3766+static const struct v4l2_subdev_core_ops adv7180_core_ops = {
3767+ .g_chip_ident = adv7180_g_chip_ident,
3768+ .s_std = adv7180_s_std,
3769+};
3770+
3771+static const struct v4l2_subdev_ops adv7180_ops = {
3772+ .core = &adv7180_core_ops,
3773+ .video = &adv7180_video_ops,
3774+};
3775+
3776+static void adv7180_work(struct work_struct *work)
3777+{
3778+ struct adv7180_state *state = container_of(work, struct adv7180_state,
3779+ work);
3780+ struct i2c_client *client = v4l2_get_subdevdata(&state->sd);
3781+ u8 isr3;
3782+
3783+ mutex_lock(&state->mutex);
3784+ i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
3785+ ADV7180_ADI_CTRL_IRQ_SPACE);
3786+ isr3 = i2c_smbus_read_byte_data(client, ADV7180_ISR3_ADI);
3787+ /* clear */
3788+ i2c_smbus_write_byte_data(client, ADV7180_ICR3_ADI, isr3);
3789+ i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG, 0);
3790+
3791+ if (isr3 & ADV7180_IRQ3_AD_CHANGE && state->autodetect)
3792+ __adv7180_status(client, NULL, &state->curr_norm);
3793+ mutex_unlock(&state->mutex);
3794+
3795+ enable_irq(state->irq);
3796+}
3797+
3798+static irqreturn_t adv7180_irq(int irq, void *devid)
3799+{
3800+ struct adv7180_state *state = devid;
3801+
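+	/*
+	 * The interrupt status registers must be read over I2C, which
+	 * cannot be done in hard interrupt context; mask the line and
+	 * defer to adv7180_work(), which re-enables it when done.
+	 */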
3802+ schedule_work(&state->work);
3803+
3804+ disable_irq_nosync(state->irq);
3805+
3806+ return IRQ_HANDLED;
3807+}
3808+
3809+/*
3810+ * Generic i2c probe
3811+ * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
3812+ */
3813+
3814+static int __devinit adv7180_probe(struct i2c_client *client,
3815+ const struct i2c_device_id *id)
3816+{
3817+ struct adv7180_state *state;
3818+ struct v4l2_subdev *sd;
3819+ int ret;
3820+
3821+ /* Check if the adapter supports the needed features */
3822+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
3823+ return -EIO;
3824+
3825+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
3826+ client->addr << 1, client->adapter->name);
3827+
3828+ state = kzalloc(sizeof(struct adv7180_state), GFP_KERNEL);
3829+ if (state == NULL) {
3830+ ret = -ENOMEM;
3831+ goto err;
3832+ }
3833+
3834+ state->irq = client->irq;
3835+ INIT_WORK(&state->work, adv7180_work);
3836+ mutex_init(&state->mutex);
3837+ state->autodetect = true;
3838+ sd = &state->sd;
3839+ v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
3840+
3841+ /* Initialize adv7180 */
3842+ /* Enable autodetection */
3843+ ret = i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
3844+ ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM);
3845+ if (ret < 0)
3846+ goto err_unreg_subdev;
3847+
3848+ ret = i2c_smbus_write_byte_data(client, ADV7180_AUTODETECT_ENABLE_REG,
3849+ ADV7180_AUTODETECT_DEFAULT);
3850+ if (ret < 0)
3851+ goto err_unreg_subdev;
3852+
3853+ /* ITU-R BT.656-4 compatible */
3854+ ret = i2c_smbus_write_byte_data(client,
3855+ ADV7180_EXTENDED_OUTPUT_CONTROL_REG,
3856+ ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS);
3857+ if (ret < 0)
3858+ goto err_unreg_subdev;
3859+
3860+
3861+ /* Manually set V bit end position in NTSC mode */
3862+ ret = i2c_smbus_write_byte_data(client,
3863+ ADV7180_NTSC_V_BIT_END_REG,
3864+ ADV7180_NTSC_V_BIT_END_MANUAL_NVEND);
3865+ if (ret < 0)
3866+ goto err_unreg_subdev;
3867+
3868+ /* read current norm */
3869+ __adv7180_status(client, NULL, &state->curr_norm);
3870+
3871+ /* register for interrupts */
3872+ if (state->irq > 0) {
3873+ ret = request_irq(state->irq, adv7180_irq, 0, DRIVER_NAME,
3874+ state);
3875+ if (ret)
3876+ goto err_unreg_subdev;
3877+
3878+ ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
3879+ ADV7180_ADI_CTRL_IRQ_SPACE);
3880+ if (ret < 0)
3881+ goto err_unreg_subdev;
3882+
3883+		/* configure the interrupt pin to be active low */
3884+ ret = i2c_smbus_write_byte_data(client, ADV7180_ICONF1_ADI,
3885+ ADV7180_ICONF1_ACTIVE_LOW | ADV7180_ICONF1_PSYNC_ONLY);
3886+ if (ret < 0)
3887+ goto err_unreg_subdev;
3888+
3889+ ret = i2c_smbus_write_byte_data(client, ADV7180_IMR1_ADI, 0);
3890+ if (ret < 0)
3891+ goto err_unreg_subdev;
3892+
3893+ ret = i2c_smbus_write_byte_data(client, ADV7180_IMR2_ADI, 0);
3894+ if (ret < 0)
3895+ goto err_unreg_subdev;
3896+
3897+		/* enable AD change interrupts */
3898+ ret = i2c_smbus_write_byte_data(client, ADV7180_IMR3_ADI,
3899+ ADV7180_IRQ3_AD_CHANGE);
3900+ if (ret < 0)
3901+ goto err_unreg_subdev;
3902+
3903+ ret = i2c_smbus_write_byte_data(client, ADV7180_IMR4_ADI, 0);
3904+ if (ret < 0)
3905+ goto err_unreg_subdev;
3906+
3907+ ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
3908+ 0);
3909+ if (ret < 0)
3910+ goto err_unreg_subdev;
3911+ }
3912+
3913+ return 0;
3914+
3915+err_unreg_subdev:
3916+ mutex_destroy(&state->mutex);
3917+ v4l2_device_unregister_subdev(sd);
3918+ kfree(state);
3919+err:
3920+ printk(KERN_ERR DRIVER_NAME ": Failed to probe: %d\n", ret);
3921+ return ret;
3922+}
3923+
3924+static int __devexit adv7180_remove(struct i2c_client *client)
3925+{
3926+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
3927+ struct adv7180_state *state = to_state(sd);
3928+
3929+ if (state->irq > 0) {
3930+ free_irq(client->irq, state);
3931+ if (cancel_work_sync(&state->work)) {
3932+ /*
3933+ * Work was pending, therefore we need to enable
3934+ * IRQ here to balance the disable_irq() done in the
3935+ * interrupt handler.
3936+ */
3937+ enable_irq(state->irq);
3938+ }
3939+ }
3940+
3941+ mutex_destroy(&state->mutex);
3942+ v4l2_device_unregister_subdev(sd);
3943+ kfree(to_state(sd));
3944+ return 0;
3945+}
3946+
3947+static const struct i2c_device_id adv7180_id[] = {
3948+ {DRIVER_NAME, 0},
3949+ {},
3950+};
3951+
3952+MODULE_DEVICE_TABLE(i2c, adv7180_id);
3953+
3954+static struct i2c_driver adv7180_driver = {
3955+ .driver = {
3956+ .owner = THIS_MODULE,
3957+ .name = DRIVER_NAME,
3958+ },
3959+ .probe = adv7180_probe,
3960+ .remove = adv7180_remove,
3961+ .id_table = adv7180_id,
3962+};
3963+
3964+static __init int adv7180_init(void)
3965+{
3966+ return i2c_add_driver(&adv7180_driver);
3967+}
3968+
3969+static __exit void adv7180_exit(void)
3970+{
3971+ i2c_del_driver(&adv7180_driver);
3972+}
3973+
3974+module_init(adv7180_init);
3975+module_exit(adv7180_exit);
3976+
3977+MODULE_DESCRIPTION("Analog Devices ADV7180 video decoder driver");
3978+MODULE_AUTHOR("Mocean Laboratories");
3979+MODULE_LICENSE("GPL v2");
3980+
3981diff -uNr linux-2.6.31/drivers/media/video/Kconfig linux-2.6.31.new/drivers/media/video/Kconfig
3982--- linux-2.6.31/drivers/media/video/Kconfig 2009-10-23 11:18:30.000000000 -0700
3983+++ linux-2.6.31.new/drivers/media/video/Kconfig 2009-10-23 11:17:28.000000000 -0700
3984@@ -265,6 +265,15 @@
3985
3986 comment "Video decoders"
3987
3988+config VIDEO_ADV7180
3989+ tristate "Analog Devices ADV7180 decoder"
3990+ depends on VIDEO_V4L2 && I2C
3991+ ---help---
3992+ Support for the Analog Devices ADV7180 video decoder.
3993+
3994+ To compile this driver as a module, choose M here: the
3995+ module will be called adv7180.
3996+
3997 config VIDEO_BT819
3998 tristate "BT819A VideoStream decoder"
3999 depends on VIDEO_V4L2 && I2C
4000@@ -816,6 +825,13 @@
4001 ---help---
4002 This is a v4l2 driver for the TI OMAP2 camera capture interface
4003
4004+config VIDEO_TIMBERDALE
4005+ tristate "Support for timberdale Video In/LogiWIN"
4006+ depends on VIDEO_V4L2 && MFD_TIMBERDALE_DMA
4007+ select VIDEO_ADV7180
4008+ ---help---
4009+	  Add support for the Video In peripheral of the timberdale FPGA.
4010+
4011 #
4012 # USB Multimedia device configuration
4013 #
4014diff -uNr linux-2.6.31/drivers/media/video/Makefile linux-2.6.31.new/drivers/media/video/Makefile
4015--- linux-2.6.31/drivers/media/video/Makefile 2009-10-23 11:18:30.000000000 -0700
4016+++ linux-2.6.31.new/drivers/media/video/Makefile 2009-10-23 11:17:27.000000000 -0700
4017@@ -45,6 +45,7 @@
4018 obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
4019 obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
4020 obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
4021+obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
4022 obj-$(CONFIG_VIDEO_ADV7343) += adv7343.o
4023 obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
4024 obj-$(CONFIG_VIDEO_BT819) += bt819.o
4025@@ -156,6 +157,8 @@
4026
4027 obj-$(CONFIG_VIDEO_AU0828) += au0828/
4028
4029+obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o
4030+
4031 obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
4032
4033 obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
4034diff -uNr linux-2.6.31/drivers/media/video/timblogiw.c linux-2.6.31.new/drivers/media/video/timblogiw.c
4035--- linux-2.6.31/drivers/media/video/timblogiw.c 1969-12-31 16:00:00.000000000 -0800
4036+++ linux-2.6.31.new/drivers/media/video/timblogiw.c 2009-10-23 11:17:28.000000000 -0700
4037@@ -0,0 +1,1058 @@
4038+/*
4039+ * timblogiw.c timberdale FPGA LogiWin Video In driver
4040+ * Copyright (c) 2009 Intel Corporation
4041+ *
4042+ * This program is free software; you can redistribute it and/or modify
4043+ * it under the terms of the GNU General Public License version 2 as
4044+ * published by the Free Software Foundation.
4045+ *
4046+ * This program is distributed in the hope that it will be useful,
4047+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
4048+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4049+ * GNU General Public License for more details.
4050+ *
4051+ * You should have received a copy of the GNU General Public License
4052+ * along with this program; if not, write to the Free Software
4053+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
4054+ */
4055+
4056+/* Supports:
4057+ * Timberdale FPGA LogiWin Video In
4058+ */
4059+
4060+#include <linux/list.h>
4061+#include <linux/version.h>
4062+#include <linux/module.h>
4063+#include <linux/dma-mapping.h>
4064+#include <media/v4l2-common.h>
4065+#include <media/v4l2-ioctl.h>
4066+#include <media/v4l2-device.h>
4067+#include <linux/platform_device.h>
4068+#include <linux/interrupt.h>
4069+#include "timblogiw.h"
4070+#include <linux/mfd/timbdma.h>
4071+#include <linux/i2c.h>
4072+
4073+#define DRIVER_NAME "timb-video"
4074+
4075+#define TIMBLOGIW_CTRL 0x40
4076+
4077+#define TIMBLOGIW_H_SCALE 0x20
4078+#define TIMBLOGIW_V_SCALE 0x28
4079+
4080+#define TIMBLOGIW_X_CROP 0x58
4081+#define TIMBLOGIW_Y_CROP 0x60
4082+
4083+#define TIMBLOGIW_W_CROP 0x00
4084+#define TIMBLOGIW_H_CROP 0x08
4085+
4086+#define TIMBLOGIW_VERSION_CODE 0x02
4087+
4088+#define TIMBLOGIW_BUF 0x04
4089+#define TIMBLOGIW_TBI 0x2c
4090+#define TIMBLOGIW_BPL 0x30
4091+
4092+#define dbg(...)
4093+
4094+#define BYTES_PER_LINE (720 * 2)
4095+
4096+#define DMA_BUFFER_SIZE (BYTES_PER_LINE * 576)
4097+
4098+#define TIMBLOGIW_VIDEO_FORMAT V4L2_PIX_FMT_UYVY
4099+
4100+static void timblogiw_release_buffers(struct timblogiw *lw);
4101+
4102+const struct timblogiw_tvnorm timblogiw_tvnorms[] = {
4103+ {
4104+ .std = V4L2_STD_PAL,
4105+ .width = 720,
4106+ .height = 576
4107+ },
4108+ {
4109+ .std = V4L2_STD_NTSC,
4110+ .width = 720,
4111+ .height = 480
4112+ }
4113+};
4114+
4115+static int timblogiw_bytes_per_line(const struct timblogiw_tvnorm *norm)
4116+{
4117+ return norm->width * 2;
4118+}
4119+
4120+
4121+static int timblogiw_frame_size(const struct timblogiw_tvnorm *norm)
4122+{
4123+ return norm->height * timblogiw_bytes_per_line(norm);
4124+}
4125+
4126+static const struct timblogiw_tvnorm *timblogiw_get_norm(const v4l2_std_id std)
4127+{
4128+ int i;
4129+ for (i = 0; i < ARRAY_SIZE(timblogiw_tvnorms); i++)
4130+ if (timblogiw_tvnorms[i].std & std)
4131+ return timblogiw_tvnorms + i;
4132+
4133+ /* default to first element */
4134+ return timblogiw_tvnorms;
4135+}
4136+
4137+static void timblogiw_handleframe(unsigned long arg)
4138+{
4139+ struct timblogiw_frame *f;
4140+ struct timblogiw *lw = (struct timblogiw *)arg;
4141+
4142+ spin_lock_bh(&lw->queue_lock);
4143+ if (lw->dma.filled && !list_empty(&lw->inqueue)) {
4144+ /* put the entry in the outqueue */
4145+ f = list_entry(lw->inqueue.next, struct timblogiw_frame, frame);
4146+
4147+ /* sync memory and unmap */
4148+ dma_sync_single_for_cpu(lw->dev, lw->dma.filled->handle,
4149+ timblogiw_frame_size(lw->cur_norm), DMA_FROM_DEVICE);
4150+
4151+ /* copy data from the DMA buffer */
4152+ memcpy(f->bufmem, lw->dma.filled->buf, f->buf.length);
4153+ /* buffer consumed */
4154+ lw->dma.filled = NULL;
4155+
4156+ do_gettimeofday(&f->buf.timestamp);
4157+ f->buf.sequence = ++lw->frame_count;
4158+ f->buf.field = V4L2_FIELD_NONE;
4159+ f->state = F_DONE;
4160+ f->buf.bytesused = f->buf.length;
4161+ list_move_tail(&f->frame, &lw->outqueue);
4162+ /* wake up any waiter */
4163+ wake_up(&lw->wait_frame);
4164+ } else {
4165+		/* No user buffer available; consume the DMA buffer anyway,
4166+		 * nobody wants an old video frame
4167+		 */
4168+ lw->dma.filled = NULL;
4169+ }
4170+ spin_unlock_bh(&lw->queue_lock);
4171+}
4172+
4173+static int __timblogiw_start_dma(struct timblogiw *lw)
4174+{
4175+ int size = timblogiw_frame_size(lw->cur_norm);
4176+ int ret;
4177+ struct timbdma_transfer *transfer = lw->dma.transfer + lw->dma.curr;
4178+ int bytes_per_line = timblogiw_bytes_per_line(lw->cur_norm);
4179+
4180+ ret = timbdma_prep_desc(transfer->desc, transfer->handle, size);
4181+ if (ret)
4182+ goto err;
4183+
4184+ ret = timbdma_start(DMA_IRQ_VIDEO_RX, transfer->desc, bytes_per_line);
4185+ if (ret)
4186+ goto err;
4187+ return ret;
4188+err:
4189+ return ret;
4190+}
4191+
4192+static int timblogiw_isr(u32 flag, void *pdev)
4193+{
4194+ struct timblogiw *lw = (struct timblogiw *)pdev;
4195+
4196+ if (lw->stream == STREAM_OFF) {
4197+ timbdma_stop(DMA_IRQ_VIDEO_RX);
4198+ /* stream is stopped, signal that the current transfer is
4199+ * finished */
4200+ complete(&lw->irq_done);
4201+ } else {
4202+ struct timeval timestamp;
4203+
4204+ do_gettimeofday(&timestamp);
4205+
4206+ if (!lw->dma.filled && (flag & DMA_IRQ_VIDEO_RX)) {
4207+ /* Got a frame, store it, and flip to next DMA buffer */
4208+ lw->dma.filled = lw->dma.transfer + lw->dma.curr;
4209+ lw->dma.curr = !lw->dma.curr;
4210+ } else if (lw->dma.filled && (flag & DMA_IRQ_VIDEO_RX))
4211+			printk(KERN_WARNING DRIVER_NAME ": No free frame\n");
4212+
4213+ __timblogiw_start_dma(lw);
4214+
4215+ if (flag & DMA_IRQ_VIDEO_DROP)
4216+ dbg("%s: frame dropped\n", __func__);
4217+ if (flag & DMA_IRQ_VIDEO_RX) {
4218+ dbg("%s: frame RX\n", __func__);
4219+ tasklet_schedule(&lw->tasklet);
4220+ }
4221+ }
4222+
4223+ return 0;
4224+}
4225+
4226+static void timblogiw_empty_framequeues(struct timblogiw *lw)
4227+{
4228+ u32 i;
4229+
4230+ dbg("%s\n", __func__);
4231+
4232+ INIT_LIST_HEAD(&lw->inqueue);
4233+ INIT_LIST_HEAD(&lw->outqueue);
4234+
4235+ for (i = 0; i < lw->num_frames; i++) {
4236+ lw->frame[i].state = F_UNUSED;
4237+ lw->frame[i].buf.bytesused = 0;
4238+ }
4239+}
4240+
4241+u32 timblogiw_request_buffers(struct timblogiw *lw, u32 count)
4242+{
4243+	/* needs to be page aligned because the buffers
4244+	 * can be mapped individually */
4245+ const size_t imagesize = PAGE_ALIGN(timblogiw_frame_size(lw->cur_norm));
4246+ void *buff = NULL;
4247+ int ret;
4248+ u32 i;
4249+
4250+ dbg("%s - request of %i buffers of size %zi\n",
4251+ __func__, count, imagesize);
4252+
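+	/*
+	 * Two DMA buffers are used ping-pong style: while one is being
+	 * filled by the LogiWin core, a completed frame in the other is
+	 * copied into the vmalloc()ed, mmap()able frame buffers set up
+	 * below.
+	 */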
4253+ lw->dma.transfer[0].buf = kzalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
4254+ if (!lw->dma.transfer[0].buf)
4255+ goto err;
4256+
4257+ lw->dma.transfer[1].buf = kzalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
4258+ if (!lw->dma.transfer[1].buf)
4259+ goto err;
4260+
4261+ lw->dma.transfer[0].desc =
4262+ timbdma_alloc_desc(DMA_BUFFER_SIZE, BYTES_PER_LINE * 2);
4263+ if (!lw->dma.transfer[0].desc)
4264+ goto err;
4265+
4266+ lw->dma.transfer[1].desc =
4267+ timbdma_alloc_desc(DMA_BUFFER_SIZE, BYTES_PER_LINE * 2);
4268+ if (!lw->dma.transfer[1].desc)
4269+ goto err;
4270+
4271+ /* map up the DMA buffers */
4272+ lw->dma.transfer[0].handle = dma_map_single(lw->dev,
4273+ lw->dma.transfer[0].buf, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
4274+ ret = dma_mapping_error(lw->dev, lw->dma.transfer[0].handle);
4275+ if (ret) {
4276+ lw->dma.transfer[0].handle = 0;
4277+ goto err;
4278+ }
4279+
4280+ lw->dma.transfer[1].handle = dma_map_single(lw->dev,
4281+ lw->dma.transfer[1].buf, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
4282+ ret = dma_mapping_error(lw->dev, lw->dma.transfer[1].handle);
4283+ if (ret) {
4284+ lw->dma.transfer[1].handle = 0;
4285+ goto err;
4286+ }
4287+
4288+ if (count > TIMBLOGIW_NUM_FRAMES)
4289+ count = TIMBLOGIW_NUM_FRAMES;
4290+
4291+ lw->num_frames = count;
4292+ while (lw->num_frames > 0) {
4293+ buff = vmalloc_32(lw->num_frames * imagesize);
4294+ if (buff) {
4295+ memset(buff, 0, lw->num_frames * imagesize);
4296+ break;
4297+ }
4298+ lw->num_frames--;
4299+ }
4300+
4301+ for (i = 0; i < lw->num_frames; i++) {
4302+ lw->frame[i].bufmem = buff + i * imagesize;
4303+ lw->frame[i].buf.index = i;
4304+ lw->frame[i].buf.m.offset = i * imagesize;
4305+ lw->frame[i].buf.length = timblogiw_frame_size(lw->cur_norm);
4306+ lw->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
4307+ lw->frame[i].buf.sequence = 0;
4308+ lw->frame[i].buf.field = V4L2_FIELD_NONE;
4309+ lw->frame[i].buf.memory = V4L2_MEMORY_MMAP;
4310+ lw->frame[i].buf.flags = 0;
4311+ }
4312+
4313+ lw->dma.curr = 0;
4314+ lw->dma.filled = NULL;
4315+ return lw->num_frames;
4316+err:
4317+ timblogiw_release_buffers(lw);
4318+
4319+ return 0;
4320+}
4321+
4322+static void timblogiw_release_buffers(struct timblogiw *lw)
4323+{
4324+ dbg("%s\n", __func__);
4325+
4326+ if (lw->frame[0].bufmem != NULL) {
4327+ vfree(lw->frame[0].bufmem);
4328+ lw->frame[0].bufmem = NULL;
4329+ }
4330+
4331+ if (lw->dma.transfer[0].handle)
4332+ dma_unmap_single(lw->dev, lw->dma.transfer[0].handle,
4333+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
4334+
4335+ if (lw->dma.transfer[1].handle)
4336+ dma_unmap_single(lw->dev, lw->dma.transfer[1].handle,
4337+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
4338+
4339+ if (lw->dma.transfer[0].buf != NULL)
4340+ kfree(lw->dma.transfer[0].buf);
4341+ lw->dma.transfer[0].buf = NULL;
4342+
4343+ if (lw->dma.transfer[1].buf != NULL)
4344+ kfree(lw->dma.transfer[1].buf);
4345+ lw->dma.transfer[1].buf = NULL;
4346+
4347+ if (lw->dma.transfer[0].desc != NULL)
4348+ timbdma_free_desc(lw->dma.transfer[0].desc);
4349+ lw->dma.transfer[0].desc = NULL;
4350+
4351+ if (lw->dma.transfer[1].desc != NULL)
4352+ timbdma_free_desc(lw->dma.transfer[1].desc);
4353+ lw->dma.transfer[1].desc = NULL;
4354+
4355+
4356+ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
4357+}
4358+
4359+/* IOCTL functions */
4360+
4361+static int timblogiw_g_fmt(struct file *file, void *priv,
4362+ struct v4l2_format *format)
4363+{
4364+ struct video_device *vdev = video_devdata(file);
4365+ struct timblogiw *lw = video_get_drvdata(vdev);
4366+
4367+ dbg("%s\n", __func__);
4368+
4369+ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
4370+ return -EINVAL;
4371+
4372+ format->fmt.pix.width = lw->cur_norm->width;
4373+ format->fmt.pix.height = lw->cur_norm->height;
4374+ format->fmt.pix.pixelformat = TIMBLOGIW_VIDEO_FORMAT;
4375+ format->fmt.pix.bytesperline = timblogiw_bytes_per_line(lw->cur_norm);
4376+ format->fmt.pix.sizeimage = timblogiw_frame_size(lw->cur_norm);
4377+ format->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
4378+ format->fmt.pix.field = V4L2_FIELD_NONE;
4379+ return 0;
4380+}
4381+
4382+static int timblogiw_try_fmt(struct file *file, void *priv,
4383+ struct v4l2_format *format)
4384+{
4385+ struct video_device *vdev = video_devdata(file);
4386+ struct timblogiw *lw = video_get_drvdata(vdev);
4387+ struct v4l2_pix_format *pix = &format->fmt.pix;
4388+
4389+ dbg("%s - width=%d, height=%d, pixelformat=%d, field=%d\n"
4390+ "bytes per line %d, size image: %d, colorspace: %d\n",
4391+ __func__,
4392+ pix->width, pix->height, pix->pixelformat, pix->field,
4393+ pix->bytesperline, pix->sizeimage, pix->colorspace);
4394+
4395+ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
4396+ return -EINVAL;
4397+
4398+ if (pix->field != V4L2_FIELD_NONE)
4399+ return -EINVAL;
4400+
4401+ if (pix->pixelformat != TIMBLOGIW_VIDEO_FORMAT)
4402+ return -EINVAL;
4403+
4404+ if ((lw->cur_norm->height != pix->height) ||
4405+ (lw->cur_norm->width != pix->width)) {
4406+ pix->width = lw->cur_norm->width;
4407+ pix->height = lw->cur_norm->height;
4408+ }
4409+
4410+ return 0;
4411+}
4412+
4413+static int timblogiw_querycap(struct file *file, void *priv,
4414+ struct v4l2_capability *cap)
4415+{
4416+ dbg("%s\n", __func__);
4417+ memset(cap, 0, sizeof(*cap));
4418+ strncpy(cap->card, "Timberdale Video", sizeof(cap->card)-1);
4419+	strncpy(cap->driver, "Timblogiw", sizeof(cap->driver)-1);
4420+ cap->version = TIMBLOGIW_VERSION_CODE;
4421+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
4422+ V4L2_CAP_STREAMING;
4423+
4424+ return 0;
4425+}
4426+
4427+static int timblogiw_enum_fmt(struct file *file, void *priv,
4428+ struct v4l2_fmtdesc *fmt)
4429+{
4430+ dbg("%s, index: %d\n", __func__, fmt->index);
4431+
4432+ if (fmt->index != 0)
4433+ return -EINVAL;
4434+ memset(fmt, 0, sizeof(*fmt));
4435+ fmt->index = 0;
4436+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
4437+ strncpy(fmt->description, "4:2:2, packed, YUYV",
4438+ sizeof(fmt->description)-1);
4439+ fmt->pixelformat = TIMBLOGIW_VIDEO_FORMAT;
4440+ memset(fmt->reserved, 0, sizeof(fmt->reserved));
4441+
4442+ return 0;
4443+}
4444+
4445+static int timblogiw_reqbufs(struct file *file, void *priv,
4446+ struct v4l2_requestbuffers *rb)
4447+{
4448+ struct video_device *vdev = video_devdata(file);
4449+ struct timblogiw *lw = video_get_drvdata(vdev);
4450+
4451+ dbg("%s\n", __func__);
4452+
4453+ if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
4454+ rb->memory != V4L2_MEMORY_MMAP)
4455+ return -EINVAL;
4456+
4457+ timblogiw_empty_framequeues(lw);
4458+
4459+ timblogiw_release_buffers(lw);
4460+ if (rb->count)
4461+ rb->count = timblogiw_request_buffers(lw, rb->count);
4462+
4463+ dbg("%s - VIDIOC_REQBUFS: io method is mmap. num bufs %i\n",
4464+ __func__, rb->count);
4465+
4466+ return 0;
4467+}
4468+
4469+static int timblogiw_querybuf(struct file *file, void *priv,
4470+ struct v4l2_buffer *b)
4471+{
4472+ struct video_device *vdev = video_devdata(file);
4473+ struct timblogiw *lw = video_get_drvdata(vdev);
4474+
4475+ dbg("%s\n", __func__);
4476+
4477+ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
4478+ b->index >= lw->num_frames)
4479+ return -EINVAL;
4480+
4481+ memcpy(b, &lw->frame[b->index].buf, sizeof(*b));
4482+
4483+ if (lw->frame[b->index].vma_use_count)
4484+ b->flags |= V4L2_BUF_FLAG_MAPPED;
4485+
4486+ if (lw->frame[b->index].state == F_DONE)
4487+ b->flags |= V4L2_BUF_FLAG_DONE;
4488+ else if (lw->frame[b->index].state != F_UNUSED)
4489+ b->flags |= V4L2_BUF_FLAG_QUEUED;
4490+
4491+ return 0;
4492+}
4493+
4494+static int timblogiw_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
4495+{
4496+ struct video_device *vdev = video_devdata(file);
4497+ struct timblogiw *lw = video_get_drvdata(vdev);
4498+ unsigned long lock_flags;
4499+
4500+ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
4501+ b->index >= lw->num_frames)
4502+ return -EINVAL;
4503+
4504+ if (lw->frame[b->index].state != F_UNUSED)
4505+ return -EAGAIN;
4506+
4507+ if (!lw->frame[b->index].bufmem)
4508+ return -EINVAL;
4509+
4510+ if (b->memory != V4L2_MEMORY_MMAP)
4511+ return -EINVAL;
4512+
4513+ lw->frame[b->index].state = F_QUEUED;
4514+
4515+ spin_lock_irqsave(&lw->queue_lock, lock_flags);
4516+ list_add_tail(&lw->frame[b->index].frame, &lw->inqueue);
4517+ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
4518+
4519+ return 0;
4520+}
4521+
4522+static int timblogiw_dqbuf(struct file *file, void *priv,
4523+ struct v4l2_buffer *b)
4524+{
4525+ struct video_device *vdev = video_devdata(file);
4526+ struct timblogiw *lw = video_get_drvdata(vdev);
4527+ struct timblogiw_frame *f;
4528+ unsigned long lock_flags;
4529+ int ret = 0;
4530+
4531+ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
4532+ dbg("%s - VIDIOC_DQBUF, illegal buf type!\n",
4533+ __func__);
4534+ return -EINVAL;
4535+ }
4536+
4537+ if (list_empty(&lw->outqueue)) {
4538+ if (file->f_flags & O_NONBLOCK)
4539+ return -EAGAIN;
4540+
4541+ ret = wait_event_interruptible(lw->wait_frame,
4542+ !list_empty(&lw->outqueue));
4543+ if (ret)
4544+ return ret;
4545+ }
4546+
4547+ spin_lock_irqsave(&lw->queue_lock, lock_flags);
4548+ f = list_entry(lw->outqueue.next,
4549+ struct timblogiw_frame, frame);
4550+ list_del(lw->outqueue.next);
4551+ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
4552+
4553+ f->state = F_UNUSED;
4554+ memcpy(b, &f->buf, sizeof(*b));
4555+
4556+ if (f->vma_use_count)
4557+ b->flags |= V4L2_BUF_FLAG_MAPPED;
4558+
4559+ return 0;
4560+}
4561+
4562+static int timblogiw_g_std(struct file *file, void *priv, v4l2_std_id *std)
4563+{
4564+ struct video_device *vdev = video_devdata(file);
4565+ struct timblogiw *lw = video_get_drvdata(vdev);
4566+
4567+ dbg("%s\n", __func__);
4568+
4569+ *std = lw->cur_norm->std;
4570+ return 0;
4571+}
4572+
4573+static int timblogiw_s_std(struct file *file, void *priv, v4l2_std_id *std)
4574+{
4575+ struct video_device *vdev = video_devdata(file);
4576+ struct timblogiw *lw = video_get_drvdata(vdev);
4577+ int err;
4578+
4579+ dbg("%s\n", __func__);
4580+
4581+ err = v4l2_subdev_call(lw->sd_enc, core, s_std, *std);
4582+ if (!err)
4583+ lw->cur_norm = timblogiw_get_norm(*std);
4584+
4585+ return err;
4586+}
4587+
4588+static int timblogiw_enuminput(struct file *file, void *priv,
4589+ struct v4l2_input *inp)
4590+{
4591+ dbg("%s\n", __func__);
4592+
4593+ if (inp->index != 0)
4594+ return -EINVAL;
4595+
4596+ memset(inp, 0, sizeof(*inp));
4597+ inp->index = 0;
4598+
4599+ strncpy(inp->name, "Timb input 1", sizeof(inp->name) - 1);
4600+ inp->type = V4L2_INPUT_TYPE_CAMERA;
4601+ inp->std = V4L2_STD_ALL;
4602+
4603+ return 0;
4604+}
4605+
4606+static int timblogiw_g_input(struct file *file, void *priv,
4607+ unsigned int *input)
4608+{
4609+ dbg("%s\n", __func__);
4610+
4611+ *input = 0;
4612+
4613+ return 0;
4614+}
4615+
4616+static int timblogiw_s_input(struct file *file, void *priv, unsigned int input)
4617+{
4618+ dbg("%s\n", __func__);
4619+
4620+ if (input != 0)
4621+ return -EINVAL;
4622+ return 0;
4623+}
4624+
4625+static int timblogiw_streamon(struct file *file, void *priv, unsigned int type)
4626+{
4627+ struct video_device *vdev = video_devdata(file);
4628+ struct timblogiw *lw = video_get_drvdata(vdev);
4629+ struct timblogiw_frame *f;
4630+
4631+ dbg("%s\n", __func__);
4632+
4633+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
4634+ dbg("%s - No capture device\n", __func__);
4635+ return -EINVAL;
4636+ }
4637+
4638+ if (list_empty(&lw->inqueue)) {
4639+ dbg("%s - inqueue is empty\n", __func__);
4640+ return -EINVAL;
4641+ }
4642+
4643+ if (lw->stream == STREAM_ON)
4644+ return 0;
4645+
4646+ lw->stream = STREAM_ON;
4647+
4648+ f = list_entry(lw->inqueue.next,
4649+ struct timblogiw_frame, frame);
4650+
4651+ dbg("%s - f size: %d, bpr: %d, dma addr: %x\n", __func__,
4652+ timblogiw_frame_size(lw->cur_norm),
4653+ timblogiw_bytes_per_line(lw->cur_norm),
4654+ (unsigned int)lw->dma.transfer[lw->dma.curr].handle);
4655+
4656+ __timblogiw_start_dma(lw);
4657+
4658+ return 0;
4659+}
4660+
4661+static void timblogiw_stopstream(struct timblogiw *lw)
4662+{
4663+ if (lw->stream == STREAM_ON) {
4664+		/* The FPGA might be busy copying the current frame; we have
4665+		 * to wait for the frame to finish.
4666+ */
4667+ unsigned long lock_flags;
4668+
4669+ init_completion(&lw->irq_done);
4670+
4671+ spin_lock_irqsave(&lw->queue_lock, lock_flags);
4672+ lw->stream = STREAM_OFF;
4673+ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
4674+
4675+ wait_for_completion_timeout(&lw->irq_done,
4676+ msecs_to_jiffies(100));
4677+ }
4678+}
4679+
4680+static int timblogiw_streamoff(struct file *file, void *priv,
4681+ unsigned int type)
4682+{
4683+ struct video_device *vdev = video_devdata(file);
4684+ struct timblogiw *lw = video_get_drvdata(vdev);
4685+
4686+ dbg("%s\n", __func__);
4687+
4688+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
4689+ return -EINVAL;
4690+
4691+ timblogiw_stopstream(lw);
4692+
4693+ timblogiw_empty_framequeues(lw);
4694+
4695+ return 0;
4696+}
4697+
4698+static int timblogiw_querystd(struct file *file, void *priv, v4l2_std_id *std)
4699+{
4700+ struct video_device *vdev = video_devdata(file);
4701+ struct timblogiw *lw = video_get_drvdata(vdev);
4702+
4703+ dbg("%s\n", __func__);
4704+
4705+ return v4l2_subdev_call(lw->sd_enc, video, querystd, std);
4706+}
4707+
4708+static int timblogiw_enum_framesizes(struct file *file, void *priv,
4709+ struct v4l2_frmsizeenum *fsize)
4710+{
4711+ struct video_device *vdev = video_devdata(file);
4712+ struct timblogiw *lw = video_get_drvdata(vdev);
4713+
4714+ dbg("%s - index: %d, format: %d\n", __func__,
4715+ fsize->index, fsize->pixel_format);
4716+
4717+ if ((fsize->index != 0) ||
4718+ (fsize->pixel_format != TIMBLOGIW_VIDEO_FORMAT))
4719+ return -EINVAL;
4720+
4721+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
4722+ fsize->discrete.width = lw->cur_norm->width;
4723+ fsize->discrete.height = lw->cur_norm->height;
4724+
4725+ return 0;
4726+}
4727+
4728+struct find_addr_arg {
4729+ char const *name;
4730+ struct i2c_client *client;
4731+};
4732+
4733+static int find_name(struct device *dev, void *argp)
4734+{
4735+ struct find_addr_arg *arg = (struct find_addr_arg *)argp;
4736+ struct i2c_client *client = i2c_verify_client(dev);
4737+
4738+ if (client && !strcmp(arg->name, client->name) && client->driver)
4739+ arg->client = client;
4740+
4741+ return 0;
4742+}
4743+
4744+static struct i2c_client *find_client(struct i2c_adapter *adapt,
4745+ const char *name)
4746+{
4747+ struct find_addr_arg find_arg;
4748+ /* now find the client */
4749+#ifdef MODULE
4750+ request_module(name);
4751+#endif
4752+ /* code for finding the I2C child */
4753+ find_arg.name = name;
4754+ find_arg.client = NULL;
4755+ device_for_each_child(&adapt->dev, &find_arg, find_name);
4756+ return find_arg.client;
4757+}
4758+
4759+/*******************************
4760+ * Device Operations functions *
4761+ *******************************/
4762+
4763+static int timblogiw_open(struct file *file)
4764+{
4765+ struct video_device *vdev = video_devdata(file);
4766+ struct timblogiw *lw = video_get_drvdata(vdev);
4767+ v4l2_std_id std = V4L2_STD_UNKNOWN;
4768+ int err = 0;
4769+
4770+ dbg("%s -\n", __func__);
4771+
4772+ mutex_init(&lw->fileop_lock);
4773+ spin_lock_init(&lw->queue_lock);
4774+ init_waitqueue_head(&lw->wait_frame);
4775+
4776+ mutex_lock(&lw->lock);
4777+
4778+ if (!lw->sd_enc) {
4779+ struct i2c_adapter *adapt;
4780+ struct i2c_client *encoder;
4781+
4782+ /* find the video decoder */
4783+ adapt = i2c_get_adapter(lw->pdata.i2c_adapter);
4784+ if (!adapt) {
4785+ printk(KERN_ERR DRIVER_NAME": No I2C bus\n");
4786+ err = -ENODEV;
4787+ goto out;
4788+ }
4789+
4790+ /* now find the encoder */
4791+ encoder = find_client(adapt, lw->pdata.encoder);
4792+
4793+ i2c_put_adapter(adapt);
4794+
4795+ if (!encoder) {
4796+ printk(KERN_ERR DRIVER_NAME": Failed to get encoder\n");
4797+ err = -ENODEV;
4798+ goto out;
4799+ }
4800+
4801+ lw->sd_enc = i2c_get_clientdata(encoder);
4802+ lw->enc_owner = lw->sd_enc->owner;
4803+ /* Lock the module */
4804+ if (!try_module_get(lw->enc_owner)) {
4805+ lw->sd_enc = NULL;
4806+ err = -ENODEV;
4807+ goto out;
4808+ }
4809+ }
4810+
4811+ timblogiw_querystd(file, NULL, &std);
4812+ lw->cur_norm = timblogiw_get_norm(std);
4813+
4814+ file->private_data = lw;
4815+ lw->stream = STREAM_OFF;
4816+ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
4817+
4818+ timblogiw_empty_framequeues(lw);
4819+ timbdma_set_interruptcb(DMA_IRQ_VIDEO_RX | DMA_IRQ_VIDEO_DROP,
4820+ timblogiw_isr, (void *)lw);
4821+
4822+out:
4823+ mutex_unlock(&lw->lock);
4824+
4825+ return err;
4826+}
4827+
4828+static int timblogiw_close(struct file *file)
4829+{
4830+ struct timblogiw *lw = file->private_data;
4831+
4832+ dbg("%s - entry\n", __func__);
4833+
4834+ mutex_lock(&lw->lock);
4835+
4836+ timblogiw_stopstream(lw);
4837+
4838+ timbdma_set_interruptcb(DMA_IRQ_VIDEO_RX | DMA_IRQ_VIDEO_DROP, NULL,
4839+ NULL);
4840+ timblogiw_release_buffers(lw);
4841+
4842+ mutex_unlock(&lw->lock);
4843+ return 0;
4844+}
4845+
4846+static ssize_t timblogiw_read(struct file *file, char __user *data,
4847+ size_t count, loff_t *ppos)
4848+{
4849+ dbg("%s - read request\n", __func__);
4850+ return -EINVAL;
4851+}
4852+
4853+static void timblogiw_vm_open(struct vm_area_struct *vma)
4854+{
4855+ struct timblogiw_frame *f = vma->vm_private_data;
4856+ f->vma_use_count++;
4857+}
4858+
4859+static void timblogiw_vm_close(struct vm_area_struct *vma)
4860+{
4861+ struct timblogiw_frame *f = vma->vm_private_data;
4862+ f->vma_use_count--;
4863+}
4864+
4865+static struct vm_operations_struct timblogiw_vm_ops = {
4866+ .open = timblogiw_vm_open,
4867+ .close = timblogiw_vm_close,
4868+};
4869+
4870+static int timblogiw_mmap(struct file *filp, struct vm_area_struct *vma)
4871+{
4872+ unsigned long size = vma->vm_end - vma->vm_start, start = vma->vm_start;
4873+ void *pos;
4874+ u32 i;
4875+ int ret = -EINVAL;
4876+
4877+ struct timblogiw *lw = filp->private_data;
4878+ dbg("%s\n", __func__);
4879+
4880+ if (mutex_lock_interruptible(&lw->fileop_lock))
4881+ return -ERESTARTSYS;
4882+
4883+ if (!(vma->vm_flags & VM_WRITE) ||
4884+ size != PAGE_ALIGN(lw->frame[0].buf.length))
4885+ goto error_unlock;
4886+
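+	/* find the frame whose mmap offset matches the requested page offset */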
4887+ for (i = 0; i < lw->num_frames; i++)
4888+ if ((lw->frame[i].buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
4889+ break;
4890+
4891+ if (i == lw->num_frames) {
4892+ dbg("%s - user supplied mapping address is out of range\n",
4893+ __func__);
4894+ goto error_unlock;
4895+ }
4896+
4897+ vma->vm_flags |= VM_IO;
4898+ vma->vm_flags |= VM_RESERVED; /* Do not swap out this VMA */
4899+
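+	/* map each page of the vmalloc'ed frame buffer into the caller's VMA */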
4900+ pos = lw->frame[i].bufmem;
4901+ while (size > 0) { /* size is page-aligned */
4902+ if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
4903+ dbg("%s - vm_insert_page failed\n", __func__);
4904+ ret = -EAGAIN;
4905+ goto error_unlock;
4906+ }
4907+ start += PAGE_SIZE;
4908+ pos += PAGE_SIZE;
4909+ size -= PAGE_SIZE;
4910+ }
4911+
4912+ vma->vm_ops = &timblogiw_vm_ops;
4913+ vma->vm_private_data = &lw->frame[i];
4914+ timblogiw_vm_open(vma);
4915+ ret = 0;
4916+
4917+error_unlock:
4918+ mutex_unlock(&lw->fileop_lock);
4919+ return ret;
4920+}
4921+
4922+
4923+void timblogiw_vdev_release(struct video_device *vdev)
4924+{
4925+ kfree(vdev);
4926+}
4927+
4928+static const struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
4929+ .vidioc_querycap = timblogiw_querycap,
4930+ .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
4931+ .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
4932+ .vidioc_try_fmt_vid_cap = timblogiw_try_fmt,
4933+ .vidioc_s_fmt_vid_cap = timblogiw_try_fmt,
4934+ .vidioc_reqbufs = timblogiw_reqbufs,
4935+ .vidioc_querybuf = timblogiw_querybuf,
4936+ .vidioc_qbuf = timblogiw_qbuf,
4937+ .vidioc_dqbuf = timblogiw_dqbuf,
4938+ .vidioc_g_std = timblogiw_g_std,
4939+ .vidioc_s_std = timblogiw_s_std,
4940+ .vidioc_enum_input = timblogiw_enuminput,
4941+ .vidioc_g_input = timblogiw_g_input,
4942+ .vidioc_s_input = timblogiw_s_input,
4943+ .vidioc_streamon = timblogiw_streamon,
4944+ .vidioc_streamoff = timblogiw_streamoff,
4945+ .vidioc_querystd = timblogiw_querystd,
4946+ .vidioc_enum_framesizes = timblogiw_enum_framesizes,
4947+};
4948+
4949+static const struct v4l2_file_operations timblogiw_fops = {
4950+ .owner = THIS_MODULE,
4951+ .open = timblogiw_open,
4952+ .release = timblogiw_close,
4953+ .ioctl = video_ioctl2, /* V4L2 ioctl handler */
4954+ .mmap = timblogiw_mmap,
4955+ .read = timblogiw_read,
4956+};
4957+
4958+static const struct video_device timblogiw_template = {
4959+ .name = TIMBLOGIWIN_NAME,
4960+ .fops = &timblogiw_fops,
4961+ .ioctl_ops = &timblogiw_ioctl_ops,
4962+ .release = &timblogiw_vdev_release,
4963+ .minor = -1,
4964+ .tvnorms = V4L2_STD_PAL | V4L2_STD_NTSC
4965+};
4966+
4967+static int timblogiw_probe(struct platform_device *dev)
4968+{
4969+ int err;
4970+ struct timblogiw *lw = NULL;
4971+ struct resource *iomem;
4972+ struct timb_video_platform_data *pdata = dev->dev.platform_data;
4973+
4974+ if (!pdata) {
4975+ printk(KERN_ERR DRIVER_NAME": Platform data missing\n");
4976+ err = -EINVAL;
4977+ goto err_mem;
4978+ }
4979+
4980+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
4981+ if (!iomem) {
4982+ err = -EINVAL;
4983+ goto err_mem;
4984+ }
4985+
4986+ lw = kzalloc(sizeof(*lw), GFP_KERNEL);
4987+ if (!lw) {
4988+ err = -ENOMEM;
4989+ goto err_mem;
4990+ }
4991+
4992+ if (dev->dev.parent)
4993+ lw->dev = dev->dev.parent;
4994+ else
4995+ lw->dev = &dev->dev;
4996+
4997+ memcpy(&lw->pdata, pdata, sizeof(lw->pdata));
4998+
4999+ mutex_init(&lw->lock);
5000+
5001+ lw->video_dev = video_device_alloc();
5002+ if (!lw->video_dev) {
5003+ err = -ENOMEM;
5004+ goto err_mem;
5005+ }
5006+ *lw->video_dev = timblogiw_template;
5007+
5008+ err = video_register_device(lw->video_dev, VFL_TYPE_GRABBER, 0);
5009+ if (err) {
5010+ printk(KERN_ALERT DRIVER_NAME": Error reg video\n");
5011+ goto err_request;
5012+ }
5013+
5014+ tasklet_init(&lw->tasklet, timblogiw_handleframe, (unsigned long)lw);
5015+
5016+ if (!request_mem_region(iomem->start, resource_size(iomem),
5017+ DRIVER_NAME)) {
5018+ err = -EBUSY;
5019+ goto err_request;
5020+ }
5021+
5022+ lw->membase = ioremap(iomem->start, resource_size(iomem));
5023+ if (!lw->membase) {
5024+ err = -ENOMEM;
5025+ goto err_ioremap;
5026+ }
5027+
5028+ platform_set_drvdata(dev, lw);
5029+ video_set_drvdata(lw->video_dev, lw);
5030+
5031+ return 0;
5032+
5033+err_ioremap:
5034+ release_mem_region(iomem->start, resource_size(iomem));
5035+err_request:
5036+ if (-1 != lw->video_dev->minor)
5037+ video_unregister_device(lw->video_dev);
5038+ else
5039+ video_device_release(lw->video_dev);
5040+err_mem:
5041+ kfree(lw);
5042+ printk(KERN_ERR DRIVER_NAME ": Failed to register: %d\n", err);
5043+
5044+ return err;
5045+}
5046+
5047+static int timblogiw_remove(struct platform_device *dev)
5048+{
5049+ struct timblogiw *lw = platform_get_drvdata(dev);
5050+ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
5051+
5052+ if (-1 != lw->video_dev->minor)
5053+ video_unregister_device(lw->video_dev);
5054+ else
5055+ video_device_release(lw->video_dev);
5056+
5057+ if (lw->sd_enc)
5058+ module_put(lw->enc_owner);
5059+ tasklet_kill(&lw->tasklet);
5060+ iounmap(lw->membase);
5061+ release_mem_region(iomem->start, resource_size(iomem));
5062+ kfree(lw);
5063+
5064+ return 0;
5065+}
5066+
5067+static struct platform_driver timblogiw_platform_driver = {
5068+ .driver = {
5069+ .name = DRIVER_NAME,
5070+ .owner = THIS_MODULE,
5071+ },
5072+ .probe = timblogiw_probe,
5073+ .remove = timblogiw_remove,
5074+};
5075+
5076+/*--------------------------------------------------------------------------*/
5077+
5078+static int __init timblogiw_init(void)
5079+{
5080+ return platform_driver_register(&timblogiw_platform_driver);
5081+}
5082+
5083+static void __exit timblogiw_exit(void)
5084+{
5085+ platform_driver_unregister(&timblogiw_platform_driver);
5086+}
5087+
5088+module_init(timblogiw_init);
5089+module_exit(timblogiw_exit);
5090+
5091+MODULE_DESCRIPTION("Timberdale Video In driver");
5092+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
5093+MODULE_LICENSE("GPL v2");
5094+MODULE_ALIAS("platform:"DRIVER_NAME);
5095+
5096diff -uNr linux-2.6.31/drivers/media/video/timblogiw.h linux-2.6.31.new/drivers/media/video/timblogiw.h
5097--- linux-2.6.31/drivers/media/video/timblogiw.h 1969-12-31 16:00:00.000000000 -0800
5098+++ linux-2.6.31.new/drivers/media/video/timblogiw.h 2009-10-23 11:17:28.000000000 -0700
5099@@ -0,0 +1,96 @@
5100+/*
5101+ * timblogiw.h timberdale FPGA LogiWin Video In driver defines
5102+ * Copyright (c) 2009 Intel Corporation
5103+ *
5104+ * This program is free software; you can redistribute it and/or modify
5105+ * it under the terms of the GNU General Public License version 2 as
5106+ * published by the Free Software Foundation.
5107+ *
5108+ * This program is distributed in the hope that it will be useful,
5109+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
5110+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5111+ * GNU General Public License for more details.
5112+ *
5113+ * You should have received a copy of the GNU General Public License
5114+ * along with this program; if not, write to the Free Software
5115+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
5116+ */
5117+
5118+/* Supports:
5119+ * Timberdale FPGA LogiWin Video In
5120+ */
5121+
5122+#ifndef _TIMBLOGIW_H
5123+#define _TIMBLOGIW_H
5124+
5125+#include <linux/interrupt.h>
5126+#include <media/timb_video.h>
5127+#include <linux/completion.h>
5128+
5129+#define TIMBLOGIWIN_NAME "Timberdale Video-In"
5130+
5131+#define TIMBLOGIW_NUM_FRAMES 10
5132+
5133+
5134+enum timblogiw_stream_state {
5135+ STREAM_OFF,
5136+ STREAM_ON,
5137+};
5138+
5139+enum timblogiw_frame_state {
5140+ F_UNUSED = 0,
5141+ F_QUEUED,
5142+ F_DONE,
5143+};
5144+
5145+struct timblogiw_frame {
5146+ void *bufmem;
5147+ struct v4l2_buffer buf;
5148+ enum timblogiw_frame_state state;
5149+ struct list_head frame;
5150+ unsigned long vma_use_count;
5151+};
5152+
5153+struct timblogiw_tvnorm {
5154+ v4l2_std_id std;
5155+ u16 width;
5156+ u16 height;
5157+};
5158+
5159+
5160+
5161+struct timbdma_transfer {
5162+ dma_addr_t handle;
5163+ void *buf;
5164+ void *desc;
5165+};
5166+
5167+struct timblogiw_dma_control {
5168+ struct timbdma_transfer transfer[2];
5169+ struct timbdma_transfer *filled;
5170+ int curr;
5171+};
5172+
5173+struct timblogiw {
5174+ struct timblogiw_frame frame[TIMBLOGIW_NUM_FRAMES];
5175+ int num_frames;
5176+ unsigned int frame_count;
5177+ struct list_head inqueue, outqueue;
5178+ spinlock_t queue_lock; /* mutual exclusion */
5179+ enum timblogiw_stream_state stream;
5180+ struct video_device *video_dev;
5181+ struct mutex lock, fileop_lock;
5182+ wait_queue_head_t wait_frame;
5183+ struct completion irq_done;
5184+ struct timblogiw_tvnorm const *cur_norm;
5185+ struct device *dev;
5186+ struct timblogiw_dma_control dma;
5187+ void __iomem *membase;
5188+ struct tasklet_struct tasklet;
5189+ struct timb_video_platform_data pdata;
5190+ struct v4l2_subdev *sd_enc; /* encoder */
5191+ struct module *enc_owner;
5192+};
5193+
5194+#endif /* _TIMBLOGIW_H */
5195+
5196diff -uNr linux-2.6.31/drivers/mfd/Kconfig linux-2.6.31.new/drivers/mfd/Kconfig
5197--- linux-2.6.31/drivers/mfd/Kconfig 2009-10-23 11:18:30.000000000 -0700
5198+++ linux-2.6.31.new/drivers/mfd/Kconfig 2009-10-23 11:17:29.000000000 -0700
5199@@ -263,6 +263,25 @@
5200 This enables the PCAP ASIC present on EZX Phones. This is
5201 needed for MMC, TouchScreen, Sound, USB, etc..
5202
5203+config MFD_TIMBERDALE
5204+ tristate "Support for the Timberdale FPGA"
5205+ select MFD_CORE
5206+ depends on PCI
5207+ ---help---
5208+ This is the core driver for the timberdale FPGA. This device is a
5209+	  multifunctional device which may provide numerous interfaces.
5210+
5211+	  The timberdale FPGA can be found on the Intel Atom development board
5212+	  for automotive in-vehicle infotainment, called Russellville.
5213+
5214+config MFD_TIMBERDALE_DMA
5215+ tristate "Support for timberdale DMA"
5216+ depends on MFD_TIMBERDALE
5217+ depends on HAS_IOMEM
5218+ ---help---
5219+	  Add support for the DMA block inside the timberdale FPGA. This makes it
5220+	  possible to do DMA transfers directly to some of the blocks inside the FPGA.
5221+
5222 endmenu
5223
5224 menu "Multimedia Capabilities Port drivers"
5225diff -uNr linux-2.6.31/drivers/mfd/Makefile linux-2.6.31.new/drivers/mfd/Makefile
5226--- linux-2.6.31/drivers/mfd/Makefile 2009-10-23 11:18:30.000000000 -0700
5227+++ linux-2.6.31.new/drivers/mfd/Makefile 2009-10-23 11:17:29.000000000 -0700
5228@@ -44,3 +44,7 @@
5229 obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
5230 obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
5231 obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
5232+
5233+obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
5234+obj-$(CONFIG_MFD_TIMBERDALE_DMA) += timbdma.o
5235+
5236diff -uNr linux-2.6.31/drivers/mfd/timbdma.c linux-2.6.31.new/drivers/mfd/timbdma.c
5237--- linux-2.6.31/drivers/mfd/timbdma.c 1969-12-31 16:00:00.000000000 -0800
5238+++ linux-2.6.31.new/drivers/mfd/timbdma.c 2009-10-23 11:17:29.000000000 -0700
5239@@ -0,0 +1,542 @@
5240+/*
5241+ * timbdma.c timberdale FPGA DMA driver
5242+ * Copyright (c) 2009 Intel Corporation
5243+ *
5244+ * This program is free software; you can redistribute it and/or modify
5245+ * it under the terms of the GNU General Public License version 2 as
5246+ * published by the Free Software Foundation.
5247+ *
5248+ * This program is distributed in the hope that it will be useful,
5249+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
5250+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5251+ * GNU General Public License for more details.
5252+ *
5253+ * You should have received a copy of the GNU General Public License
5254+ * along with this program; if not, write to the Free Software
5255+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
5256+ */
5257+
5258+/* Supports:
5259+ * Timberdale FPGA DMA engine
5260+ */
5261+
5262+#include <linux/version.h>
5263+#include <linux/module.h>
5264+#include <linux/interrupt.h>
5265+#include <linux/platform_device.h>
5266+#include <linux/io-mapping.h>
5267+#include <linux/dma-mapping.h>
5268+#include <linux/mfd/timbdma.h>
5269+
5270+#define DRIVER_NAME "timb-dma"
5271+
5272+#define TIMBDMA_ACR 0x34
5273+#define TIMBDMA_32BIT_ADDR 0x01
5274+
5275+#define TIMBDMA_ISR 0x080000
5276+#define TIMBDMA_IPR 0x080004
5277+#define TIMBDMA_IER 0x080008
5278+
5279+/* DMA configuration registers */
5280+/* RX registers */
5281+#define TIMBDMA_OFFS_RX_DHAR 0x00
5282+#define TIMBDMA_OFFS_RX_DLAR 0x04
5283+#define TIMBDMA_OFFS_RX_LR 0x0C
5284+#define TIMBDMA_OFFS_RX_BLR 0x10
5285+#define TIMBDMA_OFFS_RX_ER 0x14
5286+#define TIMBDMA_RX_EN 0x01
5287+/* bytes per Row, video specific register */
5288+#define TIMBDMA_OFFS_RX_BPRR 0x30
5289+
5290+/* TX registers */
5291+#define TIMBDMA_OFFS_TX_DHAR 0x18
5292+#define TIMBDMA_OFFS_TX_DLAR 0x1C
5293+#define TIMBDMA_OFFS_TX_BLR 0x24
5294+#define TIMBDMA_OFFS_TX_LR 0x28
5295+
5296+#define DMA_DESC_SIZE 8
5297+
5298+struct dma_desc {
5299+ u32 len;
5300+ u32 chunk_size;
5301+ u8 buf[0];
5302+};
5303+
5304+struct timbdma_control {
5305+ timbdma_interruptcb callback;
5306+ void *callback_data;
5307+ dma_addr_t desc;
5308+ int desc_len;
5309+ /* the following are used to store a desc while the hw has not been
5310+ * probed yet
5311+ */
5312+ struct dma_desc *stored_desc;
5313+ int stored_bytes_per_row;
5314+};
5315+
5316+struct timbdma_dev {
5317+ void __iomem *membase;
5318+ struct device *dev;
5319+ struct timbdma_control control[DMA_IRQS];
5320+ spinlock_t lock; /* mutual exclusion */
5321+};
5322+
5323+static struct timbdma_dev *self_g;
5324+
5325+
5326+void *timbdma_alloc_desc(u32 size, u16 alignment)
5327+{
5328+ /* calculate the number of chunks needed */
5329+ int chunk_size = USHORT_MAX - (USHORT_MAX % alignment);
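+	/* chunk_size is the largest multiple of the alignment that still fits
+	 * in the 16-bit length field of a descriptor entry */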
5330+ int chunks = size / chunk_size;
5331+ int len;
5332+ struct dma_desc *dma_desc;
5333+
5334+ if (size % chunk_size)
5335+ chunks++;
5336+
5337+ len = sizeof(struct dma_desc) + DMA_DESC_SIZE * chunks;
5338+
5339+ dma_desc = kzalloc(len, GFP_KERNEL);
5340+ if (dma_desc) {
5341+ dma_desc->len = DMA_DESC_SIZE * chunks;
5342+ dma_desc->chunk_size = chunk_size;
5343+ }
5344+ return dma_desc;
5345+}
5346+EXPORT_SYMBOL(timbdma_alloc_desc);
5347+
5348+void timbdma_free_desc(void *desc)
5349+{
5350+ kfree(desc);
5351+}
5352+EXPORT_SYMBOL(timbdma_free_desc);
5353+
5354+int timbdma_prep_desc(void *desc, dma_addr_t addr, u32 size)
5355+{
5356+ struct dma_desc *dma_desc = desc;
5357+ u8 *buf = dma_desc->buf;
5358+ dma_addr_t cur_addr = addr;
5359+ int chunks = size / dma_desc->chunk_size;
5360+ if (size % dma_desc->chunk_size)
5361+ chunks++;
5362+
5363+ if (dma_desc->len < chunks * DMA_DESC_SIZE)
5364+ return -EINVAL;
5365+
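+	/* Build one 8-byte descriptor entry per chunk: bytes 4-7 hold the bus
+	 * address and bytes 2-3 the chunk length, least significant byte first;
+	 * byte 0 carries the control flags (0x20 transfer, 0x01 valid, and 0x02
+	 * end-of-chain, set on the final entry below). */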
5366+ while (size > 0) {
5367+ int chunk_size = dma_desc->chunk_size;
5368+ if (chunk_size > size)
5369+ chunk_size = size;
5370+ buf[7] = (cur_addr >> 24) & 0xff;
5371+ buf[6] = (cur_addr >> 16) & 0xff;
5372+ buf[5] = (cur_addr >> 8) & 0xff;
5373+ buf[4] = (cur_addr >> 0) & 0xff;
5374+
5375+ buf[3] = (chunk_size >> 8) & 0xff;
5376+ buf[2] = (chunk_size >> 0) & 0xff;
5377+
5378+ buf[1] = 0x00;
5379+ buf[0] = 0x21; /* tran, valid */
5380+
5381+ buf += DMA_DESC_SIZE;
5382+ cur_addr += chunk_size;
5383+ size -= chunk_size;
5384+ }
5385+
5386+ /* make sure to mark the last one as end */
5387+ (buf-DMA_DESC_SIZE)[0] |= 0x2;
5388+
5389+ return 0;
5390+}
5391+EXPORT_SYMBOL(timbdma_prep_desc);
5392+
5393+static irqreturn_t timbdma_handleinterrupt(int irq, void *devid)
5394+{
5395+ struct timbdma_dev *dev = (struct timbdma_dev *)devid;
5396+ u32 ipr, ier;
5397+ int i;
5398+
5399+ ipr = ioread32(dev->membase + TIMBDMA_IPR);
5400+	/* the MSI-X controller is level triggered; help it a bit
5401+	 * by disabling interrupts here and re-enabling them at the end.
5402+ */
5403+ ier = ioread32(dev->membase + TIMBDMA_IER);
5404+ iowrite32(0, dev->membase + TIMBDMA_IER);
5405+
5406+ if (ipr) {
5407+ /* ack */
5408+ iowrite32(ipr, dev->membase + TIMBDMA_ISR);
5409+
5410+ /* call the callbacks */
5411+ for (i = 0; i < DMA_IRQS; i++) {
5412+ int mask = 1 << i;
5413+ if (ipr & mask) {
5414+ struct timbdma_control *ctrl = dev->control + i;
5415+ struct timbdma_control *unmap_ctrl = ctrl;
5416+
5417+ /* special case for video frame drop */
5418+ if (mask == DMA_IRQ_VIDEO_DROP)
5419+ unmap_ctrl = dev->control + i - 1;
5420+
5421+ /* unmap memory */
5422+ dma_unmap_single(dev->dev, unmap_ctrl->desc,
5423+ unmap_ctrl->desc_len, DMA_TO_DEVICE);
5424+ unmap_ctrl->desc = 0;
5425+
5426+ if (ctrl->callback)
5427+ ctrl->callback(mask,
5428+ ctrl->callback_data);
5429+ }
5430+ }
5431+
5432+ iowrite32(ier, dev->membase + TIMBDMA_IER);
5433+ return IRQ_HANDLED;
5434+ } else {
5435+ iowrite32(ier, dev->membase + TIMBDMA_IER);
5436+ return IRQ_NONE;
5437+ }
5438+}
5439+
5440+static int __timbdma_start(struct timbdma_dev *dev, int index,
5441+ struct dma_desc *dma_desc, int bytes_per_row)
5442+{
5443+ u32 offset;
5444+ unsigned long flags;
5445+ struct timbdma_control *ctrl;
5446+ int err;
5447+
5448+ ctrl = dev->control + index;
5449+
5450+ BUG_ON(ctrl->desc);
5451+
5452+ /* check if we already have a descriptor */
5453+ if (ctrl->desc)
5454+ return -EALREADY;
5455+
5456+ /* map up the descriptor */
5457+ ctrl->desc = dma_map_single(dev->dev, dma_desc->buf, dma_desc->len,
5458+ DMA_TO_DEVICE);
5459+ err = dma_mapping_error(dev->dev, ctrl->desc);
5460+ if (err) {
5461+ ctrl->desc = 0;
5462+ return err;
5463+ }
5464+ ctrl->desc_len = dma_desc->len;
5465+
5466+ /* now enable the DMA transfer */
5467+ offset = index / 2 * 0x40;
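+	/* each RX/TX channel pair shares a 0x40-byte register window; even
+	 * indexes are RX channels, odd indexes are TX channels */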
5468+
5469+ spin_lock_irqsave(&dev->lock, flags);
5470+ if (!(index % 2)) {
5471+ /* RX */
5472+ /* descriptor address */
5473+ iowrite32(0, dev->membase + offset + TIMBDMA_OFFS_RX_DHAR);
5474+ iowrite32(ctrl->desc, dev->membase + offset +
5475+ TIMBDMA_OFFS_RX_DLAR);
5476+ /* Bytes per line */
5477+ iowrite32(bytes_per_row, dev->membase + offset +
5478+ TIMBDMA_OFFS_RX_BPRR);
5479+ /* enable RX */
5480+ iowrite32(TIMBDMA_RX_EN, dev->membase + offset +
5481+ TIMBDMA_OFFS_RX_ER);
5482+ } else {
5483+ /* TX */
5484+ /* address high */
5485+ iowrite32(0, dev->membase + offset + TIMBDMA_OFFS_TX_DHAR);
5486+ iowrite32(ctrl->desc, dev->membase + offset +
5487+ TIMBDMA_OFFS_TX_DLAR);
5488+ }
5489+ spin_unlock_irqrestore(&dev->lock, flags);
5490+
5491+ return 0;
5492+}
5493+int timbdma_start(u32 flag, void *desc, int bytes_per_row)
5494+{
5495+ int i;
5496+ struct timbdma_dev *dev = self_g;
5497+ struct dma_desc *dma_desc = desc;
5498+ int ret = 0;
5499+
5500+ /* only allow 1 flag bit to be set */
5501+ for (i = 0; i < DMA_IRQS && !(flag & (1 << i)); i++)
5502+ ;
5503+ if (i == DMA_IRQS || (flag & ~(1 << i)))
5504+ return -EINVAL;
5505+
5506+ if (!dev->membase) {
5507+		/* the physical DMA device has not shown up yet */
5508+ unsigned long flags;
5509+ struct timbdma_control *ctrl = dev->control + i;
5510+ BUG_ON(ctrl->stored_desc);
5511+ if (ctrl->stored_desc)
5512+ ret = -EALREADY;
5513+ else {
5514+ spin_lock_irqsave(&dev->lock, flags);
5515+ ctrl->stored_desc = desc;
5516+ ctrl->stored_bytes_per_row = bytes_per_row;
5517+ spin_unlock_irqrestore(&dev->lock, flags);
5518+ }
5519+ } else
5520+ ret = __timbdma_start(dev, i, dma_desc, bytes_per_row);
5521+
5522+ if (ret)
5523+ printk(KERN_ERR DRIVER_NAME": Failed to start DMA: %d\n", ret);
5524+ return ret;
5525+}
5526+EXPORT_SYMBOL(timbdma_start);
5527+
5528+int timbdma_stop(u32 flags)
5529+{
5530+ int i;
5531+ unsigned long irqflags;
5532+ struct timbdma_dev *dev = self_g;
5533+ int ret = 0;
5534+
5535+ spin_lock_irqsave(&dev->lock, irqflags);
5536+
5537+ /* now disable the DMA transfers */
5538+ for (i = 0; i < DMA_IRQS; i++)
5539+ if (flags & (1 << i)) {
5540+ /*
5541+ RX enable registers are located at:
5542+ 0x14
5543+ 0x54
5544+ 0x94
5545+
5546+ TX DESC ADDR LOW registers are located at:
5547+ 0x1C
5548+ 0x5C
5549+ */
5550+ struct timbdma_control *ctrl = dev->control + i;
5551+ if (ctrl->desc) {
5552+ u32 offset = i / 2 * 0x40;
5553+
5554+ if (!(i % 2)) {
5555+ /* even -> RX enable */
5556+ offset += TIMBDMA_OFFS_RX_ER;
5557+ /** TODO: FIX received length */
5558+ } else {
5559+ /* odd -> TX desc addr low */
5560+ offset += TIMBDMA_OFFS_TX_DLAR;
5561+					/** TODO: FIX written length */
5562+ }
5563+
5564+ if (dev->membase)
5565+ iowrite32(0, dev->membase + offset);
5566+
5567+ dma_unmap_single(dev->dev, ctrl->desc,
5568+ ctrl->desc_len, DMA_TO_DEVICE);
5569+ ctrl->desc = 0;
5570+ } else if (ctrl->stored_desc)
5571+ ctrl->stored_desc = NULL;
5572+ }
5573+
5574+ if (dev->membase)
5575+ /* ack any pending IRQs */
5576+ iowrite32(flags, dev->membase + TIMBDMA_ISR);
5577+
5578+ spin_unlock_irqrestore(&dev->lock, irqflags);
5579+
5580+ return ret;
5581+}
5582+EXPORT_SYMBOL(timbdma_stop);
5583+
5584+void timbdma_set_interruptcb(u32 flags, timbdma_interruptcb icb, void *data)
5585+{
5586+ int i;
5587+ unsigned long irqflags;
5588+ struct timbdma_dev *dev = self_g;
5589+ u32 ier;
5590+
5591+ spin_lock_irqsave(&dev->lock, irqflags);
5592+
5593+ for (i = 0; i < DMA_IRQS; i++)
5594+ if (flags & (1 << i)) {
5595+ struct timbdma_control *ctrl = dev->control + i;
5596+ ctrl->callback = icb;
5597+ ctrl->callback_data = data;
5598+ }
5599+
5600+	/* the DMA device might not have shown up yet */
5601+ if (dev->membase) {
5602+ /* Ack any pending IRQ */
5603+ iowrite32(flags, dev->membase + TIMBDMA_ISR);
5604+
5605+ /* if a null callback is given -> clear interrupt,
5606+ * else -> enable
5607+ */
5608+ ier = ioread32(dev->membase + TIMBDMA_IER);
5609+ if (icb != NULL)
5610+ ier |= flags;
5611+ else
5612+ ier &= ~flags;
5613+ iowrite32(ier, dev->membase + TIMBDMA_IER);
5614+ }
5615+
5616+ spin_unlock_irqrestore(&dev->lock, irqflags);
5617+}
5618+EXPORT_SYMBOL(timbdma_set_interruptcb);
5619+
5620+static void timbdma_start_operations(struct timbdma_dev *self)
5621+{
5622+ int i;
5623+ u32 ier;
5624+ unsigned long flags;
5625+
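+	/* enable interrupts for every channel that already has a callback registered */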
5626+ spin_lock_irqsave(&self->lock, flags);
5627+ ier = ioread32(self->membase + TIMBDMA_IER);
5628+ for (i = 0; i < DMA_IRQS; i++)
5629+ if (self->control[i].callback)
5630+ ier |= 1 << i;
5631+ iowrite32(ier, self->membase + TIMBDMA_IER);
5632+ spin_unlock_irqrestore(&self->lock, flags);
5633+
5634+ /* look for any transfers that were started before the HW was
5635+ * available, and start them
5636+ */
5637+ for (i = 0; i < DMA_IRQS; i++) {
5638+ struct timbdma_control *ctrl = self->control + i;
5639+ if (ctrl->stored_desc) {
5640+ struct dma_desc *dma_desc = ctrl->stored_desc;
5641+ ctrl->stored_desc = NULL;
5642+ if (__timbdma_start(self, i, dma_desc,
5643+ ctrl->stored_bytes_per_row))
5644+ printk(KERN_ERR DRIVER_NAME
5645+ ": Failed to start DMA\n");
5646+ }
5647+ }
5648+}
5649+
5650+
5651+static int timbdma_probe(struct platform_device *dev)
5652+{
5653+ int err, irq;
5654+ struct resource *iomem;
5655+ struct timbdma_dev *self = self_g;
5656+
5657+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
5658+ if (!iomem) {
5659+ err = -EINVAL;
5660+ goto err_request;
5661+ }
5662+
5663+ if (dev->dev.parent)
5664+ self->dev = dev->dev.parent;
5665+ else
5666+ self->dev = &dev->dev;
5667+
5668+ if (!request_mem_region(iomem->start,
5669+ resource_size(iomem), DRIVER_NAME)) {
5670+ err = -EBUSY;
5671+ goto err_request;
5672+ }
5673+
5674+ self->membase = ioremap(iomem->start, resource_size(iomem));
5675+ if (!self->membase) {
5676+ printk(KERN_ERR DRIVER_NAME ": Failed to remap I/O memory\n");
5677+ err = -ENOMEM;
5678+ goto err_ioremap;
5679+ }
5680+
5681+ /* 32bit addressing */
5682+ iowrite32(TIMBDMA_32BIT_ADDR, self->membase + TIMBDMA_ACR);
5683+
5684+ /* disable and clear any interrupts */
5685+ iowrite32(0x0, self->membase + TIMBDMA_IER);
5686+ iowrite32(0x0, self->membase + TIMBDMA_ISR);
5687+
5688+ /* register interrupt */
5689+ irq = platform_get_irq(dev, 0);
5690+ if (irq < 0) {
5691+ err = irq;
5692+ goto err_get_irq;
5693+ }
5694+
5695+ /* request IRQ */
5696+ err = request_irq(irq, timbdma_handleinterrupt, IRQF_SHARED,
5697+ DRIVER_NAME, self);
5698+ if (err) {
5699+ printk(KERN_ERR DRIVER_NAME ": Failed to request IRQ\n");
5700+ goto err_get_irq;
5701+ }
5702+
5703+ platform_set_drvdata(dev, self);
5704+
5705+ /* assign the global pointer */
5706+ self_g = self;
5707+
5708+ timbdma_start_operations(self);
5709+
5710+ return 0;
5711+
5712+err_get_irq:
5713+ iounmap(self->membase);
5714+err_ioremap:
5715+ release_mem_region(iomem->start, resource_size(iomem));
5716+err_request:
5717+ printk(KERN_ERR DRIVER_NAME ": Failed to register Timberdale DMA: %d\n",
5718+ err);
5719+ self->membase = NULL;
5720+ self->dev = NULL;
5721+
5722+ return err;
5723+}
5724+
5725+static int timbdma_remove(struct platform_device *dev)
5726+{
5727+ struct timbdma_dev *self = platform_get_drvdata(dev);
5728+ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
5729+
5730+ free_irq(platform_get_irq(dev, 0), self);
5731+ iounmap(self->membase);
5732+ release_mem_region(iomem->start, resource_size(iomem));
5733+ self->membase = NULL;
5734+ self->dev = NULL;
5735+ return 0;
5736+}
5737+
5738+static struct platform_driver timbdma_platform_driver = {
5739+ .driver = {
5740+ .name = DRIVER_NAME,
5741+ .owner = THIS_MODULE,
5742+ },
5743+ .probe = timbdma_probe,
5744+ .remove = timbdma_remove,
5745+};
5746+
5747+/*--------------------------------------------------------------------------*/
5748+
5749+static int __init timbdma_init(void)
5750+{
5751+ struct timbdma_dev *self;
5752+ int err;
5753+
5754+ self = kzalloc(sizeof(*self), GFP_KERNEL);
5755+ if (!self)
5756+ return -ENOMEM;
5757+
5758+ spin_lock_init(&self->lock);
5759+
5760+ self_g = self;
5761+ err = platform_driver_register(&timbdma_platform_driver);
5762+ if (err)
5763+ kfree(self);
5764+
5765+ return err;
5766+}
5767+
5768+static void __exit timbdma_exit(void)
5769+{
5770+ platform_driver_unregister(&timbdma_platform_driver);
5771+ kfree(self_g);
5772+}
5773+
5774+module_init(timbdma_init);
5775+module_exit(timbdma_exit);
5776+
5777+MODULE_DESCRIPTION("Timberdale DMA driver");
5778+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
5779+MODULE_LICENSE("GPL v2");
5780+MODULE_ALIAS("platform:"DRIVER_NAME);
5781+
5782diff -uNr linux-2.6.31/drivers/mfd/timberdale.c linux-2.6.31.new/drivers/mfd/timberdale.c
5783--- linux-2.6.31/drivers/mfd/timberdale.c 1969-12-31 16:00:00.000000000 -0800
5784+++ linux-2.6.31.new/drivers/mfd/timberdale.c 2009-10-23 11:17:29.000000000 -0700
5785@@ -0,0 +1,914 @@
5786+/*
5787+ * timberdale.c timberdale FPGA mfd shim driver
5788+ * Copyright (c) 2009 Intel Corporation
5789+ *
5790+ * This program is free software; you can redistribute it and/or modify
5791+ * it under the terms of the GNU General Public License version 2 as
5792+ * published by the Free Software Foundation.
5793+ *
5794+ * This program is distributed in the hope that it will be useful,
5795+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
5796+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5797+ * GNU General Public License for more details.
5798+ *
5799+ * You should have received a copy of the GNU General Public License
5800+ * along with this program; if not, write to the Free Software
5801+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
5802+ */
5803+
5804+/* Supports:
5805+ * Timberdale FPGA
5806+ */
5807+
5808+#include <linux/kernel.h>
5809+#include <linux/module.h>
5810+#include <linux/pci.h>
5811+#include <linux/msi.h>
5812+#include <linux/init.h>
5813+#include <linux/interrupt.h>
5814+#include <linux/platform_device.h>
5815+#include <linux/mfd/core.h>
5816+#include <linux/irq.h>
5817+
5818+#include <linux/timb_gpio.h>
5819+
5820+#include <linux/i2c.h>
5821+#include <linux/i2c-ocores.h>
5822+#include <linux/i2c-xiic.h>
5823+#include <linux/i2c/tsc2007.h>
5824+#include <linux/can/platform/ascb.h>
5825+
5826+#include <linux/spi/spi.h>
5827+#include <linux/spi/xilinx_spi.h>
5828+#include <linux/spi/max7301.h>
5829+#include <linux/spi/mc33880.h>
5830+
5831+#include <media/timb_video.h>
5832+#include <media/timb_radio.h>
5833+#include <linux/most/timbmlb.h>
5834+
5835+#include <sound/timbi2s.h>
5836+
5837+#include "timberdale.h"
5838+
5839+#define DRIVER_NAME "timberdale"
5840+
5841+struct timberdale_device {
5842+ resource_size_t intc_mapbase;
5843+ resource_size_t ctl_mapbase;
5844+ unsigned char __iomem *ctl_membase;
5845+	/* locking against interrupts while modifying registers */
5846+ spinlock_t lock;
5847+ struct {
5848+ u32 major;
5849+ u32 minor;
5850+ u32 config;
5851+ } fw;
5852+};
5853+
5854+/*--------------------------------------------------------------------------*/
5855+
5856+static struct tsc2007_platform_data timberdale_tsc2007_platform_data = {
5857+ .model = 2003,
5858+ .x_plate_ohms = 100
5859+};
5860+
5861+static struct ascb_platform_data timberdale_ascb_platform_data = {
5862+ .gpio_pin = GPIO_PIN_ASCB
5863+};
5864+
5865+static struct i2c_board_info timberdale_i2c_board_info[] = {
5866+ {
5867+ I2C_BOARD_INFO("tsc2007", 0x48),
5868+ .platform_data = &timberdale_tsc2007_platform_data,
5869+ .irq = IRQ_TIMBERDALE_TSC_INT
5870+ },
5871+ {
5872+ /* Requires jumper JP9 to be off */
5873+ I2C_BOARD_INFO("adv7180", 0x42 >> 1),
5874+ .irq = IRQ_TIMBERDALE_ADV7180
5875+ },
5876+ {
5877+ I2C_BOARD_INFO("tef6862", 0x60)
5878+ },
5879+ {
5880+ I2C_BOARD_INFO("saa7706h", 0x1C)
5881+ },
5882+ {
5883+ I2C_BOARD_INFO("ascb-can", 0x18),
5884+ .platform_data = &timberdale_ascb_platform_data,
5885+ }
5886+};
5887+
5888+static __devinitdata struct xiic_i2c_platform_data
5889+timberdale_xiic_platform_data = {
5890+ .devices = timberdale_i2c_board_info,
5891+ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
5892+};
5893+
5894+static __devinitdata struct ocores_i2c_platform_data
5895+timberdale_ocores_platform_data = {
5896+ .regstep = 4,
5897+ .clock_khz = 62500,
5898+ .devices = timberdale_i2c_board_info,
5899+ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
5900+};
5901+
5902+const static __devinitconst struct resource timberdale_xiic_resources[] = {
5903+ {
5904+ .start = XIICOFFSET,
5905+ .end = XIICEND,
5906+ .flags = IORESOURCE_MEM,
5907+ },
5908+ {
5909+ .start = IRQ_TIMBERDALE_I2C,
5910+ .end = IRQ_TIMBERDALE_I2C,
5911+ .flags = IORESOURCE_IRQ,
5912+ },
5913+};
5914+
5915+const static __devinitconst struct resource timberdale_ocores_resources[] = {
5916+ {
5917+ .start = OCORESOFFSET,
5918+ .end = OCORESEND,
5919+ .flags = IORESOURCE_MEM,
5920+ },
5921+ {
5922+ .start = IRQ_TIMBERDALE_I2C,
5923+ .end = IRQ_TIMBERDALE_I2C,
5924+ .flags = IORESOURCE_IRQ,
5925+ },
5926+};
5927+
5928+const struct max7301_platform_data timberdale_max7301_platform_data = {
5929+ .base = 200
5930+};
5931+
5932+const struct mc33880_platform_data timberdale_mc33880_platform_data = {
5933+ .base = 100
5934+};
5935+
5936+static struct spi_board_info timberdale_spi_16bit_board_info[] = {
5937+ {
5938+ .modalias = "max7301",
5939+ .max_speed_hz = 26000,
5940+ .chip_select = 2,
5941+ .mode = SPI_MODE_0,
5942+ .platform_data = &timberdale_max7301_platform_data
5943+ },
5944+};
5945+
5946+static struct spi_board_info timberdale_spi_8bit_board_info[] = {
5947+ {
5948+ .modalias = "mc33880",
5949+ .max_speed_hz = 4000,
5950+ .chip_select = 1,
5951+ .mode = SPI_MODE_1,
5952+ .platform_data = &timberdale_mc33880_platform_data
5953+ },
5954+};
5955+
5956+static __devinitdata struct xspi_platform_data timberdale_xspi_platform_data = {
5957+	/* In the current (2009-03-06) revision of
5958+	 * Timberdale we can handle 3 chip selects.
5959+ */
5960+ .num_chipselect = 3,
5961+	/* bits per word and devices will be filled in at runtime depending
5962+ * on the HW config
5963+ */
5964+};
5965+
5966+const static __devinitconst struct resource timberdale_spi_resources[] = {
5967+ {
5968+ .start = SPIOFFSET,
5969+ .end = SPIEND,
5970+ .flags = IORESOURCE_MEM,
5971+ },
5972+ {
5973+ .start = IRQ_TIMBERDALE_SPI,
5974+ .end = IRQ_TIMBERDALE_SPI,
5975+ .flags = IORESOURCE_IRQ,
5976+ },
5977+};
5978+
5979+const static __devinitconst struct resource timberdale_eth_resources[] = {
5980+ {
5981+ .start = ETHOFFSET,
5982+ .end = ETHEND,
5983+ .flags = IORESOURCE_MEM,
5984+ },
5985+ {
5986+ .start = IRQ_TIMBERDALE_ETHSW_IF,
5987+ .end = IRQ_TIMBERDALE_ETHSW_IF,
5988+ .flags = IORESOURCE_IRQ,
5989+ },
5990+};
5991+
5992+static __devinitdata struct timbgpio_platform_data
5993+ timberdale_gpio_platform_data = {
5994+ .gpio_base = 0,
5995+ .nr_pins = GPIO_NR_PINS,
5996+ .irq_base = 200,
5997+};
5998+
5999+const static __devinitconst struct resource timberdale_gpio_resources[] = {
6000+ {
6001+ .start = GPIOOFFSET,
6002+ .end = GPIOEND,
6003+ .flags = IORESOURCE_MEM,
6004+ },
6005+ {
6006+ .start = IRQ_TIMBERDALE_GPIO,
6007+ .end = IRQ_TIMBERDALE_GPIO,
6008+ .flags = IORESOURCE_IRQ,
6009+ },
6010+};
6011+
6012+static __devinitdata struct timbmlb_platform_data
6013+ timberdale_mlb_platform_data = {
6014+ .reset_pin = GPIO_PIN_INIC_RST
6015+};
6016+
6017+const static __devinitconst struct resource timberdale_most_resources[] = {
6018+ {
6019+ .start = MOSTOFFSET,
6020+ .end = MOSTEND,
6021+ .flags = IORESOURCE_MEM,
6022+ },
6023+ {
6024+ .start = IRQ_TIMBERDALE_MLB,
6025+ .end = IRQ_TIMBERDALE_MLB,
6026+ .flags = IORESOURCE_IRQ,
6027+ },
6028+};
6029+
6030+const static __devinitconst struct resource timberdale_mlogicore_resources[] = {
6031+ {
6032+ .start = MLCOREOFFSET,
6033+ .end = MLCOREEND,
6034+ .flags = IORESOURCE_MEM,
6035+ },
6036+ {
6037+ .start = IRQ_TIMBERDALE_MLCORE,
6038+ .end = IRQ_TIMBERDALE_MLCORE,
6039+ .flags = IORESOURCE_IRQ,
6040+ },
6041+ {
6042+ .start = IRQ_TIMBERDALE_MLCORE_BUF,
6043+ .end = IRQ_TIMBERDALE_MLCORE_BUF,
6044+ .flags = IORESOURCE_IRQ,
6045+ },
6046+};
6047+
6048+const static __devinitconst struct resource timberdale_uart_resources[] = {
6049+ {
6050+ .start = UARTOFFSET,
6051+ .end = UARTEND,
6052+ .flags = IORESOURCE_MEM,
6053+ },
6054+ {
6055+ .start = IRQ_TIMBERDALE_UART,
6056+ .end = IRQ_TIMBERDALE_UART,
6057+ .flags = IORESOURCE_IRQ,
6058+ },
6059+};
6060+
6061+const static __devinitconst struct resource timberdale_uartlite_resources[] = {
6062+ {
6063+ .start = UARTLITEOFFSET,
6064+ .end = UARTLITEEND,
6065+ .flags = IORESOURCE_MEM,
6066+ },
6067+ {
6068+ .start = IRQ_TIMBERDALE_UARTLITE,
6069+ .end = IRQ_TIMBERDALE_UARTLITE,
6070+ .flags = IORESOURCE_IRQ,
6071+ },
6072+};
6073+
6074+static __devinitdata struct timbi2s_bus_data timbi2s_bus_data[] = {
6075+ {
6076+ .rx = 0,
6077+ .sample_rate = 8000,
6078+ },
6079+ {
6080+ .rx = 1,
6081+ .sample_rate = 8000,
6082+ },
6083+ {
6084+ .rx = 1,
6085+ .sample_rate = 44100,
6086+ },
6087+};
6088+
6089+static __devinitdata struct timbi2s_platform_data timbi2s_platform_data = {
6090+ .busses = timbi2s_bus_data,
6091+ .num_busses = ARRAY_SIZE(timbi2s_bus_data),
6092+ .main_clk = 62500000,
6093+};
6094+
6095+const static __devinitconst struct resource timberdale_i2s_resources[] = {
6096+ {
6097+ .start = I2SOFFSET,
6098+ .end = I2SEND,
6099+ .flags = IORESOURCE_MEM,
6100+ },
6101+ {
6102+ .start = IRQ_TIMBERDALE_I2S,
6103+ .end = IRQ_TIMBERDALE_I2S,
6104+ .flags = IORESOURCE_IRQ,
6105+ },
6106+};
6107+
6108+static __devinitdata struct timb_video_platform_data
6109+ timberdale_video_platform_data = {
6110+ .i2c_adapter = 0,
6111+ .encoder = "adv7180"
6112+};
6113+
6114+const static __devinitconst struct resource timberdale_radio_resources[] = {
6115+ {
6116+ .start = RDSOFFSET,
6117+ .end = RDSEND,
6118+ .flags = IORESOURCE_MEM,
6119+ },
6120+ {
6121+ .start = IRQ_TIMBERDALE_RDS,
6122+ .end = IRQ_TIMBERDALE_RDS,
6123+ .flags = IORESOURCE_IRQ,
6124+ },
6125+};
6126+
6127+static __devinitdata struct timb_radio_platform_data
6128+ timberdale_radio_platform_data = {
6129+ .i2c_adapter = 0,
6130+ .tuner = "tef6862",
6131+ .dsp = "saa7706h"
6132+};
6133+
6134+const static __devinitconst struct resource timberdale_video_resources[] = {
6135+ {
6136+ .start = LOGIWOFFSET,
6137+ .end = LOGIWEND,
6138+ .flags = IORESOURCE_MEM,
6139+ },
6140+ /*
6141+ note that the "frame buffer" is located in DMA area
6142+ starting at 0x1200000
6143+ */
6144+};
6145+
6146+const static __devinitconst struct resource timberdale_dma_resources[] = {
6147+ {
6148+ .start = DMAOFFSET,
6149+ .end = DMAEND,
6150+ .flags = IORESOURCE_MEM,
6151+ },
6152+ {
6153+ .start = IRQ_TIMBERDALE_DMA,
6154+ .end = IRQ_TIMBERDALE_DMA,
6155+ .flags = IORESOURCE_IRQ,
6156+ },
6157+};
6158+
6159+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
6160+ {
6161+ .name = "timb-uart",
6162+ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
6163+ .resources = timberdale_uart_resources,
6164+ },
6165+ {
6166+ .name = "xiic-i2c",
6167+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
6168+ .resources = timberdale_xiic_resources,
6169+ .platform_data = &timberdale_xiic_platform_data,
6170+ .data_size = sizeof(timberdale_xiic_platform_data),
6171+ },
6172+ {
6173+ .name = "timb-gpio",
6174+ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
6175+ .resources = timberdale_gpio_resources,
6176+ .platform_data = &timberdale_gpio_platform_data,
6177+ .data_size = sizeof(timberdale_gpio_platform_data),
6178+ },
6179+ {
6180+ .name = "timb-i2s",
6181+ .num_resources = ARRAY_SIZE(timberdale_i2s_resources),
6182+ .resources = timberdale_i2s_resources,
6183+ .platform_data = &timbi2s_platform_data,
6184+ .data_size = sizeof(timbi2s_platform_data),
6185+ },
6186+ {
6187+ .name = "timb-most",
6188+ .num_resources = ARRAY_SIZE(timberdale_most_resources),
6189+ .resources = timberdale_most_resources,
6190+ .platform_data = &timberdale_mlb_platform_data,
6191+ .data_size = sizeof(timberdale_mlb_platform_data),
6192+ },
6193+ {
6194+ .name = "timb-video",
6195+ .num_resources = ARRAY_SIZE(timberdale_video_resources),
6196+ .resources = timberdale_video_resources,
6197+ .platform_data = &timberdale_video_platform_data,
6198+ .data_size = sizeof(timberdale_video_platform_data),
6199+ },
6200+ {
6201+ .name = "timb-radio",
6202+ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
6203+ .resources = timberdale_radio_resources,
6204+ .platform_data = &timberdale_radio_platform_data,
6205+ .data_size = sizeof(timberdale_radio_platform_data),
6206+ },
6207+ {
6208+ .name = "xilinx_spi",
6209+ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
6210+ .resources = timberdale_spi_resources,
6211+ .platform_data = &timberdale_xspi_platform_data,
6212+ .data_size = sizeof(timberdale_xspi_platform_data),
6213+ },
6214+ {
6215+ .name = "ks8842",
6216+ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
6217+ .resources = timberdale_eth_resources,
6218+ },
6219+ {
6220+ .name = "timb-dma",
6221+ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
6222+ .resources = timberdale_dma_resources,
6223+ },
6224+};
6225+
6226+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
6227+ {
6228+ .name = "timb-uart",
6229+ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
6230+ .resources = timberdale_uart_resources,
6231+ },
6232+ {
6233+ .name = "uartlite",
6234+ .num_resources = ARRAY_SIZE(timberdale_uartlite_resources),
6235+ .resources = timberdale_uartlite_resources,
6236+ },
6237+ {
6238+ .name = "xiic-i2c",
6239+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
6240+ .resources = timberdale_xiic_resources,
6241+ .platform_data = &timberdale_xiic_platform_data,
6242+ .data_size = sizeof(timberdale_xiic_platform_data),
6243+ },
6244+ {
6245+ .name = "timb-gpio",
6246+ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
6247+ .resources = timberdale_gpio_resources,
6248+ .platform_data = &timberdale_gpio_platform_data,
6249+ .data_size = sizeof(timberdale_gpio_platform_data),
6250+ },
6251+ {
6252+ .name = "timb-mlogicore",
6253+ .num_resources = ARRAY_SIZE(timberdale_mlogicore_resources),
6254+ .resources = timberdale_mlogicore_resources,
6255+ },
6256+ {
6257+ .name = "timb-video",
6258+ .num_resources = ARRAY_SIZE(timberdale_video_resources),
6259+ .resources = timberdale_video_resources,
6260+ .platform_data = &timberdale_video_platform_data,
6261+ .data_size = sizeof(timberdale_video_platform_data),
6262+ },
6263+ {
6264+ .name = "timb-radio",
6265+ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
6266+ .resources = timberdale_radio_resources,
6267+ .platform_data = &timberdale_radio_platform_data,
6268+ .data_size = sizeof(timberdale_radio_platform_data),
6269+ },
6270+ {
6271+ .name = "xilinx_spi",
6272+ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
6273+ .resources = timberdale_spi_resources,
6274+ .platform_data = &timberdale_xspi_platform_data,
6275+ .data_size = sizeof(timberdale_xspi_platform_data),
6276+ },
6277+ {
6278+ .name = "ks8842",
6279+ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
6280+ .resources = timberdale_eth_resources,
6281+ },
6282+ {
6283+ .name = "timb-dma",
6284+ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
6285+ .resources = timberdale_dma_resources,
6286+ },
6287+};
6288+
6289+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
6290+ {
6291+ .name = "timb-uart",
6292+ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
6293+ .resources = timberdale_uart_resources,
6294+ },
6295+ {
6296+ .name = "xiic-i2c",
6297+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
6298+ .resources = timberdale_xiic_resources,
6299+ .platform_data = &timberdale_xiic_platform_data,
6300+ .data_size = sizeof(timberdale_xiic_platform_data),
6301+ },
6302+ {
6303+ .name = "timb-gpio",
6304+ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
6305+ .resources = timberdale_gpio_resources,
6306+ .platform_data = &timberdale_gpio_platform_data,
6307+ .data_size = sizeof(timberdale_gpio_platform_data),
6308+ },
6309+ {
6310+ .name = "timb-video",
6311+ .num_resources = ARRAY_SIZE(timberdale_video_resources),
6312+ .resources = timberdale_video_resources,
6313+ .platform_data = &timberdale_video_platform_data,
6314+ .data_size = sizeof(timberdale_video_platform_data),
6315+ },
6316+ {
6317+ .name = "timb-radio",
6318+ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
6319+ .resources = timberdale_radio_resources,
6320+ .platform_data = &timberdale_radio_platform_data,
6321+ .data_size = sizeof(timberdale_radio_platform_data),
6322+ },
6323+ {
6324+ .name = "xilinx_spi",
6325+ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
6326+ .resources = timberdale_spi_resources,
6327+ .platform_data = &timberdale_xspi_platform_data,
6328+ .data_size = sizeof(timberdale_xspi_platform_data),
6329+ },
6330+ {
6331+ .name = "timb-dma",
6332+ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
6333+ .resources = timberdale_dma_resources,
6334+ },
6335+};
6336+
6337+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
6338+ {
6339+ .name = "timb-uart",
6340+ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
6341+ .resources = timberdale_uart_resources,
6342+ },
6343+ {
6344+ .name = "ocores-i2c",
6345+ .num_resources = ARRAY_SIZE(timberdale_ocores_resources),
6346+ .resources = timberdale_ocores_resources,
6347+ .platform_data = &timberdale_ocores_platform_data,
6348+ .data_size = sizeof(timberdale_ocores_platform_data),
6349+ },
6350+ {
6351+ .name = "timb-gpio",
6352+ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
6353+ .resources = timberdale_gpio_resources,
6354+ .platform_data = &timberdale_gpio_platform_data,
6355+ .data_size = sizeof(timberdale_gpio_platform_data),
6356+ },
6357+ {
6358+ .name = "timb-i2s",
6359+ .num_resources = ARRAY_SIZE(timberdale_i2s_resources),
6360+ .resources = timberdale_i2s_resources,
6361+ .platform_data = &timbi2s_platform_data,
6362+ .data_size = sizeof(timbi2s_platform_data),
6363+ },
6364+ {
6365+ .name = "timb-most",
6366+ .num_resources = ARRAY_SIZE(timberdale_most_resources),
6367+ .resources = timberdale_most_resources,
6368+ .platform_data = &timberdale_mlb_platform_data,
6369+ .data_size = sizeof(timberdale_mlb_platform_data),
6370+ },
6371+ {
6372+ .name = "timb-video",
6373+ .num_resources = ARRAY_SIZE(timberdale_video_resources),
6374+ .resources = timberdale_video_resources,
6375+ .platform_data = &timberdale_video_platform_data,
6376+ .data_size = sizeof(timberdale_video_platform_data),
6377+ },
6378+ {
6379+ .name = "timb-radio",
6380+ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
6381+ .resources = timberdale_radio_resources,
6382+ .platform_data = &timberdale_radio_platform_data,
6383+ .data_size = sizeof(timberdale_radio_platform_data),
6384+ },
6385+ {
6386+ .name = "xilinx_spi",
6387+ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
6388+ .resources = timberdale_spi_resources,
6389+ .platform_data = &timberdale_xspi_platform_data,
6390+ .data_size = sizeof(timberdale_xspi_platform_data),
6391+ },
6392+ {
6393+ .name = "ks8842",
6394+ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
6395+ .resources = timberdale_eth_resources,
6396+ },
6397+ {
6398+ .name = "timb-dma",
6399+ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
6400+ .resources = timberdale_dma_resources,
6401+ },
6402+};
6403+
6404+static const __devinitconst struct resource timberdale_sdhc_resources[] = {
6405+ /* located in bar 1 and bar 2 */
6406+ {
6407+ .start = SDHC0OFFSET,
6408+ .end = SDHC0END,
6409+ .flags = IORESOURCE_MEM,
6410+ },
6411+ {
6412+ .start = IRQ_TIMBERDALE_SDHC,
6413+ .end = IRQ_TIMBERDALE_SDHC,
6414+ .flags = IORESOURCE_IRQ,
6415+ },
6416+};
6417+
6418+static __devinitdata struct mfd_cell timberdale_cells_bar1[] = {
6419+ {
6420+ .name = "sdhci",
6421+ .num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
6422+ .resources = timberdale_sdhc_resources,
6423+ },
6424+};
6425+
6426+static __devinitdata struct mfd_cell timberdale_cells_bar2[] = {
6427+ {
6428+ .name = "sdhci",
6429+ .num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
6430+ .resources = timberdale_sdhc_resources,
6431+ },
6432+};
6433+
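Each mfd_cell in the arrays above turns into a child platform device once the array is handed to mfd_add_devices() in timb_probe() further down: the MFD core rebases the listed memory resources onto the PCI BAR, offsets the IRQ resources by the MSI-X vector base, and copies the cell's platform_data/data_size onto the new device. A minimal sketch of how a child driver would pick this up is shown here; the function name and the timbgpio_platform_data type are illustrative only and not part of this patch.

	static int __devinit timb_child_probe_sketch(struct platform_device *pdev)
	{
		/* MEM resource already rebased onto the PCI BAR by the MFD core */
		struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		int irq = platform_get_irq(pdev, 0);
		/* copied from the cell's .platform_data / .data_size */
		struct timbgpio_platform_data *pdata = pdev->dev.platform_data;

		if (!iomem || irq < 0 || !pdata)
			return -EINVAL;

		/* ioremap(iomem->start, resource_size(iomem)) and register here */
		return 0;
	}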
6434+static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
6435+ char *buf)
6436+{
6437+ struct pci_dev *pdev = to_pci_dev(dev);
6438+ struct timberdale_device *priv = pci_get_drvdata(pdev);
6439+
6440+ return sprintf(buf, "%d.%d.%d\n", priv->fw.major, priv->fw.minor,
6441+ priv->fw.config);
6442+}
6443+
6444+static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
6445+
6446+/*--------------------------------------------------------------------------*/
6447+
6448+static int __devinit timb_probe(struct pci_dev *dev,
6449+ const struct pci_device_id *id)
6450+{
6451+ struct timberdale_device *priv;
6452+ int err, i;
6453+ resource_size_t mapbase;
6454+ struct msix_entry *msix_entries = NULL;
6455+ u8 ip_setup;
6456+
6457+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
6458+ if (!priv)
6459+ return -ENOMEM;
6460+
6461+ spin_lock_init(&priv->lock);
6462+ pci_set_drvdata(dev, priv);
6463+
6464+ err = pci_enable_device(dev);
6465+ if (err)
6466+ goto err_enable;
6467+
6468+ mapbase = pci_resource_start(dev, 0);
6469+ if (!mapbase) {
6470+ printk(KERN_ERR DRIVER_NAME ": No resource\n");
6471+ goto err_start;
6472+ }
6473+
6474+ /* create a resource for the PCI master register */
6475+ priv->ctl_mapbase = mapbase + CHIPCTLOFFSET;
6476+ if (!request_mem_region(priv->ctl_mapbase, CHIPCTLSIZE, "timb-ctl")) {
6477+ printk(KERN_ERR DRIVER_NAME ": Failed to request ctl mem\n");
6478+ goto err_request;
6479+ }
6480+
6481+ priv->ctl_membase = ioremap(priv->ctl_mapbase, CHIPCTLSIZE);
6482+ if (!priv->ctl_membase) {
6483+ printk(KERN_ALERT DRIVER_NAME": Map error, ctl\n");
6484+ goto err_ioremap;
6485+ }
6486+
6487+ /* read the HW config */
6488+ priv->fw.major = ioread32(priv->ctl_membase + TIMB_REV_MAJOR);
6489+ priv->fw.minor = ioread32(priv->ctl_membase + TIMB_REV_MINOR);
6490+ priv->fw.config = ioread32(priv->ctl_membase + TIMB_HW_CONFIG);
6491+
6492+ if (priv->fw.major > TIMB_SUPPORTED_MAJOR) {
6493+ printk(KERN_ERR DRIVER_NAME": The driver supports an older "
6494+ "version of the FPGA, please update the driver to "
6495+ "support %d.%d\n", priv->fw.major, priv->fw.minor);
6496+ goto err_ioremap;
6497+ }
6498+ if (priv->fw.major < TIMB_SUPPORTED_MAJOR ||
6499+ priv->fw.minor < TIMB_REQUIRED_MINOR) {
6500+ printk(KERN_ERR DRIVER_NAME
6501+ ": The FPGA image is too old (%d.%d), "
6502+ "please upgrade the FPGA to at least: %d.%d\n",
6503+ priv->fw.major, priv->fw.minor,
6504+ TIMB_SUPPORTED_MAJOR, TIMB_REQUIRED_MINOR);
6505+ goto err_ioremap;
6506+ }
6507+
6508+ msix_entries = kzalloc(TIMBERDALE_NR_IRQS * sizeof(*msix_entries),
6509+ GFP_KERNEL);
6510+ if (!msix_entries)
6511+ goto err_ioremap;
6512+
6513+ for (i = 0; i < TIMBERDALE_NR_IRQS; i++)
6514+ msix_entries[i].entry = i;
6515+
6516+ err = pci_enable_msix(dev, msix_entries, TIMBERDALE_NR_IRQS);
6517+ if (err) {
6518+ printk(KERN_WARNING DRIVER_NAME
6519+ ": MSI-X init failed: %d, expected entries: %d\n",
6520+ err, TIMBERDALE_NR_IRQS);
6521+ goto err_msix;
6522+ }
6523+
6524+ err = device_create_file(&dev->dev, &dev_attr_fw_ver);
6525+ if (err)
6526+ goto err_create_file;
6527+
6528+ /* Reset all FPGA PLB peripherals */
6529+ iowrite32(0x1, priv->ctl_membase + TIMB_SW_RST);
6530+
6531+ /* update IRQ offsets in I2C board info */
6532+ for (i = 0; i < ARRAY_SIZE(timberdale_i2c_board_info); i++)
6533+ timberdale_i2c_board_info[i].irq =
6534+ msix_entries[timberdale_i2c_board_info[i].irq].vector;
6535+
6536+ /* Update the SPI configuration depending on the HW (8 or 16 bit) */
6537+ if (priv->fw.config & TIMB_HW_CONFIG_SPI_8BIT) {
6538+ timberdale_xspi_platform_data.bits_per_word = 8;
6539+ timberdale_xspi_platform_data.devices =
6540+ timberdale_spi_8bit_board_info;
6541+ timberdale_xspi_platform_data.num_devices =
6542+ ARRAY_SIZE(timberdale_spi_8bit_board_info);
6543+ } else {
6544+ timberdale_xspi_platform_data.bits_per_word = 16;
6545+ timberdale_xspi_platform_data.devices =
6546+ timberdale_spi_16bit_board_info;
6547+ timberdale_xspi_platform_data.num_devices =
6548+ ARRAY_SIZE(timberdale_spi_16bit_board_info);
6549+ }
6550+
6551+ ip_setup = priv->fw.config & TIMB_HW_VER_MASK;
6552+ if (ip_setup == TIMB_HW_VER0)
6553+ err = mfd_add_devices(&dev->dev, -1,
6554+ timberdale_cells_bar0_cfg0,
6555+ ARRAY_SIZE(timberdale_cells_bar0_cfg0),
6556+ &dev->resource[0], msix_entries[0].vector);
6557+ else if (ip_setup == TIMB_HW_VER1)
6558+ err = mfd_add_devices(&dev->dev, -1,
6559+ timberdale_cells_bar0_cfg1,
6560+ ARRAY_SIZE(timberdale_cells_bar0_cfg1),
6561+ &dev->resource[0], msix_entries[0].vector);
6562+ else if (ip_setup == TIMB_HW_VER2)
6563+ err = mfd_add_devices(&dev->dev, -1,
6564+ timberdale_cells_bar0_cfg2,
6565+ ARRAY_SIZE(timberdale_cells_bar0_cfg2),
6566+ &dev->resource[0], msix_entries[0].vector);
6567+ else if (ip_setup == TIMB_HW_VER3)
6568+ err = mfd_add_devices(&dev->dev, -1,
6569+ timberdale_cells_bar0_cfg3,
6570+ ARRAY_SIZE(timberdale_cells_bar0_cfg3),
6571+ &dev->resource[0], msix_entries[0].vector);
6572+ else {
6573+ /* unknown version */
6574+		printk(KERN_ERR "Unknown IP setup: %d.%d.%d\n",
6575+ priv->fw.major, priv->fw.minor, ip_setup);
6576+ err = -ENODEV;
6577+ goto err_mfd;
6578+ }
6579+
6580+ if (err) {
6581+ printk(KERN_WARNING DRIVER_NAME
6582+ ": mfd_add_devices failed: %d\n", err);
6583+ goto err_mfd;
6584+ }
6585+
6586+ err = mfd_add_devices(&dev->dev, 0,
6587+ timberdale_cells_bar1, ARRAY_SIZE(timberdale_cells_bar1),
6588+ &dev->resource[1], msix_entries[0].vector);
6589+ if (err) {
6590+ printk(KERN_WARNING DRIVER_NAME
6591+			": mfd_add_devices failed: %d\n", err);
6592+ goto err_mfd2;
6593+ }
6594+
6595+ /* only version 0 and 3 have the iNand routed to SDHCI */
6596+ if (((priv->fw.config & TIMB_HW_VER_MASK) == TIMB_HW_VER0) ||
6597+ ((priv->fw.config & TIMB_HW_VER_MASK) == TIMB_HW_VER3)) {
6598+ err = mfd_add_devices(&dev->dev, 1, timberdale_cells_bar2,
6599+ ARRAY_SIZE(timberdale_cells_bar2),
6600+ &dev->resource[2], msix_entries[0].vector);
6601+ if (err) {
6602+ printk(KERN_WARNING DRIVER_NAME
6603+ ": mfd_add_devices failed: %d\n", err);
6604+ goto err_mfd2;
6605+ }
6606+ }
6607+
6608+ kfree(msix_entries);
6609+
6610+ printk(KERN_INFO
6611+ "Found Timberdale Card. Rev: %d.%d, HW config: 0x%02x\n",
6612+ priv->fw.major, priv->fw.minor, priv->fw.config);
6613+
6614+ return 0;
6615+
6616+err_mfd2:
6617+ mfd_remove_devices(&dev->dev);
6618+err_mfd:
6619+ device_remove_file(&dev->dev, &dev_attr_fw_ver);
6620+err_create_file:
6621+ pci_disable_msix(dev);
6622+err_msix:
6623+ iounmap(priv->ctl_membase);
6624+err_ioremap:
6625+ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
6626+err_request:
6627+ pci_set_drvdata(dev, NULL);
6628+err_start:
6629+ pci_disable_device(dev);
6630+err_enable:
6631+ kfree(msix_entries);
6632+ kfree(priv);
6633+ pci_set_drvdata(dev, NULL);
6634+ return -ENODEV;
6635+}
6636+
6637+static void __devexit timb_remove(struct pci_dev *dev)
6638+{
6639+ struct timberdale_device *priv = pci_get_drvdata(dev);
6640+
6641+ mfd_remove_devices(&dev->dev);
6642+
6643+ device_remove_file(&dev->dev, &dev_attr_fw_ver);
6644+
6645+ iounmap(priv->ctl_membase);
6646+ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
6647+
6648+ pci_disable_msix(dev);
6649+ pci_disable_device(dev);
6650+ pci_set_drvdata(dev, NULL);
6651+ kfree(priv);
6652+}
6653+
6654+static struct pci_device_id timberdale_pci_tbl[] = {
6655+ { PCI_DEVICE(PCI_VENDOR_ID_TIMB, PCI_DEVICE_ID_TIMB) },
6656+ { 0 }
6657+};
6658+MODULE_DEVICE_TABLE(pci, timberdale_pci_tbl);
6659+
6660+static struct pci_driver timberdale_pci_driver = {
6661+ .name = DRIVER_NAME,
6662+ .id_table = timberdale_pci_tbl,
6663+ .probe = timb_probe,
6664+ .remove = __devexit_p(timb_remove),
6665+};
6666+
6667+static int __init timberdale_init(void)
6668+{
6669+ int err;
6670+
6671+ err = pci_register_driver(&timberdale_pci_driver);
6672+ if (err < 0) {
6673+ printk(KERN_ERR
6674+ "Failed to register PCI driver for %s device.\n",
6675+ timberdale_pci_driver.name);
6676+ return -ENODEV;
6677+ }
6678+
6679+ printk(KERN_INFO "Driver for %s has been successfully registered.\n",
6680+ timberdale_pci_driver.name);
6681+
6682+ return 0;
6683+}
6684+
6685+static void __exit timberdale_exit(void)
6686+{
6687+ pci_unregister_driver(&timberdale_pci_driver);
6688+
6689+ printk(KERN_INFO "Driver for %s has been successfully unregistered.\n",
6690+ timberdale_pci_driver.name);
6691+}
6692+
6693+module_init(timberdale_init);
6694+module_exit(timberdale_exit);
6695+
6696+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
6697+MODULE_VERSION(DRV_VERSION);
6698+MODULE_LICENSE("GPL v2");
6699+
6700diff -uNr linux-2.6.31/drivers/mfd/timberdale.h linux-2.6.31.new/drivers/mfd/timberdale.h
6701--- linux-2.6.31/drivers/mfd/timberdale.h 1969-12-31 16:00:00.000000000 -0800
6702+++ linux-2.6.31.new/drivers/mfd/timberdale.h 2009-10-23 11:17:29.000000000 -0700
6703@@ -0,0 +1,152 @@
6704+/*
6705+ * timberdale.h timberdale FPGA mfd shim driver defines
6706+ * Copyright (c) 2009 Intel Corporation
6707+ *
6708+ * This program is free software; you can redistribute it and/or modify
6709+ * it under the terms of the GNU General Public License version 2 as
6710+ * published by the Free Software Foundation.
6711+ *
6712+ * This program is distributed in the hope that it will be useful,
6713+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
6714+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
6715+ * GNU General Public License for more details.
6716+ *
6717+ * You should have received a copy of the GNU General Public License
6718+ * along with this program; if not, write to the Free Software
6719+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
6720+ */
6721+
6722+/* Supports:
6723+ * Timberdale FPGA
6724+ */
6725+
6726+#ifndef MFD_TIMBERDALE_H
6727+#define MFD_TIMBERDALE_H
6728+
6729+#define DRV_VERSION "1.0"
6730+
6731+/* This driver only supports versions >= 3.8 and < 4.0 */
6732+#define TIMB_SUPPORTED_MAJOR 3
6733+
6734+/* This driver only supports minor versions >= 8 */
6735+#define TIMB_REQUIRED_MINOR 8
6736+
6737+/* Registers of the interrupt controller */
6738+#define ISR 0x00
6739+#define IPR 0x04
6740+#define IER 0x08
6741+#define IAR 0x0c
6742+#define SIE 0x10
6743+#define CIE 0x14
6744+#define MER 0x1c
6745+
6746+/* Registers of the control area */
6747+#define TIMB_REV_MAJOR 0x00
6748+#define TIMB_REV_MINOR 0x04
6749+#define TIMB_HW_CONFIG 0x08
6750+#define TIMB_SW_RST 0x40
6751+
6752+/* bits in the TIMB_HW_CONFIG register */
6753+#define TIMB_HW_CONFIG_SPI_8BIT 0x80
6754+
6755+#define TIMB_HW_VER_MASK 0x0f
6756+#define TIMB_HW_VER0 0x00
6757+#define TIMB_HW_VER1 0x01
6758+#define TIMB_HW_VER2 0x02
6759+#define TIMB_HW_VER3 0x03
6760+
6761+#define OCORESOFFSET 0x0
6762+#define OCORESEND 0x1f
6763+
6764+#define SPIOFFSET 0x80
6765+#define SPIEND 0xff
6766+
6767+#define UARTLITEOFFSET 0x100
6768+#define UARTLITEEND 0x10f
6769+
6770+#define RDSOFFSET 0x180
6771+#define RDSEND 0x183
6772+
6773+#define ETHOFFSET 0x300
6774+#define ETHEND 0x3ff
6775+
6776+#define GPIOOFFSET 0x400
6777+#define GPIOEND 0x7ff
6778+
6779+#define CHIPCTLOFFSET 0x800
6780+#define CHIPCTLEND 0x8ff
6781+#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET)
6782+
6783+#define INTCOFFSET 0xc00
6784+#define INTCEND 0xfff
6785+#define INTCSIZE (INTCEND - INTCOFFSET)
6786+
6787+#define MOSTOFFSET 0x1000
6788+#define MOSTEND 0x13ff
6789+
6790+#define UARTOFFSET 0x1400
6791+#define UARTEND 0x17ff
6792+
6793+#define XIICOFFSET 0x1800
6794+#define XIICEND 0x19ff
6795+
6796+#define I2SOFFSET 0x1C00
6797+#define I2SEND 0x1fff
6798+
6799+#define LOGIWOFFSET 0x30000
6800+#define LOGIWEND 0x37fff
6801+
6802+#define MLCOREOFFSET 0x40000
6803+#define MLCOREEND 0x43fff
6804+
6805+#define DMAOFFSET 0x01000000
6806+#define DMAEND 0x013fffff
6807+
6808+/* SDHC0 is placed in PCI bar 1 */
6809+#define SDHC0OFFSET 0x00
6810+#define SDHC0END 0xff
6811+
6812+/* SDHC1 is placed in PCI bar 2 */
6813+#define SDHC1OFFSET 0x00
6814+#define SDHC1END 0xff
6815+
6816+#define PCI_VENDOR_ID_TIMB 0x10ee
6817+#define PCI_DEVICE_ID_TIMB 0xa123
6818+
6819+#define IRQ_TIMBERDALE_INIC 0
6820+#define IRQ_TIMBERDALE_MLB 1
6821+#define IRQ_TIMBERDALE_GPIO 2
6822+#define IRQ_TIMBERDALE_I2C 3
6823+#define IRQ_TIMBERDALE_UART 4
6824+#define IRQ_TIMBERDALE_DMA 5
6825+#define IRQ_TIMBERDALE_I2S 6
6826+#define IRQ_TIMBERDALE_TSC_INT 7
6827+#define IRQ_TIMBERDALE_SDHC 8
6828+#define IRQ_TIMBERDALE_ADV7180 9
6829+#define IRQ_TIMBERDALE_ETHSW_IF 10
6830+#define IRQ_TIMBERDALE_SPI 11
6831+#define IRQ_TIMBERDALE_UARTLITE 12
6832+#define IRQ_TIMBERDALE_MLCORE 13
6833+#define IRQ_TIMBERDALE_MLCORE_BUF 14
6834+#define IRQ_TIMBERDALE_RDS 15
6835+
6836+#define TIMBERDALE_NR_IRQS 16
6837+
6838+/* Some of the interrupts are level triggered, some are edge triggered */
6839+#define IRQ_TIMBERDALE_EDGE_MASK ((1 << IRQ_TIMBERDALE_ADV7180) | \
6840+ (1 << IRQ_TIMBERDALE_TSC_INT) | \
6841+ (1 << IRQ_TIMBERDALE_MLB) | (1 << IRQ_TIMBERDALE_INIC))
6842+
6843+#define IRQ_TIMBERDALE_LEVEL_MASK ((1 << IRQ_TIMBERDALE_SPI) | \
6844+ (1 << IRQ_TIMBERDALE_ETHSW_IF) | (1 << IRQ_TIMBERDALE_SDHC) | \
6845+ (1 << IRQ_TIMBERDALE_I2S) | (1 << IRQ_TIMBERDALE_UART) | \
6846+ (1 << IRQ_TIMBERDALE_I2C) | (1 << IRQ_TIMBERDALE_GPIO) | \
6847+ (1 << IRQ_TIMBERDALE_DMA))
6848+
6849+#define GPIO_PIN_ASCB 8
6850+#define GPIO_PIN_INIC_RST 14
6851+#define GPIO_PIN_BT_RST 15
6852+#define GPIO_NR_PINS 16
6853+
6854+#endif
6855+
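The IRQ_TIMBERDALE_EDGE_MASK / IRQ_TIMBERDALE_LEVEL_MASK definitions above encode, per bit position, how each of the 16 FPGA interrupt lines fires; whatever code configures the trigger types would consult them. A trivial helper, shown only as a sketch built from these definitions (the function itself is hypothetical):

	static inline bool timb_irq_is_edge_triggered(int irq_nr)
	{
		return irq_nr >= 0 && irq_nr < TIMBERDALE_NR_IRQS &&
			(IRQ_TIMBERDALE_EDGE_MASK & (1 << irq_nr));
	}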
6856diff -uNr linux-2.6.31/drivers/mmc/host/sdhci.c linux-2.6.31.new/drivers/mmc/host/sdhci.c
6857--- linux-2.6.31/drivers/mmc/host/sdhci.c 2009-10-23 11:18:30.000000000 -0700
6858+++ linux-2.6.31.new/drivers/mmc/host/sdhci.c 2009-10-23 11:17:25.000000000 -0700
6859@@ -652,7 +652,7 @@
6860 count = sdhci_calc_timeout(host, data);
6861 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
6862
6863- if (host->flags & SDHCI_USE_DMA)
6864+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
6865 host->flags |= SDHCI_REQ_USE_DMA;
6866
6867 /*
6868@@ -1597,7 +1597,7 @@
6869 {
6870 int ret;
6871
6872- if (host->flags & SDHCI_USE_DMA) {
6873+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
6874 if (host->ops->enable_dma)
6875 host->ops->enable_dma(host);
6876 }
6877@@ -1678,23 +1678,20 @@
6878 caps = sdhci_readl(host, SDHCI_CAPABILITIES);
6879
6880 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
6881- host->flags |= SDHCI_USE_DMA;
6882- else if (!(caps & SDHCI_CAN_DO_DMA))
6883- DBG("Controller doesn't have DMA capability\n");
6884+ host->flags |= SDHCI_USE_SDMA;
6885+ else if (!(caps & SDHCI_CAN_DO_SDMA))
6886+ DBG("Controller doesn't have SDMA capability\n");
6887 else
6888- host->flags |= SDHCI_USE_DMA;
6889+ host->flags |= SDHCI_USE_SDMA;
6890
6891 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
6892- (host->flags & SDHCI_USE_DMA)) {
6893+ (host->flags & SDHCI_USE_SDMA)) {
6894 DBG("Disabling DMA as it is marked broken\n");
6895- host->flags &= ~SDHCI_USE_DMA;
6896+ host->flags &= ~SDHCI_USE_SDMA;
6897 }
6898
6899- if (host->flags & SDHCI_USE_DMA) {
6900- if ((host->version >= SDHCI_SPEC_200) &&
6901- (caps & SDHCI_CAN_DO_ADMA2))
6902- host->flags |= SDHCI_USE_ADMA;
6903- }
6904+ if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2))
6905+ host->flags |= SDHCI_USE_ADMA;
6906
6907 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
6908 (host->flags & SDHCI_USE_ADMA)) {
6909@@ -1702,13 +1699,14 @@
6910 host->flags &= ~SDHCI_USE_ADMA;
6911 }
6912
6913- if (host->flags & SDHCI_USE_DMA) {
6914+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
6915 if (host->ops->enable_dma) {
6916 if (host->ops->enable_dma(host)) {
6917 printk(KERN_WARNING "%s: No suitable DMA "
6918 "available. Falling back to PIO.\n",
6919 mmc_hostname(mmc));
6920- host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA);
6921+ host->flags &=
6922+ ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
6923 }
6924 }
6925 }
6926@@ -1736,7 +1734,7 @@
6927 * mask, but PIO does not need the hw shim so we set a new
6928 * mask here in that case.
6929 */
6930- if (!(host->flags & SDHCI_USE_DMA)) {
6931+ if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
6932 host->dma_mask = DMA_BIT_MASK(64);
6933 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
6934 }
6935@@ -1810,7 +1808,7 @@
6936 */
6937 if (host->flags & SDHCI_USE_ADMA)
6938 mmc->max_hw_segs = 128;
6939- else if (host->flags & SDHCI_USE_DMA)
6940+ else if (host->flags & SDHCI_USE_SDMA)
6941 mmc->max_hw_segs = 1;
6942 else /* PIO */
6943 mmc->max_hw_segs = 128;
6944@@ -1893,10 +1891,10 @@
6945
6946 mmc_add_host(mmc);
6947
6948- printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
6949+ printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
6950 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
6951- (host->flags & SDHCI_USE_ADMA)?"A":"",
6952- (host->flags & SDHCI_USE_DMA)?"DMA":"PIO");
6953+ (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
6954+ (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
6955
6956 sdhci_enable_card_detection(host);
6957
6958diff -uNr linux-2.6.31/drivers/mmc/host/sdhci.h linux-2.6.31.new/drivers/mmc/host/sdhci.h
6959--- linux-2.6.31/drivers/mmc/host/sdhci.h 2009-10-23 11:18:30.000000000 -0700
6960+++ linux-2.6.31.new/drivers/mmc/host/sdhci.h 2009-10-23 11:17:25.000000000 -0700
6961@@ -143,7 +143,7 @@
6962 #define SDHCI_CAN_DO_ADMA2 0x00080000
6963 #define SDHCI_CAN_DO_ADMA1 0x00100000
6964 #define SDHCI_CAN_DO_HISPD 0x00200000
6965-#define SDHCI_CAN_DO_DMA 0x00400000
6966+#define SDHCI_CAN_DO_SDMA 0x00400000
6967 #define SDHCI_CAN_VDD_330 0x01000000
6968 #define SDHCI_CAN_VDD_300 0x02000000
6969 #define SDHCI_CAN_VDD_180 0x04000000
6970@@ -250,7 +250,7 @@
6971 spinlock_t lock; /* Mutex */
6972
6973 int flags; /* Host attributes */
6974-#define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */
6975+#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */
6976 #define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
6977 #define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
6978 #define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
6979diff -uNr linux-2.6.31/drivers/mmc/host/sdhci-pci.c linux-2.6.31.new/drivers/mmc/host/sdhci-pci.c
6980--- linux-2.6.31/drivers/mmc/host/sdhci-pci.c 2009-10-23 11:18:30.000000000 -0700
6981+++ linux-2.6.31.new/drivers/mmc/host/sdhci-pci.c 2009-10-23 11:17:25.000000000 -0700
6982@@ -395,7 +395,7 @@
6983
6984 if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
6985 ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
6986- (host->flags & SDHCI_USE_DMA)) {
6987+ (host->flags & SDHCI_USE_SDMA)) {
6988 dev_warn(&pdev->dev, "Will use DMA mode even though HW "
6989 "doesn't fully claim to support it.\n");
6990 }
6991diff -uNr linux-2.6.31/drivers/net/Kconfig linux-2.6.31.new/drivers/net/Kconfig
6992--- linux-2.6.31/drivers/net/Kconfig 2009-10-23 11:18:30.000000000 -0700
6993+++ linux-2.6.31.new/drivers/net/Kconfig 2009-10-23 11:17:23.000000000 -0700
6994@@ -1730,6 +1730,16 @@
6995 This platform driver is for Micrel KSZ8842 / KS8842
6996 2-port ethernet switch chip (managed, VLAN, QoS).
6997
6998+config KS8842_TIMB_DMA
6999+ bool "Use Timberdale specific DMA engine"
7000+ depends on KS8842 && MFD_TIMBERDALE
7001+ select MFD_TIMBERDALE_DMA
7002+ help
7003+	  This option enables use of the timberdale-specific DMA engine
7004+	  for the KS8842 driver. Rather than using PIO, which results in
7005+ single accesses over PCIe, the DMA block of the timberdale FPGA
7006+ will burst data to and from the KS8842.
7007+
7008 config KS8851
7009 tristate "Micrel KS8851 SPI"
7010 depends on SPI
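Since KS8842_TIMB_DMA is a bool, the Timberdale DMA support is compiled directly into the ks8842 driver whenever the option is enabled, and the driver changes that follow are guarded with #ifdef CONFIG_KS8842_TIMB_DMA. An illustrative configuration fragment for a Timberdale-based board (the values are only an example; KS8842 itself may equally well be built as a module):

	CONFIG_MFD_TIMBERDALE=y
	CONFIG_KS8842=y
	CONFIG_KS8842_TIMB_DMA=y
	CONFIG_MFD_TIMBERDALE_DMA=y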
7011diff -uNr linux-2.6.31/drivers/net/ks8842.c linux-2.6.31.new/drivers/net/ks8842.c
7012--- linux-2.6.31/drivers/net/ks8842.c 2009-10-23 11:18:30.000000000 -0700
7013+++ linux-2.6.31.new/drivers/net/ks8842.c 2009-10-23 11:17:22.000000000 -0700
7014@@ -26,11 +26,17 @@
7015 #include <linux/netdevice.h>
7016 #include <linux/etherdevice.h>
7017 #include <linux/ethtool.h>
7018+#include <linux/mfd/timbdma.h>
7019
7020 #define DRV_NAME "ks8842"
7021
7022 /* Timberdale specific Registers */
7023-#define REG_TIMB_RST 0x1c
7024+#define REG_TIMB_RST 0x1c
7025+#define REG_TIMB_FIFO 0x20
7026+#define REG_TIMB_ISR 0x24
7027+#define REG_TIMB_IER 0x28
7028+#define REG_TIMB_IAR 0x2C
7029+#define REQ_TIMB_DMA_RESUME 0x30
7030
7031 /* KS8842 registers */
7032
7033@@ -73,6 +79,11 @@
7034 #define IRQ_RX_ERROR 0x0080
7035 #define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
7036 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
7037+#ifdef CONFIG_KS8842_TIMB_DMA
7038+ #define ENABLED_IRQS_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
7039+ IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
7040+ #define ENABLED_IRQS_DMA (ENABLED_IRQS_IP | IRQ_RX)
7041+#endif
7042 #define REG_ISR 0x02
7043 #define REG_RXSR 0x04
7044 #define RXSR_VALID 0x8000
7045@@ -111,14 +122,50 @@
7046 #define REG_P1CR4 0x02
7047 #define REG_P1SR 0x04
7048
7049+#ifdef CONFIG_KS8842_TIMB_DMA
7050+#define DMA_BUFFER_SIZE 2048
7051+
7052+#define DMA_DEV(a) ((a->dev->parent) ? a->dev->parent : a->dev)
7053+
7054+#define DMA_ONGOING(a) (a->dma_tx.ongoing | a->dma_rx.ongoing)
7055+
7056+struct ks8842_dma_ctl {
7057+ void *desc;
7058+ void *buf;
7059+ dma_addr_t addr;
7060+ unsigned ongoing;
7061+};
7062+
7063+struct ks8842_rx_dma_ctl {
7064+ void *desc;
7065+ struct sk_buff *skb;
7066+ dma_addr_t addr;
7067+};
7068+
7069+#endif
7070+
7071 struct ks8842_adapter {
7072 void __iomem *hw_addr;
7073 int irq;
7074 struct tasklet_struct tasklet;
7075 spinlock_t lock; /* spinlock to be interrupt safe */
7076- struct platform_device *pdev;
7077+ struct device *dev;
7078+ struct work_struct timeout_work;
7079+ struct net_device *netdev;
7080+#ifdef CONFIG_KS8842_TIMB_DMA
7081+ unsigned use_dma;
7082+ struct ks8842_dma_ctl dma_tx;
7083+ struct ks8842_rx_dma_ctl dma_rx;
7084+#endif
7085 };
7086
7087+#ifdef CONFIG_KS8842_TIMB_DMA
7088+static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
7089+{
7090+ iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
7091+}
7092+#endif
7093+
7094 static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
7095 {
7096 iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
7097@@ -195,7 +242,6 @@
7098 msleep(10);
7099 iowrite16(0, adapter->hw_addr + REG_GRR);
7100 */
7101- iowrite16(32, adapter->hw_addr + REG_SELECT_BANK);
7102 iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
7103 msleep(20);
7104 }
7105@@ -203,8 +249,10 @@
7106 static void ks8842_update_link_status(struct net_device *netdev,
7107 struct ks8842_adapter *adapter)
7108 {
7109+ u16 p1mbsr = ks8842_read16(adapter, 45, REG_P1MBSR);
7110+
7111 /* check the status of the link */
7112- if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) {
7113+ if (p1mbsr & 0x4) {
7114 netif_carrier_on(netdev);
7115 netif_wake_queue(netdev);
7116 } else {
7117@@ -241,10 +289,8 @@
7118 /* Enable QMU Transmit flow control / transmit padding / Transmit CRC */
7119 ks8842_write16(adapter, 16, 0x000E, REG_TXCR);
7120
7121- /* enable the receiver, uni + multi + broadcast + flow ctrl
7122- + crc strip */
7123- ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400,
7124- REG_RXCR);
7125+ /* enable the receiver, uni + multi + broadcast + crc strip */
7126+ ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80, REG_RXCR);
7127
7128 /* TX frame pointer autoincrement */
7129 ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR);
7130@@ -261,13 +307,11 @@
7131 /* enable no excessive collison drop */
7132 ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);
7133
7134- /* Enable port 1 force flow control / back pressure / transmit / recv */
7135- ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2);
7136+ /* Enable port 1 / back pressure / transmit / recv */
7137+ ks8842_write16(adapter, 48, 0xE07, REG_P1CR2);
7138
7139 /* restart port auto-negotiation */
7140 ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);
7141- /* only advertise 10Mbps */
7142- ks8842_clear_bits(adapter, 49, 3 << 2, REG_P1CR4);
7143
7144 /* Enable the transmitter */
7145 ks8842_enable_tx(adapter);
7146@@ -279,7 +323,17 @@
7147 ks8842_write16(adapter, 18, 0xffff, REG_ISR);
7148
7149 /* enable interrupts */
7150- ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
7151+#ifdef CONFIG_KS8842_TIMB_DMA
7152+ if (adapter->use_dma) {
7153+ iowrite16(ENABLED_IRQS_IP, adapter->hw_addr + REG_TIMB_IER);
7154+ ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
7155+ } else {
7156+#endif
7157+ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
7158+ iowrite16(ENABLED_IRQS, adapter->hw_addr + REG_TIMB_IER);
7159+#ifdef CONFIG_KS8842_TIMB_DMA
7160+ }
7161+#endif
7162
7163 /* enable the switch */
7164 ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
7165@@ -302,11 +356,73 @@
7166 ks8842_write16(adapter, 39, mac, REG_MACAR3);
7167 }
7168
7169+static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
7170+{
7171+ unsigned long flags;
7172+ unsigned i;
7173+
7174+ spin_lock_irqsave(&adapter->lock, flags);
7175+ for (i = 0; i < ETH_ALEN; i++) {
7176+ ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
7177+ ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
7178+ REG_MACAR1 + i);
7179+ }
7180+ spin_unlock_irqrestore(&adapter->lock, flags);
7181+}
7182+
7183 static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
7184 {
7185 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
7186 }
7187
7188+#ifdef CONFIG_KS8842_TIMB_DMA
7189+static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
7190+{
7191+ struct ks8842_adapter *adapter = netdev_priv(netdev);
7192+ struct ks8842_dma_ctl *ctl = &adapter->dma_tx;
7193+ int err;
7194+ int len = skb->len + sizeof(u32);
7195+ u8 *buf = ctl->buf;
7196+
7197+ if (ctl->ongoing) {
7198+ dev_dbg(adapter->dev, "%s: TX ongoing\n", __func__);
7199+ /* transfer ongoing */
7200+ return NETDEV_TX_BUSY;
7201+ }
7202+
7203+ /* copy data to the TX buffer */
7204+ /* the control word, enable IRQ, port 1 and the length */
7205+ *buf++ = 0x00;
7206+ *buf++ = 0x01; /* Port 1 */
7207+ *buf++ = skb->len & 0xff;
7208+ *buf++ = (skb->len >> 8) & 0xff;
7209+ skb_copy_from_linear_data(skb, buf, skb->len);
7210+
7211+ dma_sync_single_range_for_device(DMA_DEV(adapter), ctl->addr, 0, len,
7212+ DMA_TO_DEVICE);
7213+
7214+ /* make sure the length is a multiple of 4 */
7215+ if (len % 4)
7216+ len += 4 - len % 4;
7217+
7218+ err = timbdma_prep_desc(ctl->desc, ctl->addr, len);
7219+ if (err)
7220+ return NETDEV_TX_BUSY;
7221+
7222+ ctl->ongoing = 1;
7223+ err = timbdma_start(DMA_IRQ_ETH_TX, ctl->desc, 0);
7224+ if (err) {
7225+ ctl->ongoing = 0;
7226+ return NETDEV_TX_BUSY;
7227+ }
7228+ netdev->stats.tx_bytes += skb->len;
7229+
7230+ dev_kfree_skb(skb);
7231+
7232+ return NETDEV_TX_OK;
7233+}
7234+#endif
7235+
7236 static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
7237 {
7238 struct ks8842_adapter *adapter = netdev_priv(netdev);
7239@@ -314,7 +430,7 @@
7240 u32 *ptr = (u32 *)skb->data;
7241 u32 ctrl;
7242
7243- dev_dbg(&adapter->pdev->dev,
7244+ dev_dbg(adapter->dev,
7245 "%s: len %u head %p data %p tail %p end %p\n",
7246 __func__, skb->len, skb->head, skb->data,
7247 skb_tail_pointer(skb), skb_end_pointer(skb));
7248@@ -344,6 +460,104 @@
7249 return NETDEV_TX_OK;
7250 }
7251
7252+#ifdef CONFIG_KS8842_TIMB_DMA
7253+static int __ks8842_start_new_rx_dma(struct net_device *netdev,
7254+ struct ks8842_adapter *adapter)
7255+{
7256+ struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
7257+ int err;
7258+
7259+ ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
7260+ if (ctl->skb) {
7261+ ctl->addr = dma_map_single(DMA_DEV(adapter), ctl->skb->data,
7262+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
7263+ err = dma_mapping_error(DMA_DEV(adapter), ctl->addr);
7264+ if (unlikely(err)) {
7265+ ctl->addr = 0;
7266+ goto out;
7267+ }
7268+ err = timbdma_prep_desc(ctl->desc, ctl->addr, DMA_BUFFER_SIZE);
7269+ if (unlikely(err))
7270+ goto out;
7271+ err = timbdma_start(DMA_IRQ_ETH_RX, ctl->desc, 0);
7272+ if (unlikely(err))
7273+ goto out;
7274+ } else {
7275+ err = -ENOMEM;
7276+ ctl->addr = 0;
7277+ goto out;
7278+ }
7279+
7280+ return err;
7281+out:
7282+ if (ctl->addr)
7283+ dma_unmap_single(DMA_DEV(adapter), ctl->addr, DMA_BUFFER_SIZE,
7284+ DMA_FROM_DEVICE);
7285+ ctl->addr = 0;
7286+ if (ctl->skb)
7287+ dev_kfree_skb(ctl->skb);
7288+
7289+ ctl->skb = NULL;
7290+
7291+ printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
7292+ return err;
7293+}
7294+
7295+static void ks8842_rx_frame_dma(struct net_device *netdev,
7296+ struct ks8842_adapter *adapter)
7297+{
7298+ struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
7299+ struct sk_buff *skb = ctl->skb;
7300+ dma_addr_t addr = ctl->addr;
7301+ u32 status;
7302+
7303+ /* kick next transfer going */
7304+ __ks8842_start_new_rx_dma(netdev, adapter);
7305+
7306+ /* now handle the data we got */
7307+ dma_unmap_single(DMA_DEV(adapter), addr, DMA_BUFFER_SIZE,
7308+ DMA_FROM_DEVICE);
7309+
7310+ status = *((u32 *)skb->data);
7311+
7312+ dev_dbg(adapter->dev, "%s - rx_data: status: %x\n",
7313+ __func__, status & 0xffff);
7314+
7315+ /* check the status */
7316+ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
7317+ int len = (status >> 16) & 0x7ff;
7318+
7319+		dev_dbg(adapter->dev, "%s, got packet, len: %d, skb: %p\n",
7320+ __func__, len, skb);
7321+
7322+ netdev->stats.rx_packets++;
7323+ netdev->stats.rx_bytes += len;
7324+ if (status & RXSR_MULTICAST)
7325+ netdev->stats.multicast++;
7326+
7327+ /* we are not nice to the stack, we want to be nice
7328+		 * to our DMA engine instead; reserve 4 bytes,
7329+ * which is the status word
7330+ */
7331+ skb_reserve(skb, 4);
7332+ skb_put(skb, len);
7333+
7334+ skb->protocol = eth_type_trans(skb, netdev);
7335+ netif_rx(skb);
7336+ } else {
7337+ dev_dbg(adapter->dev, "RX error, status: %x\n", status);
7338+ netdev->stats.rx_errors++;
7339+ if (status & RXSR_TOO_LONG)
7340+ netdev->stats.rx_length_errors++;
7341+ if (status & RXSR_CRC_ERROR)
7342+ netdev->stats.rx_crc_errors++;
7343+ if (status & RXSR_RUNT)
7344+ netdev->stats.rx_frame_errors++;
7345+ dev_kfree_skb(skb);
7346+ }
7347+}
7348+#endif
7349+
7350 static void ks8842_rx_frame(struct net_device *netdev,
7351 struct ks8842_adapter *adapter)
7352 {
7353@@ -352,14 +566,14 @@
7354
7355 status &= 0xffff;
7356
7357- dev_dbg(&adapter->pdev->dev, "%s - rx_data: status: %x\n",
7358+ dev_dbg(adapter->dev, "%s - rx_data: status: %x\n",
7359 __func__, status);
7360
7361 /* check the status */
7362 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
7363 struct sk_buff *skb = netdev_alloc_skb(netdev, len + 2);
7364
7365- dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n",
7366+ dev_dbg(adapter->dev, "%s, got package, len: %d\n",
7367 __func__, len);
7368 if (skb) {
7369 u32 *data;
7370@@ -386,7 +600,7 @@
7371 } else
7372 netdev->stats.rx_dropped++;
7373 } else {
7374- dev_dbg(&adapter->pdev->dev, "RX error, status: %x\n", status);
7375+ dev_dbg(adapter->dev, "RX error, status: %x\n", status);
7376 netdev->stats.rx_errors++;
7377 if (status & RXSR_TOO_LONG)
7378 netdev->stats.rx_length_errors++;
7379@@ -409,7 +623,7 @@
7380 void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
7381 {
7382 u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
7383- dev_dbg(&adapter->pdev->dev, "%s Entry - rx_data: %d\n",
7384+ dev_dbg(adapter->dev, "%s Entry - rx_data: %d\n",
7385 __func__, rx_data);
7386 while (rx_data) {
7387 ks8842_rx_frame(netdev, adapter);
7388@@ -420,7 +634,7 @@
7389 void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
7390 {
7391 u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
7392- dev_dbg(&adapter->pdev->dev, "%s - entry, sr: %x\n", __func__, sr);
7393+ dev_dbg(adapter->dev, "%s - entry, sr: %x\n", __func__, sr);
7394 netdev->stats.tx_packets++;
7395 if (netif_queue_stopped(netdev))
7396 netif_wake_queue(netdev);
7397@@ -429,7 +643,7 @@
7398 void ks8842_handle_rx_overrun(struct net_device *netdev,
7399 struct ks8842_adapter *adapter)
7400 {
7401- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
7402+ dev_dbg(adapter->dev, "%s: entry\n", __func__);
7403 netdev->stats.rx_errors++;
7404 netdev->stats.rx_fifo_errors++;
7405 }
7406@@ -448,20 +662,33 @@
7407 spin_unlock_irqrestore(&adapter->lock, flags);
7408
7409 isr = ks8842_read16(adapter, 18, REG_ISR);
7410- dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
7411+ dev_dbg(adapter->dev, "%s - ISR: 0x%x\n", __func__, isr);
7412+
7413+#ifdef CONFIG_KS8842_TIMB_DMA
7414+ if (adapter->use_dma)
7415+ isr &= ~IRQ_RX;
7416+#endif
7417
7418 /* Ack */
7419 ks8842_write16(adapter, 18, isr, REG_ISR);
7420
7421+ /* Ack in the timberdale IP as well */
7422+ iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);
7423+
7424 if (!netif_running(netdev))
7425 return;
7426
7427 if (isr & IRQ_LINK_CHANGE)
7428 ks8842_update_link_status(netdev, adapter);
7429
7430+ /* should not get IRQ_RX when in DMA mode */
7431 if (isr & (IRQ_RX | IRQ_RX_ERROR))
7432- ks8842_handle_rx(netdev, adapter);
7433+#ifdef CONFIG_KS8842_TIMB_DMA
7434+ if (!adapter->use_dma)
7435+#endif
7436+ ks8842_handle_rx(netdev, adapter);
7437
7438+ /* should only happen when not doing DMA */
7439 if (isr & IRQ_TX)
7440 ks8842_handle_tx(netdev, adapter);
7441
7442@@ -480,8 +707,18 @@
7443
7444 /* re-enable interrupts, put back the bank selection register */
7445 spin_lock_irqsave(&adapter->lock, flags);
7446- ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
7447+#ifdef CONFIG_KS8842_TIMB_DMA
7448+ if (adapter->use_dma)
7449+ ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
7450+ else
7451+#endif
7452+ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
7453+
7454 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
7455+#ifdef CONFIG_KS8842_TIMB_DMA
7456+ /* resume DMA operations */
7457+ ks8842_resume_dma(adapter);
7458+#endif
7459 spin_unlock_irqrestore(&adapter->lock, flags);
7460 }
7461
7462@@ -493,11 +730,17 @@
7463 irqreturn_t ret = IRQ_NONE;
7464
7465 isr = ks8842_read16(adapter, 18, REG_ISR);
7466- dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
7467+ dev_dbg(adapter->dev, "%s - ISR: 0x%x\n", __func__, isr);
7468
7469 if (isr) {
7470- /* disable IRQ */
7471- ks8842_write16(adapter, 18, 0x00, REG_IER);
7472+#ifdef CONFIG_KS8842_TIMB_DMA
7473+ if (adapter->use_dma)
7474+			/* disable all but the RX IRQ, since the FPGA relies on it */
7475+ ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
7476+ else
7477+#endif
7478+ /* disable IRQ */
7479+ ks8842_write16(adapter, 18, 0x00, REG_IER);
7480
7481 /* schedule tasklet */
7482 tasklet_schedule(&adapter->tasklet);
7483@@ -506,23 +749,129 @@
7484 }
7485
7486 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
7487-
7488+#ifdef CONFIG_KS8842_TIMB_DMA
7489+ ks8842_resume_dma(adapter);
7490+#endif
7491 return ret;
7492 }
7493
7494+#ifdef CONFIG_KS8842_TIMB_DMA
7495+static int ks8842_dma_irq(u32 flag, void *data)
7496+{
7497+ struct net_device *netdev = data;
7498+ struct ks8842_adapter *adapter = netdev_priv(netdev);
7499+
7500+ if (flag & DMA_IRQ_ETH_RX) {
7501+ dev_dbg(adapter->dev, "RX DMA finished\n");
7502+ ks8842_rx_frame_dma(netdev, adapter);
7503+ }
7504+ if (flag & DMA_IRQ_ETH_TX) {
7505+ struct ks8842_dma_ctl *ctl = &adapter->dma_tx;
7506+ dev_dbg(adapter->dev, "TX DMA finished\n");
7507+
7508+ netdev->stats.tx_packets++;
7509+ ctl->ongoing = 0;
7510+
7511+ if (netif_queue_stopped(netdev))
7512+ netif_wake_queue(netdev);
7513+ }
7514+
7515+ return 0;
7516+}
7517+
7518+static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
7519+{
7520+ struct ks8842_dma_ctl *tx_ctl = &adapter->dma_tx;
7521+ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
7522+
7523+ if (tx_ctl->ongoing)
7524+ timbdma_stop(DMA_IRQ_ETH_TX);
7525+ tx_ctl->ongoing = 0;
7526+ if (rx_ctl->skb)
7527+ timbdma_stop(DMA_IRQ_ETH_RX);
7528+
7529+ timbdma_set_interruptcb(DMA_IRQ_ETH_RX | DMA_IRQ_ETH_TX, NULL, NULL);
7530+
7531+ if (rx_ctl->desc)
7532+ timbdma_free_desc(rx_ctl->desc);
7533+ rx_ctl->desc = NULL;
7534+ if (tx_ctl->desc)
7535+ timbdma_free_desc(tx_ctl->desc);
7536+ tx_ctl->desc = NULL;
7537+ if (rx_ctl->addr)
7538+ dma_unmap_single(DMA_DEV(adapter), rx_ctl->addr,
7539+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
7540+ rx_ctl->addr = 0;
7541+ if (tx_ctl->addr)
7542+ dma_unmap_single(DMA_DEV(adapter), tx_ctl->addr,
7543+ DMA_BUFFER_SIZE, DMA_TO_DEVICE);
7544+ tx_ctl->addr = 0;
7545+ dev_kfree_skb(rx_ctl->skb);
7546+ rx_ctl->skb = NULL;
7547+ kfree(tx_ctl->buf);
7548+ tx_ctl->buf = NULL;
7549+}
7550+#endif
7551
7552 /* Netdevice operations */
7553
7554 static int ks8842_open(struct net_device *netdev)
7555 {
7556 struct ks8842_adapter *adapter = netdev_priv(netdev);
7557+#ifdef CONFIG_KS8842_TIMB_DMA
7558+ struct ks8842_dma_ctl *tx_ctl = &adapter->dma_tx;
7559+ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
7560+ int use_dma = 0;
7561+#endif
7562 int err;
7563
7564- dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
7565+ dev_dbg(adapter->dev, "%s - entry\n", __func__);
7566+
7567+#ifdef CONFIG_KS8842_TIMB_DMA
7568+ if (adapter->use_dma) {
7569+ /* allocate SG descriptor */
7570+ tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
7571+ if (!tx_ctl->buf)
7572+ goto no_dma;
7573+ tx_ctl->addr = dma_map_single(DMA_DEV(adapter), tx_ctl->buf,
7574+ DMA_BUFFER_SIZE, DMA_TO_DEVICE);
7575+ err = dma_mapping_error(DMA_DEV(adapter), tx_ctl->addr);
7576+ if (err) {
7577+ tx_ctl->addr = 0;
7578+ goto no_dma;
7579+ }
7580+ tx_ctl->desc = timbdma_alloc_desc(DMA_BUFFER_SIZE, 1);
7581+ if (!tx_ctl->desc)
7582+ goto no_dma;
7583+
7584+ rx_ctl->desc = timbdma_alloc_desc(DMA_BUFFER_SIZE, 1);
7585+ if (!rx_ctl->desc)
7586+ goto no_dma;
7587+
7588+ timbdma_set_interruptcb(DMA_IRQ_ETH_RX | DMA_IRQ_ETH_TX,
7589+ ks8842_dma_irq, (void *)netdev);
7590+
7591+ /* start RX dma */
7592+ err = __ks8842_start_new_rx_dma(netdev, adapter);
7593+ if (err)
7594+ goto no_dma;
7595+
7596+ use_dma = 1;
7597+ }
7598+no_dma:
7599+ if (!use_dma) {
7600+ printk(KERN_WARNING DRV_NAME
7601+ ": Failed to initiate DMA, falling back to PIO\n");
7602+ ks8842_dealloc_dma_bufs(adapter);
7603+ adapter->use_dma = 0;
7604+ }
7605+#endif
7606
7607 /* reset the HW */
7608 ks8842_reset_hw(adapter);
7609
7610+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
7611+
7612 ks8842_update_link_status(netdev, adapter);
7613
7614 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
7615@@ -536,11 +885,19 @@
7616 return 0;
7617 }
7618
7619+
7620 static int ks8842_close(struct net_device *netdev)
7621 {
7622 struct ks8842_adapter *adapter = netdev_priv(netdev);
7623
7624- dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
7625+ dev_dbg(adapter->dev, "%s - entry\n", __func__);
7626+
7627+ cancel_work_sync(&adapter->timeout_work);
7628+
7629+#ifdef CONFIG_KS8842_TIMB_DMA
7630+ if (adapter->use_dma)
7631+ ks8842_dealloc_dma_bufs(adapter);
7632+#endif
7633
7634 /* free the irq */
7635 free_irq(adapter->irq, adapter);
7636@@ -556,8 +913,20 @@
7637 int ret;
7638 struct ks8842_adapter *adapter = netdev_priv(netdev);
7639
7640- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
7641+ dev_dbg(adapter->dev, "%s: entry\n", __func__);
7642
7643+#ifdef CONFIG_KS8842_TIMB_DMA
7644+ if (adapter->use_dma) {
7645+ unsigned long flags;
7646+ ret = ks8842_tx_frame_dma(skb, netdev);
7647+		/* for now only allow one transfer at a time */
7648+ spin_lock_irqsave(&adapter->lock, flags);
7649+ if (adapter->dma_tx.ongoing)
7650+ netif_stop_queue(netdev);
7651+ spin_unlock_irqrestore(&adapter->lock, flags);
7652+ return ret;
7653+ }
7654+#endif
7655 ret = ks8842_tx_frame(skb, netdev);
7656
7657 if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8)
7658@@ -569,44 +938,77 @@
7659 static int ks8842_set_mac(struct net_device *netdev, void *p)
7660 {
7661 struct ks8842_adapter *adapter = netdev_priv(netdev);
7662- unsigned long flags;
7663 struct sockaddr *addr = p;
7664 char *mac = (u8 *)addr->sa_data;
7665- int i;
7666
7667- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
7668+ dev_dbg(adapter->dev, "%s: entry\n", __func__);
7669
7670 if (!is_valid_ether_addr(addr->sa_data))
7671 return -EADDRNOTAVAIL;
7672
7673 memcpy(netdev->dev_addr, mac, netdev->addr_len);
7674
7675- spin_lock_irqsave(&adapter->lock, flags);
7676- for (i = 0; i < ETH_ALEN; i++) {
7677- ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
7678- ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
7679- REG_MACAR1 + i);
7680- }
7681- spin_unlock_irqrestore(&adapter->lock, flags);
7682+ ks8842_write_mac_addr(adapter, mac);
7683 return 0;
7684 }
7685
7686-static void ks8842_tx_timeout(struct net_device *netdev)
7687+static void ks8842_tx_timeout_work(struct work_struct *work)
7688 {
7689- struct ks8842_adapter *adapter = netdev_priv(netdev);
7690+ struct ks8842_adapter *adapter =
7691+ container_of(work, struct ks8842_adapter, timeout_work);
7692+ struct net_device *netdev = adapter->netdev;
7693 unsigned long flags;
7694
7695- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
7696+ dev_dbg(adapter->dev, "%s: entry\n", __func__);
7697
7698 spin_lock_irqsave(&adapter->lock, flags);
7699+#ifdef CONFIG_KS8842_TIMB_DMA
7700+ if (adapter->use_dma) {
7701+ struct ks8842_dma_ctl *tx_ctl = &adapter->dma_tx;
7702+ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
7703+
7704+ if (tx_ctl->ongoing)
7705+ timbdma_stop(DMA_IRQ_ETH_TX);
7706+ tx_ctl->ongoing = 0;
7707+
7708+ timbdma_stop(DMA_IRQ_ETH_RX);
7709+
7710+ dma_unmap_single(DMA_DEV(adapter), rx_ctl->addr,
7711+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
7712+ rx_ctl->addr = 0;
7713+
7714+ dev_kfree_skb(rx_ctl->skb);
7715+ rx_ctl->skb = NULL;
7716+ }
7717+#endif
7718+
7719 /* disable interrupts */
7720 ks8842_write16(adapter, 18, 0, REG_IER);
7721 ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
7722+
7723+ netif_stop_queue(netdev);
7724+
7725 spin_unlock_irqrestore(&adapter->lock, flags);
7726
7727 ks8842_reset_hw(adapter);
7728
7729+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
7730+
7731 ks8842_update_link_status(netdev, adapter);
7732+
7733+#ifdef CONFIG_KS8842_TIMB_DMA
7734+ if (adapter->use_dma)
7735+ __ks8842_start_new_rx_dma(netdev, adapter);
7736+#endif
7737+}
7738+
7739+static void ks8842_tx_timeout(struct net_device *netdev)
7740+{
7741+ struct ks8842_adapter *adapter = netdev_priv(netdev);
7742+
7743+ dev_dbg(adapter->dev, "%s: entry\n", __func__);
7744+
7745+ schedule_work(&adapter->timeout_work);
7746 }
7747
7748 static const struct net_device_ops ks8842_netdev_ops = {
7749@@ -641,6 +1043,8 @@
7750 SET_NETDEV_DEV(netdev, &pdev->dev);
7751
7752 adapter = netdev_priv(netdev);
7753+ adapter->netdev = netdev;
7754+ INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
7755 adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
7756 if (!adapter->hw_addr)
7757 goto err_ioremap;
7758@@ -651,8 +1055,10 @@
7759 goto err_get_irq;
7760 }
7761
7762- adapter->pdev = pdev;
7763-
7764+ adapter->dev = &pdev->dev;
7765+#ifdef CONFIG_KS8842_TIMB_DMA
7766+ adapter->use_dma = 1;
7767+#endif
7768 tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
7769 spin_lock_init(&adapter->lock);
7770
7771@@ -660,6 +1066,8 @@
7772 netdev->ethtool_ops = &ks8842_ethtool_ops;
7773
7774 ks8842_read_mac_addr(adapter, netdev->dev_addr);
7775+ if (!is_valid_ether_addr(netdev->dev_addr))
7776+ random_ether_addr(netdev->dev_addr);
7777
7778 id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
7779
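One detail of the DMA TX path added above is worth spelling out: ks8842_tx_frame_dma() prepends a 4-byte header (a flags byte, the switch port, and the frame length with the low byte first) before the Ethernet frame, then rounds the whole transfer up to a multiple of four bytes. Purely as an illustration (this struct is not part of the patch), the header it builds by hand corresponds to:

	struct ks8842_tx_dma_hdr {
		u8 flags;	/* written as 0x00 */
		u8 port;	/* written as 0x01, i.e. switch port 1 */
		__le16 len;	/* skb->len, low byte first */
	} __attribute__((packed));

The open-coded rounding, if (len % 4) len += 4 - len % 4, is equivalent to ALIGN(len, 4) from linux/kernel.h.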
7780diff -uNr linux-2.6.31/drivers/net/Makefile linux-2.6.31.new/drivers/net/Makefile
7781--- linux-2.6.31/drivers/net/Makefile 2009-10-23 11:18:30.000000000 -0700
7782+++ linux-2.6.31.new/drivers/net/Makefile 2009-10-23 11:17:22.000000000 -0700
7783@@ -16,6 +16,7 @@
7784 obj-$(CONFIG_CHELSIO_T3) += cxgb3/
7785 obj-$(CONFIG_EHEA) += ehea/
7786 obj-$(CONFIG_CAN) += can/
7787+obj-$(CONFIG_MOST) += most/
7788 obj-$(CONFIG_BONDING) += bonding/
7789 obj-$(CONFIG_ATL1) += atlx/
7790 obj-$(CONFIG_ATL2) += atlx/
7791diff -uNr linux-2.6.31/drivers/net/most/Kconfig linux-2.6.31.new/drivers/net/most/Kconfig
7792--- linux-2.6.31/drivers/net/most/Kconfig 1969-12-31 16:00:00.000000000 -0800
7793+++ linux-2.6.31.new/drivers/net/most/Kconfig 2009-10-23 11:17:22.000000000 -0700
7794@@ -0,0 +1,14 @@
7795+menu "MOST Device Drivers"
7796+ depends on MOST
7797+
7798+config MOST_TIMB_MLB
7799+ tristate "The timberdale MOST block"
7800+ depends on MOST
7801+ depends on MFD_TIMBERDALE_DMA
7802+ depends on GENERIC_GPIO
7803+ depends on HAS_IOMEM
7804+	default n
7805+ ---help---
7806+ Adds support for MOST on the timberdale FPGA.
7807+
7808+endmenu
7809diff -uNr linux-2.6.31/drivers/net/most/Makefile linux-2.6.31.new/drivers/net/most/Makefile
7810--- linux-2.6.31/drivers/net/most/Makefile 1969-12-31 16:00:00.000000000 -0800
7811+++ linux-2.6.31.new/drivers/net/most/Makefile 2009-10-23 11:17:22.000000000 -0700
7812@@ -0,0 +1,6 @@
7813+#
7814+# Makefile for the Linux Media Oriented Systems Transport drivers.
7815+#
7816+
7817+obj-$(CONFIG_MOST_TIMB_MLB) += timbmlb.o
7818+
7819diff -uNr linux-2.6.31/drivers/net/most/timbmlb.c linux-2.6.31.new/drivers/net/most/timbmlb.c
7820--- linux-2.6.31/drivers/net/most/timbmlb.c 1969-12-31 16:00:00.000000000 -0800
7821+++ linux-2.6.31.new/drivers/net/most/timbmlb.c 2009-10-23 11:17:22.000000000 -0700
7822@@ -0,0 +1,1087 @@
7823+/*
7824+ * timbmlb.c Driver for the timberdale MLB block
7825+ * Copyright (c) 2009 Intel Corporation
7826+ *
7827+ * This program is free software; you can redistribute it and/or modify
7828+ * it under the terms of the GNU General Public License version 2 as
7829+ * published by the Free Software Foundation.
7830+ *
7831+ * This program is distributed in the hope that it will be useful,
7832+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
7833+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7834+ * GNU General Public License for more details.
7835+ *
7836+ * You should have received a copy of the GNU General Public License
7837+ * along with this program; if not, write to the Free Software
7838+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
7839+ */
7840+#include <linux/module.h>
7841+#include <linux/interrupt.h>
7842+#include <linux/platform_device.h>
7843+#include <linux/mfd/timbdma.h>
7844+#include <linux/spinlock.h>
7845+#include <net/most/most_core.h>
7846+#include <linux/gpio.h>
7847+#include <linux/most/timbmlb.h>
7848+
7849+#define DRIVER_NAME "timb-most"
7850+
7851+#define MLB_REG_CFG 0x00
7852+#define MLB_REG_CH_CTRL 0x04
7853+#define MLB_REG_ISR 0x08
7854+#define MLB_REG_IMR 0x0C
7855+#define MLB_REG_CH_CFG_1 0x10
7856+#define MLB_REG_CH_CFG_2 0x14
7857+#define MLB_REG_CH_CFG_3 0x18
7858+#define MLB_REG_CH_CFG_4 0x1C
7859+#define MLB_REG_CH_CFG_5 0x20
7860+#define MLB_REG_CH_CFG_6 0x24
7861+#define MLB_REG_CH_CFG_7 0x28
7862+#define MLB_REG_CTRL_RX 0x2C /* 8 bits */
7863+#define MLB_REG_CTRL_TX MLB_REG_CTRL_RX
7864+#define MLB_REG_ASYNC_RX 0x30 /* 32 bits */
7865+#define MLB_REG_ASYNC_TX MLB_REG_ASYNC_RX
7866+#define MLB_REG_SYNC_RX 0x34 /* 32 bits */
7867+#define MLB_REG_SYNC_TX MLB_REG_SYNC_RX
7868+#define MLB_REG_FIFO_RST 0x38
7869+
7870+#define MLB_WR_CFG_CTRL_RX_EMPTY 0x20000
7871+#define MLB_WR_CFG_ASYNC_RX_EMPTY 0x10000
7872+#define MLB_CFG_SYNC_TX_EN 0x00200
7873+#define MLB_CFG_SYNC_RX_EN 0x00100
7874+#define MLB_CFG_ASYNC_RX_EN 0x00080
7875+#define MLB_CFG_CTRL_RX_EN 0x00040
7876+
7877+#define MLB_CH_CTRL_ASYNC_TX_START 0x8000
7878+#define MLB_CH_CTRL_ASYNC_RX_BREAK 0x4000
7879+#define MLB_CH_CTRL_CTRL_TX_START 0x0800
7880+#define MLB_CH_CTRL_CTRL_RX_BREAK 0x0400
7881+
7882+#define MLB_WR_I_SYNC_RX_EMPTY 0x80000
7883+#define MLB_WR_I_SYNC_RX_ALMOST_FULL 0x40000
7884+#define MLB_WR_I_SYNC_TX_FULL 0x20000
7885+#define MLB_WR_I_SYNC_TX_ALMOST_EMPTY 0x10000
7886+#define MLB_I_ASYNC_TX_READY 0x08000
7887+#define MLB_I_ASYNC_TX_PROT_ERR 0x04000
7888+#define MLB_I_ASYNC_TX_RX_BREAK 0x02000
7889+#define MLB_I_ASYNC_TX_BUSY_BREAK 0x01000
7890+#define MLB_I_ASYNC_RX_READY 0x00800
7891+#define MLB_I_ASYNC_RX_PROT_ERR 0x00400
7892+#define MLB_I_ASYNC_RX_CMD_BREAK 0x00200
7893+#define MLB_I_SYNC_LOCK 0x00100
7894+#define MLB_I_CTRL_TX_READY 0x00080
7895+#define MLB_I_CTRL_TX_PROT_ERR 0x00040
7896+#define MLB_I_CTRL_TX_RX_BREAK 0x00020
7897+#define MLB_I_CTRL_TX_BUSY_BREAK 0x00010
7898+#define MLB_I_CTRL_RX_READY 0x00008
7899+#define MLB_I_CTRL_RX_PROT_ERR 0x00004
7900+#define MLB_I_CTRL_RX_CMD_BREAK 0x00002
7901+#define MLB_I_SYNC_RX_PROT_ERR 0x00001
7902+
7903+#define MLB_CH_CFG_NOT_ALLOCATED 0x0000
7904+#define MLB_CH_CFG_SYNC_TX 0x0001
7905+#define MLB_CH_CFG_SYNC_RX 0x0002
7906+#define MLB_CH_CFG_ASYNC_TX 0x0003
7907+#define MLB_CH_CFG_ASYNC_RX 0x0004
7908+#define MLB_CH_CFG_CTRL_TX 0x0005
7909+#define MLB_CH_CFG_CTRL_RX 0x0006
7910+
7911+#define MLB_FIFO_RST_CTRL_TX 0x010000
7912+#define MLB_FIFO_RST_CTRL_RX 0x020000
7913+#define MLB_FIFO_RST_ASYNC_TX 0x040000
7914+#define MLB_FIFO_RST_ASYNC_RX 0x080000
7915+#define MLB_FIFO_RST_SYNC_TX 0x100000
7916+#define MLB_FIFO_RST_SYNC_RX 0x200000
7917+#define MLB_FIFO_RST_MLB 0x400000
7918+#define MLB_FIFO_RST_ALL (MLB_FIFO_RST_CTRL_TX | \
7919+ MLB_FIFO_RST_CTRL_RX | \
7920+ MLB_FIFO_RST_ASYNC_TX | \
7921+ MLB_FIFO_RST_ASYNC_RX | \
7922+ MLB_FIFO_RST_SYNC_TX | \
7923+ MLB_FIFO_RST_SYNC_RX | \
7924+ MLB_FIFO_RST_MLB)
7925+
7926+#define ASYNC_SKB_SIZE 1024
7927+#define SYNC_SKB_SIZE 32
7928+
7929+#define SYNC_MAX_DMA_SIZE 4096
7930+
7931+#define RX_CHAN 0
7932+#define TX_CHAN 1
7933+#define CHANNELS 2
7934+
7935+#define DMA_DEV(s) ((s->mdev->parent->parent) ? \
7936+ s->mdev->parent->parent : s->mdev->parent)
7937+
7938+struct timbmost {
7939+ void __iomem *membase;
7940+ struct most_dev *mdev;
7941+ int irq;
7942+ int reset_pin;
7943+ spinlock_t lock; /* mutual exclusion */
7944+
7945+ /* one queue per channel (type) */
7946+ struct sk_buff_head ctl_q;
7947+ struct sk_buff_head async_q;
7948+ struct sk_buff_head sync_q;
7949+
7950+ /* The SKB currently written/read into by the DMA engine
7951+ * only used for the synchronous channel
7952+ */
7953+ struct sk_buff *sync_read_skb;
7954+ dma_addr_t sync_read_handle;
7955+ void *sync_read_desc;
7956+ struct sk_buff *sync_write_skb;
7957+ void *sync_write_desc;
7958+ int sync_write_next_map;
7959+
7960+ /* channel numbers */
7961+ u8 ctl_channels[CHANNELS];
7962+ u8 sync_channels[CHANNELS];
7963+ u8 async_channels[CHANNELS];
7964+};
7965+
7966+static void timbmost_ctl_write_wake(struct timbmost *self);
7967+static void timbmost_async_write_wake(struct timbmost *self);
7968+
7969+static void __timbmost_dump_regs(struct timbmost *self, const char *caption)
7970+{
7971+ dev_dbg(self->mdev->parent, "%s\nMLB_CFG:\t%x\tCH_CTRL:\t%x\n",
7972+ caption,
7973+ ioread32(self->membase + MLB_REG_CFG),
7974+ ioread32(self->membase + MLB_REG_CH_CTRL));
7975+
7976+ dev_dbg(self->mdev->parent, "ISTAT:\t%x\tIMASK:\t%x\n",
7977+ ioread32(self->membase + MLB_REG_ISR),
7978+ ioread32(self->membase + MLB_REG_IMR));
7979+
7980+ dev_dbg(self->mdev->parent, "CH_CFG1:\t%x\tCH_CFG2:\t%x\n",
7981+ ioread32(self->membase + MLB_REG_CH_CFG_1),
7982+ ioread32(self->membase + MLB_REG_CH_CFG_2));
7983+
7984+ dev_dbg(self->mdev->parent, "CH_CFG3:\t%x\tCH_CFG4:\t%x\n",
7985+ ioread32(self->membase + MLB_REG_CH_CFG_3),
7986+ ioread32(self->membase + MLB_REG_CH_CFG_4));
7987+
7988+ dev_dbg(self->mdev->parent, "CH_CFG5:\t%x\tCH_CFG6:\t%x\n",
7989+ ioread32(self->membase + MLB_REG_CH_CFG_5),
7990+ ioread32(self->membase + MLB_REG_CH_CFG_6));
7991+
7992+ dev_dbg(self->mdev->parent, "CH_CFG7:\t%x\n",
7993+ ioread32(self->membase + MLB_REG_CH_CFG_7));
7994+}
7995+
7996+static void __timbmost_hw_reset(struct timbmost *self)
7997+{
7998+ /* disable all interrupts */
7999+ iowrite32(0, self->membase + MLB_REG_IMR);
8000+ iowrite32(0, self->membase + MLB_REG_ISR);
8001+
8002+ /* disable RX and TX */
8003+ iowrite32(0, self->membase + MLB_REG_CFG);
8004+ iowrite32(0, self->membase + MLB_REG_CH_CTRL);
8005+
8006+ /* make sure the channels are not allocated */
8007+ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_1);
8008+ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_2);
8009+ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_3);
8010+ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_4);
8011+ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_5);
8012+ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_6);
8013+
8014+ /* reset */
8015+ iowrite32(MLB_FIFO_RST_ALL, self->membase + MLB_REG_FIFO_RST);
8016+
8017+ /* reset the INIC */
8018+ gpio_direction_output(self->reset_pin, 0);
8019+ msleep(10);
8020+ gpio_set_value(self->reset_pin, 1);
8021+}
8022+
8023+static void __timbmost_ctl_rx(struct timbmost *self)
8024+{
8025+ u32 cfg;
8026+ do {
8027+ struct sk_buff *skb =
8028+ most_skb_alloc(CTL_FRAME_SIZE, GFP_ATOMIC);
8029+ if (!skb)
8030+ return;
8031+
8032+ do {
8033+ u32 word = ioread32(self->membase + MLB_REG_CTRL_RX);
8034+ int i;
8035+
8036+ for (i = 0; i < 4; i++)
8037+ *skb_put(skb, 1) = (word >> (i * 8)) & 0xff;
8038+
8039+ cfg = ioread32(self->membase + MLB_REG_CFG);
8040+ } while ((skb->len < CTL_FRAME_SIZE) &&
8041+ !(cfg & MLB_WR_CFG_CTRL_RX_EMPTY));
8042+
8043+ /* deliver the SKB upstream */
8044+ skb->dev = (void *)self->mdev;
8045+ most_cb(skb)->channel_type = CHAN_CTL;
8046+ /* only one channel is supported... */
8047+ most_cb(skb)->channel = self->ctl_channels[RX_CHAN];
8048+
8049+ most_recv_frame(skb);
8050+ } while (!(cfg & MLB_WR_CFG_CTRL_RX_EMPTY));
8051+}
8052+
8053+static void __timbmost_async_rx(struct timbmost *self)
8054+{
8055+ /* TODO: The FIFO is 32bit not 8bit */
8056+ u32 cfg;
8057+
8058+ __timbmost_dump_regs(self, "Before read");
8059+
8060+ do {
8061+ struct sk_buff *skb =
8062+ most_skb_alloc(ASYNC_SKB_SIZE, GFP_ATOMIC);
8063+ if (!skb)
8064+ return;
8065+
8066+ do {
8067+ *skb_put(skb, 1) =
8068+ ioread32(self->membase + MLB_REG_ASYNC_RX);
8069+ cfg = ioread32(self->membase + MLB_REG_CFG);
8070+ } while ((skb->len < ASYNC_SKB_SIZE) &&
8071+ !(cfg & MLB_WR_CFG_ASYNC_RX_EMPTY));
8072+
8073+ /* deliver the SKB upstream */
8074+ skb->dev = (void *)self->mdev;
8075+ most_cb(skb)->channel_type = CHAN_ASYNC;
8076+ /* only one channel is supported... */
8077+ most_cb(skb)->channel = self->async_channels[RX_CHAN];
8078+
8079+ most_recv_frame(skb);
8080+ } while (!(cfg & MLB_WR_CFG_ASYNC_RX_EMPTY));
8081+}
8082+
8083+static void __timbmost_sync_read_wake(struct timbmost *self)
8084+{
8085+ struct sk_buff *skb = self->sync_read_skb;
8086+ dma_addr_t map;
8087+ int err;
8088+
8089+ if (skb)
8090+ return;
8091+
8092+ skb = most_skb_alloc(SYNC_SKB_SIZE, GFP_ATOMIC);
8093+ if (!skb)
8094+ return;
8095+
8096+ map = dma_map_single(DMA_DEV(self), skb->data, SYNC_SKB_SIZE,
8097+ DMA_FROM_DEVICE);
8098+ if (dma_mapping_error(DMA_DEV(self), map))
8099+ goto map_failed;
8100+
8101+ err = timbdma_prep_desc(self->sync_read_desc, map, SYNC_SKB_SIZE);
8102+ if (err)
8103+ goto prep_failed;
8104+
8105+ dev_dbg(self->mdev->parent, "%s: will start RX: to: %x, size: %d\n",
8106+ __func__, (u32)map, SYNC_SKB_SIZE);
8107+
8108+ err = timbdma_start(DMA_IRQ_MLB_RX, self->sync_read_desc, 0);
8109+ if (err)
8110+ goto start_failed;
8111+
8112+ self->sync_read_skb = skb;
8113+ self->sync_read_handle = map;
8114+ return;
8115+start_failed:
8116+prep_failed:
8117+ dma_unmap_single(DMA_DEV(self), map, SYNC_SKB_SIZE, DMA_FROM_DEVICE);
8118+map_failed:
8119+ dev_kfree_skb(skb);
8120+}
8121+
8122+static void __timbmost_sync_rx_done(struct timbmost *self)
8123+{
8124+ struct sk_buff *skb = self->sync_read_skb;
8125+ int len;
8126+
8127+ BUG_ON(!skb);
8128+
8129+ /* unmap DMA */
8130+ dma_unmap_single(DMA_DEV(self), self->sync_read_handle, SYNC_SKB_SIZE,
8131+ DMA_FROM_DEVICE);
8132+
8133+ /* set the length */
8134+ len = timbdma_stop(DMA_IRQ_MLB_RX);
8135+ skb_put(skb, len);
8136+ /* send the SKB upwards */
8137+ skb->dev = (void *)self->mdev;
8138+ most_cb(skb)->channel_type = CHAN_SYNC;
8139+ /* only one channel is supported... */
8140+ most_cb(skb)->channel = self->sync_channels[RX_CHAN];
8141+ most_recv_frame(skb);
8142+ self->sync_read_skb = NULL;
8143+
8144+ __timbmost_sync_read_wake(self);
8145+}
8146+
8147+static void __timbmost_sync_write_wake(struct timbmost *self)
8148+{
8149+ unsigned long flags;
8150+ int len;
8151+ dma_addr_t map;
8152+ struct sk_buff *skb = self->sync_write_skb;
8153+ u32 isr;
8154+
8155+ dev_dbg(self->mdev->parent, "%s entry\n", __func__);
8156+
8157+ if (!skb) {
8158+ /* check for next SKB */
8159+ skb = skb_dequeue(&self->sync_q);
8160+ if (!skb)
8161+ return;
8162+
8163+ if (skb_dma_map(DMA_DEV(self), skb, DMA_TO_DEVICE)) {
8164+ /* failed to dma map? */
8165+ dev_kfree_skb(skb);
8166+ return;
8167+ }
8168+ /* next DMA map to write is the first: -1 = linear head, then the frags */
8169+ self->sync_write_next_map = -1;
8170+ self->sync_write_skb = skb;
8171+ dev_dbg(self->mdev->parent, "%s: New skb: fragments: %d\n",
8172+ __func__, skb_shinfo(skb)->nr_frags);
8173+ }
8174+
8175+ /* check if there is space in the FIFO */
8176+ spin_lock_irqsave(&self->lock, flags);
8177+ isr = ioread32(self->membase + MLB_REG_ISR);
8178+ if (isr & MLB_WR_I_SYNC_TX_FULL) {
8179+ /* FIFO full; enable the almost-empty interrupt */
8180+ u32 imr = ioread32(self->membase + MLB_REG_IMR);
8181+ imr |= MLB_WR_I_SYNC_TX_ALMOST_EMPTY;
8182+ iowrite32(imr, self->membase + MLB_REG_IMR);
8183+ }
8184+ spin_unlock_irqrestore(&self->lock, flags);
8185+
8186+ /* exit if the FIFO is full, we will continue when the almost empty
8187+ * interrupt occurs
8188+ */
8189+ if (isr & MLB_WR_I_SYNC_TX_FULL)
8190+ return;
8191+
8192+ /* send next fragment */
8193+ if (self->sync_write_next_map < 0) {
8194+ len = skb_headlen(skb);
8195+ map = skb_shinfo(skb)->dma_head;
8196+ } else {
8197+ len = skb_shinfo(skb)->frags[self->sync_write_next_map].size;
8198+ map = skb_shinfo(skb)->dma_maps[self->sync_write_next_map];
8199+ }
8200+ self->sync_write_next_map++;
8201+ dev_dbg(self->mdev->parent, "%s: Will send %x, len: %d\n",
8202+ __func__, (uint32_t)map, len);
8203+ timbdma_prep_desc(self->sync_write_desc, map, len);
8204+ timbdma_start(DMA_IRQ_MLB_TX, self->sync_write_desc, 0);
8205+}
8206+
8207+static void __timbmost_sync_tx_done(struct timbmost *self)
8208+{
8209+ struct sk_buff *skb = self->sync_write_skb;
8210+
8211+ /* TX done, free current SKB, and check for next */
8212+ BUG_ON(!skb);
8213+
8214+ /* check if this was the last DMA map */
8215+ if (self->sync_write_next_map >= skb_shinfo(skb)->nr_frags) {
8216+
8217+ /* it was the last... */
8218+ skb_dma_unmap(DMA_DEV(self), skb, DMA_TO_DEVICE);
8219+ dev_kfree_skb(skb);
8220+ self->sync_write_skb = NULL;
8221+ }
8222+
8223+ __timbmost_sync_write_wake(self);
8224+}
8225+
8226+static void timbmost_sync_start_write(struct timbmost *self)
8227+{
8228+ unsigned long flags;
8229+ struct sk_buff *skb;
8230+
8231+ spin_lock_irqsave(&self->lock, flags);
8232+ skb = self->sync_write_skb;
8233+ spin_unlock_irqrestore(&self->lock, flags);
8234+
8235+ /* transfer is ongoing */
8236+ if (skb)
8237+ return;
8238+
8239+ __timbmost_sync_write_wake(self);
8240+}
8241+
8242+/* function called in interrupt context by the timberdale DMA engine
8243+ * when a transfer is finished
8244+ */
8245+static int timbmost_dma_irq(u32 flag, void *devid)
8246+{
8247+ struct timbmost *self = (struct timbmost *)devid;
8248+
8249+ if (flag & DMA_IRQ_MLB_RX)
8250+ __timbmost_sync_rx_done(self);
8251+
8252+ if (flag & DMA_IRQ_MLB_TX)
8253+ __timbmost_sync_tx_done(self);
8254+
8255+ return 0;
8256+}
8257+
8258+static irqreturn_t timbmost_irq(int irq, void *devid)
8259+{
8260+ struct timbmost *self = (struct timbmost *)devid;
8261+ u32 isr, imr;
8262+
8263+ isr = ioread32(self->membase + MLB_REG_ISR);
8264+ imr = ioread32(self->membase + MLB_REG_IMR);
8265+
8266+ dev_dbg(self->mdev->parent, "%s: entry, isr: %x, imr: %x\n", __func__,
8267+ isr, imr);
8268+
8269+ /* mask out only enabled interrupts */
8270+ isr &= imr;
8271+
8272+ /* ack */
8273+ iowrite32(isr, self->membase + MLB_REG_ISR);
8274+
8275+ if (isr & MLB_WR_I_SYNC_TX_ALMOST_EMPTY) {
8276+ /* disable the interrupt */
8277+ imr &= ~MLB_WR_I_SYNC_TX_ALMOST_EMPTY;
8278+ iowrite32(imr, self->membase + MLB_REG_IMR);
8279+ __timbmost_sync_write_wake(self);
8280+ }
8281+
8282+ if (isr & MLB_I_ASYNC_TX_READY) {
8283+ /* disable TX interrupts */
8284+ imr &= ~(MLB_I_ASYNC_TX_READY | MLB_I_ASYNC_TX_PROT_ERR);
8285+ iowrite32(imr, self->membase + MLB_REG_IMR);
8286+ /* schedule to send next package */
8287+ timbmost_async_write_wake(self);
8288+ }
8289+
8290+ if (isr & MLB_I_ASYNC_RX_READY)
8291+ /* pass data upstream */
8292+ __timbmost_async_rx(self);
8293+
8294+ if (isr & MLB_I_CTRL_TX_READY) {
8295+ /* disable TX interrupts */
8296+ imr &= ~(MLB_I_CTRL_TX_READY | MLB_I_CTRL_TX_PROT_ERR);
8297+ iowrite32(imr, self->membase + MLB_REG_IMR);
8298+ /* schedule to send next package */
8299+ timbmost_ctl_write_wake(self);
8300+ }
8301+
8302+ if (isr & MLB_I_CTRL_RX_READY)
8303+ /* pass data upstream */
8304+ __timbmost_ctl_rx(self);
8305+
8306+ if (isr)
8307+ return IRQ_HANDLED;
8308+ else
8309+ return IRQ_NONE;
8310+}
8311+
8312+static int timbmost_open(struct most_dev *mdev)
8313+{
8314+ struct timbmost *self = (struct timbmost *)mdev->driver_data;
8315+ int err;
8316+
8317+ dev_dbg(mdev->parent, "%s\n", __func__);
8318+
8319+ skb_queue_head_init(&self->ctl_q);
8320+ skb_queue_head_init(&self->sync_q);
8321+ skb_queue_head_init(&self->async_q);
8322+
8323+ spin_lock_init(&self->lock);
8324+
8325+ /* request the GPIO reset pin */
8326+ err = gpio_request(self->reset_pin, DRIVER_NAME);
8327+ if (err) {
8328+ printk(KERN_ERR DRIVER_NAME
8329+ " Failed to request reset pin: %d, err: %d\n",
8330+ self->reset_pin, err);
8331+ return err;
8332+ }
8333+
8334+ __timbmost_hw_reset(self);
8335+
8336+ /* set DMA callback */
8337+ timbdma_set_interruptcb(DMA_IRQ_MLB_RX | DMA_IRQ_MLB_TX,
8338+ timbmost_dma_irq, (void *)self);
8339+
8340+ self->sync_read_desc = timbdma_alloc_desc(SYNC_MAX_DMA_SIZE, 1);
8341+ if (!self->sync_read_desc) {
8342+ err = -ENOMEM;
8343+ goto err_alloc_r_desc;
8344+ }
8345+
8346+ self->sync_write_desc = timbdma_alloc_desc(SYNC_MAX_DMA_SIZE, 1);
8347+ if (!self->sync_write_desc) {
8348+ err = -ENOMEM;
8349+ goto err_alloc_w_desc;
8350+ }
8351+
8352+ /* request IRQ */
8353+ err = request_irq(self->irq, timbmost_irq, IRQF_SHARED, "timb-most",
8354+ self);
8355+ if (err)
8356+ goto err_req_irq;
8357+
8358+ return 0;
8359+
8360+err_req_irq:
8361+ timbdma_free_desc(self->sync_write_desc);
8362+err_alloc_w_desc:
8363+ timbdma_free_desc(self->sync_read_desc);
8364+err_alloc_r_desc:
8365+ gpio_free(self->reset_pin);
8366+ return err;
8367+}
8368+
8369+static void timbmost_stop_sync_dma(struct timbmost *self)
8370+{
8371+ if (self->sync_read_skb) {
8372+ timbdma_stop(DMA_IRQ_MLB_RX);
8373+ dma_unmap_single(DMA_DEV(self), self->sync_read_handle,
8374+ SYNC_SKB_SIZE, DMA_FROM_DEVICE);
8375+ kfree_skb(self->sync_read_skb);
8376+ self->sync_read_skb = NULL;
8377+ }
8378+
8379+ if (self->sync_write_skb) {
8380+ timbdma_stop(DMA_IRQ_MLB_TX);
8381+ skb_dma_unmap(DMA_DEV(self), self->sync_write_skb,
8382+ DMA_TO_DEVICE);
8383+ kfree_skb(self->sync_write_skb);
8384+ self->sync_write_skb = NULL;
8385+ }
8386+}
8387+
8388+static int timbmost_close(struct most_dev *mdev)
8389+{
8390+ struct timbmost *self = (struct timbmost *)mdev->driver_data;
8391+
8392+ dev_dbg(mdev->parent, "%s\n", __func__);
8393+
8394+ /* free IRQ */
8395+ free_irq(self->irq, self);
8396+
8397+ __timbmost_hw_reset(self);
8398+
8399+ /* free GPIO */
8400+ gpio_free(self->reset_pin);
8401+
8402+ /* empty all queues */
8403+ skb_queue_purge(&self->ctl_q);
8404+ skb_queue_purge(&self->sync_q);
8405+ skb_queue_purge(&self->async_q);
8406+
8407+ /* clear DMA callback */
8408+ timbdma_set_interruptcb(DMA_IRQ_MLB_RX | DMA_IRQ_MLB_TX, NULL, NULL);
8409+
8410+ return 0;
8411+}
8412+
8413+static int __timbmost_conf_channel(struct timbmost *self, u8 channel,
8414+ u8 channel_mask)
8415+{
8416+ int register_offset;
8417+ int shift;
8418+ u32 ch_cfg;
8419+
8420+ /* only even channel numbers are allowed */
8421+ if (channel % 2 || channel > 0x3e || channel == 0) {
8422+ printk(KERN_WARNING DRIVER_NAME": Invalid channel: %d\n",
8423+ channel);
8424+ return -EINVAL;
8425+ }
8426+
8427+ channel = (channel / 2) - 1;
8428+ /* the channel configuration is spread over the 7 channel config
8429+ * registers; each 32-bit register configures 5 channels
8430+ */
8431+ register_offset = MLB_REG_CH_CFG_1 + (channel / 5) * 4;
8432+
8433+ /* each register configures 5 channels, 3 bits per channel;
8434+ * the lowest bits configure the highest channel
8435+ */
8436+ shift = (4 - (channel % 5)) * 3;
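+ /*
+ * Worked example (hypothetical values): physical channel 12 maps to
+ * channel (12 / 2) - 1 = 5, so register_offset becomes
+ * MLB_REG_CH_CFG_1 + (5 / 5) * 4, i.e. the second channel-config
+ * register (assuming the CH_CFG registers are laid out consecutively),
+ * and shift = (4 - (5 % 5)) * 3 = 12, placing the 3-bit channel_mask
+ * in bits 14:12 of that register.
+ */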
8437+
8438+ ch_cfg = ioread32(self->membase + register_offset);
8439+ ch_cfg &= ~(0x7 << shift);
8440+ ch_cfg |= (channel_mask & 0x7) << shift;
8441+ iowrite32(ch_cfg, self->membase + register_offset);
8442+ return 0;
8443+}
8444+
8445+static int timbmost_conf_channel(struct most_dev *mdev,
8446+ enum most_chan_type type, u8 channel, u8 flags)
8447+{
8448+ struct timbmost *self = (struct timbmost *)mdev->driver_data;
8449+ unsigned long irq_flags;
8450+ u32 imr, cfg;
8451+ int err = -EINVAL;
8452+ int chan_idx = (flags & MOST_CONF_FLAG_TX) ? TX_CHAN : RX_CHAN;
8453+
8454+ dev_dbg(mdev->parent, "%s: channel: %d, flags: %x\n",
8455+ __func__, channel, flags);
8456+
8457+ if (flags & MOST_CONF_FLAG_UP) {
8458+ switch (type) {
8459+ case CHAN_CTL:
8460+ spin_lock_irqsave(&self->lock, irq_flags);
8461+ /* we only support one channel at a time */
8462+ if (self->ctl_channels[chan_idx])
8463+ goto error;
8464+
8465+ /* reset the FIFO */
8466+ iowrite32((chan_idx == TX_CHAN) ? MLB_FIFO_RST_CTRL_TX :
8467+ MLB_FIFO_RST_CTRL_RX,
8468+ self->membase + MLB_REG_FIFO_RST);
8469+
8470+ err = __timbmost_conf_channel(self, channel,
8471+ (chan_idx == TX_CHAN) ? MLB_CH_CFG_CTRL_TX :
8472+ MLB_CH_CFG_CTRL_RX);
8473+ if (err)
8474+ goto error;
8475+
8476+ if (chan_idx == RX_CHAN) {
8477+ /* enable the receiver */
8478+ cfg = ioread32(self->membase + MLB_REG_CFG);
8479+ cfg |= MLB_CFG_CTRL_RX_EN;
8480+ iowrite32(cfg, self->membase + MLB_REG_CFG);
8481+
8482+ /* enable RX interrupts */
8483+ imr = ioread32(self->membase + MLB_REG_IMR);
8484+ imr |= (MLB_I_CTRL_RX_READY |
8485+ MLB_I_CTRL_RX_PROT_ERR |
8486+ MLB_I_CTRL_RX_CMD_BREAK);
8487+ iowrite32(imr, self->membase + MLB_REG_IMR);
8488+ }
8489+ self->ctl_channels[chan_idx] = channel;
8490+ spin_unlock_irqrestore(&self->lock, irq_flags);
8491+ break;
8492+ case CHAN_SYNC:
8493+ spin_lock_irqsave(&self->lock, irq_flags);
8494+ /* we only support one channel at a time */
8495+ if (self->sync_channels[chan_idx])
8496+ goto error;
8497+
8498+ /* reset the FIFO */
8499+ iowrite32((chan_idx == TX_CHAN) ? MLB_FIFO_RST_SYNC_TX :
8500+ MLB_FIFO_RST_SYNC_RX,
8501+ self->membase + MLB_REG_FIFO_RST);
8502+
8503+ err = __timbmost_conf_channel(self, channel,
8504+ (chan_idx == TX_CHAN) ? MLB_CH_CFG_SYNC_TX :
8505+ MLB_CH_CFG_SYNC_RX);
8506+ if (err)
8507+ goto error;
8508+
8509+ if (chan_idx == RX_CHAN) {
8510+ /* enable the receiver */
8511+ cfg = ioread32(self->membase + MLB_REG_CFG);
8512+ cfg |= MLB_CFG_SYNC_RX_EN;
8513+ iowrite32(cfg, self->membase + MLB_REG_CFG);
8514+
8515+ /* enable prot error interrupts */
8516+ imr = ioread32(self->membase + MLB_REG_IMR);
8517+ imr |= MLB_I_SYNC_RX_PROT_ERR;
8518+ iowrite32(imr, self->membase + MLB_REG_IMR);
8519+ /* start RX DMA */
8520+ __timbmost_sync_read_wake(self);
8521+ }
8522+ self->sync_channels[chan_idx] = channel;
8523+ spin_unlock_irqrestore(&self->lock, irq_flags);
8524+
8525+ break;
8526+ case CHAN_ASYNC:
8527+ spin_lock_irqsave(&self->lock, irq_flags);
8528+ /* we only support one channel at a time */
8529+ if (self->async_channels[chan_idx])
8530+ goto error;
8531+ /* reset the FIFO */
8532+ iowrite32((chan_idx == TX_CHAN) ?
8533+ MLB_FIFO_RST_ASYNC_TX : MLB_FIFO_RST_ASYNC_RX,
8534+ self->membase + MLB_REG_FIFO_RST);
8535+
8536+ err = __timbmost_conf_channel(self, channel,
8537+ (chan_idx == TX_CHAN) ? MLB_CH_CFG_ASYNC_TX :
8538+ MLB_CH_CFG_ASYNC_RX);
8539+ if (err)
8540+ goto error;
8541+
8542+ if (chan_idx == RX_CHAN) {
8543+ /* enable the receiver */
8544+ cfg = ioread32(self->membase + MLB_REG_CFG);
8545+ cfg |= MLB_CFG_ASYNC_RX_EN;
8546+ iowrite32(cfg, self->membase + MLB_REG_CFG);
8547+
8548+ /* enable RX interrupts */
8549+ imr = ioread32(self->membase + MLB_REG_IMR);
8550+ imr |= (MLB_I_ASYNC_RX_READY |
8551+ MLB_I_ASYNC_RX_PROT_ERR |
8552+ MLB_I_ASYNC_RX_CMD_BREAK);
8553+ iowrite32(imr, self->membase + MLB_REG_IMR);
8554+ }
8555+ self->async_channels[chan_idx] = channel;
8556+ spin_unlock_irqrestore(&self->lock, irq_flags);
8557+ break;
8558+ default:
8559+ printk(KERN_WARNING "timbmlb: Unknown channel type\n");
8560+ return -EINVAL;
8561+ }
8562+ } else {
8563+ switch (type) {
8564+ case CHAN_CTL:
8565+ /* stop any ongoing transfer */
8566+ spin_lock_irqsave(&self->lock, irq_flags);
8567+ if (self->ctl_channels[chan_idx] != channel)
8568+ goto error;
8569+
8570+ imr = ioread32(self->membase + MLB_REG_IMR);
8571+ imr &= ~(MLB_I_CTRL_TX_READY |
8572+ MLB_I_CTRL_TX_PROT_ERR |
8573+ MLB_I_CTRL_TX_RX_BREAK |
8574+ MLB_I_CTRL_TX_BUSY_BREAK |
8575+ MLB_I_CTRL_RX_READY |
8576+ MLB_I_CTRL_RX_PROT_ERR |
8577+ MLB_I_CTRL_RX_CMD_BREAK);
8578+ iowrite32(imr, self->membase + MLB_REG_IMR);
8579+
8580+ /* disable CTL RX */
8581+ cfg = ioread32(self->membase + MLB_REG_CFG);
8582+ cfg &= ~MLB_CFG_CTRL_RX_EN;
8583+ iowrite32(cfg, self->membase + MLB_REG_CFG);
8584+
8585+ err = __timbmost_conf_channel(self, channel,
8586+ MLB_CH_CFG_NOT_ALLOCATED);
8587+ spin_unlock_irqrestore(&self->lock, irq_flags);
8588+ skb_queue_purge(&self->ctl_q);
8589+ self->ctl_channels[chan_idx] = 0;
8590+ return err;
8591+ case CHAN_SYNC:
8592+
8593+ /* stop any ongoing transfer */
8594+ spin_lock_irqsave(&self->lock, irq_flags);
8595+ if (self->sync_channels[chan_idx] != channel)
8596+ goto error;
8597+
8598+ /* stop DMA */
8599+ timbmost_stop_sync_dma(self);
8600+ imr = ioread32(self->membase + MLB_REG_IMR);
8601+ imr &= ~MLB_I_SYNC_RX_PROT_ERR;
8602+ iowrite32(imr, self->membase + MLB_REG_IMR);
8603+
8604+ /* disable SYNC TX/RX */
8605+ cfg = ioread32(self->membase + MLB_REG_CFG);
8606+ cfg &= ~(MLB_CFG_SYNC_TX_EN |
8607+ MLB_CFG_SYNC_RX_EN);
8608+ iowrite32(cfg, self->membase + MLB_REG_CFG);
8609+
8610+ err = __timbmost_conf_channel(self, channel,
8611+ MLB_CH_CFG_NOT_ALLOCATED);
8612+ spin_unlock_irqrestore(&self->lock, irq_flags);
8613+ skb_queue_purge(&self->sync_q);
8614+ self->sync_channels[chan_idx] = 0;
8615+ return err;
8616+ case CHAN_ASYNC:
8617+ /* stop any ongoing transfer */
8618+ spin_lock_irqsave(&self->lock, irq_flags);
8619+ if (self->async_channels[chan_idx] != channel)
8620+ goto error;
8621+ imr = ioread32(self->membase + MLB_REG_IMR);
8622+ imr &= ~(MLB_I_ASYNC_TX_READY |
8623+ MLB_I_ASYNC_TX_PROT_ERR |
8624+ MLB_I_ASYNC_TX_RX_BREAK |
8625+ MLB_I_ASYNC_TX_BUSY_BREAK |
8626+ MLB_I_ASYNC_RX_READY |
8627+ MLB_I_ASYNC_RX_PROT_ERR |
8628+ MLB_I_ASYNC_RX_CMD_BREAK);
8629+ iowrite32(imr, self->membase + MLB_REG_IMR);
8630+
8631+ /* disable CTL RX */
8632+ cfg = ioread32(self->membase + MLB_REG_CFG);
8633+ cfg &= ~MLB_CFG_ASYNC_RX_EN;
8634+ iowrite32(cfg, self->membase + MLB_REG_CFG);
8635+
8636+ err = __timbmost_conf_channel(self, channel,
8637+ MLB_CH_CFG_NOT_ALLOCATED);
8638+ spin_unlock_irqrestore(&self->lock, irq_flags);
8639+ skb_queue_purge(&self->async_q);
8640+ self->async_channels[chan_idx] = 0;
8641+ return err;
8642+ default:
8643+ return -EINVAL;
8644+ }
8645+ }
8646+ return 0;
8647+
8648+error:
8649+ spin_unlock_irqrestore(&self->lock, irq_flags);
8650+ return err;
8651+}
8652+
8653+static void timbmost_ctl_write_wake(struct timbmost *self)
8654+{
8655+ unsigned long flags;
8656+ u32 imr;
8657+ u32 isr;
8658+ struct sk_buff *skb;
8659+ int i;
8660+
8661+ dev_dbg(self->mdev->parent, "%s entry\n", __func__);
8662+ __timbmost_dump_regs(self, "Before write");
8663+
8664+ spin_lock_irqsave(&self->lock, flags);
8665+ imr = ioread32(self->membase + MLB_REG_IMR);
8666+ isr = ioread32(self->membase + MLB_REG_ISR);
8667+ spin_unlock_irqrestore(&self->lock, flags);
8668+
8669+ /* check if the hardware is currently writing */
8670+ if (imr & MLB_I_CTRL_TX_READY)
8671+ return;
8672+
8673+ /* check if we have sync */
8674+ if (!(isr & MLB_I_SYNC_LOCK))
8675+ return;
8676+
8677+ skb = skb_dequeue(&self->ctl_q);
8678+ if (!skb)
8679+ return;
8680+
8681+ /* now write to the FIFO */
8682+ for (i = 0; i < skb->len;) {
8683+ u32 word = 0;
8684+ int j;
8685+
8686+ for (j = 0; j < 4 && i < skb->len; j++, i++)
8687+ word |= ((u8 *)skb->data)[i] << j * 8;
8688+
8689+ iowrite32(word, self->membase + MLB_REG_CTRL_TX);
8690+ }
8691+
8692+ /* data is in the FIFO, enable proper interrupts */
8693+ spin_lock_irqsave(&self->lock, flags);
8694+ imr = ioread32(self->membase + MLB_REG_IMR) | MLB_I_CTRL_TX_READY |
8695+ MLB_I_CTRL_TX_PROT_ERR;
8696+ iowrite32(imr, self->membase + MLB_REG_IMR);
8697+ /* start TX */
8698+ iowrite32(MLB_CH_CTRL_CTRL_TX_START, self->membase + MLB_REG_CH_CTRL);
8699+ spin_unlock_irqrestore(&self->lock, flags);
8700+
8701+ kfree_skb(skb);
8702+}
8703+
8704+static void timbmost_async_write_wake(struct timbmost *self)
8705+{
8706+ unsigned long flags;
8707+ u32 imr;
8708+ u32 isr;
8709+ struct sk_buff *skb;
8710+ int i;
8711+
8712+ spin_lock_irqsave(&self->lock, flags);
8713+ imr = ioread32(self->membase + MLB_REG_IMR);
8714+ isr = ioread32(self->membase + MLB_REG_ISR);
8715+ spin_unlock_irqrestore(&self->lock, flags);
8716+
8717+ /* check if the hardware is currently writing */
8718+ if (imr & MLB_I_ASYNC_TX_READY)
8719+ return;
8720+
8721+ /* check if we have sync */
8722+ if (!(isr & MLB_I_SYNC_LOCK))
8723+ return;
8724+
8725+ skb = skb_dequeue(&self->async_q);
8726+ if (!skb)
8727+ return;
8728+
8729+ /* TODO: The FIFO is 32bit not 8bit */
8730+ /* now write to the FIFO */
8731+ for (i = 0; i < skb->len; i++)
8732+ iowrite32(skb->data[i], self->membase + MLB_REG_ASYNC_TX);
8733+
8734+ /* data is in the FIFO, enable proper interrupts */
8735+ spin_lock_irqsave(&self->lock, flags);
8736+ imr = ioread32(self->membase + MLB_REG_IMR) | MLB_I_ASYNC_TX_READY |
8737+ MLB_I_ASYNC_TX_PROT_ERR;
8738+ iowrite32(imr, self->membase + MLB_REG_IMR);
8739+ /* start TX */
8740+ iowrite32(MLB_CH_CTRL_ASYNC_TX_START, self->membase + MLB_REG_CH_CTRL);
8741+ spin_unlock_irqrestore(&self->lock, flags);
8742+
8743+ kfree_skb(skb);
8744+}
8745+
8746+static int timbmost_send(struct sk_buff *skb)
8747+{
8748+ struct most_dev *mdev = (struct most_dev *)skb->dev;
8749+ struct timbmost *self = (struct timbmost *)mdev->driver_data;
8750+
8751+ dev_dbg(mdev->parent, "%s, type: %d\n",
8752+ __func__, most_cb(skb)->channel_type);
8753+
8754+ switch (most_cb(skb)->channel_type) {
8755+ case CHAN_CTL:
8756+ skb_queue_tail(&self->ctl_q, skb);
8757+ timbmost_ctl_write_wake(self);
8758+ break;
8759+ case CHAN_ASYNC:
8760+ skb_queue_tail(&self->async_q, skb);
8761+ timbmost_async_write_wake(self);
8762+ break;
8763+ case CHAN_SYNC:
8764+ skb_queue_tail(&self->sync_q, skb);
8765+ timbmost_sync_start_write(self);
8766+ break;
8767+ default:
8768+ printk(KERN_WARNING "%s: Got unsupported channel type: %d\n",
8769+ __func__, most_cb(skb)->channel_type);
8770+ kfree_skb(skb);
8771+ break;
8772+ }
8773+
8774+ return 0;
8775+}
8776+
8777+static int timbmost_probe(struct platform_device *dev)
8778+{
8779+ int err;
8780+ struct timbmost *self = NULL;
8781+ struct resource *iomem;
8782+ struct timbmlb_platform_data *pdata = dev->dev.platform_data;
8783+
8784+ if (!pdata) {
8785+ printk(KERN_ERR DRIVER_NAME ": No platform data supplied\n");
8786+ err = -EINVAL;
8787+ goto err_mem;
8788+ }
8789+
8790+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
8791+ if (!iomem) {
8792+ err = -EINVAL;
8793+ goto err_mem;
8794+ }
8795+
8796+ self = kzalloc(sizeof(*self), GFP_KERNEL);
8797+ if (!self) {
8798+ err = -ENOMEM;
8799+ goto err_mem;
8800+ }
8801+
8802+ self->mdev = most_alloc_dev();
8803+ if (!self->mdev) {
8804+ err = -ENOMEM;
8805+ goto err_mem;
8806+ }
8807+
8808+ self->mdev->owner = THIS_MODULE;
8809+ self->mdev->driver_data = self;
8810+ self->mdev->parent = &dev->dev;
8811+ self->mdev->open = timbmost_open;
8812+ self->mdev->close = timbmost_close;
8813+ self->mdev->send = timbmost_send;
8814+ self->mdev->conf_channel = timbmost_conf_channel;
8815+
8816+ if (!request_mem_region(iomem->start,
8817+ resource_size(iomem), "timb-most")) {
8818+ err = -EBUSY;
8819+ goto err_mem;
8820+ }
8821+
8822+ self->membase = ioremap(iomem->start, resource_size(iomem));
8823+ if (!self->membase) {
8824+ printk(KERN_ERR "timbmost: Failed to remap I/O memory\n");
8825+ err = -ENOMEM;
8826+ goto err_ioremap;
8827+ }
8828+
8829+ self->reset_pin = pdata->reset_pin;
8830+
8831+ /* find interrupt */
8832+ self->irq = platform_get_irq(dev, 0);
8833+ if (self->irq < 0) {
8834+ err = self->irq;
8835+ goto err_get_irq;
8836+ }
8837+
8838+ /* register to the MOST layer */
8839+ err = most_register_dev(self->mdev);
8840+ if (err)
8841+ goto err_register;
8842+
8843+
8844+ platform_set_drvdata(dev, self);
8845+
8846+ return 0;
8847+
8848+err_get_irq:
8849+err_register:
8850+ iounmap(self->membase);
8851+err_ioremap:
8852+ release_mem_region(iomem->start, resource_size(iomem));
8853+err_mem:
8854+ if (self) {
8855+ if (self->mdev)
8856+ most_free_dev(self->mdev);
8857+
8858+ timbdma_free_desc(self->sync_read_desc);
8859+ timbdma_free_desc(self->sync_write_desc);
8860+
8861+ kfree(self);
8862+ }
8863+ printk(KERN_ERR "timb-most: Failed to register: %d\n", err);
8864+
8865+ return err;
8866+}
8867+
8868+static int timbmost_remove(struct platform_device *dev)
8869+{
8870+ struct timbmost *self = platform_get_drvdata(dev);
8871+ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
8872+
8873+ most_unregister_dev(self->mdev);
8874+ iounmap(self->membase);
8875+ release_mem_region(iomem->start, resource_size(iomem));
8876+ most_free_dev(self->mdev);
8877+ kfree(self);
8878+ return 0;
8879+}
8880+
8881+static struct platform_driver timbmost_platform_driver = {
8882+ .driver = {
8883+ .name = DRIVER_NAME,
8884+ .owner = THIS_MODULE,
8885+ },
8886+ .probe = timbmost_probe,
8887+ .remove = timbmost_remove,
8888+};
8889+
8890+/*--------------------------------------------------------------------------*/
8891+
8892+static int __init timbmost_init(void)
8893+{
8894+ return platform_driver_register(&timbmost_platform_driver);
8895+}
8896+
8897+static void __exit timbmost_exit(void)
8898+{
8899+ platform_driver_unregister(&timbmost_platform_driver);
8900+}
8901+
8902+module_init(timbmost_init);
8903+module_exit(timbmost_exit);
8904+
8905+MODULE_DESCRIPTION("Timberdale MLB driver");
8906+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
8907+MODULE_LICENSE("GPL v2");
8908+MODULE_ALIAS("platform:timb-most");
8909+
8910diff -uNr linux-2.6.31/drivers/serial/Kconfig linux-2.6.31.new/drivers/serial/Kconfig
8911--- linux-2.6.31/drivers/serial/Kconfig 2009-10-23 11:18:08.000000000 -0700
8912+++ linux-2.6.31.new/drivers/serial/Kconfig 2009-10-23 11:17:29.000000000 -0700
8913@@ -855,7 +855,7 @@
8914
8915 config SERIAL_UARTLITE
8916 tristate "Xilinx uartlite serial port support"
8917- depends on PPC32 || MICROBLAZE
8918+ depends on PPC32 || MICROBLAZE || MFD_TIMBERDALE
8919 select SERIAL_CORE
8920 help
8921 Say Y here if you want to use the Xilinx uartlite serial controller.
8922diff -uNr linux-2.6.31/drivers/serial/timbuart.c linux-2.6.31.new/drivers/serial/timbuart.c
8923--- linux-2.6.31/drivers/serial/timbuart.c 2009-10-23 11:18:30.000000000 -0700
8924+++ linux-2.6.31.new/drivers/serial/timbuart.c 2009-10-23 11:17:29.000000000 -0700
8925@@ -31,6 +31,7 @@
8926
8927 struct timbuart_port {
8928 struct uart_port port;
8929+ struct uart_driver uart_driver;
8930 struct tasklet_struct tasklet;
8931 int usedma;
8932 u32 last_ier;
8933@@ -410,7 +411,7 @@
8934 .verify_port = timbuart_verify_port
8935 };
8936
8937-static struct uart_driver timbuart_driver = {
8938+static const __devinitconst struct uart_driver timbuart_driver_template = {
8939 .owner = THIS_MODULE,
8940 .driver_name = "timberdale_uart",
8941 .dev_name = "ttyTU",
8942@@ -419,7 +420,7 @@
8943 .nr = 1
8944 };
8945
8946-static int timbuart_probe(struct platform_device *dev)
8947+static int __devinit timbuart_probe(struct platform_device *dev)
8948 {
8949 int err;
8950 struct timbuart_port *uart;
8951@@ -433,6 +434,8 @@
8952 goto err_mem;
8953 }
8954
8955+ uart->uart_driver = timbuart_driver_template;
8956+
8957 uart->usedma = 0;
8958
8959 uart->port.uartclk = 3250000 * 16;
8960@@ -461,11 +464,11 @@
8961
8962 tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
8963
8964- err = uart_register_driver(&timbuart_driver);
8965+ err = uart_register_driver(&uart->uart_driver);
8966 if (err)
8967 goto err_register;
8968
8969- err = uart_add_one_port(&timbuart_driver, &uart->port);
8970+ err = uart_add_one_port(&uart->uart_driver, &uart->port);
8971 if (err)
8972 goto err_add_port;
8973
8974@@ -474,7 +477,7 @@
8975 return 0;
8976
8977 err_add_port:
8978- uart_unregister_driver(&timbuart_driver);
8979+ uart_unregister_driver(&uart->uart_driver);
8980 err_register:
8981 kfree(uart);
8982 err_mem:
8983@@ -484,13 +487,13 @@
8984 return err;
8985 }
8986
8987-static int timbuart_remove(struct platform_device *dev)
8988+static int __devexit timbuart_remove(struct platform_device *dev)
8989 {
8990 struct timbuart_port *uart = platform_get_drvdata(dev);
8991
8992 tasklet_kill(&uart->tasklet);
8993- uart_remove_one_port(&timbuart_driver, &uart->port);
8994- uart_unregister_driver(&timbuart_driver);
8995+ uart_remove_one_port(&uart->uart_driver, &uart->port);
8996+ uart_unregister_driver(&uart->uart_driver);
8997 kfree(uart);
8998
8999 return 0;
9000diff -uNr linux-2.6.31/drivers/spi/Kconfig linux-2.6.31.new/drivers/spi/Kconfig
9001--- linux-2.6.31/drivers/spi/Kconfig 2009-10-23 11:18:30.000000000 -0700
9002+++ linux-2.6.31.new/drivers/spi/Kconfig 2009-10-23 11:17:32.000000000 -0700
9003@@ -218,8 +218,8 @@
9004 SPI driver for Toshiba TXx9 MIPS SoCs
9005
9006 config SPI_XILINX
9007- tristate "Xilinx SPI controller"
9008- depends on (XILINX_VIRTEX || MICROBLAZE) && EXPERIMENTAL
9009+ tristate "Xilinx SPI controller common module"
9010+ depends on EXPERIMENTAL
9011 select SPI_BITBANG
9012 help
9013 This exposes the SPI controller IP from the Xilinx EDK.
9014@@ -227,6 +227,25 @@
9015 See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
9016 Product Specification document (DS464) for hardware details.
9017
9018+config SPI_XILINX_OF
9019+ tristate "Xilinx SPI controller OF device"
9020+ depends on SPI_XILINX && XILINX_VIRTEX
9021+ help
9022+ This exposes the SPI controller IP from the Xilinx EDK.
9023+
9024+ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
9025+ Product Specification document (DS464) for hardware details.
9026+
9027+config SPI_XILINX_PLTFM
9028+ tristate "Xilinx SPI controller platform device"
9029+ depends on SPI_XILINX
9030+ help
9031+ This exposes the SPI controller IP from the Xilinx EDK.
9032+
9033+ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
9034+ Product Specification document (DS464) for hardware details.
9035+
9036+
9037 #
9038 # Add new SPI master controllers in alphabetical order above this line
9039 #
9040diff -uNr linux-2.6.31/drivers/spi/Makefile linux-2.6.31.new/drivers/spi/Makefile
9041--- linux-2.6.31/drivers/spi/Makefile 2009-10-23 11:18:30.000000000 -0700
9042+++ linux-2.6.31.new/drivers/spi/Makefile 2009-10-23 11:17:32.000000000 -0700
9043@@ -30,6 +30,8 @@
9044 obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
9045 obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
9046 obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
9047+obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
9048+obj-$(CONFIG_SPI_XILINX_PLTFM) += xilinx_spi_pltfm.o
9049 obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
9050 # ... add above this line ...
9051
9052diff -uNr linux-2.6.31/drivers/spi/xilinx_spi.c linux-2.6.31.new/drivers/spi/xilinx_spi.c
9053--- linux-2.6.31/drivers/spi/xilinx_spi.c 2009-10-23 11:18:30.000000000 -0700
9054+++ linux-2.6.31.new/drivers/spi/xilinx_spi.c 2009-10-23 11:17:32.000000000 -0700
9055@@ -14,22 +14,35 @@
9056 #include <linux/module.h>
9057 #include <linux/init.h>
9058 #include <linux/interrupt.h>
9059-#include <linux/platform_device.h>
9060-
9061-#include <linux/of_platform.h>
9062-#include <linux/of_device.h>
9063-#include <linux/of_spi.h>
9064
9065 #include <linux/spi/spi.h>
9066 #include <linux/spi/spi_bitbang.h>
9067 #include <linux/io.h>
9068
9069-#define XILINX_SPI_NAME "xilinx_spi"
9070+#include "xilinx_spi.h"
9071+
9072+struct xilinx_spi {
9073+ /* bitbang has to be first */
9074+ struct spi_bitbang bitbang;
9075+ struct completion done;
9076+ struct resource mem; /* phys mem */
9077+ void __iomem *regs; /* virt. address of the control registers */
9078+ u32 irq;
9079+ u8 *rx_ptr; /* pointer in the Rx buffer */
9080+ const u8 *tx_ptr; /* pointer in the Tx buffer */
9081+ int remaining_bytes; /* the number of bytes left to transfer */
9082+ /* offset to the XSPI regs, these might vary... */
9083+ u8 bits_per_word;
9084+ bool big_endian; /* the device can be accessed in big- or
9085+ * little-endian byte order
9086+ */
9087+};
9088+
9089
9090 /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
9091 * Product Specification", DS464
9092 */
9093-#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */
9094+#define XSPI_CR_OFFSET 0x60 /* Control Register */
9095
9096 #define XSPI_CR_ENABLE 0x02
9097 #define XSPI_CR_MASTER_MODE 0x04
9098@@ -40,8 +53,9 @@
9099 #define XSPI_CR_RXFIFO_RESET 0x40
9100 #define XSPI_CR_MANUAL_SSELECT 0x80
9101 #define XSPI_CR_TRANS_INHIBIT 0x100
9102+#define XSPI_CR_LSB_FIRST 0x200
9103
9104-#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */
9105+#define XSPI_SR_OFFSET 0x64 /* Status Register */
9106
9107 #define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
9108 #define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
9109@@ -49,8 +63,8 @@
9110 #define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
9111 #define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
9112
9113-#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */
9114-#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */
9115+#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */
9116+#define XSPI_RXD_OFFSET 0x6C /* Data Receive Register */
9117
9118 #define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
9119
9120@@ -70,43 +84,72 @@
9121 #define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
9122 #define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
9123 #define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
9124+#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */
9125
9126 #define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
9127 #define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
9128
9129-struct xilinx_spi {
9130- /* bitbang has to be first */
9131- struct spi_bitbang bitbang;
9132- struct completion done;
9133+/* The helpers that follow do little- or big-endian reads and writes,
9134+ * depending on how the device instance is configured.
9135+ */
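+/*
+ * Brief illustration (hypothetical call): with XSPI_CR_OFFSET defined as
+ * 0x60 above, xspi_write16(xspi, XSPI_CR_OFFSET, cr) on a big-endian
+ * instance issues iowrite16be() at regs + 0x62, the 16-bit register the
+ * old hard-coded 0x62 offset pointed at, while a little-endian instance
+ * uses a plain iowrite16() at regs + 0x60.
+ */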
9136+static inline void xspi_write8(struct xilinx_spi *xspi, u32 offs, u8 val)
9137+{
9138+ iowrite8(val, xspi->regs + offs + ((xspi->big_endian) ? 3 : 0));
9139+}
9140
9141- void __iomem *regs; /* virt. address of the control registers */
9142+static inline void xspi_write16(struct xilinx_spi *xspi, u32 offs, u16 val)
9143+{
9144+ if (xspi->big_endian)
9145+ iowrite16be(val, xspi->regs + offs + 2);
9146+ else
9147+ iowrite16(val, xspi->regs + offs);
9148+}
9149
9150- u32 irq;
9151+static inline void xspi_write32(struct xilinx_spi *xspi, u32 offs, u32 val)
9152+{
9153+ if (xspi->big_endian)
9154+ iowrite32be(val, xspi->regs + offs);
9155+ else
9156+ iowrite32(val, xspi->regs + offs);
9157+}
9158
9159- u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
9160+static inline u8 xspi_read8(struct xilinx_spi *xspi, u32 offs)
9161+{
9162+ return ioread8(xspi->regs + offs + ((xspi->big_endian) ? 3 : 0));
9163+}
9164
9165- u8 *rx_ptr; /* pointer in the Tx buffer */
9166- const u8 *tx_ptr; /* pointer in the Rx buffer */
9167- int remaining_bytes; /* the number of bytes left to transfer */
9168-};
9169+static inline u16 xspi_read16(struct xilinx_spi *xspi, u32 offs)
9170+{
9171+ if (xspi->big_endian)
9172+ return ioread16be(xspi->regs + offs + 2);
9173+ else
9174+ return ioread16(xspi->regs + offs);
9175+}
9176+
9177+static inline u32 xspi_read32(struct xilinx_spi *xspi, u32 offs)
9178+{
9179+ if (xspi->big_endian)
9180+ return ioread32be(xspi->regs + offs);
9181+ else
9182+ return ioread32(xspi->regs + offs);
9183+}
9184
9185-static void xspi_init_hw(void __iomem *regs_base)
9186+static void xspi_init_hw(struct xilinx_spi *xspi)
9187 {
9188 /* Reset the SPI device */
9189- out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET,
9190- XIPIF_V123B_RESET_MASK);
9191+ xspi_write32(xspi, XIPIF_V123B_RESETR_OFFSET, XIPIF_V123B_RESET_MASK);
9192 /* Disable all the interrupts just in case */
9193- out_be32(regs_base + XIPIF_V123B_IIER_OFFSET, 0);
9194+ xspi_write32(xspi, XIPIF_V123B_IIER_OFFSET, 0);
9195 /* Enable the global IPIF interrupt */
9196- out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET,
9197- XIPIF_V123B_GINTR_ENABLE);
9198+ xspi_write32(xspi, XIPIF_V123B_DGIER_OFFSET, XIPIF_V123B_GINTR_ENABLE);
9199 /* Deselect the slave on the SPI bus */
9200- out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff);
9201+ xspi_write32(xspi, XSPI_SSR_OFFSET, 0xffff);
9202 /* Disable the transmitter, enable Manual Slave Select Assertion,
9203 * put SPI controller into master mode, and enable it */
9204- out_be16(regs_base + XSPI_CR_OFFSET,
9205- XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT
9206- | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE);
9207+ xspi_write16(xspi, XSPI_CR_OFFSET,
9208+ XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT |
9209+ XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET |
9210+ XSPI_CR_RXFIFO_RESET);
9211 }
9212
9213 static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
9214@@ -115,16 +158,16 @@
9215
9216 if (is_on == BITBANG_CS_INACTIVE) {
9217 /* Deselect the slave on the SPI bus */
9218- out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff);
9219+ xspi_write32(xspi, XSPI_SSR_OFFSET, 0xffff);
9220 } else if (is_on == BITBANG_CS_ACTIVE) {
9221 /* Set the SPI clock phase and polarity */
9222- u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET)
9223+ u16 cr = xspi_read16(xspi, XSPI_CR_OFFSET)
9224 & ~XSPI_CR_MODE_MASK;
9225 if (spi->mode & SPI_CPHA)
9226 cr |= XSPI_CR_CPHA;
9227 if (spi->mode & SPI_CPOL)
9228 cr |= XSPI_CR_CPOL;
9229- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
9230+ xspi_write16(xspi, XSPI_CR_OFFSET, cr);
9231
9232 /* We do not check spi->max_speed_hz here as the SPI clock
9233 * frequency is not software programmable (the IP block design
9234@@ -132,24 +175,27 @@
9235 */
9236
9237 /* Activate the chip select */
9238- out_be32(xspi->regs + XSPI_SSR_OFFSET,
9239+ xspi_write32(xspi, XSPI_SSR_OFFSET,
9240 ~(0x0001 << spi->chip_select));
9241 }
9242 }
9243
9244 /* spi_bitbang requires custom setup_transfer() to be defined if there is a
9245 * custom txrx_bufs(). We have nothing to setup here as the SPI IP block
9246- * supports just 8 bits per word, and SPI clock can't be changed in software.
9247- * Check for 8 bits per word. Chip select delay calculations could be
9248+ * supports 8 or 16 bits per word, which can not be changed in software.
9249+ * SPI clock can't be changed in software.
9250+ * Check for correct bits per word. Chip select delay calculations could be
9251 * added here as soon as bitbang_work() can be made aware of the delay value.
9252 */
9253 static int xilinx_spi_setup_transfer(struct spi_device *spi,
9254- struct spi_transfer *t)
9255+ struct spi_transfer *t)
9256 {
9257 u8 bits_per_word;
9258+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
9259
9260- bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
9261- if (bits_per_word != 8) {
9262+ bits_per_word = (t->bits_per_word) ? t->bits_per_word :
9263+ spi->bits_per_word;
9264+ if (bits_per_word != xspi->bits_per_word) {
9265 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
9266 __func__, bits_per_word);
9267 return -EINVAL;
9268@@ -160,34 +206,50 @@
9269
9270 static int xilinx_spi_setup(struct spi_device *spi)
9271 {
9272- struct spi_bitbang *bitbang;
9273- struct xilinx_spi *xspi;
9274- int retval;
9275-
9276- xspi = spi_master_get_devdata(spi->master);
9277- bitbang = &xspi->bitbang;
9278-
9279- retval = xilinx_spi_setup_transfer(spi, NULL);
9280- if (retval < 0)
9281- return retval;
9282-
9283+ /* Always return 0; the number of bits cannot be checked here.
9284+ * SPI setup may be called before any protocol driver is bound, in
9285+ * which case the SPI core defaults to 8 bits per word, which this
9286+ * controller does not support in some configurations. Returning an
9287+ * error here would keep the SPI device from being registered, so no
9288+ * driver could ever get hold of it. Once a protocol driver is bound,
9289+ * it calls SPI setup again with the correct number of bits per
9290+ * transfer. If a driver sets up with the wrong bit width, the
9291+ * transfer will fail when it is attempted.
9292+ */
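+ /*
+ * Illustrative sequence (assumed typical): the SPI core may run a
+ * first setup with its 8-bit default when the device is added, which
+ * is accepted here; the protocol driver later calls spi_setup() with
+ * its real bits_per_word, and any mismatch is rejected per transfer
+ * by xilinx_spi_setup_transfer() with -EINVAL.
+ */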
9293 return 0;
9294 }
9295
9296 static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
9297 {
9298 u8 sr;
9299+ u8 wsize;
9300+ if (xspi->bits_per_word == 8)
9301+ wsize = 1;
9302+ else if (xspi->bits_per_word == 16)
9303+ wsize = 2;
9304+ else
9305+ wsize = 4;
9306
9307 /* Fill the Tx FIFO with as many bytes as possible */
9308- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
9309- while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
9310+ sr = xspi_read8(xspi, XSPI_SR_OFFSET);
9311+ while ((sr & XSPI_SR_TX_FULL_MASK) == 0 &&
9312+ xspi->remaining_bytes > 0) {
9313 if (xspi->tx_ptr) {
9314- out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++);
9315- } else {
9316- out_8(xspi->regs + XSPI_TXD_OFFSET, 0);
9317- }
9318- xspi->remaining_bytes--;
9319- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
9320+ if (wsize == 1)
9321+ xspi_write8(xspi, XSPI_TXD_OFFSET,
9322+ *xspi->tx_ptr);
9323+ else if (wsize == 2)
9324+ xspi_write16(xspi, XSPI_TXD_OFFSET,
9325+ *(u16 *)(xspi->tx_ptr));
9326+ else if (wsize == 4)
9327+ xspi_write32(xspi, XSPI_TXD_OFFSET,
9328+ *(u32 *)(xspi->tx_ptr));
9329+
9330+ xspi->tx_ptr += wsize;
9331+ } else
9332+ xspi_write8(xspi, XSPI_TXD_OFFSET, 0);
9333+ xspi->remaining_bytes -= wsize;
9334+ sr = xspi_read8(xspi, XSPI_SR_OFFSET);
9335 }
9336 }
9337
9338@@ -209,23 +271,22 @@
9339 /* Enable the transmit empty interrupt, which we use to determine
9340 * progress on the transmission.
9341 */
9342- ipif_ier = in_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET);
9343- out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET,
9344+ ipif_ier = xspi_read32(xspi, XIPIF_V123B_IIER_OFFSET);
9345+ xspi_write32(xspi, XIPIF_V123B_IIER_OFFSET,
9346 ipif_ier | XSPI_INTR_TX_EMPTY);
9347
9348 /* Start the transfer by not inhibiting the transmitter any longer */
9349- cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
9350- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
9351+ cr = xspi_read16(xspi, XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
9352+ xspi_write16(xspi, XSPI_CR_OFFSET, cr);
9353
9354 wait_for_completion(&xspi->done);
9355
9356 /* Disable the transmit empty interrupt */
9357- out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, ipif_ier);
9358+ xspi_write32(xspi, XIPIF_V123B_IIER_OFFSET, ipif_ier);
9359
9360 return t->len - xspi->remaining_bytes;
9361 }
9362
9363-
9364 /* This driver supports single master mode only. Hence Tx FIFO Empty
9365 * is the only interrupt we care about.
9366 * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
9367@@ -237,32 +298,50 @@
9368 u32 ipif_isr;
9369
9370 /* Get the IPIF interrupts, and clear them immediately */
9371- ipif_isr = in_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET);
9372- out_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET, ipif_isr);
9373+ ipif_isr = xspi_read32(xspi, XIPIF_V123B_IISR_OFFSET);
9374+ xspi_write32(xspi, XIPIF_V123B_IISR_OFFSET, ipif_isr);
9375
9376 if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
9377 u16 cr;
9378 u8 sr;
9379+ u8 rsize;
9380+ if (xspi->bits_per_word == 8)
9381+ rsize = 1;
9382+ else if (xspi->bits_per_word == 16)
9383+ rsize = 2;
9384+ else
9385+ rsize = 4;
9386
9387 /* A transmit has just completed. Process received data and
9388 * check for more data to transmit. Always inhibit the
9389 * transmitter while the Isr refills the transmit register/FIFO,
9390 * or make sure it is stopped if we're done.
9391 */
9392- cr = in_be16(xspi->regs + XSPI_CR_OFFSET);
9393- out_be16(xspi->regs + XSPI_CR_OFFSET,
9394- cr | XSPI_CR_TRANS_INHIBIT);
9395+ cr = xspi_read16(xspi, XSPI_CR_OFFSET);
9396+ xspi_write16(xspi, XSPI_CR_OFFSET, cr | XSPI_CR_TRANS_INHIBIT);
9397
9398 /* Read out all the data from the Rx FIFO */
9399- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
9400+ sr = xspi_read8(xspi, XSPI_SR_OFFSET);
9401 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
9402- u8 data;
9403+ u32 data;
9404+ if (rsize == 1)
9405+ data = xspi_read8(xspi, XSPI_RXD_OFFSET);
9406+ else if (rsize == 2)
9407+ data = xspi_read16(xspi, XSPI_RXD_OFFSET);
9408+ else
9409+ data = xspi_read32(xspi, XSPI_RXD_OFFSET);
9410
9411- data = in_8(xspi->regs + XSPI_RXD_OFFSET);
9412 if (xspi->rx_ptr) {
9413- *xspi->rx_ptr++ = data;
9414+ if (rsize == 1)
9415+ *xspi->rx_ptr = data & 0xff;
9416+ else if (rsize == 2)
9417+ *(u16 *)(xspi->rx_ptr) = data & 0xffff;
9418+ else
9419+ *((u32 *)(xspi->rx_ptr)) = data;
9420+ xspi->rx_ptr += rsize;
9421 }
9422- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
9423+
9424+ sr = xspi_read8(xspi, XSPI_SR_OFFSET);
9425 }
9426
9427 /* See if there is more data to send */
9428@@ -271,7 +350,7 @@
9429 /* Start the transfer by not inhibiting the
9430 * transmitter any longer
9431 */
9432- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
9433+ xspi_write16(xspi, XSPI_CR_OFFSET, cr);
9434 } else {
9435 /* No more data to send.
9436 * Indicate the transfer is completed.
9437@@ -279,44 +358,21 @@
9438 complete(&xspi->done);
9439 }
9440 }
9441-
9442 return IRQ_HANDLED;
9443 }
9444
9445-static int __init xilinx_spi_of_probe(struct of_device *ofdev,
9446- const struct of_device_id *match)
9447+struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
9448+ u32 irq, s16 bus_num, u16 num_chipselect, u8 bits_per_word,
9449+ bool big_endian)
9450 {
9451 struct spi_master *master;
9452 struct xilinx_spi *xspi;
9453- struct resource r_irq_struct;
9454- struct resource r_mem_struct;
9455+ int ret = 0;
9456
9457- struct resource *r_irq = &r_irq_struct;
9458- struct resource *r_mem = &r_mem_struct;
9459- int rc = 0;
9460- const u32 *prop;
9461- int len;
9462+ master = spi_alloc_master(dev, sizeof(struct xilinx_spi));
9463
9464- /* Get resources(memory, IRQ) associated with the device */
9465- master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi));
9466-
9467- if (master == NULL) {
9468- return -ENOMEM;
9469- }
9470-
9471- dev_set_drvdata(&ofdev->dev, master);
9472-
9473- rc = of_address_to_resource(ofdev->node, 0, r_mem);
9474- if (rc) {
9475- dev_warn(&ofdev->dev, "invalid address\n");
9476- goto put_master;
9477- }
9478-
9479- rc = of_irq_to_resource(ofdev->node, 0, r_irq);
9480- if (rc == NO_IRQ) {
9481- dev_warn(&ofdev->dev, "no IRQ found\n");
9482- goto put_master;
9483- }
9484+ if (master == NULL)
9485+ return ERR_PTR(-ENOMEM);
9486
9487 /* the spi->mode bits understood by this driver: */
9488 master->mode_bits = SPI_CPOL | SPI_CPHA;
9489@@ -329,128 +385,73 @@
9490 xspi->bitbang.master->setup = xilinx_spi_setup;
9491 init_completion(&xspi->done);
9492
9493- xspi->irq = r_irq->start;
9494-
9495- if (!request_mem_region(r_mem->start,
9496- r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) {
9497- rc = -ENXIO;
9498- dev_warn(&ofdev->dev, "memory request failure\n");
9499+ if (!request_mem_region(mem->start, resource_size(mem),
9500+ XILINX_SPI_NAME)) {
9501+ ret = -ENXIO;
9502 goto put_master;
9503 }
9504
9505- xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
9506+ xspi->regs = ioremap(mem->start, resource_size(mem));
9507 if (xspi->regs == NULL) {
9508- rc = -ENOMEM;
9509- dev_warn(&ofdev->dev, "ioremap failure\n");
9510- goto release_mem;
9511+ ret = -ENOMEM;
9512+ dev_warn(dev, "ioremap failure\n");
9513+ goto map_failed;
9514 }
9515- xspi->irq = r_irq->start;
9516
9517- /* dynamic bus assignment */
9518- master->bus_num = -1;
9519-
9520- /* number of slave select bits is required */
9521- prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
9522- if (!prop || len < sizeof(*prop)) {
9523- dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
9524- goto unmap_io;
9525- }
9526- master->num_chipselect = *prop;
9527+ master->bus_num = bus_num;
9528+ master->num_chipselect = num_chipselect;
9529+
9530+ xspi->mem = *mem;
9531+ xspi->irq = irq;
9532+ xspi->bits_per_word = bits_per_word;
9533+ xspi->big_endian = big_endian;
9534
9535 /* SPI controller initializations */
9536- xspi_init_hw(xspi->regs);
9537+ xspi_init_hw(xspi);
9538
9539 /* Register for SPI Interrupt */
9540- rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
9541- if (rc != 0) {
9542- dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq);
9543+ ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
9544+ if (ret != 0)
9545 goto unmap_io;
9546- }
9547
9548- rc = spi_bitbang_start(&xspi->bitbang);
9549- if (rc != 0) {
9550- dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n");
9551+ ret = spi_bitbang_start(&xspi->bitbang);
9552+ if (ret != 0) {
9553+ dev_err(dev, "spi_bitbang_start FAILED\n");
9554 goto free_irq;
9555 }
9556
9557- dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
9558- (unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq);
9559-
9560- /* Add any subnodes on the SPI bus */
9561- of_register_spi_devices(master, ofdev->node);
9562-
9563- return rc;
9564+ dev_info(dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
9565+ (u32)mem->start, (u32)xspi->regs, xspi->irq);
9566+ return master;
9567
9568 free_irq:
9569 free_irq(xspi->irq, xspi);
9570 unmap_io:
9571 iounmap(xspi->regs);
9572-release_mem:
9573- release_mem_region(r_mem->start, resource_size(r_mem));
9574+map_failed:
9575+ release_mem_region(mem->start, resource_size(mem));
9576 put_master:
9577 spi_master_put(master);
9578- return rc;
9579+ return ERR_PTR(ret);
9580 }
9581+EXPORT_SYMBOL(xilinx_spi_init);
9582
9583-static int __devexit xilinx_spi_remove(struct of_device *ofdev)
9584+void xilinx_spi_deinit(struct spi_master *master)
9585 {
9586 struct xilinx_spi *xspi;
9587- struct spi_master *master;
9588- struct resource r_mem;
9589
9590- master = platform_get_drvdata(ofdev);
9591 xspi = spi_master_get_devdata(master);
9592
9593 spi_bitbang_stop(&xspi->bitbang);
9594 free_irq(xspi->irq, xspi);
9595 iounmap(xspi->regs);
9596- if (!of_address_to_resource(ofdev->node, 0, &r_mem))
9597- release_mem_region(r_mem.start, resource_size(&r_mem));
9598- dev_set_drvdata(&ofdev->dev, 0);
9599- spi_master_put(xspi->bitbang.master);
9600-
9601- return 0;
9602-}
9603-
9604-/* work with hotplug and coldplug */
9605-MODULE_ALIAS("platform:" XILINX_SPI_NAME);
9606-
9607-static int __exit xilinx_spi_of_remove(struct of_device *op)
9608-{
9609- return xilinx_spi_remove(op);
9610-}
9611
9612-static struct of_device_id xilinx_spi_of_match[] = {
9613- { .compatible = "xlnx,xps-spi-2.00.a", },
9614- { .compatible = "xlnx,xps-spi-2.00.b", },
9615- {}
9616-};
9617-
9618-MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
9619-
9620-static struct of_platform_driver xilinx_spi_of_driver = {
9621- .owner = THIS_MODULE,
9622- .name = "xilinx-xps-spi",
9623- .match_table = xilinx_spi_of_match,
9624- .probe = xilinx_spi_of_probe,
9625- .remove = __exit_p(xilinx_spi_of_remove),
9626- .driver = {
9627- .name = "xilinx-xps-spi",
9628- .owner = THIS_MODULE,
9629- },
9630-};
9631-
9632-static int __init xilinx_spi_init(void)
9633-{
9634- return of_register_platform_driver(&xilinx_spi_of_driver);
9635+ release_mem_region(xspi->mem.start, resource_size(&xspi->mem));
9636+ spi_master_put(xspi->bitbang.master);
9637 }
9638-module_init(xilinx_spi_init);
9639+EXPORT_SYMBOL(xilinx_spi_deinit);
9640
9641-static void __exit xilinx_spi_exit(void)
9642-{
9643- of_unregister_platform_driver(&xilinx_spi_of_driver);
9644-}
9645-module_exit(xilinx_spi_exit);
9646 MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
9647 MODULE_DESCRIPTION("Xilinx SPI driver");
9648 MODULE_LICENSE("GPL");
9649+
9650diff -uNr linux-2.6.31/drivers/spi/xilinx_spi.h linux-2.6.31.new/drivers/spi/xilinx_spi.h
9651--- linux-2.6.31/drivers/spi/xilinx_spi.h 1969-12-31 16:00:00.000000000 -0800
9652+++ linux-2.6.31.new/drivers/spi/xilinx_spi.h 2009-10-23 11:17:32.000000000 -0700
9653@@ -0,0 +1,33 @@
9654+/*
9655+ * xilinx_spi.h
9656+ * Copyright (c) 2009 Intel Corporation
9657+ *
9658+ * This program is free software; you can redistribute it and/or modify
9659+ * it under the terms of the GNU General Public License version 2 as
9660+ * published by the Free Software Foundation.
9661+ *
9662+ * This program is distributed in the hope that it will be useful,
9663+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
9664+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9665+ * GNU General Public License for more details.
9666+ *
9667+ * You should have received a copy of the GNU General Public License
9668+ * along with this program; if not, write to the Free Software
9669+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
9670+ */
9671+
9672+#ifndef _XILINX_SPI_H_
9673+#define _XILINX_SPI_H_ 1
9674+
9675+#include <linux/spi/spi.h>
9676+#include <linux/spi/spi_bitbang.h>
9677+#include <linux/spi/xilinx_spi.h>
9678+
9679+#define XILINX_SPI_NAME "xilinx_spi"
9680+
9681+struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
9682+ u32 irq, s16 bus_num, u16 num_chipselect, u8 bits_per_word,
9683+ bool big_endian);
9684+
9685+void xilinx_spi_deinit(struct spi_master *master);
9686+#endif
9687diff -uNr linux-2.6.31/drivers/spi/xilinx_spi_of.c linux-2.6.31.new/drivers/spi/xilinx_spi_of.c
9688--- linux-2.6.31/drivers/spi/xilinx_spi_of.c 1969-12-31 16:00:00.000000000 -0800
9689+++ linux-2.6.31.new/drivers/spi/xilinx_spi_of.c 2009-10-23 11:17:32.000000000 -0700
9690@@ -0,0 +1,120 @@
9691+/*
9692+ * xilinx_spi_of.c
9693+ *
9694+ * Xilinx SPI controller driver (master mode only)
9695+ *
9696+ * Author: MontaVista Software, Inc.
9697+ * source@mvista.com
9698+ *
9699+ * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the
9700+ * terms of the GNU General Public License version 2. This program is licensed
9701+ * "as is" without any warranty of any kind, whether express or implied.
9702+ */
9703+
9704+#include <linux/module.h>
9705+#include <linux/init.h>
9706+#include <linux/interrupt.h>
9707+#include <linux/io.h>
9708+#include <linux/platform_device.h>
9709+
9710+#include <linux/of_platform.h>
9711+#include <linux/of_device.h>
9712+#include <linux/of_spi.h>
9713+
9714+#include <linux/spi/spi.h>
9715+#include <linux/spi/spi_bitbang.h>
9716+
9717+#include "xilinx_spi.h"
9718+
9719+
9720+static int __init xilinx_spi_of_probe(struct of_device *ofdev,
9721+ const struct of_device_id *match)
9722+{
9723+ struct resource r_irq_struct;
9724+ struct resource r_mem_struct;
9725+ struct spi_master *master;
9726+
9727+ struct resource *r_irq = &r_irq_struct;
9728+ struct resource *r_mem = &r_mem_struct;
9729+ int rc = 0;
9730+ const u32 *prop;
9731+ int len;
9732+
9733+ rc = of_address_to_resource(ofdev->node, 0, r_mem);
9734+ if (rc) {
9735+ dev_warn(&ofdev->dev, "invalid address\n");
9736+ return rc;
9737+ }
9738+
9739+ rc = of_irq_to_resource(ofdev->node, 0, r_irq);
9740+ if (rc == NO_IRQ) {
9741+ dev_warn(&ofdev->dev, "no IRQ found\n");
9742+ return -ENODEV;
9743+ }
9744+
9745+ /* number of slave select bits is required */
9746+ prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
9747+ if (!prop || len < sizeof(*prop)) {
9748+ dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
9749+ return -EINVAL;
9750+ }
9751+ master = xilinx_spi_init(&ofdev->dev, r_mem, r_irq->start, -1, *prop, 8,
9752+ true);
9753+ if (IS_ERR(master))
9754+ return PTR_ERR(master);
9755+
9756+ dev_set_drvdata(&ofdev->dev, master);
9757+
9758+ /* Add any subnodes on the SPI bus */
9759+ of_register_spi_devices(master, ofdev->node);
9760+
9761+ return 0;
9762+}
9763+
9764+static int __devexit xilinx_spi_remove(struct of_device *ofdev)
9765+{
9766+ xilinx_spi_deinit(dev_get_drvdata(&ofdev->dev));
9767+ dev_set_drvdata(&ofdev->dev, 0);
9768+ return 0;
9769+}
9770+
9771+static int __exit xilinx_spi_of_remove(struct of_device *op)
9772+{
9773+ return xilinx_spi_remove(op);
9774+}
9775+
9776+static struct of_device_id xilinx_spi_of_match[] = {
9777+ { .compatible = "xlnx,xps-spi-2.00.a", },
9778+ { .compatible = "xlnx,xps-spi-2.00.b", },
9779+ {}
9780+};
9781+
9782+MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
9783+
9784+static struct of_platform_driver xilinx_spi_of_driver = {
9785+ .owner = THIS_MODULE,
9786+ .name = "xilinx-xps-spi",
9787+ .match_table = xilinx_spi_of_match,
9788+ .probe = xilinx_spi_of_probe,
9789+ .remove = __exit_p(xilinx_spi_of_remove),
9790+ .driver = {
9791+ .name = "xilinx-xps-spi",
9792+ .owner = THIS_MODULE,
9793+ },
9794+};
9795+
9796+static int __init xilinx_spi_of_init(void)
9797+{
9798+ return of_register_platform_driver(&xilinx_spi_of_driver);
9799+}
9800+module_init(xilinx_spi_of_init);
9801+
9802+static void __exit xilinx_spi_of_exit(void)
9803+{
9804+ of_unregister_platform_driver(&xilinx_spi_of_driver);
9805+}
9806+module_exit(xilinx_spi_of_exit);
9807+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
9808+MODULE_DESCRIPTION("Xilinx SPI driver");
9809+MODULE_LICENSE("GPL");
9810+
9811diff -uNr linux-2.6.31/drivers/spi/xilinx_spi_pltfm.c linux-2.6.31.new/drivers/spi/xilinx_spi_pltfm.c
9812--- linux-2.6.31/drivers/spi/xilinx_spi_pltfm.c 1969-12-31 16:00:00.000000000 -0800
9813+++ linux-2.6.31.new/drivers/spi/xilinx_spi_pltfm.c 2009-10-23 11:17:32.000000000 -0700
9814@@ -0,0 +1,104 @@
9815+/*
9816+ * xilinx_spi_pltfm.c Support for Xilinx SPI platform devices
9817+ * Copyright (c) 2009 Intel Corporation
9818+ *
9819+ * This program is free software; you can redistribute it and/or modify
9820+ * it under the terms of the GNU General Public License version 2 as
9821+ * published by the Free Software Foundation.
9822+ *
9823+ * This program is distributed in the hope that it will be useful,
9824+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
9825+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9826+ * GNU General Public License for more details.
9827+ *
9828+ * You should have received a copy of the GNU General Public License
9829+ * along with this program; if not, write to the Free Software
9830+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
9831+ */
9832+
9833+/* Supports:
9834+ * Xilinx SPI devices as platform devices
9835+ *
9836+ * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
9837+ */
9838+
9839+#include <linux/module.h>
9840+#include <linux/init.h>
9841+#include <linux/interrupt.h>
9842+#include <linux/io.h>
9843+#include <linux/platform_device.h>
9844+
9845+#include <linux/spi/spi.h>
9846+#include <linux/spi/spi_bitbang.h>
9847+#include <linux/spi/xilinx_spi.h>
9848+
9849+#include "xilinx_spi.h"
9850+
9851+static int __devinit xilinx_spi_probe(struct platform_device *dev)
9852+{
9853+ struct xspi_platform_data *pdata;
9854+ struct resource *r;
9855+ int irq;
9856+ struct spi_master *master;
9857+ u8 i;
9858+
9859+ pdata = dev->dev.platform_data;
9860+ if (pdata == NULL)
9861+ return -ENODEV;
9862+
9863+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
9864+ if (r == NULL)
9865+ return -ENODEV;
9866+
9867+ irq = platform_get_irq(dev, 0);
9868+ if (irq < 0)
9869+ return -ENXIO;
9870+
9871+ master = xilinx_spi_init(&dev->dev, r, irq, dev->id,
9872+ pdata->num_chipselect, pdata->bits_per_word, false);
9873+ if (IS_ERR(master))
9874+ return PTR_ERR(master);
9875+
9876+ for (i = 0; i < pdata->num_devices; i++)
9877+ spi_new_device(master, pdata->devices + i);
9878+
9879+ platform_set_drvdata(dev, master);
9880+ return 0;
9881+}
9882+
9883+static int __devexit xilinx_spi_remove(struct platform_device *dev)
9884+{
9885+ xilinx_spi_deinit(platform_get_drvdata(dev));
9886+	platform_set_drvdata(dev, NULL);
9887+
9888+ return 0;
9889+}
9890+
9891+/* work with hotplug and coldplug */
9892+MODULE_ALIAS("platform:" XILINX_SPI_NAME);
9893+
9894+static struct platform_driver xilinx_spi_driver = {
9895+ .probe = xilinx_spi_probe,
9896+ .remove = __devexit_p(xilinx_spi_remove),
9897+ .driver = {
9898+ .name = XILINX_SPI_NAME,
9899+ .owner = THIS_MODULE,
9900+ },
9901+};
9902+
9903+static int __init xilinx_spi_pltfm_init(void)
9904+{
9905+ return platform_driver_register(&xilinx_spi_driver);
9906+}
9907+module_init(xilinx_spi_pltfm_init);
9908+
9909+static void __exit xilinx_spi_pltfm_exit(void)
9910+{
9911+ platform_driver_unregister(&xilinx_spi_driver);
9912+}
9913+module_exit(xilinx_spi_pltfm_exit);
9914+
9915+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
9916+MODULE_DESCRIPTION("Xilinx SPI platform driver");
9917+MODULE_LICENSE("GPL v2");
9918+
9919diff -uNr linux-2.6.31/include/linux/can/platform/ascb.h linux-2.6.31.new/include/linux/can/platform/ascb.h
9920--- linux-2.6.31/include/linux/can/platform/ascb.h 1969-12-31 16:00:00.000000000 -0800
9921+++ linux-2.6.31.new/include/linux/can/platform/ascb.h 2009-10-23 11:16:56.000000000 -0700
9922@@ -0,0 +1,8 @@
9923+#ifndef _CAN_PLATFORM_ASCB_H_
9924+#define _CAN_PLATFORM_ASCB_H_
9925+
9926+struct ascb_platform_data {
9927+ int gpio_pin;
9928+};
9929+
9930+#endif
9931diff -uNr linux-2.6.31/include/linux/i2c-xiic.h linux-2.6.31.new/include/linux/i2c-xiic.h
9932--- linux-2.6.31/include/linux/i2c-xiic.h 1969-12-31 16:00:00.000000000 -0800
9933+++ linux-2.6.31.new/include/linux/i2c-xiic.h 2009-10-23 11:16:56.000000000 -0700
9934@@ -0,0 +1,31 @@
9935+/*
9936+ * i2c-xiic.h
9937+ * Copyright (c) 2009 Intel Corporation
9938+ *
9939+ * This program is free software; you can redistribute it and/or modify
9940+ * it under the terms of the GNU General Public License version 2 as
9941+ * published by the Free Software Foundation.
9942+ *
9943+ * This program is distributed in the hope that it will be useful,
9944+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
9945+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9946+ * GNU General Public License for more details.
9947+ *
9948+ * You should have received a copy of the GNU General Public License
9949+ * along with this program; if not, write to the Free Software
9950+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
9951+ */
9952+
9953+/* Supports:
9954+ * Xilinx IIC
9955+ */
9956+
9957+#ifndef _LINUX_I2C_XIIC_H
9958+#define _LINUX_I2C_XIIC_H
9959+
9960+struct xiic_i2c_platform_data {
9961+ u8 num_devices; /* number of devices in the devices list */
9962+ struct i2c_board_info const *devices; /* devices connected to the bus */
9963+};
9964+
9965+#endif /* _LINUX_I2C_XIIC_H */
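
For reference, a board file or MFD driver would hand this structure to the Xilinx IIC controller through its platform_data pointer. A minimal sketch, assuming a hypothetical codec at address 0x1a; the device name and address are illustrative and not taken from this patch:

#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/i2c-xiic.h>

/* Devices hanging off the Xilinx IIC bus (illustrative). */
static struct i2c_board_info const xiic_devices[] = {
	{ I2C_BOARD_INFO("some-codec", 0x1a), },
};

static struct xiic_i2c_platform_data xiic_pdata = {
	.num_devices	= ARRAY_SIZE(xiic_devices),
	.devices	= xiic_devices,
};

The structure is then attached as dev.platform_data of whatever platform device the XIIC bus driver binds to.
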
9966diff -uNr linux-2.6.31/include/linux/mfd/timbdma.h linux-2.6.31.new/include/linux/mfd/timbdma.h
9967--- linux-2.6.31/include/linux/mfd/timbdma.h 1969-12-31 16:00:00.000000000 -0800
9968+++ linux-2.6.31.new/include/linux/mfd/timbdma.h 2009-10-23 11:16:56.000000000 -0700
9969@@ -0,0 +1,58 @@
9970+/*
9971+ * timbdma.h timberdale FPGA DMA driver defines
9972+ * Copyright (c) 2009 Intel Corporation
9973+ *
9974+ * This program is free software; you can redistribute it and/or modify
9975+ * it under the terms of the GNU General Public License version 2 as
9976+ * published by the Free Software Foundation.
9977+ *
9978+ * This program is distributed in the hope that it will be useful,
9979+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
9980+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9981+ * GNU General Public License for more details.
9982+ *
9983+ * You should have received a copy of the GNU General Public License
9984+ * along with this program; if not, write to the Free Software
9985+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
9986+ */
9987+
9988+/* Supports:
9989+ * Timberdale FPGA DMA engine
9990+ */
9991+
9992+#ifndef _TIMBDMA_H
9993+#define _TIMBDMA_H
9994+
9995+#include <linux/spinlock.h>
9996+
9997+
9998+#define DMA_IRQ_UART_RX 0x001
9999+#define DMA_IRQ_UART_TX 0x002
10000+#define DMA_IRQ_MLB_RX 0x004
10001+#define DMA_IRQ_MLB_TX 0x008
10002+#define DMA_IRQ_VIDEO_RX 0x010
10003+#define DMA_IRQ_VIDEO_DROP 0x020
10004+#define DMA_IRQ_SDHCI_RX 0x040
10005+#define DMA_IRQ_SDHCI_TX 0x080
10006+#define DMA_IRQ_ETH_RX 0x100
10007+#define DMA_IRQ_ETH_TX 0x200
10008+#define DMA_IRQS 10
10009+
10010+
10011+typedef int (*timbdma_interruptcb)(u32 flag, void *data);
10012+
10013+
10014+int timbdma_start(u32 flag, void *desc, int bytes_per_row);
10015+
10016+int timbdma_stop(u32 flags);
10017+
10018+void timbdma_set_interruptcb(u32 flags, timbdma_interruptcb icb, void *data);
10019+
10020+void *timbdma_alloc_desc(u32 size, u16 alignment);
10021+
10022+void timbdma_free_desc(void *desc);
10023+
10024+int timbdma_prep_desc(void *desc, dma_addr_t buf, u32 size);
10025+
10026+#endif /* _TIMBDMA_H */
10027+
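
The header above only declares the helpers exported by the Timberdale DMA engine; the sketch below shows the call order those declarations suggest for a client driver receiving into a DMA buffer. The 8-byte alignment, the flag choice and treating bytes_per_row as 0 for a linear transfer are assumptions made for illustration, not documented by this patch:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/mfd/timbdma.h>

/* Completion callback, matching timbdma_interruptcb; semantics assumed. */
static int my_rx_done(u32 flag, void *data)
{
	/* consume the received data here */
	return 0;
}

static int start_uart_rx_dma(void *cb_data, dma_addr_t buf, u32 size)
{
	void *desc;
	int err;

	desc = timbdma_alloc_desc(size, 8);		/* 8-byte alignment assumed */
	if (!desc)
		return -ENOMEM;

	err = timbdma_prep_desc(desc, buf, size);
	if (err)
		goto free;

	timbdma_set_interruptcb(DMA_IRQ_UART_RX, my_rx_done, cb_data);

	err = timbdma_start(DMA_IRQ_UART_RX, desc, 0);	/* 0: no row stride */
	if (err)
		goto free;
	return 0;

free:
	timbdma_free_desc(desc);
	return err;
}
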
10028diff -uNr linux-2.6.31/include/linux/most/timbmlb.h linux-2.6.31.new/include/linux/most/timbmlb.h
10029--- linux-2.6.31/include/linux/most/timbmlb.h 1969-12-31 16:00:00.000000000 -0800
10030+++ linux-2.6.31.new/include/linux/most/timbmlb.h 2009-10-23 11:16:56.000000000 -0700
10031@@ -0,0 +1,9 @@
10032+#ifndef __LINUX_MOST_TIMBMLB_H
10033+#define __LINUX_MOST_TIMBMLB_H
10034+
10035+/* Timberdale MLB IP */
10036+struct timbmlb_platform_data {
10037+ int reset_pin; /* pin used for reset of the INIC */
10038+};
10039+
10040+#endif
10041diff -uNr linux-2.6.31/include/linux/socket.h linux-2.6.31.new/include/linux/socket.h
10042--- linux-2.6.31/include/linux/socket.h 2009-10-23 11:18:30.000000000 -0700
10043+++ linux-2.6.31.new/include/linux/socket.h 2009-10-23 11:16:56.000000000 -0700
10044@@ -195,7 +195,8 @@
10045 #define AF_ISDN 34 /* mISDN sockets */
10046 #define AF_PHONET 35 /* Phonet sockets */
10047 #define AF_IEEE802154 36 /* IEEE802154 sockets */
10048-#define AF_MAX 37 /* For now.. */
10049+#define AF_MOST 37 /* Media Oriented Systems Transport */
10050+#define AF_MAX 38 /* For now.. */
10051
10052 /* Protocol families, same as address families. */
10053 #define PF_UNSPEC AF_UNSPEC
10054@@ -235,6 +236,7 @@
10055 #define PF_ISDN AF_ISDN
10056 #define PF_PHONET AF_PHONET
10057 #define PF_IEEE802154 AF_IEEE802154
10058+#define PF_MOST AF_MOST
10059 #define PF_MAX AF_MAX
10060
10061 /* Maximum queue length specifiable by listen. */
10062diff -uNr linux-2.6.31/include/linux/spi/mc33880.h linux-2.6.31.new/include/linux/spi/mc33880.h
10063--- linux-2.6.31/include/linux/spi/mc33880.h 1969-12-31 16:00:00.000000000 -0800
10064+++ linux-2.6.31.new/include/linux/spi/mc33880.h 2009-10-23 11:16:56.000000000 -0700
10065@@ -0,0 +1,10 @@
10066+#ifndef LINUX_SPI_MC33880_H
10067+#define LINUX_SPI_MC33880_H
10068+
10069+struct mc33880_platform_data {
10070+ /* number assigned to the first GPIO */
10071+ unsigned base;
10072+};
10073+
10074+#endif
10075+
10076diff -uNr linux-2.6.31/include/linux/spi/xilinx_spi.h linux-2.6.31.new/include/linux/spi/xilinx_spi.h
10077--- linux-2.6.31/include/linux/spi/xilinx_spi.h 1969-12-31 16:00:00.000000000 -0800
10078+++ linux-2.6.31.new/include/linux/spi/xilinx_spi.h 2009-10-23 11:16:56.000000000 -0700
10079@@ -0,0 +1,19 @@
10080+#ifndef __LINUX_SPI_XILINX_SPI_H
10081+#define __LINUX_SPI_XILINX_SPI_H
10082+
10083+/**
10084+ * struct xspi_platform_data - Platform data of the Xilinx SPI driver
10085+ * @num_chipselect:	Number of chip selects supported by the IP
10086+ * @bits_per_word:	Number of bits per word: 8, 16 or 32. Note that the
10087+ *			DS464 core only supports 8-bit SPI.
10088+ * @devices: Devices to add when the driver is probed.
10089+ * @num_devices: Number of devices in the devices array.
10090+ */
10091+struct xspi_platform_data {
10092+ u16 num_chipselect;
10093+ u8 bits_per_word;
10094+ struct spi_board_info *devices;
10095+ u8 num_devices;
10096+};
10097+
10098+#endif /* __LINUX_SPI_XILINX_SPI_H */
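
Putting the pieces together, a board file could describe a Xilinx SPI controller and its slaves like this. The MC33880 modalias, addresses, clock rate and IRQ number are illustrative assumptions; only the structure layouts and the "xilinx_spi" platform name come from the code added above:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/xilinx_spi.h>
#include <linux/spi/mc33880.h>

static struct mc33880_platform_data mc33880_pdata = {
	.base = 100,				/* first GPIO number; board specific */
};

static struct spi_board_info board_spi_devices[] = {
	{
		.modalias	= "mc33880",	/* assumed to match the mc33880 driver */
		.max_speed_hz	= 1000000,
		.chip_select	= 0,
		.platform_data	= &mc33880_pdata,
	},
};

static struct xspi_platform_data xspi_pdata = {
	.num_chipselect	= 1,
	.bits_per_word	= 8,
	.devices	= board_spi_devices,
	.num_devices	= ARRAY_SIZE(board_spi_devices),
};

static struct resource xspi_resources[] = {
	{ .start = 0x80000000, .end = 0x8000ffff, .flags = IORESOURCE_MEM },	/* illustrative */
	{ .start = 42,         .end = 42,         .flags = IORESOURCE_IRQ },	/* illustrative */
};

static struct platform_device xspi_device = {
	.name		= "xilinx_spi",		/* XILINX_SPI_NAME in drivers/spi/xilinx_spi.h */
	.id		= 0,
	.num_resources	= ARRAY_SIZE(xspi_resources),
	.resource	= xspi_resources,
	.dev		= {
		.platform_data = &xspi_pdata,
	},
};

/* registered from board init code with platform_device_register(&xspi_device); */
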
10099diff -uNr linux-2.6.31/include/linux/timb_gpio.h linux-2.6.31.new/include/linux/timb_gpio.h
10100--- linux-2.6.31/include/linux/timb_gpio.h 1969-12-31 16:00:00.000000000 -0800
10101+++ linux-2.6.31.new/include/linux/timb_gpio.h 2009-10-23 11:16:56.000000000 -0700
10102@@ -0,0 +1,28 @@
10103+/*
10104+ * timb_gpio.h timberdale FPGA GPIO driver, platform data definition
10105+ * Copyright (c) 2009 Intel Corporation
10106+ *
10107+ * This program is free software; you can redistribute it and/or modify
10108+ * it under the terms of the GNU General Public License version 2 as
10109+ * published by the Free Software Foundation.
10110+ *
10111+ * This program is distributed in the hope that it will be useful,
10112+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10113+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10114+ * GNU General Public License for more details.
10115+ *
10116+ * You should have received a copy of the GNU General Public License
10117+ * along with this program; if not, write to the Free Software
10118+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10119+ */
10120+
10121+#ifndef _LINUX_TIMB_GPIO_H
10122+#define _LINUX_TIMB_GPIO_H
10123+
10124+struct timbgpio_platform_data {
10125+ int gpio_base;
10126+ int nr_pins;
10127+ int irq_base;
10128+};
10129+
10130+#endif
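
A short sketch of how the owning MFD or board code might fill in this platform data; all three numbers are illustrative, board-specific choices:

#include <linux/timb_gpio.h>

static struct timbgpio_platform_data timbgpio_pdata = {
	.gpio_base	= 192,	/* first GPIO number handed to gpiolib */
	.nr_pins	= 32,	/* pins implemented by the FPGA GPIO block */
	.irq_base	= 200,	/* first virtual IRQ assigned to those pins */
};
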
10131diff -uNr linux-2.6.31/include/media/timb_radio.h linux-2.6.31.new/include/media/timb_radio.h
10132--- linux-2.6.31/include/media/timb_radio.h 1969-12-31 16:00:00.000000000 -0800
10133+++ linux-2.6.31.new/include/media/timb_radio.h 2009-10-23 11:16:55.000000000 -0700
10134@@ -0,0 +1,31 @@
10135+/*
10136+ * timb_radio.h Platform struct for the Timberdale radio driver
10137+ * Copyright (c) 2009 Intel Corporation
10138+ *
10139+ * This program is free software; you can redistribute it and/or modify
10140+ * it under the terms of the GNU General Public License version 2 as
10141+ * published by the Free Software Foundation.
10142+ *
10143+ * This program is distributed in the hope that it will be useful,
10144+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10145+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10146+ * GNU General Public License for more details.
10147+ *
10148+ * You should have received a copy of the GNU General Public License
10149+ * along with this program; if not, write to the Free Software
10150+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10151+ */
10152+
10153+#ifndef _TIMB_RADIO_
10154+#define _TIMB_RADIO_ 1
10155+
10156+#include <linux/i2c.h>
10157+
10158+struct timb_radio_platform_data {
10159+ int i2c_adapter; /* I2C adapter where the tuner and dsp are attached */
10160+ char tuner[32];
10161+ char dsp[32];
10162+};
10163+
10164+#endif
10165+
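
As a usage sketch, the Timberdale MFD code could describe its radio sub-devices like this. The adapter number is illustrative; the tuner and DSP names are assumed to match the TEF6862 and SAA7706H chips whose V4L2 idents are added elsewhere in this patch:

#include <media/timb_radio.h>

static struct timb_radio_platform_data timb_radio_pdata = {
	.i2c_adapter	= 0,		/* board-specific adapter number */
	.tuner		= "tef6862",	/* assumed subdev name for the tuner */
	.dsp		= "saa7706h",	/* assumed subdev name for the DSP */
};
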
10166diff -uNr linux-2.6.31/include/media/timb_video.h linux-2.6.31.new/include/media/timb_video.h
10167--- linux-2.6.31/include/media/timb_video.h 1969-12-31 16:00:00.000000000 -0800
10168+++ linux-2.6.31.new/include/media/timb_video.h 2009-10-23 11:16:55.000000000 -0700
10169@@ -0,0 +1,30 @@
10170+/*
10171+ * timb_video.h Platform struct for the Timberdale video driver
10172+ * Copyright (c) 2009 Intel Corporation
10173+ *
10174+ * This program is free software; you can redistribute it and/or modify
10175+ * it under the terms of the GNU General Public License version 2 as
10176+ * published by the Free Software Foundation.
10177+ *
10178+ * This program is distributed in the hope that it will be useful,
10179+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10180+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10181+ * GNU General Public License for more details.
10182+ *
10183+ * You should have received a copy of the GNU General Public License
10184+ * along with this program; if not, write to the Free Software
10185+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10186+ */
10187+
10188+#ifndef _TIMB_VIDEO_
10189+#define _TIMB_VIDEO_ 1
10190+
10191+#include <linux/i2c.h>
10192+
10193+struct timb_video_platform_data {
10194+ int i2c_adapter; /* The I2C adapter where the encoder is attached */
10195+ char encoder[32];
10196+};
10197+
10198+#endif
10199+
10200diff -uNr linux-2.6.31/include/media/v4l2-chip-ident.h linux-2.6.31.new/include/media/v4l2-chip-ident.h
10201--- linux-2.6.31/include/media/v4l2-chip-ident.h 2009-10-23 11:18:30.000000000 -0700
10202+++ linux-2.6.31.new/include/media/v4l2-chip-ident.h 2009-10-23 11:16:55.000000000 -0700
10203@@ -129,12 +129,18 @@
10204 V4L2_IDENT_SAA6752HS = 6752,
10205 V4L2_IDENT_SAA6752HS_AC3 = 6753,
10206
10207+ /* modules tef6862: just ident 6862 */
10208+ V4L2_IDENT_TEF6862 = 6862,
10209+
10210 /* module adv7170: just ident 7170 */
10211 V4L2_IDENT_ADV7170 = 7170,
10212
10213 /* module adv7175: just ident 7175 */
10214 V4L2_IDENT_ADV7175 = 7175,
10215
10216+ /* module adv7180: just ident 7180 */
10217+ V4L2_IDENT_ADV7180 = 7180,
10218+
10219 /* module saa7185: just ident 7185 */
10220 V4L2_IDENT_SAA7185 = 7185,
10221
10222@@ -147,6 +153,9 @@
10223 /* module adv7343: just ident 7343 */
10224 V4L2_IDENT_ADV7343 = 7343,
10225
10226+ /* module saa7706h: just ident 7706 */
10227+ V4L2_IDENT_SAA7706H = 7706,
10228+
10229 /* module wm8739: just ident 8739 */
10230 V4L2_IDENT_WM8739 = 8739,
10231
10232diff -uNr linux-2.6.31/include/net/most/async.h linux-2.6.31.new/include/net/most/async.h
10233--- linux-2.6.31/include/net/most/async.h 1969-12-31 16:00:00.000000000 -0800
10234+++ linux-2.6.31.new/include/net/most/async.h 2009-10-23 11:16:55.000000000 -0700
10235@@ -0,0 +1,12 @@
10236+#ifndef __ASYNC_H
10237+#define __ASYNC_H
10238+
10239+struct sockaddr_mostasync {
10240+ sa_family_t most_family;
10241+ unsigned short most_dev;
10242+ unsigned char rx_channel;
10243+ unsigned char tx_channel;
10244+};
10245+
10246+#endif
10247+
10248diff -uNr linux-2.6.31/include/net/most/ctl.h linux-2.6.31.new/include/net/most/ctl.h
10249--- linux-2.6.31/include/net/most/ctl.h 1969-12-31 16:00:00.000000000 -0800
10250+++ linux-2.6.31.new/include/net/most/ctl.h 2009-10-23 11:16:55.000000000 -0700
10251@@ -0,0 +1,12 @@
10252+#ifndef __CTL_H
10253+#define __CTL_H
10254+
10255+struct sockaddr_mostctl {
10256+ sa_family_t most_family;
10257+ unsigned short most_dev;
10258+ unsigned char rx_channel;
10259+ unsigned char tx_channel;
10260+};
10261+
10262+#endif
10263+
10264diff -uNr linux-2.6.31/include/net/most/dev.h linux-2.6.31.new/include/net/most/dev.h
10265--- linux-2.6.31/include/net/most/dev.h 1969-12-31 16:00:00.000000000 -0800
10266+++ linux-2.6.31.new/include/net/most/dev.h 2009-10-23 11:16:55.000000000 -0700
10267@@ -0,0 +1,27 @@
10268+#ifndef __DEV_H
10269+#define __DEV_H
10270+
10271+struct sockaddr_mostdev {
10272+ sa_family_t most_family;
10273+ unsigned short most_dev;
10274+};
10275+
10276+
10277+/* MOST Dev ioctl defines */
10278+#define MOSTDEVUP _IOW('M', 201, int)
10279+#define MOSTDEVDOWN _IOW('M', 202, int)
10280+
10281+#define MOSTGETDEVLIST _IOR('M', 210, int)
10282+
10283+struct most_dev_req {
10284+ uint16_t dev_id;
10285+};
10286+
10287+struct most_dev_list_req {
10288+ uint16_t dev_num;
10289+ struct most_dev_req dev_req[0];
10290+};
10291+
10292+
10293+#endif
10294+
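
These ioctls are served by the MOST device sockets added further down (net/most/dev_sock.c). A minimal userspace sketch that brings device most0 up; the constants are copied locally because the new kernel headers are not normally exported to userspace:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#ifndef AF_MOST
#define AF_MOST		37			/* matches the socket.h change in this patch */
#endif
#define MOSTPROTO_DEV	0			/* from net/most/most.h */
#define MOSTDEVUP	_IOW('M', 201, int)	/* from net/most/dev.h */

int main(void)
{
	int dev_id = 0;				/* first registered MOST device */
	int fd = socket(AF_MOST, SOCK_RAW, MOSTPROTO_DEV);

	if (fd < 0) {
		perror("socket(AF_MOST)");
		return 1;
	}
	/* dev_sock_ioctl() only looks at the low 16 bits of the argument */
	if (ioctl(fd, MOSTDEVUP, dev_id) < 0)
		perror("MOSTDEVUP");
	close(fd);
	return 0;
}
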
10295diff -uNr linux-2.6.31/include/net/most/most_core.h linux-2.6.31.new/include/net/most/most_core.h
10296--- linux-2.6.31/include/net/most/most_core.h 1969-12-31 16:00:00.000000000 -0800
10297+++ linux-2.6.31.new/include/net/most/most_core.h 2009-10-23 11:16:55.000000000 -0700
10298@@ -0,0 +1,133 @@
10299+#ifndef __MOST_CORE_H
10300+#define __MOST_CORE_H
10301+
10302+#include <net/most/most.h>
10303+
10304+enum most_chan_type {
10305+ CHAN_CTL = 0,
10306+ CHAN_SYNC,
10307+ CHAN_ASYNC,
10308+ CHAN_DEV
10309+};
10310+
10311+#define MOST_CONF_FLAG_UP 0x01
10312+#define MOST_CONF_FLAG_TX 0x02
10313+
10314+enum most_dev_state {
10315+ MOST_DEV_DOWN = 0,
10316+ MOST_DEV_UP
10317+};
10318+
10319+struct most_dev {
10320+
10321+ struct list_head list;
10322+ atomic_t refcnt;
10323+
10324+ char name[8];
10325+
10326+ __u16 id;
10327+ enum most_dev_state state;
10328+
10329+ struct module *owner;
10330+
10331+ struct tasklet_struct rx_task;
10332+ struct tasklet_struct tx_task;
10333+
10334+ struct sk_buff_head rx_q;
10335+ struct sk_buff_head ctl_q;
10336+ struct sk_buff_head async_q;
10337+ struct sk_buff_head sync_q;
10338+
10339+ /* set by the driver */
10340+
10341+ void *driver_data;
10342+ struct device *parent;
10343+
10344+ int (*open)(struct most_dev *mdev);
10345+ int (*close)(struct most_dev *mdev);
10346+ int (*conf_channel)(struct most_dev *mdev, enum most_chan_type type,
10347+ u8 channel, u8 flags);
10348+ int (*send)(struct sk_buff *skb);
10349+ int (*can_send)(struct sk_buff *skb);
10350+};
10351+
10352+#define most_dbg(...) printk(__VA_ARGS__)
10353+
10354+static inline struct most_dev *most_dev_hold(struct most_dev *d)
10355+{
10356+ if (try_module_get(d->owner))
10357+ return d;
10358+ return NULL;
10359+}
10360+
10361+static inline void most_dev_put(struct most_dev *d)
10362+{
10363+ module_put(d->owner);
10364+}
10365+
10366+static inline void most_sched_tx(struct most_dev *mdev)
10367+{
10368+ tasklet_schedule(&mdev->tx_task);
10369+}
10370+
10371+static inline void most_sched_rx(struct most_dev *mdev)
10372+{
10373+ tasklet_schedule(&mdev->rx_task);
10374+}
10375+
10376+static inline int most_recv_frame(struct sk_buff *skb)
10377+{
10378+ struct most_dev *mdev = (struct most_dev *) skb->dev;
10379+
10380+ /* Time stamp */
10381+ __net_timestamp(skb);
10382+
10383+ /* Queue frame for rx task */
10384+ skb_queue_tail(&mdev->rx_q, skb);
10385+ most_sched_rx(mdev);
10386+ return 0;
10387+}
10388+
10389+static inline int __most_configure_channel(struct most_dev *mdev,
10390+ u8 channel_type, u8 channel, u8 up)
10391+{
10392+ if (mdev->state != MOST_DEV_UP)
10393+ return -ENETDOWN;
10394+
10395+ if (mdev->conf_channel)
10396+ if (channel != MOST_NO_CHANNEL)
10397+ return mdev->conf_channel(mdev, channel_type, channel,
10398+ up);
10399+ return 0;
10400+}
10401+
10402+static inline int most_configure_channels(struct most_dev *mdev,
10403+ struct most_sock *sk, u8 up)
10404+{
10405+ int err;
10406+ u8 flags = (up) ? MOST_CONF_FLAG_UP : 0;
10407+
10408+ err = __most_configure_channel(mdev, sk->channel_type, sk->rx_channel,
10409+ flags);
10410+ if (err)
10411+ return err;
10412+
10413+ err = __most_configure_channel(mdev, sk->channel_type, sk->tx_channel,
10414+ flags | MOST_CONF_FLAG_TX);
10415+ if (err)
10416+ __most_configure_channel(mdev, sk->channel_type, sk->rx_channel,
10417+ (up) ? 0 : MOST_CONF_FLAG_UP);
10418+ return err;
10419+}
10420+
10421+struct most_dev *most_alloc_dev(void);
10422+void most_free_dev(struct most_dev *mdev);
10423+int most_register_dev(struct most_dev *mdev);
10424+int most_unregister_dev(struct most_dev *mdev);
10425+
10426+int most_get_dev_list(void __user *arg);
10427+int most_open_dev(u16 dev_id);
10428+int most_close_dev(u16 dev_id);
10429+
10430+#endif
10431+
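
The callbacks and helpers above make up the whole contract between the MOST core and a bus driver. Below is a minimal, hypothetical driver skeleton built only from those declarations; every hardware-facing body is a placeholder, and the RX helper would be called from an interrupt handler that is not shown:

#include <linux/module.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/most/most.h>
#include <net/most/most_core.h>

static struct most_dev *my_mdev;

static int my_open(struct most_dev *mdev)  { return 0; }	/* power the interface up */
static int my_close(struct most_dev *mdev) { return 0; }	/* power it down again */

static int my_send(struct sk_buff *skb)
{
	/* push skb->data out on the MOST network here */
	kfree_skb(skb);
	return 0;
}

/* RX path: wrap received bytes in an skb and hand them to the core. */
static int my_rx(struct most_dev *mdev, const void *buf, unsigned int len,
		 u8 channel_type, u8 channel)
{
	struct sk_buff *skb = most_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;
	skb->dev = (void *)mdev;		/* most_recv_frame() expects this */
	most_cb(skb)->channel_type = channel_type;
	most_cb(skb)->channel = channel;
	memcpy(skb_put(skb, len), buf, len);
	return most_recv_frame(skb);		/* queued to rx_q, delivered by the rx tasklet */
}

static int __init my_most_init(void)
{
	int err;

	my_mdev = most_alloc_dev();
	if (!my_mdev)
		return -ENOMEM;

	my_mdev->owner = THIS_MODULE;		/* most_register_dev() insists on these four */
	my_mdev->open  = my_open;
	my_mdev->close = my_close;
	my_mdev->send  = my_send;

	err = most_register_dev(my_mdev);
	if (err)
		most_free_dev(my_mdev);
	return err;
}

static void __exit my_most_exit(void)
{
	most_unregister_dev(my_mdev);
	most_free_dev(my_mdev);
}

module_init(my_most_init);
module_exit(my_most_exit);
MODULE_LICENSE("GPL v2");
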
10432diff -uNr linux-2.6.31/include/net/most/most.h linux-2.6.31.new/include/net/most/most.h
10433--- linux-2.6.31/include/net/most/most.h 1969-12-31 16:00:00.000000000 -0800
10434+++ linux-2.6.31.new/include/net/most/most.h 2009-10-23 11:16:55.000000000 -0700
10435@@ -0,0 +1,110 @@
10436+#ifndef __MOST_H
10437+#define __MOST_H
10438+
10439+#include <net/sock.h>
10440+
10441+#ifndef AF_MOST
10442+#define AF_MOST 37
10443+#define PF_MOST AF_MOST
10444+#endif
10445+
10446+/* Reserve for core and drivers use */
10447+#define MOST_SKB_RESERVE 8
10448+
10449+#define CTL_FRAME_SIZE 32
10450+
10451+#define MOSTPROTO_DEV 0
10452+#define MOSTPROTO_CTL 1
10453+#define MOSTPROTO_SYNC 2
10454+#define MOSTPROTO_ASYNC 3
10455+
10456+#define MOST_NO_CHANNEL 0xFE
10457+
10458+enum {
10459+ MOST_CONNECTED = 1, /* Equal to TCP_ESTABLISHED makes net code happy */
10460+ MOST_OPEN,
10461+ MOST_BOUND,
10462+};
10463+
10464+
10465+struct most_skb_cb {
10466+ __u8 channel_type;
10467+ __u8 channel;
10468+};
10469+#define most_cb(skb) ((struct most_skb_cb *)(skb->cb))
10470+
10471+struct most_sock {
10472+ struct sock sk;
10473+ u8 channel_type;
10474+ u8 rx_channel;
10475+ u8 tx_channel;
10476+ int dev_id;
10477+ struct most_dev *mdev;
10478+};
10479+#define most_sk(sk) ((struct most_sock *)sk)
10480+
10481+static inline struct sock *most_sk_alloc(struct net *net,
10482+ struct proto *pops, u8 channel_type)
10483+{
10484+ struct sock *sk = sk_alloc(net, PF_MOST, GFP_ATOMIC, pops);
10485+ if (sk) {
10486+ most_sk(sk)->channel_type = channel_type;
10487+ most_sk(sk)->dev_id = -1;
10488+ }
10489+
10490+ return sk;
10491+}
10492+static inline struct sk_buff *most_skb_alloc(unsigned int len, gfp_t how)
10493+{
10494+ struct sk_buff *skb = alloc_skb(len + MOST_SKB_RESERVE, how);
10495+
10496+ if (skb)
10497+ skb_reserve(skb, MOST_SKB_RESERVE);
10498+
10499+ return skb;
10500+}
10501+
10502+static inline struct sk_buff *most_skb_send_alloc(struct sock *sk,
10503+ unsigned long len, int nb, int *err)
10504+{
10505+ struct sk_buff *skb =
10506+ sock_alloc_send_skb(sk, len + MOST_SKB_RESERVE, nb, err);
10507+
10508+ if (skb)
10509+ skb_reserve(skb, MOST_SKB_RESERVE);
10510+
10511+ return skb;
10512+}
10513+
10514+struct most_sock_list {
10515+ struct hlist_head head;
10516+ rwlock_t lock;
10517+};
10518+
10519+struct most_dev *most_dev_get(int index);
10520+
10521+int most_sock_register(int proto, struct net_proto_family *ops);
10522+int most_sock_unregister(int proto);
10523+void most_sock_link(struct sock *s);
10524+void most_sock_unlink(struct sock *sk);
10525+
10526+int most_send_to_sock(int dev_id, struct sk_buff *skb);
10527+
10528+/* default implementation of socket operations */
10529+int most_sock_release(struct socket *sock);
10530+int most_sock_bind(struct socket *sock, int dev_id, u8 rx_chan, u8 tx_chan);
10531+int most_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
10532+int most_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
10533+ struct msghdr *msg, size_t len, int flags);
10534+int most_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
10535+ struct msghdr *msg, size_t len);
10536+int most_sock_setsockopt(struct socket *sock, int level, int optname,
10537+ char __user *optval, int len);
10538+int most_sock_getsockopt(struct socket *sock, int level, int optname,
10539+ char __user *optval, int __user *optlen);
10540+
10541+extern int dev_sock_init(void);
10542+extern void dev_sock_cleanup(void);
10543+
10544+#endif /* __MOST_H */
10545+
10546diff -uNr linux-2.6.31/include/net/most/sync.h linux-2.6.31.new/include/net/most/sync.h
10547--- linux-2.6.31/include/net/most/sync.h 1969-12-31 16:00:00.000000000 -0800
10548+++ linux-2.6.31.new/include/net/most/sync.h 2009-10-23 11:16:55.000000000 -0700
10549@@ -0,0 +1,12 @@
10550+#ifndef __SYNC_H
10551+#define __SYNC_H
10552+
10553+struct sockaddr_mostsync {
10554+ sa_family_t most_family;
10555+ unsigned short most_dev;
10556+ unsigned char rx_channel;
10557+ unsigned char tx_channel;
10558+};
10559+
10560+#endif
10561+
10562diff -uNr linux-2.6.31/include/sound/timbi2s.h linux-2.6.31.new/include/sound/timbi2s.h
10563--- linux-2.6.31/include/sound/timbi2s.h 1969-12-31 16:00:00.000000000 -0800
10564+++ linux-2.6.31.new/include/sound/timbi2s.h 2009-10-23 11:16:55.000000000 -0700
10565@@ -0,0 +1,32 @@
10566+/*
10567+ * timbi2s.h timberdale FPGA I2S platform data
10568+ * Copyright (c) 2009 Intel Corporation
10569+ *
10570+ * This program is free software; you can redistribute it and/or modify
10571+ * it under the terms of the GNU General Public License version 2 as
10572+ * published by the Free Software Foundation.
10573+ *
10574+ * This program is distributed in the hope that it will be useful,
10575+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10576+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10577+ * GNU General Public License for more details.
10578+ *
10579+ * You should have received a copy of the GNU General Public License
10580+ * along with this program; if not, write to the Free Software
10581+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10582+ */
10583+#ifndef __INCLUDE_SOUND_TIMBI2S_H
10584+#define __INCLUDE_SOUND_TIMBI2S_H
10585+
10586+struct timbi2s_bus_data {
10587+ u8 rx;
10588+ u16 sample_rate;
10589+};
10590+
10591+struct timbi2s_platform_data {
10592+ const struct timbi2s_bus_data *busses;
10593+ int num_busses;
10594+ u32 main_clk;
10595+};
10596+
10597+#endif
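
A usage sketch for the I2S platform data; the number of busses, the sample rates and the main clock are illustrative, board-specific values:

#include <linux/kernel.h>
#include <sound/timbi2s.h>

static const struct timbi2s_bus_data timbi2s_busses[] = {
	{ .rx = 1, .sample_rate = 44100 },	/* one capture bus */
	{ .rx = 0, .sample_rate = 44100 },	/* one playback bus */
};

static struct timbi2s_platform_data timbi2s_pdata = {
	.busses		= timbi2s_busses,
	.num_busses	= ARRAY_SIZE(timbi2s_busses),
	.main_clk	= 62500000,		/* FPGA main clock in Hz */
};
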
10598diff -uNr linux-2.6.31/net/Kconfig linux-2.6.31.new/net/Kconfig
10599--- linux-2.6.31/net/Kconfig 2009-10-23 11:18:30.000000000 -0700
10600+++ linux-2.6.31.new/net/Kconfig 2009-10-23 11:17:37.000000000 -0700
10601@@ -235,6 +235,7 @@
10602 source "net/irda/Kconfig"
10603 source "net/bluetooth/Kconfig"
10604 source "net/rxrpc/Kconfig"
10605+source "net/most/Kconfig"
10606
10607 config FIB_RULES
10608 bool
10609diff -uNr linux-2.6.31/net/Makefile linux-2.6.31.new/net/Makefile
10610--- linux-2.6.31/net/Makefile 2009-10-23 11:18:30.000000000 -0700
10611+++ linux-2.6.31.new/net/Makefile 2009-10-23 11:17:36.000000000 -0700
10612@@ -44,6 +44,7 @@
10613 obj-$(CONFIG_DECNET) += decnet/
10614 obj-$(CONFIG_ECONET) += econet/
10615 obj-$(CONFIG_PHONET) += phonet/
10616+obj-$(CONFIG_MOST) += most/
10617 ifneq ($(CONFIG_VLAN_8021Q),)
10618 obj-y += 8021q/
10619 endif
10620diff -uNr linux-2.6.31/net/most/af_most.c linux-2.6.31.new/net/most/af_most.c
10621--- linux-2.6.31/net/most/af_most.c 1969-12-31 16:00:00.000000000 -0800
10622+++ linux-2.6.31.new/net/most/af_most.c 2009-10-23 11:17:37.000000000 -0700
10623@@ -0,0 +1,169 @@
10624+/*
10625+ * af_most.c Support for the MOST address family
10626+ * Copyright (c) 2009 Intel Corporation
10627+ *
10628+ * This program is free software; you can redistribute it and/or modify
10629+ * it under the terms of the GNU General Public License version 2 as
10630+ * published by the Free Software Foundation.
10631+ *
10632+ * This program is distributed in the hope that it will be useful,
10633+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10634+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10635+ * GNU General Public License for more details.
10636+ *
10637+ * You should have received a copy of the GNU General Public License
10638+ * along with this program; if not, write to the Free Software
10639+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10640+ */
10641+
10642+#include <linux/module.h>
10643+#include <net/most/most.h>
10644+
10645+#define MOST_MAX_PROTO 4
10646+static struct net_proto_family *most_proto[MOST_MAX_PROTO];
10647+static DEFINE_RWLOCK(most_proto_lock);
10648+
10649+#ifdef CONFIG_DEBUG_LOCK_ALLOC
10650+static struct lock_class_key most_lock_key[MOST_MAX_PROTO];
10651+static const char *most_key_strings[MOST_MAX_PROTO] = {
10652+ "sk_lock-AF_MOST-MOSTPROTO_DEV",
10653+ "sk_lock-AF_MOST-MOSTPROTO_CTL",
10654+ "sk_lock-AF_MOST-MOSTPROTO_SYNC",
10655+ "sk_lock-AF_MOST-MOSTPROTO_ASYNC",
10656+};
10657+
10658+static struct lock_class_key most_slock_key[MOST_MAX_PROTO];
10659+static const char *most_slock_key_strings[MOST_MAX_PROTO] = {
10660+ "slock-AF_MOST-MOSTPROTO_DEV",
10661+ "slock-AF_MOST-MOSTPROTO_CTL",
10662+ "slock-AF_MOST-MOSTPROTO_SYNC",
10663+ "slock-AF_MOST-MOSTPROTO_ASYNC",
10664+};
10665+
10666+static inline void most_sock_reclassify_lock(struct socket *sock, int proto)
10667+{
10668+ struct sock *sk = sock->sk;
10669+
10670+ if (!sk)
10671+ return;
10672+
10673+ BUG_ON(sock_owned_by_user(sk));
10674+
10675+ sock_lock_init_class_and_name(sk,
10676+ most_slock_key_strings[proto], &most_slock_key[proto],
10677+ most_key_strings[proto], &most_lock_key[proto]);
10678+}
10679+#else
10680+static inline void most_sock_reclassify_lock(struct socket *sock, int proto)
10681+{
10682+}
10683+#endif
10684+
10685+
10686+int most_sock_register(int proto, struct net_proto_family *ops)
10687+{
10688+ int err = 0;
10689+
10690+ if (proto < 0 || proto >= MOST_MAX_PROTO)
10691+ return -EINVAL;
10692+
10693+ write_lock(&most_proto_lock);
10694+
10695+ if (most_proto[proto])
10696+ err = -EEXIST;
10697+ else
10698+ most_proto[proto] = ops;
10699+
10700+ write_unlock(&most_proto_lock);
10701+
10702+ return err;
10703+}
10704+EXPORT_SYMBOL(most_sock_register);
10705+
10706+int most_sock_unregister(int proto)
10707+{
10708+ int err = 0;
10709+
10710+ if (proto < 0 || proto >= MOST_MAX_PROTO)
10711+ return -EINVAL;
10712+
10713+ write_lock(&most_proto_lock);
10714+
10715+ if (!most_proto[proto])
10716+ err = -ENOENT;
10717+ else
10718+ most_proto[proto] = NULL;
10719+
10720+ write_unlock(&most_proto_lock);
10721+
10722+ return err;
10723+}
10724+EXPORT_SYMBOL(most_sock_unregister);
10725+
10726+static int most_sock_create(struct net *net, struct socket *sock, int proto)
10727+{
10728+ int err;
10729+
10730+ if (net != &init_net)
10731+ return -EAFNOSUPPORT;
10732+
10733+ if (proto < 0 || proto >= MOST_MAX_PROTO)
10734+ return -EINVAL;
10735+
10736+ if (!most_proto[proto])
10737+ request_module("most-proto-%d", proto);
10738+
10739+ err = -EPROTONOSUPPORT;
10740+
10741+ read_lock(&most_proto_lock);
10742+
10743+ if (most_proto[proto] && try_module_get(most_proto[proto]->owner)) {
10744+ err = most_proto[proto]->create(net, sock, proto);
10745+ most_sock_reclassify_lock(sock, proto);
10746+ module_put(most_proto[proto]->owner);
10747+ }
10748+
10749+ read_unlock(&most_proto_lock);
10750+
10751+ return err;
10752+}
10753+
10754+static struct net_proto_family most_sock_family_ops = {
10755+ .owner = THIS_MODULE,
10756+ .family = PF_MOST,
10757+ .create = most_sock_create,
10758+};
10759+
10760+static int __init most_init(void)
10761+{
10762+ int err;
10763+
10764+ err = sock_register(&most_sock_family_ops);
10765+ if (err < 0)
10766+ return err;
10767+
10768+ err = dev_sock_init();
10769+ if (err < 0) {
10770+ sock_unregister(PF_MOST);
10771+ return err;
10772+ }
10773+
10774+ printk(KERN_INFO "MOST is initialized\n");
10775+
10776+ return 0;
10777+}
10778+
10779+static void __exit most_exit(void)
10780+{
10781+ dev_sock_cleanup();
10782+
10783+ sock_unregister(PF_MOST);
10784+}
10785+
10786+subsys_initcall(most_init);
10787+module_exit(most_exit);
10788+
10789+MODULE_DESCRIPTION("MOST Core");
10790+MODULE_LICENSE("GPL v2");
10791+MODULE_ALIAS_NETPROTO(PF_MOST);
10792+
10793diff -uNr linux-2.6.31/net/most/async_sock.c linux-2.6.31.new/net/most/async_sock.c
10794--- linux-2.6.31/net/most/async_sock.c 1969-12-31 16:00:00.000000000 -0800
10795+++ linux-2.6.31.new/net/most/async_sock.c 2009-10-23 11:17:37.000000000 -0700
10796@@ -0,0 +1,154 @@
10797+/*
10798+ * async_sock.c MOST asynchronous socket support
10799+ * Copyright (c) 2009 Intel Corporation
10800+ *
10801+ * This program is free software; you can redistribute it and/or modify
10802+ * it under the terms of the GNU General Public License version 2 as
10803+ * published by the Free Software Foundation.
10804+ *
10805+ * This program is distributed in the hope that it will be useful,
10806+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10807+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10808+ * GNU General Public License for more details.
10809+ *
10810+ * You should have received a copy of the GNU General Public License
10811+ * along with this program; if not, write to the Free Software
10812+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10813+ */
10814+
10815+/* Supports:
10816+ * MOST asynchronous sockets
10817+ */
10818+
10819+#include <linux/module.h>
10820+#include <net/most/most.h>
10821+#include <net/most/most_core.h>
10822+#include <net/most/async.h>
10823+
10824+static int async_sock_bind(struct socket *sock, struct sockaddr *addr,
10825+ int addr_len)
10826+{
10827+ struct sockaddr_mostasync *aaddr = (struct sockaddr_mostasync *)addr;
10828+
10829+ if (!aaddr || aaddr->most_family != AF_MOST)
10830+ return -EINVAL;
10831+
10832+ return most_sock_bind(sock, aaddr->most_dev, aaddr->rx_channel,
10833+ aaddr->tx_channel);
10834+}
10835+
10836+static int async_sock_getname(struct socket *sock, struct sockaddr *addr,
10837+ int *addr_len, int peer)
10838+{
10839+ struct sockaddr_mostasync *aaddr = (struct sockaddr_mostasync *)addr;
10840+ struct sock *sk = sock->sk;
10841+ struct most_dev *mdev = most_sk(sk)->mdev;
10842+
10843+ if (!mdev)
10844+ return -EBADFD;
10845+
10846+ lock_sock(sk);
10847+
10848+ *addr_len = sizeof(*aaddr);
10849+ aaddr->most_family = AF_MOST;
10850+ aaddr->most_dev = mdev->id;
10851+ aaddr->rx_channel = most_sk(sk)->rx_channel;
10852+ aaddr->tx_channel = most_sk(sk)->tx_channel;
10853+
10854+ release_sock(sk);
10855+ return 0;
10856+}
10857+
10858+
10859+static const struct proto_ops async_sock_ops = {
10860+ .family = PF_MOST,
10861+ .owner = THIS_MODULE,
10862+ .release = most_sock_release,
10863+ .bind = async_sock_bind,
10864+ .getname = async_sock_getname,
10865+ .sendmsg = most_sock_sendmsg,
10866+ .recvmsg = most_sock_recvmsg,
10867+ .ioctl = most_sock_ioctl,
10868+ .poll = datagram_poll,
10869+ .listen = sock_no_listen,
10870+ .shutdown = sock_no_shutdown,
10871+ .setsockopt = most_sock_setsockopt,
10872+ .getsockopt = most_sock_getsockopt,
10873+ .connect = sock_no_connect,
10874+ .socketpair = sock_no_socketpair,
10875+ .accept = sock_no_accept,
10876+ .mmap = sock_no_mmap
10877+};
10878+static struct proto async_sk_proto = {
10879+ .name = "ASYNC",
10880+ .owner = THIS_MODULE,
10881+ .obj_size = sizeof(struct most_sock)
10882+};
10883+
10884+static int async_sock_create(struct net *net, struct socket *sock, int protocol)
10885+{
10886+ struct sock *sk;
10887+
10888+ if (sock->type != SOCK_DGRAM)
10889+ return -ESOCKTNOSUPPORT;
10890+
10891+ sock->ops = &async_sock_ops;
10892+
10893+ sk = most_sk_alloc(net, &async_sk_proto, CHAN_ASYNC);
10894+ if (!sk)
10895+ return -ENOMEM;
10896+
10897+ sock_init_data(sock, sk);
10898+
10899+ sock_reset_flag(sk, SOCK_ZAPPED);
10900+
10901+ sk->sk_protocol = protocol;
10902+
10903+ sock->state = SS_UNCONNECTED;
10904+ sk->sk_state = MOST_OPEN;
10905+
10906+ most_sock_link(sk);
10907+ return 0;
10908+}
10909+
10910+static struct net_proto_family async_sock_family_ops = {
10911+ .family = PF_MOST,
10912+ .owner = THIS_MODULE,
10913+ .create = async_sock_create,
10914+};
10915+
10916+
10917+static int __init async_init(void)
10918+{
10919+ int err;
10920+
10921+ err = proto_register(&async_sk_proto, 0);
10922+ if (err < 0)
10923+ return err;
10924+
10925+ err = most_sock_register(MOSTPROTO_ASYNC, &async_sock_family_ops);
10926+ if (err < 0) {
10927+ printk(KERN_ERR "MOST socket registration failed\n");
10928+ return err;
10929+ }
10930+
10931+ printk(KERN_INFO "MOST asynchronous socket layer initialized\n");
10932+
10933+ return 0;
10934+}
10935+
10936+static void __exit async_exit(void)
10937+{
10938+ if (most_sock_unregister(MOSTPROTO_ASYNC) < 0)
10939+ printk(KERN_ERR "ASYNC socket unregistration failed\n");
10940+
10941+ proto_unregister(&async_sk_proto);
10942+}
10943+
10944+module_init(async_init);
10945+module_exit(async_exit);
10946+
10947+MODULE_DESCRIPTION("MOST Asynchronous");
10948+MODULE_LICENSE("GPL v2");
10949+MODULE_ALIAS("most-proto-3");
10950+
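
For completeness, this is roughly how userspace would reach the asynchronous channel exposed above, assuming the device has already been brought up (for example with the MOSTDEVUP ioctl sketched earlier). The sockaddr layout mirrors net/most/async.h; the device index and channel numbers are illustrative:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_MOST
#define AF_MOST		37	/* from the socket.h change in this patch */
#endif
#define MOSTPROTO_ASYNC	3	/* from net/most/most.h */

struct sockaddr_mostasync {	/* mirrors net/most/async.h */
	sa_family_t	most_family;
	unsigned short	most_dev;
	unsigned char	rx_channel;
	unsigned char	tx_channel;
};

int main(void)
{
	struct sockaddr_mostasync addr = {
		.most_family	= AF_MOST,
		.most_dev	= 0,	/* most0 */
		.rx_channel	= 2,	/* channel numbers are network specific */
		.tx_channel	= 3,
	};
	char buf[48] = "hello MOST";
	int fd = socket(AF_MOST, SOCK_DGRAM, MOSTPROTO_ASYNC);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("async MOST socket");
		return 1;
	}
	if (send(fd, buf, sizeof(buf), 0) < 0)
		perror("send");
	close(fd);
	return 0;
}
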
10951diff -uNr linux-2.6.31/net/most/ctl_sock.c linux-2.6.31.new/net/most/ctl_sock.c
10952--- linux-2.6.31/net/most/ctl_sock.c 1969-12-31 16:00:00.000000000 -0800
10953+++ linux-2.6.31.new/net/most/ctl_sock.c 2009-10-23 11:17:37.000000000 -0700
10954@@ -0,0 +1,159 @@
10955+/*
10956+ * ctl_sock.c Support for MOST control sockets
10957+ * Copyright (c) 2009 Intel Corporation
10958+ *
10959+ * This program is free software; you can redistribute it and/or modify
10960+ * it under the terms of the GNU General Public License version 2 as
10961+ * published by the Free Software Foundation.
10962+ *
10963+ * This program is distributed in the hope that it will be useful,
10964+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10965+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10966+ * GNU General Public License for more details.
10967+ *
10968+ * You should have received a copy of the GNU General Public License
10969+ * along with this program; if not, write to the Free Software
10970+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10971+ */
10972+
10973+#include <linux/module.h>
10974+#include <net/most/most.h>
10975+#include <net/most/most_core.h>
10976+#include <net/most/ctl.h>
10977+
10978+
10979+static int ctl_sock_bind(struct socket *sock, struct sockaddr *addr,
10980+ int addr_len)
10981+{
10982+ struct sockaddr_mostctl *caddr = (struct sockaddr_mostctl *) addr;
10983+
10984+ if (!caddr || caddr->most_family != AF_MOST)
10985+ return -EINVAL;
10986+
10987+ return most_sock_bind(sock, caddr->most_dev, caddr->rx_channel,
10988+ caddr->tx_channel);
10989+}
10990+
10991+static int ctl_sock_getname(struct socket *sock, struct sockaddr *addr,
10992+ int *addr_len, int peer)
10993+{
10994+ struct sockaddr_mostctl *caddr = (struct sockaddr_mostctl *) addr;
10995+ struct sock *sk = sock->sk;
10996+ struct most_dev *mdev = most_sk(sk)->mdev;
10997+
10998+ if (!mdev)
10999+ return -EBADFD;
11000+
11001+ lock_sock(sk);
11002+
11003+ *addr_len = sizeof(*caddr);
11004+ caddr->most_family = AF_MOST;
11005+ caddr->most_dev = mdev->id;
11006+ caddr->rx_channel = most_sk(sk)->rx_channel;
11007+ caddr->tx_channel = most_sk(sk)->tx_channel;
11008+
11009+ release_sock(sk);
11010+ return 0;
11011+}
11012+
11013+static int ctl_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
11014+ struct msghdr *msg, size_t len)
11015+{
11016+ if (len != CTL_FRAME_SIZE)
11017+ return -EINVAL;
11018+
11019+ return most_sock_sendmsg(iocb, sock, msg, len);
11020+}
11021+
11022+static const struct proto_ops ctl_sock_ops = {
11023+ .family = PF_MOST,
11024+ .owner = THIS_MODULE,
11025+ .release = most_sock_release,
11026+ .bind = ctl_sock_bind,
11027+ .getname = ctl_sock_getname,
11028+	.sendmsg	= ctl_sock_sendmsg,
11029+ .recvmsg = most_sock_recvmsg,
11030+ .ioctl = most_sock_ioctl,
11031+ .poll = datagram_poll,
11032+ .listen = sock_no_listen,
11033+ .shutdown = sock_no_shutdown,
11034+ .setsockopt = most_sock_setsockopt,
11035+ .getsockopt = most_sock_getsockopt,
11036+ .connect = sock_no_connect,
11037+ .socketpair = sock_no_socketpair,
11038+ .accept = sock_no_accept,
11039+ .mmap = sock_no_mmap
11040+};
11041+static struct proto ctl_sk_proto = {
11042+ .name = "CTL",
11043+ .owner = THIS_MODULE,
11044+ .obj_size = sizeof(struct most_sock)
11045+};
11046+
11047+static int ctl_sock_create(struct net *net, struct socket *sock, int protocol)
11048+{
11049+ struct sock *sk;
11050+
11051+ if (sock->type != SOCK_RAW)
11052+ return -ESOCKTNOSUPPORT;
11053+
11054+ sock->ops = &ctl_sock_ops;
11055+
11056+ sk = most_sk_alloc(net, &ctl_sk_proto, CHAN_CTL);
11057+ if (!sk)
11058+ return -ENOMEM;
11059+
11060+ sock_init_data(sock, sk);
11061+
11062+ sock_reset_flag(sk, SOCK_ZAPPED);
11063+
11064+ sk->sk_protocol = protocol;
11065+
11066+ sock->state = SS_UNCONNECTED;
11067+ sk->sk_state = MOST_OPEN;
11068+
11069+ most_sock_link(sk);
11070+ return 0;
11071+}
11072+
11073+static struct net_proto_family ctl_sock_family_ops = {
11074+ .family = PF_MOST,
11075+ .owner = THIS_MODULE,
11076+ .create = ctl_sock_create,
11077+};
11078+
11079+
11080+static int __init ctl_init(void)
11081+{
11082+ int err;
11083+
11084+ err = proto_register(&ctl_sk_proto, 0);
11085+ if (err < 0)
11086+ return err;
11087+
11088+ err = most_sock_register(MOSTPROTO_CTL, &ctl_sock_family_ops);
11089+ if (err < 0) {
11090+ printk(KERN_ERR "MOST socket registration failed\n");
11091+ return err;
11092+ }
11093+
11094+ printk(KERN_INFO "MOST control socket layer initialized\n");
11095+
11096+ return 0;
11097+}
11098+
11099+static void __exit ctl_exit(void)
11100+{
11101+ if (most_sock_unregister(MOSTPROTO_CTL) < 0)
11102+ printk(KERN_ERR "Control socket unregistration failed\n");
11103+
11104+ proto_unregister(&ctl_sk_proto);
11105+}
11106+
11107+module_init(ctl_init);
11108+module_exit(ctl_exit);
11109+
11110+MODULE_DESCRIPTION("MOST Control");
11111+MODULE_LICENSE("GPL v2");
11112+MODULE_ALIAS("most-proto-1");
11113+
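
The control channel differs from the asynchronous one in two visible ways: it uses SOCK_RAW, and ctl_sock_sendmsg() only accepts frames of exactly CTL_FRAME_SIZE (32) bytes. A small userspace sketch with the definitions copied locally and illustrative channel numbers:

#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_MOST
#define AF_MOST		37
#endif
#define MOSTPROTO_CTL	1	/* from net/most/most.h */
#define CTL_FRAME_SIZE	32	/* from net/most/most.h */

struct sockaddr_mostctl {	/* mirrors net/most/ctl.h */
	sa_family_t	most_family;
	unsigned short	most_dev;
	unsigned char	rx_channel;
	unsigned char	tx_channel;
};

/* Send one fixed-size control frame; returns 0 on success. */
int send_ctl_frame(const char frame[CTL_FRAME_SIZE])
{
	struct sockaddr_mostctl addr = {
		.most_family = AF_MOST, .most_dev = 0,
		.rx_channel = 0, .tx_channel = 1,
	};
	int ret = -1;
	int fd = socket(AF_MOST, SOCK_RAW, MOSTPROTO_CTL);

	if (fd < 0)
		return -1;
	/* anything that is not exactly 32 bytes is rejected with -EINVAL */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0 &&
	    send(fd, frame, CTL_FRAME_SIZE, 0) == CTL_FRAME_SIZE)
		ret = 0;
	close(fd);
	return ret;
}
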
11114diff -uNr linux-2.6.31/net/most/dev_sock.c linux-2.6.31.new/net/most/dev_sock.c
11115--- linux-2.6.31/net/most/dev_sock.c 1969-12-31 16:00:00.000000000 -0800
11116+++ linux-2.6.31.new/net/most/dev_sock.c 2009-10-23 11:17:37.000000000 -0700
11117@@ -0,0 +1,170 @@
11118+/*
11119+ * dev_sock.c Device MOST sockets, used to control the underlying devices
11120+ * Copyright (c) 2009 Intel Corporation
11121+ *
11122+ * This program is free software; you can redistribute it and/or modify
11123+ * it under the terms of the GNU General Public License version 2 as
11124+ * published by the Free Software Foundation.
11125+ *
11126+ * This program is distributed in the hope that it will be useful,
11127+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
11128+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11129+ * GNU General Public License for more details.
11130+ *
11131+ * You should have received a copy of the GNU General Public License
11132+ * along with this program; if not, write to the Free Software
11133+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
11134+ */
11135+
11136+#include <linux/module.h>
11137+#include <net/most/most.h>
11138+#include <net/most/most_core.h>
11139+#include <net/most/dev.h>
11140+
11141+/* Ioctls that require bound socket */
11142+static inline int dev_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
11143+ unsigned long arg)
11144+{
11145+ return -ENOSYS;
11146+}
11147+
11148+static int dev_sock_ioctl(struct socket *sock, unsigned int cmd,
11149+ unsigned long arg)
11150+{
11151+ void __user *argp = (void __user *) arg;
11152+
11153+ switch (cmd) {
11154+ case MOSTDEVUP:
11155+ return most_open_dev(arg & 0xffff);
11156+ case MOSTDEVDOWN:
11157+ return most_close_dev(arg & 0xffff);
11158+ case MOSTGETDEVLIST:
11159+ return most_get_dev_list(argp);
11160+ default:
11161+ return -EINVAL;
11162+ }
11163+}
11164+
11165+static int dev_sock_bind(struct socket *sock, struct sockaddr *addr,
11166+ int addr_len)
11167+{
11168+ return -ENOSYS;
11169+}
11170+
11171+static int dev_sock_getname(struct socket *sock, struct sockaddr *addr,
11172+ int *addr_len, int peer)
11173+{
11174+ struct sockaddr_mostdev *daddr = (struct sockaddr_mostdev *) addr;
11175+ struct sock *sk = sock->sk;
11176+ struct most_dev *mdev = most_sk(sk)->mdev;
11177+
11178+ if (!mdev)
11179+ return -EBADFD;
11180+
11181+ lock_sock(sk);
11182+
11183+ *addr_len = sizeof(*daddr);
11184+ daddr->most_family = AF_MOST;
11185+ daddr->most_dev = mdev->id;
11186+
11187+ release_sock(sk);
11188+ return 0;
11189+}
11190+
11191+static int dev_sock_setsockopt(struct socket *sock, int level, int optname,
11192+ char __user *optval, int len)
11193+{
11194+ return -ENOSYS;
11195+}
11196+
11197+static int dev_sock_getsockopt(struct socket *sock, int level, int optname,
11198+ char __user *optval, int __user *optlen)
11199+{
11200+ return -ENOSYS;
11201+}
11202+
11203+static const struct proto_ops dev_sock_ops = {
11204+ .family = PF_MOST,
11205+ .owner = THIS_MODULE,
11206+ .release = most_sock_release,
11207+ .bind = dev_sock_bind,
11208+ .getname = dev_sock_getname,
11209+ .sendmsg = sock_no_sendmsg,
11210+ .recvmsg = sock_no_recvmsg,
11211+ .ioctl = dev_sock_ioctl,
11212+ .poll = sock_no_poll,
11213+ .listen = sock_no_listen,
11214+ .shutdown = sock_no_shutdown,
11215+ .setsockopt = dev_sock_setsockopt,
11216+ .getsockopt = dev_sock_getsockopt,
11217+ .connect = sock_no_connect,
11218+ .socketpair = sock_no_socketpair,
11219+ .accept = sock_no_accept,
11220+ .mmap = sock_no_mmap
11221+};
11222+static struct proto dev_sk_proto = {
11223+ .name = "DEV",
11224+ .owner = THIS_MODULE,
11225+ .obj_size = sizeof(struct most_sock)
11226+};
11227+
11228+static int dev_sock_create(struct net *net, struct socket *sock, int protocol)
11229+{
11230+ struct sock *sk;
11231+
11232+ if (sock->type != SOCK_RAW)
11233+ return -ESOCKTNOSUPPORT;
11234+
11235+ sock->ops = &dev_sock_ops;
11236+
11237+ sk = most_sk_alloc(net, &dev_sk_proto, CHAN_DEV);
11238+ if (!sk)
11239+ return -ENOMEM;
11240+
11241+ sock_init_data(sock, sk);
11242+
11243+ sock_reset_flag(sk, SOCK_ZAPPED);
11244+
11245+ sk->sk_protocol = protocol;
11246+
11247+ sock->state = SS_UNCONNECTED;
11248+ sk->sk_state = MOST_OPEN;
11249+
11250+ most_sock_link(sk);
11251+ return 0;
11252+}
11253+
11254+static struct net_proto_family dev_sock_family_ops = {
11255+ .family = PF_MOST,
11256+ .owner = THIS_MODULE,
11257+ .create = dev_sock_create,
11258+};
11259+
11260+
11261+int __init dev_sock_init(void)
11262+{
11263+ int err;
11264+
11265+ err = proto_register(&dev_sk_proto, 0);
11266+ if (err < 0)
11267+ return err;
11268+
11269+ err = most_sock_register(MOSTPROTO_DEV, &dev_sock_family_ops);
11270+ if (err < 0) {
11271+ printk(KERN_ERR "MOST socket registration failed\n");
11272+ return err;
11273+ }
11274+
11275+ printk(KERN_INFO "MOST device socket layer initialized\n");
11276+
11277+ return 0;
11278+}
11279+
11280+void __exit dev_sock_cleanup(void)
11281+{
11282+ if (most_sock_unregister(MOSTPROTO_DEV) < 0)
11283+ printk(KERN_ERR "Device socket unregistration failed\n");
11284+
11285+ proto_unregister(&dev_sk_proto);
11286+}
11287+
11288diff -uNr linux-2.6.31/net/most/Kconfig linux-2.6.31.new/net/most/Kconfig
11289--- linux-2.6.31/net/most/Kconfig 1969-12-31 16:00:00.000000000 -0800
11290+++ linux-2.6.31.new/net/most/Kconfig 2009-10-23 11:17:37.000000000 -0700
11291@@ -0,0 +1,38 @@
11292+#
11293+# Media Oriented Systems Transport (MOST) network layer core configuration
11294+#
11295+
11296+menuconfig MOST
11297+ depends on NET
11298+ tristate "MOST bus subsystem support"
11299+ ---help---
11300+ Media Oriented Systems Transport (MOST) is a multimedia
11301+	  communications protocol used in the automotive industry.
11302+
11303+	  If you want MOST support, say Y here.
11304+
11305+config MOST_CTL
11306+ tristate "Support for Control data over MOST"
11307+ depends on MOST
11308+	default n
11309+ ---help---
11310+ Support for the control channel of the MOST bus.
11311+
11312+config MOST_ASYNC
11313+ tristate "Support for Asynchronous data over MOST"
11314+ depends on MOST
11315+ default N
11316+	default n
11317+	---help---
11318+	  Support for the asynchronous channel of the MOST bus. Normally
11319+	  used for software download or file transfers.
11320+config MOST_SYNC
11321+ tristate "Support for Synchronous data over MOST"
11322+ depends on MOST
11323+ default N
11324+	default n
11325+	---help---
11326+	  Support for synchronous channels of the MOST bus. Normally used
11327+
11328+
11329+source "drivers/net/most/Kconfig"
11330diff -uNr linux-2.6.31/net/most/Makefile linux-2.6.31.new/net/most/Makefile
11331--- linux-2.6.31/net/most/Makefile 1969-12-31 16:00:00.000000000 -0800
11332+++ linux-2.6.31.new/net/most/Makefile 2009-10-23 11:17:37.000000000 -0700
11333@@ -0,0 +1,15 @@
11334+#
11335+# Makefile for the Linux Media Oriented Systems Transport core.
11336+#
11337+
11338+obj-$(CONFIG_MOST) += most.o
11339+most-objs := af_most.o most_core.o most_sock.o dev_sock.o
11340+
11341+obj-$(CONFIG_MOST_CTL) += ctl.o
11342+ctl-objs := ctl_sock.o
11343+
11344+obj-$(CONFIG_MOST_SYNC) += sync.o
11345+sync-objs := sync_sock.o
11346+
11347+obj-$(CONFIG_MOST_ASYNC) += async.o
11348+async-objs := async_sock.o
11349diff -uNr linux-2.6.31/net/most/most_core.c linux-2.6.31.new/net/most/most_core.c
11350--- linux-2.6.31/net/most/most_core.c 1969-12-31 16:00:00.000000000 -0800
11351+++ linux-2.6.31.new/net/most/most_core.c 2009-10-23 11:17:37.000000000 -0700
11352@@ -0,0 +1,287 @@
11353+/*
11354+ * most_core.c The MOST core functions
11355+ * Copyright (c) 2009 Intel Corporation
11356+ *
11357+ * This program is free software; you can redistribute it and/or modify
11358+ * it under the terms of the GNU General Public License version 2 as
11359+ * published by the Free Software Foundation.
11360+ *
11361+ * This program is distributed in the hope that it will be useful,
11362+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
11363+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11364+ * GNU General Public License for more details.
11365+ *
11366+ * You should have received a copy of the GNU General Public License
11367+ * along with this program; if not, write to the Free Software
11368+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
11369+ */
11370+
11371+#include <linux/kernel.h>
11372+#include <linux/slab.h>
11373+#include <linux/module.h>
11374+
11375+#include <net/most/most_core.h>
11376+#include <net/most/dev.h>
11377+
11378+/* MOST device list */
11379+LIST_HEAD(most_dev_list);
11380+DEFINE_RWLOCK(most_dev_list_lock);
11381+
11382+
11383+int most_open_dev(u16 dev_id)
11384+{
11385+ struct most_dev *mdev = most_dev_get(dev_id);
11386+ int err = 0;
11387+
11388+ if (!mdev)
11389+ return -ENODEV;
11390+
11391+ most_dbg("%s: %s, state: %d\n", __func__, mdev->name, mdev->state);
11392+
11393+ if (mdev->state == MOST_DEV_UP)
11394+ err = -EALREADY;
11395+
11396+ if (!err)
11397+ err = mdev->open(mdev);
11398+ if (!err)
11399+ mdev->state = MOST_DEV_UP;
11400+
11401+ most_dev_put(mdev);
11402+ most_dbg("%s: %s, state: %d, err: %d\n", __func__,
11403+ mdev->name, mdev->state, err);
11404+ return err;
11405+}
11406+
11407+static int __most_close_dev(struct most_dev *mdev)
11408+{
11409+ int err = 0;
11410+
11411+ most_dbg("%s: %s, state: %d\n", __func__, mdev ? mdev->name : "nil",
11412+ mdev ? mdev->state : -1);
11413+
11414+ if (!mdev)
11415+ return -ENODEV;
11416+
11417+ if (mdev->state == MOST_DEV_DOWN)
11418+ err = -EALREADY;
11419+
11420+ if (!err)
11421+ err = mdev->close(mdev);
11422+ if (!err)
11423+ mdev->state = MOST_DEV_DOWN;
11424+
11425+ most_dev_put(mdev);
11426+ most_dbg("%s: %s, state: %d, err: %d\n", __func__,
11427+ mdev->name, mdev->state, err);
11428+ return err;
11429+}
11430+
11431+int most_close_dev(u16 dev_id)
11432+{
11433+ return __most_close_dev(most_dev_get(dev_id));
11434+}
11435+
11436+int most_get_dev_list(void __user *arg)
11437+{
11438+ struct most_dev_list_req *dl;
11439+ struct most_dev_req *dr;
11440+ struct list_head *p;
11441+ int n = 0, size, err;
11442+ u16 dev_num;
11443+
11444+ if (get_user(dev_num, (u16 __user *) arg))
11445+ return -EFAULT;
11446+
11447+ if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
11448+ return -EINVAL;
11449+
11450+ size = sizeof(*dl) + dev_num * sizeof(*dr);
11451+
11452+ dl = kzalloc(size, GFP_KERNEL);
11453+ if (!dl)
11454+ return -ENOMEM;
11455+
11456+ dr = dl->dev_req;
11457+
11458+ read_lock_bh(&most_dev_list_lock);
11459+ list_for_each(p, &most_dev_list) {
11460+ struct most_dev *mdev;
11461+ mdev = list_entry(p, struct most_dev, list);
11462+ (dr + n)->dev_id = mdev->id;
11463+ if (++n >= dev_num)
11464+ break;
11465+ }
11466+ read_unlock_bh(&most_dev_list_lock);
11467+
11468+ dl->dev_num = n;
11469+ size = sizeof(*dl) + n * sizeof(*dr);
11470+
11471+ err = copy_to_user(arg, dl, size);
11472+ kfree(dl);
11473+
11474+ return err ? -EFAULT : 0;
11475+}
11476+
11477+static int most_send_frame(struct sk_buff *skb)
11478+{
11479+ struct most_dev *mdev = (struct most_dev *) skb->dev;
11480+
11481+ if (!mdev) {
11482+ kfree_skb(skb);
11483+ return -ENODEV;
11484+ }
11485+
11486+ most_dbg("%s: %s type %d len %d\n", __func__, mdev->name,
11487+ most_cb(skb)->channel_type, skb->len);
11488+
11489+ /* Get rid of skb owner, prior to sending to the driver. */
11490+ skb_orphan(skb);
11491+
11492+ return mdev->send(skb);
11493+}
11494+
11495+static void most_send_queue(struct sk_buff_head *q)
11496+{
11497+ struct sk_buff *skb;
11498+
11499+ while ((skb = skb_dequeue(q))) {
11500+ struct most_dev *mdev = (struct most_dev *)skb->dev;
11501+
11502+ most_dbg("%s: skb %p len %d\n", __func__, skb, skb->len);
11503+
11504+ if (!mdev->can_send || mdev->can_send(skb))
11505+ most_send_frame(skb);
11506+ else {
11507+ most_dbg("%s, could not send frame, requeueing\n",
11508+ __func__);
11509+ skb_queue_tail(q, skb);
11510+ break;
11511+ }
11512+ }
11513+}
11514+
11515+static void most_tx_task(unsigned long arg)
11516+{
11517+ struct most_dev *mdev = (struct most_dev *) arg;
11518+
11519+ most_dbg("%s: %s\n", __func__, mdev->name);
11520+
11521+ most_send_queue(&mdev->ctl_q);
11522+ most_send_queue(&mdev->sync_q);
11523+ most_send_queue(&mdev->async_q);
11524+}
11525+
11526+static void most_rx_task(unsigned long arg)
11527+{
11528+ struct most_dev *mdev = (struct most_dev *) arg;
11529+ struct sk_buff *skb = skb_dequeue(&mdev->rx_q);
11530+
11531+ most_dbg("%s: %s\n", __func__, mdev->name);
11532+
11533+ while (skb) {
11534+ /* Send to the sockets */
11535+ most_send_to_sock(mdev->id, skb);
11536+ kfree_skb(skb);
11537+ skb = skb_dequeue(&mdev->rx_q);
11538+ }
11539+}
11540+
11541+
11542+/* Get MOST device by index.
11543+ * Device is held on return. */
11544+struct most_dev *most_dev_get(int index)
11545+{
11546+ struct most_dev *mdev = NULL;
11547+ struct list_head *p;
11548+
11549+ if (index < 0)
11550+ return NULL;
11551+
11552+ read_lock(&most_dev_list_lock);
11553+ list_for_each(p, &most_dev_list) {
11554+ struct most_dev *d = list_entry(p, struct most_dev, list);
11555+ if (d->id == index) {
11556+ mdev = most_dev_hold(d);
11557+ break;
11558+ }
11559+ }
11560+ read_unlock(&most_dev_list_lock);
11561+ return mdev;
11562+}
11563+EXPORT_SYMBOL(most_dev_get);
11564+
11565+
11566+/* Alloc MOST device */
11567+struct most_dev *most_alloc_dev(void)
11568+{
11569+ struct most_dev *mdev;
11570+
11571+ mdev = kzalloc(sizeof(struct most_dev), GFP_KERNEL);
11572+ if (!mdev)
11573+ return NULL;
11574+
11575+ mdev->state = MOST_DEV_DOWN;
11576+
11577+ return mdev;
11578+}
11579+EXPORT_SYMBOL(most_alloc_dev);
11580+
11581+
11582+void most_free_dev(struct most_dev *mdev)
11583+{
11584+ kfree(mdev);
11585+}
11586+EXPORT_SYMBOL(most_free_dev);
11587+
11588+
11589+/* Register MOST device */
11590+int most_register_dev(struct most_dev *mdev)
11591+{
11592+ struct list_head *head = &most_dev_list, *p;
11593+ int id = 0;
11594+
11595+ if (!mdev->open || !mdev->close || !mdev->send || !mdev->owner)
11596+ return -EINVAL;
11597+
11598+ write_lock_bh(&most_dev_list_lock);
11599+
11600+ /* Find first available device id */
11601+ list_for_each(p, &most_dev_list) {
11602+ if (list_entry(p, struct most_dev, list)->id != id)
11603+ break;
11604+ head = p; id++;
11605+ }
11606+
11607+ sprintf(mdev->name, "most%d", id);
11608+ mdev->id = id;
11609+ list_add(&mdev->list, head);
11610+
11611+ tasklet_init(&mdev->rx_task, most_rx_task, (unsigned long) mdev);
11612+ tasklet_init(&mdev->tx_task, most_tx_task, (unsigned long) mdev);
11613+
11614+ skb_queue_head_init(&mdev->rx_q);
11615+ skb_queue_head_init(&mdev->ctl_q);
11616+ skb_queue_head_init(&mdev->sync_q);
11617+ skb_queue_head_init(&mdev->async_q);
11618+
11619+ write_unlock_bh(&most_dev_list_lock);
11620+ return 0;
11621+}
11622+EXPORT_SYMBOL(most_register_dev);
11623+
11624+int most_unregister_dev(struct most_dev *mdev)
11625+{
11626+ int ret = 0;
11627+ most_dbg("%s: %s: state: %d\n", __func__, mdev->name, mdev->state);
11628+
11629+ if (mdev->state != MOST_DEV_DOWN)
11630+ ret = __most_close_dev(mdev);
11631+
11632+ write_lock_bh(&most_dev_list_lock);
11633+ list_del(&mdev->list);
11634+ write_unlock_bh(&most_dev_list_lock);
11635+
11636+ return ret;
11637+}
11638+EXPORT_SYMBOL(most_unregister_dev);
11639+
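
For orientation, the exported most_alloc_dev()/most_register_dev()/most_unregister_dev()/most_free_dev() helpers above form the driver-facing API of the MOST core; registration is refused unless the device's open, close, send and owner fields are all set. The minimal kernel-module sketch below shows that flow. The open/close callback prototypes and the <net/most/most_core.h> declarations are assumptions based on how most_core.c uses them, not definitions taken from this patch.

#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/most/most_core.h>

/* Hypothetical low-level callbacks provided by a hardware driver.
 * Prototypes for open/close are assumed; send matches mdev->send(skb). */
static int my_most_open(struct most_dev *mdev)  { return 0; }
static int my_most_close(struct most_dev *mdev) { return 0; }
static int my_most_send(struct sk_buff *skb)    { kfree_skb(skb); return 0; }

static struct most_dev *my_mdev;

static int __init my_most_driver_init(void)
{
	int err;

	my_mdev = most_alloc_dev();
	if (!my_mdev)
		return -ENOMEM;

	/* most_register_dev() returns -EINVAL unless all four are set. */
	my_mdev->owner = THIS_MODULE;     /* assumed to be a struct module * */
	my_mdev->open  = my_most_open;
	my_mdev->close = my_most_close;
	my_mdev->send  = my_most_send;

	err = most_register_dev(my_mdev); /* device becomes "most<N>" */
	if (err)
		most_free_dev(my_mdev);
	return err;
}

static void __exit my_most_driver_exit(void)
{
	most_unregister_dev(my_mdev);
	most_free_dev(my_mdev);
}

module_init(my_most_driver_init);
module_exit(my_most_driver_exit);
MODULE_LICENSE("GPL");
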
11640diff -uNr linux-2.6.31/net/most/most_sock.c linux-2.6.31.new/net/most/most_sock.c
11641--- linux-2.6.31/net/most/most_sock.c 1969-12-31 16:00:00.000000000 -0800
11642+++ linux-2.6.31.new/net/most/most_sock.c 2009-10-23 11:17:37.000000000 -0700
11643@@ -0,0 +1,315 @@
11644+/*
11645+ * most_sock.c Generic functions for MOST sockets
11646+ * Copyright (c) 2009 Intel Corporation
11647+ *
11648+ * This program is free software; you can redistribute it and/or modify
11649+ * it under the terms of the GNU General Public License version 2 as
11650+ * published by the Free Software Foundation.
11651+ *
11652+ * This program is distributed in the hope that it will be useful,
11653+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
11654+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11655+ * GNU General Public License for more details.
11656+ *
11657+ * You should have received a copy of the GNU General Public License
11658+ * along with this program; if not, write to the Free Software
11659+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
11660+ */
11661+
11662+#include <linux/module.h>
11663+#include <net/most/most_core.h>
11664+
11665+static struct most_sock_list most_sk_list = {
11666+ .lock = __RW_LOCK_UNLOCKED(most_sk_list.lock)
11667+};
11668+
11669+void most_sock_link(struct sock *sk)
11670+{
11671+ write_lock_bh(&most_sk_list.lock);
11672+ sk_add_node(sk, &most_sk_list.head);
11673+ write_unlock_bh(&most_sk_list.lock);
11674+}
11675+EXPORT_SYMBOL(most_sock_link);
11676+
11677+void most_sock_unlink(struct sock *sk)
11678+{
11679+ write_lock_bh(&most_sk_list.lock);
11680+ sk_del_node_init(sk);
11681+ write_unlock_bh(&most_sk_list.lock);
11682+}
11683+EXPORT_SYMBOL(most_sock_unlink);
11684+
11685+static int channel_in_use(int dev_id, u8 channel)
11686+{
11687+ struct sock *sk;
11688+ struct hlist_node *node;
11689+
11690+ read_lock_bh(&most_sk_list.lock);
11691+
11692+ sk_for_each(sk, node, &most_sk_list.head)
11693+ if (most_sk(sk)->dev_id == dev_id &&
11694+ sk->sk_state == MOST_BOUND &&
11695+ (most_sk(sk)->rx_channel == channel ||
11696+ most_sk(sk)->tx_channel == channel))
11697+ goto found;
11698+
11699+ sk = NULL;
11700+found:
11701+ read_unlock_bh(&most_sk_list.lock);
11702+
11703+ return sk != NULL;
11704+}
11705+
11706+int most_send_to_sock(int dev_id, struct sk_buff *skb)
11707+{
11708+ struct sock *sk;
11709+ struct hlist_node *node;
11710+
11711+ read_lock(&most_sk_list.lock);
11712+ sk_for_each(sk, node, &most_sk_list.head) {
11713+ if (most_sk(sk)->dev_id == dev_id &&
11714+ most_sk(sk)->channel_type == most_cb(skb)->channel_type
11715+ && most_sk(sk)->rx_channel == most_cb(skb)->channel &&
11716+ sk->sk_state == MOST_BOUND) {
11717+
11718+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
11719+ if (nskb)
11720+ if (sock_queue_rcv_skb(sk, nskb))
11721+ kfree_skb(nskb);
11722+ }
11723+
11724+ }
11725+ read_unlock(&most_sk_list.lock);
11726+
11727+ return 0;
11728+}
11729+EXPORT_SYMBOL(most_send_to_sock);
11730+
11731+int most_sock_release(struct socket *sock)
11732+{
11733+ struct sock *sk = sock->sk;
11734+ struct most_dev *mdev;
11735+
11736+ most_dbg("%s: sock %p sk %p\n", __func__, sock, sk);
11737+
11738+ if (!sk)
11739+ return 0;
11740+
11741+ mdev = most_sk(sk)->mdev;
11742+
11743+ most_sock_unlink(sk);
11744+
11745+ if (mdev) {
11746+ if (sk->sk_state == MOST_BOUND)
11747+ most_configure_channels(mdev, most_sk(sk), 0);
11748+
11749+ most_dev_put(mdev);
11750+ }
11751+
11752+ sock_orphan(sk);
11753+ sock_put(sk);
11754+ return 0;
11755+}
11756+EXPORT_SYMBOL(most_sock_release);
11757+
11758+int most_sock_bind(struct socket *sock, int dev_id, u8 rx_chan, u8 tx_chan)
11759+{
11760+ struct sock *sk = sock->sk;
11761+ struct most_dev *mdev = NULL;
11762+ int err = 0;
11763+
11764+ most_dbg("%s: sock %p sk %p, rx: %d, tx: %d\n",
11765+ __func__, sock, sk, rx_chan, tx_chan);
11766+
11767+ lock_sock(sk);
11768+
11769+ if (sk->sk_state != MOST_OPEN) {
11770+ err = -EBADFD;
11771+ goto done;
11772+ }
11773+
11774+ if (most_sk(sk)->mdev) {
11775+ err = -EALREADY;
11776+ goto done;
11777+ }
11778+
11779+ if (channel_in_use(dev_id, rx_chan) ||
11780+ channel_in_use(dev_id, tx_chan)) {
11781+ err = -EADDRINUSE;
11782+ goto done;
11783+ } else {
11784+ most_sk(sk)->rx_channel = rx_chan;
11785+ most_sk(sk)->tx_channel = tx_chan;
11786+ }
11787+
11788+ mdev = most_dev_get(dev_id);
11789+ if (!mdev) {
11790+ err = -ENODEV;
11791+ goto done;
11792+ }
11793+
11794+ err = most_configure_channels(mdev, most_sk(sk), 1);
11795+ if (err) {
11796+ most_dev_put(mdev);
11797+ goto done;
11798+ }
11799+
11800+ most_sk(sk)->mdev = mdev;
11801+ most_sk(sk)->dev_id = mdev->id;
11802+
11803+ sk->sk_state = MOST_BOUND;
11804+
11805+done:
11806+ release_sock(sk);
11807+ return err;
11808+}
11809+EXPORT_SYMBOL(most_sock_bind);
11810+
11811+
11812+int most_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
11813+{
11814+ most_dbg("%s\n", __func__);
11815+ return -EINVAL;
11816+}
11817+EXPORT_SYMBOL(most_sock_ioctl);
11818+
11819+int most_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
11820+ struct msghdr *msg, size_t len, int flags)
11821+{
11822+ int noblock = flags & MSG_DONTWAIT;
11823+ struct sock *sk = sock->sk;
11824+ struct sk_buff *skb;
11825+ int copied, err;
11826+
11827+ most_dbg("%s\n", __func__);
11828+
11829+ if (most_sk(sk)->rx_channel == MOST_NO_CHANNEL)
11830+ return -EOPNOTSUPP;
11831+
11832+ if (flags & (MSG_OOB))
11833+ return -EOPNOTSUPP;
11834+
11835+ if (sk->sk_state != MOST_BOUND)
11836+ return 0;
11837+
11838+ skb = skb_recv_datagram(sk, flags, noblock, &err);
11839+ if (!skb)
11840+ return err;
11841+
11842+ msg->msg_namelen = 0;
11843+
11844+ copied = skb->len;
11845+ if (len < copied) {
11846+ msg->msg_flags |= MSG_TRUNC;
11847+ copied = len;
11848+ }
11849+
11850+ skb_reset_transport_header(skb);
11851+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
11852+
11853+ skb_free_datagram(sk, skb);
11854+
11855+ return err ? : copied;
11856+}
11857+EXPORT_SYMBOL(most_sock_recvmsg);
11858+
11859+int most_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
11860+ struct msghdr *msg, size_t len)
11861+{
11862+ struct sock *sk = sock->sk;
11863+ struct most_dev *mdev;
11864+ struct sk_buff *skb;
11865+ int err;
11866+
11867+ most_dbg("%s: sock %p sk %p, channeltype: %d\n",
11868+ __func__, sock, sk, most_sk(sk)->channel_type);
11869+
11870+ if (most_sk(sk)->tx_channel == MOST_NO_CHANNEL)
11871+ return -EOPNOTSUPP;
11872+
11873+ if (msg->msg_flags & MSG_OOB)
11874+ return -EOPNOTSUPP;
11875+
11876+ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
11877+ return -EINVAL;
11878+
11879+ lock_sock(sk);
11880+
11881+ mdev = most_sk(sk)->mdev;
11882+ if (!mdev) {
11883+ err = -EBADFD;
11884+ goto done;
11885+ }
11886+
11887+ skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
11888+ if (!skb)
11889+ goto done;
11890+
11891+ most_cb(skb)->channel = most_sk(sk)->tx_channel;
11892+ most_cb(skb)->channel_type = most_sk(sk)->channel_type;
11893+
11894+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
11895+ err = -EFAULT;
11896+ goto drop;
11897+ }
11898+
11899+ skb->dev = (void *) mdev;
11900+
11901+ skb_queue_tail(&mdev->ctl_q, skb);
11902+ most_sched_tx(mdev);
11903+
11904+ err = len;
11905+
11906+done:
11907+ release_sock(sk);
11908+ return err;
11909+
11910+drop:
11911+ kfree_skb(skb);
11912+ goto done;
11913+}
11914+EXPORT_SYMBOL(most_sock_sendmsg);
11915+
11916+int most_sock_setsockopt(struct socket *sock, int level, int optname,
11917+ char __user *optval, int len)
11918+{
11919+ struct sock *sk = sock->sk;
11920+ int err = 0;
11921+
11922+ most_dbg("%s: sk %p", __func__, sk);
11923+
11924+ lock_sock(sk);
11925+
11926+ switch (optname) {
11927+ default:
11928+ err = -ENOPROTOOPT;
11929+ break;
11930+ }
11931+
11932+ release_sock(sk);
11933+ return err;
11934+}
11935+EXPORT_SYMBOL(most_sock_setsockopt);
11936+
11937+
11938+int most_sock_getsockopt(struct socket *sock, int level, int optname,
11939+ char __user *optval, int __user *optlen)
11940+{
11941+ struct sock *sk = sock->sk;
11942+ int err = 0;
11943+
11944+ most_dbg("%s: sk %p", __func__, sk);
11945+
11946+ lock_sock(sk);
11947+
11948+ switch (optname) {
11949+ default:
11950+ err = -ENOPROTOOPT;
11951+ break;
11952+ }
11953+
11954+ release_sock(sk);
11955+ return err;
11956+}
11957+EXPORT_SYMBOL(most_sock_getsockopt);
11958+
11959diff -uNr linux-2.6.31/net/most/sync_sock.c linux-2.6.31.new/net/most/sync_sock.c
11960--- linux-2.6.31/net/most/sync_sock.c 1969-12-31 16:00:00.000000000 -0800
11961+++ linux-2.6.31.new/net/most/sync_sock.c 2009-10-23 11:17:37.000000000 -0700
11962@@ -0,0 +1,150 @@
11963+/*
11964+ * sync_sock.c Support for MOST synchronous sockets
11965+ * Copyright (c) 2009 Intel Corporation
11966+ *
11967+ * This program is free software; you can redistribute it and/or modify
11968+ * it under the terms of the GNU General Public License version 2 as
11969+ * published by the Free Software Foundation.
11970+ *
11971+ * This program is distributed in the hope that it will be useful,
11972+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
11973+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11974+ * GNU General Public License for more details.
11975+ *
11976+ * You should have received a copy of the GNU General Public License
11977+ * along with this program; if not, write to the Free Software
11978+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
11979+ */
11980+
11981+#include <linux/module.h>
11982+#include <net/most/most.h>
11983+#include <net/most/most_core.h>
11984+#include <net/most/sync.h>
11985+
11986+static int sync_sock_bind(struct socket *sock, struct sockaddr *addr,
11987+ int addr_len)
11988+{
11989+ struct sockaddr_mostsync *saddr = (struct sockaddr_mostsync *)addr;
11990+
11991+ if (!saddr || saddr->most_family != AF_MOST)
11992+ return -EINVAL;
11993+
11994+ return most_sock_bind(sock, saddr->most_dev, saddr->rx_channel,
11995+ saddr->tx_channel);
11996+}
11997+
11998+static int sync_sock_getname(struct socket *sock, struct sockaddr *addr,
11999+ int *addr_len, int peer)
12000+{
12001+ struct sockaddr_mostsync *saddr = (struct sockaddr_mostsync *)addr;
12002+ struct sock *sk = sock->sk;
12003+ struct most_dev *mdev = most_sk(sk)->mdev;
12004+
12005+ if (!mdev)
12006+ return -EBADFD;
12007+
12008+ lock_sock(sk);
12009+
12010+ *addr_len = sizeof(*saddr);
12011+ saddr->most_family = AF_MOST;
12012+ saddr->most_dev = mdev->id;
12013+ saddr->rx_channel = most_sk(sk)->rx_channel;
12014+ saddr->tx_channel = most_sk(sk)->tx_channel;
12015+
12016+ release_sock(sk);
12017+ return 0;
12018+}
12019+
12020+
12021+static const struct proto_ops sync_sock_ops = {
12022+ .family = PF_MOST,
12023+ .owner = THIS_MODULE,
12024+ .release = most_sock_release,
12025+ .bind = sync_sock_bind,
12026+ .getname = sync_sock_getname,
12027+ .sendmsg = most_sock_sendmsg,
12028+ .recvmsg = most_sock_recvmsg,
12029+ .ioctl = most_sock_ioctl,
12030+ .poll = datagram_poll,
12031+ .listen = sock_no_listen,
12032+ .shutdown = sock_no_shutdown,
12033+ .setsockopt = most_sock_setsockopt,
12034+ .getsockopt = most_sock_getsockopt,
12035+ .connect = sock_no_connect,
12036+ .socketpair = sock_no_socketpair,
12037+ .accept = sock_no_accept,
12038+ .mmap = sock_no_mmap
12039+};
12040+static struct proto sync_sk_proto = {
12041+ .name = "SYNC",
12042+ .owner = THIS_MODULE,
12043+ .obj_size = sizeof(struct most_sock)
12044+};
12045+
12046+static int sync_sock_create(struct net *net, struct socket *sock, int protocol)
12047+{
12048+ struct sock *sk;
12049+
12050+ if (sock->type != SOCK_STREAM)
12051+ return -ESOCKTNOSUPPORT;
12052+
12053+ sock->ops = &sync_sock_ops;
12054+
12055+ sk = most_sk_alloc(net, &sync_sk_proto, CHAN_SYNC);
12056+ if (!sk)
12057+ return -ENOMEM;
12058+
12059+ sock_init_data(sock, sk);
12060+
12061+ sock_reset_flag(sk, SOCK_ZAPPED);
12062+
12063+ sk->sk_protocol = protocol;
12064+
12065+ sock->state = SS_UNCONNECTED;
12066+ sk->sk_state = MOST_OPEN;
12067+
12068+ most_sock_link(sk);
12069+ return 0;
12070+}
12071+
12072+static struct net_proto_family sync_sock_family_ops = {
12073+ .family = PF_MOST,
12074+ .owner = THIS_MODULE,
12075+ .create = sync_sock_create,
12076+};
12077+
12078+
12079+static int __init sync_init(void)
12080+{
12081+ int err;
12082+
12083+ err = proto_register(&sync_sk_proto, 0);
12084+ if (err < 0)
12085+ return err;
12086+
12087+ err = most_sock_register(MOSTPROTO_SYNC, &sync_sock_family_ops);
12088+ if (err < 0) {
12089+ printk(KERN_ERR "MOST socket registration failed\n");
12090+ return err;
12091+ }
12092+
12093+ printk(KERN_INFO "MOST synchronous socket layer initialized\n");
12094+
12095+ return 0;
12096+}
12097+
12098+static void __exit sync_exit(void)
12099+{
12100+ if (most_sock_unregister(MOSTPROTO_SYNC) < 0)
12101+ printk(KERN_ERR "SYNC socket unregistration failed\n");
12102+
12103+ proto_unregister(&sync_sk_proto);
12104+}
12105+
12106+module_init(sync_init);
12107+module_exit(sync_exit);
12108+
12109+MODULE_DESCRIPTION("Most Syncronous");
12110+MODULE_LICENSE("GPL v2");
12111+MODULE_ALIAS("most-proto-2");
12112+
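
most_sock.c and sync_sock.c together expose MOST synchronous channels to user space as PF_MOST/SOCK_STREAM sockets bound to a device id plus an rx/tx channel pair. A hedged user-space sketch of that flow follows; the numeric value of AF_MOST and the exact layout of struct sockaddr_mostsync are not visible in this patch, so they are stubbed below as labelled assumptions (MOSTPROTO_SYNC = 2 is inferred from MODULE_ALIAS("most-proto-2")).

/* User-space sketch: read frames from a MOST synchronous channel.
 * AF_MOST and struct sockaddr_mostsync mirror the kernel definitions used
 * by sync_sock_bind(); the numeric values below are placeholders. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#define AF_MOST        40   /* assumption: real family number not in patch */
#define MOSTPROTO_SYNC 2    /* inferred from MODULE_ALIAS("most-proto-2") */

struct sockaddr_mostsync {  /* assumed layout of the UAPI structure */
	sa_family_t   most_family;
	int           most_dev;
	unsigned char rx_channel;
	unsigned char tx_channel;
};

int main(void)
{
	struct sockaddr_mostsync addr;
	char buf[512];
	ssize_t n;
	int fd;

	fd = socket(AF_MOST, SOCK_STREAM, MOSTPROTO_SYNC);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.most_family = AF_MOST;
	addr.most_dev    = 0;   /* first registered device, i.e. "most0" */
	addr.rx_channel  = 1;   /* channel numbers are device specific */
	addr.tx_channel  = 2;

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");  /* e.g. EADDRINUSE if the channel is taken */
		return 1;
	}

	n = read(fd, buf, sizeof(buf)); /* serviced by most_sock_recvmsg() */
	printf("got %zd bytes\n", n);
	close(fd);
	return 0;
}
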
12113diff -uNr linux-2.6.31/sound/drivers/Kconfig linux-2.6.31.new/sound/drivers/Kconfig
12114--- linux-2.6.31/sound/drivers/Kconfig 2009-10-23 11:18:30.000000000 -0700
12115+++ linux-2.6.31.new/sound/drivers/Kconfig 2009-10-23 11:16:57.000000000 -0700
12116@@ -182,4 +182,17 @@
12117 The default time-out value in seconds for AC97 automatic
12118 power-save mode. 0 means to disable the power-save mode.
12119
12120+config SND_TIMBERDALE_I2S
12121+ tristate "The timberdale FPGA I2S driver"
12122+ depends on MFD_TIMBERDALE && HAS_IOMEM
12123+ default y
12124+ help
12125+ Say Y here to enable driver for the I2S block found within the
12126+ Timberdale FPGA.
12127+ There is support for up to 8 I2S channels, in either transmitter
12128+ or receiver mode.
12129+
12130+ To compile this driver as a module, choose M here: the module
12131+ will be called snd-timbi2s.
12132+
12133 endif # SND_DRIVERS
12134diff -uNr linux-2.6.31/sound/drivers/Makefile linux-2.6.31.new/sound/drivers/Makefile
12135--- linux-2.6.31/sound/drivers/Makefile 2009-10-23 11:18:30.000000000 -0700
12136+++ linux-2.6.31.new/sound/drivers/Makefile 2009-10-23 11:16:57.000000000 -0700
12137@@ -10,6 +10,7 @@
12138 snd-serial-u16550-objs := serial-u16550.o
12139 snd-virmidi-objs := virmidi.o
12140 snd-ml403-ac97cr-objs := ml403-ac97cr.o pcm-indirect2.o
12141+snd-timbi2s-objs := timbi2s.o
12142
12143 # Toplevel Module Dependency
12144 obj-$(CONFIG_SND_DUMMY) += snd-dummy.o
12145@@ -19,5 +20,6 @@
12146 obj-$(CONFIG_SND_MTS64) += snd-mts64.o
12147 obj-$(CONFIG_SND_PORTMAN2X4) += snd-portman2x4.o
12148 obj-$(CONFIG_SND_ML403_AC97CR) += snd-ml403-ac97cr.o
12149+obj-$(CONFIG_SND_TIMBERDALE_I2S) += snd-timbi2s.o
12150
12151 obj-$(CONFIG_SND) += opl3/ opl4/ mpu401/ vx/ pcsp/
12152diff -uNr linux-2.6.31/sound/drivers/timbi2s.c linux-2.6.31.new/sound/drivers/timbi2s.c
12153--- linux-2.6.31/sound/drivers/timbi2s.c 1969-12-31 16:00:00.000000000 -0800
12154+++ linux-2.6.31.new/sound/drivers/timbi2s.c 2009-10-23 11:16:57.000000000 -0700
12155@@ -0,0 +1,755 @@
12156+/*
12157+ * timbi2s.c timberdale FPGA I2S driver
12158+ * Copyright (c) 2009 Intel Corporation
12159+ *
12160+ * This program is free software; you can redistribute it and/or modify
12161+ * it under the terms of the GNU General Public License version 2 as
12162+ * published by the Free Software Foundation.
12163+ *
12164+ * This program is distributed in the hope that it will be useful,
12165+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
12166+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12167+ * GNU General Public License for more details.
12168+ *
12169+ * You should have received a copy of the GNU General Public License
12170+ * along with this program; if not, write to the Free Software
12171+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
12172+ */
12173+
12174+/* Supports:
12175+ * Timberdale FPGA I2S
12176+ *
12177+ */
12178+
12179+#include <linux/io.h>
12180+#include <linux/interrupt.h>
12181+#include <linux/platform_device.h>
12182+#include <sound/core.h>
12183+#include <sound/pcm.h>
12184+#include <sound/pcm_params.h>
12185+#include <sound/initval.h>
12186+#include <sound/timbi2s.h>
12187+
12188+#define DRIVER_NAME "timb-i2s"
12189+
12190+#define MAX_BUSSES 8
12191+
12192+#define TIMBI2S_REG_VER 0x00
12193+#define TIMBI2S_REG_UIR 0x04
12194+
12195+#define TIMBI2S_BUS_PRESCALE 0x00
12196+#define TIMBI2S_BUS_ICLR 0x04
12197+#define TIMBI2S_BUS_IPR 0x08
12198+#define TIMBI2S_BUS_ISR 0x0c
12199+#define TIMBI2S_BUS_IER 0x10
12200+
12201+
12202+#define TIMBI2S_IRQ_TX_FULL 0x01
12203+#define TIMBI2S_IRQ_TX_ALMOST_FULL 0x02
12204+#define TIMBI2S_IRQ_TX_ALMOST_EMPTY 0x04
12205+#define TIMBI2S_IRQ_TX_EMPTY 0x08
12206+
12207+#define TIMBI2S_IRQ_RX_FULL 0x10
12208+#define TIMBI2S_IRQ_RX_ALMOST_FULL 0x20
12209+#define TIMBI2S_IRQ_RX_ALMOST_EMPTY 0x40
12210+#define TIMBI2S_IRQ_RX_NOT_EMPTY 0x80
12211+
12212+#define TIMBI2S_BUS_ICOR 0x14
12213+#define TIMBI2S_ICOR_TX_ENABLE 0x00000001
12214+#define TIMBI2S_ICOR_RX_ENABLE 0x00000002
12215+#define TIMBI2S_ICOR_LFIFO_RST 0x00000004
12216+#define TIMBI2S_ICOR_RFIFO_RST 0x00000008
12217+#define TIMBI2S_ICOR_FIFO_RST (TIMBI2S_ICOR_LFIFO_RST | TIMBI2S_ICOR_RFIFO_RST)
12218+#define TIMBI2S_ICOR_SOFT_RST 0x00000010
12219+#define TIMBI2S_ICOR_WORD_SEL_LEFT_SHIFT 8
12220+#define TIMBI2S_ICOR_WORD_SEL_LEFT_MASK (0xff << 8)
12221+#define TIMBI2S_ICOR_WORD_SEL_RIGHT_SHIFT 16
12222+#define TIMBI2S_ICOR_WORD_SEL_RIGHT_MASK (0xff << 16)
12223+#define TIMBI2S_ICOR_CLK_MASTER 0x10000000
12224+#define TIMBI2S_ICOR_RX_ID 0x20000000
12225+#define TIMBI2S_ICOR_TX_ID 0x40000000
12226+#define TIMBI2S_ICOR_WORD_SEL 0x80000000
12227+#define TIMBI2S_BUS_FIFO 0x18
12228+
12229+#define TIMBI2S_BUS_REG_AREA_SIZE (TIMBI2S_BUS_FIFO - \
12230+ TIMBI2S_BUS_PRESCALE + 4)
12231+#define TIMBI2S_FIRST_BUS_AREA_OFS 0x08
12232+
12233+struct timbi2s_bus {
12234+ u32 flags;
12235+ u32 prescale;
12236+ struct snd_pcm *pcm;
12237+ struct snd_card *card;
12238+ struct snd_pcm_substream *substream;
12239+ unsigned buf_pos;
12240+ spinlock_t lock; /* mutual exclusion */
12241+ u16 sample_rate;
12242+};
12243+
12244+#define BUS_RX 0x200
12245+#define BUS_MASTER 0x100
12246+#define BUS_INDEX_MASK 0xff
12247+#define BUS_INDEX(b) ((b)->flags & BUS_INDEX_MASK)
12248+#define BUS_IS_MASTER(b) ((b)->flags & BUS_MASTER)
12249+#define BUS_IS_RX(b) ((b)->flags & BUS_RX)
12250+
12251+#define SET_BUS_INDEX(b, id) ((b)->flags = ((b)->flags & ~BUS_INDEX_MASK) | id)
12252+#define SET_BUS_MASTER(b) ((b)->flags |= BUS_MASTER)
12253+#define SET_BUS_RX(b) ((b)->flags |= BUS_RX)
12254+
12255+#define TIMBI2S_BUS_OFFSET(bus) (TIMBI2S_FIRST_BUS_AREA_OFS + \
12256+ TIMBI2S_BUS_REG_AREA_SIZE * BUS_INDEX(bus))
12257+
12258+struct timbi2s {
12259+ void __iomem *membase;
12260+ int irq;
12261+ struct tasklet_struct tasklet;
12262+ u32 main_clk;
12263+ unsigned num_busses;
12264+ struct timbi2s_bus busses[0];
12265+};
12266+
12267+#define BITS_PER_CHANNEL 16
12268+#define NUM_CHANNELS 2
12269+
12270+#define SAMPLE_SIZE ((NUM_CHANNELS * BITS_PER_CHANNEL) / 8)
12271+#define NUM_PERIODS 32
12272+#define NUM_SAMPLES 256
12273+
12274+static struct snd_pcm_hardware timbi2s_rx_hw = {
12275+ .info = (SNDRV_PCM_INFO_MMAP
12276+ | SNDRV_PCM_INFO_MMAP_VALID
12277+ | SNDRV_PCM_INFO_INTERLEAVED),
12278+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
12279+ .rates = SNDRV_PCM_RATE_44100,
12280+ .rate_min = 44100,
12281+ .rate_max = 44100,
12282+ .channels_min = 2, /* only stereo */
12283+ .channels_max = 2,
12284+ .buffer_bytes_max = NUM_PERIODS * SAMPLE_SIZE * NUM_SAMPLES,
12285+ .period_bytes_min = SAMPLE_SIZE * NUM_SAMPLES,
12286+ .period_bytes_max = SAMPLE_SIZE * NUM_SAMPLES,
12287+ .periods_min = NUM_PERIODS,
12288+ .periods_max = NUM_PERIODS,
12289+};
12290+
12291+static struct snd_pcm_hardware timbi2s_tx_hw = {
12292+ .info = (SNDRV_PCM_INFO_MMAP
12293+ | SNDRV_PCM_INFO_MMAP_VALID
12294+ | SNDRV_PCM_INFO_INTERLEAVED),
12295+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
12296+ .rates = SNDRV_PCM_RATE_8000,
12297+ .rate_min = 8000,
12298+ .rate_max = 8000,
12299+ .channels_min = 2, /* only stereo */
12300+ .channels_max = 2,
12301+ .buffer_bytes_max = NUM_PERIODS * SAMPLE_SIZE * NUM_SAMPLES,
12302+ .period_bytes_min = SAMPLE_SIZE * NUM_SAMPLES,
12303+ .period_bytes_max = SAMPLE_SIZE * NUM_SAMPLES,
12304+ .periods_min = NUM_PERIODS,
12305+ .periods_max = NUM_PERIODS,
12306+};
12307+
12308+static inline void timbi2s_bus_write(struct timbi2s_bus *bus, u32 val, u32 reg)
12309+{
12310+ struct timbi2s *i2s = snd_pcm_chip(bus->card);
12311+
12312+ iowrite32(val, i2s->membase + TIMBI2S_BUS_OFFSET(bus) + reg);
12313+}
12314+
12315+static inline u32 timbi2s_bus_read(struct timbi2s_bus *bus, u32 reg)
12316+{
12317+ struct timbi2s *i2s = snd_pcm_chip(bus->card);
12318+
12319+ return ioread32(i2s->membase + TIMBI2S_BUS_OFFSET(bus) + reg);
12320+}
12321+
12322+static u32 timbi2s_calc_prescale(u32 main_clk, u32 sample_rate)
12323+{
12324+ u32 halfbit_rate = sample_rate * BITS_PER_CHANNEL * NUM_CHANNELS * 2;
12325+ return main_clk / halfbit_rate;
12326+}
12327+
12328+static int timbi2s_open(struct snd_pcm_substream *substream)
12329+{
12330+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12331+ struct snd_card *card = bus->card;
12332+ struct snd_pcm_runtime *runtime = substream->runtime;
12333+ dev_dbg(snd_card_get_device_link(card),
12334+ "%s: Entry, substream: %p, bus: %d\n", __func__, substream,
12335+ BUS_INDEX(bus));
12336+
12337+ if (BUS_IS_RX(bus)) {
12338+ runtime->hw = timbi2s_rx_hw;
12339+ if (bus->sample_rate == 8000) {
12340+ runtime->hw.rates = SNDRV_PCM_RATE_8000;
12341+ runtime->hw.rate_min = 8000;
12342+ runtime->hw.rate_max = 8000;
12343+ }
12344+ } else
12345+ runtime->hw = timbi2s_tx_hw;
12346+
12347+ bus->substream = substream;
12348+
12349+ return 0;
12350+}
12351+
12352+static int timbi2s_close(struct snd_pcm_substream *substream)
12353+{
12354+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12355+ struct snd_card *card = bus->card;
12356+ dev_dbg(snd_card_get_device_link(card),
12357+ "%s: Entry, substream: %p, bus: %d\n", __func__, substream,
12358+ BUS_INDEX(bus));
12359+
12360+ bus->substream = NULL;
12361+
12362+ return 0;
12363+}
12364+
12365+static int timbi2s_hw_params(struct snd_pcm_substream *substream,
12366+ struct snd_pcm_hw_params *hw_params)
12367+{
12368+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12369+ struct snd_card *card = bus->card;
12370+ struct timbi2s *i2s = snd_pcm_chip(card);
12371+ int err;
12372+
12373+ dev_dbg(snd_card_get_device_link(card),
12374+ "%s: Entry, substream: %p, bus: %d\n", __func__,
12375+ substream, BUS_INDEX(bus));
12376+
12377+ bus->prescale = timbi2s_calc_prescale(i2s->main_clk,
12378+ params_rate(hw_params));
12379+
12380+ err = snd_pcm_lib_malloc_pages(substream,
12381+ params_buffer_bytes(hw_params));
12382+ if (err < 0)
12383+ return err;
12384+
12385+ dev_dbg(snd_card_get_device_link(card),
12386+ "%s: Rate: %d, format: %d\n", __func__, params_rate(hw_params),
12387+ params_format(hw_params));
12388+
12389+ return 0;
12390+}
12391+
12392+static int timbi2s_hw_free(struct snd_pcm_substream *substream)
12393+{
12394+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12395+ struct snd_card *card = bus->card;
12396+ unsigned long flags;
12397+
12398+ dev_dbg(snd_card_get_device_link(card),
12399+ "%s: Entry, substream: %p\n", __func__, substream);
12400+
12401+ spin_lock_irqsave(&bus->lock, flags);
12402+ /* disable interrupts */
12403+ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_IER);
12404+ spin_unlock_irqrestore(&bus->lock, flags);
12405+
12406+ /* disable TX and RX */
12407+ timbi2s_bus_write(bus, TIMBI2S_ICOR_FIFO_RST | TIMBI2S_ICOR_SOFT_RST,
12408+ TIMBI2S_BUS_ICOR);
12409+
12410+ return snd_pcm_lib_free_pages(substream);
12411+}
12412+
12413+static int timbi2s_prepare(struct snd_pcm_substream *substream)
12414+{
12415+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12416+ struct snd_card *card = bus->card;
12417+ struct snd_pcm_runtime *runtime = substream->runtime;
12418+ u32 data;
12419+
12420+ dev_dbg(snd_card_get_device_link(card),
12421+ "%s: Entry, substream: %p, bus: %d, buffer: %d, period: %d\n",
12422+ __func__, substream,
12423+ BUS_INDEX(bus), (int)snd_pcm_lib_buffer_bytes(substream),
12424+ (int)snd_pcm_lib_period_bytes(substream));
12425+
12426+ if (runtime->dma_addr & 3 || runtime->buffer_size & 3) {
12427+ dev_err(snd_card_get_device_link(card),
12428+ "%s: Only word aligned data allowed\n", __func__);
12429+ return -EINVAL;
12430+ }
12431+
12432+ if (runtime->channels != NUM_CHANNELS) {
12433+ dev_err(snd_card_get_device_link(card),
12434+ "%s: Number of channels unsupported %d\n", __func__,
12435+ runtime->channels);
12436+ return -EINVAL;
12437+ }
12438+
12439+ /* reset */
12440+ timbi2s_bus_write(bus, TIMBI2S_ICOR_FIFO_RST | TIMBI2S_ICOR_SOFT_RST,
12441+ TIMBI2S_BUS_ICOR);
12442+
12443+ /* only masters have prescaling, don't write if not needed */
12444+ if (BUS_IS_MASTER(bus))
12445+ timbi2s_bus_write(bus, bus->prescale, TIMBI2S_BUS_PRESCALE);
12446+
12447+ /* write word select */
12448+ data = ((BITS_PER_CHANNEL << TIMBI2S_ICOR_WORD_SEL_LEFT_SHIFT) &
12449+ TIMBI2S_ICOR_WORD_SEL_LEFT_MASK) |
12450+ ((BITS_PER_CHANNEL << TIMBI2S_ICOR_WORD_SEL_RIGHT_SHIFT) &
12451+ TIMBI2S_ICOR_WORD_SEL_RIGHT_MASK);
12452+ timbi2s_bus_write(bus, data, TIMBI2S_BUS_ICOR);
12453+
12454+ bus->buf_pos = 0;
12455+
12456+ return 0;
12457+}
12458+
12459+static int
12460+timbi2s_playback_trigger(struct snd_pcm_substream *substream, int cmd)
12461+{
12462+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12463+ struct snd_card *card = bus->card;
12464+ unsigned long flags;
12465+ u32 data;
12466+
12467+ dev_dbg(snd_card_get_device_link(card),
12468+ "%s: Entry, substream: %p, bus: %d, cmd: %d\n", __func__,
12469+ substream, BUS_INDEX(bus), cmd);
12470+
12471+ switch (cmd) {
12472+ case SNDRV_PCM_TRIGGER_START:
12473+ dev_dbg(snd_card_get_device_link(card),
12474+ "%s: Got TRIGGER_START command\n", __func__);
12475+
12476+ /* start */
12477+ data = timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR);
12478+ data |= TIMBI2S_ICOR_TX_ENABLE;
12479+ timbi2s_bus_write(bus, data, TIMBI2S_BUS_ICOR);
12480+
12481+ /* enable interrupts */
12482+ timbi2s_bus_write(bus, TIMBI2S_IRQ_TX_ALMOST_EMPTY,
12483+ TIMBI2S_BUS_IER);
12484+ dev_dbg(snd_card_get_device_link(card),
12485+ "%s: ISR: %x, ICOR: %x\n", __func__,
12486+ timbi2s_bus_read(bus, TIMBI2S_BUS_ISR),
12487+ timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR));
12488+ break;
12489+ case SNDRV_PCM_TRIGGER_STOP:
12490+ dev_dbg(snd_card_get_device_link(card),
12491+ "%s: Got TRIGGER_STOP command\n", __func__);
12492+
12493+ spin_lock_irqsave(&bus->lock, flags);
12494+ /* disable interrupts */
12495+ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_IER);
12496+ spin_unlock_irqrestore(&bus->lock, flags);
12497+
12498+ /* reset */
12499+ data = timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR);
12500+ data &= ~TIMBI2S_ICOR_TX_ENABLE;
12501+
12502+ timbi2s_bus_write(bus, data, TIMBI2S_BUS_ICOR);
12503+ break;
12504+ default:
12505+ dev_dbg(snd_card_get_device_link(card),
12506+ "%s: Got unsupported command\n", __func__);
12507+
12508+ return -EINVAL;
12509+ }
12510+
12511+ return 0;
12512+}
12513+
12514+static int
12515+timbi2s_capture_trigger(struct snd_pcm_substream *substream, int cmd)
12516+{
12517+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12518+ struct snd_card *card = bus->card;
12519+ unsigned long flags;
12520+
12521+ dev_dbg(snd_card_get_device_link(card),
12522+ "%s: Entry, substream: %p, bus: %d, cmd: %d\n", __func__,
12523+ substream, BUS_INDEX(bus), cmd);
12524+
12525+ switch (cmd) {
12526+ case SNDRV_PCM_TRIGGER_START:
12527+ dev_dbg(snd_card_get_device_link(card),
12528+ "%s: Got TRIGGER_START command\n", __func__);
12529+
12530+ timbi2s_bus_write(bus, TIMBI2S_ICOR_RX_ENABLE |
12531+ TIMBI2S_ICOR_FIFO_RST, TIMBI2S_BUS_ICOR);
12532+
12533+ timbi2s_bus_write(bus, TIMBI2S_IRQ_RX_ALMOST_FULL,
12534+ TIMBI2S_BUS_IER);
12535+ break;
12536+ case SNDRV_PCM_TRIGGER_STOP:
12537+ dev_dbg(snd_card_get_device_link(card),
12538+ "%s: Got TRIGGER_STOP command\n", __func__);
12539+ /* disable interrupts */
12540+ spin_lock_irqsave(&bus->lock, flags);
12541+ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_IER);
12542+ spin_unlock_irqrestore(&bus->lock, flags);
12543+ /* Stop RX */
12544+ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_ICOR);
12545+ break;
12546+ default:
12547+ dev_dbg(snd_card_get_device_link(card),
12548+ "%s: Got unsupported command\n", __func__);
12549+
12550+ return -EINVAL;
12551+ }
12552+
12553+ return 0;
12554+}
12555+
12556+static snd_pcm_uframes_t
12557+timbi2s_pointer(struct snd_pcm_substream *substream)
12558+{
12559+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12560+ struct snd_card *card = bus->card;
12561+ snd_pcm_uframes_t ret;
12562+
12563+ dev_dbg(snd_card_get_device_link(card),
12564+ "%s: Entry, substream: %p\n", __func__, substream);
12565+
12566+ ret = bytes_to_frames(substream->runtime, bus->buf_pos);
12567+ if (ret >= substream->runtime->buffer_size)
12568+ ret -= substream->runtime->buffer_size;
12569+
12570+ return ret;
12571+}
12572+
12573+static struct snd_pcm_ops timbi2s_playback_ops = {
12574+ .open = timbi2s_open,
12575+ .close = timbi2s_close,
12576+ .ioctl = snd_pcm_lib_ioctl,
12577+ .hw_params = timbi2s_hw_params,
12578+ .hw_free = timbi2s_hw_free,
12579+ .prepare = timbi2s_prepare,
12580+ .trigger = timbi2s_playback_trigger,
12581+ .pointer = timbi2s_pointer,
12582+};
12583+
12584+static struct snd_pcm_ops timbi2s_capture_ops = {
12585+ .open = timbi2s_open,
12586+ .close = timbi2s_close,
12587+ .ioctl = snd_pcm_lib_ioctl,
12588+ .hw_params = timbi2s_hw_params,
12589+ .hw_free = timbi2s_hw_free,
12590+ .prepare = timbi2s_prepare,
12591+ .trigger = timbi2s_capture_trigger,
12592+ .pointer = timbi2s_pointer,
12593+};
12594+
12595+static void timbi2s_irq_process_rx(struct timbi2s_bus *bus)
12596+{
12597+ struct snd_pcm_runtime *runtime = bus->substream->runtime;
12598+ u32 buffer_size = snd_pcm_lib_buffer_bytes(bus->substream);
12599+ u32 ipr = timbi2s_bus_read(bus, TIMBI2S_BUS_IPR);
12600+ int i;
12601+
12602+ dev_dbg(snd_card_get_device_link(bus->card),
12603+ "%s: Entry, bus: %d, IPR %x\n", __func__, BUS_INDEX(bus), ipr);
12604+
12605+ for (i = 0; i < NUM_SAMPLES; i++) {
12606+ *(u32 *)(runtime->dma_area + bus->buf_pos) =
12607+ timbi2s_bus_read(bus, TIMBI2S_BUS_FIFO);
12608+ bus->buf_pos += SAMPLE_SIZE;
12609+ bus->buf_pos %= buffer_size;
12610+ }
12611+
12612+ timbi2s_bus_write(bus, ipr, TIMBI2S_BUS_ICLR);
12613+
12614+ /* inform ALSA that a period was received */
12615+ snd_pcm_period_elapsed(bus->substream);
12616+}
12617+
12618+static void timbi2s_irq_process_tx(struct timbi2s_bus *bus)
12619+{
12620+ struct snd_pcm_runtime *runtime = bus->substream->runtime;
12621+ u32 buffer_size = snd_pcm_lib_buffer_bytes(bus->substream);
12622+ u32 ipr = timbi2s_bus_read(bus, TIMBI2S_BUS_IPR);
12623+ int i;
12624+
12625+ dev_dbg(snd_card_get_device_link(bus->card),
12626+ "%s: Entry, bus: %d, IPR %x\n", __func__, BUS_INDEX(bus), ipr);
12627+
12628+ for (i = 0; i < NUM_SAMPLES; i++) {
12629+ timbi2s_bus_write(bus,
12630+ *(u32 *)(runtime->dma_area + bus->buf_pos),
12631+ TIMBI2S_BUS_FIFO);
12632+ bus->buf_pos += SAMPLE_SIZE;
12633+ bus->buf_pos %= buffer_size;
12634+ }
12635+
12636+ dev_dbg(snd_card_get_device_link(bus->card), "%s: ISR: %x, ICOR: %x\n",
12637+ __func__, timbi2s_bus_read(bus, TIMBI2S_BUS_ISR),
12638+ timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR));
12639+
12640+ timbi2s_bus_write(bus, ipr, TIMBI2S_BUS_ICLR);
12641+
12642+ /* inform ALSA that a period was transmitted */
12643+ snd_pcm_period_elapsed(bus->substream);
12644+}
12645+
12646+static void timbi2s_tasklet(unsigned long arg)
12647+{
12648+ struct snd_card *card = (struct snd_card *)arg;
12649+ struct timbi2s *i2s = snd_pcm_chip(card);
12650+ u32 uir = ioread32(i2s->membase + TIMBI2S_REG_UIR);
12651+ unsigned i;
12652+
12653+ dev_dbg(snd_card_get_device_link(card), "%s: Entry, UIR %x\n",
12654+ __func__, uir);
12655+
12656+ for (i = 0; i < i2s->num_busses; i++)
12657+ if (uir & (1 << i)) {
12658+ struct timbi2s_bus *bus = i2s->busses + i;
12659+ if (BUS_IS_RX(bus))
12660+ timbi2s_irq_process_rx(bus);
12661+ else
12662+ timbi2s_irq_process_tx(bus);
12663+ }
12664+
12665+ enable_irq(i2s->irq);
12666+}
12667+
12668+static irqreturn_t timbi2s_irq(int irq, void *devid)
12669+{
12670+ struct timbi2s *i2s = devid;
12671+
12672+ tasklet_schedule(&i2s->tasklet);
12673+ disable_irq_nosync(i2s->irq);
12674+
12675+ return IRQ_HANDLED;
12676+}
12677+
12678+static int timbi2s_setup_busses(struct snd_card *card,
12679+ struct platform_device *pdev)
12680+{
12681+ const struct timbi2s_platform_data *pdata = pdev->dev.platform_data;
12682+ unsigned i;
12683+
12684+ dev_dbg(&pdev->dev, "%s: Entry, no busses: %d, busses: %p\n", __func__,
12685+ pdata->num_busses, pdata->busses);
12686+
12687+ for (i = 0; i < pdata->num_busses; i++) {
12688+ int capture = pdata->busses[i].rx;
12689+ int err;
12690+ u32 ctl;
12691+ struct timbi2s *i2s = snd_pcm_chip(card);
12692+ struct timbi2s_bus *bus = i2s->busses + i;
12693+
12694+ dev_dbg(&pdev->dev, "%s: Setting up bus: %d\n", __func__, i);
12695+
12696+ SET_BUS_INDEX(bus, i);
12697+ bus->sample_rate = pdata->busses[i].sample_rate;
12698+ bus->card = card;
12699+ /* prescaling only applies to master busses, we use the
12700+ * knowledge of that to identify the direction later
12701+ * eg, bus->prescale != 0 -> master bus
12702+ */
12703+ if (capture)
12704+ SET_BUS_RX(bus);
12705+
12706+ spin_lock_init(&bus->lock);
12707+
12708+ if (bus->sample_rate != 44100 && bus->sample_rate != 8000) {
12709+ dev_err(&pdev->dev,
12710+ "Unsupported bitrate: %d\n", bus->sample_rate);
12711+ return -EINVAL;
12712+ }
12713+
12714+ dev_dbg(&pdev->dev, "%s: Will check HW direction on bus: %d\n",
12715+ __func__, BUS_INDEX(bus));
12716+
12717+ /* check that the HW agrees with the direction */
12718+ ctl = timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR);
12719+ if ((capture && !(ctl & TIMBI2S_ICOR_RX_ID)) ||
12720+ (!capture && !(ctl & TIMBI2S_ICOR_TX_ID))) {
12721+ dev_dbg(&pdev->dev,
12722+ "HW and platform data disagree on direction\n");
12723+ return -EINVAL;
12724+ }
12725+
12726+ dev_dbg(&pdev->dev, "%s: Will create PCM channel for bus: %d\n",
12727+ __func__, BUS_INDEX(bus));
12728+ err = snd_pcm_new(card, card->shortname, i, !capture,
12729+ capture, &bus->pcm);
12730+ if (err) {
12731+ dev_dbg(&pdev->dev, "%s, Failed to create pcm: %d\n",
12732+ __func__, err);
12733+ return err;
12734+ }
12735+
12736+ if (capture)
12737+ snd_pcm_set_ops(bus->pcm, SNDRV_PCM_STREAM_CAPTURE,
12738+ &timbi2s_capture_ops);
12739+ if (!capture)
12740+ snd_pcm_set_ops(bus->pcm, SNDRV_PCM_STREAM_PLAYBACK,
12741+ &timbi2s_playback_ops);
12742+
12743+ dev_dbg(&pdev->dev, "%s: Will preallocate buffers to bus: %d\n",
12744+ __func__, BUS_INDEX(bus));
12745+
12746+ err = snd_pcm_lib_preallocate_pages_for_all(bus->pcm,
12747+ SNDRV_DMA_TYPE_CONTINUOUS,
12748+ snd_dma_continuous_data(GFP_KERNEL),
12749+ NUM_SAMPLES * NUM_PERIODS * SAMPLE_SIZE * 2,
12750+ NUM_SAMPLES * NUM_PERIODS * SAMPLE_SIZE * 2);
12751+ if (err) {
12752+ dev_dbg(&pdev->dev, "%s, Failed to create pcm: %d\n",
12753+ __func__, err);
12754+
12755+ return err;
12756+ }
12757+
12758+ bus->pcm->private_data = bus;
12759+ bus->pcm->info_flags = 0;
12760+ strcpy(bus->pcm->name, card->shortname);
12761+ i2s->num_busses++;
12762+ }
12763+
12764+ return 0;
12765+}
12766+
12767+static int __devinit timbi2s_probe(struct platform_device *pdev)
12768+{
12769+ int err;
12770+ int irq;
12771+ struct timbi2s *i2s;
12772+ struct resource *iomem;
12773+ const struct timbi2s_platform_data *pdata = pdev->dev.platform_data;
12774+ struct snd_card *card;
12775+ u32 ver;
12776+
12777+ if (!pdata) {
12778+ err = -ENODEV;
12779+ goto out;
12780+ }
12781+
12782+ if (pdata->num_busses > MAX_BUSSES) {
12783+ err = -EINVAL;
12784+ goto out;
12785+ }
12786+
12787+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
12788+ if (!iomem) {
12789+ err = -ENODEV;
12790+ goto out;
12791+ }
12792+
12793+ irq = platform_get_irq(pdev, 0);
12794+ if (irq < 0) {
12795+ err = -ENODEV;
12796+ goto out;
12797+ }
12798+
12799+ err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
12800+ THIS_MODULE, sizeof(struct timbi2s) +
12801+ sizeof(struct timbi2s_bus) * pdata->num_busses, &card);
12802+ if (err)
12803+ goto out;
12804+
12805+ strcpy(card->driver, "Timberdale I2S");
12806+ strcpy(card->shortname, "Timberdale I2S");
12807+ sprintf(card->longname, "Timberdale I2S Driver");
12808+
12809+ snd_card_set_dev(card, &pdev->dev);
12810+
12811+ i2s = snd_pcm_chip(card);
12812+
12813+ if (!request_mem_region(iomem->start, resource_size(iomem),
12814+ DRIVER_NAME)) {
12815+ err = -EBUSY;
12816+ goto err_region;
12817+ }
12818+
12819+ i2s->membase = ioremap(iomem->start, resource_size(iomem));
12820+ if (!i2s->membase) {
12821+ err = -ENOMEM;
12822+ goto err_ioremap;
12823+ }
12824+
12825+ err = timbi2s_setup_busses(card, pdev);
12826+ if (err)
12827+ goto err_setup;
12828+
12829+ tasklet_init(&i2s->tasklet, timbi2s_tasklet, (unsigned long)card);
12830+ i2s->irq = irq;
12831+ i2s->main_clk = pdata->main_clk;
12832+
12833+ err = request_irq(irq, timbi2s_irq, 0, DRIVER_NAME, i2s);
12834+ if (err)
12835+ goto err_request_irq;
12836+
12837+ err = snd_card_register(card);
12838+ if (err)
12839+ goto err_register;
12840+
12841+ platform_set_drvdata(pdev, card);
12842+
12843+ ver = ioread32(i2s->membase + TIMBI2S_REG_VER);
12844+
12845+ printk(KERN_INFO
12846+ "Driver for Timberdale I2S (ver: %d.%d) successfully probed.\n",
12847+ ver >> 16 , ver & 0xffff);
12848+
12849+ return 0;
12850+
12851+err_register:
12852+ free_irq(irq, i2s);
12853+err_request_irq:
12854+err_setup:
12855+ iounmap(i2s->membase);
12856+err_ioremap:
12857+ release_mem_region(iomem->start, resource_size(iomem));
12858+err_region:
12859+ snd_card_free(card);
12860+out:
12861+ printk(KERN_ERR DRIVER_NAME": Failed to register: %d\n", err);
12862+
12863+ return err;
12864+}
12865+
12866+static int __devexit timbi2s_remove(struct platform_device *pdev)
12867+{
12868+ struct snd_card *card = platform_get_drvdata(pdev);
12869+ struct timbi2s *i2s = snd_pcm_chip(card);
12870+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
12871+
12872+ tasklet_kill(&i2s->tasklet);
12873+ free_irq(i2s->irq, i2s);
12874+
12875+ iounmap(i2s->membase);
12876+ release_mem_region(iomem->start, resource_size(iomem));
12877+ snd_card_free(card);
12878+
12879+ platform_set_drvdata(pdev, 0);
12880+ return 0;
12881+}
12882+
12883+static struct platform_driver timbi2s_platform_driver = {
12884+ .driver = {
12885+ .name = DRIVER_NAME,
12886+ .owner = THIS_MODULE,
12887+ },
12888+ .probe = timbi2s_probe,
12889+ .remove = __devexit_p(timbi2s_remove),
12890+};
12891+
12892+/*--------------------------------------------------------------------------*/
12893+
12894+static int __init timbi2s_init(void)
12895+{
12896+ return platform_driver_register(&timbi2s_platform_driver);
12897+}
12898+
12899+static void __exit timbi2s_exit(void)
12900+{
12901+ platform_driver_unregister(&timbi2s_platform_driver);
12902+}
12903+
12904+module_init(timbi2s_init);
12905+module_exit(timbi2s_exit);
12906+
12907+MODULE_ALIAS("platform:"DRIVER_NAME);
12908+MODULE_DESCRIPTION("Timberdale I2S bus driver");
12909+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
12910+MODULE_LICENSE("GPL v2");
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-2-2-timberdale.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-2-2-timberdale.patch
deleted file mode 100644
index 3a7e27881e..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-2-2-timberdale.patch
+++ /dev/null
@@ -1,44 +0,0 @@
1From 9de5f61c79361bf6e9394d2f77a2b436d53deee5 Mon Sep 17 00:00:00 2001
2From: Yong Wang <yong.y.wang@intel.com>
3Date: Tue, 30 Jun 2009 14:17:19 +0800
4Subject: [PATCH] Revert "net: num_dma_maps is not used"
5
6This reverts commit eae3f29cc73f83cc3f1891d3ad40021b5172c630.
7
8The IVI driver is a user of num_dma_maps.
9
10Signed-off-by: Yong Wang <yong.y.wang@intel.com>
11---
12 include/linux/skbuff.h | 3 +++
13 net/core/skb_dma_map.c | 1 +
14 2 files changed, 4 insertions(+), 0 deletions(-)
15
16diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
17index b47b3f0..468bc21 100644
18--- a/include/linux/skbuff.h
19+++ b/include/linux/skbuff.h
20@@ -198,6 +198,9 @@ struct skb_shared_info {
21 unsigned short gso_type;
22 __be32 ip6_frag_id;
23 union skb_shared_tx tx_flags;
24+#ifdef CONFIG_HAS_DMA
25+ unsigned int num_dma_maps;
26+#endif
27 struct sk_buff *frag_list;
28 struct skb_shared_hwtstamps hwtstamps;
29 skb_frag_t frags[MAX_SKB_FRAGS];
30diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c
31index 79687df..07d4ac5 100644
32--- a/net/core/skb_dma_map.c
33+++ b/net/core/skb_dma_map.c
34@@ -30,6 +30,7 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb,
35 goto unwind;
36 sp->dma_maps[i] = map;
37 }
38+ sp->num_dma_maps = i + 1;
39
40 return 0;
41
42--
431.6.0.6
44
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-bluetooth-suspend.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-bluetooth-suspend.patch
deleted file mode 100644
index 786e1f2fcd..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-bluetooth-suspend.patch
+++ /dev/null
@@ -1,465 +0,0 @@
1diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
2index e70c57e..8e36fc8 100644
3--- a/drivers/bluetooth/btusb.c
4+++ b/drivers/bluetooth/btusb.c
5@@ -35,7 +36,7 @@
6 #include <net/bluetooth/bluetooth.h>
7 #include <net/bluetooth/hci_core.h>
8
9-#define VERSION "0.5"
10+#define VERSION "0.6"
11
12 static int ignore_dga;
13 static int ignore_csr;
14@@ -145,6 +146,7 @@ static struct usb_device_id blacklist_table[] = {
15 #define BTUSB_INTR_RUNNING 0
16 #define BTUSB_BULK_RUNNING 1
17 #define BTUSB_ISOC_RUNNING 2
18+#define BTUSB_SUSPENDING 3
19
20 struct btusb_data {
21 struct hci_dev *hdev;
22@@ -157,11 +159,15 @@ struct btusb_data {
23 unsigned long flags;
24
25 struct work_struct work;
26+ struct work_struct waker;
27
28 struct usb_anchor tx_anchor;
29 struct usb_anchor intr_anchor;
30 struct usb_anchor bulk_anchor;
31 struct usb_anchor isoc_anchor;
32+ struct usb_anchor deferred;
33+ int tx_in_flight;
34+ spinlock_t txlock;
35
36 struct usb_endpoint_descriptor *intr_ep;
37 struct usb_endpoint_descriptor *bulk_tx_ep;
38@@ -174,8 +180,26 @@ struct btusb_data {
39 unsigned int sco_num;
40 int isoc_altsetting;
41 int suspend_count;
42+ int did_iso_resume:1;
43 };
44
45+static int inc_tx(struct btusb_data *data)
46+{
47+ unsigned long flags;
48+ int rv;
49+
50+ spin_lock_irqsave(&data->txlock, flags);
51+ rv = test_bit(BTUSB_SUSPENDING, &data->flags);
52+ BT_DBG("BTUSB_SUSPENDING bit = %d for intf %p in %s",
53+ rv, data->intf, __func__);
54+ if (!rv)
55+ data->tx_in_flight++;
56+ spin_unlock_irqrestore(&data->txlock, flags);
57+
58+ return rv;
59+}
60+
61+
62 static void btusb_intr_complete(struct urb *urb)
63 {
64 struct hci_dev *hdev = urb->context;
65@@ -202,6 +226,7 @@ static void btusb_intr_complete(struct urb *urb)
66 if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
67 return;
68
69+ usb_mark_last_busy(data->udev);
70 usb_anchor_urb(urb, &data->intr_anchor);
71
72 err = usb_submit_urb(urb, GFP_ATOMIC);
73@@ -327,6 +352,7 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags)
74
75 urb->transfer_flags |= URB_FREE_BUFFER;
76
77+ usb_mark_last_busy(data->udev);
78 usb_anchor_urb(urb, &data->bulk_anchor);
79
80 err = usb_submit_urb(urb, mem_flags);
81@@ -465,6 +491,33 @@ static void btusb_tx_complete(struct urb *urb)
82 {
83 struct sk_buff *skb = urb->context;
84 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
85+ struct btusb_data *data = hdev->driver_data;
86+
87+ BT_DBG("%s urb %p status %d count %d", hdev->name,
88+ urb, urb->status, urb->actual_length);
89+
90+ if (!test_bit(HCI_RUNNING, &hdev->flags))
91+ goto done;
92+
93+ if (!urb->status)
94+ hdev->stat.byte_tx += urb->transfer_buffer_length;
95+ else
96+ hdev->stat.err_tx++;
97+
98+done:
99+ spin_lock(&data->txlock);
100+ data->tx_in_flight--;
101+ spin_unlock(&data->txlock);
102+
103+ kfree(urb->setup_packet);
104+
105+ kfree_skb(skb);
106+}
107+
108+static void btusb_isoc_tx_complete(struct urb *urb)
109+{
110+ struct sk_buff *skb = urb->context;
111+ struct hci_dev *hdev = (struct hci_dev *) skb->dev;
112
113 BT_DBG("%s urb %p status %d count %d", hdev->name,
114 urb, urb->status, urb->actual_length);
115@@ -490,11 +543,16 @@ static int btusb_open(struct hci_dev *hdev)
116
117 BT_DBG("%s", hdev->name);
118
119+ err = usb_autopm_get_interface(data->intf);
120+ if (err < 0)
121+ return err;
122+ data->intf->needs_remote_wakeup = 1;
123+
124 if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
125- return 0;
126+ goto out;
127
128 if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
129- return 0;
130+ goto out;
131
132 err = btusb_submit_intr_urb(hdev, GFP_KERNEL);
133 if (err < 0)
134@@ -502,6 +560,7 @@ static int btusb_open(struct hci_dev *hdev)
135
136 err = btusb_submit_bulk_urb(hdev, GFP_KERNEL);
137 if (err < 0) {
138+ BT_DBG("kill urbs %s", __func__);
139 usb_kill_anchored_urbs(&data->intr_anchor);
140 goto failed;
141 }
142@@ -509,17 +568,28 @@ static int btusb_open(struct hci_dev *hdev)
143 set_bit(BTUSB_BULK_RUNNING, &data->flags);
144 btusb_submit_bulk_urb(hdev, GFP_KERNEL);
145
146+out:
147+ usb_autopm_put_interface(data->intf);
148 return 0;
149
150 failed:
151 clear_bit(BTUSB_INTR_RUNNING, &data->flags);
152 clear_bit(HCI_RUNNING, &hdev->flags);
153+ usb_autopm_put_interface(data->intf);
154 return err;
155 }
156
157+static void btusb_stop_traffic(struct btusb_data *data)
158+{
159+ usb_kill_anchored_urbs(&data->intr_anchor);
160+ usb_kill_anchored_urbs(&data->bulk_anchor);
161+ usb_kill_anchored_urbs(&data->isoc_anchor);
162+}
163+
164 static int btusb_close(struct hci_dev *hdev)
165 {
166 struct btusb_data *data = hdev->driver_data;
167+ int err;
168
169 BT_DBG("%s", hdev->name);
170
171@@ -529,13 +599,16 @@ static int btusb_close(struct hci_dev *hdev)
172 cancel_work_sync(&data->work);
173
174 clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
175- usb_kill_anchored_urbs(&data->isoc_anchor);
176-
177 clear_bit(BTUSB_BULK_RUNNING, &data->flags);
178- usb_kill_anchored_urbs(&data->bulk_anchor);
179-
180 clear_bit(BTUSB_INTR_RUNNING, &data->flags);
181- usb_kill_anchored_urbs(&data->intr_anchor);
182+
183+ BT_DBG("kill urbs %s", __func__);
184+ btusb_stop_traffic(data);
185+ err = usb_autopm_get_interface(data->intf);
186+ if (!err) {
187+ data->intf->needs_remote_wakeup = 0;
188+ usb_autopm_put_interface(data->intf);
189+ }
190
191 return 0;
192 }
193@@ -546,6 +619,7 @@ static int btusb_flush(struct hci_dev *hdev)
194
195 BT_DBG("%s", hdev->name);
196
197+ BT_DBG("kill urbs %s", __func__);
198 usb_kill_anchored_urbs(&data->tx_anchor);
199
200 return 0;
201@@ -622,7 +696,7 @@ static int btusb_send_frame(struct sk_buff *skb)
202 urb->dev = data->udev;
203 urb->pipe = pipe;
204 urb->context = skb;
205- urb->complete = btusb_tx_complete;
206+ urb->complete = btusb_isoc_tx_complete;
207 urb->interval = data->isoc_tx_ep->bInterval;
208
209 urb->transfer_flags = URB_ISO_ASAP;
210@@ -633,12 +707,23 @@ static int btusb_send_frame(struct sk_buff *skb)
211 le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
212
213 hdev->stat.sco_tx++;
214- break;
215+ goto skip_waking;
216
217 default:
218 return -EILSEQ;
219 }
220
221+ err = inc_tx(data);
222+ if (err) {
223+
224+ usb_anchor_urb(urb, &data->deferred);
225+ schedule_work(&data->waker);
226+ err = 0;
227+ goto out;
228+ } else {
229+
230+ }
231+skip_waking:
232 usb_anchor_urb(urb, &data->tx_anchor);
233
234 err = usb_submit_urb(urb, GFP_ATOMIC);
235@@ -646,10 +731,13 @@ static int btusb_send_frame(struct sk_buff *skb)
236 BT_ERR("%s urb %p submission failed", hdev->name, urb);
237 kfree(urb->setup_packet);
238 usb_unanchor_urb(urb);
239+ } else {
240+ usb_mark_last_busy(data->udev);
241 }
242
243 usb_free_urb(urb);
244
245+out:
246 return err;
247 }
248
249@@ -721,10 +809,23 @@ static void btusb_work(struct work_struct *work)
250 {
251 struct btusb_data *data = container_of(work, struct btusb_data, work);
252 struct hci_dev *hdev = data->hdev;
253+ int err;
254
255 if (hdev->conn_hash.sco_num > 0) {
256+ if (!data->did_iso_resume) {
257+ err = usb_autopm_get_interface(data->isoc);
258+ if (!err) {
259+ data->did_iso_resume = 1;
260+ } else {
261+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
262+ BT_DBG("kill urbs %s", __func__);
263+ usb_kill_anchored_urbs(&data->isoc_anchor);
264+ return;
265+ }
266+ }
267 if (data->isoc_altsetting != 2) {
268 clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
269+ BT_DBG("kill urbs %s", __func__);
270 usb_kill_anchored_urbs(&data->isoc_anchor);
271
272 if (__set_isoc_interface(hdev, 2) < 0)
273@@ -739,12 +840,28 @@ static void btusb_work(struct work_struct *work)
274 }
275 } else {
276 clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
277+ BT_DBG("kill urbs %s", __func__);
278 usb_kill_anchored_urbs(&data->isoc_anchor);
279
280 __set_isoc_interface(hdev, 0);
281+ if (data->did_iso_resume) {
282+ data->did_iso_resume = 0;
283+ usb_autopm_put_interface(data->isoc);
284+ }
285 }
286 }
287
288+static void btusb_waker(struct work_struct *work)
289+{
290+ struct btusb_data *data = container_of(work, struct btusb_data, waker);
291+ int err;
292+
293+
294+ err = usb_autopm_get_interface(data->intf);
295+ if (!err)
296+ usb_autopm_put_interface(data->intf);
297+}
298+
299 static int btusb_probe(struct usb_interface *intf,
300 const struct usb_device_id *id)
301 {
302@@ -814,11 +931,14 @@ static int btusb_probe(struct usb_interface *intf,
303 spin_lock_init(&data->lock);
304
305 INIT_WORK(&data->work, btusb_work);
306+ INIT_WORK(&data->waker, btusb_waker);
307+ spin_lock_init(&data->txlock);
308
309 init_usb_anchor(&data->tx_anchor);
310 init_usb_anchor(&data->intr_anchor);
311 init_usb_anchor(&data->bulk_anchor);
312 init_usb_anchor(&data->isoc_anchor);
313+ init_usb_anchor(&data->deferred);
314
315 hdev = hci_alloc_dev();
316 if (!hdev) {
317@@ -949,39 +1069,78 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
318
319 BT_DBG("intf %p", intf);
320
321- if (data->suspend_count++)
322+ if (data->suspend_count++) {
323+ BT_DBG("data->suspend_count = %d for intf %p, returning from %s",
324+ data->suspend_count, intf, __func__);
325 return 0;
326+ }
327+ BT_DBG("data->suspend_count = %d for intf %p, continuing %s",
328+ data->suspend_count, intf, __func__);
329+
330+ spin_lock_irq(&data->txlock);
331+ if (!(interface_to_usbdev(intf)->auto_pm && data->tx_in_flight)) {
332+ BT_DBG("Setting BTUSB_SUSPENDING bit in %s for intf %p",
333+ __func__, intf);
334+ set_bit(BTUSB_SUSPENDING, &data->flags);
335+ spin_unlock_irq(&data->txlock);
336+ } else {
337+ spin_unlock_irq(&data->txlock);
338+ BT_DBG("%d URBs in flight", data->tx_in_flight);
339+ data->suspend_count--;
340+ return -EBUSY;
341+ }
342
343 cancel_work_sync(&data->work);
344
345+ BT_DBG("kill urbs %s", __func__);
346+ btusb_stop_traffic(data);
347 usb_kill_anchored_urbs(&data->tx_anchor);
348
349- usb_kill_anchored_urbs(&data->isoc_anchor);
350- usb_kill_anchored_urbs(&data->bulk_anchor);
351- usb_kill_anchored_urbs(&data->intr_anchor);
352-
353 return 0;
354 }
355
356+static void play_deferred(struct btusb_data *data)
357+{
358+ struct urb *urb;
359+ int err;
360+
361+ while ((urb = usb_get_from_anchor(&data->deferred))) {
362+ err = usb_submit_urb(urb, GFP_ATOMIC);
363+ if (err < 0)
364+ break;
365+ else
366+ data->tx_in_flight++;
367+
368+ }
369+ usb_scuttle_anchored_urbs(&data->deferred);
370+}
371+
372 static int btusb_resume(struct usb_interface *intf)
373 {
374 struct btusb_data *data = usb_get_intfdata(intf);
375 struct hci_dev *hdev = data->hdev;
376- int err;
377+ int err = 0;
378
379 BT_DBG("intf %p", intf);
380
381- if (--data->suspend_count)
382+ if (--data->suspend_count) {
383+ BT_DBG("data->suspend_count = %d for intf %p, returning from %s",
384+ data->suspend_count, intf, __func__);
385 return 0;
386+ }
387
388- if (!test_bit(HCI_RUNNING, &hdev->flags))
389- return 0;
390+ if (!test_bit(HCI_RUNNING, &hdev->flags)) {
391+ BT_DBG("HCI not running, returning from %s", __func__);
392+ goto no_io_needed;
393+ }
394
395 if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) {
396 err = btusb_submit_intr_urb(hdev, GFP_NOIO);
397 if (err < 0) {
398 clear_bit(BTUSB_INTR_RUNNING, &data->flags);
399- return err;
400+ BT_DBG("Error (%d) submitting interrupt URB, returning from %s",
401+ err, __func__);
402+ goto err_out;
403 }
404 }
405
406@@ -989,9 +1148,12 @@ static int btusb_resume(struct usb_interface *intf)
407 err = btusb_submit_bulk_urb(hdev, GFP_NOIO);
408 if (err < 0) {
409 clear_bit(BTUSB_BULK_RUNNING, &data->flags);
410- return err;
411- } else
412+ BT_DBG("Error (%d) submitting bulk URB, returning from %s",
413+ err, __func__);
414+ goto err_out;
415+ } else {
416 btusb_submit_bulk_urb(hdev, GFP_NOIO);
417+ }
418 }
419
420 if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
421@@ -1001,7 +1163,24 @@ static int btusb_resume(struct usb_interface *intf)
422 btusb_submit_isoc_urb(hdev, GFP_NOIO);
423 }
424
425+ spin_lock_irq(&data->txlock);
426+ play_deferred(data);
427+ BT_DBG("Clearing BTUSB_SUSPENDING bit in %s for intf %p", __func__, intf);
428+ clear_bit(BTUSB_SUSPENDING, &data->flags);
429+ spin_unlock_irq(&data->txlock);
430+ schedule_work(&data->work);
431+
432 return 0;
433+
434+err_out:
435+ usb_scuttle_anchored_urbs(&data->deferred);
436+no_io_needed:
437+ spin_lock_irq(&data->txlock);
438+ BT_DBG("Clearing BTUSB_SUSPENDING bit in %s for intf %p", __func__, intf);
439+ clear_bit(BTUSB_SUSPENDING, &data->flags);
440+ spin_unlock_irq(&data->txlock);
441+
442+ return err;
443 }
444
445 static struct usb_driver btusb_driver = {
446@@ -1011,6 +1190,7 @@ static struct usb_driver btusb_driver = {
447 .suspend = btusb_suspend,
448 .resume = btusb_resume,
449 .id_table = btusb_table,
450+ .supports_autosuspend = 1,
451 };
452
453 static int __init btusb_init(void)
454diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
455index e70c57e..ac94f91 100644
456--- a/drivers/bluetooth/btusb.c
457+++ b/drivers/bluetooth/btusb.c
458@@ -908,6 +967,7 @@ static int btusb_probe(struct usb_interface *intf,
459 }
460
461 usb_set_intfdata(intf, data);
462+ usb_device_autosuspend_enable(data->udev);
463
464 return 0;
465 }
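
The heart of the bluetooth-suspend patch above is a small gating pattern: transmit paths bump tx_in_flight unless BTUSB_SUSPENDING is set, in which case the URB is parked on the deferred anchor and replayed at resume, while runtime suspend refuses with -EBUSY when frames are still in flight. A distilled, generically named sketch of that pattern (not the driver's own code):

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Generic form of the gating btusb gains here; the driver's own fields are
 * tx_in_flight, the BTUSB_SUSPENDING flag and the "deferred" URB anchor. */
struct tx_gate {
	spinlock_t lock;
	bool suspending;
	int in_flight;
};

/* Mirrors inc_tx(): count the frame if transmission may proceed, otherwise
 * tell the caller to park it (btusb anchors the URB and wakes a worker). */
static int gate_inc_tx(struct tx_gate *g)
{
	unsigned long flags;
	int deferring;

	spin_lock_irqsave(&g->lock, flags);
	deferring = g->suspending;
	if (!deferring)
		g->in_flight++;
	spin_unlock_irqrestore(&g->lock, flags);
	return deferring;
}

/* Completion side, as in btusb_tx_complete(). */
static void gate_tx_done(struct tx_gate *g)
{
	spin_lock(&g->lock);
	g->in_flight--;
	spin_unlock(&g->lock);
}

/* Refuse (runtime) suspend while frames are in flight, mirroring the
 * -EBUSY path in btusb_suspend(). */
static int gate_suspend(struct tx_gate *g)
{
	spin_lock_irq(&g->lock);
	if (g->in_flight) {
		spin_unlock_irq(&g->lock);
		return -EBUSY;
	}
	g->suspending = true;
	spin_unlock_irq(&g->lock);
	return 0;
}
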
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-fix.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-fix.patch
deleted file mode 100644
index fabe878413..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-fix.patch
+++ /dev/null
@@ -1,26 +0,0 @@
1From 70ae749d15e012ab4a33aa2abe7a4d97a4dcebdb Mon Sep 17 00:00:00 2001
2From: Li Peng <peng.li@intel.com>
3Date: Thu, 20 Aug 2009 13:54:04 +0800
4Subject: Add G33 series in VGA hotplug support category
5
6Tested on the IGD chip, which is a G33-like graphics device.
7---
8 drivers/gpu/drm/i915/i915_drv.h | 2 +-
9 1 files changed, 1 insertions(+), 1 deletions(-)
10
11diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
12index 7537f57..940ee4c 100644
13--- a/drivers/gpu/drm/i915/i915_drv.h
14+++ b/drivers/gpu/drm/i915/i915_drv.h
15@@ -892,7 +892,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
16 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
17 #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
18 #define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
19-#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
20+#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
21 /* dsparb controlled by hw only */
22 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
23
24--
251.6.1.3
26
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-opregion.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-opregion.patch
deleted file mode 100644
index a9b5e03cec..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-opregion.patch
+++ /dev/null
@@ -1,43 +0,0 @@
1diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
2index e4b4e88..2d51935 100644
3--- a/drivers/gpu/drm/i915/i915_opregion.c
4+++ b/drivers/gpu/drm/i915/i915_opregion.c
5@@ -148,6 +148,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
6 struct drm_i915_private *dev_priv = dev->dev_private;
7 struct opregion_asle *asle = dev_priv->opregion.asle;
8 u32 blc_pwm_ctl, blc_pwm_ctl2;
9+ u32 max_backlight, level, shift;
10
11 if (!(bclp & ASLE_BCLP_VALID))
12 return ASLE_BACKLIGHT_FAIL;
13@@ -157,14 +158,25 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
14 return ASLE_BACKLIGHT_FAIL;
15
16 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
17- blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
18 blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
19
20- if (blc_pwm_ctl2 & BLM_COMBINATION_MODE)
21+ if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
22 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
23- else
24- I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1));
25-
26+ else {
27+ if (IS_IGD(dev)) {
28+ blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
29+ max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
30+ BACKLIGHT_MODULATION_FREQ_SHIFT;
31+ shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
32+ } else {
33+ blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
34+ max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
35+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
36+ shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
37+ }
38+ level = (bclp * max_backlight) / 255;
39+ I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
40+ }
41 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
42
43 return 0;
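Note: the branch above rescales the ASLE backlight request bclp (0-255) into the duty-cycle range read back from BLC_PWM_CTL, instead of writing (bclp * 0x101) - 1 directly as the removed line did. With a purely illustrative frequency-field value of 0x4000 on a non-IGD part, max_backlight = 0x4000 * 2 = 32768, and a request of bclp = 128 gives level = (128 * 32768) / 255 = 16448, written at BACKLIGHT_DUTY_CYCLE_SHIFT, i.e. roughly half of the hardware range. On IGD parts the frequency field is used as-is and the duty cycle is shifted one extra bit, as the other branch shows.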
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-vblank-fix.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-vblank-fix.patch
deleted file mode 100644
index ef136c9877..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-vblank-fix.patch
+++ /dev/null
@@ -1,26 +0,0 @@
1diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
2index f85aaf2..2e5841e 100644
3--- a/drivers/gpu/drm/drm_irq.c
4+++ b/drivers/gpu/drm/drm_irq.c
5@@ -412,6 +412,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
6 dev->vblank_enabled[crtc] = 1;
7 drm_update_vblank_count(dev, crtc);
8 }
9+ } else if (atomic_read(&dev->vblank_refcount[crtc]) > 1) {
10+ atomic_dec(&dev->vblank_refcount[crtc]);
11+ ret = -EINVAL;
12 }
13 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
14
15diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
16index 748ed50..9cb07a5 100644
17--- a/drivers/gpu/drm/i915/intel_display.c
18+++ b/drivers/gpu/drm/i915/intel_display.c
19@@ -1549,6 +1549,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
20
21 /* Wait for vblank for the disable to take effect. */
22 intel_wait_for_vblank(dev);
23+ dev->vblank_enabled[pipe] = 0;
24
25 temp = I915_READ(dpll_reg);
26 if ((temp & DPLL_VCO_ENABLE) != 0) {
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-kms-flip.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-kms-flip.patch
deleted file mode 100644
index 1b85ecba09..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-kms-flip.patch
+++ /dev/null
@@ -1,307 +0,0 @@
1From 4e8354884daa2ee3e491bae69a81f85a2d1ca8ba Mon Sep 17 00:00:00 2001
2From: Fei Jiang <fei.jiang@intel.com>
3Date: Mon, 3 Aug 2009 11:31:53 -0400
4Subject: [PATCH] change for general drm code to implement kms-flip feature
5
6
7Signed-off-by: Fei Jiang <fei.jiang@intel.com>
8---
9 drivers/gpu/drm/drm_crtc.c | 128 ++++++++++++++++++++++++++++++++++++++++++++
10 drivers/gpu/drm/drm_drv.c | 1 +
11 drivers/gpu/drm/drm_irq.c | 30 ++++++++++
12 include/drm/drm.h | 1 +
13 include/drm/drmP.h | 9 +++
14 include/drm/drm_crtc.h | 12 ++++
15 include/drm/drm_mode.h | 16 ++++++
16 7 files changed, 197 insertions(+), 0 deletions(-)
17
18diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
19index 8fab789..3ada446
20--- a/drivers/gpu/drm/drm_crtc.c
21+++ b/drivers/gpu/drm/drm_crtc.c
22@@ -2452,3 +2452,131 @@ out:
23 mutex_unlock(&dev->mode_config.mutex);
24 return ret;
25 }
26+
27+/**
28+ * drm_mode_page_flip_ioctl - page flip ioctl
29+ * @dev: DRM device
30+ * @data: ioctl args
31+ * @file_priv: file private data
32+ *
33+ * The page flip ioctl replaces the current front buffer with a new
34+ * one, using the CRTC's set_base function, which should just update
35+ * the front buffer base pointer. It's up to set_base to make
36+ * sure the update doesn't result in tearing (on some hardware the
37+ * base register is double buffered, so this is easy).
38+ *
39+ * Note that this covers just the simple case of flipping the front
40+ * buffer immediately. Interval handling and interlaced modes have to
41+ * be handled by userspace, or with new ioctls.
42+ */
43+int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data,
44+ struct drm_file *file_priv)
45+{
46+#if 0
47+ struct drm_pending_flip *pending;
48+#endif
49+
50+ struct drm_mode_page_flip *flip_data = data;
51+ struct drm_mode_object *drm_obj, *fb_obj;
52+ struct drm_crtc *crtc;
53+ int ret = 0;
54+
55+ if (!(drm_core_check_feature(dev, DRIVER_MODESET)))
56+ return -ENODEV;
57+
58+ /*
59+ * Reject unknown flags so future userspace knows what we (don't)
60+ * support
61+ */
62+ if (flip_data->flags & (~DRM_MODE_PAGE_FLIP_FLAGS_MASK)) {
63+ DRM_DEBUG("bad page flip flags\n");
64+ return -EINVAL;
65+ }
66+#if 0
67+ pending = kzalloc(sizeof *pending, GFP_KERNEL);
68+ if (pending == NULL)
69+ return -ENOMEM;
70+#endif
71+ mutex_lock(&dev->struct_mutex);
72+
73+ fb_obj = drm_mode_object_find(dev, flip_data->fb_id,
74+ DRM_MODE_OBJECT_FB);
75+ if (!fb_obj) {
76+ DRM_DEBUG("unknown fb %d\n", flip_data->fb_id);
77+ ret = -ENOENT;
78+ goto out_unlock;
79+ }
80+
81+ drm_obj = drm_mode_object_find(dev, flip_data->crtc_id,
82+ DRM_MODE_OBJECT_CRTC);
83+ if (!drm_obj) {
84+ DRM_DEBUG("unknown crtc %d\n", flip_data->crtc_id);
85+ ret = -ENOENT;
86+ goto out_unlock;
87+ }
88+ crtc = obj_to_crtc(drm_obj);
89+ if (!crtc->enabled) {
90+ DRM_DEBUG("crtc %d not enabled\n", flip_data->crtc_id);
91+ ret = -EINVAL;
92+ goto out_unlock;
93+ }
94+
95+#if 0
96+ if (crtc->fb->funcs->unpin == NULL) {
97+ DRM_DEBUG("fb for crtc %d does not support delayed unpin\n",
98+ flip_data->crtc_id);
99+ ret = -ENODEV;
100+ goto out_unlock;
101+ }
102+
103+ pending->crtc = crtc;
104+ pending->old_fb = crtc->fb;
105+ pending->pipe = crtc->pipe;
106+ pending->event.base.type = DRM_EVENT_MODE_PAGE_FLIP;
107+ pending->event.base.length = sizeof pending->event;
108+ pending->event.user_data = flip_data->user_data;
109+ pending->pending_event.event = &pending->event.base;
110+ pending->pending_event.file_priv = file_priv;
111+ pending->pending_event.destroy =
112+ (void (*) (struct drm_pending_event *)) kfree;
113+
114+ /* Get vblank ref for completion handling */
115+ ret = drm_vblank_get(dev, crtc->pipe);
116+ if (ret) {
117+ DRM_DEBUG("failed to take vblank ref\n");
118+ goto out_unlock;
119+ }
120+
121+ pending->frame = drm_vblank_count(dev, crtc->pipe);
122+ list_add_tail(&pending->link, &dev->flip_list);
123+#endif
124+
125+ /*
126+ * The set_base call will change the domain on the new fb,
127+ * which will force the rendering to finish and block the
128+ * ioctl. We need to do this last part from a work queue, to
129+ * avoid blocking userspace here.
130+ */
131+ crtc->fb = obj_to_fb(fb_obj);
132+retry_set:
133+ ret = (*crtc->funcs->set_base)(crtc, 0, 0, NULL);
134+ if (ret == -ERESTARTSYS)
135+ goto retry_set;
136+
137+ if (ret) {
138+ DRM_ERROR("set_base failed: %d\n", ret);
139+ goto out_unlock;
140+ }
141+
142+ mutex_unlock(&dev->struct_mutex);
143+
144+ return 0;
145+
146+out_unlock:
147+ mutex_unlock(&dev->struct_mutex);
148+#if 0
149+ kfree(pending);
150+#endif
151+ return ret;
152+}
153+
154diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
155index 1ce7977..761c2ec
156--- a/drivers/gpu/drm/drm_drv.c
157+++ b/drivers/gpu/drm/drm_drv.c
158@@ -145,6 +145,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
159 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
160 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
161 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
162+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
163 };
164
165 #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
166diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
167index b4a3dbc..d5104df
168--- a/drivers/gpu/drm/drm_irq.c
169+++ b/drivers/gpu/drm/drm_irq.c
170@@ -71,6 +71,28 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
171 return 0;
172 }
173
174+#if 0
175+static void drm_flip_work_func(struct work_struct *work)
176+{
177+ struct drm_device *dev =
178+ container_of(work, struct drm_device, flip_work);
179+#if 0
180+ struct drm_pending_flip *f, *t;
181+#endif
182+ u32 frame;
183+
184+ mutex_lock(&dev->struct_mutex);
185+
186+ list_for_each_entry_safe(f, t, &dev->flip_list, link) {
187+ frame = drm_vblank_count(dev, f->pipe);
188+ if (vblank_after(frame, f->frame))
189+ drm_finish_pending_flip(dev, f, frame);
190+ }
191+
192+ mutex_unlock(&dev->struct_mutex);
193+}
194+#endif
195+
196 static void vblank_disable_fn(unsigned long arg)
197 {
198 struct drm_device *dev = (struct drm_device *)arg;
199@@ -161,6 +183,11 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
200 atomic_set(&dev->vblank_refcount[i], 0);
201 }
202
203+#if 0
204+ INIT_LIST_HEAD(&dev->flip_list);
205+ INIT_WORK(&dev->flip_work, drm_flip_work_func);
206+#endif
207+
208 dev->vblank_disable_allowed = 0;
209
210 return 0;
211@@ -626,5 +653,8 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
212 {
213 atomic_inc(&dev->_vblank_count[crtc]);
214 DRM_WAKEUP(&dev->vbl_queue[crtc]);
215+#if 0
216+ schedule_work(&dev->flip_work);
217+#endif
218 }
219 EXPORT_SYMBOL(drm_handle_vblank);
220diff --git a/include/drm/drm.h b/include/drm/drm.h
221index 7cb50bd..78bd91b
222--- a/include/drm/drm.h
223+++ b/include/drm/drm.h
224@@ -686,6 +686,7 @@ struct drm_gem_open {
225 #define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
226 #define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
227 #define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
228+#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOW( 0xB0, struct drm_mode_page_flip)
229
230 /**
231 * Device specific ioctls should only be in their respective headers
232diff --git a/include/drm/drmP.h b/include/drm/drmP.h
233index c5122bf..36f9e6a
234--- a/include/drm/drmP.h
235+++ b/include/drm/drmP.h
236@@ -976,6 +976,15 @@ struct drm_device {
237 cycles_t ctx_start;
238 cycles_t lck_start;
239
240+ struct work_struct flip_work;
241+
242+#if 0
243+ /**
244+ * List of objects waiting on flip completion
245+ */
246+ struct list_head flip_list;
247+#endif
248+
249 struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
250 wait_queue_head_t buf_readers; /**< Processes waiting to read */
251 wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */
252diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
253index 7300fb8..742c870
254--- a/include/drm/drm_crtc.h
255+++ b/include/drm/drm_crtc.h
256@@ -331,6 +331,16 @@ struct drm_crtc_funcs {
257 void (*destroy)(struct drm_crtc *crtc);
258
259 int (*set_config)(struct drm_mode_set *set);
260+
261+ /*
262+ * Move the crtc on the current fb to the given position.
263+ * This function is optional. If old_fb is provided, the
264+ * function will wait for vblank and unpin it. If old_fb is
265+ * NULL, nothing is unpinned and the caller must call
266+ * mode_unpin_fb to release the old framebuffer.
267+ */
268+ int (*set_base)(struct drm_crtc *crtc, int x, int y,
269+ struct drm_framebuffer *old_fb);
270 };
271
272 /**
273@@ -736,4 +746,6 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
274 extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
275 void *data, struct drm_file *file_priv);
276 extern bool drm_detect_hdmi_monitor(struct edid *edid);
277+extern int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data,
278+ struct drm_file *file_priv);
279 #endif /* __DRM_CRTC_H__ */
280diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
281index ae304cc..464b779
282--- a/include/drm/drm_mode.h
283+++ b/include/drm/drm_mode.h
284@@ -265,4 +265,20 @@ struct drm_mode_crtc_lut {
285 __u64 blue;
286 };
287
288+#define DRM_MODE_PAGE_FLIP_WAIT (1<<0) /* block on previous page flip */
289+#define DRM_MODE_PAGE_FLIP_FLAGS_MASK (DRM_MODE_PAGE_FLIP_WAIT)
290+
291+struct drm_mode_page_flip {
292+ /** Handle of new front buffer */
293+ __u32 fb_id;
294+ __u32 crtc_id;
295+
296+ /* 64 bit cookie returned to userspace in the page flip event. */
297+ __u64 user_data;
298+ /**
299+ * page flip flags (wait on flip only for now)
300+ */
301+ __u32 flags;
302+};
303+
304 #endif
305--
3061.5.3.4
307
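Note: since this deleted patch defined both struct drm_mode_page_flip and DRM_IOCTL_MODE_PAGE_FLIP itself, a minimal userspace sketch of how the ioctl would have been driven looks like the following. It assumes the patched headers are on the include path; drm_fd, my_crtc_id and new_fb_id are placeholders for an open DRM master fd and IDs obtained from the usual mode-setting calls, and this Moblin-era interface differs from the page-flip ioctl that later landed upstream.

#include <stdio.h>
#include <sys/ioctl.h>
#include "drm.h"        /* DRM_IOCTL_MODE_PAGE_FLIP, per the hunk above */
#include "drm_mode.h"   /* struct drm_mode_page_flip */

static int queue_flip(int drm_fd, unsigned int my_crtc_id, unsigned int new_fb_id)
{
	struct drm_mode_page_flip flip = {
		.fb_id     = new_fb_id,
		.crtc_id   = my_crtc_id,
		.user_data = 0,
		.flags     = 0,  /* or DRM_MODE_PAGE_FLIP_WAIT to block on the previous flip */
	};

	if (ioctl(drm_fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip) < 0) {
		perror("DRM_IOCTL_MODE_PAGE_FLIP");
		return -1;
	}
	return 0;
}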
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-mem-info.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-mem-info.patch
deleted file mode 100644
index 3b9463f01d..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-mem-info.patch
+++ /dev/null
@@ -1,140 +0,0 @@
1From 5deab387f5b9ec79a6bf7edc52b0653c2a6d44b5 Mon Sep 17 00:00:00 2001
2From: Alan Olsen <alan.r.olsen@intel.com>
3Date: Fri, 11 Sep 2009 15:57:46 -0700
4Subject: [PATCH] linux-2.6.31-drm-mem-info.patch
5
6Signed-off-by: Alan Olsen <alan.r.olsen@intel.com>
7---
8 drivers/gpu/drm/Makefile | 4 +++
9 drivers/gpu/drm/drm_info.c | 58 ++++++++++++++++++++++++++++++++++++++++++++
10 drivers/gpu/drm/drm_proc.c | 2 +
11 include/drm/drmP.h | 2 +
12 4 files changed, 66 insertions(+), 0 deletions(-)
13
14diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
15index fe23f29..d76f167 100644
16--- a/drivers/gpu/drm/Makefile
17+++ b/drivers/gpu/drm/Makefile
18@@ -4,6 +4,10 @@
19
20 ccflags-y := -Iinclude/drm
21
22+ifeq ($(CONFIG_DRM_PSB),y)
23+ ccflags-y += -Idrivers/gpu/drm/psb
24+endif
25+
26 drm-y := drm_auth.o drm_bufs.o drm_cache.o \
27 drm_context.o drm_dma.o drm_drawable.o \
28 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
29diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30index f0f6c6b..0ecc778 100644
31--- a/drivers/gpu/drm/drm_info.c
32+++ b/drivers/gpu/drm/drm_info.c
33@@ -36,6 +36,10 @@
34 #include <linux/seq_file.h>
35 #include "drmP.h"
36
37+#ifdef CONFIG_DRM_PSB
38+#include "psb/psb_drv.h"
39+#endif
40+
41 /**
42 * Called when "/proc/dri/.../name" is read.
43 *
44@@ -211,6 +215,33 @@ int drm_vblank_info(struct seq_file *m, void *data)
45 return 0;
46 }
47
48+int drm_gem_object_mem_info(int id, void *ptr, void *data)
49+{
50+ struct drm_gem_object *obj = ptr;
51+ struct seq_file *m = data;
52+
53+ seq_printf(m, "object 0x%p name %2d memory %8zd\n",
54+ obj, obj->name, obj->size);
55+
56+ return 0;
57+}
58+
59+int drm_gem_clients_info(struct seq_file *m, void *data)
60+{
61+ struct drm_info_node *node = (struct drm_info_node *) m->private;
62+ struct drm_device *dev = node->minor->dev;
63+ struct drm_file *priv;
64+
65+ mutex_lock(&dev->struct_mutex);
66+ list_for_each_entry(priv, &dev->filelist, lhead) {
67+ seq_printf(m, "pid %5d \n", priv->pid);
68+ idr_for_each(&priv->object_idr, &drm_gem_object_mem_info, m);
69+ seq_printf(m, "\n");
70+ }
71+ mutex_unlock(&dev->struct_mutex);
72+ return 0;
73+}
74+
75 /**
76 * Called when "/proc/dri/.../clients" is read.
77 *
78@@ -273,6 +304,33 @@ int drm_gem_object_info(struct seq_file *m, void* data)
79 return 0;
80 }
81
82+#ifdef CONFIG_DRM_PSB
83+int drm_ttm_mem_info(struct seq_file *m, void* data)
84+{
85+ struct drm_info_node *node = (struct drm_info_node *) m->private;
86+ struct drm_device *dev = node->minor->dev;
87+
88+ if (!strncmp("psb", dev->devname, 3)) {
89+ struct ttm_bo_device *bdev = &psb_priv(dev)->bdev;
90+ struct ttm_mem_global *glob = bdev->mem_glob;
91+
92+ spin_lock(&glob->lock);
93+ seq_printf(m, "used memory %llu \n", glob->used_memory);
94+ seq_printf(m, "used total memory %llu \n", glob->used_total_memory);
95+ spin_unlock(&glob->lock);
96+ } else {
97+ seq_printf(m, "This is not a PSB device, no ttm mem info available\n");
98+ }
99+ return 0;
100+}
101+#else
102+int drm_ttm_mem_info(struct seq_file *m, void* data)
103+{
104+ seq_printf(m, "ttm is not used\n");
105+ return 0;
106+}
107+#endif
108+
109 #if DRM_DEBUG_CODE
110
111 int drm_vma_info(struct seq_file *m, void *data)
112diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
113index bbd4b3d..26e64ec 100644
114--- a/drivers/gpu/drm/drm_proc.c
115+++ b/drivers/gpu/drm/drm_proc.c
116@@ -55,6 +55,8 @@ static struct drm_info_list drm_proc_list[] = {
117 {"bufs", drm_bufs_info, 0},
118 {"gem_names", drm_gem_name_info, DRIVER_GEM},
119 {"gem_objects", drm_gem_object_info, DRIVER_GEM},
120+ {"gem_clients", drm_gem_clients_info, DRIVER_GEM},
121+ {"ttm_meminfo", drm_ttm_mem_info, 0},
122 #if DRM_DEBUG_CODE
123 {"vma", drm_vma_info, 0},
124 #endif
125diff --git a/include/drm/drmP.h b/include/drm/drmP.h
126index dbd40f1..5575b9a 100644
127--- a/include/drm/drmP.h
128+++ b/include/drm/drmP.h
129@@ -1355,6 +1355,8 @@ extern int drm_vblank_info(struct seq_file *m, void *data);
130 extern int drm_clients_info(struct seq_file *m, void* data);
131 extern int drm_gem_name_info(struct seq_file *m, void *data);
132 extern int drm_gem_object_info(struct seq_file *m, void* data);
133+extern int drm_gem_clients_info(struct seq_file *m, void *data);
134+extern int drm_ttm_mem_info(struct seq_file *m, void* data);
135
136 #if DRM_DEBUG_CODE
137 extern int drm_vma_info(struct seq_file *m, void *data);
138--
1391.6.0.6
140
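Note: the proc entries registered above can be inspected directly on a patched kernel; a small sketch, assuming DRM minor 0 so the files live under /proc/dri/0/ (the minor number is an assumption, not part of the patch):

#include <stdio.h>

int main(void)
{
	char line[256];
	/* gem_clients prints a "pid ..." line per client followed by one
	 * line per GEM object; ttm_meminfo next to it reports TTM usage. */
	FILE *f = fopen("/proc/dri/0/gem_clients", "r");

	if (!f) {
		perror("gem_clients");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}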
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-iegd.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-iegd.patch
deleted file mode 100644
index fa6a3ea9f1..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-iegd.patch
+++ /dev/null
@@ -1,9290 +0,0 @@
1diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/Kconfig patch_script_temp/drivers/gpu/drm/Kconfig
2--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/Kconfig 2009-08-27 11:30:10.000000000 -0700
3+++ patch_script_temp/drivers/gpu/drm/Kconfig 2009-10-06 10:30:05.000000000 -0700
4@@ -154,3 +154,10 @@
5 Choose this option if you have a Poulsbo or Moorestown platform.
6 If M is selected the module will be called psb.
7
8+
9+config IEGD
10+ tristate "Intel IEGD"
11+ depends on DRM
12+ help
13+ Choose this option for the Intel Embedded Graphics Driver (IEGD)
14+
15diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/Makefile patch_script_temp/drivers/gpu/drm/Makefile
16--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/Makefile 2009-08-27 11:30:10.000000000 -0700
17+++ patch_script_temp/drivers/gpu/drm/Makefile 2009-10-06 10:30:05.000000000 -0700
18@@ -32,3 +32,4 @@
19 obj-$(CONFIG_DRM_SIS) += sis/
20 obj-$(CONFIG_DRM_SAVAGE)+= savage/
21 obj-$(CONFIG_DRM_VIA) +=via/
22+obj-$(CONFIG_IEGD) += iegd/
23diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/Makefile patch_script_temp/drivers/gpu/drm/iegd/Makefile
24--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/Makefile 1969-12-31 17:00:00.000000000 -0700
25+++ patch_script_temp/drivers/gpu/drm/iegd/Makefile 2009-10-06 10:30:05.000000000 -0700
26@@ -0,0 +1,9 @@
27+
28+
29+ccflags-y := -Idrivers/gpu/drm/iegd/include
30+ccflags-y += -Idrivers/char/agp -Iinclude/drm
31+
32+iegd_mod-objs := agp/pci.o agp/global.o agp/drv_alm.o agp/drv_nap.o agp/drv_plb.o agp/drv_cmn.o agp/drv_gn4.o drm/iegd_drv.o drm/iegd_interface.o drm/iegd_interface_265.o drm/iegd_interface_2611.o drm/iegd_interface_2615.o drm/iegd_interface_2624.o drm/psb_irq.o
33+
34+obj-$(CONFIG_IEGD) += iegd_mod.o
35+
36diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/agp_test.c patch_script_temp/drivers/gpu/drm/iegd/agp/agp_test.c
37--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/agp_test.c 1969-12-31 17:00:00.000000000 -0700
38+++ patch_script_temp/drivers/gpu/drm/iegd/agp/agp_test.c 2009-10-06 10:30:05.000000000 -0700
39@@ -0,0 +1,314 @@
40+/* -*- pse-c -*-
41+ *----------------------------------------------------------------------------
42+ * Filename: agp_test.c
43+ * $Revision: 1.5 $
44+ *----------------------------------------------------------------------------
45+ * Unit level test for IEGD AGP
46+ * Copyright © 2009 Intel Corporation.
47+ *
48+ * This program is free software; you can redistribute it and/or modify it
49+ * under the terms and conditions of the GNU General Public License,
50+ * version 2, as published by the Free Software Foundation.
51+ *
52+ * This program is distributed in the hope it will be useful, but WITHOUT
53+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
54+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
55+ * more details.
56+ *
57+ * You should have received a copy of the GNU General Public License along with
58+ * this program; if not, write to the Free Software Foundation, Inc.,
59+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
60+ *
61+ */
62+
63+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
64+ *
65+ * Redistribution and use in source and binary forms, with or without
66+ * modification, are permitted provided that the following conditions are met:
67+ * Redistributions of source code must retain the above copyright notice,
68+ * this list of conditions and the following disclaimer.
69+ *
70+ * Redistributions in binary form must reproduce the above copyright
71+ * notice, this list of conditions and the following disclaimer in the
72+ * documentation and/or other materials provided with the distribution.
73+ *
74+ * Neither the name Intel Corporation nor the names of its contributors
75+ * may be used to endorse or promote products derived from this software
76+ * without specific prior written permission.
77+ *
78+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
79+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
80+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
81+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
82+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
83+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
84+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
85+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
86+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
87+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
88+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
89+ *
90+ */
91+#include <fcntl.h>
92+#include <unistd.h>
93+#include <sys/ioctl.h>
94+#include <stdlib.h>
95+#include <stdio.h>
96+#include <linux/agpgart.h>
97+#include <sys/mman.h>
98+
99+/*#define PAGE_SIZE 1024*/
100+
101+#define VERBOSE "-v"
102+
103+int verbose = 0;
104+int file_desc, temp, length;
105+unsigned char *mmap_gart;
106+
107+int init_agp(void)
108+{
109+ agp_info info;
110+ agp_setup setup;
111+
112+ if (verbose)
113+ {
114+ printf("Testing ioctl AGPIOC_ACQUIRE.\n");
115+ }
116+ if(ioctl(file_desc, AGPIOC_ACQUIRE) != 0)
117+ {
118+ printf("Error on AGPIOC_ACQUIRE.\n");
119+ printf("Reinstall IKM.\n");
120+ exit(-1);
121+ }
122+ if (verbose)
123+ {
124+ printf("Testing ioctl call for info init\n");
125+ }
126+ if(ioctl(file_desc, AGPIOC_INFO, &info) != 0)
127+ {
128+ printf("Error on AGPIOC_INFO\n");
129+ printf("Reinstall IKM.\n");
130+ exit(-1);
131+ }
132+ if (verbose)
133+ {
134+ printf("Testing init info\n version:%i.%i,\n id:0x%lx,\n mode:0x%lx,\n"
135+ " base:0x%lx,\n size:%i,\n total mem:%i,\n system mem:%i,\n"
136+ " used mem:%i\n", info.version.major, info.version.minor,
137+ info.bridge_id, info.agp_mode, info.aper_base, info.aper_size,
138+ info.pg_total, info.pg_system, info.pg_used);
139+
140+ printf("Testing mmap the device\n");
141+ }
142+ length = info.aper_size*0x100000;
143+ mmap_gart = mmap(NULL, info.aper_size*0x100000,
144+ PROT_READ | PROT_WRITE, MAP_SHARED, file_desc, 0);
145+ if(mmap_gart == (unsigned char *) 0xFFFFFFFF)
146+ {
147+ printf("Error on mmap\n");
148+ printf("Reinstall IKM.\n");
149+ close(file_desc);
150+ exit(-1);
151+ }
152+
153+ setup.agp_mode = info.agp_mode;
154+ if (verbose)
155+ {
156+ printf("Testing ioctl AGPIOC_SETUP\n");
157+ }
158+ if(ioctl(file_desc, AGPIOC_SETUP, &setup) != 0)
159+ {
160+ printf("Error on AGPIOC_SETUP\n");
161+ printf("Reinstall IKM.\n");
162+ exit(-1);
163+ }
164+}
165+
166+void gart_unbind(int key)
167+{
168+ agp_unbind unbind;
169+ unbind.key = key;
170+ unbind.priority = 0;
171+ if (verbose)
172+ {
173+ printf("Testing ioctl AGPIOC_UNBIND\n");
174+ }
175+ if(ioctl(file_desc, AGPIOC_UNBIND, &unbind) != 0)
176+ {
177+ printf("Error on AGPIOC_UNBIND\n");
178+ printf("Reinstall IKM.\n");
179+ exit(-1);
180+ }
181+}
182+
183+void gart_bind(int key)
184+{
185+ agp_bind bind;
186+ bind.key = key;
187+ agp_info info;
188+
189+ int page_size = 4000;
190+ int aper_size, gtt_entries, bind_error;
191+
192+ if(ioctl(file_desc, AGPIOC_INFO, &info) != 0)
193+ {
194+ printf("Error on AGPIOC_INFO\n");
195+ printf("Reinstall IKM.\n");
196+ exit(-1);
197+ }
198+ aper_size = info.aper_size;
199+ gtt_entries = aper_size*1000000/page_size;
200+ if (verbose)
201+ {
202+ printf("max memory: %i\n", gtt_entries);
203+ }
204+
205+ do
206+ {
207+ bind_error = 0;
208+ bind.pg_start = page_size;
209+ printf("Testing ioctl AGPIOC_BIND\n");
210+ if(ioctl(file_desc, AGPIOC_BIND, &bind) != 0)
211+ {
212+ if (page_size < gtt_entries)
213+ {
214+ page_size = page_size+4000;
215+ printf("Trying new address for bind %i.\n", page_size);
216+ bind_error = 1;
217+ }
218+ else
219+ {
220+ printf("Error on AGPIOC_BIND\n");
221+ printf("Reinstall IKM.");
222+ exit(-1);
223+ }
224+ }
225+ } while (bind_error);
226+ printf("Sucessful bind.\n");
227+}
228+
229+int gart_alloc(int count)
230+{
231+ agp_allocate allocate;
232+
233+ allocate.type = 0;
234+ allocate.pg_count = count;
235+ if (verbose)
236+ {
237+ printf("Testing ioctl AGPIOC_ALLOCATE\n");
238+ }
239+ if(ioctl(file_desc, AGPIOC_ALLOCATE, &allocate) != 0)
240+ {
241+ printf("Error on AGPIOC_ALLOCATE\n");
242+ printf("Reinstall IKM.");
243+ exit(-1);
244+ }
245+
246+ gart_bind(allocate.key);
247+
248+ return(allocate.key);
249+}
250+
251+void gart_free(int key)
252+{
253+
254+ gart_unbind(key);
255+ if (verbose)
256+ {
257+ printf("Testing ioctl AGPIOC_DEALLOCATE\n");
258+ }
259+ if(ioctl(file_desc, AGPIOC_DEALLOCATE, key) != 0)
260+ {
261+ printf("Error on AGPIOC_DEALLOCATE\n");
262+ printf("Reinstall IKM.\n");
263+ exit(-1);
264+ }
265+}
266+
267+int main(int argc, char *argv[])
268+{
269+ /* Check for verbose mode */
270+ int i, key, key1;
271+ agp_info info;
272+
273+ for (i = 1; i < argc; i++)
274+ {
275+ if(strcmp(argv[1], VERBOSE) == 0)
276+ {
277+ verbose = 1;
278+ printf("Verbose mode.\n");
279+ }
280+ }
281+
282+ /* Open the agpgart */
283+ file_desc=open("/dev/agpgart",O_RDWR);
284+
285+ if(file_desc<0){
286+ printf("Cannot open device file:%d\n",file_desc);
287+ printf("Check for root level permissions.");
288+ exit(-1);
289+ }
290+
291+ if (verbose)
292+ {
293+ printf("Open device file:%d\n",file_desc);
294+ /* This the ioctl that allocates physical memory */
295+ printf("Testing ioctl for memory allocation\n");
296+ }
297+
298+ init_agp();
299+
300+ key = gart_alloc(64);
301+ key1 = gart_alloc(0);
302+ if (verbose)
303+ {
304+ printf("Testing ioctl call for info\n");
305+ }
306+ if(ioctl(file_desc, AGPIOC_INFO, &info) != 0)
307+ {
308+ close(file_desc);
309+ printf("Error on AGPIOC_INFO\n");
310+ printf("Reinstall IKM.\n");
311+ exit(-1);
312+ }
313+
314+ if (verbose)
315+ {
316+ printf("Testing init info\n version:%i.%i,\n id:0x%lx,\n mode:0x%lx,\n"
317+ " base:0x%lx,\n size:%i,\n total mem:%i,\n system mem:%i,\n"
318+ " used mem:%i\n", info.version.major, info.version.minor,
319+ info.bridge_id, info.agp_mode, info.aper_base, info.aper_size,
320+ info.pg_total, info.pg_system, info.pg_used);
321+ }
322+
323+ gart_free(key);
324+ gart_free(key1);
325+
326+ if (munmap(mmap_gart, length) < 0)
327+ {
328+ close(file_desc);
329+ printf("Error on munmap\n");
330+ printf("Reinstall IKM.\n");
331+ exit(-1);
332+ }
333+
334+ if (verbose)
335+ {
336+ printf("Testing ioctl AGPIOC_RELEASE\n");
337+ }
338+ if(ioctl(file_desc, AGPIOC_RELEASE) != 0)
339+ {
340+ close(file_desc);
341+ printf("Error on AGPIOC_RELEASE\n");
342+ printf("Reinstall IKM.");
343+ exit(-1);
344+ }
345+
346+ printf("AGPGART successfully loaded\n");
347+
348+ close(file_desc);
349+
350+ return 0;
351+
352+
353+}
354diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_alm.c patch_script_temp/drivers/gpu/drm/iegd/agp/drv_alm.c
355--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_alm.c 1969-12-31 17:00:00.000000000 -0700
356+++ patch_script_temp/drivers/gpu/drm/iegd/agp/drv_alm.c 2009-10-06 10:30:05.000000000 -0700
357@@ -0,0 +1,447 @@
358+/* -*- pse-c -*-
359+ *----------------------------------------------------------------------------
360+ * Filename: drv_alm.c
361+ * $Revision: 1.7 $
362+ *----------------------------------------------------------------------------
363+ * Gart and DRM driver for Intel Embedded Graphics Driver
364+ * Copyright © 2008, Intel Corporation.
365+ *
366+ * This program is free software; you can redistribute it and/or modify it
367+ * under the terms and conditions of the GNU General Public License,
368+ * version 2, as published by the Free Software Foundation.
369+ *
370+ * This program is distributed in the hope it will be useful, but WITHOUT
371+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
372+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
373+ * more details.
374+ *
375+ * You should have received a copy of the GNU General Public License along with
376+ * this program; if not, write to the Free Software Foundation, Inc.,
377+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
378+ *
379+ */
380+
381+#include "global.h"
382+#include "intelpci.h"
383+
384+static int iegd_alm_configure(void);
385+static int iegd_alm_fetch_size(void);
386+static void iegd_alm_cleanup(void);
387+static void iegd_alm_tlbflush(struct agp_memory *mem);
388+
389+static int iegd_alm_insert_entries(
390+ struct agp_memory *mem,
391+ off_t pg_start,
392+ int type);
393+
394+static int iegd_alm_remove_entries(
395+ struct agp_memory *mem,
396+ off_t pg_start,
397+ int type);
398+
399+struct aper_size_info_fixed intel_i830_sizes[] =
400+{
401+ {128, 32768, 5},
402+ /* The 64M mode still requires a 128k gatt */
403+ {64, 16384, 5},
404+ {256, 65536, 6},
405+ {512, 131072, 7},
406+};
407+
408+struct aper_size_info_fixed intel_i810_sizes[] =
409+{
410+ {64, 16384, 4},
411+ {32, 8192, 4},
412+};
413+
414+bridge_driver_t drv_alm = {
415+ .owner = THIS_MODULE,
416+ .size_type = FIXED_APER_SIZE,
417+ .aperture_sizes = 0,
418+ .num_aperture_sizes = 0,
419+ .needs_scratch_page = TRUE,
420+ .configure = iegd_alm_configure,
421+ .fetch_size = iegd_alm_fetch_size,
422+ .cleanup = iegd_alm_cleanup,
423+ .tlb_flush = iegd_alm_tlbflush,
424+ .mask_memory = iegd_cmn_mask_memory,
425+ .masks = iegd_cmn_masks,
426+ .agp_enable = iegd_cmn_agp_enable,
427+ .cache_flush = global_cache_flush,
428+ .create_gatt_table = NULL,
429+ .free_gatt_table = NULL,
430+ .insert_memory = iegd_alm_insert_entries,
431+ .remove_memory = iegd_alm_remove_entries,
432+ .alloc_by_type = iegd_cmn_alloc_by_type,
433+ .free_by_type = iegd_cmn_free_by_type,
434+ .agp_alloc_page = agp_generic_alloc_page,
435+ .agp_destroy_page = agp_generic_destroy_page,
436+};
437+
438+static int iegd_alm_configure(void)
439+{
440+ struct aper_size_info_fixed *current_size;
441+ u32 temp;
442+ u16 gmch_ctrl;
443+ int i;
444+ int entries_start = 0;
445+
446+ AGN_DEBUG("Enter");
447+
448+ current_size = A_SIZE_FIX(agp_bridge->current_size);
449+
450+ if(private_data.pdev->device == PCI_DEVICE_ID_810 ||
451+ private_data.pdev->device == PCI_DEVICE_ID_810DC ||
452+ private_data.pdev->device == PCI_DEVICE_ID_810E ||
453+ private_data.pdev->device == PCI_DEVICE_ID_815) {
454+
455+ pci_read_config_dword(private_data.pdev, I810_MMADDR, &temp);
456+ temp &= 0xfff80000;
457+
458+ private_data.registers = ioremap(temp, 128*4096);
459+ if(!private_data.registers) {
460+ AGN_ERROR("Unable to remap memory");
461+ return -ENOMEM;
462+ }
463+
464+ if((readl(private_data.registers+I810_DRAM_CTL)
465+ & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
466+ AGN_LOG("Detected 4MB dedicated video RAM.");
467+ private_data.num_dcache_entries = 1024;
468+ }
469+ } else if(private_data.pdev->device == PCI_DEVICE_ID_830M ||
470+ private_data.pdev->device == PCI_DEVICE_ID_845G ||
471+ private_data.pdev->device == PCI_DEVICE_ID_855 ||
472+ private_data.pdev->device == PCI_DEVICE_ID_865G) {
473+
474+ entries_start = private_data.gtt_entries;
475+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
476+ gmch_ctrl |= I830_GMCH_ENABLED;
477+ pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
478+ }
479+
480+ /* Get based address of the graphic aperture */
481+ pci_read_config_dword(private_data.pdev, I810_GMADDR, &temp);
482+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
483+
484+ /* Write the based address of the gtt table to the
485+ * page table control register */
486+ writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED,
487+ private_data.registers+I810_PGETBL_CTL);
488+ readl(private_data.registers+I810_PGETBL_CTL);
489+
490+ if (agp_bridge->driver->needs_scratch_page) {
491+ for (i = entries_start; i < current_size->num_entries; i++) {
492+ writel(agp_bridge->scratch_page,
493+ private_data.registers+I810_PTE_BASE+(i*4));
494+ /* PCI Posting. */
495+ readl(private_data.registers+I810_PTE_BASE+(i*4));
496+ }
497+ }
498+
499+ global_cache_flush();
500+
501+ AGN_DEBUG("Exit");
502+ return 0;
503+}
504+
505+
506+static int iegd_alm_fetch_size(void)
507+{
508+ u32 smram_miscc;
509+ u16 gmch_ctrl;
510+ struct aper_size_info_fixed *values;
511+
512+ AGN_DEBUG("Enter");
513+
514+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
515+
516+ if(private_data.pdev->device == PCI_DEVICE_ID_810 ||
517+ private_data.pdev->device == PCI_DEVICE_ID_810DC ||
518+ private_data.pdev->device == PCI_DEVICE_ID_810E ||
519+ private_data.pdev->device == PCI_DEVICE_ID_815) {
520+
521+ pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC,
522+ &smram_miscc);
523+
524+ if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
525+ printk(KERN_WARNING PFX "i810 is disabled\n");
526+ return 0;
527+ }
528+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) ==
529+ I810_GFX_MEM_WIN_32M) {
530+ agp_bridge->previous_size =
531+ agp_bridge->current_size = (void *) (values + 1);
532+ agp_bridge->aperture_size_idx = 1;
533+ return values[1].size;
534+ } else {
535+ agp_bridge->previous_size =
536+ agp_bridge->current_size = (void *) (values);
537+ agp_bridge->aperture_size_idx = 0;
538+ return values[0].size;
539+ }
540+ } else if(private_data.pdev->device == PCI_DEVICE_ID_830M ||
541+ private_data.pdev->device == PCI_DEVICE_ID_845G ||
542+ private_data.pdev->device == PCI_DEVICE_ID_855 ||
543+ private_data.pdev->device == PCI_DEVICE_ID_865G) {
544+
545+ if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
546+ agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
547+ /* 855GM/852GM/865G has 128MB aperture size */
548+ agp_bridge->previous_size =
549+ agp_bridge->current_size = (void *) values;
550+ agp_bridge->aperture_size_idx = 0;
551+ return values[0].size;
552+ }
553+
554+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
555+
556+ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
557+ agp_bridge->previous_size =
558+ agp_bridge->current_size = (void *) values;
559+ agp_bridge->aperture_size_idx = 0;
560+ return values[0].size;
561+ } else {
562+ agp_bridge->previous_size =
563+ agp_bridge->current_size = (void *) (values + 1);
564+ agp_bridge->aperture_size_idx = 1;
565+ return values[1].size;
566+ }
567+ }
568+
569+ AGN_DEBUG("Exit");
570+
571+ return 0;
572+}
573+
574+static void iegd_alm_cleanup(void)
575+{
576+
577+ AGN_DEBUG("Enter");
578+
579+ if(private_data.pdev->device == PCI_DEVICE_ID_810 ||
580+ private_data.pdev->device == PCI_DEVICE_ID_810DC ||
581+ private_data.pdev->device == PCI_DEVICE_ID_810E ||
582+ private_data.pdev->device == PCI_DEVICE_ID_815) {
583+
584+ writel(0, private_data.registers+I810_PGETBL_CTL);
585+ readl(private_data.registers); /* PCI Posting. */
586+ }
587+
588+ /* Unmap the mapping of the mmio */
589+ iounmap((void *) private_data.registers);
590+
591+ AGN_DEBUG("Exit");
592+}
593+
594+static void iegd_alm_tlbflush(struct agp_memory *mem)
595+{
596+ AGN_DEBUG("Enter");
597+ AGN_DEBUG("Exit");
598+ return;
599+}
600+
601+int AGP_CREATE_GATT(iegd_alm_create_gatt_table)
602+{
603+ int num_entries = 0;
604+ int i830_gtt_page_order = 0;
605+ u32 gtt_bus_addr = 0;
606+ u32 mmio_bus_addr = 0;
607+ char *gtt_table = NULL;
608+ char *gtt_table_end = NULL;
609+ char *current_entry = NULL;
610+ int gtt_enabled = FALSE;
611+ struct page *gtt_table_page = NULL;
612+ struct aper_size_info_fixed *aper_size = NULL;
613+
614+ AGN_DEBUG("Enter");
615+
616+ agp_bridge->gatt_table_real = NULL;
617+ agp_bridge->gatt_table = NULL;
618+ aper_size = (struct aper_size_info_fixed *)agp_bridge->current_size;
619+
620+ /* Find and save the address of the MMIO registers */
621+ pci_read_config_dword(private_data.pdev, I810_MMADDR,
622+ &mmio_bus_addr);
623+ mmio_bus_addr &= 0xFFF80000;
624+
625+ private_data.registers = (volatile u8 *)
626+ ioremap(mmio_bus_addr, 128 * 4096);
627+
628+ if (!private_data.registers) {
629+ AGN_ERROR("ioremap failed to map");
630+ return (-ENOMEM);
631+ }
632+
633+ /* Get value on the control register */
634+ gtt_bus_addr = readl(private_data.registers+I810_PGETBL_CTL) &
635+ 0xFFFFF000;
636+ gtt_enabled = readl(private_data.registers+I810_PGETBL_CTL) &
637+ I810_PGETBL_ENABLED;
638+ global_cache_flush();
639+
640+ /* we have to call this as early as possible after the MMIO base address
641+ * is known */
642+ iegd_cmn_init_gtt_entries();
643+
644+ /* If GTT does not exist, which can happen if a PCI graphics card is the
645+ * boot-up display device, then we will have to allocate the GTT table
646+ * ourselves
647+ */
648+ if (!gtt_enabled) {
649+
650+ AGN_DEBUG("Gtt is disabled");
651+
652+ i830_gtt_page_order = aper_size->page_order;
653+ num_entries = aper_size->num_entries;
654+ gtt_table = (char *) __get_free_pages(
655+ GFP_KERNEL, i830_gtt_page_order);
656+ gtt_table_end = gtt_table +
657+ ((PAGE_SIZE * (1<<i830_gtt_page_order)) - 1);
658+
659+ /* Make sure allocation was successful */
660+ if (NULL == gtt_table) {
661+ AGN_ERROR("Fail to allocate kernel memory");
662+ return -ENOMEM;
663+ }
664+
665+ for (current_entry = gtt_table; current_entry < gtt_table_end;
666+ current_entry += PAGE_SIZE) {
667+ gtt_table_page = virt_to_page(current_entry);
668+ set_bit(PG_reserved, &gtt_table_page->flags);
669+ }
670+ agp_bridge->gatt_bus_addr = virt_to_phys(gtt_table);
671+ } else {
672+ agp_bridge->gatt_bus_addr = gtt_bus_addr;
673+ }
674+
675+ AGN_DEBUG("Exit");
676+ return(0);
677+}
678+
679+
680+static int iegd_alm_insert_entries(
681+ struct agp_memory *mem,
682+ off_t pg_start,
683+ int type)
684+{
685+ int i, j, num_entries;
686+ void *temp;
687+
688+ AGN_DEBUG("Enter");
689+
690+ temp = agp_bridge->current_size;
691+ num_entries = A_SIZE_FIX(temp)->num_entries;
692+
693+ if ((pg_start + mem->page_count) > num_entries) {
694+ AGN_ERROR("Trying to write beyond aperture limit");
695+ AGN_DEBUG("pg_start=0x%.8lx, mem->page_count=%d,"
696+ "num_entries=%d", pg_start, mem->page_count,
697+ num_entries);
698+ return -EINVAL;
699+ }
700+
701+ if(private_data.pdev->device == PCI_DEVICE_ID_830M ||
702+ private_data.pdev->device == PCI_DEVICE_ID_845G ||
703+ private_data.pdev->device == PCI_DEVICE_ID_855 ||
704+ private_data.pdev->device == PCI_DEVICE_ID_865G) {
705+
706+ if (pg_start < private_data.gtt_entries) {
707+ AGN_ERROR("Trying to insert into local/stolen memory");
708+ AGN_DEBUG("pg_start == 0x%.8lx,private_data.gtt_entries =="
709+ "0x%.8x", pg_start,private_data.gtt_entries);
710+ return -EINVAL;
711+ }
712+ } else if(private_data.pdev->device == PCI_DEVICE_ID_810 ||
713+ private_data.pdev->device == PCI_DEVICE_ID_810DC ||
714+ private_data.pdev->device == PCI_DEVICE_ID_810E ||
715+ private_data.pdev->device == PCI_DEVICE_ID_815) {
716+
717+ for (j = pg_start; j < (pg_start + mem->page_count); j++) {
718+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
719+ AGN_ERROR("Device busy");
720+ return -EBUSY;
721+ }
722+ }
723+ if (type != 0 || mem->type != 0) {
724+ if ((type == AGP_DCACHE_MEMORY) &&
725+ (mem->type == AGP_DCACHE_MEMORY)) {
726+ /* special insert */
727+ global_cache_flush();
728+ for (i = pg_start; i < (pg_start + mem->page_count);
729+ i++) {
730+ writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
731+ private_data.registers+I810_PTE_BASE+(i*4));
732+ /* PCI Posting. */
733+ readl(private_data.registers +
734+ I810_PTE_BASE+(i*4));
735+ }
736+ global_cache_flush();
737+ agp_bridge->driver->tlb_flush(mem);
738+ AGN_DEBUG("AGP_DCACHE_MEMORY.. Exit");
739+ return 0;
740+ }
741+ }
742+ }
743+
744+ if ((type != 0 && type != AGP_PHYS_MEMORY) ||
745+ (mem->type != 0 && mem->type != AGP_PHYS_MEMORY)) {
746+ AGN_ERROR("Unsupported memory type");
747+ AGN_DEBUG("mem->type=%x", mem->type);
748+ return -EINVAL;
749+ }
750+
751+ global_cache_flush();
752+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
753+ writel(AGP_MASK_GTT(), private_data.registers+I810_PTE_BASE+(j*4));
754+ /* PCI Posting. */
755+ readl(private_data.registers+I810_PTE_BASE+(j*4));
756+ }
757+
758+ global_cache_flush();
759+ agp_bridge->driver->tlb_flush(mem);
760+
761+ AGN_DEBUG("Exit");
762+
763+ return 0;
764+}
765+
766+static int iegd_alm_remove_entries(
767+ struct agp_memory *mem,
768+ off_t pg_start,
769+ int type)
770+{
771+ int i;
772+
773+ AGN_DEBUG("Enter");
774+
775+ global_cache_flush();
776+
777+ if(private_data.pdev->device == PCI_DEVICE_ID_830M ||
778+ private_data.pdev->device == PCI_DEVICE_ID_845G ||
779+ private_data.pdev->device == PCI_DEVICE_ID_855 ||
780+ private_data.pdev->device == PCI_DEVICE_ID_865G) {
781+
782+ if (pg_start < private_data.gtt_entries) {
783+ AGN_ERROR("Trying to disable local/stolen memory");
784+ AGN_DEBUG("pg_start=0x%.8lx, private_data.gtt_entries=%d",
785+ pg_start, private_data.gtt_entries);
786+ return -EINVAL;
787+ }
788+ }
789+
790+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
791+ writel(agp_bridge->scratch_page,
792+ private_data.registers+I810_PTE_BASE+(i*4));
793+ /* PCI Posting. */
794+ readl(private_data.registers+I810_PTE_BASE+(i*4));
795+ }
796+
797+ global_cache_flush();
798+ agp_bridge->driver->tlb_flush(mem);
799+
800+ AGN_DEBUG("Exit");
801+
802+ return 0;
803+}
804+
805diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_cmn.c patch_script_temp/drivers/gpu/drm/iegd/agp/drv_cmn.c
806--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_cmn.c 1969-12-31 17:00:00.000000000 -0700
807+++ patch_script_temp/drivers/gpu/drm/iegd/agp/drv_cmn.c 2009-10-06 10:30:05.000000000 -0700
808@@ -0,0 +1,682 @@
809+/* -*- pse-c -*-
810+ *----------------------------------------------------------------------------
811+ * Filename: drv_cmn.c
812+ * $Revision: 1.28 $
813+ *----------------------------------------------------------------------------
814+ * Gart and DRM driver for Intel Embedded Graphics Driver
815+ * Copyright © 2008, Intel Corporation.
816+ *
817+ * This program is free software; you can redistribute it and/or modify it
818+ * under the terms and conditions of the GNU General Public License,
819+ * version 2, as published by the Free Software Foundation.
820+ *
821+ * This program is distributed in the hope it will be useful, but WITHOUT
822+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
823+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
824+ * more details.
825+ *
826+ * You should have received a copy of the GNU General Public License along with
827+ * this program; if not, write to the Free Software Foundation, Inc.,
828+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
829+ *
830+ */
831+
832+#include <linux/pagemap.h>
833+#include "global.h"
834+#include "intelpci.h"
835+#include "interface_abs.h"
836+#include "igd_abs.h"
837+
838+static struct agp_memory *alloc_agpphysmem_i8xx(
839+ size_t pg_count, int type);
840+static AGP_MEM_TYPE i8xx_alloc_pages(size_t pg_count,
841+ unsigned int order);
842+
843+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
844+static void i8xx_destroy_pages_by_addr(void *addr,
845+ size_t pg_count, unsigned int order);
846+#define PAGES_OR_MEMORY(a) gart_to_virt(a->memory[0])
847+#define DESTROY_PAGES i8xx_destroy_pages_by_addr
848+#else
849+static void i8xx_destroy_pages(struct page **pages,
850+ size_t pg_count, unsigned int order);
851+#define PAGES_OR_MEMORY(a) a->pages
852+#define DESTROY_PAGES i8xx_destroy_pages
853+#endif
854+
855+dispatch_table_t driver_dispatch_list[] = {
856+ { PCI_DEVICE_ID_810, &drv_alm },
857+ { PCI_DEVICE_ID_810DC, &drv_alm },
858+ { PCI_DEVICE_ID_810E, &drv_alm },
859+ { PCI_DEVICE_ID_815, &drv_alm },
860+ { PCI_DEVICE_ID_830M, &drv_alm },
861+ { PCI_DEVICE_ID_845G, &drv_alm },
862+ { PCI_DEVICE_ID_855, &drv_alm },
863+ { PCI_DEVICE_ID_865G, &drv_alm },
864+ { PCI_DEVICE_ID_915GD, &drv_nap },
865+ { PCI_DEVICE_ID_915AL, &drv_nap },
866+ { PCI_DEVICE_ID_945G, &drv_nap },
867+ { PCI_DEVICE_ID_945GM, &drv_nap },
868+ { PCI_DEVICE_ID_945GME,&drv_nap },
869+ { PCI_DEVICE_ID_Q35, &drv_nap },
870+ { PCI_DEVICE_ID_Q35A2, &drv_nap },
871+ { PCI_DEVICE_ID_965G, &drv_gn4 },
872+ { PCI_DEVICE_ID_946GZ, &drv_gn4 },
873+ { PCI_DEVICE_ID_G965, &drv_gn4 },
874+ { PCI_DEVICE_ID_Q965, &drv_gn4 },
875+ { PCI_DEVICE_ID_GM965, &drv_gn4 },
876+ { PCI_DEVICE_ID_GME965,&drv_gn4 },
877+ { PCI_DEVICE_ID_GM45, &drv_gm45},
878+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
879+ { PCI_DEVICE_ID_PLB, &drv_plb },
880+#endif
881+ { PCI_DEVICE_ID_ELK, &drv_gm45},
882+ { PCI_DEVICE_ID_Q45, &drv_gm45},
883+ { PCI_DEVICE_ID_G45, &drv_gm45},
884+ { PCI_DEVICE_ID_G41, &drv_gm45},
885+ { 0, NULL },
886+};
887+
888+/* Structure contained bit mask for the page table entries */
889+struct gatt_mask iegd_cmn_masks[] =
890+{
891+ {.mask = I810_PTE_VALID, .type = 0},
892+ {.mask = (I810_PTE_VALID | I810_PTE_LOCAL),
893+ .type = AGP_DCACHE_MEMORY},
894+ {.mask = I810_PTE_VALID, .type = 0}
895+};
896+
897+
898+int iegd_cmn_configure(void)
899+{
900+ struct aper_size_info_fixed *current_size;
901+ u32 temp;
902+ u16 gmch_ctrl;
903+ int i;
904+
905+ AGN_DEBUG("Enter");
906+
907+ current_size = A_SIZE_FIX(agp_bridge->current_size);
908+
909+ pci_read_config_dword(private_data.pdev, I915_GMADDR, &temp);
910+
911+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
912+
913+ if(!((private_data.pdev->device == PCI_DEVICE_ID_Q35) ||
914+ (private_data.pdev->device == PCI_DEVICE_ID_Q35A2))) {
915+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
916+ gmch_ctrl |= I830_GMCH_ENABLED;
917+ pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
918+
919+ global_cache_flush();
920+ agp_bridge->driver->tlb_flush(0);
921+
922+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED,
923+ private_data.registers+I810_PGETBL_CTL);
924+ /* PCI Posting. */
925+ readl(private_data.registers+I810_PGETBL_CTL);
926+ }
927+
928+ AGN_DEBUG ("gtt_entries: %X", private_data.gtt_entries);
929+ if (agp_bridge->driver->needs_scratch_page) {
930+ for (i = private_data.gtt_entries;
931+ i < current_size->num_entries; i++) {
932+ writel(agp_bridge->scratch_page, private_data.gtt+i);
933+ readl(private_data.gtt+i); /* PCI Posting. */
934+ }
935+ }
936+ global_cache_flush();
937+
938+ AGN_DEBUG("Exit");
939+
940+ return 0;
941+}
942+
943+void iegd_cmn_init_gtt_entries(void)
944+{
945+ u16 gmch_ctrl;
946+ u32 iegd_scratch, iegd_scratch2;
947+ int gtt_entries;
948+ u8 rdct;
949+ int local = 0;
950+ static const int ddt[4] = { 0, 16, 32, 64 };
951+ int size;
952+ int gtt_enabled = FALSE;
953+
954+ AGN_DEBUG("Enter");
955+
956+ /* This code originally read the GMCH_CTRL register of the host
957+ * bridge. This register is also mirrored on the VGA device at
958+ * the same address. In the PLB family, the host bridge no
959+ * longer contains the register. As a result, all platforms
960+ * will now use the mirrored register. This breaks
961+ * compatibility with chipsets prior to 915G
962+ */
963+ pci_read_config_word(private_data.pdev, I830_GMCH_CTRL, &gmch_ctrl);
964+
965+ gtt_enabled = readl(private_data.registers + I810_PGETBL_CTL) &
966+ I810_PGETBL_ENABLED;
967+
968+ /* A note on stolen memory:
969+ * Intel chipsets set aside a small area at the top of system memory
970+ * for VGA framebuffers etc. When the Intel device is the VGA
971+ * device, this memory is used to contain the GTT itself, and a scratch
972+ * memory page. Therefore the actual available memory already populated
973+ * in the GTT is the stolen memory minus the 4k scratch page minus the
974+ * 128 page table.
975+ *
976+ * Additionally, the embedded firmware may further alter this amount.
977+ * It can either allocate additional memory to be placed in the GTT
978+ * or use some stolen memory for data. If the IEGD vBIOS has altered
979+ * the amount we can detect it by reading a well-defined scratch
980+ * register.
981+ *
982+ * When the Intel Graphics Device is not the VGA device, i.e.
983+ * the system boots with a PCI card, then this driver discards
984+ * the stolen memory.
985+ *
986+ * We obtain the size of the GTT, which is also stored (for some
987+ * reason) at the top of stolen memory. Then we add 4KB to that
988+ * for the video BIOS popup, which is also stored in there. */
989+
990+ size = agp_bridge->driver->fetch_size() + 4;
991+ AGN_DEBUG("Size from fetch size + 4 = %x", size);
992+
993+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
994+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
995+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
996+ case I830_GMCH_GMS_STOLEN_512:
997+ gtt_entries = KB(512) - KB(size);
998+ break;
999+ case I830_GMCH_GMS_STOLEN_1024:
1000+ gtt_entries = MB(1) - KB(size);
1001+ break;
1002+ case I830_GMCH_GMS_STOLEN_8192:
1003+ gtt_entries = MB(8) - KB(size);
1004+ break;
1005+ case I830_GMCH_GMS_LOCAL:
1006+ rdct = readb(private_data.registers+I830_RDRAM_CHANNEL_TYPE);
1007+ gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
1008+ MB(ddt[I830_RDRAM_DDT(rdct)]);
1009+ local = 1;
1010+ break;
1011+ default:
1012+ gtt_entries = 0;
1013+ break;
1014+ }
1015+ } else {
1016+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
1017+ case I855_GMCH_GMS_STOLEN_1M:
1018+ gtt_entries = MB(1) - KB(size);
1019+ break;
1020+ case I855_GMCH_GMS_STOLEN_4M:
1021+ gtt_entries = MB(4) - KB(size);
1022+ break;
1023+ case I855_GMCH_GMS_STOLEN_8M:
1024+ gtt_entries = MB(8) - KB(size);
1025+ break;
1026+ case I855_GMCH_GMS_STOLEN_16M:
1027+ gtt_entries = MB(16) - KB(size);
1028+ break;
1029+ case I855_GMCH_GMS_STOLEN_32M:
1030+ gtt_entries = MB(32) - KB(size);
1031+ break;
1032+ case I915_GMCH_GMS_STOLEN_48M:
1033+ /* Check it's really I915G */
1034+ if (agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_915GD ||
1035+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_915AL ||
1036+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_945G ||
1037+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_945GM ||
1038+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_945GME ||
1039+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_965G ||
1040+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_G965 ||
1041+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_Q965 ||
1042+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_GM965 ||
1043+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_GME965 ||
1044+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_946GZ )
1045+ gtt_entries = MB(48) - KB(size);
1046+ else
1047+ gtt_entries = 0;
1048+ break;
1049+ case I915_GMCH_GMS_STOLEN_64M:
1050+ /* Check it's really I915G */
1051+ if (agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_915GD ||
1052+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_915AL ||
1053+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_945G ||
1054+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_945GM ||
1055+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_945GME ||
1056+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_965G ||
1057+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_G965 ||
1058+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_Q965 ||
1059+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_GM965 ||
1060+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_GME965 ||
1061+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_946GZ )
1062+ gtt_entries = MB(64) - KB(size);
1063+ else
1064+ gtt_entries = 0;
1065+ default:
1066+ gtt_entries = 0;
1067+ break;
1068+ }
1069+ }
1070+
1071+ /* if GTT is not enabled, then initialize gtt entries to 0 */
1072+
1073+ if (!gtt_entries) {
1074+ AGN_DEBUG("GTT is disabled");
1075+ AGN_LOG("IGD not primary, throwing away stolen memory.");
1076+
1077+ /* Update the scratch registers to say that we have no stolen memory */
1078+ writel((0xE1DF << 16), private_data.registers + 0x71410);
1079+
1080+ iegd_scratch = readl(private_data.registers + 0x71410);
1081+ iegd_scratch |= 0x4;
1082+
1083+ writel(iegd_scratch, private_data.registers + 0x71410);
1084+
1085+ /* say that we have 0 stolen memory regardless of what was
1086+ * really in there */
1087+ writel(0, private_data.registers + 0x71418);
1088+
1089+ gtt_entries = 0;
1090+ }
1091+
1092+ iegd_scratch = readl(private_data.registers + 0x71410);
1093+
1094+ if(((iegd_scratch>>16) == 0xE1DF) && (iegd_scratch & 0x4)) {
1095+ AGN_LOG("IEGD Firmware Detected");
1096+ /* IEGD firmware found, and Mem Reservation Flag present */
1097+ iegd_scratch2 = readl(private_data.registers + 0x71418);
1098+ gtt_entries = (iegd_scratch2 & 0xFFFF) * 4096;
1099+ }
1100+
1101+ if (gtt_entries > 0)
1102+ AGN_LOG("Detected %dK %s memory.",
1103+ gtt_entries / KB(1), local ? "local" : "stolen");
1104+ else
1105+ AGN_LOG("No pre-allocated video memory detected.\n");
1106+
1107+ gtt_entries /= KB(4);
1108+ private_data.gtt_entries = gtt_entries;
1109+
1110+ AGN_DEBUG("Exit");
1111+}
1112+
1113+int AGP_FREE_GATT(iegd_cmn_free_gatt_table)
1114+{
1115+ AGN_DEBUG("Enter");
1116+ AGN_DEBUG("Exit");
1117+ return 0;
1118+}
1119+
1120+void AGP_ENABLE(iegd_cmn_agp_enable)
1121+{
1122+ AGN_DEBUG("Enter");
1123+ AGN_DEBUG("Exit");
1124+ return;
1125+}
1126+
1127+struct agp_memory *iegd_cmn_alloc_by_type(
1128+ size_t pg_count, int type)
1129+{
1130+ struct agp_memory *new;
1131+
1132+ AGN_DEBUG("Enter");
1133+
1134+ /* AGP_DCACHE_MEMORY is used by certain chipsets only, especially
1135+ * chipsets from the Almador family. */
1136+ if(private_data.pdev->device == PCI_DEVICE_ID_810 ||
1137+ private_data.pdev->device == PCI_DEVICE_ID_810DC ||
1138+ private_data.pdev->device == PCI_DEVICE_ID_810E ||
1139+ private_data.pdev->device == PCI_DEVICE_ID_815) {
1140+ if (type == AGP_DCACHE_MEMORY) {
1141+ if (pg_count != private_data.num_dcache_entries) {
1142+ AGN_ERROR("Page count error");
1143+ AGN_DEBUG("pg_count=%d, num_dcache_entries=%d",
1144+ pg_count, private_data.num_dcache_entries);
1145+ return NULL;
1146+ }
1147+
1148+ new = agp_create_memory(1);
1149+ if (new == NULL) {
1150+ AGN_ERROR("Allocating memory failed");
1151+ return NULL;
1152+ }
1153+
1154+ new->type = AGP_DCACHE_MEMORY;
1155+ new->page_count = pg_count;
1156+ new->num_scratch_pages = 0;
1157+ vfree(new->AGP_MEMORY_MEMBER); //free pages or memory
1158+ AGN_DEBUG("AGP_DCACHE_MEMORY.. Exit");
1159+ return new;
1160+ }
1161+ }
1162+
1163+ if (type == AGP_PHYS_MEMORY) {
1164+ AGN_DEBUG("AGP_PHYS_MEMORY.. Exit");
1165+ return alloc_agpphysmem_i8xx(pg_count, type);
1166+ }
1167+
1168+ AGN_DEBUG("NULL.. Exit");
1169+ return NULL;
1170+}
1171+
1172+void iegd_cmn_free_by_type(struct agp_memory *curr)
1173+{
1174+ unsigned int order;
1175+
1176+ AGN_DEBUG("Enter");
1177+
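+ /* Only 1-, 4- and 8-page blocks are created by alloc_agpphysmem_i8xx(),
+ * so these are the only orders we ever free. */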
1178+ switch (curr->page_count) {
1179+ case 1:
1180+ order = 0; /* pg_count = 1 => 2 ^ 0 */
1181+ break;
1182+ case 4:
1183+ order = 2; /* pg_count = 4 => 2 ^ 2 */
1184+ break;
1185+ case 8:
1186+ order = 3; /* pg_count = 8 => 2 ^ 3 */
1187+ break;
1188+ default:
1189+ /* This case should never happen */
1190+ return;
1191+ }
1192+
1193+ agp_free_key(curr->key);
1194+ if(curr->type == AGP_PHYS_MEMORY) {
1195+ DESTROY_PAGES(PAGES_OR_MEMORY(curr), curr->page_count,
1196+ order);
1197+ IGD_FREE_MEM(curr);
1198+ }
1199+ kfree(curr);
1200+
1201+ AGN_DEBUG("Exit");
1202+}
1203+
1204+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
1205+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
1206+{
1207+ struct agp_memory *new;
1208+ void *addr;
1209+ unsigned int order, i;
1210+
1211+ AGN_DEBUG("Enter");
1212+
1213+ /* To support RGBA hardware cursor which may require contiguous physical
1214+ * memory to be allocated with either 1, 4 or 8 pages. 8 pages is
1215+ * the worst case for 830 which requires 4 pages and 4 page alignment.
1216+ */
1217+ switch (pg_count) {
1218+ case 1:
1219+ order = 0; /* pg_count = 1 => 2 ^ 0 */
1220+ break;
1221+ case 4:
1222+ order = 2; /* pg_count = 4 => 2 ^ 2 */
1223+ break;
1224+ case 8:
1225+ order = 3; /* pg_count = 8 => 2 ^ 3 */
1226+ break;
1227+ default:
1228+ return NULL;
1229+ }
1230+
1231+ addr = i8xx_alloc_pages(pg_count, order);
1232+ if (addr == NULL) {
1233+ AGN_ERROR("Allocating pages failed");
1234+ return NULL;
1235+ }
1236+
1237+ new = agp_create_memory(pg_count);
1238+ if (new == NULL) {
1239+ AGN_ERROR("Allocating memory failed");
1240+ return NULL;
1241+ }
1242+
1243+ new->memory[0] = virt_to_gart(addr);
1244+ for (i = 1; i < pg_count; i++) {
1245+ new->memory[i] = new->memory[i-1] + PAGE_SIZE;
1246+ }
1247+ new->page_count = pg_count;
1248+ new->num_scratch_pages = pg_count;
1249+ new->type = AGP_PHYS_MEMORY;
1250+ new->physical = new->memory[0];
1251+
1252+ AGN_DEBUG("Exit");
1253+ return new;
1254+}
1255+#else // kernel 31 or newer
1256+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
1257+{
1258+ struct agp_memory *new;
1259+ struct page *page;
1260+ unsigned int order, i;
1261+
1262+ AGN_DEBUG("Enter");
1263+
1264+ /* To support RGBA hardware cursor which may require contiguous physical
1265+ * memory to be allocated with either 1, 4 or 8 pages. 8 pages is
1266+ * the worst case for 830 which requires 4 pages and 4 page alignment.
1267+ */
1268+ switch (pg_count) {
1269+ case 1:
1270+ order = 0; /* pg_count = 1 => 2 ^ 0 */
1271+ break;
1272+ case 4:
1273+ order = 2; /* pg_count = 4 => 2 ^ 2 */
1274+ break;
1275+ case 8:
1276+ order = 3; /* pg_count = 8 => 2 ^ 3 */
1277+ break;
1278+ default:
1279+ return NULL;
1280+ }
1281+
1282+ page = i8xx_alloc_pages(pg_count, order);
1283+ if (page == NULL) {
1284+ AGN_ERROR("Allocating pages failed");
1285+ return NULL;
1286+ }
1287+
1288+ new = agp_create_memory(pg_count);
1289+ if (new == NULL) {
1290+ AGN_ERROR("Allocating memory failed");
1291+ return NULL;
1292+ }
1293+
1294+ new->pages[0] = page;
1295+ if (pg_count > 1) { // if page count is 4 or 8
1296+ for (i=0; i< pg_count-1; i++) {
1297+ new->pages[i+1] = new->pages[i] + 1;
1298+ }
1299+ }
1300+ new->page_count = pg_count;
1301+ new->num_scratch_pages = pg_count;
1302+ new->type = AGP_PHYS_MEMORY;
1303+ new->physical = page_to_phys(new->pages[0]);
1304+
1305+ AGN_DEBUG("Exit");
1306+ return new;
1307+}
1308+#endif
1309+
1310+static AGP_MEM_TYPE i8xx_alloc_pages(size_t pg_count, unsigned int order)
1311+{
1312+ struct page * page;
1313+
1314+ AGN_DEBUG("Enter");
1315+
1316+ page = alloc_pages(GFP_KERNEL, order);
1317+ if (page == NULL) {
1318+ AGN_ERROR("Allocating kernel page failed");
1319+ return NULL;
1320+ }
1321+
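+ /* Mark the pages uncached; on failure restore write-back caching and
+ * release the pages. */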
1322+ if (SET_PAGES_UC(page, pg_count) < 0) {
1323+ SET_PAGES_WB(page, pg_count);
1324+ GLOBAL_FLUSH_TLB();
1325+ __free_pages(page, order);
1326+ AGN_ERROR("Change page attribute failed");
1327+ return NULL;
1328+ }
1329+ GLOBAL_FLUSH_TLB();
1330+ get_page(page);
1331+
1332+ /*
1333+ * Starting with kernel 2.6.23, locking the page causes a lot of trouble
1334+ * because of changes in the kernel's page fault handler.
1335+ */
1336+ AGP_LOCK_PAGE(page);
1337+ atomic_inc(&agp_bridge->current_memory_agp);
1338+
1339+ AGN_DEBUG("Exit");
1340+ return PAGE_ADDRESS(page); //returns page or addr depending on kernel
1341+}
1342+
1343+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
1344+static void i8xx_destroy_pages_by_addr(void *addr,
1345+ size_t pg_count, unsigned int order)
1346+{
1347+ struct page *page;
1348+
1349+ AGN_DEBUG("Enter");
1350+
1351+ if (addr == NULL)
1352+ return;
1353+
1354+ page = virt_to_page(addr);
1355+ SET_PAGES_WB(page, pg_count);
1356+ GLOBAL_FLUSH_TLB();
1357+ put_page(page);
1358+ /*
1359+ * Starting with kernel 2.6.23, locking the page causes a lot of trouble
1360+ * because of changes in the kernel's page fault handler.
1361+ */
1362+ AGP_UNLOCK_PAGE(page);
1363+
1364+ free_pages((unsigned long)addr, order);
1365+ atomic_dec(&agp_bridge->current_memory_agp);
1366+
1367+ AGN_DEBUG("Exit");
1368+}
1369+
1370+#else //kernel is 31 or newer
1371+static void i8xx_destroy_pages(struct page **pages,
1372+ size_t pg_count, unsigned int order)
1373+{
1374+ struct page *page;
1375+ int i;
1376+
1377+ AGN_DEBUG("Enter");
1378+
1379+ if (pages == NULL)
1380+ return;
1381+
1382+ GLOBAL_FLUSH_TLB();
1383+ //The following code is based on agp_generic_destroy_pages in generic.c
1384+ for (i = 0; i < pg_count; i++) {
1385+ page = pages[i];
1386+
1387+ put_page(page);
1388+ __free_page(page);
1389+ atomic_dec(&agp_bridge->current_memory_agp);
1390+ pages[i] = NULL;
1391+ }
1392+
1393+ AGN_DEBUG("Exit");
1394+}
1395+#endif
1396+
1397+unsigned long AGP_MASK_MEMORY(iegd_cmn_mask_memory)
1398+{
1399+ struct agp_bridge_data *brdg = AGP_BRIDGE_VAR;
1400+
1401+ // only converts if kernel is 2.6.31 or newer
1402+ unsigned long address = CONVERT_PAGE_TO_GART(addr);
1403+
1404+ /* Type checking must be done elsewhere */
1405+ return address | AGP_MASK_ADDR(brdg);
1406+}
1407+
1408+int iegd_cmn_insert_entries(struct agp_memory *mem,
1409+ off_t pg_start, int type)
1410+{
1411+ int i,j,num_entries;
1412+ void *temp;
1413+
1414+ AGN_DEBUG("Enter");
1415+
1416+ temp = agp_bridge->current_size;
1417+ num_entries = A_SIZE_FIX(temp)->num_entries;
1418+
1419+ if (pg_start < private_data.gtt_entries) {
1420+ AGN_ERROR("Trying to insert into local/stolen memory");
1421+ AGN_DEBUG("pg_start == 0x%.8lx, private_data.gtt_entries =="
1422+ " %d", pg_start, private_data.gtt_entries);
1423+ return -EINVAL;
1424+ }
1425+
1426+ /* If we try to write beyond gtt table, return error */
1427+ if ((pg_start + mem->page_count) > num_entries) {
1428+ AGN_ERROR("Trying to write beyond aperture limit");
1429+ AGN_DEBUG("pg_start=0x%.8lx, mem->page_count=%d,"
1430+ "num_entries=%d", pg_start, mem->page_count,
1431+ num_entries);
1432+ return -EINVAL;
1433+ }
1434+
1435+ /* The i830 can't check the GTT for entries since it's read-only,
1436+ * so we depend on the caller to make the correct offset decisions.
1437+ */
1438+
1439+ if ((type != 0 && type != AGP_PHYS_MEMORY) ||
1440+ (mem->type != 0 && mem->type != AGP_PHYS_MEMORY)) {
1441+ AGN_ERROR("Unsupported memory type");
1442+ AGN_DEBUG("mem->type=%x, type=%x", mem->type, type);
1443+ return -EINVAL;
1444+ }
1445+
1446+ global_cache_flush();
1447+ agp_bridge->driver->tlb_flush(mem);
1448+
1449+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
1450+ writel(AGP_MASK_GTT(), private_data.gtt+j);
1451+ readl(private_data.gtt+j); /* PCI Posting. */
1452+ }
1453+
1454+ global_cache_flush();
1455+ agp_bridge->driver->tlb_flush(mem);
1456+
1457+ AGN_DEBUG("Exit");
1458+
1459+ return 0;
1460+}
1461+
1462+int iegd_cmn_remove_entries(struct agp_memory *mem,
1463+ off_t pg_start, int type)
1464+{
1465+ int i;
1466+
1467+ AGN_DEBUG("Enter");
1468+
1469+ global_cache_flush();
1470+ agp_bridge->driver->tlb_flush(mem);
1471+
1472+ if (pg_start < private_data.gtt_entries) {
1473+ AGN_ERROR("Trying to disable local/stolen memory");
1474+ AGN_DEBUG("pg_start=0x%.8lx, private_data.gtt_entries=%d",
1475+ pg_start, private_data.gtt_entries);
1476+ return -EINVAL;
1477+ }
1478+
1479+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1480+ writel(agp_bridge->scratch_page, private_data.gtt+i);
1481+ readl(private_data.gtt+i);
1482+ }
1483+
1484+ global_cache_flush();
1485+ agp_bridge->driver->tlb_flush(mem);
1486+
1487+ AGN_DEBUG("Exit");
1488+
1489+ return 0;
1490+}
1491diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_gn4.c patch_script_temp/drivers/gpu/drm/iegd/agp/drv_gn4.c
1492--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_gn4.c 1969-12-31 17:00:00.000000000 -0700
1493+++ patch_script_temp/drivers/gpu/drm/iegd/agp/drv_gn4.c 2009-10-06 10:30:05.000000000 -0700
1494@@ -0,0 +1,455 @@
1495+/* -*- pse-c -*-
1496+ *----------------------------------------------------------------------------
1497+ * Filename: iegd_interface.c
1498+ * $Revision: 1.17 $
1499+ *----------------------------------------------------------------------------
1500+ * Gart and DRM driver for Intel Embedded Graphics Driver
1501+ * Copyright © 2007, Intel Corporation.
1502+ *
1503+ * This program is free software; you can redistribute it and/or modify it
1504+ * under the terms and conditions of the GNU General Public License,
1505+ * version 2, as published by the Free Software Foundation.
1506+ *
1507+ * This program is distributed in the hope it will be useful, but WITHOUT
1508+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1509+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1510+ * more details.
1511+ *
1512+ * You should have received a copy of the GNU General Public License along with
1513+ * this program; if not, write to the Free Software Foundation, Inc.,
1514+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
1515+ *
1516+ */
1517+
1518+#include "global.h"
1519+#include "intelpci.h"
1520+
1521+static int iegd_gn4_fetch_size(void);
1522+static void iegd_gn4_cleanup(void);
1523+static void iegd_gn4_tlbflush(struct agp_memory *mem);
1524+static int AGP_CREATE_GATT(iegd_gn4_create_gatt_table);
1525+
1526+/* GM45 functions */
1527+static int iegd_igm45_fetch_size(void);
1528+static int iegd_igm45_configure(void);
1529+static int AGP_CREATE_GATT(iegd_igm45_create_gatt_table);
1530+
1531+struct aper_size_info_fixed iegd_i965_sizes[] =
1532+{
1533+ /* VBIOS always allocates enough space for 512MB aperture */
1534+ /* Aperture size in MB, # of GTT entries, page order */
1535+ {128, 131072, 7},
1536+ {64, 131072, 7},
1537+ {256, 131072, 7},
1538+ {512, 131072, 7},
1539+};
1540+
1541+struct aper_size_info_fixed iegd_igm45_sizes[] =
1542+{
1543+ /* GM45 has a 2MB GTT (EDS page 217) */
1544+ /* Aperture size in MB, # of GTT entries, page order */
1545+ {256, 524288, 7},
1546+ {512, 524288, 7},
1547+};
1548+
1549+
1550+bridge_driver_t drv_gn4 = {
1551+ .owner = THIS_MODULE,
1552+ .size_type = FIXED_APER_SIZE,
1553+ .aperture_sizes = 0,
1554+ .num_aperture_sizes = 0,
1555+ .needs_scratch_page = TRUE,
1556+ .configure = iegd_cmn_configure,
1557+ .fetch_size = iegd_gn4_fetch_size,
1558+ .cleanup = iegd_gn4_cleanup,
1559+ .tlb_flush = iegd_gn4_tlbflush,
1560+ .mask_memory = iegd_cmn_mask_memory,
1561+ .masks = iegd_cmn_masks,
1562+ .agp_enable = iegd_cmn_agp_enable,
1563+ .cache_flush = global_cache_flush,
1564+ .create_gatt_table = iegd_gn4_create_gatt_table,
1565+ .free_gatt_table = iegd_cmn_free_gatt_table,
1566+ .insert_memory = iegd_cmn_insert_entries,
1567+ .remove_memory = iegd_cmn_remove_entries,
1568+ .alloc_by_type = iegd_cmn_alloc_by_type,
1569+ .free_by_type = iegd_cmn_free_by_type,
1570+ .agp_alloc_page = agp_generic_alloc_page,
1571+ .agp_destroy_page = agp_generic_destroy_page,
1572+};
1573+
1574+/* GM45 */
1575+bridge_driver_t drv_gm45 = {
1576+ .owner = THIS_MODULE,
1577+ .size_type = FIXED_APER_SIZE,
1578+ .aperture_sizes = (void *)iegd_igm45_sizes,
1579+ .num_aperture_sizes = 2,
1580+ .needs_scratch_page = TRUE,
1581+ .configure = iegd_igm45_configure,
1582+ .fetch_size = iegd_igm45_fetch_size,
1583+ .cleanup = iegd_gn4_cleanup,
1584+ .tlb_flush = iegd_gn4_tlbflush,
1585+ .mask_memory = iegd_cmn_mask_memory,
1586+ .masks = iegd_cmn_masks,
1587+ .agp_enable = iegd_cmn_agp_enable,
1588+ .cache_flush = global_cache_flush,
1589+ .create_gatt_table = iegd_igm45_create_gatt_table,
1590+ .free_gatt_table = iegd_cmn_free_gatt_table,
1591+ .insert_memory = iegd_cmn_insert_entries,
1592+ .remove_memory = iegd_cmn_remove_entries,
1593+ .alloc_by_type = iegd_cmn_alloc_by_type,
1594+ .free_by_type = iegd_cmn_free_by_type,
1595+ .agp_alloc_page = agp_generic_alloc_page,
1596+ .agp_destroy_page = agp_generic_destroy_page,
1597+};
1598+
1599+
1600+static int iegd_gn4_fetch_size(void)
1601+{
1602+ struct aper_size_info_fixed *values;
1603+ u32 offset = 0;
1604+ u8 temp;
1605+
1606+#define IG965_GMCH_MSAC 0x62
1607+#define IGM965_GMCH_MSAC 0x66
1608+
1609+ AGN_DEBUG("Enter");
1610+
1611+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
1612+
1613+ if(private_data.pdev->device == PCI_DEVICE_ID_GM965) {
1614+ pci_read_config_byte(private_data.pdev, IGM965_GMCH_MSAC, &temp);
1615+ } else {
1616+ pci_read_config_byte(private_data.pdev, IG965_GMCH_MSAC, &temp);
1617+ }
1618+
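+ /* MSAC bits [2:1] select the aperture size */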
1619+ switch (temp & 6) {
1620+ case 0:
1621+ offset = 0; /* 128MB aperture */
1622+ break;
1623+ case 2:
1624+ offset = 2; /* 256MB aperture */
1625+ break;
1626+ case 6:
1627+ offset = 3; /* 512MB aperture */
1628+ break;
1629+ }
1630+
1631+ /* Set the actual size here */
1632+ agp_bridge->previous_size = agp_bridge->current_size =
1633+ (void *)(values + offset);
1634+
1635+ AGN_DEBUG("Exit");
1636+
1637+ /* Always return 512KB GTT when calculating available stolen memory */
1638+ return values[3].size;
1639+}
1640+
1641+static void iegd_gn4_cleanup(void)
1642+{
1643+ AGN_DEBUG("Enter");
1644+ iounmap((void *)private_data.registers);
1645+ AGN_DEBUG("Exit");
1646+}
1647+
1648+static void iegd_gn4_tlbflush(struct agp_memory *mem)
1649+{
1650+ AGN_DEBUG("Enter");
1651+ /* Gen4 must flush the GTT or simple 2D rendering will lock the engine. */
1652+ writel(0, private_data.registers+0x2170);
1653+ writel(0, private_data.registers+0x2174);
1654+ AGN_DEBUG("Exit");
1655+ return;
1656+}
1657+
1658+static int AGP_CREATE_GATT(iegd_gn4_create_gatt_table)
1659+{
1660+ const u32 i965_gtt_table_order = 7;
1661+
1662+ int i;
1663+ u16 j = 0;
1664+ int num_entries;
1665+ u32 gtt_bus_addr;
1666+ u32 mmio_bus_addr;
1667+ u32 gtt_enabled = FALSE;
1668+ u32 gtt_table_size = (1 << i965_gtt_table_order) * PAGE_SIZE - 1;
1669+ u32 gtt_pgctl_reg;
1670+ char *gtt_table, *gtt_table_end, *current_entry;
1671+ struct page *gtt_table_page;
1672+
1673+ AGN_DEBUG("Enter");
1674+
1675+ agp_bridge->gatt_table_real = NULL;
1676+
1677+ /* Find and save the address of the MMIO register */
1678+ pci_read_config_dword(private_data.pdev, I915_MMADDR, &mmio_bus_addr);
1679+
1680+ mmio_bus_addr &= 0xFFF80000;
1681+ private_data.registers =(volatile u8 *)
1682+ ioremap(mmio_bus_addr,1024 * 4096);
1683+ if (!private_data.registers) {
1684+ AGN_ERROR("ioremap failed to map");
1685+ return (-ENOMEM);
1686+ }
1687+ /* GTT is mapped 512KB after the registers */
1688+ private_data.gtt = (u32 __iomem *)((u32)private_data.registers +
1689+ 512*1024);
1690+
1691+ /* Extract the content of the control register */
1692+ gtt_pgctl_reg = readl(private_data.registers+I810_PGETBL_CTL);
1693+ gtt_bus_addr = gtt_pgctl_reg & 0xFFFFF000;
1694+ gtt_enabled = gtt_pgctl_reg & I810_PGETBL_ENABLED;
1695+
1696+ global_cache_flush();
1697+ agp_bridge->driver->tlb_flush(0);
1698+
1699+ /* we have to call this as early as possible after the MMIO base address is known */
1700+ iegd_cmn_init_gtt_entries();
1701+
1702+ if( !gtt_enabled ) {
1703+ num_entries = iegd_i965_sizes[0].num_entries;
1704+ gtt_table = (char *) __get_free_pages(GFP_KERNEL,
1705+ i965_gtt_table_order);
1706+ gtt_table_end = gtt_table + gtt_table_size;
1707+
1708+ /* Make sure allocation was successful */
1709+ if( NULL == gtt_table ) {
1710+ AGN_ERROR("Fail to allocate kernel pages");
1711+ return (-ENOMEM);
1712+ }
1713+
1714+ for( current_entry = gtt_table; current_entry < gtt_table_end;
1715+ current_entry += PAGE_SIZE ) {
1716+ gtt_table_page = virt_to_page( current_entry );
1717+ set_bit( PG_reserved, &gtt_table_page->flags );
1718+ }
1719+
1720+ agp_bridge->gatt_bus_addr = virt_to_phys( gtt_table );
1721+
1722+ for( i = 0; i < num_entries; i++ ) {
1723+ *(u32 *)(gtt_table + j) = (u32)agp_bridge->scratch_page;
1724+ j += 4;
1725+ }
1726+ }
1727+ else {
1728+ agp_bridge->gatt_bus_addr = gtt_bus_addr;
1729+ }
1730+
1731+ agp_bridge->gatt_table = NULL;
1732+
1733+ AGN_DEBUG("Exit");
1734+
1735+ return(0);
1736+}
1737+
1738+static int AGP_CREATE_GATT(iegd_igm45_create_gatt_table)
1739+{
1740+ u32 mmio_bus_addr;
1741+
1742+ u32 gtt_mem_size;
1743+ u32 base_stolen_mem;
1744+ u16 gmch_ctrl;
1745+
1746+ u32 iegd_scratch, iegd_scratch2;
1747+ int gtt_entries;
1748+ int size = 4; /* Scratch page 4KB */
1749+
1750+ AGN_DEBUG("Enter");
1751+
1752+ agp_bridge->gatt_table_real = NULL;
1753+
1754+ /* Find and save the address of the MMIO register */
1755+ pci_read_config_dword(private_data.pdev, I915_MMADDR, &mmio_bus_addr);
1756+
1757+ /* Bits 35-22 */
1758+ mmio_bus_addr &= 0xFFC00000;
1759+
1760+ /* Map 4MB: 512KB MMIO, 2MB GTT */
1761+ private_data.registers =(volatile u8 *) ioremap(mmio_bus_addr, MB(4));
1762+
1763+ if (!private_data.registers) {
1764+ AGN_ERROR("ioremap failed to map");
1765+ return (-ENOMEM);
1766+ }
1767+
1768+ /* GTT is mapped 2MB after the registers */
1769+ private_data.gtt = (u32 __iomem *)((u32)private_data.registers + MB(2));
1770+
1771+ global_cache_flush();
1772+ agp_bridge->driver->tlb_flush(0);
1773+
1774+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL, &gmch_ctrl);
1775+
1776+#define IGM45_GMCH_GMS_STOLEN_128M (0x8 << 4)
1777+#define IGM45_GMCH_GMS_STOLEN_256M (0x9 << 4)
1778+#define IGM45_BASE_STOLEN 0x5C
1779+ pci_read_config_dword(private_data.pdev,IGM45_BASE_STOLEN,&base_stolen_mem);
1780+ base_stolen_mem &= 0xFFF00000;
1781+
1782+ /* Bits [7:4] will tell the amount of stolen memory */
1783+ /* Stolen memory = amount specified - 1 scratch page */
1784+ switch (gmch_ctrl & 0xf0) {
1785+ case I855_GMCH_GMS_STOLEN_1M:
1786+ gtt_entries = MB(1) - KB(size);
1787+ break;
1788+ case I855_GMCH_GMS_STOLEN_4M:
1789+ gtt_entries = MB(4) - KB(size);
1790+ break;
1791+ case I855_GMCH_GMS_STOLEN_8M:
1792+ gtt_entries = MB(8) - KB(size);
1793+ break;
1794+ case I855_GMCH_GMS_STOLEN_16M:
1795+ gtt_entries = MB(16) - KB(size);
1796+ break;
1797+ case I855_GMCH_GMS_STOLEN_32M:
1798+ gtt_entries = MB(32) - KB(size);
1799+ break;
1800+ case I915_GMCH_GMS_STOLEN_48M:
1801+ gtt_entries = MB(48) - KB(size);
1802+ break;
1803+ case I915_GMCH_GMS_STOLEN_64M:
1804+ gtt_entries = MB(64) - KB(size);
1805+ break;
1806+ case IGM45_GMCH_GMS_STOLEN_128M:
1807+ gtt_entries = MB(128) - KB(size);
1808+ break;
1809+ case IGM45_GMCH_GMS_STOLEN_256M:
1810+ gtt_entries = MB(256) - KB(size);
1811+ break;
1812+ default:
1813+ gtt_entries = 0;
1814+ break;
1815+ }
1816+
1817+ iegd_scratch = readl(private_data.registers + 0x71410);
1818+
1819+ /* check for the pci card as primary */
1820+ if (iegd_scratch == 0) {
1821+ /* No stolen memory has been used */
1822+ /*
1823+ * In Gen4, the GTT is located 2MB below stolen memory, which is a fixed
1824+ * location, and the GTT is empty.
1825+ * Populate the GTT with PTEs pointing to the stolen memory so that the
1826+ * stolen memory the BIOS already allocated is not wasted.
1827+ */
1828+ int num_entries;
1829+ int i;
1830+ u16 j = 0;
1831+
1832+ AGN_DEBUG("PCI as primary.\n");
1833+
1834+ num_entries = gtt_entries / KB (4);
1835+
1836+ for (i = 0; i < num_entries; i++) {
1837+ writel(((base_stolen_mem + i * KB(4)) | 1), private_data.gtt+j);
1838+ j+=1;
1839+ }
1840+
1841+ gtt_entries = num_entries * KB(4);
1842+
1843+ AGN_DEBUG("PCI as primary scratch_page = %08lx gtt_entries = %d",
1844+ agp_bridge->scratch_page, gtt_entries);
1845+ } else if (((iegd_scratch>>16) == 0xE1DF) && (iegd_scratch & 0x4)) {
1846+ AGN_LOG("IEGD Firmware Detected");
1847+ /* IEGD firmware found, and Mem Reservation Flag present */
1848+ iegd_scratch2 = readl(private_data.registers + 0x71418);
1849+ /* Stolen memory = # of pages * 4KB */
1850+ gtt_entries = (iegd_scratch2 & 0xFFFF) * 4096;
1851+ }
1852+
1853+ if (gtt_entries > 0) {
1854+ AGN_LOG("Detected %d KB = %d MB stolen memory.", gtt_entries / KB(1),
1855+ gtt_entries/MB(1));
1856+ } else {
1857+ AGN_LOG("No pre-allocated video memory detected.");
1858+ }
1859+
1860+ /* Divide by 4KB to get the # of GTT entries */
1861+ private_data.gtt_entries = gtt_entries/KB(4);
1862+
1863+
1864+ /* On GM45, GTTADR size is 2MB. EDS page 217 */
1865+ gtt_mem_size = MB(2);
1866+
1867+ AGN_DEBUG("gtt_mem_size = %uMB", gtt_mem_size/MB(1));
1868+
1869+ /* Subtract the GTT size from base stolen memory to get the base of the
1870+ * GTT. This can also be read from register 0xA8 of config space device 0 */
1871+ agp_bridge->gatt_bus_addr = base_stolen_mem - gtt_mem_size;
1872+ agp_bridge->gatt_table = NULL;
1873+ AGN_DEBUG("Exit");
1874+
1875+ return(0);
1876+}
1877+
1878+/* GM45: configure */
1879+static int iegd_igm45_configure(void)
1880+{
1881+ struct aper_size_info_fixed *current_size;
1882+ u32 temp;
1883+ int i;
1884+
1885+ AGN_DEBUG("Enter");
1886+
1887+ current_size = A_SIZE_FIX(agp_bridge->current_size);
1888+
1889+ pci_read_config_dword(private_data.pdev, I915_GMADDR, &temp);
1890+ AGN_DEBUG("1. Reg[0x%x] = 0x%x\n", I915_GMADDR, temp);
1891+
1892+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1893+ AGN_DEBUG("2. Reg[0x%x] = 0x%x\n", I915_GMADDR, temp);
1894+
1895+ if (agp_bridge->driver->needs_scratch_page) {
1896+ for (i = private_data.gtt_entries;
1897+ i < current_size->num_entries; i++) {
1898+ writel(agp_bridge->scratch_page, private_data.gtt+i);
1899+ readl(private_data.gtt+i); /* PCI Posting. */
1900+ }
1901+ }
1902+ global_cache_flush();
1903+
1904+ AGN_DEBUG("Exit");
1905+
1906+ return 0;
1907+}
1908+
1909+/* GM45: fetch_size() */
1910+static int iegd_igm45_fetch_size(void)
1911+{
1912+ struct aper_size_info_fixed *values;
1913+ u32 offset = 0;
1914+ u8 temp;
1915+
1916+#define IGM45_GMCH_MSAC 0x66
1917+#define Q45_GMCH_MSAC 0x62
1918+
1919+ AGN_DEBUG("Enter");
1920+
1921+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
1922+
1923+ if(private_data.pdev->device == PCI_DEVICE_ID_ELK ||
1924+ private_data.pdev->device == PCI_DEVICE_ID_Q45 ||
1925+ private_data.pdev->device == PCI_DEVICE_ID_G45 ||
1926+ private_data.pdev->device == PCI_DEVICE_ID_G41) {
1927+ pci_read_config_byte(private_data.pdev, Q45_GMCH_MSAC, &temp);
1928+ } else {
1929+ pci_read_config_byte(private_data.pdev, IGM45_GMCH_MSAC, &temp);
1930+ }
1931+
1932+ /* GM45 has only 2 aperture sizes (EDS 227) : 256MB/512MB */
1933+ switch (temp & 6) {
1934+ case 2:
1935+ offset = 0; /* 256MB aperture */
1936+ break;
1937+ case 6:
1938+ offset = 1; /* 512MB aperture */
1939+ break;
1940+ }
1941+
1942+ /* Set the actual size here */
1943+ agp_bridge->previous_size = agp_bridge->current_size =
1944+ (void *)(values + offset);
1945+
1946+ AGN_DEBUG("Exit");
1947+ /* For GM45 always return 2MB as GTT size */
1948+ return values[0].size;
1949+}
1950diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_nap.c patch_script_temp/drivers/gpu/drm/iegd/agp/drv_nap.c
1951--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_nap.c 1969-12-31 17:00:00.000000000 -0700
1952+++ patch_script_temp/drivers/gpu/drm/iegd/agp/drv_nap.c 2009-10-06 10:30:05.000000000 -0700
1953@@ -0,0 +1,470 @@
1954+/* -*- pse-c -*-
1955+ *----------------------------------------------------------------------------
1956+ * Filename: drv_nap.c
1957+ * $Revision: 1.14 $
1958+ *----------------------------------------------------------------------------
1959+ * Gart and DRM driver for Intel Embedded Graphics Driver
1960+ * Copyright © 2008, Intel Corporation.
1961+ *
1962+ * This program is free software; you can redistribute it and/or modify it
1963+ * under the terms and conditions of the GNU General Public License,
1964+ * version 2, as published by the Free Software Foundation.
1965+ *
1966+ * This program is distributed in the hope it will be useful, but WITHOUT
1967+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1968+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1969+ * more details.
1970+ *
1971+ * You should have received a copy of the GNU General Public License along with
1972+ * this program; if not, write to the Free Software Foundation, Inc.,
1973+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
1974+ *
1975+ */
1976+
1977+#include "global.h"
1978+#include "intelpci.h"
1979+
1980+static int iegd_nap_fetch_size(void);
1981+static void iegd_nap_tlbflush(struct agp_memory *mem);
1982+
1983+static void iegd_iq35_init_gtt_entries(void);
1984+static void iegd_nap_iq35_gatt(void);
1985+static int iegd_nap_9series(u32 order);
1986+static int AGP_CREATE_GATT(iegd_nap_create_gatt_table);
1987+static void iegd_nap_cleanup(void);
1988+
1989+
1990+struct aper_size_info_fixed iegd_i915_sizes[] =
1991+{
1992+ {128, 32768, 5},
1993+ /* The 64M mode still requires a 128k gatt */
1994+ {64, 16384, 5},
1995+ {256, 65536, 6},
1996+ {512, 131072, 7},
1997+};
1998+
1999+struct aper_size_info_fixed iegd_iq35_sizes[] =
2000+{
2001+ {128, 32768, 5},
2002+ {256, 65536, 6},
2003+ {512, 131072, 7},
2004+};
2005+
2006+bridge_driver_t drv_nap = {
2007+ .owner = THIS_MODULE,
2008+ .size_type = FIXED_APER_SIZE,
2009+ .aperture_sizes = 0,
2010+ .num_aperture_sizes = 0,
2011+ .needs_scratch_page = TRUE,
2012+ .configure = iegd_cmn_configure,
2013+ .fetch_size = iegd_nap_fetch_size,
2014+ .cleanup = iegd_nap_cleanup,
2015+ .tlb_flush = iegd_nap_tlbflush,
2016+ .mask_memory = iegd_cmn_mask_memory,
2017+ .masks = iegd_cmn_masks,
2018+ .agp_enable = iegd_cmn_agp_enable,
2019+ .cache_flush = global_cache_flush,
2020+ .create_gatt_table = iegd_nap_create_gatt_table,
2021+ .free_gatt_table = iegd_cmn_free_gatt_table,
2022+ .insert_memory = iegd_cmn_insert_entries,
2023+ .remove_memory = iegd_cmn_remove_entries,
2024+ .alloc_by_type = iegd_cmn_alloc_by_type,
2025+ .free_by_type = iegd_cmn_free_by_type,
2026+ .agp_alloc_page = agp_generic_alloc_page,
2027+ .agp_destroy_page = agp_generic_destroy_page,
2028+};
2029+
2030+static int iegd_nap_fetch_size()
2031+{
2032+ struct aper_size_info_fixed *values;
2033+ u32 offset = 0;
2034+ u32 temp2;
2035+ u8 temp;
2036+
2037+#define IQ35_GMCH_MSAC 0x62
2038+#define I915_256MB_ADDRESS_MASK (1<<27)
2039+
2040+ AGN_DEBUG("Enter");
2041+
2042+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
2043+
2044+ switch(private_data.pdev->device) {
2045+ case PCI_DEVICE_ID_Q35:
2046+ case PCI_DEVICE_ID_Q35A2:
2047+ pci_read_config_byte(private_data.pdev,
2048+ IQ35_GMCH_MSAC, &temp);
2049+ switch(temp & 0x3) {
2050+ case 1:
2051+ offset = 2; /* 512MB aperture size */
2052+ break;
2053+ case 2:
2054+ offset = 1; /* 256MB aperture size */
2055+ break;
2056+ case 3:
2057+ offset = 0; /* 128MB aperture size */
2058+ break;
2059+ }
2060+ break;
2061+ case PCI_DEVICE_ID_915GD:
2062+ case PCI_DEVICE_ID_915AL:
2063+ case PCI_DEVICE_ID_945G:
2064+ case PCI_DEVICE_ID_945GM:
2065+ case PCI_DEVICE_ID_945GME:
2066+ pci_read_config_dword(private_data.pdev,
2067+ I915_GMADDR, &temp2);
2068+ if (temp2 & I915_256MB_ADDRESS_MASK) {
2069+ offset = 0; /* 128MB aperture */
2070+ } else {
2071+ offset = 2; /* 256MB aperture */
2072+ }
2073+ break;
2074+ }
2075+
2076+ agp_bridge->previous_size = agp_bridge->current_size =
2077+ (void *)(values + offset);
2078+
2079+ AGN_DEBUG("Exit");
2080+
2081+ return values[offset].size;
2082+}
2083+
2084+static void iegd_nap_tlbflush(struct agp_memory *mem)
2085+{
2086+ AGN_DEBUG("Enter");
2087+ AGN_DEBUG("Exit");
2088+ return;
2089+}
2090+
2091+static void iegd_iq35_init_gtt_entries(void)
2092+{
2093+ u16 gmch_ctrl;
2094+ u32 iegd_scratch, iegd_scratch2;
2095+ int gtt_entries;
2096+ int local = 0;
2097+ int size = 4;
2098+
2099+#define I35_GMCH_GMS_STOLEN_128M (0x8 << 4)
2100+#define I35_GMCH_GMS_STOLEN_256M (0x9 << 4)
2101+#define I35_GMCH_GMS_MASK 0xf0
2102+
2103+ AGN_DEBUG("Enter");
2104+
2105+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
2106+
2107+ switch (gmch_ctrl & I35_GMCH_GMS_MASK) {
2108+ case I855_GMCH_GMS_STOLEN_1M:
2109+ gtt_entries = MB(1) - KB(size);
2110+ break;
2111+ case I855_GMCH_GMS_STOLEN_4M:
2112+ gtt_entries = MB(4) - KB(size);
2113+ break;
2114+ case I855_GMCH_GMS_STOLEN_8M:
2115+ gtt_entries = MB(8) - KB(size);
2116+ break;
2117+ case I855_GMCH_GMS_STOLEN_16M:
2118+ gtt_entries = MB(16) - KB(size);
2119+ break;
2120+ case I855_GMCH_GMS_STOLEN_32M:
2121+ gtt_entries = MB(32) - KB(size);
2122+ break;
2123+ case I915_GMCH_GMS_STOLEN_48M:
2124+ gtt_entries = MB(48) - KB(size);
2125+ break;
2126+ case I915_GMCH_GMS_STOLEN_64M:
2127+ gtt_entries = MB(64) - KB(size);
2128+ break;
2129+ case I35_GMCH_GMS_STOLEN_128M:
2130+ gtt_entries = MB(128) - KB(size);
2131+ break;
2132+ case I35_GMCH_GMS_STOLEN_256M:
2133+ gtt_entries = MB(256) - KB(size);
2134+ break;
2135+ default:
2136+ gtt_entries = 0;
2137+ break;
2138+ }
2139+
2140+ iegd_scratch = readl(private_data.registers + 0x71410);
2141+
2142+ /* FIXME: check for the pci card as primary */
2143+ if(iegd_scratch == 0) {
2144+ gtt_entries = 0;
2145+ } else if (((iegd_scratch>>16) == 0xE1DF) && (iegd_scratch & 0x4)) {
2146+ AGN_LOG("IEGD Firmware Detected");
2147+ /* IEGD firmware found, and Mem Reservation Flag present */
2148+ iegd_scratch2 = readl(private_data.registers + 0x71418);
2149+ gtt_entries = (iegd_scratch2 & 0xFFFF) * 4096;
2150+ }
2151+
2152+ if (gtt_entries > 0) {
2153+ AGN_LOG("Detected %dK %s memory.",
2154+ gtt_entries / KB(1), local ? "local" : "stolen");
2155+ } else {
2156+ AGN_LOG("No pre-allocated video memory detected.");
2157+ }
2158+
2159+ gtt_entries /= KB(4);
2160+
2161+ private_data.gtt_entries = gtt_entries;
2162+
2163+ AGN_DEBUG("Exit");
2164+}
2165+
2166+static void iegd_nap_iq35_gatt()
2167+{
2168+ u32 gtt_mem_size;
2169+ u32 base_stolen_mem;
2170+ u16 gmch_ctrl;
2171+
2172+ AGN_DEBUG("Enter");
2173+
2174+ iegd_iq35_init_gtt_entries();
2175+
2176+ pci_read_config_dword(private_data.pdev,
2177+ IQ35_BASE_STOLEN, &base_stolen_mem);
2178+ base_stolen_mem &= 0xFFF00000;
2179+
2180+ pci_read_config_word(private_data.pdev,
2181+ I830_GMCH_CTRL, &gmch_ctrl);
2182+
2183+ switch(gmch_ctrl & IQ35_GTT_MEM_SIZE) {
2184+ case IQ35_GGMS_1MB:
2185+ gtt_mem_size = MB(1); /* Normal mode */
2186+ break;
2187+ case IQ35_GGMS_2MB:
2188+ gtt_mem_size = MB(2); /* VT mode */
2189+ break;
2190+ default:
2191+ gtt_mem_size = 0;
2192+ }
2193+
2194+ AGN_DEBUG("gtt_mem_size = %uMB", gtt_mem_size / MB(1));
2195+
2196+ /* Subtract the GTT size from base stolen memory to get the base of the
2197+ * GTT. This can also be read from register 0xA8 of config space device 0 */
2198+ agp_bridge->gatt_bus_addr = base_stolen_mem - gtt_mem_size;
2199+
2200+ AGN_DEBUG("Exit");
2201+}
2202+
2203+static int iegd_nap_9series(u32 order)
2204+{
2205+ u32 gtt_pgctl_reg;
2206+ u32 gtt_bus_addr;
2207+ u32 gtt_enabled = FALSE;
2208+ u32 iegd_scratch;
2209+
2210+ gtt_pgctl_reg = readl(private_data.registers +
2211+ I810_PGETBL_CTL);
2212+ global_cache_flush();
2213+ gtt_bus_addr = gtt_pgctl_reg & 0xFFFFF000;
2214+ gtt_enabled = gtt_pgctl_reg & I810_PGETBL_ENABLED;
2215+
2216+
2217+ /* we have to call this as early as possible after the MMIO base
2218+ * address is known */
2219+ iegd_cmn_init_gtt_entries();
2220+
2221+ /*
2222+ * If the GTT is not enabled, create our own GTT table from kernel memory
2223+ * and initialize it to the scratch page. This handles the case where the
2224+ * VBIOS is not our VBIOS.
2225+ */
2226+ iegd_scratch = readl(private_data.registers + 0x71410);
2227+
2228+ if (iegd_scratch == 0) {
2229+ /* PCI as primary device. The IEGD VBIOS is not loaded, so we need to
2230+ * set up the GTT in stolen memory.
2231+ * The GTT will be located at the bottom of stolen memory.
2232+ * The rest of the memory is used as video memory and mapped by the PTEs,
2233+ * except for the last page, which is used as the scratch page.
2234+ */
2235+ u32 gtt_end;
2236+ u32 gtt_addr_reg;
2237+ u32 base_stolen_mem;
2238+ u16 gmch_ctrl;
2239+ int aperture_size = 0;
2240+ int total_stolen_pages = 0;
2241+ int total_gtt_entries = 0;
2242+ int num_entries;
2243+ int i;
2244+ u16 j = 0;
2245+ u32 temp2;
2246+ u8 temp;
2247+
2248+ /* Read the base of stolen memory. The bottom of stolen memory is used
2249+ * as the GTT table, and the rest is used as video memory. */
2250+ pci_read_config_dword(private_data.pdev,
2251+ IQ35_BASE_STOLEN, &base_stolen_mem);
2252+ base_stolen_mem &= 0xFFF00000;
2253+
2254+ /* Have to determine the stolen memory size. We can't use the
2255+ * private_data.gtt_entries value because it assumes the VBIOS is present. */
2256+ pci_read_config_word(private_data.pdev, I830_GMCH_CTRL, &gmch_ctrl);
2257+ gmch_ctrl = (gmch_ctrl >> 4) & 0xf;
2258+ /* Translate the stolen memory size to num of pages available. */
2259+ if (gmch_ctrl == 1) {
2260+ total_stolen_pages = 1024 / 4 ;
2261+ } else if (gmch_ctrl > 1) {
2262+ total_stolen_pages = (2 << (gmch_ctrl - 1)) * (1024 / 4);
2263+ }
2264+
2265+ /* We need to allocate the last page as scratch page. */
2266+ total_stolen_pages = total_stolen_pages - 1;
2267+
2268+ /* Need to program the PGETBL_CTL to enable page table. */
2269+ writel(base_stolen_mem | 1, private_data.registers + I810_PGETBL_CTL);
2270+
2271+#define I810_GTT_ADDR 0x1c
2272+ /* Find and save the address of the MMIO register */
2273+ pci_read_config_dword(private_data.pdev, I810_GTT_ADDR, &gtt_addr_reg);
2274+ private_data.gtt = (volatile u32 *) ioremap(gtt_addr_reg, KB(512));
2275+
2276+ if (!private_data.gtt) {
2277+ AGN_ERROR("ioremap failed to map");
2278+ return (-ENOMEM);
2279+ }
2280+
2281+
2282+ switch(private_data.pdev->device) {
2283+ case PCI_DEVICE_ID_Q35:
2284+ case PCI_DEVICE_ID_Q35A2:
2285+ pci_read_config_byte(private_data.pdev,
2286+ IQ35_GMCH_MSAC, &temp);
2287+ switch(temp & 0x3) {
2288+ case 1:
2289+ aperture_size = 512; /* 512MB aperture size */
2290+ break;
2291+ case 2:
2292+ aperture_size = 256; /* 256MB aperture size */
2293+ break;
2294+ case 3:
2295+ aperture_size = 128; /* 128MB aperture size */
2296+ break;
2297+ }
2298+ break;
2299+ case PCI_DEVICE_ID_915GD:
2300+ case PCI_DEVICE_ID_915AL:
2301+ case PCI_DEVICE_ID_945G:
2302+ case PCI_DEVICE_ID_945GM:
2303+ case PCI_DEVICE_ID_945GME:
2304+ pci_read_config_dword(private_data.pdev,
2305+ I915_GMADDR, &temp2);
2306+ if (temp2 & I915_256MB_ADDRESS_MASK) {
2307+ aperture_size = 128; /* 128MB aperture */
2308+ } else {
2309+ aperture_size = 256; /* 256MB aperture */
2310+ }
2311+ break;
2312+ default: AGN_ERROR("Illegal Device ID");
2313+ break;
2314+ }
2315+ /* Number of GTT entries available based on the aperture size. */
2316+ total_gtt_entries = aperture_size * 1024 / 4;
2317+ /* gtt_end is the last entry of the GTT, and start of video memory. */
2318+ gtt_end = base_stolen_mem + KB(aperture_size);
2319+
2320+ /* num_entries is the total number of PTEs that can be populated based
2321+ * on the remaining stolen memory size. */
2322+ num_entries = ((total_stolen_pages * 4) - aperture_size) / 4;
2323+
2324+ /* Have to program the PTEs through the GTT address space. */
2325+ for (i=0; i < num_entries; i++) {
2326+ writel(((gtt_end + i * KB(4)) | 1), (private_data.gtt + j));
2327+ j+=1;
2328+ }
2329+
2330+ /* I believe this will be the reserved memory referred to by GMM,
2331+ * so update the number of PTEs actually used. */
2332+ private_data.gtt_entries = num_entries - 1;
2333+
2334+ /* This num_entries is the remaining part of the GTT table not yet filled.
2335+ * Populate it with the scratch page. */
2336+ num_entries = total_gtt_entries - num_entries;
2337+
2338+ for (i=0; i < num_entries; i++)
2339+ {
2340+ writel(agp_bridge->scratch_page, private_data.gtt + j);
2341+ j+=1;
2342+ }
2343+
2344+ agp_bridge->gatt_bus_addr = base_stolen_mem;
2345+ } else {
2346+ agp_bridge->gatt_bus_addr = gtt_bus_addr;
2347+ }
2348+
2349+ agp_bridge->gatt_table = NULL;
2350+
2351+ AGN_DEBUG("Exit");
2352+
2353+ return 0;
2354+}
2355+
2356+
2357+static int AGP_CREATE_GATT(iegd_nap_create_gatt_table)
2358+{
2359+ const u32 i915_gtt_table_order = 6;
2360+ u32 mmio_bus_addr, temp2;
2361+ int ret;
2362+
2363+ AGN_DEBUG("Enter");
2364+
2365+ agp_bridge->gatt_table_real = NULL;
2366+
2367+ /* Find and save the address of the MMIO register */
2368+ pci_read_config_dword(private_data.pdev, I915_MMADDR,
2369+ &mmio_bus_addr);
2370+ mmio_bus_addr &= 0xFFF80000;
2371+
2372+ private_data.registers = (volatile u8 *) ioremap(mmio_bus_addr,
2373+ 128 * 4096);
2374+ if (!private_data.registers) {
2375+ AGN_ERROR("ioremap failed to map mmio");
2376+ return (-ENOMEM);
2377+ }
2378+
2379+ pci_read_config_dword(private_data.pdev, I915_PTEADDR,&temp2);
2380+
2381+ /* FIXME: double check the size of area to map to pci space */
2382+ private_data.gtt = (volatile u32 *)ioremap(temp2, 512 * 1024);
2383+ if (!private_data.gtt) {
2384+ AGN_ERROR("ioremap failed to map gtt");
2385+ return (-ENOMEM);
2386+ }
2387+
2388+ switch(private_data.pdev->device) {
2389+ case PCI_DEVICE_ID_Q35:
2390+ case PCI_DEVICE_ID_Q35A2:
2391+ /* Bearlake-B is different from other chipsets, especially
2392+ * when reading the GTT base address. Future chipsets will
2393+ * probably have the same architecture as Bearlake-B, and this
2394+ * code can then move to a common file. */
2395+ iegd_nap_iq35_gatt();
2396+ break;
2397+ case PCI_DEVICE_ID_915GD:
2398+ case PCI_DEVICE_ID_915AL:
2399+ case PCI_DEVICE_ID_945G:
2400+ case PCI_DEVICE_ID_945GM:
2401+ case PCI_DEVICE_ID_945GME:
2402+ if((ret = iegd_nap_9series(i915_gtt_table_order))) {
2403+ return (ret);
2404+ }
2405+ break;
2406+ }
2407+
2408+ agp_bridge->gatt_table = NULL;
2409+
2410+ AGN_DEBUG("Exit");
2411+
2412+ return (0);
2413+}
2414+
2415+static void iegd_nap_cleanup(void)
2416+{
2417+
2418+ AGN_DEBUG("Enter");
2419+ iounmap((void *)private_data.gtt);
2420+ iounmap((void *)private_data.registers);
2421+ AGN_DEBUG("Exit");
2422+}
2423+
2424diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_plb.c patch_script_temp/drivers/gpu/drm/iegd/agp/drv_plb.c
2425--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_plb.c 1969-12-31 17:00:00.000000000 -0700
2426+++ patch_script_temp/drivers/gpu/drm/iegd/agp/drv_plb.c 2009-10-06 10:30:05.000000000 -0700
2427@@ -0,0 +1,945 @@
2428+/* -*- pse-c -*-
2429+ *----------------------------------------------------------------------------
2430+ * Filename: iegd_interface.c
2431+ * $Revision: 1.36 $
2432+ *----------------------------------------------------------------------------
2433+ * Gart and DRM driver for Intel Embedded Graphics Driver
2434+ * Copyright © 2007, Intel Corporation.
2435+ *
2436+ * This program is free software; you can redistribute it and/or modify it
2437+ * under the terms and conditions of the GNU General Public License,
2438+ * version 2, as published by the Free Software Foundation.
2439+ *
2440+ * This program is distributed in the hope it will be useful, but WITHOUT
2441+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
2442+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
2443+ * more details.
2444+ *
2445+ * You should have received a copy of the GNU General Public License along with
2446+ * this program; if not, write to the Free Software Foundation, Inc.,
2447+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
2448+ *
2449+ */
2450+
2451+#include "global.h"
2452+#include "intelpci.h"
2453+#include <linux/pagemap.h>
2454+#include <linux/list.h>
2455+
2456+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
2457+
2458+static int iegd_plb_fetch_size(void);
2459+static void iegd_plb_tlbflush(struct agp_memory *mem);
2460+static int iegd_plb_init_gtt(u32 order);
2461+static int AGP_CREATE_GATT(iegd_plb_create_gatt_table);
2462+static void iegd_plb_cleanup(void);
2463+static struct page *iegd_plb_vm_nopage(struct vm_area_struct *,
2464+ unsigned long, int *);
2465+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
2466+static int iegd_plb_vm_fault(struct vm_area_struct *vma,
2467+ struct vm_fault *vmf);
2468+#endif
2469+static void iegd_plb_vm_close(struct vm_area_struct *);
2470+int iegd_plb_insert_entries(struct agp_memory *, off_t, int);
2471+int iegd_plb_remove_entries(struct agp_memory *, off_t, int);
2472+void iegd_plb_free_by_type(struct agp_memory *);
2473+int iegd_plb_configure(void);
2474+
2475+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
2476+#define PLB_DESTROY_PAGES(a,b,c) plb_destroy_pages(a->pages[0],b,c)
2477+#else
2478+#define PLB_DESTROY_PAGES(a,b,c) plb_destroy_pages_by_addr(gart_to_virt(a->memory[0]),b,c)
2479+#endif
2480+
2481+
2482+/* Each structure in this array contains three elements:
2483+ * Size of GTT in KB
2484+ * Number of 32-bit entries that make up the GTT
2485+ * Page "order" -- 2^order == number of contiguous CPU pages
2486+ * required to store the GTT
2487+ */
2488+struct aper_size_info_fixed iegd_plb_sizes[] =
2489+{
2490+ {256, 65536, 6},
2491+};
2492+
2493+bridge_driver_t drv_plb = {
2494+ .owner = THIS_MODULE,
2495+ .size_type = FIXED_APER_SIZE,
2496+ .aperture_sizes = iegd_plb_sizes,
2497+ .num_aperture_sizes = 1,
2498+ .needs_scratch_page = TRUE,
2499+ .configure = iegd_plb_configure,
2500+ .fetch_size = iegd_plb_fetch_size,
2501+ .cleanup = iegd_plb_cleanup,
2502+ .tlb_flush = iegd_plb_tlbflush,
2503+ .mask_memory = iegd_cmn_mask_memory,
2504+ .masks = iegd_cmn_masks,
2505+ .agp_enable = iegd_cmn_agp_enable,
2506+ .cache_flush = global_cache_flush,
2507+ .create_gatt_table = iegd_plb_create_gatt_table,
2508+ .free_gatt_table = iegd_cmn_free_gatt_table,
2509+ .insert_memory = iegd_plb_insert_entries,
2510+ .remove_memory = iegd_plb_remove_entries,
2511+ .alloc_by_type = iegd_cmn_alloc_by_type,
2512+ .free_by_type = iegd_plb_free_by_type,
2513+ .agp_alloc_page = agp_generic_alloc_page,
2514+ .agp_destroy_page = agp_generic_destroy_page,
2515+};
2516+
2517+struct vm_operations_struct iegd_plb_vm_ops = {
2518+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
2519+ .fault = iegd_plb_vm_fault,
2520+#else
2521+ .nopage = iegd_plb_vm_nopage,
2522+#endif
2523+ .close = iegd_plb_vm_close
2524+};
2525+
2526+static DECLARE_MUTEX(client_sem);
2527+
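+/* Track each user VMA that currently maps the virtual aperture. */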
2528+struct client_list_struct {
2529+ struct list_head list;
2530+ struct vm_area_struct *vma;
2531+ pid_t pid;
2532+};
2533+
2534+static LIST_HEAD(client_list);
2535+
2536+
2537+static int iegd_plb_fetch_size()
2538+{
2539+ struct aper_size_info_fixed *values;
2540+
2541+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
2542+
2543+
2544+ agp_bridge->previous_size = agp_bridge->current_size =
2545+ (void *)(values);
2546+
2547+ return values[0].size;
2548+}
2549+
2550+static void iegd_plb_tlbflush(struct agp_memory *mem)
2551+{
2552+ u32 sgx_mmu;
2553+
2554+ /* Flush TLB */
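+ /* Pulse bits 3:2 of the SGX MMU control register (0x40C00), then clear
+ * the low control bits again. */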
2555+ sgx_mmu = readl(private_data.registers + 0x40C00);
2556+ sgx_mmu &= 0xFFFFFFE0;
2557+ sgx_mmu |= 0x0C;
2558+ writel(sgx_mmu, private_data.registers + 0x40C00);
2559+
2560+ wmb();
2561+ sgx_mmu = readl(private_data.registers + 0x40C00);
2562+ sgx_mmu &= 0xFFFFFFE0;
2563+ writel(sgx_mmu, private_data.registers + 0x40C00);
2564+
2565+ return;
2566+}
2567+
2568+#define IUS15_GMCH_MSAC 0x62
2569+
2570+static int iegd_plb_init_gtt(u32 order)
2571+{
2572+ u32 gtt_pgctl_reg;
2573+ u32 gtt_bus_addr;
2574+ u32 gtt_enabled = FALSE;
2575+ int num_entries;
2576+ u32 *gtt_table, *dstvirt;
2577+ u32 *sgx_dir, sgx_mmu;
2578+ u32 iegd_scratch, aperphys;
2579+ u8 temp;
2580+ struct page *gtt_table_page;
2581+ int i,j;
2582+ u32 new_order;
2583+
2584+ /* Has the system BIOS only allocated a GTT for 128MB? If
2585+ * so, we need to replace it with one sized for 256MB.
2586+ */
2587+ pci_read_config_byte(private_data.pdev, IUS15_GMCH_MSAC, &temp);
2588+ if ((temp & 0x03) == 0x03) {
2589+ AGN_DEBUG("Graphics aperture is configured for 128MB");
2590+ AGN_DEBUG("Enabling 256MB split aperture");
2591+ private_data.split_gtt = 1;
2592+ } else {
2593+ private_data.split_gtt = 0;
2594+ }
2595+
2596+ gtt_pgctl_reg = readl(private_data.registers +
2597+ I810_PGETBL_CTL);
2598+ global_cache_flush();
2599+ gtt_bus_addr = gtt_pgctl_reg & 0xFFFFF000;
2600+ gtt_enabled = gtt_pgctl_reg & I810_PGETBL_ENABLED;
2601+
2602+ /* we have to call this as early as possible after the MMIO base
2603+ * address is known */
2604+ iegd_cmn_init_gtt_entries();
2605+
2606+ /* Update the scratch registers to say that we have no stolen memory */
2607+ iegd_scratch = readl(private_data.registers + 0x71410);
2608+ if ((iegd_scratch & 0xE1DF0000) == 0xE1DF0000) {
2609+ /* if our vBios modify only the stolen memory bit */
2610+ iegd_scratch |= 0x00000004;
2611+ writel(iegd_scratch, private_data.registers + 0x71410);
2612+ } else {
2613+ /* Not our vBIOS but set the stolen memory anyway */
2614+ writel(0xE1DF0004, private_data.registers + 0x71410);
2615+ }
2616+
2617+ /* Report that we have 0 stolen memory regardless of what was
2618+ * really in there. We _want_ to insert fresh pages on top of
2619+ * stolen memory. */
2620+ writel(0, private_data.registers + 0x71418);
2621+
2622+ num_entries = (1 << order) * KB(1);
2623+
2624+ private_data.upper_gtt=NULL;
2625+
2626+ /*
2627+ * If the GTT is not enabled, create our own GTT table from kernel memory
2628+ * and initialize it to the scratch page. This handles the case where the
2629+ * VBIOS is not our VBIOS.
2630+ */
2631+ if (!gtt_enabled) {
2632+ gtt_table = (u32 *)__get_free_pages(GFP_KERNEL, order);
2633+
2634+ /* Make sure allocation was successful */
2635+ if (NULL == gtt_table) {
2636+ AGN_ERROR("Failed to allocate kernel pages");
2637+ return (-ENOMEM);
2638+ }
2639+
2640+ for (i=0; i < (1 << order); i++) {
2641+ dstvirt = gtt_table + (PAGE_SIZE / sizeof(u32)) * i;
2642+ gtt_table_page = virt_to_page(dstvirt);
2643+ AGN_DEBUG("Setting reserved bit on %p", gtt_table_page);
2644+ set_bit(PG_reserved, &gtt_table_page->flags);
2645+ }
2646+
2647+ private_data.upper_gtt = gtt_table + 0x8000;
2648+ agp_bridge->gatt_bus_addr = virt_to_phys(gtt_table);
2649+
2650+ for (i = 0; i < num_entries; i++) {
2651+ gtt_table[i] = (unsigned long) agp_bridge->scratch_page;
2652+ }
2653+
2654+ /* Enable the newly created GTT */
2655+ AGN_DEBUG("Enabling new GTT");
2656+ writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED,
2657+ private_data.registers+I810_PGETBL_CTL);
2658+ readl(private_data.registers+I810_PGETBL_CTL);
2659+
2660+ } else if (private_data.split_gtt) {
2661+ /* We're keeping the system BIOS created normal gtt but
2662+ * augmenting it with more entries
2663+ */
2664+ gtt_table = (u32 *)__get_free_pages(GFP_KERNEL, order - 1);
2665+
2666+ //AGN_DEBUG("Allocated secondary GTT at %p:%p (virt:phys)", gtt_table,
2667+ // virt_to_phys(gtt_table));
2668+
2669+ /* Make sure allocation was successful */
2670+ if (NULL == gtt_table) {
2671+ AGN_ERROR("Failed to allocate kernel pages");
2672+ return (-ENOMEM);
2673+ }
2674+
2675+ private_data.upper_gtt = gtt_table;
2676+
2677+ for (i = 0; i < num_entries/2; i++) {
2678+ gtt_table[i] = (unsigned long) agp_bridge->scratch_page;
2679+ }
2680+
2681+ agp_bridge->gatt_bus_addr = gtt_bus_addr;
2682+
2683+ } else {
2684+
2685+ agp_bridge->gatt_bus_addr = gtt_bus_addr;
2686+
2687+ }
2688+
2689+ /*
2690+ * Now that the GTT exists and has been configured, enable
2691+ * the SGX MMU to point to the GTT as its page tables
2692+ */
2693+
2694+ /* The directory level is a single page of memory */
2695+ sgx_dir = (u32 *)__get_free_pages(GFP_KERNEL, 0);
2696+ if (NULL == sgx_dir ) {
2697+ AGN_ERROR("Failed to allocate kernel page");
2698+ return (-ENOMEM);
2699+ }
2700+
2701+ /* Mark the directory so that it is not swappable */
2702+ gtt_table_page = virt_to_page( sgx_dir );
2703+ set_bit(PG_reserved, &gtt_table_page->flags);
2704+
2705+ memset (sgx_dir, 0, PAGE_SIZE);
2706+
2707+ /* Initialize the directory so that each used page table
2708+ * is addressed
2709+ */
2710+
2711+ /* Make sure entire SGX directory is populated */
2712+ for (i = 0; i < 0x400; i++) {
2713+ sgx_dir[i] = agp_bridge->gatt_bus_addr | 0x01;
2714+ }
2715+
2716+ pci_read_config_dword(private_data.pdev, I915_GMADDR, &aperphys);
2717+ aperphys &= PCI_BASE_ADDRESS_MEM_MASK;
2718+ aperphys = aperphys >> 22;
2719+
2720+ if (private_data.split_gtt) {
2721+ /* Only use half of the entries */
2722+ new_order = order-1;
2723+ } else {
2724+ /* Full GTT, use all entries */
2725+ new_order = order;
2726+ }
2727+
2728+ for (i = 0; i < (1 << new_order); i++) {
2729+ /* Set the address for 2D/3D*/
2730+ sgx_dir[i] = agp_bridge->gatt_bus_addr + (PAGE_SIZE * i);
2731+ /* Set the address for hostport */
2732+ sgx_dir[i+aperphys] = agp_bridge->gatt_bus_addr + (PAGE_SIZE * i);
2733+
2734+ /* Mark them as valid */
2735+ sgx_dir[i] |= 0x01;
2736+ sgx_dir[i+aperphys] |= 0x01;
2737+
2738+ //AGN_DEBUG("Directory %d is %08lx", i, sgx_dir[i]);
2739+ }
2740+
2741+ /* If we're in split gtt mode, set the directory entries of the second
2742+ * gtt
2743+ */
2744+
2745+ if (private_data.split_gtt) {
2746+ j=0;
2747+ for (i = (1 << (order - 1)); i < (1 << order); i++) {
2748+ /* Set the address for 2D/3D*/
2749+ sgx_dir[i] = virt_to_phys(private_data.upper_gtt) + (PAGE_SIZE * j);
2750+ /* Set the address for hostport */
2751+ sgx_dir[i+aperphys] = virt_to_phys(private_data.upper_gtt) + (PAGE_SIZE * j);
2752+
2753+ j++;
2754+
2755+ /* Mark them as valid */
2756+ sgx_dir[i] |= 0x01;
2757+ sgx_dir[i+aperphys] |= 0x01;
2758+ //AGN_DEBUG("Directory %d is %08lx", i, sgx_dir[i]);
2759+ }
2760+ }
2761+
2762+ /*
2763+ * Program the directory's address into the MMU control
2764+ * register
2765+ */
2766+
2767+ /* Flush the cache */
2768+ flush_cache_all();
2769+ global_cache_flush();
2770+
2771+ /* Invalidate directory cache */
2772+ sgx_mmu = readl(private_data.registers + 0x40C00);
2773+ sgx_mmu |= 0x1E;
2774+ writel(sgx_mmu, private_data.registers + 0x40C00);
2775+ wmb();
2776+ readl(private_data.registers + 0x40C00);
2777+
2778+ writel(virt_to_phys(sgx_dir), private_data.registers + 0x40C84);
2779+ wmb();
2780+ readl(private_data.registers + 0x40C84);
2781+
2782+ /* Turn on host access to aperture via the MMU */
2783+ sgx_mmu = readl(private_data.registers + 0x40C00);
2784+ sgx_mmu &= 0xFFFE0000;
2785+ writel(sgx_mmu, private_data.registers + 0x40C00);
2786+ wmb();
2787+ readl(private_data.registers + 0x40C00);
2788+
2789+ return 0;
2790+}
2791+
2792+
2793+static int AGP_CREATE_GATT(iegd_plb_create_gatt_table)
2794+{
2795+ u32 order;
2796+ u32 mmio_bus_addr, temp2;
2797+ int ret;
2798+ u32 gtt_size;
2799+ unsigned char msac;
2800+ u32 msac_gtt_size;
2801+
2802+ agp_bridge->gatt_table_real = NULL;
2803+
2804+ order=A_SIZE_FIX(agp_bridge->current_size)->page_order;
2805+
2806+ /* Find and save the address of the MMIO register */
2807+ pci_read_config_dword(private_data.pdev, I915_MMADDR,
2808+ &mmio_bus_addr);
2809+ mmio_bus_addr &= 0xFFF80000;
2810+
2811+ private_data.registers = (volatile u8 *) ioremap(mmio_bus_addr,
2812+ KB(512));
2813+
2814+ if (!private_data.registers) {
2815+ AGN_ERROR("ioremap failed to map mmio");
2816+ return (-ENOMEM);
2817+ }
2818+
2819+ pci_read_config_dword(private_data.pdev, I915_PTEADDR, &temp2);
2820+
2821+ /* Get the GTT size via MSAC */
2822+ pci_read_config_byte(private_data.pdev, IUS15_GMCH_MSAC, &msac);
2823+
2824+ switch (msac & 0x03) {
2825+ case 0x02: /* 256K GTT size */
2826+ msac_gtt_size = KB(256);
2827+ break;
2828+ case 0x03: /* 128K GTT size */
2829+ default:
2830+ msac_gtt_size = KB(128);
2831+ break;
2832+ }
2833+
2834+ gtt_size = A_SIZE_FIX(agp_bridge->current_size)->num_entries * sizeof(u32);
2835+
2836+ if (gtt_size!=msac_gtt_size) {
2837+ AGN_DEBUG("MSAC GTT size 0x%08x, bridge GTT size 0x%08x; using MSAC",
2838+ msac_gtt_size, gtt_size);
2839+ gtt_size = msac_gtt_size;
2840+ }
2841+
2842+ private_data.gtt = (volatile u32 *)ioremap(temp2, gtt_size);
2843+
2844+ if (!private_data.gtt) {
2845+ AGN_ERROR("ioremap failed to map gtt");
2846+ return (-ENOMEM);
2847+ }
2848+
2849+ if((ret = iegd_plb_init_gtt(order))) {
2850+ return (ret);
2851+ }
2852+
2853+ agp_bridge->gatt_table = NULL;
2854+
2855+ return (0);
2856+}
2857+
2858+static void iegd_plb_cleanup(void)
2859+{
2860+
2861+ iounmap((void *)private_data.gtt);
2862+ iounmap((void *)private_data.registers);
2863+}
2864+
2865+
2866+static void iegd_plb_vm_close(struct vm_area_struct *vma)
2867+{
2868+ struct list_head *tmp;
2869+ struct client_list_struct *entry;
2870+
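+ /* Drop this VMA from the client list if it was being tracked. */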
2871+ down(&client_sem);
2872+ list_for_each(tmp, &client_list) {
2873+ entry = list_entry(tmp, struct client_list_struct, list);
2874+ if (entry->vma == vma) {
2875+ list_del(&entry->list);
2876+ kfree(entry);
2877+ AGN_DEBUG("Removed VMA %p from client list", vma);
2878+ break;
2879+ }
2880+ }
2881+ up(&client_sem);
2882+}
2883+
2884+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
2885+static int iegd_plb_vm_fault(struct vm_area_struct *vma,
2886+ struct vm_fault *vmf)
2887+{
2888+ int type=0; /* New fault handler doesn't use type */
2889+ unsigned long address = (unsigned long) vmf->virtual_address;
2890+
2891+ vmf->page = iegd_plb_vm_nopage(vma, address, &type);
2892+
2893+ return 0;
2894+}
2895+#endif
2896+
2897+static struct page *iegd_plb_vm_nopage(struct vm_area_struct *vma,
2898+ unsigned long address,
2899+ int *type)
2900+{
2901+ unsigned long offset=0;
2902+ unsigned long physaddr=0;
2903+ struct page *page;
2904+ struct list_head *tmp;
2905+ struct client_list_struct *entry;
2906+ int flag=0;
2907+
2908+ /* On the Intel SCH US15, we don't have a traditional aperture. As
2909+ * a result, we're substituting the base of stolen memory
2910+ * as the aperture address.
2911+ *
2912+ * Mmaps relative to the base of stolen memory will be
2913+ * treated as mmaps covering parts of our virtual aperture.
2914+ *
2915+ * Given that a single surface may be mapped, and not the
2916+ * whole virtual aperture, we must translate the values
2917+ * received so that they are relative to our 0-based virtual
2918+ * aperture.
2919+ */
2920+ offset = (vma->vm_pgoff << PAGE_SHIFT) - agp_bridge->gart_bus_addr;
2921+
2922+ /* All pages returned must be noncached or write-combined*/
2923+ if (agp_use_pat()) {
2924+ pgprot_val(vma->vm_page_prot) &= ~(_PAGE_PCD | _PAGE_PWT);
2925+ pgprot_val(vma->vm_page_prot) |= _PAGE_PAT;
2926+ } else {
2927+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2928+ }
2929+
2930+ /* Access to the virtual frame buffer does not appear to
2931+ * call open properly before faulting. As a result, we
2932+ * need to do this housekeeping at each fault.
2933+ */
2934+ down(&client_sem);
2935+ list_for_each(tmp, &client_list) {
2936+ entry = list_entry(tmp, struct client_list_struct, list);
2937+ if (entry->vma == vma) {
2938+ flag=1;
2939+ }
2940+ }
2941+
2942+ if (!flag) {
2943+ entry = kmalloc(sizeof(struct client_list_struct), GFP_KERNEL);
2944+ if (entry) {
2945+ entry->vma = vma;
2946+ list_add(&(entry->list), &client_list);
2947+ AGN_DEBUG("Added VMA %p to client list", vma);
2948+
2949+ AGN_DEBUG("Scratch: %p", virt_to_page(agp_bridge->scratch_page));
2950+
2951+ } else {
2952+ AGN_ERROR("Failed to add VMA to client list");
2953+ }
2954+ }
2955+ up(&client_sem);
2956+
2957+ offset += address - vma->vm_start;
2958+
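+ /* With a split GTT the table appears to be divided at entry 0x8000
+ * (the first 128MB of the aperture); entries at or above that index
+ * are read from the separately mapped upper_gtt.
+ */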
2959+ if (private_data.split_gtt && ((offset >> PAGE_SHIFT)) >= 0x8000) {
2960+ physaddr = readl(private_data.upper_gtt + (offset >> PAGE_SHIFT)
2961+ - 0x8000);
2962+ } else {
2963+ physaddr = readl(private_data.gtt + (offset >> PAGE_SHIFT));
2964+ }
2965+
2966+
2967+ physaddr &= PAGE_MASK;
2968+
2969+ if (!pfn_valid(physaddr >> PAGE_SHIFT)) {
2970+ AGN_ERROR("Referencing non-existent struct page.\n");
2971+ }
2972+
2973+ if (physaddr >= agp_bridge->gart_bus_addr) {
2974+ AGN_DEBUG("Faulted before insert, returning scratch page");
2975+ page = virt_to_page(__va(agp_bridge->scratch_page));
2976+ } else {
2977+ page = virt_to_page(__va(physaddr));
2978+ }
2979+
2980+ get_page(page);
2981+
2982+ if (type) {
2983+ *type = VM_FAULT_MINOR;
2984+ }
2985+
2986+ return (page);
2987+}
2988+
2989+
2990+int iegd_plb_insert_entries(struct agp_memory *mem,
2991+ off_t pg_start, int type)
2992+{
2993+ int i,j,num_entries, zap;
2994+ void *temp;
2995+ struct list_head *tmp;
2996+ struct client_list_struct *entry;
2997+ unsigned long addr_start=0;
2998+ unsigned long addr_end=0;
2999+ unsigned long addr_offset=0;
3000+ unsigned long vaddr;
3001+ char *srcvirt;
3002+ unsigned long srcphys;
3003+ unsigned long dstphys;
3004+ pgd_t *pgd;
3005+ pud_t *pud;
3006+ pmd_t *pmd;
3007+ pte_t *pte;
3008+
3009+ temp = agp_bridge->current_size;
3010+ num_entries = A_SIZE_FIX(temp)->num_entries;
3011+
3012+ /* If we try to write beyond gtt table, return error */
3013+ if ((pg_start + mem->page_count) > num_entries) {
3014+ AGN_ERROR("Trying to write beyond aperture limit");
3015+ AGN_DEBUG("pg_start=0x%.8lx, mem->page_count=%d,"
3016+ "num_entries=%d", pg_start, mem->page_count,
3017+ num_entries);
3018+ return -EINVAL;
3019+ }
3020+
3021+ /* The i830 can't check the GTT for entries since it's read-only,
3022+ * so depend on the caller to make the correct offset decisions.
3023+ */
3024+
3025+ if ((type != 0 && type != AGP_PHYS_MEMORY) ||
3026+ (mem->type != 0 && mem->type != AGP_PHYS_MEMORY)) {
3027+ AGN_ERROR("Unsupported memory type");
3028+ AGN_DEBUG("mem->type=%x, type=%x", mem->type, type);
3029+ return -EINVAL;
3030+ }
3031+
3032+ global_cache_flush();
3033+ agp_bridge->driver->tlb_flush(mem);
3034+
3035+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
3036+
3037+ /* If we're inserting into stolen memory, we need to read
3038+ * the contents of the original page that occupied this space
3039+ */
3040+ if (j < private_data.gtt_entries) {
3041+ srcphys=readl(private_data.gtt+j);
3042+ srcphys &= PAGE_MASK;
3043+
3044+ if (srcphys >= agp_bridge->gart_bus_addr) {
3045+ srcvirt=ioremap(srcphys, PAGE_SIZE);
3046+
3047+ if (!srcvirt) {
3048+ AGN_ERROR("Could not map stolen memory source %d:%08lX", j, srcphys);
3049+ return -ENOMEM;
3050+ }
3051+
3052+ dstphys=AGP_MASK_GTT();
3053+ dstphys &= PAGE_MASK;
3054+
3055+ copy_page(__va(dstphys), srcvirt);
3056+
3057+ iounmap(srcvirt);
3058+ } else {
3059+ AGN_ERROR ("Tried to copy a page not in stolen memory %d:%08lX", j, srcphys);
3060+ }
3061+ }
3062+
3063+ if (private_data.split_gtt && (j >= 0x8000)) {
3064+ writel(AGP_MASK_GTT(), private_data.upper_gtt + j - 0x8000);
3065+ } else {
3066+ writel(AGP_MASK_GTT(), private_data.gtt+j);
3067+ readl(private_data.gtt+j); /* PCI Posting. */
3068+ }
3069+
3070+ down(&client_sem);
3071+ list_for_each(tmp, &client_list) {
3072+ entry = list_entry(tmp, struct client_list_struct, list);
3073+
3074+ /* We need to handle invalidating VMAs that are only mapping
3075+ * a portion of the virtual aperture. Calculate which, if
3076+ * any, invalidated pages need to be zapped.
3077+ */
3078+ addr_start = (entry->vma->vm_pgoff << PAGE_SHIFT)
3079+ - agp_bridge->gart_bus_addr;
3080+ addr_end = addr_start + (entry->vma->vm_end - entry->vma->vm_start);
3081+ addr_offset = j << PAGE_SHIFT;
3082+
3083+ vaddr = entry->vma->vm_start + (addr_offset - addr_start);
3084+
3085+ zap=0;
3086+ pgd=NULL;
3087+ pud=NULL;
3088+ pmd=NULL;
3089+ pte=NULL;
3090+
3091+ pgd = pgd_offset(entry->vma->vm_mm, vaddr);
3092+ if (!pgd_none(*pgd)) {
3093+ pud = pud_offset(pgd, vaddr);
3094+ if (!pud_none(*pud)) {
3095+ pmd = pmd_offset(pud, vaddr);
3096+ if (!pmd_none(*pmd)) {
3097+ pte = pte_offset_map(pmd, vaddr);
3098+ if (!pte_none(*pte)) {
3099+ zap=1;
3100+ }
3101+ }
3102+ }
3103+ }
3104+
3105+ /* Only zap a page if it falls within the mapped region
3106+ * and it has previously faulted
3107+ */
3108+ if (zap && (addr_offset >= addr_start) &&
3109+ (addr_offset < addr_end)) {
3110+
3111+ if (!page_mapcount(pte_page(*pte))) {
3112+ AGN_ERROR("ERROR No mapcount");
3113+ AGN_DEBUG("ZI %p %08lX %d %d %p", pte_page(*pte),
3114+ pte_page(*pte)->flags, page_count(pte_page(*pte)),
3115+ page_mapcount(pte_page(*pte)), pte_page(*pte)->mapping);
3116+ } else {
3117+ atomic_add_negative(-1, &pte_page(*pte)->_mapcount);
3118+ put_page(pte_page(*pte));
3119+ dec_mm_counter(entry->vma->vm_mm, file_rss);
3120+ }
3121+
3122+ pte_clear(entry->vma->vm_mm, vaddr, pte);
3123+ }
3124+
3125+ if(pte) {
3126+ pte_unmap(pte);
3127+ }
3128+ }
3129+ up(&client_sem);
3130+ }
3131+
3132+ global_cache_flush();
3133+ agp_bridge->driver->tlb_flush(mem);
3134+
3135+ AGN_DEBUG("Exit");
3136+ return 0;
3137+}
3138+
3139+
3140+int iegd_plb_remove_entries(struct agp_memory *mem,
3141+ off_t pg_start, int type)
3142+{
3143+ int i, zap;
3144+ struct list_head *tmp;
3145+ struct client_list_struct *entry;
3146+ unsigned long physaddr;
3147+ unsigned long addr_start=0;
3148+ unsigned long addr_end=0;
3149+ unsigned long addr_offset=0;
3150+ unsigned long vaddr;
3151+ pgd_t *pgd;
3152+ pud_t *pud;
3153+ pmd_t *pmd;
3154+ pte_t *pte;
3155+
3156+ global_cache_flush();
3157+ agp_bridge->driver->tlb_flush(mem);
3158+
3159+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
3160+ if (i < private_data.gtt_entries) {
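+ /* Entries below gtt_entries cover stolen memory: restore the
+ * original 1:1 mapping (with bit 0, the valid bit, set) instead
+ * of pointing them at the scratch page.
+ */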
3161+ physaddr = agp_bridge->gart_bus_addr + (i * PAGE_SIZE);
3162+ physaddr |= 0x01;
3163+ writel(physaddr, private_data.gtt+i);
3164+ readl(private_data.gtt+i); /* PCI Posting. */
3165+ } else {
3166+ if (private_data.split_gtt && (i >= 0x8000)) {
3167+ writel(agp_bridge->scratch_page, private_data.upper_gtt + i - 0x8000);
3168+ } else {
3169+ writel(agp_bridge->scratch_page, private_data.gtt+i);
3170+ readl(private_data.gtt+i); /* PCI Posting. */
3171+ }
3172+ }
3173+
3174+ down(&client_sem);
3175+ list_for_each(tmp, &client_list) {
3176+ entry = list_entry(tmp, struct client_list_struct, list);
3177+
3178+ /* We need to handle invalidating VMAs that are only mapping
3179+ * a portion of the virtual aperture. Calculate which, if
3180+ * any, invalidated pages need to be zapped.
3181+ */
3182+ addr_start = (entry->vma->vm_pgoff << PAGE_SHIFT)
3183+ - agp_bridge->gart_bus_addr;
3184+ addr_end = addr_start + (entry->vma->vm_end - entry->vma->vm_start);
3185+ addr_offset = i << PAGE_SHIFT;
3186+
3187+ vaddr = entry->vma->vm_start + (addr_offset - addr_start);
3188+
3189+ zap=0;
3190+ pgd=NULL;
3191+ pud=NULL;
3192+ pmd=NULL;
3193+ pte=NULL;
3194+
3195+ /* Look up page table entries for all VMAs that currently
3196+ * have the virtual aperture mapped -- to see if the page
3197+ * has ever faulted
3198+ */
3199+ pgd = pgd_offset(entry->vma->vm_mm, vaddr);
3200+ if (!pgd_none(*pgd)) {
3201+ pud = pud_offset(pgd, vaddr);
3202+ if (!pud_none(*pud)) {
3203+ pmd = pmd_offset(pud, vaddr);
3204+ if (!pmd_none(*pmd)) {
3205+ pte = pte_offset_map(pmd, vaddr);
3206+ if (!pte_none(*pte)) {
3207+ zap=1;
3208+ }
3209+ }
3210+ }
3211+ }
3212+
3213+ /* Only zap a page if it falls within the mapped region
3214+ * and it has previously faulted
3215+ */
3216+ if (zap && (addr_offset >= addr_start) &&
3217+ (addr_offset < addr_end)) {
3218+
3219+
3220+ if (!page_mapcount(pte_page(*pte))) {
3221+ AGN_ERROR("ERROR No mapcount");
3222+ AGN_DEBUG("ZR %p %08lX %d %d %p", pte_page(*pte),
3223+ pte_page(*pte)->flags, page_count(pte_page(*pte)),
3224+ page_mapcount(pte_page(*pte)), pte_page(*pte)->mapping);
3225+ } else {
3226+ atomic_add_negative(-1, &pte_page(*pte)->_mapcount);
3227+ put_page(pte_page(*pte));
3228+ dec_mm_counter(entry->vma->vm_mm, file_rss);
3229+ }
3230+
3231+ pte_clear(entry->vma->vm_mm, vaddr, pte);
3232+ }
3233+
3234+ if(pte) {
3235+ pte_unmap(pte);
3236+ }
3237+ }
3238+ up(&client_sem);
3239+ }
3240+
3241+ global_cache_flush();
3242+ agp_bridge->driver->tlb_flush(mem);
3243+
3244+ return 0;
3245+}
3246+
3247+
3248+int iegd_plb_configure(void)
3249+{
3250+ struct aper_size_info_fixed *current_size;
3251+ u32 temp;
3252+ u16 gmch_ctrl;
3253+ int i;
3254+
3255+ current_size = A_SIZE_FIX(agp_bridge->current_size);
3256+
3257+ /* SCH US15 uses the Base of Stolen Memory as its artificial
3258+ * aperture address.
3259+ */
3260+ pci_read_config_dword(private_data.pdev, 0x5C, &temp);
3261+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
3262+
3263+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
3264+ gmch_ctrl |= I830_GMCH_ENABLED;
3265+ pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
3266+
3267+ global_cache_flush();
3268+ agp_bridge->driver->tlb_flush(0);
3269+
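+ /* Point the hardware at the GATT and enable it through the
+ * page table control register.
+ */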
3270+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED,
3271+ private_data.registers+I810_PGETBL_CTL);
3272+ /* PCI Posting. */
3273+ readl(private_data.registers+I810_PGETBL_CTL);
3274+
3275+ if (agp_bridge->driver->needs_scratch_page) {
3276+
3277+ for (i = private_data.gtt_entries; i < current_size->num_entries; i++) {
3278+ if ((private_data.split_gtt) && (i >= 0x8000)) {
3279+ writel(agp_bridge->scratch_page, private_data.upper_gtt+i-0x8000);
3280+ readl(private_data.upper_gtt+i-0x8000);
3281+ } else {
3282+ writel(agp_bridge->scratch_page, private_data.gtt+i);
3283+ readl(private_data.gtt+i); /* PCI Posting. */
3284+ }
3285+ }
3286+ }
3287+
3288+ global_cache_flush();
3289+
3290+ return 0;
3291+}
3292+
3293+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
3294+static void plb_destroy_pages_by_addr(void *addr, size_t pg_count, unsigned int order)
3295+{
3296+ struct page *page;
3297+
3298+ AGN_DEBUG("Enter");
3299+
3300+ if (addr == NULL) {
3301+ return;
3302+ }
3303+
3304+ page = virt_to_page(addr);
3305+ SET_PAGES_WB(page, pg_count);
3306+ GLOBAL_FLUSH_TLB();
3307+ put_page(page);
3308+ AGP_UNLOCK_PAGE(page);
3309+
3310+ if(page_count(page) > 1) {
3311+ free_pages((unsigned long)addr, order);
3312+ }
3313+
3314+ atomic_dec(&agp_bridge->current_memory_agp);
3315+
3316+ AGN_DEBUG("Exit");
3317+}
3318+#else
3319+static void plb_destroy_pages(struct page *page, size_t pg_count, unsigned int order)
3320+{
3321+ //AGN_LOG("IN plb_destroy_pages");
3322+ AGN_DEBUG("Enter");
3323+
3324+ if (page == NULL) {
3325+ return;
3326+ }
3327+
3328+ SET_PAGES_WB(page, pg_count);
3329+ GLOBAL_FLUSH_TLB();
3330+ put_page(page);
3331+ AGP_UNLOCK_PAGE(page);
3332+
3333+ if(page_count(page) > 1) {
3334+ __free_pages(page, order);
3335+ }
3336+
3337+ atomic_dec(&agp_bridge->current_memory_agp);
3338+
3339+ AGN_DEBUG("Exit");
3340+}
3341+#endif
3342+
3343+void iegd_plb_free_by_type(struct agp_memory *curr)
3344+{
3345+ unsigned int order;
3346+
3347+ switch (curr->page_count) {
3348+ case 1:
3349+ order = 0; /* pg_count = 1 => 2 ^ 0 */
3350+ break;
3351+ case 4:
3352+ order = 2; /* pg_count = 4 => 2 ^ 2 */
3353+ break;
3354+ case 8:
3355+ order = 3; /* pg_count = 8 => 2 ^ 3 */
3356+ break;
3357+ default:
3358+ /* This case should never happen */
3359+ return;
3360+ }
3361+
3362+ agp_free_key(curr->key);
3363+ if(curr->type == AGP_PHYS_MEMORY) {
3364+ PLB_DESTROY_PAGES(curr, curr->page_count, order);
3365+ IGD_FREE_MEM(curr);
3366+ }
3367+
3368+ kfree(curr);
3369+
3370+}
3371+
3372+#endif
3373diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/global.c patch_script_temp/drivers/gpu/drm/iegd/agp/global.c
3374--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/global.c 1969-12-31 17:00:00.000000000 -0700
3375+++ patch_script_temp/drivers/gpu/drm/iegd/agp/global.c 2009-10-06 10:30:05.000000000 -0700
3376@@ -0,0 +1,142 @@
3377+/* -*- pse-c -*-
3378+ *----------------------------------------------------------------------------
3379+ * Filename: global.c
3380+ * $Revision: 1.17 $
3381+ *----------------------------------------------------------------------------
3382+ * Gart and DRM driver for Intel Embedded Graphics Driver
3383+ * Copyright © 2008, Intel Corporation.
3384+ *
3385+ * This program is free software; you can redistribute it and/or modify it
3386+ * under the terms and conditions of the GNU General Public License,
3387+ * version 2, as published by the Free Software Foundation.
3388+ *
3389+ * This program is distributed in the hope it will be useful, but WITHOUT
3390+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3391+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3392+ * more details.
3393+ *
3394+ * You should have received a copy of the GNU General Public License along with
3395+ * this program; if not, write to the Free Software Foundation, Inc.,
3396+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3397+ *
3398+ */
3399+
3400+#include "global.h"
3401+#include "intelpci.h"
3402+
3403+/* Points to the dispatch table entry for the
3404+ * current chipset */
3405+gart_dispatch_t *gart_id;
3406+
3407+/* Private data containing chipset information */
3408+dev_private_data_t private_data;
3409+
3410+int iegd_find_device(u16 device)
3411+{
3412+ struct pci_dev *device_pdev;
3413+
3414+ device_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
3415+ /* Check for function 0. */
3416+ if(device_pdev && PCI_FUNC(device_pdev->devfn) != 0) {
3417+ device_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
3418+ device, device_pdev);
3419+ }
3420+
3421+ if(!device_pdev) {
3422+ return 0;
3423+ }
3424+
3425+ AGN_DEBUG("Device found = 0x%x\n", device);
3426+ private_data.pdev = device_pdev;
3427+ return 1;
3428+
3429+}
3430+
3431+/**
3432+ * This function hooks up the function pointers that
3433+ * belong to a specific chipset. Beyond that, this
3434+ * is the place to customize the structure in case
3435+ * chipsets in the same family have a different
3436+ * architecture. Make sure to add a new device id here
3437+ * if a new device is introduced.
3438+ *
3439+ * parameter:
3440+ * driver_hook - Pointer to hold the structure
3441+ * did - device id
3442+ * list - lookup table for the chipset family
3443+ *
3444+ * return value:
3445+ * 0 - success
3446+ * -1 - No function hook
3447+ */
3448+int bridge_driver_init(bridge_driver_t **driver_hook,
3449+ unsigned short did, dispatch_table_t *list )
3450+{
3451+
3452+ (*driver_hook) = (bridge_driver_t *)dispatch_acquire(
3453+ gart_id->device_id, list);
3454+
3455+ /* For specific chipset implementation assign the pointer
3456+ * here. */
3457+ switch(did) {
3458+ case PCI_DEVICE_ID_GM45:
3459+ case PCI_DEVICE_ID_ELK:
3460+ case PCI_DEVICE_ID_Q45:
3461+ case PCI_DEVICE_ID_G45:
3462+ case PCI_DEVICE_ID_G41:
3463+ (*driver_hook)->aperture_sizes = iegd_igm45_sizes;
3464+ (*driver_hook)->num_aperture_sizes = 2;
3465+ break;
3466+ case PCI_DEVICE_ID_Q35:
3467+ case PCI_DEVICE_ID_Q35A2:
3468+ (*driver_hook)->aperture_sizes = iegd_iq35_sizes;
3469+ (*driver_hook)->num_aperture_sizes = 3;
3470+ break;
3471+ case PCI_DEVICE_ID_915GD:
3472+ case PCI_DEVICE_ID_915AL:
3473+ case PCI_DEVICE_ID_945G:
3474+ case PCI_DEVICE_ID_945GM:
3475+ case PCI_DEVICE_ID_945GME:
3476+ (*driver_hook)->aperture_sizes = iegd_i915_sizes;
3477+ (*driver_hook)->num_aperture_sizes = 4;
3478+ break;
3479+ case PCI_DEVICE_ID_965G:
3480+ case PCI_DEVICE_ID_946GZ:
3481+ case PCI_DEVICE_ID_G965:
3482+ case PCI_DEVICE_ID_Q965:
3483+ case PCI_DEVICE_ID_GM965:
3484+ case PCI_DEVICE_ID_GME965:
3485+ (*driver_hook)->aperture_sizes = iegd_i965_sizes;
3486+ (*driver_hook)->num_aperture_sizes = 4;
3487+ break;
3488+ case PCI_DEVICE_ID_810:
3489+ case PCI_DEVICE_ID_810DC:
3490+ case PCI_DEVICE_ID_810E:
3491+ case PCI_DEVICE_ID_815:
3492+ (*driver_hook)->aperture_sizes = intel_i810_sizes;
3493+ (*driver_hook)->num_aperture_sizes = 2;
3494+ (*driver_hook)->create_gatt_table = agp_generic_create_gatt_table;
3495+ (*driver_hook)->free_gatt_table = agp_generic_free_gatt_table;
3496+ break;
3497+ case PCI_DEVICE_ID_830M:
3498+ case PCI_DEVICE_ID_845G:
3499+ case PCI_DEVICE_ID_855:
3500+ case PCI_DEVICE_ID_865G:
3501+ (*driver_hook)->aperture_sizes = intel_i830_sizes;
3502+ (*driver_hook)->num_aperture_sizes = 4;
3503+ (*driver_hook)->create_gatt_table = iegd_alm_create_gatt_table;
3504+ (*driver_hook)->free_gatt_table = iegd_cmn_free_gatt_table;
3505+ break;
3506+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
3507+ case PCI_DEVICE_ID_PLB:
3508+ (*driver_hook)->aperture_sizes = iegd_plb_sizes;
3509+ (*driver_hook)->num_aperture_sizes = 1;
3510+ break;
3511+#endif
3512+ default:
3513+ return -1;
3514+ }
3515+
3516+ return 0;
3517+
3518+}
3519diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/pci.c patch_script_temp/drivers/gpu/drm/iegd/agp/pci.c
3520--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/pci.c 1969-12-31 17:00:00.000000000 -0700
3521+++ patch_script_temp/drivers/gpu/drm/iegd/agp/pci.c 2009-10-06 10:30:05.000000000 -0700
3522@@ -0,0 +1,501 @@
3523+/* -*- pse-c -*-
3524+ *----------------------------------------------------------------------------
3525+ * Filename: pci.c
3526+ * $Revision: 1.31 $
3527+ *----------------------------------------------------------------------------
3528+ * Gart and DRM driver for Intel Embedded Graphics Driver
3529+ * Copyright © 2008, Intel Corporation.
3530+ *
3531+ * This program is free software; you can redistribute it and/or modify it
3532+ * under the terms and conditions of the GNU General Public License,
3533+ * version 2, as published by the Free Software Foundation.
3534+ *
3535+ * This program is distributed in the hope it will be useful, but WITHOUT
3536+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3537+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3538+ * more details.
3539+ *
3540+ * You should have received a copy of the GNU General Public License along with
3541+ * this program; if not, write to the Free Software Foundation, Inc.,
3542+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3543+ *
3544+ */
3545+
3546+#include <linux/init.h>
3547+#include <linux/module.h>
3548+#include <linux/pci.h>
3549+#include <linux/agp_backend.h>
3550+#include "agp.h"
3551+#include "global.h"
3552+#include "igd_gart.h"
3553+#include "intelpci.h"
3554+#include "igd_abs.h"
3555+
3556+static gart_dispatch_t gart_pci_device_table[] = {
3557+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_810, PCI_DEVICE_ID_810,
3558+ "810", 0, 0, 0,
3559+ },
3560+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_810DC, PCI_DEVICE_ID_810DC,
3561+ "810DC", 0, 0, 0,
3562+ },
3563+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_810E, PCI_DEVICE_ID_810E,
3564+ "810E", 0, 0, 0,
3565+ },
3566+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_815, PCI_DEVICE_ID_815,
3567+ "815", 0, 0, 0,
3568+ },
3569+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_830M, PCI_DEVICE_ID_830M,
3570+ "830M", 0, 0, 0,
3571+ },
3572+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_835, PCI_DEVICE_ID_835,
3573+ "835", 0, 0, 0,
3574+ },
3575+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_845G, PCI_DEVICE_ID_845G,
3576+ "845G", 0, 0, 0,
3577+ },
3578+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_855, PCI_DEVICE_ID_855,
3579+ "855", 0, 0, 0,
3580+ },
3581+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_865G, PCI_DEVICE_ID_865G,
3582+ "865G", 0, 0, 0,
3583+ },
3584+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_915GD, PCI_DEVICE_ID_915GD,
3585+ "915GD", 0, 0, 0,
3586+ },
3587+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_910GL, PCI_DEVICE_ID_910GL,
3588+ "910GL", 0, 0, 0,
3589+ },
3590+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_915AL, PCI_DEVICE_ID_915AL,
3591+ "915AL", 0, 0, 0,
3592+ },
3593+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_945G, PCI_DEVICE_ID_945G,
3594+ "945G", 0, 0, 0,
3595+ },
3596+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_945GM, PCI_DEVICE_ID_945GM,
3597+ "945GM", 0, 0, 0,
3598+ },
3599+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_945GME, PCI_DEVICE_ID_945GME,
3600+ "945GME/GSE", 0, 0, 0,
3601+ },
3602+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_Q35, PCI_DEVICE_ID_Q35,
3603+ "Q33/Q35", 0, 0, 0,
3604+ },
3605+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_Q35A2, PCI_DEVICE_ID_Q35A2,
3606+ "Q33/Q35", 0, 0, 0,
3607+ },
3608+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_965G, PCI_DEVICE_ID_965G,
3609+ "965G", 0, 0, 0,
3610+ },
3611+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_946GZ, PCI_DEVICE_ID_946GZ,
3612+ "946GZ", 0, 0, 0,
3613+ },
3614+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_G965, PCI_DEVICE_ID_G965,
3615+ "G965", 0, 0, 0,
3616+ },
3617+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_Q965, PCI_DEVICE_ID_Q965,
3618+ "Q965", 0, 0, 0,
3619+ },
3620+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_GM965, PCI_DEVICE_ID_GM965,
3621+ "GM965", 0, 0, 0,
3622+ },
3623+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_GME965, PCI_DEVICE_ID_GME965,
3624+ "GLE960/GME965", 0, 0, 0,
3625+ },
3626+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
3627+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_PLB, PCI_DEVICE_ID_PLB,
3628+ "US15", 0, 0, 0,
3629+ },
3630+#endif
3631+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_GM45, PCI_DEVICE_ID_GM45,
3632+ "GM45/GS45/GL40", 0, 0, 0,
3633+ },
3634+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_ELK, PCI_DEVICE_ID_ELK,
3635+ "Q45", 0, 0, 0,
3636+ },
3637+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_Q45, PCI_DEVICE_ID_Q45,
3638+ "Q45", 0, 0, 0,
3639+ },
3640+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_G45, PCI_DEVICE_ID_G45,
3641+ "G45", 0, 0, 0,
3642+ },
3643+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_G41, PCI_DEVICE_ID_G41,
3644+ "G41", 0, 0, 0,
3645+ },
3646+};
3647+
3648+/* PCI device ids supported by IEGD */
3649+struct pci_device_id iegd_pci_table[] = {
3650+ ID(PCI_DEVICE_ID_BRIDGE_810),
3651+ ID(PCI_DEVICE_ID_BRIDGE_810DC),
3652+ ID(PCI_DEVICE_ID_BRIDGE_810E),
3653+ ID(PCI_DEVICE_ID_BRIDGE_815),
3654+ ID(PCI_DEVICE_ID_BRIDGE_830M),
3655+ ID(PCI_DEVICE_ID_BRIDGE_845G),
3656+ ID(PCI_DEVICE_ID_BRIDGE_855),
3657+ ID(PCI_DEVICE_ID_BRIDGE_865G),
3658+ ID(PCI_DEVICE_ID_BRIDGE_915GD),
3659+ ID(PCI_DEVICE_ID_BRIDGE_915AL),
3660+ ID(PCI_DEVICE_ID_BRIDGE_945G),
3661+ ID(PCI_DEVICE_ID_BRIDGE_945GM),
3662+ ID(PCI_DEVICE_ID_BRIDGE_945GME),
3663+ ID(PCI_DEVICE_ID_BRIDGE_965G),
3664+ ID(PCI_DEVICE_ID_BRIDGE_946GZ),
3665+ ID(PCI_DEVICE_ID_BRIDGE_G965),
3666+ ID(PCI_DEVICE_ID_BRIDGE_Q965),
3667+ ID(PCI_DEVICE_ID_BRIDGE_GM965),
3668+ ID(PCI_DEVICE_ID_BRIDGE_GME965),
3669+ ID(PCI_DEVICE_ID_BRIDGE_Q35),
3670+ ID(PCI_DEVICE_ID_BRIDGE_Q35A2),
3671+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
3672+ ID(PCI_DEVICE_ID_BRIDGE_PLB),
3673+#endif
3674+ ID(PCI_DEVICE_ID_BRIDGE_GM45),
3675+ ID(PCI_DEVICE_ID_BRIDGE_ELK),
3676+ ID(PCI_DEVICE_ID_BRIDGE_Q45),
3677+ ID(PCI_DEVICE_ID_BRIDGE_G45),
3678+ ID(PCI_DEVICE_ID_BRIDGE_G41),
3679+ { }
3680+};
3681+
3682+MODULE_DEVICE_TABLE(pci, iegd_pci_table);
3683+
3684+#include <asm/tlbflush.h>
3685+
3686+static int agp_has_pat = 0;
3687+
3688+int agp_use_pat(void)
3689+{
3690+ return agp_has_pat;
3691+}
3692+EXPORT_SYMBOL(agp_use_pat);
3693+
3694+static void agp_pat_ipi_handler(void *notused)
3695+{
3696+ u32 v1, v2;
3697+
3698+ rdmsr(MSR_IA32_CR_PAT, v1, v2);
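+ /* PAT4 occupies the low byte of the MSR's high dword; clear it and
+ * program memory type 0x01 (write-combining) before writing the MSR
+ * back and flushing the TLBs.
+ */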
3699+ v2 &= 0xFFFFFFF8;
3700+ v2 |= 0x00000001;
3701+ wbinvd();
3702+ wrmsr(MSR_IA32_CR_PAT, v1, v2);
3703+ __flush_tlb_all();
3704+}
3705+
3706+/*
3707+ * Set i386 PAT entry PAT4 to Write-combining memory type on all processors.
3708+ */
3709+
3710+void agp_init_pat(void)
3711+{
3712+
3713+ if (!boot_cpu_has(X86_FEATURE_PAT)) {
3714+ AGN_ERROR("PAT Feature not available\n");
3715+ return;
3716+ }
3717+ AGN_DEBUG("Enabled PAT");
3718+ if (ON_EACH_CPU(agp_pat_ipi_handler, NULL, 1, 1) != 0) {
3719+ AGN_ERROR("Timed out setting up CPU PAT.\n");
3720+ return;
3721+ }
3722+ agp_has_pat = 1;
3723+}
3724+EXPORT_SYMBOL(agp_init_pat);
3725+
3726+
3727+/* This function gets called by the PCI core when one of the chipsets
3728+ * above is detected */
3729+static int __devinit iegd_intel_probe(
3730+ struct pci_dev *pdev,
3731+ const struct pci_device_id *ent)
3732+{
3733+ bridge_data_t *bridge_device;
3734+ u8 cap_ptr = 0;
3735+ struct resource *r;
3736+ int ret;
3737+
3738+ AGN_DEBUG("Enter");
3739+ AGN_LOG("Initialize IEGD agpgart and drm");
3740+
3741+ /* Make sure this probe was triggered by the preferred
3742+ * chipset, so that we only initialize the chipset whose
3743+ * original gart module we deregistered */
3744+ if(!gart_id->bridge_pdev ||
3745+ (gart_id->bridge_pdev->device != pdev->device)) {
3746+ return -ENODEV;
3747+ }
3748+
3749+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
3750+
3751+ /* Allocate memory for the bridge. This data structure will then be
3752+ * used by the agp backend and frontend */
3753+ gart_id->bridge_info = agp_alloc_bridge();
3754+ if(gart_id->bridge_info == NULL) {
3755+ return -ENOMEM;
3756+ }
3757+
3758+ /* Check for the device and initialize private data */
3759+ if(!iegd_find_device(gart_id->device_id)) {
3760+ agp_put_bridge(gart_id->bridge_info);
3761+ AGN_ERROR("Unsupported device: %x", gart_id->device_id);
3762+ return -ENODEV;
3763+ }
3764+
3765+ bridge_device = gart_id->bridge_info;
3766+
3767+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
3768+ if (gart_id->device_id == PCI_DEVICE_ID_PLB) {
3769+ agp_init_pat();
3770+ bridge_device->vm_ops = &iegd_plb_vm_ops;
3771+ }
3772+#endif
3773+
3774+ AGN_DEBUG("driver %p, id %X, list %p", bridge_device->driver,
3775+ gart_id->device_id, driver_dispatch_list);
3776+ /* Dispatch the core function based on the chipset id */
3777+ ret = bridge_driver_init((bridge_driver_t **) &bridge_device->driver,
3778+ gart_id->device_id, driver_dispatch_list);
3779+
3780+ if(ret) {
3781+ agp_put_bridge(gart_id->bridge_info);
3782+ AGN_ERROR("Device found but no function hook");
3783+ return -ENODEV;
3784+ }
3785+
3786+ bridge_device->dev = pdev;
3787+ bridge_device->capndx = cap_ptr;
3788+ bridge_device->dev_private_data = &private_data;
3789+
3790+ AGN_LOG("Intel %s chipset detected", gart_id->name);
3791+
3792+ r = &pdev->resource[0];
3793+ if (!r->start && r->end) {
3794+ if (pci_assign_resource(pdev, 0)) {
3795+ AGN_ERROR("could not assign resource 0");
3796+ agp_put_bridge(gart_id->bridge_info);
3797+ return -ENODEV;
3798+ }
3799+ }
3800+
3801+ if(pci_enable_device(pdev)) {
3802+ AGN_ERROR("Unable to enable PCI device");
3803+ agp_put_bridge(gart_id->bridge_info);
3804+ return -ENODEV;
3805+ }
3806+
3807+ if(cap_ptr) {
3808+ pci_read_config_dword(pdev,
3809+ bridge_device->capndx+PCI_AGP_STATUS,
3810+ &bridge_device->mode);
3811+ }
3812+
3813+ pci_set_drvdata(pdev, bridge_device);
3814+ AGN_DEBUG("Exit");
3815+ return agp_add_bridge(bridge_device);
3816+}
3817+
3818+static void iegd_intel_remove(struct pci_dev *pdev)
3819+{
3820+ AGN_LOG("Exit from module");
3821+}
3822+
3823+int iegd_intel_suspend(struct pci_dev *dev, pm_message_t state)
3824+{
3825+ int pm_cap;
3826+ struct pci_dev *iegd_dev;
3827+ unsigned short pci_pm_csr;
3828+
3829+ AGN_DEBUG("Enter");
3830+
3831+ if (!(IGD_IS_SUSPEND(state))) {
3832+ AGN_DEBUG("Unsupported PM event %d", state.event);
3833+ return -EINVAL;
3834+ }
3835+
3836+ iegd_dev = private_data.pdev;
3837+
3838+ /* Save our resources */
3839+ IGD_PCI_SAVE_STATE(iegd_dev, private_data.pm_save);
3840+
3841+ /* Find the PM CSR */
3842+ pm_cap = pci_find_capability(iegd_dev, PCI_CAP_ID_PM);
3843+
3844+ if (!pm_cap) {
3845+ AGN_DEBUG("No PCI PM capability record.. Exit");
3846+ return 0;
3847+ }
3848+
3849+ /* Power down the device */
3850+ pci_read_config_word(iegd_dev, pm_cap + PCI_PM_CTRL, &pci_pm_csr);
3851+ pci_pm_csr |= PCI_PM_CTRL_STATE_MASK;
3852+ pci_write_config_word(iegd_dev, pm_cap + PCI_PM_CTRL, pci_pm_csr);
3853+
3854+ AGN_DEBUG("Suspended.. Exit");
3855+ return 0;
3856+}
3857+
3858+int iegd_intel_resume(struct pci_dev *dev)
3859+{
3860+ int pm_cap;
3861+ struct pci_dev *iegd_dev;
3862+ unsigned short pci_pm_csr;
3863+
3864+ AGN_DEBUG("Enter");
3865+
3866+ iegd_dev = private_data.pdev;
3867+
3868+ /* Get back our resources */
3869+ IGD_PCI_RESTORE_STATE(iegd_dev, private_data.pm_save);
3870+
3871+ /* Find the PM CSR */
3872+ pm_cap = pci_find_capability(iegd_dev, PCI_CAP_ID_PM);
3873+
3874+ if (!pm_cap) {
3875+ AGN_DEBUG("No PCI PM capability record.. Exit");
3876+ return 0;
3877+ }
3878+
3879+ /* Power on device */
3880+ pci_read_config_word(iegd_dev, pm_cap + PCI_PM_CTRL, &pci_pm_csr);
3881+ pci_pm_csr &= ~PCI_PM_CTRL_STATE_MASK;
3882+ pci_write_config_word(iegd_dev, pm_cap + PCI_PM_CTRL, pci_pm_csr);
3883+
3884+ AGN_DEBUG("Resumed.. Exit");
3885+ return 0;
3886+}
3887+
3888+static struct pci_driver iegd_pci_driver = {
3889+ .name = "iegd-intel",
3890+ .id_table = iegd_pci_table,
3891+ .probe = iegd_intel_probe,
3892+ .remove = __devexit_p(iegd_intel_remove),
3893+ .suspend = iegd_intel_suspend,
3894+ .resume = iegd_intel_resume,
3895+};
3896+
3897+struct pci_dev *iegd_probe_device()
3898+{
3899+ int i;
3900+ struct pci_dev *dev;
3901+
3902+ AGN_DEBUG("Enter");
3903+
3904+ /* Probe for the supported devices */
3905+ for(i=0 ; i<sizeof(gart_pci_device_table)/sizeof(gart_dispatch_t);
3906+ i++) {
3907+ dev = pci_get_device(PCI_VENDOR_ID_INTEL,
3908+ (unsigned int)gart_pci_device_table[i].bridge_id, NULL);
3909+ if(dev){
3910+ gart_id = &gart_pci_device_table[i];
3911+ AGN_DEBUG("Device found.. Exit");
3912+ return dev;
3913+ }
3914+ }
3915+ AGN_DEBUG("Device not found.. Exit");
3916+ return NULL;
3917+}
3918+
3919+/**
3920+ * This is the first routine called by the init function.
3921+ * It probes for devices supported by IEGD. Once it finds
3922+ * a device, it checks whether a driver for this device
3923+ * already exists. If so, it gets the pci_driver structure
3924+ * for the existing driver, calls the pci unregister function
3925+ * to deregister it, and then registers the iegd agpgart
3926+ */
3927+static int iegd_agp_init(void)
3928+{
3929+ struct pci_driver *curr_driver;
3930+ struct pci_dev *temp_pdev;
3931+
3932+ AGN_DEBUG("Enter");
3933+
3934+ /* Probe for the Intel embedded graphics device chipset */
3935+ temp_pdev = iegd_probe_device();
3936+
3937+ if(!temp_pdev) {
3938+ AGN_ERROR("Probe device failed");
3939+ return -ENODEV;
3940+ }
3941+
3942+ gart_id->bridge_pdev = temp_pdev;
3943+ curr_driver = pci_dev_driver(gart_id->bridge_pdev);
3944+
3945+ if(curr_driver) {
3946+ /* FIXME: Don't know whether we have to make a separate copy of this
3947+ * structure */
3948+ gart_id->old_gart = curr_driver;
3949+
3950+ /* deregister pci driver from pci core. This is needed since we
3951+ * don't want two agpgarts residing in the kernel that respond to the
3952+ * same device id */
3953+ AGN_LOG("Unregister agpgart name %s", curr_driver->name);
3954+ pci_unregister_driver(curr_driver);
3955+ }
3956+
3957+ AGN_LOG("Registering iegd gart module");
3958+ /* Register our own to pci core */
3959+ AGN_DEBUG("Exit");
3960+ return pci_register_driver(&iegd_pci_driver);
3961+
3962+}
3963+
3964+static void iegd_restore_device(void)
3965+{
3966+ int ret;
3967+
3968+ AGN_DEBUG("Enter");
3969+
3970+ /* Decrement the reference for this pci device */
3971+ pci_dev_put(gart_id->bridge_pdev);
3972+
3973+ if(gart_id->old_gart) {
3974+ /* Register the original driver */
3975+ ret = pci_register_driver(gart_id->old_gart);
3976+ }
3977+
3978+ AGN_DEBUG("Exit");
3979+
3980+}
3981+
3982+static int iegd_gart_init(void)
3983+{
3984+ int ret;
3985+
3986+ AGN_DEBUG("Enter");
3987+
3988+ /* Find bridge based on chipset supported by IEGD */
3989+ ret = iegd_agp_init();
3990+ if(AGP_RET(ret)) {
3991+ AGN_LOG("Registering iegd drm module");
3992+ /* Initialize DRM module by calling DRM init function */
3993+ return DRM_INIT_MODULE();
3994+ } else {
3995+ /* Log the driver failed to register */
3996+ AGN_LOG("Driver registration failed");
3997+ }
3998+
3999+ AGN_DEBUG("Exit");
4000+
4001+ /* Return agp error if agp init failed */
4002+ return ret;
4003+}
4004+
4005+static void iegd_gart_exit(void)
4006+{
4007+ /* Unregister DRM module */
4008+ AGN_DEBUG("Unregister iegd DRM module");
4009+ DRM_EXIT_MODULE();
4010+
4011+ /* Remove our device from the kernel */
4012+ AGN_DEBUG("Unregister IKM module");
4013+ pci_unregister_driver(&iegd_pci_driver);
4014+
4015+ /* Restore back the old agp gart */
4016+ AGN_DEBUG("Register original module");
4017+ iegd_restore_device();
4018+}
4019+
4020+MODULE_LICENSE("GPL and additional rights");
4021+
4022+module_init(iegd_gart_init);
4023+module_exit(iegd_gart_exit);
4024diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/drm_test.c patch_script_temp/drivers/gpu/drm/iegd/drm/drm_test.c
4025--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/drm_test.c 1969-12-31 17:00:00.000000000 -0700
4026+++ patch_script_temp/drivers/gpu/drm/iegd/drm/drm_test.c 2009-10-06 10:30:05.000000000 -0700
4027@@ -0,0 +1,288 @@
4028+/* -*- pse-c -*-
4029+ *----------------------------------------------------------------------------
4030+ * Filename: drm_test.c
4031+ * $Revision: 1.3 $
4032+ *----------------------------------------------------------------------------
4033+ * Unit level test for IEGD DRM
4034+ * Copyright © 2009 Intel Corporation.
4035+ *
4036+ * This program is free software; you can redistribute it and/or modify it
4037+ * under the terms and conditions of the GNU General Public License,
4038+ * version 2, as published by the Free Software Foundation.
4039+ *
4040+ * This program is distributed in the hope it will be useful, but WITHOUT
4041+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4042+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4043+ * more details.
4044+ *
4045+ * You should have received a copy of the GNU General Public License along with
4046+ * this program; if not, write to the Free Software Foundation, Inc.,
4047+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4048+ *
4049+ */
4050+
4051+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
4052+ *
4053+ * Redistribution and use in source and binary forms, with or without
4054+ * modification, are permitted provided that the following conditions are met:
4055+ * Redistributions of source code must retain the above copyright notice,
4056+ * this list of conditions and the following disclaimer.
4057+ *
4058+ * Redistributions in binary form must reproduce the above copyright
4059+ * notice, this list of conditions and the following disclaimer in the
4060+ * documentation and/or other materials provided with the distribution.
4061+ *
4062+ * Neither the name Intel Corporation nor the names of its contributors
4063+ * may be used to endorse or promote products derived from this software
4064+ * without specific prior written permission.
4065+ *
4066+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4067+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
4068+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
4069+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
4070+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
4071+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
4072+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
4073+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
4074+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
4075+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
4076+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4077+ *
4078+ */
4079+
4080+#include <fcntl.h>
4081+#include <unistd.h>
4082+#include <sys/ioctl.h>
4083+#include <stdlib.h>
4084+#include <stdio.h>
4085+#include <string.h>
4086+
4087+#include "iegd.h"
4088+#include "iegd_drm_client.h"
4089+
4090+#define DRM_IOCTL_BASE 'd'
4091+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
4092+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
4093+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
4094+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
4095+
4096+#define DRM_IOCTL_INTEL_GETPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4097+ DRM_INTEL_GETPAGES, drm_intel_getpages_t)
4098+#define DRM_IOCTL_INTEL_FREEPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4099+ DRM_INTEL_FREEPAGES, drm_intel_freepages_t)
4100+#define DRM_IOCTL_INTEL_INFO_INIT DRM_IOW( DRM_BASE_COMMAND + \
4101+ DRM_INTEL_INFO_INIT, intel_drm_info_t)
4102+#define DRM_IOCTL_INTEL_INFO_GET DRM_IOR( DRM_BASE_COMMAND + \
4103+ DRM_INTEL_INFO_GET, intel_drm_info_t)
4104+
4105+#define PAGE_SIZE 4096
4106+
4107+#define VERBOSE "-v"
4108+
4109+int main(int argc, char *argv[])
4110+{
4111+ int file_desc, ret_value, i;
4112+ unsigned long *virt_ptr;
4113+ int failed=0;
4114+
4115+ /* Check for verbose mode */
4116+ int index;
4117+ int verbose = 0;
4118+
4119+ for (index = 1; index < argc; index++)
4120+ {
4121+ if(strcmp(argv[index], VERBOSE) == 0)
4122+ {
4123+ verbose = 1;
4124+ printf("Verbose mode.\n");
4125+ }
4126+ }
4127+
4128+ if (verbose)
4129+ {
4130+ printf("Starting client\n");
4131+ }
4132+ /* Open the drm */
4133+ file_desc=open("/dev/dri/card0",O_RDWR);
4134+
4135+ if(file_desc<0){
4136+ /* In case of a different /dev tree structure,
4137+ * try /dev/card0
4138+ */
4139+ file_desc=open("/dev/card0",O_RDWR);
4140+ }
4141+
4142+ if(file_desc<0){
4143+ printf("Can't open device file:%s\n",DRIVER_DESC);
4144+ printf("Check for root level permissions.\n");
4145+ printf("Reinstall IKM.\n");
4146+ exit(-1);
4147+ }
4148+
4149+ if (verbose)
4150+ {
4151+ printf("Open device file:%d\n",file_desc);
4152+ /* This the ioctl that allocates physical memory */
4153+ printf("Testing ioctl for memory allocation\n");
4154+ }
4155+
4156+ drm_intel_getpages_t getpages;
4157+ /* set the number of bytes we want the drm to allocate */
4158+ getpages.size=(PAGE_SIZE- 1000);
4159+
4160+ ret_value=ioctl(file_desc,DRM_IOCTL_INTEL_GETPAGES,&getpages);
4161+ if (verbose)
4162+ {
4163+ printf("IOCTL call memory allocation test:");
4164+ }
4165+ if(ret_value<0){
4166+ printf("DRM module failed memory allocation test.\n");
4167+ printf("Reinstall IKM.\n");
4168+ exit(-1);
4169+ }
4170+ if (verbose)
4171+ {
4172+ printf(" Success\n");
4173+ printf("size: %d,phy_address: %#x,virt_address: %#x,offset: %#x\n",
4174+ getpages.size, getpages.phy_address, getpages.virt_address,
4175+ getpages.offset);
4176+
4177+ /* test for memory access */
4178+
4179+ printf("Testing ioctl for memory access\n");
4180+ }
4181+
4182+ virt_ptr=(unsigned long *)getpages.virt_address;
4183+
4184+ /* input 0..10 into subsequent memory */
4185+
4186+ for(i=0;i<=11;i++){
4187+ *virt_ptr=i;
4188+ virt_ptr++;
4189+ }
4190+
4191+ /*read from subsequent memory */
4192+ virt_ptr=(unsigned long *)getpages.virt_address;
4193+ for(i=0;i<=11;i++){
4194+ if (verbose)
4195+ {
4196+ printf("virt_ptr @ %#x,value: %d\n",virt_ptr,*virt_ptr);
4197+ }
4198+ if(*virt_ptr!=i){
4199+ printf("Failed memory read.\n"); failed=1;
4200+ }
4201+ virt_ptr++;
4202+ }
4203+ if (verbose)
4204+ {
4205+ printf("IOCTL call memory access test:");
4206+ }
4207+ if(failed){
4208+ printf("DRM module failed memory access test.\n");
4209+ printf("Reinstall IKM.\n");
4210+ exit(-1);
4211+ }
4212+ if (verbose)
4213+ {
4214+ printf(" Success\n");
4215+ }
4216+ /* freeing memory */
4217+
4218+ drm_intel_freepages_t freepages;
4219+ freepages.size=getpages.size;
4220+ freepages.phy_address=getpages.phy_address;
4221+ freepages.virt_address=getpages.virt_address;
4222+ if (verbose)
4223+ {
4224+ printf("Freeing phy_address:%#x,size:%#x\n",
4225+ freepages.phy_address,freepages.size);
4226+
4227+ printf("Testing ioctl call for info init\n");
4228+ }
4229+ /* init the drm info structure in the drm and test its value */
4230+ intel_drm_info_t info;
4231+ intel_drm_info_t test_info;
4232+ info.device_id=0x456;
4233+ info.revision=333;
4234+ info.video_memory_offset=0x10245;
4235+ info.video_memory_size=987;
4236+ info.hw_status_offset=0x444;
4237+ if (verbose)
4238+ {
4239+
4240+ printf("Testing init info device_id: %#x,revision: %d,offset: %#x,"
4241+ " size: %d, hw_status_offset: %lx\n", info.device_id, info.revision,
4242+ info.video_memory_offset, info.video_memory_size,
4243+ info.hw_status_offset);
4244+ }
4245+
4246+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_INIT,&info);
4247+
4248+ if (verbose)
4249+ {
4250+ printf("Alternative data to init\n");
4251+ }
4252+ /* init the drm info structure in the drm and test its value */
4253+ info.device_id=0x123;
4254+ info.revision=456;
4255+ info.video_memory_offset=0x789;
4256+ info.video_memory_size=111;
4257+ info.hw_status_offset=0x555;
4258+
4259+ if (verbose)
4260+ {
4261+ printf("Testing init 2nd info device_id: %#x,revision: %d,offset: %#x,"
4262+ " size: %d, hw_status_offset: %lx\n", info.device_id, info.revision,
4263+ info.video_memory_offset, info.video_memory_size,
4264+ info.hw_status_offset);
4265+
4266+ printf("Get init info\n");
4267+ }
4268+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_GET,&test_info);
4269+
4270+ if (verbose)
4271+ {
4272+ printf("IOCTL call for info init:");
4273+ printf("Got init info device_id: %#x,revision: %d,"
4274+ "offset: %#x,size:%d, hw_status_offset: %lx\n", test_info.device_id,
4275+ test_info.revision, test_info.video_memory_offset,
4276+ test_info.video_memory_size, test_info.hw_status_offset);
4277+ }
4278+
4279+ /* compare with original data to see if its still the same */
4280+ info.device_id=0x456;
4281+ info.revision=333;
4282+ info.video_memory_offset=0x10245;
4283+ info.video_memory_size=987;
4284+ info.hw_status_offset=0x444;
4285+ failed=0;
4286+
4287+ if(info.device_id!=test_info.device_id){
4288+ failed=1;
4289+ }
4290+ if(info.revision!=test_info.revision){
4291+ failed=1;
4292+ }
4293+ if(info.video_memory_offset!=test_info.video_memory_offset){
4294+ failed=1;
4295+ }
4296+ if(info.video_memory_size!=test_info.video_memory_size){
4297+ failed=1;
4298+ }
4299+ if(info.hw_status_offset!=test_info.hw_status_offset){
4300+ failed=1;
4301+ }
4302+
4303+ if(failed){
4304+ printf("DRM module failed IOCTL info did not match.\n");
4305+ printf("Reinstall IKM.");
4306+ exit(-1);
4307+ }
4308+
4309+ printf("DRM successfully loaded\n");
4310+
4311+ close(file_desc);
4312+
4313+ return 0;
4314+
4315+}
4316diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/drmult.c patch_script_temp/drivers/gpu/drm/iegd/drm/drmult.c
4317--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/drmult.c 1969-12-31 17:00:00.000000000 -0700
4318+++ patch_script_temp/drivers/gpu/drm/iegd/drm/drmult.c 2009-10-06 10:30:05.000000000 -0700
4319@@ -0,0 +1,270 @@
4320+/* -*- pse-c -*-
4321+ *----------------------------------------------------------------------------
4322+ * Filename: drmult.c
4323+ * $Revision: 1.7 $
4324+ *----------------------------------------------------------------------------
4325+ * Unit level test for IEGD DRM
4326+ * Copyright © 2008, Intel Corporation.
4327+ *
4328+ * This program is free software; you can redistribute it and/or modify it
4329+ * under the terms and conditions of the GNU General Public License,
4330+ * version 2, as published by the Free Software Foundation.
4331+ *
4332+ * This program is distributed in the hope it will be useful, but WITHOUT
4333+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4334+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4335+ * more details.
4336+ *
4337+ * You should have received a copy of the GNU General Public License along with
4338+ * this program; if not, write to the Free Software Foundation, Inc.,
4339+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4340+ *
4341+ */
4342+
4343+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
4344+ *
4345+ * Redistribution and use in source and binary forms, with or without
4346+ * modification, are permitted provided that the following conditions are met:
4347+ * Redistributions of source code must retain the above copyright notice,
4348+ * this list of conditions and the following disclaimer.
4349+ *
4350+ * Redistributions in binary form must reproduce the above copyright
4351+ * notice, this list of conditions and the following disclaimer in the
4352+ * documentation and/or other materials provided with the distribution.
4353+ *
4354+ * Neither the name Intel Corporation nor the names of its contributors
4355+ * may be used to endorse or promote products derived from this software
4356+ * without specific prior written permission.
4357+ *
4358+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4359+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
4360+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
4361+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
4362+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
4363+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
4364+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
4365+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
4366+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
4367+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
4368+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4369+ *
4370+ */
4371+
4372+/* Client to test the ioctl.
4373+ * Make sure you change the permission bits in intel.h to 0,0
4374+ * before you start using this.
4375+ */
4376+
4377+#include "iegd.h"
4378+
4379+#include <fcntl.h>
4380+#include <unistd.h>
4381+#include <sys/ioctl.h>
4382+#include <stdlib.h>
4383+#include <stdio.h>
4384+
4385+#if 0
4386+#define DRM_IOCTL_BASE 'd'
4387+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
4388+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
4389+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
4390+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
4391+#endif
4392+
4393+#include "iegd_drm_client.h"
4394+
4395+#if 0
4396+#define DRM_IOCTL_INTEL_GETPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4397+ DRM_INTEL_GETPAGES, drm_intel_getpages_t)
4398+#define DRM_IOCTL_INTEL_FREEPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4399+ DRM_INTEL_FREEPAGES, drm_intel_freepages_t)
4400+#define DRM_IOCTL_INTEL_INFO_INIT DRM_IOW( DRM_BASE_COMMAND + \
4401+ DRM_INTEL_INFO_INIT, intel_drm_info_t)
4402+#define DRM_IOCTL_INTEL_INFO_GET DRM_IOR( DRM_BASE_COMMAND + \
4403+ DRM_INTEL_INFO_GET, intel_drm_info_t)
4404+#endif
4405+
4406+
4407+#define PAGE_SIZE 4096
4408+int main()
4409+{
4410+int file_desc, ret_value;
4411+printf("Starting client\n");
4412+/* Open the drm */
4413+file_desc=open("/dev/dri/card0",O_RDWR);
4414+
4415+if(file_desc<0){
4416+/* Suse has a different /dev tree struct.
4417+ * try /dev/card0
4418+ */
4419+
4420+file_desc=open("/dev/card0",O_RDWR);
4421+
4422+}
4423+
4424+if(file_desc<0){
4425+ printf("Can't open device file:%s\n",DRIVER_DESC);
4426+ exit(-1);
4427+}
4428+
4429+printf("Open device file:%d\n",file_desc);
4430+
4431+
4432+/* Test interrupt IOCTL */
4433+interrupt_info_t irq_info;
4434+
4435+irq_info.req_status = 0;
4436+irq_info.req_type = READ_INT; /* CLEAR_INT, WAIT_INT */
4437+irq_info.in[0] = 0xffffffff;
4438+irq_info.in[1] = 0xffffffff;
4439+irq_info.in[2] = 0xffffffff;
4440+irq_info.in[3] = 0xffffffff;
4441+irq_info.in[4] = 0xa5a5a5a5;
4442+irq_info.in[5] = 0xdeadbeef;
4443+
4444+ret_value = ioctl(file_desc, DRM_IOCTL_INTEL_INTERRUPT, &irq_info);
4445+printf("ULT IOCTL call read interrupt tests: %d\n\n", irq_info.req_status);
4446+
4447+irq_info.req_status = 0;
4448+irq_info.req_type = WAIT_INT;
4449+irq_info.in[0] = 0xffffffff;
4450+irq_info.in[1] = 0xffffffff;
4451+irq_info.in[2] = 0xffffffff;
4452+irq_info.in[3] = 0xffffffff;
4453+
4454+ret_value = ioctl(file_desc, DRM_IOCTL_INTEL_INTERRUPT, &irq_info);
4455+printf("ULT IOCTL call wait interrupt tests: %d\n\n", irq_info.req_status);
4456+
4457+
4458+
4459+/* This the ioctl that allocates physical memory */
4460+printf("ULT: Testing ioctl for memory allocation\n");
4461+
4462+drm_intel_getpages_t getpages;
4463+/* set the number of bytes we want the drm to allocate */
4464+getpages.size=(PAGE_SIZE- 1000);
4465+
4466+ret_value=ioctl(file_desc,DRM_IOCTL_INTEL_GETPAGES,&getpages);
4467+printf("ULT IOCTL call memory allocation test:");
4468+if(ret_value<0){
4469+ printf(" Failed\n");
4470+ exit(-1);
4471+}
4472+printf(" Success\n");
4473+printf("size%d,phy_address:%#x,virt_address:%#x,offset:%#x\n",getpages.size,getpages.phy_address,getpages.virt_address,getpages.offset);
4474+
4475+/* test for memory access */
4476+
4477+printf("ULT: Testing ioctl for memory access\n");
4478+int i;
4479+unsigned long *virt_ptr;
4480+
4481+virt_ptr=(unsigned long *)getpages.virt_address;
4482+
4483+/* input 0..10 into subsequent memory */
4484+
4485+for(i=0;i<=11;i++){
4486+*virt_ptr=i;
4487+virt_ptr++;
4488+
4489+}
4490+
4491+/*read from subsequent memory */
4492+int failed=0;
4493+virt_ptr=(unsigned long *)getpages.virt_address;
4494+for(i=0;i<=11;i++){
4495+ printf("virt_ptr@%#x,value:%d\n",virt_ptr,*virt_ptr);
4496+ if(*virt_ptr!=i){
4497+ printf("Test failed!\n"); failed=1;
4498+ }
4499+virt_ptr++;
4500+}
4501+printf("ULT IOCTL call memory access test:");
4502+if(failed){
4503+ printf(" Failed\n");
4504+ exit(-1);
4505+}
4506+
4507+ printf(" Success\n");
4508+/* freeing memory */
4509+
4510+drm_intel_freepages_t freepages;
4511+freepages.size=getpages.size;
4512+freepages.phy_address=getpages.phy_address;
4513+freepages.virt_address=getpages.virt_address;
4514+printf("Freeing phy_address:%#x,size:%#x\n",freepages.phy_address,freepages.size);
4515+
4516+/* init the drm info structure in the drm and test its value */
4517+
4518+printf("ULT: Testing ioctl call for info init\n");
4519+ intel_drm_info_t info;
4520+ intel_drm_info_t test_info;
4521+ info.device_id=0x456;
4522+ info.revision=333;
4523+ info.video_memory_offset=0x10245;
4524+ info.video_memory_size=987;
4525+ info.hw_status_offset=0x444;
4526+
4527+ printf("Testing init info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
4528+ info.device_id,info.revision,info.video_memory_offset,info.video_memory_size,info.hw_status_offset);
4529+
4530+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_INIT,&info);
4531+
4532+/* init the drm info structure in the drm and test its value */
4533+printf("Alternative data to init\n");
4534+ info.device_id=0x123;
4535+ info.revision=456;
4536+ info.video_memory_offset=0x789;
4537+ info.video_memory_size=111;
4538+ info.hw_status_offset=0x555;
4539+
4540+ printf("Testing init 2nd info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
4541+ info.device_id,info.revision,info.video_memory_offset,info.video_memory_size,info.hw_status_offset);
4542+
4543+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_INIT,&info);
4544+
4545+
4546+printf("Get init info\n");
4547+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_GET,&test_info);
4548+
4549+ printf("Got init info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
4550+ test_info.device_id,test_info.revision,test_info.video_memory_offset,test_info.video_memory_size,test_info.hw_status_offset);
4551+/* compare with original data to see if its still the same */
4552+info.device_id=0x456;
4553+info.revision=333;
4554+info.video_memory_offset=0x10245;
4555+info.video_memory_size=987;
4556+info.hw_status_offset=0x444;
4557+failed=0;
4558+
4559+if(info.device_id!=test_info.device_id){
4560+ failed=1;
4561+}
4562+if(info.revision!=test_info.revision){
4563+ failed=1;
4564+}
4565+if(info.video_memory_offset!=test_info.video_memory_offset){
4566+ failed=1;
4567+}
4568+if(info.video_memory_size!=test_info.video_memory_size){
4569+ failed=1;
4570+}
4571+if(info.hw_status_offset!=test_info.hw_status_offset){
4572+ failed=1;
4573+}
4574+
4575+printf("ULT IOCTL call for info init:");
4576+if(failed){
4577+ printf(" Failed\n");
4578+ exit(-1);
4579+}
4580+
4581+ printf(" Success\n");
4582+
4583+close(file_desc);
4584+/*
4585+sleep(100000000000);
4586+*/
4587+return 0;
4588+
4589+}
4590diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd.h
4591--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd.h 1969-12-31 17:00:00.000000000 -0700
4592+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd.h 2009-10-06 10:30:05.000000000 -0700
4593@@ -0,0 +1,117 @@
4594+
4595+/* -*- pse-c -*-
4596+ *----------------------------------------------------------------------------
4597+ * Filename: iegd.h
4598+ * $Revision: 1.7 $
4599+ *----------------------------------------------------------------------------
4600+ * Gart and DRM driver for Intel Embedded Graphics Driver
4601+ * Copyright © 2008, Intel Corporation.
4602+ *
4603+ * This program is free software; you can redistribute it and/or modify it
4604+ * under the terms and conditions of the GNU General Public License,
4605+ * version 2, as published by the Free Software Foundation.
4606+ *
4607+ * This program is distributed in the hope it will be useful, but WITHOUT
4608+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4609+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4610+ * more details.
4611+ *
4612+ * You should have received a copy of the GNU General Public License along with
4613+ * this program; if not, write to the Free Software Foundation, Inc.,
4614+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4615+ *
4616+ */
4617+
4618+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
4619+ *
4620+ * Redistribution and use in source and binary forms, with or without
4621+ * modification, are permitted provided that the following conditions are met:
4622+ * Redistributions of source code must retain the above copyright notice,
4623+ * this list of conditions and the following disclaimer.
4624+ *
4625+ * Redistributions in binary form must reproduce the above copyright
4626+ * notice, this list of conditions and the following disclaimer in the
4627+ * documentation and/or other materials provided with the distribution.
4628+ *
4629+ * Neither the name Intel Corporation nor the names of its contributors
4630+ * may be used to endorse or promote products derived from this software
4631+ * without specific prior written permission.
4632+ *
4633+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4634+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
4635+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
4636+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
4637+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
4638+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
4639+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
4640+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
4641+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
4642+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
4643+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4644+ *
4645+ */
4646+#include <linux/version.h>
4647+#ifndef __IEGD_H__
4648+#define __IEGD_H__
4649+
4650+/* General customization:
4651+ */
4652+#define __HAVE_AGP 1
4653+#define __MUST_HAVE_AGP 0
4654+#define __HAVE_MTRR 0
4655+#define __HAVE_CTX_BITMAP 1
4656+
4657+#define DRIVER_AUTHOR " "
4658+
4659+#define DRIVER_NAME "iegd"
4660+#define DRIVER_DESC "Intel DRM"
4661+#define DRIVER_DATE "20081022"
4662+
4663+#define DRIVER_MAJOR 1
4664+#define DRIVER_MINOR 0
4665+#define DRIVER_PATCHLEVEL 1
4666+
4667+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,5)
4668+#define KERNEL265 1
4669+#endif
4670+
4671+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)) && \
4672+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)))
4673+#define KERNEL2611 1
4674+#endif
4675+
4676+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) && \
4677+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)))
4678+#define KERNEL2615 1
4679+#endif
4680+
4681+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
4682+#define KERNEL2624 1
4683+#endif
4684+
4685+#ifndef KERNEL265
4686+#define KERNEL265 0
4687+#endif
4688+
4689+#ifndef KERNEL2611
4690+#define KERNEL2611 0
4691+#endif
4692+
4693+#ifndef KERNEL2615
4694+#define KERNEL2615 0
4695+#endif
4696+
4697+#ifndef KERNEL2624
4698+#define KERNEL2624 0
4699+#endif
4700+
4701+/* For some arcane reason, certain things need to be defined in this file.
4702+ * This is defined in intel_interface_265.h. If not, the drm won't
4703+ * compile properly.
4704+ */
4705+#include "iegd_interface_265.h"
4706+#include "iegd_interface_2611.h"
4707+#include "iegd_interface_2615.h"
4708+#include "iegd_interface_2624.h"
4709+
4710+#endif
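As a hedged aside on the KERNEL265/KERNEL2611/KERNEL2615/KERNEL2624 flags defined above: at most one of them evaluates to 1 for any given kernel, so each per-version translation unit can gate its whole body on a single preprocessor test (iegd_interface_2611.c in this patch does exactly this with "#if KERNEL2611"). The sketch below is illustrative only and not part of the patch; the helper name iegd_kernel_generation() is made up.

/* Illustrative sketch only; assumes iegd.h (above) has been included. */
#include "iegd.h"

static const char *iegd_kernel_generation(void)
{
#if KERNEL2624
	return "2.6.24 and newer";
#elif KERNEL2615
	return "2.6.15 - 2.6.23";
#elif KERNEL2611
	return "2.6.11 - 2.6.14";
#elif KERNEL265
	return "2.6.5 and older";
#else
	return "2.6.6 - 2.6.10 (not covered by these flags)";
#endif
}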
4711diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drm.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drm.h
4712--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drm.h 1969-12-31 17:00:00.000000000 -0700
4713+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drm.h 2009-10-06 10:30:05.000000000 -0700
4714@@ -0,0 +1,116 @@
4715+/* -*- pse-c -*-
4716+ *----------------------------------------------------------------------------
4717+ * Filename: iegd_drm.h
4718+ * $Revision: 1.7 $
4719+ *----------------------------------------------------------------------------
4720+ * Gart and DRM driver for Intel Embedded Graphics Driver
4721+ * Copyright © 2008, Intel Corporation.
4722+ *
4723+ * This program is free software; you can redistribute it and/or modify it
4724+ * under the terms and conditions of the GNU General Public License,
4725+ * version 2, as published by the Free Software Foundation.
4726+ *
4727+ * This program is distributed in the hope it will be useful, but WITHOUT
4728+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4729+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4730+ * more details.
4731+ *
4732+ * You should have received a copy of the GNU General Public License along with
4733+ * this program; if not, write to the Free Software Foundation, Inc.,
4734+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4735+ *
4736+ */
4737+
4738+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
4739+ *
4740+ * Redistribution and use in source and binary forms, with or without
4741+ * modification, are permitted provided that the following conditions are met:
4742+ * Redistributions of source code must retain the above copyright notice,
4743+ * this list of conditions and the following disclaimer.
4744+ *
4745+ * Redistributions in binary form must reproduce the above copyright
4746+ * notice, this list of conditions and the following disclaimer in the
4747+ * documentation and/or other materials provided with the distribution.
4748+ *
4749+ * Neither the name Intel Corporation nor the names of its contributors
4750+ * may be used to endorse or promote products derived from this software
4751+ * without specific prior written permission.
4752+ *
4753+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4754+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
4755+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
4756+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
4757+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
4758+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
4759+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
4760+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
4761+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
4762+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
4763+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4764+ *
4765+ */
4766+
4767+#ifndef _IEGD_DRM_H__
4768+#define _IEGD_DRM_H__
4769+
4770+#include "iegd_drm_client.h"
4771+
4772+/* INTEL specific ioctls
4773+ * The device specific ioctl range is 0x40 to 0x79.
4774+ */
4775+
4776+#define DRM_IOCTL_INTEL_GETPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4777+ DRM_INTEL_GETPAGES, drm_intel_getpages_t)
4778+#define DRM_IOCTL_INTEL_FREEPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4779+ DRM_INTEL_FREEPAGES, drm_intel_freepages_t)
4780+#define DRM_IOCTL_INTEL_INFO_INIT DRM_IOW( DRM_BASE_COMMAND + \
4781+ DRM_INTEL_INFO_INIT, intel_drm_info_t)
4782+#define DRM_IOCTL_INTEL_INFO_GET DRM_IOR( DRM_BASE_COMMAND + \
4783+ DRM_INTEL_INFO_GET, intel_drm_info_t)
4784+#define DRM_IOCTL_INTEL_INTERRUPT DRM_IOWR( DRM_BASE_COMMAND + \
4785+ DRM_INTEL_INTERRUPT, interrupt_info_t)
4786+
4787+/* New ioctl to set kernel params:
4788+ */
4789+typedef struct drm_intel_listpages {
4790+ int pid;
4791+ int size;
4792+ unsigned long phy_address;
4793+ unsigned long virt_address;
4794+ unsigned long offset;
4795+} drm_intel_listpages_t;
4796+
4797+typedef struct drm_intel_list{
4798+ struct list_head head;
4799+ drm_intel_listpages_t *page;
4800+}drm_intel_list_t;
4801+/*
4802+ * This is the basic information structure that is obtained from the
4803+ * IEGD XFree driver.
4804+ */
4805+typedef struct intel_device_private{
4806+ drm_intel_list_t *pagelist;
4807+ intel_drm_info_t *info_ptr;
4808+ spinlock_t irqmask_lock;
4809+ uint8_t *sgx_reg;
4810+ uint8_t *vdc_reg;
4811+ uint8_t *msvdx_reg;
4812+ uint32_t sgx_irq_mask;
4813+ uint32_t sgx_irq_mask2;
4814+ uint32_t vdc_irq_mask;
4815+ uint32_t msvdx_irq_mask;
4816+ /* interrupt status bits returned once woken up */
4817+ uint32_t interrupt_status;
4818+ int irq_enabled;
4819+ /* condition to wake up on */
4820+ unsigned int event_present;
4821+ wait_queue_head_t event_queue;
4822+	/* interrupts that have already occurred */
4823+ unsigned int out_vdc;
4824+ unsigned int out_sgx;
4825+ unsigned int out_sgx2;
4826+ unsigned int out_mtx;
4827+} intel_device_private_t;
4828+
4829+#endif /* _IEGD_DRM_H__ */
4830+
4831diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drm_client.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drm_client.h
4832--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drm_client.h 1969-12-31 17:00:00.000000000 -0700
4833+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drm_client.h 2009-10-06 10:30:05.000000000 -0700
4834@@ -0,0 +1,139 @@
4835+/* -*- pse-c -*-
4836+ *----------------------------------------------------------------------------
4837+ * Filename: iegd_drm_client.h
4838+ * $Revision: 1.7 $
4839+ *----------------------------------------------------------------------------
4840+ * Gart and DRM driver for Intel Embedded Graphics Driver
4841+ * Copyright © 2008, Intel Corporation.
4842+ *
4843+ * This program is free software; you can redistribute it and/or modify it
4844+ * under the terms and conditions of the GNU General Public License,
4845+ * version 2, as published by the Free Software Foundation.
4846+ *
4847+ * This program is distributed in the hope it will be useful, but WITHOUT
4848+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4849+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4850+ * more details.
4851+ *
4852+ * You should have received a copy of the GNU General Public License along with
4853+ * this program; if not, write to the Free Software Foundation, Inc.,
4854+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4855+ *
4856+ */
4857+
4858+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
4859+ *
4860+ * Redistribution and use in source and binary forms, with or without
4861+ * modification, are permitted provided that the following conditions are met:
4862+ * Redistributions of source code must retain the above copyright notice,
4863+ * this list of conditions and the following disclaimer.
4864+ *
4865+ * Redistributions in binary form must reproduce the above copyright
4866+ * notice, this list of conditions and the following disclaimer in the
4867+ * documentation and/or other materials provided with the distribution.
4868+ *
4869+ * Neither the name Intel Corporation nor the names of its contributors
4870+ * may be used to endorse or promote products derived from this software
4871+ * without specific prior written permission.
4872+ *
4873+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4874+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
4875+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
4876+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
4877+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
4878+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
4879+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
4880+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
4881+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
4882+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
4883+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4884+ *
4885+ */
4886+
4887+#ifndef __IEGD_DRM_CLIENT_H__
4888+#define __IEGD_DRM_CLIENT_H__
4889+
4890+/* Requests made from client to drm */
4891+#define CLEAR_INT 1
4892+#define WAIT_INT 2
4893+#define READ_INT 3
4894+#define UNMASK_INT 4
4895+#define MASK_INT 5
4896+
4897+/* Responses returned to the client from drm */
4898+#define INT_INVALID -1
4899+#define INT_NOOP 0
4900+#define INT_CLEARED 1
4901+#define INT_HANDLED 2
4902+#define INT_READ 3
4903+#define INT_STORED 4
4904+#define INT_TIMEOUT 5
4905+
4906+typedef struct drm_intel_getpages {
4907+ int size;
4908+ unsigned long phy_address;
4909+ unsigned long virt_address;
4910+ unsigned long offset;
4911+} drm_intel_getpages_t;
4912+
4913+typedef struct drm_intel_freepages {
4914+ int size;
4915+ unsigned long phy_address;
4916+ unsigned long virt_address;
4917+ unsigned long offset;
4918+} drm_intel_freepages_t;
4919+
4920+/*
4921+ * This is the basic information structure that is obtained from the
4922+ * IEGD drm driver.
4923+ */
4924+typedef struct _intel_drm_info {
4925+ unsigned long device_id;
4926+ unsigned long revision;
4927+ unsigned long video_memory_offset;
4928+ unsigned long video_memory_size;
4929+ unsigned long hw_status_offset;
4930+} intel_drm_info_t;
4931+
4932+typedef struct {
4933+ /* request status returned to client */
4934+ unsigned int req_status;
4935+ /* what type of request is being made to drm (clear/wait/read) */
4936+ unsigned int req_type;
4937+ /* which interrupts to clear or look for */
4938+	unsigned long in[8]; /* Array of device dependent mask/request bits */
4939+	/* interrupts that have already occurred, returned to the client */
4940+	unsigned long out[8]; /* Array of device dependent status bits */
4941+} interrupt_info_t;
4942+
4943+#define DRM_IOCTL_BASE 'd'
4944+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
4945+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
4946+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
4947+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
4948+
4949+/* IOCTL numbers to be used alongside drmCommand* in the Xserver.
4950+ * Example taken from intel_dri.c (see also the usage sketch after this
4951+ * header):
4952+ * drmCommandWrite(iptr->drm_sub_fd, DRM_INTEL_INFO_INIT,
4953+ *                 &info, sizeof(intel_drm_info_t));
4954+ */
4955+
4956+#define DRM_BASE_COMMAND 0x40
4957+#define DRM_INTEL_GETPAGES 0x01
4958+#define DRM_INTEL_FREEPAGES 0x02
4959+#define DRM_INTEL_INFO_INIT 0x03
4960+#define DRM_INTEL_INFO_GET 0x04
4961+#define DRM_INTEL_INTERRUPT 0x05
4962+
4963+#define DRM_IOCTL_INTEL_GETPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4964+ DRM_INTEL_GETPAGES, drm_intel_getpages_t)
4965+#define DRM_IOCTL_INTEL_FREEPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4966+ DRM_INTEL_FREEPAGES, drm_intel_freepages_t)
4967+#define DRM_IOCTL_INTEL_INFO_INIT DRM_IOW( DRM_BASE_COMMAND + \
4968+ DRM_INTEL_INFO_INIT, intel_drm_info_t)
4969+#define DRM_IOCTL_INTEL_INFO_GET DRM_IOR( DRM_BASE_COMMAND + \
4970+ DRM_INTEL_INFO_GET, intel_drm_info_t)
4971+#define DRM_IOCTL_INTEL_INTERRUPT DRM_IOWR( DRM_BASE_COMMAND + \
4972+ DRM_INTEL_INTERRUPT, interrupt_info_t)
4973+#endif
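The drmCommandWrite comment above only shows a fragment, so here is a small hedged userspace sketch of how these command numbers are typically driven through libdrm. It assumes libdrm's drmCommandWrite()/drmCommandRead() helpers from xf86drm.h and an already-open, authenticated DRM file descriptor; the function name iegd_exchange_info() is made up for illustration and is not part of the patch.

#include <xf86drm.h>
#include "iegd_drm_client.h"

/* Push the X driver's view of the device into the DRM module, then read
 * the stored copy back.  libdrm adds the device-specific command base
 * (0x40) itself, so only the DRM_INTEL_* numbers are passed here. */
static int iegd_exchange_info(int fd, intel_drm_info_t *info)
{
	if (drmCommandWrite(fd, DRM_INTEL_INFO_INIT, info, sizeof(*info)))
		return -1;

	return drmCommandRead(fd, DRM_INTEL_INFO_GET, info, sizeof(*info));
}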
4974diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drv.c patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drv.c
4975--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drv.c 1969-12-31 17:00:00.000000000 -0700
4976+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drv.c 2009-10-06 10:30:05.000000000 -0700
4977@@ -0,0 +1,59 @@
4978+/* -*- pse-c -*-
4979+ *----------------------------------------------------------------------------
4980+ * Filename: iegd_drv.c
4981+ * $Revision: 1.5 $
4982+ *----------------------------------------------------------------------------
4983+ * Gart and DRM driver for Intel Embedded Graphics Driver
4984+ * Copyright © 2008, Intel Corporation.
4985+ *
4986+ * This program is free software; you can redistribute it and/or modify it
4987+ * under the terms and conditions of the GNU General Public License,
4988+ * version 2, as published by the Free Software Foundation.
4989+ *
4990+ * This program is distributed in the hope it will be useful, but WITHOUT
4991+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4992+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4993+ * more details.
4994+ *
4995+ * You should have received a copy of the GNU General Public License along with
4996+ * this program; if not, write to the Free Software Foundation, Inc.,
4997+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4998+ *
4999+ */
5000+
5001+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
5002+ *
5003+ * Redistribution and use in source and binary forms, with or without
5004+ * modification, are permitted provided that the following conditions are met:
5005+ * Redistributions of source code must retain the above copyright notice,
5006+ * this list of conditions and the following disclaimer.
5007+ *
5008+ * Redistributions in binary form must reproduce the above copyright
5009+ * notice, this list of conditions and the following disclaimer in the
5010+ * documentation and/or other materials provided with the distribution.
5011+ *
5012+ * Neither the name Intel Corporation nor the names of its contributors
5013+ * may be used to endorse or promote products derived from this software
5014+ * without specific prior written permission.
5015+ *
5016+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
5017+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
5018+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5019+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5020+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5021+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5022+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
5023+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
5024+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
5025+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
5026+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5027+ *
5028+ */
5029+
5030+#include <linux/config.h>
5031+#include "iegd.h"
5032+#include <drmP.h>
5033+#include <drm.h>
5034+#include "iegd_drm.h"
5035+#include "iegd_drv.h"
5036+
5037diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drv.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drv.h
5038--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drv.h 1969-12-31 17:00:00.000000000 -0700
5039+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drv.h 2009-10-06 10:30:05.000000000 -0700
5040@@ -0,0 +1,216 @@
5041+
5042+/* -*- pse-c -*-
5043+ *----------------------------------------------------------------------------
5044+ * Filename: iegd_drv.h
5045+ * $Revision: 1.15 $
5046+ *----------------------------------------------------------------------------
5047+ * Gart and DRM driver for Intel Embedded Graphics Driver
5048+ * Copyright © 2008, Intel Corporation.
5049+ *
5050+ * This program is free software; you can redistribute it and/or modify it
5051+ * under the terms and conditions of the GNU General Public License,
5052+ * version 2, as published by the Free Software Foundation.
5053+ *
5054+ * This program is distributed in the hope it will be useful, but WITHOUT
5055+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
5056+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
5057+ * more details.
5058+ *
5059+ * You should have received a copy of the GNU General Public License along with
5060+ * this program; if not, write to the Free Software Foundation, Inc.,
5061+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5062+ *
5063+ */
5064+
5065+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
5066+ *
5067+ * Redistribution and use in source and binary forms, with or without
5068+ * modification, are permitted provided that the following conditions are met:
5069+ * Redistributions of source code must retain the above copyright notice,
5070+ * this list of conditions and the following disclaimer.
5071+ *
5072+ * Redistributions in binary form must reproduce the above copyright
5073+ * notice, this list of conditions and the following disclaimer in the
5074+ * documentation and/or other materials provided with the distribution.
5075+ *
5076+ * Neither the name Intel Corporation nor the names of its contributors
5077+ * may be used to endorse or promote products derived from this software
5078+ * without specific prior written permission.
5079+ *
5080+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
5081+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
5082+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5083+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5084+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5085+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5086+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
5087+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
5088+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
5089+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
5090+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5091+ *
5092+ */
5093+
5094+#ifndef _IEGD_DRV_H_
5095+#define _IEGD_DRV_H_
5096+
5097+#define KB(x) ((x) * 1024)
5098+#define MB(x) (KB (KB (x)))
5099+#define GB(x) (MB (KB (x)))
5100+#include "iegd_drm.h"
5101+#include "igd_gart.h"
5102+
5103+/* Define the PCI IDs below */
5104+#define INTEL_PCI_IDS \
5105+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5106+ {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5107+ {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5108+ {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5109+ {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5110+ {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5111+ {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5112+ {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5113+ {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5114+ {0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5115+ {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5116+ {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5117+ {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5118+ {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5119+ {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5120+ {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5121+ {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5122+ {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5123+ {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5124+ {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5125+ {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5126+ {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5127+ {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5128+ {0, 0, 0}
5129+
5130+/* Recent kernels removed this macro from drmP.h */
5131+#ifndef VM_OFFSET
5132+#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
5133+#endif
5134+
5135+#ifndef DRMFILE
5136+#define DRMFILE struct file *
5137+#endif
5138+
5139+/*
5140+ * We check the kernel version here because from 2.6.23 onward some of the
5141+ * drm structure definitions changed: the typedefs for the drm data
5142+ * structures were removed to follow the kernel coding guidelines, which
5143+ * breaks backward compatibility with IKM. Since only the typedefs and the
5144+ * way the linked lists are handled changed, creating a separate file just
5145+ * for these changes would be redundant; implementation-wise everything is
5146+ * still the same. (An illustrative sketch follows at the end of this header.)
5147+ */
5148+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5149+typedef struct drm_device drm_device_t;
5150+typedef struct drm_file drm_file_t;
5151+typedef struct drm_map drm_map_t;
5152+typedef struct drm_map_list drm_map_list_t;
5153+typedef struct drm_vma_entry drm_vma_entry_t;
5154+typedef struct drm_ioctl_desc drm_ioctl_desc_t;
5155+
5156+#define INSERT_VMA() \
5157+ mutex_lock(&dev->struct_mutex); \
5158+ vma_entry->vma = vma; \
5159+ vma_entry->pid = current->pid; \
5160+ list_add(&vma_entry->head, &dev->vmalist); \
5161+ mutex_unlock(&dev->struct_mutex);
5162+
5163+#define LIST_FOR_EACH(l, d) list_for_each((l), &(d)->maplist)
5164+
5165+#else
5166+
5167+#define INSERT_VMA() \
5168+ mutex_lock(&dev->struct_mutex); \
5169+ vma_entry->vma = vma; \
5170+ vma_entry->next = dev->vmalist; \
5171+ vma_entry->pid = current->pid; \
5172+ dev->vmalist = vma_entry; \
5173+ mutex_unlock(&dev->struct_mutex);
5174+
5175+#define LIST_FOR_EACH(l, d) list_for_each((l), &(d)->maplist->head)
5176+
5177+#endif /* #if LINUX_VERSION_CODE */
5178+
5179+/* Prototypes and interfaces for the functions for the different
5180+ * kernel versions are defined below.
5181+ */
5182+
5183+/* function definition in intel_interface.c */
5184+extern int intel_mmap_buffers(struct file *filp,struct vm_area_struct *vma);
5185+
5186+/* Function definition to get pages; this is in intel_interface.c */
5187+extern int intel_getpages( drm_device_t *dev,struct file *filp, unsigned long arg );
5188+
5189+extern int intel_freepages(drm_device_t *dev , unsigned long arg );
5190+
5191+extern int intel_drm_info_init( drm_device_t *dev, unsigned long arg );
5192+
5193+extern int intel_drm_info_get( drm_device_t *dev, unsigned long arg );
5194+
5195+extern int intel_postinit(intel_device_private_t **priv);
5196+
5197+extern int intel_prerelease(drm_device_t *dev);
5198+
5199+/* Functions in intel_interface_265.c used in 2.6.5 kernel and below */
5200+
5201+extern int intel_postinit_265(drm_device_t *dev);
5202+
5203+extern int intel_prerelease_265(drm_device_t *dev);
5204+
5205+extern int intel_getpages_265( struct inode *inode, struct file *filp,
5206+ unsigned int cmd, unsigned long arg );
5207+
5208+extern int intel_freepages_265( struct inode *inode, struct file *filp,
5209+ unsigned int cmd, unsigned long arg );
5210+
5211+extern int intel_drm_info_init_265( struct inode *inode, struct file *filp,
5212+ unsigned int cmd, unsigned long arg );
5213+
5214+extern int intel_drm_info_get_265( struct inode *inode, struct file *filp,
5215+ unsigned int cmd, unsigned long arg );
5216+
5217+/* Functions in intel_interface_2611.c used in 2.6.11 kernel and above */
5218+
5219+extern int intel_postinit_2611(struct drm_device *dev,unsigned long flags);
5220+
5221+extern void intel_prerelease_2611(drm_device_t *dev,DRMFILE filp);
5222+
5223+extern int intel_getpages_2611( struct inode *inode, struct file *filp,
5224+ unsigned int cmd, unsigned long arg );
5225+
5226+extern int intel_freepages_2611( struct inode *inode, struct file *filp,
5227+ unsigned int cmd, unsigned long arg );
5228+
5229+extern int intel_drm_info_init_2611( struct inode *inode, struct file *filp,
5230+ unsigned int cmd, unsigned long arg );
5231+
5232+extern int intel_drm_info_get_2611( struct inode *inode, struct file *filp,
5233+ unsigned int cmd, unsigned long arg );
5234+
5235+extern unsigned long intel_alloc_pages(int order, int area);
5236+
5237+extern void intel_free_pages(unsigned long address, int order, int area);
5238+
5239+extern int drm_plb_mmap(struct file *, struct vm_area_struct *);
5240+
5241+extern struct vm_operations_struct iegd_plb_vm_ops_drm;
5242+extern gart_dispatch_t *gart_id;
5243+extern dev_private_data_t private_data;
5244+
5245+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
5246+extern void psb_irq_preinstall(struct drm_device *dev);
5247+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
5248+extern void psb_irq_postinstall(struct drm_device *dev);
5249+#else
5250+extern int psb_irq_postinstall(struct drm_device *dev);
5251+#endif
5252+extern void psb_irq_uninstall(struct drm_device *dev);
5253+extern int psb_init(intel_device_private_t *priv);
5254+int intel_drm_plb_interrupts( drm_device_t *dev, void *data );
5255+#endif
5256+
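To illustrate the compatibility note above: because this header re-creates the old typedef names on 2.6.23+ and hides the maplist layout difference behind LIST_FOR_EACH, the same driver code builds on both sides of the cut. The helper below is a hypothetical sketch, not part of the patch.

/* Hypothetical helper, for illustration only: it is written against the
 * pre-2.6.23 type names and the LIST_FOR_EACH wrapper, yet builds on both
 * sides of the 2.6.23 cut thanks to the compatibility definitions above. */
static int iegd_count_maps(drm_device_t *dev)
{
	struct list_head *entry;
	int count = 0;

	/* Walks dev->maplist on 2.6.23+, dev->maplist->head on older kernels. */
	LIST_FOR_EACH(entry, dev)
		count++;

	return count;
}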
5257diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface.c patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface.c
5258--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface.c 1969-12-31 17:00:00.000000000 -0700
5259+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface.c 2009-10-06 10:30:05.000000000 -0700
5260@@ -0,0 +1,888 @@
5261+/* -*- pse-c -*-
5262+ *----------------------------------------------------------------------------
5263+ * Filename: iegd_interface.c
5264+ * $Revision: 1.23 $
5265+ *----------------------------------------------------------------------------
5266+ * Gart and DRM driver for Intel Embedded Graphics Driver
5267+ * Copyright © 2008, Intel Corporation.
5268+ *
5269+ * This program is free software; you can redistribute it and/or modify it
5270+ * under the terms and conditions of the GNU General Public License,
5271+ * version 2, as published by the Free Software Foundation.
5272+ *
5273+ * This program is distributed in the hope it will be useful, but WITHOUT
5274+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
5275+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
5276+ * more details.
5277+ *
5278+ * You should have received a copy of the GNU General Public License along with
5279+ * this program; if not, write to the Free Software Foundation, Inc.,
5280+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5281+ *
5282+ */
5283+
5284+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
5285+ *
5286+ * Redistribution and use in source and binary forms, with or without
5287+ * modification, are permitted provided that the following conditions are met:
5288+ * Redistributions of source code must retain the above copyright notice,
5289+ * this list of conditions and the following disclaimer.
5290+ *
5291+ * Redistributions in binary form must reproduce the above copyright
5292+ * notice, this list of conditions and the following disclaimer in the
5293+ * documentation and/or other materials provided with the distribution.
5294+ *
5295+ * Neither the name Intel Corporation nor the names of its contributors
5296+ * may be used to endorse or promote products derived from this software
5297+ * without specific prior written permission.
5298+ *
5299+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
5300+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
5301+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5302+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5303+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5304+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5305+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
5306+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
5307+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
5308+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
5309+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5310+ *
5311+ */
5312+
5313+#include "iegd.h"
5314+#include "igd_abs.h"
5315+#include "drmP.h"
5316+#include "drm.h"
5317+
5318+#include "iegd_drm.h"
5319+#include "iegd_drv.h"
5320+#include "psb_intregs.h"
5321+
5322+#ifndef MSR_IA32_CR_PAT
5323+#define MSR_IA32_CR_PAT 0x0277
5324+#endif
5325+#ifndef _PAGE_PAT
5326+#define _PAGE_PAT 0x080
5327+#endif
5328+
5329+extern void agp_init_pat(void);
5330+extern int agp_use_pat (void);
5331+
5332+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
5333+int drm_irq_install(drm_device_t *dev);
5334+#endif
5335+
5336+/* get intel_buffer_fops from the interface_###.c files */
5337+extern struct file_operations intel_buffer_fops;
5338+
5339+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
5340+extern struct vm_operations_struct iegd_plb_vm_ops;
5341+#endif
5342+
5343+
5344+/* Global variable to keep track the amount of memory we are using */
5345+static int memory;
5346+
5347+/* Our own mmap function to map physical memory into user space
5348+ */
5349+int intel_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
5350+{
5351+ DRM_DEBUG("\n");
5352+
5353+ lock_kernel();
5354+ vma->vm_flags |= (VM_IO | VM_RESERVED);
5355+ vma->vm_file = filp;
5356+ unlock_kernel();
5357+
5358+ DRM_DEBUG("VM_OFFSET(vma):%#x\n",(unsigned int)VM_OFFSET(vma));
5359+ if (REMAP_PAGE( vma,
5360+ vma->vm_start,
5361+ VM_OFFSET(vma),
5362+ (vma->vm_end - vma->vm_start),
5363+ pgprot_noncached(vma->vm_page_prot))){
5364+ return -EAGAIN;
5365+ }
5366+
5367+ return 0;
5368+}
5369+
5370+/* IOCTL to allocate 'size' pages and mmap them for the calling client,
5371+ * returning the corresponding virtual address
5372+ */
5373+int intel_getpages( drm_device_t *dev, struct file *filp, unsigned long arg ){
5374+
5375+ drm_intel_getpages_t getpages;
5376+ /* allocate some bytes */
5377+ unsigned long bytes;
5378+ int order;
5379+ int size;
5380+
5381+ unsigned long address;
5382+ unsigned long phy_address;
5383+ unsigned long offset;
5384+
5385+ struct page *pg;
5386+
5387+ unsigned long virtual;
5388+ struct file_operations *old_fops;
5389+
5390+ intel_device_private_t *dev_ptr=dev->dev_private;
5391+ drm_intel_listpages_t *page;
5392+ drm_intel_list_t *list;
5393+
5394+ DRM_DEBUG("\n");
5395+ /* copy user arguments */
5396+ if(copy_from_user(&getpages, (void __user *) arg, sizeof(getpages))){
5397+ return -EFAULT;
5398+ }
5399+
5400+ bytes=getpages.size;
5401+	/* Check whether this allocation would push total memory use past 16 MB.
5402+	 * This is to prevent a denial-of-service attack; 16 MB should be enough.
5403+	 */
5404+ if((memory+bytes)>MB(16) ){
5405+ /* We exceeded 16MEG. Bail out */
5406+ DRM_ERROR("Total memory allocated exceeded 16Meg!\n");
5407+ return -EFAULT;
5408+ }
5409+
5410+ /*number of pages that are needed*/
5411+ size=bytes>>PAGE_SHIFT;
5412+ if(bytes & ~(PAGE_SIZE*size)){
5413+ ++size;
5414+ }
5415+ order=ORDER(size);
5416+ DRM_DEBUG("Allocating bytes:%#lx,size:%d,order:%d\n",
5417+ (unsigned long)bytes,size,order);
5418+ /* allocate the pages */
5419+ /* returns kernel logical address.
5420+ * Is this the same as the kernel virtual address??
5421+ */
5422+ address=ALLOC_PAGES(order,0);
5423+ if(!address){
5424+ DRM_ERROR("Can't get pages\n");
5425+ return -EFAULT;
5426+ }
5427+ phy_address=__pa(address);
5428+
5429+ /* Find virtual address of the phys address */
5430+ pg=virt_to_page((void *)address);
5431+ offset=pg->index;
5432+ /* Find the number of bytes that is actually allocated */
5433+ size=PAGE_SIZE<<order;
5434+ DRM_DEBUG("Allocated address:%#lx,page offset:%#lx,phy_address:%#lx\n",
5435+ address,offset,phy_address);
5436+
5437+ /*do_mmap on the logical address and return virtual address */
5438+ down_write(&current->mm->mmap_sem);
5439+
5440+ old_fops= (struct file_operations *)filp->f_op;
5441+ filp->f_op=&intel_buffer_fops;
5442+
5443+ virtual=do_mmap(filp,0,size,PROT_READ|PROT_WRITE,MAP_SHARED,phy_address);
5444+
5445+ filp->f_op=old_fops;
5446+ up_write(&current->mm->mmap_sem);
5447+ DRM_DEBUG("Mmaped virtual:%#lx,address:%#lx\n",virtual,
5448+ (unsigned long)__va(phy_address));
5449+ if(virtual > -1024UL){
5450+ DRM_ERROR("mmap failed:%d\n",(int)virtual);
5451+ return -EFAULT;
5452+ }
5453+ getpages.phy_address=phy_address;
5454+ getpages.virt_address=virtual;
5455+ getpages.size=size;
5456+ getpages.offset=offset;
5457+
5458+ DRM_DEBUG("Mmap success requested size:%d (%d)\n",
5459+ getpages.size,(int)bytes);
5460+
5461+ /* alloc the page to be put into the linked list */
5462+ page=ALLOC(sizeof(*page),DRM_MEM_DRIVER);
5463+ if(!page){
5464+ DRM_DEBUG("Can't alloc list for page\n");
5465+ return -ENOMEM;
5466+ }
5467+
5468+ /*page->pid=current->pid;*/
5469+ page->pid=current->group_leader->pid;
5470+ page->size=size;
5471+ page->phy_address=phy_address;
5472+ page->virt_address=virtual;
5473+ page->offset=offset;
5474+
5475+ DRM_DEBUG("parent pid:%d,pid:%d,group_leader->pid:%d\n"
5476+ ,current->parent->pid,current->pid,current->group_leader->pid);
5477+ /* Alloc the list to be added then add it to the linked list */
5478+ list=ALLOC(sizeof(*list),DRM_MEM_DRIVER);
5479+ if(!list){
5480+ DRM_DEBUG("Can't alloc list for page\n");
5481+ FREE(page,sizeof(*page),0);
5482+ return -ENOMEM;
5483+ }
5484+ memset(list,0,sizeof(*list));
5485+ list->page=page;
5486+ LOCK_DRM(dev);
5487+ list_add(&list->head,&dev_ptr->pagelist->head);
5488+ UNLOCK_DRM(dev);
5489+ if(copy_to_user((void __user *) arg,&getpages,sizeof(getpages))){
5490+ return -EFAULT;
5491+ }
5492+ /* update the total amount of memory we use */
5493+ memory+=size;
5494+ DRM_DEBUG("memory has:%d bytes\n",memory);
5495+
5496+return 0;
5497+}
5498+
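/* Illustrative worked example (not from the original code) of the
 * size/order arithmetic in intel_getpages() above, assuming 4 KiB pages
 * and an ORDER() that rounds up to the next power of two like drm_order():
 *   request of 10000 bytes
 *   size  = 10000 >> PAGE_SHIFT = 2, remainder present, so size = 3 pages
 *   order = ORDER(3) = 2          (2^2 = 4 pages >= 3)
 *   size  = PAGE_SIZE << order = 16384 bytes actually allocated
 * getpages.size therefore reports 16384 rather than the 10000 requested,
 * and the caller is expected to pass that same value back through
 * intel_freepages() so that the same order is recomputed on free.
 */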
5499+/* IOCTL to free pages that are allocated by getpages
5500+ */
5501+int intel_freepages( drm_device_t *dev, unsigned long arg ){
5502+
5503+ drm_intel_freepages_t freepages;
5504+ /* allocate some bytes */
5505+ unsigned long bytes;
5506+ int order;
5507+ int size;
5508+
5509+ intel_device_private_t *dev_ptr=dev->dev_private;
5510+ drm_intel_listpages_t *page;
5511+ drm_intel_list_t *r_list=NULL;
5512+ struct list_head *pagelist;
5513+
5514+ DRM_DEBUG("Freeing pages\n");
5515+ /* copy user arguments */
5516+ if(copy_from_user(&freepages, (void __user *) arg, sizeof(freepages))){
5517+ return -EFAULT;
5518+ }
5519+
5520+ bytes=freepages.size;
5521+ /*number of pages that are needed*/
5522+ size=bytes>>PAGE_SHIFT;
5523+ if(bytes & ~(PAGE_SIZE*size)){
5524+ ++size;
5525+ }
5526+ order=ORDER(size);
5527+ DRM_DEBUG("bytes:%d,size:%d,order:%d,phy_address:%#lx\n",(int)bytes,(int)size,(int)order,freepages.phy_address);
5528+
5529+ /* free the pages */
5530+ DRM_DEBUG("freeing address:%#lx,size:%#lx\n",(unsigned long)__va(freepages.phy_address),(unsigned long)bytes);
5531+
5532+ DRM_DEBUG("parent pid:%d,pid:%d,group_leader->pid:%d\n"
5533+ ,current->parent->pid,current->pid,current->group_leader->pid);
5534+ /* See if the requested address is in our page list */
5535+ LOCK_DRM(dev);
5536+ pagelist=&dev_ptr->pagelist->head;
5537+ list_for_each(pagelist,&dev_ptr->pagelist->head){
5538+ r_list=list_entry(pagelist,drm_intel_list_t,head);
5539+ if((r_list->page->pid==current->group_leader->pid)
5540+ && (r_list->page->phy_address==freepages.phy_address)){
5541+
5542+ DRM_DEBUG("found pid:%d\n",current->group_leader->pid);
5543+ DRM_DEBUG("size:%d\n",r_list->page->size);
5544+ DRM_DEBUG("phy_address:%#lx\n",r_list->page->phy_address);
5545+ DRM_DEBUG("virt_add:%#lx\n",r_list->page->virt_address);
5546+ DRM_DEBUG("offset:%#lx\n",r_list->page->offset);
5547+
5548+ break;
5549+ }
5550+
5551+ }
5552+ if(pagelist==(&dev_ptr->pagelist->head)){
5553+ DRM_DEBUG("Can't find pages alloc for pid:%d\n",current->pid);
5554+ UNLOCK_DRM(dev);
5555+ return -EINVAL;
5556+ }
5557+
5558+ /* munmap the region 1st */
5559+ down_write(&current->mm->mmap_sem);
5560+ DRM_DEBUG("Unmapping virt_address:%#lx\n",freepages.virt_address);
5561+ do_munmap(current->mm,freepages.virt_address,bytes);
5562+ up_write(&current->mm->mmap_sem);
5563+
5564+ /* Free the pages! */
5565+ FREE_PAGES((unsigned long)__va(freepages.phy_address),order,0);
5566+
5567+ /* Free the page list */
5568+ page=r_list->page;
5569+ list_del(pagelist);
5570+ size=r_list->page->size;
5571+ FREE(pagelist,sizeof(*pagelist),0);
5572+ FREE(page,sizeof(*page),0);
5573+ UNLOCK_DRM(dev);
5574+
5575+ /* update the total memory that we use */
5576+ memory-=size;
5577+ DRM_DEBUG("memory has:%d bytes\n",memory);
5578+ return 0;
5579+}
5580+
5581+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
5582+/* This code is copied verbatim from the DRM module code in
5583+ * IKM/val/drm/drmv11p0/drm_irq.c. It's here because we
5584+ * need to activate interrupt handling, but for some reason the DRM module
5585+ * only exports the routine to disable interrupt handling drm_irq_uninstall(),
5586+ * and not the one to install.
5587+ *
5588+ * This could be problematic when new DRM versions appear.
5589+ *
5590+ * Fortunately, should a new DRM version appear, it should export
5591+ * drm_irq_install(), and then this source won't be needed at all; the
5592+ * code should compile cleanly with an external reference if this
5593+ * static version is removed completely.
5594+ */
5595+int drm_irq_install(drm_device_t * dev)
5596+{
5597+ int ret;
5598+ unsigned long sh_flags = 0;
5599+ int dev_irq = 0;
5600+
5601+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
5602+ return -EINVAL;
5603+
5604+ dev_irq = DRM_DEV_TO_IRQ(dev);
5605+ if (dev_irq == 0)
5606+ return -EINVAL;
5607+
5608+ mutex_lock(&dev->struct_mutex);
5609+
5610+ /* Driver must have been initialized */
5611+ if (!dev->dev_private) {
5612+ mutex_unlock(&dev->struct_mutex);
5613+ return -EINVAL;
5614+ }
5615+
5616+ if (dev->irq_enabled) {
5617+ mutex_unlock(&dev->struct_mutex);
5618+ return -EBUSY;
5619+ }
5620+ dev->irq_enabled = 1;
5621+ mutex_unlock(&dev->struct_mutex);
5622+
5623+ DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev_irq);
5624+
5625+ /*
5626+ if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
5627+ init_waitqueue_head(&dev->vbl_queue);
5628+
5629+ spin_lock_init(&dev->vbl_lock);
5630+
5631+ INIT_LIST_HEAD(&dev->vbl_sigs.head);
5632+ INIT_LIST_HEAD(&dev->vbl_sigs2.head);
5633+
5634+ dev->vbl_pending = 0;
5635+ }
5636+ */
5637+
5638+ /* Before installing handler */
5639+ dev->driver->irq_preinstall(dev);
5640+
5641+ /* Install handler */
5642+ if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
5643+ sh_flags = IRQF_SHARED;
5644+
5645+ ret = request_irq(dev_irq, dev->driver->irq_handler,
5646+ sh_flags, dev->devname, dev);
5647+ if (ret < 0) {
5648+ mutex_lock(&dev->struct_mutex);
5649+ dev->irq_enabled = 0;
5650+ mutex_unlock(&dev->struct_mutex);
5651+ return ret;
5652+ }
5653+
5654+ /* After installing handler */
5655+ dev->driver->irq_postinstall(dev);
5656+
5657+ return 0;
5658+}
5659+#endif
5660+
5661+/* IOCTL to init the info that is needed by the client
5662+ */
5663+int intel_drm_info_init( drm_device_t *dev, unsigned long arg ){
5664+
5665+ intel_drm_info_t info;
5666+ intel_drm_info_t *info_ptr;
5667+ intel_device_private_t *dev_ptr;
5668+
5669+	DRM_DEBUG("info init successful dev_private:%#lx\n",(unsigned long)dev->dev_private);
5670+ dev_ptr=dev->dev_private;
5671+ /* See if dev_private is already allocated */
5672+ if(!dev->dev_private){
5673+ DRM_ERROR("dev_private not allocated!\n");
5674+ return 0;
5675+ }
5676+ info_ptr=dev_ptr->info_ptr;
5677+ /* See if info is already allocated */
5678+ if(info_ptr->device_id){
5679+ DRM_ERROR("Info already allocated!\n");
5680+ return 0;
5681+ }
5682+
5683+ /* copy user arguments */
5684+ if(copy_from_user(&info, (void __user *) arg, sizeof(info))){
5685+ return -EFAULT;
5686+ }
5687+
5688+ info_ptr->device_id=info.device_id;
5689+ info_ptr->revision=info.revision;
5690+ info_ptr->video_memory_offset=info.video_memory_offset;
5691+ info_ptr->video_memory_size=info.video_memory_size;
5692+ info_ptr->hw_status_offset=info.hw_status_offset;
5693+ DRM_DEBUG("device_id:%#lx,revision:%#lx,offset:%#lx,size:%#lx,hw_status_offset:%lx\n",
5694+ info_ptr->device_id,info_ptr->revision,
5695+ info_ptr->video_memory_offset,info_ptr->video_memory_size,
5696+ info_ptr->hw_status_offset);
5697+return 0;
5698+}
5699+/* IOCTL to get the info that is needed by the client
5700+ */
5701+int intel_drm_info_get( drm_device_t *dev, unsigned long arg ){
5702+
5703+ intel_drm_info_t info;
5704+ intel_device_private_t *dev_ptr=dev->dev_private;
5705+ intel_drm_info_t *info_ptr=dev_ptr->info_ptr;
5706+
5707+ DRM_DEBUG("Info get device_id:%#lx,revision:%#lx,offset:%#lx,size:%#lx, hw_status_offset:%lx\n",
5708+ info_ptr->device_id,info_ptr->revision,
5709+ info_ptr->video_memory_offset,info_ptr->video_memory_size,
5710+ info_ptr->hw_status_offset);
5711+
5712+ info.device_id=info_ptr->device_id;
5713+ info.revision=info_ptr->revision;
5714+ info.video_memory_offset=info_ptr->video_memory_offset;
5715+ info.video_memory_size=info_ptr->video_memory_size;
5716+ info.hw_status_offset=info_ptr->hw_status_offset;
5717+
5718+ if(copy_to_user((void __user *) arg,&info,sizeof(info))){
5719+ return -EFAULT;
5720+ }
5721+
5722+return 0;
5723+}
5724+
5725+/* Initialise structures for the linked list and driver info in dev_private */
5726+int intel_postinit(intel_device_private_t **priv){
5727+
5728+ intel_drm_info_t *info_ptr;
5729+ intel_device_private_t *dev_ptr;
5730+ DRM_DEBUG("\n");
5731+ /* allocate info to be stored */
5732+ dev_ptr=ALLOC(sizeof(intel_device_private_t),DRM_MEM_DRIVER);
5733+
5734+ if(!dev_ptr){
5735+ return -ENOMEM;
5736+ }
5737+
5738+	DRM_DEBUG("dev_ptr allocation successful\n");
5739+
5740+ memset(dev_ptr,0,sizeof(intel_device_private_t));
5741+ *priv=dev_ptr;
5742+
5743+ info_ptr=ALLOC(sizeof(intel_drm_info_t),DRM_MEM_DRIVER);
5744+
5745+ if(!info_ptr){
5746+ return -ENOMEM;
5747+ }
5748+
5749+	DRM_DEBUG("Info_ptr allocation successful\n");
5750+ memset(info_ptr,0,sizeof(intel_drm_info_t));
5751+ dev_ptr->info_ptr=info_ptr;
5752+
5753+ dev_ptr->pagelist=ALLOC(sizeof(*dev_ptr->pagelist),DRM_MEM_DRIVER);
5754+
5755+ if(!dev_ptr->pagelist){
5756+ return -ENOMEM;
5757+ }
5758+
5759+	DRM_DEBUG("pagelist allocation successful\n");
5760+ memset(dev_ptr->pagelist,0,sizeof(*dev_ptr->pagelist));
5761+ INIT_LIST_HEAD(&dev_ptr->pagelist->head);
5762+ /* Initialise global variable to zero when we start up */
5763+ memory=0;
5764+ DRM_DEBUG("Initialised memory:%d\n",memory);
5765+
5766+return 0;
5767+
5768+}
5769+/* check and free pages of client that is closing the fd */
5770+int intel_prerelease(drm_device_t *dev){
5771+ unsigned long bytes;
5772+ int order;
5773+ int size;
5774+
5775+ intel_device_private_t *dev_ptr=dev->dev_private;
5776+ drm_intel_listpages_t *page;
5777+ drm_intel_list_t *r_list=NULL;
5778+ struct list_head *pagelist, *pagelist_next;
5779+
5780+	DRM_DEBUG("Client closing, freeing pages allocated to it\n");
5781+
5782+
5783+	/* Search the page list for entries added by this client and free them */
5784+
5785+ LOCK_DRM(dev);
5786+
5787+	/* The changes to this function are copied from 8.1 */
5788+ /* I've no idea why, but sometimes during bootup the dev_private
5789+ * field can show up as NULL. Guarding against this for now...
5790+ */
5791+ if (dev_ptr != NULL) {
5792+
5793+ pagelist=&dev_ptr->pagelist->head;
5794+ list_for_each_safe(pagelist,pagelist_next,&dev_ptr->pagelist->head){
5795+ r_list=list_entry(pagelist,drm_intel_list_t,head);
5796+ if(r_list->page->pid==current->group_leader->pid){
5797+#if 0
5798+ printk("found pid:%d\n",current->pid);
5799+ printk("size:%d\n",r_list->page->size);
5800+ printk("phy_address:%#lx\n",r_list->page->phy_address);
5801+ printk("virt_add:%#lx\n",r_list->page->virt_address);
5802+ printk("offset:%#lx\n",r_list->page->offset);
5803+#endif
5804+ bytes=r_list->page->size;
5805+
5806+ /*number of pages that are needed*/
5807+
5808+ size=bytes>>PAGE_SHIFT;
5809+ if(bytes & ~(PAGE_SIZE*size)){
5810+ ++size;
5811+ }
5812+ order=ORDER(size);
5813+ /* free the pages */
5814+
5815+#if 0
5816+ printk("freeing address:%#lx,size:%#lx\n"
5817+ ,(unsigned long)__va(r_list->page->phy_address)
5818+ ,(unsigned long)bytes);
5819+#endif
5820+
5821+ FREE_PAGES((unsigned long)__va(r_list->page->phy_address)
5822+ ,order,0);
5823+
5824+ /* remove from list and free the resource */
5825+
5826+ page=r_list->page;
5827+ list_del(pagelist);
5828+ FREE(pagelist,sizeof(*pagelist),0);
5829+ FREE(page,sizeof(*page),0);
5830+ /* update the total memory that we use */
5831+ memory-=bytes;
5832+ DRM_DEBUG("memory:%d bytes\n",memory);
5833+ }
5834+ }
5835+ }
5836+
5837+ UNLOCK_DRM(dev);
5838+
5839+ return 0;
5840+
5841+}
5842+
5843+int drm_plb_mmap(struct file *filp, struct vm_area_struct *vma)
5844+{
5845+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
5846+ drm_file_t *priv = filp->private_data;
5847+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
5848+ drm_device_t *dev = priv->minor->dev;
5849+#else
5850+ drm_device_t *dev = priv->head->dev;
5851+#endif
5852+
5853+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
5854+ drm_local_map_t *map = NULL;
5855+#else
5856+ drm_map_t *map = NULL;
5857+#endif
5858+ drm_map_list_t *r_list;
5859+ unsigned long offset = 0;
5860+ struct list_head *list;
5861+ drm_vma_entry_t *vma_entry;
5862+
5863+ DRM_DEBUG("drm_plb_mmap: start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
5864+ vma->vm_start, vma->vm_end, VM_OFFSET(vma));
5865+
5866+ if (!priv->authenticated) {
5867+ DRM_DEBUG("Did not authenticate");
5868+ return -EACCES;
5869+ } else {
5870+ DRM_DEBUG("Authenticate successful");
5871+ }
5872+
5873+ /* A sequential search of a linked list is
5874+ * fine here because: 1) there will only be
5875+ * about 5-10 entries in the list and, 2) a
5876+ * DRI client only has to do this mapping
5877+ * once, so it doesn't have to be optimized
5878+ * for performance, even if the list was a
5879+ * bit longer. */
5880+
5881+ /* FIXME: Temporary fix. */
5882+ LIST_FOR_EACH(list, dev) {
5883+
5884+ r_list = list_entry(list, drm_map_list_t, head);
5885+ map = r_list->map;
5886+ if (!map)
5887+ continue;
5888+ if (r_list->user_token == VM_OFFSET(vma))
5889+ break;
5890+ }
5891+
5892+ if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
5893+ return -EPERM;
5894+
5895+ /* Check for valid size. */
5896+ if (map->size != vma->vm_end - vma->vm_start) {
5897+ return -EINVAL;
5898+ }
5899+
5900+ if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
5901+ vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
5902+ pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
5903+ }
5904+
5905+ switch (map->type) {
5906+
5907+ case _DRM_AGP:
5908+ case _DRM_FRAME_BUFFER:
5909+ if (agp_use_pat()) {
5910+ pgprot_val(vma->vm_page_prot) &= ~(_PAGE_PWT | _PAGE_PCD);
5911+ pgprot_val(vma->vm_page_prot) |= _PAGE_PAT;
5912+ vma->vm_flags |= VM_IO; /* not in core dump */
5913+
5914+ offset = VM_OFFSET(vma) - agp_bridge->gart_bus_addr;
5915+ vma->vm_ops = &iegd_plb_vm_ops;
5916+ break;
5917+ }
5918+
5919+ /* Fallthrough */
5920+ case _DRM_REGISTERS:
5921+ if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
5922+ pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
5923+ pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
5924+ }
5925+ vma->vm_flags |= VM_IO; /* not in core dump */
5926+ offset = VM_OFFSET(vma) - agp_bridge->gart_bus_addr;
5927+
5928+ vma->vm_ops = &iegd_plb_vm_ops;
5929+ break;
5930+ case _DRM_SHM:
5931+ case _DRM_CONSISTENT:
5932+ case _DRM_SCATTER_GATHER:
5933+ DRM_DEBUG("Fall through to original mmap\n");
5934+ return drm_mmap(filp, vma);
5935+ break;
5936+ default:
5937+ return -EINVAL; /* This should never happen. */
5938+ }
5939+
5940+
5941+ vma->vm_flags |= VM_RESERVED; /* Don't swap */
5942+
5943+ vma->vm_file = filp; /* Needed for drm_vm_open() */
5944+
5945+ vma_entry = ALLOC(sizeof(*vma_entry), DRM_MEM_VMAS);
5946+ if (vma_entry) {
5947+ /*
5948+ * FIXME: Temporary fix. Will figure out later
5949+ */
5950+ INSERT_VMA();
5951+ }
5952+
5953+#endif
5954+ return 0;
5955+}
5956+
5957+int psb_init(intel_device_private_t *priv)
5958+{
5959+ DRM_INIT_WAITQUEUE(&priv->event_queue);
5960+ spin_lock_init(&priv->irqmask_lock);
5961+ priv->event_present = 0;
5962+ priv->out_vdc = 0;
5963+ priv->out_sgx = 0;
5964+ priv->out_sgx2 = 0;
5965+ priv->out_mtx = 0;
5966+
5967+ return 0;
5968+}
5969+
5970+int intel_drm_plb_interrupts( drm_device_t *dev, void *data )
5971+{
5972+ intel_device_private_t *priv;
5973+ interrupt_info_t plb_info;
5974+ unsigned long irqflags;
5975+ int ret = 0;
5976+ int rv;
5977+ priv=(intel_device_private_t *)dev->dev_private;
5978+
5979+ if(copy_from_user(&plb_info, (void __user *) data, sizeof(plb_info))) {
5980+ return -EFAULT;
5981+ }
5982+
5983+ /* USW15 definition of in and out
5984+ *
5985+ * in/out[0] VDC
5986+ * in/out[1] sgx
5987+ * in/out[2] sgx2
5988+ * in/out[3] msvdx
5989+ */
5990+
5991+ plb_info.out[0]=0;
5992+ plb_info.out[1]=0;
5993+ plb_info.out[2]=0;
5994+ plb_info.out[3]=0;
5995+
5996+ switch (plb_info.req_type) {
5997+ case CLEAR_INT:
5998+
5999+ plb_info.in[0] &= priv->vdc_irq_mask;
6000+ plb_info.in[1] &= priv->sgx_irq_mask;
6001+ plb_info.in[2] &= priv->sgx_irq_mask2;
6002+ plb_info.in[3] &= priv->msvdx_irq_mask;
6003+
6004+ if (plb_info.in[0] || plb_info.in[1] ||
6005+ plb_info.in[2] || plb_info.in[3]) {
6006+
6007+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
6008+ priv->out_vdc &= ~plb_info.in[0];
6009+ plb_info.out[0] = priv->out_vdc;
6010+
6011+ priv->out_sgx &= ~plb_info.in[1];
6012+ plb_info.out[1] = priv->out_sgx;
6013+
6014+ priv->out_sgx2 &= ~plb_info.in[2];
6015+ plb_info.out[2] = priv->out_sgx2;
6016+
6017+ priv->out_mtx &= ~plb_info.in[3];
6018+ plb_info.out[3] = priv->out_mtx;
6019+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
6020+
6021+ plb_info.req_status = INT_CLEARED;
6022+
6023+ } else {
6024+ plb_info.req_status = INT_NOOP;
6025+ }
6026+
6027+ break;
6028+
6029+ case READ_INT:
6030+
6031+
6032+ plb_info.out[0] = priv->out_vdc;
6033+ plb_info.out[1] = priv->out_sgx;
6034+ plb_info.out[2] = priv->out_sgx2;
6035+ plb_info.out[3] = priv->out_mtx;
6036+ plb_info.req_status = INT_READ;
6037+
6038+ break;
6039+
6040+ case WAIT_INT:
6041+
6042+ plb_info.in[0] &= priv->vdc_irq_mask;
6043+ plb_info.in[1] &= priv->sgx_irq_mask;
6044+ plb_info.in[2] &= priv->sgx_irq_mask2;
6045+ plb_info.in[3] &= priv->msvdx_irq_mask;
6046+
6047+ if (plb_info.in[0] || plb_info.in[1] ||
6048+ plb_info.in[2] || plb_info.in[3]) {
6049+
6050+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
6051+
6052+			/* Check whether any of the requested interrupts have already occurred */
6053+ if ((priv->out_vdc & plb_info.in[0]) ||
6054+ (priv->out_sgx & plb_info.in[1]) ||
6055+ (priv->out_sgx2 & plb_info.in[2]) ||
6056+ (priv->out_mtx & plb_info.in[3])) {
6057+
6058+ /* At least one of the interrupts has already occurred */
6059+ plb_info.req_status = INT_STORED;
6060+
6061+ } else {
6062+
6063+ /* Wait for an interrupt to occur */
6064+ priv->event_present = 0;
6065+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
6066+
6067+ DRM_WAIT_ON(ret, priv->event_queue, 20 * DRM_HZ,
6068+ priv->event_present);
6069+
6070+ if (ret) {
6071+ plb_info.req_status = INT_TIMEOUT;
6072+ break;
6073+ }
6074+
6075+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
6076+
6077+ plb_info.req_status = INT_HANDLED;
6078+
6079+ }
6080+
6081+ plb_info.out[0] = priv->out_vdc;
6082+ plb_info.out[1] = priv->out_sgx;
6083+ plb_info.out[2] = priv->out_sgx2;
6084+ plb_info.out[3] = priv->out_mtx;
6085+
6086+ /* Clear the outstanding interrupts that have just been
6087+ * retrieved
6088+ */
6089+ priv->out_vdc &= ~(plb_info.out[0] & plb_info.in[0]);
6090+ priv->out_sgx &= ~(plb_info.out[1] & plb_info.in[1]) ;
6091+ priv->out_sgx2 &= ~(plb_info.out[2] & plb_info.in[2]);
6092+ priv->out_mtx &= ~(plb_info.out[3] & plb_info.in[3]);
6093+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
6094+
6095+ } else {
6096+
6097+ /* Unsupported interrupt */
6098+ plb_info.req_status = INT_NOOP;
6099+
6100+ }
6101+
6102+ break;
6103+
6104+ case UNMASK_INT:
6105+
6106+ if (!dev->irq_enabled) {
6107+ rv = drm_irq_install(dev);
6108+ if (rv != 0) {
6109+ DRM_ERROR("%s: could not install IRQs: rv = %d\n", __FUNCTION__, rv);
6110+ return rv;
6111+ }
6112+ }
6113+
6114+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
6115+ PSB_WVDC32(0x00000000, IMR);
6116+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
6117+
6118+ break;
6119+
6120+ case MASK_INT:
6121+
6122+ if (dev->irq_enabled) {
6123+ rv = drm_irq_uninstall(dev);
6124+ if (rv != 0) {
6125+ DRM_ERROR("%s: could not uninstall IRQs: rv = %d\n", __FUNCTION__, rv);
6126+ return rv;
6127+ }
6128+ }
6129+
6130+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
6131+ PSB_WVDC32(0xFFFFFFFF, IMR);
6132+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
6133+
6134+ break;
6135+
6136+ default:
6137+
6138+ plb_info.req_status = INT_INVALID;
6139+
6140+ }
6141+
6142+
6143+ if (copy_to_user((void __user *) data, &plb_info, sizeof(plb_info))) {
6144+ return -EFAULT;
6145+ }
6146+
6147+ return 0;
6148+}
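To make the request/response flow of intel_drm_plb_interrupts() above more concrete, here is a hedged userspace sketch of a WAIT_INT round trip through DRM_IOCTL_INTEL_INTERRUPT. It assumes an open, authenticated DRM file descriptor; the function name and the caller-supplied vdc_mask are illustrative only, and the in[]/out[] slot meanings follow the VDC/SGX/SGX2/MSVDX layout documented in the code.

#include <string.h>
#include <sys/ioctl.h>
#include "iegd_drm_client.h"

/* Block (up to the driver's 20 * DRM_HZ timeout) until one of the
 * requested VDC interrupt bits fires, then return the status seen. */
static int iegd_wait_for_vdc(int fd, unsigned long vdc_mask,
                             unsigned long *vdc_status)
{
	interrupt_info_t req;

	memset(&req, 0, sizeof(req));
	req.req_type = WAIT_INT;
	req.in[0] = vdc_mask;	/* in[0] is the VDC slot */

	if (ioctl(fd, DRM_IOCTL_INTEL_INTERRUPT, &req) < 0)
		return -1;

	*vdc_status = req.out[0];
	/* INT_HANDLED: waited and got one; INT_STORED: it had already
	 * happened; INT_TIMEOUT: nothing arrived within the timeout. */
	return (int)req.req_status;
}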
6149diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2611.c patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2611.c
6150--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2611.c 1969-12-31 17:00:00.000000000 -0700
6151+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2611.c 2009-10-06 10:30:05.000000000 -0700
6152@@ -0,0 +1,250 @@
6153+/* -*- pse-c -*-
6154+ *----------------------------------------------------------------------------
6155+ * Filename: iegd_interface_2611.c
6156+ * $Revision: 1.6 $
6157+ *----------------------------------------------------------------------------
6158+ * Gart and DRM driver for Intel Embedded Graphics Driver
6159+ * Copyright © 2008, Intel Corporation.
6160+ *
6161+ * This program is free software; you can redistribute it and/or modify it
6162+ * under the terms and conditions of the GNU General Public License,
6163+ * version 2, as published by the Free Software Foundation.
6164+ *
6165+ * This program is distributed in the hope it will be useful, but WITHOUT
6166+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6167+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6168+ * more details.
6169+ *
6170+ * You should have received a copy of the GNU General Public License along with
6171+ * this program; if not, write to the Free Software Foundation, Inc.,
6172+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6173+ *
6174+ */
6175+
6176+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
6177+ *
6178+ * Redistribution and use in source and binary forms, with or without
6179+ * modification, are permitted provided that the following conditions are met:
6180+ * Redistributions of source code must retain the above copyright notice,
6181+ * this list of conditions and the following disclaimer.
6182+ *
6183+ * Redistributions in binary form must reproduce the above copyright
6184+ * notice, this list of conditions and the following disclaimer in the
6185+ * documentation and/or other materials provided with the distribution.
6186+ *
6187+ * Neither the name Intel Corporation nor the names of its contributors
6188+ * may be used to endorse or promote products derived from this software
6189+ * without specific prior written permission.
6190+ *
6191+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
6192+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
6193+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6194+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6195+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6196+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6197+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
6198+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
6199+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
6200+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
6201+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6202+ *
6203+ */
6204+
6205+#include "iegd.h"
6206+#include "drmP.h"
6207+#include "drm.h"
6208+
6209+#include "iegd_drm.h"
6210+#include "iegd_drv.h"
6211+
6212+#if KERNEL2611
6213+int intel_postinit_2611(struct drm_device *dev,unsigned long flags){
6214+
6215+ intel_device_private_t *priv;
6216+ priv=(intel_device_private_t *)dev->dev_private;
6217+
6218+ intel_postinit(&priv);
6219+ dev->dev_private=priv;
6220+
6221+ return 0;
6222+
6223+}
6224+
6225+void intel_prerelease_2611(drm_device_t *dev,DRMFILE filp){
6226+
6227+ intel_prerelease(dev);
6228+
6229+}
6230+
6231+int intel_getpages_2611( struct inode *inode, struct file *filp,
6232+ unsigned int cmd, unsigned long arg ){
6233+
6234+ drm_file_t *priv=filp->private_data;
6235+ drm_device_t *dev=priv->head->dev;
6236+ return intel_getpages(dev,filp,arg);
6237+
6238+}
6239+
6240+int intel_freepages_2611( struct inode *inode, struct file *filp,
6241+ unsigned int cmd, unsigned long arg ){
6242+
6243+ drm_file_t *priv=filp->private_data;
6244+ drm_device_t *dev=priv->head->dev;
6245+ return intel_freepages(dev,arg);
6246+}
6247+
6248+int intel_drm_info_init_2611( struct inode *inode, struct file *filp,
6249+ unsigned int cmd, unsigned long arg ){
6250+
6251+ drm_file_t *priv=filp->private_data;
6252+ drm_device_t *dev=priv->head->dev;
6253+ return intel_drm_info_init(dev,arg);
6254+
6255+}
6256+
6257+int intel_drm_info_get_2611( struct inode *inode, struct file *filp,
6258+ unsigned int cmd, unsigned long arg ){
6259+
6260+ drm_file_t *priv=filp->private_data;
6261+ drm_device_t *dev=priv->head->dev;
6262+ return intel_drm_info_get(dev,arg);
6263+
6264+}
6265+
6266+/* The following two functions were taken from drm_memory.c;
6267+ * for some reason they are not exported for use
6268+ * by other DRM drivers.
6269+ */
6270+
6271+/**
6272+ * Allocate pages.
6273+ *
6274+ * \param order size order.
6275+ * \param area memory area. (Not used.)
6276+ * \return page address on success, or zero on failure.
6277+ *
6278+ * Allocate and reserve free pages.
6279+ */
6280+unsigned long intel_alloc_pages(int order, int area)
6281+{
6282+ unsigned long address;
6283+ unsigned long bytes = PAGE_SIZE << order;
6284+ unsigned long addr;
6285+ unsigned int sz;
6286+
6287+ address = __get_free_pages(GFP_KERNEL, order);
6288+ if (!address)
6289+ return 0;
6290+
6291+ /* Zero */
6292+ memset((void *)address, 0, bytes);
6293+
6294+ /* Reserve */
6295+ for (addr = address, sz = bytes;
6296+ sz > 0;
6297+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
6298+ SetPageReserved(virt_to_page(addr));
6299+ }
6300+
6301+ return address;
6302+}
6303+
6304+/**
6305+ * Free pages.
6306+ *
6307+ * \param address address of the pages to free.
6308+ * \param order size order.
6309+ * \param area memory area. (Not used.)
6310+ *
6311+ * Unreserve and free pages allocated by alloc_pages().
6312+ */
6313+void intel_free_pages(unsigned long address, int order, int area)
6314+{
6315+ unsigned long bytes = PAGE_SIZE << order;
6316+ unsigned long addr;
6317+ unsigned int sz;
6318+
6319+ if (!address)
6320+ return;
6321+
6322+ /* Unreserve */
6323+ for (addr = address, sz = bytes;
6324+ sz > 0;
6325+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
6326+ ClearPageReserved(virt_to_page(addr));
6327+ }
6328+
6329+ free_pages(address, order);
6330+}
6331+
6332+drm_ioctl_desc_t intel_ioctls[]={
6333+ [DRM_IOCTL_NR(DRM_INTEL_GETPAGES)] = { intel_getpages_2611, 0,0 },
6334+ [DRM_IOCTL_NR(DRM_INTEL_FREEPAGES)] = { intel_freepages_2611, 0,0 },
6335+ [DRM_IOCTL_NR(DRM_INTEL_INFO_INIT)] = { intel_drm_info_init_2611, 0,0 },
6336+ [DRM_IOCTL_NR(DRM_INTEL_INFO_GET)] = { intel_drm_info_get_2611, 0,0 }
6337+};
6338+
6339+int intel_max_ioctl = DRM_ARRAY_SIZE(intel_ioctls);
6340+
6341+
6342+static int version( drm_version_t *version )
6343+{
6344+ int len;
6345+
6346+ version->version_major = DRIVER_MAJOR;
6347+ version->version_minor = DRIVER_MINOR;
6348+ version->version_patchlevel = DRIVER_PATCHLEVEL;
6349+ DRM_COPY( version->name, DRIVER_NAME );
6350+ DRM_COPY( version->date, DRIVER_DATE );
6351+ DRM_COPY( version->desc, DRIVER_DESC );
6352+ return 0;
6353+}
6354+
6355+static struct pci_device_id pciidlist[] = {
6356+ INTEL_PCI_IDS
6357+};
6358+
6359+static struct drm_driver driver = {
6360+ .driver_features = DRIVER_USE_AGP|DRIVER_REQUIRE_AGP|DRIVER_USE_MTRR,
6361+ .prerelease = intel_prerelease_2611,
6362+ .postinit =intel_postinit_2611,
6363+ .reclaim_buffers=drm_core_reclaim_buffers,
6364+ .get_map_ofs=drm_core_get_map_ofs,
6365+ .get_reg_ofs=drm_core_get_reg_ofs,
6366+ .version = version,
6367+ .ioctls = intel_ioctls,
6368+ .fops = {
6369+ .owner = THIS_MODULE,
6370+ .open = drm_open,
6371+ .release = drm_release,
6372+ .ioctl = drm_ioctl,
6373+ .mmap = drm_mmap,
6374+ .poll = drm_poll,
6375+ .fasync = drm_fasync,
6376+ },
6377+ .pci_driver = {
6378+ .name = DRIVER_NAME,
6379+ .id_table = pciidlist,
6380+ }
6381+};
6382+
6383+int __init intel_init(void)
6384+{
6385+ driver.num_ioctls = intel_max_ioctl;
6386+ return drm_init(&driver);
6387+}
6388+
6389+void __exit intel_exit(void)
6390+{
6391+ drm_exit(&driver);
6392+}
6393+
6394+struct file_operations intel_buffer_fops = {
6395+ .open = drm_open,
6396+ .flush = drm_flush,
6397+ .release = drm_release,
6398+ .ioctl = drm_ioctl,
6399+ .mmap = intel_mmap_buffers,
6400+ .fasync = drm_fasync,
6401+};
6402+#endif
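The intel_ioctls[] table above is how drm_ioctl() reaches the 2.6.11-era handlers. For illustration, a minimal user-space sketch of requesting pages through that path; the device node path, the DRM_IOCTL_INTEL_GETPAGES request macro and the exact field types of drm_intel_getpages_t are assumptions meant to match iegd_drm.h, not verified against it:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "iegd_drm.h"   /* drm_intel_getpages_t, DRM_IOCTL_INTEL_GETPAGES (assumed) */

    static int request_pages(unsigned long bytes)
    {
        drm_intel_getpages_t req = {0};
        int fd = open("/dev/dri/card0", O_RDWR);   /* hypothetical node */

        if (fd < 0)
            return -1;

        req.size = bytes;
        /* drm_ioctl() looks this request up in intel_ioctls[] and calls
         * intel_getpages_2611(), which mmaps the pages into this process. */
        if (ioctl(fd, DRM_IOCTL_INTEL_GETPAGES, &req) < 0) {
            close(fd);
            return -1;
        }

        printf("mapped %lu bytes at %#lx\n",
               (unsigned long)req.size, (unsigned long)req.virt_address);
        close(fd);
        return 0;
    }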
6403diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2611.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2611.h
6404--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2611.h 1969-12-31 17:00:00.000000000 -0700
6405+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2611.h 2009-10-06 10:30:05.000000000 -0700
6406@@ -0,0 +1,71 @@
6407+/* -*- pse-c -*-
6408+ *----------------------------------------------------------------------------
6409+ * Filename: iegd_interface_2611.h
6410+ * $Revision: 1.6 $
6411+ *----------------------------------------------------------------------------
6412+ * Gart and DRM driver for Intel Embedded Graphics Driver
6413+ * Copyright © 2008, Intel Corporation.
6414+ *
6415+ * This program is free software; you can redistribute it and/or modify it
6416+ * under the terms and conditions of the GNU General Public License,
6417+ * version 2, as published by the Free Software Foundation.
6418+ *
6419+ * This program is distributed in the hope it will be useful, but WITHOUT
6420+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6421+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6422+ * more details.
6423+ *
6424+ * You should have received a copy of the GNU General Public License along with
6425+ * this program; if not, write to the Free Software Foundation, Inc.,
6426+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6427+ *
6428+ */
6429+
6430+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
6431+ *
6432+ * Redistribution and use in source and binary forms, with or without
6433+ * modification, are permitted provided that the following conditions are met:
6434+ * Redistributions of source code must retain the above copyright notice,
6435+ * this list of conditions and the following disclaimer.
6436+ *
6437+ * Redistributions in binary form must reproduce the above copyright
6438+ * notice, this list of conditions and the following disclaimer in the
6439+ * documentation and/or other materials provided with the distribution.
6440+ *
6441+ * Neither the name Intel Corporation nor the names of its contributors
6442+ * may be used to endorse or promote products derived from this software
6443+ * without specific prior written permission.
6444+ *
6445+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
6446+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
6447+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6448+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6449+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6450+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6451+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
6452+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
6453+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
6454+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
6455+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6456+ *
6457+ */
6458+
6459+/* Macros are defined here so that only functions specific to this kernel
6460+ * version are used.
6461+ */
6462+#if KERNEL2611
6463+#define REMAP_PAGE(a,b,c,d,e) io_remap_pfn_range(a,b, \
6464+ c >>PAGE_SHIFT, \
6465+ d,e)
6466+
6467+#define ORDER(a) drm_order(a)
6468+#define ALLOC_PAGES(a,b) intel_alloc_pages(a,b)
6469+#define ALLOC(a,b) drm_alloc(a,b)
6470+#define FREE(a,b,c) drm_free(a,b,c)
6471+#define FREE_PAGES(a,b,c) intel_free_pages(a,b,c)
6472+
6473+#define LOCK_DRM(d) down(&d->struct_sem)
6474+#define UNLOCK_DRM(d) up(&d->struct_sem)
6475+#endif
6476+
6477+/* endif for KERNEL2611 */
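The macros above are the point of this header: shared driver code calls LOCK_DRM(), ALLOC_PAGES() and friends, and only the per-kernel iegd_interface_26xx.h decides what they expand to (the struct_sem semaphore here, struct_mutex in the later headers). A minimal sketch of how such shared code might look; the helper itself is illustrative and not part of the driver, and partial-page rounding is omitted for brevity:

    /* Compiles unchanged against the 2.6.11, 2.6.15 or 2.6.24 header. */
    static int example_reserve_pages(drm_device_t *dev, unsigned long bytes)
    {
        int order = ORDER(bytes >> PAGE_SHIFT);     /* drm_order() on the page count */
        unsigned long addr = ALLOC_PAGES(order, 0); /* intel_alloc_pages() */

        if (!addr)
            return -ENOMEM;

        LOCK_DRM(dev);    /* down(&dev->struct_sem) with this header */
        /* ... record the allocation in the driver-private page list ... */
        UNLOCK_DRM(dev);  /* up(&dev->struct_sem) */
        return 0;
    }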
6478diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2615.c patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2615.c
6479--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2615.c 1969-12-31 17:00:00.000000000 -0700
6480+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2615.c 2009-10-06 10:30:05.000000000 -0700
6481@@ -0,0 +1,394 @@
6482+
6483+/* -*- pse-c -*-
6484+ *----------------------------------------------------------------------------
6485+ * Filename: iegd_interface_2615.c
6486+ * $Revision: 1.11 $
6487+ *----------------------------------------------------------------------------
6488+ * Gart and DRM driver for Intel Embedded Graphics Driver
6489+ * Copyright © 2008, Intel Corporation.
6490+ *
6491+ * This program is free software; you can redistribute it and/or modify it
6492+ * under the terms and conditions of the GNU General Public License,
6493+ * version 2, as published by the Free Software Foundation.
6494+ *
6495+ * This program is distributed in the hope it will be useful, but WITHOUT
6496+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6497+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6498+ * more details.
6499+ *
6500+ * You should have received a copy of the GNU General Public License along with
6501+ * this program; if not, write to the Free Software Foundation, Inc.,
6502+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6503+ *
6504+ */
6505+
6506+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
6507+ *
6508+ * Redistribution and use in source and binary forms, with or without
6509+ * modification, are permitted provided that the following conditions are met:
6510+ * Redistributions of source code must retain the above copyright notice,
6511+ * this list of conditions and the following disclaimer.
6512+ *
6513+ * Redistributions in binary form must reproduce the above copyright
6514+ * notice, this list of conditions and the following disclaimer in the
6515+ * documentation and/or other materials provided with the distribution.
6516+ *
6517+ * Neither the name Intel Corporation nor the names of its contributors
6518+ * may be used to endorse or promote products derived from this software
6519+ * without specific prior written permission.
6520+ *
6521+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
6522+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
6523+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6524+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6525+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6526+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6527+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
6528+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
6529+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
6530+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
6531+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6532+ *
6533+ */
6534+
6535+#include "iegd.h"
6536+#include "drmP.h"
6537+#include "drm.h"
6538+
6539+#include "iegd_drm.h"
6540+
6541+#include "iegd_drv.h"
6542+#include "psb_intregs.h"
6543+#include "intelpci.h"
6544+#include <linux/i2c.h>
6545+
6546+int drm_irq_install(drm_device_t *dev);
6547+
6548+#if KERNEL2615
6549+int intel_firstopen_2615(struct drm_device *dev)
6550+{
6551+
6552+ intel_device_private_t *priv;
6553+ priv=(intel_device_private_t *)dev->dev_private;
6554+
6555+ intel_postinit(&priv);
6556+ dev->dev_private=priv;
6557+
6558+ return 0;
6559+
6560+}
6561+
6562+
6563+int intel_psb_firstopen_2615(struct drm_device *dev)
6564+{
6565+
6566+ unsigned long resource_start;
6567+ intel_device_private_t *priv;
6568+ priv=(intel_device_private_t *)dev->dev_private;
6569+
6570+ intel_postinit(&priv);
6571+ psb_init(priv);
6572+ dev->dev_private=priv;
6573+
6574+
6575+ /*
6576+ * Map MMIO addresses so that the DRM can control interrupt support
6577+ */
6578+
6579+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
6580+
6581+ priv->vdc_reg = ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
6582+
6583+ if (!priv->vdc_reg) {
6584+ /* Normally we'd want to unload the driver on failure. But due
6585+		 * to circular dependencies, we can only return failure.
6586+ */
6587+ /* psb_driver_unload(dev); */
6588+ return 1;
6589+ }
6590+
6591+ priv->sgx_reg = ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
6592+ if (!priv->sgx_reg) {
6593+ /* Normally we'd want to unload the driver on failure. But due
6594+		 * to circular dependencies, we can only return failure.
6595+ */
6596+ /* psb_driver_unload(dev); */
6597+ return 1;
6598+ }
6599+
6600+ priv->msvdx_reg = ioremap(resource_start + PSB_MSVDX_OFFSET, PSB_MSVDX_SIZE);
6601+ if (!priv->msvdx_reg) {
6602+ /* Normally we'd want to unload the driver on failure. But due
6603+		 * to circular dependencies, we can only return failure.
6604+ */
6605+ /* psb_driver_unload(dev); */
6606+ return 1;
6607+ }
6608+
6609+ return 0;
6610+
6611+}
6612+
6613+void intel_preclose_2615(drm_device_t *dev,DRMFILE filp)
6614+{
6615+ intel_prerelease(dev);
6616+}
6617+
6618+
6619+int intel_getpages_2615( struct inode *inode, struct file *filp,
6620+ unsigned int cmd, unsigned long arg)
6621+{
6622+ drm_file_t *priv=filp->private_data;
6623+ drm_device_t *dev=priv->head->dev;
6624+ return intel_getpages(dev,filp,arg);
6625+}
6626+
6627+
6628+int intel_freepages_2615( struct inode *inode, struct file *filp,
6629+ unsigned int cmd, unsigned long arg )
6630+{
6631+
6632+ drm_file_t *priv=filp->private_data;
6633+ drm_device_t *dev=priv->head->dev;
6634+ return intel_freepages(dev,arg);
6635+}
6636+
6637+
6638+int intel_drm_info_init_2615( struct inode *inode, struct file *filp,
6639+ unsigned int cmd, unsigned long arg )
6640+{
6641+
6642+ drm_file_t *priv=filp->private_data;
6643+ drm_device_t *dev=priv->head->dev;
6644+ return intel_drm_info_init(dev,arg);
6645+
6646+}
6647+
6648+
6649+int intel_drm_info_get_2615( struct inode *inode, struct file *filp,
6650+ unsigned int cmd, unsigned long arg )
6651+{
6652+
6653+ drm_file_t *priv=filp->private_data;
6654+ drm_device_t *dev=priv->head->dev;
6655+ return intel_drm_info_get(dev,arg);
6656+
6657+}
6658+
6659+
6660+/* The following two functions were taken from drm_memory.c;
6661+ * for some reason they are not exported for use
6662+ * by other DRM drivers.
6663+ */
6664+
6665+/**
6666+ * Allocate pages.
6667+ *
6668+ * \param order size order.
6669+ * \param area memory area. (Not used.)
6670+ * \return page address on success, or zero on failure.
6671+ *
6672+ * Allocate and reserve free pages.
6673+ */
6674+unsigned long intel_alloc_pages(int order, int area)
6675+{
6676+ unsigned long address;
6677+ unsigned long bytes = PAGE_SIZE << order;
6678+ unsigned long addr;
6679+ unsigned int sz;
6680+
6681+ address = __get_free_pages(GFP_KERNEL, order);
6682+ if (!address)
6683+ return 0;
6684+
6685+ /* Zero */
6686+ memset((void *)address, 0, bytes);
6687+
6688+ /* Reserve */
6689+ for (addr = address, sz = bytes;
6690+ sz > 0;
6691+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
6692+ SetPageReserved(virt_to_page(addr));
6693+ }
6694+
6695+ return address;
6696+}
6697+
6698+
6699+/**
6700+ * Free pages.
6701+ *
6702+ * \param address address of the pages to free.
6703+ * \param order size order.
6704+ * \param area memory area. (Not used.)
6705+ *
6706+ * Unreserve and free pages allocated by alloc_pages().
6707+ */
6708+void intel_free_pages(unsigned long address, int order, int area)
6709+{
6710+ unsigned long bytes = PAGE_SIZE << order;
6711+ unsigned long addr;
6712+ unsigned int sz;
6713+
6714+ if (!address) {
6715+ return;
6716+ }
6717+
6718+ /* Unreserve */
6719+ for (addr = address, sz = bytes;
6720+ sz > 0;
6721+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
6722+ ClearPageReserved(virt_to_page(addr));
6723+ }
6724+
6725+ free_pages(address, order);
6726+}
6727+
6728+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
6729+{
6730+ intel_device_private_t *priv;
6731+
6732+ priv=(intel_device_private_t *)dev->dev_private;
6733+
6734+ return 0;
6735+}
6736+
6737+int intel_drm_plb_interrupts_2615 ( struct inode *inode,
6738+ struct file *filp,
6739+ unsigned int cmd, void *arg )
6740+{
6741+ drm_file_t *priv=filp->private_data;
6742+ drm_device_t *dev=priv->head->dev;
6743+
6744+ return intel_drm_plb_interrupts( dev, arg );
6745+}
6746+
6747+drm_ioctl_desc_t intel_ioctls[] = {
6748+ [DRM_IOCTL_NR(DRM_INTEL_GETPAGES)] = { intel_getpages_2615, 0},
6749+ [DRM_IOCTL_NR(DRM_INTEL_FREEPAGES)] = { intel_freepages_2615, 0},
6750+ [DRM_IOCTL_NR(DRM_INTEL_INFO_INIT)] = { intel_drm_info_init_2615, 0},
6751+ [DRM_IOCTL_NR(DRM_INTEL_INFO_GET)] = { intel_drm_info_get_2615, 0},
6752+ [DRM_IOCTL_NR(DRM_INTEL_INTERRUPT)] = {intel_drm_plb_interrupts_2615,0}
6753+};
6754+
6755+int intel_max_ioctl = DRM_ARRAY_SIZE(intel_ioctls);
6756+
6757+
6758+
6759+static struct pci_device_id pciidlist[] = {
6760+ INTEL_PCI_IDS
6761+};
6762+
6763+int device_is_agp_2615(drm_device_t * dev)
6764+{
6765+ return 1;
6766+}
6767+
6768+
6769+static struct drm_driver driver = {
6770+ .firstopen = intel_firstopen_2615,
6771+ .preclose = intel_preclose_2615,
6772+ .reclaim_buffers=drm_core_reclaim_buffers,
6773+ .get_map_ofs=drm_core_get_map_ofs,
6774+ .get_reg_ofs=drm_core_get_reg_ofs,
6775+
6776+ .device_is_agp = device_is_agp_2615,
6777+
6778+ .major = DRIVER_MAJOR,
6779+ .minor = DRIVER_MINOR,
6780+ .patchlevel = DRIVER_PATCHLEVEL,
6781+ .name = DRIVER_NAME,
6782+ .desc = DRIVER_DESC,
6783+ .date = DRIVER_DATE,
6784+
6785+ .driver_features = DRIVER_USE_AGP|DRIVER_REQUIRE_AGP|DRIVER_USE_MTRR,
6786+ .ioctls = intel_ioctls,
6787+ .fops = {
6788+ .owner = THIS_MODULE,
6789+ .open = drm_open,
6790+ .release = drm_release,
6791+ .ioctl = drm_ioctl,
6792+ .mmap = drm_mmap,
6793+ .poll = drm_poll,
6794+ .fasync = drm_fasync,
6795+ },
6796+ .pci_driver = {
6797+ .name = DRIVER_NAME,
6798+ .id_table = pciidlist,
6799+ }
6800+};
6801+
6802+static struct drm_driver driver_plb = {
6803+ .load = psb_driver_load,
6804+ .firstopen = intel_psb_firstopen_2615,
6805+ .preclose = intel_preclose_2615,
6806+ .reclaim_buffers=drm_core_reclaim_buffers,
6807+ .get_map_ofs=drm_core_get_map_ofs,
6808+ .get_reg_ofs=drm_core_get_reg_ofs,
6809+
6810+ .device_is_agp = device_is_agp_2615,
6811+
6812+ .major = DRIVER_MAJOR,
6813+ .minor = DRIVER_MINOR,
6814+ .patchlevel = DRIVER_PATCHLEVEL,
6815+ .name = DRIVER_NAME,
6816+ .desc = DRIVER_DESC,
6817+ .date = DRIVER_DATE,
6818+
6819+ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
6820+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
6821+ .ioctls = intel_ioctls,
6822+ .irq_preinstall = psb_irq_preinstall,
6823+ .irq_postinstall = psb_irq_postinstall,
6824+ .irq_uninstall = psb_irq_uninstall,
6825+ .irq_handler = psb_irq_handler,
6826+
6827+ .fops = {
6828+ .owner = THIS_MODULE,
6829+ .open = drm_open,
6830+ .release = drm_release,
6831+ .ioctl = drm_ioctl,
6832+ .mmap = drm_plb_mmap,
6833+ .poll = drm_poll,
6834+ .fasync = drm_fasync,
6835+ },
6836+ .pci_driver = {
6837+ .name = DRIVER_NAME,
6838+ .id_table = pciidlist,
6839+ }
6840+};
6841+
6842+
6843+int intel_init(void)
6844+{
6845+ driver.num_ioctls = intel_max_ioctl;
6846+ driver_plb.num_ioctls = intel_max_ioctl;
6847+
6848+ /* We are peeking into the global AGP structures that
6849+ * we have access to in order to determine what chipset we're
6850+ * on. This isn't necessarily a good thing to do.
6851+ */
6852+
6853+ if (gart_id->device_id == PCI_DEVICE_ID_PLB) {
6854+ printk(KERN_ERR "Initializing DRM for Intel US15 SCH\n");
6855+ return drm_init(&driver_plb);
6856+ } else {
6857+ return drm_init(&driver);
6858+ }
6859+
6860+}
6861+
6862+void intel_exit(void)
6863+{
6864+ drm_exit(&driver);
6865+}
6866+
6867+struct file_operations intel_buffer_fops = {
6868+ .open = drm_open,
6869+ .release = drm_release,
6870+ .ioctl = drm_ioctl,
6871+ .mmap = intel_mmap_buffers,
6872+ .poll = drm_poll,
6873+ .fasync = drm_fasync,
6874+};
6875+#endif
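The firstopen comments above note that a failed ioremap() can only be reported, not unwound, so earlier mappings are left in place. For reference, a conventional goto-style unwind for the same three regions could look like the sketch below; it is the usual kernel pattern, not the driver's actual error handling, and the function name is made up:

    /* Illustrative only: undo earlier ioremap()s when a later one fails. */
    static int map_psb_registers(struct drm_device *dev, intel_device_private_t *priv)
    {
        unsigned long base = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);

        priv->vdc_reg = ioremap(base + PSB_VDC_OFFSET, PSB_VDC_SIZE);
        if (!priv->vdc_reg)
            return -ENOMEM;

        priv->sgx_reg = ioremap(base + PSB_SGX_OFFSET, PSB_SGX_SIZE);
        if (!priv->sgx_reg)
            goto unmap_vdc;

        priv->msvdx_reg = ioremap(base + PSB_MSVDX_OFFSET, PSB_MSVDX_SIZE);
        if (!priv->msvdx_reg)
            goto unmap_sgx;

        return 0;

    unmap_sgx:
        iounmap(priv->sgx_reg);
    unmap_vdc:
        iounmap(priv->vdc_reg);
        return -ENOMEM;
    }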
6876diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2615.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2615.h
6877--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2615.h 1969-12-31 17:00:00.000000000 -0700
6878+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2615.h 2009-10-06 10:30:05.000000000 -0700
6879@@ -0,0 +1,72 @@
6880+
6881+/* -*- pse-c -*-
6882+ *----------------------------------------------------------------------------
6883+ * Filename: iegd_interface_2615.h
6884+ * $Revision: 1.6 $
6885+ *----------------------------------------------------------------------------
6886+ * Gart and DRM driver for Intel Embedded Graphics Driver
6887+ * Copyright © 2008, Intel Corporation.
6888+ *
6889+ * This program is free software; you can redistribute it and/or modify it
6890+ * under the terms and conditions of the GNU General Public License,
6891+ * version 2, as published by the Free Software Foundation.
6892+ *
6893+ * This program is distributed in the hope it will be useful, but WITHOUT
6894+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6895+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6896+ * more details.
6897+ *
6898+ * You should have received a copy of the GNU General Public License along with
6899+ * this program; if not, write to the Free Software Foundation, Inc.,
6900+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6901+ *
6902+ */
6903+
6904+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
6905+ *
6906+ * Redistribution and use in source and binary forms, with or without
6907+ * modification, are permitted provided that the following conditions are met:
6908+ * Redistributions of source code must retain the above copyright notice,
6909+ * this list of conditions and the following disclaimer.
6910+ *
6911+ * Redistributions in binary form must reproduce the above copyright
6912+ * notice, this list of conditions and the following disclaimer in the
6913+ * documentation and/or other materials provided with the distribution.
6914+ *
6915+ * Neither the name Intel Corporation nor the names of its contributors
6916+ * may be used to endorse or promote products derived from this software
6917+ * without specific prior written permission.
6918+ *
6919+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
6920+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
6921+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6922+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6923+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6924+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6925+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
6926+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
6927+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
6928+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
6929+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6930+ *
6931+ */
6932+
6933+/* Macros are defined here so that only functions specific to this kernel
6934+ * version are used.
6935+ */
6936+#if KERNEL2615
6937+#define REMAP_PAGE(a,b,c,d,e) io_remap_pfn_range(a,b, \
6938+ c >>PAGE_SHIFT, \
6939+ d,e)
6940+
6941+#define ORDER(a) drm_order(a)
6942+#define ALLOC_PAGES(a,b) intel_alloc_pages(a,b)
6943+#define ALLOC(a,b) drm_alloc(a,b)
6944+#define FREE(a,b,c) drm_free(a,b,c)
6945+#define FREE_PAGES(a,b,c) intel_free_pages(a,b,c)
6946+
6947+#define LOCK_DRM(d) mutex_lock(&d->struct_mutex)
6948+#define UNLOCK_DRM(d) mutex_unlock(&d->struct_mutex)
6949+#endif
6950+
6951+/* endif for KERNEL2615 */
6952diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2624.c patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2624.c
6953--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2624.c 1969-12-31 17:00:00.000000000 -0700
6954+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2624.c 2009-10-06 10:30:05.000000000 -0700
6955@@ -0,0 +1,820 @@
6956+/* -*- pse-c -*-
6957+ *----------------------------------------------------------------------------
6958+ * Filename: iegd_interface_2624.c
6959+ * $Revision: 1.8 $
6960+ *----------------------------------------------------------------------------
6961+ * Gart and DRM driver for Intel Embedded Graphics Driver
6962+ * Copyright © 2008, Intel Corporation.
6963+ *
6964+ * This program is free software; you can redistribute it and/or modify it
6965+ * under the terms and conditions of the GNU General Public License,
6966+ * version 2, as published by the Free Software Foundation.
6967+ *
6968+ * This program is distributed in the hope it will be useful, but WITHOUT
6969+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6970+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6971+ * more details.
6972+ *
6973+ * You should have received a copy of the GNU General Public License along with
6974+ * this program; if not, write to the Free Software Foundation, Inc.,
6975+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6976+ *
6977+ */
6978+
6979+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
6980+ *
6981+ * Redistribution and use in source and binary forms, with or without
6982+ * modification, are permitted provided that the following conditions are met:
6983+ * Redistributions of source code must retain the above copyright notice,
6984+ * this list of conditions and the following disclaimer.
6985+ *
6986+ * Redistributions in binary form must reproduce the above copyright
6987+ * notice, this list of conditions and the following disclaimer in the
6988+ * documentation and/or other materials provided with the distribution.
6989+ *
6990+ * Neither the name Intel Corporation nor the names of its contributors
6991+ * may be used to endorse or promote products derived from this software
6992+ * without specific prior written permission.
6993+ *
6994+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
6995+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
6996+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6997+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6998+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6999+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
7000+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
7001+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
7002+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
7003+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
7004+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7005+ *
7006+ */
7007+
7008+#include "iegd.h"
7009+#include "drmP.h"
7010+#include "drm.h"
7011+
7012+#include "iegd_drm.h"
7013+#include "iegd_drv.h"
7014+#include "psb_intregs.h"
7015+#include "intelpci.h"
7016+
7017+int drm_irq_install(drm_device_t *dev);
7018+
7019+#if KERNEL2624
7020+
7021+/* get intel_buffer_fops from the interface_###.c files */
7022+extern struct file_operations intel_buffer_fops;
7023+
7024+/* Global variable to keep track of the amount of memory we are using */
7025+static int memory = 0;
7026+
7027+int intel_firstopen_2624(struct drm_device *dev)
7028+{
7029+ intel_device_private_t *priv;
7030+ priv=(intel_device_private_t *)dev->dev_private;
7031+
7032+ intel_postinit(&priv);
7033+ dev->dev_private=priv;
7034+
7035+ return 0;
7036+
7037+}
7038+
7039+int intel_plb_firstopen_2624(struct drm_device *dev)
7040+{
7041+
7042+ unsigned long resource_start;
7043+ intel_device_private_t *priv;
7044+ priv=(intel_device_private_t *)dev->dev_private;
7045+
7046+ intel_postinit(&priv);
7047+ psb_init(priv);
7048+ dev->dev_private=priv;
7049+
7050+ /*
7051+ * Map MMIO addresses so that the DRM can control interrupt support
7052+ */
7053+
7054+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
7055+ priv->vdc_reg = ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
7056+ if (!priv->vdc_reg) {
7057+ /*
7058+ * Normally we'd want to unload the driver on failure. But due
7059+	 * to circular dependencies, we can only return failure.
7060+ */
7061+ /* psb_driver_unload(dev); */
7062+ return 1;
7063+ }
7064+
7065+ priv->sgx_reg = ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
7066+ if (!priv->sgx_reg) {
7067+ /*
7068+ * Normally we'd want to unload the driver on failure. But due
7069+	 * to circular dependencies, we can only return failure.
7070+ */
7071+ /* psb_driver_unload(dev); */
7072+ return 1;
7073+ }
7074+
7075+ priv->msvdx_reg = ioremap(resource_start + PSB_MSVDX_OFFSET, PSB_MSVDX_SIZE);
7076+ if (!priv->msvdx_reg) {
7077+ /*
7078+ * Normally we'd want to unload the driver on failure. But due
7079+	 * to circular dependencies, we can only return failure.
7080+ */
7081+ /* psb_driver_unload(dev); */
7082+ return 1;
7083+ }
7084+
7085+ return 0;
7086+
7087+}
7088+
7089+void intel_preclose_2624(drm_device_t *dev, struct drm_file *filp)
7090+{
7091+ intel_prerelease(dev);
7092+}
7093+
7094+/*
7095+ * Implement the 2.6.24 kernel interface for the device specific IOCTL
7096+ * that gets pages of memory from the DRM and returns them to the caller.
7097+ */
7098+int intel_getpages_2624(struct drm_device *dev,
7099+ void *data,
7100+ struct drm_file *filepriv)
7101+{
7102+ drm_intel_getpages_t *getpages;
7103+ unsigned long bytes;
7104+ int order;
7105+ int size;
7106+
7107+ unsigned long address;
7108+ unsigned long phy_address;
7109+ unsigned long offset;
7110+
7111+ struct page *pg;
7112+
7113+ unsigned long virtual;
7114+ struct file_operations *old_fops;
7115+
7116+ intel_device_private_t *dev_ptr = dev->dev_private;
7117+ drm_intel_listpages_t *page;
7118+ drm_intel_list_t *list;
7119+
7120+ DRM_DEBUG("\n");
7121+ DRM_INFO("in intel_getpages_2624, calling intel_getpages\n");
7122+ getpages = (drm_intel_getpages_t *)data;
7123+
7124+ bytes = getpages->size;
7125+
7126+ /*
7127+	 * Check whether this allocation would push total memory use past 16 MB.
7128+	 * This is to prevent a denial-of-service attack; 16 MB should be enough.
7129+ */
7130+ if((memory + bytes) > MB(16) ){
7131+ /* We exceeded 16MEG. Bail out */
7132+ DRM_ERROR("Total memory allocated exceeded 16Meg!\n");
7133+ DRM_INFO("Total memory allocated exceeded 16Meg!\n");
7134+ return -EFAULT;
7135+ }
7136+
7137+ /* number of pages that are needed */
7138+ size = bytes>>PAGE_SHIFT;
7139+ if(bytes & ~(PAGE_SIZE*size)){
7140+ ++size;
7141+ }
7142+ order = ORDER(size);
7143+ DRM_DEBUG("Allocating bytes:%#lx,size:%d,order:%d\n",
7144+ (unsigned long)bytes,size,order);
7145+
7146+ /*
7147+ * Allocate the pages.
7148+ * returns kernel logical address.
7149+ * Is this the same as the kernel virtual address??
7150+ */
7151+ address = ALLOC_PAGES(order,0);
7152+ if(!address){
7153+ DRM_ERROR("Can't get pages\n");
7154+ DRM_INFO("Can't get pages\n");
7155+ return -EFAULT;
7156+ }
7157+ phy_address = __pa(address);
7158+
7159+	/* Find the page index (offset) for the allocated pages */
7160+ pg = virt_to_page((void *)address);
7161+ offset = pg->index;
7162+
7163+ /* Find the number of bytes that is actually allocated */
7164+ size = PAGE_SIZE<<order;
7165+ DRM_DEBUG("Allocated address:%#lx,page offset:%#lx,phy_address:%#lx\n",
7166+ address,offset,phy_address);
7167+
7168+ /*do_mmap on the logical address and return virtual address */
7169+ down_write(&current->mm->mmap_sem);
7170+
7171+ old_fops = (struct file_operations *) (filepriv->filp->f_op);
7172+ filepriv->filp->f_op = &intel_buffer_fops;
7173+
7174+ virtual = do_mmap(filepriv->filp, 0, size,
7175+ PROT_READ|PROT_WRITE,MAP_SHARED, phy_address);
7176+ filepriv->filp->f_op = old_fops;
7177+
7178+ up_write(&current->mm->mmap_sem);
7179+ DRM_DEBUG("Mmaped virtual:%#lx,address:%#lx\n",virtual,
7180+ (unsigned long)__va(phy_address));
7181+
7182+ if(virtual > -1024UL){
7183+ DRM_ERROR("mmap failed:%d\n",(int)virtual);
7184+ DRM_INFO("mmap failed:%d\n",(int)virtual);
7185+ return -EFAULT;
7186+ }
7187+
7188+ getpages->phy_address = phy_address;
7189+ getpages->virt_address = virtual;
7190+ getpages->size = size;
7191+ getpages->offset = offset;
7192+
7193+ DRM_DEBUG("Mmap success requested size:%d (%d)\n",
7194+ getpages->size,(int)bytes);
7195+
7196+ /* alloc the page to be put into the linked list */
7197+ page = ALLOC(sizeof(*page),DRM_MEM_DRIVER);
7198+ if(!page){
7199+ DRM_DEBUG("Can't alloc list for page\n");
7200+ DRM_INFO("Can't alloc list for page\n");
7201+ return -ENOMEM;
7202+ }
7203+
7204+ /*page->pid=current->pid;*/
7205+ page->pid = current->group_leader->pid;
7206+ page->size = size;
7207+ page->phy_address = phy_address;
7208+ page->virt_address = virtual;
7209+ page->offset = offset;
7210+
7211+ DRM_DEBUG("parent pid:%d,pid:%d,group_leader->pid:%d\n"
7212+ ,current->parent->pid,current->pid,current->group_leader->pid);
7213+
7214+ /* Alloc the list to be added then add it to the linked list */
7215+ list = ALLOC(sizeof(*list),DRM_MEM_DRIVER);
7216+ if(!list){
7217+ DRM_DEBUG("Can't alloc list for page\n");
7218+ DRM_INFO("Can't alloc list for page\n");
7219+ FREE(page,sizeof(*page),0);
7220+ return -ENOMEM;
7221+ }
7222+ memset(list,0,sizeof(*list));
7223+ list->page = page;
7224+ LOCK_DRM(dev);
7225+ list_add(&list->head,&dev_ptr->pagelist->head);
7226+ UNLOCK_DRM(dev);
7227+
7228+ /* update the total amount of memory we use */
7229+ memory += size;
7230+ DRM_DEBUG("memory has:%d bytes\n",memory);
7231+
7232+ DRM_INFO("intel_getpages Exit\n");
7233+ return 0;
7234+}
7235+
7236+
7237+/*
7238+ * Implement the 2.6.24 kernel interface for the device specific IOCTL
7239+ * that frees pages of memory that were previously allocated from the DRM.
7240+ */
7241+int intel_freepages_2624(struct drm_device *dev,
7242+ void *data,
7243+ struct drm_file *filepriv)
7244+{
7245+ drm_intel_freepages_t *freepages;
7246+ unsigned long bytes;
7247+ int order;
7248+ int size;
7249+
7250+ intel_device_private_t *dev_ptr=dev->dev_private;
7251+ drm_intel_listpages_t *page;
7252+ drm_intel_list_t *r_list=NULL;
7253+ struct list_head *pagelist;
7254+
7255+ DRM_DEBUG("Freeing pages\n");
7256+ freepages = (drm_intel_freepages_t *)data;
7257+
7258+ /* number of pages that are needed */
7259+ bytes = freepages->size;
7260+ size = bytes>>PAGE_SHIFT;
7261+ if(bytes & ~(PAGE_SIZE*size)){
7262+ ++size;
7263+ }
7264+ order = ORDER(size);
7265+ DRM_DEBUG("bytes:%d,size:%d,order:%d,phy_address:%#lx\n", (int)bytes,
7266+ (int)size,(int)order,freepages->phy_address);
7267+
7268+ /* free the pages */
7269+ DRM_DEBUG("freeing address:%#lx,size:%#lx\n",
7270+ (unsigned long)__va(freepages->phy_address),(unsigned long)bytes);
7271+
7272+ DRM_DEBUG("parent pid:%d,pid:%d,group_leader->pid:%d\n"
7273+ ,current->parent->pid,current->pid,current->group_leader->pid);
7274+
7275+ /* See if the requested address is in our page list */
7276+ LOCK_DRM(dev);
7277+ pagelist = &dev_ptr->pagelist->head;
7278+ list_for_each(pagelist, &dev_ptr->pagelist->head){
7279+ r_list=list_entry(pagelist, drm_intel_list_t, head);
7280+ if((r_list->page->pid==current->group_leader->pid)
7281+ && (r_list->page->phy_address==freepages->phy_address)){
7282+
7283+ DRM_DEBUG("found pid:%d\n",current->group_leader->pid);
7284+ DRM_DEBUG("size:%d\n",r_list->page->size);
7285+ DRM_DEBUG("phy_address:%#lx\n",r_list->page->phy_address);
7286+ DRM_DEBUG("virt_add:%#lx\n",r_list->page->virt_address);
7287+ DRM_DEBUG("offset:%#lx\n",r_list->page->offset);
7288+
7289+ break;
7290+ }
7291+
7292+ }
7293+
7294+ if(pagelist == (&dev_ptr->pagelist->head)){
7295+ DRM_DEBUG("Can't find pages alloc for pid:%d\n",current->pid);
7296+ UNLOCK_DRM(dev);
7297+ return -EINVAL;
7298+ }
7299+
7300+ /* munmap the region 1st */
7301+ down_write(&current->mm->mmap_sem);
7302+ DRM_DEBUG("Unmapping virt_address:%#lx\n",freepages->virt_address);
7303+ do_munmap(current->mm,freepages->virt_address,bytes);
7304+ up_write(&current->mm->mmap_sem);
7305+
7306+ /* Free the pages! */
7307+ FREE_PAGES((unsigned long)__va(freepages->phy_address), order, 0);
7308+
7309+ /* Free the page list */
7310+ page = r_list->page;
7311+ list_del(pagelist);
7312+ size = r_list->page->size;
7313+ FREE(pagelist,sizeof(*pagelist),0);
7314+ FREE(page,sizeof(*page),0);
7315+ UNLOCK_DRM(dev);
7316+
7317+ /* update the total memory that we use */
7318+ memory -= size;
7319+ DRM_DEBUG("memory has:%d bytes\n", memory);
7320+ return 0;
7321+}
7322+
7323+
7324+/*
7325+ * Implement the 2.6.24 kernel interface for the device specific IOCTL
7326+ * that stores client specific information.
7327+ */
7328+int intel_drm_info_init_2624(struct drm_device *dev,
7329+ void *data,
7330+ struct drm_file *filepriv)
7331+{
7332+ intel_drm_info_t *info;
7333+ intel_drm_info_t *info_ptr;
7334+ intel_device_private_t *dev_ptr;
7335+
7336+ if (dev == NULL) {
7337+ DRM_INFO("ERROR ERROR, drm device is NULL\n");
7338+ return -EFAULT;
7339+ }
7340+	DRM_DEBUG("info init successful dev_private:%#lx\n",
7341+ (unsigned long)dev->dev_private);
7342+ dev_ptr = dev->dev_private;
7343+
7344+ /* See if dev_private is already allocated */
7345+ if(!dev->dev_private){
7346+ DRM_ERROR("dev_private not allocated!\n");
7347+ return 0;
7348+ }
7349+ info_ptr = dev_ptr->info_ptr;
7350+
7351+ /* See if info is already allocated */
7352+ if(info_ptr->device_id){
7353+ DRM_DEBUG("Info already allocated: device id = 0x%lx\n",
7354+ info_ptr->device_id);
7355+ DRM_ERROR("Info already allocated!\n");
7356+ return 0;
7357+ }
7358+
7359+ info = (intel_drm_info_t *)data;
7360+
7361+ info_ptr->device_id = info->device_id;
7362+ info_ptr->revision = info->revision;
7363+ info_ptr->video_memory_offset = info->video_memory_offset;
7364+ info_ptr->video_memory_size = info->video_memory_size;
7365+ info_ptr->hw_status_offset = info->hw_status_offset;
7366+ DRM_DEBUG("Saving dev_id:%#lx rev:%#lx offset:%#lx size:%#lx, "
7367+ "hwst_offset:%lx\n",
7368+ info_ptr->device_id, info_ptr->revision,
7369+ info_ptr->video_memory_offset, info_ptr->video_memory_size,
7370+ info_ptr->hw_status_offset);
7371+
7372+ return 0;
7373+}
7374+
7375+
7376+/*
7377+ * Implement the 2.6.24 kernel interface for the device specific IOCTL
7378+ * that retrieves client specific information.
7379+ */
7380+int intel_drm_info_get_2624(struct drm_device *dev,
7381+ void *data,
7382+ struct drm_file *filepriv)
7383+{
7384+ intel_drm_info_t *info;
7385+ intel_device_private_t *dev_ptr = dev->dev_private;
7386+ intel_drm_info_t *info_ptr = dev_ptr->info_ptr;
7387+
7388+ DRM_DEBUG("Info get dev_id:%#lx rev:%#lx offset:%#lx size:%#lx "
7389+ "hwst_offset:%lx\n",
7390+ info_ptr->device_id,info_ptr->revision,
7391+ info_ptr->video_memory_offset,info_ptr->video_memory_size,
7392+ info_ptr->hw_status_offset);
7393+
7394+ info = (intel_drm_info_t *)data;
7395+
7396+ info->device_id = info_ptr->device_id;
7397+ info->revision = info_ptr->revision;
7398+ info->video_memory_offset = info_ptr->video_memory_offset;
7399+ info->video_memory_size = info_ptr->video_memory_size;
7400+ info->hw_status_offset = info_ptr->hw_status_offset;
7401+
7402+ return 0;
7403+}
7404+
7405+/*
7406+ * The following 2 functions were taken from drm_memory.c
7407+ * For some reason they are not exported for use by other DRM drivers.
7408+ */
7409+
7410+/**
7411+ * Allocate pages.
7412+ *
7413+ * \param order size order.
7414+ * \param area memory area. (Not used.)
7415+ * \return page address on success, or zero on failure.
7416+ *
7417+ * Allocate and reserve free pages.
7418+ */
7419+unsigned long intel_alloc_pages(int order, int area)
7420+{
7421+ unsigned long address;
7422+ unsigned long bytes = PAGE_SIZE << order;
7423+ unsigned long addr;
7424+ unsigned int sz;
7425+
7426+ address = __get_free_pages(GFP_KERNEL, order);
7427+ if (!address)
7428+ return 0;
7429+
7430+ /* Zero */
7431+ memset((void *)address, 0, bytes);
7432+
7433+ /* Reserve */
7434+ for (addr = address, sz = bytes;
7435+ sz > 0;
7436+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
7437+ SetPageReserved(virt_to_page(addr));
7438+ }
7439+
7440+ return address;
7441+}
7442+
7443+/**
7444+ * Free pages.
7445+ *
7446+ * \param address address of the pages to free.
7447+ * \param order size order.
7448+ * \param area memory area. (Not used.)
7449+ *
7450+ * Unreserve and free pages allocated by alloc_pages().
7451+ */
7452+void intel_free_pages(unsigned long address, int order, int area)
7453+{
7454+ unsigned long bytes = PAGE_SIZE << order;
7455+ unsigned long addr;
7456+ unsigned int sz;
7457+
7458+ if (!address) {
7459+ return;
7460+ }
7461+
7462+ /* Unreserve */
7463+ for (addr = address, sz = bytes; sz > 0;
7464+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
7465+ ClearPageReserved(virt_to_page(addr));
7466+ }
7467+
7468+ free_pages(address, order);
7469+}
7470+
7471+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
7472+{
7473+ intel_device_private_t *priv;
7474+
7475+ priv=(intel_device_private_t *)dev->dev_private;
7476+
7477+ return 0;
7478+}
7479+
7480+int intel_drm_plb_interrupts_2624 (struct drm_device *dev,
7481+ void *data,
7482+ struct drm_file *filepriv)
7483+{
7484+ intel_device_private_t *priv = dev->dev_private;
7485+ interrupt_info_t *plb_info;
7486+ unsigned long irqflags;
7487+ int ret = 0;
7488+ int rv;
7489+
7490+ plb_info = (interrupt_info_t *)data;
7491+
7492+ /* USW15 definition of in and out
7493+ *
7494+ * in/out[0] VDC
7495+ * in/out[1] sgx
7496+ * in/out[2] sgx2
7497+ * in/out[3] msvdx
7498+ */
7499+
7500+ plb_info->out[0]=0;
7501+ plb_info->out[1]=0;
7502+ plb_info->out[2]=0;
7503+ plb_info->out[3]=0;
7504+
7505+ switch (plb_info->req_type) {
7506+ case CLEAR_INT:
7507+ plb_info->in[0] &= priv->vdc_irq_mask;
7508+ plb_info->in[1] &= priv->sgx_irq_mask;
7509+ plb_info->in[2] &= priv->sgx_irq_mask2;
7510+ plb_info->in[3] &= priv->msvdx_irq_mask;
7511+
7512+ if (plb_info->in[0] || plb_info->in[1] ||
7513+ plb_info->in[2] || plb_info->in[3]) {
7514+
7515+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
7516+ priv->out_vdc &= ~plb_info->in[0];
7517+ plb_info->out[0] = priv->out_vdc;
7518+
7519+ priv->out_sgx &= ~plb_info->in[1];
7520+ plb_info->out[1] = priv->out_sgx;
7521+
7522+ priv->out_sgx2 &= ~plb_info->in[2];
7523+ plb_info->out[2] = priv->out_sgx2;
7524+
7525+ priv->out_mtx &= ~plb_info->in[3];
7526+ plb_info->out[3] = priv->out_mtx;
7527+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
7528+
7529+ plb_info->req_status = INT_CLEARED;
7530+
7531+ } else {
7532+ plb_info->req_status = INT_NOOP;
7533+ }
7534+
7535+ break;
7536+
7537+ case READ_INT:
7538+ plb_info->out[0] = priv->out_vdc;
7539+ plb_info->out[1] = priv->out_sgx;
7540+ plb_info->out[2] = priv->out_sgx2;
7541+ plb_info->out[3] = priv->out_mtx;
7542+ plb_info->req_status = INT_READ;
7543+
7544+ break;
7545+
7546+ case WAIT_INT:
7547+ plb_info->in[0] &= priv->vdc_irq_mask;
7548+ plb_info->in[1] &= priv->sgx_irq_mask;
7549+ plb_info->in[2] &= priv->sgx_irq_mask2;
7550+ plb_info->in[3] &= priv->msvdx_irq_mask;
7551+
7552+ if (plb_info->in[0] || plb_info->in[1] ||
7553+ plb_info->in[2] || plb_info->in[3]) {
7554+
7555+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
7556+
7557+			/* check whether any of the requested interrupts has already occurred */
7558+ if ((priv->out_vdc & plb_info->in[0]) ||
7559+ (priv->out_sgx & plb_info->in[1]) ||
7560+ (priv->out_sgx2 & plb_info->in[2]) ||
7561+ (priv->out_mtx & plb_info->in[3])) {
7562+
7563+ /* At least one of the interrupts has already occurred */
7564+ plb_info->req_status = INT_STORED;
7565+
7566+ } else {
7567+
7568+ /* Wait for an interrupt to occur */
7569+ priv->event_present = 0;
7570+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
7571+
7572+ DRM_WAIT_ON(ret, priv->event_queue, 20 * DRM_HZ,
7573+ priv->event_present);
7574+
7575+ if (ret) {
7576+ plb_info->req_status = INT_TIMEOUT;
7577+ break;
7578+ }
7579+
7580+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
7581+
7582+ plb_info->req_status = INT_HANDLED;
7583+
7584+ }
7585+ plb_info->out[0] = priv->out_vdc;
7586+ plb_info->out[1] = priv->out_sgx;
7587+ plb_info->out[2] = priv->out_sgx2;
7588+ plb_info->out[3] = priv->out_mtx;
7589+
7590+ /* Clear the outstanding interrupts that have just been
7591+ * retrieved
7592+ */
7593+ priv->out_vdc &= ~(plb_info->out[0] & plb_info->in[0]);
7594+ priv->out_sgx &= ~(plb_info->out[1] & plb_info->in[1]) ;
7595+ priv->out_sgx2 &= ~(plb_info->out[2] & plb_info->in[2]);
7596+ priv->out_mtx &= ~(plb_info->out[3] & plb_info->in[3]);
7597+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
7598+
7599+ } else {
7600+
7601+ /* Unsupported interrupt */
7602+ plb_info->req_status = INT_NOOP;
7603+
7604+ }
7605+
7606+ break;
7607+
7608+ case UNMASK_INT:
7609+
7610+ if (!dev->irq_enabled) {
7611+ rv = drm_irq_install(dev);
7612+ if (rv != 0) {
7613+ DRM_ERROR("%s: could not install IRQs: rv = %d\n", __FUNCTION__, rv);
7614+ return rv;
7615+ }
7616+ }
7617+
7618+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
7619+ PSB_WVDC32(0x00000000, IMR);
7620+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
7621+
7622+ break;
7623+
7624+ case MASK_INT:
7625+
7626+ if (dev->irq_enabled) {
7627+ rv = drm_irq_uninstall(dev);
7628+ if (rv != 0) {
7629+ DRM_ERROR("%s: could not uninstall IRQs: rv = %d\n", __FUNCTION__, rv);
7630+ return rv;
7631+ }
7632+ }
7633+
7634+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
7635+ PSB_WVDC32(0xFFFFFFFF, IMR);
7636+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
7637+
7638+ break;
7639+
7640+ default:
7641+
7642+ plb_info->req_status = INT_INVALID;
7643+ }
7644+
7645+ return 0;
7646+}
7647+
7648+
7649+drm_ioctl_desc_t intel_ioctls[] = {
7650+ DRM_IOCTL_DEF(DRM_INTEL_GETPAGES, intel_getpages_2624, 0),
7651+ DRM_IOCTL_DEF(DRM_INTEL_FREEPAGES, intel_freepages_2624, 0),
7652+ DRM_IOCTL_DEF(DRM_INTEL_INFO_INIT, intel_drm_info_init_2624, 0),
7653+ DRM_IOCTL_DEF(DRM_INTEL_INFO_GET, intel_drm_info_get_2624, 0),
7654+ DRM_IOCTL_DEF(DRM_INTEL_INTERRUPT, intel_drm_plb_interrupts_2624, 0)
7655+};
7656+
7657+int intel_max_ioctl = DRM_ARRAY_SIZE(intel_ioctls);
7658+
7659+
7660+
7661+static struct pci_device_id pciidlist[] = {
7662+ INTEL_PCI_IDS
7663+};
7664+
7665+int device_is_agp_2624(drm_device_t * dev)
7666+{
7667+ return 1;
7668+}
7669+
7670+static struct drm_driver driver = {
7671+ .firstopen = intel_firstopen_2624,
7672+ .preclose = intel_preclose_2624,
7673+ .reclaim_buffers=drm_core_reclaim_buffers,
7674+ .get_map_ofs=drm_core_get_map_ofs,
7675+ .get_reg_ofs=drm_core_get_reg_ofs,
7676+
7677+ .device_is_agp = device_is_agp_2624,
7678+
7679+ .major = DRIVER_MAJOR,
7680+ .minor = DRIVER_MINOR,
7681+ .patchlevel = DRIVER_PATCHLEVEL,
7682+ .name = DRIVER_NAME,
7683+ .desc = DRIVER_DESC,
7684+ .date = DRIVER_DATE,
7685+
7686+ .driver_features = DRIVER_USE_AGP|DRIVER_REQUIRE_AGP|DRIVER_USE_MTRR,
7687+ .ioctls = intel_ioctls,
7688+ .fops = {
7689+ .owner = THIS_MODULE,
7690+ .open = drm_open,
7691+ .release = drm_release,
7692+ .ioctl = drm_ioctl,
7693+ .mmap = drm_mmap,
7694+ .poll = drm_poll,
7695+ .fasync = drm_fasync,
7696+ },
7697+ .pci_driver = {
7698+ .name = DRIVER_NAME,
7699+ .id_table = pciidlist,
7700+ }
7701+};
7702+
7703+static struct drm_driver driver_plb = {
7704+ .load = psb_driver_load,
7705+ .firstopen = intel_plb_firstopen_2624,
7706+ .preclose = intel_preclose_2624,
7707+ .reclaim_buffers=drm_core_reclaim_buffers,
7708+ .get_map_ofs=drm_core_get_map_ofs,
7709+ .get_reg_ofs=drm_core_get_reg_ofs,
7710+
7711+ .device_is_agp = device_is_agp_2624,
7712+
7713+ .major = DRIVER_MAJOR,
7714+ .minor = DRIVER_MINOR,
7715+ .patchlevel = DRIVER_PATCHLEVEL,
7716+ .name = DRIVER_NAME,
7717+ .desc = DRIVER_DESC,
7718+ .date = DRIVER_DATE,
7719+
7720+ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
7721+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_USE_MTRR,
7722+ .ioctls = intel_ioctls,
7723+ .irq_preinstall = psb_irq_preinstall,
7724+ .irq_postinstall = psb_irq_postinstall,
7725+ .irq_uninstall = psb_irq_uninstall,
7726+ .irq_handler = psb_irq_handler,
7727+
7728+ .fops = {
7729+ .owner = THIS_MODULE,
7730+ .open = drm_open,
7731+ .release = drm_release,
7732+ .ioctl = drm_ioctl,
7733+ .mmap = drm_plb_mmap,
7734+ .poll = drm_poll,
7735+ .fasync = drm_fasync,
7736+ },
7737+ .pci_driver = {
7738+ .name = DRIVER_NAME,
7739+ .id_table = pciidlist,
7740+ }
7741+};
7742+
7743+int intel_init(void)
7744+{
7745+ driver.num_ioctls = intel_max_ioctl;
7746+ driver_plb.num_ioctls = intel_max_ioctl;
7747+
7748+ /* We are peeking into the global AGP structures that
7749+ * we have access to in order to determine what chipset we're
7750+ * on. This isn't necessarily a good thing to do.
7751+ */
7752+
7753+ if (gart_id->device_id == PCI_DEVICE_ID_PLB) {
7754+ printk(KERN_ERR "Initializing DRM for Intel US15 SCH\n");
7755+ return drm_init(&driver_plb);
7756+ } else {
7757+ return drm_init(&driver);
7758+ }
7759+
7760+}
7761+
7762+void intel_exit(void)
7763+{
7764+ drm_exit(&driver);
7765+}
7766+
7767+struct file_operations intel_buffer_fops = {
7768+ .open = drm_open,
7769+ .release = drm_release,
7770+ .ioctl = drm_ioctl,
7771+ .mmap = intel_mmap_buffers,
7772+ .poll = drm_poll,
7773+ .fasync = drm_fasync,
7774+};
7775+#endif
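The DRM_INTEL_INTERRUPT handler above multiplexes CLEAR_INT, READ_INT, WAIT_INT, UNMASK_INT and MASK_INT through a single interrupt_info_t, with in[]/out[] slots 0-3 carrying VDC, SGX, SGX2 and MSVDX bits. A user-space sketch of the WAIT_INT flow follows; the DRM_IOCTL_INTEL_INTERRUPT request macro and the exact interrupt_info_t field types are assumed to come from iegd_drm.h, and the mask passed in must be one the kernel accepts (it is ANDed with vdc_irq_mask):

    #include <string.h>
    #include <sys/ioctl.h>
    #include "iegd_drm.h"   /* interrupt_info_t, WAIT_INT, INT_* (assumed) */

    /* Block until a requested VDC interrupt fires, or the kernel's
     * 20 * HZ wait in DRM_WAIT_ON() times out. */
    static long wait_for_vdc_event(int drm_fd, unsigned long vdc_mask)
    {
        interrupt_info_t info;

        memset(&info, 0, sizeof(info));
        info.req_type = WAIT_INT;
        info.in[0] = vdc_mask;   /* slot 0 = VDC, per the comment in the handler */

        if (ioctl(drm_fd, DRM_IOCTL_INTEL_INTERRUPT, &info) < 0)
            return -1;

        if (info.req_status == INT_TIMEOUT)
            return 0;              /* nothing arrived within the timeout */

        return (long)info.out[0];  /* VDC status bits that were reported */
    }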
7776diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2624.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2624.h
7777--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2624.h 1969-12-31 17:00:00.000000000 -0700
7778+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2624.h 2009-10-06 10:30:05.000000000 -0700
7779@@ -0,0 +1,78 @@
7780+/* -*- pse-c -*-
7781+ *----------------------------------------------------------------------------
7782+ * Filename: iegd_interface_2624.h
7783+ * $Revision: 1.5 $
7784+ *----------------------------------------------------------------------------
7785+ * Gart and DRM driver for Intel Embedded Graphics Driver
7786+ * Copyright © 2008, Intel Corporation.
7787+ *
7788+ * This program is free software; you can redistribute it and/or modify it
7789+ * under the terms and conditions of the GNU General Public License,
7790+ * version 2, as published by the Free Software Foundation.
7791+ *
7792+ * This program is distributed in the hope it will be useful, but WITHOUT
7793+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
7794+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
7795+ * more details.
7796+ *
7797+ * You should have received a copy of the GNU General Public License along with
7798+ * this program; if not, write to the Free Software Foundation, Inc.,
7799+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
7800+ *
7801+ */
7802+
7803+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
7804+ *
7805+ * Redistribution and use in source and binary forms, with or without
7806+ * modification, are permitted provided that the following conditions are met:
7807+ * Redistributions of source code must retain the above copyright notice,
7808+ * this list of conditions and the following disclaimer.
7809+ *
7810+ * Redistributions in binary form must reproduce the above copyright
7811+ * notice, this list of conditions and the following disclaimer in the
7812+ * documentation and/or other materials provided with the distribution.
7813+ *
7814+ * Neither the name Intel Corporation nor the names of its contributors
7815+ * may be used to endorse or promote products derived from this software
7816+ * without specific prior written permission.
7817+ *
7818+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
7819+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
7820+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
7821+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
7822+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
7823+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
7824+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
7825+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
7826+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
7827+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
7828+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7829+ *
7830+ */
7831+
7832+/* Macros are defined here so that only functions specific to this kernel
7833+ * version are used.
7834+ */
7835+#if KERNEL2624
7836+#define REMAP_PAGE(a,b,c,d,e) io_remap_pfn_range(a,b, \
7837+ c >>PAGE_SHIFT, \
7838+ d,e)
7839+
7840+#define ORDER(a) drm_order(a)
7841+#define ALLOC_PAGES(a,b) intel_alloc_pages(a,b)
7842+
7843+/* kernel 2.6.31 removed the drm_alloc()/drm_free() wrappers */
7844+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
7845+#define ALLOC(a, b) kmalloc(a, GFP_KERNEL)
7846+#define FREE(a,b,c) kfree(a)
7847+#else
7848+#define ALLOC(a,b) drm_alloc(a,b)
7849+#define FREE(a,b,c) drm_free(a,b,c)
7850+#endif
7851+
7852+#define FREE_PAGES(a,b,c) intel_free_pages(a,b,c)
7853+#define LOCK_DRM(d) mutex_lock(&d->struct_mutex)
7854+#define UNLOCK_DRM(d) mutex_unlock(&d->struct_mutex)
7855+#endif
7856+
7857+/* endif for KERNEL2624 */
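ORDER() above wraps drm_order(), so intel_getpages_2624() first rounds a request up to whole pages and then up again to a power-of-two page count, which is why the size handed back to the caller can exceed what was asked for. A small worked sketch of that arithmetic, assuming 4 KiB pages; the helper is illustrative only:

    /* Requesting 10000 bytes with PAGE_SIZE = 4096:
     *   pages  = 10000 >> 12 = 2, rounded up to 3 (partial page present)
     *   order  = drm_order(3) = 2             (2^2 = 4 pages >= 3)
     *   actual = PAGE_SIZE << 2 = 16384 bytes actually reserved
     */
    static unsigned long pages_actually_reserved(unsigned long bytes)
    {
        unsigned long pages = bytes >> PAGE_SHIFT;
        int order;

        if (bytes & (PAGE_SIZE - 1))   /* round a partial page up, as the driver does */
            ++pages;
        order = ORDER(pages);          /* smallest order with 2^order >= pages */
        return PAGE_SIZE << order;     /* what ALLOC_PAGES(order, 0) hands back */
    }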
7858diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_265.c patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_265.c
7859--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_265.c 1969-12-31 17:00:00.000000000 -0700
7860+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_265.c 2009-10-06 10:30:05.000000000 -0700
7861@@ -0,0 +1,147 @@
7862+/* -*- pse-c -*-
7863+ *----------------------------------------------------------------------------
7864+ * Filename: iegd_interface_265.c
7865+ * $Revision: 1.6 $
7866+ *----------------------------------------------------------------------------
7867+ * Gart and DRM driver for Intel Embedded Graphics Driver
7868+ * Copyright © 2008, Intel Corporation.
7869+ *
7870+ * This program is free software; you can redistribute it and/or modify it
7871+ * under the terms and conditions of the GNU General Public License,
7872+ * version 2, as published by the Free Software Foundation.
7873+ *
7874+ * This program is distributed in the hope it will be useful, but WITHOUT
7875+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
7876+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
7877+ * more details.
7878+ *
7879+ * You should have received a copy of the GNU General Public License along with
7880+ * this program; if not, write to the Free Software Foundation, Inc.,
7881+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
7882+ *
7883+ */
7884+
7885+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
7886+ *
7887+ * Redistribution and use in source and binary forms, with or without
7888+ * modification, are permitted provided that the following conditions are met:
7889+ * Redistributions of source code must retain the above copyright notice,
7890+ * this list of conditions and the following disclaimer.
7891+ *
7892+ * Redistributions in binary form must reproduce the above copyright
7893+ * notice, this list of conditions and the following disclaimer in the
7894+ * documentation and/or other materials provided with the distribution.
7895+ *
7896+ * Neither the name Intel Corporation nor the names of its contributors
7897+ * may be used to endorse or promote products derived from this software
7898+ * without specific prior written permission.
7899+ *
7900+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
7901+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
7902+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
7903+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
7904+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
7905+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
7906+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
7907+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
7908+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
7909+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
7910+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7911+ *
7912+ */
7913+
7914+#include "iegd.h"
7915+#include "drmP.h"
7916+#include "drm.h"
7917+
7918+#include "iegd_drm.h"
7919+#include "iegd_drv.h"
7920+
7921+#if KERNEL265
7922+#include "drm_agpsupport.h"
7923+#include "drm_auth.h"
7924+#include "drm_bufs.h"
7925+#include "drm_context.h"
7926+#include "drm_dma.h"
7927+#include "drm_drawable.h"
7928+#include "drm_drv.h"
7929+
7930+#include "drm_fops.h"
7931+#include "drm_init.h"
7932+#include "drm_ioctl.h"
7933+#include "drm_lock.h"
7934+#include "drm_memory.h"
7935+#include "drm_proc.h"
7936+#include "drm_vm.h"
7937+#include "drm_stub.h"
7938+
7939+int intel_postinit_265(drm_device_t *dev){
7940+
7941+ intel_device_private_t *priv;
7942+ priv=(intel_device_private_t *)dev->dev_private;
7943+
7944+ intel_postinit(&priv);
7945+ dev->dev_private=priv;
7946+
7947+ return 0;
7948+
7949+}
7950+
7951+int intel_prerelease_265(drm_device_t *dev){
7952+
7953+ intel_prerelease(dev);
7954+
7955+ return 0;
7956+
7957+}
7958+
7959+int intel_getpages_265( struct inode *inode, struct file *filp,
7960+ unsigned int cmd, unsigned long arg ){
7961+
7962+ drm_file_t *priv=filp->private_data;
7963+ drm_device_t *dev=priv->dev;
7964+ return intel_getpages(dev,filp,arg);
7965+
7966+
7967+}
7968+
7969+int intel_freepages_265( struct inode *inode, struct file *filp,
7970+ unsigned int cmd, unsigned long arg ){
7971+
7972+ drm_file_t *priv=filp->private_data;
7973+ drm_device_t *dev=priv->dev;
7974+ return intel_freepages(dev,arg);
7975+
7976+}
7977+
7978+int intel_drm_info_init_265( struct inode *inode, struct file *filp,
7979+ unsigned int cmd, unsigned long arg ){
7980+
7981+ drm_file_t *priv=filp->private_data;
7982+ drm_device_t *dev=priv->dev;
7983+ return intel_drm_info_init(dev,arg);
7984+
7985+}
7986+
7987+int intel_drm_info_get_265( struct inode *inode, struct file *filp,
7988+ unsigned int cmd, unsigned long arg ){
7989+
7990+ drm_file_t *priv=filp->private_data;
7991+ drm_device_t *dev=priv->dev;
7992+ return intel_drm_info_get(dev,arg);
7993+
7994+}
7995+
7996+struct file_operations intel_buffer_fops = {
7997+ .open = DRM(open),
7998+ .flush = DRM(flush),
7999+ .release = DRM(release),
8000+ .ioctl = DRM(ioctl),
8001+ .mmap = intel_mmap_buffers,
8002+ .fasync = DRM(fasync),
8003+};
8004+
8005+#endif
8006+/*end of 2.6.5 definition */
8007+
8008+
8009diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_265.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_265.h
8010--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_265.h 1969-12-31 17:00:00.000000000 -0700
8011+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_265.h 2009-10-06 10:30:05.000000000 -0700
8012@@ -0,0 +1,129 @@
8013+/* -*- pse-c -*-
8014+ *----------------------------------------------------------------------------
8015+ * Filename: iegd_interface_265.h
8016+ * $Revision: 1.6 $
8017+ *----------------------------------------------------------------------------
8018+ * Gart and DRM driver for Intel Embedded Graphics Driver
8019+ * Copyright © 2008, Intel Corporation.
8020+ *
8021+ * This program is free software; you can redistribute it and/or modify it
8022+ * under the terms and conditions of the GNU General Public License,
8023+ * version 2, as published by the Free Software Foundation.
8024+ *
8025+ * This program is distributed in the hope it will be useful, but WITHOUT
8026+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8027+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8028+ * more details.
8029+ *
8030+ * You should have received a copy of the GNU General Public License along with
8031+ * this program; if not, write to the Free Software Foundation, Inc.,
8032+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8033+ *
8034+ */
8035+
8036+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
8037+ *
8038+ * Redistribution and use in source and binary forms, with or without
8039+ * modification, are permitted provided that the following conditions are met:
8040+ * Redistributions of source code must retain the above copyright notice,
8041+ * this list of conditions and the following disclaimer.
8042+ *
8043+ * Redistributions in binary form must reproduce the above copyright
8044+ * notice, this list of conditions and the following disclaimer in the
8045+ * documentation and/or other materials provided with the distribution.
8046+ *
8047+ * Neither the name Intel Corporation nor the names of its contributors
8048+ * may be used to endorse or promote products derived from this software
8049+ * without specific prior written permission.
8050+ *
8051+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
8052+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
8053+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
8054+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
8055+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
8056+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
8057+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
8058+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
8059+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
8060+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
8061+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
8062+ *
8063+ */
8064+
8065+/* For some arcane reason this must be defined for the 2.6.5 kernel in
8066+ * intel.h; otherwise the DRM won't compile properly.
8067+ */
8068+#if KERNEL265
8069+
8070+
8071+/* KERNEL265 builds pull in functions from the drm directory that get
8072+ * expanded when you #define DRM(x) intel_##x. This is very ugly and
8073+ * confusing. Luckily 2.6.11 doesn't have this. We can't do much here but
8074+ * follow the rules for it.
8075+ */
8076+#define DRM(x) intel_##x
8077+
8078+/* Changing the permission bits to 0,0 for testing. auth,root permission */
8079+#define DRIVER_IOCTLS \
8080+ [DRM_IOCTL_NR(DRM_IOCTL_INTEL_GETPAGES)] = { intel_getpages_265, 0, 0 }, \
8081+ [DRM_IOCTL_NR(DRM_IOCTL_INTEL_FREEPAGES)] = { intel_freepages_265, 0, 0 },\
8082+ [DRM_IOCTL_NR(DRM_IOCTL_INTEL_INFO_INIT)] = { intel_drm_info_init_265, 0, 0 },\
8083+ [DRM_IOCTL_NR(DRM_IOCTL_INTEL_INFO_GET)] = { intel_drm_info_get_265, 0, 0 }
8084+
8085+/* Driver customization:
8086+ */
8087+#define __HAVE_RELEASE 1
8088+#define DRIVER_PRERELEASE() do { \
8089+ intel_prerelease_265(dev); \
8090+} while (0)
8091+
8092+#define DRIVER_RELEASE() do { \
8093+} while (0)
8094+
8095+#define DRIVER_PRETAKEDOWN() do { \
8096+} while (0)
8097+
8098+#define DRIVER_POSTSETUP() do { \
8099+} while (0)
8100+
8101+#define DRIVER_POSTCLEANUP() do { \
8102+} while (0)
8103+
8104+#define DRIVER_POSTINIT() do { \
8105+ intel_postinit_265(dev); \
8106+} while (0)
8107+
8108+/*
8109+ * Explanation: For unknown reasons the DRM infrastructure uses a lot
8110+ * of really horrid programming techniques to generate custom init
8111+ * code from header files (containing C code) and macros. Apparently
8112+ * this is to save a few nanoseconds during init.
8113+ *
8114+ * The logic here is that if you define this magic macro, its code is
8115+ * used to count the number of devices you are supporting. We need to
8116+ * support two devices, we don't know the device IDs at startup, and
8117+ * there usually aren't two PCI devices anyway. So we just return 2
8118+ * and worry about it later.
8119+ *
8120+ * Note: DRM has issues with DIH so for now we'll live with one drm
8121+ *#define DRIVER_COUNT_CARDS() 2
8122+ */
8123+
8124+/* KERNEL265 builds pull in functions from the drm directory that get
8125+ * expanded when you #define DRM(x) intel_##x. This is very ugly and
8126+ * confusing. Luckily 2.6.11 doesn't have this
8127+ */
8128+#define REMAP_PAGE(a,b,c,d,e) remap_page_range( \
8129+ DRM_RPR_ARG(a) b , \
8130+ c,d,e)
8131+#define ORDER(a) DRM(order)(a)
8132+#define ALLOC_PAGES(a,b) DRM(alloc_pages)(a,b)
8133+#define ALLOC(a,b) DRM(alloc)(a,b)
8134+#define FREE(a,b,c) DRM(free)(a,b,c)
8135+#define FREE_PAGES(a,b,c) DRM(free_pages)(a,b,c)
8136+
8137+#define LOCK_DRM(d) down(&d->struct_sem)
8138+#define UNLOCK_DRM(d) up(&d->struct_sem)
8139+
8140+#endif
8141+/* endif for KERNEL265 */
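
Editor's illustration (not part of the original patch): the effect of the DRM(x) token-pasting macro above. A declaration written against the 2.6.5-era shared DRM template code is stamped out under an intel_ prefix by the preprocessor, which is how the generic drm_* code becomes driver-specific.

#define DRM(x) intel_##x

/* As written in the driver sources: */
int DRM(open)(struct inode *inode, struct file *filp);

/* What the compiler actually sees after preprocessing: */
int intel_open(struct inode *inode, struct file *filp);
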
8142diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/myclient.c patch_script_temp/drivers/gpu/drm/iegd/drm/myclient.c
8143--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/myclient.c 1969-12-31 17:00:00.000000000 -0700
8144+++ patch_script_temp/drivers/gpu/drm/iegd/drm/myclient.c 2009-10-06 10:30:05.000000000 -0700
8145@@ -0,0 +1,210 @@
8146+/* -*- pse-c -*-
8147+ *----------------------------------------------------------------------------
8148+ * Filename: myclient.c
8149+ * $Revision: 1.5 $
8150+ *----------------------------------------------------------------------------
8151+ * DRM test program
8152+ * Copyright © 2008, Intel Corporation.
8153+ *
8154+ * This program is free software; you can redistribute it and/or modify it
8155+ * under the terms and conditions of the GNU General Public License,
8156+ * version 2, as published by the Free Software Foundation.
8157+ *
8158+ * This program is distributed in the hope it will be useful, but WITHOUT
8159+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8160+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8161+ * more details.
8162+ *
8163+ * You should have received a copy of the GNU General Public License along with
8164+ * this program; if not, write to the Free Software Foundation, Inc.,
8165+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8166+ *
8167+ */
8168+
8169+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
8170+ *
8171+ * Redistribution and use in source and binary forms, with or without
8172+ * modification, are permitted provided that the following conditions are met:
8173+ * Redistributions of source code must retain the above copyright notice,
8174+ * this list of conditions and the following disclaimer.
8175+ *
8176+ * Redistributions in binary form must reproduce the above copyright
8177+ * notice, this list of conditions and the following disclaimer in the
8178+ * documentation and/or other materials provided with the distribution.
8179+ *
8180+ * Neither the name Intel Corporation nor the names of its contributors
8181+ * may be used to endorse or promote products derived from this software
8182+ * without specific prior written permission.
8183+ *
8184+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
8185+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
8186+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
8187+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
8188+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
8189+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
8190+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
8191+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
8192+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
8193+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
8194+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
8195+ *
8196+ */
8197+
8198+/* Client to test the ioctls.
8199+ * Make sure you change the permission bits in intel.h to 0,0
8200+ * before you start using this.
8201+ */
8202+
8203+#include "iegd.h"
8204+
8205+#include <fcntl.h>
8206+#include <unistd.h>
8207+#include <sys/ioctl.h>
8208+#include <stdlib.h>
8209+#include <stdio.h>
8210+
8211+#define DRM_IOCTL_BASE 'd'
8212+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
8213+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
8214+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
8215+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
8216+
8217+#include "intel_drm_client.h"
8218+
8219+#define DRM_IOCTL_INTEL_GETPAGES DRM_IOWR(DRM_BASE_COMMAND + \
8220+ DRM_INTEL_GETPAGES, drm_intel_getpages_t)
8221+#define DRM_IOCTL_INTEL_FREEPAGES DRM_IOWR(DRM_BASE_COMMAND + \
8222+ DRM_INTEL_FREEPAGES, drm_intel_freepages_t)
8223+#define DRM_IOCTL_INTEL_INFO_INIT DRM_IOW( DRM_BASE_COMMAND + \
8224+ DRM_INTEL_INFO_INIT, intel_drm_info_t)
8225+#define DRM_IOCTL_INTEL_INFO_GET DRM_IOR( DRM_BASE_COMMAND + \
8226+ DRM_INTEL_INFO_GET, intel_drm_info_t)
8227+
8228+
8229+#define PAGE_SIZE 4096
8230+int main()
8231+{
8232+int file_desc, ret_value;
8233+printf("ytay starting client\n");
8234+/* Open the drm */
8235+file_desc=open("/dev/dri/card0",O_RDWR);
8236+
8237+if(file_desc<0){
8238+/* Probably a SUSE distro, since the dev tree is different;
8239+ * try /dev/card0.
8240+ */
8241+file_desc=open("/dev/card0",O_RDWR);
8242+
8243+}
8244+
8245+if(file_desc<0){
8246+
8247+printf("ytay can't open device file:%s\n",DRIVER_DESC);
8248+ exit(-1);
8249+}
8250+
8251+printf("ytay open device file:%d\n",file_desc);
8252+drm_intel_getpages_t getpages;
8253+/* set the number of bytes we want the drm to allocate */
8254+getpages.size=(PAGE_SIZE- 1000);
8255+
8256+ret_value=ioctl(file_desc,DRM_IOCTL_INTEL_GETPAGES,&getpages);
8257+if(ret_value<0){
8258+printf("ytay ioctl failed!\n");
8259+ exit(-1);
8260+}
8261+printf("ytay ioctl success\n");
8262+printf("ytay size%d,phy_address:%#x,virt_address:%#x,offset:%#x\n",getpages.size,getpages.phy_address,getpages.virt_address,getpages.offset);
8263+
8264+/* test for memory access */
8265+
8266+int i;
8267+unsigned long *virt_ptr;
8268+
8269+virt_ptr=(unsigned long *)getpages.virt_address;
8270+
8271+/* input 0..10 into subsequent memory */
8272+
8273+for(i=0;i<=11;i++){
8274+*virt_ptr=i;
8275+virt_ptr++;
8276+
8277+}
8278+
8279+/*read from subsequent memory */
8280+
8281+virt_ptr=(unsigned long *)getpages.virt_address;
8282+for(i=0;i<=15;i++){
8283+printf("virt_ptr@%#x,value:%d\n",virt_ptr,*virt_ptr);
8284+virt_ptr++;
8285+}
8286+/* set the number of bytes we want the drm to allocate */
8287+getpages.size=(PAGE_SIZE- 1000);
8288+
8289+ret_value=ioctl(file_desc,DRM_IOCTL_INTEL_GETPAGES,&getpages);
8290+if(ret_value<0){
8291+printf("ytay ioctl failed!\n");
8292+ exit(-1);
8293+}
8294+printf("ytay ioctl success\n");
8295+printf("ytay size%d,phy_address:%#x,virt_address:%#x,offset:%#x\n",getpages.size,getpages.phy_address,getpages.virt_address,getpages.offset);
8296+
8297+
8298+/* freeing memory */
8299+
8300+drm_intel_freepages_t freepages;
8301+freepages.size=getpages.size;
8302+freepages.phy_address=getpages.phy_address;
8303+freepages.virt_address=getpages.virt_address;
8304+printf("ytay freeing phy_address:%#x,size:%#x\n",freepages.phy_address,freepages.size);
8305+/*
8306+ioctl(file_desc,DRM_IOCTL_INTEL_FREEPAGES,&freepages);
8307+*/
8308+/* init the drm info structure in the drm and test its value */
8309+
8310+ intel_drm_info_t info;
8311+ intel_drm_info_t test_info;
8312+ info.device_id=0x456;
8313+ info.revision=333;
8314+ info.video_memory_offset=0x10245;
8315+ info.video_memory_size=987;
8316+ info.hw_status_offset=0x444;
8317+
8318+ printf("Testing init info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
8319+ info.device_id,info.revision,info.video_memory_offset,info.video_memory_size,info.hw_status_offset);
8320+
8321+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_INIT,&info);
8322+
8323+/* init the drm info structure in the drm and test its value */
8324+
8325+ info.device_id=0x123;
8326+ info.revision=456;
8327+ info.video_memory_offset=0x789;
8328+ info.video_memory_size=111;
8329+ info.hw_status_offset=0x555;
8330+
8331+ printf("Testing init 2nd info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
8332+ info.device_id,info.revision,info.video_memory_offset,info.video_memory_size,info.hw_status_offset);
8333+
8334+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_INIT,&info);
8335+
8336+ printf("Testing init 2nd info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
8337+ info.device_id,info.revision,info.video_memory_offset,info.video_memory_size,info.hw_status_offset);
8338+
8339+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_INIT,&info);
8340+
8341+
8342+
8343+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_GET,&test_info);
8344+
8345+ printf("Got init info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
8346+ test_info.device_id,test_info.revision,test_info.video_memory_offset,test_info.video_memory_size,test_info.hw_status_offset);
8347+
8348+
8349+close(file_desc);
8350+/*
8351+sleep(100000000000);
8352+*/
8353+return 0;
8354+
8355+}
8356diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/psb_intregs.h patch_script_temp/drivers/gpu/drm/iegd/drm/psb_intregs.h
8357--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/psb_intregs.h 1969-12-31 17:00:00.000000000 -0700
8358+++ patch_script_temp/drivers/gpu/drm/iegd/drm/psb_intregs.h 2009-10-06 10:30:05.000000000 -0700
8359@@ -0,0 +1,114 @@
8360+/* -*- pse-c -*-
8361+ *-----------------------------------------------------------------------------
8362+ * Filename: psb_intregs.h
8363+ *-----------------------------------------------------------------------------
8364+ * INTEL CONFIDENTIAL
8365+ * Copyright (2002-2008) Intel Corporation All Rights Reserved.
8366+ * The source code contained or described herein and all documents related to
8367+ * the source code ("Material") are owned by Intel Corporation or its suppliers
8368+ * or licensors. Title to the Material remains with Intel Corporation or its
8369+ * suppliers and licensors. The Material contains trade secrets and proprietary
8370+ * and confidential information of Intel or its suppliers and licensors. The
8371+ * Material is protected by worldwide copyright and trade secret laws and
8372+ * treaty provisions. No part of the Material may be used, copied, reproduced,
8373+ * modified, published, uploaded, posted, transmitted, distributed, or
8374+ * disclosed in any way without Intel's prior express written permission.
8375+ *
8376+ * No license under any patent, copyright, trade secret or other intellectual
8377+ * property right is granted to or conferred upon you by disclosure or
8378+ * delivery of the Materials, either expressly, by implication, inducement,
8379+ * estoppel or otherwise. Any license under such intellectual property rights
8380+ * must be express and approved by Intel in writing.
8381+ *
8382+ *
8383+ *-----------------------------------------------------------------------------
8384+ * Description:
8385+ * This file contains the interrupt-related register definitions and
8386+ * macros for the PLB platform.
8387+ *-----------------------------------------------------------------------------
8388+ */
8389+
8390+#ifndef _REGS_H_
8391+#define _REGS_H_
8392+
8393+/*-----------------------------------------------------------------------------
8394+ * SGX, VDC, and MSVDX interrupt registers
8395+ ----------------------------------------------------------------------------*/
8396+//#define SGX_BASE 0x40000
8397+
8398+#define PSB_MMIO_RESOURCE 0
8399+
8400+#define PSB_VDC_OFFSET 0x00000000
8401+#define PSB_VDC_SIZE 0x000080000
8402+#define PSB_SGX_OFFSET 0x00040000
8403+#define PSB_SGX_SIZE 0x8000
8404+#define PSB_MSVDX_OFFSET 0x00050000
8405+#define PSB_MSVDX_SIZE 0x1000
8406+
8407+/* bits in PSB_CR_EVENT_STATUS */
8408+#define PSB_DPM_3D_MEM_FREE (1<<0)
8409+#define PSB_OUT_OF_MEM_MT (1<<1)
8410+#define PSB_OUT_OF_MEM_GBL (1<<2)
8411+#define PSB_REACHED_MEM_THRESH (1<<3)
8412+#define PSB_TA_TERMINATE (1<<12)
8413+#define PSB_TA_FINISHED (1<<13)
8414+#define PSB_PIXELBE_END_RENDER (1<<18)
8415+#define PSB_DPM_TA_MEM_FREE (1<<24)
8416+#define PSB_DPM_OUT_OF_MEM_ZLS (1<<25)
8417+#define PSB_TWOD_COMPLETE (1<<27)
8418+#define PSB_TA_DPM_FAULT (1<<28)
8419+
8420+#define PSB_BIF_REQ_FAULT (1<<4)
8421+#define PSB_TRIG_DL (1<<5)
8422+#define PSB_TRIG_3D (1<<6)
8423+#define PSB_TRIG_TA (1<<7)
8424+
8425+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
8426+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
8427+#define PSB_CR_EVENT_STATUS2 0x0118
8428+
8429+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
8430+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
8431+#define PSB_CR_EVENT_STATUS 0x012C
8432+
8433+#define PSB_MTX_EVENT_HOST_ENABLE 0x0610
8434+#define PSB_MTX_EVENT_CLEAR 0x060C
8435+#define PSB_MTX_EVENT_STATUS 0x0608
8436+
8437+/*-----------------------------------------------------------------------------
8438+ * Memory mapped I/O Registers Definitions
8439+ *---------------------------------------------------------------------------*/
8440+
8441+/*-----------------------------------------------------------------------------
8442+ * Instruction and Interrupt Control Registers (01000h - 02FFFh)
8443+ *---------------------------------------------------------------------------*/
8444+#define HWSTAM 0x02098 /* Hardware Status Mask */
8445+#define IER 0x020A0 /* Interrupt Enable */
8446+#define IIR 0x020A4 /* Interrupt Identity */
8447+#define IMR 0x020A8 /* Interrupt Mask */
8448+#define ISR 0x020AC /* Interrupt Status */
8449+
8450+#define PIPEA_STAT 0x70024 /* Pipe A Display Status */
8451+#define PIPEB_STAT 0x71024 /* Pipe B Display Status */
8452+
8453+#define VBLANK_CLEAR (1<<1)
8454+#define VSYNC_PIPEB_FLAG (1<<5)
8455+#define VSYNC_PIPEA_FLAG (1<<7)
8456+#define VBLANK_INTERRUPT_ENABLE (1<<17)
8457+#define IRQ_SGX_FLAG (1<<18)
8458+#define IRQ_MSVDX_FLAG (1<<19)
8459+
8460+#define PSB_WVDC32(_val, _offs) \
8461+ iowrite32(_val, priv->vdc_reg + (_offs))
8462+#define PSB_RVDC32(_offs) \
8463+ ioread32(priv->vdc_reg + (_offs))
8464+#define PSB_WSGX32(_val, _offs) \
8465+ iowrite32(_val, priv->sgx_reg + (_offs))
8466+#define PSB_RSGX32(_offs) \
8467+ ioread32(priv->sgx_reg + (_offs))
8468+#define PSB_WMSVDX32(_val, _offs) \
8469+ iowrite32(_val, priv->msvdx_reg + (_offs))
8470+#define PSB_RMSVDX32(_offs) \
8471+ ioread32(priv->msvdx_reg + (_offs))
8472+
8473+#endif /* _REGS_H_ */
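
Editor's illustration (not part of the original patch): a minimal read-modify-write sketch of how the PSB_RVDC32/PSB_WVDC32 wrappers above are used from code that, like psb_irq.c below, has an intel_device_private_t named priv with the VDC register block mapped. The function name and the choice of IRQ_SGX_FLAG are purely illustrative.

static void example_unmask_sgx_irq(intel_device_private_t *priv)
{
	uint32_t ier;

	ier = PSB_RVDC32(IER);		/* current interrupt-enable bits */
	ier |= IRQ_SGX_FLAG;		/* let SGX events through */
	PSB_WVDC32(ier, IER);
	(void)PSB_RVDC32(IER);		/* posting read to flush the write */
}
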
8474diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/psb_irq.c patch_script_temp/drivers/gpu/drm/iegd/drm/psb_irq.c
8475--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/psb_irq.c 1969-12-31 17:00:00.000000000 -0700
8476+++ patch_script_temp/drivers/gpu/drm/iegd/drm/psb_irq.c 2009-10-06 10:30:05.000000000 -0700
8477@@ -0,0 +1,185 @@
8478+/* -*- pse-c -*-
8479+ *-----------------------------------------------------------------------------
8480+ * Filename: psb_irq.c
8481+ *-----------------------------------------------------------------------------
8482+ * INTEL CONFIDENTIAL
8483+ * Copyright (2002-2008) Intel Corporation All Rights Reserved.
8484+ * The source code contained or described herein and all documents related to
8485+ * the source code ("Material") are owned by Intel Corporation or its suppliers
8486+ * or licensors. Title to the Material remains with Intel Corporation or its
8487+ * suppliers and licensors. The Material contains trade secrets and proprietary
8488+ * and confidential information of Intel or its suppliers and licensors. The
8489+ * Material is protected by worldwide copyright and trade secret laws and
8490+ * treaty provisions. No part of the Material may be used, copied, reproduced,
8491+ * modified, published, uploaded, posted, transmitted, distributed, or
8492+ * disclosed in any way without Intel's prior express written permission.
8493+ *
8494+ * No license under any patent, copyright, trade secret or other intellectual
8495+ * property right is granted to or conferred upon you by disclosure or
8496+ * delivery of the Materials, either expressly, by implication, inducement,
8497+ * estoppel or otherwise. Any license under such intellectual property rights
8498+ * must be express and approved by Intel in writing.
8499+ *
8500+ *
8501+ *-----------------------------------------------------------------------------
8502+ * Description:
8503+ * This file contains interrupt-related routines for the PLB platform.
8504+ *-----------------------------------------------------------------------------
8505+ */
8506+
8507+#include <linux/version.h>
8508+#include "drmP.h"
8509+#include "psb_intregs.h"
8510+#include "iegd_drm.h"
8511+
8512+
8513+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
8514+{
8515+ int handled = 0;
8516+ struct drm_device *dev = (struct drm_device *)arg;
8517+ intel_device_private_t *priv=dev->dev_private;
8518+ uint32_t vdc_stat, sgx_stat, sgx_stat2, mtx_stat;
8519+
8520+ spin_lock(&priv->irqmask_lock);
8521+ vdc_stat = PSB_RVDC32(IIR);
8522+ sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
8523+ sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
8524+ mtx_stat = PSB_RMSVDX32(PSB_MTX_EVENT_STATUS);
8525+ vdc_stat &= priv->vdc_irq_mask;
8526+ sgx_stat &= priv->sgx_irq_mask;
8527+ sgx_stat2 &= priv->sgx_irq_mask2;
8528+ mtx_stat &= priv->msvdx_irq_mask;
8529+
8530+ if (vdc_stat) {
8531+ PSB_WVDC32(vdc_stat, IIR);
8532+ (void)PSB_RVDC32(IIR);
8533+
8534+ priv->out_vdc |= vdc_stat;
8535+ handled = 1;
8536+
8537+ if (sgx_stat || sgx_stat2 || mtx_stat) {
8538+ PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
8539+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
8540+ PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
8541+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);
8542+ PSB_WMSVDX32(mtx_stat, PSB_MTX_EVENT_CLEAR);
8543+ (void)PSB_RMSVDX32(PSB_MTX_EVENT_CLEAR);
8544+
8545+ priv->out_sgx |= sgx_stat;
8546+ priv->out_sgx2 |= sgx_stat2;
8547+ priv->out_mtx |= mtx_stat;
8548+
8549+ priv->event_present = 1;
8550+ spin_unlock(&priv->irqmask_lock);
8551+ DRM_WAKEUP(&priv->event_queue);
8552+
8553+ } else {
8554+
8555+ spin_unlock(&priv->irqmask_lock);
8556+
8557+ }
8558+
8559+ } else {
8560+
8561+ spin_unlock(&priv->irqmask_lock);
8562+
8563+ }
8564+
8565+ if (!handled) {
8566+ return IRQ_NONE;
8567+ }
8568+
8569+ return IRQ_HANDLED;
8570+}
8571+
8572+void psb_irq_preinstall(struct drm_device *dev)
8573+{
8574+ intel_device_private_t *priv =
8575+ (intel_device_private_t *)dev->dev_private;
8576+
8577+ spin_lock(&priv->irqmask_lock);
8578+ PSB_WVDC32(0xFFFFFFFF, HWSTAM);
8579+ PSB_WVDC32(0xFFFFFFFF, IMR);
8580+ PSB_WVDC32(0x00000000, IER);
8581+
8582+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
8583+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
8584+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE2);
8585+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE2);
8586+ PSB_WMSVDX32(0x00000000, PSB_MTX_EVENT_HOST_ENABLE);
8587+ (void)PSB_RMSVDX32(PSB_MTX_EVENT_HOST_ENABLE);
8588+
8589+ priv->sgx_irq_mask = PSB_TWOD_COMPLETE |
8590+ PSB_TA_FINISHED | PSB_TA_TERMINATE |
8591+ PSB_PIXELBE_END_RENDER | PSB_DPM_3D_MEM_FREE |
8592+ PSB_OUT_OF_MEM_MT | PSB_OUT_OF_MEM_GBL |
8593+ PSB_REACHED_MEM_THRESH | PSB_DPM_TA_MEM_FREE |
8594+ PSB_DPM_OUT_OF_MEM_ZLS | PSB_TA_DPM_FAULT;
8595+
8596+ priv->sgx_irq_mask2 = PSB_BIF_REQ_FAULT | PSB_TRIG_TA | PSB_TRIG_3D |
8597+ PSB_TRIG_DL;
8598+
8599+ priv->vdc_irq_mask = IRQ_SGX_FLAG | IRQ_MSVDX_FLAG;
8600+
8601+ priv->msvdx_irq_mask = (1<<14); /* Enable only MTX interrupt */
8602+
8603+ spin_unlock(&priv->irqmask_lock);
8604+}
8605+
8606+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
8607+void psb_irq_postinstall(struct drm_device *dev)
8608+#else
8609+int psb_irq_postinstall(struct drm_device *dev)
8610+#endif
8611+{
8612+ intel_device_private_t *priv =
8613+ (intel_device_private_t *)dev->dev_private;
8614+ unsigned long irqflags;
8615+
8616+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
8617+ PSB_WVDC32(priv->vdc_irq_mask, IER);
8618+ PSB_WSGX32(priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
8619+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
8620+ PSB_WSGX32(priv->sgx_irq_mask2, PSB_CR_EVENT_HOST_ENABLE2);
8621+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE2);
8622+ PSB_WMSVDX32(priv->msvdx_irq_mask, PSB_MTX_EVENT_HOST_ENABLE);
8623+ (void)PSB_RMSVDX32(PSB_MTX_EVENT_HOST_ENABLE);
8624+
8625+ priv->irq_enabled = 1;
8626+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
8627+
8628+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
8629+ return 0;
8630+#endif
8631+
8632+}
8633+
8634+void psb_irq_uninstall(struct drm_device *dev)
8635+{
8636+ intel_device_private_t *priv =
8637+ (intel_device_private_t *)dev->dev_private;
8638+ unsigned long irqflags;
8639+
8640+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
8641+
8642+ priv->sgx_irq_mask = 0x00000000;
8643+ priv->sgx_irq_mask2 = 0x00000000;
8644+ priv->vdc_irq_mask = 0x00000000;
8645+ priv->msvdx_irq_mask = 0x00000000;
8646+
8647+	/* By default, we're enabling interrupts but leaving them masked */
8648+ PSB_WVDC32(0xFFFFFFFF, HWSTAM);
8649+ PSB_WVDC32(0xFFFFFFFF, IMR);
8650+ PSB_WVDC32(priv->vdc_irq_mask, IER);
8651+ PSB_WSGX32(priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
8652+ PSB_WSGX32(priv->sgx_irq_mask2, PSB_CR_EVENT_HOST_ENABLE2);
8653+ PSB_WMSVDX32(priv->msvdx_irq_mask, PSB_MTX_EVENT_HOST_ENABLE);
8654+ wmb();
8655+ PSB_WVDC32(PSB_RVDC32(IIR), IIR);
8656+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS), PSB_CR_EVENT_HOST_CLEAR);
8657+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2), PSB_CR_EVENT_HOST_CLEAR2);
8658+ PSB_WMSVDX32(PSB_RMSVDX32(PSB_MTX_EVENT_STATUS), PSB_MTX_EVENT_CLEAR);
8659+
8660+ priv->irq_enabled = 0;
8661+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
8662+}
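
Editor's illustration (not part of the original patch): how the four routines in this file are typically wired into the DRM core on kernels of this era. The hunk does not show IEGD's own drm_driver definition, so the structure below and its field values are an assumption for illustration only.

static struct drm_driver example_driver = {
	.driver_features = DRIVER_HAVE_IRQ,
	.irq_preinstall  = psb_irq_preinstall,
	.irq_postinstall = psb_irq_postinstall,
	.irq_uninstall   = psb_irq_uninstall,
	.irq_handler     = psb_irq_handler,
	/* ...plus the usual fops, ioctls, load/unload hooks, etc. */
};
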
8663diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/global.h patch_script_temp/drivers/gpu/drm/iegd/include/global.h
8664--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/global.h 1969-12-31 17:00:00.000000000 -0700
8665+++ patch_script_temp/drivers/gpu/drm/iegd/include/global.h 2009-10-06 10:30:05.000000000 -0700
8666@@ -0,0 +1,160 @@
8667+/* -*- pse-c -*-
8668+ * Filename: iegd_interface.c
8669+ * $Revision: 1.19 $
8670+ *----------------------------------------------------------------------------
8671+ * <>
8672+ * Copyright © 2008, Intel Corporation.
8673+ *
8674+ * This program is free software; you can redistribute it and/or modify it
8675+ * under the terms and conditions of the GNU General Public License,
8676+ * version 2, as published by the Free Software Foundation.
8677+ *
8678+ * This program is distributed in the hope it will be useful, but WITHOUT
8679+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8680+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8681+ * more details.
8682+ *
8683+ * You should have received a copy of the GNU General Public License along with
8684+ * this program; if not, write to the Free Software Foundation, Inc.,
8685+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8686+ *
8687+ *
8688+ *----------------------------------------------------------------------------
8689+ * Functions:
8690+ *
8691+ *
8692+ *----------------------------------------------------------------------------
8693+ */
8694+
8695+#ifndef _GART_GLOBAL_DEF
8696+#define _GART_GLOBAL_DEF
8697+#include "igd_gart.h"
8698+#include "igd_abs.h"
8699+#include "interface_abs.h"
8700+
8701+
8702+#define APER_ENTRY(a,b) sizeof((a))/(b)
8703+#define AGP_DCACHE_MEMORY 1
8704+#define AGP_PHYS_MEMORY 2
8705+
8706+#define IEGD "IEGD"
8707+
8708+/**
8709+ * This is global data that is shared across files. New global
8710+ * data should go here.
8711+ */
8712+extern gart_dispatch_t *gart_id;
8713+extern dev_private_data_t private_data;
8714+extern struct pci_device_id iegd_pci_table[];
8715+extern dispatch_table_t driver_dispatch_list[];
8716+extern struct gatt_mask iegd_cmn_masks[];
8717+extern struct aper_size_info_fixed iegd_i915_sizes[];
8718+extern struct aper_size_info_fixed iegd_iq35_sizes[];
8719+extern struct aper_size_info_fixed iegd_i965_sizes[];
8720+extern struct aper_size_info_fixed intel_i830_sizes[];
8721+extern struct aper_size_info_fixed intel_i810_sizes[];
8722+extern struct aper_size_info_fixed iegd_igm45_sizes[];
8723+
8724+/* All dispatch table for the chipset family goes here */
8725+extern bridge_driver_t drv_alm;
8726+extern bridge_driver_t drv_nap;
8727+extern bridge_driver_t drv_gn4;
8728+extern bridge_driver_t drv_gm45;
8729+
8730+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
8731+/* Poulsbo */
8732+extern struct aper_size_info_fixed iegd_plb_sizes[];
8733+extern bridge_driver_t drv_plb;
8734+
8735+/* Poulsbo specific structure so that the DRM can utilize the
8736+ * AGP's virtual aperture management code
8737+ */
8738+extern struct vm_operations_struct iegd_plb_vm_ops;
8739+#endif
8740+
8741+
8742+/*
8743+ * Macro to fill device information for PCI devices registration.
8744+ * Copy from public agpgart in kernel source
8745+ */
8746+#define ID(x) { \
8747+ .class = (PCI_CLASS_BRIDGE_HOST << 8), \
8748+ .class_mask = ~0, \
8749+ .vendor = PCI_VENDOR_ID_INTEL, \
8750+ .device = x, \
8751+ .subvendor = PCI_ANY_ID, \
8752+ .subdevice = PCI_ANY_ID, \
8753+}
8754+
8755+#ifdef CONFIG_AGP_DEBUG
8756+#define AGN_DEBUG(x,y...) printk(KERN_INFO "[" IEGD \
8757+ ":DEBUG]:%s " x "\n", __FUNCTION__, ##y)
8758+#else
8759+#define AGN_DEBUG(x,y...) do {} while(0)
8760+#endif
8761+
8762+#define AGN_ERROR(x,y...) printk(KERN_ALERT "[" IEGD \
8763+ ":ERROR]:%s: " x "\n", __FUNCTION__, ##y)
8764+#define AGN_LOG(x,y...) printk(KERN_INFO "[" IEGD "]: " x "\n", ##y)
8765+
8766+/**
8767+ * Global extern function prototypes; common functions
8768+ * should go here. Most of these externs come from
8769+ * drv_cmn.c.
8770+ */
8771+extern int iegd_find_device(u16 device);
8772+extern struct pci_dev *iegd_probe_device(void);
8773+extern void iegd_cmn_init_gtt_entries(void);
8774+extern int AGP_FREE_GATT(iegd_cmn_free_gatt_table);
8775+extern void iegd_cmn_free_by_type(struct agp_memory *curr);
8776+extern struct agp_memory *iegd_cmn_alloc_by_type(
8777+ size_t pg_count, int type);
8778+extern int iegd_cmn_insert_entries(struct agp_memory *mem,
8779+ off_t pg_start, int type);
8780+extern int iegd_cmn_remove_entries(struct agp_memory *mem, off_t pg_start,
8781+ int type);
8782+extern int bridge_driver_init(bridge_driver_t **driver_hook,
8783+ unsigned short did, dispatch_table_t *list );
8784+
8785+#ifndef MSR_IA32_CR_PAT
8786+#define MSR_IA32_CR_PAT 0x0277
8787+#endif
8788+#ifndef _PAGE_PAT
8789+#define _PAGE_PAT 0x080
8790+#endif
8791+extern void agp_init_pat(void);
8792+extern int agp_use_pat (void);
8793+
8794+/**
8795+ * Mask the valid bit into page table entries before
8796+ * inserting them into the GTT table.
8797+ */
8798+unsigned long AGP_MASK_MEMORY(iegd_cmn_mask_memory);
8799+int AGP_CREATE_GATT(iegd_alm_create_gatt_table);
8800+extern int iegd_cmn_configure(void);
8801+extern void AGP_ENABLE(iegd_cmn_agp_enable);
8802+
8803+/* Global DRM function prototype */
8804+extern int intel_init(void);
8805+extern void intel_exit(void);
8806+extern int drm_init(void);
8807+extern void drm_cleanup(void);
8808+
8809+/* Generic function to dispatch the information according to
8810+ * chipset id */
8811+static __inline void *dispatch_acquire(
8812+
8813+ unsigned short did,
8814+ dispatch_table_t *table_list) {
8815+
8816+ dispatch_table_t *curr = table_list;
8817+ while(curr && (curr->did != 0)) {
8818+ if(curr->did == did) {
8819+ return curr->table;
8820+ }
8821+ curr++;
8822+ }
8823+
8824+ return NULL;
8825+}
8826+#endif
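
Editor's illustration (not part of the original patch): how the dispatch_acquire() helper above is meant to be called. The table must end with a did == 0 entry, matching the loop's termination condition; the device IDs chosen here and the use of bridge_driver_t as the payload type are assumptions for the example.

static dispatch_table_t example_dispatch_list[] = {
	{ PCI_DEVICE_ID_945GM, &drv_nap },	/* Napa family */
	{ PCI_DEVICE_ID_GM45,  &drv_gm45 },	/* Cantiga family */
	{ 0, NULL },				/* terminator: did == 0 */
};

static bridge_driver_t *example_lookup(unsigned short did)
{
	/* returns the matching payload, or NULL if the id is unknown */
	return dispatch_acquire(did, example_dispatch_list);
}
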
8827diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/igd_abs.h patch_script_temp/drivers/gpu/drm/iegd/include/igd_abs.h
8828--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/igd_abs.h 1969-12-31 17:00:00.000000000 -0700
8829+++ patch_script_temp/drivers/gpu/drm/iegd/include/igd_abs.h 2009-10-06 10:30:05.000000000 -0700
8830@@ -0,0 +1,136 @@
8831+/* -*- pse-c -*-
8832+ *----------------------------------------------------------------------------
8833+ * Filename: iegd_interface.c
8834+ * $Revision: 1.15 $
8835+ *----------------------------------------------------------------------------
8836+ * <>
8837+ * Copyright © 2008, Intel Corporation.
8838+ *
8839+ * This program is free software; you can redistribute it and/or modify it
8840+ * under the terms and conditions of the GNU General Public License,
8841+ * version 2, as published by the Free Software Foundation.
8842+ *
8843+ * This program is distributed in the hope it will be useful, but WITHOUT
8844+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8845+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8846+ * more details.
8847+ *
8848+ * You should have received a copy of the GNU General Public License along with
8849+ * this program; if not, write to the Free Software Foundation, Inc.,
8850+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8851+ *
8852+ *
8853+ *----------------------------------------------------------------------------
8854+ * Functions:
8855+ *
8856+ *
8857+ *----------------------------------------------------------------------------
8858+ */
8859+
8860+#ifndef _KERNEL_ABS_LAYER
8861+#define _KERNEL_ABS_LAYER
8862+
8863+#include <linux/version.h>
8864+
8865+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
8866+#define IGD_FREE_MEM(a) agp_free_page_array(a)
8867+#else
8868+#define IGD_FREE_MEM(a) vfree((a)->memory)
8869+#endif
8870+
8871+#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,5)
8872+#define DRM_INIT_MODULE() drm_init()
8873+#define DRM_EXIT_MODULE() drm_cleanup()
8874+#define AGP_RET(a) ((a)>=0) ? 1 : 0
8875+#else
8876+#define DRM_INIT_MODULE() intel_init()
8877+#define DRM_EXIT_MODULE() intel_exit()
8878+#define AGP_RET(a) ((a)==0) ? 1 : 0
8879+#endif
8880+
8881+#if LINUX_VERSION_CODE<KERNEL_VERSION(2,6,10)
8882+#define IGD_PCI_SAVE_STATE(a,b) pci_save_state(a,b)
8883+#define IGD_PCI_RESTORE_STATE(a,b) pci_restore_state(a,b)
8884+#define pm_message_t u32
8885+#define IGD_IS_SUSPEND(state) ((state)==3)
8886+#else
8887+#define IGD_PCI_SAVE_STATE(a,b) pci_save_state(a)
8888+#define IGD_PCI_RESTORE_STATE(a,b) pci_restore_state(a)
8889+#define IGD_IS_SUSPEND(state) \
8890+ (((state.event)==PM_EVENT_SUSPEND) | ((state.event)==PM_EVENT_FREEZE))
8891+#endif
8892+
8893+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
8894+#define AGP_LOCK_PAGE(a)
8895+#define AGP_UNLOCK_PAGE(a)
8896+#else
8897+#define AGP_LOCK_PAGE(a) SetPageLocked((a))
8898+#define AGP_UNLOCK_PAGE(a) unlock_page((a))
8899+#endif
8900+
8901+#define MASK_PTE(a,b) (a)->driver->masks[(b)].mask
8902+#define AGP_MASK_ADDR(x) MASK_PTE((x),type)
8903+
8904+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
8905+#ifndef TRUE
8906+#define TRUE 1
8907+#endif
8908+#ifndef FALSE
8909+#define FALSE 0
8910+#endif
8911+#define SET_PAGES_UC(a,b) set_pages_uc(a,b)
8912+#define SET_PAGES_WB(a,b) set_pages_wb(a,b)
8913+#define GLOBAL_FLUSH_TLB()
8914+#ifndef SetPageLocked
8915+#define SetPageLocked(page) set_bit(PG_locked, &page->flags);
8916+#endif
8917+#else
8918+#define SET_PAGES_UC(a,b) change_page_attr(a,b,PAGE_KERNEL_NOCACHE)
8919+#define SET_PAGES_WB(a,b) change_page_attr(a,b,PAGE_KERNEL)
8920+#define GLOBAL_FLUSH_TLB() global_flush_tlb()
8921+#endif
8922+
8923+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
8924+#define ON_EACH_CPU(a,b,c,d) on_each_cpu(a,b,d)
8925+/* Note: drm_dev_to_irq appears 2.6.28, but some distros
8926+ * have pulled it into earlier versions of their kernel.
8927+ * That's why it's defined here.
8928+ */
8929+#define DRM_DEV_TO_IRQ(a) drm_dev_to_irq(a)
8930+#else
8931+#define ON_EACH_CPU(a,b,c,d) on_each_cpu(a,b,c,d)
8932+#define DRM_DEV_TO_IRQ(a) (a->irq)
8933+#endif
8934+
8935+
8936+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
8937+#define AGP_MEM_TYPE struct page*
8938+#define CONVERT_PAGE_TO_GART(a) phys_to_gart(page_to_phys(a));
8939+#define AGP_MEMORY_MEMBER pages
8940+#define PAGE_ADDRESS(a) a
8941+#else
8942+#define AGP_MEM_TYPE void*
8943+#define CONVERT_PAGE_TO_GART(a) a
8944+#define AGP_MEMORY_MEMBER memory
8945+#define PAGE_ADDRESS(a) page_address(a)
8946+#endif
8947+
8948+
8949+/*
8950+ * Kernel interface abstraction. These macros will
8951+ * point to the proper definition for the particular
8952+ * kernel.
8953+ */
8954+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
8955+#define AGP_MASK_MEMORY(f) _MASK_MEMORY_PAGE(f)
8956+#else
8957+#define AGP_MASK_MEMORY(f) _MASK_MEMORY(f)
8958+#endif
8959+#define AGP_CREATE_GATT(f) _CREATE_GATT_TABLE(f)
8960+#define AGP_FREE_GATT(f) _FREE_GATT_TABLE(f)
8961+#define AGP_ALLOC_PAGE(f) _ALLOC_PAGE_AGP(f)
8962+#define AGP_ENABLE(f) _ENABLE_AGP(f)
8963+#define AGP_TYPE_TO_MASK_TYPE(f) _TYPE_TO_MASK_TYPE(f)
8964+
8965+#define AGP_MASK_GTT() _mask_gtt()
8966+#endif
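
Editor's illustration (not part of the original patch): the kind of loop the AGP_MEM_TYPE / AGP_MEMORY_MEMBER / PAGE_ADDRESS abstraction above is aimed at. The same source walks mem->memory (void * entries, before 2.6.31) or mem->pages (struct page * entries, 2.6.31 and later); the function name is an assumption.

static void example_walk_agp_memory(struct agp_memory *mem)
{
	int i;

	for (i = 0; i < mem->page_count; i++) {
		/* void * before 2.6.31, struct page * from 2.6.31 onwards */
		AGP_MEM_TYPE entry = mem->AGP_MEMORY_MEMBER[i];

		(void)entry;	/* masked and written into a GTT slot here */
	}
}
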
8967diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/igd_gart.h patch_script_temp/drivers/gpu/drm/iegd/include/igd_gart.h
8968--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/igd_gart.h 1969-12-31 17:00:00.000000000 -0700
8969+++ patch_script_temp/drivers/gpu/drm/iegd/include/igd_gart.h 2009-10-06 10:30:05.000000000 -0700
8970@@ -0,0 +1,81 @@
8971+/* -*- pse-c -*-
8972+ *----------------------------------------------------------------------------
8973+ * Filename: igd_gart.h
8974+ * $Revision: 1.10 $
8975+ *----------------------------------------------------------------------------
8976+ * <>
8977+ * Copyright © 2008, Intel Corporation.
8978+ *
8979+ * This program is free software; you can redistribute it and/or modify it
8980+ * under the terms and conditions of the GNU General Public License,
8981+ * version 2, as published by the Free Software Foundation.
8982+ *
8983+ * This program is distributed in the hope it will be useful, but WITHOUT
8984+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8985+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8986+ * more details.
8987+ *
8988+ * You should have received a copy of the GNU General Public License along with
8989+ * this program; if not, write to the Free Software Foundation, Inc.,
8990+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8991+ *
8992+ *
8993+ *----------------------------------------------------------------------------
8994+ * Functions:
8995+ *
8996+ *
8997+ *----------------------------------------------------------------------------
8998+ */
8999+
9000+#ifndef _INIT_GART_DISPATCH
9001+#define _INIT_GART_DISPATCH
9002+
9003+#include <linux/pci.h>
9004+#include <linux/agp_backend.h>
9005+#include "agp.h"
9006+
9007+typedef struct agp_bridge_driver bridge_driver_t;
9008+typedef struct agp_bridge_data bridge_data_t;
9009+
9010+/* Dispatch table that contains information about a
9011+ * specific chipset */
9012+typedef struct _gart_dispatch {
9013+ unsigned short vendor_id;
9014+ unsigned short bridge_id; /* Bridge device id */
9015+ unsigned short device_id; /* chipset id */
9016+ char *name; /* Name for the chipset */
9017+ unsigned short dev_flag;
9018+ struct pci_driver *old_gart; /* old gart info */
9019+ struct pci_dev *bridge_pdev; /* Bridge device info */
9020+ bridge_data_t *bridge_info; /* bridge information for gart */
9021+}gart_dispatch_t;
9022+
9023+/* Structure that keeps the private data for the chipset */
9024+typedef struct _dev_private_data {
9025+ struct pci_dev *pdev;
9026+ volatile u8 __iomem *registers;
9027+ volatile u32 __iomem *gtt;
9028+ union {
9029+ int num_dcache_entries;
9030+ int gtt_entries;
9031+ };
9032+ u32 pm_save[16]; /* PCI config saved here on suspend/resume. */
9033+ /* Required for older kernel versions. */
9034+ int split_gtt;
9035+ volatile u32 __iomem *upper_gtt;
9036+}dev_private_data_t;
9037+
9038+/* Dispatch table for function hook */
9039+typedef struct _dispatch_table {
9040+ unsigned short did;
9041+ void *table;
9042+}dispatch_table_t;
9043+
9044+/* Table containing the function pointers for a specific chipset */
9045+typedef struct _driver_func_table {
9046+ bridge_driver_t driver_func; /* Contained actual function */
9047+ void (*config_private)(void); /* config private */
9048+}driver_func_table_t;
9049+
9050+
9051+#endif
9052diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/intelpci.h patch_script_temp/drivers/gpu/drm/iegd/include/intelpci.h
9053--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/intelpci.h 1969-12-31 17:00:00.000000000 -0700
9054+++ patch_script_temp/drivers/gpu/drm/iegd/include/intelpci.h 2009-10-06 10:30:05.000000000 -0700
9055@@ -0,0 +1,178 @@
9056+/* -*- pse-c -*-
9057+ *----------------------------------------------------------------------------
9058+ * Filename: intelpci.h
9059+ * $Revision: 1.16 $
9060+ *----------------------------------------------------------------------------
9061+ * <>
9062+ * Copyright © 2008, Intel Corporation.
9063+ *
9064+ * This program is free software; you can redistribute it and/or modify it
9065+ * under the terms and conditions of the GNU General Public License,
9066+ * version 2, as published by the Free Software Foundation.
9067+ *
9068+ * This program is distributed in the hope it will be useful, but WITHOUT
9069+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9070+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9071+ * more details.
9072+ *
9073+ * You should have received a copy of the GNU General Public License along with
9074+ * this program; if not, write to the Free Software Foundation, Inc.,
9075+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9076+ *
9077+ *
9078+ *----------------------------------------------------------------------------
9079+ * Functions:
9080+ *
9081+ *
9082+ *----------------------------------------------------------------------------
9083+ */
9084+
9085+#define PCI_VENDOR_ID_INTEL 0x8086
9086+
9087+
9088+/* Start: Whitney core specific */
9089+#define PCI_DEVICE_ID_BRIDGE_810 0x7120
9090+#define PCI_DEVICE_ID_810 0x7121
9091+
9092+#define PCI_DEVICE_ID_BRIDGE_810DC 0x7122
9093+#define PCI_DEVICE_ID_810DC 0x7123
9094+
9095+#define PCI_DEVICE_ID_BRIDGE_810E 0x7124
9096+#define PCI_DEVICE_ID_810E 0x7125
9097+
9098+#define PCI_DEVICE_ID_BRIDGE_815 0x1130
9099+#define PCI_DEVICE_ID_815 0x1132
9100+
9101+
9102+/* Start: Almador core specific */
9103+#define PCI_DEVICE_ID_BRIDGE_830M 0x3575
9104+#define PCI_DEVICE_ID_830M 0x3577
9105+#define PCI_DEVICE_ID_AGP_830M 0x3576
9106+
9107+#define PCI_DEVICE_ID_BRIDGE_835 0x3579
9108+#define PCI_DEVICE_ID_835 0x357b
9109+#define PCI_DEVICE_ID_AGP_835 0x357a
9110+
9111+#define PCI_DEVICE_ID_BRIDGE_845G 0x2560
9112+#define PCI_DEVICE_ID_845G 0x2562
9113+#define PCI_DEVICE_ID_AGP_845G 0x0000
9114+
9115+#define PCI_DEVICE_ID_BRIDGE_855 0x3580 /* Montara-G */
9116+#define PCI_DEVICE_ID_MEM_855 0x3584
9117+#define PCI_DEVICE_ID_855 0x3582
9118+#define PCI_DEVICE_ID_AGP_855 0x0000
9119+
9120+#define PCI_DEVICE_ID_BRIDGE_865G 0x2570
9121+#define PCI_DEVICE_ID_865G 0x2572
9122+#define PCI_DEVICE_ID_AGP_865G 0x0000
9123+
9124+
9125+/* Start: Napa core specific */
9126+/* Grantsdale - 915G/915GV */
9127+#define PCI_DEVICE_ID_BRIDGE_915GD 0x2580
9128+#define PCI_DEVICE_ID_PEG_915GD 0x2581
9129+#define PCI_DEVICE_ID_915GD 0x2582
9130+/* Grantsdale - 910GL*/
9131+#define PCI_DEVICE_ID_BRIDGE_910GL 0x258C
9132+#define PCI_DEVICE_ID_PEG_910GL PCI_DEVICE_ID_PEG_915GD
9133+#define PCI_DEVICE_ID_910GL PCI_DEVICE_ID_915GD
9134+/* Alviso - 915GM/GMS/910GML*/
9135+#define PCI_DEVICE_ID_BRIDGE_915AL 0x2590
9136+#define PCI_DEVICE_ID_PEG_915AL 0x2591
9137+#define PCI_DEVICE_ID_915AL 0x2592
9138+
9139+/* Lakeport - 945G */
9140+#define PCI_DEVICE_ID_BRIDGE_945G 0x2770
9141+#define PCI_DEVICE_ID_PEG_945G 0x2771
9142+#define PCI_DEVICE_ID_945G 0x2772
9143+
9144+/* Calistoga - 945GM */
9145+#define PCI_DEVICE_ID_BRIDGE_945GM 0x27A0
9146+#define PCI_DEVICE_ID_PEG_945GM 0x27A1
9147+#define PCI_DEVICE_ID_945GM 0x27A2
9148+
9149+/* Calistoga Westbriar - 945GME/GSE */
9150+#define PCI_DEVICE_ID_BRIDGE_945GME 0x27AC
9151+#define PCI_DEVICE_ID_PEG_945GME 0x27AD
9152+#define PCI_DEVICE_ID_945GME 0x27AE
9153+
9154+/* Bearlake B - Q35 */
9155+#define PCI_DEVICE_ID_BRIDGE_Q35 0x29C0
9156+#define PCI_DEVICE_ID_PEG_Q35 0x29C1
9157+#define PCI_DEVICE_ID_Q35 0x29C2
9158+
9159+/* Bearlake B - Q35 */
9160+#define PCI_DEVICE_ID_BRIDGE_Q35A2 0x29B0
9161+#define PCI_DEVICE_ID_PEG_Q35A2 0x29B1
9162+#define PCI_DEVICE_ID_Q35A2 0x29B2
9163+
9164+/* Start: Gen4 core specific*/
9165+/* Broadwater - Unlocked - 965G */
9166+#define PCI_DEVICE_ID_BRIDGE_965G 0x2980
9167+#define PCI_DEVICE_ID_PEG_965G 0x2981
9168+#define PCI_DEVICE_ID_965G 0x2982
9169+
9170+/* Broadwater - Value - 945GZ */
9171+#define PCI_DEVICE_ID_BRIDGE_946GZ 0x2970
9172+#define PCI_DEVICE_ID_PEG_946GZ 0x2971
9173+#define PCI_DEVICE_ID_946GZ 0x2972
9174+
9175+/* Broadwater - Consumer - G965 */
9176+#define PCI_DEVICE_ID_BRIDGE_G965 0x29A0
9177+#define PCI_DEVICE_ID_PEG_G965 0x29A1
9178+#define PCI_DEVICE_ID_G965 0x29A2
9179+
9180+/* Broadwater - Corporate - Q965/Q963 */
9181+#define PCI_DEVICE_ID_BRIDGE_Q965 0x2990
9182+#define PCI_DEVICE_ID_PEG_Q965 0x2991
9183+#define PCI_DEVICE_ID_Q965 0x2992
9184+
9185+/* Crestline - Generic GM965 */
9186+#define PCI_DEVICE_ID_BRIDGE_GM965 0x2A00
9187+#define PCI_DEVICE_ID_PEG_GM965 0x2A01
9188+#define PCI_DEVICE_ID_GM965 0x2A02
9189+
9190+/* Crestline Westbriar GME965 */
9191+#define PCI_DEVICE_ID_BRIDGE_GME965 0x2A10
9192+#define PCI_DEVICE_ID_PEG_GME965 0x2A11
9193+#define PCI_DEVICE_ID_GME965 0x2A12
9194+
9195+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
9196+/* Poulsbo */
9197+#define PCI_DEVICE_ID_BRIDGE_PLB 0x8100
9198+#define PCI_DEVICE_ID_PEG_PLB 0x8101
9199+#define PCI_DEVICE_ID_PLB 0x8108
9200+#endif
9201+
9202+/* Cantiga GM45 */
9203+#define PCI_DEVICE_ID_BRIDGE_GM45 0x2A40
9204+#define PCI_DEVICE_ID_PEG_GM45 0x2A41
9205+#define PCI_DEVICE_ID_GM45 0x2A42
9206+
9207+#define PCI_DEVICE_ID_BRIDGE_ELK 0x2E00
9208+#define PCI_DEVICE_ID_PEG_ELK 0x2E01
9209+#define PCI_DEVICE_ID_ELK 0x2E02
9210+
9211+#define PCI_DEVICE_ID_BRIDGE_Q45 0x2E10
9212+#define PCI_DEVICE_ID_PEG_Q45 0x2E11
9213+#define PCI_DEVICE_ID_Q45 0x2E12
9214+
9215+#define PCI_DEVICE_ID_BRIDGE_G45 0x2E20
9216+#define PCI_DEVICE_ID_PEG_G45 0x2E21
9217+#define PCI_DEVICE_ID_G45 0x2E22
9218+
9219+#define PCI_DEVICE_ID_BRIDGE_G41 0x2E30
9220+#define PCI_DEVICE_ID_PEG_G41 0x2E31
9221+#define PCI_DEVICE_ID_G41 0x2E32
9222+
9223+#define I915_GMADDR 0x18
9224+#define I915_MMADDR 0x10
9225+#define I915_PTEADDR 0x1C
9226+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
9227+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
9228+
9229+/* intel Q35 register */
9230+#define IQ35_BASE_STOLEN 0x5c
9231+#define IQ35_GTT_MEM_SIZE 0x300
9232+#define IQ35_GGMS_1MB 0x100
9233+#define IQ35_GGMS_2MB 0x200
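
Editor's illustration (not part of the original patch): how the bridge IDs above are typically fed through the ID() helper from include/global.h to build the pci_device_id table the GART driver registers with. The table name and the particular IDs picked are illustrative only.

static struct pci_device_id example_pci_table[] = {
	ID(PCI_DEVICE_ID_BRIDGE_945GM),
	ID(PCI_DEVICE_ID_BRIDGE_PLB),
	ID(PCI_DEVICE_ID_BRIDGE_GM45),
	{ }					/* end of table */
};
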
9234diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/interface_abs.h patch_script_temp/drivers/gpu/drm/iegd/include/interface_abs.h
9235--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/interface_abs.h 1969-12-31 17:00:00.000000000 -0700
9236+++ patch_script_temp/drivers/gpu/drm/iegd/include/interface_abs.h 2009-10-06 10:30:05.000000000 -0700
9237@@ -0,0 +1,48 @@
9238+/* -*- pse-c -*-
9239+ *----------------------------------------------------------------------------
9240+ * Filename: iegd_interface.c
9241+ * $Revision: 1.4 $
9242+ *----------------------------------------------------------------------------
9243+ * <>
9244+ * Copyright © 2006, Intel Corporation.
9245+ *
9246+ * This program is free software; you can redistribute it and/or modify it
9247+ * under the terms and conditions of the GNU General Public License,
9248+ * version 2, as published by the Free Software Foundation.
9249+ *
9250+ * This program is distributed in the hope it will be useful, but WITHOUT
9251+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9252+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9253+ * more details.
9254+ *
9255+ * You should have received a copy of the GNU General Public License along with
9256+ * this program; if not, write to the Free Software Foundation, Inc.,
9257+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9258+ *
9259+ *----------------------------------------------------------------------------
9260+ * Functions:
9261+ *
9262+ *
9263+ *----------------------------------------------------------------------------
9264+ */
9265+
9266+#ifndef _AGP_INTERFACE_ABS_LAYER
9267+#define _AGP_INTERFACE_ABS_LAYER
9268+
9269+#define AGP_BRIDGE_VAR bridge
9270+
9271+#define _MASK_MEMORY_PAGE(f) f( struct agp_bridge_data *bridge, \
9272+ struct page* addr, int type)
9273+
9274+#define _MASK_MEMORY(f) f( struct agp_bridge_data *bridge, \
9275+ unsigned long addr, int type)
9276+
9277+#define _CREATE_GATT_TABLE(f) f(struct agp_bridge_data *bridge)
9278+#define _FREE_GATT_TABLE(f) f(struct agp_bridge_data *bridge)
9279+#define _ALLOC_PAGE_AGP(f) f(struct agp_bridge_data *bridge)
9280+#define _ENABLE_AGP(f) f(struct agp_bridge_data *bridge, u32 mode)
9281+#define _TYPE_TO_MASK_TYPE(f) f(struct agp_bridge_data *bridge, int x)
9282+
9283+#define _mask_gtt() agp_bridge->driver->mask_memory( \
9284+ agp_bridge, mem->pages[i], mem->type)
9285+#endif
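
Editor's illustration (not part of the original patch): what the signature-generating macros above expand to when combined with AGP_MASK_MEMORY() from igd_abs.h, so a single declaration in the driver matches whichever prototype the kernel's agpgart expects.

/* Declared once in the driver (cf. include/global.h): */
unsigned long AGP_MASK_MEMORY(iegd_cmn_mask_memory);

/* which the preprocessor turns into one of the following, depending on the
 * kernel being built against:
 *
 *   2.6.31+ (via _MASK_MEMORY_PAGE):
 *     unsigned long iegd_cmn_mask_memory(struct agp_bridge_data *bridge,
 *                                        struct page *addr, int type);
 *
 *   earlier kernels (via _MASK_MEMORY):
 *     unsigned long iegd_cmn_mask_memory(struct agp_bridge_data *bridge,
 *                                        unsigned long addr, int type);
 */
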
9286diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/include/linux/config.h patch_script_temp/include/linux/config.h
9287--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/include/linux/config.h 1969-12-31 17:00:00.000000000 -0700
9288+++ patch_script_temp/include/linux/config.h 2009-10-06 10:30:05.000000000 -0700
9289@@ -0,0 +1 @@
9290+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-samsung.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-samsung.patch
deleted file mode 100644
index 88c9788013..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-samsung.patch
+++ /dev/null
@@ -1,206 +0,0 @@
1
2
3From: Greg Kroah-Hartman <gregkh@suse.de>
4Subject: Samsung backlight driver
5
6This driver implements backlight controls for Samsung laptops that currently do not have ACPI support for this control.
7
8It has been tested on the N130 laptop and properly works there.
9
10Info for the NC10 was provided by Soeren Sonnenburg <bugreports@nn7.de> Info for the NP-Q45 from Jie Huchet <jeremie@lamah.info>
11
12Many thanks to Dmitry Torokhov <dmitry.torokhov@gmail.com> for cleanups and other suggestions on how to make the driver simpler.
13
14Cc: Soeren Sonnenburg <bugreports@nn7.de>
15Cc: Jie Huchet <jeremie@lamah.info>
16Cc: Dmitry Torokhov <dmitry.torokhov@gmail.com>
17Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
18
19---
20 drivers/platform/x86/Kconfig | 12 ++
21 drivers/platform/x86/Makefile | 1
22 drivers/platform/x86/samsung-backlight.c | 157 +++++++++++++++++++++++++++++++
23 3 files changed, 170 insertions(+)
24diff -purN vanilla-2.6.31-rc6/drivers/platform/x86/Kconfig linux-2.6.31-rc6/drivers/platform/x86/Kconfig
25--- vanilla-2.6.31-rc6/drivers/platform/x86/Kconfig 2009-08-17 20:55:37.000000000 +0000
26+++ linux-2.6.31-rc6/drivers/platform/x86/Kconfig 2009-08-17 20:58:25.000000000 +0000
27@@ -425,4 +425,16 @@ config ACPI_TOSHIBA
28
29 If you have a legacy free Toshiba laptop (such as the Libretto L1
30 series), say Y.
31+
32+config SAMSUNG_BACKLIGHT
33+ tristate "Samsung Backlight driver"
34+ depends on BACKLIGHT_CLASS_DEVICE
35+ depends on DMI
36+ ---help---
37+ This driver adds support to control the backlight on a number of
38+ Samsung laptops, like the N130.
39+
40+ It will only be loaded on laptops that properly need it, so it is
41+ safe to say Y here.
42+
43 endif # X86_PLATFORM_DEVICES
44diff -purN vanilla-2.6.31-rc6/drivers/platform/x86/Makefile linux-2.6.31-rc6/drivers/platform/x86/Makefile
45--- vanilla-2.6.31-rc6/drivers/platform/x86/Makefile 2009-08-17 20:55:37.000000000 +0000
46+++ linux-2.6.31-rc6/drivers/platform/x86/Makefile 2009-08-17 20:58:44.000000000 +0000
47@@ -20,3 +20,4 @@ obj-$(CONFIG_INTEL_MENLOW) += intel_menl
48 obj-$(CONFIG_ACPI_WMI) += wmi.o
49 obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
50 obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
51+obj-$(CONFIG_SAMSUNG_BACKLIGHT) += samsung-backlight.o
52diff -purN vanilla-2.6.31-rc6/drivers/platform/x86/samsung-backlight.c linux-2.6.31-rc6/drivers/platform/x86/samsung-backlight.c
53--- vanilla-2.6.31-rc6/drivers/platform/x86/samsung-backlight.c 1970-01-01 00:00:00.000000000 +0000
54+++ linux-2.6.31-rc6/drivers/platform/x86/samsung-backlight.c 2009-08-17 21:00:10.000000000 +0000
55@@ -0,0 +1,151 @@
56+/*
57+ * Samsung N130 and NC10 Laptop Backlight driver
58+ *
59+ * Copyright (C) 2009 Greg Kroah-Hartman (gregkh@suse.de)
60+ * Copyright (C) 2009 Novell Inc.
61+ *
62+ * This program is free software; you can redistribute it and/or modify it
63+ * under the terms of the GNU General Public License version 2 as published by
64+ * the Free Software Foundation.
65+ */
66+
67+#include <linux/kernel.h>
68+#include <linux/init.h>
69+#include <linux/module.h>
70+#include <linux/pci.h>
71+#include <linux/backlight.h>
72+#include <linux/fb.h>
73+#include <linux/dmi.h>
74+
75+#define MAX_BRIGHT 0xff
76+#define OFFSET 0xf4
77+
78+static int offset = OFFSET;
79+module_param(offset, int, S_IRUGO | S_IWUSR);
80+MODULE_PARM_DESC(offset, "The offset into the PCI device for the brightness control");
81+static struct pci_dev *pci_device;
82+static struct backlight_device *backlight_device;
83+
84+static u8 read_brightness(void)
85+{
86+ u8 brightness;
87+
88+ pci_read_config_byte(pci_device, offset, &brightness);
89+ return brightness;
90+}
91+
92+static void set_brightness(u8 brightness) {
93+ pci_write_config_byte(pci_device, offset, brightness); }
94+
95+static int get_brightness(struct backlight_device *bd) {
96+ return bd->props.brightness;
97+}
98+
99+static int update_status(struct backlight_device *bd) {
100+ set_brightness(bd->props.brightness);
101+ return 0;
102+}
103+
104+static struct backlight_ops backlight_ops = {
105+ .get_brightness = get_brightness,
106+ .update_status = update_status,
107+};
108+
109+static int __init dmi_check_cb(const struct dmi_system_id *id) {
110+ printk(KERN_INFO KBUILD_MODNAME ": found laptop model '%s'\n",
111+ id->ident);
112+ return 0;
113+}
114+
115+static struct dmi_system_id __initdata samsung_dmi_table[] = {
116+ {
117+ .ident = "N120",
118+ .matches = {
119+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
120+ DMI_MATCH(DMI_PRODUCT_NAME, "N120"),
121+ DMI_MATCH(DMI_BOARD_NAME, "N120"),
122+ },
123+ .callback = dmi_check_cb,
124+ },
125+ {
126+ .ident = "N130",
127+ .matches = {
128+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
129+ DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
130+ DMI_MATCH(DMI_BOARD_NAME, "N130"),
131+ },
132+ .callback = dmi_check_cb,
133+ },
134+ {
135+ .ident = "NC10",
136+ .matches = {
137+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
138+ DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
139+ DMI_MATCH(DMI_BOARD_NAME, "NC10"),
140+ },
141+ .callback = dmi_check_cb,
142+ },
143+ {
144+ .ident = "NP-Q45",
145+ .matches = {
146+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
147+ DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
148+ DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
149+ },
150+ .callback = dmi_check_cb,
151+ },
152+ { },
153+};
154+
155+static int __init samsung_init(void)
156+{
157+ if (!dmi_check_system(samsung_dmi_table))
158+ return -ENODEV;
159+
160+ /*
161+ * The Samsung N120, N130, and NC10 use pci device id 0x27ae, while the
162+ * NP-Q45 uses 0x2a02. Odds are we might need to add more to the
163+ * list over time...
164+ */
165+ pci_device = pci_get_device(PCI_VENDOR_ID_INTEL, 0x27ae, NULL);
166+ if (!pci_device) {
167+ pci_device = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2a02, NULL);
168+ if (!pci_device)
169+ return -ENODEV;
170+ }
171+
172+ /* create a backlight device to talk to this one */
173+ backlight_device = backlight_device_register("samsung",
174+ &pci_device->dev,
175+ NULL, &backlight_ops);
176+ if (IS_ERR(backlight_device)) {
177+ pci_dev_put(pci_device);
178+ return PTR_ERR(backlight_device);
179+ }
180+
181+ backlight_device->props.max_brightness = MAX_BRIGHT;
182+ backlight_device->props.brightness = read_brightness();
183+ backlight_device->props.power = FB_BLANK_UNBLANK;
184+ backlight_update_status(backlight_device);
185+
186+ return 0;
187+}
188+
189+static void __exit samsung_exit(void)
190+{
191+ backlight_device_unregister(backlight_device);
192+
193+ /* we are done with the PCI device, put it back */
194+ pci_dev_put(pci_device);
195+}
196+
197+module_init(samsung_init);
198+module_exit(samsung_exit);
199+
200+MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
201+MODULE_DESCRIPTION("Samsung Backlight driver");
202+MODULE_LICENSE("GPL");
203+MODULE_ALIAS("dmi:*:svnSAMSUNGELECTRONICSCO.,LTD.:pnN120:*:rnN120:*");
204+MODULE_ALIAS("dmi:*:svnSAMSUNGELECTRONICSCO.,LTD.:pnN130:*:rnN130:*");
205+MODULE_ALIAS("dmi:*:svnSAMSUNGELECTRONICSCO.,LTD.:pnNC10:*:rnNC10:*");
206+MODULE_ALIAS("dmi:*:svnSAMSUNGELECTRONICSCO.,LTD.:pnSQ45S70S:*:rnSQ45S70S:*");
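For reference, the driver above registers with the kernel backlight class under the name "samsung", so brightness ends up adjustable from userspace through sysfs. The standalone C sketch below is illustration only, not part of the patch or of Moblin; it assumes sysfs is mounted at /sys and that the device registered above appears as /sys/class/backlight/samsung/.

#include <stdio.h>
#include <stdlib.h>

/* assumes a standard sysfs mount and the "samsung" name used above */
#define SYSFS_DIR "/sys/class/backlight/samsung/"

/* read a single integer attribute such as "brightness" */
static int read_attr(const char *name)
{
	char path[128];
	int val = -1;
	FILE *f;

	snprintf(path, sizeof(path), SYSFS_DIR "%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(int argc, char **argv)
{
	int max = read_attr("max_brightness");	/* 0xff for this driver */
	int cur = read_attr("brightness");

	printf("brightness %d of %d\n", cur, max);

	if (argc > 1) {		/* optional new value given on the command line */
		FILE *f = fopen(SYSFS_DIR "brightness", "w");

		if (!f)
			return 1;
		fprintf(f, "%d\n", atoi(argv[1]));
		fclose(f);
	}
	return 0;
}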
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-silence-wacom.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-silence-wacom.patch
deleted file mode 100644
index 635709ea91..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-silence-wacom.patch
+++ /dev/null
@@ -1,14 +0,0 @@
1KERN_ERR is not an appropriate printk level for a successful operation
2
3
4--- linux-2.6.30/drivers/hid/hid-wacom.c~ 2009-09-04 10:37:20.000000000 -0700
5+++ linux-2.6.30/drivers/hid/hid-wacom.c 2009-09-04 10:37:20.000000000 -0700
6@@ -244,7 +244,7 @@
7 ret = hid_register_driver(&wacom_driver);
8 if (ret)
9 printk(KERN_ERR "can't register wacom driver\n");
10- printk(KERN_ERR "wacom driver registered\n");
11+ printk(KERN_INFO "wacom driver registered\n");
12 return ret;
13 }
14
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-acpi-cstate-fixup.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-acpi-cstate-fixup.patch
deleted file mode 100644
index 6a1204d4c1..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-acpi-cstate-fixup.patch
+++ /dev/null
@@ -1,173 +0,0 @@
1From edeae90d635501a632efa0c7fe0667aa2cbe29be Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Mon, 28 Sep 2009 15:14:04 +0200
4Subject: [PATCH] acpi: Provide a set of tables to check the BIOS tables for correctness
5
6Today, the BIOS provides us with latency information for each C state.
7Unfortunately this information is sometimes put into the BIOS by
8apprentice BIOS programmers in a hurry, and as a result, it occasionally
9contains utter garbage.
10
11This patch adds a table based verification; if the CPU is known in the table,
12the values the BIOS provides to us are corrected for the apprentice-factor
13so that the CPUIDLE code can rely on the latency and break-even values
14to be reasonably sane.
15
16Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
17---
18 drivers/acpi/Makefile | 2 +-
19 drivers/acpi/processor_idle.c | 3 +
20 drivers/acpi/processor_mwait_table.c | 110 ++++++++++++++++++++++++++++++++++
21 include/acpi/processor.h | 3 +
22 4 files changed, 117 insertions(+), 1 deletions(-)
23 create mode 100644 drivers/acpi/processor_mwait_table.c
24
25diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
26index 82cd49d..ab56b28 100644
27--- a/drivers/acpi/Makefile
28+++ b/drivers/acpi/Makefile
29@@ -60,5 +60,5 @@ obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
30
31 # processor has its own "processor." module_param namespace
32 processor-y := processor_core.o processor_throttling.o
33-processor-y += processor_idle.o processor_thermal.o
34+processor-y += processor_idle.o processor_thermal.o processor_mwait_table.o
35 processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
36diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
37index cc61a62..db444a0 100644
38--- a/drivers/acpi/processor_idle.c
39+++ b/drivers/acpi/processor_idle.c
40@@ -1088,6 +1088,9 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
41 state->target_residency = cx->latency * latency_factor;
42 state->power_usage = cx->power;
43
44+ if (cx->entry_method == ACPI_CSTATE_FFH)
45+ acpi_verify_mwait_data(state, cx);
46+
47 state->flags = 0;
48 switch (cx->type) {
49 case ACPI_STATE_C1:
50diff --git a/drivers/acpi/processor_mwait_table.c b/drivers/acpi/processor_mwait_table.c
51new file mode 100644
52index 0000000..f29c28c
53--- /dev/null
54+++ b/drivers/acpi/processor_mwait_table.c
55@@ -0,0 +1,102 @@
56+/*
57+ * processor_mwait_table.c: BIOS table verification/correction
58+ *
59+ * (C) Copyright 2009 Intel Corporation
60+ * Authors:
61+ * Arjan van de Ven <arjan@linux.intel.com>
62+ *
63+ * This program is free software; you can redistribute it and/or
64+ * modify it under the terms of the GNU General Public License
65+ * as published by the Free Software Foundation; version 2
66+ * of the License.
67+ */
68+
69+#include <asm/processor.h>
70+#include <linux/acpi.h>
71+#include <acpi/processor.h>
72+#include <linux/cpuidle.h>
73+
74+
75+#define ATLEAST 1
76+#define ATMOST 2
77+#define EXACTLY 3
78+
79+#define MAX_ENTRIES 12
80+
81+struct mwait_entry {
82+ unsigned int mwait_value;
83+ unsigned long exit_latency;
84+ unsigned long break_even_point;
85+ int compare_method;
86+};
87+
88+struct cpu_entry {
89+ int vendor;
90+ int family;
91+ int model;
92+
93+ struct mwait_entry entries[MAX_ENTRIES];
94+};
95+
96+static struct cpu_entry mwait_entries[] =
97+{
98+ /* Intel "Atom" CPUs */
99+ {.vendor = X86_VENDOR_INTEL, .family = 6, .model = 28,
100+ .entries = {
101+ {0x00, 1, 1, ATLEAST},
102+ {0x10, 2, 20, ATLEAST},
103+ {0x30, 57, 300, ATLEAST},
104+ {0x50, 64, 4000, ATLEAST},
105+ }
106+ },
107+
108+
109+};
110+
111+
112+static unsigned long
113+compare_and_set(unsigned long original, unsigned long new, int compare)
114+{
115+ if (compare == EXACTLY)
116+ return new;
117+ if (compare == ATLEAST && new > original)
118+ return new;
119+ if (compare == ATMOST && new < original)
120+ return new;
121+ return original;
122+}
123+
124+
125+void acpi_verify_mwait_data(struct cpuidle_state *state,
126+ struct acpi_processor_cx *cx)
127+{
128+#if defined(__i386__) || defined(__x86_64__)
129+ int i;
130+
131+ struct cpuinfo_x86 *cpudata = &boot_cpu_data;
132+
133+
134+ for (i = 0; i < ARRAY_SIZE(mwait_entries); i++) {
135+ int j;
136+ if (mwait_entries[i].vendor != cpudata->x86_vendor)
137+ continue;
138+ if (mwait_entries[i].family != cpudata->x86)
139+ continue;
140+ if (mwait_entries[i].model != cpudata->x86_model)
141+ continue;
142+ for (j = 0; j < ARRAY_SIZE(mwait_entries[i].entries); j++) {
143+ if (!mwait_entries[i].entries[j].compare_method)
144+ continue;
145+ if (mwait_entries[i].entries[j].mwait_value != cx->address)
146+ continue;
147+ state->exit_latency = compare_and_set(state->exit_latency,
148+ mwait_entries[i].entries[j].exit_latency,
149+ mwait_entries[i].entries[j].compare_method);
150+ state->target_residency = compare_and_set(state->target_residency,
151+ mwait_entries[i].entries[j].break_even_point,
152+ mwait_entries[i].entries[j].compare_method);
153+ break;
154+ }
155+ }
156+#endif
157+}
158diff --git a/include/acpi/processor.h b/include/acpi/processor.h
159index 740ac3a..175e4d1 100644
160--- a/include/acpi/processor.h
161+++ b/include/acpi/processor.h
162@@ -352,5 +352,8 @@ static inline void acpi_thermal_cpufreq_exit(void)
163 return;
164 }
165 #endif
166+extern void acpi_verify_mwait_data(struct cpuidle_state *state,
167+ struct acpi_processor_cx *cx);
168+
169
170 #endif
171--
1721.6.2.5
173
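The correction logic in the patch above amounts to treating each table entry as a floor (ATLEAST), a ceiling (ATMOST) or an override (EXACTLY) for the BIOS-supplied value. The following standalone worked example uses the same helper with invented BIOS numbers, purely for illustration.

#include <stdio.h>

#define ATLEAST 1
#define ATMOST  2
#define EXACTLY 3

static unsigned long
compare_and_set(unsigned long original, unsigned long new, int compare)
{
	if (compare == EXACTLY)
		return new;
	if (compare == ATLEAST && new > original)
		return new;
	if (compare == ATMOST && new < original)
		return new;
	return original;
}

int main(void)
{
	/* made-up example: BIOS claims a 1us exit latency for a deep C state */
	unsigned long bios_latency = 1;

	/* ...but the table says it is at least 64us on this CPU */
	unsigned long fixed = compare_and_set(bios_latency, 64, ATLEAST);

	printf("latency: BIOS %lu us -> corrected %lu us\n",
	       bios_latency, fixed);	/* prints 1 -> 64 */
	return 0;
}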
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-cpuidle.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-cpuidle.patch
deleted file mode 100644
index ef930b76d4..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-cpuidle.patch
+++ /dev/null
@@ -1,407 +0,0 @@
1From f890417fc5dc4450e1dab69d7a870d6e706825a5 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 20 Sep 2009 08:45:07 +0200
4Subject: [PATCH] cpuidle: Fix the menu governor to boost IO performance
5
6Fix the menu idle governor which balances power savings, energy efficiency
7and performance impact.
8
9The reason for a reworked governor is that there have been serious
10performance issues reported with the existing code on Nehalem server
11systems.
12
13To show this I'm sure Andrew wants to see benchmark results:
14(benchmark is "fio", "no cstates" is using "idle=poll")
15
16           no cstates   current linux   new algorithm
171 disk        107 Mb/s        85 Mb/s        105 Mb/s
182 disks       215 Mb/s       123 Mb/s        209 Mb/s
1912 disks      590 Mb/s       320 Mb/s        585 Mb/s
20
21In various power benchmark measurements, no degradation was found by our
22measurement&diagnostics team. Obviously a small percentage more power
23was used in the "fio" benchmark, due to the much higher performance.
24
25While it would be a novel idea to describe the new algorithm in this
26commit message, I cheaped out and described it in comments in the code
27instead.
28
29[changes in v2: spelling fixes from akpm, review feedback,
30folded menu-tng into menu.c
31 changes in v3: use this_rq() as per akpm suggestion]
32
33Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
34Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
35Cc: Len Brown <lenb@kernel.org>
36Acked-by: Ingo Molnar <mingo@elte.hu>
37Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
38Cc: Yanmin Zhang <yanmin_zhang@linux.intel.com>
39Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
40---
41 drivers/cpuidle/governors/menu.c | 251 ++++++++++++++++++++++++++++++++------
42 include/linux/sched.h | 4 +
43 kernel/sched.c | 13 ++
44 3 files changed, 229 insertions(+), 39 deletions(-)
45
46diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
47index f1df59f..9f3d775 100644
48--- a/drivers/cpuidle/governors/menu.c
49+++ b/drivers/cpuidle/governors/menu.c
50@@ -2,8 +2,12 @@
51 * menu.c - the menu idle governor
52 *
53 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
54+ * Copyright (C) 2009 Intel Corporation
55+ * Author:
56+ * Arjan van de Ven <arjan@linux.intel.com>
57 *
58- * This code is licenced under the GPL.
59+ * This code is licenced under the GPL version 2 as described
60+ * in the COPYING file that accompanies the Linux Kernel.
61 */
62
63 #include <linux/kernel.h>
64@@ -13,20 +17,153 @@
65 #include <linux/ktime.h>
66 #include <linux/hrtimer.h>
67 #include <linux/tick.h>
68+#include <linux/sched.h>
69
70-#define BREAK_FUZZ 4 /* 4 us */
71-#define PRED_HISTORY_PCT 50
72+#define BUCKETS 12
73+#define RESOLUTION 1024
74+#define DECAY 4
75+#define MAX_INTERESTING 50000
76+
77+/*
78+ * Concepts and ideas behind the menu governor
79+ *
80+ * For the menu governor, there are 3 decision factors for picking a C
81+ * state:
82+ * 1) Energy break even point
83+ * 2) Performance impact
84+ * 3) Latency tolerance (from pmqos infrastructure)
85+ * These three factors are treated independently.
86+ *
87+ * Energy break even point
88+ * -----------------------
89+ * C state entry and exit have an energy cost, and a certain amount of time in
90+ * the C state is required to actually break even on this cost. CPUIDLE
91+ * provides us this duration in the "target_residency" field. So all that we
92+ * need is a good prediction of how long we'll be idle. Like the traditional
93+ * menu governor, we start with the actual known "next timer event" time.
94+ *
94+ * Since there are other sources of wakeups (interrupts for example) than
96+ * the next timer event, this estimation is rather optimistic. To get a
97+ * more realistic estimate, a correction factor is applied to the estimate,
98+ * that is based on historic behavior. For example, if in the past the actual
99+ * duration always was 50% of the next timer tick, the correction factor will
100+ * be 0.5.
101+ *
102+ * menu uses a running average for this correction factor, however it uses a
103+ * set of factors, not just a single factor. This stems from the realization
104+ * that the ratio is dependent on the order of magnitude of the expected
105+ * duration; if we expect 500 milliseconds of idle time the likelihood of
106+ * getting an interrupt very early is much higher than if we expect 50 micro
107+ * seconds of idle time. A second independent factor that has big impact on
108+ * the actual factor is if there is (disk) IO outstanding or not.
109+ * (as a special twist, we consider every sleep longer than 50 milliseconds
110+ * as perfect; there are no power gains for sleeping longer than this)
111+ *
112+ * For these two reasons we keep an array of 12 independent factors, that gets
113+ * indexed based on the magnitude of the expected duration as well as the
114+ * "is IO outstanding" property.
115+ *
116+ * Limiting Performance Impact
117+ * ---------------------------
118+ * C states, especially those with large exit latencies, can have a real
119+ * noticeable impact on workloads, which is not acceptable for most sysadmins,
120+ * and in addition, less performance has a power price of its own.
121+ *
122+ * As a general rule of thumb, menu assumes that the following heuristic
123+ * holds:
124+ * The busier the system, the less impact of C states is acceptable
125+ *
126+ * This rule-of-thumb is implemented using a performance-multiplier:
127+ * If the exit latency times the performance multiplier is longer than
128+ * the predicted duration, the C state is not considered a candidate
129+ * for selection due to a too high performance impact. So the higher
130+ * this multiplier is, the longer we need to be idle to pick a deep C
131+ * state, and thus the less likely a busy CPU will hit such a deep
132+ * C state.
133+ *
134+ * Two factors are used in determining this multiplier:
135+ * a value of 10 is added for each point of "per cpu load average" we have.
136+ * a value of 5 points is added for each process that is waiting for
137+ * IO on this CPU.
138+ * (these values are experimentally determined)
139+ *
140+ * The load average factor gives a longer term (few seconds) input to the
141+ * decision, while the iowait value gives a cpu local instantaneous input.
142+ * The iowait factor may look low, but realize that this is also already
143+ * represented in the system load average.
144+ *
145+ */
146
147 struct menu_device {
148 int last_state_idx;
149
150 unsigned int expected_us;
151- unsigned int predicted_us;
152- unsigned int current_predicted_us;
153- unsigned int last_measured_us;
154- unsigned int elapsed_us;
155+ u64 predicted_us;
156+ unsigned int measured_us;
157+ unsigned int exit_us;
158+ unsigned int bucket;
159+ u64 correction_factor[BUCKETS];
160 };
161
162+
163+#define LOAD_INT(x) ((x) >> FSHIFT)
164+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
165+
166+static int get_loadavg(void)
167+{
168+ unsigned long this = this_cpu_load();
169+
170+
171+ return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
172+}
173+
174+static inline int which_bucket(unsigned int duration)
175+{
176+ int bucket = 0;
177+
178+ /*
179+ * We keep two groups of stats; one with
180+ * IO pending, one without.
181+ * This allows us to calculate
182+ * E(duration)|iowait
183+ */
184+ if (nr_iowait_cpu())
185+ bucket = BUCKETS/2;
186+
187+ if (duration < 10)
188+ return bucket;
189+ if (duration < 100)
190+ return bucket + 1;
191+ if (duration < 1000)
192+ return bucket + 2;
193+ if (duration < 10000)
194+ return bucket + 3;
195+ if (duration < 100000)
196+ return bucket + 4;
197+ return bucket + 5;
198+}
199+
200+/*
201+ * Return a multiplier for the exit latency that is intended
202+ * to take performance requirements into account.
203+ * The more performance critical we estimate the system
204+ * to be, the higher this multiplier, and thus the higher
205+ * the barrier to go to an expensive C state.
206+ */
207+static inline int performance_multiplier(void)
208+{
209+ int mult = 1;
210+
211+ /* for higher loadavg, we are more reluctant */
212+
213+ mult += 2 * get_loadavg();
214+
215+ /* for IO wait tasks (per cpu!) we add 5x each */
216+ mult += 10 * nr_iowait_cpu();
217+
218+ return mult;
219+}
220+
221 static DEFINE_PER_CPU(struct menu_device, menu_devices);
222
223 /**
224@@ -38,37 +175,59 @@ static int menu_select(struct cpuidle_device *dev)
225 struct menu_device *data = &__get_cpu_var(menu_devices);
226 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
227 int i;
228+ int multiplier;
229+
230+ data->last_state_idx = 0;
231+ data->exit_us = 0;
232
233 /* Special case when user has set very strict latency requirement */
234- if (unlikely(latency_req == 0)) {
235- data->last_state_idx = 0;
236+ if (unlikely(latency_req == 0))
237 return 0;
238- }
239
240- /* determine the expected residency time */
241+ /* determine the expected residency time, round up */
242 data->expected_us =
243- (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
244+ DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);
245+
246+
247+ data->bucket = which_bucket(data->expected_us);
248+
249+ multiplier = performance_multiplier();
250+
251+ /*
252+ * if the correction factor is 0 (eg first time init or cpu hotplug
253+ * etc), we actually want to start out with a unity factor.
254+ */
255+ if (data->correction_factor[data->bucket] == 0)
256+ data->correction_factor[data->bucket] = RESOLUTION * DECAY;
257+
258+ /* Make sure to round up for half microseconds */
259+ data->predicted_us = DIV_ROUND_CLOSEST(
260+ data->expected_us * data->correction_factor[data->bucket],
261+ RESOLUTION * DECAY);
262+
263+ /*
264+ * We want to default to C1 (hlt), not to busy polling
265+ * unless the timer is happening really really soon.
266+ */
267+ if (data->expected_us > 5)
268+ data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
269
270- /* Recalculate predicted_us based on prediction_history_pct */
271- data->predicted_us *= PRED_HISTORY_PCT;
272- data->predicted_us += (100 - PRED_HISTORY_PCT) *
273- data->current_predicted_us;
274- data->predicted_us /= 100;
275
276 /* find the deepest idle state that satisfies our constraints */
277- for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
278+ for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
279 struct cpuidle_state *s = &dev->states[i];
280
281- if (s->target_residency > data->expected_us)
282- break;
283 if (s->target_residency > data->predicted_us)
284 break;
285 if (s->exit_latency > latency_req)
286 break;
287+ if (s->exit_latency * multiplier > data->predicted_us)
288+ break;
289+ data->exit_us = s->exit_latency;
290+ data->last_state_idx = i;
291 }
292
293- data->last_state_idx = i - 1;
294- return i - 1;
295+ return data->last_state_idx;
296 }
297
298 /**
299@@ -85,35 +244,49 @@ static void menu_reflect(struct cpuidle_device *dev)
300 unsigned int last_idle_us = cpuidle_get_last_residency(dev);
301 struct cpuidle_state *target = &dev->states[last_idx];
302 unsigned int measured_us;
303+ u64 new_factor;
304
305 /*
306 * Ugh, this idle state doesn't support residency measurements, so we
307 * are basically lost in the dark. As a compromise, assume we slept
308- * for one full standard timer tick. However, be aware that this
309- * could potentially result in a suboptimal state transition.
310+ * for the whole expected time.
311 */
312 if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
313- last_idle_us = USEC_PER_SEC / HZ;
314+ last_idle_us = data->expected_us;
315+
316+
317+ measured_us = last_idle_us;
318
319 /*
320- * measured_us and elapsed_us are the cumulative idle time, since the
321- * last time we were woken out of idle by an interrupt.
322+ * We correct for the exit latency; we are assuming here that the
323+ * exit latency happens after the event that we're interested in.
324 */
325- if (data->elapsed_us <= data->elapsed_us + last_idle_us)
326- measured_us = data->elapsed_us + last_idle_us;
327+ if (measured_us > data->exit_us)
328+ measured_us -= data->exit_us;
329+
330+
331+ /* update our correction ratio */
332+
333+ new_factor = data->correction_factor[data->bucket]
334+ * (DECAY - 1) / DECAY;
335+
336+ if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING)
337+ new_factor += RESOLUTION * measured_us / data->expected_us;
338 else
339- measured_us = -1;
340+ /*
341+ * we were idle so long that we count it as a perfect
342+ * prediction
343+ */
344+ new_factor += RESOLUTION;
345
346- /* Predict time until next break event */
347- data->current_predicted_us = max(measured_us, data->last_measured_us);
348+ /*
349+ * We don't want 0 as factor; we always want at least
350+ * a tiny bit of estimated time.
351+ */
352+ if (new_factor == 0)
353+ new_factor = 1;
354
355- if (last_idle_us + BREAK_FUZZ <
356- data->expected_us - target->exit_latency) {
357- data->last_measured_us = measured_us;
358- data->elapsed_us = 0;
359- } else {
360- data->elapsed_us = measured_us;
361- }
362+ data->correction_factor[data->bucket] = new_factor;
363 }
364
365 /**
366diff --git a/include/linux/sched.h b/include/linux/sched.h
367index cdc1298..d559406 100644
368--- a/include/linux/sched.h
369+++ b/include/linux/sched.h
370@@ -140,6 +140,10 @@ extern int nr_processes(void);
371 extern unsigned long nr_running(void);
372 extern unsigned long nr_uninterruptible(void);
373 extern unsigned long nr_iowait(void);
374+extern unsigned long nr_iowait_cpu(void);
375+extern unsigned long this_cpu_load(void);
376+
377+
378 extern void calc_global_load(void);
379 extern u64 cpu_nr_migrations(int cpu);
380
381diff --git a/kernel/sched.c b/kernel/sched.c
382index 4dbe8e7..541b370 100644
383--- a/kernel/sched.c
384+++ b/kernel/sched.c
385@@ -2910,6 +2910,19 @@ unsigned long nr_iowait(void)
386 return sum;
387 }
388
389+unsigned long nr_iowait_cpu(void)
390+{
391+ struct rq *this = this_rq();
392+ return atomic_read(&this->nr_iowait);
393+}
394+
395+unsigned long this_cpu_load(void)
396+{
397+ struct rq *this = this_rq();
398+ return this->cpu_load[0];
399+}
400+
401+
402 /* Variables and functions for calc_load */
403 static atomic_long_t calc_load_tasks;
404 static unsigned long calc_load_update;
405--
4061.6.0.6
407
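To make the fixed-point bookkeeping of the new governor concrete, here is a standalone worked example, illustration only and with invented wakeup numbers: the correction factor starts at unity (RESOLUTION * DECAY) and decays towards the observed ratio of measured to expected idle time, pulling predicted_us towards the real sleep length.

#include <stdio.h>

#define RESOLUTION 1024
#define DECAY 4

int main(void)
{
	unsigned long long factor = RESOLUTION * DECAY;	/* start at unity */
	unsigned int expected_us = 1000;	/* invented: next timer in 1ms */
	unsigned int measured_us = 500;		/* invented: woke after 0.5ms */
	int i;

	for (i = 0; i < 5; i++) {
		unsigned long long predicted_us =
			expected_us * factor / (RESOLUTION * DECAY);

		printf("round %d: factor %llu, predicted %llu us\n",
		       i, factor, predicted_us);

		/* same running-average update as menu_reflect() above */
		factor = factor * (DECAY - 1) / DECAY
			 + RESOLUTION * measured_us / expected_us;
	}
	/* the prediction converges towards 500us, i.e. factor -> 2048 */
	return 0;
}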
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-honor-opost-flag-for-echoes.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-honor-opost-flag-for-echoes.patch
deleted file mode 100644
index 216fca7a2b..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-honor-opost-flag-for-echoes.patch
+++ /dev/null
@@ -1,86 +0,0 @@
1commit ee5aa7b8b98774f408d20a2f61f97a89ac66c29b
2Author: Joe Peterson <joe@skyrush.com>
3Date: Wed Sep 9 15:03:13 2009 -0600
4
5 n_tty: honor opost flag for echoes
6
7 Fixes the following bug:
8
9 http://bugs.linuxbase.org/show_bug.cgi?id=2692
10
11 Causes processing of echoed characters (output from the echo buffer) to
12 honor the O_OPOST flag, which is consistent with the old behavior.
13
14 Note that this and the next patch ("n_tty: move echoctl check and
15 clean up logic") were verified together by the bug reporters, and
16 the test now passes.
17
18 Signed-off-by: Joe Peterson <joe@skyrush.com>
19 Cc: Linus Torvalds <torvalds@linux-foundation.org>
20 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
21
22diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
23index 4e28b35..e6eeeb2 100644
24--- a/drivers/char/n_tty.c
25+++ b/drivers/char/n_tty.c
26@@ -272,7 +272,8 @@ static inline int is_continuation(unsigned char c, struct tty_struct *tty)
27 *
28 * This is a helper function that handles one output character
29 * (including special characters like TAB, CR, LF, etc.),
30- * putting the results in the tty driver's write buffer.
31+ * doing OPOST processing and putting the results in the
32+ * tty driver's write buffer.
33 *
34 * Note that Linux currently ignores TABDLY, CRDLY, VTDLY, FFDLY
35 * and NLDLY. They simply aren't relevant in the world today.
36@@ -350,8 +351,9 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
37 * @c: character (or partial unicode symbol)
38 * @tty: terminal device
39 *
40- * Perform OPOST processing. Returns -1 when the output device is
41- * full and the character must be retried.
42+ * Output one character with OPOST processing.
43+ * Returns -1 when the output device is full and the character
44+ * must be retried.
45 *
46 * Locking: output_lock to protect column state and space left
47 * (also, this is called from n_tty_write under the
48@@ -377,8 +379,11 @@ static int process_output(unsigned char c, struct tty_struct *tty)
49 /**
50 * process_output_block - block post processor
51 * @tty: terminal device
52- * @inbuf: user buffer
53- * @nr: number of bytes
54+ * @buf: character buffer
55+ * @nr: number of bytes to output
56+ *
57+ * Output a block of characters with OPOST processing.
58+ * Returns the number of characters output.
59 *
60 * This path is used to speed up block console writes, among other
61 * things when processing blocks of output data. It handles only
62@@ -605,12 +610,18 @@ static void process_echoes(struct tty_struct *tty)
63 if (no_space_left)
64 break;
65 } else {
66- int retval;
67-
68- retval = do_output_char(c, tty, space);
69- if (retval < 0)
70- break;
71- space -= retval;
72+ if (O_OPOST(tty) &&
73+ !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
74+ int retval = do_output_char(c, tty, space);
75+ if (retval < 0)
76+ break;
77+ space -= retval;
78+ } else {
79+ if (!space)
80+ break;
81+ tty_put_char(tty, c);
82+ space -= 1;
83+ }
84 cp += 1;
85 nr -= 1;
86 }
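The flag honoured here is the same OPOST bit userspace toggles through termios; when it is clear, neither normal output nor (after this patch) echoed input is post-processed, so "\n" goes out as a bare line feed. A small illustrative userspace program, not part of the patch, assuming stdout is a terminal:

#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios saved, raw;

	if (tcgetattr(STDOUT_FILENO, &saved) < 0)
		return 1;		/* stdout is not a terminal */

	raw = saved;
	raw.c_oflag &= ~OPOST;		/* disable output post-processing */
	tcsetattr(STDOUT_FILENO, TCSANOW, &raw);

	/* without OPOST these lines show the classic "staircase" effect */
	write(STDOUT_FILENO, "no opost: stairs\n", 17);
	write(STDOUT_FILENO, "effect here\n", 12);

	tcsetattr(STDOUT_FILENO, TCSANOW, &saved);	/* restore settings */
	write(STDOUT_FILENO, "opost back: normal lines\n", 25);
	return 0;
}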
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-move-echoctl-check-and-clean-up-logic.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-move-echoctl-check-and-clean-up-logic.patch
deleted file mode 100644
index 3a7e0fd942..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-move-echoctl-check-and-clean-up-logic.patch
+++ /dev/null
@@ -1,91 +0,0 @@
1commit 62b263585bb5005d44a764c90d80f9c4bb8188c1
2Author: Joe Peterson <joe@skyrush.com>
3Date: Wed Sep 9 15:03:47 2009 -0600
4
5 n_tty: move echoctl check and clean up logic
6
7 Check L_ECHOCTL before inserting a character in the echo buffer
8 (rather than as the buffer is processed), to be more consistent with
9 when all other L_ flags are checked. Also cleaned up the related logic.
10
11 Note that this and the previous patch ("n_tty: honor opost flag for echoes")
12 were verified together by the reporters of the bug that patch addresses
13 (http://bugs.linuxbase.org/show_bug.cgi?id=2692), and the test now passes.
14
15 Signed-off-by: Joe Peterson <joe@skyrush.com>
16 Cc: Linus Torvalds <torvalds@linux-foundation.org>
17 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
18
19diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
20index e6eeeb2..2e50f4d 100644
21--- a/drivers/char/n_tty.c
22+++ b/drivers/char/n_tty.c
23@@ -576,33 +576,23 @@ static void process_echoes(struct tty_struct *tty)
24 break;
25
26 default:
27- if (iscntrl(op)) {
28- if (L_ECHOCTL(tty)) {
29- /*
30- * Ensure there is enough space
31- * for the whole ctrl pair.
32- */
33- if (space < 2) {
34- no_space_left = 1;
35- break;
36- }
37- tty_put_char(tty, '^');
38- tty_put_char(tty, op ^ 0100);
39- tty->column += 2;
40- space -= 2;
41- } else {
42- if (!space) {
43- no_space_left = 1;
44- break;
45- }
46- tty_put_char(tty, op);
47- space--;
48- }
49- }
50 /*
51- * If above falls through, this was an
52- * undefined op.
53+ * If the op is not a special byte code,
54+ * it is a ctrl char tagged to be echoed
55+ * as "^X" (where X is the letter
56+ * representing the control char).
57+ * Note that we must ensure there is
58+ * enough space for the whole ctrl pair.
59+ *
60 */
61+ if (space < 2) {
62+ no_space_left = 1;
63+ break;
64+ }
65+ tty_put_char(tty, '^');
66+ tty_put_char(tty, op ^ 0100);
67+ tty->column += 2;
68+ space -= 2;
69 cp += 2;
70 nr -= 2;
71 }
72@@ -809,8 +799,8 @@ static void echo_char_raw(unsigned char c, struct tty_struct *tty)
73 * Echo user input back onto the screen. This must be called only when
74 * L_ECHO(tty) is true. Called from the driver receive_buf path.
75 *
76- * This variant tags control characters to be possibly echoed as
77- * as "^X" (where X is the letter representing the control char).
78+ * This variant tags control characters to be echoed as "^X"
79+ * (where X is the letter representing the control char).
80 *
81 * Locking: echo_lock to protect the echo buffer
82 */
83@@ -823,7 +813,7 @@ static void echo_char(unsigned char c, struct tty_struct *tty)
84 add_echo_byte(ECHO_OP_START, tty);
85 add_echo_byte(ECHO_OP_START, tty);
86 } else {
87- if (iscntrl(c) && c != '\t')
88+ if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t')
89 add_echo_byte(ECHO_OP_START, tty);
90 add_echo_byte(c, tty);
91 }
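L_ECHOCTL, now checked when the character is queued rather than when the echo buffer is drained, corresponds to the ECHOCTL bit in c_lflag. A short illustrative program, not from the patch, that clears the bit so typed control characters echo as the raw byte instead of "^X":

#define _GNU_SOURCE		/* for ECHOCTL in <termios.h> on glibc */
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios saved, t;
	char buf[64];
	ssize_t n;

	if (tcgetattr(STDIN_FILENO, &saved) < 0)
		return 1;		/* stdin is not a terminal */

	t = saved;
	t.c_lflag &= ~ECHOCTL;		/* stop echoing ctrl chars as ^X */
	tcsetattr(STDIN_FILENO, TCSANOW, &t);

	printf("type a line containing a control character: ");
	fflush(stdout);
	n = read(STDIN_FILENO, buf, sizeof(buf));

	tcsetattr(STDIN_FILENO, TCSANOW, &saved);	/* restore ^X echo */
	printf("\nread %zd bytes\n", n);
	return 0;
}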
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-timer-fix.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-timer-fix.patch
deleted file mode 100644
index a6f5079fc3..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-timer-fix.patch
+++ /dev/null
@@ -1,64 +0,0 @@
1From 33725d4939f457b12d7bc1bcbcc0dfb8b2f5bd48 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Thu, 24 Sep 2009 13:24:16 +0200
4Subject: [PATCH] x86, timers: check for pending timers after (device) interrupts
5
6Now that range timers and deferred timers are common, I found a
7problem with these using the "perf timechart" tool.
8
9It turns out that on x86, these two 'opportunistic' timers only
10get checked when another "real" timer happens.
11These opportunistic timers aim to save power by
12hitchhiking on other wakeups, so as to avoid CPU wakeups by themselves
13as much as possible.
14
15The change in this patch runs this check not only at timer interrupts,
16but at all (device) interrupts. The effect is that
171) the deferred timers/range timers get delayed less
182) the range timers cause fewer wakeups by themselves because
19 the percentage of hitchhiking on existing wakeup events goes up.
20
21I've verified that the patch works using "perf timechart";
22the originally exposed bug is gone with this patch.
23
24Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
25---
26 arch/x86/kernel/irq.c | 2 ++
27 arch/x86/kernel/smp.c | 1 +
28 2 files changed, 3 insertions(+), 0 deletions(-)
29
30diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
31index 74656d1..3912061 100644
32--- a/arch/x86/kernel/irq.c
33+++ b/arch/x86/kernel/irq.c
34@@ -244,6 +244,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
35 __func__, smp_processor_id(), vector, irq);
36 }
37
38+ run_local_timers();
39 irq_exit();
40
41 set_irq_regs(old_regs);
42@@ -268,6 +269,7 @@ void smp_generic_interrupt(struct pt_regs *regs)
43 if (generic_interrupt_extension)
44 generic_interrupt_extension();
45
46+ run_local_timers();
47 irq_exit();
48
49 set_irq_regs(old_regs);
50diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
51index ec1de97..d915d95 100644
52--- a/arch/x86/kernel/smp.c
53+++ b/arch/x86/kernel/smp.c
54@@ -198,6 +198,7 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
55 {
56 ack_APIC_irq();
57 inc_irq_stat(irq_resched_count);
58+ run_local_timers();
59 /*
60 * KVM uses this interrupt to force a cpu out of guest mode
61 */
62--
631.6.0.6
64
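The "range timers" mentioned above are the kernel-side counterpart of per-task timer slack, which userspace can widen so that its own sleeps become coalescable with other wakeups. A small illustrative program, not from the patch; the slack and sleep values below are arbitrary:

#include <stdio.h>
#include <sys/prctl.h>
#include <time.h>

int main(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };

	/* arbitrary example: allow each sleep to slip by up to 5ms (ns) */
	if (prctl(PR_SET_TIMERSLACK, 5 * 1000 * 1000UL, 0, 0, 0))
		perror("PR_SET_TIMERSLACK");

	printf("current slack: %d ns\n",
	       prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));

	/* this 10ms sleep may now fire anywhere in a 10-15ms window,
	 * ideally piggybacking on an interrupt that happens anyway */
	nanosleep(&ts, NULL);
	return 0;
}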
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-copy-checks.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-copy-checks.patch
deleted file mode 100644
index 720fda24e8..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-copy-checks.patch
+++ /dev/null
@@ -1,275 +0,0 @@
1From 524a1da3c45683cec77480acc6cab1d33ae8d5cb Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sat, 26 Sep 2009 12:36:21 +0200
4Subject: [PATCH] x86: Use __builtin_object_size to validate the buffer size for copy_from_user
5
6gcc (4.x) supports the __builtin_object_size() builtin, which reports the
7size of an object that a pointer points to, when known at compile time.
8If the buffer size is not known at compile time, a constant -1 is returned.
9
10This patch uses this feature to add a sanity check to copy_from_user();
11if the target buffer is known to be smaller than the copy size, the copy
12is aborted and a WARNing is emitted in memory debug mode.
13
14These extra checks compile away when the object size is not known,
15or if both the buffer size and the copy length are constants.
16
17Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
18Reviewed-by: Ingo Molnar <mingo@elte.hu>
19---
20 arch/x86/include/asm/uaccess_32.h | 19 ++++++++++++++++++-
21 arch/x86/include/asm/uaccess_64.h | 19 ++++++++++++++++++-
22 arch/x86/kernel/x8664_ksyms_64.c | 2 +-
23 arch/x86/lib/copy_user_64.S | 4 ++--
24 arch/x86/lib/usercopy_32.c | 4 ++--
25 include/linux/compiler-gcc4.h | 2 ++
26 include/linux/compiler.h | 4 ++++
27 7 files changed, 47 insertions(+), 7 deletions(-)
28
29diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
30index 632fb44..582d6ae 100644
31--- a/arch/x86/include/asm/uaccess_32.h
32+++ b/arch/x86/include/asm/uaccess_32.h
33@@ -187,9 +187,26 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
34
35 unsigned long __must_check copy_to_user(void __user *to,
36 const void *from, unsigned long n);
37-unsigned long __must_check copy_from_user(void *to,
38+unsigned long __must_check _copy_from_user(void *to,
39 const void __user *from,
40 unsigned long n);
41+
42+static inline unsigned long __must_check copy_from_user(void *to,
43+ const void __user *from,
44+ unsigned long n)
45+{
46+ int sz = __compiletime_object_size(to);
47+ int ret = -EFAULT;
48+
49+ if (likely(sz == -1 || sz >= n))
50+ ret = _copy_from_user(to, from, n);
51+#ifdef CONFIG_DEBUG_VM
52+ else
53+ WARN(1, "Buffer overflow detected!\n");
54+#endif
55+ return ret;
56+}
57+
58 long __must_check strncpy_from_user(char *dst, const char __user *src,
59 long count);
60 long __must_check __strncpy_from_user(char *dst,
61diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
62index db24b21..ce6fec7 100644
63--- a/arch/x86/include/asm/uaccess_64.h
64+++ b/arch/x86/include/asm/uaccess_64.h
65@@ -21,10 +21,27 @@ copy_user_generic(void *to, const void *from, unsigned len);
66 __must_check unsigned long
67 copy_to_user(void __user *to, const void *from, unsigned len);
68 __must_check unsigned long
69-copy_from_user(void *to, const void __user *from, unsigned len);
70+_copy_from_user(void *to, const void __user *from, unsigned len);
71 __must_check unsigned long
72 copy_in_user(void __user *to, const void __user *from, unsigned len);
73
74+static inline unsigned long __must_check copy_from_user(void *to,
75+ const void __user *from,
76+ unsigned long n)
77+{
78+ int sz = __compiletime_object_size(to);
79+ int ret = -EFAULT;
80+
81+ if (likely(sz == -1 || sz >= n))
82+ ret = _copy_from_user(to, from, n);
83+#ifdef CONFIG_DEBUG_VM
84+ else
85+ WARN(1, "Buffer overflow detected!\n");
86+#endif
87+ return ret;
88+}
89+
90+
91 static __always_inline __must_check
92 int __copy_from_user(void *dst, const void __user *src, unsigned size)
93 {
94diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
95index 3909e3b..a0cdd8c 100644
96--- a/arch/x86/kernel/x8664_ksyms_64.c
97+++ b/arch/x86/kernel/x8664_ksyms_64.c
98@@ -30,7 +30,7 @@ EXPORT_SYMBOL(__put_user_8);
99
100 EXPORT_SYMBOL(copy_user_generic);
101 EXPORT_SYMBOL(__copy_user_nocache);
102-EXPORT_SYMBOL(copy_from_user);
103+EXPORT_SYMBOL(_copy_from_user);
104 EXPORT_SYMBOL(copy_to_user);
105 EXPORT_SYMBOL(__copy_from_user_inatomic);
106
107diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
108index 6ba0f7b..4be3c41 100644
109--- a/arch/x86/lib/copy_user_64.S
110+++ b/arch/x86/lib/copy_user_64.S
111@@ -78,7 +78,7 @@ ENTRY(copy_to_user)
112 ENDPROC(copy_to_user)
113
114 /* Standard copy_from_user with segment limit checking */
115-ENTRY(copy_from_user)
116+ENTRY(_copy_from_user)
117 CFI_STARTPROC
118 GET_THREAD_INFO(%rax)
119 movq %rsi,%rcx
120@@ -88,7 +88,7 @@ ENTRY(copy_from_user)
121 jae bad_from_user
122 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
123 CFI_ENDPROC
124-ENDPROC(copy_from_user)
125+ENDPROC(_copy_from_user)
126
127 ENTRY(copy_user_generic)
128 CFI_STARTPROC
129diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
130index 1f118d4..8498684 100644
131--- a/arch/x86/lib/usercopy_32.c
132+++ b/arch/x86/lib/usercopy_32.c
133@@ -874,7 +874,7 @@ EXPORT_SYMBOL(copy_to_user);
134 * data to the requested size using zero bytes.
135 */
136 unsigned long
137-copy_from_user(void *to, const void __user *from, unsigned long n)
138+_copy_from_user(void *to, const void __user *from, unsigned long n)
139 {
140 if (access_ok(VERIFY_READ, from, n))
141 n = __copy_from_user(to, from, n);
142@@ -882,4 +882,4 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
143 memset(to, 0, n);
144 return n;
145 }
146-EXPORT_SYMBOL(copy_from_user);
147+EXPORT_SYMBOL(_copy_from_user);
148diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
149index 450fa59..a3aef5d 100644
150--- a/include/linux/compiler-gcc4.h
151+++ b/include/linux/compiler-gcc4.h
152@@ -37,3 +37,5 @@
153 #define __cold __attribute__((__cold__))
154
155 #endif
156+
157+#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
158diff --git a/include/linux/compiler.h b/include/linux/compiler.h
159index 9d4c4b0..9c42853 100644
160--- a/include/linux/compiler.h
161+++ b/include/linux/compiler.h
162@@ -185,6 +185,10 @@ extern void __chk_io_ptr(const volatile void __iomem *);
163 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
164 #endif
165
166+/* Compile time object size, -1 for unknown */
167+#ifndef __compiletime_object_size
168+# define __compiletime_object_size(obj) -1
169+#endif
170 /*
171 * Prevent the compiler from merging or refetching accesses. The compiler
172 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
173--
1741.6.0.6
175
176From 350cf3cd513e6759ae6852946532a47249f25600 Mon Sep 17 00:00:00 2001
177From: Arjan van de Ven <arjan@linux.intel.com>
178Date: Wed, 30 Sep 2009 12:57:46 +0200
179Subject: [PATCH] x86: Turn the copy_from_user check into an (optional) compile time warning
180
181A previous patch added the buffer size check to copy_from_user().
182
183One of the things learned from analyzing the result of the previous patch
184is that in general, gcc is really good at proving that the code contains
185sufficient security checks to not need a runtime check. But for
186those cases where gcc could not prove this, there was a relatively
187high percentage of real security issues.
188
189This patch turns the case of "gcc cannot prove" into a compile time
190warning, as long as a sufficiently new gcc is in use.
191The objective is that these warnings will prompt developers to check
192new cases before a security hole enters a Linux kernel release.
193
194Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
195---
196 arch/x86/include/asm/uaccess_32.h | 12 +++++++++---
197 arch/x86/lib/usercopy_32.c | 6 ++++++
198 include/linux/compiler-gcc4.h | 3 +++
199 include/linux/compiler.h | 4 ++++
200 4 files changed, 22 insertions(+), 3 deletions(-)
201
202diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
203index 582d6ae..7826639 100644
204--- a/arch/x86/include/asm/uaccess_32.h
205+++ b/arch/x86/include/asm/uaccess_32.h
206@@ -191,6 +191,13 @@ unsigned long __must_check _copy_from_user(void *to,
207 const void __user *from,
208 unsigned long n);
209
210+
211+extern void copy_from_user_overflow(void)
212+#ifdef CONFIG_DEBUG_STACKOVERFLOW
213+ __compiletime_warning("copy_from_user buffer size is not provably correct")
214+#endif
215+;
216+
217 static inline unsigned long __must_check copy_from_user(void *to,
218 const void __user *from,
219 unsigned long n)
220@@ -200,10 +207,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
221
222 if (likely(sz == -1 || sz >= n))
223 ret = _copy_from_user(to, from, n);
224-#ifdef CONFIG_DEBUG_VM
225 else
226- WARN(1, "Buffer overflow detected!\n");
227-#endif
228+ copy_from_user_overflow();
229+
230 return ret;
231 }
232
233diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
234index 8498684..e218d5d 100644
235--- a/arch/x86/lib/usercopy_32.c
236+++ b/arch/x86/lib/usercopy_32.c
237@@ -883,3 +883,9 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
238 return n;
239 }
240 EXPORT_SYMBOL(_copy_from_user);
241+
242+void copy_from_user_overflow(void)
243+{
244+ WARN(1, "Buffer overflow detected!\n");
245+}
246+EXPORT_SYMBOL(copy_from_user_overflow);
247diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
248index a3aef5d..f1709c1 100644
249--- a/include/linux/compiler-gcc4.h
250+++ b/include/linux/compiler-gcc4.h
251@@ -39,3 +39,6 @@
252 #endif
253
254 #define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
255+#if __GNUC_MINOR__ >= 4
256+#define __compiletime_warning(message) __attribute__((warning(message)))
257+#endif
258diff --git a/include/linux/compiler.h b/include/linux/compiler.h
259index 9c42853..241dfd8 100644
260--- a/include/linux/compiler.h
261+++ b/include/linux/compiler.h
262@@ -189,6 +189,10 @@ extern void __chk_io_ptr(const volatile void __iomem *);
263 #ifndef __compiletime_object_size
264 # define __compiletime_object_size(obj) -1
265 #endif
266+#ifndef __compiletime_warning
267+# define __compiletime_warning(message)
268+#endif
269+
270 /*
271 * Prevent the compiler from merging or refetching accesses. The compiler
272 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
273--
2741.6.2.5
275
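The whole check rests on one gcc builtin, which is easy to poke at in isolation. A standalone sketch, not part of the patch; build it with optimisation enabled (e.g. gcc -O2), since the builtin relies on the optimiser to prove object sizes, and the 0 argument matches the usage in the kernel macro above.

#include <stdio.h>

int main(int argc, char **argv)
{
	char buf[64];

	/* size of buf is known at compile time: prints 64 */
	printf("buf:     %ld\n", (long)__builtin_object_size(buf, 0));

	/* size of whatever argv[0] points at cannot be proven: prints -1 */
	printf("argv[0]: %ld\n", (long)__builtin_object_size(argv[0], 0));

	return 0;
}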
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-pit-fix.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-pit-fix.patch
deleted file mode 100644
index 78a297400f..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-pit-fix.patch
+++ /dev/null
@@ -1,95 +0,0 @@
1From 42cb68d81a218b0fd7c053356d379a93270b40ea Mon Sep 17 00:00:00 2001
2From: Yong Wang <yong.y.wang@intel.com>
3Date: Fri, 30 Oct 2009 10:33:20 +0800
4Subject: [PATCH] x86: Do not unregister PIT clocksource on PIT oneshot setup/shutdown
5
6Backported from upstream commit 8cab02dc3c58a12235c6d463ce684dded9696848
7and this fixes bug #7377 "system can not resume from S3". Further information
8can be found at http://bugzilla.kernel.org/show_bug.cgi?id=14222.
9
10Signed-off-by: Yong Wang <yong.y.wang@intel.com>
11---
12 arch/x86/kernel/i8253.c | 36 ++----------------------------------
13 1 files changed, 2 insertions(+), 34 deletions(-)
14
15diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
16index da890f0..23c1679 100644
17--- a/arch/x86/kernel/i8253.c
18+++ b/arch/x86/kernel/i8253.c
19@@ -19,14 +19,6 @@
20 DEFINE_SPINLOCK(i8253_lock);
21 EXPORT_SYMBOL(i8253_lock);
22
23-#ifdef CONFIG_X86_32
24-static void pit_disable_clocksource(void);
25-static void pit_enable_clocksource(void);
26-#else
27-static inline void pit_disable_clocksource(void) { }
28-static inline void pit_enable_clocksource(void) { }
29-#endif
30-
31 /*
32 * HPET replaces the PIT, when enabled. So we need to know, which of
33 * the two timers is used
34@@ -59,17 +51,15 @@ static void init_pit_timer(enum clock_event_mode mode,
35 outb_pit(0, PIT_CH0);
36 outb_pit(0, PIT_CH0);
37 }
38- pit_disable_clocksource();
39 break;
40
41 case CLOCK_EVT_MODE_ONESHOT:
42 /* One shot setup */
43- pit_disable_clocksource();
44 outb_pit(0x38, PIT_MODE);
45 break;
46
47 case CLOCK_EVT_MODE_RESUME:
48- pit_enable_clocksource();
49+ /* Nothing to do here */
50 break;
51 }
52 spin_unlock(&i8253_lock);
53@@ -202,27 +192,8 @@ static struct clocksource pit_cs = {
54 .shift = 20,
55 };
56
57-int pit_cs_registered;
58-static void pit_disable_clocksource(void)
59-{
60- if (pit_cs_registered) {
61- clocksource_unregister(&pit_cs);
62- pit_cs_registered = 0;
63- }
64-}
65-
66-static void pit_enable_clocksource(void)
67-{
68- if (!pit_cs_registered && !clocksource_register(&pit_cs)) {
69- pit_cs_registered = 1;
70- }
71-}
72-
73-
74-
75 static int __init init_pit_clocksource(void)
76 {
77- int ret;
78 /*
79 * Several reasons not to register PIT as a clocksource:
80 *
81@@ -236,10 +207,7 @@ static int __init init_pit_clocksource(void)
82
83 pit_cs.mult = clocksource_hz2mult(CLOCK_TICK_RATE, pit_cs.shift);
84
85- ret = clocksource_register(&pit_cs);
86- if (!ret)
87- pit_cs_registered = 1;
88- return ret;
89+ return clocksource_register(&pit_cs);
90 }
91 arch_initcall(init_pit_clocksource);
92
93--
941.5.5.1
95
diff --git a/meta-moblin/packages/linux/linux-moblin_2.6.29.1.bb b/meta-moblin/packages/linux/linux-moblin_2.6.29.1.bb
deleted file mode 100644
index 83b8985f4b..0000000000
--- a/meta-moblin/packages/linux/linux-moblin_2.6.29.1.bb
+++ /dev/null
@@ -1,46 +0,0 @@
1require linux-moblin.inc
2
3PR = "r12"
4
5DEFAULT_PREFERENCE = "-1"
6DEFAULT_PREFERENCE_netbook = "1"
7DEFAULT_PREFERENCE_menlow = "1"
8
9SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.29.1.tar.bz2 \
10 file://linux-2.6-build-nonintconfig.patch;patch=1 \
11 file://linux-2.6.29-retry-root-mount.patch;patch=1 \
12 file://linux-2.6.29-dont-wait-for-mouse.patch;patch=1 \
13 file://linux-2.6.29-fast-initrd.patch;patch=1 \
14 file://linux-2.6.29-sreadahead.patch;patch=1 \
15 file://linux-2.6.29-enable-async-by-default.patch;patch=1 \
16 file://linux-2.6.29-drm-revert.patch;patch=1 \
17 file://linux-2.6.19-modesetting-by-default.patch;patch=1 \
18 file://linux-2.6.29-fast-kms.patch;patch=1 \
19 file://linux-2.6.29-even-faster-kms.patch;patch=1 \
20 file://linux-2.6.29-silence-acer-message.patch;patch=1 \
21 file://linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch;patch=1 \
22 file://linux-2.6.29-msiwind.patch;patch=1 \
23 file://linux-2.6.29-flip-ide-net.patch;patch=1 \
24 file://linux-2.6.29-kms-after-sata.patch;patch=1 \
25 file://linux-2.6.29-jbd-longer-commit-interval.patch;patch=1 \
26 file://linux-2.6.29-touchkit.patch;patch=1 \
27 file://linux-2.6.30-fix-async.patch;patch=1 \
28 file://linux-2.6.30-fix-suspend.patch;patch=1 \
29 file://0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch;patch=1 \
30 file://0002-drm-Add-a-tracker-for-global-objects.patch;patch=1 \
31 file://0003-drm-Export-hash-table-functionality.patch;patch=1 \
32 file://0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch;patch=1 \
33 file://linux-2.6.29-psb-driver.patch;patch=1 \
34 file://linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch;patch=1 \
35 file://linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch;patch=1 \
36 file://linux-2.6.29-pnv-agp.patch;patch=1 \
37 file://linux-2.6.29-pnv-drm.patch;patch=1 \
38 file://linux-2.6.29-pnv-fix-gtt-size.patch;patch=1 \
39 file://linux-2.6.29-pnv-fix-i2c.patch;patch=1 \
40 file://linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch;patch=1 \
41 file://linux-2.6.29-timberdale.patch;patch=1 \
42# file://i915_split.patch;patch=1 \
43 file://defconfig-menlow \
44 file://defconfig-netbook"
45
46S = "${WORKDIR}/linux-2.6.29.1"
diff --git a/meta-moblin/packages/linux/linux-moblin_2.6.31.5.bb b/meta-moblin/packages/linux/linux-moblin_2.6.31.5.bb
deleted file mode 100644
index 409242de31..0000000000
--- a/meta-moblin/packages/linux/linux-moblin_2.6.31.5.bb
+++ /dev/null
@@ -1,47 +0,0 @@
1require linux-moblin.inc
2
3PR = "r0"
4
5DEFAULT_PREFERENCE = "-1"
6DEFAULT_PREFERENCE_netbook = "1"
7#DEFAULT_PREFERENCE_menlow = "1"
8
9SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.31.5.tar.bz2 \
10 file://linux-2.6-build-nonintconfig.patch;patch=1 \
11 file://linux-2.6.32-cpuidle.patch;patch=1 \
12 file://linux-2.6.32-n_tty-honor-opost-flag-for-echoes.patch;patch=1 \
13 file://linux-2.6.32-n_tty-move-echoctl-check-and-clean-up-logic.patch;patch=1 \
14 file://linux-2.6.33-pit-fix.patch;patch=1 \
15 file://linux-2.6.29-dont-wait-for-mouse.patch;patch=1 \
16 file://linux-2.6.29-sreadahead.patch;patch=1 \
17 file://linux-2.6.29-kms-edid-cache.patch;patch=1 \
18 file://linux-2.6.29-kms-run-async.patch;patch=1 \
19 file://linux-2.6.29-kms-dont-blank-display.patch;patch=1 \
20 file://linux-2.6.29-kms-after-sata.patch;patch=1 \
21 file://linux-2.6.30-non-root-X.patch;patch=1 \
22 file://linux-2.6.31-drm-kms-flip.patch;patch=1 \
23 file://linux-2.6.31-drm-mem-info.patch;patch=1 \
24 file://linux-2.6.31-drm-i915-fix.patch;patch=1 \
25 file://linux-2.6.31-drm-i915-opregion.patch;patch=1 \
26 file://linux-2.6.31-drm-i915-vblank-fix.patch;patch=1 \
27 file://linux-2.6.29-silence-acer-message.patch;patch=1 \
28 file://linux-2.6.31-silence-wacom.patch;patch=1 \
29 file://linux-2.6.29-jbd-longer-commit-interval.patch;patch=1 \
30 file://linux-2.6.29-touchkit.patch;patch=1 \
31 file://linux-2.6.31-1-2-timberdale.patch;patch=1 \
32 file://linux-2.6.31-2-2-timberdale.patch;patch=1 \
33 file://linux-2.6-driver-level-usb-autosuspend.patch;patch=1 \
34 file://linux-2.6.31-bluetooth-suspend.patch;patch=1 \
35 file://linux-2.6-usb-uvc-autosuspend.patch;patch=1 \
36 file://linux-2.6.31-samsung.patch;patch=1 \
37 file://MRST-GFX-driver-consolidated.patch;patch=1 \
38 file://linux-2.6.31-iegd.patch;patch=1 \
39 file://linux-2.6.32-acpi-cstate-fixup.patch;patch=1 \
40 file://linux-2.6.32-timer-fix.patch;patch=1 \
41 file://linux-2.6.33-copy-checks.patch;patch=1 \
42 file://close_debug_info_of_rt2860.patch;patch=1 \
43# file://i915_split.patch.patch;patch=1 \
44# file://defconfig-menlow \
45 file://defconfig-netbook"
46
47S = "${WORKDIR}/linux-2.6.31.5"