summaryrefslogtreecommitdiffstats
path: root/meta-moblin
diff options
context:
space:
mode:
authorRichard Purdie <rpurdie@linux.intel.com>2010-01-26 15:59:18 +0000
committerRichard Purdie <rpurdie@linux.intel.com>2010-01-26 15:59:18 +0000
commit684d263e75a6a7ede638afa60e35a238e24c12ba (patch)
tree5ab1d38848909494b693e31d0a29659bcaa365e4 /meta-moblin
parent3a32c2c6e9d1d9823971a17c0ee8f8839bd79b1f (diff)
downloadpoky-684d263e75a6a7ede638afa60e35a238e24c12ba.tar.gz
linux-moblin: Add 2.6.31.5
Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
Diffstat (limited to 'meta-moblin')
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/MRST-GFX-driver-consolidated.patch44328
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/close_debug_info_of_rt2860.patch38
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/defconfig-netbook3220
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-build-nonintconfig.patch142
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-driver-level-usb-autosuspend.patch61
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-usb-uvc-autosuspend.patch19
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-dont-wait-for-mouse.patch47
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-jbd-longer-commit-interval.patch25
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-after-sata.patch38
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-dont-blank-display.patch33
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-edid-cache.patch58
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-run-async.patch118
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-silence-acer-message.patch22
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-sreadahead.patch66
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-touchkit.patch146
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.30-non-root-X.patch32
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-1-2-timberdale.patch12910
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-2-2-timberdale.patch44
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-bluetooth-suspend.patch465
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-fix.patch26
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-opregion.patch43
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-vblank-fix.patch26
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-kms-flip.patch307
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-mem-info.patch140
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-iegd.patch9290
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-samsung.patch206
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-silence-wacom.patch14
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-acpi-cstate-fixup.patch173
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-cpuidle.patch407
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-honor-opost-flag-for-echoes.patch86
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-move-echoctl-check-and-clean-up-logic.patch91
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-timer-fix.patch64
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-copy-checks.patch275
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-pit-fix.patch95
-rw-r--r--meta-moblin/packages/linux/linux-moblin_2.6.31.5.bb47
35 files changed, 73102 insertions, 0 deletions
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/MRST-GFX-driver-consolidated.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/MRST-GFX-driver-consolidated.patch
new file mode 100644
index 0000000000..e7676e3725
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/MRST-GFX-driver-consolidated.patch
@@ -0,0 +1,44328 @@
1From 42e6f8da6d694e77678b7ffd8a32a5e9ab56efe3 Mon Sep 17 00:00:00 2001
2From: Alan Olsen <alan.r.olsen@intel.com>
3Date: Thu, 15 Oct 2009 10:42:37 -0700
4Subject: [PATCH] Moorestown graphics consolidation patch v2.10
5
6Includes all patches through v2.10 of the PSB drivers as well as
7Alpha2-2.9-mrst-GFX-driver-incremental-restore-MSIreg-in-PCIx.patch.
8
9Signed-off-by: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
10Signed-off-by: Sophia (Chia-Hung) Kuo <chia-hung.s.kuo@intel.com>
11Signed-off-by: Alan Olsen <alan.r.olsen@intel.com>
12---
13 drivers/gpu/drm/Kconfig | 12 +
14 drivers/gpu/drm/Makefile | 3 +-
15 drivers/gpu/drm/drm_crtc.c | 130 ++
16 drivers/gpu/drm/drm_drv.c | 13 +-
17 drivers/gpu/drm/drm_global.c | 107 +
18 drivers/gpu/drm/drm_irq.c | 30 +
19 drivers/gpu/drm/psb/Makefile | 19 +
20 drivers/gpu/drm/psb/lnc_topaz.c | 676 +++++++
21 drivers/gpu/drm/psb/lnc_topaz.h | 902 +++++++++
22 drivers/gpu/drm/psb/lnc_topazinit.c | 2058 ++++++++++++++++++++
23 drivers/gpu/drm/psb/psb_bl.c | 232 +++
24 drivers/gpu/drm/psb/psb_buffer.c | 519 +++++
25 drivers/gpu/drm/psb/psb_dpst.c | 208 ++
26 drivers/gpu/drm/psb/psb_dpst.h | 90 +
27 drivers/gpu/drm/psb/psb_drm.h | 716 +++++++
28 drivers/gpu/drm/psb/psb_drv.c | 2239 +++++++++++++++++++++
29 drivers/gpu/drm/psb/psb_drv.h | 1224 ++++++++++++
30 drivers/gpu/drm/psb/psb_fb.c | 1833 +++++++++++++++++
31 drivers/gpu/drm/psb/psb_fb.h | 47 +
32 drivers/gpu/drm/psb/psb_fence.c | 359 ++++
33 drivers/gpu/drm/psb/psb_gtt.c | 278 +++
34 drivers/gpu/drm/psb/psb_hotplug.c | 427 ++++
35 drivers/gpu/drm/psb/psb_hotplug.h | 96 +
36 drivers/gpu/drm/psb/psb_intel_bios.c | 309 +++
37 drivers/gpu/drm/psb/psb_intel_bios.h | 436 +++++
38 drivers/gpu/drm/psb/psb_intel_display.c | 2484 ++++++++++++++++++++++++
39 drivers/gpu/drm/psb/psb_intel_display.h | 31 +
40 drivers/gpu/drm/psb/psb_intel_drv.h | 246 +++
41 drivers/gpu/drm/psb/psb_intel_dsi.c | 1798 +++++++++++++++++
42 drivers/gpu/drm/psb/psb_intel_i2c.c | 179 ++
43 drivers/gpu/drm/psb/psb_intel_lvds.c | 1343 +++++++++++++
44 drivers/gpu/drm/psb/psb_intel_modes.c | 64 +
45 drivers/gpu/drm/psb/psb_intel_reg.h | 1015 ++++++++++
46 drivers/gpu/drm/psb/psb_intel_sdvo.c | 1350 +++++++++++++
47 drivers/gpu/drm/psb/psb_intel_sdvo_regs.h | 345 ++++
48 drivers/gpu/drm/psb/psb_irq.c | 621 ++++++
49 drivers/gpu/drm/psb/psb_mmu.c | 1073 ++++++++++
50 drivers/gpu/drm/psb/psb_msvdx.c | 855 ++++++++
51 drivers/gpu/drm/psb/psb_msvdx.h | 527 +++++
52 drivers/gpu/drm/psb/psb_msvdxinit.c | 747 +++++++
53 drivers/gpu/drm/psb/psb_powermgmt.c | 1146 +++++++++++
54 drivers/gpu/drm/psb/psb_powermgmt.h | 73 +
55 drivers/gpu/drm/psb/psb_reg.h | 574 ++++++
56 drivers/gpu/drm/psb/psb_reset.c | 484 +++++
57 drivers/gpu/drm/psb/psb_scene.c | 523 +++++
58 drivers/gpu/drm/psb/psb_scene.h | 119 ++
59 drivers/gpu/drm/psb/psb_schedule.c | 1593 +++++++++++++++
60 drivers/gpu/drm/psb/psb_schedule.h | 181 ++
61 drivers/gpu/drm/psb/psb_setup.c | 18 +
62 drivers/gpu/drm/psb/psb_sgx.c | 1784 +++++++++++++++++
63 drivers/gpu/drm/psb/psb_sgx.h | 41 +
64 drivers/gpu/drm/psb/psb_socket.c | 340 ++++
65 drivers/gpu/drm/psb/psb_ttm_glue.c | 342 ++++
66 drivers/gpu/drm/psb/psb_umevents.c | 490 +++++
67 drivers/gpu/drm/psb/psb_umevents.h | 150 ++
68 drivers/gpu/drm/psb/psb_xhw.c | 652 +++++++
69 drivers/gpu/drm/psb/ttm/ttm_agp_backend.c | 149 ++
70 drivers/gpu/drm/psb/ttm/ttm_bo.c | 1716 ++++++++++++++++
71 drivers/gpu/drm/psb/ttm/ttm_bo_api.h | 578 ++++++
72 drivers/gpu/drm/psb/ttm/ttm_bo_driver.h | 859 ++++++++
73 drivers/gpu/drm/psb/ttm/ttm_bo_util.c | 536 +++++
74 drivers/gpu/drm/psb/ttm/ttm_bo_vm.c | 596 ++++++
75 drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c | 115 ++
76 drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h | 110 ++
77 drivers/gpu/drm/psb/ttm/ttm_fence.c | 607 ++++++
78 drivers/gpu/drm/psb/ttm/ttm_fence_api.h | 277 +++
79 drivers/gpu/drm/psb/ttm/ttm_fence_driver.h | 309 +++
80 drivers/gpu/drm/psb/ttm/ttm_fence_user.c | 242 +++
81 drivers/gpu/drm/psb/ttm/ttm_fence_user.h | 147 ++
82 drivers/gpu/drm/psb/ttm/ttm_lock.c | 162 ++
83 drivers/gpu/drm/psb/ttm/ttm_lock.h | 181 ++
84 drivers/gpu/drm/psb/ttm/ttm_memory.c | 232 +++
85 drivers/gpu/drm/psb/ttm/ttm_memory.h | 154 ++
86 drivers/gpu/drm/psb/ttm/ttm_object.c | 444 +++++
87 drivers/gpu/drm/psb/ttm/ttm_object.h | 269 +++
88 drivers/gpu/drm/psb/ttm/ttm_pat_compat.c | 178 ++
89 drivers/gpu/drm/psb/ttm/ttm_pat_compat.h | 41 +
90 drivers/gpu/drm/psb/ttm/ttm_placement_common.h | 98 +
91 drivers/gpu/drm/psb/ttm/ttm_placement_user.c | 468 +++++
92 drivers/gpu/drm/psb/ttm/ttm_placement_user.h | 259 +++
93 drivers/gpu/drm/psb/ttm/ttm_regman.h | 74 +
94 drivers/gpu/drm/psb/ttm/ttm_tt.c | 655 +++++++
95 drivers/gpu/drm/psb/ttm/ttm_userobj_api.h | 79 +
96 include/drm/drm.h | 1 +
97 include/drm/drmP.h | 30 +
98 include/drm/drm_crtc.h | 12 +
99 include/drm/drm_mode.h | 18 +
100 include/linux/backlight.h | 3 +
101 89 files changed, 43758 insertions(+), 2 deletions(-)
102 create mode 100644 drivers/gpu/drm/drm_global.c
103 create mode 100644 drivers/gpu/drm/psb/Makefile
104 create mode 100644 drivers/gpu/drm/psb/lnc_topaz.c
105 create mode 100644 drivers/gpu/drm/psb/lnc_topaz.h
106 create mode 100644 drivers/gpu/drm/psb/lnc_topazinit.c
107 create mode 100644 drivers/gpu/drm/psb/psb_bl.c
108 create mode 100644 drivers/gpu/drm/psb/psb_buffer.c
109 create mode 100644 drivers/gpu/drm/psb/psb_dpst.c
110 create mode 100644 drivers/gpu/drm/psb/psb_dpst.h
111 create mode 100644 drivers/gpu/drm/psb/psb_drm.h
112 create mode 100644 drivers/gpu/drm/psb/psb_drv.c
113 create mode 100644 drivers/gpu/drm/psb/psb_drv.h
114 create mode 100644 drivers/gpu/drm/psb/psb_fb.c
115 create mode 100644 drivers/gpu/drm/psb/psb_fb.h
116 create mode 100644 drivers/gpu/drm/psb/psb_fence.c
117 create mode 100644 drivers/gpu/drm/psb/psb_gtt.c
118 create mode 100644 drivers/gpu/drm/psb/psb_hotplug.c
119 create mode 100644 drivers/gpu/drm/psb/psb_hotplug.h
120 create mode 100644 drivers/gpu/drm/psb/psb_intel_bios.c
121 create mode 100644 drivers/gpu/drm/psb/psb_intel_bios.h
122 create mode 100644 drivers/gpu/drm/psb/psb_intel_display.c
123 create mode 100644 drivers/gpu/drm/psb/psb_intel_display.h
124 create mode 100644 drivers/gpu/drm/psb/psb_intel_drv.h
125 create mode 100644 drivers/gpu/drm/psb/psb_intel_dsi.c
126 create mode 100644 drivers/gpu/drm/psb/psb_intel_i2c.c
127 create mode 100644 drivers/gpu/drm/psb/psb_intel_lvds.c
128 create mode 100644 drivers/gpu/drm/psb/psb_intel_modes.c
129 create mode 100644 drivers/gpu/drm/psb/psb_intel_reg.h
130 create mode 100644 drivers/gpu/drm/psb/psb_intel_sdvo.c
131 create mode 100644 drivers/gpu/drm/psb/psb_intel_sdvo_regs.h
132 create mode 100644 drivers/gpu/drm/psb/psb_irq.c
133 create mode 100644 drivers/gpu/drm/psb/psb_mmu.c
134 create mode 100644 drivers/gpu/drm/psb/psb_msvdx.c
135 create mode 100644 drivers/gpu/drm/psb/psb_msvdx.h
136 create mode 100644 drivers/gpu/drm/psb/psb_msvdxinit.c
137 create mode 100644 drivers/gpu/drm/psb/psb_powermgmt.c
138 create mode 100644 drivers/gpu/drm/psb/psb_powermgmt.h
139 create mode 100644 drivers/gpu/drm/psb/psb_reg.h
140 create mode 100644 drivers/gpu/drm/psb/psb_reset.c
141 create mode 100644 drivers/gpu/drm/psb/psb_scene.c
142 create mode 100644 drivers/gpu/drm/psb/psb_scene.h
143 create mode 100644 drivers/gpu/drm/psb/psb_schedule.c
144 create mode 100644 drivers/gpu/drm/psb/psb_schedule.h
145 create mode 100644 drivers/gpu/drm/psb/psb_setup.c
146 create mode 100644 drivers/gpu/drm/psb/psb_sgx.c
147 create mode 100644 drivers/gpu/drm/psb/psb_sgx.h
148 create mode 100644 drivers/gpu/drm/psb/psb_socket.c
149 create mode 100644 drivers/gpu/drm/psb/psb_ttm_glue.c
150 create mode 100644 drivers/gpu/drm/psb/psb_umevents.c
151 create mode 100644 drivers/gpu/drm/psb/psb_umevents.h
152 create mode 100644 drivers/gpu/drm/psb/psb_xhw.c
153 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_agp_backend.c
154 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo.c
155 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo_api.h
156 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo_driver.h
157 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo_util.c
158 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo_vm.c
159 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c
160 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h
161 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence.c
162 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence_api.h
163 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence_driver.h
164 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence_user.c
165 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence_user.h
166 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_lock.c
167 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_lock.h
168 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_memory.c
169 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_memory.h
170 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_object.c
171 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_object.h
172 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_pat_compat.c
173 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_pat_compat.h
174 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_placement_common.h
175 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_placement_user.c
176 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_placement_user.h
177 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_regman.h
178 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_tt.c
179 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_userobj_api.h
180
181diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
182index 39b393d..9bd8ca1 100644
183--- a/drivers/gpu/drm/Kconfig
184+++ b/drivers/gpu/drm/Kconfig
185@@ -143,3 +143,15 @@ config DRM_SAVAGE
186 help
187 Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
188 chipset. If M is selected the module will be called savage.
189+
190+config DRM_PSB
191+ tristate "Intel Poulsbo/Moorestown"
192+ depends on DRM && PCI
193+ select FB_CFB_COPYAREA
194+ select FB_CFB_FILLRECT
195+ select FB_CFB_IMAGEBLIT
196+ select MRST_RAR_HANDLER
197+ help
198+ Choose this option if you have a Poulsbo or Moorestown platform.
199+ If M is selected the module will be called psb.
200+
201diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
202index d76f167..4989b1e 100644
203--- a/drivers/gpu/drm/Makefile
204+++ b/drivers/gpu/drm/Makefile
205@@ -15,12 +15,13 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
206 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
207 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
208 drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
209- drm_info.o drm_debugfs.o
210+ drm_info.o drm_debugfs.o drm_global.o
211
212 drm-$(CONFIG_COMPAT) += drm_ioc32.o
213
214 obj-$(CONFIG_DRM) += drm.o
215 obj-$(CONFIG_DRM_TTM) += ttm/
216+obj-$(CONFIG_DRM_PSB) +=psb/
217 obj-$(CONFIG_DRM_TDFX) += tdfx/
218 obj-$(CONFIG_DRM_R128) += r128/
219 obj-$(CONFIG_DRM_RADEON)+= radeon/
220diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
221index 2f631c7..11cd2e8 100644
222--- a/drivers/gpu/drm/drm_crtc.c
223+++ b/drivers/gpu/drm/drm_crtc.c
224@@ -146,6 +146,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
225 { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
226 { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
227 { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
228+ { DRM_MODE_CONNECTOR_MIPI, "MIPI", 0 },
229 };
230
231 static struct drm_prop_enum_list drm_encoder_enum_list[] =
232@@ -154,6 +155,7 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] =
233 { DRM_MODE_ENCODER_TMDS, "TMDS" },
234 { DRM_MODE_ENCODER_LVDS, "LVDS" },
235 { DRM_MODE_ENCODER_TVDAC, "TV" },
236+ { DRM_MODE_ENCODER_MIPI, "MIPI" },
237 };
238
239 char *drm_get_encoder_name(struct drm_encoder *encoder)
240diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
241index b7f3a41..81195a7 100644
242--- a/drivers/gpu/drm/drm_drv.c
243+++ b/drivers/gpu/drm/drm_drv.c
244@@ -344,6 +345,8 @@ static int __init drm_core_init(void)
245
246 DRM_INFO("Initialized %s %d.%d.%d %s\n",
247 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
248+ drm_global_init();
249+
250 return 0;
251 err_p3:
252 drm_sysfs_destroy();
253@@ -357,6 +360,7 @@ err_p1:
254
255 static void __exit drm_core_exit(void)
256 {
257+ drm_global_release();
258 remove_proc_entry("dri", NULL);
259 debugfs_remove(drm_debugfs_root);
260 drm_sysfs_destroy();
261@@ -408,9 +412,16 @@ static int drm_version(struct drm_device *dev, void *data,
262 * Looks up the ioctl function in the ::ioctls table, checking for root
263 * previleges if so required, and dispatches to the respective function.
264 */
265+
266 int drm_ioctl(struct inode *inode, struct file *filp,
267 unsigned int cmd, unsigned long arg)
268 {
269+ return drm_unlocked_ioctl(filp, cmd, arg);
270+}
271+EXPORT_SYMBOL(drm_ioctl);
272+
273+long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
274+{
275 struct drm_file *file_priv = filp->private_data;
276 struct drm_device *dev = file_priv->minor->dev;
277 struct drm_ioctl_desc *ioctl;
278@@ -493,7 +504,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
279 return retcode;
280 }
281
282-EXPORT_SYMBOL(drm_ioctl);
283+EXPORT_SYMBOL(drm_unlocked_ioctl);
284
285 struct drm_local_map *drm_getsarea(struct drm_device *dev)
286 {
287diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
288new file mode 100644
289index 0000000..e054c4f
290--- /dev/null
291+++ b/drivers/gpu/drm/drm_global.c
292@@ -0,0 +1,107 @@
293+/**************************************************************************
294+ *
295+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
296+ * All Rights Reserved.
297+ *
298+ * Permission is hereby granted, free of charge, to any person obtaining a
299+ * copy of this software and associated documentation files (the
300+ * "Software"), to deal in the Software without restriction, including
301+ * without limitation the rights to use, copy, modify, merge, publish,
302+ * distribute, sub license, and/or sell copies of the Software, and to
303+ * permit persons to whom the Software is furnished to do so, subject to
304+ * the following conditions:
305+ *
306+ * The above copyright notice and this permission notice (including the
307+ * next paragraph) shall be included in all copies or substantial portions
308+ * of the Software.
309+ *
310+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
311+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
312+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
313+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
314+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
315+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
316+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
317+ *
318+ **************************************************************************/
319+#include <drmP.h>
320+struct drm_global_item {
321+ struct mutex mutex;
322+ void *object;
323+ int refcount;
324+};
325+
326+static struct drm_global_item glob[DRM_GLOBAL_NUM];
327+
328+void drm_global_init(void)
329+{
330+ int i;
331+
332+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
333+ struct drm_global_item *item = &glob[i];
334+ mutex_init(&item->mutex);
335+ item->object = NULL;
336+ item->refcount = 0;
337+ }
338+}
339+
340+void drm_global_release(void)
341+{
342+ int i;
343+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
344+ struct drm_global_item *item = &glob[i];
345+ BUG_ON(item->object != NULL);
346+ BUG_ON(item->refcount != 0);
347+ }
348+}
349+
350+int drm_global_item_ref(struct drm_global_reference *ref)
351+{
352+ int ret;
353+ struct drm_global_item *item = &glob[ref->global_type];
354+ void *object;
355+
356+ mutex_lock(&item->mutex);
357+ if (item->refcount == 0) {
358+ item->object = kmalloc(ref->size, GFP_KERNEL);
359+ if (unlikely(item->object == NULL)) {
360+ ret = -ENOMEM;
361+ goto out_err;
362+ }
363+
364+ ref->object = item->object;
365+ ret = ref->init(ref);
366+ if (unlikely(ret != 0))
367+ goto out_err;
368+
369+ ++item->refcount;
370+ }
371+ ref->object = item->object;
372+ object = item->object;
373+ mutex_unlock(&item->mutex);
374+ return 0;
375+ out_err:
376+ kfree(item->object);
377+ mutex_unlock(&item->mutex);
378+ item->object = NULL;
379+ return ret;
380+}
381+
382+EXPORT_SYMBOL(drm_global_item_ref);
383+
384+void drm_global_item_unref(struct drm_global_reference *ref)
385+{
386+ struct drm_global_item *item = &glob[ref->global_type];
387+
388+ mutex_lock(&item->mutex);
389+ BUG_ON(item->refcount == 0);
390+ BUG_ON(ref->object != item->object);
391+ if (--item->refcount == 0) {
392+ ref->release(ref);
393+ kfree(item->object);
394+ item->object = NULL;
395+ }
396+ mutex_unlock(&item->mutex);
397+}
398+
399+EXPORT_SYMBOL(drm_global_item_unref);
400diff --git a/drivers/gpu/drm/psb/Makefile b/drivers/gpu/drm/psb/Makefile
401new file mode 100644
402index 0000000..67319ba
403--- /dev/null
404+++ b/drivers/gpu/drm/psb/Makefile
405@@ -0,0 +1,19 @@
406+#
407+# Makefile for the drm device driver. This driver provides support for the
408+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
409+
410+ccflags-y := -Idrivers/gpu/drm/psb -Iinclude/drm -Iinclude/linux
411+
412+psb-y := psb_drv.o psb_mmu.o psb_sgx.o psb_irq.o psb_fence.o \
413+ psb_buffer.o psb_gtt.o psb_schedule.o psb_scene.o \
414+ psb_reset.o psb_xhw.o psb_msvdx.o psb_bl.o psb_intel_bios.o\
415+ psb_umevents.o psb_hotplug.o psb_socket.o psb_dpst.o \
416+ psb_powermgmt.o lnc_topaz.o lnc_topazinit.o \
417+ psb_msvdxinit.o psb_ttm_glue.o psb_fb.o psb_setup.o \
418+ ttm/ttm_object.o ttm/ttm_lock.o ttm/ttm_fence_user.o \
419+ ttm/ttm_fence.o ttm/ttm_tt.o ttm/ttm_execbuf_util.o \
420+ ttm/ttm_bo.o ttm/ttm_bo_util.o ttm/ttm_placement_user.o \
421+ ttm/ttm_bo_vm.o ttm/ttm_pat_compat.o ttm/ttm_memory.o
422+
423+obj-$(CONFIG_DRM_PSB) += psb.o
424+
425diff --git a/drivers/gpu/drm/psb/lnc_topaz.c b/drivers/gpu/drm/psb/lnc_topaz.c
426new file mode 100644
427index 0000000..adabac5
428--- /dev/null
429+++ b/drivers/gpu/drm/psb/lnc_topaz.c
430@@ -0,0 +1,676 @@
431+/**
432+ * file lnc_topaz.c
433+ * TOPAZ I/O operations and IRQ handling
434+ *
435+ */
436+
437+/**************************************************************************
438+ *
439+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
440+ * Copyright (c) Imagination Technologies Limited, UK
441+ * All Rights Reserved.
442+ *
443+ * Permission is hereby granted, free of charge, to any person obtaining a
444+ * copy of this software and associated documentation files (the
445+ * "Software"), to deal in the Software without restriction, including
446+ * without limitation the rights to use, copy, modify, merge, publish,
447+ * distribute, sub license, and/or sell copies of the Software, and to
448+ * permit persons to whom the Software is furnished to do so, subject to
449+ * the following conditions:
450+ *
451+ * The above copyright notice and this permission notice (including the
452+ * next paragraph) shall be included in all copies or substantial portions
453+ * of the Software.
454+ *
455+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
456+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
457+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
458+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
459+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
460+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
461+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
462+ *
463+ **************************************************************************/
464+
465+/* include headers */
466+/* #define DRM_DEBUG_CODE 2 */
467+
468+#include <drm/drmP.h>
469+#include <drm/drm_os_linux.h>
470+
471+#include "psb_drv.h"
472+#include "psb_drm.h"
473+#include "lnc_topaz.h"
474+#include "psb_powermgmt.h"
475+
476+#include <linux/io.h>
477+#include <linux/delay.h>
478+
479+
480+/* static function define */
481+static int lnc_topaz_deliver_command(struct drm_device *dev,
482+ struct ttm_buffer_object *cmd_buffer,
483+ unsigned long cmd_offset,
484+ unsigned long cmd_size,
485+ void **topaz_cmd, uint32_t sequence,
486+ int copy_cmd);
487+static int lnc_topaz_send(struct drm_device *dev, void *cmd,
488+ unsigned long cmd_size, uint32_t sync_seq);
489+static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd);
490+static int lnc_topaz_dequeue_send(struct drm_device *dev);
491+static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
492+ unsigned long cmd_size, uint32_t sequence);
493+
494+void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat)
495+{
496+ struct drm_psb_private *dev_priv =
497+ (struct drm_psb_private *)dev->dev_private;
498+ uint32_t clr_flag = lnc_topaz_queryirq(dev);
499+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
500+ uint32_t cur_seq;
501+
502+ lnc_topaz_clearirq(dev, clr_flag);
503+
504+ /* ignore non-SYNC interrupts */
505+ if ((CCB_CTRL_SEQ(dev_priv) & 0x8000) == 0)
506+ return;
507+
508+ cur_seq = *(uint32_t *)topaz_priv->topaz_sync_addr;
509+
510+ PSB_DEBUG_IRQ("TOPAZ:Got SYNC IRQ,sync seq:0x%08x (MTX) vs 0x%08x\n",
511+ cur_seq, dev_priv->sequence[LNC_ENGINE_ENCODE]);
512+
513+ psb_fence_handler(dev, LNC_ENGINE_ENCODE);
514+
515+ /* save frame skip flag for query */
516+ topaz_priv->frame_skip = CCB_CTRL_FRAMESKIP(dev_priv);
517+
518+ topaz_priv->topaz_busy = 1;
519+ lnc_topaz_dequeue_send(dev);
520+
521+ if (drm_topaz_pmpolicy == PSB_PMPOLICY_POWERDOWN)
522+ schedule_delayed_work(&dev_priv->scheduler.topaz_suspend_wq, 0);
523+}
524+
525+static int lnc_submit_encode_cmdbuf(struct drm_device *dev,
526+ struct ttm_buffer_object *cmd_buffer,
527+ unsigned long cmd_offset, unsigned long cmd_size,
528+ struct ttm_fence_object *fence)
529+{
530+ struct drm_psb_private *dev_priv = dev->dev_private;
531+ unsigned long irq_flags;
532+ int ret = 0;
533+ void *cmd;
534+ uint32_t sequence = dev_priv->sequence[LNC_ENGINE_ENCODE];
535+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
536+
537+ PSB_DEBUG_GENERAL("TOPAZ: command submit\n");
538+
539+ PSB_DEBUG_GENERAL("TOPAZ: topaz busy = %d\n", topaz_priv->topaz_busy);
540+
541+ if (topaz_priv->topaz_fw_loaded == 0) {
542+ /* #.# load fw to driver */
543+ PSB_DEBUG_INIT("TOPAZ: load /lib/firmware/topaz_fw.bin\n");
544+ ret = topaz_init_fw(dev);
545+ if (ret != 0) {
546+ /* FIXME: find a proper return value */
547+ DRM_ERROR("TOPAX:load /lib/firmware/topaz_fw.bin fail,"
548+ "ensure udevd is configured correctly!\n");
549+
550+ return -EFAULT;
551+ }
552+ topaz_priv->topaz_fw_loaded = 1;
553+ }
554+
555+ /* # schedule watchdog */
556+ /* psb_schedule_watchdog(dev_priv); */
557+
558+ /* # spin lock irq save [msvdx_lock] */
559+ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
560+
561+ /* # if topaz need to reset, reset it */
562+ if (topaz_priv->topaz_needs_reset) {
563+ /* #.# reset it */
564+ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
565+ PSB_DEBUG_GENERAL("TOPAZ: needs reset.\n");
566+
567+ if (lnc_topaz_reset(dev_priv)) {
568+ ret = -EBUSY;
569+ DRM_ERROR("TOPAZ: reset failed.\n");
570+ return ret;
571+ }
572+
573+ PSB_DEBUG_GENERAL("TOPAZ: reset ok.\n");
574+
575+ /* #.# upload firmware */
576+ if (topaz_setup_fw(dev, topaz_priv->topaz_cur_codec)) {
577+ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
578+ return -EBUSY;
579+ }
580+
581+ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
582+ }
583+
584+ if (!topaz_priv->topaz_busy) {
585+ /* # direct map topaz command if topaz is free */
586+ PSB_DEBUG_GENERAL("TOPAZ:direct send command,sequence %08x \n",
587+ sequence);
588+
589+ topaz_priv->topaz_busy = 1;
590+ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
591+
592+ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
593+ cmd_size, NULL, sequence, 0);
594+
595+ if (ret) {
596+ DRM_ERROR("TOPAZ: failed to extract cmd...\n");
597+ return ret;
598+ }
599+ } else {
600+ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence %08x \n",
601+ sequence);
602+ cmd = NULL;
603+
604+ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
605+
606+ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
607+ cmd_size, &cmd, sequence, 1);
608+ if (cmd == NULL || ret) {
609+ DRM_ERROR("TOPAZ: map command for save fialed\n");
610+ return ret;
611+ }
612+
613+ ret = lnc_topaz_save_command(dev, cmd, cmd_size, sequence);
614+ if (ret)
615+ DRM_ERROR("TOPAZ: save command failed\n");
616+ }
617+
618+ return ret;
619+}
620+
621+static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
622+ unsigned long cmd_size, uint32_t sequence)
623+{
624+ struct drm_psb_private *dev_priv = dev->dev_private;
625+ struct lnc_topaz_cmd_queue *topaz_cmd;
626+ unsigned long irq_flags;
627+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
628+
629+ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence: %08x..\n",
630+ sequence);
631+
632+ topaz_cmd = kzalloc(sizeof(struct lnc_topaz_cmd_queue),
633+ GFP_KERNEL);
634+ if (topaz_cmd == NULL) {
635+ mutex_unlock(&topaz_priv->topaz_mutex);
636+ DRM_ERROR("TOPAZ: out of memory....\n");
637+ return -ENOMEM;
638+ }
639+
640+ topaz_cmd->cmd = cmd;
641+ topaz_cmd->cmd_size = cmd_size;
642+ topaz_cmd->sequence = sequence;
643+
644+ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
645+ list_add_tail(&topaz_cmd->head, &topaz_priv->topaz_queue);
646+ if (!topaz_priv->topaz_busy) {
647+ /* topaz_priv->topaz_busy = 1; */
648+ PSB_DEBUG_GENERAL("TOPAZ: need immediate dequeue...\n");
649+ lnc_topaz_dequeue_send(dev);
650+ PSB_DEBUG_GENERAL("TOPAZ: after dequeue command\n");
651+ }
652+
653+ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
654+
655+ return 0;
656+}
657+
658+
659+int lnc_cmdbuf_video(struct drm_file *priv,
660+ struct list_head *validate_list,
661+ uint32_t fence_type,
662+ struct drm_psb_cmdbuf_arg *arg,
663+ struct ttm_buffer_object *cmd_buffer,
664+ struct psb_ttm_fence_rep *fence_arg)
665+{
666+ struct drm_device *dev = priv->minor->dev;
667+ struct ttm_fence_object *fence = NULL;
668+ int ret;
669+
670+ ret = lnc_submit_encode_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
671+ arg->cmdbuf_size, fence);
672+ if (ret)
673+ return ret;
674+
675+ /* workaround for interrupt issue */
676+ psb_fence_or_sync(priv, LNC_ENGINE_ENCODE, fence_type, arg->fence_flags,
677+ validate_list, fence_arg, &fence);
678+
679+ if (fence)
680+ ttm_fence_object_unref(&fence);
681+
682+ mutex_lock(&cmd_buffer->mutex);
683+ if (cmd_buffer->sync_obj != NULL)
684+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
685+ mutex_unlock(&cmd_buffer->mutex);
686+
687+ return 0;
688+}
689+
690+static int lnc_topaz_sync(struct drm_device *dev, uint32_t sync_seq)
691+{
692+ struct drm_psb_private *dev_priv = dev->dev_private;
693+ uint32_t sync_cmd[3];
694+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
695+
696+#if 0
697+ struct ttm_fence_device *fdev = &dev_priv->fdev;
698+ struct ttm_fence_class_manager *fc =
699+ &fdev->fence_class[LNC_ENGINE_ENCODE];
700+ unsigned long irq_flags;
701+#endif
702+#if LNC_TOPAZ_NO_IRQ
703+ uint32_t *sync_p = (uint32_t *)topaz_priv->topaz_sync_addr;
704+ int count = 10000;
705+ uint32_t cur_seq;
706+#endif
707+
708+ /* insert a SYNC command here */
709+ topaz_priv->topaz_sync_cmd_seq = (1 << 15) |
710+ topaz_priv->topaz_cmd_seq++;
711+ sync_cmd[0] = 1 | (MTX_CMDID_SYNC << 1) | (3 << 8) |
712+ (topaz_priv->topaz_sync_cmd_seq << 16);
713+ sync_cmd[1] = topaz_priv->topaz_sync_offset;
714+ sync_cmd[2] = sync_seq;
715+
716+ PSB_DEBUG_GENERAL("TOPAZ:MTX_CMDID_SYNC: size(3),cmd seq (0x%04x),"
717+ "sync_seq (0x%08x)\n",
718+ topaz_priv->topaz_sync_cmd_seq, sync_seq);
719+
720+ lnc_mtx_send(dev_priv, sync_cmd);
721+
722+#if LNC_TOPAZ_NO_IRQ /* workaround for interrupt issue */
723+ /* # poll topaz register for certain times */
724+ while (count && *sync_p != sync_seq) {
725+ DRM_UDELAY(100);
726+ --count;
727+ }
728+ if ((count == 0) && (*sync_p != sync_seq)) {
729+ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),actual 0x%08x\n",
730+ sync_seq, *sync_p);
731+ return -EBUSY;
732+ }
733+ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
734+
735+ topaz_priv->topaz_busy = 0;
736+
737+ /* XXX: check psb_fence_handler is suitable for topaz */
738+ cur_seq = *sync_p;
739+#if 0
740+ write_lock_irqsave(&fc->lock, irq_flags);
741+ ttm_fence_handler(fdev, LNC_ENGINE_ENCODE,
742+ cur_seq,
743+ _PSB_FENCE_TYPE_EXE, 0);
744+ write_unlock_irqrestore(&fc->lock, irq_flags);
745+#endif
746+#endif
747+ return 0;
748+}
749+
750+int
751+lnc_topaz_deliver_command(struct drm_device *dev,
752+ struct ttm_buffer_object *cmd_buffer,
753+ unsigned long cmd_offset, unsigned long cmd_size,
754+ void **topaz_cmd, uint32_t sequence,
755+ int copy_cmd)
756+{
757+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
758+ struct ttm_bo_kmap_obj cmd_kmap;
759+ bool is_iomem;
760+ int ret;
761+ unsigned char *cmd_start, *tmp;
762+
763+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2,
764+ &cmd_kmap);
765+ if (ret) {
766+ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", ret);
767+ return ret;
768+ }
769+ cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap,
770+ &is_iomem) + cmd_page_offset;
771+
772+ if (copy_cmd) {
773+ PSB_DEBUG_GENERAL("TOPAZ: queue commands\n");
774+ tmp = kzalloc(cmd_size, GFP_KERNEL);
775+ if (tmp == NULL) {
776+ ret = -ENOMEM;
777+ goto out;
778+ }
779+ memcpy(tmp, cmd_start, cmd_size);
780+ *topaz_cmd = tmp;
781+ } else {
782+ PSB_DEBUG_GENERAL("TOPAZ: directly send the command\n");
783+ ret = lnc_topaz_send(dev, cmd_start, cmd_size, sequence);
784+ if (ret) {
785+ DRM_ERROR("TOPAZ: commit commands failed.\n");
786+ ret = -EINVAL;
787+ }
788+ }
789+
790+out:
791+ PSB_DEBUG_GENERAL("TOPAZ:cmd_size(%ld), sequence(%d) copy_cmd(%d)\n",
792+ cmd_size, sequence, copy_cmd);
793+
794+ ttm_bo_kunmap(&cmd_kmap);
795+
796+ return ret;
797+}
798+
799+int
800+lnc_topaz_send(struct drm_device *dev, void *cmd,
801+ unsigned long cmd_size, uint32_t sync_seq)
802+{
803+ struct drm_psb_private *dev_priv = dev->dev_private;
804+ int ret = 0;
805+ unsigned char *command = (unsigned char *) cmd;
806+ struct topaz_cmd_header *cur_cmd_header;
807+ uint32_t cur_cmd_size, cur_cmd_id;
808+ uint32_t codec;
809+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
810+
811+ PSB_DEBUG_GENERAL("TOPAZ: send the command in the buffer one by one\n");
812+
813+ while (cmd_size > 0) {
814+ cur_cmd_header = (struct topaz_cmd_header *) command;
815+ cur_cmd_size = cur_cmd_header->size * 4;
816+ cur_cmd_id = cur_cmd_header->id;
817+
818+ switch (cur_cmd_id) {
819+ case MTX_CMDID_SW_NEW_CODEC:
820+ codec = *((uint32_t *) cmd + 1);
821+
822+ PSB_DEBUG_GENERAL("TOPAZ: setup new codec %s (%d)\n",
823+ codec_to_string(codec), codec);
824+ if (topaz_setup_fw(dev, codec)) {
825+ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
826+ return -EBUSY;
827+ }
828+
829+ topaz_priv->topaz_cur_codec = codec;
830+ break;
831+
832+ case MTX_CMDID_SW_ENTER_LOWPOWER:
833+ PSB_DEBUG_GENERAL("TOPAZ: enter lowpower.... \n");
834+ PSB_DEBUG_GENERAL("XXX: implement it\n");
835+ break;
836+
837+ case MTX_CMDID_SW_LEAVE_LOWPOWER:
838+ PSB_DEBUG_GENERAL("TOPAZ: leave lowpower... \n");
839+ PSB_DEBUG_GENERAL("XXX: implement it\n");
840+ break;
841+
842+ /* ordinary commmand */
843+ case MTX_CMDID_START_PIC:
844+ /* XXX: specially handle START_PIC hw command */
845+ CCB_CTRL_SET_QP(dev_priv,
846+ *(command + cur_cmd_size - 4));
847+ /* strip the QP parameter (it's software arg) */
848+ cur_cmd_header->size--;
849+ default:
850+ cur_cmd_header->seq = 0x7fff &
851+ topaz_priv->topaz_cmd_seq++;
852+
853+ PSB_DEBUG_GENERAL("TOPAZ: %s: size(%d),"
854+ " seq (0x%04x)\n",
855+ cmd_to_string(cur_cmd_id),
856+ cur_cmd_size, cur_cmd_header->seq);
857+ ret = lnc_mtx_send(dev_priv, command);
858+ if (ret) {
859+ DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret);
860+ goto out;
861+ }
862+ break;
863+ }
864+
865+ command += cur_cmd_size;
866+ cmd_size -= cur_cmd_size;
867+ }
868+ lnc_topaz_sync(dev, sync_seq);
869+out:
870+ return ret;
871+}
872+
873+static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd)
874+{
875+ struct topaz_cmd_header *cur_cmd_header =
876+ (struct topaz_cmd_header *) cmd;
877+ uint32_t cmd_size = cur_cmd_header->size;
878+ uint32_t read_index, write_index;
879+ const uint32_t *cmd_pointer = (uint32_t *) cmd;
880+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
881+
882+ int ret = 0;
883+
884+ /* <msvdx does> # enable all clock */
885+
886+ write_index = topaz_priv->topaz_cmd_windex;
887+ if (write_index + cmd_size + 1 > topaz_priv->topaz_ccb_size) {
888+ int free_space = topaz_priv->topaz_ccb_size - write_index;
889+
890+ PSB_DEBUG_GENERAL("TOPAZ: -------will wrap CCB write point.\n");
891+ if (free_space > 0) {
892+ struct topaz_cmd_header pad_cmd;
893+
894+ pad_cmd.id = MTX_CMDID_NULL;
895+ pad_cmd.size = free_space;
896+ pad_cmd.seq = 0x7fff & topaz_priv->topaz_cmd_seq;
897+
898+ PSB_DEBUG_GENERAL("TOPAZ: MTX_CMDID_NULL:"
899+ " size(%d),seq (0x%04x)\n",
900+ pad_cmd.size, pad_cmd.seq);
901+
902+ TOPAZ_BEGIN_CCB(dev_priv);
903+ TOPAZ_OUT_CCB(dev_priv, pad_cmd.val);
904+ TOPAZ_END_CCB(dev_priv, 1);
905+
906+ POLL_WB_SEQ(dev_priv, pad_cmd.seq);
907+ ++topaz_priv->topaz_cmd_seq;
908+ }
909+ POLL_WB_RINDEX(dev_priv, 0);
910+ if (ret == 0)
911+ topaz_priv->topaz_cmd_windex = 0;
912+ else {
913+ DRM_ERROR("TOPAZ: poll rindex timeout\n");
914+ return ret; /* HW may hang, need reset */
915+ }
916+ PSB_DEBUG_GENERAL("TOPAZ: -------wrap CCB was done.\n");
917+ }
918+
919+ read_index = CCB_CTRL_RINDEX(dev_priv);/* temperily use CCB CTRL */
920+ write_index = topaz_priv->topaz_cmd_windex;
921+
922+ PSB_DEBUG_GENERAL("TOPAZ: write index(%d), read index(%d,WB=%d)\n",
923+ write_index, read_index, WB_CCB_CTRL_RINDEX(dev_priv));
924+ TOPAZ_BEGIN_CCB(dev_priv);
925+ while (cmd_size > 0) {
926+ TOPAZ_OUT_CCB(dev_priv, *cmd_pointer++);
927+ --cmd_size;
928+ }
929+ TOPAZ_END_CCB(dev_priv, 1);
930+
931+#if 0
932+ DRM_UDELAY(1000);
933+ lnc_topaz_clearirq(dev,
934+ lnc_topaz_queryirq(dev));
935+ LNC_TRACEL("TOPAZ: after clear, query again\n");
936+ lnc_topaz_queryirq(dev_priv);
937+#endif
938+
939+ return ret;
940+}
941+
942+int lnc_topaz_dequeue_send(struct drm_device *dev)
943+{
944+ struct drm_psb_private *dev_priv = dev->dev_private;
945+ struct lnc_topaz_cmd_queue *topaz_cmd = NULL;
946+ int ret;
947+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
948+
949+ PSB_DEBUG_GENERAL("TOPAZ: dequeue command and send it to topaz\n");
950+
951+ if (list_empty(&topaz_priv->topaz_queue)) {
952+ topaz_priv->topaz_busy = 0;
953+ return 0;
954+ }
955+
956+ topaz_cmd = list_first_entry(&topaz_priv->topaz_queue,
957+ struct lnc_topaz_cmd_queue, head);
958+
959+ PSB_DEBUG_GENERAL("TOPAZ: queue has id %08x\n", topaz_cmd->sequence);
960+ ret = lnc_topaz_send(dev, topaz_cmd->cmd, topaz_cmd->cmd_size,
961+ topaz_cmd->sequence);
962+ if (ret) {
963+ DRM_ERROR("TOPAZ: lnc_topaz_send failed.\n");
964+ ret = -EINVAL;
965+ }
966+
967+ list_del(&topaz_cmd->head);
968+ kfree(topaz_cmd->cmd);
969+ kfree(topaz_cmd
970+ );
971+
972+ return ret;
973+}
974+
975+void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_count)
976+{
977+ PSB_DEBUG_GENERAL("TOPAZ: kick mtx count(%d).\n", kick_count);
978+ MTX_WRITE32(MTX_CR_MTX_KICK, kick_count);
979+}
980+
981+int lnc_check_topaz_idle(struct drm_device *dev)
982+{
983+ struct drm_psb_private *dev_priv =
984+ (struct drm_psb_private *)dev->dev_private;
985+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
986+ uint32_t reg_val;
987+
988+ if (topaz_priv->topaz_busy)
989+ return -EBUSY;
990+
991+ MVEA_READ32(MVEA_CR_MVEA_BUSY, &reg_val);
992+ if (reg_val != 0)
993+ return -EBUSY;
994+
995+ MVEA_READ32(MVEA_CR_MVEA_DMACMDFIFO_WAIT, &reg_val);
996+ if (reg_val != 0)
997+ return -EBUSY;
998+
999+ MVEA_READ32(MVEA_CR_MVEA_DMACMDFIFO_STATUS, &reg_val);
1000+ if ((reg_val & (1 << 8)) == 0)
1001+ return -EBUSY;
1002+
1003+ return 0; /* we think it is idle */
1004+}
1005+
1006+int lnc_wait_topaz_idle(struct drm_device *dev)
1007+{
1008+ struct drm_psb_private *dev_priv =
1009+ (struct drm_psb_private *)dev->dev_private;
1010+ struct ttm_fence_device *fdev = &dev_priv->fdev;
1011+ struct ttm_fence_class_manager *fc =
1012+ &fdev->fence_class[LNC_ENGINE_ENCODE];
1013+ struct ttm_fence_object *fence, *next;
1014+ unsigned long _end = jiffies + 5 * DRM_HZ;
1015+ int signaled = 0;
1016+ int ret = 0;
1017+
1018+ /* Ensure that all pending IRQs are serviced, */
1019+ /*
1020+ * Save the last MSVDX fence in dev_priv instead!!!
1021+ * Need to be fc->write_locked while accessing a fence from the ring.
1022+ */
1023+ list_for_each_entry_safe(fence, next, &fc->ring, ring) {
1024+ do {
1025+ signaled = ttm_fence_object_signaled(fence,
1026+ _PSB_FENCE_TYPE_EXE);
1027+ if (signaled)
1028+ break;
1029+ if (time_after_eq(jiffies, _end)) {
1030+ PSB_DEBUG_PM("TOPAZIDLE: fence 0x%x didn't get"
1031+ "signaled for 3 secs\n",
1032+ (unsigned int) fence);
1033+ break;
1034+ }
1035+ DRM_UDELAY(1000);
1036+ } while (1);
1037+ }
1038+
1039+ do {
1040+ ret = lnc_check_topaz_idle(dev);
1041+ if (ret == 0)
1042+ break;
1043+
1044+ if (time_after_eq(jiffies, _end)) {
1045+ PSB_DEBUG_PM("TOPAZIDLE: wait HW idle time out\n");
1046+ break;
1047+ }
1048+ DRM_UDELAY(1000);
1049+ } while (1);
1050+
1051+ return ret;
1052+}
1053+
1054+int lnc_video_frameskip(struct drm_device *dev, uint64_t user_pointer)
1055+{
1056+ struct drm_psb_private *dev_priv =
1057+ (struct drm_psb_private *)dev->dev_private;
1058+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
1059+ int ret;
1060+
1061+ ret = copy_to_user((void __user *) ((unsigned long)user_pointer),
1062+ &topaz_priv->frame_skip, sizeof(topaz_priv->frame_skip));
1063+
1064+ if (ret)
1065+ return -EFAULT;
1066+
1067+ return 0;
1068+}
1069+
1070+static void lnc_topaz_flush_cmd_queue(struct topaz_private *topaz_priv)
1071+{
1072+ struct lnc_topaz_cmd_queue *entry, *next;
1073+
1074+ /* remind to reset topaz */
1075+ topaz_priv->topaz_needs_reset = 1;
1076+
1077+ if (list_empty(&topaz_priv->topaz_queue)) {
1078+ topaz_priv->topaz_busy = 0;
1079+ return;
1080+ }
1081+
1082+ /* flush all command in queue */
1083+ list_for_each_entry_safe(entry, next,
1084+ &topaz_priv->topaz_queue,
1085+ head) {
1086+ list_del(&entry->head);
1087+ kfree(entry->cmd);
1088+ kfree(entry);
1089+ }
1090+
1091+ return;
1092+}
1093+
1094+void lnc_topaz_handle_timeout(struct ttm_fence_device *fdev)
1095+{
1096+ struct drm_psb_private *dev_priv =
1097+ container_of(fdev, struct drm_psb_private, fdev);
1098+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
1099+
1100+ lnc_topaz_flush_cmd_queue(topaz_priv);
1101+}
1102+
1103+inline int psb_try_power_down_topaz(struct drm_device *dev)
1104+{
1105+ return powermgmt_suspend_islands(dev->pdev, PSB_VIDEO_ENC_ISLAND, false);
1106+}
1107diff --git a/drivers/gpu/drm/psb/lnc_topaz.h b/drivers/gpu/drm/psb/lnc_topaz.h
1108new file mode 100644
1109index 0000000..c48cab0
1110--- /dev/null
1111+++ b/drivers/gpu/drm/psb/lnc_topaz.h
1112@@ -0,0 +1,902 @@
1113+/**************************************************************************
1114+ *
1115+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
1116+ * Copyright (c) Imagination Technologies Limited, UK
1117+ * All Rights Reserved.
1118+ *
1119+ * Permission is hereby granted, free of charge, to any person obtaining a
1120+ * copy of this software and associated documentation files (the
1121+ * "Software"), to deal in the Software without restriction, including
1122+ * without limitation the rights to use, copy, modify, merge, publish,
1123+ * distribute, sub license, and/or sell copies of the Software, and to
1124+ * permit persons to whom the Software is furnished to do so, subject to
1125+ * the following conditions:
1126+ *
1127+ * The above copyright notice and this permission notice (including the
1128+ * next paragraph) shall be included in all copies or substantial portions
1129+ * of the Software.
1130+ *
1131+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1132+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1133+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
1134+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
1135+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
1136+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
1137+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
1138+ *
1139+ **************************************************************************/
1140+
1141+#ifndef _LNC_TOPAZ_H_
1142+#define _LNC_TOPAZ_H_
1143+
1144+#include "psb_drv.h"
1145+
1146+#define LNC_TOPAZ_NO_IRQ 0
1147+#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4)
1148+
1149+extern int drm_topaz_pmpolicy;
1150+
1151+/*
1152+ * MACROS to insert values into fields within a word. The basename of the
1153+ * field must have MASK_BASENAME and SHIFT_BASENAME constants.
1154+ */
1155+#define MM_WRITE32(base, offset, value) \
1156+do { \
1157+ *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg) \
1158+ + base + offset)) = value; \
1159+} while (0)
1160+
1161+#define MM_READ32(base, offset, pointer) \
1162+do { \
1163+ *(pointer) = *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg)\
1164+ + base + offset)); \
1165+} while (0)
1166+
1167+#define F_MASK(basename) (MASK_##basename)
1168+#define F_SHIFT(basename) (SHIFT_##basename)
1169+
1170+#define F_ENCODE(val, basename) \
1171+ (((val) << (F_SHIFT(basename))) & (F_MASK(basename)))
1172+
1173+/* MVEA macro */
1174+#define MVEA_START 0x03000
1175+
1176+#define MVEA_WRITE32(offset, value) MM_WRITE32(MVEA_START, offset, value)
1177+#define MVEA_READ32(offset, pointer) MM_READ32(MVEA_START, offset, pointer);
1178+
1179+#define F_MASK_MVEA(basename) (MASK_MVEA_##basename) /* MVEA */
1180+#define F_SHIFT_MVEA(basename) (SHIFT_MVEA_##basename) /* MVEA */
1181+#define F_ENCODE_MVEA(val, basename) \
1182+ (((val)<<(F_SHIFT_MVEA(basename)))&(F_MASK_MVEA(basename)))
1183+
1184+/* VLC macro */
1185+#define TOPAZ_VLC_START 0x05000
1186+
1187+/* TOPAZ macro */
1188+#define TOPAZ_START 0x02000
1189+
1190+#define TOPAZ_WRITE32(offset, value) MM_WRITE32(TOPAZ_START, offset, value)
1191+#define TOPAZ_READ32(offset, pointer) MM_READ32(TOPAZ_START, offset, pointer)
1192+
1193+#define F_MASK_TOPAZ(basename) (MASK_TOPAZ_##basename)
1194+#define F_SHIFT_TOPAZ(basename) (SHIFT_TOPAZ_##basename)
1195+#define F_ENCODE_TOPAZ(val, basename) \
1196+ (((val)<<(F_SHIFT_TOPAZ(basename)))&(F_MASK_TOPAZ(basename)))
1197+
1198+/* MTX macro */
1199+#define MTX_START 0x0
1200+
1201+#define MTX_WRITE32(offset, value) MM_WRITE32(MTX_START, offset, value)
1202+#define MTX_READ32(offset, pointer) MM_READ32(MTX_START, offset, pointer)
1203+
1204+/* DMAC macro */
1205+#define DMAC_START 0x0f000
1206+
1207+#define DMAC_WRITE32(offset, value) MM_WRITE32(DMAC_START, offset, value)
1208+#define DMAC_READ32(offset, pointer) MM_READ32(DMAC_START, offset, pointer)
1209+
1210+#define F_MASK_DMAC(basename) (MASK_DMAC_##basename)
1211+#define F_SHIFT_DMAC(basename) (SHIFT_DMAC_##basename)
1212+#define F_ENCODE_DMAC(val, basename) \
1213+ (((val)<<(F_SHIFT_DMAC(basename)))&(F_MASK_DMAC(basename)))
1214+
1215+
1216+/* Register CR_IMG_TOPAZ_INTENAB */
1217+#define TOPAZ_CR_IMG_TOPAZ_INTENAB 0x0008
1218+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x00000001
1219+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0
1220+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x0008
1221+
1222+#define MASK_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x80000000
1223+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 31
1224+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x0008
1225+
1226+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x00000008
1227+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 3
1228+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x0008
1229+
1230+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x00000002
1231+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 1
1232+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x0008
1233+
1234+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x00000004
1235+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 2
1236+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x0008
1237+
1238+#define TOPAZ_CR_IMG_TOPAZ_INTCLEAR 0x000C
1239+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x00000001
1240+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0
1241+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x000C
1242+
1243+#define TOPAZ_CR_IMG_TOPAZ_INTSTAT 0x0004
1244+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x00000001
1245+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0
1246+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x0004
1247+
1248+#define MTX_CCBCTRL_ROFF 0
1249+#define MTX_CCBCTRL_COMPLETE 4
1250+#define MTX_CCBCTRL_CCBSIZE 8
1251+#define MTX_CCBCTRL_QP 12
1252+#define MTX_CCBCTRL_FRAMESKIP 20
1253+#define MTX_CCBCTRL_INITQP 24
1254+
1255+#define TOPAZ_CR_MMU_STATUS 0x001C
1256+#define MASK_TOPAZ_CR_MMU_PF_N_RW 0x00000001
1257+#define SHIFT_TOPAZ_CR_MMU_PF_N_RW 0
1258+#define REGNUM_TOPAZ_CR_MMU_PF_N_RW 0x001C
1259+
1260+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x00000008
1261+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 3
1262+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x000C
1263+
1264+#define TOPAZ_CR_MMU_MEM_REQ 0x0020
1265+#define MASK_TOPAZ_CR_MEM_REQ_STAT_READS 0x000000FF
1266+#define SHIFT_TOPAZ_CR_MEM_REQ_STAT_READS 0
1267+#define REGNUM_TOPAZ_CR_MEM_REQ_STAT_READS 0x0020
1268+
1269+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x00000002
1270+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 1
1271+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x000C
1272+
1273+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x00000004
1274+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 2
1275+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x000C
1276+
1277+#define MTX_CR_MTX_KICK 0x0080
1278+#define MASK_MTX_MTX_KICK 0x0000FFFF
1279+#define SHIFT_MTX_MTX_KICK 0
1280+#define REGNUM_MTX_MTX_KICK 0x0080
1281+
1282+#define MTX_DATA_MEM_BASE 0x82880000
1283+
1284+#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108
1285+#define MASK_MTX_MTX_MCMR 0x00000001
1286+#define SHIFT_MTX_MTX_MCMR 0
1287+#define REGNUM_MTX_MTX_MCMR 0x0108
1288+
1289+#define MASK_MTX_MTX_MCMID 0x0FF00000
1290+#define SHIFT_MTX_MTX_MCMID 20
1291+#define REGNUM_MTX_MTX_MCMID 0x0108
1292+
1293+#define MASK_MTX_MTX_MCM_ADDR 0x000FFFFC
1294+#define SHIFT_MTX_MTX_MCM_ADDR 2
1295+#define REGNUM_MTX_MTX_MCM_ADDR 0x0108
1296+
1297+#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C
1298+#define MASK_MTX_MTX_MTX_MCM_STAT 0x00000001
1299+#define SHIFT_MTX_MTX_MTX_MCM_STAT 0
1300+#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C
1301+
1302+#define MASK_MTX_MTX_MCMAI 0x00000002
1303+#define SHIFT_MTX_MTX_MCMAI 1
1304+#define REGNUM_MTX_MTX_MCMAI 0x0108
1305+
1306+#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104
1307+
1308+#define MVEA_CR_MVEA_BUSY 0x0018
1309+#define MVEA_CR_MVEA_DMACMDFIFO_WAIT 0x001C
1310+#define MVEA_CR_MVEA_DMACMDFIFO_STATUS 0x0020
1311+
1312+#define MVEA_CR_IMG_MVEA_SRST 0x0000
1313+#define MASK_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x00000001
1314+#define SHIFT_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0
1315+#define REGNUM_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x0000
1316+
1317+#define MASK_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x00000002
1318+#define SHIFT_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 1
1319+#define REGNUM_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x0000
1320+
1321+#define MASK_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x00000004
1322+#define SHIFT_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 2
1323+#define REGNUM_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x0000
1324+
1325+#define MASK_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x00000008
1326+#define SHIFT_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 3
1327+#define REGNUM_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x0000
1328+
1329+#define MASK_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x00000010
1330+#define SHIFT_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 4
1331+#define REGNUM_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x0000
1332+
1333+#define MASK_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x00000020
1334+#define SHIFT_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 5
1335+#define REGNUM_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x0000
1336+
1337+#define TOPAZ_CR_IMG_TOPAZ_CORE_ID 0x03C0
1338+#define TOPAZ_CR_IMG_TOPAZ_CORE_REV 0x03D0
1339+
1340+#define TOPAZ_MTX_PC (0x00000005)
1341+#define PC_START_ADDRESS (0x80900000)
1342+
1343+#define TOPAZ_CR_TOPAZ_AUTO_CLK_GATE 0x0014
1344+#define MASK_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x00000001
1345+#define SHIFT_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0
1346+#define REGNUM_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x0014
1347+
1348+#define MASK_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x00000002
1349+#define SHIFT_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 1
1350+#define REGNUM_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x0014
1351+
1352+#define MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002
1353+#define SHIFT_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 1
1354+#define REGNUM_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0010
1355+
1356+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET 0x000000F8
1357+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET 0x000000FC
1358+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000
1359+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000
1360+
1361+#define TOPAZ_CORE_CR_MTX_DEBUG_OFFSET 0x0000003C
1362+
1363+#define MASK_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x00000004
1364+#define SHIFT_TOPAZ_CR_MTX_DBG_IS_SLAVE 2
1365+#define REGNUM_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x003C
1366+
1367+#define MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x00000018
1368+#define SHIFT_TOPAZ_CR_MTX_DBG_GPIO_OUT 3
1369+#define REGNUM_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x003C
1370+
1371+#define MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET 0x00000108
1372+
1373+#define TOPAZ_CR_MMU_CONTROL0 0x0024
1374+#define MASK_TOPAZ_CR_MMU_BYPASS 0x00000800
1375+#define SHIFT_TOPAZ_CR_MMU_BYPASS 11
1376+#define REGNUM_TOPAZ_CR_MMU_BYPASS 0x0024
1377+
1378+#define TOPAZ_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X)))
1379+#define MASK_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFF000
1380+#define SHIFT_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 12
1381+#define REGNUM_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0x0030
1382+
1383+#define MASK_TOPAZ_CR_MMU_INVALDC 0x00000008
1384+#define SHIFT_TOPAZ_CR_MMU_INVALDC 3
1385+#define REGNUM_TOPAZ_CR_MMU_INVALDC 0x0024
1386+
1387+#define MASK_TOPAZ_CR_MMU_FLUSH 0x00000004
1388+#define SHIFT_TOPAZ_CR_MMU_FLUSH 2
1389+#define REGNUM_TOPAZ_CR_MMU_FLUSH 0x0024
1390+
1391+#define TOPAZ_CR_MMU_BANK_INDEX 0x0038
1392+#define MASK_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (0x00000003 << (8 + ((i) * 2)))
1393+#define SHIFT_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (8 + ((i) * 2))
1394+#define REGNUM_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) 0x0038
1395+
1396+#define TOPAZ_CR_TOPAZ_MAN_CLK_GATE 0x0010
1397+#define MASK_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x00000001
1398+#define SHIFT_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0
1399+#define REGNUM_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x0010
1400+
1401+#define MTX_CORE_CR_MTX_TXRPT_OFFSET 0x0000000c
1402+#define TXRPT_WAITONKICK_VALUE 0x8ade0000
1403+
1404+#define MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK 0x00000002
1405+
1406+#define MTX_CORE_CR_MTX_ENABLE_OFFSET 0x00000000
1407+#define MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK 0x00000001
1408+
1409+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x00000002
1410+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 1
1411+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x0004
1412+
1413+#define MTX_CORE_CR_MTX_SOFT_RESET_OFFSET 0x00000200
1414+#define MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001
1415+
1416+#define MTX_CR_MTX_SYSC_CDMAA 0x0344
1417+#define MASK_MTX_CDMAA_ADDRESS 0x03FFFFFC
1418+#define SHIFT_MTX_CDMAA_ADDRESS 2
1419+#define REGNUM_MTX_CDMAA_ADDRESS 0x0344
1420+
1421+#define MTX_CR_MTX_SYSC_CDMAC 0x0340
1422+#define MASK_MTX_LENGTH 0x0000FFFF
1423+#define SHIFT_MTX_LENGTH 0
1424+#define REGNUM_MTX_LENGTH 0x0340
1425+
1426+#define MASK_MTX_BURSTSIZE 0x07000000
1427+#define SHIFT_MTX_BURSTSIZE 24
1428+#define REGNUM_MTX_BURSTSIZE 0x0340
1429+
1430+#define MASK_MTX_RNW 0x00020000
1431+#define SHIFT_MTX_RNW 17
1432+#define REGNUM_MTX_RNW 0x0340
1433+
1434+#define MASK_MTX_ENABLE 0x00010000
1435+#define SHIFT_MTX_ENABLE 16
1436+#define REGNUM_MTX_ENABLE 0x0340
1437+
1438+#define MASK_MTX_LENGTH 0x0000FFFF
1439+#define SHIFT_MTX_LENGTH 0
1440+#define REGNUM_MTX_LENGTH 0x0340
1441+
1442+#define TOPAZ_CR_IMG_TOPAZ_SRST 0x0000
1443+#define MASK_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x00000001
1444+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0
1445+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x0000
1446+
1447+#define MASK_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x00000008
1448+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 3
1449+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x0000
1450+
1451+#define MASK_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000002
1452+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 1
1453+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000
1454+
1455+#define MVEA_CR_MVEA_AUTO_CLOCK_GATING 0x0024
1456+#define MASK_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x00000001
1457+#define SHIFT_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0
1458+#define REGNUM_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x0024
1459+
1460+#define MASK_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x00000002
1461+#define SHIFT_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 1
1462+#define REGNUM_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x0024
1463+
1464+#define MASK_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x00000004
1465+#define SHIFT_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 2
1466+#define REGNUM_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x0024
1467+
1468+#define MASK_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x00000008
1469+#define SHIFT_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 3
1470+#define REGNUM_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x0024
1471+
1472+#define TOPAZ_CR_IMG_TOPAZ_DMAC_MODE 0x0040
1473+#define MASK_TOPAZ_CR_DMAC_MASTER_MODE 0x00000001
1474+#define SHIFT_TOPAZ_CR_DMAC_MASTER_MODE 0
1475+#define REGNUM_TOPAZ_CR_DMAC_MASTER_MODE 0x0040
1476+
1477+#define MTX_CR_MTX_SYSC_CDMAT 0x0350
1478+#define MASK_MTX_TRANSFERDATA 0xFFFFFFFF
1479+#define SHIFT_MTX_TRANSFERDATA 0
1480+#define REGNUM_MTX_TRANSFERDATA 0x0350
1481+
1482+#define IMG_SOC_DMAC_IRQ_STAT(X) (0x000C + (32 * (X)))
1483+#define MASK_IMG_SOC_TRANSFER_FIN 0x00020000
1484+#define SHIFT_IMG_SOC_TRANSFER_FIN 17
1485+#define REGNUM_IMG_SOC_TRANSFER_FIN 0x000C
1486+
1487+#define IMG_SOC_DMAC_COUNT(X) (0x0004 + (32 * (X)))
1488+#define MASK_IMG_SOC_CNT 0x0000FFFF
1489+#define SHIFT_IMG_SOC_CNT 0
1490+#define REGNUM_IMG_SOC_CNT 0x0004
1491+
1492+#define MASK_IMG_SOC_EN 0x00010000
1493+#define SHIFT_IMG_SOC_EN 16
1494+#define REGNUM_IMG_SOC_EN 0x0004
1495+
1496+#define MASK_IMG_SOC_LIST_EN 0x00040000
1497+#define SHIFT_IMG_SOC_LIST_EN 18
1498+#define REGNUM_IMG_SOC_LIST_EN 0x0004
1499+
1500+#define IMG_SOC_DMAC_PER_HOLD(X) (0x0018 + (32 * (X)))
1501+#define MASK_IMG_SOC_PER_HOLD 0x0000007F
1502+#define SHIFT_IMG_SOC_PER_HOLD 0
1503+#define REGNUM_IMG_SOC_PER_HOLD 0x0018
1504+
1505+#define IMG_SOC_DMAC_SETUP(X) (0x0000 + (32 * (X)))
1506+#define MASK_IMG_SOC_START_ADDRESS 0xFFFFFFF
1507+#define SHIFT_IMG_SOC_START_ADDRESS 0
1508+#define REGNUM_IMG_SOC_START_ADDRESS 0x0000
1509+
1510+#define MASK_IMG_SOC_BSWAP 0x40000000
1511+#define SHIFT_IMG_SOC_BSWAP 30
1512+#define REGNUM_IMG_SOC_BSWAP 0x0004
1513+
1514+#define MASK_IMG_SOC_PW 0x18000000
1515+#define SHIFT_IMG_SOC_PW 27
1516+#define REGNUM_IMG_SOC_PW 0x0004
1517+
1518+#define MASK_IMG_SOC_DIR 0x04000000
1519+#define SHIFT_IMG_SOC_DIR 26
1520+#define REGNUM_IMG_SOC_DIR 0x0004
1521+
1522+#define MASK_IMG_SOC_PI 0x03000000
1523+#define SHIFT_IMG_SOC_PI 24
1524+#define REGNUM_IMG_SOC_PI 0x0004
1525+#define IMG_SOC_PI_1 0x00000002
1526+#define IMG_SOC_PI_2 0x00000001
1527+#define IMG_SOC_PI_4 0x00000000
1528+
1529+#define MASK_IMG_SOC_TRANSFER_IEN 0x20000000
1530+#define SHIFT_IMG_SOC_TRANSFER_IEN 29
1531+#define REGNUM_IMG_SOC_TRANSFER_IEN 0x0004
1532+
1533+#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) \
1534+ ((((BSWAP) << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP)| \
1535+ (((PW) << SHIFT_IMG_SOC_PW) & MASK_IMG_SOC_PW)| \
1536+ (((DIR) << SHIFT_IMG_SOC_DIR) & MASK_IMG_SOC_DIR)| \
1537+ (((PERIPH_INCR) << SHIFT_IMG_SOC_PI) & MASK_IMG_SOC_PI)| \
1538+ (((COUNT) << SHIFT_IMG_SOC_CNT) & MASK_IMG_SOC_CNT))
1539+
1540+#define IMG_SOC_DMAC_PERIPH(X) (0x0008 + (32 * (X)))
1541+#define MASK_IMG_SOC_EXT_SA 0x0000000F
1542+#define SHIFT_IMG_SOC_EXT_SA 0
1543+#define REGNUM_IMG_SOC_EXT_SA 0x0008
1544+
1545+#define MASK_IMG_SOC_ACC_DEL 0xE0000000
1546+#define SHIFT_IMG_SOC_ACC_DEL 29
1547+#define REGNUM_IMG_SOC_ACC_DEL 0x0008
1548+
1549+#define MASK_IMG_SOC_INCR 0x08000000
1550+#define SHIFT_IMG_SOC_INCR 27
1551+#define REGNUM_IMG_SOC_INCR 0x0008
1552+
1553+#define MASK_IMG_SOC_BURST 0x07000000
1554+#define SHIFT_IMG_SOC_BURST 24
1555+#define REGNUM_IMG_SOC_BURST 0x0008
1556+
1557+#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST) \
1558+((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL)| \
1559+(((INCR) << SHIFT_IMG_SOC_INCR) & MASK_IMG_SOC_INCR)| \
1560+(((BURST) << SHIFT_IMG_SOC_BURST) & MASK_IMG_SOC_BURST))
1561+
1562+#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X)))
1563+#define MASK_IMG_SOC_ADDR 0x007FFFFF
1564+#define SHIFT_IMG_SOC_ADDR 0
1565+#define REGNUM_IMG_SOC_ADDR 0x0014
1566+
1567+/* **************** DMAC define **************** */
1568+enum DMAC_eBSwap {
1569+ DMAC_BSWAP_NO_SWAP = 0x0,/* !< No byte swapping will be performed. */
1570+ DMAC_BSWAP_REVERSE = 0x1,/* !< Byte order will be reversed. */
1571+};
1572+
1573+enum DMAC_ePW {
1574+ DMAC_PWIDTH_32_BIT = 0x0,/* !< Peripheral width 32-bit. */
1575+ DMAC_PWIDTH_16_BIT = 0x1,/* !< Peripheral width 16-bit. */
1576+ DMAC_PWIDTH_8_BIT = 0x2,/* !< Peripheral width 8-bit. */
1577+};
1578+
1579+enum DMAC_eAccDel {
1580+ DMAC_ACC_DEL_0 = 0x0, /* !< Access delay zero clock cycles */
1581+ DMAC_ACC_DEL_256 = 0x1, /* !< Access delay 256 clock cycles */
1582+ DMAC_ACC_DEL_512 = 0x2, /* !< Access delay 512 clock cycles */
1583+ DMAC_ACC_DEL_768 = 0x3, /* !< Access delay 768 clock cycles */
1584+ DMAC_ACC_DEL_1024 = 0x4,/* !< Access delay 1024 clock cycles */
1585+ DMAC_ACC_DEL_1280 = 0x5,/* !< Access delay 1280 clock cycles */
1586+ DMAC_ACC_DEL_1536 = 0x6,/* !< Access delay 1536 clock cycles */
1587+ DMAC_ACC_DEL_1792 = 0x7,/* !< Access delay 1792 clock cycles */
1588+};
1589+
1590+enum DMAC_eBurst {
1591+ DMAC_BURST_0 = 0x0, /* !< burst size of 0 */
1592+ DMAC_BURST_1 = 0x1, /* !< burst size of 1 */
1593+ DMAC_BURST_2 = 0x2, /* !< burst size of 2 */
1594+ DMAC_BURST_3 = 0x3, /* !< burst size of 3 */
1595+ DMAC_BURST_4 = 0x4, /* !< burst size of 4 */
1596+ DMAC_BURST_5 = 0x5, /* !< burst size of 5 */
1597+ DMAC_BURST_6 = 0x6, /* !< burst size of 6 */
1598+ DMAC_BURST_7 = 0x7, /* !< burst size of 7 */
1599+};
1600+
1601+/* commands for topaz,shared with user space driver */
1602+enum drm_lnc_topaz_cmd {
1603+ MTX_CMDID_NULL = 0,
1604+ MTX_CMDID_DO_HEADER = 1,
1605+ MTX_CMDID_ENCODE_SLICE = 2,
1606+ MTX_CMDID_WRITEREG = 3,
1607+ MTX_CMDID_START_PIC = 4,
1608+ MTX_CMDID_END_PIC = 5,
1609+ MTX_CMDID_SYNC = 6,
1610+ MTX_CMDID_ENCODE_ONE_ROW = 7,
1611+ MTX_CMDID_FLUSH = 8,
1612+ MTX_CMDID_SW_LEAVE_LOWPOWER = 0x7c,
1613+ MTX_CMDID_SW_ENTER_LOWPOWER = 0x7e,
1614+ MTX_CMDID_SW_NEW_CODEC = 0x7f
1615+};
1616+
1617+/* codecs topaz supports,shared with user space driver */
1618+enum drm_lnc_topaz_codec {
1619+ IMG_CODEC_JPEG = 0,
1620+ IMG_CODEC_H264_NO_RC,
1621+ IMG_CODEC_H264_VBR,
1622+ IMG_CODEC_H264_CBR,
1623+ IMG_CODEC_H263_NO_RC,
1624+ IMG_CODEC_H263_VBR,
1625+ IMG_CODEC_H263_CBR,
1626+ IMG_CODEC_MPEG4_NO_RC,
1627+ IMG_CODEC_MPEG4_VBR,
1628+ IMG_CODEC_MPEG4_CBR,
1629+ IMG_CODEC_NUM
1630+};
1631+
1632+/* XXX: it's a copy of msvdx cmd queue. should have some change? */
1633+struct lnc_topaz_cmd_queue {
1634+ struct list_head head;
1635+ void *cmd;
1636+ unsigned long cmd_size;
1637+ uint32_t sequence;
1638+};
1639+
1640+
1641+struct topaz_cmd_header {
1642+ union {
1643+ struct {
1644+ unsigned long enable_interrupt:1;
1645+ unsigned long id:7;
1646+ unsigned long size:8;
1647+ unsigned long seq:16;
1648+ };
1649+ uint32_t val;
1650+ };
1651+};
1652+
1653+/* define structure */
1654+/* firmware file's info head */
1655+struct topaz_fwinfo {
1656+ unsigned int ver:16;
1657+ unsigned int codec:16;
1658+
1659+ unsigned int text_size;
1660+ unsigned int data_size;
1661+ unsigned int data_location;
1662+};
1663+
1664+/* firmware data array define */
1665+struct topaz_codec_fw {
1666+ uint32_t ver;
1667+ uint32_t codec;
1668+
1669+ uint32_t text_size;
1670+ uint32_t data_size;
1671+ uint32_t data_location;
1672+
1673+ struct ttm_buffer_object *text;
1674+ struct ttm_buffer_object *data;
1675+};
1676+
1677+struct topaz_private {
1678+ /* current video task */
1679+ unsigned int pmstate;
1680+ struct sysfs_dirent *sysfs_pmstate;
1681+ int frame_skip;
1682+
1683+ void *topaz_mtx_reg_state;
1684+ struct ttm_buffer_object *topaz_mtx_data_mem;
1685+ uint32_t topaz_cur_codec;
1686+ uint32_t cur_mtx_data_size;
1687+ int topaz_needs_reset;
1688+
1689+ /*
1690+ *topaz command queue
1691+ */
1692+ spinlock_t topaz_lock;
1693+ struct mutex topaz_mutex;
1694+ struct list_head topaz_queue;
1695+ int topaz_busy; /* 0 means topaz is free */
1696+ int topaz_fw_loaded;
1697+
1698+ /* topaz ccb data */
1699+ /* XXX: should the addr stored by 32 bits? more compatible way?? */
1700+ uint32_t topaz_ccb_buffer_addr;
1701+ uint32_t topaz_ccb_ctrl_addr;
1702+ uint32_t topaz_ccb_size;
1703+ uint32_t topaz_cmd_windex;
1704+ uint16_t topaz_cmd_seq;
1705+
1706+ uint32_t stored_initial_qp;
1707+ uint32_t topaz_dash_access_ctrl;
1708+
1709+ struct ttm_buffer_object *topaz_bo; /* 4K->2K/2K for writeback/sync */
1710+ struct ttm_bo_kmap_obj topaz_bo_kmap;
1711+ void *topaz_ccb_wb;
1712+ uint32_t topaz_wb_offset;
1713+ uint32_t *topaz_sync_addr;
1714+ uint32_t topaz_sync_offset;
1715+ uint32_t topaz_sync_cmd_seq;
1716+ uint32_t topaz_mtx_saved;
1717+
1718+ /* firmware */
1719+ struct topaz_codec_fw topaz_fw[IMG_CODEC_NUM];
1720+};
1721+
1722+/* external function declare */
1723+/* lnc_topazinit.c */
1724+int lnc_topaz_init(struct drm_device *dev);
1725+int lnc_topaz_uninit(struct drm_device *dev);
1726+int lnc_topaz_reset(struct drm_psb_private *dev_priv);
1727+int topaz_init_fw(struct drm_device *dev);
1728+int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec);
1729+int topaz_wait_for_register(struct drm_psb_private *dev_priv,
1730+ uint32_t addr, uint32_t value,
1731+ uint32_t enable);
1732+void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
1733+ uint32_t byte_addr, uint32_t val);
1734+uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
1735+ uint32_t byte_addr);
1736+void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
1737+ uint32_t addr);
1738+void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
1739+ uint32_t val);
1740+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
1741+int lnc_topaz_save_mtx_state(struct drm_device *dev);
1742+int lnc_topaz_restore_mtx_state(struct drm_device *dev);
1743+
1744+/* lnc_topaz.c */
1745+void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat);
1746+
1747+int lnc_cmdbuf_video(struct drm_file *priv,
1748+ struct list_head *validate_list,
1749+ uint32_t fence_type,
1750+ struct drm_psb_cmdbuf_arg *arg,
1751+ struct ttm_buffer_object *cmd_buffer,
1752+ struct psb_ttm_fence_rep *fence_arg);
1753+
1754+void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_cout);
1755+void lnc_topaz_handle_timeout(struct ttm_fence_device *fdev);
1756+
1757+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
1758+int lnc_wait_topaz_idle(struct drm_device *dev);
1759+int lnc_check_topaz_idle(struct drm_device *dev);
1760+
1761+/* macros to get/set CCB control data */
1762+#define WB_CCB_CTRL_RINDEX(dev_priv) \
1763+(*((uint32_t *)((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_wb))
1764+
1765+#define WB_CCB_CTRL_SEQ(dev_priv) \
1766+(*((uint32_t *)((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_wb\
1767+ + 1))
1768+
1769+#define POLL_WB_RINDEX(dev_priv, value) \
1770+do { \
1771+ int i; \
1772+ for (i = 0; i < 10000; i++) { \
1773+ if (WB_CCB_CTRL_RINDEX(dev_priv) == value) \
1774+ break; \
1775+ else \
1776+ DRM_UDELAY(100); \
1777+ } \
1778+ if (WB_CCB_CTRL_RINDEX(dev_priv) != value) { \
1779+ DRM_ERROR("TOPAZ: poll rindex timeout\n"); \
1780+ ret = -EBUSY; \
1781+ } \
1782+} while (0)
1783+
1784+#define POLL_WB_SEQ(dev_priv, value) \
1785+do { \
1786+ int i; \
1787+ for (i = 0; i < 10000; i++) { \
1788+ if (CCB_CTRL_SEQ(dev_priv) == value) \
1789+ break; \
1790+ else \
1791+ DRM_UDELAY(1000); \
1792+ } \
1793+ if (CCB_CTRL_SEQ(dev_priv) != value) { \
1794+ DRM_ERROR("TOPAZ:poll mtxseq timeout,0x%08x(mtx) vs 0x%08x\n",\
1795+ WB_CCB_CTRL_SEQ(dev_priv), value); \
1796+ ret = -EBUSY; \
1797+ } \
1798+} while (0)
1799+
1800+#define CCB_CTRL_RINDEX(dev_priv) \
1801+ topaz_read_mtx_mem(dev_priv, \
1802+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
1803+ + MTX_CCBCTRL_ROFF)
1804+
1805+#define CCB_CTRL_RINDEX(dev_priv) \
1806+ topaz_read_mtx_mem(dev_priv, \
1807+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
1808+ + MTX_CCBCTRL_ROFF)
1809+
1810+#define CCB_CTRL_QP(dev_priv) \
1811+ topaz_read_mtx_mem(dev_priv, \
1812+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
1813+ + MTX_CCBCTRL_QP)
1814+
1815+#define CCB_CTRL_SEQ(dev_priv) \
1816+ topaz_read_mtx_mem(dev_priv, \
1817+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
1818+ + MTX_CCBCTRL_COMPLETE)
1819+
1820+#define CCB_CTRL_FRAMESKIP(dev_priv) \
1821+ topaz_read_mtx_mem(dev_priv, \
1822+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
1823+ + MTX_CCBCTRL_FRAMESKIP)
1824+
1825+#define CCB_CTRL_SET_QP(dev_priv, qp) \
1826+ topaz_write_mtx_mem(dev_priv, \
1827+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
1828+ + MTX_CCBCTRL_QP, qp)
1829+
1830+#define CCB_CTRL_SET_INITIALQP(dev_priv, qp) \
1831+ topaz_write_mtx_mem(dev_priv, \
1832+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
1833+ + MTX_CCBCTRL_INITQP, qp)
1834+
1835+
1836+#define TOPAZ_BEGIN_CCB(dev_priv) \
1837+ topaz_write_mtx_mem_multiple_setup(dev_priv, \
1838+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_buffer_addr + \
1839+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_cmd_windex * 4)
1840+
1841+#define TOPAZ_OUT_CCB(dev_priv, cmd) \
1842+do { \
1843+ topaz_write_mtx_mem_multiple(dev_priv, cmd); \
1844+ ((struct topaz_private *)dev_priv->topaz_private)->topaz_cmd_windex++; \
1845+} while (0)
1846+
1847+#define TOPAZ_END_CCB(dev_priv, kick_count) \
1848+ topaz_mtx_kick(dev_priv, 1);
1849+
1850+static inline char *cmd_to_string(int cmd_id)
1851+{
1852+ switch (cmd_id) {
1853+ case MTX_CMDID_START_PIC:
1854+ return "MTX_CMDID_START_PIC";
1855+ case MTX_CMDID_END_PIC:
1856+ return "MTX_CMDID_END_PIC";
1857+ case MTX_CMDID_DO_HEADER:
1858+ return "MTX_CMDID_DO_HEADER";
1859+ case MTX_CMDID_ENCODE_SLICE:
1860+ return "MTX_CMDID_ENCODE_SLICE";
1861+ case MTX_CMDID_SYNC:
1862+ return "MTX_CMDID_SYNC";
1863+
1864+ default:
1865+ return "Undefined command";
1866+
1867+ }
1868+}
1869+
1870+static inline char *codec_to_string(int codec)
1871+{
1872+ switch (codec) {
1873+ case IMG_CODEC_H264_NO_RC:
1874+ return "H264_NO_RC";
1875+ case IMG_CODEC_H264_VBR:
1876+ return "H264_VBR";
1877+ case IMG_CODEC_H264_CBR:
1878+ return "H264_CBR";
1879+ case IMG_CODEC_H263_NO_RC:
1880+ return "H263_NO_RC";
1881+ case IMG_CODEC_H263_VBR:
1882+ return "H263_VBR";
1883+ case IMG_CODEC_H263_CBR:
1884+ return "H263_CBR";
1885+ case IMG_CODEC_MPEG4_NO_RC:
1886+ return "MPEG4_NO_RC";
1887+ case IMG_CODEC_MPEG4_VBR:
1888+ return "MPEG4_VBR";
1889+ case IMG_CODEC_MPEG4_CBR:
1890+ return "MPEG4_CBR";
1891+ default:
1892+ return "Undefined codec";
1893+ }
1894+}
1895+
1896+
1897+static inline void lnc_topaz_enableirq(struct drm_device *dev)
1898+{
1899+ struct drm_psb_private *dev_priv = dev->dev_private;
1900+ /* uint32_t ier = dev_priv->vdc_irq_mask | _LNC_IRQ_TOPAZ_FLAG; */
1901+
1902+ PSB_DEBUG_IRQ("TOPAZ: enable IRQ\n");
1903+
1904+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB,
1905+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MAS_INTEN) |
1906+ /* F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA) | */
1907+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT) |
1908+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX) |
1909+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT));
1910+
1911+ /* write in psb_irq.c */
1912+ /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
1913+}
1914+
1915+static inline void lnc_topaz_disableirq(struct drm_device *dev)
1916+{
1917+
1918+ struct drm_psb_private *dev_priv = dev->dev_private;
1919+ /* uint32_t ier = dev_priv->vdc_irq_mask & (~_LNC_IRQ_TOPAZ_FLAG); */
1920+
1921+ PSB_DEBUG_INIT("TOPAZ: disable IRQ\n");
1922+
1923+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0);
1924+
1925+ /* write in psb_irq.c */
1926+ /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
1927+}
1928+
1929+
1930+static inline void lnc_topaz_clearirq(struct drm_device *dev,
1931+ uint32_t clear_topaz)
1932+{
1933+ struct drm_psb_private *dev_priv = dev->dev_private;
1934+
1935+ PSB_DEBUG_INIT("TOPAZ: clear IRQ\n");
1936+ if (clear_topaz != 0)
1937+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, clear_topaz);
1938+
1939+ /* PSB_WVDC32(_LNC_IRQ_TOPAZ_FLAG, PSB_INT_IDENTITY_R); */
1940+}
1941+
1942+static inline uint32_t lnc_topaz_queryirq(struct drm_device *dev)
1943+{
1944+ struct drm_psb_private *dev_priv = dev->dev_private;
1945+ uint32_t val, /* iir, */ clear = 0;
1946+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
1947+
1948+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &val);
1949+ /* iir = PSB_RVDC32(PSB_INT_IDENTITY_R); */
1950+
1951+ (void) topaz_priv;
1952+
1953+ if ((val == 0) /* && (iir == 0) */) {/* no interrupt */
1954+ PSB_DEBUG_GENERAL("TOPAZ: no interrupt,IIR=TOPAZ_INTSTAT=0\n");
1955+ return 0;
1956+ }
1957+
1958+ PSB_DEBUG_IRQ("TOPAZ:TOPAZ_INTSTAT=0x%08x\n", val);
1959+
1960+ if (val & (1<<31))
1961+ PSB_DEBUG_IRQ("TOPAZ:IRQ pin activated,cmd seq=0x%04x,"
1962+ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
1963+ CCB_CTRL_SEQ(dev_priv),
1964+ dev_priv->sequence[LNC_ENGINE_ENCODE],
1965+ *(uint32_t *)topaz_priv->topaz_sync_addr);
1966+ else
1967+ PSB_DEBUG_IRQ("TOPAZ:IRQ pin not activated,cmd seq=0x%04x,"
1968+ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
1969+ CCB_CTRL_SEQ(dev_priv),
1970+ dev_priv->sequence[LNC_ENGINE_ENCODE],
1971+ *(uint32_t *)topaz_priv->topaz_sync_addr);
1972+
1973+ if (val & 0x8) {
1974+ uint32_t mmu_status, mmu_req;
1975+
1976+ TOPAZ_READ32(TOPAZ_CR_MMU_STATUS, &mmu_status);
1977+ TOPAZ_READ32(TOPAZ_CR_MMU_MEM_REQ, &mmu_req);
1978+
1979+ PSB_DEBUG_IRQ("TOPAZ: detect a page fault interrupt, "
1980+ "address=0x%08x,mem req=0x%08x\n",
1981+ mmu_status, mmu_req);
1982+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT);
1983+ }
1984+
1985+ if (val & 0x4) {
1986+ PSB_DEBUG_IRQ("TOPAZ: detect a MTX_HALT interrupt\n");
1987+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT);
1988+ }
1989+
1990+ if (val & 0x2) {
1991+ PSB_DEBUG_IRQ("TOPAZ: detect a MTX interrupt\n");
1992+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX);
1993+ }
1994+
1995+ if (val & 0x1) {
1996+ PSB_DEBUG_IRQ("TOPAZ: detect a MVEA interrupt\n");
1997+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA);
1998+ }
1999+
2000+ return clear;
2001+}
2002+
2003+
2004+#define TOPAZ_NEW_PMSTATE(drm_dev, topaz_priv, new_state) \
2005+do { \
2006+ topaz_priv->pmstate = new_state; \
2007+ sysfs_notify_dirent(topaz_priv->sysfs_pmstate); \
2008+ PSB_DEBUG_PM("TOPAZ: %s\n", \
2009+ (new_state == PSB_PMSTATE_POWERUP) ? "powerup" \
2010+ : ((new_state == PSB_PMSTATE_POWERDOWN) ? "powerdown" \
2011+ : "clockgated")); \
2012+} while (0)
2013+
2014+#endif /* _LNC_TOPAZ_H_ */
2015diff --git a/drivers/gpu/drm/psb/lnc_topazinit.c b/drivers/gpu/drm/psb/lnc_topazinit.c
2016new file mode 100644
2017index 0000000..2e8365c
2018--- /dev/null
2019+++ b/drivers/gpu/drm/psb/lnc_topazinit.c
2020@@ -0,0 +1,2058 @@
2021+/**
2022+ * file lnc_topazinit.c
2023+ * TOPAZ initialization and mtx-firmware upload
2024+ *
2025+ */
2026+
2027+/**************************************************************************
2028+ *
2029+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
2030+ * Copyright (c) Imagination Technologies Limited, UK
2031+ * All Rights Reserved.
2032+ *
2033+ * Permission is hereby granted, free of charge, to any person obtaining a
2034+ * copy of this software and associated documentation files (the
2035+ * "Software"), to deal in the Software without restriction, including
2036+ * without limitation the rights to use, copy, modify, merge, publish,
2037+ * distribute, sub license, and/or sell copies of the Software, and to
2038+ * permit persons to whom the Software is furnished to do so, subject to
2039+ * the following conditions:
2040+ *
2041+ * The above copyright notice and this permission notice (including the
2042+ * next paragraph) shall be included in all copies or substantial portions
2043+ * of the Software.
2044+ *
2045+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2046+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2047+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
2048+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
2049+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
2050+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
2051+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
2052+ *
2053+ **************************************************************************/
2054+
2055+/* NOTE: (READ BEFORE REFINE CODE)
2056+ * 1. The FIRMWARE's SIZE is measured by byte, we have to pass the size
2057+ * measured by word to DMAC.
2058+ *
2059+ *
2060+ *
2061+ */
2062+
2063+/* include headers */
2064+
2065+/* #define DRM_DEBUG_CODE 2 */
2066+
2067+#include <linux/firmware.h>
2068+
2069+#include <drm/drmP.h>
2070+#include <drm/drm.h>
2071+
2072+#include "psb_drv.h"
2073+#include "lnc_topaz.h"
2074+#include "psb_powermgmt.h"
2075+
2076+/* WARNING: this define is very important */
2077+#define RAM_SIZE (1024 * 24)
2078+
2079+/* register default values
2080+ * THIS HEADER IS ONLY INCLUDE ONCE*/
2081+static unsigned long topaz_default_regs[183][3] = {
2082+ {MVEA_START, 0x00000000, 0x00000000},
2083+ {MVEA_START, 0x00000004, 0x00000400},
2084+ {MVEA_START, 0x00000008, 0x00000000},
2085+ {MVEA_START, 0x0000000C, 0x00000000},
2086+ {MVEA_START, 0x00000010, 0x00000000},
2087+ {MVEA_START, 0x00000014, 0x00000000},
2088+ {MVEA_START, 0x00000018, 0x00000000},
2089+ {MVEA_START, 0x0000001C, 0x00000000},
2090+ {MVEA_START, 0x00000020, 0x00000120},
2091+ {MVEA_START, 0x00000024, 0x00000000},
2092+ {MVEA_START, 0x00000028, 0x00000000},
2093+ {MVEA_START, 0x00000100, 0x00000000},
2094+ {MVEA_START, 0x00000104, 0x00000000},
2095+ {MVEA_START, 0x00000108, 0x00000000},
2096+ {MVEA_START, 0x0000010C, 0x00000000},
2097+ {MVEA_START, 0x0000011C, 0x00000001},
2098+ {MVEA_START, 0x0000012C, 0x00000000},
2099+ {MVEA_START, 0x00000180, 0x00000000},
2100+ {MVEA_START, 0x00000184, 0x00000000},
2101+ {MVEA_START, 0x00000188, 0x00000000},
2102+ {MVEA_START, 0x0000018C, 0x00000000},
2103+ {MVEA_START, 0x00000190, 0x00000000},
2104+ {MVEA_START, 0x00000194, 0x00000000},
2105+ {MVEA_START, 0x00000198, 0x00000000},
2106+ {MVEA_START, 0x0000019C, 0x00000000},
2107+ {MVEA_START, 0x000001A0, 0x00000000},
2108+ {MVEA_START, 0x000001A4, 0x00000000},
2109+ {MVEA_START, 0x000001A8, 0x00000000},
2110+ {MVEA_START, 0x000001AC, 0x00000000},
2111+ {MVEA_START, 0x000001B0, 0x00000000},
2112+ {MVEA_START, 0x000001B4, 0x00000000},
2113+ {MVEA_START, 0x000001B8, 0x00000000},
2114+ {MVEA_START, 0x000001BC, 0x00000000},
2115+ {MVEA_START, 0x000001F8, 0x00000000},
2116+ {MVEA_START, 0x000001FC, 0x00000000},
2117+ {MVEA_START, 0x00000200, 0x00000000},
2118+ {MVEA_START, 0x00000204, 0x00000000},
2119+ {MVEA_START, 0x00000208, 0x00000000},
2120+ {MVEA_START, 0x0000020C, 0x00000000},
2121+ {MVEA_START, 0x00000210, 0x00000000},
2122+ {MVEA_START, 0x00000220, 0x00000001},
2123+ {MVEA_START, 0x00000224, 0x0000001F},
2124+ {MVEA_START, 0x00000228, 0x00000100},
2125+ {MVEA_START, 0x0000022C, 0x00001F00},
2126+ {MVEA_START, 0x00000230, 0x00000101},
2127+ {MVEA_START, 0x00000234, 0x00001F1F},
2128+ {MVEA_START, 0x00000238, 0x00001F01},
2129+ {MVEA_START, 0x0000023C, 0x0000011F},
2130+ {MVEA_START, 0x00000240, 0x00000200},
2131+ {MVEA_START, 0x00000244, 0x00001E00},
2132+ {MVEA_START, 0x00000248, 0x00000002},
2133+ {MVEA_START, 0x0000024C, 0x0000001E},
2134+ {MVEA_START, 0x00000250, 0x00000003},
2135+ {MVEA_START, 0x00000254, 0x0000001D},
2136+ {MVEA_START, 0x00000258, 0x00001F02},
2137+ {MVEA_START, 0x0000025C, 0x00000102},
2138+ {MVEA_START, 0x00000260, 0x0000011E},
2139+ {MVEA_START, 0x00000264, 0x00000000},
2140+ {MVEA_START, 0x00000268, 0x00000000},
2141+ {MVEA_START, 0x0000026C, 0x00000000},
2142+ {MVEA_START, 0x00000270, 0x00000000},
2143+ {MVEA_START, 0x00000274, 0x00000000},
2144+ {MVEA_START, 0x00000278, 0x00000000},
2145+ {MVEA_START, 0x00000280, 0x00008000},
2146+ {MVEA_START, 0x00000284, 0x00000000},
2147+ {MVEA_START, 0x00000288, 0x00000000},
2148+ {MVEA_START, 0x0000028C, 0x00000000},
2149+ {MVEA_START, 0x00000314, 0x00000000},
2150+ {MVEA_START, 0x00000318, 0x00000000},
2151+ {MVEA_START, 0x0000031C, 0x00000000},
2152+ {MVEA_START, 0x00000320, 0x00000000},
2153+ {MVEA_START, 0x00000324, 0x00000000},
2154+ {MVEA_START, 0x00000348, 0x00000000},
2155+ {MVEA_START, 0x00000380, 0x00000000},
2156+ {MVEA_START, 0x00000384, 0x00000000},
2157+ {MVEA_START, 0x00000388, 0x00000000},
2158+ {MVEA_START, 0x0000038C, 0x00000000},
2159+ {MVEA_START, 0x00000390, 0x00000000},
2160+ {MVEA_START, 0x00000394, 0x00000000},
2161+ {MVEA_START, 0x00000398, 0x00000000},
2162+ {MVEA_START, 0x0000039C, 0x00000000},
2163+ {MVEA_START, 0x000003A0, 0x00000000},
2164+ {MVEA_START, 0x000003A4, 0x00000000},
2165+ {MVEA_START, 0x000003A8, 0x00000000},
2166+ {MVEA_START, 0x000003B0, 0x00000000},
2167+ {MVEA_START, 0x000003B4, 0x00000000},
2168+ {MVEA_START, 0x000003B8, 0x00000000},
2169+ {MVEA_START, 0x000003BC, 0x00000000},
2170+ {MVEA_START, 0x000003D4, 0x00000000},
2171+ {MVEA_START, 0x000003D8, 0x00000000},
2172+ {MVEA_START, 0x000003DC, 0x00000000},
2173+ {MVEA_START, 0x000003E0, 0x00000000},
2174+ {MVEA_START, 0x000003E4, 0x00000000},
2175+ {MVEA_START, 0x000003EC, 0x00000000},
2176+ {MVEA_START, 0x000002D0, 0x00000000},
2177+ {MVEA_START, 0x000002D4, 0x00000000},
2178+ {MVEA_START, 0x000002D8, 0x00000000},
2179+ {MVEA_START, 0x000002DC, 0x00000000},
2180+ {MVEA_START, 0x000002E0, 0x00000000},
2181+ {MVEA_START, 0x000002E4, 0x00000000},
2182+ {MVEA_START, 0x000002E8, 0x00000000},
2183+ {MVEA_START, 0x000002EC, 0x00000000},
2184+ {MVEA_START, 0x000002F0, 0x00000000},
2185+ {MVEA_START, 0x000002F4, 0x00000000},
2186+ {MVEA_START, 0x000002F8, 0x00000000},
2187+ {MVEA_START, 0x000002FC, 0x00000000},
2188+ {MVEA_START, 0x00000300, 0x00000000},
2189+ {MVEA_START, 0x00000304, 0x00000000},
2190+ {MVEA_START, 0x00000308, 0x00000000},
2191+ {MVEA_START, 0x0000030C, 0x00000000},
2192+ {MVEA_START, 0x00000290, 0x00000000},
2193+ {MVEA_START, 0x00000294, 0x00000000},
2194+ {MVEA_START, 0x00000298, 0x00000000},
2195+ {MVEA_START, 0x0000029C, 0x00000000},
2196+ {MVEA_START, 0x000002A0, 0x00000000},
2197+ {MVEA_START, 0x000002A4, 0x00000000},
2198+ {MVEA_START, 0x000002A8, 0x00000000},
2199+ {MVEA_START, 0x000002AC, 0x00000000},
2200+ {MVEA_START, 0x000002B0, 0x00000000},
2201+ {MVEA_START, 0x000002B4, 0x00000000},
2202+ {MVEA_START, 0x000002B8, 0x00000000},
2203+ {MVEA_START, 0x000002BC, 0x00000000},
2204+ {MVEA_START, 0x000002C0, 0x00000000},
2205+ {MVEA_START, 0x000002C4, 0x00000000},
2206+ {MVEA_START, 0x000002C8, 0x00000000},
2207+ {MVEA_START, 0x000002CC, 0x00000000},
2208+ {MVEA_START, 0x00000080, 0x00000000},
2209+ {MVEA_START, 0x00000084, 0x80705700},
2210+ {MVEA_START, 0x00000088, 0x00000000},
2211+ {MVEA_START, 0x0000008C, 0x00000000},
2212+ {MVEA_START, 0x00000090, 0x00000000},
2213+ {MVEA_START, 0x00000094, 0x00000000},
2214+ {MVEA_START, 0x00000098, 0x00000000},
2215+ {MVEA_START, 0x0000009C, 0x00000000},
2216+ {MVEA_START, 0x000000A0, 0x00000000},
2217+ {MVEA_START, 0x000000A4, 0x00000000},
2218+ {MVEA_START, 0x000000A8, 0x00000000},
2219+ {MVEA_START, 0x000000AC, 0x00000000},
2220+ {MVEA_START, 0x000000B0, 0x00000000},
2221+ {MVEA_START, 0x000000B4, 0x00000000},
2222+ {MVEA_START, 0x000000B8, 0x00000000},
2223+ {MVEA_START, 0x000000BC, 0x00000000},
2224+ {MVEA_START, 0x000000C0, 0x00000000},
2225+ {MVEA_START, 0x000000C4, 0x00000000},
2226+ {MVEA_START, 0x000000C8, 0x00000000},
2227+ {MVEA_START, 0x000000CC, 0x00000000},
2228+ {MVEA_START, 0x000000D0, 0x00000000},
2229+ {MVEA_START, 0x000000D4, 0x00000000},
2230+ {MVEA_START, 0x000000D8, 0x00000000},
2231+ {MVEA_START, 0x000000DC, 0x00000000},
2232+ {MVEA_START, 0x000000E0, 0x00000000},
2233+ {MVEA_START, 0x000000E4, 0x00000000},
2234+ {MVEA_START, 0x000000E8, 0x00000000},
2235+ {MVEA_START, 0x000000EC, 0x00000000},
2236+ {MVEA_START, 0x000000F0, 0x00000000},
2237+ {MVEA_START, 0x000000F4, 0x00000000},
2238+ {MVEA_START, 0x000000F8, 0x00000000},
2239+ {MVEA_START, 0x000000FC, 0x00000000},
2240+ {TOPAZ_VLC_START, 0x00000000, 0x00000000},
2241+ {TOPAZ_VLC_START, 0x00000004, 0x00000000},
2242+ {TOPAZ_VLC_START, 0x00000008, 0x00000000},
2243+ {TOPAZ_VLC_START, 0x0000000C, 0x00000000},
2244+ {TOPAZ_VLC_START, 0x00000010, 0x00000000},
2245+ {TOPAZ_VLC_START, 0x00000014, 0x00000000},
2246+ {TOPAZ_VLC_START, 0x0000001C, 0x00000000},
2247+ {TOPAZ_VLC_START, 0x00000020, 0x00000000},
2248+ {TOPAZ_VLC_START, 0x00000024, 0x00000000},
2249+ {TOPAZ_VLC_START, 0x0000002C, 0x00000000},
2250+ {TOPAZ_VLC_START, 0x00000034, 0x00000000},
2251+ {TOPAZ_VLC_START, 0x00000038, 0x00000000},
2252+ {TOPAZ_VLC_START, 0x0000003C, 0x00000000},
2253+ {TOPAZ_VLC_START, 0x00000040, 0x00000000},
2254+ {TOPAZ_VLC_START, 0x00000044, 0x00000000},
2255+ {TOPAZ_VLC_START, 0x00000048, 0x00000000},
2256+ {TOPAZ_VLC_START, 0x0000004C, 0x00000000},
2257+ {TOPAZ_VLC_START, 0x00000050, 0x00000000},
2258+ {TOPAZ_VLC_START, 0x00000054, 0x00000000},
2259+ {TOPAZ_VLC_START, 0x00000058, 0x00000000},
2260+ {TOPAZ_VLC_START, 0x0000005C, 0x00000000},
2261+ {TOPAZ_VLC_START, 0x00000060, 0x00000000},
2262+ {TOPAZ_VLC_START, 0x00000064, 0x00000000},
2263+ {TOPAZ_VLC_START, 0x00000068, 0x00000000},
2264+ {TOPAZ_VLC_START, 0x0000006C, 0x00000000}
2265+};
2266+
2267+#define FIRMWARE_NAME "topaz_fw.bin"
2268+
2269+/* static function define */
2270+static int topaz_upload_fw(struct drm_device *dev,
2271+ enum drm_lnc_topaz_codec codec);
2272+static inline void topaz_set_default_regs(struct drm_psb_private
2273+ *dev_priv);
2274+
2275+#define UPLOAD_FW_BY_DMA 1
2276+
2277+#if UPLOAD_FW_BY_DMA
2278+static void topaz_dma_transfer(struct drm_psb_private *dev_priv,
2279+ uint32_t channel, uint32_t src_phy_addr,
2280+ uint32_t offset, uint32_t dst_addr,
2281+ uint32_t byte_num, uint32_t is_increment,
2282+ uint32_t is_write);
2283+#else
2284+static void topaz_mtx_upload_by_register(struct drm_device *dev,
2285+ uint32_t mtx_mem, uint32_t addr,
2286+ uint32_t size,
2287+ struct ttm_buffer_object *buf);
2288+#endif
2289+
2290+static void topaz_write_core_reg(struct drm_psb_private *dev_priv,
2291+ uint32_t reg, const uint32_t val);
2292+static void topaz_read_core_reg(struct drm_psb_private *dev_priv,
2293+ uint32_t reg, uint32_t *ret_val);
2294+static void get_mtx_control_from_dash(struct drm_psb_private *dev_priv);
2295+static void release_mtx_control_from_dash(struct drm_psb_private
2296+ *dev_priv);
2297+static void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv);
2298+static void mtx_dma_read(struct drm_device *dev, uint32_t source_addr,
2299+ uint32_t size);
2300+static void mtx_dma_write(struct drm_device *dev);
2301+
2302+
2303+#define DEBUG_FUNCTION 0
2304+
2305+#if DEBUG_FUNCTION
2306+static int topaz_test_null(struct drm_device *dev, uint32_t seq);
2307+static int topaz_test_sync(struct drm_device *dev, uint32_t seq,
2308+ uint32_t sync_seq);
2309+static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value);
2310+static void topaz_save_default_regs(struct drm_psb_private *dev_priv,
2311+ uint32_t *data);
2312+static void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
2313+ uint32_t *data);
2314+static int topaz_test_sync_manual_alloc_page(struct drm_device *dev,
2315+ uint32_t seq,
2316+ uint32_t sync_seq,
2317+ uint32_t offset);
2318+static int topaz_test_sync_tt_test(struct drm_device *dev,
2319+ uint32_t seq,
2320+ uint32_t sync_seq);
2321+#endif
2322+
2323+uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
2324+ uint32_t byte_addr)
2325+{
2326+ uint32_t read_val;
2327+ uint32_t reg, bank_size, ram_bank_size, ram_id;
2328+
2329+ TOPAZ_READ32(0x3c, &reg);
2330+ reg = 0x0a0a0606;
2331+ bank_size = (reg & 0xF0000) >> 16;
2332+
2333+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
2334+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
2335+
2336+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
2337+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
2338+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR) |
2339+ F_ENCODE(1, MTX_MTX_MCMR));
2340+
2341+ /* ?? poll this reg? */
2342+ topaz_wait_for_register(dev_priv,
2343+ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
2344+ 1, 1);
2345+
2346+ MTX_READ32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, &read_val);
2347+
2348+ return read_val;
2349+}
2350+
2351+void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
2352+ uint32_t byte_addr, uint32_t val)
2353+{
2354+ uint32_t ram_id = 0;
2355+ uint32_t reg, bank_size, ram_bank_size;
2356+
2357+ TOPAZ_READ32(0x3c, &reg);
2358+
2359+ /* PSB_DEBUG_GENERAL ("TOPAZ: DEBUG REG(%x)\n", reg); */
2360+ reg = 0x0a0a0606;
2361+
2362+ bank_size = (reg & 0xF0000) >> 16;
2363+
2364+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
2365+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
2366+
2367+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
2368+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
2369+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
2370+
2371+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
2372+
2373+ /* ?? poll this reg? */
2374+ topaz_wait_for_register(dev_priv,
2375+ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
2376+ 1, 1);
2377+
2378+ return;
2379+}
2380+
2381+void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
2382+ uint32_t byte_addr)
2383+{
2384+ uint32_t ram_id = 0;
2385+ uint32_t reg, bank_size, ram_bank_size;
2386+
2387+ TOPAZ_READ32(0x3c, &reg);
2388+
2389+ reg = 0x0a0a0606;
2390+
2391+ bank_size = (reg & 0xF0000) >> 16;
2392+
2393+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
2394+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
2395+
2396+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
2397+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
2398+ F_ENCODE(1, MTX_MTX_MCMAI) |
2399+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
2400+}
2401+
2402+void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
2403+ uint32_t val)
2404+{
2405+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
2406+}
2407+
2408+
2409+int topaz_wait_for_register(struct drm_psb_private *dev_priv,
2410+ uint32_t addr, uint32_t value, uint32_t mask)
2411+{
2412+ uint32_t tmp;
2413+ uint32_t count = 10000;
2414+
2415+ /* # poll topaz register for certain times */
2416+ while (count) {
2417+ /* #.# read */
2418+ MM_READ32(addr, 0, &tmp);
2419+
2420+ if (value == (tmp & mask))
2421+ return 0;
2422+
2423+ /* #.# delay and loop */
2424+ DRM_UDELAY(100);
2425+ --count;
2426+ }
2427+
2428+ /* # now waiting is timeout, return 1 indicat failed */
2429+ /* XXX: testsuit means a timeout 10000 */
2430+
2431+ DRM_ERROR("TOPAZ:time out to poll addr(0x%x) expected value(0x%08x), "
2432+ "actual 0x%08x (0x%08x & 0x%08x)\n",
2433+ addr, value, tmp & mask, tmp, mask);
2434+
2435+ return -EBUSY;
2436+
2437+}
2438+
2439+static ssize_t psb_topaz_pmstate_show(struct device *dev,
2440+ struct device_attribute *attr, char *buf)
2441+{
2442+ struct drm_device *drm_dev = dev_get_drvdata(dev);
2443+ struct drm_psb_private *dev_priv;
2444+ struct topaz_private *topaz_priv;
2445+ unsigned int pmstate;
2446+ unsigned long flags;
2447+ int ret = -EINVAL;
2448+
2449+ if (drm_dev == NULL)
2450+ return 0;
2451+
2452+ dev_priv = drm_dev->dev_private;
2453+ topaz_priv = dev_priv->topaz_private;
2454+ pmstate = topaz_priv->pmstate;
2455+
2456+ pmstate = topaz_priv->pmstate;
2457+ spin_lock_irqsave(&topaz_priv->topaz_lock, flags);
2458+ ret = sprintf(buf, "%s\n",
2459+ (pmstate == PSB_PMSTATE_POWERUP) ? "powerup"
2460+ : ((pmstate == PSB_PMSTATE_POWERDOWN) ? "powerdown"
2461+ : "clockgated"));
2462+ spin_unlock_irqrestore(&topaz_priv->topaz_lock, flags);
2463+
2464+ return ret;
2465+}
2466+
2467+static DEVICE_ATTR(topaz_pmstate, 0444, psb_topaz_pmstate_show, NULL);
2468+
2469+
2470+/* this function finish the first part of initialization, the rest
2471+ * should be done in topaz_setup_fw
2472+ */
2473+int lnc_topaz_init(struct drm_device *dev)
2474+{
2475+ struct drm_psb_private *dev_priv = dev->dev_private;
2476+ struct ttm_bo_device *bdev = &dev_priv->bdev;
2477+ uint32_t core_id, core_rev;
2478+ int ret = 0, n;
2479+ bool is_iomem;
2480+ struct topaz_private *topaz_priv;
2481+ void *topaz_bo_virt;
2482+
2483+ PSB_DEBUG_GENERAL("TOPAZ: init topaz data structures\n");
2484+ topaz_priv = kmalloc(sizeof(struct topaz_private), GFP_KERNEL);
2485+ if (topaz_priv == NULL)
2486+ return -1;
2487+
2488+ dev_priv->topaz_private = topaz_priv;
2489+ memset(topaz_priv, 0, sizeof(struct topaz_private));
2490+
2491+ /* get device --> drm_device --> drm_psb_private --> topaz_priv
2492+ * for psb_topaz_pmstate_show: topaz_pmpolicy
2493+ * if not pci_set_drvdata, can't get drm_device from device
2494+ */
2495+ pci_set_drvdata(dev->pdev, dev);
2496+ if (device_create_file(&dev->pdev->dev,
2497+ &dev_attr_topaz_pmstate))
2498+ DRM_ERROR("TOPAZ: could not create sysfs file\n");
2499+ topaz_priv->sysfs_pmstate = sysfs_get_dirent(
2500+ dev->pdev->dev.kobj.sd, "topaz_pmstate");
2501+
2502+ topaz_priv = dev_priv->topaz_private;
2503+
2504+ /* # initialize comand topaz queueing [msvdx_queue] */
2505+ INIT_LIST_HEAD(&topaz_priv->topaz_queue);
2506+ /* # init mutex? CHECK: mutex usage [msvdx_mutex] */
2507+ mutex_init(&topaz_priv->topaz_mutex);
2508+ /* # spin lock init? CHECK spin lock usage [msvdx_lock] */
2509+ spin_lock_init(&topaz_priv->topaz_lock);
2510+
2511+ /* # topaz status init. [msvdx_busy] */
2512+ topaz_priv->topaz_busy = 0;
2513+ topaz_priv->topaz_cmd_seq = 0;
2514+ topaz_priv->topaz_fw_loaded = 0;
2515+ /* FIXME: workaround since JPEG firmware is not ready */
2516+ topaz_priv->topaz_cur_codec = 1;
2517+ topaz_priv->cur_mtx_data_size = 0;
2518+
2519+ topaz_priv->topaz_mtx_reg_state = kmalloc(TOPAZ_MTX_REG_SIZE,
2520+ GFP_KERNEL);
2521+ if (topaz_priv->topaz_mtx_reg_state == NULL) {
2522+ DRM_ERROR("TOPAZ: failed to allocate space "
2523+ "for mtx register\n");
2524+ return -1;
2525+ }
2526+
2527+ /* # gain write back structure,we may only need 32+4=40DW */
2528+ ret = ttm_buffer_object_create(bdev, 4096,
2529+ ttm_bo_type_kernel,
2530+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
2531+ 0, 0, 0, NULL, &(topaz_priv->topaz_bo));
2532+ if (ret != 0) {
2533+ DRM_ERROR("TOPAZ: failed to allocate topaz BO.\n");
2534+ return ret;
2535+ }
2536+
2537+ ret = ttm_bo_kmap(topaz_priv->topaz_bo, 0,
2538+ topaz_priv->topaz_bo->num_pages,
2539+ &topaz_priv->topaz_bo_kmap);
2540+ if (ret) {
2541+ DRM_ERROR("TOPAZ: map topaz BO bo failed......\n");
2542+ ttm_bo_unref(&topaz_priv->topaz_bo);
2543+ return ret;
2544+ }
2545+
2546+ topaz_bo_virt = ttm_kmap_obj_virtual(&topaz_priv->topaz_bo_kmap,
2547+ &is_iomem);
2548+ topaz_priv->topaz_ccb_wb = (void *) topaz_bo_virt;
2549+ topaz_priv->topaz_wb_offset = topaz_priv->topaz_bo->offset;
2550+ topaz_priv->topaz_sync_addr = (uint32_t *) (topaz_bo_virt
2551+ + 2048);
2552+ topaz_priv->topaz_sync_offset = topaz_priv->topaz_wb_offset
2553+ + 2048;
2554+ PSB_DEBUG_GENERAL("TOPAZ: alloc BO for WriteBack and SYNC\n");
2555+ PSB_DEBUG_GENERAL("TOPAZ: WB offset=0x%08x\n",
2556+ topaz_priv->topaz_wb_offset);
2557+ PSB_DEBUG_GENERAL("TOPAZ: SYNC offset=0x%08x\n",
2558+ topaz_priv->topaz_sync_offset);
2559+
2560+ *(topaz_priv->topaz_sync_addr) = ~0; /* reset sync seq */
2561+
2562+ /* # reset topaz */
2563+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2564+ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2565+ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2566+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2567+ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2568+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2569+ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2570+
2571+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2572+ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2573+ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2574+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2575+ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2576+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2577+ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2578+
2579+ /* # set up MMU */
2580+ topaz_mmu_hwsetup(dev_priv);
2581+
2582+ PSB_DEBUG_GENERAL("TOPAZ: defer firmware loading to the place"
2583+ "when receiving user space commands\n");
2584+
2585+#if 0 /* can't load FW here */
2586+ /* #.# load fw to driver */
2587+ PSB_DEBUG_GENERAL("TOPAZ: will init firmware\n");
2588+ ret = topaz_init_fw(dev);
2589+ if (ret != 0)
2590+ return -1;
2591+
2592+ topaz_setup_fw(dev, IMG_CODEC_MPEG4_NO_RC);/* just for test */
2593+#endif
2594+ /* <msvdx does> # minimal clock */
2595+
2596+ /* <msvdx does> # return 0 */
2597+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_ID, &core_id);
2598+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_REV, &core_rev);
2599+
2600+ PSB_DEBUG_GENERAL("TOPAZ: core_id(%x) core_rev(%x)\n",
2601+ core_id, core_rev);
2602+
2603+ /* create firmware storage */
2604+ for (n = 1; n < IMG_CODEC_NUM; ++n) {
2605+ /* #.# malloc DRM object for fw storage */
2606+ ret = ttm_buffer_object_create(bdev, 12 * 4096,
2607+ ttm_bo_type_kernel,
2608+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
2609+ 0, 0, 0, NULL, &topaz_priv->topaz_fw[n].text);
2610+ if (ret) {
2611+ DRM_ERROR("Failed to allocate firmware.\n");
2612+ goto out;
2613+ }
2614+
2615+ /* #.# malloc DRM object for fw storage */
2616+ ret = ttm_buffer_object_create(bdev, 12 * 4096,
2617+ ttm_bo_type_kernel,
2618+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
2619+ 0, 0, 0, NULL, &topaz_priv->topaz_fw[n].data);
2620+ if (ret) {
2621+ DRM_ERROR("Failed to allocate firmware.\n");
2622+ goto out;
2623+ }
2624+ }
2625+
2626+ ret = ttm_buffer_object_create(bdev,
2627+ 12 * 4096,
2628+ ttm_bo_type_kernel,
2629+ DRM_PSB_FLAG_MEM_MMU |
2630+ TTM_PL_FLAG_NO_EVICT,
2631+ 0, 0, 0, NULL,
2632+ &topaz_priv->topaz_mtx_data_mem);
2633+ if (ret) {
2634+ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
2635+ "mtx data save\n");
2636+ goto out;
2637+ }
2638+ topaz_priv->cur_mtx_data_size = 0;
2639+
2640+ PSB_DEBUG_INIT("TOPAZ:old clock gating disable = 0x%08x\n",
2641+ PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
2642+ PSB_DEBUG_INIT("TOPAZ:rest MSDVX to disable clock gating\n");
2643+
2644+ PSB_WVDC32(0x00011fff, PSB_TOPAZ_CLOCKGATING);
2645+
2646+ PSB_DEBUG_INIT("MSDVX:new clock gating disable = 0x%08x\n",
2647+ PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
2648+
2649+ return 0;
2650+
2651+out:
2652+ for (n = 1; n < IMG_CODEC_NUM; ++n) {
2653+ if (topaz_priv->topaz_fw[n].text != NULL)
2654+ ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
2655+ if (topaz_priv->topaz_fw[n].data != NULL)
2656+ ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
2657+ }
2658+
2659+ if (topaz_priv->topaz_mtx_data_mem != NULL)
2660+ ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem);
2661+
2662+ return ret;
2663+}
2664+
2665+int lnc_topaz_uninit(struct drm_device *dev)
2666+{
2667+ struct drm_psb_private *dev_priv = dev->dev_private;
2668+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
2669+ int n;
2670+
2671+ /* flush MMU */
2672+ PSB_DEBUG_GENERAL("XXX: need to flush mmu cache here??\n");
2673+ /* topaz_mmu_flushcache (dev_priv); */
2674+
2675+ /* # reset TOPAZ chip */
2676+ lnc_topaz_reset(dev_priv);
2677+
2678+ /* release resources */
2679+ /* # release write back memory */
2680+ topaz_priv->topaz_ccb_wb = NULL;
2681+
2682+ /* release mtx register save space */
2683+ kfree(topaz_priv->topaz_mtx_reg_state);
2684+
2685+ /* release mtx data memory save space */
2686+ if (topaz_priv->topaz_mtx_data_mem)
2687+ ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem);
2688+
2689+ /* # release firmware storage */
2690+ for (n = 1; n < IMG_CODEC_NUM; ++n) {
2691+ if (topaz_priv->topaz_fw[n].text != NULL)
2692+ ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
2693+ if (topaz_priv->topaz_fw[n].data != NULL)
2694+ ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
2695+ }
2696+
2697+ ttm_bo_kunmap(&topaz_priv->topaz_bo_kmap);
2698+ ttm_bo_unref(&topaz_priv->topaz_bo);
2699+
2700+ if (topaz_priv) {
2701+ pci_set_drvdata(dev->pdev, NULL);
2702+ device_remove_file(&dev->pdev->dev, &dev_attr_topaz_pmstate);
2703+ sysfs_put(topaz_priv->sysfs_pmstate);
2704+ topaz_priv->sysfs_pmstate = NULL;
2705+
2706+ kfree(topaz_priv);
2707+ dev_priv->topaz_private = NULL;
2708+ }
2709+
2710+ return 0;
2711+}
2712+
2713+int lnc_topaz_reset(struct drm_psb_private *dev_priv)
2714+{
2715+ struct topaz_private *topaz_priv;
2716+
2717+ topaz_priv = dev_priv->topaz_private;
2718+ topaz_priv->topaz_busy = 0;
2719+ topaz_priv->topaz_cmd_seq = 0;
2720+ topaz_priv->cur_mtx_data_size = 0;
2721+ topaz_priv->topaz_cmd_windex = 0;
2722+ topaz_priv->topaz_needs_reset = 0;
2723+
2724+ /* # reset topaz */
2725+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2726+ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2727+ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2728+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2729+ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2730+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2731+ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2732+
2733+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2734+ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2735+ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2736+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2737+ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2738+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2739+ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2740+
2741+ /* # set up MMU */
2742+ topaz_mmu_hwsetup(dev_priv);
2743+
2744+ return 0;
2745+}
2746+
2747+/* read firmware bin file and load all data into driver */
2748+int topaz_init_fw(struct drm_device *dev)
2749+{
2750+ struct drm_psb_private *dev_priv = dev->dev_private;
2751+ const struct firmware *raw = NULL;
2752+ unsigned char *ptr;
2753+ int ret = 0;
2754+ int n;
2755+ struct topaz_fwinfo *cur_fw;
2756+ int cur_size;
2757+ struct topaz_codec_fw *cur_codec;
2758+ struct ttm_buffer_object **cur_drm_obj;
2759+ struct ttm_bo_kmap_obj tmp_kmap;
2760+ bool is_iomem;
2761+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
2762+
2763+ topaz_priv->stored_initial_qp = 0;
2764+
2765+ /* # get firmware */
2766+ ret = request_firmware(&raw, FIRMWARE_NAME, &dev->pdev->dev);
2767+ if (ret != 0) {
2768+ DRM_ERROR("TOPAZ: request_firmware failed: %d\n", ret);
2769+ return ret;
2770+ }
2771+
2772+ PSB_DEBUG_GENERAL("TOPAZ: opened firmware\n");
2773+
2774+ if (raw && (raw->size < sizeof(struct topaz_fwinfo))) {
2775+ DRM_ERROR("TOPAZ: firmware file is not correct size.\n");
2776+ goto out;
2777+ }
2778+
2779+ ptr = (unsigned char *) raw->data;
2780+
2781+ if (!ptr) {
2782+ DRM_ERROR("TOPAZ: failed to load firmware.\n");
2783+ goto out;
2784+ }
2785+
2786+ /* # load fw from file */
2787+ PSB_DEBUG_GENERAL("TOPAZ: load firmware.....\n");
2788+ cur_fw = NULL;
2789+ /* didn't use the first element */
2790+ for (n = 1; n < IMG_CODEC_NUM; ++n) {
2791+ cur_fw = (struct topaz_fwinfo *) ptr;
2792+
2793+ cur_codec = &topaz_priv->topaz_fw[cur_fw->codec];
2794+ cur_codec->ver = cur_fw->ver;
2795+ cur_codec->codec = cur_fw->codec;
2796+ cur_codec->text_size = cur_fw->text_size;
2797+ cur_codec->data_size = cur_fw->data_size;
2798+ cur_codec->data_location = cur_fw->data_location;
2799+
2800+ PSB_DEBUG_GENERAL("TOPAZ: load firemware %s.\n",
2801+ codec_to_string(cur_fw->codec));
2802+
2803+ /* #.# handle text section */
2804+ ptr += sizeof(struct topaz_fwinfo);
2805+ cur_drm_obj = &cur_codec->text;
2806+ cur_size = cur_fw->text_size;
2807+
2808+ /* #.# fill DRM object with firmware data */
2809+ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
2810+ &tmp_kmap);
2811+ if (ret) {
2812+ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
2813+ ttm_bo_unref(cur_drm_obj);
2814+ *cur_drm_obj = NULL;
2815+ goto out;
2816+ }
2817+
2818+ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
2819+ cur_size);
2820+
2821+ ttm_bo_kunmap(&tmp_kmap);
2822+
2823+ /* #.# handle data section */
2824+ ptr += cur_fw->text_size;
2825+ cur_drm_obj = &cur_codec->data;
2826+ cur_size = cur_fw->data_size;
2827+
2828+ /* #.# fill DRM object with firmware data */
2829+ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
2830+ &tmp_kmap);
2831+ if (ret) {
2832+ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
2833+ ttm_bo_unref(cur_drm_obj);
2834+ *cur_drm_obj = NULL;
2835+ goto out;
2836+ }
2837+
2838+ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
2839+ cur_size);
2840+
2841+ ttm_bo_kunmap(&tmp_kmap);
2842+
2843+ /* #.# validate firmware */
2844+
2845+ /* #.# update ptr */
2846+ ptr += cur_fw->data_size;
2847+ }
2848+
2849+ release_firmware(raw);
2850+
2851+ PSB_DEBUG_GENERAL("TOPAZ: return from firmware init\n");
2852+
2853+ return 0;
2854+
2855+out:
2856+ if (raw) {
2857+ PSB_DEBUG_GENERAL("release firmware....\n");
2858+ release_firmware(raw);
2859+ }
2860+
2861+ return -1;
2862+}
2863+
2864+/* setup fw when start a new context */
2865+int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
2866+{
2867+ struct drm_psb_private *dev_priv = dev->dev_private;
2868+ uint32_t mem_size = RAM_SIZE; /* follow DDK */
2869+ uint32_t verify_pc;
2870+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
2871+
2872+#if 0
2873+ if (codec == topaz_priv->topaz_current_codec) {
2874+ LNC_TRACEL("TOPAZ: reuse previous codec\n");
2875+ return 0;
2876+ }
2877+#endif
2878+
2879+ /* XXX: need to rest topaz? */
2880+ PSB_DEBUG_GENERAL("XXX: should reset topaz when context change?\n");
2881+
2882+ /* XXX: interrupt enable shouldn't be enable here,
2883+ * this funtion is called when interrupt is enable,
2884+ * but here, we've no choice since we have to call setup_fw by
2885+ * manual */
2886+ /* # upload firmware, clear interruputs and start the firmware
2887+ * -- from hostutils.c in TestSuits*/
2888+
2889+ /* # reset MVEA */
2890+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2891+ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2892+ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2893+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2894+ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2895+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2896+ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2897+
2898+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2899+ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2900+ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2901+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2902+ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2903+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2904+ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2905+
2906+
2907+ topaz_mmu_hwsetup(dev_priv);
2908+
2909+#if !LNC_TOPAZ_NO_IRQ
2910+ psb_irq_uninstall_islands(dev, PSB_VIDEO_ENC_ISLAND);
2911+#endif
2912+
2913+ PSB_DEBUG_GENERAL("TOPAZ: will setup firmware....\n");
2914+
2915+ topaz_set_default_regs(dev_priv);
2916+
2917+ /* # reset mtx */
2918+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
2919+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET) |
2920+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
2921+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET));
2922+
2923+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST, 0x0);
2924+
2925+ /* # upload fw by drm */
2926+ PSB_DEBUG_GENERAL("TOPAZ: will upload firmware\n");
2927+
2928+ topaz_upload_fw(dev, codec);
2929+#if 0
2930+ /* allocate the space for context save & restore if needed */
2931+ if (topaz_priv->topaz_mtx_data_mem == NULL) {
2932+ ret = ttm_buffer_object_create(bdev,
2933+ topaz_priv->cur_mtx_data_size * 4,
2934+ ttm_bo_type_kernel,
2935+ DRM_PSB_FLAG_MEM_MMU |
2936+ TTM_PL_FLAG_NO_EVICT,
2937+ 0, 0, 0, NULL,
2938+ &topaz_priv->topaz_mtx_data_mem);
2939+ if (ret) {
2940+ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
2941+ "mtx data save\n");
2942+ return -1;
2943+ }
2944+ }
2945+ PSB_DEBUG_GENERAL("TOPAZ: after upload fw ....\n");
2946+#endif
2947+
2948+ /* XXX: In power save mode, need to save the complete data memory
2949+ * and restore it. MTX_FWIF.c record the data size */
2950+ PSB_DEBUG_GENERAL("TOPAZ:in power save mode need to save memory?\n");
2951+
2952+ PSB_DEBUG_GENERAL("TOPAZ: setting up pc address\n");
2953+ topaz_write_core_reg(dev_priv, TOPAZ_MTX_PC, PC_START_ADDRESS);
2954+
2955+ PSB_DEBUG_GENERAL("TOPAZ: verify pc address\n");
2956+
2957+ topaz_read_core_reg(dev_priv, TOPAZ_MTX_PC, &verify_pc);
2958+
2959+ /* enable auto clock is essential for this driver */
2960+ TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_AUTO_CLK_GATE,
2961+ F_ENCODE(1, TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE) |
2962+ F_ENCODE(1, TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE));
2963+ MVEA_WRITE32(MVEA_CR_MVEA_AUTO_CLOCK_GATING,
2964+ F_ENCODE(1, MVEA_CR_MVEA_IPE_AUTO_CLK_GATE) |
2965+ F_ENCODE(1, MVEA_CR_MVEA_SPE_AUTO_CLK_GATE) |
2966+ F_ENCODE(1, MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE) |
2967+ F_ENCODE(1, MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE));
2968+
2969+ PSB_DEBUG_GENERAL("TOPAZ: current pc(%08X) vs %08X\n",
2970+ verify_pc, PC_START_ADDRESS);
2971+
2972+ /* # turn on MTX */
2973+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
2974+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
2975+
2976+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
2977+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
2978+
2979+ /* # poll on the interrupt which the firmware will generate */
2980+ topaz_wait_for_register(dev_priv,
2981+ TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT,
2982+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTS_MTX),
2983+ F_MASK(TOPAZ_CR_IMG_TOPAZ_INTS_MTX));
2984+
2985+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
2986+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
2987+
2988+ PSB_DEBUG_GENERAL("TOPAZ: after topaz mtx setup ....\n");
2989+
2990+ /* # get ccb buffer addr -- file hostutils.c */
2991+ topaz_priv->topaz_ccb_buffer_addr =
2992+ topaz_read_mtx_mem(dev_priv,
2993+ MTX_DATA_MEM_BASE + mem_size - 4);
2994+ topaz_priv->topaz_ccb_ctrl_addr =
2995+ topaz_read_mtx_mem(dev_priv,
2996+ MTX_DATA_MEM_BASE + mem_size - 8);
2997+ topaz_priv->topaz_ccb_size =
2998+ topaz_read_mtx_mem(dev_priv,
2999+ topaz_priv->topaz_ccb_ctrl_addr +
3000+ MTX_CCBCTRL_CCBSIZE);
3001+
3002+ topaz_priv->topaz_cmd_windex = 0;
3003+
3004+ PSB_DEBUG_GENERAL("TOPAZ:ccb_buffer_addr(%x),ctrl_addr(%x) size(%d)\n",
3005+ topaz_priv->topaz_ccb_buffer_addr,
3006+ topaz_priv->topaz_ccb_ctrl_addr,
3007+ topaz_priv->topaz_ccb_size);
3008+
3009+ /* # write back the initial QP Value */
3010+ topaz_write_mtx_mem(dev_priv,
3011+ topaz_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP,
3012+ topaz_priv->stored_initial_qp);
3013+
3014+ PSB_DEBUG_GENERAL("TOPAZ: write WB mem address 0x%08x\n",
3015+ topaz_priv->topaz_wb_offset);
3016+ topaz_write_mtx_mem(dev_priv, MTX_DATA_MEM_BASE + mem_size - 12,
3017+ topaz_priv->topaz_wb_offset);
3018+
3019+ /* this kick is essential for mtx.... */
3020+ *((uint32_t *) topaz_priv->topaz_ccb_wb) = 0x01020304;
3021+ topaz_mtx_kick(dev_priv, 1);
3022+ DRM_UDELAY(1000);
3023+ PSB_DEBUG_GENERAL("TOPAZ: DDK expected 0x12345678 in WB memory,"
3024+ " and here it is 0x%08x\n",
3025+ *((uint32_t *) topaz_priv->topaz_ccb_wb));
3026+
3027+ *((uint32_t *) topaz_priv->topaz_ccb_wb) = 0x0;/* reset it to 0 */
3028+ PSB_DEBUG_GENERAL("TOPAZ: firmware uploaded.\n");
3029+
3030+ /* XXX: is there any need to record next cmd num??
3031+ * we use fence seqence number to record it
3032+ */
3033+ topaz_priv->topaz_busy = 0;
3034+ topaz_priv->topaz_cmd_seq = 0;
3035+
3036+#if !LNC_TOPAZ_NO_IRQ
3037+ psb_irq_preinstall_islands(dev, PSB_VIDEO_ENC_ISLAND);
3038+ psb_irq_postinstall_islands(dev, PSB_VIDEO_ENC_ISLAND);
3039+ lnc_topaz_enableirq(dev);
3040+#endif
3041+
3042+#if 0
3043+ topaz_mmu_flushcache(dev_priv);
3044+ topaz_test_null(dev, 0xe1e1);
3045+ topaz_test_null(dev, 0xe2e2);
3046+ topaz_test_sync(dev, 0xe2e2, 0x87654321);
3047+
3048+ topaz_mmu_test(dev, 0x12345678);
3049+ topaz_test_null(dev, 0xe3e3);
3050+ topaz_mmu_test(dev, 0x8764321);
3051+
3052+ topaz_test_null(dev, 0xe4e4);
3053+ topaz_test_null(dev, 0xf3f3);
3054+#endif
3055+
3056+ return 0;
3057+}
3058+
3059+#if UPLOAD_FW_BY_DMA
3060+int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
3061+{
3062+ struct drm_psb_private *dev_priv = dev->dev_private;
3063+ const struct topaz_codec_fw *cur_codec_fw;
3064+ uint32_t text_size, data_size;
3065+ uint32_t data_location;
3066+ uint32_t cur_mtx_data_size;
3067+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3068+
3069+ /* # refer HLD document */
3070+
3071+ /* # MTX reset */
3072+ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
3073+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
3074+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
3075+
3076+ DRM_UDELAY(6000);
3077+
3078+ /* # upload the firmware by DMA */
3079+ cur_codec_fw = &topaz_priv->topaz_fw[codec];
3080+
3081+ PSB_DEBUG_GENERAL("Topaz:upload codec %s(%d) text sz=%d data sz=%d"
3082+ " data location(%d)\n", codec_to_string(codec), codec,
3083+ cur_codec_fw->text_size, cur_codec_fw->data_size,
3084+ cur_codec_fw->data_location);
3085+
3086+ /* # upload text */
3087+ text_size = cur_codec_fw->text_size / 4;
3088+
3089+ /* setup the MTX to start recieving data:
3090+ use a register for the transfer which will point to the source
3091+ (MTX_CR_MTX_SYSC_CDMAT) */
3092+ /* #.# fill the dst addr */
3093+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
3094+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
3095+ F_ENCODE(2, MTX_BURSTSIZE) |
3096+ F_ENCODE(0, MTX_RNW) |
3097+ F_ENCODE(1, MTX_ENABLE) |
3098+ F_ENCODE(text_size, MTX_LENGTH));
3099+
3100+ /* #.# set DMAC access to host memory via BIF */
3101+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
3102+
3103+ /* #.# transfer the codec */
3104+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
3105+ MTX_CR_MTX_SYSC_CDMAT, text_size, 0, 0);
3106+
3107+ /* #.# wait dma finish */
3108+ topaz_wait_for_register(dev_priv,
3109+ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
3110+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
3111+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
3112+
3113+ /* #.# clear interrupt */
3114+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
3115+
3116+ /* # return access to topaz core */
3117+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
3118+
3119+ /* # upload data */
3120+ data_size = cur_codec_fw->data_size / 4;
3121+ data_location = cur_codec_fw->data_location;
3122+
3123+ /* #.# fill the dst addr */
3124+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA,
3125+ 0x80900000 + (data_location - 0x82880000));
3126+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
3127+ F_ENCODE(2, MTX_BURSTSIZE) |
3128+ F_ENCODE(0, MTX_RNW) |
3129+ F_ENCODE(1, MTX_ENABLE) |
3130+ F_ENCODE(data_size, MTX_LENGTH));
3131+
3132+ /* #.# set DMAC access to host memory via BIF */
3133+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
3134+
3135+ /* #.# transfer the codec */
3136+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->data->offset, 0,
3137+ MTX_CR_MTX_SYSC_CDMAT, data_size, 0, 0);
3138+
3139+ /* #.# wait dma finish */
3140+ topaz_wait_for_register(dev_priv,
3141+ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
3142+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
3143+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
3144+
3145+ /* #.# clear interrupt */
3146+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
3147+
3148+ /* # return access to topaz core */
3149+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
3150+
3151+ /* record this codec's mtx data size for
3152+ * context save & restore */
3153+ /* FIXME: since non-root sighting fixed by pre allocated,
3154+ * only need to correct the buffer size
3155+ */
3156+ cur_mtx_data_size = data_size;
3157+ if (topaz_priv->cur_mtx_data_size != cur_mtx_data_size)
3158+ topaz_priv->cur_mtx_data_size = cur_mtx_data_size;
3159+
3160+ return 0;
3161+}
3162+
3163+#else
3164+
3165+void topaz_mtx_upload_by_register(struct drm_device *dev, uint32_t mtx_mem,
3166+ uint32_t addr, uint32_t size,
3167+ struct ttm_buffer_object *buf)
3168+{
3169+ struct drm_psb_private *dev_priv = dev->dev_private;
3170+ uint32_t *buf_p;
3171+ uint32_t debug_reg, bank_size, bank_ram_size, bank_count;
3172+ uint32_t cur_ram_id, ram_addr , ram_id;
3173+ int map_ret, lp;
3174+ struct ttm_bo_kmap_obj bo_kmap;
3175+ bool is_iomem;
3176+ uint32_t cur_addr;
3177+
3178+ get_mtx_control_from_dash(dev_priv);
3179+
3180+ map_ret = ttm_bo_kmap(buf, 0, buf->num_pages, &bo_kmap);
3181+ if (map_ret) {
3182+ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", map_ret);
3183+ return;
3184+ }
3185+ buf_p = (uint32_t *) ttm_kmap_obj_virtual(&bo_kmap, &is_iomem);
3186+
3187+
3188+ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, &debug_reg);
3189+ debug_reg = 0x0a0a0606;
3190+ bank_size = (debug_reg & 0xf0000) >> 16;
3191+ bank_ram_size = 1 << (bank_size + 2);
3192+
3193+ bank_count = (debug_reg & 0xf00) >> 8;
3194+
3195+ topaz_wait_for_register(dev_priv,
3196+ MTX_START+MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET,
3197+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
3198+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
3199+
3200+ cur_ram_id = -1;
3201+ cur_addr = addr;
3202+ for (lp = 0; lp < size / 4; ++lp) {
3203+ ram_id = mtx_mem + (cur_addr / bank_ram_size);
3204+
3205+ if (cur_ram_id != ram_id) {
3206+ ram_addr = cur_addr >> 2;
3207+
3208+ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
3209+ F_ENCODE(ram_id, MTX_MTX_MCMID) |
3210+ F_ENCODE(ram_addr, MTX_MTX_MCM_ADDR) |
3211+ F_ENCODE(1, MTX_MTX_MCMAI));
3212+
3213+ cur_ram_id = ram_id;
3214+ }
3215+ cur_addr += 4;
3216+
3217+ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET,
3218+ *(buf_p + lp));
3219+
3220+ topaz_wait_for_register(dev_priv,
3221+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET + MTX_START,
3222+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
3223+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
3224+ }
3225+
3226+ ttm_bo_kunmap(&bo_kmap);
3227+
3228+ PSB_DEBUG_GENERAL("TOPAZ: register data upload done\n");
3229+ return;
3230+}
3231+
3232+int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
3233+{
3234+ struct drm_psb_private *dev_priv = dev->dev_private;
3235+ const struct topaz_codec_fw *cur_codec_fw;
3236+ uint32_t text_size, data_size;
3237+ uint32_t data_location;
3238+
3239+ /* # refer HLD document */
3240+ /* # MTX reset */
3241+ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
3242+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
3243+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
3244+
3245+ DRM_UDELAY(6000);
3246+
3247+ /* # upload the firmware by DMA */
3248+ cur_codec_fw = &topaz_priv->topaz_fw[codec];
3249+
3250+ PSB_DEBUG_GENERAL("Topaz: upload codec %s text size(%d) data size(%d)"
3251+ " data location(0x%08x)\n", codec_to_string(codec),
3252+ cur_codec_fw->text_size, cur_codec_fw->data_size,
3253+ cur_codec_fw->data_location);
3254+
3255+ /* # upload text */
3256+ text_size = cur_codec_fw->text_size;
3257+
3258+ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_CODE_MEM,
3259+ PC_START_ADDRESS - MTX_MEMORY_BASE,
3260+ text_size, cur_codec_fw->text);
3261+
3262+ /* # upload data */
3263+ data_size = cur_codec_fw->data_size;
3264+ data_location = cur_codec_fw->data_location;
3265+
3266+ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_DATA_MEM,
3267+ data_location - 0x82880000, data_size,
3268+ cur_codec_fw->data);
3269+
3270+ return 0;
3271+}
3272+
3273+#endif /* UPLOAD_FW_BY_DMA */
3274+
3275+void
3276+topaz_dma_transfer(struct drm_psb_private *dev_priv, uint32_t channel,
3277+ uint32_t src_phy_addr, uint32_t offset,
3278+ uint32_t soc_addr, uint32_t byte_num,
3279+ uint32_t is_increment, uint32_t is_write)
3280+{
3281+ uint32_t dmac_count;
3282+ uint32_t irq_stat;
3283+ uint32_t count;
3284+
3285+ PSB_DEBUG_GENERAL("TOPAZ: using dma to transfer firmware\n");
3286+ /* # check that no transfer is currently in progress and no
3287+ interrupts are outstanding ?? (why care interrupt) */
3288+ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &dmac_count);
3289+ if (0 != (dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)))
3290+ DRM_ERROR("TOPAZ: there is tranfer in progress\n");
3291+
3292+ /* assert(0==(dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)));*/
3293+
3294+ /* no hold off period */
3295+ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
3296+ /* clear previous interrupts */
3297+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
3298+ /* check irq status */
3299+ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_stat);
3300+ /* assert(0 == irq_stat); */
3301+ if (0 != irq_stat)
3302+ DRM_ERROR("TOPAZ: there is hold up\n");
3303+
3304+ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel),
3305+ (src_phy_addr + offset));
3306+ count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
3307+ is_write, DMAC_PWIDTH_32_BIT, byte_num);
3308+ /* generate an interrupt at the end of transfer */
3309+ count |= MASK_IMG_SOC_TRANSFER_IEN;
3310+ count |= F_ENCODE(is_write, IMG_SOC_DIR);
3311+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count);
3312+
3313+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
3314+ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0,
3315+ is_increment, DMAC_BURST_2));
3316+
3317+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
3318+
3319+ /* Finally, rewrite the count register with
3320+ * the enable bit set to kick off the transfer
3321+ */
3322+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count | MASK_IMG_SOC_EN);
3323+
3324+ PSB_DEBUG_GENERAL("TOPAZ: dma transfer started.\n");
3325+
3326+ return;
3327+}
3328+
3329+void topaz_set_default_regs(struct drm_psb_private *dev_priv)
3330+{
3331+ int n;
3332+ int count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
3333+
3334+ for (n = 0; n < count; n++)
3335+ MM_WRITE32(topaz_default_regs[n][0],
3336+ topaz_default_regs[n][1],
3337+ topaz_default_regs[n][2]);
3338+
3339+}
3340+
3341+void topaz_write_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
3342+ const uint32_t val)
3343+{
3344+ uint32_t tmp;
3345+ get_mtx_control_from_dash(dev_priv);
3346+
3347+ /* put data into MTX_RW_DATA */
3348+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET, val);
3349+
3350+ /* request a write */
3351+ tmp = reg &
3352+ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK;
3353+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, tmp);
3354+
3355+ /* wait for operation finished */
3356+ topaz_wait_for_register(dev_priv,
3357+ MTX_START +
3358+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
3359+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
3360+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
3361+
3362+ release_mtx_control_from_dash(dev_priv);
3363+}
3364+
3365+void topaz_read_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
3366+ uint32_t *ret_val)
3367+{
3368+ uint32_t tmp;
3369+
3370+ get_mtx_control_from_dash(dev_priv);
3371+
3372+ /* request a write */
3373+ tmp = (reg &
3374+ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
3375+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
3376+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK | tmp);
3377+
3378+ /* wait for operation finished */
3379+ topaz_wait_for_register(dev_priv,
3380+ MTX_START +
3381+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
3382+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
3383+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
3384+
3385+ /* read */
3386+ MTX_READ32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET,
3387+ ret_val);
3388+
3389+ release_mtx_control_from_dash(dev_priv);
3390+}
3391+
3392+void get_mtx_control_from_dash(struct drm_psb_private *dev_priv)
3393+{
3394+ int debug_reg_slave_val;
3395+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3396+
3397+ /* GetMTXControlFromDash */
3398+ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
3399+ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE) |
3400+ F_ENCODE(2, TOPAZ_CR_MTX_DBG_GPIO_OUT));
3401+ do {
3402+ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
3403+ &debug_reg_slave_val);
3404+ } while ((debug_reg_slave_val & 0x18) != 0);
3405+
3406+ /* save access control */
3407+ TOPAZ_READ32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
3408+ &topaz_priv->topaz_dash_access_ctrl);
3409+}
3410+
3411+void release_mtx_control_from_dash(struct drm_psb_private *dev_priv)
3412+{
3413+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3414+
3415+ /* restore access control */
3416+ TOPAZ_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
3417+ topaz_priv->topaz_dash_access_ctrl);
3418+
3419+ /* release bus */
3420+ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
3421+ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE));
3422+}
3423+
3424+void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv)
3425+{
3426+ uint32_t pd_addr = psb_get_default_pd_addr(dev_priv->mmu);
3427+
3428+ /* bypass all request while MMU is being configured */
3429+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
3430+ F_ENCODE(1, TOPAZ_CR_MMU_BYPASS));
3431+
3432+ /* set MMU hardware at the page table directory */
3433+ PSB_DEBUG_GENERAL("TOPAZ: write PD phyaddr=0x%08x "
3434+ "into MMU_DIR_LIST0/1\n", pd_addr);
3435+ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(0), pd_addr);
3436+ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(1), 0);
3437+
3438+ /* setup index register, all pointing to directory bank 0 */
3439+ TOPAZ_WRITE32(TOPAZ_CR_MMU_BANK_INDEX, 0);
3440+
3441+ /* now enable MMU access for all requestors */
3442+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, 0);
3443+}
3444+
3445+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv)
3446+{
3447+ uint32_t mmu_control;
3448+
3449+ if (!powermgmt_is_hw_on(dev_priv->dev->pdev, PSB_VIDEO_ENC_ISLAND))
3450+ return;
3451+
3452+#if 0
3453+ PSB_DEBUG_GENERAL("XXX: Only one PTD/PTE cache"
3454+ " so flush using the master core\n");
3455+#endif
3456+ /* XXX: disable interrupt */
3457+
3458+ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &mmu_control);
3459+ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_INVALDC);
3460+ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_FLUSH);
3461+
3462+#if 0
3463+ PSB_DEBUG_GENERAL("Set Invalid flag (this causes a flush with MMU\n"
3464+ "still operating afterwards even if not cleared,\n"
3465+ "but may want to replace with MMU_FLUSH?\n");
3466+#endif
3467+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
3468+
3469+ /* clear it */
3470+ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_INVALDC));
3471+ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_FLUSH));
3472+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
3473+}
3474+
3475+#if DEBUG_FUNCTION
3476+
3477+static int topaz_test_sync(struct drm_device *dev, uint32_t seq,
3478+ uint32_t sync_seq)
3479+{
3480+ struct drm_psb_private *dev_priv = dev->dev_private;
3481+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3482+ uint32_t sync_cmd[3];
3483+ struct topaz_cmd_header *cmd_hdr;
3484+ uint32_t *sync_p = (uint32_t *)topaz_priv->topaz_sync_addr;
3485+ int count = 1000;
3486+ uint32_t clr_flag;
3487+
3488+ cmd_hdr = (struct topaz_cmd_header *)&sync_cmd[0];
3489+
3490+ /* reset sync area */
3491+ *sync_p = 0;
3492+
3493+ /* insert a SYNC command here */
3494+ cmd_hdr->id = MTX_CMDID_SYNC;
3495+ cmd_hdr->size = 3;
3496+ cmd_hdr->seq = seq;
3497+
3498+ sync_cmd[1] = topaz_priv->topaz_sync_offset;
3499+ sync_cmd[2] = sync_seq;
3500+
3501+ TOPAZ_BEGIN_CCB(dev_priv);
3502+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
3503+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
3504+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
3505+ TOPAZ_END_CCB(dev_priv, 1);
3506+
3507+ PSB_DEBUG_GENERAL("Topaz: Sent SYNC with cmd seq=0x%08x,"
3508+ "sync_seq=0x%08x\n", seq, sync_seq);
3509+
3510+ while (count && *sync_p != sync_seq) {
3511+ DRM_UDELAY(100);
3512+ --count;
3513+ }
3514+ if ((count == 0) && (*sync_p != sync_seq)) {
3515+ DRM_ERROR("TOPAZ: wait sycn timeout, expect sync seq 0x%08x,"
3516+ "actual 0x%08x\n", sync_seq, *sync_p);
3517+ }
3518+ PSB_DEBUG_GENERAL("TOPAZ: SYNC succeed, sync seq=0x%08x\n", *sync_p);
3519+ PSB_DEBUG_GENERAL("Topaz: after SYNC test, query IRQ and clear it\n");
3520+
3521+ clr_flag = lnc_topaz_queryirq(dev);
3522+ lnc_topaz_clearirq(dev, clr_flag);
3523+
3524+ return 0;
3525+}
3526+static int topaz_test_sync_tt_test(struct drm_device *dev,
3527+ uint32_t seq,
3528+ uint32_t sync_seq)
3529+{
3530+ struct drm_psb_private *dev_priv = dev->dev_private;
3531+ struct ttm_bo_device *bdev = &dev_priv->bdev;
3532+ int ret;
3533+ bool is_iomem;
3534+ struct ttm_buffer_object *test_obj;
3535+ struct ttm_bo_kmap_obj test_kmap;
3536+ unsigned int *test_adr;
3537+ uint32_t sync_cmd[3];
3538+ int count = 1000;
3539+ unsigned long pfn;
3540+
3541+ ret = ttm_buffer_object_create(bdev, 4096,
3542+ ttm_bo_type_kernel,
3543+ TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT,
3544+ 0, 0, 0, NULL, &test_obj);
3545+ if (ret) {
3546+ DRM_ERROR("failed create test object buffer\n");
3547+ return -1;
3548+ }
3549+
3550+ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
3551+ test_obj->offset, &pfn);
3552+ if (ret) {
3553+ DRM_ERROR("failed to get pfn from virtual\n");
3554+ return -1;
3555+ }
3556+
3557+ PSB_DEBUG_GENERAL("Topaz:offset %lx, pfn %lx\n", test_obj->offset, pfn);
3558+
3559+ ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages,
3560+ &test_kmap);
3561+ if (ret) {
3562+ DRM_ERROR("failed map buffer\n");
3563+ return -1;
3564+ }
3565+ test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem);
3566+ *test_adr = 0xff55;
3567+ ttm_bo_kunmap(&test_kmap);
3568+
3569+ /* insert a SYNC command here */
3570+ sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) |
3571+ (seq << 16);
3572+ sync_cmd[1] = test_obj->offset;
3573+ sync_cmd[2] = sync_seq;
3574+
3575+ TOPAZ_BEGIN_CCB(dev_priv);
3576+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
3577+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
3578+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
3579+ TOPAZ_END_CCB(dev_priv, 1);
3580+
3581+ ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages,
3582+ &test_kmap);
3583+ if (ret) {
3584+ DRM_ERROR("failed map buffer\n");
3585+ return -1;
3586+ }
3587+ test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem);
3588+
3589+ while (count && *test_adr != sync_seq) {
3590+ DRM_UDELAY(100);
3591+ --count;
3592+ }
3593+ if ((count == 0) && (*test_adr != sync_seq)) {
3594+ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),"
3595+ "actual 0x%08x\n",
3596+ sync_seq, *test_adr);
3597+ }
3598+ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *test_adr);
3599+ ttm_bo_kunmap(&test_kmap);
3600+ ttm_bo_unref(&test_obj);
3601+
3602+ return 0;
3603+}
3604+
3605+static int topaz_test_sync_manual_alloc_page(struct drm_device *dev,
3606+ uint32_t seq,
3607+ uint32_t sync_seq,
3608+ uint32_t offset)
3609+{
3610+ struct drm_psb_private *dev_priv = dev->dev_private;
3611+ int ret;
3612+ uint32_t sync_cmd[3];
3613+ int count = 1000;
3614+ unsigned long pfn;
3615+
3616+ struct page *p;
3617+ uint32_t *v;
3618+/* uint32_t offset = 0xd0000000; */
3619+
3620+ p = alloc_page(GFP_DMA32);
3621+ if (!p) {
3622+ DRM_ERROR("Topaz:Failed allocating page\n");
3623+ return -1;
3624+ }
3625+
3626+ v = kmap(p);
3627+ memset(v, 0x67, PAGE_SIZE);
3628+ pfn = (offset >> PAGE_SHIFT);
3629+ kunmap(p);
3630+
3631+ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
3632+ &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
3633+ if (ret) {
3634+ DRM_ERROR("Topaz:Failed inserting mmu page\n");
3635+ return -1;
3636+ }
3637+
3638+ /* insert a SYNC command here */
3639+ sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) |
3640+ (0x5b << 16);
3641+ sync_cmd[1] = pfn << PAGE_SHIFT;
3642+ sync_cmd[2] = seq;
3643+
3644+ TOPAZ_BEGIN_CCB(dev_priv);
3645+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
3646+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
3647+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
3648+ TOPAZ_END_CCB(dev_priv, 1);
3649+
3650+ v = kmap(p);
3651+ while (count && *v != sync_seq) {
3652+ DRM_UDELAY(100);
3653+ --count;
3654+ }
3655+ if ((count == 0) && (*v != sync_seq)) {
3656+ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),"
3657+ "actual 0x%08x\n",
3658+ sync_seq, *v);
3659+ }
3660+ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *v);
3661+ kunmap(p);
3662+
3663+ return 0;
3664+}
3665+
3666+static int topaz_test_null(struct drm_device *dev, uint32_t seq)
3667+{
3668+ struct drm_psb_private *dev_priv = dev->dev_private;
3669+ struct topaz_cmd_header null_cmd;
3670+ uint32_t clr_flag;
3671+
3672+ /* XXX: here we finished firmware setup....
3673+ * using a NULL command to verify the
3674+ * correctness of firmware
3675+ */
3676+
3677+ null_cmd.id = MTX_CMDID_NULL;
3678+ null_cmd.size = 1;
3679+ null_cmd.seq = seq;
3680+
3681+ TOPAZ_BEGIN_CCB(dev_priv);
3682+ TOPAZ_OUT_CCB(dev_priv, *((uint32_t *)&null_cmd));
3683+ TOPAZ_END_CCB(dev_priv, 1);
3684+
3685+ DRM_UDELAY(1000); /* wait to finish */
3686+
3687+ PSB_DEBUG_GENERAL("Topaz: Sent NULL with sequence=0x%08x,"
3688+ " got sequence=0x%08x (WB_seq=0x%08x,WB_roff=%d)\n",
3689+ seq, CCB_CTRL_SEQ(dev_priv), WB_CCB_CTRL_SEQ(dev_priv),
3690+ WB_CCB_CTRL_RINDEX(dev_priv));
3691+
3692+ PSB_DEBUG_GENERAL("Topaz: after NULL test, query IRQ and clear it\n");
3693+
3694+ clr_flag = lnc_topaz_queryirq(dev);
3695+ lnc_topaz_clearirq(dev, clr_flag);
3696+
3697+ return 0;
3698+}
3699+
3700+
3701+/*
3702+ * this function will test whether the mmu is correct:
3703+ * it get a drm_buffer_object and use CMD_SYNC to write
3704+ * certain value into this buffer.
3705+ */
3706+static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value)
3707+{
3708+ struct drm_psb_private *dev_priv = dev->dev_private;
3709+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3710+ unsigned long real_pfn;
3711+ int ret;
3712+
3713+ /* topaz_mmu_flush(dev); */
3714+ topaz_test_sync(dev, 0x55, sync_value);
3715+
3716+ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
3717+ topaz_priv->topaz_sync_offset, &real_pfn);
3718+ if (ret != 0) {
3719+ PSB_DEBUG_GENERAL("psb_mmu_virtual_to_pfn failed,exit\n");
3720+ return;
3721+ }
3722+ PSB_DEBUG_GENERAL("TOPAZ: issued SYNC command, "
3723+ "BO offset=0x%08x (pfn=%lu), synch value=0x%08x\n",
3724+ topaz_priv->topaz_sync_offset, real_pfn, sync_value);
3725+}
3726+
3727+void topaz_save_default_regs(struct drm_psb_private *dev_priv, uint32_t *data)
3728+{
3729+ int n;
3730+ int count;
3731+
3732+ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
3733+ for (n = 0; n < count; n++, ++data)
3734+ MM_READ32(topaz_default_regs[n][0],
3735+ topaz_default_regs[n][1],
3736+ data);
3737+
3738+}
3739+
3740+void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
3741+ uint32_t *data)
3742+{
3743+ int n;
3744+ int count;
3745+
3746+ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
3747+ for (n = 0; n < count; n++, ++data)
3748+ MM_WRITE32(topaz_default_regs[n][0],
3749+ topaz_default_regs[n][1],
3750+ *data);
3751+
3752+}
3753+
3754+#endif
3755+
3756+int lnc_topaz_restore_mtx_state(struct drm_device *dev)
3757+{
3758+ struct drm_psb_private *dev_priv =
3759+ (struct drm_psb_private *)dev->dev_private;
3760+ uint32_t reg_val;
3761+ uint32_t *mtx_reg_state;
3762+ int i;
3763+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3764+
3765+ if (!topaz_priv->topaz_mtx_saved)
3766+ return -1;
3767+
3768+ if (topaz_priv->topaz_mtx_data_mem == NULL) {
3769+ PSB_DEBUG_GENERAL("TOPAZ: try to restore context without "
3770+ "space allocated, return directly without restore\n");
3771+ return -1;
3772+ }
3773+
3774+ /* turn on mtx clocks */
3775+ MTX_READ32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, &reg_val);
3776+ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
3777+ reg_val & (~MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE));
3778+
3779+ /* reset mtx */
3780+ /* FIXME: should use core_write??? */
3781+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
3782+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
3783+ DRM_UDELAY(6000);
3784+
3785+ topaz_mmu_hwsetup(dev_priv);
3786+ /* upload code, restore mtx data */
3787+ mtx_dma_write(dev);
3788+
3789+ mtx_reg_state = topaz_priv->topaz_mtx_reg_state;
3790+ /* restore register */
3791+ /* FIXME: conside to put read/write into one function */
3792+ /* Saves 8 Registers of D0 Bank */
3793+ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
3794+ for (i = 0; i < 8; i++) {
3795+ topaz_write_core_reg(dev_priv, 0x1 | (i<<4),
3796+ *mtx_reg_state);
3797+ mtx_reg_state++;
3798+ }
3799+ /* Saves 8 Registers of D1 Bank */
3800+ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
3801+ for (i = 0; i < 8; i++) {
3802+ topaz_write_core_reg(dev_priv, 0x2 | (i<<4),
3803+ *mtx_reg_state);
3804+ mtx_reg_state++;
3805+ }
3806+ /* Saves 4 Registers of A0 Bank */
3807+ /* A0StP, A0FrP, A0.2 and A0.3 */
3808+ for (i = 0; i < 4; i++) {
3809+ topaz_write_core_reg(dev_priv, 0x3 | (i<<4),
3810+ *mtx_reg_state);
3811+ mtx_reg_state++;
3812+ }
3813+ /* Saves 4 Registers of A1 Bank */
3814+ /* A1GbP, A1LbP, A1.2 and A1.3 */
3815+ for (i = 0; i < 4; i++) {
3816+ topaz_write_core_reg(dev_priv, 0x4 | (i<<4),
3817+ *mtx_reg_state);
3818+ mtx_reg_state++;
3819+ }
3820+ /* Saves PC and PCX */
3821+ for (i = 0; i < 2; i++) {
3822+ topaz_write_core_reg(dev_priv, 0x5 | (i<<4),
3823+ *mtx_reg_state);
3824+ mtx_reg_state++;
3825+ }
3826+ /* Saves 8 Control Registers */
3827+ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
3828+ * TXGPIOO */
3829+ for (i = 0; i < 8; i++) {
3830+ topaz_write_core_reg(dev_priv, 0x7 | (i<<4),
3831+ *mtx_reg_state);
3832+ mtx_reg_state++;
3833+ }
3834+
3835+ /* turn on MTX */
3836+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
3837+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
3838+
3839+ topaz_priv->topaz_mtx_saved = 0;
3840+
3841+ return 0;
3842+}
3843+
3844+int lnc_topaz_save_mtx_state(struct drm_device *dev)
3845+{
3846+ struct drm_psb_private *dev_priv =
3847+ (struct drm_psb_private *)dev->dev_private;
3848+ uint32_t *mtx_reg_state;
3849+ int i;
3850+ struct topaz_codec_fw *cur_codec_fw;
3851+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3852+
3853+ /* FIXME: make sure the topaz_mtx_data_mem is allocated */
3854+ if (topaz_priv->topaz_mtx_data_mem == NULL) {
3855+ PSB_DEBUG_GENERAL("TOPAZ: try to save context without space "
3856+ "allocated, return directly without save\n");
3857+ return -1;
3858+ }
3859+ if (topaz_priv->topaz_fw_loaded == 0) {
3860+ PSB_DEBUG_GENERAL("TOPAZ: try to save context without firmware "
3861+ "uploaded\n");
3862+ return -1;
3863+ }
3864+
3865+ topaz_wait_for_register(dev_priv,
3866+ MTX_START + MTX_CORE_CR_MTX_TXRPT_OFFSET,
3867+ TXRPT_WAITONKICK_VALUE,
3868+ 0xffffffff);
3869+
3870+ /* stop mtx */
3871+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
3872+ MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK);
3873+
3874+ mtx_reg_state = topaz_priv->topaz_mtx_reg_state;
3875+
3876+ /* FIXME: conside to put read/write into one function */
3877+ /* Saves 8 Registers of D0 Bank */
3878+ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
3879+ for (i = 0; i < 8; i++) {
3880+ topaz_read_core_reg(dev_priv, 0x1 | (i<<4),
3881+ mtx_reg_state);
3882+ mtx_reg_state++;
3883+ }
3884+ /* Saves 8 Registers of D1 Bank */
3885+ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
3886+ for (i = 0; i < 8; i++) {
3887+ topaz_read_core_reg(dev_priv, 0x2 | (i<<4),
3888+ mtx_reg_state);
3889+ mtx_reg_state++;
3890+ }
3891+ /* Saves 4 Registers of A0 Bank */
3892+ /* A0StP, A0FrP, A0.2 and A0.3 */
3893+ for (i = 0; i < 4; i++) {
3894+ topaz_read_core_reg(dev_priv, 0x3 | (i<<4),
3895+ mtx_reg_state);
3896+ mtx_reg_state++;
3897+ }
3898+ /* Saves 4 Registers of A1 Bank */
3899+ /* A1GbP, A1LbP, A1.2 and A1.3 */
3900+ for (i = 0; i < 4; i++) {
3901+ topaz_read_core_reg(dev_priv, 0x4 | (i<<4),
3902+ mtx_reg_state);
3903+ mtx_reg_state++;
3904+ }
3905+ /* Saves PC and PCX */
3906+ for (i = 0; i < 2; i++) {
3907+ topaz_read_core_reg(dev_priv, 0x5 | (i<<4),
3908+ mtx_reg_state);
3909+ mtx_reg_state++;
3910+ }
3911+ /* Saves 8 Control Registers */
3912+ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
3913+ * TXGPIOO */
3914+ for (i = 0; i < 8; i++) {
3915+ topaz_read_core_reg(dev_priv, 0x7 | (i<<4),
3916+ mtx_reg_state);
3917+ mtx_reg_state++;
3918+ }
3919+
3920+ /* save mtx data memory */
3921+ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec];
3922+
3923+ mtx_dma_read(dev, cur_codec_fw->data_location + 0x80900000 - 0x82880000,
3924+ topaz_priv->cur_mtx_data_size);
3925+
3926+ /* turn off mtx clocks */
3927+ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
3928+ MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE);
3929+
3930+ topaz_priv->topaz_mtx_saved = 1;
3931+
3932+ return 0;
3933+}
3934+
3935+void mtx_dma_read(struct drm_device *dev, uint32_t source_addr, uint32_t size)
3936+{
3937+ struct drm_psb_private *dev_priv =
3938+ (struct drm_psb_private *)dev->dev_private;
3939+ struct ttm_buffer_object *target;
3940+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
3941+
3942+ /* setup mtx DMAC registers to do transfer */
3943+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, source_addr);
3944+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
3945+ F_ENCODE(2, MTX_BURSTSIZE) |
3946+ F_ENCODE(1, MTX_RNW) |
3947+ F_ENCODE(1, MTX_ENABLE) |
3948+ F_ENCODE(size, MTX_LENGTH));
3949+
3950+ /* give the DMAC access to the host memory via BIF */
3951+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
3952+
3953+ target = topaz_priv->topaz_mtx_data_mem;
3954+ /* transfert the data */
3955+ /* FIXME: size is meaured by bytes? */
3956+ topaz_dma_transfer(dev_priv, 0, target->offset, 0,
3957+ MTX_CR_MTX_SYSC_CDMAT,
3958+ size, 0, 1);
3959+
3960+ /* wait for it transfer */
3961+ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
3962+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
3963+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
3964+ /* clear interrupt */
3965+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
3966+ /* give access back to topaz core */
3967+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
3968+}
3969+
3970+void dmac_transfer(struct drm_device *dev, uint32_t channel, uint32_t dst_addr,
3971+ uint32_t soc_addr, uint32_t bytes_num,
3972+ int increment, int rnw)
3973+{
3974+ struct drm_psb_private *dev_priv =
3975+ (struct drm_psb_private *)dev->dev_private;
3976+ uint32_t count_reg;
3977+ uint32_t irq_state;
3978+
3979+ /* check no transfer is in progress */
3980+ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &count_reg);
3981+ if (0 != (count_reg & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) {
3982+ DRM_ERROR("TOPAZ: there's transfer in progress when wanna "
3983+ "save mtx data\n");
3984+ /* FIXME: how to handle this error */
3985+ return;
3986+ }
3987+
3988+ /* no hold off period */
3989+ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
3990+ /* cleare irq state */
3991+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
3992+ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_state);
3993+ if (0 != irq_state) {
3994+ DRM_ERROR("TOPAZ: there's irq cann't clear\n");
3995+ return;
3996+ }
3997+
3998+ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel), dst_addr);
3999+ count_reg = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP,
4000+ DMAC_PWIDTH_32_BIT, rnw,
4001+ DMAC_PWIDTH_32_BIT, bytes_num);
4002+ /* generate an interrupt at end of transfer */
4003+ count_reg |= MASK_IMG_SOC_TRANSFER_IEN;
4004+ count_reg |= F_ENCODE(rnw, IMG_SOC_DIR);
4005+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count_reg);
4006+
4007+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
4008+ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, increment,
4009+ DMAC_BURST_2));
4010+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
4011+
4012+ /* Finally, rewrite the count register with the enable
4013+ * bit set to kick off the transfer */
4014+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel),
4015+ count_reg | MASK_IMG_SOC_EN);
4016+}
4017+
4018+void mtx_dma_write(struct drm_device *dev)
4019+{
4020+ struct topaz_codec_fw *cur_codec_fw;
4021+ struct drm_psb_private *dev_priv =
4022+ (struct drm_psb_private *)dev->dev_private;
4023+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
4024+
4025+ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec];
4026+
4027+ /* upload code */
4028+ /* setup mtx DMAC registers to recieve transfer */
4029+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
4030+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
4031+ F_ENCODE(2, MTX_BURSTSIZE) |
4032+ F_ENCODE(0, MTX_RNW) |
4033+ F_ENCODE(1, MTX_ENABLE) |
4034+ F_ENCODE(cur_codec_fw->text_size / 4, MTX_LENGTH));
4035+
4036+ /* give DMAC access to host memory */
4037+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
4038+
4039+ /* transfer code */
4040+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
4041+ MTX_CR_MTX_SYSC_CDMAT, cur_codec_fw->text_size / 4,
4042+ 0, 0);
4043+ /* wait finished */
4044+ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
4045+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
4046+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
4047+ /* clear interrupt */
4048+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
4049+
4050+ /* setup mtx start recieving data */
4051+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000 +
4052+ (cur_codec_fw->data_location) - 0x82880000);
4053+
4054+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
4055+ F_ENCODE(2, MTX_BURSTSIZE) |
4056+ F_ENCODE(0, MTX_RNW) |
4057+ F_ENCODE(1, MTX_ENABLE) |
4058+ F_ENCODE(topaz_priv->cur_mtx_data_size, MTX_LENGTH));
4059+
4060+ /* give DMAC access to host memory */
4061+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
4062+
4063+ /* transfer data */
4064+ topaz_dma_transfer(dev_priv, 0, topaz_priv->topaz_mtx_data_mem->offset,
4065+ 0, MTX_CR_MTX_SYSC_CDMAT,
4066+ topaz_priv->cur_mtx_data_size,
4067+ 0, 0);
4068+ /* wait finished */
4069+ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
4070+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
4071+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
4072+ /* clear interrupt */
4073+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
4074+
4075+ /* give access back to Topaz Core */
4076+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
4077+}
4078+
4079diff --git a/drivers/gpu/drm/psb/psb_bl.c b/drivers/gpu/drm/psb/psb_bl.c
4080new file mode 100644
4081index 0000000..2c723f4
4082--- /dev/null
4083+++ b/drivers/gpu/drm/psb/psb_bl.c
4084@@ -0,0 +1,232 @@
4085+/*
4086+ * psb backlight using HAL
4087+ *
4088+ * Copyright (c) 2009 Eric Knopp
4089+ *
4090+ * This program is free software; you can redistribute it and/or modify
4091+ * it under the terms of the GNU General Public License version 2 as
4092+ * published by the Free Software Foundation.
4093+ */
4094+
4095+#include <linux/backlight.h>
4096+#include "psb_drv.h"
4097+#include "psb_intel_reg.h"
4098+#include "psb_intel_drv.h"
4099+#include "psb_intel_bios.h"
4100+#include "psb_powermgmt.h"
4101+
4102+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
4103+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
4104+#define BLC_PWM_FREQ_CALC_CONSTANT 32
4105+#define MHz 1000000
4106+#define BRIGHTNESS_MIN_LEVEL 1
4107+#define BRIGHTNESS_MAX_LEVEL 100
4108+#define BRIGHTNESS_MASK 0xFF
4109+#define BLC_POLARITY_NORMAL 0
4110+#define BLC_POLARITY_INVERSE 1
4111+#define BLC_ADJUSTMENT_MAX 100
4112+
4113+#define PSB_BLC_PWM_PRECISION_FACTOR 10
4114+#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
4115+#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
4116+
4117+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
4118+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
4119+
4120+static int psb_brightness;
4121+static int blc_pol;
4122+static struct backlight_device *psb_backlight_device;
4123+static u8 blc_brightnesscmd;
4124+static u8 blc_type;
4125+
4126+int psb_set_brightness(struct backlight_device *bd)
4127+{
4128+ u32 blc_pwm_ctl;
4129+ u32 max_pwm_blc;
4130+
4131+ struct drm_device *dev =
4132+ (struct drm_device *)psb_backlight_device->priv;
4133+ struct drm_psb_private *dev_priv =
4134+ (struct drm_psb_private *) dev->dev_private;
4135+
4136+ int level = bd->props.brightness;
4137+
4138+ DRM_DEBUG("backlight level set to %d\n", level);
4139+
4140+ /* Perform value bounds checking */
4141+ if (level < BRIGHTNESS_MIN_LEVEL)
4142+ level = BRIGHTNESS_MIN_LEVEL;
4143+
4144+ if(IS_POULSBO(dev)) {
4145+ psb_intel_lvds_set_brightness(dev, level);
4146+ psb_brightness = level;
4147+ return 0;
4148+ }
4149+
4150+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
4151+ /* Calculate and set the brightness value */
4152+ max_pwm_blc = REG_READ(BLC_PWM_CTL) >>
4153+ MRST_BACKLIGHT_MODULATION_FREQ_SHIFT;
4154+ blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
4155+
4156+ /* Adjust the backlight level with the percent in
4157+ * dev_priv->blc_adj1;
4158+ */
4159+ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
4160+ blc_pwm_ctl = blc_pwm_ctl / BLC_ADJUSTMENT_MAX;
4161+
4162+ if (blc_pol == BLC_POLARITY_INVERSE)
4163+ blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
4164+
4165+ /* force PWM bit on */
4166+ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
4167+ REG_WRITE(BLC_PWM_CTL,
4168+ (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) |
4169+ blc_pwm_ctl);
4170+
4171+ /* printk("***backlight brightness = %i\n", level); */
4172+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
4173+ }
4174+
4175+ /* cache the brightness for later use */
4176+ psb_brightness = level;
4177+ return 0;
4178+}
4179+
4180+int psb_get_brightness(struct backlight_device *bd)
4181+{
4182+ /* return locally cached var instead of HW read (due to DPST etc.) */
4183+ return psb_brightness;
4184+}
4185+
4186+struct backlight_ops psb_ops = {
4187+ .get_brightness = psb_get_brightness,
4188+ .update_status = psb_set_brightness,
4189+};
4190+
4191+int psb_backlight_init(struct drm_device *dev)
4192+{
4193+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
4194+ unsigned long CoreClock;
4195+ // u32 bl_max_freq;
4196+ // unsigned long value;
4197+ u16 bl_max_freq;
4198+ uint32_t value;
4199+ uint32_t clock;
4200+ uint32_t blc_pwm_precision_factor;
4201+
4202+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
4203+
4204+ struct drm_psb_private *dev_priv =
4205+ (struct drm_psb_private *) dev->dev_private;
4206+
4207+ psb_backlight_device = backlight_device_register("psb-bl",
4208+ NULL, NULL, &psb_ops);
4209+ if (IS_ERR(psb_backlight_device))
4210+ return PTR_ERR(psb_backlight_device);
4211+
4212+ psb_backlight_device->priv = dev;
4213+
4214+ if(IS_MRST(dev)) {
4215+ /* HACK HACK HACK */
4216+ dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
4217+
4218+ bl_max_freq = 256; /* this needs to come from VBT when available */
4219+ blc_pol = BLC_POLARITY_NORMAL; /* this needs to be set elsewhere */
4220+ blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
4221+
4222+ if (dev_priv->sku_83)
4223+ CoreClock = 166;
4224+ else if (dev_priv->sku_100)
4225+ CoreClock = 200;
4226+ else if (dev_priv->sku_100L)
4227+ CoreClock = 100;
4228+ else
4229+ return 1;
4230+ } else {
4231+ /* get bl_max_freq and pol from dev_priv*/
4232+ if(!dev_priv->lvds_bl){
4233+ DRM_ERROR("Has no valid LVDS backlight info\n");
4234+ return 1;
4235+ }
4236+ bl_max_freq = dev_priv->lvds_bl->freq;
4237+ blc_pol = dev_priv->lvds_bl->pol;
4238+ blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
4239+ blc_brightnesscmd = dev_priv->lvds_bl->brightnesscmd;
4240+ blc_type = dev_priv->lvds_bl->type;
4241+
4242+ //pci_write_config_dword(pci_root, 0xD4, 0x00C32004);
4243+ //pci_write_config_dword(pci_root, 0xD0, 0xE0033000);
4244+
4245+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
4246+ pci_read_config_dword(pci_root, 0xD4, &clock);
4247+
4248+ switch(clock & 0x07) {
4249+ case 0:
4250+ CoreClock = 100;
4251+ break;
4252+ case 1:
4253+ CoreClock = 133;
4254+ break;
4255+ case 2:
4256+ CoreClock = 150;
4257+ break;
4258+ case 3:
4259+ CoreClock = 178;
4260+ break;
4261+ case 4:
4262+ CoreClock = 200;
4263+ break;
4264+ case 5:
4265+ case 6:
4266+ case 7:
4267+ CoreClock = 266;
4268+ default:
4269+ return 1;
4270+ }
4271+ }/*end if(IS_MRST(dev))*/
4272+
4273+ value = (CoreClock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
4274+ value *= blc_pwm_precision_factor;
4275+ value /= bl_max_freq;
4276+ value /= blc_pwm_precision_factor;
4277+
4278+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
4279+ if(IS_MRST(dev)) {
4280+ if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
4281+ return 2;
4282+ else {
4283+ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
4284+ REG_WRITE(BLC_PWM_CTL, value |
4285+ (value << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT));
4286+ }
4287+ } else {
4288+ if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
4289+ value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
4290+ return 2;
4291+ else {
4292+ value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
4293+ REG_WRITE(BLC_PWM_CTL,
4294+ (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
4295+ (value));
4296+ }
4297+ } /*end if(IS_MRST(dev))*/
4298+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
4299+ }
4300+
4301+ psb_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL;
4302+ psb_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
4303+ backlight_update_status(psb_backlight_device);
4304+#endif
4305+ return 0;
4306+}
4307+
4308+void psb_backlight_exit(void)
4309+{
4310+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
4311+ psb_backlight_device->props.brightness = 0;
4312+ backlight_update_status(psb_backlight_device);
4313+ backlight_device_unregister(psb_backlight_device);
4314+#endif
4315+ return;
4316+}
4317diff --git a/drivers/gpu/drm/psb/psb_buffer.c b/drivers/gpu/drm/psb/psb_buffer.c
4318new file mode 100644
4319index 0000000..cb25bde
4320--- /dev/null
4321+++ b/drivers/gpu/drm/psb/psb_buffer.c
4322@@ -0,0 +1,519 @@
4323+/**************************************************************************
4324+ * Copyright (c) 2007, Intel Corporation.
4325+ * All Rights Reserved.
4326+ *
4327+ * This program is free software; you can redistribute it and/or modify it
4328+ * under the terms and conditions of the GNU General Public License,
4329+ * version 2, as published by the Free Software Foundation.
4330+ *
4331+ * This program is distributed in the hope it will be useful, but WITHOUT
4332+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4333+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4334+ * more details.
4335+ *
4336+ * You should have received a copy of the GNU General Public License along with
4337+ * this program; if not, write to the Free Software Foundation, Inc.,
4338+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4339+ *
4340+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4341+ * develop this driver.
4342+ *
4343+ **************************************************************************/
4344+/*
4345+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
4346+ */
4347+#include "ttm/ttm_placement_common.h"
4348+#include "ttm/ttm_execbuf_util.h"
4349+#include "ttm/ttm_fence_api.h"
4350+#include <drm/drmP.h>
4351+#include "psb_drv.h"
4352+#include "psb_schedule.h"
4353+
4354+#define DRM_MEM_TTM 26
4355+
4356+struct drm_psb_ttm_backend {
4357+ struct ttm_backend base;
4358+ struct page **pages;
4359+ unsigned int desired_tile_stride;
4360+ unsigned int hw_tile_stride;
4361+ int mem_type;
4362+ unsigned long offset;
4363+ unsigned long num_pages;
4364+};
4365+
4366+/*
4367+ * Poulsbo GPU virtual space looks like this
4368+ * (We currently use only one MMU context).
4369+ *
4370+ * gatt_start = Start of GATT aperture in bus space.
4371+ * stolen_end = End of GATT populated by stolen memory in bus space.
4372+ * gatt_end = End of GATT
4373+ * twod_end = MIN(gatt_start + 256_MEM, gatt_end)
4374+ *
4375+ * 0x00000000 -> 0x10000000 Temporary mapping space for tiling-
4376+ * and copy operations.
4377+ * This space is not managed and is protected by the
4378+ * temp_mem mutex.
4379+ *
4380+ * 0x10000000 -> 0x20000000 DRM_PSB_MEM_KERNEL For kernel buffers.
4381+ *
4382+ * 0x20000000 -> gatt_start DRM_PSB_MEM_MMU For generic MMU-only use.
4383+ *
4384+ * gatt_start -> stolen_end TTM_PL_VRAM Pre-populated GATT pages.
4385+ *
4386+ * stolen_end -> twod_end TTM_PL_TT GATT memory usable by 2D engine.
4387+ *
4388+ * twod_end -> gatt_end DRM_BO_MEM_APER GATT memory not
4389+ * usable by 2D engine.
4390+ *
4391+ * gatt_end -> 0xffffffff Currently unused.
4392+ */
4393+
4394+static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
4395+ struct ttm_mem_type_manager *man)
4396+{
4397+
4398+ struct drm_psb_private *dev_priv =
4399+ container_of(bdev, struct drm_psb_private, bdev);
4400+ struct psb_gtt *pg = dev_priv->pg;
4401+
4402+ switch (type) {
4403+ case TTM_PL_SYSTEM:
4404+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
4405+ man->available_caching = TTM_PL_FLAG_CACHED |
4406+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4407+ man->default_caching = TTM_PL_FLAG_CACHED;
4408+ break;
4409+ case DRM_PSB_MEM_KERNEL:
4410+ man->io_offset = 0x00000000;
4411+ man->io_size = 0x00000000;
4412+ man->io_addr = NULL;
4413+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4414+ TTM_MEMTYPE_FLAG_CMA;
4415+ man->gpu_offset = PSB_MEM_KERNEL_START;
4416+ man->available_caching = TTM_PL_FLAG_CACHED |
4417+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4418+ man->default_caching = TTM_PL_FLAG_WC;
4419+ break;
4420+ case DRM_PSB_MEM_MMU:
4421+ man->io_offset = 0x00000000;
4422+ man->io_size = 0x00000000;
4423+ man->io_addr = NULL;
4424+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4425+ TTM_MEMTYPE_FLAG_CMA;
4426+ man->gpu_offset = PSB_MEM_MMU_START;
4427+ man->available_caching = TTM_PL_FLAG_CACHED |
4428+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4429+ man->default_caching = TTM_PL_FLAG_WC;
4430+ break;
4431+ case DRM_PSB_MEM_PDS:
4432+ man->io_offset = 0x00000000;
4433+ man->io_size = 0x00000000;
4434+ man->io_addr = NULL;
4435+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4436+ TTM_MEMTYPE_FLAG_CMA;
4437+ man->gpu_offset = PSB_MEM_PDS_START;
4438+ man->available_caching = TTM_PL_FLAG_CACHED |
4439+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4440+ man->default_caching = TTM_PL_FLAG_WC;
4441+ break;
4442+ case DRM_PSB_MEM_RASTGEOM:
4443+ man->io_offset = 0x00000000;
4444+ man->io_size = 0x00000000;
4445+ man->io_addr = NULL;
4446+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4447+ TTM_MEMTYPE_FLAG_CMA;
4448+ man->gpu_offset = PSB_MEM_RASTGEOM_START;
4449+ man->available_caching = TTM_PL_FLAG_CACHED |
4450+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4451+ man->default_caching = TTM_PL_FLAG_WC;
4452+ break;
4453+ case TTM_PL_VRAM:
4454+ man->io_addr = NULL;
4455+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4456+ TTM_MEMTYPE_FLAG_FIXED |
4457+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
4458+#ifdef PSB_WORKING_HOST_MMU_ACCESS
4459+ man->io_offset = pg->gatt_start;
4460+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
4461+#else
4462+ man->io_offset = pg->stolen_base;
4463+ man->io_size = pg->vram_stolen_size;
4464+#endif
4465+ man->gpu_offset = pg->gatt_start;
4466+ man->available_caching = TTM_PL_FLAG_UNCACHED |
4467+ TTM_PL_FLAG_WC;
4468+ man->default_caching = TTM_PL_FLAG_WC;
4469+ break;
4470+ case TTM_PL_CI:
4471+ man->io_addr = NULL;
4472+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4473+ TTM_MEMTYPE_FLAG_FIXED |
4474+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
4475+ man->io_offset = dev_priv->ci_region_start;
4476+ man->io_size = pg->ci_stolen_size;
4477+ man->gpu_offset = pg->gatt_start - pg->ci_stolen_size;
4478+ man->available_caching = TTM_PL_FLAG_UNCACHED;
4479+ man->default_caching = TTM_PL_FLAG_UNCACHED;
4480+ break;
4481+ case TTM_PL_RAR: /* Unmappable RAR memory */
4482+ man->io_offset = dev_priv->rar_region_start;
4483+ man->io_size = pg->rar_stolen_size;
4484+ man->io_addr = NULL;
4485+ man->flags = TTM_MEMTYPE_FLAG_FIXED;
4486+ man->available_caching = TTM_PL_FLAG_UNCACHED;
4487+ man->default_caching = TTM_PL_FLAG_UNCACHED;
4488+ man->gpu_offset = pg->gatt_start + pg->vram_stolen_size;
4489+ break;
4490+ case TTM_PL_TT: /* Mappable GATT memory */
4491+ man->io_offset = pg->gatt_start;
4492+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
4493+ man->io_addr = NULL;
4494+#ifdef PSB_WORKING_HOST_MMU_ACCESS
4495+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4496+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
4497+#else
4498+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4499+ TTM_MEMTYPE_FLAG_CMA;
4500+#endif
4501+ man->available_caching = TTM_PL_FLAG_CACHED |
4502+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4503+ man->default_caching = TTM_PL_FLAG_WC;
4504+ man->gpu_offset = pg->gatt_start;
4505+ break;
4506+ case DRM_PSB_MEM_APER: /*MMU memory. Mappable. Not usable for 2D. */
4507+ man->io_offset = pg->gatt_start;
4508+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
4509+ man->io_addr = NULL;
4510+#ifdef PSB_WORKING_HOST_MMU_ACCESS
4511+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4512+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
4513+#else
4514+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
4515+ TTM_MEMTYPE_FLAG_CMA;
4516+#endif
4517+ man->gpu_offset = pg->gatt_start;
4518+ man->available_caching = TTM_PL_FLAG_CACHED |
4519+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
4520+ man->default_caching = TTM_PL_FLAG_WC;
4521+ break;
4522+ default:
4523+ DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
4524+ return -EINVAL;
4525+ }
4526+ return 0;
4527+}
4528+
4529+static uint32_t psb_evict_mask(struct ttm_buffer_object *bo)
4530+{
4531+ uint32_t cur_placement = bo->mem.flags & ~TTM_PL_MASK_MEM;
4532+
4533+
4534+ switch (bo->mem.mem_type) {
4535+ case TTM_PL_VRAM:
4536+ if (bo->mem.proposed_flags & TTM_PL_FLAG_TT)
4537+ return cur_placement | TTM_PL_FLAG_TT;
4538+ else
4539+ return cur_placement | TTM_PL_FLAG_SYSTEM;
4540+ default:
4541+ return cur_placement | TTM_PL_FLAG_SYSTEM;
4542+ }
4543+}
4544+
4545+static int psb_invalidate_caches(struct ttm_bo_device *bdev,
4546+ uint32_t placement)
4547+{
4548+ return 0;
4549+}
4550+
4551+static int psb_move_blit(struct ttm_buffer_object *bo,
4552+ bool evict, bool no_wait,
4553+ struct ttm_mem_reg *new_mem)
4554+{
4555+ struct drm_psb_private *dev_priv =
4556+ container_of(bo->bdev, struct drm_psb_private, bdev);
4557+ struct drm_device *dev = dev_priv->dev;
4558+ struct ttm_mem_reg *old_mem = &bo->mem;
4559+ struct ttm_fence_object *fence;
4560+ int dir = 0;
4561+ int ret;
4562+
4563+ if ((old_mem->mem_type == new_mem->mem_type) &&
4564+ (new_mem->mm_node->start <
4565+ old_mem->mm_node->start + old_mem->mm_node->size)) {
4566+ dir = 1;
4567+ }
4568+
4569+ psb_emit_2d_copy_blit(dev,
4570+ old_mem->mm_node->start << PAGE_SHIFT,
4571+ new_mem->mm_node->start << PAGE_SHIFT,
4572+ new_mem->num_pages, dir);
4573+
4574+ ret = ttm_fence_object_create(&dev_priv->fdev, 0,
4575+ _PSB_FENCE_TYPE_EXE,
4576+ TTM_FENCE_FLAG_EMIT,
4577+ &fence);
4578+ if (unlikely(ret != 0)) {
4579+ psb_idle_2d(dev);
4580+ if (fence)
4581+ ttm_fence_object_unref(&fence);
4582+ }
4583+
4584+ ret = ttm_bo_move_accel_cleanup(bo, (void *) fence,
4585+ (void *) (unsigned long)
4586+ _PSB_FENCE_TYPE_EXE,
4587+ evict, no_wait, new_mem);
4588+ if (fence)
4589+ ttm_fence_object_unref(&fence);
4590+ return ret;
4591+}
4592+
4593+/*
4594+ * Flip destination ttm into GATT,
4595+ * then blit and subsequently move out again.
4596+ */
4597+
4598+static int psb_move_flip(struct ttm_buffer_object *bo,
4599+ bool evict, bool interruptible, bool no_wait,
4600+ struct ttm_mem_reg *new_mem)
4601+{
4602+ struct ttm_bo_device *bdev = bo->bdev;
4603+ struct ttm_mem_reg tmp_mem;
4604+ int ret;
4605+
4606+ tmp_mem = *new_mem;
4607+ tmp_mem.mm_node = NULL;
4608+ tmp_mem.proposed_flags = TTM_PL_FLAG_TT;
4609+
4610+ ret = ttm_bo_mem_space(bo, &tmp_mem, interruptible, no_wait);
4611+ if (ret)
4612+ return ret;
4613+ ret = ttm_tt_bind(bo->ttm, &tmp_mem);
4614+ if (ret)
4615+ goto out_cleanup;
4616+ ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
4617+ if (ret)
4618+ goto out_cleanup;
4619+
4620+ ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
4621+out_cleanup:
4622+ if (tmp_mem.mm_node) {
4623+ spin_lock(&bdev->lru_lock);
4624+ drm_mm_put_block(tmp_mem.mm_node);
4625+ tmp_mem.mm_node = NULL;
4626+ spin_unlock(&bdev->lru_lock);
4627+ }
4628+ return ret;
4629+}
4630+
4631+static int psb_move(struct ttm_buffer_object *bo,
4632+ bool evict, bool interruptible,
4633+ bool no_wait, struct ttm_mem_reg *new_mem)
4634+{
4635+ struct ttm_mem_reg *old_mem = &bo->mem;
4636+
4637+ if ((old_mem->mem_type == TTM_PL_RAR) ||
4638+ (new_mem->mem_type == TTM_PL_RAR)) {
4639+ ttm_bo_free_old_node(bo);
4640+ *old_mem = *new_mem;
4641+ } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
4642+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
4643+ } else if (new_mem->mem_type == TTM_PL_SYSTEM) {
4644+ int ret = psb_move_flip(bo, evict, interruptible,
4645+ no_wait, new_mem);
4646+ if (unlikely(ret != 0)) {
4647+ if (ret == -ERESTART)
4648+ return ret;
4649+ else
4650+ return ttm_bo_move_memcpy(bo, evict, no_wait,
4651+ new_mem);
4652+ }
4653+ } else {
4654+ if (psb_move_blit(bo, evict, no_wait, new_mem))
4655+ return ttm_bo_move_memcpy(bo, evict, no_wait,
4656+ new_mem);
4657+ }
4658+ return 0;
4659+}
4660+
4661+static int drm_psb_tbe_populate(struct ttm_backend *backend,
4662+ unsigned long num_pages,
4663+ struct page **pages,
4664+ struct page *dummy_read_page)
4665+{
4666+ struct drm_psb_ttm_backend *psb_be =
4667+ container_of(backend, struct drm_psb_ttm_backend, base);
4668+
4669+ psb_be->pages = pages;
4670+ return 0;
4671+}
4672+
4673+static int drm_psb_tbe_unbind(struct ttm_backend *backend)
4674+{
4675+ struct ttm_bo_device *bdev = backend->bdev;
4676+ struct drm_psb_private *dev_priv =
4677+ container_of(bdev, struct drm_psb_private, bdev);
4678+ struct drm_psb_ttm_backend *psb_be =
4679+ container_of(backend, struct drm_psb_ttm_backend, base);
4680+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
4681+ struct ttm_mem_type_manager *man = &bdev->man[psb_be->mem_type];
4682+
4683+ PSB_DEBUG_RENDER("MMU unbind.\n");
4684+
4685+ if (psb_be->mem_type == TTM_PL_TT) {
4686+ uint32_t gatt_p_offset =
4687+ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
4688+
4689+ (void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
4690+ psb_be->num_pages,
4691+ psb_be->desired_tile_stride,
4692+ psb_be->hw_tile_stride);
4693+ }
4694+
4695+ psb_mmu_remove_pages(pd, psb_be->offset,
4696+ psb_be->num_pages,
4697+ psb_be->desired_tile_stride,
4698+ psb_be->hw_tile_stride);
4699+
4700+ return 0;
4701+}
4702+
4703+static int drm_psb_tbe_bind(struct ttm_backend *backend,
4704+ struct ttm_mem_reg *bo_mem)
4705+{
4706+ struct ttm_bo_device *bdev = backend->bdev;
4707+ struct drm_psb_private *dev_priv =
4708+ container_of(bdev, struct drm_psb_private, bdev);
4709+ struct drm_psb_ttm_backend *psb_be =
4710+ container_of(backend, struct drm_psb_ttm_backend, base);
4711+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
4712+ struct ttm_mem_type_manager *man = &bdev->man[bo_mem->mem_type];
4713+ int type;
4714+ int ret = 0;
4715+
4716+ psb_be->mem_type = bo_mem->mem_type;
4717+ psb_be->num_pages = bo_mem->num_pages;
4718+ psb_be->desired_tile_stride = 0;
4719+ psb_be->hw_tile_stride = 0;
4720+ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
4721+ man->gpu_offset;
4722+
4723+ type =
4724+ (bo_mem->
4725+ flags & TTM_PL_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0;
4726+
4727+ PSB_DEBUG_RENDER("MMU bind.\n");
4728+ if (psb_be->mem_type == TTM_PL_TT) {
4729+ uint32_t gatt_p_offset =
4730+ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
4731+
4732+ ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
4733+ gatt_p_offset,
4734+ psb_be->num_pages,
4735+ psb_be->desired_tile_stride,
4736+ psb_be->hw_tile_stride, type);
4737+ }
4738+
4739+ ret = psb_mmu_insert_pages(pd, psb_be->pages,
4740+ psb_be->offset, psb_be->num_pages,
4741+ psb_be->desired_tile_stride,
4742+ psb_be->hw_tile_stride, type);
4743+ if (ret)
4744+ goto out_err;
4745+
4746+ return 0;
4747+out_err:
4748+ drm_psb_tbe_unbind(backend);
4749+ return ret;
4750+
4751+}
4752+
4753+static void drm_psb_tbe_clear(struct ttm_backend *backend)
4754+{
4755+ struct drm_psb_ttm_backend *psb_be =
4756+ container_of(backend, struct drm_psb_ttm_backend, base);
4757+
4758+ psb_be->pages = NULL;
4759+ return;
4760+}
4761+
4762+static void drm_psb_tbe_destroy(struct ttm_backend *backend)
4763+{
4764+ struct drm_psb_ttm_backend *psb_be =
4765+ container_of(backend, struct drm_psb_ttm_backend, base);
4766+
4767+ if (backend)
4768+ kfree(psb_be);
4769+}
4770+
4771+static struct ttm_backend_func psb_ttm_backend = {
4772+ .populate = drm_psb_tbe_populate,
4773+ .clear = drm_psb_tbe_clear,
4774+ .bind = drm_psb_tbe_bind,
4775+ .unbind = drm_psb_tbe_unbind,
4776+ .destroy = drm_psb_tbe_destroy,
4777+};
4778+
4779+static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev)
4780+{
4781+ struct drm_psb_ttm_backend *psb_be;
4782+
4783+ psb_be = kzalloc(sizeof(*psb_be), GFP_KERNEL);
4784+ if (!psb_be)
4785+ return NULL;
4786+ psb_be->pages = NULL;
4787+ psb_be->base.func = &psb_ttm_backend;
4788+ psb_be->base.bdev = bdev;
4789+ return &psb_be->base;
4790+}
4791+
4792+/*
4793+ * Use this memory type priority if no eviction is needed.
4794+ */
4795+static uint32_t psb_mem_prios[] = {
4796+ TTM_PL_CI,
4797+ TTM_PL_RAR,
4798+ TTM_PL_VRAM,
4799+ TTM_PL_TT,
4800+ DRM_PSB_MEM_KERNEL,
4801+ DRM_PSB_MEM_MMU,
4802+ DRM_PSB_MEM_RASTGEOM,
4803+ DRM_PSB_MEM_PDS,
4804+ DRM_PSB_MEM_APER,
4805+ TTM_PL_SYSTEM
4806+};
4807+
4808+/*
4809+ * Use this memory type priority if need to evict.
4810+ */
4811+static uint32_t psb_busy_prios[] = {
4812+ TTM_PL_TT,
4813+ TTM_PL_VRAM,
4814+ TTM_PL_CI,
4815+ TTM_PL_RAR,
4816+ DRM_PSB_MEM_KERNEL,
4817+ DRM_PSB_MEM_MMU,
4818+ DRM_PSB_MEM_RASTGEOM,
4819+ DRM_PSB_MEM_PDS,
4820+ DRM_PSB_MEM_APER,
4821+ TTM_PL_SYSTEM
4822+};
4823+
4824+
4825+struct ttm_bo_driver psb_ttm_bo_driver = {
4826+ .mem_type_prio = psb_mem_prios,
4827+ .mem_busy_prio = psb_busy_prios,
4828+ .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
4829+ .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
4830+ .create_ttm_backend_entry = &drm_psb_tbe_init,
4831+ .invalidate_caches = &psb_invalidate_caches,
4832+ .init_mem_type = &psb_init_mem_type,
4833+ .evict_flags = &psb_evict_mask,
4834+ .move = &psb_move,
4835+ .verify_access = &psb_verify_access,
4836+ .sync_obj_signaled = &ttm_fence_sync_obj_signaled,
4837+ .sync_obj_wait = &ttm_fence_sync_obj_wait,
4838+ .sync_obj_flush = &ttm_fence_sync_obj_flush,
4839+ .sync_obj_unref = &ttm_fence_sync_obj_unref,
4840+ .sync_obj_ref = &ttm_fence_sync_obj_ref
4841+};
4842diff --git a/drivers/gpu/drm/psb/psb_dpst.c b/drivers/gpu/drm/psb/psb_dpst.c
4843new file mode 100644
4844index 0000000..435e53b
4845--- /dev/null
4846+++ b/drivers/gpu/drm/psb/psb_dpst.c
4847@@ -0,0 +1,208 @@
4848+/*
4849+ * Copyright © 2009 Intel Corporation
4850+ *
4851+ * Permission is hereby granted, free of charge, to any person obtaining a
4852+ * copy of this software and associated documentation files (the "Software"),
4853+ * to deal in the Software without restriction, including without limitation
4854+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
4855+ * and/or sell copies of the Software, and to permit persons to whom the
4856+ * Software is furnished to do so, subject to the following conditions:
4857+ *
4858+ * The above copyright notice and this permission notice (including the next
4859+ * paragraph) shall be included in all copies or substantial portions of the
4860+ * Software.
4861+ *
4862+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4863+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4864+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
4865+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4866+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4867+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
4868+ * IN THE SOFTWARE.
4869+ *
4870+ * Authors:
4871+ * James C. Gualario <james.c.gualario@intel.com>
4872+ *
4873+ */
4874+#include "psb_umevents.h"
4875+#include "psb_dpst.h"
4876+/**
4877+ * inform the kernel of the work to be performed and related function.
4878+ *
4879+ */
4880+DECLARE_WORK(dpst_dev_change_work, &psb_dpst_dev_change_wq);
4881+/**
4882+ * psb_dpst_notify_change_um - notify user mode of hotplug changes
4883+ *
4884+ * @name: name of event to notify user mode of change to
4885+ * @state: dpst state struct to get workqueue from
4886+ *
4887+ */
4888+int psb_dpst_notify_change_um(struct umevent_obj *event,
4889+ struct dpst_state *state)
4890+{
4891+ state->dpst_change_wq_data.dev_name_arry_rw_status
4892+ [state->dpst_change_wq_data.dev_name_write] =
4893+ DRM_DPST_READY_TO_READ;
4894+ state->dpst_change_wq_data.dev_umevent_arry
4895+ [state->dpst_change_wq_data.dev_name_write] =
4896+ event;
4897+ if (state->dpst_change_wq_data.dev_name_read_write_wrap_ack == 1)
4898+ state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0;
4899+ state->dpst_change_wq_data.dev_name_write++;
4900+ if (state->dpst_change_wq_data.dev_name_write ==
4901+ state->dpst_change_wq_data.dev_name_read) {
4902+ state->dpst_change_wq_data.dev_name_write--;
4903+ return IRQ_NONE;
4904+ }
4905+ if (state->dpst_change_wq_data.dev_name_write >
4906+ DRM_DPST_RING_DEPTH_MAX) {
4907+ state->dpst_change_wq_data.dev_name_write = 0;
4908+ state->dpst_change_wq_data.dev_name_write_wrap = 1;
4909+ }
4910+ state->dpst_change_wq_data.hotplug_dev_list = state->list;
4911+ queue_work(state->dpst_wq, &(state->dpst_change_wq_data.work));
4912+ return IRQ_HANDLED;
4913+}
4914+EXPORT_SYMBOL(psb_dpst_notify_change_um);
4915+/**
4916+ *
4917+ * psb_dpst_create_and_notify_um - create and notify user mode of new dev
4918+ *
4919+ * @name: name to give for new event / device
4920+ * @state: dpst state instaces to associate event with
4921+ *
4922+ */
4923+struct umevent_obj *psb_dpst_create_and_notify_um(const char *name,
4924+ struct dpst_state *state)
4925+{
4926+ return psb_create_umevent_obj(name, state->list);
4927+
4928+}
4929+EXPORT_SYMBOL(psb_dpst_create_and_notify_um);
4930+/**
4931+ * psb_dpst_device_pool_create_and_init - make new hotplug device pool
4932+ *
4933+ * @parent_kobj - parent kobject to associate dpst kset with
4934+ * @state - dpst state instance to associate list with
4935+ *
4936+ */
4937+struct umevent_list *psb_dpst_device_pool_create_and_init(
4938+ struct kobject *parent_kobj,
4939+ struct dpst_state *state)
4940+{
4941+
4942+ struct umevent_list *new_hotplug_dev_list = NULL;
4943+ new_hotplug_dev_list = psb_umevent_create_list();
4944+ if (new_hotplug_dev_list)
4945+ psb_umevent_init(parent_kobj, new_hotplug_dev_list,
4946+ "psb_dpst");
4947+
4948+ state->dpst_wq = create_singlethread_workqueue("dpst-wq");
4949+
4950+ if (!state->dpst_wq)
4951+ return NULL;
4952+
4953+ INIT_WORK(&state->dpst_change_wq_data.work, psb_dpst_dev_change_wq);
4954+
4955+ state->dpst_change_wq_data.dev_name_read = 0;
4956+ state->dpst_change_wq_data.dev_name_write = 0;
4957+ state->dpst_change_wq_data.dev_name_write_wrap = 0;
4958+ state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0;
4959+
4960+ memset(&(state->dpst_change_wq_data.dev_name_arry_rw_status[0]),
4961+ 0, sizeof(int)*DRM_DPST_RING_DEPTH);
4962+
4963+ return new_hotplug_dev_list;
4964+}
4965+EXPORT_SYMBOL(psb_dpst_device_pool_create_and_init);
4966+/**
4967+ * psb_dpst_init - init dpst subsystem
4968+ * @parent_kobj - parent kobject to associate dpst state with
4969+ *
4970+ */
4971+struct dpst_state *psb_dpst_init(struct kobject *parent_kobj)
4972+{
4973+ struct dpst_state *state;
4974+ state = kzalloc(sizeof(struct dpst_state), GFP_KERNEL);
4975+ printk(KERN_ALERT "after kzalloc\n");
4976+ state->list = NULL;
4977+ state->list = psb_dpst_device_pool_create_and_init(
4978+ parent_kobj,
4979+ state);
4980+ return state;
4981+}
4982+EXPORT_SYMBOL(psb_dpst_init);
4983+/**
4984+ * psb_dpst_device_pool_destroy - destroy all dpst related resources
4985+ *
4986+ * @state: dpst state instance to destroy
4987+ *
4988+ */
4989+void psb_dpst_device_pool_destroy(struct dpst_state *state)
4990+{
4991+ flush_workqueue(state->dpst_wq);
4992+ destroy_workqueue(state->dpst_wq);
4993+ psb_umevent_cleanup(state->list);
4994+ kfree(state);
4995+}
4996+EXPORT_SYMBOL(psb_dpst_device_pool_destroy);
4997+/**
4998+ * psb_dpst_dev_change_wq - change workqueue implementation
4999+ *
5000+ * @work: work struct to use for kernel scheduling
5001+ *
5002+ */
5003+void psb_dpst_dev_change_wq(struct work_struct *work)
5004+{
5005+ struct dpst_disp_workqueue_data *wq_data;
5006+ wq_data = to_dpst_disp_workqueue_data(work);
5007+ if (wq_data->dev_name_write_wrap == 1) {
5008+ wq_data->dev_name_read_write_wrap_ack = 1;
5009+ wq_data->dev_name_write_wrap = 0;
5010+ while (wq_data->dev_name_read != DRM_DPST_RING_DEPTH_MAX) {
5011+ if (wq_data->dev_name_arry_rw_status
5012+ [wq_data->dev_name_read] ==
5013+ DRM_DPST_READY_TO_READ) {
5014+ wq_data->dev_name_arry_rw_status
5015+ [wq_data->dev_name_read] =
5016+ DRM_DPST_READ_COMPLETE;
5017+ psb_umevent_notify_change_gfxsock
5018+ (wq_data->dev_umevent_arry
5019+ [wq_data->dev_name_read]);
5020+ }
5021+ wq_data->dev_name_read++;
5022+ }
5023+ wq_data->dev_name_read = 0;
5024+ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
5025+ if (wq_data->dev_name_arry_rw_status
5026+ [wq_data->dev_name_read] ==
5027+ DRM_DPST_READY_TO_READ) {
5028+ wq_data->dev_name_arry_rw_status
5029+ [wq_data->dev_name_read] =
5030+ DRM_DPST_READ_COMPLETE;
5031+ psb_umevent_notify_change_gfxsock
5032+ (wq_data->dev_umevent_arry
5033+ [wq_data->dev_name_read]);
5034+ }
5035+ wq_data->dev_name_read++;
5036+ }
5037+ } else {
5038+ while (wq_data->dev_name_read < wq_data->dev_name_write) {
5039+ if (wq_data->dev_name_arry_rw_status
5040+ [wq_data->dev_name_read] ==
5041+ DRM_DPST_READY_TO_READ) {
5042+ wq_data->dev_name_arry_rw_status
5043+ [wq_data->dev_name_read] =
5044+ DRM_DPST_READ_COMPLETE;
5045+ psb_umevent_notify_change_gfxsock
5046+ (wq_data->dev_umevent_arry
5047+ [wq_data->dev_name_read]);
5048+ }
5049+ wq_data->dev_name_read++;
5050+ }
5051+ }
5052+ if (wq_data->dev_name_read > DRM_DPST_RING_DEPTH_MAX)
5053+ wq_data->dev_name_read = 0;
5054+}
5055+EXPORT_SYMBOL(psb_dpst_dev_change_wq);
5056diff --git a/drivers/gpu/drm/psb/psb_dpst.h b/drivers/gpu/drm/psb/psb_dpst.h
5057new file mode 100644
5058index 0000000..43d3128
5059--- /dev/null
5060+++ b/drivers/gpu/drm/psb/psb_dpst.h
5061@@ -0,0 +1,90 @@
5062+/*
5063+ * Copyright © 2009 Intel Corporation
5064+ *
5065+ * Permission is hereby granted, free of charge, to any person obtaining a
5066+ * copy of this software and associated documentation files (the "Software"),
5067+ * to deal in the Software without restriction, including without limitation
5068+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
5069+ * and/or sell copies of the Software, and to permit persons to whom the
5070+ * Software is furnished to do so, subject to the following conditions:
5071+ *
5072+ * The above copyright notice and this permission notice (including the next
5073+ * paragraph) shall be included in all copies or substantial portions of the
5074+ * Software.
5075+ *
5076+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
5077+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
5078+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
5079+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
5080+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
5081+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
5082+ * IN THE SOFTWARE.
5083+ *
5084+ * Authors:
5085+ * James C. Gualario <james.c.gualario@intel.com>
5086+ *
5087+ */
5088+#ifndef _PSB_DPST_H_
5089+#define _PSB_DPST_H_
5090+/**
5091+ * required includes
5092+ *
5093+ */
5094+#include "psb_umevents.h"
5095+/**
5096+ * dpst specific defines
5097+ *
5098+ */
5099+#define DRM_DPST_RING_DEPTH 256
5100+#define DRM_DPST_RING_DEPTH_MAX (DRM_DPST_RING_DEPTH-1)
5101+#define DRM_DPST_READY_TO_READ 1
5102+#define DRM_DPST_READ_COMPLETE 2
5103+/**
5104+ * dpst workqueue data struct.
5105+ */
5106+struct dpst_disp_workqueue_data {
5107+ struct work_struct work;
5108+ const char *dev_name;
5109+ int dev_name_write;
5110+ int dev_name_read;
5111+ int dev_name_write_wrap;
5112+ int dev_name_read_write_wrap_ack;
5113+ struct umevent_obj *dev_umevent_arry[DRM_DPST_RING_DEPTH];
5114+ int dev_name_arry_rw_status[DRM_DPST_RING_DEPTH];
5115+ struct umevent_list *hotplug_dev_list;
5116+};
5117+/**
5118+ * dpst state structure
5119+ *
5120+ */
5121+struct dpst_state {
5122+ struct workqueue_struct *dpst_wq;
5123+ struct dpst_disp_workqueue_data dpst_change_wq_data;
5124+ struct umevent_list *list;
5125+};
5126+/**
5127+ * main interface function prototytpes for dpst support.
5128+ *
5129+ */
5130+extern struct dpst_state *psb_dpst_init(struct kobject *parent_kobj);
5131+extern int psb_dpst_notify_change_um(struct umevent_obj *event,
5132+ struct dpst_state *state);
5133+extern struct umevent_obj *psb_dpst_create_and_notify_um(const char *name,
5134+ struct dpst_state *state);
5135+extern struct umevent_list *psb_dpst_device_pool_create_and_init(
5136+ struct kobject *parent_kobj,
5137+ struct dpst_state *state);
5138+extern void psb_dpst_device_pool_destroy(struct dpst_state *state);
5139+/**
5140+ * to go back and forth between work struct and workqueue data
5141+ *
5142+ */
5143+#define to_dpst_disp_workqueue_data(x) \
5144+ container_of(x, struct dpst_disp_workqueue_data, work)
5145+
5146+/**
5147+ * function prototypes for workqueue implementation
5148+ *
5149+ */
5150+extern void psb_dpst_dev_change_wq(struct work_struct *work);
5151+#endif
5152diff --git a/drivers/gpu/drm/psb/psb_drm.h b/drivers/gpu/drm/psb/psb_drm.h
5153new file mode 100644
5154index 0000000..596a9f0
5155--- /dev/null
5156+++ b/drivers/gpu/drm/psb/psb_drm.h
5157@@ -0,0 +1,716 @@
5158+/**************************************************************************
5159+ * Copyright (c) 2007, Intel Corporation.
5160+ * All Rights Reserved.
5161+ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
5162+ * All Rights Reserved.
5163+ *
5164+ * This program is free software; you can redistribute it and/or modify it
5165+ * under the terms and conditions of the GNU General Public License,
5166+ * version 2, as published by the Free Software Foundation.
5167+ *
5168+ * This program is distributed in the hope it will be useful, but WITHOUT
5169+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
5170+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
5171+ * more details.
5172+ *
5173+ * You should have received a copy of the GNU General Public License along with
5174+ * this program; if not, write to the Free Software Foundation, Inc.,
5175+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5176+ *
5177+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
5178+ * develop this driver.
5179+ *
5180+ **************************************************************************/
5181+/*
5182+ */
5183+
5184+#ifndef _PSB_DRM_H_
5185+#define _PSB_DRM_H_
5186+
5187+#if defined(__linux__) && !defined(__KERNEL__)
5188+#include<stdint.h>
5189+#include "drm_mode.h"
5190+#endif
5191+
5192+#include "ttm/ttm_fence_user.h"
5193+#include "ttm/ttm_placement_user.h"
5194+
5195+/*
5196+ * Menlow/MRST graphics driver package version
5197+ * a.b.c.xxxx
5198+ * a - Product Family: 5 - Linux
5199+ * b - Major Release Version: 0 - non-Gallium (Unbuntu);
5200+ * 1 - Gallium (Moblin2)
5201+ * c - Hotfix Release
5202+ * xxxx - Graphics internal build #
5203+ */
5204+#define PSB_PACKAGE_VERSION "5.1.0.32L.0124"
5205+
5206+#define DRM_PSB_SAREA_MAJOR 0
5207+#define DRM_PSB_SAREA_MINOR 2
5208+#define PSB_FIXED_SHIFT 16
5209+
5210+#define DRM_PSB_FIRST_TA_USE_REG 3
5211+#define DRM_PSB_NUM_TA_USE_REG 5
5212+#define DRM_PSB_FIRST_RASTER_USE_REG 8
5213+#define DRM_PSB_NUM_RASTER_USE_REG 7
5214+
5215+#define PSB_NUM_PIPE 2
5216+
5217+/*
5218+ * Public memory types.
5219+ */
5220+
5221+#define DRM_PSB_MEM_MMU TTM_PL_PRIV1
5222+#define DRM_PSB_FLAG_MEM_MMU TTM_PL_FLAG_PRIV1
5223+#define DRM_PSB_MEM_PDS TTM_PL_PRIV2
5224+#define DRM_PSB_FLAG_MEM_PDS TTM_PL_FLAG_PRIV2
5225+#define DRM_PSB_MEM_APER TTM_PL_PRIV3
5226+#define DRM_PSB_FLAG_MEM_APER TTM_PL_FLAG_PRIV3
5227+#define DRM_PSB_MEM_RASTGEOM TTM_PL_PRIV4
5228+#define DRM_PSB_FLAG_MEM_RASTGEOM TTM_PL_FLAG_PRIV4
5229+#define PSB_MEM_RASTGEOM_START 0x30000000
5230+
5231+typedef int32_t psb_fixed;
5232+typedef uint32_t psb_ufixed;
5233+
5234+static inline int32_t psb_int_to_fixed(int a)
5235+{
5236+ return a * (1 << PSB_FIXED_SHIFT);
5237+}
5238+
5239+static inline uint32_t psb_unsigned_to_ufixed(unsigned int a)
5240+{
5241+ return a << PSB_FIXED_SHIFT;
5242+}
5243+
5244+/*Status of the command sent to the gfx device.*/
5245+typedef enum {
5246+ DRM_CMD_SUCCESS,
5247+ DRM_CMD_FAILED,
5248+ DRM_CMD_HANG
5249+} drm_cmd_status_t;
5250+
5251+struct drm_psb_scanout {
5252+ uint32_t buffer_id; /* DRM buffer object ID */
5253+ uint32_t rotation; /* Rotation as in RR_rotation definitions */
5254+ uint32_t stride; /* Buffer stride in bytes */
5255+ uint32_t depth; /* Buffer depth in bits (NOT) bpp */
5256+ uint32_t width; /* Buffer width in pixels */
5257+ uint32_t height; /* Buffer height in lines */
5258+ int32_t transform[3][3]; /* Buffer composite transform */
5259+ /* (scaling, rot, reflect) */
5260+};
5261+
5262+#define DRM_PSB_SAREA_OWNERS 16
5263+#define DRM_PSB_SAREA_OWNER_2D 0
5264+#define DRM_PSB_SAREA_OWNER_3D 1
5265+
5266+#define DRM_PSB_SAREA_SCANOUTS 3
5267+
5268+struct drm_psb_sarea {
5269+ /* Track changes of this data structure */
5270+
5271+ uint32_t major;
5272+ uint32_t minor;
5273+
5274+ /* Last context to touch part of hw */
5275+ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
5276+
5277+ /* Definition of front- and rotated buffers */
5278+ uint32_t num_scanouts;
5279+ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
5280+
5281+ int planeA_x;
5282+ int planeA_y;
5283+ int planeA_w;
5284+ int planeA_h;
5285+ int planeB_x;
5286+ int planeB_y;
5287+ int planeB_w;
5288+ int planeB_h;
5289+ /* Number of active scanouts */
5290+ uint32_t num_active_scanouts;
5291+};
5292+
5293+#define PSB_RELOC_MAGIC 0x67676767
5294+#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
5295+#define PSB_RELOC_SHIFT_SHIFT 0
5296+#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
5297+#define PSB_RELOC_ALSHIFT_SHIFT 16
5298+
5299+#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
5300+ * buffer
5301+ */
5302+#define PSB_RELOC_OP_2D_OFFSET 1 /* Offset of the indicated
5303+ * buffer, relative to 2D
5304+ * base address
5305+ */
5306+#define PSB_RELOC_OP_PDS_OFFSET 2 /* Offset of the indicated buffer,
5307+ * relative to PDS base address
5308+ */
5309+#define PSB_RELOC_OP_STRIDE 3 /* Stride of the indicated
5310+ * buffer (for tiling)
5311+ */
5312+#define PSB_RELOC_OP_USE_OFFSET 4 /* Offset of USE buffer
5313+ * relative to base reg
5314+ */
5315+#define PSB_RELOC_OP_USE_REG 5 /* Base reg of USE buffer */
5316+
5317+struct drm_psb_reloc {
5318+ uint32_t reloc_op;
5319+ uint32_t where; /* offset in destination buffer */
5320+ uint32_t buffer; /* Buffer reloc applies to */
5321+ uint32_t mask; /* Destination format: */
5322+ uint32_t shift; /* Destination format: */
5323+ uint32_t pre_add; /* Destination format: */
5324+ uint32_t background; /* Destination add */
5325+ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
5326+ uint32_t arg0; /* Reloc-op dependant */
5327+ uint32_t arg1;
5328+};
5329+
5330+
5331+#define PSB_GPU_ACCESS_READ (1ULL << 32)
5332+#define PSB_GPU_ACCESS_WRITE (1ULL << 33)
5333+#define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
5334+
5335+#define PSB_BO_FLAG_TA (1ULL << 48)
5336+#define PSB_BO_FLAG_SCENE (1ULL << 49)
5337+#define PSB_BO_FLAG_FEEDBACK (1ULL << 50)
5338+#define PSB_BO_FLAG_USSE (1ULL << 51)
5339+#define PSB_BO_FLAG_COMMAND (1ULL << 52)
5340+
5341+#define PSB_ENGINE_2D 0
5342+#define PSB_ENGINE_VIDEO 1
5343+#define PSB_ENGINE_RASTERIZER 2
5344+#define PSB_ENGINE_TA 3
5345+#define PSB_ENGINE_HPRAST 4
5346+#define LNC_ENGINE_ENCODE 5
5347+
5348+/*
5349+ * For this fence class we have a couple of
5350+ * fence types.
5351+ */
5352+
5353+#define _PSB_FENCE_EXE_SHIFT 0
5354+#define _PSB_FENCE_TA_DONE_SHIFT 1
5355+#define _PSB_FENCE_RASTER_DONE_SHIFT 2
5356+#define _PSB_FENCE_SCENE_DONE_SHIFT 3
5357+#define _PSB_FENCE_FEEDBACK_SHIFT 4
5358+
5359+#define _PSB_ENGINE_TA_FENCE_TYPES 5
5360+#define _PSB_FENCE_TYPE_EXE (1 << _PSB_FENCE_EXE_SHIFT)
5361+#define _PSB_FENCE_TYPE_TA_DONE (1 << _PSB_FENCE_TA_DONE_SHIFT)
5362+#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT)
5363+#define _PSB_FENCE_TYPE_SCENE_DONE (1 << _PSB_FENCE_SCENE_DONE_SHIFT)
5364+#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
5365+
5366+#define PSB_ENGINE_HPRAST 4
5367+#define PSB_NUM_ENGINES 6
5368+
5369+#define PSB_TA_FLAG_FIRSTPASS (1 << 0)
5370+#define PSB_TA_FLAG_LASTPASS (1 << 1)
5371+
5372+#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
5373+
5374+struct drm_psb_extension_rep {
5375+ int32_t exists;
5376+ uint32_t driver_ioctl_offset;
5377+ uint32_t sarea_offset;
5378+ uint32_t major;
5379+ uint32_t minor;
5380+ uint32_t pl;
5381+};
5382+
5383+#define DRM_PSB_EXT_NAME_LEN 128
5384+
5385+union drm_psb_extension_arg {
5386+ char extension[DRM_PSB_EXT_NAME_LEN];
5387+ struct drm_psb_extension_rep rep;
5388+};
5389+
5390+struct psb_validate_req {
5391+ uint64_t set_flags;
5392+ uint64_t clear_flags;
5393+ uint64_t next;
5394+ uint64_t presumed_gpu_offset;
5395+ uint32_t buffer_handle;
5396+ uint32_t presumed_flags;
5397+ uint32_t group;
5398+ uint32_t pad64;
5399+};
5400+
5401+struct psb_validate_rep {
5402+ uint64_t gpu_offset;
5403+ uint32_t placement;
5404+ uint32_t fence_type_mask;
5405+};
5406+
5407+#define PSB_USE_PRESUMED (1 << 0)
5408+
5409+struct psb_validate_arg {
5410+ int handled;
5411+ int ret;
5412+ union {
5413+ struct psb_validate_req req;
5414+ struct psb_validate_rep rep;
5415+ } d;
5416+};
5417+
5418+struct drm_psb_scene {
5419+ int handle_valid;
5420+ uint32_t handle;
5421+ uint32_t w; /* also contains msaa info */
5422+ uint32_t h;
5423+ uint32_t num_buffers;
5424+};
5425+
5426+#define DRM_PSB_FENCE_NO_USER (1 << 0)
5427+
5428+struct psb_ttm_fence_rep {
5429+ uint32_t handle;
5430+ uint32_t fence_class;
5431+ uint32_t fence_type;
5432+ uint32_t signaled_types;
5433+ uint32_t error;
5434+};
5435+
5436+typedef struct drm_psb_cmdbuf_arg {
5437+ uint64_t buffer_list; /* List of buffers to validate */
5438+ uint64_t clip_rects; /* See i915 counterpart */
5439+ uint64_t scene_arg;
5440+ uint64_t fence_arg;
5441+
5442+ uint32_t ta_flags;
5443+
5444+ uint32_t ta_handle; /* TA reg-value pairs */
5445+ uint32_t ta_offset;
5446+ uint32_t ta_size;
5447+
5448+ uint32_t oom_handle;
5449+ uint32_t oom_offset;
5450+ uint32_t oom_size;
5451+
5452+ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
5453+ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
5454+ uint32_t cmdbuf_size;
5455+
5456+ uint32_t reloc_handle; /* Reloc buffer object */
5457+ uint32_t reloc_offset;
5458+ uint32_t num_relocs;
5459+
5460+ int32_t damage; /* Damage front buffer with cliprects */
5461+ /* Not implemented yet */
5462+ uint32_t fence_flags;
5463+ uint32_t engine;
5464+
5465+ /*
5466+ * Feedback;
5467+ */
5468+
5469+ uint32_t feedback_ops;
5470+ uint32_t feedback_handle;
5471+ uint32_t feedback_offset;
5472+ uint32_t feedback_breakpoints;
5473+ uint32_t feedback_size;
5474+}drm_psb_cmdbuf_arg_t;
5475+
5476+typedef struct drm_psb_pageflip_arg {
5477+ uint32_t flip_offset;
5478+ uint32_t stride;
5479+}drm_psb_pageflip_arg_t;
5480+
5481+typedef enum {
5482+ LNC_VIDEO_FRAME_SKIP,
5483+ LNC_VIDEO_GETPARAM_RAR_REGION_SIZE,
5484+ LNC_VIDEO_GETPARAM_RAR_HANDLER_OFFSET
5485+} lnc_getparam_key_t;
5486+
5487+struct drm_lnc_video_getparam_arg {
5488+ lnc_getparam_key_t key;
5489+ uint64_t arg; /* argument pointer */
5490+ uint64_t value; /* feed back pointer */
5491+};
5492+
5493+struct drm_psb_xhw_init_arg {
5494+ uint32_t operation;
5495+ uint32_t buffer_handle;
5496+};
5497+
5498+/*
5499+ * Feedback components:
5500+ */
5501+
5502+/*
5503+ * Vistest component. The number of these in the feedback buffer
5504+ * equals the number of vistest breakpoints + 1.
5505+ * This is currently the only feedback component.
5506+ */
5507+
5508+struct drm_psb_vistest {
5509+ uint32_t vt[8];
5510+};
5511+
5512+struct drm_psb_sizes_arg {
5513+ uint32_t ta_mem_size;
5514+ uint32_t mmu_size;
5515+ uint32_t pds_size;
5516+ uint32_t rastgeom_size;
5517+ uint32_t tt_size;
5518+ uint32_t vram_size;
5519+};
5520+
5521+struct mrst_timing_info {
5522+ uint16_t pixel_clock;
5523+ uint8_t hactive_lo;
5524+ uint8_t hblank_lo;
5525+ uint8_t hblank_hi:4;
5526+ uint8_t hactive_hi:4;
5527+ uint8_t vactive_lo;
5528+ uint8_t vblank_lo;
5529+ uint8_t vblank_hi:4;
5530+ uint8_t vactive_hi:4;
5531+ uint8_t hsync_offset_lo;
5532+ uint8_t hsync_pulse_width_lo;
5533+ uint8_t vsync_pulse_width_lo:4;
5534+ uint8_t vsync_offset_lo:4;
5535+ uint8_t vsync_pulse_width_hi:2;
5536+ uint8_t vsync_offset_hi:2;
5537+ uint8_t hsync_pulse_width_hi:2;
5538+ uint8_t hsync_offset_hi:2;
5539+ uint8_t width_mm_lo;
5540+ uint8_t height_mm_lo;
5541+ uint8_t height_mm_hi:4;
5542+ uint8_t width_mm_hi:4;
5543+ uint8_t hborder;
5544+ uint8_t vborder;
5545+ uint8_t unknown0:1;
5546+ uint8_t hsync_positive:1;
5547+ uint8_t vsync_positive:1;
5548+ uint8_t separate_sync:2;
5549+ uint8_t stereo:1;
5550+ uint8_t unknown6:1;
5551+ uint8_t interlaced:1;
5552+} __attribute__((packed));
5553+
5554+struct mrst_panel_descriptor_v1{
5555+ uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
5556+ /* 0x61190 if MIPI */
5557+ uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
5558+ uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
5559+ uint32_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
5560+ /* Register 0x61210 */
5561+ struct mrst_timing_info DTD;/*18 bytes, Standard definition */
5562+ uint16_t Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
5563+ /* Bit 0, Frequency, 15 bits,0 - 32767Hz */
5564+ /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
5565+ uint16_t Panel_MIPI_Display_Descriptor;
5566+ /*16 bits, Defined as follows: */
5567+ /* if MIPI, 0x0000 if LVDS */
5568+ /* Bit 0, Type, 2 bits, */
5569+ /* 0: Type-1, */
5570+ /* 1: Type-2, */
5571+ /* 2: Type-3, */
5572+ /* 3: Type-4 */
5573+ /* Bit 2, Pixel Format, 4 bits */
5574+ /* Bit0: 16bpp (not supported in LNC), */
5575+ /* Bit1: 18bpp loosely packed, */
5576+ /* Bit2: 18bpp packed, */
5577+ /* Bit3: 24bpp */
5578+ /* Bit 6, Reserved, 2 bits, 00b */
5579+ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
5580+ /* Bit 14, Reserved, 2 bits, 00b */
5581+} __attribute__ ((packed));
5582+
5583+struct mrst_panel_descriptor_v2{
5584+ uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
5585+ /* 0x61190 if MIPI */
5586+ uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
5587+ uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
5588+ uint8_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
5589+ /* Register 0x61210 */
5590+ struct mrst_timing_info DTD;/*18 bytes, Standard definition */
5591+ uint16_t Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
5592+ /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
5593+ uint8_t Panel_Initial_Brightness;/* [7:0] 0 - 100% */
5594+ /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
5595+ uint16_t Panel_MIPI_Display_Descriptor;
5596+ /*16 bits, Defined as follows: */
5597+ /* if MIPI, 0x0000 if LVDS */
5598+ /* Bit 0, Type, 2 bits, */
5599+ /* 0: Type-1, */
5600+ /* 1: Type-2, */
5601+ /* 2: Type-3, */
5602+ /* 3: Type-4 */
5603+ /* Bit 2, Pixel Format, 4 bits */
5604+ /* Bit0: 16bpp (not supported in LNC), */
5605+ /* Bit1: 18bpp loosely packed, */
5606+ /* Bit2: 18bpp packed, */
5607+ /* Bit3: 24bpp */
5608+ /* Bit 6, Reserved, 2 bits, 00b */
5609+ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
5610+ /* Bit 14, Reserved, 2 bits, 00b */
5611+} __attribute__ ((packed));
5612+
5613+union mrst_panel_rx{
5614+ struct{
5615+ uint16_t NumberOfLanes :2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
5616+ /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
5617+ uint16_t MaxLaneFreq :3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
5618+ /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
5619+ uint16_t SupportedVideoTransferMode :2; /*0: Non-burst only */
5620+ /* 1: Burst and non-burst */
5621+ /* 2/3: Reserved */
5622+ uint16_t HSClkBehavior :1; /*0: Continuous, 1: Non-continuous*/
5623+ uint16_t DuoDisplaySupport :1; /*1 bit,0: No, 1: Yes*/
5624+ uint16_t ECC_ChecksumCapabilities :1;/*1 bit,0: No, 1: Yes*/
5625+ uint16_t BidirectionalCommunication :1;/*1 bit,0: No, 1: Yes */
5626+ uint16_t Rsvd :5;/*5 bits,00000b */
5627+ }panelrx;
5628+ uint16_t panel_receiver;
5629+} __attribute__ ((packed));
5630+
5631+struct gct_ioctl_arg{
5632+ uint8_t bpi; /* boot panel index, number of panel used during boot */
5633+ uint8_t pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
5634+ struct mrst_timing_info DTD; /* timing info for the selected panel */
5635+ uint32_t Panel_Port_Control;
5636+ uint32_t PP_On_Sequencing;/*1 dword,Register 0x61208,*/
5637+ uint32_t PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
5638+ uint32_t PP_Cycle_Delay;
5639+ uint16_t Panel_Backlight_Inverter_Descriptor;
5640+} __attribute__ ((packed));
5641+
5642+struct mrst_vbt{
5643+ char Signature[4]; /*4 bytes,"$GCT" */
5644+ uint8_t Revision; /*1 byte */
5645+ uint8_t Size; /*1 byte */
5646+ uint8_t Checksum; /*1 byte,Calculated*/
5647+ void *mrst_gct;
5648+} __attribute__ ((packed));
5649+
5650+struct mrst_gct_v1{ /* expect this table to change per customer request*/
5651+ union{ /*8 bits,Defined as follows: */
5652+ struct{
5653+ uint8_t PanelType :4; /*4 bits, Bit field for panels*/
5654+ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
5655+ uint8_t BootPanelIndex :2;/*2 bits,Specifies which of the*/
5656+ /* 4 panels to use by default*/
5657+ uint8_t BootMIPI_DSI_RxIndex :2;/*Specifies which of*/
5658+ /* the 4 MIPI DSI receivers to use*/
5659+ }PD;
5660+ uint8_t PanelDescriptor;
5661+ };
5662+ struct mrst_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
5663+ union mrst_panel_rx panelrx[4]; /* panel receivers*/
5664+} __attribute__ ((packed));
5665+
5666+struct mrst_gct_v2{ /* expect this table to change per customer request*/
5667+ union{ /*8 bits,Defined as follows: */
5668+ struct{
5669+ uint8_t PanelType :4; /*4 bits, Bit field for panels*/
5670+ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
5671+ uint8_t BootPanelIndex :2;/*2 bits,Specifies which of the*/
5672+ /* 4 panels to use by default*/
5673+ uint8_t BootMIPI_DSI_RxIndex :2;/*Specifies which of*/
5674+ /* the 4 MIPI DSI receivers to use*/
5675+ }PD;
5676+ uint8_t PanelDescriptor;
5677+ };
5678+ struct mrst_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
5679+ union mrst_panel_rx panelrx[4]; /* panel receivers*/
5680+} __attribute__ ((packed));
5681+
5682+#define PSB_DC_CRTC_SAVE 0x01
5683+#define PSB_DC_CRTC_RESTORE 0x02
5684+#define PSB_DC_OUTPUT_SAVE 0x04
5685+#define PSB_DC_OUTPUT_RESTORE 0x08
5686+#define PSB_DC_CRTC_MASK 0x03
5687+#define PSB_DC_OUTPUT_MASK 0x0C
5688+
5689+struct drm_psb_dc_state_arg {
5690+ uint32_t flags;
5691+ uint32_t obj_id;
5692+};
5693+
5694+struct drm_psb_mode_operation_arg {
5695+ uint32_t obj_id;
5696+ uint16_t operation;
5697+ struct drm_mode_modeinfo mode;
5698+ void * data;
5699+};
5700+
5701+struct drm_psb_stolen_memory_arg {
5702+ uint32_t base;
5703+ uint32_t size;
5704+};
5705+
5706+/*Display Register Bits*/
5707+#define REGRWBITS_PFIT_CONTROLS (1 << 0)
5708+#define REGRWBITS_PFIT_AUTOSCALE_RATIOS (1 << 1)
5709+#define REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS (1 << 2)
5710+#define REGRWBITS_PIPEASRC (1 << 3)
5711+#define REGRWBITS_PIPEBSRC (1 << 4)
5712+#define REGRWBITS_VTOTAL_A (1 << 5)
5713+#define REGRWBITS_VTOTAL_B (1 << 6)
5714+
5715+/*Overlay Register Bits*/
5716+#define OV_REGRWBITS_OVADD (1 << 0)
5717+#define OV_REGRWBITS_OGAM_ALL (1 << 1)
5718+
5719+struct drm_psb_register_rw_arg {
5720+ uint32_t b_force_hw_on;
5721+
5722+ uint32_t display_read_mask;
5723+ uint32_t display_write_mask;
5724+
5725+ struct {
5726+ uint32_t pfit_controls;
5727+ uint32_t pfit_autoscale_ratios;
5728+ uint32_t pfit_programmed_scale_ratios;
5729+ uint32_t pipeasrc;
5730+ uint32_t pipebsrc;
5731+ uint32_t vtotal_a;
5732+ uint32_t vtotal_b;
5733+ } display;
5734+
5735+ uint32_t overlay_read_mask;
5736+ uint32_t overlay_write_mask;
5737+
5738+ struct {
5739+ uint32_t OVADD;
5740+ uint32_t OGAMC0;
5741+ uint32_t OGAMC1;
5742+ uint32_t OGAMC2;
5743+ uint32_t OGAMC3;
5744+ uint32_t OGAMC4;
5745+ uint32_t OGAMC5;
5746+ } overlay;
5747+};
5748+
5749+#define PSB_HW_COOKIE_SIZE 16
5750+#define PSB_HW_FEEDBACK_SIZE 8
5751+#define PSB_HW_OOM_CMD_SIZE (6 + DRM_PSB_NUM_RASTER_USE_REG * 2)
5752+
5753+struct drm_psb_xhw_arg {
5754+ uint32_t op;
5755+ int ret;
5756+ uint32_t irq_op;
5757+ uint32_t issue_irq;
5758+ uint32_t cookie[PSB_HW_COOKIE_SIZE];
5759+ union {
5760+ struct {
5761+ uint32_t w; /* also contains msaa info */
5762+ uint32_t h;
5763+ uint32_t size;
5764+ uint32_t clear_p_start;
5765+ uint32_t clear_num_pages;
5766+ } si;
5767+ struct {
5768+ uint32_t fire_flags;
5769+ uint32_t hw_context;
5770+ uint32_t offset;
5771+ uint32_t engine;
5772+ uint32_t flags;
5773+ uint32_t rca;
5774+ uint32_t num_oom_cmds;
5775+ uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE];
5776+ } sb;
5777+ struct {
5778+ uint32_t pages;
5779+ uint32_t size;
5780+ uint32_t ta_min_size;
5781+ } bi;
5782+ struct {
5783+ uint32_t bca;
5784+ uint32_t rca;
5785+ uint32_t flags;
5786+ } oom;
5787+ struct {
5788+ uint32_t pt_offset;
5789+ uint32_t param_offset;
5790+ uint32_t flags;
5791+ } bl;
5792+ struct {
5793+ uint32_t value;
5794+ } cl;
5795+ uint32_t feedback[PSB_HW_FEEDBACK_SIZE];
5796+ } arg;
5797+};
5798+
5799+/* Controlling the kernel modesetting buffers */
5800+
5801+#define DRM_PSB_KMS_OFF 0x00
5802+#define DRM_PSB_KMS_ON 0x01
5803+#define DRM_PSB_VT_LEAVE 0x02
5804+#define DRM_PSB_VT_ENTER 0x03
5805+#define DRM_PSB_XHW_INIT 0x04
5806+#define DRM_PSB_XHW 0x05
5807+#define DRM_PSB_EXTENSION 0x06
5808+#define DRM_PSB_SIZES 0x07
5809+#define DRM_PSB_FUSE_REG 0x08
5810+#define DRM_PSB_VBT 0x09
5811+#define DRM_PSB_DC_STATE 0x0A
5812+#define DRM_PSB_ADB 0x0B
5813+#define DRM_PSB_MODE_OPERATION 0x0C
5814+#define DRM_PSB_STOLEN_MEMORY 0x0D
5815+#define DRM_PSB_REGISTER_RW 0x0E
5816+
5817+/*
5818+ * Xhw commands.
5819+ */
5820+
5821+#define PSB_XHW_INIT 0x00
5822+#define PSB_XHW_TAKEDOWN 0x01
5823+
5824+#define PSB_XHW_FIRE_RASTER 0x00
5825+#define PSB_XHW_SCENE_INFO 0x01
5826+#define PSB_XHW_SCENE_BIND_FIRE 0x02
5827+#define PSB_XHW_TA_MEM_INFO 0x03
5828+#define PSB_XHW_RESET_DPM 0x04
5829+#define PSB_XHW_OOM 0x05
5830+#define PSB_XHW_TERMINATE 0x06
5831+#define PSB_XHW_VISTEST 0x07
5832+#define PSB_XHW_RESUME 0x08
5833+#define PSB_XHW_TA_MEM_LOAD 0x09
5834+#define PSB_XHW_CHECK_LOCKUP 0x0a
5835+
5836+#define PSB_SCENE_FLAG_DIRTY (1 << 0)
5837+#define PSB_SCENE_FLAG_COMPLETE (1 << 1)
5838+#define PSB_SCENE_FLAG_SETUP (1 << 2)
5839+#define PSB_SCENE_FLAG_SETUP_ONLY (1 << 3)
5840+#define PSB_SCENE_FLAG_CLEARED (1 << 4)
5841+
5842+#define PSB_TA_MEM_FLAG_TA (1 << 0)
5843+#define PSB_TA_MEM_FLAG_RASTER (1 << 1)
5844+#define PSB_TA_MEM_FLAG_HOSTA (1 << 2)
5845+#define PSB_TA_MEM_FLAG_HOSTD (1 << 3)
5846+#define PSB_TA_MEM_FLAG_INIT (1 << 4)
5847+#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5)
5848+
5849+/*Raster fire will deallocate memory */
5850+#define PSB_FIRE_FLAG_RASTER_DEALLOC (1 << 0)
5851+/*Isp reset needed due to change in ZLS format */
5852+#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1)
5853+/*These are set by Xpsb. */
5854+#define PSB_FIRE_FLAG_XHW_MASK 0xff000000
5855+/*The task has had at least one OOM and Xpsb will
5856+ send back messages on each fire. */
5857+#define PSB_FIRE_FLAG_XHW_OOM (1 << 24)
5858+
5859+#define PSB_SCENE_ENGINE_TA 0
5860+#define PSB_SCENE_ENGINE_RASTER 1
5861+#define PSB_SCENE_NUM_ENGINES 2
5862+
5863+#define PSB_LOCKUP_RASTER (1 << 0)
5864+#define PSB_LOCKUP_TA (1 << 1)
5865+
5866+struct drm_psb_dev_info_arg {
5867+ uint32_t num_use_attribute_registers;
5868+};
5869+#define DRM_PSB_DEVINFO 0x01
5870+
5871+#define PSB_MODE_OPERATION_MODE_VALID 0x01
5872+
5873+#endif
5874diff --git a/drivers/gpu/drm/psb/psb_drv.c b/drivers/gpu/drm/psb/psb_drv.c
5875new file mode 100644
5876index 0000000..7019b73
5877--- /dev/null
5878+++ b/drivers/gpu/drm/psb/psb_drv.c
5879@@ -0,0 +1,2239 @@
5880+/**************************************************************************
5881+ * Copyright (c) 2007, Intel Corporation.
5882+ * All Rights Reserved.
5883+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
5884+ * All Rights Reserved.
5885+ *
5886+ * This program is free software; you can redistribute it and/or modify it
5887+ * under the terms and conditions of the GNU General Public License,
5888+ * version 2, as published by the Free Software Foundation.
5889+ *
5890+ * This program is distributed in the hope it will be useful, but WITHOUT
5891+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
5892+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
5893+ * more details.
5894+ *
5895+ * You should have received a copy of the GNU General Public License along with
5896+ * this program; if not, write to the Free Software Foundation, Inc.,
5897+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5898+ *
5899+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
5900+ * develop this driver.
5901+ *
5902+ **************************************************************************/
5903+/*
5904+ */
5905+
5906+#include <drm/drmP.h>
5907+#include <drm/drm.h>
5908+#include "psb_drm.h"
5909+#include "psb_drv.h"
5910+#include "psb_reg.h"
5911+#include "psb_intel_reg.h"
5912+#include "psb_intel_bios.h"
5913+#include "psb_msvdx.h"
5914+#include "lnc_topaz.h"
5915+#include <drm/drm_pciids.h>
5916+#include "psb_scene.h"
5917+#include "psb_powermgmt.h"
5918+#include <linux/cpu.h>
5919+#include <linux/notifier.h>
5920+#include <linux/spinlock.h>
5921+
5922+int drm_psb_debug;
5923+EXPORT_SYMBOL(drm_psb_debug);
5924+static int drm_psb_trap_pagefaults;
5925+static int drm_psb_clock_gating = 2;
5926+static int drm_psb_ta_mem_size = 32 * 1024;
5927+
5928+int drm_psb_disable_vsync = 1;
5929+int drm_psb_no_fb;
5930+int drm_psb_force_pipeb;
5931+int drm_idle_check_interval = 5;
5932+int drm_psb_ospm = 0;
5933+int drm_msvdx_pmpolicy = PSB_PMPOLICY_NOPM;
5934+int drm_topaz_pmpolicy = PSB_PMPOLICY_NOPM;
5935+
5936+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
5937+
5938+MODULE_PARM_DESC(debug, "Enable debug output");
5939+MODULE_PARM_DESC(clock_gating, "clock gating");
5940+MODULE_PARM_DESC(no_fb, "Disable FBdev");
5941+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
5942+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
5943+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
5944+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
5945+MODULE_PARM_DESC(ospm, "switch for ospm support");
5946+MODULE_PARM_DESC(msvdx_pmpolicy, "msvdx power management policy btw frames");
5947+MODULE_PARM_DESC(topaz_pmpolicy, "topaz power managerment policy btw frames");
5948+module_param_named(debug, drm_psb_debug, int, 0600);
5949+module_param_named(clock_gating, drm_psb_clock_gating, int, 0600);
5950+module_param_named(no_fb, drm_psb_no_fb, int, 0600);
5951+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
5952+module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600);
5953+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
5954+module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600);
5955+module_param_named(ospm, drm_psb_ospm, int, 0600);
5956+module_param_named(msvdx_pmpolicy, drm_msvdx_pmpolicy, int, 0600);
5957+module_param_named(topaz_pmpolicy, drm_topaz_pmpolicy, int, 0600);
5958+
5959+#ifndef CONFIG_X86_PAT
5960+#warning "Don't build this driver without PAT support!!!"
5961+#endif
5962+
5963+#define psb_PCI_IDS \
5964+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
5965+ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
5966+ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5967+ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5968+ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5969+ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5970+ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5971+ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5972+ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5973+ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
5974+ {0, 0, 0}
5975+
5976+static struct pci_device_id pciidlist[] = {
5977+ psb_PCI_IDS
5978+};
5979+
5980+/*
5981+ * Standard IOCTLs.
5982+ */
5983+
5984+#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
5985+#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
5986+#define DRM_IOCTL_PSB_VT_LEAVE DRM_IO(DRM_PSB_VT_LEAVE + DRM_COMMAND_BASE)
5987+#define DRM_IOCTL_PSB_VT_ENTER DRM_IO(DRM_PSB_VT_ENTER + DRM_COMMAND_BASE)
5988+#define DRM_IOCTL_PSB_XHW_INIT DRM_IOW(DRM_PSB_XHW_INIT + DRM_COMMAND_BASE, \
5989+ struct drm_psb_xhw_init_arg)
5990+#define DRM_IOCTL_PSB_XHW DRM_IO(DRM_PSB_XHW + DRM_COMMAND_BASE)
5991+#define DRM_IOCTL_PSB_EXTENSION DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
5992+ union drm_psb_extension_arg)
5993+#define DRM_IOCTL_PSB_SIZES DRM_IOR(DRM_PSB_SIZES + DRM_COMMAND_BASE, \
5994+ struct drm_psb_sizes_arg)
5995+#define DRM_IOCTL_PSB_FUSE_REG DRM_IOWR(DRM_PSB_FUSE_REG + DRM_COMMAND_BASE, \
5996+ uint32_t)
5997+#define DRM_IOCTL_PSB_VBT DRM_IOWR(DRM_PSB_VBT + DRM_COMMAND_BASE, \
5998+ struct gct_ioctl_arg)
5999+#define DRM_IOCTL_PSB_DC_STATE DRM_IOW(DRM_PSB_DC_STATE + DRM_COMMAND_BASE, \
6000+ struct drm_psb_dc_state_arg)
6001+#define DRM_IOCTL_PSB_ADB DRM_IOWR(DRM_PSB_ADB + DRM_COMMAND_BASE, \
6002+ uint32_t)
6003+#define DRM_IOCTL_PSB_MODE_OPERATION DRM_IOWR(DRM_PSB_MODE_OPERATION + DRM_COMMAND_BASE, \
6004+ struct drm_psb_mode_operation_arg)
6005+#define DRM_IOCTL_PSB_STOLEN_MEMORY DRM_IOWR(DRM_PSB_STOLEN_MEMORY + DRM_COMMAND_BASE, \
6006+ struct drm_psb_stolen_memory_arg)
6007+#define DRM_IOCTL_PSB_REGISTER_RW DRM_IOWR(DRM_PSB_REGISTER_RW + DRM_COMMAND_BASE, \
6008+ struct drm_psb_register_rw_arg)
6009+
6010+/*
6011+ * TTM execbuf extension.
6012+ */
6013+
6014+#define DRM_PSB_CMDBUF (DRM_PSB_REGISTER_RW + 1)
6015+#define DRM_PSB_SCENE_UNREF (DRM_PSB_CMDBUF + 1)
6016+#define DRM_IOCTL_PSB_CMDBUF DRM_IOW(DRM_PSB_CMDBUF + DRM_COMMAND_BASE, \
6017+ struct drm_psb_cmdbuf_arg)
6018+#define DRM_IOCTL_PSB_SCENE_UNREF DRM_IOW(DRM_PSB_SCENE_UNREF + DRM_COMMAND_BASE, \
6019+ struct drm_psb_scene)
6020+#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
6021+#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
6022+#define DRM_IOCTL_PSB_EXTENSION DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
6023+ union drm_psb_extension_arg)
6024+/*
6025+ * TTM placement user extension.
6026+ */
6027+
6028+#define DRM_PSB_PLACEMENT_OFFSET (DRM_PSB_SCENE_UNREF + 1)
6029+
6030+#define DRM_PSB_TTM_PL_CREATE (TTM_PL_CREATE + DRM_PSB_PLACEMENT_OFFSET)
6031+#define DRM_PSB_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_PSB_PLACEMENT_OFFSET)
6032+#define DRM_PSB_TTM_PL_UNREF (TTM_PL_UNREF + DRM_PSB_PLACEMENT_OFFSET)
6033+#define DRM_PSB_TTM_PL_SYNCCPU (TTM_PL_SYNCCPU + DRM_PSB_PLACEMENT_OFFSET)
6034+#define DRM_PSB_TTM_PL_WAITIDLE (TTM_PL_WAITIDLE + DRM_PSB_PLACEMENT_OFFSET)
6035+#define DRM_PSB_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_PSB_PLACEMENT_OFFSET)
6036+
6037+/*
6038+ * TTM fence extension.
6039+ */
6040+
6041+#define DRM_PSB_FENCE_OFFSET (DRM_PSB_TTM_PL_SETSTATUS + 1)
6042+#define DRM_PSB_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_PSB_FENCE_OFFSET)
6043+#define DRM_PSB_TTM_FENCE_FINISH (TTM_FENCE_FINISH + DRM_PSB_FENCE_OFFSET)
6044+#define DRM_PSB_TTM_FENCE_UNREF (TTM_FENCE_UNREF + DRM_PSB_FENCE_OFFSET)
6045+
6046+#define DRM_PSB_FLIP (DRM_PSB_TTM_FENCE_UNREF + 1) //20
6047+/* PSB video extension */
6048+#define DRM_LNC_VIDEO_GETPARAM (DRM_PSB_FLIP + 1)
6049+
6050+#define DRM_IOCTL_PSB_TTM_PL_CREATE \
6051+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE,\
6052+ union ttm_pl_create_arg)
6053+#define DRM_IOCTL_PSB_TTM_PL_REFERENCE \
6054+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_REFERENCE,\
6055+ union ttm_pl_reference_arg)
6056+#define DRM_IOCTL_PSB_TTM_PL_UNREF \
6057+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_UNREF,\
6058+ struct ttm_pl_reference_req)
6059+#define DRM_IOCTL_PSB_TTM_PL_SYNCCPU \
6060+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SYNCCPU,\
6061+ struct ttm_pl_synccpu_arg)
6062+#define DRM_IOCTL_PSB_TTM_PL_WAITIDLE \
6063+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_WAITIDLE,\
6064+ struct ttm_pl_waitidle_arg)
6065+#define DRM_IOCTL_PSB_TTM_PL_SETSTATUS \
6066+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SETSTATUS,\
6067+ union ttm_pl_setstatus_arg)
6068+#define DRM_IOCTL_PSB_TTM_FENCE_SIGNALED \
6069+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_SIGNALED, \
6070+ union ttm_fence_signaled_arg)
6071+#define DRM_IOCTL_PSB_TTM_FENCE_FINISH \
6072+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_FINISH, \
6073+ union ttm_fence_finish_arg)
6074+#define DRM_IOCTL_PSB_TTM_FENCE_UNREF \
6075+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_UNREF, \
6076+ struct ttm_fence_unref_arg)
6077+#define DRM_IOCTL_PSB_FLIP \
6078+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_FLIP, \
6079+ struct drm_psb_pageflip_arg)
6080+#define DRM_IOCTL_LNC_VIDEO_GETPARAM \
6081+ DRM_IOWR(DRM_COMMAND_BASE + DRM_LNC_VIDEO_GETPARAM, \
6082+ struct drm_lnc_video_getparam_arg)
6083+
6084+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
6085+ struct drm_file *file_priv);
6086+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
6087+ struct drm_file *file_priv);
6088+static int psb_sizes_ioctl(struct drm_device *dev, void *data,
6089+ struct drm_file *file_priv);
6090+static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
6091+ struct drm_file *file_priv);
6092+static int psb_vbt_ioctl(struct drm_device *dev, void *data,
6093+ struct drm_file *file_priv);
6094+static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
6095+ struct drm_file *file_priv);
6096+static int psb_adb_ioctl(struct drm_device *dev, void *data,
6097+ struct drm_file *file_priv);
6098+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
6099+ struct drm_file *file_priv);
6100+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
6101+ struct drm_file *file_priv);
6102+static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
6103+ struct drm_file *file_priv);
6104+
6105+#define PSB_IOCTL_DEF(ioctl, func, flags) \
6106+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
6107+
6108+static struct drm_ioctl_desc psb_ioctls[] = {
6109+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl,
6110+ DRM_ROOT_ONLY),
6111+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON, psbfb_kms_on_ioctl, DRM_ROOT_ONLY),
6112+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_LEAVE, psb_vt_leave_ioctl,
6113+ DRM_ROOT_ONLY),
6114+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_ENTER, psb_vt_enter_ioctl, DRM_ROOT_ONLY),
6115+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW_INIT, psb_xhw_init_ioctl,
6116+ DRM_ROOT_ONLY),
6117+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW, psb_xhw_ioctl, DRM_ROOT_ONLY),
6118+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_EXTENSION, psb_extension_ioctl, DRM_AUTH),
6119+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_SIZES, psb_sizes_ioctl, DRM_AUTH),
6120+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_FUSE_REG, psb_fuse_reg_ioctl, DRM_AUTH),
6121+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VBT, psb_vbt_ioctl, DRM_AUTH),
6122+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_DC_STATE, psb_dc_state_ioctl, DRM_AUTH),
6123+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
6124+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
6125+ DRM_AUTH),
6126+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
6127+ DRM_AUTH),
6128+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_REGISTER_RW, psb_register_rw_ioctl,
6129+ DRM_AUTH),
6130+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_CMDBUF, psb_cmdbuf_ioctl, DRM_AUTH),
6131+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_SCENE_UNREF, drm_psb_scene_unref_ioctl,
6132+ DRM_AUTH),
6133+
6134+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE, psb_pl_create_ioctl,
6135+ DRM_AUTH),
6136+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_REFERENCE, psb_pl_reference_ioctl,
6137+ DRM_AUTH),
6138+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_UNREF, psb_pl_unref_ioctl,
6139+ DRM_AUTH),
6140+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SYNCCPU, psb_pl_synccpu_ioctl,
6141+ DRM_AUTH),
6142+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_WAITIDLE, psb_pl_waitidle_ioctl,
6143+ DRM_AUTH),
6144+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SETSTATUS, psb_pl_setstatus_ioctl,
6145+ DRM_AUTH),
6146+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_SIGNALED,
6147+ psb_fence_signaled_ioctl, DRM_AUTH),
6148+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_FINISH, psb_fence_finish_ioctl,
6149+ DRM_AUTH),
6150+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_UNREF, psb_fence_unref_ioctl,
6151+ DRM_AUTH),
6152+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_FLIP, psb_page_flip, DRM_AUTH),
6153+ PSB_IOCTL_DEF(DRM_IOCTL_LNC_VIDEO_GETPARAM, lnc_video_getparam, DRM_AUTH)
6154+};
6155+
6156+static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
6157+
6158+static void get_ci_info(struct drm_psb_private *dev_priv)
6159+{
6160+ struct pci_dev *pdev;
6161+
6162+ pdev = pci_get_subsys(0x8086, 0x080b, 0, 0, NULL);
6163+ if (pdev == NULL) {
6164+ /* IF no pci_device we set size & addr to 0, no ci
6165+ * share buffer can be created */
6166+ dev_priv->ci_region_start = 0;
6167+ dev_priv->ci_region_size = 0;
6168+ printk(KERN_ERR "can't find CI device, no ci share buffer\n");
6169+ return;
6170+ }
6171+
6172+ dev_priv->ci_region_start = pci_resource_start(pdev, 1);
6173+ dev_priv->ci_region_size = pci_resource_len(pdev, 1);
6174+
6175+ printk(KERN_INFO "ci_region_start %x ci_region_size %d\n",
6176+ dev_priv->ci_region_start, dev_priv->ci_region_size);
6177+
6178+ pci_dev_put(pdev);
6179+
6180+ return;
6181+}
6182+
6183+static void get_rar_info(struct drm_psb_private *dev_priv)
6184+{
6185+ struct pci_dev *pdev;
6186+ const uint32_t msg_opcode = 0xD0;
6187+ const uint32_t bunit_port = 0x3;
6188+ const uint32_t start_addr_reg_offset = 0x10;
6189+ const uint32_t end_addr_reg_offset = 0x11;
6190+ const uint32_t msg_byte_write_enable = 0xf;
6191+ const uint32_t vendor_id = 0x8086;
6192+ const uint32_t device_id = 0x4110;
6193+ const uint32_t lnc_mcr_offset = 0xd0;
6194+ const uint32_t lnc_mdr_offset = 0xd4;
6195+ uint32_t start_addr_msg, end_addr_msg, start_addr, end_addr;
6196+
6197+ pdev = pci_get_subsys(vendor_id, device_id, 0, 0, NULL);
6198+ if (pdev == NULL) {
6199+ dev_priv->rar_region_start = 0;
6200+ dev_priv->rar_region_size = 0;
6201+ goto out;
6202+ }
6203+
6204+ /* get the start msg */
6205+ start_addr_msg = (msg_opcode << 24) |
6206+ (bunit_port << 16) |
6207+ (start_addr_reg_offset << 8) |
6208+ (msg_byte_write_enable << 4);
6209+
6210+ /* thought write/read is always success */
6211+ pci_write_config_dword(pdev,
6212+ lnc_mcr_offset,
6213+ start_addr_msg);
6214+ pci_read_config_dword(pdev,
6215+ lnc_mdr_offset,
6216+ &start_addr);
6217+
6218+ start_addr &= 0xfffffc00u;
6219+
6220+ /* get the end msg */
6221+ end_addr_msg = (msg_opcode << 24) |
6222+ (bunit_port << 16) |
6223+ (end_addr_reg_offset << 8) |
6224+ (msg_byte_write_enable << 4);
6225+
6226+ pci_write_config_dword(pdev,
6227+ lnc_mcr_offset,
6228+ end_addr_msg);
6229+ pci_read_config_dword(pdev,
6230+ lnc_mdr_offset,
6231+ &end_addr);
6232+
6233+ end_addr |= 0x3ffu;
6234+
6235+ dev_priv->rar_region_start = start_addr;
6236+ dev_priv->rar_region_size = end_addr - start_addr + 1;
6237+
6238+ printk(KERN_INFO "rar for video region [0x%x, 0x%x], size %d\n",
6239+ start_addr, end_addr, dev_priv->rar_region_size);
6240+out:
6241+ if (pdev != NULL)
6242+ pci_dev_put(pdev);
6243+
6244+ return;
6245+}
6246+
6247+static void psb_set_uopt(struct drm_psb_uopt *uopt)
6248+{
6249+ uopt->clock_gating = drm_psb_clock_gating;
6250+}
6251+
6252+static void psb_lastclose(struct drm_device *dev)
6253+{
6254+ struct drm_psb_private *dev_priv =
6255+ (struct drm_psb_private *) dev->dev_private;
6256+
6257+ if (!dev->dev_private)
6258+ return;
6259+
6260+ if (dev_priv->ta_mem)
6261+ psb_ta_mem_unref(&dev_priv->ta_mem);
6262+ mutex_lock(&dev_priv->cmdbuf_mutex);
6263+ if (dev_priv->context.buffers) {
6264+ vfree(dev_priv->context.buffers);
6265+ dev_priv->context.buffers = NULL;
6266+ }
6267+ mutex_unlock(&dev_priv->cmdbuf_mutex);
6268+}
6269+
6270+static void psb_do_takedown(struct drm_device *dev)
6271+{
6272+ struct drm_psb_private *dev_priv =
6273+ (struct drm_psb_private *) dev->dev_private;
6274+ struct ttm_bo_device *bdev = &dev_priv->bdev;
6275+
6276+
6277+ if (dev_priv->have_mem_rastgeom) {
6278+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_RASTGEOM);
6279+ dev_priv->have_mem_rastgeom = 0;
6280+ }
6281+ if (dev_priv->have_mem_mmu) {
6282+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_MMU);
6283+ dev_priv->have_mem_mmu = 0;
6284+ }
6285+ if (dev_priv->have_mem_aper) {
6286+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_APER);
6287+ dev_priv->have_mem_aper = 0;
6288+ }
6289+ if (dev_priv->have_tt) {
6290+ ttm_bo_clean_mm(bdev, TTM_PL_TT);
6291+ dev_priv->have_tt = 0;
6292+ }
6293+ if (dev_priv->have_vram) {
6294+ ttm_bo_clean_mm(bdev, TTM_PL_VRAM);
6295+ dev_priv->have_vram = 0;
6296+ }
6297+ if (dev_priv->have_camera) {
6298+ ttm_bo_clean_mm(bdev, TTM_PL_CI);
6299+ dev_priv->have_camera = 0;
6300+ }
6301+ if (dev_priv->have_rar) {
6302+ ttm_bo_clean_mm(bdev, TTM_PL_RAR);
6303+ dev_priv->have_rar = 0;
6304+ }
6305+
6306+ psb_msvdx_uninit(dev);
6307+
6308+ if (IS_MRST(dev))
6309+ lnc_topaz_uninit(dev);
6310+
6311+ if (dev_priv->comm) {
6312+ kunmap(dev_priv->comm_page);
6313+ dev_priv->comm = NULL;
6314+ }
6315+ if (dev_priv->comm_page) {
6316+ __free_page(dev_priv->comm_page);
6317+ dev_priv->comm_page = NULL;
6318+ }
6319+}
6320+
6321+void psb_clockgating(struct drm_psb_private *dev_priv)
6322+{
6323+ uint32_t clock_gating;
6324+
6325+ if (dev_priv->uopt.clock_gating == 1) {
6326+ PSB_DEBUG_INIT("Disabling clock gating.\n");
6327+
6328+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6329+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
6330+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6331+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
6332+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6333+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
6334+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6335+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
6336+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6337+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
6338+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6339+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
6340+
6341+ } else if (dev_priv->uopt.clock_gating == 2) {
6342+ PSB_DEBUG_INIT("Enabling clock gating.\n");
6343+
6344+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO <<
6345+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
6346+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
6347+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
6348+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
6349+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
6350+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
6351+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
6352+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
6353+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
6354+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
6355+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
6356+ } else
6357+ clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
6358+
6359+#ifdef FIX_TG_2D_CLOCKGATE
6360+ clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK;
6361+ clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
6362+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT);
6363+#endif
6364+ PSB_WSGX32(clock_gating, PSB_CR_CLKGATECTL);
6365+ (void) PSB_RSGX32(PSB_CR_CLKGATECTL);
6366+}
6367+
6368+#define FB_REG06 0xD0810600
6369+#define FB_MIPI_DISABLE BIT11
6370+#define FB_REG09 0xD0810900
6371+#define FB_SKU_MASK (BIT12|BIT13|BIT14)
6372+#define FB_SKU_SHIFT 12
6373+#define FB_SKU_100 0
6374+#define FB_SKU_100L 1
6375+#define FB_SKU_83 2
6376+#if 1 /* FIXME remove it after PO */
6377+#define FB_GFX_CLK_DIVIDE_MASK (BIT20|BIT21|BIT22)
6378+#define FB_GFX_CLK_DIVIDE_SHIFT 20
6379+#define FB_VED_CLK_DIVIDE_MASK (BIT23|BIT24)
6380+#define FB_VED_CLK_DIVIDE_SHIFT 23
6381+#define FB_VEC_CLK_DIVIDE_MASK (BIT25|BIT26)
6382+#define FB_VEC_CLK_DIVIDE_SHIFT 25
6383+#endif /* FIXME remove it after PO */
6384+
6385+
6386+void mrst_get_fuse_settings(struct drm_psb_private *dev_priv)
6387+{
6388+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
6389+ uint32_t fuse_value = 0;
6390+ uint32_t fuse_value_tmp = 0;
6391+
6392+ pci_write_config_dword(pci_root, 0xD0, FB_REG06);
6393+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
6394+
6395+ dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
6396+
6397+ DRM_INFO("internal display is %s\n",
6398+ dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
6399+
6400+ pci_write_config_dword(pci_root, 0xD0, FB_REG09);
6401+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
6402+
6403+ DRM_INFO("SKU values is 0x%x. \n", fuse_value);
6404+ fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
6405+
6406+ dev_priv->fuse_reg_value = fuse_value;
6407+
6408+ switch (fuse_value_tmp) {
6409+ case FB_SKU_100:
6410+ DRM_INFO("SKU values is SKU_100. LNC core clock is 200MHz. \n");
6411+ dev_priv->sku_100 = true;
6412+ break;
6413+ case FB_SKU_100L:
6414+ DRM_INFO("SKU values is SKU_100L. LNC core clock is 100MHz. \n");
6415+ dev_priv->sku_100L = true;
6416+ break;
6417+ case FB_SKU_83:
6418+ DRM_INFO("SKU values is SKU_83. LNC core clock is 166MHz. \n");
6419+ dev_priv->sku_83 = true;
6420+ break;
6421+ default:
6422+ DRM_ERROR("Invalid SKU values, SKU value = 0x%08x\n",
6423+ fuse_value_tmp);
6424+ }
6425+
6426+#if 1 /* FIXME remove it after PO */
6427+ fuse_value_tmp = (fuse_value & FB_GFX_CLK_DIVIDE_MASK) >> FB_GFX_CLK_DIVIDE_SHIFT;
6428+
6429+ switch (fuse_value_tmp) {
6430+ case 0:
6431+ DRM_INFO("Gfx clk : core clk = 1:1. \n");
6432+ break;
6433+ case 1:
6434+ DRM_INFO("Gfx clk : core clk = 4:3. \n");
6435+ break;
6436+ case 2:
6437+ DRM_INFO("Gfx clk : core clk = 8:5. \n");
6438+ break;
6439+ case 3:
6440+ DRM_INFO("Gfx clk : core clk = 2:1. \n");
6441+ break;
6442+ case 4:
6443+ DRM_INFO("Gfx clk : core clk = 16:7. \n");
6444+ break;
6445+ case 5:
6446+ DRM_INFO("Gfx clk : core clk = 8:3. \n");
6447+ break;
6448+ case 6:
6449+ DRM_INFO("Gfx clk : core clk = 16:5. \n");
6450+ break;
6451+ case 7:
6452+ DRM_INFO("Gfx clk : core clk = 4:1. \n");
6453+ break;
6454+ default:
6455+ DRM_ERROR("Invalid GFX CLK DIVIDE values, value = 0x%08x\n",
6456+ fuse_value_tmp);
6457+ }
6458+
6459+ fuse_value_tmp = (fuse_value & FB_VED_CLK_DIVIDE_MASK) >> FB_VED_CLK_DIVIDE_SHIFT;
6460+
6461+ switch (fuse_value_tmp) {
6462+ case 0:
6463+ DRM_INFO("Ved clk : core clk = 1:1. \n");
6464+ break;
6465+ case 1:
6466+ DRM_INFO("Ved clk : core clk = 4:3. \n");
6467+ break;
6468+ case 2:
6469+ DRM_INFO("Ved clk : core clk = 8:5. \n");
6470+ break;
6471+ case 3:
6472+ DRM_INFO("Ved clk : core clk = 2:1. \n");
6473+ break;
6474+ default:
6475+ DRM_ERROR("Invalid VED CLK DIVIDE values, value = 0x%08x\n",
6476+ fuse_value_tmp);
6477+ }
6478+
6479+ fuse_value_tmp = (fuse_value & FB_VEC_CLK_DIVIDE_MASK) >> FB_VEC_CLK_DIVIDE_SHIFT;
6480+
6481+ switch (fuse_value_tmp) {
6482+ case 0:
6483+ DRM_INFO("Vec clk : core clk = 1:1. \n");
6484+ break;
6485+ case 1:
6486+ DRM_INFO("Vec clk : core clk = 4:3. \n");
6487+ break;
6488+ case 2:
6489+ DRM_INFO("Vec clk : core clk = 8:5. \n");
6490+ break;
6491+ case 3:
6492+ DRM_INFO("Vec clk : core clk = 2:1. \n");
6493+ break;
6494+ default:
6495+ DRM_ERROR("Invalid VEC CLK DIVIDE values, value = 0x%08x\n",
6496+ fuse_value_tmp);
6497+ }
6498+#endif /* FIXME remove it after PO */
6499+
6500+ return;
6501+}
6502+
6503+bool mrst_get_vbt_data(struct drm_psb_private *dev_priv)
6504+{
6505+ struct mrst_vbt *pVBT = &dev_priv->vbt_data;
6506+ u32 platform_config_address;
6507+ u8 *pVBT_virtual;
6508+ u8 bpi;
6509+ void *pGCT;
6510+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0,PCI_DEVFN(2,0));
6511+
6512+ /*get the address of the platform config vbt, B0:D2:F0;0xFC */
6513+ pci_read_config_dword(pci_gfx_root,0xFC,&platform_config_address);
6514+ DRM_INFO("drm platform config address is %x\n",platform_config_address);
6515+
6516+ /* check for platform config address == 0. */
6517+ /* this means fw doesn't support vbt */
6518+
6519+ if(platform_config_address == 0) {
6520+ pVBT->Size = 0;
6521+ return false;
6522+ }
6523+
6524+ /* get the virtual address of the vbt */
6525+ pVBT_virtual = ioremap(platform_config_address, sizeof(*pVBT));
6526+
6527+ memcpy(pVBT, pVBT_virtual, sizeof(*pVBT));
6528+ iounmap(pVBT_virtual); /* Free virtual address space */
6529+
6530+ printk(KERN_ALERT "GCT Revision is %x\n",pVBT->Revision);
6531+ pVBT->mrst_gct = NULL;
6532+ pVBT->mrst_gct = ioremap(platform_config_address + sizeof(*pVBT) - 4,
6533+ pVBT->Size - sizeof(*pVBT) + 4);
6534+ pGCT = pVBT->mrst_gct;
6535+
6536+ switch (pVBT->Revision) {
6537+ case 0:
6538+ bpi = ((struct mrst_gct_v1 *)pGCT)->PD.BootPanelIndex;
6539+ dev_priv->gct_data.bpi = bpi;
6540+ dev_priv->gct_data.pt =
6541+ ((struct mrst_gct_v1 *)pGCT)->PD.PanelType;
6542+ memcpy(&dev_priv->gct_data.DTD,
6543+ &((struct mrst_gct_v1 *)pGCT)->panel[bpi].DTD,
6544+ sizeof(struct mrst_timing_info));
6545+ dev_priv->gct_data.Panel_Port_Control =
6546+ ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
6547+ break;
6548+ case 1:
6549+ bpi = ((struct mrst_gct_v2 *)pGCT)->PD.BootPanelIndex;
6550+ dev_priv->gct_data.bpi = bpi;
6551+ dev_priv->gct_data.pt =
6552+ ((struct mrst_gct_v2 *)pGCT)->PD.PanelType;
6553+ memcpy(&dev_priv->gct_data.DTD,
6554+ &((struct mrst_gct_v2 *)pGCT)->panel[bpi].DTD,
6555+ sizeof(struct mrst_timing_info));
6556+ dev_priv->gct_data.Panel_Port_Control =
6557+ ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
6558+ break;
6559+ default:
6560+ printk(KERN_ALERT "Unknown revision of GCT!\n");
6561+ pVBT->Size = 0;
6562+ return false;
6563+ }
6564+
6565+ return true;
6566+}
6567+
6568+int mrst_get_ospm_io(struct drm_psb_private *dev_priv)
6569+{
6570+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
6571+ uint32_t ospm_base = 0;
6572+
6573+ pci_write_config_dword(pci_root, 0xD0, 0xd0047800);
6574+ pci_read_config_dword(pci_root, 0xD4, &ospm_base);
6575+
6576+ dev_priv->ospm_base = ospm_base & 0x0ffff;
6577+
6578+ DRM_INFO("ospm base is %x\n", dev_priv->ospm_base);
6579+
6580+ return 0;
6581+}
6582+
6583+static int psb_do_init(struct drm_device *dev)
6584+{
6585+ struct drm_psb_private *dev_priv =
6586+ (struct drm_psb_private *) dev->dev_private;
6587+ struct ttm_bo_device *bdev = &dev_priv->bdev;
6588+ struct psb_gtt *pg = dev_priv->pg;
6589+
6590+ uint32_t stolen_gtt;
6591+ uint32_t tt_start;
6592+ uint32_t tt_pages;
6593+
6594+ int ret = -ENOMEM;
6595+
6596+ dev_priv->ta_mem_pages =
6597+ PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024,
6598+ PAGE_SIZE) >> PAGE_SHIFT;
6599+ dev_priv->comm_page = alloc_page(GFP_KERNEL);
6600+ if (!dev_priv->comm_page)
6601+ goto out_err;
6602+
6603+ dev_priv->comm = kmap(dev_priv->comm_page);
6604+ memset((void *) dev_priv->comm, 0, PAGE_SIZE);
6605+
6606+ set_pages_uc(dev_priv->comm_page, 1);
6607+
6608+ /*
6609+ * Initialize sequence numbers for the different command
6610+ * submission mechanisms.
6611+ */
6612+
6613+ dev_priv->sequence[PSB_ENGINE_2D] = 0;
6614+ dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0;
6615+ dev_priv->sequence[PSB_ENGINE_TA] = 0;
6616+ dev_priv->sequence[PSB_ENGINE_HPRAST] = 0;
6617+
6618+ if (pg->gatt_start & 0x0FFFFFFF) {
6619+ DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
6620+ ret = -EINVAL;
6621+ goto out_err;
6622+ }
6623+
6624+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
6625+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
6626+ stolen_gtt =
6627+ (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
6628+
6629+ dev_priv->gatt_free_offset = pg->gatt_start +
6630+ (stolen_gtt << PAGE_SHIFT) * 1024;
6631+
6632+ /*
6633+ * Insert a cache-coherent communications page in mmu space
6634+ * just after the stolen area. Will be used for fencing etc.
6635+ */
6636+
6637+ dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset;
6638+ dev_priv->gatt_free_offset += PAGE_SIZE;
6639+
6640+ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
6641+ &dev_priv->comm_page,
6642+ dev_priv->comm_mmu_offset, 1, 0, 0, 0);
6643+
6644+ if (ret)
6645+ goto out_err;
6646+
6647+ if (1 || drm_debug) {
6648+ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
6649+ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
6650+ DRM_INFO("SGX core id = 0x%08x\n", core_id);
6651+ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
6652+ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
6653+ _PSB_CC_REVISION_MAJOR_SHIFT,
6654+ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
6655+ _PSB_CC_REVISION_MINOR_SHIFT);
6656+ DRM_INFO
6657+ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
6658+ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
6659+ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
6660+ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
6661+ _PSB_CC_REVISION_DESIGNER_SHIFT);
6662+ }
6663+
6664+ spin_lock_init(&dev_priv->irqmask_lock);
6665+ dev_priv->fence0_irq_on = 0;
6666+
6667+ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
6668+ pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
6669+ tt_start = dev_priv->gatt_free_offset - pg->gatt_start;
6670+ tt_pages -= tt_start >> PAGE_SHIFT;
6671+
6672+ dev_priv->sizes.ta_mem_size = drm_psb_ta_mem_size / 1024;
6673+
6674+ if (!ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0,
6675+ pg->vram_stolen_size >> PAGE_SHIFT)) {
6676+ dev_priv->have_vram = 1;
6677+ dev_priv->sizes.vram_size =
6678+ pg->vram_stolen_size / (1024 * 1024);
6679+ }
6680+
6681+ if (IS_MRST(dev) &&
6682+ (dev_priv->ci_region_size != 0) &&
6683+ !ttm_bo_init_mm(bdev, TTM_PL_CI, 0,
6684+ dev_priv->ci_region_size >> PAGE_SHIFT)) {
6685+ dev_priv->have_camera = 1;
6686+ }
6687+
6688+ /* since there is always rar region for video, it is ok */
6689+ if (IS_MRST(dev) &&
6690+ (dev_priv->rar_region_size != 0) &&
6691+ !ttm_bo_init_mm(bdev, TTM_PL_RAR, 0,
6692+ dev_priv->rar_region_size >> PAGE_SHIFT)) {
6693+ dev_priv->have_rar = 1;
6694+ }
6695+
6696+ if (!ttm_bo_init_mm(bdev, TTM_PL_TT, tt_start >> PAGE_SHIFT,
6697+ tt_pages)) {
6698+ dev_priv->have_tt = 1;
6699+ dev_priv->sizes.tt_size =
6700+ (tt_pages << PAGE_SHIFT) / (1024 * 1024);
6701+ }
6702+
6703+ if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_MMU, 0x00000000,
6704+ (pg->gatt_start - PSB_MEM_MMU_START -
6705+ pg->ci_stolen_size) >> PAGE_SHIFT)) {
6706+ dev_priv->have_mem_mmu = 1;
6707+ dev_priv->sizes.mmu_size =
6708+ (pg->gatt_start - PSB_MEM_MMU_START - pg->ci_stolen_size) /
6709+ (1024*1024);
6710+ }
6711+
6712+ if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_RASTGEOM, 0x00000000,
6713+ (PSB_MEM_MMU_START -
6714+ PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) {
6715+ dev_priv->have_mem_rastgeom = 1;
6716+ dev_priv->sizes.rastgeom_size =
6717+ (PSB_MEM_MMU_START - PSB_MEM_RASTGEOM_START) /
6718+ (1024 * 1024);
6719+ }
6720+#if 0
6721+ if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) {
6722+ if (!ttm_bo_init_mm
6723+ (bdev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT,
6724+ pg->gatt_pages - PSB_TT_PRIV0_PLIMIT, 1)) {
6725+ dev_priv->have_mem_aper = 1;
6726+ }
6727+ }
6728+#endif
6729+
6730+ PSB_DEBUG_INIT("Init MSVDX\n");
6731+ psb_msvdx_init(dev);
6732+
6733+ if (IS_MRST(dev)) {
6734+ PSB_DEBUG_INIT("Init Topaz\n");
6735+ lnc_topaz_init(dev);
6736+ }
6737+
6738+ return 0;
6739+out_err:
6740+ psb_do_takedown(dev);
6741+ return ret;
6742+}
6743+
6744+static int psb_intel_opregion_init(struct drm_device *dev)
6745+{
6746+ struct drm_psb_private * dev_priv = dev->dev_private;
6747+ /*struct psb_intel_opregion * opregion = &dev_priv->opregion;*/
6748+ u32 opregion_phy;
6749+ void * base;
6750+ u32 * lid_state;
6751+
6752+ dev_priv->lid_state = NULL;
6753+
6754+ pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
6755+ if(opregion_phy == 0) {
6756+ DRM_DEBUG("Opregion not supported, won't support lid-switch\n");
6757+ return -ENOTSUPP;
6758+ }
6759+ DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
6760+
6761+ base = ioremap(opregion_phy, 8*1024);
6762+ if(!base) {
6763+ return -ENOMEM;
6764+ }
6765+
6766+ lid_state = base + 0x01ac;
6767+
6768+ DRM_DEBUG("Lid switch state 0x%08x\n", *lid_state);
6769+
6770+ dev_priv->lid_state = lid_state;
6771+ dev_priv->lid_last_state = *lid_state;
6772+ return 0;
6773+}
6774+
6775+#if 0
6776+/**
6777+ * Get a section from BDB by section id, port from i915 driver
6778+ */
6779+static void * psb_intel_vbt_find_section(struct bdb_header * bdb, int section_id)
6780+{
6781+ u8 * base = (u8 *)bdb;
6782+ int index = 0;
6783+ u16 total, current_size;
6784+ u8 current_id;
6785+
6786+ index += bdb->header_size;
6787+ total = bdb->bdb_size;
6788+
6789+ while(index < total) {
6790+ current_id = *(base + index);
6791+ index++;
6792+ current_size = *((u16 *)(base + index));
6793+ index += 2;
6794+ if(current_id == section_id)
6795+ return base + index;
6796+ index += current_size;
6797+ }
6798+
6799+ return NULL;
6800+}
6801+
6802+static void psb_intel_vbt_parse_backlight_data(struct drm_psb_private * dev_priv, struct bdb_header * bdb)
6803+{
6804+ struct bdb_lvds_backlight * lvds_bl = NULL;
6805+ u8 p_type = 0;
6806+ void * bl_start = NULL;
6807+ struct bdb_lvds_options * lvds_opts
6808+ = psb_intel_vbt_find_section(bdb, BDB_LVDS_OPTIONS);
6809+
6810+ if(lvds_opts) {
6811+ DRM_DEBUG("lvds_options found at %p\n", lvds_opts);
6812+ p_type = lvds_opts->panel_type;
6813+ } else {
6814+ DRM_DEBUG("no lvds_options\n");
6815+ }
6816+
6817+ bl_start = psb_intel_vbt_find_section(bdb, BDB_LVDS_BACKLIGHT);
6818+ lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
6819+
6820+ dev_priv->lvds_bl = lvds_bl;
6821+}
6822+
6823+/**
6824+ * Retrieve VBT and blc data. port from i915 driver
6825+ */
6826+static int psb_get_vbt_data(struct drm_device * dev)
6827+{
6828+ struct drm_psb_private * dev_priv = dev->dev_private;
6829+ struct pci_dev * pdev = dev->pdev;
6830+ struct vbt_header * vbt = NULL;
6831+ struct bdb_header * bdb;
6832+ u8 __iomem * bios;
6833+
6834+ size_t size;
6835+ int i;
6836+
6837+ /*FIXME: unmap it when driver exit*/
6838+ bios = pci_map_rom(pdev, &size);
6839+ if(!bios)
6840+ return -1;
6841+
6842+ for(i=0; i + 4 < size; i++) {
6843+ if(!memcmp(bios + i, "$VBT", 4)) {
6844+ vbt = (struct vbt_header *)(bios + i);
6845+ break;
6846+ }
6847+ }
6848+
6849+ if(!vbt) {
6850+ DRM_ERROR("VBT sigature missing\n");
6851+ pci_unmap_rom(pdev, bios);
6852+ return -1;
6853+ }
6854+
6855+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
6856+
6857+ psb_intel_vbt_parse_backlight_data(dev_priv, bdb);
6858+
6859+ DRM_INFO("BIOS Data Block found at %p\n", bdb);
6860+
6861+ return 0;
6862+}
6863+#endif
6864+
6865+static int psb_driver_unload(struct drm_device *dev)
6866+{
6867+ struct drm_psb_private *dev_priv =
6868+ (struct drm_psb_private *) dev->dev_private;
6869+
6870+ psb_backlight_exit(); /*writes minimum value to backlight HW reg */
6871+
6872+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
6873+ drm_irq_uninstall(dev);
6874+ }
6875+
6876+ if (drm_psb_no_fb == 0)
6877+ psb_modeset_cleanup(dev);
6878+
6879+ if (dev_priv) {
6880+ struct ttm_bo_device *bdev = &dev_priv->bdev;
6881+
6882+ if(IS_POULSBO(dev))
6883+ psb_lid_timer_takedown(dev_priv);
6884+
6885+ psb_watchdog_takedown(dev_priv);
6886+ psb_do_takedown(dev);
6887+ psb_xhw_takedown(dev_priv);
6888+ psb_scheduler_takedown(&dev_priv->scheduler);
6889+
6890+ if (dev_priv->have_mem_pds) {
6891+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_PDS);
6892+ dev_priv->have_mem_pds = 0;
6893+ }
6894+ if (dev_priv->have_mem_kernel) {
6895+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_KERNEL);
6896+ dev_priv->have_mem_kernel = 0;
6897+ }
6898+
6899+ if (dev_priv->pf_pd) {
6900+ psb_mmu_free_pagedir(dev_priv->pf_pd);
6901+ dev_priv->pf_pd = NULL;
6902+ }
6903+ if (dev_priv->mmu) {
6904+ struct psb_gtt *pg = dev_priv->pg;
6905+
6906+ down_read(&pg->sem);
6907+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
6908+ (dev_priv->mmu),
6909+ pg->gatt_start,
6910+ pg->vram_stolen_size >>
6911+ PAGE_SHIFT);
6912+ if (pg->ci_stolen_size != 0)
6913+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
6914+ (dev_priv->mmu),
6915+ pg->gatt_start - pg->ci_stolen_size,
6916+ pg->ci_stolen_size >>
6917+ PAGE_SHIFT);
6918+ if (pg->rar_stolen_size != 0)
6919+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
6920+ (dev_priv->mmu),
6921+ pg->gatt_start + pg->vram_stolen_size,
6922+ pg->rar_stolen_size >>
6923+ PAGE_SHIFT);
6924+ up_read(&pg->sem);
6925+ psb_mmu_driver_takedown(dev_priv->mmu);
6926+ dev_priv->mmu = NULL;
6927+ }
6928+ psb_gtt_takedown(dev_priv->pg, 1);
6929+ if (dev_priv->scratch_page) {
6930+ __free_page(dev_priv->scratch_page);
6931+ dev_priv->scratch_page = NULL;
6932+ }
6933+ if (dev_priv->has_bo_device) {
6934+ ttm_bo_device_release(&dev_priv->bdev);
6935+ dev_priv->has_bo_device = 0;
6936+ }
6937+ if (dev_priv->has_fence_device) {
6938+ ttm_fence_device_release(&dev_priv->fdev);
6939+ dev_priv->has_fence_device = 0;
6940+ }
6941+ if (dev_priv->vdc_reg) {
6942+ iounmap(dev_priv->vdc_reg);
6943+ dev_priv->vdc_reg = NULL;
6944+ }
6945+ if (dev_priv->sgx_reg) {
6946+ iounmap(dev_priv->sgx_reg);
6947+ dev_priv->sgx_reg = NULL;
6948+ }
6949+ if (dev_priv->msvdx_reg) {
6950+ iounmap(dev_priv->msvdx_reg);
6951+ dev_priv->msvdx_reg = NULL;
6952+ }
6953+
6954+ if (IS_MRST(dev)) {
6955+ if (dev_priv->topaz_reg) {
6956+ iounmap(dev_priv->topaz_reg);
6957+ dev_priv->topaz_reg = NULL;
6958+ }
6959+ }
6960+
6961+ if (dev_priv->tdev)
6962+ ttm_object_device_release(&dev_priv->tdev);
6963+
6964+ if (dev_priv->has_global)
6965+ psb_ttm_global_release(dev_priv);
6966+
6967+ kfree(dev_priv);
6968+ dev->dev_private = NULL;
6969+
6970+ /*destory VBT data*/
6971+ if(IS_POULSBO(dev))
6972+ psb_intel_destory_bios(dev);
6973+ }
6974+
6975+ powermgmt_shutdown();
6976+
6977+ return 0;
6978+}
6979+
6980+
6981+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
6982+{
6983+ struct drm_psb_private *dev_priv;
6984+ struct ttm_bo_device *bdev;
6985+ unsigned long resource_start;
6986+ struct psb_gtt *pg;
6987+ unsigned long irqflags;
6988+ int ret = -ENOMEM;
6989+
6990+ DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
6991+
6992+ if (IS_MRST(dev))
6993+ DRM_INFO("Run drivers on Moorestown platform!\n");
6994+ else
6995+ DRM_INFO("Run drivers on Poulsbo platform!\n");
6996+
6997+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
6998+ if (dev_priv == NULL)
6999+ return -ENOMEM;
7000+
7001+ dev_priv->dev = dev;
7002+ bdev = &dev_priv->bdev;
7003+
7004+ psb_init_ospm(dev_priv);
7005+
7006+ ret = psb_ttm_global_init(dev_priv);
7007+ if (unlikely(ret != 0))
7008+ goto out_err;
7009+ dev_priv->has_global = 1;
7010+
7011+ dev_priv->tdev = ttm_object_device_init
7012+ (dev_priv->mem_global_ref.object, PSB_OBJECT_HASH_ORDER);
7013+ if (unlikely(dev_priv->tdev == NULL))
7014+ goto out_err;
7015+
7016+ powermgmt_init();
7017+
7018+ mutex_init(&dev_priv->temp_mem);
7019+ mutex_init(&dev_priv->cmdbuf_mutex);
7020+ mutex_init(&dev_priv->reset_mutex);
7021+ INIT_LIST_HEAD(&dev_priv->context.validate_list);
7022+ INIT_LIST_HEAD(&dev_priv->context.kern_validate_list);
7023+ psb_init_disallowed();
7024+
7025+#ifdef FIX_TG_16
7026+ atomic_set(&dev_priv->lock_2d, 0);
7027+ atomic_set(&dev_priv->ta_wait_2d, 0);
7028+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
7029+ atomic_set(&dev_priv->waiters_2d, 0);;
7030+ DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
7031+#else
7032+ mutex_init(&dev_priv->mutex_2d);
7033+#endif
7034+
7035+ spin_lock_init(&dev_priv->reloc_lock);
7036+
7037+ DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
7038+ DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
7039+
7040+ dev->dev_private = (void *) dev_priv;
7041+ dev_priv->chipset = chipset;
7042+ psb_set_uopt(&dev_priv->uopt);
7043+
7044+ PSB_DEBUG_GENERAL("Init watchdog and scheduler\n");
7045+ psb_watchdog_init(dev_priv);
7046+ psb_scheduler_init(dev, &dev_priv->scheduler);
7047+
7048+
7049+ PSB_DEBUG_INIT("Mapping MMIO\n");
7050+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
7051+
7052+ if (IS_MRST(dev))
7053+ dev_priv->msvdx_reg =
7054+ ioremap(resource_start + MRST_MSVDX_OFFSET,
7055+ PSB_MSVDX_SIZE);
7056+ else
7057+ dev_priv->msvdx_reg =
7058+ ioremap(resource_start + PSB_MSVDX_OFFSET,
7059+ PSB_MSVDX_SIZE);
7060+
7061+ if (!dev_priv->msvdx_reg)
7062+ goto out_err;
7063+
7064+ if (IS_MRST(dev)) {
7065+ dev_priv->topaz_reg =
7066+ ioremap(resource_start + LNC_TOPAZ_OFFSET,
7067+ LNC_TOPAZ_SIZE);
7068+ if (!dev_priv->topaz_reg)
7069+ goto out_err;
7070+ }
7071+
7072+ dev_priv->vdc_reg =
7073+ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
7074+ if (!dev_priv->vdc_reg)
7075+ goto out_err;
7076+
7077+ if (IS_MRST(dev))
7078+ dev_priv->sgx_reg =
7079+ ioremap(resource_start + MRST_SGX_OFFSET,
7080+ PSB_SGX_SIZE);
7081+ else
7082+ dev_priv->sgx_reg =
7083+ ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
7084+
7085+ if (!dev_priv->sgx_reg)
7086+ goto out_err;
7087+
7088+ if (IS_MRST(dev)){
7089+ mrst_get_fuse_settings(dev_priv);
7090+ mrst_get_vbt_data(dev_priv);
7091+ } else {
7092+ psb_intel_opregion_init(dev);
7093+ psb_intel_init_bios(dev);
7094+ }
7095+
7096+ PSB_DEBUG_INIT("Init TTM fence and BO driver\n");
7097+
7098+ if (IS_MRST(dev))
7099+ mrst_get_ospm_io(dev_priv);
7100+
7101+ if (IS_MRST(dev)) {
7102+ get_ci_info(dev_priv);
7103+ get_rar_info(dev_priv);
7104+ }
7105+
7106+ psb_clockgating(dev_priv);
7107+
7108+ ret = psb_ttm_fence_device_init(&dev_priv->fdev);
7109+ if (unlikely(ret != 0))
7110+ goto out_err;
7111+
7112+ dev_priv->has_fence_device = 1;
7113+ ret = ttm_bo_device_init(bdev,
7114+ dev_priv->mem_global_ref.object,
7115+ &psb_ttm_bo_driver,
7116+ DRM_PSB_FILE_PAGE_OFFSET);
7117+ if (unlikely(ret != 0))
7118+ goto out_err;
7119+ dev_priv->has_bo_device = 1;
7120+ ttm_lock_init(&dev_priv->ttm_lock);
7121+
7122+ ret = -ENOMEM;
7123+
7124+ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
7125+ if (!dev_priv->scratch_page)
7126+ goto out_err;
7127+
7128+ set_pages_uc(dev_priv->scratch_page, 1);
7129+
7130+ dev_priv->pg = psb_gtt_alloc(dev);
7131+ if (!dev_priv->pg)
7132+ goto out_err;
7133+
7134+ ret = psb_gtt_init(dev_priv->pg, 0);
7135+ if (ret)
7136+ goto out_err;
7137+
7138+ dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg,
7139+ drm_psb_trap_pagefaults, 0,
7140+ dev_priv);
7141+ if (!dev_priv->mmu)
7142+ goto out_err;
7143+
7144+ pg = dev_priv->pg;
7145+
7146+ /*
7147+ * Make sgx MMU aware of the stolen memory area we call VRAM.
7148+ */
7149+
7150+ down_read(&pg->sem);
7151+ ret =
7152+ psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
7153+ (dev_priv->mmu),
7154+ pg->stolen_base >> PAGE_SHIFT,
7155+ pg->gatt_start,
7156+ pg->vram_stolen_size >> PAGE_SHIFT, 0);
7157+ up_read(&pg->sem);
7158+ if (ret)
7159+ goto out_err;
7160+
7161+ /*
7162+ * Make sgx MMU aware of the CI stolen memory area.
7163+ */
7164+ if (dev_priv->pg->ci_stolen_size != 0) {
7165+ down_read(&pg->sem);
7166+ ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
7167+ (dev_priv->mmu),
7168+ dev_priv->ci_region_start >> PAGE_SHIFT,
7169+ pg->gatt_start - pg->ci_stolen_size,
7170+ pg->ci_stolen_size >> PAGE_SHIFT, 0);
7171+ up_read(&pg->sem);
7172+ if (ret)
7173+ goto out_err;
7174+ }
7175+
7176+ /*
7177+ * Make sgx MMU aware of the rar stolen memory area.
7178+ */
7179+ if (dev_priv->pg->rar_stolen_size != 0) {
7180+ down_read(&pg->sem);
7181+ ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
7182+ dev_priv->rar_region_start >> PAGE_SHIFT,
7183+ pg->gatt_start + pg->vram_stolen_size,
7184+ pg->rar_stolen_size >> PAGE_SHIFT, 0);
7185+ up_read(&pg->sem);
7186+ if (ret)
7187+ goto out_err;
7188+ }
7189+
7190+ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
7191+ if (!dev_priv->pf_pd)
7192+ goto out_err;
7193+
7194+ /*
7195+ * Make all presumably unused requestors page-fault by making them
7196+ * use context 1 which does not have any valid mappings.
7197+ */
7198+
7199+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
7200+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
7201+ PSB_RSGX32(PSB_CR_BIF_BANK1);
7202+
7203+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
7204+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
7205+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
7206+
7207+ psb_init_2d(dev_priv);
7208+
7209+ ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_KERNEL, 0x00000000,
7210+ (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START)
7211+ >> PAGE_SHIFT);
7212+ if (ret)
7213+ goto out_err;
7214+ dev_priv->have_mem_kernel = 1;
7215+
7216+ ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_PDS, 0x00000000,
7217+ (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START)
7218+ >> PAGE_SHIFT);
7219+ if (ret)
7220+ goto out_err;
7221+ dev_priv->have_mem_pds = 1;
7222+ dev_priv->sizes.pds_size =
7223+ (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START) / (1024 * 1024);
7224+ PSB_DEBUG_INIT("Begin to init SGX/MSVDX/Topaz\n");
7225+
7226+ ret = psb_do_init(dev);
7227+ if (ret)
7228+ return ret;
7229+
7230+ ret = psb_xhw_init(dev);
7231+ if (ret)
7232+ return ret;
7233+
7234+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
7235+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
7236+
7237+ /**
7238+ * Init lid switch timer.
7239+ * NOTE: must do this after psb_intel_opregion_init
7240+ * and psb_backlight_init
7241+ */
7242+ if(IS_POULSBO(dev) && dev_priv->lid_state) {
7243+ psb_lid_timer_init(dev_priv);
7244+ }
7245+
7246+ /*initialize the MSI for MRST*/
7247+ if (IS_MRST(dev)) {
7248+ if (pci_enable_msi(dev->pdev)) {
7249+ DRM_ERROR("Enable MSI for MRST failed!\n");
7250+ } else {
7251+ PSB_DEBUG_INIT("Enabled MSI IRQ (%d)\n",
7252+ dev->pdev->irq);
7253+ /* pci_write_config_word(pdev, 0x04, 0x07); */
7254+ }
7255+ }
7256+
7257+ //Init vblank module in DRM. Must be done before call to drm_irq_install()
7258+ ret = drm_vblank_init(dev, PSB_NUM_PIPE);
7259+ if (ret)
7260+ goto out_err;
7261+
7262+ /*
7263+ * Install interrupt handlers prior to powering off SGX or else we will
7264+ * crash.
7265+ */
7266+ dev_priv->vdc_irq_mask = 0;
7267+ dev_priv->sgx_irq_mask = 0;
7268+ dev_priv->sgx2_irq_mask = 0;
7269+ dev_priv->pipestat[0] = 0;
7270+ dev_priv->pipestat[1] = 0;
7271+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
7272+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
7273+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
7274+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
7275+ if (drm_core_check_feature(dev, DRIVER_MODESET))
7276+ drm_irq_install(dev);
7277+#if 0
7278+ /*set SGX in low power mode*/
7279+ if (drm_psb_ospm && IS_MRST(dev))
7280+ if (psb_try_power_down_sgx(dev))
7281+ PSB_DEBUG_PM("initialize SGX to low power failed\n");
7282+ if (IS_MRST(dev))
7283+ if (psb_try_power_down_msvdx(dev))
7284+ PSB_DEBUG_PM("Initialize MSVDX to low power failed\n");
7285+ if (IS_MRST(dev)) {
7286+ if (psb_try_power_down_topaz(dev))
7287+ PSB_DEBUG_PM("Initialize TOPAZ to low power failed\n");
7288+ }
7289+#endif
7290+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
7291+
7292+ dev->driver->get_vblank_counter = psb_get_vblank_counter;
7293+
7294+ ret = drm_vblank_init(dev, PSB_NUM_PIPE);
7295+
7296+ if (drm_psb_no_fb == 0) {
7297+ psb_modeset_init(dev);
7298+ drm_helper_initial_config(dev);
7299+ }
7300+
7301+ /*must be after mrst_get_fuse_settings()*/
7302+ ret = psb_backlight_init(dev);
7303+ if (ret)
7304+ return ret;
7305+
7306+ /*dri_page_flipping is set when flipping is enabled*/
7307+ dev_priv->dri_page_flipping = 0;
7308+
7309+ return 0;
7310+out_err:
7311+ psb_driver_unload(dev);
7312+ return ret;
7313+}
7314+
7315+int psb_driver_device_is_agp(struct drm_device *dev)
7316+{
7317+ return 0;
7318+}
7319+
7320+int psb_extension_ioctl(struct drm_device *dev, void *data,
7321+ struct drm_file *file_priv)
7322+{
7323+ union drm_psb_extension_arg *arg = data;
7324+ struct drm_psb_extension_rep *rep = &arg->rep;
7325+
7326+ /*tricky fix for sgx HW access from user space when XPSB is load*/
7327+ static int firsttime = 1;
7328+ if (firsttime) {
7329+ firsttime = 0;
7330+ powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true);
7331+ }
7332+ if (strcmp(arg->extension, "psb_ttm_placement_alphadrop") == 0) {
7333+ rep->exists = 1;
7334+ rep->driver_ioctl_offset = DRM_PSB_PLACEMENT_OFFSET;
7335+ rep->sarea_offset = 0;
7336+ rep->major = 1;
7337+ rep->minor = 0;
7338+ rep->pl = 0;
7339+ return 0;
7340+ }
7341+ if (strcmp(arg->extension, "psb_ttm_fence_alphadrop") == 0) {
7342+ rep->exists = 1;
7343+ rep->driver_ioctl_offset = DRM_PSB_FENCE_OFFSET;
7344+ rep->sarea_offset = 0;
7345+ rep->major = 1;
7346+ rep->minor = 0;
7347+ rep->pl = 0;
7348+ return 0;
7349+ }
7350+ if (strcmp(arg->extension, "psb_ttm_execbuf_alphadrop") == 0) {
7351+ rep->exists = 1;
7352+ rep->driver_ioctl_offset = DRM_PSB_CMDBUF;
7353+ rep->sarea_offset = 0;
7354+ rep->major = 1;
7355+ rep->minor = 0;
7356+ rep->pl = 0;
7357+ return 0;
7358+ }
7359+
7360+ /*return the page flipping ioctl offset*/
7361+ if (strcmp(arg->extension, "psb_page_flipping_alphadrop") == 0) {
7362+ rep->exists = 1;
7363+ rep->driver_ioctl_offset = DRM_PSB_FLIP;
7364+ rep->sarea_offset = 0;
7365+ rep->major = 1;
7366+ rep->minor = 0;
7367+ rep->pl = 0;
7368+ return 0;
7369+ }
7370+
7371+ /* return the video rar offset */
7372+ if (strcmp(arg->extension, "lnc_video_getparam") == 0) {
7373+ rep->exists = 1;
7374+ rep->driver_ioctl_offset = DRM_LNC_VIDEO_GETPARAM;
7375+ rep->sarea_offset = 0;
7376+ rep->major = 1;
7377+ rep->minor = 0;
7378+ rep->pl = 0;
7379+ return 0;
7380+ }
7381+
7382+ rep->exists = 0;
7383+ return 0;
7384+}
7385+
7386+/*keep following code*/
7387+#if 0
7388+static void psb_display_states_restore(struct drm_device * dev)
7389+{
7390+ struct drm_crtc * crtc = NULL;
7391+ struct drm_connector * connector = NULL;
7392+ struct drm_crtc_helper_funcs * crtc_helper_funcs = NULL;
7393+ struct drm_encoder * encoder = NULL;
7394+ struct drm_encoder_helper_funcs * encoder_helper_funcs = NULL;
7395+ struct drm_psb_private * dev_priv =
7396+ (struct drm_psb_private *)dev->dev_private;
7397+
7398+ mutex_lock(&dev->mode_config.mutex);
7399+#if 0
7400+ /*Output dpms off*/
7401+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
7402+ encoder_helper_funcs =
7403+ (struct drm_encoder_helper_funcs *)encoder->helper_private;
7404+ if(encoder_helper_funcs && encoder_helper_funcs->dpms)
7405+ encoder_helper_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
7406+ }
7407+
7408+ psb_intel_wait_for_vblank(dev);
7409+
7410+ /*CRTC dpms off*/
7411+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7412+ crtc_helper_funcs =
7413+ (struct drm_crtc_helper_funcs *)crtc->helper_private;
7414+ //if(crtc_helper_funcs && crtc_helper_funcs->dpms)
7415+ if(drm_helper_crtc_in_use(crtc))
7416+ crtc_helper_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
7417+ }
7418+
7419+ /*Restore CRTC states*/
7420+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7421+ //if(crtc->funcs && crtc->funcs->restore)
7422+ if(drm_helper_crtc_in_use(crtc))
7423+ crtc->funcs->restore(crtc);
7424+ }
7425+#endif
7426+
7427+ /*Restore outputs states*/
7428+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
7429+ if(connector->funcs && connector->funcs->restore)
7430+ connector->funcs->restore(connector);
7431+ }
7432+
7433+
7434+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7435+ if(drm_helper_crtc_in_use(crtc))
7436+ crtc->funcs->restore(crtc);
7437+ }
7438+
7439+ mutex_unlock(&dev->mode_config.mutex);
7440+
7441+ if(IS_MRST(dev))
7442+ return;
7443+
7444+ REG_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
7445+ REG_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
7446+ REG_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
7447+ REG_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
7448+
7449+ /*TODO: SWF registers restore*/
7450+}
7451+
7452+static void psb_display_states_save(struct drm_device * dev)
7453+{
7454+ struct drm_crtc * crtc = NULL;
7455+ struct drm_connector * connector = NULL;
7456+ struct drm_psb_private * dev_priv =
7457+ (struct drm_psb_private *)dev->dev_private;
7458+
7459+ mutex_lock(&dev->mode_config.mutex);
7460+ /*Save output states*/
7461+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
7462+ if(connector->funcs && connector->funcs->save)
7463+ connector->funcs->save(connector);
7464+ }
7465+
7466+#if 1
7467+ /*Restore CRTC states*/
7468+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7469+ //if(crtc->funcs && crtc->funcs->save)
7470+ if(drm_helper_crtc_in_use(crtc))
7471+ crtc->funcs->save(crtc);
7472+ }
7473+#endif
7474+
7475+ mutex_unlock(&dev->mode_config.mutex);
7476+
7477+ if(IS_MRST(dev))
7478+ return;
7479+
7480+ dev_priv->saveVCLK_DIVISOR_VGA0 = REG_READ(VCLK_DIVISOR_VGA0);
7481+ dev_priv->saveVCLK_DIVISOR_VGA1 = REG_READ(VCLK_DIVISOR_VGA1);
7482+ dev_priv->saveVCLK_POST_DIV = REG_READ(VCLK_POST_DIV);
7483+ dev_priv->saveVGACNTRL = REG_READ(VGACNTRL);
7484+
7485+ /*TODO: save SWF here if necessary*/
7486+}
7487+#endif
7488+
7489+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
7490+ struct drm_file *file_priv)
7491+{
7492+ struct drm_psb_private *dev_priv = psb_priv(dev);
7493+ struct ttm_bo_device *bdev = &dev_priv->bdev;
7494+ struct ttm_mem_type_manager *man;
7495+ int clean;
7496+ int ret;
7497+
7498+ ret = ttm_write_lock(&dev_priv->ttm_lock, 1,
7499+ psb_fpriv(file_priv)->tfile);
7500+ if (unlikely(ret != 0))
7501+ return ret;
7502+
7503+ powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true);
7504+
7505+ /*
7506+ * Clean VRAM and TT for fbdev.
7507+ */
7508+
7509+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
7510+ if (unlikely(ret != 0))
7511+ goto out_unlock;
7512+
7513+ man = &bdev->man[TTM_PL_VRAM];
7514+ spin_lock(&bdev->lru_lock);
7515+ clean = drm_mm_clean(&man->manager);
7516+ spin_unlock(&bdev->lru_lock);
7517+ if (unlikely(!clean))
7518+ DRM_INFO("Notice: VRAM was not clean after VT switch, if you are running fbdev please ignore.\n");
7519+
7520+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_TT);
7521+ if (unlikely(ret != 0))
7522+ goto out_unlock;
7523+
7524+ man = &bdev->man[TTM_PL_TT];
7525+ spin_lock(&bdev->lru_lock);
7526+ clean = drm_mm_clean(&man->manager);
7527+ spin_unlock(&bdev->lru_lock);
7528+ if (unlikely(!clean))
7529+ DRM_INFO("Warning: GATT was not clean after VT switch.\n");
7530+
7531+ ttm_bo_swapout_all(&dev_priv->bdev);
7532+
7533+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
7534+ if (drm_psb_ospm && IS_MRST(dev))
7535+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
7536+ return 0;
7537+out_unlock:
7538+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
7539+ if (drm_psb_ospm && IS_MRST(dev))
7540+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
7541+ (void) ttm_write_unlock(&dev_priv->ttm_lock,
7542+ psb_fpriv(file_priv)->tfile);
7543+ return ret;
7544+}
7545+
7546+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
7547+ struct drm_file *file_priv)
7548+{
7549+ struct drm_psb_private *dev_priv = psb_priv(dev);
7550+ return ttm_write_unlock(&dev_priv->ttm_lock,
7551+ psb_fpriv(file_priv)->tfile);
7552+}
7553+
7554+static int psb_sizes_ioctl(struct drm_device *dev, void *data,
7555+ struct drm_file *file_priv)
7556+{
7557+ struct drm_psb_private *dev_priv = psb_priv(dev);
7558+ struct drm_psb_sizes_arg *arg =
7559+ (struct drm_psb_sizes_arg *) data;
7560+
7561+ *arg = dev_priv->sizes;
7562+ return 0;
7563+}
7564+
7565+static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
7566+ struct drm_file *file_priv)
7567+{
7568+ struct drm_psb_private *dev_priv = psb_priv(dev);
7569+ uint32_t *arg = data;
7570+
7571+ *arg = dev_priv->fuse_reg_value;
7572+ return 0;
7573+}
7574+static int psb_vbt_ioctl(struct drm_device *dev, void *data,
7575+ struct drm_file *file_priv)
7576+{
7577+ struct drm_psb_private *dev_priv = psb_priv(dev);
7578+ struct gct_ioctl_arg *pGCT = data;
7579+
7580+ memcpy(pGCT, &dev_priv->gct_data, sizeof(*pGCT));
7581+
7582+ return 0;
7583+}
7584+
7585+static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
7586+ struct drm_file *file_priv)
7587+{
7588+ uint32_t flags;
7589+ uint32_t obj_id;
7590+ struct drm_mode_object * obj;
7591+ struct drm_connector * connector;
7592+ struct drm_crtc * crtc;
7593+ struct drm_psb_dc_state_arg * arg =
7594+ (struct drm_psb_dc_state_arg *)data;
7595+
7596+ if(IS_MRST(dev))
7597+ return 0;
7598+
7599+ flags = arg->flags;
7600+ obj_id = arg->obj_id;
7601+
7602+ if(flags & PSB_DC_CRTC_MASK) {
7603+ obj = drm_mode_object_find(dev, obj_id,
7604+ DRM_MODE_OBJECT_CRTC);
7605+ if(! obj) {
7606+ DRM_DEBUG("Invalid CRTC object.\n");
7607+ return -EINVAL;
7608+ }
7609+
7610+ crtc = obj_to_crtc(obj);
7611+
7612+ mutex_lock(&dev->mode_config.mutex);
7613+ if(drm_helper_crtc_in_use(crtc)) {
7614+ if(flags & PSB_DC_CRTC_SAVE)
7615+ crtc->funcs->save(crtc);
7616+ else
7617+ crtc->funcs->restore(crtc);
7618+ }
7619+ mutex_unlock(&dev->mode_config.mutex);
7620+
7621+ return 0;
7622+ } else if (flags & PSB_DC_OUTPUT_MASK) {
7623+ obj = drm_mode_object_find(dev, obj_id,
7624+ DRM_MODE_OBJECT_CONNECTOR);
7625+ if(! obj) {
7626+ DRM_DEBUG("Invalid connector id.\n");
7627+ return -EINVAL;
7628+ }
7629+
7630+ connector = obj_to_connector(obj);
7631+ if(flags & PSB_DC_OUTPUT_SAVE)
7632+ connector->funcs->save(connector);
7633+ else
7634+ connector->funcs->restore(connector);
7635+
7636+ return 0;
7637+ }
7638+
7639+ DRM_DEBUG("Bad flags 0x%x\n", flags);
7640+ return -EINVAL;
7641+}
7642+
7643+static int psb_adb_ioctl(struct drm_device *dev, void *data,
7644+ struct drm_file *file_priv)
7645+{
7646+ struct drm_psb_private *dev_priv = psb_priv(dev);
7647+ uint32_t *arg = data;
7648+ struct backlight_device bd;
7649+ dev_priv->blc_adj1 = *arg;
7650+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
7651+ bd.props.brightness = psb_get_brightness(&bd);
7652+ psb_set_brightness(&bd);
7653+#endif
7654+ return 0;
7655+}
7656+
7657+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
7658+ struct drm_file * file_priv)
7659+{
7660+ uint32_t obj_id;
7661+ uint16_t op;
7662+ struct drm_mode_modeinfo * umode;
7663+ struct drm_display_mode * mode;
7664+ struct drm_psb_mode_operation_arg * arg;
7665+ struct drm_mode_object * obj;
7666+ struct drm_connector * connector;
7667+ struct drm_connector_helper_funcs * connector_funcs;
7668+ int ret = 0;
7669+ int resp = MODE_OK;
7670+
7671+ if (IS_MRST(dev))
7672+ return 0;
7673+
7674+ arg = (struct drm_psb_mode_operation_arg *)data;
7675+ obj_id = arg->obj_id;
7676+ op = arg->operation;
7677+ umode = &arg->mode;
7678+
7679+ mutex_lock(&dev->mode_config.mutex);
7680+
7681+ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
7682+ if(!obj) {
7683+ ret = - EINVAL;
7684+ goto mode_op_out;
7685+ }
7686+
7687+ connector = obj_to_connector(obj);
7688+
7689+ mode = drm_mode_create(dev);
7690+ if(!mode) {
7691+ ret = -ENOMEM;
7692+ goto mode_op_out;
7693+ }
7694+
7695+ //drm_crtc_convert_umode(mode, umode);
7696+ {
7697+ mode->clock = umode->clock;
7698+ mode->hdisplay = umode->hdisplay;
7699+ mode->hsync_start = umode->hsync_start;
7700+ mode->hsync_end = umode->hsync_end;
7701+ mode->htotal = umode->htotal;
7702+ mode->hskew = umode->hskew;
7703+ mode->vdisplay = umode->vdisplay;
7704+ mode->vsync_start = umode->vsync_start;
7705+ mode->vsync_end = umode->vsync_end;
7706+ mode->vtotal = umode->vtotal;
7707+ mode->vscan = umode->vscan;
7708+ mode->vrefresh = umode->vrefresh;
7709+ mode->flags = umode->flags;
7710+ mode->type = umode->type;
7711+ strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
7712+ mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
7713+ }
7714+
7715+ connector_funcs = (struct drm_connector_helper_funcs *)
7716+ connector->helper_private;
7717+
7718+ switch(op) {
7719+ case PSB_MODE_OPERATION_MODE_VALID:
7720+ if(connector_funcs->mode_valid) {
7721+ resp = connector_funcs->mode_valid(connector, mode);
7722+ arg->data = (void *)resp;
7723+ }
7724+ break;
7725+ default:
7726+ DRM_DEBUG("Unsupported psb mode operation");
7727+ ret = -EOPNOTSUPP;
7728+ goto mode_op_err;
7729+ }
7730+
7731+mode_op_err:
7732+ drm_mode_destroy(dev, mode);
7733+mode_op_out:
7734+ mutex_unlock(&dev->mode_config.mutex);
7735+ return ret;
7736+}
7737+
7738+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
7739+ struct drm_file *file_priv)
7740+{
7741+ struct drm_psb_private *dev_priv = psb_priv(dev);
7742+ struct drm_psb_stolen_memory_arg *arg = data;
7743+
7744+ arg->base = dev_priv->pg->stolen_base;
7745+ arg->size = dev_priv->pg->vram_stolen_size;
7746+
7747+ return 0;
7748+}
7749+
7750+static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
7751+ struct drm_file *file_priv)
7752+{
7753+ struct drm_psb_private *dev_priv = psb_priv(dev);
7754+ struct drm_psb_register_rw_arg *arg = data;
7755+
7756+ if (arg->display_write_mask != 0) {
7757+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, arg->b_force_hw_on)) {
7758+ if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
7759+ PSB_WVDC32(arg->display.pfit_controls, PFIT_CONTROL);
7760+ if (arg->display_write_mask & REGRWBITS_PFIT_AUTOSCALE_RATIOS)
7761+ PSB_WVDC32(arg->display.pfit_autoscale_ratios, PFIT_AUTO_RATIOS);
7762+ if (arg->display_write_mask & REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
7763+ PSB_WVDC32(arg->display.pfit_programmed_scale_ratios, PFIT_PGM_RATIOS);
7764+ if (arg->display_write_mask & REGRWBITS_PIPEASRC)
7765+ PSB_WVDC32(arg->display.pipeasrc, PIPEASRC);
7766+ if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
7767+ PSB_WVDC32(arg->display.pipebsrc, PIPEBSRC);
7768+ if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
7769+ PSB_WVDC32(arg->display.vtotal_a, VTOTAL_A);
7770+ if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
7771+ PSB_WVDC32(arg->display.vtotal_b, VTOTAL_B);
7772+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
7773+ } else {
7774+ if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
7775+ dev_priv->savePFIT_CONTROL = arg->display.pfit_controls;
7776+ if (arg->display_write_mask & REGRWBITS_PFIT_AUTOSCALE_RATIOS)
7777+ dev_priv->savePFIT_AUTO_RATIOS = arg->display.pfit_autoscale_ratios;
7778+ if (arg->display_write_mask & REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
7779+ dev_priv->savePFIT_PGM_RATIOS = arg->display.pfit_programmed_scale_ratios;
7780+ if (arg->display_write_mask & REGRWBITS_PIPEASRC)
7781+ dev_priv->savePIPEASRC = arg->display.pipeasrc;
7782+ if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
7783+ dev_priv->savePIPEBSRC = arg->display.pipebsrc;
7784+ if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
7785+ dev_priv->saveVTOTAL_A = arg->display.vtotal_a;
7786+ if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
7787+ dev_priv->saveVTOTAL_B = arg->display.vtotal_b;
7788+ }
7789+ }
7790+
7791+ if (arg->display_read_mask != 0) {
7792+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, arg->b_force_hw_on)) {
7793+ if (arg->display_read_mask & REGRWBITS_PFIT_CONTROLS)
7794+ arg->display.pfit_controls = PSB_RVDC32(PFIT_CONTROL);
7795+ if (arg->display_read_mask & REGRWBITS_PFIT_AUTOSCALE_RATIOS)
7796+ arg->display.pfit_autoscale_ratios = PSB_RVDC32(PFIT_AUTO_RATIOS);
7797+ if (arg->display_read_mask & REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
7798+ arg->display.pfit_programmed_scale_ratios = PSB_RVDC32(PFIT_PGM_RATIOS);
7799+ if (arg->display_read_mask & REGRWBITS_PIPEASRC)
7800+ arg->display.pipeasrc = PSB_RVDC32(PIPEASRC);
7801+ if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
7802+ arg->display.pipebsrc = PSB_RVDC32(PIPEBSRC);
7803+ if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
7804+ arg->display.vtotal_a = PSB_RVDC32(VTOTAL_A);
7805+ if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
7806+ arg->display.vtotal_b = PSB_RVDC32(VTOTAL_B);
7807+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
7808+ } else {
7809+ if (arg->display_read_mask & REGRWBITS_PFIT_CONTROLS)
7810+ arg->display.pfit_controls = dev_priv->savePFIT_CONTROL;
7811+ if (arg->display_read_mask & REGRWBITS_PFIT_AUTOSCALE_RATIOS)
7812+ arg->display.pfit_autoscale_ratios = dev_priv->savePFIT_AUTO_RATIOS;
7813+ if (arg->display_read_mask & REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
7814+ arg->display.pfit_programmed_scale_ratios = dev_priv->savePFIT_PGM_RATIOS;
7815+ if (arg->display_read_mask & REGRWBITS_PIPEASRC)
7816+ arg->display.pipeasrc = dev_priv->savePIPEASRC;
7817+ if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
7818+ arg->display.pipebsrc = dev_priv->savePIPEBSRC;
7819+ if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
7820+ arg->display.vtotal_a = dev_priv->saveVTOTAL_A;
7821+ if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
7822+ arg->display.vtotal_b = dev_priv->saveVTOTAL_B;
7823+ }
7824+ }
7825+
7826+ if (arg->overlay_write_mask != 0) {
7827+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, arg->b_force_hw_on)) {
7828+ if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
7829+ PSB_WVDC32(arg->overlay.OGAMC5, OV_OGAMC5);
7830+ PSB_WVDC32(arg->overlay.OGAMC4, OV_OGAMC4);
7831+ PSB_WVDC32(arg->overlay.OGAMC3, OV_OGAMC3);
7832+ PSB_WVDC32(arg->overlay.OGAMC2, OV_OGAMC2);
7833+ PSB_WVDC32(arg->overlay.OGAMC1, OV_OGAMC1);
7834+ PSB_WVDC32(arg->overlay.OGAMC0, OV_OGAMC0);
7835+ }
7836+ if (arg->overlay_write_mask & OV_REGRWBITS_OVADD) {
7837+ PSB_WVDC32(arg->overlay.OVADD, OV_OVADD);
7838+ }
7839+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
7840+ } else {
7841+ if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
7842+ dev_priv->saveOV_OGAMC5 = arg->overlay.OGAMC5;
7843+ dev_priv->saveOV_OGAMC4 = arg->overlay.OGAMC4;
7844+ dev_priv->saveOV_OGAMC3 = arg->overlay.OGAMC3;
7845+ dev_priv->saveOV_OGAMC2 = arg->overlay.OGAMC2;
7846+ dev_priv->saveOV_OGAMC1 = arg->overlay.OGAMC1;
7847+ dev_priv->saveOV_OGAMC0 = arg->overlay.OGAMC0;
7848+ }
7849+ if (arg->overlay_write_mask & OV_REGRWBITS_OVADD)
7850+ dev_priv->saveOV_OVADD = arg->overlay.OVADD;
7851+ }
7852+ }
7853+
7854+ if (arg->overlay_read_mask != 0) {
7855+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, arg->b_force_hw_on)) {
7856+ if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
7857+ arg->overlay.OGAMC5 = PSB_RVDC32(OV_OGAMC5);
7858+ arg->overlay.OGAMC4 = PSB_RVDC32(OV_OGAMC4);
7859+ arg->overlay.OGAMC3 = PSB_RVDC32(OV_OGAMC3);
7860+ arg->overlay.OGAMC2 = PSB_RVDC32(OV_OGAMC2);
7861+ arg->overlay.OGAMC1 = PSB_RVDC32(OV_OGAMC1);
7862+ arg->overlay.OGAMC0 = PSB_RVDC32(OV_OGAMC0);
7863+ }
7864+ if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
7865+ arg->overlay.OVADD = PSB_RVDC32(OV_OVADD);
7866+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
7867+ } else {
7868+ if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
7869+ arg->overlay.OGAMC5 = dev_priv->saveOV_OGAMC5;
7870+ arg->overlay.OGAMC4 = dev_priv->saveOV_OGAMC4;
7871+ arg->overlay.OGAMC3 = dev_priv->saveOV_OGAMC3;
7872+ arg->overlay.OGAMC2 = dev_priv->saveOV_OGAMC2;
7873+ arg->overlay.OGAMC1 = dev_priv->saveOV_OGAMC1;
7874+ arg->overlay.OGAMC0 = dev_priv->saveOV_OGAMC0;
7875+ }
7876+ if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
7877+ arg->overlay.OVADD = dev_priv->saveOV_OVADD;
7878+ }
7879+ }
7880+
7881+ return 0;
7882+}
7883+
7884+/* always available as we are SIGIO'd */
7885+static unsigned int psb_poll(struct file *filp,
7886+ struct poll_table_struct *wait)
7887+{
7888+ return POLLIN | POLLRDNORM;
7889+}
7890+
7891+int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
7892+{
7893+ return 0;
7894+}
7895+
7896+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
7897+ unsigned long arg)
7898+{
7899+ struct drm_file *file_priv = filp->private_data;
7900+ struct drm_device *dev = file_priv->minor->dev;
7901+ unsigned int nr = DRM_IOCTL_NR(cmd);
7902+ long ret;
7903+
7904+ /*
7905+ * The driver private ioctls and TTM ioctls should be
7906+ * thread-safe.
7907+ */
7908+
7909+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
7910+ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
7911+ struct drm_ioctl_desc *ioctl = &psb_ioctls[nr - DRM_COMMAND_BASE];
7912+
7913+ if (unlikely(ioctl->cmd != cmd)) {
7914+ DRM_ERROR("Invalid drm command %d\n",
7915+ nr - DRM_COMMAND_BASE);
7916+ return -EINVAL;
7917+ }
7918+
7919+ return drm_unlocked_ioctl(filp, cmd, arg);
7920+ }
7921+ /*
7922+ * Not all old drm ioctls are thread-safe.
7923+ */
7924+
7925+ lock_kernel();
7926+ ret = drm_unlocked_ioctl(filp, cmd, arg);
7927+ unlock_kernel();
7928+ return ret;
7929+}
7930+
7931+static int psb_ospm_read(char *buf, char **start, off_t offset, int request,
7932+ int *eof, void *data)
7933+{
7934+ struct drm_minor *minor = (struct drm_minor *) data;
7935+ struct drm_device *dev = minor->dev;
7936+ struct drm_psb_private *dev_priv =
7937+ (struct drm_psb_private *) dev->dev_private;
7938+ int len = 0;
7939+#ifdef OSPM_STAT
7940+ unsigned long d0 = 0;
7941+ unsigned long d0i3 = 0;
7942+ unsigned long d3 = 0;
7943+#endif
7944+
7945+ *start = &buf[offset];
7946+ *eof = 0;
7947+ DRM_PROC_PRINT("D0i3:%s ", drm_psb_ospm ? "enabled" : "disabled");
7948+
7949+#ifdef OSPM_STAT
7950+ switch (dev_priv->graphics_state) {
7951+ case PSB_PWR_STATE_D0:
7952+ DRM_PROC_PRINT("GFX:%s\n", "D0");
7953+ break;
7954+ case PSB_PWR_STATE_D0i3:
7955+ DRM_PROC_PRINT("GFX:%s\n", "D0i3");
7956+ break;
7957+ case PSB_PWR_STATE_D3:
7958+ DRM_PROC_PRINT("GFX:%s\n", "D3");
7959+ break;
7960+ default:
7961+ DRM_PROC_PRINT("GFX:%s\n", "unknown");
7962+ }
7963+
7964+ d0 = dev_priv->gfx_d0_time * 1000 / HZ;
7965+ d0i3 = dev_priv->gfx_d0i3_time * 1000 / HZ;
7966+ d3 = dev_priv->gfx_d3_time * 1000 / HZ;
7967+ switch (dev_priv->graphics_state) {
7968+ case PSB_PWR_STATE_D0:
7969+ d0 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
7970+ break;
7971+ case PSB_PWR_STATE_D0i3:
7972+ d0i3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
7973+ break;
7974+ case PSB_PWR_STATE_D3:
7975+ d3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
7976+ break;
7977+ }
7978+ DRM_PROC_PRINT("GFX(cnt/ms):\n");
7979+ DRM_PROC_PRINT("D0:%lu/%lu, D0i3:%lu/%lu, D3:%lu/%lu \n",
7980+ dev_priv->gfx_d0_cnt, d0, dev_priv->gfx_d0i3_cnt, d0i3,
7981+ dev_priv->gfx_d3_cnt, d3);
7982+#endif
7983+ if (len > request + offset)
7984+ return request;
7985+ *eof = 1;
7986+ return len - offset;
7987+}
7988+
7989+/* When a client dies:
7990+ * - Check for and clean up flipped page state
7991+ */
7992+void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
7993+{
7994+ unsigned long irqflags;
7995+ int pipe, i;
7996+ if (dev->dev_private) {
7997+ struct drm_psb_private *dev_priv = dev->dev_private;
7998+ if (dev_priv->dri_page_flipping && dev_priv->current_page == 1) {
7999+ for (pipe=0; pipe<2; pipe++) {
8000+ if (dev_priv->pipe_active[pipe] == 1) {
8001+ dev_priv->flip_start[pipe] = dev_priv->saved_start[pipe];
8002+ dev_priv->flip_offset[pipe] = dev_priv->saved_offset[pipe];
8003+ dev_priv->flip_stride[pipe] = dev_priv->saved_stride[pipe];
8004+ psb_flip_set_base(dev_priv, pipe);
8005+ }
8006+ }
8007+ dev_priv->dri_page_flipping = 0;
8008+ dev_priv->current_page = 0;
8009+ }
8010+
8011+ drm_psb_disable_vsync = 1;
8012+ dev_priv->vdc_irq_mask &= ~(_PSB_VSYNC_PIPEA_FLAG | _PSB_VSYNC_PIPEB_FLAG);
8013+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
8014+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
8015+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
8016+
8017+ for (i = 0; i < dev->num_crtcs; i++) {
8018+ if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
8019+ dev->vblank_enabled[i]) {
8020+ DRM_DEBUG("disabling vblank on crtc %d\n", i);
8021+ dev->last_vblank[i] =
8022+ dev->driver->get_vblank_counter(dev, i);
8023+ dev->vblank_enabled[i] = 0;
8024+ }
8025+ }
8026+ }
8027+}
8028+
8029+static void psb_remove(struct pci_dev *pdev)
8030+{
8031+ struct drm_device *dev = pci_get_drvdata(pdev);
8032+ drm_put_dev(dev);
8033+}
8034+
8035+static int psb_proc_init(struct drm_minor *minor)
8036+{
8037+ struct proc_dir_entry *ent;
8038+ ent = create_proc_read_entry(OSPM_PROC_ENTRY, 0, minor->proc_root,
8039+ psb_ospm_read, minor);
8040+ if (ent)
8041+ return 0;
8042+ else
8043+ return -1;
8044+}
8045+
8046+static void psb_proc_cleanup(struct drm_minor *minor)
8047+{
8048+ remove_proc_entry(OSPM_PROC_ENTRY, minor->proc_root);
8049+ return;
8050+}
8051+
8052+static struct drm_driver driver = {
8053+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL | DRIVER_MODESET,
8054+ .load = psb_driver_load,
8055+ .unload = psb_driver_unload,
8056+
8057+ .get_reg_ofs = drm_core_get_reg_ofs,
8058+ .ioctls = psb_ioctls,
8059+ .device_is_agp = psb_driver_device_is_agp,
8060+ .irq_preinstall = psb_irq_preinstall,
8061+ .irq_postinstall = psb_irq_postinstall,
8062+ .irq_uninstall = psb_irq_uninstall,
8063+ .irq_handler = psb_irq_handler,
8064+ .enable_vblank = psb_enable_vblank,
8065+ .disable_vblank = psb_disable_vblank,
8066+ .firstopen = NULL,
8067+ .lastclose = psb_lastclose,
8068+ .open = psb_driver_open,
8069+ .proc_init = psb_proc_init,
8070+ .proc_cleanup = psb_proc_cleanup,
8071+ .preclose = psb_driver_preclose,
8072+ .fops = {
8073+ .owner = THIS_MODULE,
8074+ .open = psb_open,
8075+ .release = psb_release,
8076+ .unlocked_ioctl = psb_unlocked_ioctl,
8077+ .mmap = psb_mmap,
8078+ .poll = psb_poll,
8079+ .fasync = drm_fasync,
8080+ },
8081+ .pci_driver = {
8082+ .name = DRIVER_NAME,
8083+ .id_table = pciidlist,
8084+ .resume = powermgmt_resume,
8085+ .suspend = powermgmt_suspend,
8086+ .probe = psb_probe,
8087+ .remove = psb_remove,
8088+ },
8089+ .name = DRIVER_NAME,
8090+ .desc = DRIVER_DESC,
8091+ .date = PSB_DRM_DRIVER_DATE,
8092+ .major = PSB_DRM_DRIVER_MAJOR,
8093+ .minor = PSB_DRM_DRIVER_MINOR,
8094+ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
8095+};
8096+
8097+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8098+{
8099+ return drm_get_dev(pdev, ent, &driver);
8100+}
8101+
8102+static int __init psb_init(void)
8103+{
8104+ driver.num_ioctls = psb_max_ioctl;
8105+ return drm_init(&driver);
8106+}
8107+
8108+static void __exit psb_exit(void)
8109+{
8110+ drm_exit(&driver);
8111+}
8112+
8113+late_initcall(psb_init);
8114+module_exit(psb_exit);
8115+
8116+MODULE_AUTHOR(DRIVER_AUTHOR);
8117+MODULE_DESCRIPTION(DRIVER_DESC);
8118+MODULE_LICENSE("GPL");
8119diff --git a/drivers/gpu/drm/psb/psb_drv.h b/drivers/gpu/drm/psb/psb_drv.h
8120new file mode 100644
8121index 0000000..9b2c4e1
8122--- /dev/null
8123+++ b/drivers/gpu/drm/psb/psb_drv.h
8124@@ -0,0 +1,1224 @@
8125+/**************************************************************************
8126+ *Copyright (c) 2007-2008, Intel Corporation.
8127+ *All Rights Reserved.
8128+ *
8129+ *This program is free software; you can redistribute it and/or modify it
8130+ *under the terms and conditions of the GNU General Public License,
8131+ *version 2, as published by the Free Software Foundation.
8132+ *
8133+ *This program is distributed in the hope it will be useful, but WITHOUT
8134+ *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8135+ *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8136+ *more details.
8137+ *
8138+ *You should have received a copy of the GNU General Public License along with
8139+ *this program; if not, write to the Free Software Foundation, Inc.,
8140+ *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8141+ *
8142+ *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
8143+ *develop this driver.
8144+ *
8145+ **************************************************************************/
8146+/*
8147+ */
8148+#ifndef _PSB_DRV_H_
8149+#define _PSB_DRV_H_
8150+
8151+#include <drm/drmP.h>
8152+#include "psb_drm.h"
8153+#include "psb_reg.h"
8154+#include "psb_schedule.h"
8155+#include "psb_intel_drv.h"
8156+#include "psb_hotplug.h"
8157+#include "psb_dpst.h"
8158+#include "ttm/ttm_object.h"
8159+#include "ttm/ttm_fence_driver.h"
8160+#include "ttm/ttm_bo_driver.h"
8161+#include "ttm/ttm_lock.h"
8162+
8163+extern struct ttm_bo_driver psb_ttm_bo_driver;
8164+
8165+enum {
8166+ CHIP_PSB_8108 = 0,
8167+ CHIP_PSB_8109 = 1,
8168+ CHIP_MRST_4100 = 2
8169+};
8170+
8171+/*
8172+ *Hardware bugfixes
8173+ */
8174+
8175+#define FIX_TG_16
8176+#define FIX_TG_2D_CLOCKGATE
8177+#define OSPM_STAT
8178+
8179+#define DRIVER_NAME "psb"
8180+#define DRIVER_DESC "drm driver for the Intel GMA500"
8181+#define DRIVER_AUTHOR "Tungsten Graphics Inc."
8182+#define OSPM_PROC_ENTRY "ospm"
8183+
8184+#define PSB_DRM_DRIVER_DATE "2009-03-10"
8185+#define PSB_DRM_DRIVER_MAJOR 8
8186+#define PSB_DRM_DRIVER_MINOR 1
8187+#define PSB_DRM_DRIVER_PATCHLEVEL 0
8188+
8189+/*
8190+ *TTM driver private offsets.
8191+ */
8192+
8193+#define DRM_PSB_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
8194+
8195+#define PSB_OBJECT_HASH_ORDER 13
8196+#define PSB_FILE_OBJECT_HASH_ORDER 12
8197+#define PSB_BO_HASH_ORDER 12
8198+
8199+#define PSB_VDC_OFFSET 0x00000000
8200+#define PSB_VDC_SIZE 0x000080000
8201+#define MRST_MMIO_SIZE 0x0000C0000
8202+#define PSB_SGX_SIZE 0x8000
8203+#define PSB_SGX_OFFSET 0x00040000
8204+#define MRST_SGX_OFFSET 0x00080000
8205+#define PSB_MMIO_RESOURCE 0
8206+#define PSB_GATT_RESOURCE 2
8207+#define PSB_GTT_RESOURCE 3
8208+#define PSB_GMCH_CTRL 0x52
8209+#define PSB_BSM 0x5C
8210+#define _PSB_GMCH_ENABLED 0x4
8211+#define PSB_PGETBL_CTL 0x2020
8212+#define _PSB_PGETBL_ENABLED 0x00000001
8213+#define PSB_SGX_2D_SLAVE_PORT 0x4000
8214+#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
8215+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
8216+#define PSB_NUM_VALIDATE_BUFFERS 2048
8217+#define PSB_MEM_KERNEL_START 0x10000000
8218+#define PSB_MEM_PDS_START 0x20000000
8219+#define PSB_MEM_MMU_START 0x40000000
8220+
8221+#define DRM_PSB_MEM_KERNEL TTM_PL_PRIV0
8222+#define DRM_PSB_FLAG_MEM_KERNEL TTM_PL_FLAG_PRIV0
8223+
8224+/*
8225+ *Flags for external memory type field.
8226+ */
8227+
8228+#define MRST_MSVDX_OFFSET 0x90000 /*MSVDX Base offset */
8229+#define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */
8230+/* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
8231+#define PSB_MSVDX_SIZE 0x10000
8232+
8233+#define LNC_TOPAZ_OFFSET 0xA0000
8234+#define LNC_TOPAZ_SIZE 0x10000
8235+
8236+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
8237+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
8238+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
8239+
8240+/*
8241+ *PTE's and PDE's
8242+ */
8243+
8244+#define PSB_PDE_MASK 0x003FFFFF
8245+#define PSB_PDE_SHIFT 22
8246+#define PSB_PTE_SHIFT 12
8247+
8248+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
8249+#define PSB_PTE_WO 0x0002 /* Write only */
8250+#define PSB_PTE_RO 0x0004 /* Read only */
8251+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
8252+
8253+/*
8254+ *VDC registers and bits
8255+ */
8256+#define PSB_MSVDX_CLOCKGATING 0x2064
8257+#define PSB_TOPAZ_CLOCKGATING 0x2068
8258+#define PSB_HWSTAM 0x2098
8259+#define PSB_INSTPM 0x20C0
8260+#define PSB_INT_IDENTITY_R 0x20A4
8261+#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
8262+#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
8263+#define _PSB_IRQ_SGX_FLAG (1<<18)
8264+#define _PSB_IRQ_MSVDX_FLAG (1<<19)
8265+#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
8266+#define PSB_INT_MASK_R 0x20A8
8267+#define PSB_INT_ENABLE_R 0x20A0
8268+
8269+#define _PSB_MMU_ER_MASK 0x0001FF00
8270+#define _PSB_MMU_ER_HOST (1 << 16)
8271+#define GPIOA 0x5010
8272+#define GPIOB 0x5014
8273+#define GPIOC 0x5018
8274+#define GPIOD 0x501c
8275+#define GPIOE 0x5020
8276+#define GPIOF 0x5024
8277+#define GPIOG 0x5028
8278+#define GPIOH 0x502c
8279+#define GPIO_CLOCK_DIR_MASK (1 << 0)
8280+#define GPIO_CLOCK_DIR_IN (0 << 1)
8281+#define GPIO_CLOCK_DIR_OUT (1 << 1)
8282+#define GPIO_CLOCK_VAL_MASK (1 << 2)
8283+#define GPIO_CLOCK_VAL_OUT (1 << 3)
8284+#define GPIO_CLOCK_VAL_IN (1 << 4)
8285+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
8286+#define GPIO_DATA_DIR_MASK (1 << 8)
8287+#define GPIO_DATA_DIR_IN (0 << 9)
8288+#define GPIO_DATA_DIR_OUT (1 << 9)
8289+#define GPIO_DATA_VAL_MASK (1 << 10)
8290+#define GPIO_DATA_VAL_OUT (1 << 11)
8291+#define GPIO_DATA_VAL_IN (1 << 12)
8292+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
8293+
8294+#define VCLK_DIVISOR_VGA0 0x6000
8295+#define VCLK_DIVISOR_VGA1 0x6004
8296+#define VCLK_POST_DIV 0x6010
8297+
8298+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
8299+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
8300+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
8301+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
8302+#define PSB_COMM_USER_IRQ (1024 >> 2)
8303+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
8304+#define PSB_COMM_FW (2048 >> 2)
8305+
8306+#define PSB_UIRQ_VISTEST 1
8307+#define PSB_UIRQ_OOM_REPLY 2
8308+#define PSB_UIRQ_FIRE_TA_REPLY 3
8309+#define PSB_UIRQ_FIRE_RASTER_REPLY 4
8310+
8311+#define PSB_2D_SIZE (256*1024*1024)
8312+#define PSB_MAX_RELOC_PAGES 1024
8313+
8314+#define PSB_LOW_REG_OFFS 0x0204
8315+#define PSB_HIGH_REG_OFFS 0x0600
8316+
8317+#define PSB_NUM_VBLANKS 2
8318+
8319+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
8320+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
8321+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
8322+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
8323+#define PSB_COMM_FW (2048 >> 2)
8324+
8325+#define PSB_2D_SIZE (256*1024*1024)
8326+#define PSB_MAX_RELOC_PAGES 1024
8327+
8328+#define PSB_LOW_REG_OFFS 0x0204
8329+#define PSB_HIGH_REG_OFFS 0x0600
8330+
8331+#define PSB_NUM_VBLANKS 2
8332+#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
8333+#define PSB_LID_DELAY (DRM_HZ / 10)
8334+
8335+#define PSB_PWR_STATE_D0 1
8336+#define PSB_PWR_STATE_D0i3 2
8337+#define PSB_PWR_STATE_D3 3
8338+
8339+#define PSB_PMPOLICY_NOPM 0
8340+#define PSB_PMPOLICY_CLOCKGATING 1
8341+#define PSB_PMPOLICY_POWERDOWN 2
8342+
8343+#define PSB_PMSTATE_POWERUP 0
8344+#define PSB_PMSTATE_CLOCKGATED 1
8345+#define PSB_PMSTATE_POWERDOWN 2
8346+
8347+/* Graphics MSI address and data region in PCIx */
8348+#define PSB_PCIx_MSI_ADDR_LOC 0x94
8349+#define PSB_PCIx_MSI_DATA_LOC 0x98
8350+
8351+
8352+/*
8353+ *User options.
8354+ */
8355+
8356+struct drm_psb_uopt {
8357+ int clock_gating;
8358+};
8359+
8360+/**
8361+ *struct psb_context
8362+ *
8363+ *@buffers: array of pre-allocated validate buffers.
8364+ *@used_buffers: number of buffers in @buffers array currently in use.
8365+ *@validate_buffer: buffers validated from user-space.
8366+ *@kern_validate_buffers : buffers validated from kernel-space.
8367+ *@fence_flags : Fence flags to be used for fence creation.
8368+ *
8369+ *This structure is used during execbuf validation.
8370+ */
8371+
8372+struct psb_context {
8373+ struct psb_validate_buffer *buffers;
8374+ uint32_t used_buffers;
8375+ struct list_head validate_list;
8376+ struct list_head kern_validate_list;
8377+ uint32_t fence_types;
8378+ uint32_t val_seq;
8379+};
8380+
8381+struct psb_gtt {
8382+ struct drm_device *dev;
8383+ int initialized;
8384+ uint32_t gatt_start;
8385+ uint32_t gtt_start;
8386+ uint32_t gtt_phys_start;
8387+ unsigned gtt_pages;
8388+ unsigned gatt_pages;
8389+ uint32_t stolen_base;
8390+ uint32_t pge_ctl;
8391+ u16 gmch_ctrl;
8392+ unsigned long stolen_size;
8393+ unsigned long vram_stolen_size;
8394+ unsigned long ci_stolen_size;
8395+ unsigned long rar_stolen_size;
8396+ uint32_t *gtt_map;
8397+ struct rw_semaphore sem;
8398+};
8399+
8400+struct psb_use_base {
8401+ struct list_head head;
8402+ struct ttm_fence_object *fence;
8403+ unsigned int reg;
8404+ unsigned long offset;
8405+ unsigned int dm;
8406+};
8407+
8408+struct psb_validate_buffer;
8409+
8410+struct psb_msvdx_cmd_queue {
8411+ struct list_head head;
8412+ void *cmd;
8413+ unsigned long cmd_size;
8414+ uint32_t sequence;
8415+};
8416+
8417+struct drm_psb_private {
8418+
8419+ /*
8420+ *TTM Glue.
8421+ */
8422+
8423+ struct drm_global_reference mem_global_ref;
8424+ int has_global;
8425+
8426+ struct drm_device *dev;
8427+ struct ttm_object_device *tdev;
8428+ struct ttm_fence_device fdev;
8429+ struct ttm_bo_device bdev;
8430+ struct ttm_lock ttm_lock;
8431+ struct vm_operations_struct *ttm_vm_ops;
8432+ int has_fence_device;
8433+ int has_bo_device;
8434+
8435+ unsigned long chipset;
8436+
8437+ struct psb_xhw_buf resume_buf;
8438+ struct drm_psb_dev_info_arg dev_info;
8439+ struct drm_psb_uopt uopt;
8440+
8441+ struct psb_gtt *pg;
8442+
8443+ struct page *scratch_page;
8444+ struct page *comm_page;
8445+ /* Deleted volatile because it is not recommended to use. */
8446+ uint32_t *comm;
8447+ uint32_t comm_mmu_offset;
8448+ uint32_t mmu_2d_offset;
8449+ uint32_t sequence[PSB_NUM_ENGINES];
8450+ uint32_t last_sequence[PSB_NUM_ENGINES];
8451+ int idle[PSB_NUM_ENGINES];
8452+ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
8453+ int engine_lockup_2d;
8454+
8455+ struct psb_mmu_driver *mmu;
8456+ struct psb_mmu_pd *pf_pd;
8457+
8458+ uint8_t *sgx_reg;
8459+ uint8_t *vdc_reg;
8460+ uint32_t gatt_free_offset;
8461+
8462+ /*
8463+ *MSVDX
8464+ */
8465+ uint8_t *msvdx_reg;
8466+ atomic_t msvdx_mmu_invaldc;
8467+ void *msvdx_private;
8468+
8469+ /*
8470+ *TOPAZ
8471+ */
8472+ uint8_t *topaz_reg;
8473+ void *topaz_private;
8474+
8475+ /*
8476+ *Fencing / irq.
8477+ */
8478+
8479+ uint32_t sgx_irq_mask;
8480+ uint32_t sgx2_irq_mask;
8481+ uint32_t vdc_irq_mask;
8482+ u32 pipestat[2];
8483+
8484+ spinlock_t irqmask_lock;
8485+ spinlock_t sequence_lock;
8486+ int fence0_irq_on;
8487+ int irq_enabled;
8488+ unsigned int irqen_count_2d;
8489+ wait_queue_head_t event_2d_queue;
8490+
8491+#ifdef FIX_TG_16
8492+ wait_queue_head_t queue_2d;
8493+ atomic_t lock_2d;
8494+ atomic_t ta_wait_2d;
8495+ atomic_t ta_wait_2d_irq;
8496+ atomic_t waiters_2d;
8497+#else
8498+ struct mutex mutex_2d;
8499+#endif
8500+ int fence2_irq_on;
8501+
8502+ /*
8503+ *Modesetting
8504+ */
8505+ struct psb_intel_mode_device mode_dev;
8506+
8507+
8508+ /*
8509+ * CI share buffer
8510+ */
8511+ unsigned int ci_region_start;
8512+ unsigned int ci_region_size;
8513+
8514+ /*
8515+ * RAR share buffer;
8516+ */
8517+ unsigned int rar_region_start;
8518+ unsigned int rar_region_size;
8519+
8520+ /*
8521+ *Memory managers
8522+ */
8523+
8524+ int have_vram;
8525+ int have_camera;
8526+ int have_rar;
8527+ int have_tt;
8528+ int have_mem_mmu;
8529+ int have_mem_aper;
8530+ int have_mem_kernel;
8531+ int have_mem_pds;
8532+ int have_mem_rastgeom;
8533+ struct mutex temp_mem;
8534+
8535+ /*
8536+ *Relocation buffer mapping.
8537+ */
8538+
8539+ spinlock_t reloc_lock;
8540+ unsigned int rel_mapped_pages;
8541+ wait_queue_head_t rel_mapped_queue;
8542+
8543+ /*
8544+ *SAREA
8545+ */
8546+ struct drm_psb_sarea *sarea_priv;
8547+
8548+ /*
8549+ *OSPM info
8550+ */
8551+ uint32_t ospm_base;
8552+
8553+ /*
8554+ * Sizes info
8555+ */
8556+
8557+ struct drm_psb_sizes_arg sizes;
8558+
8559+ uint32_t fuse_reg_value;
8560+
8561+ /* vbt (gct) header information*/
8562+ struct mrst_vbt vbt_data;
8563+ /* info that is stored from the gct */
8564+ struct gct_ioctl_arg gct_data;
8565+
8566+ /*
8567+ *LVDS info
8568+ */
8569+ int backlight_duty_cycle; /* restore backlight to this value */
8570+ bool panel_wants_dither;
8571+ struct drm_display_mode *panel_fixed_mode;
8572+ struct drm_display_mode *lfp_lvds_vbt_mode;
8573+ struct drm_display_mode *sdvo_lvds_vbt_mode;
8574+
8575+ struct bdb_lvds_backlight * lvds_bl; /*LVDS backlight info from VBT*/
8576+ struct psb_intel_i2c_chan * lvds_i2c_bus;
8577+
8578+ /* Feature bits from the VBIOS*/
8579+ unsigned int int_tv_support:1;
8580+ unsigned int lvds_dither:1;
8581+ unsigned int lvds_vbt:1;
8582+ unsigned int int_crt_support:1;
8583+ unsigned int lvds_use_ssc:1;
8584+ int lvds_ssc_freq;
8585+
8586+/* MRST private data start */
8587+/*FIXME JLIU7 need to revisit */
8588+ bool sku_83;
8589+ bool sku_100;
8590+ bool sku_100L;
8591+ bool sku_bypass;
8592+ uint32_t iLVDS_enable;
8593+
8594+ /* pipe config register value */
8595+ uint32_t pipeconf;
8596+
8597+ /* plane control register value */
8598+ uint32_t dspcntr;
8599+
8600+/* MRST_DSI private data start */
8601+ /*
8602+ *MRST DSI info
8603+ */
8604+ /* The DSI device ready */
8605+ bool dsi_device_ready;
8606+
8607+ /* The DPI panel power on */
8608+ bool dpi_panel_on;
8609+
8610+ /* The DBI panel power on */
8611+ bool dbi_panel_on;
8612+
8613+ /* The DPI display */
8614+ bool dpi;
8615+
8616+ /* status */
8617+ uint32_t videoModeFormat:2;
8618+ uint32_t laneCount:3;
8619+ uint32_t status_reserved:27;
8620+
8621+ /* dual display - DPI & DBI */
8622+ bool dual_display;
8623+
8624+ /* HS or LP transmission */
8625+ bool lp_transmission;
8626+
8627+ /* configuration phase */
8628+ bool config_phase;
8629+
8630+ /* DSI clock */
8631+ uint32_t RRate;
8632+ uint32_t DDR_Clock;
8633+ uint32_t DDR_Clock_Calculated;
8634+ uint32_t ClockBits;
8635+
8636+ /* DBI Buffer pointer */
8637+ u8 *p_DBI_commandBuffer_orig;
8638+ u8 *p_DBI_commandBuffer;
8639+ uint32_t DBI_CB_pointer;
8640+ u8 *p_DBI_dataBuffer_orig;
8641+ u8 *p_DBI_dataBuffer;
8642+ uint32_t DBI_DB_pointer;
8643+
8644+ /* DPI panel spec */
8645+ uint32_t pixelClock;
8646+ uint32_t HsyncWidth;
8647+ uint32_t HbackPorch;
8648+ uint32_t HfrontPorch;
8649+ uint32_t HactiveArea;
8650+ uint32_t VsyncWidth;
8651+ uint32_t VbackPorch;
8652+ uint32_t VfrontPorch;
8653+ uint32_t VactiveArea;
8654+ uint32_t bpp:5;
8655+ uint32_t Reserved:27;
8656+
8657+ /* DBI panel spec */
8658+ uint32_t dbi_pixelClock;
8659+ uint32_t dbi_HsyncWidth;
8660+ uint32_t dbi_HbackPorch;
8661+ uint32_t dbi_HfrontPorch;
8662+ uint32_t dbi_HactiveArea;
8663+ uint32_t dbi_VsyncWidth;
8664+ uint32_t dbi_VbackPorch;
8665+ uint32_t dbi_VfrontPorch;
8666+ uint32_t dbi_VactiveArea;
8667+ uint32_t dbi_bpp:5;
8668+ uint32_t dbi_Reserved:27;
8669+
8670+/* MRST_DSI private data end */
8671+
8672+ /*
8673+ *Register state
8674+ */
8675+ uint32_t saveDSPACNTR;
8676+ uint32_t saveDSPBCNTR;
8677+ uint32_t savePIPEACONF;
8678+ uint32_t savePIPEBCONF;
8679+ uint32_t savePIPEASRC;
8680+ uint32_t savePIPEBSRC;
8681+ uint32_t saveFPA0;
8682+ uint32_t saveFPA1;
8683+ uint32_t saveDPLL_A;
8684+ uint32_t saveDPLL_A_MD;
8685+ uint32_t saveHTOTAL_A;
8686+ uint32_t saveHBLANK_A;
8687+ uint32_t saveHSYNC_A;
8688+ uint32_t saveVTOTAL_A;
8689+ uint32_t saveVBLANK_A;
8690+ uint32_t saveVSYNC_A;
8691+ uint32_t saveDSPASTRIDE;
8692+ uint32_t saveDSPASIZE;
8693+ uint32_t saveDSPAPOS;
8694+ uint32_t saveDSPABASE;
8695+ uint32_t saveDSPASURF;
8696+ uint32_t saveFPB0;
8697+ uint32_t saveFPB1;
8698+ uint32_t saveDPLL_B;
8699+ uint32_t saveDPLL_B_MD;
8700+ uint32_t saveHTOTAL_B;
8701+ uint32_t saveHBLANK_B;
8702+ uint32_t saveHSYNC_B;
8703+ uint32_t saveVTOTAL_B;
8704+ uint32_t saveVBLANK_B;
8705+ uint32_t saveVSYNC_B;
8706+ uint32_t saveDSPBSTRIDE;
8707+ uint32_t saveDSPBSIZE;
8708+ uint32_t saveDSPBPOS;
8709+ uint32_t saveDSPBBASE;
8710+ uint32_t saveDSPBSURF;
8711+ uint32_t saveVCLK_DIVISOR_VGA0;
8712+ uint32_t saveVCLK_DIVISOR_VGA1;
8713+ uint32_t saveVCLK_POST_DIV;
8714+ uint32_t saveVGACNTRL;
8715+ uint32_t saveADPA;
8716+ uint32_t saveLVDS;
8717+ uint32_t saveDVOA;
8718+ uint32_t saveDVOB;
8719+ uint32_t saveDVOC;
8720+ uint32_t savePP_ON;
8721+ uint32_t savePP_OFF;
8722+ uint32_t savePP_CONTROL;
8723+ uint32_t savePP_CYCLE;
8724+ uint32_t savePFIT_CONTROL;
8725+ uint32_t savePaletteA[256];
8726+ uint32_t savePaletteB[256];
8727+ uint32_t saveBLC_PWM_CTL2;
8728+ uint32_t saveBLC_PWM_CTL;
8729+ uint32_t saveCLOCKGATING;
8730+ uint32_t saveDSPARB;
8731+ uint32_t saveDSPATILEOFF;
8732+ uint32_t saveDSPBTILEOFF;
8733+ uint32_t saveDSPAADDR;
8734+ uint32_t saveDSPBADDR;
8735+ uint32_t savePFIT_AUTO_RATIOS;
8736+ uint32_t savePFIT_PGM_RATIOS;
8737+ uint32_t savePP_ON_DELAYS;
8738+ uint32_t savePP_OFF_DELAYS;
8739+ uint32_t savePP_DIVISOR;
8740+ uint32_t saveBSM;
8741+ uint32_t saveVBT;
8742+ uint32_t saveBCLRPAT_A;
8743+ uint32_t saveBCLRPAT_B;
8744+ uint32_t saveDSPALINOFF;
8745+ uint32_t saveDSPBLINOFF;
8746+ uint32_t savePERF_MODE;
8747+ uint32_t saveDSPFW1;
8748+ uint32_t saveDSPFW2;
8749+ uint32_t saveDSPFW3;
8750+ uint32_t saveDSPFW4;
8751+ uint32_t saveDSPFW5;
8752+ uint32_t saveDSPFW6;
8753+ uint32_t saveCHICKENBIT;
8754+ uint32_t saveDSPACURSOR_CTRL;
8755+ uint32_t saveDSPBCURSOR_CTRL;
8756+ uint32_t saveDSPACURSOR_BASE;
8757+ uint32_t saveDSPBCURSOR_BASE;
8758+ uint32_t saveDSPACURSOR_POS;
8759+ uint32_t saveDSPBCURSOR_POS;
8760+ uint32_t save_palette_a[256];
8761+ uint32_t save_palette_b[256];
8762+ uint32_t saveOV_OVADD;
8763+ uint32_t saveOV_OGAMC0;
8764+ uint32_t saveOV_OGAMC1;
8765+ uint32_t saveOV_OGAMC2;
8766+ uint32_t saveOV_OGAMC3;
8767+ uint32_t saveOV_OGAMC4;
8768+ uint32_t saveOV_OGAMC5;
8769+
8770+ /* MSI reg save */
8771+ uint32_t msi_addr;
8772+ uint32_t msi_data;
8773+
8774+ /*
8775+ *Xhw
8776+ */
8777+
8778+ uint32_t *xhw;
8779+ struct ttm_buffer_object *xhw_bo;
8780+ struct ttm_bo_kmap_obj xhw_kmap;
8781+ struct list_head xhw_in;
8782+ spinlock_t xhw_lock;
8783+ atomic_t xhw_client;
8784+ struct drm_file *xhw_file;
8785+ wait_queue_head_t xhw_queue;
8786+ wait_queue_head_t xhw_caller_queue;
8787+ struct mutex xhw_mutex;
8788+ struct psb_xhw_buf *xhw_cur_buf;
8789+ int xhw_submit_ok;
8790+ int xhw_on;
8791+
8792+ /*
8793+ *Scheduling.
8794+ */
8795+
8796+ struct mutex reset_mutex;
8797+ struct psb_scheduler scheduler;
8798+ struct mutex cmdbuf_mutex;
8799+ uint32_t ta_mem_pages;
8800+ struct psb_ta_mem *ta_mem;
8801+ int force_ta_mem_load;
8802+ atomic_t val_seq;
8803+
8804+ /*
8805+ *TODO: change this to be per drm-context.
8806+ */
8807+
8808+ struct psb_context context;
8809+
8810+ /*
8811+ * LID-Switch
8812+ */
8813+ spinlock_t lid_lock;
8814+ struct timer_list lid_timer;
8815+ struct psb_intel_opregion opregion;
8816+ u32 * lid_state;
8817+ u32 lid_last_state;
8818+
8819+ /*
8820+ *Watchdog
8821+ */
8822+
8823+ spinlock_t watchdog_lock;
8824+ struct timer_list watchdog_timer;
8825+ struct work_struct watchdog_wq;
8826+ struct work_struct msvdx_watchdog_wq;
8827+ struct work_struct topaz_watchdog_wq;
8828+ int timer_available;
8829+
8830+ uint32_t apm_reg;
8831+ uint16_t apm_base;
8832+#ifdef OSPM_STAT
8833+ unsigned char graphics_state;
8834+ unsigned long gfx_d0i3_time;
8835+ unsigned long gfx_d0_time;
8836+ unsigned long gfx_d3_time;
8837+ unsigned long gfx_last_mode_change;
8838+ unsigned long gfx_d0_cnt;
8839+ unsigned long gfx_d0i3_cnt;
8840+ unsigned long gfx_d3_cnt;
8841+#endif
8842+
8843+ int dri_page_flipping;
8844+ int current_page;
8845+ int pipe_active[2];
8846+ int saved_start[2];
8847+ int saved_offset[2];
8848+ int saved_stride[2];
8849+
8850+ int flip_start[2];
8851+ int flip_offset[2];
8852+ int flip_stride[2];
8853+
8854+
8855+ /*
8856+ *Used for modifying backlight from xrandr -- consider removing and using HAL instead
8857+ */
8858+ struct drm_property *backlight_property;
8859+ uint32_t blc_adj1;
8860+
8861+ /*
8862+ * DPST and Hotplug state
8863+ */
8864+
8865+ struct dpst_state *psb_dpst_state;
8866+ struct hotplug_state *psb_hotplug_state;
8867+
8868+};
8869+
8870+struct psb_fpriv {
8871+ struct ttm_object_file *tfile;
8872+};
8873+
8874+struct psb_mmu_driver;
8875+
8876+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
8877+extern int drm_pick_crtcs(struct drm_device *dev);
8878+
8879+
8880+static inline struct psb_fpriv *psb_fpriv(struct drm_file *file_priv)
8881+{
8882+ return (struct psb_fpriv *) file_priv->driver_priv;
8883+}
8884+
8885+static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
8886+{
8887+ return (struct drm_psb_private *) dev->dev_private;
8888+}
8889+
8890+/*
8891+ *TTM glue. psb_ttm_glue.c
8892+ */
8893+
8894+extern int psb_open(struct inode *inode, struct file *filp);
8895+extern int psb_release(struct inode *inode, struct file *filp);
8896+extern int psb_mmap(struct file *filp, struct vm_area_struct *vma);
8897+
8898+extern int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
8899+ struct drm_file *file_priv);
8900+extern int psb_verify_access(struct ttm_buffer_object *bo,
8901+ struct file *filp);
8902+extern ssize_t psb_ttm_read(struct file *filp, char __user *buf,
8903+ size_t count, loff_t *f_pos);
8904+extern ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
8905+ size_t count, loff_t *f_pos);
8906+extern int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
8907+ struct drm_file *file_priv);
8908+extern int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
8909+ struct drm_file *file_priv);
8910+extern int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
8911+ struct drm_file *file_priv);
8912+extern int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
8913+ struct drm_file *file_priv);
8914+extern int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
8915+ struct drm_file *file_priv);
8916+extern int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
8917+ struct drm_file *file_priv);
8918+extern int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
8919+ struct drm_file *file_priv);
8920+extern int psb_pl_create_ioctl(struct drm_device *dev, void *data,
8921+ struct drm_file *file_priv);
8922+extern int psb_extension_ioctl(struct drm_device *dev, void *data,
8923+ struct drm_file *file_priv);
8924+extern int psb_ttm_global_init(struct drm_psb_private *dev_priv);
8925+extern void psb_ttm_global_release(struct drm_psb_private *dev_priv);
8926+/*
8927+ *MMU stuff.
8928+ */
8929+
8930+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
8931+ int trap_pagefaults,
8932+ int invalid_type,
8933+ struct drm_psb_private *dev_priv);
8934+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
8935+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
8936+ *driver);
8937+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
8938+ uint32_t gtt_start, uint32_t gtt_pages);
8939+extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset);
8940+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
8941+ int trap_pagefaults,
8942+ int invalid_type);
8943+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
8944+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
8945+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
8946+ unsigned long address,
8947+ uint32_t num_pages);
8948+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
8949+ uint32_t start_pfn,
8950+ unsigned long address,
8951+ uint32_t num_pages, int type);
8952+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
8953+ unsigned long *pfn);
8954+
8955+/*
8956+ *Enable / disable MMU for different requestors.
8957+ */
8958+
8959+extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver,
8960+ uint32_t mask);
8961+extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
8962+ uint32_t mask);
8963+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
8964+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
8965+ unsigned long address, uint32_t num_pages,
8966+ uint32_t desired_tile_stride,
8967+ uint32_t hw_tile_stride, int type);
8968+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
8969+ unsigned long address, uint32_t num_pages,
8970+ uint32_t desired_tile_stride,
8971+ uint32_t hw_tile_stride);
8972+/*
8973+ *psb_sgx.c
8974+ */
8975+
8976+extern int psb_blit_sequence(struct drm_psb_private *dev_priv,
8977+ uint32_t sequence);
8978+extern void psb_init_2d(struct drm_psb_private *dev_priv);
8979+extern int psb_idle_2d(struct drm_device *dev);
8980+extern int psb_idle_3d(struct drm_device *dev);
8981+extern int psb_emit_2d_copy_blit(struct drm_device *dev,
8982+ uint32_t src_offset,
8983+ uint32_t dst_offset, uint32_t pages,
8984+ int direction);
8985+extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
8986+ struct drm_file *file_priv);
8987+extern int psb_reg_submit(struct drm_psb_private *dev_priv,
8988+ uint32_t *regs, unsigned int cmds);
8989+extern int psb_submit_copy_cmdbuf(struct drm_device *dev,
8990+ struct ttm_buffer_object *cmd_buffer,
8991+ unsigned long cmd_offset,
8992+ unsigned long cmd_size, int engine,
8993+ uint32_t *copy_buffer);
8994+
8995+extern void psb_init_disallowed(void);
8996+extern void psb_fence_or_sync(struct drm_file *file_priv,
8997+ uint32_t engine,
8998+ uint32_t fence_types,
8999+ uint32_t fence_flags,
9000+ struct list_head *list,
9001+ struct psb_ttm_fence_rep *fence_arg,
9002+ struct ttm_fence_object **fence_p);
9003+extern int psb_validate_kernel_buffer(struct psb_context *context,
9004+ struct ttm_buffer_object *bo,
9005+ uint32_t fence_class,
9006+ uint64_t set_flags,
9007+ uint64_t clr_flags);
9008+extern void psb_init_ospm(struct drm_psb_private *dev_priv);
9009+extern int psb_try_power_down_sgx(struct drm_device *dev);
9010+extern int psb_page_flip(struct drm_device *dev, void *data,
9011+ struct drm_file *file_priv);
9012+extern int psb_flip_set_base(struct drm_psb_private *dev_priv, int pipe);
9013+
9014+/*
9015+ *psb_irq.c
9016+ */
9017+
9018+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
9019+extern void psb_irq_preinstall(struct drm_device *dev);
9020+extern int psb_irq_postinstall(struct drm_device *dev);
9021+extern void psb_irq_uninstall(struct drm_device *dev);
9022+extern void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands);
9023+extern int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands);
9024+extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
9025+extern int psb_vblank_wait2(struct drm_device *dev,
9026+ unsigned int *sequence);
9027+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
9028+
9029+extern int psb_enable_vblank(struct drm_device *dev, int crtc);
9030+extern void psb_disable_vblank(struct drm_device *dev, int crtc);
9031+void
9032+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
9033+
9034+void
9035+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
9036+
9037+extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
9038+/*
9039+ *psb_fence.c
9040+ */
9041+
9042+extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
9043+extern void psb_2D_irq_off(struct drm_psb_private *dev_priv);
9044+extern void psb_2D_irq_on(struct drm_psb_private *dev_priv);
9045+extern uint32_t psb_fence_advance_sequence(struct drm_device *dev,
9046+ uint32_t class);
9047+extern int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
9048+ uint32_t fence_class,
9049+ uint32_t flags, uint32_t *sequence,
9050+ unsigned long *timeout_jiffies);
9051+extern void psb_fence_error(struct drm_device *dev,
9052+ uint32_t class,
9053+ uint32_t sequence, uint32_t type, int error);
9054+extern int psb_ttm_fence_device_init(struct ttm_fence_device *fdev);
9055+
9056+/* MSVDX/Topaz stuff */
9057+extern int lnc_video_frameskip(struct drm_device *dev,
9058+ uint64_t user_pointer);
9059+extern int lnc_video_getparam(struct drm_device *dev, void *data,
9060+ struct drm_file *file_priv);
9061+extern int psb_try_power_down_topaz(struct drm_device *dev);
9062+extern int psb_try_power_down_msvdx(struct drm_device *dev);
9063+
9064+/*
9065+ *psb_gtt.c
9066+ */
9067+extern int psb_gtt_init(struct psb_gtt *pg, int resume);
9068+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
9069+ unsigned offset_pages, unsigned num_pages,
9070+ unsigned desired_tile_stride,
9071+ unsigned hw_tile_stride, int type);
9072+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
9073+ unsigned num_pages,
9074+ unsigned desired_tile_stride,
9075+ unsigned hw_tile_stride);
9076+
9077+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
9078+extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
9079+
9080+/*
9081+ *psb_fb.c
9082+ */
9083+extern int psbfb_probed(struct drm_device *dev);
9084+extern int psbfb_remove(struct drm_device *dev,
9085+ struct drm_framebuffer *fb);
9086+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
9087+ struct drm_file *file_priv);
9088+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
9089+ struct drm_file *file_priv);
9090+
9091+/*
9092+ *psb_reset.c
9093+ */
9094+
9095+extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d);
9096+extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
9097+extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
9098+extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
9099+extern void psb_lid_timer_init(struct drm_psb_private * dev_priv);
9100+extern void psb_lid_timer_takedown(struct drm_psb_private * dev_priv);
9101+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
9102+
9103+/*
9104+ *psb_xhw.c
9105+ */
9106+
9107+extern int psb_xhw_ioctl(struct drm_device *dev, void *data,
9108+ struct drm_file *file_priv);
9109+extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
9110+ struct drm_file *file_priv);
9111+extern int psb_xhw_init(struct drm_device *dev);
9112+extern void psb_xhw_takedown(struct drm_psb_private *dev_priv);
9113+extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
9114+ struct drm_file *file_priv, int closing);
9115+extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
9116+ struct psb_xhw_buf *buf,
9117+ uint32_t fire_flags,
9118+ uint32_t hw_context,
9119+ uint32_t *cookie,
9120+ uint32_t *oom_cmds,
9121+ uint32_t num_oom_cmds,
9122+ uint32_t offset,
9123+ uint32_t engine, uint32_t flags);
9124+extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
9125+ struct psb_xhw_buf *buf,
9126+ uint32_t fire_flags);
9127+extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
9128+ struct psb_xhw_buf *buf, uint32_t w,
9129+ uint32_t h, uint32_t *hw_cookie,
9130+ uint32_t *bo_size, uint32_t *clear_p_start,
9131+ uint32_t *clear_num_pages);
9132+
9133+extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
9134+ struct psb_xhw_buf *buf);
9135+extern int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
9136+ struct psb_xhw_buf *buf, uint32_t *value);
9137+extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
9138+ struct psb_xhw_buf *buf,
9139+ uint32_t pages,
9140+ uint32_t * hw_cookie,
9141+ uint32_t * size,
9142+ uint32_t * ta_min_size);
9143+extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
9144+ struct psb_xhw_buf *buf, uint32_t *cookie);
9145+extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
9146+ struct psb_xhw_buf *buf,
9147+ uint32_t *cookie,
9148+ uint32_t *bca,
9149+ uint32_t *rca, uint32_t *flags);
9150+extern int psb_xhw_vistest(struct drm_psb_private *dev_priv,
9151+ struct psb_xhw_buf *buf);
9152+extern int psb_xhw_handler(struct drm_psb_private *dev_priv);
9153+extern int psb_xhw_resume(struct drm_psb_private *dev_priv,
9154+ struct psb_xhw_buf *buf);
9155+extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
9156+ struct psb_xhw_buf *buf, uint32_t *cookie);
9157+extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
9158+ struct psb_xhw_buf *buf,
9159+ uint32_t flags,
9160+ uint32_t param_offset,
9161+ uint32_t pt_offset, uint32_t *hw_cookie);
9162+extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
9163+ struct psb_xhw_buf *buf);
9164+
9165+/*
9166+ *psb_schedule.c: HW bug fixing.
9167+ */
9168+
9169+#ifdef FIX_TG_16
9170+
9171+extern void psb_2d_unlock(struct drm_psb_private *dev_priv);
9172+extern void psb_2d_lock(struct drm_psb_private *dev_priv);
9173+extern int psb_2d_trylock(struct drm_psb_private *dev_priv);
9174+extern void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv);
9175+extern int psb_2d_trylock(struct drm_psb_private *dev_priv);
9176+extern void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
9177+#else
9178+
9179+#define psb_2d_lock(_dev_priv) mutex_lock(&(_dev_priv)->mutex_2d)
9180+#define psb_2d_unlock(_dev_priv) mutex_unlock(&(_dev_priv)->mutex_2d)
9181+
9182+#endif
9183+
9184+/* modesetting */
9185+extern void psb_modeset_init(struct drm_device *dev);
9186+extern void psb_modeset_cleanup(struct drm_device *dev);
9187+
9188+/* psb_bl.c */
9189+int psb_backlight_init(struct drm_device *dev);
9190+void psb_backlight_exit(void);
9191+int psb_set_brightness(struct backlight_device *bd);
9192+int psb_get_brightness(struct backlight_device *bd);
9193+
9194+/*
9195+ *Utilities
9196+ */
9197+#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
9198+
9199+static inline u32 MSG_READ32(uint port, uint offset)
9200+{
9201+ int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
9202+ outl(0x800000D0, 0xCF8);
9203+ outl(mcr, 0xCFC);
9204+ outl(0x800000D4, 0xCF8);
9205+ return inl(0xcfc);
9206+}
9207+static inline void MSG_WRITE32(uint port, uint offset, u32 value)
9208+{
9209+ int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
9210+ outl(0x800000D4, 0xCF8);
9211+ outl(value, 0xcfc);
9212+ outl(0x800000D0, 0xCF8);
9213+ outl(mcr, 0xCFC);
9214+}
9215+
9216+static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
9217+{
9218+ struct drm_psb_private *dev_priv = dev->dev_private;
9219+
9220+ return ioread32(dev_priv->vdc_reg + (reg));
9221+}
9222+
9223+#define REG_READ(reg) REGISTER_READ(dev, (reg))
9224+static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
9225+ uint32_t val)
9226+{
9227+ struct drm_psb_private *dev_priv = dev->dev_private;
9228+
9229+ iowrite32((val), dev_priv->vdc_reg + (reg));
9230+}
9231+
9232+#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
9233+
9234+static inline void REGISTER_WRITE16(struct drm_device *dev,
9235+ uint32_t reg, uint32_t val)
9236+{
9237+ struct drm_psb_private *dev_priv = dev->dev_private;
9238+
9239+ iowrite16((val), dev_priv->vdc_reg + (reg));
9240+}
9241+
9242+#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val))
9243+
9244+static inline void REGISTER_WRITE8(struct drm_device *dev,
9245+ uint32_t reg, uint32_t val)
9246+{
9247+ struct drm_psb_private *dev_priv = dev->dev_private;
9248+
9249+ iowrite8((val), dev_priv->vdc_reg + (reg));
9250+}
9251+
9252+#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
9253+
9254+#define PSB_ALIGN_TO(_val, _align) \
9255+ (((_val) + ((_align) - 1)) & ~((_align) - 1))
9256+#define PSB_WVDC32(_val, _offs) \
9257+ iowrite32(_val, dev_priv->vdc_reg + (_offs))
9258+#define PSB_RVDC32(_offs) \
9259+ ioread32(dev_priv->vdc_reg + (_offs))
9260+
9261+//#define TRAP_SGX_PM_FAULT 1
9262+#ifdef TRAP_SGX_PM_FAULT
9263+#define PSB_WSGX32(_val, _offs) \
9264+{ \
9265+ if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
9266+ printk(KERN_ERR "access sgx when it's off!!(WRITE) %s, %d\n", \
9267+ __FILE__, __LINE__); \
9268+ mdelay(1000); \
9269+ } \
9270+ iowrite32(_val, dev_priv->sgx_reg + (_offs)); \
9271+}
9272+#define PSB_RSGX32(_offs) \
9273+({ \
9274+ if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
9275+ printk(KERN_ERR "access sgx when it's off!! (READ) %s, %d\n", \
9276+ __FILE__, __LINE__); \
9277+ mdelay(1000); \
9278+ } \
9279+ ioread32(dev_priv->sgx_reg + (_offs)); \
9280+})
9281+#else
9282+#define PSB_WSGX32(_val, _offs) \
9283+ iowrite32(_val, dev_priv->sgx_reg + (_offs))
9284+#define PSB_RSGX32(_offs) \
9285+ ioread32(dev_priv->sgx_reg + (_offs))
9286+#endif
9287+
9288+#define PSB_WMSVDX32(_val, _offs) \
9289+ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
9290+#define PSB_RMSVDX32(_offs) \
9291+ ioread32(dev_priv->msvdx_reg + (_offs))
9292+
9293+#define PSB_ALPL(_val, _base) \
9294+ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
9295+#define PSB_ALPLM(_val, _base) \
9296+ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
9297+
9298+#define PSB_D_RENDER (1 << 16)
9299+
9300+#define PSB_D_GENERAL (1 << 0)
9301+#define PSB_D_INIT (1 << 1)
9302+#define PSB_D_IRQ (1 << 2)
9303+#define PSB_D_FW (1 << 3)
9304+#define PSB_D_PERF (1 << 4)
9305+#define PSB_D_TMP (1 << 5)
9306+#define PSB_D_PM (1 << 6)
9307+
9308+extern int drm_psb_debug;
9309+extern int drm_psb_no_fb;
9310+extern int drm_psb_disable_vsync;
9311+extern int drm_idle_check_interval;
9312+extern int drm_psb_ospm;
9313+
9314+#define PSB_DEBUG_FW(_fmt, _arg...) \
9315+ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
9316+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
9317+ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
9318+#define PSB_DEBUG_INIT(_fmt, _arg...) \
9319+ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
9320+#define PSB_DEBUG_IRQ(_fmt, _arg...) \
9321+ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
9322+#define PSB_DEBUG_RENDER(_fmt, _arg...) \
9323+ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
9324+#define PSB_DEBUG_PERF(_fmt, _arg...) \
9325+ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
9326+#define PSB_DEBUG_TMP(_fmt, _arg...) \
9327+ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
9328+#define PSB_DEBUG_PM(_fmt, _arg...) \
9329+ PSB_DEBUG(PSB_D_PM, _fmt, ##_arg)
9330+
9331+#if DRM_DEBUG_CODE
9332+#define PSB_DEBUG(_flag, _fmt, _arg...) \
9333+ do { \
9334+ if (unlikely((_flag) & drm_psb_debug)) \
9335+ printk(KERN_DEBUG \
9336+ "[psb:0x%02x:%s] " _fmt , _flag, \
9337+ __func__ , ##_arg); \
9338+ } while (0)
9339+#else
9340+#define PSB_DEBUG(_fmt, _arg...) do { } while (0)
9341+#endif
9342+
9343+#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
9344+ ((dev)->pci_device == 0x8109))
9345+
9346+#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
9347+
9348+#endif
9349diff --git a/drivers/gpu/drm/psb/psb_fb.c b/drivers/gpu/drm/psb/psb_fb.c
9350new file mode 100644
9351index 0000000..a29694e
9352--- /dev/null
9353+++ b/drivers/gpu/drm/psb/psb_fb.c
9354@@ -0,0 +1,1833 @@
9355+/**************************************************************************
9356+ * Copyright (c) 2007, Intel Corporation.
9357+ * All Rights Reserved.
9358+ *
9359+ * This program is free software; you can redistribute it and/or modify it
9360+ * under the terms and conditions of the GNU General Public License,
9361+ * version 2, as published by the Free Software Foundation.
9362+ *
9363+ * This program is distributed in the hope it will be useful, but WITHOUT
9364+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9365+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9366+ * more details.
9367+ *
9368+ * You should have received a copy of the GNU General Public License along with
9369+ * this program; if not, write to the Free Software Foundation, Inc.,
9370+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9371+ *
9372+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
9373+ * develop this driver.
9374+ *
9375+ **************************************************************************/
9376+
9377+#include <linux/module.h>
9378+#include <linux/kernel.h>
9379+#include <linux/errno.h>
9380+#include <linux/string.h>
9381+#include <linux/mm.h>
9382+#include <linux/tty.h>
9383+#include <linux/slab.h>
9384+#include <linux/delay.h>
9385+#include <linux/fb.h>
9386+#include <linux/init.h>
9387+#include <linux/console.h>
9388+
9389+#include <drm/drmP.h>
9390+#include <drm/drm.h>
9391+#include <drm/drm_crtc.h>
9392+
9393+#include "psb_drv.h"
9394+#include "psb_intel_reg.h"
9395+#include "psb_intel_drv.h"
9396+#include "ttm/ttm_userobj_api.h"
9397+#include "psb_fb.h"
9398+#include "psb_sgx.h"
9399+#include "psb_powermgmt.h"
9400+
9401+static int fill_fb_bitfield(struct fb_var_screeninfo *var, int depth)
9402+{
9403+ switch (depth) {
9404+ case 8:
9405+ var->red.offset = 0;
9406+ var->green.offset = 0;
9407+ var->blue.offset = 0;
9408+ var->red.length = 8;
9409+ var->green.length = 8;
9410+ var->blue.length = 8;
9411+ var->transp.length = 0;
9412+ var->transp.offset = 0;
9413+ break;
9414+ case 15:
9415+ var->red.offset = 10;
9416+ var->green.offset = 5;
9417+ var->blue.offset = 0;
9418+ var->red.length = 5;
9419+ var->green.length = 5;
9420+ var->blue.length = 5;
9421+ var->transp.length = 1;
9422+ var->transp.offset = 15;
9423+ break;
9424+ case 16:
9425+ var->red.offset = 11;
9426+ var->green.offset = 5;
9427+ var->blue.offset = 0;
9428+ var->red.length = 5;
9429+ var->green.length = 6;
9430+ var->blue.length = 5;
9431+ var->transp.length = 0;
9432+ var->transp.offset = 0;
9433+ break;
9434+ case 24:
9435+ var->red.offset = 16;
9436+ var->green.offset = 8;
9437+ var->blue.offset = 0;
9438+ var->red.length = 8;
9439+ var->green.length = 8;
9440+ var->blue.length = 8;
9441+ var->transp.length = 0;
9442+ var->transp.offset = 0;
9443+ break;
9444+ case 32:
9445+ var->red.offset = 16;
9446+ var->green.offset = 8;
9447+ var->blue.offset = 0;
9448+ var->red.length = 8;
9449+ var->green.length = 8;
9450+ var->blue.length = 8;
9451+ var->transp.length = 8;
9452+ var->transp.offset = 24;
9453+ break;
9454+ default:
9455+ return -EINVAL;
9456+ }
9457+
9458+ return 0;
9459+}
9460+
9461+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
9462+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
9463+ struct drm_file *file_priv,
9464+ unsigned int *handle);
9465+
9466+static const struct drm_framebuffer_funcs psb_fb_funcs = {
9467+ .destroy = psb_user_framebuffer_destroy,
9468+ .create_handle = psb_user_framebuffer_create_handle,
9469+};
9470+
9471+struct psbfb_par {
9472+ struct drm_device *dev;
9473+ struct psb_framebuffer *psbfb;
9474+
9475+ int dpms_state;
9476+
9477+ int crtc_count;
9478+ /* crtc currently bound to this */
9479+ uint32_t crtc_ids[2];
9480+};
9481+
9482+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
9483+
9484+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
9485+ unsigned blue, unsigned transp,
9486+ struct fb_info *info)
9487+{
9488+ struct psbfb_par *par = info->par;
9489+ struct drm_framebuffer *fb = &par->psbfb->base;
9490+ uint32_t v;
9491+
9492+ if (!fb)
9493+ return -ENOMEM;
9494+
9495+ if (regno > 255)
9496+ return 1;
9497+
9498+#if 0 /* JB: not drop, check that this works */
9499+ if (fb->bits_per_pixel == 8) {
9500+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9501+ head) {
9502+ for (i = 0; i < par->crtc_count; i++)
9503+ if (crtc->base.id == par->crtc_ids[i])
9504+ break;
9505+
9506+ if (i == par->crtc_count)
9507+ continue;
9508+
9509+ if (crtc->funcs->gamma_set)
9510+ crtc->funcs->gamma_set(crtc, red, green,
9511+ blue, regno);
9512+ }
9513+ return 0;
9514+ }
9515+#endif
9516+
9517+ red = CMAP_TOHW(red, info->var.red.length);
9518+ blue = CMAP_TOHW(blue, info->var.blue.length);
9519+ green = CMAP_TOHW(green, info->var.green.length);
9520+ transp = CMAP_TOHW(transp, info->var.transp.length);
9521+
9522+ v = (red << info->var.red.offset) |
9523+ (green << info->var.green.offset) |
9524+ (blue << info->var.blue.offset) |
9525+ (transp << info->var.transp.offset);
9526+
9527+ if (regno < 16) {
9528+ switch (fb->bits_per_pixel) {
9529+ case 16:
9530+ ((uint32_t *) info->pseudo_palette)[regno] = v;
9531+ break;
9532+ case 24:
9533+ case 32:
9534+ ((uint32_t *) info->pseudo_palette)[regno] = v;
9535+ break;
9536+ }
9537+ }
9538+
9539+ return 0;
9540+}
9541+
9542+static struct drm_display_mode *psbfb_find_first_mode(struct
9543+ fb_var_screeninfo
9544+ *var,
9545+ struct fb_info *info,
9546+ struct drm_crtc
9547+ *crtc)
9548+{
9549+ struct psbfb_par *par = info->par;
9550+ struct drm_device *dev = par->dev;
9551+ struct drm_display_mode *drm_mode;
9552+ struct drm_display_mode *preferred_mode = NULL;
9553+ struct drm_display_mode *last_mode = NULL;
9554+ struct drm_connector *connector;
9555+ int found;
9556+
9557+ found = 0;
9558+ list_for_each_entry(connector, &dev->mode_config.connector_list,
9559+ head) {
9560+ if (connector->encoder && connector->encoder->crtc == crtc) {
9561+ found = 1;
9562+ break;
9563+ }
9564+ }
9565+
9566+ /* found no connector, bail */
9567+ if (!found)
9568+ return NULL;
9569+
9570+ found = 0;
9571+ list_for_each_entry(drm_mode, &connector->modes, head) {
9572+ if (drm_mode->hdisplay == var->xres &&
9573+ drm_mode->vdisplay == var->yres
9574+ && drm_mode->clock != 0) {
9575+ found = 1;
9576+ last_mode = drm_mode;
9577+ if(IS_POULSBO(dev)) {
9578+ if(last_mode->type & DRM_MODE_TYPE_PREFERRED) {
9579+ preferred_mode = last_mode;
9580+ }
9581+ }
9582+ }
9583+ }
9584+
9585+ /* No mode matching mode found */
9586+ if (!found)
9587+ return NULL;
9588+
9589+ if(IS_POULSBO(dev)) {
9590+ if(preferred_mode)
9591+ return preferred_mode;
9592+ else
9593+ return last_mode;
9594+ } else {
9595+ return last_mode;
9596+ }
9597+}
9598+
9599+static int psbfb_check_var(struct fb_var_screeninfo *var,
9600+ struct fb_info *info)
9601+{
9602+ struct psbfb_par *par = info->par;
9603+ struct psb_framebuffer *psbfb = par->psbfb;
9604+ struct drm_device *dev = par->dev;
9605+ int ret;
9606+ int depth;
9607+ int pitch;
9608+ int bpp = var->bits_per_pixel;
9609+
9610+ if (!psbfb)
9611+ return -ENOMEM;
9612+
9613+ if (!var->pixclock)
9614+ return -EINVAL;
9615+
9616+ /* don't support virtuals for now */
9617+ if (var->xres_virtual > var->xres)
9618+ return -EINVAL;
9619+
9620+ if (var->yres_virtual > var->yres)
9621+ return -EINVAL;
9622+
9623+ switch (bpp) {
9624+#if 0 /* JB: for now only support true color */
9625+ case 8:
9626+ depth = 8;
9627+ break;
9628+#endif
9629+ case 16:
9630+ depth = (var->green.length == 6) ? 16 : 15;
9631+ break;
9632+ case 24: /* assume this is 32bpp / depth 24 */
9633+ bpp = 32;
9634+ /* fallthrough */
9635+ case 32:
9636+ depth = (var->transp.length > 0) ? 32 : 24;
9637+ break;
9638+ default:
9639+ return -EINVAL;
9640+ }
9641+
9642+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
9643+
9644+ /* Check that we can resize */
9645+ if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) {
9646+#if 1
9647+ /* Need to resize the fb object.
9648+ * But the generic fbdev code doesn't really understand
9649+ * that we can do this. So disable for now.
9650+ */
9651+ DRM_INFO("Can't support requested size, too big!\n");
9652+ return -EINVAL;
9653+#else
9654+ struct drm_psb_private *dev_priv = psb_priv(dev);
9655+ struct ttm_bo_device *bdev = &dev_priv->bdev;
9656+ struct ttm_buffer_object *fbo = NULL;
9657+ struct ttm_bo_kmap_obj tmp_kmap;
9658+
9659+ /* a temporary BO to check if we could resize in setpar.
9660+ * Therefore no need to set NO_EVICT.
9661+ */
9662+ ret = ttm_buffer_object_create(bdev,
9663+ pitch * var->yres,
9664+ ttm_bo_type_kernel,
9665+ TTM_PL_FLAG_TT |
9666+ TTM_PL_FLAG_VRAM |
9667+ TTM_PL_FLAG_NO_EVICT,
9668+ 0, 0, &fbo);
9669+ if (ret || !fbo)
9670+ return -ENOMEM;
9671+
9672+ ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
9673+ if (ret) {
9674+ ttm_bo_usage_deref_unlocked(&fbo);
9675+ return -EINVAL;
9676+ }
9677+
9678+ ttm_bo_kunmap(&tmp_kmap);
9679+ /* destroy our current fbo! */
9680+ ttm_bo_usage_deref_unlocked(&fbo);
9681+#endif
9682+ }
9683+
9684+ ret = fill_fb_bitfield(var, depth);
9685+ if (ret)
9686+ return ret;
9687+
9688+#if 1
9689+ /* Here we walk the output mode list and look for modes. If we haven't
9690+ * got it, then bail. Not very nice, so this is disabled.
9691+ * In the set_par code, we create our mode based on the incoming
9692+ * parameters. Nicer, but may not be desired by some.
9693+ */
9694+ {
9695+ struct drm_crtc *crtc;
9696+ int i;
9697+
9698+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9699+ head) {
9700+ struct psb_intel_crtc *psb_intel_crtc =
9701+ to_psb_intel_crtc(crtc);
9702+
9703+ for (i = 0; i < par->crtc_count; i++)
9704+ if (crtc->base.id == par->crtc_ids[i])
9705+ break;
9706+
9707+ if (i == par->crtc_count)
9708+ continue;
9709+
9710+ if (psb_intel_crtc->mode_set.num_connectors == 0)
9711+ continue;
9712+
9713+ if (!psbfb_find_first_mode(&info->var, info, crtc))
9714+ return -EINVAL;
9715+ }
9716+ }
9717+#else
9718+ (void) i;
9719+ (void) dev; /* silence warnings */
9720+ (void) crtc;
9721+ (void) drm_mode;
9722+ (void) connector;
9723+#endif
9724+
9725+ return 0;
9726+}
9727+
9728+/* this will let fbcon do the mode init */
9729+static int psbfb_set_par(struct fb_info *info)
9730+{
9731+ struct psbfb_par *par = info->par;
9732+ struct psb_framebuffer *psbfb = par->psbfb;
9733+ struct drm_framebuffer *fb = &psbfb->base;
9734+ struct drm_device *dev = par->dev;
9735+ struct fb_var_screeninfo *var = &info->var;
9736+ struct drm_psb_private *dev_priv = dev->dev_private;
9737+ struct drm_display_mode *drm_mode;
9738+ int pitch;
9739+ int depth;
9740+ int bpp = var->bits_per_pixel;
9741+
9742+ if (!fb)
9743+ return -ENOMEM;
9744+
9745+ switch (bpp) {
9746+ case 8:
9747+ depth = 8;
9748+ break;
9749+ case 16:
9750+ depth = (var->green.length == 6) ? 16 : 15;
9751+ break;
9752+ case 24: /* assume this is 32bpp / depth 24 */
9753+ bpp = 32;
9754+ /* fallthrough */
9755+ case 32:
9756+ depth = (var->transp.length > 0) ? 32 : 24;
9757+ break;
9758+ default:
9759+ DRM_ERROR("Illegal BPP\n");
9760+ return -EINVAL;
9761+ }
9762+
9763+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
9764+
9765+ if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) {
9766+#if 1
9767+ /* Need to resize the fb object.
9768+ * But the generic fbdev code doesn't really understand
9769+ * that we can do this. So disable for now.
9770+ */
9771+ DRM_INFO("Can't support requested size, too big!\n");
9772+ return -EINVAL;
9773+#else
9774+ int ret;
9775+ struct ttm_buffer_object *fbo = NULL, *tfbo;
9776+ struct ttm_bo_kmap_obj tmp_kmap, tkmap;
9777+
9778+ ret = ttm_buffer_object_create(bdev,
9779+ pitch * var->yres,
9780+ ttm_bo_type_kernel,
9781+ TTM_PL_FLAG_MEM_TT |
9782+ TTM_PL_FLAG_MEM_VRAM |
9783+ TTM_PL_FLAG_NO_EVICT,
9784+ 0, 0, &fbo);
9785+ if (ret || !fbo) {
9786+ DRM_ERROR
9787+ ("failed to allocate new resized framebuffer\n");
9788+ return -ENOMEM;
9789+ }
9790+
9791+ ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
9792+ if (ret) {
9793+ DRM_ERROR("failed to kmap framebuffer.\n");
9794+ ttm_bo_usage_deref_unlocked(&fbo);
9795+ return -EINVAL;
9796+ }
9797+
9798+ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n",
9799+ fb->width, fb->height, fb->offset, fbo);
9800+
9801+ /* set new screen base */
9802+ info->screen_base = tmp_kmap.virtual;
9803+
9804+ tkmap = fb->kmap;
9805+ fb->kmap = tmp_kmap;
9806+ ttm_bo_kunmap(&tkmap);
9807+
9808+ tfbo = fb->bo;
9809+ fb->bo = fbo;
9810+ ttm_bo_usage_deref_unlocked(&tfbo);
9811+#endif
9812+ }
9813+
9814+ psbfb->offset = psbfb->bo->offset - dev_priv->pg->gatt_start;
9815+ fb->width = var->xres;
9816+ fb->height = var->yres;
9817+ fb->bits_per_pixel = bpp;
9818+ fb->pitch = pitch;
9819+ fb->depth = depth;
9820+
9821+ info->fix.line_length = psbfb->base.pitch;
9822+ info->fix.visual =
9823+ (psbfb->base.depth ==
9824+ 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
9825+
9826+ /* some fbdev's apps don't want these to change */
9827+ info->fix.smem_start = dev->mode_config.fb_base + psbfb->offset;
9828+
9829+#if 0
9830+ /* relates to resize - disable */
9831+ info->fix.smem_len = info->fix.line_length * var->yres;
9832+ info->screen_size = info->fix.smem_len; /* ??? */
9833+#endif
9834+
9835+ /* Should we walk the output's modelist or just create our own ???
9836+ * For now, we create and destroy a mode based on the incoming
9837+ * parameters. But there's commented out code below which scans
9838+ * the output list too.
9839+ */
9840+#if 1
9841+ /* This code is now in the for loop futher down. */
9842+#endif
9843+
9844+ {
9845+ struct drm_crtc *crtc;
9846+ int ret;
9847+ int i;
9848+
9849+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9850+ head) {
9851+ struct psb_intel_crtc *psb_intel_crtc =
9852+ to_psb_intel_crtc(crtc);
9853+
9854+ for (i = 0; i < par->crtc_count; i++)
9855+ if (crtc->base.id == par->crtc_ids[i])
9856+ break;
9857+
9858+ if (i == par->crtc_count)
9859+ continue;
9860+
9861+ if (psb_intel_crtc->mode_set.num_connectors == 0)
9862+ continue;
9863+
9864+#if 1
9865+ drm_mode =
9866+ psbfb_find_first_mode(&info->var, info, crtc);
9867+ if (!drm_mode)
9868+ DRM_ERROR("No matching mode found\n");
9869+ psb_intel_crtc->mode_set.mode = drm_mode;
9870+#endif
9871+
9872+#if 0 /* FIXME: TH */
9873+ if (crtc->fb == psb_intel_crtc->mode_set.fb) {
9874+#endif
9875+ DRM_DEBUG
9876+ ("setting mode on crtc %p with id %u\n",
9877+ crtc, crtc->base.id);
9878+ ret =
9879+ crtc->funcs->
9880+ set_config(&psb_intel_crtc->mode_set);
9881+ if (ret) {
9882+ DRM_ERROR("Failed setting mode\n");
9883+ return ret;
9884+ }
9885+#if 0
9886+ }
9887+#endif
9888+ }
9889+ DRM_DEBUG("Set par returned OK.\n");
9890+ return 0;
9891+ }
9892+
9893+ return 0;
9894+}
9895+#if 0
9896+static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
9897+ unsigned size)
9898+{
9899+ int ret = 0;
9900+ int i;
9901+ unsigned submit_size;
9902+
9903+ while (size > 0) {
9904+ submit_size = (size < 0x60) ? size : 0x60;
9905+ size -= submit_size;
9906+ ret = psb_2d_wait_available(dev_priv, submit_size);
9907+ if (ret)
9908+ return ret;
9909+
9910+ submit_size <<= 2;
9911+ for (i = 0; i < submit_size; i += 4) {
9912+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
9913+ }
9914+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
9915+ }
9916+ return 0;
9917+}
9918+
9919+static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
9920+ uint32_t dst_offset, uint32_t dst_stride,
9921+ uint32_t dst_format, uint16_t dst_x,
9922+ uint16_t dst_y, uint16_t size_x,
9923+ uint16_t size_y, uint32_t fill)
9924+{
9925+ uint32_t buffer[10];
9926+ uint32_t *buf;
9927+
9928+ buf = buffer;
9929+
9930+ *buf++ = PSB_2D_FENCE_BH;
9931+
9932+ *buf++ =
9933+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
9934+ PSB_2D_DST_STRIDE_SHIFT);
9935+ *buf++ = dst_offset;
9936+
9937+ *buf++ =
9938+ PSB_2D_BLIT_BH |
9939+ PSB_2D_ROT_NONE |
9940+ PSB_2D_COPYORDER_TL2BR |
9941+ PSB_2D_DSTCK_DISABLE |
9942+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
9943+
9944+ *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
9945+ *buf++ =
9946+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
9947+ PSB_2D_DST_YSTART_SHIFT);
9948+ *buf++ =
9949+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
9950+ PSB_2D_DST_YSIZE_SHIFT);
9951+ *buf++ = PSB_2D_FLUSH_BH;
9952+
9953+ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
9954+}
9955+
9956+static void psbfb_fillrect_accel(struct fb_info *info,
9957+ const struct fb_fillrect *r)
9958+{
9959+ struct psbfb_par *par = info->par;
9960+ struct psb_framebuffer *psbfb = par->psbfb;
9961+ struct drm_framebuffer *fb = &psbfb->base;
9962+ struct drm_psb_private *dev_priv = par->dev->dev_private;
9963+ uint32_t offset;
9964+ uint32_t stride;
9965+ uint32_t format;
9966+
9967+ if (!fb)
9968+ return;
9969+
9970+ offset = psbfb->offset;
9971+ stride = fb->pitch;
9972+
9973+ switch (fb->depth) {
9974+ case 8:
9975+ format = PSB_2D_DST_332RGB;
9976+ break;
9977+ case 15:
9978+ format = PSB_2D_DST_555RGB;
9979+ break;
9980+ case 16:
9981+ format = PSB_2D_DST_565RGB;
9982+ break;
9983+ case 24:
9984+ case 32:
9985+ /* this is wrong but since we don't do blending its okay */
9986+ format = PSB_2D_DST_8888ARGB;
9987+ break;
9988+ default:
9989+ /* software fallback */
9990+ cfb_fillrect(info, r);
9991+ return;
9992+ }
9993+
9994+ psb_accel_2d_fillrect(dev_priv,
9995+ offset, stride, format,
9996+ r->dx, r->dy, r->width, r->height, r->color);
9997+}
9998+
9999+static void psbfb_fillrect(struct fb_info *info,
10000+ const struct fb_fillrect *rect)
10001+{
10002+ struct psbfb_par *par = info->par;
10003+ struct drm_device *dev = par->dev;
10004+ struct drm_psb_private *dev_priv = dev->dev_private;
10005+
10006+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
10007+ return;
10008+
10009+ if (info->flags & FBINFO_HWACCEL_DISABLED)
10010+ return cfb_fillrect(info, rect);
10011+ /*
10012+ * psbfb_fillrect is atomic so need to do instantaneous check of
10013+ * power on
10014+ */
10015+ if (powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND) || powermgmt_is_resume_in_progress(PSB_GRAPHICS_ISLAND) ||
10016+ !powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND))
10017+ return cfb_fillrect(info, rect);
10018+ if (psb_2d_trylock(dev_priv)) {
10019+ psbfb_fillrect_accel(info, rect);
10020+ psb_2d_unlock(dev_priv);
10021+ if (drm_psb_ospm && IS_MRST(dev))
10022+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
10023+ } else
10024+ cfb_fillrect(info, rect);
10025+}
10026+
10027+uint32_t psb_accel_2d_copy_direction(int xdir, int ydir)
10028+{
10029+ if (xdir < 0)
10030+ return (ydir <
10031+ 0) ? PSB_2D_COPYORDER_BR2TL :
10032+ PSB_2D_COPYORDER_TR2BL;
10033+ else
10034+ return (ydir <
10035+ 0) ? PSB_2D_COPYORDER_BL2TR :
10036+ PSB_2D_COPYORDER_TL2BR;
10037+}
10038+
10039+/*
10040+ * @srcOffset in bytes
10041+ * @srcStride in bytes
10042+ * @srcFormat psb 2D format defines
10043+ * @dstOffset in bytes
10044+ * @dstStride in bytes
10045+ * @dstFormat psb 2D format defines
10046+ * @srcX offset in pixels
10047+ * @srcY offset in pixels
10048+ * @dstX offset in pixels
10049+ * @dstY offset in pixels
10050+ * @sizeX of the copied area
10051+ * @sizeY of the copied area
10052+ */
10053+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
10054+ uint32_t src_offset, uint32_t src_stride,
10055+ uint32_t src_format, uint32_t dst_offset,
10056+ uint32_t dst_stride, uint32_t dst_format,
10057+ uint16_t src_x, uint16_t src_y,
10058+ uint16_t dst_x, uint16_t dst_y,
10059+ uint16_t size_x, uint16_t size_y)
10060+{
10061+ uint32_t blit_cmd;
10062+ uint32_t buffer[10];
10063+ uint32_t *buf;
10064+ uint32_t direction;
10065+
10066+ buf = buffer;
10067+
10068+ direction =
10069+ psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
10070+
10071+ if (direction == PSB_2D_COPYORDER_BR2TL ||
10072+ direction == PSB_2D_COPYORDER_TR2BL) {
10073+ src_x += size_x - 1;
10074+ dst_x += size_x - 1;
10075+ }
10076+ if (direction == PSB_2D_COPYORDER_BR2TL ||
10077+ direction == PSB_2D_COPYORDER_BL2TR) {
10078+ src_y += size_y - 1;
10079+ dst_y += size_y - 1;
10080+ }
10081+
10082+ blit_cmd =
10083+ PSB_2D_BLIT_BH |
10084+ PSB_2D_ROT_NONE |
10085+ PSB_2D_DSTCK_DISABLE |
10086+ PSB_2D_SRCCK_DISABLE |
10087+ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
10088+
10089+ *buf++ = PSB_2D_FENCE_BH;
10090+ *buf++ =
10091+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
10092+ PSB_2D_DST_STRIDE_SHIFT);
10093+ *buf++ = dst_offset;
10094+ *buf++ =
10095+ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
10096+ PSB_2D_SRC_STRIDE_SHIFT);
10097+ *buf++ = src_offset;
10098+ *buf++ =
10099+ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
10100+ (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
10101+ *buf++ = blit_cmd;
10102+ *buf++ =
10103+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
10104+ PSB_2D_DST_YSTART_SHIFT);
10105+ *buf++ =
10106+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
10107+ PSB_2D_DST_YSIZE_SHIFT);
10108+ *buf++ = PSB_2D_FLUSH_BH;
10109+
10110+ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
10111+}
10112+
10113+static void psbfb_copyarea_accel(struct fb_info *info,
10114+ const struct fb_copyarea *a)
10115+{
10116+ struct psbfb_par *par = info->par;
10117+ struct psb_framebuffer *psbfb = par->psbfb;
10118+ struct drm_framebuffer *fb = &psbfb->base;
10119+ struct drm_psb_private *dev_priv = par->dev->dev_private;
10120+ uint32_t offset;
10121+ uint32_t stride;
10122+ uint32_t src_format;
10123+ uint32_t dst_format;
10124+
10125+ if (!fb)
10126+ return;
10127+
10128+ offset = psbfb->offset;
10129+ stride = fb->pitch;
10130+
10131+ switch (fb->depth) {
10132+ case 8:
10133+ src_format = PSB_2D_SRC_332RGB;
10134+ dst_format = PSB_2D_DST_332RGB;
10135+ break;
10136+ case 15:
10137+ src_format = PSB_2D_SRC_555RGB;
10138+ dst_format = PSB_2D_DST_555RGB;
10139+ break;
10140+ case 16:
10141+ src_format = PSB_2D_SRC_565RGB;
10142+ dst_format = PSB_2D_DST_565RGB;
10143+ break;
10144+ case 24:
10145+ case 32:
10146+ /* this is wrong but since we don't do blending its okay */
10147+ src_format = PSB_2D_SRC_8888ARGB;
10148+ dst_format = PSB_2D_DST_8888ARGB;
10149+ break;
10150+ default:
10151+ /* software fallback */
10152+ cfb_copyarea(info, a);
10153+ return;
10154+ }
10155+
10156+ psb_accel_2d_copy(dev_priv,
10157+ offset, stride, src_format,
10158+ offset, stride, dst_format,
10159+ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
10160+}
10161+
10162+static void psbfb_copyarea(struct fb_info *info,
10163+ const struct fb_copyarea *region)
10164+{
10165+ struct psbfb_par *par = info->par;
10166+ struct drm_device *dev = par->dev;
10167+ struct drm_psb_private *dev_priv = dev->dev_private;
10168+
10169+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
10170+ return;
10171+
10172+ if (info->flags & FBINFO_HWACCEL_DISABLED)
10173+ return cfb_copyarea(info, region);
10174+ /*
10175+ * psbfb_copyarea is atomic so need to do instantaneous check of
10176+ * power on
10177+ */
10178+ if (powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND) || powermgmt_is_resume_in_progress(PSB_GRAPHICS_ISLAND) ||
10179+ !powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND))
10180+ return cfb_copyarea(info, region);
10181+
10182+ if (psb_2d_trylock(dev_priv)) {
10183+ psbfb_copyarea_accel(info, region);
10184+ psb_2d_unlock(dev_priv);
10185+ if (drm_psb_ospm && IS_MRST(dev))
10186+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
10187+ } else
10188+ cfb_copyarea(info, region);
10189+}
10190+#endif
10191+void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
10192+{
10193+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
10194+ return;
10195+
10196+ cfb_imageblit(info, image);
10197+}
10198+
10199+static void psbfb_onoff(struct fb_info *info, int dpms_mode)
10200+{
10201+ struct psbfb_par *par = info->par;
10202+ struct drm_device *dev = par->dev;
10203+ struct drm_crtc *crtc;
10204+ struct drm_encoder *encoder;
10205+ int i;
10206+
10207+ /*
10208+ * For each CRTC in this fb, find all associated encoders
10209+ * and turn them off, then turn off the CRTC.
10210+ */
10211+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10212+ struct drm_crtc_helper_funcs *crtc_funcs =
10213+ crtc->helper_private;
10214+
10215+ for (i = 0; i < par->crtc_count; i++)
10216+ if (crtc->base.id == par->crtc_ids[i])
10217+ break;
10218+
10219+ if (i == par->crtc_count)
10220+ continue;
10221+
10222+ if (dpms_mode == DRM_MODE_DPMS_ON)
10223+ crtc_funcs->dpms(crtc, dpms_mode);
10224+
10225+ /* Found a CRTC on this fb, now find encoders */
10226+ list_for_each_entry(encoder,
10227+ &dev->mode_config.encoder_list, head) {
10228+ if (encoder->crtc == crtc) {
10229+ struct drm_encoder_helper_funcs
10230+ *encoder_funcs;
10231+ encoder_funcs = encoder->helper_private;
10232+ encoder_funcs->dpms(encoder, dpms_mode);
10233+ }
10234+ }
10235+
10236+ if (dpms_mode == DRM_MODE_DPMS_OFF)
10237+ crtc_funcs->dpms(crtc, dpms_mode);
10238+ }
10239+}
10240+
10241+static int psbfb_blank(int blank_mode, struct fb_info *info)
10242+{
10243+ struct psbfb_par *par = info->par;
10244+
10245+ par->dpms_state = blank_mode;
10246+ PSB_DEBUG_PM("psbfb_blank \n");
10247+ switch (blank_mode) {
10248+ case FB_BLANK_UNBLANK:
10249+ psbfb_onoff(info, DRM_MODE_DPMS_ON);
10250+ break;
10251+ case FB_BLANK_NORMAL:
10252+ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
10253+ break;
10254+ case FB_BLANK_HSYNC_SUSPEND:
10255+ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
10256+ break;
10257+ case FB_BLANK_VSYNC_SUSPEND:
10258+ psbfb_onoff(info, DRM_MODE_DPMS_SUSPEND);
10259+ break;
10260+ case FB_BLANK_POWERDOWN:
10261+ psbfb_onoff(info, DRM_MODE_DPMS_OFF);
10262+ break;
10263+ }
10264+
10265+ return 0;
10266+}
10267+
10268+
10269+static int psbfb_kms_off(struct drm_device *dev, int suspend)
10270+{
10271+ struct drm_framebuffer *fb = 0;
10272+ DRM_DEBUG("psbfb_kms_off_ioctl\n");
10273+
10274+ mutex_lock(&dev->mode_config.mutex);
10275+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
10276+ struct fb_info *info = fb->fbdev;
10277+
10278+ if (suspend) {
10279+ fb_set_suspend(info, 1);
10280+ psbfb_blank(FB_BLANK_POWERDOWN, info);
10281+ }
10282+ }
10283+ mutex_unlock(&dev->mode_config.mutex);
10284+ return 0;
10285+}
10286+
10287+int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
10288+ struct drm_file *file_priv)
10289+{
10290+ int ret;
10291+
10292+ if (drm_psb_no_fb)
10293+ return 0;
10294+ acquire_console_sem();
10295+ ret = psbfb_kms_off(dev, 0);
10296+ release_console_sem();
10297+
10298+ return ret;
10299+}
10300+
10301+static int psbfb_kms_on(struct drm_device *dev, int resume)
10302+{
10303+ struct drm_framebuffer *fb = 0;
10304+
10305+ DRM_DEBUG("psbfb_kms_on_ioctl\n");
10306+
10307+ mutex_lock(&dev->mode_config.mutex);
10308+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
10309+ struct fb_info *info = fb->fbdev;
10310+
10311+ if (resume) {
10312+ fb_set_suspend(info, 0);
10313+ psbfb_blank(FB_BLANK_UNBLANK, info);
10314+ }
10315+
10316+ }
10317+ mutex_unlock(&dev->mode_config.mutex);
10318+
10319+ return 0;
10320+}
10321+
10322+int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
10323+ struct drm_file *file_priv)
10324+{
10325+ int ret;
10326+
10327+ if (drm_psb_no_fb)
10328+ return 0;
10329+ acquire_console_sem();
10330+ ret = psbfb_kms_on(dev, 0);
10331+ release_console_sem();
10332+ drm_helper_disable_unused_functions(dev);
10333+ return ret;
10334+}
10335+
10336+void psbfb_suspend(struct drm_device *dev)
10337+{
10338+ acquire_console_sem();
10339+ psbfb_kms_off(dev, 1);
10340+ release_console_sem();
10341+}
10342+
10343+void psbfb_resume(struct drm_device *dev)
10344+{
10345+ acquire_console_sem();
10346+ psbfb_kms_on(dev, 1);
10347+ release_console_sem();
10348+ drm_helper_disable_unused_functions(dev);
10349+}
10350+
10351+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
10352+{
10353+ struct psbfb_par *par = info->par;
10354+ struct psb_framebuffer *psbfb = par->psbfb;
10355+ struct ttm_buffer_object *bo = psbfb->bo;
10356+ unsigned long size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
10357+ unsigned long offset = vma->vm_pgoff;
10358+
10359+ if (vma->vm_pgoff != 0)
10360+ return -EINVAL;
10361+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
10362+ return -EINVAL;
10363+ if (offset + size > bo->num_pages)
10364+ return -EINVAL;
10365+
10366+ mutex_lock(&bo->mutex);
10367+ if (!psbfb->addr_space)
10368+ psbfb->addr_space = vma->vm_file->f_mapping;
10369+ mutex_unlock(&bo->mutex);
10370+
10371+ return ttm_fbdev_mmap(vma, bo);
10372+}
10373+
10374+int psbfb_sync(struct fb_info *info)
10375+{
10376+ struct psbfb_par *par = info->par;
10377+ struct drm_psb_private *dev_priv = par->dev->dev_private;
10378+
10379+ if (psb_2d_trylock(dev_priv)) {
10380+ /*
10381+ * psbfb_sync is atomic so need to do instantaneous check of
10382+ * power on
10383+ */
10384+ if (!powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND) &&
10385+ !powermgmt_is_resume_in_progress(PSB_GRAPHICS_ISLAND) &&
10386+ powermgmt_is_hw_on(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND))
10387+ psb_idle_2d(par->dev);
10388+
10389+ psb_2d_unlock(dev_priv);
10390+ } else
10391+ udelay(5);
10392+
10393+ return 0;
10394+}
10395+
10396+static struct fb_ops psbfb_ops = {
10397+ .owner = THIS_MODULE,
10398+ .fb_check_var = psbfb_check_var,
10399+ .fb_set_par = psbfb_set_par,
10400+ .fb_setcolreg = psbfb_setcolreg,
10401+ .fb_fillrect = cfb_fillrect,
10402+ .fb_copyarea = cfb_copyarea,
10403+ .fb_imageblit = cfb_imageblit,
10404+ .fb_mmap = psbfb_mmap,
10405+ /*.fb_sync = psbfb_sync,*/
10406+ .fb_blank = psbfb_blank,
10407+};
10408+
10409+static struct drm_mode_set panic_mode;
10410+
10411+int psbfb_panic(struct notifier_block *n, unsigned long ununsed,
10412+ void *panic_str)
10413+{
10414+ DRM_ERROR("panic occurred, switching back to text console\n");
10415+ drm_crtc_helper_set_config(&panic_mode);
10416+
10417+ return 0;
10418+}
10419+EXPORT_SYMBOL(psbfb_panic);
10420+
10421+static struct notifier_block paniced = {
10422+ .notifier_call = psbfb_panic,
10423+};
10424+
10425+
10426+static struct drm_framebuffer *psb_framebuffer_create
10427+ (struct drm_device *dev, struct drm_mode_fb_cmd *r,
10428+ void *mm_private)
10429+{
10430+ struct psb_framebuffer *fb;
10431+ int ret;
10432+
10433+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
10434+ if (!fb)
10435+ return NULL;
10436+
10437+ ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
10438+
10439+ if (ret)
10440+ goto err;
10441+
10442+ drm_helper_mode_fill_fb_struct(&fb->base, r);
10443+
10444+ fb->bo = mm_private;
10445+
10446+ return &fb->base;
10447+
10448+err:
10449+ kfree(fb);
10450+ return NULL;
10451+}
10452+
10453+static struct drm_framebuffer *psb_user_framebuffer_create
10454+ (struct drm_device *dev, struct drm_file *filp,
10455+ struct drm_mode_fb_cmd *r)
10456+{
10457+ struct psb_framebuffer *psbfb;
10458+ struct ttm_buffer_object *bo = NULL;
10459+ struct drm_framebuffer *fb;
10460+ struct fb_info *info;
10461+ struct ttm_bo_kmap_obj tmp_kmap;
10462+ bool is_iomem;
10463+ uint64_t size;
10464+
10465+ bo = ttm_buffer_object_lookup(psb_fpriv(filp)->tfile, r->handle);
10466+ if (!bo)
10467+ return NULL;
10468+ /*the buffer is used as fb, then it should not be put in swap list*/
10469+ list_del_init(&bo->swap);
10470+
10471+ /* JB: TODO not drop, make smarter */
10472+ size = ((uint64_t) bo->num_pages) << PAGE_SHIFT;
10473+ if (size < r->height * r->pitch)
10474+ return NULL;
10475+
10476+ /* JB: TODO not drop, refcount buffer */
10477+// return psb_framebuffer_create(dev, r, bo);
10478+
10479+ fb = psb_framebuffer_create(dev, r, bo);
10480+ if (!fb) {
10481+ DRM_ERROR("failed to allocate fb.\n");
10482+ return NULL;
10483+ }
10484+
10485+ psbfb = to_psb_fb(fb);
10486+ psbfb->bo = bo;
10487+
10488+ info = framebuffer_alloc(sizeof(struct psbfb_par), &dev->pdev->dev);
10489+ if (!info) {
10490+ return NULL;
10491+ }
10492+
10493+ strcpy(info->fix.id, "psbfb");
10494+ info->fix.type = FB_TYPE_PACKED_PIXELS;
10495+ info->fix.visual = FB_VISUAL_TRUECOLOR;
10496+ info->fix.type_aux = 0;
10497+ info->fix.xpanstep = 1; /* doing it in hw */
10498+ info->fix.ypanstep = 1; /* doing it in hw */
10499+ info->fix.ywrapstep = 0;
10500+ info->fix.accel = FB_ACCEL_I830;
10501+ info->fix.type_aux = 0;
10502+
10503+ info->flags = FBINFO_DEFAULT;
10504+
10505+ info->fbops = &psbfb_ops;
10506+
10507+ info->fix.line_length = fb->pitch;
10508+ info->fix.smem_start =
10509+ dev->mode_config.fb_base + psbfb->bo->offset;
10510+ info->fix.smem_len = size;
10511+
10512+ info->flags = FBINFO_DEFAULT;
10513+
10514+ if (ttm_bo_kmap(psbfb->bo, 0, psbfb->bo->num_pages, &tmp_kmap) != 0) {
10515+ DRM_ERROR("error mapping fb\n");
10516+ return NULL;
10517+ }
10518+
10519+ psbfb->kmap = tmp_kmap;
10520+
10521+ info->screen_base = ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem);
10522+ info->screen_size = size;
10523+
10524+/* it is called for kms flip, the back buffer has been rendered, then we should not clear it*/
10525+#if 0
10526+ if (is_iomem)
10527+ memset_io(info->screen_base, 0, size);
10528+ else
10529+ memset(info->screen_base, 0, size);
10530+#endif
10531+ info->pseudo_palette = fb->pseudo_palette;
10532+ info->var.xres_virtual = fb->width;
10533+ info->var.yres_virtual = fb->height;
10534+ info->var.bits_per_pixel = fb->bits_per_pixel;
10535+ info->var.xoffset = 0;
10536+ info->var.yoffset = 0;
10537+ info->var.activate = FB_ACTIVATE_NOW;
10538+ info->var.height = -1;
10539+ info->var.width = -1;
10540+
10541+ info->var.xres = r->width;
10542+ info->var.yres = r->height;
10543+
10544+ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
10545+ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
10546+
10547+ info->pixmap.size = 64 * 1024;
10548+ info->pixmap.buf_align = 8;
10549+ info->pixmap.access_align = 32;
10550+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
10551+ info->pixmap.scan_align = 1;
10552+
10553+ fill_fb_bitfield(&info->var, fb->depth);
10554+
10555+ register_framebuffer(info);
10556+
10557+ fb->fbdev = info;
10558+
10559+ return fb;
10560+}
10561+
10562+int psbfb_create(struct drm_device *dev, uint32_t fb_width,
10563+ uint32_t fb_height, uint32_t surface_width,
10564+ uint32_t surface_height, struct psb_framebuffer **psbfb_p)
10565+{
10566+ struct fb_info *info;
10567+ struct psbfb_par *par;
10568+ struct drm_framebuffer *fb;
10569+ struct psb_framebuffer *psbfb;
10570+ struct ttm_bo_kmap_obj tmp_kmap;
10571+ struct drm_mode_fb_cmd mode_cmd;
10572+ struct device *device = &dev->pdev->dev;
10573+ struct ttm_bo_device *bdev = &psb_priv(dev)->bdev;
10574+ struct ttm_buffer_object *fbo = NULL;
10575+ int size, aligned_size, ret;
10576+ bool is_iomem;
10577+
10578+ mode_cmd.width = surface_width; /* crtc->desired_mode->hdisplay; */
10579+ mode_cmd.height = surface_height; /* crtc->desired_mode->vdisplay; */
10580+
10581+ mode_cmd.bpp = 32;
10582+ mode_cmd.pitch = mode_cmd.width * ((mode_cmd.bpp + 1) / 8);
10583+ mode_cmd.depth = 24;
10584+
10585+ size = mode_cmd.pitch * mode_cmd.height;
10586+ aligned_size = ALIGN(size, PAGE_SIZE);
10587+ ret = ttm_buffer_object_create(bdev,
10588+ aligned_size,
10589+ ttm_bo_type_kernel,
10590+ TTM_PL_FLAG_TT |
10591+ TTM_PL_FLAG_VRAM |
10592+ TTM_PL_FLAG_NO_EVICT,
10593+ 0, 0, 0, NULL, &fbo);
10594+
10595+ if (unlikely(ret != 0)) {
10596+ DRM_ERROR("failed to allocate framebuffer.\n");
10597+ return -ENOMEM;
10598+ }
10599+
10600+ mutex_lock(&dev->struct_mutex);
10601+ fb = psb_framebuffer_create(dev, &mode_cmd, fbo);
10602+ if (!fb) {
10603+ DRM_ERROR("failed to allocate fb.\n");
10604+ ret = -ENOMEM;
10605+ goto out_err0;
10606+ }
10607+ psbfb = to_psb_fb(fb);
10608+ psbfb->bo = fbo;
10609+
10610+ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
10611+ info = framebuffer_alloc(sizeof(struct psbfb_par), device);
10612+ if (!info) {
10613+ ret = -ENOMEM;
10614+ goto out_err1;
10615+ }
10616+
10617+ par = info->par;
10618+ par->psbfb = psbfb;
10619+
10620+ strcpy(info->fix.id, "psbfb");
10621+ info->fix.type = FB_TYPE_PACKED_PIXELS;
10622+ info->fix.visual = FB_VISUAL_TRUECOLOR;
10623+ info->fix.type_aux = 0;
10624+ info->fix.xpanstep = 1; /* doing it in hw */
10625+ info->fix.ypanstep = 1; /* doing it in hw */
10626+ info->fix.ywrapstep = 0;
10627+ info->fix.accel = FB_ACCEL_I830;
10628+ info->fix.type_aux = 0;
10629+
10630+ info->flags = FBINFO_DEFAULT;
10631+
10632+ info->fbops = &psbfb_ops;
10633+
10634+ info->fix.line_length = fb->pitch;
10635+ info->fix.smem_start =
10636+ dev->mode_config.fb_base + psbfb->bo->offset;
10637+ info->fix.smem_len = size;
10638+
10639+ info->flags = FBINFO_DEFAULT;
10640+
10641+ ret = ttm_bo_kmap(psbfb->bo, 0, psbfb->bo->num_pages, &tmp_kmap);
10642+ if (ret) {
10643+ DRM_ERROR("error mapping fb: %d\n", ret);
10644+ goto out_err2;
10645+ }
10646+
10647+
10648+ info->screen_base = ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem);
10649+ info->screen_size = size;
10650+
10651+ if (is_iomem)
10652+ memset_io(info->screen_base, 0, size);
10653+ else
10654+ memset(info->screen_base, 0, size);
10655+
10656+ info->pseudo_palette = fb->pseudo_palette;
10657+ info->var.xres_virtual = fb->width;
10658+ info->var.yres_virtual = fb->height;
10659+ info->var.bits_per_pixel = fb->bits_per_pixel;
10660+ info->var.xoffset = 0;
10661+ info->var.yoffset = 0;
10662+ info->var.activate = FB_ACTIVATE_NOW;
10663+ info->var.height = -1;
10664+ info->var.width = -1;
10665+
10666+ info->var.xres = fb_width;
10667+ info->var.yres = fb_height;
10668+
10669+ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
10670+ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
10671+
10672+ info->pixmap.size = 64 * 1024;
10673+ info->pixmap.buf_align = 8;
10674+ info->pixmap.access_align = 32;
10675+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
10676+ info->pixmap.scan_align = 1;
10677+
10678+ DRM_DEBUG("fb depth is %d\n", fb->depth);
10679+ DRM_DEBUG(" pitch is %d\n", fb->pitch);
10680+ fill_fb_bitfield(&info->var, fb->depth);
10681+
10682+ fb->fbdev = info;
10683+
10684+ par->dev = dev;
10685+
10686+ /* To allow resizing without swapping buffers */
10687+ printk(KERN_INFO"allocated %dx%d fb: 0x%08lx, bo %p\n",
10688+ psbfb->base.width,
10689+ psbfb->base.height, psbfb->bo->offset, psbfb->bo);
10690+
10691+ if (psbfb_p)
10692+ *psbfb_p = psbfb;
10693+
10694+ mutex_unlock(&dev->struct_mutex);
10695+
10696+ return 0;
10697+out_err2:
10698+ unregister_framebuffer(info);
10699+out_err1:
10700+ fb->funcs->destroy(fb);
10701+out_err0:
10702+ mutex_unlock(&dev->struct_mutex);
10703+ ttm_bo_unref(&fbo);
10704+ return ret;
10705+}
10706+
10707+static int psbfb_multi_fb_probe_crtc(struct drm_device *dev,
10708+ struct drm_crtc *crtc)
10709+{
10710+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10711+ struct drm_framebuffer *fb = crtc->fb;
10712+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
10713+ struct drm_connector *connector;
10714+ struct fb_info *info;
10715+ struct psbfb_par *par;
10716+ struct drm_mode_set *modeset;
10717+ unsigned int width, height;
10718+ int new_fb = 0;
10719+ int ret, i, conn_count;
10720+
10721+ if (!drm_helper_crtc_in_use(crtc))
10722+ return 0;
10723+
10724+ if (!crtc->desired_mode)
10725+ return 0;
10726+
10727+ width = crtc->desired_mode->hdisplay;
10728+ height = crtc->desired_mode->vdisplay;
10729+
10730+ /* is there an fb bound to this crtc already */
10731+ if (!psb_intel_crtc->mode_set.fb) {
10732+ ret =
10733+ psbfb_create(dev, width, height, width, height,
10734+ &psbfb);
10735+ if (ret)
10736+ return -EINVAL;
10737+ new_fb = 1;
10738+ } else {
10739+ fb = psb_intel_crtc->mode_set.fb;
10740+ if ((fb->width < width) || (fb->height < height))
10741+ return -EINVAL;
10742+ }
10743+
10744+ info = fb->fbdev;
10745+ par = info->par;
10746+
10747+ modeset = &psb_intel_crtc->mode_set;
10748+ modeset->fb = fb;
10749+ conn_count = 0;
10750+ list_for_each_entry(connector, &dev->mode_config.connector_list,
10751+ head) {
10752+ if (connector->encoder)
10753+ if (connector->encoder->crtc == modeset->crtc) {
10754+ modeset->connectors[conn_count] =
10755+ connector;
10756+ conn_count++;
10757+ if (conn_count > INTELFB_CONN_LIMIT)
10758+ BUG();
10759+ }
10760+ }
10761+
10762+ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
10763+ modeset->connectors[i] = NULL;
10764+
10765+ par->crtc_ids[0] = crtc->base.id;
10766+
10767+ modeset->num_connectors = conn_count;
10768+ if (modeset->mode != modeset->crtc->desired_mode)
10769+ modeset->mode = modeset->crtc->desired_mode;
10770+
10771+ par->crtc_count = 1;
10772+
10773+ if (new_fb) {
10774+ info->var.pixclock = -1;
10775+ if (register_framebuffer(info) < 0)
10776+ return -EINVAL;
10777+ } else
10778+ psbfb_set_par(info);
10779+
10780+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
10781+ info->fix.id);
10782+
10783+ /* Switch back to kernel console on panic */
10784+ panic_mode = *modeset;
10785+ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
10786+ printk(KERN_INFO "registered panic notifier\n");
10787+
10788+ return 0;
10789+}
10790+
10791+static int psbfb_multi_fb_probe(struct drm_device *dev)
10792+{
10793+
10794+ struct drm_crtc *crtc;
10795+ int ret = 0;
10796+
10797+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10798+ ret = psbfb_multi_fb_probe_crtc(dev, crtc);
10799+ if (ret)
10800+ return ret;
10801+ }
10802+ return ret;
10803+}
10804+
10805+static int psbfb_single_fb_probe(struct drm_device *dev)
10806+{
10807+ struct drm_crtc *crtc;
10808+ struct drm_connector *connector;
10809+ unsigned int fb_width = (unsigned) -1, fb_height = (unsigned) -1;
10810+ unsigned int surface_width = 0, surface_height = 0;
10811+ int new_fb = 0;
10812+ int crtc_count = 0;
10813+ int ret, i, conn_count = 0;
10814+ struct fb_info *info;
10815+ struct psbfb_par *par;
10816+ struct drm_mode_set *modeset = NULL;
10817+ struct drm_framebuffer *fb = NULL;
10818+ struct psb_framebuffer *psbfb = NULL;
10819+
10820+ /* first up get a count of crtcs now in use and
10821+ * new min/maxes width/heights */
10822+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10823+ if (drm_helper_crtc_in_use(crtc)) {
10824+ if (crtc->desired_mode) {
10825+ fb = crtc->fb;
10826+ if (crtc->desired_mode->hdisplay <
10827+ fb_width)
10828+ fb_width =
10829+ crtc->desired_mode->hdisplay;
10830+
10831+ if (crtc->desired_mode->vdisplay <
10832+ fb_height)
10833+ fb_height =
10834+ crtc->desired_mode->vdisplay;
10835+
10836+ if (crtc->desired_mode->hdisplay >
10837+ surface_width)
10838+ surface_width =
10839+ crtc->desired_mode->hdisplay;
10840+
10841+ if (crtc->desired_mode->vdisplay >
10842+ surface_height)
10843+ surface_height =
10844+ crtc->desired_mode->vdisplay;
10845+
10846+ }
10847+ crtc_count++;
10848+ }
10849+ }
10850+
10851+ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
10852+ /* hmm everyone went away - assume VGA cable just fell out
10853+ and will come back later. */
10854+ return 0;
10855+ }
10856+
10857+ /* do we have an fb already? */
10858+ if (list_empty(&dev->mode_config.fb_kernel_list)) {
10859+ /* create an fb if we don't have one */
10860+ ret =
10861+ psbfb_create(dev, fb_width, fb_height, surface_width,
10862+ surface_height, &psbfb);
10863+ if (ret)
10864+ return -EINVAL;
10865+ new_fb = 1;
10866+ fb = &psbfb->base;
10867+ } else {
10868+ fb = list_first_entry(&dev->mode_config.fb_kernel_list,
10869+ struct drm_framebuffer, filp_head);
10870+
10871+ /* if someone hotplugs something bigger than we have already
10872+ * allocated, we are pwned. As really we can't resize an
10873+ * fbdev that is in the wild currently due to fbdev not really
10874+ * being designed for the lower layers moving stuff around
10875+ * under it. - so in the grand style of things - punt. */
10876+ if ((fb->width < surface_width)
10877+ || (fb->height < surface_height)) {
10878+ DRM_ERROR
10879+ ("Framebuffer not large enough to scale"
10880+ " console onto.\n");
10881+ return -EINVAL;
10882+ }
10883+ }
10884+
10885+ info = fb->fbdev;
10886+ par = info->par;
10887+
10888+ crtc_count = 0;
10889+ /* okay we need to setup new connector sets in the crtcs */
10890+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10891+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10892+ modeset = &psb_intel_crtc->mode_set;
10893+ modeset->fb = fb;
10894+ conn_count = 0;
10895+ list_for_each_entry(connector,
10896+ &dev->mode_config.connector_list,
10897+ head) {
10898+ if (connector->encoder)
10899+ if (connector->encoder->crtc ==
10900+ modeset->crtc) {
10901+ modeset->connectors[conn_count] =
10902+ connector;
10903+ conn_count++;
10904+ if (conn_count >
10905+ INTELFB_CONN_LIMIT)
10906+ BUG();
10907+ }
10908+ }
10909+
10910+ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
10911+ modeset->connectors[i] = NULL;
10912+
10913+ par->crtc_ids[crtc_count++] = crtc->base.id;
10914+
10915+ modeset->num_connectors = conn_count;
10916+ if (modeset->mode != modeset->crtc->desired_mode)
10917+ modeset->mode = modeset->crtc->desired_mode;
10918+ }
10919+ par->crtc_count = crtc_count;
10920+
10921+ if (new_fb) {
10922+ info->var.pixclock = -1;
10923+ if (register_framebuffer(info) < 0)
10924+ return -EINVAL;
10925+ } else
10926+ psbfb_set_par(info);
10927+
10928+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
10929+ info->fix.id);
10930+
10931+ /* Switch back to kernel console on panic */
10932+ panic_mode = *modeset;
10933+ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
10934+ printk(KERN_INFO "registered panic notifier\n");
10935+
10936+ return 0;
10937+}
10938+
10939+int psbfb_probe(struct drm_device *dev)
10940+{
10941+ int ret = 0;
10942+
10943+ DRM_DEBUG("\n");
10944+
10945+ /* something has changed in the lower levels of hell - deal with it
10946+ here */
10947+
10948+ /* two modes : a) 1 fb to rule all crtcs.
10949+ b) one fb per crtc.
10950+ two actions 1) new connected device
10951+ 2) device removed.
10952+ case a/1 : if the fb surface isn't big enough -
10953+ resize the surface fb.
10954+ if the fb size isn't big enough - resize fb into surface.
10955+ if everything big enough configure the new crtc/etc.
10956+ case a/2 : undo the configuration
10957+ possibly resize down the fb to fit the new configuration.
10958+ case b/1 : see if it is on a new crtc - setup a new fb and add it.
10959+ case b/2 : teardown the new fb.
10960+ */
10961+
10962+ /* mode a first */
10963+ /* search for an fb */
10964+ if (0 /*i915_fbpercrtc == 1 */)
10965+ ret = psbfb_multi_fb_probe(dev);
10966+ else
10967+ ret = psbfb_single_fb_probe(dev);
10968+
10969+ return ret;
10970+}
10971+EXPORT_SYMBOL(psbfb_probe);
10972+
10973+int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
10974+{
10975+ struct fb_info *info;
10976+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
10977+
10978+ if (drm_psb_no_fb)
10979+ return 0;
10980+
10981+ info = fb->fbdev;
10982+
10983+ if (info) {
10984+ unregister_framebuffer(info);
10985+ ttm_bo_kunmap(&psbfb->kmap);
10986+ ttm_bo_unref(&psbfb->bo);
10987+ framebuffer_release(info);
10988+ }
10989+
10990+ atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
10991+ memset(&panic_mode, 0, sizeof(struct drm_mode_set));
10992+ return 0;
10993+}
10994+EXPORT_SYMBOL(psbfb_remove);
10995+
10996+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
10997+ struct drm_file *file_priv,
10998+ unsigned int *handle)
10999+{
11000+ /* JB: TODO currently we can't go from a bo to a handle with ttm */
11001+ (void) file_priv;
11002+ *handle = 0;
11003+ return 0;
11004+}
11005+
11006+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
11007+{
11008+ struct drm_device *dev = fb->dev;
11009+ if (fb->fbdev)
11010+ psbfb_remove(dev, fb);
11011+
11012+ /* JB: TODO not drop, refcount buffer */
11013+ drm_framebuffer_cleanup(fb);
11014+
11015+ kfree(fb);
11016+}
11017+
11018+static const struct drm_mode_config_funcs psb_mode_funcs = {
11019+ .fb_create = psb_user_framebuffer_create,
11020+ .fb_changed = psbfb_probe,
11021+};
11022+
11023+static int psb_create_backlight_property(struct drm_device *dev)
11024+{
11025+ struct drm_psb_private *dev_priv = (struct drm_psb_private *) dev->dev_private;
11026+ struct drm_property *backlight;
11027+
11028+ if (dev_priv->backlight_property)
11029+ return 0;
11030+
11031+ backlight = drm_property_create(dev, DRM_MODE_PROP_RANGE, "backlight", 2);
11032+ backlight->values[0] = 0;
11033+ backlight->values[1] = 100;
11034+
11035+ dev_priv->backlight_property = backlight;
11036+
11037+ return 0;
11038+}
11039+
11040+static void psb_setup_outputs(struct drm_device *dev)
11041+{
11042+ struct drm_psb_private *dev_priv =
11043+ (struct drm_psb_private *) dev->dev_private;
11044+ struct drm_connector *connector;
11045+
11046+ drm_mode_create_scaling_mode_property(dev);
11047+
11048+ psb_create_backlight_property(dev);
11049+
11050+ if (IS_MRST(dev)) {
11051+ if (dev_priv->iLVDS_enable)
11052+ /* Set up integrated LVDS for MRST */
11053+ mrst_lvds_init(dev, &dev_priv->mode_dev);
11054+ else {
11055+ /* Set up integrated MIPI for MRST */
11056+ mrst_dsi_init(dev, &dev_priv->mode_dev);
11057+ }
11058+ } else {
11059+ psb_intel_lvds_init(dev, &dev_priv->mode_dev);
11060+ psb_intel_sdvo_init(dev, SDVOB);
11061+ }
11062+
11063+ list_for_each_entry(connector, &dev->mode_config.connector_list,
11064+ head) {
11065+ struct psb_intel_output *psb_intel_output =
11066+ to_psb_intel_output(connector);
11067+ struct drm_encoder *encoder = &psb_intel_output->enc;
11068+ int crtc_mask = 0, clone_mask = 0;
11069+
11070+ /* valid crtcs */
11071+ switch (psb_intel_output->type) {
11072+ case INTEL_OUTPUT_SDVO:
11073+ crtc_mask = ((1 << 0) | (1 << 1));
11074+ clone_mask = (1 << INTEL_OUTPUT_SDVO);
11075+ break;
11076+ case INTEL_OUTPUT_LVDS:
11077+ if (IS_MRST(dev))
11078+ crtc_mask = (1 << 0);
11079+ else
11080+ crtc_mask = (1 << 1);
11081+
11082+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
11083+ break;
11084+ case INTEL_OUTPUT_MIPI:
11085+ crtc_mask = (1 << 0);
11086+ clone_mask = (1 << INTEL_OUTPUT_MIPI);
11087+ break;
11088+ }
11089+ encoder->possible_crtcs = crtc_mask;
11090+ encoder->possible_clones =
11091+ psb_intel_connector_clones(dev, clone_mask);
11092+ }
11093+}
11094+
11095+static void *psb_bo_from_handle(struct drm_device *dev,
11096+ struct drm_file *file_priv,
11097+ unsigned int handle)
11098+{
11099+ return ttm_buffer_object_lookup(psb_fpriv(file_priv)->tfile,
11100+ handle);
11101+}
11102+
11103+static size_t psb_bo_size(struct drm_device *dev, void *bof)
11104+{
11105+ struct ttm_buffer_object *bo = bof;
11106+ return bo->num_pages << PAGE_SHIFT;
11107+}
11108+
11109+static size_t psb_bo_offset(struct drm_device *dev, void *bof)
11110+{
11111+ struct drm_psb_private *dev_priv =
11112+ (struct drm_psb_private *) dev->dev_private;
11113+ struct ttm_buffer_object *bo = bof;
11114+
11115+ size_t offset = bo->offset - dev_priv->pg->gatt_start;
11116+ DRM_DEBUG("Offset %u\n", offset);
11117+ return offset;
11118+}
11119+
11120+static int psb_bo_pin_for_scanout(struct drm_device *dev, void *bo)
11121+{
11122+#if 0 /* JB: Not used for the drop */
11123+ struct ttm_buffer_object *bo = bof;
11124+ We should do things like check if
11125+ the buffer is in a scanout : able
11126+ place.And make sure that its pinned.
11127+#endif
11128+ return 0;
11129+ }
11130+
11131+ static int psb_bo_unpin_for_scanout(struct drm_device *dev,
11132+ void *bo) {
11133+#if 0 /* JB: Not used for the drop */
11134+ struct ttm_buffer_object *bo = bof;
11135+#endif
11136+ return 0;
11137+ }
11138+
11139+ void psb_modeset_init(struct drm_device *dev)
11140+ {
11141+ struct drm_psb_private *dev_priv =
11142+ (struct drm_psb_private *) dev->dev_private;
11143+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
11144+ int i;
11145+ int num_pipe;
11146+
11147+ /* Init mm functions */
11148+ mode_dev->bo_from_handle = psb_bo_from_handle;
11149+ mode_dev->bo_size = psb_bo_size;
11150+ mode_dev->bo_offset = psb_bo_offset;
11151+ mode_dev->bo_pin_for_scanout = psb_bo_pin_for_scanout;
11152+ mode_dev->bo_unpin_for_scanout = psb_bo_unpin_for_scanout;
11153+
11154+ drm_mode_config_init(dev);
11155+
11156+ dev->mode_config.min_width = 0;
11157+ dev->mode_config.min_height = 0;
11158+
11159+ dev->mode_config.funcs = (void *) &psb_mode_funcs;
11160+
11161+ dev->mode_config.max_width = 2048;
11162+ dev->mode_config.max_height = 2048;
11163+
11164+ /* set memory base */
11165+ /* MRST and PSB should use BAR 2*/
11166+ dev->mode_config.fb_base =
11167+ pci_resource_start(dev->pdev, 2);
11168+
11169+ if (IS_MRST(dev))
11170+ num_pipe = 1;
11171+ else
11172+ num_pipe = 2;
11173+
11174+
11175+ for (i = 0; i < num_pipe; i++)
11176+ psb_intel_crtc_init(dev, i, mode_dev);
11177+
11178+ psb_setup_outputs(dev);
11179+
11180+ /* setup fbs */
11181+ /* drm_initial_config(dev); */
11182+ }
11183+
11184+ void psb_modeset_cleanup(struct drm_device *dev)
11185+ {
11186+ drm_mode_config_cleanup(dev);
11187+ }
11188diff --git a/drivers/gpu/drm/psb/psb_fb.h b/drivers/gpu/drm/psb/psb_fb.h
11189new file mode 100644
11190index 0000000..aa0b23c
11191--- /dev/null
11192+++ b/drivers/gpu/drm/psb/psb_fb.h
11193@@ -0,0 +1,47 @@
11194+/*
11195+ * Copyright (c) 2008, Intel Corporation
11196+ *
11197+ * Permission is hereby granted, free of charge, to any person obtaining a
11198+ * copy of this software and associated documentation files (the "Software"),
11199+ * to deal in the Software without restriction, including without limitation
11200+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11201+ * and/or sell copies of the Software, and to permit persons to whom the
11202+ * Software is furnished to do so, subject to the following conditions:
11203+ *
11204+ * The above copyright notice and this permission notice (including the next
11205+ * paragraph) shall be included in all copies or substantial portions of the
11206+ * Software.
11207+ *
11208+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
11209+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11210+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
11211+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
11212+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
11213+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
11214+ * SOFTWARE.
11215+ *
11216+ * Authors:
11217+ * Eric Anholt <eric@anholt.net>
11218+ *
11219+ **/
11220+
11221+#ifndef _PSB_FB_H_
11222+#define _PSB_FB_H_
11223+
11224+struct psb_framebuffer {
11225+ struct drm_framebuffer base;
11226+ struct address_space *addr_space;
11227+ struct ttm_buffer_object *bo;
11228+ struct ttm_bo_kmap_obj kmap;
11229+ uint64_t offset;
11230+};
11231+
11232+#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
11233+
11234+
11235+extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
11236+
11237+extern int psb_2d_submit(struct drm_psb_private *, uint32_t *, uint32_t);
11238+
11239+#endif
11240+
11241diff --git a/drivers/gpu/drm/psb/psb_fence.c b/drivers/gpu/drm/psb/psb_fence.c
11242new file mode 100644
11243index 0000000..b8c64b0
11244--- /dev/null
11245+++ b/drivers/gpu/drm/psb/psb_fence.c
11246@@ -0,0 +1,359 @@
11247+/**************************************************************************
11248+ * Copyright (c) 2007, Intel Corporation.
11249+ * All Rights Reserved.
11250+ *
11251+ * This program is free software; you can redistribute it and/or modify it
11252+ * under the terms and conditions of the GNU General Public License,
11253+ * version 2, as published by the Free Software Foundation.
11254+ *
11255+ * This program is distributed in the hope it will be useful, but WITHOUT
11256+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11257+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11258+ * more details.
11259+ *
11260+ * You should have received a copy of the GNU General Public License along with
11261+ * this program; if not, write to the Free Software Foundation, Inc.,
11262+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
11263+ *
11264+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
11265+ * develop this driver.
11266+ *
11267+ **************************************************************************/
11268+/*
11269+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
11270+ */
11271+
11272+#include <drm/drmP.h>
11273+#include "psb_drv.h"
11274+#include "psb_msvdx.h"
11275+#include "lnc_topaz.h"
11276+
11277+static void psb_print_ta_fence_status(struct ttm_fence_device *fdev)
11278+{
11279+ struct drm_psb_private *dev_priv =
11280+ container_of(fdev, struct drm_psb_private, fdev);
11281+ struct psb_scheduler_seq *seq = dev_priv->scheduler.seq;
11282+ int i;
11283+
11284+ for (i=0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) {
11285+ DRM_INFO("Type 0x%02x, sequence %lu, reported %d\n",
11286+ (1 << i),
11287+ (unsigned long) seq->sequence,
11288+ seq->reported);
11289+ seq++;
11290+ }
11291+}
11292+
11293+static void psb_poll_ta(struct ttm_fence_device *fdev,
11294+ uint32_t waiting_types)
11295+{
11296+ struct drm_psb_private *dev_priv =
11297+ container_of(fdev, struct drm_psb_private, fdev);
11298+ uint32_t cur_flag = 1;
11299+ uint32_t flags = 0;
11300+ uint32_t sequence = 0;
11301+ uint32_t remaining = 0xFFFFFFFF;
11302+ uint32_t diff;
11303+
11304+ struct psb_scheduler *scheduler;
11305+ struct psb_scheduler_seq *seq;
11306+ struct ttm_fence_class_manager *fc =
11307+ &fdev->fence_class[PSB_ENGINE_TA];
11308+
11309+ scheduler = &dev_priv->scheduler;
11310+ seq = scheduler->seq;
11311+
11312+ while (likely(waiting_types & remaining)) {
11313+ if (!(waiting_types & cur_flag))
11314+ goto skip;
11315+ if (seq->reported)
11316+ goto skip;
11317+ if (flags == 0)
11318+ sequence = seq->sequence;
11319+ else if (sequence != seq->sequence) {
11320+ ttm_fence_handler(fdev, PSB_ENGINE_TA,
11321+ sequence, flags, 0);
11322+ sequence = seq->sequence;
11323+ flags = 0;
11324+ }
11325+ flags |= cur_flag;
11326+
11327+ /*
11328+ * Sequence may not have ended up on the ring yet.
11329+ * In that case, report it but don't mark it as
11330+ * reported. A subsequent poll will report it again.
11331+ */
11332+
11333+ diff = (fc->latest_queued_sequence - sequence) &
11334+ fc->sequence_mask;
11335+ if (diff < fc->wrap_diff)
11336+ seq->reported = 1;
11337+
11338+skip:
11339+ cur_flag <<= 1;
11340+ remaining <<= 1;
11341+ seq++;
11342+ }
11343+
11344+ if (flags)
11345+ ttm_fence_handler(fdev, PSB_ENGINE_TA, sequence, flags, 0);
11346+
11347+}
11348+
11349+static void psb_poll_other(struct ttm_fence_device *fdev,
11350+ uint32_t fence_class, uint32_t waiting_types)
11351+{
11352+ struct drm_psb_private *dev_priv =
11353+ container_of(fdev, struct drm_psb_private, fdev);
11354+ struct ttm_fence_class_manager *fc =
11355+ &fdev->fence_class[fence_class];
11356+ uint32_t sequence;
11357+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
11358+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
11359+
11360+ if (unlikely(!dev_priv))
11361+ return;
11362+
11363+ if (waiting_types) {
11364+ switch (fence_class) {
11365+ case PSB_ENGINE_VIDEO:
11366+ sequence = msvdx_priv->msvdx_current_sequence;
11367+ break;
11368+ case LNC_ENGINE_ENCODE:
11369+ sequence = *((uint32_t *)topaz_priv->topaz_sync_addr);
11370+ break;
11371+ default:
11372+ sequence = dev_priv->comm[fence_class << 4];
11373+ break;
11374+ }
11375+
11376+ ttm_fence_handler(fdev, fence_class, sequence,
11377+ _PSB_FENCE_TYPE_EXE, 0);
11378+
11379+ switch (fence_class) {
11380+ case PSB_ENGINE_2D:
11381+ if (dev_priv->fence0_irq_on && !fc->waiting_types) {
11382+ psb_2D_irq_off(dev_priv);
11383+ dev_priv->fence0_irq_on = 0;
11384+ } else if (!dev_priv->fence0_irq_on
11385+ && fc->waiting_types) {
11386+ psb_2D_irq_on(dev_priv);
11387+ dev_priv->fence0_irq_on = 1;
11388+ }
11389+ break;
11390+#if 0
11391+ /*
11392+ * FIXME: MSVDX irq switching
11393+ */
11394+
11395+ case PSB_ENGINE_VIDEO:
11396+ if (dev_priv->fence2_irq_on && !fc->waiting_types) {
11397+ psb_msvdx_irq_off(dev_priv);
11398+ dev_priv->fence2_irq_on = 0;
11399+ } else if (!dev_priv->fence2_irq_on
11400+ && fc->pending_exe_flush) {
11401+ psb_msvdx_irq_on(dev_priv);
11402+ dev_priv->fence2_irq_on = 1;
11403+ }
11404+ break;
11405+#endif
11406+ default:
11407+ return;
11408+ }
11409+ }
11410+}
11411+
11412+static void psb_fence_poll(struct ttm_fence_device *fdev,
11413+ uint32_t fence_class, uint32_t waiting_types)
11414+{
11415+ if (unlikely((PSB_D_PM & drm_psb_debug) && (fence_class == 0)))
11416+ PSB_DEBUG_PM("psb_fence_poll: %d\n", fence_class);
11417+ switch (fence_class) {
11418+ case PSB_ENGINE_TA:
11419+ psb_poll_ta(fdev, waiting_types);
11420+ break;
11421+ default:
11422+ psb_poll_other(fdev, fence_class, waiting_types);
11423+ break;
11424+ }
11425+}
11426+
11427+void psb_fence_error(struct drm_device *dev,
11428+ uint32_t fence_class,
11429+ uint32_t sequence, uint32_t type, int error)
11430+{
11431+ struct drm_psb_private *dev_priv = psb_priv(dev);
11432+ struct ttm_fence_device *fdev = &dev_priv->fdev;
11433+ unsigned long irq_flags;
11434+ struct ttm_fence_class_manager *fc =
11435+ &fdev->fence_class[fence_class];
11436+
11437+ BUG_ON(fence_class >= PSB_NUM_ENGINES);
11438+ write_lock_irqsave(&fc->lock, irq_flags);
11439+ ttm_fence_handler(fdev, fence_class, sequence, type, error);
11440+ write_unlock_irqrestore(&fc->lock, irq_flags);
11441+}
11442+
11443+int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
11444+ uint32_t fence_class,
11445+ uint32_t flags, uint32_t *sequence,
11446+ unsigned long *timeout_jiffies)
11447+{
11448+ struct drm_psb_private *dev_priv =
11449+ container_of(fdev, struct drm_psb_private, fdev);
11450+ uint32_t seq = 0;
11451+ int ret;
11452+
11453+ if (!dev_priv)
11454+ return -EINVAL;
11455+
11456+ if (fence_class >= PSB_NUM_ENGINES)
11457+ return -EINVAL;
11458+
11459+ switch (fence_class) {
11460+ case PSB_ENGINE_2D:
11461+ spin_lock(&dev_priv->sequence_lock);
11462+ seq = ++dev_priv->sequence[fence_class];
11463+ spin_unlock(&dev_priv->sequence_lock);
11464+ ret = psb_blit_sequence(dev_priv, seq);
11465+ if (ret)
11466+ return ret;
11467+ break;
11468+ case PSB_ENGINE_VIDEO:
11469+ spin_lock(&dev_priv->sequence_lock);
11470+ seq = dev_priv->sequence[fence_class]++;
11471+ spin_unlock(&dev_priv->sequence_lock);
11472+ break;
11473+ case LNC_ENGINE_ENCODE:
11474+ spin_lock(&dev_priv->sequence_lock);
11475+ seq = dev_priv->sequence[fence_class]++;
11476+ spin_unlock(&dev_priv->sequence_lock);
11477+ break;
11478+ default:
11479+ spin_lock(&dev_priv->sequence_lock);
11480+ seq = dev_priv->sequence[fence_class];
11481+ spin_unlock(&dev_priv->sequence_lock);
11482+ }
11483+
11484+ *sequence = seq;
11485+
11486+ if (fence_class == PSB_ENGINE_TA)
11487+ *timeout_jiffies = jiffies + DRM_HZ / 2;
11488+ else
11489+ *timeout_jiffies = jiffies + DRM_HZ * 3;
11490+
11491+ return 0;
11492+}
11493+
11494+uint32_t psb_fence_advance_sequence(struct drm_device *dev,
11495+ uint32_t fence_class)
11496+{
11497+ struct drm_psb_private *dev_priv =
11498+ (struct drm_psb_private *) dev->dev_private;
11499+ uint32_t sequence;
11500+
11501+ spin_lock(&dev_priv->sequence_lock);
11502+ sequence = ++dev_priv->sequence[fence_class];
11503+ spin_unlock(&dev_priv->sequence_lock);
11504+
11505+ return sequence;
11506+}
11507+
11508+static void psb_fence_lockup(struct ttm_fence_object *fence,
11509+ uint32_t fence_types)
11510+{
11511+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
11512+
11513+ if (fence->fence_class == PSB_ENGINE_TA) {
11514+
11515+ /*
11516+ * The 3D engine has its own lockup detection.
11517+ * Just extend the fence expiry time.
11518+ */
11519+
11520+ DRM_INFO("Extending 3D fence timeout.\n");
11521+ write_lock(&fc->lock);
11522+
11523+ DRM_INFO("Sequence %lu, types 0x%08x signaled 0x%08x\n",
11524+ (unsigned long) fence->sequence, fence_types,
11525+ fence->info.signaled_types);
11526+
11527+ if (time_after_eq(jiffies, fence->timeout_jiffies))
11528+ fence->timeout_jiffies = jiffies + DRM_HZ / 2;
11529+
11530+ psb_print_ta_fence_status(fence->fdev);
11531+ write_unlock(&fc->lock);
11532+ } else if (fence->fence_class == LNC_ENGINE_ENCODE) {
11533+ DRM_ERROR
11534+ ("TOPAZ timeout (probable lockup) detected on engine %u "
11535+ "fence type 0x%08x\n",
11536+ (unsigned int) fence->fence_class,
11537+ (unsigned int) fence_types);
11538+
11539+ write_lock(&fc->lock);
11540+ lnc_topaz_handle_timeout(fence->fdev);
11541+ ttm_fence_handler(fence->fdev, fence->fence_class,
11542+ fence->sequence, fence_types, -EBUSY);
11543+ write_unlock(&fc->lock);
11544+ } else {
11545+ DRM_ERROR
11546+ ("GPU timeout (probable lockup) detected on engine %u "
11547+ "fence type 0x%08x\n",
11548+ (unsigned int) fence->fence_class,
11549+ (unsigned int) fence_types);
11550+ write_lock(&fc->lock);
11551+ ttm_fence_handler(fence->fdev, fence->fence_class,
11552+ fence->sequence, fence_types, -EBUSY);
11553+ write_unlock(&fc->lock);
11554+ }
11555+}
11556+
11557+void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
11558+{
11559+ struct drm_psb_private *dev_priv = psb_priv(dev);
11560+ struct ttm_fence_device *fdev = &dev_priv->fdev;
11561+ struct ttm_fence_class_manager *fc =
11562+ &fdev->fence_class[fence_class];
11563+ unsigned long irq_flags;
11564+
11565+#ifdef FIX_TG_16
11566+ if (fence_class == PSB_ENGINE_2D) {
11567+
11568+ if ((atomic_read(&dev_priv->ta_wait_2d_irq) == 1) &&
11569+ (PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
11570+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
11571+ _PSB_C2B_STATUS_BUSY) == 0))
11572+ psb_resume_ta_2d_idle(dev_priv);
11573+ }
11574+#endif
11575+ write_lock_irqsave(&fc->lock, irq_flags);
11576+ psb_fence_poll(fdev, fence_class, fc->waiting_types);
11577+ write_unlock_irqrestore(&fc->lock, irq_flags);
11578+}
11579+
11580+
11581+static struct ttm_fence_driver psb_ttm_fence_driver = {
11582+ .has_irq = NULL,
11583+ .emit = psb_fence_emit_sequence,
11584+ .flush = NULL,
11585+ .poll = psb_fence_poll,
11586+ .needed_flush = NULL,
11587+ .wait = NULL,
11588+ .signaled = NULL,
11589+ .lockup = psb_fence_lockup,
11590+};
11591+
11592+int psb_ttm_fence_device_init(struct ttm_fence_device *fdev)
11593+{
11594+ struct drm_psb_private *dev_priv =
11595+ container_of(fdev, struct drm_psb_private, fdev);
11596+ struct ttm_fence_class_init fci = {.wrap_diff = (1 << 30),
11597+ .flush_diff = (1 << 29),
11598+ .sequence_mask = 0xFFFFFFFF
11599+ };
11600+
11601+ return ttm_fence_device_init(PSB_NUM_ENGINES,
11602+ dev_priv->mem_global_ref.object,
11603+ fdev, &fci, 1,
11604+ &psb_ttm_fence_driver);
11605+}
11606diff --git a/drivers/gpu/drm/psb/psb_gtt.c b/drivers/gpu/drm/psb/psb_gtt.c
11607new file mode 100644
11608index 0000000..7cb5a3d
11609--- /dev/null
11610+++ b/drivers/gpu/drm/psb/psb_gtt.c
11611@@ -0,0 +1,278 @@
11612+/**************************************************************************
11613+ * Copyright (c) 2007, Intel Corporation.
11614+ * All Rights Reserved.
11615+ *
11616+ * This program is free software; you can redistribute it and/or modify it
11617+ * under the terms and conditions of the GNU General Public License,
11618+ * version 2, as published by the Free Software Foundation.
11619+ *
11620+ * This program is distributed in the hope it will be useful, but WITHOUT
11621+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11622+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11623+ * more details.
11624+ *
11625+ * You should have received a copy of the GNU General Public License along with
11626+ * this program; if not, write to the Free Software Foundation, Inc.,
11627+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
11628+ *
11629+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
11630+ * develop this driver.
11631+ *
11632+ **************************************************************************/
11633+/*
11634+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
11635+ */
11636+#include <drm/drmP.h>
11637+#include "psb_drv.h"
11638+
11639+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
11640+{
11641+ uint32_t mask = PSB_PTE_VALID;
11642+
11643+ if (type & PSB_MMU_CACHED_MEMORY)
11644+ mask |= PSB_PTE_CACHED;
11645+ if (type & PSB_MMU_RO_MEMORY)
11646+ mask |= PSB_PTE_RO;
11647+ if (type & PSB_MMU_WO_MEMORY)
11648+ mask |= PSB_PTE_WO;
11649+
11650+ return (pfn << PAGE_SHIFT) | mask;
11651+}
11652+
11653+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
11654+{
11655+ struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
11656+
11657+ if (!tmp)
11658+ return NULL;
11659+
11660+ init_rwsem(&tmp->sem);
11661+ tmp->dev = dev;
11662+
11663+ return tmp;
11664+}
11665+
11666+void psb_gtt_takedown(struct psb_gtt *pg, int free)
11667+{
11668+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
11669+
11670+ if (!pg)
11671+ return;
11672+
11673+ if (pg->gtt_map) {
11674+ iounmap(pg->gtt_map);
11675+ pg->gtt_map = NULL;
11676+ }
11677+ if (pg->initialized) {
11678+ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
11679+ pg->gmch_ctrl);
11680+ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
11681+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
11682+ }
11683+ if (free)
11684+ kfree(pg);
11685+}
11686+
11687+int psb_gtt_init(struct psb_gtt *pg, int resume)
11688+{
11689+ struct drm_device *dev = pg->dev;
11690+ struct drm_psb_private *dev_priv = dev->dev_private;
11691+ unsigned gtt_pages;
11692+ unsigned long stolen_size, vram_stolen_size, ci_stolen_size;
11693+ unsigned long rar_stolen_size;
11694+ unsigned i, num_pages;
11695+ unsigned pfn_base;
11696+ uint32_t vram_pages;
11697+
11698+ int ret = 0;
11699+ uint32_t pte;
11700+
11701+ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
11702+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
11703+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
11704+
11705+ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
11706+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
11707+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
11708+
11709+ pg->initialized = 1;
11710+
11711+ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
11712+
11713+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
11714+ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
11715+ gtt_pages =
11716+ pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
11717+ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
11718+ >> PAGE_SHIFT;
11719+
11720+ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
11721+ vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
11722+
11723+ /* CI is not included in the stolen size since the TOPAZ MMU bug */
11724+ ci_stolen_size = dev_priv->ci_region_size;
11725+ /* add CI & RAR share buffer space to stolen_size */
11726+ /* stolen_size = vram_stolen_size + ci_stolen_size; */
11727+ stolen_size = vram_stolen_size;
11728+
11729+ rar_stolen_size = dev_priv->rar_region_size;
11730+ stolen_size += rar_stolen_size;
11731+
11732+ PSB_DEBUG_INIT("GTT phys start: 0x%08x.\n", pg->gtt_phys_start);
11733+ PSB_DEBUG_INIT("GTT start: 0x%08x.\n", pg->gtt_start);
11734+ PSB_DEBUG_INIT("GATT start: 0x%08x.\n", pg->gatt_start);
11735+ PSB_DEBUG_INIT("GTT pages: %u\n", gtt_pages);
11736+ PSB_DEBUG_INIT("Stolen size: %lu kiB\n", stolen_size / 1024);
11737+
11738+ if (resume && (gtt_pages != pg->gtt_pages) &&
11739+ (stolen_size != pg->stolen_size)) {
11740+ DRM_ERROR("GTT resume error.\n");
11741+ ret = -EINVAL;
11742+ goto out_err;
11743+ }
11744+
11745+ pg->gtt_pages = gtt_pages;
11746+ pg->stolen_size = stolen_size;
11747+ pg->vram_stolen_size = vram_stolen_size;
11748+ pg->ci_stolen_size = ci_stolen_size;
11749+ pg->rar_stolen_size = rar_stolen_size;
11750+ pg->gtt_map =
11751+ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
11752+ if (!pg->gtt_map) {
11753+ DRM_ERROR("Failure to map gtt.\n");
11754+ ret = -ENOMEM;
11755+ goto out_err;
11756+ }
11757+
11758+ /*
11759+ * insert vram stolen pages.
11760+ */
11761+
11762+ pfn_base = pg->stolen_base >> PAGE_SHIFT;
11763+ vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
11764+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
11765+ num_pages, pfn_base);
11766+ for (i = 0; i < num_pages; ++i) {
11767+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
11768+ iowrite32(pte, pg->gtt_map + i);
11769+ }
11770+#if 0
11771+ /*
11772+ * insert CI stolen pages
11773+ */
11774+
11775+ pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT;
11776+ num_pages = ci_stolen_size >> PAGE_SHIFT;
11777+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
11778+ num_pages, pfn_base);
11779+ for (; i < num_pages; ++i) {
11780+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
11781+ iowrite32(pte, pg->gtt_map + i);
11782+ }
11783+#endif
11784+
11785+ /*
11786+ * insert RAR stolen pages
11787+ */
11788+ if (rar_stolen_size != 0) {
11789+ pfn_base = dev_priv->rar_region_start >> PAGE_SHIFT;
11790+ num_pages = rar_stolen_size >> PAGE_SHIFT;
11791+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
11792+ num_pages, pfn_base);
11793+ for (; i < num_pages + vram_pages ; ++i) {
11794+ pte = psb_gtt_mask_pte(pfn_base + i - vram_pages, 0);
11795+ iowrite32(pte, pg->gtt_map + i);
11796+ }
11797+ }
11798+ /*
11799+ * Init rest of gtt.
11800+ */
11801+
11802+ pfn_base = page_to_pfn(dev_priv->scratch_page);
11803+ pte = psb_gtt_mask_pte(pfn_base, 0);
11804+ PSB_DEBUG_INIT("Initializing the rest of a total "
11805+ "of %d gtt pages.\n", pg->gatt_pages);
11806+
11807+ for (; i < pg->gatt_pages; ++i)
11808+ iowrite32(pte, pg->gtt_map + i);
11809+ (void) ioread32(pg->gtt_map + i - 1);
11810+
11811+ return 0;
11812+
11813+out_err:
11814+ psb_gtt_takedown(pg, 0);
11815+ return ret;
11816+}
11817+
11818+int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
11819+ unsigned offset_pages, unsigned num_pages,
11820+ unsigned desired_tile_stride,
11821+ unsigned hw_tile_stride, int type)
11822+{
11823+ unsigned rows = 1;
11824+ unsigned add;
11825+ unsigned row_add;
11826+ unsigned i;
11827+ unsigned j;
11828+ uint32_t *cur_page = NULL;
11829+ uint32_t pte;
11830+
11831+ if (hw_tile_stride)
11832+ rows = num_pages / desired_tile_stride;
11833+ else
11834+ desired_tile_stride = num_pages;
11835+
11836+ add = desired_tile_stride;
11837+ row_add = hw_tile_stride;
11838+
11839+ down_read(&pg->sem);
11840+ for (i = 0; i < rows; ++i) {
11841+ cur_page = pg->gtt_map + offset_pages;
11842+ for (j = 0; j < desired_tile_stride; ++j) {
11843+ pte =
11844+ psb_gtt_mask_pte(page_to_pfn(*pages++), type);
11845+ iowrite32(pte, cur_page++);
11846+ }
11847+ offset_pages += add;
11848+ }
11849+ (void) ioread32(cur_page - 1);
11850+ up_read(&pg->sem);
11851+
11852+ return 0;
11853+}
11854+
11855+int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
11856+ unsigned num_pages, unsigned desired_tile_stride,
11857+ unsigned hw_tile_stride)
11858+{
11859+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
11860+ unsigned rows = 1;
11861+ unsigned add;
11862+ unsigned row_add;
11863+ unsigned i;
11864+ unsigned j;
11865+ uint32_t *cur_page = NULL;
11866+ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
11867+ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
11868+
11869+ if (hw_tile_stride)
11870+ rows = num_pages / desired_tile_stride;
11871+ else
11872+ desired_tile_stride = num_pages;
11873+
11874+ add = desired_tile_stride;
11875+ row_add = hw_tile_stride;
11876+
11877+ down_read(&pg->sem);
11878+ for (i = 0; i < rows; ++i) {
11879+ cur_page = pg->gtt_map + offset_pages;
11880+ for (j = 0; j < desired_tile_stride; ++j)
11881+ iowrite32(pte, cur_page++);
11882+
11883+ offset_pages += add;
11884+ }
11885+ (void) ioread32(cur_page - 1);
11886+ up_read(&pg->sem);
11887+
11888+ return 0;
11889+}
11890diff --git a/drivers/gpu/drm/psb/psb_hotplug.c b/drivers/gpu/drm/psb/psb_hotplug.c
11891new file mode 100644
11892index 0000000..38e1f35
11893--- /dev/null
11894+++ b/drivers/gpu/drm/psb/psb_hotplug.c
11895@@ -0,0 +1,427 @@
11896+/*
11897+ * Copyright © 2009 Intel Corporation
11898+ *
11899+ * Permission is hereby granted, free of charge, to any person obtaining a
11900+ * copy of this software and associated documentation files (the "Software"),
11901+ * to deal in the Software without restriction, including without limitation
11902+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11903+ * and/or sell copies of the Software, and to permit persons to whom the
11904+ * Software is furnished to do so, subject to the following conditions:
11905+ *
11906+ * The above copyright notice and this permission notice (including the next
11907+ * paragraph) shall be included in all copies or substantial portions of the
11908+ * Software.
11909+ *
11910+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
11911+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11912+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
11913+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
11914+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
11915+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
11916+ * IN THE SOFTWARE.
11917+ *
11918+ * Authors:
11919+ * James C. Gualario <james.c.gualario@intel.com>
11920+ *
11921+ */
11922+#include "psb_umevents.h"
11923+#include "psb_hotplug.h"
11924+/**
11925+ * inform the kernel of the work to be performed and related function.
11926+ *
11927+ */
11928+DECLARE_WORK(hotplug_dev_create_work, &psb_hotplug_dev_create_wq);
11929+DECLARE_WORK(hotplug_dev_remove_work, &psb_hotplug_dev_remove_wq);
11930+DECLARE_WORK(hotplug_dev_change_work, &psb_hotplug_dev_change_wq);
11931+/**
11932+ * psb_hotplug_notify_change_um - notify user mode of hotplug changes
11933+ *
11934+ * @name: name of event to notify user mode of change to
11935+ * @state: hotplug state to search for event object in
11936+ *
11937+ */
11938+int psb_hotplug_notify_change_um(const char *name,
11939+ struct hotplug_state *state)
11940+{
11941+ strcpy(&(state->hotplug_change_wq_data.dev_name_arry
11942+ [state->hotplug_change_wq_data.dev_name_write][0]), name);
11943+ state->hotplug_change_wq_data.dev_name_arry_rw_status
11944+ [state->hotplug_change_wq_data.dev_name_write] =
11945+ DRM_HOTPLUG_READY_TO_READ;
11946+ if (state->hotplug_change_wq_data.dev_name_read_write_wrap_ack == 1)
11947+ state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0;
11948+ state->hotplug_change_wq_data.dev_name_write++;
11949+ if (state->hotplug_change_wq_data.dev_name_write ==
11950+ state->hotplug_change_wq_data.dev_name_read) {
11951+ state->hotplug_change_wq_data.dev_name_write--;
11952+ return IRQ_NONE;
11953+ }
11954+ if (state->hotplug_change_wq_data.dev_name_write >
11955+ DRM_HOTPLUG_RING_DEPTH_MAX) {
11956+ state->hotplug_change_wq_data.dev_name_write = 0;
11957+ state->hotplug_change_wq_data.dev_name_write_wrap = 1;
11958+ }
11959+ state->hotplug_change_wq_data.hotplug_dev_list = state->list;
11960+ queue_work(state->hotplug_wq, &(state->hotplug_change_wq_data.work));
11961+ return IRQ_HANDLED;
11962+}
11963+/**
11964+ *
11965+ * psb_hotplug_create_and_notify_um - create and notify user mode of new dev
11966+ *
11967+ * @name: name to give for new event / device
11968+ * @state: hotplug state to track new event /device in
11969+ *
11970+ */
11971+int psb_hotplug_create_and_notify_um(const char *name,
11972+ struct hotplug_state *state)
11973+{
11974+ strcpy(&(state->hotplug_create_wq_data.dev_name_arry
11975+ [state->hotplug_create_wq_data.dev_name_write][0]), name);
11976+ state->hotplug_create_wq_data.dev_name_arry_rw_status
11977+ [state->hotplug_create_wq_data.dev_name_write] =
11978+ DRM_HOTPLUG_READY_TO_READ;
11979+ if (state->hotplug_create_wq_data.dev_name_read_write_wrap_ack == 1)
11980+ state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0;
11981+ state->hotplug_create_wq_data.dev_name_write++;
11982+ if (state->hotplug_create_wq_data.dev_name_write ==
11983+ state->hotplug_create_wq_data.dev_name_read) {
11984+ state->hotplug_create_wq_data.dev_name_write--;
11985+ return IRQ_NONE;
11986+ }
11987+ if (state->hotplug_create_wq_data.dev_name_write >
11988+ DRM_HOTPLUG_RING_DEPTH_MAX) {
11989+ state->hotplug_create_wq_data.dev_name_write = 0;
11990+ state->hotplug_create_wq_data.dev_name_write_wrap = 1;
11991+ }
11992+ state->hotplug_create_wq_data.hotplug_dev_list = state->list;
11993+ queue_work(state->hotplug_wq, &(state->hotplug_create_wq_data.work));
11994+ return IRQ_HANDLED;
11995+}
11996+EXPORT_SYMBOL(psb_hotplug_create_and_notify_um);
11997+/**
11998+ * psb_hotplug_remove_and_notify_um - remove device and notify user mode
11999+ *
12000+ * @name: name of event / device to remove
12001+ * @state: hotplug state to remove event / device from
12002+ *
12003+ */
12004+int psb_hotplug_remove_and_notify_um(const char *name,
12005+ struct hotplug_state *state)
12006+{
12007+ strcpy(&(state->hotplug_remove_wq_data.dev_name_arry
12008+ [state->hotplug_remove_wq_data.dev_name_write][0]), name);
12009+ state->hotplug_remove_wq_data.dev_name_arry_rw_status
12010+ [state->hotplug_remove_wq_data.dev_name_write] =
12011+ DRM_HOTPLUG_READY_TO_READ;
12012+ if (state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack == 1)
12013+ state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
12014+ state->hotplug_remove_wq_data.dev_name_write++;
12015+ if (state->hotplug_remove_wq_data.dev_name_write ==
12016+ state->hotplug_remove_wq_data.dev_name_read) {
12017+ state->hotplug_remove_wq_data.dev_name_write--;
12018+ return IRQ_NONE;
12019+ }
12020+ if (state->hotplug_remove_wq_data.dev_name_write >
12021+ DRM_HOTPLUG_RING_DEPTH_MAX) {
12022+ state->hotplug_remove_wq_data.dev_name_write = 0;
12023+ state->hotplug_remove_wq_data.dev_name_write_wrap = 1;
12024+ }
12025+ state->hotplug_remove_wq_data.hotplug_dev_list = state->list;
12026+ queue_work(state->hotplug_wq, &(state->hotplug_remove_wq_data.work));
12027+ return IRQ_HANDLED;
12028+}
12029+EXPORT_SYMBOL(psb_hotplug_remove_and_notify_um);
12030+/**
12031+ * psb_hotplug_device_pool_create_and_init - make new hotplug device pool
12032+ *
12033+ * @parent_kobj: parent kobject to associate hotplug kset with
12034+ * @state: hotplug state to assocaite workqueues with
12035+ *
12036+ */
12037+struct umevent_list *psb_hotplug_device_pool_create_and_init(
12038+ struct kobject *parent_kobj,
12039+ struct hotplug_state *state)
12040+{
12041+ struct umevent_list *new_hotplug_dev_list = NULL;
12042+
12043+ new_hotplug_dev_list = psb_umevent_create_list();
12044+ if (new_hotplug_dev_list)
12045+ psb_umevent_init(parent_kobj, new_hotplug_dev_list,
12046+ "psb_hotplug");
12047+
12048+ state->hotplug_wq = create_singlethread_workqueue("hotplug-wq");
12049+ if (!state->hotplug_wq)
12050+ return NULL;
12051+
12052+ INIT_WORK(&state->hotplug_create_wq_data.work,
12053+ psb_hotplug_dev_create_wq);
12054+ INIT_WORK(&state->hotplug_remove_wq_data.work,
12055+ psb_hotplug_dev_remove_wq);
12056+ INIT_WORK(&state->hotplug_change_wq_data.work,
12057+ psb_hotplug_dev_change_wq);
12058+
12059+ state->hotplug_create_wq_data.dev_name_read = 0;
12060+ state->hotplug_create_wq_data.dev_name_write = 0;
12061+ state->hotplug_create_wq_data.dev_name_write_wrap = 0;
12062+ state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0;
12063+ memset(&(state->hotplug_create_wq_data.dev_name_arry_rw_status[0]),
12064+ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
12065+
12066+ state->hotplug_remove_wq_data.dev_name_read = 0;
12067+ state->hotplug_remove_wq_data.dev_name_write = 0;
12068+ state->hotplug_remove_wq_data.dev_name_write_wrap = 0;
12069+ state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
12070+ memset(&(state->hotplug_remove_wq_data.dev_name_arry_rw_status[0]),
12071+ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
12072+
12073+ state->hotplug_change_wq_data.dev_name_read = 0;
12074+ state->hotplug_change_wq_data.dev_name_write = 0;
12075+ state->hotplug_change_wq_data.dev_name_write_wrap = 0;
12076+ state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0;
12077+ memset(&(state->hotplug_change_wq_data.dev_name_arry_rw_status[0]),
12078+ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
12079+
12080+ return new_hotplug_dev_list;
12081+}
12082+EXPORT_SYMBOL(psb_hotplug_device_pool_create_and_init);
12083+/**
12084+ *
12085+ * psb_hotplug_init - init hotplug subsystem
12086+ *
12087+ * @parent_kobj: parent kobject to associate hotplug state with
12088+ *
12089+ */
12090+struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj)
12091+{
12092+ struct hotplug_state *state;
12093+ state = kzalloc(sizeof(struct hotplug_state), GFP_KERNEL);
12094+ state->list = NULL;
12095+ state->list = psb_hotplug_device_pool_create_and_init(
12096+ parent_kobj,
12097+ state);
12098+ return state;
12099+}
12100+/**
12101+ * psb_hotplug_device_pool_destroy - destroy all hotplug related resources
12102+ *
12103+ * @state: hotplug state to destroy
12104+ *
12105+ */
12106+void psb_hotplug_device_pool_destroy(struct hotplug_state *state)
12107+{
12108+ flush_workqueue(state->hotplug_wq);
12109+ destroy_workqueue(state->hotplug_wq);
12110+ psb_umevent_cleanup(state->list);
12111+ kfree(state);
12112+}
12113+EXPORT_SYMBOL(psb_hotplug_device_pool_destroy);
12114+/**
12115+ * psb_hotplug_dev_create_wq - create workqueue implementation
12116+ *
12117+ * @work: work struct to use for kernel scheduling
12118+ *
12119+ */
12120+void psb_hotplug_dev_create_wq(struct work_struct *work)
12121+{
12122+ struct hotplug_disp_workqueue_data *wq_data;
12123+ struct umevent_obj *wq_working_hotplug_disp_obj;
12124+ wq_data = to_hotplug_disp_workqueue_data(work);
12125+ if (wq_data->dev_name_write_wrap == 1) {
12126+ wq_data->dev_name_read_write_wrap_ack = 1;
12127+ wq_data->dev_name_write_wrap = 0;
12128+ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
12129+ if (wq_data->dev_name_arry_rw_status
12130+ [wq_data->dev_name_read] ==
12131+ DRM_HOTPLUG_READY_TO_READ) {
12132+ wq_working_hotplug_disp_obj =
12133+ psb_create_umevent_obj(
12134+ &wq_data->dev_name_arry
12135+ [wq_data->dev_name_read][0],
12136+ wq_data->hotplug_dev_list);
12137+ wq_data->dev_name_arry_rw_status
12138+ [wq_data->dev_name_read] =
12139+ DRM_HOTPLUG_READ_COMPLETE;
12140+ psb_umevent_notify
12141+ (wq_working_hotplug_disp_obj);
12142+ }
12143+ wq_data->dev_name_read++;
12144+ }
12145+ wq_data->dev_name_read = 0;
12146+ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
12147+ if (wq_data->dev_name_arry_rw_status
12148+ [wq_data->dev_name_read] ==
12149+ DRM_HOTPLUG_READY_TO_READ) {
12150+ wq_working_hotplug_disp_obj =
12151+ psb_create_umevent_obj(
12152+ &wq_data->dev_name_arry
12153+ [wq_data->dev_name_read][0],
12154+ wq_data->hotplug_dev_list);
12155+ wq_data->dev_name_arry_rw_status
12156+ [wq_data->dev_name_read] =
12157+ DRM_HOTPLUG_READ_COMPLETE;
12158+ psb_umevent_notify
12159+ (wq_working_hotplug_disp_obj);
12160+ }
12161+ wq_data->dev_name_read++;
12162+ }
12163+ } else {
12164+ while (wq_data->dev_name_read < wq_data->dev_name_write) {
12165+ if (wq_data->dev_name_arry_rw_status
12166+ [wq_data->dev_name_read] ==
12167+ DRM_HOTPLUG_READY_TO_READ) {
12168+ wq_working_hotplug_disp_obj =
12169+ psb_create_umevent_obj(
12170+ &wq_data->dev_name_arry
12171+ [wq_data->dev_name_read][0],
12172+ wq_data->hotplug_dev_list);
12173+ wq_data->dev_name_arry_rw_status
12174+ [wq_data->dev_name_read] =
12175+ DRM_HOTPLUG_READ_COMPLETE;
12176+ psb_umevent_notify
12177+ (wq_working_hotplug_disp_obj);
12178+ }
12179+ wq_data->dev_name_read++;
12180+ }
12181+ }
12182+ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
12183+ wq_data->dev_name_read = 0;
12184+}
12185+EXPORT_SYMBOL(psb_hotplug_dev_create_wq);
12186+/**
12187+ * psb_hotplug_dev_remove_wq - remove workqueue implementation
12188+ *
12189+ * @work: work struct to use for kernel scheduling
12190+ *
12191+ */
12192+void psb_hotplug_dev_remove_wq(struct work_struct *work)
12193+{
12194+ struct hotplug_disp_workqueue_data *wq_data;
12195+ wq_data = to_hotplug_disp_workqueue_data(work);
12196+ if (wq_data->dev_name_write_wrap == 1) {
12197+ wq_data->dev_name_read_write_wrap_ack = 1;
12198+ wq_data->dev_name_write_wrap = 0;
12199+ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
12200+ if (wq_data->dev_name_arry_rw_status
12201+ [wq_data->dev_name_read] ==
12202+ DRM_HOTPLUG_READY_TO_READ) {
12203+ psb_umevent_remove_from_list(
12204+ wq_data->hotplug_dev_list,
12205+ &wq_data->dev_name_arry
12206+ [wq_data->dev_name_read][0]);
12207+ wq_data->dev_name_arry_rw_status
12208+ [wq_data->dev_name_read] =
12209+ DRM_HOTPLUG_READ_COMPLETE;
12210+ }
12211+ wq_data->dev_name_read++;
12212+ }
12213+ wq_data->dev_name_read = 0;
12214+ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
12215+ if (wq_data->dev_name_arry_rw_status
12216+ [wq_data->dev_name_read] ==
12217+ DRM_HOTPLUG_READY_TO_READ) {
12218+ psb_umevent_remove_from_list(
12219+ wq_data->hotplug_dev_list,
12220+ &wq_data->dev_name_arry
12221+ [wq_data->dev_name_read][0]);
12222+ wq_data->dev_name_arry_rw_status
12223+ [wq_data->dev_name_read] =
12224+ DRM_HOTPLUG_READ_COMPLETE;
12225+ }
12226+ wq_data->dev_name_read++;
12227+ }
12228+ } else {
12229+ while (wq_data->dev_name_read < wq_data->dev_name_write) {
12230+ if (wq_data->dev_name_arry_rw_status
12231+ [wq_data->dev_name_read] ==
12232+ DRM_HOTPLUG_READY_TO_READ) {
12233+ psb_umevent_remove_from_list(
12234+ wq_data->hotplug_dev_list,
12235+ &wq_data->dev_name_arry
12236+ [wq_data->dev_name_read][0]);
12237+ wq_data->dev_name_arry_rw_status
12238+ [wq_data->dev_name_read] =
12239+ DRM_HOTPLUG_READ_COMPLETE;
12240+ }
12241+ wq_data->dev_name_read++;
12242+ }
12243+ }
12244+ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
12245+ wq_data->dev_name_read = 0;
12246+}
12247+EXPORT_SYMBOL(psb_hotplug_dev_remove_wq);
12248+/**
12249+ * psb_hotplug_dev_change_wq - change workqueue implementation
12250+ *
12251+ * @work: work struct to use for kernel scheduling
12252+ *
12253+ */
12254+void psb_hotplug_dev_change_wq(struct work_struct *work)
12255+{
12256+ struct hotplug_disp_workqueue_data *wq_data;
12257+ struct umevent_obj *wq_working_hotplug_disp_obj;
12258+ wq_data = to_hotplug_disp_workqueue_data(work);
12259+ if (wq_data->dev_name_write_wrap == 1) {
12260+ wq_data->dev_name_read_write_wrap_ack = 1;
12261+ wq_data->dev_name_write_wrap = 0;
12262+ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
12263+ if (wq_data->dev_name_arry_rw_status
12264+ [wq_data->dev_name_read] ==
12265+ DRM_HOTPLUG_READY_TO_READ) {
12266+ wq_data->dev_name_arry_rw_status
12267+ [wq_data->dev_name_read] =
12268+ DRM_HOTPLUG_READ_COMPLETE;
12269+
12270+ wq_working_hotplug_disp_obj =
12271+ psb_umevent_find_obj(
12272+ &wq_data->dev_name_arry
12273+ [wq_data->dev_name_read][0],
12274+ wq_data->hotplug_dev_list);
12275+ psb_umevent_notify_change_gfxsock
12276+ (wq_working_hotplug_disp_obj);
12277+ }
12278+ wq_data->dev_name_read++;
12279+ }
12280+ wq_data->dev_name_read = 0;
12281+ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
12282+ if (wq_data->dev_name_arry_rw_status
12283+ [wq_data->dev_name_read] ==
12284+ DRM_HOTPLUG_READY_TO_READ) {
12285+ wq_data->dev_name_arry_rw_status
12286+ [wq_data->dev_name_read] =
12287+ DRM_HOTPLUG_READ_COMPLETE;
12288+
12289+ wq_working_hotplug_disp_obj =
12290+ psb_umevent_find_obj(
12291+ &wq_data->dev_name_arry
12292+ [wq_data->dev_name_read][0],
12293+ wq_data->hotplug_dev_list);
12294+ psb_umevent_notify_change_gfxsock
12295+ (wq_working_hotplug_disp_obj);
12296+ }
12297+ wq_data->dev_name_read++;
12298+ }
12299+ } else {
12300+ while (wq_data->dev_name_read < wq_data->dev_name_write) {
12301+ if (wq_data->dev_name_arry_rw_status
12302+ [wq_data->dev_name_read] ==
12303+ DRM_HOTPLUG_READY_TO_READ) {
12304+ wq_data->dev_name_arry_rw_status
12305+ [wq_data->dev_name_read] =
12306+ DRM_HOTPLUG_READ_COMPLETE;
12307+
12308+ wq_working_hotplug_disp_obj =
12309+ psb_umevent_find_obj(
12310+ &wq_data->dev_name_arry
12311+ [wq_data->dev_name_read][0],
12312+ wq_data->hotplug_dev_list);
12313+ psb_umevent_notify_change_gfxsock
12314+ (wq_working_hotplug_disp_obj);
12315+ }
12316+ wq_data->dev_name_read++;
12317+ }
12318+ }
12319+ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
12320+ wq_data->dev_name_read = 0;
12321+}
12322+EXPORT_SYMBOL(psb_hotplug_dev_change_wq);
12323diff --git a/drivers/gpu/drm/psb/psb_hotplug.h b/drivers/gpu/drm/psb/psb_hotplug.h
12324new file mode 100644
12325index 0000000..8a63efc
12326--- /dev/null
12327+++ b/drivers/gpu/drm/psb/psb_hotplug.h
12328@@ -0,0 +1,96 @@
12329+/*
12330+ * Copyright © 2009 Intel Corporation
12331+ *
12332+ * Permission is hereby granted, free of charge, to any person obtaining a
12333+ * copy of this software and associated documentation files (the "Software"),
12334+ * to deal in the Software without restriction, including without limitation
12335+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12336+ * and/or sell copies of the Software, and to permit persons to whom the
12337+ * Software is furnished to do so, subject to the following conditions:
12338+ *
12339+ * The above copyright notice and this permission notice (including the next
12340+ * paragraph) shall be included in all copies or substantial portions of the
12341+ * Software.
12342+ *
12343+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12344+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12345+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
12346+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
12347+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
12348+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
12349+ * IN THE SOFTWARE.
12350+ *
12351+ * Authors:
12352+ * James C. Gualario <james.c.gualario@intel.com>
12353+ *
12354+ */
12355+#ifndef _PSB_HOTPLUG_H_
12356+#define _PSB_HOTPLUG_H_
12357+/**
12358+ * required includes
12359+ *
12360+ */
12361+#include "psb_umevents.h"
12362+/**
12363+ * hotplug specific defines
12364+ *
12365+ */
12366+#define DRM_HOTPLUG_RING_DEPTH 256
12367+#define DRM_HOTPLUG_RING_DEPTH_MAX (DRM_HOTPLUG_RING_DEPTH-1)
12368+#define DRM_HOTPLUG_READY_TO_READ 1
12369+#define DRM_HOTPLUG_READ_COMPLETE 2
12370+/**
12371+ * hotplug workqueue data struct.
12372+ */
12373+struct hotplug_disp_workqueue_data {
12374+ struct work_struct work;
12375+ const char *dev_name;
12376+ int dev_name_write;
12377+ int dev_name_read;
12378+ int dev_name_write_wrap;
12379+ int dev_name_read_write_wrap_ack;
12380+ char dev_name_arry[DRM_HOTPLUG_RING_DEPTH][24];
12381+ int dev_name_arry_rw_status[DRM_HOTPLUG_RING_DEPTH];
12382+ struct umevent_list *hotplug_dev_list;
12383+};
12384+/**
12385+ * hotplug state structure
12386+ *
12387+ */
12388+struct hotplug_state {
12389+ struct workqueue_struct *hotplug_wq;
12390+ struct hotplug_disp_workqueue_data hotplug_remove_wq_data;
12391+ struct hotplug_disp_workqueue_data hotplug_create_wq_data;
12392+ struct hotplug_disp_workqueue_data hotplug_change_wq_data;
12393+ struct umevent_list *list;
12394+};
12395+/**
12396+ * main interface function prototytpes for hotplug support.
12397+ *
12398+ */
12399+struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj);
12400+extern int psb_hotplug_notify_change_um(const char *name,
12401+ struct hotplug_state *state);
12402+extern int psb_hotplug_create_and_notify_um(const char *name,
12403+ struct hotplug_state *state);
12404+extern int psb_hotplug_remove_and_notify_um(const char *name,
12405+ struct hotplug_state *state);
12406+extern struct umevent_list *psb_hotplug_device_pool_create_and_init(
12407+ struct kobject *parent_kobj,
12408+ struct hotplug_state *state);
12409+extern void psb_hotplug_device_pool_destroy(struct hotplug_state *state);
12410+/**
12411+ * to go back and forth between work strauct and workqueue data
12412+ *
12413+ */
12414+#define to_hotplug_disp_workqueue_data(x) \
12415+ container_of(x, struct hotplug_disp_workqueue_data, work)
12416+
12417+/**
12418+ * function prototypes for workqueue implementation
12419+ *
12420+ */
12421+extern void psb_hotplug_dev_create_wq(struct work_struct *work);
12422+extern void psb_hotplug_dev_remove_wq(struct work_struct *work);
12423+extern void psb_hotplug_dev_change_wq(struct work_struct *work);
12424+#endif
12425diff --git a/drivers/gpu/drm/psb/psb_intel_bios.c b/drivers/gpu/drm/psb/psb_intel_bios.c
12426new file mode 100644
12427index 0000000..02e4e27
12428--- /dev/null
12429+++ b/drivers/gpu/drm/psb/psb_intel_bios.c
12430@@ -0,0 +1,309 @@
12431+/*
12432+ * Copyright © 2006 Intel Corporation
12433+ *
12434+ * Permission is hereby granted, free of charge, to any person obtaining a
12435+ * copy of this software and associated documentation files (the "Software"),
12436+ * to deal in the Software without restriction, including without limitation
12437+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12438+ * and/or sell copies of the Software, and to permit persons to whom the
12439+ * Software is furnished to do so, subject to the following conditions:
12440+ *
12441+ * The above copyright notice and this permission notice (including the next
12442+ * paragraph) shall be included in all copies or substantial portions of the
12443+ * Software.
12444+ *
12445+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12446+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12447+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
12448+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
12449+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
12450+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
12451+ * SOFTWARE.
12452+ *
12453+ * Authors:
12454+ * Eric Anholt <eric@anholt.net>
12455+ *
12456+ */
12457+#include "drmP.h"
12458+#include "drm.h"
12459+#include "psb_drm.h"
12460+#include "psb_drv.h"
12461+#include "psb_intel_drv.h"
12462+#include "psb_intel_reg.h"
12463+#include "psb_intel_bios.h"
12464+
12465+
12466+static void * find_section(struct bdb_header *bdb, int section_id)
12467+{
12468+ u8 *base = (u8 *)bdb;
12469+ int index = 0;
12470+ u16 total, current_size;
12471+ u8 current_id;
12472+
12473+ /* skip to first section */
12474+ index += bdb->header_size;
12475+ total = bdb->bdb_size;
12476+
12477+ /* walk the sections looking for section_id */
12478+ while (index < total) {
12479+ current_id = *(base + index);
12480+ index++;
12481+ current_size = *((u16 *)(base + index));
12482+ index += 2;
12483+ if (current_id == section_id)
12484+ return base + index;
12485+ index += current_size;
12486+ }
12487+
12488+ return NULL;
12489+}
12490+
12491+static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
12492+ struct lvds_dvo_timing *dvo_timing)
12493+{
12494+ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
12495+ dvo_timing->hactive_lo;
12496+ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
12497+ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
12498+ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
12499+ dvo_timing->hsync_pulse_width;
12500+ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
12501+ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
12502+
12503+ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
12504+ dvo_timing->vactive_lo;
12505+ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
12506+ dvo_timing->vsync_off;
12507+ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
12508+ dvo_timing->vsync_pulse_width;
12509+ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
12510+ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
12511+ panel_fixed_mode->clock = dvo_timing->clock * 10;
12512+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
12513+
12514+ /* Some VBTs have bogus h/vtotal values */
12515+ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
12516+ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
12517+ if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
12518+ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
12519+
12520+ drm_mode_set_name(panel_fixed_mode);
12521+}
12522+
12523+static void parse_backlight_data(struct drm_psb_private * dev_priv,
12524+ struct bdb_header *bdb)
12525+{
12526+ struct bdb_lvds_backlight * vbt_lvds_bl = NULL;
12527+ struct bdb_lvds_backlight * lvds_bl;
12528+ u8 p_type = 0;
12529+ void * bl_start = NULL;
12530+ struct bdb_lvds_options * lvds_opts
12531+ = find_section(bdb, BDB_LVDS_OPTIONS);
12532+
12533+ dev_priv->lvds_bl = NULL;
12534+
12535+ if(lvds_opts) {
12536+ DRM_DEBUG("lvds_options found at %p\n", lvds_opts);
12537+ p_type = lvds_opts->panel_type;
12538+ } else {
12539+ DRM_DEBUG("no lvds_options\n");
12540+ return;
12541+ }
12542+
12543+ bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
12544+ vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
12545+
12546+ lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
12547+ if(!lvds_bl) {
12548+ DRM_DEBUG("No memory\n");
12549+ return;
12550+ }
12551+
12552+ memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
12553+
12554+ dev_priv->lvds_bl = lvds_bl;
12555+}
12556+
12557+/* Try to find integrated panel data */
12558+static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
12559+ struct bdb_header *bdb)
12560+{
12561+ struct bdb_lvds_options *lvds_options;
12562+ struct bdb_lvds_lfp_data *lvds_lfp_data;
12563+ struct bdb_lvds_lfp_data_entry *entry;
12564+ struct lvds_dvo_timing *dvo_timing;
12565+ struct drm_display_mode *panel_fixed_mode;
12566+
12567+ /* Defaults if we can't find VBT info */
12568+ dev_priv->lvds_dither = 0;
12569+ dev_priv->lvds_vbt = 0;
12570+
12571+ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
12572+ if (!lvds_options)
12573+ return;
12574+
12575+ dev_priv->lvds_dither = lvds_options->pixel_dither;
12576+ if (lvds_options->panel_type == 0xff)
12577+ return;
12578+
12579+ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
12580+ if (!lvds_lfp_data)
12581+ return;
12582+
12583+ dev_priv->lvds_vbt = 1;
12584+
12585+ entry = &lvds_lfp_data->data[lvds_options->panel_type];
12586+ dvo_timing = &entry->dvo_timing;
12587+
12588+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
12589+ GFP_KERNEL);
12590+
12591+ fill_detail_timing_data(panel_fixed_mode, dvo_timing);
12592+
12593+ dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
12594+
12595+ DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
12596+ drm_mode_debug_printmodeline(panel_fixed_mode);
12597+
12598+ return;
12599+}
12600+
12601+/* Try to find sdvo panel data */
12602+static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
12603+ struct bdb_header *bdb)
12604+{
12605+ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
12606+ struct lvds_dvo_timing *dvo_timing;
12607+ struct drm_display_mode *panel_fixed_mode;
12608+
12609+ dev_priv->sdvo_lvds_vbt_mode = NULL;
12610+
12611+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
12612+ if (!sdvo_lvds_options)
12613+ return;
12614+
12615+ dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
12616+ if (!dvo_timing)
12617+ return;
12618+
12619+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
12620+
12621+ if (!panel_fixed_mode)
12622+ return;
12623+
12624+ fill_detail_timing_data(panel_fixed_mode,
12625+ dvo_timing + sdvo_lvds_options->panel_type);
12626+
12627+ dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
12628+
12629+ return;
12630+}
12631+
12632+static void parse_general_features(struct drm_psb_private *dev_priv,
12633+ struct bdb_header *bdb)
12634+{
12635+ struct bdb_general_features *general;
12636+
12637+ /* Set sensible defaults in case we can't find the general block */
12638+ dev_priv->int_tv_support = 1;
12639+ dev_priv->int_crt_support = 1;
12640+
12641+ general = find_section(bdb, BDB_GENERAL_FEATURES);
12642+ if (general) {
12643+ dev_priv->int_tv_support = general->int_tv_support;
12644+ dev_priv->int_crt_support = general->int_crt_support;
12645+ dev_priv->lvds_use_ssc = general->enable_ssc;
12646+
12647+ if (dev_priv->lvds_use_ssc) {
12648+ if (IS_I855(dev_priv->dev))
12649+ dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
12650+ else
12651+ dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
12652+ }
12653+ }
12654+}
12655+
12656+/**
12657+ * psb_intel_init_bios - initialize VBIOS settings & find VBT
12658+ * @dev: DRM device
12659+ *
12660+ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
12661+ * to appropriate values.
12662+ *
12663+ * VBT existence is a sanity check that is relied on by other i830_bios.c code.
12664+ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
12665+ * feed an updated VBT back through that, compared to what we'll fetch using
12666+ * this method of groping around in the BIOS data.
12667+ *
12668+ * Returns 0 on success, nonzero on failure.
12669+ */
12670+bool psb_intel_init_bios(struct drm_device *dev)
12671+{
12672+ struct drm_psb_private *dev_priv = dev->dev_private;
12673+ struct pci_dev *pdev = dev->pdev;
12674+ struct vbt_header *vbt = NULL;
12675+ struct bdb_header *bdb;
12676+ u8 __iomem *bios;
12677+ size_t size;
12678+ int i;
12679+
12680+ bios = pci_map_rom(pdev, &size);
12681+ if (!bios)
12682+ return -1;
12683+
12684+ /* Scour memory looking for the VBT signature */
12685+ for (i = 0; i + 4 < size; i++) {
12686+ if (!memcmp(bios + i, "$VBT", 4)) {
12687+ vbt = (struct vbt_header *)(bios + i);
12688+ break;
12689+ }
12690+ }
12691+
12692+ if (!vbt) {
12693+ DRM_ERROR("VBT signature missing\n");
12694+ pci_unmap_rom(pdev, bios);
12695+ return -1;
12696+ }
12697+
12698+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
12699+
12700+ /* Grab useful general definitions */
12701+ parse_general_features(dev_priv, bdb);
12702+ parse_lfp_panel_data(dev_priv, bdb);
12703+ parse_sdvo_panel_data(dev_priv, bdb);
12704+ parse_backlight_data(dev_priv, bdb);
12705+
12706+ pci_unmap_rom(pdev, bios);
12707+
12708+ return 0;
12709+}
12710+
12711+/**
12712+ * Destory and free VBT data
12713+ */
12714+void psb_intel_destory_bios(struct drm_device * dev)
12715+{
12716+ struct drm_psb_private *dev_priv = dev->dev_private;
12717+ struct drm_display_mode * sdvo_lvds_vbt_mode =
12718+ dev_priv->sdvo_lvds_vbt_mode;
12719+ struct drm_display_mode * lfp_lvds_vbt_mode =
12720+ dev_priv->lfp_lvds_vbt_mode;
12721+ struct bdb_lvds_backlight * lvds_bl =
12722+ dev_priv->lvds_bl;
12723+
12724+ /*free sdvo panel mode*/
12725+ if(sdvo_lvds_vbt_mode) {
12726+ dev_priv->sdvo_lvds_vbt_mode = NULL;
12727+ kfree(sdvo_lvds_vbt_mode);
12728+ }
12729+
12730+ if(lfp_lvds_vbt_mode) {
12731+ dev_priv->lfp_lvds_vbt_mode = NULL;
12732+ kfree(lfp_lvds_vbt_mode);
12733+ }
12734+
12735+ if(lvds_bl) {
12736+ dev_priv->lvds_bl = NULL;
12737+ kfree(lvds_bl);
12738+ }
12739+}
12740diff --git a/drivers/gpu/drm/psb/psb_intel_bios.h b/drivers/gpu/drm/psb/psb_intel_bios.h
12741new file mode 100644
12742index 0000000..1b0251d
12743--- /dev/null
12744+++ b/drivers/gpu/drm/psb/psb_intel_bios.h
12745@@ -0,0 +1,436 @@
12746+/*
12747+ * Copyright © 2006 Intel Corporation
12748+ *
12749+ * Permission is hereby granted, free of charge, to any person obtaining a
12750+ * copy of this software and associated documentation files (the "Software"),
12751+ * to deal in the Software without restriction, including without limitation
12752+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12753+ * and/or sell copies of the Software, and to permit persons to whom the
12754+ * Software is furnished to do so, subject to the following conditions:
12755+ *
12756+ * The above copyright notice and this permission notice (including the next
12757+ * paragraph) shall be included in all copies or substantial portions of the
12758+ * Software.
12759+ *
12760+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12761+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12762+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
12763+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
12764+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
12765+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
12766+ * SOFTWARE.
12767+ *
12768+ * Authors:
12769+ * Eric Anholt <eric@anholt.net>
12770+ *
12771+ */
12772+
12773+#ifndef _I830_BIOS_H_
12774+#define _I830_BIOS_H_
12775+
12776+#include "drmP.h"
12777+
12778+struct vbt_header {
12779+ u8 signature[20]; /**< Always starts with 'VBT$' */
12780+ u16 version; /**< decimal */
12781+ u16 header_size; /**< in bytes */
12782+ u16 vbt_size; /**< in bytes */
12783+ u8 vbt_checksum;
12784+ u8 reserved0;
12785+ u32 bdb_offset; /**< from beginning of VBT */
12786+ u32 aim_offset[4]; /**< from beginning of VBT */
12787+} __attribute__((packed));
12788+
12789+
12790+struct bdb_header {
12791+ u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
12792+ u16 version; /**< decimal */
12793+ u16 header_size; /**< in bytes */
12794+ u16 bdb_size; /**< in bytes */
12795+};
12796+
12797+/* strictly speaking, this is a "skip" block, but it has interesting info */
12798+struct vbios_data {
12799+ u8 type; /* 0 == desktop, 1 == mobile */
12800+ u8 relstage;
12801+ u8 chipset;
12802+ u8 lvds_present:1;
12803+ u8 tv_present:1;
12804+ u8 rsvd2:6; /* finish byte */
12805+ u8 rsvd3[4];
12806+ u8 signon[155];
12807+ u8 copyright[61];
12808+ u16 code_segment;
12809+ u8 dos_boot_mode;
12810+ u8 bandwidth_percent;
12811+ u8 rsvd4; /* popup memory size */
12812+ u8 resize_pci_bios;
12813+ u8 rsvd5; /* is crt already on ddc2 */
12814+} __attribute__((packed));
12815+
12816+/*
12817+ * There are several types of BIOS data blocks (BDBs), each block has
12818+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
12819+ * Known types are listed below.
12820+ */
12821+#define BDB_GENERAL_FEATURES 1
12822+#define BDB_GENERAL_DEFINITIONS 2
12823+#define BDB_OLD_TOGGLE_LIST 3
12824+#define BDB_MODE_SUPPORT_LIST 4
12825+#define BDB_GENERIC_MODE_TABLE 5
12826+#define BDB_EXT_MMIO_REGS 6
12827+#define BDB_SWF_IO 7
12828+#define BDB_SWF_MMIO 8
12829+#define BDB_DOT_CLOCK_TABLE 9
12830+#define BDB_MODE_REMOVAL_TABLE 10
12831+#define BDB_CHILD_DEVICE_TABLE 11
12832+#define BDB_DRIVER_FEATURES 12
12833+#define BDB_DRIVER_PERSISTENCE 13
12834+#define BDB_EXT_TABLE_PTRS 14
12835+#define BDB_DOT_CLOCK_OVERRIDE 15
12836+#define BDB_DISPLAY_SELECT 16
12837+/* 17 rsvd */
12838+#define BDB_DRIVER_ROTATION 18
12839+#define BDB_DISPLAY_REMOVE 19
12840+#define BDB_OEM_CUSTOM 20
12841+#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
12842+#define BDB_SDVO_LVDS_OPTIONS 22
12843+#define BDB_SDVO_PANEL_DTDS 23
12844+#define BDB_SDVO_LVDS_PNP_IDS 24
12845+#define BDB_SDVO_LVDS_POWER_SEQ 25
12846+#define BDB_TV_OPTIONS 26
12847+#define BDB_LVDS_OPTIONS 40
12848+#define BDB_LVDS_LFP_DATA_PTRS 41
12849+#define BDB_LVDS_LFP_DATA 42
12850+#define BDB_LVDS_BACKLIGHT 43
12851+#define BDB_LVDS_POWER 44
12852+#define BDB_SKIP 254 /* VBIOS private block, ignore */
12853+
12854+struct bdb_general_features {
12855+ /* bits 1 */
12856+ u8 panel_fitting:2;
12857+ u8 flexaim:1;
12858+ u8 msg_enable:1;
12859+ u8 clear_screen:3;
12860+ u8 color_flip:1;
12861+
12862+ /* bits 2 */
12863+ u8 download_ext_vbt:1;
12864+ u8 enable_ssc:1;
12865+ u8 ssc_freq:1;
12866+ u8 enable_lfp_on_override:1;
12867+ u8 disable_ssc_ddt:1;
12868+ u8 rsvd8:3; /* finish byte */
12869+
12870+ /* bits 3 */
12871+ u8 disable_smooth_vision:1;
12872+ u8 single_dvi:1;
12873+ u8 rsvd9:6; /* finish byte */
12874+
12875+ /* bits 4 */
12876+ u8 legacy_monitor_detect;
12877+
12878+ /* bits 5 */
12879+ u8 int_crt_support:1;
12880+ u8 int_tv_support:1;
12881+ u8 rsvd11:6; /* finish byte */
12882+} __attribute__((packed));
12883+
12884+struct bdb_general_definitions {
12885+ /* DDC GPIO */
12886+ u8 crt_ddc_gmbus_pin;
12887+
12888+ /* DPMS bits */
12889+ u8 dpms_acpi:1;
12890+ u8 skip_boot_crt_detect:1;
12891+ u8 dpms_aim:1;
12892+ u8 rsvd1:5; /* finish byte */
12893+
12894+ /* boot device bits */
12895+ u8 boot_display[2];
12896+ u8 child_dev_size;
12897+
12898+ /* device info */
12899+ u8 tv_or_lvds_info[33];
12900+ u8 dev1[33];
12901+ u8 dev2[33];
12902+ u8 dev3[33];
12903+ u8 dev4[33];
12904+ /* may be another device block here on some platforms */
12905+};
12906+
12907+struct bdb_lvds_options {
12908+ u8 panel_type;
12909+ u8 rsvd1;
12910+ /* LVDS capabilities, stored in a dword */
12911+ u8 pfit_mode:2;
12912+ u8 pfit_text_mode_enhanced:1;
12913+ u8 pfit_gfx_mode_enhanced:1;
12914+ u8 pfit_ratio_auto:1;
12915+ u8 pixel_dither:1;
12916+ u8 lvds_edid:1;
12917+ u8 rsvd2:1;
12918+ u8 rsvd4;
12919+} __attribute__((packed));
12920+
12921+struct bdb_lvds_backlight {
12922+ u8 type:2;
12923+ u8 pol:1;
12924+ u8 gpio:3;
12925+ u8 gmbus:2;
12926+ u16 freq;
12927+ u8 minbrightness;
12928+ u8 i2caddr;
12929+ u8 brightnesscmd;
12930+ /*FIXME: more...*/
12931+}__attribute__((packed));
12932+
12933+/* LFP pointer table contains entries to the struct below */
12934+struct bdb_lvds_lfp_data_ptr {
12935+ u16 fp_timing_offset; /* offsets are from start of bdb */
12936+ u8 fp_table_size;
12937+ u16 dvo_timing_offset;
12938+ u8 dvo_table_size;
12939+ u16 panel_pnp_id_offset;
12940+ u8 pnp_table_size;
12941+} __attribute__((packed));
12942+
12943+struct bdb_lvds_lfp_data_ptrs {
12944+ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
12945+ struct bdb_lvds_lfp_data_ptr ptr[16];
12946+} __attribute__((packed));
12947+
12948+/* LFP data has 3 blocks per entry */
12949+struct lvds_fp_timing {
12950+ u16 x_res;
12951+ u16 y_res;
12952+ u32 lvds_reg;
12953+ u32 lvds_reg_val;
12954+ u32 pp_on_reg;
12955+ u32 pp_on_reg_val;
12956+ u32 pp_off_reg;
12957+ u32 pp_off_reg_val;
12958+ u32 pp_cycle_reg;
12959+ u32 pp_cycle_reg_val;
12960+ u32 pfit_reg;
12961+ u32 pfit_reg_val;
12962+ u16 terminator;
12963+} __attribute__((packed));
12964+
12965+struct lvds_dvo_timing {
12966+ u16 clock; /**< In 10khz */
12967+ u8 hactive_lo;
12968+ u8 hblank_lo;
12969+ u8 hblank_hi:4;
12970+ u8 hactive_hi:4;
12971+ u8 vactive_lo;
12972+ u8 vblank_lo;
12973+ u8 vblank_hi:4;
12974+ u8 vactive_hi:4;
12975+ u8 hsync_off_lo;
12976+ u8 hsync_pulse_width;
12977+ u8 vsync_pulse_width:4;
12978+ u8 vsync_off:4;
12979+ u8 rsvd0:6;
12980+ u8 hsync_off_hi:2;
12981+ u8 h_image;
12982+ u8 v_image;
12983+ u8 max_hv;
12984+ u8 h_border;
12985+ u8 v_border;
12986+ u8 rsvd1:3;
12987+ u8 digital:2;
12988+ u8 vsync_positive:1;
12989+ u8 hsync_positive:1;
12990+ u8 rsvd2:1;
12991+} __attribute__((packed));
12992+
12993+struct lvds_pnp_id {
12994+ u16 mfg_name;
12995+ u16 product_code;
12996+ u32 serial;
12997+ u8 mfg_week;
12998+ u8 mfg_year;
12999+} __attribute__((packed));
13000+
13001+struct bdb_lvds_lfp_data_entry {
13002+ struct lvds_fp_timing fp_timing;
13003+ struct lvds_dvo_timing dvo_timing;
13004+ struct lvds_pnp_id pnp_id;
13005+} __attribute__((packed));
13006+
13007+struct bdb_lvds_lfp_data {
13008+ struct bdb_lvds_lfp_data_entry data[16];
13009+} __attribute__((packed));
13010+
13011+struct aimdb_header {
13012+ char signature[16];
13013+ char oem_device[20];
13014+ u16 aimdb_version;
13015+ u16 aimdb_header_size;
13016+ u16 aimdb_size;
13017+} __attribute__((packed));
13018+
13019+struct aimdb_block {
13020+ u8 aimdb_id;
13021+ u16 aimdb_size;
13022+} __attribute__((packed));
13023+
13024+struct vch_panel_data {
13025+ u16 fp_timing_offset;
13026+ u8 fp_timing_size;
13027+ u16 dvo_timing_offset;
13028+ u8 dvo_timing_size;
13029+ u16 text_fitting_offset;
13030+ u8 text_fitting_size;
13031+ u16 graphics_fitting_offset;
13032+ u8 graphics_fitting_size;
13033+} __attribute__((packed));
13034+
13035+struct vch_bdb_22 {
13036+ struct aimdb_block aimdb_block;
13037+ struct vch_panel_data panels[16];
13038+} __attribute__((packed));
13039+
13040+struct bdb_sdvo_lvds_options {
13041+ u8 panel_backlight;
13042+ u8 h40_set_panel_type;
13043+ u8 panel_type;
13044+ u8 ssc_clk_freq;
13045+ u16 als_low_trip;
13046+ u16 als_high_trip;
13047+ u8 sclalarcoeff_tab_row_num;
13048+ u8 sclalarcoeff_tab_row_size;
13049+ u8 coefficient[8];
13050+ u8 panel_misc_bits_1;
13051+ u8 panel_misc_bits_2;
13052+ u8 panel_misc_bits_3;
13053+ u8 panel_misc_bits_4;
13054+} __attribute__((packed));
13055+
13056+
13057+extern bool psb_intel_init_bios(struct drm_device *dev);
13058+extern void psb_intel_destory_bios(struct drm_device * dev);
13059+
13060+/*
13061+ * Driver<->VBIOS interaction occurs through scratch bits in
13062+ * GR18 & SWF*.
13063+ */
13064+
13065+/* GR18 bits are set on display switch and hotkey events */
13066+#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
13067+#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
13068+#define GR18_HK_NONE (0x0<<3)
13069+#define GR18_HK_LFP_STRETCH (0x1<<3)
13070+#define GR18_HK_TOGGLE_DISP (0x2<<3)
13071+#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
13072+#define GR18_HK_POPUP_DISABLED (0x6<<3)
13073+#define GR18_HK_POPUP_ENABLED (0x7<<3)
13074+#define GR18_HK_PFIT (0x8<<3)
13075+#define GR18_HK_APM_CHANGE (0xa<<3)
13076+#define GR18_HK_MULTIPLE (0xc<<3)
13077+#define GR18_USER_INT_EN (1<<2)
13078+#define GR18_A0000_FLUSH_EN (1<<1)
13079+#define GR18_SMM_EN (1<<0)
13080+
13081+/* Set by driver, cleared by VBIOS */
13082+#define SWF00_YRES_SHIFT 16
13083+#define SWF00_XRES_SHIFT 0
13084+#define SWF00_RES_MASK 0xffff
13085+
13086+/* Set by VBIOS at boot time and driver at runtime */
13087+#define SWF01_TV2_FORMAT_SHIFT 8
13088+#define SWF01_TV1_FORMAT_SHIFT 0
13089+#define SWF01_TV_FORMAT_MASK 0xffff
13090+
13091+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
13092+#define SWF10_GTT_OVERRIDE_EN (1<<28)
13093+#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
13094+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
13095+#define SWF10_OLD_TOGGLE 0x0
13096+#define SWF10_TOGGLE_LIST_1 0x1
13097+#define SWF10_TOGGLE_LIST_2 0x2
13098+#define SWF10_TOGGLE_LIST_3 0x3
13099+#define SWF10_TOGGLE_LIST_4 0x4
13100+#define SWF10_PANNING_EN (1<<23)
13101+#define SWF10_DRIVER_LOADED (1<<22)
13102+#define SWF10_EXTENDED_DESKTOP (1<<21)
13103+#define SWF10_EXCLUSIVE_MODE (1<<20)
13104+#define SWF10_OVERLAY_EN (1<<19)
13105+#define SWF10_PLANEB_HOLDOFF (1<<18)
13106+#define SWF10_PLANEA_HOLDOFF (1<<17)
13107+#define SWF10_VGA_HOLDOFF (1<<16)
13108+#define SWF10_ACTIVE_DISP_MASK 0xffff
13109+#define SWF10_PIPEB_LFP2 (1<<15)
13110+#define SWF10_PIPEB_EFP2 (1<<14)
13111+#define SWF10_PIPEB_TV2 (1<<13)
13112+#define SWF10_PIPEB_CRT2 (1<<12)
13113+#define SWF10_PIPEB_LFP (1<<11)
13114+#define SWF10_PIPEB_EFP (1<<10)
13115+#define SWF10_PIPEB_TV (1<<9)
13116+#define SWF10_PIPEB_CRT (1<<8)
13117+#define SWF10_PIPEA_LFP2 (1<<7)
13118+#define SWF10_PIPEA_EFP2 (1<<6)
13119+#define SWF10_PIPEA_TV2 (1<<5)
13120+#define SWF10_PIPEA_CRT2 (1<<4)
13121+#define SWF10_PIPEA_LFP (1<<3)
13122+#define SWF10_PIPEA_EFP (1<<2)
13123+#define SWF10_PIPEA_TV (1<<1)
13124+#define SWF10_PIPEA_CRT (1<<0)
13125+
13126+#define SWF11_MEMORY_SIZE_SHIFT 16
13127+#define SWF11_SV_TEST_EN (1<<15)
13128+#define SWF11_IS_AGP (1<<14)
13129+#define SWF11_DISPLAY_HOLDOFF (1<<13)
13130+#define SWF11_DPMS_REDUCED (1<<12)
13131+#define SWF11_IS_VBE_MODE (1<<11)
13132+#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
13133+#define SWF11_DPMS_MASK 0x07
13134+#define SWF11_DPMS_OFF (1<<2)
13135+#define SWF11_DPMS_SUSPEND (1<<1)
13136+#define SWF11_DPMS_STANDBY (1<<0)
13137+#define SWF11_DPMS_ON 0
13138+
13139+#define SWF14_GFX_PFIT_EN (1<<31)
13140+#define SWF14_TEXT_PFIT_EN (1<<30)
13141+#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
13142+#define SWF14_POPUP_EN (1<<28)
13143+#define SWF14_DISPLAY_HOLDOFF (1<<27)
13144+#define SWF14_DISP_DETECT_EN (1<<26)
13145+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
13146+#define SWF14_DRIVER_STATUS (1<<24)
13147+#define SWF14_OS_TYPE_WIN9X (1<<23)
13148+#define SWF14_OS_TYPE_WINNT (1<<22)
13149+/* 21:19 rsvd */
13150+#define SWF14_PM_TYPE_MASK 0x00070000
13151+#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
13152+#define SWF14_PM_ACPI (0x3 << 16)
13153+#define SWF14_PM_APM_12 (0x2 << 16)
13154+#define SWF14_PM_APM_11 (0x1 << 16)
13155+#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
13156+ /* if GR18 indicates a display switch */
13157+#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
13158+#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
13159+#define SWF14_DS_PIPEB_TV2_EN (1<<13)
13160+#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
13161+#define SWF14_DS_PIPEB_LFP_EN (1<<11)
13162+#define SWF14_DS_PIPEB_EFP_EN (1<<10)
13163+#define SWF14_DS_PIPEB_TV_EN (1<<9)
13164+#define SWF14_DS_PIPEB_CRT_EN (1<<8)
13165+#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
13166+#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
13167+#define SWF14_DS_PIPEA_TV2_EN (1<<5)
13168+#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
13169+#define SWF14_DS_PIPEA_LFP_EN (1<<3)
13170+#define SWF14_DS_PIPEA_EFP_EN (1<<2)
13171+#define SWF14_DS_PIPEA_TV_EN (1<<1)
13172+#define SWF14_DS_PIPEA_CRT_EN (1<<0)
13173+ /* if GR18 indicates a panel fitting request */
13174+#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
13175+ /* if GR18 indicates an APM change request */
13176+#define SWF14_APM_HIBERNATE 0x4
13177+#define SWF14_APM_SUSPEND 0x3
13178+#define SWF14_APM_STANDBY 0x1
13179+#define SWF14_APM_RESTORE 0x0
13180+
13181+#endif /* _I830_BIOS_H_ */
13182diff --git a/drivers/gpu/drm/psb/psb_intel_display.c b/drivers/gpu/drm/psb/psb_intel_display.c
13183new file mode 100644
13184index 0000000..9cc0ec1
13185--- /dev/null
13186+++ b/drivers/gpu/drm/psb/psb_intel_display.c
13187@@ -0,0 +1,2484 @@
13188+/*
13189+ * Copyright © 2006-2007 Intel Corporation
13190+ *
13191+ * Permission is hereby granted, free of charge, to any person obtaining a
13192+ * copy of this software and associated documentation files (the "Software"),
13193+ * to deal in the Software without restriction, including without limitation
13194+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13195+ * and/or sell copies of the Software, and to permit persons to whom the
13196+ * Software is furnished to do so, subject to the following conditions:
13197+ *
13198+ * The above copyright notice and this permission notice (including the next
13199+ * paragraph) shall be included in all copies or substantial portions of the
13200+ * Software.
13201+ *
13202+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13203+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13204+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
13205+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
13206+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
13207+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
13208+ * DEALINGS IN THE SOFTWARE.
13209+ *
13210+ * Authors:
13211+ * Eric Anholt <eric@anholt.net>
13212+ */
13213+
13214+#include <linux/i2c.h>
13215+
13216+#include <drm/drm_crtc_helper.h>
13217+#include "psb_fb.h"
13218+#include "psb_intel_display.h"
13219+#include "psb_powermgmt.h"
13220+
13221+struct psb_intel_clock_t {
13222+ /* given values */
13223+ int n;
13224+ int m1, m2;
13225+ int p1, p2;
13226+ /* derived values */
13227+ int dot;
13228+ int vco;
13229+ int m;
13230+ int p;
13231+};
13232+
13233+struct psb_intel_range_t {
13234+ int min, max;
13235+};
13236+
13237+struct psb_intel_p2_t {
13238+ int dot_limit;
13239+ int p2_slow, p2_fast;
13240+};
13241+
13242+#define INTEL_P2_NUM 2
13243+
13244+struct psb_intel_limit_t {
13245+ struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
13246+ struct psb_intel_p2_t p2;
13247+};
13248+
13249+#define I8XX_DOT_MIN 25000
13250+#define I8XX_DOT_MAX 350000
13251+#define I8XX_VCO_MIN 930000
13252+#define I8XX_VCO_MAX 1400000
13253+#define I8XX_N_MIN 3
13254+#define I8XX_N_MAX 16
13255+#define I8XX_M_MIN 96
13256+#define I8XX_M_MAX 140
13257+#define I8XX_M1_MIN 18
13258+#define I8XX_M1_MAX 26
13259+#define I8XX_M2_MIN 6
13260+#define I8XX_M2_MAX 16
13261+#define I8XX_P_MIN 4
13262+#define I8XX_P_MAX 128
13263+#define I8XX_P1_MIN 2
13264+#define I8XX_P1_MAX 33
13265+#define I8XX_P1_LVDS_MIN 1
13266+#define I8XX_P1_LVDS_MAX 6
13267+#define I8XX_P2_SLOW 4
13268+#define I8XX_P2_FAST 2
13269+#define I8XX_P2_LVDS_SLOW 14
13270+#define I8XX_P2_LVDS_FAST 14 /* No fast option */
13271+#define I8XX_P2_SLOW_LIMIT 165000
13272+
13273+#define I9XX_DOT_MIN 20000
13274+#define I9XX_DOT_MAX 400000
13275+#define I9XX_VCO_MIN 1400000
13276+#define I9XX_VCO_MAX 2800000
13277+#define I9XX_N_MIN 3
13278+#define I9XX_N_MAX 8
13279+#define I9XX_M_MIN 70
13280+#define I9XX_M_MAX 120
13281+#define I9XX_M1_MIN 10
13282+#define I9XX_M1_MAX 20
13283+#define I9XX_M2_MIN 5
13284+#define I9XX_M2_MAX 9
13285+#define I9XX_P_SDVO_DAC_MIN 5
13286+#define I9XX_P_SDVO_DAC_MAX 80
13287+#define I9XX_P_LVDS_MIN 7
13288+#define I9XX_P_LVDS_MAX 98
13289+#define I9XX_P1_MIN 1
13290+#define I9XX_P1_MAX 8
13291+#define I9XX_P2_SDVO_DAC_SLOW 10
13292+#define I9XX_P2_SDVO_DAC_FAST 5
13293+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
13294+#define I9XX_P2_LVDS_SLOW 14
13295+#define I9XX_P2_LVDS_FAST 7
13296+#define I9XX_P2_LVDS_SLOW_LIMIT 112000
13297+
13298+#define INTEL_LIMIT_I8XX_DVO_DAC 0
13299+#define INTEL_LIMIT_I8XX_LVDS 1
13300+#define INTEL_LIMIT_I9XX_SDVO_DAC 2
13301+#define INTEL_LIMIT_I9XX_LVDS 3
13302+
13303+static const struct psb_intel_limit_t psb_intel_limits[] = {
13304+ { /* INTEL_LIMIT_I8XX_DVO_DAC */
13305+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
13306+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
13307+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
13308+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
13309+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
13310+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
13311+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
13312+ .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
13313+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
13314+ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
13315+ },
13316+ { /* INTEL_LIMIT_I8XX_LVDS */
13317+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
13318+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
13319+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
13320+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
13321+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
13322+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
13323+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
13324+ .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
13325+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
13326+ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
13327+ },
13328+ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
13329+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
13330+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
13331+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
13332+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
13333+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
13334+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
13335+ .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
13336+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
13337+ .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
13338+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
13339+ I9XX_P2_SDVO_DAC_FAST},
13340+ },
13341+ { /* INTEL_LIMIT_I9XX_LVDS */
13342+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
13343+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
13344+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
13345+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
13346+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
13347+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
13348+ .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
13349+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
13350+ /* The single-channel range is 25-112Mhz, and dual-channel
13351+ * is 80-224Mhz. Prefer single channel as much as possible.
13352+ */
13353+ .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
13354+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
13355+ },
13356+};
13357+
13358+static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
13359+{
13360+ struct drm_device *dev = crtc->dev;
13361+ const struct psb_intel_limit_t *limit;
13362+
13363+ if (IS_I9XX(dev)) {
13364+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
13365+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
13366+ else
13367+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
13368+ } else {
13369+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
13370+ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_LVDS];
13371+ else
13372+ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
13373+ }
13374+ return limit;
13375+}
13376+
13377+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
13378+
13379+static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
13380+{
13381+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
13382+ clock->p = clock->p1 * clock->p2;
13383+ clock->vco = refclk * clock->m / (clock->n + 2);
13384+ clock->dot = clock->vco / clock->p;
13385+}
13386+
13387+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
13388+
13389+static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
13390+{
13391+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
13392+ clock->p = clock->p1 * clock->p2;
13393+ clock->vco = refclk * clock->m / (clock->n + 2);
13394+ clock->dot = clock->vco / clock->p;
13395+}
13396+
13397+static void psb_intel_clock(struct drm_device *dev, int refclk,
13398+ struct psb_intel_clock_t *clock)
13399+{
13400+ if (IS_I9XX(dev))
13401+ return i9xx_clock(refclk, clock);
13402+ else
13403+ return i8xx_clock(refclk, clock);
13404+}
13405+
13406+/**
13407+ * Returns whether any output on the specified pipe is of the specified type
13408+ */
13409+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
13410+{
13411+ struct drm_device *dev = crtc->dev;
13412+ struct drm_mode_config *mode_config = &dev->mode_config;
13413+ struct drm_connector *l_entry;
13414+
13415+ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
13416+ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
13417+ struct psb_intel_output *psb_intel_output =
13418+ to_psb_intel_output(l_entry);
13419+ if (psb_intel_output->type == type)
13420+ return true;
13421+ }
13422+ }
13423+ return false;
13424+}
13425+
13426+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
13427+/**
13428+ * Returns whether the given set of divisors are valid for a given refclk with
13429+ * the given connectors.
13430+ */
13431+
13432+static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
13433+ struct psb_intel_clock_t *clock)
13434+{
13435+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
13436+
13437+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
13438+ INTELPllInvalid("p1 out of range\n");
13439+ if (clock->p < limit->p.min || limit->p.max < clock->p)
13440+ INTELPllInvalid("p out of range\n");
13441+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
13442+ INTELPllInvalid("m2 out of range\n");
13443+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
13444+ INTELPllInvalid("m1 out of range\n");
13445+ if (clock->m1 <= clock->m2)
13446+ INTELPllInvalid("m1 <= m2\n");
13447+ if (clock->m < limit->m.min || limit->m.max < clock->m)
13448+ INTELPllInvalid("m out of range\n");
13449+ if (clock->n < limit->n.min || limit->n.max < clock->n)
13450+ INTELPllInvalid("n out of range\n");
13451+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
13452+ INTELPllInvalid("vco out of range\n");
13453+ /* XXX: We may need to be checking "Dot clock"
13454+ * depending on the multiplier, connector, etc.,
13455+ * rather than just a single range.
13456+ */
13457+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
13458+ INTELPllInvalid("dot out of range\n");
13459+
13460+ return true;
13461+}
13462+
13463+/**
13464+ * Returns a set of divisors for the desired target clock with the given
13465+ * refclk, or FALSE. The returned values represent the clock equation:
13466+ * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
13467+ */
13468+static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
13469+ int refclk,
13470+ struct psb_intel_clock_t *best_clock)
13471+{
13472+ struct drm_device *dev = crtc->dev;
13473+ struct psb_intel_clock_t clock;
13474+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
13475+ int err = target;
13476+
13477+ if (IS_I9XX(dev) && psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
13478+ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
13479+ /*
13480+ * For LVDS, if the panel is on, just rely on its current
13481+ * settings for dual-channel. We haven't figured out how to
13482+ * reliably set up different single/dual channel state, if we
13483+ * even can.
13484+ */
13485+ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
13486+ LVDS_CLKB_POWER_UP)
13487+ clock.p2 = limit->p2.p2_fast;
13488+ else
13489+ clock.p2 = limit->p2.p2_slow;
13490+ } else {
13491+ if (target < limit->p2.dot_limit)
13492+ clock.p2 = limit->p2.p2_slow;
13493+ else
13494+ clock.p2 = limit->p2.p2_fast;
13495+ }
13496+
13497+ memset(best_clock, 0, sizeof(*best_clock));
13498+
13499+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
13500+ clock.m1++) {
13501+ for (clock.m2 = limit->m2.min;
13502+ clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
13503+ clock.m2++) {
13504+ for (clock.n = limit->n.min;
13505+ clock.n <= limit->n.max; clock.n++) {
13506+ for (clock.p1 = limit->p1.min;
13507+ clock.p1 <= limit->p1.max;
13508+ clock.p1++) {
13509+ int this_err;
13510+
13511+ psb_intel_clock(dev, refclk, &clock);
13512+
13513+ if (!psb_intel_PLL_is_valid
13514+ (crtc, &clock))
13515+ continue;
13516+
13517+ this_err = abs(clock.dot - target);
13518+ if (this_err < err) {
13519+ *best_clock = clock;
13520+ err = this_err;
13521+ }
13522+ }
13523+ }
13524+ }
13525+ }
13526+
13527+ return err != target;
13528+}
13529+
13530+void psb_intel_wait_for_vblank(struct drm_device *dev)
13531+{
13532+ /* Wait for 20ms, i.e. one cycle at 50hz. */
13533+ udelay(20000);
13534+}
13535+
13536+int psb_intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb)
13537+{
13538+ struct drm_device *dev = crtc->dev;
13539+ /* struct drm_i915_master_private *master_priv; */
13540+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
13541+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
13542+ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
13543+ int pipe = psb_intel_crtc->pipe;
13544+ unsigned long Start, Offset;
13545+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
13546+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
13547+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
13548+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
13549+ u32 dspcntr;
13550+ int ret = 0;
13551+
13552+ /* no fb bound */
13553+ if (!crtc->fb) {
13554+ DRM_DEBUG("No FB bound\n");
13555+ return 0;
13556+ }
13557+
13558+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
13559+
13560+ if (IS_MRST(dev) && (pipe == 0))
13561+ dspbase = MRST_DSPABASE;
13562+
13563+ Start = mode_dev->bo_offset(dev, psbfb->bo);
13564+ Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
13565+
13566+ REG_WRITE(dspstride, crtc->fb->pitch);
13567+
13568+ dspcntr = REG_READ(dspcntr_reg);
13569+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
13570+
13571+ switch (crtc->fb->bits_per_pixel) {
13572+ case 8:
13573+ dspcntr |= DISPPLANE_8BPP;
13574+ break;
13575+ case 16:
13576+ if (crtc->fb->depth == 15)
13577+ dspcntr |= DISPPLANE_15_16BPP;
13578+ else
13579+ dspcntr |= DISPPLANE_16BPP;
13580+ break;
13581+ case 24:
13582+ case 32:
13583+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
13584+ break;
13585+ default:
13586+ DRM_ERROR("Unknown color depth\n");
13587+ ret = -EINVAL;
13588+ goto psb_intel_pipe_set_base_exit;
13589+ }
13590+ REG_WRITE(dspcntr_reg, dspcntr);
13591+
13592+ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
13593+ if (IS_I965G(dev) || IS_MRST(dev)) {
13594+ REG_WRITE(dspbase, Offset);
13595+ REG_READ(dspbase);
13596+ REG_WRITE(dspsurf, Start);
13597+ REG_READ(dspsurf);
13598+ } else {
13599+ REG_WRITE(dspbase, Start + Offset);
13600+ REG_READ(dspbase);
13601+ }
13602+
13603+psb_intel_pipe_set_base_exit:
13604+
13605+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
13606+
13607+ return ret;
13608+}
13609+
13610+int psb_kms_flip_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb)
13611+{
13612+ struct drm_device *dev = crtc->dev;
13613+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
13614+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
13615+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
13616+ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
13617+ int pipe = psb_intel_crtc->pipe;
13618+
13619+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
13620+ struct psb_task *task = NULL;
13621+ task = kzalloc(sizeof(*task), GFP_KERNEL);
13622+ if (!task)
13623+ return -ENOMEM;
13624+ INIT_LIST_HEAD(&task->head);
13625+ INIT_LIST_HEAD(&task->buf.head);
13626+ task->task_type = psb_flip_task;
13627+
13628+ spin_lock_irq(&scheduler->lock);
13629+ list_add_tail(&task->head, &scheduler->ta_queue);
13630+ /**
13631+ * From this point we may no longer dereference task,
13632+ * as the object it points to may be freed by another thread.
13633+ */
13634+
13635+ task = NULL;
13636+ spin_unlock_irq(&scheduler->lock);
13637+
13638+ /* no fb bound */
13639+ if (!crtc->fb) {
13640+ DRM_DEBUG("No FB bound\n");
13641+ return 0;
13642+ }
13643+
13644+ dev_priv->flip_start[pipe] = mode_dev->bo_offset(dev, psbfb->bo);
13645+ dev_priv->flip_offset[pipe] = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
13646+ dev_priv->flip_stride[pipe] = crtc->fb->pitch;
13647+ dev_priv->pipe_active[pipe] = 1;
13648+ dev_priv->pipe_active[1-pipe] = 0;
13649+
13650+ return 0;
13651+}
13652+
13653+/**
13654+ * Sets the power management mode of the pipe and plane.
13655+ *
13656+ * This code should probably grow support for turning the cursor off and back
13657+ * on appropriately at the same time as we're turning the pipe off/on.
13658+ */
13659+static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
13660+{
13661+ struct drm_device *dev = crtc->dev;
13662+ /* struct drm_i915_master_private *master_priv; */
13663+ /* struct drm_i915_private *dev_priv = dev->dev_private; */
13664+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
13665+ int pipe = psb_intel_crtc->pipe;
13666+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
13667+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
13668+ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
13669+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
13670+ u32 temp;
13671+ bool enabled;
13672+
13673+ /* XXX: When our outputs are all unaware of DPMS modes other than off
13674+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
13675+ */
13676+ switch (mode) {
13677+ case DRM_MODE_DPMS_ON:
13678+ case DRM_MODE_DPMS_STANDBY:
13679+ case DRM_MODE_DPMS_SUSPEND:
13680+ /* Enable the DPLL */
13681+ temp = REG_READ(dpll_reg);
13682+ if ((temp & DPLL_VCO_ENABLE) == 0) {
13683+ REG_WRITE(dpll_reg, temp);
13684+ REG_READ(dpll_reg);
13685+ /* Wait for the clocks to stabilize. */
13686+ udelay(150);
13687+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
13688+ REG_READ(dpll_reg);
13689+ /* Wait for the clocks to stabilize. */
13690+ udelay(150);
13691+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
13692+ REG_READ(dpll_reg);
13693+ /* Wait for the clocks to stabilize. */
13694+ udelay(150);
13695+ }
13696+
13697+ /* Enable the pipe */
13698+ temp = REG_READ(pipeconf_reg);
13699+ if ((temp & PIPEACONF_ENABLE) == 0)
13700+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
13701+
13702+ /* Enable the plane */
13703+ temp = REG_READ(dspcntr_reg);
13704+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
13705+ REG_WRITE(dspcntr_reg,
13706+ temp | DISPLAY_PLANE_ENABLE);
13707+ /* Flush the plane changes */
13708+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
13709+ }
13710+
13711+ psb_intel_crtc_load_lut(crtc);
13712+
13713+ /* Give the overlay scaler a chance to enable
13714+ * if it's on this pipe */
13715+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
13716+ break;
13717+ case DRM_MODE_DPMS_OFF:
13718+ /* Give the overlay scaler a chance to disable
13719+ * if it's on this pipe */
13720+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
13721+
13722+ /* Disable the VGA plane that we never use */
13723+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
13724+
13725+ /* Disable display plane */
13726+ temp = REG_READ(dspcntr_reg);
13727+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
13728+ REG_WRITE(dspcntr_reg,
13729+ temp & ~DISPLAY_PLANE_ENABLE);
13730+ /* Flush the plane changes */
13731+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
13732+ REG_READ(dspbase_reg);
13733+ }
13734+
13735+ if (!IS_I9XX(dev)) {
13736+ /* Wait for vblank for the disable to take effect */
13737+ psb_intel_wait_for_vblank(dev);
13738+ }
13739+
13740+ /* Next, disable display pipes */
13741+ temp = REG_READ(pipeconf_reg);
13742+ if ((temp & PIPEACONF_ENABLE) != 0) {
13743+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
13744+ REG_READ(pipeconf_reg);
13745+ }
13746+
13747+ /* Wait for vblank for the disable to take effect. */
13748+ psb_intel_wait_for_vblank(dev);
13749+
13750+ temp = REG_READ(dpll_reg);
13751+ if ((temp & DPLL_VCO_ENABLE) != 0) {
13752+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
13753+ REG_READ(dpll_reg);
13754+ }
13755+
13756+ /* Wait for the clocks to turn off. */
13757+ udelay(150);
13758+ break;
13759+ }
13760+
13761+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
13762+
13763+#if 0 /* JB: Add vblank support later */
13764+ if (enabled)
13765+ dev_priv->vblank_pipe |= (1 << pipe);
13766+ else
13767+ dev_priv->vblank_pipe &= ~(1 << pipe);
13768+#endif
13769+
13770+ psb_intel_crtc->dpms_mode = mode;
13771+
13772+#if 0 /* JB: Add sarea support later */
13773+ if (!dev->primary->master)
13774+ return 0;
13775+
13776+ master_priv = dev->primary->master->driver_priv;
13777+ if (!master_priv->sarea_priv)
13778+ return 0;
13779+
13780+ switch (pipe) {
13781+ case 0:
13782+ master_priv->sarea_priv->planeA_w =
13783+ enabled ? crtc->mode.hdisplay : 0;
13784+ master_priv->sarea_priv->planeA_h =
13785+ enabled ? crtc->mode.vdisplay : 0;
13786+ break;
13787+ case 1:
13788+ master_priv->sarea_priv->planeB_w =
13789+ enabled ? crtc->mode.hdisplay : 0;
13790+ master_priv->sarea_priv->planeB_h =
13791+ enabled ? crtc->mode.vdisplay : 0;
13792+ break;
13793+ default:
13794+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
13795+ break;
13796+ }
13797+#endif
13798+
13799+ /*Set FIFO Watermarks*/
13800+ REG_WRITE(DSPARB, 0x3F3E);
13801+}
13802+
13803+static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
13804+{
13805+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
13806+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
13807+}
13808+
13809+static void psb_intel_crtc_commit(struct drm_crtc *crtc)
13810+{
13811+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
13812+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
13813+}
13814+
13815+void psb_intel_encoder_prepare(struct drm_encoder *encoder)
13816+{
13817+ struct drm_encoder_helper_funcs *encoder_funcs =
13818+ encoder->helper_private;
13819+ /* lvds has its own version of prepare see psb_intel_lvds_prepare */
13820+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
13821+}
13822+
13823+void psb_intel_encoder_commit(struct drm_encoder *encoder)
13824+{
13825+ struct drm_encoder_helper_funcs *encoder_funcs =
13826+ encoder->helper_private;
13827+ /* lvds has its own version of commit see psb_intel_lvds_commit */
13828+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
13829+}
13830+
13831+static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
13832+ struct drm_display_mode *mode,
13833+ struct drm_display_mode *adjusted_mode)
13834+{
13835+ return true;
13836+}
13837+
13838+
13839+/**
13840+ * Return the pipe currently connected to the panel fitter,
13841+ * or -1 if the panel fitter is not present or not in use
13842+ */
13843+static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
13844+{
13845+ u32 pfit_control;
13846+
13847+ /* i830 doesn't have a panel fitter */
13848+ if (IS_I830(dev))
13849+ return -1;
13850+
13851+ pfit_control = REG_READ(PFIT_CONTROL);
13852+
13853+ /* See if the panel fitter is in use */
13854+ if ((pfit_control & PFIT_ENABLE) == 0)
13855+ return -1;
13856+
13857+ /* 965 can place panel fitter on either pipe */
13858+ if (IS_I965G(dev) || IS_MRST(dev))
13859+ return (pfit_control >> 29) & 0x3;
13860+
13861+ /* older chips can only use pipe 1 */
13862+ return 1;
13863+}
13864+
13865+static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
13866+ struct drm_display_mode *mode,
13867+ struct drm_display_mode *adjusted_mode,
13868+ int x, int y,
13869+ struct drm_framebuffer *old_fb)
13870+{
13871+ struct drm_device *dev = crtc->dev;
13872+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
13873+ int pipe = psb_intel_crtc->pipe;
13874+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
13875+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
13876+ int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
13877+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
13878+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
13879+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
13880+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
13881+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
13882+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
13883+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
13884+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
13885+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
13886+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
13887+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
13888+ int refclk;
13889+ struct psb_intel_clock_t clock;
13890+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
13891+ bool ok, is_sdvo = false, is_dvo = false;
13892+ bool is_crt = false, is_lvds = false, is_tv = false;
13893+ struct drm_mode_config *mode_config = &dev->mode_config;
13894+ struct drm_connector *connector;
13895+
13896+ list_for_each_entry(connector, &mode_config->connector_list, head) {
13897+ struct psb_intel_output *psb_intel_output =
13898+ to_psb_intel_output(connector);
13899+
13900+ if (!connector->encoder
13901+ || connector->encoder->crtc != crtc)
13902+ continue;
13903+
13904+ switch (psb_intel_output->type) {
13905+ case INTEL_OUTPUT_LVDS:
13906+ is_lvds = true;
13907+ break;
13908+ case INTEL_OUTPUT_SDVO:
13909+ is_sdvo = true;
13910+ break;
13911+ case INTEL_OUTPUT_DVO:
13912+ is_dvo = true;
13913+ break;
13914+ case INTEL_OUTPUT_TVOUT:
13915+ is_tv = true;
13916+ break;
13917+ case INTEL_OUTPUT_ANALOG:
13918+ is_crt = true;
13919+ break;
13920+ }
13921+ }
13922+
13923+ if (IS_I9XX(dev))
13924+ refclk = 96000;
13925+ else
13926+ refclk = 48000;
13927+
13928+ ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
13929+ &clock);
13930+ if (!ok) {
13931+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
13932+ return 0;
13933+ }
13934+
13935+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
13936+
13937+ dpll = DPLL_VGA_MODE_DIS;
13938+ if (IS_I9XX(dev)) {
13939+ if (is_lvds) {
13940+ dpll |= DPLLB_MODE_LVDS;
13941+ if (IS_POULSBO(dev))
13942+ dpll |= DPLL_DVO_HIGH_SPEED;
13943+ } else
13944+ dpll |= DPLLB_MODE_DAC_SERIAL;
13945+ if (is_sdvo) {
13946+ dpll |= DPLL_DVO_HIGH_SPEED;
13947+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_POULSBO(dev)) {
13948+ int sdvo_pixel_multiply =
13949+ adjusted_mode->clock / mode->clock;
13950+ dpll |=
13951+ (sdvo_pixel_multiply -
13952+ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
13953+ }
13954+ }
13955+
13956+ /* compute bitmask from p1 value */
13957+ dpll |= (1 << (clock.p1 - 1)) << 16;
13958+ switch (clock.p2) {
13959+ case 5:
13960+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
13961+ break;
13962+ case 7:
13963+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
13964+ break;
13965+ case 10:
13966+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
13967+ break;
13968+ case 14:
13969+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
13970+ break;
13971+ }
13972+ if (IS_I965G(dev))
13973+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
13974+ } else {
13975+ if (is_lvds) {
13976+ dpll |=
13977+ (1 << (clock.p1 - 1)) <<
13978+ DPLL_FPA01_P1_POST_DIV_SHIFT;
13979+ } else {
13980+ if (clock.p1 == 2)
13981+ dpll |= PLL_P1_DIVIDE_BY_TWO;
13982+ else
13983+ dpll |=
13984+ (clock.p1 -
13985+ 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
13986+ if (clock.p2 == 4)
13987+ dpll |= PLL_P2_DIVIDE_BY_4;
13988+ }
13989+ }
13990+
13991+ if (is_tv) {
13992+ /* XXX: just matching BIOS for now */
13993+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
13994+ dpll |= 3;
13995+ }
13996+#if 0
13997+ else if (is_lvds)
13998+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
13999+#endif
14000+ else
14001+ dpll |= PLL_REF_INPUT_DREFCLK;
14002+
14003+ /* setup pipeconf */
14004+ pipeconf = REG_READ(pipeconf_reg);
14005+
14006+ /* Set up the display plane register */
14007+ dspcntr = DISPPLANE_GAMMA_ENABLE;
14008+
14009+ if (pipe == 0)
14010+ dspcntr |= DISPPLANE_SEL_PIPE_A;
14011+ else
14012+ dspcntr |= DISPPLANE_SEL_PIPE_B;
14013+
14014+ dspcntr |= DISPLAY_PLANE_ENABLE;
14015+ pipeconf |= PIPEACONF_ENABLE;
14016+ dpll |= DPLL_VCO_ENABLE;
14017+
14018+
14019+ /* Disable the panel fitter if it was on our pipe */
14020+ if (psb_intel_panel_fitter_pipe(dev) == pipe)
14021+ REG_WRITE(PFIT_CONTROL, 0);
14022+
14023+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
14024+ drm_mode_debug_printmodeline(mode);
14025+
14026+ if (dpll & DPLL_VCO_ENABLE) {
14027+ REG_WRITE(fp_reg, fp);
14028+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
14029+ REG_READ(dpll_reg);
14030+ udelay(150);
14031+ }
14032+
14033+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
14034+ * This is an exception to the general rule that mode_set doesn't turn
14035+ * things on.
14036+ */
14037+ if (is_lvds) {
14038+ u32 lvds = REG_READ(LVDS);
14039+
14040+ lvds |=
14041+ LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
14042+ LVDS_PIPEB_SELECT;
14043+ /* Set the B0-B3 data pairs corresponding to
14044+ * whether we're going to
14045+ * set the DPLLs for dual-channel mode or not.
14046+ */
14047+ if (clock.p2 == 7)
14048+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
14049+ else
14050+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
14051+
14052+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
14053+ * appropriately here, but we need to look more
14054+ * thoroughly into how panels behave in the two modes.
14055+ */
14056+
14057+ REG_WRITE(LVDS, lvds);
14058+ REG_READ(LVDS);
14059+ }
14060+
14061+ REG_WRITE(fp_reg, fp);
14062+ REG_WRITE(dpll_reg, dpll);
14063+ REG_READ(dpll_reg);
14064+ /* Wait for the clocks to stabilize. */
14065+ udelay(150);
14066+
14067+ if (IS_I965G(dev)) {
14068+ int sdvo_pixel_multiply =
14069+ adjusted_mode->clock / mode->clock;
14070+ REG_WRITE(dpll_md_reg,
14071+ (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
14072+ ((sdvo_pixel_multiply -
14073+ 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
14074+ } else {
14075+ /* write it again -- the BIOS does, after all */
14076+ REG_WRITE(dpll_reg, dpll);
14077+ }
14078+ REG_READ(dpll_reg);
14079+ /* Wait for the clocks to stabilize. */
14080+ udelay(150);
14081+
14082+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
14083+ ((adjusted_mode->crtc_htotal - 1) << 16));
14084+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
14085+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
14086+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
14087+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
14088+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
14089+ ((adjusted_mode->crtc_vtotal - 1) << 16));
14090+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
14091+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
14092+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
14093+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
14094+ /* pipesrc and dspsize control the size that is scaled from,
14095+ * which should always be the user's requested size.
14096+ */
14097+ REG_WRITE(dspsize_reg,
14098+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
14099+ REG_WRITE(dsppos_reg, 0);
14100+ REG_WRITE(pipesrc_reg,
14101+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
14102+ REG_WRITE(pipeconf_reg, pipeconf);
14103+ REG_READ(pipeconf_reg);
14104+
14105+ psb_intel_wait_for_vblank(dev);
14106+
14107+ REG_WRITE(dspcntr_reg, dspcntr);
14108+
14109+ /* Flush the plane changes */
14110+ {
14111+ struct drm_crtc_helper_funcs *crtc_funcs =
14112+ crtc->helper_private;
14113+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
14114+ }
14115+
14116+ psb_intel_wait_for_vblank(dev);
14117+
14118+ return 0;
14119+}
14120+
14121+/** Loads the palette/gamma unit for the CRTC with the prepared values */
14122+void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
14123+{
14124+ struct drm_device *dev = crtc->dev;
14125+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14126+ int palreg = (psb_intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
14127+ int i;
14128+
14129+ /* The clocks have to be on to load the palette. */
14130+ if (!crtc->enabled)
14131+ return;
14132+
14133+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
14134+ for (i = 0; i < 256; i++) {
14135+ REG_WRITE(palreg + 4 * i,
14136+ (psb_intel_crtc->lut_r[i] << 16) |
14137+ (psb_intel_crtc->lut_g[i] << 8) |
14138+ psb_intel_crtc->lut_b[i]);
14139+ }
14140+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
14141+ }
14142+}
14143+
14144+#ifndef CONFIG_MRST
14145+/**
14146+ * Save HW states of giving crtc
14147+ */
14148+static void psb_intel_crtc_save(struct drm_crtc * crtc)
14149+{
14150+ struct drm_device * dev = crtc->dev;
14151+ // struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
14152+ struct psb_intel_crtc * psb_intel_crtc = to_psb_intel_crtc(crtc);
14153+ struct psb_intel_crtc_state * crtc_state = psb_intel_crtc->crtc_state;
14154+ int pipeA = (psb_intel_crtc->pipe == 0);
14155+ uint32_t paletteReg;
14156+ int i;
14157+
14158+ DRM_DEBUG("\n");
14159+
14160+ if(!crtc_state) {
14161+ DRM_DEBUG("No CRTC state found\n");
14162+ return;
14163+ }
14164+
14165+ crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
14166+ crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
14167+ crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
14168+ crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
14169+ crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
14170+ crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
14171+ crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
14172+ crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
14173+ crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
14174+ crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
14175+ crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
14176+ crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
14177+ crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
14178+
14179+ /*NOTE: DSPSIZE DSPPOS only for psb*/
14180+ crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
14181+ crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
14182+
14183+ crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
14184+
14185+ DRM_DEBUG("(%x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x)\n",
14186+ crtc_state->saveDSPCNTR,
14187+ crtc_state->savePIPECONF,
14188+ crtc_state->savePIPESRC,
14189+ crtc_state->saveFP0,
14190+ crtc_state->saveFP1,
14191+ crtc_state->saveDPLL,
14192+ crtc_state->saveHTOTAL,
14193+ crtc_state->saveHBLANK,
14194+ crtc_state->saveHSYNC,
14195+ crtc_state->saveVTOTAL,
14196+ crtc_state->saveVBLANK,
14197+ crtc_state->saveVSYNC,
14198+ crtc_state->saveDSPSTRIDE,
14199+ crtc_state->saveDSPSIZE,
14200+ crtc_state->saveDSPPOS,
14201+ crtc_state->saveDSPBASE
14202+ );
14203+
14204+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
14205+ for(i=0; i<256; ++i) {
14206+ crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
14207+ }
14208+}
14209+
14210+/**
14211+ * Restore HW states of giving crtc
14212+ */
14213+static void psb_intel_crtc_restore(struct drm_crtc * crtc)
14214+{
14215+ struct drm_device * dev = crtc->dev;
14216+ // struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
14217+ struct psb_intel_crtc * psb_intel_crtc = to_psb_intel_crtc(crtc);
14218+ struct psb_intel_crtc_state * crtc_state = psb_intel_crtc->crtc_state;
14219+ // struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private;
14220+ int pipeA = (psb_intel_crtc->pipe == 0);
14221+ uint32_t paletteReg;
14222+ int i;
14223+
14224+ DRM_DEBUG("\n");
14225+
14226+ if(!crtc_state) {
14227+ DRM_DEBUG("No crtc state\n");
14228+ return;
14229+ }
14230+
14231+ DRM_DEBUG("current: (%x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x)\n",
14232+ REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
14233+ REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
14234+ REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
14235+ REG_READ(pipeA ? FPA0 : FPB0),
14236+ REG_READ(pipeA ? FPA1 : FPB1),
14237+ REG_READ(pipeA ? DPLL_A : DPLL_B),
14238+ REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
14239+ REG_READ(pipeA ? HBLANK_A : HBLANK_B),
14240+ REG_READ(pipeA ? HSYNC_A : HSYNC_B),
14241+ REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
14242+ REG_READ(pipeA ? VBLANK_A : VBLANK_B),
14243+ REG_READ(pipeA ? VSYNC_A : VSYNC_B),
14244+ REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
14245+ REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
14246+ REG_READ(pipeA ? DSPAPOS : DSPBPOS),
14247+ REG_READ(pipeA ? DSPABASE : DSPBBASE)
14248+ );
14249+
14250+ DRM_DEBUG("saved: (%x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x)\n",
14251+ crtc_state->saveDSPCNTR,
14252+ crtc_state->savePIPECONF,
14253+ crtc_state->savePIPESRC,
14254+ crtc_state->saveFP0,
14255+ crtc_state->saveFP1,
14256+ crtc_state->saveDPLL,
14257+ crtc_state->saveHTOTAL,
14258+ crtc_state->saveHBLANK,
14259+ crtc_state->saveHSYNC,
14260+ crtc_state->saveVTOTAL,
14261+ crtc_state->saveVBLANK,
14262+ crtc_state->saveVSYNC,
14263+ crtc_state->saveDSPSTRIDE,
14264+ crtc_state->saveDSPSIZE,
14265+ crtc_state->saveDSPPOS,
14266+ crtc_state->saveDSPBASE
14267+ );
14268+
14269+
14270+#if 0
14271+ if(drm_helper_crtc_in_use(crtc))
14272+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
14273+
14274+
14275+ if(psb_intel_panel_fitter_pipe(dev) == psb_intel_crtc->pipe) {
14276+ REG_WRITE(PFIT_CONTROL, crtc_state->savePFITCTRL);
14277+ DRM_DEBUG("write pfit_controle: %x\n", REG_READ(PFIT_CONTROL));
14278+ }
14279+#endif
14280+
14281+ if(crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
14282+ REG_WRITE(pipeA ? DPLL_A : DPLL_B,
14283+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
14284+ REG_READ(pipeA ? DPLL_A : DPLL_B);
14285+ DRM_DEBUG("write dpll: %x\n", REG_READ(pipeA ? DPLL_A : DPLL_B));
14286+ udelay(150);
14287+ }
14288+
14289+ REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
14290+ REG_READ(pipeA ? FPA0 : FPB0);
14291+
14292+ REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
14293+ REG_READ(pipeA ? FPA1 : FPB1);
14294+
14295+ REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
14296+ REG_READ(pipeA ? DPLL_A : DPLL_B);
14297+ udelay(150);
14298+
14299+ REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
14300+ REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
14301+ REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
14302+ REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
14303+ REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
14304+ REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
14305+ REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
14306+
14307+ REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
14308+ REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
14309+
14310+ REG_WRITE(pipeA ? PIPEASRC :PIPEBSRC, crtc_state->savePIPESRC);
14311+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
14312+ REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
14313+
14314+ psb_intel_wait_for_vblank(dev);
14315+
14316+ REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
14317+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
14318+
14319+ psb_intel_wait_for_vblank(dev);
14320+
14321+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
14322+ for(i=0; i<256; ++i) {
14323+ REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
14324+ }
14325+}
14326+#endif
14327+
14328+static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
14329+ struct drm_file *file_priv,
14330+ uint32_t handle,
14331+ uint32_t width, uint32_t height)
14332+{
14333+ struct drm_device *dev = crtc->dev;
14334+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
14335+ struct psb_gtt * pg = dev_priv->pg;
14336+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14337+ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
14338+ int pipe = psb_intel_crtc->pipe;
14339+ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
14340+ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
14341+ uint32_t temp;
14342+ size_t addr = 0;
14343+ size_t size;
14344+ void *bo;
14345+ int ret;
14346+
14347+ DRM_DEBUG("\n");
14348+
14349+ /* if we want to turn of the cursor ignore width and height */
14350+ if (!handle) {
14351+ DRM_DEBUG("cursor off\n");
14352+ /* turn of the cursor */
14353+ temp = 0;
14354+ temp |= CURSOR_MODE_DISABLE;
14355+
14356+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
14357+ REG_WRITE(control, temp);
14358+ REG_WRITE(base, 0);
14359+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
14360+ }
14361+
14362+ /* unpin the old bo */
14363+ if (psb_intel_crtc->cursor_bo) {
14364+ mode_dev->bo_unpin_for_scanout(dev,
14365+ psb_intel_crtc->
14366+ cursor_bo);
14367+ psb_intel_crtc->cursor_bo = NULL;
14368+ }
14369+
14370+ return 0;
14371+ }
14372+
14373+ /* Currently we only support 64x64 cursors */
14374+ if (width != 64 || height != 64) {
14375+ DRM_ERROR("we currently only support 64x64 cursors\n");
14376+ return -EINVAL;
14377+ }
14378+
14379+ bo = mode_dev->bo_from_handle(dev, file_priv, handle);
14380+ if (!bo)
14381+ return -ENOENT;
14382+ ret = mode_dev->bo_pin_for_scanout(dev, bo);
14383+ if (ret)
14384+ return ret;
14385+ size = mode_dev->bo_size(dev, bo);
14386+ if (size < width * height * 4) {
14387+ DRM_ERROR("buffer is to small\n");
14388+ return -ENOMEM;
14389+ }
14390+ addr = mode_dev->bo_offset(dev, bo);
14391+ if(IS_POULSBO(dev)) {
14392+ addr += pg->stolen_base;
14393+ }
14394+
14395+ psb_intel_crtc->cursor_addr = addr;
14396+
14397+ temp = 0;
14398+ /* set the pipe for the cursor */
14399+ temp |= (pipe << 28);
14400+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
14401+
14402+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
14403+ REG_WRITE(control, temp);
14404+ REG_WRITE(base, addr);
14405+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
14406+ }
14407+
14408+ /* unpin the old bo */
14409+ if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo) {
14410+ mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo);
14411+ psb_intel_crtc->cursor_bo = bo;
14412+ }
14413+
14414+ return 0;
14415+}
14416+
14417+static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
14418+{
14419+ struct drm_device *dev = crtc->dev;
14420+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14421+ int pipe = psb_intel_crtc->pipe;
14422+ uint32_t temp = 0;
14423+ uint32_t adder;
14424+
14425+ if (x < 0) {
14426+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
14427+ x = -x;
14428+ }
14429+ if (y < 0) {
14430+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
14431+ y = -y;
14432+ }
14433+
14434+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
14435+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
14436+
14437+ adder = psb_intel_crtc->cursor_addr;
14438+
14439+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
14440+ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
14441+ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
14442+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
14443+ }
14444+ return 0;
14445+}
14446+
14447+static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
14448+ u16 *green, u16 *blue, uint32_t size)
14449+{
14450+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14451+ int i;
14452+
14453+ if (size != 256)
14454+ return;
14455+
14456+ for (i = 0; i < 256; i++) {
14457+ psb_intel_crtc->lut_r[i] = red[i] >> 8;
14458+ psb_intel_crtc->lut_g[i] = green[i] >> 8;
14459+ psb_intel_crtc->lut_b[i] = blue[i] >> 8;
14460+ }
14461+
14462+ psb_intel_crtc_load_lut(crtc);
14463+}
14464+
14465+/* Returns the clock of the currently programmed mode of the given pipe. */
14466+static int psb_intel_crtc_clock_get(struct drm_device *dev,
14467+ struct drm_crtc *crtc)
14468+{
14469+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14470+ int pipe = psb_intel_crtc->pipe;
14471+ u32 dpll;
14472+ u32 fp;
14473+ struct psb_intel_clock_t clock;
14474+ bool is_lvds;
14475+ struct drm_psb_private *dev_priv = dev->dev_private;
14476+
14477+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
14478+ dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
14479+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
14480+ fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
14481+ else
14482+ fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
14483+ is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
14484+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
14485+ } else {
14486+ dpll = (pipe == 0) ? dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
14487+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
14488+ fp = (pipe == 0) ? dev_priv->saveFPA0 : dev_priv->saveFPB0;
14489+ else
14490+ fp = (pipe == 0) ? dev_priv->saveFPA1 : dev_priv->saveFPB1;
14491+ is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
14492+ }
14493+
14494+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
14495+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
14496+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
14497+
14498+ if (is_lvds) {
14499+ clock.p1 =
14500+ ffs((dpll &
14501+ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
14502+ DPLL_FPA01_P1_POST_DIV_SHIFT);
14503+ clock.p2 = 14;
14504+
14505+ if ((dpll & PLL_REF_INPUT_MASK) ==
14506+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
14507+ /* XXX: might not be 66MHz */
14508+ i8xx_clock(66000, &clock);
14509+ } else
14510+ i8xx_clock(48000, &clock);
14511+ } else {
14512+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
14513+ clock.p1 = 2;
14514+ else {
14515+ clock.p1 =
14516+ ((dpll &
14517+ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
14518+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
14519+ }
14520+ if (dpll & PLL_P2_DIVIDE_BY_4)
14521+ clock.p2 = 4;
14522+ else
14523+ clock.p2 = 2;
14524+
14525+ i8xx_clock(48000, &clock);
14526+ }
14527+
14528+ /* XXX: It would be nice to validate the clocks, but we can't reuse
14529+ * i830PllIsValid() because it relies on the xf86_config connector
14530+ * configuration being accurate, which it isn't necessarily.
14531+ */
14532+
14533+ return clock.dot;
14534+}
14535+
14536+/** Returns the currently programmed mode of the given pipe. */
14537+struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
14538+ struct drm_crtc *crtc)
14539+{
14540+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14541+ int pipe = psb_intel_crtc->pipe;
14542+ struct drm_display_mode *mode;
14543+ int htot;
14544+ int hsync;
14545+ int vtot;
14546+ int vsync;
14547+ struct drm_psb_private *dev_priv = dev->dev_private;
14548+
14549+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
14550+ htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
14551+ hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
14552+ vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
14553+ vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
14554+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
14555+ } else {
14556+ htot = (pipe == 0) ? dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
14557+ hsync = (pipe == 0) ? dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
14558+ vtot = (pipe == 0) ? dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
14559+ vsync = (pipe == 0) ? dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
14560+ }
14561+
14562+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
14563+ if (!mode)
14564+ return NULL;
14565+
14566+ mode->clock = psb_intel_crtc_clock_get(dev, crtc);
14567+ mode->hdisplay = (htot & 0xffff) + 1;
14568+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
14569+ mode->hsync_start = (hsync & 0xffff) + 1;
14570+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
14571+ mode->vdisplay = (vtot & 0xffff) + 1;
14572+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
14573+ mode->vsync_start = (vsync & 0xffff) + 1;
14574+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
14575+
14576+ drm_mode_set_name(mode);
14577+ drm_mode_set_crtcinfo(mode, 0);
14578+
14579+ return mode;
14580+}
14581+
14582+static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
14583+{
14584+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14585+
14586+#ifndef CONFIG_MRST
14587+ if(psb_intel_crtc->crtc_state)
14588+ kfree(psb_intel_crtc->crtc_state);
14589+#endif
14590+ drm_crtc_cleanup(crtc);
14591+ kfree(psb_intel_crtc);
14592+}
14593+
14594+static const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
14595+ .dpms = psb_intel_crtc_dpms,
14596+ .mode_fixup = psb_intel_crtc_mode_fixup,
14597+ .mode_set = psb_intel_crtc_mode_set,
14598+ .mode_set_base = psb_intel_pipe_set_base,
14599+ .prepare = psb_intel_crtc_prepare,
14600+ .commit = psb_intel_crtc_commit,
14601+};
14602+
14603+static const struct drm_crtc_helper_funcs mrst_helper_funcs;
14604+
14605+const struct drm_crtc_funcs psb_intel_crtc_funcs = {
14606+#ifndef CONFIG_MRST
14607+ .save = psb_intel_crtc_save,
14608+ .restore = psb_intel_crtc_restore,
14609+#endif
14610+ .cursor_set = psb_intel_crtc_cursor_set,
14611+ .cursor_move = psb_intel_crtc_cursor_move,
14612+ .gamma_set = psb_intel_crtc_gamma_set,
14613+ .set_config = drm_crtc_helper_set_config,
14614+ .destroy = psb_intel_crtc_destroy,
14615+ .set_base = psb_kms_flip_set_base,
14616+};
14617+
14618+
14619+void psb_intel_crtc_init(struct drm_device *dev, int pipe,
14620+ struct psb_intel_mode_device *mode_dev)
14621+{
14622+ struct psb_intel_crtc *psb_intel_crtc;
14623+ int i;
14624+ uint16_t *r_base, *g_base, *b_base;
14625+
14626+#if PRINT_JLIU7
14627+ DRM_INFO("JLIU7 enter psb_intel_crtc_init \n");
14628+#endif /* PRINT_JLIU7 */
14629+
14630+ /* We allocate a extra array of drm_connector pointers
14631+ * for fbdev after the crtc */
14632+ psb_intel_crtc =
14633+ kzalloc(sizeof(struct psb_intel_crtc) +
14634+ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
14635+ GFP_KERNEL);
14636+ if (psb_intel_crtc == NULL)
14637+ return;
14638+
14639+#ifndef CONFIG_MRST
14640+ psb_intel_crtc->crtc_state = kzalloc(sizeof(struct psb_intel_crtc_state),
14641+ GFP_KERNEL);
14642+ if(!psb_intel_crtc->crtc_state) {
14643+ DRM_INFO("Crtc state error: No memory\n");
14644+ kfree(psb_intel_crtc);
14645+ return;
14646+ }
14647+#endif
14648+
14649+ drm_crtc_init(dev, &psb_intel_crtc->base, &psb_intel_crtc_funcs);
14650+
14651+ drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
14652+ psb_intel_crtc->pipe = pipe;
14653+
14654+ r_base = psb_intel_crtc->base.gamma_store;
14655+ g_base = r_base + 256;
14656+ b_base = g_base + 256;
14657+ for (i = 0; i < 256; i++) {
14658+ psb_intel_crtc->lut_r[i] = i;
14659+ psb_intel_crtc->lut_g[i] = i;
14660+ psb_intel_crtc->lut_b[i] = i;
14661+ r_base[i] = i << 8;
14662+ g_base[i] = i << 8;
14663+ b_base[i] = i << 8;
14664+ }
14665+
14666+ psb_intel_crtc->mode_dev = mode_dev;
14667+ psb_intel_crtc->cursor_addr = 0;
14668+ psb_intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
14669+
14670+ if (IS_MRST(dev)) {
14671+ drm_crtc_helper_add(&psb_intel_crtc->base, &mrst_helper_funcs);
14672+ } else {
14673+ drm_crtc_helper_add(&psb_intel_crtc->base,
14674+ &psb_intel_helper_funcs);
14675+ }
14676+
14677+ /* Setup the array of drm_connector pointer array */
14678+ psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
14679+ psb_intel_crtc->mode_set.connectors =
14680+ (struct drm_connector **) (psb_intel_crtc + 1);
14681+ psb_intel_crtc->mode_set.num_connectors = 0;
14682+
14683+#if 0 /* JB: not drop, What should go in here? */
14684+ if (i915_fbpercrtc)
14685+#endif
14686+}
14687+
14688+struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
14689+{
14690+ struct drm_crtc *crtc = NULL;
14691+
14692+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
14693+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
14694+ if (psb_intel_crtc->pipe == pipe)
14695+ break;
14696+ }
14697+ return crtc;
14698+}
14699+
14700+int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
14701+{
14702+ int index_mask = 0;
14703+ struct drm_connector *connector;
14704+ int entry = 0;
14705+
14706+ list_for_each_entry(connector, &dev->mode_config.connector_list,
14707+ head) {
14708+ struct psb_intel_output *psb_intel_output =
14709+ to_psb_intel_output(connector);
14710+ if (type_mask & (1 << psb_intel_output->type))
14711+ index_mask |= (1 << entry);
14712+ entry++;
14713+ }
14714+ return index_mask;
14715+}
14716+
14717+#if 0 /* JB: Should be per device */
14718+static void psb_intel_setup_outputs(struct drm_device *dev)
14719+{
14720+ struct drm_connector *connector;
14721+
14722+ psb_intel_crt_init(dev);
14723+
14724+ /* Set up integrated LVDS */
14725+ if (IS_MOBILE(dev) && !IS_I830(dev))
14726+ psb_intel_lvds_init(dev);
14727+
14728+ if (IS_I9XX(dev)) {
14729+ psb_intel_sdvo_init(dev, SDVOB);
14730+ psb_intel_sdvo_init(dev, SDVOC);
14731+ } else
14732+ psb_intel_dvo_init(dev);
14733+
14734+ if (IS_I9XX(dev) && !IS_I915G(dev))
14735+ psb_intel_tv_init(dev);
14736+
14737+ list_for_each_entry(connector, &dev->mode_config.connector_list,
14738+ head) {
14739+ struct psb_intel_output *psb_intel_output =
14740+ to_psb_intel_output(connector);
14741+ struct drm_encoder *encoder = &psb_intel_output->enc;
14742+ int crtc_mask = 0, clone_mask = 0;
14743+
14744+ /* valid crtcs */
14745+ switch (psb_intel_output->type) {
14746+ case INTEL_OUTPUT_DVO:
14747+ case INTEL_OUTPUT_SDVO:
14748+ crtc_mask = ((1 << 0) | (1 << 1));
14749+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
14750+ (1 << INTEL_OUTPUT_DVO) |
14751+ (1 << INTEL_OUTPUT_SDVO));
14752+ break;
14753+ case INTEL_OUTPUT_ANALOG:
14754+ crtc_mask = ((1 << 0) | (1 << 1));
14755+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
14756+ (1 << INTEL_OUTPUT_DVO) |
14757+ (1 << INTEL_OUTPUT_SDVO));
14758+ break;
14759+ case INTEL_OUTPUT_LVDS:
14760+ crtc_mask = (1 << 1);
14761+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
14762+ break;
14763+ case INTEL_OUTPUT_TVOUT:
14764+ crtc_mask = ((1 << 0) | (1 << 1));
14765+ clone_mask = (1 << INTEL_OUTPUT_TVOUT);
14766+ break;
14767+ }
14768+ encoder->possible_crtcs = crtc_mask;
14769+ encoder->possible_clones =
14770+ psb_intel_connector_clones(dev, clone_mask);
14771+ }
14772+}
14773+#endif
14774+
14775+#if 0 /* JB: Rework framebuffer code into something none device specific */
14776+static void psb_intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14777+{
14778+ struct psb_intel_framebuffer *psb_intel_fb = to_psb_intel_framebuffer(fb);
14779+ struct drm_device *dev = fb->dev;
14780+
14781+ if (fb->fbdev)
14782+ intelfb_remove(dev, fb);
14783+
14784+ drm_framebuffer_cleanup(fb);
14785+ drm_gem_object_unreference(fb->mm_private);
14786+
14787+ kfree(psb_intel_fb);
14788+}
14789+
14790+static int psb_intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14791+ struct drm_file *file_priv,
14792+ unsigned int *handle)
14793+{
14794+ struct drm_gem_object *object = fb->mm_private;
14795+
14796+ return drm_gem_handle_create(file_priv, object, handle);
14797+}
14798+
14799+static const struct drm_framebuffer_funcs psb_intel_fb_funcs = {
14800+ .destroy = psb_intel_user_framebuffer_destroy,
14801+ .create_handle = psb_intel_user_framebuffer_create_handle,
14802+};
14803+
14804+struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device *dev,
14805+ struct drm_mode_fb_cmd
14806+ *mode_cmd,
14807+ void *mm_private)
14808+{
14809+ struct psb_intel_framebuffer *psb_intel_fb;
14810+
14811+ psb_intel_fb = kzalloc(sizeof(*psb_intel_fb), GFP_KERNEL);
14812+ if (!psb_intel_fb)
14813+ return NULL;
14814+
14815+ if (!drm_framebuffer_init(dev, &psb_intel_fb->base, &psb_intel_fb_funcs))
14816+ return NULL;
14817+
14818+ drm_helper_mode_fill_fb_struct(&psb_intel_fb->base, mode_cmd);
14819+
14820+ return &psb_intel_fb->base;
14821+}
14822+
14823+
14824+static struct drm_framebuffer *psb_intel_user_framebuffer_create(struct
14825+ drm_device
14826+ *dev,
14827+ struct
14828+ drm_file
14829+ *filp,
14830+ struct
14831+ drm_mode_fb_cmd
14832+ *mode_cmd)
14833+{
14834+ struct drm_gem_object *obj;
14835+
14836+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
14837+ if (!obj)
14838+ return NULL;
14839+
14840+ return psb_intel_framebuffer_create(dev, mode_cmd, obj);
14841+}
14842+
14843+static int psb_intel_insert_new_fb(struct drm_device *dev,
14844+ struct drm_file *file_priv,
14845+ struct drm_framebuffer *fb,
14846+ struct drm_mode_fb_cmd *mode_cmd)
14847+{
14848+ struct psb_intel_framebuffer *psb_intel_fb;
14849+ struct drm_gem_object *obj;
14850+ struct drm_crtc *crtc;
14851+
14852+ psb_intel_fb = to_psb_intel_framebuffer(fb);
14853+
14854+ mutex_lock(&dev->struct_mutex);
14855+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
14856+
14857+ if (!obj) {
14858+ mutex_unlock(&dev->struct_mutex);
14859+ return -EINVAL;
14860+ }
14861+ drm_gem_object_unreference(psb_intel_fb->base.mm_private);
14862+ drm_helper_mode_fill_fb_struct(fb, mode_cmd, obj);
14863+ mutex_unlock(&dev->struct_mutex);
14864+
14865+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
14866+ if (crtc->fb == fb) {
14867+ struct drm_crtc_helper_funcs *crtc_funcs =
14868+ crtc->helper_private;
14869+ crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y);
14870+ }
14871+ }
14872+ return 0;
14873+}
14874+
14875+static const struct drm_mode_config_funcs psb_intel_mode_funcs = {
14876+ .resize_fb = psb_intel_insert_new_fb,
14877+ .fb_create = psb_intel_user_framebuffer_create,
14878+ .fb_changed = intelfb_probe,
14879+};
14880+#endif
14881+
14882+#if 0 /* Should be per device */
14883+void psb_intel_modeset_init(struct drm_device *dev)
14884+{
14885+ int num_pipe;
14886+ int i;
14887+
14888+ drm_mode_config_init(dev);
14889+
14890+ dev->mode_config.min_width = 0;
14891+ dev->mode_config.min_height = 0;
14892+
14893+ dev->mode_config.funcs = (void *) &psb_intel_mode_funcs;
14894+
14895+ if (IS_I965G(dev)) {
14896+ dev->mode_config.max_width = 8192;
14897+ dev->mode_config.max_height = 8192;
14898+ } else {
14899+ dev->mode_config.max_width = 2048;
14900+ dev->mode_config.max_height = 2048;
14901+ }
14902+
14903+ /* set memory base */
14904+ /* MRST and PSB should use BAR 2*/
14905+ dev->mode_config.fb_base =
14906+ pci_resource_start(dev->pdev, 2);
14907+
14908+ if (IS_MOBILE(dev) || IS_I9XX(dev))
14909+ num_pipe = 2;
14910+ else
14911+ num_pipe = 1;
14912+ DRM_DEBUG("%d display pipe%s available.\n",
14913+ num_pipe, num_pipe > 1 ? "s" : "");
14914+
14915+ for (i = 0; i < num_pipe; i++)
14916+ psb_intel_crtc_init(dev, i);
14917+
14918+ psb_intel_setup_outputs(dev);
14919+
14920+ /* setup fbs */
14921+ /* drm_initial_config(dev); */
14922+}
14923+#endif
14924+
14925+void psb_intel_modeset_cleanup(struct drm_device *dev)
14926+{
14927+ drm_mode_config_cleanup(dev);
14928+}
14929+
14930+
14931+/* current intel driver doesn't take advantage of encoders
14932+ always give back the encoder for the connector
14933+*/
14934+struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
14935+{
14936+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
14937+
14938+ return &psb_intel_output->enc;
14939+}
14940+
14941+/* MRST_PLATFORM start */
14942+
14943+#if DUMP_REGISTER
14944+void dump_dc_registers(struct drm_device *dev)
14945+{
14946+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
14947+ unsigned int i = 0;
14948+
14949+ DRM_INFO("jliu7 dump_dc_registers\n");
14950+
14951+
14952+ if (0x80000000 & REG_READ(0x70008)) {
14953+ for (i = 0x20a0; i < 0x20af; i += 4) {
14954+ DRM_INFO("jliu7 interrupt register=0x%x, value=%x\n", i, (unsigned int) REG_READ(i));
14955+ }
14956+
14957+ for (i = 0xf014; i < 0xf047; i += 4) {
14958+ DRM_INFO
14959+ ("jliu7 pipe A dpll register=0x%x, value=%x\n",
14960+ i, (unsigned int) REG_READ(i));
14961+ }
14962+
14963+ for (i = 0x60000; i < 0x6005f; i += 4) {
14964+ DRM_INFO
14965+ ("jliu7 pipe A timing register=0x%x, value=%x\n",
14966+ i, (unsigned int) REG_READ(i));
14967+ }
14968+
14969+ for (i = 0x61140; i < 0x61143; i += 4) {
14970+ DRM_INFO("jliu7 SDBOB register=0x%x, value=%x\n",
14971+ i, (unsigned int) REG_READ(i));
14972+ }
14973+
14974+ for (i = 0x61180; i < 0x6123F; i += 4) {
14975+ DRM_INFO
14976+ ("jliu7 LVDS PORT register=0x%x, value=%x\n",
14977+ i, (unsigned int) REG_READ(i));
14978+ }
14979+
14980+ for (i = 0x61254; i < 0x612AB; i += 4) {
14981+ DRM_INFO("jliu7 BLC register=0x%x, value=%x\n",
14982+ i, (unsigned int) REG_READ(i));
14983+ }
14984+
14985+ for (i = 0x70000; i < 0x70047; i += 4) {
14986+ DRM_INFO
14987+ ("jliu7 PIPE A control register=0x%x, value=%x\n",
14988+ i, (unsigned int) REG_READ(i));
14989+ }
14990+
14991+ for (i = 0x70180; i < 0x7020b; i += 4) {
14992+ DRM_INFO("jliu7 display A control register=0x%x,"
14993+ "value=%x\n", i,
14994+ (unsigned int) REG_READ(i));
14995+ }
14996+
14997+ for (i = 0x71400; i < 0x71403; i += 4) {
14998+ DRM_INFO
14999+ ("jliu7 VGA Display Plane Control register=0x%x,"
15000+ "value=%x\n", i, (unsigned int) REG_READ(i));
15001+ }
15002+ }
15003+
15004+ if (0x80000000 & REG_READ(0x71008)) {
15005+ for (i = 0x61000; i < 0x6105f; i += 4) {
15006+ DRM_INFO
15007+ ("jliu7 pipe B timing register=0x%x, value=%x\n",
15008+ i, (unsigned int) REG_READ(i));
15009+ }
15010+
15011+ for (i = 0x71000; i < 0x71047; i += 4) {
15012+ DRM_INFO
15013+ ("jliu7 PIPE B control register=0x%x, value=%x\n",
15014+ i, (unsigned int) REG_READ(i));
15015+ }
15016+
15017+ for (i = 0x71180; i < 0x7120b; i += 4) {
15018+ DRM_INFO("jliu7 display B control register=0x%x,"
15019+ "value=%x\n", i,
15020+ (unsigned int) REG_READ(i));
15021+ }
15022+ }
15023+#if 0
15024+ for (i = 0x70080; i < 0x700df; i += 4) {
15025+ DRM_INFO("jliu7 cursor A & B register=0x%x, value=%x\n",
15026+ i, (unsigned int) REG_READ(i));
15027+ }
15028+#endif
15029+
15030+}
15031+
15032+void dump_dsi_registers(struct drm_device *dev)
15033+{
15034+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
15035+ unsigned int i = 0;
15036+
15037+ DRM_INFO("jliu7 dump_dsi_registers\n");
15038+
15039+ for (i = 0xb000; i < 0xb064; i += 4) {
15040+ DRM_INFO("jliu7 MIPI IP register=0x%x, value=%x\n", i,
15041+ (unsigned int) REG_READ(i));
15042+ }
15043+
15044+ i = 0xb104;
15045+ DRM_INFO("jliu7 MIPI control register=0x%x, value=%x\n",
15046+ i, (unsigned int) REG_READ(i));
15047+}
15048+#endif /* DUMP_REGISTER */
15049+
15050+
15051+struct mrst_limit_t {
15052+ struct psb_intel_range_t dot, m, p1;
15053+};
15054+
15055+struct mrst_clock_t {
15056+ /* derived values */
15057+ int dot;
15058+ int m;
15059+ int p1;
15060+};
15061+
15062+#define MRST_LIMIT_LVDS_100L 0
15063+#define MRST_LIMIT_LVDS_83 1
15064+#define MRST_LIMIT_LVDS_100 2
15065+
15066+#define MRST_DOT_MIN 19750
15067+#define MRST_DOT_MAX 120000
15068+#define MRST_M_MIN_100L 20
15069+#define MRST_M_MIN_100 10
15070+#define MRST_M_MIN_83 12
15071+#define MRST_M_MAX_100L 34
15072+#define MRST_M_MAX_100 17
15073+#define MRST_M_MAX_83 20
15074+#define MRST_P1_MIN 2
15075+#define MRST_P1_MAX_0 7
15076+#define MRST_P1_MAX_1 8
15077+
15078+static const struct mrst_limit_t mrst_limits[] = {
15079+ { /* MRST_LIMIT_LVDS_100L */
15080+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
15081+ .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
15082+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
15083+ },
15084+ { /* MRST_LIMIT_LVDS_83L */
15085+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
15086+ .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
15087+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
15088+ },
15089+ { /* MRST_LIMIT_LVDS_100 */
15090+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
15091+ .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
15092+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
15093+ },
15094+};
15095+
15096+#define MRST_M_MIN 10
15097+static const u32 mrst_m_converts[] = {
15098+ 0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
15099+ 0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
15100+ 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
15101+};
15102+
15103+#define COUNT_MAX 0x10000000
15104+void mrstWaitForPipeDisable(struct drm_device *dev)
15105+{
15106+ int count, temp;
15107+
15108+ /* FIXME JLIU7_PO */
15109+ psb_intel_wait_for_vblank(dev);
15110+ return;
15111+
15112+ /* Wait for for the pipe disable to take effect. */
15113+ for (count = 0; count < COUNT_MAX; count++) {
15114+ temp = REG_READ(PIPEACONF);
15115+ if ((temp & PIPEACONF_PIPE_STATE) == 0)
15116+ break;
15117+ }
15118+
15119+ if (count == COUNT_MAX) {
15120+#if PRINT_JLIU7
15121+ DRM_INFO("JLIU7 mrstWaitForPipeDisable time out. \n");
15122+#endif /* PRINT_JLIU7 */
15123+ } else {
15124+#if PRINT_JLIU7
15125+ DRM_INFO("JLIU7 mrstWaitForPipeDisable cout = %d. \n",
15126+ count);
15127+#endif /* PRINT_JLIU7 */
15128+ }
15129+}
15130+
15131+void mrstWaitForPipeEnable(struct drm_device *dev)
15132+{
15133+ int count, temp;
15134+
15135+ /* FIXME JLIU7_PO */
15136+ psb_intel_wait_for_vblank(dev);
15137+ return;
15138+
15139+ /* Wait for for the pipe disable to take effect. */
15140+ for (count = 0; count < COUNT_MAX; count++) {
15141+ temp = REG_READ(PIPEACONF);
15142+ if ((temp & PIPEACONF_PIPE_STATE) == 1)
15143+ break;
15144+ }
15145+
15146+ if (count == COUNT_MAX) {
15147+#if PRINT_JLIU7
15148+ DRM_INFO("JLIU7 mrstWaitForPipeEnable time out. \n");
15149+#endif /* PRINT_JLIU7 */
15150+ } else {
15151+#if PRINT_JLIU7
15152+ DRM_INFO("JLIU7 mrstWaitForPipeEnable cout = %d. \n",
15153+ count);
15154+#endif /* PRINT_JLIU7 */
15155+ }
15156+}
15157+
15158+static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
15159+{
15160+ const struct mrst_limit_t *limit;
15161+ struct drm_device *dev = crtc->dev;
15162+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
15163+
15164+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
15165+ || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
15166+ if (dev_priv->sku_100L)
15167+ limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
15168+ if (dev_priv->sku_83)
15169+ limit = &mrst_limits[MRST_LIMIT_LVDS_83];
15170+ if (dev_priv->sku_100)
15171+ limit = &mrst_limits[MRST_LIMIT_LVDS_100];
15172+ } else {
15173+ limit = NULL;
15174+#if PRINT_JLIU7
15175+ DRM_INFO("JLIU7 jliu7 mrst_limit Wrong display type. \n");
15176+#endif /* PRINT_JLIU7 */
15177+ }
15178+
15179+ return limit;
15180+}
15181+
15182+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
15183+static void mrst_clock(int refclk, struct mrst_clock_t *clock)
15184+{
15185+ clock->dot = (refclk * clock->m) / (14 * clock->p1);
15186+}
15187+
15188+void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
15189+{
15190+#if PRINT_JLIU7
15191+ DRM_INFO
15192+ ("JLIU7 mrstPrintPll %s: dotclock = %d, m = %d, p1 = %d. \n",
15193+ prefix, clock->dot, clock->m, clock->p1);
15194+#endif /* PRINT_JLIU7 */
15195+}
15196+
15197+/**
15198+ * Returns a set of divisors for the desired target clock with the given refclk,
15199+ * or FALSE. Divisor values are the actual divisors for
15200+ */
15201+static bool
15202+mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
15203+ struct mrst_clock_t *best_clock)
15204+{
15205+ struct mrst_clock_t clock;
15206+ const struct mrst_limit_t *limit = mrst_limit(crtc);
15207+ int err = target;
15208+
15209+ memset(best_clock, 0, sizeof(*best_clock));
15210+
15211+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
15212+ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
15213+ clock.p1++) {
15214+ int this_err;
15215+
15216+ mrst_clock(refclk, &clock);
15217+
15218+ this_err = abs(clock.dot - target);
15219+ if (this_err < err) {
15220+ *best_clock = clock;
15221+ err = this_err;
15222+ }
15223+ }
15224+ }
15225+ DRM_DEBUG("mrstFindBestPLL err = %d.\n", err);
15226+
15227+ return err != target;
15228+}
15229+
15230+/**
15231+ * Sets the power management mode of the pipe and plane.
15232+ *
15233+ * This code should probably grow support for turning the cursor off and back
15234+ * on appropriately at the same time as we're turning the pipe off/on.
15235+ */
15236+static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
15237+{
15238+ struct drm_device *dev = crtc->dev;
15239+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
15240+ int pipe = psb_intel_crtc->pipe;
15241+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
15242+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
15243+ int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
15244+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
15245+ u32 temp;
15246+ bool enabled;
15247+
15248+#if PRINT_JLIU7
15249+ DRM_INFO("JLIU7 enter mrst_crtc_dpms, mode = %d, pipe = %d \n",
15250+ mode, pipe);
15251+#endif /* PRINT_JLIU7 */
15252+
15253+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
15254+
15255+ /* XXX: When our outputs are all unaware of DPMS modes other than off
15256+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
15257+ */
15258+ switch (mode) {
15259+ case DRM_MODE_DPMS_ON:
15260+ case DRM_MODE_DPMS_STANDBY:
15261+ case DRM_MODE_DPMS_SUSPEND:
15262+ /* Enable the DPLL */
15263+ temp = REG_READ(dpll_reg);
15264+ if ((temp & DPLL_VCO_ENABLE) == 0) {
15265+ REG_WRITE(dpll_reg, temp);
15266+ REG_READ(dpll_reg);
15267+ /* Wait for the clocks to stabilize. */
15268+ udelay(150);
15269+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
15270+ REG_READ(dpll_reg);
15271+ /* Wait for the clocks to stabilize. */
15272+ udelay(150);
15273+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
15274+ REG_READ(dpll_reg);
15275+ /* Wait for the clocks to stabilize. */
15276+ udelay(150);
15277+ }
15278+
15279+ /* Enable the pipe */
15280+ temp = REG_READ(pipeconf_reg);
15281+ if ((temp & PIPEACONF_ENABLE) == 0)
15282+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
15283+
15284+ /* Enable the plane */
15285+ temp = REG_READ(dspcntr_reg);
15286+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
15287+ REG_WRITE(dspcntr_reg,
15288+ temp | DISPLAY_PLANE_ENABLE);
15289+ /* Flush the plane changes */
15290+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
15291+ }
15292+
15293+ psb_intel_crtc_load_lut(crtc);
15294+
15295+ /* Give the overlay scaler a chance to enable
15296+ if it's on this pipe */
15297+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
15298+ break;
15299+ case DRM_MODE_DPMS_OFF:
15300+ /* Give the overlay scaler a chance to disable
15301+ * if it's on this pipe */
15302+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
15303+
15304+ /* Disable the VGA plane that we never use */
15305+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
15306+
15307+ /* Disable display plane */
15308+ temp = REG_READ(dspcntr_reg);
15309+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
15310+ REG_WRITE(dspcntr_reg,
15311+ temp & ~DISPLAY_PLANE_ENABLE);
15312+ /* Flush the plane changes */
15313+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
15314+ REG_READ(dspbase_reg);
15315+ }
15316+
15317+ if (!IS_I9XX(dev)) {
15318+ /* Wait for vblank for the disable to take effect */
15319+ psb_intel_wait_for_vblank(dev);
15320+ }
15321+
15322+ /* Next, disable display pipes */
15323+ temp = REG_READ(pipeconf_reg);
15324+ if ((temp & PIPEACONF_ENABLE) != 0) {
15325+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
15326+ REG_READ(pipeconf_reg);
15327+ }
15328+
 15329+	/* Wait for the pipe disable to take effect. */
15330+ mrstWaitForPipeDisable(dev);
15331+
15332+ temp = REG_READ(dpll_reg);
15333+ if ((temp & DPLL_VCO_ENABLE) != 0) {
15334+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
15335+ REG_READ(dpll_reg);
15336+ }
15337+
15338+ /* Wait for the clocks to turn off. */
15339+ udelay(150);
15340+ break;
15341+ }
15342+
15343+#if DUMP_REGISTER
15344+ dump_dc_registers(dev);
15345+#endif /* DUMP_REGISTER */
15346+
15347+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
15348+
15349+#if 0 /* JB: Add vblank support later */
15350+ if (enabled)
15351+ dev_priv->vblank_pipe |= (1 << pipe);
15352+ else
15353+ dev_priv->vblank_pipe &= ~(1 << pipe);
15354+#endif
15355+
15356+ psb_intel_crtc->dpms_mode = mode;
15357+
15358+#if 0 /* JB: Add sarea support later */
15359+ if (!dev->primary->master)
15360+ return;
15361+
15362+ master_priv = dev->primary->master->driver_priv;
15363+ if (!master_priv->sarea_priv)
15364+ return;
15365+
15366+ switch (pipe) {
15367+ case 0:
15368+ master_priv->sarea_priv->planeA_w =
15369+ enabled ? crtc->mode.hdisplay : 0;
15370+ master_priv->sarea_priv->planeA_h =
15371+ enabled ? crtc->mode.vdisplay : 0;
15372+ break;
15373+ case 1:
15374+ master_priv->sarea_priv->planeB_w =
15375+ enabled ? crtc->mode.hdisplay : 0;
15376+ master_priv->sarea_priv->planeB_h =
15377+ enabled ? crtc->mode.vdisplay : 0;
15378+ break;
15379+ default:
15380+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
15381+ break;
15382+ }
15383+#endif
15384+
15385+ /*Set FIFO Watermarks*/
15386+ REG_WRITE(DSPARB, 0x3FFF);
15387+ REG_WRITE(DSPFW1, 0x3F88080A);
15388+ REG_WRITE(DSPFW2, 0x0b060808);
15389+ REG_WRITE(DSPFW3, 0x0);
15390+ REG_WRITE(DSPFW4, 0x08030404);
15391+ REG_WRITE(DSPFW5, 0x04040404);
15392+ REG_WRITE(DSPFW6, 0x78);
15393+ REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
15394+ /* Must write Bit 14 of the Chicken Bit Register */
15395+
15396+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
15397+}
15398+
15399+static int mrst_crtc_mode_set(struct drm_crtc *crtc,
15400+ struct drm_display_mode *mode,
15401+ struct drm_display_mode *adjusted_mode,
15402+ int x, int y,
15403+ struct drm_framebuffer *old_fb)
15404+{
15405+ struct drm_device *dev = crtc->dev;
15406+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
15407+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
15408+ int pipe = psb_intel_crtc->pipe;
15409+ int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
15410+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
15411+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
15412+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
15413+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
15414+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
15415+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
15416+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
15417+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
15418+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
15419+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
15420+ int refclk = 0;
15421+ struct mrst_clock_t clock;
15422+ u32 dpll = 0, fp = 0, dspcntr, pipeconf, lvdsport;
15423+ bool ok, is_sdvo = false;
15424+ bool is_crt = false, is_lvds = false, is_tv = false;
15425+ bool is_mipi = false;
15426+ struct drm_mode_config *mode_config = &dev->mode_config;
15427+ struct psb_intel_output *psb_intel_output = NULL;
15428+ uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
15429+ struct drm_encoder *encoder;
15430+
15431+#if PRINT_JLIU7
15432+ DRM_INFO("JLIU7 enter mrst_crtc_mode_set \n");
15433+#endif /* PRINT_JLIU7 */
15434+
15435+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
15436+
15437+ memcpy(&psb_intel_crtc->saved_mode, mode, sizeof(struct drm_display_mode));
15438+ memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode, sizeof(struct drm_display_mode));
15439+
15440+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
15441+
15442+ if (encoder->crtc != crtc)
15443+ continue;
15444+
15445+ psb_intel_output = enc_to_psb_intel_output(encoder);
15446+ switch (psb_intel_output->type) {
15447+ case INTEL_OUTPUT_LVDS:
15448+ is_lvds = true;
15449+ break;
15450+ case INTEL_OUTPUT_SDVO:
15451+ is_sdvo = true;
15452+ break;
15453+ case INTEL_OUTPUT_TVOUT:
15454+ is_tv = true;
15455+ break;
15456+ case INTEL_OUTPUT_ANALOG:
15457+ is_crt = true;
15458+ break;
15459+ case INTEL_OUTPUT_MIPI:
15460+ is_mipi = true;
15461+ break;
15462+ }
15463+ }
15464+
15465+ if (is_lvds | is_mipi) {
15466+ /*FIXME JLIU7 Get panel power delay parameters from
15467+ config data */
15468+ REG_WRITE(0x61208, 0x25807d0);
15469+ REG_WRITE(0x6120c, 0x1f407d0);
15470+ REG_WRITE(0x61210, 0x270f04);
15471+ }
15472+
15473+ /* Disable the VGA plane that we never use */
15474+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
15475+
15476+ /* Disable the panel fitter if it was on our pipe */
15477+ if (psb_intel_panel_fitter_pipe(dev) == pipe)
15478+ REG_WRITE(PFIT_CONTROL, 0);
15479+
15480+ REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
15481+
15482+ if (psb_intel_output)
15483+ drm_connector_property_get_value(&psb_intel_output->base,
15484+ dev->mode_config.scaling_mode_property, &scalingType);
15485+
15486+ if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
15487+ /*Moorestown doesn't have register support for centering so we need to
15488+ mess with the h/vblank and h/vsync start and ends to get centering*/
15489+ int offsetX = 0, offsetY = 0;
15490+
15491+ offsetX = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
15492+ offsetY = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
15493+
15494+ REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
15495+ ((adjusted_mode->crtc_htotal - 1) << 16));
15496+ REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
15497+ ((adjusted_mode->crtc_vtotal - 1) << 16));
15498+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - offsetX - 1) |
15499+ ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
15500+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - offsetX - 1) |
15501+ ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
15502+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - offsetY - 1) |
15503+ ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
15504+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - offsetY - 1) |
15505+ ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
15506+ } else {
15507+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
15508+ ((adjusted_mode->crtc_htotal - 1) << 16));
15509+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
15510+ ((adjusted_mode->crtc_vtotal - 1) << 16));
15511+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
15512+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
15513+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
15514+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
15515+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
15516+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
15517+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
15518+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
15519+ }
15520+
15521+ /* Flush the plane changes */
15522+ {
15523+ struct drm_crtc_helper_funcs *crtc_funcs =
15524+ crtc->helper_private;
15525+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
15526+ }
15527+
15528+ /* setup pipeconf */
15529+ pipeconf = REG_READ(pipeconf_reg);
15530+
15531+ /* Set up the display plane register */
15532+ dspcntr = REG_READ(dspcntr_reg);
15533+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
15534+
15535+ if (pipe == 0)
15536+ dspcntr |= DISPPLANE_SEL_PIPE_A;
15537+ else
15538+ dspcntr |= DISPPLANE_SEL_PIPE_B;
15539+
15540+ dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
15541+ dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
15542+
15543+ if (is_mipi)
15544+ goto mrst_crtc_mode_set_exit;
15545+
15546+ if (dev_priv->sku_100L)
15547+ refclk = 100000;
15548+ else if (dev_priv->sku_83)
15549+ refclk = 166000;
15550+ else if (dev_priv->sku_100)
15551+ refclk = 200000;
15552+
15553+ dpll = 0; /*BIT16 = 0 for 100MHz reference */
15554+
15555+ ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
15556+
15557+ if (!ok) {
15558+#if PRINT_JLIU7
15559+ DRM_INFO
15560+ ("JLIU7 mrstFindBestPLL fail in mrst_crtc_mode_set. \n");
15561+#endif /* PRINT_JLIU7 */
15562+ } else {
15563+#if PRINT_JLIU7
15564+ DRM_INFO("JLIU7 mrst_crtc_mode_set pixel clock = %d,"
15565+ "m = %x, p1 = %x. \n", clock.dot, clock.m,
15566+ clock.p1);
15567+#endif /* PRINT_JLIU7 */
15568+ }
15569+
15570+ fp = mrst_m_converts[(clock.m - MRST_M_MIN)] << 8;
15571+
15572+ dpll |= DPLL_VGA_MODE_DIS;
15573+
15574+
15575+ dpll |= DPLL_VCO_ENABLE;
15576+
15577+ if (is_lvds)
15578+ dpll |= DPLLA_MODE_LVDS;
15579+ else
15580+ dpll |= DPLLB_MODE_DAC_SERIAL;
15581+
15582+ if (is_sdvo) {
15583+ int sdvo_pixel_multiply =
15584+ adjusted_mode->clock / mode->clock;
15585+
15586+ dpll |= DPLL_DVO_HIGH_SPEED;
15587+ dpll |=
15588+ (sdvo_pixel_multiply -
15589+ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
15590+ }
15591+
15592+
15593+ /* compute bitmask from p1 value */
15594+ dpll |= (1 << (clock.p1 - 2)) << 17;
15595+
15596+ dpll |= DPLL_VCO_ENABLE;
15597+
15598+#if PRINT_JLIU7
15599+ mrstPrintPll("chosen", &clock);
15600+#endif /* PRINT_JLIU7 */
15601+
15602+#if 0
15603+ if (!xf86ModesEqual(mode, adjusted_mode)) {
15604+ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
15605+ "Adjusted mode for pipe %c:\n",
15606+ pipe == 0 ? 'A' : 'B');
15607+ xf86PrintModeline(pScrn->scrnIndex, mode);
15608+ }
15609+ i830PrintPll("chosen", &clock);
15610+#endif
15611+
15612+ if (dpll & DPLL_VCO_ENABLE) {
15613+ REG_WRITE(fp_reg, fp);
15614+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
15615+ REG_READ(dpll_reg);
15616+/* FIXME jliu7 check the DPLLA lock bit PIPEACONF[29] */
15617+ udelay(150);
15618+ }
15619+
15620+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
15621+ * This is an exception to the general rule that mode_set doesn't turn
15622+ * things on.
15623+ */
15624+ if (is_lvds) {
15625+
15626+ /*lvdsport = 0x803003c0;*/
15627+ /*lvdsport = 0x813003c0;*/
15628+ lvdsport = dev_priv->gct_data.Panel_Port_Control;
15629+
15630+ REG_WRITE(LVDS, lvdsport);
15631+ }
15632+
15633+ REG_WRITE(fp_reg, fp);
15634+ REG_WRITE(dpll_reg, dpll);
15635+ REG_READ(dpll_reg);
15636+ /* Wait for the clocks to stabilize. */
15637+ udelay(150);
15638+
15639+ /* write it again -- the BIOS does, after all */
15640+ REG_WRITE(dpll_reg, dpll);
15641+ REG_READ(dpll_reg);
15642+ /* Wait for the clocks to stabilize. */
15643+ udelay(150);
15644+
15645+ REG_WRITE(pipeconf_reg, pipeconf);
15646+ REG_READ(pipeconf_reg);
15647+
 15648+	/* Wait for the pipe enable to take effect. */
15649+ mrstWaitForPipeEnable(dev);
15650+
15651+ REG_WRITE(dspcntr_reg, dspcntr);
15652+ psb_intel_wait_for_vblank(dev);
15653+
15654+mrst_crtc_mode_set_exit:
15655+
15656+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
15657+
15658+ return 0;
15659+}
15660+
15661+
15662+static const struct drm_crtc_helper_funcs mrst_helper_funcs = {
15663+ .dpms = mrst_crtc_dpms,
15664+ .mode_fixup = psb_intel_crtc_mode_fixup,
15665+ .mode_set = mrst_crtc_mode_set,
15666+ .mode_set_base = psb_intel_pipe_set_base,
15667+ .prepare = psb_intel_crtc_prepare,
15668+ .commit = psb_intel_crtc_commit,
15669+};
15670+
15671+/* MRST_PLATFORM end */
15672diff --git a/drivers/gpu/drm/psb/psb_intel_display.h b/drivers/gpu/drm/psb/psb_intel_display.h
15673new file mode 100644
15674index 0000000..dcb79d4
15675--- /dev/null
15676+++ b/drivers/gpu/drm/psb/psb_intel_display.h
15677@@ -0,0 +1,31 @@
15678+
15679+/* copyright (c) 2008, Intel Corporation
15680+ * Permission is hereby granted, free of charge, to any person obtaining a
15681+ * copy of this software and associated documentation files (the "Software"),
15682+ * to deal in the Software without restriction, including without limitation
15683+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15684+ * and/or sell copies of the Software, and to permit persons to whom the
15685+ * Software is furnished to do so, subject to the following conditions:
15686+ *
15687+ * The above copyright notice and this permission notice (including the next
15688+ * paragraph) shall be included in all copies or substantial portions of the
15689+ * Software.
15690+ *
15691+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15692+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15693+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
15694+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
15695+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
15696+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
15697+ * DEALINGS IN THE SOFTWARE.
15698+ *
15699+ * Authors:
15700+ * Eric Anholt <eric@anholt.net>
15701+ */
15702+
15703+#ifndef _INTEL_DISPLAY_H_
15704+#define _INTEL_DISPLAY_H_
15705+
15706+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
15707+
15708+#endif
15709diff --git a/drivers/gpu/drm/psb/psb_intel_drv.h b/drivers/gpu/drm/psb/psb_intel_drv.h
15710new file mode 100644
15711index 0000000..a64ce59
15712--- /dev/null
15713+++ b/drivers/gpu/drm/psb/psb_intel_drv.h
15714@@ -0,0 +1,246 @@
15715+/*
15716+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
15717+ * Copyright (c) 2007 Intel Corporation
15718+ * Jesse Barnes <jesse.barnes@intel.com>
15719+ */
15720+#ifndef __INTEL_DRV_H__
15721+#define __INTEL_DRV_H__
15722+
15723+#include <linux/i2c.h>
15724+#include <linux/i2c-id.h>
15725+#include <linux/i2c-algo-bit.h>
15726+#include <drm/drm_crtc.h>
15727+
15728+#include <drm/drm_crtc_helper.h>
15729+
15730+/*
15731+ * MOORESTOWN defines
15732+ */
15733+#define MRST_I2C 0
15734+
15735+#define DUMP_REGISTER 0
15736+#define MRST_24BIT_LVDS 1
15737+#define MRST_24BIT_DOT_1 0
15738+#define MRST_24BIT_WA 0
15739+
15740+#define PRINT_JLIU7 0
15741+#define DELAY_TIME1 2000 /* 1000 = 1ms */
15742+
15743+/*
15744+ * Display related stuff
15745+ */
15746+
15747+/* store information about an Ixxx DVO */
15748+/* The i830->i865 use multiple DVOs with multiple i2cs */
15749+/* the i915, i945 have a single sDVO i2c bus - which is different */
15750+#define MAX_OUTPUTS 6
15751+/* maximum connectors per crtcs in the mode set */
15752+#define INTELFB_CONN_LIMIT 4
15753+
15754+#define INTEL_I2C_BUS_DVO 1
15755+#define INTEL_I2C_BUS_SDVO 2
15756+
15757+/* these are outputs from the chip - integrated only
15758+ * external chips are via DVO or SDVO output */
15759+#define INTEL_OUTPUT_UNUSED 0
15760+#define INTEL_OUTPUT_ANALOG 1
15761+#define INTEL_OUTPUT_DVO 2
15762+#define INTEL_OUTPUT_SDVO 3
15763+#define INTEL_OUTPUT_LVDS 4
15764+#define INTEL_OUTPUT_TVOUT 5
15765+#define INTEL_OUTPUT_MIPI 6
15766+
15767+#define INTEL_DVO_CHIP_NONE 0
15768+#define INTEL_DVO_CHIP_LVDS 1
15769+#define INTEL_DVO_CHIP_TMDS 2
15770+#define INTEL_DVO_CHIP_TVOUT 4
15771+
15772+struct opregion_header {
15773+ u8 signature[16];
15774+ u32 size;
15775+ u32 opregion_ver;
15776+ u8 bios_ver[32];
15777+ u8 vbios_ver[16];
15778+ u8 driver_ver[16];
15779+ u32 mboxes;
15780+ u8 reserved[164];
15781+}__attribute__((packed));
15782+
15783+struct opregion_apci {
15784+ /*FIXME: add it later*/
15785+}__attribute__((packed));
15786+
15787+struct opregion_swsci {
15788+ /*FIXME: add it later*/
15789+}__attribute__((packed));
15790+
15791+struct opregion_acpi {
15792+ /*FIXME: add it later*/
15793+}__attribute__((packed));
15794+
15795+struct psb_intel_opregion {
15796+ struct opregion_header * header;
15797+ struct opregion_acpi * acpi;
15798+ struct opregion_swsci * swsci;
15799+ struct opregion_asle * asle;
15800+ int enabled;
15801+};
15802+
15803+/**
 15804+ * Hold information usually put on the device driver privates here,
15805+ * since it needs to be shared across multiple of devices drivers privates.
15806+ */
15807+struct psb_intel_mode_device {
15808+
15809+ /*
15810+ * Abstracted memory manager operations
15811+ */
15812+ void *(*bo_from_handle) (struct drm_device *dev,
15813+ struct drm_file *file_priv,
15814+ unsigned int handle);
15815+ size_t(*bo_size) (struct drm_device *dev, void *bo);
15816+ size_t(*bo_offset) (struct drm_device *dev, void *bo);
15817+ int (*bo_pin_for_scanout) (struct drm_device *dev, void *bo);
15818+ int (*bo_unpin_for_scanout) (struct drm_device *dev, void *bo);
15819+
15820+ /*
15821+ * Cursor
15822+ */
15823+ int cursor_needs_physical;
15824+
15825+ /*
15826+ * LVDS info
15827+ */
15828+ int backlight_duty_cycle; /* restore backlight to this value */
15829+ bool panel_wants_dither;
15830+ struct drm_display_mode *panel_fixed_mode;
15831+ struct drm_display_mode *vbt_mode; /* if any */
15832+
15833+ uint32_t saveBLC_PWM_CTL;
15834+};
15835+
15836+struct psb_intel_i2c_chan {
15837+ /* for getting at dev. private (mmio etc.) */
15838+ struct drm_device *drm_dev;
15839+ u32 reg; /* GPIO reg */
15840+ struct i2c_adapter adapter;
15841+ struct i2c_algo_bit_data algo;
15842+ u8 slave_addr;
15843+};
15844+
15845+struct psb_intel_output {
15846+ struct drm_connector base;
15847+
15848+ struct drm_encoder enc;
15849+ int type;
15850+ struct psb_intel_i2c_chan *i2c_bus; /* for control functions */
15851+ struct psb_intel_i2c_chan *ddc_bus; /* for DDC only stuff */
15852+ bool load_detect_temp;
15853+ void *dev_priv;
15854+
15855+ struct psb_intel_mode_device *mode_dev;
15856+
15857+};
15858+
15859+struct psb_intel_crtc_state {
15860+ uint32_t saveDSPCNTR;
15861+ uint32_t savePIPECONF;
15862+ uint32_t savePIPESRC;
15863+ uint32_t saveDPLL;
15864+ uint32_t saveFP0;
15865+ uint32_t saveFP1;
15866+ uint32_t saveHTOTAL;
15867+ uint32_t saveHBLANK;
15868+ uint32_t saveHSYNC;
15869+ uint32_t saveVTOTAL;
15870+ uint32_t saveVBLANK;
15871+ uint32_t saveVSYNC;
15872+ uint32_t saveDSPSTRIDE;
15873+ uint32_t saveDSPSIZE;
15874+ uint32_t saveDSPPOS;
15875+ uint32_t saveDSPBASE;
15876+ uint32_t savePalette[256];
15877+};
15878+
15879+struct psb_intel_crtc {
15880+ struct drm_crtc base;
15881+ int pipe;
15882+ int plane;
15883+ uint32_t cursor_addr;
15884+ u8 lut_r[256], lut_g[256], lut_b[256];
15885+ int dpms_mode;
15886+ struct psb_intel_framebuffer *fbdev_fb;
15887+ /* a mode_set for fbdev users on this crtc */
15888+ struct drm_mode_set mode_set;
15889+
15890+ /* current bo we scanout from */
15891+ void *scanout_bo;
15892+
15893+ /* current bo we cursor from */
15894+ void *cursor_bo;
15895+
15896+ struct drm_display_mode saved_mode;
15897+ struct drm_display_mode saved_adjusted_mode;
15898+
15899+ struct psb_intel_mode_device *mode_dev;
15900+
15901+/*FIXME: Workaround to avoid MRST block.*/
15902+#ifndef CONFIG_MRST
15903+ /**
15904+ * Saved Crtc HW states
15905+ */
15906+ struct psb_intel_crtc_state * crtc_state;
15907+#endif
15908+};
15909+
15910+#define to_psb_intel_crtc(x) container_of(x, struct psb_intel_crtc, base)
15911+#define to_psb_intel_output(x) container_of(x, struct psb_intel_output, base)
15912+#define enc_to_psb_intel_output(x) container_of(x, struct psb_intel_output, enc)
15913+#define to_psb_intel_framebuffer(x) container_of(x, struct psb_intel_framebuffer, base)
15914+
15915+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
15916+ const u32 reg, const char *name);
15917+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
15918+int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output);
15919+extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output);
15920+
15921+extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
15922+ struct psb_intel_mode_device *mode_dev);
15923+extern void psb_intel_crt_init(struct drm_device *dev);
15924+extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device);
15925+extern void psb_intel_dvo_init(struct drm_device *dev);
15926+extern void psb_intel_tv_init(struct drm_device *dev);
15927+extern void psb_intel_lvds_init(struct drm_device *dev,
15928+ struct psb_intel_mode_device *mode_dev);
15929+extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
15930+extern void mrst_lvds_init(struct drm_device *dev,
15931+ struct psb_intel_mode_device *mode_dev);
15932+extern void mrst_dsi_init(struct drm_device *dev,
15933+ struct psb_intel_mode_device *mode_dev);
15934+
15935+extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
15936+extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
15937+extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
15938+
15939+extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
15940+ *connector);
15941+
15942+extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
15943+ struct drm_crtc *crtc);
15944+extern void psb_intel_wait_for_vblank(struct drm_device *dev);
15945+extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
15946+ int pipe);
15947+extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
15948+ int sdvoB);
15949+extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
15950+extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
15951+ int enable);
15952+extern int intelfb_probe(struct drm_device *dev);
15953+extern int intelfb_remove(struct drm_device *dev,
15954+ struct drm_framebuffer *fb);
15955+extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
15956+ *dev, struct
15957+ drm_mode_fb_cmd
15958+ *mode_cmd,
15959+ void *mm_private);
15960+#endif /* __INTEL_DRV_H__ */
15961diff --git a/drivers/gpu/drm/psb/psb_intel_dsi.c b/drivers/gpu/drm/psb/psb_intel_dsi.c
15962new file mode 100644
15963index 0000000..bcfee62
15964--- /dev/null
15965+++ b/drivers/gpu/drm/psb/psb_intel_dsi.c
15966@@ -0,0 +1,1798 @@
15967+/*
15968+ * Copyright © 2006-2007 Intel Corporation
15969+ *
15970+ * Permission is hereby granted, free of charge, to any person obtaining a
15971+ * copy of this software and associated documentation files (the "Software"),
15972+ * to deal in the Software without restriction, including without limitation
15973+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15974+ * and/or sell copies of the Software, and to permit persons to whom the
15975+ * Software is furnished to do so, subject to the following conditions:
15976+ *
15977+ * The above copyright notice and this permission notice (including the next
15978+ * paragraph) shall be included in all copies or substantial portions of the
15979+ * Software.
15980+ *
15981+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15982+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15983+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
15984+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
15985+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
15986+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
15987+ * DEALINGS IN THE SOFTWARE.
15988+ *
15989+ * Authors:
15990+ * jim liu <jim.liu@intel.com>
15991+ */
15992+
15993+#include <linux/backlight.h>
15994+#include <drm/drm_crtc.h>
15995+#include <drm/drm_edid.h>
15996+
15997+#define DRM_MODE_ENCODER_MIPI 5
15998+#define DRM_MODE_CONNECTOR_MIPI 13
15999+
16000+#if DUMP_REGISTER
16001+extern void dump_dsi_registers(struct drm_device *dev);
16002+#endif /* DUMP_REGISTER */
16003+
16004+int dsi_backlight; /* restore backlight to this value */
16005+
16006+/**
16007+ * Returns the maximum level of the backlight duty cycle field.
16008+ */
16009+static u32 mrst_dsi_get_max_backlight(struct drm_device *dev)
16010+{
16011+#if PRINT_JLIU7
16012+ DRM_INFO("JLIU7 enter mrst_dsi_get_max_backlight \n");
16013+#endif /* PRINT_JLIU7 */
16014+
16015+ return BRIGHTNESS_MAX_LEVEL;
16016+
16017+/* FIXME jliu7 need to revisit */
16018+}
16019+
16020+/**
16021+ * Sets the power state for the panel.
16022+ */
16023+static void mrst_dsi_set_power(struct drm_device *dev,
16024+ struct psb_intel_output *output, bool on)
16025+{
16026+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
16027+ u32 pp_status;
16028+
16029+#if PRINT_JLIU7
16030+ DRM_INFO("JLIU7 enter mrst_dsi_set_power \n");
16031+#endif /* PRINT_JLIU7 */
16032+ /*
 16033+	 * The DSI device must be ready before we can change power state.
16034+ */
16035+ if (!dev_priv->dsi_device_ready)
16036+ {
16037+ return;
16038+ }
16039+
16040+ /*
16041+ * We don't support dual DSI yet. May be in POR in the future.
16042+ */
16043+ if (dev_priv->dual_display)
16044+ {
16045+ return;
16046+ }
16047+
16048+
16049+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
16050+
16051+ if (on) {
16052+ if (dev_priv->dpi & (!dev_priv->dpi_panel_on))
16053+ {
16054+
16055+#if PRINT_JLIU7
16056+ DRM_INFO("JLIU7 mrst_dsi_set_power dpi = on \n");
16057+#endif /* PRINT_JLIU7 */
16058+ REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
16059+#if 0 /*FIXME JLIU7 */
16060+ REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_ON_DATA);
16061+ REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_ON);
16062+#endif /*FIXME JLIU7 */
16063+
16064+ dev_priv->dpi_panel_on = true;
16065+
16066+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
16067+ POWER_TARGET_ON);
16068+ do {
16069+ pp_status = REG_READ(PP_STATUS);
16070+ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
16071+ }
16072+ else if ((!dev_priv->dpi) & (!dev_priv->dbi_panel_on))
16073+ {
16074+#if PRINT_JLIU7
16075+ DRM_INFO("JLIU7 mrst_dsi_set_power dbi = on \n");
16076+#endif /* PRINT_JLIU7 */
16077+
16078+ dev_priv->DBI_CB_pointer = 0;
16079+ /* exit sleep mode */
16080+ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = exit_sleep_mode;
16081+
16082+#if 0 /*FIXME JLIU7 */
 16083+			/* Check MIPI Adapter command registers */
16084+ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
16085+#endif /*FIXME JLIU7 */
16086+
16087+ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
16088+ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
16089+ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
16090+
16091+ /* The host processor must wait five milliseconds after sending exit_sleep_mode command before sending another
16092+ command. This delay allows the supply voltages and clock circuits to stabilize */
16093+ udelay(5000);
16094+
16095+ dev_priv->DBI_CB_pointer = 0;
16096+
16097+ /* set display on */
16098+ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = set_display_on ;
16099+
16100+#if 0 /*FIXME JLIU7 */
 16101+			/* Check MIPI Adapter command registers */
16102+ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
16103+#endif /*FIXME JLIU7 */
16104+
16105+ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
16106+ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
16107+ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
16108+
16109+ dev_priv->dbi_panel_on = true;
16110+ }
16111+/*FIXME JLIU7 */
16112+/* Need to figure out how to control the MIPI panel power on sequence*/
16113+
16114+ }
16115+ else
16116+ {
16117+/*FIXME JLIU7 */
16118+/* Need to figure out how to control the MIPI panel power down sequence*/
16119+ /*
16120+ * Only save the current backlight value if we're going from
16121+ * on to off.
16122+ */
16123+ if (dev_priv->dpi & dev_priv->dpi_panel_on)
16124+ {
16125+#if PRINT_JLIU7
16126+ DRM_INFO("JLIU7 mrst_dsi_set_power dpi = off \n");
16127+#endif /* PRINT_JLIU7 */
16128+
16129+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
16130+ ~POWER_TARGET_ON);
16131+ do {
16132+ pp_status = REG_READ(PP_STATUS);
16133+ } while (pp_status & PP_ON);
16134+
16135+#if 0 /*FIXME JLIU7 */
16136+ REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_OFF_DATA);
16137+ REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_OFF);
16138+#endif /*FIXME JLIU7 */
16139+ REG_WRITE(DPI_CONTROL_REG, DPI_SHUT_DOWN);
16140+ dev_priv->dpi_panel_on = false;
16141+ }
16142+ else if ((!dev_priv->dpi) & dev_priv->dbi_panel_on)
16143+ {
16144+#if PRINT_JLIU7
16145+ DRM_INFO("JLIU7 mrst_dsi_set_power dbi = off \n");
16146+#endif /* PRINT_JLIU7 */
16147+ dev_priv->DBI_CB_pointer = 0;
16148+ /* enter sleep mode */
16149+ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = enter_sleep_mode;
16150+
 16151+			/* Check MIPI Adapter command registers */
16152+ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
16153+
16154+ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
16155+ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
16156+ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
16157+ dev_priv->dbi_panel_on = false;
16158+ }
16159+ }
16160+
16161+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16162+}
16163+
16164+static void mrst_dsi_dpms(struct drm_encoder *encoder, int mode)
16165+{
16166+ struct drm_device *dev = encoder->dev;
16167+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
16168+
16169+#if PRINT_JLIU7
16170+ DRM_INFO("JLIU7 enter mrst_dsi_dpms \n");
16171+#endif /* PRINT_JLIU7 */
16172+
16173+ if (mode == DRM_MODE_DPMS_ON)
16174+ mrst_dsi_set_power(dev, output, true);
16175+ else
16176+ mrst_dsi_set_power(dev, output, false);
16177+
16178+ /* XXX: We never power down the DSI pairs. */
16179+}
16180+
16181+static void mrst_dsi_save(struct drm_connector *connector)
16182+{
16183+#if 0 /* JB: Disable for drop */
16184+ struct drm_device *dev = connector->dev;
16185+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
16186+
16187+#if PRINT_JLIU7
16188+ DRM_INFO("JLIU7 enter mrst_dsi_save \n");
16189+#endif /* PRINT_JLIU7 */
16190+
16191+ dev_priv->savePP_ON = REG_READ(LVDSPP_ON);
16192+ dev_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
16193+ dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
16194+ dev_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
16195+ dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
16196+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
16197+ BACKLIGHT_DUTY_CYCLE_MASK);
16198+
16199+ /*
16200+ * make backlight to full brightness
16201+ */
16202+ dsi_backlight = mrst_dsi_get_max_backlight(dev);
16203+#endif
16204+}
16205+
16206+static void mrst_dsi_restore(struct drm_connector *connector)
16207+{
16208+#if 0 /* JB: Disable for drop */
16209+ struct drm_device *dev = connector->dev;
16210+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
16211+
16212+#if PRINT_JLIU7
16213+ DRM_INFO("JLIU7 enter mrst_dsi_restore \n");
16214+#endif /* PRINT_JLIU7 */
16215+
16216+ REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
16217+ REG_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
16218+ REG_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
16219+ REG_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
16220+ REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
16221+ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
16222+ mrst_dsi_set_power(dev, true);
16223+ else
16224+ mrst_dsi_set_power(dev, false);
16225+#endif
16226+}
16227+
16228+static void mrst_dsi_prepare(struct drm_encoder *encoder)
16229+{
16230+ struct drm_device *dev = encoder->dev;
16231+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
16232+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
16233+
16234+#if PRINT_JLIU7
16235+ DRM_INFO("JLIU7 enter mrst_dsi_prepare \n");
16236+#endif /* PRINT_JLIU7 */
16237+
16238+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
16239+
16240+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
16241+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
16242+ BACKLIGHT_DUTY_CYCLE_MASK);
16243+
16244+ mrst_dsi_set_power(dev, output, false);
16245+
16246+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16247+}
16248+
16249+static void mrst_dsi_commit( struct drm_encoder *encoder)
16250+{
16251+ struct drm_device *dev = encoder->dev;
16252+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
16253+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
16254+
16255+#if PRINT_JLIU7
16256+ DRM_INFO("JLIU7 enter mrst_dsi_commit \n");
16257+#endif /* PRINT_JLIU7 */
16258+
16259+ if (mode_dev->backlight_duty_cycle == 0)
16260+ mode_dev->backlight_duty_cycle =
16261+ mrst_dsi_get_max_backlight(dev);
16262+
16263+ mrst_dsi_set_power(dev, output, true);
16264+
16265+#if DUMP_REGISTER
16266+ dump_dsi_registers(dev);
16267+#endif /* DUMP_REGISTER */
16268+}
16269+
16270+#if 0
16271+/* ************************************************************************* *\
16272+FUNCTION: GetHS_TX_timeoutCount
16273+ `
16274+DESCRIPTION: In burst mode, value greater than one DPI line Time in byte clock
16275+ (txbyteclkhs). To timeout this timer 1+ of the above said value is recommended.
16276+
16277+ In non-burst mode, Value greater than one DPI frame time in byte clock(txbyteclkhs).
16278+ To timeout this timer 1+ of the above said value is recommended.
16279+
16280+\* ************************************************************************* */
16281+static u32 GetHS_TX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
16282+{
16283+
16284+ u32 timeoutCount = 0, HTOT_count = 0, VTOT_count = 0, HTotalPixel = 0;
16285+
16286+ /* Total pixels need to be transfer per line*/
16287+ HTotalPixel = (dev_priv->HsyncWidth + dev_priv->HbackPorch + dev_priv->HfrontPorch) * dev_priv->laneCount + dev_priv->HactiveArea;
16288+
16289+ /* byte count = (pixel count * bits per pixel) / 8 */
16290+ HTOT_count = (HTotalPixel * dev_priv->bpp) / 8;
16291+
16292+ if (dev_priv->videoModeFormat == BURST_MODE)
16293+ {
16294+ timeoutCount = HTOT_count + 1;
16295+#if 1 /*FIXME remove it after power-on */
16296+ VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch + dev_priv->VfrontPorch
16297+ + dev_priv->VsyncWidth;
16298+ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
16299+ timeoutCount = (HTOT_count * VTOT_count) + 1;
16300+#endif
16301+ }
16302+ else
16303+ {
16304+ VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch + dev_priv->VfrontPorch
16305+ + dev_priv->VsyncWidth;
16306+ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
16307+ timeoutCount = (HTOT_count * VTOT_count) + 1;
16308+ }
16309+
16310+ return timeoutCount & 0xFFFF;
16311+}
16312+
16313+/* ************************************************************************* *\
16314+FUNCTION: GetLP_RX_timeoutCount
16315+
16316+DESCRIPTION: The timeout value is protocol specific. Time out value is calculated
16317+ from txclkesc(50ns).
16318+
16319+ Minimum value =
16320+ Time to send one Trigger message = 4 X txclkesc [Escape mode entry sequence)
16321+ + 8-bit trigger message (2x8xtxclkesc)
16322+ +1 txclksesc [stop_state]
16323+ = 21 X txclkesc [ 15h]
16324+
16325+ Maximum Value =
16326+ Time to send a long packet with maximum payload data
16327+ = 4 X txclkesc [Escape mode entry sequence)
16328+ + 8-bit Low power data transmission Command (2x8xtxclkesc)
16329+ + packet header [ 4X8X2X txclkesc]
16330+ +payload [ nX8X2Xtxclkesc]
16331+ +CRC[2X8X2txclkesc]
16332+ +1 txclksesc [stop_state]
16333+ = 117 txclkesc +n[payload in terms of bytes]X16txclkesc.
16334+
16335+\* ************************************************************************* */
16336+static u32 GetLP_RX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
16337+{
16338+
16339+ u32 timeoutCount = 0;
16340+
16341+ if (dev_priv->config_phase)
16342+ {
16343+ /* Assuming 256 byte DDB data.*/
16344+ timeoutCount = 117 + 256 * 16;
16345+ }
16346+ else
16347+ {
16348+ /* For DPI video only mode use the minimum value.*/
16349+ timeoutCount = 0x15;
16350+#if 1 /*FIXME remove it after power-on */
16351+ /* Assuming 256 byte DDB data.*/
16352+ timeoutCount = 117 + 256 * 16;
16353+#endif
16354+ }
16355+
16356+ return timeoutCount;
16357+}
16358+#endif // #if 0 - to avoid warnings
16359+
16360+/* ************************************************************************* *\
16361+FUNCTION: GetHSA_Count
16362+
16363+DESCRIPTION: Shows the horizontal sync value in terms of byte clock
16364+ (txbyteclkhs)
16365+ Minimum HSA period should be sufficient to transmit a hsync start short
16366+ packet(4 bytes)
16367+ i) For Non-burst Mode with sync pulse, Min value � 4 in decimal [plus
16368+ an optional 6 bytes for a zero payload blanking packet]. But if
16369+ the value is less than 10 but more than 4, then this count will
16370+ be added to the HBP�s count for one lane.
16371+ ii) For Non-Burst Sync Event & Burst Mode, there is no HSA, so you
16372+ can program this to zero. If you program this register, these
16373+ byte values will be added to HBP.
16374+ iii) For Burst mode of operation, normally the values programmed in
16375+ terms of byte clock are based on the principle - time for transfering
16376+ HSA in Burst mode is the same as in non-bust mode.
16377+\* ************************************************************************* */
16378+static u32 GetHSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16379+{
16380+ u32 HSA_count;
16381+ u32 HSA_countX8;
16382+
16383+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
16384+ /*HSA_countX8 = dev_priv->HsyncWidth * dev_priv->bpp;
16385+
16386+ if (dev_priv->videoModeFormat == BURST_MODE)
16387+ {
16388+ HSA_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
16389+ }
16390+
16391+ HSA_count = HSA_countX8 / 8;*/
16392+
16393+ /* since mode_set already computed Display Controller timings,
16394+ * read the register and compute mipi timings.
16395+ */
16396+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16397+ HSA_countX8 = REG_READ(HSYNC_A);
16398+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16399+ } else
16400+ HSA_countX8 = dev_priv->saveHSYNC_A;
16401+
16402+ /* Get the hsync pulse width */
16403+ HSA_count = ((HSA_countX8 & 0xffff0000)>>16) - (HSA_countX8 & 0xffff);
16404+ /* compute HSA according to equation:
16405+ (hsync_end - hsync_start) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
16406+ HSA_count = (HSA_count * dev_priv->bpp)/(2 * 8 * 2);
16407+ if (HSA_count < 4) /* minimum value of 4 */
16408+ HSA_count = 4;
16409+
16410+ return HSA_count;
16411+}
16412+
16413+/* ************************************************************************* *\
16414+FUNCTION: GetHBP_Count
16415+
16416+DESCRIPTION: Shows the horizontal back porch value in terms of txbyteclkhs.
16417+ Minimum HBP period should be sufficient to transmit a �hsync end short
16418+ packet(4 bytes) + Blanking packet overhead(6 bytes) + RGB packet header(4 bytes)�
16419+ For Burst mode of operation, normally the values programmed in terms of
16420+ byte clock are based on the principle - time for transfering HBP
16421+ in Burst mode is the same as in non-bust mode.
16422+
16423+ Min value � 14 in decimal [ accounted with zero payload for blanking packet] for one lane.
16424+ Max value � any value greater than 14 based on DPI resolution
16425+\* ************************************************************************* */
16426+static u32 GetHBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16427+{
16428+ u32 HBP_count;
16429+ u32 HBE, HSE;
16430+
16431+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
16432+ /*HBP_countX8 = dev_priv->HbackPorch * dev_priv->bpp;
16433+
16434+ if (dev_priv->videoModeFormat == BURST_MODE)
16435+ {
16436+ HBP_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
16437+ }
16438+
16439+ HBP_count = HBP_countX8 / 8;*/
16440+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16441+ HBE = (REG_READ(HBLANK_A) & 0xffff0000) >> 16;
16442+ HSE = (REG_READ(HSYNC_A) & 0xffff0000) >> 16;
16443+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16444+ } else {
16445+ HBE = (dev_priv->saveHBLANK_A & 0xffff0000) >> 16;
16446+ HSE = (dev_priv->saveHSYNC_A & 0xffff0000) >> 16;
16447+ }
16448+
16449+ /* Get the hsync pulse width */
16450+ HBP_count = HBE - HSE;
16451+ /* compute HSA according to equation:
16452+ * (hblank_end - hsync_end) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
16453+ HBP_count = (HBP_count * dev_priv->bpp)/(2 * 8 * 2);
16454+ if (HBP_count < 8) /* minimum value of 8 */
16455+ HBP_count = 8;
16456+
16457+ return HBP_count;
16458+}
16459+
16460+/* ************************************************************************* *\
16461+FUNCTION: GetHFP_Count
16462+
16463+DESCRIPTION: Shows the horizontal front porch value in terms of txbyteclkhs.
16464+ Minimum HFP period should be sufficient to transmit �RGB Data packet
16465+ footer(2 bytes) + Blanking packet overhead(6 bytes)� for non burst mode.
16466+
16467+ For burst mode, Minimum HFP period should be sufficient to transmit
16468+ Blanking packet overhead(6 bytes)�
16469+
16470+ For Burst mode of operation, normally the values programmed in terms of
16471+ byte clock are based on the principle - time for transfering HFP
16472+ in Burst mode is the same as in non-bust mode.
16473+
16474+ Min value � 8 in decimal for non-burst mode [accounted with zero payload
16475+ for blanking packet] for one lane.
16476+ Min value � 6 in decimal for burst mode for one lane.
16477+
16478+ Max value � any value greater than the minimum vaue based on DPI resolution
16479+\* ************************************************************************* */
16480+static u32 GetHFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16481+{
16482+ u32 HFP_count;
16483+ u32 HBS, HSS;
16484+
16485+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
16486+ /*HFP_countX8 = dev_priv->HfrontPorch * dev_priv->bpp;
16487+
16488+ if (dev_priv->videoModeFormat == BURST_MODE)
16489+ {
16490+ HFP_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
16491+ }
16492+
16493+ HFP_count = HFP_countX8 / 8;*/
16494+
16495+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16496+ HBS = REG_READ(HBLANK_A) & 0xffff;
16497+ HSS = REG_READ(HSYNC_A) & 0xffff;
16498+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16499+ } else {
16500+ HBS = dev_priv->saveHBLANK_A & 0xffff;
16501+ HSS = dev_priv->saveHSYNC_A & 0xffff;
16502+ }
16503+
16504+ /* Get the hsync pulse width */
16505+ HFP_count = HSS - HBS;
16506+ /* compute HSA according to equation:
16507+ * (hblank_end - hsync_end) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
16508+ HFP_count = (HFP_count * dev_priv->bpp)/(2 * 8 * 2);
16509+ if (HFP_count < 8) /* minimum value of 8 */
16510+ HFP_count = 8;
16511+
16512+ return HFP_count;
16513+}
16514+
16515+/* ************************************************************************* *\
16516+FUNCTION: GetHAdr_Count
16517+
16518+DESCRIPTION: Shows the horizontal active area value in terms of txbyteclkhs.
16519+ In Non Burst Mode, Count equal to RGB word count value
16520+
16521+ In Burst Mode, RGB pixel packets are time-compressed, leaving more time
16522+ during a scan line for LP mode (saving power) or for multiplexing
16523+ other transmissions onto the DSI link. Hence, the count equals the
16524+ time in txbyteclkhs for sending time compressed RGB pixels plus
16525+ the time needed for moving to power save mode or the time needed
16526+ for secondary channel to use the DSI link.
16527+
16528+ But if the left out time for moving to low power mode is less than
16529+ 8 txbyteclkhs [2txbyteclkhs for RGB data packet footer and
16530+ 6txbyteclkhs for a blanking packet with zero payload], then
16531+ this count will be added to the HFP's count for one lane.
16532+
16533+ Min value � 8 in decimal for non-burst mode [accounted with zero payload
16534+ for blanking packet] for one lane.
16535+ Min value � 6 in decimal for burst mode for one lane.
16536+
16537+ Max value � any value greater than the minimum vaue based on DPI resolution
16538+\* ************************************************************************* */
16539+static u32 GetHAdr_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16540+{
16541+ u32 HAdr_count;
16542+ u32 Hactive;
16543+
16544+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
16545+ /*HAdr_countX8 = dev_priv->HactiveArea * dev_priv->bpp;
16546+
16547+ if (dev_priv->videoModeFormat == BURST_MODE)
16548+ {
16549+ HAdr_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
16550+ }
16551+
16552+ HAdr_count = HAdr_countX8 / 8;*/
16553+
16554+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16555+ Hactive = (REG_READ(HTOTAL_A) & 0x0fff) + 1;
16556+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16557+ } else
16558+ Hactive = (dev_priv->saveHTOTAL_A & 0x0fff) + 1;
16559+
16560+ /* compute HAdr according to equation:
16561+ * (hactive * 24 bpp/8) / 2 lanes)*/
16562+
16563+ HAdr_count = (Hactive * dev_priv->bpp/8) / 2;
16564+
16565+ return HAdr_count;
16566+}
16567+
16568+/* ************************************************************************* *\
16569+FUNCTION: GetVSA_Count
16570+
16571+DESCRIPTION: Shows the vertical sync value in terms of lines
16572+
16573+\* ************************************************************************* */
16574+static u32 GetVSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16575+{
16576+ u32 VSA_count;
16577+ u32 VSA_countX8;
16578+
16579+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16580+ VSA_countX8 = REG_READ(VSYNC_A);
16581+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16582+ } else
16583+ VSA_countX8 = dev_priv->saveVSYNC_A;
16584+
16585+ /* Get the vsync pulse width */
16586+ VSA_count = ((VSA_countX8 & 0xffff0000)>>16) - (VSA_countX8 & 0xffff);
16587+
16588+ if (VSA_count < 2) /* minimum value of 2 */
16589+ VSA_count = 2;
16590+
16591+ return VSA_count;
16592+}
16593+
16594+/* ************************************************************************* *\
16595+ * FUNCTION: GetVBP_Count
16596+ *
16597+ * DESCRIPTION: Shows the vertical back porch value in lines.
16598+ *
16599+\* ************************************************************************* */
16600+static u32 GetVBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16601+{
16602+ u32 VBP_count;
16603+ u32 VBE, VSE;
16604+
16605+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16606+ VBE = (REG_READ(VBLANK_A) & 0xffff0000) >> 16;
16607+ VSE = (REG_READ(VSYNC_A) & 0xffff0000) >> 16;
16608+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16609+ } else {
16610+ VBE = (dev_priv->saveVBLANK_A & 0xffff0000) >> 16;
16611+ VSE = (dev_priv->saveVSYNC_A & 0xffff0000) >> 16;
16612+ }
16613+
16614+ /* Get the hsync pulse width */
16615+ VBP_count = VBE - VSE;
16616+
16617+ if (VBP_count < 2) /* minimum value of 2 */
16618+ VBP_count = 2;
16619+
16620+ return VBP_count;
16621+}
16622+/* ************************************************************************* *\
16623+ * FUNCTION: GetVFP_Count
16624+ *
16625+ * DESCRIPTION: Shows the vertical front porch value in terms of lines.
16626+ *
16627+\* ************************************************************************* */
16628+static u32 GetVFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
16629+{
16630+ u32 VFP_count;
16631+ u32 VBS, VSS;
16632+
16633+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
16634+ VBS = REG_READ(VBLANK_A) & 0xffff;
16635+ VSS = REG_READ(VSYNC_A) & 0xffff;
16636+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16637+ } else {
16638+ VBS = dev_priv->saveVBLANK_A & 0xffff;
16639+ VSS = dev_priv->saveVSYNC_A & 0xffff;
16640+ }
16641+
16642+ /* Get the hsync pulse width */
16643+ VFP_count = VSS - VBS;
16644+
16645+ if (VFP_count < 2) /* minimum value of 2 */
16646+ VFP_count = 2;
16647+
16648+ return VFP_count;
16649+}
16650+
16651+#if 0
16652+/* ************************************************************************* *\
16653+FUNCTION: GetHighLowSwitchCount
16654+
16655+DESCRIPTION: High speed to low power or Low power to high speed switching time
16656+ in terms byte clock (txbyteclkhs). This value is based on the
16657+ byte clock (txbyteclkhs) and low power clock frequency (txclkesc)
16658+
16659+ Typical value - Number of byte clocks required to switch from low power mode
16660+ to high speed mode after "txrequesths" is asserted.
16661+
16662+ The worst count value among the low to high or high to low switching time
16663+ in terms of txbyteclkhs has to be programmed in this register.
16664+
16665+ Usefull Formulae:
16666+ DDR clock period = 2 times UI
16667+ txbyteclkhs clock = 8 times UI
16668+ Tlpx = 1 / txclkesc
16669+ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE (from Standard D-PHY spec)
16670+ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] + 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
16671+ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE (from Standard D-PHY spec)
16672+ Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
16673+\* ************************************************************************* */
16674+static u32 GetHighLowSwitchCount(DRM_DRIVER_PRIVATE_T *dev_priv)
16675+{
16676+ u32 HighLowSwitchCount, HighToLowSwitchCount, LowToHighSwitchCount;
16677+
16678+/* ************************************************************************* *\
16679+ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE (from Standard D-PHY spec)
16680+ Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
16681+
16682+ Tlpx = 50 ns, Using max txclkesc (20MHz)
16683+
16684+ txbyteclkhs_period = 4000 / dev_priv->DDR_Clock; in ns
16685+ UI_period = 500 / dev_priv->DDR_Clock; in ns
16686+
16687+ HS_to_LP = Ths-trail = 18 * UI_period + 4 * Tlpx
16688+ = 9000 / dev_priv->DDR_Clock + 200;
16689+
16690+ HighToLowSwitchCount = HS_to_LP / txbyteclkhs_period
16691+ = (9000 / dev_priv->DDR_Clock + 200) / (4000 / dev_priv->DDR_Clock)
16692+ = (9000 + (200 * dev_priv->DDR_Clock)) / 4000
16693+
16694+\* ************************************************************************* */
16695+ HighToLowSwitchCount = (9000 + (200 * dev_priv->DDR_Clock)) / 4000 + 1;
16696+
16697+/* ************************************************************************* *\
16698+ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE (from Standard D-PHY spec)
16699+ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] + 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
16700+
16701+ LP_to_HS = 10 * UI_period + 5 * Tlpx =
16702+ = 5000 / dev_priv->DDR_Clock + 250;
16703+
16704+ LowToHighSwitchCount = LP_to_HS / txbyteclkhs_period
16705+ = (5000 / dev_priv->DDR_Clock + 250) / (4000 / dev_priv->DDR_Clock)
16706+ = (5000 + (250 * dev_priv->DDR_Clock)) / 4000
16707+
16708+\* ************************************************************************* */
16709+ LowToHighSwitchCount = (5000 + (250 * dev_priv->DDR_Clock)) / 4000 + 1;
16710+
16711+ if (HighToLowSwitchCount > LowToHighSwitchCount)
16712+ {
16713+ HighLowSwitchCount = HighToLowSwitchCount;
16714+ }
16715+ else
16716+ {
16717+ HighLowSwitchCount = LowToHighSwitchCount;
16718+ }
16719+
16720+
16721+ /* FIXME jliu need to fine tune the above formulae and remove the following after power on */
16722+ if (HighLowSwitchCount < 0x1f)
16723+ HighLowSwitchCount = 0x1f;
16724+
16725+ return HighLowSwitchCount;
16726+}
16727+
16728+/* ************************************************************************* *\
16729+FUNCTION: mrst_gen_long_write
16730+ `
16731+DESCRIPTION:
16732+
16733+\* ************************************************************************* */
16734+static void mrst_gen_long_write(struct drm_device *dev, u32 *data, u16 wc,u8 vc)
16735+{
16736+ u32 gen_data_reg = HS_GEN_DATA_REG;
16737+ u32 gen_ctrl_reg = HS_GEN_CTRL_REG;
16738+ u32 date_full_bit = HS_DATA_FIFO_FULL;
16739+ u32 control_full_bit = HS_CTRL_FIFO_FULL;
16740+ u16 wc_saved = wc;
16741+
16742+#if PRINT_JLIU7
16743+ DRM_INFO("JLIU7 enter mrst_gen_long_write \n");
16744+#endif /* PRINT_JLIU7 */
16745+
16746+ /* sanity check */
16747+ if (vc > 4)
16748+ {
16749+ DRM_ERROR(KERN_ERR "MIPI Virtual channel Can't greater than 4. \n");
16750+ return;
16751+ }
16752+
16753+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
16754+
16755+ if (0) /* FIXME JLIU7 check if it is in LP*/
16756+ {
16757+ gen_data_reg = LP_GEN_DATA_REG;
16758+ gen_ctrl_reg = LP_GEN_CTRL_REG;
16759+ date_full_bit = LP_DATA_FIFO_FULL;
16760+ control_full_bit = LP_CTRL_FIFO_FULL;
16761+ }
16762+
16763+ while (wc >= 4)
16764+ {
16765+ /* Check if MIPI IP generic data fifo is not full */
16766+ while ((REG_READ(GEN_FIFO_STAT_REG) & date_full_bit) == date_full_bit);
16767+
16768+ /* write to data buffer */
16769+ REG_WRITE(gen_data_reg, *data);
16770+
16771+ wc -= 4;
16772+ data ++;
16773+ }
16774+
16775+ switch (wc)
16776+ {
16777+ case 1:
16778+ REG_WRITE8(gen_data_reg, *((u8 *)data));
16779+ break;
16780+ case 2:
16781+ REG_WRITE16(gen_data_reg, *((u16 *)data));
16782+ break;
16783+ case 3:
16784+ REG_WRITE16(gen_data_reg, *((u16 *)data));
16785+ data = (u32*)((u8*) data + 2);
16786+ REG_WRITE8(gen_data_reg, *((u8 *)data));
16787+ break;
16788+ }
16789+
16790+ /* Check if MIPI IP generic control fifo is not full */
16791+ while ((REG_READ(GEN_FIFO_STAT_REG) & control_full_bit) == control_full_bit);
16792+ /* write to control buffer */
16793+ REG_WRITE(gen_ctrl_reg, 0x29 | (wc_saved << 8) | (vc << 6));
16794+
16795+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
16796+}
16797+
16798+/* ************************************************************************* *\
16799+FUNCTION: mrst_init_HIMAX_MIPI_bridge
16800+ `
16801+DESCRIPTION:
16802+
16803+\* ************************************************************************* */
16804+static void mrst_init_HIMAX_MIPI_bridge(struct drm_device *dev)
16805+{
16806+ u32 gen_data[2];
16807+ u16 wc = 0;
16808+ u8 vc =0;
16809+ u32 gen_data_intel = 0x200105;
16810+
16811+#if PRINT_JLIU7
16812+ DRM_INFO("JLIU7 enter mrst_init_HIMAX_MIPI_bridge \n");
16813+#endif /* PRINT_JLIU7 */
16814+
16815+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
16816+
16817+ /* exit sleep mode */
16818+ wc = 0x5;
16819+ gen_data[0] = gen_data_intel | (0x11 << 24);
16820+ gen_data[1] = 0;
16821+ mrst_gen_long_write(dev, gen_data, wc, vc);
16822+
16823+ /* set_pixel_format */
16824+ gen_data[0] = gen_data_intel | (0x3A << 24);
16825+ gen_data[1] = 0x77;
16826+ mrst_gen_long_write(dev, gen_data, wc, vc);
16827+
16828+ /* Set resolution for (800X480) */
16829+ wc = 0x8;
16830+ gen_data[0] = gen_data_intel | (0x2A << 24);
16831+ gen_data[1] = 0x1F030000;
16832+ mrst_gen_long_write(dev, gen_data, wc, vc);
16833+ gen_data[0] = gen_data_intel | (0x2B << 24);
16834+ gen_data[1] = 0xDF010000;
16835+ mrst_gen_long_write(dev, gen_data, wc, vc);
16836+
16837+ /* System control */
16838+ wc = 0x6;
16839+ gen_data[0] = gen_data_intel | (0xEE << 24);
16840+ gen_data[1] = 0x10FA;
16841+ mrst_gen_long_write(dev, gen_data, wc, vc);
16842+
16843+ /* INPUT TIMING FOR TEST PATTERN(800X480) */
16844+ /* H-size */
16845+ gen_data[1] = 0x2000;
16846+ mrst_gen_long_write(dev, gen_data, wc, vc);
16847+ gen_data[1] = 0x0301;
16848+ mrst_gen_long_write(dev, gen_data, wc, vc);
16849+
16850+ /* V-size */
16851+ gen_data[1] = 0xE002;
16852+ mrst_gen_long_write(dev, gen_data, wc, vc);
16853+ gen_data[1] = 0x0103;
16854+ mrst_gen_long_write(dev, gen_data, wc, vc);
16855+
16856+ /* H-total */
16857+ gen_data[1] = 0x2004;
16858+ mrst_gen_long_write(dev, gen_data, wc, vc);
16859+ gen_data[1] = 0x0405;
16860+ mrst_gen_long_write(dev, gen_data, wc, vc);
16861+
16862+ /* V-total */
16863+ gen_data[1] = 0x0d06;
16864+ mrst_gen_long_write(dev, gen_data, wc, vc);
16865+ gen_data[1] = 0x0207;
16866+ mrst_gen_long_write(dev, gen_data, wc, vc);
16867+
16868+ /* H-blank */
16869+ gen_data[1] = 0x0308;
16870+ mrst_gen_long_write(dev, gen_data, wc, vc);
16871+ gen_data[1] = 0x0009;
16872+ mrst_gen_long_write(dev, gen_data, wc, vc);
16873+
16874+ /* H-blank */
16875+ gen_data[1] = 0x030A;
16876+ mrst_gen_long_write(dev, gen_data, wc, vc);
16877+ gen_data[1] = 0x000B;
16878+ mrst_gen_long_write(dev, gen_data, wc, vc);
16879+
16880+ /* H-start */
16881+ gen_data[1] = 0xD80C;
16882+ mrst_gen_long_write(dev, gen_data, wc, vc);
16883+ gen_data[1] = 0x000D;
16884+ mrst_gen_long_write(dev, gen_data, wc, vc);
16885+
16886+ /* V-start */
16887+ gen_data[1] = 0x230E;
16888+ mrst_gen_long_write(dev, gen_data, wc, vc);
16889+ gen_data[1] = 0x000F;
16890+ mrst_gen_long_write(dev, gen_data, wc, vc);
16891+
16892+ /* RGB domain */
16893+ gen_data[1] = 0x0027;
16894+ mrst_gen_long_write(dev, gen_data, wc, vc);
16895+
16896+ /* INP_FORM Setting */
16897+ /* set_1 */
16898+ gen_data[1] = 0x1C10;
16899+ mrst_gen_long_write(dev, gen_data, wc, vc);
16900+
16901+ /* set_2 */
16902+ gen_data[1] = 0x0711;
16903+ mrst_gen_long_write(dev, gen_data, wc, vc);
16904+
16905+ /* set_3 */
16906+ gen_data[1] = 0x0012;
16907+ mrst_gen_long_write(dev, gen_data, wc, vc);
16908+
16909+ /* set_4 */
16910+ gen_data[1] = 0x0013;
16911+ mrst_gen_long_write(dev, gen_data, wc, vc);
16912+
16913+ /* set_5 */
16914+ gen_data[1] = 0x2314;
16915+ mrst_gen_long_write(dev, gen_data, wc, vc);
16916+
16917+ /* set_6 */
16918+ gen_data[1] = 0x0015;
16919+ mrst_gen_long_write(dev, gen_data, wc, vc);
16920+
16921+ /* set_7 */
16922+ gen_data[1] = 0x2316;
16923+ mrst_gen_long_write(dev, gen_data, wc, vc);
16924+
16925+ /* set_8 */
16926+ gen_data[1] = 0x0017;
16927+ mrst_gen_long_write(dev, gen_data, wc, vc);
16928+
16929+ /* set_1 */
16930+ gen_data[1] = 0x0330;
16931+ mrst_gen_long_write(dev, gen_data, wc, vc);
16932+
16933+ /* FRC Setting */
16934+ /* FRC_set_2 */
16935+ gen_data[1] = 0x237A;
16936+ mrst_gen_long_write(dev, gen_data, wc, vc);
16937+
16938+ /* FRC_set_3 */
16939+ gen_data[1] = 0x4C7B;
16940+ mrst_gen_long_write(dev, gen_data, wc, vc);
16941+
16942+ /* FRC_set_4 */
16943+ gen_data[1] = 0x037C;
16944+ mrst_gen_long_write(dev, gen_data, wc, vc);
16945+
16946+ /* FRC_set_5 */
16947+ gen_data[1] = 0x3482;
16948+ mrst_gen_long_write(dev, gen_data, wc, vc);
16949+
16950+ /* FRC_set_7 */
16951+ gen_data[1] = 0x1785;
16952+ mrst_gen_long_write(dev, gen_data, wc, vc);
16953+
16954+#if 0
16955+ /* FRC_set_8 */
16956+ gen_data[1] = 0xD08F;
16957+ mrst_gen_long_write(dev, gen_data, wc, vc);
16958+#endif
16959+
16960+ /* OUTPUT TIMING FOR TEST PATTERN (800X480) */
16961+ /* out_htotal */
16962+ gen_data[1] = 0x2090;
16963+ mrst_gen_long_write(dev, gen_data, wc, vc);
16964+ gen_data[1] = 0x0491;
16965+ mrst_gen_long_write(dev, gen_data, wc, vc);
16966+
16967+ /* out_hsync */
16968+ gen_data[1] = 0x0392;
16969+ mrst_gen_long_write(dev, gen_data, wc, vc);
16970+ gen_data[1] = 0x0093;
16971+ mrst_gen_long_write(dev, gen_data, wc, vc);
16972+
16973+ /* out_hstart */
16974+ gen_data[1] = 0xD894;
16975+ mrst_gen_long_write(dev, gen_data, wc, vc);
16976+ gen_data[1] = 0x0095;
16977+ mrst_gen_long_write(dev, gen_data, wc, vc);
16978+
16979+ /* out_hsize */
16980+ gen_data[1] = 0x2096;
16981+ mrst_gen_long_write(dev, gen_data, wc, vc);
16982+ gen_data[1] = 0x0397;
16983+ mrst_gen_long_write(dev, gen_data, wc, vc);
16984+
16985+ /* out_vtotal */
16986+ gen_data[1] = 0x0D98;
16987+ mrst_gen_long_write(dev, gen_data, wc, vc);
16988+ gen_data[1] = 0x0299;
16989+ mrst_gen_long_write(dev, gen_data, wc, vc);
16990+
16991+ /* out_vsync */
16992+ gen_data[1] = 0x039A;
16993+ mrst_gen_long_write(dev, gen_data, wc, vc);
16994+ gen_data[1] = 0x009B;
16995+ mrst_gen_long_write(dev, gen_data, wc, vc);
16996+
16997+ /* out_vstart */
16998+ gen_data[1] = 0x239C;
16999+ mrst_gen_long_write(dev, gen_data, wc, vc);
17000+ gen_data[1] = 0x009D;
17001+ mrst_gen_long_write(dev, gen_data, wc, vc);
17002+
17003+ /* out_vsize */
17004+ gen_data[1] = 0xE09E;
17005+ mrst_gen_long_write(dev, gen_data, wc, vc);
17006+ gen_data[1] = 0x019F;
17007+ mrst_gen_long_write(dev, gen_data, wc, vc);
17008+
17009+ /* FRC_set_6 */
17010+ gen_data[1] = 0x9084;
17011+ mrst_gen_long_write(dev, gen_data, wc, vc);
17012+
17013+ /* Other setting */
17014+ gen_data[1] = 0x0526;
17015+ mrst_gen_long_write(dev, gen_data, wc, vc);
17016+
17017+ /* RBG domain */
17018+ gen_data[1] = 0x1177;
17019+ mrst_gen_long_write(dev, gen_data, wc, vc);
17020+
17021+ /* rgbw */
17022+ /* set_1 */
17023+ gen_data[1] = 0xD28F;
17024+ mrst_gen_long_write(dev, gen_data, wc, vc);
17025+
17026+ /* set_2 */
17027+ gen_data[1] = 0x02D0;
17028+ mrst_gen_long_write(dev, gen_data, wc, vc);
17029+
17030+ /* set_3 */
17031+ gen_data[1] = 0x08D1;
17032+ mrst_gen_long_write(dev, gen_data, wc, vc);
17033+
17034+ /* set_4 */
17035+ gen_data[1] = 0x05D2;
17036+ mrst_gen_long_write(dev, gen_data, wc, vc);
17037+
17038+ /* set_5 */
17039+ gen_data[1] = 0x24D4;
17040+ mrst_gen_long_write(dev, gen_data, wc, vc);
17041+
17042+ /* set_6 */
17043+ gen_data[1] = 0x00D5;
17044+ mrst_gen_long_write(dev, gen_data, wc, vc);
17045+ gen_data[1] = 0x02D7;
17046+ mrst_gen_long_write(dev, gen_data, wc, vc);
17047+ gen_data[1] = 0x00D8;
17048+ mrst_gen_long_write(dev, gen_data, wc, vc);
17049+
17050+ gen_data[1] = 0x48F3;
17051+ mrst_gen_long_write(dev, gen_data, wc, vc);
17052+ gen_data[1] = 0xD4F2;
17053+ mrst_gen_long_write(dev, gen_data, wc, vc);
17054+ gen_data[1] = 0x3D8E;
17055+ mrst_gen_long_write(dev, gen_data, wc, vc);
17056+ gen_data[1] = 0x60FD;
17057+ mrst_gen_long_write(dev, gen_data, wc, vc);
17058+ gen_data[1] = 0x00B5;
17059+ mrst_gen_long_write(dev, gen_data, wc, vc);
17060+ gen_data[1] = 0x48F4;
17061+ mrst_gen_long_write(dev, gen_data, wc, vc);
17062+
17063+ /* inside patten */
17064+ gen_data[1] = 0x0060;
17065+ mrst_gen_long_write(dev, gen_data, wc, vc);
17066+
17067+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
17068+}
17069+#endif
17070+static void mrst_wait_for_LP_CTRL_FIFO(struct drm_device *dev)
17071+{
17072+ while(REG_READ(GEN_FIFO_STAT_REG) & LP_CTRL_FIFO_FULL);
17073+}
17074+
17075+/* ************************************************************************* *\
17076+FUNCTION: mrst_init_NSC_MIPI_bridge
17077+ `
17078+DESCRIPTION:
17079+
17080+\* ************************************************************************* */
17081+static void mrst_init_NSC_MIPI_bridge(struct drm_device *dev)
17082+{
17083+
17084+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
17085+#if PRINT_JLIU7
17086+ DRM_INFO("JLIU7 enter mrst_init_NSC_MIPI_bridge.\n");
17087+#endif /* PRINT_JLIU7 */
17088+ /* Program MIPI IP to 50MHz DSI, Non-Burst mode with sync event,
17089+ 1 or 2 Data Lanes */
17090+
17091+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
17092+
17093+ mrst_wait_for_LP_CTRL_FIFO(dev);
17094+ /* enable RGB24*/
17095+ REG_WRITE(LP_GEN_CTRL_REG, 0x003205e3);
17096+
17097+ mrst_wait_for_LP_CTRL_FIFO(dev);
17098+ /* enable all error reporting*/
17099+ REG_WRITE(LP_GEN_CTRL_REG, 0x000040e3);
17100+ mrst_wait_for_LP_CTRL_FIFO(dev);
17101+ REG_WRITE(LP_GEN_CTRL_REG, 0x000041e3);
17102+
17103+ mrst_wait_for_LP_CTRL_FIFO(dev);
17104+ /* enable 2 data lane; video shaping & error reporting */
17105+ REG_WRITE(LP_GEN_CTRL_REG, 0x00a842e3); /* 0x006842e3 for 1 data lane */
17106+
17107+ mrst_wait_for_LP_CTRL_FIFO(dev);
17108+ /* HS timeout */
17109+ REG_WRITE(LP_GEN_CTRL_REG, 0x009243e3);
17110+
17111+ mrst_wait_for_LP_CTRL_FIFO(dev);
17112+ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
17113+ REG_WRITE(LP_GEN_CTRL_REG, 0x00e645e3);
17114+
17115+ mrst_wait_for_LP_CTRL_FIFO(dev);
17116+ /* enable all virtual channels */
17117+ REG_WRITE(LP_GEN_CTRL_REG, 0x000f46e3);
17118+
17119+ mrst_wait_for_LP_CTRL_FIFO(dev);
17120+ /* set output strength to low-drive */
17121+ REG_WRITE(LP_GEN_CTRL_REG, 0x00007de3);
17122+
17123+ mrst_wait_for_LP_CTRL_FIFO(dev);
17124+ if (dev_priv->sku_83)
17125+ {
17126+ /* set escape clock to divede by 8 */
17127+ REG_WRITE(LP_GEN_CTRL_REG, 0x000044e3);
17128+ }
17129+ else if(dev_priv->sku_100L)
17130+ {
17131+ /* set escape clock to divede by 16 */
17132+ REG_WRITE(LP_GEN_CTRL_REG, 0x001044e3);
17133+ }
17134+ else if(dev_priv->sku_100)
17135+ {
17136+ /* set escape clock to divede by 32*/
17137+ REG_WRITE(LP_GEN_CTRL_REG, 0x003044e3);
17138+
17139+ mrst_wait_for_LP_CTRL_FIFO(dev);
17140+ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
17141+ REG_WRITE(LP_GEN_CTRL_REG, 0x00ec45e3);
17142+ }
17143+
17144+ mrst_wait_for_LP_CTRL_FIFO(dev);
17145+ /* CFG_VALID=1; RGB_CLK_EN=1. */
17146+ REG_WRITE(LP_GEN_CTRL_REG, 0x00057fe3);
17147+
17148+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
17149+}
17150+
17151+static void mrst_dsi_mode_set(struct drm_encoder *encoder,
17152+ struct drm_display_mode *mode,
17153+ struct drm_display_mode *adjusted_mode)
17154+{
17155+ struct drm_device *dev = encoder->dev;
17156+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
17157+ u32 dsiFuncPrgValue = 0;
17158+ u32 SupportedFormat = 0;
17159+ u32 channelNumber = 0;
17160+ u32 DBI_dataWidth = 0;
17161+ u32 resolution = 0;
17162+ u32 mipiport = 0;
17163+ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
17164+
17165+#if PRINT_JLIU7
17166+ DRM_INFO("JLIU7 enter mrst_dsi_mode_set \n");
17167+#endif /* PRINT_JLIU7 */
17168+
17169+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
17170+
17171+ switch (dev_priv->bpp)
17172+ {
17173+ case 16:
17174+ SupportedFormat = RGB_565_FMT;
17175+ break;
17176+ case 18:
17177+ SupportedFormat = RGB_666_FMT;
17178+ break;
17179+ case 24:
17180+ SupportedFormat = RGB_888_FMT;
17181+ break;
17182+ default:
17183+ DRM_INFO("mrst_dsi_mode_set, invalid bpp \n");
17184+ break;
17185+ }
17186+
17187+ resolution = dev_priv->HactiveArea | (dev_priv->VactiveArea << RES_V_POS);
17188+
17189+ if (dev_priv->dpi)
17190+ {
17191+ drm_connector_property_get_value(&enc_to_psb_intel_output(encoder)->base, dev->mode_config.scaling_mode_property, &curValue);
17192+
17193+ if (curValue == DRM_MODE_SCALE_NO_SCALE)
17194+ REG_WRITE(PFIT_CONTROL, 0);
17195+ else if (curValue == DRM_MODE_SCALE_ASPECT) {
17196+ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) || (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
17197+ if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) == (mode->hdisplay * adjusted_mode->crtc_vdisplay))
17198+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
17199+ else if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) > (mode->hdisplay * adjusted_mode->crtc_vdisplay))
17200+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_PILLARBOX);
17201+ else
17202+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_LETTERBOX);
17203+ } else
17204+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
17205+ } else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/
17206+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
17207+
17208+ /* Enable MIPI Port */
17209+ mipiport = MIPI_PORT_EN | MIPI_BORDER_EN;
17210+ REG_WRITE(MIPI, mipiport);
17211+
17212+ /* JLIU7_FIXME set MIPI clock ratio to 1:1 for NSC init */
17213+ REG_WRITE(MIPI_CONTROL_REG, 0x00000018);
17214+
17215+ /* Enable all the error interrupt */
17216+ REG_WRITE(INTR_EN_REG, 0xffffffff);
17217+ REG_WRITE(TURN_AROUND_TIMEOUT_REG, 0x0000000A);
17218+ REG_WRITE(DEVICE_RESET_REG, 0x000000ff); /* old value = 0x00000015 may depends on the DSI RX device*/
17219+ REG_WRITE(INIT_COUNT_REG, 0x00000fff); /* Minimum value = 0x000007d0 */
17220+
17221+ SupportedFormat <<= FMT_DPI_POS;
17222+ dsiFuncPrgValue = dev_priv->laneCount | SupportedFormat;
17223+ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
17224+
17225+ REG_WRITE(DPI_RESOLUTION_REG, resolution);
17226+ /*REG_WRITE(DBI_RESOLUTION_REG, 0x00000000);*/
17227+
17228+ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, GetVSA_Count(dev, dev_priv));
17229+ REG_WRITE(VERT_BACK_PORCH_COUNT_REG,
17230+ GetVBP_Count(dev, dev_priv));
17231+ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG,
17232+ GetVFP_Count(dev, dev_priv));
17233+
17234+ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG,
17235+ GetHSA_Count(dev, dev_priv));
17236+ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG,
17237+ GetHBP_Count(dev, dev_priv));
17238+ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG,
17239+ GetHFP_Count(dev, dev_priv));
17240+ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG,
17241+ GetHAdr_Count(dev, dev_priv));
17242+
17243+ REG_WRITE(VIDEO_FMT_REG, dev_priv->videoModeFormat);
17244+ }
17245+ else
17246+ {
17247+ /* JLIU7 FIXME VIRTUAL_CHANNEL_NUMBER_1 or VIRTUAL_CHANNEL_NUMBER_0*/
17248+ channelNumber = VIRTUAL_CHANNEL_NUMBER_1 << DBI_CHANNEL_NUMBER_POS;
17249+ DBI_dataWidth = DBI_DATA_WIDTH_16BIT << DBI_DATA_WIDTH_POS;
17250+ dsiFuncPrgValue = dev_priv->laneCount | channelNumber | DBI_dataWidth;
17251+ /* JLIU7 FIXME */
17252+ SupportedFormat <<= FMT_DBI_POS;
17253+ dsiFuncPrgValue |= SupportedFormat;
17254+ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
17255+
17256+ REG_WRITE(DPI_RESOLUTION_REG, 0x00000000);
17257+ REG_WRITE(DBI_RESOLUTION_REG, resolution);
17258+ }
17259+
17260+#if 1 /*JLIU7_PO hard code for NSC PO */
17261+ REG_WRITE(HS_TX_TIMEOUT_REG, 0x90000);
17262+ REG_WRITE(LP_RX_TIMEOUT_REG, 0xffff);
17263+
17264+ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, 0x46);
17265+#else /*JLIU7_PO hard code for NSC PO */
17266+ REG_WRITE(HS_TX_TIMEOUT_REG, GetHS_TX_timeoutCount(dev_priv));
17267+ REG_WRITE(LP_RX_TIMEOUT_REG, GetLP_RX_timeoutCount(dev_priv));
17268+
17269+ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, GetHighLowSwitchCount(dev_priv));
17270+#endif /*JLIU7_PO hard code for NSC PO */
17271+
17272+
17273+ REG_WRITE(EOT_DISABLE_REG, 0x00000000);
17274+
17275+ /* FIXME JLIU7 for NSC PO */
17276+ REG_WRITE(LP_BYTECLK_REG, 0x00000004);
17277+
17278+ REG_WRITE(DEVICE_READY_REG, 0x00000001);
17279+ REG_WRITE(DPI_CONTROL_REG, 0x00000002); /* Turn On */
17280+
17281+ dev_priv->dsi_device_ready = true;
17282+
17283+#if 0 /*JLIU7_PO */
17284+ mrst_init_HIMAX_MIPI_bridge(dev);
17285+#endif /*JLIU7_PO */
17286+ mrst_init_NSC_MIPI_bridge(dev);
17287+
17288+ if (dev_priv->sku_100L)
17289+ /* Set DSI link to 100MHz; 2:1 clock ratio */
17290+ REG_WRITE(MIPI_CONTROL_REG, 0x00000009);
17291+
17292+ REG_WRITE(PIPEACONF, dev_priv->pipeconf);
17293+ REG_READ(PIPEACONF);
17294+
17295+ /* Wait for 20ms for the pipe enable to take effect. */
17296+ udelay(20000);
17297+
17298+ REG_WRITE(DSPACNTR, dev_priv->dspcntr);
17299+
17300+ /* Wait for 20ms for the plane enable to take effect. */
17301+ udelay(20000);
17302+
17303+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
17304+}
17305+
17306+/**
17307+ * Detect the MIPI connection.
17308+ *
17309+ * This always returns CONNECTOR_STATUS_CONNECTED.
17310+ * This connector should only have
17311+ * been set up if the MIPI was actually connected anyway.
17312+ */
17313+static enum drm_connector_status mrst_dsi_detect(struct drm_connector
17314+ *connector)
17315+{
17316+#if PRINT_JLIU7
17317+ DRM_INFO("JLIU7 enter mrst_dsi_detect \n");
17318+#endif /* PRINT_JLIU7 */
17319+
17320+ return connector_status_connected;
17321+}
17322+
17323+/**
17324+ * Return the list of MIPI DDB modes if available.
17325+ */
17326+static int mrst_dsi_get_modes(struct drm_connector *connector)
17327+{
17328+ struct drm_device *dev = connector->dev;
17329+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
17330+ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
17331+
17332+/* FIXME get the MIPI DDB modes */
17333+
17334+ /* Didn't get an DDB, so
17335+ * Set wide sync ranges so we get all modes
17336+ * handed to valid_mode for checking
17337+ */
17338+ connector->display_info.min_vfreq = 0;
17339+ connector->display_info.max_vfreq = 200;
17340+ connector->display_info.min_hfreq = 0;
17341+ connector->display_info.max_hfreq = 200;
17342+
17343+ if (mode_dev->panel_fixed_mode != NULL) {
17344+ struct drm_display_mode *mode =
17345+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
17346+ drm_mode_probed_add(connector, mode);
17347+ return 1;
17348+ }
17349+
17350+ return 0;
17351+}
17352+
17353+static const struct drm_encoder_helper_funcs mrst_dsi_helper_funcs = {
17354+ .dpms = mrst_dsi_dpms,
17355+ .mode_fixup = psb_intel_lvds_mode_fixup,
17356+ .prepare = mrst_dsi_prepare,
17357+ .mode_set = mrst_dsi_mode_set,
17358+ .commit = mrst_dsi_commit,
17359+};
17360+
17361+static const struct drm_connector_helper_funcs
17362+ mrst_dsi_connector_helper_funcs = {
17363+ .get_modes = mrst_dsi_get_modes,
17364+ .mode_valid = psb_intel_lvds_mode_valid,
17365+ .best_encoder = psb_intel_best_encoder,
17366+};
17367+
17368+static const struct drm_connector_funcs mrst_dsi_connector_funcs = {
17369+ .dpms = psb_intel_lvds_connector_dpms,
17370+ .save = mrst_dsi_save,
17371+ .restore = mrst_dsi_restore,
17372+ .detect = mrst_dsi_detect,
17373+ .fill_modes = drm_helper_probe_single_connector_modes,
17374+ .set_property = psb_intel_lvds_set_property,
17375+ .destroy = psb_intel_lvds_destroy,
17376+};
17377+
17378+/** Returns the panel fixed mode from configuration. */
17379+/** FIXME JLIU7 need to revist it. */
17380+struct drm_display_mode *mrst_dsi_get_configuration_mode(struct drm_device *dev)
17381+{
17382+ struct drm_display_mode *mode;
17383+ struct drm_psb_private *dev_priv =
17384+ (struct drm_psb_private *) dev->dev_private;
17385+ u8 panel_index = dev_priv->gct_data.bpi;
17386+ u8 panel_type = dev_priv->gct_data.pt;
17387+ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
17388+ bool use_gct = false;
17389+
17390+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
17391+ if (!mode)
17392+ return NULL;
17393+
17394+ if (dev_priv->vbt_data.Size != 0x00) /*if non-zero, vbt is present*/
17395+ if ((1<<panel_index) & panel_type) /* if non-zero,*/
17396+ use_gct = true; /*then mipi panel.*/
17397+
17398+ if (use_gct) {
17399+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
17400+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
17401+ mode->hsync_start = mode->hdisplay + \
17402+ ((ti->hsync_offset_hi << 8) | \
17403+ ti->hsync_offset_lo);
17404+ mode->hsync_end = mode->hsync_start + \
17405+ ((ti->hsync_pulse_width_hi << 8) | \
17406+ ti->hsync_pulse_width_lo);
17407+ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
17408+ ti->hblank_lo);
17409+ mode->vsync_start = \
17410+ mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
17411+ ti->vsync_offset_lo);
17412+ mode->vsync_end = \
17413+ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
17414+ ti->vsync_pulse_width_lo);
17415+ mode->vtotal = mode->vdisplay + \
17416+ ((ti->vblank_hi << 8) | ti->vblank_lo);
17417+ mode->clock = ti->pixel_clock * 10;
17418+#if 1
17419+ printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
17420+ printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
17421+ printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
17422+ printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
17423+ printk(KERN_INFO "htotal is %d\n", mode->htotal);
17424+ printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
17425+ printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
17426+ printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
17427+ printk(KERN_INFO "clock is %d\n", mode->clock);
17428+#endif
17429+
17430+ } else {
17431+#if 1 /*FIXME jliu7 remove it later */
17432+ /* copy from SV - hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */
17433+ mode->hdisplay = 800;
17434+ mode->vdisplay = 480;
17435+ mode->hsync_start = 808;
17436+ mode->hsync_end = 848;
17437+ mode->htotal = 880;
17438+ mode->vsync_start = 482;
17439+ mode->vsync_end = 483;
17440+ mode->vtotal = 486;
17441+ mode->clock = 33264;
17442+#endif /*FIXME jliu7 remove it later */
17443+
17444+#if 0 /*FIXME jliu7 remove it later */
17445+ /* hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */
17446+ mode->hdisplay = 800;
17447+ mode->vdisplay = 480;
17448+ mode->hsync_start = 836;
17449+ mode->hsync_end = 846;
17450+ mode->htotal = 1056;
17451+ mode->vsync_start = 489;
17452+ mode->vsync_end = 491;
17453+ mode->vtotal = 525;
17454+ mode->clock = 33264;
17455+#endif /*FIXME jliu7 remove it later */
17456+
17457+#if 0 /*FIXME jliu7 remove it later */
17458+ /* hard coded fixed mode for LVDS 800x480 */
17459+ mode->hdisplay = 800;
17460+ mode->vdisplay = 480;
17461+ mode->hsync_start = 801;
17462+ mode->hsync_end = 802;
17463+ mode->htotal = 1024;
17464+ mode->vsync_start = 481;
17465+ mode->vsync_end = 482;
17466+ mode->vtotal = 525;
17467+ mode->clock = 30994;
17468+#endif /*FIXME jliu7 remove it later */
17469+
17470+#if 0 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */
17471+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
17472+ mode->hdisplay = 1024;
17473+ mode->vdisplay = 600;
17474+ mode->hsync_start = 1072;
17475+ mode->hsync_end = 1104;
17476+ mode->htotal = 1184;
17477+ mode->vsync_start = 603;
17478+ mode->vsync_end = 604;
17479+ mode->vtotal = 608;
17480+ mode->clock = 53990;
17481+#endif /*FIXME jliu7 remove it later */
17482+
17483+#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
17484+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
17485+ mode->hdisplay = 1024;
17486+ mode->vdisplay = 600;
17487+ mode->hsync_start = 1104;
17488+ mode->hsync_end = 1136;
17489+ mode->htotal = 1184;
17490+ mode->vsync_start = 603;
17491+ mode->vsync_end = 604;
17492+ mode->vtotal = 608;
17493+ mode->clock = 53990;
17494+#endif /*FIXME jliu7 remove it later */
17495+
17496+#if 0 /*FIXME jliu7 remove it later */
17497+ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
17498+ mode->hdisplay = 1024;
17499+ mode->vdisplay = 600;
17500+ mode->hsync_start = 1124;
17501+ mode->hsync_end = 1204;
17502+ mode->htotal = 1312;
17503+ mode->vsync_start = 607;
17504+ mode->vsync_end = 610;
17505+ mode->vtotal = 621;
17506+ mode->clock = 48885;
17507+#endif /*FIXME jliu7 remove it later */
17508+
17509+#if 0 /*FIXME jliu7 remove it later */
17510+ /* hard coded fixed mode for LVDS 1024x768 */
17511+ mode->hdisplay = 1024;
17512+ mode->vdisplay = 768;
17513+ mode->hsync_start = 1048;
17514+ mode->hsync_end = 1184;
17515+ mode->htotal = 1344;
17516+ mode->vsync_start = 771;
17517+ mode->vsync_end = 777;
17518+ mode->vtotal = 806;
17519+ mode->clock = 65000;
17520+#endif /*FIXME jliu7 remove it later */
17521+
17522+#if 0 /*FIXME jliu7 remove it later */
17523+ /* hard coded fixed mode for LVDS 1366x768 */
17524+ mode->hdisplay = 1366;
17525+ mode->vdisplay = 768;
17526+ mode->hsync_start = 1430;
17527+ mode->hsync_end = 1558;
17528+ mode->htotal = 1664;
17529+ mode->vsync_start = 769;
17530+ mode->vsync_end = 770;
17531+ mode->vtotal = 776;
17532+ mode->clock = 77500;
17533+#endif /*FIXME jliu7 remove it later */
17534+ }
17535+ drm_mode_set_name(mode);
17536+ drm_mode_set_crtcinfo(mode, 0);
17537+
17538+ return mode;
17539+}
17540+
17541+/* ************************************************************************* *\
17542+FUNCTION: mrstDSI_clockInit
17543+ `
17544+DESCRIPTION:
17545+
17546+\* ************************************************************************* */
17547+static u32 sku_83_mipi_2xclk[4] = {166667, 333333, 444444, 666667};
17548+static u32 sku_100_mipi_2xclk[4] = {200000, 400000, 533333, 800000};
17549+static u32 sku_100L_mipi_2xclk[4] = {100000, 200000, 266667, 400000};
17550+#define MIPI_2XCLK_COUNT 0x04
17551+
17552+static bool mrstDSI_clockInit(DRM_DRIVER_PRIVATE_T *dev_priv)
17553+{
17554+ u32 Htotal = 0, Vtotal = 0, RRate = 0, mipi_2xclk = 0;
17555+ u32 i = 0;
17556+ u32 *p_mipi_2xclk = NULL;
17557+
17558+#if 0 /* JLIU7_PO old values */
17559+ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
17560+ dev_priv->pixelClock = 33264; /*KHz*/
17561+ dev_priv->HsyncWidth = 10;
17562+ dev_priv->HbackPorch = 210;
17563+ dev_priv->HfrontPorch = 36;
17564+ dev_priv->HactiveArea = 800;
17565+ dev_priv->VsyncWidth = 2;
17566+ dev_priv->VbackPorch = 34;
17567+ dev_priv->VfrontPorch = 9;
17568+ dev_priv->VactiveArea = 480;
17569+ dev_priv->bpp = 24;
17570+
17571+ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
17572+ dev_priv->dbi_pixelClock = 33264; /*KHz*/
17573+ dev_priv->dbi_HsyncWidth = 10;
17574+ dev_priv->dbi_HbackPorch = 210;
17575+ dev_priv->dbi_HfrontPorch = 36;
17576+ dev_priv->dbi_HactiveArea = 800;
17577+ dev_priv->dbi_VsyncWidth = 2;
17578+ dev_priv->dbi_VbackPorch = 34;
17579+ dev_priv->dbi_VfrontPorch = 9;
17580+ dev_priv->dbi_VactiveArea = 480;
17581+ dev_priv->dbi_bpp = 24;
17582+#else /* JLIU7_PO old values */
17583+ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
17584+ /* FIXME Pre-Si value, 1 or 2 lanes; 50MHz; Non-Burst w/ sync event */
17585+ dev_priv->pixelClock = 33264; /*KHz*/
17586+ dev_priv->HsyncWidth = 10;
17587+ dev_priv->HbackPorch = 8;
17588+ dev_priv->HfrontPorch = 3;
17589+ dev_priv->HactiveArea = 800;
17590+ dev_priv->VsyncWidth = 2;
17591+ dev_priv->VbackPorch = 3;
17592+ dev_priv->VfrontPorch = 2;
17593+ dev_priv->VactiveArea = 480;
17594+ dev_priv->bpp = 24;
17595+
17596+ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
17597+ dev_priv->dbi_pixelClock = 33264; /*KHz*/
17598+ dev_priv->dbi_HsyncWidth = 10;
17599+ dev_priv->dbi_HbackPorch = 8;
17600+ dev_priv->dbi_HfrontPorch = 3;
17601+ dev_priv->dbi_HactiveArea = 800;
17602+ dev_priv->dbi_VsyncWidth = 2;
17603+ dev_priv->dbi_VbackPorch = 3;
17604+ dev_priv->dbi_VfrontPorch = 2;
17605+ dev_priv->dbi_VactiveArea = 480;
17606+ dev_priv->dbi_bpp = 24;
17607+#endif /* JLIU7_PO old values */
17608+
17609+ Htotal = dev_priv->HsyncWidth + dev_priv->HbackPorch + dev_priv->HfrontPorch + dev_priv->HactiveArea;
17610+ Vtotal = dev_priv->VsyncWidth + dev_priv->VbackPorch + dev_priv->VfrontPorch + dev_priv->VactiveArea;
17611+
17612+ RRate = ((dev_priv->pixelClock * 1000) / (Htotal * Vtotal)) + 1;
17613+
17614+ dev_priv->RRate = RRate;
17615+
17616+ /* ddr clock frequence = (pixel clock frequence * bits per pixel)/2*/
17617+ mipi_2xclk = (dev_priv->pixelClock * dev_priv->bpp) / dev_priv->laneCount; /* KHz */
17618+ dev_priv->DDR_Clock_Calculated = mipi_2xclk / 2; /* KHz */
17619+
17620+ DRM_DEBUG("mrstDSI_clockInit RRate = %d, mipi_2xclk = %d. \n", RRate, mipi_2xclk);
17621+
17622+ if (dev_priv->sku_100)
17623+ {
17624+ p_mipi_2xclk = sku_100_mipi_2xclk;
17625+ }
17626+ else if (dev_priv->sku_100L)
17627+ {
17628+ p_mipi_2xclk = sku_100L_mipi_2xclk;
17629+ }
17630+ else
17631+ {
17632+ p_mipi_2xclk = sku_83_mipi_2xclk;
17633+ }
17634+
17635+ for (; i < MIPI_2XCLK_COUNT; i++)
17636+ {
17637+ if ((dev_priv->DDR_Clock_Calculated * 2) < p_mipi_2xclk[i])
17638+ break;
17639+ }
17640+
17641+ if (i == MIPI_2XCLK_COUNT)
17642+ {
17643+ DRM_DEBUG("mrstDSI_clockInit the DDR clock is too big, DDR_Clock_Calculated is = %d\n", dev_priv->DDR_Clock_Calculated);
17644+ return false;
17645+ }
17646+
17647+ dev_priv->DDR_Clock = p_mipi_2xclk[i] / 2;
17648+ dev_priv->ClockBits = i;
17649+
17650+#if 1 /* FIXME remove it after power on*/
17651+ DRM_DEBUG("mrstDSI_clockInit, mipi_2x_clock_divider = 0x%x, DDR_Clock_Calculated is = %d\n", i, dev_priv->DDR_Clock_Calculated);
17652+#endif /* FIXME remove it after power on*/
17653+
17654+ return true;
17655+}
17656+
17657+/**
17658+ * mrst_dsi_init - setup MIPI connectors on this device
17659+ * @dev: drm device
17660+ *
17661+ * Create the connector, try to figure out what
17662+ * modes we can display on the MIPI panel (if present).
17663+ */
17664+void mrst_dsi_init(struct drm_device *dev,
17665+ struct psb_intel_mode_device *mode_dev)
17666+{
17667+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
17668+ struct psb_intel_output *psb_intel_output;
17669+ struct drm_connector *connector;
17670+ struct drm_encoder *encoder;
17671+
17672+#if PRINT_JLIU7
17673+ DRM_INFO("JLIU7 enter mrst_dsi_init \n");
17674+#endif /* PRINT_JLIU7 */
17675+
17676+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
17677+ if (!psb_intel_output)
17678+ return;
17679+
17680+ psb_intel_output->mode_dev = mode_dev;
17681+ connector = &psb_intel_output->base;
17682+ encoder = &psb_intel_output->enc;
17683+ drm_connector_init(dev, &psb_intel_output->base,
17684+ &mrst_dsi_connector_funcs,
17685+ DRM_MODE_CONNECTOR_MIPI);
17686+
17687+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
17688+ DRM_MODE_ENCODER_MIPI);
17689+
17690+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
17691+ &psb_intel_output->enc);
17692+ psb_intel_output->type = INTEL_OUTPUT_MIPI;
17693+
17694+ drm_encoder_helper_add(encoder, &mrst_dsi_helper_funcs);
17695+ drm_connector_helper_add(connector,
17696+ &mrst_dsi_connector_helper_funcs);
17697+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
17698+ connector->interlace_allowed = false;
17699+ connector->doublescan_allowed = false;
17700+
17701+ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
17702+ drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL);
17703+
17704+ dsi_backlight = BRIGHTNESS_MAX_LEVEL;
17705+ blc_pol = BLC_POLARITY_NORMAL;
17706+ blc_freq = 0xc8;
17707+
17708+ /*
17709+ * MIPI discovery:
17710+ * 1) check for DDB data
17711+ * 2) check for VBT data
17712+ * 4) make sure lid is open
17713+ * if closed, act like it's not there for now
17714+ */
17715+
17716+ /* FIXME jliu7 we only support DPI */
17717+ dev_priv->dpi = true;
17718+
17719+ /* FIXME hard coded 4 lanes for Himax HX8858-A, 2 lanes for NSC LM2550 */
17720+ dev_priv->laneCount = 2;
17721+
17722+ /* FIXME hard coded for NSC PO. */
17723+ /* We only support BUST_MODE */
17724+ dev_priv->videoModeFormat = NON_BURST_MODE_SYNC_EVENTS; /* BURST_MODE */
17725+ /* FIXME change it to true if GET_DDB works */
17726+ dev_priv->config_phase = false;
17727+
17728+ if (!mrstDSI_clockInit(dev_priv))
17729+ {
17730+ DRM_DEBUG("Can't iniitialize MRST DSI clock.\n");
17731+#if 0 /* FIXME JLIU7 */
17732+ goto failed_find;
17733+#endif /* FIXME JLIU7 */
17734+ }
17735+
17736+ /*
17737+ * If we didn't get DDB data, try geting panel timing
17738+ * from configuration data
17739+ */
17740+ mode_dev->panel_fixed_mode = mrst_dsi_get_configuration_mode(dev);
17741+
17742+ if (mode_dev->panel_fixed_mode) {
17743+ mode_dev->panel_fixed_mode->type |=
17744+ DRM_MODE_TYPE_PREFERRED;
17745+ goto out; /* FIXME: check for quirks */
17746+ }
17747+
17748+ /* If we still don't have a mode after all that, give up. */
17749+ if (!mode_dev->panel_fixed_mode) {
17750+ DRM_DEBUG
17751+ ("Found no modes on the lvds, ignoring the LVDS\n");
17752+ goto failed_find;
17753+ }
17754+
17755+out:
17756+ drm_sysfs_connector_add(connector);
17757+ return;
17758+
17759+failed_find:
17760+ DRM_DEBUG("No MIIP modes found, disabling.\n");
17761+ drm_encoder_cleanup(encoder);
17762+ drm_connector_cleanup(connector);
17763+ kfree(connector);
17764+}
17765diff --git a/drivers/gpu/drm/psb/psb_intel_i2c.c b/drivers/gpu/drm/psb/psb_intel_i2c.c
17766new file mode 100644
17767index 0000000..60165fd
17768--- /dev/null
17769+++ b/drivers/gpu/drm/psb/psb_intel_i2c.c
17770@@ -0,0 +1,179 @@
17771+/*
17772+ * Copyright © 2006-2007 Intel Corporation
17773+ *
17774+ * Permission is hereby granted, free of charge, to any person obtaining a
17775+ * copy of this software and associated documentation files (the "Software"),
17776+ * to deal in the Software without restriction, including without limitation
17777+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17778+ * and/or sell copies of the Software, and to permit persons to whom the
17779+ * Software is furnished to do so, subject to the following conditions:
17780+ *
17781+ * The above copyright notice and this permission notice (including the next
17782+ * paragraph) shall be included in all copies or substantial portions of the
17783+ * Software.
17784+ *
17785+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17786+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17787+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17788+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17789+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17790+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
17791+ * DEALINGS IN THE SOFTWARE.
17792+ *
17793+ * Authors:
17794+ * Eric Anholt <eric@anholt.net>
17795+ */
17796+/*
17797+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
17798+ * Jesse Barnes <jesse.barnes@intel.com>
17799+ */
17800+
17801+#include <linux/i2c.h>
17802+#include <linux/i2c-id.h>
17803+#include <linux/i2c-algo-bit.h>
17804+
17805+/*
17806+ * Intel GPIO access functions
17807+ */
17808+
17809+#define I2C_RISEFALL_TIME 20
17810+
17811+static int get_clock(void *data)
17812+{
17813+ struct psb_intel_i2c_chan *chan = data;
17814+ struct drm_device *dev = chan->drm_dev;
17815+ u32 val;
17816+
17817+ val = REG_READ(chan->reg);
17818+ return (val & GPIO_CLOCK_VAL_IN) != 0;
17819+}
17820+
17821+static int get_data(void *data)
17822+{
17823+ struct psb_intel_i2c_chan *chan = data;
17824+ struct drm_device *dev = chan->drm_dev;
17825+ u32 val;
17826+
17827+ val = REG_READ(chan->reg);
17828+ return (val & GPIO_DATA_VAL_IN) != 0;
17829+}
17830+
17831+static void set_clock(void *data, int state_high)
17832+{
17833+ struct psb_intel_i2c_chan *chan = data;
17834+ struct drm_device *dev = chan->drm_dev;
17835+ u32 reserved = 0, clock_bits;
17836+
17837+ /* On most chips, these bits must be preserved in software. */
17838+ if (!IS_I830(dev) && !IS_845G(dev))
17839+ reserved =
17840+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
17841+ GPIO_CLOCK_PULLUP_DISABLE);
17842+
17843+ if (state_high)
17844+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
17845+ else
17846+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
17847+ GPIO_CLOCK_VAL_MASK;
17848+ REG_WRITE(chan->reg, reserved | clock_bits);
17849+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
17850+}
17851+
17852+static void set_data(void *data, int state_high)
17853+{
17854+ struct psb_intel_i2c_chan *chan = data;
17855+ struct drm_device *dev = chan->drm_dev;
17856+ u32 reserved = 0, data_bits;
17857+
17858+ /* On most chips, these bits must be preserved in software. */
17859+ if (!IS_I830(dev) && !IS_845G(dev))
17860+ reserved =
17861+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
17862+ GPIO_CLOCK_PULLUP_DISABLE);
17863+
17864+ if (state_high)
17865+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
17866+ else
17867+ data_bits =
17868+ GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
17869+ GPIO_DATA_VAL_MASK;
17870+
17871+ REG_WRITE(chan->reg, reserved | data_bits);
17872+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
17873+}
17874+
17875+/**
17876+ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
17877+ * @dev: DRM device
17878+ * @output: driver specific output device
17879+ * @reg: GPIO reg to use
17880+ * @name: name for this bus
17881+ *
17882+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
17883+ * in output probing and control (e.g. DDC or SDVO control functions).
17884+ *
17885+ * Possible values for @reg include:
17886+ * %GPIOA
17887+ * %GPIOB
17888+ * %GPIOC
17889+ * %GPIOD
17890+ * %GPIOE
17891+ * %GPIOF
17892+ * %GPIOG
17893+ * %GPIOH
17894+ * see PRM for details on how these different busses are used.
17895+ */
17896+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
17897+ const u32 reg, const char *name)
17898+{
17899+ struct psb_intel_i2c_chan *chan;
17900+
17901+ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
17902+ if (!chan)
17903+ goto out_free;
17904+
17905+ chan->drm_dev = dev;
17906+ chan->reg = reg;
17907+ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
17908+ chan->adapter.owner = THIS_MODULE;
17909+ chan->adapter.algo_data = &chan->algo;
17910+ chan->adapter.dev.parent = &dev->pdev->dev;
17911+ chan->algo.setsda = set_data;
17912+ chan->algo.setscl = set_clock;
17913+ chan->algo.getsda = get_data;
17914+ chan->algo.getscl = get_clock;
17915+ chan->algo.udelay = 20;
17916+ chan->algo.timeout = usecs_to_jiffies(2200);
17917+ chan->algo.data = chan;
17918+
17919+ i2c_set_adapdata(&chan->adapter, chan);
17920+
17921+ if (i2c_bit_add_bus(&chan->adapter))
17922+ goto out_free;
17923+
17924+ /* JJJ: raise SCL and SDA? */
17925+ set_data(chan, 1);
17926+ set_clock(chan, 1);
17927+ udelay(20);
17928+
17929+ return chan;
17930+
17931+out_free:
17932+ kfree(chan);
17933+ return NULL;
17934+}
17935+
17936+/**
17937+ * psb_intel_i2c_destroy - unregister and free i2c bus resources
17938+ * @output: channel to free
17939+ *
17940+ * Unregister the adapter from the i2c layer, then free the structure.
17941+ */
17942+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
17943+{
17944+ if (!chan)
17945+ return;
17946+
17947+ i2c_del_adapter(&chan->adapter);
17948+ kfree(chan);
17949+}
17950diff --git a/drivers/gpu/drm/psb/psb_intel_lvds.c b/drivers/gpu/drm/psb/psb_intel_lvds.c
17951new file mode 100644
17952index 0000000..4fa29f8
17953--- /dev/null
17954+++ b/drivers/gpu/drm/psb/psb_intel_lvds.c
17955@@ -0,0 +1,1343 @@
17956+/*
17957+ * Copyright © 2006-2007 Intel Corporation
17958+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
17959+ *
17960+ * Permission is hereby granted, free of charge, to any person obtaining a
17961+ * copy of this software and associated documentation files (the "Software"),
17962+ * to deal in the Software without restriction, including without limitation
17963+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17964+ * and/or sell copies of the Software, and to permit persons to whom the
17965+ * Software is furnished to do so, subject to the following conditions:
17966+ *
17967+ * The above copyright notice and this permission notice (including the next
17968+ * paragraph) shall be included in all copies or substantial portions of the
17969+ * Software.
17970+ *
17971+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17972+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17973+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17974+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17975+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17976+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
17977+ * DEALINGS IN THE SOFTWARE.
17978+ *
17979+ * Authors:
17980+ * Eric Anholt <eric@anholt.net>
17981+ * Dave Airlie <airlied@linux.ie>
17982+ * Jesse Barnes <jesse.barnes@intel.com>
17983+ */
17984+
17985+#include <linux/i2c.h>
17986+#include <drm/drm_crtc.h>
17987+#include <drm/drm_edid.h>
17988+
17989+#include "psb_intel_bios.h"
17990+#include "psb_powermgmt.h"
17991+
17992+/* MRST defines start */
17993+uint8_t blc_type;
17994+uint8_t blc_pol;
17995+uint8_t blc_freq;
17996+uint8_t blc_minbrightness;
17997+uint8_t blc_i2caddr;
17998+uint8_t blc_brightnesscmd;
17999+int lvds_backlight; /* restore backlight to this value */
18000+
18001+u32 CoreClock;
18002+u32 PWMControlRegFreq;
18003+
18004+/**
18005+ * LVDS I2C backlight control macros
18006+ */
18007+#define BRIGHTNESS_MAX_LEVEL 100
18008+#define BRIGHTNESS_MASK 0xFF
18009+#define BLC_I2C_TYPE 0x01
18010+#define BLC_PWM_TYPT 0x02
18011+
18012+#define BLC_POLARITY_NORMAL 0
18013+#define BLC_POLARITY_INVERSE 1
18014+
18015+#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
18016+#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
18017+#define PSB_BLC_PWM_PRECISION_FACTOR (10)
18018+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
18019+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
18020+
18021+struct psb_intel_lvds_priv {
18022+ /**
18023+ * Saved LVDO output states
18024+ */
18025+ uint32_t savePP_ON;
18026+ uint32_t savePP_OFF;
18027+ uint32_t saveLVDS;
18028+ uint32_t savePP_CONTROL;
18029+ uint32_t savePP_CYCLE;
18030+ uint32_t savePFIT_CONTROL;
18031+ uint32_t savePFIT_PGM_RATIOS;
18032+ uint32_t saveBLC_PWM_CTL;
18033+};
18034+
18035+/* MRST defines end */
18036+
18037+/**
18038+ * Returns the maximum level of the backlight duty cycle field.
18039+ */
18040+static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
18041+{
18042+ struct drm_psb_private *dev_priv = dev->dev_private;
18043+ u32 retVal;
18044+
18045+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
18046+ retVal = ((REG_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
18047+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
18048+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
18049+ } else
18050+ retVal = ((dev_priv->saveBLC_PWM_CTL & BACKLIGHT_MODULATION_FREQ_MASK) >>
18051+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
18052+
18053+ return retVal;
18054+}
18055+
18056+/**
18057+ * Set LVDS backlight level by I2C command
18058+ */
18059+static int psb_lvds_i2c_set_brightness(struct drm_device * dev, unsigned int level)
18060+ {
18061+ struct drm_psb_private * dev_priv =
18062+ (struct drm_psb_private*)dev->dev_private;
18063+
18064+ struct psb_intel_i2c_chan * lvds_i2c_bus = dev_priv->lvds_i2c_bus;
18065+ u8 out_buf[2];
18066+ unsigned int blc_i2c_brightness;
18067+
18068+ struct i2c_msg msgs[] = {
18069+ {
18070+ .addr = lvds_i2c_bus->slave_addr,
18071+ .flags = 0,
18072+ .len = 2,
18073+ .buf = out_buf,
18074+ }
18075+ };
18076+
18077+ blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
18078+ BRIGHTNESS_MASK /
18079+ BRIGHTNESS_MAX_LEVEL);
18080+
18081+ if(dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE) {
18082+ blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
18083+ }
18084+
18085+
18086+ out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
18087+ out_buf[1] = (u8)blc_i2c_brightness;
18088+
18089+ if(i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
18090+ DRM_DEBUG("I2C set brightness done.(command, value) (%d, %d)\n", blc_brightnesscmd, blc_i2c_brightness);
18091+ return 0;
18092+ }
18093+
18094+ DRM_ERROR("I2C transfer error\n");
18095+ return -1;
18096+}
18097+
18098+
18099+static int psb_lvds_pwm_set_brightness(struct drm_device * dev, int level)
18100+{
18101+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
18102+
18103+ u32 max_pwm_blc;
18104+ u32 blc_pwm_duty_cycle;
18105+
18106+ max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
18107+
18108+ /*BLC_PWM_CTL Should be initiated while backlight device init*/
18109+ BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ ) == 0);
18110+
18111+ blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
18112+
18113+ if(dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE){
18114+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
18115+ }
18116+
18117+ blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
18118+ REG_WRITE(BLC_PWM_CTL,
18119+ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
18120+ (blc_pwm_duty_cycle));
18121+
18122+ return 0;
18123+}
18124+
18125+/**
18126+ * Set LVDS backlight level either by I2C or PWM
18127+ */
18128+void psb_intel_lvds_set_brightness(struct drm_device * dev, int level)
18129+{
18130+ /*u32 blc_pwm_ctl;*/
18131+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
18132+
18133+ DRM_DEBUG("backlight level is %d\n", level);
18134+
18135+ if(!dev_priv->lvds_bl) {
18136+ DRM_ERROR("NO LVDS Backlight Info\n");
18137+ return;
18138+ }
18139+
18140+ if(IS_MRST(dev)) {
18141+ DRM_ERROR("psb_intel_lvds_set_brightness called from MRST...not expected\n");
18142+ return;
18143+ }
18144+
18145+ if(dev_priv->lvds_bl->type == BLC_I2C_TYPE) {
18146+ psb_lvds_i2c_set_brightness(dev, level);
18147+ } else {
18148+ psb_lvds_pwm_set_brightness(dev, level);
18149+ }
18150+}
18151+
18152+/**
18153+ * Sets the backlight level.
18154+ *
18155+ * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight().
18156+ */
18157+static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
18158+{
18159+ struct drm_psb_private *dev_priv = dev->dev_private;
18160+ u32 blc_pwm_ctl;
18161+
18162+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false) ) {
18163+ blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
18164+ REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
18165+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
18166+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
18167+ } else {
18168+ blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL & ~BACKLIGHT_DUTY_CYCLE_MASK;
18169+ dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
18170+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
18171+ }
18172+}
18173+
18174+/**
18175+ * Sets the power state for the panel.
18176+ */
18177+static void psb_intel_lvds_set_power(struct drm_device *dev,
18178+ struct psb_intel_output *output, bool on)
18179+{
18180+ u32 pp_status;
18181+
18182+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
18183+
18184+ if (on) {
18185+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
18186+ POWER_TARGET_ON);
18187+ do {
18188+ pp_status = REG_READ(PP_STATUS);
18189+ } while ((pp_status & PP_ON) == 0);
18190+
18191+ psb_intel_lvds_set_backlight(dev,
18192+ output->
18193+ mode_dev->backlight_duty_cycle);
18194+ } else {
18195+ psb_intel_lvds_set_backlight(dev, 0);
18196+
18197+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
18198+ ~POWER_TARGET_ON);
18199+ do {
18200+ pp_status = REG_READ(PP_STATUS);
18201+ } while (pp_status & PP_ON);
18202+ }
18203+
18204+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
18205+}
18206+
18207+static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
18208+{
18209+ struct drm_device *dev = encoder->dev;
18210+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
18211+
18212+ if (mode == DRM_MODE_DPMS_ON)
18213+ psb_intel_lvds_set_power(dev, output, true);
18214+ else
18215+ psb_intel_lvds_set_power(dev, output, false);
18216+
18217+ /* XXX: We never power down the LVDS pairs. */
18218+}
18219+
18220+static void psb_intel_lvds_save(struct drm_connector *connector)
18221+{
18222+ struct drm_device *dev = connector->dev;
18223+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
18224+ struct psb_intel_output * psb_intel_output = to_psb_intel_output(connector);
18225+ struct psb_intel_lvds_priv * lvds_priv =
18226+ (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
18227+
18228+ if(IS_POULSBO(dev)) {
18229+ lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
18230+ lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
18231+ lvds_priv->saveLVDS = REG_READ(LVDS);
18232+ lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
18233+ lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
18234+ /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
18235+ lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
18236+ lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
18237+ lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
18238+
18239+ /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
18240+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
18241+ BACKLIGHT_DUTY_CYCLE_MASK);
18242+
18243+ /*
18244+ * If the light is off at server startup, just make it full brightness
18245+ */
18246+ if (dev_priv->backlight_duty_cycle == 0)
18247+ dev_priv->backlight_duty_cycle =
18248+ psb_intel_lvds_get_max_backlight(dev);
18249+
18250+ DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", lvds_priv->savePP_ON,
18251+ lvds_priv->savePP_OFF,
18252+ lvds_priv->saveLVDS,
18253+ lvds_priv->savePP_CONTROL,
18254+ lvds_priv->savePP_CYCLE,
18255+ lvds_priv->saveBLC_PWM_CTL);
18256+ }
18257+}
18258+
18259+static void psb_intel_lvds_restore(struct drm_connector *connector)
18260+{
18261+ struct drm_device *dev = connector->dev;
18262+ u32 pp_status;
18263+
18264+ /*struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;*/
18265+ struct psb_intel_output * psb_intel_output = to_psb_intel_output(connector);
18266+ struct psb_intel_lvds_priv * lvds_priv =
18267+ (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
18268+
18269+ if(IS_POULSBO(dev)) {
18270+ DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", lvds_priv->savePP_ON,
18271+ lvds_priv->savePP_OFF,
18272+ lvds_priv->saveLVDS,
18273+ lvds_priv->savePP_CONTROL,
18274+ lvds_priv->savePP_CYCLE,
18275+ lvds_priv->saveBLC_PWM_CTL);
18276+
18277+ REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
18278+ REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
18279+ REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
18280+ REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
18281+ REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
18282+ /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
18283+ REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
18284+ REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
18285+ REG_WRITE(LVDS, lvds_priv->saveLVDS);
18286+
18287+ if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
18288+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
18289+ POWER_TARGET_ON);
18290+ do {
18291+ pp_status = REG_READ(PP_STATUS);
18292+ } while((pp_status & PP_ON) == 0);
18293+ } else {
18294+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
18295+ ~POWER_TARGET_ON);
18296+ do {
18297+ pp_status = REG_READ(PP_STATUS);
18298+ }while(pp_status & PP_ON);
18299+ }
18300+ }
18301+}
18302+
18303+static int psb_intel_lvds_mode_valid(struct drm_connector *connector,
18304+ struct drm_display_mode *mode)
18305+{
18306+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
18307+ struct drm_display_mode *fixed_mode =
18308+ psb_intel_output->mode_dev->panel_fixed_mode;
18309+
18310+#if PRINT_JLIU7
18311+ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_valid \n");
18312+#endif /* PRINT_JLIU7 */
18313+
18314+ /* just in case */
18315+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
18316+ return MODE_NO_DBLESCAN;
18317+
18318+ /* just in case */
18319+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
18320+ return MODE_NO_INTERLACE;
18321+
18322+ if (fixed_mode) {
18323+ if (mode->hdisplay > fixed_mode->hdisplay)
18324+ return MODE_PANEL;
18325+ if (mode->vdisplay > fixed_mode->vdisplay)
18326+ return MODE_PANEL;
18327+ }
18328+ return MODE_OK;
18329+}
18330+
18331+static bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
18332+ struct drm_display_mode *mode,
18333+ struct drm_display_mode *adjusted_mode)
18334+{
18335+ struct psb_intel_mode_device *mode_dev =
18336+ enc_to_psb_intel_output(encoder)->mode_dev;
18337+ struct drm_device *dev = encoder->dev;
18338+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc);
18339+ struct drm_encoder *tmp_encoder;
18340+
18341+#if PRINT_JLIU7
18342+ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_fixup \n");
18343+#endif /* PRINT_JLIU7 */
18344+
18345+ /* Should never happen!! */
18346+ if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
18347+ printk(KERN_ERR
18348+ "Can't support LVDS/MIPI on pipe B on MRST\n");
18349+ return false;
18350+ } else if (!IS_MRST(dev) && !IS_I965G(dev)
18351+ && psb_intel_crtc->pipe == 0) {
18352+ printk(KERN_ERR "Can't support LVDS on pipe A\n");
18353+ return false;
18354+ }
18355+ /* Should never happen!! */
18356+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
18357+ head) {
18358+ if (tmp_encoder != encoder
18359+ && tmp_encoder->crtc == encoder->crtc) {
18360+ printk(KERN_ERR "Can't enable LVDS and another "
18361+ "encoder on the same pipe\n");
18362+ return false;
18363+ }
18364+ }
18365+
18366+ /*
18367+ * If we have timings from the BIOS for the panel, put them in
18368+ * to the adjusted mode. The CRTC will be set up for this mode,
18369+ * with the panel scaling set up to source from the H/VDisplay
18370+ * of the original mode.
18371+ */
18372+ if (mode_dev->panel_fixed_mode != NULL) {
18373+ adjusted_mode->hdisplay =
18374+ mode_dev->panel_fixed_mode->hdisplay;
18375+ adjusted_mode->hsync_start =
18376+ mode_dev->panel_fixed_mode->hsync_start;
18377+ adjusted_mode->hsync_end =
18378+ mode_dev->panel_fixed_mode->hsync_end;
18379+ adjusted_mode->htotal = mode_dev->panel_fixed_mode->htotal;
18380+ adjusted_mode->vdisplay =
18381+ mode_dev->panel_fixed_mode->vdisplay;
18382+ adjusted_mode->vsync_start =
18383+ mode_dev->panel_fixed_mode->vsync_start;
18384+ adjusted_mode->vsync_end =
18385+ mode_dev->panel_fixed_mode->vsync_end;
18386+ adjusted_mode->vtotal = mode_dev->panel_fixed_mode->vtotal;
18387+ adjusted_mode->clock = mode_dev->panel_fixed_mode->clock;
18388+ drm_mode_set_crtcinfo(adjusted_mode,
18389+ CRTC_INTERLACE_HALVE_V);
18390+ }
18391+
18392+ /*
18393+ * XXX: It would be nice to support lower refresh rates on the
18394+ * panels to reduce power consumption, and perhaps match the
18395+ * user's requested refresh rate.
18396+ */
18397+
18398+ return true;
18399+}
18400+
18401+static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
18402+{
18403+ struct drm_device *dev = encoder->dev;
18404+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
18405+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
18406+
18407+#if PRINT_JLIU7
18408+ DRM_INFO("JLIU7 enter psb_intel_lvds_prepare \n");
18409+#endif /* PRINT_JLIU7 */
18410+
18411+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
18412+
18413+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
18414+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
18415+ BACKLIGHT_DUTY_CYCLE_MASK);
18416+
18417+ psb_intel_lvds_set_power(dev, output, false);
18418+
18419+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
18420+}
18421+
18422+static void psb_intel_lvds_commit(struct drm_encoder *encoder)
18423+{
18424+ struct drm_device *dev = encoder->dev;
18425+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
18426+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
18427+
18428+#if PRINT_JLIU7
18429+ DRM_INFO("JLIU7 enter psb_intel_lvds_commit \n");
18430+#endif /* PRINT_JLIU7 */
18431+
18432+ if (mode_dev->backlight_duty_cycle == 0)
18433+ mode_dev->backlight_duty_cycle =
18434+ psb_intel_lvds_get_max_backlight(dev);
18435+
18436+ psb_intel_lvds_set_power(dev, output, true);
18437+}
18438+
18439+static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
18440+ struct drm_display_mode *mode,
18441+ struct drm_display_mode *adjusted_mode)
18442+{
18443+ struct psb_intel_mode_device *mode_dev =
18444+ enc_to_psb_intel_output(encoder)->mode_dev;
18445+ struct drm_device *dev = encoder->dev;
18446+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc);
18447+ u32 pfit_control;
18448+
18449+ /*
18450+ * The LVDS pin pair will already have been turned on in the
18451+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
18452+ * settings.
18453+ */
18454+
18455+ /*
18456+ * Enable automatic panel scaling so that non-native modes fill the
18457+ * screen. Should be enabled before the pipe is enabled, according to
18458+ * register description and PRM.
18459+ */
18460+ if (mode->hdisplay != adjusted_mode->hdisplay ||
18461+ mode->vdisplay != adjusted_mode->vdisplay)
18462+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
18463+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
18464+ HORIZ_INTERP_BILINEAR);
18465+ else
18466+ pfit_control = 0;
18467+
18468+ if (!IS_I965G(dev)) {
18469+ if (mode_dev->panel_wants_dither)
18470+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
18471+ } else
18472+ pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
18473+
18474+ REG_WRITE(PFIT_CONTROL, pfit_control);
18475+}
18476+
18477+/**
18478+ * Detect the LVDS connection.
18479+ *
18480+ * This always returns CONNECTOR_STATUS_CONNECTED.
18481+ * This connector should only have
18482+ * been set up if the LVDS was actually connected anyway.
18483+ */
18484+static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
18485+ *connector)
18486+{
18487+ return connector_status_connected;
18488+}
18489+
18490+/**
18491+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
18492+ */
18493+static int psb_intel_lvds_get_modes(struct drm_connector *connector)
18494+{
18495+ struct drm_device *dev = connector->dev;
18496+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
18497+ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
18498+ int ret = 0;
18499+
18500+ if (!IS_MRST(dev))
18501+ ret = psb_intel_ddc_get_modes(psb_intel_output);
18502+
18503+ if (ret)
18504+ return ret;
18505+
18506+ /* Didn't get an EDID, so
18507+ * Set wide sync ranges so we get all modes
18508+ * handed to valid_mode for checking
18509+ */
18510+ connector->display_info.min_vfreq = 0;
18511+ connector->display_info.max_vfreq = 200;
18512+ connector->display_info.min_hfreq = 0;
18513+ connector->display_info.max_hfreq = 200;
18514+
18515+ if (mode_dev->panel_fixed_mode != NULL) {
18516+ struct drm_display_mode *mode =
18517+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
18518+ drm_mode_probed_add(connector, mode);
18519+ return 1;
18520+ }
18521+
18522+ return 0;
18523+}
18524+
18525+/**
18526+ * psb_intel_lvds_destroy - unregister and free LVDS structures
18527+ * @connector: connector to free
18528+ *
18529+ * Unregister the DDC bus for this connector then free the driver private
18530+ * structure.
18531+ */
18532+static void psb_intel_lvds_destroy(struct drm_connector *connector)
18533+{
18534+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
18535+
18536+ if (psb_intel_output->ddc_bus)
18537+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
18538+ drm_sysfs_connector_remove(connector);
18539+ drm_connector_cleanup(connector);
18540+ kfree(connector);
18541+}
18542+
18543+static void psb_intel_lvds_connector_dpms(struct drm_connector *connector, int mode)
18544+{
18545+ struct drm_encoder *pEncoder = connector->encoder;
18546+ struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private;
18547+ struct drm_crtc_helper_funcs *pCrtcHFuncs = pEncoder->crtc->helper_private;
18548+ struct drm_device * dev = connector->dev;
18549+ pEncHFuncs->dpms(pEncoder, mode);
18550+ /*FIXME: crtc dpms will crash kernel on menlow*/
18551+ if (IS_MRST(dev))
18552+ pCrtcHFuncs->dpms(pEncoder->crtc, mode);
18553+}
18554+
18555+static int psb_intel_lvds_set_property(struct drm_connector *connector,
18556+ struct drm_property *property,
18557+ uint64_t value)
18558+{
18559+ struct drm_encoder *pEncoder = connector->encoder;
18560+
18561+ if (!strcmp(property->name, "scaling mode") && pEncoder) {
18562+ struct psb_intel_crtc *pPsbCrtc = to_psb_intel_crtc(pEncoder->crtc);
18563+ bool bTransitionFromToCentered;
18564+ uint64_t curValue;
18565+
18566+ if (!pPsbCrtc)
18567+ goto set_prop_error;
18568+
18569+ switch (value) {
18570+ case DRM_MODE_SCALE_FULLSCREEN:
18571+ break;
18572+ case DRM_MODE_SCALE_NO_SCALE:
18573+ break;
18574+ case DRM_MODE_SCALE_ASPECT:
18575+ break;
18576+ default:
18577+ goto set_prop_error;
18578+ }
18579+
18580+ if (drm_connector_property_get_value(connector, property, &curValue))
18581+ goto set_prop_error;
18582+
18583+ if (curValue == value)
18584+ goto set_prop_done;
18585+
18586+ if (drm_connector_property_set_value(connector, property, value))
18587+ goto set_prop_error;
18588+
18589+ bTransitionFromToCentered = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
18590+ (value == DRM_MODE_SCALE_NO_SCALE);
18591+
18592+ if (pPsbCrtc->saved_mode.hdisplay != 0 &&
18593+ pPsbCrtc->saved_mode.vdisplay != 0) {
18594+ if (bTransitionFromToCentered) {
18595+ if (!drm_crtc_helper_set_mode(pEncoder->crtc, &pPsbCrtc->saved_mode,
18596+ pEncoder->crtc->x, pEncoder->crtc->y, pEncoder->crtc->fb))
18597+ goto set_prop_error;
18598+ } else {
18599+ struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private;
18600+ pEncHFuncs->mode_set(pEncoder, &pPsbCrtc->saved_mode,
18601+ &pPsbCrtc->saved_adjusted_mode);
18602+ }
18603+ }
18604+ } else if (!strcmp(property->name, "backlight") && pEncoder) {
18605+ if (drm_connector_property_set_value(connector, property, value))
18606+ goto set_prop_error;
18607+ else {
18608+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
18609+ struct backlight_device bd;
18610+ bd.props.brightness = value;
18611+ psb_set_brightness(&bd);
18612+#endif
18613+ }
18614+ } else if (!strcmp(property->name, "DPMS") && pEncoder) {
18615+ struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private;
18616+ /*struct drm_crtc_helper_funcs *pCrtcHFuncs = pEncoder->crtc->helper_private;*/
18617+ pEncHFuncs->dpms(pEncoder, value);
18618+ /*pCrtcHFuncs->dpms(pEncoder->crtc, value);*/
18619+ }
18620+
18621+set_prop_done:
18622+ return 0;
18623+set_prop_error:
18624+ return -1;
18625+}
18626+
18627+static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
18628+ .dpms = psb_intel_lvds_encoder_dpms,
18629+ .mode_fixup = psb_intel_lvds_mode_fixup,
18630+ .prepare = psb_intel_lvds_prepare,
18631+ .mode_set = psb_intel_lvds_mode_set,
18632+ .commit = psb_intel_lvds_commit,
18633+};
18634+
18635+static const struct drm_connector_helper_funcs
18636+ psb_intel_lvds_connector_helper_funcs = {
18637+ .get_modes = psb_intel_lvds_get_modes,
18638+ .mode_valid = psb_intel_lvds_mode_valid,
18639+ .best_encoder = psb_intel_best_encoder,
18640+};
18641+
18642+static const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
18643+ .dpms = psb_intel_lvds_connector_dpms,
18644+ .save = psb_intel_lvds_save,
18645+ .restore = psb_intel_lvds_restore,
18646+ .detect = psb_intel_lvds_detect,
18647+ .fill_modes = drm_helper_probe_single_connector_modes,
18648+ .set_property = psb_intel_lvds_set_property,
18649+ .destroy = psb_intel_lvds_destroy,
18650+};
18651+
18652+
18653+static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
18654+{
18655+ drm_encoder_cleanup(encoder);
18656+}
18657+
18658+static const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
18659+ .destroy = psb_intel_lvds_enc_destroy,
18660+};
18661+
18662+
18663+
18664+/**
18665+ * psb_intel_lvds_init - setup LVDS connectors on this device
18666+ * @dev: drm device
18667+ *
18668+ * Create the connector, register the LVDS DDC bus, and try to figure out what
18669+ * modes we can display on the LVDS panel (if present).
18670+ */
18671+void psb_intel_lvds_init(struct drm_device *dev,
18672+ struct psb_intel_mode_device *mode_dev)
18673+{
18674+ struct psb_intel_output *psb_intel_output;
18675+ struct psb_intel_lvds_priv * lvds_priv;
18676+ struct drm_connector *connector;
18677+ struct drm_encoder *encoder;
18678+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
18679+ struct drm_crtc *crtc;
18680+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
18681+ u32 lvds;
18682+ int pipe;
18683+
18684+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
18685+ if (!psb_intel_output)
18686+ return;
18687+
18688+ lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
18689+ if(!lvds_priv) {
18690+ kfree(psb_intel_output);
18691+ DRM_DEBUG("LVDS private allocation error\n");
18692+ return;
18693+ }
18694+
18695+ psb_intel_output->dev_priv = lvds_priv;
18696+
18697+ psb_intel_output->mode_dev = mode_dev;
18698+ connector = &psb_intel_output->base;
18699+ encoder = &psb_intel_output->enc;
18700+ drm_connector_init(dev, &psb_intel_output->base,
18701+ &psb_intel_lvds_connector_funcs,
18702+ DRM_MODE_CONNECTOR_LVDS);
18703+
18704+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
18705+ DRM_MODE_ENCODER_LVDS);
18706+
18707+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
18708+ &psb_intel_output->enc);
18709+ psb_intel_output->type = INTEL_OUTPUT_LVDS;
18710+
18711+ drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
18712+ drm_connector_helper_add(connector,
18713+ &psb_intel_lvds_connector_helper_funcs);
18714+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
18715+ connector->interlace_allowed = false;
18716+ connector->doublescan_allowed = false;
18717+
18718+ /*Attach connector properties*/
18719+ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
18720+ drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL);
18721+
18722+ /**
18723+ * Set up I2C bus
18724+ * FIXME: distroy i2c_bus when exit
18725+ */
18726+ psb_intel_output->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
18727+ if(!psb_intel_output->i2c_bus) {
18728+ dev_printk(KERN_ERR,
18729+ &dev->pdev->dev, "I2C bus registration failed.\n");
18730+ goto failed_blc_i2c;
18731+ }
18732+ psb_intel_output->i2c_bus->slave_addr = 0x2C;
18733+ dev_priv->lvds_i2c_bus = psb_intel_output->i2c_bus;
18734+
18735+ /*
18736+ * LVDS discovery:
18737+ * 1) check for EDID on DDC
18738+ * 2) check for VBT data
18739+ * 3) check to see if LVDS is already on
18740+ * if none of the above, no panel
18741+ * 4) make sure lid is open
18742+ * if closed, act like it's not there for now
18743+ */
18744+
18745+ /* Set up the DDC bus. */
18746+ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
18747+ if (!psb_intel_output->ddc_bus) {
18748+ dev_printk(KERN_ERR, &dev->pdev->dev,
18749+ "DDC bus registration " "failed.\n");
18750+ goto failed_ddc;
18751+ }
18752+
18753+ /*
18754+ * Attempt to get the fixed panel mode from DDC. Assume that the
18755+ * preferred mode is the right one.
18756+ */
18757+ psb_intel_ddc_get_modes(psb_intel_output);
18758+ list_for_each_entry(scan, &connector->probed_modes, head) {
18759+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
18760+ mode_dev->panel_fixed_mode =
18761+ drm_mode_duplicate(dev, scan);
18762+ goto out; /* FIXME: check for quirks */
18763+ }
18764+ }
18765+
18766+ /* Failed to get EDID, what about VBT? do we need this?*/
18767+ if (mode_dev->vbt_mode)
18768+ mode_dev->panel_fixed_mode =
18769+ drm_mode_duplicate(dev, mode_dev->vbt_mode);
18770+
18771+ if(!mode_dev->panel_fixed_mode)
18772+ if (dev_priv->lfp_lvds_vbt_mode)
18773+ mode_dev->panel_fixed_mode =
18774+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
18775+
18776+ /*
18777+ * If we didn't get EDID, try checking if the panel is already turned
18778+ * on. If so, assume that whatever is currently programmed is the
18779+ * correct mode.
18780+ */
18781+ lvds = REG_READ(LVDS);
18782+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
18783+ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
18784+
18785+ if (crtc && (lvds & LVDS_PORT_EN)) {
18786+ mode_dev->panel_fixed_mode =
18787+ psb_intel_crtc_mode_get(dev, crtc);
18788+ if (mode_dev->panel_fixed_mode) {
18789+ mode_dev->panel_fixed_mode->type |=
18790+ DRM_MODE_TYPE_PREFERRED;
18791+ goto out; /* FIXME: check for quirks */
18792+ }
18793+ }
18794+
18795+ /* If we still don't have a mode after all that, give up. */
18796+ if (!mode_dev->panel_fixed_mode) {
18797+ DRM_DEBUG
18798+ ("Found no modes on the lvds, ignoring the LVDS\n");
18799+ goto failed_find;
18800+ }
18801+
18802+ /* FIXME: detect aopen & mac mini type stuff automatically? */
18803+ /*
18804+ * Blacklist machines with BIOSes that list an LVDS panel without
18805+ * actually having one.
18806+ */
18807+ if (IS_I945GM(dev)) {
18808+ /* aopen mini pc */
18809+ if (dev->pdev->subsystem_vendor == 0xa0a0) {
18810+ DRM_DEBUG
18811+ ("Suspected AOpen Mini PC, ignoring the LVDS\n");
18812+ goto failed_find;
18813+ }
18814+
18815+ if ((dev->pdev->subsystem_vendor == 0x8086) &&
18816+ (dev->pdev->subsystem_device == 0x7270)) {
18817+ /* It's a Mac Mini or Macbook Pro. */
18818+
18819+ if (mode_dev->panel_fixed_mode != NULL &&
18820+ mode_dev->panel_fixed_mode->hdisplay == 800 &&
18821+ mode_dev->panel_fixed_mode->vdisplay == 600) {
18822+ DRM_DEBUG
18823+ ("Suspected Mac Mini, ignoring the LVDS\n");
18824+ goto failed_find;
18825+ }
18826+ }
18827+ }
18828+
18829+out:
18830+ drm_sysfs_connector_add(connector);
18831+
18832+#if PRINT_JLIU7
18833+ DRM_INFO("PRINT_JLIU7 hdisplay = %d\n",
18834+ mode_dev->panel_fixed_mode->hdisplay);
18835+ DRM_INFO("PRINT_JLIU7 vdisplay = %d\n",
18836+ mode_dev->panel_fixed_mode->vdisplay);
18837+ DRM_INFO("PRINT_JLIU7 hsync_start = %d\n",
18838+ mode_dev->panel_fixed_mode->hsync_start);
18839+ DRM_INFO("PRINT_JLIU7 hsync_end = %d\n",
18840+ mode_dev->panel_fixed_mode->hsync_end);
18841+ DRM_INFO("PRINT_JLIU7 htotal = %d\n",
18842+ mode_dev->panel_fixed_mode->htotal);
18843+ DRM_INFO("PRINT_JLIU7 vsync_start = %d\n",
18844+ mode_dev->panel_fixed_mode->vsync_start);
18845+ DRM_INFO("PRINT_JLIU7 vsync_end = %d\n",
18846+ mode_dev->panel_fixed_mode->vsync_end);
18847+ DRM_INFO("PRINT_JLIU7 vtotal = %d\n",
18848+ mode_dev->panel_fixed_mode->vtotal);
18849+ DRM_INFO("PRINT_JLIU7 clock = %d\n",
18850+ mode_dev->panel_fixed_mode->clock);
18851+#endif /* PRINT_JLIU7 */
18852+ return;
18853+
18854+failed_find:
18855+ if (psb_intel_output->ddc_bus)
18856+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
18857+failed_ddc:
18858+ if (psb_intel_output->i2c_bus)
18859+ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
18860+failed_blc_i2c:
18861+ drm_encoder_cleanup(encoder);
18862+ drm_connector_cleanup(connector);
18863+ kfree(connector);
18864+}
18865+
18866+/* MRST platform start */
18867+
18868+/*
18869+ * FIXME need to move to register define head file
18870+ */
18871+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
18872+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
18873+
18874+/* The max/min PWM frequency in BPCR[31:17] - */
18875+/* The smallest number is 1 (not 0) that can fit in the
18876+ * 15-bit field of the and then*/
18877+/* shifts to the left by one bit to get the actual 16-bit
18878+ * value that the 15-bits correspond to.*/
18879+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
18880+
18881+#define BRIGHTNESS_MAX_LEVEL 100
18882+#define BLC_PWM_PRECISION_FACTOR 10 /* 10000000 */
18883+#define BLC_PWM_FREQ_CALC_CONSTANT 32
18884+#define MHz 1000000
18885+#define BLC_POLARITY_NORMAL 0
18886+#define BLC_POLARITY_INVERSE 1
18887+
18888+/**
18889+ * Calculate PWM control register value.
18890+ */
18891+#if 0
18892+static bool mrstLVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
18893+{
18894+ unsigned long value = 0;
18895+ if (blc_freq == 0) {
18896+ /* DRM_ERROR(KERN_ERR "mrstLVDSCalculatePWMCtrlRegFreq:
18897+ * Frequency Requested is 0.\n"); */
18898+ return false;
18899+ }
18900+
18901+ value = (CoreClock * MHz);
18902+ value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
18903+ value = (value * BLC_PWM_PRECISION_FACTOR);
18904+ value = (value / blc_freq);
18905+ value = (value / BLC_PWM_PRECISION_FACTOR);
18906+
18907+ if (value > (unsigned long) MRST_BLC_MAX_PWM_REG_FREQ) {
18908+ return 0;
18909+ } else {
18910+ PWMControlRegFreq = (u32) value;
18911+ return 1;
18912+ }
18913+}
18914+#endif
18915+/**
18916+ * Sets the power state for the panel.
18917+ */
/**
 * mrst_lvds_set_power - turn the LVDS panel power on or off
 * @dev: drm device
 * @output: LVDS output (currently unused by this function)
 * @on: true to power the panel up, false to power it down
 *
 * Toggles POWER_TARGET_ON in PP_CONTROL and busy-waits on PP_STATUS until
 * the panel power sequencing has completed.
 */
static void mrst_lvds_set_power(struct drm_device *dev,
				struct psb_intel_output *output, bool on)
{
	u32 pp_status;

#if PRINT_JLIU7
	DRM_INFO("JLIU7 enter mrst_lvds_set_power \n");
#endif /* PRINT_JLIU7 */

	/* Keep the display island powered while we touch its registers. */
	powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);

	if (on) {
		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
			  POWER_TARGET_ON);
		/* Spin while the panel is "ready but not yet on".
		 * NOTE(review): unbounded poll -- hangs if the hardware
		 * never completes the power-up sequence. */
		do {
			pp_status = REG_READ(PP_STATUS);
		} while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
	} else {
		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
			  ~POWER_TARGET_ON);
		/* Wait for PP_ON to clear (panel fully powered down);
		 * same unbounded-poll caveat as above. */
		do {
			pp_status = REG_READ(PP_STATUS);
		} while (pp_status & PP_ON);
	}

	powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
}
18945+
18946+static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode)
18947+{
18948+ struct drm_device *dev = encoder->dev;
18949+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
18950+
18951+#if PRINT_JLIU7
18952+ DRM_INFO("JLIU7 enter mrst_lvds_dpms \n");
18953+#endif /* PRINT_JLIU7 */
18954+
18955+ if (mode == DRM_MODE_DPMS_ON)
18956+ mrst_lvds_set_power(dev, output, true);
18957+ else
18958+ mrst_lvds_set_power(dev, output, false);
18959+
18960+ /* XXX: We never power down the LVDS pairs. */
18961+}
18962+
/**
 * mrst_lvds_mode_set - program the LVDS port and panel fitter for a mode
 * @encoder: LVDS encoder being configured
 * @mode: mode requested by the user
 * @adjusted_mode: mode after CRTC fixup (the timings actually driven)
 *
 * Programs the panel power sequencing delay registers, enables the LVDS
 * port on pipe A, and configures the panel fitter (PFIT) according to the
 * connector's scaling-mode property.
 */
static void mrst_lvds_mode_set(struct drm_encoder *encoder,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode)
{
	struct psb_intel_mode_device *mode_dev = enc_to_psb_intel_output(encoder)->mode_dev;
	struct drm_device *dev = encoder->dev;
	u32 lvds_port;
	/* Default used when the property lookup below does not overwrite it. */
	uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;

#if PRINT_JLIU7
	DRM_INFO("JLIU7 enter mrst_lvds_mode_set \n");
#endif /* PRINT_JLIU7 */

	powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);

	/*
	 * The LVDS pin pair will already have been turned on in the
	 * psb_intel_crtc_mode_set since it has a large impact on the DPLL
	 * settings.
	 */
	/*FIXME JLIU7 Get panel power delay parameters from config data */
	/* Hard-coded panel power sequencing delays; 0x61208/0x6120c/0x61210
	 * appear to be the power-on/power-off/cycle delay registers
	 * (LVDSPP_ON/LVDSPP_OFF/PP_CYCLE) -- TODO confirm against the
	 * register header. */
	REG_WRITE(0x61208, 0x25807d0);
	REG_WRITE(0x6120c, 0x1f407d0);
	REG_WRITE(0x61210, 0x270f04);

	/* Force pipe A (clear the pipe B select bit), enable port + border. */
	lvds_port = (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN | LVDS_BORDER_EN;

	if (mode_dev->panel_wants_dither)
		lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;

	REG_WRITE(LVDS, lvds_port);

	drm_connector_property_get_value(&enc_to_psb_intel_output(encoder)->base,
					 dev->mode_config.scaling_mode_property, &curValue);

	if (curValue == DRM_MODE_SCALE_NO_SCALE)
		REG_WRITE(PFIT_CONTROL, 0);
	else if (curValue == DRM_MODE_SCALE_ASPECT) {
		if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) || (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
			/* Compare aspect ratios by cross-multiplication to
			 * avoid division: equal ratios scale both axes, a
			 * wider target pillarboxes, a taller one letterboxes. */
			if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) == (mode->hdisplay * adjusted_mode->crtc_vdisplay))
				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
			else if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) > (mode->hdisplay * adjusted_mode->crtc_vdisplay))
				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_PILLARBOX);
			else
				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_LETTERBOX);
		} else
			REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
	} else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/
		REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);

	powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
}
19015+
19016+
/* Encoder helper vtable: MRST-specific dpms/mode_set hooks combined with
 * the shared PSB LVDS fixup/prepare/commit implementations. */
static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = {
	.dpms = mrst_lvds_dpms,
	.mode_fixup = psb_intel_lvds_mode_fixup,
	.prepare = psb_intel_lvds_prepare,
	.mode_set = mrst_lvds_mode_set,
	.commit = psb_intel_lvds_commit,
};
19024+
19025+/** Returns the panel fixed mode from configuration. */
19026+/** FIXME JLIU7 need to revist it. */
19027+struct drm_display_mode *mrst_lvds_get_configuration_mode(struct drm_device
19028+ *dev)
19029+{
19030+ struct drm_display_mode *mode;
19031+ struct drm_psb_private *dev_priv =
19032+ (struct drm_psb_private *) dev->dev_private;
19033+ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
19034+
19035+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
19036+ if (!mode)
19037+ return NULL;
19038+
19039+ if (dev_priv->vbt_data.Size != 0x00) { /*if non-zero, then use vbt*/
19040+
19041+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
19042+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
19043+ mode->hsync_start = mode->hdisplay + \
19044+ ((ti->hsync_offset_hi << 8) | \
19045+ ti->hsync_offset_lo);
19046+ mode->hsync_end = mode->hsync_start + \
19047+ ((ti->hsync_pulse_width_hi << 8) | \
19048+ ti->hsync_pulse_width_lo);
19049+ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
19050+ ti->hblank_lo);
19051+ mode->vsync_start = \
19052+ mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
19053+ ti->vsync_offset_lo);
19054+ mode->vsync_end = \
19055+ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
19056+ ti->vsync_pulse_width_lo);
19057+ mode->vtotal = mode->vdisplay + \
19058+ ((ti->vblank_hi << 8) | ti->vblank_lo);
19059+ mode->clock = ti->pixel_clock * 10;
19060+#if 0
19061+ printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
19062+ printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
19063+ printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
19064+ printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
19065+ printk(KERN_INFO "htotal is %d\n", mode->htotal);
19066+ printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
19067+ printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
19068+ printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
19069+ printk(KERN_INFO "clock is %d\n", mode->clock);
19070+#endif
19071+ }
19072+ else {
19073+
19074+#if 0 /*FIXME jliu7 remove it later */
19075+ /* hard coded fixed mode for TPO LTPS LPJ040K001A */
19076+ mode->hdisplay = 800;
19077+ mode->vdisplay = 480;
19078+ mode->hsync_start = 836;
19079+ mode->hsync_end = 846;
19080+ mode->htotal = 1056;
19081+ mode->vsync_start = 489;
19082+ mode->vsync_end = 491;
19083+ mode->vtotal = 525;
19084+ mode->clock = 33264;
19085+#endif /*FIXME jliu7 remove it later */
19086+
19087+#if 0 /*FIXME jliu7 remove it later */
19088+ /* hard coded fixed mode for LVDS 800x480 */
19089+ mode->hdisplay = 800;
19090+ mode->vdisplay = 480;
19091+ mode->hsync_start = 801;
19092+ mode->hsync_end = 802;
19093+ mode->htotal = 1024;
19094+ mode->vsync_start = 481;
19095+ mode->vsync_end = 482;
19096+ mode->vtotal = 525;
19097+ mode->clock = 30994;
19098+#endif /*FIXME jliu7 remove it later */
19099+
19100+#if 1 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */
19101+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
19102+ mode->hdisplay = 1024;
19103+ mode->vdisplay = 600;
19104+ mode->hsync_start = 1072;
19105+ mode->hsync_end = 1104;
19106+ mode->htotal = 1184;
19107+ mode->vsync_start = 603;
19108+ mode->vsync_end = 604;
19109+ mode->vtotal = 608;
19110+ mode->clock = 53990;
19111+#endif /*FIXME jliu7 remove it later */
19112+
19113+#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
19114+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
19115+ mode->hdisplay = 1024;
19116+ mode->vdisplay = 600;
19117+ mode->hsync_start = 1104;
19118+ mode->hsync_end = 1136;
19119+ mode->htotal = 1184;
19120+ mode->vsync_start = 603;
19121+ mode->vsync_end = 604;
19122+ mode->vtotal = 608;
19123+ mode->clock = 53990;
19124+#endif /*FIXME jliu7 remove it later */
19125+
19126+#if 0 /*FIXME jliu7 remove it later */
19127+ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
19128+ mode->hdisplay = 1024;
19129+ mode->vdisplay = 600;
19130+ mode->hsync_start = 1124;
19131+ mode->hsync_end = 1204;
19132+ mode->htotal = 1312;
19133+ mode->vsync_start = 607;
19134+ mode->vsync_end = 610;
19135+ mode->vtotal = 621;
19136+ mode->clock = 48885;
19137+#endif /*FIXME jliu7 remove it later */
19138+
19139+#if 0 /*FIXME jliu7 remove it later */
19140+ /* hard coded fixed mode for LVDS 1024x768 */
19141+ mode->hdisplay = 1024;
19142+ mode->vdisplay = 768;
19143+ mode->hsync_start = 1048;
19144+ mode->hsync_end = 1184;
19145+ mode->htotal = 1344;
19146+ mode->vsync_start = 771;
19147+ mode->vsync_end = 777;
19148+ mode->vtotal = 806;
19149+ mode->clock = 65000;
19150+#endif /*FIXME jliu7 remove it later */
19151+
19152+#if 0 /*FIXME jliu7 remove it later */
19153+ /* hard coded fixed mode for LVDS 1366x768 */
19154+ mode->hdisplay = 1366;
19155+ mode->vdisplay = 768;
19156+ mode->hsync_start = 1430;
19157+ mode->hsync_end = 1558;
19158+ mode->htotal = 1664;
19159+ mode->vsync_start = 769;
19160+ mode->vsync_end = 770;
19161+ mode->vtotal = 776;
19162+ mode->clock = 77500;
19163+#endif /*FIXME jliu7 remove it later */
19164+ }
19165+ drm_mode_set_name(mode);
19166+ drm_mode_set_crtcinfo(mode, 0);
19167+
19168+ return mode;
19169+}
19170+
19171+/**
19172+ * mrst_lvds_init - setup LVDS connectors on this device
19173+ * @dev: drm device
19174+ *
19175+ * Create the connector, register the LVDS DDC bus, and try to figure out what
19176+ * modes we can display on the LVDS panel (if present).
19177+ */
19178+void mrst_lvds_init(struct drm_device *dev,
19179+ struct psb_intel_mode_device *mode_dev)
19180+{
19181+ struct psb_intel_output *psb_intel_output;
19182+ struct drm_connector *connector;
19183+ struct drm_encoder *encoder;
19184+ struct drm_psb_private *dev_priv = (struct drm_psb_private *) dev->dev_private;
19185+ struct edid *edid;
19186+ int ret = 0;
19187+ struct i2c_adapter *i2c_adap;
19188+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
19189+
19190+#if PRINT_JLIU7
19191+ DRM_INFO("JLIU7 enter mrst_lvds_init \n");
19192+#endif /* PRINT_JLIU7 */
19193+
19194+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
19195+ if (!psb_intel_output)
19196+ return;
19197+
19198+ psb_intel_output->mode_dev = mode_dev;
19199+ connector = &psb_intel_output->base;
19200+ encoder = &psb_intel_output->enc;
19201+ drm_connector_init(dev, &psb_intel_output->base,
19202+ &psb_intel_lvds_connector_funcs,
19203+ DRM_MODE_CONNECTOR_LVDS);
19204+
19205+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
19206+ DRM_MODE_ENCODER_LVDS);
19207+
19208+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
19209+ &psb_intel_output->enc);
19210+ psb_intel_output->type = INTEL_OUTPUT_LVDS;
19211+
19212+ drm_encoder_helper_add(encoder, &mrst_lvds_helper_funcs);
19213+ drm_connector_helper_add(connector,
19214+ &psb_intel_lvds_connector_helper_funcs);
19215+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
19216+ connector->interlace_allowed = false;
19217+ connector->doublescan_allowed = false;
19218+
19219+ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
19220+ drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL);
19221+
19222+ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
19223+
19224+ /*
19225+ * LVDS discovery:
19226+ * 1) check for EDID on DDC
19227+ * 2) check for VBT data
19228+ * 3) check to see if LVDS is already on
19229+ * if none of the above, no panel
19230+ * 4) make sure lid is open
19231+ * if closed, act like it's not there for now
19232+ */
19233+ i2c_adap = i2c_get_adapter(2);
19234+ if (i2c_adap == NULL)
19235+ printk(KERN_ALERT "No ddc adapter available!\n");
19236+ /* Set up the DDC bus. */
19237+/* psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
19238+ if (!psb_intel_output->ddc_bus) {
19239+ dev_printk(KERN_ERR, &dev->pdev->dev,
19240+ "DDC bus registration " "failed.\n");
19241+ goto failed_ddc;
19242+ }*/
19243+
19244+ /*
19245+ * Attempt to get the fixed panel mode from DDC. Assume that the
19246+ * preferred mode is the right one.
19247+ */
19248+ edid = drm_get_edid(connector, i2c_adap);
19249+ if (edid) {
19250+ drm_mode_connector_update_edid_property(connector, edid);
19251+ ret = drm_add_edid_modes(connector, edid);
19252+ kfree(edid);
19253+ }
19254+
19255+ list_for_each_entry(scan, &connector->probed_modes, head) {
19256+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
19257+ mode_dev->panel_fixed_mode =
19258+ drm_mode_duplicate(dev, scan);
19259+ goto out; /* FIXME: check for quirks */
19260+ }
19261+ }
19262+
19263+ /*
19264+ * If we didn't get EDID, try geting panel timing
19265+ * from configuration data
19266+ */
19267+ mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
19268+
19269+ if (mode_dev->panel_fixed_mode) {
19270+ mode_dev->panel_fixed_mode->type |=
19271+ DRM_MODE_TYPE_PREFERRED;
19272+ goto out; /* FIXME: check for quirks */
19273+ }
19274+
19275+ /* If we still don't have a mode after all that, give up. */
19276+ if (!mode_dev->panel_fixed_mode) {
19277+ DRM_DEBUG
19278+ ("Found no modes on the lvds, ignoring the LVDS\n");
19279+ goto failed_find;
19280+ }
19281+
19282+out:
19283+ drm_sysfs_connector_add(connector);
19284+ return;
19285+
19286+failed_find:
19287+ DRM_DEBUG("No LVDS modes found, disabling.\n");
19288+ if (psb_intel_output->ddc_bus)
19289+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
19290+
19291+failed_ddc:
19292+
19293+ drm_encoder_cleanup(encoder);
19294+ drm_connector_cleanup(connector);
19295+ kfree(connector);
19296+}
19297+
19298+/* MRST platform end */
19299diff --git a/drivers/gpu/drm/psb/psb_intel_modes.c b/drivers/gpu/drm/psb/psb_intel_modes.c
19300new file mode 100644
19301index 0000000..54abe86
19302--- /dev/null
19303+++ b/drivers/gpu/drm/psb/psb_intel_modes.c
19304@@ -0,0 +1,64 @@
19305+/*
19306+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
19307+ * Copyright (c) 2007 Intel Corporation
19308+ * Jesse Barnes <jesse.barnes@intel.com>
19309+ */
19310+
19311+#include <linux/i2c.h>
19312+#include <linux/fb.h>
19313+#include <drm/drmP.h>
19314+#include "psb_intel_drv.h"
19315+
19316+/**
19317+ * psb_intel_ddc_probe
19318+ *
19319+ */
19320+bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
19321+{
19322+ u8 out_buf[] = { 0x0, 0x0 };
19323+ u8 buf[2];
19324+ int ret;
19325+ struct i2c_msg msgs[] = {
19326+ {
19327+ .addr = 0x50,
19328+ .flags = 0,
19329+ .len = 1,
19330+ .buf = out_buf,
19331+ },
19332+ {
19333+ .addr = 0x50,
19334+ .flags = I2C_M_RD,
19335+ .len = 1,
19336+ .buf = buf,
19337+ }
19338+ };
19339+
19340+ ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2);
19341+ if (ret == 2)
19342+ return true;
19343+
19344+ return false;
19345+}
19346+
19347+/**
19348+ * psb_intel_ddc_get_modes - get modelist from monitor
19349+ * @connector: DRM connector device to use
19350+ *
19351+ * Fetch the EDID information from @connector using the DDC bus.
19352+ */
19353+int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output)
19354+{
19355+ struct edid *edid;
19356+ int ret = 0;
19357+
19358+ edid =
19359+ drm_get_edid(&psb_intel_output->base,
19360+ &psb_intel_output->ddc_bus->adapter);
19361+ if (edid) {
19362+ drm_mode_connector_update_edid_property(&psb_intel_output->
19363+ base, edid);
19364+ ret = drm_add_edid_modes(&psb_intel_output->base, edid);
19365+ kfree(edid);
19366+ }
19367+ return ret;
19368+}
19369diff --git a/drivers/gpu/drm/psb/psb_intel_reg.h b/drivers/gpu/drm/psb/psb_intel_reg.h
19370new file mode 100644
19371index 0000000..7e22463
19372--- /dev/null
19373+++ b/drivers/gpu/drm/psb/psb_intel_reg.h
19374@@ -0,0 +1,1015 @@
19375+#define BLC_PWM_CTL 0x61254
19376+#define BLC_PWM_CTL2 0x61250
19377+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
19378+/**
19379+ * This is the most significant 15 bits of the number of backlight cycles in a
19380+ * complete cycle of the modulated backlight control.
19381+ *
19382+ * The actual value is this field multiplied by two.
19383+ */
19384+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
19385+#define BLM_LEGACY_MODE (1 << 16)
19386+/**
19387+ * This is the number of cycles out of the backlight modulation cycle for which
19388+ * the backlight is on.
19389+ *
19390+ * This field must be no greater than the number of cycles in the complete
19391+ * backlight modulation cycle.
19392+ */
19393+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
19394+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
19395+
19396+#define I915_GCFGC 0xf0
19397+#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
19398+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
19399+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
19400+#define I915_DISPLAY_CLOCK_MASK (7 << 4)
19401+
19402+#define I855_HPLLCC 0xc0
19403+#define I855_CLOCK_CONTROL_MASK (3 << 0)
19404+#define I855_CLOCK_133_200 (0 << 0)
19405+#define I855_CLOCK_100_200 (1 << 0)
19406+#define I855_CLOCK_100_133 (2 << 0)
19407+#define I855_CLOCK_166_250 (3 << 0)
19408+
19409+/* I830 CRTC registers */
19410+#define HTOTAL_A 0x60000
19411+#define HBLANK_A 0x60004
19412+#define HSYNC_A 0x60008
19413+#define VTOTAL_A 0x6000c
19414+#define VBLANK_A 0x60010
19415+#define VSYNC_A 0x60014
19416+#define PIPEASRC 0x6001c
19417+#define BCLRPAT_A 0x60020
19418+#define VSYNCSHIFT_A 0x60028
19419+
19420+#define HTOTAL_B 0x61000
19421+#define HBLANK_B 0x61004
19422+#define HSYNC_B 0x61008
19423+#define VTOTAL_B 0x6100c
19424+#define VBLANK_B 0x61010
19425+#define VSYNC_B 0x61014
19426+#define PIPEBSRC 0x6101c
19427+#define BCLRPAT_B 0x61020
19428+#define VSYNCSHIFT_B 0x61028
19429+
19430+#define PP_STATUS 0x61200
19431+# define PP_ON (1 << 31)
19432+/**
19433+ * Indicates that all dependencies of the panel are on:
19434+ *
19435+ * - PLL enabled
19436+ * - pipe enabled
19437+ * - LVDS/DVOB/DVOC on
19438+ */
19439+# define PP_READY (1 << 30)
19440+# define PP_SEQUENCE_NONE (0 << 28)
19441+# define PP_SEQUENCE_ON (1 << 28)
19442+# define PP_SEQUENCE_OFF (2 << 28)
19443+# define PP_SEQUENCE_MASK 0x30000000
19444+#define PP_CONTROL 0x61204
19445+# define POWER_TARGET_ON (1 << 0)
19446+
19447+#define LVDSPP_ON 0x61208
19448+#define LVDSPP_OFF 0x6120c
19449+#define PP_CYCLE 0x61210
19450+
19451+#define PFIT_CONTROL 0x61230
19452+# define PFIT_ENABLE (1 << 31)
19453+# define PFIT_PIPE_MASK (3 << 29)
19454+# define PFIT_PIPE_SHIFT 29
19455+# define PFIT_SCALING_MODE_PILLARBOX (1 << 27)
19456+# define PFIT_SCALING_MODE_LETTERBOX (3 << 26)
19457+# define VERT_INTERP_DISABLE (0 << 10)
19458+# define VERT_INTERP_BILINEAR (1 << 10)
19459+# define VERT_INTERP_MASK (3 << 10)
19460+# define VERT_AUTO_SCALE (1 << 9)
19461+# define HORIZ_INTERP_DISABLE (0 << 6)
19462+# define HORIZ_INTERP_BILINEAR (1 << 6)
19463+# define HORIZ_INTERP_MASK (3 << 6)
19464+# define HORIZ_AUTO_SCALE (1 << 5)
19465+# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
19466+
19467+#define PFIT_PGM_RATIOS 0x61234
19468+# define PFIT_VERT_SCALE_MASK 0xfff00000
19469+# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
19470+
19471+#define PFIT_AUTO_RATIOS 0x61238
19472+
19473+
19474+#define DPLL_A 0x06014
19475+#define DPLL_B 0x06018
19476+# define DPLL_VCO_ENABLE (1 << 31)
19477+# define DPLL_DVO_HIGH_SPEED (1 << 30)
19478+# define DPLL_SYNCLOCK_ENABLE (1 << 29)
19479+# define DPLL_VGA_MODE_DIS (1 << 28)
19480+# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
19481+# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
19482+# define DPLL_MODE_MASK (3 << 26)
19483+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
19484+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
19485+# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
19486+# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
19487+# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
19488+# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
19489+/**
19490+ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
19491+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
19492+ */
19493+# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
19494+/**
19495+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
19496+ * this field (only one bit may be set).
19497+ */
19498+# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
19499+# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
19500+# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
19501+ * in DVO non-gang */
19502+# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
19503+# define PLL_REF_INPUT_DREFCLK (0 << 13)
19504+# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
19505+# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
19506+ * TVCLKIN */
19507+# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
19508+# define PLL_REF_INPUT_MASK (3 << 13)
19509+# define PLL_LOAD_PULSE_PHASE_SHIFT 9
19510+/*
19511+ * Parallel to Serial Load Pulse phase selection.
19512+ * Selects the phase for the 10X DPLL clock for the PCIe
19513+ * digital display port. The range is 4 to 13; 10 or more
19514+ * is just a flip delay. The default is 6
19515+ */
19516+# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
19517+# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
19518+
19519+/**
19520+ * SDVO multiplier for 945G/GM. Not used on 965.
19521+ *
19522+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
19523+ */
19524+# define SDVO_MULTIPLIER_MASK 0x000000ff
19525+# define SDVO_MULTIPLIER_SHIFT_HIRES 4
19526+# define SDVO_MULTIPLIER_SHIFT_VGA 0
19527+
19528+/** @defgroup DPLL_MD
19529+ * @{
19530+ */
19531+/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
19532+#define DPLL_A_MD 0x0601c
19533+/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
19534+#define DPLL_B_MD 0x06020
19535+/**
19536+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
19537+ *
19538+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
19539+ */
19540+# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
19541+# define DPLL_MD_UDI_DIVIDER_SHIFT 24
19542+/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
19543+# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
19544+# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
19545+/**
19546+ * SDVO/UDI pixel multiplier.
19547+ *
19548+ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
19549+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
19550+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
19551+ * dummy bytes in the datastream at an increased clock rate, with both sides of
19552+ * the link knowing how many bytes are fill.
19553+ *
19554+ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
19555+ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
19556+ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
19557+ * through an SDVO command.
19558+ *
19559+ * This register field has values of multiplication factor minus 1, with
19560+ * a maximum multiplier of 5 for SDVO.
19561+ */
19562+# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
19563+# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
19564+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
19565+ * This best be set to the default value (3) or the CRT won't work. No,
19566+ * I don't entirely understand what this does...
19567+ */
19568+# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
19569+# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
19570+/** @} */
19571+
19572+#define DPLL_TEST 0x606c
19573+# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
19574+# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
19575+# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
19576+# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
19577+# define DPLLB_TEST_N_BYPASS (1 << 19)
19578+# define DPLLB_TEST_M_BYPASS (1 << 18)
19579+# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
19580+# define DPLLA_TEST_N_BYPASS (1 << 3)
19581+# define DPLLA_TEST_M_BYPASS (1 << 2)
19582+# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
19583+
19584+#define ADPA 0x61100
19585+#define ADPA_DAC_ENABLE (1<<31)
19586+#define ADPA_DAC_DISABLE 0
19587+#define ADPA_PIPE_SELECT_MASK (1<<30)
19588+#define ADPA_PIPE_A_SELECT 0
19589+#define ADPA_PIPE_B_SELECT (1<<30)
19590+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
19591+#define ADPA_SETS_HVPOLARITY 0
19592+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
19593+#define ADPA_VSYNC_CNTL_ENABLE 0
19594+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
19595+#define ADPA_HSYNC_CNTL_ENABLE 0
19596+#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
19597+#define ADPA_VSYNC_ACTIVE_LOW 0
19598+#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
19599+#define ADPA_HSYNC_ACTIVE_LOW 0
19600+
19601+#define FPA0 0x06040
19602+#define FPA1 0x06044
19603+#define FPB0 0x06048
19604+#define FPB1 0x0604c
19605+# define FP_N_DIV_MASK 0x003f0000
19606+# define FP_N_DIV_SHIFT 16
19607+# define FP_M1_DIV_MASK 0x00003f00
19608+# define FP_M1_DIV_SHIFT 8
19609+# define FP_M2_DIV_MASK 0x0000003f
19610+# define FP_M2_DIV_SHIFT 0
19611+
19612+
19613+#define PORT_HOTPLUG_EN 0x61110
19614+# define SDVOB_HOTPLUG_INT_EN (1 << 26)
19615+# define SDVOC_HOTPLUG_INT_EN (1 << 25)
19616+# define TV_HOTPLUG_INT_EN (1 << 18)
19617+# define CRT_HOTPLUG_INT_EN (1 << 9)
19618+# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
19619+
19620+#define PORT_HOTPLUG_STAT 0x61114
19621+# define CRT_HOTPLUG_INT_STATUS (1 << 11)
19622+# define TV_HOTPLUG_INT_STATUS (1 << 10)
19623+# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
19624+# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
19625+# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
19626+# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
19627+# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
19628+# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
19629+
19630+#define SDVOB 0x61140
19631+#define SDVOC 0x61160
19632+#define SDVO_ENABLE (1 << 31)
19633+#define SDVO_PIPE_B_SELECT (1 << 30)
19634+#define SDVO_STALL_SELECT (1 << 29)
19635+#define SDVO_INTERRUPT_ENABLE (1 << 26)
19636+/**
19637+ * 915G/GM SDVO pixel multiplier.
19638+ *
19639+ * Programmed value is multiplier - 1, up to 5x.
19640+ *
19641+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
19642+ */
19643+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
19644+#define SDVO_PORT_MULTIPLY_SHIFT 23
19645+#define SDVO_PHASE_SELECT_MASK (15 << 19)
19646+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
19647+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
19648+#define SDVOC_GANG_MODE (1 << 16)
19649+#define SDVO_BORDER_ENABLE (1 << 7)
19650+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
19651+#define SDVO_DETECTED (1 << 2)
19652+/* Bits to be preserved when writing */
19653+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
19654+#define SDVOC_PRESERVE_MASK (1 << 17)
19655+
19656+/** @defgroup LVDS
19657+ * @{
19658+ */
19659+/**
19660+ * This register controls the LVDS output enable, pipe selection, and data
19661+ * format selection.
19662+ *
19663+ * All of the clock/data pairs are force powered down by power sequencing.
19664+ */
19665+#define LVDS 0x61180
19666+/**
19667+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
19668+ * the DPLL semantics change when the LVDS is assigned to that pipe.
19669+ */
19670+# define LVDS_PORT_EN (1 << 31)
19671+/** Selects pipe B for LVDS data. Must be set on pre-965. */
19672+# define LVDS_PIPEB_SELECT (1 << 30)
19673+
19674+/** Turns on border drawing to allow centered display. */
19675+# define LVDS_BORDER_EN (1 << 15)
19676+
19677+/**
19678+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
19679+ * pixel.
19680+ */
19681+# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
19682+# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
19683+# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
19684+/**
19685+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
19686+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
19687+ * on.
19688+ */
19689+# define LVDS_A3_POWER_MASK (3 << 6)
19690+# define LVDS_A3_POWER_DOWN (0 << 6)
19691+# define LVDS_A3_POWER_UP (3 << 6)
19692+/**
19693+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
19694+ * is set.
19695+ */
19696+# define LVDS_CLKB_POWER_MASK (3 << 4)
19697+# define LVDS_CLKB_POWER_DOWN (0 << 4)
19698+# define LVDS_CLKB_POWER_UP (3 << 4)
19699+
19700+/**
19701+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
19702+ * setting for whether we are in dual-channel mode. The B3 pair will
19703+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
19704+ */
19705+# define LVDS_B0B3_POWER_MASK (3 << 2)
19706+# define LVDS_B0B3_POWER_DOWN (0 << 2)
19707+# define LVDS_B0B3_POWER_UP (3 << 2)
19708+
19709+#define PIPEACONF 0x70008
19710+#define PIPEACONF_ENABLE (1<<31)
19711+#define PIPEACONF_DISABLE 0
19712+#define PIPEACONF_DOUBLE_WIDE (1<<30)
19713+#define I965_PIPECONF_ACTIVE (1<<30)
19714+#define PIPEACONF_SINGLE_WIDE 0
19715+#define PIPEACONF_PIPE_UNLOCKED 0
19716+#define PIPEACONF_PIPE_LOCKED (1<<25)
19717+#define PIPEACONF_PALETTE 0
19718+#define PIPEACONF_GAMMA (1<<24)
19719+#define PIPECONF_FORCE_BORDER (1<<25)
19720+#define PIPECONF_PROGRESSIVE (0 << 21)
19721+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
19722+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
19723+
19724+#define PIPEBCONF 0x71008
19725+#define PIPEBCONF_ENABLE (1<<31)
19726+#define PIPEBCONF_DISABLE 0
19727+#define PIPEBCONF_DOUBLE_WIDE (1<<30)
19728+#define PIPEBCONF_DISABLE 0
19729+#define PIPEBCONF_GAMMA (1<<24)
19730+#define PIPEBCONF_PALETTE 0
19731+
19732+#define PIPEBGCMAXRED 0x71010
19733+#define PIPEBGCMAXGREEN 0x71014
19734+#define PIPEBGCMAXBLUE 0x71018
19735+
19736+#define PIPEASTAT 0x70024
19737+#define PIPEBSTAT 0x71024
19738+#define PIPE_VBLANK_CLEAR (1 << 1)
19739+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18)
19740+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
19741+
19742+#define PIPEAFRAMEHIGH 0x70040
19743+#define PIPEAFRAMEPIXEL 0x70044
19744+#define PIPEBFRAMEHIGH 0x71040
19745+#define PIPEBFRAMEPIXEL 0x71044
19746+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
19747+#define PIPE_FRAME_HIGH_SHIFT 0
19748+#define PIPE_FRAME_LOW_MASK 0xff000000
19749+#define PIPE_FRAME_LOW_SHIFT 24
19750+#define PIPE_PIXEL_MASK 0x00ffffff
19751+#define PIPE_PIXEL_SHIFT 0
19752+
19753+#define DSPARB 0x70030
19754+#define DSPFW1 0x70034
19755+#define DSPFW2 0x70038
19756+#define DSPFW3 0x7003c
19757+#define DSPFW4 0x70050
19758+#define DSPFW5 0x70054
19759+#define DSPFW6 0x70058
19760+#define DSPCHICKENBIT 0x70400
19761+#define DSPACNTR 0x70180
19762+#define DSPBCNTR 0x71180
19763+#define DISPLAY_PLANE_ENABLE (1<<31)
19764+#define DISPLAY_PLANE_DISABLE 0
19765+#define DISPPLANE_GAMMA_ENABLE (1<<30)
19766+#define DISPPLANE_GAMMA_DISABLE 0
19767+#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
19768+#define DISPPLANE_8BPP (0x2<<26)
19769+#define DISPPLANE_15_16BPP (0x4<<26)
19770+#define DISPPLANE_16BPP (0x5<<26)
19771+#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
19772+#define DISPPLANE_32BPP (0x7<<26)
19773+#define DISPPLANE_STEREO_ENABLE (1<<25)
19774+#define DISPPLANE_STEREO_DISABLE 0
19775+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
19776+#define DISPPLANE_SEL_PIPE_A 0
19777+#define DISPPLANE_SEL_PIPE_B (1<<24)
19778+#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
19779+#define DISPPLANE_SRC_KEY_DISABLE 0
19780+#define DISPPLANE_LINE_DOUBLE (1<<20)
19781+#define DISPPLANE_NO_LINE_DOUBLE 0
19782+#define DISPPLANE_STEREO_POLARITY_FIRST 0
19783+#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
19784+/* plane B only */
19785+#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
19786+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
19787+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
19788+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
19789+
19790+#define DSPABASE 0x70184
19791+#define DSPALINOFF 0x70184
19792+#define DSPASTRIDE 0x70188
19793+
19794+#define DSPBBASE 0x71184
19795+#define DSPBLINOFF 0X71184
19796+#define DSPBADDR DSPBBASE
19797+#define DSPBSTRIDE 0x71188
19798+
19799+#define DSPAKEYVAL 0x70194
19800+#define DSPAKEYMASK 0x70198
19801+
19802+#define DSPAPOS 0x7018C /* reserved */
19803+#define DSPASIZE 0x70190
19804+#define DSPBPOS 0x7118C
19805+#define DSPBSIZE 0x71190
19806+
19807+#define DSPASURF 0x7019C
19808+#define DSPATILEOFF 0x701A4
19809+
19810+#define DSPBSURF 0x7119C
19811+#define DSPBTILEOFF 0x711A4
19812+
19813+#define VGACNTRL 0x71400
19814+# define VGA_DISP_DISABLE (1 << 31)
19815+# define VGA_2X_MODE (1 << 30)
19816+# define VGA_PIPE_B_SELECT (1 << 29)
19817+
19818+/*
19819+ * Overlay registers
19820+ */
19821+#define OV_OVADD 0x30000
19822+#define OV_OGAMC5 0x30010
19823+#define OV_OGAMC4 0x30014
19824+#define OV_OGAMC3 0x30018
19825+#define OV_OGAMC2 0x3001C
19826+#define OV_OGAMC1 0x30020
19827+#define OV_OGAMC0 0x30024
19828+
19829+/*
19830+ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
19831+ * of video memory available to the BIOS in SWF1.
19832+ */
19833+
19834+#define SWF0 0x71410
19835+#define SWF1 0x71414
19836+#define SWF2 0x71418
19837+#define SWF3 0x7141c
19838+#define SWF4 0x71420
19839+#define SWF5 0x71424
19840+#define SWF6 0x71428
19841+
19842+/*
19843+ * 855 scratch registers.
19844+ */
19845+#define SWF00 0x70410
19846+#define SWF01 0x70414
19847+#define SWF02 0x70418
19848+#define SWF03 0x7041c
19849+#define SWF04 0x70420
19850+#define SWF05 0x70424
19851+#define SWF06 0x70428
19852+
19853+#define SWF10 SWF0
19854+#define SWF11 SWF1
19855+#define SWF12 SWF2
19856+#define SWF13 SWF3
19857+#define SWF14 SWF4
19858+#define SWF15 SWF5
19859+#define SWF16 SWF6
19860+
19861+#define SWF30 0x72414
19862+#define SWF31 0x72418
19863+#define SWF32 0x7241c
19864+
19865+
19866+/*
19867+ * Palette registers
19868+ */
19869+#define PALETTE_A 0x0a000
19870+#define PALETTE_B 0x0a800
19871+
19872+#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
19873+#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
19874+#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
19875+#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
19876+#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
19877+
19878+
19879+/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G) */
19880+#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)
19881+#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
19882+#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
19883+#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
19884+
19885+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
19886+ (dev)->pci_device == 0x2982 || \
19887+ (dev)->pci_device == 0x2992 || \
19888+ (dev)->pci_device == 0x29A2 || \
19889+ (dev)->pci_device == 0x2A02 || \
19890+ (dev)->pci_device == 0x2A12)
19891+
19892+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
19893+
19894+#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
19895+ (dev)->pci_device == 0x29B2 || \
19896+ (dev)->pci_device == 0x29D2)
19897+
19898+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
19899+ IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev) || \
19900+ IS_MRST(dev))
19901+
19902+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
19903+ IS_I945GM(dev) || IS_I965GM(dev) || \
19904+ IS_POULSBO(dev) || IS_MRST(dev))
19905+
19906+/* Cursor A & B regs */
19907+#define CURACNTR 0x70080
19908+#define CURSOR_MODE_DISABLE 0x00
19909+#define CURSOR_MODE_64_32B_AX 0x07
19910+#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
19911+#define MCURSOR_GAMMA_ENABLE (1 << 26)
19912+#define CURABASE 0x70084
19913+#define CURAPOS 0x70088
19914+#define CURSOR_POS_MASK 0x007FF
19915+#define CURSOR_POS_SIGN 0x8000
19916+#define CURSOR_X_SHIFT 0
19917+#define CURSOR_Y_SHIFT 16
19918+#define CURBCNTR 0x700c0
19919+#define CURBBASE 0x700c4
19920+#define CURBPOS 0x700c8
19921+
19922+/*
19923+ * Interrupt Registers
19924+ */
19925+#define IER 0x020a0
19926+#define IIR 0x020a4
19927+#define IMR 0x020a8
19928+#define ISR 0x020ac
19929+
19930+/*
19931+ * MOORESTOWN delta registers
19932+ */
19933+#define MRST_DPLL_A 0x0f014
19934+#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
19935+#define MRST_FPA0 0x0f040
19936+#define MRST_FPA1 0x0f044
19937+#define MRST_PERF_MODE 0x020f4
19938+
19939+/* #define LVDS 0x61180 */
19940+# define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
19941+# define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
19942+# define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
19943+
19944+#define MIPI 0x61190
19945+# define MIPI_PORT_EN (1 << 31)
19946+/** Turns on border drawing to allow centered display. */
19947+# define MIPI_BORDER_EN (1 << 15)
19948+
19949+/* #define PP_CONTROL 0x61204 */
19950+# define POWER_DOWN_ON_RESET (1 << 1)
19951+
19952+/* #define PFIT_CONTROL 0x61230 */
19953+# define PFIT_PIPE_SELECT (3 << 29)
19954+# define PFIT_PIPE_SELECT_SHIFT (29)
19955+
19956+/* #define BLC_PWM_CTL 0x61254 */
19957+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
19958+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
19959+
19960+/* #define PIPEACONF 0x70008 */
19961+#define PIPEACONF_PIPE_STATE (1<<30)
19962+/* #define DSPACNTR 0x70180 */
19963+#if 0 /*FIXME JLIU7 need to define the following */
19964+1000 = 32 - bit RGBX(10 : 10 : 10 : 2)
19965+pixel format.Ignore alpha.1010 = BGRX 10 : 10 : 10 : 2 1100 = 64 - bit RGBX
19966+(16 : 16 : 16 : 16) 16 bit floating point pixel format.
19967+Ignore alpha.1110 = 32 - bit RGBX(8 : 8 : 8 : 8) pixel format.
19968+ Ignore
19969+ alpha.
19970+#endif /*FIXME JLIU7 need to define the following */
19971+
19972+#define MRST_DSPABASE 0x7019c
19973+
19974+/*
19975+ * MOORESTOWN reserved registers
19976+ */
19977+#if 0
19978+#define DSPAPOS 0x7018C /* reserved */
19979+#define DSPASIZE 0x70190
19980+#endif
19981+/*
19982+ * Moorestown registers.
19983+ */
19984+/*===========================================================================
19985+; General Constants
19986+;--------------------------------------------------------------------------*/
19987+#define BIT0 0x00000001
19988+#define BIT1 0x00000002
19989+#define BIT2 0x00000004
19990+#define BIT3 0x00000008
19991+#define BIT4 0x00000010
19992+#define BIT5 0x00000020
19993+#define BIT6 0x00000040
19994+#define BIT7 0x00000080
19995+#define BIT8 0x00000100
19996+#define BIT9 0x00000200
19997+#define BIT10 0x00000400
19998+#define BIT11 0x00000800
19999+#define BIT12 0x00001000
20000+#define BIT13 0x00002000
20001+#define BIT14 0x00004000
20002+#define BIT15 0x00008000
20003+#define BIT16 0x00010000
20004+#define BIT17 0x00020000
20005+#define BIT18 0x00040000
20006+#define BIT19 0x00080000
20007+#define BIT20 0x00100000
20008+#define BIT21 0x00200000
20009+#define BIT22 0x00400000
20010+#define BIT23 0x00800000
20011+#define BIT24 0x01000000
20012+#define BIT25 0x02000000
20013+#define BIT26 0x04000000
20014+#define BIT27 0x08000000
20015+#define BIT28 0x10000000
20016+#define BIT29 0x20000000
20017+#define BIT30 0x40000000
20018+#define BIT31 0x80000000
20019+/*===========================================================================
20020+; MIPI IP registers
20021+;--------------------------------------------------------------------------*/
20022+#define DEVICE_READY_REG 0xb000
20023+#define INTR_STAT_REG 0xb004
20024+#define RX_SOT_ERROR BIT0
20025+#define RX_SOT_SYNC_ERROR BIT1
20026+#define RX_ESCAPE_MODE_ENTRY_ERROR BIT3
20027+#define RX_LP_TX_SYNC_ERROR BIT4
20028+#define RX_HS_RECEIVE_TIMEOUT_ERROR BIT5
20029+#define RX_FALSE_CONTROL_ERROR BIT6
20030+#define RX_ECC_SINGLE_BIT_ERROR BIT7
20031+#define RX_ECC_MULTI_BIT_ERROR BIT8
20032+#define RX_CHECKSUM_ERROR BIT9
20033+#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT10
20034+#define RX_DSI_VC_ID_INVALID BIT11
20035+#define TX_FALSE_CONTROL_ERROR BIT12
20036+#define TX_ECC_SINGLE_BIT_ERROR BIT13
20037+#define TX_ECC_MULTI_BIT_ERROR BIT14
20038+#define TX_CHECKSUM_ERROR BIT15
20039+#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT16
20040+#define TX_DSI_VC_ID_INVALID BIT17
20041+#define HIGH_CONTENTION BIT18
20042+#define LOW_CONTENTION BIT19
20043+#define DPI_FIFO_UNDER_RUN BIT20
20044+#define HS_TX_TIMEOUT BIT21
20045+#define LP_RX_TIMEOUT BIT22
20046+#define TURN_AROUND_ACK_TIMEOUT BIT23
20047+#define ACK_WITH_NO_ERROR BIT24
20048+#define INTR_EN_REG 0xb008
20049+#define DSI_FUNC_PRG_REG 0xb00c
20050+#define DPI_CHANNEL_NUMBER_POS 0x03
20051+#define DBI_CHANNEL_NUMBER_POS 0x05
20052+#define FMT_DPI_POS 0x07
20053+#define FMT_DBI_POS 0x0A
20054+#define DBI_DATA_WIDTH_POS 0x0D
20055+#define HS_TX_TIMEOUT_REG 0xb010
20056+#define LP_RX_TIMEOUT_REG 0xb014
20057+#define TURN_AROUND_TIMEOUT_REG 0xb018
20058+#define DEVICE_RESET_REG 0xb01C
20059+#define DPI_RESOLUTION_REG 0xb020
20060+#define RES_V_POS 0x10
20061+#define DBI_RESOLUTION_REG 0xb024
20062+#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
20063+#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
20064+#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
20065+#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
20066+#define VERT_SYNC_PAD_COUNT_REG 0xb038
20067+#define VERT_BACK_PORCH_COUNT_REG 0xb03c
20068+#define VERT_FRONT_PORCH_COUNT_REG 0xb040
20069+#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
20070+#define DPI_CONTROL_REG 0xb048
20071+#define DPI_SHUT_DOWN BIT0
20072+#define DPI_TURN_ON BIT1
20073+#define DPI_COLOR_MODE_ON BIT2
20074+#define DPI_COLOR_MODE_OFF BIT3
20075+#define DPI_BACK_LIGHT_ON BIT4
20076+#define DPI_BACK_LIGHT_OFF BIT5
20077+#define DPI_LP BIT6
20078+#define DPI_DATA_REG 0xb04c
20079+#define DPI_BACK_LIGHT_ON_DATA 0x07
20080+#define DPI_BACK_LIGHT_OFF_DATA 0x17
20081+#define INIT_COUNT_REG 0xb050
20082+#define MAX_RET_PAK_REG 0xb054
20083+#define VIDEO_FMT_REG 0xb058
20084+#define EOT_DISABLE_REG 0xb05c
20085+#define LP_BYTECLK_REG 0xb060
20086+#define LP_GEN_DATA_REG 0xb064
20087+#define HS_GEN_DATA_REG 0xb068
20088+#define LP_GEN_CTRL_REG 0xb06C
20089+#define HS_GEN_CTRL_REG 0xb070
20090+#define GEN_FIFO_STAT_REG 0xb074
20091+#define HS_DATA_FIFO_FULL BIT0
20092+#define HS_DATA_FIFO_HALF_EMPTY BIT1
20093+#define HS_DATA_FIFO_EMPTY BIT2
20094+#define LP_DATA_FIFO_FULL BIT8
20095+#define LP_DATA_FIFO_HALF_EMPTY BIT9
20096+#define LP_DATA_FIFO_EMPTY BIT10
20097+#define HS_CTRL_FIFO_FULL BIT16
20098+#define HS_CTRL_FIFO_HALF_EMPTY BIT17
20099+#define HS_CTRL_FIFO_EMPTY BIT18
20100+#define LP_CTRL_FIFO_FULL BIT24
20101+#define LP_CTRL_FIFO_HALF_EMPTY BIT25
20102+#define LP_CTRL_FIFO_EMPTY BIT26
20103+/*===========================================================================
20104+; MIPI Adapter registers
20105+;--------------------------------------------------------------------------*/
20106+#define MIPI_CONTROL_REG 0xb104
20107+#define MIPI_2X_CLOCK_BITS (BIT0 | BIT1)
20108+#define MIPI_DATA_ADDRESS_REG 0xb108
20109+#define MIPI_DATA_LENGTH_REG 0xb10C
20110+#define MIPI_COMMAND_ADDRESS_REG 0xb110
20111+#define MIPI_COMMAND_LENGTH_REG 0xb114
20112+#define MIPI_READ_DATA_RETURN_REG0 0xb118
20113+#define MIPI_READ_DATA_RETURN_REG1 0xb11C
20114+#define MIPI_READ_DATA_RETURN_REG2 0xb120
20115+#define MIPI_READ_DATA_RETURN_REG3 0xb124
20116+#define MIPI_READ_DATA_RETURN_REG4 0xb128
20117+#define MIPI_READ_DATA_RETURN_REG5 0xb12C
20118+#define MIPI_READ_DATA_RETURN_REG6 0xb130
20119+#define MIPI_READ_DATA_RETURN_REG7 0xb134
20120+#define MIPI_READ_DATA_VALID_REG 0xb138
20121+/* DBI COMMANDS */
20122+#define soft_reset 0x01
20123+/* ************************************************************************* *\
20124+The display module performs a software reset.
20125+Registers are written with their SW Reset default values.
20126+\* ************************************************************************* */
20127+#define get_power_mode 0x0a
20128+/* ************************************************************************* *\
20129+The display module returns the current power mode
20130+\* ************************************************************************* */
20131+#define get_address_mode 0x0b
20132+/* ************************************************************************* *\
20133+The display module returns the current status.
20134+\* ************************************************************************* */
20135+#define get_pixel_format 0x0c
20136+/* ************************************************************************* *\
20137+This command gets the pixel format for the RGB image data
20138+used by the interface.
20139+\* ************************************************************************* */
20140+#define get_display_mode 0x0d
20141+/* ************************************************************************* *\
20142+The display module returns the Display Image Mode status.
20143+\* ************************************************************************* */
20144+#define get_signal_mode 0x0e
20145+/* ************************************************************************* *\
20146+The display module returns the Display Signal Mode.
20147+\* ************************************************************************* */
20148+#define get_diagnostic_result 0x0f
20149+/* ************************************************************************* *\
20150+The display module returns the self-diagnostic results following
20151+a Sleep Out command.
20152+\* ************************************************************************* */
20153+#define enter_sleep_mode 0x10
20154+/* ************************************************************************* *\
20155+This command causes the display module to enter the Sleep mode.
20156+In this mode, all unnecessary blocks inside the display module are disabled
20157+except interface communication. This is the lowest power mode
20158+the display module supports.
20159+\* ************************************************************************* */
20160+#define exit_sleep_mode 0x11
20161+/* ************************************************************************* *\
20162+This command causes the display module to exit Sleep mode.
20163+All blocks inside the display module are enabled.
20164+\* ************************************************************************* */
20165+#define enter_partial_mode 0x12
20166+/* ************************************************************************* *\
20167+This command causes the display module to enter the Partial Display Mode.
20168+The Partial Display Mode window is described by the set_partial_area command.
20169+\* ************************************************************************* */
20170+#define enter_normal_mode 0x13
20171+/* ************************************************************************* *\
20172+This command causes the display module to enter the Normal mode.
20173+Normal Mode is defined as Partial Display mode and Scroll mode are off
20174+\* ************************************************************************* */
20175+#define exit_invert_mode 0x20
20176+/* ************************************************************************* *\
20177+This command causes the display module to stop inverting the image data on
20178+the display device. The frame memory contents remain unchanged.
20179+No status bits are changed.
20180+\* ************************************************************************* */
20181+#define enter_invert_mode 0x21
20182+/* ************************************************************************* *\
20183+This command causes the display module to invert the image data only on
20184+the display device. The frame memory contents remain unchanged.
20185+No status bits are changed.
20186+\* ************************************************************************* */
20187+#define set_gamma_curve 0x26
20188+/* ************************************************************************* *\
20189+This command selects the desired gamma curve for the display device.
20190+Four fixed gamma curves are defined in section DCS spec.
20191+\* ************************************************************************* */
20192+#define set_display_off 0x28
20193+/* ************************************************************************* *\
20194+This command causes the display module to stop displaying the image data
20195+on the display device. The frame memory contents remain unchanged.
20196+No status bits are changed.
20197+\* ************************************************************************* */
20198+#define set_display_on 0x29
20199+/* ************************************************************************* *\
20200+This command causes the display module to start displaying the image data
20201+on the display device. The frame memory contents remain unchanged.
20202+No status bits are changed.
20203+\* ************************************************************************* */
20204+#define set_column_address 0x2a
20205+/* ************************************************************************* *\
20206+This command defines the column extent of the frame memory accessed by the
20207+hostprocessor with the read_memory_continue and write_memory_continue commands.
20208+No status bits are changed.
20209+\* ************************************************************************* */
20210+#define set_page_address 0x2b
20211+/* ************************************************************************* *\
20212+This command defines the page extent of the frame memory accessed by the host
20213+processor with the write_memory_continue and read_memory_continue command.
20214+No status bits are changed.
20215+\* ************************************************************************* */
20216+#define write_mem_start 0x2c
20217+/* ************************************************************************* *\
20218+This command transfers image data from the host processor to the display
20219+module s frame memory starting at the pixel location specified by
20220+preceding set_column_address and set_page_address commands.
20221+\* ************************************************************************* */
20222+#define set_partial_area 0x30
20223+/* ************************************************************************* *\
20224+This command defines the Partial Display mode s display area.
20225+There are two parameters associated with
20226+this command, the first defines the Start Row (SR) and the second the End Row
20227+(ER). SR and ER refer to the Frame Memory Line Pointer.
20228+\* ************************************************************************* */
20229+#define set_scroll_area 0x33
20230+/* ************************************************************************* *\
20231+This command defines the display modules Vertical Scrolling Area.
20232+\* ************************************************************************* */
20233+#define set_tear_off 0x34
20234+/* ************************************************************************* *\
20235+This command turns off the display modules Tearing Effect output signal on
20236+the TE signal line.
20237+\* ************************************************************************* */
20238+#define set_tear_on 0x35
20239+/* ************************************************************************* *\
20240+This command turns on the display modules Tearing Effect output signal
20241+on the TE signal line.
20242+\* ************************************************************************* */
20243+#define set_address_mode 0x36
20244+/* ************************************************************************* *\
20245+This command sets the data order for transfers from the host processor to
20246+display modules frame memory,bits B[7:5] and B3, and from the display
20247+modules frame memory to the display device, bits B[2:0] and B4.
20248+\* ************************************************************************* */
20249+#define set_scroll_start 0x37
20250+/* ************************************************************************* *\
20251+This command sets the start of the vertical scrolling area in the frame memory.
20252+The vertical scrolling area is fully defined when this command is used with
20253+the set_scroll_area command The set_scroll_start command has one parameter,
20254+the Vertical Scroll Pointer. The VSP defines the line in the frame memory
20255+that is written to the display device as the first line of the vertical
20256+scroll area.
20257+\* ************************************************************************* */
20258+#define exit_idle_mode 0x38
20259+/* ************************************************************************* *\
20260+This command causes the display module to exit Idle mode.
20261+\* ************************************************************************* */
20262+#define enter_idle_mode 0x39
20263+/* ************************************************************************* *\
20264+This command causes the display module to enter Idle Mode.
20265+In Idle Mode, color expression is reduced. Colors are shown on the display
20266+device using the MSB of each of the R, G and B color components in the frame
20267+memory
20268+\* ************************************************************************* */
20269+#define set_pixel_format 0x3a
20270+/* ************************************************************************* *\
20271+This command sets the pixel format for the RGB image data used by the interface.
20272+Bits D[6:4] DPI Pixel Format Definition
20273+Bits D[2:0] DBI Pixel Format Definition
20274+Bits D7 and D3 are not used.
20275+\* ************************************************************************* */
20276+#define write_mem_cont 0x3c
20277+/* ************************************************************************* *\
20278+This command transfers image data from the host processor to the display
20279+module's frame memory continuing from the pixel location following the
20280+previous write_memory_continue or write_memory_start command.
20281+\* ************************************************************************* */
20282+#define set_tear_scanline 0x44
20283+/* ************************************************************************* *\
20284+This command turns on the display modules Tearing Effect output signal on the
20285+TE signal line when the display module reaches line N.
20286+\* ************************************************************************* */
20287+#define get_scanline 0x45
20288+/* ************************************************************************* *\
20289+The display module returns the current scanline, N, used to update the
20290+display device. The total number of scanlines on a display device is
20291+defined as VSYNC + VBP + VACT + VFP.The first scanline is defined as
20292+the first line of V Sync and is denoted as Line 0.
20293+When in Sleep Mode, the value returned by get_scanline is undefined.
20294+\* ************************************************************************* */
20295+/* DCS Interface Pixel Formats */
20296+#define DCS_PIXEL_FORMAT_3BPP 0x1
20297+#define DCS_PIXEL_FORMAT_8BPP 0x2
20298+#define DCS_PIXEL_FORMAT_12BPP 0x3
20299+#define DCS_PIXEL_FORMAT_16BPP 0x5
20300+#define DCS_PIXEL_FORMAT_18BPP 0x6
20301+#define DCS_PIXEL_FORMAT_24BPP 0x7
20302+/* ONE PARAMETER READ DATA */
20303+#define addr_mode_data 0xfc
20304+#define diag_res_data 0x00
20305+#define disp_mode_data 0x23
20306+#define pxl_fmt_data 0x77
20307+#define pwr_mode_data 0x74
20308+#define sig_mode_data 0x00
20309+/* TWO PARAMETERS READ DATA */
20310+#define scanline_data1 0xff
20311+#define scanline_data2 0xff
20312+/* DPI PIXEL FORMATS */
20313+#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
20314+#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
20315+#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
20316+ * 666 FORMAT
20317+ */
20318+#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
20319+#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
20320+ * with Sync Pulse
20321+ */
20322+#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode
20323+ * with Sync events
20324+ */
20325+#define BURST_MODE 0x03 /* Burst Mode */
20326+#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
20327+#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
20328+#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
20329+#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
20330+#define DBI_NOT_SUPPORTED 0x00 /* command mode
20331+ * is not supported
20332+ */
20333+#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
20334+#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
20335+#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
20336+#define DBI_COMMAND_BUFFER_SIZE 0x120 /* Allocate at least
20337+ * 0x100 Byte with 32
20338+ * byte alignment
20339+ */
20340+#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least
20341+ * 0x100 Byte with 32
20342+ * byte alignment
20343+ */
20344+#define ALIGNMENT_32BYTE_MASK (~(BIT0|BIT1|BIT2|BIT3|BIT4))
20345+#define SKU_83 0x01
20346+#define SKU_100 0x02
20347+#define SKU_100L 0x04
20348+#define SKU_BYPASS 0x08
20349+#if 0
20350+/* ************************************************************************* *\
20351+DSI command data structure
20352+\* ************************************************************************* */
20353+union DSI_LONG_PACKET_HEADER {
20354+ u32 DSI_longPacketHeader;
20355+ struct {
20356+ u8 dataID;
20357+ u16 wordCount;
20358+ u8 ECC;
20359+ };
20360+#if 0 /*FIXME JLIU7 */
20361+ struct {
20362+ u8 DT:6;
20363+ u8 VC:2;
20364+ };
20365+#endif /*FIXME JLIU7 */
20366+};
20367+
20368+union MIPI_ADPT_CMD_LNG_REG {
20369+ u32 commnadLengthReg;
20370+ struct {
20371+ u8 command0;
20372+ u8 command1;
20373+ u8 command2;
20374+ u8 command3;
20375+ };
20376+};
20377+
20378+struct SET_COLUMN_ADDRESS_DATA {
20379+ u8 command;
20380+ u16 SC; /* Start Column */
20381+ u16 EC; /* End Column */
20382+};
20383+
20384+struct SET_PAGE_ADDRESS_DATA {
20385+ u8 command;
20386+ u16 SP; /* Start Page */
20387+ u16 EP; /* End Page */
20388+};
20389+#endif
20390diff --git a/drivers/gpu/drm/psb/psb_intel_sdvo.c b/drivers/gpu/drm/psb/psb_intel_sdvo.c
20391new file mode 100644
20392index 0000000..9f68d8d
20393--- /dev/null
20394+++ b/drivers/gpu/drm/psb/psb_intel_sdvo.c
20395@@ -0,0 +1,1350 @@
20396+/*
20397+ * Copyright © 2006-2007 Intel Corporation
20398+ *
20399+ * Permission is hereby granted, free of charge, to any person obtaining a
20400+ * copy of this software and associated documentation files (the "Software"),
20401+ * to deal in the Software without restriction, including without limitation
20402+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20403+ * and/or sell copies of the Software, and to permit persons to whom the
20404+ * Software is furnished to do so, subject to the following conditions:
20405+ *
20406+ * The above copyright notice and this permission notice (including the next
20407+ * paragraph) shall be included in all copies or substantial portions of the
20408+ * Software.
20409+ *
20410+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20411+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20412+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20413+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20414+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20415+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20416+ * DEALINGS IN THE SOFTWARE.
20417+ *
20418+ * Authors:
20419+ * Eric Anholt <eric@anholt.net>
20420+ */
20421+/*
20422+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
20423+ * Jesse Barnes <jesse.barnes@intel.com>
20424+ */
20425+
20426+#include <linux/i2c.h>
20427+#include <linux/delay.h>
20428+#include <drm/drm_crtc.h>
20429+#include "psb_intel_sdvo_regs.h"
20430+
20431+struct psb_intel_sdvo_priv {
20432+ struct psb_intel_i2c_chan *i2c_bus;
20433+ int slaveaddr;
20434+ int output_device;
20435+
20436+ u16 active_outputs;
20437+
20438+ struct psb_intel_sdvo_caps caps;
20439+ int pixel_clock_min, pixel_clock_max;
20440+
20441+ int save_sdvo_mult;
20442+ u16 save_active_outputs;
20443+ struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
20444+ struct psb_intel_sdvo_dtd save_output_dtd[16];
20445+ u32 save_SDVOX;
20446+ u8 in_out_map[4];
20447+
20448+ u8 by_input_wiring;
20449+ u32 active_device;
20450+};
20451+
20452+/**
20453+ * Writes the SDVOB or SDVOC with the given value, but always writes both
20454+ * SDVOB and SDVOC to work around apparent hardware issues (according to
20455+ * comments in the BIOS).
20456+ */
20457+void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output, u32 val)
20458+{
20459+ struct drm_device *dev = psb_intel_output->base.dev;
20460+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
20461+ u32 bval = val, cval = val;
20462+ int i;
20463+
20464+ if (sdvo_priv->output_device == SDVOB)
20465+ cval = REG_READ(SDVOC);
20466+ else
20467+ bval = REG_READ(SDVOB);
20468+ /*
20469+ * Write the registers twice for luck. Sometimes,
20470+ * writing them only once doesn't appear to 'stick'.
20471+ * The BIOS does this too. Yay, magic
20472+ */
20473+ for (i = 0; i < 2; i++) {
20474+ REG_WRITE(SDVOB, bval);
20475+ REG_READ(SDVOB);
20476+ REG_WRITE(SDVOC, cval);
20477+ REG_READ(SDVOC);
20478+ }
20479+}
20480+
20481+static bool psb_intel_sdvo_read_byte(struct psb_intel_output *psb_intel_output,
20482+ u8 addr, u8 *ch)
20483+{
20484+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
20485+ u8 out_buf[2];
20486+ u8 buf[2];
20487+ int ret;
20488+
20489+ struct i2c_msg msgs[] = {
20490+ {
20491+ .addr = sdvo_priv->i2c_bus->slave_addr,
20492+ .flags = 0,
20493+ .len = 1,
20494+ .buf = out_buf,
20495+ },
20496+ {
20497+ .addr = sdvo_priv->i2c_bus->slave_addr,
20498+ .flags = I2C_M_RD,
20499+ .len = 1,
20500+ .buf = buf,
20501+ }
20502+ };
20503+
20504+ out_buf[0] = addr;
20505+ out_buf[1] = 0;
20506+
20507+ ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
20508+ if (ret == 2) {
20509+ /* DRM_DEBUG("got back from addr %02X = %02x\n",
20510+ * out_buf[0], buf[0]);
20511+ */
20512+ *ch = buf[0];
20513+ return true;
20514+ }
20515+
20516+ DRM_DEBUG("i2c transfer returned %d\n", ret);
20517+ return false;
20518+}
20519+
20520+static bool psb_intel_sdvo_write_byte(struct psb_intel_output *psb_intel_output,
20521+ int addr, u8 ch)
20522+{
20523+ u8 out_buf[2];
20524+ struct i2c_msg msgs[] = {
20525+ {
20526+ .addr = psb_intel_output->i2c_bus->slave_addr,
20527+ .flags = 0,
20528+ .len = 2,
20529+ .buf = out_buf,
20530+ }
20531+ };
20532+
20533+ out_buf[0] = addr;
20534+ out_buf[1] = ch;
20535+
20536+ if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1)
20537+ return true;
20538+ return false;
20539+}
20540+
20541+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
20542+/** Mapping of command numbers to names, for debug output */
20543+const static struct _sdvo_cmd_name {
20544+ u8 cmd;
20545+ char *name;
20546+} sdvo_cmd_names[] = {
20547+SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
20548+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
20549+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
20550+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
20551+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
20552+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
20553+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
20554+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
20555+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
20556+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
20557+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
20558+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
20559+ SDVO_CMD_NAME_ENTRY
20560+ (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
20561+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
20562+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
20563+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
20564+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
20565+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
20566+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
20567+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
20568+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
20569+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
20570+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
20571+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
20572+ SDVO_CMD_NAME_ENTRY
20573+ (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
20574+ SDVO_CMD_NAME_ENTRY
20575+ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
20576+ SDVO_CMD_NAME_ENTRY
20577+ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
20578+ SDVO_CMD_NAME_ENTRY
20579+ (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
20580+ SDVO_CMD_NAME_ENTRY
20581+ (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
20582+ SDVO_CMD_NAME_ENTRY
20583+ (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
20584+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
20585+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
20586+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
20587+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
20588+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
20589+ SDVO_CMD_NAME_ENTRY
20590+ (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
20591+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),};
20592+
20593+#define SDVO_NAME(dev_priv) \
20594+ ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
20595+#define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv)
20596+
20597+static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output, u8 cmd,
20598+ void *args, int args_len)
20599+{
20600+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
20601+ int i;
20602+
20603+ if (1) {
20604+ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
20605+ for (i = 0; i < args_len; i++)
20606+ printk(KERN_INFO"%02X ", ((u8 *) args)[i]);
20607+ for (; i < 8; i++)
20608+ printk(" ");
20609+ for (i = 0;
20610+ i <
20611+ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]);
20612+ i++) {
20613+ if (cmd == sdvo_cmd_names[i].cmd) {
20614+ printk("(%s)", sdvo_cmd_names[i].name);
20615+ break;
20616+ }
20617+ }
20618+ if (i ==
20619+ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
20620+ printk("(%02X)", cmd);
20621+ printk("\n");
20622+ }
20623+
20624+ for (i = 0; i < args_len; i++) {
20625+ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_ARG_0 - i,
20626+ ((u8 *) args)[i]);
20627+ }
20628+
20629+ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd);
20630+}
20631+
20632+static const char *cmd_status_names[] = {
20633+ "Power on",
20634+ "Success",
20635+ "Not supported",
20636+ "Invalid arg",
20637+ "Pending",
20638+ "Target not specified",
20639+ "Scaling not supported"
20640+};
20641+
20642+static u8 psb_intel_sdvo_read_response(struct psb_intel_output *psb_intel_output,
20643+ void *response, int response_len)
20644+{
20645+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
20646+ int i;
20647+ u8 status;
20648+ u8 retry = 50;
20649+
20650+ while (retry--) {
20651+ /* Read the command response */
20652+ for (i = 0; i < response_len; i++) {
20653+ psb_intel_sdvo_read_byte(psb_intel_output,
20654+ SDVO_I2C_RETURN_0 + i,
20655+ &((u8 *) response)[i]);
20656+ }
20657+
20658+ /* read the return status */
20659+ psb_intel_sdvo_read_byte(psb_intel_output, SDVO_I2C_CMD_STATUS,
20660+ &status);
20661+
20662+ if (1) {
20663+ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
20664+ for (i = 0; i < response_len; i++)
20665+ printk(KERN_INFO"%02X ", ((u8 *) response)[i]);
20666+ for (; i < 8; i++)
20667+ printk(" ");
20668+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
20669+ printk(KERN_INFO"(%s)",
20670+ cmd_status_names[status]);
20671+ else
20672+ printk(KERN_INFO"(??? %d)", status);
20673+ printk("\n");
20674+ }
20675+
20676+ if (status != SDVO_CMD_STATUS_PENDING)
20677+ return status;
20678+
20679+ mdelay(50);
20680+ }
20681+
20682+ return status;
20683+}
20684+
20685+int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
20686+{
20687+ if (mode->clock >= 100000)
20688+ return 1;
20689+ else if (mode->clock >= 50000)
20690+ return 2;
20691+ else
20692+ return 4;
20693+}
20694+
20695+/**
20696+ * Don't check status code from this as it switches the bus back to the
20697+ * SDVO chips which defeats the purpose of doing a bus switch in the first
20698+ * place.
20699+ */
20700+void psb_intel_sdvo_set_control_bus_switch(struct psb_intel_output *psb_intel_output,
20701+ u8 target)
20702+{
20703+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
20704+ &target, 1);
20705+}
20706+
20707+static bool psb_intel_sdvo_set_target_input(struct psb_intel_output *psb_intel_output,
20708+					    bool target_0, bool target_1)
20709+{
20710+	struct psb_intel_sdvo_set_target_input_args targets = { 0 };
20711+	u8 status;
20712+
20713+	/* Selecting both inputs at once is unsupported; report failure
20714+	 * (was: returned SDVO_CMD_STATUS_NOTSUPP, a non-zero status that
20715+	 * reads as true from a bool function). */
20716+	if (target_0 && target_1)
20717+		return false;
20718+
20719+	if (target_1)
20720+		targets.target_1 = 1;
20721+
20722+	psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT,
20723+				 &targets, sizeof(targets));
20724+
20725+	status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20726+
20727+	return status == SDVO_CMD_STATUS_SUCCESS;
20728+}
20726+
20727+/**
20728+ * Return whether each input is trained.
20729+ *
20730+ * This function is making an assumption about the layout of the response,
20731+ * which should be checked against the docs.
20732+ */
20733+static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output
20734+ *psb_intel_output, bool *input_1,
20735+ bool *input_2)
20736+{
20737+ struct psb_intel_sdvo_get_trained_inputs_response response;
20738+ u8 status;
20739+
20740+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS,
20741+ NULL, 0);
20742+ status =
20743+ psb_intel_sdvo_read_response(psb_intel_output, &response,
20744+ sizeof(response));
20745+ if (status != SDVO_CMD_STATUS_SUCCESS)
20746+ return false;
20747+
20748+ *input_1 = response.input0_trained;
20749+ *input_2 = response.input1_trained;
20750+ return true;
20751+}
20752+
20753+static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output
20754+ *psb_intel_output, u16 *outputs)
20755+{
20756+ u8 status;
20757+
20758+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS,
20759+ NULL, 0);
20760+ status =
20761+ psb_intel_sdvo_read_response(psb_intel_output, outputs,
20762+ sizeof(*outputs));
20763+
20764+ return status == SDVO_CMD_STATUS_SUCCESS;
20765+}
20766+
20767+static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output
20768+ *psb_intel_output, u16 outputs)
20769+{
20770+ u8 status;
20771+
20772+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS,
20773+ &outputs, sizeof(outputs));
20774+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20775+ return status == SDVO_CMD_STATUS_SUCCESS;
20776+}
20777+
20778+static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output
20779+ *psb_intel_output, int mode)
20780+{
20781+ u8 status, state = SDVO_ENCODER_STATE_ON;
20782+
20783+ switch (mode) {
20784+ case DRM_MODE_DPMS_ON:
20785+ state = SDVO_ENCODER_STATE_ON;
20786+ break;
20787+ case DRM_MODE_DPMS_STANDBY:
20788+ state = SDVO_ENCODER_STATE_STANDBY;
20789+ break;
20790+ case DRM_MODE_DPMS_SUSPEND:
20791+ state = SDVO_ENCODER_STATE_SUSPEND;
20792+ break;
20793+ case DRM_MODE_DPMS_OFF:
20794+ state = SDVO_ENCODER_STATE_OFF;
20795+ break;
20796+ }
20797+
20798+ psb_intel_sdvo_write_cmd(psb_intel_output,
20799+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
20800+ sizeof(state));
20801+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20802+
20803+ return status == SDVO_CMD_STATUS_SUCCESS;
20804+}
20805+
20806+static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output
20807+ *psb_intel_output,
20808+ int *clock_min,
20809+ int *clock_max)
20810+{
20811+ struct psb_intel_sdvo_pixel_clock_range clocks;
20812+ u8 status;
20813+
20814+ psb_intel_sdvo_write_cmd(psb_intel_output,
20815+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL,
20816+ 0);
20817+
20818+ status =
20819+ psb_intel_sdvo_read_response(psb_intel_output, &clocks,
20820+ sizeof(clocks));
20821+
20822+ if (status != SDVO_CMD_STATUS_SUCCESS)
20823+ return false;
20824+
20825+ /* Convert the values from units of 10 kHz to kHz. */
20826+ *clock_min = clocks.min * 10;
20827+ *clock_max = clocks.max * 10;
20828+
20829+ return true;
20830+}
20831+
20832+static bool psb_intel_sdvo_set_target_output(struct psb_intel_output *psb_intel_output,
20833+ u16 outputs)
20834+{
20835+ u8 status;
20836+
20837+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT,
20838+ &outputs, sizeof(outputs));
20839+
20840+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20841+ return status == SDVO_CMD_STATUS_SUCCESS;
20842+}
20843+
20844+static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output,
20845+ u8 cmd, struct psb_intel_sdvo_dtd *dtd)
20846+{
20847+ u8 status;
20848+
20849+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0);
20850+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
20851+ sizeof(dtd->part1));
20852+ if (status != SDVO_CMD_STATUS_SUCCESS)
20853+ return false;
20854+
20855+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0);
20856+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
20857+ sizeof(dtd->part2));
20858+ if (status != SDVO_CMD_STATUS_SUCCESS)
20859+ return false;
20860+
20861+ return true;
20862+}
20863+
20864+static bool psb_intel_sdvo_get_input_timing(struct psb_intel_output *psb_intel_output,
20865+ struct psb_intel_sdvo_dtd *dtd)
20866+{
20867+ return psb_intel_sdvo_get_timing(psb_intel_output,
20868+ SDVO_CMD_GET_INPUT_TIMINGS_PART1,
20869+ dtd);
20870+}
20871+#if 0
20872+static bool psb_intel_sdvo_get_output_timing(struct psb_intel_output *psb_intel_output,
20873+ struct psb_intel_sdvo_dtd *dtd)
20874+{
20875+ return psb_intel_sdvo_get_timing(psb_intel_output,
20876+ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1,
20877+ dtd);
20878+}
20879+#endif
20880+static bool psb_intel_sdvo_set_timing(struct psb_intel_output *psb_intel_output,
20881+ u8 cmd, struct psb_intel_sdvo_dtd *dtd)
20882+{
20883+ u8 status;
20884+
20885+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1,
20886+ sizeof(dtd->part1));
20887+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20888+ if (status != SDVO_CMD_STATUS_SUCCESS)
20889+ return false;
20890+
20891+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2,
20892+ sizeof(dtd->part2));
20893+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20894+ if (status != SDVO_CMD_STATUS_SUCCESS)
20895+ return false;
20896+
20897+ return true;
20898+}
20899+
20900+static bool psb_intel_sdvo_set_input_timing(struct psb_intel_output *psb_intel_output,
20901+ struct psb_intel_sdvo_dtd *dtd)
20902+{
20903+ return psb_intel_sdvo_set_timing(psb_intel_output,
20904+ SDVO_CMD_SET_INPUT_TIMINGS_PART1,
20905+ dtd);
20906+}
20907+
20908+static bool psb_intel_sdvo_set_output_timing(struct psb_intel_output *psb_intel_output,
20909+ struct psb_intel_sdvo_dtd *dtd)
20910+{
20911+ return psb_intel_sdvo_set_timing(psb_intel_output,
20912+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1,
20913+ dtd);
20914+}
20915+
20916+#if 0
20917+static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_output
20918+ *psb_intel_output,
20919+ struct psb_intel_sdvo_dtd
20920+ *dtd)
20921+{
20922+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
20923+ u8 status;
20924+
20925+ psb_intel_sdvo_write_cmd(psb_intel_output,
20926+ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
20927+ NULL, 0);
20928+
20929+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
20930+ sizeof(dtd->part1));
20931+ if (status != SDVO_CMD_STATUS_SUCCESS)
20932+ return false;
20933+
20934+ psb_intel_sdvo_write_cmd(psb_intel_output,
20935+ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
20936+ NULL, 0);
20937+ status =
20938+ psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
20939+ sizeof(dtd->part2));
20940+ if (status != SDVO_CMD_STATUS_SUCCESS)
20941+ return false;
20942+
20943+ return true;
20944+}
20945+#endif
20946+
20947+static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output
20948+ *psb_intel_output)
20949+{
20950+ u8 response, status;
20951+
20952+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT,
20953+ NULL, 0);
20954+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1);
20955+
20956+ if (status != SDVO_CMD_STATUS_SUCCESS) {
20957+ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
20958+ return SDVO_CLOCK_RATE_MULT_1X;
20959+ } else {
20960+ DRM_DEBUG("Current clock rate multiplier: %d\n", response);
20961+ }
20962+
20963+ return response;
20964+}
20965+
20966+static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output
20967+ *psb_intel_output, u8 val)
20968+{
20969+ u8 status;
20970+
20971+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT,
20972+ &val, 1);
20973+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
20974+ if (status != SDVO_CMD_STATUS_SUCCESS)
20975+ return false;
20976+
20977+ return true;
20978+}
20979+
20980+static bool psb_sdvo_set_current_inoutmap(struct psb_intel_output * output, u32 in0outputmask,
20981+					  u32 in1outputmask)
20982+{
20983+	u8 byArgs[4];
20984+	u8 status;
20985+	int i;
20986+	struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
20987+
20988+	/* Make all fields of the args/ret to zero */
20989+	memset(byArgs, 0, sizeof(byArgs));
20990+
20991+	/* Fill up the arguement values; */
20992+	byArgs[0] = (u8) (in0outputmask & 0xFF);
20993+	byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
20994+	byArgs[2] = (u8) (in1outputmask & 0xFF);
20995+	byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
20996+
20997+
20998+	/* save inoutmap arg here (was: byArgs[0] copied into every slot,
20999+	 * corrupting bytes 1-3 of the map replayed by sdvo_restore) */
21000+	for(i=0; i<4; i++) {
21001+		sdvo_priv->in_out_map[i] = byArgs[i];
21002+	}
21003+
21004+
21005+	psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
21006+	status = psb_intel_sdvo_read_response(output, NULL, 0);
21007+
21008+	if (status != SDVO_CMD_STATUS_SUCCESS)
21009+		return false;
21010+	return true;
21011+}
21011+
21012+
21013+static void psb_intel_sdvo_set_iomap(struct psb_intel_output * output)
21014+{
21015+ u32 dwCurrentSDVOIn0 = 0;
21016+ u32 dwCurrentSDVOIn1 = 0;
21017+ u32 dwDevMask = 0;
21018+
21019+
21020+ struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
21021+
21022+ /* Please DO NOT change the following code. */
21023+ /* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
21024+ /* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
21025+ if (sdvo_priv->by_input_wiring & (SDVOB_IN0 | SDVOC_IN0)) {
21026+ switch (sdvo_priv->active_device) {
21027+ case SDVO_DEVICE_LVDS:
21028+ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
21029+ break;
21030+ case SDVO_DEVICE_TMDS:
21031+ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
21032+ break;
21033+ case SDVO_DEVICE_TV:
21034+ dwDevMask =
21035+ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 |
21036+ SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
21037+ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
21038+ break;
21039+ case SDVO_DEVICE_CRT:
21040+ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
21041+ break;
21042+ }
21043+ dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
21044+ } else if (sdvo_priv->by_input_wiring & (SDVOB_IN1 | SDVOC_IN1)) {
21045+ switch (sdvo_priv->active_device) {
21046+ case SDVO_DEVICE_LVDS:
21047+ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
21048+ break;
21049+ case SDVO_DEVICE_TMDS:
21050+ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
21051+ break;
21052+ case SDVO_DEVICE_TV:
21053+ dwDevMask =
21054+ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 |
21055+ SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
21056+ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
21057+ break;
21058+ case SDVO_DEVICE_CRT:
21059+ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
21060+ break;
21061+ }
21062+ dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
21063+ }
21064+
21065+ psb_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
21066+ dwCurrentSDVOIn1);
21067+}
21068+
21069+
21070+static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
21071+ struct drm_display_mode *mode,
21072+ struct drm_display_mode *adjusted_mode)
21073+{
21074+ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
21075+ * device will be told of the multiplier during mode_set.
21076+ */
21077+ adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
21078+ return true;
21079+}
21080+
21081+static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
21082+ struct drm_display_mode *mode,
21083+ struct drm_display_mode *adjusted_mode)
21084+{
21085+ struct drm_device *dev = encoder->dev;
21086+ struct drm_crtc *crtc = encoder->crtc;
21087+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
21088+ struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder);
21089+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
21090+ u16 width, height;
21091+ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
21092+ u16 h_sync_offset, v_sync_offset;
21093+ u32 sdvox;
21094+ struct psb_intel_sdvo_dtd output_dtd;
21095+ int sdvo_pixel_multiply;
21096+
21097+ if (!mode)
21098+ return;
21099+
21100+ psb_intel_sdvo_set_target_output(psb_intel_output, 0);
21101+
21102+ width = mode->crtc_hdisplay;
21103+ height = mode->crtc_vdisplay;
21104+
21105+ /* do some mode translations */
21106+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
21107+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
21108+
21109+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
21110+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
21111+
21112+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
21113+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
21114+
21115+ output_dtd.part1.clock = mode->clock / 10;
21116+ output_dtd.part1.h_active = width & 0xff;
21117+ output_dtd.part1.h_blank = h_blank_len & 0xff;
21118+ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
21119+ ((h_blank_len >> 8) & 0xf);
21120+ output_dtd.part1.v_active = height & 0xff;
21121+ output_dtd.part1.v_blank = v_blank_len & 0xff;
21122+ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
21123+ ((v_blank_len >> 8) & 0xf);
21124+
21125+ output_dtd.part2.h_sync_off = h_sync_offset;
21126+ output_dtd.part2.h_sync_width = h_sync_len & 0xff;
21127+ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
21128+ (v_sync_len & 0xf);
21129+ output_dtd.part2.sync_off_width_high =
21130+ ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) |
21131+ ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4);
21132+
21133+ output_dtd.part2.dtd_flags = 0x18;
21134+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
21135+ output_dtd.part2.dtd_flags |= 0x2;
21136+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
21137+ output_dtd.part2.dtd_flags |= 0x4;
21138+
21139+ output_dtd.part2.sdvo_flags = 0;
21140+ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
21141+ output_dtd.part2.reserved = 0;
21142+
21143+ /* Set the output timing to the screen */
21144+ psb_intel_sdvo_set_target_output(psb_intel_output,
21145+ sdvo_priv->active_outputs);
21146+
21147+ /* Set the input timing to the screen. Assume always input 0. */
21148+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
21149+
21150+ psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd);
21151+
21152+ /* We would like to use i830_sdvo_create_preferred_input_timing() to
21153+ * provide the device with a timing it can support, if it supports that
21154+ * feature. However, presumably we would need to adjust the CRTC to
21155+ * output the preferred timing, and we don't support that currently.
21156+ */
21157+#if 0
21158+ success =
21159+ psb_intel_sdvo_create_preferred_input_timing(psb_intel_output, clock,
21160+ width, height);
21161+ if (success) {
21162+ struct psb_intel_sdvo_dtd *input_dtd;
21163+
21164+ psb_intel_sdvo_get_preferred_input_timing(psb_intel_output,
21165+ &input_dtd);
21166+ psb_intel_sdvo_set_input_timing(psb_intel_output, &input_dtd);
21167+ }
21168+#else
21169+ psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd);
21170+#endif
21171+
21172+ switch (psb_intel_sdvo_get_pixel_multiplier(mode)) {
21173+ case 1:
21174+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
21175+ SDVO_CLOCK_RATE_MULT_1X);
21176+ break;
21177+ case 2:
21178+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
21179+ SDVO_CLOCK_RATE_MULT_2X);
21180+ break;
21181+ case 4:
21182+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
21183+ SDVO_CLOCK_RATE_MULT_4X);
21184+ break;
21185+ }
21186+
21187+ /* Set the SDVO control regs. */
21188+ if (0 /*IS_I965GM(dev) */) {
21189+ sdvox = SDVO_BORDER_ENABLE;
21190+ } else {
21191+ sdvox = REG_READ(sdvo_priv->output_device);
21192+ switch (sdvo_priv->output_device) {
21193+ case SDVOB:
21194+ sdvox &= SDVOB_PRESERVE_MASK;
21195+ break;
21196+ case SDVOC:
21197+ sdvox &= SDVOC_PRESERVE_MASK;
21198+ break;
21199+ }
21200+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
21201+ }
21202+ if (psb_intel_crtc->pipe == 1)
21203+ sdvox |= SDVO_PIPE_B_SELECT;
21204+
21205+ sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode);
21206+
21207+#if 0
21208+ if (IS_I965G(dev)) {
21209+ /* done in crtc_mode_set as the dpll_md reg must be written
21210+ * early */
21211+ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
21212+ /* done in crtc_mode_set as it lives inside the
21213+ * dpll register */
21214+ } else {
21215+ sdvox |=
21216+ (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
21217+ }
21218+#endif
21219+
21220+ psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox);
21221+
21222+ psb_intel_sdvo_set_iomap(psb_intel_output);
21223+}
21224+
21225+static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
21226+{
21227+ struct drm_device *dev = encoder->dev;
21228+ struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder);
21229+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
21230+ u32 temp;
21231+
21232+ if (mode != DRM_MODE_DPMS_ON) {
21233+ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
21234+ if (0)
21235+ psb_intel_sdvo_set_encoder_power_state(psb_intel_output,
21236+ mode);
21237+
21238+ if (mode == DRM_MODE_DPMS_OFF) {
21239+ temp = REG_READ(sdvo_priv->output_device);
21240+ if ((temp & SDVO_ENABLE) != 0) {
21241+ psb_intel_sdvo_write_sdvox(psb_intel_output,
21242+ temp &
21243+ ~SDVO_ENABLE);
21244+ }
21245+ }
21246+ } else {
21247+ bool input1, input2;
21248+ int i;
21249+ u8 status;
21250+
21251+ temp = REG_READ(sdvo_priv->output_device);
21252+ if ((temp & SDVO_ENABLE) == 0)
21253+ psb_intel_sdvo_write_sdvox(psb_intel_output,
21254+ temp | SDVO_ENABLE);
21255+ for (i = 0; i < 2; i++)
21256+ psb_intel_wait_for_vblank(dev);
21257+
21258+ status =
21259+ psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1,
21260+ &input2);
21261+
21262+
21263+ /* Warn if the device reported failure to sync.
21264+ * A lot of SDVO devices fail to notify of sync, but it's
21265+ * a given it the status is a success, we succeeded.
21266+ */
21267+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
21268+ DRM_DEBUG
21269+ ("First %s output reported failure to sync\n",
21270+ SDVO_NAME(sdvo_priv));
21271+ }
21272+
21273+ if (0)
21274+ psb_intel_sdvo_set_encoder_power_state(psb_intel_output,
21275+ mode);
21276+ psb_intel_sdvo_set_active_outputs(psb_intel_output,
21277+ sdvo_priv->active_outputs);
21278+ }
21279+ return;
21280+}
21281+
21282+static void psb_intel_sdvo_save(struct drm_connector *connector)
21283+{
21284+ struct drm_device *dev = connector->dev;
21285+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21286+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
21287+ /*int o;*/
21288+
21289+ sdvo_priv->save_sdvo_mult =
21290+ psb_intel_sdvo_get_clock_rate_mult(psb_intel_output);
21291+ psb_intel_sdvo_get_active_outputs(psb_intel_output,
21292+ &sdvo_priv->save_active_outputs);
21293+
21294+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
21295+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
21296+ psb_intel_sdvo_get_input_timing(psb_intel_output,
21297+ &sdvo_priv->save_input_dtd_1);
21298+ }
21299+
21300+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
21301+ psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
21302+ psb_intel_sdvo_get_input_timing(psb_intel_output,
21303+ &sdvo_priv->save_input_dtd_2);
21304+ }
21305+
21306+#if 0
21307+ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
21308+ u16 this_output = (1 << o);
21309+ if (sdvo_priv->caps.output_flags & this_output) {
21310+ psb_intel_sdvo_set_target_output(psb_intel_output,
21311+ this_output);
21312+ psb_intel_sdvo_get_output_timing(psb_intel_output,
21313+ &sdvo_priv->
21314+ save_output_dtd[o]);
21315+ }
21316+ }
21317+#endif
21318+
21319+ sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device);
21320+
21321+ /*TODO: save the in_out_map state*/
21322+}
21323+
21324+static void psb_intel_sdvo_restore(struct drm_connector *connector)
21325+{
21326+ struct drm_device *dev = connector->dev;
21327+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21328+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
21329+ /*int o;*/
21330+ int i;
21331+ bool input1, input2;
21332+ u8 status;
21333+
21334+ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
21335+
21336+#if 0
21337+ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
21338+ u16 this_output = (1 << o);
21339+ if (sdvo_priv->caps.output_flags & this_output) {
21340+ psb_intel_sdvo_set_target_output(psb_intel_output,
21341+ this_output);
21342+ psb_intel_sdvo_set_output_timing(psb_intel_output,
21343+ &sdvo_priv->
21344+ save_output_dtd[o]);
21345+ }
21346+ }
21347+#endif
21348+
21349+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
21350+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
21351+ psb_intel_sdvo_set_input_timing(psb_intel_output,
21352+ &sdvo_priv->save_input_dtd_1);
21353+ }
21354+
21355+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
21356+ psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
21357+ psb_intel_sdvo_set_input_timing(psb_intel_output,
21358+ &sdvo_priv->save_input_dtd_2);
21359+ }
21360+
21361+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
21362+ sdvo_priv->save_sdvo_mult);
21363+
21364+ REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
21365+
21366+ if (sdvo_priv->save_SDVOX & SDVO_ENABLE) {
21367+ for (i = 0; i < 2; i++)
21368+ psb_intel_wait_for_vblank(dev);
21369+ status =
21370+ psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1,
21371+ &input2);
21372+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
21373+ DRM_DEBUG
21374+ ("First %s output reported failure to sync\n",
21375+ SDVO_NAME(sdvo_priv));
21376+ }
21377+
21378+ psb_intel_sdvo_set_active_outputs(psb_intel_output,
21379+ sdvo_priv->save_active_outputs);
21380+
21381+ /*TODO: restore in_out_map*/
21382+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_IN_OUT_MAP, sdvo_priv->in_out_map, 4);
21383+ psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
21384+}
21385+
21386+static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
21387+ struct drm_display_mode *mode)
21388+{
21389+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21390+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
21391+
21392+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
21393+ return MODE_NO_DBLESCAN;
21394+
21395+ if (sdvo_priv->pixel_clock_min > mode->clock)
21396+ return MODE_CLOCK_LOW;
21397+
21398+ if (sdvo_priv->pixel_clock_max < mode->clock)
21399+ return MODE_CLOCK_HIGH;
21400+
21401+ return MODE_OK;
21402+}
21403+
21404+static bool psb_intel_sdvo_get_capabilities(struct psb_intel_output *psb_intel_output,
21405+ struct psb_intel_sdvo_caps *caps)
21406+{
21407+ u8 status;
21408+
21409+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL,
21410+ 0);
21411+ status =
21412+ psb_intel_sdvo_read_response(psb_intel_output, caps, sizeof(*caps));
21413+ if (status != SDVO_CMD_STATUS_SUCCESS)
21414+ return false;
21415+
21416+ return true;
21417+}
21418+
21419+struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
21420+{
21421+ struct drm_connector *connector = NULL;
21422+ struct psb_intel_output *iout = NULL;
21423+ struct psb_intel_sdvo_priv *sdvo;
21424+
21425+ /* find the sdvo connector */
21426+ list_for_each_entry(connector, &dev->mode_config.connector_list,
21427+ head) {
21428+ iout = to_psb_intel_output(connector);
21429+
21430+ if (iout->type != INTEL_OUTPUT_SDVO)
21431+ continue;
21432+
21433+ sdvo = iout->dev_priv;
21434+
21435+ if (sdvo->output_device == SDVOB && sdvoB)
21436+ return connector;
21437+
21438+ if (sdvo->output_device == SDVOC && !sdvoB)
21439+ return connector;
21440+
21441+ }
21442+
21443+ return NULL;
21444+}
21445+
21446+int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
21447+{
21448+ u8 response[2];
21449+ u8 status;
21450+ struct psb_intel_output *psb_intel_output;
21451+ DRM_DEBUG("\n");
21452+
21453+ if (!connector)
21454+ return 0;
21455+
21456+ psb_intel_output = to_psb_intel_output(connector);
21457+
21458+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
21459+ NULL, 0);
21460+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
21461+
21462+ if (response[0] != 0)
21463+ return 1;
21464+
21465+ return 0;
21466+}
21467+
21468+void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
21469+{
21470+ u8 response[2];
21471+ u8 status;
21472+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21473+
21474+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
21475+ NULL, 0);
21476+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
21477+
21478+ if (on) {
21479+ psb_intel_sdvo_write_cmd(psb_intel_output,
21480+ SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL,
21481+ 0);
21482+ status =
21483+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
21484+
21485+ psb_intel_sdvo_write_cmd(psb_intel_output,
21486+ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
21487+ &response, 2);
21488+ } else {
21489+ response[0] = 0;
21490+ response[1] = 0;
21491+ psb_intel_sdvo_write_cmd(psb_intel_output,
21492+ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
21493+ &response, 2);
21494+ }
21495+
21496+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
21497+ NULL, 0);
21498+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
21499+}
21500+
21501+static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector
21502+ *connector)
21503+{
21504+ u8 response[2];
21505+ u8 status;
21506+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21507+
21508+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS,
21509+ NULL, 0);
21510+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
21511+
21512+ DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
21513+ if ((response[0] != 0) || (response[1] != 0))
21514+ return connector_status_connected;
21515+ else
21516+ return connector_status_disconnected;
21517+}
21518+
21519+static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
21520+{
21521+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21522+
21523+ /* set the bus switch and get the modes */
21524+ psb_intel_sdvo_set_control_bus_switch(psb_intel_output,
21525+ SDVO_CONTROL_BUS_DDC2);
21526+ psb_intel_ddc_get_modes(psb_intel_output);
21527+
21528+ if (list_empty(&connector->probed_modes))
21529+ return 0;
21530+ return 1;
21531+#if 0
21532+ /* Mac mini hack. On this device, I get DDC through the analog, which
21533+ * load-detects as disconnected. I fail to DDC through the SDVO DDC,
21534+ * but it does load-detect as connected. So, just steal the DDC bits
21535+ * from analog when we fail at finding it the right way.
21536+ */
21537+ /* TODO */
21538+ return NULL;
21539+
21540+ return NULL;
21541+#endif
21542+}
21543+
21544+static void psb_intel_sdvo_destroy(struct drm_connector *connector)
21545+{
21546+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
21547+
21548+ if (psb_intel_output->i2c_bus)
21549+ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
21550+ drm_sysfs_connector_remove(connector);
21551+ drm_connector_cleanup(connector);
21552+ kfree(psb_intel_output);
21553+}
21554+
21555+static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
21556+ .dpms = psb_intel_sdvo_dpms,
21557+ .mode_fixup = psb_intel_sdvo_mode_fixup,
21558+ .prepare = psb_intel_encoder_prepare,
21559+ .mode_set = psb_intel_sdvo_mode_set,
21560+ .commit = psb_intel_encoder_commit,
21561+};
21562+
21563+static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
21564+ .dpms = drm_helper_connector_dpms,
21565+ .save = psb_intel_sdvo_save,
21566+ .restore = psb_intel_sdvo_restore,
21567+ .detect = psb_intel_sdvo_detect,
21568+ .fill_modes = drm_helper_probe_single_connector_modes,
21569+ .destroy = psb_intel_sdvo_destroy,
21570+};
21571+
21572+static const struct drm_connector_helper_funcs
21573+ psb_intel_sdvo_connector_helper_funcs = {
21574+ .get_modes = psb_intel_sdvo_get_modes,
21575+ .mode_valid = psb_intel_sdvo_mode_valid,
21576+ .best_encoder = psb_intel_best_encoder,
21577+};
21578+
21579+void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
21580+{
21581+ drm_encoder_cleanup(encoder);
21582+}
21583+
21584+static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
21585+ .destroy = psb_intel_sdvo_enc_destroy,
21586+};
21587+
21588+
21589+void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
21590+{
21591+ struct drm_connector *connector;
21592+ struct psb_intel_output *psb_intel_output;
21593+ struct psb_intel_sdvo_priv *sdvo_priv;
21594+ struct psb_intel_i2c_chan *i2cbus = NULL;
21595+ int connector_type;
21596+ u8 ch[0x40];
21597+ int i;
21598+ int encoder_type, output_id;
21599+
21600+ psb_intel_output =
21601+ kcalloc(sizeof(struct psb_intel_output) +
21602+ sizeof(struct psb_intel_sdvo_priv), 1, GFP_KERNEL);
21603+ if (!psb_intel_output)
21604+ return;
21605+
21606+ connector = &psb_intel_output->base;
21607+
21608+ drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs,
21609+ DRM_MODE_CONNECTOR_Unknown);
21610+ drm_connector_helper_add(connector,
21611+ &psb_intel_sdvo_connector_helper_funcs);
21612+ sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1);
21613+ psb_intel_output->type = INTEL_OUTPUT_SDVO;
21614+
21615+ connector->interlace_allowed = 0;
21616+ connector->doublescan_allowed = 0;
21617+
21618+ /* setup the DDC bus. */
21619+ if (output_device == SDVOB)
21620+ i2cbus =
21621+ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
21622+ else
21623+ i2cbus =
21624+ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
21625+
21626+ if (!i2cbus)
21627+ goto err_connector;
21628+
21629+ sdvo_priv->i2c_bus = i2cbus;
21630+
21631+ if (output_device == SDVOB) {
21632+ output_id = 1;
21633+ sdvo_priv->by_input_wiring = SDVOB_IN0;
21634+ sdvo_priv->i2c_bus->slave_addr = 0x38;
21635+ } else {
21636+ output_id = 2;
21637+ sdvo_priv->i2c_bus->slave_addr = 0x39;
21638+ }
21639+
21640+ sdvo_priv->output_device = output_device;
21641+ psb_intel_output->i2c_bus = i2cbus;
21642+ psb_intel_output->dev_priv = sdvo_priv;
21643+
21644+
21645+ /* Read the regs to test if we can talk to the device */
21646+ for (i = 0; i < 0x40; i++) {
21647+ if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
21648+ DRM_DEBUG("No SDVO device found on SDVO%c\n",
21649+ output_device == SDVOB ? 'B' : 'C');
21650+ goto err_i2c;
21651+ }
21652+ }
21653+
21654+ psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps);
21655+
21656+ memset(&sdvo_priv->active_outputs, 0,
21657+ sizeof(sdvo_priv->active_outputs));
21658+
21659+ /* TODO, CVBS, SVID, YPRPB & SCART outputs. */
21660+ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
21661+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
21662+ sdvo_priv->active_device = SDVO_DEVICE_CRT;
21663+ connector->display_info.subpixel_order =
21664+ SubPixelHorizontalRGB;
21665+ encoder_type = DRM_MODE_ENCODER_DAC;
21666+ connector_type = DRM_MODE_CONNECTOR_VGA;
21667+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
21668+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
21669+		sdvo_priv->active_device = SDVO_DEVICE_CRT;
21670+ connector->display_info.subpixel_order =
21671+ SubPixelHorizontalRGB;
21672+ encoder_type = DRM_MODE_ENCODER_DAC;
21673+ connector_type = DRM_MODE_CONNECTOR_VGA;
21674+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
21675+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
21676+ sdvo_priv->active_device = SDVO_DEVICE_TMDS;
21677+ connector->display_info.subpixel_order =
21678+ SubPixelHorizontalRGB;
21679+ encoder_type = DRM_MODE_ENCODER_TMDS;
21680+ connector_type = DRM_MODE_CONNECTOR_DVID;
21681+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
21682+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
21683+ sdvo_priv->active_device = SDVO_DEVICE_TMDS;
21684+ connector->display_info.subpixel_order =
21685+ SubPixelHorizontalRGB;
21686+ encoder_type = DRM_MODE_ENCODER_TMDS;
21687+ connector_type = DRM_MODE_CONNECTOR_DVID;
21688+ } else {
21689+ unsigned char bytes[2];
21690+
21691+ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
21692+ DRM_DEBUG
21693+ ("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
21694+ SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
21695+ goto err_i2c;
21696+ }
21697+
21698+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs,
21699+ encoder_type);
21700+ drm_encoder_helper_add(&psb_intel_output->enc,
21701+ &psb_intel_sdvo_helper_funcs);
21702+ connector->connector_type = connector_type;
21703+
21704+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
21705+ &psb_intel_output->enc);
21706+ drm_sysfs_connector_add(connector);
21707+
21708+ /* Set the input timing to the screen. Assume always input 0. */
21709+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
21710+
21711+ psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output,
21712+ &sdvo_priv->pixel_clock_min,
21713+ &sdvo_priv->
21714+ pixel_clock_max);
21715+
21716+
21717+ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
21718+ "clock range %dMHz - %dMHz, "
21719+ "input 1: %c, input 2: %c, "
21720+ "output 1: %c, output 2: %c\n",
21721+ SDVO_NAME(sdvo_priv),
21722+ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
21723+ sdvo_priv->caps.device_rev_id,
21724+ sdvo_priv->pixel_clock_min / 1000,
21725+ sdvo_priv->pixel_clock_max / 1000,
21726+ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
21727+ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
21728+ /* check currently supported outputs */
21729+ sdvo_priv->caps.output_flags &
21730+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
21731+ sdvo_priv->caps.output_flags &
21732+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
21733+
21734+ psb_intel_output->ddc_bus = i2cbus;
21735+
21736+ return;
21737+
21738+err_i2c:
21739+ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
21740+err_connector:
21741+ drm_connector_cleanup(connector);
21742+ kfree(psb_intel_output);
21743+
21744+ return;
21745+}
21746diff --git a/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h
21747new file mode 100644
21748index 0000000..bf3d72e
21749--- /dev/null
21750+++ b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h
21751@@ -0,0 +1,345 @@
21752+/*
21753+ * Copyright (c) 2008, Intel Corporation
21754+ *
21755+ * Permission is hereby granted, free of charge, to any person obtaining a
21756+ * copy of this software and associated documentation files (the "Software"),
21757+ * to deal in the Software without restriction, including without limitation
21758+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
21759+ * and/or sell copies of the Software, and to permit persons to whom the
21760+ * Software is furnished to do so, subject to the following conditions:
21761+ *
21762+ * The above copyright notice and this permission notice (including the next
21763+ * paragraph) shall be included in all copies or substantial portions of the
21764+ * Software.
21765+ *
21766+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21767+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21768+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21769+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21770+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21771+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21772+ * DEALINGS IN THE SOFTWARE.
21773+ *
21774+ * Authors:
21775+ * Eric Anholt <eric@anholt.net>
21776+ */
21777+
21778+/**
21779+ * @file SDVO command definitions and structures.
21780+ */
21781+
21782+#define SDVO_OUTPUT_FIRST (0)
21783+#define SDVO_OUTPUT_TMDS0 (1 << 0)
21784+#define SDVO_OUTPUT_RGB0 (1 << 1)
21785+#define SDVO_OUTPUT_CVBS0 (1 << 2)
21786+#define SDVO_OUTPUT_SVID0 (1 << 3)
21787+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
21788+#define SDVO_OUTPUT_SCART0 (1 << 5)
21789+#define SDVO_OUTPUT_LVDS0 (1 << 6)
21790+#define SDVO_OUTPUT_TMDS1 (1 << 8)
21791+#define SDVO_OUTPUT_RGB1 (1 << 9)
21792+#define SDVO_OUTPUT_CVBS1 (1 << 10)
21793+#define SDVO_OUTPUT_SVID1 (1 << 11)
21794+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
21795+#define SDVO_OUTPUT_SCART1 (1 << 13)
21796+#define SDVO_OUTPUT_LVDS1 (1 << 14)
21797+#define SDVO_OUTPUT_LAST (14)
21798+
21799+struct psb_intel_sdvo_caps {
21800+ u8 vendor_id;
21801+ u8 device_id;
21802+ u8 device_rev_id;
21803+ u8 sdvo_version_major;
21804+ u8 sdvo_version_minor;
21805+ unsigned int sdvo_inputs_mask:2;
21806+ unsigned int smooth_scaling:1;
21807+ unsigned int sharp_scaling:1;
21808+ unsigned int up_scaling:1;
21809+ unsigned int down_scaling:1;
21810+ unsigned int stall_support:1;
21811+ unsigned int pad:1;
21812+ u16 output_flags;
21813+} __attribute__ ((packed));
21814+
21815+/** This matches the EDID DTD structure, more or less */
21816+struct psb_intel_sdvo_dtd {
21817+ struct {
21818+ u16 clock; /**< pixel clock, in 10kHz units */
21819+ u8 h_active; /**< lower 8 bits (pixels) */
21820+ u8 h_blank; /**< lower 8 bits (pixels) */
21821+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
21822+ u8 v_active; /**< lower 8 bits (lines) */
21823+ u8 v_blank; /**< lower 8 bits (lines) */
21824+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
21825+ } part1;
21826+
21827+ struct {
21828+ u8 h_sync_off;
21829+ /**< lower 8 bits, from hblank start */
21830+ u8 h_sync_width;/**< lower 8 bits (pixels) */
21831+ /** lower 4 bits each vsync offset, vsync width */
21832+ u8 v_sync_off_width;
21833+ /**
21834+ * 2 high bits of hsync offset, 2 high bits of hsync width,
21835+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
21836+ */
21837+ u8 sync_off_width_high;
21838+ u8 dtd_flags;
21839+ u8 sdvo_flags;
21840+ /** bits 6-7 of vsync offset at bits 6-7 */
21841+ u8 v_sync_off_high;
21842+ u8 reserved;
21843+ } part2;
21844+} __attribute__ ((packed));
21845+
21846+struct psb_intel_sdvo_pixel_clock_range {
21847+ u16 min; /**< pixel clock, in 10kHz units */
21848+ u16 max; /**< pixel clock, in 10kHz units */
21849+} __attribute__ ((packed));
21850+
21851+struct psb_intel_sdvo_preferred_input_timing_args {
21852+ u16 clock;
21853+ u16 width;
21854+ u16 height;
21855+} __attribute__ ((packed));
21856+
21857+/* I2C registers for SDVO */
21858+#define SDVO_I2C_ARG_0 0x07
21859+#define SDVO_I2C_ARG_1 0x06
21860+#define SDVO_I2C_ARG_2 0x05
21861+#define SDVO_I2C_ARG_3 0x04
21862+#define SDVO_I2C_ARG_4 0x03
21863+#define SDVO_I2C_ARG_5 0x02
21864+#define SDVO_I2C_ARG_6 0x01
21865+#define SDVO_I2C_ARG_7 0x00
21866+#define SDVO_I2C_OPCODE 0x08
21867+#define SDVO_I2C_CMD_STATUS 0x09
21868+#define SDVO_I2C_RETURN_0 0x0a
21869+#define SDVO_I2C_RETURN_1 0x0b
21870+#define SDVO_I2C_RETURN_2 0x0c
21871+#define SDVO_I2C_RETURN_3 0x0d
21872+#define SDVO_I2C_RETURN_4 0x0e
21873+#define SDVO_I2C_RETURN_5 0x0f
21874+#define SDVO_I2C_RETURN_6 0x10
21875+#define SDVO_I2C_RETURN_7 0x11
21876+#define SDVO_I2C_VENDOR_BEGIN 0x20
21877+
21878+/* Status results */
21879+#define SDVO_CMD_STATUS_POWER_ON 0x0
21880+#define SDVO_CMD_STATUS_SUCCESS 0x1
21881+#define SDVO_CMD_STATUS_NOTSUPP 0x2
21882+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
21883+#define SDVO_CMD_STATUS_PENDING 0x4
21884+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
21885+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
21886+
21887+/* SDVO commands, argument/result registers */
21888+
21889+#define SDVO_CMD_RESET 0x01
21890+
21891+/** Returns a struct psb_intel_sdvo_caps */
21892+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
21893+
21894+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
21895+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
21896+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
21897+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
21898+
21899+/**
21900+ * Reports which inputs are trained (managed to sync).
21901+ *
21902+ * Devices must have trained within 2 vsyncs of a mode change.
21903+ */
21904+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
21905+struct psb_intel_sdvo_get_trained_inputs_response {
21906+ unsigned int input0_trained:1;
21907+ unsigned int input1_trained:1;
21908+ unsigned int pad:6;
21909+} __attribute__ ((packed));
21910+
21911+/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */
21912+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
21913+
21914+/**
21915+ * Sets the current set of active outputs.
21916+ *
21917+ * Takes a struct psb_intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
21918+ * on multi-output devices.
21919+ */
21920+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
21921+
21922+/**
21923+ * Returns the current mapping of SDVO inputs to outputs on the device.
21924+ *
21925+ * Returns two struct psb_intel_sdvo_output_flags structures.
21926+ */
21927+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
21928+
21929+/**
21930+ * Sets the current mapping of SDVO inputs to outputs on the device.
21931+ *
21932+ * Takes two struct psb_intel_sdvo_output_flags structures.
21933+ */
21934+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
21935+
21936+/**
21937+ * Returns a struct psb_intel_sdvo_output_flags of attached displays.
21938+ */
21939+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
21940+
21941+/**
21942+ * Returns a struct psb_intel_sdvo_output_flags of displays supporting hot plugging.
21943+ */
21944+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
21945+
21946+/**
21947+ * Takes a struct psb_intel_sdvo_output_flags.
21948+ */
21949+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
21950+
21951+/**
21952+ * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug
21953+ * interrupts enabled.
21954+ */
21955+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
21956+
21957+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
21958+struct psb_intel_sdvo_get_interrupt_event_source_response {
21959+ u16 interrupt_status;
21960+ unsigned int ambient_light_interrupt:1;
21961+ unsigned int pad:7;
21962+} __attribute__ ((packed));
21963+
21964+/**
21965+ * Selects which input is affected by future input commands.
21966+ *
21967+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
21968+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
21969+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
21970+ */
21971+#define SDVO_CMD_SET_TARGET_INPUT 0x10
21972+struct psb_intel_sdvo_set_target_input_args {
21973+ unsigned int target_1:1;
21974+ unsigned int pad:7;
21975+} __attribute__ ((packed));
21976+
21977+/**
21978+ * Takes a struct psb_intel_sdvo_output_flags of which outputs are targeted by
21979+ * future output commands.
21980+ *
21981+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
21982+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
21983+ */
21984+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
21985+
21986+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
21987+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
21988+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
21989+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
21990+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
21991+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
21992+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
21993+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
21994+/* Part 1 */
21995+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
21996+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
21997+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
21998+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
21999+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
22000+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
22001+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
22002+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
22003+/* Part 2 */
22004+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
22005+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
22006+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
22007+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
22008+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
22009+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
22010+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
22011+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
22012+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
22013+# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
22014+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
22015+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
22016+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
22017+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
22018+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
22019+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
22020+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
22021+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
22022+
22023+/**
22024+ * Generates a DTD based on the given width, height, and flags.
22025+ *
22026+ * This will be supported by any device supporting scaling or interlaced
22027+ * modes.
22028+ */
22029+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
22030+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
22031+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
22032+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
22033+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
22034+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
22035+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
22036+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
22037+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
22038+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
22039+
22040+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
22041+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
22042+
22043+/** Returns a struct psb_intel_sdvo_pixel_clock_range */
22044+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
22045+/** Returns a struct psb_intel_sdvo_pixel_clock_range */
22046+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
22047+
22048+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
22049+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
22050+
22051+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
22052+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
22053+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
22054+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
22055+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
22056+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
22057+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
22058+
22059+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
22060+
22061+#define SDVO_CMD_GET_TV_FORMAT 0x28
22062+
22063+#define SDVO_CMD_SET_TV_FORMAT 0x29
22064+
22065+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
22066+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
22067+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
22068+# define SDVO_ENCODER_STATE_ON (1 << 0)
22069+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
22070+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
22071+# define SDVO_ENCODER_STATE_OFF (1 << 3)
22072+
22073+#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
22074+
22075+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
22076+# define SDVO_CONTROL_BUS_PROM 0x0
22077+# define SDVO_CONTROL_BUS_DDC1 0x1
22078+# define SDVO_CONTROL_BUS_DDC2 0x2
22079+# define SDVO_CONTROL_BUS_DDC3 0x3
22080+
22081+/* SDVO Bus & SDVO Inputs wiring details*/
22082+/* Bit 0: Is SDVOB connected to In0 (1 = yes, 0 = no) */
22083+/* Bit 1: Is SDVOB connected to In1 (1 = yes, 0 = no) */
22084+/* Bit 2: Is SDVOC connected to In0 (1 = yes, 0 = no) */
22085+/* Bit 3: Is SDVOC connected to In1 (1 = yes, 0 = no) */
22086+#define SDVOB_IN0 0x01
22087+#define SDVOB_IN1 0x02
22088+#define SDVOC_IN0 0x04
22089+#define SDVOC_IN1 0x08
22090+
22091+#define SDVO_DEVICE_NONE 0x00
22092+#define SDVO_DEVICE_CRT 0x01
22093+#define SDVO_DEVICE_TV 0x02
22094+#define SDVO_DEVICE_LVDS 0x04
22095+#define SDVO_DEVICE_TMDS 0x08
22096+
22097diff --git a/drivers/gpu/drm/psb/psb_irq.c b/drivers/gpu/drm/psb/psb_irq.c
22098new file mode 100644
22099index 0000000..983e2ad
22100--- /dev/null
22101+++ b/drivers/gpu/drm/psb/psb_irq.c
22102@@ -0,0 +1,621 @@
22103+/**************************************************************************
22104+ * Copyright (c) 2007, Intel Corporation.
22105+ * All Rights Reserved.
22106+ *
22107+ * This program is free software; you can redistribute it and/or modify it
22108+ * under the terms and conditions of the GNU General Public License,
22109+ * version 2, as published by the Free Software Foundation.
22110+ *
22111+ * This program is distributed in the hope it will be useful, but WITHOUT
22112+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22113+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22114+ * more details.
22115+ *
22116+ * You should have received a copy of the GNU General Public License along with
22117+ * this program; if not, write to the Free Software Foundation, Inc.,
22118+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22119+ *
22120+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
22121+ * develop this driver.
22122+ *
22123+ **************************************************************************/
22124+/*
22125+ */
22126+
22127+#include <drm/drmP.h>
22128+#include "psb_drv.h"
22129+#include "psb_reg.h"
22130+#include "psb_msvdx.h"
22131+#include "lnc_topaz.h"
22132+#include "psb_intel_reg.h"
22133+#include "psb_powermgmt.h"
22134+
22135+/*
22136+ * Video display controller interrupt.
22137+ */
22138+
22139+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
22140+{
22141+ struct drm_psb_private *dev_priv =
22142+ (struct drm_psb_private *) dev->dev_private;
22143+
22144+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
22145+#ifdef PSB_FIXME
22146+ atomic_inc(&dev->vbl_received);
22147+#endif
22148+ PSB_WVDC32(PIPE_VBLANK_INTERRUPT_ENABLE |
22149+ PIPE_VBLANK_CLEAR, PIPEASTAT);
22150+ drm_handle_vblank(dev, 0);
22151+ }
22152+
22153+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
22154+#ifdef PSB_FIXME
22155+ atomic_inc(&dev->vbl_received2);
22156+#endif
22157+ PSB_WVDC32(PIPE_VBLANK_INTERRUPT_ENABLE |
22158+ PIPE_VBLANK_CLEAR, PIPEBSTAT);
22159+ drm_handle_vblank(dev, 1);
22160+ }
22161+}
22162+
22163+/*
22164+ * SGX interrupt source 1.
22165+ */
22166+
22167+static void psb_sgx_interrupt(struct drm_device *dev, uint32_t sgx_stat,
22168+ uint32_t sgx_stat2)
22169+{
22170+ struct drm_psb_private *dev_priv =
22171+ (struct drm_psb_private *) dev->dev_private;
22172+
22173+ if (sgx_stat & _PSB_CE_TWOD_COMPLETE) {
22174+ DRM_WAKEUP(&dev_priv->event_2d_queue);
22175+ psb_fence_handler(dev, PSB_ENGINE_2D);
22176+ }
22177+
22178+ if (unlikely(sgx_stat2 & _PSB_CE2_BIF_REQUESTER_FAULT))
22179+ psb_print_pagefault(dev_priv);
22180+
22181+ psb_scheduler_handler(dev_priv, sgx_stat);
22182+}
22183+
22184+
22185+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
22186+{
22187+ struct drm_device *dev = (struct drm_device *) arg;
22188+ struct drm_psb_private *dev_priv =
22189+ (struct drm_psb_private *) dev->dev_private;
22190+
22191+ uint32_t vdc_stat,msvdx_int = 0, topaz_int = 0;
22192+ uint32_t sgx_stat = 0;
22193+ uint32_t sgx_stat2 = 0;
22194+ uint32_t sgx_int = 0;
22195+ int handled = 0;
22196+
22197+ spin_lock(&dev_priv->irqmask_lock);
22198+
22199+ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
22200+
22201+ if (vdc_stat & _PSB_IRQ_SGX_FLAG) {
22202+ PSB_DEBUG_IRQ("Got SGX interrupt\n");
22203+ sgx_int = 1;
22204+ }
22205+ if (vdc_stat & _PSB_IRQ_MSVDX_FLAG) {
22206+ PSB_DEBUG_IRQ("Got MSVDX interrupt\n");
22207+ msvdx_int = 1;
22208+ }
22209+
22210+ if (vdc_stat & _LNC_IRQ_TOPAZ_FLAG) {
22211+		PSB_DEBUG_IRQ("Got TOPAZ interrupt\n");
22212+ topaz_int = 1;
22213+ }
22214+ if (sgx_int && powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22215+ sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
22216+ sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
22217+
22218+ sgx_stat2 &= dev_priv->sgx2_irq_mask;
22219+ sgx_stat &= dev_priv->sgx_irq_mask;
22220+ PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
22221+ PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
22222+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
22223+ } else if (unlikely(PSB_D_PM & drm_psb_debug)) {
22224+ if (sgx_int)
22225+ PSB_DEBUG_PM("sgx int in down mode\n");
22226+ }
22227+ vdc_stat &= dev_priv->vdc_irq_mask;
22228+ spin_unlock(&dev_priv->irqmask_lock);
22229+
22230+ if (msvdx_int &&
22231+ powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND)) {
22232+ uint32_t msvdx_stat = 0;
22233+
22234+ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
22235+ psb_msvdx_interrupt(dev, msvdx_stat);
22236+ handled = 1;
22237+ }
22238+
22239+ if (IS_MRST(dev) && topaz_int &&
22240+ powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_ENC_ISLAND)) {
22241+ /* sometimes, even topaz power down, IIR
22242+ * may still have topaz bit set
22243+ */
22244+ uint32_t topaz_stat = 0;
22245+
22246+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT,&topaz_stat);
22247+ lnc_topaz_interrupt (dev, topaz_stat);
22248+ handled = 1;
22249+ }
22250+
22251+ if (vdc_stat && powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)) {
22252+ psb_vdc_interrupt(dev, vdc_stat);
22253+ handled = 1;
22254+ }
22255+
22256+ if (sgx_stat || sgx_stat2) {
22257+ psb_sgx_interrupt(dev, sgx_stat, sgx_stat2);
22258+ handled = 1;
22259+ }
22260+
22261+ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
22262+ (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
22263+ DRM_READMEMORYBARRIER();
22264+
22265+ if (!handled)
22266+ return IRQ_NONE;
22267+
22268+
22269+ return IRQ_HANDLED;
22270+}
22271+
22272+void psb_irq_preinstall(struct drm_device *dev)
22273+{
22274+ psb_irq_preinstall_islands(dev, PSB_ALL_ISLANDS);
22275+}
22276+
22277+void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands)
22278+{
22279+ struct drm_psb_private *dev_priv =
22280+ (struct drm_psb_private *) dev->dev_private;
22281+ unsigned long irqflags;
22282+
22283+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22284+
22285+ if (hw_islands & PSB_DISPLAY_ISLAND) {
22286+ if (powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)) {
22287+ if (IS_POULSBO(dev))
22288+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
22289+ if (dev->vblank_enabled[0])
22290+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
22291+ if (dev->vblank_enabled[1])
22292+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
22293+ }
22294+ }
22295+
22296+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
22297+ if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22298+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
22299+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
22300+
22301+ dev_priv->sgx_irq_mask = _PSB_CE_PIXELBE_END_RENDER |
22302+ _PSB_CE_DPM_3D_MEM_FREE |
22303+ _PSB_CE_TA_FINISHED |
22304+ _PSB_CE_DPM_REACHED_MEM_THRESH |
22305+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
22306+ _PSB_CE_DPM_OUT_OF_MEMORY_MT |
22307+ _PSB_CE_TA_TERMINATE | _PSB_CE_SW_EVENT;
22308+
22309+ dev_priv->sgx2_irq_mask = _PSB_CE2_BIF_REQUESTER_FAULT;
22310+ dev_priv->vdc_irq_mask |= _PSB_IRQ_SGX_FLAG;
22311+ }
22312+ }
22313+
22314+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
22315+ if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND))
22316+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
22317+
22318+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
22319+ if (IS_MRST(dev) && powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_ENC_ISLAND))
22320+ dev_priv->vdc_irq_mask |= _LNC_IRQ_TOPAZ_FLAG;
22321+
22322+ /*This register is safe even if display island is off*/
22323+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
22324+
22325+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22326+}
22327+
22328+int psb_irq_postinstall(struct drm_device *dev)
22329+{
22330+ return psb_irq_postinstall_islands(dev, PSB_ALL_ISLANDS);
22331+}
22332+
22333+int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands)
22334+{
22335+ struct drm_psb_private *dev_priv =
22336+ (struct drm_psb_private *) dev->dev_private;
22337+ unsigned long irqflags;
22338+
22339+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22340+
22341+ /*This register is safe even if display island is off*/
22342+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
22343+
22344+ if (hw_islands & PSB_DISPLAY_ISLAND) {
22345+ if (powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)) {
22346+ if (IS_POULSBO(dev))
22347+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
22348+ if (dev->vblank_enabled[0]) {
22349+ if (IS_MRST(dev))
22350+ psb_enable_pipestat(dev_priv, 0,
22351+ PIPE_START_VBLANK_INTERRUPT_ENABLE |
22352+ PIPE_VBLANK_INTERRUPT_ENABLE);
22353+ else
22354+ psb_enable_pipestat(dev_priv, 0,
22355+ PIPE_VBLANK_INTERRUPT_ENABLE);
22356+ } else
22357+ psb_disable_pipestat(dev_priv, 0,
22358+ PIPE_VBLANK_INTERRUPT_ENABLE |
22359+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
22360+
22361+ if (dev->vblank_enabled[1]) {
22362+ if (IS_MRST(dev))
22363+ psb_enable_pipestat(dev_priv, 1,
22364+ PIPE_START_VBLANK_INTERRUPT_ENABLE |
22365+ PIPE_VBLANK_INTERRUPT_ENABLE);
22366+ else
22367+ psb_enable_pipestat(dev_priv, 1,
22368+ PIPE_VBLANK_INTERRUPT_ENABLE);
22369+ } else
22370+ psb_disable_pipestat(dev_priv, 1,
22371+ PIPE_VBLANK_INTERRUPT_ENABLE |
22372+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
22373+ }
22374+ }
22375+
22376+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
22377+ if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22378+ PSB_WSGX32(dev_priv->sgx2_irq_mask,
22379+ PSB_CR_EVENT_HOST_ENABLE2);
22380+ PSB_WSGX32(dev_priv->sgx_irq_mask,
22381+ PSB_CR_EVENT_HOST_ENABLE);
22382+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
22383+ }
22384+ }
22385+
22386+ if (IS_MRST(dev))
22387+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
22388+ if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_ENC_ISLAND))
22389+ lnc_topaz_enableirq(dev);
22390+
22391+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
22392+ if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND))
22393+ psb_msvdx_enableirq(dev);
22394+
22395+ if (hw_islands == PSB_ALL_ISLANDS)
22396+ dev_priv->irq_enabled = 1;
22397+
22398+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22399+
22400+ return 0;
22401+}
22402+
22403+void psb_irq_uninstall(struct drm_device *dev)
22404+{
22405+ psb_irq_uninstall_islands(dev, PSB_ALL_ISLANDS);
22406+}
22407+
22408+void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands)
22409+{
22410+ struct drm_psb_private *dev_priv =
22411+ (struct drm_psb_private *) dev->dev_private;
22412+ unsigned long irqflags;
22413+
22414+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22415+
22416+ if (hw_islands & PSB_DISPLAY_ISLAND) {
22417+ if (powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)) {
22418+ if (IS_POULSBO(dev))
22419+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
22420+ if (dev->vblank_enabled[0])
22421+ psb_disable_pipestat(dev_priv, 0,
22422+ PIPE_VBLANK_INTERRUPT_ENABLE |
22423+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
22424+ if (dev->vblank_enabled[1])
22425+ psb_disable_pipestat(dev_priv, 1,
22426+ PIPE_VBLANK_INTERRUPT_ENABLE |
22427+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
22428+ }
22429+ dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
22430+ _PSB_IRQ_MSVDX_FLAG |
22431+ _LNC_IRQ_TOPAZ_FLAG;
22432+ }
22433+
22434+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
22435+ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_SGX_FLAG;
22436+ if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22437+ dev_priv->sgx_irq_mask = 0x00000000;
22438+ dev_priv->sgx2_irq_mask = 0x00000000;
22439+ PSB_WSGX32(dev_priv->sgx_irq_mask,
22440+ PSB_CR_EVENT_HOST_ENABLE);
22441+ PSB_WSGX32(dev_priv->sgx2_irq_mask,
22442+ PSB_CR_EVENT_HOST_ENABLE2);
22443+ }
22444+ }
22445+
22446+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
22447+ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
22448+
22449+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
22450+ dev_priv->vdc_irq_mask &= ~_LNC_IRQ_TOPAZ_FLAG;
22451+
22452+ /*These two registers are safe even if display island is off*/
22453+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
22454+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
22455+
22456+ wmb();
22457+
22458+ /*This register is safe even if display island is off*/
22459+ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
22460+
22461+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
22462+ if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22463+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS),
22464+ PSB_CR_EVENT_HOST_CLEAR);
22465+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2),
22466+ PSB_CR_EVENT_HOST_CLEAR2);
22467+ }
22468+ }
22469+
22470+ if (IS_MRST(dev))
22471+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
22472+ if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_ENC_ISLAND))
22473+ lnc_topaz_disableirq(dev);
22474+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
22475+ if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND))
22476+ psb_msvdx_disableirq(dev);
22477+
22478+
22479+ if (hw_islands == PSB_ALL_ISLANDS)
22480+ dev_priv->irq_enabled = 0;
22481+
22482+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22483+}
22484+
22485+void psb_2D_irq_off(struct drm_psb_private *dev_priv)
22486+{
22487+ unsigned long irqflags;
22488+ uint32_t old_mask;
22489+ uint32_t cleared_mask;
22490+ struct drm_device *dev;
22491+
22492+ dev = container_of((void *) dev_priv, struct drm_device, dev_private);
22493+
22494+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22495+ --dev_priv->irqen_count_2d;
22496+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
22497+
22498+ old_mask = dev_priv->sgx_irq_mask;
22499+ dev_priv->sgx_irq_mask &= ~_PSB_CE_TWOD_COMPLETE;
22500+ if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22501+ PSB_WSGX32(dev_priv->sgx_irq_mask,
22502+ PSB_CR_EVENT_HOST_ENABLE);
22503+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
22504+
22505+ cleared_mask =
22506+ (old_mask ^ dev_priv->sgx_irq_mask) & old_mask;
22507+ PSB_WSGX32(cleared_mask, PSB_CR_EVENT_HOST_CLEAR);
22508+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
22509+ }
22510+ }
22511+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22512+}
22513+
22514+void psb_2D_irq_on(struct drm_psb_private *dev_priv)
22515+{
22516+ unsigned long irqflags;
22517+ struct drm_device *dev;
22518+
22519+ dev = container_of((void *) dev_priv, struct drm_device, dev_private);
22520+
22521+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22522+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
22523+ dev_priv->sgx_irq_mask |= _PSB_CE_TWOD_COMPLETE;
22524+ if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
22525+ PSB_WSGX32(dev_priv->sgx_irq_mask,
22526+ PSB_CR_EVENT_HOST_ENABLE);
22527+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
22528+ }
22529+ }
22530+ ++dev_priv->irqen_count_2d;
22531+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22532+}
22533+
22534+#ifdef PSB_FIXME
22535+static int psb_vblank_do_wait(struct drm_device *dev,
22536+ unsigned int *sequence, atomic_t *counter)
22537+{
22538+ unsigned int cur_vblank;
22539+ int ret = 0;
22540+ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
22541+ (((cur_vblank = atomic_read(counter))
22542+ - *sequence) <= (1 << 23)));
22543+ *sequence = cur_vblank;
22544+
22545+ return ret;
22546+}
22547+#endif
22548+
22549+
22550+/* Called from drm generic code, passed 'crtc' which
22551+ * we use as a pipe index
22552+ */
22553+int psb_enable_vblank(struct drm_device *dev, int pipe)
22554+{
22555+ struct drm_psb_private *dev_priv =
22556+ (struct drm_psb_private *) dev->dev_private;
22557+ unsigned long irqflags;
22558+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
22559+ u32 pipeconf = 0;
22560+
22561+ if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) {
22562+ pipeconf = REG_READ(pipeconf_reg);
22563+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22564+ }
22565+ if (!(pipeconf & PIPEACONF_ENABLE))
22566+ return -EINVAL;
22567+
22568+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22569+ if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) {
22570+ drm_psb_disable_vsync = 0;
22571+ if (pipe == 0)
22572+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
22573+ else
22574+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
22575+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
22576+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
22577+ if (IS_MRST(dev)) {
22578+ psb_enable_pipestat(dev_priv, pipe,
22579+ PIPE_START_VBLANK_INTERRUPT_ENABLE |
22580+ PIPE_VBLANK_INTERRUPT_ENABLE);
22581+ } else
22582+ psb_enable_pipestat(dev_priv, pipe,
22583+ PIPE_VBLANK_INTERRUPT_ENABLE);
22584+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22585+ }
22586+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22587+
22588+ return 0;
22589+}
22590+
22591+/* Called from drm generic code, passed 'crtc' which
22592+ * we use as a pipe index
22593+ */
22594+void psb_disable_vblank(struct drm_device *dev, int pipe)
22595+{
22596+ struct drm_psb_private *dev_priv =
22597+ (struct drm_psb_private *) dev->dev_private;
22598+ unsigned long irqflags;
22599+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
22600+ if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) {
22601+ if (pipe == 0)
22602+ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
22603+ else
22604+ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
22605+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
22606+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
22607+ psb_disable_pipestat(dev_priv, pipe,
22608+ PIPE_VBLANK_INTERRUPT_ENABLE |
22609+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
22610+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22611+ }
22612+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
22613+}
22614+
22615+static inline u32
22616+psb_pipestat(int pipe)
22617+{
22618+ if (pipe == 0)
22619+ return PIPEASTAT;
22620+ if (pipe == 1)
22621+ return PIPEBSTAT;
22622+ BUG();
22623+}
22624+
22625+void
22626+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
22627+{
22628+ if ((dev_priv->pipestat[pipe] & mask) != mask) {
22629+ u32 reg = psb_pipestat(pipe);
22630+ dev_priv->pipestat[pipe] |= mask;
22631+ /* Enable the interrupt, clear any pending status */
22632+ if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) {
22633+ u32 writeVal = PSB_RVDC32(reg);
22634+ writeVal |= (mask | (mask >> 16));
22635+ PSB_WVDC32(writeVal, reg);
22636+ (void) PSB_RVDC32(reg);
22637+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22638+ }
22639+ }
22640+}
22641+
22642+void
22643+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
22644+{
22645+ if ((dev_priv->pipestat[pipe] & mask) != 0) {
22646+ u32 reg = psb_pipestat(pipe);
22647+ dev_priv->pipestat[pipe] &= ~mask;
22648+ if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) {
22649+ u32 writeVal = PSB_RVDC32(reg);
22650+ writeVal &= ~mask;
22651+ PSB_WVDC32(writeVal, reg);
22652+ (void) PSB_RVDC32(reg);
22653+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22654+ }
22655+ }
22656+}
22657+
22658+/**
22659+ * psb_pipe_enabled - check if a pipe is enabled
22660+ * @dev: DRM device
22661+ * @pipe: pipe to check
22662+ *
22663+ * Reading certain registers when the pipe is disabled can hang the chip.
22664+ * Use this routine to make sure the PLL is running and the pipe is active
22665+ * before reading such registers if unsure.
22666+ */
22667+static int
22668+psb_pipe_enabled(struct drm_device *dev, int pipe)
22669+{
22670+ unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
22671+ int ret = 0;
22672+
22673+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
22674+ ret = (REG_READ(pipeconf) & PIPEACONF_ENABLE);
22675+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22676+ }
22677+
22678+ return ret;
22679+}
22680+
22681+/* Called from drm generic code, passed a 'crtc', which
22682+ * we use as a pipe index
22683+ */
22684+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
22685+{
22686+ unsigned long high_frame;
22687+ unsigned long low_frame;
22688+ u32 high1, high2, low;
22689+ u32 count = 0;
22690+
22691+ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
22692+ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
22693+
22694+ if (!powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false))
22695+ return 0;
22696+
22697+ if (!psb_pipe_enabled(dev, pipe)) {
22698+ DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
22699+ goto psb_get_vblank_counter_exit;
22700+ }
22701+
22702+ /*
22703+ * High & low register fields aren't synchronized, so make sure
22704+ * we get a low value that's stable across two reads of the high
22705+ * register.
22706+ */
22707+ do {
22708+ high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
22709+ PIPE_FRAME_HIGH_SHIFT);
22710+ low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
22711+ PIPE_FRAME_LOW_SHIFT);
22712+ high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
22713+ PIPE_FRAME_HIGH_SHIFT);
22714+ } while (high1 != high2);
22715+
22716+ count = (high1 << 8) | low;
22717+
22718+psb_get_vblank_counter_exit:
22719+
22720+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
22721+
22722+ return count;
22723+}
22724diff --git a/drivers/gpu/drm/psb/psb_mmu.c b/drivers/gpu/drm/psb/psb_mmu.c
22725new file mode 100644
22726index 0000000..d3ff8e0
22727--- /dev/null
22728+++ b/drivers/gpu/drm/psb/psb_mmu.c
22729@@ -0,0 +1,1073 @@
22730+/**************************************************************************
22731+ * Copyright (c) 2007, Intel Corporation.
22732+ * All Rights Reserved.
22733+ *
22734+ * This program is free software; you can redistribute it and/or modify it
22735+ * under the terms and conditions of the GNU General Public License,
22736+ * version 2, as published by the Free Software Foundation.
22737+ *
22738+ * This program is distributed in the hope it will be useful, but WITHOUT
22739+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22740+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22741+ * more details.
22742+ *
22743+ * You should have received a copy of the GNU General Public License along with
22744+ * this program; if not, write to the Free Software Foundation, Inc.,
22745+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22746+ *
22747+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
22748+ * develop this driver.
22749+ *
22750+ **************************************************************************/
22751+#include <drm/drmP.h>
22752+#include "psb_drv.h"
22753+#include "psb_reg.h"
22754+#include "psb_powermgmt.h"
22755+
22756+/*
22757+ * Code for the SGX MMU:
22758+ */
22759+
22760+/*
22761+ * clflush on one processor only:
22762+ * clflush should apparently flush the cache line on all processors in an
22763+ * SMP system.
22764+ */
22765+
22766+/*
22767+ * kmap atomic:
22768+ * The usage of the slots must be completely encapsulated within a spinlock, and
22769+ * no other functions that may be using the locks for other purposed may be
22770+ * called from within the locked region.
22771+ * Since the slots are per processor, this will guarantee that we are the only
22772+ * user.
22773+ */
22774+
22775+/*
22776+ * TODO: Inserting ptes from an interrupt handler:
22777+ * This may be desirable for some SGX functionality where the GPU can fault in
22778+ * needed pages. For that, we need to make an atomic insert_pages function, that
22779+ * may fail.
22780+ * If it fails, the caller need to insert the page using a workqueue function,
22781+ * but on average it should be fast.
22782+ */
22783+
22784+struct psb_mmu_driver {
22785+ /* protects driver- and pd structures. Always take in read mode
22786+ * before taking the page table spinlock.
22787+ */
22788+ struct rw_semaphore sem;
22789+
22790+ /* protects page tables, directory tables and pt tables.
22791+ * and pt structures.
22792+ */
22793+ spinlock_t lock;
22794+
22795+ atomic_t needs_tlbflush;
22796+
22797+ uint8_t __iomem *register_map;
22798+ struct psb_mmu_pd *default_pd;
22799+ uint32_t bif_ctrl;
22800+ int has_clflush;
22801+ int clflush_add;
22802+ unsigned long clflush_mask;
22803+
22804+ struct drm_psb_private *dev_priv;
22805+};
22806+
22807+struct psb_mmu_pd;
22808+
22809+struct psb_mmu_pt {
22810+ struct psb_mmu_pd *pd;
22811+ uint32_t index;
22812+ uint32_t count;
22813+ struct page *p;
22814+ uint32_t *v;
22815+};
22816+
22817+struct psb_mmu_pd {
22818+ struct psb_mmu_driver *driver;
22819+ int hw_context;
22820+ struct psb_mmu_pt **tables;
22821+ struct page *p;
22822+ struct page *dummy_pt;
22823+ struct page *dummy_page;
22824+ uint32_t pd_mask;
22825+ uint32_t invalid_pde;
22826+ uint32_t invalid_pte;
22827+};
22828+
22829+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
22830+
22831+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
22832+{
22833+ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
22834+}
22835+
22836+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
22837+{
22838+ return offset >> PSB_PDE_SHIFT;
22839+}
22840+
22841+#if defined(CONFIG_X86)
22842+static inline void psb_clflush(void *addr)
22843+{
22844+ __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
22845+}
22846+
22847+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
22848+ void *addr)
22849+{
22850+ if (!driver->has_clflush)
22851+ return;
22852+
22853+ mb();
22854+ psb_clflush(addr);
22855+ mb();
22856+}
22857+#else
22858+
22859+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
22860+ void *addr)
22861+{;
22862+}
22863+
22864+#endif
22865+
22866+static inline void psb_iowrite32(const struct psb_mmu_driver *d,
22867+ uint32_t val, uint32_t offset)
22868+{
22869+ iowrite32(val, d->register_map + offset);
22870+}
22871+
22872+static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d,
22873+ uint32_t offset)
22874+{
22875+ return ioread32(d->register_map + offset);
22876+}
22877+
22878+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
22879+ int force)
22880+{
22881+ if (atomic_read(&driver->needs_tlbflush) || force) {
22882+ uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
22883+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
22884+ PSB_CR_BIF_CTRL);
22885+ wmb();
22886+ psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC,
22887+ PSB_CR_BIF_CTRL);
22888+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
22889+ if (driver->dev_priv) {
22890+ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
22891+ if (IS_MRST(driver->dev_priv->dev))
22892+ topaz_mmu_flushcache(driver->dev_priv);
22893+ }
22894+ }
22895+ atomic_set(&driver->needs_tlbflush, 0);
22896+}
22897+
22898+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
22899+{
22900+ down_write(&driver->sem);
22901+ psb_mmu_flush_pd_locked(driver, force);
22902+ up_write(&driver->sem);
22903+}
22904+
22905+void psb_mmu_flush(struct psb_mmu_driver *driver)
22906+{
22907+ uint32_t val;
22908+
22909+ if (powermgmt_using_hw_begin(driver->dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, false)) {
22910+ down_write(&driver->sem);
22911+ val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
22912+ if (atomic_read(&driver->needs_tlbflush))
22913+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
22914+ PSB_CR_BIF_CTRL);
22915+ else
22916+ psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH,
22917+ PSB_CR_BIF_CTRL);
22918+ wmb();
22919+ psb_iowrite32(driver,
22920+ val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
22921+ PSB_CR_BIF_CTRL);
22922+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
22923+ atomic_set(&driver->needs_tlbflush, 0);
22924+ up_write(&driver->sem);
22925+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
22926+ } else {
22927+ PSB_DEBUG_PM("mmu flush when down\n");
22928+ }
22929+
22930+ down_write(&driver->sem);
22931+ if (driver->dev_priv) {
22932+ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
22933+ if (IS_MRST(driver->dev_priv->dev))
22934+ topaz_mmu_flushcache(driver->dev_priv);
22935+ }
22936+
22937+ up_write(&driver->sem);
22938+}
22939+
22940+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
22941+{
22942+ uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
22943+ PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
22944+
22945+ ttm_tt_cache_flush(&pd->p, 1);
22946+ down_write(&pd->driver->sem);
22947+ psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT),
22948+ offset);
22949+ wmb();
22950+ psb_mmu_flush_pd_locked(pd->driver, 1);
22951+ pd->hw_context = hw_context;
22952+ up_write(&pd->driver->sem);
22953+
22954+}
22955+
22956+static inline unsigned long psb_pd_addr_end(unsigned long addr,
22957+ unsigned long end)
22958+{
22959+
22960+ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
22961+ return (addr < end) ? addr : end;
22962+}
22963+
22964+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
22965+{
22966+ uint32_t mask = PSB_PTE_VALID;
22967+
22968+ if (type & PSB_MMU_CACHED_MEMORY)
22969+ mask |= PSB_PTE_CACHED;
22970+ if (type & PSB_MMU_RO_MEMORY)
22971+ mask |= PSB_PTE_RO;
22972+ if (type & PSB_MMU_WO_MEMORY)
22973+ mask |= PSB_PTE_WO;
22974+
22975+ return (pfn << PAGE_SHIFT) | mask;
22976+}
22977+
22978+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
22979+ int trap_pagefaults, int invalid_type)
22980+{
22981+ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
22982+ uint32_t *v;
22983+ int i;
22984+
22985+ if (!pd)
22986+ return NULL;
22987+
22988+ pd->p = alloc_page(GFP_DMA32);
22989+ if (!pd->p)
22990+ goto out_err1;
22991+ pd->dummy_pt = alloc_page(GFP_DMA32);
22992+ if (!pd->dummy_pt)
22993+ goto out_err2;
22994+ pd->dummy_page = alloc_page(GFP_DMA32);
22995+ if (!pd->dummy_page)
22996+ goto out_err3;
22997+
22998+ if (!trap_pagefaults) {
22999+ pd->invalid_pde =
23000+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
23001+ invalid_type);
23002+ pd->invalid_pte =
23003+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
23004+ invalid_type);
23005+ } else {
23006+ pd->invalid_pde = 0;
23007+ pd->invalid_pte = 0;
23008+ }
23009+
23010+ v = kmap(pd->dummy_pt);
23011+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
23012+ v[i] = pd->invalid_pte;
23013+
23014+ kunmap(pd->dummy_pt);
23015+
23016+ v = kmap(pd->p);
23017+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
23018+ v[i] = pd->invalid_pde;
23019+
23020+ kunmap(pd->p);
23021+
23022+ clear_page(kmap(pd->dummy_page));
23023+ kunmap(pd->dummy_page);
23024+
23025+ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
23026+ if (!pd->tables)
23027+ goto out_err4;
23028+
23029+ pd->hw_context = -1;
23030+ pd->pd_mask = PSB_PTE_VALID;
23031+ pd->driver = driver;
23032+
23033+ return pd;
23034+
23035+out_err4:
23036+ __free_page(pd->dummy_page);
23037+out_err3:
23038+ __free_page(pd->dummy_pt);
23039+out_err2:
23040+ __free_page(pd->p);
23041+out_err1:
23042+ kfree(pd);
23043+ return NULL;
23044+}
23045+
23046+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
23047+{
23048+ __free_page(pt->p);
23049+ kfree(pt);
23050+}
23051+
23052+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
23053+{
23054+ struct psb_mmu_driver *driver = pd->driver;
23055+ struct psb_mmu_pt *pt;
23056+ int i;
23057+
23058+ down_write(&driver->sem);
23059+ if (pd->hw_context != -1) {
23060+ psb_iowrite32(driver, 0,
23061+ PSB_CR_BIF_DIR_LIST_BASE0 +
23062+ pd->hw_context * 4);
23063+ psb_mmu_flush_pd_locked(driver, 1);
23064+ }
23065+
23066+ /* Should take the spinlock here, but we don't need to do that
23067+ since we have the semaphore in write mode. */
23068+
23069+ for (i = 0; i < 1024; ++i) {
23070+ pt = pd->tables[i];
23071+ if (pt)
23072+ psb_mmu_free_pt(pt);
23073+ }
23074+
23075+ vfree(pd->tables);
23076+ __free_page(pd->dummy_page);
23077+ __free_page(pd->dummy_pt);
23078+ __free_page(pd->p);
23079+ kfree(pd);
23080+ up_write(&driver->sem);
23081+}
23082+
23083+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
23084+{
23085+ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
23086+ void *v;
23087+ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
23088+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
23089+ spinlock_t *lock = &pd->driver->lock;
23090+ uint8_t *clf;
23091+ uint32_t *ptes;
23092+ int i;
23093+
23094+ if (!pt)
23095+ return NULL;
23096+
23097+ pt->p = alloc_page(GFP_DMA32);
23098+ if (!pt->p) {
23099+ kfree(pt);
23100+ return NULL;
23101+ }
23102+
23103+ spin_lock(lock);
23104+
23105+ v = kmap_atomic(pt->p, KM_USER0);
23106+ clf = (uint8_t *) v;
23107+ ptes = (uint32_t *) v;
23108+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
23109+ *ptes++ = pd->invalid_pte;
23110+
23111+
23112+#if defined(CONFIG_X86)
23113+ if (pd->driver->has_clflush && pd->hw_context != -1) {
23114+ mb();
23115+ for (i = 0; i < clflush_count; ++i) {
23116+ psb_clflush(clf);
23117+ clf += clflush_add;
23118+ }
23119+ mb();
23120+ }
23121+#endif
23122+ kunmap_atomic(v, KM_USER0);
23123+ spin_unlock(lock);
23124+
23125+ pt->count = 0;
23126+ pt->pd = pd;
23127+ pt->index = 0;
23128+
23129+ return pt;
23130+}
23131+
23132+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
23133+ unsigned long addr)
23134+{
23135+ uint32_t index = psb_mmu_pd_index(addr);
23136+ struct psb_mmu_pt *pt;
23137+ uint32_t *v;
23138+ spinlock_t *lock = &pd->driver->lock;
23139+
23140+ spin_lock(lock);
23141+ pt = pd->tables[index];
23142+ while (!pt) {
23143+ spin_unlock(lock);
23144+ pt = psb_mmu_alloc_pt(pd);
23145+ if (!pt)
23146+ return NULL;
23147+ spin_lock(lock);
23148+
23149+ if (pd->tables[index]) {
23150+ spin_unlock(lock);
23151+ psb_mmu_free_pt(pt);
23152+ spin_lock(lock);
23153+ pt = pd->tables[index];
23154+ continue;
23155+ }
23156+
23157+ v = kmap_atomic(pd->p, KM_USER0);
23158+ pd->tables[index] = pt;
23159+ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
23160+ pt->index = index;
23161+ kunmap_atomic((void *) v, KM_USER0);
23162+
23163+ if (pd->hw_context != -1) {
23164+ psb_mmu_clflush(pd->driver, (void *) &v[index]);
23165+ atomic_set(&pd->driver->needs_tlbflush, 1);
23166+ }
23167+ }
23168+ pt->v = kmap_atomic(pt->p, KM_USER0);
23169+ return pt;
23170+}
23171+
23172+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
23173+ unsigned long addr)
23174+{
23175+ uint32_t index = psb_mmu_pd_index(addr);
23176+ struct psb_mmu_pt *pt;
23177+ spinlock_t *lock = &pd->driver->lock;
23178+
23179+ spin_lock(lock);
23180+ pt = pd->tables[index];
23181+ if (!pt) {
23182+ spin_unlock(lock);
23183+ return NULL;
23184+ }
23185+ pt->v = kmap_atomic(pt->p, KM_USER0);
23186+ return pt;
23187+}
23188+
23189+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
23190+{
23191+ struct psb_mmu_pd *pd = pt->pd;
23192+ uint32_t *v;
23193+
23194+ kunmap_atomic(pt->v, KM_USER0);
23195+ if (pt->count == 0) {
23196+ v = kmap_atomic(pd->p, KM_USER0);
23197+ v[pt->index] = pd->invalid_pde;
23198+ pd->tables[pt->index] = NULL;
23199+
23200+ if (pd->hw_context != -1) {
23201+ psb_mmu_clflush(pd->driver,
23202+ (void *) &v[pt->index]);
23203+ atomic_set(&pd->driver->needs_tlbflush, 1);
23204+ }
23205+ kunmap_atomic(pt->v, KM_USER0);
23206+ spin_unlock(&pd->driver->lock);
23207+ psb_mmu_free_pt(pt);
23208+ return;
23209+ }
23210+ spin_unlock(&pd->driver->lock);
23211+}
23212+
23213+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
23214+ unsigned long addr, uint32_t pte)
23215+{
23216+ pt->v[psb_mmu_pt_index(addr)] = pte;
23217+}
23218+
23219+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
23220+ unsigned long addr)
23221+{
23222+ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
23223+}
23224+
23225+#if 0
23226+static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
23227+ uint32_t mmu_offset)
23228+{
23229+ uint32_t *v;
23230+ uint32_t pfn;
23231+
23232+ v = kmap_atomic(pd->p, KM_USER0);
23233+ if (!v) {
23234+ printk(KERN_INFO "Could not kmap pde page.\n");
23235+ return 0;
23236+ }
23237+ pfn = v[psb_mmu_pd_index(mmu_offset)];
23238+ /* printk(KERN_INFO "pde is 0x%08x\n",pfn); */
23239+ kunmap_atomic(v, KM_USER0);
23240+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
23241+ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
23242+ mmu_offset, pfn);
23243+ }
23244+ v = ioremap(pfn & 0xFFFFF000, 4096);
23245+ if (!v) {
23246+ printk(KERN_INFO "Could not kmap pte page.\n");
23247+ return 0;
23248+ }
23249+ pfn = v[psb_mmu_pt_index(mmu_offset)];
23250+ /* printk(KERN_INFO "pte is 0x%08x\n",pfn); */
23251+ iounmap(v);
23252+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
23253+ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
23254+ mmu_offset, pfn);
23255+ }
23256+ return pfn >> PAGE_SHIFT;
23257+}
23258+
23259+static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
23260+ uint32_t mmu_offset,
23261+ uint32_t gtt_pages)
23262+{
23263+ uint32_t start;
23264+ uint32_t next;
23265+
23266+ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
23267+ mmu_offset, gtt_pages);
23268+ down_read(&pd->driver->sem);
23269+ start = psb_mmu_check_pte_locked(pd, mmu_offset);
23270+ mmu_offset += PAGE_SIZE;
23271+ gtt_pages -= 1;
23272+ while (gtt_pages--) {
23273+ next = psb_mmu_check_pte_locked(pd, mmu_offset);
23274+ if (next != start + 1) {
23275+ printk(KERN_INFO
23276+ "Ptes out of order: 0x%08x, 0x%08x.\n",
23277+ start, next);
23278+ }
23279+ start = next;
23280+ mmu_offset += PAGE_SIZE;
23281+ }
23282+ up_read(&pd->driver->sem);
23283+}
23284+
23285+#endif
23286+
23287+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
23288+ uint32_t mmu_offset, uint32_t gtt_start,
23289+ uint32_t gtt_pages)
23290+{
23291+ uint32_t *v;
23292+ uint32_t start = psb_mmu_pd_index(mmu_offset);
23293+ struct psb_mmu_driver *driver = pd->driver;
23294+ int num_pages = gtt_pages;
23295+
23296+ down_read(&driver->sem);
23297+ spin_lock(&driver->lock);
23298+
23299+ v = kmap_atomic(pd->p, KM_USER0);
23300+ v += start;
23301+
23302+ while (gtt_pages--) {
23303+ *v++ = gtt_start | pd->pd_mask;
23304+ gtt_start += PAGE_SIZE;
23305+ }
23306+
23307+ ttm_tt_cache_flush(&pd->p, num_pages);
23308+ kunmap_atomic(v, KM_USER0);
23309+ spin_unlock(&driver->lock);
23310+
23311+ if (pd->hw_context != -1)
23312+ atomic_set(&pd->driver->needs_tlbflush, 1);
23313+
23314+ up_read(&pd->driver->sem);
23315+ psb_mmu_flush_pd(pd->driver, 0);
23316+}
23317+
23318+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
23319+{
23320+ struct psb_mmu_pd *pd;
23321+
23322+ down_read(&driver->sem);
23323+ pd = driver->default_pd;
23324+ up_read(&driver->sem);
23325+
23326+ return pd;
23327+}
23328+
23329+/* Returns the physical address of the PD shared by sgx/msvdx */
23330+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
23331+{
23332+ struct psb_mmu_pd *pd;
23333+
23334+ pd = psb_mmu_get_default_pd(driver);
23335+ return page_to_pfn(pd->p) << PAGE_SHIFT;
23336+}
23337+
23338+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
23339+{
23340+ psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL);
23341+ psb_mmu_free_pagedir(driver->default_pd);
23342+ kfree(driver);
23343+}
23344+
23345+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
23346+ int trap_pagefaults,
23347+ int invalid_type,
23348+ struct drm_psb_private *dev_priv)
23349+{
23350+ struct psb_mmu_driver *driver;
23351+
23352+ driver = kmalloc(sizeof(*driver), GFP_KERNEL);
23353+
23354+ if (!driver)
23355+ return NULL;
23356+ driver->dev_priv = dev_priv;
23357+
23358+ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
23359+ invalid_type);
23360+ if (!driver->default_pd)
23361+ goto out_err1;
23362+
23363+ spin_lock_init(&driver->lock);
23364+ init_rwsem(&driver->sem);
23365+ down_write(&driver->sem);
23366+ driver->register_map = registers;
23367+ atomic_set(&driver->needs_tlbflush, 1);
23368+
23369+ driver->bif_ctrl = psb_ioread32(driver, PSB_CR_BIF_CTRL);
23370+ psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
23371+ PSB_CR_BIF_CTRL);
23372+ psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
23373+ PSB_CR_BIF_CTRL);
23374+
23375+ driver->has_clflush = 0;
23376+
23377+#if defined(CONFIG_X86)
23378+ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
23379+ uint32_t tfms, misc, cap0, cap4, clflush_size;
23380+
23381+ /*
23382+ * clflush size is determined at kernel setup for x86_64
23383+ * but not for i386. We have to do it here.
23384+ */
23385+
23386+ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
23387+ clflush_size = ((misc >> 8) & 0xff) * 8;
23388+ driver->has_clflush = 1;
23389+ driver->clflush_add =
23390+ PAGE_SIZE * clflush_size / sizeof(uint32_t);
23391+ driver->clflush_mask = driver->clflush_add - 1;
23392+ driver->clflush_mask = ~driver->clflush_mask;
23393+ }
23394+#endif
23395+
23396+ up_write(&driver->sem);
23397+ return driver;
23398+
23399+out_err1:
23400+ kfree(driver);
23401+ return NULL;
23402+}
23403+
23404+#if defined(CONFIG_X86)
23405+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
23406+ unsigned long address, uint32_t num_pages,
23407+ uint32_t desired_tile_stride,
23408+ uint32_t hw_tile_stride)
23409+{
23410+ struct psb_mmu_pt *pt;
23411+ uint32_t rows = 1;
23412+ uint32_t i;
23413+ unsigned long addr;
23414+ unsigned long end;
23415+ unsigned long next;
23416+ unsigned long add;
23417+ unsigned long row_add;
23418+ unsigned long clflush_add = pd->driver->clflush_add;
23419+ unsigned long clflush_mask = pd->driver->clflush_mask;
23420+
23421+ if (!pd->driver->has_clflush) {
23422+ ttm_tt_cache_flush(&pd->p, num_pages);
23423+ return;
23424+ }
23425+
23426+ if (hw_tile_stride)
23427+ rows = num_pages / desired_tile_stride;
23428+ else
23429+ desired_tile_stride = num_pages;
23430+
23431+ add = desired_tile_stride << PAGE_SHIFT;
23432+ row_add = hw_tile_stride << PAGE_SHIFT;
23433+ mb();
23434+ for (i = 0; i < rows; ++i) {
23435+
23436+ addr = address;
23437+ end = addr + add;
23438+
23439+ do {
23440+ next = psb_pd_addr_end(addr, end);
23441+ pt = psb_mmu_pt_map_lock(pd, addr);
23442+ if (!pt)
23443+ continue;
23444+ do {
23445+ psb_clflush(&pt->v
23446+ [psb_mmu_pt_index(addr)]);
23447+ } while (addr +=
23448+ clflush_add,
23449+ (addr & clflush_mask) < next);
23450+
23451+ psb_mmu_pt_unmap_unlock(pt);
23452+ } while (addr = next, next != end);
23453+ address += row_add;
23454+ }
23455+ mb();
23456+}
23457+#else
23458+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
23459+ unsigned long address, uint32_t num_pages,
23460+ uint32_t desired_tile_stride,
23461+ uint32_t hw_tile_stride)
23462+{
23463+ drm_ttm_cache_flush(&pd->p, num_pages);
23464+}
23465+#endif
23466+
23467+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
23468+ unsigned long address, uint32_t num_pages)
23469+{
23470+ struct psb_mmu_pt *pt;
23471+ unsigned long addr;
23472+ unsigned long end;
23473+ unsigned long next;
23474+ unsigned long f_address = address;
23475+
23476+ down_read(&pd->driver->sem);
23477+
23478+ addr = address;
23479+ end = addr + (num_pages << PAGE_SHIFT);
23480+
23481+ do {
23482+ next = psb_pd_addr_end(addr, end);
23483+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
23484+ if (!pt)
23485+ goto out;
23486+ do {
23487+ psb_mmu_invalidate_pte(pt, addr);
23488+ --pt->count;
23489+ } while (addr += PAGE_SIZE, addr < next);
23490+ psb_mmu_pt_unmap_unlock(pt);
23491+
23492+ } while (addr = next, next != end);
23493+
23494+out:
23495+ if (pd->hw_context != -1)
23496+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
23497+
23498+ up_read(&pd->driver->sem);
23499+
23500+ if (pd->hw_context != -1)
23501+ psb_mmu_flush(pd->driver);
23502+
23503+ return;
23504+}
23505+
23506+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
23507+ uint32_t num_pages, uint32_t desired_tile_stride,
23508+ uint32_t hw_tile_stride)
23509+{
23510+ struct psb_mmu_pt *pt;
23511+ uint32_t rows = 1;
23512+ uint32_t i;
23513+ unsigned long addr;
23514+ unsigned long end;
23515+ unsigned long next;
23516+ unsigned long add;
23517+ unsigned long row_add;
23518+ unsigned long f_address = address;
23519+
23520+ if (hw_tile_stride)
23521+ rows = num_pages / desired_tile_stride;
23522+ else
23523+ desired_tile_stride = num_pages;
23524+
23525+ add = desired_tile_stride << PAGE_SHIFT;
23526+ row_add = hw_tile_stride << PAGE_SHIFT;
23527+
23528+ down_read(&pd->driver->sem);
23529+
23530+ /* Make sure we only need to flush this processor's cache */
23531+
23532+ for (i = 0; i < rows; ++i) {
23533+
23534+ addr = address;
23535+ end = addr + add;
23536+
23537+ do {
23538+ next = psb_pd_addr_end(addr, end);
23539+ pt = psb_mmu_pt_map_lock(pd, addr);
23540+ if (!pt)
23541+ continue;
23542+ do {
23543+ psb_mmu_invalidate_pte(pt, addr);
23544+ --pt->count;
23545+
23546+ } while (addr += PAGE_SIZE, addr < next);
23547+ psb_mmu_pt_unmap_unlock(pt);
23548+
23549+ } while (addr = next, next != end);
23550+ address += row_add;
23551+ }
23552+ if (pd->hw_context != -1)
23553+ psb_mmu_flush_ptes(pd, f_address, num_pages,
23554+ desired_tile_stride, hw_tile_stride);
23555+
23556+ up_read(&pd->driver->sem);
23557+
23558+ if (pd->hw_context != -1)
23559+ psb_mmu_flush(pd->driver);
23560+}
23561+
23562+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
23563+ unsigned long address, uint32_t num_pages,
23564+ int type)
23565+{
23566+ struct psb_mmu_pt *pt;
23567+ uint32_t pte;
23568+ unsigned long addr;
23569+ unsigned long end;
23570+ unsigned long next;
23571+ unsigned long f_address = address;
23572+ int ret = 0;
23573+
23574+ down_read(&pd->driver->sem);
23575+
23576+ addr = address;
23577+ end = addr + (num_pages << PAGE_SHIFT);
23578+
23579+ do {
23580+ next = psb_pd_addr_end(addr, end);
23581+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
23582+ if (!pt) {
23583+ ret = -ENOMEM;
23584+ goto out;
23585+ }
23586+ do {
23587+ pte = psb_mmu_mask_pte(start_pfn++, type);
23588+ psb_mmu_set_pte(pt, addr, pte);
23589+ pt->count++;
23590+ } while (addr += PAGE_SIZE, addr < next);
23591+ psb_mmu_pt_unmap_unlock(pt);
23592+
23593+ } while (addr = next, next != end);
23594+
23595+out:
23596+ if (pd->hw_context != -1)
23597+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
23598+
23599+ up_read(&pd->driver->sem);
23600+
23601+ if (pd->hw_context != -1)
23602+ psb_mmu_flush(pd->driver);
23603+
23604+ return ret;
23605+}
23606+
23607+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
23608+ unsigned long address, uint32_t num_pages,
23609+ uint32_t desired_tile_stride,
23610+ uint32_t hw_tile_stride, int type)
23611+{
23612+ struct psb_mmu_pt *pt;
23613+ uint32_t rows = 1;
23614+ uint32_t i;
23615+ uint32_t pte;
23616+ unsigned long addr;
23617+ unsigned long end;
23618+ unsigned long next;
23619+ unsigned long add;
23620+ unsigned long row_add;
23621+ unsigned long f_address = address;
23622+ int ret = 0;
23623+
23624+ if (hw_tile_stride) {
23625+ if (num_pages % desired_tile_stride != 0)
23626+ return -EINVAL;
23627+ rows = num_pages / desired_tile_stride;
23628+ } else {
23629+ desired_tile_stride = num_pages;
23630+ }
23631+
23632+ add = desired_tile_stride << PAGE_SHIFT;
23633+ row_add = hw_tile_stride << PAGE_SHIFT;
23634+
23635+ down_read(&pd->driver->sem);
23636+
23637+ for (i = 0; i < rows; ++i) {
23638+
23639+ addr = address;
23640+ end = addr + add;
23641+
23642+ do {
23643+ next = psb_pd_addr_end(addr, end);
23644+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
23645+ if (!pt) {
23646+ ret = -ENOMEM;
23647+ goto out;
23648+ }
23649+ do {
23650+ pte =
23651+ psb_mmu_mask_pte(page_to_pfn(*pages++),
23652+ type);
23653+ psb_mmu_set_pte(pt, addr, pte);
23654+ pt->count++;
23655+ } while (addr += PAGE_SIZE, addr < next);
23656+ psb_mmu_pt_unmap_unlock(pt);
23657+
23658+ } while (addr = next, next != end);
23659+
23660+ address += row_add;
23661+ }
23662+out:
23663+ if (pd->hw_context != -1)
23664+ psb_mmu_flush_ptes(pd, f_address, num_pages,
23665+ desired_tile_stride, hw_tile_stride);
23666+
23667+ up_read(&pd->driver->sem);
23668+
23669+ if (pd->hw_context != -1)
23670+ psb_mmu_flush(pd->driver);
23671+
23672+ return ret;
23673+}
23674+
23675+void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
23676+{
23677+ mask &= _PSB_MMU_ER_MASK;
23678+ psb_iowrite32(driver,
23679+ psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
23680+ PSB_CR_BIF_CTRL);
23681+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
23682+}
23683+
23684+void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
23685+ uint32_t mask)
23686+{
23687+ mask &= _PSB_MMU_ER_MASK;
23688+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
23689+ PSB_CR_BIF_CTRL);
23690+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
23691+}
23692+
23693+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
23694+ unsigned long *pfn)
23695+{
23696+ int ret;
23697+ struct psb_mmu_pt *pt;
23698+ uint32_t tmp;
23699+ spinlock_t *lock = &pd->driver->lock;
23700+
23701+ down_read(&pd->driver->sem);
23702+ pt = psb_mmu_pt_map_lock(pd, virtual);
23703+ if (!pt) {
23704+ uint32_t *v;
23705+
23706+ spin_lock(lock);
23707+ v = kmap_atomic(pd->p, KM_USER0);
23708+ tmp = v[psb_mmu_pd_index(virtual)];
23709+ kunmap_atomic(v, KM_USER0);
23710+ spin_unlock(lock);
23711+
23712+ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
23713+ !(pd->invalid_pte & PSB_PTE_VALID)) {
23714+ ret = -EINVAL;
23715+ goto out;
23716+ }
23717+ ret = 0;
23718+ *pfn = pd->invalid_pte >> PAGE_SHIFT;
23719+ goto out;
23720+ }
23721+ tmp = pt->v[psb_mmu_pt_index(virtual)];
23722+ if (!(tmp & PSB_PTE_VALID)) {
23723+ ret = -EINVAL;
23724+ } else {
23725+ ret = 0;
23726+ *pfn = tmp >> PAGE_SHIFT;
23727+ }
23728+ psb_mmu_pt_unmap_unlock(pt);
23729+out:
23730+ up_read(&pd->driver->sem);
23731+ return ret;
23732+}
23733+
23734+void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
23735+{
23736+ struct page *p;
23737+ unsigned long pfn;
23738+ int ret = 0;
23739+ struct psb_mmu_pd *pd;
23740+ uint32_t *v;
23741+ uint32_t *vmmu;
23742+
23743+ pd = driver->default_pd;
23744+ if (!pd)
23745+ printk(KERN_WARNING "Could not get default pd\n");
23746+
23747+
23748+ p = alloc_page(GFP_DMA32);
23749+
23750+ if (!p) {
23751+ printk(KERN_WARNING "Failed allocating page\n");
23752+ return;
23753+ }
23754+
23755+ v = kmap(p);
23756+ memset(v, 0x67, PAGE_SIZE);
23757+
23758+ pfn = (offset >> PAGE_SHIFT);
23759+
23760+ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
23761+ if (ret) {
23762+ printk(KERN_WARNING "Failed inserting mmu page\n");
23763+ goto out_err1;
23764+ }
23765+
23766+ /* Ioremap the page through the GART aperture */
23767+
23768+ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
23769+ if (!vmmu) {
23770+ printk(KERN_WARNING "Failed ioremapping page\n");
23771+ goto out_err2;
23772+ }
23773+
23774+ /* Read from the page with mmu disabled. */
23775+ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
23776+
23777+ /* Enable the mmu for host accesses and read again. */
23778+ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
23779+
23780+ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
23781+ ioread32(vmmu));
23782+ *v = 0x15243705;
23783+ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
23784+ ioread32(vmmu));
23785+ iowrite32(0x16243355, vmmu);
23786+ (void) ioread32(vmmu);
23787+ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
23788+
23789+ printk(KERN_INFO "Int stat is 0x%08x\n",
23790+ psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
23791+ printk(KERN_INFO "Fault is 0x%08x\n",
23792+ psb_ioread32(driver, PSB_CR_BIF_FAULT));
23793+
23794+ /* Disable MMU for host accesses and clear page fault register */
23795+ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
23796+ iounmap(vmmu);
23797+out_err2:
23798+ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
23799+out_err1:
23800+ kunmap(p);
23801+ __free_page(p);
23802+}
23803diff --git a/drivers/gpu/drm/psb/psb_msvdx.c b/drivers/gpu/drm/psb/psb_msvdx.c
23804new file mode 100644
23805index 0000000..6930880
23806--- /dev/null
23807+++ b/drivers/gpu/drm/psb/psb_msvdx.c
23808@@ -0,0 +1,855 @@
23809+/**
23810+ * file psb_msvdx.c
23811+ * MSVDX I/O operations and IRQ handling
23812+ *
23813+ */
23814+
23815+/**************************************************************************
23816+ *
23817+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
23818+ * Copyright (c) Imagination Technologies Limited, UK
23819+ * All Rights Reserved.
23820+ *
23821+ * Permission is hereby granted, free of charge, to any person obtaining a
23822+ * copy of this software and associated documentation files (the
23823+ * "Software"), to deal in the Software without restriction, including
23824+ * without limitation the rights to use, copy, modify, merge, publish,
23825+ * distribute, sub license, and/or sell copies of the Software, and to
23826+ * permit persons to whom the Software is furnished to do so, subject to
23827+ * the following conditions:
23828+ *
23829+ * The above copyright notice and this permission notice (including the
23830+ * next paragraph) shall be included in all copies or substantial portions
23831+ * of the Software.
23832+ *
23833+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23834+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23835+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
23836+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23837+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23838+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23839+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
23840+ *
23841+ **************************************************************************/
23842+
23843+#include <drm/drmP.h>
23844+#include <drm/drm_os_linux.h>
23845+#include "psb_drv.h"
23846+#include "psb_drm.h"
23847+#include "psb_msvdx.h"
23848+#include "lnc_topaz.h"
23849+#include "psb_powermgmt.h"
23850+#include <linux/io.h>
23851+#include <linux/delay.h>
23852+
23853+#ifndef list_first_entry
23854+#define list_first_entry(ptr, type, member) \
23855+ list_entry((ptr)->next, type, member)
23856+#endif
23857+
23858+
23859+static int psb_msvdx_send(struct drm_device *dev, void *cmd,
23860+ unsigned long cmd_size);
23861+
23862+static int psb_msvdx_dequeue_send(struct drm_device *dev)
23863+{
23864+ struct drm_psb_private *dev_priv = dev->dev_private;
23865+ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
23866+ int ret = 0;
23867+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
23868+
23869+ if (list_empty(&msvdx_priv->msvdx_queue)) {
23870+ PSB_DEBUG_GENERAL("MSVDXQUE: msvdx list empty.\n");
23871+ msvdx_priv->msvdx_busy = 0;
23872+ return -EINVAL;
23873+ }
23874+ msvdx_cmd = list_first_entry(&msvdx_priv->msvdx_queue,
23875+ struct psb_msvdx_cmd_queue, head);
23876+ PSB_DEBUG_GENERAL("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
23877+ ret = psb_msvdx_send(dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
23878+ if (ret) {
23879+ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
23880+ ret = -EINVAL;
23881+ }
23882+ list_del(&msvdx_cmd->head);
23883+ kfree(msvdx_cmd->cmd);
23884+ kfree(msvdx_cmd);
23885+
23886+ return ret;
23887+}
23888+
23889+static int psb_msvdx_map_command(struct drm_device *dev,
23890+ struct ttm_buffer_object *cmd_buffer,
23891+ unsigned long cmd_offset, unsigned long cmd_size,
23892+ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
23893+{
23894+ struct drm_psb_private *dev_priv = dev->dev_private;
23895+ int ret = 0;
23896+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
23897+ unsigned long cmd_size_remaining;
23898+ struct ttm_bo_kmap_obj cmd_kmap;
23899+ void *cmd, *tmp, *cmd_start;
23900+ bool is_iomem;
23901+
23902+ /* command buffers may not exceed page boundary */
23903+ if (cmd_size + cmd_page_offset > PAGE_SIZE)
23904+ return -EINVAL;
23905+
23906+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 1, &cmd_kmap);
23907+ if (ret) {
23908+ DRM_ERROR("MSVDXQUE:ret:%d\n", ret);
23909+ return ret;
23910+ }
23911+
23912+ cmd_start = (void *)ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem)
23913+ + cmd_page_offset;
23914+ cmd = cmd_start;
23915+ cmd_size_remaining = cmd_size;
23916+
23917+ while (cmd_size_remaining > 0) {
23918+ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
23919+ uint32_t cur_cmd_id = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_ID);
23920+ uint32_t mmu_ptd = 0, tmp = 0;
23921+
23922+ PSB_DEBUG_GENERAL("cmd start at %08x cur_cmd_size = %d"
23923+ " cur_cmd_id = %02x fence = %08x\n",
23924+ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
23925+ if ((cur_cmd_size % sizeof(uint32_t))
23926+ || (cur_cmd_size > cmd_size_remaining)) {
23927+ ret = -EINVAL;
23928+ DRM_ERROR("MSVDX: ret:%d\n", ret);
23929+ goto out;
23930+ }
23931+
23932+ switch (cur_cmd_id) {
23933+ case VA_MSGID_RENDER:
23934+ /* Fence ID */
23935+ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_FENCE_VALUE,
23936+ sequence);
23937+ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
23938+ tmp = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
23939+ 1, 0);
23940+ if (tmp == 1) {
23941+ mmu_ptd |= 1;
23942+ PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
23943+ }
23944+
23945+ /* PTD */
23946+ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
23947+ break;
23948+
23949+ default:
23950+ /* Msg not supported */
23951+ ret = -EINVAL;
23952+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
23953+ goto out;
23954+ }
23955+
23956+ cmd += cur_cmd_size;
23957+ cmd_size_remaining -= cur_cmd_size;
23958+ }
23959+
23960+ if (copy_cmd) {
23961+ PSB_DEBUG_GENERAL("MSVDXQUE:copying command\n");
23962+
23963+ tmp = kzalloc(cmd_size, GFP_KERNEL);
23964+ if (tmp == NULL) {
23965+ ret = -ENOMEM;
23966+ DRM_ERROR("MSVDX: fail to callc,ret=:%d\n", ret);
23967+ goto out;
23968+ }
23969+ memcpy(tmp, cmd_start, cmd_size);
23970+ *msvdx_cmd = tmp;
23971+ } else {
23972+ PSB_DEBUG_GENERAL("MSVDXQUE:did NOT copy command\n");
23973+ ret = psb_msvdx_send(dev, cmd_start, cmd_size);
23974+ if (ret) {
23975+ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
23976+ ret = -EINVAL;
23977+ }
23978+ }
23979+
23980+out:
23981+ ttm_bo_kunmap(&cmd_kmap);
23982+
23983+ return ret;
23984+}
23985+
23986+int psb_submit_video_cmdbuf(struct drm_device *dev,
23987+ struct ttm_buffer_object *cmd_buffer,
23988+ unsigned long cmd_offset, unsigned long cmd_size,
23989+ struct ttm_fence_object *fence)
23990+{
23991+ struct drm_psb_private *dev_priv = dev->dev_private;
23992+ uint32_t sequence = dev_priv->sequence[PSB_ENGINE_VIDEO];
23993+ unsigned long irq_flags;
23994+ int ret = 0;
23995+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
23996+
23997+ psb_schedule_watchdog(dev_priv);
23998+
23999+ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
24000+ if (msvdx_priv->msvdx_needs_reset) {
24001+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
24002+ PSB_DEBUG_GENERAL("MSVDX: will reset msvdx\n");
24003+ if (psb_msvdx_reset(dev_priv)) {
24004+ ret = -EBUSY;
24005+ DRM_ERROR("MSVDX: Reset failed\n");
24006+ return ret;
24007+ }
24008+ msvdx_priv->msvdx_needs_reset = 0;
24009+ msvdx_priv->msvdx_busy = 0;
24010+
24011+ psb_msvdx_init(dev);
24012+ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
24013+ }
24014+
24015+ if (!msvdx_priv->msvdx_fw_loaded) {
24016+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
24017+ PSB_DEBUG_GENERAL("MSVDX:reload FW to MTX\n");
24018+
24019+ ret = psb_setup_fw(dev);
24020+ if (ret) {
24021+ DRM_ERROR("MSVDX:fail to load FW\n");
24022+ /* FIXME: find a proper return value */
24023+ return -EFAULT;
24024+ }
24025+ msvdx_priv->msvdx_fw_loaded = 1;
24026+
24027+ PSB_DEBUG_GENERAL("MSVDX: load firmware successfully\n");
24028+ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
24029+ }
24030+
24031+ if (!msvdx_priv->msvdx_busy) {
24032+ msvdx_priv->msvdx_busy = 1;
24033+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
24034+ PSB_DEBUG_GENERAL("MSVDX: commit command to HW,seq=0x%08x\n",
24035+ sequence);
24036+ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
24037+ cmd_size, NULL, sequence, 0);
24038+ if (ret) {
24039+ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
24040+ return ret;
24041+ }
24042+ } else {
24043+ struct psb_msvdx_cmd_queue *msvdx_cmd;
24044+ void *cmd = NULL;
24045+
24046+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
24047+ /* queue the command to be sent when the h/w is ready */
24048+ PSB_DEBUG_GENERAL("MSVDXQUE: queueing sequence:%08x..\n",
24049+ sequence);
24050+ msvdx_cmd = kzalloc(sizeof(struct psb_msvdx_cmd_queue),
24051+ GFP_KERNEL);
24052+ if (msvdx_cmd == NULL) {
24053+ DRM_ERROR("MSVDXQUE: Out of memory...\n");
24054+ return -ENOMEM;
24055+ }
24056+
24057+ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
24058+ cmd_size, &cmd, sequence, 1);
24059+ if (ret) {
24060+ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
24061+ kfree(msvdx_cmd
24062+ );
24063+ return ret;
24064+ }
24065+ msvdx_cmd->cmd = cmd;
24066+ msvdx_cmd->cmd_size = cmd_size;
24067+ msvdx_cmd->sequence = sequence;
24068+ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
24069+ list_add_tail(&msvdx_cmd->head, &msvdx_priv->msvdx_queue);
24070+ if (!msvdx_priv->msvdx_busy) {
24071+ msvdx_priv->msvdx_busy = 1;
24072+ PSB_DEBUG_GENERAL("MSVDXQUE: Need immediate dequeue\n");
24073+ psb_msvdx_dequeue_send(dev);
24074+ }
24075+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
24076+ }
24077+
24078+ return ret;
24079+}
24080+
24081+int psb_cmdbuf_video(struct drm_file *priv,
24082+ struct list_head *validate_list,
24083+ uint32_t fence_type,
24084+ struct drm_psb_cmdbuf_arg *arg,
24085+ struct ttm_buffer_object *cmd_buffer,
24086+ struct psb_ttm_fence_rep *fence_arg)
24087+{
24088+ struct drm_device *dev = priv->minor->dev;
24089+ struct ttm_fence_object *fence;
24090+ int ret;
24091+
24092+ /*
24093+ * Check this. Doesn't seem right. Have fencing done AFTER command
24094+ * submission and make sure drm_psb_idle idles the MSVDX completely.
24095+ */
24096+ ret =
24097+ psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
24098+ arg->cmdbuf_size, NULL);
24099+ if (ret)
24100+ return ret;
24101+
24102+
24103+ /* DRM_ERROR("Intel: Fix video fencing!!\n"); */
24104+ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, fence_type,
24105+ arg->fence_flags, validate_list, fence_arg,
24106+ &fence);
24107+
24108+ ttm_fence_object_unref(&fence);
24109+ mutex_lock(&cmd_buffer->mutex);
24110+ if (cmd_buffer->sync_obj != NULL)
24111+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
24112+ mutex_unlock(&cmd_buffer->mutex);
24113+
24114+ return 0;
24115+}
24116+
24117+
24118+static int psb_msvdx_send(struct drm_device *dev, void *cmd,
24119+ unsigned long cmd_size)
24120+{
24121+ int ret = 0;
24122+ struct drm_psb_private *dev_priv = dev->dev_private;
24123+
24124+ while (cmd_size > 0) {
24125+ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
24126+ if (cur_cmd_size > cmd_size) {
24127+ ret = -EINVAL;
24128+ DRM_ERROR("MSVDX:cmd_size %lu cur_cmd_size %lu\n",
24129+ cmd_size, (unsigned long)cur_cmd_size);
24130+ goto out;
24131+ }
24132+
24133+ /* Send the message to h/w */
24134+ ret = psb_mtx_send(dev_priv, cmd);
24135+ if (ret) {
24136+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
24137+ goto out;
24138+ }
24139+ cmd += cur_cmd_size;
24140+ cmd_size -= cur_cmd_size;
24141+ }
24142+
24143+out:
24144+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
24145+ return ret;
24146+}
24147+
24148+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *msg)
24149+{
24150+ static uint32_t pad_msg[FWRK_PADMSG_SIZE];
24151+ const uint32_t *p_msg = (uint32_t *) msg;
24152+ uint32_t msg_num, words_free, ridx, widx;
24153+ int ret = 0;
24154+
24155+ PSB_DEBUG_GENERAL("MSVDX: psb_mtx_send\n");
24156+
24157+ /* we need clocks enabled before we touch VEC local ram */
24158+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
24159+
24160+ msg_num = (MEMIO_READ_FIELD(msg, FWRK_GENMSG_SIZE) + 3) / 4;
24161+ if (msg_num > NUM_WORDS_MTX_BUF) {
24162+ ret = -EINVAL;
24163+ DRM_ERROR("MSVDX: message exceed maximum,ret:%d\n", ret);
24164+ goto out;
24165+ }
24166+
24167+ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
24168+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
24169+
24170+ /* message would wrap, need to send a pad message */
24171+ if (widx + msg_num > NUM_WORDS_MTX_BUF) {
24172+ /* Shouldn't happen for a PAD message itself */
24173+ BUG_ON(MEMIO_READ_FIELD(msg, FWRK_GENMSG_ID)
24174+ == FWRK_MSGID_PADDING);
24175+
24176+ /* if the read pointer is at zero then we must wait for it to
24177+ * change otherwise the write pointer will equal the read
24178+ * pointer,which should only happen when the buffer is empty
24179+ *
24180+ * This will only happens if we try to overfill the queue,
24181+ * queue management should make
24182+ * sure this never happens in the first place.
24183+ */
24184+ BUG_ON(0 == ridx);
24185+ if (0 == ridx) {
24186+ ret = -EINVAL;
24187+ DRM_ERROR("MSVDX: RIndex=0, ret:%d\n", ret);
24188+ goto out;
24189+ }
24190+
24191+ /* Send a pad message */
24192+ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_SIZE,
24193+ (NUM_WORDS_MTX_BUF - widx) << 2);
24194+ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_ID,
24195+ FWRK_MSGID_PADDING);
24196+ psb_mtx_send(dev_priv, pad_msg);
24197+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
24198+ }
24199+
24200+ if (widx >= ridx)
24201+ words_free = NUM_WORDS_MTX_BUF - (widx - ridx);
24202+ else
24203+ words_free = ridx - widx;
24204+
24205+ BUG_ON(msg_num > words_free);
24206+ if (msg_num > words_free) {
24207+ ret = -EINVAL;
24208+ DRM_ERROR("MSVDX: msg_num > words_free, ret:%d\n", ret);
24209+ goto out;
24210+ }
24211+ while (msg_num > 0) {
24212+ PSB_WMSVDX32(*p_msg++, MSVDX_COMMS_TO_MTX_BUF + (widx << 2));
24213+ msg_num--;
24214+ widx++;
24215+ if (NUM_WORDS_MTX_BUF == widx)
24216+ widx = 0;
24217+ }
24218+ PSB_WMSVDX32(widx, MSVDX_COMMS_TO_MTX_WRT_INDEX);
24219+
24220+ /* Make sure clocks are enabled before we kick */
24221+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
24222+
24223+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
24224+
24225+ /* signal an interrupt to let the mtx know there is a new message */
24226+ PSB_WMSVDX32(1, MSVDX_MTX_KICKI);
24227+
24228+out:
24229+ return ret;
24230+}
24231+
24232+/*
24233+ * MSVDX MTX interrupt
24234+ */
24235+static void psb_msvdx_mtx_interrupt(struct drm_device *dev)
24236+{
24237+ struct drm_psb_private *dev_priv =
24238+ (struct drm_psb_private *)dev->dev_private;
24239+ static uint32_t buf[128]; /* message buffer */
24240+ uint32_t ridx, widx;
24241+ uint32_t num, ofs; /* message num and offset */
24242+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
24243+
24244+ PSB_DEBUG_GENERAL("MSVDX:Got a MSVDX MTX interrupt\n");
24245+
24246+ /* Are clocks enabled - If not enable before
24247+ * attempting to read from VLR
24248+ */
24249+ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all)) {
24250+ PSB_DEBUG_GENERAL("MSVDX:Clocks disabled when Interupt set\n");
24251+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
24252+ }
24253+
24254+loop: /* just for coding style check */
24255+ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_RD_INDEX);
24256+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_WRT_INDEX);
24257+
24258+ /* Get out of here if nothing */
24259+ if (ridx == widx)
24260+ goto done;
24261+
24262+ ofs = 0;
24263+ buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2));
24264+
24265+ /* round to nearest word */
24266+ num = (MEMIO_READ_FIELD(buf, FWRK_GENMSG_SIZE) + 3) / 4;
24267+
24268+ /* ASSERT(num <= sizeof(buf) / sizeof(uint32_t)); */
24269+
24270+ if (++ridx >= NUM_WORDS_HOST_BUF)
24271+ ridx = 0;
24272+
24273+ for (ofs++; ofs < num; ofs++) {
24274+ buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2));
24275+
24276+ if (++ridx >= NUM_WORDS_HOST_BUF)
24277+ ridx = 0;
24278+ }
24279+
24280+ /* Update the Read index */
24281+ PSB_WMSVDX32(ridx, MSVDX_COMMS_TO_HOST_RD_INDEX);
24282+
24283+ if (msvdx_priv->msvdx_needs_reset)
24284+ goto loop;
24285+
24286+ switch (MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID)) {
24287+ case VA_MSGID_CMD_HW_PANIC:
24288+ case VA_MSGID_CMD_FAILED: {
24289+ uint32_t fence = MEMIO_READ_FIELD(buf,
24290+ FW_VA_CMD_FAILED_FENCE_VALUE);
24291+ uint32_t fault = MEMIO_READ_FIELD(buf,
24292+ FW_VA_CMD_FAILED_IRQSTATUS);
24293+ uint32_t msg_id = MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID);
24294+ uint32_t diff = 0;
24295+
24296+ (void) fault;
24297+ if (msg_id == VA_MSGID_CMD_HW_PANIC)
24298+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_HW_PANIC:"
24299+ "Fault detected"
24300+ " - Fence: %08x, Status: %08x"
24301+ " - resetting and ignoring error\n",
24302+ fence, fault);
24303+ else
24304+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_FAILED:"
24305+ "Fault detected"
24306+ " - Fence: %08x, Status: %08x"
24307+ " - resetting and ignoring error\n",
24308+ fence, fault);
24309+
24310+ msvdx_priv->msvdx_needs_reset = 1;
24311+
24312+ if (msg_id == VA_MSGID_CMD_HW_PANIC) {
24313+ diff = msvdx_priv->msvdx_current_sequence
24314+ - dev_priv->sequence[PSB_ENGINE_VIDEO];
24315+
24316+ if (diff > 0x0FFFFFFF)
24317+ msvdx_priv->msvdx_current_sequence++;
24318+
24319+ PSB_DEBUG_GENERAL("MSVDX: Fence ID missing, "
24320+ "assuming %08x\n",
24321+ msvdx_priv->msvdx_current_sequence);
24322+ } else {
24323+ msvdx_priv->msvdx_current_sequence = fence;
24324+ }
24325+
24326+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
24327+ msvdx_priv->msvdx_current_sequence,
24328+ _PSB_FENCE_TYPE_EXE, DRM_CMD_FAILED);
24329+
24330+ /* Flush the command queue */
24331+ psb_msvdx_flush_cmd_queue(dev);
24332+
24333+ goto done;
24334+ }
24335+ case VA_MSGID_CMD_COMPLETED: {
24336+ uint32_t fence = MEMIO_READ_FIELD(buf,
24337+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
24338+ uint32_t flags = MEMIO_READ_FIELD(buf,
24339+ FW_VA_CMD_COMPLETED_FLAGS);
24340+
24341+ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED: "
24342+ "FenceID: %08x, flags: 0x%x\n",
24343+ fence, flags);
24344+
24345+ msvdx_priv->msvdx_current_sequence = fence;
24346+
24347+ psb_fence_handler(dev, PSB_ENGINE_VIDEO);
24348+
24349+ if (flags & FW_VA_RENDER_HOST_INT) {
24350+ /*Now send the next command from the msvdx cmd queue */
24351+ psb_msvdx_dequeue_send(dev);
24352+ goto done;
24353+ }
24354+
24355+ break;
24356+ }
24357+ case VA_MSGID_CMD_COMPLETED_BATCH: {
24358+ uint32_t fence = MEMIO_READ_FIELD(buf,
24359+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
24360+ uint32_t tickcnt = MEMIO_READ_FIELD(buf,
24361+ FW_VA_CMD_COMPLETED_NO_TICKS);
24362+ (void)tickcnt;
24363+ /* we have the fence value in the message */
24364+ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED_BATCH:"
24365+ " FenceID: %08x, TickCount: %08x\n",
24366+ fence, tickcnt);
24367+ msvdx_priv->msvdx_current_sequence = fence;
24368+
24369+ break;
24370+ }
24371+ case VA_MSGID_ACK:
24372+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_ACK\n");
24373+ break;
24374+
24375+ case VA_MSGID_TEST1:
24376+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST1\n");
24377+ break;
24378+
24379+ case VA_MSGID_TEST2:
24380+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST2\n");
24381+ break;
24382+ /* Don't need to do anything with these messages */
24383+
24384+ case VA_MSGID_DEBLOCK_REQUIRED: {
24385+ uint32_t ctxid = MEMIO_READ_FIELD(buf,
24386+ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
24387+ (void) ctxid;
24388+ /* The BE we now be locked. */
24389+ /* Unblock rendec by reading the mtx2mtx end of slice */
24390+ (void) PSB_RMSVDX32(MSVDX_RENDEC_READ_DATA);
24391+
24392+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_DEBLOCK_REQUIRED"
24393+ " Context=%08x\n", ctxid);
24394+ goto done;
24395+ }
24396+ default:
24397+ DRM_ERROR("ERROR: msvdx Unknown message from MTX \n");
24398+ goto done;
24399+ }
24400+
24401+done:
24402+ /* we get a frame/slice done, try to save some power*/
24403+ if (drm_msvdx_pmpolicy == PSB_PMPOLICY_POWERDOWN)
24404+ schedule_delayed_work(&dev_priv->scheduler.msvdx_suspend_wq, 0);
24405+
24406+ DRM_MEMORYBARRIER(); /* TBD check this... */
24407+}
24408+
24409+
24410+/*
24411+ * MSVDX interrupt.
24412+ */
24413+void psb_msvdx_interrupt(struct drm_device *dev,
24414+ uint32_t msvdx_stat)
24415+{
24416+ struct drm_psb_private *dev_priv =
24417+ (struct drm_psb_private *) dev->dev_private;
24418+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
24419+
24420+ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
24421+ /*Ideally we should we should never get to this */
24422+ PSB_DEBUG_IRQ("MSVDX:MMU Fault:0x%x fence2_irq_on=%d\n",
24423+ msvdx_stat, dev_priv->fence2_irq_on);
24424+
24425+ /* Pause MMU */
24426+ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
24427+ MSVDX_MMU_CONTROL0);
24428+ DRM_WRITEMEMORYBARRIER();
24429+
24430+ /* Clear this interupt bit only */
24431+ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
24432+ MSVDX_INTERRUPT_CLEAR);
24433+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
24434+ DRM_READMEMORYBARRIER();
24435+
24436+ msvdx_priv->msvdx_needs_reset = 1;
24437+ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
24438+ PSB_DEBUG_IRQ
24439+ ("MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d(MTX)\n",
24440+ msvdx_stat, dev_priv->fence2_irq_on);
24441+
24442+ /* Clear all interupt bits */
24443+ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
24444+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
24445+ DRM_READMEMORYBARRIER();
24446+
24447+ psb_msvdx_mtx_interrupt(dev);
24448+ }
24449+}
24450+
24451+
24452+void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
24453+ int *msvdx_lockup, int *msvdx_idle)
24454+{
24455+ int tmp;
24456+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
24457+
24458+ *msvdx_lockup = 0;
24459+ *msvdx_idle = 1;
24460+
24461+#if 0
24462+ PSB_DEBUG_GENERAL("MSVDXTimer: current_sequence:%d "
24463+ "last_sequence:%d and last_submitted_sequence :%d\n",
24464+ msvdx_priv->msvdx_current_sequence,
24465+ msvdx_priv->msvdx_last_sequence,
24466+ dev_priv->sequence[PSB_ENGINE_VIDEO]);
24467+#endif
24468+
24469+ tmp = msvdx_priv->msvdx_current_sequence -
24470+ dev_priv->sequence[PSB_ENGINE_VIDEO];
24471+
24472+ if (tmp > 0x0FFFFFFF) {
24473+ if (msvdx_priv->msvdx_current_sequence ==
24474+ msvdx_priv->msvdx_last_sequence) {
24475+ DRM_ERROR("MSVDXTimer:locked-up for sequence:%d\n",
24476+ msvdx_priv->msvdx_current_sequence);
24477+ *msvdx_lockup = 1;
24478+ } else {
24479+ PSB_DEBUG_GENERAL("MSVDXTimer: "
24480+ "msvdx responded fine so far\n");
24481+ msvdx_priv->msvdx_last_sequence =
24482+ msvdx_priv->msvdx_current_sequence;
24483+ *msvdx_idle = 0;
24484+ }
24485+ }
24486+}
24487+
24488+int psb_check_msvdx_idle(struct drm_device *dev)
24489+{
24490+ struct drm_psb_private *dev_priv =
24491+ (struct drm_psb_private *)dev->dev_private;
24492+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
24493+ uint32_t fs_status, ccb_roff, ccb_woff;
24494+
24495+ if (msvdx_priv->msvdx_busy) {
24496+ PSB_DEBUG_PM("MSVDX: psb_check_msvdx_idle returns busy\n");
24497+ return -EBUSY;
24498+ }
24499+
24500+ /* check that clocks are enabled before reading VLR */
24501+ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
24502+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
24503+
24504+ fs_status = PSB_RMSVDX32(MSVDX_COMMS_FW_STATUS);
24505+ ccb_roff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
24506+ ccb_woff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
24507+
24508+ /* If the firmware says the hardware is idle
24509+ * and the CCB is empty then we can say it is IDLE
24510+ */
24511+ if ((fs_status & MSVDX_FW_STATUS_HW_IDLE) && (ccb_roff == ccb_woff)) {
24512+ PSB_DEBUG_PM("MSVDXIDLE: FW indicate IDLE\n");
24513+ return 0;
24514+ }
24515+
24516+ return -EBUSY; /* not checkout fence, CCB, etc here*/
24517+}
24518+
24519+int psb_wait_msvdx_idle(struct drm_device *dev)
24520+{
24521+ struct drm_psb_private *dev_priv =
24522+ (struct drm_psb_private *)dev->dev_private;
24523+ struct ttm_fence_device *fdev = &dev_priv->fdev;
24524+ struct ttm_fence_class_manager *fc =
24525+ &fdev->fence_class[PSB_ENGINE_VIDEO];
24526+ struct ttm_fence_object *fence, *next;
24527+ int signaled = 0;
24528+ unsigned long _end = jiffies + 5 * DRM_HZ;
24529+ int ret = 0;
24530+
24531+ /* Ensure that all pending IRQs are serviced, */
24532+
24533+ /*
24534+ * Save the last MSVDX fence in dev_priv instead!!!
24535+ * Need to be fc->write_locked while accessing a fence from the ring.
24536+ */
24537+ list_for_each_entry_safe(fence, next, &fc->ring, ring) {
24538+ do {
24539+ signaled = ttm_fence_object_signaled(fence,
24540+ _PSB_FENCE_TYPE_EXE);
24541+ if (signaled) {
24542+ PSB_DEBUG_PM("MSVDXIDLE:wait_fence success\n");
24543+ break;
24544+ }
24545+ if (time_after_eq(jiffies, _end)) {
24546+ PSB_DEBUG_PM("MSVDXIDLE: fence 0x%x didn't get"
24547+ "signaled for 3 secs\n",
24548+ (unsigned int) fence);
24549+ break;
24550+ }
24551+ DRM_UDELAY(1000);
24552+ } while (1);
24553+ }
24554+ do {
24555+ ret = psb_check_msvdx_idle(dev);
24556+ if (ret == 0) {
24557+ PSB_DEBUG_PM("MSVDXIDLE: check_idle succeeded!\n");
24558+ break;
24559+ }
24560+
24561+ if (time_after_eq(jiffies, _end)) {
24562+ PSB_DEBUG_PM("MSVDXIDLE: wait HW idle time out\n");
24563+ break;
24564+ }
24565+ DRM_UDELAY(1000);
24566+ } while (1);
24567+
24568+ return ret;
24569+}
24570+
24571+#if 0
24572+static int psb_power_gated_msvdx(struct drm_device *dev)
24573+{
24574+ struct drm_psb_private *dev_priv =
24575+ (struct drm_psb_private *)dev->dev_private;
24576+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
24577+
24578+ PSB_DEBUG_PM("MSVDX: Setting clock to minimal\n");
24579+ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
24580+
24581+ MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_CLOCKGATED);
24582+
24583+ return 0;
24584+}
24585+
24586+static int psb_power_ungated_msvdx(struct drm_device *dev)
24587+{
24588+ struct drm_psb_private *dev_priv =
24589+ (struct drm_psb_private *)dev->dev_private;
24590+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
24591+
24592+ MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_POWERUP);
24593+
24594+ return 0;
24595+}
24596+#endif
24597+
24598+int lnc_video_getparam(struct drm_device *dev, void *data,
24599+ struct drm_file *file_priv)
24600+{
24601+ struct drm_lnc_video_getparam_arg *arg = data;
24602+ int ret = 0;
24603+ struct drm_psb_private *dev_priv =
24604+ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
24605+#if defined(CONFIG_MRST_RAR_HANDLER)
24606+ struct RAR_buffer rar_buf;
24607+ size_t rar_status;
24608+#endif
24609+ void *rar_handler;
24610+ uint32_t offset = 0;
24611+
24612+ switch (arg->key) {
24613+ case LNC_VIDEO_GETPARAM_RAR_REGION_SIZE:
24614+ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
24615+ &dev_priv->rar_region_size,
24616+ sizeof(dev_priv->rar_region_size));
24617+ break;
24618+ case LNC_VIDEO_GETPARAM_RAR_HANDLER_OFFSET:
24619+ ret = copy_from_user(&rar_handler,
24620+ (void __user *)((unsigned long)arg->arg),
24621+ sizeof(rar_handler));
24622+ if (ret)
24623+ break;
24624+
24625+#if defined(CONFIG_MRST_RAR_HANDLER)
24626+ rar_buf.info.handle = rar_handler;
24627+ rar_buf.bus_address = dev_priv->rar_region_start;
24628+ rar_status = 1;
24629+
24630+ rar_status = rar_handle_to_bus(&rar_buf, 1);
24631+ if (rar_status != 1) {
24632+ DRM_ERROR("MSVDX:rar_handle_to_bus failed\n");
24633+ ret = -1;
24634+ break;
24635+ }
24636+
24637+ offset = rar_buf.bus_address - dev_priv->rar_region_start;
24638+ PSB_DEBUG_GENERAL("MSVDX:RAR handler %p, bus address=0x%08x,"
24639+ "RAR region=0x%08x\n", rar_handler,
24640+ rar_buf.bus_address,dev_priv->rar_region_start);
24641+#endif
24642+ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
24643+ &offset,
24644+ sizeof(offset));
24645+ break;
24646+ case LNC_VIDEO_FRAME_SKIP:
24647+ ret = lnc_video_frameskip(dev, arg->value);
24648+ break;
24649+ default:
24650+ ret = -EFAULT;
24651+ break;
24652+ }
24653+
24654+ if (ret)
24655+ return -EFAULT;
24656+
24657+ return 0;
24658+}
24659+
24660+inline int psb_try_power_down_msvdx(struct drm_device *dev)
24661+{
24662+ return powermgmt_suspend_islands(dev->pdev, PSB_VIDEO_DEC_ISLAND, false);
24663+}
24664diff --git a/drivers/gpu/drm/psb/psb_msvdx.h b/drivers/gpu/drm/psb/psb_msvdx.h
24665new file mode 100644
24666index 0000000..8d8d8b5
24667--- /dev/null
24668+++ b/drivers/gpu/drm/psb/psb_msvdx.h
24669@@ -0,0 +1,527 @@
24670+/**************************************************************************
24671+ *
24672+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
24673+ * Copyright (c) Imagination Technologies Limited, UK
24674+ * All Rights Reserved.
24675+ *
24676+ * Permission is hereby granted, free of charge, to any person obtaining a
24677+ * copy of this software and associated documentation files (the
24678+ * "Software"), to deal in the Software without restriction, including
24679+ * without limitation the rights to use, copy, modify, merge, publish,
24680+ * distribute, sub license, and/or sell copies of the Software, and to
24681+ * permit persons to whom the Software is furnished to do so, subject to
24682+ * the following conditions:
24683+ *
24684+ * The above copyright notice and this permission notice (including the
24685+ * next paragraph) shall be included in all copies or substantial portions
24686+ * of the Software.
24687+ *
24688+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24689+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24690+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
24691+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
24692+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24693+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24694+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
24695+ *
24696+ **************************************************************************/
24697+
24698+#ifndef _PSB_MSVDX_H_
24699+#define _PSB_MSVDX_H_
24700+
24701+#include "psb_drv.h"
24702+
24703+#if defined(CONFIG_MRST_RAR_HANDLER)
24704+#include "rar/memrar.h"
24705+#endif
24706+
24707+extern int drm_msvdx_pmpolicy;
24708+
24709+void psb_msvdx_interrupt(struct drm_device *dev,
24710+ uint32_t msvdx_stat);
24711+
24712+int psb_msvdx_init(struct drm_device *dev);
24713+int psb_msvdx_uninit(struct drm_device *dev);
24714+int psb_msvdx_reset(struct drm_psb_private *dev_priv);
24715+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
24716+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *pvMsg);
24717+void psb_msvdx_flush_cmd_queue(struct drm_device *dev);
24718+void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
24719+ int *msvdx_lockup, int *msvdx_idle);
24720+int psb_setup_fw(struct drm_device *dev);
24721+int psb_check_msvdx_idle(struct drm_device *dev);
24722+int psb_wait_msvdx_idle(struct drm_device *dev);
24723+int psb_cmdbuf_video(struct drm_file *priv,
24724+ struct list_head *validate_list,
24725+ uint32_t fence_type,
24726+ struct drm_psb_cmdbuf_arg *arg,
24727+ struct ttm_buffer_object *cmd_buffer,
24728+ struct psb_ttm_fence_rep *fence_arg);
24729+
24730+/* Non-Optimal Invalidation is not default */
24731+#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2
24732+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
24733+
24734+#define FW_VA_RENDER_HOST_INT 0x00004000
24735+#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020
24736+
24737+/* There is no work currently underway on the hardware */
24738+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
24739+#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200
24740+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 \
24741+ (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | \
24742+ MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
24743+ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
24744+
24745+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 \
24746+ (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
24747+ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
24748+
24749+#define POULSBO_D0 0x5
24750+#define POULSBO_D1 0x6
24751+#define PSB_REVID_OFFSET 0x8
24752+
24753+#define MTX_CODE_BASE (0x80900000)
24754+#define MTX_DATA_BASE (0x82880000)
24755+#define PC_START_ADDRESS (0x80900000)
24756+
24757+#define MTX_CORE_CODE_MEM (0x10)
24758+#define MTX_CORE_DATA_MEM (0x18)
24759+
24760+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
24761+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
24762+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK \
24763+ (0x00010000)
24764+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK \
24765+ (0x00100000)
24766+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK \
24767+ (0x01000000)
24768+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK \
24769+ (0x10000000)
24770+
24771+#define clk_enable_all \
24772+(MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
24773+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
24774+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
24775+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
24776+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
24777+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
24778+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
24779+
24780+#define clk_enable_minimal \
24781+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
24782+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
24783+
24784+#define clk_enable_auto \
24785+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
24786+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \
24787+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \
24788+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \
24789+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \
24790+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
24791+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
24792+
24793+#define msvdx_sw_reset_all \
24794+(MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \
24795+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \
24796+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \
24797+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
24798+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK)
24799+
24800+#define MTX_INTERNAL_REG(R_SPECIFIER , U_SPECIFIER) \
24801+ (((R_SPECIFIER)<<4) | (U_SPECIFIER))
24802+#define MTX_PC MTX_INTERNAL_REG(0, 5)
24803+
24804+#define RENDEC_A_SIZE (1024 * 1024)
24805+#define RENDEC_B_SIZE (1024 * 1024)
24806+
24807+#define MEMIO_READ_FIELD(vpMem, field) \
24808+ ((uint32_t)(((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
24809+ & field##_MASK) >> field##_SHIFT))
24810+
24811+#define MEMIO_WRITE_FIELD(vpMem, field, value) \
24812+ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
24813+ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
24814+ & (field##_TYPE)~field##_MASK) | \
24815+ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT) & field##_MASK);
24816+
24817+#define MEMIO_WRITE_FIELD_LITE(vpMem, field, value) \
24818+ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
24819+ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) | \
24820+ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT)));
24821+
24822+#define REGIO_READ_FIELD(reg_val, reg, field) \
24823+ ((reg_val & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
24824+
24825+#define REGIO_WRITE_FIELD(reg_val, reg, field, value) \
24826+ (reg_val) = \
24827+ ((reg_val) & ~(reg##_##field##_MASK)) | \
24828+ (((value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
24829+
24830+#define REGIO_WRITE_FIELD_LITE(reg_val, reg, field, value) \
24831+ (reg_val) = \
24832+ ((reg_val) | ((value) << (reg##_##field##_SHIFT)));
24833+
24834+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK \
24835+ (0x00000001)
24836+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK \
24837+ (0x00000002)
24838+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK \
24839+ (0x00000004)
24840+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK \
24841+ (0x00000008)
24842+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK \
24843+ (0x00000010)
24844+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK \
24845+ (0x00000020)
24846+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK \
24847+ (0x00000040)
24848+
24849+#define clk_enable_all \
24850+ (MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
24851+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
24852+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
24853+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
24854+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
24855+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
24856+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
24857+
24858+#define clk_enable_minimal \
24859+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
24860+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
24861+
24862+/* MTX registers */
24863+#define MSVDX_MTX_ENABLE (0x0000)
24864+#define MSVDX_MTX_KICKI (0x0088)
24865+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC)
24866+#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8)
24867+#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104)
24868+#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108)
24869+#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C)
24870+#define MSVDX_MTX_SOFT_RESET (0x0200)
24871+
24872+/* MSVDX registers */
24873+#define MSVDX_CONTROL (0x0600)
24874+#define MSVDX_INTERRUPT_CLEAR (0x060C)
24875+#define MSVDX_INTERRUPT_STATUS (0x0608)
24876+#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610)
24877+#define MSVDX_MMU_CONTROL0 (0x0680)
24878+#define MSVDX_MTX_RAM_BANK (0x06F0)
24879+#define MSVDX_MAN_CLK_ENABLE (0x0620)
24880+
24881+/* RENDEC registers */
24882+#define MSVDX_RENDEC_CONTROL0 (0x0868)
24883+#define MSVDX_RENDEC_CONTROL1 (0x086C)
24884+#define MSVDX_RENDEC_BUFFER_SIZE (0x0870)
24885+#define MSVDX_RENDEC_BASE_ADDR0 (0x0874)
24886+#define MSVDX_RENDEC_BASE_ADDR1 (0x0878)
24887+#define MSVDX_RENDEC_READ_DATA (0x0898)
24888+#define MSVDX_RENDEC_CONTEXT0 (0x0950)
24889+#define MSVDX_RENDEC_CONTEXT1 (0x0954)
24890+#define MSVDX_RENDEC_CONTEXT2 (0x0958)
24891+#define MSVDX_RENDEC_CONTEXT3 (0x095C)
24892+#define MSVDX_RENDEC_CONTEXT4 (0x0960)
24893+#define MSVDX_RENDEC_CONTEXT5 (0x0964)
24894+
24895+/*
24896+ * This defines the MSVDX communication buffer
24897+ */
24898+#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */
24899+/*!< Host buffer size (in 32-bit words) */
24900+#define NUM_WORDS_HOST_BUF (100)
24901+/*!< MTX buffer size (in 32-bit words) */
24902+#define NUM_WORDS_MTX_BUF (100)
24903+
24904+/* There is no work currently underway on the hardware */
24905+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
24906+
24907+#define MSVDX_COMMS_AREA_ADDR (0x02cc0)
24908+
24909+#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18)
24910+#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04)
24911+#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10)
24912+#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00)
24913+#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04)
24914+#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08)
24915+#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C)
24916+#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10)
24917+#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14)
24918+#define MSVDX_COMMS_TO_MTX_CB_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x18)
24919+#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C)
24920+#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20)
24921+#define MSVDX_COMMS_TO_MTX_BUF \
24922+ (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
24923+
24924+#define MSVDX_COMMS_AREA_END \
24925+ (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
24926+
24927+#if (MSVDX_COMMS_AREA_END != 0x03000)
24928+#error
24929+#endif
24930+
24931+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000)
24932+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31)
24933+
24934+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000)
24935+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16)
24936+
24937+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000)
24938+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20)
24939+
24940+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC)
24941+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2)
24942+
24943+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002)
24944+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1)
24945+
24946+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001)
24947+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0)
24948+
24949+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001)
24950+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0)
24951+
24952+#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001)
24953+#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0)
24954+
24955+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
24956+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
24957+
24958+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00)
24959+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8)
24960+
24961+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000)
24962+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14)
24963+
24964+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002)
24965+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1)
24966+
24967+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000)
24968+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16)
24969+
24970+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF)
24971+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0)
24972+
24973+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000)
24974+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16)
24975+
24976+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF)
24977+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0)
24978+
24979+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000)
24980+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18)
24981+
24982+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000)
24983+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16)
24984+
24985+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000)
24986+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24)
24987+
24988+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001)
24989+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0)
24990+
24991+/* Start of parser specific Host->MTX messages. */
24992+#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80)
24993+
24994+/* Start of parser specific MTX->Host messages. */
24995+#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0)
24996+
24997+#define FWRK_MSGID_PADDING (0)
24998+
24999+#define FWRK_GENMSG_SIZE_TYPE uint8_t
25000+#define FWRK_GENMSG_SIZE_MASK (0xFF)
25001+#define FWRK_GENMSG_SIZE_SHIFT (0)
25002+#define FWRK_GENMSG_SIZE_OFFSET (0x0000)
25003+#define FWRK_GENMSG_ID_TYPE uint8_t
25004+#define FWRK_GENMSG_ID_MASK (0xFF)
25005+#define FWRK_GENMSG_ID_SHIFT (0)
25006+#define FWRK_GENMSG_ID_OFFSET (0x0001)
25007+#define FWRK_PADMSG_SIZE (2)
25008+
25009+/* This type defines the framework specified message ids */
25010+enum {
25011+ /* ! Sent by the DXVA driver on the host to the mtx firmware.
25012+ */
25013+ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
25014+ VA_MSGID_RENDER,
25015+ VA_MSGID_DEBLOCK,
25016+ VA_MSGID_BUBBLE,
25017+
25018+ /* Test Messages */
25019+ VA_MSGID_TEST1,
25020+ VA_MSGID_TEST2,
25021+
25022+ /*! Sent by the mtx firmware to itself.
25023+ */
25024+ VA_MSGID_RENDER_MC_INTERRUPT,
25025+
25026+ /*! Sent by the DXVA firmware on the MTX to the host.
25027+ */
25028+ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
25029+ VA_MSGID_CMD_COMPLETED_BATCH,
25030+ VA_MSGID_DEBLOCK_REQUIRED,
25031+ VA_MSGID_TEST_RESPONCE,
25032+ VA_MSGID_ACK,
25033+
25034+ VA_MSGID_CMD_FAILED,
25035+ VA_MSGID_CMD_UNSUPPORTED,
25036+ VA_MSGID_CMD_HW_PANIC,
25037+};
25038+
25039+/* MSVDX private structure */
25040+struct msvdx_private {
25041+ int msvdx_needs_reset;
25042+
25043+ unsigned int pmstate;
25044+
25045+ struct sysfs_dirent *sysfs_pmstate;
25046+
25047+ uint32_t msvdx_current_sequence;
25048+ uint32_t msvdx_last_sequence;
25049+
25050+ /*
25051+ *MSVDX Rendec Memory
25052+ */
25053+ struct ttm_buffer_object *ccb0;
25054+ uint32_t base_addr0;
25055+ struct ttm_buffer_object *ccb1;
25056+ uint32_t base_addr1;
25057+
25058+ /*
25059+ *msvdx command queue
25060+ */
25061+ spinlock_t msvdx_lock;
25062+ struct mutex msvdx_mutex;
25063+ struct list_head msvdx_queue;
25064+ int msvdx_busy;
25065+ int msvdx_fw_loaded;
25066+ void *msvdx_fw;
25067+ int msvdx_fw_size;
25068+};
25069+
25070+/* MSVDX Firmware interface */
25071+#define FW_VA_INIT_SIZE (8)
25072+#define FW_VA_DEBUG_TEST2_SIZE (4)
25073+
25074+/* FW_VA_DEBUG_TEST2 MSG_SIZE */
25075+#define FW_VA_DEBUG_TEST2_MSG_SIZE_TYPE uint8_t
25076+#define FW_VA_DEBUG_TEST2_MSG_SIZE_MASK (0xFF)
25077+#define FW_VA_DEBUG_TEST2_MSG_SIZE_OFFSET (0x0000)
25078+#define FW_VA_DEBUG_TEST2_MSG_SIZE_SHIFT (0)
25079+
25080+/* FW_VA_DEBUG_TEST2 ID */
25081+#define FW_VA_DEBUG_TEST2_ID_TYPE uint8_t
25082+#define FW_VA_DEBUG_TEST2_ID_MASK (0xFF)
25083+#define FW_VA_DEBUG_TEST2_ID_OFFSET (0x0001)
25084+#define FW_VA_DEBUG_TEST2_ID_SHIFT (0)
25085+
25086+/* FW_VA_CMD_FAILED FENCE_VALUE */
25087+#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t
25088+#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF)
25089+#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004)
25090+#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0)
25091+
25092+/* FW_VA_CMD_FAILED IRQSTATUS */
25093+#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t
25094+#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF)
25095+#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008)
25096+#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0)
25097+
25098+/* FW_VA_CMD_COMPLETED FENCE_VALUE */
25099+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t
25100+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF)
25101+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004)
25102+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0)
25103+
25104+/* FW_VA_CMD_COMPLETED FLAGS */
25105+#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4)
25106+#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t
25107+#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF)
25108+#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF)
25109+#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008)
25110+#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0)
25111+
25112+/* FW_VA_CMD_COMPLETED NO_TICKS */
25113+#define FW_VA_CMD_COMPLETED_NO_TICKS_TYPE uint16_t
25114+#define FW_VA_CMD_COMPLETED_NO_TICKS_MASK (0xFFFF)
25115+#define FW_VA_CMD_COMPLETED_NO_TICKS_OFFSET (0x0002)
25116+#define FW_VA_CMD_COMPLETED_NO_TICKS_SHIFT (0)
25117+
25118+/* FW_VA_DEBLOCK_REQUIRED CONTEXT */
25119+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t
25120+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF)
25121+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004)
25122+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0)
25123+
25124+/* FW_VA_INIT GLOBAL_PTD */
25125+#define FW_VA_INIT_GLOBAL_PTD_TYPE uint32_t
25126+#define FW_VA_INIT_GLOBAL_PTD_MASK (0xFFFFFFFF)
25127+#define FW_VA_INIT_GLOBAL_PTD_OFFSET (0x0004)
25128+#define FW_VA_INIT_GLOBAL_PTD_SHIFT (0)
25129+
25130+/* FW_VA_RENDER FENCE_VALUE */
25131+#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t
25132+#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF)
25133+#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010)
25134+#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0)
25135+
25136+/* FW_VA_RENDER MMUPTD */
25137+#define FW_VA_RENDER_MMUPTD_TYPE uint32_t
25138+#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF)
25139+#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004)
25140+#define FW_VA_RENDER_MMUPTD_SHIFT (0)
25141+
25142+/* FW_VA_RENDER BUFFER_ADDRESS */
25143+#define FW_VA_RENDER_BUFFER_ADDRESS_TYPE uint32_t
25144+#define FW_VA_RENDER_BUFFER_ADDRESS_MASK (0xFFFFFFFF)
25145+#define FW_VA_RENDER_BUFFER_ADDRESS_OFFSET (0x0008)
25146+#define FW_VA_RENDER_BUFFER_ADDRESS_SHIFT (0)
25147+
25148+/* FW_VA_RENDER BUFFER_SIZE */
25149+#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t
25150+#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF)
25151+#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002)
25152+#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0)
25153+
25154+
25155+static inline void psb_msvdx_clearirq(struct drm_device *dev)
25156+{
25157+ struct drm_psb_private *dev_priv = dev->dev_private;
25158+ unsigned long mtx_int = 0;
25159+
25160+ PSB_DEBUG_IRQ("MSVDX: clear IRQ\n");
25161+
25162+ /* Clear MTX interrupt */
25163+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
25164+ 1);
25165+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
25166+}
25167+
25168+
25169+static inline void psb_msvdx_disableirq(struct drm_device *dev)
25170+{
25171+ /* nothing */
25172+}
25173+
25174+
25175+static inline void psb_msvdx_enableirq(struct drm_device *dev)
25176+{
25177+ struct drm_psb_private *dev_priv = dev->dev_private;
25178+ unsigned long enables = 0;
25179+
25180+ PSB_DEBUG_IRQ("MSVDX: enable MSVDX MTX IRQ\n");
25181+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
25182+ 1);
25183+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
25184+}
25185+
25186+#define MSVDX_NEW_PMSTATE(drm_dev, msvdx_priv, new_state) \
25187+do { \
25188+ msvdx_priv->pmstate = new_state; \
25189+ sysfs_notify_dirent(msvdx_priv->sysfs_pmstate); \
25190+ PSB_DEBUG_PM("MSVDX: %s\n", \
25191+ (new_state == PSB_PMSTATE_POWERUP) ? "powerup" \
25192+ : ((new_state == PSB_PMSTATE_POWERDOWN) ? "powerdown" \
25193+ : "clockgated")); \
25194+} while (0)
25195+
25196+#endif
25197diff --git a/drivers/gpu/drm/psb/psb_msvdxinit.c b/drivers/gpu/drm/psb/psb_msvdxinit.c
25198new file mode 100644
25199index 0000000..49c5041
25200--- /dev/null
25201+++ b/drivers/gpu/drm/psb/psb_msvdxinit.c
25202@@ -0,0 +1,747 @@
25203+/**
25204+ * file psb_msvdxinit.c
25205+ * MSVDX initialization and mtx-firmware upload
25206+ *
25207+ */
25208+
25209+/**************************************************************************
25210+ *
25211+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
25212+ * Copyright (c) Imagination Technologies Limited, UK
25213+ * All Rights Reserved.
25214+ *
25215+ * Permission is hereby granted, free of charge, to any person obtaining a
25216+ * copy of this software and associated documentation files (the
25217+ * "Software"), to deal in the Software without restriction, including
25218+ * without limitation the rights to use, copy, modify, merge, publish,
25219+ * distribute, sub license, and/or sell copies of the Software, and to
25220+ * permit persons to whom the Software is furnished to do so, subject to
25221+ * the following conditions:
25222+ *
25223+ * The above copyright notice and this permission notice (including the
25224+ * next paragraph) shall be included in all copies or substantial portions
25225+ * of the Software.
25226+ *
25227+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25228+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25229+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
25230+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
25231+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25232+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25233+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
25234+ *
25235+ **************************************************************************/
25236+
25237+#include <drm/drmP.h>
25238+#include <drm/drm.h>
25239+#include "psb_drv.h"
25240+#include "psb_msvdx.h"
25241+#include <linux/firmware.h>
25242+
25243+#define MSVDX_REG (dev_priv->msvdx_reg)
25244+uint8_t psb_rev_id;
25245+/*MSVDX FW header*/
25246+struct msvdx_fw {
25247+ uint32_t ver;
25248+ uint32_t text_size;
25249+ uint32_t data_size;
25250+ uint32_t data_location;
25251+};
25252+
25253+int psb_wait_for_register(struct drm_psb_private *dev_priv,
25254+ uint32_t offset, uint32_t value, uint32_t enable)
25255+{
25256+ uint32_t tmp;
25257+ uint32_t poll_cnt = 10000;
25258+ while (poll_cnt) {
25259+ tmp = PSB_RMSVDX32(offset);
25260+ if (value == (tmp & enable)) /* All the bits are reset */
25261+ return 0; /* So exit */
25262+
25263+ /* Wait a bit */
25264+ DRM_UDELAY(1000);
25265+ poll_cnt--;
25266+ }
25267+ DRM_ERROR("MSVDX: Timeout while waiting for register %08x:"
25268+ " expecting %08x (mask %08x), got %08x\n",
25269+ offset, value, enable, tmp);
25270+
25271+ return 1;
25272+}
25273+
25274+int psb_poll_mtx_irq(struct drm_psb_private *dev_priv)
25275+{
25276+ int ret = 0;
25277+ uint32_t mtx_int = 0;
25278+
25279+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
25280+ 1);
25281+
25282+ ret = psb_wait_for_register(dev_priv, MSVDX_INTERRUPT_STATUS,
25283+ /* Required value */
25284+ mtx_int,
25285+ /* Enabled bits */
25286+ mtx_int);
25287+
25288+ if (ret) {
25289+ DRM_ERROR("MSVDX: Error Mtx did not return"
25290+ " int within a resonable time\n");
25291+ return ret;
25292+ }
25293+
25294+ PSB_DEBUG_IRQ("MSVDX: Got MTX Int\n");
25295+
25296+ /* Got it so clear the bit */
25297+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
25298+
25299+ return ret;
25300+}
25301+
25302+void psb_write_mtx_core_reg(struct drm_psb_private *dev_priv,
25303+ const uint32_t core_reg, const uint32_t val)
25304+{
25305+ uint32_t reg = 0;
25306+
25307+ /* Put data in MTX_RW_DATA */
25308+ PSB_WMSVDX32(val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
25309+
25310+ /* DREADY is set to 0 and request a write */
25311+ reg = core_reg;
25312+ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
25313+ MTX_RNW, 0);
25314+ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
25315+ MTX_DREADY, 0);
25316+ PSB_WMSVDX32(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
25317+
25318+ psb_wait_for_register(dev_priv,
25319+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
25320+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
25321+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
25322+}
25323+
25324+void psb_upload_fw(struct drm_psb_private *dev_priv,
25325+ const uint32_t data_mem, uint32_t ram_bank_size,
25326+ uint32_t address, const unsigned int words,
25327+ const uint32_t * const data)
25328+{
25329+ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
25330+ uint32_t access_ctrl;
25331+
25332+ /* Save the access control register... */
25333+ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
25334+
25335+ /* Wait for MCMSTAT to become be idle 1 */
25336+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
25337+ 1, /* Required Value */
25338+ 0xffffffff /* Enables */);
25339+
25340+ for (loop = 0; loop < words; loop++) {
25341+ ram_id = data_mem + (address / ram_bank_size);
25342+ if (ram_id != cur_bank) {
25343+ addr = address >> 2;
25344+ ctrl = 0;
25345+ REGIO_WRITE_FIELD_LITE(ctrl,
25346+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25347+ MTX_MCMID, ram_id);
25348+ REGIO_WRITE_FIELD_LITE(ctrl,
25349+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25350+ MTX_MCM_ADDR, addr);
25351+ REGIO_WRITE_FIELD_LITE(ctrl,
25352+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25353+ MTX_MCMAI, 1);
25354+ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
25355+ cur_bank = ram_id;
25356+ }
25357+ address += 4;
25358+
25359+ PSB_WMSVDX32(data[loop],
25360+ MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
25361+
25362+ /* Wait for MCMSTAT to become be idle 1 */
25363+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
25364+ 1, /* Required Value */
25365+ 0xffffffff /* Enables */);
25366+ }
25367+ PSB_DEBUG_GENERAL("MSVDX: Upload done\n");
25368+
25369+ /* Restore the access control register... */
25370+ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
25371+}
25372+
25373+static int psb_verify_fw(struct drm_psb_private *dev_priv,
25374+ const uint32_t ram_bank_size,
25375+ const uint32_t data_mem, uint32_t address,
25376+ const uint32_t words, const uint32_t * const data)
25377+{
25378+ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
25379+ uint32_t access_ctrl;
25380+ int ret = 0;
25381+
25382+ /* Save the access control register... */
25383+ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
25384+
25385+ /* Wait for MCMSTAT to become be idle 1 */
25386+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
25387+ 1, /* Required Value */
25388+ 0xffffffff /* Enables */);
25389+
25390+ for (loop = 0; loop < words; loop++) {
25391+ uint32_t tmp;
25392+ ram_id = data_mem + (address / ram_bank_size);
25393+
25394+ if (ram_id != cur_bank) {
25395+ addr = address >> 2;
25396+ ctrl = 0;
25397+ REGIO_WRITE_FIELD_LITE(ctrl,
25398+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25399+ MTX_MCMID, ram_id);
25400+ REGIO_WRITE_FIELD_LITE(ctrl,
25401+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25402+ MTX_MCM_ADDR, addr);
25403+ REGIO_WRITE_FIELD_LITE(ctrl,
25404+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25405+ MTX_MCMAI, 1);
25406+ REGIO_WRITE_FIELD_LITE(ctrl,
25407+ MSVDX_MTX_RAM_ACCESS_CONTROL,
25408+ MTX_MCMR, 1);
25409+
25410+ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
25411+
25412+ cur_bank = ram_id;
25413+ }
25414+ address += 4;
25415+
25416+ /* Wait for MCMSTAT to become be idle 1 */
25417+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
25418+ 1, /* Required Value */
25419+ 0xffffffff /* Enables */);
25420+
25421+ tmp = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
25422+ if (data[loop] != tmp) {
25423+ DRM_ERROR("psb: Firmware validation fails"
25424+ " at index=%08x\n", loop);
25425+ ret = 1;
25426+ break;
25427+ }
25428+ }
25429+
25430+ /* Restore the access control register... */
25431+ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
25432+
25433+ return ret;
25434+}
25435+
25436+static uint32_t *msvdx_get_fw(struct drm_device *dev,
25437+ const struct firmware **raw, uint8_t *name)
25438+{
25439+ struct drm_psb_private *dev_priv = dev->dev_private;
25440+ int rc, fw_size;
25441+ int *ptr = NULL;
25442+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
25443+
25444+ rc = request_firmware(raw, name, &dev->pdev->dev);
25445+ if (rc < 0) {
25446+ DRM_ERROR("MSVDX: %s request_firmware failed: Reason %d\n",
25447+ name, rc);
25448+ return NULL;
25449+ }
25450+
25451+ if ((*raw)->size < sizeof(struct msvdx_fw)) {
25452+ DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
25453+ name, (*raw)->size);
25454+ return NULL;
25455+ }
25456+
25457+ ptr = (int *) ((*raw))->data;
25458+
25459+ if (!ptr) {
25460+ DRM_ERROR("MSVDX: Failed to load %s\n", name);
25461+ return NULL;
25462+ }
25463+
25464+ /* another sanity check... */
25465+ fw_size = sizeof(struct msvdx_fw) +
25466+ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
25467+ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size;
25468+ if ((*raw)->size != fw_size) {
25469+ DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
25470+ name, (*raw)->size);
25471+ return NULL;
25472+ }
25473+ msvdx_priv->msvdx_fw = kzalloc(fw_size, GFP_KERNEL);
25474+ if (msvdx_priv->msvdx_fw == NULL)
25475+ DRM_ERROR("MSVDX: allocate FW buffer failed\n");
25476+ else {
25477+ memcpy(msvdx_priv->msvdx_fw, ptr, fw_size);
25478+ msvdx_priv->msvdx_fw_size = fw_size;
25479+ }
25480+
25481+ PSB_DEBUG_GENERAL("MSVDX: releasing firmware resouces\n");
25482+ release_firmware(*raw);
25483+
25484+ return msvdx_priv->msvdx_fw;
25485+}
25486+
25487+int psb_setup_fw(struct drm_device *dev)
25488+{
25489+ struct drm_psb_private *dev_priv = dev->dev_private;
25490+ int ret = 0;
25491+
25492+ uint32_t ram_bank_size;
25493+ struct msvdx_fw *fw;
25494+ uint32_t *fw_ptr = NULL;
25495+ uint32_t *text_ptr = NULL;
25496+ uint32_t *data_ptr = NULL;
25497+ const struct firmware *raw = NULL;
25498+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
25499+
25500+ /* todo : Assert the clock is on - if not turn it on to upload code */
25501+ PSB_DEBUG_GENERAL("MSVDX: psb_setup_fw\n");
25502+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
25503+
25504+ /* Reset MTX */
25505+ PSB_WMSVDX32(MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK,
25506+ MSVDX_MTX_SOFT_RESET);
25507+
25508+ /* Initialses Communication controll area to 0 */
25509+ if (psb_rev_id >= POULSBO_D1) {
25510+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1"
25511+ " or later revision.\n");
25512+ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1,
25513+ MSVDX_COMMS_OFFSET_FLAGS);
25514+ } else {
25515+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0"
25516+ " or earlier revision.\n");
25517+ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0,
25518+ MSVDX_COMMS_OFFSET_FLAGS);
25519+ }
25520+
25521+ PSB_WMSVDX32(0, MSVDX_COMMS_MSG_COUNTER);
25522+ PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
25523+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_RD_INDEX);
25524+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
25525+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_RD_INDEX);
25526+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
25527+ PSB_WMSVDX32(0, MSVDX_COMMS_FW_STATUS);
25528+
25529+ /* read register bank size */
25530+ {
25531+ uint32_t bank_size, reg;
25532+ reg = PSB_RMSVDX32(MSVDX_MTX_RAM_BANK);
25533+ bank_size =
25534+ REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK,
25535+ CR_MTX_RAM_BANK_SIZE);
25536+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
25537+ }
25538+
25539+ PSB_DEBUG_GENERAL("MSVDX: RAM bank size = %d bytes\n",
25540+ ram_bank_size);
25541+
25542+ /* if FW already loaded from storage */
25543+ if (msvdx_priv->msvdx_fw)
25544+ fw_ptr = msvdx_priv->msvdx_fw;
25545+ else {
25546+ PSB_DEBUG_GENERAL("MSVDX:load msvdx_fw.bin by udevd\n");
25547+ fw_ptr = msvdx_get_fw(dev, &raw, "msvdx_fw.bin");
25548+ }
25549+
25550+ if (!fw_ptr) {
25551+ DRM_ERROR("MSVDX:load msvdx_fw.bin failed,is udevd running?\n");
25552+ ret = 1;
25553+ goto out;
25554+ }
25555+
25556+ fw = (struct msvdx_fw *) fw_ptr;
25557+ if (fw->ver != 0x02) {
25558+ DRM_ERROR("psb: msvdx_fw.bin firmware version mismatch,"
25559+ "got version=%02x expected version=%02x\n",
25560+ fw->ver, 0x02);
25561+ ret = 1;
25562+ goto out;
25563+ }
25564+
25565+ text_ptr =
25566+ (uint32_t *) ((uint8_t *) fw_ptr + sizeof(struct msvdx_fw));
25567+ data_ptr = text_ptr + fw->text_size;
25568+
25569+ PSB_DEBUG_GENERAL("MSVDX: Retrieved pointers for firmware\n");
25570+ PSB_DEBUG_GENERAL("MSVDX: text_size: %d\n", fw->text_size);
25571+ PSB_DEBUG_GENERAL("MSVDX: data_size: %d\n", fw->data_size);
25572+ PSB_DEBUG_GENERAL("MSVDX: data_location: 0x%x\n",
25573+ fw->data_location);
25574+ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of text: 0x%x\n",
25575+ *text_ptr);
25576+ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of data: 0x%x\n",
25577+ *data_ptr);
25578+
25579+ PSB_DEBUG_GENERAL("MSVDX: Uploading firmware\n");
25580+ psb_upload_fw(dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
25581+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size,
25582+ text_ptr);
25583+ psb_upload_fw(dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
25584+ fw->data_location - MTX_DATA_BASE, fw->data_size,
25585+ data_ptr);
25586+
25587+#if 0
25588+ /* todo : Verify code upload possibly only in debug */
25589+ ret = psb_verify_fw(dev_priv, ram_bank_size,
25590+ MTX_CORE_CODE_MEM,
25591+ PC_START_ADDRESS - MTX_CODE_BASE,
25592+ fw->text_size, text_ptr);
25593+ if (ret) {
25594+ /* Firmware code upload failed */
25595+ ret = 1;
25596+ goto out;
25597+ }
25598+
25599+ ret = psb_verify_fw(dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
25600+ fw->data_location - MTX_DATA_BASE,
25601+ fw->data_size, data_ptr);
25602+ if (ret) {
25603+ /* Firmware data upload failed */
25604+ ret = 1;
25605+ goto out;
25606+ }
25607+#else
25608+ (void)psb_verify_fw;
25609+#endif
25610+ /* -- Set starting PC address */
25611+ psb_write_mtx_core_reg(dev_priv, MTX_PC, PC_START_ADDRESS);
25612+
25613+ /* -- Turn on the thread */
25614+ PSB_WMSVDX32(MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
25615+
25616+ /* Wait for the signature value to be written back */
25617+ ret = psb_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE,
25618+ MSVDX_COMMS_SIGNATURE_VALUE, /*Required value*/
25619+ 0xffffffff /* Enabled bits */);
25620+ if (ret) {
25621+ DRM_ERROR("MSVDX: firmware fails to initialize.\n");
25622+ goto out;
25623+ }
25624+
25625+ PSB_DEBUG_GENERAL("MSVDX: MTX Initial indications OK\n");
25626+ PSB_DEBUG_GENERAL("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
25627+ MSVDX_COMMS_AREA_ADDR);
25628+#if 0
25629+
25630+ /* Send test message */
25631+ {
25632+ uint32_t msg_buf[FW_VA_DEBUG_TEST2_SIZE >> 2];
25633+
25634+ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_MSG_SIZE,
25635+ FW_VA_DEBUG_TEST2_SIZE);
25636+ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_ID,
25637+ VA_MSGID_TEST2);
25638+
25639+ ret = psb_mtx_send(dev_priv, msg_buf);
25640+ if (ret) {
25641+ DRM_ERROR("psb: MSVDX sending fails.\n");
25642+ goto out;
25643+ }
25644+
25645+ /* Wait for Mtx to ack this message */
25646+ psb_poll_mtx_irq(dev_priv);
25647+
25648+ }
25649+#endif
25650+out:
25651+
25652+ return ret;
25653+}
25654+
25655+
25656+static void psb_free_ccb(struct ttm_buffer_object **ccb)
25657+{
25658+ ttm_bo_unref(ccb);
25659+ *ccb = NULL;
25660+}
25661+
25662+/**
25663+ * Reset chip and disable interrupts.
25664+ * Return 0 success, 1 failure
25665+ */
25666+int psb_msvdx_reset(struct drm_psb_private *dev_priv)
25667+{
25668+ int ret = 0;
25669+
25670+ /* Issue software reset */
25671+ PSB_WMSVDX32(msvdx_sw_reset_all, MSVDX_CONTROL);
25672+
25673+ ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL, 0,
25674+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
25675+
25676+ if (!ret) {
25677+ /* Clear interrupt enabled flag */
25678+ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
25679+
25680+ /* Clear any pending interrupt flags */
25681+ PSB_WMSVDX32(0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
25682+ }
25683+
25684+ /* mutex_destroy(&msvdx_priv->msvdx_mutex); */
25685+
25686+ return ret;
25687+}
25688+
25689+static int psb_allocate_ccb(struct drm_device *dev,
25690+ struct ttm_buffer_object **ccb,
25691+ uint32_t *base_addr, int size)
25692+{
25693+ struct drm_psb_private *dev_priv = psb_priv(dev);
25694+ struct ttm_bo_device *bdev = &dev_priv->bdev;
25695+ int ret;
25696+ struct ttm_bo_kmap_obj tmp_kmap;
25697+ bool is_iomem;
25698+
25699+ PSB_DEBUG_INIT("MSVDX: allocate CCB\n");
25700+
25701+ ret = ttm_buffer_object_create(bdev, size,
25702+ ttm_bo_type_kernel,
25703+ DRM_PSB_FLAG_MEM_KERNEL |
25704+ TTM_PL_FLAG_NO_EVICT, 0, 0, 0,
25705+ NULL, ccb);
25706+ if (ret) {
25707+ DRM_ERROR("MSVDX:failed to allocate CCB.\n");
25708+ *ccb = NULL;
25709+ return 1;
25710+ }
25711+
25712+ ret = ttm_bo_kmap(*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
25713+ if (ret) {
25714+ PSB_DEBUG_GENERAL("ttm_bo_kmap failed ret: %d\n", ret);
25715+ ttm_bo_unref(ccb);
25716+ *ccb = NULL;
25717+ return 1;
25718+ }
25719+
25720+ memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0,
25721+ RENDEC_A_SIZE);
25722+ ttm_bo_kunmap(&tmp_kmap);
25723+
25724+ *base_addr = (*ccb)->offset;
25725+ return 0;
25726+}
25727+
25728+static ssize_t psb_msvdx_pmstate_show(struct device *dev,
25729+ struct device_attribute *attr, char *buf)
25730+{
25731+ struct drm_device *drm_dev = dev_get_drvdata(dev);
25732+ struct drm_psb_private *dev_priv;
25733+ struct msvdx_private *msvdx_priv;
25734+ unsigned int pmstate;
25735+ unsigned long flags;
25736+ int ret = -EINVAL;
25737+
25738+ if (drm_dev == NULL)
25739+ return 0;
25740+
25741+ dev_priv = drm_dev->dev_private;
25742+ msvdx_priv = dev_priv->msvdx_private;
25743+ pmstate = msvdx_priv->pmstate;
25744+
25745+ spin_lock_irqsave(&msvdx_priv->msvdx_lock, flags);
25746+ ret = sprintf(buf, "%s\n",
25747+ (pmstate == PSB_PMSTATE_POWERUP) ? "powerup"
25748+ : ((pmstate == PSB_PMSTATE_POWERDOWN) ? "powerdown"
25749+ : "clockgated"));
25750+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, flags);
25751+
25752+ return ret;
25753+}
25754+
25755+static DEVICE_ATTR(msvdx_pmstate, 0444, psb_msvdx_pmstate_show, NULL);
25756+
25757+
25758+int psb_msvdx_init(struct drm_device *dev)
25759+{
25760+ struct drm_psb_private *dev_priv = dev->dev_private;
25761+ /* uint32_t clk_gate_ctrl = clk_enable_all; */
25762+ uint32_t cmd;
25763+ int ret;
25764+ struct msvdx_private *msvdx_priv;
25765+
25766+ if (!dev_priv->msvdx_private) {
25767+ msvdx_priv = kmalloc(sizeof(struct msvdx_private), GFP_KERNEL);
25768+ if (msvdx_priv == NULL)
25769+ goto err_exit;
25770+
25771+ dev_priv->msvdx_private = msvdx_priv;
25772+ memset(msvdx_priv, 0, sizeof(struct msvdx_private));
25773+
25774+ /* get device --> drm_device --> drm_psb_private --> msvdx_priv
25775+ * for psb_msvdx_pmstate_show: msvdx_pmpolicy
25776+ * if not pci_set_drvdata, can't get drm_device from device
25777+ */
25778+ /* pci_set_drvdata(dev->pdev, dev); */
25779+ if (device_create_file(&dev->pdev->dev,
25780+ &dev_attr_msvdx_pmstate))
25781+ DRM_ERROR("MSVDX: could not create sysfs file\n");
25782+ msvdx_priv->sysfs_pmstate = sysfs_get_dirent(
25783+ dev->pdev->dev.kobj.sd, "msvdx_pmstate");
25784+ }
25785+
25786+ msvdx_priv = dev_priv->msvdx_private;
25787+ if (!msvdx_priv->ccb0) { /* one for the first time */
25788+ /* Initialize comand msvdx queueing */
25789+ INIT_LIST_HEAD(&msvdx_priv->msvdx_queue);
25790+ mutex_init(&msvdx_priv->msvdx_mutex);
25791+ spin_lock_init(&msvdx_priv->msvdx_lock);
25792+ /*figure out the stepping */
25793+ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &psb_rev_id);
25794+ }
25795+
25796+ msvdx_priv->msvdx_busy = 0;
25797+
25798+ /* Enable Clocks */
25799+ PSB_DEBUG_GENERAL("Enabling clocks\n");
25800+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
25801+
25802+ /* Enable MMU by removing all bypass bits */
25803+ PSB_WMSVDX32(0, MSVDX_MMU_CONTROL0);
25804+
25805+ /* move firmware loading to the place receiving first command buffer */
25806+
25807+ PSB_DEBUG_GENERAL("MSVDX: Setting up RENDEC,allocate CCB 0/1\n");
25808+ /* Allocate device virtual memory as required by rendec.... */
25809+ if (!msvdx_priv->ccb0) {
25810+ ret = psb_allocate_ccb(dev, &msvdx_priv->ccb0,
25811+ &msvdx_priv->base_addr0,
25812+ RENDEC_A_SIZE);
25813+ if (ret)
25814+ goto err_exit;
25815+ }
25816+
25817+ if (!msvdx_priv->ccb1) {
25818+ ret = psb_allocate_ccb(dev, &msvdx_priv->ccb1,
25819+ &msvdx_priv->base_addr1,
25820+ RENDEC_B_SIZE);
25821+ if (ret)
25822+ goto err_exit;
25823+ }
25824+
25825+
25826+ PSB_DEBUG_GENERAL("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
25827+ msvdx_priv->base_addr0, msvdx_priv->base_addr1);
25828+
25829+ PSB_WMSVDX32(msvdx_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
25830+ PSB_WMSVDX32(msvdx_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
25831+
25832+ cmd = 0;
25833+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
25834+ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
25835+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
25836+ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
25837+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_BUFFER_SIZE);
25838+
25839+ cmd = 0;
25840+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
25841+ RENDEC_DECODE_START_SIZE, 0);
25842+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
25843+ RENDEC_BURST_SIZE_W, 1);
25844+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
25845+ RENDEC_BURST_SIZE_R, 1);
25846+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
25847+ RENDEC_EXTERNAL_MEMORY, 1);
25848+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL1);
25849+
25850+ cmd = 0x00101010;
25851+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT0);
25852+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT1);
25853+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT2);
25854+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT3);
25855+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT4);
25856+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT5);
25857+
25858+ cmd = 0;
25859+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE,
25860+ 1);
25861+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL0);
25862+
25863+ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
25864+ PSB_DEBUG_INIT("MSVDX:defer firmware loading to the"
25865+ " place when receiving user space commands\n");
25866+
25867+ msvdx_priv->msvdx_fw_loaded = 0; /* need to load firware */
25868+
25869+ psb_msvdx_clearirq(dev);
25870+ psb_msvdx_enableirq(dev);
25871+
25872+ if (IS_MRST(dev)) {
25873+ PSB_DEBUG_INIT("MSDVX:old clock gating disable = 0x%08x\n",
25874+ PSB_RVDC32(PSB_MSVDX_CLOCKGATING));
25875+ PSB_DEBUG_INIT("MSVDX:rest MSDVX to disable clock gating\n");
25876+
25877+ PSB_WVDC32(0x000101ff, PSB_MSVDX_CLOCKGATING);
25878+
25879+ PSB_DEBUG_INIT("MSDVX:new clock gating disable = 0x%08x\n",
25880+ PSB_RVDC32(PSB_MSVDX_CLOCKGATING));
25881+ }
25882+
25883+#if 0
25884+ ret = psb_setup_fw(dev);
25885+ if (ret)
25886+ goto err_exit;
25887+ /* Send Initialisation message to firmware */
25888+ if (0) {
25889+ uint32_t msg_init[FW_VA_INIT_SIZE >> 2];
25890+ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_SIZE,
25891+ FW_VA_INIT_SIZE);
25892+ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_ID, VA_MSGID_INIT);
25893+
25894+ /* Need to set this for all but A0 */
25895+ MEMIO_WRITE_FIELD(msg_init, FW_VA_INIT_GLOBAL_PTD,
25896+ psb_get_default_pd_addr(dev_priv->mmu));
25897+
25898+ ret = psb_mtx_send(dev_priv, msg_init);
25899+ if (ret)
25900+ goto err_exit;
25901+
25902+ psb_poll_mtx_irq(dev_priv);
25903+ }
25904+#endif
25905+
25906+ return 0;
25907+
25908+err_exit:
25909+ DRM_ERROR("MSVDX: initialization failed\n");
25910+ if (msvdx_priv->ccb0)
25911+ psb_free_ccb(&msvdx_priv->ccb0);
25912+ if (msvdx_priv->ccb1)
25913+ psb_free_ccb(&msvdx_priv->ccb1);
25914+ kfree(dev_priv->msvdx_private);
25915+
25916+ return 1;
25917+}
25918+
25919+int psb_msvdx_uninit(struct drm_device *dev)
25920+{
25921+ struct drm_psb_private *dev_priv = dev->dev_private;
25922+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
25923+
25924+ /* Reset MSVDX chip */
25925+ psb_msvdx_reset(dev_priv);
25926+
25927+ /* PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */
25928+ PSB_DEBUG_INIT("MSVDX:set the msvdx clock to 0\n");
25929+ PSB_WMSVDX32(0, MSVDX_MAN_CLK_ENABLE);
25930+
25931+ if (msvdx_priv->ccb0)
25932+ psb_free_ccb(&msvdx_priv->ccb0);
25933+ if (msvdx_priv->ccb1)
25934+ psb_free_ccb(&msvdx_priv->ccb1);
25935+ if (msvdx_priv->msvdx_fw)
25936+ kfree(msvdx_priv->msvdx_fw
25937+ );
25938+ if (msvdx_priv) {
25939+ /* pci_set_drvdata(dev->pdev, NULL); */
25940+ device_remove_file(&dev->pdev->dev, &dev_attr_msvdx_pmstate);
25941+ sysfs_put(msvdx_priv->sysfs_pmstate);
25942+ msvdx_priv->sysfs_pmstate = NULL;
25943+
25944+ kfree(msvdx_priv);
25945+ dev_priv->msvdx_private = NULL;
25946+ }
25947+
25948+ return 0;
25949+}
25950diff --git a/drivers/gpu/drm/psb/psb_powermgmt.c b/drivers/gpu/drm/psb/psb_powermgmt.c
25951new file mode 100644
25952index 0000000..c59a701
25953--- /dev/null
25954+++ b/drivers/gpu/drm/psb/psb_powermgmt.c
25955@@ -0,0 +1,1146 @@
25956+/**************************************************************************
25957+ * Copyright (c) 2009, Intel Corporation.
25958+ * All Rights Reserved.
25959+
25960+ * Permission is hereby granted, free of charge, to any person obtaining a
25961+ * copy of this software and associated documentation files (the "Software"),
25962+ * to deal in the Software without restriction, including without limitation
25963+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
25964+ * and/or sell copies of the Software, and to permit persons to whom the
25965+ * Software is furnished to do so, subject to the following conditions:
25966+ *
25967+ * The above copyright notice and this permission notice (including the next
25968+ * paragraph) shall be included in all copies or substantial portions of the
25969+ * Software.
25970+ *
25971+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25972+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25973+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25974+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25975+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25976+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25977+ * SOFTWARE.
25978+ *
25979+ * Authors:
25980+ * Benjamin Defnet <benjamin.r.defnet@intel.com>
25981+ *
25982+ */
25983+#include "psb_powermgmt.h"
25984+#include "psb_drv.h"
25985+#include "psb_intel_reg.h"
25986+#include "psb_scene.h"
25987+#include "lnc_topaz.h"
25988+#include "psb_msvdx.h"
25989+
25990+#include <linux/mutex.h>
25991+
25992+static struct mutex g_state_change_mutex;
25993+static int g_hw_power_status_mask;
25994+static int g_pci_power_status;
25995+static atomic_t g_display_access_count;
25996+static atomic_t g_graphics_access_count;
25997+static atomic_t g_videoenc_access_count;
25998+static atomic_t g_videodec_access_count;
25999+static bool g_suspend_in_progress;
26000+static bool g_resume_in_progress;
26001+static int g_suspend_mask;
26002+static int g_resume_mask;
26003+static bool g_forcing_resume;
26004+static atomic_t g_pm_waiters;
26005+
26006+/*#define PWRMGMT_DEBUG*/
26007+#ifdef PWRMGMT_DEBUG
26008+ #define PWR_PRINT(_fmt, _arg...) \
26009+ printk(KERN_INFO _fmt, ##_arg)
26010+#else
26011+ #define PWR_PRINT(_fmt, _arg...) {}
26012+#endif
26013+
26014+/*
26015+ * powermgmt_init
26016+ *
26017+ * Description: Initialize this power management module
26018+ */
26019+void powermgmt_init(void)
26020+{
26021+ mutex_init(&g_state_change_mutex);
26022+ g_hw_power_status_mask = PSB_ALL_ISLANDS;
26023+ g_pci_power_status = 1;
26024+ atomic_set(&g_display_access_count, 0);
26025+ atomic_set(&g_graphics_access_count, 0);
26026+ atomic_set(&g_videoenc_access_count, 0);
26027+ atomic_set(&g_videodec_access_count, 0);
26028+ atomic_set(&g_pm_waiters, 0);
26029+}
26030+
26031+/*
26032+ * powermgmt_shutdown
26033+ *
26034+ * Description: Shut down this power management module
26035+ */
26036+void powermgmt_shutdown(void)
26037+{
26038+ mutex_destroy(&g_state_change_mutex);
26039+}
26040+
26041+/*
26042+ * powermgmt_down_island_power
26043+ *
26044+ * Description: Cut power to the specified island (powergating)
26045+ */
26046+void powermgmt_down_island_power(struct drm_device *dev, int islands)
26047+{
26048+ u32 pwr_cnt = 0;
26049+ u32 pwr_mask = 0;
26050+ u32 pwr_sts;
26051+
26052+ struct drm_psb_private *dev_priv =
26053+ (struct drm_psb_private *) dev->dev_private;
26054+
26055+ PWR_PRINT("BEN_KERNEL_OSPM************DOWN ISLAND POWER %d\n", islands);
26056+
26057+ if (!IS_MRST(dev)) {
26058+ g_hw_power_status_mask &= ~islands;
26059+ return;
26060+ }
26061+
26062+ g_hw_power_status_mask &= ~islands;
26063+
26064+ if (islands & PSB_GRAPHICS_ISLAND) {
26065+ pwr_cnt |= PSB_PWRGT_GFX_MASK;
26066+ pwr_mask |= PSB_PWRGT_GFX_MASK;
26067+ }
26068+ if (islands & PSB_VIDEO_ENC_ISLAND) {
26069+ pwr_cnt |= PSB_PWRGT_VID_ENC_MASK;
26070+ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
26071+ }
26072+ if (islands & PSB_VIDEO_DEC_ISLAND) {
26073+ pwr_cnt |= PSB_PWRGT_VID_DEC_MASK;
26074+ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
26075+ }
26076+ if (pwr_cnt) {
26077+ pwr_cnt |= inl(dev_priv->apm_base);
26078+ outl(pwr_cnt, dev_priv->apm_base);
26079+ while (true) {
26080+ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
26081+ if ((pwr_sts & pwr_mask) == pwr_mask)
26082+ break;
26083+ else
26084+ udelay(10);
26085+ }
26086+ }
26087+
26088+ if (islands & PSB_DISPLAY_ISLAND) {
26089+ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
26090+ outl(PSB_PWRGT_DISPLAY_MASK, (dev_priv->ospm_base + PSB_PM_SSC));
26091+ while (true) {
26092+ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
26093+ if ((pwr_sts & pwr_mask) == pwr_mask)
26094+ break;
26095+ else
26096+ udelay(10);
26097+ }
26098+ }
26099+}
26100+
26101+/*
26102+ * powermgmt_up_island_power
26103+ *
26104+ * Description: Restore power to the specified island (powergating)
26105+ */
26106+void powermgmt_up_island_power(struct drm_device *dev, int islands)
26107+{
26108+ u32 pwr_cnt;
26109+ u32 pwr_sts;
26110+ u32 pwr_mask;
26111+ u32 count;
26112+ struct drm_psb_private *dev_priv =
26113+ (struct drm_psb_private *) dev->dev_private;
26114+
26115+ PWR_PRINT("BEN_KERNEL_OSPM************UP ISLAND POWER %d\n", islands);
26116+
26117+ if (!IS_MRST(dev)) {
26118+ g_hw_power_status_mask |= islands;
26119+ return;
26120+ }
26121+
26122+ if (islands & (PSB_GRAPHICS_ISLAND | PSB_VIDEO_ENC_ISLAND |
26123+ PSB_VIDEO_DEC_ISLAND)) {
26124+ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
26125+ pwr_mask = 0;
26126+ if (islands & PSB_GRAPHICS_ISLAND) {
26127+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
26128+ pwr_mask |= PSB_PWRGT_GFX_MASK;
26129+ }
26130+ if (islands & PSB_VIDEO_ENC_ISLAND) {
26131+ pwr_cnt &= ~PSB_PWRGT_VID_ENC_MASK;
26132+ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
26133+ }
26134+ if (islands & PSB_VIDEO_DEC_ISLAND) {
26135+ pwr_cnt &= ~PSB_PWRGT_VID_DEC_MASK;
26136+ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
26137+ }
26138+
26139+ if (pwr_mask) {
26140+ count = 5;
26141+ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
26142+ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
26143+ while (true) {
26144+ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
26145+ if ((pwr_sts & pwr_mask) == 0)
26146+ break;
26147+ else
26148+ udelay(10);
26149+ }
26150+ }
26151+ }
26152+
26153+ if (islands & PSB_DISPLAY_ISLAND) {
26154+ count = 5;
26155+ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
26156+ pwr_cnt &= ~PSB_PWRGT_DISPLAY_MASK;
26157+ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
26158+ outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
26159+ while (true) {
26160+ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
26161+ if ((pwr_sts & pwr_mask) == 0)
26162+ break;
26163+ else
26164+ udelay(10);
26165+ }
26166+ }
26167+
26168+ g_hw_power_status_mask |= islands;
26169+}
26170+
26171+/*
26172+ * save_display_registers
26173+ *
26174+ * Description: We are going to suspend so save current display
26175+ * register state.
26176+ */
26177+static int save_display_registers(struct drm_device *dev)
26178+{
26179+ struct drm_psb_private *dev_priv = dev->dev_private;
26180+ struct drm_crtc * crtc;
26181+ struct drm_connector * connector;
26182+ int i;
26183+
26184+ /* Display arbitration control + watermarks */
26185+ dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
26186+ dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
26187+ dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
26188+ dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
26189+ dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
26190+ dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
26191+ dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
26192+ dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
26193+
26194+ if (IS_MRST(dev)) {
26195+ /* Pipe & plane A info */
26196+ dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
26197+ dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
26198+ dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
26199+ dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
26200+ dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
26201+ dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
26202+ dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
26203+ dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
26204+ dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
26205+ dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
26206+ dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
26207+ dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
26208+ dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
26209+ dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
26210+ dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
26211+ dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
26212+ dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
26213+ dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
26214+
26215+ /*save cursor regs*/
26216+ dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
26217+ dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
26218+ dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
26219+
26220+ /*save palette (gamma) */
26221+ for (i = 0; i < 256; i++)
26222+ dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i<<2));
26223+
26224+ /*save performance state*/
26225+ dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
26226+
26227+ /* LVDS state */
26228+ dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
26229+ dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
26230+ dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
26231+ dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
26232+ dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
26233+ dev_priv->saveLVDS = PSB_RVDC32(LVDS);
26234+ dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
26235+ dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
26236+ dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
26237+ dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
26238+
26239+ /* HW overlay */
26240+ dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
26241+ dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
26242+ dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
26243+ dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
26244+ dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
26245+ dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
26246+ dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
26247+
26248+ } else { /*PSB*/
26249+ /*save crtc and output state*/
26250+ mutex_lock(&dev->mode_config.mutex);
26251+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
26252+ if(drm_helper_crtc_in_use(crtc)) {
26253+ crtc->funcs->save(crtc);
26254+ }
26255+ }
26256+
26257+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
26258+ connector->funcs->save(connector);
26259+ }
26260+ mutex_unlock(&dev->mode_config.mutex);
26261+ }
26262+
26263+ /* Interrupt state */
26264+ /*
26265+ * Handled in psb_irq.c
26266+ */
26267+
26268+ return 0;
26269+}
26270+
26271+/*
26272+ * restore_display_registers
26273+ *
26274+ * Description: We are going to resume so restore display register state.
26275+ */
26276+static int restore_display_registers(struct drm_device *dev)
26277+{
26278+ struct drm_psb_private *dev_priv = dev->dev_private;
26279+ struct drm_crtc * crtc;
26280+ struct drm_connector * connector;
26281+ unsigned long i, pp_stat;
26282+
26283+ /* Display arbitration + watermarks */
26284+ PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
26285+ PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
26286+ PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
26287+ PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
26288+ PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
26289+ PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
26290+ PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
26291+ PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
26292+
26293+ /*make sure VGA plane is off. it initializes to on after reset!*/
26294+ PSB_WVDC32(0x80000000, VGACNTRL);
26295+
26296+ if (IS_MRST(dev)) {
26297+ /* set the plls */
26298+ PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
26299+ PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
26300+ /* Actually enable it */
26301+ PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
26302+ DRM_UDELAY(150);
26303+
26304+ /* Restore mode */
26305+ PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
26306+ PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
26307+ PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
26308+ PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
26309+ PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
26310+ PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
26311+ PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
26312+ PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
26313+
26314+ /*restore performance mode*/
26315+ PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
26316+
26317+ /*enable the pipe*/
26318+ PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
26319+
26320+ /*set up the plane*/
26321+ PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
26322+ PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
26323+ PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
26324+
26325+ /* Enable the plane */
26326+ PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
26327+ PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
26328+
26329+ /*Enable Cursor A*/
26330+ PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
26331+ PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
26332+ PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
26333+
26334+ /* restore palette (gamma) */
26335+ /*DRM_UDELAY(50000); */
26336+ for (i = 0; i < 256; i++)
26337+ PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i<<2));
26338+
26339+ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
26340+ PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
26341+ PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
26342+ PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
26343+ PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
26344+ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
26345+ PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
26346+ PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
26347+ PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
26348+ PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
26349+
26350+ /*wait for cycle delay*/
26351+ do {
26352+ pp_stat = PSB_RVDC32(PP_STATUS);
26353+ } while (pp_stat & 0x08000000);
26354+
26355+ DRM_UDELAY(999);
26356+ /*wait for panel power up*/
26357+ do {
26358+ pp_stat = PSB_RVDC32(PP_STATUS);
26359+ } while (pp_stat & 0x10000000);
26360+
26361+ /* restore HW overlay */
26362+ PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
26363+ PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
26364+ PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
26365+ PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
26366+ PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
26367+ PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
26368+ PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
26369+
26370+ } else { /*PSB*/
26371+ mutex_lock(&dev->mode_config.mutex);
26372+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
26373+ if(drm_helper_crtc_in_use(crtc))
26374+ crtc->funcs->restore(crtc);
26375+ }
26376+
26377+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
26378+ connector->funcs->restore(connector);
26379+ }
26380+ mutex_unlock(&dev->mode_config.mutex);
26381+ }
26382+
26383+
26384+ /*Interrupt state*/
26385+ /*
26386+ * Handled in psb_irq.c
26387+ */
26388+
26389+ return 0;
26390+}
26391+
26392+/*
26393+ * powermgmt_suspend_graphics
26394+ *
26395+ * Description: Suspend the graphics hardware saving state and disabling
26396+ * as necessary.
26397+ */
26398+static void powermgmt_suspend_graphics(struct drm_device *dev, bool b_initiated_by_ospm)
26399+{
26400+ struct drm_psb_private *dev_priv = dev->dev_private;
26401+
26402+ if (!(g_hw_power_status_mask & PSB_GRAPHICS_ISLAND))
26403+ return;
26404+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_graphics\n");
26405+
26406+ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
26407+ if (b_initiated_by_ospm) {
26408+ int ret = -EBUSY;
26409+ ret = psb_idle_3d(dev);
26410+ if (ret == -EBUSY)
26411+ {
26412+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_graphics ***3d BUSY!!!!!!\n");
26413+ return;
26414+ }
26415+
26416+ ret = psb_idle_2d(dev);
26417+ if (ret == -EBUSY)
26418+ {
26419+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_graphics ***2d BUSY!!!!!!\n");
26420+ return;
26421+ }
26422+ if (IS_POULSBO(dev))
26423+ flush_scheduled_work();
26424+ }
26425+ psb_irq_uninstall_islands(dev, PSB_GRAPHICS_ISLAND);
26426+ powermgmt_down_island_power(dev, PSB_GRAPHICS_ISLAND);
26427+}
26428+
26429+/*
26430+ * powermgmt_resume_graphics
26431+ *
26432+ * Description: Resume the graphics hardware restoring state and enabling
26433+ * as necessary.
26434+ */
26435+static void powermgmt_resume_graphics(struct drm_device *dev)
26436+{
26437+ struct drm_psb_private *dev_priv = dev->dev_private;
26438+
26439+ if (g_hw_power_status_mask & PSB_GRAPHICS_ISLAND)
26440+ return;
26441+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_graphics\n");
26442+
26443+ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
26444+
26445+ powermgmt_up_island_power(dev, PSB_GRAPHICS_ISLAND);
26446+
26447+ /*
26448+	 * The SGX loses its register contents.
26449+ * Restore BIF registers. The MMU page tables are
26450+ * "normal" pages, so their contents should be kept.
26451+ */
26452+ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
26453+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
26454+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
26455+ PSB_RSGX32(PSB_CR_BIF_BANK1);
26456+
26457+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
26458+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
26459+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
26460+
26461+ if (IS_POULSBO(dev))
26462+ psb_reset(dev_priv, 1);
26463+
26464+ dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
26465+ PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
26466+ (void) PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
26467+
26468+ /*
26469+	 * Persistent 3D base registers and USSE base registers..
26470+ */
26471+
26472+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
26473+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
26474+
26475+ /*
26476+ * Now, re-initialize the 3D engine.
26477+ */
26478+
26479+ if (dev_priv->xhw_on)
26480+ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
26481+
26482+ psb_scheduler_ta_mem_check(dev_priv);
26483+ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
26484+ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
26485+ PSB_TA_MEM_FLAG_TA |
26486+ PSB_TA_MEM_FLAG_RASTER |
26487+ PSB_TA_MEM_FLAG_HOSTA |
26488+ PSB_TA_MEM_FLAG_HOSTD |
26489+ PSB_TA_MEM_FLAG_INIT,
26490+ dev_priv->ta_mem->ta_memory->offset,
26491+ dev_priv->ta_mem->hw_data->offset,
26492+ dev_priv->ta_mem->hw_cookie);
26493+ }
26494+}
26495+
26496+/*
26497+ * powermgmt_suspend_videodec
26498+ *
26499+ * Description: Suspend the video decode hardware saving state and disabling
26500+ * as necessary.
26501+ */
26502+static void powermgmt_suspend_videodec(struct drm_device *dev, bool b_initiated_by_ospm)
26503+{
26504+ struct drm_psb_private *dev_priv =
26505+ (struct drm_psb_private *)dev->dev_private;
26506+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
26507+
26508+ if (!(g_hw_power_status_mask & PSB_VIDEO_DEC_ISLAND))
26509+ return;
26510+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_videodec\n");
26511+
26512+ if (b_initiated_by_ospm)
26513+ psb_wait_msvdx_idle(dev);
26514+ else {
26515+ /* return without power off for D0i3/APM */
26516+ if (psb_check_msvdx_idle(dev))
26517+ return;
26518+ }
26519+
26520+ psb_irq_uninstall_islands(dev, PSB_VIDEO_DEC_ISLAND);
26521+ /* UGLY ... expose internal structure..
26522+ * it should be a function of save_context
26523+ * but there is no need for restore_context...
26524+ * replace it with a function?
26525+ */
26526+ msvdx_priv->msvdx_needs_reset = 1;
26527+ powermgmt_down_island_power(dev, PSB_VIDEO_DEC_ISLAND);
26528+
26529+ MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_POWERDOWN);
26530+}
26531+
26532+/*
26533+ * powermgmt_resume_videodec
26534+ *
26535+ * Description: Resume the video decode hardware restoring state and enabling
26536+ * as necessary.
26537+ */
26538+static void powermgmt_resume_videodec(struct drm_device *dev)
26539+{
26540+ struct drm_psb_private *dev_priv =
26541+ (struct drm_psb_private *)dev->dev_private;
26542+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
26543+
26544+ if (g_hw_power_status_mask & PSB_VIDEO_DEC_ISLAND)
26545+ return;
26546+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_videodec\n");
26547+
26548+ powermgmt_up_island_power(dev, PSB_VIDEO_DEC_ISLAND);
26549+ MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_POWERUP);
26550+}
26551+
26552+/*
26553+ * powermgmt_suspend_videoenc
26554+ *
26555+ * Description: Suspend the video encode hardware saving state and disabling
26556+ * as necessary.
26557+ */
26558+static void powermgmt_suspend_videoenc(struct drm_device *dev, bool b_initiated_by_ospm)
26559+{
26560+ struct drm_psb_private *dev_priv =
26561+ (struct drm_psb_private *)dev->dev_private;
26562+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
26563+
26564+ if (!(g_hw_power_status_mask & PSB_VIDEO_ENC_ISLAND))
26565+ return;
26566+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_videoenc\n");
26567+
26568+ if (b_initiated_by_ospm)
26569+ lnc_wait_topaz_idle(dev);
26570+ else {
26571+ /* return without power off for D0i3/APM */
26572+ if (lnc_check_topaz_idle(dev))
26573+ return;
26574+ }
26575+
26576+ psb_irq_uninstall_islands(dev, PSB_VIDEO_ENC_ISLAND);
26577+ lnc_topaz_save_mtx_state(dev);
26578+ powermgmt_down_island_power(dev, PSB_VIDEO_ENC_ISLAND);
26579+
26580+ TOPAZ_NEW_PMSTATE(dev, topaz_priv, PSB_PMSTATE_POWERDOWN);
26581+}
26582+
26583+/*
26584+ * powermgmt_resume_videoenc
26585+ *
26586+ * Description: Resume the video encode hardware restoring state and enabling
26587+ * as necessary.
26588+ */
26589+static void powermgmt_resume_videoenc(struct drm_device *dev)
26590+{
26591+ struct drm_psb_private *dev_priv =
26592+ (struct drm_psb_private *)dev->dev_private;
26593+ struct topaz_private *topaz_priv = dev_priv->topaz_private;
26594+
26595+ if (g_hw_power_status_mask & PSB_VIDEO_ENC_ISLAND)
26596+ return;
26597+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_videoenc\n");
26598+
26599+ powermgmt_up_island_power(dev, PSB_VIDEO_ENC_ISLAND);
26600+ lnc_topaz_restore_mtx_state(dev);
26601+
26602+ TOPAZ_NEW_PMSTATE(dev, topaz_priv, PSB_PMSTATE_POWERUP);
26603+}
26604+
26605+/*
26606+ * powermgmt_suspend_display
26607+ *
26608+ * Description: Suspend the display hardware saving state and disabling
26609+ * as necessary.
26610+ */
26611+static void powermgmt_suspend_display(struct drm_device *dev)
26612+{
26613+ struct drm_psb_private *dev_priv = dev->dev_private;
26614+ int pp_stat, jb;
26615+
26616+ if (!(g_hw_power_status_mask & PSB_DISPLAY_ISLAND))
26617+ return;
26618+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_display\n");
26619+
26620+ save_display_registers(dev);
26621+
26622+ /*shutdown the panel*/
26623+ PSB_WVDC32(0, PP_CONTROL);
26624+
26625+ do {
26626+ pp_stat = PSB_RVDC32(PP_STATUS);
26627+ } while (pp_stat & 0x80000000);
26628+
26629+ /*turn off the plane*/
26630+ PSB_WVDC32(0x58000000, DSPACNTR);
26631+ PSB_WVDC32(0, DSPASURF);/*trigger the plane disable*/
26632+ jb = jiffies + 4; /*wait 4 ticks*/
26633+ while (jiffies < jb)
26634+ schedule();
26635+
26636+ /*turn off pipe*/
26637+ PSB_WVDC32(0x0, PIPEACONF);
26638+ jb = jiffies + 8; /*wait 8 ticks*/
26639+ while (jiffies < jb)
26640+ schedule();
26641+
26642+ /*turn off PLLs*/
26643+ PSB_WVDC32(0, MRST_DPLL_A);
26644+
26645+ powermgmt_down_island_power(dev, PSB_DISPLAY_ISLAND);
26646+}
26647+
26648+/*
26649+ * powermgmt_resume_display
26650+ *
26651+ * Description: Resume the display hardware restoring state and enabling
26652+ * as necessary.
26653+ */
26654+static void powermgmt_resume_display(struct pci_dev *pdev)
26655+{
26656+ struct drm_device *dev = pci_get_drvdata(pdev);
26657+ struct drm_psb_private *dev_priv = dev->dev_private;
26658+ struct psb_gtt *pg = dev_priv->pg;
26659+
26660+ if (g_hw_power_status_mask & PSB_DISPLAY_ISLAND)
26661+ return;
26662+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_display\n");
26663+
26664+ /* turn on the display power island */
26665+ powermgmt_up_island_power(dev, PSB_DISPLAY_ISLAND);
26666+
26667+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
26668+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
26669+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
26670+
26671+ /* Don't reinitialize the GTT as it is unnecessary. The gtt is
26672+ * stored in memory so it will automatically be restored. All
26673+ * we need to do is restore the PGETBL_CTL which we already do
26674+ * above.
26675+ */
26676+ /*psb_gtt_init(dev_priv->pg, 1);*/
26677+
26678+ restore_display_registers(dev);
26679+}
26680+
26681+/*
26682+ * powermgmt_suspend_pci
26683+ *
26684+ * Description: Suspend the pci device saving state and disabling
26685+ * as necessary.
26686+ */
26687+static void powermgmt_suspend_pci(struct pci_dev *pdev)
26688+{
26689+ struct drm_device *dev = pci_get_drvdata(pdev);
26690+ struct drm_psb_private *dev_priv = dev->dev_private;
26691+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
26692+ int bsm, vbt;
26693+
26694+ if (!g_pci_power_status)
26695+ return;
26696+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_pci\n");
26697+
26698+ pci_save_state(pdev);
26699+ pci_read_config_dword(pci_gfx_root, 0x5C, &bsm);
26700+ dev_priv->saveBSM = bsm;
26701+ pci_read_config_dword(pci_gfx_root, 0xFC, &vbt);
26702+ dev_priv->saveVBT = vbt;
26703+ pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
26704+ pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
26705+
26706+ pci_disable_device(pdev);
26707+ pci_set_power_state(pdev, PCI_D3hot);
26708+
26709+ g_pci_power_status = 0;
26710+}
26711+
26712+/*
26713+ * powermgmt_resume_pci
26714+ *
26715+ * Description: Resume the pci device restoring state and enabling
26716+ * as necessary.
26717+ */
26718+static int powermgmt_resume_pci(struct pci_dev *pdev)
26719+{
26720+ struct drm_device *dev = pci_get_drvdata(pdev);
26721+ struct drm_psb_private *dev_priv = dev->dev_private;
26722+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
26723+ int ret = 0;
26724+
26725+ if (g_pci_power_status)
26726+ return ret;
26727+
26728+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_pci\n");
26729+
26730+ pci_set_power_state(pdev, PCI_D0);
26731+ pci_restore_state(pdev);
26732+ pci_write_config_dword(pci_gfx_root, 0x5c, dev_priv->saveBSM);
26733+ pci_write_config_dword(pci_gfx_root, 0xFC, dev_priv->saveVBT);
26734+	/* restoring MSI address and data in PCIx space */
26735+ pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
26736+ pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
26737+ ret = pci_enable_device(pdev);
26738+
26739+ g_pci_power_status = 1;
26740+
26741+ return ret;
26742+}
26743+
26744+/*
26745+ * powermgmt_suspend
26746+ *
26747+ * Description: OSPM is telling our driver to suspend to save state
26748+ * and power down all hardware.
26749+ */
26750+int powermgmt_suspend(struct pci_dev *pdev, pm_message_t state)
26751+{
26752+ int ret;
26753+ ret = powermgmt_suspend_islands(pdev, PSB_ALL_ISLANDS, true);
26754+ if (ret == -EBUSY)
26755+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend***BUSY!!!!!!\n");
26756+
26757+ return ret;
26758+}
26759+
26760+/*
26761+ * powermgmt_suspend_islands
26762+ *
26763+ * Description: Suspend the specified island by saving state
26764+ * and power down the hardware.
26765+ */
26766+int powermgmt_suspend_islands(struct pci_dev *pdev, int hw_islands, bool b_initiated_by_ospm)
26767+{
26768+ struct drm_device *dev = pci_get_drvdata(pdev);
26769+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
26770+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
26771+ unsigned long irq_flags;
26772+ int ret = 0;
26773+
26774+ if (in_interrupt()) {
26775+ /*
26776+ * If an interrupt handler calls powermgmt_suspend_islands(), we can't call mutex_lock.
26777+ * Right now, only video enc/dec calls us from interrupt handler. Should be safe to
26778+ * just proceed since the only code that resumes video enc/dec is internal to our driver
26779+ * and should be written in such a way that shouldn't cause any issues. If we are already
26780+ * in the middle of an OSPM initiated suspend, then just return since that will take care
26781+ * of powering off video enc/dec for us. Also, don't set g_suspend_mask and
26782+ * g_suspend_in_progress since this function will be atomic since we are in an
26783+ * interrupt handler and thus no outside parties will get the chance to care and we
26784+		 * don't want to overwrite any pending suspend operations that get interrupted.
26785+ */
26786+ if (b_initiated_by_ospm)
26787+ return ret;
26788+ }
26789+ else {
26790+ mutex_lock(&g_state_change_mutex);
26791+
26792+ g_suspend_mask = hw_islands;
26793+ g_suspend_in_progress = true;
26794+ }
26795+ atomic_inc(&g_pm_waiters);
26796+
26797+ if (g_hw_power_status_mask & PSB_GRAPHICS_ISLAND) {
26798+ if (atomic_read(&g_graphics_access_count))
26799+ ret = -EBUSY;
26800+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) !=
26801+ _PSB_C2_SOCIF_EMPTY) ||
26802+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
26803+ _PSB_C2B_STATUS_BUSY) != 0)) {
26804+ ret = -EBUSY;
26805+ }
26806+ spin_lock_irqsave(&scheduler->lock, irq_flags);
26807+ if (!scheduler->idle ||
26808+ !list_empty(&scheduler->raster_queue) ||
26809+ !list_empty(&scheduler->ta_queue) ||
26810+ !list_empty(&scheduler->hp_raster_queue) ||
26811+ scheduler->feedback_task) {
26812+ ret = -EBUSY;
26813+ }
26814+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
26815+ }
26816+ if ((hw_islands & PSB_VIDEO_DEC_ISLAND) &&
26817+ atomic_read(&g_videodec_access_count))
26818+ ret = -EBUSY;
26819+ if ((hw_islands & PSB_VIDEO_ENC_ISLAND) &&
26820+ atomic_read(&g_videoenc_access_count))
26821+ ret = -EBUSY;
26822+ if ((hw_islands & PSB_DISPLAY_ISLAND) &&
26823+ atomic_read(&g_display_access_count))
26824+ ret = -EBUSY;
26825+
26826+ atomic_dec(&g_pm_waiters);
26827+
26828+ if (!ret) {
26829+		/*disable gfx interrupt later when sgx is idle*/
26830+ psb_irq_uninstall_islands(dev, hw_islands & ~PSB_GRAPHICS_ISLAND &
26831+ ~PSB_VIDEO_ENC_ISLAND & ~PSB_VIDEO_DEC_ISLAND);
26832+
26833+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
26834+ powermgmt_suspend_videodec(dev, b_initiated_by_ospm);
26835+ if(IS_MRST(dev)) {
26836+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
26837+ powermgmt_suspend_videoenc(dev, b_initiated_by_ospm);
26838+ }
26839+ if (hw_islands & PSB_GRAPHICS_ISLAND)
26840+ powermgmt_suspend_graphics(dev, b_initiated_by_ospm);
26841+ if (hw_islands & PSB_DISPLAY_ISLAND)
26842+ powermgmt_suspend_display(dev);
26843+ if (g_hw_power_status_mask == 0) {
26844+ if (drm_core_check_feature(dev, DRIVER_MODESET))
26845+ drm_irq_uninstall(dev);
26846+ powermgmt_suspend_pci(pdev);
26847+ }
26848+ }
26849+
26850+#ifdef OSPM_STAT
26851+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
26852+ bool b_change = true;
26853+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0)
26854+ dev_priv->gfx_d0_time += jiffies - dev_priv->gfx_last_mode_change;
26855+ else if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
26856+ dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
26857+ else
26858+ b_change = false;
26859+ if (b_change) {
26860+ dev_priv->gfx_last_mode_change = jiffies;
26861+ if (g_hw_power_status_mask & PSB_DISPLAY_ISLAND) {
26862+ dev_priv->graphics_state = PSB_PWR_STATE_D0i3;
26863+ dev_priv->gfx_d0i3_cnt++;
26864+ } else {
26865+ dev_priv->graphics_state = PSB_PWR_STATE_D3;
26866+ dev_priv->gfx_d3_cnt++;
26867+ }
26868+ }
26869+ }
26870+#endif
26871+
26872+ if (!in_interrupt()) {
26873+ g_suspend_in_progress = false;
26874+ mutex_unlock(&g_state_change_mutex);
26875+ }
26876+
26877+ return ret;
26878+}
26879+
26880+/*
26881+ * powermgmt_resume
26882+ *
26883+ * Description: OSPM is telling our driver to resume so restore state
26884+ * and power up display. Leave graphics and video powered off as they
26885+ * will be powered up once needed.
26886+ */
26887+int powermgmt_resume(struct pci_dev *pdev)
26888+{
26889+ return 0;
26890+ //return powermgmt_resume_islands(pdev, PSB_DISPLAY_ISLAND);
26891+}
26892+
26893+/*
26894+ * powermgmt_resume_islands
26895+ *
26896+ * Description: Resume the specified islands by restoring state
26897+ * and power things up.
26898+ */
26899+int powermgmt_resume_islands(struct pci_dev *pdev, int hw_islands)
26900+{
26901+ struct drm_device *dev = pci_get_drvdata(pdev);
26902+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
26903+ bool b_install_irq = false;
26904+ int ret = 0;
26905+
26906+ if (!g_forcing_resume)
26907+ mutex_lock(&g_state_change_mutex);
26908+
26909+ g_resume_mask = hw_islands;
26910+ g_resume_in_progress = true;
26911+
26912+ PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_islands\n");
26913+
26914+ if (g_hw_power_status_mask == 0) {
26915+ if (powermgmt_resume_pci(pdev))
26916+ goto resume_exit;
26917+ b_install_irq = drm_core_check_feature(dev, DRIVER_MODESET);
26918+ }
26919+
26920+ if (hw_islands & PSB_DISPLAY_ISLAND)
26921+ powermgmt_resume_display(pdev);
26922+ if (IS_MRST(dev)) {
26923+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
26924+ powermgmt_resume_videoenc(dev);
26925+ }
26926+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
26927+ powermgmt_resume_videodec(dev);
26928+ if (hw_islands & PSB_GRAPHICS_ISLAND)
26929+ powermgmt_resume_graphics(dev);
26930+ if (b_install_irq)
26931+ drm_irq_install(dev);
26932+ else {
26933+ psb_irq_preinstall_islands(dev, hw_islands);
26934+ psb_irq_postinstall_islands(dev, hw_islands);
26935+ }
26936+
26937+#ifdef OSPM_STAT
26938+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
26939+ bool b_change = true;
26940+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
26941+ dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
26942+ else if (dev_priv->graphics_state == PSB_PWR_STATE_D3)
26943+ dev_priv->gfx_d3_time += jiffies - dev_priv->gfx_last_mode_change;
26944+ else
26945+ b_change = false;
26946+
26947+ if (b_change) {
26948+ dev_priv->gfx_last_mode_change = jiffies;
26949+ dev_priv->graphics_state = PSB_PWR_STATE_D0;
26950+ dev_priv->gfx_d0_cnt++;
26951+ }
26952+ }
26953+#endif
26954+
26955+ g_resume_in_progress = false;
26956+
26957+resume_exit:
26958+ if (!g_forcing_resume)
26959+ mutex_unlock(&g_state_change_mutex);
26960+ return ret;
26961+}
26962+
26963+/*
26964+ * powermgmt_using_hw_begin
26965+ *
26966+ * Description: Notify PowerMgmt module that you will be accessing the
26967+ * specified islands' hw so don't power it off. If force_on is true,
26968+ * this will power on any of the specified islands which are off.
26969+ * Otherwise, this will return false and the caller is expected to not
26970+ * access the hw.
26971+ *
26972+ * NOTE *** If this is called from an interrupt handler or other atomic
26973+ * context, then it will return false if we are in the middle of a
26974+ * power state transition and the caller will be expected to handle that
26975+ * even if force_on is set to true.
26976+ */
26977+bool powermgmt_using_hw_begin(struct pci_dev *pdev, int hw_islands, bool force_on)
26978+{
26979+ bool ret = true;
26980+ int off_islands = 0;
26981+ bool b_atomic = (in_interrupt() || in_atomic());
26982+
26983+ if (!b_atomic)
26984+ mutex_lock(&g_state_change_mutex);
26985+
26986+ if (b_atomic &&
26987+ (powermgmt_is_suspend_in_progress(hw_islands) ||
26988+ powermgmt_is_resume_in_progress(hw_islands))) {
26989+ if (force_on)
26990+ printk(KERN_WARNING "!!!WARNING!!! powermgmt_using_hw_begin - force_on failed - be sure to check return value !!!WARNING!!!\n");
26991+ ret = false;
26992+ } else {
26993+ off_islands = hw_islands & (PSB_ALL_ISLANDS & ~g_hw_power_status_mask);
26994+ if (off_islands) {
26995+ if (force_on) {
26996+ g_forcing_resume = true;
26997+ powermgmt_resume_islands(pdev, off_islands);
26998+ g_forcing_resume = false;
26999+ } else {
27000+ ret = false;
27001+ }
27002+ }
27003+ }
27004+
27005+ if (ret) {
27006+ if (hw_islands & PSB_GRAPHICS_ISLAND)
27007+ atomic_inc(&g_graphics_access_count);
27008+ if (hw_islands & PSB_VIDEO_ENC_ISLAND)
27009+ atomic_inc(&g_videoenc_access_count);
27010+ if (hw_islands & PSB_VIDEO_DEC_ISLAND)
27011+ atomic_inc(&g_videodec_access_count);
27012+ if (hw_islands & PSB_DISPLAY_ISLAND)
27013+ atomic_inc(&g_display_access_count);
27014+ }
27015+
27016+ if (!b_atomic)
27017+ mutex_unlock(&g_state_change_mutex);
27018+
27019+ return ret;
27020+}
27021+
27022+/*
27023+ * powermgmt_using_hw_end
27024+ *
27025+ * Description: Notify PowerMgmt module that you are done accessing the
27026+ * specified islands' hw so feel free to power it off. Note that this
27027+ * function doesn't actually power off the islands. The caller should
27028+ * call psb_suspend(hw_islands) if it wishes to proactively power them
27029+ * down.
27030+ */
27031+void powermgmt_using_hw_end(int hw_islands)
27032+{
27033+ if (hw_islands & PSB_GRAPHICS_ISLAND) {
27034+ atomic_dec(&g_graphics_access_count);
27035+ }
27036+ if (hw_islands & PSB_VIDEO_ENC_ISLAND){
27037+ atomic_dec(&g_videoenc_access_count);
27038+ }
27039+ if (hw_islands & PSB_VIDEO_DEC_ISLAND){
27040+ atomic_dec(&g_videodec_access_count);
27041+ }
27042+ if (hw_islands & PSB_DISPLAY_ISLAND){
27043+ atomic_dec(&g_display_access_count);
27044+ }
27045+
27046+ if(!atomic_read(&g_graphics_access_count) &&
27047+ !atomic_read(&g_videoenc_access_count) &&
27048+ !atomic_read(&g_videodec_access_count) &&
27049+ !atomic_read(&g_display_access_count) &&
27050+ atomic_read(&g_pm_waiters))
27051+
27052+ WARN_ON(atomic_read(&g_graphics_access_count) < 0);
27053+ WARN_ON(atomic_read(&g_videoenc_access_count) < 0);
27054+ WARN_ON(atomic_read(&g_videodec_access_count) < 0);
27055+ WARN_ON(atomic_read(&g_display_access_count) < 0);
27056+}
27057+
27058+/*
27059+ * powermgmt_is_hw_on
27060+ *
27061+ * Description: do an instantaneous check for if the specified islands
27062+ * are on. Only use this in cases where you know the g_state_change_mutex
27063+ * is already held such as in irq install/uninstall. Otherwise, use
27064+ * powermgmt_using_hw_begin().
27065+ */
27066+bool powermgmt_is_hw_on(struct pci_dev *pdev, int hw_islands)
27067+{
27068+ return ((g_hw_power_status_mask & hw_islands) == hw_islands);
27069+}
27070+
27071+/*
27072+ * powermgmt_is_suspend_in_progress
27073+ *
27074+ * Description: Are we in the middle of suspending any of the
27075+ * specified hardware?
27076+ */
27077+bool powermgmt_is_suspend_in_progress(int hw_islands)
27078+{
27079+ return (g_suspend_in_progress) ? ((g_suspend_mask & hw_islands) ? true : false) : false;
27080+}
27081+
27082+/*
27083+ * powermgmt_is_resume_in_progress
27084+ *
27085+ * Description: Are we in the middle of resuming any of the
27086+ * specified hardware?
27087+ */
27088+bool powermgmt_is_resume_in_progress(int hw_islands)
27089+{
27090+ return (g_resume_in_progress) ? ((g_resume_mask & hw_islands) ? true : false) : false;
27091+}
27092+/*
27093+ * powermgmt_is_gfx_busy
27094+ *
27095+ * Description: Is someone using GFX HW currently?
27096+ *
27097+ */
27098+bool powermgmt_is_gfx_busy()
27099+{
27100+ return (atomic_read(&g_graphics_access_count) ? true : false);
27101+}
27102diff --git a/drivers/gpu/drm/psb/psb_powermgmt.h b/drivers/gpu/drm/psb/psb_powermgmt.h
27103new file mode 100644
27104index 0000000..5b40495
27105--- /dev/null
27106+++ b/drivers/gpu/drm/psb/psb_powermgmt.h
27107@@ -0,0 +1,73 @@
27108+/**************************************************************************
27109+ * Copyright (c) 2009, Intel Corporation.
27110+ * All Rights Reserved.
27111+
27112+ * Permission is hereby granted, free of charge, to any person obtaining a
27113+ * copy of this software and associated documentation files (the "Software"),
27114+ * to deal in the Software without restriction, including without limitation
27115+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
27116+ * and/or sell copies of the Software, and to permit persons to whom the
27117+ * Software is furnished to do so, subject to the following conditions:
27118+ *
27119+ * The above copyright notice and this permission notice (including the next
27120+ * paragraph) shall be included in all copies or substantial portions of the
27121+ * Software.
27122+ *
27123+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27124+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27125+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
27126+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27127+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27128+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27129+ * SOFTWARE.
27130+ *
27131+ * Authors:
27132+ * Benjamin Defnet <benjamin.r.defnet@intel.com>
27133+ *
27134+ */
27135+#ifndef _PSB_POWERMGMT_H_
27136+#define _PSB_POWERMGMT_H_
27137+
27138+#include <linux/pci.h>
27139+
27140+#define PSB_GRAPHICS_ISLAND 0x1
27141+#define PSB_VIDEO_ENC_ISLAND 0x2
27142+#define PSB_VIDEO_DEC_ISLAND 0x4
27143+#define PSB_DISPLAY_ISLAND 0x8
27144+#define PSB_ALL_ISLANDS 0xf
27145+
27146+void powermgmt_init(void);
27147+void powermgmt_shutdown(void);
27148+
27149+/*
27150+ * OSPM will call these functions
27151+ */
27152+int powermgmt_suspend(struct pci_dev *pdev, pm_message_t state);
27153+int powermgmt_resume(struct pci_dev *pdev);
27154+
27155+/*
27156+ * These are the functions the driver should call to do internally driven
27157+ * power gating (D0i3)
27158+ */
27159+int powermgmt_suspend_islands(struct pci_dev *pdev, int hw_islands, bool b_initiated_by_ospm);
27160+int powermgmt_resume_islands(struct pci_dev *pdev, int hw_islands);
27161+
27162+/*
27163+ * These are the functions the driver should use to wrap all hw access
27164+ * (i.e. register reads and writes)
27165+ */
27166+bool powermgmt_using_hw_begin(struct pci_dev *pdev, int hw_islands, bool force_on);
27167+void powermgmt_using_hw_end(int hw_islands);
27168+
27169+/*
27170+ * Use this function to do an instantaneous check for if the hw is on.
27171+ * Only use this in cases where you know the g_state_change_mutex
27172+ * is already held such as in irq install/uninstall and you need to
27173+ * prevent a deadlock situation. Otherwise use powermgmt_using_hw_begin().
27174+ */
27175+bool powermgmt_is_hw_on(struct pci_dev *pdev, int hw_islands);
27176+
27177+bool powermgmt_is_suspend_in_progress(int hw_islands);
27178+bool powermgmt_is_resume_in_progress(int hw_islands);
27179+bool powermgmt_is_gfx_busy(void);
27180+#endif /*_PSB_POWERMGMT_H_*/
27181diff --git a/drivers/gpu/drm/psb/psb_reg.h b/drivers/gpu/drm/psb/psb_reg.h
27182new file mode 100644
27183index 0000000..4974689
27184--- /dev/null
27185+++ b/drivers/gpu/drm/psb/psb_reg.h
27186@@ -0,0 +1,574 @@
27187+/**************************************************************************
27188+ *
27189+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
27190+ * Copyright (c) 2007, Intel Corporation.
27191+ * All Rights Reserved.
27192+ *
27193+ * This program is free software; you can redistribute it and/or modify it
27194+ * under the terms and conditions of the GNU General Public License,
27195+ * version 2, as published by the Free Software Foundation.
27196+ *
27197+ * This program is distributed in the hope it will be useful, but WITHOUT
27198+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
27199+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
27200+ * more details.
27201+ *
27202+ * You should have received a copy of the GNU General Public License along with
27203+ * this program; if not, write to the Free Software Foundation, Inc.,
27204+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
27205+ *
27206+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
27207+ * develop this driver.
27208+ *
27209+ **************************************************************************/
27210+/*
27211+ */
27212+#ifndef _PSB_REG_H_
27213+#define _PSB_REG_H_
27214+
27215+#define PSB_CR_CLKGATECTL 0x0000
27216+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
27217+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
27218+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
27219+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
27220+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
27221+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
27222+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
27223+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
27224+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
27225+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
27226+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
27227+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
27228+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
27229+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
27230+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
27231+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
27232+
27233+#define PSB_CR_CORE_ID 0x0010
27234+#define _PSB_CC_ID_ID_SHIFT (16)
27235+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
27236+#define _PSB_CC_ID_CONFIG_SHIFT (0)
27237+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
27238+
27239+#define PSB_CR_CORE_REVISION 0x0014
27240+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
27241+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
27242+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
27243+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
27244+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
27245+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
27246+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
27247+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
27248+
27249+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
27250+
27251+#define PSB_CR_SOFT_RESET 0x0080
27252+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
27253+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
27254+#define _PSB_CS_RESET_USE_RESET (1 << 4)
27255+#define _PSB_CS_RESET_TA_RESET (1 << 3)
27256+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
27257+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
27258+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
27259+
27260+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
27261+
27262+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
27263+
27264+#define PSB_CR_EVENT_STATUS2 0x0118
27265+
27266+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
27267+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
27268+
27269+#define PSB_CR_EVENT_STATUS 0x012C
27270+
27271+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
27272+
27273+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
27274+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
27275+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
27276+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
27277+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
27278+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
27279+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
27280+#define _PSB_CE_SW_EVENT (1 << 14)
27281+#define _PSB_CE_TA_FINISHED (1 << 13)
27282+#define _PSB_CE_TA_TERMINATE (1 << 12)
27283+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
27284+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
27285+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
27286+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
27287+
27288+
27289+#define PSB_USE_OFFSET_MASK 0x0007FFFF
27290+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
27291+#define PSB_CR_USE_CODE_BASE0 0x0A0C
27292+#define PSB_CR_USE_CODE_BASE1 0x0A10
27293+#define PSB_CR_USE_CODE_BASE2 0x0A14
27294+#define PSB_CR_USE_CODE_BASE3 0x0A18
27295+#define PSB_CR_USE_CODE_BASE4 0x0A1C
27296+#define PSB_CR_USE_CODE_BASE5 0x0A20
27297+#define PSB_CR_USE_CODE_BASE6 0x0A24
27298+#define PSB_CR_USE_CODE_BASE7 0x0A28
27299+#define PSB_CR_USE_CODE_BASE8 0x0A2C
27300+#define PSB_CR_USE_CODE_BASE9 0x0A30
27301+#define PSB_CR_USE_CODE_BASE10 0x0A34
27302+#define PSB_CR_USE_CODE_BASE11 0x0A38
27303+#define PSB_CR_USE_CODE_BASE12 0x0A3C
27304+#define PSB_CR_USE_CODE_BASE13 0x0A40
27305+#define PSB_CR_USE_CODE_BASE14 0x0A44
27306+#define PSB_CR_USE_CODE_BASE15 0x0A48
27307+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
27308+#define _PSB_CUC_BASE_DM_SHIFT (25)
27309+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
27310+#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
27311+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
27312+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
27313+#define _PSB_CUC_DM_VERTEX (0)
27314+#define _PSB_CUC_DM_PIXEL (1)
27315+#define _PSB_CUC_DM_RESERVED (2)
27316+#define _PSB_CUC_DM_EDM (3)
27317+
27318+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
27319+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
27320+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
27321+
27322+#define PSB_CR_EVENT_KICKER 0x0AC4
27323+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
27324+
27325+#define PSB_CR_EVENT_KICK 0x0AC8
27326+#define _PSB_CE_KICK_NOW (1 << 0)
27327+
27328+
27329+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
27330+
27331+#define PSB_CR_BIF_CTRL 0x0C00
27332+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
27333+#define _PSB_CB_CTRL_INVALDC (1 << 3)
27334+#define _PSB_CB_CTRL_FLUSH (1 << 2)
27335+
27336+#define PSB_CR_BIF_INT_STAT 0x0C04
27337+
27338+#define PSB_CR_BIF_FAULT 0x0C08
27339+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
27340+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
27341+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
27342+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
27343+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
27344+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
27345+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
27346+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
27347+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
27348+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
27349+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
27350+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
27351+
27352+#define PSB_CR_BIF_BANK0 0x0C78
27353+
27354+#define PSB_CR_BIF_BANK1 0x0C7C
27355+
27356+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
27357+
27358+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
27359+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
27360+
27361+#define PSB_CR_2D_SOCIF 0x0E18
27362+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
27363+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
27364+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
27365+
27366+#define PSB_CR_2D_BLIT_STATUS 0x0E04
27367+#define _PSB_C2B_STATUS_BUSY (1 << 24)
27368+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
27369+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
27370+
27371+/*
27372+ * 2D defs.
27373+ */
27374+
27375+/*
27376+ * 2D Slave Port Data : Block Header's Object Type
27377+ */
27378+
27379+#define PSB_2D_CLIP_BH (0x00000000)
27380+#define PSB_2D_PAT_BH (0x10000000)
27381+#define PSB_2D_CTRL_BH (0x20000000)
27382+#define PSB_2D_SRC_OFF_BH (0x30000000)
27383+#define PSB_2D_MASK_OFF_BH (0x40000000)
27384+#define PSB_2D_RESERVED1_BH (0x50000000)
27385+#define PSB_2D_RESERVED2_BH (0x60000000)
27386+#define PSB_2D_FENCE_BH (0x70000000)
27387+#define PSB_2D_BLIT_BH (0x80000000)
27388+#define PSB_2D_SRC_SURF_BH (0x90000000)
27389+#define PSB_2D_DST_SURF_BH (0xA0000000)
27390+#define PSB_2D_PAT_SURF_BH (0xB0000000)
27391+#define PSB_2D_SRC_PAL_BH (0xC0000000)
27392+#define PSB_2D_PAT_PAL_BH (0xD0000000)
27393+#define PSB_2D_MASK_SURF_BH (0xE0000000)
27394+#define PSB_2D_FLUSH_BH (0xF0000000)
27395+
27396+/*
27397+ * Clip Definition block (PSB_2D_CLIP_BH)
27398+ */
27399+#define PSB_2D_CLIPCOUNT_MAX (1)
27400+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
27401+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
27402+#define PSB_2D_CLIPCOUNT_SHIFT (0)
27403+/* clip rectangle min & max */
27404+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
27405+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
27406+#define PSB_2D_CLIP_XMAX_SHIFT (12)
27407+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
27408+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
27409+#define PSB_2D_CLIP_XMIN_SHIFT (0)
27410+/* clip rectangle offset */
27411+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
27412+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
27413+#define PSB_2D_CLIP_YMAX_SHIFT (12)
27414+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
27415+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
27416+#define PSB_2D_CLIP_YMIN_SHIFT (0)
27417+
27418+/*
27419+ * Pattern Control (PSB_2D_PAT_BH)
27420+ */
27421+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
27422+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
27423+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
27424+#define PSB_2D_PAT_WIDTH_SHIFT (5)
27425+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
27426+#define PSB_2D_PAT_YSTART_SHIFT (10)
27427+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
27428+#define PSB_2D_PAT_XSTART_SHIFT (15)
27429+
27430+/*
27431+ * 2D Control block (PSB_2D_CTRL_BH)
27432+ */
27433+/* Present Flags */
27434+#define PSB_2D_SRCCK_CTRL (0x00000001)
27435+#define PSB_2D_DSTCK_CTRL (0x00000002)
27436+#define PSB_2D_ALPHA_CTRL (0x00000004)
27437+/* Colour Key Colour (SRC/DST)*/
27438+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
27439+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
27440+#define PSB_2D_CK_COL_SHIFT (0)
27441+/* Colour Key Mask (SRC/DST)*/
27442+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
27443+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
27444+#define PSB_2D_CK_MASK_SHIFT (0)
27445+/* Alpha Control (Alpha/RGB)*/
27446+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
27447+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
27448+#define PSB_2D_GBLALPHA_SHIFT (12)
27449+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
27450+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
27451+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
27452+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
27453+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
27454+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
27455+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
27456+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
27457+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
27458+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
27459+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
27460+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
27461+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
27462+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
27463+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
27464+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
27465+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
27466+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
27467+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
27468+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
27469+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
27470+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
27471+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
27472+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
27473+
27474+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
27475+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
27476+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
27477+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
27478+
27479+/*
27480+ *Source Offset (PSB_2D_SRC_OFF_BH)
27481+ */
27482+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
27483+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
27484+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
27485+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
27486+
27487+/*
27488+ * Mask Offset (PSB_2D_MASK_OFF_BH)
27489+ */
27490+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
27491+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
27492+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
27493+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
27494+
27495+/*
27496+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
27497+ */
27498+
27499+/*
27500+ *Blit Rectangle (PSB_2D_BLIT_BH)
27501+ */
27502+
27503+#define PSB_2D_ROT_MASK (3<<25)
27504+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
27505+#define PSB_2D_ROT_NONE (0<<25)
27506+#define PSB_2D_ROT_90DEGS (1<<25)
27507+#define PSB_2D_ROT_180DEGS (2<<25)
27508+#define PSB_2D_ROT_270DEGS (3<<25)
27509+
27510+#define PSB_2D_COPYORDER_MASK (3<<23)
27511+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
27512+#define PSB_2D_COPYORDER_TL2BR (0<<23)
27513+#define PSB_2D_COPYORDER_BR2TL (1<<23)
27514+#define PSB_2D_COPYORDER_TR2BL (2<<23)
27515+#define PSB_2D_COPYORDER_BL2TR (3<<23)
27516+
27517+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
27518+#define PSB_2D_DSTCK_DISABLE (0x00000000)
27519+#define PSB_2D_DSTCK_PASS (0x00200000)
27520+#define PSB_2D_DSTCK_REJECT (0x00400000)
27521+
27522+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
27523+#define PSB_2D_SRCCK_DISABLE (0x00000000)
27524+#define PSB_2D_SRCCK_PASS (0x00080000)
27525+#define PSB_2D_SRCCK_REJECT (0x00100000)
27526+
27527+#define PSB_2D_CLIP_ENABLE (0x00040000)
27528+
27529+#define PSB_2D_ALPHA_ENABLE (0x00020000)
27530+
27531+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
27532+#define PSB_2D_PAT_MASK (0x00010000)
27533+#define PSB_2D_USE_PAT (0x00010000)
27534+#define PSB_2D_USE_FILL (0x00000000)
27535+/*
27536+ * Tungsten Graphics note on rop codes: If rop A and rop B are
27537+ * identical, the mask surface will not be read and need not be
27538+ * set up.
27539+ */
27540+
27541+#define PSB_2D_ROP3B_MASK (0x0000FF00)
27542+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
27543+#define PSB_2D_ROP3B_SHIFT (8)
27544+/* rop code A */
27545+#define PSB_2D_ROP3A_MASK (0x000000FF)
27546+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
27547+#define PSB_2D_ROP3A_SHIFT (0)
27548+
27549+#define PSB_2D_ROP4_MASK (0x0000FFFF)
27550+/*
27551+ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
27552+ * Fill Colour RGBA8888
27553+ */
27554+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
27555+#define PSB_2D_FILLCOLOUR_SHIFT (0)
27556+/*
27557+ * DWORD1: (Always Present)
27558+ * X Start (Dest)
27559+ * Y Start (Dest)
27560+ */
27561+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
27562+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
27563+#define PSB_2D_DST_XSTART_SHIFT (12)
27564+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
27565+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
27566+#define PSB_2D_DST_YSTART_SHIFT (0)
27567+/*
27568+ * DWORD2: (Always Present)
27569+ * X Size (Dest)
27570+ * Y Size (Dest)
27571+ */
27572+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
27573+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
27574+#define PSB_2D_DST_XSIZE_SHIFT (12)
27575+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
27576+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
27577+#define PSB_2D_DST_YSIZE_SHIFT (0)
27578+
27579+/*
27580+ * Source Surface (PSB_2D_SRC_SURF_BH)
27581+ */
27582+/*
27583+ * WORD 0
27584+ */
27585+
27586+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
27587+#define PSB_2D_SRC_1_PAL (0x00000000)
27588+#define PSB_2D_SRC_2_PAL (0x00008000)
27589+#define PSB_2D_SRC_4_PAL (0x00010000)
27590+#define PSB_2D_SRC_8_PAL (0x00018000)
27591+#define PSB_2D_SRC_8_ALPHA (0x00020000)
27592+#define PSB_2D_SRC_4_ALPHA (0x00028000)
27593+#define PSB_2D_SRC_332RGB (0x00030000)
27594+#define PSB_2D_SRC_4444ARGB (0x00038000)
27595+#define PSB_2D_SRC_555RGB (0x00040000)
27596+#define PSB_2D_SRC_1555ARGB (0x00048000)
27597+#define PSB_2D_SRC_565RGB (0x00050000)
27598+#define PSB_2D_SRC_0888ARGB (0x00058000)
27599+#define PSB_2D_SRC_8888ARGB (0x00060000)
27600+#define PSB_2D_SRC_8888UYVY (0x00068000)
27601+#define PSB_2D_SRC_RESERVED (0x00070000)
27602+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
27603+
27604+
27605+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
27606+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
27607+#define PSB_2D_SRC_STRIDE_SHIFT (0)
27608+/*
27609+ * WORD 1 - Base Address
27610+ */
27611+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
27612+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
27613+#define PSB_2D_SRC_ADDR_SHIFT (2)
27614+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
27615+
27616+/*
27617+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
27618+ */
27619+/*
27620+ * WORD 0
27621+ */
27622+
27623+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
27624+#define PSB_2D_PAT_1_PAL (0x00000000)
27625+#define PSB_2D_PAT_2_PAL (0x00008000)
27626+#define PSB_2D_PAT_4_PAL (0x00010000)
27627+#define PSB_2D_PAT_8_PAL (0x00018000)
27628+#define PSB_2D_PAT_8_ALPHA (0x00020000)
27629+#define PSB_2D_PAT_4_ALPHA (0x00028000)
27630+#define PSB_2D_PAT_332RGB (0x00030000)
27631+#define PSB_2D_PAT_4444ARGB (0x00038000)
27632+#define PSB_2D_PAT_555RGB (0x00040000)
27633+#define PSB_2D_PAT_1555ARGB (0x00048000)
27634+#define PSB_2D_PAT_565RGB (0x00050000)
27635+#define PSB_2D_PAT_0888ARGB (0x00058000)
27636+#define PSB_2D_PAT_8888ARGB (0x00060000)
27637+
27638+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
27639+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
27640+#define PSB_2D_PAT_STRIDE_SHIFT (0)
27641+/*
27642+ * WORD 1 - Base Address
27643+ */
27644+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
27645+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
27646+#define PSB_2D_PAT_ADDR_SHIFT (2)
27647+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
27648+
27649+/*
27650+ * Destination Surface (PSB_2D_DST_SURF_BH)
27651+ */
27652+/*
27653+ * WORD 0
27654+ */
27655+
27656+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
27657+#define PSB_2D_DST_332RGB (0x00030000)
27658+#define PSB_2D_DST_4444ARGB (0x00038000)
27659+#define PSB_2D_DST_555RGB (0x00040000)
27660+#define PSB_2D_DST_1555ARGB (0x00048000)
27661+#define PSB_2D_DST_565RGB (0x00050000)
27662+#define PSB_2D_DST_0888ARGB (0x00058000)
27663+#define PSB_2D_DST_8888ARGB (0x00060000)
27664+#define PSB_2D_DST_8888AYUV (0x00070000)
27665+
27666+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
27667+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
27668+#define PSB_2D_DST_STRIDE_SHIFT (0)
27669+/*
27670+ * WORD 1 - Base Address
27671+ */
27672+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
27673+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
27674+#define PSB_2D_DST_ADDR_SHIFT (2)
27675+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
27676+
27677+/*
27678+ * Mask Surface (PSB_2D_MASK_SURF_BH)
27679+ */
27680+/*
27681+ * WORD 0
27682+ */
27683+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
27684+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
27685+#define PSB_2D_MASK_STRIDE_SHIFT (0)
27686+/*
27687+ * WORD 1 - Base Address
27688+ */
27689+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
27690+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
27691+#define PSB_2D_MASK_ADDR_SHIFT (2)
27692+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
27693+
27694+/*
27695+ * Source Palette (PSB_2D_SRC_PAL_BH)
27696+ */
27697+
27698+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
27699+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
27700+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
27701+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
27702+
27703+/*
27704+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
27705+ */
27706+
27707+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
27708+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
27709+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
27710+#define PSB_2D_PATPAL_BYTEALIGN (1024)
27711+
27712+/*
27713+ * Rop3 Codes (2 LS bytes)
27714+ */
27715+
27716+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
27717+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
27718+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
27719+#define PSB_2D_ROP3_BLACKNESS (0x0000)
27720+#define PSB_2D_ROP3_SRC (0xCC)
27721+#define PSB_2D_ROP3_PAT (0xF0)
27722+#define PSB_2D_ROP3_DST (0xAA)
27723+
27724+
27725+/*
27726+ * Sizes.
27727+ */
27728+
27729+#define PSB_SCENE_HW_COOKIE_SIZE 16
27730+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
27731+
27732+/*
27733+ * Scene stuff.
27734+ */
27735+
27736+#define PSB_NUM_HW_SCENES 2
27737+
27738+/*
27739+ * Scheduler completion actions.
27740+ */
27741+
27742+#define PSB_RASTER_BLOCK 0
27743+#define PSB_RASTER 1
27744+#define PSB_RETURN 2
27745+#define PSB_TA 3
27746+
27747+
27748+/*Power management*/
27749+#define PSB_PUNIT_PORT 0x04
27750+#define PSB_APMBA 0x7a
27751+#define PSB_APM_CMD 0x0
27752+#define PSB_APM_STS 0x04
27753+#define PSB_PWRGT_GFX_MASK 0x3
27754+#define PSB_PWRGT_VID_ENC_MASK 0x30
27755+#define PSB_PWRGT_VID_DEC_MASK 0xc
27756+
27757+#define PSB_PM_SSC 0x20
27758+#define PSB_PM_SSS 0x30
27759+#define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/
27760+#endif
27761diff --git a/drivers/gpu/drm/psb/psb_reset.c b/drivers/gpu/drm/psb/psb_reset.c
27762new file mode 100644
27763index 0000000..04c9378
27764--- /dev/null
27765+++ b/drivers/gpu/drm/psb/psb_reset.c
27766@@ -0,0 +1,484 @@
27767+/**************************************************************************
27768+ * Copyright (c) 2007, Intel Corporation.
27769+ * All Rights Reserved.
27770+ *
27771+ * This program is free software; you can redistribute it and/or modify it
27772+ * under the terms and conditions of the GNU General Public License,
27773+ * version 2, as published by the Free Software Foundation.
27774+ *
27775+ * This program is distributed in the hope it will be useful, but WITHOUT
27776+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
27777+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
27778+ * more details.
27779+ *
27780+ * You should have received a copy of the GNU General Public License along with
27781+ * this program; if not, write to the Free Software Foundation, Inc.,
27782+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
27783+ *
27784+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
27785+ * develop this driver.
27786+ *
27787+ **************************************************************************/
27788+/*
27789+ * Authors:
27790+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
27791+ */
27792+
27793+#include <drm/drmP.h>
27794+#include "psb_drv.h"
27795+#include "psb_reg.h"
27796+#include "psb_intel_reg.h"
27797+#include "psb_scene.h"
27798+#include "psb_msvdx.h"
27799+#include "lnc_topaz.h"
27800+#include <linux/spinlock.h>
27801+#include "psb_powermgmt.h"
27802+#define PSB_2D_TIMEOUT_MSEC 100
27803+
27804+void psb_reset(struct drm_psb_private *dev_priv, int reset_2d)
27805+{
27806+ uint32_t val;
27807+
27808+ val = _PSB_CS_RESET_BIF_RESET |
27809+ _PSB_CS_RESET_DPM_RESET |
27810+ _PSB_CS_RESET_TA_RESET |
27811+ _PSB_CS_RESET_USE_RESET |
27812+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET;
27813+
27814+ if (reset_2d)
27815+ val |= _PSB_CS_RESET_TWOD_RESET;
27816+
27817+ PSB_WSGX32(val, PSB_CR_SOFT_RESET);
27818+ (void) PSB_RSGX32(PSB_CR_SOFT_RESET);
27819+
27820+ udelay(100);
27821+
27822+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
27823+ wmb();
27824+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
27825+ PSB_CR_BIF_CTRL);
27826+ wmb();
27827+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
27828+
27829+ udelay(100);
27830+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
27831+ PSB_CR_BIF_CTRL);
27832+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
27833+}
27834+
27835+void psb_print_pagefault(struct drm_psb_private *dev_priv)
27836+{
27837+ uint32_t val;
27838+ uint32_t addr;
27839+
27840+ val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
27841+ addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
27842+
27843+ if (val) {
27844+ if (val & _PSB_CBI_STAT_PF_N_RW)
27845+ DRM_ERROR("Poulsbo MMU page fault:\n");
27846+ else
27847+ DRM_ERROR("Poulsbo MMU read / write "
27848+ "protection fault:\n");
27849+
27850+ if (val & _PSB_CBI_STAT_FAULT_CACHE)
27851+ DRM_ERROR("\tCache requestor.\n");
27852+ if (val & _PSB_CBI_STAT_FAULT_TA)
27853+ DRM_ERROR("\tTA requestor.\n");
27854+ if (val & _PSB_CBI_STAT_FAULT_VDM)
27855+ DRM_ERROR("\tVDM requestor.\n");
27856+ if (val & _PSB_CBI_STAT_FAULT_2D)
27857+ DRM_ERROR("\t2D requestor.\n");
27858+ if (val & _PSB_CBI_STAT_FAULT_PBE)
27859+ DRM_ERROR("\tPBE requestor.\n");
27860+ if (val & _PSB_CBI_STAT_FAULT_TSP)
27861+ DRM_ERROR("\tTSP requestor.\n");
27862+ if (val & _PSB_CBI_STAT_FAULT_ISP)
27863+ DRM_ERROR("\tISP requestor.\n");
27864+ if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
27865+ DRM_ERROR("\tUSSEPDS requestor.\n");
27866+ if (val & _PSB_CBI_STAT_FAULT_HOST)
27867+ DRM_ERROR("\tHost requestor.\n");
27868+
27869+ DRM_ERROR("\tMMU failing address is 0x%08x.\n",
27870+ (unsigned) addr);
27871+ }
27872+}
27873+
27874+void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
27875+{
27876+ struct timer_list *wt = &dev_priv->watchdog_timer;
27877+ unsigned long irq_flags;
27878+
27879+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
27880+ if (dev_priv->timer_available && !timer_pending(wt)) {
27881+ wt->expires = jiffies + PSB_WATCHDOG_DELAY;
27882+ add_timer(wt);
27883+ }
27884+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
27885+}
27886+
27887+#if 0
27888+static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv,
27889+ unsigned int engine, int *lockup,
27890+ int *idle)
27891+{
27892+ uint32_t received_seq;
27893+
27894+ received_seq = dev_priv->comm[engine << 4];
27895+ spin_lock(&dev_priv->sequence_lock);
27896+ *idle = (received_seq == dev_priv->sequence[engine]);
27897+ spin_unlock(&dev_priv->sequence_lock);
27898+
27899+ if (*idle) {
27900+ dev_priv->idle[engine] = 1;
27901+ *lockup = 0;
27902+ return;
27903+ }
27904+
27905+ if (dev_priv->idle[engine]) {
27906+ dev_priv->idle[engine] = 0;
27907+ dev_priv->last_sequence[engine] = received_seq;
27908+ *lockup = 0;
27909+ return;
27910+ }
27911+
27912+ *lockup = (dev_priv->last_sequence[engine] == received_seq);
27913+}
27914+
27915+#endif
27916+static void psb_watchdog_func(unsigned long data)
27917+{
27918+ struct drm_psb_private *dev_priv = (struct drm_psb_private *) data;
27919+ int lockup;
27920+ int msvdx_lockup;
27921+ int msvdx_idle;
27922+ int lockup_2d;
27923+ int idle_2d;
27924+ int idle;
27925+ unsigned long irq_flags;
27926+
27927+ psb_scheduler_lockup(dev_priv, &lockup, &idle);
27928+ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
27929+
27930+#if 0
27931+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
27932+#else
27933+ lockup_2d = false;
27934+ idle_2d = true;
27935+#endif
27936+ if (lockup || msvdx_lockup || lockup_2d) {
27937+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
27938+ dev_priv->timer_available = 0;
27939+ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
27940+ irq_flags);
27941+ if (lockup) {
27942+ /*comment out this to aviod illegal access for ospm*/
27943+ /*psb_print_pagefault(dev_priv);*/
27944+ schedule_work(&dev_priv->watchdog_wq);
27945+ }
27946+ if (msvdx_lockup)
27947+ schedule_work(&dev_priv->msvdx_watchdog_wq);
27948+ }
27949+ if (!idle || !msvdx_idle || !idle_2d)
27950+ psb_schedule_watchdog(dev_priv);
27951+}
27952+
27953+void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
27954+{
27955+ struct drm_psb_private *dev_priv = dev->dev_private;
27956+ struct psb_msvdx_cmd_queue *msvdx_cmd;
27957+ struct list_head *list, *next;
27958+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
27959+
27960+ /*Flush the msvdx cmd queue and signal all fences in the queue */
27961+ list_for_each_safe(list, next, &msvdx_priv->msvdx_queue) {
27962+ msvdx_cmd =
27963+ list_entry(list, struct psb_msvdx_cmd_queue, head);
27964+ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
27965+ msvdx_cmd->sequence);
27966+ msvdx_priv->msvdx_current_sequence = msvdx_cmd->sequence;
27967+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
27968+ msvdx_priv->msvdx_current_sequence,
27969+ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
27970+ list_del(list);
27971+ kfree(msvdx_cmd->cmd);
27972+ kfree(msvdx_cmd
27973+ );
27974+ }
27975+}
27976+
27977+static void psb_msvdx_reset_wq(struct work_struct *work)
27978+{
27979+ struct drm_psb_private *dev_priv =
27980+ container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
27981+ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
27982+
27983+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
27984+ unsigned long irq_flags;
27985+
27986+ mutex_lock(&msvdx_priv->msvdx_mutex);
27987+ msvdx_priv->msvdx_needs_reset = 1;
27988+ msvdx_priv->msvdx_current_sequence++;
27989+ PSB_DEBUG_GENERAL
27990+ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
27991+ msvdx_priv->msvdx_current_sequence);
27992+
27993+ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
27994+ msvdx_priv->msvdx_current_sequence,
27995+ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
27996+
27997+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
27998+ dev_priv->timer_available = 1;
27999+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28000+
28001+ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
28002+ psb_msvdx_flush_cmd_queue(scheduler->dev);
28003+ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
28004+
28005+ psb_schedule_watchdog(dev_priv);
28006+ mutex_unlock(&msvdx_priv->msvdx_mutex);
28007+}
28008+
28009+static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
28010+{
28011+ struct psb_xhw_buf buf;
28012+ uint32_t bif_ctrl;
28013+
28014+ INIT_LIST_HEAD(&buf.head);
28015+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
28016+ bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
28017+ PSB_WSGX32(bif_ctrl |
28018+ _PSB_CB_CTRL_CLEAR_FAULT |
28019+ _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
28020+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
28021+ udelay(100);
28022+ PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
28023+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
28024+ return psb_xhw_reset_dpm(dev_priv, &buf);
28025+}
28026+
28027+/*
28028+ * Block command submission and reset hardware and schedulers.
28029+ */
28030+
28031+static void psb_reset_wq(struct work_struct *work)
28032+{
28033+ struct drm_psb_private *dev_priv =
28034+ container_of(work, struct drm_psb_private, watchdog_wq);
28035+ int lockup_2d;
28036+ int idle_2d;
28037+ unsigned long irq_flags;
28038+ int ret;
28039+ int reset_count = 0;
28040+ struct psb_xhw_buf buf;
28041+ uint32_t xhw_lockup;
28042+
28043+ /*
28044+ * Block command submission.
28045+ */
28046+ PSB_DEBUG_PM("ioctl: psb_pl_reference\n");
28047+
28048+ if (!powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, false)) {
28049+ DRM_ERROR("lock up hapeens when island off \n");
28050+ return;
28051+ }
28052+ mutex_lock(&dev_priv->reset_mutex);
28053+
28054+ INIT_LIST_HEAD(&buf.head);
28055+ ret = psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup);
28056+ if (likely(ret == 0)) {
28057+ if (psb_extend_timeout(dev_priv, xhw_lockup) == 0) {
28058+ /*
28059+ * no lockup, just re-schedule
28060+ */
28061+ spin_lock_irqsave(&dev_priv->watchdog_lock,
28062+ irq_flags);
28063+ dev_priv->timer_available = 1;
28064+ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
28065+ irq_flags);
28066+ psb_schedule_watchdog(dev_priv);
28067+ mutex_unlock(&dev_priv->reset_mutex);
28068+ return;
28069+ }
28070+ } else {
28071+ DRM_ERROR("Check lockup returned %d\n", ret);
28072+ }
28073+#if 0
28074+ mdelay(PSB_2D_TIMEOUT_MSEC);
28075+
28076+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
28077+
28078+ if (lockup_2d) {
28079+ uint32_t seq_2d;
28080+ spin_lock(&dev_priv->sequence_lock);
28081+ seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
28082+ spin_unlock(&dev_priv->sequence_lock);
28083+ psb_fence_error(dev_priv->scheduler.dev,
28084+ PSB_ENGINE_2D,
28085+ seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
28086+ DRM_INFO("Resetting 2D engine.\n");
28087+ }
28088+
28089+ psb_reset(dev_priv, lockup_2d);
28090+#else
28091+ (void) lockup_2d;
28092+ (void) idle_2d;
28093+ psb_reset(dev_priv, 0);
28094+#endif
28095+ (void) psb_xhw_mmu_reset(dev_priv);
28096+ DRM_INFO("Resetting scheduler.\n");
28097+ psb_scheduler_pause(dev_priv);
28098+ psb_scheduler_reset(dev_priv, -EBUSY);
28099+ psb_scheduler_ta_mem_check(dev_priv);
28100+
28101+ while (dev_priv->ta_mem &&
28102+ !dev_priv->force_ta_mem_load && ++reset_count < 10) {
28103+ struct ttm_fence_object *fence;
28104+
28105+ /*
28106+ * TA memory is currently fenced so offsets
28107+ * are valid. Reload offsets into the dpm now.
28108+ */
28109+
28110+ struct psb_xhw_buf buf;
28111+ INIT_LIST_HEAD(&buf.head);
28112+
28113+ mdelay(100);
28114+
28115+ fence = dev_priv->ta_mem->ta_memory->sync_obj;
28116+
28117+ DRM_INFO("Reloading TA memory at offset "
28118+		 "0x%08lx to 0x%08lx seq %u\n",
28119+ dev_priv->ta_mem->ta_memory->offset,
28120+ dev_priv->ta_mem->ta_memory->offset +
28121+ (dev_priv->ta_mem->ta_memory->num_pages << PAGE_SHIFT),
28122+ fence->sequence);
28123+
28124+ fence = dev_priv->ta_mem->hw_data->sync_obj;
28125+
28126+ DRM_INFO("Reloading TA HW memory at offset "
28127+ "0x%08lx to 0x%08lx seq %u\n",
28128+ dev_priv->ta_mem->hw_data->offset,
28129+ dev_priv->ta_mem->hw_data->offset +
28130+ (dev_priv->ta_mem->hw_data->num_pages << PAGE_SHIFT),
28131+ fence->sequence);
28132+
28133+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
28134+ PSB_TA_MEM_FLAG_TA |
28135+ PSB_TA_MEM_FLAG_RASTER |
28136+ PSB_TA_MEM_FLAG_HOSTA |
28137+ PSB_TA_MEM_FLAG_HOSTD |
28138+ PSB_TA_MEM_FLAG_INIT,
28139+ dev_priv->ta_mem->ta_memory->
28140+ offset,
28141+ dev_priv->ta_mem->hw_data->
28142+ offset,
28143+ dev_priv->ta_mem->hw_cookie);
28144+ if (!ret)
28145+ break;
28146+
28147+ DRM_INFO("Reloading TA memory failed. Retrying.\n");
28148+ psb_reset(dev_priv, 0);
28149+ (void) psb_xhw_mmu_reset(dev_priv);
28150+ }
28151+
28152+ psb_scheduler_restart(dev_priv);
28153+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28154+ dev_priv->timer_available = 1;
28155+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28156+ mutex_unlock(&dev_priv->reset_mutex);
28157+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
28158+}
28159+
28160+void psb_watchdog_init(struct drm_psb_private *dev_priv)
28161+{
28162+ struct timer_list *wt = &dev_priv->watchdog_timer;
28163+ unsigned long irq_flags;
28164+
28165+ spin_lock_init(&dev_priv->watchdog_lock);
28166+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28167+ init_timer(wt);
28168+ INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
28169+ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
28170+ wt->data = (unsigned long) dev_priv;
28171+ wt->function = &psb_watchdog_func;
28172+ dev_priv->timer_available = 1;
28173+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28174+}
28175+
28176+void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
28177+{
28178+ unsigned long irq_flags;
28179+
28180+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28181+ dev_priv->timer_available = 0;
28182+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28183+ (void) del_timer_sync(&dev_priv->watchdog_timer);
28184+}
28185+
28186+static void psb_lid_timer_func(unsigned long data)
28187+{
28188+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
28189+ struct drm_device * dev = (struct drm_device *)dev_priv->dev;
28190+ struct timer_list * lid_timer = &dev_priv->lid_timer;
28191+ unsigned long irq_flags;
28192+ u32 * lid_state = dev_priv->lid_state;
28193+ u32 pp_status;
28194+
28195+ if(*lid_state == dev_priv->lid_last_state)
28196+ goto lid_timer_schedule;
28197+
28198+ if((*lid_state) & 0x01) {
28199+ /*lid state is open*/
28200+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
28201+ do {
28202+ pp_status = REG_READ(PP_STATUS);
28203+ } while((pp_status & PP_ON) == 0);
28204+
28205+ /*FIXME: should be backlight level before*/
28206+ psb_intel_lvds_set_brightness(dev, 100);
28207+ } else {
28208+ psb_intel_lvds_set_brightness(dev, 0);
28209+
28210+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
28211+ do {
28212+ pp_status = REG_READ(PP_STATUS);
28213+		} while(pp_status & PP_ON);
28214+ }
28215+ //printk(KERN_INFO"%s: lid: closed\n", __FUNCTION__);
28216+
28217+ dev_priv->lid_last_state = *lid_state;
28218+
28219+lid_timer_schedule:
28220+ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
28221+ if(!timer_pending(lid_timer)){
28222+ lid_timer->expires = jiffies + PSB_LID_DELAY;
28223+ add_timer(lid_timer);
28224+ }
28225+ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
28226+}
28227+
28228+void psb_lid_timer_init(struct drm_psb_private *dev_priv)
28229+{
28230+ struct timer_list * lid_timer = &dev_priv->lid_timer;
28231+ unsigned long irq_flags;
28232+
28233+ spin_lock_init(&dev_priv->lid_lock);
28234+ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
28235+
28236+ init_timer(lid_timer);
28237+
28238+ lid_timer->data = (unsigned long)dev_priv;
28239+ lid_timer->function = psb_lid_timer_func;
28240+ lid_timer->expires = jiffies + PSB_LID_DELAY;
28241+
28242+ add_timer(lid_timer);
28243+ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
28244+}
28245+
28246+void psb_lid_timer_takedown(struct drm_psb_private * dev_priv)
28247+{
28248+ del_timer_sync(&dev_priv->lid_timer);
28249+}
28250+
28251diff --git a/drivers/gpu/drm/psb/psb_scene.c b/drivers/gpu/drm/psb/psb_scene.c
28252new file mode 100644
28253index 0000000..42b823d
28254--- /dev/null
28255+++ b/drivers/gpu/drm/psb/psb_scene.c
28256@@ -0,0 +1,523 @@
28257+/**************************************************************************
28258+ * Copyright (c) 2007, Intel Corporation.
28259+ * All Rights Reserved.
28260+ *
28261+ * This program is free software; you can redistribute it and/or modify it
28262+ * under the terms and conditions of the GNU General Public License,
28263+ * version 2, as published by the Free Software Foundation.
28264+ *
28265+ * This program is distributed in the hope it will be useful, but WITHOUT
28266+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28267+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
28268+ * more details.
28269+ *
28270+ * You should have received a copy of the GNU General Public License along with
28271+ * this program; if not, write to the Free Software Foundation, Inc.,
28272+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28273+ *
28274+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
28275+ * develop this driver.
28276+ *
28277+ **************************************************************************/
28278+/*
28279+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
28280+ */
28281+
28282+#include <drm/drmP.h>
28283+#include "psb_drv.h"
28284+#include "psb_scene.h"
28285+#include "psb_powermgmt.h"
28286+
28287+void psb_clear_scene_atomic(struct psb_scene *scene)
28288+{
28289+ int i;
28290+ struct page *page;
28291+ void *v;
28292+
28293+ for (i = 0; i < scene->clear_num_pages; ++i) {
28294+ page = ttm_tt_get_page(scene->hw_data->ttm,
28295+ scene->clear_p_start + i);
28296+ if (in_irq())
28297+ v = kmap_atomic(page, KM_IRQ0);
28298+ else
28299+ v = kmap_atomic(page, KM_USER0);
28300+
28301+ memset(v, 0, PAGE_SIZE);
28302+
28303+ if (in_irq())
28304+ kunmap_atomic(v, KM_IRQ0);
28305+ else
28306+ kunmap_atomic(v, KM_USER0);
28307+ }
28308+}
28309+
28310+int psb_clear_scene(struct psb_scene *scene)
28311+{
28312+ struct ttm_bo_kmap_obj bmo;
28313+ bool is_iomem;
28314+ void *addr;
28315+
28316+ int ret = ttm_bo_kmap(scene->hw_data, scene->clear_p_start,
28317+ scene->clear_num_pages, &bmo);
28318+
28319+ PSB_DEBUG_RENDER("Scene clear.\n");
28320+ if (ret)
28321+ return ret;
28322+
28323+ addr = ttm_kmap_obj_virtual(&bmo, &is_iomem);
28324+ BUG_ON(is_iomem);
28325+ memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
28326+ ttm_bo_kunmap(&bmo);
28327+
28328+ return 0;
28329+}
28330+
28331+static void psb_destroy_scene(struct kref *kref)
28332+{
28333+ struct psb_scene *scene =
28334+ container_of(kref, struct psb_scene, kref);
28335+
28336+ PSB_DEBUG_RENDER("Scene destroy.\n");
28337+ psb_scheduler_remove_scene_refs(scene);
28338+ ttm_bo_unref(&scene->hw_data);
28339+ kfree(scene);
28340+}
28341+
28342+void psb_scene_unref(struct psb_scene **p_scene)
28343+{
28344+ struct psb_scene *scene = *p_scene;
28345+
28346+ PSB_DEBUG_RENDER("Scene unref.\n");
28347+ *p_scene = NULL;
28348+ kref_put(&scene->kref, &psb_destroy_scene);
28349+}
28350+
28351+struct psb_scene *psb_scene_ref(struct psb_scene *src)
28352+{
28353+ PSB_DEBUG_RENDER("Scene ref.\n");
28354+ kref_get(&src->kref);
28355+ return src;
28356+}
28357+
28358+static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
28359+ uint32_t w, uint32_t h)
28360+{
28361+ struct drm_psb_private *dev_priv =
28362+ (struct drm_psb_private *) dev->dev_private;
28363+ struct ttm_bo_device *bdev = &dev_priv->bdev;
28364+ int ret = -EINVAL;
28365+ struct psb_scene *scene;
28366+ uint32_t bo_size;
28367+ struct psb_xhw_buf buf;
28368+
28369+ PSB_DEBUG_RENDER("Alloc scene w %u h %u msaa %u\n", w & 0xffff, h,
28370+ w >> 16);
28371+
28372+ scene = kzalloc(sizeof(*scene), GFP_KERNEL);
28373+
28374+ if (!scene) {
28375+ DRM_ERROR("Out of memory allocating scene object.\n");
28376+ return NULL;
28377+ }
28378+
28379+ scene->dev = dev;
28380+ scene->w = w;
28381+ scene->h = h;
28382+ scene->hw_scene = NULL;
28383+ kref_init(&scene->kref);
28384+
28385+ INIT_LIST_HEAD(&buf.head);
28386+ ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
28387+ scene->hw_cookie, &bo_size,
28388+ &scene->clear_p_start,
28389+ &scene->clear_num_pages);
28390+ if (ret)
28391+ goto out_err;
28392+
28393+ ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel,
28394+ DRM_PSB_FLAG_MEM_MMU |
28395+ TTM_PL_FLAG_CACHED,
28396+ 0, 0, 1, NULL, &scene->hw_data);
28397+ if (ret)
28398+ goto out_err;
28399+
28400+ return scene;
28401+out_err:
28402+ kfree(scene);
28403+ return NULL;
28404+}
28405+
28406+int psb_validate_scene_pool(struct psb_context *context,
28407+ struct psb_scene_pool *pool,
28408+ uint32_t w,
28409+ uint32_t h,
28410+ int final_pass, struct psb_scene **scene_p)
28411+{
28412+ struct drm_device *dev = pool->dev;
28413+ struct drm_psb_private *dev_priv =
28414+ (struct drm_psb_private *) dev->dev_private;
28415+ struct psb_scene *scene = pool->scenes[pool->cur_scene];
28416+ int ret;
28417+ unsigned long irq_flags;
28418+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
28419+ uint32_t bin_pt_offset;
28420+ uint32_t bin_param_offset;
28421+
28422+ PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n",
28423+ pool->cur_scene);
28424+
28425+ if (unlikely(!dev_priv->ta_mem)) {
28426+ dev_priv->ta_mem =
28427+ psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages);
28428+ if (!dev_priv->ta_mem)
28429+ return -ENOMEM;
28430+
28431+ bin_pt_offset = ~0;
28432+ bin_param_offset = ~0;
28433+ } else {
28434+ bin_pt_offset = dev_priv->ta_mem->hw_data->offset;
28435+ bin_param_offset = dev_priv->ta_mem->ta_memory->offset;
28436+ }
28437+
28438+ pool->w = w;
28439+ pool->h = h;
28440+ if (scene && (scene->w != pool->w || scene->h != pool->h)) {
28441+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28442+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
28443+ spin_unlock_irqrestore(&scheduler->lock,
28444+ irq_flags);
28445+ DRM_ERROR("Trying to resize a dirty scene.\n");
28446+ return -EINVAL;
28447+ }
28448+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28449+ psb_scene_unref(&pool->scenes[pool->cur_scene]);
28450+ scene = NULL;
28451+ }
28452+
28453+ if (!scene) {
28454+ pool->scenes[pool->cur_scene] = scene =
28455+ psb_alloc_scene(pool->dev, pool->w, pool->h);
28456+
28457+ if (!scene)
28458+ return -ENOMEM;
28459+
28460+ scene->flags = PSB_SCENE_FLAG_CLEARED;
28461+ }
28462+
28463+ ret = psb_validate_kernel_buffer(context, scene->hw_data,
28464+ PSB_ENGINE_TA,
28465+ PSB_BO_FLAG_SCENE |
28466+ PSB_GPU_ACCESS_READ |
28467+ PSB_GPU_ACCESS_WRITE, 0);
28468+ if (unlikely(ret != 0))
28469+ return ret;
28470+
28471+ /*
28472+ * FIXME: We need atomic bit manipulation here for the
28473+ * scheduler. For now use the spinlock.
28474+ */
28475+
28476+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28477+ if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) {
28478+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28479+ PSB_DEBUG_RENDER("Waiting to clear scene memory.\n");
28480+ mutex_lock(&scene->hw_data->mutex);
28481+
28482+ ret = ttm_bo_wait(scene->hw_data, 0, 1, 0);
28483+ mutex_unlock(&scene->hw_data->mutex);
28484+ if (ret)
28485+ return ret;
28486+
28487+ ret = psb_clear_scene(scene);
28488+
28489+ if (ret)
28490+ return ret;
28491+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28492+ scene->flags |= PSB_SCENE_FLAG_CLEARED;
28493+ }
28494+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28495+
28496+ ret = psb_validate_kernel_buffer(context, dev_priv->ta_mem->hw_data,
28497+ PSB_ENGINE_TA,
28498+ PSB_BO_FLAG_SCENE |
28499+ PSB_GPU_ACCESS_READ |
28500+ PSB_GPU_ACCESS_WRITE, 0);
28501+ if (unlikely(ret != 0))
28502+ return ret;
28503+
28504+ ret =
28505+ psb_validate_kernel_buffer(context,
28506+ dev_priv->ta_mem->ta_memory,
28507+ PSB_ENGINE_TA,
28508+ PSB_BO_FLAG_SCENE |
28509+ PSB_GPU_ACCESS_READ |
28510+ PSB_GPU_ACCESS_WRITE, 0);
28511+
28512+ if (unlikely(ret != 0))
28513+ return ret;
28514+
28515+ if (unlikely(bin_param_offset !=
28516+ dev_priv->ta_mem->ta_memory->offset ||
28517+ bin_pt_offset !=
28518+ dev_priv->ta_mem->hw_data->offset ||
28519+ dev_priv->force_ta_mem_load)) {
28520+
28521+ struct psb_xhw_buf buf;
28522+
28523+ INIT_LIST_HEAD(&buf.head);
28524+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
28525+ PSB_TA_MEM_FLAG_TA |
28526+ PSB_TA_MEM_FLAG_RASTER |
28527+ PSB_TA_MEM_FLAG_HOSTA |
28528+ PSB_TA_MEM_FLAG_HOSTD |
28529+ PSB_TA_MEM_FLAG_INIT,
28530+ dev_priv->ta_mem->ta_memory->
28531+ offset,
28532+ dev_priv->ta_mem->hw_data->
28533+ offset,
28534+ dev_priv->ta_mem->hw_cookie);
28535+ if (ret)
28536+ return ret;
28537+
28538+ dev_priv->force_ta_mem_load = 0;
28539+ }
28540+
28541+ if (final_pass) {
28542+
28543+ /*
28544+ * Clear the scene on next use. Advance the scene counter.
28545+ */
28546+
28547+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28548+ scene->flags &= ~PSB_SCENE_FLAG_CLEARED;
28549+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28550+ pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes;
28551+ }
28552+
28553+ *scene_p = psb_scene_ref(scene);
28554+ return 0;
28555+}
28556+
28557+static void psb_scene_pool_destroy(struct kref *kref)
28558+{
28559+ struct psb_scene_pool *pool =
28560+ container_of(kref, struct psb_scene_pool, kref);
28561+ int i;
28562+ PSB_DEBUG_RENDER("Scene pool destroy.\n");
28563+
28564+ for (i = 0; i < pool->num_scenes; ++i) {
28565+ PSB_DEBUG_RENDER("scenes %d is 0x%08lx\n", i,
28566+ (unsigned long) pool->scenes[i]);
28567+ if (pool->scenes[i])
28568+ psb_scene_unref(&pool->scenes[i]);
28569+ }
28570+
28571+ kfree(pool);
28572+}
28573+
28574+void psb_scene_pool_unref(struct psb_scene_pool **p_pool)
28575+{
28576+ struct psb_scene_pool *pool = *p_pool;
28577+
28578+ PSB_DEBUG_RENDER("Scene pool unref\n");
28579+ *p_pool = NULL;
28580+ kref_put(&pool->kref, &psb_scene_pool_destroy);
28581+}
28582+
28583+struct psb_scene_pool *psb_scene_pool_ref(struct psb_scene_pool *src)
28584+{
28585+ kref_get(&src->kref);
28586+ return src;
28587+}
28588+
28589+/*
28590+ * Callback for base object manager.
28591+ */
28592+
28593+static void psb_scene_pool_release(struct ttm_base_object **p_base)
28594+{
28595+ struct ttm_base_object *base = *p_base;
28596+ struct psb_scene_pool *pool =
28597+ container_of(base, struct psb_scene_pool, base);
28598+ *p_base = NULL;
28599+
28600+ psb_scene_pool_unref(&pool);
28601+}
28602+
28603+struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file *file_priv,
28604+ uint32_t handle,
28605+ int check_owner)
28606+{
28607+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
28608+ struct ttm_base_object *base;
28609+ struct psb_scene_pool *pool;
28610+
28611+
28612+ base = ttm_base_object_lookup(tfile, handle);
28613+ if (!base || (base->object_type != PSB_USER_OBJECT_SCENE_POOL)) {
28614+ DRM_ERROR("Could not find scene pool object 0x%08x\n",
28615+ handle);
28616+ return NULL;
28617+ }
28618+
28619+ if (check_owner && tfile != base->tfile && !base->shareable) {
28620+ ttm_base_object_unref(&base);
28621+ return NULL;
28622+ }
28623+
28624+ pool = container_of(base, struct psb_scene_pool, base);
28625+ kref_get(&pool->kref);
28626+ ttm_base_object_unref(&base);
28627+ return pool;
28628+}
28629+
28630+struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *file_priv,
28631+ int shareable,
28632+ uint32_t num_scenes,
28633+ uint32_t w, uint32_t h)
28634+{
28635+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
28636+ struct drm_device *dev = file_priv->minor->dev;
28637+ struct psb_scene_pool *pool;
28638+ int ret;
28639+
28640+ PSB_DEBUG_RENDER("Scene pool alloc\n");
28641+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
28642+ if (!pool) {
28643+ DRM_ERROR("Out of memory allocating scene pool object.\n");
28644+ return NULL;
28645+ }
28646+ pool->w = w;
28647+ pool->h = h;
28648+ pool->dev = dev;
28649+ pool->num_scenes = num_scenes;
28650+ kref_init(&pool->kref);
28651+
28652+ /*
28653+ * The base object holds a reference.
28654+ */
28655+
28656+ kref_get(&pool->kref);
28657+ ret = ttm_base_object_init(tfile, &pool->base, shareable,
28658+ PSB_USER_OBJECT_SCENE_POOL,
28659+ &psb_scene_pool_release, NULL);
28660+ if (unlikely(ret != 0))
28661+ goto out_err;
28662+
28663+ return pool;
28664+out_err:
28665+ kfree(pool);
28666+ return NULL;
28667+}
28668+
28669+/*
28670+ * Code to support multiple ta memory buffers.
28671+ */
28672+
28673+static void psb_ta_mem_destroy(struct kref *kref)
28674+{
28675+ struct psb_ta_mem *ta_mem =
28676+ container_of(kref, struct psb_ta_mem, kref);
28677+
28678+ ttm_bo_unref(&ta_mem->hw_data);
28679+ ttm_bo_unref(&ta_mem->ta_memory);
28680+ kfree(ta_mem);
28681+}
28682+
28683+void psb_ta_mem_unref(struct psb_ta_mem **p_ta_mem)
28684+{
28685+ struct psb_ta_mem *ta_mem = *p_ta_mem;
28686+ *p_ta_mem = NULL;
28687+ kref_put(&ta_mem->kref, psb_ta_mem_destroy);
28688+}
28689+
28690+struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src)
28691+{
28692+ kref_get(&src->kref);
28693+ return src;
28694+}
28695+
28696+struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages)
28697+{
28698+ struct drm_psb_private *dev_priv =
28699+ (struct drm_psb_private *) dev->dev_private;
28700+ struct ttm_bo_device *bdev = &dev_priv->bdev;
28701+ int ret = -EINVAL;
28702+ struct psb_ta_mem *ta_mem;
28703+ uint32_t bo_size;
28704+ uint32_t ta_min_size;
28705+ struct psb_xhw_buf buf;
28706+
28707+ INIT_LIST_HEAD(&buf.head);
28708+
28709+ ta_mem = kzalloc(sizeof(*ta_mem), GFP_KERNEL);
28710+
28711+ if (!ta_mem) {
28712+ DRM_ERROR("Out of memory allocating parameter memory.\n");
28713+ return NULL;
28714+ }
28715+
28716+ kref_init(&ta_mem->kref);
28717+ ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages,
28718+ ta_mem->hw_cookie,
28719+ &bo_size,
28720+ &ta_min_size);
28721+ if (ret == -ENOMEM) {
28722+ DRM_ERROR("Parameter memory size is too small.\n");
28723+ DRM_INFO("Attempted to use %u kiB of parameter memory.\n",
28724+ (unsigned int) (pages * (PAGE_SIZE / 1024)));
28725+ DRM_INFO("The Xpsb driver thinks this is too small and\n");
28726+ DRM_INFO("suggests %u kiB. Check the psb DRM\n",
28727+ (unsigned int)(ta_min_size / 1024));
28728+ DRM_INFO("\"ta_mem_size\" parameter!\n");
28729+ }
28730+ if (ret)
28731+ goto out_err0;
28732+
28733+ ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel,
28734+ DRM_PSB_FLAG_MEM_MMU,
28735+ 0, 0, 0, NULL,
28736+ &ta_mem->hw_data);
28737+ if (ret)
28738+ goto out_err0;
28739+
28740+ bo_size = pages * PAGE_SIZE;
28741+ ret =
28742+ ttm_buffer_object_create(bdev, bo_size,
28743+ ttm_bo_type_kernel,
28744+ DRM_PSB_FLAG_MEM_RASTGEOM,
28745+ 0,
28746+ 1024 * 1024 >> PAGE_SHIFT, 0,
28747+ NULL,
28748+ &ta_mem->ta_memory);
28749+ if (ret)
28750+ goto out_err1;
28751+
28752+ return ta_mem;
28753+out_err1:
28754+ ttm_bo_unref(&ta_mem->hw_data);
28755+out_err0:
28756+ kfree(ta_mem);
28757+ return NULL;
28758+}
28759+
28760+int drm_psb_scene_unref_ioctl(struct drm_device *dev,
28761+ void *data, struct drm_file *file_priv)
28762+{
28763+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
28764+ struct drm_psb_scene *scene = (struct drm_psb_scene *) data;
28765+ int ret = 0;
28766+ struct drm_psb_private *dev_priv = psb_priv(dev);
28767+ if (!scene->handle_valid)
28768+ return 0;
28769+ powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true);
28770+
28771+ ret =
28772+ ttm_ref_object_base_unref(tfile, scene->handle, TTM_REF_USAGE);
28773+ if (unlikely(ret != 0))
28774+ DRM_ERROR("Could not unreference a scene object.\n");
28775+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
28776+ if (drm_psb_ospm && IS_MRST(dev))
28777+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
28778+ return ret;
28779+}
28780diff --git a/drivers/gpu/drm/psb/psb_scene.h b/drivers/gpu/drm/psb/psb_scene.h
28781new file mode 100644
28782index 0000000..2a4f8bc
28783--- /dev/null
28784+++ b/drivers/gpu/drm/psb/psb_scene.h
28785@@ -0,0 +1,119 @@
28786+/**************************************************************************
28787+ * Copyright (c) 2007, Intel Corporation.
28788+ * All Rights Reserved.
28789+ *
28790+ * This program is free software; you can redistribute it and/or modify it
28791+ * under the terms and conditions of the GNU General Public License,
28792+ * version 2, as published by the Free Software Foundation.
28793+ *
28794+ * This program is distributed in the hope it will be useful, but WITHOUT
28795+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28796+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
28797+ * more details.
28798+ *
28799+ * You should have received a copy of the GNU General Public License along with
28800+ * this program; if not, write to the Free Software Foundation, Inc.,
28801+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28802+ *
28803+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
28804+ * develop this driver.
28805+ *
28806+ **************************************************************************/
28807+/*
28808+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
28809+ */
28810+
28811+#ifndef _PSB_SCENE_H_
28812+#define _PSB_SCENE_H_
28813+
28814+#include "ttm/ttm_object.h"
28815+
28816+#define PSB_USER_OBJECT_SCENE_POOL ttm_driver_type0
28817+#define PSB_USER_OBJECT_TA_MEM ttm_driver_type1
28818+#define PSB_MAX_NUM_SCENES 8
28819+
28820+struct psb_hw_scene;
28821+struct psb_hw_ta_mem;
28822+
28823+struct psb_scene_pool {
28824+ struct ttm_base_object base;
28825+ struct drm_device *dev;
28826+ struct kref kref;
28827+ uint32_t w;
28828+ uint32_t h;
28829+ uint32_t cur_scene;
28830+ struct psb_scene *scenes[PSB_MAX_NUM_SCENES];
28831+ uint32_t num_scenes;
28832+};
28833+
28834+struct psb_scene {
28835+ struct drm_device *dev;
28836+ struct kref kref;
28837+ uint32_t hw_cookie[PSB_SCENE_HW_COOKIE_SIZE];
28838+ uint32_t bo_size;
28839+ uint32_t w;
28840+ uint32_t h;
28841+ struct psb_ta_mem *ta_mem;
28842+ struct psb_hw_scene *hw_scene;
28843+ struct ttm_buffer_object *hw_data;
28844+ uint32_t flags;
28845+ uint32_t clear_p_start;
28846+ uint32_t clear_num_pages;
28847+};
28848+
28849+#if 0
28850+struct psb_scene_entry {
28851+ struct list_head head;
28852+ struct psb_scene *scene;
28853+};
28854+
28855+struct psb_user_scene {
28856+ struct ttm_base_object base;
28857+ struct drm_device *dev;
28858+};
28859+
28860+#endif
28861+
28862+struct psb_ta_mem {
28863+ struct ttm_base_object base;
28864+ struct drm_device *dev;
28865+ struct kref kref;
28866+ uint32_t hw_cookie[PSB_TA_MEM_HW_COOKIE_SIZE];
28867+ uint32_t bo_size;
28868+ struct ttm_buffer_object *ta_memory;
28869+ struct ttm_buffer_object *hw_data;
28870+ int is_deallocating;
28871+ int deallocating_scheduled;
28872+};
28873+
28874+extern struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
28875+ int shareable,
28876+ uint32_t num_scenes,
28877+ uint32_t w, uint32_t h);
28878+extern void psb_scene_pool_unref(struct psb_scene_pool **pool);
28879+extern struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file
28880+ *priv,
28881+ uint32_t handle,
28882+ int check_owner);
28883+extern int psb_validate_scene_pool(struct psb_context *context,
28884+ struct psb_scene_pool *pool,
28885+ uint32_t w,
28886+ uint32_t h, int final_pass,
28887+ struct psb_scene **scene_p);
28888+extern void psb_scene_unref(struct psb_scene **scene);
28889+extern struct psb_scene *psb_scene_ref(struct psb_scene *src);
28890+extern int drm_psb_scene_unref_ioctl(struct drm_device *dev,
28891+ void *data,
28892+ struct drm_file *file_priv);
28893+
28894+static inline uint32_t psb_scene_pool_handle(struct psb_scene_pool *pool)
28895+{
28896+ return pool->base.hash.key;
28897+}
28898+
28899+extern struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev,
28900+ uint32_t pages);
28901+extern struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src);
28902+extern void psb_ta_mem_unref(struct psb_ta_mem **ta_mem);
28903+
28904+#endif
28905diff --git a/drivers/gpu/drm/psb/psb_schedule.c b/drivers/gpu/drm/psb/psb_schedule.c
28906new file mode 100644
28907index 0000000..9c4e2cd
28908--- /dev/null
28909+++ b/drivers/gpu/drm/psb/psb_schedule.c
28910@@ -0,0 +1,1593 @@
28911+/**************************************************************************
28912+ * Copyright (c) 2007, Intel Corporation.
28913+ * All Rights Reserved.
28914+ *
28915+ * This program is free software; you can redistribute it and/or modify it
28916+ * under the terms and conditions of the GNU General Public License,
28917+ * version 2, as published by the Free Software Foundation.
28918+ *
28919+ * This program is distributed in the hope it will be useful, but WITHOUT
28920+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28921+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
28922+ * more details.
28923+ *
28924+ * You should have received a copy of the GNU General Public License along with
28925+ * this program; if not, write to the Free Software Foundation, Inc.,
28926+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28927+ *
28928+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
28929+ * develop this driver.
28930+ *
28931+ **************************************************************************/
28932+/*
28933+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
28934+ */
28935+
28936+#include <drm/drmP.h>
28937+#include "psb_drm.h"
28938+#include "psb_drv.h"
28939+#include "psb_reg.h"
28940+#include "psb_scene.h"
28941+#include "ttm/ttm_execbuf_util.h"
28942+
28943+#define PSB_ALLOWED_RASTER_RUNTIME (DRM_HZ * 20)
28944+#define PSB_ALLOWED_TA_RUNTIME (DRM_HZ * 20)
28945+#define PSB_RASTER_TIMEOUT (DRM_HZ / 10)
28946+#define PSB_TA_TIMEOUT (DRM_HZ / 10)
28947+
28948+#undef PSB_SOFTWARE_WORKAHEAD
28949+
28950+#ifdef PSB_STABLE_SETTING
28951+
28952+/*
28953+ * Software blocks completely while the engines are working so there can be no
28954+ * overlap.
28955+ */
28956+
28957+#define PSB_WAIT_FOR_RASTER_COMPLETION
28958+#define PSB_WAIT_FOR_TA_COMPLETION
28959+
28960+#elif defined(PSB_PARANOID_SETTING)
28961+/*
28962+ * Software blocks "almost" while the engines are working so there can be no
28963+ * overlap.
28964+ */
28965+
28966+#define PSB_WAIT_FOR_RASTER_COMPLETION
28967+#define PSB_WAIT_FOR_TA_COMPLETION
28968+#define PSB_BE_PARANOID
28969+
28970+#elif defined(PSB_SOME_OVERLAP_BUT_LOCKUP)
28971+/*
28972+ * Software leaps ahead while the rasterizer is running and prepares
28973+ * a new ta job that can be scheduled before the rasterizer has
28974+ * finished.
28975+ */
28976+
28977+#define PSB_WAIT_FOR_TA_COMPLETION
28978+
28979+#elif defined(PSB_SOFTWARE_WORKAHEAD)
28980+/*
28981+ * Don't sync, but allow software to work ahead. and queue a number of jobs.
28982+ * But block overlapping in the scheduler.
28983+ */
28984+
28985+#define PSB_BLOCK_OVERLAP
28986+#define ONLY_ONE_JOB_IN_RASTER_QUEUE
28987+
28988+#endif
28989+
28990+/*
28991+ * Avoid pixelbe pagefaults on C0.
28992+ */
28993+#if 0
28994+#define PSB_BLOCK_OVERLAP
28995+#endif
28996+
28997+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
28998+ struct psb_scheduler *scheduler,
28999+ uint32_t reply_flag);
29000+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
29001+ struct psb_scheduler *scheduler,
29002+ uint32_t reply_flag);
29003+
29004+#ifdef FIX_TG_16
29005+
29006+void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
29007+static int psb_check_2d_idle(struct drm_psb_private *dev_priv);
29008+
29009+#endif
29010+
29011+void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
29012+ int *lockup, int *idle)
29013+{
29014+ unsigned long irq_flags;
29015+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29016+
29017+ *lockup = 0;
29018+ *idle = 1;
29019+
29020+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29021+
29022+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
29023+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
29024+ *lockup = 1;
29025+ }
29026+ if (!*lockup
29027+ && (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
29028+ && time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
29029+ *lockup = 1;
29030+ }
29031+ if (!*lockup)
29032+ *idle = scheduler->idle;
29033+
29034+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29035+}
29036+
29037+static inline void psb_set_idle(struct psb_scheduler *scheduler)
29038+{
29039+ scheduler->idle =
29040+ (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] == NULL) &&
29041+ (scheduler->current_task[PSB_SCENE_ENGINE_TA] == NULL);
29042+ if (scheduler->idle)
29043+ wake_up(&scheduler->idle_queue);
29044+}
29045+
29046+/*
29047+ * Call with the scheduler spinlock held.
29048+ * Assigns a scene context to either the ta or the rasterizer,
29049+ * flushing out other scenes to memory if necessary.
29050+ */
29051+
29052+static int psb_set_scene_fire(struct psb_scheduler *scheduler,
29053+ struct psb_scene *scene,
29054+ int engine, struct psb_task *task)
29055+{
29056+ uint32_t flags = 0;
29057+ struct psb_hw_scene *hw_scene;
29058+ struct drm_device *dev = scene->dev;
29059+ struct drm_psb_private *dev_priv =
29060+ (struct drm_psb_private *) dev->dev_private;
29061+
29062+ hw_scene = scene->hw_scene;
29063+ if (hw_scene && hw_scene->last_scene == scene) {
29064+
29065+ /*
29066+ * Reuse the last hw scene context and delete it from the
29067+ * free list.
29068+ */
29069+
29070+ PSB_DEBUG_RENDER("Reusing hw scene %d.\n",
29071+ hw_scene->context_number);
29072+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
29073+
29074+ /*
29075+ * No hw context initialization to be done.
29076+ */
29077+
29078+ flags |= PSB_SCENE_FLAG_SETUP_ONLY;
29079+ }
29080+
29081+ list_del_init(&hw_scene->head);
29082+
29083+ } else {
29084+ struct list_head *list;
29085+ hw_scene = NULL;
29086+
29087+ /*
29088+ * Grab a new hw scene context.
29089+ */
29090+
29091+ list_for_each(list, &scheduler->hw_scenes) {
29092+ hw_scene =
29093+ list_entry(list, struct psb_hw_scene, head);
29094+ break;
29095+ }
29096+ BUG_ON(!hw_scene);
29097+ PSB_DEBUG_RENDER("New hw scene %d.\n",
29098+ hw_scene->context_number);
29099+
29100+ list_del_init(list);
29101+ }
29102+ scene->hw_scene = hw_scene;
29103+ hw_scene->last_scene = scene;
29104+
29105+ flags |= PSB_SCENE_FLAG_SETUP;
29106+
29107+ /*
29108+ * Switch context and setup the engine.
29109+ */
29110+
29111+ return psb_xhw_scene_bind_fire(dev_priv,
29112+ &task->buf,
29113+ task->flags,
29114+ hw_scene->context_number,
29115+ scene->hw_cookie,
29116+ task->oom_cmds,
29117+ task->oom_cmd_size,
29118+ scene->hw_data->offset,
29119+ engine, flags | scene->flags);
29120+}
29121+
29122+static inline void psb_report_fence(struct drm_psb_private *dev_priv,
29123+ struct psb_scheduler *scheduler,
29124+ uint32_t class,
29125+ uint32_t sequence,
29126+ uint32_t type, int call_handler)
29127+{
29128+ struct psb_scheduler_seq *seq = &scheduler->seq[type];
29129+ struct ttm_fence_device *fdev = &dev_priv->fdev;
29130+ struct ttm_fence_class_manager *fc = &fdev->fence_class[PSB_ENGINE_TA];
29131+ unsigned long irq_flags;
29132+
29133+ /**
29134+ * Block racing poll_ta calls, that take the lock in write mode.
29135+ */
29136+
29137+ read_lock_irqsave(&fc->lock, irq_flags);
29138+ seq->sequence = sequence;
29139+ seq->reported = 0;
29140+ read_unlock_irqrestore(&fc->lock, irq_flags);
29141+
29142+ if (call_handler)
29143+ psb_fence_handler(scheduler->dev, class);
29144+}
29145+
29146+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
29147+ struct psb_scheduler *scheduler);
29148+
29149+static void psb_schedule_ta(struct drm_psb_private *dev_priv,
29150+ struct psb_scheduler *scheduler)
29151+{
29152+ struct psb_task *task = NULL;
29153+ struct list_head *list, *next;
29154+ int pushed_raster_task = 0;
29155+
29156+ PSB_DEBUG_RENDER("schedule ta\n");
29157+
29158+ if (scheduler->idle_count != 0)
29159+ return;
29160+
29161+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL)
29162+ return;
29163+
29164+ if (scheduler->ta_state)
29165+ return;
29166+
29167+ /*
29168+ * Skip the ta stage for rasterization-only
29169+ * tasks. They arrive here to make sure we're rasterizing
29170+ * tasks in the correct order.
29171+ */
29172+
29173+ list_for_each_safe(list, next, &scheduler->ta_queue) {
29174+ task = list_entry(list, struct psb_task, head);
29175+ if (task->task_type != psb_raster_task && task->task_type != psb_flip_task)
29176+ break;
29177+
29178+ if (task->task_type == psb_flip_task) {
29179+ list_del_init(list);
29180+ list_add_tail(list, &scheduler->raster_queue);
29181+ task = NULL;
29182+ }
29183+ else {
29184+ list_del_init(list);
29185+ list_add_tail(list, &scheduler->raster_queue);
29186+ psb_report_fence(dev_priv, scheduler, task->engine,
29187+ task->sequence,
29188+ _PSB_FENCE_TA_DONE_SHIFT, 1);
29189+ task = NULL;
29190+ pushed_raster_task = 1;
29191+ }
29192+ }
29193+
29194+ if (pushed_raster_task)
29195+ psb_schedule_raster(dev_priv, scheduler);
29196+
29197+ if (!task)
29198+ return;
29199+
29200+ /*
29201+ * Still waiting for a vistest?
29202+ */
29203+
29204+ if (scheduler->feedback_task == task)
29205+ return;
29206+
29207+#ifdef ONLY_ONE_JOB_IN_RASTER_QUEUE
29208+
29209+ /*
29210+ * Block ta from trying to use both hardware contexts
29211+ * without the rasterizer starting to render from one of them.
29212+ */
29213+
29214+ if (!list_empty(&scheduler->raster_queue))
29215+ return;
29216+
29217+#endif
29218+
29219+#ifdef PSB_BLOCK_OVERLAP
29220+ /*
29221+ * Make sure rasterizer isn't doing anything.
29222+ */
29223+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
29224+ return;
29225+#endif
29226+ if (list_empty(&scheduler->hw_scenes))
29227+ return;
29228+
29229+#ifdef FIX_TG_16
29230+ if (psb_check_2d_idle(dev_priv))
29231+ return;
29232+#endif
29233+
29234+ list_del_init(&task->head);
29235+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
29236+ scheduler->ta_state = 1;
29237+
29238+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = task;
29239+ scheduler->idle = 0;
29240+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
29241+ scheduler->total_ta_jiffies = 0;
29242+
29243+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
29244+ 0x00000000 : PSB_RF_FIRE_TA;
29245+
29246+ (void) psb_reg_submit(dev_priv, task->ta_cmds, task->ta_cmd_size);
29247+ psb_set_scene_fire(scheduler, task->scene, PSB_SCENE_ENGINE_TA,
29248+ task);
29249+ psb_schedule_watchdog(dev_priv);
29250+}
29251+
29252+static int psb_fire_raster(struct psb_scheduler *scheduler,
29253+ struct psb_task *task)
29254+{
29255+ struct drm_device *dev = scheduler->dev;
29256+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)
29257+ dev->dev_private;
29258+
29259+ PSB_DEBUG_RENDER("Fire raster %d\n", task->sequence);
29260+
29261+ return psb_xhw_fire_raster(dev_priv, &task->buf, task->flags);
29262+}
29263+
29264+/*
29265+ * Take the first rasterization task from the hp raster queue or from the
29266+ * raster queue and fire the rasterizer.
29267+ */
29268+
29269+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
29270+ struct psb_scheduler *scheduler)
29271+{
29272+ struct psb_task *task;
29273+ struct list_head *list;
29274+ int pipe;
29275+
29276+ if (scheduler->idle_count != 0)
29277+ return;
29278+
29279+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) {
29280+ PSB_DEBUG_RENDER("Raster busy.\n");
29281+ return;
29282+ }
29283+#ifdef PSB_BLOCK_OVERLAP
29284+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) {
29285+ PSB_DEBUG_RENDER("TA busy.\n");
29286+ return;
29287+ }
29288+#endif
29289+
29290+ if (!list_empty(&scheduler->hp_raster_queue))
29291+ list = scheduler->hp_raster_queue.next;
29292+ else if (!list_empty(&scheduler->raster_queue))
29293+ list = scheduler->raster_queue.next;
29294+ else {
29295+ PSB_DEBUG_RENDER("Nothing in list\n");
29296+ return;
29297+ }
29298+
29299+ task = list_entry(list, struct psb_task, head);
29300+
29301+ if (task->task_type == psb_flip_task) {
29302+ for (pipe=0; pipe<2; pipe++) {
29303+ if (dev_priv->pipe_active[pipe] == 1)
29304+ psb_flip_set_base(dev_priv, pipe);
29305+ }
29306+ list_del_init(list);
29307+ task = NULL;
29308+ psb_schedule_raster(dev_priv, scheduler);
29309+ return;
29310+ }
29311+
29312+ /*
29313+ * Sometimes changing ZLS format requires an ISP reset.
29314+ * Doesn't seem to consume too much time.
29315+ */
29316+
29317+ if (task->scene)
29318+ PSB_WSGX32(_PSB_CS_RESET_ISP_RESET, PSB_CR_SOFT_RESET);
29319+
29320+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = task;
29321+
29322+ list_del_init(list);
29323+ scheduler->idle = 0;
29324+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
29325+ scheduler->total_raster_jiffies = 0;
29326+
29327+ if (task->scene)
29328+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
29329+
29330+ (void) psb_reg_submit(dev_priv, task->raster_cmds,
29331+ task->raster_cmd_size);
29332+
29333+ if (task->scene) {
29334+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
29335+ 0x00000000 : PSB_RF_FIRE_RASTER;
29336+ psb_set_scene_fire(scheduler,
29337+ task->scene, PSB_SCENE_ENGINE_RASTER,
29338+ task);
29339+ } else {
29340+ task->reply_flags = PSB_RF_DEALLOC | PSB_RF_FIRE_RASTER;
29341+ psb_fire_raster(scheduler, task);
29342+ }
29343+ psb_schedule_watchdog(dev_priv);
29344+}
29345+
29346+int psb_extend_timeout(struct drm_psb_private *dev_priv,
29347+ uint32_t xhw_lockup)
29348+{
29349+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29350+ unsigned long irq_flags;
29351+ int ret = -EBUSY;
29352+
29353+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29354+
29355+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
29356+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
29357+ if (xhw_lockup & PSB_LOCKUP_TA) {
29358+ goto out_unlock;
29359+ } else {
29360+ scheduler->total_ta_jiffies +=
29361+ jiffies - scheduler->ta_end_jiffies +
29362+ PSB_TA_TIMEOUT;
29363+ if (scheduler->total_ta_jiffies >
29364+ PSB_ALLOWED_TA_RUNTIME)
29365+ goto out_unlock;
29366+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
29367+ }
29368+ }
29369+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL &&
29370+ time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
29371+ if (xhw_lockup & PSB_LOCKUP_RASTER) {
29372+ goto out_unlock;
29373+ } else {
29374+ scheduler->total_raster_jiffies +=
29375+ jiffies - scheduler->raster_end_jiffies +
29376+ PSB_RASTER_TIMEOUT;
29377+ if (scheduler->total_raster_jiffies >
29378+ PSB_ALLOWED_RASTER_RUNTIME)
29379+ goto out_unlock;
29380+ scheduler->raster_end_jiffies =
29381+ jiffies + PSB_RASTER_TIMEOUT;
29382+ }
29383+ }
29384+
29385+ ret = 0;
29386+
29387+out_unlock:
29388+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29389+ return ret;
29390+}
29391+
29392+/*
29393+ * TA done handler.
29394+ */
29395+
29396+static void psb_ta_done(struct drm_psb_private *dev_priv,
29397+ struct psb_scheduler *scheduler)
29398+{
29399+ struct psb_task *task =
29400+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
29401+ struct psb_scene *scene = task->scene;
29402+
29403+ PSB_DEBUG_RENDER("TA done %u\n", task->sequence);
29404+
29405+ switch (task->ta_complete_action) {
29406+ case PSB_RASTER_BLOCK:
29407+ scheduler->ta_state = 1;
29408+ scene->flags |=
29409+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
29410+ list_add_tail(&task->head, &scheduler->raster_queue);
29411+ break;
29412+ case PSB_RASTER:
29413+ scene->flags |=
29414+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
29415+ list_add_tail(&task->head, &scheduler->raster_queue);
29416+ break;
29417+ case PSB_RETURN:
29418+ scheduler->ta_state = 0;
29419+ scene->flags |= PSB_SCENE_FLAG_DIRTY;
29420+ list_add_tail(&scene->hw_scene->head,
29421+ &scheduler->hw_scenes);
29422+
29423+ break;
29424+ }
29425+
29426+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
29427+
29428+#ifdef FIX_TG_16
29429+ psb_2d_atomic_unlock(dev_priv);
29430+#endif
29431+
29432+ if (task->ta_complete_action != PSB_RASTER_BLOCK)
29433+ psb_report_fence(dev_priv, scheduler, task->engine,
29434+ task->sequence,
29435+ _PSB_FENCE_TA_DONE_SHIFT, 1);
29436+
29437+ psb_schedule_raster(dev_priv, scheduler);
29438+ psb_schedule_ta(dev_priv, scheduler);
29439+ psb_set_idle(scheduler);
29440+
29441+ if (task->ta_complete_action != PSB_RETURN)
29442+ return;
29443+
29444+ list_add_tail(&task->head, &scheduler->task_done_queue);
29445+ schedule_delayed_work(&scheduler->wq, 0);
29446+}
29447+
29448+/*
29449+ * Rasterizer done handler.
29450+ */
29451+
29452+static void psb_raster_done(struct drm_psb_private *dev_priv,
29453+ struct psb_scheduler *scheduler)
29454+{
29455+ struct psb_task *task =
29456+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
29457+ struct psb_scene *scene = task->scene;
29458+ uint32_t complete_action = task->raster_complete_action;
29459+
29460+ PSB_DEBUG_RENDER("Raster done %u\n", task->sequence);
29461+
29462+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
29463+
29464+ if (complete_action != PSB_RASTER)
29465+ psb_schedule_raster(dev_priv, scheduler);
29466+
29467+ if (scene) {
29468+ if (task->feedback.page) {
29469+ if (unlikely(scheduler->feedback_task)) {
29470+ /*
29471+ * This should never happen, since the previous
29472+ * feedback query will return before the next
29473+ * raster task is fired.
29474+ */
29475+ DRM_ERROR("Feedback task busy.\n");
29476+ }
29477+ scheduler->feedback_task = task;
29478+ psb_xhw_vistest(dev_priv, &task->buf);
29479+ }
29480+ switch (complete_action) {
29481+ case PSB_RETURN:
29482+ scene->flags &=
29483+ ~(PSB_SCENE_FLAG_DIRTY |
29484+ PSB_SCENE_FLAG_COMPLETE);
29485+ list_add_tail(&scene->hw_scene->head,
29486+ &scheduler->hw_scenes);
29487+ psb_report_fence(dev_priv, scheduler, task->engine,
29488+ task->sequence,
29489+ _PSB_FENCE_SCENE_DONE_SHIFT, 1);
29490+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
29491+ scheduler->ta_state = 0;
29492+
29493+ break;
29494+ case PSB_RASTER:
29495+ list_add(&task->head, &scheduler->raster_queue);
29496+ task->raster_complete_action = PSB_RETURN;
29497+ psb_schedule_raster(dev_priv, scheduler);
29498+ break;
29499+ case PSB_TA:
29500+ list_add(&task->head, &scheduler->ta_queue);
29501+ scheduler->ta_state = 0;
29502+ task->raster_complete_action = PSB_RETURN;
29503+ task->ta_complete_action = PSB_RASTER;
29504+ break;
29505+
29506+ }
29507+ }
29508+ psb_schedule_ta(dev_priv, scheduler);
29509+ psb_set_idle(scheduler);
29510+
29511+ if (complete_action == PSB_RETURN) {
29512+ if (task->scene == NULL) {
29513+ psb_report_fence(dev_priv, scheduler, task->engine,
29514+ task->sequence,
29515+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
29516+ }
29517+ if (!task->feedback.page) {
29518+ list_add_tail(&task->head,
29519+ &scheduler->task_done_queue);
29520+ schedule_delayed_work(&scheduler->wq, 0);
29521+ }
29522+ }
29523+}
29524+
29525+void psb_scheduler_pause(struct drm_psb_private *dev_priv)
29526+{
29527+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29528+ unsigned long irq_flags;
29529+
29530+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29531+ scheduler->idle_count++;
29532+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29533+}
29534+
29535+void psb_scheduler_restart(struct drm_psb_private *dev_priv)
29536+{
29537+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29538+ unsigned long irq_flags;
29539+
29540+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29541+ if (--scheduler->idle_count == 0) {
29542+ psb_schedule_ta(dev_priv, scheduler);
29543+ psb_schedule_raster(dev_priv, scheduler);
29544+ }
29545+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29546+}
29547+
29548+int psb_scheduler_idle(struct drm_psb_private *dev_priv)
29549+{
29550+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29551+ unsigned long irq_flags;
29552+ int ret;
29553+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29554+ ret = scheduler->idle_count != 0 && scheduler->idle;
29555+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29556+ return ret;
29557+}
29558+
29559+int psb_scheduler_finished(struct drm_psb_private *dev_priv)
29560+{
29561+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29562+ unsigned long irq_flags;
29563+ int ret;
29564+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29565+ ret = (scheduler->idle &&
29566+ list_empty(&scheduler->raster_queue) &&
29567+ list_empty(&scheduler->ta_queue) &&
29568+ list_empty(&scheduler->hp_raster_queue));
29569+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29570+ return ret;
29571+}
29572+
29573+static void psb_ta_oom(struct drm_psb_private *dev_priv,
29574+ struct psb_scheduler *scheduler)
29575+{
29576+
29577+ struct psb_task *task =
29578+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
29579+ if (!task)
29580+ return;
29581+
29582+ if (task->aborting)
29583+ return;
29584+ task->aborting = 1;
29585+
29586+ DRM_INFO("Info: TA out of parameter memory.\n");
29587+
29588+ (void) psb_xhw_ta_oom(dev_priv, &task->buf,
29589+ task->scene->hw_cookie);
29590+}
29591+
29592+static void psb_ta_oom_reply(struct drm_psb_private *dev_priv,
29593+ struct psb_scheduler *scheduler)
29594+{
29595+
29596+ struct psb_task *task =
29597+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
29598+ uint32_t flags;
29599+ if (!task)
29600+ return;
29601+
29602+ psb_xhw_ta_oom_reply(dev_priv, &task->buf,
29603+ task->scene->hw_cookie,
29604+ &task->ta_complete_action,
29605+ &task->raster_complete_action, &flags);
29606+ task->flags |= flags;
29607+ task->aborting = 0;
29608+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM_REPLY);
29609+}
29610+
29611+static void psb_ta_hw_scene_freed(struct drm_psb_private *dev_priv,
29612+ struct psb_scheduler *scheduler)
29613+{
29614+ DRM_ERROR("TA hw scene freed.\n");
29615+}
29616+
29617+static void psb_vistest_reply(struct drm_psb_private *dev_priv,
29618+ struct psb_scheduler *scheduler)
29619+{
29620+ struct psb_task *task = scheduler->feedback_task;
29621+ uint8_t *feedback_map;
29622+ uint32_t add;
29623+ uint32_t cur;
29624+ struct drm_psb_vistest *vistest;
29625+ int i;
29626+
29627+ scheduler->feedback_task = NULL;
29628+ if (!task) {
29629+ DRM_ERROR("No Poulsbo feedback task.\n");
29630+ return;
29631+ }
29632+ if (!task->feedback.page) {
29633+ DRM_ERROR("No Poulsbo feedback page.\n");
29634+ goto out;
29635+ }
29636+
29637+ if (in_irq())
29638+ feedback_map = kmap_atomic(task->feedback.page, KM_IRQ0);
29639+ else
29640+ feedback_map = kmap_atomic(task->feedback.page, KM_USER0);
29641+
29642+ /*
29643+ * Loop over all requested vistest components here.
29644+ * Only one (vistest) currently.
29645+ */
29646+
29647+ vistest = (struct drm_psb_vistest *)
29648+ (feedback_map + task->feedback.offset);
29649+
29650+ for (i = 0; i < PSB_HW_FEEDBACK_SIZE; ++i) {
29651+ add = task->buf.arg.arg.feedback[i];
29652+ cur = vistest->vt[i];
29653+
29654+ /*
29655+ * Vistest saturates.
29656+ */
29657+
29658+ vistest->vt[i] = (cur + add < cur) ? ~0 : cur + add;
29659+ }
29660+ if (in_irq())
29661+ kunmap_atomic(feedback_map, KM_IRQ0);
29662+ else
29663+ kunmap_atomic(feedback_map, KM_USER0);
29664+out:
29665+ psb_report_fence(dev_priv, scheduler, task->engine, task->sequence,
29666+ _PSB_FENCE_FEEDBACK_SHIFT, 1);
29667+
29668+ if (list_empty(&task->head)) {
29669+ list_add_tail(&task->head, &scheduler->task_done_queue);
29670+ schedule_delayed_work(&scheduler->wq, 0);
29671+ } else
29672+ psb_schedule_ta(dev_priv, scheduler);
29673+}
29674+
29675+static void psb_ta_fire_reply(struct drm_psb_private *dev_priv,
29676+ struct psb_scheduler *scheduler)
29677+{
29678+ struct psb_task *task =
29679+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
29680+
29681+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
29682+
29683+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_FIRE_TA);
29684+}
29685+
29686+static void psb_raster_fire_reply(struct drm_psb_private *dev_priv,
29687+ struct psb_scheduler *scheduler)
29688+{
29689+ struct psb_task *task =
29690+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
29691+ uint32_t reply_flags;
29692+
29693+ if (!task) {
29694+ DRM_ERROR("Null task.\n");
29695+ return;
29696+ }
29697+
29698+ task->raster_complete_action = task->buf.arg.arg.sb.rca;
29699+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
29700+
29701+ reply_flags = PSB_RF_FIRE_RASTER;
29702+ if (task->raster_complete_action == PSB_RASTER)
29703+ reply_flags |= PSB_RF_DEALLOC;
29704+
29705+ psb_dispatch_raster(dev_priv, scheduler, reply_flags);
29706+}
29707+
29708+static int psb_user_interrupt(struct drm_psb_private *dev_priv,
29709+ struct psb_scheduler *scheduler)
29710+{
29711+ uint32_t type;
29712+ int ret;
29713+ unsigned long irq_flags;
29714+
29715+ /*
29716+ * Xhw cannot write directly to the comm page, so
29717+ * do it here. Firmware would have written directly.
29718+ */
29719+
29720+ ret = psb_xhw_handler(dev_priv);
29721+ if (unlikely(ret))
29722+ return ret;
29723+
29724+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
29725+ type = dev_priv->comm[PSB_COMM_USER_IRQ];
29726+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
29727+ if (dev_priv->comm[PSB_COMM_USER_IRQ_LOST]) {
29728+ dev_priv->comm[PSB_COMM_USER_IRQ_LOST] = 0;
29729+ DRM_ERROR("Lost Poulsbo hardware event.\n");
29730+ }
29731+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
29732+
29733+ if (type == 0)
29734+ return 0;
29735+
29736+ switch (type) {
29737+ case PSB_UIRQ_VISTEST:
29738+ psb_vistest_reply(dev_priv, scheduler);
29739+ break;
29740+ case PSB_UIRQ_OOM_REPLY:
29741+ psb_ta_oom_reply(dev_priv, scheduler);
29742+ break;
29743+ case PSB_UIRQ_FIRE_TA_REPLY:
29744+ psb_ta_fire_reply(dev_priv, scheduler);
29745+ break;
29746+ case PSB_UIRQ_FIRE_RASTER_REPLY:
29747+ psb_raster_fire_reply(dev_priv, scheduler);
29748+ break;
29749+ default:
29750+ DRM_ERROR("Unknown Poulsbo hardware event. %d\n", type);
29751+ }
29752+ return 0;
29753+}
29754+
29755+int psb_forced_user_interrupt(struct drm_psb_private *dev_priv)
29756+{
29757+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29758+ unsigned long irq_flags;
29759+ int ret;
29760+
29761+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29762+ ret = psb_user_interrupt(dev_priv, scheduler);
29763+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29764+ return ret;
29765+}
29766+
29767+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
29768+ struct psb_scheduler *scheduler,
29769+ uint32_t reply_flag)
29770+{
29771+ struct psb_task *task =
29772+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
29773+ uint32_t flags;
29774+ uint32_t mask;
29775+
29776+ if (unlikely(!task))
29777+ return;
29778+
29779+ task->reply_flags |= reply_flag;
29780+ flags = task->reply_flags;
29781+ mask = PSB_RF_FIRE_TA;
29782+
29783+ if (!(flags & mask))
29784+ return;
29785+
29786+ mask = PSB_RF_TA_DONE;
29787+ if ((flags & mask) == mask) {
29788+ task->reply_flags &= ~mask;
29789+ psb_ta_done(dev_priv, scheduler);
29790+ }
29791+
29792+ mask = PSB_RF_OOM;
29793+ if ((flags & mask) == mask) {
29794+ task->reply_flags &= ~mask;
29795+ psb_ta_oom(dev_priv, scheduler);
29796+ }
29797+
29798+ mask = (PSB_RF_OOM_REPLY | PSB_RF_TERMINATE);
29799+ if ((flags & mask) == mask) {
29800+ task->reply_flags &= ~mask;
29801+ psb_ta_done(dev_priv, scheduler);
29802+ }
29803+}
29804+
29805+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
29806+ struct psb_scheduler *scheduler,
29807+ uint32_t reply_flag)
29808+{
29809+ struct psb_task *task =
29810+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
29811+ uint32_t flags;
29812+ uint32_t mask;
29813+
29814+ if (unlikely(!task))
29815+ return;
29816+
29817+ task->reply_flags |= reply_flag;
29818+ flags = task->reply_flags;
29819+ mask = PSB_RF_FIRE_RASTER;
29820+
29821+ if (!(flags & mask))
29822+ return;
29823+
29824+ /*
29825+ * For rasterizer-only tasks, don't report fence done here,
29826+ * as this is time consuming and the rasterizer wants a new
29827+ * task immediately. For other tasks, the hardware is probably
29828+ * still busy deallocating TA memory, so we can report
29829+ * fence done in parallel.
29830+ */
29831+
29832+ if (task->raster_complete_action == PSB_RETURN &&
29833+ (reply_flag & PSB_RF_RASTER_DONE) && task->scene != NULL) {
29834+ psb_report_fence(dev_priv, scheduler, task->engine,
29835+ task->sequence,
29836+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
29837+ }
29838+
29839+ mask = PSB_RF_RASTER_DONE | PSB_RF_DEALLOC;
29840+ if ((flags & mask) == mask) {
29841+ task->reply_flags &= ~mask;
29842+ psb_raster_done(dev_priv, scheduler);
29843+ }
29844+}
29845+
29846+void psb_scheduler_handler(struct drm_psb_private *dev_priv,
29847+ uint32_t status)
29848+{
29849+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29850+
29851+ spin_lock(&scheduler->lock);
29852+
29853+ if (status & _PSB_CE_PIXELBE_END_RENDER) {
29854+ psb_dispatch_raster(dev_priv, scheduler,
29855+ PSB_RF_RASTER_DONE);
29856+ }
29857+ if (status & _PSB_CE_DPM_3D_MEM_FREE)
29858+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_DEALLOC);
29859+
29860+ if (status & _PSB_CE_TA_FINISHED)
29861+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TA_DONE);
29862+
29863+ if (status & _PSB_CE_TA_TERMINATE)
29864+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TERMINATE);
29865+
29866+ if (status & (_PSB_CE_DPM_REACHED_MEM_THRESH |
29867+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
29868+ _PSB_CE_DPM_OUT_OF_MEMORY_MT)) {
29869+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM);
29870+ }
29871+ if (status & _PSB_CE_DPM_TA_MEM_FREE)
29872+ psb_ta_hw_scene_freed(dev_priv, scheduler);
29873+
29874+ if (status & _PSB_CE_SW_EVENT)
29875+ psb_user_interrupt(dev_priv, scheduler);
29876+
29877+ spin_unlock(&scheduler->lock);
29878+}
29879+
29880+static void psb_free_task_wq(struct work_struct *work)
29881+{
29882+ struct psb_scheduler *scheduler =
29883+ container_of(work, struct psb_scheduler, wq.work);
29884+
29885+ struct list_head *list, *next;
29886+ unsigned long irq_flags;
29887+ struct psb_task *task;
29888+
29889+ if (!mutex_trylock(&scheduler->task_wq_mutex))
29890+ return;
29891+
29892+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29893+ list_for_each_safe(list, next, &scheduler->task_done_queue) {
29894+ task = list_entry(list, struct psb_task, head);
29895+ list_del_init(list);
29896+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29897+
29898+ PSB_DEBUG_RENDER("Checking Task %d: Scene 0x%08lx, "
29899+ "Feedback bo 0x%08lx, done %d\n",
29900+ task->sequence,
29901+ (unsigned long) task->scene,
29902+ (unsigned long) task->feedback.bo,
29903+ atomic_read(&task->buf.done));
29904+
29905+ if (task->scene) {
29906+ PSB_DEBUG_RENDER("Unref scene %d\n",
29907+ task->sequence);
29908+ psb_scene_unref(&task->scene);
29909+ if (task->feedback.bo) {
29910+ PSB_DEBUG_RENDER("Unref feedback bo %d\n",
29911+ task->sequence);
29912+ ttm_bo_unref(&task->feedback.bo);
29913+ }
29914+ }
29915+
29916+ if (atomic_read(&task->buf.done)) {
29917+ PSB_DEBUG_RENDER("Deleting task %d\n",
29918+ task->sequence);
29919+ kfree(task);
29920+ task = NULL;
29921+ }
29922+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29923+ if (task != NULL)
29924+ list_add(list, &scheduler->task_done_queue);
29925+ }
29926+ if (!list_empty(&scheduler->task_done_queue)) {
29927+ PSB_DEBUG_RENDER("Rescheduling wq\n");
29928+ schedule_delayed_work(&scheduler->wq, 1);
29929+ }
29930+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29931+ if (list_empty(&scheduler->task_done_queue) &&
29932+ drm_psb_ospm && IS_MRST(scheduler->dev)) {
29933+ psb_try_power_down_sgx(scheduler->dev);
29934+ }
29935+ mutex_unlock(&scheduler->task_wq_mutex);
29936+}
29937+
29938+static void psb_powerdown_topaz(struct work_struct *work)
29939+{
29940+ struct psb_scheduler *scheduler =
29941+ container_of(work, struct psb_scheduler, topaz_suspend_wq.work);
29942+
29943+ if (!mutex_trylock(&scheduler->topaz_power_mutex))
29944+ return;
29945+
29946+ psb_try_power_down_topaz(scheduler->dev);
29947+ mutex_unlock(&scheduler->topaz_power_mutex);
29948+}
29949+
29950+static void psb_powerdown_msvdx(struct work_struct *work)
29951+{
29952+ struct psb_scheduler *scheduler =
29953+ container_of(work, struct psb_scheduler, msvdx_suspend_wq.work);
29954+
29955+ if (!mutex_trylock(&scheduler->msvdx_power_mutex))
29956+ return;
29957+
29958+ psb_try_power_down_msvdx(scheduler->dev);
29959+ mutex_unlock(&scheduler->msvdx_power_mutex);
29960+}
29961+
29962+/*
29963+ * Check if any of the tasks in the queues is using a scene.
29964+ * In that case we know the TA memory buffer objects are
29965+ * fenced and will not be evicted until that fence is signaled.
29966+ */
29967+
29968+void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv)
29969+{
29970+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29971+ unsigned long irq_flags;
29972+ struct psb_task *task;
29973+ struct psb_task *next_task;
29974+
29975+ dev_priv->force_ta_mem_load = 1;
29976+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29977+ list_for_each_entry_safe(task, next_task, &scheduler->ta_queue,
29978+ head) {
29979+ if (task->scene) {
29980+ dev_priv->force_ta_mem_load = 0;
29981+ break;
29982+ }
29983+ }
29984+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
29985+ head) {
29986+ if (task->scene) {
29987+ dev_priv->force_ta_mem_load = 0;
29988+ break;
29989+ }
29990+ }
29991+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29992+}
29993+
29994+void psb_scheduler_reset(struct drm_psb_private *dev_priv,
29995+ int error_condition)
29996+{
29997+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29998+ unsigned long wait_jiffies;
29999+ unsigned long cur_jiffies;
30000+ struct psb_task *task;
30001+ struct psb_task *next_task;
30002+ unsigned long irq_flags;
30003+
30004+ psb_scheduler_pause(dev_priv);
30005+ if (!psb_scheduler_idle(dev_priv)) {
30006+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30007+
30008+ cur_jiffies = jiffies;
30009+ wait_jiffies = cur_jiffies;
30010+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] &&
30011+ time_after_eq(scheduler->ta_end_jiffies, wait_jiffies))
30012+ wait_jiffies = scheduler->ta_end_jiffies;
30013+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] &&
30014+ time_after_eq(scheduler->raster_end_jiffies,
30015+ wait_jiffies))
30016+ wait_jiffies = scheduler->raster_end_jiffies;
30017+
30018+ wait_jiffies -= cur_jiffies;
30019+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30020+
30021+ (void) wait_event_timeout(scheduler->idle_queue,
30022+ psb_scheduler_idle(dev_priv),
30023+ wait_jiffies);
30024+ }
30025+
30026+ if (!psb_scheduler_idle(dev_priv)) {
30027+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30028+ task = scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
30029+ if (task) {
30030+ DRM_ERROR("Detected Poulsbo rasterizer lockup.\n");
30031+ if (task->engine == PSB_ENGINE_HPRAST) {
30032+ psb_fence_error(scheduler->dev,
30033+ PSB_ENGINE_HPRAST,
30034+ task->sequence,
30035+ _PSB_FENCE_TYPE_RASTER_DONE,
30036+ error_condition);
30037+
30038+ list_del(&task->head);
30039+ psb_xhw_clean_buf(dev_priv, &task->buf);
30040+ list_add_tail(&task->head,
30041+ &scheduler->task_done_queue);
30042+ } else {
30043+ list_add(&task->head,
30044+ &scheduler->raster_queue);
30045+ }
30046+ }
30047+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
30048+ task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
30049+ if (task) {
30050+ DRM_ERROR("Detected Poulsbo ta lockup.\n");
30051+ list_add_tail(&task->head,
30052+ &scheduler->raster_queue);
30053+#ifdef FIX_TG_16
30054+ psb_2d_atomic_unlock(dev_priv);
30055+#endif
30056+ }
30057+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
30058+ scheduler->ta_state = 0;
30059+
30060+#ifdef FIX_TG_16
30061+ atomic_set(&dev_priv->ta_wait_2d, 0);
30062+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
30063+ wake_up(&dev_priv->queue_2d);
30064+#endif
30065+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30066+ }
30067+
30068+ /*
30069+ * Empty raster queue.
30070+ */
30071+
30072+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30073+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
30074+ head) {
30075+ struct psb_scene *scene = task->scene;
30076+
30077+ DRM_INFO("Signaling fence sequence %u\n",
30078+ task->sequence);
30079+
30080+ psb_fence_error(scheduler->dev,
30081+ task->engine,
30082+ task->sequence,
30083+ _PSB_FENCE_TYPE_TA_DONE |
30084+ _PSB_FENCE_TYPE_RASTER_DONE |
30085+ _PSB_FENCE_TYPE_SCENE_DONE |
30086+ _PSB_FENCE_TYPE_FEEDBACK, error_condition);
30087+ if (scene) {
30088+ scene->flags = 0;
30089+ if (scene->hw_scene) {
30090+ list_add_tail(&scene->hw_scene->head,
30091+ &scheduler->hw_scenes);
30092+ scene->hw_scene = NULL;
30093+ }
30094+ }
30095+
30096+ psb_xhw_clean_buf(dev_priv, &task->buf);
30097+ list_del(&task->head);
30098+ list_add_tail(&task->head, &scheduler->task_done_queue);
30099+ }
30100+
30101+ schedule_delayed_work(&scheduler->wq, 1);
30102+ scheduler->idle = 1;
30103+ wake_up(&scheduler->idle_queue);
30104+
30105+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30106+ psb_scheduler_restart(dev_priv);
30107+
30108+}
30109+
30110+int psb_scheduler_init(struct drm_device *dev,
30111+ struct psb_scheduler *scheduler)
30112+{
30113+ struct psb_hw_scene *hw_scene;
30114+ int i;
30115+
30116+ memset(scheduler, 0, sizeof(*scheduler));
30117+ scheduler->dev = dev;
30118+ mutex_init(&scheduler->task_wq_mutex);
30119+ mutex_init(&scheduler->topaz_power_mutex);
30120+ mutex_init(&scheduler->msvdx_power_mutex);
30121+ spin_lock_init(&scheduler->lock);
30122+ scheduler->idle = 1;
30123+
30124+ INIT_LIST_HEAD(&scheduler->ta_queue);
30125+ INIT_LIST_HEAD(&scheduler->raster_queue);
30126+ INIT_LIST_HEAD(&scheduler->hp_raster_queue);
30127+ INIT_LIST_HEAD(&scheduler->hw_scenes);
30128+ INIT_LIST_HEAD(&scheduler->task_done_queue);
30129+ INIT_DELAYED_WORK(&scheduler->wq, &psb_free_task_wq);
30130+ INIT_DELAYED_WORK(&scheduler->topaz_suspend_wq,
30131+ &psb_powerdown_topaz);
30132+ INIT_DELAYED_WORK(&scheduler->msvdx_suspend_wq,
30133+ &psb_powerdown_msvdx);
30134+ init_waitqueue_head(&scheduler->idle_queue);
30135+
30136+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
30137+ hw_scene = &scheduler->hs[i];
30138+ hw_scene->context_number = i;
30139+ list_add_tail(&hw_scene->head, &scheduler->hw_scenes);
30140+ }
30141+
30142+ for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i)
30143+ scheduler->seq[i].reported = 0;
30144+ return 0;
30145+}
30146+
30147+/*
30148+ * Scene references maintained by the scheduler are not refcounted.
30149+ * Remove all references to a particular scene here.
30150+ */
30151+
30152+void psb_scheduler_remove_scene_refs(struct psb_scene *scene)
30153+{
30154+ struct drm_psb_private *dev_priv =
30155+ (struct drm_psb_private *) scene->dev->dev_private;
30156+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30157+ struct psb_hw_scene *hw_scene;
30158+ unsigned long irq_flags;
30159+ unsigned int i;
30160+
30161+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30162+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
30163+ hw_scene = &scheduler->hs[i];
30164+ if (hw_scene->last_scene == scene) {
30165+ BUG_ON(list_empty(&hw_scene->head));
30166+ hw_scene->last_scene = NULL;
30167+ }
30168+ }
30169+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30170+}
30171+
30172+void psb_scheduler_takedown(struct psb_scheduler *scheduler)
30173+{
30174+ flush_scheduled_work();
30175+}
30176+
30177+static int psb_setup_task(struct drm_device *dev,
30178+ struct drm_psb_cmdbuf_arg *arg,
30179+ struct ttm_buffer_object *raster_cmd_buffer,
30180+ struct ttm_buffer_object *ta_cmd_buffer,
30181+ struct ttm_buffer_object *oom_cmd_buffer,
30182+ struct psb_scene *scene,
30183+ enum psb_task_type task_type,
30184+ uint32_t engine,
30185+ uint32_t flags, struct psb_task **task_p)
30186+{
30187+ struct psb_task *task;
30188+ int ret;
30189+
30190+ if (ta_cmd_buffer && arg->ta_size > PSB_MAX_TA_CMDS) {
30191+ DRM_ERROR("Too many ta cmds %d.\n", arg->ta_size);
30192+ return -EINVAL;
30193+ }
30194+ if (raster_cmd_buffer && arg->cmdbuf_size > PSB_MAX_RASTER_CMDS) {
30195+ DRM_ERROR("Too many raster cmds %d.\n", arg->cmdbuf_size);
30196+ return -EINVAL;
30197+ }
30198+ if (oom_cmd_buffer && arg->oom_size > PSB_MAX_OOM_CMDS) {
30199+ DRM_ERROR("Too many oom cmds %d.\n", arg->oom_size);
30200+ return -EINVAL;
30201+ }
30202+
30203+ task = kzalloc(sizeof(*task), GFP_KERNEL);
30204+ if (!task)
30205+ return -ENOMEM;
30206+
30207+ atomic_set(&task->buf.done, 1);
30208+ task->engine = engine;
30209+ INIT_LIST_HEAD(&task->head);
30210+ INIT_LIST_HEAD(&task->buf.head);
30211+ if (ta_cmd_buffer && arg->ta_size != 0) {
30212+ task->ta_cmd_size = arg->ta_size;
30213+ ret = psb_submit_copy_cmdbuf(dev, ta_cmd_buffer,
30214+ arg->ta_offset,
30215+ arg->ta_size,
30216+ PSB_ENGINE_TA, task->ta_cmds);
30217+ if (ret)
30218+ goto out_err;
30219+ }
30220+ if (raster_cmd_buffer) {
30221+ task->raster_cmd_size = arg->cmdbuf_size;
30222+ ret = psb_submit_copy_cmdbuf(dev, raster_cmd_buffer,
30223+ arg->cmdbuf_offset,
30224+ arg->cmdbuf_size,
30225+ PSB_ENGINE_TA,
30226+ task->raster_cmds);
30227+ if (ret)
30228+ goto out_err;
30229+ }
30230+ if (oom_cmd_buffer && arg->oom_size != 0) {
30231+ task->oom_cmd_size = arg->oom_size;
30232+ ret = psb_submit_copy_cmdbuf(dev, oom_cmd_buffer,
30233+ arg->oom_offset,
30234+ arg->oom_size,
30235+ PSB_ENGINE_TA,
30236+ task->oom_cmds);
30237+ if (ret)
30238+ goto out_err;
30239+ }
30240+ task->task_type = task_type;
30241+ task->flags = flags;
30242+ if (scene)
30243+ task->scene = psb_scene_ref(scene);
30244+
30245+ *task_p = task;
30246+ return 0;
30247+out_err:
30248+ kfree(task);
30249+ *task_p = NULL;
30250+ return ret;
30251+}
30252+
30253+int psb_cmdbuf_ta(struct drm_file *priv,
30254+ struct psb_context *context,
30255+ struct drm_psb_cmdbuf_arg *arg,
30256+ struct ttm_buffer_object *cmd_buffer,
30257+ struct ttm_buffer_object *ta_buffer,
30258+ struct ttm_buffer_object *oom_buffer,
30259+ struct psb_scene *scene,
30260+ struct psb_feedback_info *feedback,
30261+ struct psb_ttm_fence_rep *fence_arg)
30262+{
30263+ struct drm_device *dev = priv->minor->dev;
30264+ struct drm_psb_private *dev_priv = dev->dev_private;
30265+ struct ttm_fence_object *fence = NULL;
30266+ struct psb_task *task = NULL;
30267+ int ret;
30268+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30269+ uint32_t sequence;
30270+
30271+ PSB_DEBUG_RENDER("Cmdbuf ta\n");
30272+
30273+ ret = psb_setup_task(dev, arg, cmd_buffer, ta_buffer,
30274+ oom_buffer, scene,
30275+ psb_ta_task, PSB_ENGINE_TA,
30276+ PSB_FIRE_FLAG_RASTER_DEALLOC, &task);
30277+
30278+ if (ret)
30279+ goto out_err;
30280+
30281+ task->feedback = *feedback;
30282+ mutex_lock(&dev_priv->reset_mutex);
30283+
30284+ /*
30285+ * Hand the task over to the scheduler.
30286+ */
30287+
30288+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
30289+
30290+ task->ta_complete_action = PSB_RASTER;
30291+ task->raster_complete_action = PSB_RETURN;
30292+ sequence = task->sequence;
30293+
30294+ spin_lock_irq(&scheduler->lock);
30295+
30296+ list_add_tail(&task->head, &scheduler->ta_queue);
30297+ PSB_DEBUG_RENDER("queued ta %u\n", task->sequence);
30298+
30299+ psb_schedule_ta(dev_priv, scheduler);
30300+
30301+ /**
30302+ * From this point we may no longer dereference task,
30303+ * as the object it points to may be freed by another thread.
30304+ */
30305+
30306+ task = NULL;
30307+ spin_unlock_irq(&scheduler->lock);
30308+ mutex_unlock(&dev_priv->reset_mutex);
30309+
30310+ psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types,
30311+ arg->fence_flags,
30312+ &context->validate_list, fence_arg, &fence);
30313+ ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence);
30314+
30315+ if (fence) {
30316+ spin_lock_irq(&scheduler->lock);
30317+ psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA,
30318+ sequence, _PSB_FENCE_EXE_SHIFT, 1);
30319+ spin_unlock_irq(&scheduler->lock);
30320+ fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE;
30321+ }
30322+
30323+out_err:
30324+ if (ret && ret != -ERESTART)
30325+ DRM_ERROR("TA task queue job failed.\n");
30326+
30327+ if (fence) {
30328+#ifdef PSB_WAIT_FOR_TA_COMPLETION
30329+ ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
30330+ _PSB_FENCE_TYPE_TA_DONE);
30331+#ifdef PSB_BE_PARANOID
30332+ ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
30333+ _PSB_FENCE_TYPE_SCENE_DONE);
30334+#endif
30335+#endif
30336+ ttm_fence_object_unref(&fence);
30337+ }
30338+ return ret;
30339+}
30340+
30341+int psb_cmdbuf_raster(struct drm_file *priv,
30342+ struct psb_context *context,
30343+ struct drm_psb_cmdbuf_arg *arg,
30344+ struct ttm_buffer_object *cmd_buffer,
30345+ struct psb_ttm_fence_rep *fence_arg)
30346+{
30347+ struct drm_device *dev = priv->minor->dev;
30348+ struct drm_psb_private *dev_priv = dev->dev_private;
30349+ struct ttm_fence_object *fence = NULL;
30350+ struct psb_task *task = NULL;
30351+ int ret;
30352+ uint32_t sequence;
30353+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30354+
30355+ PSB_DEBUG_RENDER("Cmdbuf Raster\n");
30356+
30357+ ret = psb_setup_task(dev, arg, cmd_buffer, NULL, NULL,
30358+ NULL, psb_raster_task,
30359+ PSB_ENGINE_TA, 0, &task);
30360+
30361+ if (ret)
30362+ goto out_err;
30363+
30364+ /*
30365+ * Hand the task over to the scheduler.
30366+ */
30367+
30368+ mutex_lock(&dev_priv->reset_mutex);
30369+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
30370+ task->ta_complete_action = PSB_RASTER;
30371+ task->raster_complete_action = PSB_RETURN;
30372+ sequence = task->sequence;
30373+
30374+ spin_lock_irq(&scheduler->lock);
30375+ list_add_tail(&task->head, &scheduler->ta_queue);
30376+ PSB_DEBUG_RENDER("queued raster %u\n", task->sequence);
30377+ psb_schedule_ta(dev_priv, scheduler);
30378+
30379+ /**
30380+ * From this point we may no longer dereference task,
30381+ * as the object it points to may be freed by another thread.
30382+ */
30383+
30384+ task = NULL;
30385+ spin_unlock_irq(&scheduler->lock);
30386+ mutex_unlock(&dev_priv->reset_mutex);
30387+
30388+ psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types,
30389+ arg->fence_flags,
30390+ &context->validate_list, fence_arg, &fence);
30391+
30392+ ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence);
30393+ if (fence) {
30394+ spin_lock_irq(&scheduler->lock);
30395+ psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA, sequence,
30396+ _PSB_FENCE_EXE_SHIFT, 1);
30397+ spin_unlock_irq(&scheduler->lock);
30398+ fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE;
30399+ }
30400+out_err:
30401+ if (ret && ret != -ERESTART)
30402+ DRM_ERROR("Raster task queue job failed.\n");
30403+
30404+ if (fence) {
30405+#ifdef PSB_WAIT_FOR_RASTER_COMPLETION
30406+ ttm_fence_object_wait(fence, 1, 1, fence->type);
30407+#endif
30408+ ttm_fence_object_unref(&fence);
30409+ }
30410+
30411+ return ret;
30412+}
30413+
30414+#ifdef FIX_TG_16
30415+
30416+static int psb_check_2d_idle(struct drm_psb_private *dev_priv)
30417+{
30418+ if (psb_2d_trylock(dev_priv)) {
30419+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
30420+ !((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
30421+ _PSB_C2B_STATUS_BUSY))) {
30422+ return 0;
30423+ }
30424+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 0, 1) == 0)
30425+ psb_2D_irq_on(dev_priv);
30426+
30427+ PSB_WSGX32(PSB_2D_FENCE_BH, PSB_SGX_2D_SLAVE_PORT);
30428+ PSB_WSGX32(PSB_2D_FLUSH_BH, PSB_SGX_2D_SLAVE_PORT);
30429+ (void) PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT);
30430+
30431+ psb_2d_atomic_unlock(dev_priv);
30432+ }
30433+
30434+ atomic_set(&dev_priv->ta_wait_2d, 1);
30435+ return -EBUSY;
30436+}
30437+
30438+static void psb_atomic_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
30439+{
30440+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30441+
30442+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d, 1, 0) == 1) {
30443+ psb_schedule_ta(dev_priv, scheduler);
30444+ if (atomic_read(&dev_priv->waiters_2d) != 0)
30445+ wake_up(&dev_priv->queue_2d);
30446+ }
30447+}
30448+
30449+void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
30450+{
30451+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30452+ unsigned long irq_flags;
30453+
30454+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30455+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 1, 0) == 1) {
30456+ atomic_set(&dev_priv->ta_wait_2d, 0);
30457+ psb_2D_irq_off(dev_priv);
30458+ psb_schedule_ta(dev_priv, scheduler);
30459+ if (atomic_read(&dev_priv->waiters_2d) != 0)
30460+ wake_up(&dev_priv->queue_2d);
30461+ }
30462+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30463+}
30464+
30465+/*
30466+ * 2D locking functions. Can't use a mutex since the trylock() and
30467+ * unlock() methods need to be accessible from interrupt context.
30468+ */
30469+
30470+int psb_2d_trylock(struct drm_psb_private *dev_priv)
30471+{
30472+ return atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0;
30473+}
30474+
30475+void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv)
30476+{
30477+ atomic_set(&dev_priv->lock_2d, 0);
30478+ if (atomic_read(&dev_priv->waiters_2d) != 0)
30479+ wake_up(&dev_priv->queue_2d);
30480+}
30481+
30482+void psb_2d_unlock(struct drm_psb_private *dev_priv)
30483+{
30484+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30485+ unsigned long irq_flags;
30486+
30487+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30488+ psb_2d_atomic_unlock(dev_priv);
30489+ if (atomic_read(&dev_priv->ta_wait_2d) != 0)
30490+ psb_atomic_resume_ta_2d_idle(dev_priv);
30491+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30492+}
30493+
30494+void psb_2d_lock(struct drm_psb_private *dev_priv)
30495+{
30496+ atomic_inc(&dev_priv->waiters_2d);
30497+ wait_event(dev_priv->queue_2d,
30498+ atomic_read(&dev_priv->ta_wait_2d) == 0);
30499+ wait_event(dev_priv->queue_2d, psb_2d_trylock(dev_priv));
30500+ atomic_dec(&dev_priv->waiters_2d);
30501+}
30502+
30503+#endif
30504diff --git a/drivers/gpu/drm/psb/psb_schedule.h b/drivers/gpu/drm/psb/psb_schedule.h
30505new file mode 100644
30506index 0000000..01c27b0
30507--- /dev/null
30508+++ b/drivers/gpu/drm/psb/psb_schedule.h
30509@@ -0,0 +1,181 @@
30510+/**************************************************************************
30511+ * Copyright (c) 2007, Intel Corporation.
30512+ * All Rights Reserved.
30513+ *
30514+ * This program is free software; you can redistribute it and/or modify it
30515+ * under the terms and conditions of the GNU General Public License,
30516+ * version 2, as published by the Free Software Foundation.
30517+ *
30518+ * This program is distributed in the hope it will be useful, but WITHOUT
30519+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
30520+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
30521+ * more details.
30522+ *
30523+ * You should have received a copy of the GNU General Public License along with
30524+ * this program; if not, write to the Free Software Foundation, Inc.,
30525+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
30526+ *
30527+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
30528+ * develop this driver.
30529+ *
30530+ **************************************************************************/
30531+/*
30532+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
30533+ */
30534+
30535+#ifndef _PSB_SCHEDULE_H_
30536+#define _PSB_SCHEDULE_H_
30537+
30538+#include <drm/drmP.h>
30539+
30540+struct psb_context;
30541+
30542+enum psb_task_type {
30543+ psb_ta_midscene_task,
30544+ psb_ta_task,
30545+ psb_raster_task,
30546+ psb_freescene_task,
30547+ psb_flip_task
30548+};
30549+
30550+#define PSB_MAX_TA_CMDS 60
30551+#define PSB_MAX_RASTER_CMDS 66
30552+#define PSB_MAX_OOM_CMDS (DRM_PSB_NUM_RASTER_USE_REG * 2 + 6)
30553+
30554+struct psb_xhw_buf {
30555+ struct list_head head;
30556+ int copy_back;
30557+ atomic_t done;
30558+ struct drm_psb_xhw_arg arg;
30559+
30560+};
30561+
30562+struct psb_feedback_info {
30563+ struct ttm_buffer_object *bo;
30564+ struct page *page;
30565+ uint32_t offset;
30566+};
30567+
30568+struct psb_task {
30569+ struct list_head head;
30570+ struct psb_scene *scene;
30571+ struct psb_feedback_info feedback;
30572+ enum psb_task_type task_type;
30573+ uint32_t engine;
30574+ uint32_t sequence;
30575+ uint32_t ta_cmds[PSB_MAX_TA_CMDS];
30576+ uint32_t raster_cmds[PSB_MAX_RASTER_CMDS];
30577+ uint32_t oom_cmds[PSB_MAX_OOM_CMDS];
30578+ uint32_t ta_cmd_size;
30579+ uint32_t raster_cmd_size;
30580+ uint32_t oom_cmd_size;
30581+ uint32_t feedback_offset;
30582+ uint32_t ta_complete_action;
30583+ uint32_t raster_complete_action;
30584+ uint32_t hw_cookie;
30585+ uint32_t flags;
30586+ uint32_t reply_flags;
30587+ uint32_t aborting;
30588+ struct psb_xhw_buf buf;
30589+};
30590+
30591+struct psb_hw_scene {
30592+ struct list_head head;
30593+ uint32_t context_number;
30594+
30595+ /*
30596+ * This pointer does not refcount the last_scene_buffer,
30597+ * so we must make sure it is set to NULL before destroying
30598+ * the corresponding task.
30599+ */
30600+
30601+ struct psb_scene *last_scene;
30602+};
30603+
30604+struct psb_scene;
30605+struct drm_psb_private;
30606+
30607+struct psb_scheduler_seq {
30608+ uint32_t sequence;
30609+ int reported;
30610+};
30611+
30612+struct psb_scheduler {
30613+ struct drm_device *dev;
30614+ struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
30615+ struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
30616+ struct mutex task_wq_mutex;
30617+ struct mutex topaz_power_mutex;
30618+ struct mutex msvdx_power_mutex;
30619+ spinlock_t lock;
30620+ struct list_head hw_scenes;
30621+ struct list_head ta_queue;
30622+ struct list_head raster_queue;
30623+ struct list_head hp_raster_queue;
30624+ struct list_head task_done_queue;
30625+ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
30626+ struct psb_task *feedback_task;
30627+ int ta_state;
30628+ struct psb_hw_scene *pending_hw_scene;
30629+ uint32_t pending_hw_scene_seq;
30630+ struct delayed_work wq;
30631+ struct delayed_work topaz_suspend_wq;
30632+ struct delayed_work msvdx_suspend_wq;
30633+ struct psb_scene_pool *pool;
30634+ uint32_t idle_count;
30635+ int idle;
30636+ wait_queue_head_t idle_queue;
30637+ unsigned long ta_end_jiffies;
30638+ unsigned long total_ta_jiffies;
30639+ unsigned long raster_end_jiffies;
30640+ unsigned long total_raster_jiffies;
30641+};
30642+
30643+#define PSB_RF_FIRE_TA (1 << 0)
30644+#define PSB_RF_OOM (1 << 1)
30645+#define PSB_RF_OOM_REPLY (1 << 2)
30646+#define PSB_RF_TERMINATE (1 << 3)
30647+#define PSB_RF_TA_DONE (1 << 4)
30648+#define PSB_RF_FIRE_RASTER (1 << 5)
30649+#define PSB_RF_RASTER_DONE (1 << 6)
30650+#define PSB_RF_DEALLOC (1 << 7)
30651+
30652+extern struct psb_scene_pool *psb_alloc_scene_pool(struct drm_file *priv,
30653+ int shareable,
30654+ uint32_t w, uint32_t h);
30655+extern uint32_t psb_scene_handle(struct psb_scene *scene);
30656+extern int psb_scheduler_init(struct drm_device *dev,
30657+ struct psb_scheduler *scheduler);
30658+extern void psb_scheduler_takedown(struct psb_scheduler *scheduler);
30659+extern int psb_cmdbuf_ta(struct drm_file *priv,
30660+ struct psb_context *context,
30661+ struct drm_psb_cmdbuf_arg *arg,
30662+ struct ttm_buffer_object *cmd_buffer,
30663+ struct ttm_buffer_object *ta_buffer,
30664+ struct ttm_buffer_object *oom_buffer,
30665+ struct psb_scene *scene,
30666+ struct psb_feedback_info *feedback,
30667+ struct psb_ttm_fence_rep *fence_arg);
30668+extern int psb_cmdbuf_raster(struct drm_file *priv,
30669+ struct psb_context *context,
30670+ struct drm_psb_cmdbuf_arg *arg,
30671+ struct ttm_buffer_object *cmd_buffer,
30672+ struct psb_ttm_fence_rep *fence_arg);
30673+extern void psb_scheduler_handler(struct drm_psb_private *dev_priv,
30674+ uint32_t status);
30675+extern void psb_scheduler_pause(struct drm_psb_private *dev_priv);
30676+extern void psb_scheduler_restart(struct drm_psb_private *dev_priv);
30677+extern int psb_scheduler_idle(struct drm_psb_private *dev_priv);
30678+extern int psb_scheduler_finished(struct drm_psb_private *dev_priv);
30679+
30680+extern void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
30681+ int *lockup, int *idle);
30682+extern void psb_scheduler_reset(struct drm_psb_private *dev_priv,
30683+ int error_condition);
30684+extern int psb_forced_user_interrupt(struct drm_psb_private *dev_priv);
30685+extern void psb_scheduler_remove_scene_refs(struct psb_scene *scene);
30686+extern void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv);
30687+extern int psb_extend_timeout(struct drm_psb_private *dev_priv,
30688+ uint32_t xhw_lockup);
30689+
30690+#endif
30691diff --git a/drivers/gpu/drm/psb/psb_setup.c b/drivers/gpu/drm/psb/psb_setup.c
30692new file mode 100644
30693index 0000000..134ff08
30694--- /dev/null
30695+++ b/drivers/gpu/drm/psb/psb_setup.c
30696@@ -0,0 +1,18 @@
30697+#include <drm/drmP.h>
30698+#include <drm/drm.h>
30699+#include <drm/drm_crtc.h>
30700+#include <drm/drm_edid.h>
30701+#include "psb_intel_drv.h"
30702+#include "psb_drv.h"
30703+#include "psb_intel_reg.h"
30704+
30705+/* Fixed name */
30706+#define ACPI_EDID_LCD "\\_SB_.PCI0.GFX0.DD04._DDC"
30707+#define ACPI_DOD "\\_SB_.PCI0.GFX0._DOD"
30708+
30709+#include "psb_intel_i2c.c"
30710+#include "psb_intel_sdvo.c"
30711+#include "psb_intel_modes.c"
30712+#include "psb_intel_lvds.c"
30713+#include "psb_intel_dsi.c"
30714+#include "psb_intel_display.c"
30715diff --git a/drivers/gpu/drm/psb/psb_sgx.c b/drivers/gpu/drm/psb/psb_sgx.c
30716new file mode 100644
30717index 0000000..2c1f1a4
30718--- /dev/null
30719+++ b/drivers/gpu/drm/psb/psb_sgx.c
30720@@ -0,0 +1,1784 @@
30721+/**************************************************************************
30722+ * Copyright (c) 2007, Intel Corporation.
30723+ * All Rights Reserved.
30724+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX. USA.
30725+ * All Rights Reserved.
30726+ *
30727+ * This program is free software; you can redistribute it and/or modify it
30728+ * under the terms and conditions of the GNU General Public License,
30729+ * version 2, as published by the Free Software Foundation.
30730+ *
30731+ * This program is distributed in the hope it will be useful, but WITHOUT
30732+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
30733+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
30734+ * more details.
30735+ *
30736+ * You should have received a copy of the GNU General Public License along with
30737+ * this program; if not, write to the Free Software Foundation, Inc.,
30738+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
30739+ *
30740+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
30741+ * develop this driver.
30742+ *
30743+ **************************************************************************/
30744+/*
30745+ */
30746+
30747+#include <drm/drmP.h>
30748+#include "psb_drv.h"
30749+#include "psb_drm.h"
30750+#include "psb_reg.h"
30751+#include "psb_scene.h"
30752+#include "psb_msvdx.h"
30753+#include "lnc_topaz.h"
30754+#include "ttm/ttm_bo_api.h"
30755+#include "ttm/ttm_execbuf_util.h"
30756+#include "ttm/ttm_userobj_api.h"
30757+#include "ttm/ttm_placement_common.h"
30758+#include "psb_sgx.h"
30759+#include "psb_intel_reg.h"
30760+#include "psb_powermgmt.h"
30761+
30762+
30763+static inline int psb_same_page(unsigned long offset,
30764+ unsigned long offset2)
30765+{
30766+ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
30767+}
30768+
30769+static inline unsigned long psb_offset_end(unsigned long offset,
30770+ unsigned long end)
30771+{
30772+ offset = (offset + PAGE_SIZE) & PAGE_MASK;
30773+ return (end < offset) ? end : offset;
30774+}
30775+
30776+static void psb_idle_engine(struct drm_device *dev, int engine);
30777+
30778+struct psb_dstbuf_cache {
30779+ unsigned int dst;
30780+ struct ttm_buffer_object *dst_buf;
30781+ unsigned long dst_offset;
30782+ uint32_t *dst_page;
30783+ unsigned int dst_page_offset;
30784+ struct ttm_bo_kmap_obj dst_kmap;
30785+ bool dst_is_iomem;
30786+};
30787+
30788+struct psb_validate_buffer {
30789+ struct ttm_validate_buffer base;
30790+ struct psb_validate_req req;
30791+ int ret;
30792+ struct psb_validate_arg __user *user_val_arg;
30793+ uint32_t flags;
30794+ uint32_t offset;
30795+ int po_correct;
30796+};
30797+
30798+
30799+
30800+#define PSB_REG_GRAN_SHIFT 2
30801+#define PSB_REG_GRANULARITY (1 << PSB_REG_GRAN_SHIFT)
30802+#define PSB_MAX_REG 0x1000
30803+
30804+static const uint32_t disallowed_ranges[][2] = {
30805+ {0x0000, 0x0200},
30806+ {0x0208, 0x0214},
30807+ {0x021C, 0x0224},
30808+ {0x0230, 0x0234},
30809+ {0x0248, 0x024C},
30810+ {0x0254, 0x0358},
30811+ {0x0428, 0x0428},
30812+ {0x0430, 0x043C},
30813+ {0x0498, 0x04B4},
30814+ {0x04CC, 0x04D8},
30815+ {0x04E0, 0x07FC},
30816+ {0x0804, 0x0A14},
30817+ {0x0A4C, 0x0A58},
30818+ {0x0A68, 0x0A80},
30819+ {0x0AA0, 0x0B1C},
30820+ {0x0B2C, 0x0CAC},
30821+ {0x0CB4, PSB_MAX_REG - PSB_REG_GRANULARITY}
30822+};
30823+
30824+static uint32_t psb_disallowed_regs[PSB_MAX_REG /
30825+ (PSB_REG_GRANULARITY *
30826+ (sizeof(uint32_t) << 3))];
30827+
30828+static inline int psb_disallowed(uint32_t reg)
30829+{
30830+ reg >>= PSB_REG_GRAN_SHIFT;
30831+ return (psb_disallowed_regs[reg >> 5] & (1 << (reg & 31))) != 0;
30832+}
30833+
30834+void psb_init_disallowed(void)
30835+{
30836+ int i;
30837+ uint32_t reg, tmp;
30838+ static int initialized;
30839+
30840+ if (initialized)
30841+ return;
30842+
30843+ initialized = 1;
30844+ memset(psb_disallowed_regs, 0, sizeof(psb_disallowed_regs));
30845+
30846+ for (i = 0;
30847+ i < (sizeof(disallowed_ranges) / (2 * sizeof(uint32_t)));
30848+ ++i) {
30849+ for (reg = disallowed_ranges[i][0];
30850+ reg <= disallowed_ranges[i][1]; reg += 4) {
30851+ tmp = reg >> 2;
30852+ psb_disallowed_regs[tmp >> 5] |= (1 << (tmp & 31));
30853+ }
30854+ }
30855+}
30856+
30857+static int psb_memcpy_check(uint32_t *dst, const uint32_t *src,
30858+ uint32_t size)
30859+{
30860+ size >>= 3;
30861+ while (size--) {
30862+ if (unlikely((*src >= 0x1000) || psb_disallowed(*src))) {
30863+ DRM_ERROR("Forbidden SGX register access: "
30864+ "0x%04x.\n", *src);
30865+ return -EPERM;
30866+ }
30867+ *dst++ = *src++;
30868+ *dst++ = *src++;
30869+ }
30870+ return 0;
30871+}
30872+
30873+int psb_2d_wait_available(struct drm_psb_private *dev_priv,
30874+ unsigned size)
30875+{
30876+ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
30877+ int ret = 0;
30878+
30879+retry:
30880+ if (avail < size) {
30881+#if 0
30882+ /* We'd ideally
30883+ * like to have an IRQ-driven event here.
30884+ */
30885+
30886+ psb_2D_irq_on(dev_priv);
30887+ DRM_WAIT_ON(ret, dev_priv->event_2d_queue, DRM_HZ,
30888+ ((avail =
30889+ PSB_RSGX32(PSB_CR_2D_SOCIF)) >= size));
30890+ psb_2D_irq_off(dev_priv);
30891+ if (ret == 0)
30892+ return 0;
30893+ if (ret == -EINTR) {
30894+ ret = 0;
30895+ goto retry;
30896+ }
30897+#else
30898+ avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
30899+ goto retry;
30900+#endif
30901+ }
30902+ return ret;
30903+}
30904+
30905+int psb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
30906+ unsigned size)
30907+{
30908+ int ret = 0;
30909+ int i;
30910+ unsigned submit_size;
30911+
30912+ while (size > 0) {
30913+ submit_size = (size < 0x60) ? size : 0x60;
30914+ size -= submit_size;
30915+ ret = psb_2d_wait_available(dev_priv, submit_size);
30916+ if (ret)
30917+ return ret;
30918+
30919+ submit_size <<= 2;
30920+ mutex_lock(&dev_priv->reset_mutex);
30921+ for (i = 0; i < submit_size; i += 4) {
30922+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
30923+ }
30924+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
30925+ mutex_unlock(&dev_priv->reset_mutex);
30926+ }
30927+ return 0;
30928+}
30929+
30930+int psb_blit_sequence(struct drm_psb_private *dev_priv, uint32_t sequence)
30931+{
30932+ uint32_t buffer[8];
30933+ uint32_t *bufp = buffer;
30934+ int ret;
30935+
30936+ *bufp++ = PSB_2D_FENCE_BH;
30937+
30938+ *bufp++ = PSB_2D_DST_SURF_BH |
30939+ PSB_2D_DST_8888ARGB | (4 << PSB_2D_DST_STRIDE_SHIFT);
30940+ *bufp++ = dev_priv->comm_mmu_offset - dev_priv->mmu_2d_offset;
30941+
30942+ *bufp++ = PSB_2D_BLIT_BH |
30943+ PSB_2D_ROT_NONE |
30944+ PSB_2D_COPYORDER_TL2BR |
30945+ PSB_2D_DSTCK_DISABLE |
30946+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
30947+
30948+ *bufp++ = sequence << PSB_2D_FILLCOLOUR_SHIFT;
30949+ *bufp++ = (0 << PSB_2D_DST_XSTART_SHIFT) |
30950+ (0 << PSB_2D_DST_YSTART_SHIFT);
30951+ *bufp++ =
30952+ (1 << PSB_2D_DST_XSIZE_SHIFT) | (1 << PSB_2D_DST_YSIZE_SHIFT);
30953+
30954+ *bufp++ = PSB_2D_FLUSH_BH;
30955+
30956+ psb_2d_lock(dev_priv);
30957+ ret = psb_2d_submit(dev_priv, buffer, bufp - buffer);
30958+ psb_2d_unlock(dev_priv);
30959+
30960+ if (!ret)
30961+ psb_schedule_watchdog(dev_priv);
30962+ return ret;
30963+}
30964+
30965+int psb_emit_2d_copy_blit(struct drm_device *dev,
30966+ uint32_t src_offset,
30967+ uint32_t dst_offset, uint32_t pages,
30968+ int direction)
30969+{
30970+ uint32_t cur_pages;
30971+ struct drm_psb_private *dev_priv = dev->dev_private;
30972+ uint32_t buf[10];
30973+ uint32_t *bufp;
30974+ uint32_t xstart;
30975+ uint32_t ystart;
30976+ uint32_t blit_cmd;
30977+ uint32_t pg_add;
30978+ int ret = 0;
30979+
30980+ if (!dev_priv)
30981+ return 0;
30982+
30983+ if (direction) {
30984+ pg_add = (pages - 1) << PAGE_SHIFT;
30985+ src_offset += pg_add;
30986+ dst_offset += pg_add;
30987+ }
30988+
30989+ blit_cmd = PSB_2D_BLIT_BH |
30990+ PSB_2D_ROT_NONE |
30991+ PSB_2D_DSTCK_DISABLE |
30992+ PSB_2D_SRCCK_DISABLE |
30993+ PSB_2D_USE_PAT |
30994+ PSB_2D_ROP3_SRCCOPY |
30995+ (direction ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TL2BR);
30996+ xstart = (direction) ? ((PAGE_SIZE - 1) >> 2) : 0;
30997+
30998+ psb_2d_lock(dev_priv);
30999+ while (pages > 0) {
31000+ cur_pages = pages;
31001+ if (cur_pages > 2048)
31002+ cur_pages = 2048;
31003+ pages -= cur_pages;
31004+ ystart = (direction) ? cur_pages - 1 : 0;
31005+
31006+ bufp = buf;
31007+ *bufp++ = PSB_2D_FENCE_BH;
31008+
31009+ *bufp++ = PSB_2D_DST_SURF_BH | PSB_2D_DST_8888ARGB |
31010+ (PAGE_SIZE << PSB_2D_DST_STRIDE_SHIFT);
31011+ *bufp++ = dst_offset;
31012+ *bufp++ = PSB_2D_SRC_SURF_BH | PSB_2D_SRC_8888ARGB |
31013+ (PAGE_SIZE << PSB_2D_SRC_STRIDE_SHIFT);
31014+ *bufp++ = src_offset;
31015+ *bufp++ =
31016+ PSB_2D_SRC_OFF_BH | (xstart <<
31017+ PSB_2D_SRCOFF_XSTART_SHIFT) |
31018+ (ystart << PSB_2D_SRCOFF_YSTART_SHIFT);
31019+ *bufp++ = blit_cmd;
31020+ *bufp++ = (xstart << PSB_2D_DST_XSTART_SHIFT) |
31021+ (ystart << PSB_2D_DST_YSTART_SHIFT);
31022+ *bufp++ = ((PAGE_SIZE >> 2) << PSB_2D_DST_XSIZE_SHIFT) |
31023+ (cur_pages << PSB_2D_DST_YSIZE_SHIFT);
31024+
31025+ ret = psb_2d_submit(dev_priv, buf, bufp - buf);
31026+ if (ret)
31027+ goto out;
31028+ pg_add =
31029+ (cur_pages << PAGE_SHIFT) * ((direction) ? -1 : 1);
31030+ src_offset += pg_add;
31031+ dst_offset += pg_add;
31032+ }
31033+out:
31034+ psb_2d_unlock(dev_priv);
31035+ return ret;
31036+}
31037+
31038+void psb_init_2d(struct drm_psb_private *dev_priv)
31039+{
31040+ spin_lock_init(&dev_priv->sequence_lock);
31041+ psb_reset(dev_priv, 1);
31042+ dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
31043+ PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
31044+ (void) PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
31045+}
31046+
31047+int psb_idle_2d(struct drm_device *dev)
31048+{
31049+ struct drm_psb_private *dev_priv = dev->dev_private;
31050+ unsigned long _end = jiffies + DRM_HZ;
31051+ int busy = 0;
31052+ bool b_need_release = false;
31053+
31054+ if (!powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND)) {
31055+ if (!powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, false))
31056+ return 0;
31057+ else
31058+ b_need_release = true;
31059+ }
31060+
31061+ /*
31062+ * First idle the 2D engine.
31063+ */
31064+
31065+ if (dev_priv->engine_lockup_2d) {
31066+ busy = -EBUSY;
31067+ goto out;
31068+ }
31069+
31070+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
31071+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) ==
31072+ 0))
31073+ goto out;
31074+
31075+ do {
31076+ busy =
31077+ (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
31078+ } while (busy && !time_after_eq(jiffies, _end));
31079+
31080+ if (busy)
31081+ busy =
31082+ (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
31083+ if (busy)
31084+ goto out;
31085+
31086+ do {
31087+ busy =
31088+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
31089+ _PSB_C2B_STATUS_BUSY)
31090+ != 0);
31091+ } while (busy && !time_after_eq(jiffies, _end));
31092+ if (busy)
31093+ busy =
31094+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
31095+ _PSB_C2B_STATUS_BUSY)
31096+ != 0);
31097+
31098+out:
31099+ if (busy)
31100+ dev_priv->engine_lockup_2d = 1;
31101+
31102+ if (b_need_release)
31103+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
31104+
31105+ return (busy) ? -EBUSY : 0;
31106+}
31107+
31108+int psb_idle_3d(struct drm_device *dev)
31109+{
31110+ struct drm_psb_private *dev_priv = dev->dev_private;
31111+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
31112+ int ret;
31113+
31114+ ret = wait_event_timeout(scheduler->idle_queue,
31115+ psb_scheduler_finished(dev_priv),
31116+ DRM_HZ * 10);
31117+
31118+ /*
31119+ *
31120+ * wait_event_timeout - function returns 0 if the @timeout elapsed, and the remaining
31121+ * jiffies if the condition evaluated to true before the timeout elapsed.
31122+ *
31123+ */
31124+ if(ret == 0)
31125+ DRM_ERROR(" wait_event_timeout - timeout elapsed in waiting for scheduler wq \n");
31126+
31127+ return (ret < 1) ? -EBUSY : 0;
31128+}
31129+
31130+static int psb_check_presumed(struct psb_validate_req *req,
31131+ struct ttm_buffer_object *bo,
31132+ struct psb_validate_arg __user *data,
31133+ int *presumed_ok)
31134+{
31135+ struct psb_validate_req __user *user_req = &(data->d.req);
31136+
31137+ *presumed_ok = 0;
31138+
31139+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
31140+ *presumed_ok = 1;
31141+ return 0;
31142+ }
31143+
31144+ if (unlikely(!(req->presumed_flags & PSB_USE_PRESUMED)))
31145+ return 0;
31146+
31147+ if (bo->offset == req->presumed_gpu_offset) {
31148+ *presumed_ok = 1;
31149+ return 0;
31150+ }
31151+
31152+ return __put_user(req->presumed_flags & ~PSB_USE_PRESUMED,
31153+ &user_req->presumed_flags);
31154+}
31155+
31156+
31157+static void psb_unreference_buffers(struct psb_context *context)
31158+{
31159+ struct ttm_validate_buffer *entry, *next;
31160+ struct psb_validate_buffer *vbuf;
31161+ struct list_head *list = &context->validate_list;
31162+
31163+ list_for_each_entry_safe(entry, next, list, head) {
31164+ vbuf =
31165+ container_of(entry, struct psb_validate_buffer, base);
31166+ list_del(&entry->head);
31167+ ttm_bo_unref(&entry->bo);
31168+ }
31169+
31170+ list = &context->kern_validate_list;
31171+
31172+ list_for_each_entry_safe(entry, next, list, head) {
31173+ vbuf =
31174+ container_of(entry, struct psb_validate_buffer, base);
31175+ list_del(&entry->head);
31176+ ttm_bo_unref(&entry->bo);
31177+ }
31178+}
31179+
31180+
31181+static int psb_lookup_validate_buffer(struct drm_file *file_priv,
31182+ uint64_t data,
31183+ struct psb_validate_buffer *item)
31184+{
31185+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
31186+
31187+ item->user_val_arg =
31188+ (struct psb_validate_arg __user *) (unsigned long) data;
31189+
31190+ if (unlikely(copy_from_user(&item->req, &item->user_val_arg->d.req,
31191+ sizeof(item->req)) != 0)) {
31192+ DRM_ERROR("Lookup copy fault.\n");
31193+ return -EFAULT;
31194+ }
31195+
31196+ item->base.bo =
31197+ ttm_buffer_object_lookup(tfile, item->req.buffer_handle);
31198+
31199+ if (unlikely(item->base.bo == NULL)) {
31200+ DRM_ERROR("Bo lookup fault.\n");
31201+ return -EINVAL;
31202+ }
31203+
31204+ return 0;
31205+}
31206+
31207+static int psb_reference_buffers(struct drm_file *file_priv,
31208+ uint64_t data,
31209+ struct psb_context *context)
31210+{
31211+ struct psb_validate_buffer *item;
31212+ int ret;
31213+
31214+ while (likely(data != 0)) {
31215+ if (unlikely(context->used_buffers >=
31216+ PSB_NUM_VALIDATE_BUFFERS)) {
31217+ DRM_ERROR("Too many buffers "
31218+ "on validate list.\n");
31219+ ret = -EINVAL;
31220+ goto out_err0;
31221+ }
31222+
31223+ item = &context->buffers[context->used_buffers];
31224+
31225+ ret = psb_lookup_validate_buffer(file_priv, data, item);
31226+ if (unlikely(ret != 0))
31227+ goto out_err0;
31228+
31229+ item->base.reserved = 0;
31230+ list_add_tail(&item->base.head, &context->validate_list);
31231+ context->used_buffers++;
31232+ data = item->req.next;
31233+ }
31234+ return 0;
31235+
31236+out_err0:
31237+ psb_unreference_buffers(context);
31238+ return ret;
31239+}
31240+
31241+static int
31242+psb_placement_fence_type(struct ttm_buffer_object *bo,
31243+ uint64_t set_val_flags,
31244+ uint64_t clr_val_flags,
31245+ uint32_t new_fence_class,
31246+ uint32_t *new_fence_type)
31247+{
31248+ int ret;
31249+ uint32_t n_fence_type;
31250+ uint32_t set_flags = set_val_flags & 0xFFFFFFFF;
31251+ uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF;
31252+ struct ttm_fence_object *old_fence;
31253+ uint32_t old_fence_type;
31254+
31255+ if (unlikely
31256+ (!(set_val_flags &
31257+ (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) {
31258+ DRM_ERROR
31259+ ("GPU access type (read / write) is not indicated.\n");
31260+ return -EINVAL;
31261+ }
31262+
31263+ ret = ttm_bo_check_placement(bo, set_flags, clr_flags);
31264+ if (unlikely(ret != 0))
31265+ return ret;
31266+
31267+ switch (new_fence_class) {
31268+ case PSB_ENGINE_TA:
31269+ n_fence_type = _PSB_FENCE_TYPE_EXE |
31270+ _PSB_FENCE_TYPE_TA_DONE | _PSB_FENCE_TYPE_RASTER_DONE;
31271+ if (set_val_flags & PSB_BO_FLAG_TA)
31272+ n_fence_type &= ~_PSB_FENCE_TYPE_RASTER_DONE;
31273+ if (set_val_flags & PSB_BO_FLAG_COMMAND)
31274+ n_fence_type &=
31275+ ~(_PSB_FENCE_TYPE_RASTER_DONE |
31276+ _PSB_FENCE_TYPE_TA_DONE);
31277+ if (set_val_flags & PSB_BO_FLAG_SCENE)
31278+ n_fence_type |= _PSB_FENCE_TYPE_SCENE_DONE;
31279+ if (set_val_flags & PSB_BO_FLAG_FEEDBACK)
31280+ n_fence_type |= _PSB_FENCE_TYPE_FEEDBACK;
31281+ break;
31282+ default:
31283+ n_fence_type = _PSB_FENCE_TYPE_EXE;
31284+ }
31285+
31286+ *new_fence_type = n_fence_type;
31287+ old_fence = (struct ttm_fence_object *) bo->sync_obj;
31288+ old_fence_type = (uint32_t) (unsigned long) bo->sync_obj_arg;
31289+
31290+ if (old_fence && ((new_fence_class != old_fence->fence_class) ||
31291+ ((n_fence_type ^ old_fence_type) &
31292+ old_fence_type))) {
31293+ ret = ttm_bo_wait(bo, 0, 1, 0);
31294+ if (unlikely(ret != 0))
31295+ return ret;
31296+ }
31297+
31298+ bo->proposed_flags = (bo->proposed_flags | set_flags)
31299+ & ~clr_flags & TTM_PL_MASK_MEMTYPE;
31300+
31301+ return 0;
31302+}
31303+
31304+int psb_validate_kernel_buffer(struct psb_context *context,
31305+ struct ttm_buffer_object *bo,
31306+ uint32_t fence_class,
31307+ uint64_t set_flags, uint64_t clr_flags)
31308+{
31309+ struct psb_validate_buffer *item;
31310+ uint32_t cur_fence_type;
31311+ int ret;
31312+
31313+ if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) {
31314+ DRM_ERROR("Out of free validation buffer entries for "
31315+ "kernel buffer validation.\n");
31316+ return -ENOMEM;
31317+ }
31318+
31319+ item = &context->buffers[context->used_buffers];
31320+ item->user_val_arg = NULL;
31321+ item->base.reserved = 0;
31322+
31323+ ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq);
31324+ if (unlikely(ret != 0))
31325+ goto out_unlock;
31326+
31327+ mutex_lock(&bo->mutex);
31328+ ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class,
31329+ &cur_fence_type);
31330+ if (unlikely(ret != 0)) {
31331+ ttm_bo_unreserve(bo);
31332+ goto out_unlock;
31333+ }
31334+
31335+ item->base.bo = ttm_bo_reference(bo);
31336+ item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type;
31337+ item->base.reserved = 1;
31338+
31339+ list_add_tail(&item->base.head, &context->kern_validate_list);
31340+ context->used_buffers++;
31341+
31342+ ret = ttm_buffer_object_validate(bo, 1, 0);
31343+ if (unlikely(ret != 0))
31344+ goto out_unlock;
31345+
31346+ item->offset = bo->offset;
31347+ item->flags = bo->mem.flags;
31348+ context->fence_types |= cur_fence_type;
31349+
31350+out_unlock:
31351+ mutex_unlock(&bo->mutex);
31352+ return ret;
31353+}
31354+
31355+
31356+static int psb_validate_buffer_list(struct drm_file *file_priv,
31357+ uint32_t fence_class,
31358+ struct psb_context *context,
31359+ int *po_correct)
31360+{
31361+ struct psb_validate_buffer *item;
31362+ struct ttm_buffer_object *bo;
31363+ int ret;
31364+ struct psb_validate_req *req;
31365+ uint32_t fence_types = 0;
31366+ uint32_t cur_fence_type;
31367+ struct ttm_validate_buffer *entry;
31368+ struct list_head *list = &context->validate_list;
31369+
31370+ *po_correct = 1;
31371+
31372+ list_for_each_entry(entry, list, head) {
31373+ item =
31374+ container_of(entry, struct psb_validate_buffer, base);
31375+ bo = entry->bo;
31376+ item->ret = 0;
31377+ req = &item->req;
31378+
31379+ mutex_lock(&bo->mutex);
31380+ ret = psb_placement_fence_type(bo,
31381+ req->set_flags,
31382+ req->clear_flags,
31383+ fence_class,
31384+ &cur_fence_type);
31385+ if (unlikely(ret != 0))
31386+ goto out_err;
31387+
31388+ ret = ttm_buffer_object_validate(bo, 1, 0);
31389+
31390+ if (unlikely(ret != 0))
31391+ goto out_err;
31392+
31393+ fence_types |= cur_fence_type;
31394+ entry->new_sync_obj_arg = (void *)
31395+ (unsigned long) cur_fence_type;
31396+
31397+ item->offset = bo->offset;
31398+ item->flags = bo->mem.flags;
31399+ mutex_unlock(&bo->mutex);
31400+
31401+ ret =
31402+ psb_check_presumed(&item->req, bo, item->user_val_arg,
31403+ &item->po_correct);
31404+ if (unlikely(ret != 0))
31405+ goto out_err;
31406+
31407+ if (unlikely(!item->po_correct))
31408+ *po_correct = 0;
31409+
31410+ item++;
31411+ }
31412+
31413+ context->fence_types |= fence_types;
31414+
31415+ return 0;
31416+out_err:
31417+ mutex_unlock(&bo->mutex);
31418+ item->ret = ret;
31419+ return ret;
31420+}
31421+
31422+
31423+int
31424+psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t *regs,
31425+ unsigned int cmds)
31426+{
31427+ int i;
31428+
31429+ /*
31430+ * cmds is 32-bit words.
31431+ */
31432+
31433+ cmds >>= 1;
31434+ for (i = 0; i < cmds; ++i) {
31435+ PSB_WSGX32(regs[1], regs[0]);
31436+ regs += 2;
31437+ }
31438+ wmb();
31439+ return 0;
31440+}
31441+
31442+/*
31443+ * Security: Block user-space writing to MMU mapping registers.
31444+ * This is important for security and brings Poulsbo DRM
31445+ * up to par with the other DRM drivers. Using this,
31446+ * user-space should not be able to map arbitrary memory
31447+ * pages to graphics memory, but all user-space processes
31448+ * basically have access to all buffer objects mapped to
31449+ * graphics memory.
31450+ */
31451+
31452+int
31453+psb_submit_copy_cmdbuf(struct drm_device *dev,
31454+ struct ttm_buffer_object *cmd_buffer,
31455+ unsigned long cmd_offset,
31456+ unsigned long cmd_size,
31457+ int engine, uint32_t *copy_buffer)
31458+{
31459+ unsigned long cmd_end = cmd_offset + (cmd_size << 2);
31460+ struct drm_psb_private *dev_priv = dev->dev_private;
31461+ unsigned long cmd_page_offset =
31462+ cmd_offset - (cmd_offset & PAGE_MASK);
31463+ unsigned long cmd_next;
31464+ struct ttm_bo_kmap_obj cmd_kmap;
31465+ uint32_t *cmd_page;
31466+ unsigned cmds;
31467+ bool is_iomem;
31468+ int ret = 0;
31469+
31470+ if (cmd_size == 0)
31471+ return 0;
31472+
31473+ if (engine == PSB_ENGINE_2D)
31474+ psb_2d_lock(dev_priv);
31475+
31476+ do {
31477+ cmd_next = psb_offset_end(cmd_offset, cmd_end);
31478+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT,
31479+ 1, &cmd_kmap);
31480+
31481+ if (ret) {
31482+ if (engine == PSB_ENGINE_2D)
31483+ psb_2d_unlock(dev_priv);
31484+ return ret;
31485+ }
31486+ cmd_page = ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem);
31487+ cmd_page_offset = (cmd_offset & ~PAGE_MASK) >> 2;
31488+ cmds = (cmd_next - cmd_offset) >> 2;
31489+
31490+ switch (engine) {
31491+ case PSB_ENGINE_2D:
31492+ ret =
31493+ psb_2d_submit(dev_priv,
31494+ cmd_page + cmd_page_offset,
31495+ cmds);
31496+ break;
31497+ case PSB_ENGINE_RASTERIZER:
31498+ case PSB_ENGINE_TA:
31499+ case PSB_ENGINE_HPRAST:
31500+ PSB_DEBUG_GENERAL("Reg copy.\n");
31501+ ret = psb_memcpy_check(copy_buffer,
31502+ cmd_page + cmd_page_offset,
31503+ cmds * sizeof(uint32_t));
31504+ copy_buffer += cmds;
31505+ break;
31506+ default:
31507+ ret = -EINVAL;
31508+ }
31509+ ttm_bo_kunmap(&cmd_kmap);
31510+ if (ret)
31511+ break;
31512+ } while (cmd_offset = cmd_next, cmd_offset != cmd_end);
31513+
31514+ if (engine == PSB_ENGINE_2D)
31515+ psb_2d_unlock(dev_priv);
31516+
31517+ return ret;
31518+}
31519+
31520+static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
31521+{
31522+ if (dst_cache->dst_page) {
31523+ ttm_bo_kunmap(&dst_cache->dst_kmap);
31524+ dst_cache->dst_page = NULL;
31525+ }
31526+ dst_cache->dst_buf = NULL;
31527+ dst_cache->dst = ~0;
31528+}
31529+
31530+static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
31531+ struct psb_validate_buffer *buffers,
31532+ unsigned int dst,
31533+ unsigned long dst_offset)
31534+{
31535+ int ret;
31536+
31537+ PSB_DEBUG_GENERAL("Destination buffer is %d.\n", dst);
31538+
31539+ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
31540+ psb_clear_dstbuf_cache(dst_cache);
31541+ dst_cache->dst = dst;
31542+ dst_cache->dst_buf = buffers[dst].base.bo;
31543+ }
31544+
31545+ if (unlikely
31546+ (dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
31547+ DRM_ERROR("Relocation destination out of bounds.\n");
31548+ return -EINVAL;
31549+ }
31550+
31551+ if (!psb_same_page(dst_cache->dst_offset, dst_offset) ||
31552+ NULL == dst_cache->dst_page) {
31553+ if (NULL != dst_cache->dst_page) {
31554+ ttm_bo_kunmap(&dst_cache->dst_kmap);
31555+ dst_cache->dst_page = NULL;
31556+ }
31557+
31558+ ret =
31559+ ttm_bo_kmap(dst_cache->dst_buf,
31560+ dst_offset >> PAGE_SHIFT, 1,
31561+ &dst_cache->dst_kmap);
31562+ if (ret) {
31563+ DRM_ERROR("Could not map destination buffer for "
31564+ "relocation.\n");
31565+ return ret;
31566+ }
31567+
31568+ dst_cache->dst_page =
31569+ ttm_kmap_obj_virtual(&dst_cache->dst_kmap,
31570+ &dst_cache->dst_is_iomem);
31571+ dst_cache->dst_offset = dst_offset & PAGE_MASK;
31572+ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
31573+ }
31574+ return 0;
31575+}
31576+
31577+static int psb_apply_reloc(struct drm_psb_private *dev_priv,
31578+ uint32_t fence_class,
31579+ const struct drm_psb_reloc *reloc,
31580+ struct psb_validate_buffer *buffers,
31581+ int num_buffers,
31582+ struct psb_dstbuf_cache *dst_cache,
31583+ int no_wait, int interruptible)
31584+{
31585+ uint32_t val;
31586+ uint32_t background;
31587+ unsigned int index;
31588+ int ret;
31589+ unsigned int shift;
31590+ unsigned int align_shift;
31591+ struct ttm_buffer_object *reloc_bo;
31592+
31593+
31594+ PSB_DEBUG_GENERAL("Reloc type %d\n"
31595+ "\t where 0x%04x\n"
31596+ "\t buffer 0x%04x\n"
31597+ "\t mask 0x%08x\n"
31598+ "\t shift 0x%08x\n"
31599+ "\t pre_add 0x%08x\n"
31600+ "\t background 0x%08x\n"
31601+ "\t dst_buffer 0x%08x\n"
31602+ "\t arg0 0x%08x\n"
31603+ "\t arg1 0x%08x\n",
31604+ reloc->reloc_op,
31605+ reloc->where,
31606+ reloc->buffer,
31607+ reloc->mask,
31608+ reloc->shift,
31609+ reloc->pre_add,
31610+ reloc->background,
31611+ reloc->dst_buffer, reloc->arg0, reloc->arg1);
31612+
31613+ if (unlikely(reloc->buffer >= num_buffers)) {
31614+ DRM_ERROR("Illegal relocation buffer %d.\n",
31615+ reloc->buffer);
31616+ return -EINVAL;
31617+ }
31618+
31619+ if (buffers[reloc->buffer].po_correct)
31620+ return 0;
31621+
31622+ if (unlikely(reloc->dst_buffer >= num_buffers)) {
31623+ DRM_ERROR
31624+ ("Illegal destination buffer for relocation %d.\n",
31625+ reloc->dst_buffer);
31626+ return -EINVAL;
31627+ }
31628+
31629+ ret =
31630+ psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
31631+ reloc->where << 2);
31632+ if (ret)
31633+ return ret;
31634+
31635+ reloc_bo = buffers[reloc->buffer].base.bo;
31636+
31637+ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
31638+ DRM_ERROR("Illegal relocation offset add.\n");
31639+ return -EINVAL;
31640+ }
31641+
31642+ switch (reloc->reloc_op) {
31643+ case PSB_RELOC_OP_OFFSET:
31644+ val = reloc_bo->offset + reloc->pre_add;
31645+ break;
31646+ case PSB_RELOC_OP_2D_OFFSET:
31647+ val = reloc_bo->offset + reloc->pre_add -
31648+ dev_priv->mmu_2d_offset;
31649+ if (unlikely(val >= PSB_2D_SIZE)) {
31650+ DRM_ERROR("2D relocation out of bounds\n");
31651+ return -EINVAL;
31652+ }
31653+ break;
31654+ case PSB_RELOC_OP_PDS_OFFSET:
31655+ val =
31656+ reloc_bo->offset + reloc->pre_add - PSB_MEM_PDS_START;
31657+ if (unlikely
31658+ (val >= (PSB_MEM_MMU_START - PSB_MEM_PDS_START))) {
31659+ DRM_ERROR("PDS relocation out of bounds\n");
31660+ return -EINVAL;
31661+ }
31662+ break;
31663+ default:
31664+ DRM_ERROR("Unimplemented relocation.\n");
31665+ return -EINVAL;
31666+ }
31667+
31668+ shift =
31669+ (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
31670+ align_shift =
31671+ (reloc->
31672+ shift & PSB_RELOC_ALSHIFT_MASK) >> PSB_RELOC_ALSHIFT_SHIFT;
31673+
31674+ val = ((val >> align_shift) << shift);
31675+ index = reloc->where - dst_cache->dst_page_offset;
31676+
31677+ background = reloc->background;
31678+ val = (background & ~reloc->mask) | (val & reloc->mask);
31679+ dst_cache->dst_page[index] = val;
31680+
31681+ PSB_DEBUG_GENERAL("Reloc buffer %d index 0x%08x, value 0x%08x\n",
31682+ reloc->dst_buffer, index,
31683+ dst_cache->dst_page[index]);
31684+
31685+ return 0;
31686+}
31687+
31688+static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
31689+ unsigned int num_pages)
31690+{
31691+ int ret = 0;
31692+
31693+ spin_lock(&dev_priv->reloc_lock);
31694+ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
31695+ dev_priv->rel_mapped_pages += num_pages;
31696+ ret = 1;
31697+ }
31698+ spin_unlock(&dev_priv->reloc_lock);
31699+ return ret;
31700+}
31701+
31702+static int psb_fixup_relocs(struct drm_file *file_priv,
31703+ uint32_t fence_class,
31704+ unsigned int num_relocs,
31705+ unsigned int reloc_offset,
31706+ uint32_t reloc_handle,
31707+ struct psb_context *context,
31708+ int no_wait, int interruptible)
31709+{
31710+ struct drm_device *dev = file_priv->minor->dev;
31711+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
31712+ struct drm_psb_private *dev_priv =
31713+ (struct drm_psb_private *) dev->dev_private;
31714+ struct ttm_buffer_object *reloc_buffer = NULL;
31715+ unsigned int reloc_num_pages;
31716+ unsigned int reloc_first_page;
31717+ unsigned int reloc_last_page;
31718+ struct psb_dstbuf_cache dst_cache;
31719+ struct drm_psb_reloc *reloc;
31720+ struct ttm_bo_kmap_obj reloc_kmap;
31721+ bool reloc_is_iomem;
31722+ int count;
31723+ int ret = 0;
31724+ int registered = 0;
31725+ uint32_t num_buffers = context->used_buffers;
31726+
31727+ if (num_relocs == 0)
31728+ return 0;
31729+
31730+ memset(&dst_cache, 0, sizeof(dst_cache));
31731+ memset(&reloc_kmap, 0, sizeof(reloc_kmap));
31732+
31733+ reloc_buffer = ttm_buffer_object_lookup(tfile, reloc_handle);
31734+ if (!reloc_buffer)
31735+ goto out;
31736+
31737+ if (unlikely(atomic_read(&reloc_buffer->reserved) != 1)) {
31738+ DRM_ERROR("Relocation buffer was not on validate list.\n");
31739+ ret = -EINVAL;
31740+ goto out;
31741+ }
31742+
31743+ reloc_first_page = reloc_offset >> PAGE_SHIFT;
31744+ reloc_last_page =
31745+ (reloc_offset +
31746+ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
31747+ reloc_num_pages = reloc_last_page - reloc_first_page + 1;
31748+ reloc_offset &= ~PAGE_MASK;
31749+
31750+ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
31751+ DRM_ERROR("Relocation buffer is too large\n");
31752+ ret = -EINVAL;
31753+ goto out;
31754+ }
31755+
31756+ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
31757+ (registered =
31758+ psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
31759+
31760+ if (ret == -EINTR) {
31761+ ret = -ERESTART;
31762+ goto out;
31763+ }
31764+ if (ret) {
31765+ DRM_ERROR("Error waiting for space to map "
31766+ "relocation buffer.\n");
31767+ goto out;
31768+ }
31769+
31770+ ret = ttm_bo_kmap(reloc_buffer, reloc_first_page,
31771+ reloc_num_pages, &reloc_kmap);
31772+
31773+ if (ret) {
31774+ DRM_ERROR("Could not map relocation buffer.\n"
31775+ "\tReloc buffer id 0x%08x.\n"
31776+ "\tReloc first page %d.\n"
31777+ "\tReloc num pages %d.\n",
31778+ reloc_handle, reloc_first_page, reloc_num_pages);
31779+ goto out;
31780+ }
31781+
31782+ reloc = (struct drm_psb_reloc *)
31783+ ((unsigned long)
31784+ ttm_kmap_obj_virtual(&reloc_kmap,
31785+ &reloc_is_iomem) + reloc_offset);
31786+
31787+ for (count = 0; count < num_relocs; ++count) {
31788+ ret = psb_apply_reloc(dev_priv, fence_class,
31789+ reloc, context->buffers,
31790+ num_buffers, &dst_cache,
31791+ no_wait, interruptible);
31792+ if (ret)
31793+ goto out1;
31794+ reloc++;
31795+ }
31796+
31797+out1:
31798+ ttm_bo_kunmap(&reloc_kmap);
31799+out:
31800+ if (registered) {
31801+ spin_lock(&dev_priv->reloc_lock);
31802+ dev_priv->rel_mapped_pages -= reloc_num_pages;
31803+ spin_unlock(&dev_priv->reloc_lock);
31804+ DRM_WAKEUP(&dev_priv->rel_mapped_queue);
31805+ }
31806+
31807+ psb_clear_dstbuf_cache(&dst_cache);
31808+ if (reloc_buffer)
31809+ ttm_bo_unref(&reloc_buffer);
31810+ return ret;
31811+}
31812+
31813+void psb_fence_or_sync(struct drm_file *file_priv,
31814+ uint32_t engine,
31815+ uint32_t fence_types,
31816+ uint32_t fence_flags,
31817+ struct list_head *list,
31818+ struct psb_ttm_fence_rep *fence_arg,
31819+ struct ttm_fence_object **fence_p)
31820+{
31821+ struct drm_device *dev = file_priv->minor->dev;
31822+ struct drm_psb_private *dev_priv = psb_priv(dev);
31823+ struct ttm_fence_device *fdev = &dev_priv->fdev;
31824+ int ret;
31825+ struct ttm_fence_object *fence;
31826+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
31827+ uint32_t handle;
31828+
31829+ ret = ttm_fence_user_create(fdev, tfile,
31830+ engine, fence_types,
31831+ TTM_FENCE_FLAG_EMIT, &fence, &handle);
31832+ if (ret) {
31833+
31834+ /*
31835+ * Fence creation failed.
31836+ * Fall back to synchronous operation and idle the engine.
31837+ */
31838+
31839+ psb_idle_engine(dev, engine);
31840+ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
31841+
31842+ /*
31843+ * Communicate to user-space that
31844+ * fence creation has failed and that
31845+ * the engine is idle.
31846+ */
31847+
31848+ fence_arg->handle = ~0;
31849+ fence_arg->error = ret;
31850+ }
31851+
31852+ ttm_eu_backoff_reservation(list);
31853+ if (fence_p)
31854+ *fence_p = NULL;
31855+ return;
31856+ }
31857+
31858+ ttm_eu_fence_buffer_objects(list, fence);
31859+ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
31860+ struct ttm_fence_info info = ttm_fence_get_info(fence);
31861+ fence_arg->handle = handle;
31862+ fence_arg->fence_class = ttm_fence_class(fence);
31863+ fence_arg->fence_type = ttm_fence_types(fence);
31864+ fence_arg->signaled_types = info.signaled_types;
31865+ fence_arg->error = 0;
31866+ } else {
31867+ ret =
31868+ ttm_ref_object_base_unref(tfile, handle,
31869+ ttm_fence_type);
31870+ BUG_ON(ret);
31871+ }
31872+
31873+ if (fence_p)
31874+ *fence_p = fence;
31875+ else if (fence)
31876+ ttm_fence_object_unref(&fence);
31877+}
31878+
31879+
31880+
31881+static int psb_cmdbuf_2d(struct drm_file *priv,
31882+ struct list_head *validate_list,
31883+ uint32_t fence_type,
31884+ struct drm_psb_cmdbuf_arg *arg,
31885+ struct ttm_buffer_object *cmd_buffer,
31886+ struct psb_ttm_fence_rep *fence_arg)
31887+{
31888+ struct drm_device *dev = priv->minor->dev;
31889+ int ret;
31890+
31891+ ret = psb_submit_copy_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
31892+ arg->cmdbuf_size, PSB_ENGINE_2D,
31893+ NULL);
31894+ if (ret)
31895+ goto out_unlock;
31896+
31897+ psb_fence_or_sync(priv, PSB_ENGINE_2D, fence_type,
31898+ arg->fence_flags, validate_list, fence_arg,
31899+ NULL);
31900+
31901+ mutex_lock(&cmd_buffer->mutex);
31902+ if (cmd_buffer->sync_obj != NULL)
31903+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
31904+ mutex_unlock(&cmd_buffer->mutex);
31905+out_unlock:
31906+ return ret;
31907+}
31908+
31909+#if 0
31910+static int psb_dump_page(struct ttm_buffer_object *bo,
31911+ unsigned int page_offset, unsigned int num)
31912+{
31913+ struct ttm_bo_kmap_obj kmobj;
31914+ int is_iomem;
31915+ uint32_t *p;
31916+ int ret;
31917+ unsigned int i;
31918+
31919+ ret = ttm_bo_kmap(bo, page_offset, 1, &kmobj);
31920+ if (ret)
31921+ return ret;
31922+
31923+ p = ttm_kmap_obj_virtual(&kmobj, &is_iomem);
31924+ for (i = 0; i < num; ++i)
31925+ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
31926+
31927+ ttm_bo_kunmap(&kmobj);
31928+ return 0;
31929+}
31930+#endif
31931+
31932+static void psb_idle_engine(struct drm_device *dev, int engine)
31933+{
31934+ struct drm_psb_private *dev_priv =
31935+ (struct drm_psb_private *) dev->dev_private;
31936+ uint32_t dummy;
31937+ unsigned long dummy2;
31938+
31939+ switch (engine) {
31940+ case PSB_ENGINE_2D:
31941+
31942+ /*
31943+ * Make sure we flush 2D properly using a dummy
31944+ * fence sequence emit.
31945+ */
31946+
31947+ (void) psb_fence_emit_sequence(&dev_priv->fdev,
31948+ PSB_ENGINE_2D, 0,
31949+ &dummy, &dummy2);
31950+ psb_2d_lock(dev_priv);
31951+ (void) psb_idle_2d(dev);
31952+ psb_2d_unlock(dev_priv);
31953+ break;
31954+ case PSB_ENGINE_TA:
31955+ case PSB_ENGINE_RASTERIZER:
31956+ case PSB_ENGINE_HPRAST:
31957+ (void) psb_idle_3d(dev);
31958+ break;
31959+ default:
31960+
31961+ /*
31962+ * FIXME: Insert video engine idle command here.
31963+ */
31964+
31965+ break;
31966+ }
31967+}
31968+
31969+static int psb_handle_copyback(struct drm_device *dev,
31970+ struct psb_context *context,
31971+ int ret)
31972+{
31973+ int err = ret;
31974+ struct ttm_validate_buffer *entry;
31975+ struct psb_validate_arg arg;
31976+ struct list_head *list = &context->validate_list;
31977+
31978+ if (ret) {
31979+ ttm_eu_backoff_reservation(list);
31980+ ttm_eu_backoff_reservation(&context->kern_validate_list);
31981+ }
31982+
31983+
31984+ if (ret != -EAGAIN && ret != -EINTR && ret != -ERESTART) {
31985+ list_for_each_entry(entry, list, head) {
31986+ struct psb_validate_buffer *vbuf =
31987+ container_of(entry, struct psb_validate_buffer,
31988+ base);
31989+ arg.handled = 1;
31990+ arg.ret = vbuf->ret;
31991+ if (!arg.ret) {
31992+ struct ttm_buffer_object *bo = entry->bo;
31993+ mutex_lock(&bo->mutex);
31994+ arg.d.rep.gpu_offset = bo->offset;
31995+ arg.d.rep.placement = bo->mem.flags;
31996+ arg.d.rep.fence_type_mask =
31997+ (uint32_t) (unsigned long)
31998+ entry->new_sync_obj_arg;
31999+ mutex_unlock(&bo->mutex);
32000+ }
32001+
32002+ if (__copy_to_user(vbuf->user_val_arg,
32003+ &arg, sizeof(arg)))
32004+ err = -EFAULT;
32005+
32006+ if (arg.ret)
32007+ break;
32008+ }
32009+ }
32010+
32011+ return err;
32012+}
32013+
32014+
32015+
32016+static int psb_feedback_buf(struct ttm_object_file *tfile,
32017+ struct psb_context *context,
32018+ uint32_t feedback_ops,
32019+ uint32_t handle,
32020+ uint32_t offset,
32021+ uint32_t feedback_breakpoints,
32022+ uint32_t feedback_size,
32023+ struct psb_feedback_info *feedback)
32024+{
32025+ struct ttm_buffer_object *bo;
32026+ struct page *page;
32027+ uint32_t page_no;
32028+ uint32_t page_offset;
32029+ int ret;
32030+
32031+ if (feedback_ops & ~PSB_FEEDBACK_OP_VISTEST) {
32032+ DRM_ERROR("Illegal feedback op.\n");
32033+ return -EINVAL;
32034+ }
32035+
32036+ if (feedback_breakpoints != 0) {
32037+ DRM_ERROR("Feedback breakpoints not implemented yet.\n");
32038+ return -EINVAL;
32039+ }
32040+
32041+ if (feedback_size < PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) {
32042+ DRM_ERROR("Feedback buffer size too small.\n");
32043+ return -EINVAL;
32044+ }
32045+
32046+ page_offset = offset & ~PAGE_MASK;
32047+ if ((PAGE_SIZE - PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t))
32048+ < page_offset) {
32049+ DRM_ERROR("Illegal feedback buffer alignment.\n");
32050+ return -EINVAL;
32051+ }
32052+
32053+ bo = ttm_buffer_object_lookup(tfile, handle);
32054+ if (unlikely(bo == NULL)) {
32055+ DRM_ERROR("Failed looking up feedback buffer.\n");
32056+ return -EINVAL;
32057+ }
32058+
32059+
32060+ ret = psb_validate_kernel_buffer(context, bo,
32061+ PSB_ENGINE_TA,
32062+ TTM_PL_FLAG_SYSTEM |
32063+ TTM_PL_FLAG_CACHED |
32064+ PSB_GPU_ACCESS_WRITE |
32065+ PSB_BO_FLAG_FEEDBACK,
32066+ TTM_PL_MASK_MEM &
32067+ ~(TTM_PL_FLAG_SYSTEM |
32068+ TTM_PL_FLAG_CACHED));
32069+ if (unlikely(ret != 0))
32070+ goto out_unref;
32071+
32072+ page_no = offset >> PAGE_SHIFT;
32073+ if (unlikely(page_no >= bo->num_pages)) {
32074+ ret = -EINVAL;
32075+ DRM_ERROR("Illegal feedback buffer offset.\n");
32076+ goto out_unref;
32077+ }
32078+
32079+ if (unlikely(bo->ttm == NULL)) {
32080+ ret = -EINVAL;
32081+ DRM_ERROR("Vistest buffer without TTM.\n");
32082+ goto out_unref;
32083+ }
32084+
32085+ page = ttm_tt_get_page(bo->ttm, page_no);
32086+ if (unlikely(page == NULL)) {
32087+ ret = -ENOMEM;
32088+ goto out_unref;
32089+ }
32090+
32091+ feedback->page = page;
32092+ feedback->offset = page_offset;
32093+
32094+ /*
32095+ * Note: bo referece transferred.
32096+ */
32097+
32098+ feedback->bo = bo;
32099+ return 0;
32100+
32101+out_unref:
32102+ ttm_bo_unref(&bo);
32103+ return ret;
32104+}
32105+
32106+inline int psb_try_power_down_sgx(struct drm_device *dev)
32107+{
32108+ if(powermgmt_is_gfx_busy()){
32109+ return 0;
32110+ }
32111+
32112+ return powermgmt_suspend_islands(dev->pdev, PSB_GRAPHICS_ISLAND, false);
32113+}
32114+
32115+void psb_init_ospm(struct drm_psb_private *dev_priv)
32116+{
32117+ static int init;
32118+ if (!init) {
32119+ dev_priv->apm_reg = MSG_READ32(PSB_PUNIT_PORT, PSB_APMBA);
32120+ dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
32121+ PSB_DEBUG_PM("apm_reg:%x\n", dev_priv->apm_reg);
32122+#ifdef OSPM_STAT
32123+ dev_priv->graphics_state = PSB_PWR_STATE_D0;
32124+ dev_priv->gfx_last_mode_change = jiffies;
32125+ dev_priv->gfx_d0_time = 0;
32126+ dev_priv->gfx_d0i3_time = 0;
32127+ dev_priv->gfx_d3_time = 0;
32128+#endif
32129+ init = 1;
32130+ }
32131+}
32132+
32133+int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
32134+ struct drm_file *file_priv)
32135+{
32136+ struct drm_psb_cmdbuf_arg *arg = data;
32137+ int ret = 0;
32138+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
32139+ struct ttm_buffer_object *cmd_buffer = NULL;
32140+ struct ttm_buffer_object *ta_buffer = NULL;
32141+ struct ttm_buffer_object *oom_buffer = NULL;
32142+ struct psb_ttm_fence_rep fence_arg;
32143+ struct drm_psb_scene user_scene;
32144+ struct psb_scene_pool *pool = NULL;
32145+ struct psb_scene *scene = NULL;
32146+ struct drm_psb_private *dev_priv =
32147+ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
32148+ int engine;
32149+ struct psb_feedback_info feedback;
32150+ int po_correct;
32151+ struct psb_context *context;
32152+ unsigned num_buffers;
32153+
32154+ num_buffers = PSB_NUM_VALIDATE_BUFFERS;
32155+
32156+ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
32157+ if (unlikely(ret != 0))
32158+ return ret;
32159+
32160+ if (arg->engine == PSB_ENGINE_VIDEO)
32161+ powermgmt_using_hw_begin(dev->pdev, PSB_VIDEO_DEC_ISLAND, true);
32162+
32163+ if (arg->engine == LNC_ENGINE_ENCODE)
32164+ powermgmt_using_hw_begin(dev->pdev, PSB_VIDEO_ENC_ISLAND, true);
32165+
32166+ if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA) ||
32167+ (arg->engine == PSB_ENGINE_RASTERIZER))
32168+ powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true);
32169+
32170+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
32171+ if (unlikely(ret != 0))
32172+ goto out_err0;
32173+
32174+
32175+ context = &dev_priv->context;
32176+ context->used_buffers = 0;
32177+ context->fence_types = 0;
32178+ BUG_ON(!list_empty(&context->validate_list));
32179+ BUG_ON(!list_empty(&context->kern_validate_list));
32180+
32181+ if (unlikely(context->buffers == NULL)) {
32182+ context->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
32183+ sizeof(*context->buffers));
32184+ if (unlikely(context->buffers == NULL)) {
32185+ ret = -ENOMEM;
32186+ goto out_err1;
32187+ }
32188+ }
32189+
32190+ ret = psb_reference_buffers(file_priv,
32191+ arg->buffer_list,
32192+ context);
32193+
32194+ if (unlikely(ret != 0))
32195+ goto out_err1;
32196+
32197+ context->val_seq = atomic_add_return(1, &dev_priv->val_seq);
32198+
32199+ ret = ttm_eu_reserve_buffers(&context->validate_list,
32200+ context->val_seq);
32201+ if (unlikely(ret != 0)) {
32202+ goto out_err2;
32203+ }
32204+
32205+ engine = (arg->engine == PSB_ENGINE_RASTERIZER) ?
32206+ PSB_ENGINE_TA : arg->engine;
32207+
32208+ ret = psb_validate_buffer_list(file_priv, engine,
32209+ context, &po_correct);
32210+ if (unlikely(ret != 0))
32211+ goto out_err3;
32212+
32213+ if (!po_correct) {
32214+ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
32215+ arg->reloc_offset,
32216+ arg->reloc_handle, context, 0, 1);
32217+ if (unlikely(ret != 0))
32218+ goto out_err3;
32219+
32220+ }
32221+
32222+ cmd_buffer = ttm_buffer_object_lookup(tfile, arg->cmdbuf_handle);
32223+ if (unlikely(cmd_buffer == NULL)) {
32224+ ret = -EINVAL;
32225+ goto out_err4;
32226+ }
32227+
32228+ switch (arg->engine) {
32229+ case PSB_ENGINE_2D:
32230+ ret = psb_cmdbuf_2d(file_priv, &context->validate_list,
32231+ context->fence_types, arg, cmd_buffer,
32232+ &fence_arg);
32233+ if (unlikely(ret != 0))
32234+ goto out_err4;
32235+ break;
32236+ case PSB_ENGINE_VIDEO:
32237+ ret = psb_cmdbuf_video(file_priv, &context->validate_list,
32238+ context->fence_types, arg,
32239+ cmd_buffer, &fence_arg);
32240+
32241+ if (unlikely(ret != 0))
32242+ goto out_err4;
32243+ break;
32244+ case LNC_ENGINE_ENCODE:
32245+ ret = lnc_cmdbuf_video(file_priv, &context->validate_list,
32246+ context->fence_types, arg,
32247+ cmd_buffer, &fence_arg);
32248+ if (unlikely(ret != 0))
32249+ goto out_err4;
32250+ break;
32251+ case PSB_ENGINE_RASTERIZER:
32252+ ret = psb_cmdbuf_raster(file_priv, context,
32253+ arg, cmd_buffer, &fence_arg);
32254+ if (unlikely(ret != 0))
32255+ goto out_err4;
32256+ break;
32257+ case PSB_ENGINE_TA:
32258+ if (arg->ta_handle == arg->cmdbuf_handle) {
32259+ ta_buffer = ttm_bo_reference(cmd_buffer);
32260+ } else {
32261+ ta_buffer =
32262+ ttm_buffer_object_lookup(tfile,
32263+ arg->ta_handle);
32264+ if (!ta_buffer) {
32265+ ret = -EINVAL;
32266+ goto out_err4;
32267+ }
32268+ }
32269+ if (arg->oom_size != 0) {
32270+ if (arg->oom_handle == arg->cmdbuf_handle) {
32271+ oom_buffer = ttm_bo_reference(cmd_buffer);
32272+ } else {
32273+ oom_buffer =
32274+ ttm_buffer_object_lookup(tfile,
32275+ arg->
32276+ oom_handle);
32277+ if (!oom_buffer) {
32278+ ret = -EINVAL;
32279+ goto out_err4;
32280+ }
32281+ }
32282+ }
32283+
32284+ ret = copy_from_user(&user_scene, (void __user *)
32285+ ((unsigned long) arg->scene_arg),
32286+ sizeof(user_scene));
32287+ if (ret)
32288+ goto out_err4;
32289+
32290+ if (!user_scene.handle_valid) {
32291+ pool = psb_scene_pool_alloc(file_priv, 0,
32292+ user_scene.num_buffers,
32293+ user_scene.w,
32294+ user_scene.h);
32295+ if (!pool) {
32296+ ret = -ENOMEM;
32297+ goto out_err0;
32298+ }
32299+
32300+ user_scene.handle = psb_scene_pool_handle(pool);
32301+ user_scene.handle_valid = 1;
32302+ ret = copy_to_user((void __user *)
32303+ ((unsigned long) arg->
32304+ scene_arg), &user_scene,
32305+ sizeof(user_scene));
32306+
32307+ if (ret)
32308+ goto out_err4;
32309+ } else {
32310+ pool =
32311+ psb_scene_pool_lookup(file_priv,
32312+ user_scene.handle, 1);
32313+ if (!pool) {
32314+ ret = -EINVAL;
32315+ goto out_err4;
32316+ }
32317+ }
32318+
32319+ ret = psb_validate_scene_pool(context, pool,
32320+ user_scene.w,
32321+ user_scene.h,
32322+ arg->ta_flags &
32323+ PSB_TA_FLAG_LASTPASS, &scene);
32324+ if (ret)
32325+ goto out_err4;
32326+
32327+ memset(&feedback, 0, sizeof(feedback));
32328+ if (arg->feedback_ops) {
32329+ ret = psb_feedback_buf(tfile,
32330+ context,
32331+ arg->feedback_ops,
32332+ arg->feedback_handle,
32333+ arg->feedback_offset,
32334+ arg->feedback_breakpoints,
32335+ arg->feedback_size,
32336+ &feedback);
32337+ if (ret)
32338+ goto out_err4;
32339+ }
32340+ ret = psb_cmdbuf_ta(file_priv, context,
32341+ arg, cmd_buffer, ta_buffer,
32342+ oom_buffer, scene, &feedback,
32343+ &fence_arg);
32344+ if (ret)
32345+ goto out_err4;
32346+ break;
32347+ default:
32348+ DRM_ERROR
32349+ ("Unimplemented command submission mechanism (%x).\n",
32350+ arg->engine);
32351+ ret = -EINVAL;
32352+ goto out_err4;
32353+ }
32354+
32355+ if (!(arg->fence_flags & DRM_PSB_FENCE_NO_USER)) {
32356+ ret = copy_to_user((void __user *)
32357+ ((unsigned long) arg->fence_arg),
32358+ &fence_arg, sizeof(fence_arg));
32359+ }
32360+
32361+out_err4:
32362+ if (scene)
32363+ psb_scene_unref(&scene);
32364+ if (pool)
32365+ psb_scene_pool_unref(&pool);
32366+ if (cmd_buffer)
32367+ ttm_bo_unref(&cmd_buffer);
32368+ if (ta_buffer)
32369+ ttm_bo_unref(&ta_buffer);
32370+ if (oom_buffer)
32371+ ttm_bo_unref(&oom_buffer);
32372+out_err3:
32373+ ret = psb_handle_copyback(dev, context, ret);
32374+out_err2:
32375+ psb_unreference_buffers(context);
32376+out_err1:
32377+ mutex_unlock(&dev_priv->cmdbuf_mutex);
32378+out_err0:
32379+ ttm_read_unlock(&dev_priv->ttm_lock);
32380+
32381+ if (arg->engine == PSB_ENGINE_VIDEO)
32382+ powermgmt_using_hw_end(PSB_VIDEO_DEC_ISLAND);
32383+
32384+ if (arg->engine == LNC_ENGINE_ENCODE)
32385+ powermgmt_using_hw_end(PSB_VIDEO_ENC_ISLAND);
32386+
32387+ if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA)
32388+ || (arg->engine == PSB_ENGINE_RASTERIZER))
32389+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
32390+ return ret;
32391+}
32392+
32393+static int psb_do_init_pageflip(struct drm_device * dev)
32394+{
32395+ struct drm_psb_private *dev_priv = dev->dev_private;
32396+ u32 pipe_status[2];
32397+ int pipe, dspbase;
32398+
32399+ if (!powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false))
32400+ return -1;
32401+
32402+ dev_priv->dri_page_flipping = 1;
32403+ dev_priv->current_page = 0;
32404+ for (pipe = 0; pipe < 2; pipe++){
32405+ pipe_status[pipe] = REG_READ(pipe == 0 ? PIPEACONF : PIPEBCONF);
32406+ if (pipe_status[pipe] & PIPEACONF_ENABLE){
32407+ dev_priv->pipe_active[pipe] = 1;
32408+ dev_priv->saved_stride[pipe] = REG_READ((pipe == 0) ? DSPASTRIDE : DSPBSTRIDE);
32409+ dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
32410+ if (IS_MRST(dev) && (pipe == 0))
32411+ dspbase = MRST_DSPABASE;
32412+ if (IS_MRST(dev)) {
32413+ dev_priv->saved_start[pipe] = REG_READ(pipe == 0 ? DSPASURF : DSPBSURF);
32414+ dev_priv->saved_offset[pipe] = REG_READ(dspbase);
32415+ } else {
32416+ dev_priv->saved_start[pipe] = REG_READ(pipe == 0 ? DSPABASE : DSPBBASE);
32417+ dev_priv->saved_offset[pipe] = 0;
32418+ }
32419+ }
32420+ else
32421+ dev_priv->pipe_active[pipe] = 0;
32422+ }
32423+
32424+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
32425+
32426+ return 0;
32427+}
32428+
32429+int psb_page_flip(struct drm_device *dev, void *data,
32430+ struct drm_file *file_priv)
32431+{
32432+ struct drm_psb_pageflip_arg *arg = data;
32433+ int pipe;
32434+
32435+ struct drm_psb_private *dev_priv =
32436+ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
32437+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
32438+ struct psb_task *task = NULL;
32439+
32440+ if (!dev_priv->dri_page_flipping)
32441+ if (psb_do_init_pageflip(dev))
32442+ return 0;
32443+
32444+ task = kzalloc(sizeof(*task), GFP_KERNEL);
32445+ if (!task)
32446+ return -ENOMEM;
32447+ INIT_LIST_HEAD(&task->head);
32448+ INIT_LIST_HEAD(&task->buf.head);
32449+ task->task_type = psb_flip_task;
32450+
32451+ spin_lock_irq(&scheduler->lock);
32452+ list_add_tail(&task->head, &scheduler->ta_queue);
32453+ /**
32454+ * From this point we may no longer dereference task,
32455+ * as the object it points to may be freed by another thread.
32456+ */
32457+
32458+ task = NULL;
32459+ spin_unlock_irq(&scheduler->lock);
32460+ for (pipe=0; pipe<2; pipe++) {
32461+ if (dev_priv->pipe_active[pipe] == 1) {
32462+ dev_priv->flip_start[pipe] = arg->flip_offset;
32463+ dev_priv->flip_offset[pipe] = dev_priv->saved_offset[pipe];
32464+ dev_priv->flip_stride[pipe] = dev_priv->saved_stride[pipe];
32465+ }
32466+ }
32467+ return 0;
32468+}
32469+
32470+int psb_flip_set_base(struct drm_psb_private *dev_priv, int pipe)
32471+{
32472+ struct drm_device *dev = dev_priv->dev;
32473+
32474+ unsigned long Start, Offset, Stride;
32475+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
32476+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
32477+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
32478+
32479+ if (IS_MRST(dev) && (pipe == 0))
32480+ dspbase = MRST_DSPABASE;
32481+
32482+ Start = dev_priv->flip_start[pipe];
32483+ Offset = dev_priv->flip_offset[pipe];
32484+ Stride = dev_priv->flip_stride[pipe];
32485+
32486+ REG_WRITE(dspstride, Stride);
32487+
32488+ DRM_DEBUG("Writing base: %08lX Offset: %08lX Stride: %08lXn", Start, Offset, Stride);
32489+ if (IS_MRST(dev)) {
32490+ REG_WRITE(dspbase, Offset);
32491+ REG_READ(dspbase);
32492+ REG_WRITE(dspsurf, Start);
32493+ REG_READ(dspsurf);
32494+ } else {
32495+ REG_WRITE(dspbase, Start + Offset);
32496+ REG_READ(dspbase);
32497+ }
32498+
32499+ if (dev_priv->dri_page_flipping == 1)
32500+ dev_priv->current_page = 1 - dev_priv->current_page;
32501+
32502+ return 0;
32503+}
32504+
32505diff --git a/drivers/gpu/drm/psb/psb_sgx.h b/drivers/gpu/drm/psb/psb_sgx.h
32506new file mode 100644
32507index 0000000..9321b98
32508--- /dev/null
32509+++ b/drivers/gpu/drm/psb/psb_sgx.h
32510@@ -0,0 +1,41 @@
32511+/*
32512+ * Copyright (c) 2008, Intel Corporation
32513+ *
32514+ * Permission is hereby granted, free of charge, to any person obtaining a
32515+ * copy of this software and associated documentation files (the "Software"),
32516+ * to deal in the Software without restriction, including without limitation
32517+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
32518+ * and/or sell copies of the Software, and to permit persons to whom the
32519+ * Software is furnished to do so, subject to the following conditions:
32520+ *
32521+ * The above copyright notice and this permission notice (including the next
32522+ * paragraph) shall be included in all copies or substantial portions of the
32523+ * Software.
32524+ *
32525+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32526+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
32527+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
32528+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32529+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
32530+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32531+ * SOFTWARE.
32532+ *
32533+ * Authors:
32534+ * Eric Anholt <eric@anholt.net>
32535+ *
32536+ **/
32537+#ifndef _PSB_SGX_H_
32538+#define _PSB_SGX_H_
32539+
32540+extern int psb_submit_video_cmdbuf(struct drm_device *dev,
32541+ struct ttm_buffer_object *cmd_buffer,
32542+ unsigned long cmd_offset,
32543+ unsigned long cmd_size,
32544+ struct ttm_fence_object *fence);
32545+
32546+extern int psb_2d_wait_available(struct drm_psb_private *dev_priv,
32547+ unsigned size);
32548+extern int drm_idle_check_interval;
32549+extern int drm_psb_ospm;
32550+
32551+#endif
32552diff --git a/drivers/gpu/drm/psb/psb_socket.c b/drivers/gpu/drm/psb/psb_socket.c
32553new file mode 100644
32554index 0000000..4814e55
32555--- /dev/null
32556+++ b/drivers/gpu/drm/psb/psb_socket.c
32557@@ -0,0 +1,340 @@
32558+/*
32559+ * kernel userspace event delivery
32560+ *
32561+ * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
32562+ * Copyright (C) 2004 Novell, Inc. All rights reserved.
32563+ * Copyright (C) 2004 IBM, Inc. All rights reserved.
32564+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
32565+ *
32566+ * Licensed under the GNU GPL v2.
32567+ *
32568+ * Authors:
32569+ * Robert Love <rml@novell.com>
32570+ * Kay Sievers <kay.sievers@vrfy.org>
32571+ * Arjan van de Ven <arjanv@redhat.com>
32572+ * Greg Kroah-Hartman <greg@kroah.com>
32573+ * James C. Gualario <james.c.gualario@intel.com>
32574+ *
32575+ */
32576+
32577+#include <linux/spinlock.h>
32578+#include <linux/string.h>
32579+#include <linux/kobject.h>
32580+#include <linux/module.h>
32581+#include <linux/socket.h>
32582+#include <linux/skbuff.h>
32583+#include <linux/netlink.h>
32584+#include <net/sock.h>
32585+
32586+#define NETLINK_PSB_KOBJECT_UEVENT 31
32587+
32588+u64 psb_uevent_seqnum;
32589+char psb_uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
32590+static DEFINE_SPINLOCK(sequence_lock);
32591+#if defined(CONFIG_NET)
32592+static struct sock *uevent_sock;
32593+#endif
32594+
32595+/* the strings here must match the enum in include/linux/kobject.h */
32596+static const char *psb_kobject_actions[] = {
32597+ [KOBJ_ADD] = "add",
32598+ [KOBJ_REMOVE] = "remove",
32599+ [KOBJ_CHANGE] = "change",
32600+ [KOBJ_MOVE] = "move",
32601+ [KOBJ_ONLINE] = "online",
32602+ [KOBJ_OFFLINE] = "offline",
32603+};
32604+
32605+/**
32606+ * kobject_action_type - translate action string to numeric type
32607+ *
32608+ * @buf: buffer containing the action string, newline is ignored
32609+ * @len: length of buffer
32610+ * @type: pointer to the location to store the action type
32611+ *
32612+ * Returns 0 if the action string was recognized.
32613+ */
32614+int psb_kobject_action_type(const char *buf, size_t count,
32615+ enum kobject_action *type)
32616+{
32617+ enum kobject_action action;
32618+ int ret = -EINVAL;
32619+
32620+ if (count && (buf[count-1] == '\n' || buf[count-1] == '\0'))
32621+ count--;
32622+
32623+ if (!count)
32624+ goto out;
32625+
32626+ for (action = 0; action < ARRAY_SIZE(psb_kobject_actions); action++) {
32627+ if (strncmp(psb_kobject_actions[action], buf, count) != 0)
32628+ continue;
32629+ if (psb_kobject_actions[action][count] != '\0')
32630+ continue;
32631+ *type = action;
32632+ ret = 0;
32633+ break;
32634+ }
32635+out:
32636+ return ret;
32637+}
32638+
32639+/**
32640+ * psb_kobject_uevent_env - send an uevent with environmental data
32641+ *
32642+ * @action: action that is happening
32643+ * @kobj: struct kobject that the action is happening to
32644+ * @envp_ext: pointer to environmental data
32645+ *
32646+ * Returns 0 if kobject_uevent() is completed with success or the
32647+ * corresponding error when it fails.
32648+ */
32649+int psb_kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
32650+ char *envp_ext[])
32651+{
32652+ struct kobj_uevent_env *env;
32653+ const char *action_string = psb_kobject_actions[action];
32654+ const char *devpath = NULL;
32655+ const char *subsystem;
32656+ struct kobject *top_kobj;
32657+ struct kset *kset;
32658+ struct kset_uevent_ops *uevent_ops;
32659+ u64 seq;
32660+ int i = 0;
32661+ int retval = 0;
32662+
32663+ pr_debug("kobject: '%s' (%p): %s\n",
32664+ kobject_name(kobj), kobj, __func__);
32665+
32666+ /* search the kset we belong to */
32667+ top_kobj = kobj;
32668+ while (!top_kobj->kset && top_kobj->parent)
32669+ top_kobj = top_kobj->parent;
32670+
32671+ if (!top_kobj->kset) {
32672+ pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
32673+ "without kset!\n", kobject_name(kobj), kobj,
32674+ __func__);
32675+ return -EINVAL;
32676+ }
32677+
32678+ kset = top_kobj->kset;
32679+ uevent_ops = kset->uevent_ops;
32680+
32681+ /* skip the event, if uevent_suppress is set*/
32682+ if (kobj->uevent_suppress) {
32683+ pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
32684+ "caused the event to drop!\n",
32685+ kobject_name(kobj), kobj, __func__);
32686+ return 0;
32687+ }
32688+ /* skip the event, if the filter returns zero. */
32689+ if (uevent_ops && uevent_ops->filter)
32690+ if (!uevent_ops->filter(kset, kobj)) {
32691+ pr_debug("kobject: '%s' (%p): %s: filter function "
32692+ "caused the event to drop!\n",
32693+ kobject_name(kobj), kobj, __func__);
32694+ return 0;
32695+ }
32696+
32697+ /* originating subsystem */
32698+ if (uevent_ops && uevent_ops->name)
32699+ subsystem = uevent_ops->name(kset, kobj);
32700+ else
32701+ subsystem = kobject_name(&kset->kobj);
32702+ if (!subsystem) {
32703+ pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
32704+ "event to drop!\n", kobject_name(kobj), kobj,
32705+ __func__);
32706+ return 0;
32707+ }
32708+
32709+ /* environment buffer */
32710+ env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
32711+ if (!env)
32712+ return -ENOMEM;
32713+
32714+ /* complete object path */
32715+ devpath = kobject_get_path(kobj, GFP_KERNEL);
32716+ if (!devpath) {
32717+ retval = -ENOENT;
32718+ goto exit;
32719+ }
32720+
32721+ /* default keys */
32722+ retval = add_uevent_var(env, "ACTION=%s", action_string);
32723+ if (retval)
32724+ goto exit;
32725+ retval = add_uevent_var(env, "DEVPATH=%s", devpath);
32726+ if (retval)
32727+ goto exit;
32728+ retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem);
32729+ if (retval)
32730+ goto exit;
32731+
32732+ /* keys passed in from the caller */
32733+ if (envp_ext) {
32734+ for (i = 0; envp_ext[i]; i++) {
32735+ retval = add_uevent_var(env, "%s", envp_ext[i]);
32736+ if (retval)
32737+ goto exit;
32738+ }
32739+ }
32740+
32741+ /* let the kset specific function add its stuff */
32742+ if (uevent_ops && uevent_ops->uevent) {
32743+ retval = uevent_ops->uevent(kset, kobj, env);
32744+ if (retval) {
32745+ pr_debug("kobject: '%s' (%p): %s: uevent() returned "
32746+ "%d\n", kobject_name(kobj), kobj,
32747+ __func__, retval);
32748+ goto exit;
32749+ }
32750+ }
32751+
32752+ /*
32753+ * Mark "add" and "remove" events in the object to ensure proper
32754+ * events to userspace during automatic cleanup. If the object did
32755+ * send an "add" event, "remove" will automatically generated by
32756+ * the core, if not already done by the caller.
32757+ */
32758+ if (action == KOBJ_ADD)
32759+ kobj->state_add_uevent_sent = 1;
32760+ else if (action == KOBJ_REMOVE)
32761+ kobj->state_remove_uevent_sent = 1;
32762+
32763+ /* we will send an event, so request a new sequence number */
32764+ spin_lock(&sequence_lock);
32765+ seq = ++psb_uevent_seqnum;
32766+ spin_unlock(&sequence_lock);
32767+ retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
32768+ if (retval)
32769+ goto exit;
32770+
32771+#if defined(CONFIG_NET)
32772+ /* send netlink message */
32773+ if (uevent_sock) {
32774+ struct sk_buff *skb;
32775+ size_t len;
32776+
32777+ /* allocate message with the maximum possible size */
32778+ len = strlen(action_string) + strlen(devpath) + 2;
32779+ skb = alloc_skb(len + env->buflen, GFP_KERNEL);
32780+ if (skb) {
32781+ char *scratch;
32782+
32783+ /* add header */
32784+ scratch = skb_put(skb, len);
32785+ sprintf(scratch, "%s@%s", action_string, devpath);
32786+
32787+ /* copy keys to our continuous event payload buffer */
32788+ for (i = 0; i < env->envp_idx; i++) {
32789+ len = strlen(env->envp[i]) + 1;
32790+ scratch = skb_put(skb, len);
32791+ strcpy(scratch, env->envp[i]);
32792+ }
32793+
32794+ NETLINK_CB(skb).dst_group = 1;
32795+ retval = netlink_broadcast(uevent_sock, skb, 0, 1,
32796+ GFP_KERNEL);
32797+ /* ENOBUFS should be handled in userspace */
32798+ if (retval == -ENOBUFS)
32799+ retval = 0;
32800+ } else
32801+ retval = -ENOMEM;
32802+ }
32803+#endif
32804+
32805+ /* call psb_uevent_helper, usually only enabled during early boot */
32806+ if (psb_uevent_helper[0]) {
32807+ char *argv[3];
32808+
32809+ argv[0] = psb_uevent_helper;
32810+ argv[1] = (char *)subsystem;
32811+ argv[2] = NULL;
32812+ retval = add_uevent_var(env, "HOME=/");
32813+ if (retval)
32814+ goto exit;
32815+ retval = add_uevent_var(env,
32816+ "PATH=/sbin:/bin:/usr/sbin:/usr/bin");
32817+ if (retval)
32818+ goto exit;
32819+
32820+ retval = call_usermodehelper(argv[0], argv,
32821+ env->envp, UMH_WAIT_EXEC);
32822+ }
32823+
32824+exit:
32825+ kfree(devpath);
32826+ kfree(env);
32827+ return retval;
32828+}
32829+EXPORT_SYMBOL_GPL(psb_kobject_uevent_env);
32830+
32831+/**
32832+ * psb_kobject_uevent - notify userspace by ending an uevent
32833+ *
32834+ * @action: action that is happening
32835+ * @kobj: struct kobject that the action is happening to
32836+ *
32837+ * Returns 0 if psb_kobject_uevent() is completed with success or the
32838+ * corresponding error when it fails.
32839+ */
32840+int psb_kobject_uevent(struct kobject *kobj, enum kobject_action action)
32841+{
32842+ return psb_kobject_uevent_env(kobj, action, NULL);
32843+}
32844+EXPORT_SYMBOL_GPL(psb_kobject_uevent);
32845+
32846+/**
32847+ * psb_add_uevent_var - add key value string to the environment buffer
32848+ * @env: environment buffer structure
32849+ * @format: printf format for the key=value pair
32850+ *
32851+ * Returns 0 if environment variable was added successfully or -ENOMEM
32852+ * if no space was available.
32853+ */
32854+int psb_add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
32855+{
32856+ va_list args;
32857+ int len;
32858+
32859+ if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
32860+ WARN(1, KERN_ERR "psb_add_uevent_var: too many keys\n");
32861+ return -ENOMEM;
32862+ }
32863+
32864+ va_start(args, format);
32865+ len = vsnprintf(&env->buf[env->buflen],
32866+ sizeof(env->buf) - env->buflen,
32867+ format, args);
32868+ va_end(args);
32869+
32870+ if (len >= (sizeof(env->buf) - env->buflen)) {
32871+ WARN(1,
32872+ KERN_ERR "psb_add_uevent_var: buffer size too small\n");
32873+ return -ENOMEM;
32874+ }
32875+
32876+ env->envp[env->envp_idx++] = &env->buf[env->buflen];
32877+ env->buflen += len + 1;
32878+ return 0;
32879+}
32880+EXPORT_SYMBOL_GPL(psb_add_uevent_var);
32881+
32882+#if defined(CONFIG_NET)
32883+static int __init psb_kobject_uevent_init(void)
32884+{
32885+ uevent_sock = netlink_kernel_create(&init_net,
32886+ NETLINK_PSB_KOBJECT_UEVENT,
32887+ 1, NULL, NULL, THIS_MODULE);
32888+ if (!uevent_sock) {
32889+ printk(KERN_ERR "psb_kobject_uevent: failed create socket!\n");
32890+ return -ENODEV;
32891+ }
32892+ netlink_set_nonroot(NETLINK_PSB_KOBJECT_UEVENT, NL_NONROOT_RECV);
32893+ return 0;
32894+}
32895+
32896+postcore_initcall(psb_kobject_uevent_init);
32897+#endif
32898diff --git a/drivers/gpu/drm/psb/psb_ttm_glue.c b/drivers/gpu/drm/psb/psb_ttm_glue.c
32899new file mode 100644
32900index 0000000..cada0d9
32901--- /dev/null
32902+++ b/drivers/gpu/drm/psb/psb_ttm_glue.c
32903@@ -0,0 +1,342 @@
32904+/**************************************************************************
32905+ * Copyright (c) 2008, Intel Corporation.
32906+ * All Rights Reserved.
32907+ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
32908+ * All Rights Reserved.
32909+ *
32910+ * This program is free software; you can redistribute it and/or modify it
32911+ * under the terms and conditions of the GNU General Public License,
32912+ * version 2, as published by the Free Software Foundation.
32913+ *
32914+ * This program is distributed in the hope it will be useful, but WITHOUT
32915+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
32916+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
32917+ * more details.
32918+ *
32919+ * You should have received a copy of the GNU General Public License along with
32920+ * this program; if not, write to the Free Software Foundation, Inc.,
32921+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
32922+ *
32923+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
32924+ * develop this driver.
32925+ *
32926+ **************************************************************************/
32927+/*
32928+ */
32929+
32930+#include <drm/drmP.h>
32931+#include "psb_drv.h"
32932+#include "ttm/ttm_userobj_api.h"
32933+#include "psb_powermgmt.h"
32934+
32935+static struct vm_operations_struct psb_ttm_vm_ops;
32936+
32937+int psb_open(struct inode *inode, struct file *filp)
32938+{
32939+ struct drm_file *file_priv;
32940+ struct drm_psb_private *dev_priv;
32941+ struct psb_fpriv *psb_fp;
32942+ int ret;
32943+
32944+ ret = drm_open(inode, filp);
32945+ if (unlikely(ret))
32946+ return ret;
32947+
32948+ psb_fp = kzalloc(sizeof(*psb_fp), GFP_KERNEL);
32949+
32950+ if (unlikely(psb_fp == NULL))
32951+ goto out_err0;
32952+
32953+ file_priv = (struct drm_file *) filp->private_data;
32954+ dev_priv = psb_priv(file_priv->minor->dev);
32955+
32956+
32957+ psb_fp->tfile = ttm_object_file_init(dev_priv->tdev,
32958+ PSB_FILE_OBJECT_HASH_ORDER);
32959+ if (unlikely(psb_fp->tfile == NULL))
32960+ goto out_err1;
32961+
32962+ file_priv->driver_priv = psb_fp;
32963+
32964+ if (unlikely(dev_priv->bdev.dev_mapping == NULL))
32965+ dev_priv->bdev.dev_mapping = dev_priv->dev->dev_mapping;
32966+
32967+ return 0;
32968+
32969+out_err1:
32970+ kfree(psb_fp);
32971+out_err0:
32972+ (void) drm_release(inode, filp);
32973+ return ret;
32974+}
32975+
32976+int psb_release(struct inode *inode, struct file *filp)
32977+{
32978+ struct drm_file *file_priv;
32979+ struct psb_fpriv *psb_fp;
32980+ struct drm_psb_private *dev_priv;
32981+ int ret;
32982+
32983+ file_priv = (struct drm_file *) filp->private_data;
32984+ psb_fp = psb_fpriv(file_priv);
32985+ dev_priv = psb_priv(file_priv->minor->dev);
32986+
32987+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND|PSB_DISPLAY_ISLAND, true);
32988+
32989+ ttm_object_file_release(&psb_fp->tfile);
32990+ kfree(psb_fp);
32991+
32992+ if (dev_priv && dev_priv->xhw_file)
32993+ psb_xhw_init_takedown(dev_priv, file_priv, 1);
32994+
32995+ ret = drm_release(inode, filp);
32996+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND|PSB_DISPLAY_ISLAND);
32997+ if (drm_psb_ospm && IS_MRST(dev_priv->dev))
32998+ schedule_delayed_work(&dev_priv->scheduler.wq, 0);
32999+
33000+ if (IS_MRST(dev_priv->dev))
33001+ schedule_delayed_work(&dev_priv->scheduler.topaz_suspend_wq, 0);
33002+ if (IS_MRST(dev_priv->dev))
33003+ schedule_delayed_work(&dev_priv->scheduler.msvdx_suspend_wq, 0);
33004+
33005+ return ret;
33006+}
33007+
33008+int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
33009+ struct drm_file *file_priv)
33010+{
33011+ int ret;
33012+ struct drm_psb_private *dev_priv = psb_priv(dev);
33013+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33014+ ret = ttm_fence_signaled_ioctl(psb_fpriv(file_priv)->tfile, data);
33015+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33016+ if (drm_psb_ospm && IS_MRST(dev))
33017+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33018+ return ret;
33019+}
33020+
33021+int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
33022+ struct drm_file *file_priv)
33023+{
33024+ int ret;
33025+ struct drm_psb_private *dev_priv = psb_priv(dev);
33026+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33027+ ret = ttm_fence_finish_ioctl(psb_fpriv(file_priv)->tfile, data);
33028+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33029+ if (drm_psb_ospm && IS_MRST(dev))
33030+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33031+ return ret;
33032+}
33033+
33034+int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
33035+ struct drm_file *file_priv)
33036+{
33037+ int ret;
33038+ struct drm_psb_private *dev_priv = psb_priv(dev);
33039+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33040+ ret = ttm_fence_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
33041+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33042+ if (drm_psb_ospm && IS_MRST(dev))
33043+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33044+ return ret;
33045+}
33046+
33047+int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
33048+ struct drm_file *file_priv)
33049+{
33050+ return ttm_pl_waitidle_ioctl(psb_fpriv(file_priv)->tfile, data);
33051+}
33052+
33053+int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
33054+ struct drm_file *file_priv)
33055+{
33056+ int ret;
33057+ struct drm_psb_private *dev_priv = psb_priv(dev);
33058+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33059+ ret = ttm_pl_setstatus_ioctl(psb_fpriv(file_priv)->tfile,
33060+ &psb_priv(dev)->ttm_lock, data);
33061+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33062+ if (drm_psb_ospm && IS_MRST(dev))
33063+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33064+ return ret;
33065+}
33066+
33067+int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
33068+ struct drm_file *file_priv)
33069+{
33070+ int ret;
33071+ struct drm_psb_private *dev_priv = psb_priv(dev);
33072+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33073+ ret = ttm_pl_synccpu_ioctl(psb_fpriv(file_priv)->tfile, data);
33074+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33075+ if (drm_psb_ospm && IS_MRST(dev))
33076+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33077+ return ret;
33078+}
33079+
33080+int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
33081+ struct drm_file *file_priv)
33082+{
33083+ struct drm_psb_private *dev_priv = psb_priv(dev);
33084+ int ret;
33085+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33086+ ret = ttm_pl_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
33087+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33088+ if (drm_psb_ospm && IS_MRST(dev))
33089+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33090+ return ret;
33091+}
33092+
33093+int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
33094+ struct drm_file *file_priv)
33095+{
33096+ struct drm_psb_private *dev_priv = psb_priv(dev);
33097+ int ret;
33098+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33099+ ret = ttm_pl_reference_ioctl(psb_fpriv(file_priv)->tfile, data);
33100+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33101+ if (drm_psb_ospm && IS_MRST(dev))
33102+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33103+ return ret;
33104+}
33105+
33106+int psb_pl_create_ioctl(struct drm_device *dev, void *data,
33107+ struct drm_file *file_priv)
33108+{
33109+ struct drm_psb_private *dev_priv = psb_priv(dev);
33110+ int ret;
33111+ powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true);
33112+ ret = ttm_pl_create_ioctl(psb_fpriv(file_priv)->tfile,
33113+ &dev_priv->bdev, &dev_priv->ttm_lock, data);
33114+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
33115+ if (drm_psb_ospm && IS_MRST(dev))
33116+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
33117+ return ret;
33118+}
33119+
33120+/**
33121+ * psb_ttm_fault - Wrapper around the ttm fault method.
33122+ *
33123+ * @vma: The struct vm_area_struct as in the vm fault() method.
33124+ * @vmf: The struct vm_fault as in the vm fault() method.
33125+ *
33126+ * Since ttm_fault() will reserve buffers while faulting,
33127+ * we need to take the ttm read lock around it, as this driver
33128+ * relies on the ttm_lock in write mode to exclude all threads from
33129+ * reserving and thus validating buffers in aperture- and memory shortage
33130+ * situations.
33131+ */
33132+
33133+static int psb_ttm_fault(struct vm_area_struct *vma,
33134+ struct vm_fault *vmf)
33135+{
33136+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33137+ vma->vm_private_data;
33138+ struct drm_psb_private *dev_priv =
33139+ container_of(bo->bdev, struct drm_psb_private, bdev);
33140+ int ret;
33141+
33142+ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
33143+ if (unlikely(ret != 0))
33144+ return VM_FAULT_NOPAGE;
33145+
33146+ ret = dev_priv->ttm_vm_ops->fault(vma, vmf);
33147+
33148+ ttm_read_unlock(&dev_priv->ttm_lock);
33149+ return ret;
33150+}
33151+
33152+
33153+int psb_mmap(struct file *filp, struct vm_area_struct *vma)
33154+{
33155+ struct drm_file *file_priv;
33156+ struct drm_psb_private *dev_priv;
33157+ int ret;
33158+
33159+ if (unlikely(vma->vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET))
33160+ return drm_mmap(filp, vma);
33161+
33162+ file_priv = (struct drm_file *) filp->private_data;
33163+ dev_priv = psb_priv(file_priv->minor->dev);
33164+
33165+ ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
33166+ if (unlikely(ret != 0))
33167+ return ret;
33168+
33169+ if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
33170+ dev_priv->ttm_vm_ops = vma->vm_ops;
33171+ psb_ttm_vm_ops = *vma->vm_ops;
33172+ psb_ttm_vm_ops.fault = &psb_ttm_fault;
33173+ }
33174+
33175+ vma->vm_ops = &psb_ttm_vm_ops;
33176+
33177+ return 0;
33178+}
33179+
33180+ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
33181+ size_t count, loff_t *f_pos)
33182+{
33183+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
33184+ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
33185+
33186+ return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1);
33187+}
33188+
33189+ssize_t psb_ttm_read(struct file *filp, char __user *buf,
33190+ size_t count, loff_t *f_pos)
33191+{
33192+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
33193+ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
33194+
33195+ return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1);
33196+}
33197+
33198+int psb_verify_access(struct ttm_buffer_object *bo,
33199+ struct file *filp)
33200+{
33201+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
33202+
33203+ if (capable(CAP_SYS_ADMIN))
33204+ return 0;
33205+
33206+ if (unlikely(!file_priv->authenticated))
33207+ return -EPERM;
33208+
33209+ return ttm_pl_verify_access(bo, psb_fpriv(file_priv)->tfile);
33210+}
33211+
33212+static int psb_ttm_mem_global_init(struct drm_global_reference *ref)
33213+{
33214+ return ttm_mem_global_init(ref->object);
33215+}
33216+
33217+static void psb_ttm_mem_global_release(struct drm_global_reference *ref)
33218+{
33219+ ttm_mem_global_release(ref->object);
33220+}
33221+
33222+int psb_ttm_global_init(struct drm_psb_private *dev_priv)
33223+{
33224+ struct drm_global_reference *global_ref;
33225+ int ret;
33226+
33227+ global_ref = &dev_priv->mem_global_ref;
33228+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
33229+ global_ref->size = sizeof(struct ttm_mem_global);
33230+ global_ref->init = &psb_ttm_mem_global_init;
33231+ global_ref->release = &psb_ttm_mem_global_release;
33232+
33233+ ret = drm_global_item_ref(global_ref);
33234+ if (unlikely(ret != 0)) {
33235+ DRM_ERROR("Failed referencing a global TTM memory object.\n");
33236+ return ret;
33237+ }
33238+
33239+ return 0;
33240+}
33241+
33242+void psb_ttm_global_release(struct drm_psb_private *dev_priv)
33243+{
33244+ drm_global_item_unref(&dev_priv->mem_global_ref);
33245+}
33246diff --git a/drivers/gpu/drm/psb/psb_umevents.c b/drivers/gpu/drm/psb/psb_umevents.c
33247new file mode 100644
33248index 0000000..90b91c1
33249--- /dev/null
33250+++ b/drivers/gpu/drm/psb/psb_umevents.c
33251@@ -0,0 +1,490 @@
33252+/*
33253+ * Copyright © 2009 Intel Corporation
33254+ *
33255+ * Permission is hereby granted, free of charge, to any person obtaining a
33256+ * copy of this software and associated documentation files (the "Software"),
33257+ * to deal in the Software without restriction, including without limitation
33258+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
33259+ * and/or sell copies of the Software, and to permit persons to whom the
33260+ * Software is furnished to do so, subject to the following conditions:
33261+ *
33262+ * The above copyright notice and this permission notice (including the next
33263+ * paragraph) shall be included in all copies or substantial portions of the
33264+ * Software.
33265+ *
33266+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33267+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33268+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
33269+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33270+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33271+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
33272+ * IN THE SOFTWARE.
33273+ *
33274+ * Authors:
33275+ * James C. Gualario <james.c.gualario@intel.com>
33276+ *
33277+ */
33278+#include "psb_umevents.h"
33279+/**
33280+ * define sysfs operations supported by umevent objects.
33281+ *
33282+ */
33283+static struct sysfs_ops umevent_obj_sysfs_ops = {
33284+ .show = psb_umevent_attr_show,
33285+ .store = psb_umevent_attr_store,
33286+};
33287+/**
33288+ * define the data attributes we will expose through sysfs.
33289+ *
33290+ */
33291+static struct umevent_attribute data_0 =
33292+ __ATTR(data_0_val, 0666, psb_umevent_attr_show_imp,
33293+ psb_umevent_attr_store_imp);
33294+static struct umevent_attribute data_1 =
33295+ __ATTR(data_1_val, 0666, psb_umevent_attr_show_imp,
33296+ psb_umevent_attr_store_imp);
33297+static struct umevent_attribute data_2 =
33298+ __ATTR(data_2_val, 0666, psb_umevent_attr_show_imp,
33299+ psb_umevent_attr_store_imp);
33300+static struct umevent_attribute data_3 =
33301+ __ATTR(data_3_val, 0666, psb_umevent_attr_show_imp,
33302+ psb_umevent_attr_store_imp);
33303+static struct umevent_attribute data_4 =
33304+ __ATTR(data_4_val, 0666, psb_umevent_attr_show_imp,
33305+ psb_umevent_attr_store_imp);
33306+static struct umevent_attribute data_5 =
33307+ __ATTR(data_5_val, 0666, psb_umevent_attr_show_imp,
33308+ psb_umevent_attr_store_imp);
33309+static struct umevent_attribute data_6 =
33310+ __ATTR(data_6_val, 0666, psb_umevent_attr_show_imp,
33311+ psb_umevent_attr_store_imp);
33312+static struct umevent_attribute data_7 =
33313+ __ATTR(data_7_val, 0666, psb_umevent_attr_show_imp,
33314+ psb_umevent_attr_store_imp);
33315+/**
33316+ * define the structure used to seed our ktype.
33317+ *
33318+ */
33319+static struct attribute *umevent_obj_default_attrs[] = {
33320+ &data_0.attr,
33321+ &data_1.attr,
33322+ &data_2.attr,
33323+ &data_3.attr,
33324+ &data_4.attr,
33325+ &data_5.attr,
33326+ &data_6.attr,
33327+ &data_7.attr,
33328+ NULL, /* need to NULL terminate the list of attributes */
33329+};
33330+/**
33331+ * specify the ktype for our kobjects.
33332+ *
33333+ */
33334+static struct kobj_type umevent_obj_ktype = {
33335+ .sysfs_ops = &umevent_obj_sysfs_ops,
33336+ .release = psb_umevent_obj_release,
33337+ .default_attrs = umevent_obj_default_attrs,
33338+};
33339+/**
33340+ * psb_umevent_attr_show - default kobject show function
33341+ *
33342+ * @kobj: kobject associated with the show operation
33343+ * @attr: attribute being requested
33344+ * @buf: pointer to the return buffer
33345+ *
33346+ */
33347+ssize_t psb_umevent_attr_show(struct kobject *kobj,
33348+ struct attribute *attr,
33349+ char *buf)
33350+{
33351+ struct umevent_attribute *attribute;
33352+ struct umevent_obj *any_umevent_obj;
33353+ attribute = to_umevent_attr(attr);
33354+ any_umevent_obj = to_umevent_obj(kobj);
33355+ if (!attribute->show)
33356+ return -EIO;
33357+
33358+ return attribute->show(any_umevent_obj, attribute, buf);
33359+}
33360+/**
33361+ * psb_umevent_attr_store - default kobject store function
33362+ *
33363+ * @kobj: kobject associated with the store operation
33364+ * @attr: attribute being requested
33365+ * @buf: input data to write to attribute
33366+ * @len: character count
33367+ *
33368+ */
33369+ssize_t psb_umevent_attr_store(struct kobject *kobj,
33370+ struct attribute *attr,
33371+ const char *buf, size_t len)
33372+{
33373+ struct umevent_attribute *attribute;
33374+ struct umevent_obj *any_umevent_obj;
33375+ attribute = to_umevent_attr(attr);
33376+ any_umevent_obj = to_umevent_obj(kobj);
33377+ if (!attribute->store)
33378+ return -EIO;
33379+
33380+ return attribute->store(any_umevent_obj, attribute, buf, len);
33381+}
33382+/**
33383+ * psb_umevent_obj_release - kobject release funtion
33384+ *
33385+ * @kobj: kobject to be released.
33386+ */
33387+void psb_umevent_obj_release(struct kobject *kobj)
33388+{
33389+ struct umevent_obj *any_umevent_obj;
33390+ any_umevent_obj = to_umevent_obj(kobj);
33391+ kfree(any_umevent_obj);
33392+}
33393+/**
33394+ * psb_umevent_attr_show_imp - attribute show implementation
33395+ *
33396+ * @any_umevent_obj: kobject managed data to read from
33397+ * @attr: attribute being requested
33398+ * @buf: pointer to the return buffer
33399+ *
33400+ */
33401+ssize_t psb_umevent_attr_show_imp(struct umevent_obj
33402+ *any_umevent_obj,
33403+ struct umevent_attribute *attr,
33404+ char *buf)
33405+{
33406+ int var;
33407+
33408+ if (strcmp(attr->attr.name, "data_0_val") == 0)
33409+ var = any_umevent_obj->data_0_val;
33410+ else if (strcmp(attr->attr.name, "data_1_val") == 0)
33411+ var = any_umevent_obj->data_1_val;
33412+ else if (strcmp(attr->attr.name, "data_2_val") == 0)
33413+ var = any_umevent_obj->data_2_val;
33414+ else if (strcmp(attr->attr.name, "data_3_val") == 0)
33415+ var = any_umevent_obj->data_3_val;
33416+ else if (strcmp(attr->attr.name, "data_4_val") == 0)
33417+ var = any_umevent_obj->data_4_val;
33418+ else if (strcmp(attr->attr.name, "data_5_val") == 0)
33419+ var = any_umevent_obj->data_5_val;
33420+ else if (strcmp(attr->attr.name, "data_6_val") == 0)
33421+ var = any_umevent_obj->data_6_val;
33422+ else
33423+ var = any_umevent_obj->data_7_val;
33424+
33425+ return sprintf(buf, "%d\n", var);
33426+}
33427+/**
33428+ * psb_umevent_attr_store_imp - attribute store implementation
33429+ *
33430+ * @any_umevent_obj: kobject managed data to write to
33431+ * @attr: attribute being requested
33432+ * @buf: input data to write to attribute
33433+ * @count: character count
33434+ *
33435+ */
33436+ssize_t psb_umevent_attr_store_imp(struct umevent_obj
33437+ *any_umevent_obj,
33438+ struct umevent_attribute *attr,
33439+ const char *buf, size_t count)
33440+{
33441+ int var;
33442+
33443+ sscanf(buf, "%du", &var);
33444+ if (strcmp(attr->attr.name, "data_0_val") == 0)
33445+ any_umevent_obj->data_0_val = var;
33446+ else if (strcmp(attr->attr.name, "data_1_val") == 0)
33447+ any_umevent_obj->data_1_val = var;
33448+ else if (strcmp(attr->attr.name, "data_2_val") == 0)
33449+ any_umevent_obj->data_2_val = var;
33450+ else if (strcmp(attr->attr.name, "data_3_val") == 0)
33451+ any_umevent_obj->data_3_val = var;
33452+ else if (strcmp(attr->attr.name, "data_4_val") == 0)
33453+ any_umevent_obj->data_4_val = var;
33454+ else if (strcmp(attr->attr.name, "data_5_val") == 0)
33455+ any_umevent_obj->data_5_val = var;
33456+ else if (strcmp(attr->attr.name, "data_6_val") == 0)
33457+ any_umevent_obj->data_6_val = var;
33458+ else
33459+ any_umevent_obj->data_7_val = var;
33460+ return count;
33461+}
33462+/**
33463+ * psb_create_umevent_obj - create and track new event objects
33464+ *
33465+ * @name: name to give to new sysfs / kobject entry
33466+ * @list: event object list to track the kobject in
33467+ */
33468+struct umevent_obj *psb_create_umevent_obj(const char *name,
33469+ struct umevent_list
33470+ *list)
33471+{
33472+ struct umevent_obj *new_umevent_obj;
33473+ int retval;
33474+ new_umevent_obj = kzalloc(sizeof(*new_umevent_obj),
33475+ GFP_KERNEL);
33476+ if (!new_umevent_obj)
33477+ return NULL;
33478+
33479+ new_umevent_obj->kobj.kset = list->umevent_disp_pool;
33480+ retval = kobject_init_and_add(&new_umevent_obj->kobj,
33481+ &umevent_obj_ktype, NULL,
33482+ "%s", name);
33483+ if (retval) {
33484+ kobject_put(&new_umevent_obj->kobj);
33485+ return NULL;
33486+ }
33487+ psb_umevent_add_to_list(list, new_umevent_obj);
33488+ return new_umevent_obj;
33489+}
33490+EXPORT_SYMBOL(psb_create_umevent_obj);
33491+/**
33492+ * psb_umevent_notify - info user mode of a new device
33493+ *
33494+ * @notify_disp_obj: event object to perform notification for
33495+ *
33496+ */
33497+void psb_umevent_notify(struct umevent_obj *notify_disp_obj)
33498+{
33499+ kobject_uevent(&notify_disp_obj->kobj, KOBJ_ADD);
33500+}
33501+EXPORT_SYMBOL(psb_umevent_notify);
33502+/**
33503+ * psb_umevent_notify_change - notify user mode of a change to a device
33504+ *
33505+ * @notify_disp_obj: event object to perform notification for
33506+ *
33507+ */
33508+void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj)
33509+{
33510+ kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE);
33511+}
33512+EXPORT_SYMBOL(psb_umevent_notify_change);
33513+/**
33514+ * psb_umevent_notify_change - notify user mode of a change to a device
33515+ *
33516+ * @notify_disp_obj: event object to perform notification for
33517+ *
33518+ */
33519+void psb_umevent_notify_change_gfxsock(struct umevent_obj *notify_disp_obj)
33520+{
33521+ psb_kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE);
33522+}
33523+EXPORT_SYMBOL(psb_umevent_notify_change_gfxsock);
33524+/**
33525+ * psb_destroy_umvent_obj - decrement ref count on event so kernel can kill it
33526+ *
33527+ * @any_umevent_obj: event object to destroy
33528+ *
33529+ */
33530+void psb_destroy_umevent_obj(struct umevent_obj
33531+ *any_umevent_obj)
33532+{
33533+ kobject_put(&any_umevent_obj->kobj);
33534+}
33535+/**
33536+ *
33537+ * psb_umevent_init - init the event pool
33538+ *
33539+ * @parent_kobj: parent kobject to associate new kset with
33540+ * @new_umevent_list: event list to associate kset with
33541+ * @name: name to give to new sysfs entry
33542+ *
33543+ */
33544+int psb_umevent_init(struct kobject *parent_kobj,
33545+ struct umevent_list *new_umevent_list,
33546+ const char *name)
33547+{
33548+ psb_umevent_init_list(new_umevent_list);
33549+ new_umevent_list->umevent_disp_pool = kset_create_and_add(name, NULL,
33550+ parent_kobj);
33551+ if (!new_umevent_list->umevent_disp_pool)
33552+ return -ENOMEM;
33553+
33554+ return 0;
33555+}
33556+EXPORT_SYMBOL(psb_umevent_init);
33557+/**
33558+ *
33559+ * psb_umevent_cleanup - cleanup all event objects
33560+ *
33561+ * @kill_list: list of events to destroy
33562+ *
33563+ */
33564+void psb_umevent_cleanup(struct umevent_list *kill_list)
33565+{
33566+ psb_umevent_destroy_list(kill_list);
33567+}
33568+EXPORT_SYMBOL(psb_umevent_cleanup);
33569+/**
33570+ * psb_umevent_add_to_list - add an event to the event list
33571+ *
33572+ * @list: list to add the event to
33573+ * @umevent_obj_to_add: event to add
33574+ *
33575+ */
33576+void psb_umevent_add_to_list(struct umevent_list *list,
33577+ struct umevent_obj *umevent_obj_to_add)
33578+{
33579+ unsigned long flags;
33580+ spin_lock_irqsave(&list->list_lock, flags);
33581+ list_add(&umevent_obj_to_add->head, &list->head);
33582+ spin_unlock_irqrestore(&list->list_lock, flags);
33583+}
33584+/**
33585+ * psb_umevent_init_list - initialize event list
33586+ *
33587+ * @list: list to initialize
33588+ *
33589+ */
33590+void psb_umevent_init_list(struct umevent_list *list)
33591+{
33592+ spin_lock_init(&list->list_lock);
33593+ INIT_LIST_HEAD(&list->head);
33594+}
33595+/**
33596+ * psb_umevent_create_list - allocate an event list
33597+ *
33598+ */
33599+struct umevent_list *psb_umevent_create_list()
33600+{
33601+ struct umevent_list *new_umevent_list;
33602+ new_umevent_list = NULL;
33603+ new_umevent_list = kmalloc(sizeof(struct umevent_list),
33604+ GFP_ATOMIC);
33605+ return new_umevent_list;
33606+}
33607+EXPORT_SYMBOL(psb_umevent_create_list);
33608+/**
33609+ * psb_umevent_destroy_list - destroy a list and clean up all mem
33610+ *
33611+ * @list: list to destroy and clean up after
33612+ *
33613+ */
33614+void psb_umevent_destroy_list(struct umevent_list *list)
33615+{
33616+ struct umevent_obj *umevent_obj_curr;
33617+ struct list_head *node;
33618+ struct list_head *node_kill;
33619+ int i;
33620+ i = 0;
33621+ node = NULL;
33622+ node_kill = NULL;
33623+ node = list->head.next;
33624+ while (node != (&list->head)) {
33625+ umevent_obj_curr = list_entry(node,
33626+ struct umevent_obj,
33627+ head);
33628+ node_kill = node;
33629+ node = umevent_obj_curr->head.next;
33630+ psb_destroy_umevent_obj(umevent_obj_curr);
33631+ umevent_obj_curr = NULL;
33632+ list_del(node_kill);
33633+ i++;
33634+ }
33635+ kset_unregister(list->umevent_disp_pool);
33636+ kfree(list);
33637+}
33638+/**
33639+ * psb_umevent_remove_from_list - remove an event from tracking list
33640+ *
33641+ * @list: list to remove the event from
33642+ * @disp_to_remove: name of event to remove.
33643+ *
33644+ */
33645+void psb_umevent_remove_from_list(struct umevent_list *list,
33646+ const char *disp_to_remove)
33647+{
33648+ struct umevent_obj *umevent_obj_curr = NULL;
33649+ struct list_head *node = NULL;
33650+ struct list_head *node_kill = NULL;
33651+ int i = 0;
33652+ int found_match = 0;
33653+ i = 0;
33654+ node = NULL;
33655+ node_kill = NULL;
33656+ node = list->head.next;
33657+ while (node != (&list->head)) {
33658+ umevent_obj_curr = list_entry(node,
33659+ struct umevent_obj, head);
33660+ if (strcmp(umevent_obj_curr->kobj.name,
33661+ disp_to_remove) == 0) {
33662+ found_match = 1;
33663+ break;
33664+ }
33665+ node = NULL;
33666+ node = umevent_obj_curr->head.next;
33667+ i++;
33668+ }
33669+ if (found_match == 1) {
33670+ node_kill = node;
33671+ node = umevent_obj_curr->head.next;
33672+ psb_destroy_umevent_obj(umevent_obj_curr);
33673+ umevent_obj_curr = NULL;
33674+ list_del(node_kill);
33675+ }
33676+}
33677+EXPORT_SYMBOL(psb_umevent_remove_from_list);
33678+/**
33679+ * psb_umevent_find_obj - find an event in a tracking list
33680+ *
33681+ * @name: name of the event to find
33682+ * @list: list to find the event in
33683+ *
33684+ */
33685+struct umevent_obj *psb_umevent_find_obj(const char *name,
33686+ struct umevent_list *list)
33687+{
33688+ struct umevent_obj *umevent_obj_curr = NULL;
33689+ struct list_head *node = NULL;
33690+ struct list_head *node_find = NULL;
33691+ int i = 0;
33692+ int found_match = 0;
33693+ i = 0;
33694+ node = NULL;
33695+ node_find = NULL;
33696+ node = list->head.next;
33697+ while (node != (&list->head)) {
33698+ umevent_obj_curr = list_entry(node,
33699+ struct umevent_obj, head);
33700+ if (strcmp(umevent_obj_curr->kobj.name,
33701+ name) == 0) {
33702+ found_match = 1;
33703+ break;
33704+ }
33705+ node = NULL;
33706+ node = umevent_obj_curr->head.next;
33707+ i++;
33708+ }
33709+ if (found_match == 1)
33710+ return umevent_obj_curr;
33711+
33712+ return NULL;
33713+}
33714+EXPORT_SYMBOL(psb_umevent_find_obj);
33715+/**
33716+ * psb_umevent_debug_dump_list - debug list dump
33717+ *
33718+ * @list: list to dump
33719+ *
33720+ */
33721+void psb_umevent_debug_dump_list(struct umevent_list *list)
33722+{
33723+ struct umevent_obj *umevent_obj_curr;
33724+ unsigned long flags;
33725+ struct list_head *node;
33726+ int i;
33727+ spin_lock_irqsave(&list->list_lock, flags);
33728+ i = 0;
33729+ node = NULL;
33730+ node = list->head.next;
33731+ while (node != (&list->head)) {
33732+ umevent_obj_curr = list_entry(node,
33733+ struct umevent_obj,
33734+ head);
33735+ /*TBD: DUMP ANY REQUIRED VALUES WITH PRINTK*/
33736+ node = NULL;
33737+ node = umevent_obj_curr->head.next;
33738+ i++;
33739+ }
33740+ spin_unlock_irqrestore(&list->list_lock, flags);
33741+}
33742diff --git a/drivers/gpu/drm/psb/psb_umevents.h b/drivers/gpu/drm/psb/psb_umevents.h
33743new file mode 100644
33744index 0000000..05dbc8b
33745--- /dev/null
33746+++ b/drivers/gpu/drm/psb/psb_umevents.h
33747@@ -0,0 +1,150 @@
33748+/*
33749+ * Copyright © 2009 Intel Corporation
33750+ *
33751+ * Permission is hereby granted, free of charge, to any person obtaining a
33752+ * copy of this software and associated documentation files (the "Software"),
33753+ * to deal in the Software without restriction, including without limitation
33754+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
33755+ * and/or sell copies of the Software, and to permit persons to whom the
33756+ * Software is furnished to do so, subject to the following conditions:
33757+ *
33758+ * The above copyright notice and this permission notice (including the next
33759+ * paragraph) shall be included in all copies or substantial portions of the
33760+ * Software.
33761+ *
33762+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33763+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33764+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
33765+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33766+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33767+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
33768+ * IN THE SOFTWARE.
33769+ *
33770+ * Authors:
33771+ * James C. Gualario <james.c.gualario@intel.com>
33772+ *
33773+ */
33774+#ifndef _PSB_UMEVENT_H_
33775+#define _PSB_UMEVENT_H_
33776+/**
33777+ * required includes
33778+ *
33779+ */
33780+#include <linux/init.h>
33781+#include <linux/module.h>
33782+#include <linux/slab.h>
33783+#include <drm/drmP.h>
33784+#include <drm/drm_core.h>
33785+#include <drm/drm_pciids.h>
33786+#include <linux/spinlock.h>
33787+/**
33788+ * event structure managed by kobjects
33789+ *
33790+ */
33791+struct umevent_obj {
33792+ struct kobject kobj;
33793+ struct list_head head;
33794+ int data_0_val;
33795+ int data_1_val;
33796+ int data_2_val;
33797+ int data_3_val;
33798+ int data_4_val;
33799+ int data_5_val;
33800+ int data_6_val;
33801+ int data_7_val;
33802+};
33803+/**
33804+ * event tracking list element
33805+ *
33806+ */
33807+struct umevent_list{
33808+ struct list_head head;
33809+ struct kset *umevent_disp_pool;
33810+ spinlock_t list_lock;
33811+};
33812+/**
33813+ * to go back and forth between kobjects and their main container
33814+ *
33815+ */
33816+#define to_umevent_obj(x) \
33817+ container_of(x, struct umevent_obj, kobj)
33818+
33819+/**
33820+ * event attributes exposed via sysfs
33821+ *
33822+ */
33823+struct umevent_attribute {
33824+ struct attribute attr;
33825+ ssize_t (*show)(struct umevent_obj *any_umevent_obj,
33826+ struct umevent_attribute *attr, char *buf);
33827+ ssize_t (*store)(struct umevent_obj *any_umevent_obj,
33828+ struct umevent_attribute *attr,
33829+ const char *buf, size_t count);
33830+};
33831+/**
33832+ * to go back and forth between the attribute passed to us by the OS
33833+ * and the umevent_attribute
33834+ *
33835+ */
33836+#define to_umevent_attr(x) \
33837+ container_of(x, struct umevent_attribute, \
33838+ attr)
33839+
33840+/**
33841+ * umevent function prototypes
33842+ *
33843+ */
33844+extern struct umevent_obj *psb_create_umevent_obj(const char *name,
33845+ struct umevent_list
33846+ *list);
33847+extern ssize_t psb_umevent_attr_show(struct kobject *kobj,
33848+ struct attribute *attr, char *buf);
33849+extern ssize_t psb_umevent_attr_store(struct kobject *kobj,
33850+ struct attribute *attr,
33851+ const char *buf, size_t len);
33852+extern ssize_t psb_umevent_attr_show_imp(struct umevent_obj
33853+ *any_umevent_obj,
33854+ struct umevent_attribute *attr,
33855+ char *buf);
33856+extern ssize_t psb_umevent_attr_store_imp(struct umevent_obj
33857+ *any_umevent_obj,
33858+ struct umevent_attribute *attr,
33859+ const char *buf, size_t count);
33860+extern void psb_umevent_cleanup(struct umevent_list *kill_list);
33861+extern int psb_umevent_init(struct kobject *parent_kobj,
33862+ struct umevent_list *new_umevent_list,
33863+ const char *name);
33864+extern void psb_umevent_init_list(struct umevent_list *list);
33865+extern void psb_umevent_debug_dump_list(struct umevent_list *list);
33866+extern void psb_umevent_add_to_list(struct umevent_list *list,
33867+ struct umevent_obj
33868+ *umevent_obj_to_add);
33869+extern void psb_umevent_destroy_list(struct umevent_list *list);
33870+extern struct umevent_list *psb_umevent_create_list(void);
33871+extern void psb_umevent_notify(struct umevent_obj *notify_disp_obj);
33872+extern void psb_umevent_obj_release(struct kobject *kobj);
33873+extern void psb_umevent_remove_from_list(struct umevent_list *list,
33874+ const char *disp_to_remove);
33875+extern void psb_umevent_workqueue_dispatch(int work_type, const char *name,
33876+ struct umevent_list *list);
33877+extern void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj);
33878+extern void psb_umevent_notify_change_gfxsock(struct umevent_obj
33879+ *notify_disp_obj);
33880+extern struct umevent_obj *psb_umevent_find_obj(const char *name,
33881+ struct umevent_list
33882+ *list);
33883+/**
33884+ * socket function prototypes
33885+ *
33886+ */
33887+extern int psb_kobject_uevent(struct kobject *kobj,
33888+ enum kobject_action action);
33889+extern int psb_kobject_uevent_env(struct kobject *kobj,
33890+ enum kobject_action action,
33891+ char *envp[]);
33892+int psb_add_uevent_var(struct kobj_uevent_env *env,
33893+ const char *format, ...)
33894+ __attribute__((format (printf, 2, 3)));
33895+int psb_kobject_action_type(const char *buf,
33896+ size_t count, enum kobject_action *type);
33897+#endif
33898diff --git a/drivers/gpu/drm/psb/psb_xhw.c b/drivers/gpu/drm/psb/psb_xhw.c
33899new file mode 100644
33900index 0000000..58ce493
33901--- /dev/null
33902+++ b/drivers/gpu/drm/psb/psb_xhw.c
33903@@ -0,0 +1,652 @@
33904+/**************************************************************************
33905+ *Copyright (c) 2007-2008, Intel Corporation.
33906+ *All Rights Reserved.
33907+ *
33908+ *This program is free software; you can redistribute it and/or modify it
33909+ *under the terms and conditions of the GNU General Public License,
33910+ *version 2, as published by the Free Software Foundation.
33911+ *
33912+ *This program is distributed in the hope it will be useful, but WITHOUT
33913+ *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
33914+ *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
33915+ *more details.
33916+ *
33917+ *You should have received a copy of the GNU General Public License along with
33918+ *this program; if not, write to the Free Software Foundation, Inc.,
33919+ *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
33920+ *
33921+ *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
33922+ *develop this driver.
33923+ *
33924+ **************************************************************************/
33925+/*
33926+ *Make calls into closed source X server code.
33927+ */
33928+
33929+#include <drm/drmP.h>
33930+#include "psb_drv.h"
33931+#include "ttm/ttm_userobj_api.h"
33932+#include "psb_powermgmt.h"
33933+
33934+void
33935+psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
33936+ struct psb_xhw_buf *buf)
33937+{
33938+ unsigned long irq_flags;
33939+
33940+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
33941+ list_del_init(&buf->head);
33942+ if (dev_priv->xhw_cur_buf == buf)
33943+ dev_priv->xhw_cur_buf = NULL;
33944+ atomic_set(&buf->done, 1);
33945+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
33946+}
33947+
33948+static inline int psb_xhw_add(struct drm_psb_private *dev_priv,
33949+ struct psb_xhw_buf *buf)
33950+{
33951+ unsigned long irq_flags;
33952+
33953+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
33954+ atomic_set(&buf->done, 0);
33955+ if (unlikely(!dev_priv->xhw_submit_ok)) {
33956+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
33957+ DRM_ERROR("No Xpsb 3D extension available.\n");
33958+ return -EINVAL;
33959+ }
33960+ if (!list_empty(&buf->head)) {
33961+ DRM_ERROR("Recursive list adding.\n");
33962+ goto out;
33963+ }
33964+ list_add_tail(&buf->head, &dev_priv->xhw_in);
33965+ wake_up_interruptible(&dev_priv->xhw_queue);
33966+out:
33967+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
33968+ return 0;
33969+}
33970+
33971+int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
33972+ struct psb_xhw_buf *buf,
33973+ uint32_t w,
33974+ uint32_t h,
33975+ uint32_t *hw_cookie,
33976+ uint32_t *bo_size,
33977+ uint32_t *clear_p_start,
33978+ uint32_t *clear_num_pages)
33979+{
33980+ struct drm_psb_xhw_arg *xa = &buf->arg;
33981+ int ret;
33982+
33983+ buf->copy_back = 1;
33984+ xa->op = PSB_XHW_SCENE_INFO;
33985+ xa->irq_op = 0;
33986+ xa->issue_irq = 0;
33987+ xa->arg.si.w = w;
33988+ xa->arg.si.h = h;
33989+
33990+ ret = psb_xhw_add(dev_priv, buf);
33991+ if (ret)
33992+ return ret;
33993+
33994+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
33995+ atomic_read(&buf->done), DRM_HZ);
33996+
33997+ if (!atomic_read(&buf->done)) {
33998+ psb_xhw_clean_buf(dev_priv, buf);
33999+ return -EBUSY;
34000+ }
34001+
34002+ if (!xa->ret) {
34003+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
34004+ *bo_size = xa->arg.si.size;
34005+ *clear_p_start = xa->arg.si.clear_p_start;
34006+ *clear_num_pages = xa->arg.si.clear_num_pages;
34007+ }
34008+ return xa->ret;
34009+}
34010+
34011+int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
34012+ struct psb_xhw_buf *buf, uint32_t fire_flags)
34013+{
34014+ struct drm_psb_xhw_arg *xa = &buf->arg;
34015+
34016+ buf->copy_back = 0;
34017+ xa->op = PSB_XHW_FIRE_RASTER;
34018+ xa->issue_irq = 0;
34019+ xa->arg.sb.fire_flags = 0;
34020+
34021+ return psb_xhw_add(dev_priv, buf);
34022+}
34023+
34024+int psb_xhw_vistest(struct drm_psb_private *dev_priv,
34025+ struct psb_xhw_buf *buf)
34026+{
34027+ struct drm_psb_xhw_arg *xa = &buf->arg;
34028+
34029+ buf->copy_back = 1;
34030+ xa->op = PSB_XHW_VISTEST;
34031+ /*
34032+ *Could perhaps decrease latency somewhat by
34033+ *issuing an irq in this case.
34034+ */
34035+ xa->issue_irq = 0;
34036+ xa->irq_op = PSB_UIRQ_VISTEST;
34037+ return psb_xhw_add(dev_priv, buf);
34038+}
34039+
34040+int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
34041+ struct psb_xhw_buf *buf,
34042+ uint32_t fire_flags,
34043+ uint32_t hw_context,
34044+ uint32_t *cookie,
34045+ uint32_t *oom_cmds,
34046+ uint32_t num_oom_cmds,
34047+ uint32_t offset, uint32_t engine,
34048+ uint32_t flags)
34049+{
34050+ struct drm_psb_xhw_arg *xa = &buf->arg;
34051+
34052+ buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM);
34053+ xa->op = PSB_XHW_SCENE_BIND_FIRE;
34054+ xa->issue_irq = (buf->copy_back) ? 1 : 0;
34055+ if (unlikely(buf->copy_back))
34056+ xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
34057+ PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY;
34058+ else
34059+ xa->irq_op = 0;
34060+ xa->arg.sb.fire_flags = fire_flags;
34061+ xa->arg.sb.hw_context = hw_context;
34062+ xa->arg.sb.offset = offset;
34063+ xa->arg.sb.engine = engine;
34064+ xa->arg.sb.flags = flags;
34065+ xa->arg.sb.num_oom_cmds = num_oom_cmds;
34066+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
34067+ if (num_oom_cmds)
34068+ memcpy(xa->arg.sb.oom_cmds, oom_cmds,
34069+ sizeof(uint32_t) * num_oom_cmds);
34070+ return psb_xhw_add(dev_priv, buf);
34071+}
34072+
34073+int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
34074+ struct psb_xhw_buf *buf)
34075+{
34076+ struct drm_psb_xhw_arg *xa = &buf->arg;
34077+ int ret;
34078+
34079+ buf->copy_back = 1;
34080+ xa->op = PSB_XHW_RESET_DPM;
34081+ xa->issue_irq = 0;
34082+ xa->irq_op = 0;
34083+
34084+ ret = psb_xhw_add(dev_priv, buf);
34085+ if (ret)
34086+ return ret;
34087+
34088+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
34089+ atomic_read(&buf->done), 3 * DRM_HZ);
34090+
34091+ if (!atomic_read(&buf->done)) {
34092+ psb_xhw_clean_buf(dev_priv, buf);
34093+ return -EBUSY;
34094+ }
34095+
34096+ return xa->ret;
34097+}
34098+
34099+int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
34100+ struct psb_xhw_buf *buf, uint32_t *value)
34101+{
34102+ struct drm_psb_xhw_arg *xa = &buf->arg;
34103+ int ret;
34104+
34105+ *value = 0;
34106+
34107+ buf->copy_back = 1;
34108+ xa->op = PSB_XHW_CHECK_LOCKUP;
34109+ xa->issue_irq = 0;
34110+ xa->irq_op = 0;
34111+
34112+ ret = psb_xhw_add(dev_priv, buf);
34113+ if (ret)
34114+ return ret;
34115+
34116+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
34117+ atomic_read(&buf->done), DRM_HZ * 3);
34118+
34119+ if (!atomic_read(&buf->done)) {
34120+ psb_xhw_clean_buf(dev_priv, buf);
34121+ return -EBUSY;
34122+ }
34123+
34124+ if (!xa->ret)
34125+ *value = xa->arg.cl.value;
34126+
34127+ return xa->ret;
34128+}
34129+
34130+static int psb_xhw_terminate(struct drm_psb_private *dev_priv,
34131+ struct psb_xhw_buf *buf)
34132+{
34133+ struct drm_psb_xhw_arg *xa = &buf->arg;
34134+ unsigned long irq_flags;
34135+
34136+ buf->copy_back = 0;
34137+ xa->op = PSB_XHW_TERMINATE;
34138+ xa->issue_irq = 0;
34139+
34140+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34141+ dev_priv->xhw_submit_ok = 0;
34142+ atomic_set(&buf->done, 0);
34143+ if (!list_empty(&buf->head)) {
34144+ DRM_ERROR("Recursive list adding.\n");
34145+ goto out;
34146+ }
34147+ list_add_tail(&buf->head, &dev_priv->xhw_in);
34148+out:
34149+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34150+ wake_up_interruptible(&dev_priv->xhw_queue);
34151+
34152+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
34153+ atomic_read(&buf->done), DRM_HZ / 10);
34154+
34155+ if (!atomic_read(&buf->done)) {
34156+ DRM_ERROR("Xpsb terminate timeout.\n");
34157+ psb_xhw_clean_buf(dev_priv, buf);
34158+ return -EBUSY;
34159+ }
34160+
34161+ return 0;
34162+}
34163+
34164+int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
34165+ struct psb_xhw_buf *buf,
34166+ uint32_t pages, uint32_t * hw_cookie,
34167+ uint32_t * size,
34168+ uint32_t * ta_min_size)
34169+{
34170+ struct drm_psb_xhw_arg *xa = &buf->arg;
34171+ int ret;
34172+
34173+ buf->copy_back = 1;
34174+ xa->op = PSB_XHW_TA_MEM_INFO;
34175+ xa->issue_irq = 0;
34176+ xa->irq_op = 0;
34177+ xa->arg.bi.pages = pages;
34178+
34179+ ret = psb_xhw_add(dev_priv, buf);
34180+ if (ret)
34181+ return ret;
34182+
34183+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
34184+ atomic_read(&buf->done), DRM_HZ);
34185+
34186+ if (!atomic_read(&buf->done)) {
34187+ psb_xhw_clean_buf(dev_priv, buf);
34188+ return -EBUSY;
34189+ }
34190+
34191+ if (!xa->ret)
34192+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
34193+
34194+ *size = xa->arg.bi.size;
34195+ *ta_min_size = xa->arg.bi.ta_min_size;
34196+ return xa->ret;
34197+}
34198+
34199+int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
34200+ struct psb_xhw_buf *buf,
34201+ uint32_t flags,
34202+ uint32_t param_offset,
34203+ uint32_t pt_offset, uint32_t *hw_cookie)
34204+{
34205+ struct drm_psb_xhw_arg *xa = &buf->arg;
34206+ int ret;
34207+
34208+ buf->copy_back = 1;
34209+ xa->op = PSB_XHW_TA_MEM_LOAD;
34210+ xa->issue_irq = 0;
34211+ xa->irq_op = 0;
34212+ xa->arg.bl.flags = flags;
34213+ xa->arg.bl.param_offset = param_offset;
34214+ xa->arg.bl.pt_offset = pt_offset;
34215+ memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie));
34216+
34217+ ret = psb_xhw_add(dev_priv, buf);
34218+ if (ret)
34219+ return ret;
34220+
34221+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
34222+ atomic_read(&buf->done), 3 * DRM_HZ);
34223+
34224+ if (!atomic_read(&buf->done)) {
34225+ psb_xhw_clean_buf(dev_priv, buf);
34226+ return -EBUSY;
34227+ }
34228+
34229+ if (!xa->ret)
34230+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
34231+
34232+ return xa->ret;
34233+}
34234+
34235+int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
34236+ struct psb_xhw_buf *buf, uint32_t *cookie)
34237+{
34238+ struct drm_psb_xhw_arg *xa = &buf->arg;
34239+
34240+ /*
34241+ *This calls the extensive closed source
34242+ *OOM handler, which resolves the condition and
34243+ *sends a reply telling the scheduler what to do
34244+ *with the task.
34245+ */
34246+
34247+ buf->copy_back = 1;
34248+ xa->op = PSB_XHW_OOM;
34249+ xa->issue_irq = 1;
34250+ xa->irq_op = PSB_UIRQ_OOM_REPLY;
34251+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
34252+
34253+ return psb_xhw_add(dev_priv, buf);
34254+}
34255+
34256+void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
34257+ struct psb_xhw_buf *buf,
34258+ uint32_t *cookie,
34259+ uint32_t *bca, uint32_t *rca, uint32_t *flags)
34260+{
34261+ struct drm_psb_xhw_arg *xa = &buf->arg;
34262+
34263+ /*
34264+ *Get info about how to schedule an OOM task.
34265+ */
34266+
34267+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
34268+ *bca = xa->arg.oom.bca;
34269+ *rca = xa->arg.oom.rca;
34270+ *flags = xa->arg.oom.flags;
34271+}
34272+
34273+void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
34274+ struct psb_xhw_buf *buf, uint32_t *cookie)
34275+{
34276+ struct drm_psb_xhw_arg *xa = &buf->arg;
34277+
34278+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
34279+}
34280+
34281+int psb_xhw_resume(struct drm_psb_private *dev_priv,
34282+ struct psb_xhw_buf *buf)
34283+{
34284+ struct drm_psb_xhw_arg *xa = &buf->arg;
34285+ int ret;
34286+ /*
34287+ *For D0i3, force resume to complete
34288+ */
34289+ buf->copy_back = 1;
34290+ xa->op = PSB_XHW_RESUME;
34291+ xa->issue_irq = 0;
34292+ xa->irq_op = 0;
34293+ ret = psb_xhw_add(dev_priv, buf);
34294+ if (ret)
34295+ return ret;
34296+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
34297+ atomic_read(&buf->done), 3 * DRM_HZ);
34298+
34299+ if (!atomic_read(&buf->done)) {
34300+ psb_xhw_clean_buf(dev_priv, buf);
34301+ DRM_ERROR("Xpsb resume fail\n");
34302+ return -EBUSY;
34303+ }
34304+ return ret;
34305+}
34306+
34307+void psb_xhw_takedown(struct drm_psb_private *dev_priv)
34308+{
34309+}
34310+
34311+int psb_xhw_init(struct drm_device *dev)
34312+{
34313+ struct drm_psb_private *dev_priv =
34314+ (struct drm_psb_private *) dev->dev_private;
34315+ unsigned long irq_flags;
34316+
34317+ INIT_LIST_HEAD(&dev_priv->xhw_in);
34318+ spin_lock_init(&dev_priv->xhw_lock);
34319+ atomic_set(&dev_priv->xhw_client, 0);
34320+ init_waitqueue_head(&dev_priv->xhw_queue);
34321+ init_waitqueue_head(&dev_priv->xhw_caller_queue);
34322+ mutex_init(&dev_priv->xhw_mutex);
34323+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34324+ dev_priv->xhw_on = 0;
34325+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34326+
34327+ return 0;
34328+}
34329+
34330+static int psb_xhw_init_init(struct drm_device *dev,
34331+ struct drm_file *file_priv,
34332+ struct drm_psb_xhw_init_arg *arg)
34333+{
34334+ struct drm_psb_private *dev_priv =
34335+ (struct drm_psb_private *) dev->dev_private;
34336+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
34337+ int ret;
34338+ bool is_iomem;
34339+
34340+ if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) {
34341+ unsigned long irq_flags;
34342+
34343+ dev_priv->xhw_bo =
34344+ ttm_buffer_object_lookup(tfile, arg->buffer_handle);
34345+ if (!dev_priv->xhw_bo) {
34346+ ret = -EINVAL;
34347+ goto out_err;
34348+ }
34349+ ret = ttm_bo_kmap(dev_priv->xhw_bo, 0,
34350+ dev_priv->xhw_bo->num_pages,
34351+ &dev_priv->xhw_kmap);
34352+ if (ret) {
34353+ DRM_ERROR("Failed mapping X server "
34354+ "communications buffer.\n");
34355+ goto out_err0;
34356+ }
34357+ dev_priv->xhw =
34358+ ttm_kmap_obj_virtual(&dev_priv->xhw_kmap, &is_iomem);
34359+ if (is_iomem) {
34360+ DRM_ERROR("X server communications buffer"
34361+ "is in device memory.\n");
34362+ ret = -EINVAL;
34363+ goto out_err1;
34364+ }
34365+ dev_priv->xhw_file = file_priv;
34366+
34367+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34368+ dev_priv->xhw_on = 1;
34369+ dev_priv->xhw_submit_ok = 1;
34370+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34371+ return 0;
34372+ } else {
34373+ DRM_ERROR("Xhw is already initialized.\n");
34374+ return -EBUSY;
34375+ }
34376+out_err1:
34377+ dev_priv->xhw = NULL;
34378+ ttm_bo_kunmap(&dev_priv->xhw_kmap);
34379+out_err0:
34380+ ttm_bo_unref(&dev_priv->xhw_bo);
34381+out_err:
34382+ atomic_dec(&dev_priv->xhw_client);
34383+ return ret;
34384+}
34385+
34386+static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv)
34387+{
34388+ struct psb_xhw_buf *cur_buf, *next;
34389+ unsigned long irq_flags;
34390+
34391+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34392+ dev_priv->xhw_submit_ok = 0;
34393+
34394+ list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) {
34395+ list_del_init(&cur_buf->head);
34396+ if (cur_buf->copy_back)
34397+ cur_buf->arg.ret = -EINVAL;
34398+ atomic_set(&cur_buf->done, 1);
34399+ }
34400+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34401+ wake_up(&dev_priv->xhw_caller_queue);
34402+}
34403+
34404+void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
34405+ struct drm_file *file_priv, int closing)
34406+{
34407+
34408+ if (dev_priv->xhw_file == file_priv &&
34409+ atomic_add_unless(&dev_priv->xhw_client, -1, 0)) {
34410+
34411+ if (closing)
34412+ psb_xhw_queue_empty(dev_priv);
34413+ else {
34414+ struct psb_xhw_buf buf;
34415+ INIT_LIST_HEAD(&buf.head);
34416+
34417+ psb_xhw_terminate(dev_priv, &buf);
34418+ psb_xhw_queue_empty(dev_priv);
34419+ }
34420+
34421+ dev_priv->xhw = NULL;
34422+ ttm_bo_kunmap(&dev_priv->xhw_kmap);
34423+ ttm_bo_unref(&dev_priv->xhw_bo);
34424+ dev_priv->xhw_file = NULL;
34425+ }
34426+}
34427+
34428+int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
34429+ struct drm_file *file_priv)
34430+{
34431+ struct drm_psb_xhw_init_arg *arg =
34432+ (struct drm_psb_xhw_init_arg *) data;
34433+ struct drm_psb_private *dev_priv =
34434+ (struct drm_psb_private *) dev->dev_private;
34435+ int ret = 0;
34436+ powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true);
34437+ switch (arg->operation) {
34438+ case PSB_XHW_INIT:
34439+ ret = psb_xhw_init_init(dev, file_priv, arg);
34440+ break;
34441+ case PSB_XHW_TAKEDOWN:
34442+ psb_xhw_init_takedown(dev_priv, file_priv, 0);
34443+ break;
34444+ }
34445+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
34446+ return ret;
34447+}
34448+
34449+static int psb_xhw_in_empty(struct drm_psb_private *dev_priv)
34450+{
34451+ int empty;
34452+ unsigned long irq_flags;
34453+
34454+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34455+ empty = list_empty(&dev_priv->xhw_in);
34456+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34457+ return empty;
34458+}
34459+
34460+int psb_xhw_handler(struct drm_psb_private *dev_priv)
34461+{
34462+ unsigned long irq_flags;
34463+ struct drm_psb_xhw_arg *xa;
34464+ struct psb_xhw_buf *buf;
34465+
34466+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34467+
34468+ if (!dev_priv->xhw_on) {
34469+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34470+ return -EINVAL;
34471+ }
34472+
34473+ buf = dev_priv->xhw_cur_buf;
34474+ if (buf && buf->copy_back) {
34475+ xa = &buf->arg;
34476+ /*w/a for resume, save this memcpy for perfmance*/
34477+ if (xa->op != PSB_XHW_RESUME)
34478+ memcpy(xa, dev_priv->xhw, sizeof(*xa));
34479+ dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op;
34480+ atomic_set(&buf->done, 1);
34481+ wake_up(&dev_priv->xhw_caller_queue);
34482+ } else
34483+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
34484+
34485+ dev_priv->xhw_cur_buf = 0;
34486+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34487+ return 0;
34488+}
34489+
34490+int psb_xhw_ioctl(struct drm_device *dev, void *data,
34491+ struct drm_file *file_priv)
34492+{
34493+ struct drm_psb_private *dev_priv =
34494+ (struct drm_psb_private *) dev->dev_private;
34495+ unsigned long irq_flags;
34496+ struct drm_psb_xhw_arg *xa;
34497+ int ret;
34498+ struct list_head *list;
34499+ struct psb_xhw_buf *buf;
34500+ static int firsttime = 1;
34501+
34502+ if (!dev_priv)
34503+ return -EINVAL;
34504+
34505+ /*tricky fix for sgx HW access from user space when XPSB is load*/
34506+ if(firsttime) {
34507+ firsttime = 0;
34508+ powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
34509+ }
34510+
34511+ if (mutex_lock_interruptible(&dev_priv->xhw_mutex))
34512+ return -ERESTART;
34513+
34514+ if (psb_forced_user_interrupt(dev_priv)) {
34515+ mutex_unlock(&dev_priv->xhw_mutex);
34516+ return -EINVAL;
34517+ }
34518+
34519+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34520+ while (list_empty(&dev_priv->xhw_in)) {
34521+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34522+ ret = wait_event_interruptible_timeout(dev_priv->xhw_queue,
34523+ !psb_xhw_in_empty
34524+ (dev_priv), DRM_HZ);
34525+ if (ret == -ERESTARTSYS || ret == 0) {
34526+ mutex_unlock(&dev_priv->xhw_mutex);
34527+ return -ERESTART;
34528+ }
34529+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
34530+ }
34531+
34532+ list = dev_priv->xhw_in.next;
34533+ list_del_init(list);
34534+
34535+ buf = list_entry(list, struct psb_xhw_buf, head);
34536+ xa = &buf->arg;
34537+ memcpy(dev_priv->xhw, xa, sizeof(*xa));
34538+
34539+ if (unlikely(buf->copy_back))
34540+ dev_priv->xhw_cur_buf = buf;
34541+ else {
34542+ atomic_set(&buf->done, 1);
34543+ dev_priv->xhw_cur_buf = NULL;
34544+ }
34545+
34546+ if (xa->op == PSB_XHW_TERMINATE) {
34547+ dev_priv->xhw_on = 0;
34548+ wake_up(&dev_priv->xhw_caller_queue);
34549+ }
34550+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
34551+
34552+ mutex_unlock(&dev_priv->xhw_mutex);
34553+
34554+ return 0;
34555+}
34556diff --git a/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c
34557new file mode 100644
34558index 0000000..28fbe3b
34559--- /dev/null
34560+++ b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c
34561@@ -0,0 +1,149 @@
34562+/**************************************************************************
34563+ *
34564+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
34565+ * All Rights Reserved.
34566+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
34567+ * All Rights Reserved.
34568+ *
34569+ * Permission is hereby granted, free of charge, to any person obtaining a
34570+ * copy of this software and associated documentation files (the
34571+ * "Software"), to deal in the Software without restriction, including
34572+ * without limitation the rights to use, copy, modify, merge, publish,
34573+ * distribute, sub license, and/or sell copies of the Software, and to
34574+ * permit persons to whom the Software is furnished to do so, subject to
34575+ * the following conditions:
34576+ *
34577+ * The above copyright notice and this permission notice (including the
34578+ * next paragraph) shall be included in all copies or substantial portions
34579+ * of the Software.
34580+ *
34581+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
34582+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34583+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
34584+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
34585+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
34586+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
34587+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
34588+ *
34589+ **************************************************************************/
34590+/*
34591+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
34592+ * Keith Packard.
34593+ */
34594+
34595+#include "ttm/ttm_bo_driver.h"
34596+#ifdef TTM_HAS_AGP
34597+#include "ttm/ttm_placement_common.h"
34598+#include <linux/agp_backend.h>
34599+#include <asm/agp.h>
34600+#include <asm/io.h>
34601+
34602+struct ttm_agp_backend {
34603+ struct ttm_backend backend;
34604+ struct agp_memory *mem;
34605+ struct agp_bridge_data *bridge;
34606+};
34607+
34608+static int ttm_agp_populate(struct ttm_backend *backend,
34609+ unsigned long num_pages, struct page **pages,
34610+ struct page *dummy_read_page)
34611+{
34612+ struct ttm_agp_backend *agp_be =
34613+ container_of(backend, struct ttm_agp_backend, backend);
34614+ struct page **cur_page, **last_page = pages + num_pages;
34615+ struct agp_memory *mem;
34616+
34617+ mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
34618+ if (unlikely(mem == NULL))
34619+ return -ENOMEM;
34620+
34621+ mem->page_count = 0;
34622+ for (cur_page = pages; cur_page < last_page; ++cur_page) {
34623+ struct page *page = *cur_page;
34624+ if (!page) {
34625+ page = dummy_read_page;
34626+ }
34627+ mem->memory[mem->page_count++] =
34628+ phys_to_gart(page_to_phys(page));
34629+ }
34630+ agp_be->mem = mem;
34631+ return 0;
34632+}
34633+
34634+static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
34635+{
34636+ struct ttm_agp_backend *agp_be =
34637+ container_of(backend, struct ttm_agp_backend, backend);
34638+ struct agp_memory *mem = agp_be->mem;
34639+ int cached = (bo_mem->flags & TTM_PL_FLAG_CACHED);
34640+ int ret;
34641+
34642+ mem->is_flushed = 1;
34643+ mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
34644+
34645+ ret = agp_bind_memory(mem, bo_mem->mm_node->start);
34646+ if (ret)
34647+ printk(KERN_ERR "AGP Bind memory failed.\n");
34648+
34649+ return ret;
34650+}
34651+
34652+static int ttm_agp_unbind(struct ttm_backend *backend)
34653+{
34654+ struct ttm_agp_backend *agp_be =
34655+ container_of(backend, struct ttm_agp_backend, backend);
34656+
34657+ if (agp_be->mem->is_bound)
34658+ return agp_unbind_memory(agp_be->mem);
34659+ else
34660+ return 0;
34661+}
34662+
34663+static void ttm_agp_clear(struct ttm_backend *backend)
34664+{
34665+ struct ttm_agp_backend *agp_be =
34666+ container_of(backend, struct ttm_agp_backend, backend);
34667+ struct agp_memory *mem = agp_be->mem;
34668+
34669+ if (mem) {
34670+ ttm_agp_unbind(backend);
34671+ agp_free_memory(mem);
34672+ }
34673+ agp_be->mem = NULL;
34674+}
34675+
34676+static void ttm_agp_destroy(struct ttm_backend *backend)
34677+{
34678+ struct ttm_agp_backend *agp_be =
34679+ container_of(backend, struct ttm_agp_backend, backend);
34680+
34681+ if (agp_be->mem)
34682+ ttm_agp_clear(backend);
34683+ kfree(agp_be);
34684+}
34685+
34686+static struct ttm_backend_func ttm_agp_func = {
34687+ .populate = ttm_agp_populate,
34688+ .clear = ttm_agp_clear,
34689+ .bind = ttm_agp_bind,
34690+ .unbind = ttm_agp_unbind,
34691+ .destroy = ttm_agp_destroy,
34692+};
34693+
34694+struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
34695+ struct agp_bridge_data *bridge)
34696+{
34697+ struct ttm_agp_backend *agp_be;
34698+
34699+ agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
34700+ if (!agp_be)
34701+ return NULL;
34702+
34703+ agp_be->mem = NULL;
34704+ agp_be->bridge = bridge;
34705+ agp_be->backend.func = &ttm_agp_func;
34706+ agp_be->backend.bdev = bdev;
34707+ return &agp_be->backend;
34708+}
34709+
34710+#endif
34711diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo.c b/drivers/gpu/drm/psb/ttm/ttm_bo.c
34712new file mode 100644
34713index 0000000..7cdbd45
34714--- /dev/null
34715+++ b/drivers/gpu/drm/psb/ttm/ttm_bo.c
34716@@ -0,0 +1,1716 @@
34717+/**************************************************************************
34718+ *
34719+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
34720+ * All Rights Reserved.
34721+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
34722+ * All Rights Reserved.
34723+ *
34724+ * Permission is hereby granted, free of charge, to any person obtaining a
34725+ * copy of this software and associated documentation files (the
34726+ * "Software"), to deal in the Software without restriction, including
34727+ * without limitation the rights to use, copy, modify, merge, publish,
34728+ * distribute, sub license, and/or sell copies of the Software, and to
34729+ * permit persons to whom the Software is furnished to do so, subject to
34730+ * the following conditions:
34731+ *
34732+ * The above copyright notice and this permission notice (including the
34733+ * next paragraph) shall be included in all copies or substantial portions
34734+ * of the Software.
34735+ *
34736+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
34737+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34738+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
34739+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
34740+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
34741+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
34742+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
34743+ *
34744+ **************************************************************************/
34745+/*
34746+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34747+ */
34748+
34749+#include "ttm/ttm_bo_driver.h"
34750+#include "ttm/ttm_placement_common.h"
34751+#include <linux/jiffies.h>
34752+#include <linux/slab.h>
34753+#include <linux/sched.h>
34754+#include <linux/mm.h>
34755+#include <linux/file.h>
34756+
34757+#define TTM_ASSERT_LOCKED(param)
34758+#define TTM_DEBUG(fmt, arg...)
34759+#define TTM_BO_HASH_ORDER 13
34760+
34761+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
34762+static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
34763+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
34764+
34765+static inline uint32_t ttm_bo_type_flags(unsigned type)
34766+{
34767+ return (1 << (type));
34768+}
34769+
34770+static void ttm_bo_release_list(struct kref *list_kref)
34771+{
34772+ struct ttm_buffer_object *bo =
34773+ container_of(list_kref, struct ttm_buffer_object, list_kref);
34774+ struct ttm_bo_device *bdev = bo->bdev;
34775+
34776+ BUG_ON(atomic_read(&bo->list_kref.refcount));
34777+ BUG_ON(atomic_read(&bo->kref.refcount));
34778+ BUG_ON(atomic_read(&bo->cpu_writers));
34779+ BUG_ON(bo->sync_obj != NULL);
34780+ BUG_ON(bo->mem.mm_node != NULL);
34781+ BUG_ON(!list_empty(&bo->lru));
34782+ BUG_ON(!list_empty(&bo->ddestroy));
34783+
34784+ if (bo->ttm)
34785+ ttm_tt_destroy(bo->ttm);
34786+ if (bo->destroy)
34787+ bo->destroy(bo);
34788+ else {
34789+ ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
34790+ kfree(bo);
34791+ }
34792+}
34793+
34794+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
34795+{
34796+
34797+ if (interruptible) {
34798+ int ret = 0;
34799+
34800+ ret = wait_event_interruptible(bo->event_queue,
34801+ atomic_read(&bo->reserved) == 0);
34802+ if (unlikely(ret != 0))
34803+ return -ERESTART;
34804+ } else {
34805+ wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
34806+ }
34807+ return 0;
34808+}
34809+
34810+static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
34811+{
34812+ struct ttm_bo_device *bdev = bo->bdev;
34813+ struct ttm_mem_type_manager *man;
34814+
34815+ BUG_ON(!atomic_read(&bo->reserved));
34816+
34817+ if (!(bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
34818+
34819+ BUG_ON(!list_empty(&bo->lru));
34820+
34821+ man = &bdev->man[bo->mem.mem_type];
34822+ list_add_tail(&bo->lru, &man->lru);
34823+ kref_get(&bo->list_kref);
34824+
34825+ if (bo->ttm != NULL) {
34826+ list_add_tail(&bo->swap, &bdev->swap_lru);
34827+ kref_get(&bo->list_kref);
34828+ }
34829+ }
34830+}
34831+
34832+/*
34833+ * Call with bdev->lru_lock and bdev->global->swap_lock held..
34834+ */
34835+
34836+static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
34837+{
34838+ int put_count = 0;
34839+
34840+ if (!list_empty(&bo->swap)) {
34841+ list_del_init(&bo->swap);
34842+ ++put_count;
34843+ }
34844+ if (!list_empty(&bo->lru)) {
34845+ list_del_init(&bo->lru);
34846+ ++put_count;
34847+ }
34848+
34849+ /*
34850+ * TODO: Add a driver hook to delete from
34851+ * driver-specific LRU's here.
34852+ */
34853+
34854+ return put_count;
34855+}
34856+
34857+int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
34858+ bool interruptible,
34859+ bool no_wait, bool use_sequence, uint32_t sequence)
34860+{
34861+ struct ttm_bo_device *bdev = bo->bdev;
34862+ int ret;
34863+
34864+ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
34865+ if (use_sequence && bo->seq_valid &&
34866+ (sequence - bo->val_seq < (1 << 31))) {
34867+ return -EAGAIN;
34868+ }
34869+
34870+ if (no_wait)
34871+ return -EBUSY;
34872+
34873+ spin_unlock(&bdev->lru_lock);
34874+ ret = ttm_bo_wait_unreserved(bo, interruptible);
34875+ spin_lock(&bdev->lru_lock);
34876+
34877+ if (unlikely(ret))
34878+ return ret;
34879+ }
34880+
34881+ if (use_sequence) {
34882+ bo->val_seq = sequence;
34883+ bo->seq_valid = true;
34884+ } else {
34885+ bo->seq_valid = false;
34886+ }
34887+
34888+ return 0;
34889+}
34890+
34891+static void ttm_bo_ref_bug(struct kref *list_kref)
34892+{
34893+ BUG();
34894+}
34895+
34896+int ttm_bo_reserve(struct ttm_buffer_object *bo,
34897+ bool interruptible,
34898+ bool no_wait, bool use_sequence, uint32_t sequence)
34899+{
34900+ struct ttm_bo_device *bdev = bo->bdev;
34901+ int put_count = 0;
34902+ int ret;
34903+
34904+ spin_lock(&bdev->lru_lock);
34905+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
34906+ sequence);
34907+ if (likely(ret == 0))
34908+ put_count = ttm_bo_del_from_lru(bo);
34909+ spin_unlock(&bdev->lru_lock);
34910+
34911+ while (put_count--)
34912+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
34913+
34914+ return ret;
34915+}
34916+
34917+void ttm_bo_unreserve(struct ttm_buffer_object *bo)
34918+{
34919+ struct ttm_bo_device *bdev = bo->bdev;
34920+
34921+ spin_lock(&bdev->lru_lock);
34922+ ttm_bo_add_to_lru(bo);
34923+ atomic_set(&bo->reserved, 0);
34924+ wake_up_all(&bo->event_queue);
34925+ spin_unlock(&bdev->lru_lock);
34926+}
34927+
34928+/*
34929+ * Call bo->mutex locked.
34930+ */
34931+
34932+static int ttm_bo_add_ttm(struct ttm_buffer_object *bo)
34933+{
34934+ struct ttm_bo_device *bdev = bo->bdev;
34935+ int ret = 0;
34936+ uint32_t page_flags = 0;
34937+
34938+ TTM_ASSERT_LOCKED(&bo->mutex);
34939+ bo->ttm = NULL;
34940+
34941+ switch (bo->type) {
34942+ case ttm_bo_type_device:
34943+ case ttm_bo_type_kernel:
34944+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
34945+ page_flags, bdev->dummy_read_page);
34946+ if (unlikely(bo->ttm == NULL))
34947+ ret = -ENOMEM;
34948+ break;
34949+ case ttm_bo_type_user:
34950+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
34951+ page_flags | TTM_PAGE_FLAG_USER,
34952+ bdev->dummy_read_page);
34953+ if (unlikely(bo->ttm == NULL))
34954+ ret = -ENOMEM;
34955+ break;
34956+
34957+ ret = ttm_tt_set_user(bo->ttm, current,
34958+ bo->buffer_start, bo->num_pages);
34959+ if (unlikely(ret != 0))
34960+ ttm_tt_destroy(bo->ttm);
34961+ break;
34962+ default:
34963+ printk(KERN_ERR "Illegal buffer object type\n");
34964+ ret = -EINVAL;
34965+ break;
34966+ }
34967+
34968+ return ret;
34969+}
34970+
34971+static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
34972+ struct ttm_mem_reg *mem,
34973+ bool evict, bool interruptible, bool no_wait)
34974+{
34975+ struct ttm_bo_device *bdev = bo->bdev;
34976+ bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
34977+ bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
34978+ struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
34979+ struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
34980+ int ret = 0;
34981+
34982+ if (old_is_pci || new_is_pci ||
34983+ ((mem->flags & bo->mem.flags & TTM_PL_MASK_CACHING) == 0))
34984+ ttm_bo_unmap_virtual(bo);
34985+
34986+ /*
34987+ * Create and bind a ttm if required.
34988+ */
34989+
34990+ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
34991+ ret = ttm_bo_add_ttm(bo);
34992+ if (ret)
34993+ goto out_err;
34994+
34995+ ret = ttm_tt_set_placement_caching(bo->ttm, mem->flags);
34996+ if (ret)
34997+ return ret;
34998+
34999+ if (mem->mem_type != TTM_PL_SYSTEM) {
35000+ ret = ttm_tt_bind(bo->ttm, mem);
35001+ if (ret)
35002+ goto out_err;
35003+ }
35004+
35005+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
35006+
35007+ struct ttm_mem_reg *old_mem = &bo->mem;
35008+ uint32_t save_flags = old_mem->flags;
35009+ uint32_t save_proposed_flags = old_mem->proposed_flags;
35010+
35011+ *old_mem = *mem;
35012+ mem->mm_node = NULL;
35013+ old_mem->proposed_flags = save_proposed_flags;
35014+ ttm_flag_masked(&save_flags, mem->flags,
35015+ TTM_PL_MASK_MEMTYPE);
35016+ goto moved;
35017+ }
35018+
35019+ }
35020+
35021+ if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
35022+ !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
35023+ ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
35024+ else if (bdev->driver->move)
35025+ ret = bdev->driver->move(bo, evict, interruptible,
35026+ no_wait, mem);
35027+ else
35028+ ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
35029+
35030+ if (ret)
35031+ goto out_err;
35032+
35033+ moved:
35034+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_EVICTED) {
35035+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.flags);
35036+ if (ret)
35037+ printk(KERN_ERR "Can not flush read caches\n");
35038+ }
35039+
35040+ ttm_flag_masked(&bo->priv_flags,
35041+ (evict) ? TTM_BO_PRIV_FLAG_EVICTED : 0,
35042+ TTM_BO_PRIV_FLAG_EVICTED);
35043+
35044+ if (bo->mem.mm_node)
35045+ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
35046+ bdev->man[bo->mem.mem_type].gpu_offset;
35047+
35048+ return 0;
35049+
35050+ out_err:
35051+ new_man = &bdev->man[bo->mem.mem_type];
35052+ if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
35053+ ttm_tt_unbind(bo->ttm);
35054+ ttm_tt_destroy(bo->ttm);
35055+ bo->ttm = NULL;
35056+ }
35057+
35058+ return ret;
35059+}
35060+
35061+static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo,
35062+ bool allow_errors)
35063+{
35064+ struct ttm_bo_device *bdev = bo->bdev;
35065+ struct ttm_bo_driver *driver = bdev->driver;
35066+
35067+ if (bo->sync_obj) {
35068+ if (bdev->nice_mode) {
35069+ unsigned long _end = jiffies + 3 * HZ;
35070+ int ret;
35071+ do {
35072+ ret = ttm_bo_wait(bo, false, false, false);
35073+ if (ret && allow_errors)
35074+ return ret;
35075+
35076+ } while (ret && !time_after_eq(jiffies, _end));
35077+
35078+ if (bo->sync_obj) {
35079+ bdev->nice_mode = false;
35080+ printk(KERN_ERR "Detected probable GPU lockup. "
35081+ "Evicting buffer.\n");
35082+ }
35083+ }
35084+ if (bo->sync_obj) {
35085+ driver->sync_obj_unref(&bo->sync_obj);
35086+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
35087+ }
35088+ }
35089+ return 0;
35090+}
35091+
35092+/**
35093+ * If bo idle, remove from delayed- and lru lists, and unref.
35094+ * If not idle, and already on delayed list, do nothing.
35095+ * If not idle, and not on delayed list, put on delayed list,
35096+ * up the list_kref and schedule a delayed list check.
35097+ */
35098+
35099+static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
35100+{
35101+ struct ttm_bo_device *bdev = bo->bdev;
35102+ struct ttm_bo_driver *driver = bdev->driver;
35103+
35104+ mutex_lock(&bo->mutex);
35105+
35106+ if (bo->sync_obj && driver->sync_obj_signaled(bo->sync_obj,
35107+ bo->sync_obj_arg)) {
35108+ driver->sync_obj_unref(&bo->sync_obj);
35109+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
35110+ }
35111+
35112+ if (bo->sync_obj && remove_all)
35113+ (void)ttm_bo_expire_sync_obj(bo, false);
35114+
35115+ if (!bo->sync_obj) {
35116+ int put_count;
35117+
35118+ if (bo->ttm)
35119+ ttm_tt_unbind(bo->ttm);
35120+ spin_lock(&bdev->lru_lock);
35121+ if (!list_empty(&bo->ddestroy)) {
35122+ list_del_init(&bo->ddestroy);
35123+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
35124+ }
35125+ if (bo->mem.mm_node) {
35126+ drm_mm_put_block(bo->mem.mm_node);
35127+ bo->mem.mm_node = NULL;
35128+ }
35129+ put_count = ttm_bo_del_from_lru(bo);
35130+ spin_unlock(&bdev->lru_lock);
35131+ mutex_unlock(&bo->mutex);
35132+ while (put_count--)
35133+ kref_put(&bo->list_kref, ttm_bo_release_list);
35134+
35135+ return;
35136+ }
35137+
35138+ spin_lock(&bdev->lru_lock);
35139+ if (list_empty(&bo->ddestroy)) {
35140+ spin_unlock(&bdev->lru_lock);
35141+ driver->sync_obj_flush(bo->sync_obj, bo->sync_obj_arg);
35142+ spin_lock(&bdev->lru_lock);
35143+ if (list_empty(&bo->ddestroy)) {
35144+ kref_get(&bo->list_kref);
35145+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
35146+ }
35147+ spin_unlock(&bdev->lru_lock);
35148+ schedule_delayed_work(&bdev->wq,
35149+ ((HZ / 100) < 1) ? 1 : HZ / 100);
35150+ } else
35151+ spin_unlock(&bdev->lru_lock);
35152+
35153+ mutex_unlock(&bo->mutex);
35154+ return;
35155+}
35156+
35157+/**
35158+ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
35159+ * encountered buffers.
35160+ */
35161+
35162+static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
35163+{
35164+ struct ttm_buffer_object *entry, *nentry;
35165+ struct list_head *list, *next;
35166+ int ret;
35167+
35168+ spin_lock(&bdev->lru_lock);
35169+ list_for_each_safe(list, next, &bdev->ddestroy) {
35170+ entry = list_entry(list, struct ttm_buffer_object, ddestroy);
35171+ nentry = NULL;
35172+
35173+ /*
35174+ * Protect the next list entry from destruction while we
35175+ * unlock the lru_lock.
35176+ */
35177+
35178+ if (next != &bdev->ddestroy) {
35179+ nentry = list_entry(next, struct ttm_buffer_object,
35180+ ddestroy);
35181+ kref_get(&nentry->list_kref);
35182+ }
35183+ kref_get(&entry->list_kref);
35184+
35185+ spin_unlock(&bdev->lru_lock);
35186+ ttm_bo_cleanup_refs(entry, remove_all);
35187+ kref_put(&entry->list_kref, ttm_bo_release_list);
35188+ spin_lock(&bdev->lru_lock);
35189+
35190+ if (nentry) {
35191+ bool next_onlist = !list_empty(next);
35192+ kref_put(&nentry->list_kref, ttm_bo_release_list);
35193+
35194+ /*
35195+ * Someone might have raced us and removed the
35196+ * next entry from the list. We don't bother restarting
35197+ * list traversal.
35198+ */
35199+
35200+ if (!next_onlist)
35201+ break;
35202+ }
35203+ }
35204+ ret = !list_empty(&bdev->ddestroy);
35205+ spin_unlock(&bdev->lru_lock);
35206+
35207+ return ret;
35208+}
35209+
35210+static void ttm_bo_delayed_workqueue(struct work_struct *work)
35211+{
35212+ struct ttm_bo_device *bdev =
35213+ container_of(work, struct ttm_bo_device, wq.work);
35214+
35215+ if (ttm_bo_delayed_delete(bdev, false)) {
35216+ schedule_delayed_work(&bdev->wq,
35217+ ((HZ / 100) < 1) ? 1 : HZ / 100);
35218+ }
35219+}
35220+
35221+static void ttm_bo_release(struct kref *kref)
35222+{
35223+ struct ttm_buffer_object *bo =
35224+ container_of(kref, struct ttm_buffer_object, kref);
35225+ struct ttm_bo_device *bdev = bo->bdev;
35226+
35227+ if (likely(bo->vm_node != NULL)) {
35228+ rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
35229+ drm_mm_put_block(bo->vm_node);
35230+ }
35231+ write_unlock(&bdev->vm_lock);
35232+ ttm_bo_cleanup_refs(bo, false);
35233+ kref_put(&bo->list_kref, ttm_bo_release_list);
35234+ write_lock(&bdev->vm_lock);
35235+}
35236+
35237+void ttm_bo_unref(struct ttm_buffer_object **p_bo)
35238+{
35239+ struct ttm_buffer_object *bo = *p_bo;
35240+ struct ttm_bo_device *bdev = bo->bdev;
35241+
35242+ *p_bo = NULL;
35243+ write_lock(&bdev->vm_lock);
35244+ kref_put(&bo->kref, ttm_bo_release);
35245+ write_unlock(&bdev->vm_lock);
35246+}
35247+
35248+static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
35249+ bool interruptible, bool no_wait)
35250+{
35251+ int ret = 0;
35252+ struct ttm_bo_device *bdev = bo->bdev;
35253+ struct ttm_mem_reg evict_mem;
35254+
35255+ if (bo->mem.mem_type != mem_type)
35256+ goto out;
35257+
35258+ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
35259+ if (ret && ret != -ERESTART) {
35260+ printk(KERN_ERR "Failed to expire sync object before "
35261+ "buffer eviction.\n");
35262+ goto out;
35263+ }
35264+
35265+ BUG_ON(!atomic_read(&bo->reserved));
35266+
35267+ evict_mem = bo->mem;
35268+ evict_mem.mm_node = NULL;
35269+
35270+ evict_mem.proposed_flags = bdev->driver->evict_flags(bo);
35271+ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
35272+
35273+ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
35274+ if (unlikely(ret != 0 && ret != -ERESTART)) {
35275+ evict_mem.proposed_flags = TTM_PL_FLAG_SYSTEM;
35276+ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
35277+ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
35278+ }
35279+
35280+ if (ret) {
35281+ if (ret != -ERESTART)
35282+ printk(KERN_ERR "Failed to find memory space for "
35283+ "buffer 0x%p eviction.\n", bo);
35284+ goto out;
35285+ }
35286+
35287+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, no_wait);
35288+ if (ret) {
35289+ if (ret != -ERESTART)
35290+ printk(KERN_ERR "Buffer eviction failed\n");
35291+ goto out;
35292+ }
35293+
35294+ spin_lock(&bdev->lru_lock);
35295+ if (evict_mem.mm_node) {
35296+ drm_mm_put_block(evict_mem.mm_node);
35297+ evict_mem.mm_node = NULL;
35298+ }
35299+ spin_unlock(&bdev->lru_lock);
35300+
35301+ ttm_flag_masked(&bo->priv_flags, TTM_BO_PRIV_FLAG_EVICTED,
35302+ TTM_BO_PRIV_FLAG_EVICTED);
35303+
35304+ out:
35305+ return ret;
35306+}
35307+
35308+/**
35309+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
35310+ * space, or we've evicted everything and there isn't enough space.
35311+ */
35312+static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
35313+ struct ttm_mem_reg *mem,
35314+ uint32_t mem_type,
35315+ bool interruptible, bool no_wait)
35316+{
35317+ struct drm_mm_node *node;
35318+ struct ttm_buffer_object *entry;
35319+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
35320+ struct list_head *lru;
35321+ unsigned long num_pages = mem->num_pages;
35322+ int put_count = 0;
35323+ int ret;
35324+
35325+ retry_pre_get:
35326+ ret = drm_mm_pre_get(&man->manager);
35327+ if (unlikely(ret != 0))
35328+ return ret;
35329+
35330+ spin_lock(&bdev->lru_lock);
35331+ do {
35332+ node = drm_mm_search_free(&man->manager, num_pages,
35333+ mem->page_alignment, 1);
35334+ if (node)
35335+ break;
35336+
35337+ lru = &man->lru;
35338+ if (list_empty(lru))
35339+ break;
35340+
35341+ entry = list_first_entry(lru, struct ttm_buffer_object, lru);
35342+ kref_get(&entry->list_kref);
35343+
35344+ ret =
35345+ ttm_bo_reserve_locked(entry, interruptible, no_wait, false, 0);
35346+
35347+ if (likely(ret == 0))
35348+ put_count = ttm_bo_del_from_lru(entry);
35349+
35350+ spin_unlock(&bdev->lru_lock);
35351+
35352+ if (unlikely(ret != 0))
35353+ return ret;
35354+
35355+ while (put_count--)
35356+ kref_put(&entry->list_kref, ttm_bo_ref_bug);
35357+
35358+ mutex_lock(&entry->mutex);
35359+ ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
35360+ mutex_unlock(&entry->mutex);
35361+
35362+ ttm_bo_unreserve(entry);
35363+
35364+ kref_put(&entry->list_kref, ttm_bo_release_list);
35365+ if (ret)
35366+ return ret;
35367+
35368+ spin_lock(&bdev->lru_lock);
35369+ } while (1);
35370+
35371+ if (!node) {
35372+ spin_unlock(&bdev->lru_lock);
35373+ return -ENOMEM;
35374+ }
35375+
35376+ node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
35377+ if (unlikely(!node)) {
35378+ spin_unlock(&bdev->lru_lock);
35379+ goto retry_pre_get;
35380+ }
35381+
35382+ spin_unlock(&bdev->lru_lock);
35383+ mem->mm_node = node;
35384+ mem->mem_type = mem_type;
35385+ return 0;
35386+}
35387+
35388+static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
35389+ bool disallow_fixed,
35390+ uint32_t mem_type,
35391+ uint32_t mask, uint32_t * res_mask)
35392+{
35393+ uint32_t cur_flags = ttm_bo_type_flags(mem_type);
35394+
35395+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
35396+ return false;
35397+
35398+ if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
35399+ return false;
35400+
35401+ if ((mask & man->available_caching) == 0)
35402+ return false;
35403+ if (mask & man->default_caching)
35404+ cur_flags |= man->default_caching;
35405+ else if (mask & TTM_PL_FLAG_CACHED)
35406+ cur_flags |= TTM_PL_FLAG_CACHED;
35407+ else if (mask & TTM_PL_FLAG_WC)
35408+ cur_flags |= TTM_PL_FLAG_WC;
35409+ else
35410+ cur_flags |= TTM_PL_FLAG_UNCACHED;
35411+
35412+ *res_mask = cur_flags;
35413+ return true;
35414+}
35415+
35416+/**
35417+ * Creates space for memory region @mem according to its type.
35418+ *
35419+ * This function first searches for free space in compatible memory types in
35420+ * the priority order defined by the driver. If free space isn't found, then
35421+ * ttm_bo_mem_force_space is attempted in priority order to evict and find
35422+ * space.
35423+ */
35424+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
35425+ struct ttm_mem_reg *mem, bool interruptible, bool no_wait)
35426+{
35427+ struct ttm_bo_device *bdev = bo->bdev;
35428+ struct ttm_mem_type_manager *man;
35429+
35430+ uint32_t num_prios = bdev->driver->num_mem_type_prio;
35431+ const uint32_t *prios = bdev->driver->mem_type_prio;
35432+ uint32_t i;
35433+ uint32_t mem_type = TTM_PL_SYSTEM;
35434+ uint32_t cur_flags = 0;
35435+ bool type_found = false;
35436+ bool type_ok = false;
35437+ bool has_eagain = false;
35438+ struct drm_mm_node *node = NULL;
35439+ int ret;
35440+
35441+ mem->mm_node = NULL;
35442+ for (i = 0; i < num_prios; ++i) {
35443+ mem_type = prios[i];
35444+ man = &bdev->man[mem_type];
35445+
35446+ type_ok = ttm_bo_mt_compatible(man,
35447+ bo->type == ttm_bo_type_user,
35448+ mem_type, mem->proposed_flags,
35449+ &cur_flags);
35450+
35451+ if (!type_ok)
35452+ continue;
35453+
35454+ if (mem_type == TTM_PL_SYSTEM)
35455+ break;
35456+
35457+ if (man->has_type && man->use_type) {
35458+ type_found = true;
35459+ do {
35460+ ret = drm_mm_pre_get(&man->manager);
35461+ if (unlikely(ret))
35462+ return ret;
35463+
35464+ spin_lock(&bdev->lru_lock);
35465+ node = drm_mm_search_free(&man->manager,
35466+ mem->num_pages,
35467+ mem->page_alignment,
35468+ 1);
35469+ if (unlikely(!node)) {
35470+ spin_unlock(&bdev->lru_lock);
35471+ break;
35472+ }
35473+ node = drm_mm_get_block_atomic(node,
35474+ mem->num_pages,
35475+ mem->
35476+ page_alignment);
35477+ spin_unlock(&bdev->lru_lock);
35478+ } while (!node);
35479+ }
35480+ if (node)
35481+ break;
35482+ }
35483+
35484+ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
35485+ mem->mm_node = node;
35486+ mem->mem_type = mem_type;
35487+ mem->flags = cur_flags;
35488+ return 0;
35489+ }
35490+
35491+ if (!type_found)
35492+ return -EINVAL;
35493+
35494+ num_prios = bdev->driver->num_mem_busy_prio;
35495+ prios = bdev->driver->mem_busy_prio;
35496+
35497+ for (i = 0; i < num_prios; ++i) {
35498+ mem_type = prios[i];
35499+ man = &bdev->man[mem_type];
35500+
35501+ if (!man->has_type)
35502+ continue;
35503+
35504+ if (!ttm_bo_mt_compatible(man,
35505+ bo->type == ttm_bo_type_user,
35506+ mem_type,
35507+ mem->proposed_flags, &cur_flags))
35508+ continue;
35509+
35510+ ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
35511+ interruptible, no_wait);
35512+
35513+ if (ret == 0 && mem->mm_node) {
35514+ mem->flags = cur_flags;
35515+ return 0;
35516+ }
35517+
35518+ if (ret == -ERESTART)
35519+ has_eagain = true;
35520+ }
35521+
35522+ ret = (has_eagain) ? -ERESTART : -ENOMEM;
35523+ return ret;
35524+}
35525+
35526+/*
35527+ * Call bo->mutex locked.
35528+ * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
35529+ */
35530+
35531+static int ttm_bo_busy(struct ttm_buffer_object *bo)
35532+{
35533+ void *sync_obj = bo->sync_obj;
35534+ struct ttm_bo_driver *driver = bo->bdev->driver;
35535+
35536+ if (sync_obj) {
35537+ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
35538+ driver->sync_obj_unref(&bo->sync_obj);
35539+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
35540+ return 0;
35541+ }
35542+ driver->sync_obj_flush(sync_obj, bo->sync_obj_arg);
35543+ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
35544+ driver->sync_obj_unref(&bo->sync_obj);
35545+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
35546+ return 0;
35547+ }
35548+ return 1;
35549+ }
35550+ return 0;
35551+}
35552+
35553+int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
35554+{
35555+ int ret = 0;
35556+
35557+ if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
35558+ return -EBUSY;
35559+
35560+ ret = wait_event_interruptible(bo->event_queue,
35561+ atomic_read(&bo->cpu_writers) == 0);
35562+
35563+ if (ret == -ERESTARTSYS)
35564+ ret = -ERESTART;
35565+
35566+ return ret;
35567+}
35568+
35569+/*
35570+ * bo->mutex locked.
35571+ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
35572+ */
35573+
35574+int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t new_mem_flags,
35575+ bool interruptible, bool no_wait)
35576+{
35577+ struct ttm_bo_device *bdev = bo->bdev;
35578+ int ret = 0;
35579+ struct ttm_mem_reg mem;
35580+
35581+ BUG_ON(!atomic_read(&bo->reserved));
35582+
35583+ /*
35584+ * FIXME: It's possible to pipeline buffer moves.
35585+ * Have the driver move function wait for idle when necessary,
35586+ * instead of doing it here.
35587+ */
35588+
35589+ ttm_bo_busy(bo);
35590+ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
35591+ if (ret)
35592+ return ret;
35593+
35594+ mem.num_pages = bo->num_pages;
35595+ mem.size = mem.num_pages << PAGE_SHIFT;
35596+ mem.proposed_flags = new_mem_flags;
35597+ mem.page_alignment = bo->mem.page_alignment;
35598+
35599+ /*
35600+ * Determine where to move the buffer.
35601+ */
35602+
35603+ ret = ttm_bo_mem_space(bo, &mem, interruptible, no_wait);
35604+ if (ret)
35605+ goto out_unlock;
35606+
35607+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
35608+
35609+ out_unlock:
35610+ if (ret && mem.mm_node) {
35611+ spin_lock(&bdev->lru_lock);
35612+ drm_mm_put_block(mem.mm_node);
35613+ spin_unlock(&bdev->lru_lock);
35614+ }
35615+ return ret;
35616+}
35617+
35618+static int ttm_bo_mem_compat(struct ttm_mem_reg *mem)
35619+{
35620+ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_MEM) == 0)
35621+ return 0;
35622+ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_CACHING) == 0)
35623+ return 0;
35624+
35625+ return 1;
35626+}
35627+
35628+int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
35629+ bool interruptible, bool no_wait)
35630+{
35631+ int ret;
35632+
35633+ BUG_ON(!atomic_read(&bo->reserved));
35634+ bo->mem.proposed_flags = bo->proposed_flags;
35635+
35636+ TTM_DEBUG("Proposed flags 0x%08lx, Old flags 0x%08lx\n",
35637+ (unsigned long)bo->mem.proposed_flags,
35638+ (unsigned long)bo->mem.flags);
35639+
35640+ /*
35641+ * Check whether we need to move buffer.
35642+ */
35643+
35644+ if (!ttm_bo_mem_compat(&bo->mem)) {
35645+ ret = ttm_bo_move_buffer(bo, bo->mem.proposed_flags,
35646+ interruptible, no_wait);
35647+ if (ret) {
35648+ if (ret != -ERESTART)
35649+ printk(KERN_ERR "Failed moving buffer. "
35650+ "Proposed placement 0x%08x\n",
35651+ bo->mem.proposed_flags);
35652+ if (ret == -ENOMEM)
35653+ printk(KERN_ERR "Out of aperture space or "
35654+ "DRM memory quota.\n");
35655+ return ret;
35656+ }
35657+ }
35658+
35659+ /*
35660+ * We might need to add a TTM.
35661+ */
35662+
35663+ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
35664+ ret = ttm_bo_add_ttm(bo);
35665+ if (ret)
35666+ return ret;
35667+ }
35668+ /*
35669+ * Validation has succeeded, move the access and other
35670+ * non-mapping-related flag bits from the proposed flags to
35671+ * the active flags
35672+ */
35673+
35674+ ttm_flag_masked(&bo->mem.flags, bo->proposed_flags,
35675+ ~TTM_PL_MASK_MEMTYPE);
35676+
35677+ return 0;
35678+}
35679+
35680+int
35681+ttm_bo_check_placement(struct ttm_buffer_object *bo,
35682+ uint32_t set_flags, uint32_t clr_flags)
35683+{
35684+ uint32_t new_mask = set_flags | clr_flags;
35685+
35686+ if ((bo->type == ttm_bo_type_user) && (clr_flags & TTM_PL_FLAG_CACHED)) {
35687+ printk(KERN_ERR
35688+ "User buffers require cache-coherent memory.\n");
35689+ return -EINVAL;
35690+ }
35691+
35692+ if (!capable(CAP_SYS_ADMIN)) {
35693+ if (new_mask & TTM_PL_FLAG_NO_EVICT) {
35694+ printk(KERN_ERR "Need to be root to modify"
35695+ " NO_EVICT status.\n");
35696+ return -EINVAL;
35697+ }
35698+
35699+ if ((clr_flags & bo->mem.flags & TTM_PL_MASK_MEMTYPE) &&
35700+ (bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
35701+ printk(KERN_ERR "Incompatible memory specification"
35702+ " for NO_EVICT buffer.\n");
35703+ return -EINVAL;
35704+ }
35705+ }
35706+ return 0;
35707+}
35708+
35709+int ttm_buffer_object_init(struct ttm_bo_device *bdev,
35710+ struct ttm_buffer_object *bo,
35711+ unsigned long size,
35712+ enum ttm_bo_type type,
35713+ uint32_t flags,
35714+ uint32_t page_alignment,
35715+ unsigned long buffer_start,
35716+ bool interruptible,
35717+ struct file *persistant_swap_storage,
35718+ size_t acc_size,
35719+ void (*destroy) (struct ttm_buffer_object *))
35720+{
35721+ int ret = 0;
35722+ unsigned long num_pages;
35723+
35724+ size += buffer_start & ~PAGE_MASK;
35725+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
35726+ if (num_pages == 0) {
35727+ printk(KERN_ERR "Illegal buffer object size.\n");
35728+ return -EINVAL;
35729+ }
35730+ bo->destroy = destroy;
35731+
35732+ mutex_init(&bo->mutex);
35733+ mutex_lock(&bo->mutex);
35734+ kref_init(&bo->kref);
35735+ kref_init(&bo->list_kref);
35736+ atomic_set(&bo->cpu_writers, 0);
35737+ atomic_set(&bo->reserved, 1);
35738+ init_waitqueue_head(&bo->event_queue);
35739+ INIT_LIST_HEAD(&bo->lru);
35740+ INIT_LIST_HEAD(&bo->ddestroy);
35741+ INIT_LIST_HEAD(&bo->swap);
35742+ bo->bdev = bdev;
35743+ bo->type = type;
35744+ bo->num_pages = num_pages;
35745+ bo->mem.mem_type = TTM_PL_SYSTEM;
35746+ bo->mem.num_pages = bo->num_pages;
35747+ bo->mem.mm_node = NULL;
35748+ bo->mem.page_alignment = page_alignment;
35749+ bo->buffer_start = buffer_start & PAGE_MASK;
35750+ bo->priv_flags = 0;
35751+ bo->mem.flags = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
35752+ bo->seq_valid = false;
35753+ bo->persistant_swap_storage = persistant_swap_storage;
35754+ bo->acc_size = acc_size;
35755+
35756+ ret = ttm_bo_check_placement(bo, flags, 0ULL);
35757+ if (unlikely(ret != 0))
35758+ goto out_err;
35759+
35760+ /*
35761+ * If no caching attributes are set, accept any form of caching.
35762+ */
35763+
35764+ if ((flags & TTM_PL_MASK_CACHING) == 0)
35765+ flags |= TTM_PL_MASK_CACHING;
35766+
35767+ bo->proposed_flags = flags;
35768+ bo->mem.proposed_flags = flags;
35769+
35770+ /*
35771+ * For ttm_bo_type_device buffers, allocate
35772+ * address space from the device.
35773+ */
35774+
35775+ if (bo->type == ttm_bo_type_device) {
35776+ ret = ttm_bo_setup_vm(bo);
35777+ if (ret)
35778+ goto out_err;
35779+ }
35780+
35781+ ret = ttm_buffer_object_validate(bo, interruptible, false);
35782+ if (ret)
35783+ goto out_err;
35784+
35785+ mutex_unlock(&bo->mutex);
35786+ ttm_bo_unreserve(bo);
35787+ return 0;
35788+
35789+ out_err:
35790+ mutex_unlock(&bo->mutex);
35791+ ttm_bo_unreserve(bo);
35792+ ttm_bo_unref(&bo);
35793+
35794+ return ret;
35795+}
35796+
35797+static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
35798+ unsigned long num_pages)
35799+{
35800+ size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
35801+ PAGE_MASK;
35802+
35803+ return bdev->ttm_bo_size + 2 * page_array_size;
35804+}
35805+
35806+int ttm_buffer_object_create(struct ttm_bo_device *bdev,
35807+ unsigned long size,
35808+ enum ttm_bo_type type,
35809+ uint32_t flags,
35810+ uint32_t page_alignment,
35811+ unsigned long buffer_start,
35812+ bool interruptible,
35813+ struct file *persistant_swap_storage,
35814+ struct ttm_buffer_object **p_bo)
35815+{
35816+ struct ttm_buffer_object *bo;
35817+ int ret;
35818+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
35819+
35820+ size_t acc_size =
35821+ ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
35822+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
35823+ if (unlikely(ret != 0))
35824+ return ret;
35825+
35826+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
35827+
35828+ if (unlikely(bo == NULL)) {
35829+ ttm_mem_global_free(mem_glob, acc_size, false);
35830+ return -ENOMEM;
35831+ }
35832+
35833+ ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
35834+ page_alignment, buffer_start,
35835+ interruptible,
35836+ persistant_swap_storage, acc_size, NULL);
35837+ if (likely(ret == 0))
35838+ *p_bo = bo;
35839+
35840+ return ret;
35841+}
35842+
35843+static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
35844+ uint32_t mem_type, bool allow_errors)
35845+{
35846+ int ret;
35847+
35848+ mutex_lock(&bo->mutex);
35849+
35850+ ret = ttm_bo_expire_sync_obj(bo, allow_errors);
35851+ if (ret)
35852+ goto out;
35853+
35854+ if (bo->mem.mem_type == mem_type)
35855+ ret = ttm_bo_evict(bo, mem_type, false, false);
35856+
35857+ if (ret) {
35858+ if (allow_errors) {
35859+ goto out;
35860+ } else {
35861+ ret = 0;
35862+ printk(KERN_ERR "Cleanup eviction failed\n");
35863+ }
35864+ }
35865+
35866+ out:
35867+ mutex_unlock(&bo->mutex);
35868+ return ret;
35869+}
35870+
35871+static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
35872+ struct list_head *head,
35873+ unsigned mem_type, bool allow_errors)
35874+{
35875+ struct ttm_buffer_object *entry;
35876+ int ret;
35877+ int put_count;
35878+
35879+ /*
35880+ * Can't use standard list traversal since we're unlocking.
35881+ */
35882+
35883+ spin_lock(&bdev->lru_lock);
35884+
35885+ while (!list_empty(head)) {
35886+ entry = list_first_entry(head, struct ttm_buffer_object, lru);
35887+ kref_get(&entry->list_kref);
35888+ ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
35889+ put_count = ttm_bo_del_from_lru(entry);
35890+ spin_unlock(&bdev->lru_lock);
35891+ while (put_count--)
35892+ kref_put(&entry->list_kref, ttm_bo_ref_bug);
35893+ BUG_ON(ret);
35894+ ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
35895+ ttm_bo_unreserve(entry);
35896+ kref_put(&entry->list_kref, ttm_bo_release_list);
35897+ spin_lock(&bdev->lru_lock);
35898+ }
35899+
35900+ spin_unlock(&bdev->lru_lock);
35901+
35902+ return 0;
35903+}
35904+
35905+int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
35906+{
35907+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
35908+ int ret = -EINVAL;
35909+
35910+ if (mem_type >= TTM_NUM_MEM_TYPES) {
35911+ printk(KERN_ERR "Illegal memory type %d\n", mem_type);
35912+ return ret;
35913+ }
35914+
35915+ if (!man->has_type) {
35916+ printk(KERN_ERR "Trying to take down uninitialized "
35917+ "memory manager type %u\n", mem_type);
35918+ return ret;
35919+ }
35920+
35921+ man->use_type = false;
35922+ man->has_type = false;
35923+
35924+ ret = 0;
35925+ if (mem_type > 0) {
35926+ ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
35927+
35928+ spin_lock(&bdev->lru_lock);
35929+ if (drm_mm_clean(&man->manager)) {
35930+ drm_mm_takedown(&man->manager);
35931+ } else {
35932+ ret = -EBUSY;
35933+ }
35934+ spin_unlock(&bdev->lru_lock);
35935+ }
35936+
35937+ return ret;
35938+}
35939+
35940+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
35941+{
35942+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
35943+
35944+ if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
35945+ printk(KERN_ERR "Illegal memory manager memory type %u.\n",
35946+ mem_type);
35947+ return -EINVAL;
35948+ }
35949+
35950+ if (!man->has_type) {
35951+ printk(KERN_ERR "Memory type %u has not been initialized.\n",
35952+ mem_type);
35953+ return 0;
35954+ }
35955+
35956+ return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
35957+}
35958+
35959+int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
35960+ unsigned long p_offset, unsigned long p_size)
35961+{
35962+ int ret = -EINVAL;
35963+ struct ttm_mem_type_manager *man;
35964+
35965+ if (type >= TTM_NUM_MEM_TYPES) {
35966+ printk(KERN_ERR "Illegal memory type %d\n", type);
35967+ return ret;
35968+ }
35969+
35970+ man = &bdev->man[type];
35971+ if (man->has_type) {
35972+ printk(KERN_ERR
35973+ "Memory manager already initialized for type %d\n",
35974+ type);
35975+ return ret;
35976+ }
35977+
35978+ ret = bdev->driver->init_mem_type(bdev, type, man);
35979+ if (ret)
35980+ return ret;
35981+
35982+ ret = 0;
35983+ if (type != TTM_PL_SYSTEM) {
35984+ if (!p_size) {
35985+ printk(KERN_ERR "Zero size memory manager type %d\n",
35986+ type);
35987+ return ret;
35988+ }
35989+ ret = drm_mm_init(&man->manager, p_offset, p_size);
35990+ if (ret)
35991+ return ret;
35992+ }
35993+ man->has_type = true;
35994+ man->use_type = true;
35995+ man->size = p_size;
35996+
35997+ INIT_LIST_HEAD(&man->lru);
35998+
35999+ return 0;
36000+}
36001+
36002+int ttm_bo_device_release(struct ttm_bo_device *bdev)
36003+{
36004+ int ret = 0;
36005+ unsigned i = TTM_NUM_MEM_TYPES;
36006+ struct ttm_mem_type_manager *man;
36007+
36008+ while (i--) {
36009+ man = &bdev->man[i];
36010+ if (man->has_type) {
36011+ man->use_type = false;
36012+ if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
36013+ ret = -EBUSY;
36014+ printk(KERN_ERR "DRM memory manager type %d "
36015+ "is not clean.\n", i);
36016+ }
36017+ man->has_type = false;
36018+ }
36019+ }
36020+
36021+ if (!cancel_delayed_work(&bdev->wq))
36022+ flush_scheduled_work();
36023+
36024+ while (ttm_bo_delayed_delete(bdev, true)) ;
36025+
36026+ spin_lock(&bdev->lru_lock);
36027+ if (list_empty(&bdev->ddestroy))
36028+ TTM_DEBUG("Delayed destroy list was clean\n");
36029+
36030+ if (list_empty(&bdev->man[0].lru))
36031+ TTM_DEBUG("Swap list was clean\n");
36032+ spin_unlock(&bdev->lru_lock);
36033+
36034+ ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
36035+ BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
36036+ write_lock(&bdev->vm_lock);
36037+ drm_mm_takedown(&bdev->addr_space_mm);
36038+ write_unlock(&bdev->vm_lock);
36039+
36040+ __free_page(bdev->dummy_read_page);
36041+ return ret;
36042+}
36043+
36044+/*
36045+ * This function is intended to be called on drm driver load.
36046+ * If you decide to call it from firstopen, you must protect the call
36047+ * from a potentially racing ttm_bo_driver_finish in lastclose.
36048+ * (This may happen on X server restart).
36049+ */
36050+
36051+int ttm_bo_device_init(struct ttm_bo_device *bdev,
36052+ struct ttm_mem_global *mem_glob,
36053+ struct ttm_bo_driver *driver, uint64_t file_page_offset)
36054+{
36055+ int ret = -EINVAL;
36056+
36057+ bdev->dummy_read_page = NULL;
36058+ rwlock_init(&bdev->vm_lock);
36059+ spin_lock_init(&bdev->lru_lock);
36060+
36061+ bdev->driver = driver;
36062+ bdev->mem_glob = mem_glob;
36063+
36064+ memset(bdev->man, 0, sizeof(bdev->man));
36065+
36066+ bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
36067+ if (unlikely(bdev->dummy_read_page == NULL)) {
36068+ ret = -ENOMEM;
36069+ goto out_err0;
36070+ }
36071+
36072+ /*
36073+ * Initialize the system memory buffer type.
36074+ * Other types need to be driver / IOCTL initialized.
36075+ */
36076+ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
36077+ if (unlikely(ret != 0))
36078+ goto out_err1;
36079+
36080+ bdev->addr_space_rb = RB_ROOT;
36081+ ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
36082+ if (unlikely(ret != 0))
36083+ goto out_err2;
36084+
36085+ INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
36086+ bdev->nice_mode = true;
36087+ INIT_LIST_HEAD(&bdev->ddestroy);
36088+ INIT_LIST_HEAD(&bdev->swap_lru);
36089+ bdev->dev_mapping = NULL;
36090+ ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
36091+ ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
36092+ if (unlikely(ret != 0)) {
36093+ printk(KERN_ERR "Could not register buffer object swapout.\n");
36094+ goto out_err2;
36095+ }
36096+ return 0;
36097+ out_err2:
36098+ ttm_bo_clean_mm(bdev, 0);
36099+ out_err1:
36100+ __free_page(bdev->dummy_read_page);
36101+ out_err0:
36102+ return ret;
36103+}
36104+
36105+/*
36106+ * buffer object vm functions.
36107+ */
36108+
36109+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
36110+{
36111+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
36112+
36113+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
36114+ if (mem->mem_type == TTM_PL_SYSTEM)
36115+ return false;
36116+
36117+ if (man->flags & TTM_MEMTYPE_FLAG_CMA)
36118+ return false;
36119+
36120+ if (mem->flags & TTM_PL_FLAG_CACHED)
36121+ return false;
36122+ }
36123+ return true;
36124+}
36125+
36126+int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
36127+ struct ttm_mem_reg *mem,
36128+ unsigned long *bus_base,
36129+ unsigned long *bus_offset, unsigned long *bus_size)
36130+{
36131+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
36132+
36133+ *bus_size = 0;
36134+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
36135+ return -EINVAL;
36136+
36137+ if (ttm_mem_reg_is_pci(bdev, mem)) {
36138+ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
36139+ *bus_size = mem->num_pages << PAGE_SHIFT;
36140+ *bus_base = man->io_offset;
36141+ }
36142+
36143+ return 0;
36144+}
36145+
36146+/**
36147+ * \c Kill all user-space virtual mappings of this buffer object.
36148+ *
36149+ * \param bo The buffer object.
36150+ *
36151+ * Call bo->mutex locked.
36152+ */
36153+
36154+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
36155+{
36156+ struct ttm_bo_device *bdev = bo->bdev;
36157+ loff_t offset = (loff_t) bo->addr_space_offset;
36158+ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
36159+
36160+ if (!bdev->dev_mapping)
36161+ return;
36162+
36163+ unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
36164+}
36165+
36166+static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
36167+{
36168+ struct ttm_bo_device *bdev = bo->bdev;
36169+ struct rb_node **cur = &bdev->addr_space_rb.rb_node;
36170+ struct rb_node *parent = NULL;
36171+ struct ttm_buffer_object *cur_bo;
36172+ unsigned long offset = bo->vm_node->start;
36173+ unsigned long cur_offset;
36174+
36175+ while (*cur) {
36176+ parent = *cur;
36177+ cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
36178+ cur_offset = cur_bo->vm_node->start;
36179+ if (offset < cur_offset)
36180+ cur = &parent->rb_left;
36181+ else if (offset > cur_offset)
36182+ cur = &parent->rb_right;
36183+ else
36184+ BUG();
36185+ }
36186+
36187+ rb_link_node(&bo->vm_rb, parent, cur);
36188+ rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
36189+}
36190+
36191+/**
36192+ * ttm_bo_setup_vm:
36193+ *
36194+ * @bo: the buffer to allocate address space for
36195+ *
36196+ * Allocate address space in the drm device so that applications
36197+ * can mmap the buffer and access the contents. This only
36198+ * applies to ttm_bo_type_device objects as others are not
36199+ * placed in the drm device address space.
36200+ */
36201+
36202+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
36203+{
36204+ struct ttm_bo_device *bdev = bo->bdev;
36205+ int ret;
36206+
36207+ retry_pre_get:
36208+ ret = drm_mm_pre_get(&bdev->addr_space_mm);
36209+ if (unlikely(ret != 0))
36210+ return ret;
36211+
36212+ write_lock(&bdev->vm_lock);
36213+ bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
36214+ bo->mem.num_pages, 0, 0);
36215+
36216+ if (unlikely(bo->vm_node == NULL)) {
36217+ ret = -ENOMEM;
36218+ goto out_unlock;
36219+ }
36220+
36221+ bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
36222+ bo->mem.num_pages, 0);
36223+
36224+ if (unlikely(bo->vm_node == NULL)) {
36225+ write_unlock(&bdev->vm_lock);
36226+ goto retry_pre_get;
36227+ }
36228+
36229+ ttm_bo_vm_insert_rb(bo);
36230+ write_unlock(&bdev->vm_lock);
36231+ bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
36232+
36233+ return 0;
36234+ out_unlock:
36235+ write_unlock(&bdev->vm_lock);
36236+ return ret;
36237+}
36238+
36239+int ttm_bo_wait(struct ttm_buffer_object *bo,
36240+ bool lazy, bool interruptible, bool no_wait)
36241+{
36242+ struct ttm_bo_driver *driver = bo->bdev->driver;
36243+ void *sync_obj;
36244+ void *sync_obj_arg;
36245+ int ret = 0;
36246+
36247+ while (bo->sync_obj) {
36248+ if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
36249+ driver->sync_obj_unref(&bo->sync_obj);
36250+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
36251+ goto out;
36252+ }
36253+ if (no_wait) {
36254+ ret = -EBUSY;
36255+ goto out;
36256+ }
36257+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
36258+ sync_obj_arg = bo->sync_obj_arg;
36259+ mutex_unlock(&bo->mutex);
36260+ ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
36261+ lazy, interruptible);
36262+
36263+ mutex_lock(&bo->mutex);
36264+ if (unlikely(ret != 0)) {
36265+ driver->sync_obj_unref(&sync_obj);
36266+ return ret;
36267+ }
36268+
36269+ if (bo->sync_obj == sync_obj) {
36270+ driver->sync_obj_unref(&bo->sync_obj);
36271+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
36272+ }
36273+ driver->sync_obj_unref(&sync_obj);
36274+ }
36275+ out:
36276+ return 0;
36277+}
36278+
36279+void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
36280+{
36281+ atomic_set(&bo->reserved, 0);
36282+ wake_up_all(&bo->event_queue);
36283+}
36284+
36285+int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
36286+ bool no_wait)
36287+{
36288+ int ret;
36289+
36290+ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
36291+ if (no_wait)
36292+ return -EBUSY;
36293+ else if (interruptible) {
36294+ ret = wait_event_interruptible
36295+ (bo->event_queue, atomic_read(&bo->reserved) == 0);
36296+ if (unlikely(ret != 0))
36297+ return -ERESTART;
36298+ } else {
36299+ wait_event(bo->event_queue,
36300+ atomic_read(&bo->reserved) == 0);
36301+ }
36302+ }
36303+ return 0;
36304+}
36305+
36306+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
36307+{
36308+ int ret = 0;
36309+
36310+ /*
36311+ * Using ttm_bo_reserve instead of ttm_bo_block_reservation
36312+ * makes sure the lru lists are updated.
36313+ */
36314+
36315+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
36316+ if (unlikely(ret != 0))
36317+ return ret;
36318+ mutex_lock(&bo->mutex);
36319+ ret = ttm_bo_wait(bo, false, true, no_wait);
36320+ if (unlikely(ret != 0))
36321+ goto out_err0;
36322+ atomic_inc(&bo->cpu_writers);
36323+ out_err0:
36324+ mutex_unlock(&bo->mutex);
36325+ ttm_bo_unreserve(bo);
36326+ return ret;
36327+}
36328+
36329+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
36330+{
36331+ if (atomic_dec_and_test(&bo->cpu_writers))
36332+ wake_up_all(&bo->event_queue);
36333+}
36334+
36335+/**
36336+ * A buffer object shrink method that tries to swap out the first
36337+ * buffer object on the bo_global::swap_lru list.
36338+ */
36339+
36340+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
36341+{
36342+ struct ttm_bo_device *bdev =
36343+ container_of(shrink, struct ttm_bo_device, shrink);
36344+ struct ttm_buffer_object *bo;
36345+ int ret = -EBUSY;
36346+ int put_count;
36347+ uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
36348+
36349+ spin_lock(&bdev->lru_lock);
36350+ while (ret == -EBUSY) {
36351+ if (unlikely(list_empty(&bdev->swap_lru))) {
36352+ spin_unlock(&bdev->lru_lock);
36353+ return -EBUSY;
36354+ }
36355+
36356+ bo = list_first_entry(&bdev->swap_lru,
36357+ struct ttm_buffer_object, swap);
36358+ kref_get(&bo->list_kref);
36359+
36360+ /**
36361+ * Reserve buffer. Since we unlock while sleeping, we need
36362+ * to re-check that nobody removed us from the swap-list while
36363+ * we slept.
36364+ */
36365+
36366+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
36367+ if (unlikely(ret == -EBUSY)) {
36368+ spin_unlock(&bdev->lru_lock);
36369+ ttm_bo_wait_unreserved(bo, false);
36370+ kref_put(&bo->list_kref, ttm_bo_release_list);
36371+ spin_lock(&bdev->lru_lock);
36372+ }
36373+ }
36374+
36375+ BUG_ON(ret != 0);
36376+ put_count = ttm_bo_del_from_lru(bo);
36377+ spin_unlock(&bdev->lru_lock);
36378+
36379+ while (put_count--)
36380+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
36381+
36382+ /**
36383+ * Wait for GPU, then move to system cached.
36384+ */
36385+
36386+ mutex_lock(&bo->mutex);
36387+ ret = ttm_bo_wait(bo, false, false, false);
36388+ if (unlikely(ret != 0))
36389+ goto out;
36390+
36391+ if ((bo->mem.flags & swap_placement) != swap_placement) {
36392+ struct ttm_mem_reg evict_mem;
36393+
36394+ evict_mem = bo->mem;
36395+ evict_mem.mm_node = NULL;
36396+ evict_mem.proposed_flags =
36397+ TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
36398+ evict_mem.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
36399+ evict_mem.mem_type = TTM_PL_SYSTEM;
36400+
36401+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, false, false);
36402+ if (unlikely(ret != 0))
36403+ goto out;
36404+ }
36405+
36406+ ttm_bo_unmap_virtual(bo);
36407+
36408+ /**
36409+ * Swap out. Buffer will be swapped in again as soon as
36410+ * anyone tries to access a ttm page.
36411+ */
36412+
36413+ ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
36414+ out:
36415+ mutex_unlock(&bo->mutex);
36416+
36417+ /**
36418+ *
36419+ * Unreserve without putting on LRU to avoid swapping out an
36420+ * already swapped buffer.
36421+ */
36422+
36423+ atomic_set(&bo->reserved, 0);
36424+ wake_up_all(&bo->event_queue);
36425+ kref_put(&bo->list_kref, ttm_bo_release_list);
36426+ return ret;
36427+}
36428+
36429+void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
36430+{
36431+ while (ttm_bo_swapout(&bdev->shrink) == 0) ;
36432+}
36433diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo_api.h b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h
36434new file mode 100644
36435index 0000000..faf7475
36436--- /dev/null
36437+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h
36438@@ -0,0 +1,578 @@
36439+/**************************************************************************
36440+ *
36441+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
36442+ * All Rights Reserved.
36443+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
36444+ * All Rights Reserved.
36445+ *
36446+ * Permission is hereby granted, free of charge, to any person obtaining a
36447+ * copy of this software and associated documentation files (the
36448+ * "Software"), to deal in the Software without restriction, including
36449+ * without limitation the rights to use, copy, modify, merge, publish,
36450+ * distribute, sub license, and/or sell copies of the Software, and to
36451+ * permit persons to whom the Software is furnished to do so, subject to
36452+ * the following conditions:
36453+ *
36454+ * The above copyright notice and this permission notice (including the
36455+ * next paragraph) shall be included in all copies or substantial portions
36456+ * of the Software.
36457+ *
36458+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36459+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36460+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
36461+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
36462+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
36463+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
36464+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
36465+ *
36466+ **************************************************************************/
36467+/*
36468+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
36469+ */
36470+
36471+#ifndef _TTM_BO_API_H_
36472+#define _TTM_BO_API_H_
36473+
36474+#include <drm/drm_hashtab.h>
36475+#include <linux/kref.h>
36476+#include <linux/list.h>
36477+#include <linux/wait.h>
36478+#include <linux/mutex.h>
36479+#include <linux/mm.h>
36480+#include <linux/rbtree.h>
36481+
36482+struct ttm_bo_device;
36483+
36484+struct drm_mm_node;
36485+
36486+/**
36487+ * struct ttm_mem_reg
36488+ *
36489+ * @mm_node: Memory manager node.
36490+ * @size: Requested size of memory region.
36491+ * @num_pages: Actual size of memory region in pages.
36492+ * @page_alignment: Page alignment.
36493+ * @flags: Placement flags.
36494+ * @proposed_flags: Proposed placement flags.
36495+ *
36496+ * Structure indicating the placement and space resources used by a
36497+ * buffer object.
36498+ */
36499+
36500+struct ttm_mem_reg {
36501+ struct drm_mm_node *mm_node;
36502+ unsigned long size;
36503+ unsigned long num_pages;
36504+ uint32_t page_alignment;
36505+ uint32_t mem_type;
36506+ uint32_t flags;
36507+ uint32_t proposed_flags;
36508+};
36509+
36510+/**
36511+ * enum ttm_bo_type
36512+ *
36513+ * @ttm_bo_type_device: These are 'normal' buffers that can
36514+ * be mmapped by user space. Each of these bos occupy a slot in the
36515+ * device address space, that can be used for normal vm operations.
36516+ *
36517+ * @ttm_bo_type_user: These are user-space memory areas that are made
36518+ * available to the GPU by mapping the buffer pages into the GPU aperture
36519+ * space. These buffers cannot be mmapped from the device address space.
36520+ *
36521+ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
36522+ * but they cannot be accessed from user-space. For kernel-only use.
36523+ */
36524+
36525+enum ttm_bo_type {
36526+ ttm_bo_type_device,
36527+ ttm_bo_type_user,
36528+ ttm_bo_type_kernel
36529+};
36530+
36531+struct ttm_tt;
36532+
36533+/**
36534+ * struct ttm_buffer_object
36535+ *
36536+ * @bdev: Pointer to the buffer object device structure.
36537+ * @kref: Reference count of this buffer object. When this refcount reaches
36538+ * zero, the object is put on the delayed delete list.
36539+ * @list_kref: List reference count of this buffer object. This member is
36540+ * used to avoid destruction while the buffer object is still on a list.
36541+ * Lru lists may keep one refcount, the delayed delete list, and kref != 0
36542+ * keeps one refcount. When this refcount reaches zero,
36543+ * the object is destroyed.
36544+ * @proposed_flags: Proposed placement for the buffer. Changed only by the
36545+ * creator prior to validation as opposed to bo->mem.proposed_flags which is
36546+ * changed by the implementation prior to a buffer move if it wants to outsmart
36547+ * the buffer creator / user. This latter happens, for example, at eviction.
36548+ * @buffer_start: The virtual user-space start address of ttm_bo_type_user
36549+ * buffers.
36550+ * @type: The bo type.
36551+ * @offset: The current GPU offset, which can have different meanings
36552+ * depending on the memory type. For SYSTEM type memory, it should be 0.
36553+ * @mem: structure describing current placement.
36554+ * @val_seq: Sequence of the validation holding the @reserved lock.
36555+ * Used to avoid starvation when many processes compete to validate the
36556+ * buffer. This member is protected by the bo_device::lru_lock.
36557+ * @seq_valid: The value of @val_seq is valid. This value is protected by
36558+ * the bo_device::lru_lock.
36559+ * @lru: List head for the lru list.
36560+ * @ddestroy: List head for the delayed destroy list.
36561+ * @swap: List head for swap LRU list.
36562+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
36563+ * pinned in physical memory. If this behaviour is not desired, this member
36564+ * holds a pointer to a persistant shmem object.
36565+ * @destroy: Destruction function. If NULL, kfree is used.
36566+ * @sync_obj_arg: Opaque argument to synchronization object function.
36567+ * @sync_obj: Pointer to a synchronization object.
36568+ * @priv_flags: Flags describing buffer object internal state.
36569+ * @event_queue: Queue for processes waiting on buffer object status change.
36570+ * @mutex: Lock protecting all members with the exception of constant members
36571+ * and list heads. We should really use a spinlock here.
36572+ * @num_pages: Actual number of pages.
36573+ * @ttm: TTM structure holding system pages.
36574+ * @vm_hash: Hash item for fast address space lookup. Need to change to a
36575+ * rb-tree node.
36576+ * @vm_node: Address space manager node.
36577+ * @addr_space_offset: Address space offset.
36578+ * @cpu_writes: For synchronization. Number of cpu writers.
36579+ * @reserved: Deadlock-free lock used for synchronization state transitions.
36580+ * @acc_size: Accounted size for this object.
36581+ *
36582+ * Base class for TTM buffer object, that deals with data placement and CPU
36583+ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
36584+ * the driver can usually use the placement offset @offset directly as the
36585+ * GPU virtual address. For drivers implementing multiple
36586+ * GPU memory manager contexts, the driver should manage the address space
36587+ * in these contexts separately and use these objects to get the correct
36588+ * placement and caching for these GPU maps. This makes it possible to use
36589+ * these objects for even quite elaborate memory management schemes.
36590+ * The destroy member, the API visibility of this object makes it possible
36591+ * to derive driver specific types.
36592+ */
36593+
36594+struct ttm_buffer_object {
36595+ struct ttm_bo_device *bdev;
36596+ struct kref kref;
36597+ struct kref list_kref;
36598+
36599+ /*
36600+ * If there is a possibility that the usage variable is zero,
36601+ * then dev->struct_mutex should be locked before incrementing it.
36602+ */
36603+
36604+ uint32_t proposed_flags;
36605+ unsigned long buffer_start;
36606+ enum ttm_bo_type type;
36607+ unsigned long offset;
36608+ struct ttm_mem_reg mem;
36609+ uint32_t val_seq;
36610+ bool seq_valid;
36611+
36612+ struct list_head lru;
36613+ struct list_head ddestroy;
36614+ struct list_head swap;
36615+
36616+ struct file *persistant_swap_storage;
36617+
36618+ void (*destroy) (struct ttm_buffer_object *);
36619+
36620+ void *sync_obj_arg;
36621+ void *sync_obj;
36622+
36623+ uint32_t priv_flags;
36624+ wait_queue_head_t event_queue;
36625+ struct mutex mutex;
36626+ unsigned long num_pages;
36627+
36628+ struct ttm_tt *ttm;
36629+ struct rb_node vm_rb;
36630+ struct drm_mm_node *vm_node;
36631+ uint64_t addr_space_offset;
36632+
36633+ atomic_t cpu_writers;
36634+ atomic_t reserved;
36635+
36636+ size_t acc_size;
36637+};
36638+
36639+/**
36640+ * struct ttm_bo_kmap_obj
36641+ *
36642+ * @virtual: The current kernel virtual address.
36643+ * @page: The page when kmap'ing a single page.
36644+ * @bo_kmap_type: Type of bo_kmap.
36645+ *
36646+ * Object describing a kernel mapping. Since a TTM bo may be located
36647+ * in various memory types with various caching policies, the
36648+ * mapping can either be an ioremap, a vmap, a kmap or part of a
36649+ * premapped region.
36650+ */
36651+
36652+struct ttm_bo_kmap_obj {
36653+ void *virtual;
36654+ struct page *page;
36655+ enum {
36656+ ttm_bo_map_iomap,
36657+ ttm_bo_map_vmap,
36658+ ttm_bo_map_kmap,
36659+ ttm_bo_map_premapped,
36660+ } bo_kmap_type;
36661+};
36662+
36663+/**
36664+ * ttm_bo_reference - reference a struct ttm_buffer_object
36665+ *
36666+ * @bo: The buffer object.
36667+ *
36668+ * Returns a refcounted pointer to a buffer object.
36669+ */
36670+
36671+static inline struct ttm_buffer_object *ttm_bo_reference(struct
36672+ ttm_buffer_object *bo)
36673+{
36674+ kref_get(&bo->kref);
36675+ return bo;
36676+}
36677+
36678+/**
36679+ * ttm_bo_wait - wait for buffer idle.
36680+ *
36681+ * @bo: The buffer object.
36682+ * @interruptible: Use interruptible wait.
36683+ * @no_wait: Return immediately if buffer is busy.
36684+ *
36685+ * This function must be called with the bo::mutex held, and makes
36686+ * sure any previous rendering to the buffer is completed.
36687+ * Note: It might be necessary to block validations before the
36688+ * wait by reserving the buffer.
36689+ * Returns -EBUSY if no_wait is true and the buffer is busy.
36690+ * Returns -ERESTART if interrupted by a signal.
36691+ */
36692+extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
36693+ bool interruptible, bool no_wait);
36694+/**
36695+ * ttm_buffer_object_validate
36696+ *
36697+ * @bo: The buffer object.
36698+ * @interruptible: Sleep interruptible if sleeping.
36699+ * @no_wait: Return immediately if the buffer is busy.
36700+ *
36701+ * Changes placement and caching policy of the buffer object
36702+ * according to bo::proposed_flags.
36703+ * Returns
36704+ * -EINVAL on invalid proposed_flags.
36705+ * -ENOMEM on out-of-memory condition.
36706+ * -EBUSY if no_wait is true and buffer busy.
36707+ * -ERESTART if interrupted by a signal.
36708+ */
36709+extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
36710+ bool interruptible, bool no_wait);
36711+/**
36712+ * ttm_bo_unref
36713+ *
36714+ * @bo: The buffer object.
36715+ *
36716+ * Unreference and clear a pointer to a buffer object.
36717+ */
36718+extern void ttm_bo_unref(struct ttm_buffer_object **bo);
36719+
36720+/**
36721+ * ttm_bo_synccpu_write_grab
36722+ *
36723+ * @bo: The buffer object:
36724+ * @no_wait: Return immediately if buffer is busy.
36725+ *
36726+ * Synchronizes a buffer object for CPU RW access. This means
36727+ * blocking command submission that affects the buffer and
36728+ * waiting for buffer idle. This lock is recursive.
36729+ * Returns
36730+ * -EBUSY if the buffer is busy and no_wait is true.
36731+ * -ERESTART if interrupted by a signal.
36732+ */
36733+
36734+extern int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
36735+/**
36736+ * ttm_bo_synccpu_write_release:
36737+ *
36738+ * @bo : The buffer object.
36739+ *
36740+ * Releases a synccpu lock.
36741+ */
36742+extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
36743+
36744+/**
36745+ * ttm_buffer_object_init
36746+ *
36747+ * @bdev: Pointer to a ttm_bo_device struct.
36748+ * @bo: Pointer to a ttm_buffer_object to be initialized.
36749+ * @size: Requested size of buffer object.
36750+ * @type: Requested type of buffer object.
36751+ * @flags: Initial placement flags.
36752+ * @page_alignment: Data alignment in pages.
36753+ * @buffer_start: Virtual address of user space data backing a
36754+ * user buffer object.
36755+ * @interruptible: If needing to sleep to wait for GPU resources,
36756+ * sleep interruptible.
36757+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
36758+ * pinned in physical memory. If this behaviour is not desired, this member
36759+ * holds a pointer to a persistant shmem object. Typically, this would
36760+ * point to the shmem object backing a GEM object if TTM is used to back a
36761+ * GEM user interface.
36762+ * @acc_size: Accounted size for this object.
36763+ * @destroy: Destroy function. Use NULL for kfree().
36764+ *
36765+ * This function initializes a pre-allocated struct ttm_buffer_object.
36766+ * As this object may be part of a larger structure, this function,
36767+ * together with the @destroy function,
36768+ * enables driver-specific objects derived from a ttm_buffer_object.
36769+ * On successful return, the object kref and list_kref are set to 1.
36770+ * Returns
36771+ * -ENOMEM: Out of memory.
36772+ * -EINVAL: Invalid placement flags.
36773+ * -ERESTART: Interrupted by signal while sleeping waiting for resources.
36774+ */
36775+
36776+extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
36777+ struct ttm_buffer_object *bo,
36778+ unsigned long size,
36779+ enum ttm_bo_type type,
36780+ uint32_t flags,
36781+ uint32_t page_alignment,
36782+ unsigned long buffer_start,
36783+				  bool interruptible,
36784+ struct file *persistant_swap_storage,
36785+ size_t acc_size,
36786+ void (*destroy) (struct ttm_buffer_object *));
36787+/**
36788+ * ttm_buffer_object_create
36789+ *
36790+ * @bdev: Pointer to a ttm_bo_device struct.
36791+ * @bo: Pointer to a ttm_buffer_object to be initialized.
36792+ * @size: Requested size of buffer object.
36793+ * @type: Requested type of buffer object.
36794+ * @flags: Initial placement flags.
36795+ * @page_alignment: Data alignment in pages.
36796+ * @buffer_start: Virtual address of user space data backing a
36797+ * user buffer object.
36798+ * @interruptible: If needing to sleep while waiting for GPU resources,
36799+ * sleep interruptible.
36800+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
36801+ * pinned in physical memory. If this behaviour is not desired, this member
36802+ * holds a pointer to a persistant shmem object. Typically, this would
36803+ * point to the shmem object backing a GEM object if TTM is used to back a
36804+ * GEM user interface.
36805+ * @p_bo: On successful completion *p_bo points to the created object.
36806+ *
36807+ * This function allocates a ttm_buffer_object, and then calls
36808+ * ttm_buffer_object_init on that object.
36809+ * The destroy function is set to kfree().
36810+ * Returns
36811+ * -ENOMEM: Out of memory.
36812+ * -EINVAL: Invalid placement flags.
36813+ * -ERESTART: Interrupted by signal while waiting for resources.
36814+ */
36815+
36816+extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
36817+ unsigned long size,
36818+ enum ttm_bo_type type,
36819+ uint32_t flags,
36820+ uint32_t page_alignment,
36821+ unsigned long buffer_start,
36822+ bool interruptible,
36823+ struct file *persistant_swap_storage,
36824+ struct ttm_buffer_object **p_bo);
36825+
36826+/**
36827+ * ttm_bo_check_placement
36828+ *
36829+ * @bo: the buffer object.
36830+ * @set_flags: placement flags to set.
36831+ * @clr_flags: placement flags to clear.
36832+ *
36833+ * Performs minimal validity checking on an intended change of
36834+ * placement flags.
36835+ * Returns
36836+ * -EINVAL: Intended change is invalid or not allowed.
36837+ */
36838+
36839+extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
36840+ uint32_t set_flags, uint32_t clr_flags);
36841+
36842+/**
36843+ * ttm_bo_init_mm
36844+ *
36845+ * @bdev: Pointer to a ttm_bo_device struct.
36846+ * @mem_type: The memory type.
36847+ * @p_offset: offset for managed area in pages.
36848+ * @p_size: size managed area in pages.
36849+ *
36850+ * Initialize a manager for a given memory type.
36851+ * Note: if part of driver firstopen, it must be protected from a
36852+ * potentially racing lastclose.
36853+ * Returns:
36854+ * -EINVAL: invalid size or memory type.
36855+ * -ENOMEM: Not enough memory.
36856+ * May also return driver-specified errors.
36857+ */
36858+
36859+extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
36860+ unsigned long p_offset, unsigned long p_size);
36861+/**
36862+ * ttm_bo_clean_mm
36863+ *
36864+ * @bdev: Pointer to a ttm_bo_device struct.
36865+ * @mem_type: The memory type.
36866+ *
36867+ * Take down a manager for a given memory type after first walking
36868+ * the LRU list to evict any buffers left alive.
36869+ *
36870+ * Normally, this function is part of lastclose() or unload(), and at that
36871+ * point there shouldn't be any buffers left created by user-space, since
36872+ * they should've been removed by the file descriptor release() method.
36873+ * However, before this function is run, make sure to signal all sync objects,
36874+ * and verify that the delayed delete queue is empty. The driver must also
36875+ * make sure that there are no NO_EVICT buffers present in this memory type
36876+ * when the call is made.
36877+ *
36878+ * If this function is part of a VT switch, the caller must make sure that
36879+ * there are no applications currently validating buffers before this
36880+ * function is called. The caller can do that by first taking the
36881+ * struct ttm_bo_device::ttm_lock in write mode.
36882+ *
36883+ * Returns:
36884+ * -EINVAL: invalid or uninitialized memory type.
36885+ * -EBUSY: There are still buffers left in this memory type.
36886+ */
36887+
36888+extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
36889+
36890+/**
36891+ * ttm_bo_evict_mm
36892+ *
36893+ * @bdev: Pointer to a ttm_bo_device struct.
36894+ * @mem_type: The memory type.
36895+ *
36896+ * Evicts all buffers on the lru list of the memory type.
36897+ * This is normally part of a VT switch or an
36898+ * out-of-memory-space-due-to-fragmentation handler.
36899+ * The caller must make sure that there are no other processes
36900+ * currently validating buffers, and can do that by taking the
36901+ * struct ttm_bo_device::ttm_lock in write mode.
36902+ *
36903+ * Returns:
36904+ * -EINVAL: Invalid or uninitialized memory type.
36905+ * -ERESTART: The call was interrupted by a signal while waiting to
36906+ * evict a buffer.
36907+ */
36908+
36909+extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
36910+
36911+/**
36912+ * ttm_kmap_obj_virtual
36913+ *
36914+ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
36915+ * @is_iomem: Pointer to an integer that on return indicates 1 if the
36916+ * virtual map is io memory, 0 if normal memory.
36917+ *
36918+ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
36919+ * If *is_iomem is 1 on return, the virtual address points to an io memory area,
36920+ * that should strictly be accessed by the iowriteXX() and similar functions.
36921+ */
36922+
36923+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
36924+ bool *is_iomem)
36925+{
36926+ *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
36927+ map->bo_kmap_type == ttm_bo_map_premapped);
36928+ return map->virtual;
36929+}
36930+
36931+/**
36932+ * ttm_bo_kmap
36933+ *
36934+ * @bo: The buffer object.
36935+ * @start_page: The first page to map.
36936+ * @num_pages: Number of pages to map.
36937+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
36938+ *
36939+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
36940+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
36941+ * used to obtain a virtual address to the data.
36942+ *
36943+ * Returns
36944+ * -ENOMEM: Out of memory.
36945+ * -EINVAL: Invalid range.
36946+ */
36947+
36948+extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
36949+ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
36950+
36951+/**
36952+ * ttm_bo_kunmap
36953+ *
36954+ * @map: Object describing the map to unmap.
36955+ *
36956+ * Unmaps a kernel map set up by ttm_bo_kmap.
36957+ */
36958+
36959+extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
36960+
36961+#if 0
36962+#endif
36963+
36964+/**
36965+ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
36966+ *
36967+ * @vma: vma as input from the fbdev mmap method.
36968+ * @bo: The bo backing the address space. The address space will
36969+ * have the same size as the bo, and start at offset 0.
36970+ *
36971+ * This function is intended to be called by the fbdev mmap method
36972+ * if the fbdev address space is to be backed by a bo.
36973+ */
36974+
36975+extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
36976+ struct ttm_buffer_object *bo);
36977+
36978+/**
36979+ * ttm_bo_mmap - mmap out of the ttm device address space.
36980+ *
36981+ * @filp: filp as input from the mmap method.
36982+ * @vma: vma as input from the mmap method.
36983+ * @bdev: Pointer to the ttm_bo_device with the address space manager.
36984+ *
36985+ * This function is intended to be called by the device mmap method.
36986+ * if the device address space is to be backed by the bo manager.
36987+ */
36988+
36989+extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
36990+ struct ttm_bo_device *bdev);
36991+
36992+/**
36993+ * ttm_bo_io
36994+ *
36995+ * @bdev: Pointer to the struct ttm_bo_device.
36996+ * @filp: Pointer to the struct file attempting to read / write.
36997+ * @wbuf: User-space pointer to address of buffer to write. NULL on read.
36998+ * @rbuf: User-space pointer to address of buffer to read into. Null on write.
36999+ * @count: Number of bytes to read / write.
37000+ * @f_pos: Pointer to current file position.
37002+ * @write: 1 for write, 0 for read.
37002+ *
37003+ * This function implements read / write into ttm buffer objects, and is intended to
37004+ * be called from the fops::read and fops::write method.
37005+ * Returns:
37006+ * See man (2) write, man(2) read. In particular, the function may return -EINTR if
37007+ * interrupted by a signal.
37008+ */
37009+
37010+extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
37011+ const char __user * wbuf, char __user * rbuf,
37012+ size_t count, loff_t * f_pos, bool write);
37013+
37014+extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
37015+
37016+#endif
37017diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h
37018new file mode 100644
37019index 0000000..f7efb45
37020--- /dev/null
37021+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h
37022@@ -0,0 +1,859 @@
37023+/**************************************************************************
37024+ *
37025+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
37026+ * All Rights Reserved.
37027+ * Copyright (c) 2009 Vmware, Inc., Palo Alto, CA., USA
37028+ * All Rights Reserved.
37029+ *
37030+ * Permission is hereby granted, free of charge, to any person obtaining a
37031+ * copy of this software and associated documentation files (the
37032+ * "Software"), to deal in the Software without restriction, including
37033+ * without limitation the rights to use, copy, modify, merge, publish,
37034+ * distribute, sub license, and/or sell copies of the Software, and to
37035+ * permit persons to whom the Software is furnished to do so, subject to
37036+ * the following conditions:
37037+ *
37038+ * The above copyright notice and this permission notice (including the
37039+ * next paragraph) shall be included in all copies or substantial portions
37040+ * of the Software.
37041+ *
37042+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37043+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37044+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
37045+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
37046+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37047+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
37048+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
37049+ *
37050+ **************************************************************************/
37051+/*
37052+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
37053+ */
37054+#ifndef _TTM_BO_DRIVER_H_
37055+#define _TTM_BO_DRIVER_H_
37056+
37057+#include "ttm/ttm_bo_api.h"
37058+#include "ttm/ttm_memory.h"
37059+#include <drm/drm_mm.h>
37060+#include "linux/workqueue.h"
37061+#include "linux/fs.h"
37062+#include "linux/spinlock.h"
37063+
37064+struct ttm_backend;
37065+
37066+struct ttm_backend_func {
37067+ /**
37068+ * struct ttm_backend_func member populate
37069+ *
37070+ * @backend: Pointer to a struct ttm_backend.
37071+ * @num_pages: Number of pages to populate.
37072+ * @pages: Array of pointers to ttm pages.
37073+ * @dummy_read_page: Page to be used instead of NULL pages in the
37074+ * array @pages.
37075+ *
37076+ * Populate the backend with ttm pages. Depending on the backend,
37077+ * it may or may not copy the @pages array.
37078+ */
37079+ int (*populate) (struct ttm_backend * backend,
37080+ unsigned long num_pages, struct page ** pages,
37081+ struct page * dummy_read_page);
37082+ /**
37083+ * struct ttm_backend_func member clear
37084+ *
37085+ * @backend: Pointer to a struct ttm_backend.
37086+ *
37087+ * This is an "unpopulate" function. Release all resources
37088+ * allocated with populate.
37089+ */
37090+ void (*clear) (struct ttm_backend * backend);
37091+
37092+ /**
37093+ * struct ttm_backend_func member bind
37094+ *
37095+ * @backend: Pointer to a struct ttm_backend.
37096+ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
37097+ * memory type and location for binding.
37098+ *
37099+ * Bind the backend pages into the aperture in the location
37100+ * indicated by @bo_mem. This function should be able to handle
37101+ * differences between aperture- and system page sizes.
37102+ */
37103+ int (*bind) (struct ttm_backend * backend, struct ttm_mem_reg * bo_mem);
37104+
37105+ /**
37106+ * struct ttm_backend_func member unbind
37107+ *
37108+ * @backend: Pointer to a struct ttm_backend.
37109+ *
37110+ * Unbind previously bound backend pages. This function should be
37111+ * able to handle differences between aperture- and system page sizes.
37112+ */
37113+ int (*unbind) (struct ttm_backend * backend);
37114+
37115+ /**
37116+ * struct ttm_backend_func member destroy
37117+ *
37118+ * @backend: Pointer to a struct ttm_backend.
37119+ *
37120+ * Destroy the backend.
37121+ */
37122+ void (*destroy) (struct ttm_backend * backend);
37123+};
37124+
37125+/**
37126+ * struct ttm_backend
37127+ *
37128+ * @bdev: Pointer to a struct ttm_bo_device.
37129+ * @flags: For driver use.
37130+ * @func: Pointer to a struct ttm_backend_func that describes
37131+ * the backend methods.
37132+ *
37133+ */
37134+
37135+struct ttm_backend {
37136+ struct ttm_bo_device *bdev;
37137+ uint32_t flags;
37138+ struct ttm_backend_func *func;
37139+};
37140+
37141+#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
37142+#define TTM_PAGE_FLAG_USER (1 << 1)
37143+#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
37144+#define TTM_PAGE_FLAG_WRITE (1 << 3)
37145+#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
37146+#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
37147+
37148+enum ttm_caching_state {
37149+ tt_uncached,
37150+ tt_wc,
37151+ tt_cached
37152+};
37153+
37154+/**
37155+ * struct ttm_tt
37156+ *
37157+ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
37158+ * pointer.
37159+ * @pages: Array of pages backing the data.
37160+ * @first_himem_page: Himem pages are put last in the page array, which
37161+ * enables us to run caching attribute changes on only the first part
37162+ * of the page array containing lomem pages. This is the index of the
37163+ * first himem page.
37164+ * @last_lomem_page: Index of the last lomem page in the page array.
37165+ * @num_pages: Number of pages in the page array.
37166+ * @bdev: Pointer to the current struct ttm_bo_device.
37167+ * @be: Pointer to the ttm backend.
37168+ * @tsk: The task for user ttm.
37169+ * @start: virtual address for user ttm.
37170+ * @swap_storage: Pointer to shmem struct file for swap storage.
37171+ * @caching_state: The current caching state of the pages.
37172+ * @state: The current binding state of the pages.
37173+ *
37174+ * This is a structure holding the pages, caching- and aperture binding
37175+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
37176+ * memory.
37177+ */
37178+
37179+struct ttm_tt {
37180+ struct page *dummy_read_page;
37181+ struct page **pages;
37182+ long first_himem_page;
37183+ long last_lomem_page;
37184+ uint32_t page_flags;
37185+ unsigned long num_pages;
37186+ struct ttm_bo_device *bdev;
37187+ struct ttm_backend *be;
37188+ struct task_struct *tsk;
37189+ unsigned long start;
37190+ struct file *swap_storage;
37191+ enum ttm_caching_state caching_state;
37192+ enum {
37193+ tt_bound,
37194+ tt_unbound,
37195+ tt_unpopulated,
37196+ } state;
37197+};
37198+
37199+#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
37200+#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
37201+#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
37202+ before kernel access. */
37203+#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
37204+
37205+/**
37206+ * struct ttm_mem_type_manager
37207+ *
37208+ * @has_type: The memory type has been initialized.
37209+ * @use_type: The memory type is enabled.
37210+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
37211+ * managed by this memory type.
37212+ * @gpu_offset: If used, the GPU offset of the first managed page of
37213+ * fixed memory or the first managed location in an aperture.
37214+ * @io_offset: The io_offset of the first managed page of IO memory or
37215+ * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
37216+ * memory, this should be set to NULL.
37217+ * @io_size: The size of a managed IO region (fixed memory or aperture).
37218+ * @io_addr: Virtual kernel address if the io region is pre-mapped. For
37219+ * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
37220+ * @io_addr should be set to NULL.
37221+ * @size: Size of the managed region.
37222+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
37223+ * as defined in ttm_placement_common.h
37224+ * @default_caching: The default caching policy used for a buffer object
37225+ * placed in this memory type if the user doesn't provide one.
37226+ * @manager: The range manager used for this memory type. FIXME: If the aperture
37227+ * has a page size different from the underlying system, the granularity
37228+ * of this manager should take care of this. But the range allocating code
37229+ * in ttm_bo.c needs to be modified for this.
37230+ * @lru: The lru list for this memory type.
37231+ *
37232+ * This structure is used to identify and manage memory types for a device.
37233+ * It's set up by the ttm_bo_driver::init_mem_type method.
37234+ */
37235+
37236+struct ttm_mem_type_manager {
37237+
37238+ /*
37239+ * No protection. Constant from start.
37240+ */
37241+
37242+ bool has_type;
37243+ bool use_type;
37244+ uint32_t flags;
37245+ unsigned long gpu_offset;
37246+ unsigned long io_offset;
37247+ unsigned long io_size;
37248+ void *io_addr;
37249+ uint64_t size;
37250+ uint32_t available_caching;
37251+ uint32_t default_caching;
37252+
37253+ /*
37254+ * Protected by the bdev->lru_lock.
37255+ * TODO: Consider one lru_lock per ttm_mem_type_manager.
37256+ * Plays ill with list removal, though.
37257+ */
37258+
37259+ struct drm_mm manager;
37260+ struct list_head lru;
37261+};
37262+
37263+/**
37264+ * struct ttm_bo_driver
37265+ *
37266+ * @mem_type_prio: Priority array of memory types to place a buffer object in
37267+ * if it fits without evicting buffers from any of these memory types.
37268+ * @mem_busy_prio: Priority array of memory types to place a buffer object in
37269+ * if it needs to evict buffers to make room.
37270+ * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
37271+ * @num_mem_busy_prio: Number of elements in the @mem_busy_prio array.
37272+ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
37273+ * @invalidate_caches: Callback to invalidate read caches when a buffer object
37274+ * has been evicted.
37275+ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager structure.
37276+ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
37277+ * @move: Callback for a driver to hook in accelerated functions to move a buffer.
37278+ * If set to NULL, a potentially slow memcpy() move is used.
37279+ * @sync_obj_signaled: See ttm_fence_api.h
37280+ * @sync_obj_wait: See ttm_fence_api.h
37281+ * @sync_obj_flush: See ttm_fence_api.h
37282+ * @sync_obj_unref: See ttm_fence_api.h
37283+ * @sync_obj_ref: See ttm_fence_api.h
37284+ */
37285+
37286+struct ttm_bo_driver {
37287+ const uint32_t *mem_type_prio;
37288+ const uint32_t *mem_busy_prio;
37289+ uint32_t num_mem_type_prio;
37290+ uint32_t num_mem_busy_prio;
37291+
37292+ /**
37293+ * struct ttm_bo_driver member create_ttm_backend_entry
37294+ *
37295+ * @bdev: The buffer object device.
37296+ *
37297+ * Create a driver specific struct ttm_backend.
37298+ */
37299+
37300+ struct ttm_backend *(*create_ttm_backend_entry)
37301+ (struct ttm_bo_device * bdev);
37302+
37303+ /**
37304+ * struct ttm_bo_driver member invalidate_caches
37305+ *
37306+ * @bdev: the buffer object device.
37307+ * @flags: new placement of the rebound buffer object.
37308+ *
37309+ * A previously evicted buffer has been rebound in a
37310+ * potentially new location. Tell the driver that it might
37311+ * consider invalidating read (texture) caches on the next command
37312+ * submission as a consequence.
37313+ */
37314+
37315+ int (*invalidate_caches) (struct ttm_bo_device * bdev, uint32_t flags);
37316+ int (*init_mem_type) (struct ttm_bo_device * bdev, uint32_t type,
37317+ struct ttm_mem_type_manager * man);
37318+ /**
37319+ * struct ttm_bo_driver member evict_flags:
37320+ *
37321+ * @bo: the buffer object to be evicted
37322+ *
37323+ * Return the bo flags for a buffer which is not mapped to the hardware.
37324+ * These will be placed in proposed_flags so that when the move is
37325+ * finished, they'll end up in bo->mem.flags
37326+ */
37327+
37328+ uint32_t(*evict_flags) (struct ttm_buffer_object * bo);
37329+ /**
37330+ * struct ttm_bo_driver member move:
37331+ *
37332+ * @bo: the buffer to move
37333+ * @evict: whether this motion is evicting the buffer from
37334+ * the graphics address space
37335+ * @interruptible: Use interruptible sleeps if possible when sleeping.
37336+ * @no_wait: whether this should give up and return -EBUSY
37337+ * if this move would require sleeping
37338+ * @new_mem: the new memory region receiving the buffer
37339+ *
37340+ * Move a buffer between two memory regions.
37341+ */
37342+ int (*move) (struct ttm_buffer_object * bo,
37343+ bool evict, bool interruptible,
37344+ bool no_wait, struct ttm_mem_reg * new_mem);
37345+
37346+ /**
37347+ * struct ttm_bo_driver_member verify_access
37348+ *
37349+ * @bo: Pointer to a buffer object.
37350+ * @filp: Pointer to a struct file trying to access the object.
37351+ *
37352+ * Called from the map / write / read methods to verify that the
37353+ * caller is permitted to access the buffer object.
37354+ * This member may be set to NULL, which will refuse this kind of
37355+ * access for all buffer objects.
37356+ * This function should return 0 if access is granted, -EPERM otherwise.
37357+ */
37358+ int (*verify_access) (struct ttm_buffer_object * bo,
37359+ struct file * filp);
37360+
37361+ /**
37362+ * In case a driver writer dislikes the TTM fence objects,
37363+ * the driver writer can replace those with sync objects of
37364+ * his / her own. If it turns out that no driver writer is
37365+ * using these. I suggest we remove these hooks and plug in
37366+ * fences directly. The bo driver needs the following functionality:
37367+ * See the corresponding functions in the fence object API
37368+ * documentation.
37369+ */
37370+
37371+ bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
37372+ int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
37373+ bool lazy, bool interruptible);
37374+ int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
37375+ void (*sync_obj_unref) (void **sync_obj);
37376+ void *(*sync_obj_ref) (void *sync_obj);
37377+};
37378+
37379+#define TTM_NUM_MEM_TYPES 11
37380+
37381+#define TTM_BO_PRIV_FLAG_EVICTED (1 << 0) /* Buffer object is evicted. */
37382+#define TTM_BO_PRIV_FLAG_MOVING (1 << 1) /* Buffer object is moving and needs
37383+ idling before CPU mapping */
37384+/**
37385+ * struct ttm_bo_device - Buffer object driver device-specific data.
37386+ *
37387+ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
37388+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
37389+ * @count: Current number of buffer object.
37390+ * @pages: Current number of pinned pages.
37391+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
37392+ * of unpopulated pages.
37393+ * @shrink: A shrink callback object used for buffer object swap.
37394+ * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
37395+ * used by a buffer object. This is excluding page arrays and backing pages.
37396+ * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
37397+ * @man: An array of mem_type_managers.
37398+ * @addr_space_mm: Range manager for the device address space.
37399+ * @lru_lock: Spinlock that protects the buffer+device lru lists and
37400+ * ddestroy lists.
37401+ * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
37402+ * If a GPU lockup has been detected, this is forced to 0.
37403+ * @dev_mapping: A pointer to the struct address_space representing the
37404+ * device address space.
37405+ * @wq: Work queue structure for the delayed delete workqueue.
37406+ *
37407+ */
37408+
37409+struct ttm_bo_device {
37410+
37411+ /*
37412+ * Constant after bo device init / atomic.
37413+ */
37414+
37415+ struct ttm_mem_global *mem_glob;
37416+ struct ttm_bo_driver *driver;
37417+ struct page *dummy_read_page;
37418+ struct ttm_mem_shrink shrink;
37419+
37420+ size_t ttm_bo_extra_size;
37421+ size_t ttm_bo_size;
37422+
37423+ rwlock_t vm_lock;
37424+ /*
37425+ * Protected by the vm lock.
37426+ */
37427+ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
37428+ struct rb_root addr_space_rb;
37429+ struct drm_mm addr_space_mm;
37430+
37431+ /*
37432+ * Might want to change this to one lock per manager.
37433+ */
37434+ spinlock_t lru_lock;
37435+ /*
37436+ * Protected by the lru lock.
37437+ */
37438+ struct list_head ddestroy;
37439+ struct list_head swap_lru;
37440+
37441+ /*
37442+ * Protected by load / firstopen / lastclose /unload sync.
37443+ */
37444+
37445+ bool nice_mode;
37446+ struct address_space *dev_mapping;
37447+
37448+ /*
37449+ * Internal protection.
37450+ */
37451+
37452+ struct delayed_work wq;
37453+};
37454+
37455+/**
37456+ * ttm_flag_masked
37457+ *
37458+ * @old: Pointer to the result and original value.
37459+ * @new: New value of bits.
37460+ * @mask: Mask of bits to change.
37461+ *
37462+ * Convenience function to change a number of bits identified by a mask.
37463+ */
37464+
37465+static inline uint32_t
37466+ttm_flag_masked(uint32_t * old, uint32_t new, uint32_t mask)
37467+{
37468+ *old ^= (*old ^ new) & mask;
37469+ return *old;
37470+}
37471+
37472+/**
37473+ * ttm_tt_create
37474+ *
37475+ * @bdev: pointer to a struct ttm_bo_device.
37476+ * @size: Size of the data needed backing.
37477+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
37478+ * @dummy_read_page: See struct ttm_bo_device.
37479+ *
37480+ * Create a struct ttm_tt to back data with system memory pages.
37481+ * No pages are actually allocated.
37482+ * Returns:
37483+ * NULL: Out of memory.
37484+ */
37485+extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
37486+ unsigned long size,
37487+ uint32_t page_flags,
37488+ struct page *dummy_read_page);
37489+
37490+/**
37491+ * ttm_tt_set_user:
37492+ *
37493+ * @ttm: The struct ttm_tt to populate.
37494+ * @tsk: A struct task_struct for which @start is a valid user-space address.
37495+ * @start: A valid user-space address.
37496+ * @num_pages: Size in pages of the user memory area.
37497+ *
37498+ * Populate a struct ttm_tt with a user-space memory area after first pinning
37499+ * the pages backing it.
37500+ * Returns:
37501+ * !0: Error.
37502+ */
37503+
37504+extern int ttm_tt_set_user(struct ttm_tt *ttm,
37505+ struct task_struct *tsk,
37506+ unsigned long start, unsigned long num_pages);
37507+
37508+/**
37509+ * ttm_tt_bind:
37510+ *
37511+ * @ttm: The struct ttm_tt containing backing pages.
37512+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
37513+ *
37514+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
37515+ */
37516+extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
37517+
37518+/**
37519+ * ttm_tt_destroy:
37520+ *
37521+ * @ttm: The struct ttm_tt.
37522+ *
37523+ * Unbind, unpopulate and destroy a struct ttm_tt.
37524+ */
37525+extern void ttm_tt_destroy(struct ttm_tt *ttm);
37526+
37527+/**
37528+ * ttm_tt_unbind:
37529+ *
37530+ * @ttm: The struct ttm_tt.
37531+ *
37532+ * Unbind a struct ttm_tt.
37533+ */
37534+extern void ttm_tt_unbind(struct ttm_tt *ttm);
37535+
37536+/**
37537+ * ttm_tt_get_page:
37538+ *
37539+ * @ttm: The struct ttm_tt.
37540+ * @index: Index of the desired page.
37541+ *
37542+ * Return a pointer to the struct page backing @ttm at page
37543+ * index @index. If the page is unpopulated, one will be allocated to
37544+ * populate that index.
37545+ *
37546+ * Returns:
37547+ * NULL on OOM.
37548+ */
37549+extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
37550+
37551+/**
37552+ * ttm_tt_cache_flush:
37553+ *
37554+ * @pages: An array of pointers to struct page:s to flush.
37555+ * @num_pages: Number of pages to flush.
37556+ *
37557+ * Flush the data of the indicated pages from the cpu caches.
37558+ * This is used when changing caching attributes of the pages from
37559+ * cache-coherent.
37560+ */
37561+extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
37562+
37563+/**
37564+ * ttm_tt_set_placement_caching:
37565+ *
37566+ * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
37567+ * @placement: Flag indicating the desired caching policy.
37568+ *
37569+ * This function will change caching policy of any default kernel mappings of
37570+ * the pages backing @ttm. If changing from cached to uncached or write-combined,
37571+ * all CPU caches will first be flushed to make sure the data of the pages
37572+ * hit RAM. This function may be very costly as it involves global TLB
37573+ * and cache flushes and potential page splitting / combining.
37574+ */
37575+extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
37576+extern int ttm_tt_swapout(struct ttm_tt *ttm,
37577+ struct file *persistant_swap_storage);
37578+
37579+/*
37580+ * ttm_bo.c
37581+ */
37582+
37583+/**
37584+ * ttm_mem_reg_is_pci
37585+ *
37586+ * @bdev: Pointer to a struct ttm_bo_device.
37587+ * @mem: A valid struct ttm_mem_reg.
37588+ *
37589+ * Returns true if the memory described by @mem is PCI memory,
37590+ * false otherwise.
37591+ */
37592+extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
37593+ struct ttm_mem_reg *mem);
37594+
37595+/**
37596+ * ttm_bo_mem_space
37597+ *
37598+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
37599+ * we want to allocate space for.
37600+ * @mem: A struct ttm_mem_reg with the struct ttm_mem_reg::proposed_flags set
37601+ * up.
37602+ * @interruptible: Sleep interruptibly when waiting for space.
37603+ * @no_wait: Don't sleep waiting for space to become available.
37604+ *
37605+ * Allocate memory space for the buffer object pointed to by @bo, using
37606+ * the placement flags in @mem, potentially evicting other idle buffer objects.
37607+ * This function may sleep while waiting for space to become available.
37608+ * Returns:
37609+ * -EBUSY: No space available (only if no_wait == 1).
37610+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
37611+ * fragmentation or concurrent allocators.
37612+ * -ERESTART: An interruptible sleep was interrupted by a signal.
37613+ */
37614+extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
37615+ struct ttm_mem_reg *mem,
37616+ bool interruptible, bool no_wait);
37617+/**
37618+ * ttm_bo_wait_for_cpu
37619+ *
37620+ * @bo: Pointer to a struct ttm_buffer_object.
37621+ * @no_wait: Don't sleep while waiting.
37622+ *
37623+ * Wait until a buffer object is no longer sync'ed for CPU access.
37624+ * Returns:
37625+ * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
37626+ * -ERESTART: An interruptible sleep was interrupted by a signal.
37627+ */
37628+
37629+extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
37630+
37631+/**
37632+ * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
37633+ *
37634+ * @bdev: Pointer to a struct ttm_bo_device. @mem: A valid struct ttm_mem_reg.
37635+ * @bus_base: On return the base of the PCI region
37636+ * @bus_offset: On return the byte offset into the PCI region
37637+ * @bus_size: On return the byte size of the buffer object or zero if
37638+ * the buffer object memory is not accessible through a PCI region.
37639+ *
37640+ * Returns:
37641+ * -EINVAL if the buffer object is currently not mappable.
37642+ * 0 otherwise.
37643+ */
37644+
37645+extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
37646+ struct ttm_mem_reg *mem,
37647+ unsigned long *bus_base,
37648+ unsigned long *bus_offset,
37649+ unsigned long *bus_size);
37650+
37651+extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
37652+
37653+/**
37654+ * ttm_bo_device_init
37655+ *
37656+ * @bdev: A pointer to a struct ttm_bo_device to initialize.
37657+ * @mem_global: A pointer to an initialized struct ttm_mem_global.
37658+ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
37659+ * @file_page_offset: Offset into the device address space that is available
37660+ * for buffer data. This ensures compatibility with other users of the
37661+ * address space.
37662+ *
37663+ * Initializes a struct ttm_bo_device:
37664+ * Returns:
37665+ * !0: Failure.
37666+ */
37667+extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
37668+ struct ttm_mem_global *mem_glob,
37669+ struct ttm_bo_driver *driver,
37670+ uint64_t file_page_offset);
37671+
37672+/**
37673+ * ttm_bo_reserve:
37674+ *
37675+ * @bo: A pointer to a struct ttm_buffer_object.
37676+ * @interruptible: Sleep interruptible if waiting.
37677+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
37678+ * @use_sequence: If @bo is already reserved, Only sleep waiting for
37679+ * it to become unreserved if @sequence < (@bo)->sequence.
37680+ *
37681+ * Locks a buffer object for validation. (Or prevents other processes from
37682+ * locking it for validation) and removes it from lru lists, while taking
37683+ * a number of measures to prevent deadlocks.
37684+ *
37685+ * Deadlocks may occur when two processes try to reserve multiple buffers in
37686+ * different order, either by will or as a result of a buffer being evicted
37687+ * to make room for a buffer already reserved. (Buffers are reserved before
37688+ * they are evicted). The following algorithm prevents such deadlocks from
37689+ * occurring:
37690+ * 1) Buffers are reserved with the lru spinlock held. Upon successful
37691+ * reservation they are removed from the lru list. This stops a reserved buffer
37692+ * from being evicted. However the lru spinlock is released between the time
37693+ * a buffer is selected for eviction and the time it is reserved.
37694+ * Therefore a check is made when a buffer is reserved for eviction, that it
37695+ * is still the first buffer in the lru list, before it is removed from the
37696+ * list. @check_lru == 1 forces this check. If it fails, the function returns
37697+ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
37698+ * the procedure.
37699+ * 2) Processes attempting to reserve multiple buffers other than for eviction,
37700+ * (typically execbuf), should first obtain a unique 32-bit
37701+ * validation sequence number,
37702+ * and call this function with @use_sequence == 1 and @sequence == the unique
37703+ * sequence number. If upon call of this function, the buffer object is already
37704+ * reserved, the validation sequence is checked against the validation
37705+ * sequence of the process currently reserving the buffer,
37706+ * and if the current validation sequence is greater than that of the process
37707+ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
37708+ * waiting for the buffer to become unreserved, after which it retries reserving.
37709+ * The caller should, when receiving an -EAGAIN error
37710+ * release all its buffer reservations, wait for @bo to become unreserved, and
37711+ * then rerun the validation with the same validation sequence. This procedure
37712+ * will always guarantee that the process with the lowest validation sequence
37713+ * will eventually succeed, preventing both deadlocks and starvation.
37714+ *
37715+ * Returns:
37716+ * -EAGAIN: The reservation may cause a deadlock. Release all buffer reservations,
37717+ * wait for @bo to become unreserved and try again. (only if use_sequence == 1).
37718+ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
37719+ * a signal. Release all buffer reservations and return to user-space.
37720+ */
37721+extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
37722+ bool interruptible,
37723+ bool no_wait, bool use_sequence, uint32_t sequence);
37724+
37725+/**
37726+ * ttm_bo_unreserve
37727+ *
37728+ * @bo: A pointer to a struct ttm_buffer_object.
37729+ *
37730+ * Unreserve a previous reservation of @bo.
37731+ */
37732+extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
37733+
37734+/**
37735+ * ttm_bo_wait_unreserved
37736+ *
37737+ * @bo: A pointer to a struct ttm_buffer_object.
37738+ *
37739+ * Wait for a struct ttm_buffer_object to become unreserved.
37740+ * This is typically used in the execbuf code to relax cpu-usage when
37741+ * backing off from a potential deadlock condition.
37742+ */
37743+extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
37744+ bool interruptible);
37745+
37746+/**
37747+ * ttm_bo_block_reservation
37748+ *
37749+ * @bo: A pointer to a struct ttm_buffer_object.
37750+ * @interruptible: Use interruptible sleep when waiting.
37751+ * @no_wait: Don't sleep, but rather return -EBUSY.
37752+ *
37753+ * Block reservation for validation by simply reserving the buffer. This is intended
37754+ * for single buffer use only without eviction, and thus needs no deadlock protection.
37755+ *
37756+ * Returns:
37757+ * -EBUSY: If no_wait == 1 and the buffer is already reserved.
37758+ * -ERESTART: If interruptible == 1 and the process received a signal while sleeping.
37759+ */
37760+extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
37761+ bool interruptible, bool no_wait);
37762+
37763+/**
37764+ * ttm_bo_unblock_reservation
37765+ *
37766+ * @bo: A pointer to a struct ttm_buffer_object.
37767+ *
37768+ * Unblocks reservation leaving lru lists untouched.
37769+ */
37770+extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
37771+
37772+/*
37773+ * ttm_bo_util.c
37774+ */
37775+
37776+/**
37777+ * ttm_bo_move_ttm
37778+ *
37779+ * @bo: A pointer to a struct ttm_buffer_object.
37780+ * @evict: 1: This is an eviction. Don't try to pipeline.
37781+ * @no_wait: Never sleep, but rather return with -EBUSY.
37782+ * @new_mem: struct ttm_mem_reg indicating where to move.
37783+ *
37784+ * Optimized move function for a buffer object with both old and
37785+ * new placement backed by a TTM. The function will, if successful,
37786+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
37787+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
37788+ * data remains untouched, and it's up to the caller to free the
37789+ * memory space indicated by @new_mem.
37790+ * Returns:
37791+ * !0: Failure.
37792+ */
37793+
37794+extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
37795+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem);
37796+
37797+/**
37798+ * ttm_bo_move_memcpy
37799+ *
37800+ * @bo: A pointer to a struct ttm_buffer_object.
37801+ * @evict: 1: This is an eviction. Don't try to pipeline.
37802+ * @no_wait: Never sleep, but rather return with -EBUSY.
37803+ * @new_mem: struct ttm_mem_reg indicating where to move.
37804+ *
37805+ * Fallback move function for a mappable buffer object in mappable memory.
37806+ * The function will, if successful,
37807+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
37808+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
37809+ * data remains untouched, and it's up to the caller to free the
37810+ * memory space indicated by @new_mem.
37811+ * Returns:
37812+ * !0: Failure.
37813+ */
37814+
37815+extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
37816+ bool evict,
37817+ bool no_wait, struct ttm_mem_reg *new_mem);
37818+
37819+/**
37820+ * ttm_bo_free_old_node
37821+ *
37822+ * @bo: A pointer to a struct ttm_buffer_object.
37823+ *
37824+ * Utility function to free an old placement after a successful move.
37825+ */
37826+extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
37827+
37828+/**
37829+ * ttm_bo_move_accel_cleanup.
37830+ *
37831+ * @bo: A pointer to a struct ttm_buffer_object.
37832+ * @sync_obj: A sync object that signals when moving is complete.
37833+ * @sync_obj_arg: An argument to pass to the sync object idle / wait
37834+ * functions.
37835+ * @evict: This is an evict move. Don't return until the buffer is idle.
37836+ * @no_wait: Never sleep, but rather return with -EBUSY.
37837+ * @new_mem: struct ttm_mem_reg indicating where to move.
37838+ *
37839+ * Accelerated move function to be called when an accelerated move
37840+ * has been scheduled. The function will create a new temporary buffer object
37841+ * representing the old placement, and put the sync object on both buffer
37842+ * objects. After that the newly created buffer object is unref'd to be
37843+ * destroyed when the move is complete. This will help pipeline
37844+ * buffer moves.
37845+ */
37846+
37847+extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
37848+ void *sync_obj,
37849+ void *sync_obj_arg,
37850+ bool evict, bool no_wait,
37851+ struct ttm_mem_reg *new_mem);
37852+/**
37853+ * ttm_io_prot
37854+ *
37855+ * @c_state: Caching state.
37856+ * @tmp: Page protection flag for a normal, cached mapping.
37857+ *
37858+ * Utility function that returns the pgprot_t that should be used for
37859+ * setting up a PTE with the caching model indicated by @c_state.
37860+ */
37861+extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
37862+
37863+#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
37864+#define TTM_HAS_AGP
37865+#include <linux/agp_backend.h>
37866+
37867+/**
37868+ * ttm_agp_backend_init
37869+ *
37870+ * @bdev: Pointer to a struct ttm_bo_device.
37871+ * @bridge: The agp bridge this device is sitting on.
37872+ *
37873+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
37874+ * for TT memory. This function uses the linux agpgart interface to
37875+ * bind and unbind memory backing a ttm_tt.
37876+ */
37877+extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
37878+ struct agp_bridge_data *bridge);
37879+#endif
37880+
37881+#endif
37882diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo_util.c b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c
37883new file mode 100644
37884index 0000000..6c92310
37885--- /dev/null
37886+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c
37887@@ -0,0 +1,536 @@
37888+/**************************************************************************
37889+ *
37890+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
37891+ * All Rights Reserved.
37892+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
37893+ * All Rights Reserved.
37894+ *
37895+ * Permission is hereby granted, free of charge, to any person obtaining a
37896+ * copy of this software and associated documentation files (the
37897+ * "Software"), to deal in the Software without restriction, including
37898+ * without limitation the rights to use, copy, modify, merge, publish,
37899+ * distribute, sub license, and/or sell copies of the Software, and to
37900+ * permit persons to whom the Software is furnished to do so, subject to
37901+ * the following conditions:
37902+ *
37903+ * The above copyright notice and this permission notice (including the
37904+ * next paragraph) shall be included in all copies or substantial portions
37905+ * of the Software.
37906+ *
37907+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37908+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37909+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
37910+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
37911+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37912+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
37913+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
37914+ *
37915+ **************************************************************************/
37916+/*
37917+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
37918+ */
37919+
37920+#include "ttm/ttm_bo_driver.h"
37921+#include "ttm/ttm_placement_common.h"
37922+#include "ttm/ttm_pat_compat.h"
37923+#include <linux/io.h>
37924+#include <linux/highmem.h>
37925+#include <linux/wait.h>
37926+#include <linux/version.h>
37927+
37928+void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
37929+{
37930+ struct ttm_mem_reg *old_mem = &bo->mem;
37931+
37932+ if (old_mem->mm_node) {
37933+ spin_lock(&bo->bdev->lru_lock);
37934+ drm_mm_put_block(old_mem->mm_node);
37935+ spin_unlock(&bo->bdev->lru_lock);
37936+ }
37937+ old_mem->mm_node = NULL;
37938+}
37939+
37940+int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
37941+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
37942+{
37943+ struct ttm_tt *ttm = bo->ttm;
37944+ struct ttm_mem_reg *old_mem = &bo->mem;
37945+ uint32_t save_flags = old_mem->flags;
37946+ uint32_t save_proposed_flags = old_mem->proposed_flags;
37947+ int ret;
37948+
37949+ if (old_mem->mem_type != TTM_PL_SYSTEM) {
37950+ ttm_tt_unbind(ttm);
37951+ ttm_bo_free_old_node(bo);
37952+ ttm_flag_masked(&old_mem->flags, TTM_PL_FLAG_SYSTEM,
37953+ TTM_PL_MASK_MEM);
37954+ old_mem->mem_type = TTM_PL_SYSTEM;
37955+ save_flags = old_mem->flags;
37956+ }
37957+
37958+ ret = ttm_tt_set_placement_caching(ttm, new_mem->flags);
37959+ if (unlikely(ret != 0))
37960+ return ret;
37961+
37962+ if (new_mem->mem_type != TTM_PL_SYSTEM) {
37963+ ret = ttm_tt_bind(ttm, new_mem);
37964+ if (unlikely(ret != 0))
37965+ return ret;
37966+ }
37967+
37968+ *old_mem = *new_mem;
37969+ new_mem->mm_node = NULL;
37970+ old_mem->proposed_flags = save_proposed_flags;
37971+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
37972+ return 0;
37973+}
37974+
37975+int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
37976+ void **virtual)
37977+{
37978+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
37979+ unsigned long bus_offset;
37980+ unsigned long bus_size;
37981+ unsigned long bus_base;
37982+ int ret;
37983+ void *addr;
37984+
37985+ *virtual = NULL;
37986+ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
37987+ if (ret || bus_size == 0)
37988+ return ret;
37989+
37990+ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
37991+ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
37992+ else {
37993+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
37994+ if (mem->flags & TTM_PL_FLAG_WC)
37995+ addr = ioremap_wc(bus_base + bus_offset, bus_size);
37996+ else
37997+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
37998+#else
37999+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
38000+#endif
38001+ if (!addr)
38002+ return -ENOMEM;
38003+ }
38004+ *virtual = addr;
38005+ return 0;
38006+}
38007+
38008+void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
38009+ void *virtual)
38010+{
38011+ struct ttm_mem_type_manager *man;
38012+
38013+ man = &bdev->man[mem->mem_type];
38014+
38015+ if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
38016+ iounmap(virtual);
38017+}
38018+
38019+static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
38020+{
38021+ uint32_t *dstP =
38022+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
38023+ uint32_t *srcP =
38024+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
38025+
38026+ int i;
38027+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
38028+ iowrite32(ioread32(srcP++), dstP++);
38029+ return 0;
38030+}
38031+
38032+static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
38033+ unsigned long page)
38034+{
38035+ struct page *d = ttm_tt_get_page(ttm, page);
38036+ void *dst;
38037+
38038+ if (!d)
38039+ return -ENOMEM;
38040+
38041+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
38042+ dst = kmap(d);
38043+ if (!dst)
38044+ return -ENOMEM;
38045+
38046+ memcpy_fromio(dst, src, PAGE_SIZE);
38047+ kunmap(d);
38048+ return 0;
38049+}
38050+
38051+static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
38052+ unsigned long page)
38053+{
38054+ struct page *s = ttm_tt_get_page(ttm, page);
38055+ void *src;
38056+
38057+ if (!s)
38058+ return -ENOMEM;
38059+
38060+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
38061+ src = kmap(s);
38062+ if (!src)
38063+ return -ENOMEM;
38064+
38065+ memcpy_toio(dst, src, PAGE_SIZE);
38066+ kunmap(s);
38067+ return 0;
38068+}
38069+
38070+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
38071+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
38072+{
38073+ struct ttm_bo_device *bdev = bo->bdev;
38074+ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
38075+ struct ttm_tt *ttm = bo->ttm;
38076+ struct ttm_mem_reg *old_mem = &bo->mem;
38077+ struct ttm_mem_reg old_copy = *old_mem;
38078+ void *old_iomap;
38079+ void *new_iomap;
38080+ int ret;
38081+ uint32_t save_flags = old_mem->flags;
38082+ uint32_t save_proposed_flags = old_mem->proposed_flags;
38083+ unsigned long i;
38084+ unsigned long page;
38085+ unsigned long add = 0;
38086+ int dir;
38087+
38088+ ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
38089+ if (ret)
38090+ return ret;
38091+ ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
38092+ if (ret)
38093+ goto out;
38094+
38095+ if (old_iomap == NULL && new_iomap == NULL)
38096+ goto out2;
38097+ if (old_iomap == NULL && ttm == NULL)
38098+ goto out2;
38099+
38100+ add = 0;
38101+ dir = 1;
38102+
38103+ if ((old_mem->mem_type == new_mem->mem_type) &&
38104+ (new_mem->mm_node->start <
38105+ old_mem->mm_node->start + old_mem->mm_node->size)) {
38106+ dir = -1;
38107+ add = new_mem->num_pages - 1;
38108+ }
38109+
38110+ for (i = 0; i < new_mem->num_pages; ++i) {
38111+ page = i * dir + add;
38112+ if (old_iomap == NULL)
38113+ ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
38114+ else if (new_iomap == NULL)
38115+ ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
38116+ else
38117+ ret = ttm_copy_io_page(new_iomap, old_iomap, page);
38118+ if (ret)
38119+ goto out1;
38120+ }
38121+ mb();
38122+ out2:
38123+ ttm_bo_free_old_node(bo);
38124+
38125+ *old_mem = *new_mem;
38126+ new_mem->mm_node = NULL;
38127+ old_mem->proposed_flags = save_proposed_flags;
38128+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
38129+
38130+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
38131+ ttm_tt_unbind(ttm);
38132+ ttm_tt_destroy(ttm);
38133+ bo->ttm = NULL;
38134+ }
38135+
38136+ out1:
38137+ ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
38138+ out:
38139+ ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
38140+ return ret;
38141+}
38142+
38143+/**
38144+ * ttm_buffer_object_transfer
38145+ *
38146+ * @bo: A pointer to a struct ttm_buffer_object.
38147+ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
38148+ * holding the data of @bo with the old placement.
38149+ *
38150+ * This is a utility function that may be called after an accelerated move
38151+ * has been scheduled. A new buffer object is created as a placeholder for
38152+ * the old data while it's being copied. When that buffer object is idle,
38153+ * it can be destroyed, releasing the space of the old placement.
38154+ * Returns:
38155+ * !0: Failure.
38156+ */
38157+
38158+static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
38159+ struct ttm_buffer_object **new_obj)
38160+{
38161+ struct ttm_buffer_object *fbo;
38162+ struct ttm_bo_device *bdev = bo->bdev;
38163+ struct ttm_bo_driver *driver = bdev->driver;
38164+
38165+ fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
38166+ if (!fbo)
38167+ return -ENOMEM;
38168+
38169+ *fbo = *bo;
38170+ mutex_init(&fbo->mutex);
38171+ mutex_lock(&fbo->mutex);
38172+
38173+ init_waitqueue_head(&fbo->event_queue);
38174+ INIT_LIST_HEAD(&fbo->ddestroy);
38175+ INIT_LIST_HEAD(&fbo->lru);
38176+
38177+ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
38178+ if (fbo->mem.mm_node)
38179+ fbo->mem.mm_node->private = (void *)fbo;
38180+ kref_init(&fbo->list_kref);
38181+ kref_init(&fbo->kref);
38182+
38183+ mutex_unlock(&fbo->mutex);
38184+
38185+ *new_obj = fbo;
38186+ return 0;
38187+}
38188+
38189+pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
38190+{
38191+#if defined(__i386__) || defined(__x86_64__)
38192+ if (caching_flags & TTM_PL_FLAG_WC) {
38193+ tmp = pgprot_ttm_x86_wc(tmp);
38194+ } else if (boot_cpu_data.x86 > 3 &&
38195+ (caching_flags & TTM_PL_FLAG_UNCACHED)) {
38196+ tmp = pgprot_noncached(tmp);
38197+ }
38198+#elif defined(__powerpc__)
38199+ if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
38200+ pgprot_val(tmp) |= _PAGE_NO_CACHE;
38201+ if (caching_flags & TTM_PL_FLAG_UNCACHED)
38202+ pgprot_val(tmp) |= _PAGE_GUARDED;
38203+ }
38204+#endif
38205+#if defined(__ia64__)
38206+ if (caching_flags & TTM_PL_FLAG_WC)
38207+ tmp = pgprot_writecombine(tmp);
38208+ else
38209+ tmp = pgprot_noncached(tmp);
38210+#endif
38211+#if defined(__sparc__)
38212+ if (!(caching_flags & TTM_PL_FLAG_CACHED))
38213+ tmp = pgprot_noncached(tmp);
38214+#endif
38215+ return tmp;
38216+}
38217+
38218+static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
38219+ unsigned long bus_base,
38220+ unsigned long bus_offset,
38221+ unsigned long bus_size,
38222+ struct ttm_bo_kmap_obj *map)
38223+{
38224+ struct ttm_bo_device * bdev = bo->bdev;
38225+ struct ttm_mem_reg * mem = &bo->mem;
38226+ struct ttm_mem_type_manager * man = &bdev->man[mem->mem_type];
38227+
38228+ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
38229+ map->bo_kmap_type = ttm_bo_map_premapped;
38230+ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);} else {
38231+ map->bo_kmap_type = ttm_bo_map_iomap;
38232+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
38233+ if (mem->flags & TTM_PL_FLAG_WC)
38234+ map->virtual = ioremap_wc(bus_base + bus_offset, bus_size);
38235+ else
38236+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
38237+#else
38238+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
38239+#endif
38240+ }
38241+ return (!map->virtual) ? -ENOMEM : 0;
38242+}
38243+
38244+static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
38245+ unsigned long start_page,
38246+ unsigned long num_pages,
38247+ struct ttm_bo_kmap_obj *map)
38248+{
38249+ struct ttm_mem_reg * mem = &bo->mem; pgprot_t prot;
38250+ struct ttm_tt * ttm = bo->ttm;
38251+ struct page * d;
38252+ bool do_kmap = false;
38253+ int i;
38254+ BUG_ON(!ttm);
38255+ if (num_pages == 1) {
38256+ map->page = ttm_tt_get_page(ttm, start_page);
38257+ do_kmap = (!PageHighMem(map->page) ||
38258+ (mem->flags & TTM_PL_FLAG_CACHED));
38259+ }
38260+
38261+ if (do_kmap) {
38262+ /*
38263+ * We're mapping a single page, and the desired
38264+ * page protection is consistent with the bo.
38265+ */
38266+ map->bo_kmap_type = ttm_bo_map_kmap;
38267+ map->virtual = kmap(map->page);
38268+ } else {
38269+ /*
38270+ * Populate the part we're mapping;
38271+ */
38272+ for (i = start_page; i < start_page + num_pages; ++i) {
38273+ d = ttm_tt_get_page(ttm, i); if (!d)
38274+ return -ENOMEM;
38275+ }
38276+
38277+ /*
38278+ * We need to use vmap to get the desired page protection
38279+ * or to make the buffer object look contigous.
38280+ */
38281+ prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
38282+ PAGE_KERNEL :
38283+ ttm_io_prot(mem->flags, PAGE_KERNEL);
38284+ map->bo_kmap_type = ttm_bo_map_vmap;
38285+ map->virtual = vmap(ttm->pages + start_page, num_pages, 0, prot);
38286+ }
38287+ return (!map->virtual) ? -ENOMEM : 0;
38288+}
38289+
38290+int ttm_bo_kmap(struct ttm_buffer_object *bo,
38291+ unsigned long start_page, unsigned long num_pages,
38292+ struct ttm_bo_kmap_obj *map)
38293+{
38294+ int ret;
38295+ unsigned long bus_base;
38296+ unsigned long bus_offset;
38297+ unsigned long bus_size;
38298+ BUG_ON(!list_empty(&bo->swap));
38299+ map->virtual = NULL;
38300+ if (num_pages > bo->num_pages)
38301+ return -EINVAL;
38302+ if (start_page > bo->num_pages)
38303+ return -EINVAL;
38304+#if 0
38305+ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
38306+ return -EPERM;
38307+#endif
38308+ ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
38309+ &bus_offset, &bus_size);
38310+ if (ret)
38311+ return ret;
38312+ if (bus_size == 0) {
38313+ return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
38314+ } else {
38315+ bus_offset += start_page << PAGE_SHIFT;
38316+ bus_size = num_pages << PAGE_SHIFT;
38317+ return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
38318+ }
38319+}
38320+
38321+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
38322+{
38323+ if (!map->virtual)
38324+ return;
38325+ switch (map->bo_kmap_type) {
38326+ case ttm_bo_map_iomap:
38327+ iounmap(map->virtual);
38328+ break;
38329+ case ttm_bo_map_vmap:
38330+ vunmap(map->virtual);
38331+ break;
38332+ case ttm_bo_map_kmap:
38333+ kunmap(map->page);
38334+ break;
38335+ case ttm_bo_map_premapped:
38336+ break;
38337+ default:
38338+ BUG();
38339+ }
38340+ map->virtual = NULL;
38341+ map->page = NULL;
38342+}
38343+
38344+int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
38345+ unsigned long dst_offset,
38346+ unsigned long *pfn, pgprot_t * prot)
38347+{
38348+ struct ttm_mem_reg * mem = &bo->mem;
38349+ struct ttm_bo_device * bdev = bo->bdev;
38350+ unsigned long bus_offset;
38351+ unsigned long bus_size;
38352+ unsigned long bus_base;
38353+ int ret;
38354+ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
38355+ &bus_size);
38356+ if (ret)
38357+ return -EINVAL;
38358+ if (bus_size != 0)
38359+ * pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
38360+ else
38361+ if (!bo->ttm)
38362+ return -EINVAL;
38363+ else
38364+ *pfn =
38365+ page_to_pfn(ttm_tt_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));
38366+ *prot =
38367+ (mem->flags & TTM_PL_FLAG_CACHED) ? PAGE_KERNEL : ttm_io_prot(mem->
38368+ flags,
38369+ PAGE_KERNEL);
38370+ return 0;
38371+}
38372+
38373+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
38374+ void *sync_obj,
38375+ void *sync_obj_arg,
38376+ bool evict, bool no_wait,
38377+ struct ttm_mem_reg *new_mem)
38378+{
38379+ struct ttm_bo_device * bdev = bo->bdev;
38380+ struct ttm_bo_driver * driver = bdev->driver;
38381+ struct ttm_mem_type_manager * man = &bdev->man[new_mem->mem_type];
38382+ struct ttm_mem_reg * old_mem = &bo->mem;
38383+ int ret;
38384+ uint32_t save_flags = old_mem->flags;
38385+ uint32_t save_proposed_flags = old_mem->proposed_flags;
38386+ struct ttm_buffer_object * old_obj;
38387+ if (bo->sync_obj)
38388+ driver->sync_obj_unref(&bo->sync_obj);
38389+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
38390+ bo->sync_obj_arg = sync_obj_arg;
38391+ if (evict) {
38392+ ret = ttm_bo_wait(bo, false, false, false);
38393+ if (ret)
38394+ return ret;
38395+ ttm_bo_free_old_node(bo);
38396+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm != NULL)) {
38397+ ttm_tt_unbind(bo->ttm); ttm_tt_destroy(bo->ttm); bo->ttm = NULL;
38398+ }
38399+ } else {
38400+
38401+ /* This should help pipeline ordinary buffer moves.
38402+ *
38403+ * Hang old buffer memory on a new buffer object,
38404+ * and leave it to be released when the GPU
38405+ * operation has completed.
38406+ */
38407+ ret = ttm_buffer_object_transfer(bo, &old_obj);
38408+ if (ret)
38409+ return ret;
38410+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
38411+ old_obj->ttm = NULL;
38412+ else
38413+ bo->ttm = NULL;
38414+ bo->priv_flags |= TTM_BO_PRIV_FLAG_MOVING;
38415+ ttm_bo_unreserve(old_obj);
38416+ }
38417+
38418+ *old_mem = *new_mem;
38419+ new_mem->mm_node = NULL;
38420+ old_mem->proposed_flags = save_proposed_flags;
38421+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
38422+ return 0;
38423+}
38424diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c
38425new file mode 100644
38426index 0000000..4d950fc
38427--- /dev/null
38428+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c
38429@@ -0,0 +1,596 @@
38430+/**************************************************************************
38431+ *
38432+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
38433+ * All Rights Reserved.
38434+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
38435+ * All Rights Reserved.
38436+ *
38437+ * Permission is hereby granted, free of charge, to any person obtaining a
38438+ * copy of this software and associated documentation files (the
38439+ * "Software"), to deal in the Software without restriction, including
38440+ * without limitation the rights to use, copy, modify, merge, publish,
38441+ * distribute, sub license, and/or sell copies of the Software, and to
38442+ * permit persons to whom the Software is furnished to do so, subject to
38443+ * the following conditions:
38444+ *
38445+ * The above copyright notice and this permission notice (including the
38446+ * next paragraph) shall be included in all copies or substantial portions
38447+ * of the Software.
38448+ *
38449+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
38450+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
38451+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
38452+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
38453+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
38454+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
38455+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
38456+ *
38457+ **************************************************************************/
38458+/*
38459+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
38460+ */
38461+
38462+
38463+#include "ttm/ttm_bo_driver.h"
38464+#include "ttm/ttm_placement_common.h"
38465+#include <linux/mm.h>
38466+#include <linux/version.h>
38467+#include <linux/rbtree.h>
38468+#include <asm/uaccess.h>
38469+
38470+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
38471+#error "TTM doesn't build on kernel versions below 2.6.25."
38472+#endif
38473+
38474+#define TTM_BO_VM_NUM_PREFAULT 16
38475+
38476+static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
38477+ unsigned long page_start,
38478+ unsigned long num_pages)
38479+{
38480+ struct rb_node *cur = bdev->addr_space_rb.rb_node;
38481+ unsigned long cur_offset;
38482+ struct ttm_buffer_object *bo;
38483+ struct ttm_buffer_object *best_bo = NULL;
38484+
38485+ while (likely(cur != NULL)) {
38486+ bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
38487+ cur_offset = bo->vm_node->start;
38488+ if (page_start >= cur_offset) {
38489+ cur = cur->rb_right;
38490+ best_bo = bo;
38491+ if (page_start == cur_offset)
38492+ break;
38493+ } else
38494+ cur = cur->rb_left;
38495+ }
38496+
38497+ if (unlikely(best_bo == NULL))
38498+ return NULL;
38499+
38500+ if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
38501+ (page_start + num_pages)))
38502+ return NULL;
38503+
38504+ return best_bo;
38505+}
38506+
38507+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
38508+static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38509+{
38510+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
38511+ vma->vm_private_data;
38512+ struct ttm_bo_device *bdev = bo->bdev;
38513+ unsigned long bus_base;
38514+ unsigned long bus_offset;
38515+ unsigned long bus_size;
38516+ unsigned long page_offset;
38517+ unsigned long page_last;
38518+ unsigned long pfn;
38519+ struct ttm_tt *ttm = NULL;
38520+ struct page *page;
38521+ int ret;
38522+ int i;
38523+ bool is_iomem;
38524+ unsigned long address = (unsigned long)vmf->virtual_address;
38525+ int retval = VM_FAULT_NOPAGE;
38526+
38527+ ret = ttm_bo_reserve(bo, true, false, false, 0);
38528+ if (unlikely(ret != 0))
38529+ return VM_FAULT_NOPAGE;
38530+
38531+ mutex_lock(&bo->mutex);
38532+
38533+ /*
38534+ * Wait for buffer data in transit, due to a pipelined
38535+ * move.
38536+ */
38537+
38538+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
38539+ ret = ttm_bo_wait(bo, false, true, false);
38540+ if (unlikely(ret != 0)) {
38541+ retval = (ret != -ERESTART) ?
38542+ VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
38543+ goto out_unlock;
38544+ }
38545+ }
38546+
38547+ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
38548+ &bus_size);
38549+ if (unlikely(ret != 0)) {
38550+ retval = VM_FAULT_SIGBUS;
38551+ goto out_unlock;
38552+ }
38553+
38554+ is_iomem = (bus_size != 0);
38555+
38556+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
38557+ bo->vm_node->start - vma->vm_pgoff;
38558+ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
38559+ bo->vm_node->start - vma->vm_pgoff;
38560+
38561+ if (unlikely(page_offset >= bo->num_pages)) {
38562+ retval = VM_FAULT_SIGBUS;
38563+ goto out_unlock;
38564+ }
38565+
38566+ /*
38567+ * Strictly, we're not allowed to modify vma->vm_page_prot here,
38568+ * since the mmap_sem is only held in read mode. However, we
38569+ * modify only the caching bits of vma->vm_page_prot and
38570+ * consider those bits protected by
38571+ * the bo->mutex, as we should be the only writers.
38572+ * There shouldn't really be any readers of these bits except
38573+ * within vm_insert_mixed()? fork?
38574+ *
38575+ * TODO: Add a list of vmas to the bo, and change the
38576+ * vma->vm_page_prot when the object changes caching policy, with
38577+ * the correct locks held.
38578+ */
38579+
38580+ if (is_iomem) {
38581+ vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
38582+ vma->vm_page_prot);
38583+ } else {
38584+ ttm = bo->ttm;
38585+ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
38586+ vm_get_page_prot(vma->vm_flags) :
38587+ ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
38588+ }
38589+
38590+ /*
38591+ * Speculatively prefault a number of pages. Only error on
38592+ * first page.
38593+ */
38594+
38595+ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
38596+
38597+ if (is_iomem)
38598+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
38599+ page_offset;
38600+ else {
38601+ page = ttm_tt_get_page(ttm, page_offset);
38602+ if (unlikely(!page && i == 0)) {
38603+ retval = VM_FAULT_OOM;
38604+ goto out_unlock;
38605+ } else if (unlikely(!page)) {
38606+ break;
38607+ }
38608+ pfn = page_to_pfn(page);
38609+ }
38610+
38611+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
38612+ ret = vm_insert_mixed(vma, address, pfn);
38613+#else
38614+ ret = vm_insert_pfn(vma, address, pfn);
38615+#endif
38616+ /*
38617+ * Somebody beat us to this PTE or prefaulting to
38618+ * an already populated PTE, or prefaulting error.
38619+ */
38620+
38621+ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
38622+ break;
38623+ else if (unlikely(ret != 0)) {
38624+ retval =
38625+ (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
38626+ goto out_unlock;
38627+
38628+ }
38629+
38630+ address += PAGE_SIZE;
38631+ if (unlikely(++page_offset >= page_last))
38632+ break;
38633+ }
38634+
38635+ out_unlock:
38636+ mutex_unlock(&bo->mutex);
38637+ ttm_bo_unreserve(bo);
38638+ return retval;
38639+}
38640+
38641+#else
38642+
38643+static unsigned long ttm_bo_vm_nopfn(struct vm_area_struct *vma,
38644+ unsigned long address)
38645+{
38646+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
38647+ vma->vm_private_data;
38648+ struct ttm_bo_device *bdev = bo->bdev;
38649+ unsigned long bus_base;
38650+ unsigned long bus_offset;
38651+ unsigned long bus_size;
38652+ unsigned long page_offset;
38653+ unsigned long page_last;
38654+ unsigned long pfn;
38655+ struct ttm_tt *ttm = NULL;
38656+ struct page *page;
38657+ int ret;
38658+ int i;
38659+ bool is_iomem;
38660+ unsigned long retval = NOPFN_REFAULT;
38661+
38662+ ret = ttm_bo_reserve(bo, true, false, false, 0);
38663+ if (unlikely(ret != 0))
38664+ return NOPFN_REFAULT;
38665+
38666+ mutex_lock(&bo->mutex);
38667+
38668+ /*
38669+ * Wait for buffer data in transit, due to a pipelined
38670+ * move.
38671+ */
38672+
38673+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
38674+ ret = ttm_bo_wait(bo, false, true, false);
38675+ if (unlikely(ret != 0)) {
38676+ retval = (ret != -ERESTART) ?
38677+ NOPFN_SIGBUS : NOPFN_REFAULT;
38678+ goto out_unlock;
38679+ }
38680+ }
38681+
38682+ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
38683+ &bus_size);
38684+ if (unlikely(ret != 0)) {
38685+ printk(KERN_ERR "Attempted buffer object access "
38686+ "of unmappable object.\n");
38687+ retval = NOPFN_SIGBUS;
38688+ goto out_unlock;
38689+ }
38690+
38691+ is_iomem = (bus_size != 0);
38692+
38693+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
38694+ bo->vm_node->start - vma->vm_pgoff;
38695+
38696+ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
38697+ bo->vm_node->start - vma->vm_pgoff;
38698+
38699+ if (unlikely(page_offset >= bo->num_pages)) {
38700+ printk(KERN_ERR "Attempted buffer object access "
38701+ "outside object.\n");
38702+ retval = NOPFN_SIGBUS;
38703+ goto out_unlock;
38704+ }
38705+
38706+ /*
38707+ * Strictly, we're not allowed to modify vma->vm_page_prot here,
38708+ * since the mmap_sem is only held in read mode. However, we
38709+ * modify only the caching bits of vma->vm_page_prot and
38710+ * consider those bits protected by
38711+ * the bo->mutex, as we should be the only writers.
38712+ * There shouldn't really be any readers of these bits except
38713+ * within vm_insert_mixed()? fork?
38714+ *
38715+ * TODO: Add a list of vmas to the bo, and change the
38716+ * vma->vm_page_prot when the object changes caching policy, with
38717+ * the correct locks held.
38718+ */
38719+
38720+ if (is_iomem) {
38721+ vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
38722+ vma->vm_page_prot);
38723+ } else {
38724+ ttm = bo->ttm;
38725+ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
38726+ vm_get_page_prot(vma->vm_flags) :
38727+ ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
38728+ }
38729+
38730+ /*
38731+ * Speculatively prefault a number of pages. Only error on
38732+ * first page.
38733+ */
38734+
38735+ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
38736+
38737+ if (is_iomem)
38738+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
38739+ page_offset;
38740+ else {
38741+ page = ttm_tt_get_page(ttm, page_offset);
38742+ if (unlikely(!page && i == 0)) {
38743+ retval = NOPFN_OOM;
38744+ goto out_unlock;
38745+ } else if (unlikely(!page)) {
38746+ break;
38747+ }
38748+ pfn = page_to_pfn(page);
38749+ }
38750+
38751+ ret = vm_insert_pfn(vma, address, pfn);
38752+ if (unlikely(ret == -EBUSY || (ret != 0 && i != 0)))
38753+ break;
38754+
38755+ /*
38756+ * Somebody beat us to this PTE or prefaulting to
38757+ * an already populated PTE, or prefaulting error.
38758+ */
38759+
38760+ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
38761+ break;
38762+ else if (unlikely(ret != 0)) {
38763+ retval =
38764+ (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
38765+ goto out_unlock;
38766+ }
38767+
38768+ address += PAGE_SIZE;
38769+ if (unlikely(++page_offset >= page_last))
38770+ break;
38771+ }
38772+
38773+ out_unlock:
38774+ mutex_unlock(&bo->mutex);
38775+ ttm_bo_unreserve(bo);
38776+ return retval;
38777+}
38778+#endif
38779+
38780+static void ttm_bo_vm_open(struct vm_area_struct *vma)
38781+{
38782+ struct ttm_buffer_object *bo =
38783+ (struct ttm_buffer_object *)vma->vm_private_data;
38784+
38785+ (void)ttm_bo_reference(bo);
38786+}
38787+
38788+static void ttm_bo_vm_close(struct vm_area_struct *vma)
38789+{
38790+ struct ttm_buffer_object *bo =
38791+ (struct ttm_buffer_object *)vma->vm_private_data;
38792+
38793+ ttm_bo_unref(&bo);
38794+ vma->vm_private_data = NULL;
38795+}
38796+
38797+static struct vm_operations_struct ttm_bo_vm_ops = {
38798+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
38799+ .fault = ttm_bo_vm_fault,
38800+#else
38801+ .nopfn = ttm_bo_vm_nopfn,
38802+#endif
38803+ .open = ttm_bo_vm_open,
38804+ .close = ttm_bo_vm_close
38805+};
38806+
38807+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
38808+ struct ttm_bo_device *bdev)
38809+{
38810+ struct ttm_bo_driver *driver;
38811+ struct ttm_buffer_object *bo;
38812+ int ret;
38813+
38814+ read_lock(&bdev->vm_lock);
38815+ bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
38816+ (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
38817+ if (likely(bo != NULL))
38818+ ttm_bo_reference(bo);
38819+ read_unlock(&bdev->vm_lock);
38820+
38821+ if (unlikely(bo == NULL)) {
38822+ printk(KERN_ERR "Could not find buffer object to map.\n");
38823+ ret = -EINVAL;
38824+ goto out_unref;
38825+ }
38826+
38827+ driver = bo->bdev->driver;
38828+ if (unlikely(!driver->verify_access)) {
38829+ ret = -EPERM;
38830+ goto out_unref;
38831+ }
38832+ ret = driver->verify_access(bo, filp);
38833+ if (unlikely(ret != 0))
38834+ goto out_unref;
38835+
38836+ vma->vm_ops = &ttm_bo_vm_ops;
38837+
38838+ /*
38839+ * Note: We're transferring the bo reference to
38840+ * vma->vm_private_data here.
38841+ */
38842+
38843+ vma->vm_private_data = bo;
38844+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
38845+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
38846+#else
38847+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
38848+#endif
38849+ return 0;
38850+ out_unref:
38851+ ttm_bo_unref(&bo);
38852+ return ret;
38853+}
38854+
38855+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
38856+{
38857+ if (vma->vm_pgoff != 0)
38858+ return -EACCES;
38859+
38860+ vma->vm_ops = &ttm_bo_vm_ops;
38861+ vma->vm_private_data = ttm_bo_reference(bo);
38862+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
38863+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
38864+#else
38865+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
38866+#endif
38867+ return 0;
38868+}
38869+
38870+ssize_t ttm_bo_io(struct ttm_bo_device * bdev, struct file * filp,
38871+ const char __user * wbuf, char __user * rbuf, size_t count,
38872+ loff_t * f_pos, bool write)
38873+{
38874+ struct ttm_buffer_object *bo;
38875+ struct ttm_bo_driver *driver;
38876+ struct ttm_bo_kmap_obj map;
38877+ unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
38878+ unsigned long kmap_offset;
38879+ unsigned long kmap_end;
38880+ unsigned long kmap_num;
38881+ size_t io_size;
38882+ unsigned int page_offset;
38883+ char *virtual;
38884+ int ret;
38885+ bool no_wait = false;
38886+ bool dummy;
38887+
38888+ read_lock(&bdev->vm_lock);
38889+ bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
38890+ if (likely(bo != NULL))
38891+ ttm_bo_reference(bo);
38892+ read_unlock(&bdev->vm_lock);
38893+
38894+ if (unlikely(bo == NULL))
38895+ return -EFAULT;
38896+
38897+ driver = bo->bdev->driver;
38898+ if (unlikely(driver->verify_access))
38899+ return -EPERM;
38900+
38901+ ret = driver->verify_access(bo, filp);
38902+ if (unlikely(ret != 0))
38903+ goto out_unref;
38904+
38905+ kmap_offset = dev_offset - bo->vm_node->start;
38906+ if (unlikely(kmap_offset) >= bo->num_pages) {
38907+ ret = -EFBIG;
38908+ goto out_unref;
38909+ }
38910+
38911+ page_offset = *f_pos & ~PAGE_MASK;
38912+ io_size = bo->num_pages - kmap_offset;
38913+ io_size = (io_size << PAGE_SHIFT) - page_offset;
38914+ if (count < io_size)
38915+ io_size = count;
38916+
38917+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
38918+ kmap_num = kmap_end - kmap_offset + 1;
38919+
38920+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
38921+
38922+ switch (ret) {
38923+ case 0:
38924+ break;
38925+ case -ERESTART:
38926+ ret = -EINTR;
38927+ goto out_unref;
38928+ case -EBUSY:
38929+ ret = -EAGAIN;
38930+ goto out_unref;
38931+ default:
38932+ goto out_unref;
38933+ }
38934+
38935+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
38936+ if (unlikely(ret != 0))
38937+ goto out_unref;
38938+
38939+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
38940+ virtual += page_offset;
38941+
38942+ if (write)
38943+ ret = copy_from_user(virtual, wbuf, io_size);
38944+ else
38945+ ret = copy_to_user(rbuf, virtual, io_size);
38946+
38947+ ttm_bo_kunmap(&map);
38948+ ttm_bo_unreserve(bo);
38949+ ttm_bo_unref(&bo);
38950+
38951+ if (unlikely(ret != 0))
38952+ return -EFBIG;
38953+
38954+ *f_pos += io_size;
38955+
38956+ return io_size;
38957+ out_unref:
38958+ ttm_bo_unref(&bo);
38959+ return ret;
38960+}
38961+
38962+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object * bo, const char __user * wbuf,
38963+ char __user * rbuf, size_t count, loff_t * f_pos,
38964+ bool write)
38965+{
38966+ struct ttm_bo_kmap_obj map;
38967+ unsigned long kmap_offset;
38968+ unsigned long kmap_end;
38969+ unsigned long kmap_num;
38970+ size_t io_size;
38971+ unsigned int page_offset;
38972+ char *virtual;
38973+ int ret;
38974+ bool no_wait = false;
38975+ bool dummy;
38976+
38977+ kmap_offset = (*f_pos >> PAGE_SHIFT);
38978+ if (unlikely(kmap_offset) >= bo->num_pages)
38979+ return -EFBIG;
38980+
38981+ page_offset = *f_pos & ~PAGE_MASK;
38982+ io_size = bo->num_pages - kmap_offset;
38983+ io_size = (io_size << PAGE_SHIFT) - page_offset;
38984+ if (count < io_size)
38985+ io_size = count;
38986+
38987+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
38988+ kmap_num = kmap_end - kmap_offset + 1;
38989+
38990+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
38991+
38992+ switch (ret) {
38993+ case 0:
38994+ break;
38995+ case -ERESTART:
38996+ return -EINTR;
38997+ case -EBUSY:
38998+ return -EAGAIN;
38999+ default:
39000+ return ret;
39001+ }
39002+
39003+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
39004+ if (unlikely(ret != 0))
39005+ return ret;
39006+
39007+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
39008+ virtual += page_offset;
39009+
39010+ if (write)
39011+ ret = copy_from_user(virtual, wbuf, io_size);
39012+ else
39013+ ret = copy_to_user(rbuf, virtual, io_size);
39014+
39015+ ttm_bo_kunmap(&map);
39016+ ttm_bo_unreserve(bo);
39017+ ttm_bo_unref(&bo);
39018+
39019+ if (unlikely(ret != 0))
39020+ return ret;
39021+
39022+ *f_pos += io_size;
39023+
39024+ return io_size;
39025+}
39026diff --git a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c
39027new file mode 100644
39028index 0000000..4a34c18
39029--- /dev/null
39030+++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c
39031@@ -0,0 +1,115 @@
39032+/**************************************************************************
39033+ *
39034+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
39035+ * All Rights Reserved.
39036+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
39037+ * All Rights Reserved.
39038+ *
39039+ * Permission is hereby granted, free of charge, to any person obtaining a
39040+ * copy of this software and associated documentation files (the
39041+ * "Software"), to deal in the Software without restriction, including
39042+ * without limitation the rights to use, copy, modify, merge, publish,
39043+ * distribute, sub license, and/or sell copies of the Software, and to
39044+ * permit persons to whom the Software is furnished to do so, subject to
39045+ * the following conditions:
39046+ *
39047+ * The above copyright notice and this permission notice (including the
39048+ * next paragraph) shall be included in all copies or substantial portions
39049+ * of the Software.
39050+ *
39051+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
39052+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
39053+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
39054+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
39055+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
39056+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
39057+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
39058+ *
39059+ **************************************************************************/
39060+
39061+#include "ttm/ttm_execbuf_util.h"
39062+#include "ttm/ttm_bo_driver.h"
39063+#include "ttm/ttm_placement_common.h"
39064+#include <linux/wait.h>
39065+#include <linux/sched.h>
39066+
39067+void ttm_eu_backoff_reservation(struct list_head *list)
39068+{
39069+ struct ttm_validate_buffer *entry;
39070+
39071+ list_for_each_entry(entry, list, head) {
39072+ struct ttm_buffer_object *bo = entry->bo;
39073+ if (!entry->reserved)
39074+ continue;
39075+
39076+ entry->reserved = false;
39077+ ttm_bo_unreserve(bo);
39078+ }
39079+}
39080+
39081+/*
39082+ * Reserve buffers for validation.
39083+ *
39084+ * If a buffer in the list is marked for CPU access, we back off and
39085+ * wait for that buffer to become free for GPU access.
39086+ *
39087+ * If a buffer is reserved for another validation, the validator with
39088+ * the highest validation sequence backs off and waits for that buffer
39089+ * to become unreserved. This prevents deadlocks when validating multiple
39090+ * buffers in different orders.
39091+ */
39092+
39093+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
39094+{
39095+ struct ttm_validate_buffer *entry;
39096+ int ret;
39097+
39098+ retry:
39099+ list_for_each_entry(entry, list, head) {
39100+ struct ttm_buffer_object *bo = entry->bo;
39101+
39102+ entry->reserved = false;
39103+ ret = ttm_bo_reserve(bo, true, false, true, val_seq);
39104+ if (ret != 0) {
39105+ ttm_eu_backoff_reservation(list);
39106+ if (ret == -EAGAIN) {
39107+ ret = ttm_bo_wait_unreserved(bo, true);
39108+ if (unlikely(ret != 0))
39109+ return ret;
39110+ goto retry;
39111+ } else
39112+ return ret;
39113+ }
39114+
39115+ entry->reserved = true;
39116+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
39117+ ttm_eu_backoff_reservation(list);
39118+ ret = ttm_bo_wait_cpu(bo, false);
39119+ if (ret)
39120+ return ret;
39121+ goto retry;
39122+ }
39123+ }
39124+ return 0;
39125+}
39126+
39127+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
39128+{
39129+ struct ttm_validate_buffer *entry;
39130+
39131+ list_for_each_entry(entry, list, head) {
39132+ struct ttm_buffer_object *bo = entry->bo;
39133+ struct ttm_bo_driver *driver = bo->bdev->driver;
39134+ void *old_sync_obj;
39135+
39136+ mutex_lock(&bo->mutex);
39137+ old_sync_obj = bo->sync_obj;
39138+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
39139+ bo->sync_obj_arg = entry->new_sync_obj_arg;
39140+ mutex_unlock(&bo->mutex);
39141+ ttm_bo_unreserve(bo);
39142+ entry->reserved = false;
39143+ if (old_sync_obj)
39144+ driver->sync_obj_unref(&old_sync_obj);
39145+ }
39146+}
39147diff --git a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h
39148new file mode 100644
39149index 0000000..6577f63
39150--- /dev/null
39151+++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h
39152@@ -0,0 +1,110 @@
39153+/**************************************************************************
39154+ *
39155+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
39156+ * All Rights Reserved.
39157+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
39158+ * All Rights Reserved.
39159+ *
39160+ * Permission is hereby granted, free of charge, to any person obtaining a
39161+ * copy of this software and associated documentation files (the
39162+ * "Software"), to deal in the Software without restriction, including
39163+ * without limitation the rights to use, copy, modify, merge, publish,
39164+ * distribute, sub license, and/or sell copies of the Software, and to
39165+ * permit persons to whom the Software is furnished to do so, subject to
39166+ * the following conditions:
39167+ *
39168+ * The above copyright notice and this permission notice (including the
39169+ * next paragraph) shall be included in all copies or substantial portions
39170+ * of the Software.
39171+ *
39172+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
39173+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
39174+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
39175+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
39176+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
39177+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
39178+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
39179+ *
39180+ **************************************************************************/
39181+/*
39182+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
39183+ */
39184+
39185+#ifndef _TTM_EXECBUF_UTIL_H_
39186+#define _TTM_EXECBUF_UTIL_H_
39187+
39188+#include "ttm/ttm_bo_api.h"
39189+#include "ttm/ttm_fence_api.h"
39190+#include <linux/list.h>
39191+
39192+/**
39193+ * struct ttm_validate_buffer
39194+ *
39195+ * @head: list head for thread-private list.
39196+ * @bo: refcounted buffer object pointer.
39197+ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
39198+ * adding a new sync object.
39200+ * @reserved: Indicates whether @bo has been reserved for validation.
39200+ */
39201+
39202+struct ttm_validate_buffer {
39203+ struct list_head head;
39204+ struct ttm_buffer_object *bo;
39205+ void *new_sync_obj_arg;
39206+ bool reserved;
39207+};
39208+
39209+/**
39210+ * function ttm_eu_backoff_reservation
39211+ *
39212+ * @list: thread private list of ttm_validate_buffer structs.
39213+ *
39214+ * Undoes all buffer validation reservations for bos pointed to by
39215+ * the list entries.
39216+ */
39217+
39218+extern void ttm_eu_backoff_reservation(struct list_head *list);
39219+
39220+/**
39221+ * function ttm_eu_reserve_buffers
39222+ *
39223+ * @list: thread private list of ttm_validate_buffer structs.
39224+ * @val_seq: A unique sequence number.
39225+ *
39226+ * Tries to reserve bos pointed to by the list entries for validation.
39227+ * If the function returns 0, all buffers are marked as "unfenced",
39228+ * taken off the lru lists and are not synced for write CPU usage.
39229+ *
39230+ * If the function detects a deadlock due to multiple threads trying to
39231+ * reserve the same buffers in reverse order, all threads except one will
39232+ * back off and retry. This function may sleep while waiting for
39233+ * CPU write reservations to be cleared, and for other threads to
39234+ * unreserve their buffers.
39235+ *
39236+ * This function may return -ERESTART or -EAGAIN if the calling process
39237+ * receives a signal while waiting. In that case, no buffers on the list
39238+ * will be reserved upon return.
39239+ *
39240+ * Buffers reserved by this function should be unreserved by
39241+ * a call to either ttm_eu_backoff_reservation() or
39242+ * ttm_eu_fence_buffer_objects() when command submission is complete or
39243+ * has failed.
39244+ */
39245+
39246+extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
39247+
39248+/**
39249+ * function ttm_eu_fence_buffer_objects.
39250+ *
39251+ * @list: thread private list of ttm_validate_buffer structs.
39252+ * @sync_obj: The new sync object for the buffers.
39253+ *
39254+ * This function should be called when command submission is complete, and
39255+ * it will add a new sync object to bos pointed to by entries on @list.
39256+ * It also unreserves all buffers, putting them on lru lists.
39257+ *
39258+ */
39259+
39260+extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
39261+
39262+#endif
39263diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence.c b/drivers/gpu/drm/psb/ttm/ttm_fence.c
39264new file mode 100644
39265index 0000000..115e7b7
39266--- /dev/null
39267+++ b/drivers/gpu/drm/psb/ttm/ttm_fence.c
39268@@ -0,0 +1,607 @@
39269+/**************************************************************************
39270+ *
39271+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
39272+ * All Rights Reserved.
39273+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
39274+ * All Rights Reserved.
39275+ *
39276+ * Permission is hereby granted, free of charge, to any person obtaining a
39277+ * copy of this software and associated documentation files (the
39278+ * "Software"), to deal in the Software without restriction, including
39279+ * without limitation the rights to use, copy, modify, merge, publish,
39280+ * distribute, sub license, and/or sell copies of the Software, and to
39281+ * permit persons to whom the Software is furnished to do so, subject to
39282+ * the following conditions:
39283+ *
39284+ * The above copyright notice and this permission notice (including the
39285+ * next paragraph) shall be included in all copies or substantial portions
39286+ * of the Software.
39287+ *
39288+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
39289+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
39290+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
39291+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
39292+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
39293+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
39294+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
39295+ *
39296+ **************************************************************************/
39297+/*
39298+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
39299+ */
39300+
39301+#include "ttm/ttm_fence_api.h"
39302+#include "ttm/ttm_fence_driver.h"
39303+#include <linux/wait.h>
39304+#include <linux/sched.h>
39305+
39306+#include <drm/drmP.h>
39307+
39308+/*
39309+ * Simple implementation for now.
39310+ */
39311+
39312+static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask)
39313+{
39314+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39315+
39316+ printk(KERN_ERR "GPU lockup dectected on engine %u "
39317+ "fence type 0x%08x\n",
39318+ (unsigned int)fence->fence_class, (unsigned int)mask);
39319+ /*
39320+ * Give engines some time to idle?
39321+ */
39322+
39323+ write_lock(&fc->lock);
39324+ ttm_fence_handler(fence->fdev, fence->fence_class,
39325+ fence->sequence, mask, -EBUSY);
39326+ write_unlock(&fc->lock);
39327+}
39328+
39329+/*
39330+ * Convenience function to be called by fence::wait methods that
39331+ * need polling.
39332+ */
39333+
39334+int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy,
39335+ bool interruptible, uint32_t mask)
39336+{
39337+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39338+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
39339+ uint32_t count = 0;
39340+ int ret;
39341+ unsigned long end_jiffies = fence->timeout_jiffies;
39342+
39343+ DECLARE_WAITQUEUE(entry, current);
39344+ add_wait_queue(&fc->fence_queue, &entry);
39345+
39346+ ret = 0;
39347+
39348+ for (;;) {
39349+ __set_current_state((interruptible) ?
39350+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
39351+ if (ttm_fence_object_signaled(fence, mask))
39352+ break;
39353+ if (time_after_eq(jiffies, end_jiffies)) {
39354+ if (driver->lockup)
39355+ driver->lockup(fence, mask);
39356+ else
39357+ ttm_fence_lockup(fence, mask);
39358+ continue;
39359+ }
39360+ if (lazy)
39361+ schedule_timeout(1);
39362+ else if ((++count & 0x0F) == 0) {
39363+ __set_current_state(TASK_RUNNING);
39364+ schedule();
39365+ __set_current_state((interruptible) ?
39366+ TASK_INTERRUPTIBLE :
39367+ TASK_UNINTERRUPTIBLE);
39368+ }
39369+ if (interruptible && signal_pending(current)) {
39370+ ret = -ERESTART;
39371+ break;
39372+ }
39373+ }
39374+ __set_current_state(TASK_RUNNING);
39375+ remove_wait_queue(&fc->fence_queue, &entry);
39376+ return ret;
39377+}
39378+
39379+/*
39380+ * Typically called by the IRQ handler.
39381+ */
39382+
39383+void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class,
39384+ uint32_t sequence, uint32_t type, uint32_t error)
39385+{
39386+ int wake = 0;
39387+ uint32_t diff;
39388+ uint32_t relevant_type;
39389+ uint32_t new_type;
39390+ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
39391+ const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev);
39392+ struct list_head *head;
39393+ struct ttm_fence_object *fence, *next;
39394+ bool found = false;
39395+
39396+ if (list_empty(&fc->ring))
39397+ return;
39398+
39399+ list_for_each_entry(fence, &fc->ring, ring) {
39400+ diff = (sequence - fence->sequence) & fc->sequence_mask;
39401+ if (diff > fc->wrap_diff) {
39402+ found = true;
39403+ break;
39404+ }
39405+ }
39406+
39407+ fc->waiting_types &= ~type;
39408+ head = (found) ? &fence->ring : &fc->ring;
39409+
39410+ list_for_each_entry_safe_reverse(fence, next, head, ring) {
39411+ if (&fence->ring == &fc->ring)
39412+ break;
39413+
39414+ DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n",
39415+ (unsigned long)fence, fence->sequence,
39416+ fence->fence_type);
39417+
39418+ if (error) {
39419+ fence->info.error = error;
39420+ fence->info.signaled_types = fence->fence_type;
39421+ list_del_init(&fence->ring);
39422+ wake = 1;
39423+ break;
39424+ }
39425+
39426+ relevant_type = type & fence->fence_type;
39427+ new_type = (fence->info.signaled_types | relevant_type) ^
39428+ fence->info.signaled_types;
39429+
39430+ if (new_type) {
39431+ fence->info.signaled_types |= new_type;
39432+ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
39433+ (unsigned long)fence,
39434+ fence->info.signaled_types);
39435+
39436+ if (unlikely(driver->signaled))
39437+ driver->signaled(fence);
39438+
39439+ if (driver->needed_flush)
39440+ fc->pending_flush |=
39441+ driver->needed_flush(fence);
39442+
39443+ if (new_type & fence->waiting_types)
39444+ wake = 1;
39445+ }
39446+
39447+ fc->waiting_types |=
39448+ fence->waiting_types & ~fence->info.signaled_types;
39449+
39450+ if (!(fence->fence_type & ~fence->info.signaled_types)) {
39451+ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
39452+ (unsigned long)fence);
39453+ list_del_init(&fence->ring);
39454+ }
39455+ }
39456+
39457+ /*
39458+ * Reinstate lost waiting types.
39459+ */
39460+
39461+ if ((fc->waiting_types & type) != type) {
39462+ head = head->prev;
39463+ list_for_each_entry(fence, head, ring) {
39464+ if (&fence->ring == &fc->ring)
39465+ break;
39466+ diff =
39467+ (fc->highest_waiting_sequence -
39468+ fence->sequence) & fc->sequence_mask;
39469+ if (diff > fc->wrap_diff)
39470+ break;
39471+
39472+ fc->waiting_types |=
39473+ fence->waiting_types & ~fence->info.signaled_types;
39474+ }
39475+ }
39476+
39477+ if (wake)
39478+ wake_up_all(&fc->fence_queue);
39479+}
39480+
39481+static void ttm_fence_unring(struct ttm_fence_object *fence)
39482+{
39483+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39484+ unsigned long irq_flags;
39485+
39486+ write_lock_irqsave(&fc->lock, irq_flags);
39487+ list_del_init(&fence->ring);
39488+ write_unlock_irqrestore(&fc->lock, irq_flags);
39489+}
39490+
39491+bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask)
39492+{
39493+ unsigned long flags;
39494+ bool signaled;
39495+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
39496+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39497+
39498+ mask &= fence->fence_type;
39499+ read_lock_irqsave(&fc->lock, flags);
39500+ signaled = (mask & fence->info.signaled_types) == mask;
39501+ read_unlock_irqrestore(&fc->lock, flags);
39502+ if (!signaled && driver->poll) {
39503+ write_lock_irqsave(&fc->lock, flags);
39504+ driver->poll(fence->fdev, fence->fence_class, mask);
39505+ signaled = (mask & fence->info.signaled_types) == mask;
39506+ write_unlock_irqrestore(&fc->lock, flags);
39507+ }
39508+ return signaled;
39509+}
39510+
39511+int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type)
39512+{
39513+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
39514+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39515+ unsigned long irq_flags;
39516+ uint32_t saved_pending_flush;
39517+ uint32_t diff;
39518+ bool call_flush;
39519+
39520+ if (type & ~fence->fence_type) {
39521+ DRM_ERROR("Flush trying to extend fence type, "
39522+ "0x%x, 0x%x\n", type, fence->fence_type);
39523+ return -EINVAL;
39524+ }
39525+
39526+ write_lock_irqsave(&fc->lock, irq_flags);
39527+ fence->waiting_types |= type;
39528+ fc->waiting_types |= fence->waiting_types;
39529+ diff = (fence->sequence - fc->highest_waiting_sequence) &
39530+ fc->sequence_mask;
39531+
39532+ if (diff < fc->wrap_diff)
39533+ fc->highest_waiting_sequence = fence->sequence;
39534+
39535+ /*
39536+ * fence->waiting_types has changed. Determine whether
39537+ * we need to initiate some kind of flush as a result of this.
39538+ */
39539+
39540+ saved_pending_flush = fc->pending_flush;
39541+ if (driver->needed_flush)
39542+ fc->pending_flush |= driver->needed_flush(fence);
39543+
39544+ if (driver->poll)
39545+ driver->poll(fence->fdev, fence->fence_class,
39546+ fence->waiting_types);
39547+
39548+ call_flush = (fc->pending_flush != 0);
39549+ write_unlock_irqrestore(&fc->lock, irq_flags);
39550+
39551+ if (call_flush && driver->flush)
39552+ driver->flush(fence->fdev, fence->fence_class);
39553+
39554+ return 0;
39555+}
39556+
39557+/*
39558+ * Make sure old fence objects are signaled before their fence sequences are
39559+ * wrapped around and reused.
39560+ */
39561+
39562+void ttm_fence_flush_old(struct ttm_fence_device *fdev,
39563+ uint32_t fence_class, uint32_t sequence)
39564+{
39565+ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
39566+ struct ttm_fence_object *fence;
39567+ unsigned long irq_flags;
39568+ const struct ttm_fence_driver *driver = fdev->driver;
39569+ bool call_flush;
39570+
39571+ uint32_t diff;
39572+
39573+ write_lock_irqsave(&fc->lock, irq_flags);
39574+
39575+ list_for_each_entry_reverse(fence, &fc->ring, ring) {
39576+ diff = (sequence - fence->sequence) & fc->sequence_mask;
39577+ if (diff <= fc->flush_diff)
39578+ break;
39579+
39580+ fence->waiting_types = fence->fence_type;
39581+ fc->waiting_types |= fence->fence_type;
39582+
39583+ if (driver->needed_flush)
39584+ fc->pending_flush |= driver->needed_flush(fence);
39585+ }
39586+
39587+ if (driver->poll)
39588+ driver->poll(fdev, fence_class, fc->waiting_types);
39589+
39590+ call_flush = (fc->pending_flush != 0);
39591+ write_unlock_irqrestore(&fc->lock, irq_flags);
39592+
39593+ if (call_flush && driver->flush)
39594+ driver->flush(fdev, fence->fence_class);
39595+
39596+ /*
39597+ * FIXME: Should we implement a wait here for really old fences?
39598+ */
39599+
39600+}
39601+
39602+int ttm_fence_object_wait(struct ttm_fence_object *fence,
39603+ bool lazy, bool interruptible, uint32_t mask)
39604+{
39605+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
39606+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39607+ int ret = 0;
39608+ unsigned long timeout;
39609+ unsigned long cur_jiffies;
39610+ unsigned long to_jiffies;
39611+
39612+ if (mask & ~fence->fence_type) {
39613+ DRM_ERROR("Wait trying to extend fence type"
39614+ " 0x%08x 0x%08x\n", mask, fence->fence_type);
39615+ BUG();
39616+ return -EINVAL;
39617+ }
39618+
39619+ if (driver->wait)
39620+ return driver->wait(fence, lazy, interruptible, mask);
39621+
39622+ ttm_fence_object_flush(fence, mask);
39623+ retry:
39624+ if (!driver->has_irq ||
39625+ driver->has_irq(fence->fdev, fence->fence_class, mask)) {
39626+
39627+ cur_jiffies = jiffies;
39628+ to_jiffies = fence->timeout_jiffies;
39629+
39630+ timeout = (time_after(to_jiffies, cur_jiffies)) ?
39631+ to_jiffies - cur_jiffies : 1;
39632+
39633+ if (interruptible)
39634+ ret = wait_event_interruptible_timeout
39635+ (fc->fence_queue,
39636+ ttm_fence_object_signaled(fence, mask), timeout);
39637+ else
39638+ ret = wait_event_timeout
39639+ (fc->fence_queue,
39640+ ttm_fence_object_signaled(fence, mask), timeout);
39641+
39642+ if (unlikely(ret == -ERESTARTSYS))
39643+ return -ERESTART;
39644+
39645+ if (unlikely(ret == 0)) {
39646+ if (driver->lockup)
39647+ driver->lockup(fence, mask);
39648+ else
39649+ ttm_fence_lockup(fence, mask);
39650+ goto retry;
39651+ }
39652+
39653+ return 0;
39654+ }
39655+
39656+ return ttm_fence_wait_polling(fence, lazy, interruptible, mask);
39657+}
39658+
39659+int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags,
39660+ uint32_t fence_class, uint32_t type)
39661+{
39662+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
39663+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39664+ unsigned long flags;
39665+ uint32_t sequence;
39666+ unsigned long timeout;
39667+ int ret;
39668+
39669+ ttm_fence_unring(fence);
39670+ ret = driver->emit(fence->fdev,
39671+ fence_class, fence_flags, &sequence, &timeout);
39672+ if (ret)
39673+ return ret;
39674+
39675+ write_lock_irqsave(&fc->lock, flags);
39676+ fence->fence_class = fence_class;
39677+ fence->fence_type = type;
39678+ fence->waiting_types = 0;
39679+ fence->info.signaled_types = 0;
39680+ fence->info.error = 0;
39681+ fence->sequence = sequence;
39682+ fence->timeout_jiffies = timeout;
39683+ if (list_empty(&fc->ring))
39684+ fc->highest_waiting_sequence = sequence - 1;
39685+ list_add_tail(&fence->ring, &fc->ring);
39686+ fc->latest_queued_sequence = sequence;
39687+ write_unlock_irqrestore(&fc->lock, flags);
39688+ return 0;
39689+}
39690+
39691+int ttm_fence_object_init(struct ttm_fence_device *fdev,
39692+ uint32_t fence_class,
39693+ uint32_t type,
39694+ uint32_t create_flags,
39695+ void (*destroy) (struct ttm_fence_object *),
39696+ struct ttm_fence_object *fence)
39697+{
39698+ int ret = 0;
39699+
39700+ kref_init(&fence->kref);
39701+ fence->fence_class = fence_class;
39702+ fence->fence_type = type;
39703+ fence->info.signaled_types = 0;
39704+ fence->waiting_types = 0;
39705+ fence->sequence = 0;
39706+ fence->info.error = 0;
39707+ fence->fdev = fdev;
39708+ fence->destroy = destroy;
39709+ INIT_LIST_HEAD(&fence->ring);
39710+ atomic_inc(&fdev->count);
39711+
39712+ if (create_flags & TTM_FENCE_FLAG_EMIT) {
39713+ ret = ttm_fence_object_emit(fence, create_flags,
39714+ fence->fence_class, type);
39715+ }
39716+
39717+ return ret;
39718+}
39719+
39720+int ttm_fence_object_create(struct ttm_fence_device *fdev,
39721+ uint32_t fence_class,
39722+ uint32_t type,
39723+ uint32_t create_flags,
39724+ struct ttm_fence_object **c_fence)
39725+{
39726+ struct ttm_fence_object *fence;
39727+ int ret;
39728+
39729+ ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*fence), false, false, false);
39730+ if (unlikely(ret != 0)) {
39731+ printk(KERN_ERR "Out of memory creating fence object\n");
39732+ return ret;
39733+ }
39734+
39735+ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
39736+ if (!fence) {
39737+ printk(KERN_ERR "Out of memory creating fence object\n");
39738+ ttm_mem_global_free(fdev->mem_glob, sizeof(*fence), false);
39739+ return -ENOMEM;
39740+ }
39741+
39742+ ret = ttm_fence_object_init(fdev, fence_class, type,
39743+ create_flags, NULL, fence);
39744+ if (ret) {
39745+ ttm_fence_object_unref(&fence);
39746+ return ret;
39747+ }
39748+ *c_fence = fence;
39749+
39750+ return 0;
39751+}
39752+
39753+static void ttm_fence_object_destroy(struct kref *kref)
39754+{
39755+ struct ttm_fence_object *fence =
39756+ container_of(kref, struct ttm_fence_object, kref);
39757+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39758+ unsigned long irq_flags;
39759+
39760+ write_lock_irqsave(&fc->lock, irq_flags);
39761+ list_del_init(&fence->ring);
39762+ write_unlock_irqrestore(&fc->lock, irq_flags);
39763+
39764+ atomic_dec(&fence->fdev->count);
39765+ if (fence->destroy)
39766+ fence->destroy(fence);
39767+ else {
39768+ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*fence), false);
39769+ kfree(fence);
39770+ }
39771+}
39772+
39773+void ttm_fence_device_release(struct ttm_fence_device *fdev)
39774+{
39775+ kfree(fdev->fence_class);
39776+}
39777+
39778+int
39779+ttm_fence_device_init(int num_classes,
39780+ struct ttm_mem_global *mem_glob,
39781+ struct ttm_fence_device *fdev,
39782+ const struct ttm_fence_class_init *init,
39783+ bool replicate_init, const struct ttm_fence_driver *driver)
39784+{
39785+ struct ttm_fence_class_manager *fc;
39786+ const struct ttm_fence_class_init *fci;
39787+ int i;
39788+
39789+ fdev->mem_glob = mem_glob;
39790+ fdev->fence_class = kzalloc(num_classes *
39791+ sizeof(*fdev->fence_class), GFP_KERNEL);
39792+
39793+ if (unlikely(!fdev->fence_class))
39794+ return -ENOMEM;
39795+
39796+ fdev->num_classes = num_classes;
39797+ atomic_set(&fdev->count, 0);
39798+ fdev->driver = driver;
39799+
39800+ for (i = 0; i < fdev->num_classes; ++i) {
39801+ fc = &fdev->fence_class[i];
39802+ fci = &init[(replicate_init) ? 0 : i];
39803+
39804+ fc->wrap_diff = fci->wrap_diff;
39805+ fc->flush_diff = fci->flush_diff;
39806+ fc->sequence_mask = fci->sequence_mask;
39807+
39808+ rwlock_init(&fc->lock);
39809+ INIT_LIST_HEAD(&fc->ring);
39810+ init_waitqueue_head(&fc->fence_queue);
39811+ }
39812+
39813+ return 0;
39814+}
39815+
39816+struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence)
39817+{
39818+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
39819+ struct ttm_fence_info tmp;
39820+ unsigned long irq_flags;
39821+
39822+ read_lock_irqsave(&fc->lock, irq_flags);
39823+ tmp = fence->info;
39824+ read_unlock_irqrestore(&fc->lock, irq_flags);
39825+
39826+ return tmp;
39827+}
39828+
39829+void ttm_fence_object_unref(struct ttm_fence_object **p_fence)
39830+{
39831+ struct ttm_fence_object *fence = *p_fence;
39832+
39833+ *p_fence = NULL;
39834+ (void)kref_put(&fence->kref, &ttm_fence_object_destroy);
39835+}
39836+
39837+/*
39838+ * Placement / BO sync object glue.
39839+ */
39840+
39841+bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg)
39842+{
39843+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
39844+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
39845+
39846+ return ttm_fence_object_signaled(fence, fence_types);
39847+}
39848+
39849+int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
39850+ bool lazy, bool interruptible)
39851+{
39852+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
39853+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
39854+
39855+ return ttm_fence_object_wait(fence, lazy, interruptible, fence_types);
39856+}
39857+
39858+int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg)
39859+{
39860+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
39861+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
39862+
39863+ return ttm_fence_object_flush(fence, fence_types);
39864+}
39865+
39866+void ttm_fence_sync_obj_unref(void **sync_obj)
39867+{
39868+ ttm_fence_object_unref((struct ttm_fence_object **)sync_obj);
39869+}
39870+
39871+void *ttm_fence_sync_obj_ref(void *sync_obj)
39872+{
39873+ return (void *)
39874+ ttm_fence_object_ref((struct ttm_fence_object *)sync_obj);
39875+}
39876diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence_api.h b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h
39877new file mode 100644
39878index 0000000..2a4e12b
39879--- /dev/null
39880+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h
39881@@ -0,0 +1,277 @@
39882+/**************************************************************************
39883+ *
39884+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
39885+ * All Rights Reserved.
39886+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
39887+ * All Rights Reserved.
39888+ *
39889+ * Permission is hereby granted, free of charge, to any person obtaining a
39890+ * copy of this software and associated documentation files (the
39891+ * "Software"), to deal in the Software without restriction, including
39892+ * without limitation the rights to use, copy, modify, merge, publish,
39893+ * distribute, sub license, and/or sell copies of the Software, and to
39894+ * permit persons to whom the Software is furnished to do so, subject to
39895+ * the following conditions:
39896+ *
39897+ * The above copyright notice and this permission notice (including the
39898+ * next paragraph) shall be included in all copies or substantial portions
39899+ * of the Software.
39900+ *
39901+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
39902+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
39903+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
39904+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
39905+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
39906+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
39907+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
39908+ *
39909+ **************************************************************************/
39910+/*
39911+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
39912+ */
39913+#ifndef _TTM_FENCE_API_H_
39914+#define _TTM_FENCE_API_H_
39915+
39916+#include <linux/list.h>
39917+#include <linux/kref.h>
39918+
39919+#define TTM_FENCE_FLAG_EMIT (1 << 0)
39920+#define TTM_FENCE_TYPE_EXE (1 << 0)
39921+
39922+struct ttm_fence_device;
39923+
39924+/**
39925+ * struct ttm_fence_info
39926+ *
39927+ * @fence_class: The fence class.
39928+ * @fence_type: Bitfield indicating types for this fence.
39929+ * @signaled_types: Bitfield indicating which types are signaled.
39930+ * @error: Last error reported from the device.
39931+ *
39932+ * Used as output from the ttm_fence_get_info
39933+ */
39934+
39935+struct ttm_fence_info {
39936+ uint32_t signaled_types;
39937+ uint32_t error;
39938+};
39939+
39940+/**
39941+ * struct ttm_fence_object
39942+ *
39943+ * @fdev: Pointer to the fence device struct.
39944+ * @kref: Holds the reference count of this fence object.
39945+ * @ring: List head used for the circular list of not-completely
39946+ * signaled fences.
39947+ * @info: Data for fast retrieval using the ttm_fence_get_info()
39948+ * function.
39949+ * @timeout_jiffies: Absolute jiffies value indicating when this fence
39950+ * object times out and, if waited on, calls ttm_fence_lockup
39951+ * to check for and resolve a GPU lockup.
39952+ * @sequence: Fence sequence number.
39953+ * @waiting_types: Types currently waited on.
39954+ * @destroy: Called to free the fence object, when its refcount has
39955+ * reached zero. If NULL, kfree is used.
39956+ *
39957+ * This struct is provided in the driver interface so that drivers can
39958+ * derive from it and create their own fence implementation. All members
39959+ * are private to the fence implementation and the fence driver callbacks.
39960+ * Otherwise a driver may access the derived object using container_of().
39961+ */
39962+
39963+struct ttm_fence_object {
39964+ struct ttm_fence_device *fdev;
39965+ struct kref kref;
39966+ uint32_t fence_class;
39967+ uint32_t fence_type;
39968+
39969+ /*
39970+ * The below fields are protected by the fence class
39971+ * manager spinlock.
39972+ */
39973+
39974+ struct list_head ring;
39975+ struct ttm_fence_info info;
39976+ unsigned long timeout_jiffies;
39977+ uint32_t sequence;
39978+ uint32_t waiting_types;
39979+ void (*destroy) (struct ttm_fence_object *);
39980+};
39981+
39982+/**
39983+ * ttm_fence_object_init
39984+ *
39985+ * @fdev: Pointer to a struct ttm_fence_device.
39986+ * @fence_class: Fence class for this fence.
39987+ * @type: Fence type for this fence.
39988+ * @create_flags: Flags indicating varios actions at init time. At this point
39989+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
39990+ * the command stream.
39991+ * @destroy: Destroy function. If NULL, kfree() is used.
39992+ * @fence: The struct ttm_fence_object to initialize.
39993+ *
39994+ * Initialize a pre-allocated fence object. This function, together with the
39995+ * destroy function makes it possible to derive driver-specific fence objects.
39996+ */
39997+
39998+extern int
39999+ttm_fence_object_init(struct ttm_fence_device *fdev,
40000+ uint32_t fence_class,
40001+ uint32_t type,
40002+ uint32_t create_flags,
40003+ void (*destroy) (struct ttm_fence_object * fence),
40004+ struct ttm_fence_object *fence);
40005+
40006+/**
40007+ * ttm_fence_object_create
40008+ *
40009+ * @fdev: Pointer to a struct ttm_fence_device.
40010+ * @fence_class: Fence class for this fence.
40011+ * @type: Fence type for this fence.
40012+ * @create_flags: Flags indicating varios actions at init time. At this point
40013+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
40014+ * the command stream.
40015+ * @c_fence: On successful termination, *(@c_fence) will point to the created
40016+ * fence object.
40017+ *
40018+ * Create and initialize a struct ttm_fence_object. The destroy function will
40019+ * be set to kfree().
40020+ */
40021+
40022+extern int
40023+ttm_fence_object_create(struct ttm_fence_device *fdev,
40024+ uint32_t fence_class,
40025+ uint32_t type,
40026+ uint32_t create_flags,
40027+ struct ttm_fence_object **c_fence);
40028+
40029+/**
40030+ * ttm_fence_object_wait
40031+ *
40032+ * @fence: The fence object to wait on.
40033+ * @lazy: Allow sleeps to reduce the cpu-usage if polling.
40034+ * @interruptible: Sleep interruptible when waiting.
40035+ * @type_mask: Wait for the given type_mask to signal.
40036+ *
40037+ * Wait for a fence to signal the given type_mask. The function will
40038+ * perform a fence_flush using type_mask. (See ttm_fence_object_flush).
40039+ *
40040+ * Returns
40041+ * -ERESTART if interrupted by a signal.
40042+ * May return driver-specific error codes if timed-out.
40043+ */
40044+
40045+extern int
40046+ttm_fence_object_wait(struct ttm_fence_object *fence,
40047+ bool lazy, bool interruptible, uint32_t type_mask);
40048+
40049+/**
40050+ * ttm_fence_object_flush
40051+ *
40052+ * @fence: The fence object to flush.
40053+ * @flush_mask: Fence types to flush.
40054+ *
40055+ * Make sure that the given fence eventually signals the
40056+ * types indicated by @flush_mask. Note that this may or may not
40057+ * map to a CPU or GPU flush.
40058+ */
40059+
40060+extern int
40061+ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t flush_mask);
40062+
40063+/**
40064+ * ttm_fence_get_info
40065+ *
40066+ * @fence: The fence object.
40067+ *
40068+ * Copy the info block from the fence while holding relevant locks.
40069+ */
40070+
40071+struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence);
40072+
40073+/**
40074+ * ttm_fence_object_ref
40075+ *
40076+ * @fence: The fence object.
40077+ *
40078+ * Return a ref-counted pointer to the fence object indicated by @fence.
40079+ */
40080+
40081+static inline struct ttm_fence_object *ttm_fence_object_ref(struct
40082+ ttm_fence_object
40083+ *fence)
40084+{
40085+ kref_get(&fence->kref);
40086+ return fence;
40087+}
40088+
40089+/**
40090+ * ttm_fence_object_unref
40091+ *
40092+ * @p_fence: Pointer to a ref-counted pinter to a struct ttm_fence_object.
40093+ *
40094+ * Unreference the fence object pointed to by *(@p_fence), clearing
40095+ * *(p_fence).
40096+ */
40097+
40098+extern void ttm_fence_object_unref(struct ttm_fence_object **p_fence);
40099+
40100+/**
40101+ * ttm_fence_object_signaled
40102+ *
40103+ * @fence: Pointer to the struct ttm_fence_object.
40104+ * @mask: Type mask to check whether signaled.
40105+ *
40106+ * This function checks (without waiting) whether the fence object
40107+ * pointed to by @fence has signaled the types indicated by @mask,
40108+ * and returns 1 if true, 0 if false. This function does NOT perform
40109+ * an implicit fence flush.
40110+ */
40111+
40112+extern bool
40113+ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask);
40114+
40115+/**
40116+ * ttm_fence_class
40117+ *
40118+ * @fence: Pointer to the struct ttm_fence_object.
40119+ *
40120+ * Convenience function that returns the fence class of a struct ttm_fence_object.
40121+ */
40122+
40123+static inline uint32_t ttm_fence_class(const struct ttm_fence_object *fence)
40124+{
40125+ return fence->fence_class;
40126+}
40127+
40128+/**
40129+ * ttm_fence_types
40130+ *
40131+ * @fence: Pointer to the struct ttm_fence_object.
40132+ *
40133+ * Convenience function that returns the fence types of a struct ttm_fence_object.
40134+ */
40135+
40136+static inline uint32_t ttm_fence_types(const struct ttm_fence_object *fence)
40137+{
40138+ return fence->fence_type;
40139+}
40140+
40141+/*
40142+ * The functions below are wrappers to the above functions, with
40143+ * similar names but with sync_obj omitted. These wrappers are intended
40144+ * to be plugged directly into the buffer object driver's sync object
40145+ * API, if the driver chooses to use ttm_fence_objects as buffer object
40146+ * sync objects. In the prototypes below, a sync_obj is cast to a
40147+ * struct ttm_fence_object, whereas a sync_arg is cast to an uint32_t representing
40148+ * a fence_type argument.
40149+ */
40150+
40151+extern bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg);
40152+extern int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
40153+ bool lazy, bool interruptible);
40154+extern int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg);
40155+extern void ttm_fence_sync_obj_unref(void **sync_obj);
40156+extern void *ttm_fence_sync_obj_ref(void *sync_obj);
40157+
40158+#endif
40159diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h
40160new file mode 100644
40161index 0000000..2eca494
40162--- /dev/null
40163+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h
40164@@ -0,0 +1,309 @@
40165+/**************************************************************************
40166+ *
40167+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
40168+ * All Rights Reserved.
40169+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
40170+ * All Rights Reserved.
40171+ *
40172+ * Permission is hereby granted, free of charge, to any person obtaining a
40173+ * copy of this software and associated documentation files (the
40174+ * "Software"), to deal in the Software without restriction, including
40175+ * without limitation the rights to use, copy, modify, merge, publish,
40176+ * distribute, sub license, and/or sell copies of the Software, and to
40177+ * permit persons to whom the Software is furnished to do so, subject to
40178+ * the following conditions:
40179+ *
40180+ * The above copyright notice and this permission notice (including the
40181+ * next paragraph) shall be included in all copies or substantial portions
40182+ * of the Software.
40183+ *
40184+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
40185+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
40186+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
40187+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
40188+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
40189+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
40190+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
40191+ *
40192+ **************************************************************************/
40193+/*
40194+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
40195+ */
40196+#ifndef _TTM_FENCE_DRIVER_H_
40197+#define _TTM_FENCE_DRIVER_H_
40198+
40199+#include <linux/kref.h>
40200+#include <linux/spinlock.h>
40201+#include <linux/wait.h>
40202+#include "ttm_fence_api.h"
40203+#include "ttm_memory.h"
40204+
40205+/** @file ttm_fence_driver.h
40206+ *
40207+ * Definitions needed for a driver implementing the
40208+ * ttm_fence subsystem.
40209+ */
40210+
40211+/**
40212+ * struct ttm_fence_class_manager:
40213+ *
40214+ * @wrap_diff: Sequence difference to catch 32-bit wrapping.
40215+ * if (seqa - seqb) > @wrap_diff, then seqa < seqb.
40216+ * @flush_diff: Sequence difference to trigger fence flush.
40217+ * if (cur_seq - seqa) > @flush_diff, then consider fence object with
40218+ * seqa as old an needing a flush.
40219+ * @sequence_mask: Mask of valid bits in a fence sequence.
40220+ * @lock: Lock protecting this struct as well as fence objects
40221+ * associated with this struct.
40222+ * @ring: Circular sequence-ordered list of fence objects.
40223+ * @pending_flush: Fence types currently needing a flush.
40224+ * @waiting_types: Fence types that are currently waited for.
40225+ * @fence_queue: Queue of waiters on fences belonging to this fence class.
40226+ * @highest_waiting_sequence: Sequence number of the fence with highest sequence
40227+ * number and that is waited for.
40228+ * @latest_queued_sequence: Sequence number of the fence latest queued on the ring.
40229+ */
40230+
40231+struct ttm_fence_class_manager {
40232+
40233+ /*
40234+ * Unprotected constant members.
40235+ */
40236+
40237+ uint32_t wrap_diff;
40238+ uint32_t flush_diff;
40239+ uint32_t sequence_mask;
40240+
40241+ /*
40242+ * The rwlock protects this structure as well as
40243+ * the data in all fence objects belonging to this
40244+ * class. This should be OK as most fence objects are
40245+ * only read from once they're created.
40246+ */
40247+
40248+ rwlock_t lock;
40249+ struct list_head ring;
40250+ uint32_t pending_flush;
40251+ uint32_t waiting_types;
40252+ wait_queue_head_t fence_queue;
40253+ uint32_t highest_waiting_sequence;
40254+ uint32_t latest_queued_sequence;
40255+};
40256+
40257+/**
40258+ * struct ttm_fence_device
40259+ *
40260+ * @fence_class: Array of fence class managers.
40261+ * @num_classes: Array dimension of @fence_class.
40262+ * @count: Current number of fence objects for statistics.
40263+ * @driver: Driver struct.
40264+ *
40265+ * Provided in the driver interface so that the driver can derive
40266+ * from this struct for its driver_private, and accordingly
40267+ * access the driver_private from the fence driver callbacks.
40268+ *
40269+ * All members except "count" are initialized at creation and
40270+ * never touched after that. No protection needed.
40271+ *
40272+ * This struct is private to the fence implementation and to the fence
40273+ * driver callbacks, and may otherwise be used by drivers only to
40274+ * obtain the derived device_private object using container_of().
40275+ */
40276+
40277+struct ttm_fence_device {
40278+ struct ttm_mem_global *mem_glob;
40279+ struct ttm_fence_class_manager *fence_class;
40280+ uint32_t num_classes;
40281+ atomic_t count;
40282+ const struct ttm_fence_driver *driver;
40283+};
40284+
40285+/**
40286+ * struct ttm_fence_class_init
40287+ *
40288+ * @wrap_diff: Fence sequence number wrap indicator. If
40289+ * (sequence1 - sequence2) > @wrap_diff, then sequence1 is
40290+ * considered to be older than sequence2.
40291+ * @flush_diff: Fence sequence number flush indicator.
40292+ * If a non-completely-signaled fence has a fence sequence number
40293+ * sequence1 and (sequence1 - current_emit_sequence) > @flush_diff,
40294+ * the fence is considered too old and it will be flushed upon the
40295+ * next call of ttm_fence_flush_old(), to make sure no fences with
40296+ * stale sequence numbers remains unsignaled. @flush_diff should
40297+ * be sufficiently less than @wrap_diff.
40298+ * @sequence_mask: Mask with valid bits of the fence sequence
40299+ * number set to 1.
40300+ *
40301+ * This struct is used as input to ttm_fence_device_init.
40302+ */
40303+
40304+struct ttm_fence_class_init {
40305+ uint32_t wrap_diff;
40306+ uint32_t flush_diff;
40307+ uint32_t sequence_mask;
40308+};
40309+
40310+/**
40311+ * struct ttm_fence_driver
40312+ *
40313+ * @has_irq: Called by a potential waiter. Should return 1 if a
40314+ * fence object with indicated parameters is expected to signal
40315+ * automatically, and 0 if the fence implementation needs to
40316+ * repeatedly call @poll to make it signal.
40317+ * @emit: Make sure a fence with the given parameters is
40318+ * present in the indicated command stream. Return its sequence number
40319+ * in "breadcrumb".
40320+ * @poll: Check and report sequences of the given "fence_class"
40321+ * that have signaled "types"
40322+ * @flush: Make sure that the types indicated by the bitfield
40323+ * ttm_fence_class_manager::pending_flush will eventually
40324+ * signal. These bits have been put together using the
40325+ * result from the needed_flush function described below.
40326+ * @needed_flush: Given the fence_class and fence_types indicated by
40327+ * "fence", and the last received fence sequence of this
40328+ * fence class, indicate what types need a fence flush to
40329+ * signal. Return as a bitfield.
40330+ * @wait: Set to non-NULL if the driver wants to override the fence
40331+ * wait implementation. Return 0 on success, -EBUSY on failure,
40332+ * and -ERESTART if interruptible and a signal is pending.
40333+ * @signaled: Driver callback that is called whenever a
40334+ * ttm_fence_object::signaled_types has changed status.
40335+ * This function is called from atomic context,
40336+ * with the ttm_fence_class_manager::lock held in write mode.
40337+ * @lockup: Driver callback that is called whenever a wait has exceeded
40338+ * the lifetime of a fence object.
40339+ * If there is a GPU lockup,
40340+ * this function should, if possible, reset the GPU,
40341+ * call the ttm_fence_handler with an error status, and
40342+ * return. If no lockup was detected, simply extend the
40343+ * fence timeout_jiffies and return. The driver might
40344+ * want to protect the lockup check with a mutex and cache a
40345+ * non-locked-up status for a while to avoid an excessive
40346+ * amount of lockup checks from every waiting thread.
40347+ */
40348+
40349+struct ttm_fence_driver {
40350+ bool (*has_irq) (struct ttm_fence_device * fdev,
40351+ uint32_t fence_class, uint32_t flags);
40352+ int (*emit) (struct ttm_fence_device * fdev,
40353+ uint32_t fence_class,
40354+ uint32_t flags,
40355+ uint32_t * breadcrumb, unsigned long *timeout_jiffies);
40356+ void (*flush) (struct ttm_fence_device * fdev, uint32_t fence_class);
40357+ void (*poll) (struct ttm_fence_device * fdev,
40358+ uint32_t fence_class, uint32_t types);
40359+ uint32_t(*needed_flush)
40360+ (struct ttm_fence_object * fence);
40361+ int (*wait) (struct ttm_fence_object * fence, bool lazy,
40362+ bool interruptible, uint32_t mask);
40363+ void (*signaled) (struct ttm_fence_object * fence);
40364+ void (*lockup) (struct ttm_fence_object * fence, uint32_t fence_types);
40365+};
40366+
40367+/**
40368+ * function ttm_fence_device_init
40369+ *
40370+ * @num_classes: Number of fence classes for this fence implementation.
40371+ * @mem_global: Pointer to the global memory accounting info.
40372+ * @fdev: Pointer to an uninitialised struct ttm_fence_device.
40373+ * @init: Array of initialization info for each fence class.
40374+ * @replicate_init: Use the first @init initialization info for all classes.
40375+ * @driver: Driver callbacks.
40376+ *
40377+ * Initialize a struct ttm_fence_driver structure. Returns -ENOMEM if
40378+ * out-of-memory. Otherwise returns 0.
40379+ */
40380+extern int
40381+ttm_fence_device_init(int num_classes,
40382+ struct ttm_mem_global *mem_glob,
40383+ struct ttm_fence_device *fdev,
40384+ const struct ttm_fence_class_init *init,
40385+ bool replicate_init,
40386+ const struct ttm_fence_driver *driver);
40387+
40388+/**
40389+ * function ttm_fence_device_release
40390+ *
40391+ * @fdev: Pointer to the fence device.
40392+ *
40393+ * Release all resources held by a fence device. Note that before
40394+ * this function is called, the caller must have made sure all fence
40395+ * objects belonging to this fence device are completely signaled.
40396+ */
40397+
40398+extern void ttm_fence_device_release(struct ttm_fence_device *fdev);
40399+
40400+/**
40401+ * ttm_fence_handler - the fence handler.
40402+ *
40403+ * @fdev: Pointer to the fence device.
40404+ * @fence_class: Fence class that signals.
40405+ * @sequence: Signaled sequence.
40406+ * @type: Types that signal.
40407+ * @error: Error from the engine.
40408+ *
40409+ * This function signals all fences with a sequence previous to the
40410+ * @sequence argument, and belonging to @fence_class. The signaled fence
40411+ * types are provided in @type. If error is non-zero, the error member
40412+ * of the fence with sequence = @sequence is set to @error. This value
40413+ * may be reported back to user-space, indicating, for example an illegal
40414+ * 3D command or illegal mpeg data.
40415+ *
40416+ * This function is typically called from the driver::poll method when the
40417+ * command sequence preceding the fence marker has executed. It should be
40418+ * called with the ttm_fence_class_manager::lock held in write mode and
40419+ * may be called from interrupt context.
40420+ */
40421+
40422+extern void
40423+ttm_fence_handler(struct ttm_fence_device *fdev,
40424+ uint32_t fence_class,
40425+ uint32_t sequence, uint32_t type, uint32_t error);
40426+
40427+/**
40428+ * ttm_fence_driver_from_dev
40429+ *
40430+ * @fdev: The ttm fence device.
40431+ *
40432+ * Returns a pointer to the fence driver struct.
40433+ */
40434+
40435+static inline const struct ttm_fence_driver *ttm_fence_driver_from_dev(struct
40436+ ttm_fence_device
40437+ *fdev)
40438+{
40439+ return fdev->driver;
40440+}
40441+
40442+/**
40443+ * ttm_fence_driver
40444+ *
40445+ * @fence: Pointer to a ttm fence object.
40446+ *
40447+ * Returns a pointer to the fence driver struct.
40448+ */
40449+
40450+static inline const struct ttm_fence_driver *ttm_fence_driver(struct
40451+ ttm_fence_object
40452+ *fence)
40453+{
40454+ return ttm_fence_driver_from_dev(fence->fdev);
40455+}
40456+
40457+/**
40458+ * ttm_fence_fc
40459+ *
40460+ * @fence: Pointer to a ttm fence object.
40461+ *
40462+ * Returns a pointer to the struct ttm_fence_class_manager for the
40463+ * fence class of @fence.
40464+ */
40465+
40466+static inline struct ttm_fence_class_manager *ttm_fence_fc(struct
40467+ ttm_fence_object
40468+ *fence)
40469+{
40470+ return &fence->fdev->fence_class[fence->fence_class];
40471+}
40472+
40473+#endif
40474diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence_user.c b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c
40475new file mode 100644
40476index 0000000..d9bb787
40477--- /dev/null
40478+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c
40479@@ -0,0 +1,242 @@
40480+/**************************************************************************
40481+ *
40482+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
40483+ * All Rights Reserved.
40484+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
40485+ * All Rights Reserved.
40486+ *
40487+ * Permission is hereby granted, free of charge, to any person obtaining a
40488+ * copy of this software and associated documentation files (the
40489+ * "Software"), to deal in the Software without restriction, including
40490+ * without limitation the rights to use, copy, modify, merge, publish,
40491+ * distribute, sub license, and/or sell copies of the Software, and to
40492+ * permit persons to whom the Software is furnished to do so, subject to
40493+ * the following conditions:
40494+ *
40495+ * The above copyright notice and this permission notice (including the
40496+ * next paragraph) shall be included in all copies or substantial portions
40497+ * of the Software.
40498+ *
40499+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
40500+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
40501+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
40502+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
40503+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
40504+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
40505+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
40506+ *
40507+ **************************************************************************/
40508+/*
40509+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
40510+ */
40511+
40512+#include <drm/drmP.h>
40513+#include "ttm/ttm_fence_user.h"
40514+#include "ttm/ttm_object.h"
40515+#include "ttm/ttm_fence_driver.h"
40516+#include "ttm/ttm_userobj_api.h"
40517+
40518+/**
40519+ * struct ttm_fence_user_object
40520+ *
40521+ * @base: The base object used for user-space visibility and refcounting.
40522+ *
40523+ * @fence: The fence object itself.
40524+ *
40525+ */
40526+
40527+struct ttm_fence_user_object {
40528+ struct ttm_base_object base;
40529+ struct ttm_fence_object fence;
40530+};
40531+
40532+static struct ttm_fence_user_object *ttm_fence_user_object_lookup(struct
40533+ ttm_object_file
40534+ *tfile,
40535+ uint32_t
40536+ handle)
40537+{
40538+ struct ttm_base_object *base;
40539+
40540+ base = ttm_base_object_lookup(tfile, handle);
40541+ if (unlikely(base == NULL)) {
40542+ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
40543+ (unsigned long)handle);
40544+ return NULL;
40545+ }
40546+
40547+ if (unlikely(base->object_type != ttm_fence_type)) {
40548+ ttm_base_object_unref(&base);
40549+ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
40550+ (unsigned long)handle);
40551+ return NULL;
40552+ }
40553+
40554+ return container_of(base, struct ttm_fence_user_object, base);
40555+}
40556+
40557+/*
40558+ * The fence object destructor.
40559+ */
40560+
40561+static void ttm_fence_user_destroy(struct ttm_fence_object *fence)
40562+{
40563+ struct ttm_fence_user_object *ufence =
40564+ container_of(fence, struct ttm_fence_user_object, fence);
40565+
40566+ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence), false);
40567+ kfree(ufence);
40568+}
40569+
40570+/*
40571+ * The base object destructor. We basically unly unreference the
40572+ * attached fence object.
40573+ */
40574+
40575+static void ttm_fence_user_release(struct ttm_base_object **p_base)
40576+{
40577+ struct ttm_fence_user_object *ufence;
40578+ struct ttm_base_object *base = *p_base;
40579+ struct ttm_fence_object *fence;
40580+
40581+ *p_base = NULL;
40582+
40583+ if (unlikely(base == NULL))
40584+ return;
40585+
40586+ ufence = container_of(base, struct ttm_fence_user_object, base);
40587+ fence = &ufence->fence;
40588+ ttm_fence_object_unref(&fence);
40589+}
40590+
40591+int
40592+ttm_fence_user_create(struct ttm_fence_device *fdev,
40593+ struct ttm_object_file *tfile,
40594+ uint32_t fence_class,
40595+ uint32_t fence_types,
40596+ uint32_t create_flags,
40597+ struct ttm_fence_object **fence, uint32_t * user_handle)
40598+{
40599+ int ret;
40600+ struct ttm_fence_object *tmp;
40601+ struct ttm_fence_user_object *ufence;
40602+
40603+ ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*ufence), false, false, false);
40604+ if (unlikely(ret != 0))
40605+ return -ENOMEM;
40606+
40607+ ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
40608+ if (unlikely(ufence == NULL)) {
40609+ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
40610+ return -ENOMEM;
40611+ }
40612+
40613+ ret = ttm_fence_object_init(fdev,
40614+ fence_class,
40615+ fence_types, create_flags,
40616+ &ttm_fence_user_destroy, &ufence->fence);
40617+
40618+ if (unlikely(ret != 0))
40619+ goto out_err0;
40620+
40621+ /*
40622+ * One fence ref is held by the fence ptr we return.
40623+ * The other one by the base object. Need to up the
40624+ * fence refcount before we publish this object to
40625+ * user-space.
40626+ */
40627+
40628+ tmp = ttm_fence_object_ref(&ufence->fence);
40629+ ret = ttm_base_object_init(tfile, &ufence->base,
40630+ false, ttm_fence_type,
40631+ &ttm_fence_user_release, NULL);
40632+
40633+ if (unlikely(ret != 0))
40634+ goto out_err1;
40635+
40636+ *fence = &ufence->fence;
40637+ *user_handle = ufence->base.hash.key;
40638+
40639+ return 0;
40640+ out_err1:
40641+ ttm_fence_object_unref(&tmp);
40642+ tmp = &ufence->fence;
40643+ ttm_fence_object_unref(&tmp);
40644+ return ret;
40645+ out_err0:
40646+ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
40647+ kfree(ufence);
40648+ return ret;
40649+}
40650+
40651+int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data)
40652+{
40653+ int ret;
40654+ union ttm_fence_signaled_arg *arg = data;
40655+ struct ttm_fence_object *fence;
40656+ struct ttm_fence_info info;
40657+ struct ttm_fence_user_object *ufence;
40658+ struct ttm_base_object *base;
40659+ ret = 0;
40660+
40661+ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
40662+ if (unlikely(ufence == NULL))
40663+ return -EINVAL;
40664+
40665+ fence = &ufence->fence;
40666+
40667+ if (arg->req.flush) {
40668+ ret = ttm_fence_object_flush(fence, arg->req.fence_type);
40669+ if (unlikely(ret != 0))
40670+ goto out;
40671+ }
40672+
40673+ info = ttm_fence_get_info(fence);
40674+ arg->rep.signaled_types = info.signaled_types;
40675+ arg->rep.fence_error = info.error;
40676+
40677+ out:
40678+ base = &ufence->base;
40679+ ttm_base_object_unref(&base);
40680+ return ret;
40681+}
40682+
40683+int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data)
40684+{
40685+ int ret;
40686+ union ttm_fence_finish_arg *arg = data;
40687+ struct ttm_fence_user_object *ufence;
40688+ struct ttm_base_object *base;
40689+ struct ttm_fence_object *fence;
40690+ ret = 0;
40691+
40692+ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
40693+ if (unlikely(ufence == NULL))
40694+ return -EINVAL;
40695+
40696+ fence = &ufence->fence;
40697+
40698+ ret = ttm_fence_object_wait(fence,
40699+ arg->req.mode & TTM_FENCE_FINISH_MODE_LAZY,
40700+ true, arg->req.fence_type);
40701+ if (likely(ret == 0)) {
40702+ struct ttm_fence_info info = ttm_fence_get_info(fence);
40703+
40704+ arg->rep.signaled_types = info.signaled_types;
40705+ arg->rep.fence_error = info.error;
40706+ }
40707+
40708+ base = &ufence->base;
40709+ ttm_base_object_unref(&base);
40710+
40711+ return ret;
40712+}
40713+
40714+int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data)
40715+{
40716+ struct ttm_fence_unref_arg *arg = data;
40717+ int ret = 0;
40718+
40719+ ret = ttm_ref_object_base_unref(tfile, arg->handle, ttm_fence_type);
40720+ return ret;
40721+}
40722diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence_user.h b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h
40723new file mode 100644
40724index 0000000..0cad597
40725--- /dev/null
40726+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h
40727@@ -0,0 +1,147 @@
40728+/**************************************************************************
40729+ *
40730+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
40731+ * All Rights Reserved.
40732+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
40733+ * All Rights Reserved.
40734+ *
40735+ * Permission is hereby granted, free of charge, to any person obtaining a
40736+ * copy of this software and associated documentation files (the
40737+ * "Software"), to deal in the Software without restriction, including
40738+ * without limitation the rights to use, copy, modify, merge, publish,
40739+ * distribute, sub license, and/or sell copies of the Software, and to
40740+ * permit persons to whom the Software is furnished to do so, subject to
40741+ * the following conditions:
40742+ *
40743+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
40744+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
40745+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
40746+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
40747+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
40748+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
40749+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
40750+ *
40751+ * The above copyright notice and this permission notice (including the
40752+ * next paragraph) shall be included in all copies or substantial portions
40753+ * of the Software.
40754+ *
40755+ **************************************************************************/
40756+/*
40757+ * Authors
40758+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
40759+ */
40760+
40761+#ifndef TTM_FENCE_USER_H
40762+#define TTM_FENCE_USER_H
40763+
40764+#if !defined(__KERNEL__) && !defined(_KERNEL)
40765+#include <stdint.h>
40766+#endif
40767+
40768+#define TTM_FENCE_MAJOR 0
40769+#define TTM_FENCE_MINOR 1
40770+#define TTM_FENCE_PL 0
40771+#define TTM_FENCE_DATE "080819"
40772+
40773+/**
40774+ * struct ttm_fence_signaled_req
40775+ *
40776+ * @handle: Handle to the fence object. Input.
40777+ *
40778+ * @fence_type: Fence types we want to flush. Input.
40779+ *
40780+ * @flush: Boolean. Flush the indicated fence_types. Input.
40781+ *
40782+ * Argument to the TTM_FENCE_SIGNALED ioctl.
40783+ */
40784+
40785+struct ttm_fence_signaled_req {
40786+ uint32_t handle;
40787+ uint32_t fence_type;
40788+ int32_t flush;
40789+ uint32_t pad64;
40790+};
40791+
40792+/**
40793+ * struct ttm_fence_rep
40794+ *
40795+ * @signaled_types: Fence type that has signaled.
40796+ *
40797+ * @fence_error: Command execution error.
40798+ * Hardware errors that are consequences of the execution
40799+ * of the command stream preceding the fence are reported
40800+ * here.
40801+ *
40802+ * Output argument to the TTM_FENCE_SIGNALED and
40803+ * TTM_FENCE_FINISH ioctls.
40804+ */
40805+
40806+struct ttm_fence_rep {
40807+ uint32_t signaled_types;
40808+ uint32_t fence_error;
40809+};
40810+
40811+union ttm_fence_signaled_arg {
40812+ struct ttm_fence_signaled_req req;
40813+ struct ttm_fence_rep rep;
40814+};
40815+
40816+/*
40817+ * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
40818+ *
40819+ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
40820+ * wait.
40821+ *
40822+ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
40823+ * but return -EBUSY if the buffer is busy.
40824+ */
40825+
40826+#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
40827+#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
40828+
40829+/**
40830+ * struct ttm_fence_finish_req
40831+ *
40832+ * @handle: Handle to the fence object. Input.
40833+ *
40834+ * @fence_type: Fence types we want to finish.
40835+ *
40836+ * @mode: Wait mode.
40837+ *
40838+ * Input to the TTM_FENCE_FINISH ioctl.
40839+ */
40840+
40841+struct ttm_fence_finish_req {
40842+ uint32_t handle;
40843+ uint32_t fence_type;
40844+ uint32_t mode;
40845+ uint32_t pad64;
40846+};
40847+
40848+union ttm_fence_finish_arg {
40849+ struct ttm_fence_finish_req req;
40850+ struct ttm_fence_rep rep;
40851+};
40852+
40853+/**
40854+ * struct ttm_fence_unref_arg
40855+ *
40856+ * @handle: Handle to the fence object.
40857+ *
40858+ * Argument to the TTM_FENCE_UNREF ioctl.
40859+ */
40860+
40861+struct ttm_fence_unref_arg {
40862+ uint32_t handle;
40863+ uint32_t pad64;
40864+};
40865+
40866+/*
40867+ * Ioctl offsets from extension start.
40868+ */
40869+
40870+#define TTM_FENCE_SIGNALED 0x01
40871+#define TTM_FENCE_FINISH 0x02
40872+#define TTM_FENCE_UNREF 0x03
40873+
40874+#endif
40875diff --git a/drivers/gpu/drm/psb/ttm/ttm_lock.c b/drivers/gpu/drm/psb/ttm/ttm_lock.c
40876new file mode 100644
40877index 0000000..a3b503f
40878--- /dev/null
40879+++ b/drivers/gpu/drm/psb/ttm/ttm_lock.c
40880@@ -0,0 +1,162 @@
40881+/**************************************************************************
40882+ *
40883+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
40884+ * All Rights Reserved.
40885+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
40886+ * All Rights Reserved.
40887+ *
40888+ * Permission is hereby granted, free of charge, to any person obtaining a
40889+ * copy of this software and associated documentation files (the
40890+ * "Software"), to deal in the Software without restriction, including
40891+ * without limitation the rights to use, copy, modify, merge, publish,
40892+ * distribute, sub license, and/or sell copies of the Software, and to
40893+ * permit persons to whom the Software is furnished to do so, subject to
40894+ * the following conditions:
40895+ *
40896+ * The above copyright notice and this permission notice (including the
40897+ * next paragraph) shall be included in all copies or substantial portions
40898+ * of the Software.
40899+ *
40900+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
40901+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
40902+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
40903+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
40904+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
40905+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
40906+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
40907+ *
40908+ **************************************************************************/
40909+/*
40910+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
40911+ */
40912+
40913+#include "ttm/ttm_lock.h"
40914+#include <asm/atomic.h>
40915+#include <linux/errno.h>
40916+#include <linux/wait.h>
40917+#include <linux/sched.h>
40918+
40919+void ttm_lock_init(struct ttm_lock *lock)
40920+{
40921+ init_waitqueue_head(&lock->queue);
40922+ atomic_set(&lock->write_lock_pending, 0);
40923+ atomic_set(&lock->readers, 0);
40924+ lock->kill_takers = false;
40925+ lock->signal = SIGKILL;
40926+}
40927+
40928+void ttm_read_unlock(struct ttm_lock *lock)
40929+{
40930+ if (atomic_dec_and_test(&lock->readers))
40931+ wake_up_all(&lock->queue);
40932+}
40933+
40934+int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
40935+{
40936+ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
40937+ int ret;
40938+
40939+ if (!interruptible) {
40940+ wait_event(lock->queue,
40941+ atomic_read(&lock->write_lock_pending) == 0);
40942+ continue;
40943+ }
40944+ ret = wait_event_interruptible
40945+ (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
40946+ if (ret)
40947+ return -ERESTART;
40948+ }
40949+
40950+ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
40951+ int ret;
40952+ if (!interruptible) {
40953+ wait_event(lock->queue,
40954+ atomic_read(&lock->readers) != -1);
40955+ continue;
40956+ }
40957+ ret = wait_event_interruptible
40958+ (lock->queue, atomic_read(&lock->readers) != -1);
40959+ if (ret)
40960+ return -ERESTART;
40961+ }
40962+
40963+ if (unlikely(lock->kill_takers)) {
40964+ send_sig(lock->signal, current, 0);
40965+ ttm_read_unlock(lock);
40966+ return -ERESTART;
40967+ }
40968+
40969+ return 0;
40970+}
40971+
40972+static int __ttm_write_unlock(struct ttm_lock *lock)
40973+{
40974+ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
40975+ return -EINVAL;
40976+ wake_up_all(&lock->queue);
40977+ return 0;
40978+}
40979+
40980+static void ttm_write_lock_remove(struct ttm_base_object **p_base)
40981+{
40982+ struct ttm_base_object *base = *p_base;
40983+ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
40984+ int ret;
40985+
40986+ *p_base = NULL;
40987+ ret = __ttm_write_unlock(lock);
40988+ BUG_ON(ret != 0);
40989+}
40990+
40991+int ttm_write_lock(struct ttm_lock *lock,
40992+ bool interruptible,
40993+ struct ttm_object_file *tfile)
40994+{
40995+ int ret = 0;
40996+
40997+ atomic_inc(&lock->write_lock_pending);
40998+
40999+ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
41000+ if (!interruptible) {
41001+ wait_event(lock->queue,
41002+ atomic_read(&lock->readers) == 0);
41003+ continue;
41004+ }
41005+ ret = wait_event_interruptible
41006+ (lock->queue, atomic_read(&lock->readers) == 0);
41007+
41008+ if (ret) {
41009+ if (atomic_dec_and_test(&lock->write_lock_pending))
41010+ wake_up_all(&lock->queue);
41011+ return -ERESTART;
41012+ }
41013+ }
41014+
41015+ if (atomic_dec_and_test(&lock->write_lock_pending))
41016+ wake_up_all(&lock->queue);
41017+
41018+ if (unlikely(lock->kill_takers)) {
41019+ send_sig(lock->signal, current, 0);
41020+ __ttm_write_unlock(lock);
41021+ return -ERESTART;
41022+ }
41023+
41024+ /*
41025+ * Add a base-object, the destructor of which will
41026+ * make sure the lock is released if the client dies
41027+ * while holding it.
41028+ */
41029+
41030+ ret = ttm_base_object_init(tfile, &lock->base, false,
41031+ ttm_lock_type, &ttm_write_lock_remove, NULL);
41032+ if (ret)
41033+ (void)__ttm_write_unlock(lock);
41034+
41035+ return ret;
41036+}
41037+
41038+int ttm_write_unlock(struct ttm_lock *lock, struct ttm_object_file *tfile)
41039+{
41040+ return ttm_ref_object_base_unref(tfile,
41041+ lock->base.hash.key, TTM_REF_USAGE);
41042+}
41043diff --git a/drivers/gpu/drm/psb/ttm/ttm_lock.h b/drivers/gpu/drm/psb/ttm/ttm_lock.h
41044new file mode 100644
41045index 0000000..0169ad7
41046--- /dev/null
41047+++ b/drivers/gpu/drm/psb/ttm/ttm_lock.h
41048@@ -0,0 +1,181 @@
41049+/**************************************************************************
41050+ *
41051+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
41052+ * All Rights Reserved.
41053+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
41054+ * All Rights Reserved.
41055+ *
41056+ * Permission is hereby granted, free of charge, to any person obtaining a
41057+ * copy of this software and associated documentation files (the
41058+ * "Software"), to deal in the Software without restriction, including
41059+ * without limitation the rights to use, copy, modify, merge, publish,
41060+ * distribute, sub license, and/or sell copies of the Software, and to
41061+ * permit persons to whom the Software is furnished to do so, subject to
41062+ * the following conditions:
41063+ *
41064+ * The above copyright notice and this permission notice (including the
41065+ * next paragraph) shall be included in all copies or substantial portions
41066+ * of the Software.
41067+ *
41068+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41069+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
41070+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
41071+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
41072+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
41073+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
41074+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
41075+ *
41076+ **************************************************************************/
41077+/*
41078+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
41079+ */
41080+
41081+/** @file ttm_lock.h
41082+ * This file implements a simple replacement for the buffer manager use
41083+ * of the DRM heavyweight hardware lock.
41084+ * The lock is a read-write lock. Taking it in read mode is fast, and
41085+ * intended for in-kernel use only.
41086+ * Taking it in write mode is slow.
41087+ *
41088+ * The write mode is used only when there is a need to block all
41089+ * user-space processes from validating buffers.
41090+ * It's allowed to leave kernel space with the write lock held.
41091+ * If a user-space process dies while having the write-lock,
41092+ * it will be released during the file descriptor release.
41093+ *
41094+ * The read lock is typically placed at the start of an IOCTL- or
41095+ * user-space callable function that may end up allocating a memory area.
41096+ * This includes setstatus, super-ioctls and faults; the latter may move
41097+ * unmappable regions to mappable. It's a bug to leave kernel space with the
41098+ * read lock held.
41099+ *
41100+ * Both read- and write lock taking is interruptible for low signal-delivery
41101+ * latency. The locking functions will return -ERESTART if interrupted by a
41102+ * signal.
41103+ *
41104+ * Locking order: The lock should be taken BEFORE any TTM mutexes
41105+ * or spinlocks.
41106+ *
41107+ * Typical usages:
41108+ * a) VT-switching, when we want to clean VRAM and perhaps AGP. The lock
41109+ * stops it from being repopulated.
41110+ * b) out-of-VRAM or out-of-aperture space, in which case the process
41111+ * receiving the out-of-space notification may take the lock in write mode
41112+ * and evict all buffers prior to start validating its own buffers.
41113+ */
41114+
41115+#ifndef _TTM_LOCK_H_
41116+#define _TTM_LOCK_H_
41117+
41118+#include "ttm_object.h"
41119+#include <linux/wait.h>
41120+#include <asm/atomic.h>
41121+
41122+/**
41123+ * struct ttm_lock
41124+ *
41125+ * @base: ttm base object used solely to release the lock if the client
41126+ * holding the lock dies.
41127+ * @queue: Queue for processes waiting for lock change-of-status.
41128+ * @write_lock_pending: Flag indicating that a write-lock is pending. Avoids
41129+ * write lock starvation.
41130+ * @readers: The lock status: A negative number indicates that a write lock is
41131+ * held. Positive values indicate number of concurrent readers.
41132+ */
41133+
41134+struct ttm_lock {
41135+ struct ttm_base_object base;
41136+ wait_queue_head_t queue;
41137+ atomic_t write_lock_pending;
41138+ atomic_t readers;
41139+ bool kill_takers;
41140+ int signal;
41141+};
41142+
41143+/**
41144+ * ttm_lock_init
41145+ *
41146+ * @lock: Pointer to a struct ttm_lock
41147+ * Initializes the lock.
41148+ */
41149+extern void ttm_lock_init(struct ttm_lock *lock);
41150+
41151+/**
41152+ * ttm_read_unlock
41153+ *
41154+ * @lock: Pointer to a struct ttm_lock
41155+ *
41156+ * Releases a read lock.
41157+ */
41158+
41159+extern void ttm_read_unlock(struct ttm_lock *lock);
41160+
41161+/**
41162+ * ttm_read_unlock
41163+ *
41164+ * @lock: Pointer to a struct ttm_lock
41165+ * @interruptible: Interruptible sleeping while waiting for a lock.
41166+ *
41167+ * Takes the lock in read mode.
41168+ * Returns:
41169+ * -ERESTART If interrupted by a signal and interruptible is true.
41170+ */
41171+
41172+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
41173+
41174+/**
41175+ * ttm_write_lock
41176+ *
41177+ * @lock: Pointer to a struct ttm_lock
41178+ * @interruptible: Interruptible sleeping while waiting for a lock.
41179+ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
41180+ * application taking the lock.
41181+ *
41182+ * Takes the lock in write mode.
41183+ * Returns:
41184+ * -ERESTART If interrupted by a signal and interruptible is true.
41185+ * -ENOMEM: Out of memory when locking.
41186+ */
41187+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible,
41188+ struct ttm_object_file *tfile);
41189+
41190+/**
41191+ * ttm_write_unlock
41192+ *
41193+ * @lock: Pointer to a struct ttm_lock
41194+ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
41195+ * application taking the lock.
41196+ *
41197+ * Releases a write lock.
41198+ * Returns:
41199+ * -EINVAL If the lock was not held.
41200+ */
41201+extern int ttm_write_unlock(struct ttm_lock *lock,
41202+ struct ttm_object_file *tfile);
41203+
41204+/**
41205+ * ttm_lock_set_kill
41206+ *
41207+ * @lock: Pointer to a struct ttm_lock
41208+ * @val: Boolean whether to kill processes taking the lock.
41209+ * @signal: Signal to send to the process taking the lock.
41210+ *
41211+ * The kill-when-taking-lock functionality is used to kill processes that keep
41212+ * on using the TTM functionality when its resources has been taken down, for
41213+ * example when the X server exits. A typical sequence would look like this:
41214+ * - X server takes lock in write mode.
41215+ * - ttm_lock_set_kill() is called with @val set to true.
41216+ * - As part of X server exit, TTM resources are taken down.
41217+ * - X server releases the lock on file release.
41218+ * - Another dri client wants to render, takes the lock and is killed.
41219+ *
41220+ */
41221+
41222+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, int signal)
41223+{
41224+ lock->kill_takers = val;
41225+ if (val)
41226+ lock->signal = signal;
41227+}
41228+
41229+#endif
41230diff --git a/drivers/gpu/drm/psb/ttm/ttm_memory.c b/drivers/gpu/drm/psb/ttm/ttm_memory.c
41231new file mode 100644
41232index 0000000..75df380
41233--- /dev/null
41234+++ b/drivers/gpu/drm/psb/ttm/ttm_memory.c
41235@@ -0,0 +1,232 @@
41236+/**************************************************************************
41237+ *
41238+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
41239+ * All Rights Reserved.
41240+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
41241+ * All Rights Reserved.
41242+ *
41243+ * Permission is hereby granted, free of charge, to any person obtaining a
41244+ * copy of this software and associated documentation files (the
41245+ * "Software"), to deal in the Software without restriction, including
41246+ * without limitation the rights to use, copy, modify, merge, publish,
41247+ * distribute, sub license, and/or sell copies of the Software, and to
41248+ * permit persons to whom the Software is furnished to do so, subject to
41249+ * the following conditions:
41250+ *
41251+ * The above copyright notice and this permission notice (including the
41252+ * next paragraph) shall be included in all copies or substantial portions
41253+ * of the Software.
41254+ *
41255+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41256+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
41257+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
41258+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
41259+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
41260+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
41261+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
41262+ *
41263+ **************************************************************************/
41264+
41265+#include "ttm/ttm_memory.h"
41266+#include <linux/spinlock.h>
41267+#include <linux/sched.h>
41268+#include <linux/wait.h>
41269+#include <linux/mm.h>
41270+
41271+#define TTM_MEMORY_ALLOC_RETRIES 4
41272+
41273+/**
41274+ * At this point we only support a single shrink callback.
41275+ * Extend this if needed, perhaps using a linked list of callbacks.
41276+ * Note that this function is reentrant:
41277+ * many threads may try to swap out at any given time.
41278+ */
41279+
41280+static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
41281+ uint64_t extra)
41282+{
41283+ int ret;
41284+ struct ttm_mem_shrink *shrink;
41285+ uint64_t target;
41286+ uint64_t total_target;
41287+
41288+ spin_lock(&glob->lock);
41289+ if (glob->shrink == NULL)
41290+ goto out;
41291+
41292+ if (from_workqueue) {
41293+ target = glob->swap_limit;
41294+ total_target = glob->total_memory_swap_limit;
41295+ } else if (capable(CAP_SYS_ADMIN)) {
41296+ total_target = glob->emer_total_memory;
41297+ target = glob->emer_memory;
41298+ } else {
41299+ total_target = glob->max_total_memory;
41300+ target = glob->max_memory;
41301+ }
41302+
41303+ total_target = (extra >= total_target) ? 0: total_target - extra;
41304+ target = (extra >= target) ? 0: target - extra;
41305+
41306+ while (glob->used_memory > target ||
41307+ glob->used_total_memory > total_target) {
41308+ shrink = glob->shrink;
41309+ spin_unlock(&glob->lock);
41310+ ret = shrink->do_shrink(shrink);
41311+ spin_lock(&glob->lock);
41312+ if (unlikely(ret != 0))
41313+ goto out;
41314+ }
41315+ out:
41316+ spin_unlock(&glob->lock);
41317+}
41318+
41319+static void ttm_shrink_work(struct work_struct *work)
41320+{
41321+ struct ttm_mem_global *glob =
41322+ container_of(work, struct ttm_mem_global, work);
41323+
41324+ ttm_shrink(glob, true, 0ULL);
41325+}
41326+
41327+int ttm_mem_global_init(struct ttm_mem_global *glob)
41328+{
41329+ struct sysinfo si;
41330+ uint64_t mem;
41331+
41332+ spin_lock_init(&glob->lock);
41333+ glob->swap_queue = create_singlethread_workqueue("ttm_swap");
41334+ INIT_WORK(&glob->work, ttm_shrink_work);
41335+ init_waitqueue_head(&glob->queue);
41336+
41337+ si_meminfo(&si);
41338+
41339+ mem = si.totalram - si.totalhigh;
41340+ mem *= si.mem_unit;
41341+
41342+ glob->max_memory = mem >> 1;
41343+ glob->emer_memory = glob->max_memory + (mem >> 2);
41344+ glob->swap_limit = glob->max_memory - (mem >> 5);
41345+ glob->used_memory = 0;
41346+ glob->used_total_memory = 0;
41347+ glob->shrink = NULL;
41348+
41349+ mem = si.totalram;
41350+ mem *= si.mem_unit;
41351+
41352+ glob->max_total_memory = mem >> 1;
41353+ glob->emer_total_memory = glob->max_total_memory + (mem >> 2);
41354+ glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 5);
41355+
41356+ printk(KERN_INFO "TTM available graphics memory: %llu MiB\n",
41357+ glob->max_total_memory >> 20);
41358+ printk(KERN_INFO "TTM available object memory: %llu MiB\n",
41359+ glob->max_memory >> 20);
41360+ printk(KERN_INFO "TTM available swap breakpoint: %llu MiB\n",
41361+ glob->swap_limit >> 20);
41362+
41363+ return 0;
41364+}
41365+
41366+void ttm_mem_global_release(struct ttm_mem_global *glob)
41367+{
41368+ printk(KERN_INFO "Used total memory is %llu bytes.\n",
41369+ (unsigned long long)glob->used_total_memory);
41370+ flush_workqueue(glob->swap_queue);
41371+ destroy_workqueue(glob->swap_queue);
41372+ glob->swap_queue = NULL;
41373+}
41374+
41375+static inline void ttm_check_swapping(struct ttm_mem_global *glob)
41376+{
41377+ bool needs_swapping;
41378+
41379+ spin_lock(&glob->lock);
41380+ needs_swapping = (glob->used_memory > glob->swap_limit ||
41381+ glob->used_total_memory >
41382+ glob->total_memory_swap_limit);
41383+ spin_unlock(&glob->lock);
41384+
41385+ if (unlikely(needs_swapping))
41386+ (void)queue_work(glob->swap_queue, &glob->work);
41387+
41388+}
41389+
41390+void ttm_mem_global_free(struct ttm_mem_global *glob,
41391+ uint64_t amount, bool himem)
41392+{
41393+ spin_lock(&glob->lock);
41394+ glob->used_total_memory -= amount;
41395+ if (!himem)
41396+ glob->used_memory -= amount;
41397+ wake_up_all(&glob->queue);
41398+ spin_unlock(&glob->lock);
41399+}
41400+
41401+static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
41402+ uint64_t amount, bool himem, bool reserve)
41403+{
41404+ uint64_t limit;
41405+ uint64_t lomem_limit;
41406+ int ret = -ENOMEM;
41407+
41408+ spin_lock(&glob->lock);
41409+
41410+ if (capable(CAP_SYS_ADMIN)) {
41411+ limit = glob->emer_total_memory;
41412+ lomem_limit = glob->emer_memory;
41413+ } else {
41414+ limit = glob->max_total_memory;
41415+ lomem_limit = glob->max_memory;
41416+ }
41417+
41418+ if (unlikely(glob->used_total_memory + amount > limit))
41419+ goto out_unlock;
41420+ if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
41421+ goto out_unlock;
41422+
41423+ if (reserve) {
41424+ glob->used_total_memory += amount;
41425+ if (!himem)
41426+ glob->used_memory += amount;
41427+ }
41428+ ret = 0;
41429+ out_unlock:
41430+ spin_unlock(&glob->lock);
41431+ ttm_check_swapping(glob);
41432+
41433+ return ret;
41434+}
41435+
41436+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
41437+ bool no_wait, bool interruptible, bool himem)
41438+{
41439+ int count = TTM_MEMORY_ALLOC_RETRIES;
41440+
41441+ while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true) != 0)) {
41442+ if (no_wait)
41443+ return -ENOMEM;
41444+ if (unlikely(count-- == 0))
41445+ return -ENOMEM;
41446+ ttm_shrink(glob, false, memory + (memory >> 2) + 16);
41447+ }
41448+
41449+ return 0;
41450+}
41451+
41452+size_t ttm_round_pot(size_t size)
41453+{
41454+ if ((size & (size - 1)) == 0)
41455+ return size;
41456+ else if (size > PAGE_SIZE)
41457+ return PAGE_ALIGN(size);
41458+ else {
41459+ size_t tmp_size = 4;
41460+
41461+ while (tmp_size < size)
41462+ tmp_size <<= 1;
41463+
41464+ return tmp_size;
41465+ }
41466+ return 0;
41467+}
41468diff --git a/drivers/gpu/drm/psb/ttm/ttm_memory.h b/drivers/gpu/drm/psb/ttm/ttm_memory.h
41469new file mode 100644
41470index 0000000..9bff60f
41471--- /dev/null
41472+++ b/drivers/gpu/drm/psb/ttm/ttm_memory.h
41473@@ -0,0 +1,154 @@
41474+/**************************************************************************
41475+ *
41476+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
41477+ * All Rights Reserved.
41478+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
41479+ * All Rights Reserved.
41480+ *
41481+ * Permission is hereby granted, free of charge, to any person obtaining a
41482+ * copy of this software and associated documentation files (the
41483+ * "Software"), to deal in the Software without restriction, including
41484+ * without limitation the rights to use, copy, modify, merge, publish,
41485+ * distribute, sub license, and/or sell copies of the Software, and to
41486+ * permit persons to whom the Software is furnished to do so, subject to
41487+ * the following conditions:
41488+ *
41489+ * The above copyright notice and this permission notice (including the
41490+ * next paragraph) shall be included in all copies or substantial portions
41491+ * of the Software.
41492+ *
41493+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41494+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
41495+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
41496+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
41497+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
41498+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
41499+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
41500+ *
41501+ **************************************************************************/
41502+
41503+#ifndef TTM_MEMORY_H
41504+#define TTM_MEMORY_H
41505+
41506+#include <linux/workqueue.h>
41507+#include <linux/spinlock.h>
41508+#include <linux/wait.h>
41509+
41510+/**
41511+ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
41512+ *
41513+ * @do_shrink: The callback function.
41514+ *
41515+ * Arguments to the do_shrink functions are intended to be passed using
41516+ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
41517+ * and can be accessed using container_of().
41518+ */
41519+
41520+struct ttm_mem_shrink {
41521+ int (*do_shrink) (struct ttm_mem_shrink *);
41522+};
41523+
41524+/**
41525+ * struct ttm_mem_global - Global memory accounting structure.
41526+ *
41527+ * @shrink: A single callback to shrink TTM memory usage. Extend this
41528+ * to a linked list to be able to handle multiple callbacks when needed.
41529+ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
41530+ * need a separate workqueue since it will spend a lot of time waiting
41531+ * for the GPU, and this will otherwise block other workqueue tasks(?)
41532+ * At this point we use only a single-threaded workqueue.
41533+ * @work: The workqueue callback for the shrink queue.
41534+ * @queue: Wait queue for processes suspended waiting for memory.
41535+ * @lock: Lock to protect the @shrink - and the memory accounting members,
41536+ * that is, essentially the whole structure with some exceptions.
41537+ * @emer_memory: Lowmem memory limit available for root.
41538+ * @max_memory: Lowmem memory limit available for non-root.
41539+ * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
41540+ * @used_memory: Currently used lowmem memory.
41541+ * @used_total_memory: Currently used total (lowmem + highmem) memory.
41542+ * @total_memory_swap_limit: Total memory limit where the shrink workqueue
41543+ * kicks in.
41544+ * @max_total_memory: Total memory available to non-root processes.
41545+ * @emer_total_memory: Total memory available to root processes.
41546+ *
41547+ * Note that this structure is not per device. It should be global for all
41548+ * graphics devices.
41549+ */
41550+
41551+struct ttm_mem_global {
41552+ struct ttm_mem_shrink *shrink;
41553+ struct workqueue_struct *swap_queue;
41554+ struct work_struct work;
41555+ wait_queue_head_t queue;
41556+ spinlock_t lock;
41557+ uint64_t emer_memory;
41558+ uint64_t max_memory;
41559+ uint64_t swap_limit;
41560+ uint64_t used_memory;
41561+ uint64_t used_total_memory;
41562+ uint64_t total_memory_swap_limit;
41563+ uint64_t max_total_memory;
41564+ uint64_t emer_total_memory;
41565+};
41566+
41567+/**
41568+ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
41569+ *
41570+ * @shrink: The object to initialize.
41571+ * @func: The callback function.
41572+ */
41573+
41574+static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
41575+ int (*func) (struct ttm_mem_shrink *))
41576+{
41577+ shrink->do_shrink = func;
41578+}
41579+
41580+/**
41581+ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
41582+ *
41583+ * @glob: The struct ttm_mem_global object to register with.
41584+ * @shrink: An initialized struct ttm_mem_shrink object to register.
41585+ *
41586+ * Returns:
41587+ * -EBUSY: There's already a callback registered. (May change).
41588+ */
41589+
41590+static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
41591+ struct ttm_mem_shrink *shrink)
41592+{
41593+ spin_lock(&glob->lock);
41594+ if (glob->shrink != NULL) {
41595+ spin_unlock(&glob->lock);
41596+ return -EBUSY;
41597+ }
41598+ glob->shrink = shrink;
41599+ spin_unlock(&glob->lock);
41600+ return 0;
41601+}
41602+
41603+/**
41604+ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
41605+ *
41606+ * @glob: The struct ttm_mem_global object to unregister from.
41607+ * @shrink: A previously registered struct ttm_mem_shrink object.
41608+ *
41609+ */
41610+
41611+static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
41612+ struct ttm_mem_shrink *shrink)
41613+{
41614+ spin_lock(&glob->lock);
41615+ BUG_ON(glob->shrink != shrink);
41616+ glob->shrink = NULL;
41617+ spin_unlock(&glob->lock);
41618+}
41619+
41620+extern int ttm_mem_global_init(struct ttm_mem_global *glob);
41621+extern void ttm_mem_global_release(struct ttm_mem_global *glob);
41622+extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
41623+ bool no_wait, bool interruptible, bool himem);
41624+extern void ttm_mem_global_free(struct ttm_mem_global *glob,
41625+ uint64_t amount, bool himem);
41626+extern size_t ttm_round_pot(size_t size);
41627+#endif
41628diff --git a/drivers/gpu/drm/psb/ttm/ttm_object.c b/drivers/gpu/drm/psb/ttm/ttm_object.c
41629new file mode 100644
41630index 0000000..294a795
41631--- /dev/null
41632+++ b/drivers/gpu/drm/psb/ttm/ttm_object.c
41633@@ -0,0 +1,444 @@
41634+/**************************************************************************
41635+ *
41636+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
41637+ * All Rights Reserved.
41638+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
41639+ * All Rights Reserved.
41640+ *
41641+ * Permission is hereby granted, free of charge, to any person obtaining a
41642+ * copy of this software and associated documentation files (the
41643+ * "Software"), to deal in the Software without restriction, including
41644+ * without limitation the rights to use, copy, modify, merge, publish,
41645+ * distribute, sub license, and/or sell copies of the Software, and to
41646+ * permit persons to whom the Software is furnished to do so, subject to
41647+ * the following conditions:
41648+ *
41649+ * The above copyright notice and this permission notice (including the
41650+ * next paragraph) shall be included in all copies or substantial portions
41651+ * of the Software.
41652+ *
41653+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41654+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
41655+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
41656+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
41657+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
41658+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
41659+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
41660+ *
41661+ **************************************************************************/
41662+/*
41663+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
41664+ */
41665+/** @file ttm_ref_object.c
41666+ *
41667+ * Base- and reference object implementation for the various
41668+ * ttm objects. Implements reference counting, minimal security checks
41669+ * and release on file close.
41670+ */
41671+
41672+/**
41673+ * struct ttm_object_file
41674+ *
41675+ * @tdev: Pointer to the ttm_object_device.
41676+ *
41677+ * @lock: Lock that protects the ref_list list and the
41678+ * ref_hash hash tables.
41679+ *
41680+ * @ref_list: List of ttm_ref_objects to be destroyed at
41681+ * file release.
41682+ *
41683+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
41684+ * for fast lookup of ref objects given a base object.
41685+ */
41686+
41687+#include "ttm/ttm_object.h"
41688+#include <linux/list.h>
41689+#include <linux/spinlock.h>
41690+#include <linux/slab.h>
41691+#include <asm/atomic.h>
41692+
41693+struct ttm_object_file {
41694+ struct ttm_object_device *tdev;
41695+ rwlock_t lock;
41696+ struct list_head ref_list;
41697+ struct drm_open_hash ref_hash[TTM_REF_NUM];
41698+ struct kref refcount;
41699+};
41700+
41701+/**
41702+ * struct ttm_object_device
41703+ *
41704+ * @object_lock: lock that protects the object_hash hash table.
41705+ *
41706+ * @object_hash: hash table for fast lookup of object global names.
41707+ *
41708+ * @object_count: Per device object count.
41709+ *
41710+ * This is the per-device data structure needed for ttm object management.
41711+ */
41712+
41713+struct ttm_object_device {
41714+ rwlock_t object_lock;
41715+ struct drm_open_hash object_hash;
41716+ atomic_t object_count;
41717+ struct ttm_mem_global *mem_glob;
41718+};
41719+
41720+/**
41721+ * struct ttm_ref_object
41722+ *
41723+ * @hash: Hash entry for the per-file object reference hash.
41724+ *
41725+ * @head: List entry for the per-file list of ref-objects.
41726+ *
41727+ * @kref: Ref count.
41728+ *
41729+ * @obj: Base object this ref object is referencing.
41730+ *
41731+ * @ref_type: Type of ref object.
41732+ *
41733+ * This is similar to an idr object, but it also has a hash table entry
41734+ * that allows lookup with a pointer to the referenced object as a key. In
41735+ * that way, one can easily detect whether a base object is referenced by
41736+ * a particular ttm_object_file. It also carries a ref count to avoid creating
41737+ * multiple ref objects if a ttm_object_file references the same base object more
41738+ * than once.
41739+ */
41740+
41741+struct ttm_ref_object {
41742+ struct drm_hash_item hash;
41743+ struct list_head head;
41744+ struct kref kref;
41745+ struct ttm_base_object *obj;
41746+ enum ttm_ref_type ref_type;
41747+ struct ttm_object_file *tfile;
41748+};
41749+
41750+static inline struct ttm_object_file *
41751+ttm_object_file_ref(struct ttm_object_file *tfile)
41752+{
41753+ kref_get(&tfile->refcount);
41754+ return tfile;
41755+}
41756+
41757+static void ttm_object_file_destroy(struct kref *kref)
41758+{
41759+ struct ttm_object_file *tfile =
41760+ container_of(kref, struct ttm_object_file, refcount);
41761+
41762+// printk(KERN_INFO "Freeing 0x%08lx\n", (unsigned long) tfile);
41763+ kfree(tfile);
41764+}
41765+
41766+
41767+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
41768+{
41769+ struct ttm_object_file *tfile = *p_tfile;
41770+
41771+ *p_tfile = NULL;
41772+ kref_put(&tfile->refcount, ttm_object_file_destroy);
41773+}
41774+
41775+
41776+int ttm_base_object_init(struct ttm_object_file *tfile,
41777+ struct ttm_base_object *base,
41778+ bool shareable,
41779+ enum ttm_object_type object_type,
41780+ void (*refcount_release) (struct ttm_base_object **),
41781+ void (*ref_obj_release) (struct ttm_base_object *,
41782+ enum ttm_ref_type ref_type))
41783+{
41784+ struct ttm_object_device *tdev = tfile->tdev;
41785+ int ret;
41786+
41787+ base->shareable = shareable;
41788+ base->tfile = ttm_object_file_ref(tfile);
41789+ base->refcount_release = refcount_release;
41790+ base->ref_obj_release = ref_obj_release;
41791+ base->object_type = object_type;
41792+ write_lock(&tdev->object_lock);
41793+ kref_init(&base->refcount);
41794+ ret = drm_ht_just_insert_please(&tdev->object_hash,
41795+ &base->hash,
41796+ (unsigned long)base, 31, 0, 0);
41797+ write_unlock(&tdev->object_lock);
41798+ if (unlikely(ret != 0))
41799+ goto out_err0;
41800+
41801+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
41802+ if (unlikely(ret != 0))
41803+ goto out_err1;
41804+
41805+ ttm_base_object_unref(&base);
41806+
41807+ return 0;
41808+ out_err1:
41809+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
41810+ out_err0:
41811+ return ret;
41812+}
41813+
41814+static void ttm_release_base(struct kref *kref)
41815+{
41816+ struct ttm_base_object *base =
41817+ container_of(kref, struct ttm_base_object, refcount);
41818+ struct ttm_object_device *tdev = base->tfile->tdev;
41819+
41820+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
41821+ write_unlock(&tdev->object_lock);
41822+ if (base->refcount_release) {
41823+ ttm_object_file_unref(&base->tfile);
41824+ base->refcount_release(&base);
41825+ }
41826+ write_lock(&tdev->object_lock);
41827+}
41828+
41829+void ttm_base_object_unref(struct ttm_base_object **p_base)
41830+{
41831+ struct ttm_base_object *base = *p_base;
41832+ struct ttm_object_device *tdev = base->tfile->tdev;
41833+
41834+ // printk(KERN_INFO "TTM base object unref.\n");
41835+ *p_base = NULL;
41836+
41837+ /*
41838+ * Need to take the lock here to avoid racing with
41839+ * users trying to look up the object.
41840+ */
41841+
41842+ write_lock(&tdev->object_lock);
41843+ (void)kref_put(&base->refcount, &ttm_release_base);
41844+ write_unlock(&tdev->object_lock);
41845+}
41846+
41847+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
41848+ uint32_t key)
41849+{
41850+ struct ttm_object_device *tdev = tfile->tdev;
41851+ struct ttm_base_object *base;
41852+ struct drm_hash_item *hash;
41853+ int ret;
41854+
41855+ read_lock(&tdev->object_lock);
41856+ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
41857+
41858+ if (likely(ret == 0)) {
41859+ base = drm_hash_entry(hash, struct ttm_base_object, hash);
41860+ kref_get(&base->refcount);
41861+ }
41862+ read_unlock(&tdev->object_lock);
41863+
41864+ if (unlikely(ret != 0))
41865+ return NULL;
41866+
41867+ if (tfile != base->tfile && !base->shareable) {
41868+ printk(KERN_ERR "Attempted access of non-shareable object.\n");
41869+ ttm_base_object_unref(&base);
41870+ return NULL;
41871+ }
41872+
41873+ return base;
41874+}
41875+
41876+int ttm_ref_object_add(struct ttm_object_file *tfile,
41877+ struct ttm_base_object *base,
41878+ enum ttm_ref_type ref_type, bool *existed)
41879+{
41880+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
41881+ struct ttm_ref_object *ref;
41882+ struct drm_hash_item *hash;
41883+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
41884+ int ret = -EINVAL;
41885+
41886+ if (existed != NULL)
41887+ *existed = true;
41888+
41889+ while (ret == -EINVAL) {
41890+ read_lock(&tfile->lock);
41891+ ret = drm_ht_find_item(ht, base->hash.key, &hash);
41892+
41893+ if (ret == 0) {
41894+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
41895+ kref_get(&ref->kref);
41896+ read_unlock(&tfile->lock);
41897+ break;
41898+ }
41899+
41900+ read_unlock(&tfile->lock);
41901+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false, false, false);
41902+ if (unlikely(ret != 0))
41903+ return ret;
41904+ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
41905+ if (unlikely(ref == NULL)) {
41906+ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
41907+ return -ENOMEM;
41908+ }
41909+
41910+ ref->hash.key = base->hash.key;
41911+ ref->obj = base;
41912+ ref->tfile = tfile;
41913+ ref->ref_type = ref_type;
41914+ kref_init(&ref->kref);
41915+
41916+ write_lock(&tfile->lock);
41917+ ret = drm_ht_insert_item(ht, &ref->hash);
41918+
41919+ if (likely(ret == 0)) {
41920+ list_add_tail(&ref->head, &tfile->ref_list);
41921+ kref_get(&base->refcount);
41922+ write_unlock(&tfile->lock);
41923+ if (existed != NULL)
41924+ *existed = false;
41925+ break;
41926+ }
41927+
41928+ write_unlock(&tfile->lock);
41929+ BUG_ON(ret != -EINVAL);
41930+
41931+ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
41932+ kfree(ref);
41933+ }
41934+
41935+ return ret;
41936+}
41937+
41938+static void ttm_ref_object_release(struct kref *kref)
41939+{
41940+ struct ttm_ref_object *ref =
41941+ container_of(kref, struct ttm_ref_object, kref);
41942+ struct ttm_base_object *base = ref->obj;
41943+ struct ttm_object_file *tfile = ref->tfile;
41944+ struct drm_open_hash *ht;
41945+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
41946+
41947+ ht = &tfile->ref_hash[ref->ref_type];
41948+ (void)drm_ht_remove_item(ht, &ref->hash);
41949+ list_del(&ref->head);
41950+ write_unlock(&tfile->lock);
41951+
41952+ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
41953+ base->ref_obj_release(base, ref->ref_type);
41954+
41955+ ttm_base_object_unref(&ref->obj);
41956+ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
41957+ kfree(ref);
41958+ write_lock(&tfile->lock);
41959+}
41960+
41961+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
41962+ unsigned long key, enum ttm_ref_type ref_type)
41963+{
41964+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
41965+ struct ttm_ref_object *ref;
41966+ struct drm_hash_item *hash;
41967+ int ret;
41968+
41969+ write_lock(&tfile->lock);
41970+ ret = drm_ht_find_item(ht, key, &hash);
41971+ if (unlikely(ret != 0)) {
41972+ write_unlock(&tfile->lock);
41973+ return -EINVAL;
41974+ }
41975+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
41976+ kref_put(&ref->kref, ttm_ref_object_release);
41977+ write_unlock(&tfile->lock);
41978+ return 0;
41979+}
41980+
41981+void ttm_object_file_release(struct ttm_object_file **p_tfile)
41982+{
41983+ struct ttm_ref_object *ref;
41984+ struct list_head *list;
41985+ unsigned int i;
41986+ struct ttm_object_file *tfile = *p_tfile;
41987+
41988+ *p_tfile = NULL;
41989+ write_lock(&tfile->lock);
41990+
41991+ /*
41992+ * Since we release the lock within the loop, we have to
41993+ * restart it from the beginning each time.
41994+ */
41995+
41996+ while (!list_empty(&tfile->ref_list)) {
41997+ list = tfile->ref_list.next;
41998+ ref = list_entry(list, struct ttm_ref_object, head);
41999+ ttm_ref_object_release(&ref->kref);
42000+ }
42001+
42002+ for (i = 0; i < TTM_REF_NUM; ++i) {
42003+ drm_ht_remove(&tfile->ref_hash[i]);
42004+ }
42005+
42006+ write_unlock(&tfile->lock);
42007+ ttm_object_file_unref(&tfile);
42008+}
42009+
42010+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
42011+ unsigned int hash_order)
42012+{
42013+ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
42014+ unsigned int i;
42015+ unsigned int j = 0;
42016+ int ret;
42017+
42018+ if (unlikely(tfile == NULL))
42019+ return NULL;
42020+
42021+ rwlock_init(&tfile->lock);
42022+ tfile->tdev = tdev;
42023+ kref_init(&tfile->refcount);
42024+ INIT_LIST_HEAD(&tfile->ref_list);
42025+
42026+ for (i = 0; i < TTM_REF_NUM; ++i) {
42027+ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
42028+ if (ret) {
42029+ j = i;
42030+ goto out_err;
42031+ }
42032+ }
42033+
42034+ return tfile;
42035+ out_err:
42036+ for (i = 0; i < j; ++i) {
42037+ drm_ht_remove(&tfile->ref_hash[i]);
42038+ }
42039+ kfree(tfile);
42040+
42041+ return NULL;
42042+}
42043+
42044+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
42045+ *mem_glob,
42046+ unsigned int hash_order)
42047+{
42048+ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
42049+ int ret;
42050+
42051+ if (unlikely(tdev == NULL))
42052+ return NULL;
42053+
42054+ tdev->mem_glob = mem_glob;
42055+ rwlock_init(&tdev->object_lock);
42056+ atomic_set(&tdev->object_count, 0);
42057+ ret = drm_ht_create(&tdev->object_hash, hash_order);
42058+
42059+ if (likely(ret == 0))
42060+ return tdev;
42061+
42062+ kfree(tdev);
42063+ return NULL;
42064+}
42065+
42066+void ttm_object_device_release(struct ttm_object_device **p_tdev)
42067+{
42068+ struct ttm_object_device *tdev = *p_tdev;
42069+
42070+ *p_tdev = NULL;
42071+
42072+ write_lock(&tdev->object_lock);
42073+ drm_ht_remove(&tdev->object_hash);
42074+ write_unlock(&tdev->object_lock);
42075+
42076+ kfree(tdev);
42077+}
42078diff --git a/drivers/gpu/drm/psb/ttm/ttm_object.h b/drivers/gpu/drm/psb/ttm/ttm_object.h
42079new file mode 100644
42080index 0000000..0925ac5
42081--- /dev/null
42082+++ b/drivers/gpu/drm/psb/ttm/ttm_object.h
42083@@ -0,0 +1,269 @@
42084+/**************************************************************************
42085+ *
42086+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
42087+ * All Rights Reserved.
42088+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
42089+ * All Rights Reserved.
42090+ *
42091+ * Permission is hereby granted, free of charge, to any person obtaining a
42092+ * copy of this software and associated documentation files (the
42093+ * "Software"), to deal in the Software without restriction, including
42094+ * without limitation the rights to use, copy, modify, merge, publish,
42095+ * distribute, sub license, and/or sell copies of the Software, and to
42096+ * permit persons to whom the Software is furnished to do so, subject to
42097+ * the following conditions:
42098+ *
42099+ * The above copyright notice and this permission notice (including the
42100+ * next paragraph) shall be included in all copies or substantial portions
42101+ * of the Software.
42102+ *
42103+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
42104+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42105+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
42106+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
42107+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
42108+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
42109+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
42110+ *
42111+ **************************************************************************/
42112+/*
42113+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
42114+ */
42115+/** @file ttm_ref_object.h
42116+ *
42117+ * Base- and reference object implementation for the various
42118+ * ttm objects. Implements reference counting, minimal security checks
42119+ * and release on file close.
42120+ */
42121+
42122+#ifndef _TTM_OBJECT_H_
42123+#define _TTM_OBJECT_H_
42124+
42125+#include <linux/list.h>
42126+#include <drm/drm_hashtab.h>
42127+#include <linux/kref.h>
42128+#include <ttm/ttm_memory.h>
42129+
42130+/**
42131+ * enum ttm_ref_type
42132+ *
42133+ * Describes what type of reference a ref object holds.
42134+ *
42135+ * TTM_REF_USAGE is a simple refcount on a base object.
42136+ *
42137+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
42138+ * buffer object.
42139+ *
42140+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
42141+ * buffer object.
42142+ *
42143+ */
42144+
42145+enum ttm_ref_type {
42146+ TTM_REF_USAGE,
42147+ TTM_REF_SYNCCPU_READ,
42148+ TTM_REF_SYNCCPU_WRITE,
42149+ TTM_REF_NUM
42150+};
42151+
42152+/**
42153+ * enum ttm_object_type
42154+ *
42155+ * One entry per ttm object type.
42156+ * Device-specific types should use the
42157+ * ttm_driver_typex types.
42158+ */
42159+
42160+enum ttm_object_type {
42161+ ttm_fence_type,
42162+ ttm_buffer_type,
42163+ ttm_lock_type,
42164+ ttm_driver_type0 = 256,
42165+ ttm_driver_type1
42166+};
42167+
42168+struct ttm_object_file;
42169+struct ttm_object_device;
42170+
42171+/**
42172+ * struct ttm_base_object
42173+ *
42174+ * @hash: hash entry for the per-device object hash.
42175+ * @type: derived type this object is base class for.
42176+ * @shareable: Other ttm_object_files can access this object.
42177+ *
42178+ * @tfile: Pointer to ttm_object_file of the creator.
42179+ * NULL if the object was not created by a user request.
42180+ * (kernel object).
42181+ *
42182+ * @refcount: Number of references to this object, not
42183+ * including the hash entry. A reference to a base object can
42184+ * only be held by a ref object.
42185+ *
42186+ * @refcount_release: A function to be called when there are
42187+ * no more references to this object. This function should
42188+ * destroy the object (or make sure destruction eventually happens),
42189+ * and when it is called, the object has
42190+ * already been taken out of the per-device hash. The parameter
42191+ * "base" should be set to NULL by the function.
42192+ *
42193+ * @ref_obj_release: A function to be called when a reference object
42194+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
42195+ * this function may, for example, release a lock held by a user-space
42196+ * process.
42197+ *
42198+ * This struct is intended to be used as a base struct for objects that
42199+ * are visible to user-space. It provides a global name, race-safe
42200+ * access and refcounting, minimal access contol and hooks for unref actions.
42201+ */
42202+
42203+struct ttm_base_object {
42204+ struct drm_hash_item hash;
42205+ enum ttm_object_type object_type;
42206+ bool shareable;
42207+ struct ttm_object_file *tfile;
42208+ struct kref refcount;
42209+ void (*refcount_release) (struct ttm_base_object ** base);
42210+ void (*ref_obj_release) (struct ttm_base_object * base,
42211+ enum ttm_ref_type ref_type);
42212+};
42213+
42214+/**
42215+ * ttm_base_object_init
42216+ *
42217+ * @tfile: Pointer to a struct ttm_object_file.
42218+ * @base: The struct ttm_base_object to initialize.
42219+ * @shareable: This object is shareable with other applcations.
42220+ * (different @tfile pointers.)
42221+ * @type: The object type.
42222+ * @refcount_release: See the struct ttm_base_object description.
42223+ * @ref_obj_release: See the struct ttm_base_object description.
42224+ *
42225+ * Initializes a struct ttm_base_object.
42226+ */
42227+
42228+extern int ttm_base_object_init(struct ttm_object_file *tfile,
42229+ struct ttm_base_object *base,
42230+ bool shareable,
42231+ enum ttm_object_type type,
42232+ void (*refcount_release) (struct ttm_base_object
42233+ **),
42234+ void (*ref_obj_release) (struct ttm_base_object
42235+ *,
42236+ enum ttm_ref_type
42237+ ref_type));
42238+
42239+/**
42240+ * ttm_base_object_lookup
42241+ *
42242+ * @tfile: Pointer to a struct ttm_object_file.
42243+ * @key: Hash key
42244+ *
42245+ * Looks up a struct ttm_base_object with the key @key.
42246+ * Also verifies that the object is visible to the application, by
42247+ * comparing the @tfile argument and checking the object shareable flag.
42248+ */
42249+
42250+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
42251+ *tfile, uint32_t key);
42252+
42253+/**
42254+ * ttm_base_object_unref
42255+ *
42256+ * @p_base: Pointer to a pointer referncing a struct ttm_base_object.
42257+ *
42258+ * Decrements the base object refcount and clears the pointer pointed to by
42259+ * p_base.
42260+ */
42261+
42262+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
42263+
42264+/**
42265+ * ttm_ref_object_add.
42266+ *
42267+ * @tfile: A struct ttm_object_file representing the application owning the
42268+ * ref_object.
42269+ * @base: The base object to reference.
42270+ * @ref_type: The type of reference.
42271+ * @existed: Upon completion, indicates that an identical reference object
42272+ * already existed, and the refcount was upped on that object instead.
42273+ *
42274+ * Adding a ref object to a base object is basically like referencing the
42275+ * base object, but a user-space application holds the reference. When the
42276+ * file corresponding to @tfile is closed, all its reference objects are
42277+ * deleted. A reference object can have different types depending on what
42278+ * it's intended for. It can be refcounting to prevent object destruction,
42279+ * When user-space takes a lock, it can add a ref object to that lock to
42280+ * make sure the lock is released if the application dies. A ref object
42281+ * will hold a single reference on a base object.
42282+ */
42283+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
42284+ struct ttm_base_object *base,
42285+ enum ttm_ref_type ref_type, bool *existed);
42286+/**
42287+ * ttm_ref_object_base_unref
42288+ *
42289+ * @key: Key representing the base object.
42290+ * @ref_type: Ref type of the ref object to be dereferenced.
42291+ *
42292+ * Unreference a ref object with type @ref_type
42293+ * on the base object identified by @key. If there are no duplicate
42294+ * references, the ref object will be destroyed and the base object
42295+ * will be unreferenced.
42296+ */
42297+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
42298+ unsigned long key,
42299+ enum ttm_ref_type ref_type);
42300+
42301+/**
42302+ * ttm_object_file_init - initialize a struct ttm_object file
42303+ *
42304+ * @tdev: A struct ttm_object device this file is initialized on.
42305+ * @hash_order: Order of the hash table used to hold the reference objects.
42306+ *
42307+ * This is typically called by the file_ops::open function.
42308+ */
42309+
42310+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
42311+ *tdev,
42312+ unsigned int hash_order);
42313+
42314+/**
42315+ * ttm_object_file_release - release data held by a ttm_object_file
42316+ *
42317+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
42318+ * *p_tfile will be set to NULL by this function.
42319+ *
42320+ * Releases all data associated by a ttm_object_file.
42321+ * Typically called from file_ops::release. The caller must
42322+ * ensure that there are no concurrent users of tfile.
42323+ */
42324+
42325+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
42326+
42327+/**
42328+ * ttm_object device init - initialize a struct ttm_object_device
42329+ *
42330+ * @hash_order: Order of hash table used to hash the base objects.
42331+ *
42332+ * This function is typically called on device initialization to prepare
42333+ * data structures needed for ttm base and ref objects.
42334+ */
42335+
42336+extern struct ttm_object_device *ttm_object_device_init
42337+ (struct ttm_mem_global *mem_glob, unsigned int hash_order);
42338+
42339+/**
42340+ * ttm_object_device_release - release data held by a ttm_object_device
42341+ *
42342+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
42343+ * *p_tdev will be set to NULL by this function.
42344+ *
42345+ * Releases all data associated by a ttm_object_device.
42346+ * Typically called from driver::unload before the destruction of the
42347+ * device private data structure.
42348+ */
42349+
42350+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
42351+
42352+#endif
42353diff --git a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c
42354new file mode 100644
42355index 0000000..701be0d
42356--- /dev/null
42357+++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c
42358@@ -0,0 +1,178 @@
42359+/**************************************************************************
42360+ *
42361+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
42362+ * All Rights Reserved.
42363+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
42364+ * All Rights Reserved.
42365+ *
42366+ * Permission is hereby granted, free of charge, to any person obtaining a
42367+ * copy of this software and associated documentation files (the
42368+ * "Software"), to deal in the Software without restriction, including
42369+ * without limitation the rights to use, copy, modify, merge, publish,
42370+ * distribute, sub license, and/or sell copies of the Software, and to
42371+ * permit persons to whom the Software is furnished to do so, subject to
42372+ * the following conditions:
42373+ *
42374+ * The above copyright notice and this permission notice (including the
42375+ * next paragraph) shall be included in all copies or substantial portions
42376+ * of the Software.
42377+ *
42378+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
42379+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42380+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
42381+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
42382+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
42383+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
42384+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
42385+ *
42386+ **************************************************************************/
42387+/*
42388+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
42389+ */
42390+
42391+#include "ttm/ttm_pat_compat.h"
42392+#include <linux/version.h>
42393+#include <asm/page.h>
42394+#include <linux/spinlock.h>
42395+#include <asm/pgtable.h>
42396+
42397+#if (defined(CONFIG_X86) && !defined(CONFIG_X86_PAT))
42398+#include <asm/tlbflush.h>
42399+#include <asm/msr.h>
42400+#include <asm/system.h>
42401+#include <linux/notifier.h>
42402+#include <linux/cpu.h>
42403+
42404+#ifndef MSR_IA32_CR_PAT
42405+#define MSR_IA32_CR_PAT 0x0277
42406+#endif
42407+
42408+#ifndef _PAGE_PAT
42409+#define _PAGE_PAT 0x080
42410+#endif
42411+
42412+static int ttm_has_pat = 0;
42413+
42414+/*
42415+ * Used at resume-time when CPU-s are fired up.
42416+ */
42417+
42418+static void ttm_pat_ipi_handler(void *notused)
42419+{
42420+ u32 v1, v2;
42421+
42422+ rdmsr(MSR_IA32_CR_PAT, v1, v2);
42423+ v2 &= 0xFFFFFFF8;
42424+ v2 |= 0x00000001;
42425+ wbinvd();
42426+ wrmsr(MSR_IA32_CR_PAT, v1, v2);
42427+ wbinvd();
42428+ __flush_tlb_all();
42429+}
42430+
42431+static void ttm_pat_enable(void)
42432+{
42433+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27))
42434+ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1, 1) != 0) {
42435+#else
42436+ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1) != 0) {
42437+#endif
42438+ printk(KERN_ERR "Timed out setting up CPU PAT.\n");
42439+ }
42440+}
42441+
42442+void ttm_pat_resume(void)
42443+{
42444+ if (unlikely(!ttm_has_pat))
42445+ return;
42446+
42447+ ttm_pat_enable();
42448+}
42449+
42450+static int psb_cpu_callback(struct notifier_block *nfb,
42451+ unsigned long action, void *hcpu)
42452+{
42453+ if (action == CPU_ONLINE) {
42454+ ttm_pat_resume();
42455+ }
42456+
42457+ return 0;
42458+}
42459+
42460+static struct notifier_block psb_nb = {
42461+ .notifier_call = psb_cpu_callback,
42462+ .priority = 1
42463+};
42464+
42465+/*
42466+ * Set i386 PAT entry PAT4 to Write-combining memory type on all processors.
42467+ */
42468+
42469+void ttm_pat_init(void)
42470+{
42471+ if (likely(ttm_has_pat))
42472+ return;
42473+
42474+ if (!boot_cpu_has(X86_FEATURE_PAT)) {
42475+ return;
42476+ }
42477+
42478+ ttm_pat_enable();
42479+
42480+ if (num_present_cpus() > 1)
42481+ register_cpu_notifier(&psb_nb);
42482+
42483+ ttm_has_pat = 1;
42484+}
42485+
42486+void ttm_pat_takedown(void)
42487+{
42488+ if (unlikely(!ttm_has_pat))
42489+ return;
42490+
42491+ if (num_present_cpus() > 1)
42492+ unregister_cpu_notifier(&psb_nb);
42493+
42494+ ttm_has_pat = 0;
42495+}
42496+
42497+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
42498+{
42499+ if (likely(ttm_has_pat)) {
42500+ pgprot_val(prot) |= _PAGE_PAT;
42501+ return prot;
42502+ } else {
42503+ return pgprot_noncached(prot);
42504+ }
42505+}
42506+
42507+#else
42508+
42509+void ttm_pat_init(void)
42510+{
42511+}
42512+
42513+void ttm_pat_takedown(void)
42514+{
42515+}
42516+
42517+void ttm_pat_resume(void)
42518+{
42519+}
42520+
42521+#ifdef CONFIG_X86
42522+#include <asm/pat.h>
42523+
42524+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
42525+{
42526+ uint32_t cache_bits = ((1) ? _PAGE_CACHE_WC : _PAGE_CACHE_UC_MINUS);
42527+
42528+ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | cache_bits);
42529+}
42530+#else
42531+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
42532+{
42533+ BUG();
42534+}
42535+#endif
42536+#endif
42537diff --git a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h
42538new file mode 100644
42539index 0000000..d767570
42540--- /dev/null
42541+++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h
42542@@ -0,0 +1,41 @@
42543+/**************************************************************************
42544+ *
42545+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
42546+ * All Rights Reserved.
42547+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
42548+ * All Rights Reserved.
42549+ *
42550+ * Permission is hereby granted, free of charge, to any person obtaining a
42551+ * copy of this software and associated documentation files (the
42552+ * "Software"), to deal in the Software without restriction, including
42553+ * without limitation the rights to use, copy, modify, merge, publish,
42554+ * distribute, sub license, and/or sell copies of the Software, and to
42555+ * permit persons to whom the Software is furnished to do so, subject to
42556+ * the following conditions:
42557+ *
42558+ * The above copyright notice and this permission notice (including the
42559+ * next paragraph) shall be included in all copies or substantial portions
42560+ * of the Software.
42561+ *
42562+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
42563+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42564+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
42565+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
42566+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
42567+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
42568+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
42569+ *
42570+ **************************************************************************/
42571+/*
42572+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
42573+ */
42574+
42575+#ifndef _TTM_PAT_COMPAT_
42576+#define _TTM_PAT_COMPAT_
42577+#include <asm/page.h>
42578+#include <asm/pgtable_types.h>
42579+extern void ttm_pat_init(void);
42580+extern void ttm_pat_takedown(void);
42581+extern void ttm_pat_resume(void);
42582+extern pgprot_t pgprot_ttm_x86_wc(pgprot_t prot);
42583+#endif
42584diff --git a/drivers/gpu/drm/psb/ttm/ttm_placement_common.h b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h
42585new file mode 100644
42586index 0000000..13f3861
42587--- /dev/null
42588+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h
42589@@ -0,0 +1,98 @@
42590+/**************************************************************************
42591+ *
42592+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
42593+ * All Rights Reserved.
42594+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
42595+ * All Rights Reserved.
42596+ *
42597+ * Permission is hereby granted, free of charge, to any person obtaining a
42598+ * copy of this software and associated documentation files (the
42599+ * "Software"), to deal in the Software without restriction, including
42600+ * without limitation the rights to use, copy, modify, merge, publish,
42601+ * distribute, sub license, and/or sell copies of the Software, and to
42602+ * permit persons to whom the Software is furnished to do so, subject to
42603+ * the following conditions:
42604+ *
42605+ * The above copyright notice and this permission notice (including the
42606+ * next paragraph) shall be included in all copies or substantial portions
42607+ * of the Software.
42608+ *
42609+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
42610+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42611+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
42612+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
42613+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
42614+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
42615+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
42616+ *
42617+ **************************************************************************/
42618+/*
42619+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
42620+ */
42621+
42622+#ifndef _TTM_PL_COMMON_H_
42623+#define _TTM_PL_COMMON_H_
42624+/*
42625+ * Memory regions for data placement.
42626+ */
42627+
42628+#define TTM_PL_SYSTEM 0
42629+#define TTM_PL_TT 1
42630+#define TTM_PL_VRAM 2
42631+#define TTM_PL_PRIV0 3
42632+#define TTM_PL_PRIV1 4
42633+#define TTM_PL_PRIV2 5
42634+#define TTM_PL_PRIV3 6
42635+#define TTM_PL_PRIV4 7
42636+#define TTM_PL_PRIV5 8
42637+#define TTM_PL_CI 9
42638+#define TTM_PL_RAR 10
42639+#define TTM_PL_SWAPPED 15
42640+
42641+#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
42642+#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
42643+#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
42644+#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
42645+#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
42646+#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
42647+#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
42648+#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
42649+#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
42650+#define TTM_PL_FLAG_CI (1 << TTM_PL_CI)
42651+#define TTM_PL_FLAG_RAR (1 << TTM_PL_RAR)
42652+#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
42653+#define TTM_PL_MASK_MEM 0x0000FFFF
42654+
42655+/*
42656+ * Other flags that affects data placement.
42657+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
42658+ * if available.
42659+ * TTM_PL_FLAG_SHARED means that another application may
42660+ * reference the buffer.
42661+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
42662+ * be evicted to make room for other buffers.
42663+ */
42664+
42665+#define TTM_PL_FLAG_CACHED (1 << 16)
42666+#define TTM_PL_FLAG_UNCACHED (1 << 17)
42667+#define TTM_PL_FLAG_WC (1 << 18)
42668+#define TTM_PL_FLAG_SHARED (1 << 20)
42669+#define TTM_PL_FLAG_NO_EVICT (1 << 21)
42670+
42671+#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
42672+ TTM_PL_FLAG_UNCACHED | \
42673+ TTM_PL_FLAG_WC)
42674+
42675+#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
42676+
42677+/*
42678+ * Access flags to be used for CPU- and GPU- mappings.
42679+ * The idea is that the TTM synchronization mechanism will
42680+ * allow concurrent READ access and exclusive write access.
42681+ * Currently GPU- and CPU accesses are exclusive.
42682+ */
42683+
42684+#define TTM_ACCESS_READ (1 << 0)
42685+#define TTM_ACCESS_WRITE (1 << 1)
42686+
42687+#endif
42688diff --git a/drivers/gpu/drm/psb/ttm/ttm_placement_user.c b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c
42689new file mode 100644
42690index 0000000..68cbb08
42691--- /dev/null
42692+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c
42693@@ -0,0 +1,468 @@
42694+/**************************************************************************
42695+ *
42696+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
42697+ * All Rights Reserved.
42698+ *
42699+ * Permission is hereby granted, free of charge, to any person obtaining a
42700+ * copy of this software and associated documentation files (the
42701+ * "Software"), to deal in the Software without restriction, including
42702+ * without limitation the rights to use, copy, modify, merge, publish,
42703+ * distribute, sub license, and/or sell copies of the Software, and to
42704+ * permit persons to whom the Software is furnished to do so, subject to
42705+ * the following conditions:
42706+ *
42707+ * The above copyright notice and this permission notice (including the
42708+ * next paragraph) shall be included in all copies or substantial portions
42709+ * of the Software.
42710+ *
42711+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
42712+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42713+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
42714+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
42715+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
42716+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
42717+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
42718+ *
42719+ **************************************************************************/
42720+/*
42721+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
42722+ */
42723+
42724+#include "ttm/ttm_placement_user.h"
42725+#include "ttm/ttm_bo_driver.h"
42726+#include "ttm/ttm_object.h"
42727+#include "ttm/ttm_userobj_api.h"
42728+#include "ttm/ttm_lock.h"
42729+
42730+struct ttm_bo_user_object {
42731+ struct ttm_base_object base;
42732+ struct ttm_buffer_object bo;
42733+};
42734+
42735+static size_t pl_bo_size = 0;
42736+
42737+static size_t ttm_pl_size(struct ttm_bo_device *bdev, unsigned long num_pages)
42738+{
42739+ size_t page_array_size =
42740+ (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
42741+
42742+ if (unlikely(pl_bo_size == 0)) {
42743+ pl_bo_size = bdev->ttm_bo_extra_size +
42744+ ttm_round_pot(sizeof(struct ttm_bo_user_object));
42745+ }
42746+
42747+ return bdev->ttm_bo_size + 2 * page_array_size;
42748+}
42749+
42750+static struct ttm_bo_user_object *ttm_bo_user_lookup(struct ttm_object_file
42751+ *tfile, uint32_t handle)
42752+{
42753+ struct ttm_base_object *base;
42754+
42755+ base = ttm_base_object_lookup(tfile, handle);
42756+ if (unlikely(base == NULL)) {
42757+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
42758+ (unsigned long)handle);
42759+ return NULL;
42760+ }
42761+
42762+ if (unlikely(base->object_type != ttm_buffer_type)) {
42763+ ttm_base_object_unref(&base);
42764+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
42765+ (unsigned long)handle);
42766+ return NULL;
42767+ }
42768+
42769+ return container_of(base, struct ttm_bo_user_object, base);
42770+}
42771+
42772+struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
42773+ *tfile, uint32_t handle)
42774+{
42775+ struct ttm_bo_user_object *user_bo;
42776+ struct ttm_base_object *base;
42777+
42778+ user_bo = ttm_bo_user_lookup(tfile, handle);
42779+ if (unlikely(user_bo == NULL))
42780+ return NULL;
42781+
42782+ (void)ttm_bo_reference(&user_bo->bo);
42783+ base = &user_bo->base;
42784+ ttm_base_object_unref(&base);
42785+ return &user_bo->bo;
42786+}
42787+
42788+static void ttm_bo_user_destroy(struct ttm_buffer_object *bo)
42789+{
42790+ struct ttm_bo_user_object *user_bo =
42791+ container_of(bo, struct ttm_bo_user_object, bo);
42792+
42793+ ttm_mem_global_free(bo->bdev->mem_glob, bo->acc_size, false);
42794+ kfree(user_bo);
42795+}
42796+
42797+static void ttm_bo_user_release(struct ttm_base_object **p_base)
42798+{
42799+ struct ttm_bo_user_object *user_bo;
42800+ struct ttm_base_object *base = *p_base;
42801+ struct ttm_buffer_object *bo;
42802+
42803+ *p_base = NULL;
42804+
42805+ if (unlikely(base == NULL))
42806+ return;
42807+
42808+ user_bo = container_of(base, struct ttm_bo_user_object, base);
42809+ bo = &user_bo->bo;
42810+ ttm_bo_unref(&bo);
42811+}
42812+
42813+static void ttm_bo_user_ref_release(struct ttm_base_object *base,
42814+ enum ttm_ref_type ref_type)
42815+{
42816+ struct ttm_bo_user_object *user_bo =
42817+ container_of(base, struct ttm_bo_user_object, base);
42818+ struct ttm_buffer_object *bo = &user_bo->bo;
42819+
42820+ switch (ref_type) {
42821+ case TTM_REF_SYNCCPU_WRITE:
42822+ ttm_bo_synccpu_write_release(bo);
42823+ break;
42824+ default:
42825+ BUG();
42826+ }
42827+}
42828+
42829+static void ttm_pl_fill_rep(struct ttm_buffer_object *bo,
42830+ struct ttm_pl_rep *rep)
42831+{
42832+ struct ttm_bo_user_object *user_bo =
42833+ container_of(bo, struct ttm_bo_user_object, bo);
42834+
42835+ rep->gpu_offset = bo->offset;
42836+ rep->bo_size = bo->num_pages << PAGE_SHIFT;
42837+ rep->map_handle = bo->addr_space_offset;
42838+ rep->placement = bo->mem.flags;
42839+ rep->handle = user_bo->base.hash.key;
42840+ rep->sync_object_arg = (uint32_t) (unsigned long)bo->sync_obj_arg;
42841+}
42842+
42843+int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
42844+ struct ttm_bo_device *bdev,
42845+ struct ttm_lock *lock, void *data)
42846+{
42847+ union ttm_pl_create_arg *arg = data;
42848+ struct ttm_pl_create_req *req = &arg->req;
42849+ struct ttm_pl_rep *rep = &arg->rep;
42850+ struct ttm_buffer_object *bo;
42851+ struct ttm_buffer_object *tmp;
42852+ struct ttm_bo_user_object *user_bo;
42853+ uint32_t flags;
42854+ int ret = 0;
42855+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
42856+ size_t acc_size =
42857+ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
42858+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
42859+ if (unlikely(ret != 0))
42860+ return ret;
42861+
42862+ flags = req->placement;
42863+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
42864+ if (unlikely(user_bo == NULL)) {
42865+ ttm_mem_global_free(mem_glob, acc_size, false);
42866+ return -ENOMEM;
42867+ }
42868+
42869+ bo = &user_bo->bo;
42870+ ret = ttm_read_lock(lock, true);
42871+ if (unlikely(ret != 0)) {
42872+ ttm_mem_global_free(mem_glob, acc_size, false);
42873+ kfree(user_bo);
42874+ return ret;
42875+ }
42876+
42877+ ret = ttm_buffer_object_init(bdev, bo, req->size,
42878+ ttm_bo_type_device, flags,
42879+ req->page_alignment, 0, true,
42880+ NULL, acc_size, &ttm_bo_user_destroy);
42881+ ttm_read_unlock(lock);
42882+
42883+ /*
42884+ * Note that the ttm_buffer_object_init function
42885+ * would've called the destroy function on failure!!
42886+ */
42887+
42888+ if (unlikely(ret != 0))
42889+ goto out;
42890+
42891+ tmp = ttm_bo_reference(bo);
42892+ ret = ttm_base_object_init(tfile, &user_bo->base,
42893+ flags & TTM_PL_FLAG_SHARED,
42894+ ttm_buffer_type,
42895+ &ttm_bo_user_release,
42896+ &ttm_bo_user_ref_release);
42897+ if (unlikely(ret != 0))
42898+ goto out_err;
42899+
42900+ mutex_lock(&bo->mutex);
42901+ ttm_pl_fill_rep(bo, rep);
42902+ mutex_unlock(&bo->mutex);
42903+ ttm_bo_unref(&bo);
42904+ out:
42905+ return 0;
42906+ out_err:
42907+ ttm_bo_unref(&tmp);
42908+ ttm_bo_unref(&bo);
42909+ return ret;
42910+}
42911+
42912+int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
42913+ struct ttm_bo_device *bdev,
42914+ struct ttm_lock *lock, void *data)
42915+{
42916+ union ttm_pl_create_ub_arg *arg = data;
42917+ struct ttm_pl_create_ub_req *req = &arg->req;
42918+ struct ttm_pl_rep *rep = &arg->rep;
42919+ struct ttm_buffer_object *bo;
42920+ struct ttm_buffer_object *tmp;
42921+ struct ttm_bo_user_object *user_bo;
42922+ uint32_t flags;
42923+ int ret = 0;
42924+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
42925+ size_t acc_size =
42926+ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
42927+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
42928+ if (unlikely(ret != 0))
42929+ return ret;
42930+
42931+ flags = req->placement;
42932+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
42933+ if (unlikely(user_bo == NULL)) {
42934+ ttm_mem_global_free(mem_glob, acc_size, false);
42935+ return -ENOMEM;
42936+ }
42937+ ret = ttm_read_lock(lock, true);
42938+ if (unlikely(ret != 0)) {
42939+ ttm_mem_global_free(mem_glob, acc_size, false);
42940+ kfree(user_bo);
42941+ return ret;
42942+ }
42943+ bo = &user_bo->bo;
42944+ ret = ttm_buffer_object_init(bdev, bo, req->size,
42945+ ttm_bo_type_user, flags,
42946+ req->page_alignment, req->user_address,
42947+ true, NULL, acc_size, &ttm_bo_user_destroy);
42948+
42949+ /*
42950+ * Note that the ttm_buffer_object_init function
42951+ * would've called the destroy function on failure!!
42952+ */
42953+ ttm_read_unlock(lock);
42954+ if (unlikely(ret != 0))
42955+ goto out;
42956+
42957+ tmp = ttm_bo_reference(bo);
42958+ ret = ttm_base_object_init(tfile, &user_bo->base,
42959+ flags & TTM_PL_FLAG_SHARED,
42960+ ttm_buffer_type,
42961+ &ttm_bo_user_release,
42962+ &ttm_bo_user_ref_release);
42963+ if (unlikely(ret != 0))
42964+ goto out_err;
42965+
42966+ mutex_lock(&bo->mutex);
42967+ ttm_pl_fill_rep(bo, rep);
42968+ mutex_unlock(&bo->mutex);
42969+ ttm_bo_unref(&bo);
42970+ out:
42971+ return 0;
42972+ out_err:
42973+ ttm_bo_unref(&tmp);
42974+ ttm_bo_unref(&bo);
42975+ return ret;
42976+}
42977+
42978+int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data)
42979+{
42980+ union ttm_pl_reference_arg *arg = data;
42981+ struct ttm_pl_rep *rep = &arg->rep;
42982+ struct ttm_bo_user_object *user_bo;
42983+ struct ttm_buffer_object *bo;
42984+ struct ttm_base_object *base;
42985+ int ret;
42986+
42987+ user_bo = ttm_bo_user_lookup(tfile, arg->req.handle);
42988+ if (unlikely(user_bo == NULL)) {
42989+ printk(KERN_ERR "Could not reference buffer object.\n");
42990+ return -EINVAL;
42991+ }
42992+
42993+ bo = &user_bo->bo;
42994+ ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
42995+ if (unlikely(ret != 0)) {
42996+ printk(KERN_ERR
42997+ "Could not add a reference to buffer object.\n");
42998+ goto out;
42999+ }
43000+
43001+ mutex_lock(&bo->mutex);
43002+ ttm_pl_fill_rep(bo, rep);
43003+ mutex_unlock(&bo->mutex);
43004+
43005+ out:
43006+ base = &user_bo->base;
43007+ ttm_base_object_unref(&base);
43008+ return ret;
43009+}
43010+
43011+int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data)
43012+{
43013+ struct ttm_pl_reference_req *arg = data;
43014+
43015+ return ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE);
43016+}
43017+
43018+int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data)
43019+{
43020+ struct ttm_pl_synccpu_arg *arg = data;
43021+ struct ttm_bo_user_object *user_bo;
43022+ struct ttm_buffer_object *bo;
43023+ struct ttm_base_object *base;
43024+ bool existed;
43025+ int ret;
43026+
43027+ switch (arg->op) {
43028+ case TTM_PL_SYNCCPU_OP_GRAB:
43029+ user_bo = ttm_bo_user_lookup(tfile, arg->handle);
43030+ if (unlikely(user_bo == NULL)) {
43031+ printk(KERN_ERR
43032+ "Could not find buffer object for synccpu.\n");
43033+ return -EINVAL;
43034+ }
43035+ bo = &user_bo->bo;
43036+ base = &user_bo->base;
43037+ ret = ttm_bo_synccpu_write_grab(bo,
43038+ arg->access_mode &
43039+ TTM_PL_SYNCCPU_MODE_NO_BLOCK);
43040+ if (unlikely(ret != 0)) {
43041+ ttm_base_object_unref(&base);
43042+ goto out;
43043+ }
43044+ ret = ttm_ref_object_add(tfile, &user_bo->base,
43045+ TTM_REF_SYNCCPU_WRITE, &existed);
43046+ if (existed || ret != 0)
43047+ ttm_bo_synccpu_write_release(bo);
43048+ ttm_base_object_unref(&base);
43049+ break;
43050+ case TTM_PL_SYNCCPU_OP_RELEASE:
43051+ ret = ttm_ref_object_base_unref(tfile, arg->handle,
43052+ TTM_REF_SYNCCPU_WRITE);
43053+ break;
43054+ default:
43055+ ret = -EINVAL;
43056+ break;
43057+ }
43058+ out:
43059+ return ret;
43060+}
43061+
43062+int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
43063+ struct ttm_lock *lock, void *data)
43064+{
43065+ union ttm_pl_setstatus_arg *arg = data;
43066+ struct ttm_pl_setstatus_req *req = &arg->req;
43067+ struct ttm_pl_rep *rep = &arg->rep;
43068+ struct ttm_buffer_object *bo;
43069+ struct ttm_bo_device *bdev;
43070+ int ret;
43071+
43072+ bo = ttm_buffer_object_lookup(tfile, req->handle);
43073+ if (unlikely(bo == NULL)) {
43074+ printk(KERN_ERR
43075+ "Could not find buffer object for setstatus.\n");
43076+ return -EINVAL;
43077+ }
43078+
43079+ bdev = bo->bdev;
43080+
43081+ ret = ttm_read_lock(lock, true);
43082+ if (unlikely(ret != 0))
43083+ goto out_err0;
43084+
43085+ ret = ttm_bo_reserve(bo, true, false, false, 0);
43086+ if (unlikely(ret != 0))
43087+ goto out_err1;
43088+
43089+ ret = ttm_bo_wait_cpu(bo, false);
43090+ if (unlikely(ret != 0))
43091+ goto out_err2;
43092+
43093+ mutex_lock(&bo->mutex);
43094+ ret = ttm_bo_check_placement(bo, req->set_placement,
43095+ req->clr_placement);
43096+ if (unlikely(ret != 0))
43097+ goto out_err2;
43098+
43099+ bo->proposed_flags = (bo->proposed_flags | req->set_placement)
43100+ & ~req->clr_placement;
43101+ ret = ttm_buffer_object_validate(bo, true, false);
43102+ if (unlikely(ret != 0))
43103+ goto out_err2;
43104+
43105+ ttm_pl_fill_rep(bo, rep);
43106+ out_err2:
43107+ mutex_unlock(&bo->mutex);
43108+ ttm_bo_unreserve(bo);
43109+ out_err1:
43110+ ttm_read_unlock(lock);
43111+ out_err0:
43112+ ttm_bo_unref(&bo);
43113+ return ret;
43114+}
43115+
43116+int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
43117+{
43118+ struct ttm_pl_waitidle_arg *arg = data;
43119+ struct ttm_buffer_object *bo;
43120+ int ret;
43121+
43122+ bo = ttm_buffer_object_lookup(tfile, arg->handle);
43123+ if (unlikely(bo == NULL)) {
43124+ printk(KERN_ERR "Could not find buffer object for waitidle.\n");
43125+ return -EINVAL;
43126+ }
43127+
43128+ ret =
43129+ ttm_bo_block_reservation(bo, true,
43130+ arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
43131+ if (unlikely(ret != 0))
43132+ goto out;
43133+ mutex_lock(&bo->mutex);
43134+ ret = ttm_bo_wait(bo,
43135+ arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
43136+ true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
43137+ mutex_unlock(&bo->mutex);
43138+ ttm_bo_unblock_reservation(bo);
43139+ out:
43140+ ttm_bo_unref(&bo);
43141+ return ret;
43142+}
43143+
43144+int ttm_pl_verify_access(struct ttm_buffer_object *bo,
43145+ struct ttm_object_file *tfile)
43146+{
43147+ struct ttm_bo_user_object *ubo;
43148+
43149+ /*
43150+ * Check bo subclass.
43151+ */
43152+
43153+ if (unlikely(bo->destroy != &ttm_bo_user_destroy))
43154+ return -EPERM;
43155+
43156+ ubo = container_of(bo, struct ttm_bo_user_object, bo);
43157+ if (likely(ubo->base.shareable || ubo->base.tfile == tfile))
43158+ return 0;
43159+
43160+ return -EPERM;
43161+}
43162diff --git a/drivers/gpu/drm/psb/ttm/ttm_placement_user.h b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h
43163new file mode 100644
43164index 0000000..9f69cdc
43165--- /dev/null
43166+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h
43167@@ -0,0 +1,259 @@
43168+/**************************************************************************
43169+ *
43170+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
43171+ * All Rights Reserved.
43172+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
43173+ * All Rights Reserved.
43174+ *
43175+ * Permission is hereby granted, free of charge, to any person obtaining a
43176+ * copy of this software and associated documentation files (the
43177+ * "Software"), to deal in the Software without restriction, including
43178+ * without limitation the rights to use, copy, modify, merge, publish,
43179+ * distribute, sub license, and/or sell copies of the Software, and to
43180+ * permit persons to whom the Software is furnished to do so, subject to
43181+ * the following conditions:
43182+ *
43183+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
43184+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
43185+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
43186+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
43187+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
43188+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
43189+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
43190+ *
43191+ * The above copyright notice and this permission notice (including the
43192+ * next paragraph) shall be included in all copies or substantial portions
43193+ * of the Software.
43194+ *
43195+ **************************************************************************/
43196+/*
43197+ * Authors
43198+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
43199+ */
43200+
43201+#ifndef _TTM_PLACEMENT_USER_H_
43202+#define _TTM_PLACEMENT_USER_H_
43203+
43204+#if !defined(__KERNEL__) && !defined(_KERNEL)
43205+#include <stdint.h>
43206+#else
43207+#include <linux/kernel.h>
43208+#endif
43209+
43210+#include "ttm/ttm_placement_common.h"
43211+
43212+#define TTM_PLACEMENT_MAJOR 0
43213+#define TTM_PLACEMENT_MINOR 1
43214+#define TTM_PLACEMENT_PL 0
43215+#define TTM_PLACEMENT_DATE "080819"
43216+
43217+/**
43218+ * struct ttm_pl_create_req
43219+ *
43220+ * @size: The buffer object size.
43221+ * @placement: Flags that indicate initial acceptable
43222+ * placement.
43223+ * @page_alignment: Required alignment in pages.
43224+ *
43225+ * Input to the TTM_BO_CREATE ioctl.
43226+ */
43227+
43228+struct ttm_pl_create_req {
43229+ uint64_t size;
43230+ uint32_t placement;
43231+ uint32_t page_alignment;
43232+};
43233+
43234+/**
43235+ * struct ttm_pl_create_ub_req
43236+ *
43237+ * @size: The buffer object size.
43238+ * @user_address: User-space address of the memory area that
43239+ * should be used to back the buffer object cast to 64-bit.
43240+ * @placement: Flags that indicate initial acceptable
43241+ * placement.
43242+ * @page_alignment: Required alignment in pages.
43243+ *
43244+ * Input to the TTM_BO_CREATE_UB ioctl.
43245+ */
43246+
43247+struct ttm_pl_create_ub_req {
43248+ uint64_t size;
43249+ uint64_t user_address;
43250+ uint32_t placement;
43251+ uint32_t page_alignment;
43252+};
43253+
43254+/**
43255+ * struct ttm_pl_rep
43256+ *
43257+ * @gpu_offset: The current offset into the memory region used.
43258+ * This can be used directly by the GPU if there are no
43259+ * additional GPU mapping procedures used by the driver.
43260+ *
43261+ * @bo_size: Actual buffer object size.
43262+ *
43263+ * @map_handle: Offset into the device address space.
43264+ * Used for map, seek, read, write. This will never change
43265+ * during the lifetime of an object.
43266+ *
43267+ * @placement: Flag indicating the placement status of
43268+ * the buffer object using the TTM_PL flags above.
43269+ *
43270+ * @sync_object_arg: Used for user-space synchronization and
43271+ * depends on the synchronization model used. If fences are
43272+ * used, this is the buffer_object::fence_type_mask
43273+ *
43274+ * Output from the TTM_PL_CREATE and TTM_PL_REFERENCE, and
43275+ * TTM_PL_SETSTATUS ioctls.
43276+ */
43277+
43278+struct ttm_pl_rep {
43279+ uint64_t gpu_offset;
43280+ uint64_t bo_size;
43281+ uint64_t map_handle;
43282+ uint32_t placement;
43283+ uint32_t handle;
43284+ uint32_t sync_object_arg;
43285+ uint32_t pad64;
43286+};
43287+
43288+/**
43289+ * struct ttm_pl_setstatus_req
43290+ *
43291+ * @set_placement: Placement flags to set.
43292+ *
43293+ * @clr_placement: Placement flags to clear.
43294+ *
43295+ * @handle: The object handle
43296+ *
43297+ * Input to the TTM_PL_SETSTATUS ioctl.
43298+ */
43299+
43300+struct ttm_pl_setstatus_req {
43301+ uint32_t set_placement;
43302+ uint32_t clr_placement;
43303+ uint32_t handle;
43304+ uint32_t pad64;
43305+};
43306+
43307+/**
43308+ * struct ttm_pl_reference_req
43309+ *
43310+ * @handle: The object to put a reference on.
43311+ *
43312+ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREFERENCE ioctls.
43313+ */
43314+
43315+struct ttm_pl_reference_req {
43316+ uint32_t handle;
43317+ uint32_t pad64;
43318+};
43319+
43320+/*
43321+ * ACCESS mode flags for SYNCCPU.
43322+ *
43323+ * TTM_SYNCCPU_MODE_READ will guarantee that the GPU is not
43324+ * writing to the buffer.
43325+ *
43326+ * TTM_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
43327+ * accessing the buffer.
43328+ *
43329+ * TTM_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
43330+ * for GPU accesses to finish but return -EBUSY.
43331+ *
43332+ * TTM_SYNCCPU_MODE_TRYCACHED Try to place the buffer in cacheable
43333+ * memory while synchronized for CPU.
43334+ */
43335+
43336+#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ
43337+#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE
43338+#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2)
43339+#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)
43340+
43341+/**
43342+ * struct ttm_pl_synccpu_arg
43343+ *
43344+ * @handle: The object to synchronize.
43345+ *
43346+ * @access_mode: access mode indicated by the
43347+ * TTM_SYNCCPU_MODE flags.
43348+ *
43349+ * @op: indicates whether to grab or release the
43350+ * buffer for cpu usage.
43351+ *
43352+ * Input to the TTM_PL_SYNCCPU ioctl.
43353+ */
43354+
43355+struct ttm_pl_synccpu_arg {
43356+ uint32_t handle;
43357+ uint32_t access_mode;
43358+ enum {
43359+ TTM_PL_SYNCCPU_OP_GRAB,
43360+ TTM_PL_SYNCCPU_OP_RELEASE
43361+ } op;
43362+ uint32_t pad64;
43363+};
43364+
43365+/*
43366+ * Waiting mode flags for the TTM_BO_WAITIDLE ioctl.
43367+ *
43368+ * TTM_WAITIDLE_MODE_LAZY: Allow for sleeps during polling
43369+ * wait.
43370+ *
43371+ * TTM_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for GPU,
43372+ * but return -EBUSY if the buffer is busy.
43373+ */
43374+
43375+#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0)
43376+#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)
43377+
43378+/**
43379+ * struct ttm_waitidle_arg
43380+ *
43381+ * @handle: The object to synchronize.
43382+ *
43383+ * @mode: wait mode indicated by the
43384+ * TTM_SYNCCPU_MODE flags.
43385+ *
43386+ * Argument to the TTM_BO_WAITIDLE ioctl.
43387+ */
43388+
43389+struct ttm_pl_waitidle_arg {
43390+ uint32_t handle;
43391+ uint32_t mode;
43392+};
43393+
43394+union ttm_pl_create_arg {
43395+ struct ttm_pl_create_req req;
43396+ struct ttm_pl_rep rep;
43397+};
43398+
43399+union ttm_pl_reference_arg {
43400+ struct ttm_pl_reference_req req;
43401+ struct ttm_pl_rep rep;
43402+};
43403+
43404+union ttm_pl_setstatus_arg {
43405+ struct ttm_pl_setstatus_req req;
43406+ struct ttm_pl_rep rep;
43407+};
43408+
43409+union ttm_pl_create_ub_arg {
43410+ struct ttm_pl_create_ub_req req;
43411+ struct ttm_pl_rep rep;
43412+};
43413+
43414+/*
43415+ * Ioctl offsets.
43416+ */
43417+
43418+#define TTM_PL_CREATE 0x00
43419+#define TTM_PL_REFERENCE 0x01
43420+#define TTM_PL_UNREF 0x02
43421+#define TTM_PL_SYNCCPU 0x03
43422+#define TTM_PL_WAITIDLE 0x04
43423+#define TTM_PL_SETSTATUS 0x05
43424+#define TTM_PL_CREATE_UB 0x06
43425+
43426+#endif
43427diff --git a/drivers/gpu/drm/psb/ttm/ttm_regman.h b/drivers/gpu/drm/psb/ttm/ttm_regman.h
43428new file mode 100644
43429index 0000000..5db5eda
43430--- /dev/null
43431+++ b/drivers/gpu/drm/psb/ttm/ttm_regman.h
43432@@ -0,0 +1,74 @@
43433+/**************************************************************************
43434+ *
43435+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
43436+ * All Rights Reserved.
43437+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
43438+ * All Rights Reserved.
43439+ *
43440+ * Permission is hereby granted, free of charge, to any person obtaining a
43441+ * copy of this software and associated documentation files (the
43442+ * "Software"), to deal in the Software without restriction, including
43443+ * without limitation the rights to use, copy, modify, merge, publish,
43444+ * distribute, sub license, and/or sell copies of the Software, and to
43445+ * permit persons to whom the Software is furnished to do so, subject to
43446+ * the following conditions:
43447+ *
43448+ * The above copyright notice and this permission notice (including the
43449+ * next paragraph) shall be included in all copies or substantial portions
43450+ * of the Software.
43451+ *
43452+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
43453+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
43454+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
43455+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
43456+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
43457+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
43458+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
43459+ *
43460+ **************************************************************************/
43461+/*
43462+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
43463+ */
43464+
43465+#ifndef _TTM_REGMAN_H_
43466+#define _TTM_REGMAN_H_
43467+
43468+#include <linux/list.h>
43469+
43470+struct ttm_fence_object;
43471+
43472+struct ttm_reg {
43473+ struct list_head head;
43474+ struct ttm_fence_object *fence;
43475+ uint32_t fence_type;
43476+ uint32_t new_fence_type;
43477+};
43478+
43479+struct ttm_reg_manager {
43480+ struct list_head free;
43481+ struct list_head lru;
43482+ struct list_head unfenced;
43483+
43484+ int (*reg_reusable)(const struct ttm_reg *reg, const void *data);
43485+ void (*reg_destroy)(struct ttm_reg *reg);
43486+};
43487+
43488+extern int ttm_regs_alloc(struct ttm_reg_manager *manager,
43489+ const void *data,
43490+ uint32_t fence_class,
43491+ uint32_t fence_type,
43492+ int interruptible,
43493+ int no_wait,
43494+ struct ttm_reg **reg);
43495+
43496+extern void ttm_regs_fence(struct ttm_reg_manager *regs,
43497+ struct ttm_fence_object *fence);
43498+
43499+extern void ttm_regs_free(struct ttm_reg_manager *manager);
43500+extern void ttm_regs_add(struct ttm_reg_manager *manager, struct ttm_reg *reg);
43501+extern void ttm_regs_init(struct ttm_reg_manager *manager,
43502+ int (*reg_reusable)(const struct ttm_reg *,
43503+ const void *),
43504+ void (*reg_destroy)(struct ttm_reg *));
43505+
43506+#endif
43507diff --git a/drivers/gpu/drm/psb/ttm/ttm_tt.c b/drivers/gpu/drm/psb/ttm/ttm_tt.c
43508new file mode 100644
43509index 0000000..5119aec
43510--- /dev/null
43511+++ b/drivers/gpu/drm/psb/ttm/ttm_tt.c
43512@@ -0,0 +1,655 @@
43513+/**************************************************************************
43514+ *
43515+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
43516+ * All Rights Reserved.
43517+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
43518+ * All Rights Reserved.
43519+ *
43520+ * Permission is hereby granted, free of charge, to any person obtaining a
43521+ * copy of this software and associated documentation files (the
43522+ * "Software"), to deal in the Software without restriction, including
43523+ * without limitation the rights to use, copy, modify, merge, publish,
43524+ * distribute, sub license, and/or sell copies of the Software, and to
43525+ * permit persons to whom the Software is furnished to do so, subject to
43526+ * the following conditions:
43527+ *
43528+ * The above copyright notice and this permission notice (including the
43529+ * next paragraph) shall be included in all copies or substantial portions
43530+ * of the Software.
43531+ *
43532+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
43533+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
43534+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
43535+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
43536+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
43537+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
43538+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
43539+ *
43540+ **************************************************************************/
43541+/*
43542+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
43543+ */
43544+
43545+#include <linux/version.h>
43546+#include <linux/vmalloc.h>
43547+#include <linux/sched.h>
43548+#include <linux/highmem.h>
43549+#include <linux/pagemap.h>
43550+#include <linux/file.h>
43551+#include <linux/swap.h>
43552+#include "ttm/ttm_bo_driver.h"
43553+#include "ttm/ttm_placement_common.h"
43554+
43555+static int ttm_tt_swapin(struct ttm_tt *ttm);
43556+
43557+#if defined( CONFIG_X86 )
43558+static void ttm_tt_clflush_page(struct page *page)
43559+{
43560+ uint8_t *page_virtual;
43561+ unsigned int i;
43562+
43563+ if (unlikely(page == NULL))
43564+ return;
43565+
43566+ page_virtual = kmap_atomic(page, KM_USER0);
43567+
43568+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
43569+ clflush(page_virtual + i);
43570+
43571+ kunmap_atomic(page_virtual, KM_USER0);
43572+}
43573+
43574+static void ttm_tt_cache_flush_clflush(struct page *pages[],
43575+ unsigned long num_pages)
43576+{
43577+ unsigned long i;
43578+
43579+ mb();
43580+ for (i = 0; i < num_pages; ++i)
43581+ ttm_tt_clflush_page(*pages++);
43582+ mb();
43583+}
43584+#else
43585+static void ttm_tt_ipi_handler(void *null)
43586+{
43587+ ;
43588+}
43589+#endif
43590+
43591+void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
43592+{
43593+
43594+#if defined( CONFIG_X86 )
43595+ if (cpu_has_clflush) {
43596+ ttm_tt_cache_flush_clflush(pages, num_pages);
43597+ return;
43598+ }
43599+#else
43600+ if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1, 1) != 0)
43601+ printk(KERN_ERR "Timed out waiting for drm cache flush.\n");
43602+#endif
43603+}
43604+
43605+/**
43606+ * Allocates storage for pointers to the pages that back the ttm.
43607+ *
43608+ * Uses kmalloc if possible. Otherwise falls back to vmalloc.
43609+ */
43610+static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
43611+{
43612+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
43613+ ttm->pages = NULL;
43614+
43615+ if (size <= PAGE_SIZE)
43616+ ttm->pages = kzalloc(size, GFP_KERNEL);
43617+
43618+ if (!ttm->pages) {
43619+ ttm->pages = vmalloc_user(size);
43620+ if (ttm->pages)
43621+ ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
43622+ }
43623+}
43624+
43625+static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
43626+{
43627+ if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
43628+ vfree(ttm->pages);
43629+ ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
43630+ } else {
43631+ kfree(ttm->pages);
43632+ }
43633+ ttm->pages = NULL;
43634+}
43635+
43636+static struct page *ttm_tt_alloc_page(void)
43637+{
43638+ return alloc_page(GFP_KERNEL | __GFP_ZERO);
43639+}
43640+
43641+static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
43642+{
43643+ int write;
43644+ int dirty;
43645+ struct page *page;
43646+ int i;
43647+ struct ttm_backend *be = ttm->be;
43648+
43649+ BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
43650+ write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
43651+ dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
43652+
43653+ if (be)
43654+ be->func->clear(be);
43655+
43656+ for (i = 0; i < ttm->num_pages; ++i) {
43657+ page = ttm->pages[i];
43658+ if (page == NULL)
43659+ continue;
43660+
43661+ if (page == ttm->dummy_read_page) {
43662+ BUG_ON(write);
43663+ continue;
43664+ }
43665+
43666+ if (write && dirty && !PageReserved(page))
43667+ set_page_dirty_lock(page);
43668+
43669+ ttm->pages[i] = NULL;
43670+ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
43671+ put_page(page);
43672+ }
43673+ ttm->state = tt_unpopulated;
43674+ ttm->first_himem_page = ttm->num_pages;
43675+ ttm->last_lomem_page = -1;
43676+}
43677+
43678+static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
43679+{
43680+ struct page *p;
43681+ struct ttm_bo_device *bdev = ttm->bdev;
43682+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
43683+ int ret;
43684+
43685+ while (NULL == (p = ttm->pages[index])) {
43686+ p = ttm_tt_alloc_page();
43687+
43688+ if (!p)
43689+ return NULL;
43690+
43691+ if (PageHighMem(p)) {
43692+ ret =
43693+ ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, true);
43694+ if (unlikely(ret != 0))
43695+ goto out_err;
43696+ ttm->pages[--ttm->first_himem_page] = p;
43697+ } else {
43698+ ret =
43699+ ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, false);
43700+ if (unlikely(ret != 0))
43701+ goto out_err;
43702+ ttm->pages[++ttm->last_lomem_page] = p;
43703+ }
43704+ }
43705+ return p;
43706+ out_err:
43707+ put_page(p);
43708+ return NULL;
43709+}
43710+
43711+struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
43712+{
43713+ int ret;
43714+
43715+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
43716+ ret = ttm_tt_swapin(ttm);
43717+ if (unlikely(ret != 0))
43718+ return NULL;
43719+ }
43720+ return __ttm_tt_get_page(ttm, index);
43721+}
43722+
43723+int ttm_tt_populate(struct ttm_tt *ttm)
43724+{
43725+ struct page *page;
43726+ unsigned long i;
43727+ struct ttm_backend *be;
43728+ int ret;
43729+
43730+ if (ttm->state != tt_unpopulated)
43731+ return 0;
43732+
43733+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
43734+ ret = ttm_tt_swapin(ttm);
43735+ if (unlikely(ret != 0))
43736+ return ret;
43737+ }
43738+
43739+ be = ttm->be;
43740+
43741+ for (i = 0; i < ttm->num_pages; ++i) {
43742+ page = __ttm_tt_get_page(ttm, i);
43743+ if (!page)
43744+ return -ENOMEM;
43745+ }
43746+
43747+ be->func->populate(be, ttm->num_pages, ttm->pages,
43748+ ttm->dummy_read_page);
43749+ ttm->state = tt_unbound;
43750+ return 0;
43751+}
43752+
43753+#ifdef CONFIG_X86
43754+static inline int ttm_tt_set_page_caching(struct page *p,
43755+ enum ttm_caching_state c_state)
43756+{
43757+ if (PageHighMem(p))
43758+ return 0;
43759+
43760+ switch (c_state) {
43761+ case tt_cached:
43762+ return set_pages_wb(p, 1);
43763+ case tt_wc:
43764+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
43765+ return set_memory_wc((unsigned long) page_address(p), 1);
43766+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) */
43767+ default:
43768+ return set_pages_uc(p, 1);
43769+ }
43770+}
43771+#else /* CONFIG_X86 */
43772+static inline int ttm_tt_set_page_caching(struct page *p,
43773+ enum ttm_caching_state c_state)
43774+{
43775+ return 0;
43776+}
43777+#endif /* CONFIG_X86 */
43778+
43779+/*
43780+ * Change caching policy for the linear kernel map
43781+ * for range of pages in a ttm.
43782+ */
43783+
43784+static int ttm_tt_set_caching(struct ttm_tt *ttm,
43785+ enum ttm_caching_state c_state)
43786+{
43787+ int i, j;
43788+ struct page *cur_page;
43789+ int ret;
43790+
43791+ if (ttm->caching_state == c_state)
43792+ return 0;
43793+
43794+ if (c_state != tt_cached) {
43795+ ret = ttm_tt_populate(ttm);
43796+ if (unlikely(ret != 0))
43797+ return ret;
43798+ }
43799+
43800+ if (ttm->caching_state == tt_cached)
43801+ ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
43802+
43803+ for (i = 0; i < ttm->num_pages; ++i) {
43804+ cur_page = ttm->pages[i];
43805+ if (likely(cur_page != NULL)) {
43806+ ret = ttm_tt_set_page_caching(cur_page, c_state);
43807+ if (unlikely(ret != 0))
43808+ goto out_err;
43809+ }
43810+ }
43811+
43812+ ttm->caching_state = c_state;
43813+
43814+ return 0;
43815+
43816+ out_err:
43817+ for (j = 0; j < i; ++j) {
43818+ cur_page = ttm->pages[j];
43819+ if (likely(cur_page != NULL)) {
43820+ (void)ttm_tt_set_page_caching(cur_page,
43821+ ttm->caching_state);
43822+ }
43823+ }
43824+
43825+ return ret;
43826+}
43827+
43828+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
43829+{
43830+ enum ttm_caching_state state;
43831+
43832+ if (placement & TTM_PL_FLAG_WC)
43833+ state = tt_wc;
43834+ else if (placement & TTM_PL_FLAG_UNCACHED)
43835+ state = tt_uncached;
43836+ else
43837+ state = tt_cached;
43838+
43839+ return ttm_tt_set_caching(ttm, state);
43840+}
43841+
43842+static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
43843+{
43844+ int i;
43845+ struct page *cur_page;
43846+ struct ttm_backend *be = ttm->be;
43847+
43848+ if (be)
43849+ be->func->clear(be);
43850+ (void)ttm_tt_set_caching(ttm, tt_cached);
43851+ for (i = 0; i < ttm->num_pages; ++i) {
43852+ cur_page = ttm->pages[i];
43853+ ttm->pages[i] = NULL;
43854+ if (cur_page) {
43855+ if (page_count(cur_page) != 1)
43856+ printk(KERN_ERR
43857+ "Erroneous page count. Leaking pages.\n");
43858+ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
43859+ PageHighMem(cur_page));
43860+ __free_page(cur_page);
43861+ }
43862+ }
43863+ ttm->state = tt_unpopulated;
43864+ ttm->first_himem_page = ttm->num_pages;
43865+ ttm->last_lomem_page = -1;
43866+}
43867+
43868+void ttm_tt_destroy(struct ttm_tt *ttm)
43869+{
43870+ struct ttm_backend *be;
43871+
43872+ if (unlikely(ttm == NULL))
43873+ return;
43874+
43875+ be = ttm->be;
43876+ if (likely(be != NULL)) {
43877+ be->func->destroy(be);
43878+ ttm->be = NULL;
43879+ }
43880+
43881+ if (likely(ttm->pages != NULL)) {
43882+ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
43883+ ttm_tt_free_user_pages(ttm);
43884+ else
43885+ ttm_tt_free_alloced_pages(ttm);
43886+
43887+ ttm_tt_free_page_directory(ttm);
43888+ }
43889+
43890+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
43891+ ttm->swap_storage)
43892+ fput(ttm->swap_storage);
43893+
43894+ kfree(ttm);
43895+}
43896+
43897+int ttm_tt_set_user(struct ttm_tt *ttm,
43898+ struct task_struct *tsk,
43899+ unsigned long start, unsigned long num_pages)
43900+{
43901+ struct mm_struct *mm = tsk->mm;
43902+ int ret;
43903+ int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
43904+ struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
43905+
43906+ BUG_ON(num_pages != ttm->num_pages);
43907+ BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
43908+
43909+ /**
43910+ * Account user pages as lowmem pages for now.
43911+ */
43912+
43913+ ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE, false, false, false);
43914+ if (unlikely(ret != 0))
43915+ return ret;
43916+
43917+ down_read(&mm->mmap_sem);
43918+ ret = get_user_pages(tsk, mm, start, num_pages,
43919+ write, 0, ttm->pages, NULL);
43920+ up_read(&mm->mmap_sem);
43921+
43922+ if (ret != num_pages && write) {
43923+ ttm_tt_free_user_pages(ttm);
43924+ ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
43925+ return -ENOMEM;
43926+ }
43927+
43928+ ttm->tsk = tsk;
43929+ ttm->start = start;
43930+ ttm->state = tt_unbound;
43931+
43932+ return 0;
43933+}
43934+
43935+struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
43936+ uint32_t page_flags, struct page *dummy_read_page)
43937+{
43938+ struct ttm_bo_driver *bo_driver = bdev->driver;
43939+ struct ttm_tt *ttm;
43940+
43941+ if (!bo_driver)
43942+ return NULL;
43943+
43944+ ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
43945+ if (!ttm)
43946+ return NULL;
43947+
43948+ ttm->bdev = bdev;
43949+
43950+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
43951+ ttm->first_himem_page = ttm->num_pages;
43952+ ttm->last_lomem_page = -1;
43953+ ttm->caching_state = tt_cached;
43954+ ttm->page_flags = page_flags;
43955+
43956+ ttm->dummy_read_page = dummy_read_page;
43957+
43958+ ttm_tt_alloc_page_directory(ttm);
43959+ if (!ttm->pages) {
43960+ ttm_tt_destroy(ttm);
43961+ printk(KERN_ERR "Failed allocating page table\n");
43962+ return NULL;
43963+ }
43964+ ttm->be = bo_driver->create_ttm_backend_entry(bdev);
43965+ if (!ttm->be) {
43966+ ttm_tt_destroy(ttm);
43967+ printk(KERN_ERR "Failed creating ttm backend entry\n");
43968+ return NULL;
43969+ }
43970+ ttm->state = tt_unpopulated;
43971+ return ttm;
43972+}
43973+
43974+/**
43975+ * ttm_tt_unbind:
43976+ *
43977+ * @ttm: the object to unbind from the graphics device
43978+ *
43979+ * Unbind an object from the aperture. This removes the mappings
43980+ * from the graphics device and flushes caches if necessary.
43981+ */
43982+void ttm_tt_unbind(struct ttm_tt *ttm)
43983+{
43984+ int ret;
43985+ struct ttm_backend *be = ttm->be;
43986+
43987+ if (ttm->state == tt_bound) {
43988+ ret = be->func->unbind(be);
43989+ BUG_ON(ret);
43990+ }
43991+ ttm->state = tt_unbound;
43992+}
43993+
43994+/**
43995+ * ttm_tt_bind:
43996+ *
43997+ * @ttm: the ttm object to bind to the graphics device
43998+ *
43999+ * @bo_mem: the aperture memory region which will hold the object
44000+ *
44001+ * Bind a ttm object to the aperture. This ensures that the necessary
44002+ * pages are allocated, flushes CPU caches as needed and marks the
44003+ * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been
44004+ * modified by the GPU
44005+ */
44006+
44007+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
44008+{
44009+ int ret = 0;
44010+ struct ttm_backend *be;
44011+
44012+ if (!ttm)
44013+ return -EINVAL;
44014+
44015+ if (ttm->state == tt_bound)
44016+ return 0;
44017+
44018+ be = ttm->be;
44019+
44020+ ret = ttm_tt_populate(ttm);
44021+ if (ret)
44022+ return ret;
44023+
44024+ ret = be->func->bind(be, bo_mem);
44025+ if (ret) {
44026+ printk(KERN_ERR "Couldn't bind backend.\n");
44027+ return ret;
44028+ }
44029+
44030+ ttm->state = tt_bound;
44031+
44032+ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
44033+ ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
44034+ return 0;
44035+}
44036+
44037+static int ttm_tt_swapin(struct ttm_tt *ttm)
44038+{
44039+ struct address_space *swap_space;
44040+ struct file *swap_storage;
44041+ struct page *from_page;
44042+ struct page *to_page;
44043+ void *from_virtual;
44044+ void *to_virtual;
44045+ int i;
44046+ int ret;
44047+
44048+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
44049+ ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
44050+ ttm->num_pages);
44051+ if (unlikely(ret != 0))
44052+ return ret;
44053+
44054+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
44055+ return 0;
44056+ }
44057+
44058+ swap_storage = ttm->swap_storage;
44059+ BUG_ON(swap_storage == NULL);
44060+
44061+ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
44062+
44063+ for (i = 0; i < ttm->num_pages; ++i) {
44064+ from_page = read_mapping_page(swap_space, i, NULL);
44065+ if (IS_ERR(from_page))
44066+ goto out_err;
44067+ to_page = __ttm_tt_get_page(ttm, i);
44068+ if (unlikely(to_page == NULL))
44069+ goto out_err;
44070+
44071+ preempt_disable();
44072+ from_virtual = kmap_atomic(from_page, KM_USER0);
44073+ to_virtual = kmap_atomic(to_page, KM_USER1);
44074+ memcpy(to_virtual, from_virtual, PAGE_SIZE);
44075+ kunmap_atomic(to_virtual, KM_USER1);
44076+ kunmap_atomic(from_virtual, KM_USER0);
44077+ preempt_enable();
44078+ page_cache_release(from_page);
44079+ }
44080+
44081+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
44082+ fput(swap_storage);
44083+ ttm->swap_storage = NULL;
44084+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
44085+
44086+ return 0;
44087+ out_err:
44088+ ttm_tt_free_alloced_pages(ttm);
44089+ return -ENOMEM;
44090+}
44091+
44092+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
44093+{
44094+ struct address_space *swap_space;
44095+ struct file *swap_storage;
44096+ struct page *from_page;
44097+ struct page *to_page;
44098+ void *from_virtual;
44099+ void *to_virtual;
44100+ int i;
44101+
44102+ BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
44103+ BUG_ON(ttm->caching_state != tt_cached);
44104+
44105+ /*
44106+ * For user buffers, just unpin the pages, as there should be
44107+ * vma references.
44108+ */
44109+
44110+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
44111+ ttm_tt_free_user_pages(ttm);
44112+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
44113+ ttm->swap_storage = NULL;
44114+ return 0;
44115+ }
44116+
44117+ if (!persistant_swap_storage) {
44118+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
44119+ swap_storage = shmem_file_setup("ttm swap",
44120+ ttm->num_pages << PAGE_SHIFT,
44121+ 0);
44122+ if (unlikely(IS_ERR(swap_storage))) {
44123+ printk(KERN_ERR "Failed allocating swap storage.\n");
44124+ return -ENOMEM;
44125+ }
44126+#else
44127+ return -ENOMEM;
44128+#endif
44129+ } else
44130+ swap_storage = persistant_swap_storage;
44131+
44132+ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
44133+
44134+ for (i = 0; i < ttm->num_pages; ++i) {
44135+ from_page = ttm->pages[i];
44136+ if (unlikely(from_page == NULL))
44137+ continue;
44138+ to_page = read_mapping_page(swap_space, i, NULL);
44139+ if (unlikely(to_page == NULL))
44140+ goto out_err;
44141+
44142+ preempt_disable();
44143+ from_virtual = kmap_atomic(from_page, KM_USER0);
44144+ to_virtual = kmap_atomic(to_page, KM_USER1);
44145+ memcpy(to_virtual, from_virtual, PAGE_SIZE);
44146+ kunmap_atomic(to_virtual, KM_USER1);
44147+ kunmap_atomic(from_virtual, KM_USER0);
44148+ preempt_enable();
44149+ set_page_dirty(to_page);
44150+ mark_page_accessed(to_page);
44151+// unlock_page(to_page);
44152+ page_cache_release(to_page);
44153+ }
44154+
44155+ ttm_tt_free_alloced_pages(ttm);
44156+ ttm->swap_storage = swap_storage;
44157+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
44158+ if (persistant_swap_storage)
44159+ ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
44160+
44161+ return 0;
44162+ out_err:
44163+ if (!persistant_swap_storage)
44164+ fput(swap_storage);
44165+
44166+ return -ENOMEM;
44167+}
44168diff --git a/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h
44169new file mode 100644
44170index 0000000..5309050
44171--- /dev/null
44172+++ b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h
44173@@ -0,0 +1,79 @@
44174+/**************************************************************************
44175+ *
44176+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
44177+ * All Rights Reserved.
44178+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
44179+ * All Rights Reserved.
44180+ *
44181+ * Permission is hereby granted, free of charge, to any person obtaining a
44182+ * copy of this software and associated documentation files (the
44183+ * "Software"), to deal in the Software without restriction, including
44184+ * without limitation the rights to use, copy, modify, merge, publish,
44185+ * distribute, sub license, and/or sell copies of the Software, and to
44186+ * permit persons to whom the Software is furnished to do so, subject to
44187+ * the following conditions:
44188+ *
44189+ * The above copyright notice and this permission notice (including the
44190+ * next paragraph) shall be included in all copies or substantial portions
44191+ * of the Software.
44192+ *
44193+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
44194+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
44195+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
44196+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
44197+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
44198+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
44199+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
44200+ *
44201+ **************************************************************************/
44202+/*
44203+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
44204+ */
44205+
44206+#ifndef _TTM_USEROBJ_API_H_
44207+#define _TTM_USEROBJ_API_H_
44208+
44209+#include "ttm/ttm_placement_user.h"
44210+#include "ttm/ttm_fence_user.h"
44211+#include "ttm/ttm_object.h"
44212+#include "ttm/ttm_fence_api.h"
44213+#include "ttm/ttm_bo_api.h"
44214+
44215+struct ttm_lock;
44216+
44217+/*
44218+ * User ioctls.
44219+ */
44220+
44221+extern int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
44222+ struct ttm_bo_device *bdev,
44223+ struct ttm_lock *lock, void *data);
44224+extern int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
44225+ struct ttm_bo_device *bdev,
44226+ struct ttm_lock *lock, void *data);
44227+extern int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data);
44228+extern int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data);
44229+extern int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data);
44230+extern int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
44231+ struct ttm_lock *lock, void *data);
44232+extern int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data);
44233+extern int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data);
44234+extern int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data);
44235+extern int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data);
44236+
44237+extern int
44238+ttm_fence_user_create(struct ttm_fence_device *fdev,
44239+ struct ttm_object_file *tfile,
44240+ uint32_t fence_class,
44241+ uint32_t fence_types,
44242+ uint32_t create_flags,
44243+ struct ttm_fence_object **fence, uint32_t * user_handle);
44244+
44245+extern struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
44246+ *tfile,
44247+ uint32_t handle);
44248+
44249+extern int
44250+ttm_pl_verify_access(struct ttm_buffer_object *bo,
44251+ struct ttm_object_file *tfile);
44252+#endif
44253diff --git a/include/drm/drmP.h b/include/drm/drmP.h
44254index 5575b9a..9c0b919 100644
44255--- a/include/drm/drmP.h
44256+++ b/include/drm/drmP.h
44257@@ -1101,6 +1109,8 @@ extern int drm_init(struct drm_driver *driver);
44258 extern void drm_exit(struct drm_driver *driver);
44259 extern int drm_ioctl(struct inode *inode, struct file *filp,
44260 unsigned int cmd, unsigned long arg);
44261+extern long drm_unlocked_ioctl(struct file *filp,
44262+ unsigned int cmd, unsigned long arg);
44263 extern long drm_compat_ioctl(struct file *filp,
44264 unsigned int cmd, unsigned long arg);
44265 extern int drm_lastclose(struct drm_device *dev);
44266@@ -1514,5 +1524,25 @@ static __inline void drm_free_large(void *ptr)
44267 }
44268 /*@}*/
44269
44270+enum drm_global_types {
44271+ DRM_GLOBAL_TTM_MEM = 0,
44272+ DRM_GLOBAL_TTM_BO,
44273+ DRM_GLOBAL_TTM_OBJECT,
44274+ DRM_GLOBAL_NUM
44275+};
44276+
44277+struct drm_global_reference {
44278+ enum drm_global_types global_type;
44279+ size_t size;
44280+ void *object;
44281+ int (*init) (struct drm_global_reference *);
44282+ void (*release) (struct drm_global_reference *);
44283+};
44284+
44285+extern void drm_global_init(void);
44286+extern void drm_global_release(void);
44287+extern int drm_global_item_ref(struct drm_global_reference *ref);
44288+extern void drm_global_item_unref(struct drm_global_reference *ref);
44289+
44290 #endif /* __KERNEL__ */
44291 #endif
44292diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
44293index ae304cc..43a62a8 100644
44294--- a/include/drm/drm_mode.h
44295+++ b/include/drm/drm_mode.h
44296@@ -121,6 +121,7 @@ struct drm_mode_crtc {
44297 #define DRM_MODE_ENCODER_TMDS 2
44298 #define DRM_MODE_ENCODER_LVDS 3
44299 #define DRM_MODE_ENCODER_TVDAC 4
44300+#define DRM_MODE_ENCODER_MIPI 5
44301
44302 struct drm_mode_get_encoder {
44303 __u32 encoder_id;
44304@@ -155,6 +156,7 @@ struct drm_mode_get_encoder {
44305 #define DRM_MODE_CONNECTOR_DisplayPort 10
44306 #define DRM_MODE_CONNECTOR_HDMIA 11
44307 #define DRM_MODE_CONNECTOR_HDMIB 12
44308+#define DRM_MODE_CONNECTOR_MIPI 13
44309
44310 struct drm_mode_get_connector {
44311
44312diff --git a/include/linux/backlight.h b/include/linux/backlight.h
44313index 79ca2da..00d7255 100644
44314--- a/include/linux/backlight.h
44315+++ b/include/linux/backlight.h
44316@@ -87,6 +87,9 @@ struct backlight_device {
44317 struct notifier_block fb_notif;
44318
44319 struct device dev;
44320+
44321+ /* Private Backlight Data */
44322+ void *priv;
44323 };
44324
44325 static inline void backlight_update_status(struct backlight_device *bd)
44326--
443271.6.0.6
44328
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/close_debug_info_of_rt2860.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/close_debug_info_of_rt2860.patch
new file mode 100644
index 0000000000..4aa8469042
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/close_debug_info_of_rt2860.patch
@@ -0,0 +1,38 @@
1diff --git a/drivers/staging/rt2860/rt_linux.c b/drivers/staging/rt2860/rt_linux.c
2index 80176b2..fc682be 100644
3--- a/drivers/staging/rt2860/rt_linux.c
4+++ b/drivers/staging/rt2860/rt_linux.c
5@@ -27,7 +27,7 @@
6
7 #include "rt_config.h"
8
9-ULONG RTDebugLevel = RT_DEBUG_ERROR;
10+ULONG RTDebugLevel = RT_DEBUG_OFF;
11
12 BUILD_TIMER_FUNCTION(MlmePeriodicExec);
13 BUILD_TIMER_FUNCTION(AsicRxAntEvalTimeout);
14diff --git a/drivers/staging/rt2860/rt_linux.h b/drivers/staging/rt2860/rt_linux.h
15index 25b53ac..56ac9ac 100644
16--- a/drivers/staging/rt2860/rt_linux.h
17+++ b/drivers/staging/rt2860/rt_linux.h
18@@ -375,18 +375,9 @@ extern ULONG RTDebugLevel;
19
20 #define DBGPRINT(Level, Fmt) DBGPRINT_RAW(Level, Fmt)
21
22+#define DBGPRINT_ERR(Fmt)
23
24-#define DBGPRINT_ERR(Fmt) \
25-{ \
26- printk("ERROR!!! "); \
27- printk Fmt; \
28-}
29-
30-#define DBGPRINT_S(Status, Fmt) \
31-{ \
32- printk Fmt; \
33-}
34-
35+#define DBGPRINT_S(Status, Fmt)
36
37 #else
38 #define DBGPRINT(Level, Fmt)
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/defconfig-netbook b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/defconfig-netbook
new file mode 100644
index 0000000000..f88e5becb9
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/defconfig-netbook
@@ -0,0 +1,3220 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.rc7-4.netbook
4# Mon Dec 8 01:05:27 2008
5#
6# CONFIG_64BIT is not set
7CONFIG_X86_32=y
8# CONFIG_X86_64 is not set
9CONFIG_X86=y
10CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
11CONFIG_GENERIC_TIME=y
12CONFIG_GENERIC_CMOS_UPDATE=y
13CONFIG_CLOCKSOURCE_WATCHDOG=y
14CONFIG_GENERIC_CLOCKEVENTS=y
15CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
16CONFIG_LOCKDEP_SUPPORT=y
17CONFIG_STACKTRACE_SUPPORT=y
18CONFIG_HAVE_LATENCYTOP_SUPPORT=y
19CONFIG_FAST_CMPXCHG_LOCAL=y
20CONFIG_MMU=y
21CONFIG_ZONE_DMA=y
22CONFIG_GENERIC_ISA_DMA=y
23CONFIG_GENERIC_IOMAP=y
24CONFIG_GENERIC_BUG=y
25CONFIG_GENERIC_HWEIGHT=y
26CONFIG_ARCH_MAY_HAVE_PC_FDC=y
27# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
28CONFIG_RWSEM_XCHGADD_ALGORITHM=y
29CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
30CONFIG_GENERIC_CALIBRATE_DELAY=y
31# CONFIG_GENERIC_TIME_VSYSCALL is not set
32CONFIG_ARCH_HAS_CPU_RELAX=y
33CONFIG_ARCH_HAS_DEFAULT_IDLE=y
34CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
35CONFIG_HAVE_SETUP_PER_CPU_AREA=y
36# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
37CONFIG_ARCH_HIBERNATION_POSSIBLE=y
38CONFIG_ARCH_SUSPEND_POSSIBLE=y
39# CONFIG_ZONE_DMA32 is not set
40CONFIG_ARCH_POPULATES_NODE_MAP=y
41# CONFIG_AUDIT_ARCH is not set
42CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
43CONFIG_GENERIC_HARDIRQS=y
44CONFIG_GENERIC_IRQ_PROBE=y
45CONFIG_GENERIC_PENDING_IRQ=y
46CONFIG_X86_SMP=y
47CONFIG_USE_GENERIC_SMP_HELPERS=y
48CONFIG_X86_32_SMP=y
49CONFIG_X86_HT=y
50CONFIG_X86_BIOS_REBOOT=y
51CONFIG_X86_TRAMPOLINE=y
52CONFIG_KTIME_SCALAR=y
53CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
54
55#
56# General setup
57#
58CONFIG_EXPERIMENTAL=y
59CONFIG_LOCK_KERNEL=y
60CONFIG_INIT_ENV_ARG_LIMIT=32
61CONFIG_LOCALVERSION="-netbook"
62# CONFIG_LOCALVERSION_AUTO is not set
63CONFIG_SWAP=y
64CONFIG_SYSVIPC=y
65CONFIG_SYSVIPC_SYSCTL=y
66CONFIG_POSIX_MQUEUE=y
67CONFIG_BSD_PROCESS_ACCT=y
68CONFIG_BSD_PROCESS_ACCT_V3=y
69# CONFIG_TASKSTATS is not set
70# CONFIG_AUDIT is not set
71# CONFIG_IKCONFIG is not set
72CONFIG_LOG_BUF_SHIFT=17
73# CONFIG_CGROUPS is not set
74CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
75# CONFIG_GROUP_SCHED is not set
76# CONFIG_SYSFS_DEPRECATED_V2 is not set
77CONFIG_RELAY=y
78CONFIG_NAMESPACES=y
79# CONFIG_UTS_NS is not set
80# CONFIG_IPC_NS is not set
81# CONFIG_USER_NS is not set
82# CONFIG_PID_NS is not set
83CONFIG_BLK_DEV_INITRD=y
84CONFIG_INITRAMFS_SOURCE=""
85CONFIG_CC_OPTIMIZE_FOR_SIZE=y
86CONFIG_FASTBOOT=y
87CONFIG_SYSCTL=y
88# CONFIG_EMBEDDED is not set
89CONFIG_UID16=y
90CONFIG_SYSCTL_SYSCALL=y
91CONFIG_KALLSYMS=y
92CONFIG_KALLSYMS_ALL=y
93CONFIG_KALLSYMS_EXTRA_PASS=y
94CONFIG_KALLSYMS_STRIP_GENERATED=y
95CONFIG_HOTPLUG=y
96CONFIG_PRINTK=y
97CONFIG_BUG=y
98CONFIG_ELF_CORE=y
99CONFIG_PCSPKR_PLATFORM=y
100# CONFIG_COMPAT_BRK is not set
101CONFIG_BASE_FULL=y
102CONFIG_FUTEX=y
103CONFIG_ANON_INODES=y
104CONFIG_EPOLL=y
105CONFIG_SIGNALFD=y
106CONFIG_TIMERFD=y
107CONFIG_EVENTFD=y
108CONFIG_SHMEM=y
109CONFIG_AIO=y
110CONFIG_VM_EVENT_COUNTERS=y
111CONFIG_PCI_QUIRKS=y
112CONFIG_SLAB=y
113# CONFIG_SLUB is not set
114# CONFIG_SLOB is not set
115CONFIG_PROFILING=y
116# CONFIG_MARKERS is not set
117CONFIG_OPROFILE=y
118# CONFIG_OPROFILE_IBS is not set
119CONFIG_HAVE_OPROFILE=y
120# CONFIG_KPROBES is not set
121CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
122CONFIG_HAVE_IOREMAP_PROT=y
123CONFIG_HAVE_KPROBES=y
124CONFIG_HAVE_KRETPROBES=y
125CONFIG_HAVE_ARCH_TRACEHOOK=y
126CONFIG_HAVE_GENERIC_DMA_COHERENT=y
127CONFIG_SLABINFO=y
128CONFIG_RT_MUTEXES=y
129# CONFIG_TINY_SHMEM is not set
130CONFIG_BASE_SMALL=0
131CONFIG_MODULES=y
132# CONFIG_MODULE_FORCE_LOAD is not set
133CONFIG_MODULE_UNLOAD=y
134# CONFIG_MODULE_FORCE_UNLOAD is not set
135# CONFIG_MODVERSIONS is not set
136# CONFIG_MODULE_SRCVERSION_ALL is not set
137CONFIG_KMOD=y
138CONFIG_STOP_MACHINE=y
139CONFIG_BLOCK=y
140CONFIG_LBD=y
141CONFIG_BLK_DEV_IO_TRACE=y
142# CONFIG_LSF is not set
143CONFIG_BLK_DEV_BSG=y
144# CONFIG_BLK_DEV_INTEGRITY is not set
145
146#
147# IO Schedulers
148#
149CONFIG_IOSCHED_NOOP=y
150# CONFIG_IOSCHED_AS is not set
151# CONFIG_IOSCHED_DEADLINE is not set
152CONFIG_IOSCHED_CFQ=y
153# CONFIG_DEFAULT_AS is not set
154# CONFIG_DEFAULT_DEADLINE is not set
155CONFIG_DEFAULT_CFQ=y
156# CONFIG_DEFAULT_NOOP is not set
157CONFIG_DEFAULT_IOSCHED="cfq"
158CONFIG_CLASSIC_RCU=y
159CONFIG_FREEZER=y
160
161#
162# Processor type and features
163#
164CONFIG_TICK_ONESHOT=y
165CONFIG_NO_HZ=y
166CONFIG_HIGH_RES_TIMERS=y
167CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
168CONFIG_SMP=y
169# CONFIG_SPARSE_IRQ is not set
170CONFIG_X86_FIND_SMP_CONFIG=y
171CONFIG_X86_MPPARSE=y
172# CONFIG_X86_PC is not set
173# CONFIG_X86_ELAN is not set
174# CONFIG_X86_VOYAGER is not set
175CONFIG_X86_GENERICARCH=y
176# CONFIG_X86_NUMAQ is not set
177# CONFIG_X86_SUMMIT is not set
178# CONFIG_X86_ES7000 is not set
179# CONFIG_X86_BIGSMP is not set
180# CONFIG_X86_VSMP is not set
181# CONFIG_X86_RDC321X is not set
182CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
183# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
184# CONFIG_PARAVIRT_GUEST is not set
185# CONFIG_MEMTEST is not set
186CONFIG_X86_CYCLONE_TIMER=y
187# CONFIG_M386 is not set
188# CONFIG_M486 is not set
189# CONFIG_M586 is not set
190# CONFIG_M586TSC is not set
191# CONFIG_M586MMX is not set
192# CONFIG_M686 is not set
193# CONFIG_MPENTIUMII is not set
194# CONFIG_MPENTIUMIII is not set
195CONFIG_MPENTIUMM=y
196# CONFIG_MPENTIUM4 is not set
197# CONFIG_MK6 is not set
198# CONFIG_MK7 is not set
199# CONFIG_MK8 is not set
200# CONFIG_MCRUSOE is not set
201# CONFIG_MEFFICEON is not set
202# CONFIG_MWINCHIPC6 is not set
203# CONFIG_MWINCHIP3D is not set
204# CONFIG_MGEODEGX1 is not set
205# CONFIG_MGEODE_LX is not set
206# CONFIG_MCYRIXIII is not set
207# CONFIG_MVIAC3_2 is not set
208# CONFIG_MVIAC7 is not set
209# CONFIG_MPSC is not set
210# CONFIG_MCORE2 is not set
211# CONFIG_GENERIC_CPU is not set
212CONFIG_X86_GENERIC=y
213CONFIG_X86_CPU=y
214CONFIG_X86_CMPXCHG=y
215CONFIG_X86_L1_CACHE_SHIFT=7
216CONFIG_X86_XADD=y
217CONFIG_X86_WP_WORKS_OK=y
218CONFIG_X86_INVLPG=y
219CONFIG_X86_BSWAP=y
220CONFIG_X86_POPAD_OK=y
221CONFIG_X86_INTEL_USERCOPY=y
222CONFIG_X86_USE_PPRO_CHECKSUM=y
223CONFIG_X86_TSC=y
224CONFIG_X86_CMPXCHG64=y
225CONFIG_X86_CMOV=y
226CONFIG_X86_MINIMUM_CPU_FAMILY=4
227CONFIG_X86_DEBUGCTLMSR=y
228CONFIG_CPU_SUP_INTEL=y
229CONFIG_CPU_SUP_CYRIX_32=y
230CONFIG_CPU_SUP_AMD=y
231CONFIG_CPU_SUP_CENTAUR_32=y
232CONFIG_CPU_SUP_TRANSMETA_32=y
233CONFIG_CPU_SUP_UMC_32=y
234# CONFIG_X86_DS is not set
235# CONFIG_X86_PTRACE_BTS is not set
236CONFIG_HPET_TIMER=y
237CONFIG_HPET_EMULATE_RTC=y
238CONFIG_DMI=y
239# CONFIG_IOMMU_HELPER is not set
240CONFIG_NR_CPUS=8
241CONFIG_SCHED_SMT=y
242CONFIG_SCHED_MC=y
243# CONFIG_PREEMPT_NONE is not set
244# CONFIG_PREEMPT_VOLUNTARY is not set
245CONFIG_PREEMPT=y
246# CONFIG_DEBUG_PREEMPT is not set
247# CONFIG_PREEMPT_TRACER is not set
248CONFIG_X86_LOCAL_APIC=y
249CONFIG_X86_IO_APIC=y
250# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
251CONFIG_X86_MCE=y
252# CONFIG_X86_MCE_NONFATAL is not set
253CONFIG_X86_MCE_P4THERMAL=y
254CONFIG_VM86=y
255CONFIG_TOSHIBA=m
256CONFIG_I8K=m
257CONFIG_X86_REBOOTFIXUPS=y
258CONFIG_MICROCODE=y
259CONFIG_MICROCODE_INTEL=y
260# CONFIG_MICROCODE_AMD is not set
261CONFIG_MICROCODE_OLD_INTERFACE=y
262CONFIG_X86_MSR=y
263CONFIG_X86_CPUID=y
264# CONFIG_NOHIGHMEM is not set
265# CONFIG_HIGHMEM4G is not set
266CONFIG_HIGHMEM64G=y
267CONFIG_PAGE_OFFSET=0xC0000000
268CONFIG_HIGHMEM=y
269CONFIG_X86_PAE=y
270CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
271CONFIG_ARCH_FLATMEM_ENABLE=y
272CONFIG_ARCH_SPARSEMEM_ENABLE=y
273CONFIG_ARCH_SELECT_MEMORY_MODEL=y
274CONFIG_SELECT_MEMORY_MODEL=y
275CONFIG_FLATMEM_MANUAL=y
276# CONFIG_DISCONTIGMEM_MANUAL is not set
277# CONFIG_SPARSEMEM_MANUAL is not set
278CONFIG_FLATMEM=y
279CONFIG_FLAT_NODE_MEM_MAP=y
280CONFIG_SPARSEMEM_STATIC=y
281CONFIG_PAGEFLAGS_EXTENDED=y
282CONFIG_SPLIT_PTLOCK_CPUS=4
283CONFIG_RESOURCES_64BIT=y
284CONFIG_PHYS_ADDR_T_64BIT=y
285CONFIG_ZONE_DMA_FLAG=1
286CONFIG_BOUNCE=y
287CONFIG_VIRT_TO_BUS=y
288CONFIG_UNEVICTABLE_LRU=y
289CONFIG_HIGHPTE=y
290# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
291CONFIG_X86_RESERVE_LOW_64K=y
292# CONFIG_MATH_EMULATION is not set
293CONFIG_MTRR=y
294CONFIG_MTRR_SANITIZER=y
295CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
296CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
297CONFIG_X86_PAT=y
298CONFIG_EFI=y
299# CONFIG_SECCOMP is not set
300# CONFIG_HZ_100 is not set
301# CONFIG_HZ_250 is not set
302# CONFIG_HZ_300 is not set
303CONFIG_HZ_1000=y
304CONFIG_HZ=1000
305CONFIG_SCHED_HRTICK=y
306# CONFIG_KEXEC is not set
307# CONFIG_CRASH_DUMP is not set
308CONFIG_PHYSICAL_START=0x100000
309# CONFIG_RELOCATABLE is not set
310CONFIG_PHYSICAL_ALIGN=0x400000
311CONFIG_HOTPLUG_CPU=y
312# CONFIG_COMPAT_VDSO is not set
313# CONFIG_CMDLINE_BOOL is not set
314# CONFIG_CMDLINE is not set
315# CONFIG_CMDLINE_OVERRIDE is not set
316CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
317
318#
319# Power management and ACPI options
320#
321CONFIG_PM=y
322CONFIG_PM_DEBUG=y
323# CONFIG_PM_VERBOSE is not set
324CONFIG_CAN_PM_TRACE=y
325CONFIG_PM_TRACE=y
326CONFIG_PM_TRACE_RTC=y
327CONFIG_PM_SLEEP_SMP=y
328CONFIG_PM_SLEEP=y
329CONFIG_SUSPEND=y
330# CONFIG_PM_TEST_SUSPEND is not set
331CONFIG_SUSPEND_FREEZER=y
332CONFIG_HIBERNATION=y
333CONFIG_PM_STD_PARTITION=""
334CONFIG_ACPI=y
335CONFIG_ACPI_SLEEP=y
336CONFIG_ACPI_PROCFS=y
337CONFIG_ACPI_PROCFS_POWER=y
338CONFIG_ACPI_SYSFS_POWER=y
339CONFIG_ACPI_PROC_EVENT=y
340CONFIG_ACPI_AC=y
341CONFIG_ACPI_BATTERY=m
342CONFIG_ACPI_BUTTON=y
343CONFIG_ACPI_VIDEO=y
344CONFIG_ACPI_FAN=y
345CONFIG_ACPI_DOCK=y
346CONFIG_ACPI_PROCESSOR=y
347CONFIG_ACPI_HOTPLUG_CPU=y
348CONFIG_ACPI_THERMAL=y
349CONFIG_ACPI_WMI=y
350CONFIG_ACPI_ASUS=m
351CONFIG_ACPI_TOSHIBA=m
352# CONFIG_ACPI_CUSTOM_DSDT is not set
353CONFIG_ACPI_BLACKLIST_YEAR=1999
354# CONFIG_ACPI_DEBUG is not set
355# CONFIG_ACPI_PCI_SLOT is not set
356CONFIG_ACPI_SYSTEM=y
357CONFIG_X86_PM_TIMER=y
358CONFIG_ACPI_CONTAINER=y
359CONFIG_ACPI_SBS=m
360# CONFIG_APM is not set
361
362#
363# CPU Frequency scaling
364#
365CONFIG_CPU_FREQ=y
366CONFIG_CPU_FREQ_TABLE=y
367CONFIG_CPU_FREQ_DEBUG=y
368CONFIG_CPU_FREQ_STAT=y
369CONFIG_CPU_FREQ_STAT_DETAILS=y
370# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
371# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
372# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
373CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
374# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
375CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
376# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
377CONFIG_CPU_FREQ_GOV_USERSPACE=y
378CONFIG_CPU_FREQ_GOV_ONDEMAND=y
379# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
380
381#
382# CPUFreq processor drivers
383#
384CONFIG_X86_ACPI_CPUFREQ=y
385# CONFIG_X86_POWERNOW_K6 is not set
386# CONFIG_X86_POWERNOW_K7 is not set
387# CONFIG_X86_POWERNOW_K8 is not set
388# CONFIG_X86_GX_SUSPMOD is not set
389# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
390# CONFIG_X86_SPEEDSTEP_ICH is not set
391# CONFIG_X86_SPEEDSTEP_SMI is not set
392# CONFIG_X86_P4_CLOCKMOD is not set
393# CONFIG_X86_CPUFREQ_NFORCE2 is not set
394# CONFIG_X86_LONGRUN is not set
395# CONFIG_X86_LONGHAUL is not set
396# CONFIG_X86_E_POWERSAVER is not set
397
398#
399# shared options
400#
401# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
402# CONFIG_X86_SPEEDSTEP_LIB is not set
403CONFIG_CPU_IDLE=y
404CONFIG_CPU_IDLE_GOV_LADDER=y
405CONFIG_CPU_IDLE_GOV_MENU=y
406
407#
408# Bus options (PCI etc.)
409#
410CONFIG_PCI=y
411# CONFIG_PCI_GOBIOS is not set
412# CONFIG_PCI_GOMMCONFIG is not set
413# CONFIG_PCI_GODIRECT is not set
414# CONFIG_PCI_GOOLPC is not set
415CONFIG_PCI_GOANY=y
416CONFIG_PCI_BIOS=y
417CONFIG_PCI_DIRECT=y
418CONFIG_PCI_MMCONFIG=y
419CONFIG_PCI_DOMAINS=y
420CONFIG_PCIEPORTBUS=y
421# CONFIG_PCIEAER is not set
422# CONFIG_PCIEASPM is not set
423# CONFIG_PCIEASPM_DEBUG is not set
424CONFIG_ARCH_SUPPORTS_MSI=y
425CONFIG_PCI_MSI=y
426# CONFIG_PCI_LEGACY is not set
427# CONFIG_PCI_DEBUG is not set
428# CONFIG_PCI_STUB is not set
429# CONFIG_HT_IRQ is not set
430CONFIG_ISA_DMA_API=y
431CONFIG_ISA=y
432# CONFIG_EISA is not set
433# CONFIG_MCA is not set
434# CONFIG_SCx200 is not set
435# CONFIG_OLPC is not set
436CONFIG_PCCARD=y
437# CONFIG_PCMCIA_DEBUG is not set
438# CONFIG_PCMCIA is not set
439CONFIG_CARDBUS=y
440
441#
442# PC-card bridges
443#
444CONFIG_YENTA=y
445CONFIG_YENTA_O2=y
446CONFIG_YENTA_RICOH=y
447CONFIG_YENTA_TI=y
448CONFIG_YENTA_ENE_TUNE=y
449CONFIG_YENTA_TOSHIBA=y
450CONFIG_PCMCIA_PROBE=y
451CONFIG_PCCARD_NONSTATIC=y
452CONFIG_HOTPLUG_PCI=y
453# CONFIG_HOTPLUG_PCI_PCIE is not set
454# CONFIG_HOTPLUG_PCI_FAKE is not set
455# CONFIG_HOTPLUG_PCI_COMPAQ is not set
456# CONFIG_HOTPLUG_PCI_IBM is not set
457# CONFIG_HOTPLUG_PCI_ACPI is not set
458# CONFIG_HOTPLUG_PCI_CPCI is not set
459# CONFIG_HOTPLUG_PCI_SHPC is not set
460
461#
462# Executable file formats / Emulations
463#
464CONFIG_BINFMT_ELF=y
465# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
466CONFIG_HAVE_AOUT=y
467# CONFIG_BINFMT_AOUT is not set
468CONFIG_BINFMT_MISC=y
469CONFIG_HAVE_ATOMIC_IOMAP=y
470CONFIG_NET=y
471
472#
473# Networking options
474#
475# CONFIG_NET_NS is not set
476CONFIG_PACKET=y
477CONFIG_PACKET_MMAP=y
478CONFIG_UNIX=y
479CONFIG_XFRM=y
480CONFIG_XFRM_USER=y
481CONFIG_XFRM_SUB_POLICY=y
482CONFIG_XFRM_MIGRATE=y
483CONFIG_XFRM_STATISTICS=y
484CONFIG_XFRM_IPCOMP=m
485CONFIG_NET_KEY=m
486CONFIG_NET_KEY_MIGRATE=y
487CONFIG_INET=y
488CONFIG_IP_MULTICAST=y
489# CONFIG_IP_ADVANCED_ROUTER is not set
490CONFIG_IP_FIB_HASH=y
491# CONFIG_IP_PNP is not set
492# CONFIG_NET_IPIP is not set
493# CONFIG_NET_IPGRE is not set
494CONFIG_IP_MROUTE=y
495CONFIG_IP_PIMSM_V1=y
496CONFIG_IP_PIMSM_V2=y
497# CONFIG_ARPD is not set
498CONFIG_SYN_COOKIES=y
499CONFIG_INET_AH=m
500CONFIG_INET_ESP=m
501CONFIG_INET_IPCOMP=m
502CONFIG_INET_XFRM_TUNNEL=m
503CONFIG_INET_TUNNEL=m
504CONFIG_INET_XFRM_MODE_TRANSPORT=m
505CONFIG_INET_XFRM_MODE_TUNNEL=m
506CONFIG_INET_XFRM_MODE_BEET=m
507CONFIG_INET_LRO=y
508CONFIG_INET_DIAG=m
509CONFIG_INET_TCP_DIAG=m
510CONFIG_TCP_CONG_ADVANCED=y
511CONFIG_TCP_CONG_BIC=m
512CONFIG_TCP_CONG_CUBIC=y
513# CONFIG_TCP_CONG_WESTWOOD is not set
514# CONFIG_TCP_CONG_HTCP is not set
515# CONFIG_TCP_CONG_HSTCP is not set
516# CONFIG_TCP_CONG_HYBLA is not set
517# CONFIG_TCP_CONG_VEGAS is not set
518# CONFIG_TCP_CONG_SCALABLE is not set
519# CONFIG_TCP_CONG_LP is not set
520# CONFIG_TCP_CONG_VENO is not set
521# CONFIG_TCP_CONG_YEAH is not set
522# CONFIG_TCP_CONG_ILLINOIS is not set
523# CONFIG_DEFAULT_BIC is not set
524CONFIG_DEFAULT_CUBIC=y
525# CONFIG_DEFAULT_HTCP is not set
526# CONFIG_DEFAULT_VEGAS is not set
527# CONFIG_DEFAULT_WESTWOOD is not set
528# CONFIG_DEFAULT_RENO is not set
529CONFIG_DEFAULT_TCP_CONG="cubic"
530CONFIG_TCP_MD5SIG=y
531CONFIG_IPV6=y
532CONFIG_IPV6_PRIVACY=y
533CONFIG_IPV6_ROUTER_PREF=y
534CONFIG_IPV6_ROUTE_INFO=y
535CONFIG_IPV6_OPTIMISTIC_DAD=y
536CONFIG_INET6_AH=m
537CONFIG_INET6_ESP=m
538CONFIG_INET6_IPCOMP=m
539CONFIG_IPV6_MIP6=m
540CONFIG_INET6_XFRM_TUNNEL=m
541CONFIG_INET6_TUNNEL=m
542CONFIG_INET6_XFRM_MODE_TRANSPORT=m
543CONFIG_INET6_XFRM_MODE_TUNNEL=m
544CONFIG_INET6_XFRM_MODE_BEET=m
545CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
546CONFIG_IPV6_SIT=m
547CONFIG_IPV6_NDISC_NODETYPE=y
548CONFIG_IPV6_TUNNEL=m
549CONFIG_IPV6_MULTIPLE_TABLES=y
550CONFIG_IPV6_SUBTREES=y
551# CONFIG_IPV6_MROUTE is not set
552CONFIG_NETWORK_SECMARK=y
553CONFIG_NETFILTER=y
554# CONFIG_NETFILTER_DEBUG is not set
555CONFIG_NETFILTER_ADVANCED=y
556
557#
558# Core Netfilter Configuration
559#
560CONFIG_NETFILTER_NETLINK=m
561CONFIG_NETFILTER_NETLINK_QUEUE=m
562CONFIG_NETFILTER_NETLINK_LOG=m
563CONFIG_NF_CONNTRACK=y
564CONFIG_NF_CT_ACCT=y
565CONFIG_NF_CONNTRACK_MARK=y
566CONFIG_NF_CONNTRACK_SECMARK=y
567CONFIG_NF_CONNTRACK_EVENTS=y
568# CONFIG_NF_CT_PROTO_DCCP is not set
569CONFIG_NF_CT_PROTO_GRE=m
570CONFIG_NF_CT_PROTO_SCTP=m
571CONFIG_NF_CT_PROTO_UDPLITE=m
572CONFIG_NF_CONNTRACK_AMANDA=m
573CONFIG_NF_CONNTRACK_FTP=m
574CONFIG_NF_CONNTRACK_H323=m
575CONFIG_NF_CONNTRACK_IRC=m
576CONFIG_NF_CONNTRACK_NETBIOS_NS=m
577CONFIG_NF_CONNTRACK_PPTP=m
578CONFIG_NF_CONNTRACK_SANE=m
579CONFIG_NF_CONNTRACK_SIP=m
580CONFIG_NF_CONNTRACK_TFTP=m
581CONFIG_NF_CT_NETLINK=m
582# CONFIG_NETFILTER_TPROXY is not set
583CONFIG_NETFILTER_XTABLES=y
584CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
585CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
586CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
587CONFIG_NETFILTER_XT_TARGET_DSCP=m
588CONFIG_NETFILTER_XT_TARGET_MARK=m
589CONFIG_NETFILTER_XT_TARGET_NFLOG=m
590CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
591CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
592CONFIG_NETFILTER_XT_TARGET_RATEEST=m
593CONFIG_NETFILTER_XT_TARGET_TRACE=m
594CONFIG_NETFILTER_XT_TARGET_SECMARK=m
595CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
596CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
597CONFIG_NETFILTER_XT_MATCH_COMMENT=m
598CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
599CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
600CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
601CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
602# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
603CONFIG_NETFILTER_XT_MATCH_DSCP=m
604CONFIG_NETFILTER_XT_MATCH_ESP=m
605CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
606CONFIG_NETFILTER_XT_MATCH_HELPER=m
607CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
608CONFIG_NETFILTER_XT_MATCH_LENGTH=m
609CONFIG_NETFILTER_XT_MATCH_LIMIT=m
610CONFIG_NETFILTER_XT_MATCH_MAC=m
611CONFIG_NETFILTER_XT_MATCH_MARK=m
612CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
613CONFIG_NETFILTER_XT_MATCH_OWNER=m
614CONFIG_NETFILTER_XT_MATCH_POLICY=m
615CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
616CONFIG_NETFILTER_XT_MATCH_QUOTA=m
617CONFIG_NETFILTER_XT_MATCH_RATEEST=m
618CONFIG_NETFILTER_XT_MATCH_REALM=m
619# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
620CONFIG_NETFILTER_XT_MATCH_SCTP=m
621CONFIG_NETFILTER_XT_MATCH_STATE=y
622CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
623CONFIG_NETFILTER_XT_MATCH_STRING=m
624CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
625CONFIG_NETFILTER_XT_MATCH_TIME=m
626CONFIG_NETFILTER_XT_MATCH_U32=m
627# CONFIG_IP_VS is not set
628
629#
630# IP: Netfilter Configuration
631#
632CONFIG_NF_DEFRAG_IPV4=y
633CONFIG_NF_CONNTRACK_IPV4=y
634# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
635CONFIG_IP_NF_QUEUE=m
636CONFIG_IP_NF_IPTABLES=y
637CONFIG_IP_NF_MATCH_ADDRTYPE=m
638CONFIG_IP_NF_MATCH_AH=m
639CONFIG_IP_NF_MATCH_ECN=m
640CONFIG_IP_NF_MATCH_TTL=m
641CONFIG_IP_NF_FILTER=y
642CONFIG_IP_NF_TARGET_REJECT=y
643CONFIG_IP_NF_TARGET_LOG=m
644CONFIG_IP_NF_TARGET_ULOG=m
645CONFIG_NF_NAT=m
646CONFIG_NF_NAT_NEEDED=y
647CONFIG_IP_NF_TARGET_MASQUERADE=m
648CONFIG_IP_NF_TARGET_NETMAP=m
649CONFIG_IP_NF_TARGET_REDIRECT=m
650CONFIG_NF_NAT_SNMP_BASIC=m
651CONFIG_NF_NAT_PROTO_GRE=m
652CONFIG_NF_NAT_PROTO_UDPLITE=m
653CONFIG_NF_NAT_PROTO_SCTP=m
654CONFIG_NF_NAT_FTP=m
655CONFIG_NF_NAT_IRC=m
656CONFIG_NF_NAT_TFTP=m
657CONFIG_NF_NAT_AMANDA=m
658CONFIG_NF_NAT_PPTP=m
659CONFIG_NF_NAT_H323=m
660CONFIG_NF_NAT_SIP=m
661CONFIG_IP_NF_MANGLE=m
662CONFIG_IP_NF_TARGET_CLUSTERIP=m
663CONFIG_IP_NF_TARGET_ECN=m
664CONFIG_IP_NF_TARGET_TTL=m
665CONFIG_IP_NF_RAW=m
666CONFIG_IP_NF_ARPTABLES=m
667CONFIG_IP_NF_ARPFILTER=m
668CONFIG_IP_NF_ARP_MANGLE=m
669
670#
671# IPv6: Netfilter Configuration
672#
673CONFIG_NF_CONNTRACK_IPV6=y
674CONFIG_IP6_NF_QUEUE=m
675CONFIG_IP6_NF_IPTABLES=y
676CONFIG_IP6_NF_MATCH_AH=m
677CONFIG_IP6_NF_MATCH_EUI64=m
678CONFIG_IP6_NF_MATCH_FRAG=m
679CONFIG_IP6_NF_MATCH_OPTS=m
680CONFIG_IP6_NF_MATCH_HL=m
681CONFIG_IP6_NF_MATCH_IPV6HEADER=m
682CONFIG_IP6_NF_MATCH_MH=m
683CONFIG_IP6_NF_MATCH_RT=m
684CONFIG_IP6_NF_TARGET_LOG=m
685CONFIG_IP6_NF_FILTER=y
686CONFIG_IP6_NF_TARGET_REJECT=y
687CONFIG_IP6_NF_MANGLE=m
688CONFIG_IP6_NF_TARGET_HL=m
689CONFIG_IP6_NF_RAW=m
690# CONFIG_IP_DCCP is not set
691# CONFIG_IP_SCTP is not set
692# CONFIG_TIPC is not set
693# CONFIG_ATM is not set
694# CONFIG_BRIDGE is not set
695# CONFIG_NET_DSA is not set
696# CONFIG_VLAN_8021Q is not set
697# CONFIG_DECNET is not set
698# CONFIG_LLC2 is not set
699# CONFIG_IPX is not set
700# CONFIG_ATALK is not set
701# CONFIG_X25 is not set
702# CONFIG_LAPB is not set
703# CONFIG_ECONET is not set
704# CONFIG_WAN_ROUTER is not set
705# CONFIG_NET_SCHED is not set
706CONFIG_NET_CLS_ROUTE=y
707# CONFIG_DCB is not set
708
709#
710# Network testing
711#
712# CONFIG_NET_PKTGEN is not set
713# CONFIG_HAMRADIO is not set
714# CONFIG_CAN is not set
715# CONFIG_IRDA is not set
716CONFIG_BT=y
717CONFIG_BT_L2CAP=y
718CONFIG_BT_SCO=y
719CONFIG_BT_RFCOMM=y
720CONFIG_BT_RFCOMM_TTY=y
721CONFIG_BT_BNEP=y
722CONFIG_BT_BNEP_MC_FILTER=y
723CONFIG_BT_BNEP_PROTO_FILTER=y
724CONFIG_BT_HIDP=y
725
726#
727# Bluetooth device drivers
728#
729CONFIG_BT_HCIBTUSB=y
730CONFIG_BT_HCIBTSDIO=m
731CONFIG_BT_HCIUART=m
732CONFIG_BT_HCIUART_H4=y
733CONFIG_BT_HCIUART_BCSP=y
734CONFIG_BT_HCIUART_LL=y
735CONFIG_BT_HCIBCM203X=m
736CONFIG_BT_HCIBPA10X=m
737CONFIG_BT_HCIBFUSB=m
738CONFIG_BT_HCIVHCI=m
739# CONFIG_AF_RXRPC is not set
740# CONFIG_PHONET is not set
741CONFIG_FIB_RULES=y
742CONFIG_WIRELESS=y
743CONFIG_CFG80211=y
744# CONFIG_CFG80211_REG_DEBUG is not set
745CONFIG_NL80211=y
746CONFIG_WIRELESS_OLD_REGULATORY=y
747CONFIG_WIRELESS_EXT=y
748CONFIG_WIRELESS_EXT_SYSFS=y
749CONFIG_LIB80211=y
750CONFIG_LIB80211_CRYPT_WEP=m
751CONFIG_LIB80211_CRYPT_CCMP=m
752CONFIG_LIB80211_CRYPT_TKIP=m
753CONFIG_MAC80211=y
754
755#
756# Rate control algorithm selection
757#
758CONFIG_MAC80211_RC_PID=y
759# CONFIG_MAC80211_RC_MINSTREL is not set
760CONFIG_MAC80211_RC_DEFAULT_PID=y
761# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
762CONFIG_MAC80211_RC_DEFAULT="pid"
763CONFIG_MAC80211_MESH=y
764CONFIG_MAC80211_LEDS=y
765CONFIG_MAC80211_DEBUGFS=y
766# CONFIG_MAC80211_DEBUG_MENU is not set
767CONFIG_IEEE80211=m
768# CONFIG_IEEE80211_DEBUG is not set
769CONFIG_IEEE80211_CRYPT_WEP=m
770CONFIG_IEEE80211_CRYPT_CCMP=m
771CONFIG_IEEE80211_CRYPT_TKIP=m
772CONFIG_WIMAX=m
773CONFIG_WIMAX_DEBUG_LEVEL=8
774CONFIG_RFKILL=y
775CONFIG_RFKILL_INPUT=y
776CONFIG_RFKILL_LEDS=y
777# CONFIG_NET_9P is not set
778
779#
780# Device Drivers
781#
782
783#
784# Generic Driver Options
785#
786CONFIG_UEVENT_HELPER_PATH=""
787CONFIG_STANDALONE=y
788CONFIG_PREVENT_FIRMWARE_BUILD=y
789CONFIG_FW_LOADER=y
790CONFIG_FIRMWARE_IN_KERNEL=y
791CONFIG_EXTRA_FIRMWARE=""
792# CONFIG_DEBUG_DRIVER is not set
793CONFIG_DEBUG_DEVRES=y
794# CONFIG_SYS_HYPERVISOR is not set
795# CONFIG_CONNECTOR is not set
796# CONFIG_MTD is not set
797# CONFIG_PARPORT is not set
798CONFIG_PNP=y
799# CONFIG_PNP_DEBUG_MESSAGES is not set
800
801#
802# Protocols
803#
804# CONFIG_ISAPNP is not set
805# CONFIG_PNPBIOS is not set
806CONFIG_PNPACPI=y
807CONFIG_BLK_DEV=y
808# CONFIG_BLK_DEV_FD is not set
809# CONFIG_BLK_DEV_XD is not set
810# CONFIG_BLK_CPQ_DA is not set
811# CONFIG_BLK_CPQ_CISS_DA is not set
812# CONFIG_BLK_DEV_DAC960 is not set
813# CONFIG_BLK_DEV_UMEM is not set
814# CONFIG_BLK_DEV_COW_COMMON is not set
815CONFIG_BLK_DEV_LOOP=y
816CONFIG_BLK_DEV_CRYPTOLOOP=m
817# CONFIG_BLK_DEV_NBD is not set
818# CONFIG_BLK_DEV_SX8 is not set
819# CONFIG_BLK_DEV_UB is not set
820CONFIG_BLK_DEV_RAM=m
821CONFIG_BLK_DEV_RAM_COUNT=16
822CONFIG_BLK_DEV_RAM_SIZE=16384
823# CONFIG_BLK_DEV_XIP is not set
824CONFIG_CDROM_PKTCDVD=m
825CONFIG_CDROM_PKTCDVD_BUFFERS=8
826# CONFIG_CDROM_PKTCDVD_WCACHE is not set
827# CONFIG_ATA_OVER_ETH is not set
828# CONFIG_BLK_DEV_HD is not set
829CONFIG_MISC_DEVICES=y
830# CONFIG_IBM_ASM is not set
831# CONFIG_PHANTOM is not set
832CONFIG_EEPROM_93CX6=m
833# CONFIG_SGI_IOC4 is not set
834CONFIG_TIFM_CORE=m
835# CONFIG_TIFM_7XX1 is not set
836CONFIG_ACER_WMI=y
837CONFIG_ASUS_LAPTOP=m
838CONFIG_FUJITSU_LAPTOP=m
839# CONFIG_FUJITSU_LAPTOP_DEBUG is not set
840CONFIG_TC1100_WMI=m
841CONFIG_HP_WMI=m
842# CONFIG_ICS932S401 is not set
843CONFIG_MSI_LAPTOP=m
844CONFIG_PANASONIC_LAPTOP=m
845CONFIG_COMPAL_LAPTOP=m
846CONFIG_SONY_LAPTOP=m
847# CONFIG_SONYPI_COMPAT is not set
848CONFIG_THINKPAD_ACPI=m
849# CONFIG_THINKPAD_ACPI_DEBUG is not set
850CONFIG_THINKPAD_ACPI_BAY=y
851CONFIG_THINKPAD_ACPI_VIDEO=y
852CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
853# CONFIG_INTEL_MENLOW is not set
854CONFIG_EEEPC_LAPTOP=m
855# CONFIG_ENCLOSURE_SERVICES is not set
856# CONFIG_HP_ILO is not set
857# CONFIG_C2PORT is not set
858CONFIG_HAVE_IDE=y
859# CONFIG_IDE is not set
860
861#
862# SCSI device support
863#
864CONFIG_RAID_ATTRS=m
865CONFIG_SCSI=y
866CONFIG_SCSI_DMA=y
867# CONFIG_SCSI_TGT is not set
868# CONFIG_SCSI_NETLINK is not set
869CONFIG_SCSI_PROC_FS=y
870
871#
872# SCSI support type (disk, tape, CD-ROM)
873#
874CONFIG_BLK_DEV_SD=y
875# CONFIG_CHR_DEV_ST is not set
876# CONFIG_CHR_DEV_OSST is not set
877CONFIG_BLK_DEV_SR=y
878CONFIG_BLK_DEV_SR_VENDOR=y
879CONFIG_CHR_DEV_SG=y
880# CONFIG_CHR_DEV_SCH is not set
881
882#
883# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
884#
885CONFIG_SCSI_MULTI_LUN=y
886CONFIG_SCSI_CONSTANTS=y
887CONFIG_SCSI_LOGGING=y
888CONFIG_SCSI_SCAN_ASYNC=y
889CONFIG_SCSI_WAIT_SCAN=m
890
891#
892# SCSI Transports
893#
894# CONFIG_SCSI_SPI_ATTRS is not set
895# CONFIG_SCSI_FC_ATTRS is not set
896# CONFIG_SCSI_ISCSI_ATTRS is not set
897# CONFIG_SCSI_SAS_ATTRS is not set
898# CONFIG_SCSI_SAS_LIBSAS is not set
899# CONFIG_SCSI_SRP_ATTRS is not set
900CONFIG_SCSI_LOWLEVEL=y
901# CONFIG_ISCSI_TCP is not set
902# CONFIG_SCSI_CXGB3_ISCSI is not set
903# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
904# CONFIG_SCSI_3W_9XXX is not set
905# CONFIG_SCSI_7000FASST is not set
906# CONFIG_SCSI_ACARD is not set
907# CONFIG_SCSI_AHA152X is not set
908# CONFIG_SCSI_AHA1542 is not set
909# CONFIG_SCSI_AACRAID is not set
910# CONFIG_SCSI_AIC7XXX is not set
911# CONFIG_SCSI_AIC7XXX_OLD is not set
912# CONFIG_SCSI_AIC79XX is not set
913# CONFIG_SCSI_AIC94XX is not set
914# CONFIG_SCSI_DPT_I2O is not set
915# CONFIG_SCSI_ADVANSYS is not set
916# CONFIG_SCSI_IN2000 is not set
917# CONFIG_SCSI_ARCMSR is not set
918# CONFIG_MEGARAID_NEWGEN is not set
919# CONFIG_MEGARAID_LEGACY is not set
920# CONFIG_MEGARAID_SAS is not set
921# CONFIG_SCSI_HPTIOP is not set
922# CONFIG_SCSI_BUSLOGIC is not set
923# CONFIG_LIBFC is not set
924# CONFIG_FCOE is not set
925# CONFIG_FCOE_FNIC is not set
926# CONFIG_SCSI_DMX3191D is not set
927# CONFIG_SCSI_DTC3280 is not set
928# CONFIG_SCSI_EATA is not set
929# CONFIG_SCSI_FUTURE_DOMAIN is not set
930# CONFIG_SCSI_GDTH is not set
931# CONFIG_SCSI_GENERIC_NCR5380 is not set
932# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
933# CONFIG_SCSI_IPS is not set
934# CONFIG_SCSI_INITIO is not set
935# CONFIG_SCSI_INIA100 is not set
936# CONFIG_SCSI_MVSAS is not set
937# CONFIG_SCSI_NCR53C406A is not set
938# CONFIG_SCSI_STEX is not set
939# CONFIG_SCSI_SYM53C8XX_2 is not set
940# CONFIG_SCSI_IPR is not set
941# CONFIG_SCSI_PAS16 is not set
942# CONFIG_SCSI_QLOGIC_FAS is not set
943# CONFIG_SCSI_QLOGIC_1280 is not set
944# CONFIG_SCSI_QLA_FC is not set
945# CONFIG_SCSI_QLA_ISCSI is not set
946# CONFIG_SCSI_LPFC is not set
947# CONFIG_SCSI_SYM53C416 is not set
948# CONFIG_SCSI_DC395x is not set
949# CONFIG_SCSI_DC390T is not set
950# CONFIG_SCSI_T128 is not set
951# CONFIG_SCSI_U14_34F is not set
952# CONFIG_SCSI_ULTRASTOR is not set
953# CONFIG_SCSI_NSP32 is not set
954# CONFIG_SCSI_DEBUG is not set
955# CONFIG_SCSI_SRP is not set
956# CONFIG_SCSI_DH is not set
957CONFIG_ATA=y
958# CONFIG_ATA_NONSTANDARD is not set
959CONFIG_ATA_ACPI=y
960# CONFIG_SATA_PMP is not set
961CONFIG_SATA_AHCI=y
962CONFIG_SATA_SIL24=y
963CONFIG_ATA_SFF=y
964# CONFIG_SATA_SVW is not set
965CONFIG_ATA_PIIX=y
966# CONFIG_SATA_MV is not set
967# CONFIG_SATA_NV is not set
968# CONFIG_PDC_ADMA is not set
969# CONFIG_SATA_QSTOR is not set
970# CONFIG_SATA_PROMISE is not set
971# CONFIG_SATA_SX4 is not set
972CONFIG_SATA_SIL=y
973# CONFIG_SATA_SIS is not set
974# CONFIG_SATA_ULI is not set
975# CONFIG_SATA_VIA is not set
976# CONFIG_SATA_VITESSE is not set
977# CONFIG_SATA_INIC162X is not set
978# CONFIG_PATA_ACPI is not set
979# CONFIG_PATA_ALI is not set
980# CONFIG_PATA_AMD is not set
981# CONFIG_PATA_ARTOP is not set
982# CONFIG_PATA_ATIIXP is not set
983# CONFIG_PATA_CMD640_PCI is not set
984# CONFIG_PATA_CMD64X is not set
985# CONFIG_PATA_CS5520 is not set
986# CONFIG_PATA_CS5530 is not set
987# CONFIG_PATA_CS5535 is not set
988# CONFIG_PATA_CS5536 is not set
989# CONFIG_PATA_CYPRESS is not set
990# CONFIG_PATA_EFAR is not set
991CONFIG_ATA_GENERIC=y
992# CONFIG_PATA_HPT366 is not set
993# CONFIG_PATA_HPT37X is not set
994# CONFIG_PATA_HPT3X2N is not set
995# CONFIG_PATA_HPT3X3 is not set
996# CONFIG_PATA_ISAPNP is not set
997# CONFIG_PATA_IT821X is not set
998# CONFIG_PATA_IT8213 is not set
999# CONFIG_PATA_JMICRON is not set
1000# CONFIG_PATA_LEGACY is not set
1001# CONFIG_PATA_TRIFLEX is not set
1002# CONFIG_PATA_MARVELL is not set
1003CONFIG_PATA_MPIIX=y
1004# CONFIG_PATA_OLDPIIX is not set
1005# CONFIG_PATA_NETCELL is not set
1006# CONFIG_PATA_NINJA32 is not set
1007# CONFIG_PATA_NS87410 is not set
1008# CONFIG_PATA_NS87415 is not set
1009# CONFIG_PATA_OPTI is not set
1010# CONFIG_PATA_OPTIDMA is not set
1011# CONFIG_PATA_PDC_OLD is not set
1012# CONFIG_PATA_QDI is not set
1013# CONFIG_PATA_RADISYS is not set
1014# CONFIG_PATA_RZ1000 is not set
1015# CONFIG_PATA_SC1200 is not set
1016# CONFIG_PATA_SERVERWORKS is not set
1017# CONFIG_PATA_PDC2027X is not set
1018# CONFIG_PATA_SIL680 is not set
1019# CONFIG_PATA_SIS is not set
1020# CONFIG_PATA_VIA is not set
1021# CONFIG_PATA_WINBOND is not set
1022# CONFIG_PATA_WINBOND_VLB is not set
1023CONFIG_PATA_SCH=y
1024CONFIG_MD=y
1025# CONFIG_BLK_DEV_MD is not set
1026CONFIG_BLK_DEV_DM=m
1027CONFIG_DM_DEBUG=y
1028# CONFIG_DM_CRYPT is not set
1029CONFIG_DM_SNAPSHOT=m
1030CONFIG_DM_MIRROR=m
1031CONFIG_DM_ZERO=m
1032CONFIG_DM_MULTIPATH=m
1033CONFIG_DM_DELAY=m
1034# CONFIG_DM_UEVENT is not set
1035CONFIG_FUSION=y
1036CONFIG_FUSION_SPI=y
1037CONFIG_FUSION_FC=m
1038CONFIG_FUSION_SAS=m
1039CONFIG_FUSION_MAX_SGE=40
1040CONFIG_FUSION_CTL=m
1041CONFIG_FUSION_LAN=m
1042CONFIG_FUSION_LOGGING=y
1043
1044#
1045# IEEE 1394 (FireWire) support
1046#
1047
1048#
1049# Enable only one of the two stacks, unless you know what you are doing
1050#
1051# CONFIG_FIREWIRE is not set
1052# CONFIG_IEEE1394 is not set
1053# CONFIG_I2O is not set
1054# CONFIG_MACINTOSH_DRIVERS is not set
1055CONFIG_NETDEVICES=y
1056# CONFIG_DUMMY is not set
1057# CONFIG_BONDING is not set
1058CONFIG_MACVLAN=m
1059# CONFIG_EQUALIZER is not set
1060CONFIG_TUN=y
1061# CONFIG_VETH is not set
1062# CONFIG_NET_SB1000 is not set
1063# CONFIG_ARCNET is not set
1064CONFIG_PHYLIB=m
1065
1066#
1067# MII PHY device drivers
1068#
1069CONFIG_MARVELL_PHY=m
1070CONFIG_DAVICOM_PHY=m
1071CONFIG_QSEMI_PHY=m
1072CONFIG_LXT_PHY=m
1073CONFIG_CICADA_PHY=m
1074CONFIG_VITESSE_PHY=m
1075CONFIG_SMSC_PHY=m
1076CONFIG_BROADCOM_PHY=m
1077CONFIG_ICPLUS_PHY=m
1078CONFIG_REALTEK_PHY=m
1079CONFIG_MDIO_BITBANG=m
1080CONFIG_NET_ETHERNET=y
1081CONFIG_MII=y
1082# CONFIG_NATIONAL_PHY is not set
1083# CONFIG_STE10XP is not set
1084# CONFIG_LSI_ET1011C_PHY is not set
1085# CONFIG_HAPPYMEAL is not set
1086# CONFIG_SUNGEM is not set
1087# CONFIG_CASSINI is not set
1088# CONFIG_NET_VENDOR_3COM is not set
1089# CONFIG_LANCE is not set
1090# CONFIG_NET_VENDOR_SMC is not set
1091# CONFIG_NET_VENDOR_RACAL is not set
1092# CONFIG_NET_TULIP is not set
1093# CONFIG_AT1700 is not set
1094# CONFIG_DEPCA is not set
1095# CONFIG_HP100 is not set
1096# CONFIG_NET_ISA is not set
1097# CONFIG_IBM_NEW_EMAC_ZMII is not set
1098# CONFIG_IBM_NEW_EMAC_RGMII is not set
1099# CONFIG_IBM_NEW_EMAC_TAH is not set
1100# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
1101# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
1102# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
1103# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
1104CONFIG_NET_PCI=y
1105CONFIG_PCNET32=m
1106# CONFIG_AMD8111_ETH is not set
1107# CONFIG_ADAPTEC_STARFIRE is not set
1108# CONFIG_AC3200 is not set
1109# CONFIG_APRICOT is not set
1110# CONFIG_B44 is not set
1111# CONFIG_FORCEDETH is not set
1112# CONFIG_CS89x0 is not set
1113# CONFIG_EEPRO100 is not set
1114CONFIG_E100=y
1115# CONFIG_FEALNX is not set
1116# CONFIG_NATSEMI is not set
1117# CONFIG_NE2K_PCI is not set
1118CONFIG_8139CP=m
1119CONFIG_8139TOO=m
1120CONFIG_8139TOO_PIO=y
1121# CONFIG_8139TOO_TUNE_TWISTER is not set
1122# CONFIG_8139TOO_8129 is not set
1123# CONFIG_8139_OLD_RX_RESET is not set
1124# CONFIG_R6040 is not set
1125CONFIG_SIS900=m
1126# CONFIG_EPIC100 is not set
1127# CONFIG_SMSC9420 is not set
1128# CONFIG_SUNDANCE is not set
1129# CONFIG_TLAN is not set
1130# CONFIG_VIA_RHINE is not set
1131# CONFIG_SC92031 is not set
1132CONFIG_ATL2=m
1133CONFIG_NETDEV_1000=y
1134# CONFIG_ACENIC is not set
1135# CONFIG_DL2K is not set
1136CONFIG_E1000=y
1137CONFIG_E1000E=y
1138# CONFIG_IP1000 is not set
1139CONFIG_IGB=y
1140# CONFIG_IGB_LRO is not set
1141# CONFIG_NS83820 is not set
1142# CONFIG_HAMACHI is not set
1143# CONFIG_YELLOWFIN is not set
1144CONFIG_R8169=y
1145CONFIG_SIS190=m
1146# CONFIG_SKGE is not set
1147CONFIG_SKY2=m
1148# CONFIG_SKY2_DEBUG is not set
1149# CONFIG_VIA_VELOCITY is not set
1150CONFIG_TIGON3=m
1151CONFIG_BNX2=m
1152# CONFIG_QLA3XXX is not set
1153CONFIG_ATL1=m
1154CONFIG_ATL1E=y
1155# CONFIG_JME is not set
1156CONFIG_NETDEV_10000=y
1157# CONFIG_CHELSIO_T1 is not set
1158# CONFIG_CHELSIO_T3 is not set
1159# CONFIG_ENIC is not set
1160CONFIG_IXGBE=m
1161CONFIG_IXGB=m
1162# CONFIG_S2IO is not set
1163# CONFIG_MYRI10GE is not set
1164# CONFIG_NETXEN_NIC is not set
1165# CONFIG_NIU is not set
1166# CONFIG_MLX4_EN is not set
1167# CONFIG_MLX4_CORE is not set
1168# CONFIG_TEHUTI is not set
1169CONFIG_BNX2X=m
1170# CONFIG_QLGE is not set
1171# CONFIG_SFC is not set
1172# CONFIG_TR is not set
1173
1174#
1175# Wireless LAN
1176#
1177CONFIG_WLAN_PRE80211=y
1178# CONFIG_STRIP is not set
1179# CONFIG_ARLAN is not set
1180# CONFIG_WAVELAN is not set
1181CONFIG_WLAN_80211=y
1182CONFIG_IPW2100=m
1183# CONFIG_IPW2100_MONITOR is not set
1184# CONFIG_IPW2100_DEBUG is not set
1185CONFIG_IPW2200=m
1186# CONFIG_IPW2200_MONITOR is not set
1187CONFIG_IPW2200_QOS=y
1188# CONFIG_IPW2200_DEBUG is not set
1189# CONFIG_LIBIPW_DEBUG is not set
1190CONFIG_LIBERTAS=y
1191# CONFIG_LIBERTAS_THINFIRM is not set
1192CONFIG_LIBERTAS_USB=m
1193CONFIG_LIBERTAS_SDIO=y
1194CONFIG_LIBERTAS_SPI=y
1195# CONFIG_LIBERTAS_DEBUG is not set
1196# CONFIG_AIRO is not set
1197# CONFIG_HERMES is not set
1198# CONFIG_ATMEL is not set
1199# CONFIG_PRISM54 is not set
1200CONFIG_USB_ZD1201=m
1201CONFIG_USB_NET_RNDIS_WLAN=m
1202CONFIG_RTL8180=m
1203CONFIG_RTL8187=m
1204# CONFIG_ADM8211 is not set
1205# CONFIG_MAC80211_HWSIM is not set
1206CONFIG_P54_COMMON=m
1207CONFIG_P54_USB=m
1208CONFIG_P54_PCI=m
1209CONFIG_ATH5K=y
1210CONFIG_ATH9K=m
1211# CONFIG_ATH9K_DEBUG is not set
1212CONFIG_IWLWIFI=m
1213CONFIG_IWLCORE=m
1214# CONFIG_IWLWIFI_LEDS is not set
1215CONFIG_IWLWIFI_RFKILL=y
1216# CONFIG_IWLWIFI_DEBUG is not set
1217CONFIG_IWLAGN=m
1218# CONFIG_IWLAGN_SPECTRUM_MEASUREMENT is not set
1219# CONFIG_IWLAGN_LEDS is not set
1220CONFIG_IWL4965=y
1221CONFIG_IWL5000=y
1222CONFIG_IWL3945=m
1223CONFIG_IWL3945_RFKILL=y
1224# CONFIG_IWL3945_SPECTRUM_MEASUREMENT is not set
1225# CONFIG_IWL3945_LEDS is not set
1226# CONFIG_IWL3945_DEBUG is not set
1227# CONFIG_HOSTAP is not set
1228# CONFIG_B43 is not set
1229CONFIG_B43_PCI_AUTOSELECT=y
1230CONFIG_B43_PCICORE_AUTOSELECT=y
1231CONFIG_B43_LEDS=y
1232CONFIG_B43_RFKILL=y
1233# CONFIG_B43_DEBUG is not set
1234# CONFIG_B43LEGACY is not set
1235# CONFIG_ZD1211RW is not set
1236CONFIG_RT2X00=m
1237CONFIG_RT2400PCI=m
1238CONFIG_RT2500PCI=m
1239CONFIG_RT61PCI=m
1240CONFIG_RT2500USB=m
1241CONFIG_RT73USB=m
1242CONFIG_RT2X00_LIB_PCI=m
1243CONFIG_RT2X00_LIB_USB=m
1244CONFIG_RT2X00_LIB=m
1245CONFIG_RT2X00_LIB_FIRMWARE=y
1246CONFIG_RT2X00_LIB_CRYPTO=y
1247CONFIG_RT2X00_LIB_RFKILL=y
1248CONFIG_RT2X00_LIB_LEDS=y
1249# CONFIG_RT2X00_LIB_DEBUGFS is not set
1250# CONFIG_RT2X00_DEBUG is not set
1251
1252#
1253# WiMAX Wireless Broadband devices
1254#
1255CONFIG_WIMAX_I2400M_USB=m
1256CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
1257# CONFIG_WIMAX_I2400M_SDIO is not set
1258#
1259#
1260
1261#
1262# USB Network Adapters
1263#
1264CONFIG_USB_CATC=m
1265CONFIG_USB_KAWETH=m
1266CONFIG_USB_PEGASUS=m
1267CONFIG_USB_RTL8150=m
1268CONFIG_USB_USBNET=m
1269CONFIG_USB_NET_AX8817X=m
1270CONFIG_USB_NET_CDCETHER=m
1271CONFIG_USB_NET_CDC_EEM=m
1272CONFIG_USB_NET_DM9601=m
1273CONFIG_USB_NET_SMSC95XX=m
1274CONFIG_USB_NET_GL620A=m
1275CONFIG_USB_NET_NET1080=m
1276CONFIG_USB_NET_PLUSB=m
1277CONFIG_USB_NET_MCS7830=m
1278CONFIG_USB_NET_RNDIS_HOST=m
1279CONFIG_USB_NET_CDC_SUBSET=m
1280CONFIG_USB_ALI_M5632=y
1281CONFIG_USB_AN2720=y
1282CONFIG_USB_BELKIN=y
1283CONFIG_USB_ARMLINUX=y
1284CONFIG_USB_EPSON2888=y
1285CONFIG_USB_KC2190=y
1286CONFIG_USB_NET_ZAURUS=m
1287# CONFIG_USB_HSO is not set
1288# CONFIG_WAN is not set
1289# CONFIG_FDDI is not set
1290# CONFIG_HIPPI is not set
1291CONFIG_PPP=m
1292CONFIG_PPP_MULTILINK=y
1293CONFIG_PPP_FILTER=y
1294CONFIG_PPP_ASYNC=m
1295CONFIG_PPP_SYNC_TTY=m
1296CONFIG_PPP_DEFLATE=m
1297CONFIG_PPP_BSDCOMP=m
1298CONFIG_PPP_MPPE=m
1299CONFIG_PPPOE=m
1300CONFIG_PPPOL2TP=m
1301# CONFIG_SLIP is not set
1302CONFIG_SLHC=m
1303# CONFIG_NET_FC is not set
1304# CONFIG_NETCONSOLE is not set
1305# CONFIG_NETPOLL is not set
1306# CONFIG_NET_POLL_CONTROLLER is not set
1307# CONFIG_ISDN is not set
1308# CONFIG_PHONE is not set
1309
1310#
1311# Input device support
1312#
1313CONFIG_INPUT=y
1314CONFIG_INPUT_FF_MEMLESS=y
1315CONFIG_INPUT_POLLDEV=m
1316
1317#
1318# Userland interfaces
1319#
1320CONFIG_INPUT_MOUSEDEV=y
1321# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
1322CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
1323CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
1324CONFIG_INPUT_JOYDEV=m
1325CONFIG_INPUT_EVDEV=y
1326# CONFIG_INPUT_EVBUG is not set
1327
1328#
1329# Input Device Drivers
1330#
1331CONFIG_INPUT_KEYBOARD=y
1332CONFIG_KEYBOARD_ATKBD=y
1333# CONFIG_KEYBOARD_SUNKBD is not set
1334# CONFIG_KEYBOARD_LKKBD is not set
1335# CONFIG_KEYBOARD_XTKBD is not set
1336# CONFIG_KEYBOARD_NEWTON is not set
1337# CONFIG_KEYBOARD_STOWAWAY is not set
1338CONFIG_INPUT_MOUSE=y
1339CONFIG_MOUSE_PS2=y
1340CONFIG_MOUSE_PS2_ALPS=y
1341CONFIG_MOUSE_PS2_LOGIPS2PP=y
1342CONFIG_MOUSE_PS2_SYNAPTICS=y
1343CONFIG_MOUSE_PS2_LIFEBOOK=y
1344CONFIG_MOUSE_PS2_TRACKPOINT=y
1345# CONFIG_MOUSE_PS2_ELANTECH is not set
1346CONFIG_MOUSE_PS2_TOUCHKIT=y
1347CONFIG_MOUSE_SERIAL=m
1348# CONFIG_MOUSE_APPLETOUCH is not set
1349# CONFIG_MOUSE_BCM5974 is not set
1350# CONFIG_MOUSE_INPORT is not set
1351# CONFIG_MOUSE_LOGIBM is not set
1352# CONFIG_MOUSE_PC110PAD is not set
1353CONFIG_MOUSE_VSXXXAA=m
1354CONFIG_INPUT_JOYSTICK=y
1355# CONFIG_JOYSTICK_ANALOG is not set
1356# CONFIG_JOYSTICK_A3D is not set
1357# CONFIG_JOYSTICK_ADI is not set
1358# CONFIG_JOYSTICK_COBRA is not set
1359# CONFIG_JOYSTICK_GF2K is not set
1360# CONFIG_JOYSTICK_GRIP is not set
1361# CONFIG_JOYSTICK_GRIP_MP is not set
1362# CONFIG_JOYSTICK_GUILLEMOT is not set
1363# CONFIG_JOYSTICK_INTERACT is not set
1364# CONFIG_JOYSTICK_SIDEWINDER is not set
1365# CONFIG_JOYSTICK_TMDC is not set
1366# CONFIG_JOYSTICK_IFORCE is not set
1367# CONFIG_JOYSTICK_WARRIOR is not set
1368# CONFIG_JOYSTICK_MAGELLAN is not set
1369# CONFIG_JOYSTICK_SPACEORB is not set
1370# CONFIG_JOYSTICK_SPACEBALL is not set
1371# CONFIG_JOYSTICK_STINGER is not set
1372# CONFIG_JOYSTICK_TWIDJOY is not set
1373# CONFIG_JOYSTICK_ZHENHUA is not set
1374# CONFIG_JOYSTICK_JOYDUMP is not set
1375# CONFIG_JOYSTICK_XPAD is not set
1376# CONFIG_INPUT_TABLET is not set
1377CONFIG_INPUT_TOUCHSCREEN=y
1378CONFIG_TOUCHSCREEN_FUJITSU=m
1379CONFIG_TOUCHSCREEN_GUNZE=m
1380CONFIG_TOUCHSCREEN_ELO=m
1381# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
1382CONFIG_TOUCHSCREEN_MTOUCH=m
1383CONFIG_TOUCHSCREEN_INEXIO=m
1384CONFIG_TOUCHSCREEN_MK712=m
1385CONFIG_TOUCHSCREEN_HTCPEN=m
1386CONFIG_TOUCHSCREEN_PENMOUNT=m
1387CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
1388CONFIG_TOUCHSCREEN_TOUCHWIN=m
1389CONFIG_TOUCHSCREEN_WM97XX=m
1390CONFIG_TOUCHSCREEN_WM9705=y
1391CONFIG_TOUCHSCREEN_WM9712=y
1392CONFIG_TOUCHSCREEN_WM9713=y
1393CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
1394CONFIG_TOUCHSCREEN_USB_EGALAX=y
1395CONFIG_TOUCHSCREEN_USB_PANJIT=y
1396CONFIG_TOUCHSCREEN_USB_3M=y
1397CONFIG_TOUCHSCREEN_USB_ITM=y
1398CONFIG_TOUCHSCREEN_USB_ETURBO=y
1399CONFIG_TOUCHSCREEN_USB_GUNZE=y
1400CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
1401CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
1402CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
1403CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
1404CONFIG_TOUCHSCREEN_USB_GOTOP=y
1405CONFIG_TOUCHSCREEN_TOUCHIT213=m
1406# CONFIG_TOUCHSCREEN_TSC2007 is not set
1407CONFIG_INPUT_MISC=y
1408# CONFIG_INPUT_PCSPKR is not set
1409# CONFIG_INPUT_APANEL is not set
1410CONFIG_INPUT_WISTRON_BTNS=m
1411# CONFIG_INPUT_ATLAS_BTNS is not set
1412# CONFIG_INPUT_ATI_REMOTE is not set
1413# CONFIG_INPUT_ATI_REMOTE2 is not set
1414CONFIG_INPUT_KEYSPAN_REMOTE=m
1415CONFIG_INPUT_POWERMATE=m
1416CONFIG_INPUT_YEALINK=m
1417# CONFIG_INPUT_CM109 is not set
1418# CONFIG_INPUT_UINPUT is not set
1419
1420#
1421# Hardware I/O ports
1422#
1423CONFIG_SERIO=y
1424CONFIG_SERIO_I8042=y
1425CONFIG_SERIO_SERPORT=y
1426# CONFIG_SERIO_CT82C710 is not set
1427# CONFIG_SERIO_PCIPS2 is not set
1428CONFIG_SERIO_LIBPS2=y
1429CONFIG_SERIO_RAW=m
1430# CONFIG_GAMEPORT is not set
1431
1432#
1433# Character devices
1434#
1435CONFIG_VT=y
1436CONFIG_CONSOLE_TRANSLATIONS=y
1437CONFIG_VT_CONSOLE=y
1438CONFIG_HW_CONSOLE=y
1439CONFIG_VT_HW_CONSOLE_BINDING=y
1440# CONFIG_DEVKMEM is not set
1441# CONFIG_SERIAL_NONSTANDARD is not set
1442# CONFIG_NOZOMI is not set
1443
1444#
1445# Serial drivers
1446#
1447CONFIG_SERIAL_8250=y
1448CONFIG_SERIAL_8250_CONSOLE=y
1449CONFIG_FIX_EARLYCON_MEM=y
1450CONFIG_SERIAL_8250_PCI=y
1451CONFIG_SERIAL_8250_PNP=y
1452CONFIG_SERIAL_8250_NR_UARTS=4
1453CONFIG_SERIAL_8250_RUNTIME_UARTS=4
1454# CONFIG_SERIAL_8250_EXTENDED is not set
1455
1456#
1457# Non-8250 serial port support
1458#
1459# CONFIG_SERIAL_MAX3110 is not set
1460# CONFIG_MRST_MAX3110 is not set
1461# CONFIG_MRST_MAX3110_IRQ is not set
1462CONFIG_SERIAL_CORE=y
1463CONFIG_SERIAL_CORE_CONSOLE=y
1464# CONFIG_SERIAL_JSM is not set
1465CONFIG_UNIX98_PTYS=y
1466# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
1467# CONFIG_LEGACY_PTYS is not set
1468# CONFIG_IPMI_HANDLER is not set
1469CONFIG_HW_RANDOM=m
1470# CONFIG_HW_RANDOM_INTEL is not set
1471# CONFIG_HW_RANDOM_AMD is not set
1472# CONFIG_HW_RANDOM_GEODE is not set
1473# CONFIG_HW_RANDOM_VIA is not set
1474CONFIG_NVRAM=m
1475# CONFIG_DTLK is not set
1476# CONFIG_R3964 is not set
1477# CONFIG_APPLICOM is not set
1478# CONFIG_SONYPI is not set
1479# CONFIG_MWAVE is not set
1480# CONFIG_PC8736x_GPIO is not set
1481# CONFIG_NSC_GPIO is not set
1482# CONFIG_CS5535_GPIO is not set
1483# CONFIG_RAW_DRIVER is not set
1484CONFIG_HPET=y
1485# CONFIG_HPET_MMAP is not set
1486# CONFIG_HANGCHECK_TIMER is not set
1487# CONFIG_TCG_TPM is not set
1488# CONFIG_TELCLOCK is not set
1489CONFIG_DEVPORT=y
1490CONFIG_I2C=y
1491CONFIG_I2C_BOARDINFO=y
1492# CONFIG_I2C_CHARDEV is not set
1493# CONFIG_I2C_HELPER_AUTO is not set
1494
1495#
1496# I2C Algorithms
1497#
1498# CONFIG_I2C_ALGOBIT is not set
1499# CONFIG_I2C_ALGOPCF is not set
1500# CONFIG_I2C_ALGOPCA is not set
1501
1502#
1503# I2C Hardware Bus support
1504#
1505
1506#
1507# PC SMBus host controller drivers
1508#
1509# CONFIG_I2C_ALI1535 is not set
1510# CONFIG_I2C_ALI1563 is not set
1511# CONFIG_I2C_ALI15X3 is not set
1512# CONFIG_I2C_AMD756 is not set
1513# CONFIG_I2C_AMD8111 is not set
1514# CONFIG_I2C_I801 is not set
1515# CONFIG_I2C_ISCH is not set
1516# CONFIG_I2C_PIIX4 is not set
1517# CONFIG_I2C_NFORCE2 is not set
1518# CONFIG_I2C_SIS5595 is not set
1519# CONFIG_I2C_SIS630 is not set
1520# CONFIG_I2C_SIS96X is not set
1521# CONFIG_I2C_VIA is not set
1522# CONFIG_I2C_VIAPRO is not set
1523
1524#
1525# I2C system bus drivers (mostly embedded / system-on-chip)
1526#
1527# CONFIG_I2C_OCORES is not set
1528# CONFIG_I2C_SIMTEC is not set
1529
1530#
1531# External I2C/SMBus adapter drivers
1532#
1533# CONFIG_I2C_PARPORT_LIGHT is not set
1534# CONFIG_I2C_TAOS_EVM is not set
1535# CONFIG_I2C_TINY_USB is not set
1536
1537#
1538# Graphics adapter I2C/DDC channel drivers
1539#
1540# CONFIG_I2C_VOODOO3 is not set
1541
1542#
1543# Other I2C/SMBus bus drivers
1544#
1545# CONFIG_I2C_PCA_ISA is not set
1546# CONFIG_I2C_PCA_PLATFORM is not set
1547# CONFIG_I2C_STUB is not set
1548# CONFIG_SCx200_ACB is not set
1549
1550#
1551# Miscellaneous I2C Chip support
1552#
1553# CONFIG_DS1682 is not set
1554# CONFIG_AT24 is not set
1555# CONFIG_SENSORS_EEPROM is not set
1556# CONFIG_EEPROM_AT24 is not set
1557# CONFIG_EEPROM_LEGACY is not set
1558# CONFIG_SENSORS_PCF8574 is not set
1559# CONFIG_PCF8575 is not set
1560# CONFIG_SENSORS_PCA9539 is not set
1561# CONFIG_SENSORS_PCF8591 is not set
1562# CONFIG_SENSORS_MAX6875 is not set
1563# CONFIG_SENSORS_TSL2550 is not set
1564# CONFIG_I2C_DEBUG_CORE is not set
1565# CONFIG_I2C_DEBUG_ALGO is not set
1566# CONFIG_I2C_DEBUG_BUS is not set
1567# CONFIG_I2C_DEBUG_CHIP is not set
1568# CONFIG_SPI is not set
1569# CONFIG_SPI_MRST_SLAVE is not set
1570# CONFIG_SPI_MRST_SLAVE_DMA is not set
1571CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
1572# CONFIG_GPIOLIB is not set
1573# CONFIG_W1 is not set
1574CONFIG_POWER_SUPPLY=y
1575# CONFIG_POWER_SUPPLY_DEBUG is not set
1576# CONFIG_PDA_POWER is not set
1577# CONFIG_BATTERY_DS2760 is not set
1578# CONFIG_BATTERY_BQ27x00 is not set
1579CONFIG_HWMON=y
1580# CONFIG_HWMON_VID is not set
1581# CONFIG_SENSORS_ABITUGURU is not set
1582# CONFIG_SENSORS_ABITUGURU3 is not set
1583# CONFIG_SENSORS_AD7414 is not set
1584# CONFIG_SENSORS_AD7418 is not set
1585# CONFIG_SENSORS_ADM1021 is not set
1586# CONFIG_SENSORS_ADM1025 is not set
1587# CONFIG_SENSORS_ADM1026 is not set
1588# CONFIG_SENSORS_ADM1029 is not set
1589# CONFIG_SENSORS_ADM1031 is not set
1590# CONFIG_SENSORS_ADM9240 is not set
1591# CONFIG_SENSORS_ADT7462 is not set
1592# CONFIG_SENSORS_ADT7470 is not set
1593# CONFIG_SENSORS_ADT7473 is not set
1594# CONFIG_SENSORS_K8TEMP is not set
1595# CONFIG_SENSORS_ASB100 is not set
1596# CONFIG_SENSORS_ATXP1 is not set
1597# CONFIG_SENSORS_DS1621 is not set
1598# CONFIG_SENSORS_I5K_AMB is not set
1599# CONFIG_SENSORS_F71805F is not set
1600# CONFIG_SENSORS_F71882FG is not set
1601# CONFIG_SENSORS_F75375S is not set
1602# CONFIG_SENSORS_FSCHER is not set
1603# CONFIG_SENSORS_FSCPOS is not set
1604# CONFIG_SENSORS_FSCHMD is not set
1605# CONFIG_SENSORS_GL518SM is not set
1606# CONFIG_SENSORS_GL520SM is not set
1607# CONFIG_SENSORS_CORETEMP is not set
1608# CONFIG_SENSORS_IT87 is not set
1609# CONFIG_SENSORS_LM63 is not set
1610# CONFIG_SENSORS_LM75 is not set
1611# CONFIG_SENSORS_LM77 is not set
1612# CONFIG_SENSORS_LM78 is not set
1613# CONFIG_SENSORS_LM80 is not set
1614# CONFIG_SENSORS_LM83 is not set
1615# CONFIG_SENSORS_LM85 is not set
1616# CONFIG_SENSORS_LM87 is not set
1617# CONFIG_SENSORS_LM90 is not set
1618# CONFIG_SENSORS_LM92 is not set
1619# CONFIG_SENSORS_LM93 is not set
1620# CONFIG_SENSORS_LTC4245 is not set
1621# CONFIG_SENSORS_MAX1619 is not set
1622# CONFIG_SENSORS_MAX6650 is not set
1623# CONFIG_SENSORS_PC87360 is not set
1624# CONFIG_SENSORS_PC87427 is not set
1625# CONFIG_SENSORS_SIS5595 is not set
1626# CONFIG_SENSORS_DME1737 is not set
1627# CONFIG_SENSORS_SMSC47M1 is not set
1628# CONFIG_SENSORS_SMSC47M192 is not set
1629# CONFIG_SENSORS_SMSC47B397 is not set
1630# CONFIG_SENSORS_ADS7828 is not set
1631# CONFIG_SENSORS_THMC50 is not set
1632# CONFIG_SENSORS_VIA686A is not set
1633# CONFIG_SENSORS_VT1211 is not set
1634# CONFIG_SENSORS_VT8231 is not set
1635# CONFIG_SENSORS_W83781D is not set
1636# CONFIG_SENSORS_W83791D is not set
1637# CONFIG_SENSORS_W83792D is not set
1638# CONFIG_SENSORS_W83793 is not set
1639# CONFIG_SENSORS_W83L785TS is not set
1640# CONFIG_SENSORS_W83L786NG is not set
1641# CONFIG_SENSORS_W83627HF is not set
1642# CONFIG_SENSORS_W83627EHF is not set
1643# CONFIG_SENSORS_HDAPS is not set
1644# CONFIG_SENSORS_LIS3LV02D is not set
1645# CONFIG_SENSORS_APPLESMC is not set
1646# CONFIG_HWMON_DEBUG_CHIP is not set
1647CONFIG_THERMAL=y
1648CONFIG_THERMAL_HWMON=y
1649# CONFIG_WATCHDOG is not set
1650CONFIG_SSB_POSSIBLE=y
1651
1652#
1653# Sonics Silicon Backplane
1654#
1655CONFIG_SSB=m
1656CONFIG_SSB_SPROM=y
1657CONFIG_SSB_PCIHOST_POSSIBLE=y
1658CONFIG_SSB_PCIHOST=y
1659CONFIG_SSB_B43_PCI_BRIDGE=y
1660# CONFIG_SSB_DEBUG is not set
1661CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
1662CONFIG_SSB_DRIVER_PCICORE=y
1663
1664#
1665# Multifunction device drivers
1666#
1667# CONFIG_MFD_CORE is not set
1668# CONFIG_MFD_SM501 is not set
1669# CONFIG_HTC_PASIC3 is not set
1670# CONFIG_TWL4030_CORE is not set
1671# CONFIG_MFD_TMIO is not set
1672# CONFIG_PMIC_DA903X is not set
1673# CONFIG_MFD_WM8400 is not set
1674# CONFIG_MFD_WM8350_I2C is not set
1675# CONFIG_REGULATOR is not set
1676
1677#
1678# Multimedia devices
1679#
1680CONFIG_MEDIA_SUPPORT=y
1681
1682#
1683# Multimedia core support
1684#
1685CONFIG_VIDEO_DEV=y
1686CONFIG_VIDEO_V4L2_COMMON=y
1687# CONFIG_VIDEO_ALLOW_V4L1 is not set
1688CONFIG_VIDEO_V4L1_COMPAT=y
1689CONFIG_DVB_CORE=m
1690CONFIG_VIDEO_MEDIA=m
1691
1692#
1693# Multimedia drivers
1694#
1695CONFIG_MEDIA_ATTACH=y
1696CONFIG_MEDIA_TUNER=m
1697# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
1698CONFIG_MEDIA_TUNER_SIMPLE=m
1699CONFIG_MEDIA_TUNER_TDA8290=m
1700CONFIG_MEDIA_TUNER_TDA9887=m
1701CONFIG_MEDIA_TUNER_TEA5761=m
1702CONFIG_MEDIA_TUNER_TEA5767=m
1703CONFIG_MEDIA_TUNER_MT20XX=m
1704CONFIG_MEDIA_TUNER_XC2028=m
1705CONFIG_MEDIA_TUNER_XC5000=m
1706CONFIG_VIDEO_V4L2=y
1707CONFIG_VIDEOBUF_GEN=m
1708CONFIG_VIDEOBUF_VMALLOC=m
1709CONFIG_VIDEO_CAPTURE_DRIVERS=y
1710# CONFIG_VIDEO_ADV_DEBUG is not set
1711# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
1712CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
1713# CONFIG_VIDEO_VIVI is not set
1714# CONFIG_VIDEO_BT848 is not set
1715# CONFIG_VIDEO_SAA5246A is not set
1716# CONFIG_VIDEO_SAA5249 is not set
1717# CONFIG_VIDEO_SAA7134 is not set
1718# CONFIG_VIDEO_HEXIUM_ORION is not set
1719# CONFIG_VIDEO_HEXIUM_GEMINI is not set
1720# CONFIG_VIDEO_CX88 is not set
1721# CONFIG_VIDEO_CX23885 is not set
1722# CONFIG_VIDEO_AU0828 is not set
1723# CONFIG_VIDEO_IVTV is not set
1724# CONFIG_VIDEO_CX18 is not set
1725# CONFIG_VIDEO_CAFE_CCIC is not set
1726# CONFIG_SOC_CAMERA is not set
1727CONFIG_V4L_USB_DRIVERS=y
1728CONFIG_USB_VIDEO_CLASS=m
1729CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
1730CONFIG_USB_GSPCA=m
1731# CONFIG_USB_M5602 is not set
1732# CONFIG_USB_STV06XX is not set
1733# CONFIG_USB_GSPCA_CONEX is not set
1734# CONFIG_USB_GSPCA_ETOMS is not set
1735# CONFIG_USB_GSPCA_FINEPIX is not set
1736# CONFIG_USB_GSPCA_MARS is not set
1737# CONFIG_USB_GSPCA_OV519 is not set
1738# CONFIG_USB_GSPCA_OV534 is not set
1739# CONFIG_USB_GSPCA_PAC207 is not set
1740# CONFIG_USB_GSPCA_PAC7311 is not set
1741# CONFIG_USB_GSPCA_SONIXB is not set
1742# CONFIG_USB_GSPCA_SONIXJ is not set
1743# CONFIG_USB_GSPCA_SPCA500 is not set
1744# CONFIG_USB_GSPCA_SPCA501 is not set
1745# CONFIG_USB_GSPCA_SPCA505 is not set
1746# CONFIG_USB_GSPCA_SPCA506 is not set
1747# CONFIG_USB_GSPCA_SPCA508 is not set
1748# CONFIG_USB_GSPCA_SPCA561 is not set
1749# CONFIG_USB_GSPCA_STK014 is not set
1750# CONFIG_USB_GSPCA_SUNPLUS is not set
1751# CONFIG_USB_GSPCA_T613 is not set
1752# CONFIG_USB_GSPCA_TV8532 is not set
1753# CONFIG_USB_GSPCA_VC032X is not set
1754# CONFIG_USB_GSPCA_ZC3XX is not set
1755# CONFIG_VIDEO_PVRUSB2 is not set
1756# CONFIG_VIDEO_EM28XX is not set
1757# CONFIG_VIDEO_USBVISION is not set
1758CONFIG_USB_ET61X251=m
1759CONFIG_USB_SN9C102=m
1760CONFIG_USB_ZC0301=m
1761CONFIG_USB_ZR364XX=m
1762CONFIG_USB_STKWEBCAM=m
1763CONFIG_USB_S2255=m
1764# CONFIG_RADIO_ADAPTERS is not set
1765# CONFIG_DVB_DYNAMIC_MINORS is not set
1766# CONFIG_DVB_CAPTURE_DRIVERS is not set
1767# CONFIG_DAB is not set
1768
1769#
1770# Graphics support
1771#
1772CONFIG_AGP=y
1773# CONFIG_AGP_ALI is not set
1774# CONFIG_AGP_ATI is not set
1775# CONFIG_AGP_AMD is not set
1776# CONFIG_AGP_AMD64 is not set
1777CONFIG_AGP_INTEL=y
1778# CONFIG_AGP_NVIDIA is not set
1779# CONFIG_AGP_SIS is not set
1780# CONFIG_AGP_SWORKS is not set
1781# CONFIG_AGP_VIA is not set
1782# CONFIG_AGP_EFFICEON is not set
1783CONFIG_DRM=y
1784# CONFIG_DRM_TDFX is not set
1785# CONFIG_DRM_R128 is not set
1786# CONFIG_DRM_RADEON is not set
1787CONFIG_DRM_I810=y
1788# CONFIG_DRM_I830 is not set
1789CONFIG_DRM_I915=y
1790CONFIG_DRM_I915_KMS=y
1791# CONFIG_DRM_MGA is not set
1792# CONFIG_DRM_SIS is not set
1793# CONFIG_DRM_VIA is not set
1794# CONFIG_DRM_SAVAGE is not set
1795# CONFIG_VGASTATE is not set
1796# CONFIG_DRM_PSB is not set
1797CONFIG_VIDEO_OUTPUT_CONTROL=y
1798CONFIG_FB=y
1799# CONFIG_FIRMWARE_EDID is not set
1800# CONFIG_FB_TRIDENT_ACCEL is not set
1801# CONFIG_FB_ARK is not set
1802# CONFIG_FB_PM3 is not set
1803# CONFIG_FB_CARMINE is not set
1804# CONFIG_FB_GEODE is not set
1805# CONFIG_FB_VIRTUAL is not set
1806# CONFIG_FB_METRONOME is not set
1807# CONFIG_FB_MB862XX is not set
1808
1809
1810CONFIG_BACKLIGHT_LCD_SUPPORT=y
1811# CONFIG_LCD_CLASS_DEVICE is not set
1812CONFIG_BACKLIGHT_CLASS_DEVICE=y
1813CONFIG_BACKLIGHT_GENERIC=y
1814CONFIG_SAMSUNG_BACKLIGHT=m
1815# CONFIG_BACKLIGHT_CORGI is not set
1816# CONFIG_BACKLIGHT_PROGEAR is not set
1817# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
1818# CONFIG_BACKLIGHT_SAHARA is not set
1819
1820
1821#
1822# Frame buffer hardware drivers
1823#
1824# CONFIG_FB_TILEBLITTING is not set
1825# CONFIG_FB_FOREIGN_ENDIAN is not set
1826# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
1827# CONFIG_FB_3DFX_ACCEL is not set
1828# CONFIG_FB_CIRRUS is not set
1829# CONFIG_FB_PM2 is not set
1830# CONFIG_FB_CYBER2000 is not set
1831# CONFIG_FB_ARC is not set
1832# CONFIG_FB_ASILIANT is not set
1833# CONFIG_FB_IMSTT is not set
1834# CONFIG_FB_VGA16 is not set
1835# CONFIG_FB_VESA is not set
1836# CONFIG_FB_EFI is not set
1837# CONFIG_FB_N411 is not set
1838# CONFIG_FB_HGA is not set
1839# CONFIG_FB_S1D13XXX is not set
1840# CONFIG_FB_NVIDIA is not set
1841# CONFIG_FB_RIVA is not set
1842CONFIG_FB_I810=m
1843# CONFIG_FB_I810_GTF is not set
1844# CONFIG_FB_LE80578 is not set
1845# CONFIG_FB_CARILLO_RANCH is not set
1846# CONFIG_FB_INTEL is not set
1847# CONFIG_FB_INTEL_DEBUG is not set
1848# CONFIG_FB_INTEL_I2C is not set
1849# CONFIG_FB_MATROX is not set
1850# CONFIG_FB_RADEON is not set
1851CONFIG_FB_RADEON_I2C=y
1852# CONFIG_FB_RADEON_BACKLIGHT is not set
1853# CONFIG_FB_RADEON_DEBUG is not set
1854# CONFIG_FB_ATY128 is not set
1855# CONFIG_FB_ATY is not set
1856# CONFIG_FB_S3 is not set
1857# CONFIG_FB_SAVAGE is not set
1858# CONFIG_FB_SIS is not set
1859# CONFIG_FB_SIS_300 is not set
1860# CONFIG_FB_SIS_315 is not set
1861# CONFIG_FB_VIA is not set
1862# CONFIG_FB_NEOMAGIC is not set
1863# CONFIG_FB_KYRO is not set
1864# CONFIG_FB_3DFX is not set
1865# CONFIG_FB_VOODOO1 is not set
1866# CONFIG_FB_VT8623 is not set
1867# CONFIG_FB_CYBLA is not set
1868# CONFIG_FB_TRIDENT is not set
1869# CONFIG_FB_ARK is not set
1870# CONFIG_FB_PM3 is not set
1871# CONFIG_FB_CARMINE is not set
1872# CONFIG_FB_GEODE is not set
1873# CONFIG_FB_VIRTUAL is not set
1874# CONFIG_FB_METRONOME is not set
1875# CONFIG_FB_MB862XX is not set
1876
1877#
1878# Display device support
1879#
1880# CONFIG_DISPLAY_SUPPORT is not set
1881
1882#
1883# Console display driver support
1884#
1885CONFIG_VGA_CONSOLE=y
1886CONFIG_VGACON_SOFT_SCROLLBACK=y
1887CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
1888# CONFIG_MDA_CONSOLE is not set
1889CONFIG_DUMMY_CONSOLE=y
1890CONFIG_FRAMEBUFFER_CONSOLE=y
1891CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
1892# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
1893CONFIG_FONTS=y
1894CONFIG_FONT_8x16=y
1895# CONFIG_LOGO is not set
1896CONFIG_SOUND=y
1897# CONFIG_SOUND_OSS_CORE is not set
1898CONFIG_SND=y
1899CONFIG_SND_TIMER=y
1900CONFIG_SND_PCM=y
1901CONFIG_SND_HWDEP=y
1902CONFIG_SND_RAWMIDI=m
1903CONFIG_SND_SEQUENCER=y
1904CONFIG_SND_SEQ_DUMMY=y
1905# CONFIG_SND_OSSEMUL is not set
1906# CONFIG_SND_MIXER_OSS is not set
1907# CONFIG_SND_PCM_OSS is not set
1908# CONFIG_SND_SEQUENCER_OSS is not set
1909# CONFIG_SND_HRTIMER is not set
1910CONFIG_SND_DYNAMIC_MINORS=y
1911# CONFIG_SND_SUPPORT_OLD_API is not set
1912CONFIG_SND_VERBOSE_PROCFS=y
1913CONFIG_SND_VERBOSE_PRINTK=y
1914CONFIG_SND_DEBUG=y
1915# CONFIG_SND_DEBUG_VERBOSE is not set
1916CONFIG_SND_PCM_XRUN_DEBUG=y
1917CONFIG_SND_VMASTER=y
1918CONFIG_SND_AC97_CODEC=y
1919CONFIG_SND_DRIVERS=y
1920# CONFIG_SND_DUMMY is not set
1921# CONFIG_SND_VIRMIDI is not set
1922# CONFIG_SND_MTPAV is not set
1923# CONFIG_SND_SERIAL_U16550 is not set
1924# CONFIG_SND_MPU401 is not set
1925CONFIG_SND_AC97_POWER_SAVE=y
1926CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5
1927# CONFIG_SND_ISA is not set
1928CONFIG_SND_PCI=y
1929# CONFIG_SND_AD1889 is not set
1930# CONFIG_SND_ALS300 is not set
1931# CONFIG_SND_ALS4000 is not set
1932# CONFIG_SND_ALI5451 is not set
1933# CONFIG_SND_ATIIXP is not set
1934# CONFIG_SND_ATIIXP_MODEM is not set
1935# CONFIG_SND_AU8810 is not set
1936# CONFIG_SND_AU8820 is not set
1937# CONFIG_SND_AU8830 is not set
1938# CONFIG_SND_AW2 is not set
1939# CONFIG_SND_AZT3328 is not set
1940# CONFIG_SND_BT87X is not set
1941# CONFIG_SND_CA0106 is not set
1942# CONFIG_SND_CMIPCI is not set
1943# CONFIG_SND_OXYGEN is not set
1944# CONFIG_SND_CS4281 is not set
1945# CONFIG_SND_CS46XX is not set
1946# CONFIG_SND_CS5530 is not set
1947# CONFIG_SND_CS5535AUDIO is not set
1948# CONFIG_SND_DARLA20 is not set
1949# CONFIG_SND_GINA20 is not set
1950# CONFIG_SND_LAYLA20 is not set
1951# CONFIG_SND_DARLA24 is not set
1952# CONFIG_SND_GINA24 is not set
1953# CONFIG_SND_LAYLA24 is not set
1954# CONFIG_SND_MONA is not set
1955# CONFIG_SND_MIA is not set
1956# CONFIG_SND_ECHO3G is not set
1957# CONFIG_SND_INDIGO is not set
1958# CONFIG_SND_INDIGOIO is not set
1959# CONFIG_SND_INDIGODJ is not set
1960# CONFIG_SND_EMU10K1 is not set
1961# CONFIG_SND_EMU10K1X is not set
1962# CONFIG_SND_ENS1370 is not set
1963CONFIG_SND_ENS1371=m
1964# CONFIG_SND_ES1938 is not set
1965# CONFIG_SND_ES1968 is not set
1966# CONFIG_SND_FM801 is not set
1967CONFIG_SND_HDA_INTEL=y
1968CONFIG_SND_HDA_HWDEP=y
1969# CONFIG_SND_HDA_RECONFIG is not set
1970# CONFIG_SND_HDA_INPUT_BEEP is not set
1971CONFIG_SND_HDA_CODEC_REALTEK=y
1972CONFIG_SND_HDA_CODEC_ANALOG=y
1973CONFIG_SND_HDA_CODEC_SIGMATEL=y
1974CONFIG_SND_HDA_CODEC_VIA=y
1975CONFIG_SND_HDA_CODEC_ATIHDMI=y
1976CONFIG_SND_HDA_CODEC_NVHDMI=y
1977CONFIG_SND_HDA_CODEC_INTELHDMI=y
1978CONFIG_SND_HDA_CODEC_CONEXANT=y
1979CONFIG_SND_HDA_CODEC_CMEDIA=y
1980CONFIG_SND_HDA_CODEC_SI3054=y
1981CONFIG_SND_HDA_GENERIC=y
1982CONFIG_SND_HDA_POWER_SAVE=y
1983CONFIG_SND_HDA_POWER_SAVE_DEFAULT=5
1984# CONFIG_SND_HDSP is not set
1985# CONFIG_SND_HDSPM is not set
1986# CONFIG_SND_HIFIER is not set
1987# CONFIG_SND_ICE1712 is not set
1988# CONFIG_SND_ICE1724 is not set
1989CONFIG_SND_INTEL8X0=y
1990# CONFIG_SND_INTEL8X0M is not set
1991# CONFIG_SND_KORG1212 is not set
1992# CONFIG_SND_MAESTRO3 is not set
1993# CONFIG_SND_MIXART is not set
1994# CONFIG_SND_NM256 is not set
1995# CONFIG_SND_PCXHR is not set
1996# CONFIG_SND_RIPTIDE is not set
1997# CONFIG_SND_RME32 is not set
1998# CONFIG_SND_RME96 is not set
1999# CONFIG_SND_RME9652 is not set
2000# CONFIG_SND_SIS7019 is not set
2001# CONFIG_SND_SONICVIBES is not set
2002# CONFIG_SND_TRIDENT is not set
2003# CONFIG_SND_VIA82XX is not set
2004# CONFIG_SND_VIA82XX_MODEM is not set
2005# CONFIG_SND_VIRTUOSO is not set
2006# CONFIG_SND_VX222 is not set
2007# CONFIG_SND_YMFPCI is not set
2008CONFIG_SND_USB=y
2009CONFIG_SND_USB_AUDIO=m
2010CONFIG_SND_USB_USX2Y=m
2011CONFIG_SND_USB_CAIAQ=m
2012CONFIG_SND_USB_CAIAQ_INPUT=y
2013# CONFIG_SND_USB_US122L is not set
2014# CONFIG_SND_SOC is not set
2015# CONFIG_SOUND_PRIME is not set
2016CONFIG_AC97_BUS=y
2017CONFIG_HID_SUPPORT=y
2018CONFIG_HID=y
2019CONFIG_HID_DEBUG=y
2020CONFIG_HIDRAW=y
2021
2022#
2023# USB Input Devices
2024#
2025CONFIG_USB_HID=y
2026CONFIG_HID_PID=y
2027CONFIG_USB_HIDDEV=y
2028
2029#
2030# Special HID drivers
2031#
2032CONFIG_HID_COMPAT=y
2033CONFIG_HID_A4TECH=y
2034CONFIG_HID_APPLE=y
2035CONFIG_HID_BELKIN=y
2036CONFIG_HID_BRIGHT=y
2037CONFIG_HID_CHERRY=y
2038CONFIG_HID_CHICONY=y
2039CONFIG_HID_CYPRESS=y
2040CONFIG_HID_DELL=y
2041CONFIG_HID_EZKEY=y
2042CONFIG_HID_GYRATION=y
2043CONFIG_HID_LOGITECH=y
2044# CONFIG_LOGITECH_FF is not set
2045# CONFIG_LOGIRUMBLEPAD2_FF is not set
2046CONFIG_HID_MICROSOFT=y
2047CONFIG_HID_MONTEREY=y
2048CONFIG_HID_PANTHERLORD=y
2049# CONFIG_PANTHERLORD_FF is not set
2050CONFIG_HID_PETALYNX=y
2051CONFIG_HID_SAMSUNG=y
2052CONFIG_HID_SONY=y
2053CONFIG_HID_SUNPLUS=y
2054# CONFIG_GREENASIA_FF is not set
2055# CONFIG_THRUSTMASTER_FF is not set
2056# CONFIG_ZEROPLUS_FF is not set
2057CONFIG_USB_SUPPORT=y
2058CONFIG_USB_ARCH_HAS_HCD=y
2059CONFIG_USB_ARCH_HAS_OHCI=y
2060CONFIG_USB_ARCH_HAS_EHCI=y
2061CONFIG_USB=y
2062# CONFIG_USB_DEBUG is not set
2063CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
2064
2065#
2066# Miscellaneous USB options
2067#
2068CONFIG_USB_DEVICEFS=y
2069# CONFIG_USB_DEVICE_CLASS is not set
2070# CONFIG_USB_DYNAMIC_MINORS is not set
2071CONFIG_USB_SUSPEND=y
2072# CONFIG_USB_OTG is not set
2073CONFIG_USB_MON=y
2074CONFIG_USB_WUSB=m
2075# CONFIG_USB_WUSB_CBAF is not set
2076
2077#
2078# USB Host Controller Drivers
2079#
2080# CONFIG_USB_C67X00_HCD is not set
2081CONFIG_USB_EHCI_HCD=y
2082CONFIG_USB_EHCI_ROOT_HUB_TT=y
2083CONFIG_USB_EHCI_TT_NEWSCHED=y
2084# CONFIG_USB_OXU210HP_HCD is not set
2085CONFIG_USB_ISP116X_HCD=m
2086# CONFIG_USB_ISP1760_HCD is not set
2087CONFIG_USB_OHCI_HCD=y
2088# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
2089# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
2090CONFIG_USB_OHCI_LITTLE_ENDIAN=y
2091CONFIG_USB_UHCI_HCD=y
2092CONFIG_USB_U132_HCD=m
2093CONFIG_USB_SL811_HCD=m
2094# CONFIG_USB_R8A66597_HCD is not set
2095CONFIG_USB_WHCI_HCD=m
2096CONFIG_USB_HWA_HCD=m
2097# CONFIG_USB_GADGET_MUSB_HDRC is not set
2098
2099#
2100# USB Device Class drivers
2101#
2102CONFIG_USB_ACM=m
2103CONFIG_USB_PRINTER=m
2104CONFIG_USB_WDM=m
2105# CONFIG_USB_TMC is not set
2106
2107#
2108# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
2109#
2110
2111#
2112# see USB_STORAGE Help for more information
2113#
2114CONFIG_USB_STORAGE=y
2115# CONFIG_USB_STORAGE_DEBUG is not set
2116CONFIG_USB_STORAGE_DATAFAB=y
2117CONFIG_USB_STORAGE_FREECOM=y
2118CONFIG_USB_STORAGE_ISD200=y
2119CONFIG_USB_STORAGE_DPCM=y
2120CONFIG_USB_STORAGE_USBAT=y
2121CONFIG_USB_STORAGE_SDDR09=y
2122CONFIG_USB_STORAGE_SDDR55=y
2123CONFIG_USB_STORAGE_JUMPSHOT=y
2124CONFIG_USB_STORAGE_ALAUDA=y
2125# CONFIG_USB_STORAGE_ONETOUCH is not set
2126CONFIG_USB_STORAGE_KARMA=y
2127# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
2128CONFIG_USB_LIBUSUAL=y
2129
2130#
2131# USB Imaging devices
2132#
2133CONFIG_USB_MDC800=m
2134CONFIG_USB_MICROTEK=m
2135
2136#
2137# USB port drivers
2138#
2139CONFIG_USB_SERIAL=m
2140CONFIG_USB_EZUSB=y
2141CONFIG_USB_SERIAL_GENERIC=y
2142CONFIG_USB_SERIAL_AIRCABLE=m
2143CONFIG_USB_SERIAL_ARK3116=m
2144CONFIG_USB_SERIAL_BELKIN=m
2145CONFIG_USB_SERIAL_CH341=m
2146CONFIG_USB_SERIAL_WHITEHEAT=m
2147CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
2148CONFIG_USB_SERIAL_CP2101=m
2149CONFIG_USB_SERIAL_CYPRESS_M8=m
2150CONFIG_USB_SERIAL_EMPEG=m
2151CONFIG_USB_SERIAL_FTDI_SIO=m
2152CONFIG_USB_SERIAL_FUNSOFT=m
2153CONFIG_USB_SERIAL_VISOR=m
2154CONFIG_USB_SERIAL_IPAQ=m
2155CONFIG_USB_SERIAL_IR=m
2156CONFIG_USB_SERIAL_EDGEPORT=m
2157CONFIG_USB_SERIAL_EDGEPORT_TI=m
2158CONFIG_USB_SERIAL_GARMIN=m
2159CONFIG_USB_SERIAL_IPW=m
2160CONFIG_USB_SERIAL_IUU=m
2161CONFIG_USB_SERIAL_KEYSPAN_PDA=m
2162CONFIG_USB_SERIAL_KEYSPAN=m
2163CONFIG_USB_SERIAL_KEYSPAN_MPR=y
2164CONFIG_USB_SERIAL_KEYSPAN_USA28=y
2165CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
2166CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
2167CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
2168CONFIG_USB_SERIAL_KEYSPAN_USA19=y
2169CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
2170CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
2171CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
2172CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
2173CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
2174CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
2175CONFIG_USB_SERIAL_KLSI=m
2176CONFIG_USB_SERIAL_KOBIL_SCT=m
2177CONFIG_USB_SERIAL_MCT_U232=m
2178CONFIG_USB_SERIAL_MOS7720=m
2179CONFIG_USB_SERIAL_MOS7840=m
2180# CONFIG_USB_SERIAL_MOTOROLA is not set
2181CONFIG_USB_SERIAL_NAVMAN=m
2182CONFIG_USB_SERIAL_PL2303=m
2183CONFIG_USB_SERIAL_OTI6858=m
2184# CONFIG_USB_SERIAL_SPCP8X5 is not set
2185CONFIG_USB_SERIAL_HP4X=m
2186CONFIG_USB_SERIAL_SAFE=m
2187CONFIG_USB_SERIAL_SAFE_PADDED=y
2188# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
2189CONFIG_USB_SERIAL_SIERRAWIRELESS=m
2190CONFIG_USB_SERIAL_TI=m
2191CONFIG_USB_SERIAL_CYBERJACK=m
2192CONFIG_USB_SERIAL_XIRCOM=m
2193CONFIG_USB_SERIAL_OPTION=m
2194CONFIG_USB_SERIAL_OMNINET=m
2195# CONFIG_USB_SERIAL_OPTICON is not set
2196CONFIG_USB_SERIAL_DEBUG=m
2197
2198#
2199# USB Miscellaneous drivers
2200#
2201CONFIG_USB_EMI62=m
2202CONFIG_USB_EMI26=m
2203CONFIG_USB_ADUTUX=m
2204# CONFIG_USB_SEVSEG is not set
2205# CONFIG_USB_RIO500 is not set
2206CONFIG_USB_LEGOTOWER=m
2207CONFIG_USB_LCD=m
2208CONFIG_USB_BERRY_CHARGE=m
2209CONFIG_USB_LED=m
2210# CONFIG_USB_CYPRESS_CY7C63 is not set
2211# CONFIG_USB_CYTHERM is not set
2212CONFIG_USB_PHIDGET=m
2213CONFIG_USB_PHIDGETKIT=m
2214CONFIG_USB_PHIDGETMOTORCONTROL=m
2215CONFIG_USB_PHIDGETSERVO=m
2216CONFIG_USB_IDMOUSE=m
2217CONFIG_USB_FTDI_ELAN=m
2218CONFIG_USB_APPLEDISPLAY=m
2219CONFIG_USB_SISUSBVGA=m
2220CONFIG_USB_SISUSBVGA_CON=y
2221CONFIG_USB_LD=m
2222CONFIG_USB_TRANCEVIBRATOR=m
2223CONFIG_USB_IOWARRIOR=m
2224# CONFIG_USB_TEST is not set
2225# CONFIG_USB_ISIGHTFW is not set
2226# CONFIG_USB_VST is not set
2227# CONFIG_USB_GADGET is not set
2228# CONFIG_USB_GADGET_DEBUG is not set
2229# CONFIG_USB_GADGET_DEBUG_FILES is not set
2230# CONFIG_USB_GADGET_DEBUG_FS is not set
2231# CONFIG_USB_GADGET_VBUS_DRAW is not set
2232# CONFIG_USB_GADGET_SELECTED is not set
2233# CONFIG_USB_GADGET_AT91 is not set
2234# CONFIG_USB_GADGET_ATMEL_USBA is not set
2235# CONFIG_USB_GADGET_FSL_USB2 is not set
2236# CONFIG_USB_GADGET_LH7A40X is not set
2237# CONFIG_USB_GADGET_OMAP is not set
2238# CONFIG_USB_GADGET_PXA25X is not set
2239# CONFIG_USB_GADGET_PXA27X is not set
2240# CONFIG_USB_GADGET_S3C2410 is not set
2241# CONFIG_USB_GADGET_IMX is not set
2242# CONFIG_USB_GADGET_M66592 is not set
2243# CONFIG_USB_GADGET_AMD5536UDC is not set
2244# CONFIG_USB_GADGET_FSL_QE is not set
2245# CONFIG_USB_GADGET_CI13XXX is not set
2246# CONFIG_USB_GADGET_NET2280 is not set
2247# CONFIG_USB_GADGET_GOKU is not set
2248# CONFIG_USB_GADGET_LANGWELL is not set
2249# CONFIG_USB_LANGWELL is not set
2250# CONFIG_USB_GADGET_DUMMY_HCD is not set
2251# CONFIG_USB_GADGET_DUALSPEED is not set
2252# CONFIG_USB_ZERO is not set
2253# CONFIG_USB_ETH is not set
2254# CONFIG_USB_ETH_RNDIS is not set
2255# CONFIG_USB_GADGETFS is not set
2256# CONFIG_USB_FILE_STORAGE is not set
2257# CONFIG_USB_FILE_STORAGE_TEST is not set
2258# CONFIG_USB_G_SERIAL is not set
2259# CONFIG_USB_MIDI_GADGET is not set
2260# CONFIG_USB_G_PRINTER is not set
2261# CONFIG_USB_CDC_COMPOSITE is not set
2262# CONFIG_USB_STILL_IMAGE is not set
2263
2264CONFIG_UWB=m
2265CONFIG_UWB_HWA=m
2266CONFIG_UWB_WHCI=m
2267# CONFIG_UWB_WLP is not set
2268# CONFIG_UWB_I1480U is not set
2269CONFIG_MMC=y
2270# CONFIG_MMC_DEBUG is not set
2271# CONFIG_MMC_UNSAFE_RESUME is not set
2272# CONFIG_SDIO_SUSPEND is not set
2273# CONFIG_MMC_SDHCI_MRST_SDIO1 is not set
2274
2275#
2276# MMC/SD/SDIO Card Drivers
2277#
2278CONFIG_MMC_BLOCK=y
2279CONFIG_MMC_BLOCK_BOUNCE=y
2280CONFIG_SDIO_UART=m
2281# CONFIG_MMC_TEST is not set
2282
2283#
2284# MMC/SD/SDIO Host Controller Drivers
2285#
2286CONFIG_MMC_SDHCI=y
2287CONFIG_MMC_SDHCI_PCI=y
2288# CONFIG_MMC_RICOH_MMC is not set
2289CONFIG_MMC_WBSD=m
2290CONFIG_MMC_TIFM_SD=m
2291# CONFIG_MEMSTICK is not set
2292CONFIG_NEW_LEDS=y
2293CONFIG_LEDS_CLASS=y
2294# CONFIG_MMC_CEATA_WR is not set
2295# CONFIG_MMC_SPI is not set
2296
2297#
2298# LED drivers
2299#
2300# CONFIG_LEDS_ALIX2 is not set
2301# CONFIG_LEDS_PCA9532 is not set
2302# CONFIG_LEDS_HP_DISK is not set
2303# CONFIG_LEDS_CLEVO_MAIL is not set
2304# CONFIG_LEDS_PCA955X is not set
2305
2306#
2307# LED Triggers
2308#
2309CONFIG_LEDS_TRIGGERS=y
2310# CONFIG_LEDS_TRIGGER_TIMER is not set
2311# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
2312# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
2313# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
2314# CONFIG_ACCESSIBILITY is not set
2315# CONFIG_INFINIBAND is not set
2316# CONFIG_EDAC is not set
2317CONFIG_RTC_LIB=y
2318CONFIG_RTC_CLASS=y
2319# CONFIG_RTC_HCTOSYS is not set
2320# CONFIG_RTC_DEBUG is not set
2321
2322#
2323# RTC interfaces
2324#
2325CONFIG_RTC_INTF_SYSFS=y
2326CONFIG_RTC_INTF_PROC=y
2327CONFIG_RTC_INTF_DEV=y
2328# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
2329# CONFIG_RTC_DRV_TEST is not set
2330
2331#
2332# I2C RTC drivers
2333#
2334# CONFIG_RTC_DRV_DS1307 is not set
2335# CONFIG_RTC_DRV_DS1374 is not set
2336# CONFIG_RTC_DRV_DS1672 is not set
2337# CONFIG_RTC_DRV_MAX6900 is not set
2338# CONFIG_RTC_DRV_RS5C372 is not set
2339# CONFIG_RTC_DRV_ISL1208 is not set
2340# CONFIG_RTC_DRV_X1205 is not set
2341# CONFIG_RTC_DRV_PCF8563 is not set
2342# CONFIG_RTC_DRV_PCF8583 is not set
2343# CONFIG_RTC_DRV_M41T80 is not set
2344# CONFIG_RTC_DRV_S35390A is not set
2345# CONFIG_RTC_DRV_FM3130 is not set
2346# CONFIG_RTC_DRV_RX8581 is not set
2347
2348#
2349# SPI RTC drivers
2350#
2351
2352#
2353# Platform RTC drivers
2354#
2355CONFIG_RTC_DRV_CMOS=y
2356# CONFIG_RTC_DRV_DS1286 is not set
2357# CONFIG_RTC_DRV_DS1511 is not set
2358# CONFIG_RTC_DRV_DS1553 is not set
2359# CONFIG_RTC_DRV_DS1742 is not set
2360# CONFIG_RTC_DRV_STK17TA8 is not set
2361# CONFIG_RTC_DRV_M48T86 is not set
2362# CONFIG_RTC_DRV_M48T35 is not set
2363# CONFIG_RTC_DRV_M48T59 is not set
2364# CONFIG_RTC_DRV_BQ4802 is not set
2365# CONFIG_RTC_DRV_V3020 is not set
2366
2367#
2368# on-CPU RTC drivers
2369#
2370# CONFIG_DMADEVICES is not set
2371# CONFIG_UIO is not set
2372CONFIG_STAGING=y
2373# CONFIG_STAGING_EXCLUDE_BUILD is not set
2374# CONFIG_ET131X is not set
2375# CONFIG_SLICOSS is not set
2376# CONFIG_SXG is not set
2377# CONFIG_ME4000 is not set
2378# CONFIG_MEILHAUS is not set
2379# CONFIG_VIDEO_GO7007 is not set
2380CONFIG_USB_IP_COMMON=m
2381CONFIG_USB_IP_VHCI_HCD=m
2382CONFIG_USB_IP_HOST=m
2383# CONFIG_W35UND is not set
2384CONFIG_PRISM2_USB=m
2385# CONFIG_ECHO is not set
2386CONFIG_RT2860=m
2387CONFIG_RT2870=m
2388# CONFIG_BENET is not set
2389# CONFIG_COMEDI is not set
2390# CONFIG_ASUS_OLED is not set
2391# CONFIG_USB_ATMEL is not set
2392# CONFIG_AGNX is not set
2393# CONFIG_OTUS is not set
2394# CONFIG_ALTERA_PCIE_CHDMA is not set
2395CONFIG_RTL8187SE=m
2396# CONFIG_INPUT_MIMIO is not set
2397# CONFIG_TRANZPORT is not set
2398# CONFIG_EPL is not set
2399
2400#
2401# Android
2402#
2403# CONFIG_ANDROID is not set
2404# CONFIG_ANDROID_BINDER_IPC is not set
2405# CONFIG_ANDROID_LOGGER is not set
2406# CONFIG_ANDROID_RAM_CONSOLE is not set
2407# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set
2408CONFIG_X86_PLATFORM_DEVICES=y
2409
2410#
2411# Firmware Drivers
2412#
2413# CONFIG_EDD is not set
2414CONFIG_FIRMWARE_MEMMAP=y
2415CONFIG_EFI_VARS=m
2416# CONFIG_DELL_RBU is not set
2417# CONFIG_DCDBAS is not set
2418CONFIG_DMIID=y
2419# CONFIG_ISCSI_IBFT_FIND is not set
2420
2421#
2422# File systems
2423#
2424CONFIG_EXT2_FS=y
2425# CONFIG_EXT2_FS_XATTR is not set
2426# CONFIG_EXT2_FS_XIP is not set
2427CONFIG_EXT3_FS=y
2428CONFIG_EXT3_FS_XATTR=y
2429CONFIG_EXT3_FS_POSIX_ACL=y
2430CONFIG_EXT3_FS_SECURITY=y
2431# CONFIG_EXT4_FS is not set
2432CONFIG_JBD=y
2433# CONFIG_JBD_DEBUG is not set
2434CONFIG_FS_MBCACHE=y
2435# CONFIG_REISERFS_FS is not set
2436# CONFIG_JFS_FS is not set
2437CONFIG_FS_POSIX_ACL=y
2438CONFIG_FILE_LOCKING=y
2439# CONFIG_XFS_FS is not set
2440# CONFIG_GFS2_FS is not set
2441# CONFIG_OCFS2_FS is not set
2442CONFIG_BTRFS_FS=y
2443CONFIG_BTRFS_FS_POSIX_ACL=y
2444CONFIG_DNOTIFY=y
2445CONFIG_INOTIFY=y
2446CONFIG_INOTIFY_USER=y
2447# CONFIG_QUOTA is not set
2448# CONFIG_AUTOFS_FS is not set
2449# CONFIG_AUTOFS4_FS is not set
2450CONFIG_FUSE_FS=m
2451CONFIG_GENERIC_ACL=y
2452
2453#
2454# CD-ROM/DVD Filesystems
2455#
2456CONFIG_ISO9660_FS=y
2457CONFIG_JOLIET=y
2458CONFIG_ZISOFS=y
2459CONFIG_UDF_FS=m
2460CONFIG_UDF_NLS=y
2461
2462#
2463# DOS/FAT/NT Filesystems
2464#
2465CONFIG_FAT_FS=y
2466CONFIG_MSDOS_FS=y
2467CONFIG_VFAT_FS=y
2468CONFIG_FAT_DEFAULT_CODEPAGE=437
2469CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
2470# CONFIG_NTFS_FS is not set
2471
2472#
2473# Pseudo filesystems
2474#
2475CONFIG_PROC_FS=y
2476# CONFIG_PROC_KCORE is not set
2477CONFIG_PROC_SYSCTL=y
2478CONFIG_PROC_PAGE_MONITOR=y
2479CONFIG_SYSFS=y
2480CONFIG_TMPFS=y
2481CONFIG_TMPFS_POSIX_ACL=y
2482# CONFIG_HUGETLBFS is not set
2483# CONFIG_HUGETLB_PAGE is not set
2484CONFIG_CONFIGFS_FS=m
2485
2486#
2487# Miscellaneous filesystems
2488#
2489CONFIG_MISC_FILESYSTEMS=y
2490# CONFIG_ADFS_FS is not set
2491# CONFIG_AFFS_FS is not set
2492# CONFIG_HFS_FS is not set
2493# CONFIG_HFSPLUS_FS is not set
2494# CONFIG_BEFS_FS is not set
2495# CONFIG_BFS_FS is not set
2496# CONFIG_EFS_FS is not set
2497# CONFIG_CRAMFS is not set
2498CONFIG_SQUASHFS=y
2499# CONFIG_SQUASHFS_EMBEDDED is not set
2500CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
2501# CONFIG_VXFS_FS is not set
2502# CONFIG_MINIX_FS is not set
2503# CONFIG_OMFS_FS is not set
2504# CONFIG_HPFS_FS is not set
2505# CONFIG_QNX4FS_FS is not set
2506# CONFIG_ROMFS_FS is not set
2507# CONFIG_SYSV_FS is not set
2508# CONFIG_UFS_FS is not set
2509CONFIG_NETWORK_FILESYSTEMS=y
2510# CONFIG_NFS_FS is not set
2511# CONFIG_NFSD is not set
2512# CONFIG_SMB_FS is not set
2513CONFIG_CIFS=m
2514# CONFIG_CIFS_STATS is not set
2515CONFIG_CIFS_WEAK_PW_HASH=y
2516# CONFIG_CIFS_XATTR is not set
2517# CONFIG_CIFS_DEBUG2 is not set
2518# CONFIG_CIFS_EXPERIMENTAL is not set
2519# CONFIG_NCP_FS is not set
2520# CONFIG_CODA_FS is not set
2521# CONFIG_AFS_FS is not set
2522
2523#
2524# Partition Types
2525#
2526CONFIG_PARTITION_ADVANCED=y
2527# CONFIG_ACORN_PARTITION is not set
2528# CONFIG_OSF_PARTITION is not set
2529# CONFIG_AMIGA_PARTITION is not set
2530# CONFIG_ATARI_PARTITION is not set
2531# CONFIG_MAC_PARTITION is not set
2532CONFIG_MSDOS_PARTITION=y
2533CONFIG_BSD_DISKLABEL=y
2534# CONFIG_MINIX_SUBPARTITION is not set
2535# CONFIG_SOLARIS_X86_PARTITION is not set
2536# CONFIG_UNIXWARE_DISKLABEL is not set
2537CONFIG_LDM_PARTITION=y
2538# CONFIG_LDM_DEBUG is not set
2539# CONFIG_SGI_PARTITION is not set
2540# CONFIG_ULTRIX_PARTITION is not set
2541# CONFIG_SUN_PARTITION is not set
2542# CONFIG_KARMA_PARTITION is not set
2543CONFIG_EFI_PARTITION=y
2544# CONFIG_SYSV68_PARTITION is not set
2545CONFIG_NLS=y
2546CONFIG_NLS_DEFAULT="utf8"
2547CONFIG_NLS_CODEPAGE_437=y
2548CONFIG_NLS_CODEPAGE_737=m
2549CONFIG_NLS_CODEPAGE_775=m
2550CONFIG_NLS_CODEPAGE_850=m
2551CONFIG_NLS_CODEPAGE_852=m
2552CONFIG_NLS_CODEPAGE_855=m
2553CONFIG_NLS_CODEPAGE_857=m
2554CONFIG_NLS_CODEPAGE_860=m
2555CONFIG_NLS_CODEPAGE_861=m
2556CONFIG_NLS_CODEPAGE_862=m
2557CONFIG_NLS_CODEPAGE_863=m
2558CONFIG_NLS_CODEPAGE_864=m
2559CONFIG_NLS_CODEPAGE_865=m
2560CONFIG_NLS_CODEPAGE_866=m
2561CONFIG_NLS_CODEPAGE_869=m
2562CONFIG_NLS_CODEPAGE_936=m
2563CONFIG_NLS_CODEPAGE_950=m
2564CONFIG_NLS_CODEPAGE_932=m
2565CONFIG_NLS_CODEPAGE_949=m
2566CONFIG_NLS_CODEPAGE_874=m
2567CONFIG_NLS_ISO8859_8=m
2568CONFIG_NLS_CODEPAGE_1250=m
2569CONFIG_NLS_CODEPAGE_1251=m
2570CONFIG_NLS_ASCII=y
2571CONFIG_NLS_ISO8859_1=m
2572CONFIG_NLS_ISO8859_2=m
2573CONFIG_NLS_ISO8859_3=m
2574CONFIG_NLS_ISO8859_4=m
2575CONFIG_NLS_ISO8859_5=m
2576CONFIG_NLS_ISO8859_6=m
2577CONFIG_NLS_ISO8859_7=m
2578CONFIG_NLS_ISO8859_9=m
2579CONFIG_NLS_ISO8859_13=m
2580CONFIG_NLS_ISO8859_14=m
2581CONFIG_NLS_ISO8859_15=m
2582CONFIG_NLS_KOI8_R=m
2583CONFIG_NLS_KOI8_U=m
2584CONFIG_NLS_UTF8=m
2585# CONFIG_DLM is not set
2586
2587#
2588# Kernel hacking
2589#
2590CONFIG_TRACE_IRQFLAGS_SUPPORT=y
2591CONFIG_PRINTK_TIME=y
2592# CONFIG_ENABLE_WARN_DEPRECATED is not set
2593CONFIG_ENABLE_MUST_CHECK=y
2594CONFIG_FRAME_WARN=1024
2595CONFIG_MAGIC_SYSRQ=y
2596# CONFIG_UNUSED_SYMBOLS is not set
2597CONFIG_DEBUG_FS=y
2598# CONFIG_HEADERS_CHECK is not set
2599CONFIG_DEBUG_KERNEL=y
2600CONFIG_DEBUG_SHIRQ=y
2601CONFIG_DETECT_SOFTLOCKUP=y
2602# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
2603CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
2604# CONFIG_SCHED_DEBUG is not set
2605CONFIG_SCHEDSTATS=y
2606CONFIG_TIMER_STATS=y
2607# CONFIG_DEBUG_OBJECTS is not set
2608# CONFIG_DEBUG_SLAB is not set
2609# CONFIG_DEBUG_RT_MUTEXES is not set
2610# CONFIG_RT_MUTEX_TESTER is not set
2611CONFIG_DEBUG_SPINLOCK=y
2612# CONFIG_DEBUG_MUTEXES is not set
2613# CONFIG_DEBUG_LOCK_ALLOC is not set
2614# CONFIG_PROVE_LOCKING is not set
2615# CONFIG_DEBUG_LOCKDEP is not set
2616# CONFIG_LOCK_STAT is not set
2617CONFIG_DEBUG_SPINLOCK_SLEEP=y
2618# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
2619CONFIG_STACKTRACE=y
2620# CONFIG_DEBUG_KOBJECT is not set
2621CONFIG_DEBUG_HIGHMEM=y
2622CONFIG_DEBUG_BUGVERBOSE=y
2623CONFIG_DEBUG_INFO=y
2624# CONFIG_DEBUG_VM is not set
2625# CONFIG_DEBUG_VIRTUAL is not set
2626# CONFIG_DEBUG_WRITECOUNT is not set
2627# CONFIG_DEBUG_MEMORY_INIT is not set
2628CONFIG_DEBUG_LIST=y
2629# CONFIG_DEBUG_SG is not set
2630CONFIG_DEBUG_NOTIFIERS=y
2631CONFIG_FRAME_POINTER=y
2632CONFIG_BOOT_PRINTK_DELAY=y
2633# CONFIG_RCU_TORTURE_TEST is not set
2634# CONFIG_RCU_CPU_STALL_DETECTOR is not set
2635# CONFIG_BACKTRACE_SELF_TEST is not set
2636# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
2637# CONFIG_FAULT_INJECTION is not set
2638CONFIG_LATENCYTOP=y
2639CONFIG_SYSCTL_SYSCALL_CHECK=y
2640CONFIG_HAVE_FUNCTION_TRACER=y
2641CONFIG_HAVE_DYNAMIC_FTRACE=y
2642CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
2643
2644# CONFIG_X86_VISWS is not set
2645# CONFIG_FTRACE_STARTUP_TEST is not set
2646#
2647# Tracers
2648#
2649# CONFIG_FUNCTION_TRACER is not set
2650# CONFIG_IRQSOFF_TRACER is not set
2651CONFIG_SYSPROF_TRACER=y
2652CONFIG_SCHED_TRACER=y
2653CONFIG_CONTEXT_SWITCH_TRACER=y
2654CONFIG_OPEN_CLOSE_TRACER=y
2655# CONFIG_BOOT_TRACER is not set
2656CONFIG_POWER_TRACER=y
2657# CONFIG_TRACE_BRANCH_PROFILING is not set
2658# CONFIG_STACK_TRACER is not set
2659# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
2660# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
2661# CONFIG_SAMPLES is not set
2662CONFIG_HAVE_ARCH_KGDB=y
2663# CONFIG_KGDB is not set
2664CONFIG_STRICT_DEVMEM=y
2665CONFIG_X86_VERBOSE_BOOTUP=y
2666CONFIG_EARLY_PRINTK=y
2667# CONFIG_EARLY_PRINTK_DBGP is not set
2668CONFIG_DEBUG_STACKOVERFLOW=y
2669# CONFIG_DEBUG_STACK_USAGE is not set
2670# CONFIG_DEBUG_PAGEALLOC is not set
2671# CONFIG_DEBUG_PER_CPU_MAPS is not set
2672# CONFIG_X86_PTDUMP is not set
2673CONFIG_DEBUG_RODATA=y
2674# CONFIG_DEBUG_RODATA_TEST is not set
2675# CONFIG_DEBUG_NX_TEST is not set
2676# CONFIG_4KSTACKS is not set
2677CONFIG_DOUBLEFAULT=y
2678# CONFIG_MMIOTRACE is not set
2679CONFIG_IO_DELAY_TYPE_0X80=0
2680CONFIG_IO_DELAY_TYPE_0XED=1
2681CONFIG_IO_DELAY_TYPE_UDELAY=2
2682CONFIG_IO_DELAY_TYPE_NONE=3
2683CONFIG_IO_DELAY_0X80=y
2684# CONFIG_IO_DELAY_0XED is not set
2685# CONFIG_IO_DELAY_UDELAY is not set
2686# CONFIG_IO_DELAY_NONE is not set
2687CONFIG_DEFAULT_IO_DELAY_TYPE=0
2688CONFIG_DEBUG_BOOT_PARAMS=y
2689# CONFIG_CPA_DEBUG is not set
2690# CONFIG_OPTIMIZE_INLINING is not set
2691
2692#
2693# Security options
2694#
2695# CONFIG_KEYS is not set
2696# CONFIG_SECURITY is not set
2697# CONFIG_SECURITYFS is not set
2698# CONFIG_SECURITY_FILE_CAPABILITIES is not set
2699CONFIG_CRYPTO=y
2700
2701#
2702# Crypto core or helper
2703#
2704# CONFIG_CRYPTO_FIPS is not set
2705CONFIG_CRYPTO_ALGAPI=y
2706CONFIG_CRYPTO_AEAD=y
2707CONFIG_CRYPTO_BLKCIPHER=y
2708CONFIG_CRYPTO_HASH=y
2709CONFIG_CRYPTO_RNG=y
2710CONFIG_CRYPTO_MANAGER=y
2711CONFIG_CRYPTO_GF128MUL=m
2712CONFIG_CRYPTO_NULL=m
2713# CONFIG_CRYPTO_CRYPTD is not set
2714CONFIG_CRYPTO_AUTHENC=m
2715CONFIG_CRYPTO_TEST=m
2716
2717#
2718# Authenticated Encryption with Associated Data
2719#
2720CONFIG_CRYPTO_CCM=m
2721CONFIG_CRYPTO_GCM=m
2722CONFIG_CRYPTO_SEQIV=m
2723
2724#
2725# Block modes
2726#
2727CONFIG_CRYPTO_CBC=m
2728CONFIG_CRYPTO_CTR=m
2729# CONFIG_CRYPTO_CTS is not set
2730CONFIG_CRYPTO_ECB=y
2731CONFIG_CRYPTO_LRW=m
2732CONFIG_CRYPTO_PCBC=m
2733CONFIG_CRYPTO_XTS=m
2734
2735#
2736# Hash modes
2737#
2738CONFIG_CRYPTO_HMAC=y
2739CONFIG_CRYPTO_XCBC=m
2740
2741#
2742# Digest
2743#
2744CONFIG_CRYPTO_CRC32C=m
2745# CONFIG_CRYPTO_CRC32C_INTEL is not set
2746CONFIG_CRYPTO_MD4=m
2747CONFIG_CRYPTO_MD5=y
2748CONFIG_CRYPTO_MICHAEL_MIC=m
2749# CONFIG_CRYPTO_RMD128 is not set
2750# CONFIG_CRYPTO_RMD160 is not set
2751# CONFIG_CRYPTO_RMD256 is not set
2752# CONFIG_CRYPTO_RMD320 is not set
2753CONFIG_CRYPTO_SHA1=y
2754CONFIG_CRYPTO_SHA256=m
2755CONFIG_CRYPTO_SHA512=m
2756CONFIG_CRYPTO_TGR192=m
2757CONFIG_CRYPTO_WP512=m
2758
2759#
2760# Ciphers
2761#
2762CONFIG_CRYPTO_AES=y
2763CONFIG_CRYPTO_AES_586=m
2764CONFIG_CRYPTO_ANUBIS=m
2765CONFIG_CRYPTO_ARC4=y
2766CONFIG_CRYPTO_BLOWFISH=m
2767CONFIG_CRYPTO_CAMELLIA=m
2768CONFIG_CRYPTO_CAST5=m
2769CONFIG_CRYPTO_CAST6=m
2770CONFIG_CRYPTO_DES=m
2771CONFIG_CRYPTO_FCRYPT=m
2772CONFIG_CRYPTO_KHAZAD=m
2773CONFIG_CRYPTO_SALSA20=m
2774CONFIG_CRYPTO_SALSA20_586=m
2775CONFIG_CRYPTO_SEED=m
2776CONFIG_CRYPTO_SERPENT=m
2777CONFIG_CRYPTO_TEA=m
2778CONFIG_CRYPTO_TWOFISH=m
2779CONFIG_CRYPTO_TWOFISH_COMMON=m
2780CONFIG_CRYPTO_TWOFISH_586=m
2781
2782#
2783# Compression
2784#
2785CONFIG_CRYPTO_DEFLATE=m
2786# CONFIG_CRYPTO_LZO is not set
2787
2788#
2789# Random Number Generation
2790#
2791# CONFIG_CRYPTO_ANSI_CPRNG is not set
2792CONFIG_CRYPTO_HW=y
2793# CONFIG_CRYPTO_DEV_PADLOCK is not set
2794# CONFIG_CRYPTO_DEV_GEODE is not set
2795# CONFIG_CRYPTO_DEV_HIFN_795X is not set
2796CONFIG_HAVE_KVM=y
2797CONFIG_VIRTUALIZATION=y
2798
2799#
2800# Library routines
2801#
2802CONFIG_BITREVERSE=y
2803CONFIG_GENERIC_FIND_FIRST_BIT=y
2804CONFIG_GENERIC_FIND_NEXT_BIT=y
2805CONFIG_CRC_CCITT=m
2806CONFIG_CRC16=m
2807# CONFIG_CRC_T10DIF is not set
2808CONFIG_CRC_ITU_T=m
2809CONFIG_CRC32=y
2810# CONFIG_CRC7 is not set
2811CONFIG_LIBCRC32C=m
2812CONFIG_ZLIB_INFLATE=y
2813CONFIG_ZLIB_DEFLATE=m
2814CONFIG_TEXTSEARCH=y
2815CONFIG_TEXTSEARCH_KMP=m
2816CONFIG_TEXTSEARCH_BM=m
2817CONFIG_TEXTSEARCH_FSM=m
2818CONFIG_PLIST=y
2819CONFIG_HAS_IOMEM=y
2820CONFIG_HAS_IOPORT=y
2821CONFIG_HAS_DMA=y
2822CONFIG_CHECK_SIGNATURE=y
2823
2824
2825# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
2826# CONFIG_MFD_PCF50633 is not set
2827# CONFIG_SENSORS_ADT7475 is not set
2828# CONFIG_LIB80211_DEBUG is not set
2829# CONFIG_DNET is not set
2830# CONFIG_BE2NET is not set
2831
2832
2833
2834# CONFIG_LNW_IPC is not set
2835# CONFIG_MRST is not set
2836# CONFIG_SFI is not set
2837# CONFIG_MDIO_GPIO is not set
2838# CONFIG_KEYBOARD_GPIO is not set
2839# CONFIG_MOUSE_GPIO is not set
2840# CONFIG_I2C_GPIO is not set
2841# CONFIG_DEBUG_GPIO is not set
2842# CONFIG_GPIO_SYSFS is not set
2843# CONFIG_GPIO_LANGWELL is not set
2844# CONFIG_GPIO_MAX732X is not set
2845# CONFIG_GPIO_PCA953X is not set
2846# CONFIG_GPIO_PCF857X is not set
2847# CONFIG_GPIO_BT8XX is not set
2848# CONFIG_UCB1400_CORE is not set
2849# CONFIG_TPS65010 is not set
2850# CONFIG_USB_GPIO_VBUS is not set
2851# CONFIG_LEDS_GPIO is not set
2852# CONFIG_ANDROID_TIMED_GPIO is not set
2853# CONFIG_X86_MRST_EARLY_PRINTK is not set
2854
2855# CONFIG_APB_TIMER is not set
2856# CONFIG_MRST_SPI_UART_BOOT_MSG is not set
2857# CONFIG_SFI_DEBUG is not set
2858# CONFIG_SFI_PROCFS is not set
2859# CONFIG_TOUCHSCREEN_UCB1400 is not set
2860# CONFIG_GPIO_LNWPMIC is not set
2861# CONFIG_RTC_DRV_VRTC is not set
2862# CONFIG_MRST_NAND is not set
2863# CONFIG_MRST_NAND_HW is not set
2864# CONFIG_USB_LANGWELL_OTG is not set
2865# CONFIG_KEYBOARD_MRST is not set
2866# CONFIG_I2C_MRST is not set
2867# CONFIG_MRST_VIB is not set
2868# CONFIG_SENSORS_ISL29020 is not set
2869# CONFIG_SENSORS_HMC6352 is not set
2870# CONFIG_SENSORS_LIS331DL is not set
2871# CONFIG_SENSORS_EMC1403 is not set
2872# CONFIG_SENSORS_MRST_ANALOG_ACCEL is not set
2873# CONFIG_USB_OTG_WHITELIST is not set
2874# CONFIG_USB_OTG_BLACKLIST_HUB is not set
2875# CONFIG_SND_PCM_OSS_PLUGINS is not set
2876# CONFIG_SND_INTEL_SST is not set
2877# CONFIG_SST_IPC_NOT_INCLUDED is not set
2878# CONFIG_SND_INTELMID is not set
2879# CONFIG_TOUCHSCREEN_MRSTOUCH is not set
2880CONFIG_ATL1C=m
2881# CONFIG_MRST_MMC_WR is not set
2882
2883
2884# CONFIG_VIDEO_MRSTCI is not set
2885# CONFIG_VIDEO_MRST_ISP is not set
2886# CONFIG_VIDEO_MRST_SENSOR is not set
2887# CONFIG_VIDEO_MRST_OV2650 is not set
2888# CONFIG_VIDEO_MRST_OV5630 is not set
2889# CONFIG_VIDEO_MRST_OV9665 is not set
2890# CONFIG_SPI2_MRST is not set
2891
2892# CONFIG_SFI_PM is not set
2893# CONFIG_SFI_CPUIDLE is not set
2894# CONFIG_SFI_PROCESSOR_PM is not set
2895# CONFIG_X86_SFI_CPUFREQ is not set
2896# CONFIG_MSTWN_POWER_MGMT is not set
2897# CONFIG_USB_NET_MBM is not set
2898
2899# CONFIG_USB_GADGET_LANGWELL is not set
2900# CONFIG_USB_LANGWELL is not set
2901
2902# CONFIG_INTEL_LNW_DMAC1 is not set
2903# CONFIG_INTEL_LNW_DMAC2 is not set
2904# CONFIG_LNW_DMA_DEBUG is not set
2905# CONFIG_NET_DMA is not set
2906# CONFIG_DMATEST is not set
2907# CONFIG_8688_RC is not set
2908# CONFIG_SSB_SILENT is not set
2909
2910# CONFIG_TOUCHSCREEN_TSC2003 is not set
2911# CONFIG_MFD_TIMBERDALE is not set
2912# CONFIG_MMC_SDHCI_PLTFM is not set
2913# CONFIG_SPI_XILINX is not set
2914# CONFIG_SPI_MRST is not set
2915# CONFIG_GPE is not set
2916
2917# CONFIG_STRIP_ASM_SYMS is not set
2918# CONFIG_X86_EXTENDED_PLATFORM is not set
2919# CONFIG_X86_32_NON_STANDARD is not set
2920# CONFIG_X86_CPU_DEBUG is not set
2921# CONFIG_CC_STACKPROTECTOR is not set
2922# CONFIG_DMAR is not set
2923# CONFIG_PCI_IOV is not set
2924# CONFIG_NETFILTER_XT_TARGET_LED is not set
2925# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
2926# CONFIG_NETFILTER_XT_MATCH_HL is not set
2927# CONFIG_NET_DROP_MONITOR is not set
2928# CONFIG_ISL29003 is not set
2929# CONFIG_SCSI_MPT2SAS is not set
2930# CONFIG_LIBFCOE is not set
2931# CONFIG_SCSI_OSD_INITIATOR is not set
2932# CONFIG_ETHOC is not set
2933# CONFIG_IGBVF is not set
2934# CONFIG_VXGE is not set
2935CONFIG_AT76C50X_USB=m
2936# CONFIG_MWL8K is not set
2937CONFIG_P54_SPI=m
2938CONFIG_AR9170_USB=m
2939# CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT is not set
2940CONFIG_TOUCHSCREEN_AD7877=m
2941CONFIG_TOUCHSCREEN_AD7879_I2C=m
2942CONFIG_TOUCHSCREEN_AD7879_SPI=m
2943CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
2944CONFIG_SERIAL_MAX3100=m
2945# CONFIG_HW_RANDOM_TIMERIOMEM is not set
2946# CONFIG_SENSORS_ATK0110 is not set
2947# CONFIG_SENSORS_G760A is not set
2948# CONFIG_SENSORS_LTC4215 is not set
2949# CONFIG_SENSORS_LM95241 is not set
2950# CONFIG_SENSORS_SHT15 is not set
2951# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
2952# CONFIG_VIDEO_ZORAN is not set
2953CONFIG_USB_GSPCA_MR97310A=m
2954CONFIG_USB_GSPCA_SQ905=m
2955CONFIG_USB_GSPCA_SQ905C=m
2956# CONFIG_VIDEO_HDPVR is not set
2957# CONFIG_VIDEO_CX231XX is not set
2958# CONFIG_USB_PWC_INPUT_EVDEV is not set
2959# CONFIG_FB_BROADSHEET is not set
2960# CONFIG_SND_PCSP is not set
2961# CONFIG_SND_INDIGOIOX is not set
2962# CONFIG_SND_INDIGODJX is not set
2963# CONFIG_DRAGONRISE_FF is not set
2964CONFIG_USB_SERIAL_CP210X=m
2965CONFIG_USB_SERIAL_QUALCOMM=m
2966CONFIG_USB_SERIAL_SYMBOL=m
2967# CONFIG_NOP_USB_XCEIV is not set
2968# CONFIG_LEDS_LP5521 is not set
2969# CONFIG_LEDS_DAC124S085 is not set
2970# CONFIG_LEDS_BD2802 is not set
2971# CONFIG_LEDS_TRIGGER_GPIO is not set
2972# CONFIG_AUXDISPLAY is not set
2973CONFIG_RT3070=m
2974# CONFIG_DST is not set
2975# CONFIG_POHMELFS is not set
2976# CONFIG_STLC45XX is not set
2977CONFIG_USB_SERIAL_ATEN2011=m
2978# CONFIG_B3DFG is not set
2979# CONFIG_IDE_PHISON is not set
2980# CONFIG_PLAN9AUTH is not set
2981# CONFIG_HECI is not set
2982CONFIG_LINE6_USB=m
2983CONFIG_USB_SERIAL_QUATECH_ESU100=m
2984CONFIG_DELL_WMI=m
2985# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
2986# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
2987# CONFIG_FSCACHE is not set
2988# CONFIG_NILFS2_FS is not set
2989# CONFIG_DETECT_HUNG_TASK is not set
2990# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
2991CONFIG_EVENT_TRACER=y
2992# CONFIG_FTRACE_SYSCALLS is not set
2993# CONFIG_KMEMTRACE is not set
2994# CONFIG_WORKQUEUE_TRACER is not set
2995# CONFIG_DYNAMIC_DEBUG is not set
2996# CONFIG_DMA_API_DEBUG is not set
2997# CONFIG_IMA is not set
2998CONFIG_CRYPTO_ZLIB=y
2999# CONFIG_VXGE is not set
3000# CONFIG_COMPAT_NET_DEV_OPS is not set
3001CONFIG_RD_GZIP=y
3002# CONFIG_RD_BZIP2 is not set
3003# CONFIG_RD_LZMA is not set
3004CONFIG_SENSORS_LIS3_SPI=y
3005# CONFIG_VIDEO_SAA6588 is not set
3006# CONFIG_VIDEO_BT819 is not set
3007# CONFIG_VIDEO_BT856 is not set
3008# CONFIG_VIDEO_BT866 is not set
3009# CONFIG_VIDEO_KS0127 is not set
3010# CONFIG_VIDEO_SAA7110 is not set
3011# CONFIG_VIDEO_VPX3220 is not set
3012# CONFIG_VIDEO_SAA7185 is not set
3013# CONFIG_VIDEO_ADV7170 is not set
3014# CONFIG_VIDEO_ADV7175 is not set
3015# CONFIG_HID_KYE is not set
3016# CONFIG_HID_KENSINGTON is not set
3017# CONFIG_SECURITY_TOMOYO is not set
3018# CONFIG_RTC_DRV_M41T94 is not set
3019# CONFIG_RTC_DRV_DS1305 is not set
3020# CONFIG_RTC_DRV_DS1390 is not set
3021# CONFIG_RTC_DRV_MAX6902 is not set
3022# CONFIG_RTC_DRV_R9701 is not set
3023# CONFIG_RTC_DRV_RS5C348 is not set
3024# CONFIG_RTC_DRV_DS3234 is not set
3025
3026# CONFIG_KS8842 is not set
3027# CONFIG_VIDEO_ADV7180 is not set
3028# CONFIG_MOST is not set
3029
3030CONFIG_PERF_COUNTERS=y
3031CONFIG_EVENT_PROFILE=y
3032# CONFIG_GCOV_KERNEL is not set
3033CONFIG_LBDAF=y
3034# CONFIG_X86_OLD_MCE is not set
3035CONFIG_X86_MCE_INTEL=y
3036# CONFIG_X86_MCE_AMD is not set
3037# CONFIG_X86_ANCIENT_MCE is not set
3038# CONFIG_X86_MCE_INJECT is not set
3039CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
3040# CONFIG_NETFILTER_XT_MATCH_OSF is not set
3041# CONFIG_IEEE802154 is not set
3042# CONFIG_CFG80211_DEBUGFS is not set
3043CONFIG_MAC80211_DEFAULT_PS=y
3044# CONFIG_EEPROM_MAX6875 is not set
3045# CONFIG_CB710_CORE is not set
3046# CONFIG_SCSI_BNX2_ISCSI is not set
3047# CONFIG_DM_LOG_USERSPACE is not set
3048# CONFIG_DM_MULTIPATH_QL is not set
3049# CONFIG_DM_MULTIPATH_ST is not set
3050# CONFIG_CNIC is not set
3051# CONFIG_RT2800USB is not set
3052# CONFIG_WL12XX is not set
3053# CONFIG_IWM is not set
3054# CONFIG_USB_NET_INT51X1 is not set
3055# CONFIG_KEYBOARD_LM8323 is not set
3056# CONFIG_MOUSE_SYNAPTICS_I2C is not set
3057# CONFIG_TOUCHSCREEN_EETI is not set
3058# CONFIG_TOUCHSCREEN_W90X900 is not set
3059# CONFIG_I2C_DESIGNWARE is not set
3060# CONFIG_PPS is not set
3061# CONFIG_BATTERY_MAX17040 is not set
3062# CONFIG_SENSORS_TMP401 is not set
3063# CONFIG_AB3100_CORE is not set
3064# CONFIG_EZX_PCAP is not set
3065# CONFIG_SND_CTXFI is not set
3066# CONFIG_SND_HDA_INPUT_JACK is not set
3067CONFIG_SND_HDA_CODEC_CA0110=y
3068# CONFIG_SND_LX6464ES is not set
3069# CONFIG_SMARTJOYPLUS_FF is not set
3070# CONFIG_USB_XHCI_HCD is not set
3071# CONFIG_MMC_CB710 is not set
3072# CONFIG_MMC_VIA_SDMMC is not set
3073# CONFIG_RTC_DRV_RX8025 is not set
3074# CONFIG_USB_SERIAL_QUATECH2 is not set
3075# CONFIG_VT6655 is not set
3076# CONFIG_USB_CPC is not set
3077# CONFIG_RDC_17F3101X is not set
3078# CONFIG_FB_UDL is not set
3079CONFIG_ACERHDF=m
3080CONFIG_FSNOTIFY=y
3081# CONFIG_CUSE is not set
3082# CONFIG_DEBUG_KMEMLEAK is not set
3083CONFIG_FTRACE=y
3084# CONFIG_RING_BUFFER_BENCHMARK is not set
3085# CONFIG_IOMMU_STRESS is not set
3086# CONFIG_LGUEST is not set
3087# CONFIG_MRST_LNW_A1_WR is not set
3088# CONFIG_MRST_LNW_A2_WR is not set
3089# CONFIG_KEYBOARD_MATRIX is not set
3090# CONFIG_LEDS_LP3944 is not set
3091CONFIG_RTL8192SU=m
3092# CONFIG_KS8851 is not set
3093# CONFIG_BATTERY_DS2782 is not set
3094CONFIG_USB_GSPCA_SN9C20X=m
3095CONFIG_USB_GSPCA_SN9C20X_EVDEV=y
3096# CONFIG_I2C_XILINX is not set
3097# CONFIG_RADIO_SAA7706H is not set
3098# CONFIG_RADIO_TEF6862 is not set
3099# CONFIG_RAR_REGISTER is not set
3100# CONFIG_MRST_RAR_HANDLER is not set
3101# CONFIG_MRST_CEATA_SUPPORT is not set
3102# CONFIG_MRST_IPC_TEST is not set
3103# CONFIG_DX_SEP is not set
3104# CONFIG_BT_MRVL is not set
3105# CONFIG_BT_MRVL_SDIO is not set
3106# CONFIG_GPIO_LANGWELL_PMIC is not set
3107
3108
3109#
3110# MTD options -- subconfigs need to turn on MTD themselves
3111# for these to become active
3112#
3113# CONFIG_MTD_DEBUG is not set
3114CONFIG_MTD_CONCAT=y
3115CONFIG_MTD_PARTITIONS=y
3116# CONFIG_MTD_TESTS is not set
3117# CONFIG_MTD_BLOCK_RO is not set
3118# CONFIG_FTL is not set
3119# CONFIG_NFTL is not set
3120# CONFIG_INFTL is not set
3121# CONFIG_RFD_FTL is not set
3122# CONFIG_SSFDC is not set
3123# CONFIG_MTD_OOPS is not set
3124# CONFIG_MTD_CFI is not set
3125# CONFIG_MTD_JEDECPROBE is not set
3126# CONFIG_MTD_RAM is not set
3127# CONFIG_MTD_ROM is not set
3128# CONFIG_MTD_ABSENT is not set
3129# CONFIG_MTD_COMPLEX_MAPPINGS is not set
3130# CONFIG_MTD_TS5500 is not set
3131# CONFIG_MTD_INTEL_VR_NOR is not set
3132# CONFIG_MTD_PLATRAM is not set
3133# CONFIG_MTD_PMC551 is not set
3134# CONFIG_MTD_DATAFLASH is not set
3135# CONFIG_MTD_M25P80 is not set
3136# CONFIG_MTD_SLRAM is not set
3137# CONFIG_MTD_PHRAM is not set
3138# CONFIG_MTD_MTDRAM is not set
3139# CONFIG_MTD_BLOCK2MTD is not set
3140# CONFIG_MTD_DOC2000 is not set
3141# CONFIG_MTD_DOC2001 is not set
3142# CONFIG_MTD_DOC2001PLUS is not set
3143CONFIG_MTD_NAND=y
3144# CONFIG_MTD_ONENAND is not set
3145# CONFIG_MTD_LPDDR is not set
3146CONFIG_MTD_UBI=y
3147CONFIG_JFFS2_FS=y
3148# CONFIG_MTD_REDBOOT_PARTS is not set
3149# CONFIG_MTD_CMDLINE_PARTS is not set
3150# CONFIG_MTD_AR7_PARTS is not set
3151# CONFIG_MTD_ABSENT is not set
3152# CONFIG_MTD_COMPLEX_MAPPINGS is not set
3153# CONFIG_MTD_NAND_VERIFY_WRITE is not set
3154# CONFIG_MTD_NAND_ECC_SMC is not set
3155# CONFIG_MTD_NAND_MUSEUM_IDS is not set
3156# CONFIG_MTD_NAND_DISKONCHIP is not set
3157# CONFIG_MTD_NAND_CAFE is not set
3158# CONFIG_MTD_NAND_CS553X is not set
3159# CONFIG_MTD_NAND_NANDSIM is not set
3160# CONFIG_MTD_NAND_PLATFORM is not set
3161# CONFIG_MTD_ALAUDA is not set
3162# CONFIG_MTD_UBI_GLUEBI is not set
3163# CONFIG_MTD_UBI_DEBUG is not set
3164# CONFIG_JFFS2_FS_WRITEBUFFER is not set
3165# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
3166# CONFIG_JFFS2_SUMMARY is not set
3167CONFIG_JFFS2_FS_XATTR=y
3168# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
3169CONFIG_UBIFS_FS=m
3170
3171# CONFIG_MTD_CHAR is not set
3172# CONFIG_MTD_BLKDEVS is not set
3173# CONFIG_MTD_BLOCK is not set
3174# CONFIG_MTD_BLOCK_RO is not set
3175# CONFIG_FTL is not set
3176# CONFIG_NFTL is not set
3177# CONFIG_INFTL is not set
3178# CONFIG_RFD_FTL is not set
3179# CONFIG_SSFDC is not set
3180# CONFIG_MTD_OOPS is not set
3181CONFIG_JFFS2_FS=y
3182CONFIG_JFFS2_FS_DEBUG=0
3183# CONFIG_JFFS2_FS_WRITEBUFFER is not set
3184# CONFIG_JFFS2_SUMMARY is not set
3185CONFIG_JFFS2_FS_XATTR=y
3186CONFIG_JFFS2_FS_POSIX_ACL=y
3187CONFIG_JFFS2_FS_SECURITY=y
3188# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
3189CONFIG_JFFS2_ZLIB=y
3190# CONFIG_JFFS2_LZO is not set
3191CONFIG_JFFS2_RTIME=y
3192# CONFIG_JFFS2_RUBIN is not set
3193CONFIG_UBIFS_FS=m
3194# CONFIG_UBIFS_FS_XATTR is not set
3195# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
3196CONFIG_UBIFS_FS_LZO=y
3197CONFIG_UBIFS_FS_ZLIB=y
3198# CONFIG_UBIFS_FS_DEBUG is not set
3199CONFIG_MTD_UBI_WL_THRESHOLD=4096
3200CONFIG_MTD_UBI_BEB_RESERVE=1
3201# CONFIG_MTD_UBI_GLUEBI is not set
3202# CONFIG_IEGD is not set
3203# CONFIG_SERIAL_UARTLITE is not set
3204# CONFIG_R8169_VLAN is not set
3205# CONFIG_ATH5K_DEBUG is not set
3206CONFIG_FONT_8x8=y
3207CONFIG_FONT_6x11=y
3208CONFIG_FONT_7x14=y
3209# CONFIG_FONT_PEARL_8x8 is not set
3210# CONFIG_FONT_ACORN_8x8 is not set
3211# CONFIG_FONT_MINI_4x6 is not set
3212# CONFIG_FONT_SUN8x16 is not set
3213# CONFIG_FONT_SUN12x22 is not set
3214CONFIG_FONT_10x18=y
3215CONFIG_KVM=m
3216CONFIG_KVM_INTEL=m
3217# CONFIG_KVM_AMD is not set
3218# CONFIG_KVM_TRACE is not set
3219# CONFIG_VIRTIO_PCI is not set
3220# CONFIG_VIRTIO_BALLOON is not set
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-build-nonintconfig.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-build-nonintconfig.patch
new file mode 100644
index 0000000000..38de047249
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-build-nonintconfig.patch
@@ -0,0 +1,142 @@
1From e412ebbb8cea2aaf32f689ffc630b57cfe13bde5 Mon Sep 17 00:00:00 2001
2From: Alan Olsen <alanx.r.olsen@intel.com>
3Date: Tue, 21 Jul 2009 13:14:25 -0700
4Subject: [PATCH] linux-2.6-build-nonintconfig.patch
5
6Signed-off-by: Alan Olsen <alanx.r.olsen@intel.com>
7---
8 scripts/kconfig/Makefile | 5 +++++
9 scripts/kconfig/conf.c | 36 ++++++++++++++++++++++++++++++++----
10 2 files changed, 37 insertions(+), 4 deletions(-)
11
12diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
13index 5ddf8be..a4365db 100644
14--- a/scripts/kconfig/Makefile
15+++ b/scripts/kconfig/Makefile
16@@ -28,6 +28,11 @@ oldconfig: $(obj)/conf
17 silentoldconfig: $(obj)/conf
18 $< -s $(Kconfig)
19
20+nonint_oldconfig: $(obj)/conf
21+ $< -b $(Kconfig)
22+loose_nonint_oldconfig: $(obj)/conf
23+ $< -B $(Kconfig)
24+
25 # Create new linux.pot file
26 # Adjust charset to UTF-8 in .po file to accept UTF-8 in Kconfig files
27 # The symlink is used to repair a deficiency in arch/um
28diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
29index 3baaaec..2a81742 100644
30--- a/scripts/kconfig/conf.c
31+++ b/scripts/kconfig/conf.c
32@@ -23,6 +23,8 @@ enum {
33 ask_all,
34 ask_new,
35 ask_silent,
36+ dont_ask,
37+ dont_ask_dont_tell,
38 set_default,
39 set_yes,
40 set_mod,
41@@ -40,6 +42,8 @@ static struct menu *rootEntry;
42
43 static char nohelp_text[] = N_("Sorry, no help available for this option yet.\n");
44
45+static int return_value = 0;
46+
47 static const char *get_help(struct menu *menu)
48 {
49 if (menu_has_help(menu))
50@@ -360,7 +364,10 @@ static void conf(struct menu *menu)
51
52 switch (prop->type) {
53 case P_MENU:
54- if (input_mode == ask_silent && rootEntry != menu) {
55+ if ((input_mode == ask_silent ||
56+ input_mode == dont_ask ||
57+ input_mode == dont_ask_dont_tell) &&
58+ rootEntry != menu) {
59 check_conf(menu);
60 return;
61 }
62@@ -418,12 +425,21 @@ static void check_conf(struct menu *menu)
63 if (sym && !sym_has_value(sym)) {
64 if (sym_is_changable(sym) ||
65 (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)) {
66+ if (input_mode == dont_ask ||
67+ input_mode == dont_ask_dont_tell) {
68+ if (input_mode == dont_ask &&
69+ sym->name && !sym_is_choice_value(sym)) {
70+ fprintf(stderr,"CONFIG_%s\n",sym->name);
71+ ++return_value;
72+ }
73+ } else {
74 if (!conf_cnt++)
75 printf(_("*\n* Restart config...\n*\n"));
76 rootEntry = menu_get_parent_menu(menu);
77 conf(rootEntry);
78 }
79 }
80+ }
81
82 for (child = menu->list; child; child = child->next)
83 check_conf(child);
84@@ -439,7 +455,7 @@ int main(int ac, char **av)
85 bindtextdomain(PACKAGE, LOCALEDIR);
86 textdomain(PACKAGE);
87
88- while ((opt = getopt(ac, av, "osdD:nmyrh")) != -1) {
89+ while ((opt = getopt(ac, av, "osbBdD:nmyrh")) != -1) {
90 switch (opt) {
91 case 'o':
92 input_mode = ask_silent;
93@@ -448,6 +464,12 @@ int main(int ac, char **av)
94 input_mode = ask_silent;
95 sync_kconfig = 1;
96 break;
97+ case 'b':
98+ input_mode = dont_ask;
99+ break;
100+ case 'B':
101+ input_mode = dont_ask_dont_tell;
102+ break;
103 case 'd':
104 input_mode = set_default;
105 break;
106@@ -525,6 +547,8 @@ int main(int ac, char **av)
107 case ask_silent:
108 case ask_all:
109 case ask_new:
110+ case dont_ask:
111+ case dont_ask_dont_tell:
112 conf_read(NULL);
113 break;
114 case set_no:
115@@ -586,12 +610,16 @@ int main(int ac, char **av)
116 conf(&rootmenu);
117 input_mode = ask_silent;
118 /* fall through */
119+ case dont_ask:
120+ case dont_ask_dont_tell:
121 case ask_silent:
122 /* Update until a loop caused no more changes */
123 do {
124 conf_cnt = 0;
125 check_conf(&rootmenu);
126- } while (conf_cnt);
127+ } while (conf_cnt &&
128+ (input_mode != dont_ask &&
129+ input_mode != dont_ask_dont_tell));
130 break;
131 }
132
133@@ -613,5 +641,5 @@ int main(int ac, char **av)
134 exit(1);
135 }
136 }
137- return 0;
138+ return return_value;
139 }
140--
1411.6.0.6
142
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-driver-level-usb-autosuspend.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-driver-level-usb-autosuspend.patch
new file mode 100644
index 0000000000..0073343d10
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-driver-level-usb-autosuspend.patch
@@ -0,0 +1,61 @@
1commit 0f592e33934bf6108e33e34f00b425f98ee833ef
2Author: Matthew Garrett <mjg@redhat.com>
3Date: Wed Jul 8 19:04:23 2009 +0100
4
5 usb: Allow drivers to enable USB autosuspend on a per-device basis
6
7 USB autosuspend is currently only enabled by default for hubs. On other
8 hardware the decision is made by userspace. This is unnecessary in cases
9 where we know that the hardware supports autosuspend, so this patch adds
10 a function to allow drivers to enable it at probe time.
11
12 Signed-off-by: Matthew Garrett <mjg@redhat.com>
13
14diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
15index 69e5773..6e81caa 100644
16--- a/drivers/usb/core/driver.c
17+++ b/drivers/usb/core/driver.c
18@@ -1560,6 +1560,21 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
19 EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
20
21 /**
22+ * usb_device_autosuspend_enable - enable autosuspend on a device
23+ * @udev: the usb_device to be autosuspended
24+ *
25+ * This routine should be called by an interface driver when it knows that
26+ * the device in question supports USB autosuspend.
27+ *
28+ */
29+void usb_device_autosuspend_enable(struct usb_device *udev)
30+{
31+ udev->autosuspend_disabled = 0;
32+ udev->autoresume_disabled = 0;
33+}
34+EXPORT_SYMBOL_GPL(usb_device_autosuspend_enable);
35+
36+/**
37 * usb_autopm_get_interface - increment a USB interface's PM-usage counter
38 * @intf: the usb_interface whose counter should be incremented
39 *
40diff --git a/include/linux/usb.h b/include/linux/usb.h
41index b1e3c2f..61bddbe 100644
42--- a/include/linux/usb.h
43+++ b/include/linux/usb.h
44@@ -543,6 +543,7 @@ extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id);
45
46 /* USB autosuspend and autoresume */
47 #ifdef CONFIG_USB_SUSPEND
48+extern void usb_device_autosuspend_enable(struct usb_device *udev);
49 extern int usb_autopm_set_interface(struct usb_interface *intf);
50 extern int usb_autopm_get_interface(struct usb_interface *intf);
51 extern void usb_autopm_put_interface(struct usb_interface *intf);
52@@ -568,6 +569,9 @@ static inline void usb_mark_last_busy(struct usb_device *udev)
53
54 #else
55
56+static inline void usb_device_autosuspend_enable(struct usb_device *udev)
57+{ }
58+
59 static inline int usb_autopm_set_interface(struct usb_interface *intf)
60 { return 0; }
61
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-usb-uvc-autosuspend.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-usb-uvc-autosuspend.patch
new file mode 100644
index 0000000000..b7c7f6e0f4
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6-usb-uvc-autosuspend.patch
@@ -0,0 +1,19 @@
1commit 9d4c919bcfa794c054cc33155c7e3c53ac2c5684
2Author: Matthew Garrett <mjg@redhat.com>
3Date: Sun Jul 19 02:24:49 2009 +0100
4
5 Enable autosuspend on UVC by default
6
7diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
8index 89927b7..8de516b 100644
9--- a/drivers/media/video/uvc/uvc_driver.c
10+++ b/drivers/media/video/uvc/uvc_driver.c
11@@ -1647,6 +1647,8 @@ static int uvc_probe(struct usb_interface *intf,
12 "supported.\n", ret);
13 }
14
15+ usb_device_autosuspend_enable(udev);
16+
17 uvc_trace(UVC_TRACE_PROBE, "UVC device initialized.\n");
18 return 0;
19
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-dont-wait-for-mouse.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-dont-wait-for-mouse.patch
new file mode 100644
index 0000000000..6b2d54ff5e
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-dont-wait-for-mouse.patch
@@ -0,0 +1,47 @@
1From dce8113d033975f56630cf6d2a6a908cfb66059d Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 20 Jul 2008 13:12:16 -0700
4Subject: [PATCH] fastboot: remove "wait for all devices before mounting root" delay
5
6In the non-initrd case, we wait for all devices to finish their
7probing before we try to mount the rootfs.
8In practice, this means that we end up waiting 2 extra seconds for
9the PS/2 mouse probing even though the root holding device has been
10ready since a long time.
11
12The previous two patches in this series made the RAID autodetect code
13do it's own "wait for probing to be done" code, and added
14"wait and retry" functionality in case the root device isn't actually
15available.
16
17These two changes should make it safe to remove the delay itself,
18and this patch does this. On my test laptop, this reduces the boot time
19by 2 seconds (kernel time goes from 3.9 to 1.9 seconds).
20
21Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
22---
23---
24 init/do_mounts.c | 3 +++
25 1 file changed, 3 insertions(+)
26
27Index: linux-2.6.29/init/do_mounts.c
28===================================================================
29--- linux-2.6.29.orig/init/do_mounts.c
30+++ linux-2.6.29/init/do_mounts.c
31@@ -370,6 +370,7 @@ void __init prepare_namespace(void)
32 ssleep(root_delay);
33 }
34
35+#if 0
36 /*
37 * wait for the known devices to complete their probing
38 *
39@@ -378,6 +379,8 @@ void __init prepare_namespace(void)
40 * for the touchpad of a laptop to initialize.
41 */
42 wait_for_device_probe();
43+#endif
44+ async_synchronize_full();
45
46 md_run_setup();
47
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-jbd-longer-commit-interval.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-jbd-longer-commit-interval.patch
new file mode 100644
index 0000000000..46a9e24a7e
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-jbd-longer-commit-interval.patch
@@ -0,0 +1,25 @@
1From 0143f8eb8afcaccba5a78196fb3db4361e0097a7 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Mon, 9 Feb 2009 21:25:32 -0800
4Subject: [PATCH] jbd: longer commit interval
5
6... 5 seconds is rather harsh on ssd's..
7
8Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
9---
10 include/linux/jbd.h | 2 +-
11 1 file changed, 1 insertion(+), 1 deletion(-)
12
13Index: linux-2.6.29/include/linux/jbd.h
14===================================================================
15--- linux-2.6.29.orig/include/linux/jbd.h
16+++ linux-2.6.29/include/linux/jbd.h
17@@ -46,7 +46,7 @@
18 /*
19 * The default maximum commit age, in seconds.
20 */
21-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
22+#define JBD_DEFAULT_MAX_COMMIT_AGE 15
23
24 #ifdef CONFIG_JBD_DEBUG
25 /*
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-after-sata.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-after-sata.patch
new file mode 100644
index 0000000000..f635e2a88d
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-after-sata.patch
@@ -0,0 +1,38 @@
1---
2 drivers/Makefile | 14 +++++++-------
3 1 file changed, 7 insertions(+), 7 deletions(-)
4
5Index: linux-2.6.29/drivers/Makefile
6===================================================================
7--- linux-2.6.29.orig/drivers/Makefile
8+++ linux-2.6.29/drivers/Makefile
9@@ -25,15 +25,8 @@ obj-$(CONFIG_REGULATOR) += regulator/
10 # default.
11 obj-y += char/
12
13-# gpu/ comes after char for AGP vs DRM startup
14-obj-y += gpu/
15-
16 obj-$(CONFIG_CONNECTOR) += connector/
17
18-# i810fb and intelfb depend on char/agp/
19-obj-$(CONFIG_FB_I810) += video/i810/
20-obj-$(CONFIG_FB_INTEL) += video/intelfb/
21-
22 obj-y += serial/
23 obj-$(CONFIG_PARPORT) += parport/
24 obj-y += base/ block/ misc/ mfd/ media/
25@@ -43,6 +36,13 @@ obj-$(CONFIG_IDE) += ide/
26 obj-$(CONFIG_SCSI) += scsi/
27 obj-$(CONFIG_ATA) += ata/
28 obj-y += net/
29+
30+# gpu/ comes after char for AGP vs DRM startup
31+obj-y += gpu/
32+# i810fb and intelfb depend on char/agp/
33+obj-$(CONFIG_FB_I810) += video/i810/
34+obj-$(CONFIG_FB_INTEL) += video/intelfb/
35+
36 obj-$(CONFIG_ATM) += atm/
37 obj-$(CONFIG_FUSION) += message/
38 obj-$(CONFIG_FIREWIRE) += firewire/
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-dont-blank-display.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-dont-blank-display.patch
new file mode 100644
index 0000000000..ad26326967
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-dont-blank-display.patch
@@ -0,0 +1,33 @@
1--- vanilla-2.6.31-rc4/drivers/gpu/drm/i915/intel_lvds.c~ 2009-07-31 11:23:05.000000000 -0700
2+++ vanilla-2.6.31-rc4/drivers/gpu/drm/i915/intel_lvds.c 2009-07-31 11:23:05.000000000 -0700
3@@ -111,19 +111,12 @@ static void intel_lvds_set_power(struct
4 if (on) {
5 I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
6 POWER_TARGET_ON);
7- do {
8- pp_status = I915_READ(status_reg);
9- } while ((pp_status & PP_ON) == 0);
10-
11 intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
12 } else {
13 intel_lvds_set_backlight(dev, 0);
14
15 I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
16 ~POWER_TARGET_ON);
17- do {
18- pp_status = I915_READ(status_reg);
19- } while (pp_status & PP_ON);
20 }
21 }
22
23--- linux-2.6.31/drivers/gpu/drm/i915/intel_lvds.c~ 2009-10-11 10:13:38.000000000 -0700
24+++ linux-2.6.31/drivers/gpu/drm/i915/intel_lvds.c 2009-10-11 10:13:38.000000000 -0700
25@@ -98,7 +98,7 @@
26 static void intel_lvds_set_power(struct drm_device *dev, bool on)
27 {
28 struct drm_i915_private *dev_priv = dev->dev_private;
29- u32 pp_status, ctl_reg, status_reg;
30+ u32 ctl_reg, status_reg;
31
32 if (IS_IGDNG(dev)) {
33 ctl_reg = PCH_PP_CONTROL;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-edid-cache.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-edid-cache.patch
new file mode 100644
index 0000000000..47e5b16a2a
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-edid-cache.patch
@@ -0,0 +1,58 @@
1diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
2index 004541c..b218780 100644
3--- a/drivers/gpu/drm/i915/intel_drv.h
4+++ b/drivers/gpu/drm/i915/intel_drv.h
5@@ -81,6 +81,7 @@ struct intel_output {
6 int type;
7 struct i2c_adapter *i2c_bus;
8 struct i2c_adapter *ddc_bus;
9+ struct edid *edid;
10 bool load_detect_temp;
11 bool needs_tv_clock;
12 void *dev_priv;
13diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
14index 9ab38ef..9fba800 100644
15--- a/drivers/gpu/drm/i915/intel_lvds.c
16+++ b/drivers/gpu/drm/i915/intel_lvds.c
17@@ -657,6 +657,7 @@ static void intel_lvds_destroy(struct drm_connector *connector)
18 intel_i2c_destroy(intel_output->ddc_bus);
19 drm_sysfs_connector_remove(connector);
20 drm_connector_cleanup(connector);
21+ kfree(intel_output->edid);
22 kfree(connector);
23 }
24
25@@ -1017,5 +1018,6 @@ failed:
26 if (intel_output->ddc_bus)
27 intel_i2c_destroy(intel_output->ddc_bus);
28 drm_connector_cleanup(connector);
29+ kfree(intel_output->edid);
30 kfree(intel_output);
31 }
32diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
33index 67e2f46..5ac537f 100644
34--- a/drivers/gpu/drm/i915/intel_modes.c
35+++ b/drivers/gpu/drm/i915/intel_modes.c
36@@ -74,6 +74,10 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
37 int ret = 0;
38
39 intel_i2c_quirk_set(intel_output->base.dev, true);
40+ if (intel_output->edid && intel_output->type == INTEL_OUTPUT_LVDS) {
41+ printk(KERN_INFO "Skipping EDID probe due to cached edid\n");
42+ return ret;
43+ }
44 edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
45 intel_i2c_quirk_set(intel_output->base.dev, false);
46 if (edid) {
47@@ -81,7 +85,10 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
48 edid);
49 ret = drm_add_edid_modes(&intel_output->base, edid);
50 intel_output->base.display_info.raw_edid = NULL;
51- kfree(edid);
52+ if (intel_output->type == INTEL_OUTPUT_LVDS)
53+ intel_output->edid = edid;
54+ else
55+ kfree(edid);
56 }
57
58 return ret;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-run-async.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-run-async.patch
new file mode 100644
index 0000000000..eaef8eb6ce
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-kms-run-async.patch
@@ -0,0 +1,118 @@
1Index: b/drivers/gpu/drm/drm_crtc_helper.c
2===================================================================
3--- a/drivers/gpu/drm/drm_crtc_helper.c
4+++ b/drivers/gpu/drm/drm_crtc_helper.c
5@@ -29,6 +29,8 @@
6 * Jesse Barnes <jesse.barnes@intel.com>
7 */
8
9+#include <linux/async.h>
10+
11 #include "drmP.h"
12 #include "drm_crtc.h"
13 #include "drm_crtc_helper.h"
14@@ -62,6 +64,8 @@ static void drm_mode_validate_flag(struc
15 return;
16 }
17
18+LIST_HEAD(drm_async_list);
19+
20 /**
21 * drm_helper_probe_connector_modes - get complete set of display modes
22 * @dev: DRM device
23@@ -916,6 +920,7 @@ bool drm_helper_plugged_event(struct drm
24 /* FIXME: send hotplug event */
25 return true;
26 }
27+
28 /**
29 * drm_initial_config - setup a sane initial connector configuration
30 * @dev: DRM device
31@@ -953,13 +958,26 @@ bool drm_helper_initial_config(struct dr
32
33 drm_setup_crtcs(dev);
34
35- /* alert the driver fb layer */
36 dev->mode_config.funcs->fb_changed(dev);
37-
38 return 0;
39 }
40 EXPORT_SYMBOL(drm_helper_initial_config);
41
42+static void drm_helper_initial_config_helper(void *ptr, async_cookie_t cookie)
43+{
44+ struct drm_device *dev = ptr;
45+ drm_helper_initial_config(dev);
46+}
47+
48+void drm_helper_initial_config_async(struct drm_device *dev)
49+{
50+ async_schedule_domain(drm_helper_initial_config_helper,
51+ dev, &drm_async_list);
52+}
53+EXPORT_SYMBOL(drm_helper_initial_config_async);
54+
55+
56+
57 static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
58 {
59 int dpms = DRM_MODE_DPMS_OFF;
60Index: b/drivers/gpu/drm/drm_drv.c
61===================================================================
62--- a/drivers/gpu/drm/drm_drv.c
63+++ b/drivers/gpu/drm/drm_drv.c
64@@ -49,6 +49,7 @@
65 #include <linux/debugfs.h>
66 #include "drmP.h"
67 #include "drm_core.h"
68+#include <linux/async.h>
69
70
71 static int drm_version(struct drm_device *dev, void *data,
72@@ -290,6 +291,9 @@ void drm_exit(struct drm_driver *driver)
73 struct drm_device *dev, *tmp;
74 DRM_DEBUG("\n");
75
76+ /* make sure all async DRM operations are finished */
77+ async_synchronize_full_domain(&drm_async_list);
78+
79 if (driver->driver_features & DRIVER_MODESET) {
80 pci_unregister_driver(&driver->pci_driver);
81 } else {
82Index: b/include/drm/drmP.h
83===================================================================
84--- a/include/drm/drmP.h
85+++ b/include/drm/drmP.h
86@@ -328,6 +328,7 @@ struct drm_vma_entry {
87 pid_t pid;
88 };
89
90+extern struct list_head drm_async_list;
91 /**
92 * DMA buffer.
93 */
94Index: b/include/drm/drm_crtc_helper.h
95===================================================================
96--- a/include/drm/drm_crtc_helper.h
97+++ b/include/drm/drm_crtc_helper.h
98@@ -92,6 +92,7 @@ extern int drm_helper_probe_single_conne
99 extern void drm_helper_disable_unused_functions(struct drm_device *dev);
100 extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
101 extern bool drm_helper_initial_config(struct drm_device *dev);
102+extern void drm_helper_initial_config_async(struct drm_device *dev);
103 extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
104 extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
105 struct drm_display_mode *mode,
106Index: b/drivers/gpu/drm/i915/i915_dma.c
107===================================================================
108--- a/drivers/gpu/drm/i915/i915_dma.c
109+++ b/drivers/gpu/drm/i915/i915_dma.c
110@@ -1045,7 +1045,7 @@ static int i915_load_modeset_init(struct
111
112 intel_modeset_init(dev);
113
114- drm_helper_initial_config(dev);
115+ drm_helper_initial_config_async(dev);
116
117 return 0;
118
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-silence-acer-message.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-silence-acer-message.patch
new file mode 100644
index 0000000000..ff76f09de0
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-silence-acer-message.patch
@@ -0,0 +1,22 @@
1From: Arjan van de Ven <arjan@linux.intel.com>
2Date: Fri, 23 Jan 2009
3
4Small fix changing error msg to info msg in acer wmi driver
5---
6---
7 drivers/platform/x86/acer-wmi.c | 2 +-
8 1 file changed, 1 insertion(+), 1 deletion(-)
9
10Index: linux-2.6.29/drivers/platform/x86/acer-wmi.c
11===================================================================
12--- linux-2.6.29.orig/drivers/platform/x86/acer-wmi.c
13+++ linux-2.6.29/drivers/platform/x86/acer-wmi.c
14@@ -1290,7 +1290,7 @@ static int __init acer_wmi_init(void)
15 AMW0_find_mailled();
16
17 if (!interface) {
18- printk(ACER_ERR "No or unsupported WMI interface, unable to "
19+ printk(ACER_INFO "No or unsupported WMI interface, unable to "
20 "load\n");
21 return -ENODEV;
22 }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-sreadahead.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-sreadahead.patch
new file mode 100644
index 0000000000..7fb6a29643
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-sreadahead.patch
@@ -0,0 +1,66 @@
1From 4d690855d6bdc15b753ac3c21bf507ad94d46aac Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 21 Sep 2008 11:58:27 -0700
4Subject: [PATCH] superreadahead patch
5
6---
7 fs/ext3/ioctl.c | 3 +++
8 fs/ext3/super.c | 1 +
9 include/linux/ext3_fs.h | 1 +
10 include/linux/fs.h | 2 ++
11 4 files changed, 7 insertions(+), 0 deletions(-)
12
13diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
14index 8897481..08f4854 100644
15--- a/fs/ext3/ioctl.c
16+++ b/fs/ext3/ioctl.c
17@@ -276,6 +276,9 @@ group_add_out:
18 mnt_drop_write(filp->f_path.mnt);
19 return err;
20 }
21+ case EXT3_IOC_INODE_JIFFIES: {
22+ return inode->created_when;
23+ }
24
25
26 default:
27diff --git a/fs/ext3/super.c b/fs/ext3/super.c
28index 524b349..e6e8514 100644
29--- a/fs/ext3/super.c
30+++ b/fs/ext3/super.c
31@@ -466,6 +466,7 @@ static struct inode *ext3_alloc_inode(struct super_block *sb)
32 return NULL;
33 ei->i_block_alloc_info = NULL;
34 ei->vfs_inode.i_version = 1;
35+ ei->vfs_inode.created_when = jiffies;
36 return &ei->vfs_inode;
37 }
38
39diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
40index 634a5e5..84d5394 100644
41--- a/include/linux/ext3_fs.h
42+++ b/include/linux/ext3_fs.h
43@@ -250,6 +250,7 @@ struct ext3_new_group_data {
44 #endif
45 #define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
46 #define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
47+#define EXT3_IOC_INODE_JIFFIES _IOR('f', 19, long)
48
49 /*
50 * ioctl commands in 32 bit emulation
51diff --git a/include/linux/fs.h b/include/linux/fs.h
52index 0872372..078e3fd 100644
53--- a/include/linux/fs.h
54+++ b/include/linux/fs.h
55@@ -781,6 +781,8 @@ struct inode {
56 struct posix_acl *i_default_acl;
57 #endif
58 void *i_private; /* fs or device private pointer */
59+
60+ unsigned long created_when; /* jiffies of creation time */
61 };
62
63 /*
64--
651.6.0.6
66
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-touchkit.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-touchkit.patch
new file mode 100644
index 0000000000..5253404c29
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.29-touchkit.patch
@@ -0,0 +1,146 @@
1From 3281da09528ca94f1b1fd39cae388f5b5423aa46 Mon Sep 17 00:00:00 2001
2From: Alan Olsen <alanx.r.olsen@intel.com>
3Date: Tue, 21 Jul 2009 13:26:58 -0700
4Subject: [PATCH] linux-2.6.29-touchkit.patch
5
6Signed-off-by: Alan Olsen <alanx.r.olsen@intel.com>
7---
8 drivers/input/mouse/psmouse-base.c | 9 +++++++
9 drivers/input/mouse/psmouse.h | 1 +
10 drivers/input/mouse/touchkit_ps2.c | 45 ++++++++++++++++++++++++++++++++++-
11 drivers/input/mouse/touchkit_ps2.h | 6 ++++
12 4 files changed, 59 insertions(+), 2 deletions(-)
13
14diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
15index b407b35..4c6b184 100644
16--- a/drivers/input/mouse/psmouse-base.c
17+++ b/drivers/input/mouse/psmouse-base.c
18@@ -678,6 +678,9 @@ static int psmouse_extensions(struct psmouse *psmouse,
19
20 if (touchkit_ps2_detect(psmouse, set_properties) == 0)
21 return PSMOUSE_TOUCHKIT_PS2;
22+
23+ if (elftouch_ps2_detect(psmouse, set_properties) == 0)
24+ return PSMOUSE_ELFTOUCH_PS2;
25 }
26
27 /*
28@@ -788,6 +791,12 @@ static const struct psmouse_protocol psmouse_protocols[] = {
29 .alias = "trackpoint",
30 .detect = trackpoint_detect,
31 },
32+ {
33+ .type = PSMOUSE_ELFTOUCH_PS2,
34+ .name = "elftouchPS2",
35+ .alias = "elftouch",
36+ .detect = elftouch_ps2_detect,
37+ },
38 #endif
39 #ifdef CONFIG_MOUSE_PS2_TOUCHKIT
40 {
41diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
42index 54ed267..8d1ba79 100644
43--- a/drivers/input/mouse/psmouse.h
44+++ b/drivers/input/mouse/psmouse.h
45@@ -89,6 +89,7 @@ enum psmouse_type {
46 PSMOUSE_TRACKPOINT,
47 PSMOUSE_TOUCHKIT_PS2,
48 PSMOUSE_CORTRON,
49+ PSMOUSE_ELFTOUCH_PS2,
50 PSMOUSE_HGPK,
51 PSMOUSE_ELANTECH,
52 PSMOUSE_AUTO /* This one should always be last */
53diff --git a/drivers/input/mouse/touchkit_ps2.c b/drivers/input/mouse/touchkit_ps2.c
54index 3fadb2a..e9c27f1 100644
55--- a/drivers/input/mouse/touchkit_ps2.c
56+++ b/drivers/input/mouse/touchkit_ps2.c
57@@ -51,6 +51,11 @@
58 #define TOUCHKIT_GET_X(packet) (((packet)[1] << 7) | (packet)[2])
59 #define TOUCHKIT_GET_Y(packet) (((packet)[3] << 7) | (packet)[4])
60
61+#define ELFTOUCH_MAX_XC 0x0fff
62+#define ELFTOUCH_MAX_YC 0x0fff
63+#define ELFTOUCH_GET_X(packet) (((packet)[3] << 7) | (packet)[4])
64+#define ELFTOUCH_GET_Y(packet) (((packet)[1] << 7) | (packet)[2])
65+
66 static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse)
67 {
68 unsigned char *packet = psmouse->packet;
69@@ -59,9 +64,15 @@ static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse)
70 if (psmouse->pktcnt != 5)
71 return PSMOUSE_GOOD_DATA;
72
73- input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet));
74- input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet));
75+ if(psmouse->type==PSMOUSE_ELFTOUCH_PS2) {
76+ input_report_abs(dev, ABS_X, ELFTOUCH_GET_X(packet));
77+ input_report_abs(dev, ABS_Y, ELFTOUCH_GET_Y(packet));
78+ } else {
79+ input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet));
80+ input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet));
81+ }
82 input_report_key(dev, BTN_TOUCH, TOUCHKIT_GET_TOUCHED(packet));
83+
84 input_sync(dev);
85
86 return PSMOUSE_FULL_PACKET;
87@@ -98,3 +109,33 @@ int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties)
88
89 return 0;
90 }
91+
92+int elftouch_ps2_detect(struct psmouse *psmouse, int set_properties)
93+{
94+ struct input_dev *dev = psmouse->dev;
95+ unsigned char param[16];
96+ int command, res;
97+
98+ param[0]=0x0f4;
99+ command = TOUCHKIT_SEND_PARMS(1, 0, TOUCHKIT_CMD);
100+ res=ps2_command(&psmouse->ps2dev, param, command);
101+ if(res) { return -ENODEV; }
102+
103+ param[0]=0x0b0;
104+ command = TOUCHKIT_SEND_PARMS(1, 1, TOUCHKIT_CMD);
105+ res=ps2_command(&psmouse->ps2dev, param, command);
106+ if(res) { return -ENODEV; }
107+
108+ if (set_properties) {
109+ dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
110+ set_bit(BTN_TOUCH, dev->keybit);
111+ input_set_abs_params(dev, ABS_X, 0, ELFTOUCH_MAX_XC, 0, 0);
112+ input_set_abs_params(dev, ABS_Y, 0, ELFTOUCH_MAX_YC, 0, 0);
113+
114+ psmouse->vendor = "ElfTouch";
115+ psmouse->name = "Touchscreen";
116+ psmouse->protocol_handler = touchkit_ps2_process_byte;
117+ psmouse->pktsize = 5;
118+ }
119+ return 0;
120+}
121diff --git a/drivers/input/mouse/touchkit_ps2.h b/drivers/input/mouse/touchkit_ps2.h
122index 8a0dd35..f32ef4c 100644
123--- a/drivers/input/mouse/touchkit_ps2.h
124+++ b/drivers/input/mouse/touchkit_ps2.h
125@@ -14,12 +14,18 @@
126
127 #ifdef CONFIG_MOUSE_PS2_TOUCHKIT
128 int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties);
129+int elftouch_ps2_detect(struct psmouse *psmouse, int set_properties);
130 #else
131 static inline int touchkit_ps2_detect(struct psmouse *psmouse,
132 int set_properties)
133 {
134 return -ENOSYS;
135 }
136+static inline int elftouch_ps2_detect(struct psmouse *psmouse,
137+ int set_properties)
138+{
139+ return -ENOSYS;
140+}
141 #endif /* CONFIG_MOUSE_PS2_TOUCHKIT */
142
143 #endif
144--
1451.6.0.6
146
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.30-non-root-X.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.30-non-root-X.patch
new file mode 100644
index 0000000000..76dd36082a
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.30-non-root-X.patch
@@ -0,0 +1,32 @@
1From a5a267593c15ac987f78cfc21cae0c8ef723f81e Mon Sep 17 00:00:00 2001
2From: Alan Olsen <alan.r.olsen@intel.com>
3Date: Mon, 21 Sep 2009 13:58:49 -0700
4Subject: [PATCH] linux-2.6.30-non-root-X.patch
5
6Signed-off-by: Alan Olsen <alan.r.olsen@intel.com>
7---
8 drivers/gpu/drm/drm_drv.c | 4 ++--
9 1 files changed, 2 insertions(+), 2 deletions(-)
10
11diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
12index 4678f8f..b7f3a41 100644
13--- a/drivers/gpu/drm/drm_drv.c
14+++ b/drivers/gpu/drm/drm_drv.c
15@@ -64,12 +64,12 @@ static struct drm_ioctl_desc drm_ioctls[] = {
16 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
17 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
18 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
19- DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
20+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
21
22 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
23 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
24 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
25- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
26+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
27
28 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
29 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
30--
311.6.0.6
32
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-1-2-timberdale.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-1-2-timberdale.patch
new file mode 100644
index 0000000000..9db5b4ac72
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-1-2-timberdale.patch
@@ -0,0 +1,12910 @@
1diff -uNr linux-2.6.31/drivers/gpio/Kconfig linux-2.6.31.new/drivers/gpio/Kconfig
2--- linux-2.6.31/drivers/gpio/Kconfig 2009-10-23 11:18:30.000000000 -0700
3+++ linux-2.6.31.new/drivers/gpio/Kconfig 2009-10-23 11:17:19.000000000 -0700
4@@ -173,6 +173,12 @@
5
6 If unsure, say N.
7
8+config GPIO_TIMBERDALE
9+ bool "Support for timberdale GPIO IP"
10+ depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
11+ ---help---
12+ Add support for the GPIO IP in the timberdale FPGA.
13+
14 comment "SPI GPIO expanders:"
15
16 config GPIO_MAX7301
17@@ -188,4 +194,11 @@
18 SPI driver for Microchip MCP23S08 I/O expander. This provides
19 a GPIO interface supporting inputs and outputs.
20
21+config GPIO_MC33880
22+ tristate "Freescale MC33880 high-side/low-side switch"
23+ depends on SPI_MASTER
24+ help
25+ SPI driver for Freescale MC33880 high-side/low-side switch.
26+ This provides GPIO interface supporting inputs and outputs.
27+
28 endif
29diff -uNr linux-2.6.31/drivers/gpio/Makefile linux-2.6.31.new/drivers/gpio/Makefile
30--- linux-2.6.31/drivers/gpio/Makefile 2009-10-23 11:18:30.000000000 -0700
31+++ linux-2.6.31.new/drivers/gpio/Makefile 2009-10-23 11:17:19.000000000 -0700
32@@ -14,3 +14,6 @@
33 obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
34 obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
35 obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
36+obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
37+obj-$(CONFIG_GPIO_MC33880) += mc33880.o
38+
39diff -uNr linux-2.6.31/drivers/gpio/mc33880.c linux-2.6.31.new/drivers/gpio/mc33880.c
40--- linux-2.6.31/drivers/gpio/mc33880.c 1969-12-31 16:00:00.000000000 -0800
41+++ linux-2.6.31.new/drivers/gpio/mc33880.c 2009-10-23 11:17:19.000000000 -0700
42@@ -0,0 +1,196 @@
43+/*
44+ * mc33880.c MC33880 high-side/low-side switch GPIO driver
45+ * Copyright (c) 2009 Intel Corporation
46+ *
47+ * This program is free software; you can redistribute it and/or modify
48+ * it under the terms of the GNU General Public License version 2 as
49+ * published by the Free Software Foundation.
50+ *
51+ * This program is distributed in the hope that it will be useful,
52+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
53+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
54+ * GNU General Public License for more details.
55+ *
56+ * You should have received a copy of the GNU General Public License
57+ * along with this program; if not, write to the Free Software
58+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
59+ */
60+
61+/* Supports:
62+ * Freescale MC33880 high-side/low-side switch
63+ */
64+
65+#include <linux/init.h>
66+#include <linux/mutex.h>
67+#include <linux/spi/spi.h>
68+#include <linux/spi/mc33880.h>
69+#include <linux/gpio.h>
70+
71+#define DRIVER_NAME "mc33880"
72+
73+/*
74+ * Pin configurations, see MAX7301 datasheet page 6
75+ */
76+#define PIN_CONFIG_MASK 0x03
77+#define PIN_CONFIG_IN_PULLUP 0x03
78+#define PIN_CONFIG_IN_WO_PULLUP 0x02
79+#define PIN_CONFIG_OUT 0x01
80+
81+#define PIN_NUMBER 8
82+
83+
84+/*
85+ * Some registers must be read back to modify.
86+ * To save time we cache them here in memory
87+ */
88+struct mc33880 {
89+ struct mutex lock; /* protect from simultanous accesses */
90+ u8 port_config;
91+ struct gpio_chip chip;
92+ struct spi_device *spi;
93+};
94+
95+static int mc33880_write_config(struct mc33880 *mc)
96+{
97+ return spi_write(mc->spi, &mc->port_config, sizeof(mc->port_config));
98+}
99+
100+
101+static int __mc33880_set(struct mc33880 *mc, unsigned offset, int value)
102+{
103+ if (value)
104+ mc->port_config |= 1 << offset;
105+ else
106+ mc->port_config &= ~(1 << offset);
107+
108+ return mc33880_write_config(mc);
109+}
110+
111+
112+static void mc33880_set(struct gpio_chip *chip, unsigned offset, int value)
113+{
114+ struct mc33880 *mc = container_of(chip, struct mc33880, chip);
115+
116+ mutex_lock(&mc->lock);
117+
118+ __mc33880_set(mc, offset, value);
119+
120+ mutex_unlock(&mc->lock);
121+}
122+
123+static int __devinit mc33880_probe(struct spi_device *spi)
124+{
125+ struct mc33880 *mc;
126+ struct mc33880_platform_data *pdata;
127+ int ret;
128+
129+ pdata = spi->dev.platform_data;
130+ if (!pdata || !pdata->base) {
131+ dev_dbg(&spi->dev, "incorrect or missing platform data\n");
132+ return -EINVAL;
133+ }
134+
135+ /*
136+ * bits_per_word cannot be configured in platform data
137+ */
138+ spi->bits_per_word = 8;
139+
140+ ret = spi_setup(spi);
141+ if (ret < 0)
142+ return ret;
143+
144+ mc = kzalloc(sizeof(struct mc33880), GFP_KERNEL);
145+ if (!mc)
146+ return -ENOMEM;
147+
148+ mutex_init(&mc->lock);
149+
150+ dev_set_drvdata(&spi->dev, mc);
151+
152+ mc->spi = spi;
153+
154+ mc->chip.label = DRIVER_NAME,
155+ mc->chip.set = mc33880_set;
156+ mc->chip.base = pdata->base;
157+ mc->chip.ngpio = PIN_NUMBER;
158+ mc->chip.can_sleep = 1;
159+ mc->chip.dev = &spi->dev;
160+ mc->chip.owner = THIS_MODULE;
161+
162+ mc->port_config = 0x00;
163+ /* write twice, because during initialisation the first setting
164+ * is just for testing SPI communication, and the second is the
165+ * "real" configuration
166+ */
167+ ret = mc33880_write_config(mc);
168+ mc->port_config = 0x00;
169+ if (!ret)
170+ ret = mc33880_write_config(mc);
171+
172+ if (ret) {
173+ printk(KERN_ERR "Failed writing to " DRIVER_NAME ": %d\n", ret);
174+ goto exit_destroy;
175+ }
176+
177+ ret = gpiochip_add(&mc->chip);
178+ if (ret)
179+ goto exit_destroy;
180+
181+ return ret;
182+
183+exit_destroy:
184+ dev_set_drvdata(&spi->dev, NULL);
185+ mutex_destroy(&mc->lock);
186+ kfree(mc);
187+ return ret;
188+}
189+
190+static int mc33880_remove(struct spi_device *spi)
191+{
192+ struct mc33880 *mc;
193+ int ret;
194+
195+ mc = dev_get_drvdata(&spi->dev);
196+ if (mc == NULL)
197+ return -ENODEV;
198+
199+ dev_set_drvdata(&spi->dev, NULL);
200+
201+ ret = gpiochip_remove(&mc->chip);
202+ if (!ret) {
203+ mutex_destroy(&mc->lock);
204+ kfree(mc);
205+ } else
206+ dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n",
207+ ret);
208+
209+ return ret;
210+}
211+
212+static struct spi_driver mc33880_driver = {
213+ .driver = {
214+ .name = DRIVER_NAME,
215+ .owner = THIS_MODULE,
216+ },
217+ .probe = mc33880_probe,
218+ .remove = __devexit_p(mc33880_remove),
219+};
220+
221+static int __init mc33880_init(void)
222+{
223+ return spi_register_driver(&mc33880_driver);
224+}
225+/* register after spi postcore initcall and before
226+ * subsys initcalls that may rely on these GPIOs
227+ */
228+subsys_initcall(mc33880_init);
229+
230+static void __exit mc33880_exit(void)
231+{
232+ spi_unregister_driver(&mc33880_driver);
233+}
234+module_exit(mc33880_exit);
235+
236+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
237+MODULE_LICENSE("GPL v2");
238+
239diff -uNr linux-2.6.31/drivers/gpio/timbgpio.c linux-2.6.31.new/drivers/gpio/timbgpio.c
240--- linux-2.6.31/drivers/gpio/timbgpio.c 1969-12-31 16:00:00.000000000 -0800
241+++ linux-2.6.31.new/drivers/gpio/timbgpio.c 2009-10-23 11:17:19.000000000 -0700
242@@ -0,0 +1,342 @@
243+/*
244+ * timbgpio.c timberdale FPGA GPIO driver
245+ * Copyright (c) 2009 Intel Corporation
246+ *
247+ * This program is free software; you can redistribute it and/or modify
248+ * it under the terms of the GNU General Public License version 2 as
249+ * published by the Free Software Foundation.
250+ *
251+ * This program is distributed in the hope that it will be useful,
252+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
253+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
254+ * GNU General Public License for more details.
255+ *
256+ * You should have received a copy of the GNU General Public License
257+ * along with this program; if not, write to the Free Software
258+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
259+ */
260+
261+/* Supports:
262+ * Timberdale FPGA GPIO
263+ */
264+
265+#include <linux/module.h>
266+#include <linux/gpio.h>
267+#include <linux/platform_device.h>
268+#include <linux/io.h>
269+#include <linux/timb_gpio.h>
270+#include <linux/interrupt.h>
271+
272+#define DRIVER_NAME "timb-gpio"
273+
274+#define TGPIOVAL 0x00
275+#define TGPIODIR 0x04
276+#define TGPIO_IER 0x08
277+#define TGPIO_ISR 0x0c
278+#define TGPIO_IPR 0x10
279+#define TGPIO_ICR 0x14
280+#define TGPIO_FLR 0x18
281+#define TGPIO_LVR 0x1c
282+
283+struct timbgpio {
284+ void __iomem *membase;
285+ spinlock_t lock; /* mutual exclusion */
286+ struct gpio_chip gpio;
287+ int irq_base;
288+};
289+
290+static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
291+ unsigned offset, bool enabled)
292+{
293+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
294+ u32 reg;
295+
296+ spin_lock(&tgpio->lock);
297+ reg = ioread32(tgpio->membase + offset);
298+
299+ if (enabled)
300+ reg |= (1 << index);
301+ else
302+ reg &= ~(1 << index);
303+
304+ iowrite32(reg, tgpio->membase + offset);
305+ spin_unlock(&tgpio->lock);
306+
307+ return 0;
308+}
309+
310+static int timbgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
311+{
312+ return timbgpio_update_bit(gpio, nr, TGPIODIR, true);
313+}
314+
315+static int timbgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
316+{
317+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
318+ u32 value;
319+
320+ value = ioread32(tgpio->membase + TGPIOVAL);
321+ return (value & (1 << nr)) ? 1 : 0;
322+}
323+
324+static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
325+ unsigned nr, int val)
326+{
327+ return timbgpio_update_bit(gpio, nr, TGPIODIR, false);
328+}
329+
330+static void timbgpio_gpio_set(struct gpio_chip *gpio,
331+ unsigned nr, int val)
332+{
333+ timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
334+}
335+
336+static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
337+{
338+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
339+
340+ if (tgpio->irq_base <= 0)
341+ return -EINVAL;
342+
343+ return tgpio->irq_base + offset;
344+}
345+
346+/*
347+ * GPIO IRQ
348+ */
349+static void timbgpio_irq_disable(unsigned irq)
350+{
351+ struct timbgpio *tgpio = get_irq_chip_data(irq);
352+ int offset = irq - tgpio->irq_base;
353+
354+ timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
355+}
356+
357+static void timbgpio_irq_enable(unsigned irq)
358+{
359+ struct timbgpio *tgpio = get_irq_chip_data(irq);
360+ int offset = irq - tgpio->irq_base;
361+
362+ timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
363+}
364+
365+static int timbgpio_irq_type(unsigned irq, unsigned trigger)
366+{
367+ struct timbgpio *tgpio = get_irq_chip_data(irq);
368+ int offset = irq - tgpio->irq_base;
369+ unsigned long flags;
370+ u32 lvr, flr;
371+
372+	if (offset < 0 || offset >= tgpio->gpio.ngpio)
373+ return -EINVAL;
374+
375+ spin_lock_irqsave(&tgpio->lock, flags);
376+
377+ lvr = ioread32(tgpio->membase + TGPIO_LVR);
378+ flr = ioread32(tgpio->membase + TGPIO_FLR);
379+
380+ if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
381+ flr &= ~(1 << offset);
382+ if (trigger & IRQ_TYPE_LEVEL_HIGH)
383+ lvr |= 1 << offset;
384+ else
385+ lvr &= ~(1 << offset);
386+ }
387+
388+ if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
389+		{ spin_unlock_irqrestore(&tgpio->lock, flags); return -EINVAL; }
390+	else if (trigger & IRQ_TYPE_EDGE_BOTH) {
391+ flr |= 1 << offset;
392+ /* opposite compared to the datasheet, but it mirrors the
393+ * reality
394+ */
395+ if (trigger & IRQ_TYPE_EDGE_FALLING)
396+ lvr |= 1 << offset;
397+ else
398+ lvr &= ~(1 << offset);
399+ }
400+
401+ iowrite32(lvr, tgpio->membase + TGPIO_LVR);
402+ iowrite32(flr, tgpio->membase + TGPIO_FLR);
403+ iowrite32(1 << offset, tgpio->membase + TGPIO_ICR);
404+ spin_unlock_irqrestore(&tgpio->lock, flags);
405+
406+ return 0;
407+}
408+
409+static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
410+{
411+ struct timbgpio *tgpio = get_irq_data(irq);
412+ unsigned long ipr;
413+ int offset;
414+
415+ desc->chip->ack(irq);
416+ ipr = ioread32(tgpio->membase + TGPIO_IPR);
417+ iowrite32(ipr, tgpio->membase + TGPIO_ICR);
418+
419+ for_each_bit(offset, &ipr, tgpio->gpio.ngpio)
420+ generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
421+}
422+
423+static struct irq_chip timbgpio_irqchip = {
424+ .name = "GPIO",
425+ .enable = timbgpio_irq_enable,
426+ .disable = timbgpio_irq_disable,
427+ .set_type = timbgpio_irq_type,
428+};
429+
430+static int __devinit timbgpio_probe(struct platform_device *pdev)
431+{
432+ int err, i;
433+ struct gpio_chip *gc;
434+ struct timbgpio *tgpio;
435+ struct resource *iomem;
436+ struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
437+ int irq = platform_get_irq(pdev, 0);
438+
439+ if (!pdata || pdata->nr_pins > 32) {
440+ err = -EINVAL;
441+ goto err_mem;
442+ }
443+
444+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
445+ if (!iomem) {
446+ err = -EINVAL;
447+ goto err_mem;
448+ }
449+
450+ tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL);
451+ if (!tgpio) {
452+		err = -ENOMEM;
453+ goto err_mem;
454+ }
455+ tgpio->irq_base = pdata->irq_base;
456+
457+ spin_lock_init(&tgpio->lock);
458+
459+ if (!request_mem_region(iomem->start, resource_size(iomem),
460+ DRIVER_NAME)) {
461+ err = -EBUSY;
462+ goto err_request;
463+ }
464+
465+ tgpio->membase = ioremap(iomem->start, resource_size(iomem));
466+ if (!tgpio->membase) {
467+ err = -ENOMEM;
468+ goto err_ioremap;
469+ }
470+
471+ gc = &tgpio->gpio;
472+
473+ gc->label = dev_name(&pdev->dev);
474+ gc->owner = THIS_MODULE;
475+ gc->dev = &pdev->dev;
476+ gc->direction_input = timbgpio_gpio_direction_input;
477+ gc->get = timbgpio_gpio_get;
478+ gc->direction_output = timbgpio_gpio_direction_output;
479+ gc->set = timbgpio_gpio_set;
480+ gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL;
481+ gc->dbg_show = NULL;
482+ gc->base = pdata->gpio_base;
483+ gc->ngpio = pdata->nr_pins;
484+ gc->can_sleep = 0;
485+
486+ err = gpiochip_add(gc);
487+ if (err)
488+ goto err_chipadd;
489+
490+ platform_set_drvdata(pdev, tgpio);
491+
492+ /* make sure to disable interrupts */
493+ iowrite32(0x0, tgpio->membase + TGPIO_IER);
494+
495+ if (irq < 0 || tgpio->irq_base <= 0)
496+ return 0;
497+
498+ for (i = 0; i < pdata->nr_pins; i++) {
499+ set_irq_chip_and_handler_name(tgpio->irq_base + i,
500+ &timbgpio_irqchip, handle_simple_irq, "mux");
501+ set_irq_chip_data(tgpio->irq_base + i, tgpio);
502+#ifdef CONFIG_ARM
503+ set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
504+#endif
505+ }
506+
507+ set_irq_data(irq, tgpio);
508+ set_irq_chained_handler(irq, timbgpio_irq);
509+
510+ return 0;
511+
512+err_chipadd:
513+ iounmap(tgpio->membase);
514+err_ioremap:
515+ release_mem_region(iomem->start, resource_size(iomem));
516+err_request:
517+ kfree(tgpio);
518+err_mem:
519+ printk(KERN_ERR DRIVER_NAME": Failed to register GPIOs: %d\n", err);
520+
521+ return err;
522+}
523+
524+static int __devexit timbgpio_remove(struct platform_device *pdev)
525+{
526+ int err;
527+ struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
528+ struct timbgpio *tgpio = platform_get_drvdata(pdev);
529+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
530+ int irq = platform_get_irq(pdev, 0);
531+
532+ if (irq >= 0 && tgpio->irq_base > 0) {
533+ int i;
534+ for (i = 0; i < pdata->nr_pins; i++) {
535+ set_irq_chip(tgpio->irq_base + i, NULL);
536+ set_irq_chip_data(tgpio->irq_base + i, NULL);
537+ }
538+
539+ set_irq_handler(irq, NULL);
540+ set_irq_data(irq, NULL);
541+ }
542+
543+ err = gpiochip_remove(&tgpio->gpio);
544+ if (err)
545+ printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n");
546+
547+ iounmap(tgpio->membase);
548+ release_mem_region(iomem->start, resource_size(iomem));
549+ kfree(tgpio);
550+
551+ platform_set_drvdata(pdev, NULL);
552+
553+ return 0;
554+}
555+
556+static struct platform_driver timbgpio_platform_driver = {
557+ .driver = {
558+ .name = DRIVER_NAME,
559+ .owner = THIS_MODULE,
560+ },
561+ .probe = timbgpio_probe,
562+ .remove = timbgpio_remove,
563+};
564+
565+/*--------------------------------------------------------------------------*/
566+
567+static int __init timbgpio_init(void)
568+{
569+ return platform_driver_register(&timbgpio_platform_driver);
570+}
571+
572+static void __exit timbgpio_exit(void)
573+{
574+ platform_driver_unregister(&timbgpio_platform_driver);
575+}
576+
577+module_init(timbgpio_init);
578+module_exit(timbgpio_exit);
579+
580+MODULE_DESCRIPTION("Timberdale GPIO driver");
581+MODULE_LICENSE("GPL v2");
582+MODULE_AUTHOR("Mocean Laboratories");
583+MODULE_ALIAS("platform:"DRIVER_NAME);
584+
585diff -uNr linux-2.6.31/drivers/i2c/busses/i2c-xiic.c linux-2.6.31.new/drivers/i2c/busses/i2c-xiic.c
586--- linux-2.6.31/drivers/i2c/busses/i2c-xiic.c 1969-12-31 16:00:00.000000000 -0800
587+++ linux-2.6.31.new/drivers/i2c/busses/i2c-xiic.c 2009-10-23 11:17:29.000000000 -0700
588@@ -0,0 +1,1132 @@
589+/*
590+ * i2c-xiic.c
591+ * Copyright (c) 2009 Intel Corporation
592+ *
593+ * This program is free software; you can redistribute it and/or modify
594+ * it under the terms of the GNU General Public License version 2 as
595+ * published by the Free Software Foundation.
596+ *
597+ * This program is distributed in the hope that it will be useful,
598+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
599+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
600+ * GNU General Public License for more details.
601+ *
602+ * You should have received a copy of the GNU General Public License
603+ * along with this program; if not, write to the Free Software
604+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
605+ */
606+
607+/* Supports:
608+ * Xilinx IIC
609+ */
610+#include <linux/kernel.h>
611+#include <linux/module.h>
612+#include <linux/init.h>
613+#include <linux/errno.h>
614+#include <linux/platform_device.h>
615+#include <linux/i2c.h>
616+#include <linux/interrupt.h>
617+#include <linux/wait.h>
618+#include <linux/i2c-xiic.h>
619+#include <linux/io.h>
620+
621+#define DRIVER_NAME "xiic-i2c"
622+
623+struct xiic_i2c {
624+ void __iomem *base;
625+ wait_queue_head_t wait;
626+ struct i2c_adapter adap;
627+ struct i2c_msg *tx_msg;
628+ spinlock_t lock; /* mutual exclusion */
629+ unsigned int tx_pos;
630+ unsigned int nmsgs;
631+ int state; /* see STATE_ */
632+
633+ struct i2c_msg *rx_msg; /* current RX message */
634+ int rx_pos;
635+};
636+
637+static inline void xiic_setreg8(struct xiic_i2c *i2c, int reg, u8 value);
638+
639+static inline u8 xiic_getreg8(struct xiic_i2c *i2c, int reg);
640+
641+static inline void xiic_setreg16(struct xiic_i2c *i2c, int reg, u16 value);
642+
643+static inline void xiic_setreg32(struct xiic_i2c *i2c, int reg, int value);
644+
645+static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg);
646+
647+static void xiic_start_xfer(struct xiic_i2c *i2c);
648+static void __xiic_start_xfer(struct xiic_i2c *i2c);
649+
650+/************************** Constant Definitions ****************************/
651+
652+#define STATE_DONE 0x00
653+#define STATE_ERROR 0x01
654+#define STATE_START 0x02
655+
656+#define XIIC_MSB_OFFSET 0
657+#define XIIC_REG_OFFSET (0x100+XIIC_MSB_OFFSET)
658+
659+/*
660+ * Register offsets in bytes from RegisterBase. Three is added to the
661+ * base offset to access LSB (IBM style) of the word
662+ */
663+#define XIIC_CR_REG_OFFSET (0x00+XIIC_REG_OFFSET) /* Control Register */
664+#define XIIC_SR_REG_OFFSET (0x04+XIIC_REG_OFFSET) /* Status Register */
665+#define XIIC_DTR_REG_OFFSET (0x08+XIIC_REG_OFFSET) /* Data Tx Register */
666+#define XIIC_DRR_REG_OFFSET (0x0C+XIIC_REG_OFFSET) /* Data Rx Register */
667+#define XIIC_ADR_REG_OFFSET (0x10+XIIC_REG_OFFSET) /* Address Register */
668+#define XIIC_TFO_REG_OFFSET (0x14+XIIC_REG_OFFSET) /* Tx FIFO Occupancy */
669+#define XIIC_RFO_REG_OFFSET (0x18+XIIC_REG_OFFSET) /* Rx FIFO Occupancy */
670+#define XIIC_TBA_REG_OFFSET (0x1C+XIIC_REG_OFFSET) /* 10 Bit Address reg */
671+#define XIIC_RFD_REG_OFFSET (0x20+XIIC_REG_OFFSET) /* Rx FIFO Depth reg */
672+#define XIIC_GPO_REG_OFFSET (0x24+XIIC_REG_OFFSET) /* Output Register */
673+
674+/* Control Register masks */
675+#define XIIC_CR_ENABLE_DEVICE_MASK 0x01 /* Device enable = 1 */
676+#define XIIC_CR_TX_FIFO_RESET_MASK 0x02 /* Transmit FIFO reset=1 */
677+#define XIIC_CR_MSMS_MASK 0x04 /* Master starts Txing=1 */
678+#define XIIC_CR_DIR_IS_TX_MASK 0x08 /* Dir of tx. Txing=1 */
679+#define XIIC_CR_NO_ACK_MASK 0x10 /* Tx Ack. NO ack = 1 */
680+#define XIIC_CR_REPEATED_START_MASK 0x20 /* Repeated start = 1 */
681+#define XIIC_CR_GENERAL_CALL_MASK 0x40 /* Gen Call enabled = 1 */
682+
683+/* Status Register masks */
684+#define XIIC_SR_GEN_CALL_MASK 0x01 /* 1=a mstr issued a GC */
685+#define XIIC_SR_ADDR_AS_SLAVE_MASK 0x02 /* 1=when addr as slave */
686+#define XIIC_SR_BUS_BUSY_MASK 0x04 /* 1 = bus is busy */
687+#define XIIC_SR_MSTR_RDING_SLAVE_MASK 0x08 /* 1=Dir: mstr <-- slave */
688+#define XIIC_SR_TX_FIFO_FULL_MASK 0x10 /* 1 = Tx FIFO full */
689+#define XIIC_SR_RX_FIFO_FULL_MASK 0x20 /* 1 = Rx FIFO full */
690+#define XIIC_SR_RX_FIFO_EMPTY_MASK 0x40 /* 1 = Rx FIFO empty */
691+#define XIIC_SR_TX_FIFO_EMPTY_MASK 0x80 /* 1 = Tx FIFO empty */
692+
693+/* Interrupt Status Register masks Interrupt occurs when... */
694+#define XIIC_INTR_ARB_LOST_MASK 0x01 /* 1 = arbitration lost */
695+#define XIIC_INTR_TX_ERROR_MASK 0x02 /* 1=Tx error/msg complete */
696+#define XIIC_INTR_TX_EMPTY_MASK 0x04 /* 1 = Tx FIFO/reg empty */
697+#define XIIC_INTR_RX_FULL_MASK 0x08 /* 1=Rx FIFO/reg=OCY level */
698+#define XIIC_INTR_BNB_MASK 0x10 /* 1 = Bus not busy */
699+#define XIIC_INTR_AAS_MASK 0x20 /* 1 = when addr as slave */
700+#define XIIC_INTR_NAAS_MASK 0x40 /* 1 = not addr as slave */
701+#define XIIC_INTR_TX_HALF_MASK 0x80 /* 1 = TX FIFO half empty */
702+
703+/* The following constants specify the depth of the FIFOs */
704+#define IIC_RX_FIFO_DEPTH 16 /* Rx fifo capacity */
705+#define IIC_TX_FIFO_DEPTH 16 /* Tx fifo capacity */
706+
707+/* The following constants specify groups of interrupts that are typically
709+ * enabled or disabled at the same time
709+ */
710+#define XIIC_TX_INTERRUPTS \
711+(XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)
712+
713+#define XIIC_TX_RX_INTERRUPTS (XIIC_INTR_RX_FULL_MASK | XIIC_TX_INTERRUPTS)
714+
715+/* The following constants are used with the following macros to specify the
716+ * operation, a read or write operation.
717+ */
718+#define XIIC_READ_OPERATION 1
719+#define XIIC_WRITE_OPERATION 0
720+
721+/*
722+ * Tx Fifo upper bit masks.
723+ */
724+#define XIIC_TX_DYN_START_MASK 0x0100 /* 1 = Set dynamic start */
725+#define XIIC_TX_DYN_STOP_MASK 0x0200 /* 1 = Set dynamic stop */
726+
727+/*
728+ * The following constants define the register offsets for the Interrupt
729+ * registers. There are some holes in the memory map for reserved addresses
730+ * to allow other registers to be added and still match the memory map of the
731+ * interrupt controller registers
732+ */
733+#define XIIC_DGIER_OFFSET 0x1C /* Device Global Interrupt Enable Register */
734+#define XIIC_IISR_OFFSET 0x20 /* Interrupt Status Register */
735+#define XIIC_IIER_OFFSET 0x28 /* Interrupt Enable Register */
736+#define XIIC_RESETR_OFFSET 0x40 /* Reset Register */
737+
738+#define XIIC_RESET_MASK 0xAUL
739+
740+/*
741+ * The following constant is used for the device global interrupt enable
742+ * register, to enable all interrupts for the device, this is the only bit
743+ * in the register
744+ */
745+#define XIIC_GINTR_ENABLE_MASK 0x80000000UL
746+
747+/***************** Macros (Inline Functions) Definitions *********************/
748+
749+
750+/******************************************************************************
751+*
752+* This macro disables all interrupts for the device by writing to the Global
753+* interrupt enable register. This register provides the ability to disable
754+* interrupts without any modifications to the interrupt enable register such
755+* that it is minimal effort to restore the interrupts to the previous enabled
756+* state. The corresponding function, XIIC_GINTR_ENABLE, is provided to
757+* restore the interrupts to the previous enabled state. This function is
758+* designed to be used in critical sections of device drivers such that it is
759+* not necessary to disable other device interrupts.
760+*
761+* @param Instance local i2c instance
762+*
763+* @return None.
764+*
765+* @note C-Style signature:
766+* void XIIC_GINTR_DISABLE(i2c);
767+*
768+******************************************************************************/
769+#define XIIC_GINTR_DISABLE(Instance) \
770+ xiic_setreg32(Instance, XIIC_DGIER_OFFSET, 0)
771+
772+/******************************************************************************
773+*
774+* This macro writes to the global interrupt enable register to enable
775+* interrupts from the device. This register provides the ability to enable
776+* interrupts without any modifications to the interrupt enable register such
777+* that it is minimal effort to restore the interrupts to the previous enabled
778+* state. This function does not enable individual interrupts as the interrupt
779+* enable register must be set appropriately. This function is designed to be
780+* used in critical sections of device drivers such that it is not necessary to
781+* disable other device interrupts.
782+*
783+* @param Instance local I2C instance
784+*
785+* @return None.
786+*
787+* @note C-Style signature:
788+* void XIIC_GINTR_ENABLE(i2c);
789+*
790+******************************************************************************/
791+#define XIIC_GINTR_ENABLE(Instance) \
792+ xiic_setreg32(Instance, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK)
793+
794+/******************************************************************************
795+*
796+*
797+* This function sets the Interrupt status register to the specified value.
798+* This register indicates the status of interrupt sources for the device.
799+* The status is independent of whether interrupts are enabled such that
800+* the status register may also be polled when interrupts are not enabled.
801+*
802+* Each bit of the register correlates to a specific interrupt source within the
803+* IIC device. All bits of this register are latched. Setting a bit which is 0
804+* within this register causes an interrupt to be generated. The device global
805+* interrupt enable register and the device interrupt enable register must be set
806+* appropriately to allow an interrupt to be passed out of the device. The
807+* interrupt is cleared by writing to this register with the bits to be
808+* cleared set to a one and all others to zero. This register implements a
809+* toggle on write functionality meaning any bits which are set in the value
810+* written cause the bits in the register to change to the opposite state.
811+*
812+* This function writes only the specified value to the register such that
813+* some status bits may be set and others cleared. It is the caller's
814+* responsibility to get the value of the register prior to setting the value
815+* to prevent a destructive behavior.
816+*
817+* @param Instance local I2C instance
818+* @param Status contains the value to be written to the Interrupt
819+* status register.
820+*
821+* @return None.
822+*
823+* @note C-Style signature:
824+* void XIIC_WRITE_IISR(i2c, u32 Status);
825+*
826+******************************************************************************/
827+#define XIIC_WRITE_IISR(Instance, Status) \
828+ xiic_setreg32(Instance, XIIC_IISR_OFFSET, (Status))
829+
830+/******************************************************************************
831+*
832+*
833+* This function gets the contents of the Interrupt Status Register.
834+* This register indicates the status of interrupt sources for the device.
835+* The status is independent of whether interrupts are enabled such
836+* that the status register may also be polled when interrupts are not enabled.
837+*
838+* Each bit of the register correlates to a specific interrupt source within the
839+* device. All bits of this register are latched. Writing a 1 to a bit within
840+* this register causes an interrupt to be generated if enabled in the interrupt
841+* enable register and the global interrupt enable is set. Since the status is
842+* latched, each status bit must be acknowledged in order for the bit in the
843+* status register to be updated. Each bit can be acknowledged by writing a
844+* 0 to the bit in the status register.
845+
846+* @param Instance local I2C instance
847+*
848+* @return A status which contains the value read from the Interrupt
849+* Status Register.
850+*
851+* @note C-Style signature:
852+* u32 XIIC_READ_IISR(i2c);
853+*
854+******************************************************************************/
855+#define XIIC_READ_IISR(Instance) \
856+ xiic_getreg32(Instance, XIIC_IISR_OFFSET)
857+
858+/******************************************************************************
859+*
860+* This function sets the contents of the Interrupt Enable Register . This
861+* register controls which interrupt sources of the IIC device are allowed to
862+* generate an interrupt. The global interrupt enable register and the device
863+* interrupt enable register must also be set appropriately for an interrupt
864+* to be passed out of the device.
865+*
866+* Each bit of the register correlates to a specific interrupt source within the
867+* device. Setting a bit in this register enables the interrupt source to gen
868+* an interrupt. Clearing a bit in this register disables interrupt generation
869+* for that interrupt source.
870+*
871+* This function writes only the specified value to the register such that
872+* some interrupt sources may be enabled and others disabled. It is the
873+* caller's responsibility to get the value of the interrupt enable register
874+* prior to setting the value to prevent a destructive behavior.
875+*
876+* @param Instance local I2C instance
877+* @param Enable contains the value to be written to the Interrupt Enable
878+* Register.
879+*
880+* @return None
881+*
882+* @note C-Style signature:
883+* void XIIC_WRITE_IIER(i2c, u32 Enable);
884+*
885+******************************************************************************/
886+#define XIIC_WRITE_IIER(Instance, Enable) \
887+ xiic_setreg32(Instance, XIIC_IIER_OFFSET, (Enable))
888+
889+/******************************************************************************
890+*
891+*
892+* This function gets the Interrupt enable register contents. This register
893+* controls which interrupt sources of the device are allowed to generate an
894+* interrupt. The global interrupt enable register and the device interrupt
895+* enable register must also be set appropriately for an interrupt to be
896+* passed out of the IIC device.
897+*
898+* Each bit of the register correlates to a specific interrupt source within the
899+* IIC device. Setting a bit in this register enables the interrupt source to
900+* generate an interrupt. Clearing a bit in this register disables interrupt
901+* generation for that interrupt source.
902+*
903+* @param Instance local I2C instance
904+*
905+* @return The contents read from the Interrupt Enable Register.
906+*
907+* @note C-Style signature:
908+* u32 XIIC_READ_IIER(i2c)
909+*
910+******************************************************************************/
911+#define XIIC_READ_IIER(Instance) \
912+ xiic_getreg32(Instance, XIIC_IIER_OFFSET)
913+
914+/************************** Function Prototypes ******************************/
915+
916+/******************************************************************************
917+*
918+* This macro disables the specified interrupts in the Interrupt enable
919+* register. It is non-destructive in that the register is read and only the
920+* interrupts specified is changed.
921+*
922+* @param BaseAddress is the base address of the IIC device.
923+* @param InterruptMask contains the interrupts to be disabled
924+*
925+* @return None.
926+*
927+* @note Signature:
928+* void XIic_mDisableIntr(u32 BaseAddress, u32 InterruptMask);
929+*
930+******************************************************************************/
931+#define XIic_mDisableIntr(Instance, InterruptMask) \
932+ XIIC_WRITE_IIER((Instance), XIIC_READ_IIER(Instance) & ~(InterruptMask))
933+
934+/******************************************************************************
935+*
936+* This macro enables the specified interrupts in the Interrupt enable
937+* register. It is non-destructive in that the register is read and only the
938+* interrupts specified is changed.
939+*
940+* @param BaseAddress is the base address of the IIC device.
941+* @param InterruptMask contains the interrupts to be disabled
942+*
943+* @return None.
944+*
945+* @note Signature:
946+* void XIic_mEnableIntr(u32 BaseAddress, u32 InterruptMask);
947+*
948+******************************************************************************/
949+#define XIic_mEnableIntr(Instance, InterruptMask) \
950+ XIIC_WRITE_IIER((Instance), XIIC_READ_IIER(Instance) | (InterruptMask))
951+
952+/******************************************************************************
953+*
954+* This macro clears the specified interrupt in the Interrupt status
955+* register. It is non-destructive in that the register is read and only the
956+* interrupt specified is cleared. Clearing an interrupt acknowledges it.
957+*
958+* @param BaseAddress is the base address of the IIC device.
959+* @param InterruptMask contains the interrupts to be disabled
960+*
961+* @return None.
962+*
963+* @note Signature:
964+* void XIic_mClearIntr(u32 BaseAddress, u32 InterruptMask);
965+*
966+******************************************************************************/
967+#define XIic_mClearIntr(Instance, InterruptMask) \
968+ XIIC_WRITE_IISR((Instance), XIIC_READ_IISR(Instance) & (InterruptMask))
969+
970+/******************************************************************************
971+*
972+* This macro clears and enables the specified interrupt in the Interrupt
973+* status and enable registers. It is non-destructive in that the registers are
974+* read and only the interrupt specified is modified.
975+* Clearing an interrupt acknowledges it.
976+*
977+* @param BaseAddress is the base address of the IIC device.
978+* @param InterruptMask contains the interrupts to be cleared and enabled
979+*
980+* @return None.
981+*
982+* @note Signature:
983+* void XIic_mClearEnableIntr(u32 BaseAddress, u32 InterruptMask);
984+*
985+******************************************************************************/
986+#define XIic_mClearEnableIntr(Instance, InterruptMask) { \
987+ XIIC_WRITE_IISR(Instance, \
988+ (XIIC_READ_IISR(Instance) & (InterruptMask))); \
989+ XIIC_WRITE_IIER(Instance, \
990+ (XIIC_READ_IIER(Instance) | (InterruptMask))); \
991+}
992+
993+
994+#define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
995+#define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos)
996+
997+static void xiic_clear_rx_fifo(struct xiic_i2c *i2c)
998+{
999+ u8 sr;
1000+ for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
1001+ !(sr & XIIC_SR_RX_FIFO_EMPTY_MASK);
1002+ sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET))
1003+ xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
1004+}
1005+
1006+/******************************************************************************
1007+ *
1008+ * Initialize the IIC core for Dynamic Functionality.
1009+ *
1010+ * @param i2c local I2C instance
1011+ *
1012+ * @return None.
1013+ *
1014+ * @note None.
1015+ *
1016+ ******************************************************************************/
1017+static void xiic_reinit(struct xiic_i2c *i2c)
1018+{
1019+ xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
1020+
1021+ /* Set receive Fifo depth to maximum (zero based). */
1022+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, IIC_RX_FIFO_DEPTH - 1);
1023+
1024+ /* Reset Tx Fifo. */
1025+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);
1026+
1027+ /* Enable IIC Device, remove Tx Fifo reset & disable general call. */
1028+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);
1029+
1030+ /* make sure RX fifo is empty */
1031+ xiic_clear_rx_fifo(i2c);
1032+
1033+ /* Enable interrupts */
1034+ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
1035+
1036+ XIic_mClearEnableIntr(i2c, XIIC_INTR_AAS_MASK |
1037+ XIIC_INTR_ARB_LOST_MASK);
1038+}
1039+
1040+/******************************************************************************
1041+ *
1042+ * De-Initialize the IIC core.
1043+ *
1044+ * @param i2c local I2C instance
1045+ *
1046+ * @return None.
1047+ *
1048+ * @note None.
1049+ *
1050+ ******************************************************************************/
1051+static void xiic_deinit(struct xiic_i2c *i2c)
1052+{
1053+ u8 cr;
1054+
1055+ xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
1056+
1057+ /* Disable IIC Device. */
1058+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
1059+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & ~XIIC_CR_ENABLE_DEVICE_MASK);
1060+}
1061+
1062+
1063+
1064+/*****************************************************************************
1065+ *
1066+ *
1067+ * This function is called when the receive register is full. The number
1068+ * of bytes received to cause the interrupt is adjustable using the Receive FIFO
1069+ * Depth register. The number of bytes in the register is read in the Receive
1070+ * FIFO occupancy register. Both these registers are zero based values (0-15)
1071+ * such that a value of zero indicates 1 byte.
1072+ *
1073+ * For a Master Receiver to properly signal the end of a message, the data must
1074+ * be read in up to the message length - 1, where control register bits will be
1075+ * set for bus controls to occur on reading of the last byte.
1076+ *
1077+ * @param InstancePtr is a pointer to the XIic instance to be worked on.
1078+ *
1079+ * @return None.
1080+ *
1081+ * @note None.
1082+ *
1083+ ******************************************************************************/
1084+static void xiic_read_rx(struct xiic_i2c *i2c)
1085+{
1086+ u8 bytes_in_fifo;
1087+ int i;
1088+
1089+ bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1;
1090+
1091+ dev_dbg(i2c->adap.dev.parent, "%s entry, bytes in fifo: %d, msg: %d"
1092+ ", SR: 0x%x, CR: 0x%x\n",
1093+ __func__, bytes_in_fifo, xiic_rx_space(i2c),
1094+ xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
1095+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
1096+
1097+ if (bytes_in_fifo > xiic_rx_space(i2c))
1098+ bytes_in_fifo = xiic_rx_space(i2c);
1099+
1100+ for (i = 0; i < bytes_in_fifo; i++)
1101+ i2c->rx_msg->buf[i2c->rx_pos++] =
1102+ xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
1103+
1104+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET,
1105+ (xiic_rx_space(i2c) > IIC_RX_FIFO_DEPTH) ?
1106+ IIC_RX_FIFO_DEPTH - 1 : xiic_rx_space(i2c) - 1);
1107+}
1108+
1109+/******************************************************************************
1110+ *
1111+ * This function fills the FIFO using the occupancy register to determine the
1112+ * available space to be filled. When the repeated start option is on, the last
1113+ * byte is withheld to allow the control register to be properly set on the last
1114+ * byte.
1115+ *
1116+ * @param InstancePtr is a pointer to the XIic instance to be worked on.
1117+ *
1118+ * @param Role indicates the role of this IIC device, a slave or a master, on
1119+ * the IIC bus (XIIC_SLAVE_ROLE or XIIC_MASTER_ROLE)
1120+ *
1121+ * @return
1122+ *
1123+ * None.
1124+ *
1125+ * @note
1126+ *
1127+ * None.
1128+ *
1129+ ******************************************************************************/
1130+static int xiic_tx_fifo_space(struct xiic_i2c *i2c)
1131+{
1132+ return IIC_TX_FIFO_DEPTH - xiic_getreg8(i2c, XIIC_TFO_REG_OFFSET) - 1;
1133+}
1134+
1135+static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
1136+{
1137+ u8 fifo_space = xiic_tx_fifo_space(i2c);
1138+ int len = xiic_tx_space(i2c);
1139+
1140+ len = (len > fifo_space) ? fifo_space : len;
1141+
1142+ dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
1143+ __func__, len, fifo_space);
1144+
1145+ while (len--) {
1146+ u16 data = i2c->tx_msg->buf[i2c->tx_pos++];
1147+ if ((xiic_tx_space(i2c) == 0) && (i2c->nmsgs == 1)) {
1148+ /* last message in transfer -> STOP */
1149+ data |= XIIC_TX_DYN_STOP_MASK;
1150+ dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
1151+
1152+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
1153+ } else
1154+ xiic_setreg8(i2c, XIIC_DTR_REG_OFFSET, data);
1155+ }
1156+}
1157+
1158+static void xiic_wakeup(struct xiic_i2c *i2c, int code)
1159+{
1160+ i2c->tx_msg = NULL;
1161+ i2c->rx_msg = NULL;
1162+ i2c->nmsgs = 0;
1163+ i2c->state = code;
1164+ wake_up(&i2c->wait);
1165+}
1166+
1167+static void xiic_process(struct xiic_i2c *i2c)
1168+{
1169+ u32 pend, isr, ier;
1170+ u32 Clear = 0;
1171+
1172+ /* Get the interrupt Status from the IPIF. There is no clearing of
1173+ * interrupts in the IPIF. Interrupts must be cleared at the source.
1174+ * To find which interrupts are pending; AND interrupts pending with
1175+ * interrupts masked.
1176+ */
1177+ isr = XIIC_READ_IISR(i2c);
1178+ ier = XIIC_READ_IIER(i2c);
1179+ pend = isr & ier;
1180+
1181+ dev_dbg(i2c->adap.dev.parent, "%s entry, IER: 0x%x, ISR: 0x%x, "
1182+ "pend: 0x%x, SR: 0x%x, msg: %p, nmsgs: %d\n",
1183+ __func__, ier, isr, pend, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
1184+ i2c->tx_msg, i2c->nmsgs);
1185+
1186+	/* Do not process a device's interrupts if the device has no
1187+ * interrupts pending
1188+ */
1189+ if (!pend)
1190+ return;
1191+
1192+ /* Service requesting interrupt */
1193+ if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
1194+ ((pend & XIIC_INTR_TX_ERROR_MASK) &&
1195+ !(pend & XIIC_INTR_RX_FULL_MASK))) {
1196+		/* bus arbitration lost, or...
1197+ * Transmit error _OR_ RX completed
1198+ * if this happens when RX_FULL is not set
1199+ * this is probably a TX error
1200+ */
1201+
1202+ dev_dbg(i2c->adap.dev.parent,
1203+ "%s error\n", __func__);
1204+
1205+		/* dynamic mode seems to suffer from problems if we just flush
1206+ * fifos and the next message is a TX with len 0 (only addr)
1207+ * reset the IP instead of just flush fifos
1208+ */
1209+ xiic_reinit(i2c);
1210+
1211+ if (i2c->tx_msg)
1212+ xiic_wakeup(i2c, STATE_ERROR);
1213+
1214+ } else if (pend & XIIC_INTR_RX_FULL_MASK) {
1215+ /* Receive register/FIFO is full */
1216+
1217+ Clear = XIIC_INTR_RX_FULL_MASK;
1218+ if (!i2c->rx_msg) {
1219+ dev_dbg(i2c->adap.dev.parent,
1220+ "%s unexpexted RX IRQ\n", __func__);
1221+ xiic_clear_rx_fifo(i2c);
1222+ goto out;
1223+ }
1224+
1225+ xiic_read_rx(i2c);
1226+ if (xiic_rx_space(i2c) == 0) {
1227+ /* this is the last part of the message */
1228+ i2c->rx_msg = NULL;
1229+
1230+ /* also clear TX error if there (RX complete) */
1231+ Clear |= (isr & XIIC_INTR_TX_ERROR_MASK);
1232+
1233+ dev_dbg(i2c->adap.dev.parent,
1234+ "%s end of message, nmsgs: %d\n",
1235+ __func__, i2c->nmsgs);
1236+
1237+ /* send next message if this wasn't the last,
1238+			 * otherwise the transfer will be finalised when
1239+ * receiving the bus not busy interrupt
1240+ */
1241+ if (i2c->nmsgs > 1) {
1242+ i2c->nmsgs--;
1243+ i2c->tx_msg++;
1244+ dev_dbg(i2c->adap.dev.parent,
1245+ "%s will start next...\n", __func__);
1246+
1247+ __xiic_start_xfer(i2c);
1248+ }
1249+ }
1250+ } else if (pend & XIIC_INTR_BNB_MASK) {
1251+ /* IIC bus has transitioned to not busy */
1252+ Clear = XIIC_INTR_BNB_MASK;
1253+
1254+ /* The bus is not busy, disable BusNotBusy interrupt */
1255+ XIic_mDisableIntr(i2c, XIIC_INTR_BNB_MASK);
1256+
1257+ if (!i2c->tx_msg)
1258+ goto out;
1259+
1260+ if ((i2c->nmsgs == 1) && !i2c->rx_msg &&
1261+ xiic_tx_space(i2c) == 0)
1262+ xiic_wakeup(i2c, STATE_DONE);
1263+ else
1264+ xiic_wakeup(i2c, STATE_ERROR);
1265+
1266+ } else if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
1267+		/* Transmit register/FIFO is empty or half empty */
1268+
1269+ Clear = pend &
1270+ (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK);
1271+
1272+ if (!i2c->tx_msg) {
1273+ dev_dbg(i2c->adap.dev.parent,
1274+ "%s unexpexted TX IRQ\n", __func__);
1275+ goto out;
1276+ }
1277+
1278+ xiic_fill_tx_fifo(i2c);
1279+
1280+ /* current message sent and there is space in the fifo */
1281+ if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
1282+ dev_dbg(i2c->adap.dev.parent,
1283+ "%s end of message sent, nmsgs: %d\n",
1284+ __func__, i2c->nmsgs);
1285+ if (i2c->nmsgs > 1) {
1286+ i2c->nmsgs--;
1287+ i2c->tx_msg++;
1288+ __xiic_start_xfer(i2c);
1289+ } else {
1290+ XIic_mDisableIntr(i2c, XIIC_INTR_TX_HALF_MASK);
1291+
1292+ dev_err(i2c->adap.dev.parent,
1293+ "%s Got TX IRQ but no more to do...\n",
1294+ __func__);
1295+ }
1296+ } else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1))
1297+ /* current frame is sent and is last,
1298+ * make sure to disable tx half
1299+ */
1300+ XIic_mDisableIntr(i2c, XIIC_INTR_TX_HALF_MASK);
1301+ } else {
1302+ /* got IRQ which is not acked */
1303+ dev_err(i2c->adap.dev.parent, "%s Got unexpected IRQ\n",
1304+ __func__);
1305+ Clear = pend;
1306+ }
1307+out:
1308+ dev_dbg(i2c->adap.dev.parent, "%s Clear: 0x%x\n", __func__, Clear);
1309+
1310+ XIIC_WRITE_IISR(i2c, Clear);
1311+}
1312+
1313+/******************************************************************************
1314+ *
1315+ * This function checks to see if the IIC bus is busy. If so, it will enable
1316+ * the bus not busy interrupt such that the driver is notified when the bus
1317+ * is no longer busy.
1318+ *
1319+ * @param InstancePtr points to the Iic instance to be worked on.
1320+ *
1321+ * @return	0 if the IIC bus is not busy, otherwise -EBUSY.
1322+ *
1323+ * @note The BusNotBusy interrupt is enabled which will update the
1324+ * EventStatus when the bus is no longer busy.
1325+ *
1326+ ******************************************************************************/
1327+static int xiic_bus_busy(struct xiic_i2c *i2c)
1328+{
1329+ u8 sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
1330+
1331+ return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0;
1332+}
1333+
1334+static int xiic_busy(struct xiic_i2c *i2c)
1335+{
1336+ int tries = 3;
1337+ int err;
1338+ if (i2c->tx_msg)
1339+ return -EBUSY;
1340+
1341+ /* for instance if previous transfer was terminated due to TX error
1342+	 * it might be that the bus is on its way to becoming available
1343+ * give it at most 3 ms to wake
1344+ */
1345+ err = xiic_bus_busy(i2c);
1346+ while (err && tries--) {
1347+ mdelay(1);
1348+ err = xiic_bus_busy(i2c);
1349+ }
1350+
1351+ return err;
1352+}
1353+
1354+static void xiic_dump_regs(struct xiic_i2c *i2c, const char *caller)
1355+{
1356+ dev_dbg(i2c->adap.dev.parent, "%s msg: %p, nmsgs: %d, "
1357+ "ISR: 0x%x, CR: 0x%x, SR: 0x%x\n",
1358+ caller, i2c->tx_msg, i2c->nmsgs, XIIC_READ_IISR(i2c),
1359+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET),
1360+ xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
1361+}
1362+
1363+static void xiic_start_recv(struct xiic_i2c *i2c)
1364+{
1365+ u8 rx_watermark;
1366+ struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
1367+
1368+ xiic_dump_regs(i2c, __func__);
1369+
1370+ /* Clear and enable Rx full interrupt. */
1371+ XIic_mClearEnableIntr(i2c, XIIC_INTR_RX_FULL_MASK |
1372+ XIIC_INTR_TX_ERROR_MASK);
1373+
1374+ /* we want to get all but last byte, because the TX_ERROR IRQ is used
1375+	 * to indicate error ACK on the address, and negative ack on the last
1376+ * received byte, so to not mix them receive all but last.
1377+ * In the case where there is only one byte to receive
1378+ * we can check if ERROR and RX full is set at the same time
1379+ */
1380+ rx_watermark = msg->len;
1381+ if (rx_watermark > IIC_RX_FIFO_DEPTH)
1382+ rx_watermark = IIC_RX_FIFO_DEPTH;
1383+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
1384+
1385+ if (!(msg->flags & I2C_M_NOSTART))
1386+ /* write the address */
1387+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
1388+ (msg->addr << 1) | XIIC_READ_OPERATION |
1389+ XIIC_TX_DYN_START_MASK);
1390+
1391+ XIic_mClearEnableIntr(i2c,
1392+ XIIC_INTR_BNB_MASK);
1393+
1394+ xiic_dump_regs(i2c, "after address");
1395+
1396+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
1397+ msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
1398+ if (i2c->nmsgs == 1) {
1399+ /* very last, enable bus busy as well */
1400+ XIic_mClearEnableIntr(i2c, XIIC_INTR_BNB_MASK);
1401+ }
1402+
1403+ xiic_dump_regs(i2c, "xiic_start_recv exit");
1404+
1405+ /* the message is tx:ed */
1406+ i2c->tx_pos = msg->len;
1407+}
1408+
1409+static void xiic_start_send(struct xiic_i2c *i2c)
1410+{
1411+ struct i2c_msg *msg = i2c->tx_msg;
1412+
1413+ XIic_mClearIntr(i2c, XIIC_INTR_TX_ERROR_MASK);
1414+
1415+ dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d, "
1416+ "ISR: 0x%x, CR: 0x%x\n",
1417+ __func__, msg, msg->len, XIIC_READ_IISR(i2c),
1418+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
1419+
1420+ if (!(msg->flags & I2C_M_NOSTART)) {
1421+ /* write the address */
1422+ u16 data = ((msg->addr << 1) & 0xfe) | XIIC_WRITE_OPERATION |
1423+ XIIC_TX_DYN_START_MASK;
1424+ if ((i2c->nmsgs == 1) && msg->len == 0)
1425+ /* no data and last message -> add STOP */
1426+ data |= XIIC_TX_DYN_STOP_MASK;
1427+
1428+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
1429+ }
1430+
1431+ xiic_fill_tx_fifo(i2c);
1432+
1433+ /* Clear any pending Tx empty, Tx Error and then enable them. */
1434+ XIic_mClearEnableIntr(i2c,
1435+ XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_ERROR_MASK |
1436+ XIIC_INTR_BNB_MASK);
1437+}
1438+
1439+static inline void xiic_setreg8(struct xiic_i2c *i2c, int reg, u8 value)
1440+{
1441+ iowrite8(value, i2c->base + reg);
1442+}
1443+
1444+static inline u8 xiic_getreg8(struct xiic_i2c *i2c, int reg)
1445+{
1446+ return ioread8(i2c->base + reg);
1447+}
1448+
1449+static inline void xiic_setreg16(struct xiic_i2c *i2c, int reg, u16 value)
1450+{
1451+ iowrite16(value, i2c->base + reg);
1452+}
1453+
1454+static inline void xiic_setreg32(struct xiic_i2c *i2c, int reg, int value)
1455+{
1456+ iowrite32(value, i2c->base + reg);
1457+}
1458+
1459+static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg)
1460+{
1461+ return ioread32(i2c->base + reg);
1462+}
1463+
1464+static irqreturn_t xiic_isr(int irq, void *dev_id)
1465+{
1466+ struct xiic_i2c *i2c = dev_id;
1467+ spin_lock(&i2c->lock);
1468+ XIIC_GINTR_DISABLE(i2c);
1469+
1470+ dev_dbg(i2c->adap.dev.parent, "%s entry\n", __func__);
1471+
1472+ xiic_process(i2c);
1473+
1474+ XIIC_GINTR_ENABLE(i2c);
1475+
1476+ spin_unlock(&i2c->lock);
1477+
1478+ return IRQ_HANDLED;
1479+}
1480+
1481+static void __xiic_start_xfer(struct xiic_i2c *i2c)
1482+{
1483+ int first = 1;
1484+ int fifo_space = xiic_tx_fifo_space(i2c);
1485+ dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n",
1486+ __func__, i2c->tx_msg, fifo_space);
1487+
1488+ if (!i2c->tx_msg)
1489+ return;
1490+
1491+ i2c->rx_pos = 0;
1492+ i2c->tx_pos = 0;
1493+ i2c->state = STATE_START;
1494+ while ((fifo_space >= 2) && (first || (i2c->nmsgs > 1))) {
1495+ if (!first) {
1496+ i2c->nmsgs--;
1497+ i2c->tx_msg++;
1498+ i2c->tx_pos = 0;
1499+ } else
1500+ first = 0;
1501+
1502+ if (i2c->tx_msg->flags & I2C_M_RD) {
1503+			/* we don't dare putting several reads in the FIFO */
1504+ xiic_start_recv(i2c);
1505+ return;
1506+ } else {
1507+ xiic_start_send(i2c);
1508+ if (xiic_tx_space(i2c) != 0) {
1509+ /* the message could not be completely sent */
1510+ break;
1511+ }
1512+ }
1513+
1514+ fifo_space = xiic_tx_fifo_space(i2c);
1515+ }
1516+
1517+ /* there are more messages or the current one could not be completely
1518+ * put into the FIFO, also enable the half empty interrupt
1519+ */
1520+ if (i2c->nmsgs > 1 || xiic_tx_space(i2c))
1521+ XIic_mClearEnableIntr(i2c, XIIC_INTR_TX_HALF_MASK);
1522+
1523+}
1524+
1525+static void xiic_start_xfer(struct xiic_i2c *i2c)
1526+{
1527+ unsigned long flags;
1528+
1529+ spin_lock_irqsave(&i2c->lock, flags);
1530+ xiic_reinit(i2c);
1531+ XIIC_GINTR_DISABLE(i2c);
1532+ spin_unlock_irqrestore(&i2c->lock, flags);
1533+
1534+ __xiic_start_xfer(i2c);
1535+
1536+ XIIC_GINTR_ENABLE(i2c);
1537+}
1538+
1539+static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1540+{
1541+ struct xiic_i2c *i2c = i2c_get_adapdata(adap);
1542+ int err;
1543+
1544+ dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
1545+ xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
1546+
1547+ err = xiic_busy(i2c);
1548+ if (err) {
1549+ xiic_dump_regs(i2c, "bus busy");
1550+ return err;
1551+ }
1552+
1553+ i2c->tx_msg = msgs;
1554+ i2c->nmsgs = num;
1555+
1556+ xiic_start_xfer(i2c);
1557+
1558+ if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
1559+ (i2c->state == STATE_DONE), HZ))
1560+ return (i2c->state == STATE_DONE) ? num : -EIO;
1561+ else {
1562+ xiic_dump_regs(i2c, __func__);
1563+ i2c->tx_msg = NULL;
1564+ i2c->rx_msg = NULL;
1565+ i2c->nmsgs = 0;
1566+ return -ETIMEDOUT;
1567+ }
1568+}
1569+
1570+static u32 xiic_func(struct i2c_adapter *adap)
1571+{
1572+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1573+}
1574+
1575+static const struct i2c_algorithm xiic_algorithm = {
1576+ .master_xfer = xiic_xfer,
1577+ .functionality = xiic_func,
1578+};
1579+
1580+static struct i2c_adapter xiic_adapter = {
1581+ .owner = THIS_MODULE,
1582+ .name = DRIVER_NAME,
1583+ .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
1584+ .algo = &xiic_algorithm,
1585+};
1586+
1587+
1588+static int __devinit xiic_i2c_probe(struct platform_device *pdev)
1589+{
1590+ struct xiic_i2c *i2c;
1591+ struct xiic_i2c_platform_data *pdata;
1592+ struct resource *res;
1593+ int ret, irq;
1594+ u8 i;
1595+
1596+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1597+ if (!res)
1598+ return -ENODEV;
1599+
1600+ irq = platform_get_irq(pdev, 0);
1601+ if (irq < 0)
1602+ return -ENODEV;
1603+
1604+ pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
1605+ if (!pdata)
1606+ return -ENODEV;
1607+
1608+ i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
1609+ if (!i2c)
1610+ return -ENOMEM;
1611+
1612+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
1613+ dev_err(&pdev->dev, "Memory region busy\n");
1614+ ret = -EBUSY;
1615+ goto request_mem_failed;
1616+ }
1617+
1618+ i2c->base = ioremap(res->start, resource_size(res));
1619+ if (!i2c->base) {
1620+ dev_err(&pdev->dev, "Unable to map registers\n");
1621+ ret = -EIO;
1622+ goto map_failed;
1623+ }
1624+
1625+ /* hook up driver to tree */
1626+ platform_set_drvdata(pdev, i2c);
1627+ i2c->adap = xiic_adapter;
1628+ i2c_set_adapdata(&i2c->adap, i2c);
1629+ i2c->adap.dev.parent = &pdev->dev;
1630+
1631+ xiic_reinit(i2c);
1632+
1633+ spin_lock_init(&i2c->lock);
1634+ init_waitqueue_head(&i2c->wait);
1635+ ret = request_irq(irq, xiic_isr, 0, pdev->name, i2c);
1636+ if (ret) {
1637+ dev_err(&pdev->dev, "Cannot claim IRQ\n");
1638+ goto request_irq_failed;
1639+ }
1640+
1641+ /* add i2c adapter to i2c tree */
1642+ ret = i2c_add_adapter(&i2c->adap);
1643+ if (ret) {
1644+ dev_err(&pdev->dev, "Failed to add adapter\n");
1645+ goto add_adapter_failed;
1646+ }
1647+
1648+ /* add in known devices to the bus */
1649+ for (i = 0; i < pdata->num_devices; i++)
1650+ i2c_new_device(&i2c->adap, pdata->devices + i);
1651+
1652+ return 0;
1653+
1654+add_adapter_failed:
1655+ free_irq(irq, i2c);
1656+request_irq_failed:
1657+ xiic_deinit(i2c);
1658+ iounmap(i2c->base);
1659+map_failed:
1660+ release_mem_region(res->start, resource_size(res));
1661+request_mem_failed:
1662+ kfree(i2c);
1663+
1664+ return ret;
1665+}
1666+
1667+static int __devexit xiic_i2c_remove(struct platform_device* pdev)
1668+{
1669+ struct xiic_i2c *i2c = platform_get_drvdata(pdev);
1670+ struct resource *res;
1671+
1672+ /* remove adapter & data */
1673+ i2c_del_adapter(&i2c->adap);
1674+
1675+ xiic_deinit(i2c);
1676+
1677+ platform_set_drvdata(pdev, NULL);
1678+
1679+ free_irq(platform_get_irq(pdev, 0), i2c);
1680+
1681+ iounmap(i2c->base);
1682+
1683+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1684+ if (res)
1685+ release_mem_region(res->start, resource_size(res));
1686+
1687+ kfree(i2c);
1688+
1689+ return 0;
1690+}
1691+
1692+
1693+/* work with hotplug and coldplug */
1694+MODULE_ALIAS("platform:"DRIVER_NAME);
1695+
1696+static struct platform_driver xiic_i2c_driver = {
1697+ .probe = xiic_i2c_probe,
1698+ .remove = __devexit_p(xiic_i2c_remove),
1699+ .driver = {
1700+ .owner = THIS_MODULE,
1701+ .name = DRIVER_NAME,
1702+ },
1703+};
1704+
1705+static int __init xiic_i2c_init(void)
1706+{
1707+ return platform_driver_register(&xiic_i2c_driver);
1708+}
1709+
1710+static void __exit xiic_i2c_exit(void)
1711+{
1712+ platform_driver_unregister(&xiic_i2c_driver);
1713+}
1714+
1715+module_init(xiic_i2c_init);
1716+module_exit(xiic_i2c_exit);
1717+
1718+MODULE_AUTHOR("info@mocean-labs.com");
1719+MODULE_DESCRIPTION("Xilinx I2C bus driver");
1720+MODULE_LICENSE("GPL v2");
1721diff -uNr linux-2.6.31/drivers/i2c/busses/Kconfig linux-2.6.31.new/drivers/i2c/busses/Kconfig
1722--- linux-2.6.31/drivers/i2c/busses/Kconfig 2009-10-23 11:18:30.000000000 -0700
1723+++ linux-2.6.31.new/drivers/i2c/busses/Kconfig 2009-10-23 11:17:29.000000000 -0700
1724@@ -433,6 +433,16 @@
1725 This driver can also be built as a module. If so, the module
1726 will be called i2c-ocores.
1727
1728+config I2C_XILINX
1729+ tristate "Xilinx I2C Controller"
1730+ depends on EXPERIMENTAL && HAS_IOMEM
1731+ help
1732+ If you say yes to this option, support will be included for the
1733+ Xilinx I2C controller.
1734+
1735+ This driver can also be built as a module. If so, the module
1736+	  will be called i2c-xiic.
1737+
1738 config I2C_OMAP
1739 tristate "OMAP I2C adapter"
1740 depends on ARCH_OMAP
1741diff -uNr linux-2.6.31/drivers/i2c/busses/Makefile linux-2.6.31.new/drivers/i2c/busses/Makefile
1742--- linux-2.6.31/drivers/i2c/busses/Makefile 2009-10-23 11:18:30.000000000 -0700
1743+++ linux-2.6.31.new/drivers/i2c/busses/Makefile 2009-10-23 11:17:29.000000000 -0700
1744@@ -40,6 +40,7 @@
1745 obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
1746 obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
1747 obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
1748+obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
1749 obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
1750 obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
1751 obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
1752diff -uNr linux-2.6.31/drivers/input/touchscreen/tsc2007.c linux-2.6.31.new/drivers/input/touchscreen/tsc2007.c
1753--- linux-2.6.31/drivers/input/touchscreen/tsc2007.c 2009-10-23 11:18:30.000000000 -0700
1754+++ linux-2.6.31.new/drivers/input/touchscreen/tsc2007.c 2009-10-23 11:17:19.000000000 -0700
1755@@ -21,15 +21,14 @@
1756 */
1757
1758 #include <linux/module.h>
1759-#include <linux/hrtimer.h>
1760 #include <linux/slab.h>
1761 #include <linux/input.h>
1762 #include <linux/interrupt.h>
1763 #include <linux/i2c.h>
1764 #include <linux/i2c/tsc2007.h>
1765
1766-#define TS_POLL_DELAY (10 * 1000) /* ns delay before the first sample */
1767-#define TS_POLL_PERIOD (5 * 1000) /* ns delay between samples */
1768+#define TS_POLL_DELAY 1 /* ms delay between samples */
1769+#define TS_POLL_PERIOD 1 /* ms delay between samples */
1770
1771 #define TSC2007_MEASURE_TEMP0 (0x0 << 4)
1772 #define TSC2007_MEASURE_AUX (0x2 << 4)
1773@@ -70,17 +69,15 @@
1774 struct tsc2007 {
1775 struct input_dev *input;
1776 char phys[32];
1777- struct hrtimer timer;
1778- struct ts_event tc;
1779+ struct delayed_work work;
1780
1781 struct i2c_client *client;
1782
1783- spinlock_t lock;
1784-
1785 u16 model;
1786 u16 x_plate_ohms;
1787
1788- unsigned pendown;
1789+ bool pendown;
1790+ bool ignore_first_irq;
1791 int irq;
1792
1793 int (*get_pendown_state)(void);
1794@@ -109,52 +106,96 @@
1795 return val;
1796 }
1797
1798-static void tsc2007_send_event(void *tsc)
1799+static void tsc2007_read_values(struct tsc2007 *tsc, struct ts_event *tc)
1800+{
1801+ /* y- still on; turn on only y+ (and ADC) */
1802+ tc->y = tsc2007_xfer(tsc, READ_Y);
1803+
1804+ /* turn y- off, x+ on, then leave in lowpower */
1805+ tc->x = tsc2007_xfer(tsc, READ_X);
1806+
1807+ /* turn y+ off, x- on; we'll use formula #1 */
1808+ tc->z1 = tsc2007_xfer(tsc, READ_Z1);
1809+ tc->z2 = tsc2007_xfer(tsc, READ_Z2);
1810+
1811+ /* Prepare for next touch reading - power down ADC, enable PENIRQ */
1812+ tsc2007_xfer(tsc, PWRDOWN);
1813+}
1814+
1815+static u32 tsc2007_calculate_pressure(struct tsc2007 *tsc, struct ts_event *tc)
1816 {
1817- struct tsc2007 *ts = tsc;
1818- u32 rt;
1819- u16 x, y, z1, z2;
1820-
1821- x = ts->tc.x;
1822- y = ts->tc.y;
1823- z1 = ts->tc.z1;
1824- z2 = ts->tc.z2;
1825+ u32 rt = 0;
1826
1827 /* range filtering */
1828- if (x == MAX_12BIT)
1829- x = 0;
1830+ if (tc->x == MAX_12BIT)
1831+ tc->x = 0;
1832
1833- if (likely(x && z1)) {
1834+ if (likely(tc->x && tc->z1)) {
1835 /* compute touch pressure resistance using equation #1 */
1836- rt = z2;
1837- rt -= z1;
1838- rt *= x;
1839- rt *= ts->x_plate_ohms;
1840- rt /= z1;
1841+ rt = tc->z2 - tc->z1;
1842+ rt *= tc->x;
1843+ rt *= tsc->x_plate_ohms;
1844+ rt /= tc->z1;
1845 rt = (rt + 2047) >> 12;
1846- } else
1847- rt = 0;
1848+ }
1849+
1850+ return rt;
1851+}
1852+
1853+static void tsc2007_send_up_event(struct tsc2007 *tsc)
1854+{
1855+ struct input_dev *input = tsc->input;
1856+
1857+ dev_dbg(&tsc->client->dev, "UP\n");
1858
1859- /* Sample found inconsistent by debouncing or pressure is beyond
1860- * the maximum. Don't report it to user space, repeat at least
1861- * once more the measurement
1862+ input_report_key(input, BTN_TOUCH, 0);
1863+ input_report_abs(input, ABS_PRESSURE, 0);
1864+ input_sync(input);
1865+}
1866+
1867+static void tsc2007_work(struct work_struct *work)
1868+{
1869+ struct tsc2007 *ts =
1870+ container_of(to_delayed_work(work), struct tsc2007, work);
1871+ struct ts_event tc;
1872+ u32 rt;
1873+
1874+ /*
1875+ * NOTE: We can't rely on the pressure to determine the pen down
1876+ * state, even though this controller has a pressure sensor.
1877+ * The pressure value can fluctuate for quite a while after
1878+ * lifting the pen and in some cases may not even settle at the
1879+ * expected value.
1880+ *
1881+ * The only safe way to check for the pen up condition is in the
1882+ * work function by reading the pen signal state (it's a GPIO
1883+ * and IRQ). Unfortunately such callback is not always available,
1884+	 * in that case we have to rely on the pressure anyway.
1885 */
1886+ if (ts->get_pendown_state) {
1887+ if (unlikely(!ts->get_pendown_state())) {
1888+ tsc2007_send_up_event(ts);
1889+ ts->pendown = false;
1890+ goto out;
1891+ }
1892+
1893+ dev_dbg(&ts->client->dev, "pen is still down\n");
1894+ }
1895+
1896+ tsc2007_read_values(ts, &tc);
1897+
1898+ rt = tsc2007_calculate_pressure(ts, &tc);
1899 if (rt > MAX_12BIT) {
1900+ /*
1901+ * Sample found inconsistent by debouncing or pressure is
1902+ * beyond the maximum. Don't report it to user space,
1903+ * repeat at least once more the measurement.
1904+ */
1905 dev_dbg(&ts->client->dev, "ignored pressure %d\n", rt);
1906+ goto out;
1907
1908- hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD),
1909- HRTIMER_MODE_REL);
1910- return;
1911 }
1912
1913- /* NOTE: We can't rely on the pressure to determine the pen down
1914- * state, even this controller has a pressure sensor. The pressure
1915- * value can fluctuate for quite a while after lifting the pen and
1916- * in some cases may not even settle at the expected value.
1917- *
1918- * The only safe way to check for the pen up condition is in the
1919- * timer by reading the pen signal state (it's a GPIO _and_ IRQ).
1920- */
1921 if (rt) {
1922 struct input_dev *input = ts->input;
1923
1924@@ -162,102 +203,82 @@
1925 dev_dbg(&ts->client->dev, "DOWN\n");
1926
1927 input_report_key(input, BTN_TOUCH, 1);
1928- ts->pendown = 1;
1929+ ts->pendown = true;
1930 }
1931
1932- input_report_abs(input, ABS_X, x);
1933- input_report_abs(input, ABS_Y, y);
1934+ input_report_abs(input, ABS_X, tc.x);
1935+ input_report_abs(input, ABS_Y, tc.y);
1936 input_report_abs(input, ABS_PRESSURE, rt);
1937
1938 input_sync(input);
1939
1940 dev_dbg(&ts->client->dev, "point(%4d,%4d), pressure (%4u)\n",
1941- x, y, rt);
1942- }
1943-
1944- hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD),
1945- HRTIMER_MODE_REL);
1946-}
1947-
1948-static int tsc2007_read_values(struct tsc2007 *tsc)
1949-{
1950- /* y- still on; turn on only y+ (and ADC) */
1951- tsc->tc.y = tsc2007_xfer(tsc, READ_Y);
1952-
1953- /* turn y- off, x+ on, then leave in lowpower */
1954- tsc->tc.x = tsc2007_xfer(tsc, READ_X);
1955-
1956- /* turn y+ off, x- on; we'll use formula #1 */
1957- tsc->tc.z1 = tsc2007_xfer(tsc, READ_Z1);
1958- tsc->tc.z2 = tsc2007_xfer(tsc, READ_Z2);
1959-
1960- /* power down */
1961- tsc2007_xfer(tsc, PWRDOWN);
1962-
1963- return 0;
1964-}
1965-
1966-static enum hrtimer_restart tsc2007_timer(struct hrtimer *handle)
1967-{
1968- struct tsc2007 *ts = container_of(handle, struct tsc2007, timer);
1969- unsigned long flags;
1970-
1971- spin_lock_irqsave(&ts->lock, flags);
1972-
1973- if (unlikely(!ts->get_pendown_state() && ts->pendown)) {
1974- struct input_dev *input = ts->input;
1975-
1976- dev_dbg(&ts->client->dev, "UP\n");
1977+ tc.x, tc.y, rt);
1978
1979- input_report_key(input, BTN_TOUCH, 0);
1980- input_report_abs(input, ABS_PRESSURE, 0);
1981- input_sync(input);
1982+ } else if (!ts->get_pendown_state && ts->pendown) {
1983+ /*
1984+ * We don't have callback to check pendown state, so we
1985+ * have to assume that since pressure reported is 0 the
1986+ * pen was lifted up.
1987+ */
1988+ tsc2007_send_up_event(ts);
1989+ ts->pendown = false;
1990+ }
1991
1992- ts->pendown = 0;
1993+ out:
1994+ if (ts->pendown)
1995+ schedule_delayed_work(&ts->work,
1996+ msecs_to_jiffies(TS_POLL_PERIOD));
1997+ else {
1998+ if (!ts->get_pendown_state)
1999+ ts->ignore_first_irq = 1;
2000 enable_irq(ts->irq);
2001- } else {
2002- /* pen is still down, continue with the measurement */
2003- dev_dbg(&ts->client->dev, "pen is still down\n");
2004-
2005- tsc2007_read_values(ts);
2006- tsc2007_send_event(ts);
2007 }
2008-
2009- spin_unlock_irqrestore(&ts->lock, flags);
2010-
2011- return HRTIMER_NORESTART;
2012 }
2013
2014 static irqreturn_t tsc2007_irq(int irq, void *handle)
2015 {
2016 struct tsc2007 *ts = handle;
2017- unsigned long flags;
2018
2019- spin_lock_irqsave(&ts->lock, flags);
2020+ if (ts->ignore_first_irq) {
2021+ ts->ignore_first_irq = 0;
2022+ return IRQ_HANDLED;
2023+ }
2024
2025- if (likely(ts->get_pendown_state())) {
2026+ if (!ts->get_pendown_state || likely(ts->get_pendown_state())) {
2027 disable_irq_nosync(ts->irq);
2028- hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_DELAY),
2029- HRTIMER_MODE_REL);
2030+ schedule_delayed_work(&ts->work,
2031+ msecs_to_jiffies(TS_POLL_DELAY));
2032 }
2033
2034 if (ts->clear_penirq)
2035 ts->clear_penirq();
2036
2037- spin_unlock_irqrestore(&ts->lock, flags);
2038-
2039 return IRQ_HANDLED;
2040 }
2041
2042-static int tsc2007_probe(struct i2c_client *client,
2043- const struct i2c_device_id *id)
2044+static void tsc2007_free_irq(struct tsc2007 *ts)
2045+{
2046+ free_irq(ts->irq, ts);
2047+ if (cancel_delayed_work_sync(&ts->work)) {
2048+ /*
2049+ * Work was pending, therefore we need to enable
2050+ * IRQ here to balance the disable_irq() done in the
2051+ * interrupt handler.
2052+ */
2053+ enable_irq(ts->irq);
2054+ }
2055+}
2056+
2057+static int __devinit tsc2007_probe(struct i2c_client *client,
2058+ const struct i2c_device_id *id)
2059 {
2060 struct tsc2007 *ts;
2061 struct tsc2007_platform_data *pdata = pdata = client->dev.platform_data;
2062 struct input_dev *input_dev;
2063 int err;
2064
2065- if (!pdata || !pdata->get_pendown_state) {
2066+ if (!pdata) {
2067 dev_err(&client->dev, "platform data is required!\n");
2068 return -EINVAL;
2069 }
2070@@ -274,22 +295,15 @@
2071 }
2072
2073 ts->client = client;
2074- i2c_set_clientdata(client, ts);
2075-
2076+ ts->irq = client->irq;
2077 ts->input = input_dev;
2078-
2079- hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2080- ts->timer.function = tsc2007_timer;
2081-
2082- spin_lock_init(&ts->lock);
2083+ INIT_DELAYED_WORK(&ts->work, tsc2007_work);
2084
2085 ts->model = pdata->model;
2086 ts->x_plate_ohms = pdata->x_plate_ohms;
2087 ts->get_pendown_state = pdata->get_pendown_state;
2088 ts->clear_penirq = pdata->clear_penirq;
2089
2090- pdata->init_platform_hw();
2091-
2092 snprintf(ts->phys, sizeof(ts->phys),
2093 "%s/input0", dev_name(&client->dev));
2094
2095@@ -304,9 +318,8 @@
2096 input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
2097 input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
2098
2099- tsc2007_read_values(ts);
2100-
2101- ts->irq = client->irq;
2102+ if (pdata->init_platform_hw)
2103+ pdata->init_platform_hw();
2104
2105 err = request_irq(ts->irq, tsc2007_irq, 0,
2106 client->dev.driver->name, ts);
2107@@ -319,29 +332,37 @@
2108 if (err)
2109 goto err_free_irq;
2110
2111- dev_info(&client->dev, "registered with irq (%d)\n", ts->irq);
2112+ i2c_set_clientdata(client, ts);
2113+
2114+ /* Prepare for touch readings - power down ADC and enable PENIRQ */
2115+ err = tsc2007_xfer(ts, PWRDOWN);
2116+ if (err < 0)
2117+ goto err_unreg_dev;
2118
2119 return 0;
2120
2121+ err_unreg_dev:
2122+ input_unregister_device(ts->input);
2123 err_free_irq:
2124- free_irq(ts->irq, ts);
2125- hrtimer_cancel(&ts->timer);
2126+ tsc2007_free_irq(ts);
2127+ if (pdata->exit_platform_hw)
2128+ pdata->exit_platform_hw();
2129 err_free_mem:
2130 input_free_device(input_dev);
2131 kfree(ts);
2132 return err;
2133 }
2134
2135-static int tsc2007_remove(struct i2c_client *client)
2136+static int __devexit tsc2007_remove(struct i2c_client *client)
2137 {
2138 struct tsc2007 *ts = i2c_get_clientdata(client);
2139- struct tsc2007_platform_data *pdata;
2140+ struct tsc2007_platform_data *pdata = client->dev.platform_data;
2141
2142- pdata = client->dev.platform_data;
2143- pdata->exit_platform_hw();
2144+ tsc2007_free_irq(ts);
2145+
2146+ if (pdata->exit_platform_hw)
2147+ pdata->exit_platform_hw();
2148
2149- free_irq(ts->irq, ts);
2150- hrtimer_cancel(&ts->timer);
2151 input_unregister_device(ts->input);
2152 kfree(ts);
2153
2154@@ -362,7 +383,7 @@
2155 },
2156 .id_table = tsc2007_idtable,
2157 .probe = tsc2007_probe,
2158- .remove = tsc2007_remove,
2159+ .remove = __devexit_p(tsc2007_remove),
2160 };
2161
2162 static int __init tsc2007_init(void)
2163diff -uNr linux-2.6.31/drivers/media/radio/Kconfig linux-2.6.31.new/drivers/media/radio/Kconfig
2164--- linux-2.6.31/drivers/media/radio/Kconfig 2009-10-23 11:18:30.000000000 -0700
2165+++ linux-2.6.31.new/drivers/media/radio/Kconfig 2009-10-23 11:17:28.000000000 -0700
2166@@ -406,4 +406,38 @@
2167 Say Y here if TEA5764 have a 32768 Hz crystal in circuit, say N
2168 here if TEA5764 reference frequency is connected in FREQIN.
2169
2170+config RADIO_SAA7706H
2171+ tristate "SAA7706H Car Radio DSP"
2172+ depends on I2C && VIDEO_V4L2
2173+ ---help---
2174+ Say Y here if you want to use the SAA7706H Car radio Digital
2175+ Signal Processor, found for instance on the Russellville development
2176+	  board. On the Russellville board the device is connected to the
2177+	  internal Timberdale I2C bus.
2178+
2179+ To compile this driver as a module, choose M here: the
2180+ module will be called SAA7706H.
2181+
2182+config RADIO_TEF6862
2183+ tristate "TEF6862 Car Radio Enhanced Selectivity Tuner"
2184+ depends on I2C && VIDEO_V4L2
2185+ ---help---
2186+ Say Y here if you want to use the TEF6862 Car Radio Enhanced
2187+ Selectivity Tuner, found for instance on the Russellville development
2188+	  board. On the Russellville board the device is connected to the
2189+	  internal Timberdale I2C bus.
2190+
2191+ To compile this driver as a module, choose M here: the
2192+ module will be called TEF6862.
2193+
2194+config RADIO_TIMBERDALE
2195+ tristate "Enable the Timberdale radio driver"
2196+ depends on MFD_TIMBERDALE && VIDEO_V4L2 && HAS_IOMEM
2197+ select RADIO_TEF6862
2198+ select RADIO_SAA7706H
2199+ ---help---
2200+ This is a kind of umbrella driver for the Radio Tuner and DSP
2201+ found behind the Timberdale FPGA on the Russellville board.
2202+	  Enabling this driver will automatically select the DSP and tuner.
2203+
2204 endif # RADIO_ADAPTERS
2205diff -uNr linux-2.6.31/drivers/media/radio/Makefile linux-2.6.31.new/drivers/media/radio/Makefile
2206--- linux-2.6.31/drivers/media/radio/Makefile 2009-10-23 11:18:30.000000000 -0700
2207+++ linux-2.6.31.new/drivers/media/radio/Makefile 2009-10-23 11:17:28.000000000 -0700
2208@@ -20,5 +20,8 @@
2209 obj-$(CONFIG_USB_SI470X) += radio-si470x.o
2210 obj-$(CONFIG_USB_MR800) += radio-mr800.o
2211 obj-$(CONFIG_RADIO_TEA5764) += radio-tea5764.o
2212+obj-$(CONFIG_RADIO_SAA7706H) += saa7706h.o
2213+obj-$(CONFIG_RADIO_TEF6862) += tef6862.o
2214+obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
2215
2216 EXTRA_CFLAGS += -Isound
2217diff -uNr linux-2.6.31/drivers/media/radio/radio-timb.c linux-2.6.31.new/drivers/media/radio/radio-timb.c
2218--- linux-2.6.31/drivers/media/radio/radio-timb.c 1969-12-31 16:00:00.000000000 -0800
2219+++ linux-2.6.31.new/drivers/media/radio/radio-timb.c 2009-10-23 11:17:28.000000000 -0700
2220@@ -0,0 +1,545 @@
2221+/*
2222+ * radio-timb.c Timberdale FPGA Radio driver
2223+ * Copyright (c) 2009 Intel Corporation
2224+ *
2225+ * This program is free software; you can redistribute it and/or modify
2226+ * it under the terms of the GNU General Public License version 2 as
2227+ * published by the Free Software Foundation.
2228+ *
2229+ * This program is distributed in the hope that it will be useful,
2230+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
2231+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2232+ * GNU General Public License for more details.
2233+ *
2234+ * You should have received a copy of the GNU General Public License
2235+ * along with this program; if not, write to the Free Software
2236+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
2237+ */
2238+
2239+#include <linux/list.h>
2240+#include <linux/version.h>
2241+#include <linux/module.h>
2242+#include <linux/io.h>
2243+#include <media/v4l2-common.h>
2244+#include <media/v4l2-ioctl.h>
2245+#include <media/v4l2-device.h>
2246+#include <linux/platform_device.h>
2247+#include <linux/interrupt.h>
2248+#include <linux/i2c.h>
2249+#include <media/timb_radio.h>
2250+
2251+#define DRIVER_NAME "timb-radio"
2252+
2253+#define RDS_BLOCK_SIZE 4
2254+#define RDS_BUFFER_SIZE (RDS_BLOCK_SIZE * 100)
2255+
2256+struct timbradio {
2257+ struct mutex lock; /* for mutual exclusion */
2258+ void __iomem *membase;
2259+ struct timb_radio_platform_data pdata;
2260+ struct v4l2_subdev *sd_tuner;
2261+ struct module *tuner_owner;
2262+ struct v4l2_subdev *sd_dsp;
2263+ struct module *dsp_owner;
2264+ struct video_device *video_dev;
2265+ /* RDS related */
2266+ int open_count;
2267+ int rds_irq;
2268+ wait_queue_head_t read_queue;
2269+ unsigned char buffer[RDS_BUFFER_SIZE];
2270+ unsigned int rd_index;
2271+ unsigned int wr_index;
2272+};
2273+
2274+
2275+static int timbradio_vidioc_querycap(struct file *file, void *priv,
2276+ struct v4l2_capability *v)
2277+{
2278+ strlcpy(v->driver, DRIVER_NAME, sizeof(v->driver));
2279+ strlcpy(v->card, "Timberdale Radio", sizeof(v->card));
2280+ snprintf(v->bus_info, sizeof(v->bus_info), "platform:"DRIVER_NAME);
2281+ v->version = KERNEL_VERSION(0, 0, 1);
2282+ v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
2283+ return 0;
2284+}
2285+
2286+static int timbradio_vidioc_g_tuner(struct file *file, void *priv,
2287+ struct v4l2_tuner *v)
2288+{
2289+ struct timbradio *tr = video_drvdata(file);
2290+ int ret;
2291+
2292+ mutex_lock(&tr->lock);
2293+ ret = v4l2_subdev_call(tr->sd_tuner, tuner, g_tuner, v);
2294+ mutex_unlock(&tr->lock);
2295+
2296+ return ret;
2297+}
2298+
2299+static int timbradio_vidioc_s_tuner(struct file *file, void *priv,
2300+ struct v4l2_tuner *v)
2301+{
2302+ struct timbradio *tr = video_drvdata(file);
2303+ int ret;
2304+
2305+ mutex_lock(&tr->lock);
2306+ ret = v4l2_subdev_call(tr->sd_tuner, tuner, s_tuner, v);
2307+ mutex_unlock(&tr->lock);
2308+
2309+ return ret;
2310+}
2311+
2312+static int timbradio_vidioc_g_input(struct file *filp, void *priv,
2313+ unsigned int *i)
2314+{
2315+ *i = 0;
2316+ return 0;
2317+}
2318+
2319+static int timbradio_vidioc_s_input(struct file *filp, void *priv,
2320+ unsigned int i)
2321+{
2322+ return i ? -EINVAL : 0;
2323+}
2324+
2325+static int timbradio_vidioc_g_audio(struct file *file, void *priv,
2326+ struct v4l2_audio *a)
2327+{
2328+ a->index = 0;
2329+ strlcpy(a->name, "Radio", sizeof(a->name));
2330+ a->capability = V4L2_AUDCAP_STEREO;
2331+ return 0;
2332+}
2333+
2334+
2335+static int timbradio_vidioc_s_audio(struct file *file, void *priv,
2336+ struct v4l2_audio *a)
2337+{
2338+ return a->index ? -EINVAL : 0;
2339+}
2340+
2341+static int timbradio_vidioc_s_frequency(struct file *file, void *priv,
2342+ struct v4l2_frequency *f)
2343+{
2344+ struct timbradio *tr = video_drvdata(file);
2345+ int ret;
2346+
2347+ mutex_lock(&tr->lock);
2348+ ret = v4l2_subdev_call(tr->sd_tuner, tuner, s_frequency, f);
2349+ mutex_unlock(&tr->lock);
2350+
2351+ return ret;
2352+}
2353+
2354+static int timbradio_vidioc_g_frequency(struct file *file, void *priv,
2355+ struct v4l2_frequency *f)
2356+{
2357+ struct timbradio *tr = video_drvdata(file);
2358+ int ret;
2359+
2360+ mutex_lock(&tr->lock);
2361+ ret = v4l2_subdev_call(tr->sd_tuner, tuner, g_frequency, f);
2362+ mutex_unlock(&tr->lock);
2363+
2364+ return ret;
2365+}
2366+
2367+static int timbradio_vidioc_queryctrl(struct file *file, void *priv,
2368+ struct v4l2_queryctrl *qc)
2369+{
2370+ struct timbradio *tr = video_drvdata(file);
2371+ int ret;
2372+
2373+ mutex_lock(&tr->lock);
2374+ ret = v4l2_subdev_call(tr->sd_dsp, core, queryctrl, qc);
2375+ mutex_unlock(&tr->lock);
2376+
2377+ return ret;
2378+}
2379+
2380+static int timbradio_vidioc_g_ctrl(struct file *file, void *priv,
2381+ struct v4l2_control *ctrl)
2382+{
2383+ struct timbradio *tr = video_drvdata(file);
2384+ int ret;
2385+
2386+ mutex_lock(&tr->lock);
2387+ ret = v4l2_subdev_call(tr->sd_dsp, core, g_ctrl, ctrl);
2388+ mutex_unlock(&tr->lock);
2389+
2390+ return ret;
2391+}
2392+
2393+static int timbradio_vidioc_s_ctrl(struct file *file, void *priv,
2394+ struct v4l2_control *ctrl)
2395+{
2396+ struct timbradio *tr = video_drvdata(file);
2397+ int ret;
2398+
2399+ mutex_lock(&tr->lock);
2400+ ret = v4l2_subdev_call(tr->sd_dsp, core, s_ctrl, ctrl);
2401+ mutex_unlock(&tr->lock);
2402+
2403+ return ret;
2404+}
2405+
2406+static const struct v4l2_ioctl_ops timbradio_ioctl_ops = {
2407+ .vidioc_querycap = timbradio_vidioc_querycap,
2408+ .vidioc_g_tuner = timbradio_vidioc_g_tuner,
2409+ .vidioc_s_tuner = timbradio_vidioc_s_tuner,
2410+ .vidioc_g_frequency = timbradio_vidioc_g_frequency,
2411+ .vidioc_s_frequency = timbradio_vidioc_s_frequency,
2412+ .vidioc_g_input = timbradio_vidioc_g_input,
2413+ .vidioc_s_input = timbradio_vidioc_s_input,
2414+ .vidioc_g_audio = timbradio_vidioc_g_audio,
2415+ .vidioc_s_audio = timbradio_vidioc_s_audio,
2416+ .vidioc_queryctrl = timbradio_vidioc_queryctrl,
2417+ .vidioc_g_ctrl = timbradio_vidioc_g_ctrl,
2418+ .vidioc_s_ctrl = timbradio_vidioc_s_ctrl
2419+};
2420+
2421+static irqreturn_t timbradio_irq(int irq, void *devid)
2422+{
2423+ struct timbradio *tr = devid;
2424+ u32 data = ioread32(tr->membase);
2425+
2426+ tr->buffer[tr->wr_index++] = data >> 24;
2427+ tr->buffer[tr->wr_index++] = data >> 16;
2428+ tr->buffer[tr->wr_index++] = data >> 8;
2429+ tr->buffer[tr->wr_index++] = data;
2430+ tr->wr_index %= RDS_BUFFER_SIZE;
2431+
2432+ wake_up(&tr->read_queue);
2433+
2434+ /* new RDS data received, read it */
2435+ return IRQ_HANDLED;
2436+}
2437+
2438+/**************************************************************************
2439+ * File Operations Interface
2440+ **************************************************************************/
2441+
2442+static ssize_t timbradio_rds_fops_read(struct file *file, char __user *buf,
2443+ size_t count, loff_t *ppos)
2444+{
2445+ struct timbradio *tr = video_drvdata(file);
2446+ int outblocks = 0;
2447+
2448+ /* block if no new data available */
2449+ while (tr->wr_index == tr->rd_index) {
2450+ if (file->f_flags & O_NONBLOCK)
2451+ return -EWOULDBLOCK;
2452+
2453+ if (wait_event_interruptible(tr->read_queue,
2454+ tr->wr_index != tr->rd_index))
2455+ return -EINTR;
2456+ }
2457+
2458+ count /= RDS_BLOCK_SIZE;
2459+ /* copy RDS block out of internal buffer and to user buffer */
2460+ mutex_lock(&tr->lock);
2461+ while (outblocks < count) {
2462+ if (tr->rd_index == tr->wr_index)
2463+ break;
2464+
2465+ if (copy_to_user(buf, tr->buffer + tr->rd_index,
2466+ RDS_BLOCK_SIZE))
2467+ break;
2468+ tr->rd_index += RDS_BLOCK_SIZE;
2469+ tr->rd_index %= RDS_BUFFER_SIZE;
2470+ outblocks++;
2471+ }
2472+ mutex_unlock(&tr->lock);
2473+
2474+ return outblocks *RDS_BLOCK_SIZE;
2475+}
2476+
2477+static unsigned int timbradio_rds_fops_poll(struct file *file,
2478+ struct poll_table_struct *pts)
2479+{
2480+ struct timbradio *tr = video_drvdata(file);
2481+
2482+ poll_wait(file, &tr->read_queue, pts);
2483+
2484+ if (tr->rd_index != tr->wr_index)
2485+ return POLLIN | POLLRDNORM;
2486+
2487+ return 0;
2488+}
2489+
2490+struct find_addr_arg {
2491+ char const *name;
2492+ struct i2c_client *client;
2493+};
2494+
2495+static int find_name(struct device *dev, void *argp)
2496+{
2497+ struct find_addr_arg *arg = (struct find_addr_arg *)argp;
2498+ struct i2c_client *client = i2c_verify_client(dev);
2499+
2500+ if (client && !strcmp(arg->name, client->name) && client->driver)
2501+ arg->client = client;
2502+
2503+ return 0;
2504+}
2505+
2506+static struct i2c_client *find_client(struct i2c_adapter *adapt,
2507+ const char *name)
2508+{
2509+ struct find_addr_arg find_arg;
2510+ /* now find the client */
2511+#ifdef MODULE
2512+ request_module(name);
2513+#endif
2514+ /* code for finding the I2C child */
2515+ find_arg.name = name;
2516+ find_arg.client = NULL;
2517+ device_for_each_child(&adapt->dev, &find_arg, find_name);
2518+ return find_arg.client;
2519+}
2520+
2521+static int timbradio_rds_fops_open(struct file *file)
2522+{
2523+ struct timbradio *tr = video_drvdata(file);
2524+ int err = 0;
2525+
2526+ mutex_lock(&tr->lock);
2527+ if (tr->open_count == 0) {
2528+ /* device currently not open, check if the DSP and tuner is not
2529+ * yet found, in that case find them
2530+ */
2531+ if (!tr->sd_tuner) {
2532+ struct i2c_adapter *adapt;
2533+ struct i2c_client *tuner;
2534+ struct i2c_client *dsp;
2535+
2536+ /* find the I2C bus */
2537+ adapt = i2c_get_adapter(tr->pdata.i2c_adapter);
2538+ if (!adapt) {
2539+ printk(KERN_ERR DRIVER_NAME": No I2C bus\n");
2540+ err = -ENODEV;
2541+ goto out;
2542+ }
2543+
2544+ /* now find the tuner and dsp */
2545+ tuner = find_client(adapt, tr->pdata.tuner);
2546+ dsp = find_client(adapt, tr->pdata.dsp);
2547+
2548+ i2c_put_adapter(adapt);
2549+
2550+ if (!tuner || !dsp) {
2551+ printk(KERN_ERR DRIVER_NAME
2552+ ": Failed to get tuner or DSP\n");
2553+ err = -ENODEV;
2554+ goto out;
2555+ }
2556+
2557+ tr->sd_tuner = i2c_get_clientdata(tuner);
2558+ tr->sd_dsp = i2c_get_clientdata(dsp);
2559+
2560+ tr->tuner_owner = tr->sd_tuner->owner;
2561+ tr->dsp_owner = tr->sd_dsp->owner;
2562+ /* Lock the modules */
2563+ if (!try_module_get(tr->tuner_owner)) {
2564+ err = -ENODEV;
2565+ goto err_get_tuner;
2566+ }
2567+
2568+ if (!try_module_get(tr->dsp_owner)) {
2569+ err = -ENODEV;
2570+ goto err_get_dsp;
2571+ }
2572+ }
2573+
2574+ /* enable the IRQ for receiving RDS data */
2575+ err = request_irq(tr->rds_irq, timbradio_irq, 0, DRIVER_NAME,
2576+ tr);
2577+ }
2578+ goto out;
2579+
2580+err_get_dsp:
2581+ module_put(tr->tuner_owner);
2582+err_get_tuner:
2583+ tr->sd_tuner = NULL;
2584+ tr->sd_dsp = NULL;
2585+out:
2586+ if (!err)
2587+ tr->open_count++;
2588+ mutex_unlock(&tr->lock);
2589+ return err;
2590+}
2591+
2592+static int timbradio_rds_fops_release(struct file *file)
2593+{
2594+ struct timbradio *tr = video_drvdata(file);
2595+
2596+ mutex_lock(&tr->lock);
2597+ tr->open_count--;
2598+ if (!tr->open_count) {
2599+ free_irq(tr->rds_irq, tr);
2600+
2601+ tr->wr_index = 0;
2602+ tr->rd_index = 0;
2603+
2604+ /* cancel read processes */
2605+ wake_up_interruptible(&tr->read_queue);
2606+ }
2607+ mutex_unlock(&tr->lock);
2608+
2609+ return 0;
2610+}
2611+
2612+
2613+static const struct v4l2_file_operations timbradio_fops = {
2614+ .owner = THIS_MODULE,
2615+ .ioctl = video_ioctl2,
2616+ .read = timbradio_rds_fops_read,
2617+ .poll = timbradio_rds_fops_poll,
2618+ .open = timbradio_rds_fops_open,
2619+ .release = timbradio_rds_fops_release,
2620+};
2621+
2622+static const struct video_device timbradio_template = {
2623+ .name = "Timberdale Radio",
2624+ .fops = &timbradio_fops,
2625+ .ioctl_ops = &timbradio_ioctl_ops,
2626+ .release = video_device_release_empty,
2627+ .minor = -1
2628+};
2629+
2630+
2631+
2632+static int timbradio_probe(struct platform_device *pdev)
2633+{
2634+ struct timb_radio_platform_data *pdata = pdev->dev.platform_data;
2635+ struct timbradio *tr;
2636+ struct resource *iomem;
2637+ int irq;
2638+ int err;
2639+
2640+ if (!pdata) {
2641+ printk(KERN_ERR DRIVER_NAME": Platform data missing\n");
2642+ err = -EINVAL;
2643+ goto err;
2644+ }
2645+
2646+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2647+ if (!iomem) {
2648+ err = -ENODEV;
2649+ goto err;
2650+ }
2651+
2652+ irq = platform_get_irq(pdev, 0);
2653+ if (irq < 0) {
2654+ err = -ENODEV;
2655+ goto err;
2656+ }
2657+
2658+ if (!request_mem_region(iomem->start, resource_size(iomem),
2659+ DRIVER_NAME)) {
2660+ err = -EBUSY;
2661+ goto err;
2662+ }
2663+
2664+ tr = kzalloc(sizeof(*tr), GFP_KERNEL);
2665+ if (!tr) {
2666+ err = -ENOMEM;
2667+ goto err_alloc;
2668+ }
2669+ mutex_init(&tr->lock);
2670+
2671+ tr->membase = ioremap(iomem->start, resource_size(iomem));
2672+ if (!tr->membase) {
2673+ err = -ENOMEM;
2674+ goto err_ioremap;
2675+ }
2676+
2677+ memcpy(&tr->pdata, pdata, sizeof(tr->pdata));
2678+
2679+ tr->video_dev = video_device_alloc();
2680+ if (!tr->video_dev) {
2681+ err = -ENOMEM;
2682+ goto err_video_req;
2683+ }
2684+ *tr->video_dev = timbradio_template;
2685+ tr->rds_irq = irq;
2686+ init_waitqueue_head(&tr->read_queue);
2687+
2688+ err = video_register_device(tr->video_dev, VFL_TYPE_RADIO, -1);
2689+ if (err) {
2690+ printk(KERN_ALERT DRIVER_NAME": Error reg video\n");
2691+ goto err_video_req;
2692+ }
2693+
2694+ video_set_drvdata(tr->video_dev, tr);
2695+
2696+ platform_set_drvdata(pdev, tr);
2697+ return 0;
2698+
2699+err_video_req:
2700+ if (tr->video_dev->minor != -1)
2701+ video_unregister_device(tr->video_dev);
2702+ else
2703+ video_device_release(tr->video_dev);
2704+ iounmap(tr->membase);
2705+err_ioremap:
2706+ kfree(tr);
2707+err_alloc:
2708+ release_mem_region(iomem->start, resource_size(iomem));
2709+err:
2710+ printk(KERN_ERR DRIVER_NAME ": Failed to register: %d\n", err);
2711+
2712+ return err;
2713+}
2714+
2715+static int timbradio_remove(struct platform_device *pdev)
2716+{
2717+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2718+ struct timbradio *tr = platform_get_drvdata(pdev);
2719+
2720+ if (tr->video_dev->minor != -1)
2721+ video_unregister_device(tr->video_dev);
2722+ else
2723+ video_device_release(tr->video_dev);
2724+
2725+ if (tr->sd_tuner) {
2726+ module_put(tr->tuner_owner);
2727+ module_put(tr->dsp_owner);
2728+ }
2729+
2730+ iounmap(tr->membase);
2731+ release_mem_region(iomem->start, resource_size(iomem));
2732+ kfree(tr);
2733+
2734+ return 0;
2735+}
2736+
2737+static struct platform_driver timbradio_platform_driver = {
2738+ .driver = {
2739+ .name = DRIVER_NAME,
2740+ .owner = THIS_MODULE,
2741+ },
2742+ .probe = timbradio_probe,
2743+ .remove = timbradio_remove,
2744+};
2745+
2746+/*--------------------------------------------------------------------------*/
2747+
2748+static int __init timbradio_init(void)
2749+{
2750+ return platform_driver_register(&timbradio_platform_driver);
2751+}
2752+
2753+static void __exit timbradio_exit(void)
2754+{
2755+ platform_driver_unregister(&timbradio_platform_driver);
2756+}
2757+
2758+module_init(timbradio_init);
2759+module_exit(timbradio_exit);
2760+
2761+MODULE_DESCRIPTION("Timberdale Radio driver");
2762+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
2763+MODULE_LICENSE("GPL v2");
2764+MODULE_ALIAS("platform:"DRIVER_NAME);
2765+
2766diff -uNr linux-2.6.31/drivers/media/radio/saa7706h.c linux-2.6.31.new/drivers/media/radio/saa7706h.c
2767--- linux-2.6.31/drivers/media/radio/saa7706h.c 1969-12-31 16:00:00.000000000 -0800
2768+++ linux-2.6.31.new/drivers/media/radio/saa7706h.c 2009-10-23 11:17:28.000000000 -0700
2769@@ -0,0 +1,496 @@
2770+/*
2771+ * saa7706h.c Philips SAA7706H Car Radio DSP driver
2772+ * Copyright (c) 2009 Intel Corporation
2773+ *
2774+ * This program is free software; you can redistribute it and/or modify
2775+ * it under the terms of the GNU General Public License version 2 as
2776+ * published by the Free Software Foundation.
2777+ *
2778+ * This program is distributed in the hope that it will be useful,
2779+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
2780+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2781+ * GNU General Public License for more details.
2782+ *
2783+ * You should have received a copy of the GNU General Public License
2784+ * along with this program; if not, write to the Free Software
2785+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
2786+ */
2787+
2788+#include <linux/module.h>
2789+#include <linux/init.h>
2790+#include <linux/errno.h>
2791+#include <linux/kernel.h>
2792+#include <linux/interrupt.h>
2793+#include <linux/i2c.h>
2794+#include <linux/i2c-id.h>
2795+#include <media/v4l2-ioctl.h>
2796+#include <media/v4l2-device.h>
2797+#include <media/v4l2-chip-ident.h>
2798+
2799+#define DRIVER_NAME "saa7706h"
2800+
2801+/* the I2C memory map looks like this
2802+
2803+ $1C00 - $FFFF Not Used
2804+ $2200 - $3FFF Reserved YRAM (DSP2) space
2805+ $2000 - $21FF YRAM (DSP2)
2806+ $1FF0 - $1FFF Hardware Registers
2807+ $1280 - $1FEF Reserved XRAM (DSP2) space
2808+ $1000 - $127F XRAM (DSP2)
2809+ $0FFF DSP CONTROL
2810+ $0A00 - $0FFE Reserved
2811+ $0980 - $09FF Reserved YRAM (DSP1) space
2812+ $0800 - $097F YRAM (DSP1)
2813+ $0200 - $07FF Not Used
2814+ $0180 - $01FF Reserved XRAM (DSP1) space
2815+ $0000 - $017F XRAM (DSP1)
2816+*/
2817+
2818+#define SAA7706H_REG_CTRL 0x0fff
2819+#define SAA7706H_CTRL_BYP_PLL 0x0001
2820+#define SAA7706H_CTRL_PLL_DIV_MASK 0x003e
2821+#define SAA7706H_CTRL_PLL3_62975MHZ 0x003e
2822+#define SAA7706H_CTRL_DSP_TURBO 0x0040
2823+#define SAA7706H_CTRL_PC_RESET_DSP1 0x0080
2824+#define SAA7706H_CTRL_PC_RESET_DSP2 0x0100
2825+#define SAA7706H_CTRL_DSP1_ROM_EN_MASK 0x0600
2826+#define SAA7706H_CTRL_DSP1_FUNC_PROM 0x0000
2827+#define SAA7706H_CTRL_DSP2_ROM_EN_MASK 0x1800
2828+#define SAA7706H_CTRL_DSP2_FUNC_PROM 0x0000
2829+#define SAA7706H_CTRL_DIG_SIL_INTERPOL 0x8000
2830+
2831+#define SAA7706H_REG_EVALUATION 0x1ff0
2832+#define SAA7706H_EVAL_DISABLE_CHARGE_PUMP 0x000001
2833+#define SAA7706H_EVAL_DCS_CLOCK 0x000002
2834+#define SAA7706H_EVAL_GNDRC1_ENABLE 0x000004
2835+#define SAA7706H_EVAL_GNDRC2_ENABLE 0x000008
2836+
2837+#define SAA7706H_REG_CL_GEN1 0x1ff3
2838+#define SAA7706H_CL_GEN1_MIN_LOOPGAIN_MASK 0x00000f
2839+#define SAA7706H_CL_GEN1_LOOPGAIN_MASK 0x0000f0
2840+#define SAA7706H_CL_GEN1_COARSE_RATION 0xffff00
2841+
2842+#define SAA7706H_REG_CL_GEN2 0x1ff4
2843+#define SAA7706H_CL_GEN2_WSEDGE_FALLING 0x000001
2844+#define SAA7706H_CL_GEN2_STOP_VCO 0x000002
2845+#define SAA7706H_CL_GEN2_FRERUN 0x000004
2846+#define SAA7706H_CL_GEN2_ADAPTIVE 0x000008
2847+#define SAA7706H_CL_GEN2_FINE_RATIO_MASK 0x0ffff0
2848+
2849+#define SAA7706H_REG_CL_GEN4 0x1ff6
2850+#define SAA7706H_CL_GEN4_BYPASS_PLL1 0x001000
2851+#define SAA7706H_CL_GEN4_PLL1_DIV_MASK 0x03e000
2852+#define SAA7706H_CL_GEN4_DSP1_TURBO 0x040000
2853+
2854+#define SAA7706H_REG_SEL 0x1ff7
2855+#define SAA7706H_SEL_DSP2_SRCA_MASK 0x000007
2856+#define SAA7706H_SEL_DSP2_FMTA_MASK 0x000031
2857+#define SAA7706H_SEL_DSP2_SRCB_MASK 0x0001c0
2858+#define SAA7706H_SEL_DSP2_FMTB_MASK 0x000e00
2859+#define SAA7706H_SEL_DSP1_SRC_MASK 0x003000
2860+#define SAA7706H_SEL_DSP1_FMT_MASK 0x01c003
2861+#define SAA7706H_SEL_SPDIF2 0x020000
2862+#define SAA7706H_SEL_HOST_IO_FMT_MASK 0x1c0000
2863+#define SAA7706H_SEL_EN_HOST_IO 0x200000
2864+
2865+#define SAA7706H_REG_IAC 0x1ff8
2866+#define SAA7706H_REG_CLK_SET 0x1ff9
2867+#define SAA7706H_REG_CLK_COEFF 0x1ffa
2868+#define SAA7706H_REG_INPUT_SENS 0x1ffb
2869+#define SAA7706H_INPUT_SENS_RDS_VOL_MASK 0x0003f
2870+#define SAA7706H_INPUT_SENS_FM_VOL_MASK 0x00fc0
2871+#define SAA7706H_INPUT_SENS_FM_MPX 0x01000
2872+#define SAA7706H_INPUT_SENS_OFF_FILTER_A_EN 0x02000
2873+#define SAA7706H_INPUT_SENS_OFF_FILTER_B_EN 0x04000
2874+#define SAA7706H_REG_PHONE_NAV_AUDIO 0x1ffc
2875+#define SAA7706H_REG_IO_CONF_DSP2 0x1ffd
2876+#define SAA7706H_REG_STATUS_DSP2 0x1ffe
2877+#define SAA7706H_REG_PC_DSP2 0x1fff
2878+
2879+#define SAA7706H_DSP1_MOD0 0x0800
2880+#define SAA7706H_DSP1_ROM_VER 0x097f
2881+#define SAA7706H_DSP2_MPTR0 0x1000
2882+
2883+#define SAA7706H_DSP1_MODPNTR 0x0000
2884+
2885+#define SAA7706H_DSP2_XMEM_CONTLLCW 0x113e
2886+#define SAA7706H_DSP2_XMEM_BUSAMP 0x114a
2887+#define SAA7706H_DSP2_XMEM_FDACPNTR 0x11f9
2888+#define SAA7706H_DSP2_XMEM_IIS1PNTR 0x11fb
2889+
2890+#define SAA7706H_DSP2_YMEM_PVGA 0x212a
2891+#define SAA7706H_DSP2_YMEM_PVAT1 0x212b
2892+#define SAA7706H_DSP2_YMEM_PVAT 0x212c
2893+#define SAA7706H_DSP2_YMEM_ROM_VER 0x21ff
2894+
2895+#define SUPPORTED_DSP1_ROM_VER 0x667
2896+
2897+struct saa7706h_state {
2898+ struct v4l2_subdev sd;
2899+ unsigned muted;
2900+};
2901+
2902+static inline struct saa7706h_state *to_state(struct v4l2_subdev *sd)
2903+{
2904+ return container_of(sd, struct saa7706h_state, sd);
2905+}
2906+
2907+static int saa7706h_i2c_send(struct i2c_client *client, const u8 *data, int len)
2908+{
2909+ int err = i2c_master_send(client, data, len);
2910+ if (err == len)
2911+ return 0;
2912+ else if (err > 0)
2913+ return -EIO;
2914+ return err;
2915+}
2916+
2917+static int saa7706h_i2c_transfer(struct i2c_client *client,
2918+ struct i2c_msg *msgs, int num)
2919+{
2920+ int err = i2c_transfer(client->adapter, msgs, num);
2921+ if (err == num)
2922+ return 0;
2923+ else if (err > 0)
2924+ return -EIO;
2925+ else
2926+ return err;
2927+}
2928+
2929+static int saa7706h_set_reg24(struct i2c_client *client, u16 reg, u32 val)
2930+{
2931+ u8 buf[5];
2932+ int pos = 0;
2933+
2934+ buf[pos++] = reg >> 8;
2935+ buf[pos++] = reg;
2936+ buf[pos++] = val >> 16;
2937+ buf[pos++] = val >> 8;
2938+ buf[pos++] = val;
2939+
2940+ return saa7706h_i2c_send(client, buf, pos);
2941+}
2942+
2943+static int saa7706h_set_reg16(struct i2c_client *client, u16 reg, u16 val)
2944+{
2945+ u8 buf[4];
2946+ int pos = 0;
2947+
2948+ buf[pos++] = reg >> 8;
2949+ buf[pos++] = reg;
2950+ buf[pos++] = val >> 8;
2951+ buf[pos++] = val;
2952+
2953+ return saa7706h_i2c_send(client, buf, pos);
2954+}
2955+
2956+static int saa7706h_get_reg16(struct i2c_client *client, u16 reg)
2957+{
2958+ u8 buf[2];
2959+ int err;
2960+ u8 regaddr[] = {reg >> 8, reg};
2961+ struct i2c_msg msg[] = { {client->addr, 0, sizeof(regaddr), regaddr},
2962+ {client->addr, I2C_M_RD, sizeof(buf), buf} };
2963+
2964+ err = saa7706h_i2c_transfer(client, msg, ARRAY_SIZE(msg));
2965+ if (err)
2966+ return err;
2967+
2968+ return buf[0] << 8 | buf[1];
2969+}
2970+
2971+static int saa7706h_unmute(struct v4l2_subdev *sd)
2972+{
2973+ struct i2c_client *client = v4l2_get_subdevdata(sd);
2974+ struct saa7706h_state *state = to_state(sd);
2975+ int err;
2976+
2977+ err = saa7706h_set_reg16(client, SAA7706H_REG_CTRL,
2978+ SAA7706H_CTRL_PLL3_62975MHZ | SAA7706H_CTRL_PC_RESET_DSP1 |
2979+ SAA7706H_CTRL_PC_RESET_DSP2);
2980+ if (err)
2981+ goto out;
2982+
2983+	/* newer versions of the chip require a small sleep after reset */
2984+ msleep(1);
2985+
2986+ err = saa7706h_set_reg16(client, SAA7706H_REG_CTRL,
2987+ SAA7706H_CTRL_PLL3_62975MHZ);
2988+ if (err)
2989+ goto out;
2990+
2991+ err = saa7706h_set_reg24(client, SAA7706H_REG_EVALUATION, 0);
2992+ if (err)
2993+ goto out;
2994+
2995+ err = saa7706h_set_reg24(client, SAA7706H_REG_CL_GEN1, 0x040022);
2996+ if (err)
2997+ goto out;
2998+
2999+ err = saa7706h_set_reg24(client, SAA7706H_REG_CL_GEN2,
3000+ SAA7706H_CL_GEN2_WSEDGE_FALLING);
3001+ if (err)
3002+ goto out;
3003+
3004+ err = saa7706h_set_reg24(client, SAA7706H_REG_CL_GEN4, 0x024080);
3005+ if (err)
3006+ goto out;
3007+
3008+ err = saa7706h_set_reg24(client, SAA7706H_REG_SEL, 0x200080);
3009+ if (err)
3010+ goto out;
3011+
3012+ err = saa7706h_set_reg24(client, SAA7706H_REG_IAC, 0xf4caed);
3013+ if (err)
3014+ goto out;
3015+
3016+ err = saa7706h_set_reg24(client, SAA7706H_REG_CLK_SET, 0x124334);
3017+ if (err)
3018+ goto out;
3019+
3020+ err = saa7706h_set_reg24(client, SAA7706H_REG_CLK_COEFF, 0x004a1a);
3021+ if (err)
3022+ goto out;
3023+
3024+ err = saa7706h_set_reg24(client, SAA7706H_REG_INPUT_SENS, 0x0071c7);
3025+ if (err)
3026+ goto out;
3027+
3028+ err = saa7706h_set_reg24(client, SAA7706H_REG_PHONE_NAV_AUDIO,
3029+ 0x0e22ff);
3030+ if (err)
3031+ goto out;
3032+
3033+ err = saa7706h_set_reg24(client, SAA7706H_REG_IO_CONF_DSP2, 0x001ff8);
3034+ if (err)
3035+ goto out;
3036+
3037+ err = saa7706h_set_reg24(client, SAA7706H_REG_STATUS_DSP2, 0x080003);
3038+ if (err)
3039+ goto out;
3040+
3041+ err = saa7706h_set_reg24(client, SAA7706H_REG_PC_DSP2, 0x000004);
3042+ if (err)
3043+ goto out;
3044+
3045+ err = saa7706h_set_reg16(client, SAA7706H_DSP1_MOD0, 0x0c6c);
3046+ if (err)
3047+ goto out;
3048+
3049+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_MPTR0, 0x000b4b);
3050+ if (err)
3051+ goto out;
3052+
3053+ err = saa7706h_set_reg24(client, SAA7706H_DSP1_MODPNTR, 0x000600);
3054+ if (err)
3055+ goto out;
3056+
3057+ err = saa7706h_set_reg24(client, SAA7706H_DSP1_MODPNTR, 0x0000c0);
3058+ if (err)
3059+ goto out;
3060+
3061+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_XMEM_CONTLLCW, 0x000819);
3062+ if (err)
3063+ goto out;
3064+
3065+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_XMEM_CONTLLCW, 0x00085a);
3066+ if (err)
3067+ goto out;
3068+
3069+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_XMEM_BUSAMP, 0x7fffff);
3070+ if (err)
3071+ goto out;
3072+
3073+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_XMEM_FDACPNTR, 0x2000cb);
3074+ if (err)
3075+ goto out;
3076+
3077+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_XMEM_IIS1PNTR, 0x2000cb);
3078+ if (err)
3079+ goto out;
3080+
3081+ err = saa7706h_set_reg16(client, SAA7706H_DSP2_YMEM_PVGA, 0x0f80);
3082+ if (err)
3083+ goto out;
3084+
3085+ err = saa7706h_set_reg16(client, SAA7706H_DSP2_YMEM_PVAT1, 0x0800);
3086+ if (err)
3087+ goto out;
3088+
3089+ err = saa7706h_set_reg16(client, SAA7706H_DSP2_YMEM_PVAT, 0x0800);
3090+ if (err)
3091+ goto out;
3092+
3093+ err = saa7706h_set_reg24(client, SAA7706H_DSP2_XMEM_CONTLLCW, 0x000905);
3094+ if (err)
3095+ goto out;
3096+
3097+ state->muted = 0;
3098+out:
3099+ return err;
3100+}
3101+
3102+static int saa7706h_mute(struct v4l2_subdev *sd)
3103+{
3104+ struct i2c_client *client = v4l2_get_subdevdata(sd);
3105+ struct saa7706h_state *state = to_state(sd);
3106+ int err;
3107+
3108+ err = saa7706h_set_reg16(client, SAA7706H_REG_CTRL,
3109+ SAA7706H_CTRL_PLL3_62975MHZ | SAA7706H_CTRL_PC_RESET_DSP1 |
3110+ SAA7706H_CTRL_PC_RESET_DSP2);
3111+ if (err)
3112+ goto out;
3113+
3114+ state->muted = 1;
3115+out:
3116+ return err;
3117+}
3118+
3119+static int saa7706h_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
3120+{
3121+ switch (qc->id) {
3122+ case V4L2_CID_AUDIO_MUTE:
3123+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
3124+ }
3125+ return -EINVAL;
3126+}
3127+
3128+static int saa7706h_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
3129+{
3130+ struct saa7706h_state *state = to_state(sd);
3131+
3132+ switch (ctrl->id) {
3133+ case V4L2_CID_AUDIO_MUTE:
3134+ ctrl->value = state->muted;
3135+ return 0;
3136+ }
3137+ return -EINVAL;
3138+}
3139+
3140+static int saa7706h_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
3141+{
3142+ switch (ctrl->id) {
3143+ case V4L2_CID_AUDIO_MUTE:
3144+ if (ctrl->value)
3145+ return saa7706h_mute(sd);
3146+ else
3147+ return saa7706h_unmute(sd);
3148+ }
3149+ return -EINVAL;
3150+}
3151+
3152+static int saa7706h_g_chip_ident(struct v4l2_subdev *sd,
3153+ struct v4l2_dbg_chip_ident *chip)
3154+{
3155+ struct i2c_client *client = v4l2_get_subdevdata(sd);
3156+
3157+ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_SAA7706H, 0);
3158+}
3159+
3160+static const struct v4l2_subdev_core_ops saa7706h_core_ops = {
3161+ .g_chip_ident = saa7706h_g_chip_ident,
3162+ .queryctrl = saa7706h_queryctrl,
3163+ .g_ctrl = saa7706h_g_ctrl,
3164+ .s_ctrl = saa7706h_s_ctrl,
3165+};
3166+
3167+static const struct v4l2_subdev_ops saa7706h_ops = {
3168+ .core = &saa7706h_core_ops,
3169+};
3170+
3171+/*
3172+ * Generic i2c probe
3173+ * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
3174+ */
3175+
3176+static int __devinit saa7706h_probe(struct i2c_client *client,
3177+ const struct i2c_device_id *id)
3178+{
3179+ struct saa7706h_state *state;
3180+ struct v4l2_subdev *sd;
3181+ int err;
3182+
3183+ /* Check if the adapter supports the needed features */
3184+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
3185+ return -EIO;
3186+
3187+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
3188+ client->addr << 1, client->adapter->name);
3189+
3190+ state = kmalloc(sizeof(struct saa7706h_state), GFP_KERNEL);
3191+ if (state == NULL)
3192+ return -ENOMEM;
3193+ sd = &state->sd;
3194+ v4l2_i2c_subdev_init(sd, client, &saa7706h_ops);
3195+
3196+ /* check the rom versions */
3197+ err = saa7706h_get_reg16(client, SAA7706H_DSP1_ROM_VER);
3198+ if (err < 0)
3199+ goto err;
3200+ if (err != SUPPORTED_DSP1_ROM_VER)
3201+ printk(KERN_WARNING DRIVER_NAME
3202+ ": Unknown DSP1 ROM code version: 0x%x\n", err);
3203+
3204+ state->muted = 1;
3205+
3206+ /* startup in a muted state */
3207+ err = saa7706h_mute(sd);
3208+ if (err)
3209+ goto err;
3210+
3211+ return 0;
3212+
3213+err:
3214+ v4l2_device_unregister_subdev(sd);
3215+ kfree(to_state(sd));
3216+
3217+ printk(KERN_ERR DRIVER_NAME ": Failed to probe: %d\n", err);
3218+
3219+ return err;
3220+}
3221+
3222+static int __devexit saa7706h_remove(struct i2c_client *client)
3223+{
3224+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
3225+
3226+ saa7706h_mute(sd);
3227+ v4l2_device_unregister_subdev(sd);
3228+ kfree(to_state(sd));
3229+ return 0;
3230+}
3231+
3232+static const struct i2c_device_id saa7706h_id[] = {
3233+ {DRIVER_NAME, 0},
3234+ {},
3235+};
3236+
3237+MODULE_DEVICE_TABLE(i2c, saa7706h_id);
3238+
3239+static struct i2c_driver saa7706h_driver = {
3240+ .driver = {
3241+ .owner = THIS_MODULE,
3242+ .name = DRIVER_NAME,
3243+ },
3244+ .probe = saa7706h_probe,
3245+ .remove = saa7706h_remove,
3246+ .id_table = saa7706h_id,
3247+};
3248+
3249+static __init int saa7706h_init(void)
3250+{
3251+ return i2c_add_driver(&saa7706h_driver);
3252+}
3253+
3254+static __exit void saa7706h_exit(void)
3255+{
3256+ i2c_del_driver(&saa7706h_driver);
3257+}
3258+
3259+module_init(saa7706h_init);
3260+module_exit(saa7706h_exit);
3261+
3262+MODULE_DESCRIPTION("SAA7706H Car Radio DSP driver");
3263+MODULE_AUTHOR("Mocean Laboratories");
3264+MODULE_LICENSE("GPL v2");
3265+
3266diff -uNr linux-2.6.31/drivers/media/radio/tef6862.c linux-2.6.31.new/drivers/media/radio/tef6862.c
3267--- linux-2.6.31/drivers/media/radio/tef6862.c 1969-12-31 16:00:00.000000000 -0800
3268+++ linux-2.6.31.new/drivers/media/radio/tef6862.c 2009-10-23 11:17:28.000000000 -0700
3269@@ -0,0 +1,232 @@
3270+/*
3271+ * tef6862.c Philips TEF6862 Car Radio Enhanced Selectivity Tuner
3272+ * Copyright (c) 2009 Intel Corporation
3273+ *
3274+ * This program is free software; you can redistribute it and/or modify
3275+ * it under the terms of the GNU General Public License version 2 as
3276+ * published by the Free Software Foundation.
3277+ *
3278+ * This program is distributed in the hope that it will be useful,
3279+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
3280+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3281+ * GNU General Public License for more details.
3282+ *
3283+ * You should have received a copy of the GNU General Public License
3284+ * along with this program; if not, write to the Free Software
3285+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
3286+ */
3287+
3288+#include <linux/module.h>
3289+#include <linux/init.h>
3290+#include <linux/errno.h>
3291+#include <linux/kernel.h>
3292+#include <linux/interrupt.h>
3293+#include <linux/i2c.h>
3294+#include <linux/i2c-id.h>
3295+#include <media/v4l2-ioctl.h>
3296+#include <media/v4l2-device.h>
3297+#include <media/v4l2-chip-ident.h>
3298+
3299+#define DRIVER_NAME "tef6862"
3300+
3301+#define FREQ_MUL 16000
3302+
3303+#define TEF6862_LO_FREQ (875 * FREQ_MUL / 10)
3304+#define TEF6862_HI_FREQ (108 * FREQ_MUL)
3305+
3306+/* Write mode sub addresses */
3307+#define WM_SUB_BANDWIDTH 0x0
3308+#define WM_SUB_PLLM 0x1
3309+#define WM_SUB_PLLL 0x2
3310+#define WM_SUB_DAA 0x3
3311+#define WM_SUB_AGC 0x4
3312+#define WM_SUB_BAND 0x5
3313+#define WM_SUB_CONTROL 0x6
3314+#define WM_SUB_LEVEL 0x7
3315+#define WM_SUB_IFCF 0x8
3316+#define WM_SUB_IFCAP 0x9
3317+#define WM_SUB_ACD 0xA
3318+#define WM_SUB_TEST 0xF
3319+
3320+/* Different modes of the MSA register */
3321+#define MODE_BUFFER 0x0
3322+#define MODE_PRESET 0x1
3323+#define MODE_SEARCH 0x2
3324+#define MODE_AF_UPDATE 0x3
3325+#define MODE_JUMP 0x4
3326+#define MODE_CHECK 0x5
3327+#define MODE_LOAD 0x6
3328+#define MODE_END 0x7
3329+#define MODE_SHIFT 5
3330+
3331+struct tef6862_state {
3332+ struct v4l2_subdev sd;
3333+ unsigned long freq;
3334+};
3335+
3336+static inline struct tef6862_state *to_state(struct v4l2_subdev *sd)
3337+{
3338+ return container_of(sd, struct tef6862_state, sd);
3339+}
3340+
3341+static u16 tef6862_sigstr(struct i2c_client *client)
3342+{
3343+ u8 buf[4];
3344+ int err = i2c_master_recv(client, buf, sizeof(buf));
3345+ if (err == sizeof(buf))
3346+ return buf[3] << 8;
3347+ return 0;
3348+}
3349+
3350+static int tef6862_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *v)
3351+{
3352+ if (v->index > 0)
3353+ return -EINVAL;
3354+
3355+ /* only support FM for now */
3356+ strlcpy(v->name, "FM", sizeof(v->name));
3357+ v->type = V4L2_TUNER_RADIO;
3358+ v->rangelow = TEF6862_LO_FREQ;
3359+ v->rangehigh = TEF6862_HI_FREQ;
3360+ v->rxsubchans = V4L2_TUNER_SUB_MONO;
3361+ v->capability = V4L2_TUNER_CAP_LOW;
3362+ v->audmode = V4L2_TUNER_MODE_STEREO;
3363+ v->signal = tef6862_sigstr(v4l2_get_subdevdata(sd));
3364+
3365+ return 0;
3366+}
3367+
3368+static int tef6862_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *v)
3369+{
3370+ return v->index ? -EINVAL : 0;
3371+}
3372+
3373+static int tef6862_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
3374+{
3375+ struct tef6862_state *state = to_state(sd);
3376+ struct i2c_client *client = v4l2_get_subdevdata(sd);
3377+ u16 pll;
3378+ u8 i2cmsg[3];
3379+ int err;
3380+
3381+ if (f->tuner != 0)
3382+ return -EINVAL;
3383+
3384+ pll = 1964 + ((f->frequency - TEF6862_LO_FREQ) * 20) / FREQ_MUL;
3385+ i2cmsg[0] = (MODE_PRESET << MODE_SHIFT) | WM_SUB_PLLM;
3386+ i2cmsg[1] = (pll >> 8) & 0xff;
3387+ i2cmsg[2] = pll & 0xff;
3388+
3389+ err = i2c_master_send(client, i2cmsg, sizeof(i2cmsg));
3390+ if (!err)
3391+ state->freq = f->frequency;
3392+ return err;
3393+}
3394+
3395+static int tef6862_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
3396+{
3397+ struct tef6862_state *state = to_state(sd);
3398+
3399+ if (f->tuner != 0)
3400+ return -EINVAL;
3401+ f->type = V4L2_TUNER_RADIO;
3402+ f->frequency = state->freq;
3403+ return 0;
3404+}
3405+
3406+static int tef6862_g_chip_ident(struct v4l2_subdev *sd,
3407+ struct v4l2_dbg_chip_ident *chip)
3408+{
3409+ struct i2c_client *client = v4l2_get_subdevdata(sd);
3410+
3411+ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_TEF6862, 0);
3412+}
3413+
3414+static const struct v4l2_subdev_tuner_ops tef6862_tuner_ops = {
3415+ .g_tuner = tef6862_g_tuner,
3416+ .s_tuner = tef6862_s_tuner,
3417+ .s_frequency = tef6862_s_frequency,
3418+ .g_frequency = tef6862_g_frequency,
3419+};
3420+
3421+static const struct v4l2_subdev_core_ops tef6862_core_ops = {
3422+ .g_chip_ident = tef6862_g_chip_ident,
3423+};
3424+
3425+static const struct v4l2_subdev_ops tef6862_ops = {
3426+ .core = &tef6862_core_ops,
3427+ .tuner = &tef6862_tuner_ops,
3428+};
3429+
3430+/*
3431+ * Generic i2c probe
3432+ * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
3433+ */
3434+
3435+static int __devinit tef6862_probe(struct i2c_client *client,
3436+ const struct i2c_device_id *id)
3437+{
3438+ struct tef6862_state *state;
3439+ struct v4l2_subdev *sd;
3440+
3441+ /* Check if the adapter supports the needed features */
3442+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
3443+ return -EIO;
3444+
3445+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
3446+ client->addr << 1, client->adapter->name);
3447+
3448+ state = kmalloc(sizeof(struct tef6862_state), GFP_KERNEL);
3449+ if (state == NULL)
3450+ return -ENOMEM;
3451+ state->freq = TEF6862_LO_FREQ;
3452+
3453+ sd = &state->sd;
3454+ v4l2_i2c_subdev_init(sd, client, &tef6862_ops);
3455+
3456+ return 0;
3457+}
3458+
3459+static int __devexit tef6862_remove(struct i2c_client *client)
3460+{
3461+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
3462+
3463+ v4l2_device_unregister_subdev(sd);
3464+ kfree(to_state(sd));
3465+ return 0;
3466+}
3467+
3468+static const struct i2c_device_id tef6862_id[] = {
3469+ {DRIVER_NAME, 0},
3470+ {},
3471+};
3472+
3473+MODULE_DEVICE_TABLE(i2c, tef6862_id);
3474+
3475+static struct i2c_driver tef6862_driver = {
3476+ .driver = {
3477+ .owner = THIS_MODULE,
3478+ .name = DRIVER_NAME,
3479+ },
3480+ .probe = tef6862_probe,
3481+ .remove = tef6862_remove,
3482+ .id_table = tef6862_id,
3483+};
3484+
3485+static __init int tef6862_init(void)
3486+{
3487+ return i2c_add_driver(&tef6862_driver);
3488+}
3489+
3490+static __exit void tef6862_exit(void)
3491+{
3492+ i2c_del_driver(&tef6862_driver);
3493+}
3494+
3495+module_init(tef6862_init);
3496+module_exit(tef6862_exit);
3497+
3498+MODULE_DESCRIPTION("TEF6862 Car Radio Enhanced Selectivity Tuner");
3499+MODULE_AUTHOR("Mocean Laboratories");
3500+MODULE_LICENSE("GPL v2");
3501+
3502diff -uNr linux-2.6.31/drivers/media/video/adv7180.c linux-2.6.31.new/drivers/media/video/adv7180.c
3503--- linux-2.6.31/drivers/media/video/adv7180.c 1969-12-31 16:00:00.000000000 -0800
3504+++ linux-2.6.31.new/drivers/media/video/adv7180.c 2009-10-23 11:17:28.000000000 -0700
3505@@ -0,0 +1,475 @@
3506+/*
3507+ * adv7180.c Analog Devices ADV7180 video decoder driver
3508+ * Copyright (c) 2009 Intel Corporation
3509+ *
3510+ * This program is free software; you can redistribute it and/or modify
3511+ * it under the terms of the GNU General Public License version 2 as
3512+ * published by the Free Software Foundation.
3513+ *
3514+ * This program is distributed in the hope that it will be useful,
3515+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
3516+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3517+ * GNU General Public License for more details.
3518+ *
3519+ * You should have received a copy of the GNU General Public License
3520+ * along with this program; if not, write to the Free Software
3521+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
3522+ */
3523+
3524+#include <linux/module.h>
3525+#include <linux/init.h>
3526+#include <linux/errno.h>
3527+#include <linux/kernel.h>
3528+#include <linux/interrupt.h>
3529+#include <linux/i2c.h>
3530+#include <linux/i2c-id.h>
3531+#include <media/v4l2-ioctl.h>
3532+#include <linux/videodev2.h>
3533+#include <media/v4l2-device.h>
3534+#include <media/v4l2-chip-ident.h>
3535+#include <linux/mutex.h>
3536+
3537+#define DRIVER_NAME "adv7180"
3538+
3539+#define ADV7180_INPUT_CONTROL_REG 0x00
3540+#define ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM 0x00
3541+#define ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM_PED 0x10
3542+#define ADV7180_INPUT_CONTROL_AD_PAL_N_NTSC_J_SECAM 0x20
3543+#define ADV7180_INPUT_CONTROL_AD_PAL_N_NTSC_M_SECAM 0x30
3544+#define ADV7180_INPUT_CONTROL_NTSC_J 0x40
3545+#define ADV7180_INPUT_CONTROL_NTSC_M 0x50
3546+#define ADV7180_INPUT_CONTROL_PAL60 0x60
3547+#define ADV7180_INPUT_CONTROL_NTSC_443 0x70
3548+#define ADV7180_INPUT_CONTROL_PAL_BG 0x80
3549+#define ADV7180_INPUT_CONTROL_PAL_N 0x90
3550+#define ADV7180_INPUT_CONTROL_PAL_M 0xa0
3551+#define ADV7180_INPUT_CONTROL_PAL_M_PED 0xb0
3552+#define ADV7180_INPUT_CONTROL_PAL_COMB_N 0xc0
3553+#define ADV7180_INPUT_CONTROL_PAL_COMB_N_PED 0xd0
3554+#define ADV7180_INPUT_CONTROL_PAL_SECAM 0xe0
3555+#define ADV7180_INPUT_CONTROL_PAL_SECAM_PED 0xf0
3556+
3557+#define ADV7180_EXTENDED_OUTPUT_CONTROL_REG 0x04
3558+#define ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS 0xC5
3559+
3560+#define ADV7180_AUTODETECT_ENABLE_REG 0x07
3561+#define ADV7180_AUTODETECT_DEFAULT 0x7f
3562+
3563+#define ADV7180_ADI_CTRL_REG 0x0e
3564+#define ADV7180_ADI_CTRL_IRQ_SPACE 0x20
3565+
3566+#define ADV7180_STATUS1_REG 0x10
3567+#define ADV7180_STATUS1_IN_LOCK 0x01
3568+#define ADV7180_STATUS1_AUTOD_MASK 0x70
3569+#define ADV7180_STATUS1_AUTOD_NTSM_M_J 0x00
3570+#define ADV7180_STATUS1_AUTOD_NTSC_4_43 0x10
3571+#define ADV7180_STATUS1_AUTOD_PAL_M 0x20
3572+#define ADV7180_STATUS1_AUTOD_PAL_60 0x30
3573+#define ADV7180_STATUS1_AUTOD_PAL_B_G 0x40
3574+#define ADV7180_STATUS1_AUTOD_SECAM 0x50
3575+#define ADV7180_STATUS1_AUTOD_PAL_COMB 0x60
3576+#define ADV7180_STATUS1_AUTOD_SECAM_525 0x70
3577+
3578+#define ADV7180_IDENT_REG 0x11
3579+#define ADV7180_ID_7180 0x18
3580+
3581+#define ADV7180_ICONF1_ADI 0x40
3582+#define ADV7180_ICONF1_ACTIVE_LOW 0x01
3583+#define ADV7180_ICONF1_PSYNC_ONLY 0x10
3584+#define ADV7180_ICONF1_ACTIVE_TO_CLR 0xC0
3585+
3586+#define ADV7180_IRQ1_LOCK 0x01
3587+#define ADV7180_IRQ1_UNLOCK 0x02
3588+#define ADV7180_ISR1_ADI 0x42
3589+#define ADV7180_ICR1_ADI 0x43
3590+#define ADV7180_IMR1_ADI 0x44
3591+#define ADV7180_IMR2_ADI 0x48
3592+#define ADV7180_IRQ3_AD_CHANGE 0x08
3593+#define ADV7180_ISR3_ADI 0x4A
3594+#define ADV7180_ICR3_ADI 0x4B
3595+#define ADV7180_IMR3_ADI 0x4C
3596+#define ADV7180_IMR4_ADI 0x50
3597+
3598+#define ADV7180_NTSC_V_BIT_END_REG 0xE6
3599+#define ADV7180_NTSC_V_BIT_END_MANUAL_NVEND 0x4F
3600+
3601+struct adv7180_state {
3602+ struct v4l2_subdev sd;
3603+ struct work_struct work;
3604+ struct mutex mutex; /* mutual excl. when accessing chip */
3605+ int irq;
3606+ v4l2_std_id curr_norm;
3607+ bool autodetect;
3608+};
3609+
3610+static v4l2_std_id adv7180_std_to_v4l2(u8 status1)
3611+{
3612+ switch (status1 & ADV7180_STATUS1_AUTOD_MASK) {
3613+ case ADV7180_STATUS1_AUTOD_NTSM_M_J:
3614+ return V4L2_STD_NTSC;
3615+ case ADV7180_STATUS1_AUTOD_NTSC_4_43:
3616+ return V4L2_STD_NTSC_443;
3617+ case ADV7180_STATUS1_AUTOD_PAL_M:
3618+ return V4L2_STD_PAL_M;
3619+ case ADV7180_STATUS1_AUTOD_PAL_60:
3620+ return V4L2_STD_PAL_60;
3621+ case ADV7180_STATUS1_AUTOD_PAL_B_G:
3622+ return V4L2_STD_PAL;
3623+ case ADV7180_STATUS1_AUTOD_SECAM:
3624+ return V4L2_STD_SECAM;
3625+ case ADV7180_STATUS1_AUTOD_PAL_COMB:
3626+ return V4L2_STD_PAL_Nc | V4L2_STD_PAL_N;
3627+ case ADV7180_STATUS1_AUTOD_SECAM_525:
3628+ return V4L2_STD_SECAM;
3629+ default:
3630+ return V4L2_STD_UNKNOWN;
3631+ }
3632+}
3633+
3634+static int v4l2_std_to_adv7180(v4l2_std_id std)
3635+{
3636+ if (std == V4L2_STD_PAL_60)
3637+ return ADV7180_INPUT_CONTROL_PAL60;
3638+ if (std == V4L2_STD_NTSC_443)
3639+ return ADV7180_INPUT_CONTROL_NTSC_443;
3640+ if (std == V4L2_STD_PAL_N)
3641+ return ADV7180_INPUT_CONTROL_PAL_N;
3642+ if (std == V4L2_STD_PAL_M)
3643+ return ADV7180_INPUT_CONTROL_PAL_M;
3644+ if (std == V4L2_STD_PAL_Nc)
3645+ return ADV7180_INPUT_CONTROL_PAL_COMB_N;
3646+
3647+ /* pal is a combination of several variants */
3648+ if (std & V4L2_STD_PAL)
3649+ return ADV7180_INPUT_CONTROL_PAL_BG;
3650+ if (std & V4L2_STD_NTSC)
3651+ return ADV7180_INPUT_CONTROL_NTSC_M;
3652+ if (std & V4L2_STD_SECAM)
3653+ return ADV7180_INPUT_CONTROL_PAL_SECAM;
3654+
3655+ return -EINVAL;
3656+}
3657+
3658+static u32 adv7180_status_to_v4l2(u8 status1)
3659+{
3660+ if (!(status1 & ADV7180_STATUS1_IN_LOCK))
3661+ return V4L2_IN_ST_NO_SIGNAL;
3662+
3663+ return 0;
3664+}
3665+
3666+static int __adv7180_status(struct i2c_client *client, u32 *status,
3667+ v4l2_std_id *std)
3668+{
3669+ int status1 = i2c_smbus_read_byte_data(client, ADV7180_STATUS1_REG);
3670+
3671+ if (status1 < 0)
3672+ return status1;
3673+
3674+ if (status)
3675+ *status = adv7180_status_to_v4l2(status1);
3676+ if (std)
3677+ *std = adv7180_std_to_v4l2(status1);
3678+
3679+ return 0;
3680+}
3681+
3682+static inline struct adv7180_state *to_state(struct v4l2_subdev *sd)
3683+{
3684+ return container_of(sd, struct adv7180_state, sd);
3685+}
3686+
3687+static int adv7180_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
3688+{
3689+ struct adv7180_state *state = to_state(sd);
3690+ int err = mutex_lock_interruptible(&state->mutex);
3691+ if (err)
3692+ return err;
3693+
3694+ /* when we are interrupt driven we know the state */
3695+ if (!state->autodetect || state->irq > 0)
3696+ *std = state->curr_norm;
3697+ else
3698+ err = __adv7180_status(v4l2_get_subdevdata(sd), NULL, std);
3699+
3700+ mutex_unlock(&state->mutex);
3701+ return err;
3702+}
3703+
3704+static int adv7180_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
3705+{
3706+ struct adv7180_state *state = to_state(sd);
3707+ struct i2c_client *client = v4l2_get_subdevdata(sd);
3708+ int ret = mutex_lock_interruptible(&state->mutex);
3709+ if (ret)
3710+ return ret;
3711+
3712+ /* all standards -> autodetect */
3713+ if (std == V4L2_STD_ALL) {
3714+ ret = i2c_smbus_write_byte_data(client,
3715+ ADV7180_INPUT_CONTROL_REG,
3716+ ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM);
3717+ if (ret < 0)
3718+ goto out;
3719+
3720+ __adv7180_status(client, NULL, &state->curr_norm);
3721+ state->autodetect = true;
3722+ } else {
3723+ ret = v4l2_std_to_adv7180(std);
3724+ if (ret < 0)
3725+ goto out;
3726+
3727+ ret = i2c_smbus_write_byte_data(client,
3728+ ADV7180_INPUT_CONTROL_REG, ret);
3729+ if (ret < 0)
3730+ goto out;
3731+
3732+ state->curr_norm = std;
3733+ state->autodetect = false;
3734+ }
3735+ ret = 0;
3736+out:
3737+ mutex_unlock(&state->mutex);
3738+ return ret;
3739+}
3740+
3741+static int adv7180_g_input_status(struct v4l2_subdev *sd, u32 *status)
3742+{
3743+ struct adv7180_state *state = to_state(sd);
3744+ int ret = mutex_lock_interruptible(&state->mutex);
3745+ if (ret)
3746+ return ret;
3747+
3748+ ret = __adv7180_status(v4l2_get_subdevdata(sd), status, NULL);
3749+ mutex_unlock(&state->mutex);
3750+ return ret;
3751+}
3752+
3753+static int adv7180_g_chip_ident(struct v4l2_subdev *sd,
3754+ struct v4l2_dbg_chip_ident *chip)
3755+{
3756+ struct i2c_client *client = v4l2_get_subdevdata(sd);
3757+
3758+ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7180, 0);
3759+}
3760+
3761+static const struct v4l2_subdev_video_ops adv7180_video_ops = {
3762+ .querystd = adv7180_querystd,
3763+ .g_input_status = adv7180_g_input_status,
3764+};
3765+
3766+static const struct v4l2_subdev_core_ops adv7180_core_ops = {
3767+ .g_chip_ident = adv7180_g_chip_ident,
3768+ .s_std = adv7180_s_std,
3769+};
3770+
3771+static const struct v4l2_subdev_ops adv7180_ops = {
3772+ .core = &adv7180_core_ops,
3773+ .video = &adv7180_video_ops,
3774+};
3775+
3776+static void adv7180_work(struct work_struct *work)
3777+{
3778+ struct adv7180_state *state = container_of(work, struct adv7180_state,
3779+ work);
3780+ struct i2c_client *client = v4l2_get_subdevdata(&state->sd);
3781+ u8 isr3;
3782+
3783+ mutex_lock(&state->mutex);
3784+ i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
3785+ ADV7180_ADI_CTRL_IRQ_SPACE);
3786+ isr3 = i2c_smbus_read_byte_data(client, ADV7180_ISR3_ADI);
3787+ /* clear */
3788+ i2c_smbus_write_byte_data(client, ADV7180_ICR3_ADI, isr3);
3789+ i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG, 0);
3790+
3791+ if (isr3 & ADV7180_IRQ3_AD_CHANGE && state->autodetect)
3792+ __adv7180_status(client, NULL, &state->curr_norm);
3793+ mutex_unlock(&state->mutex);
3794+
3795+ enable_irq(state->irq);
3796+}
3797+
3798+static irqreturn_t adv7180_irq(int irq, void *devid)
3799+{
3800+ struct adv7180_state *state = devid;
3801+
3802+ schedule_work(&state->work);
3803+
3804+ disable_irq_nosync(state->irq);
3805+
3806+ return IRQ_HANDLED;
3807+}
3808+
3809+/*
3810+ * Generic i2c probe
3811+ * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
3812+ */
3813+
3814+static int __devinit adv7180_probe(struct i2c_client *client,
3815+ const struct i2c_device_id *id)
3816+{
3817+ struct adv7180_state *state;
3818+ struct v4l2_subdev *sd;
3819+ int ret;
3820+
3821+ /* Check if the adapter supports the needed features */
3822+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
3823+ return -EIO;
3824+
3825+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
3826+ client->addr << 1, client->adapter->name);
3827+
3828+ state = kzalloc(sizeof(struct adv7180_state), GFP_KERNEL);
3829+ if (state == NULL) {
3830+ ret = -ENOMEM;
3831+ goto err;
3832+ }
3833+
3834+ state->irq = client->irq;
3835+ INIT_WORK(&state->work, adv7180_work);
3836+ mutex_init(&state->mutex);
3837+ state->autodetect = true;
3838+ sd = &state->sd;
3839+ v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
3840+
3841+ /* Initialize adv7180 */
3842+ /* Enable autodetection */
3843+ ret = i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
3844+ ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM);
3845+ if (ret < 0)
3846+ goto err_unreg_subdev;
3847+
3848+ ret = i2c_smbus_write_byte_data(client, ADV7180_AUTODETECT_ENABLE_REG,
3849+ ADV7180_AUTODETECT_DEFAULT);
3850+ if (ret < 0)
3851+ goto err_unreg_subdev;
3852+
3853+ /* ITU-R BT.656-4 compatible */
3854+ ret = i2c_smbus_write_byte_data(client,
3855+ ADV7180_EXTENDED_OUTPUT_CONTROL_REG,
3856+ ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS);
3857+ if (ret < 0)
3858+ goto err_unreg_subdev;
3859+
3860+
3861+ /* Manually set V bit end position in NTSC mode */
3862+ ret = i2c_smbus_write_byte_data(client,
3863+ ADV7180_NTSC_V_BIT_END_REG,
3864+ ADV7180_NTSC_V_BIT_END_MANUAL_NVEND);
3865+ if (ret < 0)
3866+ goto err_unreg_subdev;
3867+
3868+ /* read current norm */
3869+ __adv7180_status(client, NULL, &state->curr_norm);
3870+
3871+ /* register for interrupts */
3872+ if (state->irq > 0) {
3873+ ret = request_irq(state->irq, adv7180_irq, 0, DRIVER_NAME,
3874+ state);
3875+ if (ret)
3876+ goto err_unreg_subdev;
3877+
3878+ ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
3879+ ADV7180_ADI_CTRL_IRQ_SPACE);
3880+ if (ret < 0)
3881+ goto err_unreg_subdev;
3882+
3883+ /* config the Interrupt pin to be active low */
3884+ ret = i2c_smbus_write_byte_data(client, ADV7180_ICONF1_ADI,
3885+ ADV7180_ICONF1_ACTIVE_LOW | ADV7180_ICONF1_PSYNC_ONLY);
3886+ if (ret < 0)
3887+ goto err_unreg_subdev;
3888+
3889+ ret = i2c_smbus_write_byte_data(client, ADV7180_IMR1_ADI, 0);
3890+ if (ret < 0)
3891+ goto err_unreg_subdev;
3892+
3893+ ret = i2c_smbus_write_byte_data(client, ADV7180_IMR2_ADI, 0);
3894+ if (ret < 0)
3895+ goto err_unreg_subdev;
3896+
3897+ /* enable AD change interrupts interrupts */
3898+ ret = i2c_smbus_write_byte_data(client, ADV7180_IMR3_ADI,
3899+ ADV7180_IRQ3_AD_CHANGE);
3900+ if (ret < 0)
3901+ goto err_unreg_subdev;
3902+
3903+ ret = i2c_smbus_write_byte_data(client, ADV7180_IMR4_ADI, 0);
3904+ if (ret < 0)
3905+ goto err_unreg_subdev;
3906+
3907+ ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
3908+ 0);
3909+ if (ret < 0)
3910+ goto err_unreg_subdev;
3911+ }
3912+
3913+ return 0;
3914+
3915+err_unreg_subdev:
3916+ mutex_destroy(&state->mutex);
3917+ v4l2_device_unregister_subdev(sd);
3918+ kfree(state);
3919+err:
3920+ printk(KERN_ERR DRIVER_NAME ": Failed to probe: %d\n", ret);
3921+ return ret;
3922+}
3923+
3924+static int __devexit adv7180_remove(struct i2c_client *client)
3925+{
3926+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
3927+ struct adv7180_state *state = to_state(sd);
3928+
3929+ if (state->irq > 0) {
3930+ free_irq(client->irq, state);
3931+ if (cancel_work_sync(&state->work)) {
3932+ /*
3933+ * Work was pending, therefore we need to enable
3934+ * IRQ here to balance the disable_irq() done in the
3935+ * interrupt handler.
3936+ */
3937+ enable_irq(state->irq);
3938+ }
3939+ }
3940+
3941+ mutex_destroy(&state->mutex);
3942+ v4l2_device_unregister_subdev(sd);
3943+ kfree(to_state(sd));
3944+ return 0;
3945+}
3946+
3947+static const struct i2c_device_id adv7180_id[] = {
3948+ {DRIVER_NAME, 0},
3949+ {},
3950+};
3951+
3952+MODULE_DEVICE_TABLE(i2c, adv7180_id);
3953+
3954+static struct i2c_driver adv7180_driver = {
3955+ .driver = {
3956+ .owner = THIS_MODULE,
3957+ .name = DRIVER_NAME,
3958+ },
3959+ .probe = adv7180_probe,
3960+ .remove = adv7180_remove,
3961+ .id_table = adv7180_id,
3962+};
3963+
3964+static __init int adv7180_init(void)
3965+{
3966+ return i2c_add_driver(&adv7180_driver);
3967+}
3968+
3969+static __exit void adv7180_exit(void)
3970+{
3971+ i2c_del_driver(&adv7180_driver);
3972+}
3973+
3974+module_init(adv7180_init);
3975+module_exit(adv7180_exit);
3976+
3977+MODULE_DESCRIPTION("Analog Devices ADV7180 video decoder driver");
3978+MODULE_AUTHOR("Mocean Laboratories");
3979+MODULE_LICENSE("GPL v2");
3980+
3981diff -uNr linux-2.6.31/drivers/media/video/Kconfig linux-2.6.31.new/drivers/media/video/Kconfig
3982--- linux-2.6.31/drivers/media/video/Kconfig 2009-10-23 11:18:30.000000000 -0700
3983+++ linux-2.6.31.new/drivers/media/video/Kconfig 2009-10-23 11:17:28.000000000 -0700
3984@@ -265,6 +265,15 @@
3985
3986 comment "Video decoders"
3987
3988+config VIDEO_ADV7180
3989+ tristate "Analog Devices ADV7180 decoder"
3990+ depends on VIDEO_V4L2 && I2C
3991+ ---help---
3992+ Support for the Analog Devices ADV7180 video decoder.
3993+
3994+ To compile this driver as a module, choose M here: the
3995+ module will be called adv7180.
3996+
3997 config VIDEO_BT819
3998 tristate "BT819A VideoStream decoder"
3999 depends on VIDEO_V4L2 && I2C
4000@@ -816,6 +825,13 @@
4001 ---help---
4002 This is a v4l2 driver for the TI OMAP2 camera capture interface
4003
4004+config VIDEO_TIMBERDALE
4005+ tristate "Support for timberdale Video In/LogiWIN"
4006+ depends on VIDEO_V4L2 && MFD_TIMBERDALE_DMA
4007+ select VIDEO_ADV7180
4008+ ---help---
4009+ Add support for the Video In peripherial of the timberdale FPGA.
4010+
4011 #
4012 # USB Multimedia device configuration
4013 #
4014diff -uNr linux-2.6.31/drivers/media/video/Makefile linux-2.6.31.new/drivers/media/video/Makefile
4015--- linux-2.6.31/drivers/media/video/Makefile 2009-10-23 11:18:30.000000000 -0700
4016+++ linux-2.6.31.new/drivers/media/video/Makefile 2009-10-23 11:17:27.000000000 -0700
4017@@ -45,6 +45,7 @@
4018 obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
4019 obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
4020 obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
4021+obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
4022 obj-$(CONFIG_VIDEO_ADV7343) += adv7343.o
4023 obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
4024 obj-$(CONFIG_VIDEO_BT819) += bt819.o
4025@@ -156,6 +157,8 @@
4026
4027 obj-$(CONFIG_VIDEO_AU0828) += au0828/
4028
4029+obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o
4030+
4031 obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
4032
4033 obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
4034diff -uNr linux-2.6.31/drivers/media/video/timblogiw.c linux-2.6.31.new/drivers/media/video/timblogiw.c
4035--- linux-2.6.31/drivers/media/video/timblogiw.c 1969-12-31 16:00:00.000000000 -0800
4036+++ linux-2.6.31.new/drivers/media/video/timblogiw.c 2009-10-23 11:17:28.000000000 -0700
4037@@ -0,0 +1,1058 @@
4038+/*
4039+ * timblogiw.c timberdale FPGA LogiWin Video In driver
4040+ * Copyright (c) 2009 Intel Corporation
4041+ *
4042+ * This program is free software; you can redistribute it and/or modify
4043+ * it under the terms of the GNU General Public License version 2 as
4044+ * published by the Free Software Foundation.
4045+ *
4046+ * This program is distributed in the hope that it will be useful,
4047+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
4048+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4049+ * GNU General Public License for more details.
4050+ *
4051+ * You should have received a copy of the GNU General Public License
4052+ * along with this program; if not, write to the Free Software
4053+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
4054+ */
4055+
4056+/* Supports:
4057+ * Timberdale FPGA LogiWin Video In
4058+ */
4059+
4060+#include <linux/list.h>
4061+#include <linux/version.h>
4062+#include <linux/module.h>
4063+#include <linux/dma-mapping.h>
4064+#include <media/v4l2-common.h>
4065+#include <media/v4l2-ioctl.h>
4066+#include <media/v4l2-device.h>
4067+#include <linux/platform_device.h>
4068+#include <linux/interrupt.h>
4069+#include "timblogiw.h"
4070+#include <linux/mfd/timbdma.h>
4071+#include <linux/i2c.h>
4072+
4073+#define DRIVER_NAME "timb-video"
4074+
4075+#define TIMBLOGIW_CTRL 0x40
4076+
4077+#define TIMBLOGIW_H_SCALE 0x20
4078+#define TIMBLOGIW_V_SCALE 0x28
4079+
4080+#define TIMBLOGIW_X_CROP 0x58
4081+#define TIMBLOGIW_Y_CROP 0x60
4082+
4083+#define TIMBLOGIW_W_CROP 0x00
4084+#define TIMBLOGIW_H_CROP 0x08
4085+
4086+#define TIMBLOGIW_VERSION_CODE 0x02
4087+
4088+#define TIMBLOGIW_BUF 0x04
4089+#define TIMBLOGIW_TBI 0x2c
4090+#define TIMBLOGIW_BPL 0x30
4091+
4092+#define dbg(...)
4093+
4094+#define BYTES_PER_LINE (720 * 2)
4095+
4096+#define DMA_BUFFER_SIZE (BYTES_PER_LINE * 576)
4097+
4098+#define TIMBLOGIW_VIDEO_FORMAT V4L2_PIX_FMT_UYVY
4099+
4100+static void timblogiw_release_buffers(struct timblogiw *lw);
4101+
4102+const struct timblogiw_tvnorm timblogiw_tvnorms[] = {
4103+ {
4104+ .std = V4L2_STD_PAL,
4105+ .width = 720,
4106+ .height = 576
4107+ },
4108+ {
4109+ .std = V4L2_STD_NTSC,
4110+ .width = 720,
4111+ .height = 480
4112+ }
4113+};
4114+
4115+static int timblogiw_bytes_per_line(const struct timblogiw_tvnorm *norm)
4116+{
4117+ return norm->width * 2;
4118+}
4119+
4120+
4121+static int timblogiw_frame_size(const struct timblogiw_tvnorm *norm)
4122+{
4123+ return norm->height * timblogiw_bytes_per_line(norm);
4124+}
4125+
4126+static const struct timblogiw_tvnorm *timblogiw_get_norm(const v4l2_std_id std)
4127+{
4128+ int i;
4129+ for (i = 0; i < ARRAY_SIZE(timblogiw_tvnorms); i++)
4130+ if (timblogiw_tvnorms[i].std & std)
4131+ return timblogiw_tvnorms + i;
4132+
4133+ /* default to first element */
4134+ return timblogiw_tvnorms;
4135+}
4136+
4137+static void timblogiw_handleframe(unsigned long arg)
4138+{
4139+ struct timblogiw_frame *f;
4140+ struct timblogiw *lw = (struct timblogiw *)arg;
4141+
4142+ spin_lock_bh(&lw->queue_lock);
4143+ if (lw->dma.filled && !list_empty(&lw->inqueue)) {
4144+ /* put the entry in the outqueue */
4145+ f = list_entry(lw->inqueue.next, struct timblogiw_frame, frame);
4146+
4147+ /* sync memory and unmap */
4148+ dma_sync_single_for_cpu(lw->dev, lw->dma.filled->handle,
4149+ timblogiw_frame_size(lw->cur_norm), DMA_FROM_DEVICE);
4150+
4151+ /* copy data from the DMA buffer */
4152+ memcpy(f->bufmem, lw->dma.filled->buf, f->buf.length);
4153+ /* buffer consumed */
4154+ lw->dma.filled = NULL;
4155+
4156+ do_gettimeofday(&f->buf.timestamp);
4157+ f->buf.sequence = ++lw->frame_count;
4158+ f->buf.field = V4L2_FIELD_NONE;
4159+ f->state = F_DONE;
4160+ f->buf.bytesused = f->buf.length;
4161+ list_move_tail(&f->frame, &lw->outqueue);
4162+ /* wake up any waiter */
4163+ wake_up(&lw->wait_frame);
4164+ } else {
4165+ /* No user buffer available, consume buffer anyway
4166+ * who wants an old video frame?
4167+ */
4168+ lw->dma.filled = NULL;
4169+ }
4170+ spin_unlock_bh(&lw->queue_lock);
4171+}
4172+
4173+static int __timblogiw_start_dma(struct timblogiw *lw)
4174+{
4175+ int size = timblogiw_frame_size(lw->cur_norm);
4176+ int ret;
4177+ struct timbdma_transfer *transfer = lw->dma.transfer + lw->dma.curr;
4178+ int bytes_per_line = timblogiw_bytes_per_line(lw->cur_norm);
4179+
4180+ ret = timbdma_prep_desc(transfer->desc, transfer->handle, size);
4181+ if (ret)
4182+ goto err;
4183+
4184+ ret = timbdma_start(DMA_IRQ_VIDEO_RX, transfer->desc, bytes_per_line);
4185+ if (ret)
4186+ goto err;
4187+ return ret;
4188+err:
4189+ return ret;
4190+}
4191+
4192+static int timblogiw_isr(u32 flag, void *pdev)
4193+{
4194+ struct timblogiw *lw = (struct timblogiw *)pdev;
4195+
4196+ if (lw->stream == STREAM_OFF) {
4197+ timbdma_stop(DMA_IRQ_VIDEO_RX);
4198+ /* stream is stopped, signal that the current transfer is
4199+ * finished */
4200+ complete(&lw->irq_done);
4201+ } else {
4202+ struct timeval timestamp;
4203+
4204+ do_gettimeofday(&timestamp);
4205+
4206+ if (!lw->dma.filled && (flag & DMA_IRQ_VIDEO_RX)) {
4207+ /* Got a frame, store it, and flip to next DMA buffer */
4208+ lw->dma.filled = lw->dma.transfer + lw->dma.curr;
4209+ lw->dma.curr = !lw->dma.curr;
4210+ } else if (lw->dma.filled && (flag & DMA_IRQ_VIDEO_RX))
4211+ printk("No free frame\n");
4212+
4213+ __timblogiw_start_dma(lw);
4214+
4215+ if (flag & DMA_IRQ_VIDEO_DROP)
4216+ dbg("%s: frame dropped\n", __func__);
4217+ if (flag & DMA_IRQ_VIDEO_RX) {
4218+ dbg("%s: frame RX\n", __func__);
4219+ tasklet_schedule(&lw->tasklet);
4220+ }
4221+ }
4222+
4223+ return 0;
4224+}
4225+
4226+static void timblogiw_empty_framequeues(struct timblogiw *lw)
4227+{
4228+ u32 i;
4229+
4230+ dbg("%s\n", __func__);
4231+
4232+ INIT_LIST_HEAD(&lw->inqueue);
4233+ INIT_LIST_HEAD(&lw->outqueue);
4234+
4235+ for (i = 0; i < lw->num_frames; i++) {
4236+ lw->frame[i].state = F_UNUSED;
4237+ lw->frame[i].buf.bytesused = 0;
4238+ }
4239+}
4240+
4241+u32 timblogiw_request_buffers(struct timblogiw *lw, u32 count)
4242+{
4243+ /* needs to be page aligned cause the */
4244+ /* buffers can be mapped individually! */
4245+ const size_t imagesize = PAGE_ALIGN(timblogiw_frame_size(lw->cur_norm));
4246+ void *buff = NULL;
4247+ int ret;
4248+ u32 i;
4249+
4250+ dbg("%s - request of %i buffers of size %zi\n",
4251+ __func__, count, imagesize);
4252+
4253+ lw->dma.transfer[0].buf = kzalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
4254+ if (!lw->dma.transfer[0].buf)
4255+ goto err;
4256+
4257+ lw->dma.transfer[1].buf = kzalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
4258+ if (!lw->dma.transfer[1].buf)
4259+ goto err;
4260+
4261+ lw->dma.transfer[0].desc =
4262+ timbdma_alloc_desc(DMA_BUFFER_SIZE, BYTES_PER_LINE * 2);
4263+ if (!lw->dma.transfer[0].desc)
4264+ goto err;
4265+
4266+ lw->dma.transfer[1].desc =
4267+ timbdma_alloc_desc(DMA_BUFFER_SIZE, BYTES_PER_LINE * 2);
4268+ if (!lw->dma.transfer[1].desc)
4269+ goto err;
4270+
4271+ /* map up the DMA buffers */
4272+ lw->dma.transfer[0].handle = dma_map_single(lw->dev,
4273+ lw->dma.transfer[0].buf, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
4274+ ret = dma_mapping_error(lw->dev, lw->dma.transfer[0].handle);
4275+ if (ret) {
4276+ lw->dma.transfer[0].handle = 0;
4277+ goto err;
4278+ }
4279+
4280+ lw->dma.transfer[1].handle = dma_map_single(lw->dev,
4281+ lw->dma.transfer[1].buf, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
4282+ ret = dma_mapping_error(lw->dev, lw->dma.transfer[1].handle);
4283+ if (ret) {
4284+ lw->dma.transfer[1].handle = 0;
4285+ goto err;
4286+ }
4287+
4288+ if (count > TIMBLOGIW_NUM_FRAMES)
4289+ count = TIMBLOGIW_NUM_FRAMES;
4290+
4291+ lw->num_frames = count;
4292+ while (lw->num_frames > 0) {
4293+ buff = vmalloc_32(lw->num_frames * imagesize);
4294+ if (buff) {
4295+ memset(buff, 0, lw->num_frames * imagesize);
4296+ break;
4297+ }
4298+ lw->num_frames--;
4299+ }
4300+
4301+ for (i = 0; i < lw->num_frames; i++) {
4302+ lw->frame[i].bufmem = buff + i * imagesize;
4303+ lw->frame[i].buf.index = i;
4304+ lw->frame[i].buf.m.offset = i * imagesize;
4305+ lw->frame[i].buf.length = timblogiw_frame_size(lw->cur_norm);
4306+ lw->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
4307+ lw->frame[i].buf.sequence = 0;
4308+ lw->frame[i].buf.field = V4L2_FIELD_NONE;
4309+ lw->frame[i].buf.memory = V4L2_MEMORY_MMAP;
4310+ lw->frame[i].buf.flags = 0;
4311+ }
4312+
4313+ lw->dma.curr = 0;
4314+ lw->dma.filled = NULL;
4315+ return lw->num_frames;
4316+err:
4317+ timblogiw_release_buffers(lw);
4318+
4319+ return 0;
4320+}
4321+
4322+static void timblogiw_release_buffers(struct timblogiw *lw)
4323+{
4324+ dbg("%s\n", __func__);
4325+
4326+ if (lw->frame[0].bufmem != NULL) {
4327+ vfree(lw->frame[0].bufmem);
4328+ lw->frame[0].bufmem = NULL;
4329+ }
4330+
4331+ if (lw->dma.transfer[0].handle)
4332+ dma_unmap_single(lw->dev, lw->dma.transfer[0].handle,
4333+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
4334+
4335+ if (lw->dma.transfer[1].handle)
4336+ dma_unmap_single(lw->dev, lw->dma.transfer[1].handle,
4337+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
4338+
4339+ if (lw->dma.transfer[0].buf != NULL)
4340+ kfree(lw->dma.transfer[0].buf);
4341+ lw->dma.transfer[0].buf = NULL;
4342+
4343+ if (lw->dma.transfer[1].buf != NULL)
4344+ kfree(lw->dma.transfer[1].buf);
4345+ lw->dma.transfer[1].buf = NULL;
4346+
4347+ if (lw->dma.transfer[0].desc != NULL)
4348+ timbdma_free_desc(lw->dma.transfer[0].desc);
4349+ lw->dma.transfer[0].desc = NULL;
4350+
4351+ if (lw->dma.transfer[1].desc != NULL)
4352+ timbdma_free_desc(lw->dma.transfer[1].desc);
4353+ lw->dma.transfer[1].desc = NULL;
4354+
4355+
4356+ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
4357+}
4358+
4359+/* IOCTL functions */
4360+
4361+static int timblogiw_g_fmt(struct file *file, void *priv,
4362+ struct v4l2_format *format)
4363+{
4364+ struct video_device *vdev = video_devdata(file);
4365+ struct timblogiw *lw = video_get_drvdata(vdev);
4366+
4367+ dbg("%s\n", __func__);
4368+
4369+ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
4370+ return -EINVAL;
4371+
4372+ format->fmt.pix.width = lw->cur_norm->width;
4373+ format->fmt.pix.height = lw->cur_norm->height;
4374+ format->fmt.pix.pixelformat = TIMBLOGIW_VIDEO_FORMAT;
4375+ format->fmt.pix.bytesperline = timblogiw_bytes_per_line(lw->cur_norm);
4376+ format->fmt.pix.sizeimage = timblogiw_frame_size(lw->cur_norm);
4377+ format->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
4378+ format->fmt.pix.field = V4L2_FIELD_NONE;
4379+ return 0;
4380+}
4381+
4382+static int timblogiw_try_fmt(struct file *file, void *priv,
4383+ struct v4l2_format *format)
4384+{
4385+ struct video_device *vdev = video_devdata(file);
4386+ struct timblogiw *lw = video_get_drvdata(vdev);
4387+ struct v4l2_pix_format *pix = &format->fmt.pix;
4388+
4389+ dbg("%s - width=%d, height=%d, pixelformat=%d, field=%d\n"
4390+ "bytes per line %d, size image: %d, colorspace: %d\n",
4391+ __func__,
4392+ pix->width, pix->height, pix->pixelformat, pix->field,
4393+ pix->bytesperline, pix->sizeimage, pix->colorspace);
4394+
4395+ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
4396+ return -EINVAL;
4397+
4398+ if (pix->field != V4L2_FIELD_NONE)
4399+ return -EINVAL;
4400+
4401+ if (pix->pixelformat != TIMBLOGIW_VIDEO_FORMAT)
4402+ return -EINVAL;
4403+
4404+ if ((lw->cur_norm->height != pix->height) ||
4405+ (lw->cur_norm->width != pix->width)) {
4406+ pix->width = lw->cur_norm->width;
4407+ pix->height = lw->cur_norm->height;
4408+ }
4409+
4410+ return 0;
4411+}
4412+
4413+static int timblogiw_querycap(struct file *file, void *priv,
4414+ struct v4l2_capability *cap)
4415+{
4416+ dbg("%s\n", __func__);
4417+ memset(cap, 0, sizeof(*cap));
4418+ strncpy(cap->card, "Timberdale Video", sizeof(cap->card)-1);
4419+ strncpy(cap->driver, "Timblogiw", sizeof(cap->card)-1);
4420+ cap->version = TIMBLOGIW_VERSION_CODE;
4421+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
4422+ V4L2_CAP_STREAMING;
4423+
4424+ return 0;
4425+}
4426+
4427+static int timblogiw_enum_fmt(struct file *file, void *priv,
4428+ struct v4l2_fmtdesc *fmt)
4429+{
4430+ dbg("%s, index: %d\n", __func__, fmt->index);
4431+
4432+ if (fmt->index != 0)
4433+ return -EINVAL;
4434+ memset(fmt, 0, sizeof(*fmt));
4435+ fmt->index = 0;
4436+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
4437+ strncpy(fmt->description, "4:2:2, packed, YUYV",
4438+ sizeof(fmt->description)-1);
4439+ fmt->pixelformat = TIMBLOGIW_VIDEO_FORMAT;
4440+ memset(fmt->reserved, 0, sizeof(fmt->reserved));
4441+
4442+ return 0;
4443+}
4444+
4445+static int timblogiw_reqbufs(struct file *file, void *priv,
4446+ struct v4l2_requestbuffers *rb)
4447+{
4448+ struct video_device *vdev = video_devdata(file);
4449+ struct timblogiw *lw = video_get_drvdata(vdev);
4450+
4451+ dbg("%s\n", __func__);
4452+
4453+ if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
4454+ rb->memory != V4L2_MEMORY_MMAP)
4455+ return -EINVAL;
4456+
4457+ timblogiw_empty_framequeues(lw);
4458+
4459+ timblogiw_release_buffers(lw);
4460+ if (rb->count)
4461+ rb->count = timblogiw_request_buffers(lw, rb->count);
4462+
4463+ dbg("%s - VIDIOC_REQBUFS: io method is mmap. num bufs %i\n",
4464+ __func__, rb->count);
4465+
4466+ return 0;
4467+}
4468+
4469+static int timblogiw_querybuf(struct file *file, void *priv,
4470+ struct v4l2_buffer *b)
4471+{
4472+ struct video_device *vdev = video_devdata(file);
4473+ struct timblogiw *lw = video_get_drvdata(vdev);
4474+
4475+ dbg("%s\n", __func__);
4476+
4477+ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
4478+ b->index >= lw->num_frames)
4479+ return -EINVAL;
4480+
4481+ memcpy(b, &lw->frame[b->index].buf, sizeof(*b));
4482+
4483+ if (lw->frame[b->index].vma_use_count)
4484+ b->flags |= V4L2_BUF_FLAG_MAPPED;
4485+
4486+ if (lw->frame[b->index].state == F_DONE)
4487+ b->flags |= V4L2_BUF_FLAG_DONE;
4488+ else if (lw->frame[b->index].state != F_UNUSED)
4489+ b->flags |= V4L2_BUF_FLAG_QUEUED;
4490+
4491+ return 0;
4492+}
4493+
4494+static int timblogiw_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
4495+{
4496+ struct video_device *vdev = video_devdata(file);
4497+ struct timblogiw *lw = video_get_drvdata(vdev);
4498+ unsigned long lock_flags;
4499+
4500+ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
4501+ b->index >= lw->num_frames)
4502+ return -EINVAL;
4503+
4504+ if (lw->frame[b->index].state != F_UNUSED)
4505+ return -EAGAIN;
4506+
4507+ if (!lw->frame[b->index].bufmem)
4508+ return -EINVAL;
4509+
4510+ if (b->memory != V4L2_MEMORY_MMAP)
4511+ return -EINVAL;
4512+
4513+ lw->frame[b->index].state = F_QUEUED;
4514+
4515+ spin_lock_irqsave(&lw->queue_lock, lock_flags);
4516+ list_add_tail(&lw->frame[b->index].frame, &lw->inqueue);
4517+ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
4518+
4519+ return 0;
4520+}
4521+
4522+static int timblogiw_dqbuf(struct file *file, void *priv,
4523+ struct v4l2_buffer *b)
4524+{
4525+ struct video_device *vdev = video_devdata(file);
4526+ struct timblogiw *lw = video_get_drvdata(vdev);
4527+ struct timblogiw_frame *f;
4528+ unsigned long lock_flags;
4529+ int ret = 0;
4530+
4531+ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
4532+ dbg("%s - VIDIOC_DQBUF, illegal buf type!\n",
4533+ __func__);
4534+ return -EINVAL;
4535+ }
4536+
4537+ if (list_empty(&lw->outqueue)) {
4538+ if (file->f_flags & O_NONBLOCK)
4539+ return -EAGAIN;
4540+
4541+ ret = wait_event_interruptible(lw->wait_frame,
4542+ !list_empty(&lw->outqueue));
4543+ if (ret)
4544+ return ret;
4545+ }
4546+
4547+ spin_lock_irqsave(&lw->queue_lock, lock_flags);
4548+ f = list_entry(lw->outqueue.next,
4549+ struct timblogiw_frame, frame);
4550+ list_del(lw->outqueue.next);
4551+ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
4552+
4553+ f->state = F_UNUSED;
4554+ memcpy(b, &f->buf, sizeof(*b));
4555+
4556+ if (f->vma_use_count)
4557+ b->flags |= V4L2_BUF_FLAG_MAPPED;
4558+
4559+ return 0;
4560+}
4561+
4562+static int timblogiw_g_std(struct file *file, void *priv, v4l2_std_id *std)
4563+{
4564+ struct video_device *vdev = video_devdata(file);
4565+ struct timblogiw *lw = video_get_drvdata(vdev);
4566+
4567+ dbg("%s\n", __func__);
4568+
4569+ *std = lw->cur_norm->std;
4570+ return 0;
4571+}
4572+
4573+static int timblogiw_s_std(struct file *file, void *priv, v4l2_std_id *std)
4574+{
4575+ struct video_device *vdev = video_devdata(file);
4576+ struct timblogiw *lw = video_get_drvdata(vdev);
4577+ int err;
4578+
4579+ dbg("%s\n", __func__);
4580+
4581+ err = v4l2_subdev_call(lw->sd_enc, core, s_std, *std);
4582+ if (!err)
4583+ lw->cur_norm = timblogiw_get_norm(*std);
4584+
4585+ return err;
4586+}
4587+
4588+static int timblogiw_enuminput(struct file *file, void *priv,
4589+ struct v4l2_input *inp)
4590+{
4591+ dbg("%s\n", __func__);
4592+
4593+ if (inp->index != 0)
4594+ return -EINVAL;
4595+
4596+ memset(inp, 0, sizeof(*inp));
4597+ inp->index = 0;
4598+
4599+ strncpy(inp->name, "Timb input 1", sizeof(inp->name) - 1);
4600+ inp->type = V4L2_INPUT_TYPE_CAMERA;
4601+ inp->std = V4L2_STD_ALL;
4602+
4603+ return 0;
4604+}
4605+
4606+static int timblogiw_g_input(struct file *file, void *priv,
4607+ unsigned int *input)
4608+{
4609+ dbg("%s\n", __func__);
4610+
4611+ *input = 0;
4612+
4613+ return 0;
4614+}
4615+
4616+static int timblogiw_s_input(struct file *file, void *priv, unsigned int input)
4617+{
4618+ dbg("%s\n", __func__);
4619+
4620+ if (input != 0)
4621+ return -EINVAL;
4622+ return 0;
4623+}
4624+
4625+static int timblogiw_streamon(struct file *file, void *priv, unsigned int type)
4626+{
4627+ struct video_device *vdev = video_devdata(file);
4628+ struct timblogiw *lw = video_get_drvdata(vdev);
4629+ struct timblogiw_frame *f;
4630+
4631+ dbg("%s\n", __func__);
4632+
4633+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
4634+ dbg("%s - No capture device\n", __func__);
4635+ return -EINVAL;
4636+ }
4637+
4638+ if (list_empty(&lw->inqueue)) {
4639+ dbg("%s - inqueue is empty\n", __func__);
4640+ return -EINVAL;
4641+ }
4642+
4643+ if (lw->stream == STREAM_ON)
4644+ return 0;
4645+
4646+ lw->stream = STREAM_ON;
4647+
4648+ f = list_entry(lw->inqueue.next,
4649+ struct timblogiw_frame, frame);
4650+
4651+ dbg("%s - f size: %d, bpr: %d, dma addr: %x\n", __func__,
4652+ timblogiw_frame_size(lw->cur_norm),
4653+ timblogiw_bytes_per_line(lw->cur_norm),
4654+ (unsigned int)lw->dma.transfer[lw->dma.curr].handle);
4655+
4656+ __timblogiw_start_dma(lw);
4657+
4658+ return 0;
4659+}
4660+
4661+static void timblogiw_stopstream(struct timblogiw *lw)
4662+{
4663+ if (lw->stream == STREAM_ON) {
4664+ /* The FPGA might be busy copying the current frame, we have
4665+ * to wait for the frame to finish
4666+ */
4667+ unsigned long lock_flags;
4668+
4669+ init_completion(&lw->irq_done);
4670+
4671+ spin_lock_irqsave(&lw->queue_lock, lock_flags);
4672+ lw->stream = STREAM_OFF;
4673+ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
4674+
4675+ wait_for_completion_timeout(&lw->irq_done,
4676+ msecs_to_jiffies(100));
4677+ }
4678+}
4679+
4680+static int timblogiw_streamoff(struct file *file, void *priv,
4681+ unsigned int type)
4682+{
4683+ struct video_device *vdev = video_devdata(file);
4684+ struct timblogiw *lw = video_get_drvdata(vdev);
4685+
4686+ dbg("%s\n", __func__);
4687+
4688+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
4689+ return -EINVAL;
4690+
4691+ timblogiw_stopstream(lw);
4692+
4693+ timblogiw_empty_framequeues(lw);
4694+
4695+ return 0;
4696+}
4697+
4698+static int timblogiw_querystd(struct file *file, void *priv, v4l2_std_id *std)
4699+{
4700+ struct video_device *vdev = video_devdata(file);
4701+ struct timblogiw *lw = video_get_drvdata(vdev);
4702+
4703+ dbg("%s\n", __func__);
4704+
4705+ return v4l2_subdev_call(lw->sd_enc, video, querystd, std);
4706+}
4707+
4708+static int timblogiw_enum_framesizes(struct file *file, void *priv,
4709+ struct v4l2_frmsizeenum *fsize)
4710+{
4711+ struct video_device *vdev = video_devdata(file);
4712+ struct timblogiw *lw = video_get_drvdata(vdev);
4713+
4714+ dbg("%s - index: %d, format: %d\n", __func__,
4715+ fsize->index, fsize->pixel_format);
4716+
4717+ if ((fsize->index != 0) ||
4718+ (fsize->pixel_format != TIMBLOGIW_VIDEO_FORMAT))
4719+ return -EINVAL;
4720+
4721+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
4722+ fsize->discrete.width = lw->cur_norm->width;
4723+ fsize->discrete.height = lw->cur_norm->height;
4724+
4725+ return 0;
4726+}
4727+
4728+struct find_addr_arg {
4729+ char const *name;
4730+ struct i2c_client *client;
4731+};
4732+
4733+static int find_name(struct device *dev, void *argp)
4734+{
4735+ struct find_addr_arg *arg = (struct find_addr_arg *)argp;
4736+ struct i2c_client *client = i2c_verify_client(dev);
4737+
4738+ if (client && !strcmp(arg->name, client->name) && client->driver)
4739+ arg->client = client;
4740+
4741+ return 0;
4742+}
4743+
4744+static struct i2c_client *find_client(struct i2c_adapter *adapt,
4745+ const char *name)
4746+{
4747+ struct find_addr_arg find_arg;
4748+ /* now find the client */
4749+#ifdef MODULE
4750+ request_module(name);
4751+#endif
4752+ /* code for finding the I2C child */
4753+ find_arg.name = name;
4754+ find_arg.client = NULL;
4755+ device_for_each_child(&adapt->dev, &find_arg, find_name);
4756+ return find_arg.client;
4757+}
4758+
4759+/*******************************
4760+ * Device Operations functions *
4761+ *******************************/
4762+
4763+static int timblogiw_open(struct file *file)
4764+{
4765+ struct video_device *vdev = video_devdata(file);
4766+ struct timblogiw *lw = video_get_drvdata(vdev);
4767+ v4l2_std_id std = V4L2_STD_UNKNOWN;
4768+ int err = 0;
4769+
4770+ dbg("%s -\n", __func__);
4771+
4772+ mutex_init(&lw->fileop_lock);
4773+ spin_lock_init(&lw->queue_lock);
4774+ init_waitqueue_head(&lw->wait_frame);
4775+
4776+ mutex_lock(&lw->lock);
4777+
4778+ if (!lw->sd_enc) {
4779+ struct i2c_adapter *adapt;
4780+ struct i2c_client *encoder;
4781+
4782+ /* find the video decoder */
4783+ adapt = i2c_get_adapter(lw->pdata.i2c_adapter);
4784+ if (!adapt) {
4785+ printk(KERN_ERR DRIVER_NAME": No I2C bus\n");
4786+ err = -ENODEV;
4787+ goto out;
4788+ }
4789+
4790+ /* now find the encoder */
4791+ encoder = find_client(adapt, lw->pdata.encoder);
4792+
4793+ i2c_put_adapter(adapt);
4794+
4795+ if (!encoder) {
4796+ printk(KERN_ERR DRIVER_NAME": Failed to get encoder\n");
4797+ err = -ENODEV;
4798+ goto out;
4799+ }
4800+
4801+ lw->sd_enc = i2c_get_clientdata(encoder);
4802+ lw->enc_owner = lw->sd_enc->owner;
4803+ /* Lock the module */
4804+ if (!try_module_get(lw->enc_owner)) {
4805+ lw->sd_enc = NULL;
4806+ err = -ENODEV;
4807+ goto out;
4808+ }
4809+ }
4810+
4811+ timblogiw_querystd(file, NULL, &std);
4812+ lw->cur_norm = timblogiw_get_norm(std);
4813+
4814+ file->private_data = lw;
4815+ lw->stream = STREAM_OFF;
4816+ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
4817+
4818+ timblogiw_empty_framequeues(lw);
4819+ timbdma_set_interruptcb(DMA_IRQ_VIDEO_RX | DMA_IRQ_VIDEO_DROP,
4820+ timblogiw_isr, (void *)lw);
4821+
4822+out:
4823+ mutex_unlock(&lw->lock);
4824+
4825+ return err;
4826+}
4827+
4828+static int timblogiw_close(struct file *file)
4829+{
4830+ struct timblogiw *lw = file->private_data;
4831+
4832+ dbg("%s - entry\n", __func__);
4833+
4834+ mutex_lock(&lw->lock);
4835+
4836+ timblogiw_stopstream(lw);
4837+
4838+ timbdma_set_interruptcb(DMA_IRQ_VIDEO_RX | DMA_IRQ_VIDEO_DROP, NULL,
4839+ NULL);
4840+ timblogiw_release_buffers(lw);
4841+
4842+ mutex_unlock(&lw->lock);
4843+ return 0;
4844+}
4845+
4846+static ssize_t timblogiw_read(struct file *file, char __user *data,
4847+ size_t count, loff_t *ppos)
4848+{
4849+ dbg("%s - read request\n", __func__);
4850+ return -EINVAL;
4851+}
4852+
4853+static void timblogiw_vm_open(struct vm_area_struct *vma)
4854+{
4855+ struct timblogiw_frame *f = vma->vm_private_data;
4856+ f->vma_use_count++;
4857+}
4858+
4859+static void timblogiw_vm_close(struct vm_area_struct *vma)
4860+{
4861+ struct timblogiw_frame *f = vma->vm_private_data;
4862+ f->vma_use_count--;
4863+}
4864+
4865+static struct vm_operations_struct timblogiw_vm_ops = {
4866+ .open = timblogiw_vm_open,
4867+ .close = timblogiw_vm_close,
4868+};
4869+
4870+static int timblogiw_mmap(struct file *filp, struct vm_area_struct *vma)
4871+{
4872+ unsigned long size = vma->vm_end - vma->vm_start, start = vma->vm_start;
4873+ void *pos;
4874+ u32 i;
4875+ int ret = -EINVAL;
4876+
4877+ struct timblogiw *lw = filp->private_data;
4878+ dbg("%s\n", __func__);
4879+
4880+ if (mutex_lock_interruptible(&lw->fileop_lock))
4881+ return -ERESTARTSYS;
4882+
4883+ if (!(vma->vm_flags & VM_WRITE) ||
4884+ size != PAGE_ALIGN(lw->frame[0].buf.length))
4885+ goto error_unlock;
4886+
4887+ for (i = 0; i < lw->num_frames; i++)
4888+ if ((lw->frame[i].buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
4889+ break;
4890+
4891+ if (i == lw->num_frames) {
4892+ dbg("%s - user supplied mapping address is out of range\n",
4893+ __func__);
4894+ goto error_unlock;
4895+ }
4896+
4897+ vma->vm_flags |= VM_IO;
4898+ vma->vm_flags |= VM_RESERVED; /* Do not swap out this VMA */
4899+
4900+ pos = lw->frame[i].bufmem;
4901+ while (size > 0) { /* size is page-aligned */
4902+ if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
4903+ dbg("%s - vm_insert_page failed\n", __func__);
4904+ ret = -EAGAIN;
4905+ goto error_unlock;
4906+ }
4907+ start += PAGE_SIZE;
4908+ pos += PAGE_SIZE;
4909+ size -= PAGE_SIZE;
4910+ }
4911+
4912+ vma->vm_ops = &timblogiw_vm_ops;
4913+ vma->vm_private_data = &lw->frame[i];
4914+ timblogiw_vm_open(vma);
4915+ ret = 0;
4916+
4917+error_unlock:
4918+ mutex_unlock(&lw->fileop_lock);
4919+ return ret;
4920+}
4921+
4922+
4923+void timblogiw_vdev_release(struct video_device *vdev)
4924+{
4925+ kfree(vdev);
4926+}
4927+
4928+static const struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
4929+ .vidioc_querycap = timblogiw_querycap,
4930+ .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
4931+ .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
4932+ .vidioc_try_fmt_vid_cap = timblogiw_try_fmt,
4933+ .vidioc_s_fmt_vid_cap = timblogiw_try_fmt,
4934+ .vidioc_reqbufs = timblogiw_reqbufs,
4935+ .vidioc_querybuf = timblogiw_querybuf,
4936+ .vidioc_qbuf = timblogiw_qbuf,
4937+ .vidioc_dqbuf = timblogiw_dqbuf,
4938+ .vidioc_g_std = timblogiw_g_std,
4939+ .vidioc_s_std = timblogiw_s_std,
4940+ .vidioc_enum_input = timblogiw_enuminput,
4941+ .vidioc_g_input = timblogiw_g_input,
4942+ .vidioc_s_input = timblogiw_s_input,
4943+ .vidioc_streamon = timblogiw_streamon,
4944+ .vidioc_streamoff = timblogiw_streamoff,
4945+ .vidioc_querystd = timblogiw_querystd,
4946+ .vidioc_enum_framesizes = timblogiw_enum_framesizes,
4947+};
4948+
4949+static const struct v4l2_file_operations timblogiw_fops = {
4950+ .owner = THIS_MODULE,
4951+ .open = timblogiw_open,
4952+ .release = timblogiw_close,
4953+ .ioctl = video_ioctl2, /* V4L2 ioctl handler */
4954+ .mmap = timblogiw_mmap,
4955+ .read = timblogiw_read,
4956+};
4957+
4958+static const struct video_device timblogiw_template = {
4959+ .name = TIMBLOGIWIN_NAME,
4960+ .fops = &timblogiw_fops,
4961+ .ioctl_ops = &timblogiw_ioctl_ops,
4962+ .release = &timblogiw_vdev_release,
4963+ .minor = -1,
4964+ .tvnorms = V4L2_STD_PAL | V4L2_STD_NTSC
4965+};
4966+
4967+static int timblogiw_probe(struct platform_device *dev)
4968+{
4969+ int err;
4970+ struct timblogiw *lw = NULL;
4971+ struct resource *iomem;
4972+ struct timb_video_platform_data *pdata = dev->dev.platform_data;
4973+
4974+ if (!pdata) {
4975+ printk(KERN_ERR DRIVER_NAME": Platform data missing\n");
4976+ err = -EINVAL;
4977+ goto err_mem;
4978+ }
4979+
4980+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
4981+ if (!iomem) {
4982+ err = -EINVAL;
4983+ goto err_mem;
4984+ }
4985+
4986+ lw = kzalloc(sizeof(*lw), GFP_KERNEL);
4987+ if (!lw) {
4988+ err = -ENOMEM;
4989+ goto err_mem;
4990+ }
4991+
4992+ if (dev->dev.parent)
4993+ lw->dev = dev->dev.parent;
4994+ else
4995+ lw->dev = &dev->dev;
4996+
4997+ memcpy(&lw->pdata, pdata, sizeof(lw->pdata));
4998+
4999+ mutex_init(&lw->lock);
5000+
5001+ lw->video_dev = video_device_alloc();
5002+ if (!lw->video_dev) {
5003+ err = -ENOMEM;
5004+ goto err_mem;
5005+ }
5006+ *lw->video_dev = timblogiw_template;
5007+
5008+ err = video_register_device(lw->video_dev, VFL_TYPE_GRABBER, 0);
5009+ if (err) {
5010+ printk(KERN_ALERT DRIVER_NAME": Error reg video\n");
5011+ goto err_request;
5012+ }
5013+
5014+ tasklet_init(&lw->tasklet, timblogiw_handleframe, (unsigned long)lw);
5015+
5016+ if (!request_mem_region(iomem->start, resource_size(iomem),
5017+ DRIVER_NAME)) {
5018+ err = -EBUSY;
5019+ goto err_request;
5020+ }
5021+
5022+ lw->membase = ioremap(iomem->start, resource_size(iomem));
5023+ if (!lw->membase) {
5024+ err = -ENOMEM;
5025+ goto err_ioremap;
5026+ }
5027+
5028+ platform_set_drvdata(dev, lw);
5029+ video_set_drvdata(lw->video_dev, lw);
5030+
5031+ return 0;
5032+
5033+err_ioremap:
5034+ release_mem_region(iomem->start, resource_size(iomem));
5035+err_request:
5036+ if (-1 != lw->video_dev->minor)
5037+ video_unregister_device(lw->video_dev);
5038+ else
5039+ video_device_release(lw->video_dev);
5040+err_mem:
5041+ kfree(lw);
5042+ printk(KERN_ERR DRIVER_NAME ": Failed to register: %d\n", err);
5043+
5044+ return err;
5045+}
5046+
5047+static int timblogiw_remove(struct platform_device *dev)
5048+{
5049+ struct timblogiw *lw = platform_get_drvdata(dev);
5050+ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
5051+
5052+ if (-1 != lw->video_dev->minor)
5053+ video_unregister_device(lw->video_dev);
5054+ else
5055+ video_device_release(lw->video_dev);
5056+
5057+ if (lw->sd_enc)
5058+ module_put(lw->enc_owner);
5059+ tasklet_kill(&lw->tasklet);
5060+ iounmap(lw->membase);
5061+ release_mem_region(iomem->start, resource_size(iomem));
5062+ kfree(lw);
5063+
5064+ return 0;
5065+}
5066+
5067+static struct platform_driver timblogiw_platform_driver = {
5068+ .driver = {
5069+ .name = DRIVER_NAME,
5070+ .owner = THIS_MODULE,
5071+ },
5072+ .probe = timblogiw_probe,
5073+ .remove = timblogiw_remove,
5074+};
5075+
5076+/*--------------------------------------------------------------------------*/
5077+
5078+static int __init timblogiw_init(void)
5079+{
5080+ return platform_driver_register(&timblogiw_platform_driver);
5081+}
5082+
5083+static void __exit timblogiw_exit(void)
5084+{
5085+ platform_driver_unregister(&timblogiw_platform_driver);
5086+}
5087+
5088+module_init(timblogiw_init);
5089+module_exit(timblogiw_exit);
5090+
5091+MODULE_DESCRIPTION("Timberdale Video In driver");
5092+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
5093+MODULE_LICENSE("GPL v2");
5094+MODULE_ALIAS("platform:"DRIVER_NAME);
5095+
5096diff -uNr linux-2.6.31/drivers/media/video/timblogiw.h linux-2.6.31.new/drivers/media/video/timblogiw.h
5097--- linux-2.6.31/drivers/media/video/timblogiw.h 1969-12-31 16:00:00.000000000 -0800
5098+++ linux-2.6.31.new/drivers/media/video/timblogiw.h 2009-10-23 11:17:28.000000000 -0700
5099@@ -0,0 +1,96 @@
5100+/*
5101+ * timblogiw.h timberdale FPGA LogiWin Video In driver defines
5102+ * Copyright (c) 2009 Intel Corporation
5103+ *
5104+ * This program is free software; you can redistribute it and/or modify
5105+ * it under the terms of the GNU General Public License version 2 as
5106+ * published by the Free Software Foundation.
5107+ *
5108+ * This program is distributed in the hope that it will be useful,
5109+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
5110+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5111+ * GNU General Public License for more details.
5112+ *
5113+ * You should have received a copy of the GNU General Public License
5114+ * along with this program; if not, write to the Free Software
5115+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
5116+ */
5117+
5118+/* Supports:
5119+ * Timberdale FPGA LogiWin Video In
5120+ */
5121+
5122+#ifndef _TIMBLOGIW_H
5123+#define _TIMBLOGIW_H
5124+
5125+#include <linux/interrupt.h>
5126+#include <media/timb_video.h>
5127+#include <linux/completion.h>
5128+
5129+#define TIMBLOGIWIN_NAME "Timberdale Video-In"
5130+
5131+#define TIMBLOGIW_NUM_FRAMES 10
5132+
5133+
5134+enum timblogiw_stream_state {
5135+ STREAM_OFF,
5136+ STREAM_ON,
5137+};
5138+
5139+enum timblogiw_frame_state {
5140+ F_UNUSED = 0,
5141+ F_QUEUED,
5142+ F_DONE,
5143+};
5144+
5145+struct timblogiw_frame {
5146+ void *bufmem;
5147+ struct v4l2_buffer buf;
5148+ enum timblogiw_frame_state state;
5149+ struct list_head frame;
5150+ unsigned long vma_use_count;
5151+};
5152+
5153+struct timblogiw_tvnorm {
5154+ v4l2_std_id std;
5155+ u16 width;
5156+ u16 height;
5157+};
5158+
5159+
5160+
5161+struct timbdma_transfer {
5162+ dma_addr_t handle;
5163+ void *buf;
5164+ void *desc;
5165+};
5166+
5167+struct timblogiw_dma_control {
5168+ struct timbdma_transfer transfer[2];
5169+ struct timbdma_transfer *filled;
5170+ int curr;
5171+};
5172+
5173+struct timblogiw {
5174+ struct timblogiw_frame frame[TIMBLOGIW_NUM_FRAMES];
5175+ int num_frames;
5176+ unsigned int frame_count;
5177+ struct list_head inqueue, outqueue;
5178+ spinlock_t queue_lock; /* mutual exclusion */
5179+ enum timblogiw_stream_state stream;
5180+ struct video_device *video_dev;
5181+ struct mutex lock, fileop_lock;
5182+ wait_queue_head_t wait_frame;
5183+ struct completion irq_done;
5184+ struct timblogiw_tvnorm const *cur_norm;
5185+ struct device *dev;
5186+ struct timblogiw_dma_control dma;
5187+ void __iomem *membase;
5188+ struct tasklet_struct tasklet;
5189+ struct timb_video_platform_data pdata;
5190+ struct v4l2_subdev *sd_enc; /* encoder */
5191+ struct module *enc_owner;
5192+};
5193+
5194+#endif /* _TIMBLOGIW_H */
5195+
5196diff -uNr linux-2.6.31/drivers/mfd/Kconfig linux-2.6.31.new/drivers/mfd/Kconfig
5197--- linux-2.6.31/drivers/mfd/Kconfig 2009-10-23 11:18:30.000000000 -0700
5198+++ linux-2.6.31.new/drivers/mfd/Kconfig 2009-10-23 11:17:29.000000000 -0700
5199@@ -263,6 +263,25 @@
5200 This enables the PCAP ASIC present on EZX Phones. This is
5201 needed for MMC, TouchScreen, Sound, USB, etc..
5202
5203+config MFD_TIMBERDALE
5204+ tristate "Support for the Timberdale FPGA"
5205+ select MFD_CORE
5206+ depends on PCI
5207+ ---help---
5208+ This is the core driver for the timberdale FPGA. This device is a
5209+ multifunctional device which may provide numerous interfaces.
5210+
5211+ The timberdale FPGA can be found on the Intel Atom development board
5212+ for automotive in-vehicle infotainment, called Russellville.
5213+
5214+config MFD_TIMBERDALE_DMA
5215+ tristate "Support for timberdale DMA"
5216+ depends on MFD_TIMBERDALE
5217+ depends on HAS_IOMEM
5218+ ---help---
5219+ Add support for the DMA block inside the timberdale FPGA. This is to be able
5220+ to do DMA transfers directly to some of the blocks inside the FPGA
5221+
5222 endmenu
5223
5224 menu "Multimedia Capabilities Port drivers"
5225diff -uNr linux-2.6.31/drivers/mfd/Makefile linux-2.6.31.new/drivers/mfd/Makefile
5226--- linux-2.6.31/drivers/mfd/Makefile 2009-10-23 11:18:30.000000000 -0700
5227+++ linux-2.6.31.new/drivers/mfd/Makefile 2009-10-23 11:17:29.000000000 -0700
5228@@ -44,3 +44,7 @@
5229 obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
5230 obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
5231 obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
5232+
5233+obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
5234+obj-$(CONFIG_MFD_TIMBERDALE_DMA) += timbdma.o
5235+
5236diff -uNr linux-2.6.31/drivers/mfd/timbdma.c linux-2.6.31.new/drivers/mfd/timbdma.c
5237--- linux-2.6.31/drivers/mfd/timbdma.c 1969-12-31 16:00:00.000000000 -0800
5238+++ linux-2.6.31.new/drivers/mfd/timbdma.c 2009-10-23 11:17:29.000000000 -0700
5239@@ -0,0 +1,542 @@
5240+/*
5241+ * timbdma.c timberdale FPGA DMA driver
5242+ * Copyright (c) 2009 Intel Corporation
5243+ *
5244+ * This program is free software; you can redistribute it and/or modify
5245+ * it under the terms of the GNU General Public License version 2 as
5246+ * published by the Free Software Foundation.
5247+ *
5248+ * This program is distributed in the hope that it will be useful,
5249+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
5250+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5251+ * GNU General Public License for more details.
5252+ *
5253+ * You should have received a copy of the GNU General Public License
5254+ * along with this program; if not, write to the Free Software
5255+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
5256+ */
5257+
5258+/* Supports:
5259+ * Timberdale FPGA DMA engine
5260+ */
5261+
5262+#include <linux/version.h>
5263+#include <linux/module.h>
5264+#include <linux/interrupt.h>
5265+#include <linux/platform_device.h>
5266+#include <linux/io-mapping.h>
5267+#include <linux/dma-mapping.h>
5268+#include <linux/mfd/timbdma.h>
5269+
5270+#define DRIVER_NAME "timb-dma"
5271+
5272+#define TIMBDMA_ACR 0x34
5273+#define TIMBDMA_32BIT_ADDR 0x01
5274+
5275+#define TIMBDMA_ISR 0x080000
5276+#define TIMBDMA_IPR 0x080004
5277+#define TIMBDMA_IER 0x080008
5278+
5279+/* DMA configuration registers */
5280+/* RX registers */
5281+#define TIMBDMA_OFFS_RX_DHAR 0x00
5282+#define TIMBDMA_OFFS_RX_DLAR 0x04
5283+#define TIMBDMA_OFFS_RX_LR 0x0C
5284+#define TIMBDMA_OFFS_RX_BLR 0x10
5285+#define TIMBDMA_OFFS_RX_ER 0x14
5286+#define TIMBDMA_RX_EN 0x01
5287+/* bytes per Row, video specific register */
5288+#define TIMBDMA_OFFS_RX_BPRR 0x30
5289+
5290+/* TX registers */
5291+#define TIMBDMA_OFFS_TX_DHAR 0x18
5292+#define TIMBDMA_OFFS_TX_DLAR 0x1C
5293+#define TIMBDMA_OFFS_TX_BLR 0x24
5294+#define TIMBDMA_OFFS_TX_LR 0x28
5295+
5296+#define DMA_DESC_SIZE 8
5297+
5298+struct dma_desc {
5299+ u32 len;
5300+ u32 chunk_size;
5301+ u8 buf[0];
5302+};
5303+
5304+struct timbdma_control {
5305+ timbdma_interruptcb callback;
5306+ void *callback_data;
5307+ dma_addr_t desc;
5308+ int desc_len;
5309+ /* the following are used to store a desc while the hw has not been
5310+ * probed yet
5311+ */
5312+ struct dma_desc *stored_desc;
5313+ int stored_bytes_per_row;
5314+};
5315+
5316+struct timbdma_dev {
5317+ void __iomem *membase;
5318+ struct device *dev;
5319+ struct timbdma_control control[DMA_IRQS];
5320+ spinlock_t lock; /* mutual exclusion */
5321+};
5322+
5323+static struct timbdma_dev *self_g;
5324+
5325+
5326+void *timbdma_alloc_desc(u32 size, u16 alignment)
5327+{
5328+ /* calculate the number of chunks needed */
5329+ int chunk_size = USHORT_MAX - (USHORT_MAX % alignment);
5330+ int chunks = size / chunk_size;
5331+ int len;
5332+ struct dma_desc *dma_desc;
5333+
5334+ if (size % chunk_size)
5335+ chunks++;
5336+
5337+ len = sizeof(struct dma_desc) + DMA_DESC_SIZE * chunks;
5338+
5339+ dma_desc = kzalloc(len, GFP_KERNEL);
5340+ if (dma_desc) {
5341+ dma_desc->len = DMA_DESC_SIZE * chunks;
5342+ dma_desc->chunk_size = chunk_size;
5343+ }
5344+ return dma_desc;
5345+}
5346+EXPORT_SYMBOL(timbdma_alloc_desc);
5347+
5348+void timbdma_free_desc(void *desc)
5349+{
5350+ kfree(desc);
5351+}
5352+EXPORT_SYMBOL(timbdma_free_desc);
5353+
5354+int timbdma_prep_desc(void *desc, dma_addr_t addr, u32 size)
5355+{
5356+ struct dma_desc *dma_desc = desc;
5357+ u8 *buf = dma_desc->buf;
5358+ dma_addr_t cur_addr = addr;
5359+ int chunks = size / dma_desc->chunk_size;
5360+ if (size % dma_desc->chunk_size)
5361+ chunks++;
5362+
5363+ if (dma_desc->len < chunks * DMA_DESC_SIZE)
5364+ return -EINVAL;
5365+
5366+ while (size > 0) {
5367+ int chunk_size = dma_desc->chunk_size;
5368+ if (chunk_size > size)
5369+ chunk_size = size;
5370+ buf[7] = (cur_addr >> 24) & 0xff;
5371+ buf[6] = (cur_addr >> 16) & 0xff;
5372+ buf[5] = (cur_addr >> 8) & 0xff;
5373+ buf[4] = (cur_addr >> 0) & 0xff;
5374+
5375+ buf[3] = (chunk_size >> 8) & 0xff;
5376+ buf[2] = (chunk_size >> 0) & 0xff;
5377+
5378+ buf[1] = 0x00;
5379+ buf[0] = 0x21; /* tran, valid */
5380+
5381+ buf += DMA_DESC_SIZE;
5382+ cur_addr += chunk_size;
5383+ size -= chunk_size;
5384+ }
5385+
5386+ /* make sure to mark the last one as end */
5387+ (buf-DMA_DESC_SIZE)[0] |= 0x2;
5388+
5389+ return 0;
5390+}
5391+EXPORT_SYMBOL(timbdma_prep_desc);
5392+
5393+static irqreturn_t timbdma_handleinterrupt(int irq, void *devid)
5394+{
5395+ struct timbdma_dev *dev = (struct timbdma_dev *)devid;
5396+ u32 ipr, ier;
5397+ int i;
5398+
5399+ ipr = ioread32(dev->membase + TIMBDMA_IPR);
5400+ /* the MSI-X controller is level triggered, help it a bit,
5401+ * by disabling interrupts and re-enable them in the end.
5402+ */
5403+ ier = ioread32(dev->membase + TIMBDMA_IER);
5404+ iowrite32(0, dev->membase + TIMBDMA_IER);
5405+
5406+ if (ipr) {
5407+ /* ack */
5408+ iowrite32(ipr, dev->membase + TIMBDMA_ISR);
5409+
5410+ /* call the callbacks */
5411+ for (i = 0; i < DMA_IRQS; i++) {
5412+ int mask = 1 << i;
5413+ if (ipr & mask) {
5414+ struct timbdma_control *ctrl = dev->control + i;
5415+ struct timbdma_control *unmap_ctrl = ctrl;
5416+
5417+ /* special case for video frame drop */
5418+ if (mask == DMA_IRQ_VIDEO_DROP)
5419+ unmap_ctrl = dev->control + i - 1;
5420+
5421+ /* unmap memory */
5422+ dma_unmap_single(dev->dev, unmap_ctrl->desc,
5423+ unmap_ctrl->desc_len, DMA_TO_DEVICE);
5424+ unmap_ctrl->desc = 0;
5425+
5426+ if (ctrl->callback)
5427+ ctrl->callback(mask,
5428+ ctrl->callback_data);
5429+ }
5430+ }
5431+
5432+ iowrite32(ier, dev->membase + TIMBDMA_IER);
5433+ return IRQ_HANDLED;
5434+ } else {
5435+ iowrite32(ier, dev->membase + TIMBDMA_IER);
5436+ return IRQ_NONE;
5437+ }
5438+}
5439+
5440+static int __timbdma_start(struct timbdma_dev *dev, int index,
5441+ struct dma_desc *dma_desc, int bytes_per_row)
5442+{
5443+ u32 offset;
5444+ unsigned long flags;
5445+ struct timbdma_control *ctrl;
5446+ int err;
5447+
5448+ ctrl = dev->control + index;
5449+
5450+ BUG_ON(ctrl->desc);
5451+
5452+ /* check if we already have a descriptor */
5453+ if (ctrl->desc)
5454+ return -EALREADY;
5455+
5456+ /* map up the descriptor */
5457+ ctrl->desc = dma_map_single(dev->dev, dma_desc->buf, dma_desc->len,
5458+ DMA_TO_DEVICE);
5459+ err = dma_mapping_error(dev->dev, ctrl->desc);
5460+ if (err) {
5461+ ctrl->desc = 0;
5462+ return err;
5463+ }
5464+ ctrl->desc_len = dma_desc->len;
5465+
5466+ /* now enable the DMA transfer */
5467+ offset = index / 2 * 0x40;
5468+
5469+ spin_lock_irqsave(&dev->lock, flags);
5470+ if (!(index % 2)) {
5471+ /* RX */
5472+ /* descriptor address */
5473+ iowrite32(0, dev->membase + offset + TIMBDMA_OFFS_RX_DHAR);
5474+ iowrite32(ctrl->desc, dev->membase + offset +
5475+ TIMBDMA_OFFS_RX_DLAR);
5476+ /* Bytes per line */
5477+ iowrite32(bytes_per_row, dev->membase + offset +
5478+ TIMBDMA_OFFS_RX_BPRR);
5479+ /* enable RX */
5480+ iowrite32(TIMBDMA_RX_EN, dev->membase + offset +
5481+ TIMBDMA_OFFS_RX_ER);
5482+ } else {
5483+ /* TX */
5484+ /* address high */
5485+ iowrite32(0, dev->membase + offset + TIMBDMA_OFFS_TX_DHAR);
5486+ iowrite32(ctrl->desc, dev->membase + offset +
5487+ TIMBDMA_OFFS_TX_DLAR);
5488+ }
5489+ spin_unlock_irqrestore(&dev->lock, flags);
5490+
5491+ return 0;
5492+}
5493+int timbdma_start(u32 flag, void *desc, int bytes_per_row)
5494+{
5495+ int i;
5496+ struct timbdma_dev *dev = self_g;
5497+ struct dma_desc *dma_desc = desc;
5498+ int ret = 0;
5499+
5500+ /* only allow 1 flag bit to be set */
5501+ for (i = 0; i < DMA_IRQS && !(flag & (1 << i)); i++)
5502+ ;
5503+ if (i == DMA_IRQS || (flag & ~(1 << i)))
5504+ return -EINVAL;
5505+
5506+ if (!dev->membase) {
5507+		/* the physical DMA device has not shown up yet */
5508+ unsigned long flags;
5509+ struct timbdma_control *ctrl = dev->control + i;
5510+ BUG_ON(ctrl->stored_desc);
5511+ if (ctrl->stored_desc)
5512+ ret = -EALREADY;
5513+ else {
5514+ spin_lock_irqsave(&dev->lock, flags);
5515+ ctrl->stored_desc = desc;
5516+ ctrl->stored_bytes_per_row = bytes_per_row;
5517+ spin_unlock_irqrestore(&dev->lock, flags);
5518+ }
5519+ } else
5520+ ret = __timbdma_start(dev, i, dma_desc, bytes_per_row);
5521+
5522+ if (ret)
5523+ printk(KERN_ERR DRIVER_NAME": Failed to start DMA: %d\n", ret);
5524+ return ret;
5525+}
5526+EXPORT_SYMBOL(timbdma_start);
5527+
5528+int timbdma_stop(u32 flags)
5529+{
5530+ int i;
5531+ unsigned long irqflags;
5532+ struct timbdma_dev *dev = self_g;
5533+ int ret = 0;
5534+
5535+ spin_lock_irqsave(&dev->lock, irqflags);
5536+
5537+ /* now disable the DMA transfers */
5538+ for (i = 0; i < DMA_IRQS; i++)
5539+ if (flags & (1 << i)) {
5540+ /*
5541+ RX enable registers are located at:
5542+ 0x14
5543+ 0x54
5544+ 0x94
5545+
5546+ TX DESC ADDR LOW registers are located at:
5547+ 0x1C
5548+ 0x5C
5549+ */
5550+ struct timbdma_control *ctrl = dev->control + i;
5551+ if (ctrl->desc) {
5552+ u32 offset = i / 2 * 0x40;
5553+
5554+ if (!(i % 2)) {
5555+ /* even -> RX enable */
5556+ offset += TIMBDMA_OFFS_RX_ER;
5557+ /** TODO: FIX received length */
5558+ } else {
5559+ /* odd -> TX desc addr low */
5560+ offset += TIMBDMA_OFFS_TX_DLAR;
5561+					/** TODO: FIX written length */
5562+ }
5563+
5564+ if (dev->membase)
5565+ iowrite32(0, dev->membase + offset);
5566+
5567+ dma_unmap_single(dev->dev, ctrl->desc,
5568+ ctrl->desc_len, DMA_TO_DEVICE);
5569+ ctrl->desc = 0;
5570+ } else if (ctrl->stored_desc)
5571+ ctrl->stored_desc = NULL;
5572+ }
5573+
5574+ if (dev->membase)
5575+ /* ack any pending IRQs */
5576+ iowrite32(flags, dev->membase + TIMBDMA_ISR);
5577+
5578+ spin_unlock_irqrestore(&dev->lock, irqflags);
5579+
5580+ return ret;
5581+}
5582+EXPORT_SYMBOL(timbdma_stop);
5583+
5584+void timbdma_set_interruptcb(u32 flags, timbdma_interruptcb icb, void *data)
5585+{
5586+ int i;
5587+ unsigned long irqflags;
5588+ struct timbdma_dev *dev = self_g;
5589+ u32 ier;
5590+
5591+ spin_lock_irqsave(&dev->lock, irqflags);
5592+
5593+ for (i = 0; i < DMA_IRQS; i++)
5594+ if (flags & (1 << i)) {
5595+ struct timbdma_control *ctrl = dev->control + i;
5596+ ctrl->callback = icb;
5597+ ctrl->callback_data = data;
5598+ }
5599+
5600+	/* the DMA device might not have shown up yet */
5601+ if (dev->membase) {
5602+ /* Ack any pending IRQ */
5603+ iowrite32(flags, dev->membase + TIMBDMA_ISR);
5604+
5605+ /* if a null callback is given -> clear interrupt,
5606+ * else -> enable
5607+ */
5608+ ier = ioread32(dev->membase + TIMBDMA_IER);
5609+ if (icb != NULL)
5610+ ier |= flags;
5611+ else
5612+ ier &= ~flags;
5613+ iowrite32(ier, dev->membase + TIMBDMA_IER);
5614+ }
5615+
5616+ spin_unlock_irqrestore(&dev->lock, irqflags);
5617+}
5618+EXPORT_SYMBOL(timbdma_set_interruptcb);
5619+
5620+static void timbdma_start_operations(struct timbdma_dev *self)
5621+{
5622+ int i;
5623+ u32 ier;
5624+ unsigned long flags;
5625+
5626+ spin_lock_irqsave(&self->lock, flags);
5627+ ier = ioread32(self->membase + TIMBDMA_IER);
5628+ for (i = 0; i < DMA_IRQS; i++)
5629+ if (self->control[i].callback)
5630+ ier |= 1 << i;
5631+ iowrite32(ier, self->membase + TIMBDMA_IER);
5632+ spin_unlock_irqrestore(&self->lock, flags);
5633+
5634+ /* look for any transfers that were started before the HW was
5635+ * available, and start them
5636+ */
5637+ for (i = 0; i < DMA_IRQS; i++) {
5638+ struct timbdma_control *ctrl = self->control + i;
5639+ if (ctrl->stored_desc) {
5640+ struct dma_desc *dma_desc = ctrl->stored_desc;
5641+ ctrl->stored_desc = NULL;
5642+ if (__timbdma_start(self, i, dma_desc,
5643+ ctrl->stored_bytes_per_row))
5644+ printk(KERN_ERR DRIVER_NAME
5645+ ": Failed to start DMA\n");
5646+ }
5647+ }
5648+}
5649+
5650+
5651+static int timbdma_probe(struct platform_device *dev)
5652+{
5653+ int err, irq;
5654+ struct resource *iomem;
5655+ struct timbdma_dev *self = self_g;
5656+
5657+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
5658+ if (!iomem) {
5659+ err = -EINVAL;
5660+ goto err_request;
5661+ }
5662+
5663+ if (dev->dev.parent)
5664+ self->dev = dev->dev.parent;
5665+ else
5666+ self->dev = &dev->dev;
5667+
5668+ if (!request_mem_region(iomem->start,
5669+ resource_size(iomem), DRIVER_NAME)) {
5670+ err = -EBUSY;
5671+ goto err_request;
5672+ }
5673+
5674+ self->membase = ioremap(iomem->start, resource_size(iomem));
5675+ if (!self->membase) {
5676+ printk(KERN_ERR DRIVER_NAME ": Failed to remap I/O memory\n");
5677+ err = -ENOMEM;
5678+ goto err_ioremap;
5679+ }
5680+
5681+ /* 32bit addressing */
5682+ iowrite32(TIMBDMA_32BIT_ADDR, self->membase + TIMBDMA_ACR);
5683+
5684+ /* disable and clear any interrupts */
5685+ iowrite32(0x0, self->membase + TIMBDMA_IER);
5686+ iowrite32(0x0, self->membase + TIMBDMA_ISR);
5687+
5688+ /* register interrupt */
5689+ irq = platform_get_irq(dev, 0);
5690+ if (irq < 0) {
5691+ err = irq;
5692+ goto err_get_irq;
5693+ }
5694+
5695+ /* request IRQ */
5696+ err = request_irq(irq, timbdma_handleinterrupt, IRQF_SHARED,
5697+ DRIVER_NAME, self);
5698+ if (err) {
5699+ printk(KERN_ERR DRIVER_NAME ": Failed to request IRQ\n");
5700+ goto err_get_irq;
5701+ }
5702+
5703+ platform_set_drvdata(dev, self);
5704+
5705+ /* assign the global pointer */
5706+ self_g = self;
5707+
5708+ timbdma_start_operations(self);
5709+
5710+ return 0;
5711+
5712+err_get_irq:
5713+ iounmap(self->membase);
5714+err_ioremap:
5715+ release_mem_region(iomem->start, resource_size(iomem));
5716+err_request:
5717+ printk(KERN_ERR DRIVER_NAME ": Failed to register Timberdale DMA: %d\n",
5718+ err);
5719+ self->membase = NULL;
5720+ self->dev = NULL;
5721+
5722+ return err;
5723+}
5724+
5725+static int timbdma_remove(struct platform_device *dev)
5726+{
5727+ struct timbdma_dev *self = platform_get_drvdata(dev);
5728+ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
5729+
5730+ free_irq(platform_get_irq(dev, 0), self);
5731+ iounmap(self->membase);
5732+ release_mem_region(iomem->start, resource_size(iomem));
5733+ self->membase = NULL;
5734+ self->dev = NULL;
5735+ return 0;
5736+}
5737+
5738+static struct platform_driver timbdma_platform_driver = {
5739+ .driver = {
5740+ .name = DRIVER_NAME,
5741+ .owner = THIS_MODULE,
5742+ },
5743+ .probe = timbdma_probe,
5744+ .remove = timbdma_remove,
5745+};
5746+
5747+/*--------------------------------------------------------------------------*/
5748+
5749+static int __init timbdma_init(void)
5750+{
5751+ struct timbdma_dev *self;
5752+ int err;
5753+
5754+ self = kzalloc(sizeof(*self), GFP_KERNEL);
5755+ if (!self)
5756+ return -ENOMEM;
5757+
5758+ spin_lock_init(&self->lock);
5759+
5760+ self_g = self;
5761+ err = platform_driver_register(&timbdma_platform_driver);
5762+ if (err)
5763+ kfree(self);
5764+
5765+ return err;
5766+}
5767+
5768+static void __exit timbdma_exit(void)
5769+{
5770+ platform_driver_unregister(&timbdma_platform_driver);
5771+ kfree(self_g);
5772+}
5773+
5774+module_init(timbdma_init);
5775+module_exit(timbdma_exit);
5776+
5777+MODULE_DESCRIPTION("Timberdale DMA driver");
5778+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
5779+MODULE_LICENSE("GPL v2");
5780+MODULE_ALIAS("platform:"DRIVER_NAME);
5781+
5782diff -uNr linux-2.6.31/drivers/mfd/timberdale.c linux-2.6.31.new/drivers/mfd/timberdale.c
5783--- linux-2.6.31/drivers/mfd/timberdale.c 1969-12-31 16:00:00.000000000 -0800
5784+++ linux-2.6.31.new/drivers/mfd/timberdale.c 2009-10-23 11:17:29.000000000 -0700
5785@@ -0,0 +1,914 @@
5786+/*
5787+ * timberdale.c timberdale FPGA mfd shim driver
5788+ * Copyright (c) 2009 Intel Corporation
5789+ *
5790+ * This program is free software; you can redistribute it and/or modify
5791+ * it under the terms of the GNU General Public License version 2 as
5792+ * published by the Free Software Foundation.
5793+ *
5794+ * This program is distributed in the hope that it will be useful,
5795+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
5796+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5797+ * GNU General Public License for more details.
5798+ *
5799+ * You should have received a copy of the GNU General Public License
5800+ * along with this program; if not, write to the Free Software
5801+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
5802+ */
5803+
5804+/* Supports:
5805+ * Timberdale FPGA
5806+ */
5807+
5808+#include <linux/kernel.h>
5809+#include <linux/module.h>
5810+#include <linux/pci.h>
5811+#include <linux/msi.h>
5812+#include <linux/init.h>
5813+#include <linux/interrupt.h>
5814+#include <linux/platform_device.h>
5815+#include <linux/mfd/core.h>
5816+#include <linux/irq.h>
5817+
5818+#include <linux/timb_gpio.h>
5819+
5820+#include <linux/i2c.h>
5821+#include <linux/i2c-ocores.h>
5822+#include <linux/i2c-xiic.h>
5823+#include <linux/i2c/tsc2007.h>
5824+#include <linux/can/platform/ascb.h>
5825+
5826+#include <linux/spi/spi.h>
5827+#include <linux/spi/xilinx_spi.h>
5828+#include <linux/spi/max7301.h>
5829+#include <linux/spi/mc33880.h>
5830+
5831+#include <media/timb_video.h>
5832+#include <media/timb_radio.h>
5833+#include <linux/most/timbmlb.h>
5834+
5835+#include <sound/timbi2s.h>
5836+
5837+#include "timberdale.h"
5838+
5839+#define DRIVER_NAME "timberdale"
5840+
5841+struct timberdale_device {
5842+ resource_size_t intc_mapbase;
5843+ resource_size_t ctl_mapbase;
5844+ unsigned char __iomem *ctl_membase;
5845+	/* locking from interrupts while modifying registers */
5846+ spinlock_t lock;
5847+ struct {
5848+ u32 major;
5849+ u32 minor;
5850+ u32 config;
5851+ } fw;
5852+};
5853+
5854+/*--------------------------------------------------------------------------*/
5855+
5856+static struct tsc2007_platform_data timberdale_tsc2007_platform_data = {
5857+ .model = 2003,
5858+ .x_plate_ohms = 100
5859+};
5860+
5861+static struct ascb_platform_data timberdale_ascb_platform_data = {
5862+ .gpio_pin = GPIO_PIN_ASCB
5863+};
5864+
5865+static struct i2c_board_info timberdale_i2c_board_info[] = {
5866+ {
5867+ I2C_BOARD_INFO("tsc2007", 0x48),
5868+ .platform_data = &timberdale_tsc2007_platform_data,
5869+ .irq = IRQ_TIMBERDALE_TSC_INT
5870+ },
5871+ {
5872+ /* Requires jumper JP9 to be off */
5873+ I2C_BOARD_INFO("adv7180", 0x42 >> 1),
5874+ .irq = IRQ_TIMBERDALE_ADV7180
5875+ },
5876+ {
5877+ I2C_BOARD_INFO("tef6862", 0x60)
5878+ },
5879+ {
5880+ I2C_BOARD_INFO("saa7706h", 0x1C)
5881+ },
5882+ {
5883+ I2C_BOARD_INFO("ascb-can", 0x18),
5884+ .platform_data = &timberdale_ascb_platform_data,
5885+ }
5886+};
5887+
5888+static __devinitdata struct xiic_i2c_platform_data
5889+timberdale_xiic_platform_data = {
5890+ .devices = timberdale_i2c_board_info,
5891+ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
5892+};
5893+
5894+static __devinitdata struct ocores_i2c_platform_data
5895+timberdale_ocores_platform_data = {
5896+ .regstep = 4,
5897+ .clock_khz = 62500,
5898+ .devices = timberdale_i2c_board_info,
5899+ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
5900+};
5901+
5902+const static __devinitconst struct resource timberdale_xiic_resources[] = {
5903+ {
5904+ .start = XIICOFFSET,
5905+ .end = XIICEND,
5906+ .flags = IORESOURCE_MEM,
5907+ },
5908+ {
5909+ .start = IRQ_TIMBERDALE_I2C,
5910+ .end = IRQ_TIMBERDALE_I2C,
5911+ .flags = IORESOURCE_IRQ,
5912+ },
5913+};
5914+
5915+const static __devinitconst struct resource timberdale_ocores_resources[] = {
5916+ {
5917+ .start = OCORESOFFSET,
5918+ .end = OCORESEND,
5919+ .flags = IORESOURCE_MEM,
5920+ },
5921+ {
5922+ .start = IRQ_TIMBERDALE_I2C,
5923+ .end = IRQ_TIMBERDALE_I2C,
5924+ .flags = IORESOURCE_IRQ,
5925+ },
5926+};
5927+
5928+const struct max7301_platform_data timberdale_max7301_platform_data = {
5929+ .base = 200
5930+};
5931+
5932+const struct mc33880_platform_data timberdale_mc33880_platform_data = {
5933+ .base = 100
5934+};
5935+
5936+static struct spi_board_info timberdale_spi_16bit_board_info[] = {
5937+ {
5938+ .modalias = "max7301",
5939+ .max_speed_hz = 26000,
5940+ .chip_select = 2,
5941+ .mode = SPI_MODE_0,
5942+ .platform_data = &timberdale_max7301_platform_data
5943+ },
5944+};
5945+
5946+static struct spi_board_info timberdale_spi_8bit_board_info[] = {
5947+ {
5948+ .modalias = "mc33880",
5949+ .max_speed_hz = 4000,
5950+ .chip_select = 1,
5951+ .mode = SPI_MODE_1,
5952+ .platform_data = &timberdale_mc33880_platform_data
5953+ },
5954+};
5955+
5956+static __devinitdata struct xspi_platform_data timberdale_xspi_platform_data = {
5957+	/* In the current (2009-03-06) revision of
5958+ * Timberdale we can handle 3 chip selects
5959+ */
5960+ .num_chipselect = 3,
5961+ /* bits per word and devices will be filled in runtime depending
5962+ * on the HW config
5963+ */
5964+};
5965+
5966+const static __devinitconst struct resource timberdale_spi_resources[] = {
5967+ {
5968+ .start = SPIOFFSET,
5969+ .end = SPIEND,
5970+ .flags = IORESOURCE_MEM,
5971+ },
5972+ {
5973+ .start = IRQ_TIMBERDALE_SPI,
5974+ .end = IRQ_TIMBERDALE_SPI,
5975+ .flags = IORESOURCE_IRQ,
5976+ },
5977+};
5978+
5979+const static __devinitconst struct resource timberdale_eth_resources[] = {
5980+ {
5981+ .start = ETHOFFSET,
5982+ .end = ETHEND,
5983+ .flags = IORESOURCE_MEM,
5984+ },
5985+ {
5986+ .start = IRQ_TIMBERDALE_ETHSW_IF,
5987+ .end = IRQ_TIMBERDALE_ETHSW_IF,
5988+ .flags = IORESOURCE_IRQ,
5989+ },
5990+};
5991+
5992+static __devinitdata struct timbgpio_platform_data
5993+ timberdale_gpio_platform_data = {
5994+ .gpio_base = 0,
5995+ .nr_pins = GPIO_NR_PINS,
5996+ .irq_base = 200,
5997+};
5998+
5999+const static __devinitconst struct resource timberdale_gpio_resources[] = {
6000+ {
6001+ .start = GPIOOFFSET,
6002+ .end = GPIOEND,
6003+ .flags = IORESOURCE_MEM,
6004+ },
6005+ {
6006+ .start = IRQ_TIMBERDALE_GPIO,
6007+ .end = IRQ_TIMBERDALE_GPIO,
6008+ .flags = IORESOURCE_IRQ,
6009+ },
6010+};
6011+
6012+static __devinitdata struct timbmlb_platform_data
6013+ timberdale_mlb_platform_data = {
6014+ .reset_pin = GPIO_PIN_INIC_RST
6015+};
6016+
6017+const static __devinitconst struct resource timberdale_most_resources[] = {
6018+ {
6019+ .start = MOSTOFFSET,
6020+ .end = MOSTEND,
6021+ .flags = IORESOURCE_MEM,
6022+ },
6023+ {
6024+ .start = IRQ_TIMBERDALE_MLB,
6025+ .end = IRQ_TIMBERDALE_MLB,
6026+ .flags = IORESOURCE_IRQ,
6027+ },
6028+};
6029+
6030+const static __devinitconst struct resource timberdale_mlogicore_resources[] = {
6031+ {
6032+ .start = MLCOREOFFSET,
6033+ .end = MLCOREEND,
6034+ .flags = IORESOURCE_MEM,
6035+ },
6036+ {
6037+ .start = IRQ_TIMBERDALE_MLCORE,
6038+ .end = IRQ_TIMBERDALE_MLCORE,
6039+ .flags = IORESOURCE_IRQ,
6040+ },
6041+ {
6042+ .start = IRQ_TIMBERDALE_MLCORE_BUF,
6043+ .end = IRQ_TIMBERDALE_MLCORE_BUF,
6044+ .flags = IORESOURCE_IRQ,
6045+ },
6046+};
6047+
6048+const static __devinitconst struct resource timberdale_uart_resources[] = {
6049+ {
6050+ .start = UARTOFFSET,
6051+ .end = UARTEND,
6052+ .flags = IORESOURCE_MEM,
6053+ },
6054+ {
6055+ .start = IRQ_TIMBERDALE_UART,
6056+ .end = IRQ_TIMBERDALE_UART,
6057+ .flags = IORESOURCE_IRQ,
6058+ },
6059+};
6060+
6061+const static __devinitconst struct resource timberdale_uartlite_resources[] = {
6062+ {
6063+ .start = UARTLITEOFFSET,
6064+ .end = UARTLITEEND,
6065+ .flags = IORESOURCE_MEM,
6066+ },
6067+ {
6068+ .start = IRQ_TIMBERDALE_UARTLITE,
6069+ .end = IRQ_TIMBERDALE_UARTLITE,
6070+ .flags = IORESOURCE_IRQ,
6071+ },
6072+};
6073+
6074+static __devinitdata struct timbi2s_bus_data timbi2s_bus_data[] = {
6075+ {
6076+ .rx = 0,
6077+ .sample_rate = 8000,
6078+ },
6079+ {
6080+ .rx = 1,
6081+ .sample_rate = 8000,
6082+ },
6083+ {
6084+ .rx = 1,
6085+ .sample_rate = 44100,
6086+ },
6087+};
6088+
6089+static __devinitdata struct timbi2s_platform_data timbi2s_platform_data = {
6090+ .busses = timbi2s_bus_data,
6091+ .num_busses = ARRAY_SIZE(timbi2s_bus_data),
6092+ .main_clk = 62500000,
6093+};
6094+
6095+const static __devinitconst struct resource timberdale_i2s_resources[] = {
6096+ {
6097+ .start = I2SOFFSET,
6098+ .end = I2SEND,
6099+ .flags = IORESOURCE_MEM,
6100+ },
6101+ {
6102+ .start = IRQ_TIMBERDALE_I2S,
6103+ .end = IRQ_TIMBERDALE_I2S,
6104+ .flags = IORESOURCE_IRQ,
6105+ },
6106+};
6107+
6108+static __devinitdata struct timb_video_platform_data
6109+ timberdale_video_platform_data = {
6110+ .i2c_adapter = 0,
6111+ .encoder = "adv7180"
6112+};
6113+
6114+const static __devinitconst struct resource timberdale_radio_resources[] = {
6115+ {
6116+ .start = RDSOFFSET,
6117+ .end = RDSEND,
6118+ .flags = IORESOURCE_MEM,
6119+ },
6120+ {
6121+ .start = IRQ_TIMBERDALE_RDS,
6122+ .end = IRQ_TIMBERDALE_RDS,
6123+ .flags = IORESOURCE_IRQ,
6124+ },
6125+};
6126+
6127+static __devinitdata struct timb_radio_platform_data
6128+ timberdale_radio_platform_data = {
6129+ .i2c_adapter = 0,
6130+ .tuner = "tef6862",
6131+ .dsp = "saa7706h"
6132+};
6133+
6134+const static __devinitconst struct resource timberdale_video_resources[] = {
6135+ {
6136+ .start = LOGIWOFFSET,
6137+ .end = LOGIWEND,
6138+ .flags = IORESOURCE_MEM,
6139+ },
6140+ /*
6141+ note that the "frame buffer" is located in DMA area
6142+ starting at 0x1200000
6143+ */
6144+};
6145+
6146+const static __devinitconst struct resource timberdale_dma_resources[] = {
6147+ {
6148+ .start = DMAOFFSET,
6149+ .end = DMAEND,
6150+ .flags = IORESOURCE_MEM,
6151+ },
6152+ {
6153+ .start = IRQ_TIMBERDALE_DMA,
6154+ .end = IRQ_TIMBERDALE_DMA,
6155+ .flags = IORESOURCE_IRQ,
6156+ },
6157+};
6158+
6159+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
6160+ {
6161+ .name = "timb-uart",
6162+ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
6163+ .resources = timberdale_uart_resources,
6164+ },
6165+ {
6166+ .name = "xiic-i2c",
6167+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
6168+ .resources = timberdale_xiic_resources,
6169+ .platform_data = &timberdale_xiic_platform_data,
6170+ .data_size = sizeof(timberdale_xiic_platform_data),
6171+ },
6172+ {
6173+ .name = "timb-gpio",
6174+ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
6175+ .resources = timberdale_gpio_resources,
6176+ .platform_data = &timberdale_gpio_platform_data,
6177+ .data_size = sizeof(timberdale_gpio_platform_data),
6178+ },
6179+ {
6180+ .name = "timb-i2s",
6181+ .num_resources = ARRAY_SIZE(timberdale_i2s_resources),
6182+ .resources = timberdale_i2s_resources,
6183+ .platform_data = &timbi2s_platform_data,
6184+ .data_size = sizeof(timbi2s_platform_data),
6185+ },
6186+ {
6187+ .name = "timb-most",
6188+ .num_resources = ARRAY_SIZE(timberdale_most_resources),
6189+ .resources = timberdale_most_resources,
6190+ .platform_data = &timberdale_mlb_platform_data,
6191+ .data_size = sizeof(timberdale_mlb_platform_data),
6192+ },
6193+ {
6194+ .name = "timb-video",
6195+ .num_resources = ARRAY_SIZE(timberdale_video_resources),
6196+ .resources = timberdale_video_resources,
6197+ .platform_data = &timberdale_video_platform_data,
6198+ .data_size = sizeof(timberdale_video_platform_data),
6199+ },
6200+ {
6201+ .name = "timb-radio",
6202+ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
6203+ .resources = timberdale_radio_resources,
6204+ .platform_data = &timberdale_radio_platform_data,
6205+ .data_size = sizeof(timberdale_radio_platform_data),
6206+ },
6207+ {
6208+ .name = "xilinx_spi",
6209+ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
6210+ .resources = timberdale_spi_resources,
6211+ .platform_data = &timberdale_xspi_platform_data,
6212+ .data_size = sizeof(timberdale_xspi_platform_data),
6213+ },
6214+ {
6215+ .name = "ks8842",
6216+ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
6217+ .resources = timberdale_eth_resources,
6218+ },
6219+ {
6220+ .name = "timb-dma",
6221+ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
6222+ .resources = timberdale_dma_resources,
6223+ },
6224+};
6225+
6226+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
6227+ {
6228+ .name = "timb-uart",
6229+ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
6230+ .resources = timberdale_uart_resources,
6231+ },
6232+ {
6233+ .name = "uartlite",
6234+ .num_resources = ARRAY_SIZE(timberdale_uartlite_resources),
6235+ .resources = timberdale_uartlite_resources,
6236+ },
6237+ {
6238+ .name = "xiic-i2c",
6239+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
6240+ .resources = timberdale_xiic_resources,
6241+ .platform_data = &timberdale_xiic_platform_data,
6242+ .data_size = sizeof(timberdale_xiic_platform_data),
6243+ },
6244+ {
6245+ .name = "timb-gpio",
6246+ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
6247+ .resources = timberdale_gpio_resources,
6248+ .platform_data = &timberdale_gpio_platform_data,
6249+ .data_size = sizeof(timberdale_gpio_platform_data),
6250+ },
6251+ {
6252+ .name = "timb-mlogicore",
6253+ .num_resources = ARRAY_SIZE(timberdale_mlogicore_resources),
6254+ .resources = timberdale_mlogicore_resources,
6255+ },
6256+ {
6257+ .name = "timb-video",
6258+ .num_resources = ARRAY_SIZE(timberdale_video_resources),
6259+ .resources = timberdale_video_resources,
6260+ .platform_data = &timberdale_video_platform_data,
6261+ .data_size = sizeof(timberdale_video_platform_data),
6262+ },
6263+ {
6264+ .name = "timb-radio",
6265+ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
6266+ .resources = timberdale_radio_resources,
6267+ .platform_data = &timberdale_radio_platform_data,
6268+ .data_size = sizeof(timberdale_radio_platform_data),
6269+ },
6270+ {
6271+ .name = "xilinx_spi",
6272+ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
6273+ .resources = timberdale_spi_resources,
6274+ .platform_data = &timberdale_xspi_platform_data,
6275+ .data_size = sizeof(timberdale_xspi_platform_data),
6276+ },
6277+ {
6278+ .name = "ks8842",
6279+ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
6280+ .resources = timberdale_eth_resources,
6281+ },
6282+ {
6283+ .name = "timb-dma",
6284+ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
6285+ .resources = timberdale_dma_resources,
6286+ },
6287+};
6288+
6289+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
6290+ {
6291+ .name = "timb-uart",
6292+ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
6293+ .resources = timberdale_uart_resources,
6294+ },
6295+ {
6296+ .name = "xiic-i2c",
6297+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
6298+ .resources = timberdale_xiic_resources,
6299+ .platform_data = &timberdale_xiic_platform_data,
6300+ .data_size = sizeof(timberdale_xiic_platform_data),
6301+ },
6302+ {
6303+ .name = "timb-gpio",
6304+ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
6305+ .resources = timberdale_gpio_resources,
6306+ .platform_data = &timberdale_gpio_platform_data,
6307+ .data_size = sizeof(timberdale_gpio_platform_data),
6308+ },
6309+ {
6310+ .name = "timb-video",
6311+ .num_resources = ARRAY_SIZE(timberdale_video_resources),
6312+ .resources = timberdale_video_resources,
6313+ .platform_data = &timberdale_video_platform_data,
6314+ .data_size = sizeof(timberdale_video_platform_data),
6315+ },
6316+ {
6317+ .name = "timb-radio",
6318+ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
6319+ .resources = timberdale_radio_resources,
6320+ .platform_data = &timberdale_radio_platform_data,
6321+ .data_size = sizeof(timberdale_radio_platform_data),
6322+ },
6323+ {
6324+ .name = "xilinx_spi",
6325+ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
6326+ .resources = timberdale_spi_resources,
6327+ .platform_data = &timberdale_xspi_platform_data,
6328+ .data_size = sizeof(timberdale_xspi_platform_data),
6329+ },
6330+ {
6331+ .name = "timb-dma",
6332+ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
6333+ .resources = timberdale_dma_resources,
6334+ },
6335+};
6336+
6337+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
6338+ {
6339+ .name = "timb-uart",
6340+ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
6341+ .resources = timberdale_uart_resources,
6342+ },
6343+ {
6344+ .name = "ocores-i2c",
6345+ .num_resources = ARRAY_SIZE(timberdale_ocores_resources),
6346+ .resources = timberdale_ocores_resources,
6347+ .platform_data = &timberdale_ocores_platform_data,
6348+ .data_size = sizeof(timberdale_ocores_platform_data),
6349+ },
6350+ {
6351+ .name = "timb-gpio",
6352+ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
6353+ .resources = timberdale_gpio_resources,
6354+ .platform_data = &timberdale_gpio_platform_data,
6355+ .data_size = sizeof(timberdale_gpio_platform_data),
6356+ },
6357+ {
6358+ .name = "timb-i2s",
6359+ .num_resources = ARRAY_SIZE(timberdale_i2s_resources),
6360+ .resources = timberdale_i2s_resources,
6361+ .platform_data = &timbi2s_platform_data,
6362+ .data_size = sizeof(timbi2s_platform_data),
6363+ },
6364+ {
6365+ .name = "timb-most",
6366+ .num_resources = ARRAY_SIZE(timberdale_most_resources),
6367+ .resources = timberdale_most_resources,
6368+ .platform_data = &timberdale_mlb_platform_data,
6369+ .data_size = sizeof(timberdale_mlb_platform_data),
6370+ },
6371+ {
6372+ .name = "timb-video",
6373+ .num_resources = ARRAY_SIZE(timberdale_video_resources),
6374+ .resources = timberdale_video_resources,
6375+ .platform_data = &timberdale_video_platform_data,
6376+ .data_size = sizeof(timberdale_video_platform_data),
6377+ },
6378+ {
6379+ .name = "timb-radio",
6380+ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
6381+ .resources = timberdale_radio_resources,
6382+ .platform_data = &timberdale_radio_platform_data,
6383+ .data_size = sizeof(timberdale_radio_platform_data),
6384+ },
6385+ {
6386+ .name = "xilinx_spi",
6387+ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
6388+ .resources = timberdale_spi_resources,
6389+ .platform_data = &timberdale_xspi_platform_data,
6390+ .data_size = sizeof(timberdale_xspi_platform_data),
6391+ },
6392+ {
6393+ .name = "ks8842",
6394+ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
6395+ .resources = timberdale_eth_resources,
6396+ },
6397+ {
6398+ .name = "timb-dma",
6399+ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
6400+ .resources = timberdale_dma_resources,
6401+ },
6402+};
6403+
6404+static const __devinitconst struct resource timberdale_sdhc_resources[] = {
6405+ /* located in bar 1 and bar 2 */
6406+ {
6407+ .start = SDHC0OFFSET,
6408+ .end = SDHC0END,
6409+ .flags = IORESOURCE_MEM,
6410+ },
6411+ {
6412+ .start = IRQ_TIMBERDALE_SDHC,
6413+ .end = IRQ_TIMBERDALE_SDHC,
6414+ .flags = IORESOURCE_IRQ,
6415+ },
6416+};
6417+
6418+static __devinitdata struct mfd_cell timberdale_cells_bar1[] = {
6419+ {
6420+ .name = "sdhci",
6421+ .num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
6422+ .resources = timberdale_sdhc_resources,
6423+ },
6424+};
6425+
6426+static __devinitdata struct mfd_cell timberdale_cells_bar2[] = {
6427+ {
6428+ .name = "sdhci",
6429+ .num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
6430+ .resources = timberdale_sdhc_resources,
6431+ },
6432+};
6433+
6434+static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
6435+ char *buf)
6436+{
6437+ struct pci_dev *pdev = to_pci_dev(dev);
6438+ struct timberdale_device *priv = pci_get_drvdata(pdev);
6439+
6440+ return sprintf(buf, "%d.%d.%d\n", priv->fw.major, priv->fw.minor,
6441+ priv->fw.config);
6442+}
6443+
6444+static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
6445+
6446+/*--------------------------------------------------------------------------*/
6447+
6448+static int __devinit timb_probe(struct pci_dev *dev,
6449+ const struct pci_device_id *id)
6450+{
6451+ struct timberdale_device *priv;
6452+ int err, i;
6453+ resource_size_t mapbase;
6454+ struct msix_entry *msix_entries = NULL;
6455+ u8 ip_setup;
6456+
6457+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
6458+ if (!priv)
6459+ return -ENOMEM;
6460+
6461+ spin_lock_init(&priv->lock);
6462+ pci_set_drvdata(dev, priv);
6463+
6464+ err = pci_enable_device(dev);
6465+ if (err)
6466+ goto err_enable;
6467+
6468+ mapbase = pci_resource_start(dev, 0);
6469+ if (!mapbase) {
6470+ printk(KERN_ERR DRIVER_NAME ": No resource\n");
6471+ goto err_start;
6472+ }
6473+
6474+ /* create a resource for the PCI master register */
6475+ priv->ctl_mapbase = mapbase + CHIPCTLOFFSET;
6476+ if (!request_mem_region(priv->ctl_mapbase, CHIPCTLSIZE, "timb-ctl")) {
6477+ printk(KERN_ERR DRIVER_NAME ": Failed to request ctl mem\n");
6478+ goto err_request;
6479+ }
6480+
6481+ priv->ctl_membase = ioremap(priv->ctl_mapbase, CHIPCTLSIZE);
6482+ if (!priv->ctl_membase) {
6483+ printk(KERN_ALERT DRIVER_NAME": Map error, ctl\n");
6484+ goto err_ioremap;
6485+ }
6486+
6487+ /* read the HW config */
6488+ priv->fw.major = ioread32(priv->ctl_membase + TIMB_REV_MAJOR);
6489+ priv->fw.minor = ioread32(priv->ctl_membase + TIMB_REV_MINOR);
6490+ priv->fw.config = ioread32(priv->ctl_membase + TIMB_HW_CONFIG);
6491+
6492+ if (priv->fw.major > TIMB_SUPPORTED_MAJOR) {
6493+ printk(KERN_ERR DRIVER_NAME": The driver supports an older "
6494+ "version of the FPGA, please update the driver to "
6495+ "support %d.%d\n", priv->fw.major, priv->fw.minor);
6496+ goto err_ioremap;
6497+ }
6498+ if (priv->fw.major < TIMB_SUPPORTED_MAJOR ||
6499+ priv->fw.minor < TIMB_REQUIRED_MINOR) {
6500+ printk(KERN_ERR DRIVER_NAME
6501+ ": The FPGA image is too old (%d.%d), "
6502+ "please upgrade the FPGA to at least: %d.%d\n",
6503+ priv->fw.major, priv->fw.minor,
6504+ TIMB_SUPPORTED_MAJOR, TIMB_REQUIRED_MINOR);
6505+ goto err_ioremap;
6506+ }
6507+
6508+ msix_entries = kzalloc(TIMBERDALE_NR_IRQS * sizeof(*msix_entries),
6509+ GFP_KERNEL);
6510+ if (!msix_entries)
6511+ goto err_ioremap;
6512+
6513+ for (i = 0; i < TIMBERDALE_NR_IRQS; i++)
6514+ msix_entries[i].entry = i;
6515+
6516+ err = pci_enable_msix(dev, msix_entries, TIMBERDALE_NR_IRQS);
6517+ if (err) {
6518+ printk(KERN_WARNING DRIVER_NAME
6519+ ": MSI-X init failed: %d, expected entries: %d\n",
6520+ err, TIMBERDALE_NR_IRQS);
6521+ goto err_msix;
6522+ }
6523+
6524+ err = device_create_file(&dev->dev, &dev_attr_fw_ver);
6525+ if (err)
6526+ goto err_create_file;
6527+
6528+ /* Reset all FPGA PLB peripherals */
6529+ iowrite32(0x1, priv->ctl_membase + TIMB_SW_RST);
6530+
6531+ /* update IRQ offsets in I2C board info */
6532+ for (i = 0; i < ARRAY_SIZE(timberdale_i2c_board_info); i++)
6533+ timberdale_i2c_board_info[i].irq =
6534+ msix_entries[timberdale_i2c_board_info[i].irq].vector;
6535+
6536+ /* Update the SPI configuration depending on the HW (8 or 16 bit) */
6537+ if (priv->fw.config & TIMB_HW_CONFIG_SPI_8BIT) {
6538+ timberdale_xspi_platform_data.bits_per_word = 8;
6539+ timberdale_xspi_platform_data.devices =
6540+ timberdale_spi_8bit_board_info;
6541+ timberdale_xspi_platform_data.num_devices =
6542+ ARRAY_SIZE(timberdale_spi_8bit_board_info);
6543+ } else {
6544+ timberdale_xspi_platform_data.bits_per_word = 16;
6545+ timberdale_xspi_platform_data.devices =
6546+ timberdale_spi_16bit_board_info;
6547+ timberdale_xspi_platform_data.num_devices =
6548+ ARRAY_SIZE(timberdale_spi_16bit_board_info);
6549+ }
6550+
6551+ ip_setup = priv->fw.config & TIMB_HW_VER_MASK;
6552+ if (ip_setup == TIMB_HW_VER0)
6553+ err = mfd_add_devices(&dev->dev, -1,
6554+ timberdale_cells_bar0_cfg0,
6555+ ARRAY_SIZE(timberdale_cells_bar0_cfg0),
6556+ &dev->resource[0], msix_entries[0].vector);
6557+ else if (ip_setup == TIMB_HW_VER1)
6558+ err = mfd_add_devices(&dev->dev, -1,
6559+ timberdale_cells_bar0_cfg1,
6560+ ARRAY_SIZE(timberdale_cells_bar0_cfg1),
6561+ &dev->resource[0], msix_entries[0].vector);
6562+ else if (ip_setup == TIMB_HW_VER2)
6563+ err = mfd_add_devices(&dev->dev, -1,
6564+ timberdale_cells_bar0_cfg2,
6565+ ARRAY_SIZE(timberdale_cells_bar0_cfg2),
6566+ &dev->resource[0], msix_entries[0].vector);
6567+ else if (ip_setup == TIMB_HW_VER3)
6568+ err = mfd_add_devices(&dev->dev, -1,
6569+ timberdale_cells_bar0_cfg3,
6570+ ARRAY_SIZE(timberdale_cells_bar0_cfg3),
6571+ &dev->resource[0], msix_entries[0].vector);
6572+ else {
6573+ /* unknown version */
6574+ printk(KERN_ERR"Uknown IP setup: %d.%d.%d\n",
6575+ priv->fw.major, priv->fw.minor, ip_setup);
6576+ err = -ENODEV;
6577+ goto err_mfd;
6578+ }
6579+
6580+ if (err) {
6581+ printk(KERN_WARNING DRIVER_NAME
6582+ ": mfd_add_devices failed: %d\n", err);
6583+ goto err_mfd;
6584+ }
6585+
6586+ err = mfd_add_devices(&dev->dev, 0,
6587+ timberdale_cells_bar1, ARRAY_SIZE(timberdale_cells_bar1),
6588+ &dev->resource[1], msix_entries[0].vector);
6589+ if (err) {
6590+ printk(KERN_WARNING DRIVER_NAME
6591+ "mfd_add_devices failed: %d\n", err);
6592+ goto err_mfd2;
6593+ }
6594+
6595+ /* only version 0 and 3 have the iNand routed to SDHCI */
6596+ if (((priv->fw.config & TIMB_HW_VER_MASK) == TIMB_HW_VER0) ||
6597+ ((priv->fw.config & TIMB_HW_VER_MASK) == TIMB_HW_VER3)) {
6598+ err = mfd_add_devices(&dev->dev, 1, timberdale_cells_bar2,
6599+ ARRAY_SIZE(timberdale_cells_bar2),
6600+ &dev->resource[2], msix_entries[0].vector);
6601+ if (err) {
6602+ printk(KERN_WARNING DRIVER_NAME
6603+ ": mfd_add_devices failed: %d\n", err);
6604+ goto err_mfd2;
6605+ }
6606+ }
6607+
6608+ kfree(msix_entries);
6609+
6610+ printk(KERN_INFO
6611+ "Found Timberdale Card. Rev: %d.%d, HW config: 0x%02x\n",
6612+ priv->fw.major, priv->fw.minor, priv->fw.config);
6613+
6614+ return 0;
6615+
6616+err_mfd2:
6617+ mfd_remove_devices(&dev->dev);
6618+err_mfd:
6619+ device_remove_file(&dev->dev, &dev_attr_fw_ver);
6620+err_create_file:
6621+ pci_disable_msix(dev);
6622+err_msix:
6623+ iounmap(priv->ctl_membase);
6624+err_ioremap:
6625+ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
6626+err_request:
6627+ pci_set_drvdata(dev, NULL);
6628+err_start:
6629+ pci_disable_device(dev);
6630+err_enable:
6631+ kfree(msix_entries);
6632+ kfree(priv);
6633+ pci_set_drvdata(dev, NULL);
6634+ return -ENODEV;
6635+}
6636+
6637+static void __devexit timb_remove(struct pci_dev *dev)
6638+{
6639+ struct timberdale_device *priv = pci_get_drvdata(dev);
6640+
6641+ mfd_remove_devices(&dev->dev);
6642+
6643+ device_remove_file(&dev->dev, &dev_attr_fw_ver);
6644+
6645+ iounmap(priv->ctl_membase);
6646+ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
6647+
6648+ pci_disable_msix(dev);
6649+ pci_disable_device(dev);
6650+ pci_set_drvdata(dev, NULL);
6651+ kfree(priv);
6652+}
6653+
6654+static struct pci_device_id timberdale_pci_tbl[] = {
6655+ { PCI_DEVICE(PCI_VENDOR_ID_TIMB, PCI_DEVICE_ID_TIMB) },
6656+ { 0 }
6657+};
6658+MODULE_DEVICE_TABLE(pci, timberdale_pci_tbl);
6659+
6660+static struct pci_driver timberdale_pci_driver = {
6661+ .name = DRIVER_NAME,
6662+ .id_table = timberdale_pci_tbl,
6663+ .probe = timb_probe,
6664+ .remove = __devexit_p(timb_remove),
6665+};
6666+
6667+static int __init timberdale_init(void)
6668+{
6669+ int err;
6670+
6671+ err = pci_register_driver(&timberdale_pci_driver);
6672+ if (err < 0) {
6673+ printk(KERN_ERR
6674+ "Failed to register PCI driver for %s device.\n",
6675+ timberdale_pci_driver.name);
6676+ return -ENODEV;
6677+ }
6678+
6679+ printk(KERN_INFO "Driver for %s has been successfully registered.\n",
6680+ timberdale_pci_driver.name);
6681+
6682+ return 0;
6683+}
6684+
6685+static void __exit timberdale_exit(void)
6686+{
6687+ pci_unregister_driver(&timberdale_pci_driver);
6688+
6689+ printk(KERN_INFO "Driver for %s has been successfully unregistered.\n",
6690+ timberdale_pci_driver.name);
6691+}
6692+
6693+module_init(timberdale_init);
6694+module_exit(timberdale_exit);
6695+
6696+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
6697+MODULE_VERSION(DRV_VERSION);
6698+MODULE_LICENSE("GPL v2");
6699+
6700diff -uNr linux-2.6.31/drivers/mfd/timberdale.h linux-2.6.31.new/drivers/mfd/timberdale.h
6701--- linux-2.6.31/drivers/mfd/timberdale.h 1969-12-31 16:00:00.000000000 -0800
6702+++ linux-2.6.31.new/drivers/mfd/timberdale.h 2009-10-23 11:17:29.000000000 -0700
6703@@ -0,0 +1,152 @@
6704+/*
6705+ * timberdale.h timberdale FPGA mfd shim driver defines
6706+ * Copyright (c) 2009 Intel Corporation
6707+ *
6708+ * This program is free software; you can redistribute it and/or modify
6709+ * it under the terms of the GNU General Public License version 2 as
6710+ * published by the Free Software Foundation.
6711+ *
6712+ * This program is distributed in the hope that it will be useful,
6713+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
6714+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
6715+ * GNU General Public License for more details.
6716+ *
6717+ * You should have received a copy of the GNU General Public License
6718+ * along with this program; if not, write to the Free Software
6719+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
6720+ */
6721+
6722+/* Supports:
6723+ * Timberdale FPGA
6724+ */
6725+
6726+#ifndef MFD_TIMBERDALE_H
6727+#define MFD_TIMBERDALE_H
6728+
6729+#define DRV_VERSION "1.0"
6730+
6731+/* This driver only support versions >= 3.8 and < 4.0 */
6732+#define TIMB_SUPPORTED_MAJOR 3
6733+
6734+/* This driver only support minor >= 8 */
6735+#define TIMB_REQUIRED_MINOR 8
6736+
6737+/* Registers of the interrupt controller */
6738+#define ISR 0x00
6739+#define IPR 0x04
6740+#define IER 0x08
6741+#define IAR 0x0c
6742+#define SIE 0x10
6743+#define CIE 0x14
6744+#define MER 0x1c
6745+
6746+/* Registers of the control area */
6747+#define TIMB_REV_MAJOR 0x00
6748+#define TIMB_REV_MINOR 0x04
6749+#define TIMB_HW_CONFIG 0x08
6750+#define TIMB_SW_RST 0x40
6751+
6752+/* bits in the TIMB_HW_CONFIG register */
6753+#define TIMB_HW_CONFIG_SPI_8BIT 0x80
6754+
6755+#define TIMB_HW_VER_MASK 0x0f
6756+#define TIMB_HW_VER0 0x00
6757+#define TIMB_HW_VER1 0x01
6758+#define TIMB_HW_VER2 0x02
6759+#define TIMB_HW_VER3 0x03
6760+
6761+#define OCORESOFFSET 0x0
6762+#define OCORESEND 0x1f
6763+
6764+#define SPIOFFSET 0x80
6765+#define SPIEND 0xff
6766+
6767+#define UARTLITEOFFSET 0x100
6768+#define UARTLITEEND 0x10f
6769+
6770+#define RDSOFFSET 0x180
6771+#define RDSEND 0x183
6772+
6773+#define ETHOFFSET 0x300
6774+#define ETHEND 0x3ff
6775+
6776+#define GPIOOFFSET 0x400
6777+#define GPIOEND 0x7ff
6778+
6779+#define CHIPCTLOFFSET 0x800
6780+#define CHIPCTLEND 0x8ff
6781+#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET)
6782+
6783+#define INTCOFFSET 0xc00
6784+#define INTCEND 0xfff
6785+#define INTCSIZE (INTCEND - INTCOFFSET)
6786+
6787+#define MOSTOFFSET 0x1000
6788+#define MOSTEND 0x13ff
6789+
6790+#define UARTOFFSET 0x1400
6791+#define UARTEND 0x17ff
6792+
6793+#define XIICOFFSET 0x1800
6794+#define XIICEND 0x19ff
6795+
6796+#define I2SOFFSET 0x1C00
6797+#define I2SEND 0x1fff
6798+
6799+#define LOGIWOFFSET 0x30000
6800+#define LOGIWEND 0x37fff
6801+
6802+#define MLCOREOFFSET 0x40000
6803+#define MLCOREEND 0x43fff
6804+
6805+#define DMAOFFSET 0x01000000
6806+#define DMAEND 0x013fffff
6807+
6808+/* SDHC0 is placed in PCI bar 1 */
6809+#define SDHC0OFFSET 0x00
6810+#define SDHC0END 0xff
6811+
6812+/* SDHC1 is placed in PCI bar 2 */
6813+#define SDHC1OFFSET 0x00
6814+#define SDHC1END 0xff
6815+
6816+#define PCI_VENDOR_ID_TIMB 0x10ee
6817+#define PCI_DEVICE_ID_TIMB 0xa123
6818+
6819+#define IRQ_TIMBERDALE_INIC 0
6820+#define IRQ_TIMBERDALE_MLB 1
6821+#define IRQ_TIMBERDALE_GPIO 2
6822+#define IRQ_TIMBERDALE_I2C 3
6823+#define IRQ_TIMBERDALE_UART 4
6824+#define IRQ_TIMBERDALE_DMA 5
6825+#define IRQ_TIMBERDALE_I2S 6
6826+#define IRQ_TIMBERDALE_TSC_INT 7
6827+#define IRQ_TIMBERDALE_SDHC 8
6828+#define IRQ_TIMBERDALE_ADV7180 9
6829+#define IRQ_TIMBERDALE_ETHSW_IF 10
6830+#define IRQ_TIMBERDALE_SPI 11
6831+#define IRQ_TIMBERDALE_UARTLITE 12
6832+#define IRQ_TIMBERDALE_MLCORE 13
6833+#define IRQ_TIMBERDALE_MLCORE_BUF 14
6834+#define IRQ_TIMBERDALE_RDS 15
6835+
6836+#define TIMBERDALE_NR_IRQS 16
6837+
6838+/* Some of the interrupts are level triggered, some are edge triggered */
6839+#define IRQ_TIMBERDALE_EDGE_MASK ((1 << IRQ_TIMBERDALE_ADV7180) | \
6840+ (1 << IRQ_TIMBERDALE_TSC_INT) | \
6841+ (1 << IRQ_TIMBERDALE_MLB) | (1 << IRQ_TIMBERDALE_INIC))
6842+
6843+#define IRQ_TIMBERDALE_LEVEL_MASK ((1 << IRQ_TIMBERDALE_SPI) | \
6844+ (1 << IRQ_TIMBERDALE_ETHSW_IF) | (1 << IRQ_TIMBERDALE_SDHC) | \
6845+ (1 << IRQ_TIMBERDALE_I2S) | (1 << IRQ_TIMBERDALE_UART) | \
6846+ (1 << IRQ_TIMBERDALE_I2C) | (1 << IRQ_TIMBERDALE_GPIO) | \
6847+ (1 << IRQ_TIMBERDALE_DMA))
6848+
6849+#define GPIO_PIN_ASCB 8
6850+#define GPIO_PIN_INIC_RST 14
6851+#define GPIO_PIN_BT_RST 15
6852+#define GPIO_NR_PINS 16
6853+
6854+#endif
6855+
6856diff -uNr linux-2.6.31/drivers/mmc/host/sdhci.c linux-2.6.31.new/drivers/mmc/host/sdhci.c
6857--- linux-2.6.31/drivers/mmc/host/sdhci.c 2009-10-23 11:18:30.000000000 -0700
6858+++ linux-2.6.31.new/drivers/mmc/host/sdhci.c 2009-10-23 11:17:25.000000000 -0700
6859@@ -652,7 +652,7 @@
6860 count = sdhci_calc_timeout(host, data);
6861 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
6862
6863- if (host->flags & SDHCI_USE_DMA)
6864+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
6865 host->flags |= SDHCI_REQ_USE_DMA;
6866
6867 /*
6868@@ -1597,7 +1597,7 @@
6869 {
6870 int ret;
6871
6872- if (host->flags & SDHCI_USE_DMA) {
6873+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
6874 if (host->ops->enable_dma)
6875 host->ops->enable_dma(host);
6876 }
6877@@ -1678,23 +1678,20 @@
6878 caps = sdhci_readl(host, SDHCI_CAPABILITIES);
6879
6880 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
6881- host->flags |= SDHCI_USE_DMA;
6882- else if (!(caps & SDHCI_CAN_DO_DMA))
6883- DBG("Controller doesn't have DMA capability\n");
6884+ host->flags |= SDHCI_USE_SDMA;
6885+ else if (!(caps & SDHCI_CAN_DO_SDMA))
6886+ DBG("Controller doesn't have SDMA capability\n");
6887 else
6888- host->flags |= SDHCI_USE_DMA;
6889+ host->flags |= SDHCI_USE_SDMA;
6890
6891 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
6892- (host->flags & SDHCI_USE_DMA)) {
6893+ (host->flags & SDHCI_USE_SDMA)) {
6894 DBG("Disabling DMA as it is marked broken\n");
6895- host->flags &= ~SDHCI_USE_DMA;
6896+ host->flags &= ~SDHCI_USE_SDMA;
6897 }
6898
6899- if (host->flags & SDHCI_USE_DMA) {
6900- if ((host->version >= SDHCI_SPEC_200) &&
6901- (caps & SDHCI_CAN_DO_ADMA2))
6902- host->flags |= SDHCI_USE_ADMA;
6903- }
6904+ if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2))
6905+ host->flags |= SDHCI_USE_ADMA;
6906
6907 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
6908 (host->flags & SDHCI_USE_ADMA)) {
6909@@ -1702,13 +1699,14 @@
6910 host->flags &= ~SDHCI_USE_ADMA;
6911 }
6912
6913- if (host->flags & SDHCI_USE_DMA) {
6914+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
6915 if (host->ops->enable_dma) {
6916 if (host->ops->enable_dma(host)) {
6917 printk(KERN_WARNING "%s: No suitable DMA "
6918 "available. Falling back to PIO.\n",
6919 mmc_hostname(mmc));
6920- host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA);
6921+ host->flags &=
6922+ ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
6923 }
6924 }
6925 }
6926@@ -1736,7 +1734,7 @@
6927 * mask, but PIO does not need the hw shim so we set a new
6928 * mask here in that case.
6929 */
6930- if (!(host->flags & SDHCI_USE_DMA)) {
6931+ if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
6932 host->dma_mask = DMA_BIT_MASK(64);
6933 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
6934 }
6935@@ -1810,7 +1808,7 @@
6936 */
6937 if (host->flags & SDHCI_USE_ADMA)
6938 mmc->max_hw_segs = 128;
6939- else if (host->flags & SDHCI_USE_DMA)
6940+ else if (host->flags & SDHCI_USE_SDMA)
6941 mmc->max_hw_segs = 1;
6942 else /* PIO */
6943 mmc->max_hw_segs = 128;
6944@@ -1893,10 +1891,10 @@
6945
6946 mmc_add_host(mmc);
6947
6948- printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
6949+ printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
6950 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
6951- (host->flags & SDHCI_USE_ADMA)?"A":"",
6952- (host->flags & SDHCI_USE_DMA)?"DMA":"PIO");
6953+ (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
6954+ (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
6955
6956 sdhci_enable_card_detection(host);
6957
6958diff -uNr linux-2.6.31/drivers/mmc/host/sdhci.h linux-2.6.31.new/drivers/mmc/host/sdhci.h
6959--- linux-2.6.31/drivers/mmc/host/sdhci.h 2009-10-23 11:18:30.000000000 -0700
6960+++ linux-2.6.31.new/drivers/mmc/host/sdhci.h 2009-10-23 11:17:25.000000000 -0700
6961@@ -143,7 +143,7 @@
6962 #define SDHCI_CAN_DO_ADMA2 0x00080000
6963 #define SDHCI_CAN_DO_ADMA1 0x00100000
6964 #define SDHCI_CAN_DO_HISPD 0x00200000
6965-#define SDHCI_CAN_DO_DMA 0x00400000
6966+#define SDHCI_CAN_DO_SDMA 0x00400000
6967 #define SDHCI_CAN_VDD_330 0x01000000
6968 #define SDHCI_CAN_VDD_300 0x02000000
6969 #define SDHCI_CAN_VDD_180 0x04000000
6970@@ -250,7 +250,7 @@
6971 spinlock_t lock; /* Mutex */
6972
6973 int flags; /* Host attributes */
6974-#define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */
6975+#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */
6976 #define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
6977 #define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
6978 #define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
6979diff -uNr linux-2.6.31/drivers/mmc/host/sdhci-pci.c linux-2.6.31.new/drivers/mmc/host/sdhci-pci.c
6980--- linux-2.6.31/drivers/mmc/host/sdhci-pci.c 2009-10-23 11:18:30.000000000 -0700
6981+++ linux-2.6.31.new/drivers/mmc/host/sdhci-pci.c 2009-10-23 11:17:25.000000000 -0700
6982@@ -395,7 +395,7 @@
6983
6984 if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
6985 ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
6986- (host->flags & SDHCI_USE_DMA)) {
6987+ (host->flags & SDHCI_USE_SDMA)) {
6988 dev_warn(&pdev->dev, "Will use DMA mode even though HW "
6989 "doesn't fully claim to support it.\n");
6990 }
6991diff -uNr linux-2.6.31/drivers/net/Kconfig linux-2.6.31.new/drivers/net/Kconfig
6992--- linux-2.6.31/drivers/net/Kconfig 2009-10-23 11:18:30.000000000 -0700
6993+++ linux-2.6.31.new/drivers/net/Kconfig 2009-10-23 11:17:23.000000000 -0700
6994@@ -1730,6 +1730,16 @@
6995 This platform driver is for Micrel KSZ8842 / KS8842
6996 2-port ethernet switch chip (managed, VLAN, QoS).
6997
6998+config KS8842_TIMB_DMA
6999+ bool "Use Timberdale specific DMA engine"
7000+ depends on KS8842 && MFD_TIMBERDALE
7001+ select MFD_TIMBERDALE_DMA
7002+ help
7003+ This option enables usage of the timberdale specific DMA engine
7004+ for the KS8842 driver. Rather than using PIO which results in
7005+ single accesses over PCIe, the DMA block of the timberdale FPGA
7006+ will burst data to and from the KS8842.
7007+
7008 config KS8851
7009 tristate "Micrel KS8851 SPI"
7010 depends on SPI
7011diff -uNr linux-2.6.31/drivers/net/ks8842.c linux-2.6.31.new/drivers/net/ks8842.c
7012--- linux-2.6.31/drivers/net/ks8842.c 2009-10-23 11:18:30.000000000 -0700
7013+++ linux-2.6.31.new/drivers/net/ks8842.c 2009-10-23 11:17:22.000000000 -0700
7014@@ -26,11 +26,17 @@
7015 #include <linux/netdevice.h>
7016 #include <linux/etherdevice.h>
7017 #include <linux/ethtool.h>
7018+#include <linux/mfd/timbdma.h>
7019
7020 #define DRV_NAME "ks8842"
7021
7022 /* Timberdale specific Registers */
7023-#define REG_TIMB_RST 0x1c
7024+#define REG_TIMB_RST 0x1c
7025+#define REG_TIMB_FIFO 0x20
7026+#define REG_TIMB_ISR 0x24
7027+#define REG_TIMB_IER 0x28
7028+#define REG_TIMB_IAR 0x2C
7029+#define REQ_TIMB_DMA_RESUME 0x30
7030
7031 /* KS8842 registers */
7032
7033@@ -73,6 +79,11 @@
7034 #define IRQ_RX_ERROR 0x0080
7035 #define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
7036 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
7037+#ifdef CONFIG_KS8842_TIMB_DMA
7038+ #define ENABLED_IRQS_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
7039+ IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
7040+ #define ENABLED_IRQS_DMA (ENABLED_IRQS_IP | IRQ_RX)
7041+#endif
7042 #define REG_ISR 0x02
7043 #define REG_RXSR 0x04
7044 #define RXSR_VALID 0x8000
7045@@ -111,14 +122,50 @@
7046 #define REG_P1CR4 0x02
7047 #define REG_P1SR 0x04
7048
7049+#ifdef CONFIG_KS8842_TIMB_DMA
7050+#define DMA_BUFFER_SIZE 2048
7051+
7052+#define DMA_DEV(a) ((a->dev->parent) ? a->dev->parent : a->dev)
7053+
7054+#define DMA_ONGOING(a) (a->dma_tx.ongoing | a->dma_rx.ongoing)
7055+
7056+struct ks8842_dma_ctl {
7057+ void *desc;
7058+ void *buf;
7059+ dma_addr_t addr;
7060+ unsigned ongoing;
7061+};
7062+
7063+struct ks8842_rx_dma_ctl {
7064+ void *desc;
7065+ struct sk_buff *skb;
7066+ dma_addr_t addr;
7067+};
7068+
7069+#endif
7070+
7071 struct ks8842_adapter {
7072 void __iomem *hw_addr;
7073 int irq;
7074 struct tasklet_struct tasklet;
7075 spinlock_t lock; /* spinlock to be interrupt safe */
7076- struct platform_device *pdev;
7077+ struct device *dev;
7078+ struct work_struct timeout_work;
7079+ struct net_device *netdev;
7080+#ifdef CONFIG_KS8842_TIMB_DMA
7081+ unsigned use_dma;
7082+ struct ks8842_dma_ctl dma_tx;
7083+ struct ks8842_rx_dma_ctl dma_rx;
7084+#endif
7085 };
7086
7087+#ifdef CONFIG_KS8842_TIMB_DMA
7088+static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
7089+{
7090+ iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
7091+}
7092+#endif
7093+
7094 static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
7095 {
7096 iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
7097@@ -195,7 +242,6 @@
7098 msleep(10);
7099 iowrite16(0, adapter->hw_addr + REG_GRR);
7100 */
7101- iowrite16(32, adapter->hw_addr + REG_SELECT_BANK);
7102 iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
7103 msleep(20);
7104 }
7105@@ -203,8 +249,10 @@
7106 static void ks8842_update_link_status(struct net_device *netdev,
7107 struct ks8842_adapter *adapter)
7108 {
7109+ u16 p1mbsr = ks8842_read16(adapter, 45, REG_P1MBSR);
7110+
7111 /* check the status of the link */
7112- if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) {
7113+ if (p1mbsr & 0x4) {
7114 netif_carrier_on(netdev);
7115 netif_wake_queue(netdev);
7116 } else {
7117@@ -241,10 +289,8 @@
7118 /* Enable QMU Transmit flow control / transmit padding / Transmit CRC */
7119 ks8842_write16(adapter, 16, 0x000E, REG_TXCR);
7120
7121- /* enable the receiver, uni + multi + broadcast + flow ctrl
7122- + crc strip */
7123- ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400,
7124- REG_RXCR);
7125+ /* enable the receiver, uni + multi + broadcast + crc strip */
7126+ ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80, REG_RXCR);
7127
7128 /* TX frame pointer autoincrement */
7129 ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR);
7130@@ -261,13 +307,11 @@
7131 /* enable no excessive collison drop */
7132 ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);
7133
7134- /* Enable port 1 force flow control / back pressure / transmit / recv */
7135- ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2);
7136+ /* Enable port 1 / back pressure / transmit / recv */
7137+ ks8842_write16(adapter, 48, 0xE07, REG_P1CR2);
7138
7139 /* restart port auto-negotiation */
7140 ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);
7141- /* only advertise 10Mbps */
7142- ks8842_clear_bits(adapter, 49, 3 << 2, REG_P1CR4);
7143
7144 /* Enable the transmitter */
7145 ks8842_enable_tx(adapter);
7146@@ -279,7 +323,17 @@
7147 ks8842_write16(adapter, 18, 0xffff, REG_ISR);
7148
7149 /* enable interrupts */
7150- ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
7151+#ifdef CONFIG_KS8842_TIMB_DMA
7152+ if (adapter->use_dma) {
7153+ iowrite16(ENABLED_IRQS_IP, adapter->hw_addr + REG_TIMB_IER);
7154+ ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
7155+ } else {
7156+#endif
7157+ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
7158+ iowrite16(ENABLED_IRQS, adapter->hw_addr + REG_TIMB_IER);
7159+#ifdef CONFIG_KS8842_TIMB_DMA
7160+ }
7161+#endif
7162
7163 /* enable the switch */
7164 ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
7165@@ -302,11 +356,73 @@
7166 ks8842_write16(adapter, 39, mac, REG_MACAR3);
7167 }
7168
7169+static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
7170+{
7171+ unsigned long flags;
7172+ unsigned i;
7173+
7174+ spin_lock_irqsave(&adapter->lock, flags);
7175+ for (i = 0; i < ETH_ALEN; i++) {
7176+ ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
7177+ ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
7178+ REG_MACAR1 + i);
7179+ }
7180+ spin_unlock_irqrestore(&adapter->lock, flags);
7181+}
7182+
7183 static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
7184 {
7185 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
7186 }
7187
7188+#ifdef CONFIG_KS8842_TIMB_DMA
7189+static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
7190+{
7191+ struct ks8842_adapter *adapter = netdev_priv(netdev);
7192+ struct ks8842_dma_ctl *ctl = &adapter->dma_tx;
7193+ int err;
7194+ int len = skb->len + sizeof(u32);
7195+ u8 *buf = ctl->buf;
7196+
7197+ if (ctl->ongoing) {
7198+ dev_dbg(adapter->dev, "%s: TX ongoing\n", __func__);
7199+ /* transfer ongoing */
7200+ return NETDEV_TX_BUSY;
7201+ }
7202+
7203+ /* copy data to the TX buffer */
7204+ /* the control word, enable IRQ, port 1 and the length */
7205+ *buf++ = 0x00;
7206+ *buf++ = 0x01; /* Port 1 */
7207+ *buf++ = skb->len & 0xff;
7208+ *buf++ = (skb->len >> 8) & 0xff;
7209+ skb_copy_from_linear_data(skb, buf, skb->len);
7210+
7211+ dma_sync_single_range_for_device(DMA_DEV(adapter), ctl->addr, 0, len,
7212+ DMA_TO_DEVICE);
7213+
7214+ /* make sure the length is a multiple of 4 */
7215+ if (len % 4)
7216+ len += 4 - len % 4;
7217+
7218+ err = timbdma_prep_desc(ctl->desc, ctl->addr, len);
7219+ if (err)
7220+ return NETDEV_TX_BUSY;
7221+
7222+ ctl->ongoing = 1;
7223+ err = timbdma_start(DMA_IRQ_ETH_TX, ctl->desc, 0);
7224+ if (err) {
7225+ ctl->ongoing = 0;
7226+ return NETDEV_TX_BUSY;
7227+ }
7228+ netdev->stats.tx_bytes += skb->len;
7229+
7230+ dev_kfree_skb(skb);
7231+
7232+ return NETDEV_TX_OK;
7233+}
7234+#endif
7235+
7236 static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
7237 {
7238 struct ks8842_adapter *adapter = netdev_priv(netdev);
7239@@ -314,7 +430,7 @@
7240 u32 *ptr = (u32 *)skb->data;
7241 u32 ctrl;
7242
7243- dev_dbg(&adapter->pdev->dev,
7244+ dev_dbg(adapter->dev,
7245 "%s: len %u head %p data %p tail %p end %p\n",
7246 __func__, skb->len, skb->head, skb->data,
7247 skb_tail_pointer(skb), skb_end_pointer(skb));
7248@@ -344,6 +460,104 @@
7249 return NETDEV_TX_OK;
7250 }
7251
7252+#ifdef CONFIG_KS8842_TIMB_DMA
7253+static int __ks8842_start_new_rx_dma(struct net_device *netdev,
7254+ struct ks8842_adapter *adapter)
7255+{
7256+ struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
7257+ int err;
7258+
7259+ ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
7260+ if (ctl->skb) {
7261+ ctl->addr = dma_map_single(DMA_DEV(adapter), ctl->skb->data,
7262+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
7263+ err = dma_mapping_error(DMA_DEV(adapter), ctl->addr);
7264+ if (unlikely(err)) {
7265+ ctl->addr = 0;
7266+ goto out;
7267+ }
7268+ err = timbdma_prep_desc(ctl->desc, ctl->addr, DMA_BUFFER_SIZE);
7269+ if (unlikely(err))
7270+ goto out;
7271+ err = timbdma_start(DMA_IRQ_ETH_RX, ctl->desc, 0);
7272+ if (unlikely(err))
7273+ goto out;
7274+ } else {
7275+ err = -ENOMEM;
7276+ ctl->addr = 0;
7277+ goto out;
7278+ }
7279+
7280+ return err;
7281+out:
7282+ if (ctl->addr)
7283+ dma_unmap_single(DMA_DEV(adapter), ctl->addr, DMA_BUFFER_SIZE,
7284+ DMA_FROM_DEVICE);
7285+ ctl->addr = 0;
7286+ if (ctl->skb)
7287+ dev_kfree_skb(ctl->skb);
7288+
7289+ ctl->skb = NULL;
7290+
7291+ printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
7292+ return err;
7293+}
7294+
7295+static void ks8842_rx_frame_dma(struct net_device *netdev,
7296+ struct ks8842_adapter *adapter)
7297+{
7298+ struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
7299+ struct sk_buff *skb = ctl->skb;
7300+ dma_addr_t addr = ctl->addr;
7301+ u32 status;
7302+
7303+ /* kick next transfer going */
7304+ __ks8842_start_new_rx_dma(netdev, adapter);
7305+
7306+ /* now handle the data we got */
7307+ dma_unmap_single(DMA_DEV(adapter), addr, DMA_BUFFER_SIZE,
7308+ DMA_FROM_DEVICE);
7309+
7310+ status = *((u32 *)skb->data);
7311+
7312+ dev_dbg(adapter->dev, "%s - rx_data: status: %x\n",
7313+ __func__, status & 0xffff);
7314+
7315+ /* check the status */
7316+ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
7317+ int len = (status >> 16) & 0x7ff;
7318+
7319+ dev_dbg(adapter->dev, "%s, got package, len: %d, skb: %p\n",
7320+ __func__, len, skb);
7321+
7322+ netdev->stats.rx_packets++;
7323+ netdev->stats.rx_bytes += len;
7324+ if (status & RXSR_MULTICAST)
7325+ netdev->stats.multicast++;
7326+
7327+ /* we are not nice to the stack, we want to be nice
7328+ * to our DMA engine instead, reserve 4 bytes
7329+ * which is the status word
7330+ */
7331+ skb_reserve(skb, 4);
7332+ skb_put(skb, len);
7333+
7334+ skb->protocol = eth_type_trans(skb, netdev);
7335+ netif_rx(skb);
7336+ } else {
7337+ dev_dbg(adapter->dev, "RX error, status: %x\n", status);
7338+ netdev->stats.rx_errors++;
7339+ if (status & RXSR_TOO_LONG)
7340+ netdev->stats.rx_length_errors++;
7341+ if (status & RXSR_CRC_ERROR)
7342+ netdev->stats.rx_crc_errors++;
7343+ if (status & RXSR_RUNT)
7344+ netdev->stats.rx_frame_errors++;
7345+ dev_kfree_skb(skb);
7346+ }
7347+}
7348+#endif
7349+
7350 static void ks8842_rx_frame(struct net_device *netdev,
7351 struct ks8842_adapter *adapter)
7352 {
7353@@ -352,14 +566,14 @@
7354
7355 status &= 0xffff;
7356
7357- dev_dbg(&adapter->pdev->dev, "%s - rx_data: status: %x\n",
7358+ dev_dbg(adapter->dev, "%s - rx_data: status: %x\n",
7359 __func__, status);
7360
7361 /* check the status */
7362 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
7363 struct sk_buff *skb = netdev_alloc_skb(netdev, len + 2);
7364
7365- dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n",
7366+ dev_dbg(adapter->dev, "%s, got package, len: %d\n",
7367 __func__, len);
7368 if (skb) {
7369 u32 *data;
7370@@ -386,7 +600,7 @@
7371 } else
7372 netdev->stats.rx_dropped++;
7373 } else {
7374- dev_dbg(&adapter->pdev->dev, "RX error, status: %x\n", status);
7375+ dev_dbg(adapter->dev, "RX error, status: %x\n", status);
7376 netdev->stats.rx_errors++;
7377 if (status & RXSR_TOO_LONG)
7378 netdev->stats.rx_length_errors++;
7379@@ -409,7 +623,7 @@
7380 void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
7381 {
7382 u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
7383- dev_dbg(&adapter->pdev->dev, "%s Entry - rx_data: %d\n",
7384+ dev_dbg(adapter->dev, "%s Entry - rx_data: %d\n",
7385 __func__, rx_data);
7386 while (rx_data) {
7387 ks8842_rx_frame(netdev, adapter);
7388@@ -420,7 +634,7 @@
7389 void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
7390 {
7391 u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
7392- dev_dbg(&adapter->pdev->dev, "%s - entry, sr: %x\n", __func__, sr);
7393+ dev_dbg(adapter->dev, "%s - entry, sr: %x\n", __func__, sr);
7394 netdev->stats.tx_packets++;
7395 if (netif_queue_stopped(netdev))
7396 netif_wake_queue(netdev);
7397@@ -429,7 +643,7 @@
7398 void ks8842_handle_rx_overrun(struct net_device *netdev,
7399 struct ks8842_adapter *adapter)
7400 {
7401- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
7402+ dev_dbg(adapter->dev, "%s: entry\n", __func__);
7403 netdev->stats.rx_errors++;
7404 netdev->stats.rx_fifo_errors++;
7405 }
7406@@ -448,20 +662,33 @@
7407 spin_unlock_irqrestore(&adapter->lock, flags);
7408
7409 isr = ks8842_read16(adapter, 18, REG_ISR);
7410- dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
7411+ dev_dbg(adapter->dev, "%s - ISR: 0x%x\n", __func__, isr);
7412+
7413+#ifdef CONFIG_KS8842_TIMB_DMA
7414+ if (adapter->use_dma)
7415+ isr &= ~IRQ_RX;
7416+#endif
7417
7418 /* Ack */
7419 ks8842_write16(adapter, 18, isr, REG_ISR);
7420
7421+ /* Ack in the timberdale IP as well */
7422+ iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);
7423+
7424 if (!netif_running(netdev))
7425 return;
7426
7427 if (isr & IRQ_LINK_CHANGE)
7428 ks8842_update_link_status(netdev, adapter);
7429
7430+ /* should not get IRQ_RX when in DMA mode */
7431 if (isr & (IRQ_RX | IRQ_RX_ERROR))
7432- ks8842_handle_rx(netdev, adapter);
7433+#ifdef CONFIG_KS8842_TIMB_DMA
7434+ if (!adapter->use_dma)
7435+#endif
7436+ ks8842_handle_rx(netdev, adapter);
7437
7438+ /* should only happen when not doing DMA */
7439 if (isr & IRQ_TX)
7440 ks8842_handle_tx(netdev, adapter);
7441
7442@@ -480,8 +707,18 @@
7443
7444 /* re-enable interrupts, put back the bank selection register */
7445 spin_lock_irqsave(&adapter->lock, flags);
7446- ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
7447+#ifdef CONFIG_KS8842_TIMB_DMA
7448+ if (adapter->use_dma)
7449+ ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
7450+ else
7451+#endif
7452+ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
7453+
7454 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
7455+#ifdef CONFIG_KS8842_TIMB_DMA
7456+ /* resume DMA operations */
7457+ ks8842_resume_dma(adapter);
7458+#endif
7459 spin_unlock_irqrestore(&adapter->lock, flags);
7460 }
7461
7462@@ -493,11 +730,17 @@
7463 irqreturn_t ret = IRQ_NONE;
7464
7465 isr = ks8842_read16(adapter, 18, REG_ISR);
7466- dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
7467+ dev_dbg(adapter->dev, "%s - ISR: 0x%x\n", __func__, isr);
7468
7469 if (isr) {
7470- /* disable IRQ */
7471- ks8842_write16(adapter, 18, 0x00, REG_IER);
7472+#ifdef CONFIG_KS8842_TIMB_DMA
7473+ if (adapter->use_dma)
7474+ /* disable all but RX IRQ, since the FPGA relies on it*/
7475+ ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
7476+ else
7477+#endif
7478+ /* disable IRQ */
7479+ ks8842_write16(adapter, 18, 0x00, REG_IER);
7480
7481 /* schedule tasklet */
7482 tasklet_schedule(&adapter->tasklet);
7483@@ -506,23 +749,129 @@
7484 }
7485
7486 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
7487-
7488+#ifdef CONFIG_KS8842_TIMB_DMA
7489+ ks8842_resume_dma(adapter);
7490+#endif
7491 return ret;
7492 }
7493
7494+#ifdef CONFIG_KS8842_TIMB_DMA
7495+static int ks8842_dma_irq(u32 flag, void *data)
7496+{
7497+ struct net_device *netdev = data;
7498+ struct ks8842_adapter *adapter = netdev_priv(netdev);
7499+
7500+ if (flag & DMA_IRQ_ETH_RX) {
7501+ dev_dbg(adapter->dev, "RX DMA finished\n");
7502+ ks8842_rx_frame_dma(netdev, adapter);
7503+ }
7504+ if (flag & DMA_IRQ_ETH_TX) {
7505+ struct ks8842_dma_ctl *ctl = &adapter->dma_tx;
7506+ dev_dbg(adapter->dev, "TX DMA finished\n");
7507+
7508+ netdev->stats.tx_packets++;
7509+ ctl->ongoing = 0;
7510+
7511+ if (netif_queue_stopped(netdev))
7512+ netif_wake_queue(netdev);
7513+ }
7514+
7515+ return 0;
7516+}
7517+
7518+static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
7519+{
7520+ struct ks8842_dma_ctl *tx_ctl = &adapter->dma_tx;
7521+ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
7522+
7523+ if (tx_ctl->ongoing)
7524+ timbdma_stop(DMA_IRQ_ETH_TX);
7525+ tx_ctl->ongoing = 0;
7526+ if (rx_ctl->skb)
7527+ timbdma_stop(DMA_IRQ_ETH_RX);
7528+
7529+ timbdma_set_interruptcb(DMA_IRQ_ETH_RX | DMA_IRQ_ETH_TX, NULL, NULL);
7530+
7531+ if (rx_ctl->desc)
7532+ timbdma_free_desc(rx_ctl->desc);
7533+ rx_ctl->desc = NULL;
7534+ if (tx_ctl->desc)
7535+ timbdma_free_desc(tx_ctl->desc);
7536+ tx_ctl->desc = NULL;
7537+ if (rx_ctl->addr)
7538+ dma_unmap_single(DMA_DEV(adapter), rx_ctl->addr,
7539+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
7540+ rx_ctl->addr = 0;
7541+ if (tx_ctl->addr)
7542+ dma_unmap_single(DMA_DEV(adapter), tx_ctl->addr,
7543+ DMA_BUFFER_SIZE, DMA_TO_DEVICE);
7544+ tx_ctl->addr = 0;
7545+ dev_kfree_skb(rx_ctl->skb);
7546+ rx_ctl->skb = NULL;
7547+ kfree(tx_ctl->buf);
7548+ tx_ctl->buf = NULL;
7549+}
7550+#endif
7551
7552 /* Netdevice operations */
7553
7554 static int ks8842_open(struct net_device *netdev)
7555 {
7556 struct ks8842_adapter *adapter = netdev_priv(netdev);
7557+#ifdef CONFIG_KS8842_TIMB_DMA
7558+ struct ks8842_dma_ctl *tx_ctl = &adapter->dma_tx;
7559+ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
7560+ int use_dma = 0;
7561+#endif
7562 int err;
7563
7564- dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
7565+ dev_dbg(adapter->dev, "%s - entry\n", __func__);
7566+
7567+#ifdef CONFIG_KS8842_TIMB_DMA
7568+ if (adapter->use_dma) {
7569+ /* allocate SG descriptor */
7570+ tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
7571+ if (!tx_ctl->buf)
7572+ goto no_dma;
7573+ tx_ctl->addr = dma_map_single(DMA_DEV(adapter), tx_ctl->buf,
7574+ DMA_BUFFER_SIZE, DMA_TO_DEVICE);
7575+ err = dma_mapping_error(DMA_DEV(adapter), tx_ctl->addr);
7576+ if (err) {
7577+ tx_ctl->addr = 0;
7578+ goto no_dma;
7579+ }
7580+ tx_ctl->desc = timbdma_alloc_desc(DMA_BUFFER_SIZE, 1);
7581+ if (!tx_ctl->desc)
7582+ goto no_dma;
7583+
7584+ rx_ctl->desc = timbdma_alloc_desc(DMA_BUFFER_SIZE, 1);
7585+ if (!rx_ctl->desc)
7586+ goto no_dma;
7587+
7588+ timbdma_set_interruptcb(DMA_IRQ_ETH_RX | DMA_IRQ_ETH_TX,
7589+ ks8842_dma_irq, (void *)netdev);
7590+
7591+ /* start RX dma */
7592+ err = __ks8842_start_new_rx_dma(netdev, adapter);
7593+ if (err)
7594+ goto no_dma;
7595+
7596+ use_dma = 1;
7597+ }
7598+no_dma:
7599+ if (!use_dma) {
7600+ printk(KERN_WARNING DRV_NAME
7601+ ": Failed to initiate DMA, falling back to PIO\n");
7602+ ks8842_dealloc_dma_bufs(adapter);
7603+ adapter->use_dma = 0;
7604+ }
7605+#endif
7606
7607 /* reset the HW */
7608 ks8842_reset_hw(adapter);
7609
7610+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
7611+
7612 ks8842_update_link_status(netdev, adapter);
7613
7614 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
7615@@ -536,11 +885,19 @@
7616 return 0;
7617 }
7618
7619+
7620 static int ks8842_close(struct net_device *netdev)
7621 {
7622 struct ks8842_adapter *adapter = netdev_priv(netdev);
7623
7624- dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
7625+ dev_dbg(adapter->dev, "%s - entry\n", __func__);
7626+
7627+ cancel_work_sync(&adapter->timeout_work);
7628+
7629+#ifdef CONFIG_KS8842_TIMB_DMA
7630+ if (adapter->use_dma)
7631+ ks8842_dealloc_dma_bufs(adapter);
7632+#endif
7633
7634 /* free the irq */
7635 free_irq(adapter->irq, adapter);
7636@@ -556,8 +913,20 @@
7637 int ret;
7638 struct ks8842_adapter *adapter = netdev_priv(netdev);
7639
7640- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
7641+ dev_dbg(adapter->dev, "%s: entry\n", __func__);
7642
7643+#ifdef CONFIG_KS8842_TIMB_DMA
7644+ if (adapter->use_dma) {
7645+ unsigned long flags;
7646+ ret = ks8842_tx_frame_dma(skb, netdev);
7647+ /* for now only allow one transfer at the time */
7648+ spin_lock_irqsave(&adapter->lock, flags);
7649+ if (adapter->dma_tx.ongoing)
7650+ netif_stop_queue(netdev);
7651+ spin_unlock_irqrestore(&adapter->lock, flags);
7652+ return ret;
7653+ }
7654+#endif
7655 ret = ks8842_tx_frame(skb, netdev);
7656
7657 if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8)
7658@@ -569,44 +938,77 @@
7659 static int ks8842_set_mac(struct net_device *netdev, void *p)
7660 {
7661 struct ks8842_adapter *adapter = netdev_priv(netdev);
7662- unsigned long flags;
7663 struct sockaddr *addr = p;
7664 char *mac = (u8 *)addr->sa_data;
7665- int i;
7666
7667- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
7668+ dev_dbg(adapter->dev, "%s: entry\n", __func__);
7669
7670 if (!is_valid_ether_addr(addr->sa_data))
7671 return -EADDRNOTAVAIL;
7672
7673 memcpy(netdev->dev_addr, mac, netdev->addr_len);
7674
7675- spin_lock_irqsave(&adapter->lock, flags);
7676- for (i = 0; i < ETH_ALEN; i++) {
7677- ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
7678- ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
7679- REG_MACAR1 + i);
7680- }
7681- spin_unlock_irqrestore(&adapter->lock, flags);
7682+ ks8842_write_mac_addr(adapter, mac);
7683 return 0;
7684 }
7685
7686-static void ks8842_tx_timeout(struct net_device *netdev)
7687+static void ks8842_tx_timeout_work(struct work_struct *work)
7688 {
7689- struct ks8842_adapter *adapter = netdev_priv(netdev);
7690+ struct ks8842_adapter *adapter =
7691+ container_of(work, struct ks8842_adapter, timeout_work);
7692+ struct net_device *netdev = adapter->netdev;
7693 unsigned long flags;
7694
7695- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
7696+ dev_dbg(adapter->dev, "%s: entry\n", __func__);
7697
7698 spin_lock_irqsave(&adapter->lock, flags);
7699+#ifdef CONFIG_KS8842_TIMB_DMA
7700+ if (adapter->use_dma) {
7701+ struct ks8842_dma_ctl *tx_ctl = &adapter->dma_tx;
7702+ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
7703+
7704+ if (tx_ctl->ongoing)
7705+ timbdma_stop(DMA_IRQ_ETH_TX);
7706+ tx_ctl->ongoing = 0;
7707+
7708+ timbdma_stop(DMA_IRQ_ETH_RX);
7709+
7710+ dma_unmap_single(DMA_DEV(adapter), rx_ctl->addr,
7711+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
7712+ rx_ctl->addr = 0;
7713+
7714+ dev_kfree_skb(rx_ctl->skb);
7715+ rx_ctl->skb = NULL;
7716+ }
7717+#endif
7718+
7719 /* disable interrupts */
7720 ks8842_write16(adapter, 18, 0, REG_IER);
7721 ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
7722+
7723+ netif_stop_queue(netdev);
7724+
7725 spin_unlock_irqrestore(&adapter->lock, flags);
7726
7727 ks8842_reset_hw(adapter);
7728
7729+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
7730+
7731 ks8842_update_link_status(netdev, adapter);
7732+
7733+#ifdef CONFIG_KS8842_TIMB_DMA
7734+ if (adapter->use_dma)
7735+ __ks8842_start_new_rx_dma(netdev, adapter);
7736+#endif
7737+}
7738+
7739+static void ks8842_tx_timeout(struct net_device *netdev)
7740+{
7741+ struct ks8842_adapter *adapter = netdev_priv(netdev);
7742+
7743+ dev_dbg(adapter->dev, "%s: entry\n", __func__);
7744+
7745+ schedule_work(&adapter->timeout_work);
7746 }
7747
7748 static const struct net_device_ops ks8842_netdev_ops = {
7749@@ -641,6 +1043,8 @@
7750 SET_NETDEV_DEV(netdev, &pdev->dev);
7751
7752 adapter = netdev_priv(netdev);
7753+ adapter->netdev = netdev;
7754+ INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
7755 adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
7756 if (!adapter->hw_addr)
7757 goto err_ioremap;
7758@@ -651,8 +1055,10 @@
7759 goto err_get_irq;
7760 }
7761
7762- adapter->pdev = pdev;
7763-
7764+ adapter->dev = &pdev->dev;
7765+#ifdef CONFIG_KS8842_TIMB_DMA
7766+ adapter->use_dma = 1;
7767+#endif
7768 tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
7769 spin_lock_init(&adapter->lock);
7770
7771@@ -660,6 +1066,8 @@
7772 netdev->ethtool_ops = &ks8842_ethtool_ops;
7773
7774 ks8842_read_mac_addr(adapter, netdev->dev_addr);
7775+ if (!is_valid_ether_addr(netdev->dev_addr))
7776+ random_ether_addr(netdev->dev_addr);
7777
7778 id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
7779
7780diff -uNr linux-2.6.31/drivers/net/Makefile linux-2.6.31.new/drivers/net/Makefile
7781--- linux-2.6.31/drivers/net/Makefile 2009-10-23 11:18:30.000000000 -0700
7782+++ linux-2.6.31.new/drivers/net/Makefile 2009-10-23 11:17:22.000000000 -0700
7783@@ -16,6 +16,7 @@
7784 obj-$(CONFIG_CHELSIO_T3) += cxgb3/
7785 obj-$(CONFIG_EHEA) += ehea/
7786 obj-$(CONFIG_CAN) += can/
7787+obj-$(CONFIG_MOST) += most/
7788 obj-$(CONFIG_BONDING) += bonding/
7789 obj-$(CONFIG_ATL1) += atlx/
7790 obj-$(CONFIG_ATL2) += atlx/
7791diff -uNr linux-2.6.31/drivers/net/most/Kconfig linux-2.6.31.new/drivers/net/most/Kconfig
7792--- linux-2.6.31/drivers/net/most/Kconfig 1969-12-31 16:00:00.000000000 -0800
7793+++ linux-2.6.31.new/drivers/net/most/Kconfig 2009-10-23 11:17:22.000000000 -0700
7794@@ -0,0 +1,14 @@
7795+menu "MOST Device Drivers"
7796+ depends on MOST
7797+
7798+config MOST_TIMB_MLB
7799+ tristate "The timberdale MOST block"
7800+ depends on MOST
7801+ depends on MFD_TIMBERDALE_DMA
7802+ depends on GENERIC_GPIO
7803+ depends on HAS_IOMEM
7804+ default N
7805+ ---help---
7806+ Adds support for MOST on the timberdale FPGA.
7807+
7808+endmenu
7809diff -uNr linux-2.6.31/drivers/net/most/Makefile linux-2.6.31.new/drivers/net/most/Makefile
7810--- linux-2.6.31/drivers/net/most/Makefile 1969-12-31 16:00:00.000000000 -0800
7811+++ linux-2.6.31.new/drivers/net/most/Makefile 2009-10-23 11:17:22.000000000 -0700
7812@@ -0,0 +1,6 @@
7813+#
7814+# Makefile for the Linux Media Oriented Systems Transport drivers.
7815+#
7816+
7817+obj-$(CONFIG_MOST_TIMB_MLB) += timbmlb.o
7818+
7819diff -uNr linux-2.6.31/drivers/net/most/timbmlb.c linux-2.6.31.new/drivers/net/most/timbmlb.c
7820--- linux-2.6.31/drivers/net/most/timbmlb.c 1969-12-31 16:00:00.000000000 -0800
7821+++ linux-2.6.31.new/drivers/net/most/timbmlb.c 2009-10-23 11:17:22.000000000 -0700
7822@@ -0,0 +1,1087 @@
7823+/*
7824+ * timbmlb.c Driver for the timberdale MLB block
7825+ * Copyright (c) 2009 Intel Corporation
7826+ *
7827+ * This program is free software; you can redistribute it and/or modify
7828+ * it under the terms of the GNU General Public License version 2 as
7829+ * published by the Free Software Foundation.
7830+ *
7831+ * This program is distributed in the hope that it will be useful,
7832+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
7833+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7834+ * GNU General Public License for more details.
7835+ *
7836+ * You should have received a copy of the GNU General Public License
7837+ * along with this program; if not, write to the Free Software
7838+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
7839+ */
7840+#include <linux/module.h>
7841+#include <linux/interrupt.h>
7842+#include <linux/platform_device.h>
7843+#include <linux/mfd/timbdma.h>
7844+#include <linux/spinlock.h>
7845+#include <net/most/most_core.h>
7846+#include <linux/gpio.h>
7847+#include <linux/most/timbmlb.h>
7848+
7849+#define DRIVER_NAME "timb-most"
7850+
7851+#define MLB_REG_CFG 0x00
7852+#define MLB_REG_CH_CTRL 0x04
7853+#define MLB_REG_ISR 0x08
7854+#define MLB_REG_IMR 0x0C
7855+#define MLB_REG_CH_CFG_1 0x10
7856+#define MLB_REG_CH_CFG_2 0x14
7857+#define MLB_REG_CH_CFG_3 0x18
7858+#define MLB_REG_CH_CFG_4 0x1C
7859+#define MLB_REG_CH_CFG_5 0x20
7860+#define MLB_REG_CH_CFG_6 0x24
7861+#define MLB_REG_CH_CFG_7 0x28
7862+#define MLB_REG_CTRL_RX 0x2C /* 8 bits */
7863+#define MLB_REG_CTRL_TX MLB_REG_CTRL_RX
7864+#define MLB_REG_ASYNC_RX 0x30 /* 32 bits */
7865+#define MLB_REG_ASYNC_TX MLB_REG_ASYNC_RX
7866+#define MLB_REG_SYNC_RX 0x34 /* 32 bits */
7867+#define MLB_REG_SYNC_TX MLB_REG_SYNC_RX
7868+#define MLB_REG_FIFO_RST 0x38
7869+
7870+#define MLB_WR_CFG_CTRL_RX_EMPTY 0x20000
7871+#define MLB_WR_CFG_ASYNC_RX_EMPTY 0x10000
7872+#define MLB_CFG_SYNC_TX_EN 0x00200
7873+#define MLB_CFG_SYNC_RX_EN 0x00100
7874+#define MLB_CFG_ASYNC_RX_EN 0x00080
7875+#define MLB_CFG_CTRL_RX_EN 0x00040
7876+
7877+#define MLB_CH_CTRL_ASYNC_TX_START 0x8000
7878+#define MLB_CH_CTRL_ASYNC_RX_BREAK 0x4000
7879+#define MLB_CH_CTRL_CTRL_TX_START 0x0800
7880+#define MLB_CH_CTRL_CTRL_RX_BREAK 0x0400
7881+
7882+#define MLB_WR_I_SYNC_RX_EMPTY 0x80000
7883+#define MLB_WR_I_SYNC_RX_ALMOST_FULL 0x40000
7884+#define MLB_WR_I_SYNC_TX_FULL 0x20000
7885+#define MLB_WR_I_SYNC_TX_ALMOST_EMPTY 0x10000
7886+#define MLB_I_ASYNC_TX_READY 0x08000
7887+#define MLB_I_ASYNC_TX_PROT_ERR 0x04000
7888+#define MLB_I_ASYNC_TX_RX_BREAK 0x02000
7889+#define MLB_I_ASYNC_TX_BUSY_BREAK 0x01000
7890+#define MLB_I_ASYNC_RX_READY 0x00800
7891+#define MLB_I_ASYNC_RX_PROT_ERR 0x00400
7892+#define MLB_I_ASYNC_RX_CMD_BREAK 0x00200
7893+#define MLB_I_SYNC_LOCK 0x00100
7894+#define MLB_I_CTRL_TX_READY 0x00080
7895+#define MLB_I_CTRL_TX_PROT_ERR 0x00040
7896+#define MLB_I_CTRL_TX_RX_BREAK 0x00020
7897+#define MLB_I_CTRL_TX_BUSY_BREAK 0x00010
7898+#define MLB_I_CTRL_RX_READY 0x00008
7899+#define MLB_I_CTRL_RX_PROT_ERR 0x00004
7900+#define MLB_I_CTRL_RX_CMD_BREAK 0x00002
7901+#define MLB_I_SYNC_RX_PROT_ERR 0x00001
7902+
7903+#define MLB_CH_CFG_NOT_ALLOCATED 0x0000
7904+#define MLB_CH_CFG_SYNC_TX 0x0001
7905+#define MLB_CH_CFG_SYNC_RX 0x0002
7906+#define MLB_CH_CFG_ASYNC_TX 0x0003
7907+#define MLB_CH_CFG_ASYNC_RX 0x0004
7908+#define MLB_CH_CFG_CTRL_TX 0x0005
7909+#define MLB_CH_CFG_CTRL_RX 0x0006
7910+
7911+#define MLB_FIFO_RST_CTRL_TX 0x010000
7912+#define MLB_FIFO_RST_CTRL_RX 0x020000
7913+#define MLB_FIFO_RST_ASYNC_TX 0x040000
7914+#define MLB_FIFO_RST_ASYNC_RX 0x080000
7915+#define MLB_FIFO_RST_SYNC_TX 0x100000
7916+#define MLB_FIFO_RST_SYNC_RX 0x200000
7917+#define MLB_FIFO_RST_MLB 0x400000
7918+#define MLB_FIFO_RST_ALL (MLB_FIFO_RST_CTRL_TX | \
7919+ MLB_FIFO_RST_CTRL_RX | \
7920+ MLB_FIFO_RST_ASYNC_TX | \
7921+ MLB_FIFO_RST_ASYNC_RX | \
7922+ MLB_FIFO_RST_SYNC_TX | \
7923+ MLB_FIFO_RST_SYNC_RX | \
7924+ MLB_FIFO_RST_MLB)
7925+
7926+#define ASYNC_SKB_SIZE 1024
7927+#define SYNC_SKB_SIZE 32
7928+
7929+#define SYNC_MAX_DMA_SIZE 4096
7930+
7931+#define RX_CHAN 0
7932+#define TX_CHAN 1
7933+#define CHANNELS 2
7934+
7935+#define DMA_DEV(s) ((s->mdev->parent->parent) ? \
7936+ s->mdev->parent->parent : s->mdev->parent)
7937+
7938+struct timbmost {
7939+ void __iomem *membase;
7940+ struct most_dev *mdev;
7941+ int irq;
7942+ int reset_pin;
7943+ spinlock_t lock; /* mutual exclusion */
7944+
7945+ /* one queue per channel (type) */
7946+ struct sk_buff_head ctl_q;
7947+ struct sk_buff_head async_q;
7948+ struct sk_buff_head sync_q;
7949+
7950+ /* The SKB currently written/read into by the DMA engine
7951+ * only used for the synchronous channel
7952+ */
7953+ struct sk_buff *sync_read_skb;
7954+ dma_addr_t sync_read_handle;
7955+ void *sync_read_desc;
7956+ struct sk_buff *sync_write_skb;
7957+ void *sync_write_desc;
7958+ int sync_write_next_map;
7959+
7960+ /* channel numbers */
7961+ u8 ctl_channels[CHANNELS];
7962+ u8 sync_channels[CHANNELS];
7963+ u8 async_channels[CHANNELS];
7964+};
7965+
7966+static void timbmost_ctl_write_wake(struct timbmost *self);
7967+static void timbmost_async_write_wake(struct timbmost *self);
7968+
7969+static void __timbmost_dump_regs(struct timbmost *self, const char *caption)
7970+{
7971+ dev_dbg(self->mdev->parent, "%s\nMLB_CFG:\t%x\tCH_CTRL:\t%x\n",
7972+ caption,
7973+ ioread32(self->membase + MLB_REG_CFG),
7974+ ioread32(self->membase + MLB_REG_CH_CTRL));
7975+
7976+ dev_dbg(self->mdev->parent, "ISTAT:\t%x\tIMASK:\t%x\n",
7977+ ioread32(self->membase + MLB_REG_ISR),
7978+ ioread32(self->membase + MLB_REG_IMR));
7979+
7980+ dev_dbg(self->mdev->parent, "CH_CFG1:\t%x\tCH_CFG2:\t%x\n",
7981+ ioread32(self->membase + MLB_REG_CH_CFG_1),
7982+ ioread32(self->membase + MLB_REG_CH_CFG_2));
7983+
7984+ dev_dbg(self->mdev->parent, "CH_CFG3:\t%x\tCH_CFG4:\t%x\n",
7985+ ioread32(self->membase + MLB_REG_CH_CFG_3),
7986+ ioread32(self->membase + MLB_REG_CH_CFG_4));
7987+
7988+ dev_dbg(self->mdev->parent, "CH_CFG5:\t%x\tCH_CFG6:\t%x\n",
7989+ ioread32(self->membase + MLB_REG_CH_CFG_5),
7990+ ioread32(self->membase + MLB_REG_CH_CFG_6));
7991+
7992+ dev_dbg(self->mdev->parent, "CH_CFG7:\t%x\n",
7993+ ioread32(self->membase + MLB_REG_CH_CFG_7));
7994+}
7995+
7996+static void __timbmost_hw_reset(struct timbmost *self)
7997+{
7998+ /* disable all interrupts */
7999+ iowrite32(0, self->membase + MLB_REG_IMR);
8000+ iowrite32(0, self->membase + MLB_REG_ISR);
8001+
8002+ /* disable RX and TX */
8003+ iowrite32(0, self->membase + MLB_REG_CFG);
8004+ iowrite32(0, self->membase + MLB_REG_CH_CTRL);
8005+
8006+ /* make sure the channels are not allocated */
8007+ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_1);
8008+ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_2);
8009+ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_3);
8010+ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_4);
8011+ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_5);
8012+ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_6);
8013+
8014+ /* reset */
8015+ iowrite32(MLB_FIFO_RST_ALL, self->membase + MLB_REG_FIFO_RST);
8016+
8017+ /* reset the INIC */
8018+ gpio_direction_output(self->reset_pin, 0);
8019+ msleep(10);
8020+ gpio_set_value(self->reset_pin, 1);
8021+}
8022+
8023+static void __timbmost_ctl_rx(struct timbmost *self)
8024+{
8025+ u32 cfg;
8026+ do {
8027+ struct sk_buff *skb =
8028+ most_skb_alloc(CTL_FRAME_SIZE, GFP_ATOMIC);
8029+ if (!skb)
8030+ return;
8031+
8032+ do {
8033+ u32 word = ioread32(self->membase + MLB_REG_CTRL_RX);
8034+ int i;
8035+
8036+ for (i = 0; i < 4; i++)
8037+ *skb_put(skb, 1) = (word >> (i * 8)) & 0xff;
8038+
8039+ cfg = ioread32(self->membase + MLB_REG_CFG);
8040+ } while ((skb->len < CTL_FRAME_SIZE) &&
8041+ !(cfg & MLB_WR_CFG_CTRL_RX_EMPTY));
8042+
8043+ /* deliver SKB upstreams */
8044+ skb->dev = (void *)self->mdev;
8045+ most_cb(skb)->channel_type = CHAN_CTL;
8046+ /* only one channel is supported... */
8047+ most_cb(skb)->channel = self->ctl_channels[RX_CHAN];
8048+
8049+ most_recv_frame(skb);
8050+ } while (!(cfg & MLB_WR_CFG_CTRL_RX_EMPTY));
8051+}
8052+
8053+static void __timbmost_async_rx(struct timbmost *self)
8054+{
8055+ /* TODO: The FIFO is 32bit not 8bit */
8056+ u32 cfg;
8057+
8058+ __timbmost_dump_regs(self, "Before read");
8059+
8060+ do {
8061+ struct sk_buff *skb =
8062+ most_skb_alloc(ASYNC_SKB_SIZE, GFP_ATOMIC);
8063+ if (!skb)
8064+ return;
8065+
8066+ do {
8067+ *skb_put(skb, 1) =
8068+ ioread32(self->membase + MLB_REG_ASYNC_RX);
8069+ cfg = ioread32(self->membase + MLB_REG_CFG);
8070+ } while ((skb->len < ASYNC_SKB_SIZE) &&
8071+ !(cfg & MLB_WR_CFG_ASYNC_RX_EMPTY));
8072+
8073+ /* deliver SKB upstreams */
8074+ skb->dev = (void *)self->mdev;
8075+ most_cb(skb)->channel_type = CHAN_ASYNC;
8076+ /* only one channel is supported... */
8077+ most_cb(skb)->channel = self->async_channels[RX_CHAN];
8078+
8079+ most_recv_frame(skb);
8080+ } while (!(cfg & MLB_WR_CFG_ASYNC_RX_EMPTY));
8081+}
8082+
8083+static void __timbmost_sync_read_wake(struct timbmost *self)
8084+{
8085+ struct sk_buff *skb = self->sync_read_skb;
8086+ dma_addr_t map;
8087+ int err;
8088+
8089+ if (skb)
8090+ return;
8091+
8092+ skb = most_skb_alloc(SYNC_SKB_SIZE, GFP_ATOMIC);
8093+ if (!skb)
8094+ return;
8095+
8096+ map = dma_map_single(DMA_DEV(self), skb->data, SYNC_SKB_SIZE,
8097+ DMA_FROM_DEVICE);
8098+ if (dma_mapping_error(DMA_DEV(self), map))
8099+ goto map_failed;
8100+
8101+ err = timbdma_prep_desc(self->sync_read_desc, map, SYNC_SKB_SIZE);
8102+ if (err)
8103+ goto prep_failed;
8104+
8105+ dev_dbg(self->mdev->parent, "%s: will start RX: to: %x, size: %d\n",
8106+ __func__, (u32)map, SYNC_SKB_SIZE);
8107+
8108+ err = timbdma_start(DMA_IRQ_MLB_RX, self->sync_read_desc, 0);
8109+ if (err)
8110+ goto start_failed;
8111+
8112+ self->sync_read_skb = skb;
8113+ self->sync_read_handle = map;
8114+ return;
8115+start_failed:
8116+prep_failed:
8117+ dma_unmap_single(DMA_DEV(self), map, SYNC_SKB_SIZE, DMA_FROM_DEVICE);
8118+map_failed:
8119+ dev_kfree_skb(skb);
8120+}
8121+
8122+static void __timbmost_sync_rx_done(struct timbmost *self)
8123+{
8124+ struct sk_buff *skb = self->sync_read_skb;
8125+ int len;
8126+
8127+ BUG_ON(!skb);
8128+
8129+ /* unmap DMA */
8130+ dma_unmap_single(DMA_DEV(self), self->sync_read_handle, SYNC_SKB_SIZE,
8131+ DMA_FROM_DEVICE);
8132+
8133+ /* set the length */
8134+ len = timbdma_stop(DMA_IRQ_MLB_RX);
8135+ skb_put(skb, len);
8136+ /* send the SKB upwards */
8137+ skb->dev = (void *)self->mdev;
8138+ most_cb(skb)->channel_type = CHAN_SYNC;
8139+ /* only one channel is supported... */
8140+ most_cb(skb)->channel = self->sync_channels[RX_CHAN];
8141+ most_recv_frame(skb);
8142+ self->sync_read_skb = NULL;
8143+
8144+ __timbmost_sync_read_wake(self);
8145+}
8146+
8147+static void __timbmost_sync_write_wake(struct timbmost *self)
8148+{
8149+ unsigned long flags;
8150+ int len;
8151+ dma_addr_t map;
8152+ struct sk_buff *skb = self->sync_write_skb;
8153+ u32 isr;
8154+
8155+ dev_dbg(self->mdev->parent, "%s entry\n", __func__);
8156+
8157+ if (!skb) {
8158+ /* check for next SKB */
8159+ skb = skb_dequeue(&self->sync_q);
8160+ if (!skb)
8161+ return;
8162+
8163+ if (skb_dma_map(DMA_DEV(self), skb, DMA_TO_DEVICE)) {
8164+ /* failed to dma map? */
8165+ dev_kfree_skb(skb);
8166+ return;
8167+ }
8168+ /* next dma map to write is the first ... */
8169+ self->sync_write_next_map = -1;
8170+ self->sync_write_skb = skb;
8171+ dev_dbg(self->mdev->parent, "%s: New skb: fragments: %d\n",
8172+ __func__, skb_shinfo(skb)->nr_frags);
8173+ }
8174+
8175+ /* check if there is space in the FIFO */
8176+ spin_lock_irqsave(&self->lock, flags);
8177+ isr = ioread32(self->membase + MLB_REG_ISR);
8178+ if (isr & MLB_WR_I_SYNC_TX_FULL) {
8179+ /* FIFO full enable, almost empty interrupt */
8180+ u32 imr = ioread32(self->membase + MLB_REG_IMR);
8181+ imr |= MLB_WR_I_SYNC_TX_ALMOST_EMPTY;
8182+ iowrite32(imr, self->membase + MLB_REG_IMR);
8183+ }
8184+ spin_unlock_irqrestore(&self->lock, flags);
8185+
8186+ /* exit if the FIFO is full, we will continue when the almost empty
8187+ * interrupt occurs
8188+ */
8189+ if (isr & MLB_WR_I_SYNC_TX_FULL)
8190+ return;
8191+
8192+ /* send next fragment */
8193+ if (self->sync_write_next_map < 0) {
8194+ len = skb_headlen(skb);
8195+ map = skb_shinfo(skb)->dma_head;
8196+ } else {
8197+ len = skb_shinfo(skb)->frags[self->sync_write_next_map].size;
8198+ map = skb_shinfo(skb)->dma_maps[self->sync_write_next_map];
8199+ }
8200+ self->sync_write_next_map++;
8201+ dev_dbg(self->mdev->parent, "%s: Will send %x, len: %d\n",
8202+ __func__, (uint32_t)map, len);
8203+ timbdma_prep_desc(self->sync_write_desc, map, len);
8204+ timbdma_start(DMA_IRQ_MLB_TX, self->sync_write_desc, 0);
8205+}
8206+
8207+static void __timbmost_sync_tx_done(struct timbmost *self)
8208+{
8209+ struct sk_buff *skb = self->sync_write_skb;
8210+
8211+ /* TX done, free current SKB, and check for next */
8212+ BUG_ON(!skb);
8213+
8214+ /* check if this was the last DMA map */
8215+ if (self->sync_write_next_map >= skb_shinfo(skb)->nr_frags) {
8216+
8217+ /* it was the last... */
8218+ skb_dma_unmap(DMA_DEV(self), skb, DMA_TO_DEVICE);
8219+ dev_kfree_skb(skb);
8220+ self->sync_write_skb = NULL;
8221+ }
8222+
8223+ __timbmost_sync_write_wake(self);
8224+}
8225+
8226+static void timbmost_sync_start_write(struct timbmost *self)
8227+{
8228+ unsigned long flags;
8229+ struct sk_buff *skb;
8230+
8231+ spin_lock_irqsave(&self->lock, flags);
8232+ skb = self->sync_write_skb;
8233+ spin_unlock_irqrestore(&self->lock, flags);
8234+
8235+ /* transfer is ongoing */
8236+ if (skb)
8237+ return;
8238+
8239+ __timbmost_sync_write_wake(self);
8240+}
8241+
8242+/* function called in interrupt context by the timberdale DMA engine
8243+ * when a transfer is finished
8244+ */
8245+static int timbmost_dma_irq(u32 flag, void *devid)
8246+{
8247+ struct timbmost *self = (struct timbmost *)devid;
8248+
8249+ if (flag & DMA_IRQ_MLB_RX)
8250+ __timbmost_sync_rx_done(self);
8251+
8252+ if (flag & DMA_IRQ_MLB_TX)
8253+ __timbmost_sync_tx_done(self);
8254+
8255+ return 0;
8256+}
8257+
8258+static irqreturn_t timbmost_irq(int irq, void *devid)
8259+{
8260+ struct timbmost *self = (struct timbmost *)devid;
8261+ u32 isr, imr;
8262+
8263+ isr = ioread32(self->membase + MLB_REG_ISR);
8264+ imr = ioread32(self->membase + MLB_REG_IMR);
8265+
8266+ dev_dbg(self->mdev->parent, "%s: entry, isr: %x, imr: %x\n", __func__,
8267+ isr, imr);
8268+
8269+ /* mask out only enabled interrupts */
8270+ isr &= imr;
8271+
8272+ /* ack */
8273+ iowrite32(isr, self->membase + MLB_REG_ISR);
8274+
8275+ if (isr & MLB_WR_I_SYNC_TX_ALMOST_EMPTY) {
8276+ /* disable the interrupt */
8277+ imr &= ~MLB_WR_I_SYNC_TX_ALMOST_EMPTY;
8278+ iowrite32(imr, self->membase + MLB_REG_IMR);
8279+ __timbmost_sync_write_wake(self);
8280+ }
8281+
8282+ if (isr & MLB_I_ASYNC_TX_READY) {
8283+ /* disable TX interrupts */
8284+ imr &= ~(MLB_I_ASYNC_TX_READY | MLB_I_ASYNC_TX_PROT_ERR);
8285+ iowrite32(imr, self->membase + MLB_REG_IMR);
8286+ /* schedule to send next package */
8287+ timbmost_async_write_wake(self);
8288+ }
8289+
8290+ if (isr & MLB_I_ASYNC_RX_READY)
8291+ /* pass data upstreams */
8292+ __timbmost_async_rx(self);
8293+
8294+ if (isr & MLB_I_CTRL_TX_READY) {
8295+ /* disable TX interrupts */
8296+ imr &= ~(MLB_I_CTRL_TX_READY | MLB_I_CTRL_TX_PROT_ERR);
8297+ iowrite32(imr, self->membase + MLB_REG_IMR);
8298+ /* schedule to send next package */
8299+ timbmost_ctl_write_wake(self);
8300+ }
8301+
8302+ if (isr & MLB_I_CTRL_RX_READY)
8303+ /* pass data upstreams */
8304+ __timbmost_ctl_rx(self);
8305+
8306+ if (isr)
8307+ return IRQ_HANDLED;
8308+ else
8309+ return IRQ_NONE;
8310+}
8311+
8312+static int timbmost_open(struct most_dev *mdev)
8313+{
8314+ struct timbmost *self = (struct timbmost *)mdev->driver_data;
8315+ int err;
8316+
8317+ dev_dbg(mdev->parent, "%s\n", __func__);
8318+
8319+ skb_queue_head_init(&self->ctl_q);
8320+ skb_queue_head_init(&self->sync_q);
8321+ skb_queue_head_init(&self->async_q);
8322+
8323+ spin_lock_init(&self->lock);
8324+
8325+ /* request the GPIO reset pin */
8326+ err = gpio_request(self->reset_pin, DRIVER_NAME);
8327+ if (err) {
8328+ printk(KERN_ERR DRIVER_NAME
8329+ " Failed to request reset pin: %d, err: %d\n",
8330+ self->reset_pin, err);
8331+ return err;
8332+ }
8333+
8334+ __timbmost_hw_reset(self);
8335+
8336+ /* set DMA callback */
8337+ timbdma_set_interruptcb(DMA_IRQ_MLB_RX | DMA_IRQ_MLB_TX,
8338+ timbmost_dma_irq, (void *)self);
8339+
8340+ self->sync_read_desc = timbdma_alloc_desc(SYNC_MAX_DMA_SIZE, 1);
8341+ if (!self->sync_read_desc) {
8342+ err = -ENOMEM;
8343+ goto err_alloc_r_desc;
8344+ }
8345+
8346+ self->sync_write_desc = timbdma_alloc_desc(SYNC_MAX_DMA_SIZE, 1);
8347+ if (!self->sync_write_desc) {
8348+ err = -ENOMEM;
8349+ goto err_alloc_w_desc;
8350+ }
8351+
8352+ /* request IRQ */
8353+ err = request_irq(self->irq, timbmost_irq, IRQF_SHARED, "timb-most",
8354+ self);
8355+ if (err)
8356+ goto err_req_irq;
8357+
8358+ return 0;
8359+
8360+err_req_irq:
8361+ timbdma_free_desc(self->sync_write_desc);
8362+err_alloc_w_desc:
8363+ timbdma_free_desc(self->sync_read_desc);
8364+err_alloc_r_desc:
8365+ gpio_free(self->reset_pin);
8366+ return err;
8367+}
8368+
8369+static void timbmost_stop_sync_dma(struct timbmost *self)
8370+{
8371+ if (self->sync_read_skb) {
8372+ timbdma_stop(DMA_IRQ_MLB_RX);
8373+ dma_unmap_single(DMA_DEV(self), self->sync_read_handle,
8374+ SYNC_SKB_SIZE, DMA_FROM_DEVICE);
8375+ kfree_skb(self->sync_read_skb);
8376+ self->sync_read_skb = NULL;
8377+ }
8378+
8379+ if (self->sync_write_skb) {
8380+ timbdma_stop(DMA_IRQ_MLB_TX);
8381+ skb_dma_unmap(DMA_DEV(self), self->sync_write_skb,
8382+ DMA_TO_DEVICE);
8383+ kfree_skb(self->sync_write_skb);
8384+ self->sync_write_skb = NULL;
8385+ }
8386+}
8387+
8388+static int timbmost_close(struct most_dev *mdev)
8389+{
8390+ struct timbmost *self = (struct timbmost *)mdev->driver_data;
8391+
8392+ dev_dbg(mdev->parent, "%s\n", __func__);
8393+
8394+ /* free IRQ */
8395+ free_irq(self->irq, self);
8396+
8397+ __timbmost_hw_reset(self);
8398+
8399+ /* free GPIO */
8400+ gpio_free(self->reset_pin);
8401+
8402+ /* empty all queues */
8403+ skb_queue_purge(&self->ctl_q);
8404+ skb_queue_purge(&self->sync_q);
8405+ skb_queue_purge(&self->async_q);
8406+
8407+ /* clear DMA callback */
8408+ timbdma_set_interruptcb(DMA_IRQ_MLB_RX | DMA_IRQ_MLB_TX, NULL, NULL);
8409+
8410+ return 0;
8411+}
8412+
8413+static int __timbmost_conf_channel(struct timbmost *self, u8 channel,
8414+ u8 channel_mask)
8415+{
8416+ int register_offset;
8417+ int shift;
8418+ u32 ch_cfg;
8419+
8420+ /* only even channel numbers are allowed */
8421+ if (channel % 2 || channel > 0x3e || channel == 0) {
8422+ printk(KERN_WARNING DRIVER_NAME": Invalid channel: %d\n",
8423+ channel);
8424+ return -EINVAL;
8425+ }
8426+
8427+ channel = (channel / 2) - 1;
8428+ /* the channel conf is spread out over the 7 channel config registers
8429+ * each register configures 5 channels, each reg is 32bit
8430+ */
8431+ register_offset = MLB_REG_CH_CFG_1 + (channel / 5) * 4;
8432+
8433+ /* each register configures 5 channels, 3 bit per channel
8434+ * lowest bits configures highest channel
8435+ */
8436+ shift = (4 - (channel % 5)) * 3;
8437+
8438+ ch_cfg = ioread32(self->membase + register_offset);
8439+ ch_cfg &= ~(0x7 << shift);
8440+ ch_cfg |= (channel_mask & 0x7) << shift;
8441+ iowrite32(ch_cfg, self->membase + register_offset);
8442+ return 0;
8443+}
8444+
8445+static int timbmost_conf_channel(struct most_dev *mdev,
8446+ enum most_chan_type type, u8 channel, u8 flags)
8447+{
8448+ struct timbmost *self = (struct timbmost *)mdev->driver_data;
8449+ unsigned long irq_flags;
8450+ u32 imr, cfg;
8451+ int err = -EINVAL;
8452+ int chan_idx = (flags & MOST_CONF_FLAG_TX) ? TX_CHAN : RX_CHAN;
8453+
8454+ dev_dbg(mdev->parent, "%s: channel: %d, flags: %x\n",
8455+ __func__, channel, flags);
8456+
8457+ if (flags & MOST_CONF_FLAG_UP) {
8458+ switch (type) {
8459+ case CHAN_CTL:
8460+ spin_lock_irqsave(&self->lock, irq_flags);
8461+ /* we only support one channel at the time */
8462+ if (self->ctl_channels[chan_idx])
8463+ goto error;
8464+
8465+ /* reset the FIFO */
8466+ iowrite32((chan_idx == TX_CHAN) ? MLB_FIFO_RST_CTRL_TX :
8467+ MLB_FIFO_RST_CTRL_RX,
8468+ self->membase + MLB_REG_FIFO_RST);
8469+
8470+ err = __timbmost_conf_channel(self, channel,
8471+ (chan_idx == TX_CHAN) ? MLB_CH_CFG_CTRL_TX :
8472+ MLB_CH_CFG_CTRL_RX);
8473+ if (err)
8474+ goto error;
8475+
8476+ if (chan_idx == RX_CHAN) {
8477+ /* enable the receiver */
8478+ cfg = ioread32(self->membase + MLB_REG_CFG);
8479+ cfg |= MLB_CFG_CTRL_RX_EN;
8480+ iowrite32(cfg, self->membase + MLB_REG_CFG);
8481+
8482+ /* enable RX interrupts */
8483+ imr = ioread32(self->membase + MLB_REG_IMR);
8484+ imr |= (MLB_I_CTRL_RX_READY |
8485+ MLB_I_CTRL_RX_PROT_ERR |
8486+ MLB_I_CTRL_RX_CMD_BREAK);
8487+ iowrite32(imr, self->membase + MLB_REG_IMR);
8488+ }
8489+ self->ctl_channels[chan_idx] = channel;
8490+ spin_unlock_irqrestore(&self->lock, irq_flags);
8491+ break;
8492+ case CHAN_SYNC:
8493+ spin_lock_irqsave(&self->lock, irq_flags);
8494+ /* we only support one channel at the time */
8495+ if (self->sync_channels[chan_idx])
8496+ goto error;
8497+
8498+ /* reset the FIFO */
8499+ iowrite32((chan_idx == TX_CHAN) ? MLB_FIFO_RST_SYNC_TX :
8500+ MLB_FIFO_RST_SYNC_RX,
8501+ self->membase + MLB_REG_FIFO_RST);
8502+
8503+ err = __timbmost_conf_channel(self, channel,
8504+ (chan_idx == TX_CHAN) ? MLB_CH_CFG_SYNC_TX :
8505+ MLB_CH_CFG_SYNC_RX);
8506+ if (err)
8507+ goto error;
8508+
8509+ if (chan_idx == RX_CHAN) {
8510+ /* enable the receiver */
8511+ cfg = ioread32(self->membase + MLB_REG_CFG);
8512+ cfg |= MLB_CFG_SYNC_RX_EN;
8513+ iowrite32(cfg, self->membase + MLB_REG_CFG);
8514+
8515+ /* enable prot error interrupts */
8516+ imr = ioread32(self->membase + MLB_REG_IMR);
8517+ imr |= MLB_I_SYNC_RX_PROT_ERR;
8518+ iowrite32(imr, self->membase + MLB_REG_IMR);
8519+ /* start RX DMA */
8520+ __timbmost_sync_read_wake(self);
8521+ }
8522+ self->sync_channels[chan_idx] = channel;
8523+ spin_unlock_irqrestore(&self->lock, irq_flags);
8524+
8525+ break;
8526+ case CHAN_ASYNC:
8527+ spin_lock_irqsave(&self->lock, irq_flags);
8528+ /* we only support one channel at the time */
8529+ if (self->async_channels[chan_idx])
8530+ goto error;
8531+ /* reset the FIFO */
8532+ iowrite32((chan_idx == TX_CHAN) ?
8533+ MLB_FIFO_RST_ASYNC_TX : MLB_FIFO_RST_ASYNC_RX,
8534+ self->membase + MLB_REG_FIFO_RST);
8535+
8536+ err = __timbmost_conf_channel(self, channel,
8537+ (chan_idx == TX_CHAN) ? MLB_CH_CFG_ASYNC_TX :
8538+ MLB_CH_CFG_ASYNC_RX);
8539+ if (err)
8540+ goto error;
8541+
8542+ if (chan_idx == RX_CHAN) {
8543+ /* enable the receiver */
8544+ cfg = ioread32(self->membase + MLB_REG_CFG);
8545+ cfg |= MLB_CFG_ASYNC_RX_EN;
8546+ iowrite32(cfg, self->membase + MLB_REG_CFG);
8547+
8548+ /* enable RX interrupts */
8549+ imr = ioread32(self->membase + MLB_REG_IMR);
8550+ imr |= (MLB_I_ASYNC_RX_READY |
8551+ MLB_I_ASYNC_RX_PROT_ERR |
8552+ MLB_I_ASYNC_RX_CMD_BREAK);
8553+ iowrite32(imr, self->membase + MLB_REG_IMR);
8554+ }
8555+ self->async_channels[chan_idx] = channel;
8556+ spin_unlock_irqrestore(&self->lock, irq_flags);
8557+ break;
8558+ default:
8559+			printk(KERN_WARNING "timbmlb: Unknown channel type\n");
8560+ return -EINVAL;
8561+ }
8562+ } else {
8563+ switch (type) {
8564+ case CHAN_CTL:
8565+ /* stop any ongoing transfer */
8566+ spin_lock_irqsave(&self->lock, irq_flags);
8567+ if (self->ctl_channels[chan_idx] != channel)
8568+ goto error;
8569+
8570+ imr = ioread32(self->membase + MLB_REG_IMR);
8571+ imr &= ~(MLB_I_CTRL_TX_READY |
8572+ MLB_I_CTRL_TX_PROT_ERR |
8573+ MLB_I_CTRL_TX_RX_BREAK |
8574+ MLB_I_CTRL_TX_BUSY_BREAK |
8575+ MLB_I_CTRL_RX_READY |
8576+ MLB_I_CTRL_RX_PROT_ERR |
8577+ MLB_I_CTRL_RX_CMD_BREAK);
8578+ iowrite32(imr, self->membase + MLB_REG_IMR);
8579+
8580+ /* disable CTL RX */
8581+ cfg = ioread32(self->membase + MLB_REG_CFG);
8582+ cfg &= ~MLB_CFG_CTRL_RX_EN;
8583+ iowrite32(cfg, self->membase + MLB_REG_CFG);
8584+
8585+ err = __timbmost_conf_channel(self, channel,
8586+ MLB_CH_CFG_NOT_ALLOCATED);
8587+ spin_unlock_irqrestore(&self->lock, irq_flags);
8588+ skb_queue_purge(&self->ctl_q);
8589+ self->ctl_channels[chan_idx] = 0;
8590+ return err;
8591+ case CHAN_SYNC:
8592+
8593+ /* stop any ongoing transfer */
8594+ spin_lock_irqsave(&self->lock, irq_flags);
8595+ if (self->sync_channels[chan_idx] != channel)
8596+ goto error;
8597+
8598+ /* stop DMA */
8599+ timbmost_stop_sync_dma(self);
8600+ imr = ioread32(self->membase + MLB_REG_IMR);
8601+ imr &= ~MLB_I_SYNC_RX_PROT_ERR;
8602+ iowrite32(imr, self->membase + MLB_REG_IMR);
8603+
8604+ /* disable SYNC TX/RX */
8605+ cfg = ioread32(self->membase + MLB_REG_CFG);
8606+ cfg &= ~(MLB_CFG_SYNC_TX_EN |
8607+ MLB_CFG_SYNC_RX_EN);
8608+ iowrite32(cfg, self->membase + MLB_REG_CFG);
8609+
8610+ err = __timbmost_conf_channel(self, channel,
8611+ MLB_CH_CFG_NOT_ALLOCATED);
8612+ spin_unlock_irqrestore(&self->lock, irq_flags);
8613+ skb_queue_purge(&self->sync_q);
8614+ self->sync_channels[chan_idx] = 0;
8615+ return err;
8616+ case CHAN_ASYNC:
8617+ /* stop any ongoing transfer */
8618+ spin_lock_irqsave(&self->lock, irq_flags);
8619+ if (self->async_channels[chan_idx] != channel)
8620+ goto error;
8621+ imr = ioread32(self->membase + MLB_REG_IMR);
8622+ imr &= ~(MLB_I_ASYNC_TX_READY |
8623+ MLB_I_ASYNC_TX_PROT_ERR |
8624+ MLB_I_ASYNC_TX_RX_BREAK |
8625+ MLB_I_ASYNC_TX_BUSY_BREAK |
8626+ MLB_I_ASYNC_RX_READY |
8627+ MLB_I_ASYNC_RX_PROT_ERR |
8628+ MLB_I_ASYNC_RX_CMD_BREAK);
8629+ iowrite32(imr, self->membase + MLB_REG_IMR);
8630+
8631+ /* disable CTL RX */
8632+ cfg = ioread32(self->membase + MLB_REG_CFG);
8633+ cfg &= ~MLB_CFG_ASYNC_RX_EN;
8634+ iowrite32(cfg, self->membase + MLB_REG_CFG);
8635+
8636+ err = __timbmost_conf_channel(self, channel,
8637+ MLB_CH_CFG_NOT_ALLOCATED);
8638+ spin_unlock_irqrestore(&self->lock, irq_flags);
8639+ skb_queue_purge(&self->async_q);
8640+ self->async_channels[chan_idx] = 0;
8641+ return err;
8642+ default:
8643+ return -EINVAL;
8644+ }
8645+ }
8646+ return 0;
8647+
8648+error:
8649+ spin_unlock_irqrestore(&self->lock, irq_flags);
8650+ return err;
8651+}
8652+
8653+static void timbmost_ctl_write_wake(struct timbmost *self)
8654+{
8655+ unsigned long flags;
8656+ u32 imr;
8657+ u32 isr;
8658+ struct sk_buff *skb;
8659+ int i;
8660+
8661+ dev_dbg(self->mdev->parent, "%s entry\n", __func__);
8662+ __timbmost_dump_regs(self, "Before write");
8663+
8664+ spin_lock_irqsave(&self->lock, flags);
8665+ imr = ioread32(self->membase + MLB_REG_IMR);
8666+ isr = ioread32(self->membase + MLB_REG_ISR);
8667+ spin_unlock_irqrestore(&self->lock, flags);
8668+
8669+ /* check if the hardware is currently writing */
8670+ if (imr & MLB_I_CTRL_TX_READY)
8671+ return;
8672+
8673+ /* check if we have sync */
8674+ if (!(isr & MLB_I_SYNC_LOCK))
8675+ return;
8676+
8677+ skb = skb_dequeue(&self->ctl_q);
8678+ if (!skb)
8679+ return;
8680+
8681+ /* now write to the FIFO */
8682+ for (i = 0; i < skb->len;) {
8683+ u32 word = 0;
8684+ int j;
8685+
8686+ for (j = 0; j < 4 && i < skb->len; j++, i++)
8687+ word |= ((u8 *)skb->data)[i] << j * 8;
8688+
8689+ iowrite32(word, self->membase + MLB_REG_CTRL_TX);
8690+ }
8691+
8692+ /* data is in the FIFO, enable proper interrupts */
8693+ spin_lock_irqsave(&self->lock, flags);
8694+ imr = ioread32(self->membase + MLB_REG_IMR) | MLB_I_CTRL_TX_READY |
8695+ MLB_I_CTRL_TX_PROT_ERR;
8696+ iowrite32(imr, self->membase + MLB_REG_IMR);
8697+ /* start TX */
8698+ iowrite32(MLB_CH_CTRL_CTRL_TX_START, self->membase + MLB_REG_CH_CTRL);
8699+ spin_unlock_irqrestore(&self->lock, flags);
8700+
8701+ kfree_skb(skb);
8702+}
8703+
8704+static void timbmost_async_write_wake(struct timbmost *self)
8705+{
8706+ unsigned long flags;
8707+ u32 imr;
8708+ u32 isr;
8709+ struct sk_buff *skb;
8710+ int i;
8711+
8712+ spin_lock_irqsave(&self->lock, flags);
8713+ imr = ioread32(self->membase + MLB_REG_IMR);
8714+ isr = ioread32(self->membase + MLB_REG_ISR);
8715+ spin_unlock_irqrestore(&self->lock, flags);
8716+
8717+ /* check if the hardware is currently writing */
8718+ if (imr & MLB_I_ASYNC_TX_READY)
8719+ return;
8720+
8721+ /* check if we have sync */
8722+ if (!(isr & MLB_I_SYNC_LOCK))
8723+ return;
8724+
8725+ skb = skb_dequeue(&self->async_q);
8726+ if (!skb)
8727+ return;
8728+
8729+ /* TODO: The FIFO is 32bit not 8bit */
8730+ /* now write to the FIFO */
8731+ for (i = 0; i < skb->len; i++)
8732+ iowrite32(skb->data[i], self->membase + MLB_REG_ASYNC_TX);
8733+
8734+ /* data is in the FIFO, enable proper interrupts */
8735+ spin_lock_irqsave(&self->lock, flags);
8736+ imr = ioread32(self->membase + MLB_REG_IMR) | MLB_I_ASYNC_TX_READY |
8737+ MLB_I_ASYNC_TX_PROT_ERR;
8738+ iowrite32(imr, self->membase + MLB_REG_IMR);
8739+ /* start TX */
8740+ iowrite32(MLB_CH_CTRL_ASYNC_TX_START, self->membase + MLB_REG_CH_CTRL);
8741+ spin_unlock_irqrestore(&self->lock, flags);
8742+
8743+ kfree_skb(skb);
8744+}
8745+
8746+static int timbmost_send(struct sk_buff *skb)
8747+{
8748+ struct most_dev *mdev = (struct most_dev *)skb->dev;
8749+ struct timbmost *self = (struct timbmost *)mdev->driver_data;
8750+
8751+ dev_dbg(mdev->parent, "%s, type: %d\n",
8752+ __func__, most_cb(skb)->channel_type);
8753+
8754+ switch (most_cb(skb)->channel_type) {
8755+ case CHAN_CTL:
8756+ skb_queue_tail(&self->ctl_q, skb);
8757+ timbmost_ctl_write_wake(self);
8758+ break;
8759+ case CHAN_ASYNC:
8760+ skb_queue_tail(&self->async_q, skb);
8761+ timbmost_async_write_wake(self);
8762+ break;
8763+ case CHAN_SYNC:
8764+ skb_queue_tail(&self->sync_q, skb);
8765+ timbmost_sync_start_write(self);
8766+ break;
8767+ default:
8768+ printk(KERN_WARNING "%s: Got unsupported channel type: %d\n",
8769+ __func__, most_cb(skb)->channel_type);
8770+ kfree_skb(skb);
8771+ break;
8772+ }
8773+
8774+ return 0;
8775+}
8776+
8777+static int timbmost_probe(struct platform_device *dev)
8778+{
8779+ int err;
8780+ struct timbmost *self = NULL;
8781+ struct resource *iomem;
8782+ struct timbmlb_platform_data *pdata = dev->dev.platform_data;
8783+
8784+ if (!pdata) {
8785+ printk(KERN_ERR DRIVER_NAME "No platform data supplied\n");
8786+ err = -EINVAL;
8787+ goto err_mem;
8788+ }
8789+
8790+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
8791+ if (!iomem) {
8792+ err = -EINVAL;
8793+ goto err_mem;
8794+ }
8795+
8796+ self = kzalloc(sizeof(*self), GFP_KERNEL);
8797+ if (!self) {
8798+ err = -ENOMEM;
8799+ goto err_mem;
8800+ }
8801+
8802+ self->mdev = most_alloc_dev();
8803+ if (!self->mdev) {
8804+ err = -ENOMEM;
8805+ goto err_mem;
8806+ }
8807+
8808+ self->mdev->owner = THIS_MODULE;
8809+ self->mdev->driver_data = self;
8810+ self->mdev->parent = &dev->dev;
8811+ self->mdev->open = timbmost_open;
8812+ self->mdev->close = timbmost_close;
8813+ self->mdev->send = timbmost_send;
8814+ self->mdev->conf_channel = timbmost_conf_channel;
8815+
8816+ if (!request_mem_region(iomem->start,
8817+ resource_size(iomem), "timb-most")) {
8818+ err = -EBUSY;
8819+ goto err_mem;
8820+ }
8821+
8822+ self->membase = ioremap(iomem->start, resource_size(iomem));
8823+ if (!self->membase) {
8824+ printk(KERN_ERR "timbmost: Failed to remap I/O memory\n");
8825+ err = -ENOMEM;
8826+ goto err_ioremap;
8827+ }
8828+
8829+ self->reset_pin = pdata->reset_pin;
8830+
8831+ /* find interrupt */
8832+ self->irq = platform_get_irq(dev, 0);
8833+ if (self->irq < 0) {
8834+ err = self->irq;
8835+ goto err_get_irq;
8836+ }
8837+
8838+ /* register to the MOST layer */
8839+ err = most_register_dev(self->mdev);
8840+ if (err)
8841+ goto err_register;
8842+
8843+
8844+ platform_set_drvdata(dev, self);
8845+
8846+ return 0;
8847+
8848+err_get_irq:
8849+err_register:
8850+ iounmap(self->membase);
8851+err_ioremap:
8852+ release_mem_region(iomem->start, resource_size(iomem));
8853+err_mem:
8854+ if (self) {
8855+ if (self->mdev)
8856+ most_free_dev(self->mdev);
8857+
8858+ timbdma_free_desc(self->sync_read_desc);
8859+ timbdma_free_desc(self->sync_write_desc);
8860+
8861+ kfree(self);
8862+ }
8863+ printk(KERN_ERR "timb-most: Failed to register: %d\n", err);
8864+
8865+ return err;
8866+}
8867+
8868+static int timbmost_remove(struct platform_device *dev)
8869+{
8870+ struct timbmost *self = platform_get_drvdata(dev);
8871+ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
8872+
8873+ most_unregister_dev(self->mdev);
8874+ iounmap(self->membase);
8875+ release_mem_region(iomem->start, resource_size(iomem));
8876+ most_free_dev(self->mdev);
8877+ kfree(self);
8878+ return 0;
8879+}
8880+
8881+static struct platform_driver timbmost_platform_driver = {
8882+ .driver = {
8883+ .name = DRIVER_NAME,
8884+ .owner = THIS_MODULE,
8885+ },
8886+ .probe = timbmost_probe,
8887+ .remove = timbmost_remove,
8888+};
8889+
8890+/*--------------------------------------------------------------------------*/
8891+
8892+static int __init timbmost_init(void)
8893+{
8894+ return platform_driver_register(&timbmost_platform_driver);
8895+}
8896+
8897+static void __exit timbmost_exit(void)
8898+{
8899+ platform_driver_unregister(&timbmost_platform_driver);
8900+}
8901+
8902+module_init(timbmost_init);
8903+module_exit(timbmost_exit);
8904+
8905+MODULE_DESCRIPTION("Timberdale MLB driver");
8906+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
8907+MODULE_LICENSE("GPL v2");
8908+MODULE_ALIAS("platform:timb-most");
8909+
8910diff -uNr linux-2.6.31/drivers/serial/Kconfig linux-2.6.31.new/drivers/serial/Kconfig
8911--- linux-2.6.31/drivers/serial/Kconfig 2009-10-23 11:18:08.000000000 -0700
8912+++ linux-2.6.31.new/drivers/serial/Kconfig 2009-10-23 11:17:29.000000000 -0700
8913@@ -855,7 +855,7 @@
8914
8915 config SERIAL_UARTLITE
8916 tristate "Xilinx uartlite serial port support"
8917- depends on PPC32 || MICROBLAZE
8918+ depends on PPC32 || MICROBLAZE || MFD_TIMBERDALE
8919 select SERIAL_CORE
8920 help
8921 Say Y here if you want to use the Xilinx uartlite serial controller.
8922diff -uNr linux-2.6.31/drivers/serial/timbuart.c linux-2.6.31.new/drivers/serial/timbuart.c
8923--- linux-2.6.31/drivers/serial/timbuart.c 2009-10-23 11:18:30.000000000 -0700
8924+++ linux-2.6.31.new/drivers/serial/timbuart.c 2009-10-23 11:17:29.000000000 -0700
8925@@ -31,6 +31,7 @@
8926
8927 struct timbuart_port {
8928 struct uart_port port;
8929+ struct uart_driver uart_driver;
8930 struct tasklet_struct tasklet;
8931 int usedma;
8932 u32 last_ier;
8933@@ -410,7 +411,7 @@
8934 .verify_port = timbuart_verify_port
8935 };
8936
8937-static struct uart_driver timbuart_driver = {
8938+static const __devinitconst struct uart_driver timbuart_driver_template = {
8939 .owner = THIS_MODULE,
8940 .driver_name = "timberdale_uart",
8941 .dev_name = "ttyTU",
8942@@ -419,7 +420,7 @@
8943 .nr = 1
8944 };
8945
8946-static int timbuart_probe(struct platform_device *dev)
8947+static int __devinit timbuart_probe(struct platform_device *dev)
8948 {
8949 int err;
8950 struct timbuart_port *uart;
8951@@ -433,6 +434,8 @@
8952 goto err_mem;
8953 }
8954
8955+ uart->uart_driver = timbuart_driver_template;
8956+
8957 uart->usedma = 0;
8958
8959 uart->port.uartclk = 3250000 * 16;
8960@@ -461,11 +464,11 @@
8961
8962 tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
8963
8964- err = uart_register_driver(&timbuart_driver);
8965+ err = uart_register_driver(&uart->uart_driver);
8966 if (err)
8967 goto err_register;
8968
8969- err = uart_add_one_port(&timbuart_driver, &uart->port);
8970+ err = uart_add_one_port(&uart->uart_driver, &uart->port);
8971 if (err)
8972 goto err_add_port;
8973
8974@@ -474,7 +477,7 @@
8975 return 0;
8976
8977 err_add_port:
8978- uart_unregister_driver(&timbuart_driver);
8979+ uart_unregister_driver(&uart->uart_driver);
8980 err_register:
8981 kfree(uart);
8982 err_mem:
8983@@ -484,13 +487,13 @@
8984 return err;
8985 }
8986
8987-static int timbuart_remove(struct platform_device *dev)
8988+static int __devexit timbuart_remove(struct platform_device *dev)
8989 {
8990 struct timbuart_port *uart = platform_get_drvdata(dev);
8991
8992 tasklet_kill(&uart->tasklet);
8993- uart_remove_one_port(&timbuart_driver, &uart->port);
8994- uart_unregister_driver(&timbuart_driver);
8995+ uart_remove_one_port(&uart->uart_driver, &uart->port);
8996+ uart_unregister_driver(&uart->uart_driver);
8997 kfree(uart);
8998
8999 return 0;
9000diff -uNr linux-2.6.31/drivers/spi/Kconfig linux-2.6.31.new/drivers/spi/Kconfig
9001--- linux-2.6.31/drivers/spi/Kconfig 2009-10-23 11:18:30.000000000 -0700
9002+++ linux-2.6.31.new/drivers/spi/Kconfig 2009-10-23 11:17:32.000000000 -0700
9003@@ -218,8 +218,8 @@
9004 SPI driver for Toshiba TXx9 MIPS SoCs
9005
9006 config SPI_XILINX
9007- tristate "Xilinx SPI controller"
9008- depends on (XILINX_VIRTEX || MICROBLAZE) && EXPERIMENTAL
9009+ tristate "Xilinx SPI controller common module"
9010+ depends on EXPERIMENTAL
9011 select SPI_BITBANG
9012 help
9013 This exposes the SPI controller IP from the Xilinx EDK.
9014@@ -227,6 +227,25 @@
9015 See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
9016 Product Specification document (DS464) for hardware details.
9017
9018+config SPI_XILINX_OF
9019+ tristate "Xilinx SPI controller OF device"
9020+ depends on SPI_XILINX && XILINX_VIRTEX
9021+ help
9022+ This exposes the SPI controller IP from the Xilinx EDK.
9023+
9024+ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
9025+ Product Specification document (DS464) for hardware details.
9026+
9027+config SPI_XILINX_PLTFM
9028+ tristate "Xilinx SPI controller platform device"
9029+ depends on SPI_XILINX
9030+ help
9031+ This exposes the SPI controller IP from the Xilinx EDK.
9032+
9033+ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
9034+ Product Specification document (DS464) for hardware details.
9035+
9036+
9037 #
9038 # Add new SPI master controllers in alphabetical order above this line
9039 #
9040diff -uNr linux-2.6.31/drivers/spi/Makefile linux-2.6.31.new/drivers/spi/Makefile
9041--- linux-2.6.31/drivers/spi/Makefile 2009-10-23 11:18:30.000000000 -0700
9042+++ linux-2.6.31.new/drivers/spi/Makefile 2009-10-23 11:17:32.000000000 -0700
9043@@ -30,6 +30,8 @@
9044 obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
9045 obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
9046 obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
9047+obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
9048+obj-$(CONFIG_SPI_XILINX_PLTFM) += xilinx_spi_pltfm.o
9049 obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
9050 # ... add above this line ...
9051
9052diff -uNr linux-2.6.31/drivers/spi/xilinx_spi.c linux-2.6.31.new/drivers/spi/xilinx_spi.c
9053--- linux-2.6.31/drivers/spi/xilinx_spi.c 2009-10-23 11:18:30.000000000 -0700
9054+++ linux-2.6.31.new/drivers/spi/xilinx_spi.c 2009-10-23 11:17:32.000000000 -0700
9055@@ -14,22 +14,35 @@
9056 #include <linux/module.h>
9057 #include <linux/init.h>
9058 #include <linux/interrupt.h>
9059-#include <linux/platform_device.h>
9060-
9061-#include <linux/of_platform.h>
9062-#include <linux/of_device.h>
9063-#include <linux/of_spi.h>
9064
9065 #include <linux/spi/spi.h>
9066 #include <linux/spi/spi_bitbang.h>
9067 #include <linux/io.h>
9068
9069-#define XILINX_SPI_NAME "xilinx_spi"
9070+#include "xilinx_spi.h"
9071+
9072+struct xilinx_spi {
9073+ /* bitbang has to be first */
9074+ struct spi_bitbang bitbang;
9075+ struct completion done;
9076+ struct resource mem; /* phys mem */
9077+ void __iomem *regs; /* virt. address of the control registers */
9078+ u32 irq;
8079+	u8 *rx_ptr;		/* pointer in the Rx buffer */
8080+	const u8 *tx_ptr;	/* pointer in the Tx buffer */
9081+ int remaining_bytes; /* the number of bytes left to transfer */
9082+ /* offset to the XSPI regs, these might vary... */
9083+ u8 bits_per_word;
9084+ bool big_endian; /* The device could be accessed big or little
9085+ * endian
9086+ */
9087+};
9088+
9089
9090 /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
9091 * Product Specification", DS464
9092 */
9093-#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */
9094+#define XSPI_CR_OFFSET 0x60 /* Control Register */
9095
9096 #define XSPI_CR_ENABLE 0x02
9097 #define XSPI_CR_MASTER_MODE 0x04
9098@@ -40,8 +53,9 @@
9099 #define XSPI_CR_RXFIFO_RESET 0x40
9100 #define XSPI_CR_MANUAL_SSELECT 0x80
9101 #define XSPI_CR_TRANS_INHIBIT 0x100
9102+#define XSPI_CR_LSB_FIRST 0x200
9103
9104-#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */
9105+#define XSPI_SR_OFFSET 0x64 /* Status Register */
9106
9107 #define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
9108 #define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
9109@@ -49,8 +63,8 @@
9110 #define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
9111 #define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
9112
9113-#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */
9114-#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */
9115+#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */
9116+#define XSPI_RXD_OFFSET 0x6C /* Data Receive Register */
9117
9118 #define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
9119
9120@@ -70,43 +84,72 @@
9121 #define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
9122 #define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
9123 #define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
9124+#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */
9125
9126 #define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
9127 #define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
9128
9129-struct xilinx_spi {
9130- /* bitbang has to be first */
9131- struct spi_bitbang bitbang;
9132- struct completion done;
8133+/* The following are helper functions that do little- or big-endian reads and
8134+ * writes depending on the configuration of the device.
8135+ */
9136+static inline void xspi_write8(struct xilinx_spi *xspi, u32 offs, u8 val)
9137+{
9138+ iowrite8(val, xspi->regs + offs + ((xspi->big_endian) ? 3 : 0));
9139+}
9140
9141- void __iomem *regs; /* virt. address of the control registers */
9142+static inline void xspi_write16(struct xilinx_spi *xspi, u32 offs, u16 val)
9143+{
9144+ if (xspi->big_endian)
9145+ iowrite16be(val, xspi->regs + offs + 2);
9146+ else
9147+ iowrite16(val, xspi->regs + offs);
9148+}
9149
9150- u32 irq;
9151+static inline void xspi_write32(struct xilinx_spi *xspi, u32 offs, u32 val)
9152+{
9153+ if (xspi->big_endian)
9154+ iowrite32be(val, xspi->regs + offs);
9155+ else
9156+ iowrite32(val, xspi->regs + offs);
9157+}
9158
9159- u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
9160+static inline u8 xspi_read8(struct xilinx_spi *xspi, u32 offs)
9161+{
9162+ return ioread8(xspi->regs + offs + ((xspi->big_endian) ? 3 : 0));
9163+}
9164
9165- u8 *rx_ptr; /* pointer in the Tx buffer */
9166- const u8 *tx_ptr; /* pointer in the Rx buffer */
9167- int remaining_bytes; /* the number of bytes left to transfer */
9168-};
9169+static inline u16 xspi_read16(struct xilinx_spi *xspi, u32 offs)
9170+{
9171+ if (xspi->big_endian)
9172+ return ioread16be(xspi->regs + offs + 2);
9173+ else
9174+ return ioread16(xspi->regs + offs);
9175+}
9176+
9177+static inline u32 xspi_read32(struct xilinx_spi *xspi, u32 offs)
9178+{
9179+ if (xspi->big_endian)
9180+ return ioread32be(xspi->regs + offs);
9181+ else
9182+ return ioread32(xspi->regs + offs);
9183+}
9184
9185-static void xspi_init_hw(void __iomem *regs_base)
9186+static void xspi_init_hw(struct xilinx_spi *xspi)
9187 {
9188 /* Reset the SPI device */
9189- out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET,
9190- XIPIF_V123B_RESET_MASK);
9191+ xspi_write32(xspi, XIPIF_V123B_RESETR_OFFSET, XIPIF_V123B_RESET_MASK);
9192 /* Disable all the interrupts just in case */
9193- out_be32(regs_base + XIPIF_V123B_IIER_OFFSET, 0);
9194+ xspi_write32(xspi, XIPIF_V123B_IIER_OFFSET, 0);
9195 /* Enable the global IPIF interrupt */
9196- out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET,
9197- XIPIF_V123B_GINTR_ENABLE);
9198+ xspi_write32(xspi, XIPIF_V123B_DGIER_OFFSET, XIPIF_V123B_GINTR_ENABLE);
9199 /* Deselect the slave on the SPI bus */
9200- out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff);
9201+ xspi_write32(xspi, XSPI_SSR_OFFSET, 0xffff);
9202 /* Disable the transmitter, enable Manual Slave Select Assertion,
9203 * put SPI controller into master mode, and enable it */
9204- out_be16(regs_base + XSPI_CR_OFFSET,
9205- XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT
9206- | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE);
9207+ xspi_write16(xspi, XSPI_CR_OFFSET,
9208+ XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT |
9209+ XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET |
9210+ XSPI_CR_RXFIFO_RESET);
9211 }
9212
9213 static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
9214@@ -115,16 +158,16 @@
9215
9216 if (is_on == BITBANG_CS_INACTIVE) {
9217 /* Deselect the slave on the SPI bus */
9218- out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff);
9219+ xspi_write32(xspi, XSPI_SSR_OFFSET, 0xffff);
9220 } else if (is_on == BITBANG_CS_ACTIVE) {
9221 /* Set the SPI clock phase and polarity */
9222- u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET)
9223+ u16 cr = xspi_read16(xspi, XSPI_CR_OFFSET)
9224 & ~XSPI_CR_MODE_MASK;
9225 if (spi->mode & SPI_CPHA)
9226 cr |= XSPI_CR_CPHA;
9227 if (spi->mode & SPI_CPOL)
9228 cr |= XSPI_CR_CPOL;
9229- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
9230+ xspi_write16(xspi, XSPI_CR_OFFSET, cr);
9231
9232 /* We do not check spi->max_speed_hz here as the SPI clock
9233 * frequency is not software programmable (the IP block design
9234@@ -132,24 +175,27 @@
9235 */
9236
9237 /* Activate the chip select */
9238- out_be32(xspi->regs + XSPI_SSR_OFFSET,
9239+ xspi_write32(xspi, XSPI_SSR_OFFSET,
9240 ~(0x0001 << spi->chip_select));
9241 }
9242 }
9243
9244 /* spi_bitbang requires custom setup_transfer() to be defined if there is a
9245 * custom txrx_bufs(). We have nothing to setup here as the SPI IP block
9246- * supports just 8 bits per word, and SPI clock can't be changed in software.
9247- * Check for 8 bits per word. Chip select delay calculations could be
9248+ * supports 8 or 16 bits per word, which can not be changed in software.
9249+ * SPI clock can't be changed in software.
9250+ * Check for correct bits per word. Chip select delay calculations could be
9251 * added here as soon as bitbang_work() can be made aware of the delay value.
9252 */
9253 static int xilinx_spi_setup_transfer(struct spi_device *spi,
9254- struct spi_transfer *t)
9255+ struct spi_transfer *t)
9256 {
9257 u8 bits_per_word;
9258+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
9259
9260- bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
9261- if (bits_per_word != 8) {
9262+ bits_per_word = (t->bits_per_word) ? t->bits_per_word :
9263+ spi->bits_per_word;
9264+ if (bits_per_word != xspi->bits_per_word) {
9265 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
9266 __func__, bits_per_word);
9267 return -EINVAL;
9268@@ -160,34 +206,50 @@
9269
9270 static int xilinx_spi_setup(struct spi_device *spi)
9271 {
9272- struct spi_bitbang *bitbang;
9273- struct xilinx_spi *xspi;
9274- int retval;
9275-
9276- xspi = spi_master_get_devdata(spi->master);
9277- bitbang = &xspi->bitbang;
9278-
9279- retval = xilinx_spi_setup_transfer(spi, NULL);
9280- if (retval < 0)
9281- return retval;
9282-
9283+ /* always return 0, we can not check the number of bits.
9284+ * There are cases when SPI setup is called before any driver is
9285+ * there, in that case the SPI core defaults to 8 bits, which we
9286+ * do not support in some cases. But if we return an error, the
9287+ * SPI device would not be registered and no driver can get hold of it
9288+ * When the driver is there, it will call SPI setup again with the
9289+ * correct number of bits per transfer.
9290+ * If a driver setups with the wrong bit number, it will fail when
9291+ * it tries to do a transfer
9292+ */
9293 return 0;
9294 }
9295
9296 static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
9297 {
9298 u8 sr;
9299+ u8 wsize;
9300+ if (xspi->bits_per_word == 8)
9301+ wsize = 1;
9302+ else if (xspi->bits_per_word == 16)
9303+ wsize = 2;
9304+ else
9305+ wsize = 4;
9306
9307 /* Fill the Tx FIFO with as many bytes as possible */
9308- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
9309- while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
9310+ sr = xspi_read8(xspi, XSPI_SR_OFFSET);
9311+ while ((sr & XSPI_SR_TX_FULL_MASK) == 0 &&
9312+ xspi->remaining_bytes > 0) {
9313 if (xspi->tx_ptr) {
9314- out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++);
9315- } else {
9316- out_8(xspi->regs + XSPI_TXD_OFFSET, 0);
9317- }
9318- xspi->remaining_bytes--;
9319- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
9320+ if (wsize == 1)
9321+ xspi_write8(xspi, XSPI_TXD_OFFSET,
9322+ *xspi->tx_ptr);
9323+ else if (wsize == 2)
9324+ xspi_write16(xspi, XSPI_TXD_OFFSET,
9325+ *(u16 *)(xspi->tx_ptr));
9326+ else if (wsize == 4)
9327+ xspi_write32(xspi, XSPI_TXD_OFFSET,
9328+ *(u32 *)(xspi->tx_ptr));
9329+
9330+ xspi->tx_ptr += wsize;
9331+ } else
9332+ xspi_write8(xspi, XSPI_TXD_OFFSET, 0);
9333+ xspi->remaining_bytes -= wsize;
9334+ sr = xspi_read8(xspi, XSPI_SR_OFFSET);
9335 }
9336 }
9337
9338@@ -209,23 +271,22 @@
9339 /* Enable the transmit empty interrupt, which we use to determine
9340 * progress on the transmission.
9341 */
9342- ipif_ier = in_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET);
9343- out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET,
9344+ ipif_ier = xspi_read32(xspi, XIPIF_V123B_IIER_OFFSET);
9345+ xspi_write32(xspi, XIPIF_V123B_IIER_OFFSET,
9346 ipif_ier | XSPI_INTR_TX_EMPTY);
9347
9348 /* Start the transfer by not inhibiting the transmitter any longer */
9349- cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
9350- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
9351+ cr = xspi_read16(xspi, XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
9352+ xspi_write16(xspi, XSPI_CR_OFFSET, cr);
9353
9354 wait_for_completion(&xspi->done);
9355
9356 /* Disable the transmit empty interrupt */
9357- out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, ipif_ier);
9358+ xspi_write32(xspi, XIPIF_V123B_IIER_OFFSET, ipif_ier);
9359
9360 return t->len - xspi->remaining_bytes;
9361 }
9362
9363-
9364 /* This driver supports single master mode only. Hence Tx FIFO Empty
9365 * is the only interrupt we care about.
9366 * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
9367@@ -237,32 +298,50 @@
9368 u32 ipif_isr;
9369
9370 /* Get the IPIF interrupts, and clear them immediately */
9371- ipif_isr = in_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET);
9372- out_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET, ipif_isr);
9373+ ipif_isr = xspi_read32(xspi, XIPIF_V123B_IISR_OFFSET);
9374+ xspi_write32(xspi, XIPIF_V123B_IISR_OFFSET, ipif_isr);
9375
9376 if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
9377 u16 cr;
9378 u8 sr;
9379+ u8 rsize;
9380+ if (xspi->bits_per_word == 8)
9381+ rsize = 1;
9382+ else if (xspi->bits_per_word == 16)
9383+ rsize = 2;
9384+ else
9385+ rsize = 4;
9386
9387 /* A transmit has just completed. Process received data and
9388 * check for more data to transmit. Always inhibit the
9389 * transmitter while the Isr refills the transmit register/FIFO,
9390 * or make sure it is stopped if we're done.
9391 */
9392- cr = in_be16(xspi->regs + XSPI_CR_OFFSET);
9393- out_be16(xspi->regs + XSPI_CR_OFFSET,
9394- cr | XSPI_CR_TRANS_INHIBIT);
9395+ cr = xspi_read16(xspi, XSPI_CR_OFFSET);
9396+ xspi_write16(xspi, XSPI_CR_OFFSET, cr | XSPI_CR_TRANS_INHIBIT);
9397
9398 /* Read out all the data from the Rx FIFO */
9399- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
9400+ sr = xspi_read8(xspi, XSPI_SR_OFFSET);
9401 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
9402- u8 data;
9403+ u32 data;
9404+ if (rsize == 1)
9405+ data = xspi_read8(xspi, XSPI_RXD_OFFSET);
9406+ else if (rsize == 2)
9407+ data = xspi_read16(xspi, XSPI_RXD_OFFSET);
9408+ else
9409+ data = xspi_read32(xspi, XSPI_RXD_OFFSET);
9410
9411- data = in_8(xspi->regs + XSPI_RXD_OFFSET);
9412 if (xspi->rx_ptr) {
9413- *xspi->rx_ptr++ = data;
9414+ if (rsize == 1)
9415+ *xspi->rx_ptr = data & 0xff;
9416+ else if (rsize == 2)
9417+ *(u16 *)(xspi->rx_ptr) = data & 0xffff;
9418+ else
9419+ *((u32 *)(xspi->rx_ptr)) = data;
9420+ xspi->rx_ptr += rsize;
9421 }
9422- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
9423+
9424+ sr = xspi_read8(xspi, XSPI_SR_OFFSET);
9425 }
9426
9427 /* See if there is more data to send */
9428@@ -271,7 +350,7 @@
9429 /* Start the transfer by not inhibiting the
9430 * transmitter any longer
9431 */
9432- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
9433+ xspi_write16(xspi, XSPI_CR_OFFSET, cr);
9434 } else {
9435 /* No more data to send.
9436 * Indicate the transfer is completed.
9437@@ -279,44 +358,21 @@
9438 complete(&xspi->done);
9439 }
9440 }
9441-
9442 return IRQ_HANDLED;
9443 }
9444
9445-static int __init xilinx_spi_of_probe(struct of_device *ofdev,
9446- const struct of_device_id *match)
9447+struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
9448+ u32 irq, s16 bus_num, u16 num_chipselect, u8 bits_per_word,
9449+ bool big_endian)
9450 {
9451 struct spi_master *master;
9452 struct xilinx_spi *xspi;
9453- struct resource r_irq_struct;
9454- struct resource r_mem_struct;
9455+ int ret = 0;
9456
9457- struct resource *r_irq = &r_irq_struct;
9458- struct resource *r_mem = &r_mem_struct;
9459- int rc = 0;
9460- const u32 *prop;
9461- int len;
9462+ master = spi_alloc_master(dev, sizeof(struct xilinx_spi));
9463
9464- /* Get resources(memory, IRQ) associated with the device */
9465- master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi));
9466-
9467- if (master == NULL) {
9468- return -ENOMEM;
9469- }
9470-
9471- dev_set_drvdata(&ofdev->dev, master);
9472-
9473- rc = of_address_to_resource(ofdev->node, 0, r_mem);
9474- if (rc) {
9475- dev_warn(&ofdev->dev, "invalid address\n");
9476- goto put_master;
9477- }
9478-
9479- rc = of_irq_to_resource(ofdev->node, 0, r_irq);
9480- if (rc == NO_IRQ) {
9481- dev_warn(&ofdev->dev, "no IRQ found\n");
9482- goto put_master;
9483- }
9484+ if (master == NULL)
9485+ return ERR_PTR(-ENOMEM);
9486
9487 /* the spi->mode bits understood by this driver: */
9488 master->mode_bits = SPI_CPOL | SPI_CPHA;
9489@@ -329,128 +385,73 @@
9490 xspi->bitbang.master->setup = xilinx_spi_setup;
9491 init_completion(&xspi->done);
9492
9493- xspi->irq = r_irq->start;
9494-
9495- if (!request_mem_region(r_mem->start,
9496- r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) {
9497- rc = -ENXIO;
9498- dev_warn(&ofdev->dev, "memory request failure\n");
9499+ if (!request_mem_region(mem->start, resource_size(mem),
9500+ XILINX_SPI_NAME)) {
9501+ ret = -ENXIO;
9502 goto put_master;
9503 }
9504
9505- xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
9506+ xspi->regs = ioremap(mem->start, resource_size(mem));
9507 if (xspi->regs == NULL) {
9508- rc = -ENOMEM;
9509- dev_warn(&ofdev->dev, "ioremap failure\n");
9510- goto release_mem;
9511+ ret = -ENOMEM;
9512+ dev_warn(dev, "ioremap failure\n");
9513+ goto map_failed;
9514 }
9515- xspi->irq = r_irq->start;
9516
9517- /* dynamic bus assignment */
9518- master->bus_num = -1;
9519-
9520- /* number of slave select bits is required */
9521- prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
9522- if (!prop || len < sizeof(*prop)) {
9523- dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
9524- goto unmap_io;
9525- }
9526- master->num_chipselect = *prop;
9527+ master->bus_num = bus_num;
9528+ master->num_chipselect = num_chipselect;
9529+
9530+ xspi->mem = *mem;
9531+ xspi->irq = irq;
9532+ xspi->bits_per_word = bits_per_word;
9533+ xspi->big_endian = big_endian;
9534
9535 /* SPI controller initializations */
9536- xspi_init_hw(xspi->regs);
9537+ xspi_init_hw(xspi);
9538
9539 /* Register for SPI Interrupt */
9540- rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
9541- if (rc != 0) {
9542- dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq);
9543+ ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
9544+ if (ret != 0)
9545 goto unmap_io;
9546- }
9547
9548- rc = spi_bitbang_start(&xspi->bitbang);
9549- if (rc != 0) {
9550- dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n");
9551+ ret = spi_bitbang_start(&xspi->bitbang);
9552+ if (ret != 0) {
9553+ dev_err(dev, "spi_bitbang_start FAILED\n");
9554 goto free_irq;
9555 }
9556
9557- dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
9558- (unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq);
9559-
9560- /* Add any subnodes on the SPI bus */
9561- of_register_spi_devices(master, ofdev->node);
9562-
9563- return rc;
9564+ dev_info(dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
9565+ (u32)mem->start, (u32)xspi->regs, xspi->irq);
9566+ return master;
9567
9568 free_irq:
9569 free_irq(xspi->irq, xspi);
9570 unmap_io:
9571 iounmap(xspi->regs);
9572-release_mem:
9573- release_mem_region(r_mem->start, resource_size(r_mem));
9574+map_failed:
9575+ release_mem_region(mem->start, resource_size(mem));
9576 put_master:
9577 spi_master_put(master);
9578- return rc;
9579+ return ERR_PTR(ret);
9580 }
9581+EXPORT_SYMBOL(xilinx_spi_init);
9582
9583-static int __devexit xilinx_spi_remove(struct of_device *ofdev)
9584+void xilinx_spi_deinit(struct spi_master *master)
9585 {
9586 struct xilinx_spi *xspi;
9587- struct spi_master *master;
9588- struct resource r_mem;
9589
9590- master = platform_get_drvdata(ofdev);
9591 xspi = spi_master_get_devdata(master);
9592
9593 spi_bitbang_stop(&xspi->bitbang);
9594 free_irq(xspi->irq, xspi);
9595 iounmap(xspi->regs);
9596- if (!of_address_to_resource(ofdev->node, 0, &r_mem))
9597- release_mem_region(r_mem.start, resource_size(&r_mem));
9598- dev_set_drvdata(&ofdev->dev, 0);
9599- spi_master_put(xspi->bitbang.master);
9600-
9601- return 0;
9602-}
9603-
9604-/* work with hotplug and coldplug */
9605-MODULE_ALIAS("platform:" XILINX_SPI_NAME);
9606-
9607-static int __exit xilinx_spi_of_remove(struct of_device *op)
9608-{
9609- return xilinx_spi_remove(op);
9610-}
9611
9612-static struct of_device_id xilinx_spi_of_match[] = {
9613- { .compatible = "xlnx,xps-spi-2.00.a", },
9614- { .compatible = "xlnx,xps-spi-2.00.b", },
9615- {}
9616-};
9617-
9618-MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
9619-
9620-static struct of_platform_driver xilinx_spi_of_driver = {
9621- .owner = THIS_MODULE,
9622- .name = "xilinx-xps-spi",
9623- .match_table = xilinx_spi_of_match,
9624- .probe = xilinx_spi_of_probe,
9625- .remove = __exit_p(xilinx_spi_of_remove),
9626- .driver = {
9627- .name = "xilinx-xps-spi",
9628- .owner = THIS_MODULE,
9629- },
9630-};
9631-
9632-static int __init xilinx_spi_init(void)
9633-{
9634- return of_register_platform_driver(&xilinx_spi_of_driver);
9635+ release_mem_region(xspi->mem.start, resource_size(&xspi->mem));
9636+ spi_master_put(xspi->bitbang.master);
9637 }
9638-module_init(xilinx_spi_init);
9639+EXPORT_SYMBOL(xilinx_spi_deinit);
9640
9641-static void __exit xilinx_spi_exit(void)
9642-{
9643- of_unregister_platform_driver(&xilinx_spi_of_driver);
9644-}
9645-module_exit(xilinx_spi_exit);
9646 MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
9647 MODULE_DESCRIPTION("Xilinx SPI driver");
9648 MODULE_LICENSE("GPL");
9649+
9650diff -uNr linux-2.6.31/drivers/spi/xilinx_spi.h linux-2.6.31.new/drivers/spi/xilinx_spi.h
9651--- linux-2.6.31/drivers/spi/xilinx_spi.h 1969-12-31 16:00:00.000000000 -0800
9652+++ linux-2.6.31.new/drivers/spi/xilinx_spi.h 2009-10-23 11:17:32.000000000 -0700
9653@@ -0,0 +1,33 @@
9654+/*
9655+ * xilinx_spi.h
9656+ * Copyright (c) 2009 Intel Corporation
9657+ *
9658+ * This program is free software; you can redistribute it and/or modify
9659+ * it under the terms of the GNU General Public License version 2 as
9660+ * published by the Free Software Foundation.
9661+ *
9662+ * This program is distributed in the hope that it will be useful,
9663+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
9664+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9665+ * GNU General Public License for more details.
9666+ *
9667+ * You should have received a copy of the GNU General Public License
9668+ * along with this program; if not, write to the Free Software
9669+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
9670+ */
9671+
9672+#ifndef _XILINX_SPI_H_
9673+#define _XILINX_SPI_H_ 1
9674+
9675+#include <linux/spi/spi.h>
9676+#include <linux/spi/spi_bitbang.h>
9677+#include <linux/spi/xilinx_spi.h>
9678+
9679+#define XILINX_SPI_NAME "xilinx_spi"
9680+
9681+struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
9682+ u32 irq, s16 bus_num, u16 num_chipselect, u8 bits_per_word,
9683+ bool big_endian);
9684+
9685+void xilinx_spi_deinit(struct spi_master *master);
9686+#endif
9687diff -uNr linux-2.6.31/drivers/spi/xilinx_spi_of.c linux-2.6.31.new/drivers/spi/xilinx_spi_of.c
9688--- linux-2.6.31/drivers/spi/xilinx_spi_of.c 1969-12-31 16:00:00.000000000 -0800
9689+++ linux-2.6.31.new/drivers/spi/xilinx_spi_of.c 2009-10-23 11:17:32.000000000 -0700
9690@@ -0,0 +1,120 @@
9691+/*
9692+ * xilinx_spi_of.c
9693+ *
9694+ * Xilinx SPI controller driver (master mode only)
9695+ *
9696+ * Author: MontaVista Software, Inc.
9697+ * source@mvista.com
9698+ *
9699+ * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the
9700+ * terms of the GNU General Public License version 2. This program is licensed
9701+ * "as is" without any warranty of any kind, whether express or implied.
9702+ */
9703+
9704+#include <linux/module.h>
9705+#include <linux/init.h>
9706+#include <linux/interrupt.h>
9707+#include <linux/io.h>
9708+#include <linux/platform_device.h>
9709+
9710+#include <linux/of_platform.h>
9711+#include <linux/of_device.h>
9712+#include <linux/of_spi.h>
9713+
9714+#include <linux/spi/spi.h>
9715+#include <linux/spi/spi_bitbang.h>
9716+
9717+#include "xilinx_spi.h"
9718+
9719+
9720+static int __init xilinx_spi_of_probe(struct of_device *ofdev,
9721+ const struct of_device_id *match)
9722+{
9723+ struct resource r_irq_struct;
9724+ struct resource r_mem_struct;
9725+ struct spi_master *master;
9726+
9727+ struct resource *r_irq = &r_irq_struct;
9728+ struct resource *r_mem = &r_mem_struct;
9729+ int rc = 0;
9730+ const u32 *prop;
9731+ int len;
9732+
9733+ rc = of_address_to_resource(ofdev->node, 0, r_mem);
9734+ if (rc) {
9735+ dev_warn(&ofdev->dev, "invalid address\n");
9736+ return rc;
9737+ }
9738+
9739+ rc = of_irq_to_resource(ofdev->node, 0, r_irq);
9740+ if (rc == NO_IRQ) {
9741+ dev_warn(&ofdev->dev, "no IRQ found\n");
9742+ return -ENODEV;
9743+ }
9744+
9745+ /* number of slave select bits is required */
9746+ prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
9747+ if (!prop || len < sizeof(*prop)) {
9748+ dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
9749+ return -EINVAL;
9750+ }
9751+ master = xilinx_spi_init(&ofdev->dev, r_mem, r_irq->start, -1, *prop, 8,
9752+ true);
9753+ if (IS_ERR(master))
9754+ return PTR_ERR(master);
9755+
9756+ dev_set_drvdata(&ofdev->dev, master);
9757+
9758+ /* Add any subnodes on the SPI bus */
9759+ of_register_spi_devices(master, ofdev->node);
9760+
9761+ return 0;
9762+}
9763+
9764+static int __devexit xilinx_spi_remove(struct of_device *ofdev)
9765+{
9766+ xilinx_spi_deinit(dev_get_drvdata(&ofdev->dev));
9767+ dev_set_drvdata(&ofdev->dev, 0);
9768+ return 0;
9769+}
9770+
9771+static int __exit xilinx_spi_of_remove(struct of_device *op)
9772+{
9773+ return xilinx_spi_remove(op);
9774+}
9775+
9776+static struct of_device_id xilinx_spi_of_match[] = {
9777+ { .compatible = "xlnx,xps-spi-2.00.a", },
9778+ { .compatible = "xlnx,xps-spi-2.00.b", },
9779+ {}
9780+};
9781+
9782+MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
9783+
9784+static struct of_platform_driver xilinx_spi_of_driver = {
9785+ .owner = THIS_MODULE,
9786+ .name = "xilinx-xps-spi",
9787+ .match_table = xilinx_spi_of_match,
9788+ .probe = xilinx_spi_of_probe,
9789+ .remove = __exit_p(xilinx_spi_of_remove),
9790+ .driver = {
9791+ .name = "xilinx-xps-spi",
9792+ .owner = THIS_MODULE,
9793+ },
9794+};
9795+
9796+static int __init xilinx_spi_of_init(void)
9797+{
9798+ return of_register_platform_driver(&xilinx_spi_of_driver);
9799+}
9800+module_init(xilinx_spi_of_init);
9801+
9802+static void __exit xilinx_spi_of_exit(void)
9803+{
9804+ of_unregister_platform_driver(&xilinx_spi_of_driver);
9805+}
9806+module_exit(xilinx_spi_of_exit);
9807+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
9808+MODULE_DESCRIPTION("Xilinx SPI driver");
9809+MODULE_LICENSE("GPL");
9810+
9811diff -uNr linux-2.6.31/drivers/spi/xilinx_spi_pltfm.c linux-2.6.31.new/drivers/spi/xilinx_spi_pltfm.c
9812--- linux-2.6.31/drivers/spi/xilinx_spi_pltfm.c 1969-12-31 16:00:00.000000000 -0800
9813+++ linux-2.6.31.new/drivers/spi/xilinx_spi_pltfm.c 2009-10-23 11:17:32.000000000 -0700
9814@@ -0,0 +1,104 @@
9815+/*
9816+ * xilinx_spi_pltfm.c Support for Xilinx SPI platform devices
9817+ * Copyright (c) 2009 Intel Corporation
9818+ *
9819+ * This program is free software; you can redistribute it and/or modify
9820+ * it under the terms of the GNU General Public License version 2 as
9821+ * published by the Free Software Foundation.
9822+ *
9823+ * This program is distributed in the hope that it will be useful,
9824+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
9825+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9826+ * GNU General Public License for more details.
9827+ *
9828+ * You should have received a copy of the GNU General Public License
9829+ * along with this program; if not, write to the Free Software
9830+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
9831+ */
9832+
9833+/* Supports:
9834+ * Xilinx SPI devices as platform devices
9835+ *
9836+ * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
9837+ */
9838+
9839+#include <linux/module.h>
9840+#include <linux/init.h>
9841+#include <linux/interrupt.h>
9842+#include <linux/io.h>
9843+#include <linux/platform_device.h>
9844+
9845+#include <linux/spi/spi.h>
9846+#include <linux/spi/spi_bitbang.h>
9847+#include <linux/spi/xilinx_spi.h>
9848+
9849+#include "xilinx_spi.h"
9850+
9851+static int __devinit xilinx_spi_probe(struct platform_device *dev)
9852+{
9853+ struct xspi_platform_data *pdata;
9854+ struct resource *r;
9855+ int irq;
9856+ struct spi_master *master;
9857+ u8 i;
9858+
9859+ pdata = dev->dev.platform_data;
9860+ if (pdata == NULL)
9861+ return -ENODEV;
9862+
9863+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
9864+ if (r == NULL)
9865+ return -ENODEV;
9866+
9867+ irq = platform_get_irq(dev, 0);
9868+ if (irq < 0)
9869+ return -ENXIO;
9870+
9871+ master = xilinx_spi_init(&dev->dev, r, irq, dev->id,
9872+ pdata->num_chipselect, pdata->bits_per_word, false);
9873+ if (IS_ERR(master))
9874+ return PTR_ERR(master);
9875+
9876+ for (i = 0; i < pdata->num_devices; i++)
9877+ spi_new_device(master, pdata->devices + i);
9878+
9879+ platform_set_drvdata(dev, master);
9880+ return 0;
9881+}
9882+
9883+static int __devexit xilinx_spi_remove(struct platform_device *dev)
9884+{
9885+ xilinx_spi_deinit(platform_get_drvdata(dev));
9886+ platform_set_drvdata(dev, 0);
9887+
9888+ return 0;
9889+}
9890+
9891+/* work with hotplug and coldplug */
9892+MODULE_ALIAS("platform:" XILINX_SPI_NAME);
9893+
9894+static struct platform_driver xilinx_spi_driver = {
9895+ .probe = xilinx_spi_probe,
9896+ .remove = __devexit_p(xilinx_spi_remove),
9897+ .driver = {
9898+ .name = XILINX_SPI_NAME,
9899+ .owner = THIS_MODULE,
9900+ },
9901+};
9902+
9903+static int __init xilinx_spi_pltfm_init(void)
9904+{
9905+ return platform_driver_register(&xilinx_spi_driver);
9906+}
9907+module_init(xilinx_spi_pltfm_init);
9908+
9909+static void __exit xilinx_spi_pltfm_exit(void)
9910+{
9911+ platform_driver_unregister(&xilinx_spi_driver);
9912+}
9913+module_exit(xilinx_spi_pltfm_exit);
9914+
9915+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
9916+MODULE_DESCRIPTION("Xilinx SPI platform driver");
9917+MODULE_LICENSE("GPL v2");
9918+
9919diff -uNr linux-2.6.31/include/linux/can/platform/ascb.h linux-2.6.31.new/include/linux/can/platform/ascb.h
9920--- linux-2.6.31/include/linux/can/platform/ascb.h 1969-12-31 16:00:00.000000000 -0800
9921+++ linux-2.6.31.new/include/linux/can/platform/ascb.h 2009-10-23 11:16:56.000000000 -0700
9922@@ -0,0 +1,8 @@
9923+#ifndef _CAN_PLATFORM_ASCB_H_
9924+#define _CAN_PLATFORM_ASCB_H_
9925+
9926+struct ascb_platform_data {
9927+ int gpio_pin;
9928+};
9929+
9930+#endif
9931diff -uNr linux-2.6.31/include/linux/i2c-xiic.h linux-2.6.31.new/include/linux/i2c-xiic.h
9932--- linux-2.6.31/include/linux/i2c-xiic.h 1969-12-31 16:00:00.000000000 -0800
9933+++ linux-2.6.31.new/include/linux/i2c-xiic.h 2009-10-23 11:16:56.000000000 -0700
9934@@ -0,0 +1,31 @@
9935+/*
9936+ * i2c-xiic.h
9937+ * Copyright (c) 2009 Intel Corporation
9938+ *
9939+ * This program is free software; you can redistribute it and/or modify
9940+ * it under the terms of the GNU General Public License version 2 as
9941+ * published by the Free Software Foundation.
9942+ *
9943+ * This program is distributed in the hope that it will be useful,
9944+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
9945+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9946+ * GNU General Public License for more details.
9947+ *
9948+ * You should have received a copy of the GNU General Public License
9949+ * along with this program; if not, write to the Free Software
9950+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
9951+ */
9952+
9953+/* Supports:
9954+ * Xilinx IIC
9955+ */
9956+
9957+#ifndef _LINUX_I2C_XIIC_H
9958+#define _LINUX_I2C_XIIC_H
9959+
9960+struct xiic_i2c_platform_data {
9961+ u8 num_devices; /* number of devices in the devices list */
9962+ struct i2c_board_info const *devices; /* devices connected to the bus */
9963+};
9964+
9965+#endif /* _LINUX_I2C_XIIC_H */
9966diff -uNr linux-2.6.31/include/linux/mfd/timbdma.h linux-2.6.31.new/include/linux/mfd/timbdma.h
9967--- linux-2.6.31/include/linux/mfd/timbdma.h 1969-12-31 16:00:00.000000000 -0800
9968+++ linux-2.6.31.new/include/linux/mfd/timbdma.h 2009-10-23 11:16:56.000000000 -0700
9969@@ -0,0 +1,58 @@
9970+/*
9971+ * timbdma.h timberdale FPGA DMA driver defines
9972+ * Copyright (c) 2009 Intel Corporation
9973+ *
9974+ * This program is free software; you can redistribute it and/or modify
9975+ * it under the terms of the GNU General Public License version 2 as
9976+ * published by the Free Software Foundation.
9977+ *
9978+ * This program is distributed in the hope that it will be useful,
9979+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
9980+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9981+ * GNU General Public License for more details.
9982+ *
9983+ * You should have received a copy of the GNU General Public License
9984+ * along with this program; if not, write to the Free Software
9985+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
9986+ */
9987+
9988+/* Supports:
9989+ * Timberdale FPGA DMA engine
9990+ */
9991+
9992+#ifndef _TIMBDMA_H
9993+#define _TIMBDMA_H
9994+
9995+#include <linux/spinlock.h>
9996+
9997+
9998+#define DMA_IRQ_UART_RX 0x001
9999+#define DMA_IRQ_UART_TX 0x002
10000+#define DMA_IRQ_MLB_RX 0x004
10001+#define DMA_IRQ_MLB_TX 0x008
10002+#define DMA_IRQ_VIDEO_RX 0x010
10003+#define DMA_IRQ_VIDEO_DROP 0x020
10004+#define DMA_IRQ_SDHCI_RX 0x040
10005+#define DMA_IRQ_SDHCI_TX 0x080
10006+#define DMA_IRQ_ETH_RX 0x100
10007+#define DMA_IRQ_ETH_TX 0x200
10008+#define DMA_IRQS 10
10009+
10010+
10011+typedef int (*timbdma_interruptcb)(u32 flag, void *data);
10012+
10013+
10014+int timbdma_start(u32 flag, void *desc, int bytes_per_row);
10015+
10016+int timbdma_stop(u32 flags);
10017+
10018+void timbdma_set_interruptcb(u32 flags, timbdma_interruptcb icb, void *data);
10019+
10020+void *timbdma_alloc_desc(u32 size, u16 alignment);
10021+
10022+void timbdma_free_desc(void *desc);
10023+
10024+int timbdma_prep_desc(void *desc, dma_addr_t buf, u32 size);
10025+
10026+#endif /* _TIMBDMA_H */
10027+
10028diff -uNr linux-2.6.31/include/linux/most/timbmlb.h linux-2.6.31.new/include/linux/most/timbmlb.h
10029--- linux-2.6.31/include/linux/most/timbmlb.h 1969-12-31 16:00:00.000000000 -0800
10030+++ linux-2.6.31.new/include/linux/most/timbmlb.h 2009-10-23 11:16:56.000000000 -0700
10031@@ -0,0 +1,9 @@
10032+#ifndef __LINUX_MOST_TIMBMLB_H
10033+#define __LINUX_MOST_TIMBMLB_H
10034+
10035+/* Timberdale MLB IP */
10036+struct timbmlb_platform_data {
10037+ int reset_pin; /* pin used for reset of the INIC */
10038+};
10039+
10040+#endif
10041diff -uNr linux-2.6.31/include/linux/socket.h linux-2.6.31.new/include/linux/socket.h
10042--- linux-2.6.31/include/linux/socket.h 2009-10-23 11:18:30.000000000 -0700
10043+++ linux-2.6.31.new/include/linux/socket.h 2009-10-23 11:16:56.000000000 -0700
10044@@ -195,7 +195,8 @@
10045 #define AF_ISDN 34 /* mISDN sockets */
10046 #define AF_PHONET 35 /* Phonet sockets */
10047 #define AF_IEEE802154 36 /* IEEE802154 sockets */
10048-#define AF_MAX 37 /* For now.. */
10049+#define AF_MOST 37 /* Media Oriented Systems Transport */
10050+#define AF_MAX 38 /* For now.. */
10051
10052 /* Protocol families, same as address families. */
10053 #define PF_UNSPEC AF_UNSPEC
10054@@ -235,6 +236,7 @@
10055 #define PF_ISDN AF_ISDN
10056 #define PF_PHONET AF_PHONET
10057 #define PF_IEEE802154 AF_IEEE802154
10058+#define PF_MOST AF_MOST
10059 #define PF_MAX AF_MAX
10060
10061 /* Maximum queue length specifiable by listen. */
10062diff -uNr linux-2.6.31/include/linux/spi/mc33880.h linux-2.6.31.new/include/linux/spi/mc33880.h
10063--- linux-2.6.31/include/linux/spi/mc33880.h 1969-12-31 16:00:00.000000000 -0800
10064+++ linux-2.6.31.new/include/linux/spi/mc33880.h 2009-10-23 11:16:56.000000000 -0700
10065@@ -0,0 +1,10 @@
10066+#ifndef LINUX_SPI_MC33880_H
10067+#define LINUX_SPI_MC33880_H
10068+
10069+struct mc33880_platform_data {
10070+ /* number assigned to the first GPIO */
10071+ unsigned base;
10072+};
10073+
10074+#endif
10075+
10076diff -uNr linux-2.6.31/include/linux/spi/xilinx_spi.h linux-2.6.31.new/include/linux/spi/xilinx_spi.h
10077--- linux-2.6.31/include/linux/spi/xilinx_spi.h 1969-12-31 16:00:00.000000000 -0800
10078+++ linux-2.6.31.new/include/linux/spi/xilinx_spi.h 2009-10-23 11:16:56.000000000 -0700
10079@@ -0,0 +1,19 @@
10080+#ifndef __LINUX_SPI_XILINX_SPI_H
10081+#define __LINUX_SPI_XILINX_SPI_H
10082+
10083+/**
10084+ * struct xspi_platform_data - Platform data of the Xilinx SPI driver
10085+ * @num_chipselect: Number of chip select by the IP
10086+ * @bits_per_word: Number of bits per word. 8/16/32, Note that the DS464
10087+ * only support 8bit SPI.
10088+ * @devices: Devices to add when the driver is probed.
10089+ * @num_devices: Number of devices in the devices array.
10090+ */
10091+struct xspi_platform_data {
10092+ u16 num_chipselect;
10093+ u8 bits_per_word;
10094+ struct spi_board_info *devices;
10095+ u8 num_devices;
10096+};
10097+
10098+#endif /* __LINUX_SPI_XILINX_SPI_H */
10099diff -uNr linux-2.6.31/include/linux/timb_gpio.h linux-2.6.31.new/include/linux/timb_gpio.h
10100--- linux-2.6.31/include/linux/timb_gpio.h 1969-12-31 16:00:00.000000000 -0800
10101+++ linux-2.6.31.new/include/linux/timb_gpio.h 2009-10-23 11:16:56.000000000 -0700
10102@@ -0,0 +1,28 @@
10103+/*
10104+ * timb_gpio.h timberdale FPGA GPIO driver, platform data definition
10105+ * Copyright (c) 2009 Intel Corporation
10106+ *
10107+ * This program is free software; you can redistribute it and/or modify
10108+ * it under the terms of the GNU General Public License version 2 as
10109+ * published by the Free Software Foundation.
10110+ *
10111+ * This program is distributed in the hope that it will be useful,
10112+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10113+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10114+ * GNU General Public License for more details.
10115+ *
10116+ * You should have received a copy of the GNU General Public License
10117+ * along with this program; if not, write to the Free Software
10118+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10119+ */
10120+
10121+#ifndef _LINUX_TIMB_GPIO_H
10122+#define _LINUX_TIMB_GPIO_H
10123+
10124+struct timbgpio_platform_data {
10125+ int gpio_base;
10126+ int nr_pins;
10127+ int irq_base;
10128+};
10129+
10130+#endif
10131diff -uNr linux-2.6.31/include/media/timb_radio.h linux-2.6.31.new/include/media/timb_radio.h
10132--- linux-2.6.31/include/media/timb_radio.h 1969-12-31 16:00:00.000000000 -0800
10133+++ linux-2.6.31.new/include/media/timb_radio.h 2009-10-23 11:16:55.000000000 -0700
10134@@ -0,0 +1,31 @@
10135+/*
10136+ * timb_radio.h Platform struct for the Timberdale radio driver
10137+ * Copyright (c) 2009 Intel Corporation
10138+ *
10139+ * This program is free software; you can redistribute it and/or modify
10140+ * it under the terms of the GNU General Public License version 2 as
10141+ * published by the Free Software Foundation.
10142+ *
10143+ * This program is distributed in the hope that it will be useful,
10144+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10145+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10146+ * GNU General Public License for more details.
10147+ *
10148+ * You should have received a copy of the GNU General Public License
10149+ * along with this program; if not, write to the Free Software
10150+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10151+ */
10152+
10153+#ifndef _TIMB_RADIO_
10154+#define _TIMB_RADIO_ 1
10155+
10156+#include <linux/i2c.h>
10157+
10158+struct timb_radio_platform_data {
10159+ int i2c_adapter; /* I2C adapter where the tuner and dsp are attached */
10160+ char tuner[32];
10161+ char dsp[32];
10162+};
10163+
10164+#endif
10165+
10166diff -uNr linux-2.6.31/include/media/timb_video.h linux-2.6.31.new/include/media/timb_video.h
10167--- linux-2.6.31/include/media/timb_video.h 1969-12-31 16:00:00.000000000 -0800
10168+++ linux-2.6.31.new/include/media/timb_video.h 2009-10-23 11:16:55.000000000 -0700
10169@@ -0,0 +1,30 @@
10170+/*
10171+ * timb_video.h Platform struct for the Timberdale video driver
10172+ * Copyright (c) 2009 Intel Corporation
10173+ *
10174+ * This program is free software; you can redistribute it and/or modify
10175+ * it under the terms of the GNU General Public License version 2 as
10176+ * published by the Free Software Foundation.
10177+ *
10178+ * This program is distributed in the hope that it will be useful,
10179+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10180+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10181+ * GNU General Public License for more details.
10182+ *
10183+ * You should have received a copy of the GNU General Public License
10184+ * along with this program; if not, write to the Free Software
10185+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10186+ */
10187+
10188+#ifndef _TIMB_VIDEO_
10189+#define _TIMB_VIDEO_ 1
10190+
10191+#include <linux/i2c.h>
10192+
10193+struct timb_video_platform_data {
10194+ int i2c_adapter; /* The I2C adapter where the encoder is attached */
10195+ char encoder[32];
10196+};
10197+
10198+#endif
10199+
10200diff -uNr linux-2.6.31/include/media/v4l2-chip-ident.h linux-2.6.31.new/include/media/v4l2-chip-ident.h
10201--- linux-2.6.31/include/media/v4l2-chip-ident.h 2009-10-23 11:18:30.000000000 -0700
10202+++ linux-2.6.31.new/include/media/v4l2-chip-ident.h 2009-10-23 11:16:55.000000000 -0700
10203@@ -129,12 +129,18 @@
10204 V4L2_IDENT_SAA6752HS = 6752,
10205 V4L2_IDENT_SAA6752HS_AC3 = 6753,
10206
10207+ /* modules tef6862: just ident 6862 */
10208+ V4L2_IDENT_TEF6862 = 6862,
10209+
10210 /* module adv7170: just ident 7170 */
10211 V4L2_IDENT_ADV7170 = 7170,
10212
10213 /* module adv7175: just ident 7175 */
10214 V4L2_IDENT_ADV7175 = 7175,
10215
10216+ /* module adv7180: just ident 7180 */
10217+ V4L2_IDENT_ADV7180 = 7180,
10218+
10219 /* module saa7185: just ident 7185 */
10220 V4L2_IDENT_SAA7185 = 7185,
10221
10222@@ -147,6 +153,9 @@
10223 /* module adv7343: just ident 7343 */
10224 V4L2_IDENT_ADV7343 = 7343,
10225
10226+ /* module saa7706h: just ident 7706 */
10227+ V4L2_IDENT_SAA7706H = 7706,
10228+
10229 /* module wm8739: just ident 8739 */
10230 V4L2_IDENT_WM8739 = 8739,
10231
10232diff -uNr linux-2.6.31/include/net/most/async.h linux-2.6.31.new/include/net/most/async.h
10233--- linux-2.6.31/include/net/most/async.h 1969-12-31 16:00:00.000000000 -0800
10234+++ linux-2.6.31.new/include/net/most/async.h 2009-10-23 11:16:55.000000000 -0700
10235@@ -0,0 +1,12 @@
10236+#ifndef __ASYNC_H
10237+#define __ASYNC_H
10238+
10239+struct sockaddr_mostasync {
10240+ sa_family_t most_family;
10241+ unsigned short most_dev;
10242+ unsigned char rx_channel;
10243+ unsigned char tx_channel;
10244+};
10245+
10246+#endif
10247+
10248diff -uNr linux-2.6.31/include/net/most/ctl.h linux-2.6.31.new/include/net/most/ctl.h
10249--- linux-2.6.31/include/net/most/ctl.h 1969-12-31 16:00:00.000000000 -0800
10250+++ linux-2.6.31.new/include/net/most/ctl.h 2009-10-23 11:16:55.000000000 -0700
10251@@ -0,0 +1,12 @@
10252+#ifndef __CTL_H
10253+#define __CTL_H
10254+
10255+struct sockaddr_mostctl {
10256+ sa_family_t most_family;
10257+ unsigned short most_dev;
10258+ unsigned char rx_channel;
10259+ unsigned char tx_channel;
10260+};
10261+
10262+#endif
10263+
10264diff -uNr linux-2.6.31/include/net/most/dev.h linux-2.6.31.new/include/net/most/dev.h
10265--- linux-2.6.31/include/net/most/dev.h 1969-12-31 16:00:00.000000000 -0800
10266+++ linux-2.6.31.new/include/net/most/dev.h 2009-10-23 11:16:55.000000000 -0700
10267@@ -0,0 +1,27 @@
10268+#ifndef __DEV_H
10269+#define __DEV_H
10270+
10271+struct sockaddr_mostdev {
10272+ sa_family_t most_family;
10273+ unsigned short most_dev;
10274+};
10275+
10276+
10277+/* MOST Dev ioctl defines */
10278+#define MOSTDEVUP _IOW('M', 201, int)
10279+#define MOSTDEVDOWN _IOW('M', 202, int)
10280+
10281+#define MOSTGETDEVLIST _IOR('M', 210, int)
10282+
10283+struct most_dev_req {
10284+ uint16_t dev_id;
10285+};
10286+
10287+struct most_dev_list_req {
10288+ uint16_t dev_num;
10289+ struct most_dev_req dev_req[0];
10290+};
10291+
10292+
10293+#endif
10294+
10295diff -uNr linux-2.6.31/include/net/most/most_core.h linux-2.6.31.new/include/net/most/most_core.h
10296--- linux-2.6.31/include/net/most/most_core.h 1969-12-31 16:00:00.000000000 -0800
10297+++ linux-2.6.31.new/include/net/most/most_core.h 2009-10-23 11:16:55.000000000 -0700
10298@@ -0,0 +1,133 @@
10299+#ifndef __MOST_CORE_H
10300+#define __MOST_CORE_H
10301+
10302+#include <net/most/most.h>
10303+
10304+enum most_chan_type {
10305+ CHAN_CTL = 0,
10306+ CHAN_SYNC,
10307+ CHAN_ASYNC,
10308+ CHAN_DEV
10309+};
10310+
10311+#define MOST_CONF_FLAG_UP 0x01
10312+#define MOST_CONF_FLAG_TX 0x02
10313+
10314+enum most_dev_state {
10315+ MOST_DEV_DOWN = 0,
10316+ MOST_DEV_UP
10317+};
10318+
10319+struct most_dev {
10320+
10321+ struct list_head list;
10322+ atomic_t refcnt;
10323+
10324+ char name[8];
10325+
10326+ __u16 id;
10327+ enum most_dev_state state;
10328+
10329+ struct module *owner;
10330+
10331+ struct tasklet_struct rx_task;
10332+ struct tasklet_struct tx_task;
10333+
10334+ struct sk_buff_head rx_q;
10335+ struct sk_buff_head ctl_q;
10336+ struct sk_buff_head async_q;
10337+ struct sk_buff_head sync_q;
10338+
10339+ /* set by the driver */
10340+
10341+ void *driver_data;
10342+ struct device *parent;
10343+
10344+ int (*open)(struct most_dev *mdev);
10345+ int (*close)(struct most_dev *mdev);
10346+ int (*conf_channel)(struct most_dev *mdev, enum most_chan_type type,
10347+ u8 channel, u8 flags);
10348+ int (*send)(struct sk_buff *skb);
10349+ int (*can_send)(struct sk_buff *skb);
10350+};
10351+
10352+#define most_dbg(...) printk(__VA_ARGS__)
10353+
10354+static inline struct most_dev *most_dev_hold(struct most_dev *d)
10355+{
10356+ if (try_module_get(d->owner))
10357+ return d;
10358+ return NULL;
10359+}
10360+
10361+static inline void most_dev_put(struct most_dev *d)
10362+{
10363+ module_put(d->owner);
10364+}
10365+
10366+static inline void most_sched_tx(struct most_dev *mdev)
10367+{
10368+ tasklet_schedule(&mdev->tx_task);
10369+}
10370+
10371+static inline void most_sched_rx(struct most_dev *mdev)
10372+{
10373+ tasklet_schedule(&mdev->rx_task);
10374+}
10375+
10376+static inline int most_recv_frame(struct sk_buff *skb)
10377+{
10378+ struct most_dev *mdev = (struct most_dev *) skb->dev;
10379+
10380+ /* Time stamp */
10381+ __net_timestamp(skb);
10382+
10383+ /* Queue frame for rx task */
10384+ skb_queue_tail(&mdev->rx_q, skb);
10385+ most_sched_rx(mdev);
10386+ return 0;
10387+}
10388+
10389+static inline int __most_configure_channel(struct most_dev *mdev,
10390+ u8 channel_type, u8 channel, u8 up)
10391+{
10392+ if (mdev->state != MOST_DEV_UP)
10393+ return -ENETDOWN;
10394+
10395+ if (mdev->conf_channel)
10396+ if (channel != MOST_NO_CHANNEL)
10397+ return mdev->conf_channel(mdev, channel_type, channel,
10398+ up);
10399+ return 0;
10400+}
10401+
10402+static inline int most_configure_channels(struct most_dev *mdev,
10403+ struct most_sock *sk, u8 up)
10404+{
10405+ int err;
10406+ u8 flags = (up) ? MOST_CONF_FLAG_UP : 0;
10407+
10408+ err = __most_configure_channel(mdev, sk->channel_type, sk->rx_channel,
10409+ flags);
10410+ if (err)
10411+ return err;
10412+
10413+ err = __most_configure_channel(mdev, sk->channel_type, sk->tx_channel,
10414+ flags | MOST_CONF_FLAG_TX);
10415+ if (err)
10416+ __most_configure_channel(mdev, sk->channel_type, sk->rx_channel,
10417+ (up) ? 0 : MOST_CONF_FLAG_UP);
10418+ return err;
10419+}
10420+
10421+struct most_dev *most_alloc_dev(void);
10422+void most_free_dev(struct most_dev *mdev);
10423+int most_register_dev(struct most_dev *mdev);
10424+int most_unregister_dev(struct most_dev *mdev);
10425+
10426+int most_get_dev_list(void __user *arg);
10427+int most_open_dev(u16 dev_id);
10428+int most_close_dev(u16 dev_id);
10429+
10430+#endif
10431+
10432diff -uNr linux-2.6.31/include/net/most/most.h linux-2.6.31.new/include/net/most/most.h
10433--- linux-2.6.31/include/net/most/most.h 1969-12-31 16:00:00.000000000 -0800
10434+++ linux-2.6.31.new/include/net/most/most.h 2009-10-23 11:16:55.000000000 -0700
10435@@ -0,0 +1,110 @@
10436+#ifndef __MOST_H
10437+#define __MOST_H
10438+
10439+#include <net/sock.h>
10440+
10441+#ifndef AF_MOST
10442+#define AF_MOST 37
10443+#define PF_MOST AF_MOST
10444+#endif
10445+
10446+/* Reserve for core and drivers use */
10447+#define MOST_SKB_RESERVE 8
10448+
10449+#define CTL_FRAME_SIZE 32
10450+
10451+#define MOSTPROTO_DEV 0
10452+#define MOSTPROTO_CTL 1
10453+#define MOSTPROTO_SYNC 2
10454+#define MOSTPROTO_ASYNC 3
10455+
10456+#define MOST_NO_CHANNEL 0xFE
10457+
10458+enum {
10459+ MOST_CONNECTED = 1, /* Equal to TCP_ESTABLISHED makes net code happy */
10460+ MOST_OPEN,
10461+ MOST_BOUND,
10462+};
10463+
10464+
10465+struct most_skb_cb {
10466+ __u8 channel_type;
10467+ __u8 channel;
10468+};
10469+#define most_cb(skb) ((struct most_skb_cb *)(skb->cb))
10470+
10471+struct most_sock {
10472+ struct sock sk;
10473+ u8 channel_type;
10474+ u8 rx_channel;
10475+ u8 tx_channel;
10476+ int dev_id;
10477+ struct most_dev *mdev;
10478+};
10479+#define most_sk(sk) ((struct most_sock *)sk)
10480+
10481+static inline struct sock *most_sk_alloc(struct net *net,
10482+ struct proto *pops, u8 channel_type)
10483+{
10484+ struct sock *sk = sk_alloc(net, PF_MOST, GFP_ATOMIC, pops);
10485+ if (sk) {
10486+ most_sk(sk)->channel_type = channel_type;
10487+ most_sk(sk)->dev_id = -1;
10488+ }
10489+
10490+ return sk;
10491+}
10492+static inline struct sk_buff *most_skb_alloc(unsigned int len, gfp_t how)
10493+{
10494+ struct sk_buff *skb = alloc_skb(len + MOST_SKB_RESERVE, how);
10495+
10496+ if (skb)
10497+ skb_reserve(skb, MOST_SKB_RESERVE);
10498+
10499+ return skb;
10500+}
10501+
10502+static inline struct sk_buff *most_skb_send_alloc(struct sock *sk,
10503+ unsigned long len, int nb, int *err)
10504+{
10505+ struct sk_buff *skb =
10506+ sock_alloc_send_skb(sk, len + MOST_SKB_RESERVE, nb, err);
10507+
10508+ if (skb)
10509+ skb_reserve(skb, MOST_SKB_RESERVE);
10510+
10511+ return skb;
10512+}
10513+
10514+struct most_sock_list {
10515+ struct hlist_head head;
10516+ rwlock_t lock;
10517+};
10518+
10519+struct most_dev *most_dev_get(int index);
10520+
10521+int most_sock_register(int proto, struct net_proto_family *ops);
10522+int most_sock_unregister(int proto);
10523+void most_sock_link(struct sock *s);
10524+void most_sock_unlink(struct sock *sk);
10525+
10526+int most_send_to_sock(int dev_id, struct sk_buff *skb);
10527+
10528+/* default implementation of socket operations */
10529+int most_sock_release(struct socket *sock);
10530+int most_sock_bind(struct socket *sock, int dev_id, u8 rx_chan, u8 tx_chan);
10531+int most_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
10532+int most_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
10533+ struct msghdr *msg, size_t len, int flags);
10534+int most_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
10535+ struct msghdr *msg, size_t len);
10536+int most_sock_setsockopt(struct socket *sock, int level, int optname,
10537+ char __user *optval, int len);
10538+int most_sock_getsockopt(struct socket *sock, int level, int optname,
10539+ char __user *optval, int __user *optlen);
10540+
10541+extern int dev_sock_init(void);
10542+extern void dev_sock_cleanup(void);
10543+
10544+#endif /* __MOST_H */
10545+
10546diff -uNr linux-2.6.31/include/net/most/sync.h linux-2.6.31.new/include/net/most/sync.h
10547--- linux-2.6.31/include/net/most/sync.h 1969-12-31 16:00:00.000000000 -0800
10548+++ linux-2.6.31.new/include/net/most/sync.h 2009-10-23 11:16:55.000000000 -0700
10549@@ -0,0 +1,12 @@
10550+#ifndef __SYNC_H
10551+#define __SYNC_H
10552+
10553+struct sockaddr_mostsync {
10554+ sa_family_t most_family;
10555+ unsigned short most_dev;
10556+ unsigned char rx_channel;
10557+ unsigned char tx_channel;
10558+};
10559+
10560+#endif
10561+
10562diff -uNr linux-2.6.31/include/sound/timbi2s.h linux-2.6.31.new/include/sound/timbi2s.h
10563--- linux-2.6.31/include/sound/timbi2s.h 1969-12-31 16:00:00.000000000 -0800
10564+++ linux-2.6.31.new/include/sound/timbi2s.h 2009-10-23 11:16:55.000000000 -0700
10565@@ -0,0 +1,32 @@
10566+/*
10567+ * timbi2s.h timberdale FPGA I2S platform data
10568+ * Copyright (c) 2009 Intel Corporation
10569+ *
10570+ * This program is free software; you can redistribute it and/or modify
10571+ * it under the terms of the GNU General Public License version 2 as
10572+ * published by the Free Software Foundation.
10573+ *
10574+ * This program is distributed in the hope that it will be useful,
10575+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10576+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10577+ * GNU General Public License for more details.
10578+ *
10579+ * You should have received a copy of the GNU General Public License
10580+ * along with this program; if not, write to the Free Software
10581+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10582+ */
10583+#ifndef __INCLUDE_SOUND_TIMBI2S_H
10584+#define __INCLUDE_SOUND_TIMBI2S_H
10585+
10586+struct timbi2s_bus_data {
10587+ u8 rx;
10588+ u16 sample_rate;
10589+};
10590+
10591+struct timbi2s_platform_data {
10592+ const struct timbi2s_bus_data *busses;
10593+ int num_busses;
10594+ u32 main_clk;
10595+};
10596+
10597+#endif
10598diff -uNr linux-2.6.31/net/Kconfig linux-2.6.31.new/net/Kconfig
10599--- linux-2.6.31/net/Kconfig 2009-10-23 11:18:30.000000000 -0700
10600+++ linux-2.6.31.new/net/Kconfig 2009-10-23 11:17:37.000000000 -0700
10601@@ -235,6 +235,7 @@
10602 source "net/irda/Kconfig"
10603 source "net/bluetooth/Kconfig"
10604 source "net/rxrpc/Kconfig"
10605+source "net/most/Kconfig"
10606
10607 config FIB_RULES
10608 bool
10609diff -uNr linux-2.6.31/net/Makefile linux-2.6.31.new/net/Makefile
10610--- linux-2.6.31/net/Makefile 2009-10-23 11:18:30.000000000 -0700
10611+++ linux-2.6.31.new/net/Makefile 2009-10-23 11:17:36.000000000 -0700
10612@@ -44,6 +44,7 @@
10613 obj-$(CONFIG_DECNET) += decnet/
10614 obj-$(CONFIG_ECONET) += econet/
10615 obj-$(CONFIG_PHONET) += phonet/
10616+obj-$(CONFIG_MOST) += most/
10617 ifneq ($(CONFIG_VLAN_8021Q),)
10618 obj-y += 8021q/
10619 endif
10620diff -uNr linux-2.6.31/net/most/af_most.c linux-2.6.31.new/net/most/af_most.c
10621--- linux-2.6.31/net/most/af_most.c 1969-12-31 16:00:00.000000000 -0800
10622+++ linux-2.6.31.new/net/most/af_most.c 2009-10-23 11:17:37.000000000 -0700
10623@@ -0,0 +1,169 @@
10624+/*
10625+ * af_most.c Support for the MOST address family
10626+ * Copyright (c) 2009 Intel Corporation
10627+ *
10628+ * This program is free software; you can redistribute it and/or modify
10629+ * it under the terms of the GNU General Public License version 2 as
10630+ * published by the Free Software Foundation.
10631+ *
10632+ * This program is distributed in the hope that it will be useful,
10633+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10634+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10635+ * GNU General Public License for more details.
10636+ *
10637+ * You should have received a copy of the GNU General Public License
10638+ * along with this program; if not, write to the Free Software
10639+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10640+ */
10641+
10642+#include <linux/module.h>
10643+#include <net/most/most.h>
10644+
10645+#define MOST_MAX_PROTO 4
10646+static struct net_proto_family *most_proto[MOST_MAX_PROTO];
10647+static DEFINE_RWLOCK(most_proto_lock);
10648+
10649+#ifdef CONFIG_DEBUG_LOCK_ALLOC
10650+static struct lock_class_key most_lock_key[MOST_MAX_PROTO];
10651+static const char *most_key_strings[MOST_MAX_PROTO] = {
10652+ "sk_lock-AF_MOST-MOSTPROTO_DEV",
10653+ "sk_lock-AF_MOST-MOSTPROTO_CTL",
10654+ "sk_lock-AF_MOST-MOSTPROTO_SYNC",
10655+ "sk_lock-AF_MOST-MOSTPROTO_ASYNC",
10656+};
10657+
10658+static struct lock_class_key most_slock_key[MOST_MAX_PROTO];
10659+static const char *most_slock_key_strings[MOST_MAX_PROTO] = {
10660+ "slock-AF_MOST-MOSTPROTO_DEV",
10661+ "slock-AF_MOST-MOSTPROTO_CTL",
10662+ "slock-AF_MOST-MOSTPROTO_SYNC",
10663+ "slock-AF_MOST-MOSTPROTO_ASYNC",
10664+};
10665+
10666+static inline void most_sock_reclassify_lock(struct socket *sock, int proto)
10667+{
10668+ struct sock *sk = sock->sk;
10669+
10670+ if (!sk)
10671+ return;
10672+
10673+ BUG_ON(sock_owned_by_user(sk));
10674+
10675+ sock_lock_init_class_and_name(sk,
10676+ most_slock_key_strings[proto], &most_slock_key[proto],
10677+ most_key_strings[proto], &most_lock_key[proto]);
10678+}
10679+#else
10680+static inline void most_sock_reclassify_lock(struct socket *sock, int proto)
10681+{
10682+}
10683+#endif
10684+
10685+
10686+int most_sock_register(int proto, struct net_proto_family *ops)
10687+{
10688+ int err = 0;
10689+
10690+ if (proto < 0 || proto >= MOST_MAX_PROTO)
10691+ return -EINVAL;
10692+
10693+ write_lock(&most_proto_lock);
10694+
10695+ if (most_proto[proto])
10696+ err = -EEXIST;
10697+ else
10698+ most_proto[proto] = ops;
10699+
10700+ write_unlock(&most_proto_lock);
10701+
10702+ return err;
10703+}
10704+EXPORT_SYMBOL(most_sock_register);
10705+
10706+int most_sock_unregister(int proto)
10707+{
10708+ int err = 0;
10709+
10710+ if (proto < 0 || proto >= MOST_MAX_PROTO)
10711+ return -EINVAL;
10712+
10713+ write_lock(&most_proto_lock);
10714+
10715+ if (!most_proto[proto])
10716+ err = -ENOENT;
10717+ else
10718+ most_proto[proto] = NULL;
10719+
10720+ write_unlock(&most_proto_lock);
10721+
10722+ return err;
10723+}
10724+EXPORT_SYMBOL(most_sock_unregister);
10725+
10726+static int most_sock_create(struct net *net, struct socket *sock, int proto)
10727+{
10728+ int err;
10729+
10730+ if (net != &init_net)
10731+ return -EAFNOSUPPORT;
10732+
10733+ if (proto < 0 || proto >= MOST_MAX_PROTO)
10734+ return -EINVAL;
10735+
10736+ if (!most_proto[proto])
10737+ request_module("most-proto-%d", proto);
10738+
10739+ err = -EPROTONOSUPPORT;
10740+
10741+ read_lock(&most_proto_lock);
10742+
10743+ if (most_proto[proto] && try_module_get(most_proto[proto]->owner)) {
10744+ err = most_proto[proto]->create(net, sock, proto);
10745+ most_sock_reclassify_lock(sock, proto);
10746+ module_put(most_proto[proto]->owner);
10747+ }
10748+
10749+ read_unlock(&most_proto_lock);
10750+
10751+ return err;
10752+}
10753+
10754+static struct net_proto_family most_sock_family_ops = {
10755+ .owner = THIS_MODULE,
10756+ .family = PF_MOST,
10757+ .create = most_sock_create,
10758+};
10759+
10760+static int __init most_init(void)
10761+{
10762+ int err;
10763+
10764+ err = sock_register(&most_sock_family_ops);
10765+ if (err < 0)
10766+ return err;
10767+
10768+ err = dev_sock_init();
10769+ if (err < 0) {
10770+ sock_unregister(PF_MOST);
10771+ return err;
10772+ }
10773+
10774+ printk(KERN_INFO "MOST is initialized\n");
10775+
10776+ return 0;
10777+}
10778+
10779+static void __exit most_exit(void)
10780+{
10781+ dev_sock_cleanup();
10782+
10783+ sock_unregister(PF_MOST);
10784+}
10785+
10786+subsys_initcall(most_init);
10787+module_exit(most_exit);
10788+
10789+MODULE_DESCRIPTION("MOST Core");
10790+MODULE_LICENSE("GPL v2");
10791+MODULE_ALIAS_NETPROTO(PF_MOST);
10792+
10793diff -uNr linux-2.6.31/net/most/async_sock.c linux-2.6.31.new/net/most/async_sock.c
10794--- linux-2.6.31/net/most/async_sock.c 1969-12-31 16:00:00.000000000 -0800
10795+++ linux-2.6.31.new/net/most/async_sock.c 2009-10-23 11:17:37.000000000 -0700
10796@@ -0,0 +1,154 @@
10797+/*
10798+ * async_sock.c MOST asyncronous socket support
10799+ * Copyright (c) 2009 Intel Corporation
10800+ *
10801+ * This program is free software; you can redistribute it and/or modify
10802+ * it under the terms of the GNU General Public License version 2 as
10803+ * published by the Free Software Foundation.
10804+ *
10805+ * This program is distributed in the hope that it will be useful,
10806+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10807+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10808+ * GNU General Public License for more details.
10809+ *
10810+ * You should have received a copy of the GNU General Public License
10811+ * along with this program; if not, write to the Free Software
10812+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10813+ */
10814+
10815+/* Supports:
10816+ * Support for MOST asynchronous sockets
10817+ */
10818+
10819+#include <linux/module.h>
10820+#include <net/most/most.h>
10821+#include <net/most/most_core.h>
10822+#include <net/most/async.h>
10823+
10824+static int async_sock_bind(struct socket *sock, struct sockaddr *addr,
10825+ int addr_len)
10826+{
10827+ struct sockaddr_mostasync *aaddr = (struct sockaddr_mostasync *)addr;
10828+
10829+ if (!aaddr || aaddr->most_family != AF_MOST)
10830+ return -EINVAL;
10831+
10832+ return most_sock_bind(sock, aaddr->most_dev, aaddr->rx_channel,
10833+ aaddr->tx_channel);
10834+}
10835+
10836+static int async_sock_getname(struct socket *sock, struct sockaddr *addr,
10837+ int *addr_len, int peer)
10838+{
10839+ struct sockaddr_mostasync *aaddr = (struct sockaddr_mostasync *)addr;
10840+ struct sock *sk = sock->sk;
10841+ struct most_dev *mdev = most_sk(sk)->mdev;
10842+
10843+ if (!mdev)
10844+ return -EBADFD;
10845+
10846+ lock_sock(sk);
10847+
10848+ *addr_len = sizeof(*aaddr);
10849+ aaddr->most_family = AF_MOST;
10850+ aaddr->most_dev = mdev->id;
10851+ aaddr->rx_channel = most_sk(sk)->rx_channel;
10852+ aaddr->tx_channel = most_sk(sk)->tx_channel;
10853+
10854+ release_sock(sk);
10855+ return 0;
10856+}
10857+
10858+
10859+static const struct proto_ops async_sock_ops = {
10860+ .family = PF_MOST,
10861+ .owner = THIS_MODULE,
10862+ .release = most_sock_release,
10863+ .bind = async_sock_bind,
10864+ .getname = async_sock_getname,
10865+ .sendmsg = most_sock_sendmsg,
10866+ .recvmsg = most_sock_recvmsg,
10867+ .ioctl = most_sock_ioctl,
10868+ .poll = datagram_poll,
10869+ .listen = sock_no_listen,
10870+ .shutdown = sock_no_shutdown,
10871+ .setsockopt = most_sock_setsockopt,
10872+ .getsockopt = most_sock_getsockopt,
10873+ .connect = sock_no_connect,
10874+ .socketpair = sock_no_socketpair,
10875+ .accept = sock_no_accept,
10876+ .mmap = sock_no_mmap
10877+};
10878+static struct proto async_sk_proto = {
10879+ .name = "ASYNC",
10880+ .owner = THIS_MODULE,
10881+ .obj_size = sizeof(struct most_sock)
10882+};
10883+
10884+static int async_sock_create(struct net *net, struct socket *sock, int protocol)
10885+{
10886+ struct sock *sk;
10887+
10888+ if (sock->type != SOCK_DGRAM)
10889+ return -ESOCKTNOSUPPORT;
10890+
10891+ sock->ops = &async_sock_ops;
10892+
10893+ sk = most_sk_alloc(net, &async_sk_proto, CHAN_ASYNC);
10894+ if (!sk)
10895+ return -ENOMEM;
10896+
10897+ sock_init_data(sock, sk);
10898+
10899+ sock_reset_flag(sk, SOCK_ZAPPED);
10900+
10901+ sk->sk_protocol = protocol;
10902+
10903+ sock->state = SS_UNCONNECTED;
10904+ sk->sk_state = MOST_OPEN;
10905+
10906+ most_sock_link(sk);
10907+ return 0;
10908+}
10909+
10910+static struct net_proto_family async_sock_family_ops = {
10911+ .family = PF_MOST,
10912+ .owner = THIS_MODULE,
10913+ .create = async_sock_create,
10914+};
10915+
10916+
10917+static int __init async_init(void)
10918+{
10919+ int err;
10920+
10921+ err = proto_register(&async_sk_proto, 0);
10922+ if (err < 0)
10923+ return err;
10924+
10925+ err = most_sock_register(MOSTPROTO_ASYNC, &async_sock_family_ops);
10926+ if (err < 0) {
10927+ printk(KERN_ERR "MOST socket registration failed\n");
10928+ return err;
10929+ }
10930+
10931+ printk(KERN_INFO "MOST asynchronous socket layer initialized\n");
10932+
10933+ return 0;
10934+}
10935+
10936+static void __exit async_exit(void)
10937+{
10938+ if (most_sock_unregister(MOSTPROTO_ASYNC) < 0)
10939+ printk(KERN_ERR "ASYNC socket unregistration failed\n");
10940+
10941+ proto_unregister(&async_sk_proto);
10942+}
10943+
10944+module_init(async_init);
10945+module_exit(async_exit);
10946+
10947+MODULE_DESCRIPTION("Most Asyncronous");
10948+MODULE_LICENSE("GPL v2");
10949+MODULE_ALIAS("most-proto-3");
10950+
10951diff -uNr linux-2.6.31/net/most/ctl_sock.c linux-2.6.31.new/net/most/ctl_sock.c
10952--- linux-2.6.31/net/most/ctl_sock.c 1969-12-31 16:00:00.000000000 -0800
10953+++ linux-2.6.31.new/net/most/ctl_sock.c 2009-10-23 11:17:37.000000000 -0700
10954@@ -0,0 +1,159 @@
10955+/*
10956+ * ctl_sock.c Support for MOST control sockets
10957+ * Copyright (c) 2009 Intel Corporation
10958+ *
10959+ * This program is free software; you can redistribute it and/or modify
10960+ * it under the terms of the GNU General Public License version 2 as
10961+ * published by the Free Software Foundation.
10962+ *
10963+ * This program is distributed in the hope that it will be useful,
10964+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10965+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10966+ * GNU General Public License for more details.
10967+ *
10968+ * You should have received a copy of the GNU General Public License
10969+ * along with this program; if not, write to the Free Software
10970+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10971+ */
10972+
10973+#include <linux/module.h>
10974+#include <net/most/most.h>
10975+#include <net/most/most_core.h>
10976+#include <net/most/ctl.h>
10977+
10978+
10979+static int ctl_sock_bind(struct socket *sock, struct sockaddr *addr,
10980+ int addr_len)
10981+{
10982+ struct sockaddr_mostctl *caddr = (struct sockaddr_mostctl *) addr;
10983+
10984+ if (!caddr || caddr->most_family != AF_MOST)
10985+ return -EINVAL;
10986+
10987+ return most_sock_bind(sock, caddr->most_dev, caddr->rx_channel,
10988+ caddr->tx_channel);
10989+}
10990+
10991+static int ctl_sock_getname(struct socket *sock, struct sockaddr *addr,
10992+ int *addr_len, int peer)
10993+{
10994+ struct sockaddr_mostctl *caddr = (struct sockaddr_mostctl *) addr;
10995+ struct sock *sk = sock->sk;
10996+ struct most_dev *mdev = most_sk(sk)->mdev;
10997+
10998+ if (!mdev)
10999+ return -EBADFD;
11000+
11001+ lock_sock(sk);
11002+
11003+ *addr_len = sizeof(*caddr);
11004+ caddr->most_family = AF_MOST;
11005+ caddr->most_dev = mdev->id;
11006+ caddr->rx_channel = most_sk(sk)->rx_channel;
11007+ caddr->tx_channel = most_sk(sk)->tx_channel;
11008+
11009+ release_sock(sk);
11010+ return 0;
11011+}
11012+
11013+int ctl_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
11014+ struct msghdr *msg, size_t len)
11015+{
11016+ if (len != CTL_FRAME_SIZE)
11017+ return -EINVAL;
11018+
11019+ return most_sock_sendmsg(iocb, sock, msg, len);
11020+}
11021+
11022+static const struct proto_ops ctl_sock_ops = {
11023+ .family = PF_MOST,
11024+ .owner = THIS_MODULE,
11025+ .release = most_sock_release,
11026+ .bind = ctl_sock_bind,
11027+ .getname = ctl_sock_getname,
11028+ .sendmsg = most_sock_sendmsg,
11029+ .recvmsg = most_sock_recvmsg,
11030+ .ioctl = most_sock_ioctl,
11031+ .poll = datagram_poll,
11032+ .listen = sock_no_listen,
11033+ .shutdown = sock_no_shutdown,
11034+ .setsockopt = most_sock_setsockopt,
11035+ .getsockopt = most_sock_getsockopt,
11036+ .connect = sock_no_connect,
11037+ .socketpair = sock_no_socketpair,
11038+ .accept = sock_no_accept,
11039+ .mmap = sock_no_mmap
11040+};
11041+static struct proto ctl_sk_proto = {
11042+ .name = "CTL",
11043+ .owner = THIS_MODULE,
11044+ .obj_size = sizeof(struct most_sock)
11045+};
11046+
11047+static int ctl_sock_create(struct net *net, struct socket *sock, int protocol)
11048+{
11049+ struct sock *sk;
11050+
11051+ if (sock->type != SOCK_RAW)
11052+ return -ESOCKTNOSUPPORT;
11053+
11054+ sock->ops = &ctl_sock_ops;
11055+
11056+ sk = most_sk_alloc(net, &ctl_sk_proto, CHAN_CTL);
11057+ if (!sk)
11058+ return -ENOMEM;
11059+
11060+ sock_init_data(sock, sk);
11061+
11062+ sock_reset_flag(sk, SOCK_ZAPPED);
11063+
11064+ sk->sk_protocol = protocol;
11065+
11066+ sock->state = SS_UNCONNECTED;
11067+ sk->sk_state = MOST_OPEN;
11068+
11069+ most_sock_link(sk);
11070+ return 0;
11071+}
11072+
11073+static struct net_proto_family ctl_sock_family_ops = {
11074+ .family = PF_MOST,
11075+ .owner = THIS_MODULE,
11076+ .create = ctl_sock_create,
11077+};
11078+
11079+
11080+static int __init ctl_init(void)
11081+{
11082+ int err;
11083+
11084+ err = proto_register(&ctl_sk_proto, 0);
11085+ if (err < 0)
11086+ return err;
11087+
11088+ err = most_sock_register(MOSTPROTO_CTL, &ctl_sock_family_ops);
11089+ if (err < 0) {
11090+ printk(KERN_ERR "MOST socket registration failed\n");
11091+ return err;
11092+ }
11093+
11094+ printk(KERN_INFO "MOST control socket layer initialized\n");
11095+
11096+ return 0;
11097+}
11098+
11099+static void __exit ctl_exit(void)
11100+{
11101+ if (most_sock_unregister(MOSTPROTO_CTL) < 0)
11102+ printk(KERN_ERR "Control socket unregistration failed\n");
11103+
11104+ proto_unregister(&ctl_sk_proto);
11105+}
11106+
11107+module_init(ctl_init);
11108+module_exit(ctl_exit);
11109+
11110+MODULE_DESCRIPTION("Most Control");
11111+MODULE_LICENSE("GPL v2");
11112+MODULE_ALIAS("most-proto-1");
11113+
11114diff -uNr linux-2.6.31/net/most/dev_sock.c linux-2.6.31.new/net/most/dev_sock.c
11115--- linux-2.6.31/net/most/dev_sock.c 1969-12-31 16:00:00.000000000 -0800
11116+++ linux-2.6.31.new/net/most/dev_sock.c 2009-10-23 11:17:37.000000000 -0700
11117@@ -0,0 +1,170 @@
11118+/*
11119+ * dev_sock.c Device MOST sockets, to control the underlaying devices
11120+ * Copyright (c) 2009 Intel Corporation
11121+ *
11122+ * This program is free software; you can redistribute it and/or modify
11123+ * it under the terms of the GNU General Public License version 2 as
11124+ * published by the Free Software Foundation.
11125+ *
11126+ * This program is distributed in the hope that it will be useful,
11127+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
11128+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11129+ * GNU General Public License for more details.
11130+ *
11131+ * You should have received a copy of the GNU General Public License
11132+ * along with this program; if not, write to the Free Software
11133+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
11134+ */
11135+
11136+#include <linux/module.h>
11137+#include <net/most/most.h>
11138+#include <net/most/most_core.h>
11139+#include <net/most/dev.h>
11140+
11141+/* Ioctls that require bound socket */
11142+static inline int dev_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
11143+ unsigned long arg)
11144+{
11145+ return -ENOSYS;
11146+}
11147+
11148+static int dev_sock_ioctl(struct socket *sock, unsigned int cmd,
11149+ unsigned long arg)
11150+{
11151+ void __user *argp = (void __user *) arg;
11152+
11153+ switch (cmd) {
11154+ case MOSTDEVUP:
11155+ return most_open_dev(arg & 0xffff);
11156+ case MOSTDEVDOWN:
11157+ return most_close_dev(arg & 0xffff);
11158+ case MOSTGETDEVLIST:
11159+ return most_get_dev_list(argp);
11160+ default:
11161+ return -EINVAL;
11162+ }
11163+}
11164+
11165+static int dev_sock_bind(struct socket *sock, struct sockaddr *addr,
11166+ int addr_len)
11167+{
11168+ return -ENOSYS;
11169+}
11170+
11171+static int dev_sock_getname(struct socket *sock, struct sockaddr *addr,
11172+ int *addr_len, int peer)
11173+{
11174+ struct sockaddr_mostdev *daddr = (struct sockaddr_mostdev *) addr;
11175+ struct sock *sk = sock->sk;
11176+ struct most_dev *mdev = most_sk(sk)->mdev;
11177+
11178+ if (!mdev)
11179+ return -EBADFD;
11180+
11181+ lock_sock(sk);
11182+
11183+ *addr_len = sizeof(*daddr);
11184+ daddr->most_family = AF_MOST;
11185+ daddr->most_dev = mdev->id;
11186+
11187+ release_sock(sk);
11188+ return 0;
11189+}
11190+
11191+static int dev_sock_setsockopt(struct socket *sock, int level, int optname,
11192+ char __user *optval, int len)
11193+{
11194+ return -ENOSYS;
11195+}
11196+
11197+static int dev_sock_getsockopt(struct socket *sock, int level, int optname,
11198+ char __user *optval, int __user *optlen)
11199+{
11200+ return -ENOSYS;
11201+}
11202+
11203+static const struct proto_ops dev_sock_ops = {
11204+ .family = PF_MOST,
11205+ .owner = THIS_MODULE,
11206+ .release = most_sock_release,
11207+ .bind = dev_sock_bind,
11208+ .getname = dev_sock_getname,
11209+ .sendmsg = sock_no_sendmsg,
11210+ .recvmsg = sock_no_recvmsg,
11211+ .ioctl = dev_sock_ioctl,
11212+ .poll = sock_no_poll,
11213+ .listen = sock_no_listen,
11214+ .shutdown = sock_no_shutdown,
11215+ .setsockopt = dev_sock_setsockopt,
11216+ .getsockopt = dev_sock_getsockopt,
11217+ .connect = sock_no_connect,
11218+ .socketpair = sock_no_socketpair,
11219+ .accept = sock_no_accept,
11220+ .mmap = sock_no_mmap
11221+};
11222+static struct proto dev_sk_proto = {
11223+ .name = "DEV",
11224+ .owner = THIS_MODULE,
11225+ .obj_size = sizeof(struct most_sock)
11226+};
11227+
11228+static int dev_sock_create(struct net *net, struct socket *sock, int protocol)
11229+{
11230+ struct sock *sk;
11231+
11232+ if (sock->type != SOCK_RAW)
11233+ return -ESOCKTNOSUPPORT;
11234+
11235+ sock->ops = &dev_sock_ops;
11236+
11237+ sk = most_sk_alloc(net, &dev_sk_proto, CHAN_DEV);
11238+ if (!sk)
11239+ return -ENOMEM;
11240+
11241+ sock_init_data(sock, sk);
11242+
11243+ sock_reset_flag(sk, SOCK_ZAPPED);
11244+
11245+ sk->sk_protocol = protocol;
11246+
11247+ sock->state = SS_UNCONNECTED;
11248+ sk->sk_state = MOST_OPEN;
11249+
11250+ most_sock_link(sk);
11251+ return 0;
11252+}
11253+
11254+static struct net_proto_family dev_sock_family_ops = {
11255+ .family = PF_MOST,
11256+ .owner = THIS_MODULE,
11257+ .create = dev_sock_create,
11258+};
11259+
11260+
11261+int __init dev_sock_init(void)
11262+{
11263+ int err;
11264+
11265+ err = proto_register(&dev_sk_proto, 0);
11266+ if (err < 0)
11267+ return err;
11268+
11269+ err = most_sock_register(MOSTPROTO_DEV, &dev_sock_family_ops);
11270+ if (err < 0) {
11271+ printk(KERN_ERR "MOST socket registration failed\n");
11272+ return err;
11273+ }
11274+
11275+ printk(KERN_INFO "MOST device socket layer initialized\n");
11276+
11277+ return 0;
11278+}
11279+
11280+void __exit dev_sock_cleanup(void)
11281+{
11282+ if (most_sock_unregister(MOSTPROTO_DEV) < 0)
11283+ printk(KERN_ERR "Device socket unregistration failed\n");
11284+
11285+ proto_unregister(&dev_sk_proto);
11286+}
11287+
11288diff -uNr linux-2.6.31/net/most/Kconfig linux-2.6.31.new/net/most/Kconfig
11289--- linux-2.6.31/net/most/Kconfig 1969-12-31 16:00:00.000000000 -0800
11290+++ linux-2.6.31.new/net/most/Kconfig 2009-10-23 11:17:37.000000000 -0700
11291@@ -0,0 +1,38 @@
11292+#
11293+# Media Oriented Systems Transport (MOST) network layer core configuration
11294+#
11295+
11296+menuconfig MOST
11297+ depends on NET
11298+ tristate "MOST bus subsystem support"
11299+ ---help---
11300+ Media Oriented Systems Transport (MOST) is a multimedia
11301+ communications protocol in the automotive industry.
11302+
11303+ If you want MOST support you should say Y here.
11304+
11305+config MOST_CTL
11306+ tristate "Support for Control data over MOST"
11307+ depends on MOST
11308+ default N
11309+ ---help---
11310+ Support for the control channel of the MOST bus.
11311+
11312+config MOST_ASYNC
11313+ tristate "Support for Asynchronous data over MOST"
11314+ depends on MOST
11315+ default N
11316+ ---help---
11317+ Support for the asyncronous channel of the MOST bus. Normally
11318+ used for software download od file transfers.
11319+
11320+config MOST_SYNC
11321+ tristate "Support for Synchronous data over MOST"
11322+ depends on MOST
11323+ default N
11324+ ---help---
11325+ Support for synchronous channles of the MOST bus. Normally used
11326+ for streaming media such as audio and video.
11327+
11328+
11329+source "drivers/net/most/Kconfig"
11330diff -uNr linux-2.6.31/net/most/Makefile linux-2.6.31.new/net/most/Makefile
11331--- linux-2.6.31/net/most/Makefile 1969-12-31 16:00:00.000000000 -0800
11332+++ linux-2.6.31.new/net/most/Makefile 2009-10-23 11:17:37.000000000 -0700
11333@@ -0,0 +1,15 @@
11334+#
11335+# Makefile for the Linux Media Oriented Systems Transport core.
11336+#
11337+
11338+obj-$(CONFIG_MOST) += most.o
11339+most-objs := af_most.o most_core.o most_sock.o dev_sock.o
11340+
11341+obj-$(CONFIG_MOST_CTL) += ctl.o
11342+ctl-objs := ctl_sock.o
11343+
11344+obj-$(CONFIG_MOST_SYNC) += sync.o
11345+sync-objs := sync_sock.o
11346+
11347+obj-$(CONFIG_MOST_ASYNC) += async.o
11348+async-objs := async_sock.o
11349diff -uNr linux-2.6.31/net/most/most_core.c linux-2.6.31.new/net/most/most_core.c
11350--- linux-2.6.31/net/most/most_core.c 1969-12-31 16:00:00.000000000 -0800
11351+++ linux-2.6.31.new/net/most/most_core.c 2009-10-23 11:17:37.000000000 -0700
11352@@ -0,0 +1,287 @@
11353+/*
11354+ * most_core.c The MOST core functions
11355+ * Copyright (c) 2009 Intel Corporation
11356+ *
11357+ * This program is free software; you can redistribute it and/or modify
11358+ * it under the terms of the GNU General Public License version 2 as
11359+ * published by the Free Software Foundation.
11360+ *
11361+ * This program is distributed in the hope that it will be useful,
11362+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
11363+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11364+ * GNU General Public License for more details.
11365+ *
11366+ * You should have received a copy of the GNU General Public License
11367+ * along with this program; if not, write to the Free Software
11368+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
11369+ */
11370+
11371+#include <linux/kernel.h>
11372+#include <linux/slab.h>
11373+#include <linux/module.h>
11374+
11375+#include <net/most/most_core.h>
11376+#include <net/most/dev.h>
11377+
11378+/* MOST device list */
11379+LIST_HEAD(most_dev_list);
11380+DEFINE_RWLOCK(most_dev_list_lock);
11381+
11382+
11383+int most_open_dev(u16 dev_id)
11384+{
11385+ struct most_dev *mdev = most_dev_get(dev_id);
11386+ int err = 0;
11387+
11388+ if (!mdev)
11389+ return -ENODEV;
11390+
11391+ most_dbg("%s: %s, state: %d\n", __func__, mdev->name, mdev->state);
11392+
11393+ if (mdev->state == MOST_DEV_UP)
11394+ err = -EALREADY;
11395+
11396+ if (!err)
11397+ err = mdev->open(mdev);
11398+ if (!err)
11399+ mdev->state = MOST_DEV_UP;
11400+
11401+ most_dev_put(mdev);
11402+ most_dbg("%s: %s, state: %d, err: %d\n", __func__,
11403+ mdev->name, mdev->state, err);
11404+ return err;
11405+}
11406+
11407+static int __most_close_dev(struct most_dev *mdev)
11408+{
11409+ int err = 0;
11410+
11411+ most_dbg("%s: %s, state: %d\n", __func__, mdev ? mdev->name : "nil",
11412+ mdev ? mdev->state : -1);
11413+
11414+ if (!mdev)
11415+ return -ENODEV;
11416+
11417+ if (mdev->state == MOST_DEV_DOWN)
11418+ err = -EALREADY;
11419+
11420+ if (!err)
11421+ err = mdev->close(mdev);
11422+ if (!err)
11423+ mdev->state = MOST_DEV_DOWN;
11424+
11425+ most_dev_put(mdev);
11426+ most_dbg("%s: %s, state: %d, err: %d\n", __func__,
11427+ mdev->name, mdev->state, err);
11428+ return err;
11429+}
11430+
11431+int most_close_dev(u16 dev_id)
11432+{
11433+ return __most_close_dev(most_dev_get(dev_id));
11434+}
11435+
11436+int most_get_dev_list(void __user *arg)
11437+{
11438+ struct most_dev_list_req *dl;
11439+ struct most_dev_req *dr;
11440+ struct list_head *p;
11441+ int n = 0, size, err;
11442+ u16 dev_num;
11443+
11444+ if (get_user(dev_num, (u16 __user *) arg))
11445+ return -EFAULT;
11446+
11447+ if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
11448+ return -EINVAL;
11449+
11450+ size = sizeof(*dl) + dev_num * sizeof(*dr);
11451+
11452+ dl = kzalloc(size, GFP_KERNEL);
11453+ if (!dl)
11454+ return -ENOMEM;
11455+
11456+ dr = dl->dev_req;
11457+
11458+ read_lock_bh(&most_dev_list_lock);
11459+ list_for_each(p, &most_dev_list) {
11460+ struct most_dev *mdev;
11461+ mdev = list_entry(p, struct most_dev, list);
11462+ (dr + n)->dev_id = mdev->id;
11463+ if (++n >= dev_num)
11464+ break;
11465+ }
11466+ read_unlock_bh(&most_dev_list_lock);
11467+
11468+ dl->dev_num = n;
11469+ size = sizeof(*dl) + n * sizeof(*dr);
11470+
11471+ err = copy_to_user(arg, dl, size);
11472+ kfree(dl);
11473+
11474+ return err ? -EFAULT : 0;
11475+}
11476+
11477+static int most_send_frame(struct sk_buff *skb)
11478+{
11479+ struct most_dev *mdev = (struct most_dev *) skb->dev;
11480+
11481+ if (!mdev) {
11482+ kfree_skb(skb);
11483+ return -ENODEV;
11484+ }
11485+
11486+ most_dbg("%s: %s type %d len %d\n", __func__, mdev->name,
11487+ most_cb(skb)->channel_type, skb->len);
11488+
11489+ /* Get rid of skb owner, prior to sending to the driver. */
11490+ skb_orphan(skb);
11491+
11492+ return mdev->send(skb);
11493+}
11494+
11495+static void most_send_queue(struct sk_buff_head *q)
11496+{
11497+ struct sk_buff *skb;
11498+
11499+ while ((skb = skb_dequeue(q))) {
11500+ struct most_dev *mdev = (struct most_dev *)skb->dev;
11501+
11502+ most_dbg("%s: skb %p len %d\n", __func__, skb, skb->len);
11503+
11504+ if (!mdev->can_send || mdev->can_send(skb))
11505+ most_send_frame(skb);
11506+ else {
11507+ most_dbg("%s, could not send frame, requeueing\n",
11508+ __func__);
11509+ skb_queue_tail(q, skb);
11510+ break;
11511+ }
11512+ }
11513+}
11514+
11515+static void most_tx_task(unsigned long arg)
11516+{
11517+ struct most_dev *mdev = (struct most_dev *) arg;
11518+
11519+ most_dbg("%s: %s\n", __func__, mdev->name);
11520+
11521+ most_send_queue(&mdev->ctl_q);
11522+ most_send_queue(&mdev->sync_q);
11523+ most_send_queue(&mdev->async_q);
11524+}
11525+
11526+static void most_rx_task(unsigned long arg)
11527+{
11528+ struct most_dev *mdev = (struct most_dev *) arg;
11529+ struct sk_buff *skb = skb_dequeue(&mdev->rx_q);
11530+
11531+ most_dbg("%s: %s\n", __func__, mdev->name);
11532+
11533+ while (skb) {
11534+ /* Send to the sockets */
11535+ most_send_to_sock(mdev->id, skb);
11536+ kfree_skb(skb);
11537+ skb = skb_dequeue(&mdev->rx_q);
11538+ }
11539+}
11540+
11541+
11542+/* Get MOST device by index.
11543+ * Device is held on return. */
11544+struct most_dev *most_dev_get(int index)
11545+{
11546+ struct most_dev *mdev = NULL;
11547+ struct list_head *p;
11548+
11549+ if (index < 0)
11550+ return NULL;
11551+
11552+ read_lock(&most_dev_list_lock);
11553+ list_for_each(p, &most_dev_list) {
11554+ struct most_dev *d = list_entry(p, struct most_dev, list);
11555+ if (d->id == index) {
11556+ mdev = most_dev_hold(d);
11557+ break;
11558+ }
11559+ }
11560+ read_unlock(&most_dev_list_lock);
11561+ return mdev;
11562+}
11563+EXPORT_SYMBOL(most_dev_get);
11564+
11565+
11566+/* Alloc MOST device */
11567+struct most_dev *most_alloc_dev(void)
11568+{
11569+ struct most_dev *mdev;
11570+
11571+ mdev = kzalloc(sizeof(struct most_dev), GFP_KERNEL);
11572+ if (!mdev)
11573+ return NULL;
11574+
11575+ mdev->state = MOST_DEV_DOWN;
11576+
11577+ return mdev;
11578+}
11579+EXPORT_SYMBOL(most_alloc_dev);
11580+
11581+
11582+void most_free_dev(struct most_dev *mdev)
11583+{
11584+ kfree(mdev);
11585+}
11586+EXPORT_SYMBOL(most_free_dev);
11587+
11588+
11589+/* Register MOST device */
11590+int most_register_dev(struct most_dev *mdev)
11591+{
11592+ struct list_head *head = &most_dev_list, *p;
11593+ int id = 0;
11594+
11595+ if (!mdev->open || !mdev->close || !mdev->send || !mdev->owner)
11596+ return -EINVAL;
11597+
11598+ write_lock_bh(&most_dev_list_lock);
11599+
11600+ /* Find first available device id */
11601+ list_for_each(p, &most_dev_list) {
11602+ if (list_entry(p, struct most_dev, list)->id != id)
11603+ break;
11604+ head = p; id++;
11605+ }
11606+
11607+ sprintf(mdev->name, "most%d", id);
11608+ mdev->id = id;
11609+ list_add(&mdev->list, head);
11610+
11611+ tasklet_init(&mdev->rx_task, most_rx_task, (unsigned long) mdev);
11612+ tasklet_init(&mdev->tx_task, most_tx_task, (unsigned long) mdev);
11613+
11614+ skb_queue_head_init(&mdev->rx_q);
11615+ skb_queue_head_init(&mdev->ctl_q);
11616+ skb_queue_head_init(&mdev->sync_q);
11617+ skb_queue_head_init(&mdev->async_q);
11618+
11619+ write_unlock_bh(&most_dev_list_lock);
11620+ return 0;
11621+}
11622+EXPORT_SYMBOL(most_register_dev);
11623+
11624+int most_unregister_dev(struct most_dev *mdev)
11625+{
11626+ int ret = 0;
11627+ most_dbg("%s: %s: state: %d\n", __func__, mdev->name, mdev->state);
11628+
11629+ if (mdev->state != MOST_DEV_DOWN)
11630+ ret = __most_close_dev(mdev);
11631+
11632+ write_lock_bh(&most_dev_list_lock);
11633+ list_del(&mdev->list);
11634+ write_unlock_bh(&most_dev_list_lock);
11635+
11636+ return ret;
11637+}
11638+EXPORT_SYMBOL(most_unregister_dev);
11639+
11640diff -uNr linux-2.6.31/net/most/most_sock.c linux-2.6.31.new/net/most/most_sock.c
11641--- linux-2.6.31/net/most/most_sock.c 1969-12-31 16:00:00.000000000 -0800
11642+++ linux-2.6.31.new/net/most/most_sock.c 2009-10-23 11:17:37.000000000 -0700
11643@@ -0,0 +1,315 @@
11644+/*
11645+ * most_sock.c Generic functions for MOST sockets
11646+ * Copyright (c) 2009 Intel Corporation
11647+ *
11648+ * This program is free software; you can redistribute it and/or modify
11649+ * it under the terms of the GNU General Public License version 2 as
11650+ * published by the Free Software Foundation.
11651+ *
11652+ * This program is distributed in the hope that it will be useful,
11653+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
11654+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11655+ * GNU General Public License for more details.
11656+ *
11657+ * You should have received a copy of the GNU General Public License
11658+ * along with this program; if not, write to the Free Software
11659+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
11660+ */
11661+
11662+#include <linux/module.h>
11663+#include <net/most/most_core.h>
11664+
11665+static struct most_sock_list most_sk_list = {
11666+ .lock = __RW_LOCK_UNLOCKED(ctl_sk_list.lock)
11667+};
11668+
11669+void most_sock_link(struct sock *sk)
11670+{
11671+ write_lock_bh(&most_sk_list.lock);
11672+ sk_add_node(sk, &most_sk_list.head);
11673+ write_unlock_bh(&most_sk_list.lock);
11674+}
11675+EXPORT_SYMBOL(most_sock_link);
11676+
11677+void most_sock_unlink(struct sock *sk)
11678+{
11679+ write_lock_bh(&most_sk_list.lock);
11680+ sk_del_node_init(sk);
11681+ write_unlock_bh(&most_sk_list.lock);
11682+}
11683+EXPORT_SYMBOL(most_sock_unlink);
11684+
11685+static int channel_in_use(int dev_id, u8 channel)
11686+{
11687+ struct sock *sk;
11688+ struct hlist_node *node;
11689+
11690+ read_lock_bh(&most_sk_list.lock);
11691+
11692+ sk_for_each(sk, node, &most_sk_list.head)
11693+ if (most_sk(sk)->dev_id == dev_id &&
11694+ sk->sk_state == MOST_BOUND &&
11695+ (most_sk(sk)->rx_channel == channel ||
11696+ most_sk(sk)->tx_channel == channel))
11697+ goto found;
11698+
11699+ sk = NULL;
11700+found:
11701+ read_unlock_bh(&most_sk_list.lock);
11702+
11703+ return sk != NULL;
11704+}
11705+
11706+int most_send_to_sock(int dev_id, struct sk_buff *skb)
11707+{
11708+ struct sock *sk;
11709+ struct hlist_node *node;
11710+
11711+ read_lock(&most_sk_list.lock);
11712+ sk_for_each(sk, node, &most_sk_list.head) {
11713+ if (most_sk(sk)->dev_id == dev_id &&
11714+ most_sk(sk)->channel_type == most_cb(skb)->channel_type
11715+ && most_sk(sk)->rx_channel == most_cb(skb)->channel &&
11716+ sk->sk_state == MOST_BOUND) {
11717+
11718+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
11719+ if (nskb)
11720+ if (sock_queue_rcv_skb(sk, nskb))
11721+ kfree_skb(nskb);
11722+ }
11723+
11724+ }
11725+ read_unlock(&most_sk_list.lock);
11726+
11727+ return 0;
11728+}
11729+EXPORT_SYMBOL(most_send_to_sock);
11730+
11731+int most_sock_release(struct socket *sock)
11732+{
11733+ struct sock *sk = sock->sk;
11734+ struct most_dev *mdev;
11735+
11736+ most_dbg("%s: sock %p sk %p\n", __func__, sock, sk);
11737+
11738+ if (!sk)
11739+ return 0;
11740+
11741+ mdev = most_sk(sk)->mdev;
11742+
11743+ most_sock_unlink(sk);
11744+
11745+ if (mdev) {
11746+ if (sk->sk_state == MOST_BOUND)
11747+ most_configure_channels(mdev, most_sk(sk), 0);
11748+
11749+ most_dev_put(mdev);
11750+ }
11751+
11752+ sock_orphan(sk);
11753+ sock_put(sk);
11754+ return 0;
11755+}
11756+EXPORT_SYMBOL(most_sock_release);
11757+
11758+int most_sock_bind(struct socket *sock, int dev_id, u8 rx_chan, u8 tx_chan)
11759+{
11760+ struct sock *sk = sock->sk;
11761+ struct most_dev *mdev = NULL;
11762+ int err = 0;
11763+
11764+ most_dbg("%s: sock %p sk %p, rx: %d, tx: %d\n",
11765+ __func__, sock, sk, rx_chan, tx_chan);
11766+
11767+ lock_sock(sk);
11768+
11769+ if (sk->sk_state != MOST_OPEN) {
11770+ err = -EBADFD;
11771+ goto done;
11772+ }
11773+
11774+ if (most_sk(sk)->mdev) {
11775+ err = -EALREADY;
11776+ goto done;
11777+ }
11778+
11779+ if (channel_in_use(dev_id, rx_chan) ||
11780+ channel_in_use(dev_id, tx_chan)) {
11781+ err = -EADDRINUSE;
11782+ goto done;
11783+ } else {
11784+ most_sk(sk)->rx_channel = rx_chan;
11785+ most_sk(sk)->tx_channel = tx_chan;
11786+ }
11787+
11788+ mdev = most_dev_get(dev_id);
11789+ if (!mdev) {
11790+ err = -ENODEV;
11791+ goto done;
11792+ }
11793+
11794+ err = most_configure_channels(mdev, most_sk(sk), 1);
11795+ if (err) {
11796+ most_dev_put(mdev);
11797+ goto done;
11798+ }
11799+
11800+ most_sk(sk)->mdev = mdev;
11801+ most_sk(sk)->dev_id = mdev->id;
11802+
11803+ sk->sk_state = MOST_BOUND;
11804+
11805+done:
11806+ release_sock(sk);
11807+ return err;
11808+}
11809+EXPORT_SYMBOL(most_sock_bind);
11810+
11811+
11812+int most_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
11813+{
11814+ most_dbg("%s\n", __func__);
11815+ return -EINVAL;
11816+}
11817+EXPORT_SYMBOL(most_sock_ioctl);
11818+
11819+int most_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
11820+ struct msghdr *msg, size_t len, int flags)
11821+{
11822+ int noblock = flags & MSG_DONTWAIT;
11823+ struct sock *sk = sock->sk;
11824+ struct sk_buff *skb;
11825+ int copied, err;
11826+
11827+ most_dbg("%s\n", __func__);
11828+
11829+ if (most_sk(sk)->rx_channel == MOST_NO_CHANNEL)
11830+ return -EOPNOTSUPP;
11831+
11832+ if (flags & (MSG_OOB))
11833+ return -EOPNOTSUPP;
11834+
11835+ if (sk->sk_state != MOST_BOUND)
11836+ return 0;
11837+
11838+ skb = skb_recv_datagram(sk, flags, noblock, &err);
11839+ if (!skb)
11840+ return err;
11841+
11842+ msg->msg_namelen = 0;
11843+
11844+ copied = skb->len;
11845+ if (len < copied) {
11846+ msg->msg_flags |= MSG_TRUNC;
11847+ copied = len;
11848+ }
11849+
11850+ skb_reset_transport_header(skb);
11851+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
11852+
11853+ skb_free_datagram(sk, skb);
11854+
11855+ return err ? : copied;
11856+}
11857+EXPORT_SYMBOL(most_sock_recvmsg);
11858+
11859+int most_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
11860+ struct msghdr *msg, size_t len)
11861+{
11862+ struct sock *sk = sock->sk;
11863+ struct most_dev *mdev;
11864+ struct sk_buff *skb;
11865+ int err;
11866+
11867+ most_dbg("%s: sock %p sk %p, channeltype: %d\n",
11868+ __func__, sock, sk, most_sk(sk)->channel_type);
11869+
11870+ if (most_sk(sk)->tx_channel == MOST_NO_CHANNEL)
11871+ return -EOPNOTSUPP;
11872+
11873+ if (msg->msg_flags & MSG_OOB)
11874+ return -EOPNOTSUPP;
11875+
11876+ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
11877+ return -EINVAL;
11878+
11879+ lock_sock(sk);
11880+
11881+ mdev = most_sk(sk)->mdev;
11882+ if (!mdev) {
11883+ err = -EBADFD;
11884+ goto done;
11885+ }
11886+
11887+ skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
11888+ if (!skb)
11889+ goto done;
11890+
11891+ most_cb(skb)->channel = most_sk(sk)->tx_channel;
11892+ most_cb(skb)->channel_type = most_sk(sk)->channel_type;
11893+
11894+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
11895+ err = -EFAULT;
11896+ goto drop;
11897+ }
11898+
11899+ skb->dev = (void *) mdev;
11900+
11901+ skb_queue_tail(&mdev->ctl_q, skb);
11902+ most_sched_tx(mdev);
11903+
11904+ err = len;
11905+
11906+done:
11907+ release_sock(sk);
11908+ return err;
11909+
11910+drop:
11911+ kfree_skb(skb);
11912+ goto done;
11913+}
11914+EXPORT_SYMBOL(most_sock_sendmsg);
11915+
11916+int most_sock_setsockopt(struct socket *sock, int level, int optname,
11917+ char __user *optval, int len)
11918+{
11919+ struct sock *sk = sock->sk;
11920+ int err = 0;
11921+
11922+ most_dbg("%s: sk %p", __func__, sk);
11923+
11924+ lock_sock(sk);
11925+
11926+ switch (optname) {
11927+ default:
11928+ err = -ENOPROTOOPT;
11929+ break;
11930+ }
11931+
11932+ release_sock(sk);
11933+ return err;
11934+}
11935+EXPORT_SYMBOL(most_sock_setsockopt);
11936+
11937+
11938+int most_sock_getsockopt(struct socket *sock, int level, int optname,
11939+ char __user *optval, int __user *optlen)
11940+{
11941+ struct sock *sk = sock->sk;
11942+ int err = 0;
11943+
11944+ most_dbg("%s: sk %p", __func__, sk);
11945+
11946+ lock_sock(sk);
11947+
11948+ switch (optname) {
11949+ default:
11950+ err = -ENOPROTOOPT;
11951+ break;
11952+ }
11953+
11954+ release_sock(sk);
11955+ return err;
11956+}
11957+EXPORT_SYMBOL(most_sock_getsockopt);
11958+
11959diff -uNr linux-2.6.31/net/most/sync_sock.c linux-2.6.31.new/net/most/sync_sock.c
11960--- linux-2.6.31/net/most/sync_sock.c 1969-12-31 16:00:00.000000000 -0800
11961+++ linux-2.6.31.new/net/most/sync_sock.c 2009-10-23 11:17:37.000000000 -0700
11962@@ -0,0 +1,150 @@
11963+/*
11964+ * sync_sock.c Support for MOST synchronous sockets
11965+ * Copyright (c) 2009 Intel Corporation
11966+ *
11967+ * This program is free software; you can redistribute it and/or modify
11968+ * it under the terms of the GNU General Public License version 2 as
11969+ * published by the Free Software Foundation.
11970+ *
11971+ * This program is distributed in the hope that it will be useful,
11972+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
11973+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11974+ * GNU General Public License for more details.
11975+ *
11976+ * You should have received a copy of the GNU General Public License
11977+ * along with this program; if not, write to the Free Software
11978+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
11979+ */
11980+
11981+#include <linux/module.h>
11982+#include <net/most/most.h>
11983+#include <net/most/most_core.h>
11984+#include <net/most/sync.h>
11985+
11986+static int sync_sock_bind(struct socket *sock, struct sockaddr *addr,
11987+ int addr_len)
11988+{
11989+ struct sockaddr_mostsync *saddr = (struct sockaddr_mostsync *)addr;
11990+
11991+ if (!saddr || saddr->most_family != AF_MOST)
11992+ return -EINVAL;
11993+
11994+ return most_sock_bind(sock, saddr->most_dev, saddr->rx_channel,
11995+ saddr->tx_channel);
11996+}
11997+
11998+static int sync_sock_getname(struct socket *sock, struct sockaddr *addr,
11999+ int *addr_len, int peer)
12000+{
12001+ struct sockaddr_mostsync *saddr = (struct sockaddr_mostsync *)addr;
12002+ struct sock *sk = sock->sk;
12003+ struct most_dev *mdev = most_sk(sk)->mdev;
12004+
12005+ if (!mdev)
12006+ return -EBADFD;
12007+
12008+ lock_sock(sk);
12009+
12010+ *addr_len = sizeof(*saddr);
12011+ saddr->most_family = AF_MOST;
12012+ saddr->most_dev = mdev->id;
12013+ saddr->rx_channel = most_sk(sk)->rx_channel;
12014+ saddr->tx_channel = most_sk(sk)->tx_channel;
12015+
12016+ release_sock(sk);
12017+ return 0;
12018+}
12019+
12020+
12021+static const struct proto_ops sync_sock_ops = {
12022+ .family = PF_MOST,
12023+ .owner = THIS_MODULE,
12024+ .release = most_sock_release,
12025+ .bind = sync_sock_bind,
12026+ .getname = sync_sock_getname,
12027+ .sendmsg = most_sock_sendmsg,
12028+ .recvmsg = most_sock_recvmsg,
12029+ .ioctl = most_sock_ioctl,
12030+ .poll = datagram_poll,
12031+ .listen = sock_no_listen,
12032+ .shutdown = sock_no_shutdown,
12033+ .setsockopt = most_sock_setsockopt,
12034+ .getsockopt = most_sock_getsockopt,
12035+ .connect = sock_no_connect,
12036+ .socketpair = sock_no_socketpair,
12037+ .accept = sock_no_accept,
12038+ .mmap = sock_no_mmap
12039+};
12040+static struct proto sync_sk_proto = {
12041+ .name = "SYNC",
12042+ .owner = THIS_MODULE,
12043+ .obj_size = sizeof(struct most_sock)
12044+};
12045+
12046+static int sync_sock_create(struct net *net, struct socket *sock, int protocol)
12047+{
12048+ struct sock *sk;
12049+
12050+ if (sock->type != SOCK_STREAM)
12051+ return -ESOCKTNOSUPPORT;
12052+
12053+ sock->ops = &sync_sock_ops;
12054+
12055+ sk = most_sk_alloc(net, &sync_sk_proto, CHAN_SYNC);
12056+ if (!sk)
12057+ return -ENOMEM;
12058+
12059+ sock_init_data(sock, sk);
12060+
12061+ sock_reset_flag(sk, SOCK_ZAPPED);
12062+
12063+ sk->sk_protocol = protocol;
12064+
12065+ sock->state = SS_UNCONNECTED;
12066+ sk->sk_state = MOST_OPEN;
12067+
12068+ most_sock_link(sk);
12069+ return 0;
12070+}
12071+
12072+static struct net_proto_family sync_sock_family_ops = {
12073+ .family = PF_MOST,
12074+ .owner = THIS_MODULE,
12075+ .create = sync_sock_create,
12076+};
12077+
12078+
12079+static int __init sync_init(void)
12080+{
12081+ int err;
12082+
12083+ err = proto_register(&sync_sk_proto, 0);
12084+ if (err < 0)
12085+ return err;
12086+
12087+ err = most_sock_register(MOSTPROTO_SYNC, &sync_sock_family_ops);
12088+ if (err < 0) {
12089+ printk(KERN_ERR "MOST socket registration failed\n");
12090+ return err;
12091+ }
12092+
12093+ printk(KERN_INFO "MOST synchronous socket layer initialized\n");
12094+
12095+ return 0;
12096+}
12097+
12098+static void __exit sync_exit(void)
12099+{
12100+ if (most_sock_unregister(MOSTPROTO_SYNC) < 0)
12101+ printk(KERN_ERR "SYNC socket unregistration failed\n");
12102+
12103+ proto_unregister(&sync_sk_proto);
12104+}
12105+
12106+module_init(sync_init);
12107+module_exit(sync_exit);
12108+
12109+MODULE_DESCRIPTION("Most Syncronous");
12110+MODULE_LICENSE("GPL v2");
12111+MODULE_ALIAS("most-proto-2");
12112+
12113diff -uNr linux-2.6.31/sound/drivers/Kconfig linux-2.6.31.new/sound/drivers/Kconfig
12114--- linux-2.6.31/sound/drivers/Kconfig 2009-10-23 11:18:30.000000000 -0700
12115+++ linux-2.6.31.new/sound/drivers/Kconfig 2009-10-23 11:16:57.000000000 -0700
12116@@ -182,4 +182,17 @@
12117 The default time-out value in seconds for AC97 automatic
12118 power-save mode. 0 means to disable the power-save mode.
12119
12120+config SND_TIMBERDALE_I2S
12121+ tristate "The timberdale FPGA I2S driver"
12122+ depends on MFD_TIMBERDALE && HAS_IOMEM
12123+ default y
12124+ help
12125+ Say Y here to enable driver for the I2S block found within the
12126+ Timberdale FPGA.
12127+ There is support for up to 8 I2S channels, in either transmitter
12128+ or receiver mode.
12129+
12130+ To compile this driver as a module, choose M here: the module
12131+ will be called snd-timbi2s.
12132+
12133 endif # SND_DRIVERS
12134diff -uNr linux-2.6.31/sound/drivers/Makefile linux-2.6.31.new/sound/drivers/Makefile
12135--- linux-2.6.31/sound/drivers/Makefile 2009-10-23 11:18:30.000000000 -0700
12136+++ linux-2.6.31.new/sound/drivers/Makefile 2009-10-23 11:16:57.000000000 -0700
12137@@ -10,6 +10,7 @@
12138 snd-serial-u16550-objs := serial-u16550.o
12139 snd-virmidi-objs := virmidi.o
12140 snd-ml403-ac97cr-objs := ml403-ac97cr.o pcm-indirect2.o
12141+snd-timbi2s-objs := timbi2s.o
12142
12143 # Toplevel Module Dependency
12144 obj-$(CONFIG_SND_DUMMY) += snd-dummy.o
12145@@ -19,5 +20,6 @@
12146 obj-$(CONFIG_SND_MTS64) += snd-mts64.o
12147 obj-$(CONFIG_SND_PORTMAN2X4) += snd-portman2x4.o
12148 obj-$(CONFIG_SND_ML403_AC97CR) += snd-ml403-ac97cr.o
12149+obj-$(CONFIG_SND_TIMBERDALE_I2S) += snd-timbi2s.o
12150
12151 obj-$(CONFIG_SND) += opl3/ opl4/ mpu401/ vx/ pcsp/
12152diff -uNr linux-2.6.31/sound/drivers/timbi2s.c linux-2.6.31.new/sound/drivers/timbi2s.c
12153--- linux-2.6.31/sound/drivers/timbi2s.c 1969-12-31 16:00:00.000000000 -0800
12154+++ linux-2.6.31.new/sound/drivers/timbi2s.c 2009-10-23 11:16:57.000000000 -0700
12155@@ -0,0 +1,755 @@
12156+/*
12157+ * timbi2s.c timberdale FPGA I2S driver
12158+ * Copyright (c) 2009 Intel Corporation
12159+ *
12160+ * This program is free software; you can redistribute it and/or modify
12161+ * it under the terms of the GNU General Public License version 2 as
12162+ * published by the Free Software Foundation.
12163+ *
12164+ * This program is distributed in the hope that it will be useful,
12165+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
12166+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12167+ * GNU General Public License for more details.
12168+ *
12169+ * You should have received a copy of the GNU General Public License
12170+ * along with this program; if not, write to the Free Software
12171+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
12172+ */
12173+
12174+/* Supports:
12175+ * Timberdale FPGA I2S
12176+ *
12177+ */
12178+
12179+#include <linux/io.h>
12180+#include <linux/interrupt.h>
12181+#include <linux/platform_device.h>
12182+#include <sound/core.h>
12183+#include <sound/pcm.h>
12184+#include <sound/pcm_params.h>
12185+#include <sound/initval.h>
12186+#include <sound/timbi2s.h>
12187+
12188+#define DRIVER_NAME "timb-i2s"
12189+
12190+#define MAX_BUSSES 8
12191+
12192+#define TIMBI2S_REG_VER 0x00
12193+#define TIMBI2S_REG_UIR 0x04
12194+
12195+#define TIMBI2S_BUS_PRESCALE 0x00
12196+#define TIMBI2S_BUS_ICLR 0x04
12197+#define TIMBI2S_BUS_IPR 0x08
12198+#define TIMBI2S_BUS_ISR 0x0c
12199+#define TIMBI2S_BUS_IER 0x10
12200+
12201+
12202+#define TIMBI2S_IRQ_TX_FULL 0x01
12203+#define TIMBI2S_IRQ_TX_ALMOST_FULL 0x02
12204+#define TIMBI2S_IRQ_TX_ALMOST_EMPTY 0x04
12205+#define TIMBI2S_IRQ_TX_EMPTY 0x08
12206+
12207+#define TIMBI2S_IRQ_RX_FULL 0x10
12208+#define TIMBI2S_IRQ_RX_ALMOST_FULL 0x20
12209+#define TIMBI2S_IRQ_RX_ALMOST_EMPTY 0x40
12210+#define TIMBI2S_IRQ_RX_NOT_EMPTY 0x80
12211+
12212+#define TIMBI2S_BUS_ICOR 0x14
12213+#define TIMBI2S_ICOR_TX_ENABLE 0x00000001
12214+#define TIMBI2S_ICOR_RX_ENABLE 0x00000002
12215+#define TIMBI2S_ICOR_LFIFO_RST 0x00000004
12216+#define TIMBI2S_ICOR_RFIFO_RST 0x00000008
12217+#define TIMBI2S_ICOR_FIFO_RST (TIMBI2S_ICOR_LFIFO_RST | TIMBI2S_ICOR_RFIFO_RST)
12218+#define TIMBI2S_ICOR_SOFT_RST 0x00000010
12219+#define TIMBI2S_ICOR_WORD_SEL_LEFT_SHIFT 8
12220+#define TIMBI2S_ICOR_WORD_SEL_LEFT_MASK (0xff << 8)
12221+#define TIMBI2S_ICOR_WORD_SEL_RIGHT_SHIFT 16
12222+#define TIMBI2S_ICOR_WORD_SEL_RIGHT_MASK (0xff << 16)
12223+#define TIMBI2S_ICOR_CLK_MASTER 0x10000000
12224+#define TIMBI2S_ICOR_RX_ID 0x20000000
12225+#define TIMBI2S_ICOR_TX_ID 0x40000000
12226+#define TIMBI2S_ICOR_WORD_SEL 0x80000000
12227+#define TIMBI2S_BUS_FIFO 0x18
12228+
12229+#define TIMBI2S_BUS_REG_AREA_SIZE (TIMBI2S_BUS_FIFO - \
12230+ TIMBI2S_BUS_PRESCALE + 4)
12231+#define TIMBI2S_FIRST_BUS_AREA_OFS 0x08
12232+
12233+struct timbi2s_bus {
12234+ u32 flags;
12235+ u32 prescale;
12236+ struct snd_pcm *pcm;
12237+ struct snd_card *card;
12238+ struct snd_pcm_substream *substream;
12239+ unsigned buf_pos;
12240+ spinlock_t lock; /* mutual exclusion */
12241+ u16 sample_rate;
12242+};
12243+
12244+#define BUS_RX 0x200
12245+#define BUS_MASTER 0x100
12246+#define BUS_INDEX_MASK 0xff
12247+#define BUS_INDEX(b) ((b)->flags & BUS_INDEX_MASK)
12248+#define BUS_IS_MASTER(b) ((b)->flags & BUS_MASTER)
12249+#define BUS_IS_RX(b) ((b)->flags & BUS_RX)
12250+
12251+#define SET_BUS_INDEX(b, id) ((b)->flags = ((b)->flags & ~BUS_INDEX_MASK) | id)
12252+#define SET_BUS_MASTER(b) ((b)->flags |= BUS_MASTER)
12253+#define SET_BUS_RX(b) ((b)->flags |= BUS_RX)
12254+
12255+#define TIMBI2S_BUS_OFFSET(bus) (TIMBI2S_FIRST_BUS_AREA_OFS + \
12256+ TIMBI2S_BUS_REG_AREA_SIZE * BUS_INDEX(bus))
12257+
12258+struct timbi2s {
12259+ void __iomem *membase;
12260+ int irq;
12261+ struct tasklet_struct tasklet;
12262+ u32 main_clk;
12263+ unsigned num_busses;
12264+ struct timbi2s_bus busses[0];
12265+};
12266+
12267+#define BITS_PER_CHANNEL 16
12268+#define NUM_CHANNELS 2
12269+
12270+#define SAMPLE_SIZE ((NUM_CHANNELS * BITS_PER_CHANNEL) / 8)
12271+#define NUM_PERIODS 32
12272+#define NUM_SAMPLES 256
12273+
12274+static struct snd_pcm_hardware timbi2s_rx_hw = {
12275+ .info = (SNDRV_PCM_INFO_MMAP
12276+ | SNDRV_PCM_INFO_MMAP_VALID
12277+ | SNDRV_PCM_INFO_INTERLEAVED),
12278+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
12279+ .rates = SNDRV_PCM_RATE_44100,
12280+ .rate_min = 44100,
12281+ .rate_max = 44100,
12282+ .channels_min = 2, /* only stereo */
12283+ .channels_max = 2,
12284+ .buffer_bytes_max = NUM_PERIODS * SAMPLE_SIZE * NUM_SAMPLES,
12285+ .period_bytes_min = SAMPLE_SIZE * NUM_SAMPLES,
12286+ .period_bytes_max = SAMPLE_SIZE * NUM_SAMPLES,
12287+ .periods_min = NUM_PERIODS,
12288+ .periods_max = NUM_PERIODS,
12289+};
12290+
12291+static struct snd_pcm_hardware timbi2s_tx_hw = {
12292+ .info = (SNDRV_PCM_INFO_MMAP
12293+ | SNDRV_PCM_INFO_MMAP_VALID
12294+ | SNDRV_PCM_INFO_INTERLEAVED),
12295+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
12296+ .rates = SNDRV_PCM_RATE_8000,
12297+ .rate_min = 8000,
12298+ .rate_max = 8000,
12299+ .channels_min = 2, /* only stereo */
12300+ .channels_max = 2,
12301+ .buffer_bytes_max = NUM_PERIODS * SAMPLE_SIZE * NUM_SAMPLES,
12302+ .period_bytes_min = SAMPLE_SIZE * NUM_SAMPLES,
12303+ .period_bytes_max = SAMPLE_SIZE * NUM_SAMPLES,
12304+ .periods_min = NUM_PERIODS,
12305+ .periods_max = NUM_PERIODS,
12306+};
12307+
12308+static inline void timbi2s_bus_write(struct timbi2s_bus *bus, u32 val, u32 reg)
12309+{
12310+ struct timbi2s *i2s = snd_pcm_chip(bus->card);
12311+
12312+ iowrite32(val, i2s->membase + TIMBI2S_BUS_OFFSET(bus) + reg);
12313+}
12314+
12315+static inline u32 timbi2s_bus_read(struct timbi2s_bus *bus, u32 reg)
12316+{
12317+ struct timbi2s *i2s = snd_pcm_chip(bus->card);
12318+
12319+ return ioread32(i2s->membase + TIMBI2S_BUS_OFFSET(bus) + reg);
12320+}
12321+
12322+static u32 timbi2s_calc_prescale(u32 main_clk, u32 sample_rate)
12323+{
12324+ u32 halfbit_rate = sample_rate * BITS_PER_CHANNEL * NUM_CHANNELS * 2;
12325+ return main_clk / halfbit_rate;
12326+}
12327+
12328+static int timbi2s_open(struct snd_pcm_substream *substream)
12329+{
12330+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12331+ struct snd_card *card = bus->card;
12332+ struct snd_pcm_runtime *runtime = substream->runtime;
12333+ dev_dbg(snd_card_get_device_link(card),
12334+ "%s: Entry, substream: %p, bus: %d\n", __func__, substream,
12335+ BUS_INDEX(bus));
12336+
12337+ if (BUS_IS_RX(bus)) {
12338+ runtime->hw = timbi2s_rx_hw;
12339+ if (bus->sample_rate == 8000) {
12340+ runtime->hw.rates = SNDRV_PCM_RATE_8000;
12341+ runtime->hw.rate_min = 8000;
12342+ runtime->hw.rate_max = 8000;
12343+ }
12344+ } else
12345+ runtime->hw = timbi2s_tx_hw;
12346+
12347+ bus->substream = substream;
12348+
12349+ return 0;
12350+}
12351+
12352+static int timbi2s_close(struct snd_pcm_substream *substream)
12353+{
12354+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12355+ struct snd_card *card = bus->card;
12356+ dev_dbg(snd_card_get_device_link(card),
12357+ "%s: Entry, substream: %p, bus: %d\n", __func__, substream,
12358+ BUS_INDEX(bus));
12359+
12360+ bus->substream = NULL;
12361+
12362+ return 0;
12363+}
12364+
12365+static int timbi2s_hw_params(struct snd_pcm_substream *substream,
12366+ struct snd_pcm_hw_params *hw_params)
12367+{
12368+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12369+ struct snd_card *card = bus->card;
12370+ struct timbi2s *i2s = snd_pcm_chip(card);
12371+ int err;
12372+
12373+ dev_dbg(snd_card_get_device_link(card),
12374+ "%s: Entry, substream: %p, bus: %d\n", __func__,
12375+ substream, BUS_INDEX(bus));
12376+
12377+ bus->prescale = timbi2s_calc_prescale(i2s->main_clk,
12378+ params_rate(hw_params));
12379+
12380+ err = snd_pcm_lib_malloc_pages(substream,
12381+ params_buffer_bytes(hw_params));
12382+ if (err < 0)
12383+ return err;
12384+
12385+ dev_dbg(snd_card_get_device_link(card),
12386+ "%s: Rate: %d, format: %d\n", __func__, params_rate(hw_params),
12387+ params_format(hw_params));
12388+
12389+ return 0;
12390+}
12391+
12392+static int timbi2s_hw_free(struct snd_pcm_substream *substream)
12393+{
12394+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12395+ struct snd_card *card = bus->card;
12396+ unsigned long flags;
12397+
12398+ dev_dbg(snd_card_get_device_link(card),
12399+ "%s: Entry, substream: %p\n", __func__, substream);
12400+
12401+ spin_lock_irqsave(&bus->lock, flags);
12402+ /* disable interrupts */
12403+ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_IER);
12404+ spin_unlock_irqrestore(&bus->lock, flags);
12405+
12406+ /* disable TX and RX */
12407+ timbi2s_bus_write(bus, TIMBI2S_ICOR_FIFO_RST | TIMBI2S_ICOR_SOFT_RST,
12408+ TIMBI2S_BUS_ICOR);
12409+
12410+ return snd_pcm_lib_free_pages(substream);
12411+}
12412+
12413+static int timbi2s_prepare(struct snd_pcm_substream *substream)
12414+{
12415+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12416+ struct snd_card *card = bus->card;
12417+ struct snd_pcm_runtime *runtime = substream->runtime;
12418+ u32 data;
12419+
12420+ dev_dbg(snd_card_get_device_link(card),
12421+ "%s: Entry, substream: %p, bus: %d, buffer: %d, period: %d\n",
12422+ __func__, substream,
12423+ BUS_INDEX(bus), (int)snd_pcm_lib_buffer_bytes(substream),
12424+ (int)snd_pcm_lib_period_bytes(substream));
12425+
12426+ if (runtime->dma_addr & 3 || runtime->buffer_size & 3) {
12427+ dev_err(snd_card_get_device_link(card),
12428+ "%s: Only word aligned data allowed\n", __func__);
12429+ return -EINVAL;
12430+ }
12431+
12432+ if (runtime->channels != NUM_CHANNELS) {
12433+ dev_err(snd_card_get_device_link(card),
12434+ "%s: Number of channels unsupported %d\n", __func__,
12435+ runtime->channels);
12436+ return -EINVAL;
12437+ }
12438+
12439+ /* reset */
12440+ timbi2s_bus_write(bus, TIMBI2S_ICOR_FIFO_RST | TIMBI2S_ICOR_SOFT_RST,
12441+ TIMBI2S_BUS_ICOR);
12442+
12443+ /* only masters have prescaling, don't write if not needed */
12444+ if (BUS_IS_MASTER(bus))
12445+ timbi2s_bus_write(bus, bus->prescale, TIMBI2S_BUS_PRESCALE);
12446+
12447+ /* write word select */
12448+ data = ((BITS_PER_CHANNEL << TIMBI2S_ICOR_WORD_SEL_LEFT_SHIFT) &
12449+ TIMBI2S_ICOR_WORD_SEL_LEFT_MASK) |
12450+ ((BITS_PER_CHANNEL << TIMBI2S_ICOR_WORD_SEL_RIGHT_SHIFT) &
12451+ TIMBI2S_ICOR_WORD_SEL_RIGHT_MASK);
12452+ timbi2s_bus_write(bus, data, TIMBI2S_BUS_ICOR);
12453+
12454+ bus->buf_pos = 0;
12455+
12456+ return 0;
12457+}
12458+
12459+static int
12460+timbi2s_playback_trigger(struct snd_pcm_substream *substream, int cmd)
12461+{
12462+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12463+ struct snd_card *card = bus->card;
12464+ unsigned long flags;
12465+ u32 data;
12466+
12467+ dev_dbg(snd_card_get_device_link(card),
12468+ "%s: Entry, substream: %p, bus: %d, cmd: %d\n", __func__,
12469+ substream, BUS_INDEX(bus), cmd);
12470+
12471+ switch (cmd) {
12472+ case SNDRV_PCM_TRIGGER_START:
12473+ dev_dbg(snd_card_get_device_link(card),
12474+ "%s: Got TRIGGER_START command\n", __func__);
12475+
12476+ /* start */
12477+ data = timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR);
12478+ data |= TIMBI2S_ICOR_TX_ENABLE;
12479+ timbi2s_bus_write(bus, data, TIMBI2S_BUS_ICOR);
12480+
12481+ /* enable interrupts */
12482+ timbi2s_bus_write(bus, TIMBI2S_IRQ_TX_ALMOST_EMPTY,
12483+ TIMBI2S_BUS_IER);
12484+ dev_dbg(snd_card_get_device_link(card),
12485+ "%s: ISR: %x, ICOR: %x\n", __func__,
12486+ timbi2s_bus_read(bus, TIMBI2S_BUS_ISR),
12487+ timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR));
12488+ break;
12489+ case SNDRV_PCM_TRIGGER_STOP:
12490+ dev_dbg(snd_card_get_device_link(card),
12491+ "%s: Got TRIGGER_STOP command\n", __func__);
12492+
12493+ spin_lock_irqsave(&bus->lock, flags);
12494+ /* disable interrupts */
12495+ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_IER);
12496+ spin_unlock_irqrestore(&bus->lock, flags);
12497+
12498+ /* reset */
12499+ data = timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR);
12500+ data &= ~TIMBI2S_ICOR_TX_ENABLE;
12501+
12502+ timbi2s_bus_write(bus, data, TIMBI2S_BUS_ICOR);
12503+ break;
12504+ default:
12505+ dev_dbg(snd_card_get_device_link(card),
12506+ "%s: Got unsupported command\n", __func__);
12507+
12508+ return -EINVAL;
12509+ }
12510+
12511+ return 0;
12512+}
12513+
12514+static int
12515+timbi2s_capture_trigger(struct snd_pcm_substream *substream, int cmd)
12516+{
12517+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12518+ struct snd_card *card = bus->card;
12519+ unsigned long flags;
12520+
12521+ dev_dbg(snd_card_get_device_link(card),
12522+ "%s: Entry, substream: %p, bus: %d, cmd: %d\n", __func__,
12523+ substream, BUS_INDEX(bus), cmd);
12524+
12525+ switch (cmd) {
12526+ case SNDRV_PCM_TRIGGER_START:
12527+ dev_dbg(snd_card_get_device_link(card),
12528+ "%s: Got TRIGGER_START command\n", __func__);
12529+
12530+ timbi2s_bus_write(bus, TIMBI2S_ICOR_RX_ENABLE |
12531+ TIMBI2S_ICOR_FIFO_RST, TIMBI2S_BUS_ICOR);
12532+
12533+ timbi2s_bus_write(bus, TIMBI2S_IRQ_RX_ALMOST_FULL,
12534+ TIMBI2S_BUS_IER);
12535+ break;
12536+ case SNDRV_PCM_TRIGGER_STOP:
12537+ dev_dbg(snd_card_get_device_link(card),
12538+ "%s: Got TRIGGER_STOP command\n", __func__);
12539+ /* disable interrupts */
12540+ spin_lock_irqsave(&bus->lock, flags);
12541+ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_IER);
12542+ spin_unlock_irqrestore(&bus->lock, flags);
12543+ /* Stop RX */
12544+ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_ICOR);
12545+ break;
12546+ default:
12547+ dev_dbg(snd_card_get_device_link(card),
12548+ "%s: Got unsupported command\n", __func__);
12549+
12550+ return -EINVAL;
12551+ }
12552+
12553+ return 0;
12554+}
12555+
12556+static snd_pcm_uframes_t
12557+timbi2s_pointer(struct snd_pcm_substream *substream)
12558+{
12559+ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
12560+ struct snd_card *card = bus->card;
12561+ snd_pcm_uframes_t ret;
12562+
12563+ dev_dbg(snd_card_get_device_link(card),
12564+ "%s: Entry, substream: %p\n", __func__, substream);
12565+
12566+ ret = bytes_to_frames(substream->runtime, bus->buf_pos);
12567+ if (ret >= substream->runtime->buffer_size)
12568+ ret -= substream->runtime->buffer_size;
12569+
12570+ return ret;
12571+}
12572+
12573+static struct snd_pcm_ops timbi2s_playback_ops = {
12574+ .open = timbi2s_open,
12575+ .close = timbi2s_close,
12576+ .ioctl = snd_pcm_lib_ioctl,
12577+ .hw_params = timbi2s_hw_params,
12578+ .hw_free = timbi2s_hw_free,
12579+ .prepare = timbi2s_prepare,
12580+ .trigger = timbi2s_playback_trigger,
12581+ .pointer = timbi2s_pointer,
12582+};
12583+
12584+static struct snd_pcm_ops timbi2s_capture_ops = {
12585+ .open = timbi2s_open,
12586+ .close = timbi2s_close,
12587+ .ioctl = snd_pcm_lib_ioctl,
12588+ .hw_params = timbi2s_hw_params,
12589+ .hw_free = timbi2s_hw_free,
12590+ .prepare = timbi2s_prepare,
12591+ .trigger = timbi2s_capture_trigger,
12592+ .pointer = timbi2s_pointer,
12593+};
12594+
12595+static void timbi2s_irq_process_rx(struct timbi2s_bus *bus)
12596+{
12597+ struct snd_pcm_runtime *runtime = bus->substream->runtime;
12598+ u32 buffer_size = snd_pcm_lib_buffer_bytes(bus->substream);
12599+ u32 ipr = timbi2s_bus_read(bus, TIMBI2S_BUS_IPR);
12600+ int i;
12601+
12602+ dev_dbg(snd_card_get_device_link(bus->card),
12603+ "%s: Entry, bus: %d, IPR %x\n", __func__, BUS_INDEX(bus), ipr);
12604+
12605+ for (i = 0; i < NUM_SAMPLES; i++) {
12606+ *(u32 *)(runtime->dma_area + bus->buf_pos) =
12607+ timbi2s_bus_read(bus, TIMBI2S_BUS_FIFO);
12608+ bus->buf_pos += SAMPLE_SIZE;
12609+ bus->buf_pos %= buffer_size;
12610+ }
12611+
12612+ timbi2s_bus_write(bus, ipr, TIMBI2S_BUS_ICLR);
12613+
12614+ /* inform ALSA that a period was received */
12615+ snd_pcm_period_elapsed(bus->substream);
12616+}
12617+
12618+static void timbi2s_irq_process_tx(struct timbi2s_bus *bus)
12619+{
12620+ struct snd_pcm_runtime *runtime = bus->substream->runtime;
12621+ u32 buffer_size = snd_pcm_lib_buffer_bytes(bus->substream);
12622+ u32 ipr = timbi2s_bus_read(bus, TIMBI2S_BUS_IPR);
12623+ int i;
12624+
12625+ dev_dbg(snd_card_get_device_link(bus->card),
12626+ "%s: Entry, bus: %d, IPR %x\n", __func__, BUS_INDEX(bus), ipr);
12627+
12628+ for (i = 0; i < NUM_SAMPLES; i++) {
12629+ timbi2s_bus_write(bus,
12630+ *(u32 *)(runtime->dma_area + bus->buf_pos),
12631+ TIMBI2S_BUS_FIFO);
12632+ bus->buf_pos += SAMPLE_SIZE;
12633+ bus->buf_pos %= buffer_size;
12634+ }
12635+
12636+ dev_dbg(snd_card_get_device_link(bus->card), "%s: ISR: %x, ICOR: %x\n",
12637+ __func__, timbi2s_bus_read(bus, TIMBI2S_BUS_ISR),
12638+ timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR));
12639+
12640+ timbi2s_bus_write(bus, ipr, TIMBI2S_BUS_ICLR);
12641+
12642+ /* inform ALSA that a period was received */
12643+ snd_pcm_period_elapsed(bus->substream);
12644+}
12645+
12646+static void timbi2s_tasklet(unsigned long arg)
12647+{
12648+ struct snd_card *card = (struct snd_card *)arg;
12649+ struct timbi2s *i2s = snd_pcm_chip(card);
12650+ u32 uir = ioread32(i2s->membase + TIMBI2S_REG_UIR);
12651+ unsigned i;
12652+
12653+ dev_dbg(snd_card_get_device_link(card), "%s: Entry, UIR %x\n",
12654+ __func__, uir);
12655+
12656+ for (i = 0; i < i2s->num_busses; i++)
12657+ if (uir & (1 << i)) {
12658+ struct timbi2s_bus *bus = i2s->busses + i;
12659+ if (BUS_IS_RX(bus))
12660+ timbi2s_irq_process_rx(bus);
12661+ else
12662+ timbi2s_irq_process_tx(bus);
12663+ }
12664+
12665+ enable_irq(i2s->irq);
12666+}
12667+
12668+static irqreturn_t timbi2s_irq(int irq, void *devid)
12669+{
12670+ struct timbi2s *i2s = devid;
12671+
12672+ tasklet_schedule(&i2s->tasklet);
12673+ disable_irq_nosync(i2s->irq);
12674+
12675+ return IRQ_HANDLED;
12676+}
12677+
12678+static int timbi2s_setup_busses(struct snd_card *card,
12679+ struct platform_device *pdev)
12680+{
12681+ const struct timbi2s_platform_data *pdata = pdev->dev.platform_data;
12682+ unsigned i;
12683+
12684+ dev_dbg(&pdev->dev, "%s: Entry, no busses: %d, busses: %p\n", __func__,
12685+ pdata->num_busses, pdata->busses);
12686+
12687+ for (i = 0; i < pdata->num_busses; i++) {
12688+ int capture = pdata->busses[i].rx;
12689+ int err;
12690+ u32 ctl;
12691+ struct timbi2s *i2s = snd_pcm_chip(card);
12692+ struct timbi2s_bus *bus = i2s->busses + i;
12693+
12694+ dev_dbg(&pdev->dev, "%s: Setting up bus: %d\n", __func__, i);
12695+
12696+ SET_BUS_INDEX(bus, i);
12697+ bus->sample_rate = pdata->busses[i].sample_rate;
12698+ bus->card = card;
12699+ /* prescaling only applies to master busses, we use the
12700+ * knowledge of that to identify the direction later
12701+ * eg, bus->prescale != 0 -> master bus
12702+ */
12703+ if (capture)
12704+ SET_BUS_RX(bus);
12705+
12706+ spin_lock_init(&bus->lock);
12707+
12708+ if (bus->sample_rate != 44100 && bus->sample_rate != 8000) {
12709+ dev_err(&pdev->dev,
12710+ "Unsupported bitrate: %d\n", bus->sample_rate);
12711+ return -EINVAL;
12712+ }
12713+
12714+ dev_dbg(&pdev->dev, "%s: Will check HW direction on bus: %d\n",
12715+ __func__, BUS_INDEX(bus));
12716+
12717+ /* check that the HW agrees with the direction */
12718+ ctl = timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR);
12719+ if ((capture && !(ctl & TIMBI2S_ICOR_RX_ID)) ||
12720+ (!capture && !(ctl & TIMBI2S_ICOR_TX_ID))) {
12721+ dev_dbg(&pdev->dev,
12722+ "HW and platform data disagree on direction\n");
12723+ return -EINVAL;
12724+ }
12725+
12726+ dev_dbg(&pdev->dev, "%s: Will create PCM channel for bus: %d\n",
12727+ __func__, BUS_INDEX(bus));
12728+ err = snd_pcm_new(card, card->shortname, i, !capture,
12729+ capture, &bus->pcm);
12730+ if (err) {
12731+ dev_dbg(&pdev->dev, "%s, Failed to create pcm: %d\n",
12732+ __func__, err);
12733+ return err;
12734+ }
12735+
12736+ if (capture)
12737+ snd_pcm_set_ops(bus->pcm, SNDRV_PCM_STREAM_CAPTURE,
12738+ &timbi2s_capture_ops);
12739+ if (!capture)
12740+ snd_pcm_set_ops(bus->pcm, SNDRV_PCM_STREAM_PLAYBACK,
12741+ &timbi2s_playback_ops);
12742+
12743+ dev_dbg(&pdev->dev, "%s: Will preallocate buffers to bus: %d\n",
12744+ __func__, BUS_INDEX(bus));
12745+
12746+ err = snd_pcm_lib_preallocate_pages_for_all(bus->pcm,
12747+ SNDRV_DMA_TYPE_CONTINUOUS,
12748+ snd_dma_continuous_data(GFP_KERNEL),
12749+ NUM_SAMPLES * NUM_PERIODS * SAMPLE_SIZE * 2,
12750+ NUM_SAMPLES * NUM_PERIODS * SAMPLE_SIZE * 2);
12751+ if (err) {
12752+ dev_dbg(&pdev->dev, "%s, Failed to create pcm: %d\n",
12753+ __func__, err);
12754+
12755+ return err;
12756+ }
12757+
12758+ bus->pcm->private_data = bus;
12759+ bus->pcm->info_flags = 0;
12760+ strcpy(bus->pcm->name, card->shortname);
12761+ i2s->num_busses++;
12762+ }
12763+
12764+ return 0;
12765+}
12766+
12767+static int __devinit timbi2s_probe(struct platform_device *pdev)
12768+{
12769+ int err;
12770+ int irq;
12771+ struct timbi2s *i2s;
12772+ struct resource *iomem;
12773+ const struct timbi2s_platform_data *pdata = pdev->dev.platform_data;
12774+ struct snd_card *card;
12775+ u32 ver;
12776+
12777+ if (!pdata) {
12778+ err = -ENODEV;
12779+ goto out;
12780+ }
12781+
12782+ if (pdata->num_busses > MAX_BUSSES) {
12783+ err = -EINVAL;
12784+ goto out;
12785+ }
12786+
12787+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
12788+ if (!iomem) {
12789+ err = -ENODEV;
12790+ goto out;
12791+ }
12792+
12793+ irq = platform_get_irq(pdev, 0);
12794+ if (irq < 0) {
12795+ err = -ENODEV;
12796+ goto out;
12797+ }
12798+
12799+ err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
12800+ THIS_MODULE, sizeof(struct timbi2s) +
12801+ sizeof(struct timbi2s_bus) * pdata->num_busses, &card);
12802+ if (err)
12803+ goto out;
12804+
12805+ strcpy(card->driver, "Timberdale I2S");
12806+ strcpy(card->shortname, "Timberdale I2S");
12807+ sprintf(card->longname, "Timberdale I2S Driver");
12808+
12809+ snd_card_set_dev(card, &pdev->dev);
12810+
12811+ i2s = snd_pcm_chip(card);
12812+
12813+ if (!request_mem_region(iomem->start, resource_size(iomem),
12814+ DRIVER_NAME)) {
12815+ err = -EBUSY;
12816+ goto err_region;
12817+ }
12818+
12819+ i2s->membase = ioremap(iomem->start, resource_size(iomem));
12820+ if (!i2s->membase) {
12821+ err = -ENOMEM;
12822+ goto err_ioremap;
12823+ }
12824+
12825+ err = timbi2s_setup_busses(card, pdev);
12826+ if (err)
12827+ goto err_setup;
12828+
12829+ tasklet_init(&i2s->tasklet, timbi2s_tasklet, (unsigned long)card);
12830+ i2s->irq = irq;
12831+ i2s->main_clk = pdata->main_clk;
12832+
12833+ err = request_irq(irq, timbi2s_irq, 0, DRIVER_NAME, i2s);
12834+ if (err)
12835+ goto err_request_irq;
12836+
12837+ err = snd_card_register(card);
12838+ if (err)
12839+ goto err_register;
12840+
12841+ platform_set_drvdata(pdev, card);
12842+
12843+ ver = ioread32(i2s->membase + TIMBI2S_REG_VER);
12844+
12845+ printk(KERN_INFO
12846+ "Driver for Timberdale I2S (ver: %d.%d) successfully probed.\n",
12847+ ver >> 16 , ver & 0xffff);
12848+
12849+ return 0;
12850+
12851+err_register:
12852+ free_irq(irq, card);
12853+err_request_irq:
12854+err_setup:
12855+ iounmap(i2s->membase);
12856+err_ioremap:
12857+ release_mem_region(iomem->start, resource_size(iomem));
12858+err_region:
12859+ snd_card_free(card);
12860+out:
12861+ printk(KERN_ERR DRIVER_NAME": Failed to register: %d\n", err);
12862+
12863+ return err;
12864+}
12865+
12866+static int __devexit timbi2s_remove(struct platform_device *pdev)
12867+{
12868+ struct snd_card *card = platform_get_drvdata(pdev);
12869+ struct timbi2s *i2s = snd_pcm_chip(card);
12870+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
12871+
12872+ tasklet_kill(&i2s->tasklet);
12873+ free_irq(i2s->irq, i2s);
12874+
12875+ iounmap(i2s->membase);
12876+ release_mem_region(iomem->start, resource_size(iomem));
12877+ snd_card_free(card);
12878+
12879+ platform_set_drvdata(pdev, 0);
12880+ return 0;
12881+}
12882+
12883+static struct platform_driver timbi2s_platform_driver = {
12884+ .driver = {
12885+ .name = DRIVER_NAME,
12886+ .owner = THIS_MODULE,
12887+ },
12888+ .probe = timbi2s_probe,
12889+ .remove = __devexit_p(timbi2s_remove),
12890+};
12891+
12892+/*--------------------------------------------------------------------------*/
12893+
12894+static int __init timbi2s_init(void)
12895+{
12896+ return platform_driver_register(&timbi2s_platform_driver);
12897+}
12898+
12899+static void __exit timbi2s_exit(void)
12900+{
12901+ platform_driver_unregister(&timbi2s_platform_driver);
12902+}
12903+
12904+module_init(timbi2s_init);
12905+module_exit(timbi2s_exit);
12906+
12907+MODULE_ALIAS("platform:"DRIVER_NAME);
12908+MODULE_DESCRIPTION("Timberdale I2S bus driver");
12909+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
12910+MODULE_LICENSE("GPL v2");
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-2-2-timberdale.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-2-2-timberdale.patch
new file mode 100644
index 0000000000..3a7e27881e
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-2-2-timberdale.patch
@@ -0,0 +1,44 @@
1From 9de5f61c79361bf6e9394d2f77a2b436d53deee5 Mon Sep 17 00:00:00 2001
2From: Yong Wang <yong.y.wang@intel.com>
3Date: Tue, 30 Jun 2009 14:17:19 +0800
4Subject: [PATCH] Revert "net: num_dma_maps is not used"
5
6This reverts commit eae3f29cc73f83cc3f1891d3ad40021b5172c630.
7
8The IVI driver is a user of num_dma_maps.
9
10Signed-off-by: Yong Wang <yong.y.wang@intel.com>
11---
12 include/linux/skbuff.h | 3 +++
13 net/core/skb_dma_map.c | 1 +
14 2 files changed, 4 insertions(+), 0 deletions(-)
15
16diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
17index b47b3f0..468bc21 100644
18--- a/include/linux/skbuff.h
19+++ b/include/linux/skbuff.h
20@@ -198,6 +198,9 @@ struct skb_shared_info {
21 unsigned short gso_type;
22 __be32 ip6_frag_id;
23 union skb_shared_tx tx_flags;
24+#ifdef CONFIG_HAS_DMA
25+ unsigned int num_dma_maps;
26+#endif
27 struct sk_buff *frag_list;
28 struct skb_shared_hwtstamps hwtstamps;
29 skb_frag_t frags[MAX_SKB_FRAGS];
30diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c
31index 79687df..07d4ac5 100644
32--- a/net/core/skb_dma_map.c
33+++ b/net/core/skb_dma_map.c
34@@ -30,6 +30,7 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb,
35 goto unwind;
36 sp->dma_maps[i] = map;
37 }
38+ sp->num_dma_maps = i + 1;
39
40 return 0;
41
42--
431.6.0.6
44
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-bluetooth-suspend.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-bluetooth-suspend.patch
new file mode 100644
index 0000000000..786e1f2fcd
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-bluetooth-suspend.patch
@@ -0,0 +1,465 @@
1diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
2index e70c57e..8e36fc8 100644
3--- a/drivers/bluetooth/btusb.c
4+++ b/drivers/bluetooth/btusb.c
5@@ -35,7 +36,7 @@
6 #include <net/bluetooth/bluetooth.h>
7 #include <net/bluetooth/hci_core.h>
8
9-#define VERSION "0.5"
10+#define VERSION "0.6"
11
12 static int ignore_dga;
13 static int ignore_csr;
14@@ -145,6 +146,7 @@ static struct usb_device_id blacklist_table[] = {
15 #define BTUSB_INTR_RUNNING 0
16 #define BTUSB_BULK_RUNNING 1
17 #define BTUSB_ISOC_RUNNING 2
18+#define BTUSB_SUSPENDING 3
19
20 struct btusb_data {
21 struct hci_dev *hdev;
22@@ -157,11 +159,15 @@ struct btusb_data {
23 unsigned long flags;
24
25 struct work_struct work;
26+ struct work_struct waker;
27
28 struct usb_anchor tx_anchor;
29 struct usb_anchor intr_anchor;
30 struct usb_anchor bulk_anchor;
31 struct usb_anchor isoc_anchor;
32+ struct usb_anchor deferred;
33+ int tx_in_flight;
34+ spinlock_t txlock;
35
36 struct usb_endpoint_descriptor *intr_ep;
37 struct usb_endpoint_descriptor *bulk_tx_ep;
38@@ -174,8 +180,26 @@ struct btusb_data {
39 unsigned int sco_num;
40 int isoc_altsetting;
41 int suspend_count;
42+ int did_iso_resume:1;
43 };
44
45+static int inc_tx(struct btusb_data *data)
46+{
47+ unsigned long flags;
48+ int rv;
49+
50+ spin_lock_irqsave(&data->txlock, flags);
51+ rv = test_bit(BTUSB_SUSPENDING, &data->flags);
52+ BT_DBG("BTUSB_SUSPENDING bit = %d for intf %p in %s",
53+ rv, data->intf, __func__);
54+ if (!rv)
55+ data->tx_in_flight++;
56+ spin_unlock_irqrestore(&data->txlock, flags);
57+
58+ return rv;
59+}
60+
61+
62 static void btusb_intr_complete(struct urb *urb)
63 {
64 struct hci_dev *hdev = urb->context;
65@@ -202,6 +226,7 @@ static void btusb_intr_complete(struct urb *urb)
66 if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
67 return;
68
69+ usb_mark_last_busy(data->udev);
70 usb_anchor_urb(urb, &data->intr_anchor);
71
72 err = usb_submit_urb(urb, GFP_ATOMIC);
73@@ -327,6 +352,7 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags)
74
75 urb->transfer_flags |= URB_FREE_BUFFER;
76
77+ usb_mark_last_busy(data->udev);
78 usb_anchor_urb(urb, &data->bulk_anchor);
79
80 err = usb_submit_urb(urb, mem_flags);
81@@ -465,6 +491,33 @@ static void btusb_tx_complete(struct urb *urb)
82 {
83 struct sk_buff *skb = urb->context;
84 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
85+ struct btusb_data *data = hdev->driver_data;
86+
87+ BT_DBG("%s urb %p status %d count %d", hdev->name,
88+ urb, urb->status, urb->actual_length);
89+
90+ if (!test_bit(HCI_RUNNING, &hdev->flags))
91+ goto done;
92+
93+ if (!urb->status)
94+ hdev->stat.byte_tx += urb->transfer_buffer_length;
95+ else
96+ hdev->stat.err_tx++;
97+
98+done:
99+ spin_lock(&data->txlock);
100+ data->tx_in_flight--;
101+ spin_unlock(&data->txlock);
102+
103+ kfree(urb->setup_packet);
104+
105+ kfree_skb(skb);
106+}
107+
108+static void btusb_isoc_tx_complete(struct urb *urb)
109+{
110+ struct sk_buff *skb = urb->context;
111+ struct hci_dev *hdev = (struct hci_dev *) skb->dev;
112
113 BT_DBG("%s urb %p status %d count %d", hdev->name,
114 urb, urb->status, urb->actual_length);
115@@ -490,11 +543,16 @@ static int btusb_open(struct hci_dev *hdev)
116
117 BT_DBG("%s", hdev->name);
118
119+ err = usb_autopm_get_interface(data->intf);
120+ if (err < 0)
121+ return err;
122+ data->intf->needs_remote_wakeup = 1;
123+
124 if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
125- return 0;
126+ goto out;
127
128 if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
129- return 0;
130+ goto out;
131
132 err = btusb_submit_intr_urb(hdev, GFP_KERNEL);
133 if (err < 0)
134@@ -502,6 +560,7 @@ static int btusb_open(struct hci_dev *hdev)
135
136 err = btusb_submit_bulk_urb(hdev, GFP_KERNEL);
137 if (err < 0) {
138+ BT_DBG("kill urbs %s", __func__);
139 usb_kill_anchored_urbs(&data->intr_anchor);
140 goto failed;
141 }
142@@ -509,17 +568,28 @@ static int btusb_open(struct hci_dev *hdev)
143 set_bit(BTUSB_BULK_RUNNING, &data->flags);
144 btusb_submit_bulk_urb(hdev, GFP_KERNEL);
145
146+out:
147+ usb_autopm_put_interface(data->intf);
148 return 0;
149
150 failed:
151 clear_bit(BTUSB_INTR_RUNNING, &data->flags);
152 clear_bit(HCI_RUNNING, &hdev->flags);
153+ usb_autopm_put_interface(data->intf);
154 return err;
155 }
156
157+static void btusb_stop_traffic(struct btusb_data *data)
158+{
159+ usb_kill_anchored_urbs(&data->intr_anchor);
160+ usb_kill_anchored_urbs(&data->bulk_anchor);
161+ usb_kill_anchored_urbs(&data->isoc_anchor);
162+}
163+
164 static int btusb_close(struct hci_dev *hdev)
165 {
166 struct btusb_data *data = hdev->driver_data;
167+ int err;
168
169 BT_DBG("%s", hdev->name);
170
171@@ -529,13 +599,16 @@ static int btusb_close(struct hci_dev *hdev)
172 cancel_work_sync(&data->work);
173
174 clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
175- usb_kill_anchored_urbs(&data->isoc_anchor);
176-
177 clear_bit(BTUSB_BULK_RUNNING, &data->flags);
178- usb_kill_anchored_urbs(&data->bulk_anchor);
179-
180 clear_bit(BTUSB_INTR_RUNNING, &data->flags);
181- usb_kill_anchored_urbs(&data->intr_anchor);
182+
183+ BT_DBG("kill urbs %s", __func__);
184+ btusb_stop_traffic(data);
185+ err = usb_autopm_get_interface(data->intf);
186+ if (!err) {
187+ data->intf->needs_remote_wakeup = 0;
188+ usb_autopm_put_interface(data->intf);
189+ }
190
191 return 0;
192 }
193@@ -546,6 +619,7 @@ static int btusb_flush(struct hci_dev *hdev)
194
195 BT_DBG("%s", hdev->name);
196
197+ BT_DBG("kill urbs %s", __func__);
198 usb_kill_anchored_urbs(&data->tx_anchor);
199
200 return 0;
201@@ -622,7 +696,7 @@ static int btusb_send_frame(struct sk_buff *skb)
202 urb->dev = data->udev;
203 urb->pipe = pipe;
204 urb->context = skb;
205- urb->complete = btusb_tx_complete;
206+ urb->complete = btusb_isoc_tx_complete;
207 urb->interval = data->isoc_tx_ep->bInterval;
208
209 urb->transfer_flags = URB_ISO_ASAP;
210@@ -633,12 +707,23 @@ static int btusb_send_frame(struct sk_buff *skb)
211 le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
212
213 hdev->stat.sco_tx++;
214- break;
215+ goto skip_waking;
216
217 default:
218 return -EILSEQ;
219 }
220
221+ err = inc_tx(data);
222+ if (err) {
223+
224+ usb_anchor_urb(urb, &data->deferred);
225+ schedule_work(&data->waker);
226+ err = 0;
227+ goto out;
228+ } else {
229+
230+ }
231+skip_waking:
232 usb_anchor_urb(urb, &data->tx_anchor);
233
234 err = usb_submit_urb(urb, GFP_ATOMIC);
235@@ -646,10 +731,13 @@ static int btusb_send_frame(struct sk_buff *skb)
236 BT_ERR("%s urb %p submission failed", hdev->name, urb);
237 kfree(urb->setup_packet);
238 usb_unanchor_urb(urb);
239+ } else {
240+ usb_mark_last_busy(data->udev);
241 }
242
243 usb_free_urb(urb);
244
245+out:
246 return err;
247 }
248
249@@ -721,10 +809,23 @@ static void btusb_work(struct work_struct *work)
250 {
251 struct btusb_data *data = container_of(work, struct btusb_data, work);
252 struct hci_dev *hdev = data->hdev;
253+ int err;
254
255 if (hdev->conn_hash.sco_num > 0) {
256+ if (!data->did_iso_resume) {
257+ err = usb_autopm_get_interface(data->isoc);
258+ if (!err) {
259+ data->did_iso_resume = 1;
260+ } else {
261+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
262+ BT_DBG("kill urbs %s", __func__);
263+ usb_kill_anchored_urbs(&data->isoc_anchor);
264+ return;
265+ }
266+ }
267 if (data->isoc_altsetting != 2) {
268 clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
269+ BT_DBG("kill urbs %s", __func__);
270 usb_kill_anchored_urbs(&data->isoc_anchor);
271
272 if (__set_isoc_interface(hdev, 2) < 0)
273@@ -739,12 +840,28 @@ static void btusb_work(struct work_struct *work)
274 }
275 } else {
276 clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
277+ BT_DBG("kill urbs %s", __func__);
278 usb_kill_anchored_urbs(&data->isoc_anchor);
279
280 __set_isoc_interface(hdev, 0);
281+ if (data->did_iso_resume) {
282+ data->did_iso_resume = 0;
283+ usb_autopm_put_interface(data->isoc);
284+ }
285 }
286 }
287
288+static void btusb_waker(struct work_struct *work)
289+{
290+ struct btusb_data *data = container_of(work, struct btusb_data, waker);
291+ int err;
292+
293+
294+ err = usb_autopm_get_interface(data->intf);
295+ if (!err)
296+ usb_autopm_put_interface(data->intf);
297+}
298+
299 static int btusb_probe(struct usb_interface *intf,
300 const struct usb_device_id *id)
301 {
302@@ -814,11 +931,14 @@ static int btusb_probe(struct usb_interface *intf,
303 spin_lock_init(&data->lock);
304
305 INIT_WORK(&data->work, btusb_work);
306+ INIT_WORK(&data->waker, btusb_waker);
307+ spin_lock_init(&data->txlock);
308
309 init_usb_anchor(&data->tx_anchor);
310 init_usb_anchor(&data->intr_anchor);
311 init_usb_anchor(&data->bulk_anchor);
312 init_usb_anchor(&data->isoc_anchor);
313+ init_usb_anchor(&data->deferred);
314
315 hdev = hci_alloc_dev();
316 if (!hdev) {
317@@ -949,39 +1069,78 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
318
319 BT_DBG("intf %p", intf);
320
321- if (data->suspend_count++)
322+ if (data->suspend_count++) {
323+ BT_DBG("data->suspend_count = %d for intf %p, returning from %s",
324+ data->suspend_count, intf, __func__);
325 return 0;
326+ }
327+ BT_DBG("data->suspend_count = %d for intf %p, continuing %s",
328+ data->suspend_count, intf, __func__);
329+
330+ spin_lock_irq(&data->txlock);
331+ if (!(interface_to_usbdev(intf)->auto_pm && data->tx_in_flight)) {
332+ BT_DBG("Setting BTUSB_SUSPENDING bit in %s for intf %p",
333+ __func__, intf);
334+ set_bit(BTUSB_SUSPENDING, &data->flags);
335+ spin_unlock_irq(&data->txlock);
336+ } else {
337+ spin_unlock_irq(&data->txlock);
338+ BT_DBG("%d URBs in flight", data->tx_in_flight);
339+ data->suspend_count--;
340+ return -EBUSY;
341+ }
342
343 cancel_work_sync(&data->work);
344
345+ BT_DBG("kill urbs %s", __func__);
346+ btusb_stop_traffic(data);
347 usb_kill_anchored_urbs(&data->tx_anchor);
348
349- usb_kill_anchored_urbs(&data->isoc_anchor);
350- usb_kill_anchored_urbs(&data->bulk_anchor);
351- usb_kill_anchored_urbs(&data->intr_anchor);
352-
353 return 0;
354 }
355
356+static void play_deferred(struct btusb_data *data)
357+{
358+ struct urb *urb;
359+ int err;
360+
361+ while ((urb = usb_get_from_anchor(&data->deferred))) {
362+ err = usb_submit_urb(urb, GFP_ATOMIC);
363+ if (err < 0)
364+ break;
365+ else
366+ data->tx_in_flight++;
367+
368+ }
369+ usb_scuttle_anchored_urbs(&data->deferred);
370+}
371+
372 static int btusb_resume(struct usb_interface *intf)
373 {
374 struct btusb_data *data = usb_get_intfdata(intf);
375 struct hci_dev *hdev = data->hdev;
376- int err;
377+ int err = 0;
378
379 BT_DBG("intf %p", intf);
380
381- if (--data->suspend_count)
382+ if (--data->suspend_count) {
383+ BT_DBG("data->suspend_count = %d for intf %p, returning from %s",
384+ data->suspend_count, intf, __func__);
385 return 0;
386+ }
387
388- if (!test_bit(HCI_RUNNING, &hdev->flags))
389- return 0;
390+ if (!test_bit(HCI_RUNNING, &hdev->flags)) {
391+ BT_DBG("HCI not running, returning from %s", __func__);
392+ goto no_io_needed;
393+ }
394
395 if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) {
396 err = btusb_submit_intr_urb(hdev, GFP_NOIO);
397 if (err < 0) {
398 clear_bit(BTUSB_INTR_RUNNING, &data->flags);
399- return err;
400+ BT_DBG("Error (%d) submitting interrupt URB, returning from %s",
401+ err, __func__);
402+ goto err_out;
403 }
404 }
405
406@@ -989,9 +1148,12 @@ static int btusb_resume(struct usb_interface *intf)
407 err = btusb_submit_bulk_urb(hdev, GFP_NOIO);
408 if (err < 0) {
409 clear_bit(BTUSB_BULK_RUNNING, &data->flags);
410- return err;
411- } else
412+ BT_DBG("Error (%d) submitting bulk URB, returning from %s",
413+ err, __func__);
414+ goto err_out;
415+ } else {
416 btusb_submit_bulk_urb(hdev, GFP_NOIO);
417+ }
418 }
419
420 if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
421@@ -1001,7 +1163,24 @@ static int btusb_resume(struct usb_interface *intf)
422 btusb_submit_isoc_urb(hdev, GFP_NOIO);
423 }
424
425+ spin_lock_irq(&data->txlock);
426+ play_deferred(data);
427+ BT_DBG("Clearing BTUSB_SUSPENDING bit in %s for intf %p", __func__, intf);
428+ clear_bit(BTUSB_SUSPENDING, &data->flags);
429+ spin_unlock_irq(&data->txlock);
430+ schedule_work(&data->work);
431+
432 return 0;
433+
434+err_out:
435+ usb_scuttle_anchored_urbs(&data->deferred);
436+no_io_needed:
437+ spin_lock_irq(&data->txlock);
438+ BT_DBG("Clearing BTUSB_SUSPENDING bit in %s for intf %p", __func__, intf);
439+ clear_bit(BTUSB_SUSPENDING, &data->flags);
440+ spin_unlock_irq(&data->txlock);
441+
442+ return err;
443 }
444
445 static struct usb_driver btusb_driver = {
446@@ -1011,6 +1190,7 @@ static struct usb_driver btusb_driver = {
447 .suspend = btusb_suspend,
448 .resume = btusb_resume,
449 .id_table = btusb_table,
450+ .supports_autosuspend = 1,
451 };
452
453 static int __init btusb_init(void)
454diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
455index e70c57e..ac94f91 100644
456--- a/drivers/bluetooth/btusb.c
457+++ b/drivers/bluetooth/btusb.c
458@@ -908,6 +967,7 @@ static int btusb_probe(struct usb_interface *intf,
459 }
460
461 usb_set_intfdata(intf, data);
462+ usb_device_autosuspend_enable(data->udev);
463
464 return 0;
465 }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-fix.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-fix.patch
new file mode 100644
index 0000000000..fabe878413
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-fix.patch
@@ -0,0 +1,26 @@
1From 70ae749d15e012ab4a33aa2abe7a4d97a4dcebdb Mon Sep 17 00:00:00 2001
2From: Li Peng <peng.li@intel.com>
3Date: Thu, 20 Aug 2009 13:54:04 +0800
4Subject: Add G33 series in VGA hotplug support category
5
6Test on the IGD chip, which is a G33-like graphic device.
7---
8 drivers/gpu/drm/i915/i915_drv.h | 2 +-
9 1 files changed, 1 insertions(+), 1 deletions(-)
10
11diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
12index 7537f57..940ee4c 100644
13--- a/drivers/gpu/drm/i915/i915_drv.h
14+++ b/drivers/gpu/drm/i915/i915_drv.h
15@@ -892,7 +892,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
16 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
17 #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
18 #define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
19-#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
20+#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
21 /* dsparb controlled by hw only */
22 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
23
24--
251.6.1.3
26
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-opregion.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-opregion.patch
new file mode 100644
index 0000000000..a9b5e03cec
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-opregion.patch
@@ -0,0 +1,43 @@
1diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
2index e4b4e88..2d51935 100644
3--- a/drivers/gpu/drm/i915/i915_opregion.c
4+++ b/drivers/gpu/drm/i915/i915_opregion.c
5@@ -148,6 +148,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
6 struct drm_i915_private *dev_priv = dev->dev_private;
7 struct opregion_asle *asle = dev_priv->opregion.asle;
8 u32 blc_pwm_ctl, blc_pwm_ctl2;
9+ u32 max_backlight, level, shift;
10
11 if (!(bclp & ASLE_BCLP_VALID))
12 return ASLE_BACKLIGHT_FAIL;
13@@ -157,14 +158,25 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
14 return ASLE_BACKLIGHT_FAIL;
15
16 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
17- blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
18 blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
19
20- if (blc_pwm_ctl2 & BLM_COMBINATION_MODE)
21+ if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
22 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
23- else
24- I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1));
25-
26+ else {
27+ if (IS_IGD(dev)) {
28+ blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
29+ max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
30+ BACKLIGHT_MODULATION_FREQ_SHIFT;
31+ shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
32+ } else {
33+ blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
34+ max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
35+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
36+ shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
37+ }
38+ level = (bclp * max_backlight) / 255;
39+ I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
40+ }
41 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
42
43 return 0;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-vblank-fix.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-vblank-fix.patch
new file mode 100644
index 0000000000..ef136c9877
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-i915-vblank-fix.patch
@@ -0,0 +1,26 @@
1diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
2index f85aaf2..2e5841e 100644
3--- a/drivers/gpu/drm/drm_irq.c
4+++ b/drivers/gpu/drm/drm_irq.c
5@@ -412,6 +412,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
6 dev->vblank_enabled[crtc] = 1;
7 drm_update_vblank_count(dev, crtc);
8 }
9+ } else if (atomic_read(&dev->vblank_refcount[crtc]) > 1) {
10+ atomic_dec(&dev->vblank_refcount[crtc]);
11+ ret = -EINVAL;
12 }
13 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
14
15diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
16index 748ed50..9cb07a5 100644
17--- a/drivers/gpu/drm/i915/intel_display.c
18+++ b/drivers/gpu/drm/i915/intel_display.c
19@@ -1549,6 +1549,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
20
21 /* Wait for vblank for the disable to take effect. */
22 intel_wait_for_vblank(dev);
23+ dev->vblank_enabled[pipe] = 0;
24
25 temp = I915_READ(dpll_reg);
26 if ((temp & DPLL_VCO_ENABLE) != 0) {
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-kms-flip.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-kms-flip.patch
new file mode 100644
index 0000000000..1b85ecba09
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-kms-flip.patch
@@ -0,0 +1,307 @@
1From 4e8354884daa2ee3e491bae69a81f85a2d1ca8ba Mon Sep 17 00:00:00 2001
2From: Fei Jiang <fei.jiang@intel.com>
3Date: Mon, 3 Aug 2009 11:31:53 -0400
4Subject: [PATCH] change for general drm code to implement kms-flip feature
5
6
7Signed-off-by: Fei Jiang <fei.jiang@intel.com>
8---
9 drivers/gpu/drm/drm_crtc.c | 128 ++++++++++++++++++++++++++++++++++++++++++++
10 drivers/gpu/drm/drm_drv.c | 1 +
11 drivers/gpu/drm/drm_irq.c | 30 ++++++++++
12 include/drm/drm.h | 1 +
13 include/drm/drmP.h | 9 +++
14 include/drm/drm_crtc.h | 12 ++++
15 include/drm/drm_mode.h | 16 ++++++
16 7 files changed, 197 insertions(+), 0 deletions(-)
17
18diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
19index 8fab789..3ada446
20--- a/drivers/gpu/drm/drm_crtc.c
21+++ b/drivers/gpu/drm/drm_crtc.c
22@@ -2452,3 +2452,131 @@ out:
23 mutex_unlock(&dev->mode_config.mutex);
24 return ret;
25 }
26+
27+/**
28+ * drm_mode_page_flip_ioctl - page flip ioctl
29+ * @dev: DRM device
30+ * @data: ioctl args
31+ * @file_priv: file private data
32+ *
33+ * The page flip ioctl replaces the current front buffer with a new
34+ * one, using the CRTC's set_base function, which should just update
35+ * the front buffer base pointer. It's up to set_base to make
36+ * sure the update doesn't result in tearing (on some hardware the
37+ * base register is double buffered, so this is easy).
38+ *
39+ * Note that this covers just the simple case of flipping the front
40+ * buffer immediately. Interval handling and interlaced modes have to
41+ * be handled by userspace, or with new ioctls.
42+ */
43+int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data,
44+ struct drm_file *file_priv)
45+{
46+#if 0
47+ struct drm_pending_flip *pending;
48+#endif
49+
50+ struct drm_mode_page_flip *flip_data = data;
51+ struct drm_mode_object *drm_obj, *fb_obj;
52+ struct drm_crtc *crtc;
53+ int ret = 0;
54+
55+ if (!(drm_core_check_feature(dev, DRIVER_MODESET)))
56+ return -ENODEV;
57+
58+ /*
59+ * Reject unknown flags so future userspace knows what we (don't)
60+ * support
61+ */
62+ if (flip_data->flags & (~DRM_MODE_PAGE_FLIP_FLAGS_MASK)) {
63+ DRM_DEBUG("bad page flip flags\n");
64+ return -EINVAL;
65+ }
66+#if 0
67+ pending = kzalloc(sizeof *pending, GFP_KERNEL);
68+ if (pending == NULL)
69+ return -ENOMEM;
70+#endif
71+ mutex_lock(&dev->struct_mutex);
72+
73+ fb_obj = drm_mode_object_find(dev, flip_data->fb_id,
74+ DRM_MODE_OBJECT_FB);
75+ if (!fb_obj) {
76+ DRM_DEBUG("unknown fb %d\n", flip_data->fb_id);
77+ ret = -ENOENT;
78+ goto out_unlock;
79+ }
80+
81+ drm_obj = drm_mode_object_find(dev, flip_data->crtc_id,
82+ DRM_MODE_OBJECT_CRTC);
83+ if (!drm_obj) {
84+ DRM_DEBUG("unknown crtc %d\n", flip_data->crtc_id);
85+ ret = -ENOENT;
86+ goto out_unlock;
87+ }
88+ crtc = obj_to_crtc(drm_obj);
89+ if (!crtc->enabled) {
90+ DRM_DEBUG("crtc %d not enabled\n", flip_data->crtc_id);
91+ ret = -EINVAL;
92+ goto out_unlock;
93+ }
94+
95+#if 0
96+ if (crtc->fb->funcs->unpin == NULL) {
97+ DRM_DEBUG("fb for crtc %d does not support delayed unpin\n",
98+ flip_data->crtc_id);
99+ ret = -ENODEV;
100+ goto out_unlock;
101+ }
102+
103+ pending->crtc = crtc;
104+ pending->old_fb = crtc->fb;
105+ pending->pipe = crtc->pipe;
106+ pending->event.base.type = DRM_EVENT_MODE_PAGE_FLIP;
107+ pending->event.base.length = sizeof pending->event;
108+ pending->event.user_data = flip_data->user_data;
109+ pending->pending_event.event = &pending->event.base;
110+ pending->pending_event.file_priv = file_priv;
111+ pending->pending_event.destroy =
112+ (void (*) (struct drm_pending_event *)) kfree;
113+
114+ /* Get vblank ref for completion handling */
115+ ret = drm_vblank_get(dev, crtc->pipe);
116+ if (ret) {
117+ DRM_DEBUG("failed to take vblank ref\n");
118+ goto out_unlock;
119+ }
120+
121+ pending->frame = drm_vblank_count(dev, crtc->pipe);
122+ list_add_tail(&pending->link, &dev->flip_list);
123+#endif
124+
125+ /*
126+ * The set_base call will change the domain on the new fb,
127+ * which will force the rendering to finish and block the
128+ * ioctl. We need to do this last part from a work queue, to
129+ * avoid blocking userspace here.
130+ */
131+ crtc->fb = obj_to_fb(fb_obj);
132+retry_set:
133+ ret = (*crtc->funcs->set_base)(crtc, 0, 0, NULL);
134+ if (ret == -ERESTARTSYS)
135+ goto retry_set;
136+
137+ if (ret) {
138+ DRM_ERROR("set_base failed: %d\n", ret);
139+ goto out_unlock;
140+ }
141+
142+ mutex_unlock(&dev->struct_mutex);
143+
144+ return 0;
145+
146+out_unlock:
147+ mutex_unlock(&dev->struct_mutex);
148+#if 0
149+ kfree(pending);
150+#endif
151+ return ret;
152+}
153+
154diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
155index 1ce7977..761c2ec
156--- a/drivers/gpu/drm/drm_drv.c
157+++ b/drivers/gpu/drm/drm_drv.c
158@@ -145,6 +145,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
159 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
160 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
161 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
162+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
163 };
164
165 #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
166diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
167index b4a3dbc..d5104df
168--- a/drivers/gpu/drm/drm_irq.c
169+++ b/drivers/gpu/drm/drm_irq.c
170@@ -71,6 +71,28 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
171 return 0;
172 }
173
174+#if 0
175+static void drm_flip_work_func(struct work_struct *work)
176+{
177+ struct drm_device *dev =
178+ container_of(work, struct drm_device, flip_work);
179+#if 0
180+ struct drm_pending_flip *f, *t;
181+#endif
182+ u32 frame;
183+
184+ mutex_lock(&dev->struct_mutex);
185+
186+ list_for_each_entry_safe(f, t, &dev->flip_list, link) {
187+ frame = drm_vblank_count(dev, f->pipe);
188+ if (vblank_after(frame, f->frame))
189+ drm_finish_pending_flip(dev, f, frame);
190+ }
191+
192+ mutex_unlock(&dev->struct_mutex);
193+}
194+#endif
195+
196 static void vblank_disable_fn(unsigned long arg)
197 {
198 struct drm_device *dev = (struct drm_device *)arg;
199@@ -161,6 +183,11 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
200 atomic_set(&dev->vblank_refcount[i], 0);
201 }
202
203+#if 0
204+ INIT_LIST_HEAD(&dev->flip_list);
205+ INIT_WORK(&dev->flip_work, drm_flip_work_func);
206+#endif
207+
208 dev->vblank_disable_allowed = 0;
209
210 return 0;
211@@ -626,5 +653,8 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
212 {
213 atomic_inc(&dev->_vblank_count[crtc]);
214 DRM_WAKEUP(&dev->vbl_queue[crtc]);
215+#if 0
216+ schedule_work(&dev->flip_work);
217+#endif
218 }
219 EXPORT_SYMBOL(drm_handle_vblank);
220diff --git a/include/drm/drm.h b/include/drm/drm.h
221index 7cb50bd..78bd91b
222--- a/include/drm/drm.h
223+++ b/include/drm/drm.h
224@@ -686,6 +686,7 @@ struct drm_gem_open {
225 #define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
226 #define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
227 #define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
228+#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOW( 0xB0, struct drm_mode_page_flip)
229
230 /**
231 * Device specific ioctls should only be in their respective headers
232diff --git a/include/drm/drmP.h b/include/drm/drmP.h
233index c5122bf..36f9e6a
234--- a/include/drm/drmP.h
235+++ b/include/drm/drmP.h
236@@ -976,6 +976,15 @@ struct drm_device {
237 cycles_t ctx_start;
238 cycles_t lck_start;
239
240+ struct work_struct flip_work;
241+
242+#if 0
243+ /**
244+ * List of objects waiting on flip completion
245+ */
246+ struct list_head flip_list;
247+#endif
248+
249 struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
250 wait_queue_head_t buf_readers; /**< Processes waiting to read */
251 wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */
252diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
253index 7300fb8..742c870
254--- a/include/drm/drm_crtc.h
255+++ b/include/drm/drm_crtc.h
256@@ -331,6 +331,16 @@ struct drm_crtc_funcs {
257 void (*destroy)(struct drm_crtc *crtc);
258
259 int (*set_config)(struct drm_mode_set *set);
260+
261+ /*
262+ * Move the crtc on the current fb to the given position.
263+ * This function is optional. If old_fb is provided, the
264+ * function will wait for vblank and unpin it. If old_fb is
265+ * NULL, nothing is unpinned and the caller must call
266+ * mode_unpin_fb to release the old framebuffer.
267+ */
268+ int (*set_base)(struct drm_crtc *crtc, int x, int y,
269+ struct drm_framebuffer *old_fb);
270 };
271
272 /**
273@@ -736,4 +746,6 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
274 extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
275 void *data, struct drm_file *file_priv);
276 extern bool drm_detect_hdmi_monitor(struct edid *edid);
277+extern int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data,
278+ struct drm_file *file_priv);
279 #endif /* __DRM_CRTC_H__ */
280diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
281index ae304cc..464b779
282--- a/include/drm/drm_mode.h
283+++ b/include/drm/drm_mode.h
284@@ -265,4 +265,20 @@ struct drm_mode_crtc_lut {
285 __u64 blue;
286 };
287
288+#define DRM_MODE_PAGE_FLIP_WAIT (1<<0) /* block on previous page flip */
289+#define DRM_MODE_PAGE_FLIP_FLAGS_MASK (DRM_MODE_PAGE_FLIP_WAIT)
290+
291+struct drm_mode_page_flip {
292+ /** Handle of new front buffer */
293+ __u32 fb_id;
294+ __u32 crtc_id;
295+
296+ /* 64 bit cookie returned to userspace in the page flip event. */
297+ __u64 user_data;
298+ /**
299+ * page flip flags (wait on flip only for now)
300+ */
301+ __u32 flags;
302+};
303+
304 #endif
305--
3061.5.3.4
307
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-mem-info.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-mem-info.patch
new file mode 100644
index 0000000000..3b9463f01d
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-drm-mem-info.patch
@@ -0,0 +1,140 @@
1From 5deab387f5b9ec79a6bf7edc52b0653c2a6d44b5 Mon Sep 17 00:00:00 2001
2From: Alan Olsen <alan.r.olsen@intel.com>
3Date: Fri, 11 Sep 2009 15:57:46 -0700
4Subject: [PATCH] linux-2.6.31-drm-mem-info.patch
5
6Signed-off-by: Alan Olsen <alan.r.olsen@intel.com>
7---
8 drivers/gpu/drm/Makefile | 4 +++
9 drivers/gpu/drm/drm_info.c | 58 ++++++++++++++++++++++++++++++++++++++++++++
10 drivers/gpu/drm/drm_proc.c | 2 +
11 include/drm/drmP.h | 2 +
12 4 files changed, 66 insertions(+), 0 deletions(-)
13
14diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
15index fe23f29..d76f167 100644
16--- a/drivers/gpu/drm/Makefile
17+++ b/drivers/gpu/drm/Makefile
18@@ -4,6 +4,10 @@
19
20 ccflags-y := -Iinclude/drm
21
22+ifeq ($(CONFIG_DRM_PSB),y)
23+ ccflags-y += -Idrivers/gpu/drm/psb
24+endif
25+
26 drm-y := drm_auth.o drm_bufs.o drm_cache.o \
27 drm_context.o drm_dma.o drm_drawable.o \
28 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
29diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30index f0f6c6b..0ecc778 100644
31--- a/drivers/gpu/drm/drm_info.c
32+++ b/drivers/gpu/drm/drm_info.c
33@@ -36,6 +36,10 @@
34 #include <linux/seq_file.h>
35 #include "drmP.h"
36
37+#ifdef CONFIG_DRM_PSB
38+#include "psb/psb_drv.h"
39+#endif
40+
41 /**
42 * Called when "/proc/dri/.../name" is read.
43 *
44@@ -211,6 +215,33 @@ int drm_vblank_info(struct seq_file *m, void *data)
45 return 0;
46 }
47
48+int drm_gem_object_mem_info(int id, void *ptr, void *data)
49+{
50+ struct drm_gem_object *obj = ptr;
51+ struct seq_file *m = data;
52+
53+ seq_printf(m, "object 0x%p name %2d memory %8zd\n",
54+ obj, obj->name, obj->size);
55+
56+ return 0;
57+}
58+
59+int drm_gem_clients_info(struct seq_file *m, void *data)
60+{
61+ struct drm_info_node *node = (struct drm_info_node *) m->private;
62+ struct drm_device *dev = node->minor->dev;
63+ struct drm_file *priv;
64+
65+ mutex_lock(&dev->struct_mutex);
66+ list_for_each_entry(priv, &dev->filelist, lhead) {
67+ seq_printf(m, "pid %5d \n", priv->pid);
68+ idr_for_each(&priv->object_idr, &drm_gem_object_mem_info, m);
69+ seq_printf(m, "\n");
70+ }
71+ mutex_unlock(&dev->struct_mutex);
72+ return 0;
73+}
74+
75 /**
76 * Called when "/proc/dri/.../clients" is read.
77 *
78@@ -273,6 +304,33 @@ int drm_gem_object_info(struct seq_file *m, void* data)
79 return 0;
80 }
81
82+#ifdef CONFIG_DRM_PSB
83+int drm_ttm_mem_info(struct seq_file *m, void* data)
84+{
85+ struct drm_info_node *node = (struct drm_info_node *) m->private;
86+ struct drm_device *dev = node->minor->dev;
87+
88+ if (!strncmp("psb", dev->devname, 3)) {
89+ struct ttm_bo_device *bdev = &psb_priv(dev)->bdev;
90+ struct ttm_mem_global *glob = bdev->mem_glob;
91+
92+ spin_lock(&glob->lock);
93+ seq_printf(m, "used memory %llu \n", glob->used_memory);
94+ seq_printf(m, "used total memory %llu \n", glob->used_total_memory);
95+ spin_unlock(&glob->lock);
96+ } else {
97+ seq_printf(m, "This is not a PSB device, no ttm mem info available\n");
98+ }
99+ return 0;
100+}
101+#else
102+int drm_ttm_mem_info(struct seq_file *m, void* data)
103+{
104+ seq_printf(m, "ttm is not used\n");
105+ return 0;
106+}
107+#endif
108+
109 #if DRM_DEBUG_CODE
110
111 int drm_vma_info(struct seq_file *m, void *data)
112diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
113index bbd4b3d..26e64ec 100644
114--- a/drivers/gpu/drm/drm_proc.c
115+++ b/drivers/gpu/drm/drm_proc.c
116@@ -55,6 +55,8 @@ static struct drm_info_list drm_proc_list[] = {
117 {"bufs", drm_bufs_info, 0},
118 {"gem_names", drm_gem_name_info, DRIVER_GEM},
119 {"gem_objects", drm_gem_object_info, DRIVER_GEM},
120+ {"gem_clients", drm_gem_clients_info, DRIVER_GEM},
121+ {"ttm_meminfo", drm_ttm_mem_info, 0},
122 #if DRM_DEBUG_CODE
123 {"vma", drm_vma_info, 0},
124 #endif
125diff --git a/include/drm/drmP.h b/include/drm/drmP.h
126index dbd40f1..5575b9a 100644
127--- a/include/drm/drmP.h
128+++ b/include/drm/drmP.h
129@@ -1355,6 +1355,8 @@ extern int drm_vblank_info(struct seq_file *m, void *data);
130 extern int drm_clients_info(struct seq_file *m, void* data);
131 extern int drm_gem_name_info(struct seq_file *m, void *data);
132 extern int drm_gem_object_info(struct seq_file *m, void* data);
133+extern int drm_gem_clients_info(struct seq_file *m, void *data);
134+extern int drm_ttm_mem_info(struct seq_file *m, void* data);
135
136 #if DRM_DEBUG_CODE
137 extern int drm_vma_info(struct seq_file *m, void *data);
138--
1391.6.0.6
140
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-iegd.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-iegd.patch
new file mode 100644
index 0000000000..fa6a3ea9f1
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-iegd.patch
@@ -0,0 +1,9290 @@
1diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/Kconfig patch_script_temp/drivers/gpu/drm/Kconfig
2--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/Kconfig 2009-08-27 11:30:10.000000000 -0700
3+++ patch_script_temp/drivers/gpu/drm/Kconfig 2009-10-06 10:30:05.000000000 -0700
4@@ -154,3 +154,10 @@
5 Choose this option if you have a Poulsbo or Moorestown platform.
6 If M is selected the module will be called psb.
7
8+
9+config IEGD
10+ tristate "Intel IEGD"
11+ depends on DRM
12+ help
13+ Choose this option for the Intel Embedded Graphics Driver (IEGD)
14+
15diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/Makefile patch_script_temp/drivers/gpu/drm/Makefile
16--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/Makefile 2009-08-27 11:30:10.000000000 -0700
17+++ patch_script_temp/drivers/gpu/drm/Makefile 2009-10-06 10:30:05.000000000 -0700
18@@ -32,3 +32,4 @@
19 obj-$(CONFIG_DRM_SIS) += sis/
20 obj-$(CONFIG_DRM_SAVAGE)+= savage/
21 obj-$(CONFIG_DRM_VIA) +=via/
22+obj-$(CONFIG_IEGD) += iegd/
23diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/Makefile patch_script_temp/drivers/gpu/drm/iegd/Makefile
24--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/Makefile 1969-12-31 17:00:00.000000000 -0700
25+++ patch_script_temp/drivers/gpu/drm/iegd/Makefile 2009-10-06 10:30:05.000000000 -0700
26@@ -0,0 +1,9 @@
27+
28+
29+ccflags-y := -Idrivers/gpu/drm/iegd/include
30+ccflags-y += -Idrivers/char/agp -Iinclude/drm
31+
32+iegd_mod-objs := agp/pci.o agp/global.o agp/drv_alm.o agp/drv_nap.o agp/drv_plb.o agp/drv_cmn.o agp/drv_gn4.o drm/iegd_drv.o drm/iegd_interface.o drm/iegd_interface_265.o drm/iegd_interface_2611.o drm/iegd_interface_2615.o drm/iegd_interface_2624.o drm/psb_irq.o
33+
34+obj-$(CONFIG_IEGD) += iegd_mod.o
35+
36diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/agp_test.c patch_script_temp/drivers/gpu/drm/iegd/agp/agp_test.c
37--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/agp_test.c 1969-12-31 17:00:00.000000000 -0700
38+++ patch_script_temp/drivers/gpu/drm/iegd/agp/agp_test.c 2009-10-06 10:30:05.000000000 -0700
39@@ -0,0 +1,314 @@
40+/* -*- pse-c -*-
41+ *----------------------------------------------------------------------------
42+ * Filename: agp_test.c
43+ * $Revision: 1.5 $
44+ *----------------------------------------------------------------------------
45+ * Unit level test for IEGD AGP
46+ * Copyright © 2009 Intel Corporation.
47+ *
48+ * This program is free software; you can redistribute it and/or modify it
49+ * under the terms and conditions of the GNU General Public License,
50+ * version 2, as published by the Free Software Foundation.
51+ *
52+ * This program is distributed in the hope it will be useful, but WITHOUT
53+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
54+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
55+ * more details.
56+ *
57+ * You should have received a copy of the GNU General Public License along with
58+ * this program; if not, write to the Free Software Foundation, Inc.,
59+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
60+ *
61+ */
62+
63+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
64+ *
65+ * Redistribution and use in source and binary forms, with or without
66+ * modification, are permitted provided that the following conditions are met:
67+ * Redistributions of source code must retain the above copyright notice,
68+ * this list of conditions and the following disclaimer.
69+ *
70+ * Redistributions in binary form must reproduce the above copyright
71+ * notice, this list of conditions and the following disclaimer in the
72+ * documentation and/or other materials provided with the distribution.
73+ *
74+ * Neither the name Intel Corporation nor the names of its contributors
75+ * may be used to endorse or promote products derived from this software
76+ * without specific prior written permission.
77+ *
78+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
79+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
80+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
81+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
82+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
83+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
84+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
85+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
86+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
87+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
88+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
89+ *
90+ */
91+#include <fcntl.h>
92+#include <unistd.h>
93+#include <sys/ioctl.h>
94+#include <stdlib.h>
95+#include <stdio.h>
96+#include <linux/agpgart.h>
97+#include <sys/mman.h>
98+
99+/*#define PAGE_SIZE 1024*/
100+
101+#define VERBOSE "-v"
102+
103+int verbose = 0;
104+int file_desc, temp, length;
105+unsigned char *mmap_gart;
106+
107+int init_agp(void)
108+{
109+ agp_info info;
110+ agp_setup setup;
111+
112+ if (verbose)
113+ {
114+ printf("Testing ioctl AGPIOC_ACQUIRE.\n");
115+ }
116+ if(ioctl(file_desc, AGPIOC_ACQUIRE) != 0)
117+ {
118+ printf("Error on AGPIOC_ACQUIRE.\n");
119+ printf("Reinstall IKM.\n");
120+ exit(-1);
121+ }
122+ if (verbose)
123+ {
124+ printf("Testing ioctl call for info init\n");
125+ }
126+ if(ioctl(file_desc, AGPIOC_INFO, &info) != 0)
127+ {
128+ printf("Error on AGPIOC_INFO\n");
129+ printf("Reinstall IKM.\n");
130+ exit(-1);
131+ }
132+ if (verbose)
133+ {
134+ printf("Testing init info\n version:%i.%i,\n id:0x%lx,\n mode:0x%lx,\n"
135+ " base:0x%lx,\n size:%i,\n total mem:%i,\n system mem:%i,\n"
136+ " used mem:%i\n", info.version.major, info.version.minor,
137+ info.bridge_id, info.agp_mode, info.aper_base, info.aper_size,
138+ info.pg_total, info.pg_system, info.pg_used);
139+
140+ printf("Testing mmap the device\n");
141+ }
142+ length = info.aper_size*0x100000;
143+ mmap_gart = mmap(NULL, info.aper_size*0x100000,
144+ PROT_READ | PROT_WRITE, MAP_SHARED, file_desc, 0);
145+ if(mmap_gart == (unsigned char *) 0xFFFFFFFF)
146+ {
147+ printf("Error on mmap\n");
148+ printf("Reinstall IKM.\n");
149+ close(file_desc);
150+ exit(-1);
151+ }
152+
153+ setup.agp_mode = info.agp_mode;
154+ if (verbose)
155+ {
156+ printf("Testing ioctl AGPIOC_SETUP\n");
157+ }
158+ if(ioctl(file_desc, AGPIOC_SETUP, &setup) != 0)
159+ {
160+ printf("Error on AGPIOC_SETUP\n");
161+ printf("Reinstall IKM.\n");
162+ exit(-1);
163+ }
164+}
165+
166+void gart_unbind(int key)
167+{
168+ agp_unbind unbind;
169+ unbind.key = key;
170+ unbind.priority = 0;
171+ if (verbose)
172+ {
173+ printf("Testing ioctl AGPIOC_UNBIND\n");
174+ }
175+ if(ioctl(file_desc, AGPIOC_UNBIND, &unbind) != 0)
176+ {
177+ printf("Error on AGPIOC_UNBIND\n");
178+ printf("Reinstall IKM.\n");
179+ exit(-1);
180+ }
181+}
182+
183+void gart_bind(int key)
184+{
185+ agp_bind bind;
186+ bind.key = key;
187+ agp_info info;
188+
189+ int page_size = 4000;
190+ int aper_size, gtt_entries, bind_error;
191+
192+ if(ioctl(file_desc, AGPIOC_INFO, &info) != 0)
193+ {
194+ printf("Error on AGPIOC_INFO\n");
195+ printf("Reinstall IKM.\n");
196+ exit(-1);
197+ }
198+ aper_size = info.aper_size;
199+ gtt_entries = aper_size*1000000/page_size;
200+ if (verbose)
201+ {
202+ printf("max memory: %i\n", gtt_entries);
203+ }
204+
205+ do
206+ {
207+ bind_error = 0;
208+ bind.pg_start = page_size;
209+ printf("Testing ioctl AGPIOC_BIND\n");
210+ if(ioctl(file_desc, AGPIOC_BIND, &bind) != 0)
211+ {
212+ if (page_size < gtt_entries)
213+ {
214+ page_size = page_size+4000;
215+ printf("Trying new address for bind %i.\n", page_size);
216+ bind_error = 1;
217+ }
218+ else
219+ {
220+ printf("Error on AGPIOC_BIND\n");
221+ printf("Reinstall IKM.");
222+ exit(-1);
223+ }
224+ }
225+ } while (bind_error);
226+ printf("Sucessful bind.\n");
227+}
228+
229+int gart_alloc(int count)
230+{
231+ agp_allocate allocate;
232+
233+ allocate.type = 0;
234+ allocate.pg_count = count;
235+ if (verbose)
236+ {
237+ printf("Testing ioctl AGPIOC_ALLOCATE\n");
238+ }
239+ if(ioctl(file_desc, AGPIOC_ALLOCATE, &allocate) != 0)
240+ {
241+ printf("Error on AGPIOC_ALLOCATE\n");
242+ printf("Reinstall IKM.");
243+ exit(-1);
244+ }
245+
246+ gart_bind(allocate.key);
247+
248+ return(allocate.key);
249+}
250+
251+void gart_free(int key)
252+{
253+
254+ gart_unbind(key);
255+ if (verbose)
256+ {
257+ printf("Testing ioctl AGPIOC_DEALLOCATE\n");
258+ }
259+ if(ioctl(file_desc, AGPIOC_DEALLOCATE, key) != 0)
260+ {
261+ printf("Error on AGPIOC_DEALLOCATE\n");
262+ printf("Reinstall IKM.\n");
263+ exit(-1);
264+ }
265+}
266+
267+int main(int argc, char *argv[])
268+{
269+ /* Check for verbose mode */
270+ int i, key, key1;
271+ agp_info info;
272+
273+ for (i = 1; i < argc; i++)
274+ {
275+ if(strcmp(argv[1], VERBOSE) == 0)
276+ {
277+ verbose = 1;
278+ printf("Verbose mode.\n");
279+ }
280+ }
281+
282+ /* Open the agpgart */
283+ file_desc=open("/dev/agpgart",O_RDWR);
284+
285+ if(file_desc<0){
286+ printf("Cannot open device file:%d\n",file_desc);
287+ printf("Check for root level permissions.");
288+ exit(-1);
289+ }
290+
291+ if (verbose)
292+ {
293+ printf("Open device file:%d\n",file_desc);
294+ /* This the ioctl that allocates physical memory */
295+ printf("Testing ioctl for memory allocation\n");
296+ }
297+
298+ init_agp();
299+
300+ key = gart_alloc(64);
301+ key1 = gart_alloc(0);
302+ if (verbose)
303+ {
304+ printf("Testing ioctl call for info\n");
305+ }
306+ if(ioctl(file_desc, AGPIOC_INFO, &info) != 0)
307+ {
308+ close(file_desc);
309+ printf("Error on AGPIOC_INFO\n");
310+ printf("Reinstall IKM.\n");
311+ exit(-1);
312+ }
313+
314+ if (verbose)
315+ {
316+ printf("Testing init info\n version:%i.%i,\n id:0x%lx,\n mode:0x%lx,\n"
317+ " base:0x%lx,\n size:%i,\n total mem:%i,\n system mem:%i,\n"
318+ " used mem:%i\n", info.version.major, info.version.minor,
319+ info.bridge_id, info.agp_mode, info.aper_base, info.aper_size,
320+ info.pg_total, info.pg_system, info.pg_used);
321+ }
322+
323+ gart_free(key);
324+ gart_free(key1);
325+
326+ if (munmap(mmap_gart, length) < 0)
327+ {
328+ close(file_desc);
329+ printf("Error on munmap\n");
330+ printf("Reinstall IKM.\n");
331+ exit(-1);
332+ }
333+
334+ if (verbose)
335+ {
336+ printf("Testing ioctl AGPIOC_RELEASE\n");
337+ }
338+ if(ioctl(file_desc, AGPIOC_RELEASE) != 0)
339+ {
340+ close(file_desc);
341+ printf("Error on AGPIOC_RELEASE\n");
342+ printf("Reinstall IKM.");
343+ exit(-1);
344+ }
345+
346+ printf("AGPGART successfully loaded\n");
347+
348+ close(file_desc);
349+
350+ return 0;
351+
352+
353+}
354diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_alm.c patch_script_temp/drivers/gpu/drm/iegd/agp/drv_alm.c
355--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_alm.c 1969-12-31 17:00:00.000000000 -0700
356+++ patch_script_temp/drivers/gpu/drm/iegd/agp/drv_alm.c 2009-10-06 10:30:05.000000000 -0700
357@@ -0,0 +1,447 @@
358+/* -*- pse-c -*-
359+ *----------------------------------------------------------------------------
360+ * Filename: drv_alm.c
361+ * $Revision: 1.7 $
362+ *----------------------------------------------------------------------------
363+ * Gart and DRM driver for Intel Embedded Graphics Driver
364+ * Copyright © 2008, Intel Corporation.
365+ *
366+ * This program is free software; you can redistribute it and/or modify it
367+ * under the terms and conditions of the GNU General Public License,
368+ * version 2, as published by the Free Software Foundation.
369+ *
370+ * This program is distributed in the hope it will be useful, but WITHOUT
371+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
372+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
373+ * more details.
374+ *
375+ * You should have received a copy of the GNU General Public License along with
376+ * this program; if not, write to the Free Software Foundation, Inc.,
377+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
378+ *
379+ */
380+
381+#include "global.h"
382+#include "intelpci.h"
383+
384+static int iegd_alm_configure(void);
385+static int iegd_alm_fetch_size(void);
386+static void iegd_alm_cleanup(void);
387+static void iegd_alm_tlbflush(struct agp_memory *mem);
388+
389+static int iegd_alm_insert_entries(
390+ struct agp_memory *mem,
391+ off_t pg_start,
392+ int type);
393+
394+static int iegd_alm_remove_entries(
395+ struct agp_memory *mem,
396+ off_t pg_start,
397+ int type);
398+
399+struct aper_size_info_fixed intel_i830_sizes[] =
400+{
401+ {128, 32768, 5},
402+ /* The 64M mode still requires a 128k gatt */
403+ {64, 16384, 5},
404+ {256, 65536, 6},
405+ {512, 131072, 7},
406+};
407+
408+struct aper_size_info_fixed intel_i810_sizes[] =
409+{
410+ {64, 16384, 4},
411+ {32, 8192, 4},
412+};
413+
414+bridge_driver_t drv_alm = {
415+ .owner = THIS_MODULE,
416+ .size_type = FIXED_APER_SIZE,
417+ .aperture_sizes = 0,
418+ .num_aperture_sizes = 0,
419+ .needs_scratch_page = TRUE,
420+ .configure = iegd_alm_configure,
421+ .fetch_size = iegd_alm_fetch_size,
422+ .cleanup = iegd_alm_cleanup,
423+ .tlb_flush = iegd_alm_tlbflush,
424+ .mask_memory = iegd_cmn_mask_memory,
425+ .masks = iegd_cmn_masks,
426+ .agp_enable = iegd_cmn_agp_enable,
427+ .cache_flush = global_cache_flush,
428+ .create_gatt_table = NULL,
429+ .free_gatt_table = NULL,
430+ .insert_memory = iegd_alm_insert_entries,
431+ .remove_memory = iegd_alm_remove_entries,
432+ .alloc_by_type = iegd_cmn_alloc_by_type,
433+ .free_by_type = iegd_cmn_free_by_type,
434+ .agp_alloc_page = agp_generic_alloc_page,
435+ .agp_destroy_page = agp_generic_destroy_page,
436+};
437+
438+static int iegd_alm_configure(void)
439+{
440+ struct aper_size_info_fixed *current_size;
441+ u32 temp;
442+ u16 gmch_ctrl;
443+ int i;
444+ int entries_start = 0;
445+
446+ AGN_DEBUG("Enter");
447+
448+ current_size = A_SIZE_FIX(agp_bridge->current_size);
449+
450+ if(private_data.pdev->device == PCI_DEVICE_ID_810 ||
451+ private_data.pdev->device == PCI_DEVICE_ID_810DC ||
452+ private_data.pdev->device == PCI_DEVICE_ID_810E ||
453+ private_data.pdev->device == PCI_DEVICE_ID_815) {
454+
455+ pci_read_config_dword(private_data.pdev, I810_MMADDR, &temp);
456+ temp &= 0xfff80000;
457+
458+ private_data.registers = ioremap(temp, 128*4096);
459+ if(!private_data.registers) {
460+ AGN_ERROR("Unable to remap memory");
461+ return -ENOMEM;
462+ }
463+
464+ if((readl(private_data.registers+I810_DRAM_CTL)
465+ & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
466+ AGN_LOG("Detected 4MB dedicated video RAM.");
467+ private_data.num_dcache_entries = 1024;
468+ }
469+ } else if(private_data.pdev->device == PCI_DEVICE_ID_830M ||
470+ private_data.pdev->device == PCI_DEVICE_ID_845G ||
471+ private_data.pdev->device == PCI_DEVICE_ID_855 ||
472+ private_data.pdev->device == PCI_DEVICE_ID_865G) {
473+
474+ entries_start = private_data.gtt_entries;
475+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
476+ gmch_ctrl |= I830_GMCH_ENABLED;
477+ pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
478+ }
479+
480+ /* Get based address of the graphic aperture */
481+ pci_read_config_dword(private_data.pdev, I810_GMADDR, &temp);
482+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
483+
484+ /* Write the based address of the gtt table to the
485+ * page table control register */
486+ writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED,
487+ private_data.registers+I810_PGETBL_CTL);
488+ readl(private_data.registers+I810_PGETBL_CTL);
489+
490+ if (agp_bridge->driver->needs_scratch_page) {
491+ for (i = entries_start; i < current_size->num_entries; i++) {
492+ writel(agp_bridge->scratch_page,
493+ private_data.registers+I810_PTE_BASE+(i*4));
494+ /* PCI Posting. */
495+ readl(private_data.registers+I810_PTE_BASE+(i*4));
496+ }
497+ }
498+
499+ global_cache_flush();
500+
501+ AGN_DEBUG("Exit");
502+ return 0;
503+}
504+
505+
506+static int iegd_alm_fetch_size(void)
507+{
508+ u32 smram_miscc;
509+ u16 gmch_ctrl;
510+ struct aper_size_info_fixed *values;
511+
512+ AGN_DEBUG("Enter");
513+
514+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
515+
516+ if(private_data.pdev->device == PCI_DEVICE_ID_810 ||
517+ private_data.pdev->device == PCI_DEVICE_ID_810DC ||
518+ private_data.pdev->device == PCI_DEVICE_ID_810E ||
519+ private_data.pdev->device == PCI_DEVICE_ID_815) {
520+
521+ pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC,
522+ &smram_miscc);
523+
524+ if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
525+ printk(KERN_WARNING PFX "i810 is disabled\n");
526+ return 0;
527+ }
528+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) ==
529+ I810_GFX_MEM_WIN_32M) {
530+ agp_bridge->previous_size =
531+ agp_bridge->current_size = (void *) (values + 1);
532+ agp_bridge->aperture_size_idx = 1;
533+ return values[1].size;
534+ } else {
535+ agp_bridge->previous_size =
536+ agp_bridge->current_size = (void *) (values);
537+ agp_bridge->aperture_size_idx = 0;
538+ return values[0].size;
539+ }
540+ } else if(private_data.pdev->device == PCI_DEVICE_ID_830M ||
541+ private_data.pdev->device == PCI_DEVICE_ID_845G ||
542+ private_data.pdev->device == PCI_DEVICE_ID_855 ||
543+ private_data.pdev->device == PCI_DEVICE_ID_865G) {
544+
545+ if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
546+ agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
547+ /* 855GM/852GM/865G has 128MB aperture size */
548+ agp_bridge->previous_size =
549+ agp_bridge->current_size = (void *) values;
550+ agp_bridge->aperture_size_idx = 0;
551+ return values[0].size;
552+ }
553+
554+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
555+
556+ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
557+ agp_bridge->previous_size =
558+ agp_bridge->current_size = (void *) values;
559+ agp_bridge->aperture_size_idx = 0;
560+ return values[0].size;
561+ } else {
562+ agp_bridge->previous_size =
563+ agp_bridge->current_size = (void *) (values + 1);
564+ agp_bridge->aperture_size_idx = 1;
565+ return values[1].size;
566+ }
567+ }
568+
569+ AGN_DEBUG("Exit");
570+
571+ return 0;
572+}
573+
574+static void iegd_alm_cleanup(void)
575+{
576+
577+ AGN_DEBUG("Enter");
578+
579+ if(private_data.pdev->device == PCI_DEVICE_ID_810 ||
580+ private_data.pdev->device == PCI_DEVICE_ID_810DC ||
581+ private_data.pdev->device == PCI_DEVICE_ID_810E ||
582+ private_data.pdev->device == PCI_DEVICE_ID_815) {
583+
584+ writel(0, private_data.registers+I810_PGETBL_CTL);
585+ readl(private_data.registers); /* PCI Posting. */
586+ }
587+
588+ /* Unmap the mapping of the mmio */
589+ iounmap((void *) private_data.registers);
590+
591+ AGN_DEBUG("Exit");
592+}
593+
594+static void iegd_alm_tlbflush(struct agp_memory *mem)
595+{
596+ AGN_DEBUG("Enter");
597+ return;
598+ AGN_DEBUG("Exit");
599+}
600+
601+int AGP_CREATE_GATT(iegd_alm_create_gatt_table)
602+{
603+ int num_entries = 0;
604+ int i830_gtt_page_order = 0;
605+ u32 gtt_bus_addr = 0;
606+ u32 mmio_bus_addr = 0;
607+ char *gtt_table = NULL;
608+ char *gtt_table_end = NULL;
609+ char *current_entry = NULL;
610+ int gtt_enabled = FALSE;
611+ struct page *gtt_table_page = NULL;
612+ struct aper_size_info_fixed *aper_size = NULL;
613+
614+ AGN_DEBUG("Enter");
615+
616+ agp_bridge->gatt_table_real = NULL;
617+ agp_bridge->gatt_table = NULL;
618+ aper_size = (struct aper_size_info_fixed *)agp_bridge->current_size;
619+
620+ /* Find and save the address of the MMIO registers */
621+ pci_read_config_dword(private_data.pdev, I810_MMADDR,
622+ &mmio_bus_addr);
623+ mmio_bus_addr &= 0xFFF80000;
624+
625+ private_data.registers = (volatile u8 *)
626+ ioremap(mmio_bus_addr, 128 * 4096);
627+
628+ if (!private_data.registers) {
629+ AGN_ERROR("ioremap failed to map");
630+ return (-ENOMEM);
631+ }
632+
633+ /* Get value on the control register */
634+ gtt_bus_addr = readl(private_data.registers+I810_PGETBL_CTL) &
635+ 0xFFFFF000;
636+ gtt_enabled = readl(private_data.registers+I810_PGETBL_CTL) &
637+ I810_PGETBL_ENABLED;
638+ global_cache_flush();
639+
640+ /* we have to call this as early as possible after the MMIO base address
641+ * is known */
642+ iegd_cmn_init_gtt_entries();
643+
644+ /* If GTT does not exist, which can happen if a PCI graphics card is the
645+ * boot-up display device, then we will have to allocate the GTT table
646+ * ourselves
647+ */
648+ if (!gtt_enabled) {
649+
650+ AGN_DEBUG("Gtt is disabled");
651+
652+ i830_gtt_page_order = aper_size->page_order;
653+ num_entries = aper_size->num_entries;
654+ gtt_table = (char *) __get_free_pages(
655+ GFP_KERNEL, i830_gtt_page_order);
656+ gtt_table_end = gtt_table +
657+ ((PAGE_SIZE * (1<<i830_gtt_page_order)) - 1);
658+
659+ /* Make sure allocation was successful */
660+ if (NULL == gtt_table) {
661+ AGN_ERROR("Fail to allocate kernel memory");
662+ return -ENOMEM;
663+ }
664+
665+ for (current_entry = gtt_table; current_entry < gtt_table_end;
666+ current_entry += PAGE_SIZE) {
667+ gtt_table_page = virt_to_page(current_entry);
668+ set_bit(PG_reserved, &gtt_table_page->flags);
669+ }
670+ agp_bridge->gatt_bus_addr = virt_to_phys(gtt_table);
671+ } else {
672+ agp_bridge->gatt_bus_addr = gtt_bus_addr;
673+ }
674+
675+ AGN_DEBUG("Exit");
676+ return(0);
677+}
678+
679+
680+static int iegd_alm_insert_entries(
681+ struct agp_memory *mem,
682+ off_t pg_start,
683+ int type)
684+{
685+ int i, j, num_entries;
686+ void *temp;
687+
688+ AGN_DEBUG("Enter");
689+
690+ temp = agp_bridge->current_size;
691+ num_entries = A_SIZE_FIX(temp)->num_entries;
692+
693+ if ((pg_start + mem->page_count) > num_entries) {
694+ AGN_ERROR("Trying to write beyond aperture limit");
695+ AGN_DEBUG("pg_start=0x%.8lx, mem->page_count=%d,"
696+ "num_entries=%d", pg_start, mem->page_count,
697+ num_entries);
698+ return -EINVAL;
699+ }
700+
701+ if(private_data.pdev->device == PCI_DEVICE_ID_830M ||
702+ private_data.pdev->device == PCI_DEVICE_ID_845G ||
703+ private_data.pdev->device == PCI_DEVICE_ID_855 ||
704+ private_data.pdev->device == PCI_DEVICE_ID_865G) {
705+
706+ if (pg_start < private_data.gtt_entries) {
707+ AGN_ERROR("Trying to insert into local/stolen memory");
708+ AGN_DEBUG("pg_start == 0x%.8lx,private_data.gtt_entries =="
709+ "0x%.8x", pg_start,private_data.gtt_entries);
710+ return -EINVAL;
711+ }
712+ } else if(private_data.pdev->device == PCI_DEVICE_ID_810 ||
713+ private_data.pdev->device == PCI_DEVICE_ID_810DC ||
714+ private_data.pdev->device == PCI_DEVICE_ID_810E ||
715+ private_data.pdev->device == PCI_DEVICE_ID_815) {
716+
717+ for (j = pg_start; j < (pg_start + mem->page_count); j++) {
718+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
719+ AGN_ERROR("Device busy");
720+ return -EBUSY;
721+ }
722+ }
723+ if (type != 0 || mem->type != 0) {
724+ if ((type == AGP_DCACHE_MEMORY) &&
725+ (mem->type == AGP_DCACHE_MEMORY)) {
726+ /* special insert */
727+ global_cache_flush();
728+ for (i = pg_start; i < (pg_start + mem->page_count);
729+ i++) {
730+ writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
731+ private_data.registers+I810_PTE_BASE+(i*4));
732+ /* PCI Posting. */
733+ readl(private_data.registers +
734+ I810_PTE_BASE+(i*4));
735+ }
736+ global_cache_flush();
737+ agp_bridge->driver->tlb_flush(mem);
738+ AGN_DEBUG("AGP_DCACHE_MEMORY.. Exit");
739+ return 0;
740+ }
741+ }
742+ }
743+
744+ if ((type != 0 && type != AGP_PHYS_MEMORY) ||
745+ (mem->type != 0 && mem->type != AGP_PHYS_MEMORY)) {
746+ AGN_ERROR("Unsupported memory type");
747+ AGN_DEBUG("mem->type=%x", mem->type);
748+ return -EINVAL;
749+ }
750+
751+ global_cache_flush();
752+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
753+ writel(AGP_MASK_GTT(), private_data.registers+I810_PTE_BASE+(j*4));
754+ /* PCI Posting. */
755+ readl(private_data.registers+I810_PTE_BASE+(j*4));
756+ }
757+
758+ global_cache_flush();
759+ agp_bridge->driver->tlb_flush(mem);
760+
761+ AGN_DEBUG("Exit");
762+
763+ return 0;
764+}
765+
766+static int iegd_alm_remove_entries(
767+ struct agp_memory *mem,
768+ off_t pg_start,
769+ int type)
770+{
771+ int i;
772+
773+ AGN_DEBUG("Enter");
774+
775+ global_cache_flush();
776+
777+ if(private_data.pdev->device == PCI_DEVICE_ID_830M ||
778+ private_data.pdev->device == PCI_DEVICE_ID_845G ||
779+ private_data.pdev->device == PCI_DEVICE_ID_855 ||
780+ private_data.pdev->device == PCI_DEVICE_ID_865G) {
781+
782+ if (pg_start < private_data.gtt_entries) {
783+ AGN_ERROR("Trying to disable local/stolen memory");
784+ AGN_DEBUG("pg_start=0x%.8lx, private_data.gtt_entries=%d",
785+ pg_start, private_data.gtt_entries);
786+ return -EINVAL;
787+ }
788+ }
789+
790+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
791+ writel(agp_bridge->scratch_page,
792+ private_data.registers+I810_PTE_BASE+(i*4));
793+ /* PCI Posting. */
794+ readl(private_data.registers+I810_PTE_BASE+(i*4));
795+ }
796+
797+ global_cache_flush();
798+ agp_bridge->driver->tlb_flush(mem);
799+
800+ AGN_DEBUG("Exit");
801+
802+ return 0;
803+}
804+
805diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_cmn.c patch_script_temp/drivers/gpu/drm/iegd/agp/drv_cmn.c
806--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_cmn.c 1969-12-31 17:00:00.000000000 -0700
807+++ patch_script_temp/drivers/gpu/drm/iegd/agp/drv_cmn.c 2009-10-06 10:30:05.000000000 -0700
808@@ -0,0 +1,682 @@
809+/* -*- pse-c -*-
810+ *----------------------------------------------------------------------------
811+ * Filename: drv_cmn.c
812+ * $Revision: 1.28 $
813+ *----------------------------------------------------------------------------
814+ * Gart and DRM driver for Intel Embedded Graphics Driver
815+ * Copyright © 2008, Intel Corporation.
816+ *
817+ * This program is free software; you can redistribute it and/or modify it
818+ * under the terms and conditions of the GNU General Public License,
819+ * version 2, as published by the Free Software Foundation.
820+ *
821+ * This program is distributed in the hope it will be useful, but WITHOUT
822+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
823+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
824+ * more details.
825+ *
826+ * You should have received a copy of the GNU General Public License along with
827+ * this program; if not, write to the Free Software Foundation, Inc.,
828+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
829+ *
830+ */
831+
832+#include <linux/pagemap.h>
833+#include "global.h"
834+#include "intelpci.h"
835+#include "interface_abs.h"
836+#include "igd_abs.h"
837+
838+static struct agp_memory *alloc_agpphysmem_i8xx(
839+ size_t pg_count, int type);
840+static AGP_MEM_TYPE i8xx_alloc_pages(size_t pg_count,
841+ unsigned int order);
842+
843+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
844+static void i8xx_destroy_pages_by_addr(void *addr,
845+ size_t pg_count, unsigned int order);
846+#define PAGES_OR_MEMORY(a) gart_to_virt(a->memory[0])
847+#define DESTROY_PAGES i8xx_destroy_pages_by_addr
848+#else
849+static void i8xx_destroy_pages(struct page **pages,
850+ size_t pg_count, unsigned int order);
851+#define PAGES_OR_MEMORY(a) a->pages
852+#define DESTROY_PAGES i8xx_destroy_pages
853+#endif
854+
855+dispatch_table_t driver_dispatch_list[] = {
856+ { PCI_DEVICE_ID_810, &drv_alm },
857+ { PCI_DEVICE_ID_810DC, &drv_alm },
858+ { PCI_DEVICE_ID_810E, &drv_alm },
859+ { PCI_DEVICE_ID_815, &drv_alm },
860+ { PCI_DEVICE_ID_830M, &drv_alm },
861+ { PCI_DEVICE_ID_845G, &drv_alm },
862+ { PCI_DEVICE_ID_855, &drv_alm },
863+ { PCI_DEVICE_ID_865G, &drv_alm },
864+ { PCI_DEVICE_ID_915GD, &drv_nap },
865+ { PCI_DEVICE_ID_915AL, &drv_nap },
866+ { PCI_DEVICE_ID_945G, &drv_nap },
867+ { PCI_DEVICE_ID_945GM, &drv_nap },
868+ { PCI_DEVICE_ID_945GME,&drv_nap },
869+ { PCI_DEVICE_ID_Q35, &drv_nap },
870+ { PCI_DEVICE_ID_Q35A2, &drv_nap },
871+ { PCI_DEVICE_ID_965G, &drv_gn4 },
872+ { PCI_DEVICE_ID_946GZ, &drv_gn4 },
873+ { PCI_DEVICE_ID_G965, &drv_gn4 },
874+ { PCI_DEVICE_ID_Q965, &drv_gn4 },
875+ { PCI_DEVICE_ID_GM965, &drv_gn4 },
876+ { PCI_DEVICE_ID_GME965,&drv_gn4 },
877+ { PCI_DEVICE_ID_GM45, &drv_gm45},
878+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
879+ { PCI_DEVICE_ID_PLB, &drv_plb },
880+#endif
881+ { PCI_DEVICE_ID_ELK, &drv_gm45},
882+ { PCI_DEVICE_ID_Q45, &drv_gm45},
883+ { PCI_DEVICE_ID_G45, &drv_gm45},
884+ { PCI_DEVICE_ID_G41, &drv_gm45},
885+ { 0, NULL },
886+};
887+
888+/* Structure contained bit mask for the page table entries */
889+struct gatt_mask iegd_cmn_masks[] =
890+{
891+ {.mask = I810_PTE_VALID, .type = 0},
892+ {.mask = (I810_PTE_VALID | I810_PTE_LOCAL),
893+ .type = AGP_DCACHE_MEMORY},
894+ {.mask = I810_PTE_VALID, .type = 0}
895+};
896+
897+
898+int iegd_cmn_configure(void)
899+{
900+ struct aper_size_info_fixed *current_size;
901+ u32 temp;
902+ u16 gmch_ctrl;
903+ int i;
904+
905+ AGN_DEBUG("Enter");
906+
907+ current_size = A_SIZE_FIX(agp_bridge->current_size);
908+
909+ pci_read_config_dword(private_data.pdev, I915_GMADDR, &temp);
910+
911+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
912+
913+ if(!((private_data.pdev->device == PCI_DEVICE_ID_Q35) ||
914+ (private_data.pdev->device == PCI_DEVICE_ID_Q35A2))) {
915+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
916+ gmch_ctrl |= I830_GMCH_ENABLED;
917+ pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
918+
919+ global_cache_flush();
920+ agp_bridge->driver->tlb_flush(0);
921+
922+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED,
923+ private_data.registers+I810_PGETBL_CTL);
924+ /* PCI Posting. */
925+ readl(private_data.registers+I810_PGETBL_CTL);
926+ }
927+
928+ AGN_DEBUG ("gtt_entries: %X", private_data.gtt_entries);
929+ if (agp_bridge->driver->needs_scratch_page) {
930+ for (i = private_data.gtt_entries;
931+ i < current_size->num_entries; i++) {
932+ writel(agp_bridge->scratch_page, private_data.gtt+i);
933+ readl(private_data.gtt+i); /* PCI Posting. */
934+ }
935+ }
936+ global_cache_flush();
937+
938+ AGN_DEBUG("Exit");
939+
940+ return 0;
941+}
942+
943+void iegd_cmn_init_gtt_entries(void)
944+{
945+ u16 gmch_ctrl;
946+ u32 iegd_scratch, iegd_scratch2;
947+ int gtt_entries;
948+ u8 rdct;
949+ int local = 0;
950+ static const int ddt[4] = { 0, 16, 32, 64 };
951+ int size;
952+ int gtt_enabled = FALSE;
953+
954+ AGN_DEBUG("Enter");
955+
956+ /* This code original read the GMCH_CTRL register of the host
957+ * bridge. This register is also mirrored on the VGA device at
958+ * the same address. In the PLB family, the host bridge no
959+ * longer contains the register. As a result, all platforms
960+ * will now use the mirrored register. This breaks
961+ * compatability with chipsets prior to 915G
962+ */
963+ pci_read_config_word(private_data.pdev, I830_GMCH_CTRL, &gmch_ctrl);
964+
965+ gtt_enabled = readl(private_data.registers + I810_PGETBL_CTL) &
966+ I810_PGETBL_ENABLED;
967+
968+ /* A note on stolen memory:
969+ * Intel chipsets set aside a small area at the top of system memory
970+ * for VGA framebuffers etc. When the Intel device is the VGA
971+ * device, this memory is used to contain the GTT itself, and a scratch
972+ * memory page. Therefore the actual available memory already populated
973+ * in the GTT is the stolen memory minus the 4k scratch page minus the
974+ * 128 page table.
975+ *
976+ * Additionally, the embedded firmware may further alter this amount.
977+ * It can either allocate additional memory to be placed in the GTT
978+ * or use some stolen memory for data. If the IEGD vBIOS has altered
979+ * the amount we can detect it by reading a well-defined scratch
980+ * register.
981+ *
982+ * When the Intel Graphics Device is not the VGA device, i.e.
983+ * the system boots with a PCI card, then this driver discards
984+ * the stolen memory.
985+ *
986+ * We obtain the size of the GTT, which is also stored (for some
987+ * reason) at the top of stolen memory. Then we add 4KB to that
988+ * for the video BIOS popup, which is also stored in there. */
989+
990+ size = agp_bridge->driver->fetch_size() + 4;
991+ AGN_DEBUG("Size from fetch size + 4 = %x", size);
992+
993+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
994+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
995+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
996+ case I830_GMCH_GMS_STOLEN_512:
997+ gtt_entries = KB(512) - KB(size);
998+ break;
999+ case I830_GMCH_GMS_STOLEN_1024:
1000+ gtt_entries = MB(1) - KB(size);
1001+ break;
1002+ case I830_GMCH_GMS_STOLEN_8192:
1003+ gtt_entries = MB(8) - KB(size);
1004+ break;
1005+ case I830_GMCH_GMS_LOCAL:
1006+ rdct = readb(private_data.registers+I830_RDRAM_CHANNEL_TYPE);
1007+ gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
1008+ MB(ddt[I830_RDRAM_DDT(rdct)]);
1009+ local = 1;
1010+ break;
1011+ default:
1012+ gtt_entries = 0;
1013+ break;
1014+ }
1015+ } else {
1016+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
1017+ case I855_GMCH_GMS_STOLEN_1M:
1018+ gtt_entries = MB(1) - KB(size);
1019+ break;
1020+ case I855_GMCH_GMS_STOLEN_4M:
1021+ gtt_entries = MB(4) - KB(size);
1022+ break;
1023+ case I855_GMCH_GMS_STOLEN_8M:
1024+ gtt_entries = MB(8) - KB(size);
1025+ break;
1026+ case I855_GMCH_GMS_STOLEN_16M:
1027+ gtt_entries = MB(16) - KB(size);
1028+ break;
1029+ case I855_GMCH_GMS_STOLEN_32M:
1030+ gtt_entries = MB(32) - KB(size);
1031+ break;
1032+ case I915_GMCH_GMS_STOLEN_48M:
1033+ /* Check it's really I915G */
1034+ if (agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_915GD ||
1035+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_915AL ||
1036+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_945G ||
1037+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_945GM ||
1038+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_945GME ||
1039+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_965G ||
1040+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_G965 ||
1041+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_Q965 ||
1042+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_GM965 ||
1043+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_GME965 ||
1044+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_946GZ )
1045+ gtt_entries = MB(48) - KB(size);
1046+ else
1047+ gtt_entries = 0;
1048+ break;
1049+ case I915_GMCH_GMS_STOLEN_64M:
1050+ /* Check it's really I915G */
1051+ if (agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_915GD ||
1052+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_915AL ||
1053+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_945G ||
1054+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_945GM ||
1055+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_945GME ||
1056+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_965G ||
1057+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_G965 ||
1058+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_Q965 ||
1059+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_GM965 ||
1060+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_GME965 ||
1061+ agp_bridge->dev->device == PCI_DEVICE_ID_BRIDGE_946GZ )
1062+ gtt_entries = MB(64) - KB(size);
1063+ else
1064+ gtt_entries = 0;
1065+ default:
1066+ gtt_entries = 0;
1067+ break;
1068+ }
1069+ }
1070+
1071+ /* if GTT is not enabled, then initialize gtt entries to 0 */
1072+
1073+ if (!gtt_entries) {
1074+ AGN_DEBUG("GTT is disabled");
1075+ AGN_LOG("IGD not primary, throwing away stolen memory.");
1076+
1077+ /* Update the scratch registers to say that we have no stolen memory */
1078+ writel((0xE1DF << 16), private_data.registers + 0x71410);
1079+
1080+ iegd_scratch = readl(private_data.registers + 0x71410);
1081+ iegd_scratch |= 0x4;
1082+
1083+ writel(iegd_scratch, private_data.registers + 0x71410);
1084+
1085+ /* say that we have 0 stolen memory regardless of what was
1086+ * really in there */
1087+ writel(0, private_data.registers + 0x71418);
1088+
1089+ gtt_entries = 0;
1090+ }
1091+
1092+ iegd_scratch = readl(private_data.registers + 0x71410);
1093+
1094+ if(((iegd_scratch>>16) == 0xE1DF) && (iegd_scratch & 0x4)) {
1095+ AGN_LOG("IEGD Firmware Detected");
1096+ /* IEGD firmware found, and Mem Reservation Flag present */
1097+ iegd_scratch2 = readl(private_data.registers + 0x71418);
1098+ gtt_entries = (iegd_scratch2 & 0xFFFF) * 4096;
1099+ }
1100+
1101+ if (gtt_entries > 0)
1102+ AGN_LOG("Detected %dK %s memory.",
1103+ gtt_entries / KB(1), local ? "local" : "stolen");
1104+ else
1105+ AGN_LOG("No pre-allocated video memory detected.\n");
1106+
1107+ gtt_entries /= KB(4);
1108+ private_data.gtt_entries = gtt_entries;
1109+
1110+ AGN_DEBUG("Exit");
1111+}
1112+
1113+int AGP_FREE_GATT(iegd_cmn_free_gatt_table)
1114+{
1115+ AGN_DEBUG("Enter");
1116+ return 0;
1117+ AGN_DEBUG("Exit");
1118+}
1119+
1120+void AGP_ENABLE(iegd_cmn_agp_enable)
1121+{
1122+ AGN_DEBUG("Enter");
1123+ return;
1124+ AGN_DEBUG("Exit");
1125+}
1126+
1127+struct agp_memory *iegd_cmn_alloc_by_type(
1128+ size_t pg_count, int type)
1129+{
1130+ struct agp_memory *new;
1131+
1132+ AGN_DEBUG("Enter");
1133+
1134+ /* AGP_DCACHE_MEMORY use by certain chipset only, especially
1135+ * chipset from almador family. */
1136+ if(private_data.pdev->device == PCI_DEVICE_ID_810 ||
1137+ private_data.pdev->device == PCI_DEVICE_ID_810DC ||
1138+ private_data.pdev->device == PCI_DEVICE_ID_810E ||
1139+ private_data.pdev->device == PCI_DEVICE_ID_815) {
1140+ if (type == AGP_DCACHE_MEMORY) {
1141+ if (pg_count != private_data.num_dcache_entries) {
1142+ AGN_ERROR("Page count error");
1143+ AGN_DEBUG("pg_count=%d, num_dcache_entries=%d",
1144+ pg_count, private_data.num_dcache_entries);
1145+ return NULL;
1146+ }
1147+
1148+ new = agp_create_memory(1);
1149+ if (new == NULL) {
1150+ AGN_ERROR("Allocating memory failed");
1151+ return NULL;
1152+ }
1153+
1154+ new->type = AGP_DCACHE_MEMORY;
1155+ new->page_count = pg_count;
1156+ new->num_scratch_pages = 0;
1157+ vfree(new->AGP_MEMORY_MEMBER); //free pages or memory
1158+ AGN_DEBUG("AGP_DCACHE_MEMORY.. Exit");
1159+ return new;
1160+ }
1161+ }
1162+
1163+ if (type == AGP_PHYS_MEMORY) {
1164+ AGN_DEBUG("AGP_PHYS_MEMORY.. Exit");
1165+ return alloc_agpphysmem_i8xx(pg_count, type);
1166+ }
1167+
1168+ AGN_DEBUG("NULL.. Exit");
1169+ return NULL;
1170+}
1171+
1172+void iegd_cmn_free_by_type(struct agp_memory *curr)
1173+{
1174+ unsigned int order;
1175+
1176+ AGN_DEBUG("Enter");
1177+
1178+ switch (curr->page_count) {
1179+ case 1:
1180+ order = 0; /* pg_count = 1 => 2 ^ 0 */
1181+ break;
1182+ case 4:
1183+ order = 2; /* pg_count = 4 => 2 ^ 2 */
1184+ break;
1185+ case 8:
1186+ order = 3; /* pg_count = 8 => 2 ^ 3 */
1187+ break;
1188+ default:
1189+ /* This case should never happen */
1190+ return;
1191+ }
1192+
1193+ agp_free_key(curr->key);
1194+ if(curr->type == AGP_PHYS_MEMORY) {
1195+ DESTROY_PAGES(PAGES_OR_MEMORY(curr), curr->page_count,
1196+ order);
1197+ IGD_FREE_MEM(curr);
1198+ }
1199+ kfree(curr);
1200+
1201+ AGN_DEBUG("Exit");
1202+}
1203+
1204+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
1205+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
1206+{
1207+ struct agp_memory *new;
1208+ void *addr;
1209+ unsigned int order, i;
1210+
1211+ AGN_DEBUG("Enter");
1212+
1213+ /* To support RGBA hardware cursor which may require contiguous physical
1214+ * * memory to be allocated with either 1, 4 or 8 pages. 8 pages is
1215+ * * the worst case for 830 which requires 4 pages and 4 page alignment.
1216+ * */
1217+ switch (pg_count) {
1218+ case 1:
1219+ order = 0; /* pg_count = 1 => 2 ^ 0 */
1220+ break;
1221+ case 4:
1222+ order = 2; /* pg_count = 4 => 2 ^ 2 */
1223+ break;
1224+ case 8:
1225+ order = 3; /* pg_count = 8 => 2 ^ 3 */
1226+ break;
1227+ default:
1228+ return NULL;
1229+ }
1230+
1231+ addr = i8xx_alloc_pages(pg_count, order);
1232+ if (addr == NULL) {
1233+ AGN_ERROR("Allocating pages failed");
1234+ return NULL;
1235+ }
1236+
1237+ new = agp_create_memory(pg_count);
1238+ if (new == NULL) {
1239+ AGN_ERROR("Allocating memory failed");
1240+ return NULL;
1241+ }
1242+
1243+ new->memory[0] = virt_to_gart(addr);
1244+ for (i = 1; i < pg_count; i++) {
1245+ new->memory[i] = new->memory[i-1] + PAGE_SIZE;
1246+ }
1247+ new->page_count = pg_count;
1248+ new->num_scratch_pages = pg_count;
1249+ new->type = AGP_PHYS_MEMORY;
1250+ new->physical = new->memory[0];
1251+
1252+ AGN_DEBUG("Exit");
1253+ return new;
1254+}
1255+#else // kernel 31 or newer
1256+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
1257+{
1258+ struct agp_memory *new;
1259+ struct page *page;
1260+ unsigned int order, i;
1261+
1262+ AGN_DEBUG("Enter");
1263+
1264+ /* To support RGBA hardware cursor which may require contiguous physical
1265+ * memory to be allocated with either 1, 4 or 8 pages. 8 pages is
1266+ * the worst case for 830 which requires 4 pages and 4 page alignment.
1267+ */
1268+ switch (pg_count) {
1269+ case 1:
1270+ order = 0; /* pg_count = 1 => 2 ^ 0 */
1271+ break;
1272+ case 4:
1273+ order = 2; /* pg_count = 4 => 2 ^ 2 */
1274+ break;
1275+ case 8:
1276+ order = 3; /* pg_count = 8 => 2 ^ 3 */
1277+ break;
1278+ default:
1279+ return NULL;
1280+ }
1281+
1282+ page = i8xx_alloc_pages(pg_count, order);
1283+ if (page == NULL) {
1284+ AGN_ERROR("Allocating pages failed");
1285+ return NULL;
1286+ }
1287+
1288+ new = agp_create_memory(pg_count);
1289+ if (new == NULL) {
1290+ AGN_ERROR("Allocating memory failed");
1291+ return NULL;
1292+ }
1293+
1294+ new->pages[0] = page;
1295+ if (pg_count > 1) { // if page count is 4 or 8
1296+ for (i=0; i< pg_count-1; i++) {
1297+ new->pages[i+1] = new->pages[i] + 1;
1298+ }
1299+ }
1300+ new->page_count = pg_count;
1301+ new->num_scratch_pages = pg_count;
1302+ new->type = AGP_PHYS_MEMORY;
1303+ new->physical = page_to_phys(new->pages[0]);
1304+ return new;
1305+
1306+ AGN_DEBUG("Exit");
1307+}
1308+#endif
1309+
1310+static AGP_MEM_TYPE i8xx_alloc_pages(size_t pg_count, unsigned int order)
1311+{
1312+ struct page * page;
1313+
1314+ AGN_DEBUG("Enter");
1315+
1316+ page = alloc_pages(GFP_KERNEL, order);
1317+ if (page == NULL) {
1318+ AGN_ERROR("Allocating kernel page failed");
1319+ return NULL;
1320+ }
1321+
1322+ if (SET_PAGES_UC(page, pg_count) < 0) {
1323+ SET_PAGES_WB(page, pg_count);
1324+ GLOBAL_FLUSH_TLB();
1325+ __free_pages(page, pg_count);
1326+ AGN_ERROR("Change page attribute failed");
1327+ return NULL;
1328+ }
1329+ GLOBAL_FLUSH_TLB();
1330+ get_page(page);
1331+
1332+ /*
1333+ * Starting kernel 2.6.23 locking will causing lot of trouble. This is
1334+ * because of the changes in page fault handler in the kernel.
1335+ */
1336+ AGP_LOCK_PAGE(page);
1337+ atomic_inc(&agp_bridge->current_memory_agp);
1338+ return PAGE_ADDRESS(page); //returns page or addr depending on kernel
1339+
1340+ AGN_DEBUG("Exit");
1341+}
1342+
1343+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
1344+static void i8xx_destroy_pages_by_addr(void *addr,
1345+ size_t pg_count, unsigned int order)
1346+{
1347+ struct page *page;
1348+
1349+ AGN_DEBUG("Enter");
1350+
1351+ if (addr == NULL)
1352+ return;
1353+
1354+ page = virt_to_page(addr);
1355+ SET_PAGES_WB(page, pg_count);
1356+ GLOBAL_FLUSH_TLB();
1357+ put_page(page);
1358+ /*
1359+ * Starting kernel 2.6.23 locking will causing lot of trouble. This is
1360+ * because of the changes in page fault handler in the kernel.
1361+ */
1362+ AGP_UNLOCK_PAGE(page);
1363+
1364+ free_pages((unsigned long)addr, order);
1365+ atomic_dec(&agp_bridge->current_memory_agp);
1366+
1367+ AGN_DEBUG("Exit");
1368+}
1369+
1370+#else //kernel is 31 or newer
1371+static void i8xx_destroy_pages(struct page **pages,
1372+ size_t pg_count, unsigned int order)
1373+{
1374+ struct page *page;
1375+ int i;
1376+
1377+ AGN_DEBUG("Enter");
1378+
1379+ if (pages == NULL)
1380+ return;
1381+
1382+ GLOBAL_FLUSH_TLB();
1383+ //The following code is based on agp_generic_destroy_pages in generic.c
1384+ for (i = 0; i < pg_count; i++) {
1385+ page = pages[i];
1386+
1387+ put_page(page);
1388+ __free_page(page);
1389+ atomic_dec(&agp_bridge->current_memory_agp);
1390+ pages[i] = NULL;
1391+ }
1392+
1393+ AGN_DEBUG("Exit");
1394+}
1395+#endif
1396+
1397+unsigned long AGP_MASK_MEMORY(iegd_cmn_mask_memory)
1398+{
1399+ struct agp_bridge_data *brdg = AGP_BRIDGE_VAR;
1400+
1401+ // only converts if kernel is 2.6.31 or newer
1402+ unsigned long address = CONVERT_PAGE_TO_GART(addr);
1403+
1404+ /* Type checking must be done elsewhere */
1405+ return address | AGP_MASK_ADDR(brdg);
1406+}
1407+
1408+int iegd_cmn_insert_entries(struct agp_memory *mem,
1409+ off_t pg_start, int type)
1410+{
1411+ int i,j,num_entries;
1412+ void *temp;
1413+
1414+ AGN_DEBUG("Enter");
1415+
1416+ temp = agp_bridge->current_size;
1417+ num_entries = A_SIZE_FIX(temp)->num_entries;
1418+
1419+ if (pg_start < private_data.gtt_entries) {
1420+ AGN_ERROR("Trying to insert into local/stolen memory");
1421+ AGN_DEBUG("pg_start == 0x%.8lx,private_data.gtt_entries =="
1422+ "%d", pg_start,private_data.gtt_entries);
1423+ return -EINVAL;
1424+ }
1425+
1426+ /* If we try to write beyond gtt table, return error */
1427+ if ((pg_start + mem->page_count) > num_entries) {
1428+ AGN_ERROR("Trying to write beyond aperture limit");
1429+ AGN_DEBUG("pg_start=0x%.8lx, mem->page_count=%d,"
1430+ "num_entries=%d", pg_start, mem->page_count,
1431+ num_entries);
1432+ return -EINVAL;
1433+ }
1434+
1435+ /* The i830 can't check the GTT for entries since its read only,
1436+ * depend on the caller to make the correct offset decisions.
1437+ */
1438+
1439+ if ((type != 0 && type != AGP_PHYS_MEMORY) ||
1440+ (mem->type != 0 && mem->type != AGP_PHYS_MEMORY)) {
1441+ AGN_ERROR("Unsupported memory type");
1442+ AGN_DEBUG("mem->type=%x, type=%x", mem->type, type);
1443+ return -EINVAL;
1444+ }
1445+
1446+ global_cache_flush();
1447+ agp_bridge->driver->tlb_flush(mem);
1448+
1449+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
1450+ writel(AGP_MASK_GTT(), private_data.gtt+j);
1451+ readl(private_data.gtt+j); /* PCI Posting. */
1452+ }
1453+
1454+ global_cache_flush();
1455+ agp_bridge->driver->tlb_flush(mem);
1456+
1457+ AGN_DEBUG("Exit");
1458+
1459+ return 0;
1460+}
1461+
1462+int iegd_cmn_remove_entries(struct agp_memory *mem,
1463+ off_t pg_start, int type)
1464+{
1465+ int i;
1466+
1467+ AGN_DEBUG("Enter");
1468+
1469+ global_cache_flush();
1470+ agp_bridge->driver->tlb_flush(mem);
1471+
1472+ if (pg_start < private_data.gtt_entries) {
1473+ AGN_ERROR("Trying to disable local/stolen memory");
1474+ AGN_DEBUG("pg_start=0x%.8lx, private_data.gtt_entries=%d",
1475+ pg_start, private_data.gtt_entries);
1476+ return -EINVAL;
1477+ }
1478+
1479+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1480+ writel(agp_bridge->scratch_page, private_data.gtt+i);
1481+ readl(private_data.gtt+i);
1482+ }
1483+
1484+ global_cache_flush();
1485+ agp_bridge->driver->tlb_flush(mem);
1486+
1487+ AGN_DEBUG("Exit");
1488+
1489+ return 0;
1490+}
1491diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_gn4.c patch_script_temp/drivers/gpu/drm/iegd/agp/drv_gn4.c
1492--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_gn4.c 1969-12-31 17:00:00.000000000 -0700
1493+++ patch_script_temp/drivers/gpu/drm/iegd/agp/drv_gn4.c 2009-10-06 10:30:05.000000000 -0700
1494@@ -0,0 +1,455 @@
1495+/* -*- pse-c -*-
1496+ *----------------------------------------------------------------------------
1497+ * Filename: iegd_interface.c
1498+ * $Revision: 1.17 $
1499+ *----------------------------------------------------------------------------
1500+ * Gart and DRM driver for Intel Embedded Graphics Driver
1501+ * Copyright © 2007, Intel Corporation.
1502+ *
1503+ * This program is free software; you can redistribute it and/or modify it
1504+ * under the terms and conditions of the GNU General Public License,
1505+ * version 2, as published by the Free Software Foundation.
1506+ *
1507+ * This program is distributed in the hope it will be useful, but WITHOUT
1508+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1509+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1510+ * more details.
1511+ *
1512+ * You should have received a copy of the GNU General Public License along with
1513+ * this program; if not, write to the Free Software Foundation, Inc.,
1514+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
1515+ *
1516+ */
1517+
1518+#include "global.h"
1519+#include "intelpci.h"
1520+
1521+static int iegd_gn4_fetch_size(void);
1522+static void iegd_gn4_cleanup(void);
1523+static void iegd_gn4_tlbflush(struct agp_memory *mem);
1524+static int AGP_CREATE_GATT(iegd_gn4_create_gatt_table);
1525+
1526+/* GM45 functions */
1527+static int iegd_igm45_fetch_size(void);
1528+static int iegd_igm45_configure(void);
1529+static int AGP_CREATE_GATT(iegd_igm45_create_gatt_table);
1530+
1531+struct aper_size_info_fixed iegd_i965_sizes[] =
1532+{
1533+ /* VBIOS always allocates enough space for 512MB aperture */
1534+ /* Size KB, # of entries, ? */
1535+ {128, 131072, 7},
1536+ {64, 131072, 7},
1537+ {256, 131072, 7},
1538+ {512, 131072, 7},
1539+};
1540+
1541+struct aper_size_info_fixed iegd_igm45_sizes[] =
1542+{
1543+ /* GM45 has 2MB GTT (EDS page 217) size */
1544+ /* Size_KB #_of_entries ? */
1545+ {256, 524288, 7},
1546+ {512, 524288, 7},
1547+};
1548+
1549+
1550+bridge_driver_t drv_gn4 = {
1551+ .owner = THIS_MODULE,
1552+ .size_type = FIXED_APER_SIZE,
1553+ .aperture_sizes = 0,
1554+ .num_aperture_sizes = 0,
1555+ .needs_scratch_page = TRUE,
1556+ .configure = iegd_cmn_configure,
1557+ .fetch_size = iegd_gn4_fetch_size,
1558+ .cleanup = iegd_gn4_cleanup,
1559+ .tlb_flush = iegd_gn4_tlbflush,
1560+ .mask_memory = iegd_cmn_mask_memory,
1561+ .masks = iegd_cmn_masks,
1562+ .agp_enable = iegd_cmn_agp_enable,
1563+ .cache_flush = global_cache_flush,
1564+ .create_gatt_table = iegd_gn4_create_gatt_table,
1565+ .free_gatt_table = iegd_cmn_free_gatt_table,
1566+ .insert_memory = iegd_cmn_insert_entries,
1567+ .remove_memory = iegd_cmn_remove_entries,
1568+ .alloc_by_type = iegd_cmn_alloc_by_type,
1569+ .free_by_type = iegd_cmn_free_by_type,
1570+ .agp_alloc_page = agp_generic_alloc_page,
1571+ .agp_destroy_page = agp_generic_destroy_page,
1572+};
1573+
1574+/* GM45 */
1575+bridge_driver_t drv_gm45 = {
1576+ .owner = THIS_MODULE,
1577+ .size_type = FIXED_APER_SIZE,
1578+ .aperture_sizes = (void *)iegd_igm45_sizes,
1579+ .num_aperture_sizes = 3,
1580+ .needs_scratch_page = TRUE,
1581+ .configure = iegd_igm45_configure,
1582+ .fetch_size = iegd_igm45_fetch_size,
1583+ .cleanup = iegd_gn4_cleanup,
1584+ .tlb_flush = iegd_gn4_tlbflush,
1585+ .mask_memory = iegd_cmn_mask_memory,
1586+ .masks = iegd_cmn_masks,
1587+ .agp_enable = iegd_cmn_agp_enable,
1588+ .cache_flush = global_cache_flush,
1589+ .create_gatt_table = iegd_igm45_create_gatt_table,
1590+ .free_gatt_table = iegd_cmn_free_gatt_table,
1591+ .insert_memory = iegd_cmn_insert_entries,
1592+ .remove_memory = iegd_cmn_remove_entries,
1593+ .alloc_by_type = iegd_cmn_alloc_by_type,
1594+ .free_by_type = iegd_cmn_free_by_type,
1595+ .agp_alloc_page = agp_generic_alloc_page,
1596+ .agp_destroy_page = agp_generic_destroy_page,
1597+};
1598+
1599+
1600+static int iegd_gn4_fetch_size(void)
1601+{
1602+ struct aper_size_info_fixed *values;
1603+ u32 offset = 0;
1604+ u8 temp;
1605+
1606+#define IG965_GMCH_MSAC 0x62
1607+#define IGM965_GMCH_MSAC 0x66
1608+
1609+ AGN_DEBUG("Enter");
1610+
1611+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
1612+
1613+ if(private_data.pdev->device == PCI_DEVICE_ID_GM965) {
1614+ pci_read_config_byte(private_data.pdev, IGM965_GMCH_MSAC, &temp);
1615+ } else {
1616+ pci_read_config_byte(private_data.pdev, IG965_GMCH_MSAC, &temp);
1617+ }
1618+
1619+ switch (temp & 6) {
1620+ case 0:
1621+ offset = 0; /* 128MB aperture */
1622+ break;
1623+ case 2:
1624+ offset = 2; /* 256MB aperture */
1625+ break;
1626+ case 6:
1627+ offset = 3; /* 512MB aperture */
1628+ break;
1629+ }
1630+
1631+ /* Set the actual size here */
1632+ agp_bridge->previous_size = agp_bridge->current_size =
1633+ (void *)(values + offset);
1634+
1635+ AGN_DEBUG("Exit");
1636+
1637+ /* Always return 512KB GTT when calculating available stolen memory */
1638+ return values[3].size;
1639+}
1640+
1641+static void iegd_gn4_cleanup(void)
1642+{
1643+ AGN_DEBUG("Enter");
1644+ iounmap((void *)private_data.registers);
1645+ AGN_DEBUG("Exit");
1646+}
1647+
1648+static void iegd_gn4_tlbflush(struct agp_memory *mem)
1649+{
1650+ AGN_DEBUG("Enter");
1651+ /* Gen4 must flush the GTT or simple 2D rendering will lock the engine. */
1652+ writel(0, private_data.registers+0x2170);
1653+ writel(0, private_data.registers+0x2174);
1654+ AGN_DEBUG("Exit");
1655+ return;
1656+}
1657+
1658+static int AGP_CREATE_GATT(iegd_gn4_create_gatt_table)
1659+{
1660+ const u32 i965_gtt_table_order = 7;
1661+
1662+ int i;
1663+ u16 j = 0;
1664+ int num_entries;
1665+ u32 gtt_bus_addr;
1666+ u32 mmio_bus_addr;
1667+ u32 gtt_enabled = FALSE;
1668+ u32 gtt_table_size = (1 << i965_gtt_table_order) * PAGE_SIZE - 1;
1669+ u32 gtt_pgctl_reg;
1670+ char *gtt_table, *gtt_table_end, *current_entry;
1671+ struct page *gtt_table_page;
1672+
1673+ AGN_DEBUG("Enter");
1674+
1675+ agp_bridge->gatt_table_real = NULL;
1676+
1677+ /* Find and save the address of the MMIO register */
1678+ pci_read_config_dword(private_data.pdev, I915_MMADDR, &mmio_bus_addr);
1679+
1680+ mmio_bus_addr &= 0xFFF80000;
1681+ private_data.registers =(volatile u8 *)
1682+ ioremap(mmio_bus_addr,1024 * 4096);
1683+ if (!private_data.registers) {
1684+ AGN_ERROR("ioremap failed to map");
1685+ return (-ENOMEM);
1686+ }
1687+ /* GTT is mapped 512KB after the registers */
1688+ private_data.gtt = (u32 __iomem *)((u32)private_data.registers +
1689+ 512*1024);
1690+
1691+ /* Extract the content of the control register */
1692+ gtt_pgctl_reg = readl(private_data.registers+I810_PGETBL_CTL);
1693+ gtt_bus_addr = gtt_pgctl_reg & 0xFFFFF000;
1694+ gtt_enabled = gtt_pgctl_reg & I810_PGETBL_ENABLED;
1695+
1696+ global_cache_flush();
1697+ agp_bridge->driver->tlb_flush(0);
1698+
1699+ /* we have to call this as early as possible after the MMIO base address is known */
1700+ iegd_cmn_init_gtt_entries();
1701+
1702+ if( !gtt_enabled ) {
1703+ num_entries = iegd_i965_sizes[0].num_entries;
1704+ gtt_table = (char *) __get_free_pages(GFP_KERNEL,
1705+ i965_gtt_table_order);
1706+ gtt_table_end = gtt_table + gtt_table_size;
1707+
1708+ /* Make sure allocation was successful */
1709+ if( NULL == gtt_table ) {
1710+ AGN_ERROR("Fail to allocate kernel pages");
1711+ return (-ENOMEM);
1712+ }
1713+
1714+ for( current_entry = gtt_table; current_entry < gtt_table_end;
1715+ current_entry += PAGE_SIZE ) {
1716+ gtt_table_page = virt_to_page( current_entry );
1717+ set_bit( PG_reserved, &gtt_table_page->flags );
1718+ }
1719+
1720+ agp_bridge->gatt_bus_addr = virt_to_phys( gtt_table );
1721+
1722+ for( i = 0; i < num_entries; i++ ) {
1723+ *(gtt_table + j) = (unsigned long) agp_bridge->scratch_page;
1724+ j += 4;
1725+ }
1726+ }
1727+ else {
1728+ agp_bridge->gatt_bus_addr = gtt_bus_addr;
1729+ }
1730+
1731+ agp_bridge->gatt_table = NULL;
1732+
1733+ AGN_DEBUG("Exit");
1734+
1735+ return(0);
1736+}
1737+
1738+static int AGP_CREATE_GATT(iegd_igm45_create_gatt_table)
1739+{
1740+ u32 mmio_bus_addr;
1741+
1742+ u32 gtt_mem_size;
1743+ u32 base_stolen_mem;
1744+ u16 gmch_ctrl;
1745+
1746+ u32 iegd_scratch, iegd_scratch2;
1747+ int gtt_entries;
1748+ int size = 4; /* Scratch page 4KB */
1749+
1750+ AGN_DEBUG("Enter");
1751+
1752+ agp_bridge->gatt_table_real = NULL;
1753+
1754+ /* Find and save the address of the MMIO register */
1755+ pci_read_config_dword(private_data.pdev, I915_MMADDR, &mmio_bus_addr);
1756+
1757+ /* Bits 35-22 */
1758+ mmio_bus_addr &= 0xFFC00000;
1759+
1760+ /* Map 4MB: 512KB MMIO, 2MB GTT */
1761+ private_data.registers =(volatile u8 *) ioremap(mmio_bus_addr, MB(4));
1762+
1763+ if (!private_data.registers) {
1764+ AGN_ERROR("ioremap failed to map");
1765+ return (-ENOMEM);
1766+ }
1767+
1768+ /* GTT is mapped 2MB after the registers */
1769+ private_data.gtt = (u32 __iomem *)((u32)private_data.registers + MB(2));
1770+
1771+ global_cache_flush();
1772+ agp_bridge->driver->tlb_flush(0);
1773+
1774+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL, &gmch_ctrl);
1775+
1776+#define IGM45_GMCH_GMS_STOLEN_128M (0x8 << 4)
1777+#define IGM45_GMCH_GMS_STOLEN_256M (0x9 << 4)
1778+#define IGM45_BASE_STOLEN 0x5C
1779+ pci_read_config_dword(private_data.pdev,IGM45_BASE_STOLEN,&base_stolen_mem);
1780+ base_stolen_mem &= 0xFFF00000;
1781+
1782+ /* Bits [7:4] will tell the amount of stolen memory */
1783+ /* Stolen memory = Amount specified - 1 scratch page */
1784+ switch (gmch_ctrl & 0xf0) {
1785+ case I855_GMCH_GMS_STOLEN_1M:
1786+ gtt_entries = MB(1) - KB(size);
1787+ break;
1788+ case I855_GMCH_GMS_STOLEN_4M:
1789+ gtt_entries = MB(4) - KB(size);
1790+ break;
1791+ case I855_GMCH_GMS_STOLEN_8M:
1792+ gtt_entries = MB(8) - KB(size);
1793+ break;
1794+ case I855_GMCH_GMS_STOLEN_16M:
1795+ gtt_entries = MB(16) - KB(size);
1796+ break;
1797+ case I855_GMCH_GMS_STOLEN_32M:
1798+ gtt_entries = MB(32) - KB(size);
1799+ break;
1800+ case I915_GMCH_GMS_STOLEN_48M:
1801+ gtt_entries = MB(48) - KB(size);
1802+ break;
1803+ case I915_GMCH_GMS_STOLEN_64M:
1804+ gtt_entries = MB(64) - KB(size);
1805+ break;
1806+ case IGM45_GMCH_GMS_STOLEN_128M:
1807+ gtt_entries = MB(128) - KB(size);
1808+ break;
1809+ case IGM45_GMCH_GMS_STOLEN_256M:
1810+ gtt_entries = MB(256) - KB(size);
1811+ break;
1812+ default:
1813+ gtt_entries = 0;
1814+ break;
1815+ }
1816+
1817+ iegd_scratch = readl(private_data.registers + 0x71410);
1818+
1819+ /* check for the pci card as primary */
1820+ if (iegd_scratch == 0) {
1821+ /* No stolen memory has been used */
1822+ /*
1823+ * In Gen4, GTT is 2MB below stolen memory, which is a fixed location
1824+ * The GTT is empty.
1825+ * Populate the GTT with PTE point to the stolen memory.
1826+ * This will not waste the stolen memory which BIOS already allocated.
1827+ */
1828+ int num_entries;
1829+ int i;
1830+ u16 j = 0;
1831+
1832+ AGN_DEBUG("PCI as primary.\n");
1833+
1834+ num_entries = gtt_entries / KB (4);
1835+
1836+ for (i = 0; i < num_entries; i++) {
1837+ writel(((base_stolen_mem + i * KB(4)) | 1), private_data.gtt+j);
1838+ j+=1;
1839+ }
1840+
1841+ gtt_entries = num_entries * KB(4);
1842+
1843+ AGN_DEBUG("PCI as primary scratch_page = %08lx gtt_entries = %d",
1844+ agp_bridge->scratch_page, gtt_entries);
1845+ } else if (((iegd_scratch>>16) == 0xE1DF) && (iegd_scratch & 0x4)) {
1846+ AGN_LOG("IEGD Firmware Detected");
1847+ /* IEGD firmware found, and Mem Reservation Flag present */
1848+ iegd_scratch2 = readl(private_data.registers + 0x71418);
1849+ /* Stolen memory = # of pages * 4KB */
1850+ gtt_entries = (iegd_scratch2 & 0xFFFF) * 4096;
1851+ }
1852+
1853+ if (gtt_entries > 0) {
1854+ AGN_LOG("Detected %d KB = %d MB stolen memory.", gtt_entries / KB(1),
1855+ gtt_entries/MB(1));
1856+ } else {
1857+ AGN_LOG("No pre-allocated video memory detected.");
1858+ }
1859+
1860+ /* Divide by 4KB to get the # of GTT entries */
1861+ private_data.gtt_entries = gtt_entries/KB(4);
1862+
1863+
1864+ /* On GM45, GTTADR size is 2MB. EDS page 217 */
1865+ gtt_mem_size = MB(2);
1866+
1867+ AGN_DEBUG("gtt_mem_size = %uMB", gtt_mem_size/MB(1));
1868+
1869+ /* Minus base stolen memory to get the base of gtt. This address
1870+ * can also get from register 0xA8 of config space device 0 */
1871+ agp_bridge->gatt_bus_addr = base_stolen_mem - gtt_mem_size;
1872+ agp_bridge->gatt_table = NULL;
1873+ AGN_DEBUG("Exit");
1874+
1875+ return(0);
1876+}
1877+
1878+/* GM45: configure */
1879+static int iegd_igm45_configure(void)
1880+{
1881+ struct aper_size_info_fixed *current_size;
1882+ u32 temp;
1883+ int i;
1884+
1885+ AGN_DEBUG("Enter");
1886+
1887+ current_size = A_SIZE_FIX(agp_bridge->current_size);
1888+
1889+ pci_read_config_dword(private_data.pdev, I915_GMADDR, &temp);
1890+ AGN_DEBUG("1. Reg[0x%x] = 0x%x\n", I915_GMADDR, temp);
1891+
1892+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1893+ AGN_DEBUG("2. Reg[0x%x] = 0x%x\n", I915_GMADDR, temp);
1894+
1895+ if (agp_bridge->driver->needs_scratch_page) {
1896+ for (i = private_data.gtt_entries;
1897+ i < current_size->num_entries; i++) {
1898+ writel(agp_bridge->scratch_page, private_data.gtt+i);
1899+ readl(private_data.gtt+i); /* PCI Posting. */
1900+ }
1901+ }
1902+ global_cache_flush();
1903+
1904+ AGN_DEBUG("Exit");
1905+
1906+ return 0;
1907+}
1908+
1909+/* GM45: fetch_size() */
1910+static int iegd_igm45_fetch_size(void)
1911+{
1912+ struct aper_size_info_fixed *values;
1913+ u32 offset = 0;
1914+ u8 temp;
1915+
1916+#define IGM45_GMCH_MSAC 0x66
1917+#define Q45_GMCH_MSAC 0x62
1918+
1919+ AGN_DEBUG("Enter");
1920+
1921+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
1922+
1923+ if(private_data.pdev->device == PCI_DEVICE_ID_ELK ||
1924+ private_data.pdev->device == PCI_DEVICE_ID_Q45 ||
1925+ private_data.pdev->device == PCI_DEVICE_ID_G45 ||
1926+ private_data.pdev->device == PCI_DEVICE_ID_G41) {
1927+ pci_read_config_byte(private_data.pdev, Q45_GMCH_MSAC, &temp);
1928+ } else {
1929+ pci_read_config_byte(private_data.pdev, IGM45_GMCH_MSAC, &temp);
1930+ }
1931+
1932+ /* GM45 has only 2 aperture sizes (EDS 227) : 256MB/512MB */
1933+ switch (temp & 6) {
1934+ case 2:
1935+ offset = 0; /* 256MB aperture */
1936+ break;
1937+ case 6:
1938+ offset = 1; /* 512MB aperture */
1939+ break;
1940+ }
1941+
1942+ /* Set the actual size here */
1943+ agp_bridge->previous_size = agp_bridge->current_size =
1944+ (void *)(values + offset);
1945+
1946+ AGN_DEBUG("Exit");
1947+ /* For GM45 always return 2MB as GTT size */
1948+ return values[0].size;
1949+}
1950diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_nap.c patch_script_temp/drivers/gpu/drm/iegd/agp/drv_nap.c
1951--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_nap.c 1969-12-31 17:00:00.000000000 -0700
1952+++ patch_script_temp/drivers/gpu/drm/iegd/agp/drv_nap.c 2009-10-06 10:30:05.000000000 -0700
1953@@ -0,0 +1,470 @@
1954+/* -*- pse-c -*-
1955+ *----------------------------------------------------------------------------
1956+ * Filename: drv_nap.c
1957+ * $Revision: 1.14 $
1958+ *----------------------------------------------------------------------------
1959+ * Gart and DRM driver for Intel Embedded Graphics Driver
1960+ * Copyright © 2008, Intel Corporation.
1961+ *
1962+ * This program is free software; you can redistribute it and/or modify it
1963+ * under the terms and conditions of the GNU General Public License,
1964+ * version 2, as published by the Free Software Foundation.
1965+ *
1966+ * This program is distributed in the hope it will be useful, but WITHOUT
1967+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1968+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1969+ * more details.
1970+ *
1971+ * You should have received a copy of the GNU General Public License along with
1972+ * this program; if not, write to the Free Software Foundation, Inc.,
1973+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
1974+ *
1975+ */
1976+
1977+#include "global.h"
1978+#include "intelpci.h"
1979+
1980+static int iegd_nap_fetch_size(void);
1981+static void iegd_nap_tlbflush(struct agp_memory *mem);
1982+
1983+static void iegd_iq35_init_gtt_entries(void);
1984+static void iegd_nap_iq35_gatt(void);
1985+static int iegd_nap_9series(u32 order);
1986+static int AGP_CREATE_GATT(iegd_nap_create_gatt_table);
1987+static void iegd_nap_cleanup(void);
1988+
1989+
1990+struct aper_size_info_fixed iegd_i915_sizes[] =
1991+{
1992+ {128, 32768, 5},
1993+ /* The 64M mode still requires a 128k gatt */
1994+ {64, 16384, 5},
1995+ {256, 65536, 6},
1996+ {512, 131072, 7},
1997+};
1998+
1999+struct aper_size_info_fixed iegd_iq35_sizes[] =
2000+{
2001+ {128, 32768, 5},
2002+ {256, 65536, 6},
2003+ {512, 131072, 7},
2004+};
2005+
2006+bridge_driver_t drv_nap = {
2007+ .owner = THIS_MODULE,
2008+ .size_type = FIXED_APER_SIZE,
2009+ .aperture_sizes = 0,
2010+ .num_aperture_sizes = 0,
2011+ .needs_scratch_page = TRUE,
2012+ .configure = iegd_cmn_configure,
2013+ .fetch_size = iegd_nap_fetch_size,
2014+ .cleanup = iegd_nap_cleanup,
2015+ .tlb_flush = iegd_nap_tlbflush,
2016+ .mask_memory = iegd_cmn_mask_memory,
2017+ .masks = iegd_cmn_masks,
2018+ .agp_enable = iegd_cmn_agp_enable,
2019+ .cache_flush = global_cache_flush,
2020+ .create_gatt_table = iegd_nap_create_gatt_table,
2021+ .free_gatt_table = iegd_cmn_free_gatt_table,
2022+ .insert_memory = iegd_cmn_insert_entries,
2023+ .remove_memory = iegd_cmn_remove_entries,
2024+ .alloc_by_type = iegd_cmn_alloc_by_type,
2025+ .free_by_type = iegd_cmn_free_by_type,
2026+ .agp_alloc_page = agp_generic_alloc_page,
2027+ .agp_destroy_page = agp_generic_destroy_page,
2028+};
2029+
2030+static int iegd_nap_fetch_size()
2031+{
2032+ struct aper_size_info_fixed *values;
2033+ u32 offset = 0;
2034+ u32 temp2;
2035+ u8 temp;
2036+
2037+#define IQ35_GMCH_MSAC 0x62
2038+#define I915_256MB_ADDRESS_MASK (1<<27)
2039+
2040+ AGN_DEBUG("Enter");
2041+
2042+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
2043+
2044+ switch(private_data.pdev->device) {
2045+ case PCI_DEVICE_ID_Q35:
2046+ case PCI_DEVICE_ID_Q35A2:
2047+ pci_read_config_byte(private_data.pdev,
2048+ IQ35_GMCH_MSAC, &temp);
2049+ switch(temp & 0x3) {
2050+ case 1:
2051+ offset = 2; /* 512MB aperture size */
2052+ break;
2053+ case 2:
2054+ offset = 1; /* 256MB aperture size */
2055+ break;
2056+ case 3:
2057+ offset = 0; /* 128MB aperture size */
2058+ break;
2059+ }
2060+ break;
2061+ case PCI_DEVICE_ID_915GD:
2062+ case PCI_DEVICE_ID_915AL:
2063+ case PCI_DEVICE_ID_945G:
2064+ case PCI_DEVICE_ID_945GM:
2065+ case PCI_DEVICE_ID_945GME:
2066+ pci_read_config_dword(private_data.pdev,
2067+ I915_GMADDR, &temp2);
2068+ if (temp2 & I915_256MB_ADDRESS_MASK) {
2069+ offset = 0; /* 128MB aperture */
2070+ } else {
2071+ offset = 2; /* 256MB aperture */
2072+ }
2073+ break;
2074+ }
2075+
2076+ agp_bridge->previous_size = agp_bridge->current_size =
2077+ (void *)(values + offset);
2078+
2079+ AGN_DEBUG("Exit");
2080+
2081+ return values[offset].size;
2082+}
2083+
2084+static void iegd_nap_tlbflush(struct agp_memory *mem)
2085+{
2086+ AGN_DEBUG("Enter");
2087+ return;
2088+ AGN_DEBUG("Exit");
2089+}
2090+
2091+static void iegd_iq35_init_gtt_entries(void)
2092+{
2093+ u16 gmch_ctrl;
2094+ u32 iegd_scratch, iegd_scratch2;
2095+ int gtt_entries;
2096+ int local = 0;
2097+ int size = 4;
2098+
2099+#define I35_GMCH_GMS_STOLEN_128M (0x8 << 4)
2100+#define I35_GMCH_GMS_STOLEN_256M (0x9 << 4)
2101+#define I35_GMCH_GMS_MASK 0xf0
2102+
2103+ AGN_DEBUG("Enter");
2104+
2105+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
2106+
2107+ switch (gmch_ctrl & I35_GMCH_GMS_MASK) {
2108+ case I855_GMCH_GMS_STOLEN_1M:
2109+ gtt_entries = MB(1) - KB(size);
2110+ break;
2111+ case I855_GMCH_GMS_STOLEN_4M:
2112+ gtt_entries = MB(4) - KB(size);
2113+ break;
2114+ case I855_GMCH_GMS_STOLEN_8M:
2115+ gtt_entries = MB(8) - KB(size);
2116+ break;
2117+ case I855_GMCH_GMS_STOLEN_16M:
2118+ gtt_entries = MB(16) - KB(size);
2119+ break;
2120+ case I855_GMCH_GMS_STOLEN_32M:
2121+ gtt_entries = MB(32) - KB(size);
2122+ break;
2123+ case I915_GMCH_GMS_STOLEN_48M:
2124+ gtt_entries = MB(48) - KB(size);
2125+ break;
2126+ case I915_GMCH_GMS_STOLEN_64M:
2127+ gtt_entries = MB(64) - KB(size);
2128+ break;
2129+ case I35_GMCH_GMS_STOLEN_128M:
2130+ gtt_entries = MB(128) - KB(size);
2131+ break;
2132+ case I35_GMCH_GMS_STOLEN_256M:
2133+ gtt_entries = MB(256) - KB(size);
2134+ break;
2135+ default:
2136+ gtt_entries = 0;
2137+ break;
2138+ }
2139+
2140+ iegd_scratch = readl(private_data.registers + 0x71410);
2141+
2142+ /* FIXME: check for the pci card as primary */
2143+ if(iegd_scratch == 0) {
2144+ gtt_entries = 0;
2145+ } else if (((iegd_scratch>>16) == 0xE1DF) && (iegd_scratch & 0x4)) {
2146+ AGN_LOG("IEGD Firmware Detected");
2147+ /* IEGD firmware found, and Mem Reservation Flag present */
2148+ iegd_scratch2 = readl(private_data.registers + 0x71418);
2149+ gtt_entries = (iegd_scratch2 & 0xFFFF) * 4096;
2150+ }
2151+
2152+ if (gtt_entries > 0) {
2153+ AGN_LOG("Detected %dK %s memory.",
2154+ gtt_entries / KB(1), local ? "local" : "stolen");
2155+ } else {
2156+ AGN_LOG("No pre-allocated video memory detected.");
2157+ }
2158+
2159+ gtt_entries /= KB(4);
2160+
2161+ private_data.gtt_entries = gtt_entries;
2162+
2163+ AGN_DEBUG("Exit");
2164+}
2165+
2166+static void iegd_nap_iq35_gatt()
2167+{
2168+ u32 gtt_mem_size;
2169+ u32 base_stolen_mem;
2170+ u16 gmch_ctrl;
2171+
2172+ AGN_DEBUG("Enter");
2173+
2174+ iegd_iq35_init_gtt_entries();
2175+
2176+ pci_read_config_dword(private_data.pdev,
2177+ IQ35_BASE_STOLEN, &base_stolen_mem);
2178+ base_stolen_mem &= 0xFFF00000;
2179+
2180+ pci_read_config_word(private_data.pdev,
2181+ I830_GMCH_CTRL, &gmch_ctrl);
2182+
2183+ switch(gmch_ctrl & IQ35_GTT_MEM_SIZE) {
2184+ case IQ35_GGMS_1MB:
2185+ gtt_mem_size = MB(1); /* Normal mode */
2186+ break;
2187+ case IQ35_GGMS_2MB:
2188+ gtt_mem_size = MB(2); /* VT mode */
2189+ break;
2190+ default:
2191+ gtt_mem_size = 0;
2192+ }
2193+
2194+ AGN_DEBUG("gtt_mem_size = %uMB", gtt_mem_size);
2195+
2196+ /* Minus based stolen memory to get the base of gtt. This address
2197+ * can also get from register 0xA8 of config space device 0 */
2198+ agp_bridge->gatt_bus_addr = base_stolen_mem - gtt_mem_size;
2199+
2200+ AGN_DEBUG("Exit");
2201+}
2202+
2203+static int iegd_nap_9series(u32 order)
2204+{
2205+ u32 gtt_pgctl_reg;
2206+ u32 gtt_bus_addr;
2207+ u32 gtt_enabled = FALSE;
2208+ u32 iegd_scratch;
2209+
2210+ gtt_pgctl_reg = readl(private_data.registers +
2211+ I810_PGETBL_CTL);
2212+ global_cache_flush();
2213+ gtt_bus_addr = gtt_pgctl_reg & 0xFFFFF000;
2214+ gtt_enabled = gtt_pgctl_reg & I810_PGETBL_ENABLED;
2215+
2216+
2217+ /* we have to call this as early as possible after the MMIO base
2218+ * address is known */
2219+ iegd_cmn_init_gtt_entries();
2220+
2221+ /*
2222+ * If GTT not enabled created our own gtt table from kernel memory
2223+ * and initialize it to scratch page. This in case the VBIOS is
2224+ * not our VBIOS
2225+ */
2226+ iegd_scratch = readl(private_data.registers + 0x71410);
2227+
2228+ if (iegd_scratch == 0) {
2229+ /* PCI as primary device. IEGD VBIOS is not loaded.
2230+ * Need to setup the GTT in stolen memory
2231+ * GTT will located at the bottom of stolen memory.
2232+ * The rest of the memory will be used as video memory and mapped in the PTEs except
2233+ * the last page, which is used as a scratch page.
2234+ */
2235+ u32 gtt_end;
2236+ u32 gtt_addr_reg;
2237+ u32 base_stolen_mem;
2238+ u16 gmch_ctrl;
2239+ int aperture_size = 0;
2240+ int total_stolen_pages = 0;
2241+ int total_gtt_entries = 0;
2242+ int num_entries;
2243+ int i;
2244+ u16 j = 0;
2245+ u32 temp2;
2246+ u8 temp;
2247+
2248+ /* read the stolen memory address.
2249+ * use 512 bytes as GTT table, and use the rest table for memory. */
2250+ pci_read_config_dword(private_data.pdev,
2251+ IQ35_BASE_STOLEN, &base_stolen_mem);
2252+ base_stolen_mem &= 0xFFF00000;
2253+
2254+ /* have to determine the stolen memory size.
2255+ * We can't use the private_data.gtt_entries value because the value assumes the VBIOS is present. */
2256+ pci_read_config_word(private_data.pdev, I830_GMCH_CTRL, &gmch_ctrl);
2257+ gmch_ctrl = (gmch_ctrl >> 4) & 0xf;
2258+ /* Translate the stolen memory size to num of pages available. */
2259+ if (gmch_ctrl == 1) {
2260+ total_stolen_pages = 1024 / 4 ;
2261+ } else if (gmch_ctrl > 1) {
2262+ total_stolen_pages = (2 << (gmch_ctrl - 1)) * (1024 / 4);
2263+ }
2264+
2265+ /* We need to allocate the last page as scratch page. */
2266+ total_stolen_pages = total_stolen_pages - 1;
2267+
2268+ /* Need to program the PGETBL_CTL to enable page table. */
2269+ writel(base_stolen_mem | 1, private_data.registers + I810_PGETBL_CTL);
2270+
2271+#define I810_GTT_ADDR 0x1c
2272+ /* Find and save the address of the MMIO register */
2273+ pci_read_config_dword(private_data.pdev, I810_GTT_ADDR, &gtt_addr_reg);
2274+ private_data.gtt = (volatile u32 *) ioremap(gtt_addr_reg, KB(512));
2275+
2276+ if (!private_data.gtt) {
2277+ AGN_ERROR("ioremap failed to map");
2278+ return (-ENOMEM);
2279+ }
2280+
2281+
2282+ switch(private_data.pdev->device) {
2283+ case PCI_DEVICE_ID_Q35:
2284+ case PCI_DEVICE_ID_Q35A2:
2285+ pci_read_config_byte(private_data.pdev,
2286+ IQ35_GMCH_MSAC, &temp);
2287+ switch(temp & 0x3) {
2288+ case 1:
2289+ aperture_size = 512; /* 512MB aperture size */
2290+ break;
2291+ case 2:
2292+ aperture_size = 256; /* 256MB aperture size */
2293+ break;
2294+ case 3:
2295+ aperture_size = 128; /* 128MB aperture size */
2296+ break;
2297+ }
2298+ break;
2299+ case PCI_DEVICE_ID_915GD:
2300+ case PCI_DEVICE_ID_915AL:
2301+ case PCI_DEVICE_ID_945G:
2302+ case PCI_DEVICE_ID_945GM:
2303+ case PCI_DEVICE_ID_945GME:
2304+ pci_read_config_dword(private_data.pdev,
2305+ I915_GMADDR, &temp2);
2306+ if (temp2 & I915_256MB_ADDRESS_MASK) {
2307+ aperture_size = 128; /* 128MB aperture */
2308+ } else {
2309+ aperture_size = 256; /* 256MB aperture */
2310+ }
2311+ break;
2312+ default: AGN_ERROR("Illegal Device ID");
2313+ break;
2314+ }
2315+ /* Number of GTT entries available based on the aperture size. */
2316+ total_gtt_entries = aperture_size * 1024 / 4;
2317+ /* gtt_end is the last entry of the GTT, and start of video memory. */
2318+ gtt_end = base_stolen_mem + KB(aperture_size);
2319+
2320+ /* This num_entries mean total of PTE can be populate based on the
2321+ * remaining stolen memory size.*/
2322+ num_entries = ((total_stolen_pages * 4) - aperture_size) / 4;
2323+
2324+ /* Have to program the PTE through the GTT ADDRESS.*/
2325+ for (i=0; i < num_entries; i++) {
2326+ writel(((gtt_end + i * KB(4)) | 1), (private_data.gtt + j));
2327+ j+=1;
2328+ }
2329+
2330+ /* I believe this will be the reserved memory refer by GMM.
2331+ * So, have to update the actual PTE has been used.*/
2332+ private_data.gtt_entries = num_entries - 1;
2333+
2334+ /* This num_entries is the remaining GTT table not fill up. Have to
2335+ * populate with scratch page. */
2336+ num_entries = total_gtt_entries - num_entries;
2337+
2338+ for (i=0; i < num_entries; i++)
2339+ {
2340+ writel(agp_bridge->scratch_page, private_data.gtt + j);
2341+ j+=1;
2342+ }
2343+
2344+ agp_bridge->gatt_bus_addr = base_stolen_mem;
2345+ } else {
2346+ agp_bridge->gatt_bus_addr = gtt_bus_addr;
2347+ }
2348+
2349+ agp_bridge->gatt_table = NULL;
2350+
2351+ AGN_DEBUG("Exit");
2352+
2353+ return 0;
2354+}
2355+
2356+
2357+static int AGP_CREATE_GATT(iegd_nap_create_gatt_table)
2358+{
2359+ const u32 i915_gtt_table_order = 6;
2360+ u32 mmio_bus_addr, temp2;
2361+ int ret;
2362+
2363+ AGN_DEBUG("Enter");
2364+
2365+ agp_bridge->gatt_table_real = NULL;
2366+
2367+ /* Find and save the address of the MMIO register */
2368+ pci_read_config_dword(private_data.pdev, I915_MMADDR,
2369+ &mmio_bus_addr);
2370+ mmio_bus_addr &= 0xFFF80000;
2371+
2372+ private_data.registers = (volatile u8 *) ioremap(mmio_bus_addr,
2373+ 128 * 4096);
2374+ if (!private_data.registers) {
2375+ AGN_ERROR("ioremap failed to map mmio");
2376+ return (-ENOMEM);
2377+ }
2378+
2379+ pci_read_config_dword(private_data.pdev, I915_PTEADDR,&temp2);
2380+
2381+ /* FIXME: double check the size of area to map to pci space */
2382+ private_data.gtt = (volatile u32 *)ioremap(temp2, 512 * 1024);
2383+ if (!private_data.gtt) {
2384+ AGN_ERROR("ioremap failed to map gtt");
2385+ return (-ENOMEM);
2386+ }
2387+
2388+ switch(private_data.pdev->device) {
2389+ case PCI_DEVICE_ID_Q35:
2390+ case PCI_DEVICE_ID_Q35A2:
2391+ /* Bearlake B is different from other chipsets, especially
2392+ * when reading the GTT base address. Probably future chipsets
2393+ * will have the same architecture as Bearlake-B and this
2394+ * code can be moved to a common file */
2395+ iegd_nap_iq35_gatt();
2396+ break;
2397+ case PCI_DEVICE_ID_915GD:
2398+ case PCI_DEVICE_ID_915AL:
2399+ case PCI_DEVICE_ID_945G:
2400+ case PCI_DEVICE_ID_945GM:
2401+ case PCI_DEVICE_ID_945GME:
2402+ if((ret = iegd_nap_9series(i915_gtt_table_order))) {
2403+ return (ret);
2404+ }
2405+ break;
2406+ }
2407+
2408+ agp_bridge->gatt_table = NULL;
2409+
2410+ AGN_DEBUG("Exit");
2411+
2412+ return (0);
2413+}
2414+
2415+static void iegd_nap_cleanup(void)
2416+{
2417+
2418+ AGN_DEBUG("Enter");
2419+ iounmap((void *)private_data.gtt);
2420+ iounmap((void *)private_data.registers);
2421+ AGN_DEBUG("Exit");
2422+}
2423+
2424diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_plb.c patch_script_temp/drivers/gpu/drm/iegd/agp/drv_plb.c
2425--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/drv_plb.c 1969-12-31 17:00:00.000000000 -0700
2426+++ patch_script_temp/drivers/gpu/drm/iegd/agp/drv_plb.c 2009-10-06 10:30:05.000000000 -0700
2427@@ -0,0 +1,945 @@
2428+/* -*- pse-c -*-
2429+ *----------------------------------------------------------------------------
2430+ * Filename: iegd_interface.c
2431+ * $Revision: 1.36 $
2432+ *----------------------------------------------------------------------------
2433+ * Gart and DRM driver for Intel Embedded Graphics Driver
2434+ * Copyright © 2007, Intel Corporation.
2435+ *
2436+ * This program is free software; you can redistribute it and/or modify it
2437+ * under the terms and conditions of the GNU General Public License,
2438+ * version 2, as published by the Free Software Foundation.
2439+ *
2440+ * This program is distributed in the hope it will be useful, but WITHOUT
2441+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
2442+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
2443+ * more details.
2444+ *
2445+ * You should have received a copy of the GNU General Public License along with
2446+ * this program; if not, write to the Free Software Foundation, Inc.,
2447+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
2448+ *
2449+ */
2450+
2451+#include "global.h"
2452+#include "intelpci.h"
2453+#include <linux/pagemap.h>
2454+#include <linux/list.h>
2455+
2456+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
2457+
2458+static int iegd_plb_fetch_size(void);
2459+static void iegd_plb_tlbflush(struct agp_memory *mem);
2460+static int iegd_plb_init_gtt(u32 order);
2461+static int AGP_CREATE_GATT(iegd_plb_create_gatt_table);
2462+static void iegd_plb_cleanup(void);
2463+static struct page *iegd_plb_vm_nopage(struct vm_area_struct *,
2464+ unsigned long, int *);
2465+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
2466+static int iegd_plb_vm_fault(struct vm_area_struct *vma,
2467+ struct vm_fault *vmf);
2468+#endif
2469+static void iegd_plb_vm_close(struct vm_area_struct *);
2470+int iegd_plb_insert_entries(struct agp_memory *, off_t, int);
2471+int iegd_plb_remove_entries(struct agp_memory *, off_t, int);
2472+void iegd_plb_free_by_type(struct agp_memory *);
2473+int iegd_plb_configure(void);
2474+
2475+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
2476+#define PLB_DESTROY_PAGES(a,b,c) plb_destroy_pages(a->pages[0],b,c)
2477+#else
2478+#define PLB_DESTROY_PAGES(a,b,c) plb_destroy_pages_by_addr(gart_to_virt(a->memory[0]),b,c)
2479+#endif
2480+
2481+
2482+/* Each structure in this array contains three elements:
2483+ * Size of GTT in KB
2484+ * Number of 32-bit entries that make up the GTT
2485+ * Page "order" -- 2^order == number of contiguous CPU pages
2486+ * required to store the GTT
2487+ */
2488+struct aper_size_info_fixed iegd_plb_sizes[] =
2489+{
2490+ {256, 65536, 6},
2491+};
2492+
2493+bridge_driver_t drv_plb = {
2494+ .owner = THIS_MODULE,
2495+ .size_type = FIXED_APER_SIZE,
2496+ .aperture_sizes = iegd_plb_sizes,
2497+ .num_aperture_sizes = 1,
2498+ .needs_scratch_page = TRUE,
2499+ .configure = iegd_plb_configure,
2500+ .fetch_size = iegd_plb_fetch_size,
2501+ .cleanup = iegd_plb_cleanup,
2502+ .tlb_flush = iegd_plb_tlbflush,
2503+ .mask_memory = iegd_cmn_mask_memory,
2504+ .masks = iegd_cmn_masks,
2505+ .agp_enable = iegd_cmn_agp_enable,
2506+ .cache_flush = global_cache_flush,
2507+ .create_gatt_table = iegd_plb_create_gatt_table,
2508+ .free_gatt_table = iegd_cmn_free_gatt_table,
2509+ .insert_memory = iegd_plb_insert_entries,
2510+ .remove_memory = iegd_plb_remove_entries,
2511+ .alloc_by_type = iegd_cmn_alloc_by_type,
2512+ .free_by_type = iegd_plb_free_by_type,
2513+ .agp_alloc_page = agp_generic_alloc_page,
2514+ .agp_destroy_page = agp_generic_destroy_page,
2515+};
2516+
2517+struct vm_operations_struct iegd_plb_vm_ops = {
2518+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
2519+ .fault = iegd_plb_vm_fault,
2520+#else
2521+ .nopage = iegd_plb_vm_nopage,
2522+#endif
2523+ .close = iegd_plb_vm_close
2524+};
2525+
2526+static DECLARE_MUTEX(client_sem);
2527+
2528+struct client_list_struct {
2529+ struct list_head list;
2530+ struct vm_area_struct *vma;
2531+ pid_t pid;
2532+};
2533+
2534+static LIST_HEAD(client_list);
2535+
2536+
2537+static int iegd_plb_fetch_size()
2538+{
2539+ struct aper_size_info_fixed *values;
2540+
2541+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
2542+
2543+
2544+ agp_bridge->previous_size = agp_bridge->current_size =
2545+ (void *)(values);
2546+
2547+ return values[0].size;
2548+}
2549+
2550+static void iegd_plb_tlbflush(struct agp_memory *mem)
2551+{
2552+ u32 sgx_mmu;
2553+
2554+ /* Flush TLB */
2555+ sgx_mmu = readl(private_data.registers + 0x40C00);
2556+ sgx_mmu &= 0xFFFFFFE0;
2557+ sgx_mmu |= 0x0C;
2558+ writel(sgx_mmu, private_data.registers + 0x40C00);
2559+
2560+ wmb();
2561+ sgx_mmu = readl(private_data.registers + 0x40C00);
2562+ sgx_mmu &= 0xFFFFFFE0;
2563+ writel(sgx_mmu, private_data.registers + 0x40C00);
2564+
2565+ return;
2566+}
2567+
2568+#define IUS15_GMCH_MSAC 0x62
2569+
2570+static int iegd_plb_init_gtt(u32 order)
2571+{
2572+ u32 gtt_pgctl_reg;
2573+ u32 gtt_bus_addr;
2574+ u32 gtt_enabled = FALSE;
2575+ int num_entries;
2576+ u32 *gtt_table, *dstvirt;
2577+ u32 *sgx_dir, sgx_mmu;
2578+ u32 iegd_scratch, aperphys;
2579+ u8 temp;
2580+ struct page *gtt_table_page;
2581+ int i,j;
2582+ u32 new_order;
2583+
2584+ /* Has the system BIOS only allocateda GTT for 128MB? If
2585+ * so we need to replace it with one sized for 256MB
2586+ */
2587+ pci_read_config_byte(private_data.pdev, IUS15_GMCH_MSAC, &temp);
2588+ if ((temp & 0x03) == 0x03) {
2589+ AGN_DEBUG("Graphics aperture is configured for 128MB");
2590+ AGN_DEBUG("Enabling 256MB split aperture");
2591+ private_data.split_gtt = 1;
2592+ } else {
2593+ private_data.split_gtt = 0;
2594+ }
2595+
2596+ gtt_pgctl_reg = readl(private_data.registers +
2597+ I810_PGETBL_CTL);
2598+ global_cache_flush();
2599+ gtt_bus_addr = gtt_pgctl_reg & 0xFFFFF000;
2600+ gtt_enabled = gtt_pgctl_reg & I810_PGETBL_ENABLED;
2601+
2602+ /* we have to call this as early as possible after the MMIO base
2603+ * address is known */
2604+ iegd_cmn_init_gtt_entries();
2605+
2606+ /* Update the scratch registers to say that we have no stolen memory */
2607+ iegd_scratch = readl(private_data.registers + 0x71410);
2608+ if ((iegd_scratch & 0xE1DF0000) == 0xE1DF0000) {
2609+ /* if our vBios modify only the stolen memory bit */
2610+ iegd_scratch |= 0x00000004;
2611+ writel(iegd_scratch, private_data.registers + 0x71410);
2612+ } else {
2613+ /* Not our vBIOS but set the stolen memory anyway */
2614+ writel(0xE1DF0004, private_data.registers + 0x71410);
2615+ }
2616+
2617+ /* Reportthat we have 0 stolen memory regardless of what was
2618+ * really in there. We _want_ to insert fresh pages on top of
2619+ * stolen memory. */
2620+ writel(0, private_data.registers + 0x71418);
2621+
2622+ num_entries = (1 << order) * KB(1);
2623+
2624+ private_data.upper_gtt=NULL;
2625+
2626+ /*
2627+ * If GTT not enabled created our own gtt table from kernel memory
2628+ * and initialize it to scratch page. This in case the VBIOS is
2629+ * not our VBIOS
2630+ */
2631+ if (!gtt_enabled) {
2632+ gtt_table = (u32 *)__get_free_pages(GFP_KERNEL, order);
2633+
2634+ /* Make sure allocation was successful */
2635+ if (NULL == gtt_table) {
2636+ AGN_ERROR("Failed to allocate kernel pages");
2637+ return (-ENOMEM);
2638+ }
2639+
2640+ for (i=0; i < (1 << order); i++) {
2641+ dstvirt = gtt_table + (PAGE_SIZE * i);
2642+ gtt_table_page = virt_to_page(dstvirt);
2643+ AGN_DEBUG("Setting reserved bit on %p", gtt_table_page);
2644+ set_bit(PG_reserved, &gtt_table_page->flags);
2645+ }
2646+
2647+ private_data.upper_gtt = gtt_table + 0x8000;
2648+ agp_bridge->gatt_bus_addr = virt_to_phys(gtt_table);
2649+
2650+ for (i = 0; i < num_entries; i++) {
2651+ gtt_table[i] = (unsigned long) agp_bridge->scratch_page;
2652+ }
2653+
2654+ /* Enable the newly created GTT */
2655+ AGN_DEBUG("Enabling new GTT");
2656+ writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED,
2657+ private_data.registers+I810_PGETBL_CTL);
2658+ readl(private_data.registers+I810_PGETBL_CTL);
2659+
2660+ } else if (private_data.split_gtt) {
2661+ /* We're keeping the system BIOS created normal gtt but
2662+ * augmenting it with more entries
2663+ */
2664+ gtt_table = (u32 *)__get_free_pages(GFP_KERNEL, order - 1);
2665+
2666+ //AGN_DEBUG("Allocated secondary GTT at %p:%p (virt:phys)", gtt_table,
2667+ // virt_to_phys(gtt_table));
2668+
2669+ /* Make sure allocation was successful */
2670+ if (NULL == gtt_table) {
2671+ AGN_ERROR("Failed to allocate kernel pages");
2672+ return (-ENOMEM);
2673+ }
2674+
2675+ private_data.upper_gtt = gtt_table;
2676+
2677+ for (i = 0; i < num_entries/2; i++) {
2678+ gtt_table[i] = (unsigned long) agp_bridge->scratch_page;
2679+ }
2680+
2681+ agp_bridge->gatt_bus_addr = gtt_bus_addr;
2682+
2683+ } else {
2684+
2685+ agp_bridge->gatt_bus_addr = gtt_bus_addr;
2686+
2687+ }
2688+
2689+ /*
2690+ * Now that the GTT exists and has been configured, enable
2691+ * the SGX MMU to point to the GTT as its page tables
2692+ */
2693+
2694+ /* The directory level is a single page of memory */
2695+ sgx_dir = (u32 *)__get_free_pages(GFP_KERNEL, 0);
2696+ if (NULL == sgx_dir ) {
2697+ AGN_ERROR("Failed to allocate kernel page");
2698+ return (-ENOMEM);
2699+ }
2700+
2701+ /* Mark the directory so that it is not swappable */
2702+ gtt_table_page = virt_to_page( sgx_dir );
2703+ set_bit(PG_reserved, &gtt_table_page->flags);
2704+
2705+ memset (sgx_dir, 0, PAGE_SIZE);
2706+
2707+ /* Initialize the directory so that each used page table
2708+ * is addressed
2709+ */
2710+
2711+ /* Make sure entire SGX directory is populated */
2712+ for (i = 0; i < 0x400; i++) {
2713+ sgx_dir[i] = agp_bridge->gatt_bus_addr | 0x01;
2714+ }
2715+
2716+ pci_read_config_dword(private_data.pdev, I915_GMADDR, &aperphys);
2717+ aperphys &= PCI_BASE_ADDRESS_MEM_MASK;
2718+ aperphys = aperphys >> 22;
2719+
2720+ if (private_data.split_gtt) {
2721+ /* Only use half of the entries */
2722+ new_order = order-1;
2723+ } else {
2724+ /* Full GTT, use all entries */
2725+ new_order = order;
2726+ }
2727+
2728+ for (i = 0; i < (1 << new_order); i++) {
2729+ /* Set the address for 2D/3D*/
2730+ sgx_dir[i] = agp_bridge->gatt_bus_addr + (PAGE_SIZE * i);
2731+ /* Set the address for hostport */
2732+ sgx_dir[i+aperphys] = agp_bridge->gatt_bus_addr + (PAGE_SIZE * i);
2733+
2734+ /* Mark them as valid */
2735+ sgx_dir[i] |= 0x01;
2736+ sgx_dir[i+aperphys] |= 0x01;
2737+
2738+ //AGN_DEBUG("Directory %d is %08lx", i, sgx_dir[i]);
2739+ }
2740+
2741+ /* If we're in split gtt mode, set the directory entries of the second
2742+ * gtt
2743+ */
2744+
2745+ if (private_data.split_gtt) {
2746+ j=0;
2747+ for (i = (1 << (order - 1)); i < (1 << order); i++) {
2748+ /* Set the address for 2D/3D*/
2749+ sgx_dir[i] = virt_to_phys(private_data.upper_gtt) + (PAGE_SIZE * j);
2750+ /* Set the address for hostport */
2751+ sgx_dir[i+aperphys] = virt_to_phys(private_data.upper_gtt) + (PAGE_SIZE * j);
2752+
2753+ j++;
2754+
2755+ /* Mark them as valid */
2756+ sgx_dir[i] |= 0x01;
2757+ sgx_dir[i+aperphys] |= 0x01;
2758+ //AGN_DEBUG("Directory %d is %08lx", i, sgx_dir[i]);
2759+ }
2760+ }
2761+
2762+ /*
2763+ * Program the directory's address into the MMU control
2764+ * register
2765+ */
2766+
2767+ /* Flush the cache */
2768+ flush_cache_all();
2769+ global_cache_flush();
2770+
2771+ /* Invalidate directory cache */
2772+ sgx_mmu = readl(private_data.registers + 0x40C00);
2773+ sgx_mmu |= 0x1E;
2774+ writel(sgx_mmu, private_data.registers + 0x40C00);
2775+ wmb();
2776+ readl(private_data.registers + 0x40C00);
2777+
2778+ writel(virt_to_phys(sgx_dir), private_data.registers + 0x40C84);
2779+ wmb();
2780+ readl(private_data.registers + 0x40C84);
2781+
2782+ /* Turn on host access to aperture via the MMU */
2783+ sgx_mmu = readl(private_data.registers + 0x40C00);
2784+ sgx_mmu &= 0xFFFE0000;
2785+ writel(sgx_mmu, private_data.registers + 0x40C00);
2786+ wmb();
2787+ readl(private_data.registers + 0x40C00);
2788+
2789+ return 0;
2790+}
2791+
2792+
2793+static int AGP_CREATE_GATT(iegd_plb_create_gatt_table)
2794+{
2795+ u32 order;
2796+ u32 mmio_bus_addr, temp2;
2797+ int ret;
2798+ u32 gtt_size;
2799+ unsigned char msac;
2800+ u32 msac_gtt_size;
2801+
2802+ agp_bridge->gatt_table_real = NULL;
2803+
2804+ order=A_SIZE_FIX(agp_bridge->current_size)->page_order;
2805+
2806+ /* Find and save the address of the MMIO register */
2807+ pci_read_config_dword(private_data.pdev, I915_MMADDR,
2808+ &mmio_bus_addr);
2809+ mmio_bus_addr &= 0xFFF80000;
2810+
2811+ private_data.registers = (volatile u8 *) ioremap(mmio_bus_addr,
2812+ KB(512));
2813+
2814+ if (!private_data.registers) {
2815+ AGN_ERROR("ioremap failed to map mmio");
2816+ return (-ENOMEM);
2817+ }
2818+
2819+ pci_read_config_dword(private_data.pdev, I915_PTEADDR, &temp2);
2820+
2821+ /* Get the GTT size via MSAC */
2822+ pci_read_config_byte(private_data.pdev, IUS15_GMCH_MSAC, &msac);
2823+
2824+ switch (msac & 0x03) {
2825+ case 0x02: /* 256K GTT size */
2826+ msac_gtt_size = KB(256);
2827+ break;
2828+ case 0x03: /* 128K GTT size */
2829+ default:
2830+ msac_gtt_size = KB(128);
2831+ break;
2832+ }
2833+
2834+ gtt_size = A_SIZE_FIX(agp_bridge->current_size)->num_entries * sizeof(u32);
2835+
2836+ if (gtt_size!=msac_gtt_size) {
2837+ AGN_DEBUG("MSAC GTT size 0x%08x, bridge GTT size 0x%08x; using MSAC",
2838+ msac_gtt_size, gtt_size);
2839+ gtt_size = msac_gtt_size;
2840+ }
2841+
2842+ private_data.gtt = (volatile u32 *)ioremap(temp2, gtt_size);
2843+
2844+ if (!private_data.gtt) {
2845+ AGN_ERROR("ioremap failed to map gtt");
2846+ return (-ENOMEM);
2847+ }
2848+
2849+ if((ret = iegd_plb_init_gtt(order))) {
2850+ return (ret);
2851+ }
2852+
2853+ agp_bridge->gatt_table = NULL;
2854+
2855+ return (0);
2856+}
2857+
2858+static void iegd_plb_cleanup(void)
2859+{
2860+
2861+ iounmap((void *)private_data.gtt);
2862+ iounmap((void *)private_data.registers);
2863+}
2864+
2865+
2866+static void iegd_plb_vm_close(struct vm_area_struct *vma)
2867+{
2868+ struct list_head *tmp;
2869+ struct client_list_struct *entry;
2870+
2871+ down(&client_sem);
2872+ list_for_each(tmp, &client_list) {
2873+ entry = list_entry(tmp, struct client_list_struct, list);
2874+ if (entry->vma == vma) {
2875+ list_del(&entry->list);
2876+ kfree(entry);
2877+ AGN_DEBUG("Removed VMA %p from client list", vma);
2878+ break;
2879+ }
2880+ }
2881+ up(&client_sem);
2882+}
2883+
2884+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
2885+static int iegd_plb_vm_fault(struct vm_area_struct *vma,
2886+ struct vm_fault *vmf)
2887+{
2888+ int type=0; /* New fault handler doesn't use type */
2889+ unsigned long address = (unsigned long) vmf->virtual_address;
2890+
2891+ vmf->page = iegd_plb_vm_nopage(vma, address, &type);
2892+
2893+ return 0;
2894+}
2895+#endif
2896+
2897+static struct page *iegd_plb_vm_nopage(struct vm_area_struct *vma,
2898+ unsigned long address,
2899+ int *type)
2900+{
2901+ unsigned long offset=0;
2902+ unsigned long physaddr=0;
2903+ struct page *page;
2904+ struct list_head *tmp;
2905+ struct client_list_struct *entry;
2906+ int flag=0;
2907+
2908+ /* On the Intel SCH US15, we don't have a traditional aperture. As
2909+ * a result, we're substituting the base of stolen memory
2910+ * as the aperture address.
2911+ *
2912+ * Mmaps relative to the base of stolen memory will be
2913+ * treated as mmaps covering parts of our virtual aperture.
2914+ *
2915+ * Given that a single surface may be mapped, and not the
2916+ * whole virtual aperture, we must translate the values
2917+ * received so that they are relative to our 0-based virtual
2918+ * aperture.
2919+ */
2920+ offset = (vma->vm_pgoff << PAGE_SHIFT) - agp_bridge->gart_bus_addr;
2921+
2922+ /* All pages returned must be noncached or write-combined*/
2923+ if (agp_use_pat()) {
2924+ pgprot_val(vma->vm_page_prot) &= ~(_PAGE_PCD | _PAGE_PWT);
2925+ pgprot_val(vma->vm_page_prot) |= _PAGE_PAT;
2926+ } else {
2927+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2928+ }
2929+
2930+ /* Access to the virtual frame buffer does not appear to
2931+ * call open properly before faulting. As a result, we
2932+ * need to do this housekeeping at each fault.
2933+ */
2934+ down(&client_sem);
2935+ list_for_each(tmp, &client_list) {
2936+ entry = list_entry(tmp, struct client_list_struct, list);
2937+ if (entry->vma == vma) {
2938+ flag=1;
2939+ }
2940+ }
2941+
2942+ if (!flag) {
2943+ entry = kmalloc(sizeof(struct client_list_struct), GFP_KERNEL);
2944+ if (entry) {
2945+ entry->vma = vma;
2946+ list_add(&(entry->list), &client_list);
2947+ AGN_DEBUG("Added VMA %p to client list", vma);
2948+
2949+ AGN_DEBUG("Scratch: %p", virt_to_page(agp_bridge->scratch_page));
2950+
2951+ } else {
2952+ AGN_ERROR("Failed to add VMA to client list");
2953+ }
2954+ }
2955+ up(&client_sem);
2956+
2957+ offset += address - vma->vm_start;
2958+
2959+ if (private_data.split_gtt && ((offset >> PAGE_SHIFT)) >= 0x8000) {
2960+ physaddr = readl(private_data.upper_gtt + (offset >> PAGE_SHIFT)
2961+ - 0x8000);
2962+ } else {
2963+ physaddr = readl(private_data.gtt + (offset >> PAGE_SHIFT));
2964+ }
2965+
2966+
2967+ physaddr &= PAGE_MASK;
2968+
2969+ if (!pfn_valid(physaddr >> PAGE_SHIFT)) {
2970+ AGN_ERROR("Referencing non-existant struct page.\n");
2971+ }
2972+
2973+ if (physaddr >= agp_bridge->gart_bus_addr) {
2974+ AGN_DEBUG("Faulted before insert, returning scratch page");
2975+ page = virt_to_page(__va(agp_bridge->scratch_page));
2976+ } else {
2977+ page = virt_to_page(__va(physaddr));
2978+ }
2979+
2980+ get_page(page);
2981+
2982+ if (type) {
2983+ *type = VM_FAULT_MINOR;
2984+ }
2985+
2986+ return (page);
2987+}
2988+
2989+
2990+int iegd_plb_insert_entries(struct agp_memory *mem,
2991+ off_t pg_start, int type)
2992+{
2993+ int i,j,num_entries, zap;
2994+ void *temp;
2995+ struct list_head *tmp;
2996+ struct client_list_struct *entry;
2997+ unsigned long addr_start=0;
2998+ unsigned long addr_end=0;
2999+ unsigned long addr_offset=0;
3000+ unsigned long vaddr;
3001+ char *srcvirt;
3002+ unsigned long srcphys;
3003+ unsigned long dstphys;
3004+ pgd_t *pgd;
3005+ pud_t *pud;
3006+ pmd_t *pmd;
3007+ pte_t *pte;
3008+
3009+ temp = agp_bridge->current_size;
3010+ num_entries = A_SIZE_FIX(temp)->num_entries;
3011+
3012+ /* If we try to write beyond gtt table, return error */
3013+ if ((pg_start + mem->page_count) > num_entries) {
3014+ AGN_ERROR("Trying to write beyond aperture limit");
3015+ AGN_DEBUG("pg_start=0x%.8lx, mem->page_count=%d,"
3016+ "num_entries=%d", pg_start, mem->page_count,
3017+ num_entries);
3018+ return -EINVAL;
3019+ }
3020+
3021+ /* The i830 can't check the GTT for entries since its read only,
3022+ * depend on the caller to make the correct offset decisions.
3023+ */
3024+
3025+ if ((type != 0 && type != AGP_PHYS_MEMORY) ||
3026+ (mem->type != 0 && mem->type != AGP_PHYS_MEMORY)) {
3027+ AGN_ERROR("Unsupported memory type");
3028+ AGN_DEBUG("mem->type=%x, type=%x", mem->type, type);
3029+ return -EINVAL;
3030+ }
3031+
3032+ global_cache_flush();
3033+ agp_bridge->driver->tlb_flush(mem);
3034+
3035+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
3036+
3037+ /* If we're inserting into stolen memory, we need to read
3038+ * the contents of the original page that occupied this space
3039+ */
3040+ if (j < private_data.gtt_entries) {
3041+ srcphys=readl(private_data.gtt+j);
3042+ srcphys &= PAGE_MASK;
3043+
3044+ if (srcphys >= agp_bridge->gart_bus_addr) {
3045+ srcvirt=ioremap(srcphys, PAGE_SIZE);
3046+
3047+ if (!srcvirt) {
3048+ AGN_ERROR("Could not map stolen memory source %d:%08lX", j, srcphys);
3049+ return -ENOMEM;
3050+ }
3051+
3052+ dstphys=AGP_MASK_GTT();
3053+ dstphys &= PAGE_MASK;
3054+
3055+ copy_page(__va(dstphys), srcvirt);
3056+
3057+ iounmap(srcvirt);
3058+ } else {
3059+ AGN_ERROR ("Tried to copy a page not in stolen memory %d:%08lX", j, srcphys);
3060+ }
3061+ }
3062+
3063+ if (private_data.split_gtt && (j >= 0x8000)) {
3064+ writel(AGP_MASK_GTT(), private_data.upper_gtt + j - 0x8000);
3065+ } else {
3066+ writel(AGP_MASK_GTT(), private_data.gtt+j);
3067+ readl(private_data.gtt+j); /* PCI Posting. */
3068+ }
3069+
3070+ down(&client_sem);
3071+ list_for_each(tmp, &client_list) {
3072+ entry = list_entry(tmp, struct client_list_struct, list);
3073+
3074+ /* We need to handle invalidating VMA's that are only mapping
3075+ * a portion of the virtual aperture. Calculate what if
3076+ * any invalidated pages need to be zapped
3077+ */
3078+ addr_start = (entry->vma->vm_pgoff << PAGE_SHIFT)
3079+ - agp_bridge->gart_bus_addr;
3080+ addr_end = addr_start + (entry->vma->vm_end - entry->vma->vm_start);
3081+ addr_offset = j << PAGE_SHIFT;
3082+
3083+ vaddr = entry->vma->vm_start + (addr_offset - addr_start);
3084+
3085+ zap=0;
3086+ pgd=NULL;
3087+ pud=NULL;
3088+ pmd=NULL;
3089+ pte=NULL;
3090+
3091+ pgd = pgd_offset(entry->vma->vm_mm, vaddr);
3092+ if (!pgd_none(*pgd)) {
3093+ pud = pud_offset(pgd, vaddr);
3094+ if (!pud_none(*pud)) {
3095+ pmd = pmd_offset(pud, vaddr);
3096+ if (!pmd_none(*pmd)) {
3097+ pte = pte_offset_map(pmd, vaddr);
3098+ if (!pte_none(*pte)) {
3099+ zap=1;
3100+ }
3101+ }
3102+ }
3103+ }
3104+
3105+ /* Only zap a page if it falls within the mapped region
3106+ * and it has previously faulted
3107+ */
3108+ if (zap && (addr_offset >= addr_start) &&
3109+ (addr_offset < addr_end)) {
3110+
3111+ if (!page_mapcount(pte_page(*pte))) {
3112+ AGN_ERROR("ERROR No mapcount");
3113+ AGN_DEBUG("ZI %p %08lX %d %d %p", pte_page(*pte),
3114+ pte_page(*pte)->flags, page_count(pte_page(*pte)),
3115+ page_mapcount(pte_page(*pte)), pte_page(*pte)->mapping);
3116+ } else {
3117+ atomic_add_negative(-1, &pte_page(*pte)->_mapcount);
3118+ put_page(pte_page(*pte));
3119+ dec_mm_counter(entry->vma->vm_mm, file_rss);
3120+ }
3121+
3122+ pte_clear(entry->vma->vm_mm, vaddr, pte);
3123+ }
3124+
3125+ if(pte) {
3126+ pte_unmap(pte);
3127+ }
3128+ }
3129+ up(&client_sem);
3130+ }
3131+
3132+ global_cache_flush();
3133+ agp_bridge->driver->tlb_flush(mem);
3134+
3135+ AGN_DEBUG("Exit");
3136+ return 0;
3137+}
3138+
3139+
3140+int iegd_plb_remove_entries(struct agp_memory *mem,
3141+ off_t pg_start, int type)
3142+{
3143+ int i, zap;
3144+ struct list_head *tmp;
3145+ struct client_list_struct *entry;
3146+ unsigned long physaddr;
3147+ unsigned long addr_start=0;
3148+ unsigned long addr_end=0;
3149+ unsigned long addr_offset=0;
3150+ unsigned long vaddr;
3151+ pgd_t *pgd;
3152+ pud_t *pud;
3153+ pmd_t *pmd;
3154+ pte_t *pte;
3155+
3156+ global_cache_flush();
3157+ agp_bridge->driver->tlb_flush(mem);
3158+
3159+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
3160+ if (i < private_data.gtt_entries) {
3161+ physaddr = agp_bridge->gart_bus_addr + (i * PAGE_SIZE);
3162+ physaddr |= 0x01;
3163+ writel(physaddr, private_data.gtt+i);
3164+ readl(private_data.gtt+i); /* PCI Posting. */
3165+ } else {
3166+ if (private_data.split_gtt && (i >= 0x8000)) {
3167+ writel(agp_bridge->scratch_page, private_data.upper_gtt + i - 0x8000);
3168+ } else {
3169+ writel(agp_bridge->scratch_page, private_data.gtt+i);
3170+ readl(private_data.gtt+i); /* PCI Posting. */
3171+ }
3172+ }
3173+
3174+ down(&client_sem);
3175+ list_for_each(tmp, &client_list) {
3176+ entry = list_entry(tmp, struct client_list_struct, list);
3177+
3178+ /* We need to handle invalidating VMA's that are only mapping
3179+ * a portion of the virtual aperture. Calculate what if
3180+ * any invalidated pages need to be zapped
3181+ */
3182+ addr_start = (entry->vma->vm_pgoff << PAGE_SHIFT)
3183+ - agp_bridge->gart_bus_addr;
3184+ addr_end = addr_start + (entry->vma->vm_end - entry->vma->vm_start);
3185+ addr_offset = i << PAGE_SHIFT;
3186+
3187+ vaddr = entry->vma->vm_start + (addr_offset - addr_start);
3188+
3189+ zap=0;
3190+ pgd=NULL;
3191+ pud=NULL;
3192+ pmd=NULL;
3193+ pte=NULL;
3194+
3195+ /* Look up page table entries for all VMAs that currently
3196+ * have the virtual aperture mapped -- to see if the page
3197+ * has ever faulted
3198+ */
3199+ pgd = pgd_offset(entry->vma->vm_mm, vaddr);
3200+ if (!pgd_none(*pgd)) {
3201+ pud = pud_offset(pgd, vaddr);
3202+ if (!pud_none(*pud)) {
3203+ pmd = pmd_offset(pud, vaddr);
3204+ if (!pmd_none(*pmd)) {
3205+ pte = pte_offset_map(pmd, vaddr);
3206+ if (!pte_none(*pte)) {
3207+ zap=1;
3208+ }
3209+ }
3210+ }
3211+ }
3212+
3213+ /* Only zap a page if it falls within the mapped region
3214+ * and it has previously faulted
3215+ */
3216+ if (zap && (addr_offset >= addr_start) &&
3217+ (addr_offset < addr_end)) {
3218+
3219+
3220+ if (!page_mapcount(pte_page(*pte))) {
3221+ AGN_ERROR("ERROR No mapcount");
3222+ AGN_DEBUG("ZR %p %08lX %d %d %p", pte_page(*pte),
3223+ pte_page(*pte)->flags, page_count(pte_page(*pte)),
3224+ page_mapcount(pte_page(*pte)), pte_page(*pte)->mapping);
3225+ } else {
3226+ atomic_add_negative(-1, &pte_page(*pte)->_mapcount);
3227+ put_page(pte_page(*pte));
3228+ dec_mm_counter(entry->vma->vm_mm, file_rss);
3229+ }
3230+
3231+ pte_clear(entry->vma->vm_mm, vaddr, pte);
3232+ }
3233+
3234+ if(pte) {
3235+ pte_unmap(pte);
3236+ }
3237+ }
3238+ up(&client_sem);
3239+ }
3240+
3241+ global_cache_flush();
3242+ agp_bridge->driver->tlb_flush(mem);
3243+
3244+ return 0;
3245+}
3246+
3247+
3248+int iegd_plb_configure(void)
3249+{
3250+ struct aper_size_info_fixed *current_size;
3251+ u32 temp;
3252+ u16 gmch_ctrl;
3253+ int i;
3254+
3255+ current_size = A_SIZE_FIX(agp_bridge->current_size);
3256+
3257+ /* SCH US15 uses the Base of Stolen Memory as it's artificial
3258+ * aperture address
3259+ */
3260+ pci_read_config_dword(private_data.pdev, 0x5C, &temp);
3261+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
3262+
3263+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
3264+ gmch_ctrl |= I830_GMCH_ENABLED;
3265+ pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
3266+
3267+ global_cache_flush();
3268+ agp_bridge->driver->tlb_flush(0);
3269+
3270+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED,
3271+ private_data.registers+I810_PGETBL_CTL);
3272+ /* PCI Posting. */
3273+ readl(private_data.registers+I810_PGETBL_CTL);
3274+
3275+ if (agp_bridge->driver->needs_scratch_page) {
3276+
3277+ for (i = private_data.gtt_entries; i < current_size->num_entries; i++) {
3278+ if ((private_data.split_gtt) && (i >= 0x8000)) {
3279+ writel(agp_bridge->scratch_page, private_data.upper_gtt+i-0x8000);
3280+ readl(private_data.upper_gtt+i-0x8000);
3281+ } else {
3282+ writel(agp_bridge->scratch_page, private_data.gtt+i);
3283+ readl(private_data.gtt+i); /* PCI Posting. */
3284+ }
3285+ }
3286+ }
3287+
3288+ global_cache_flush();
3289+
3290+ return 0;
3291+}
3292+
3293+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
3294+static void plb_destroy_pages_by_addr(void *addr, size_t pg_count, unsigned int order)
3295+{
3296+ struct page *page;
3297+
3298+ AGN_DEBUG("Enter");
3299+
3300+ if (addr == NULL) {
3301+ return;
3302+ }
3303+
3304+ page = virt_to_page(addr);
3305+ SET_PAGES_WB(page, pg_count);
3306+ GLOBAL_FLUSH_TLB();
3307+ put_page(page);
3308+ AGP_UNLOCK_PAGE(page);
3309+
3310+ if(page_count(page) > 1) {
3311+ free_pages((unsigned long)addr, order);
3312+ }
3313+
3314+ atomic_dec(&agp_bridge->current_memory_agp);
3315+
3316+ AGN_DEBUG("Exit");
3317+}
3318+#else
3319+static void plb_destroy_pages(struct page *page, size_t pg_count, unsigned int order)
3320+{
3321+ //AGN_LOG("IN plb_destroy_pages");
3322+ AGN_DEBUG("Enter");
3323+
3324+ if (page == NULL) {
3325+ return;
3326+ }
3327+
3328+ SET_PAGES_WB(page, pg_count);
3329+ GLOBAL_FLUSH_TLB();
3330+ put_page(page);
3331+ AGP_UNLOCK_PAGE(page);
3332+
3333+ if(page_count(page) > 1) {
3334+ __free_pages(page, order);
3335+ }
3336+
3337+ atomic_dec(&agp_bridge->current_memory_agp);
3338+
3339+ AGN_DEBUG("Exit");
3340+}
3341+#endif
3342+
3343+void iegd_plb_free_by_type(struct agp_memory *curr)
3344+{
3345+ unsigned int order;
3346+
3347+ switch (curr->page_count) {
3348+ case 1:
3349+ order = 0; /* pg_count = 1 => 2 ^ 0 */
3350+ break;
3351+ case 4:
3352+ order = 2; /* pg_count = 4 => 2 ^ 2 */
3353+ break;
3354+ case 8:
3355+ order = 3; /* pg_count = 8 => 2 ^ 3 */
3356+ break;
3357+ default:
3358+ /* This case should never happen */
3359+ return;
3360+ }
3361+
3362+ agp_free_key(curr->key);
3363+ if(curr->type == AGP_PHYS_MEMORY) {
3364+ PLB_DESTROY_PAGES(curr, curr->page_count, order);
3365+ IGD_FREE_MEM(curr);
3366+ }
3367+
3368+ kfree(curr);
3369+
3370+}
3371+
3372+#endif
3373diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/global.c patch_script_temp/drivers/gpu/drm/iegd/agp/global.c
3374--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/global.c 1969-12-31 17:00:00.000000000 -0700
3375+++ patch_script_temp/drivers/gpu/drm/iegd/agp/global.c 2009-10-06 10:30:05.000000000 -0700
3376@@ -0,0 +1,142 @@
3377+/* -*- pse-c -*-
3378+ *----------------------------------------------------------------------------
3379+ * Filename: global.c
3380+ * $Revision: 1.17 $
3381+ *----------------------------------------------------------------------------
3382+ * Gart and DRM driver for Intel Embedded Graphics Driver
3383+ * Copyright © 2008, Intel Corporation.
3384+ *
3385+ * This program is free software; you can redistribute it and/or modify it
3386+ * under the terms and conditions of the GNU General Public License,
3387+ * version 2, as published by the Free Software Foundation.
3388+ *
3389+ * This program is distributed in the hope it will be useful, but WITHOUT
3390+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3391+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3392+ * more details.
3393+ *
3394+ * You should have received a copy of the GNU General Public License along with
3395+ * this program; if not, write to the Free Software Foundation, Inc.,
3396+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3397+ *
3398+ */
3399+
3400+#include "global.h"
3401+#include "intelpci.h"
3402+
3403+/* will point to the current table entries for
3404+ * current chipset */
3405+gart_dispatch_t *gart_id;
3406+
3407+/* Private data that contained chipset information */
3408+dev_private_data_t private_data;
3409+
3410+int iegd_find_device(u16 device)
3411+{
3412+ struct pci_dev *device_pdev;
3413+
3414+ device_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
3415+ /* Check for function 0. */
3416+ if(device_pdev && PCI_FUNC(device_pdev->devfn) != 0) {
3417+ device_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
3418+ device, device_pdev);
3419+ }
3420+
3421+ if(!device_pdev) {
3422+ return 0;
3423+ }
3424+
3425+ AGN_DEBUG("Device found = 0x%x\n", device);
3426+ private_data.pdev = device_pdev;
3427+ return 1;
3428+
3429+}
3430+
3431+/**
3432+ * This function is to hook the function pointer that
3433+ * belong to specific chipset, other than that this
3434+ * is the place for customization of the structure
3435+ * in case chipset in the same family have different
3436+ * architecture. Make sure to add new device id here
3437+ * if new device been introduce.
3438+ *
3439+ * parameter:
3440+ * driver_hook - Pointer to hold the structure
3441+ * did - device id
3442+ * list - lookup table for the chipset family
3443+ *
3444+ * return value:
3445+ * 0 - success
3446+ * 1 - No function hook
3447+ */
3448+int bridge_driver_init(bridge_driver_t **driver_hook,
3449+ unsigned short did, dispatch_table_t *list )
3450+{
3451+
3452+ (*driver_hook) = (bridge_driver_t *)dispatch_acquire(
3453+ gart_id->device_id, list);
3454+
3455+ /* For specific chipset implementation assign the pointer
3456+ * here. */
3457+ switch(did) {
3458+ case PCI_DEVICE_ID_GM45:
3459+ case PCI_DEVICE_ID_ELK:
3460+ case PCI_DEVICE_ID_Q45:
3461+ case PCI_DEVICE_ID_G45:
3462+ case PCI_DEVICE_ID_G41:
3463+ (*driver_hook)->aperture_sizes = iegd_igm45_sizes;
3464+ (*driver_hook)->num_aperture_sizes = 2;
3465+ break;
3466+ case PCI_DEVICE_ID_Q35:
3467+ case PCI_DEVICE_ID_Q35A2:
3468+ (*driver_hook)->aperture_sizes = iegd_iq35_sizes;
3469+ (*driver_hook)->num_aperture_sizes = 3;
3470+ break;
3471+ case PCI_DEVICE_ID_915GD:
3472+ case PCI_DEVICE_ID_915AL:
3473+ case PCI_DEVICE_ID_945G:
3474+ case PCI_DEVICE_ID_945GM:
3475+ case PCI_DEVICE_ID_945GME:
3476+ (*driver_hook)->aperture_sizes = iegd_i915_sizes;
3477+ (*driver_hook)->num_aperture_sizes = 4;
3478+ break;
3479+ case PCI_DEVICE_ID_965G:
3480+ case PCI_DEVICE_ID_946GZ:
3481+ case PCI_DEVICE_ID_G965:
3482+ case PCI_DEVICE_ID_Q965:
3483+ case PCI_DEVICE_ID_GM965:
3484+ case PCI_DEVICE_ID_GME965:
3485+ (*driver_hook)->aperture_sizes = iegd_i965_sizes;
3486+ (*driver_hook)->num_aperture_sizes = 4;
3487+ break;
3488+ case PCI_DEVICE_ID_810:
3489+ case PCI_DEVICE_ID_810DC:
3490+ case PCI_DEVICE_ID_810E:
3491+ case PCI_DEVICE_ID_815:
3492+ (*driver_hook)->aperture_sizes = intel_i810_sizes;
3493+ (*driver_hook)->num_aperture_sizes = 2;
3494+ (*driver_hook)->create_gatt_table = agp_generic_create_gatt_table;
3495+ (*driver_hook)->free_gatt_table = agp_generic_free_gatt_table;
3496+ break;
3497+ case PCI_DEVICE_ID_830M:
3498+ case PCI_DEVICE_ID_845G:
3499+ case PCI_DEVICE_ID_855:
3500+ case PCI_DEVICE_ID_865G:
3501+ (*driver_hook)->aperture_sizes = intel_i830_sizes;
3502+ (*driver_hook)->num_aperture_sizes = 4;
3503+ (*driver_hook)->create_gatt_table = iegd_alm_create_gatt_table;
3504+ (*driver_hook)->free_gatt_table = iegd_cmn_free_gatt_table;
3505+ break;
3506+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
3507+ case PCI_DEVICE_ID_PLB:
3508+ (*driver_hook)->aperture_sizes = iegd_plb_sizes;
3509+ (*driver_hook)->num_aperture_sizes = 1;
3510+ break;
3511+#endif
3512+ default:
3513+ return -1;
3514+ }
3515+
3516+ return 0;
3517+
3518+}
3519diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/pci.c patch_script_temp/drivers/gpu/drm/iegd/agp/pci.c
3520--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/agp/pci.c 1969-12-31 17:00:00.000000000 -0700
3521+++ patch_script_temp/drivers/gpu/drm/iegd/agp/pci.c 2009-10-06 10:30:05.000000000 -0700
3522@@ -0,0 +1,501 @@
3523+/* -*- pse-c -*-
3524+ *----------------------------------------------------------------------------
3525+ * Filename: pci.c
3526+ * $Revision: 1.31 $
3527+ *----------------------------------------------------------------------------
3528+ * Gart and DRM driver for Intel Embedded Graphics Driver
3529+ * Copyright © 2008, Intel Corporation.
3530+ *
3531+ * This program is free software; you can redistribute it and/or modify it
3532+ * under the terms and conditions of the GNU General Public License,
3533+ * version 2, as published by the Free Software Foundation.
3534+ *
3535+ * This program is distributed in the hope it will be useful, but WITHOUT
3536+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3537+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3538+ * more details.
3539+ *
3540+ * You should have received a copy of the GNU General Public License along with
3541+ * this program; if not, write to the Free Software Foundation, Inc.,
3542+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3543+ *
3544+ */
3545+
3546+#include <linux/init.h>
3547+#include <linux/module.h>
3548+#include <linux/pci.h>
3549+#include <linux/agp_backend.h>
3550+#include "agp.h"
3551+#include "global.h"
3552+#include "igd_gart.h"
3553+#include "intelpci.h"
3554+#include "igd_abs.h"
3555+
3556+static gart_dispatch_t gart_pci_device_table[] = {
3557+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_810, PCI_DEVICE_ID_810,
3558+ "810", 0, 0, 0,
3559+ },
3560+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_810DC, PCI_DEVICE_ID_810DC,
3561+ "810DC", 0, 0, 0,
3562+ },
3563+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_810E, PCI_DEVICE_ID_810E,
3564+ "810E", 0, 0, 0,
3565+ },
3566+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_815, PCI_DEVICE_ID_815,
3567+ "815", 0, 0, 0,
3568+ },
3569+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_830M, PCI_DEVICE_ID_830M,
3570+ "830M", 0, 0, 0,
3571+ },
3572+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_835, PCI_DEVICE_ID_835,
3573+ "835", 0, 0, 0,
3574+ },
3575+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_845G, PCI_DEVICE_ID_845G,
3576+ "845G", 0, 0, 0,
3577+ },
3578+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_855, PCI_DEVICE_ID_855,
3579+ "855", 0, 0, 0,
3580+ },
3581+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_865G, PCI_DEVICE_ID_865G,
3582+ "865G", 0, 0, 0,
3583+ },
3584+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_915GD, PCI_DEVICE_ID_915GD,
3585+ "915GD", 0, 0, 0,
3586+ },
3587+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_910GL, PCI_DEVICE_ID_910GL,
3588+ "910GL", 0, 0, 0,
3589+ },
3590+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_915AL, PCI_DEVICE_ID_915AL,
3591+ "915AL", 0, 0, 0,
3592+ },
3593+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_945G, PCI_DEVICE_ID_945G,
3594+ "945G", 0, 0, 0,
3595+ },
3596+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_945GM, PCI_DEVICE_ID_945GM,
3597+ "945GM", 0, 0, 0,
3598+ },
3599+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_945GME, PCI_DEVICE_ID_945GME,
3600+ "945GME/GSE", 0, 0, 0,
3601+ },
3602+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_Q35, PCI_DEVICE_ID_Q35,
3603+ "Q33/Q35", 0, 0, 0,
3604+ },
3605+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_Q35A2, PCI_DEVICE_ID_Q35A2,
3606+ "Q33/Q35", 0, 0, 0,
3607+ },
3608+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_965G, PCI_DEVICE_ID_965G,
3609+ "965G", 0, 0, 0,
3610+ },
3611+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_946GZ, PCI_DEVICE_ID_946GZ,
3612+ "946GZ", 0, 0, 0,
3613+ },
3614+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_G965, PCI_DEVICE_ID_G965,
3615+ "G965", 0, 0, 0,
3616+ },
3617+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_Q965, PCI_DEVICE_ID_Q965,
3618+ "Q965", 0, 0, 0,
3619+ },
3620+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_GM965, PCI_DEVICE_ID_GM965,
3621+ "GM965", 0, 0, 0,
3622+ },
3623+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_GME965, PCI_DEVICE_ID_GME965,
3624+ "GLE960/GME965", 0, 0, 0,
3625+ },
3626+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
3627+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_PLB, PCI_DEVICE_ID_PLB,
3628+ "US15", 0, 0, 0,
3629+ },
3630+#endif
3631+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_GM45, PCI_DEVICE_ID_GM45,
3632+ "GM45/GS45/GL40", 0, 0, 0,
3633+ },
3634+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_ELK, PCI_DEVICE_ID_ELK,
3635+ "Q45", 0, 0, 0,
3636+ },
3637+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_Q45, PCI_DEVICE_ID_Q45,
3638+ "Q45", 0, 0, 0,
3639+ },
3640+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_G45, PCI_DEVICE_ID_G45,
3641+ "G45", 0, 0, 0,
3642+ },
3643+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRIDGE_G41, PCI_DEVICE_ID_G41,
3644+ "G41", 0, 0, 0,
3645+ },
3646+};
3647+
3648+/* PCI device id that supported by IEGD */
3649+struct pci_device_id iegd_pci_table[] = {
3650+ ID(PCI_DEVICE_ID_BRIDGE_810),
3651+ ID(PCI_DEVICE_ID_BRIDGE_810DC),
3652+ ID(PCI_DEVICE_ID_BRIDGE_810E),
3653+ ID(PCI_DEVICE_ID_BRIDGE_815),
3654+ ID(PCI_DEVICE_ID_BRIDGE_830M),
3655+ ID(PCI_DEVICE_ID_BRIDGE_845G),
3656+ ID(PCI_DEVICE_ID_BRIDGE_855),
3657+ ID(PCI_DEVICE_ID_BRIDGE_865G),
3658+ ID(PCI_DEVICE_ID_BRIDGE_915GD),
3659+ ID(PCI_DEVICE_ID_BRIDGE_915AL),
3660+ ID(PCI_DEVICE_ID_BRIDGE_945G),
3661+ ID(PCI_DEVICE_ID_BRIDGE_945GM),
3662+ ID(PCI_DEVICE_ID_BRIDGE_945GME),
3663+ ID(PCI_DEVICE_ID_BRIDGE_965G),
3664+ ID(PCI_DEVICE_ID_BRIDGE_946GZ),
3665+ ID(PCI_DEVICE_ID_BRIDGE_G965),
3666+ ID(PCI_DEVICE_ID_BRIDGE_Q965),
3667+ ID(PCI_DEVICE_ID_BRIDGE_GM965),
3668+ ID(PCI_DEVICE_ID_BRIDGE_GME965),
3669+ ID(PCI_DEVICE_ID_BRIDGE_Q35),
3670+ ID(PCI_DEVICE_ID_BRIDGE_Q35A2),
3671+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
3672+ ID(PCI_DEVICE_ID_BRIDGE_PLB),
3673+#endif
3674+ ID(PCI_DEVICE_ID_BRIDGE_GM45),
3675+ ID(PCI_DEVICE_ID_BRIDGE_ELK),
3676+ ID(PCI_DEVICE_ID_BRIDGE_Q45),
3677+ ID(PCI_DEVICE_ID_BRIDGE_G45),
3678+ ID(PCI_DEVICE_ID_BRIDGE_G41),
3679+ { }
3680+};
3681+
3682+MODULE_DEVICE_TABLE(pci, iegd_pci_table);
3683+
3684+#include <asm/tlbflush.h>
3685+
3686+static int agp_has_pat = 0;
3687+
3688+int agp_use_pat(void)
3689+{
3690+ return agp_has_pat;
3691+}
3692+EXPORT_SYMBOL(agp_use_pat);
3693+
3694+static void agp_pat_ipi_handler(void *notused)
3695+{
3696+ u32 v1, v2;
3697+
3698+ rdmsr(MSR_IA32_CR_PAT, v1, v2);
3699+ v2 &= 0xFFFFFFF8;
3700+ v2 |= 0x00000001;
3701+ wbinvd();
3702+ wrmsr(MSR_IA32_CR_PAT, v1, v2);
3703+ __flush_tlb_all();
3704+}
3705+
3706+/*
3707+ * Set i386 PAT entry PAT4 to Write-combining memory type on all processors.
3708+ */
3709+
3710+void agp_init_pat(void)
3711+{
3712+
3713+ if (!boot_cpu_has(X86_FEATURE_PAT)) {
3714+ AGN_ERROR("PAT Feature not available\n");
3715+ return;
3716+ }
3717+ AGN_DEBUG("Enabled PAT");
3718+ if (ON_EACH_CPU(agp_pat_ipi_handler, NULL, 1, 1) != 0) {
3719+ AGN_ERROR("Timed out setting up CPU PAT.\n");
3720+ return;
3721+ }
3722+ agp_has_pat = 1;
3723+}
3724+EXPORT_SYMBOL(agp_init_pat);
3725+
3726+
3727+/* This function get called by PCI core when one of the chipset
3728+ * above detected */
3729+static int __devinit iegd_intel_probe(
3730+ struct pci_dev *pdev,
3731+ const struct pci_device_id *ent)
3732+{
3733+ bridge_data_t *bridge_device;
3734+ u8 cap_ptr = 0;
3735+ struct resource *r;
3736+ int ret;
3737+
3738+ AGN_DEBUG("Enter");
3739+ AGN_LOG("Initialize IEGD agpgart and drm");
3740+
3741+	/* Make sure this probe was triggered by one of the preferred
3742+	 * chipsets. This guarantees that we initialize only the chipset
3743+	 * that belongs to the deregistered gart module */
3744+ if(!gart_id->bridge_pdev ||
3745+ (gart_id->bridge_pdev->device != pdev->device)) {
3746+ return -ENODEV;
3747+ }
3748+
3749+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
3750+
3751+ /* Allocate memory for the bridge. This data structure then will be
3752+ * used by the agp backend and frontend */
3753+ gart_id->bridge_info = agp_alloc_bridge();
3754+ if(gart_id->bridge_info == NULL) {
3755+ return -ENOMEM;
3756+ }
3757+
3758+ /* Check for the device and initialize private data */
3759+ if(!iegd_find_device(gart_id->device_id)) {
3760+ agp_put_bridge(gart_id->bridge_info);
3761+ AGN_ERROR("Unsupported device: %x", gart_id->device_id);
3762+ return -ENODEV;
3763+ }
3764+
3765+ bridge_device = gart_id->bridge_info;
3766+
3767+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
3768+ if (gart_id->device_id == PCI_DEVICE_ID_PLB) {
3769+ agp_init_pat();
3770+ bridge_device->vm_ops = &iegd_plb_vm_ops;
3771+ }
3772+#endif
3773+
3774+ AGN_DEBUG("driver %p, id %X, list %p", bridge_device->driver,
3775+ gart_id->device_id, driver_dispatch_list);
3776+ /* Dispatch the core function based on the chipset id */
3777+ ret = bridge_driver_init((bridge_driver_t **) &bridge_device->driver,
3778+ gart_id->device_id, driver_dispatch_list);
3779+
3780+ if(ret) {
3781+ agp_put_bridge(gart_id->bridge_info);
3782+ AGN_ERROR("Device found but no function hook");
3783+ return -ENODEV;
3784+ }
3785+
3786+ bridge_device->dev = pdev;
3787+ bridge_device->capndx = cap_ptr;
3788+ bridge_device->dev_private_data = &private_data;
3789+
3790+ AGN_LOG("Intel %s chipset detected", gart_id->name);
3791+
3792+ r = &pdev->resource[0];
3793+ if (!r->start && r->end) {
3794+ if (pci_assign_resource(pdev, 0)) {
3795+ AGN_ERROR("could not assign resource 0");
3796+ agp_put_bridge(gart_id->bridge_info);
3797+ return -ENODEV;
3798+ }
3799+ }
3800+
3801+ if(pci_enable_device(pdev)) {
3802+ AGN_ERROR("Unable to enable PCI device");
3803+ agp_put_bridge(gart_id->bridge_info);
3804+ return -ENODEV;
3805+ }
3806+
3807+ if(cap_ptr) {
3808+ pci_read_config_dword(pdev,
3809+ bridge_device->capndx+PCI_AGP_STATUS,
3810+ &bridge_device->mode);
3811+ }
3812+
3813+ pci_set_drvdata(pdev, bridge_device);
3814+ AGN_DEBUG("Exit");
3815+ return agp_add_bridge(bridge_device);
3816+}
3817+
3818+static void iegd_intel_remove(struct pci_dev *pdev)
3819+{
3820+ AGN_LOG("Exit from module");
3821+}
3822+
3823+int iegd_intel_suspend(struct pci_dev *dev, pm_message_t state)
3824+{
3825+ int pm_cap;
3826+ struct pci_dev *iegd_dev;
3827+ unsigned short pci_pm_csr;
3828+
3829+ AGN_DEBUG("Enter");
3830+
3831+ if (!(IGD_IS_SUSPEND(state))) {
3832+ AGN_DEBUG("Unsupported PM event %d", state.event);
3833+ return -EINVAL;
3834+ }
3835+
3836+ iegd_dev = private_data.pdev;
3837+
3838+ /* Save our resources */
3839+ IGD_PCI_SAVE_STATE(iegd_dev, private_data.pm_save);
3840+
3841+ /* Find the PM CSR */
3842+ pm_cap = pci_find_capability(iegd_dev, PCI_CAP_ID_PM);
3843+
3844+ if (!pm_cap) {
3845+ AGN_DEBUG("No PCI PM capability record.. Exit");
3846+ return 0;
3847+ }
3848+
3849+ /* Power down the device */
3850+ pci_read_config_word(iegd_dev, pm_cap + PCI_PM_CTRL, &pci_pm_csr);
3851+ pci_pm_csr |= PCI_PM_CTRL_STATE_MASK;
3852+ pci_write_config_word(iegd_dev, pm_cap + PCI_PM_CTRL, pci_pm_csr);
3853+
3854+ AGN_DEBUG("Suspended.. Exit");
3855+ return 0;
3856+}
3857+
3858+int iegd_intel_resume(struct pci_dev *dev)
3859+{
3860+ int pm_cap;
3861+ struct pci_dev *iegd_dev;
3862+ unsigned short pci_pm_csr;
3863+
3864+ AGN_DEBUG("Enter");
3865+
3866+ iegd_dev = private_data.pdev;
3867+
3868+ /* Get back our resources */
3869+ IGD_PCI_RESTORE_STATE(iegd_dev, private_data.pm_save);
3870+
3871+ /* Find the PM CSR */
3872+ pm_cap = pci_find_capability(iegd_dev, PCI_CAP_ID_PM);
3873+
3874+ if (!pm_cap) {
3875+ AGN_DEBUG("No PCI PM capability record.. Exit");
3876+ return 0;
3877+ }
3878+
3879+ /* Power on device */
3880+ pci_read_config_word(iegd_dev, pm_cap + PCI_PM_CTRL, &pci_pm_csr);
3881+ pci_pm_csr &= ~PCI_PM_CTRL_STATE_MASK;
3882+ pci_write_config_word(iegd_dev, pm_cap + PCI_PM_CTRL, pci_pm_csr);
3883+
3884+ AGN_DEBUG("Resumed.. Exit");
3885+ return 0;
3886+}
3887+
3888+static struct pci_driver iegd_pci_driver = {
3889+ .name = "iegd-intel",
3890+ .id_table = iegd_pci_table,
3891+ .probe = iegd_intel_probe,
3892+ .remove = __devexit_p(iegd_intel_remove),
3893+ .suspend = iegd_intel_suspend,
3894+ .resume = iegd_intel_resume,
3895+};
3896+
3897+struct pci_dev *iegd_probe_device()
3898+{
3899+ int i;
3900+ struct pci_dev *dev;
3901+
3902+ AGN_DEBUG("Enter");
3903+
3904+ /* Probed for the supported devices */
3905+ for(i=0 ; i<sizeof(gart_pci_device_table)/sizeof(gart_dispatch_t);
3906+ i++) {
3907+ dev = pci_get_device(PCI_VENDOR_ID_INTEL,
3908+ (unsigned int)gart_pci_device_table[i].bridge_id, NULL);
3909+ if(dev){
3910+ gart_id = &gart_pci_device_table[i];
3911+ AGN_DEBUG("Device found.. Exit");
3912+ return dev;
3913+ }
3914+ }
3915+ AGN_DEBUG("Device not found.. Exit");
3916+ return NULL;
3917+}
3918+
3919+/**
3920+ * This is the first routine called by the init function.
3921+ * This function will probe for devices supported by IEGD.
3922+ * Once it finds the device, it will check whether a driver for
3923+ * this device exists. If it does, get the pci_driver structure
3924+ * for the existing driver and call the pci unregister function
3925+ * to deregister the existing driver and register iegd agpgart
3926+ */
3927+static int iegd_agp_init(void)
3928+{
3929+ struct pci_driver *curr_driver;
3930+ struct pci_dev *temp_pdev;
3931+
3932+ AGN_DEBUG("Enter");
3933+
3934+ /* Probe for the intel embedded graphic device chipset */
3935+ temp_pdev = iegd_probe_device();
3936+
3937+ if(!temp_pdev) {
3938+ AGN_ERROR("Probe device failed");
3939+ return -ENODEV;
3940+ }
3941+
3942+ gart_id->bridge_pdev = temp_pdev;
3943+ curr_driver = pci_dev_driver(gart_id->bridge_pdev);
3944+
3945+ if(curr_driver) {
3946+ /* FIXME: Don't know whether we have to make separate copy of this
3947+ * structure */
3948+ gart_id->old_gart = curr_driver;
3949+
3950+ /* deregister pci driver from pci core. This is needed since we
3951+ * don't want 2 agpgart reside in the kernel that respond to the
3952+ * same device id */
3953+ AGN_LOG("Unregister agpgart name %s", curr_driver->name);
3954+ pci_unregister_driver(curr_driver);
3955+ }
3956+
3957+ AGN_LOG("Registering iegd gart module");
3958+ /* Register our own to pci core */
3959+ AGN_DEBUG("Exit");
3960+ return pci_register_driver(&iegd_pci_driver);
3961+
3962+}
3963+
3964+static void iegd_restore_device(void)
3965+{
3966+ int ret;
3967+
3968+ AGN_DEBUG("Enter");
3969+
3970+ /* Decrement the reference for this pci device */
3971+ pci_dev_put(gart_id->bridge_pdev);
3972+
3973+ if(gart_id->old_gart) {
3974+ /* Register the original driver */
3975+ ret = pci_register_driver(gart_id->old_gart);
3976+ }
3977+
3978+ AGN_DEBUG("Exit");
3979+
3980+}
3981+
3982+static int iegd_gart_init(void)
3983+{
3984+ int ret;
3985+
3986+ AGN_DEBUG("Enter");
3987+
3988+ /* Find bridge based on chipset supported by IEGD */
3989+ ret = iegd_agp_init();
3990+ if(AGP_RET(ret)) {
3991+ AGN_LOG("Registering iegd drm module");
3992+ /* Initialize DRM module by calling DRM init function */
3993+ return DRM_INIT_MODULE();
3994+ } else {
3995+ /* Log the driver failed to register */
3996+ AGN_LOG("Driver registration failed");
3997+ }
3998+
3999+ AGN_DEBUG("Exit");
4000+
4001+ /* Return agp error if agp init failed */
4002+ return ret;
4003+}
4004+
4005+static void iegd_gart_exit(void)
4006+{
4007+ /* Unregister DRM module */
4008+ AGN_DEBUG("Unregister iegd DRM module");
4009+ DRM_EXIT_MODULE();
4010+
4011+ /* Remove our device from the kernel */
4012+ AGN_DEBUG("Unregister IKM module");
4013+ pci_unregister_driver(&iegd_pci_driver);
4014+
4015+ /* Restore back the old agp gart */
4016+ AGN_DEBUG("Register original module");
4017+ iegd_restore_device();
4018+}
4019+
4020+MODULE_LICENSE("GPL and additional rights");
4021+
4022+module_init(iegd_gart_init);
4023+module_exit(iegd_gart_exit);
4024diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/drm_test.c patch_script_temp/drivers/gpu/drm/iegd/drm/drm_test.c
4025--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/drm_test.c 1969-12-31 17:00:00.000000000 -0700
4026+++ patch_script_temp/drivers/gpu/drm/iegd/drm/drm_test.c 2009-10-06 10:30:05.000000000 -0700
4027@@ -0,0 +1,288 @@
4028+/* -*- pse-c -*-
4029+ *----------------------------------------------------------------------------
4030+ * Filename: drm_test.c
4031+ * $Revision: 1.3 $
4032+ *----------------------------------------------------------------------------
4033+ * Unit level test for IEGD DRM
4034+ * Copyright © 2009 Intel Corporation.
4035+ *
4036+ * This program is free software; you can redistribute it and/or modify it
4037+ * under the terms and conditions of the GNU General Public License,
4038+ * version 2, as published by the Free Software Foundation.
4039+ *
4040+ * This program is distributed in the hope it will be useful, but WITHOUT
4041+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4042+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4043+ * more details.
4044+ *
4045+ * You should have received a copy of the GNU General Public License along with
4046+ * this program; if not, write to the Free Software Foundation, Inc.,
4047+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4048+ *
4049+ */
4050+
4051+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
4052+ *
4053+ * Redistribution and use in source and binary forms, with or without
4054+ * modification, are permitted provided that the following conditions are met:
4055+ * Redistributions of source code must retain the above copyright notice,
4056+ * this list of conditions and the following disclaimer.
4057+ *
4058+ * Redistributions in binary form must reproduce the above copyright
4059+ * notice, this list of conditions and the following disclaimer in the
4060+ * documentation and/or other materials provided with the distribution.
4061+ *
4062+ * Neither the name Intel Corporation nor the names of its contributors
4063+ * may be used to endorse or promote products derived from this software
4064+ * without specific prior written permission.
4065+ *
4066+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4067+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
4068+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
4069+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
4070+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
4071+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
4072+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
4073+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
4074+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
4075+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
4076+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4077+ *
4078+ */
4079+
4080+#include <fcntl.h>
4081+#include <unistd.h>
4082+#include <sys/ioctl.h>
4083+#include <stdlib.h>
4084+#include <stdio.h>
4085+#include <string.h>
4086+
4087+#include "iegd.h"
4088+#include "iegd_drm_client.h"
4089+
4090+#define DRM_IOCTL_BASE 'd'
4091+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
4092+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
4093+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
4094+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
4095+
4096+#define DRM_IOCTL_INTEL_GETPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4097+ DRM_INTEL_GETPAGES, drm_intel_getpages_t)
4098+#define DRM_IOCTL_INTEL_FREEPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4099+ DRM_INTEL_FREEPAGES, drm_intel_freepages_t)
4100+#define DRM_IOCTL_INTEL_INFO_INIT DRM_IOW( DRM_BASE_COMMAND + \
4101+ DRM_INTEL_INFO_INIT, intel_drm_info_t)
4102+#define DRM_IOCTL_INTEL_INFO_GET DRM_IOR( DRM_BASE_COMMAND + \
4103+ DRM_INTEL_INFO_GET, intel_drm_info_t)
4104+
4105+#define PAGE_SIZE 4096
4106+
4107+#define VERBOSE "-v"
4108+
4109+int main(int argc, char *argv[])
4110+{
4111+ int file_desc, ret_value, i;
4112+ unsigned long *virt_ptr;
4113+ int failed=0;
4114+
4115+ /* Check for verbose mode */
4116+ int index;
4117+ int verbose = 0;
4118+
4119+ for (index = 1; index < argc; index++)
4120+ {
4121+ if(strcmp(argv[index], VERBOSE) == 0)
4122+ {
4123+ verbose = 1;
4124+ printf("Verbose mode.\n");
4125+ }
4126+ }
4127+
4128+ if (verbose)
4129+ {
4130+ printf("Starting client\n");
4131+ }
4132+ /* Open the drm */
4133+ file_desc=open("/dev/dri/card0",O_RDWR);
4134+
4135+ if(file_desc<0){
4136+ /* In case of a different /dev tree struct.
4137+ * try /dev/card0
4138+ */
4139+ file_desc=open("/dev/card0",O_RDWR);
4140+ }
4141+
4142+ if(file_desc<0){
4143+ printf("Can't open device file:%s\n",DRIVER_DESC);
4144+ printf("Check for root level permissions.");
4145+ printf("Reinstall IKM.\n");
4146+ exit(-1);
4147+ }
4148+
4149+ if (verbose)
4150+ {
4151+ printf("Open device file:%d\n",file_desc);
4152+ /* This the ioctl that allocates physical memory */
4153+ printf("Testing ioctl for memory allocation\n");
4154+ }
4155+
4156+ drm_intel_getpages_t getpages;
4157+ /* set the number of bytes we want the drm to allocate */
4158+ getpages.size=(PAGE_SIZE- 1000);
4159+
4160+ ret_value=ioctl(file_desc,DRM_IOCTL_INTEL_GETPAGES,&getpages);
4161+ if (verbose)
4162+ {
4163+ printf("IOCTL call memory allocation test:");
4164+ }
4165+ if(ret_value<0){
4166+ printf("DRM module failed memory allocation test.\n");
4167+ printf("Reinstall IKM.\n");
4168+ exit(-1);
4169+ }
4170+ if (verbose)
4171+ {
4172+ printf(" Success\n");
4173+ printf("size: %d,phy_address: %#x,virt_address: %#x,offset: %#x\n",
4174+ getpages.size, getpages.phy_address, getpages.virt_address,
4175+ getpages.offset);
4176+
4177+ /* test for memory access */
4178+
4179+ printf("Testing ioctl for memory access\n");
4180+ }
4181+
4182+ virt_ptr=(unsigned long *)getpages.virt_address;
4183+
4184+	/* input 0..11 into subsequent memory */
4185+
4186+ for(i=0;i<=11;i++){
4187+ *virt_ptr=i;
4188+ virt_ptr++;
4189+ }
4190+
4191+ /*read from subsequent memory */
4192+ virt_ptr=(unsigned long *)getpages.virt_address;
4193+ for(i=0;i<=11;i++){
4194+ if (verbose)
4195+ {
4196+ printf("virt_ptr @ %#x,value: %d\n",virt_ptr,*virt_ptr);
4197+ }
4198+ if(*virt_ptr!=i){
4199+ printf("Failed memory read.\n");
4200+ }
4201+ virt_ptr++;
4202+ }
4203+ if (verbose)
4204+ {
4205+ printf("IOCTL call memory access test:");
4206+ }
4207+ if(failed){
4208+ printf("DRM module failed memory access test.\n");
4209+ printf("Reinstall IKM.\n");
4210+ exit(-1);
4211+ }
4212+ if (verbose)
4213+ {
4214+ printf(" Success\n");
4215+ }
4216+ /* freeing memory */
4217+
4218+ drm_intel_freepages_t freepages;
4219+ freepages.size=getpages.size;
4220+ freepages.phy_address=getpages.phy_address;
4221+ freepages.virt_address=getpages.virt_address;
4222+ if (verbose)
4223+ {
4224+ printf("Freeing phy_address:%#x,size:%#x\n",
4225+ freepages.phy_address,freepages.size);
4226+
4227+ printf("Testing ioctl call for info init\n");
4228+ }
4229+ /* init the drm info structure in the drm and test its value */
4230+ intel_drm_info_t info;
4231+ intel_drm_info_t test_info;
4232+ info.device_id=0x456;
4233+ info.revision=333;
4234+ info.video_memory_offset=0x10245;
4235+ info.video_memory_size=987;
4236+ info.hw_status_offset=0x444;
4237+ if (verbose)
4238+ {
4239+
4240+ printf("Testing init info device_id: %#x,revision: %d,offset: %#x,"
4241+ " size: %d, hw_status_offset: %lx\n", info.device_id, info.revision,
4242+ info.video_memory_offset, info.video_memory_size,
4243+ info.hw_status_offset);
4244+ }
4245+
4246+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_INIT,&info);
4247+
4248+ if (verbose)
4249+ {
4250+ printf("Alternative data to init\n");
4251+ }
4252+ /* init the drm info structure in the drm and test its value */
4253+ info.device_id=0x123;
4254+ info.revision=456;
4255+ info.video_memory_offset=0x789;
4256+ info.video_memory_size=111;
4257+ info.hw_status_offset=0x555;
4258+
4259+ if (verbose)
4260+ {
4261+ printf("Testing init 2nd info device_id: %#x,revision: %d,offset: %#x,"
4262+ " size: %d, hw_status_offset: %lx\n", info.device_id, info.revision,
4263+ info.video_memory_offset, info.video_memory_size,
4264+ info.hw_status_offset);
4265+
4266+ printf("Get init info\n");
4267+ }
4268+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_GET,&test_info);
4269+
4270+ if (verbose)
4271+ {
4272+ printf("IOCTL call for info init:");
4273+ printf("Got init info device_id: %#x,revision: %d,"
4274+ "offset: %#x,size:%d, hw_status_offset: %lx\n", test_info.device_id,
4275+ test_info.revision, test_info.video_memory_offset,
4276+ test_info.video_memory_size, test_info.hw_status_offset);
4277+ }
4278+
4279+ /* compare with original data to see if its still the same */
4280+ info.device_id=0x456;
4281+ info.revision=333;
4282+ info.video_memory_offset=0x10245;
4283+ info.video_memory_size=987;
4284+ info.hw_status_offset=0x444;
4285+ failed=0;
4286+
4287+ if(info.device_id!=test_info.device_id){
4288+ failed=1;
4289+ }
4290+ if(info.revision!=test_info.revision){
4291+ failed=1;
4292+ }
4293+ if(info.video_memory_offset!=test_info.video_memory_offset){
4294+ failed=1;
4295+ }
4296+ if(info.video_memory_size!=test_info.video_memory_size){
4297+ failed=1;
4298+ }
4299+ if(info.hw_status_offset!=test_info.hw_status_offset){
4300+ failed=1;
4301+ }
4302+
4303+ if(failed){
4304+ printf("DRM module failed IOCTL info did not match.\n");
4305+ printf("Reinstall IKM.");
4306+ exit(-1);
4307+ }
4308+
4309+ printf("DRM successfully loaded\n");
4310+
4311+ close(file_desc);
4312+
4313+ return 0;
4314+
4315+}
4316diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/drmult.c patch_script_temp/drivers/gpu/drm/iegd/drm/drmult.c
4317--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/drmult.c 1969-12-31 17:00:00.000000000 -0700
4318+++ patch_script_temp/drivers/gpu/drm/iegd/drm/drmult.c 2009-10-06 10:30:05.000000000 -0700
4319@@ -0,0 +1,270 @@
4320+/* -*- pse-c -*-
4321+ *----------------------------------------------------------------------------
4322+ * Filename: drmult.c
4323+ * $Revision: 1.7 $
4324+ *----------------------------------------------------------------------------
4325+ * Unit level test for IEGD DRM
4326+ * Copyright © 2008, Intel Corporation.
4327+ *
4328+ * This program is free software; you can redistribute it and/or modify it
4329+ * under the terms and conditions of the GNU General Public License,
4330+ * version 2, as published by the Free Software Foundation.
4331+ *
4332+ * This program is distributed in the hope it will be useful, but WITHOUT
4333+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4334+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4335+ * more details.
4336+ *
4337+ * You should have received a copy of the GNU General Public License along with
4338+ * this program; if not, write to the Free Software Foundation, Inc.,
4339+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4340+ *
4341+ */
4342+
4343+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
4344+ *
4345+ * Redistribution and use in source and binary forms, with or without
4346+ * modification, are permitted provided that the following conditions are met:
4347+ * Redistributions of source code must retain the above copyright notice,
4348+ * this list of conditions and the following disclaimer.
4349+ *
4350+ * Redistributions in binary form must reproduce the above copyright
4351+ * notice, this list of conditions and the following disclaimer in the
4352+ * documentation and/or other materials provided with the distribution.
4353+ *
4354+ * Neither the name Intel Corporation nor the names of its contributors
4355+ * may be used to endorse or promote products derived from this software
4356+ * without specific prior written permission.
4357+ *
4358+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4359+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
4360+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
4361+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
4362+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
4363+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
4364+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
4365+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
4366+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
4367+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
4368+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4369+ *
4370+ */
4371+
4372+/*client to test the ioctl
4373+ * make sure you change the permission bits in intel.h to 0,0
4374+ * before you start using this
4375+ */
4376+
4377+#include "iegd.h"
4378+
4379+#include <fcntl.h>
4380+#include <unistd.h>
4381+#include <sys/ioctl.h>
4382+#include <stdlib.h>
4383+#include <stdio.h>
4384+
4385+#if 0
4386+#define DRM_IOCTL_BASE 'd'
4387+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
4388+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
4389+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
4390+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
4391+#endif
4392+
4393+#include "iegd_drm_client.h"
4394+
4395+#if 0
4396+#define DRM_IOCTL_INTEL_GETPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4397+ DRM_INTEL_GETPAGES, drm_intel_getpages_t)
4398+#define DRM_IOCTL_INTEL_FREEPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4399+ DRM_INTEL_FREEPAGES, drm_intel_freepages_t)
4400+#define DRM_IOCTL_INTEL_INFO_INIT DRM_IOW( DRM_BASE_COMMAND + \
4401+ DRM_INTEL_INFO_INIT, intel_drm_info_t)
4402+#define DRM_IOCTL_INTEL_INFO_GET DRM_IOR( DRM_BASE_COMMAND + \
4403+ DRM_INTEL_INFO_GET, intel_drm_info_t)
4404+#endif
4405+
4406+
4407+#define PAGE_SIZE 4096
4408+int main()
4409+{
4410+int file_desc, ret_value;
4411+printf("Starting client\n");
4412+/* Open the drm */
4413+file_desc=open("/dev/dri/card0",O_RDWR);
4414+
4415+if(file_desc<0){
4416+/* Suse has a different /dev tree struct.
4417+ * try /dev/card0
4418+ */
4419+
4420+file_desc=open("/dev/card0",O_RDWR);
4421+
4422+}
4423+
4424+if(file_desc<0){
4425+ printf("Can't open device file:%s\n",DRIVER_DESC);
4426+ exit(-1);
4427+}
4428+
4429+printf("Open device file:%d\n",file_desc);
4430+
4431+
4432+/* Test interrupt IOCTL */
4433+interrupt_info_t irq_info;
4434+
4435+irq_info.req_status = 0;
4436+irq_info.req_type = READ_INT; /* CLEAR_INT, WAIT_INT */
4437+irq_info.in[0] = 0xffffffff;
4438+irq_info.in[1] = 0xffffffff;
4439+irq_info.in[2] = 0xffffffff;
4440+irq_info.in[3] = 0xffffffff;
4441+irq_info.in[4] = 0xa5a5a5a5;
4442+irq_info.in[5] = 0xdeadbeef;
4443+
4444+ret_value = ioctl(file_desc, DRM_IOCTL_INTEL_INTERRUPT, &irq_info);
4445+printf("ULT IOCTL call read interrupt tests: %d\n\n", irq_info.req_status);
4446+
4447+irq_info.req_status = 0;
4448+irq_info.req_type = WAIT_INT;
4449+irq_info.in[0] = 0xffffffff;
4450+irq_info.in[1] = 0xffffffff;
4451+irq_info.in[2] = 0xffffffff;
4452+irq_info.in[3] = 0xffffffff;
4453+
4454+ret_value = ioctl(file_desc, DRM_IOCTL_INTEL_INTERRUPT, &irq_info);
4455+printf("ULT IOCTL call wait interrupt tests: %d\n\n", irq_info.req_status);
4456+
4457+
4458+
4459+/* This the ioctl that allocates physical memory */
4460+printf("ULT: Testing ioctl for memory allocation\n");
4461+
4462+drm_intel_getpages_t getpages;
4463+/* set the number of bytes we want the drm to allocate */
4464+getpages.size=(PAGE_SIZE- 1000);
4465+
4466+ret_value=ioctl(file_desc,DRM_IOCTL_INTEL_GETPAGES,&getpages);
4467+printf("ULT IOCTL call memory allocation test:");
4468+if(ret_value<0){
4469+ printf(" Failed\n");
4470+ exit(-1);
4471+}
4472+printf(" Success\n");
4473+printf("size%d,phy_address:%#x,virt_address:%#x,offset:%#x\n",getpages.size,getpages.phy_address,getpages.virt_address,getpages.offset);
4474+
4475+/* test for memory access */
4476+
4477+printf("ULT: Testing ioctl for memory access\n");
4478+int i;
4479+unsigned long *virt_ptr;
4480+
4481+virt_ptr=(unsigned long *)getpages.virt_address;
4482+
4483+/* input 0..11 into subsequent memory */
4484+
4485+for(i=0;i<=11;i++){
4486+*virt_ptr=i;
4487+virt_ptr++;
4488+
4489+}
4490+
4491+/*read from subsequent memory */
4492+int failed=0;
4493+virt_ptr=(unsigned long *)getpages.virt_address;
4494+for(i=0;i<=11;i++){
4495+ printf("virt_ptr@%#x,value:%d\n",virt_ptr,*virt_ptr);
4496+ if(*virt_ptr!=i){
4497+ printf("Test failed!\n");
4498+ }
4499+virt_ptr++;
4500+}
4501+printf("ULT IOCTL call memory access test:");
4502+if(failed){
4503+ printf(" Failed\n");
4504+ exit(-1);
4505+}
4506+
4507+ printf(" Success\n");
4508+/* freeing memory */
4509+
4510+drm_intel_freepages_t freepages;
4511+freepages.size=getpages.size;
4512+freepages.phy_address=getpages.phy_address;
4513+freepages.virt_address=getpages.virt_address;
4514+printf("Freeing phy_address:%#x,size:%#x\n",freepages.phy_address,freepages.size);
4515+
4516+/* init the drm info structure in the drm and test its value */
4517+
4518+printf("ULT: Testing ioctl call for info init\n");
4519+ intel_drm_info_t info;
4520+ intel_drm_info_t test_info;
4521+ info.device_id=0x456;
4522+ info.revision=333;
4523+ info.video_memory_offset=0x10245;
4524+ info.video_memory_size=987;
4525+ info.hw_status_offset=0x444;
4526+
4527+ printf("Testing init info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
4528+ info.device_id,info.revision,info.video_memory_offset,info.video_memory_size,info.hw_status_offset);
4529+
4530+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_INIT,&info);
4531+
4532+/* init the drm info structure in the drm and test its value */
4533+printf("Alternative data to init\n");
4534+ info.device_id=0x123;
4535+ info.revision=456;
4536+ info.video_memory_offset=0x789;
4537+ info.video_memory_size=111;
4538+ info.hw_status_offset=0x555;
4539+
4540+ printf("Testing init 2nd info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
4541+ info.device_id,info.revision,info.video_memory_offset,info.video_memory_size,info.hw_status_offset);
4542+
4543+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_INIT,&info);
4544+
4545+
4546+printf("Get init info\n");
4547+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_GET,&test_info);
4548+
4549+ printf("Got init info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
4550+ test_info.device_id,test_info.revision,test_info.video_memory_offset,test_info.video_memory_size,test_info.hw_status_offset);
4551+/* compare with original data to see if its still the same */
4552+info.device_id=0x456;
4553+info.revision=333;
4554+info.video_memory_offset=0x10245;
4555+info.video_memory_size=987;
4556+info.hw_status_offset=0x444;
4557+failed=0;
4558+
4559+if(info.device_id!=test_info.device_id){
4560+ failed=1;
4561+}
4562+if(info.revision!=test_info.revision){
4563+ failed=1;
4564+}
4565+if(info.video_memory_offset!=test_info.video_memory_offset){
4566+ failed=1;
4567+}
4568+if(info.video_memory_size!=test_info.video_memory_size){
4569+ failed=1;
4570+}
4571+if(info.hw_status_offset!=test_info.hw_status_offset){
4572+ failed=1;
4573+}
4574+
4575+printf("ULT IOCTL call for info init:");
4576+if(failed){
4577+ printf(" Failed\n");
4578+ exit(-1);
4579+}
4580+
4581+ printf(" Success\n");
4582+
4583+close(file_desc);
4584+/*
4585+sleep(100000000000);
4586+*/
4587+return 0;
4588+
4589+}
4590diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd.h
4591--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd.h 1969-12-31 17:00:00.000000000 -0700
4592+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd.h 2009-10-06 10:30:05.000000000 -0700
4593@@ -0,0 +1,117 @@
4594+
4595+/* -*- pse-c -*-
4596+ *----------------------------------------------------------------------------
4597+ * Filename: iegd.h
4598+ * $Revision: 1.7 $
4599+ *----------------------------------------------------------------------------
4600+ * Gart and DRM driver for Intel Embedded Graphics Driver
4601+ * Copyright © 2008, Intel Corporation.
4602+ *
4603+ * This program is free software; you can redistribute it and/or modify it
4604+ * under the terms and conditions of the GNU General Public License,
4605+ * version 2, as published by the Free Software Foundation.
4606+ *
4607+ * This program is distributed in the hope it will be useful, but WITHOUT
4608+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4609+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4610+ * more details.
4611+ *
4612+ * You should have received a copy of the GNU General Public License along with
4613+ * this program; if not, write to the Free Software Foundation, Inc.,
4614+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4615+ *
4616+ */
4617+
4618+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
4619+ *
4620+ * Redistribution and use in source and binary forms, with or without
4621+ * modification, are permitted provided that the following conditions are met:
4622+ * Redistributions of source code must retain the above copyright notice,
4623+ * this list of conditions and the following disclaimer.
4624+ *
4625+ * Redistributions in binary form must reproduce the above copyright
4626+ * notice, this list of conditions and the following disclaimer in the
4627+ * documentation and/or other materials provided with the distribution.
4628+ *
4629+ * Neither the name Intel Corporation nor the names of its contributors
4630+ * may be used to endorse or promote products derived from this software
4631+ * without specific prior written permission.
4632+ *
4633+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4634+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
4635+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
4636+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
4637+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
4638+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
4639+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
4640+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
4641+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
4642+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
4643+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4644+ *
4645+ */
4646+#include <linux/version.h>
4647+#ifndef __IEGD_H__
4648+#define __IEGD_H__
4649+
4650+/* General customization:
4651+ */
4652+#define __HAVE_AGP 1
4653+#define __MUST_HAVE_AGP 0
4654+#define __HAVE_MTRR 0
4655+#define __HAVE_CTX_BITMAP 1
4656+
4657+#define DRIVER_AUTHOR " "
4658+
4659+#define DRIVER_NAME "iegd"
4660+#define DRIVER_DESC "Intel DRM"
4661+#define DRIVER_DATE "20081022"
4662+
4663+#define DRIVER_MAJOR 1
4664+#define DRIVER_MINOR 0
4665+#define DRIVER_PATCHLEVEL 1
4666+
4667+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,5)
4668+#define KERNEL265 1
4669+#endif
4670+
4671+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)) && \
4672+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)))
4673+#define KERNEL2611 1
4674+#endif
4675+
4676+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) && \
4677+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)))
4678+#define KERNEL2615 1
4679+#endif
4680+
4681+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
4682+#define KERNEL2624 1
4683+#endif
4684+
4685+#ifndef KERNEL265
4686+#define KERNEL265 0
4687+#endif
4688+
4689+#ifndef KERNEL2611
4690+#define KERNEL2611 0
4691+#endif
4692+
4693+#ifndef KERNEL2615
4694+#define KERNEL2615 0
4695+#endif
4696+
4697+#ifndef KERNEL2624
4698+#define KERNEL2624 0
4699+#endif
4700+
4701+/* For some arcane reasons certain stuff needs to be defined in this file.
4702+ * This is being defined in intel_interface_265.h.If not the drm won't
4703+ * compile properly.
4704+ */
4705+#include "iegd_interface_265.h"
4706+#include "iegd_interface_2611.h"
4707+#include "iegd_interface_2615.h"
4708+#include "iegd_interface_2624.h"
4709+
4710+#endif
4711diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drm.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drm.h
4712--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drm.h 1969-12-31 17:00:00.000000000 -0700
4713+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drm.h 2009-10-06 10:30:05.000000000 -0700
4714@@ -0,0 +1,116 @@
4715+/* -*- pse-c -*-
4716+ *----------------------------------------------------------------------------
4717+ * Filename: iegd_drm.h
4718+ * $Revision: 1.7 $
4719+ *----------------------------------------------------------------------------
4720+ * Gart and DRM driver for Intel Embedded Graphics Driver
4721+ * Copyright © 2008, Intel Corporation.
4722+ *
4723+ * This program is free software; you can redistribute it and/or modify it
4724+ * under the terms and conditions of the GNU General Public License,
4725+ * version 2, as published by the Free Software Foundation.
4726+ *
4727+ * This program is distributed in the hope it will be useful, but WITHOUT
4728+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4729+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4730+ * more details.
4731+ *
4732+ * You should have received a copy of the GNU General Public License along with
4733+ * this program; if not, write to the Free Software Foundation, Inc.,
4734+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4735+ *
4736+ */
4737+
4738+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
4739+ *
4740+ * Redistribution and use in source and binary forms, with or without
4741+ * modification, are permitted provided that the following conditions are met:
4742+ * Redistributions of source code must retain the above copyright notice,
4743+ * this list of conditions and the following disclaimer.
4744+ *
4745+ * Redistributions in binary form must reproduce the above copyright
4746+ * notice, this list of conditions and the following disclaimer in the
4747+ * documentation and/or other materials provided with the distribution.
4748+ *
4749+ * Neither the name Intel Corporation nor the names of its contributors
4750+ * may be used to endorse or promote products derived from this software
4751+ * without specific prior written permission.
4752+ *
4753+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4754+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
4755+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
4756+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
4757+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
4758+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
4759+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
4760+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
4761+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
4762+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
4763+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4764+ *
4765+ */
4766+
4767+#ifndef _IEGD_DRM_H__
4768+#define _IEGD_DRM_H__
4769+
4770+#include "iegd_drm_client.h"
4771+
4772+/* INTEL specific ioctls
4773+ * The device specific ioctl range is 0x40 to 0x79.
4774+ */
4775+
4776+#define DRM_IOCTL_INTEL_GETPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4777+ DRM_INTEL_GETPAGES, drm_intel_getpages_t)
4778+#define DRM_IOCTL_INTEL_FREEPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4779+ DRM_INTEL_FREEPAGES, drm_intel_freepages_t)
4780+#define DRM_IOCTL_INTEL_INFO_INIT DRM_IOW( DRM_BASE_COMMAND + \
4781+ DRM_INTEL_INFO_INIT, intel_drm_info_t)
4782+#define DRM_IOCTL_INTEL_INFO_GET DRM_IOR( DRM_BASE_COMMAND + \
4783+ DRM_INTEL_INFO_GET, intel_drm_info_t)
4784+#define DRM_IOCTL_INTEL_INTERRUPT DRM_IOWR( DRM_BASE_COMMAND + \
4785+ DRM_INTEL_INTERRUPT, interrupt_info_t)
4786+
4787+/* New ioctl to set kernel params:
4788+ */
4789+typedef struct drm_intel_listpages {
4790+ int pid;
4791+ int size;
4792+ unsigned long phy_address;
4793+ unsigned long virt_address;
4794+ unsigned long offset;
4795+} drm_intel_listpages_t;
4796+
4797+typedef struct drm_intel_list{
4798+ struct list_head head;
4799+ drm_intel_listpages_t *page;
4800+}drm_intel_list_t;
4801+/*
4802+ * This is the basic information structure that is obtained from the
4803+ * IEGD XFree driver.
4804+ */
4805+typedef struct intel_device_private{
4806+ drm_intel_list_t *pagelist;
4807+ intel_drm_info_t *info_ptr;
4808+ spinlock_t irqmask_lock;
4809+ uint8_t *sgx_reg;
4810+ uint8_t *vdc_reg;
4811+ uint8_t *msvdx_reg;
4812+ uint32_t sgx_irq_mask;
4813+ uint32_t sgx_irq_mask2;
4814+ uint32_t vdc_irq_mask;
4815+ uint32_t msvdx_irq_mask;
4816+ /* interrupt status bits returned once woken up */
4817+ uint32_t interrupt_status;
4818+ int irq_enabled;
4819+ /* condition to wake up on */
4820+ unsigned int event_present;
4821+ wait_queue_head_t event_queue;
4822+ /* interrupts that have already occured */
4823+ unsigned int out_vdc;
4824+ unsigned int out_sgx;
4825+ unsigned int out_sgx2;
4826+ unsigned int out_mtx;
4827+} intel_device_private_t;
4828+
4829+#endif /* _INTEL_DRM_H_ */
4830+
4831diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drm_client.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drm_client.h
4832--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drm_client.h 1969-12-31 17:00:00.000000000 -0700
4833+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drm_client.h 2009-10-06 10:30:05.000000000 -0700
4834@@ -0,0 +1,139 @@
4835+/* -*- pse-c -*-
4836+ *----------------------------------------------------------------------------
4837+ * Filename: iegd_drm_client.h
4838+ * $Revision: 1.7 $
4839+ *----------------------------------------------------------------------------
4840+ * Gart and DRM driver for Intel Embedded Graphics Driver
4841+ * Copyright © 2008, Intel Corporation.
4842+ *
4843+ * This program is free software; you can redistribute it and/or modify it
4844+ * under the terms and conditions of the GNU General Public License,
4845+ * version 2, as published by the Free Software Foundation.
4846+ *
4847+ * This program is distributed in the hope it will be useful, but WITHOUT
4848+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4849+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4850+ * more details.
4851+ *
4852+ * You should have received a copy of the GNU General Public License along with
4853+ * this program; if not, write to the Free Software Foundation, Inc.,
4854+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4855+ *
4856+ */
4857+
4858+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
4859+ *
4860+ * Redistribution and use in source and binary forms, with or without
4861+ * modification, are permitted provided that the following conditions are met:
4862+ * Redistributions of source code must retain the above copyright notice,
4863+ * this list of conditions and the following disclaimer.
4864+ *
4865+ * Redistributions in binary form must reproduce the above copyright
4866+ * notice, this list of conditions and the following disclaimer in the
4867+ * documentation and/or other materials provided with the distribution.
4868+ *
4869+ * Neither the name Intel Corporation nor the names of its contributors
4870+ * may be used to endorse or promote products derived from this software
4871+ * without specific prior written permission.
4872+ *
4873+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4874+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
4875+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
4876+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
4877+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
4878+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
4879+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
4880+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
4881+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
4882+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
4883+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4884+ *
4885+ */
4886+
4887+#ifndef __IEGD_DRM_CLIENT_H__
4888+#define __IEGD_DRM_CLIENT_H__
4889+
4890+/* Requests made from client to drm */
4891+#define CLEAR_INT 1
4892+#define WAIT_INT 2
4893+#define READ_INT 3
4894+#define UNMASK_INT 4
4895+#define MASK_INT 5
4896+
4897+/* Responses returned to the client from drm */
4898+#define INT_INVALID -1
4899+#define INT_NOOP 0
4900+#define INT_CLEARED 1
4901+#define INT_HANDLED 2
4902+#define INT_READ 3
4903+#define INT_STORED 4
4904+#define INT_TIMEOUT 5
4905+
4906+typedef struct drm_intel_getpages {
4907+ int size;
4908+ unsigned long phy_address;
4909+ unsigned long virt_address;
4910+ unsigned long offset;
4911+} drm_intel_getpages_t;
4912+
4913+typedef struct drm_intel_freepages {
4914+ int size;
4915+ unsigned long phy_address;
4916+ unsigned long virt_address;
4917+ unsigned long offset;
4918+} drm_intel_freepages_t;
4919+
4920+/*
4921+ * This is the basic information structure that is is obtained from the
4922+ * IEGD drm driver.
4923+ */
4924+typedef struct _intel_drm_info {
4925+ unsigned long device_id;
4926+ unsigned long revision;
4927+ unsigned long video_memory_offset;
4928+ unsigned long video_memory_size;
4929+ unsigned long hw_status_offset;
4930+} intel_drm_info_t;
4931+
4932+typedef struct {
4933+ /* request status returned to client */
4934+ unsigned int req_status;
4935+ /* what type of request is being made to drm (clear/wait/read) */
4936+ unsigned int req_type;
4937+ /* which interrupts to clear or look for */
4938+ unsigned long in[8]; /* Array of device dependant mask/request bits */
4939+ /* interrupts that have already occured, returned to the client */
4940+ unsigned long out[8]; /* Array of device dependant status bits */
4941+} interrupt_info_t;
4942+
4943+#define DRM_IOCTL_BASE 'd'
4944+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
4945+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
4946+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
4947+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
4948+
4949+/* IOCTL numbers to be used along side drmCommand* in Xserver
4950+ * example taken from intel_dri.c:
4951+ * drmCommandWrite(iptr->drm_sub_fd
4952+ * , DRM_INTEL_INFO_INIT
4953+ * , &info,sizeof(intel_drm_info_t)
4954+ */
4955+
4956+#define DRM_BASE_COMMAND 0x40
4957+#define DRM_INTEL_GETPAGES 0x01
4958+#define DRM_INTEL_FREEPAGES 0x02
4959+#define DRM_INTEL_INFO_INIT 0x03
4960+#define DRM_INTEL_INFO_GET 0x04
4961+#define DRM_INTEL_INTERRUPT 0x05
4962+
4963+#define DRM_IOCTL_INTEL_GETPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4964+ DRM_INTEL_GETPAGES, drm_intel_getpages_t)
4965+#define DRM_IOCTL_INTEL_FREEPAGES DRM_IOWR(DRM_BASE_COMMAND + \
4966+ DRM_INTEL_FREEPAGES, drm_intel_freepages_t)
4967+#define DRM_IOCTL_INTEL_INFO_INIT DRM_IOW( DRM_BASE_COMMAND + \
4968+ DRM_INTEL_INFO_INIT, intel_drm_info_t)
4969+#define DRM_IOCTL_INTEL_INFO_GET DRM_IOR( DRM_BASE_COMMAND + \
4970+ DRM_INTEL_INFO_GET, intel_drm_info_t)
4971+#define DRM_IOCTL_INTEL_INTERRUPT DRM_IOWR( DRM_BASE_COMMAND + \
4972+ DRM_INTEL_INTERRUPT, interrupt_info_t)
4973+#endif
4974diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drv.c patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drv.c
4975--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drv.c 1969-12-31 17:00:00.000000000 -0700
4976+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drv.c 2009-10-06 10:30:05.000000000 -0700
4977@@ -0,0 +1,59 @@
4978+/* -*- pse-c -*-
4979+ *----------------------------------------------------------------------------
4980+ * Filename: iegd_drv.c
4981+ * $Revision: 1.5 $
4982+ *----------------------------------------------------------------------------
4983+ * Gart and DRM driver for Intel Embedded Graphics Driver
4984+ * Copyright © 2008, Intel Corporation.
4985+ *
4986+ * This program is free software; you can redistribute it and/or modify it
4987+ * under the terms and conditions of the GNU General Public License,
4988+ * version 2, as published by the Free Software Foundation.
4989+ *
4990+ * This program is distributed in the hope it will be useful, but WITHOUT
4991+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4992+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4993+ * more details.
4994+ *
4995+ * You should have received a copy of the GNU General Public License along with
4996+ * this program; if not, write to the Free Software Foundation, Inc.,
4997+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4998+ *
4999+ */
5000+
5001+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
5002+ *
5003+ * Redistribution and use in source and binary forms, with or without
5004+ * modification, are permitted provided that the following conditions are met:
5005+ * Redistributions of source code must retain the above copyright notice,
5006+ * this list of conditions and the following disclaimer.
5007+ *
5008+ * Redistributions in binary form must reproduce the above copyright
5009+ * notice, this list of conditions and the following disclaimer in the
5010+ * documentation and/or other materials provided with the distribution.
5011+ *
5012+ * Neither the name Intel Corporation nor the names of its contributors
5013+ * may be used to endorse or promote products derived from this software
5014+ * without specific prior written permission.
5015+ *
5016+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
5017+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
5018+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5019+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5020+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5021+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5022+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
5023+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
5024+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
5025+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
5026+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5027+ *
5028+ */
5029+
5030+#include <linux/config.h>
5031+#include "iegd.h"
5032+#include <drmP.h>
5033+#include <drm.h>
5034+#include "iegd_drm.h"
5035+#include "iegd_drv.h"
5036+
5037diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drv.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drv.h
5038--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_drv.h 1969-12-31 17:00:00.000000000 -0700
5039+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_drv.h 2009-10-06 10:30:05.000000000 -0700
5040@@ -0,0 +1,216 @@
5041+
5042+/* -*- pse-c -*-
5043+ *----------------------------------------------------------------------------
5044+ * Filename: iegd_drv.h
5045+ * $Revision: 1.15 $
5046+ *----------------------------------------------------------------------------
5047+ * Gart and DRM driver for Intel Embedded Graphics Driver
5048+ * Copyright © 2008, Intel Corporation.
5049+ *
5050+ * This program is free software; you can redistribute it and/or modify it
5051+ * under the terms and conditions of the GNU General Public License,
5052+ * version 2, as published by the Free Software Foundation.
5053+ *
5054+ * This program is distributed in the hope it will be useful, but WITHOUT
5055+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
5056+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
5057+ * more details.
5058+ *
5059+ * You should have received a copy of the GNU General Public License along with
5060+ * this program; if not, write to the Free Software Foundation, Inc.,
5061+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5062+ *
5063+ */
5064+
5065+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
5066+ *
5067+ * Redistribution and use in source and binary forms, with or without
5068+ * modification, are permitted provided that the following conditions are met:
5069+ * Redistributions of source code must retain the above copyright notice,
5070+ * this list of conditions and the following disclaimer.
5071+ *
5072+ * Redistributions in binary form must reproduce the above copyright
5073+ * notice, this list of conditions and the following disclaimer in the
5074+ * documentation and/or other materials provided with the distribution.
5075+ *
5076+ * Neither the name Intel Corporation nor the names of its contributors
5077+ * may be used to endorse or promote products derived from this software
5078+ * without specific prior written permission.
5079+ *
5080+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
5081+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
5082+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5083+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5084+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5085+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5086+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
5087+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
5088+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
5089+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
5090+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5091+ *
5092+ */
5093+
5094+#ifndef _IEGD_DRV_H_
5095+#define _IEGD_DRV_H_
5096+
5097+#define KB(x) ((x) * 1024)
5098+#define MB(x) (KB (KB (x)))
5099+#define GB(x) (MB (KB (x)))
5100+#include "iegd_drm.h"
5101+#include "igd_gart.h"
5102+
5103+/* Define the PCI IDs below */
5104+#define INTEL_PCI_IDS \
5105+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5106+ {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5107+ {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5108+ {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5109+ {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5110+ {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5111+ {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5112+ {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5113+ {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5114+ {0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5115+ {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5116+ {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5117+ {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5118+ {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5119+ {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5120+ {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5121+ {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5122+ {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5123+ {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5124+ {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5125+ {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5126+ {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5127+ {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
5128+ {0, 0, 0}
5129+
5130+/* Latest kernel remove this macro from drmP.h */
5131+#ifndef VM_OFFSET
5132+#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
5133+#endif
5134+
5135+#ifndef DRMFILE
5136+#define DRMFILE struct file *
5137+#endif
5138+
5139+/*
5140+ * We check kernel version here because in kernel 2.6.23 onward some of the
5141+ * structure definition for the drm have been change. They have remove all the
5142+ * typedef for the drm data structure to follow kernel coding guidelines. This
5143+ * causing backward compatibility problem with IKM. Since only the typedef and
5144+ * the way they handling link list are changing, to create separate file just
5145+ * for handling this changes are redundant since implementation wise are
5146+ * still the same.
5147+ */
5148+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5149+typedef struct drm_device drm_device_t;
5150+typedef struct drm_file drm_file_t;
5151+typedef struct drm_map drm_map_t;
5152+typedef struct drm_map_list drm_map_list_t;
5153+typedef struct drm_vma_entry drm_vma_entry_t;
5154+typedef struct drm_ioctl_desc drm_ioctl_desc_t;
5155+
5156+#define INSERT_VMA() \
5157+ mutex_lock(&dev->struct_mutex); \
5158+ vma_entry->vma = vma; \
5159+ vma_entry->pid = current->pid; \
5160+ list_add(&vma_entry->head, &dev->vmalist); \
5161+ mutex_unlock(&dev->struct_mutex);
5162+
5163+#define LIST_FOR_EACH(l, d) list_for_each((l), &(d)->maplist)
5164+
5165+#else
5166+
5167+#define INSERT_VMA() \
5168+ mutex_lock(&dev->struct_mutex); \
5169+ vma_entry->vma = vma; \
5170+ vma_entry->next = dev->vmalist; \
5171+ vma_entry->pid = current->pid; \
5172+ dev->vmalist = vma_entry; \
5173+ mutex_unlock(&dev->struct_mutex);
5174+
5175+#define LIST_FOR_EACH(l, d) list_for_each((l), &(d)->maplist->head)
5176+
5177+#endif /* #if LINUX_VERSION_CODE */
5178+
5179+/* Define the prototype and interfaces for functions for the different
5180+ * kernel version below.
5181+ */
5182+
5183+/* function definition in intel_interface.c */
5184+extern int intel_mmap_buffers(struct file *filp,struct vm_area_struct *vma);
5185+
5186+/* function definition to get pages this is in intel_interface*/
5187+extern int intel_getpages( drm_device_t *dev,struct file *filp, unsigned long arg );
5188+
5189+extern int intel_freepages(drm_device_t *dev , unsigned long arg );
5190+
5191+extern int intel_drm_info_init( drm_device_t *dev, unsigned long arg );
5192+
5193+extern int intel_drm_info_get( drm_device_t *dev, unsigned long arg );
5194+
5195+extern int intel_postinit(intel_device_private_t **priv);
5196+
5197+extern int intel_prerelease(drm_device_t *dev);
5198+
5199+/* Functions in intel_interface_265.c used in 2.6.5 kernel and below */
5200+
5201+extern int intel_postinit_265(drm_device_t *dev);
5202+
5203+extern int intel_prerelease_265(drm_device_t *dev);
5204+
5205+extern int intel_getpages_265( struct inode *inode, struct file *filp,
5206+ unsigned int cmd, unsigned long arg );
5207+
5208+extern int intel_freepages_265( struct inode *inode, struct file *filp,
5209+ unsigned int cmd, unsigned long arg );
5210+
5211+extern int intel_drm_info_init_265( struct inode *inode, struct file *filp,
5212+ unsigned int cmd, unsigned long arg );
5213+
5214+extern int intel_drm_info_get_265( struct inode *inode, struct file *filp,
5215+ unsigned int cmd, unsigned long arg );
5216+
5217+/* Functions in intel_interface_2611.c used in 2.6.11 kernel and above */
5218+
5219+extern int intel_postinit_2611(struct drm_device *dev,unsigned long flags);
5220+
5221+extern void intel_prerelease_2611(drm_device_t *dev,DRMFILE filp);
5222+
5223+extern int intel_getpages_2611( struct inode *inode, struct file *filp,
5224+ unsigned int cmd, unsigned long arg );
5225+
5226+extern int intel_freepages_2611( struct inode *inode, struct file *filp,
5227+ unsigned int cmd, unsigned long arg );
5228+
5229+extern int intel_drm_info_init_2611( struct inode *inode, struct file *filp,
5230+ unsigned int cmd, unsigned long arg );
5231+
5232+extern int intel_drm_info_get_2611( struct inode *inode, struct file *filp,
5233+ unsigned int cmd, unsigned long arg );
5234+
5235+extern unsigned long intel_alloc_pages(int order, int area);
5236+
5237+extern void intel_free_pages(unsigned long address, int order, int area);
5238+
5239+extern int drm_plb_mmap(struct file *, struct vm_area_struct *);
5240+
5241+extern struct vm_operations_struct iegd_plb_vm_ops_drm;
5242+extern gart_dispatch_t *gart_id;
5243+extern dev_private_data_t private_data;
5244+
5245+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
5246+extern void psb_irq_preinstall(struct drm_device *dev);
5247+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
5248+extern void psb_irq_postinstall(struct drm_device *dev);
5249+#else
5250+extern int psb_irq_postinstall(struct drm_device *dev);
5251+#endif
5252+extern void psb_irq_uninstall(struct drm_device *dev);
5253+extern int psb_init(intel_device_private_t *priv);
5254+int intel_drm_plb_interrupts( drm_device_t *dev, void *data );
5255+#endif
5256+
5257diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface.c patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface.c
5258--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface.c 1969-12-31 17:00:00.000000000 -0700
5259+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface.c 2009-10-06 10:30:05.000000000 -0700
5260@@ -0,0 +1,888 @@
5261+/* -*- pse-c -*-
5262+ *----------------------------------------------------------------------------
5263+ * Filename: iegd_interface.c
5264+ * $Revision: 1.23 $
5265+ *----------------------------------------------------------------------------
5266+ * Gart and DRM driver for Intel Embedded Graphics Driver
5267+ * Copyright © 2008, Intel Corporation.
5268+ *
5269+ * This program is free software; you can redistribute it and/or modify it
5270+ * under the terms and conditions of the GNU General Public License,
5271+ * version 2, as published by the Free Software Foundation.
5272+ *
5273+ * This program is distributed in the hope it will be useful, but WITHOUT
5274+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
5275+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
5276+ * more details.
5277+ *
5278+ * You should have received a copy of the GNU General Public License along with
5279+ * this program; if not, write to the Free Software Foundation, Inc.,
5280+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5281+ *
5282+ */
5283+
5284+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
5285+ *
5286+ * Redistribution and use in source and binary forms, with or without
5287+ * modification, are permitted provided that the following conditions are met:
5288+ * Redistributions of source code must retain the above copyright notice,
5289+ * this list of conditions and the following disclaimer.
5290+ *
5291+ * Redistributions in binary form must reproduce the above copyright
5292+ * notice, this list of conditions and the following disclaimer in the
5293+ * documentation and/or other materials provided with the distribution.
5294+ *
5295+ * Neither the name Intel Corporation nor the names of its contributors
5296+ * may be used to endorse or promote products derived from this software
5297+ * without specific prior written permission.
5298+ *
5299+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
5300+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
5301+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5302+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5303+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5304+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5305+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
5306+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
5307+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
5308+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
5309+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5310+ *
5311+ */
5312+
5313+#include "iegd.h"
5314+#include "igd_abs.h"
5315+#include "drmP.h"
5316+#include "drm.h"
5317+
5318+#include "iegd_drm.h"
5319+#include "iegd_drv.h"
5320+#include "psb_intregs.h"
5321+
5322+#ifndef MSR_IA32_CR_PAT
5323+#define MSR_IA32_CR_PAT 0x0277
5324+#endif
5325+#ifndef _PAGE_PAT
5326+#define _PAGE_PAT 0x080
5327+#endif
5328+
5329+extern void agp_init_pat(void);
5330+extern int agp_use_pat (void);
5331+
5332+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
5333+int drm_irq_install(drm_device_t *dev);
5334+#endif
5335+
5336+/* get intel_buffer_fops from the interface_###.c files */
5337+extern struct file_operations intel_buffer_fops;
5338+
5339+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
5340+extern struct vm_operations_struct iegd_plb_vm_ops;
5341+#endif
5342+
5343+
5344+/* Global variable to keep track the amount of memory we are using */
5345+static int memory;
5346+
5347+/* Our own mmap function to memory map physical to user space memory
5348+ */
5349+int intel_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
5350+{
5351+ DRM_DEBUG("\n");
5352+
5353+ lock_kernel();
5354+ vma->vm_flags |= (VM_IO | VM_RESERVED);
5355+ vma->vm_file = filp;
5356+ unlock_kernel();
5357+
5358+ DRM_DEBUG("VM_OFFSET(vma):%#x\n",(unsigned int)VM_OFFSET(vma));
5359+ if (REMAP_PAGE( vma,
5360+ vma->vm_start,
5361+ VM_OFFSET(vma),
5362+ (vma->vm_end - vma->vm_start),
5363+ pgprot_noncached(vma->vm_page_prot))){
5364+ return -EAGAIN;
5365+ }
5366+
5367+ return 0;
5368+}
5369+
5370+/* IOCTL to Allocate size pages and mmap it to the client calling it with
5371+ * corresponding virtual address
5372+ */
5373+int intel_getpages( drm_device_t *dev, struct file *filp, unsigned long arg ){
5374+
5375+ drm_intel_getpages_t getpages;
5376+ /* allocate some bytes */
5377+ unsigned long bytes;
5378+ int order;
5379+ int size;
5380+
5381+ unsigned long address;
5382+ unsigned long phy_address;
5383+ unsigned long offset;
5384+
5385+ struct page *pg;
5386+
5387+ unsigned long virtual;
5388+ struct file_operations *old_fops;
5389+
5390+ intel_device_private_t *dev_ptr=dev->dev_private;
5391+ drm_intel_listpages_t *page;
5392+ drm_intel_list_t *list;
5393+
5394+ DRM_DEBUG("\n");
5395+ /* copy user arguments */
5396+ if(copy_from_user(&getpages, (void __user *) arg, sizeof(getpages))){
5397+ return -EFAULT;
5398+ }
5399+
5400+ bytes=getpages.size;
5401+ /* Check to see if this allocation would exceed 16MEG in total memory
5402+ * This is to prevent denial of service attack. 16Meg should be enough.
5403+ */
5404+ if((memory+bytes)>MB(16) ){
5405+ /* We exceeded 16MEG. Bail out */
5406+ DRM_ERROR("Total memory allocated exceeded 16Meg!\n");
5407+ return -EFAULT;
5408+ }
5409+
5410+ /*number of pages that are needed*/
5411+ size=bytes>>PAGE_SHIFT;
5412+ if(bytes & ~(PAGE_SIZE*size)){
5413+ ++size;
5414+ }
5415+ order=ORDER(size);
5416+ DRM_DEBUG("Allocating bytes:%#lx,size:%d,order:%d\n",
5417+ (unsigned long)bytes,size,order);
5418+ /* allocate the pages */
5419+ /* returns kernel logical address.
5420+ * Is this the same as the kernel virtual address??
5421+ */
5422+ address=ALLOC_PAGES(order,0);
5423+ if(!address){
5424+ DRM_ERROR("Can't get pages\n");
5425+ return -EFAULT;
5426+ }
5427+ phy_address=__pa(address);
5428+
5429+ /* Find virtual address of the phys address */
5430+ pg=virt_to_page((void *)address);
5431+ offset=pg->index;
5432+ /* Find the number of bytes that is actually allocated */
5433+ size=PAGE_SIZE<<order;
5434+ DRM_DEBUG("Allocated address:%#lx,page offset:%#lx,phy_address:%#lx\n",
5435+ address,offset,phy_address);
5436+
5437+ /*do_mmap on the logical address and return virtual address */
5438+ down_write(&current->mm->mmap_sem);
5439+
5440+ old_fops= (struct file_operations *)filp->f_op;
5441+ filp->f_op=&intel_buffer_fops;
5442+
5443+ virtual=do_mmap(filp,0,size,PROT_READ|PROT_WRITE,MAP_SHARED,phy_address);
5444+
5445+ filp->f_op=old_fops;
5446+ up_write(&current->mm->mmap_sem);
5447+ DRM_DEBUG("Mmaped virtual:%#lx,address:%#lx\n",virtual,
5448+ (unsigned long)__va(phy_address));
5449+ if(virtual > -1024UL){
5450+ DRM_ERROR("mmap failed:%d\n",(int)virtual);
5451+ return -EFAULT;
5452+ }
5453+ getpages.phy_address=phy_address;
5454+ getpages.virt_address=virtual;
5455+ getpages.size=size;
5456+ getpages.offset=offset;
5457+
5458+ DRM_DEBUG("Mmap success requested size:%d (%d)\n",
5459+ getpages.size,(int)bytes);
5460+
5461+ /* alloc the page to be put into the linked list */
5462+ page=ALLOC(sizeof(*page),DRM_MEM_DRIVER);
5463+ if(!page){
5464+ DRM_DEBUG("Can't alloc list for page\n");
5465+ return -ENOMEM;
5466+ }
5467+
5468+ /*page->pid=current->pid;*/
5469+ page->pid=current->group_leader->pid;
5470+ page->size=size;
5471+ page->phy_address=phy_address;
5472+ page->virt_address=virtual;
5473+ page->offset=offset;
5474+
5475+ DRM_DEBUG("parent pid:%d,pid:%d,group_leader->pid:%d\n"
5476+ ,current->parent->pid,current->pid,current->group_leader->pid);
5477+ /* Alloc the list to be added then add it to the linked list */
5478+ list=ALLOC(sizeof(*list),DRM_MEM_DRIVER);
5479+ if(!list){
5480+ DRM_DEBUG("Can't alloc list for page\n");
5481+ FREE(page,sizeof(*page),0);
5482+ return -ENOMEM;
5483+ }
5484+ memset(list,0,sizeof(*list));
5485+ list->page=page;
5486+ LOCK_DRM(dev);
5487+ list_add(&list->head,&dev_ptr->pagelist->head);
5488+ UNLOCK_DRM(dev);
5489+ if(copy_to_user((void __user *) arg,&getpages,sizeof(getpages))){
5490+ return -EFAULT;
5491+ }
5492+ /* update the total amount of memory we use */
5493+ memory+=size;
5494+ DRM_DEBUG("memory has:%d bytes\n",memory);
5495+
5496+return 0;
5497+}
5498+
5499+/* IOCTL to free pages that are allocated by getpages
5500+ */
5501+int intel_freepages( drm_device_t *dev, unsigned long arg ){
5502+
5503+ drm_intel_freepages_t freepages;
5504+ /* allocate some bytes */
5505+ unsigned long bytes;
5506+ int order;
5507+ int size;
5508+
5509+ intel_device_private_t *dev_ptr=dev->dev_private;
5510+ drm_intel_listpages_t *page;
5511+ drm_intel_list_t *r_list=NULL;
5512+ struct list_head *pagelist;
5513+
5514+ DRM_DEBUG("Freeing pages\n");
5515+ /* copy user arguments */
5516+ if(copy_from_user(&freepages, (void __user *) arg, sizeof(freepages))){
5517+ return -EFAULT;
5518+ }
5519+
5520+ bytes=freepages.size;
5521+ /*number of pages that are needed*/
5522+ size=bytes>>PAGE_SHIFT;
5523+ if(bytes & ~(PAGE_SIZE*size)){
5524+ ++size;
5525+ }
5526+ order=ORDER(size);
5527+ DRM_DEBUG("bytes:%d,size:%d,order:%d,phy_address:%#lx\n",(int)bytes,(int)size,(int)order,freepages.phy_address);
5528+
5529+ /* free the pages */
5530+ DRM_DEBUG("freeing address:%#lx,size:%#lx\n",(unsigned long)__va(freepages.phy_address),(unsigned long)bytes);
5531+
5532+ DRM_DEBUG("parent pid:%d,pid:%d,group_leader->pid:%d\n"
5533+ ,current->parent->pid,current->pid,current->group_leader->pid);
5534+ /* See if the requested address is in our page list */
5535+ LOCK_DRM(dev);
5536+ pagelist=&dev_ptr->pagelist->head;
5537+ list_for_each(pagelist,&dev_ptr->pagelist->head){
5538+ r_list=list_entry(pagelist,drm_intel_list_t,head);
5539+ if((r_list->page->pid==current->group_leader->pid)
5540+ && (r_list->page->phy_address==freepages.phy_address)){
5541+
5542+ DRM_DEBUG("found pid:%d\n",current->group_leader->pid);
5543+ DRM_DEBUG("size:%d\n",r_list->page->size);
5544+ DRM_DEBUG("phy_address:%#lx\n",r_list->page->phy_address);
5545+ DRM_DEBUG("virt_add:%#lx\n",r_list->page->virt_address);
5546+ DRM_DEBUG("offset:%#lx\n",r_list->page->offset);
5547+
5548+ break;
5549+ }
5550+
5551+ }
5552+ if(pagelist==(&dev_ptr->pagelist->head)){
5553+ DRM_DEBUG("Can't find pages alloc for pid:%d\n",current->pid);
5554+ UNLOCK_DRM(dev);
5555+ return -EINVAL;
5556+ }
5557+
5558+ /* munmap the region 1st */
5559+ down_write(&current->mm->mmap_sem);
5560+ DRM_DEBUG("Unmapping virt_address:%#lx\n",freepages.virt_address);
5561+ do_munmap(current->mm,freepages.virt_address,bytes);
5562+ up_write(&current->mm->mmap_sem);
5563+
5564+ /* Free the pages! */
5565+ FREE_PAGES((unsigned long)__va(freepages.phy_address),order,0);
5566+
5567+ /* Free the page list */
5568+ page=r_list->page;
5569+ list_del(pagelist);
5570+ size=r_list->page->size;
5571+ FREE(pagelist,sizeof(*pagelist),0);
5572+ FREE(page,sizeof(*page),0);
5573+ UNLOCK_DRM(dev);
5574+
5575+ /* update the total memory that we use */
5576+ memory-=size;
5577+ DRM_DEBUG("memory has:%d bytes\n",memory);
5578+ return 0;
5579+}
5580+
5581+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
5582+/* This code is copied verbatim from the DRM module code in
5583+ * IKM/val/drm/drmv11p0/drm_irq.c. It's here because we
5584+ * need to activate interrupt handling, but for some reason the DRM module
5585+ * only exports the routine to disable interrupt handling drm_irq_uninstall(),
5586+ * and not the one to install.
5587+ *
5588+ * This could be problematic when new DRM versions appear.
5589+ *
5590+ * Fortunately, should a new DRM version appear, it should export
5591+ * drm_irq_install(), and then this source won't be needed at all; the
5592+ * code should compile cleanly with an external reference if this
5593+ * static version is removed completely.
5594+ */
5595+int drm_irq_install(drm_device_t * dev)
5596+{
5597+ int ret;
5598+ unsigned long sh_flags = 0;
5599+ int dev_irq = 0;
5600+
5601+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
5602+ return -EINVAL;
5603+
5604+ dev_irq = DRM_DEV_TO_IRQ(dev);
5605+ if (dev_irq == 0)
5606+ return -EINVAL;
5607+
5608+ mutex_lock(&dev->struct_mutex);
5609+
5610+ /* Driver must have been initialized */
5611+ if (!dev->dev_private) {
5612+ mutex_unlock(&dev->struct_mutex);
5613+ return -EINVAL;
5614+ }
5615+
5616+ if (dev->irq_enabled) {
5617+ mutex_unlock(&dev->struct_mutex);
5618+ return -EBUSY;
5619+ }
5620+ dev->irq_enabled = 1;
5621+ mutex_unlock(&dev->struct_mutex);
5622+
5623+ DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev_irq);
5624+
5625+ /*
5626+ if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
5627+ init_waitqueue_head(&dev->vbl_queue);
5628+
5629+ spin_lock_init(&dev->vbl_lock);
5630+
5631+ INIT_LIST_HEAD(&dev->vbl_sigs.head);
5632+ INIT_LIST_HEAD(&dev->vbl_sigs2.head);
5633+
5634+ dev->vbl_pending = 0;
5635+ }
5636+ */
5637+
5638+ /* Before installing handler */
5639+ dev->driver->irq_preinstall(dev);
5640+
5641+ /* Install handler */
5642+ if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
5643+ sh_flags = IRQF_SHARED;
5644+
5645+ ret = request_irq(dev_irq, dev->driver->irq_handler,
5646+ sh_flags, dev->devname, dev);
5647+ if (ret < 0) {
5648+ mutex_lock(&dev->struct_mutex);
5649+ dev->irq_enabled = 0;
5650+ mutex_unlock(&dev->struct_mutex);
5651+ return ret;
5652+ }
5653+
5654+ /* After installing handler */
5655+ dev->driver->irq_postinstall(dev);
5656+
5657+ return 0;
5658+}
5659+#endif
5660+
5661+/* IOCTL to init the info that is needed by the client
5662+ */
5663+int intel_drm_info_init( drm_device_t *dev, unsigned long arg ){
5664+
5665+ intel_drm_info_t info;
5666+ intel_drm_info_t *info_ptr;
5667+ intel_device_private_t *dev_ptr;
5668+
5669+ DRM_DEBUG("info init succesful dev_private:%#lx\n",(unsigned long)dev->dev_private);
5670+ dev_ptr=dev->dev_private;
5671+ /* See if dev_private is already allocated */
5672+ if(!dev->dev_private){
5673+ DRM_ERROR("dev_private not allocated!\n");
5674+ return 0;
5675+ }
5676+ info_ptr=dev_ptr->info_ptr;
5677+ /* See if info is already allocated */
5678+ if(info_ptr->device_id){
5679+ DRM_ERROR("Info already allocated!\n");
5680+ return 0;
5681+ }
5682+
5683+ /* copy user arguments */
5684+ if(copy_from_user(&info, (void __user *) arg, sizeof(info))){
5685+ return -EFAULT;
5686+ }
5687+
5688+ info_ptr->device_id=info.device_id;
5689+ info_ptr->revision=info.revision;
5690+ info_ptr->video_memory_offset=info.video_memory_offset;
5691+ info_ptr->video_memory_size=info.video_memory_size;
5692+ info_ptr->hw_status_offset=info.hw_status_offset;
5693+ DRM_DEBUG("device_id:%#lx,revision:%#lx,offset:%#lx,size:%#lx,hw_status_offset:%lx\n",
5694+ info_ptr->device_id,info_ptr->revision,
5695+ info_ptr->video_memory_offset,info_ptr->video_memory_size,
5696+ info_ptr->hw_status_offset);
5697+return 0;
5698+}
5699+/* IOCTL to get the info that is needed by the client
5700+ */
5701+int intel_drm_info_get( drm_device_t *dev, unsigned long arg ){
5702+
5703+ intel_drm_info_t info;
5704+ intel_device_private_t *dev_ptr=dev->dev_private;
5705+ intel_drm_info_t *info_ptr=dev_ptr->info_ptr;
5706+
5707+ DRM_DEBUG("Info get device_id:%#lx,revision:%#lx,offset:%#lx,size:%#lx, hw_status_offset:%lx\n",
5708+ info_ptr->device_id,info_ptr->revision,
5709+ info_ptr->video_memory_offset,info_ptr->video_memory_size,
5710+ info_ptr->hw_status_offset);
5711+
5712+ info.device_id=info_ptr->device_id;
5713+ info.revision=info_ptr->revision;
5714+ info.video_memory_offset=info_ptr->video_memory_offset;
5715+ info.video_memory_size=info_ptr->video_memory_size;
5716+ info.hw_status_offset=info_ptr->hw_status_offset;
5717+
5718+ if(copy_to_user((void __user *) arg,&info,sizeof(info))){
5719+ return -EFAULT;
5720+ }
5721+
5722+return 0;
5723+}
5724+
5725+/* initialise structure for link list and driver info in dev_private */
5726+int intel_postinit(intel_device_private_t **priv){
5727+
5728+ intel_drm_info_t *info_ptr;
5729+ intel_device_private_t *dev_ptr;
5730+ DRM_DEBUG("\n");
5731+ /* allocate info to be stored */
5732+ dev_ptr=ALLOC(sizeof(intel_device_private_t),DRM_MEM_DRIVER);
5733+
5734+ if(!dev_ptr){
5735+ return -ENOMEM;
5736+ }
5737+
5738+ DRM_DEBUG("dev_ptr allocation succesful\n");
5739+
5740+ memset(dev_ptr,0,sizeof(intel_device_private_t));
5741+ *priv=dev_ptr;
5742+
5743+ info_ptr=ALLOC(sizeof(intel_drm_info_t),DRM_MEM_DRIVER);
5744+
5745+ if(!info_ptr){
5746+ return -ENOMEM;
5747+ }
5748+
5749+ DRM_DEBUG("Info_ptr allocation succesful\n");
5750+ memset(info_ptr,0,sizeof(intel_drm_info_t));
5751+ dev_ptr->info_ptr=info_ptr;
5752+
5753+ dev_ptr->pagelist=ALLOC(sizeof(*dev_ptr->pagelist),DRM_MEM_DRIVER);
5754+
5755+ if(!dev_ptr->pagelist){
5756+ return -ENOMEM;
5757+ }
5758+
5759+ DRM_DEBUG("pagelist allocation succesful\n");
5760+ memset(dev_ptr->pagelist,0,sizeof(*dev_ptr->pagelist));
5761+ INIT_LIST_HEAD(&dev_ptr->pagelist->head);
5762+ /* Initialise global variable to zero when we start up */
5763+ memory=0;
5764+ DRM_DEBUG("Initialised memory:%d\n",memory);
5765+
5766+return 0;
5767+
5768+}
5769+/* check and free pages of client that is closing the fd */
5770+int intel_prerelease(drm_device_t *dev){
5771+ unsigned long bytes;
5772+ int order;
5773+ int size;
5774+
5775+ intel_device_private_t *dev_ptr=dev->dev_private;
5776+ drm_intel_listpages_t *page;
5777+ drm_intel_list_t *r_list=NULL;
5778+ struct list_head *pagelist, *pagelist_next;
5779+
5780+ DRM_DEBUG("Client closing freeing pages alloc to it\n");
5781+
5782+
5783+ /* Search for the page list has been added and free it */
5784+
5785+ LOCK_DRM(dev);
5786+
5787+ /* The changes to this function are copied form 8.1 */
5788+ /* I've no idea why, but sometimes during bootup the dev_private
5789+ * field can show up as NULL. Guarding against this for now...
5790+ */
5791+ if (dev_ptr != NULL) {
5792+
5793+ pagelist=&dev_ptr->pagelist->head;
5794+ list_for_each_safe(pagelist,pagelist_next,&dev_ptr->pagelist->head){
5795+ r_list=list_entry(pagelist,drm_intel_list_t,head);
5796+ if(r_list->page->pid==current->group_leader->pid){
5797+#if 0
5798+ printk("found pid:%d\n",current->pid);
5799+ printk("size:%d\n",r_list->page->size);
5800+ printk("phy_address:%#lx\n",r_list->page->phy_address);
5801+ printk("virt_add:%#lx\n",r_list->page->virt_address);
5802+ printk("offset:%#lx\n",r_list->page->offset);
5803+#endif
5804+ bytes=r_list->page->size;
5805+
5806+ /*number of pages that are needed*/
5807+
5808+ size=bytes>>PAGE_SHIFT;
5809+ if(bytes & ~(PAGE_SIZE*size)){
5810+ ++size;
5811+ }
5812+ order=ORDER(size);
5813+ /* free the pages */
5814+
5815+#if 0
5816+ printk("freeing address:%#lx,size:%#lx\n"
5817+ ,(unsigned long)__va(r_list->page->phy_address)
5818+ ,(unsigned long)bytes);
5819+#endif
5820+
5821+ FREE_PAGES((unsigned long)__va(r_list->page->phy_address)
5822+ ,order,0);
5823+
5824+ /* remove from list and free the resource */
5825+
5826+ page=r_list->page;
5827+ list_del(pagelist);
5828+ FREE(pagelist,sizeof(*pagelist),0);
5829+ FREE(page,sizeof(*page),0);
5830+ /* update the total memory that we use */
5831+ memory-=bytes;
5832+ DRM_DEBUG("memory:%d bytes\n",memory);
5833+ }
5834+ }
5835+ }
5836+
5837+ UNLOCK_DRM(dev);
5838+
5839+ return 0;
5840+
5841+}
5842+
5843+int drm_plb_mmap(struct file *filp, struct vm_area_struct *vma)
5844+{
5845+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
5846+ drm_file_t *priv = filp->private_data;
5847+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
5848+ drm_device_t *dev = priv->minor->dev;
5849+#else
5850+ drm_device_t *dev = priv->head->dev;
5851+#endif
5852+
5853+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
5854+ drm_local_map_t *map = NULL;
5855+#else
5856+ drm_map_t *map = NULL;
5857+#endif
5858+ drm_map_list_t *r_list;
5859+ unsigned long offset = 0;
5860+ struct list_head *list;
5861+ drm_vma_entry_t *vma_entry;
5862+
5863+ DRM_DEBUG("drm_plb_mmap: start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
5864+ vma->vm_start, vma->vm_end, VM_OFFSET(vma));
5865+
5866+ if (!priv->authenticated) {
5867+ DRM_DEBUG("Did not authenticate");
5868+ return -EACCES;
5869+ } else {
5870+ DRM_DEBUG("Authenticate successful");
5871+ }
5872+
5873+ /* A sequential search of a linked list is
5874+ * fine here because: 1) there will only be
5875+ * about 5-10 entries in the list and, 2) a
5876+ * DRI client only has to do this mapping
5877+ * once, so it doesn't have to be optimized
5878+ * for performance, even if the list was a
5879+ * bit longer. */
5880+
5881+ /* FIXME: Temporary fix. */
5882+ LIST_FOR_EACH(list, dev) {
5883+
5884+ r_list = list_entry(list, drm_map_list_t, head);
5885+ map = r_list->map;
5886+ if (!map)
5887+ continue;
5888+ if (r_list->user_token == VM_OFFSET(vma))
5889+ break;
5890+ }
5891+
5892+ if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
5893+ return -EPERM;
5894+
5895+ /* Check for valid size. */
5896+ if (map->size != vma->vm_end - vma->vm_start) {
5897+ return -EINVAL;
5898+ }
5899+
5900+ if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
5901+ vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
5902+ pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
5903+ }
5904+
5905+ switch (map->type) {
5906+
5907+ case _DRM_AGP:
5908+ case _DRM_FRAME_BUFFER:
5909+ if (agp_use_pat()) {
5910+ pgprot_val(vma->vm_page_prot) &= ~(_PAGE_PWT | _PAGE_PCD);
5911+ pgprot_val(vma->vm_page_prot) |= _PAGE_PAT;
5912+ vma->vm_flags |= VM_IO; /* not in core dump */
5913+
5914+ offset = VM_OFFSET(vma) - agp_bridge->gart_bus_addr;
5915+ vma->vm_ops = &iegd_plb_vm_ops;
5916+ break;
5917+ }
5918+
5919+ /* Fallthrough */
5920+ case _DRM_REGISTERS:
5921+ if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
5922+ pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
5923+ pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
5924+ }
5925+ vma->vm_flags |= VM_IO; /* not in core dump */
5926+ offset = VM_OFFSET(vma) - agp_bridge->gart_bus_addr;
5927+
5928+ vma->vm_ops = &iegd_plb_vm_ops;
5929+ break;
5930+ case _DRM_SHM:
5931+ case _DRM_CONSISTENT:
5932+ case _DRM_SCATTER_GATHER:
5933+ DRM_DEBUG("Fall through to original mmap\n");
5934+ return drm_mmap(filp, vma);
5935+ break;
5936+ default:
5937+ return -EINVAL; /* This should never happen. */
5938+ }
5939+
5940+
5941+ vma->vm_flags |= VM_RESERVED; /* Don't swap */
5942+
5943+ vma->vm_file = filp; /* Needed for drm_vm_open() */
5944+
5945+ vma_entry = ALLOC(sizeof(*vma_entry), DRM_MEM_VMAS);
5946+ if (vma_entry) {
5947+ /*
5948+ * FIXME: Temporary fix. Will figure out later
5949+ */
5950+ INSERT_VMA();
5951+ }
5952+
5953+#endif
5954+ return 0;
5955+}
5956+
5957+int psb_init(intel_device_private_t *priv)
5958+{
5959+ DRM_INIT_WAITQUEUE(&priv->event_queue);
5960+ spin_lock_init(&priv->irqmask_lock);
5961+ priv->event_present = 0;
5962+ priv->out_vdc = 0;
5963+ priv->out_sgx = 0;
5964+ priv->out_sgx2 = 0;
5965+ priv->out_mtx = 0;
5966+
5967+ return 0;
5968+}
5969+
5970+int intel_drm_plb_interrupts( drm_device_t *dev, void *data )
5971+{
5972+ intel_device_private_t *priv;
5973+ interrupt_info_t plb_info;
5974+ unsigned long irqflags;
5975+ int ret = 0;
5976+ int rv;
5977+ priv=(intel_device_private_t *)dev->dev_private;
5978+
5979+ if(copy_from_user(&plb_info, (void __user *) data, sizeof(plb_info))) {
5980+ return -EFAULT;
5981+ }
5982+
5983+ /* USW15 definition of in and out
5984+ *
5985+ * in/out[0] VDC
5986+ * in/out[1] sgx
5987+ * in/out[2] sgx2
5988+ * in/out[3] msvdx
5989+ */
5990+
5991+ plb_info.out[0]=0;
5992+ plb_info.out[1]=0;
5993+ plb_info.out[2]=0;
5994+ plb_info.out[3]=0;
5995+
5996+ switch (plb_info.req_type) {
5997+ case CLEAR_INT:
5998+
5999+ plb_info.in[0] &= priv->vdc_irq_mask;
6000+ plb_info.in[1] &= priv->sgx_irq_mask;
6001+ plb_info.in[2] &= priv->sgx_irq_mask2;
6002+ plb_info.in[3] &= priv->msvdx_irq_mask;
6003+
6004+ if (plb_info.in[0] || plb_info.in[1] ||
6005+ plb_info.in[2] || plb_info.in[3]) {
6006+
6007+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
6008+ priv->out_vdc &= ~plb_info.in[0];
6009+ plb_info.out[0] = priv->out_vdc;
6010+
6011+ priv->out_sgx &= ~plb_info.in[1];
6012+ plb_info.out[1] = priv->out_sgx;
6013+
6014+ priv->out_sgx2 &= ~plb_info.in[2];
6015+ plb_info.out[2] = priv->out_sgx2;
6016+
6017+ priv->out_mtx &= ~plb_info.in[3];
6018+ plb_info.out[3] = priv->out_mtx;
6019+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
6020+
6021+ plb_info.req_status = INT_CLEARED;
6022+
6023+ } else {
6024+ plb_info.req_status = INT_NOOP;
6025+ }
6026+
6027+ break;
6028+
6029+ case READ_INT:
6030+
6031+
6032+ plb_info.out[0] = priv->out_vdc;
6033+ plb_info.out[1] = priv->out_sgx;
6034+ plb_info.out[2] = priv->out_sgx2;
6035+ plb_info.out[3] = priv->out_mtx;
6036+ plb_info.req_status = INT_READ;
6037+
6038+ break;
6039+
6040+ case WAIT_INT:
6041+
6042+ plb_info.in[0] &= priv->vdc_irq_mask;
6043+ plb_info.in[1] &= priv->sgx_irq_mask;
6044+ plb_info.in[2] &= priv->sgx_irq_mask2;
6045+ plb_info.in[3] &= priv->msvdx_irq_mask;
6046+
6047+ if (plb_info.in[0] || plb_info.in[1] ||
6048+ plb_info.in[2] || plb_info.in[3]) {
6049+
6050+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
6051+
6052+ /* none of the interrupts have ocurred */
6053+ if ((priv->out_vdc & plb_info.in[0]) ||
6054+ (priv->out_sgx & plb_info.in[1]) ||
6055+ (priv->out_sgx2 & plb_info.in[2]) ||
6056+ (priv->out_mtx & plb_info.in[3])) {
6057+
6058+ /* At least one of the interrupts has already occurred */
6059+ plb_info.req_status = INT_STORED;
6060+
6061+ } else {
6062+
6063+ /* Wait for an interrupt to occur */
6064+ priv->event_present = 0;
6065+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
6066+
6067+ DRM_WAIT_ON(ret, priv->event_queue, 20 * DRM_HZ,
6068+ priv->event_present);
6069+
6070+ if (ret) {
6071+ plb_info.req_status = INT_TIMEOUT;
6072+ break;
6073+ }
6074+
6075+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
6076+
6077+ plb_info.req_status = INT_HANDLED;
6078+
6079+ }
6080+
6081+ plb_info.out[0] = priv->out_vdc;
6082+ plb_info.out[1] = priv->out_sgx;
6083+ plb_info.out[2] = priv->out_sgx2;
6084+ plb_info.out[3] = priv->out_mtx;
6085+
6086+ /* Clear the outstanding interrupts that have just been
6087+ * retrieved
6088+ */
6089+ priv->out_vdc &= ~(plb_info.out[0] & plb_info.in[0]);
6090+ priv->out_sgx &= ~(plb_info.out[1] & plb_info.in[1]) ;
6091+ priv->out_sgx2 &= ~(plb_info.out[2] & plb_info.in[2]);
6092+ priv->out_mtx &= ~(plb_info.out[3] & plb_info.in[3]);
6093+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
6094+
6095+ } else {
6096+
6097+ /* Unsupported interrupt */
6098+ plb_info.req_status = INT_NOOP;
6099+
6100+ }
6101+
6102+ break;
6103+
6104+ case UNMASK_INT:
6105+
6106+ if (!dev->irq_enabled) {
6107+ rv = drm_irq_install(dev);
6108+ if (rv != 0) {
6109+ DRM_ERROR("%s: could not install IRQs: rv = %d\n", __FUNCTION__, rv);
6110+ return rv;
6111+ }
6112+ }
6113+
6114+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
6115+ PSB_WVDC32(0x00000000, IMR);
6116+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
6117+
6118+ break;
6119+
6120+ case MASK_INT:
6121+
6122+ if (dev->irq_enabled) {
6123+ rv = drm_irq_uninstall(dev);
6124+ if (rv != 0) {
6125+ DRM_ERROR("%s: could not uninstall IRQs: rv = %d\n", __FUNCTION__, rv);
6126+ return rv;
6127+ }
6128+ }
6129+
6130+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
6131+ PSB_WVDC32(0xFFFFFFFF, IMR);
6132+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
6133+
6134+ break;
6135+
6136+ default:
6137+
6138+ plb_info.req_status = INT_INVALID;
6139+
6140+ }
6141+
6142+
6143+ if (copy_to_user((void __user *) data, &plb_info, sizeof(plb_info))) {
6144+ return -EFAULT;
6145+ }
6146+
6147+ return 0;
6148+}
6149diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2611.c patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2611.c
6150--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2611.c 1969-12-31 17:00:00.000000000 -0700
6151+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2611.c 2009-10-06 10:30:05.000000000 -0700
6152@@ -0,0 +1,250 @@
6153+/* -*- pse-c -*-
6154+ *----------------------------------------------------------------------------
6155+ * Filename: iegd_interface_2611.c
6156+ * $Revision: 1.6 $
6157+ *----------------------------------------------------------------------------
6158+ * Gart and DRM driver for Intel Embedded Graphics Driver
6159+ * Copyright © 2008, Intel Corporation.
6160+ *
6161+ * This program is free software; you can redistribute it and/or modify it
6162+ * under the terms and conditions of the GNU General Public License,
6163+ * version 2, as published by the Free Software Foundation.
6164+ *
6165+ * This program is distributed in the hope it will be useful, but WITHOUT
6166+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6167+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6168+ * more details.
6169+ *
6170+ * You should have received a copy of the GNU General Public License along with
6171+ * this program; if not, write to the Free Software Foundation, Inc.,
6172+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6173+ *
6174+ */
6175+
6176+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
6177+ *
6178+ * Redistribution and use in source and binary forms, with or without
6179+ * modification, are permitted provided that the following conditions are met:
6180+ * Redistributions of source code must retain the above copyright notice,
6181+ * this list of conditions and the following disclaimer.
6182+ *
6183+ * Redistributions in binary form must reproduce the above copyright
6184+ * notice, this list of conditions and the following disclaimer in the
6185+ * documentation and/or other materials provided with the distribution.
6186+ *
6187+ * Neither the name Intel Corporation nor the names of its contributors
6188+ * may be used to endorse or promote products derived from this software
6189+ * without specific prior written permission.
6190+ *
6191+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
6192+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
6193+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6194+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6195+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6196+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6197+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
6198+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
6199+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
6200+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
6201+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6202+ *
6203+ */
6204+
6205+#include "iegd.h"
6206+#include "drmP.h"
6207+#include "drm.h"
6208+
6209+#include "iegd_drm.h"
6210+#include "iegd_drv.h"
6211+
6212+#if KERNEL2611
6213+int intel_postinit_2611(struct drm_device *dev,unsigned long flags){
6214+
6215+ intel_device_private_t *priv;
6216+ priv=(intel_device_private_t *)dev->dev_private;
6217+
6218+ intel_postinit(&priv);
6219+ dev->dev_private=priv;
6220+
6221+ return 0;
6222+
6223+}
6224+
6225+void intel_prerelease_2611(drm_device_t *dev,DRMFILE filp){
6226+
6227+ intel_prerelease(dev);
6228+
6229+}
6230+
6231+int intel_getpages_2611( struct inode *inode, struct file *filp,
6232+ unsigned int cmd, unsigned long arg ){
6233+
6234+ drm_file_t *priv=filp->private_data;
6235+ drm_device_t *dev=priv->head->dev;
6236+ return intel_getpages(dev,filp,arg);
6237+
6238+}
6239+
6240+int intel_freepages_2611( struct inode *inode, struct file *filp,
6241+ unsigned int cmd, unsigned long arg ){
6242+
6243+ drm_file_t *priv=filp->private_data;
6244+ drm_device_t *dev=priv->head->dev;
6245+ return intel_freepages(dev,arg);
6246+}
6247+
6248+int intel_drm_info_init_2611( struct inode *inode, struct file *filp,
6249+ unsigned int cmd, unsigned long arg ){
6250+
6251+ drm_file_t *priv=filp->private_data;
6252+ drm_device_t *dev=priv->head->dev;
6253+ return intel_drm_info_init(dev,arg);
6254+
6255+}
6256+
6257+int intel_drm_info_get_2611( struct inode *inode, struct file *filp,
6258+ unsigned int cmd, unsigned long arg ){
6259+
6260+ drm_file_t *priv=filp->private_data;
6261+ drm_device_t *dev=priv->head->dev;
6262+ return intel_drm_info_get(dev,arg);
6263+
6264+}
6265+
6266+/* Following 2 functions were taken from drm_memory.c
6267+ * For some reason they are not being exported to use
6268+ * by the other drm.
6269+ */
6270+
6271+/**
6272+ * Allocate pages.
6273+ *
6274+ * \param order size order.
6275+ * \param area memory area. (Not used.)
6276+ * \return page address on success, or zero on failure.
6277+ *
6278+ * Allocate and reserve free pages.
6279+ */
6280+unsigned long intel_alloc_pages(int order, int area)
6281+{
6282+ unsigned long address;
6283+ unsigned long bytes = PAGE_SIZE << order;
6284+ unsigned long addr;
6285+ unsigned int sz;
6286+
6287+ address = __get_free_pages(GFP_KERNEL, order);
6288+ if (!address)
6289+ return 0;
6290+
6291+ /* Zero */
6292+ memset((void *)address, 0, bytes);
6293+
6294+ /* Reserve */
6295+ for (addr = address, sz = bytes;
6296+ sz > 0;
6297+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
6298+ SetPageReserved(virt_to_page(addr));
6299+ }
6300+
6301+ return address;
6302+}
6303+
6304+/**
6305+ * Free pages.
6306+ *
6307+ * \param address address of the pages to free.
6308+ * \param order size order.
6309+ * \param area memory area. (Not used.)
6310+ *
6311+ * Unreserve and free pages allocated by alloc_pages().
6312+ */
6313+void intel_free_pages(unsigned long address, int order, int area)
6314+{
6315+ unsigned long bytes = PAGE_SIZE << order;
6316+ unsigned long addr;
6317+ unsigned int sz;
6318+
6319+ if (!address)
6320+ return;
6321+
6322+ /* Unreserve */
6323+ for (addr = address, sz = bytes;
6324+ sz > 0;
6325+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
6326+ ClearPageReserved(virt_to_page(addr));
6327+ }
6328+
6329+ free_pages(address, order);
6330+}
6331+
6332+drm_ioctl_desc_t intel_ioctls[]={
6333+ [DRM_IOCTL_NR(DRM_INTEL_GETPAGES)] = { intel_getpages_2611, 0,0 },
6334+ [DRM_IOCTL_NR(DRM_INTEL_FREEPAGES)] = { intel_freepages_2611, 0,0 },
6335+ [DRM_IOCTL_NR(DRM_INTEL_INFO_INIT)] = { intel_drm_info_init_2611, 0,0 },
6336+ [DRM_IOCTL_NR(DRM_INTEL_INFO_GET)] = { intel_drm_info_get_2611, 0,0 }
6337+};
6338+
6339+int intel_max_ioctl = DRM_ARRAY_SIZE(intel_ioctls);
6340+
6341+
6342+static int version( drm_version_t *version )
6343+{
6344+ int len;
6345+
6346+ version->version_major = DRIVER_MAJOR;
6347+ version->version_minor = DRIVER_MINOR;
6348+ version->version_patchlevel = DRIVER_PATCHLEVEL;
6349+ DRM_COPY( version->name, DRIVER_NAME );
6350+ DRM_COPY( version->date, DRIVER_DATE );
6351+ DRM_COPY( version->desc, DRIVER_DESC );
6352+ return 0;
6353+}
6354+
6355+static struct pci_device_id pciidlist[] = {
6356+ INTEL_PCI_IDS
6357+};
6358+
6359+static struct drm_driver driver = {
6360+ .driver_features = DRIVER_USE_AGP|DRIVER_REQUIRE_AGP|DRIVER_USE_MTRR,
6361+ .prerelease = intel_prerelease_2611,
6362+ .postinit =intel_postinit_2611,
6363+ .reclaim_buffers=drm_core_reclaim_buffers,
6364+ .get_map_ofs=drm_core_get_map_ofs,
6365+ .get_reg_ofs=drm_core_get_reg_ofs,
6366+ .version = version,
6367+ .ioctls = intel_ioctls,
6368+ .fops = {
6369+ .owner = THIS_MODULE,
6370+ .open = drm_open,
6371+ .release = drm_release,
6372+ .ioctl = drm_ioctl,
6373+ .mmap = drm_mmap,
6374+ .poll = drm_poll,
6375+ .fasync = drm_fasync,
6376+ },
6377+ .pci_driver = {
6378+ .name = DRIVER_NAME,
6379+ .id_table = pciidlist,
6380+ }
6381+};
6382+
6383+int __init intel_init(void)
6384+{
6385+ driver.num_ioctls = intel_max_ioctl;
6386+ return drm_init(&driver);
6387+}
6388+
6389+void __exit intel_exit(void)
6390+{
6391+ drm_exit(&driver);
6392+}
6393+
6394+struct file_operations intel_buffer_fops = {
6395+ .open = drm_open,
6396+ .flush = drm_flush,
6397+ .release = drm_release,
6398+ .ioctl = drm_ioctl,
6399+ .mmap = intel_mmap_buffers,
6400+ .fasync = drm_fasync,
6401+};
6402+#endif
6403diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2611.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2611.h
6404--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2611.h 1969-12-31 17:00:00.000000000 -0700
6405+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2611.h 2009-10-06 10:30:05.000000000 -0700
6406@@ -0,0 +1,71 @@
6407+/* -*- pse-c -*-
6408+ *----------------------------------------------------------------------------
6409+ * Filename: iegd_interface_2611.h
6410+ * $Revision: 1.6 $
6411+ *----------------------------------------------------------------------------
6412+ * Gart and DRM driver for Intel Embedded Graphics Driver
6413+ * Copyright © 2008, Intel Corporation.
6414+ *
6415+ * This program is free software; you can redistribute it and/or modify it
6416+ * under the terms and conditions of the GNU General Public License,
6417+ * version 2, as published by the Free Software Foundation.
6418+ *
6419+ * This program is distributed in the hope it will be useful, but WITHOUT
6420+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6421+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6422+ * more details.
6423+ *
6424+ * You should have received a copy of the GNU General Public License along with
6425+ * this program; if not, write to the Free Software Foundation, Inc.,
6426+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6427+ *
6428+ */
6429+
6430+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
6431+ *
6432+ * Redistribution and use in source and binary forms, with or without
6433+ * modification, are permitted provided that the following conditions are met:
6434+ * Redistributions of source code must retain the above copyright notice,
6435+ * this list of conditions and the following disclaimer.
6436+ *
6437+ * Redistributions in binary form must reproduce the above copyright
6438+ * notice, this list of conditions and the following disclaimer in the
6439+ * documentation and/or other materials provided with the distribution.
6440+ *
6441+ * Neither the name Intel Corporation nor the names of its contributors
6442+ * may be used to endorse or promote products derived from this software
6443+ * without specific prior written permission.
6444+ *
6445+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
6446+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
6447+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6448+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6449+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6450+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6451+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
6452+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
6453+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
6454+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
6455+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6456+ *
6457+ */
6458+
6459+/* Macros are defined here such that only kernel specific functions can be
6460+ * used.
6461+ */
6462+#if KERNEL2611
6463+#define REMAP_PAGE(a,b,c,d,e) io_remap_pfn_range(a,b, \
6464+ c >>PAGE_SHIFT, \
6465+ d,e)
6466+
6467+#define ORDER(a) drm_order(a)
6468+#define ALLOC_PAGES(a,b) intel_alloc_pages(a,b)
6469+#define ALLOC(a,b) drm_alloc(a,b)
6470+#define FREE(a,b,c) drm_free(a,b,c)
6471+#define FREE_PAGES(a,b,c) intel_free_pages(a,b,c)
6472+
6473+#define LOCK_DRM(d) down(&d->struct_sem)
6474+#define UNLOCK_DRM(d) up(&d->struct_sem)
6475+#endif
6476+
6477+/* endif for KERNEL2611 */
6478diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2615.c patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2615.c
6479--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2615.c 1969-12-31 17:00:00.000000000 -0700
6480+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2615.c 2009-10-06 10:30:05.000000000 -0700
6481@@ -0,0 +1,394 @@
6482+
6483+/* -*- pse-c -*-
6484+ *----------------------------------------------------------------------------
6485+ * Filename: iegd_interface_2615.c
6486+ * $Revision: 1.11 $
6487+ *----------------------------------------------------------------------------
6488+ * Gart and DRM driver for Intel Embedded Graphics Driver
6489+ * Copyright © 2008, Intel Corporation.
6490+ *
6491+ * This program is free software; you can redistribute it and/or modify it
6492+ * under the terms and conditions of the GNU General Public License,
6493+ * version 2, as published by the Free Software Foundation.
6494+ *
6495+ * This program is distributed in the hope it will be useful, but WITHOUT
6496+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6497+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6498+ * more details.
6499+ *
6500+ * You should have received a copy of the GNU General Public License along with
6501+ * this program; if not, write to the Free Software Foundation, Inc.,
6502+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6503+ *
6504+ */
6505+
6506+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
6507+ *
6508+ * Redistribution and use in source and binary forms, with or without
6509+ * modification, are permitted provided that the following conditions are met:
6510+ * Redistributions of source code must retain the above copyright notice,
6511+ * this list of conditions and the following disclaimer.
6512+ *
6513+ * Redistributions in binary form must reproduce the above copyright
6514+ * notice, this list of conditions and the following disclaimer in the
6515+ * documentation and/or other materials provided with the distribution.
6516+ *
6517+ * Neither the name Intel Corporation nor the names of its contributors
6518+ * may be used to endorse or promote products derived from this software
6519+ * without specific prior written permission.
6520+ *
6521+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
6522+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
6523+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6524+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6525+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6526+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6527+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
6528+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
6529+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
6530+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
6531+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6532+ *
6533+ */
6534+
6535+#include "iegd.h"
6536+#include "drmP.h"
6537+#include "drm.h"
6538+
6539+#include "iegd_drm.h"
6540+
6541+#include "iegd_drv.h"
6542+#include "psb_intregs.h"
6543+#include "intelpci.h"
6544+#include <linux/i2c.h>
6545+
6546+int drm_irq_install(drm_device_t *dev);
6547+
6548+#if KERNEL2615
6549+int intel_firstopen_2615(struct drm_device *dev)
6550+{
6551+
6552+ intel_device_private_t *priv;
6553+ priv=(intel_device_private_t *)dev->dev_private;
6554+
6555+ intel_postinit(&priv);
6556+ dev->dev_private=priv;
6557+
6558+ return 0;
6559+
6560+}
6561+
6562+
6563+int intel_psb_firstopen_2615(struct drm_device *dev)
6564+{
6565+
6566+ unsigned long resource_start;
6567+ intel_device_private_t *priv;
6568+ priv=(intel_device_private_t *)dev->dev_private;
6569+
6570+ intel_postinit(&priv);
6571+ psb_init(priv);
6572+ dev->dev_private=priv;
6573+
6574+
6575+ /*
6576+ * Map MMIO addresses so that the DRM can control interrupt support
6577+ */
6578+
6579+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
6580+
6581+ priv->vdc_reg = ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
6582+
6583+ if (!priv->vdc_reg) {
6584+ /* Normally we'd want to unload the driver on failure. But due
6585+ * to circular dependancies, we can only return failure.
6586+ */
6587+ /* psb_driver_unload(dev); */
6588+ return 1;
6589+ }
6590+
6591+ priv->sgx_reg = ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
6592+ if (!priv->sgx_reg) {
6593+ /* Normally we'd want to unload the driver on failure. But due
6594+ * to circular dependancies, we can only return failure.
6595+ */
6596+ /* psb_driver_unload(dev); */
6597+ return 1;
6598+ }
6599+
6600+ priv->msvdx_reg = ioremap(resource_start + PSB_MSVDX_OFFSET, PSB_MSVDX_SIZE);
6601+ if (!priv->msvdx_reg) {
6602+ /* Normally we'd want to unload the driver on failure. But due
6603+ * to circular dependancies, we can only return failure.
6604+ */
6605+ /* psb_driver_unload(dev); */
6606+ return 1;
6607+ }
6608+
6609+ return 0;
6610+
6611+}
6612+
6613+void intel_preclose_2615(drm_device_t *dev,DRMFILE filp)
6614+{
6615+ intel_prerelease(dev);
6616+}
6617+
6618+
6619+int intel_getpages_2615( struct inode *inode, struct file *filp,
6620+ unsigned int cmd, unsigned long arg)
6621+{
6622+ drm_file_t *priv=filp->private_data;
6623+ drm_device_t *dev=priv->head->dev;
6624+ return intel_getpages(dev,filp,arg);
6625+}
6626+
6627+
6628+int intel_freepages_2615( struct inode *inode, struct file *filp,
6629+ unsigned int cmd, unsigned long arg )
6630+{
6631+
6632+ drm_file_t *priv=filp->private_data;
6633+ drm_device_t *dev=priv->head->dev;
6634+ return intel_freepages(dev,arg);
6635+}
6636+
6637+
6638+int intel_drm_info_init_2615( struct inode *inode, struct file *filp,
6639+ unsigned int cmd, unsigned long arg )
6640+{
6641+
6642+ drm_file_t *priv=filp->private_data;
6643+ drm_device_t *dev=priv->head->dev;
6644+ return intel_drm_info_init(dev,arg);
6645+
6646+}
6647+
6648+
6649+int intel_drm_info_get_2615( struct inode *inode, struct file *filp,
6650+ unsigned int cmd, unsigned long arg )
6651+{
6652+
6653+ drm_file_t *priv=filp->private_data;
6654+ drm_device_t *dev=priv->head->dev;
6655+ return intel_drm_info_get(dev,arg);
6656+
6657+}
6658+
6659+
6660+/* Following 2 functions were taken from drm_memory.c
6661+ * For some reason they are not being exported to use
6662+ * by the other drm.
6663+ */
6664+
6665+/**
6666+ * Allocate pages.
6667+ *
6668+ * \param order size order.
6669+ * \param area memory area. (Not used.)
6670+ * \return page address on success, or zero on failure.
6671+ *
6672+ * Allocate and reserve free pages.
6673+ */
6674+unsigned long intel_alloc_pages(int order, int area)
6675+{
6676+ unsigned long address;
6677+ unsigned long bytes = PAGE_SIZE << order;
6678+ unsigned long addr;
6679+ unsigned int sz;
6680+
6681+ address = __get_free_pages(GFP_KERNEL, order);
6682+ if (!address)
6683+ return 0;
6684+
6685+ /* Zero */
6686+ memset((void *)address, 0, bytes);
6687+
6688+ /* Reserve */
6689+ for (addr = address, sz = bytes;
6690+ sz > 0;
6691+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
6692+ SetPageReserved(virt_to_page(addr));
6693+ }
6694+
6695+ return address;
6696+}
6697+
6698+
6699+/**
6700+ * Free pages.
6701+ *
6702+ * \param address address of the pages to free.
6703+ * \param order size order.
6704+ * \param area memory area. (Not used.)
6705+ *
6706+ * Unreserve and free pages allocated by alloc_pages().
6707+ */
6708+void intel_free_pages(unsigned long address, int order, int area)
6709+{
6710+ unsigned long bytes = PAGE_SIZE << order;
6711+ unsigned long addr;
6712+ unsigned int sz;
6713+
6714+ if (!address) {
6715+ return;
6716+ }
6717+
6718+ /* Unreserve */
6719+ for (addr = address, sz = bytes;
6720+ sz > 0;
6721+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
6722+ ClearPageReserved(virt_to_page(addr));
6723+ }
6724+
6725+ free_pages(address, order);
6726+}
6727+
6728+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
6729+{
6730+ intel_device_private_t *priv;
6731+
6732+ priv=(intel_device_private_t *)dev->dev_private;
6733+
6734+ return 0;
6735+}
6736+
6737+int intel_drm_plb_interrupts_2615 ( struct inode *inode,
6738+ struct file *filp,
6739+ unsigned int cmd, void *arg )
6740+{
6741+ drm_file_t *priv=filp->private_data;
6742+ drm_device_t *dev=priv->head->dev;
6743+
6744+ return intel_drm_plb_interrupts( dev, arg );
6745+}
6746+
6747+drm_ioctl_desc_t intel_ioctls[] = {
6748+ [DRM_IOCTL_NR(DRM_INTEL_GETPAGES)] = { intel_getpages_2615, 0},
6749+ [DRM_IOCTL_NR(DRM_INTEL_FREEPAGES)] = { intel_freepages_2615, 0},
6750+ [DRM_IOCTL_NR(DRM_INTEL_INFO_INIT)] = { intel_drm_info_init_2615, 0},
6751+ [DRM_IOCTL_NR(DRM_INTEL_INFO_GET)] = { intel_drm_info_get_2615, 0},
6752+ [DRM_IOCTL_NR(DRM_INTEL_INTERRUPT)] = {intel_drm_plb_interrupts_2615,0}
6753+};
6754+
6755+int intel_max_ioctl = DRM_ARRAY_SIZE(intel_ioctls);
6756+
6757+
6758+
6759+static struct pci_device_id pciidlist[] = {
6760+ INTEL_PCI_IDS
6761+};
6762+
6763+int device_is_agp_2615(drm_device_t * dev)
6764+{
6765+ return 1;
6766+}
6767+
6768+
6769+static struct drm_driver driver = {
6770+ .firstopen = intel_firstopen_2615,
6771+ .preclose = intel_preclose_2615,
6772+ .reclaim_buffers=drm_core_reclaim_buffers,
6773+ .get_map_ofs=drm_core_get_map_ofs,
6774+ .get_reg_ofs=drm_core_get_reg_ofs,
6775+
6776+ .device_is_agp = device_is_agp_2615,
6777+
6778+ .major = DRIVER_MAJOR,
6779+ .minor = DRIVER_MINOR,
6780+ .patchlevel = DRIVER_PATCHLEVEL,
6781+ .name = DRIVER_NAME,
6782+ .desc = DRIVER_DESC,
6783+ .date = DRIVER_DATE,
6784+
6785+ .driver_features = DRIVER_USE_AGP|DRIVER_REQUIRE_AGP|DRIVER_USE_MTRR,
6786+ .ioctls = intel_ioctls,
6787+ .fops = {
6788+ .owner = THIS_MODULE,
6789+ .open = drm_open,
6790+ .release = drm_release,
6791+ .ioctl = drm_ioctl,
6792+ .mmap = drm_mmap,
6793+ .poll = drm_poll,
6794+ .fasync = drm_fasync,
6795+ },
6796+ .pci_driver = {
6797+ .name = DRIVER_NAME,
6798+ .id_table = pciidlist,
6799+ }
6800+};
6801+
6802+static struct drm_driver driver_plb = {
6803+ .load = psb_driver_load,
6804+ .firstopen = intel_psb_firstopen_2615,
6805+ .preclose = intel_preclose_2615,
6806+ .reclaim_buffers=drm_core_reclaim_buffers,
6807+ .get_map_ofs=drm_core_get_map_ofs,
6808+ .get_reg_ofs=drm_core_get_reg_ofs,
6809+
6810+ .device_is_agp = device_is_agp_2615,
6811+
6812+ .major = DRIVER_MAJOR,
6813+ .minor = DRIVER_MINOR,
6814+ .patchlevel = DRIVER_PATCHLEVEL,
6815+ .name = DRIVER_NAME,
6816+ .desc = DRIVER_DESC,
6817+ .date = DRIVER_DATE,
6818+
6819+ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
6820+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
6821+ .ioctls = intel_ioctls,
6822+ .irq_preinstall = psb_irq_preinstall,
6823+ .irq_postinstall = psb_irq_postinstall,
6824+ .irq_uninstall = psb_irq_uninstall,
6825+ .irq_handler = psb_irq_handler,
6826+
6827+ .fops = {
6828+ .owner = THIS_MODULE,
6829+ .open = drm_open,
6830+ .release = drm_release,
6831+ .ioctl = drm_ioctl,
6832+ .mmap = drm_plb_mmap,
6833+ .poll = drm_poll,
6834+ .fasync = drm_fasync,
6835+ },
6836+ .pci_driver = {
6837+ .name = DRIVER_NAME,
6838+ .id_table = pciidlist,
6839+ }
6840+};
6841+
6842+
6843+int intel_init(void)
6844+{
6845+ driver.num_ioctls = intel_max_ioctl;
6846+ driver_plb.num_ioctls = intel_max_ioctl;
6847+
6848+ /* We are peeking into the global AGP structures that
6849+ * we have access to in order to determine what chipset we're
6850+ * on. This isn't necessarily a good thing to do.
6851+ */
6852+
6853+ if (gart_id->device_id == PCI_DEVICE_ID_PLB) {
6854+ printk(KERN_ERR "Initializing DRM for Intel US15 SCH\n");
6855+ return drm_init(&driver_plb);
6856+ } else {
6857+ return drm_init(&driver);
6858+ }
6859+
6860+}
6861+
6862+void intel_exit(void)
6863+{
6864+ drm_exit(&driver);
6865+}
6866+
6867+struct file_operations intel_buffer_fops = {
6868+ .open = drm_open,
6869+ .release = drm_release,
6870+ .ioctl = drm_ioctl,
6871+ .mmap = intel_mmap_buffers,
6872+ .poll = drm_poll,
6873+ .fasync = drm_fasync,
6874+};
6875+#endif
6876diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2615.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2615.h
6877--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2615.h 1969-12-31 17:00:00.000000000 -0700
6878+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2615.h 2009-10-06 10:30:05.000000000 -0700
6879@@ -0,0 +1,72 @@
6880+
6881+/* -*- pse-c -*-
6882+ *----------------------------------------------------------------------------
6883+ * Filename: iegd_interface_2615.h
6884+ * $Revision: 1.6 $
6885+ *----------------------------------------------------------------------------
6886+ * Gart and DRM driver for Intel Embedded Graphics Driver
6887+ * Copyright © 2008, Intel Corporation.
6888+ *
6889+ * This program is free software; you can redistribute it and/or modify it
6890+ * under the terms and conditions of the GNU General Public License,
6891+ * version 2, as published by the Free Software Foundation.
6892+ *
6893+ * This program is distributed in the hope it will be useful, but WITHOUT
6894+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6895+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6896+ * more details.
6897+ *
6898+ * You should have received a copy of the GNU General Public License along with
6899+ * this program; if not, write to the Free Software Foundation, Inc.,
6900+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6901+ *
6902+ */
6903+
6904+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
6905+ *
6906+ * Redistribution and use in source and binary forms, with or without
6907+ * modification, are permitted provided that the following conditions are met:
6908+ * Redistributions of source code must retain the above copyright notice,
6909+ * this list of conditions and the following disclaimer.
6910+ *
6911+ * Redistributions in binary form must reproduce the above copyright
6912+ * notice, this list of conditions and the following disclaimer in the
6913+ * documentation and/or other materials provided with the distribution.
6914+ *
6915+ * Neither the name Intel Corporation nor the names of its contributors
6916+ * may be used to endorse or promote products derived from this software
6917+ * without specific prior written permission.
6918+ *
6919+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
6920+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
6921+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6922+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6923+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6924+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6925+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
6926+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
6927+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
6928+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
6929+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6930+ *
6931+ */
6932+
6933+/* Macros are defined here such that only kernel specific functions can be
6934+ * used.
6935+ */
6936+#if KERNEL2615
6937+#define REMAP_PAGE(a,b,c,d,e) io_remap_pfn_range(a,b, \
6938+ c >>PAGE_SHIFT, \
6939+ d,e)
6940+
6941+#define ORDER(a) drm_order(a)
6942+#define ALLOC_PAGES(a,b) intel_alloc_pages(a,b)
6943+#define ALLOC(a,b) drm_alloc(a,b)
6944+#define FREE(a,b,c) drm_free(a,b,c)
6945+#define FREE_PAGES(a,b,c) intel_free_pages(a,b,c)
6946+
6947+#define LOCK_DRM(d) mutex_lock(&d->struct_mutex)
6948+#define UNLOCK_DRM(d) mutex_unlock(&d->struct_mutex)
6949+#endif
6950+
6951+/* endif for KERNEL2615 */
6952diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2624.c patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2624.c
6953--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2624.c 1969-12-31 17:00:00.000000000 -0700
6954+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2624.c 2009-10-06 10:30:05.000000000 -0700
6955@@ -0,0 +1,820 @@
6956+/* -*- pse-c -*-
6957+ *----------------------------------------------------------------------------
6958+ * Filename: iegd_interface_2611.c
6959+ * $Revision: 1.8 $
6960+ *----------------------------------------------------------------------------
6961+ * Gart and DRM driver for Intel Embedded Graphics Driver
6962+ * Copyright © 2008, Intel Corporation.
6963+ *
6964+ * This program is free software; you can redistribute it and/or modify it
6965+ * under the terms and conditions of the GNU General Public License,
6966+ * version 2, as published by the Free Software Foundation.
6967+ *
6968+ * This program is distributed in the hope it will be useful, but WITHOUT
6969+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6970+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6971+ * more details.
6972+ *
6973+ * You should have received a copy of the GNU General Public License along with
6974+ * this program; if not, write to the Free Software Foundation, Inc.,
6975+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6976+ *
6977+ */
6978+
6979+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
6980+ *
6981+ * Redistribution and use in source and binary forms, with or without
6982+ * modification, are permitted provided that the following conditions are met:
6983+ * Redistributions of source code must retain the above copyright notice,
6984+ * this list of conditions and the following disclaimer.
6985+ *
6986+ * Redistributions in binary form must reproduce the above copyright
6987+ * notice, this list of conditions and the following disclaimer in the
6988+ * documentation and/or other materials provided with the distribution.
6989+ *
6990+ * Neither the name Intel Corporation nor the names of its contributors
6991+ * may be used to endorse or promote products derived from this software
6992+ * without specific prior written permission.
6993+ *
6994+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
6995+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
6996+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6997+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6998+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6999+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
7000+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
7001+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
7002+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
7003+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
7004+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7005+ *
7006+ */
7007+
7008+#include "iegd.h"
7009+#include "drmP.h"
7010+#include "drm.h"
7011+
7012+#include "iegd_drm.h"
7013+#include "iegd_drv.h"
7014+#include "psb_intregs.h"
7015+#include "intelpci.h"
7016+
7017+int drm_irq_install(drm_device_t *dev);
7018+
7019+#if KERNEL2624
7020+
7021+/* get intel_buffer_fops from the interface_###.c files */
7022+extern struct file_operations intel_buffer_fops;
7023+
7024+/* Global variable to keep track the amount of memory we are using */
7025+static int memory = 0;
7026+
7027+int intel_firstopen_2624(struct drm_device *dev)
7028+{
7029+ intel_device_private_t *priv;
7030+ priv=(intel_device_private_t *)dev->dev_private;
7031+
7032+ intel_postinit(&priv);
7033+ dev->dev_private=priv;
7034+
7035+ return 0;
7036+
7037+}
7038+
7039+int intel_plb_firstopen_2624(struct drm_device *dev)
7040+{
7041+
7042+ unsigned long resource_start;
7043+ intel_device_private_t *priv;
7044+ priv=(intel_device_private_t *)dev->dev_private;
7045+
7046+ intel_postinit(&priv);
7047+ psb_init(priv);
7048+ dev->dev_private=priv;
7049+
7050+ /*
7051+ * Map MMIO addresses so that the DRM can control interrupt support
7052+ */
7053+
7054+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
7055+ priv->vdc_reg = ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
7056+ if (!priv->vdc_reg) {
7057+ /*
7058+ * Normally we'd want to unload the driver on failure. But due
7059+ * to circular dependancies, we can only return failure.
7060+ */
7061+ /* psb_driver_unload(dev); */
7062+ return 1;
7063+ }
7064+
7065+ priv->sgx_reg = ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
7066+ if (!priv->sgx_reg) {
7067+ /*
7068+ * Normally we'd want to unload the driver on failure. But due
7069+ * to circular dependancies, we can only return failure.
7070+ */
7071+ /* psb_driver_unload(dev); */
7072+ return 1;
7073+ }
7074+
7075+ priv->msvdx_reg = ioremap(resource_start + PSB_MSVDX_OFFSET, PSB_MSVDX_SIZE);
7076+ if (!priv->msvdx_reg) {
7077+ /*
7078+ * Normally we'd want to unload the driver on failure. But due
7079+ * to circular dependancies, we can only return failure.
7080+ */
7081+ /* psb_driver_unload(dev); */
7082+ return 1;
7083+ }
7084+
7085+ return 0;
7086+
7087+}
7088+
7089+void intel_preclose_2624(drm_device_t *dev, struct drm_file *filp)
7090+{
7091+ intel_prerelease(dev);
7092+}
7093+
7094+/*
7095+ * Implement the 2.6.24 kernel interface for the device specific IOCTL
7096+ * that gets pages of memory from the DRM and returns them to the caller.
7097+ */
7098+int intel_getpages_2624(struct drm_device *dev,
7099+ void *data,
7100+ struct drm_file *filepriv)
7101+{
7102+ drm_intel_getpages_t *getpages;
7103+ unsigned long bytes;
7104+ int order;
7105+ int size;
7106+
7107+ unsigned long address;
7108+ unsigned long phy_address;
7109+ unsigned long offset;
7110+
7111+ struct page *pg;
7112+
7113+ unsigned long virtual;
7114+ struct file_operations *old_fops;
7115+
7116+ intel_device_private_t *dev_ptr = dev->dev_private;
7117+ drm_intel_listpages_t *page;
7118+ drm_intel_list_t *list;
7119+
7120+ DRM_DEBUG("\n");
7121+ DRM_INFO("in intel_getpages_2624, calling intel_getpages\n");
7122+ getpages = (drm_intel_getpages_t *)data;
7123+
7124+ bytes = getpages->size;
7125+
7126+ /*
7127+ * Check to see if this allocation would exceed 16MEG in total memory
7128+ * This is to prevent denial of service attack. 16Meg should be enough.
7129+ */
7130+ if((memory + bytes) > MB(16) ){
7131+ /* We exceeded 16MEG. Bail out */
7132+ DRM_ERROR("Total memory allocated exceeded 16Meg!\n");
7133+ DRM_INFO("Total memory allocated exceeded 16Meg!\n");
7134+ return -EFAULT;
7135+ }
7136+
7137+ /* number of pages that are needed */
7138+ size = bytes>>PAGE_SHIFT;
7139+ if(bytes & ~(PAGE_SIZE*size)){
7140+ ++size;
7141+ }
7142+ order = ORDER(size);
7143+ DRM_DEBUG("Allocating bytes:%#lx,size:%d,order:%d\n",
7144+ (unsigned long)bytes,size,order);
7145+
7146+ /*
7147+ * Allocate the pages.
7148+ * returns kernel logical address.
7149+ * Is this the same as the kernel virtual address??
7150+ */
7151+ address = ALLOC_PAGES(order,0);
7152+ if(!address){
7153+ DRM_ERROR("Can't get pages\n");
7154+ DRM_INFO("Can't get pages\n");
7155+ return -EFAULT;
7156+ }
7157+ phy_address = __pa(address);
7158+
7159+ /* Find virtual address of the phys address */
7160+ pg = virt_to_page((void *)address);
7161+ offset = pg->index;
7162+
7163+ /* Find the number of bytes that is actually allocated */
7164+ size = PAGE_SIZE<<order;
7165+ DRM_DEBUG("Allocated address:%#lx,page offset:%#lx,phy_address:%#lx\n",
7166+ address,offset,phy_address);
7167+
7168+ /*do_mmap on the logical address and return virtual address */
7169+ down_write(&current->mm->mmap_sem);
7170+
7171+ old_fops = (struct file_operations *) (filepriv->filp->f_op);
7172+ filepriv->filp->f_op = &intel_buffer_fops;
7173+
7174+ virtual = do_mmap(filepriv->filp, 0, size,
7175+ PROT_READ|PROT_WRITE,MAP_SHARED, phy_address);
7176+ filepriv->filp->f_op = old_fops;
7177+
7178+ up_write(&current->mm->mmap_sem);
7179+ DRM_DEBUG("Mmaped virtual:%#lx,address:%#lx\n",virtual,
7180+ (unsigned long)__va(phy_address));
7181+
7182+ if(virtual > -1024UL){
7183+ DRM_ERROR("mmap failed:%d\n",(int)virtual);
7184+ DRM_INFO("mmap failed:%d\n",(int)virtual);
7185+ return -EFAULT;
7186+ }
7187+
7188+ getpages->phy_address = phy_address;
7189+ getpages->virt_address = virtual;
7190+ getpages->size = size;
7191+ getpages->offset = offset;
7192+
7193+ DRM_DEBUG("Mmap success requested size:%d (%d)\n",
7194+ getpages->size,(int)bytes);
7195+
7196+ /* alloc the page to be put into the linked list */
7197+ page = ALLOC(sizeof(*page),DRM_MEM_DRIVER);
7198+ if(!page){
7199+ DRM_DEBUG("Can't alloc list for page\n");
7200+ DRM_INFO("Can't alloc list for page\n");
7201+ return -ENOMEM;
7202+ }
7203+
7204+ /*page->pid=current->pid;*/
7205+ page->pid = current->group_leader->pid;
7206+ page->size = size;
7207+ page->phy_address = phy_address;
7208+ page->virt_address = virtual;
7209+ page->offset = offset;
7210+
7211+ DRM_DEBUG("parent pid:%d,pid:%d,group_leader->pid:%d\n"
7212+ ,current->parent->pid,current->pid,current->group_leader->pid);
7213+
7214+ /* Alloc the list to be added then add it to the linked list */
7215+ list = ALLOC(sizeof(*list),DRM_MEM_DRIVER);
7216+ if(!list){
7217+ DRM_DEBUG("Can't alloc list for page\n");
7218+ DRM_INFO("Can't alloc list for page\n");
7219+ FREE(page,sizeof(*page),0);
7220+ return -ENOMEM;
7221+ }
7222+ memset(list,0,sizeof(*list));
7223+ list->page = page;
7224+ LOCK_DRM(dev);
7225+ list_add(&list->head,&dev_ptr->pagelist->head);
7226+ UNLOCK_DRM(dev);
7227+
7228+ /* update the total amount of memory we use */
7229+ memory += size;
7230+ DRM_DEBUG("memory has:%d bytes\n",memory);
7231+
7232+ DRM_INFO("intel_getpages Exit\n");
7233+ return 0;
7234+}
7235+
7236+
7237+/*
7238+ * Implement the 2.6.24 kernel interface for the device specific IOCTL
7239+ * that frees pages of memory that were previouslly allocated from the DRM.
7240+ */
7241+int intel_freepages_2624(struct drm_device *dev,
7242+ void *data,
7243+ struct drm_file *filepriv)
7244+{
7245+ drm_intel_freepages_t *freepages;
7246+ unsigned long bytes;
7247+ int order;
7248+ int size;
7249+
7250+ intel_device_private_t *dev_ptr=dev->dev_private;
7251+ drm_intel_listpages_t *page;
7252+ drm_intel_list_t *r_list=NULL;
7253+ struct list_head *pagelist;
7254+
7255+ DRM_DEBUG("Freeing pages\n");
7256+ freepages = (drm_intel_freepages_t *)data;
7257+
7258+ /* number of pages that are needed */
7259+ bytes = freepages->size;
7260+ size = bytes>>PAGE_SHIFT;
7261+ if(bytes & ~(PAGE_SIZE*size)){
7262+ ++size;
7263+ }
7264+ order = ORDER(size);
7265+ DRM_DEBUG("bytes:%d,size:%d,order:%d,phy_address:%#lx\n", (int)bytes,
7266+ (int)size,(int)order,freepages->phy_address);
7267+
7268+ /* free the pages */
7269+ DRM_DEBUG("freeing address:%#lx,size:%#lx\n",
7270+ (unsigned long)__va(freepages->phy_address),(unsigned long)bytes);
7271+
7272+ DRM_DEBUG("parent pid:%d,pid:%d,group_leader->pid:%d\n"
7273+ ,current->parent->pid,current->pid,current->group_leader->pid);
7274+
7275+ /* See if the requested address is in our page list */
7276+ LOCK_DRM(dev);
7277+ pagelist = &dev_ptr->pagelist->head;
7278+ list_for_each(pagelist, &dev_ptr->pagelist->head){
7279+ r_list=list_entry(pagelist, drm_intel_list_t, head);
7280+ if((r_list->page->pid==current->group_leader->pid)
7281+ && (r_list->page->phy_address==freepages->phy_address)){
7282+
7283+ DRM_DEBUG("found pid:%d\n",current->group_leader->pid);
7284+ DRM_DEBUG("size:%d\n",r_list->page->size);
7285+ DRM_DEBUG("phy_address:%#lx\n",r_list->page->phy_address);
7286+ DRM_DEBUG("virt_add:%#lx\n",r_list->page->virt_address);
7287+ DRM_DEBUG("offset:%#lx\n",r_list->page->offset);
7288+
7289+ break;
7290+ }
7291+
7292+ }
7293+
7294+ if(pagelist == (&dev_ptr->pagelist->head)){
7295+ DRM_DEBUG("Can't find pages alloc for pid:%d\n",current->pid);
7296+ UNLOCK_DRM(dev);
7297+ return -EINVAL;
7298+ }
7299+
7300+ /* munmap the region 1st */
7301+ down_write(&current->mm->mmap_sem);
7302+ DRM_DEBUG("Unmapping virt_address:%#lx\n",freepages->virt_address);
7303+ do_munmap(current->mm,freepages->virt_address,bytes);
7304+ up_write(&current->mm->mmap_sem);
7305+
7306+ /* Free the pages! */
7307+ FREE_PAGES((unsigned long)__va(freepages->phy_address), order, 0);
7308+
7309+ /* Free the page list */
7310+ page = r_list->page;
7311+ list_del(pagelist);
7312+ size = r_list->page->size;
7313+ FREE(pagelist,sizeof(*pagelist),0);
7314+ FREE(page,sizeof(*page),0);
7315+ UNLOCK_DRM(dev);
7316+
7317+ /* update the total memory that we use */
7318+ memory -= size;
7319+ DRM_DEBUG("memory has:%d bytes\n", memory);
7320+ return 0;
7321+}
7322+
7323+
7324+/*
7325+ * Implement the 2.6.24 kernel interface for the device specific IOCTL
7326+ * that stores client specific information.
7327+ */
7328+int intel_drm_info_init_2624(struct drm_device *dev,
7329+ void *data,
7330+ struct drm_file *filepriv)
7331+{
7332+ intel_drm_info_t *info;
7333+ intel_drm_info_t *info_ptr;
7334+ intel_device_private_t *dev_ptr;
7335+
7336+ if (dev == NULL) {
7337+ DRM_INFO("ERROR ERROR, drm device is NULL\n");
7338+ return -EFAULT;
7339+ }
7340+ DRM_DEBUG("info init succesful dev_private:%#lx\n",
7341+ (unsigned long)dev->dev_private);
7342+ dev_ptr = dev->dev_private;
7343+
7344+ /* See if dev_private is already allocated */
7345+ if(!dev->dev_private){
7346+ DRM_ERROR("dev_private not allocated!\n");
7347+ return 0;
7348+ }
7349+ info_ptr = dev_ptr->info_ptr;
7350+
7351+ /* See if info is already allocated */
7352+ if(info_ptr->device_id){
7353+ DRM_DEBUG("Info already allocated: device id = 0x%lx\n",
7354+ info_ptr->device_id);
7355+ DRM_ERROR("Info already allocated!\n");
7356+ return 0;
7357+ }
7358+
7359+ info = (intel_drm_info_t *)data;
7360+
7361+ info_ptr->device_id = info->device_id;
7362+ info_ptr->revision = info->revision;
7363+ info_ptr->video_memory_offset = info->video_memory_offset;
7364+ info_ptr->video_memory_size = info->video_memory_size;
7365+ info_ptr->hw_status_offset = info->hw_status_offset;
7366+ DRM_DEBUG("Saving dev_id:%#lx rev:%#lx offset:%#lx size:%#lx, "
7367+ "hwst_offset:%lx\n",
7368+ info_ptr->device_id, info_ptr->revision,
7369+ info_ptr->video_memory_offset, info_ptr->video_memory_size,
7370+ info_ptr->hw_status_offset);
7371+
7372+ return 0;
7373+}
7374+
7375+
7376+/*
7377+ * Implement the 2.6.24 kernel interface for the device specific IOCTL
7378+ * that retrieves client specific information.
7379+ */
7380+int intel_drm_info_get_2624(struct drm_device *dev,
7381+ void *data,
7382+ struct drm_file *filepriv)
7383+{
7384+ intel_drm_info_t *info;
7385+ intel_device_private_t *dev_ptr = dev->dev_private;
7386+ intel_drm_info_t *info_ptr = dev_ptr->info_ptr;
7387+
7388+ DRM_DEBUG("Info get dev_id:%#lx rev:%#lx offset:%#lx size:%#lx "
7389+ "hwst_offset:%lx\n",
7390+ info_ptr->device_id,info_ptr->revision,
7391+ info_ptr->video_memory_offset,info_ptr->video_memory_size,
7392+ info_ptr->hw_status_offset);
7393+
7394+ info = (intel_drm_info_t *)data;
7395+
7396+ info->device_id = info_ptr->device_id;
7397+ info->revision = info_ptr->revision;
7398+ info->video_memory_offset = info_ptr->video_memory_offset;
7399+ info->video_memory_size = info_ptr->video_memory_size;
7400+ info->hw_status_offset = info_ptr->hw_status_offset;
7401+
7402+ return 0;
7403+}
7404+
7405+/*
7406+ * The following 2 functions were taken from drm_memory.c
7407+ * For some reason they are not being exported to use by the other drm.
7408+ */
7409+
7410+/**
7411+ * Allocate pages.
7412+ *
7413+ * \param order size order.
7414+ * \param area memory area. (Not used.)
7415+ * \return page address on success, or zero on failure.
7416+ *
7417+ * Allocate and reserve free pages.
7418+ */
7419+unsigned long intel_alloc_pages(int order, int area)
7420+{
7421+ unsigned long address;
7422+ unsigned long bytes = PAGE_SIZE << order;
7423+ unsigned long addr;
7424+ unsigned int sz;
7425+
7426+ address = __get_free_pages(GFP_KERNEL, order);
7427+ if (!address)
7428+ return 0;
7429+
7430+ /* Zero */
7431+ memset((void *)address, 0, bytes);
7432+
7433+ /* Reserve */
7434+ for (addr = address, sz = bytes;
7435+ sz > 0;
7436+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
7437+ SetPageReserved(virt_to_page(addr));
7438+ }
7439+
7440+ return address;
7441+}
7442+
7443+/**
7444+ * Free pages.
7445+ *
7446+ * \param address address of the pages to free.
7447+ * \param order size order.
7448+ * \param area memory area. (Not used.)
7449+ *
7450+ * Unreserve and free pages allocated by alloc_pages().
7451+ */
7452+void intel_free_pages(unsigned long address, int order, int area)
7453+{
7454+ unsigned long bytes = PAGE_SIZE << order;
7455+ unsigned long addr;
7456+ unsigned int sz;
7457+
7458+ if (!address) {
7459+ return;
7460+ }
7461+
7462+ /* Unreserve */
7463+ for (addr = address, sz = bytes; sz > 0;
7464+ addr += PAGE_SIZE, sz -= PAGE_SIZE) {
7465+ ClearPageReserved(virt_to_page(addr));
7466+ }
7467+
7468+ free_pages(address, order);
7469+}
7470+
7471+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
7472+{
7473+ intel_device_private_t *priv;
7474+
7475+ priv=(intel_device_private_t *)dev->dev_private;
7476+
7477+ return 0;
7478+}
7479+
7480+int intel_drm_plb_interrupts_2624 (struct drm_device *dev,
7481+ void *data,
7482+ struct drm_file *filepriv)
7483+{
7484+ intel_device_private_t *priv = dev->dev_private;
7485+ interrupt_info_t *plb_info;
7486+ unsigned long irqflags;
7487+ int ret = 0;
7488+ int rv;
7489+
7490+ plb_info = (interrupt_info_t *)data;
7491+
7492+ /* USW15 definition of in and out
7493+ *
7494+ * in/out[0] VDC
7495+ * in/out[1] sgx
7496+ * in/out[2] sgx2
7497+ * in/out[3] msvdx
7498+ */
7499+
7500+ plb_info->out[0]=0;
7501+ plb_info->out[1]=0;
7502+ plb_info->out[2]=0;
7503+ plb_info->out[3]=0;
7504+
7505+ switch (plb_info->req_type) {
7506+ case CLEAR_INT:
7507+ plb_info->in[0] &= priv->vdc_irq_mask;
7508+ plb_info->in[1] &= priv->sgx_irq_mask;
7509+ plb_info->in[2] &= priv->sgx_irq_mask2;
7510+ plb_info->in[3] &= priv->msvdx_irq_mask;
7511+
7512+ if (plb_info->in[0] || plb_info->in[1] ||
7513+ plb_info->in[2] || plb_info->in[3]) {
7514+
7515+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
7516+ priv->out_vdc &= ~plb_info->in[0];
7517+ plb_info->out[0] = priv->out_vdc;
7518+
7519+ priv->out_sgx &= ~plb_info->in[1];
7520+ plb_info->out[1] = priv->out_sgx;
7521+
7522+ priv->out_sgx2 &= ~plb_info->in[2];
7523+ plb_info->out[2] = priv->out_sgx2;
7524+
7525+ priv->out_mtx &= ~plb_info->in[3];
7526+ plb_info->out[3] = priv->out_mtx;
7527+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
7528+
7529+ plb_info->req_status = INT_CLEARED;
7530+
7531+ } else {
7532+ plb_info->req_status = INT_NOOP;
7533+ }
7534+
7535+ break;
7536+
7537+ case READ_INT:
7538+ plb_info->out[0] = priv->out_vdc;
7539+ plb_info->out[1] = priv->out_sgx;
7540+ plb_info->out[2] = priv->out_sgx2;
7541+ plb_info->out[3] = priv->out_mtx;
7542+ plb_info->req_status = INT_READ;
7543+
7544+ break;
7545+
7546+ case WAIT_INT:
7547+ plb_info->in[0] &= priv->vdc_irq_mask;
7548+ plb_info->in[1] &= priv->sgx_irq_mask;
7549+ plb_info->in[2] &= priv->sgx_irq_mask2;
7550+ plb_info->in[3] &= priv->msvdx_irq_mask;
7551+
7552+ if (plb_info->in[0] || plb_info->in[1] ||
7553+ plb_info->in[2] || plb_info->in[3]) {
7554+
7555+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
7556+
7557+ /* none of the interrupts have ocurred */
7558+ if ((priv->out_vdc & plb_info->in[0]) ||
7559+ (priv->out_sgx & plb_info->in[1]) ||
7560+ (priv->out_sgx2 & plb_info->in[2]) ||
7561+ (priv->out_mtx & plb_info->in[3])) {
7562+
7563+ /* At least one of the interrupts has already occurred */
7564+ plb_info->req_status = INT_STORED;
7565+
7566+ } else {
7567+
7568+ /* Wait for an interrupt to occur */
7569+ priv->event_present = 0;
7570+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
7571+
7572+ DRM_WAIT_ON(ret, priv->event_queue, 20 * DRM_HZ,
7573+ priv->event_present);
7574+
7575+ if (ret) {
7576+ plb_info->req_status = INT_TIMEOUT;
7577+ break;
7578+ }
7579+
7580+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
7581+
7582+ plb_info->req_status = INT_HANDLED;
7583+
7584+ }
7585+ plb_info->out[0] = priv->out_vdc;
7586+ plb_info->out[1] = priv->out_sgx;
7587+ plb_info->out[2] = priv->out_sgx2;
7588+ plb_info->out[3] = priv->out_mtx;
7589+
7590+ /* Clear the outstanding interrupts that have just been
7591+ * retrieved
7592+ */
7593+ priv->out_vdc &= ~(plb_info->out[0] & plb_info->in[0]);
7594+ priv->out_sgx &= ~(plb_info->out[1] & plb_info->in[1]) ;
7595+ priv->out_sgx2 &= ~(plb_info->out[2] & plb_info->in[2]);
7596+ priv->out_mtx &= ~(plb_info->out[3] & plb_info->in[3]);
7597+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
7598+
7599+ } else {
7600+
7601+ /* Unsupported interrupt */
7602+ plb_info->req_status = INT_NOOP;
7603+
7604+ }
7605+
7606+ break;
7607+
7608+ case UNMASK_INT:
7609+
7610+ if (!dev->irq_enabled) {
7611+ rv = drm_irq_install(dev);
7612+ if (rv != 0) {
7613+ DRM_ERROR("%s: could not install IRQs: rv = %d\n", __FUNCTION__, rv);
7614+ return rv;
7615+ }
7616+ }
7617+
7618+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
7619+ PSB_WVDC32(0x00000000, IMR);
7620+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
7621+
7622+ break;
7623+
7624+ case MASK_INT:
7625+
7626+ if (dev->irq_enabled) {
7627+ rv = drm_irq_uninstall(dev);
7628+ if (rv != 0) {
7629+ DRM_ERROR("%s: could not uninstall IRQs: rv = %d\n", __FUNCTION__, rv);
7630+ return rv;
7631+ }
7632+ }
7633+
7634+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
7635+ PSB_WVDC32(0xFFFFFFFF, IMR);
7636+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
7637+
7638+ break;
7639+
7640+ default:
7641+
7642+ plb_info->req_status = INT_INVALID;
7643+ }
7644+
7645+ return 0;
7646+}
7647+
7648+
7649+drm_ioctl_desc_t intel_ioctls[] = {
7650+ DRM_IOCTL_DEF(DRM_INTEL_GETPAGES, intel_getpages_2624, 0),
7651+ DRM_IOCTL_DEF(DRM_INTEL_FREEPAGES, intel_freepages_2624, 0),
7652+ DRM_IOCTL_DEF(DRM_INTEL_INFO_INIT, intel_drm_info_init_2624, 0),
7653+ DRM_IOCTL_DEF(DRM_INTEL_INFO_GET, intel_drm_info_get_2624, 0),
7654+ DRM_IOCTL_DEF(DRM_INTEL_INTERRUPT, intel_drm_plb_interrupts_2624, 0)
7655+};
7656+
7657+int intel_max_ioctl = DRM_ARRAY_SIZE(intel_ioctls);
7658+
7659+
7660+
7661+static struct pci_device_id pciidlist[] = {
7662+ INTEL_PCI_IDS
7663+};
7664+
7665+int device_is_agp_2624(drm_device_t * dev)
7666+{
7667+ return 1;
7668+}
7669+
7670+static struct drm_driver driver = {
7671+ .firstopen = intel_firstopen_2624,
7672+ .preclose = intel_preclose_2624,
7673+ .reclaim_buffers=drm_core_reclaim_buffers,
7674+ .get_map_ofs=drm_core_get_map_ofs,
7675+ .get_reg_ofs=drm_core_get_reg_ofs,
7676+
7677+ .device_is_agp = device_is_agp_2624,
7678+
7679+ .major = DRIVER_MAJOR,
7680+ .minor = DRIVER_MINOR,
7681+ .patchlevel = DRIVER_PATCHLEVEL,
7682+ .name = DRIVER_NAME,
7683+ .desc = DRIVER_DESC,
7684+ .date = DRIVER_DATE,
7685+
7686+ .driver_features = DRIVER_USE_AGP|DRIVER_REQUIRE_AGP|DRIVER_USE_MTRR,
7687+ .ioctls = intel_ioctls,
7688+ .fops = {
7689+ .owner = THIS_MODULE,
7690+ .open = drm_open,
7691+ .release = drm_release,
7692+ .ioctl = drm_ioctl,
7693+ .mmap = drm_mmap,
7694+ .poll = drm_poll,
7695+ .fasync = drm_fasync,
7696+ },
7697+ .pci_driver = {
7698+ .name = DRIVER_NAME,
7699+ .id_table = pciidlist,
7700+ }
7701+};
7702+
7703+static struct drm_driver driver_plb = {
7704+ .load = psb_driver_load,
7705+ .firstopen = intel_plb_firstopen_2624,
7706+ .preclose = intel_preclose_2624,
7707+ .reclaim_buffers=drm_core_reclaim_buffers,
7708+ .get_map_ofs=drm_core_get_map_ofs,
7709+ .get_reg_ofs=drm_core_get_reg_ofs,
7710+
7711+ .device_is_agp = device_is_agp_2624,
7712+
7713+ .major = DRIVER_MAJOR,
7714+ .minor = DRIVER_MINOR,
7715+ .patchlevel = DRIVER_PATCHLEVEL,
7716+ .name = DRIVER_NAME,
7717+ .desc = DRIVER_DESC,
7718+ .date = DRIVER_DATE,
7719+
7720+ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
7721+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_USE_MTRR,
7722+ .ioctls = intel_ioctls,
7723+ .irq_preinstall = psb_irq_preinstall,
7724+ .irq_postinstall = psb_irq_postinstall,
7725+ .irq_uninstall = psb_irq_uninstall,
7726+ .irq_handler = psb_irq_handler,
7727+
7728+ .fops = {
7729+ .owner = THIS_MODULE,
7730+ .open = drm_open,
7731+ .release = drm_release,
7732+ .ioctl = drm_ioctl,
7733+ .mmap = drm_plb_mmap,
7734+ .poll = drm_poll,
7735+ .fasync = drm_fasync,
7736+ },
7737+ .pci_driver = {
7738+ .name = DRIVER_NAME,
7739+ .id_table = pciidlist,
7740+ }
7741+};
7742+
7743+int intel_init(void)
7744+{
7745+ driver.num_ioctls = intel_max_ioctl;
7746+ driver_plb.num_ioctls = intel_max_ioctl;
7747+
7748+ /* We are peeking into the global AGP structures that
7749+ * we have access to in order to determine what chipset we're
7750+ * on. This isn't necessarily a good thing to do.
7751+ */
7752+
7753+ if (gart_id->device_id == PCI_DEVICE_ID_PLB) {
7754+ printk(KERN_ERR "Initializing DRM for Intel US15 SCH\n");
7755+ return drm_init(&driver_plb);
7756+ } else {
7757+ return drm_init(&driver);
7758+ }
7759+
7760+}
7761+
7762+void intel_exit(void)
7763+{
7764+ drm_exit(&driver);
7765+}
7766+
7767+struct file_operations intel_buffer_fops = {
7768+ .open = drm_open,
7769+ .release = drm_release,
7770+ .ioctl = drm_ioctl,
7771+ .mmap = intel_mmap_buffers,
7772+ .poll = drm_poll,
7773+ .fasync = drm_fasync,
7774+};
7775+#endif
7776diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2624.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2624.h
7777--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_2624.h 1969-12-31 17:00:00.000000000 -0700
7778+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_2624.h 2009-10-06 10:30:05.000000000 -0700
7779@@ -0,0 +1,78 @@
7780+/* -*- pse-c -*-
7781+ *----------------------------------------------------------------------------
7782+ * Filename: iegd_interface_2611.h
7783+ * $Revision: 1.5 $
7784+ *----------------------------------------------------------------------------
7785+ * Gart and DRM driver for Intel Embedded Graphics Driver
7786+ * Copyright © 2008, Intel Corporation.
7787+ *
7788+ * This program is free software; you can redistribute it and/or modify it
7789+ * under the terms and conditions of the GNU General Public License,
7790+ * version 2, as published by the Free Software Foundation.
7791+ *
7792+ * This program is distributed in the hope it will be useful, but WITHOUT
7793+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
7794+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
7795+ * more details.
7796+ *
7797+ * You should have received a copy of the GNU General Public License along with
7798+ * this program; if not, write to the Free Software Foundation, Inc.,
7799+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
7800+ *
7801+ */
7802+
7803+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
7804+ *
7805+ * Redistribution and use in source and binary forms, with or without
7806+ * modification, are permitted provided that the following conditions are met:
7807+ * Redistributions of source code must retain the above copyright notice,
7808+ * this list of conditions and the following disclaimer.
7809+ *
7810+ * Redistributions in binary form must reproduce the above copyright
7811+ * notice, this list of conditions and the following disclaimer in the
7812+ * documentation and/or other materials provided with the distribution.
7813+ *
7814+ * Neither the name Intel Corporation nor the names of its contributors
7815+ * may be used to endorse or promote products derived from this software
7816+ * without specific prior written permission.
7817+ *
7818+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
7819+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
7820+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
7821+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
7822+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
7823+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
7824+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
7825+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
7826+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
7827+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
7828+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7829+ *
7830+ */
7831+
7832+/* Macros are defined here such that only kernel specific functions can be
7833+ * used.
7834+ */
7835+#if KERNEL2624
7836+#define REMAP_PAGE(a,b,c,d,e) io_remap_pfn_range(a,b, \
7837+ c >>PAGE_SHIFT, \
7838+ d,e)
7839+
7840+#define ORDER(a) drm_order(a)
7841+#define ALLOC_PAGES(a,b) intel_alloc_pages(a,b)
7842+
7843+//kernel version 31 removed some wrapper functions
7844+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
7845+#define ALLOC(a, b) kmalloc(a, GFP_KERNEL)
7846+#define FREE(a,b,c) kfree(a)
7847+#else
7848+#define ALLOC(a,b) drm_alloc(a,b)
7849+#define FREE(a,b,c) drm_free(a,b,c)
7850+#endif
7851+
7852+#define FREE_PAGES(a,b,c) intel_free_pages(a,b,c)
7853+#define LOCK_DRM(d) mutex_lock(&d->struct_mutex)
7854+#define UNLOCK_DRM(d) mutex_unlock(&d->struct_mutex)
7855+#endif
7856+
7857+/* endif for KERNEL2624 */
7858diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_265.c patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_265.c
7859--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_265.c 1969-12-31 17:00:00.000000000 -0700
7860+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_265.c 2009-10-06 10:30:05.000000000 -0700
7861@@ -0,0 +1,147 @@
7862+/* -*- pse-c -*-
7863+ *----------------------------------------------------------------------------
7864+ * Filename: iegd_interface_265.c
7865+ * $Revision: 1.6 $
7866+ *----------------------------------------------------------------------------
7867+ * Gart and DRM driver for Intel Embedded Graphics Driver
7868+ * Copyright © 2008, Intel Corporation.
7869+ *
7870+ * This program is free software; you can redistribute it and/or modify it
7871+ * under the terms and conditions of the GNU General Public License,
7872+ * version 2, as published by the Free Software Foundation.
7873+ *
7874+ * This program is distributed in the hope it will be useful, but WITHOUT
7875+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
7876+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
7877+ * more details.
7878+ *
7879+ * You should have received a copy of the GNU General Public License along with
7880+ * this program; if not, write to the Free Software Foundation, Inc.,
7881+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
7882+ *
7883+ */
7884+
7885+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
7886+ *
7887+ * Redistribution and use in source and binary forms, with or without
7888+ * modification, are permitted provided that the following conditions are met:
7889+ * Redistributions of source code must retain the above copyright notice,
7890+ * this list of conditions and the following disclaimer.
7891+ *
7892+ * Redistributions in binary form must reproduce the above copyright
7893+ * notice, this list of conditions and the following disclaimer in the
7894+ * documentation and/or other materials provided with the distribution.
7895+ *
7896+ * Neither the name Intel Corporation nor the names of its contributors
7897+ * may be used to endorse or promote products derived from this software
7898+ * without specific prior written permission.
7899+ *
7900+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
7901+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
7902+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
7903+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
7904+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
7905+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
7906+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
7907+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
7908+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
7909+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
7910+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7911+ *
7912+ */
7913+
7914+#include "iegd.h"
7915+#include "drmP.h"
7916+#include "drm.h"
7917+
7918+#include "iegd_drm.h"
7919+#include "iegd_drv.h"
7920+
7921+#if KERNEL265
7922+#include "drm_agpsupport.h"
7923+#include "drm_auth.h"
7924+#include "drm_bufs.h"
7925+#include "drm_context.h"
7926+#include "drm_dma.h"
7927+#include "drm_drawable.h"
7928+#include "drm_drv.h"
7929+
7930+#include "drm_fops.h"
7931+#include "drm_init.h"
7932+#include "drm_ioctl.h"
7933+#include "drm_lock.h"
7934+#include "drm_memory.h"
7935+#include "drm_proc.h"
7936+#include "drm_vm.h"
7937+#include "drm_stub.h"
7938+
7939+int intel_postinit_265(drm_device_t *dev){
7940+
7941+ intel_device_private_t *priv;
7942+ priv=(intel_device_private_t *)dev->dev_private;
7943+
7944+ intel_postinit(&priv);
7945+ dev->dev_private=priv;
7946+
7947+ return 0;
7948+
7949+}
7950+
7951+int intel_prerelease_265(drm_device_t *dev){
7952+
7953+ intel_prerelease(dev);
7954+
7955+ return 0;
7956+
7957+}
7958+
7959+int intel_getpages_265( struct inode *inode, struct file *filp,
7960+ unsigned int cmd, unsigned long arg ){
7961+
7962+ drm_file_t *priv=filp->private_data;
7963+ drm_device_t *dev=priv->dev;
7964+ return intel_getpages(dev,filp,arg);
7965+
7966+
7967+}
7968+
7969+int intel_freepages_265( struct inode *inode, struct file *filp,
7970+ unsigned int cmd, unsigned long arg ){
7971+
7972+ drm_file_t *priv=filp->private_data;
7973+ drm_device_t *dev=priv->dev;
7974+ return intel_freepages(dev,arg);
7975+
7976+}
7977+
7978+int intel_drm_info_init_265( struct inode *inode, struct file *filp,
7979+ unsigned int cmd, unsigned long arg ){
7980+
7981+ drm_file_t *priv=filp->private_data;
7982+ drm_device_t *dev=priv->dev;
7983+ return intel_drm_info_init(dev,arg);
7984+
7985+}
7986+
7987+int intel_drm_info_get_265( struct inode *inode, struct file *filp,
7988+ unsigned int cmd, unsigned long arg ){
7989+
7990+ drm_file_t *priv=filp->private_data;
7991+ drm_device_t *dev=priv->dev;
7992+ return intel_drm_info_get(dev,arg);
7993+
7994+}
7995+
7996+struct file_operations intel_buffer_fops = {
7997+ .open = DRM(open),
7998+ .flush = DRM(flush),
7999+ .release = DRM(release),
8000+ .ioctl = DRM(ioctl),
8001+ .mmap = intel_mmap_buffers,
8002+ .fasync = DRM(fasync),
8003+};
8004+
8005+#endif
8006+/*end of 2.6.5 definition */
8007+
8008+
8009diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_265.h patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_265.h
8010--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/iegd_interface_265.h 1969-12-31 17:00:00.000000000 -0700
8011+++ patch_script_temp/drivers/gpu/drm/iegd/drm/iegd_interface_265.h 2009-10-06 10:30:05.000000000 -0700
8012@@ -0,0 +1,129 @@
8013+/* -*- pse-c -*-
8014+ *----------------------------------------------------------------------------
8015+ * Filename: iegd_interface_265.h
8016+ * $Revision: 1.6 $
8017+ *----------------------------------------------------------------------------
8018+ * Gart and DRM driver for Intel Embedded Graphics Driver
8019+ * Copyright © 2008, Intel Corporation.
8020+ *
8021+ * This program is free software; you can redistribute it and/or modify it
8022+ * under the terms and conditions of the GNU General Public License,
8023+ * version 2, as published by the Free Software Foundation.
8024+ *
8025+ * This program is distributed in the hope it will be useful, but WITHOUT
8026+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8027+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8028+ * more details.
8029+ *
8030+ * You should have received a copy of the GNU General Public License along with
8031+ * this program; if not, write to the Free Software Foundation, Inc.,
8032+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8033+ *
8034+ */
8035+
8036+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
8037+ *
8038+ * Redistribution and use in source and binary forms, with or without
8039+ * modification, are permitted provided that the following conditions are met:
8040+ * Redistributions of source code must retain the above copyright notice,
8041+ * this list of conditions and the following disclaimer.
8042+ *
8043+ * Redistributions in binary form must reproduce the above copyright
8044+ * notice, this list of conditions and the following disclaimer in the
8045+ * documentation and/or other materials provided with the distribution.
8046+ *
8047+ * Neither the name Intel Corporation nor the names of its contributors
8048+ * may be used to endorse or promote products derived from this software
8049+ * without specific prior written permission.
8050+ *
8051+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
8052+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
8053+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
8054+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
8055+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
8056+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
8057+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
8058+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
8059+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
8060+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
8061+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
8062+ *
8063+ */
8064+
8065+/* For some arcane reasons this must be defined for 2.6.5 kernel in
8066+ * intel.h if not the drm won't compile properly.
8067+ */
8068+#if KERNEL265
8069+
8070+
8071+/* KERNEL265 defines these functions in the drm directory
8072+ * that got expanded when you #define DRM(x) intel_##x. This is very ugly and
8073+ * confusing. Luckily 2.6.11 don't have this. Can't do much here but follow
8074+ * the rules for it.
8075+ */
8076+#define DRM(x) intel_##x
8077+
8078+/* Changing the permission bits to 0,0 for testing. auth,root permission */
8079+#define DRIVER_IOCTLS \
8080+ [DRM_IOCTL_NR(DRM_IOCTL_INTEL_GETPAGES)] = { intel_getpages_265, 0, 0 }, \
8081+ [DRM_IOCTL_NR(DRM_IOCTL_INTEL_FREEPAGES)] = { intel_freepages_265, 0, 0 },\
8082+ [DRM_IOCTL_NR(DRM_IOCTL_INTEL_INFO_INIT)] = { intel_drm_info_init_265, 0, 0 },\
8083+ [DRM_IOCTL_NR(DRM_IOCTL_INTEL_INFO_GET)] = { intel_drm_info_get_265, 0, 0 }
8084+
8085+/* Driver customization:
8086+ */
8087+#define __HAVE_RELEASE 1
8088+#define DRIVER_PRERELEASE() do { \
8089+ intel_prerelease_265(dev); \
8090+} while (0)
8091+
8092+#define DRIVER_RELEASE() do { \
8093+} while (0)
8094+
8095+#define DRIVER_PRETAKEDOWN() do { \
8096+} while (0)
8097+
8098+#define DRIVER_POSTSETUP() do { \
8099+} while (0)
8100+
8101+#define DRIVER_POSTCLEANUP() do { \
8102+} while (0)
8103+
8104+#define DRIVER_POSTINIT() do { \
8105+ intel_postinit_265(dev); \
8106+} while (0)
8107+
8108+/*
8109+ * Explanation: For unknown reasons the DRM infrastructure has a lot
8110+ * of really horrid programming techniques to generate custom init
8111+ * code using header files (containing c code) and macros. Apparently
8112+ * this is to save a few nano seconds during init.
8113+ *
8114+ * This logic here is that if you define this magic macro you will use
8115+ * this code to count the number of devices you are supporting. We
8116+ * need to support 2 devices and we don't know the device IDs at startup
8117+ * and there is usually not 2 PCI devices anyway. So we just return 2
8118+ * and worry about it later.
8119+ *
8120+ * Note: DRM has issues with DIH so for now we'll live with one drm
8121+ *#define DRIVER_COUNT_CARDS() 2
8122+ */
8123+
8124+/* KERNEL265 defines these functions in the drm directory
8125+ * that got expanded when you #define DRM(x) intel_##x. This is very ugly and
8126+ * confusing. Luckily 2.6.11 don't have this
8127+ */
8128+#define REMAP_PAGE(a,b,c,d,e) remap_page_range( \
8129+ DRM_RPR_ARG(a) b , \
8130+ c,d,e)
8131+#define ORDER(a) DRM(order)(a)
8132+#define ALLOC_PAGES(a,b) DRM(alloc_pages)(a,b)
8133+#define ALLOC(a,b) DRM(alloc)(a,b)
8134+#define FREE(a,b,c) DRM(free)(a,b,c)
8135+#define FREE_PAGES(a,b,c) DRM(free_pages)(a,b,c)
8136+
8137+#define LOCK_DRM(d) down(&d->struct_sem)
8138+#define UNLOCK_DRM(d) up(&d->struct_sem)
8139+
8140+#endif
8141+/* endif for KERNEL265 */
8142diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/myclient.c patch_script_temp/drivers/gpu/drm/iegd/drm/myclient.c
8143--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/myclient.c 1969-12-31 17:00:00.000000000 -0700
8144+++ patch_script_temp/drivers/gpu/drm/iegd/drm/myclient.c 2009-10-06 10:30:05.000000000 -0700
8145@@ -0,0 +1,210 @@
8146+/* -*- pse-c -*-
8147+ *----------------------------------------------------------------------------
8148+ * Filename: myclient.c
8149+ * $Revision: 1.5 $
8150+ *----------------------------------------------------------------------------
8151+ * DRM test program
8152+ * Copyright © 2008, Intel Corporation.
8153+ *
8154+ * This program is free software; you can redistribute it and/or modify it
8155+ * under the terms and conditions of the GNU General Public License,
8156+ * version 2, as published by the Free Software Foundation.
8157+ *
8158+ * This program is distributed in the hope it will be useful, but WITHOUT
8159+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8160+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8161+ * more details.
8162+ *
8163+ * You should have received a copy of the GNU General Public License along with
8164+ * this program; if not, write to the Free Software Foundation, Inc.,
8165+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8166+ *
8167+ */
8168+
8169+/* Copyright 2003 - 2005 Intel Corporation. All Rights Reserved.
8170+ *
8171+ * Redistribution and use in source and binary forms, with or without
8172+ * modification, are permitted provided that the following conditions are met:
8173+ * Redistributions of source code must retain the above copyright notice,
8174+ * this list of conditions and the following disclaimer.
8175+ *
8176+ * Redistributions in binary form must reproduce the above copyright
8177+ * notice, this list of conditions and the following disclaimer in the
8178+ * documentation and/or other materials provided with the distribution.
8179+ *
8180+ * Neither the name Intel Corporation nor the names of its contributors
8181+ * may be used to endorse or promote products derived from this software
8182+ * without specific prior written permission.
8183+ *
8184+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
8185+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
8186+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
8187+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
8188+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
8189+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
8190+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
8191+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
8192+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
8193+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
8194+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
8195+ *
8196+ */
8197+
8198+/*client to test the ioctl
8199+ * make sure you change the permission bits in intel.h to 0,0
8200+ * before you start using this
8201+ */
8202+
8203+#include "iegd.h"
8204+
8205+#include <fcntl.h>
8206+#include <unistd.h>
8207+#include <sys/ioctl.h>
8208+#include <stdlib.h>
8209+#include <stdio.h>
8210+
8211+#define DRM_IOCTL_BASE 'd'
8212+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
8213+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
8214+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
8215+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
8216+
8217+#include "intel_drm_client.h"
8218+
8219+#define DRM_IOCTL_INTEL_GETPAGES DRM_IOWR(DRM_BASE_COMMAND + \
8220+ DRM_INTEL_GETPAGES, drm_intel_getpages_t)
8221+#define DRM_IOCTL_INTEL_FREEPAGES DRM_IOWR(DRM_BASE_COMMAND + \
8222+ DRM_INTEL_FREEPAGES, drm_intel_freepages_t)
8223+#define DRM_IOCTL_INTEL_INFO_INIT DRM_IOW( DRM_BASE_COMMAND + \
8224+ DRM_INTEL_INFO_INIT, intel_drm_info_t)
8225+#define DRM_IOCTL_INTEL_INFO_GET DRM_IOR( DRM_BASE_COMMAND + \
8226+ DRM_INTEL_INFO_GET, intel_drm_info_t)
8227+
8228+
8229+#define PAGE_SIZE 4096
8230+int main()
8231+{
8232+int file_desc, ret_value;
8233+printf("ytay starting client\n");
8234+/* Open the drm */
8235+file_desc=open("/dev/dri/card0",O_RDWR);
8236+
8237+if(file_desc<0){
8238+/* probably Suse distro since the dev tree is different.
8239+ * try /dev/card0
8240+ */
8241+file_desc=open("/dev/card0",O_RDWR);
8242+
8243+}
8244+
8245+if(file_desc<0){
8246+
8247+printf("ytay can't open device file:%s\n",DRIVER_DESC);
8248+ exit(-1);
8249+}
8250+
8251+printf("ytay open device file:%d\n",file_desc);
8252+drm_intel_getpages_t getpages;
8253+/* set the number of bytes we want the drm to allocate */
8254+getpages.size=(PAGE_SIZE- 1000);
8255+
8256+ret_value=ioctl(file_desc,DRM_IOCTL_INTEL_GETPAGES,&getpages);
8257+if(ret_value<0){
8258+printf("ytay ioctl failed!\n");
8259+ exit(-1);
8260+}
8261+printf("ytay ioctl success\n");
8262+printf("ytay size%d,phy_address:%#x,virt_address:%#x,offset:%#x\n",getpages.size,getpages.phy_address,getpages.virt_address,getpages.offset);
8263+
8264+/* test for memory access */
8265+
8266+int i;
8267+unsigned long *virt_ptr;
8268+
8269+virt_ptr=(unsigned long *)getpages.virt_address;
8270+
8271+/* input 0..10 into subsequent memory */
8272+
8273+for(i=0;i<=11;i++){
8274+*virt_ptr=i;
8275+virt_ptr++;
8276+
8277+}
8278+
8279+/*read from subsequent memory */
8280+
8281+virt_ptr=(unsigned long *)getpages.virt_address;
8282+for(i=0;i<=15;i++){
8283+printf("virt_ptr@%#x,value:%d\n",virt_ptr,*virt_ptr);
8284+virt_ptr++;
8285+}
8286+/* set the number of bytes we want the drm to allocate */
8287+getpages.size=(PAGE_SIZE- 1000);
8288+
8289+ret_value=ioctl(file_desc,DRM_IOCTL_INTEL_GETPAGES,&getpages);
8290+if(ret_value<0){
8291+printf("ytay ioctl failed!\n");
8292+ exit(-1);
8293+}
8294+printf("ytay ioctl success\n");
8295+printf("ytay size%d,phy_address:%#x,virt_address:%#x,offset:%#x\n",getpages.size,getpages.phy_address,getpages.virt_address,getpages.offset);
8296+
8297+
8298+/* freeing memory */
8299+
8300+drm_intel_freepages_t freepages;
8301+freepages.size=getpages.size;
8302+freepages.phy_address=getpages.phy_address;
8303+freepages.virt_address=getpages.virt_address;
8304+printf("ytay freeing phy_address:%#x,size:%#x\n",freepages.phy_address,freepages.size);
8305+/*
8306+ioctl(file_desc,DRM_IOCTL_INTEL_FREEPAGES,&freepages);
8307+*/
8308+/* init the drm info structure in the drm and test its value */
8309+
8310+ intel_drm_info_t info;
8311+ intel_drm_info_t test_info;
8312+ info.device_id=0x456;
8313+ info.revision=333;
8314+ info.video_memory_offset=0x10245;
8315+ info.video_memory_size=987;
8316+ info.hw_status_offset=0x444;
8317+
8318+ printf("Testing init info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
8319+ info.device_id,info.revision,info.video_memory_offset,info.video_memory_size,info.hw_status_offset);
8320+
8321+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_INIT,&info);
8322+
8323+/* init the drm info structure in the drm and test its value */
8324+
8325+ info.device_id=0x123;
8326+ info.revision=456;
8327+ info.video_memory_offset=0x789;
8328+ info.video_memory_size=111;
8329+ info.hw_status_offset=0x555;
8330+
8331+ printf("Testing init 2nd info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
8332+ info.device_id,info.revision,info.video_memory_offset,info.video_memory_size,info.hw_status_offset);
8333+
8334+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_INIT,&info);
8335+
8336+ printf("Testing init 2nd info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
8337+ info.device_id,info.revision,info.video_memory_offset,info.video_memory_size,info.hw_status_offset);
8338+
8339+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_INIT,&info);
8340+
8341+
8342+
8343+ ioctl(file_desc,DRM_IOCTL_INTEL_INFO_GET,&test_info);
8344+
8345+ printf("Got init info device_id:%#x,revision:%d,offset:%#x,size:%d,hw_status_offset:%lx\n",
8346+ test_info.device_id,test_info.revision,test_info.video_memory_offset,test_info.video_memory_size,test_info.hw_status_offset);
8347+
8348+
8349+close(file_desc);
8350+/*
8351+sleep(100000000000);
8352+*/
8353+return 0;
8354+
8355+}
8356diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/psb_intregs.h patch_script_temp/drivers/gpu/drm/iegd/drm/psb_intregs.h
8357--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/psb_intregs.h 1969-12-31 17:00:00.000000000 -0700
8358+++ patch_script_temp/drivers/gpu/drm/iegd/drm/psb_intregs.h 2009-10-06 10:30:05.000000000 -0700
8359@@ -0,0 +1,114 @@
8360+/* -*- pse-c -*-
8361+ *-----------------------------------------------------------------------------
8362+ * Filename: psb_intregs.h
8363+ *-----------------------------------------------------------------------------
8364+ * INTEL CONFIDENTIAL
8365+ * Copyright (2002-2008) Intel Corporation All Rights Reserved.
8366+ * The source code contained or described herein and all documents related to
8367+ * the source code ("Material") are owned by Intel Corporation or its suppliers
8368+ * or licensors. Title to the Material remains with Intel Corporation or its
8369+ * suppliers and licensors. The Material contains trade secrets and proprietary
8370+ * and confidential information of Intel or its suppliers and licensors. The
8371+ * Material is protected by worldwide copyright and trade secret laws and
8372+ * treaty provisions. No part of the Material may be used, copied, reproduced,
8373+ * modified, published, uploaded, posted, transmitted, distributed, or
8374+ * disclosed in any way without Intel's prior express written permission.
8375+ *
8376+ * No license under any patent, copyright, trade secret or other intellectual
8377+ * property right is granted to or conferred upon you by disclosure or
8378+ * delivery of the Materials, either expressly, by implication, inducement,
8379+ * estoppel or otherwise. Any license under such intellectual property rights
8380+ * must be express and approved by Intel in writing.
8381+ *
8382+ *
8383+ *-----------------------------------------------------------------------------
8384+ * Description:
8385+ * This file contains the interrupt related register definition and
8386+ * macros for the PLB platform.
8387+ *-----------------------------------------------------------------------------
8388+ */
8389+
8390+#ifndef _REGS_H_
8391+#define _REGS_H_
8392+
8393+/*-----------------------------------------------------------------------------
8394+ * SGX, VDC, and MSVDX interrupt registers
8395+ ----------------------------------------------------------------------------*/
8396+//#define SGX_BASE 0x40000
8397+
8398+#define PSB_MMIO_RESOURCE 0
8399+
8400+#define PSB_VDC_OFFSET 0x00000000
8401+#define PSB_VDC_SIZE 0x000080000
8402+#define PSB_SGX_OFFSET 0x00040000
8403+#define PSB_SGX_SIZE 0x8000
8404+#define PSB_MSVDX_OFFSET 0x00050000
8405+#define PSB_MSVDX_SIZE 0x1000
8406+
8407+/* bits in PSB_CR_EVENT_STATUS */
8408+#define PSB_DPM_3D_MEM_FREE (1<<0)
8409+#define PSB_OUT_OF_MEM_MT (1<<1)
8410+#define PSB_OUT_OF_MEM_GBL (1<<2)
8411+#define PSB_REACHED_MEM_THRESH (1<<3)
8412+#define PSB_TA_TERMINATE (1<<12)
8413+#define PSB_TA_FINISHED (1<<13)
8414+#define PSB_PIXELBE_END_RENDER (1<<18)
8415+#define PSB_DPM_TA_MEM_FREE (1<<24)
8416+#define PSB_DPM_OUT_OF_MEM_ZLS (1<<25)
8417+#define PSB_TWOD_COMPLETE (1<<27)
8418+#define PSB_TA_DPM_FAULT (1<<28)
8419+
8420+#define PSB_BIF_REQ_FAULT (1<<4)
8421+#define PSB_TRIG_DL (1<<5)
8422+#define PSB_TRIG_3D (1<<6)
8423+#define PSB_TRIG_TA (1<<7)
8424+
8425+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
8426+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
8427+#define PSB_CR_EVENT_STATUS2 0x0118
8428+
8429+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
8430+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
8431+#define PSB_CR_EVENT_STATUS 0x012C
8432+
8433+#define PSB_MTX_EVENT_HOST_ENABLE 0x0610
8434+#define PSB_MTX_EVENT_CLEAR 0x060C
8435+#define PSB_MTX_EVENT_STATUS 0x0608
8436+
8437+/*-----------------------------------------------------------------------------
8438+ * Memory mapped I/O Registers Definitions
8439+ *---------------------------------------------------------------------------*/
8440+
8441+/*-----------------------------------------------------------------------------
8442+ * Instruction and Interrupt Control Registers (01000h - 02FFFh)
8443+ *---------------------------------------------------------------------------*/
8444+#define HWSTAM 0x02098 /* Hardware Status Mask */
8445+#define IER 0x020A0 /* Interrupt Enable */
8446+#define IIR 0x020A4 /* Interrupt Identity */
8447+#define IMR 0x020A8 /* Interrupt Mask */
8448+#define ISR 0x020AC /* Interrupt Status */
8449+
8450+#define PIPEA_STAT 0x70024 /* Pipe A Display Status */
8451+#define PIPEB_STAT 0x71024 /* Pipe B Display Status */
8452+
8453+#define VBLANK_CLEAR (1<<1)
8454+#define VSYNC_PIPEB_FLAG (1<<5)
8455+#define VSYNC_PIPEA_FLAG (1<<7)
8456+#define VBLANK_INTERRUPT_ENABLE (1<<17)
8457+#define IRQ_SGX_FLAG (1<<18)
8458+#define IRQ_MSVDX_FLAG (1<<19)
8459+
8460+#define PSB_WVDC32(_val, _offs) \
8461+ iowrite32(_val, priv->vdc_reg + (_offs))
8462+#define PSB_RVDC32(_offs) \
8463+ ioread32(priv->vdc_reg + (_offs))
8464+#define PSB_WSGX32(_val, _offs) \
8465+ iowrite32(_val, priv->sgx_reg + (_offs))
8466+#define PSB_RSGX32(_offs) \
8467+ ioread32(priv->sgx_reg + (_offs))
8468+#define PSB_WMSVDX32(_val, _offs) \
8469+ iowrite32(_val, priv->msvdx_reg + (_offs))
8470+#define PSB_RMSVDX32(_offs) \
8471+ ioread32(priv->msvdx_reg + (_offs))
8472+
8473+#endif /* _REGS_H_ */
8474diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/psb_irq.c patch_script_temp/drivers/gpu/drm/iegd/drm/psb_irq.c
8475--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/drm/psb_irq.c 1969-12-31 17:00:00.000000000 -0700
8476+++ patch_script_temp/drivers/gpu/drm/iegd/drm/psb_irq.c 2009-10-06 10:30:05.000000000 -0700
8477@@ -0,0 +1,185 @@
8478+/* -*- pse-c -*-
8479+ *-----------------------------------------------------------------------------
8480+ * Filename: psb_irq.c
8481+ *-----------------------------------------------------------------------------
8482+ * INTEL CONFIDENTIAL
8483+ * Copyright (2002-2008) Intel Corporation All Rights Reserved.
8484+ * The source code contained or described herein and all documents related to
8485+ * the source code ("Material") are owned by Intel Corporation or its suppliers
8486+ * or licensors. Title to the Material remains with Intel Corporation or its
8487+ * suppliers and licensors. The Material contains trade secrets and proprietary
8488+ * and confidential information of Intel or its suppliers and licensors. The
8489+ * Material is protected by worldwide copyright and trade secret laws and
8490+ * treaty provisions. No part of the Material may be used, copied, reproduced,
8491+ * modified, published, uploaded, posted, transmitted, distributed, or
8492+ * disclosed in any way without Intel's prior express written permission.
8493+ *
8494+ * No license under any patent, copyright, trade secret or other intellectual
8495+ * property right is granted to or conferred upon you by disclosure or
8496+ * delivery of the Materials, either expressly, by implication, inducement,
8497+ * estoppel or otherwise. Any license under such intellectual property rights
8498+ * must be express and approved by Intel in writing.
8499+ *
8500+ *
8501+ *-----------------------------------------------------------------------------
8502+ * Description:
8503+ * This file contains interrupt related routines for the PLB platform.
8504+ *-----------------------------------------------------------------------------
8505+ */
8506+
8507+#include <linux/version.h>
8508+#include "drmP.h"
8509+#include "psb_intregs.h"
8510+#include "iegd_drm.h"
8511+
8512+
8513+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
8514+{
8515+ int handled = 0;
8516+ struct drm_device *dev = (struct drm_device *)arg;
8517+ intel_device_private_t *priv=dev->dev_private;
8518+ uint32_t vdc_stat, sgx_stat, sgx_stat2, mtx_stat;
8519+
8520+ spin_lock(&priv->irqmask_lock);
8521+ vdc_stat = PSB_RVDC32(IIR);
8522+ sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
8523+ sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
8524+ mtx_stat = PSB_RMSVDX32(PSB_MTX_EVENT_STATUS);
8525+ vdc_stat &= priv->vdc_irq_mask;
8526+ sgx_stat &= priv->sgx_irq_mask;
8527+ sgx_stat2 &= priv->sgx_irq_mask2;
8528+ mtx_stat &= priv->msvdx_irq_mask;
8529+
8530+ if (vdc_stat) {
8531+ PSB_WVDC32(vdc_stat, IIR);
8532+ (void)PSB_RVDC32(IIR);
8533+
8534+ priv->out_vdc |= vdc_stat;
8535+ handled = 1;
8536+
8537+ if (sgx_stat || sgx_stat2 || mtx_stat) {
8538+ PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
8539+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
8540+ PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
8541+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);
8542+ PSB_WMSVDX32(mtx_stat, PSB_MTX_EVENT_CLEAR);
8543+ (void)PSB_RMSVDX32(PSB_MTX_EVENT_CLEAR);
8544+
8545+ priv->out_sgx |= sgx_stat;
8546+ priv->out_sgx2 |= sgx_stat2;
8547+ priv->out_mtx |= mtx_stat;
8548+
8549+ priv->event_present = 1;
8550+ spin_unlock(&priv->irqmask_lock);
8551+ DRM_WAKEUP(&priv->event_queue);
8552+
8553+ } else {
8554+
8555+ spin_unlock(&priv->irqmask_lock);
8556+
8557+ }
8558+
8559+ } else {
8560+
8561+ spin_unlock(&priv->irqmask_lock);
8562+
8563+ }
8564+
8565+ if (!handled) {
8566+ return IRQ_NONE;
8567+ }
8568+
8569+ return IRQ_HANDLED;
8570+}
8571+
8572+void psb_irq_preinstall(struct drm_device *dev)
8573+{
8574+ intel_device_private_t *priv =
8575+ (intel_device_private_t *)dev->dev_private;
8576+
8577+ spin_lock(&priv->irqmask_lock);
8578+ PSB_WVDC32(0xFFFFFFFF, HWSTAM);
8579+ PSB_WVDC32(0xFFFFFFFF, IMR);
8580+ PSB_WVDC32(0x00000000, IER);
8581+
8582+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
8583+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
8584+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE2);
8585+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE2);
8586+ PSB_WMSVDX32(0x00000000, PSB_MTX_EVENT_HOST_ENABLE);
8587+ (void)PSB_RMSVDX32(PSB_MTX_EVENT_HOST_ENABLE);
8588+
8589+ priv->sgx_irq_mask = PSB_TWOD_COMPLETE |
8590+ PSB_TA_FINISHED | PSB_TA_TERMINATE |
8591+ PSB_PIXELBE_END_RENDER | PSB_DPM_3D_MEM_FREE |
8592+ PSB_OUT_OF_MEM_MT | PSB_OUT_OF_MEM_GBL |
8593+ PSB_REACHED_MEM_THRESH | PSB_DPM_TA_MEM_FREE |
8594+ PSB_DPM_OUT_OF_MEM_ZLS | PSB_TA_DPM_FAULT;
8595+
8596+ priv->sgx_irq_mask2 = PSB_BIF_REQ_FAULT | PSB_TRIG_TA | PSB_TRIG_3D |
8597+ PSB_TRIG_DL;
8598+
8599+ priv->vdc_irq_mask = IRQ_SGX_FLAG | IRQ_MSVDX_FLAG;
8600+
8601+ priv->msvdx_irq_mask = (1<<14); /* Enable only MTX interrupt */
8602+
8603+ spin_unlock(&priv->irqmask_lock);
8604+}
8605+
8606+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
8607+void psb_irq_postinstall(struct drm_device *dev)
8608+#else
8609+int psb_irq_postinstall(struct drm_device *dev)
8610+#endif
8611+{
8612+ intel_device_private_t *priv =
8613+ (intel_device_private_t *)dev->dev_private;
8614+ unsigned long irqflags;
8615+
8616+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
8617+ PSB_WVDC32(priv->vdc_irq_mask, IER);
8618+ PSB_WSGX32(priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
8619+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
8620+ PSB_WSGX32(priv->sgx_irq_mask2, PSB_CR_EVENT_HOST_ENABLE2);
8621+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE2);
8622+ PSB_WMSVDX32(priv->msvdx_irq_mask, PSB_MTX_EVENT_HOST_ENABLE);
8623+ (void)PSB_RMSVDX32(PSB_MTX_EVENT_HOST_ENABLE);
8624+
8625+ priv->irq_enabled = 1;
8626+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
8627+
8628+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
8629+ return 0;
8630+#endif
8631+
8632+}
8633+
8634+void psb_irq_uninstall(struct drm_device *dev)
8635+{
8636+ intel_device_private_t *priv =
8637+ (intel_device_private_t *)dev->dev_private;
8638+ unsigned long irqflags;
8639+
8640+ spin_lock_irqsave(&priv->irqmask_lock, irqflags);
8641+
8642+ priv->sgx_irq_mask = 0x00000000;
8643+ priv->sgx_irq_mask2 = 0x00000000;
8644+ priv->vdc_irq_mask = 0x00000000;
8645+ priv->msvdx_irq_mask = 0x00000000;
8646+
8647+ /* By default, we're enabling interrupts by leaving them masked */
8648+ PSB_WVDC32(0xFFFFFFFF, HWSTAM);
8649+ PSB_WVDC32(0xFFFFFFFF, IMR);
8650+ PSB_WVDC32(priv->vdc_irq_mask, IER);
8651+ PSB_WSGX32(priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
8652+ PSB_WSGX32(priv->sgx_irq_mask2, PSB_CR_EVENT_HOST_ENABLE2);
8653+ PSB_WMSVDX32(priv->msvdx_irq_mask, PSB_MTX_EVENT_HOST_ENABLE);
8654+ wmb();
8655+ PSB_WVDC32(PSB_RVDC32(IIR), IIR);
8656+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS), PSB_CR_EVENT_HOST_CLEAR);
8657+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2), PSB_CR_EVENT_HOST_CLEAR2);
8658+ PSB_WMSVDX32(PSB_RMSVDX32(PSB_MTX_EVENT_STATUS), PSB_MTX_EVENT_CLEAR);
8659+
8660+ priv->irq_enabled = 0;
8661+ spin_unlock_irqrestore(&priv->irqmask_lock, irqflags);
8662+}
8663diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/global.h patch_script_temp/drivers/gpu/drm/iegd/include/global.h
8664--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/global.h 1969-12-31 17:00:00.000000000 -0700
8665+++ patch_script_temp/drivers/gpu/drm/iegd/include/global.h 2009-10-06 10:30:05.000000000 -0700
8666@@ -0,0 +1,160 @@
8667+/* -*- pse-c -*-
8668+ * Filename: iegd_interface.c
8669+ * $Revision: 1.19 $
8670+ *----------------------------------------------------------------------------
8671+ * <>
8672+ * Copyright © 2008, Intel Corporation.
8673+ *
8674+ * This program is free software; you can redistribute it and/or modify it
8675+ * under the terms and conditions of the GNU General Public License,
8676+ * version 2, as published by the Free Software Foundation.
8677+ *
8678+ * This program is distributed in the hope it will be useful, but WITHOUT
8679+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8680+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8681+ * more details.
8682+ *
8683+ * You should have received a copy of the GNU General Public License along with
8684+ * this program; if not, write to the Free Software Foundation, Inc.,
8685+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8686+ *
8687+ *
8688+ *----------------------------------------------------------------------------
8689+ * Functions:
8690+ *
8691+ *
8692+ *----------------------------------------------------------------------------
8693+ */
8694+
8695+#ifndef _GART_GLOBAL_DEF
8696+#define _GART_GLOBAL_DEF
8697+#include "igd_gart.h"
8698+#include "igd_abs.h"
8699+#include "interface_abs.h"
8700+
8701+
8702+#define APER_ENTRY(a,b) sizeof((a))/(b)
8703+#define AGP_DCACHE_MEMORY 1
8704+#define AGP_PHYS_MEMORY 2
8705+
8706+#define IEGD "IEGD"
8707+
8708+/**
8709+ * This is global data that is shared across file. New global
8710+ * data should goes here.
8711+ */
8712+extern gart_dispatch_t *gart_id;
8713+extern dev_private_data_t private_data;
8714+extern struct pci_device_id iegd_pci_table[];
8715+extern dispatch_table_t driver_dispatch_list[];
8716+extern struct gatt_mask iegd_cmn_masks[];
8717+extern struct aper_size_info_fixed iegd_i915_sizes[];
8718+extern struct aper_size_info_fixed iegd_iq35_sizes[];
8719+extern struct aper_size_info_fixed iegd_i965_sizes[];
8720+extern struct aper_size_info_fixed intel_i830_sizes[];
8721+extern struct aper_size_info_fixed intel_i810_sizes[];
8722+extern struct aper_size_info_fixed iegd_igm45_sizes[];
8723+
8724+/* All dispatch table for the chipset family goes here */
8725+extern bridge_driver_t drv_alm;
8726+extern bridge_driver_t drv_nap;
8727+extern bridge_driver_t drv_gn4;
8728+extern bridge_driver_t drv_gm45;
8729+
8730+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
8731+/* Poulsbo */
8732+extern struct aper_size_info_fixed iegd_plb_sizes[];
8733+extern bridge_driver_t drv_plb;
8734+
8735+/* Poulsbo specific structure so that the DRM can utilize the
8736+ * AGP's virtual aperture management code
8737+ */
8738+extern struct vm_operations_struct iegd_plb_vm_ops;
8739+#endif
8740+
8741+
8742+/*
8743+ * Macro to fill device information for PCI devices registration.
8744+ * Copy from public agpgart in kernel source
8745+ */
8746+#define ID(x) { \
8747+ .class = (PCI_CLASS_BRIDGE_HOST << 8), \
8748+ .class_mask = ~0, \
8749+ .vendor = PCI_VENDOR_ID_INTEL, \
8750+ .device = x, \
8751+ .subvendor = PCI_ANY_ID, \
8752+ .subdevice = PCI_ANY_ID, \
8753+}
8754+
8755+#ifdef CONFIG_AGP_DEBUG
8756+#define AGN_DEBUG(x,y...) printk(KERN_INFO "[" IEGD \
8757+ ":DEBUG]:%s " x "\n", __FUNCTION__, ##y)
8758+#else
8759+#define AGN_DEBUG(x,y...) do {} while(0)
8760+#endif
8761+
8762+#define AGN_ERROR(x,y...) printk(KERN_ALERT "[" IEGD \
8763+ ":ERROR]:%s: " x "\n", __FUNCTION__, ##y)
8764+#define AGN_LOG(x,y...) printk(KERN_INFO "[" IEGD "]: " x "\n", ##y)
8765+
8766+/**
8767+ * Global extern function prototype, basically common function
8768+ * should goes here. Most of this function extern is from
8769+ * drv_cmn.c
8770+ */
8771+extern int iegd_find_device(u16 device);
8772+extern struct pci_dev *iegd_probe_device(void);
8773+extern void iegd_cmn_init_gtt_entries(void);
8774+extern int AGP_FREE_GATT(iegd_cmn_free_gatt_table);
8775+extern void iegd_cmn_free_by_type(struct agp_memory *curr);
8776+extern struct agp_memory *iegd_cmn_alloc_by_type(
8777+ size_t pg_count, int type);
8778+extern int iegd_cmn_insert_entries(struct agp_memory *mem,
8779+ off_t pg_start, int type);
8780+extern int iegd_cmn_remove_entries(struct agp_memory *mem, off_t pg_start,
8781+ int type);
8782+extern int bridge_driver_init(bridge_driver_t **driver_hook,
8783+ unsigned short did, dispatch_table_t *list );
8784+
8785+#ifndef MSR_IA32_CR_PAT
8786+#define MSR_IA32_CR_PAT 0x0277
8787+#endif
8788+#ifndef _PAGE_PAT
8789+#define _PAGE_PAT 0x080
8790+#endif
8791+extern void agp_init_pat(void);
8792+extern int agp_use_pat (void);
8793+
8794+/**
8795+ * masking valid bit for page table entries before
8796+ * put it insert it to gtt table
8797+ */
8798+unsigned long AGP_MASK_MEMORY(iegd_cmn_mask_memory);
8799+int AGP_CREATE_GATT(iegd_alm_create_gatt_table);
8800+extern int iegd_cmn_configure(void);
8801+extern void AGP_ENABLE(iegd_cmn_agp_enable);
8802+
8803+/* Global DRM function prototype */
8804+extern int intel_init(void);
8805+extern void intel_exit(void);
8806+extern int drm_init(void);
8807+extern void drm_cleanup(void);
8808+
8809+/* Generic function to dispatch the information according to
8810+ * chipset id */
8811+static __inline void *dispatch_acquire(
8812+
8813+ unsigned short did,
8814+ dispatch_table_t *table_list) {
8815+
8816+ dispatch_table_t *curr = table_list;
8817+ while(curr && (curr->did != 0)) {
8818+ if(curr->did == did) {
8819+ return curr->table;
8820+ }
8821+ curr++;
8822+ }
8823+
8824+ return NULL;
8825+}
8826+#endif
8827diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/igd_abs.h patch_script_temp/drivers/gpu/drm/iegd/include/igd_abs.h
8828--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/igd_abs.h 1969-12-31 17:00:00.000000000 -0700
8829+++ patch_script_temp/drivers/gpu/drm/iegd/include/igd_abs.h 2009-10-06 10:30:05.000000000 -0700
8830@@ -0,0 +1,136 @@
8831+/* -*- pse-c -*-
8832+ *----------------------------------------------------------------------------
8833+ * Filename: iegd_interface.c
8834+ * $Revision: 1.15 $
8835+ *----------------------------------------------------------------------------
8836+ * <>
8837+ * Copyright © 2008, Intel Corporation.
8838+ *
8839+ * This program is free software; you can redistribute it and/or modify it
8840+ * under the terms and conditions of the GNU General Public License,
8841+ * version 2, as published by the Free Software Foundation.
8842+ *
8843+ * This program is distributed in the hope it will be useful, but WITHOUT
8844+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8845+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8846+ * more details.
8847+ *
8848+ * You should have received a copy of the GNU General Public License along with
8849+ * this program; if not, write to the Free Software Foundation, Inc.,
8850+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8851+ *
8852+ *
8853+ *----------------------------------------------------------------------------
8854+ * Functions:
8855+ *
8856+ *
8857+ *----------------------------------------------------------------------------
8858+ */
8859+
8860+#ifndef _KERNEL_ABS_LAYER
8861+#define _KERNEL_ABS_LAYER
8862+
8863+#include <linux/version.h>
8864+
8865+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
8866+#define IGD_FREE_MEM(a) agp_free_page_array(a)
8867+#else
8868+#define IGD_FREE_MEM(a) vfree((a)->memory)
8869+#endif
8870+
8871+#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,5)
8872+#define DRM_INIT_MODULE() drm_init()
8873+#define DRM_EXIT_MODULE() drm_cleanup()
8874+#define AGP_RET(a) ((a)>=0) ? 1 : 0
8875+#else
8876+#define DRM_INIT_MODULE() intel_init()
8877+#define DRM_EXIT_MODULE() intel_exit()
8878+#define AGP_RET(a) ((a)==0) ? 1 : 0
8879+#endif
8880+
8881+#if LINUX_VERSION_CODE<KERNEL_VERSION(2,6,10)
8882+#define IGD_PCI_SAVE_STATE(a,b) pci_save_state(a,b)
8883+#define IGD_PCI_RESTORE_STATE(a,b) pci_restore_state(a,b)
8884+#define pm_message_t u32
8885+#define IGD_IS_SUSPEND(state) ((state)==3)
8886+#else
8887+#define IGD_PCI_SAVE_STATE(a,b) pci_save_state(a)
8888+#define IGD_PCI_RESTORE_STATE(a,b) pci_restore_state(a)
8889+#define IGD_IS_SUSPEND(state) \
8890+ (((state.event)==PM_EVENT_SUSPEND) | ((state.event)==PM_EVENT_FREEZE))
8891+#endif
8892+
8893+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
8894+#define AGP_LOCK_PAGE(a)
8895+#define AGP_UNLOCK_PAGE(a)
8896+#else
8897+#define AGP_LOCK_PAGE(a) SetPageLocked((a))
8898+#define AGP_UNLOCK_PAGE(a) unlock_page((a))
8899+#endif
8900+
8901+#define MASK_PTE(a,b) (a)->driver->masks[(b)].mask
8902+#define AGP_MASK_ADDR(x) MASK_PTE((x),type)
8903+
8904+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
8905+#ifndef TRUE
8906+#define TRUE 1
8907+#endif
8908+#ifndef FALSE
8909+#define FALSE 0
8910+#endif
8911+#define SET_PAGES_UC(a,b) set_pages_uc(a,b)
8912+#define SET_PAGES_WB(a,b) set_pages_wb(a,b)
8913+#define GLOBAL_FLUSH_TLB()
8914+#ifndef SetPageLocked
8915+#define SetPageLocked(page) set_bit(PG_locked, &page->flags);
8916+#endif
8917+#else
8918+#define SET_PAGES_UC(a,b) change_page_attr(a,b,PAGE_KERNEL_NOCACHE)
8919+#define SET_PAGES_WB(a,b) change_page_attr(a,b,PAGE_KERNEL)
8920+#define GLOBAL_FLUSH_TLB() global_flush_tlb()
8921+#endif
8922+
8923+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
8924+#define ON_EACH_CPU(a,b,c,d) on_each_cpu(a,b,d)
8925+/* Note: drm_dev_to_irq appears 2.6.28, but some distros
8926+ * have pulled it into earlier versions of their kernel.
8927+ * That's why it's defined here.
8928+ */
8929+#define DRM_DEV_TO_IRQ(a) drm_dev_to_irq(a)
8930+#else
8931+#define ON_EACH_CPU(a,b,c,d) on_each_cpu(a,b,c,d)
8932+#define DRM_DEV_TO_IRQ(a) (a->irq)
8933+#endif
8934+
8935+
8936+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
8937+#define AGP_MEM_TYPE struct page*
8938+#define CONVERT_PAGE_TO_GART(a) phys_to_gart(page_to_phys(a));
8939+#define AGP_MEMORY_MEMBER pages
8940+#define PAGE_ADDRESS(a) a
8941+#else
8942+#define AGP_MEM_TYPE void*
8943+#define CONVERT_PAGE_TO_GART(a) a
8944+#define AGP_MEMORY_MEMBER memory
8945+#define PAGE_ADDRESS(a) page_address(a)
8946+#endif
8947+
8948+
8949+/*
8950+ * Kernel interface abstraction. This macro will
8951+ * point to the proper definition for that particular
8952+ * kernel
8953+ */
8954+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
8955+#define AGP_MASK_MEMORY(f) _MASK_MEMORY_PAGE(f)
8956+#else
8957+#define AGP_MASK_MEMORY(f) _MASK_MEMORY(f)
8958+#endif
8959+#define AGP_CREATE_GATT(f) _CREATE_GATT_TABLE(f)
8960+#define AGP_FREE_GATT(f) _FREE_GATT_TABLE(f)
8961+#define AGP_ALLOC_PAGE(f) _ALLOC_PAGE_AGP(f)
8962+#define AGP_ENABLE(f) _ENABLE_AGP(f)
8963+#define AGP_TYPE_TO_MASK_TYPE(f) _TYPE_TO_MASK_TYPE(f)
8964+
8965+#define AGP_MASK_GTT() _mask_gtt()
8966+#endif
8967diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/igd_gart.h patch_script_temp/drivers/gpu/drm/iegd/include/igd_gart.h
8968--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/igd_gart.h 1969-12-31 17:00:00.000000000 -0700
8969+++ patch_script_temp/drivers/gpu/drm/iegd/include/igd_gart.h 2009-10-06 10:30:05.000000000 -0700
8970@@ -0,0 +1,81 @@
8971+/* -*- pse-c -*-
8972+ *----------------------------------------------------------------------------
8973+ * Filename: igd_gart.h
8974+ * $Revision: 1.10 $
8975+ *----------------------------------------------------------------------------
8976+ * <>
8977+ * Copyright © 2008, Intel Corporation.
8978+ *
8979+ * This program is free software; you can redistribute it and/or modify it
8980+ * under the terms and conditions of the GNU General Public License,
8981+ * version 2, as published by the Free Software Foundation.
8982+ *
8983+ * This program is distributed in the hope it will be useful, but WITHOUT
8984+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8985+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8986+ * more details.
8987+ *
8988+ * You should have received a copy of the GNU General Public License along with
8989+ * this program; if not, write to the Free Software Foundation, Inc.,
8990+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8991+ *
8992+ *
8993+ *----------------------------------------------------------------------------
8994+ * Functions:
8995+ *
8996+ *
8997+ *----------------------------------------------------------------------------
8998+ */
8999+
9000+#ifndef _INIT_GART_DISPATCH
9001+#define _INIT_GART_DISPATCH
9002+
9003+#include <linux/pci.h>
9004+#include <linux/agp_backend.h>
9005+#include "agp.h"
9006+
9007+typedef struct agp_bridge_driver bridge_driver_t;
9008+typedef struct agp_bridge_data bridge_data_t;
9009+
9010+/* Dispatch table that contained information about
9011+ * specific chipset */
9012+typedef struct _gart_dispatch {
9013+ unsigned short vendor_id;
9014+ unsigned short bridge_id; /* Bridge device id */
9015+ unsigned short device_id; /* chipset id */
9016+ char *name; /* Name for the chipset */
9017+ unsigned short dev_flag;
9018+ struct pci_driver *old_gart; /* old gart info */
9019+ struct pci_dev *bridge_pdev; /* Bridge device info */
9020+ bridge_data_t *bridge_info; /* bridge information for gart */
9021+}gart_dispatch_t;
9022+
9023+/* Structure that keep the private data for chipset */
9024+typedef struct _dev_private_data {
9025+ struct pci_dev *pdev;
9026+ volatile u8 __iomem *registers;
9027+ volatile u32 __iomem *gtt;
9028+ union {
9029+ int num_dcache_entries;
9030+ int gtt_entries;
9031+ };
9032+ u32 pm_save[16]; /* PCI config saved here on suspend/resume. */
9033+ /* Required for older kernel versions. */
9034+ int split_gtt;
9035+ volatile u32 __iomem *upper_gtt;
9036+}dev_private_data_t;
9037+
9038+/* Dispatch table for function hook */
9039+typedef struct _dispatch_table {
9040+ unsigned short did;
9041+ void *table;
9042+}dispatch_table_t;
9043+
9044+/* Table contained function pointer for specific chipset */
9045+typedef struct _driver_func_table {
9046+ bridge_driver_t driver_func; /* Contained actual function */
9047+ void (*config_private)(void); /* config private */
9048+}driver_func_table_t;
9049+
9050+
9051+#endif
9052diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/intelpci.h patch_script_temp/drivers/gpu/drm/iegd/include/intelpci.h
9053--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/intelpci.h 1969-12-31 17:00:00.000000000 -0700
9054+++ patch_script_temp/drivers/gpu/drm/iegd/include/intelpci.h 2009-10-06 10:30:05.000000000 -0700
9055@@ -0,0 +1,178 @@
9056+/* -*- pse-c -*-
9057+ *----------------------------------------------------------------------------
9058+ * Filename: intelpci.h
9059+ * $Revision: 1.16 $
9060+ *----------------------------------------------------------------------------
9061+ * <>
9062+ * Copyright © 2008, Intel Corporation.
9063+ *
9064+ * This program is free software; you can redistribute it and/or modify it
9065+ * under the terms and conditions of the GNU General Public License,
9066+ * version 2, as published by the Free Software Foundation.
9067+ *
9068+ * This program is distributed in the hope it will be useful, but WITHOUT
9069+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9070+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9071+ * more details.
9072+ *
9073+ * You should have received a copy of the GNU General Public License along with
9074+ * this program; if not, write to the Free Software Foundation, Inc.,
9075+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9076+ *
9077+ *
9078+ *----------------------------------------------------------------------------
9079+ * Functions:
9080+ *
9081+ *
9082+ *----------------------------------------------------------------------------
9083+ */
9084+
9085+#define PCI_VENDOR_ID_INTEL 0x8086
9086+
9087+
9088+/* Start: Whitney core specific */
9089+#define PCI_DEVICE_ID_BRIDGE_810 0x7120
9090+#define PCI_DEVICE_ID_810 0x7121
9091+
9092+#define PCI_DEVICE_ID_BRIDGE_810DC 0x7122
9093+#define PCI_DEVICE_ID_810DC 0x7123
9094+
9095+#define PCI_DEVICE_ID_BRIDGE_810E 0x7124
9096+#define PCI_DEVICE_ID_810E 0x7125
9097+
9098+#define PCI_DEVICE_ID_BRIDGE_815 0x1130
9099+#define PCI_DEVICE_ID_815 0x1132
9100+
9101+
9102+/* Start: Almador core specific */
9103+#define PCI_DEVICE_ID_BRIDGE_830M 0x3575
9104+#define PCI_DEVICE_ID_830M 0x3577
9105+#define PCI_DEVICE_ID_AGP_830M 0x3576
9106+
9107+#define PCI_DEVICE_ID_BRIDGE_835 0x3579
9108+#define PCI_DEVICE_ID_835 0x357b
9109+#define PCI_DEVICE_ID_AGP_835 0x357a
9110+
9111+#define PCI_DEVICE_ID_BRIDGE_845G 0x2560
9112+#define PCI_DEVICE_ID_845G 0x2562
9113+#define PCI_DEVICE_ID_AGP_845G 0x0000
9114+
9115+#define PCI_DEVICE_ID_BRIDGE_855 0x3580 /* Montara-G */
9116+#define PCI_DEVICE_ID_MEM_855 0x3584
9117+#define PCI_DEVICE_ID_855 0x3582
9118+#define PCI_DEVICE_ID_AGP_855 0x0000
9119+
9120+#define PCI_DEVICE_ID_BRIDGE_865G 0x2570
9121+#define PCI_DEVICE_ID_865G 0x2572
9122+#define PCI_DEVICE_ID_AGP_865G 0x0000
9123+
9124+
9125+/* Start: Napa core specific */
9126+/* Grantsdale - 915G/915GV */
9127+#define PCI_DEVICE_ID_BRIDGE_915GD 0x2580
9128+#define PCI_DEVICE_ID_PEG_915GD 0x2581
9129+#define PCI_DEVICE_ID_915GD 0x2582
9130+/* Grantsdale - 910GL*/
9131+#define PCI_DEVICE_ID_BRIDGE_910GL 0x258C
9132+#define PCI_DEVICE_ID_PEG_910GL PCI_DEVICE_ID_PEG_915GD
9133+#define PCI_DEVICE_ID_910GL PCI_DEVICE_ID_915GD
9134+/* Alviso - 915GM/GMS/910GML*/
9135+#define PCI_DEVICE_ID_BRIDGE_915AL 0x2590
9136+#define PCI_DEVICE_ID_PEG_915AL 0x2591
9137+#define PCI_DEVICE_ID_915AL 0x2592
9138+
9139+/* Lakeport - 945G */
9140+#define PCI_DEVICE_ID_BRIDGE_945G 0x2770
9141+#define PCI_DEVICE_ID_PEG_945G 0x2771
9142+#define PCI_DEVICE_ID_945G 0x2772
9143+
9144+/* Calistoga - 945GM */
9145+#define PCI_DEVICE_ID_BRIDGE_945GM 0x27A0
9146+#define PCI_DEVICE_ID_PEG_945GM 0x27A1
9147+#define PCI_DEVICE_ID_945GM 0x27A2
9148+
9149+/* Calistoga Westbriar - 945GME/GSE */
9150+#define PCI_DEVICE_ID_BRIDGE_945GME 0x27AC
9151+#define PCI_DEVICE_ID_PEG_945GME 0x27AD
9152+#define PCI_DEVICE_ID_945GME 0x27AE
9153+
9154+/* Bearlake B - Q35 */
9155+#define PCI_DEVICE_ID_BRIDGE_Q35 0x29C0
9156+#define PCI_DEVICE_ID_PEG_Q35 0x29C1
9157+#define PCI_DEVICE_ID_Q35 0x29C2
9158+
9159+/* Bearlake B - Q35 */
9160+#define PCI_DEVICE_ID_BRIDGE_Q35A2 0x29B0
9161+#define PCI_DEVICE_ID_PEG_Q35A2 0x29B1
9162+#define PCI_DEVICE_ID_Q35A2 0x29B2
9163+
9164+/* Start: Gen4 core specific*/
9165+/* Broadwater - Unlocked - 965G */
9166+#define PCI_DEVICE_ID_BRIDGE_965G 0x2980
9167+#define PCI_DEVICE_ID_PEG_965G 0x2981
9168+#define PCI_DEVICE_ID_965G 0x2982
9169+
9170+/* Broadwater - Value - 945GZ */
9171+#define PCI_DEVICE_ID_BRIDGE_946GZ 0x2970
9172+#define PCI_DEVICE_ID_PEG_946GZ 0x2971
9173+#define PCI_DEVICE_ID_946GZ 0x2972
9174+
9175+/* Broadwater - Consumer - G965 */
9176+#define PCI_DEVICE_ID_BRIDGE_G965 0x29A0
9177+#define PCI_DEVICE_ID_PEG_G965 0x29A1
9178+#define PCI_DEVICE_ID_G965 0x29A2
9179+
9180+/* Broadwater - Corporate - Q965/Q963 */
9181+#define PCI_DEVICE_ID_BRIDGE_Q965 0x2990
9182+#define PCI_DEVICE_ID_PEG_Q965 0x2991
9183+#define PCI_DEVICE_ID_Q965 0x2992
9184+
9185+/* Crestline - Generic GM965 */
9186+#define PCI_DEVICE_ID_BRIDGE_GM965 0x2A00
9187+#define PCI_DEVICE_ID_PEG_GM965 0x2A01
9188+#define PCI_DEVICE_ID_GM965 0x2A02
9189+
9190+/* Crestline Westbriar GME965 */
9191+#define PCI_DEVICE_ID_BRIDGE_GME965 0x2A10
9192+#define PCI_DEVICE_ID_PEG_GME965 0x2A11
9193+#define PCI_DEVICE_ID_GME965 0x2A12
9194+
9195+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,5)
9196+/* Poulsbo */
9197+#define PCI_DEVICE_ID_BRIDGE_PLB 0x8100
9198+#define PCI_DEVICE_ID_PEG_PLB 0x8101
9199+#define PCI_DEVICE_ID_PLB 0x8108
9200+#endif
9201+
9202+/* Cantiga GM45 */
9203+#define PCI_DEVICE_ID_BRIDGE_GM45 0x2A40
9204+#define PCI_DEVICE_ID_PEG_GM45 0x2A41
9205+#define PCI_DEVICE_ID_GM45 0x2A42
9206+
9207+#define PCI_DEVICE_ID_BRIDGE_ELK 0x2E00
9208+#define PCI_DEVICE_ID_PEG_ELK 0x2E01
9209+#define PCI_DEVICE_ID_ELK 0x2E02
9210+
9211+#define PCI_DEVICE_ID_BRIDGE_Q45 0x2E10
9212+#define PCI_DEVICE_ID_PEG_Q45 0x2E11
9213+#define PCI_DEVICE_ID_Q45 0x2E12
9214+
9215+#define PCI_DEVICE_ID_BRIDGE_G45 0x2E20
9216+#define PCI_DEVICE_ID_PEG_G45 0x2E21
9217+#define PCI_DEVICE_ID_G45 0x2E22
9218+
9219+#define PCI_DEVICE_ID_BRIDGE_G41 0x2E30
9220+#define PCI_DEVICE_ID_PEG_G41 0x2E31
9221+#define PCI_DEVICE_ID_G41 0x2E32
9222+
9223+#define I915_GMADDR 0x18
9224+#define I915_MMADDR 0x10
9225+#define I915_PTEADDR 0x1C
9226+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
9227+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
9228+
9229+/* intel Q35 register */
9230+#define IQ35_BASE_STOLEN 0x5c
9231+#define IQ35_GTT_MEM_SIZE 0x300
9232+#define IQ35_GGMS_1MB 0x100
9233+#define IQ35_GGMS_2MB 0x200
9234diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/interface_abs.h patch_script_temp/drivers/gpu/drm/iegd/include/interface_abs.h
9235--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/drivers/gpu/drm/iegd/include/interface_abs.h 1969-12-31 17:00:00.000000000 -0700
9236+++ patch_script_temp/drivers/gpu/drm/iegd/include/interface_abs.h 2009-10-06 10:30:05.000000000 -0700
9237@@ -0,0 +1,48 @@
9238+/* -*- pse-c -*-
9239+ *----------------------------------------------------------------------------
9240+ * Filename: iegd_interface.c
9241+ * $Revision: 1.4 $
9242+ *----------------------------------------------------------------------------
9243+ * <>
9244+ * Copyright © 2006, Intel Corporation.
9245+ *
9246+ * This program is free software; you can redistribute it and/or modify it
9247+ * under the terms and conditions of the GNU General Public License,
9248+ * version 2, as published by the Free Software Foundation.
9249+ *
9250+ * This program is distributed in the hope it will be useful, but WITHOUT
9251+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9252+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9253+ * more details.
9254+ *
9255+ * You should have received a copy of the GNU General Public License along with
9256+ * this program; if not, write to the Free Software Foundation, Inc.,
9257+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9258+ *
9259+ *----------------------------------------------------------------------------
9260+ * Functions:
9261+ *
9262+ *
9263+ *----------------------------------------------------------------------------
9264+ */
9265+
9266+#ifndef _AGP_INTERFACE_ABS_LAYER
9267+#define _AGP_INTERFACE_ABS_LAYER
9268+
9269+#define AGP_BRIDGE_VAR bridge
9270+
9271+#define _MASK_MEMORY_PAGE(f) f( struct agp_bridge_data *bridge, \
9272+ struct page* addr, int type)
9273+
9274+#define _MASK_MEMORY(f) f( struct agp_bridge_data *bridge, \
9275+ unsigned long addr, int type)
9276+
9277+#define _CREATE_GATT_TABLE(f) f(struct agp_bridge_data *bridge)
9278+#define _FREE_GATT_TABLE(f) f(struct agp_bridge_data *bridge)
9279+#define _ALLOC_PAGE_AGP(f) f(struct agp_bridge_data *bridge)
9280+#define _ENABLE_AGP(f) f(struct agp_bridge_data *bridge, u32 mode)
9281+#define _TYPE_TO_MASK_TYPE(f) f(struct agp_bridge_data *bridge, int x)
9282+
9283+#define _mask_gtt() agp_bridge->driver->mask_memory( \
9284+ agp_bridge, mem->pages[i], mem->type)
9285+#endif
9286diff -uNr vanilla.2.6.31.rc6-67.1.moblin2-ivi/include/linux/config.h patch_script_temp/include/linux/config.h
9287--- vanilla.2.6.31.rc6-67.1.moblin2-ivi/include/linux/config.h 1969-12-31 17:00:00.000000000 -0700
9288+++ patch_script_temp/include/linux/config.h 2009-10-06 10:30:05.000000000 -0700
9289@@ -0,0 +1 @@
9290+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-samsung.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-samsung.patch
new file mode 100644
index 0000000000..88c9788013
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-samsung.patch
@@ -0,0 +1,206 @@
1
2
3From: Greg Kroah-Hartman <gregkh@suse.de>
4Subject: Samsung backlight driver
5
6This driver implements backlight controls for Samsung laptops that currently do not have ACPI support for this control.
7
8It has been tested on the N130 laptop and properly works there.
9
10Info for the NC10 was provided by Soeren Sonnenburg <bugreports@nn7.de> Info for the NP-Q45 from Jie Huchet <jeremie@lamah.info>
11
12Many thanks to Dmitry Torokhov <dmitry.torokhov@gmail.com> for cleanups and other suggestions on how to make the driver simpler.
13
14Cc: Soeren Sonnenburg <bugreports@nn7.de>
15Cc: Jie Huchet <jeremie@lamah.info>
16Cc: Dmitry Torokhov <dmitry.torokhov@gmail.com>
17Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
18
19---
20 drivers/platform/x86/Kconfig | 12 ++
21 drivers/platform/x86/Makefile | 1
22 drivers/platform/x86/samsung-backlight.c | 157
23+++++++++++++++++++++++++++++++ 3 files changed, 170 insertions(+)
24diff -purN vanilla-2.6.31-rc6/drivers/platform/x86/Kconfig linux-2.6.31-rc6/drivers/platform/x86/Kconfig
25--- vanilla-2.6.31-rc6/drivers/platform/x86/Kconfig 2009-08-17 20:55:37.000000000 +0000
26+++ linux-2.6.31-rc6/drivers/platform/x86/Kconfig 2009-08-17 20:58:25.000000000 +0000
27@@ -425,4 +425,16 @@ config ACPI_TOSHIBA
28
29 If you have a legacy free Toshiba laptop (such as the Libretto L1
30 series), say Y.
31+
32+config SAMSUNG_BACKLIGHT
33+ tristate "Samsung Backlight driver"
34+ depends on BACKLIGHT_CLASS_DEVICE
35+ depends on DMI
36+ ---help---
37+ This driver adds support to control the backlight on a number of
38+ Samsung laptops, like the N130.
39+
40+ It will only be loaded on laptops that properly need it, so it is
41+ safe to say Y here.
42+
43 endif # X86_PLATFORM_DEVICES
44diff -purN vanilla-2.6.31-rc6/drivers/platform/x86/Makefile linux-2.6.31-rc6/drivers/platform/x86/Makefile
45--- vanilla-2.6.31-rc6/drivers/platform/x86/Makefile 2009-08-17 20:55:37.000000000 +0000
46+++ linux-2.6.31-rc6/drivers/platform/x86/Makefile 2009-08-17 20:58:44.000000000 +0000
47@@ -20,3 +20,4 @@ obj-$(CONFIG_INTEL_MENLOW) += intel_menl
48 obj-$(CONFIG_ACPI_WMI) += wmi.o
49 obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
50 obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
51+obj-$(CONFIG_SAMSUNG_BACKLIGHT) += samsung-backlight.o
52diff -purN vanilla-2.6.31-rc6/drivers/platform/x86/samsung-backlight.c linux-2.6.31-rc6/drivers/platform/x86/samsung-backlight.c
53--- vanilla-2.6.31-rc6/drivers/platform/x86/samsung-backlight.c 1970-01-01 00:00:00.000000000 +0000
54+++ linux-2.6.31-rc6/drivers/platform/x86/samsung-backlight.c 2009-08-17 21:00:10.000000000 +0000
55@@ -0,0 +1,151 @@
56+/*
57+ * Samsung N130 and NC10 Laptop Backlight driver
58+ *
59+ * Copyright (C) 2009 Greg Kroah-Hartman (gregkh@suse.de)
60+ * Copyright (C) 2009 Novell Inc.
61+ *
62+ * This program is free software; you can redistribute it and/or modify it
63+ * under the terms of the GNU General Public License version 2 as published by
64+ * the Free Software Foundation.
65+ */
66+
67+#include <linux/kernel.h>
68+#include <linux/init.h>
69+#include <linux/module.h>
70+#include <linux/pci.h>
71+#include <linux/backlight.h>
72+#include <linux/fb.h>
73+#include <linux/dmi.h>
74+
75+#define MAX_BRIGHT 0xff
76+#define OFFSET 0xf4
77+
78+static int offset = OFFSET;
79+module_param(offset, int, S_IRUGO | S_IWUSR);
80+MODULE_PARM_DESC(offset, "The offset into the PCI device for the brightness control");
81+static struct pci_dev *pci_device;
82+static struct backlight_device *backlight_device;
83+
84+static u8 read_brightness(void)
85+{
86+ u8 brightness;
87+
88+ pci_read_config_byte(pci_device, offset, &brightness);
89+ return brightness;
90+}
91+
92+static void set_brightness(u8 brightness) {
93+ pci_write_config_byte(pci_device, offset, brightness); }
94+
95+static int get_brightness(struct backlight_device *bd) {
96+ return bd->props.brightness;
97+}
98+
99+static int update_status(struct backlight_device *bd) {
100+ set_brightness(bd->props.brightness);
101+ return 0;
102+}
103+
104+static struct backlight_ops backlight_ops = {
105+ .get_brightness = get_brightness,
106+ .update_status = update_status,
107+};
108+
109+static int __init dmi_check_cb(const struct dmi_system_id *id) {
110+ printk(KERN_INFO KBUILD_MODNAME ": found laptop model '%s'\n",
111+ id->ident);
112+ return 0;
113+}
114+
115+static struct dmi_system_id __initdata samsung_dmi_table[] = {
116+ {
117+ .ident = "N120",
118+ .matches = {
119+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
120+ DMI_MATCH(DMI_PRODUCT_NAME, "N120"),
121+ DMI_MATCH(DMI_BOARD_NAME, "N120"),
122+ },
123+ .callback = dmi_check_cb,
124+ },
125+ {
126+ .ident = "N130",
127+ .matches = {
128+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
129+ DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
130+ DMI_MATCH(DMI_BOARD_NAME, "N130"),
131+ },
132+ .callback = dmi_check_cb,
133+ },
134+ {
135+ .ident = "NC10",
136+ .matches = {
137+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
138+ DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
139+ DMI_MATCH(DMI_BOARD_NAME, "NC10"),
140+ },
141+ .callback = dmi_check_cb,
142+ },
143+ {
144+ .ident = "NP-Q45",
145+ .matches = {
146+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
147+ DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
148+ DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
149+ },
150+ .callback = dmi_check_cb,
151+ },
152+ { },
153+};
154+
155+static int __init samsung_init(void)
156+{
157+ if (!dmi_check_system(samsung_dmi_table))
158+ return -ENODEV;
159+
160+ /*
161+ * The Samsung N120, N130, and NC10 use pci device id 0x27ae, while the
162+ * NP-Q45 uses 0x2a02. Odds are we might need to add more to the
163+ * list over time...
164+ */
165+ pci_device = pci_get_device(PCI_VENDOR_ID_INTEL, 0x27ae, NULL);
166+ if (!pci_device) {
167+ pci_device = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2a02, NULL);
168+ if (!pci_device)
169+ return -ENODEV;
170+ }
171+
172+ /* create a backlight device to talk to this one */
173+ backlight_device = backlight_device_register("samsung",
174+ &pci_device->dev,
175+ NULL, &backlight_ops);
176+ if (IS_ERR(backlight_device)) {
177+ pci_dev_put(pci_device);
178+ return PTR_ERR(backlight_device);
179+ }
180+
181+ backlight_device->props.max_brightness = MAX_BRIGHT;
182+ backlight_device->props.brightness = read_brightness();
183+ backlight_device->props.power = FB_BLANK_UNBLANK;
184+ backlight_update_status(backlight_device);
185+
186+ return 0;
187+}
188+
189+static void __exit samsung_exit(void)
190+{
191+ backlight_device_unregister(backlight_device);
192+
193+ /* we are done with the PCI device, put it back */
194+ pci_dev_put(pci_device);
195+}
196+
197+module_init(samsung_init);
198+module_exit(samsung_exit);
199+
200+MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
201+MODULE_DESCRIPTION("Samsung Backlight driver");
202+MODULE_LICENSE("GPL");
203+MODULE_ALIAS("dmi:*:svnSAMSUNGELECTRONICSCO.,LTD.:pnN120:*:rnN120:*");
204+MODULE_ALIAS("dmi:*:svnSAMSUNGELECTRONICSCO.,LTD.:pnN130:*:rnN130:*");
205+MODULE_ALIAS("dmi:*:svnSAMSUNGELECTRONICSCO.,LTD.:pnNC10:*:rnNC10:*");
206+MODULE_ALIAS("dmi:*:svnSAMSUNGELECTRONICSCO.,LTD.:pnSQ45S70S:*:rnSQ45S70S:*");
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-silence-wacom.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-silence-wacom.patch
new file mode 100644
index 0000000000..635709ea91
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.31-silence-wacom.patch
@@ -0,0 +1,14 @@
1KERN_ERR is not appropriate for a printk level of a successful operation
2
3
4--- linux-2.6.30/drivers/hid/hid-wacom.c~ 2009-09-04 10:37:20.000000000 -0700
5+++ linux-2.6.30/drivers/hid/hid-wacom.c 2009-09-04 10:37:20.000000000 -0700
6@@ -244,7 +244,7 @@
7 ret = hid_register_driver(&wacom_driver);
8 if (ret)
9 printk(KERN_ERR "can't register wacom driver\n");
10- printk(KERN_ERR "wacom driver registered\n");
11+ printk(KERN_INFO "wacom driver registered\n");
12 return ret;
13 }
14
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-acpi-cstate-fixup.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-acpi-cstate-fixup.patch
new file mode 100644
index 0000000000..6a1204d4c1
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-acpi-cstate-fixup.patch
@@ -0,0 +1,173 @@
1From edeae90d635501a632efa0c7fe0667aa2cbe29be Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Mon, 28 Sep 2009 15:14:04 +0200
4Subject: [PATCH] acpi: Provide a set of tables to check the BIOS tables for correctness
5
6Today, the BIOS provides us with latency information for each C state.
7Unfortunately this information is sometimes put into the BIOS by
8apprentice BIOS programmers in a hurry, and as a result, it occasionally
9contains utter garbage.
10
11This patch adds a table based verification; if the CPU is known in the table,
12the values the BIOS provides to us are corrected for the apprentice-factor
13so that the CPUIDLE code can rely on the latency and break-even values
14to be reasonably sane.
15
16Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
17---
18 drivers/acpi/Makefile | 2 +-
19 drivers/acpi/processor_idle.c | 3 +
20 drivers/acpi/processor_mwait_table.c | 110 ++++++++++++++++++++++++++++++++++
21 include/acpi/processor.h | 3 +
22 4 files changed, 117 insertions(+), 1 deletions(-)
23 create mode 100644 drivers/acpi/processor_mwait_table.c
24
25diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
26index 82cd49d..ab56b28 100644
27--- a/drivers/acpi/Makefile
28+++ b/drivers/acpi/Makefile
29@@ -60,5 +60,5 @@ obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
30
31 # processor has its own "processor." module_param namespace
32 processor-y := processor_core.o processor_throttling.o
33-processor-y += processor_idle.o processor_thermal.o
34+processor-y += processor_idle.o processor_thermal.o processor_mwait_table.o
35 processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
36diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
37index cc61a62..db444a0 100644
38--- a/drivers/acpi/processor_idle.c
39+++ b/drivers/acpi/processor_idle.c
40@@ -1088,6 +1088,9 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
41 state->target_residency = cx->latency * latency_factor;
42 state->power_usage = cx->power;
43
44+ if (cx->entry_method == ACPI_CSTATE_FFH)
45+ acpi_verify_mwait_data(state, cx);
46+
47 state->flags = 0;
48 switch (cx->type) {
49 case ACPI_STATE_C1:
50diff --git a/drivers/acpi/processor_mwait_table.c b/drivers/acpi/processor_mwait_table.c
51new file mode 100644
52index 0000000..f29c28c
53--- /dev/null
54+++ b/drivers/acpi/processor_mwait_table.c
55@@ -0,0 +1,102 @@
56+/*
57+ * processor_mwait_table.c: BIOS table verification/correction
58+ *
59+ * (C) Copyright 2009 Intel Corporation
60+ * Authors:
61+ * Arjan van de Ven <arjan@linux.intel.com>
62+ *
63+ * This program is free software; you can redistribute it and/or
64+ * modify it under the terms of the GNU General Public License
65+ * as published by the Free Software Foundation; version 2
66+ * of the License.
67+ */
68+
69+#include <asm/processor.h>
70+#include <linux/acpi.h>
71+#include <acpi/processor.h>
72+#include <linux/cpuidle.h>
73+
74+
75+#define ATLEAST 1
76+#define ATMOST 2
77+#define EXACTLY 3
78+
79+#define MAX_ENTRIES 12
80+
81+struct mwait_entry {
82+ unsigned int mwait_value;
83+ unsigned long exit_latency;
84+ unsigned long break_even_point;
85+ int compare_method;
86+};
87+
88+struct cpu_entry {
89+ int vendor;
90+ int family;
91+ int model;
92+
93+ struct mwait_entry entries[MAX_ENTRIES];
94+};
95+
96+static struct cpu_entry mwait_entries[] =
97+{
98+ /* Intel "Atom" CPUs */
99+ {.vendor = X86_VENDOR_INTEL, .family = 6, . model = 28,
100+ .entries = {
101+ {0x00, 1, 1, ATLEAST},
102+ {0x10, 2, 20, ATLEAST},
103+ {0x30, 57, 300, ATLEAST},
104+ {0x50, 64, 4000, ATLEAST},
105+ }
106+ },
107+
108+
109+};
110+
111+
112+static unsigned long
113+compare_and_set(unsigned long original, unsigned long new, int compare)
114+{
115+ if (compare == EXACTLY)
116+ return new;
117+ if (compare == ATLEAST && new > original)
118+ return new;
119+ if (compare == ATMOST && new < original)
120+ return new;
121+ return original;
122+}
123+
124+
125+void acpi_verify_mwait_data(struct cpuidle_state *state,
126+ struct acpi_processor_cx *cx)
127+{
128+#if defined(__i386__) || defined(__x86_64__)
129+ int i;
130+
131+ struct cpuinfo_x86 *cpudata = &boot_cpu_data;
132+
133+
134+ for (i = 0; i < ARRAY_SIZE(mwait_entries); i++) {
135+ int j;
136+ if (mwait_entries[i].vendor != cpudata->x86_vendor)
137+ continue;
138+ if (mwait_entries[i].family != cpudata->x86)
139+ continue;
140+ if (mwait_entries[i].model != cpudata->x86_model)
141+ continue;
142+ for (j = 0; j < ARRAY_SIZE(mwait_entries[i].entries); j++) {
143+ if (!mwait_entries[i].entries[j].compare_method)
144+ continue;
145+ if (mwait_entries[i].entries[j].mwait_value != cx->address)
146+ continue;
147+ state->exit_latency = compare_and_set(state->exit_latency,
148+ mwait_entries[i].entries[j].exit_latency,
149+ mwait_entries[i].entries[j].compare_method);
150+ state->target_residency = compare_and_set(state->target_residency,
151+ mwait_entries[i].entries[j].break_even_point,
152+ mwait_entries[i].entries[j].compare_method);
153+ break;
154+ }
155+ }
156+#endif
157+}
158diff --git a/include/acpi/processor.h b/include/acpi/processor.h
159index 740ac3a..175e4d1 100644
160--- a/include/acpi/processor.h
161+++ b/include/acpi/processor.h
162@@ -352,5 +352,8 @@ static inline void acpi_thermal_cpufreq_exit(void)
163 return;
164 }
165 #endif
166+extern void acpi_verify_mwait_data(struct cpuidle_state *state,
167+ struct acpi_processor_cx *cx);
168+
169
170 #endif
171--
1721.6.2.5
173
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-cpuidle.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-cpuidle.patch
new file mode 100644
index 0000000000..ef930b76d4
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-cpuidle.patch
@@ -0,0 +1,407 @@
1From f890417fc5dc4450e1dab69d7a870d6e706825a5 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 20 Sep 2009 08:45:07 +0200
4Subject: [PATCH] cpuidle: Fix the menu governor to boost IO performance
5
6Fix the menu idle governor which balances power savings, energy efficiency
7and performance impact.
8
9The reason for a reworked governor is that there have been serious
10performance issues reported with the existing code on Nehalem server
11systems.
12
13To show this I'm sure Andrew wants to see benchmark results:
14(benchmark is "fio", "no cstates" is using "idle=poll")
15
16 no cstates current linux new algorithm
171 disk 107 Mb/s 85 Mb/s 105 Mb/s
182 disks 215 Mb/s 123 Mb/s 209 Mb/s
1912 disks 590 Mb/s 320 Mb/s 585 Mb/s
20
21In various power benchmark measurements, no degredation was found by our
22measurement&diagnostics team. Obviously a small percentage more power
23was used in the "fio" benchmark, due to the much higher performance.
24
25While it would be a novel idea to describe the new algorithm in this
26commit message, I cheaped out and described it in comments in the code
27instead.
28
29[changes in v2: spelling fixes from akpm, review feedback,
30folded menu-tng into menu.c
31 changes in v3: use this_rq() as per akpm suggestion]
32
33Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
34Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
35Cc: Len Brown <lenb@kernel.org>
36Acked-by: Ingo Molnar <mingo@elte.hu>
37Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
38Cc: Yanmin Zhang <yanmin_zhang@linux.intel.com>
39Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
40---
41 drivers/cpuidle/governors/menu.c | 251 ++++++++++++++++++++++++++++++++------
42 include/linux/sched.h | 4 +
43 kernel/sched.c | 13 ++
44 3 files changed, 229 insertions(+), 39 deletions(-)
45
46diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
47index f1df59f..9f3d775 100644
48--- a/drivers/cpuidle/governors/menu.c
49+++ b/drivers/cpuidle/governors/menu.c
50@@ -2,8 +2,12 @@
51 * menu.c - the menu idle governor
52 *
53 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
54+ * Copyright (C) 2009 Intel Corporation
55+ * Author:
56+ * Arjan van de Ven <arjan@linux.intel.com>
57 *
58- * This code is licenced under the GPL.
59+ * This code is licenced under the GPL version 2 as described
60+ * in the COPYING file that acompanies the Linux Kernel.
61 */
62
63 #include <linux/kernel.h>
64@@ -13,20 +17,153 @@
65 #include <linux/ktime.h>
66 #include <linux/hrtimer.h>
67 #include <linux/tick.h>
68+#include <linux/sched.h>
69
70-#define BREAK_FUZZ 4 /* 4 us */
71-#define PRED_HISTORY_PCT 50
72+#define BUCKETS 12
73+#define RESOLUTION 1024
74+#define DECAY 4
75+#define MAX_INTERESTING 50000
76+
77+/*
78+ * Concepts and ideas behind the menu governor
79+ *
80+ * For the menu governor, there are 3 decision factors for picking a C
81+ * state:
82+ * 1) Energy break even point
83+ * 2) Performance impact
84+ * 3) Latency tolerance (from pmqos infrastructure)
85+ * These these three factors are treated independently.
86+ *
87+ * Energy break even point
88+ * -----------------------
89+ * C state entry and exit have an energy cost, and a certain amount of time in
90+ * the C state is required to actually break even on this cost. CPUIDLE
91+ * provides us this duration in the "target_residency" field. So all that we
92+ * need is a good prediction of how long we'll be idle. Like the traditional
93+ * menu governor, we start with the actual known "next timer event" time.
94+ *
95+ * Since there are other source of wakeups (interrupts for example) than
96+ * the next timer event, this estimation is rather optimistic. To get a
97+ * more realistic estimate, a correction factor is applied to the estimate,
98+ * that is based on historic behavior. For example, if in the past the actual
99+ * duration always was 50% of the next timer tick, the correction factor will
100+ * be 0.5.
101+ *
102+ * menu uses a running average for this correction factor, however it uses a
103+ * set of factors, not just a single factor. This stems from the realization
104+ * that the ratio is dependent on the order of magnitude of the expected
105+ * duration; if we expect 500 milliseconds of idle time the likelihood of
106+ * getting an interrupt very early is much higher than if we expect 50 micro
107+ * seconds of idle time. A second independent factor that has big impact on
108+ * the actual factor is if there is (disk) IO outstanding or not.
109+ * (as a special twist, we consider every sleep longer than 50 milliseconds
110+ * as perfect; there are no power gains for sleeping longer than this)
111+ *
112+ * For these two reasons we keep an array of 12 independent factors, that gets
113+ * indexed based on the magnitude of the expected duration as well as the
114+ * "is IO outstanding" property.
115+ *
116+ * Limiting Performance Impact
117+ * ---------------------------
118+ * C states, especially those with large exit latencies, can have a real
119+ * noticable impact on workloads, which is not acceptable for most sysadmins,
120+ * and in addition, less performance has a power price of its own.
121+ *
122+ * As a general rule of thumb, menu assumes that the following heuristic
123+ * holds:
124+ * The busier the system, the less impact of C states is acceptable
125+ *
126+ * This rule-of-thumb is implemented using a performance-multiplier:
127+ * If the exit latency times the performance multiplier is longer than
128+ * the predicted duration, the C state is not considered a candidate
129+ * for selection due to a too high performance impact. So the higher
130+ * this multiplier is, the longer we need to be idle to pick a deep C
131+ * state, and thus the less likely a busy CPU will hit such a deep
132+ * C state.
133+ *
134+ * Two factors are used in determing this multiplier:
135+ * a value of 10 is added for each point of "per cpu load average" we have.
136+ * a value of 5 points is added for each process that is waiting for
137+ * IO on this CPU.
138+ * (these values are experimentally determined)
139+ *
140+ * The load average factor gives a longer term (few seconds) input to the
141+ * decision, while the iowait value gives a cpu local instantanious input.
142+ * The iowait factor may look low, but realize that this is also already
143+ * represented in the system load average.
144+ *
145+ */
146
147 struct menu_device {
148 int last_state_idx;
149
150 unsigned int expected_us;
151- unsigned int predicted_us;
152- unsigned int current_predicted_us;
153- unsigned int last_measured_us;
154- unsigned int elapsed_us;
155+ u64 predicted_us;
156+ unsigned int measured_us;
157+ unsigned int exit_us;
158+ unsigned int bucket;
159+ u64 correction_factor[BUCKETS];
160 };
161
162+
163+#define LOAD_INT(x) ((x) >> FSHIFT)
164+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
165+
166+static int get_loadavg(void)
167+{
168+ unsigned long this = this_cpu_load();
169+
170+
171+ return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
172+}
173+
174+static inline int which_bucket(unsigned int duration)
175+{
176+ int bucket = 0;
177+
178+ /*
179+ * We keep two groups of stats; one with no
180+ * IO pending, one without.
181+ * This allows us to calculate
182+ * E(duration)|iowait
183+ */
184+ if (nr_iowait_cpu())
185+ bucket = BUCKETS/2;
186+
187+ if (duration < 10)
188+ return bucket;
189+ if (duration < 100)
190+ return bucket + 1;
191+ if (duration < 1000)
192+ return bucket + 2;
193+ if (duration < 10000)
194+ return bucket + 3;
195+ if (duration < 100000)
196+ return bucket + 4;
197+ return bucket + 5;
198+}
199+
200+/*
201+ * Return a multiplier for the exit latency that is intended
202+ * to take performance requirements into account.
203+ * The more performance critical we estimate the system
204+ * to be, the higher this multiplier, and thus the higher
205+ * the barrier to go to an expensive C state.
206+ */
207+static inline int performance_multiplier(void)
208+{
209+ int mult = 1;
210+
211+ /* for higher loadavg, we are more reluctant */
212+
213+ mult += 2 * get_loadavg();
214+
215+ /* for IO wait tasks (per cpu!) we add 5x each */
216+ mult += 10 * nr_iowait_cpu();
217+
218+ return mult;
219+}
220+
221 static DEFINE_PER_CPU(struct menu_device, menu_devices);
222
223 /**
224@@ -38,37 +175,59 @@ static int menu_select(struct cpuidle_device *dev)
225 struct menu_device *data = &__get_cpu_var(menu_devices);
226 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
227 int i;
228+ int multiplier;
229+
230+ data->last_state_idx = 0;
231+ data->exit_us = 0;
232
233 /* Special case when user has set very strict latency requirement */
234- if (unlikely(latency_req == 0)) {
235- data->last_state_idx = 0;
236+ if (unlikely(latency_req == 0))
237 return 0;
238- }
239
240- /* determine the expected residency time */
241+ /* determine the expected residency time, round up */
242 data->expected_us =
243- (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
244+ DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);
245+
246+
247+ data->bucket = which_bucket(data->expected_us);
248+
249+ multiplier = performance_multiplier();
250+
251+ /*
252+ * if the correction factor is 0 (eg first time init or cpu hotplug
253+ * etc), we actually want to start out with a unity factor.
254+ */
255+ if (data->correction_factor[data->bucket] == 0)
256+ data->correction_factor[data->bucket] = RESOLUTION * DECAY;
257+
258+ /* Make sure to round up for half microseconds */
259+ data->predicted_us = DIV_ROUND_CLOSEST(
260+ data->expected_us * data->correction_factor[data->bucket],
261+ RESOLUTION * DECAY);
262+
263+ /*
264+ * We want to default to C1 (hlt), not to busy polling
265+ * unless the timer is happening really really soon.
266+ */
267+ if (data->expected_us > 5)
268+ data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
269
270- /* Recalculate predicted_us based on prediction_history_pct */
271- data->predicted_us *= PRED_HISTORY_PCT;
272- data->predicted_us += (100 - PRED_HISTORY_PCT) *
273- data->current_predicted_us;
274- data->predicted_us /= 100;
275
276 /* find the deepest idle state that satisfies our constraints */
277- for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
278+ for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
279 struct cpuidle_state *s = &dev->states[i];
280
281- if (s->target_residency > data->expected_us)
282- break;
283 if (s->target_residency > data->predicted_us)
284 break;
285 if (s->exit_latency > latency_req)
286 break;
287+ if (s->exit_latency * multiplier > data->predicted_us)
288+ break;
289+ data->exit_us = s->exit_latency;
290+ data->last_state_idx = i;
291 }
292
293- data->last_state_idx = i - 1;
294- return i - 1;
295+ return data->last_state_idx;
296 }
297
298 /**
299@@ -85,35 +244,49 @@ static void menu_reflect(struct cpuidle_device *dev)
300 unsigned int last_idle_us = cpuidle_get_last_residency(dev);
301 struct cpuidle_state *target = &dev->states[last_idx];
302 unsigned int measured_us;
303+ u64 new_factor;
304
305 /*
306 * Ugh, this idle state doesn't support residency measurements, so we
307 * are basically lost in the dark. As a compromise, assume we slept
308- * for one full standard timer tick. However, be aware that this
309- * could potentially result in a suboptimal state transition.
310+ * for the whole expected time.
311 */
312 if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
313- last_idle_us = USEC_PER_SEC / HZ;
314+ last_idle_us = data->expected_us;
315+
316+
317+ measured_us = last_idle_us;
318
319 /*
320- * measured_us and elapsed_us are the cumulative idle time, since the
321- * last time we were woken out of idle by an interrupt.
322+ * We correct for the exit latency; we are assuming here that the
323+ * exit latency happens after the event that we're interested in.
324 */
325- if (data->elapsed_us <= data->elapsed_us + last_idle_us)
326- measured_us = data->elapsed_us + last_idle_us;
327+ if (measured_us > data->exit_us)
328+ measured_us -= data->exit_us;
329+
330+
331+ /* update our correction ratio */
332+
333+ new_factor = data->correction_factor[data->bucket]
334+ * (DECAY - 1) / DECAY;
335+
336+ if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING)
337+ new_factor += RESOLUTION * measured_us / data->expected_us;
338 else
339- measured_us = -1;
340+ /*
341+ * we were idle so long that we count it as a perfect
342+ * prediction
343+ */
344+ new_factor += RESOLUTION;
345
346- /* Predict time until next break event */
347- data->current_predicted_us = max(measured_us, data->last_measured_us);
348+ /*
349+ * We don't want 0 as factor; we always want at least
350+ * a tiny bit of estimated time.
351+ */
352+ if (new_factor == 0)
353+ new_factor = 1;
354
355- if (last_idle_us + BREAK_FUZZ <
356- data->expected_us - target->exit_latency) {
357- data->last_measured_us = measured_us;
358- data->elapsed_us = 0;
359- } else {
360- data->elapsed_us = measured_us;
361- }
362+ data->correction_factor[data->bucket] = new_factor;
363 }
364
365 /**
366diff --git a/include/linux/sched.h b/include/linux/sched.h
367index cdc1298..d559406 100644
368--- a/include/linux/sched.h
369+++ b/include/linux/sched.h
370@@ -140,6 +140,10 @@ extern int nr_processes(void);
371 extern unsigned long nr_running(void);
372 extern unsigned long nr_uninterruptible(void);
373 extern unsigned long nr_iowait(void);
374+extern unsigned long nr_iowait_cpu(void);
375+extern unsigned long this_cpu_load(void);
376+
377+
378 extern void calc_global_load(void);
379 extern u64 cpu_nr_migrations(int cpu);
380
381diff --git a/kernel/sched.c b/kernel/sched.c
382index 4dbe8e7..541b370 100644
383--- a/kernel/sched.c
384+++ b/kernel/sched.c
385@@ -2910,6 +2910,19 @@ unsigned long nr_iowait(void)
386 return sum;
387 }
388
389+unsigned long nr_iowait_cpu(void)
390+{
391+ struct rq *this = this_rq();
392+ return atomic_read(&this->nr_iowait);
393+}
394+
395+unsigned long this_cpu_load(void)
396+{
397+ struct rq *this = this_rq();
398+ return this->cpu_load[0];
399+}
400+
401+
402 /* Variables and functions for calc_load */
403 static atomic_long_t calc_load_tasks;
404 static unsigned long calc_load_update;
405--
4061.6.0.6
407
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-honor-opost-flag-for-echoes.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-honor-opost-flag-for-echoes.patch
new file mode 100644
index 0000000000..216fca7a2b
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-honor-opost-flag-for-echoes.patch
@@ -0,0 +1,86 @@
1commit ee5aa7b8b98774f408d20a2f61f97a89ac66c29b
2Author: Joe Peterson <joe@skyrush.com>
3Date: Wed Sep 9 15:03:13 2009 -0600
4
5 n_tty: honor opost flag for echoes
6
7 Fixes the following bug:
8
9 http://bugs.linuxbase.org/show_bug.cgi?id=2692
10
11 Causes processing of echoed characters (output from the echo buffer) to
12 honor the O_OPOST flag, which is consistent with the old behavior.
13
14 Note that this and the next patch ("n_tty: move echoctl check and
15 clean up logic") were verified together by the bug reporters, and
16 the test now passes.
17
18 Signed-off-by: Joe Peterson <joe@skyrush.com>
19 Cc: Linux Torvalds <torvalds@linux-foundation.org>
20 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
21
22diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
23index 4e28b35..e6eeeb2 100644
24--- a/drivers/char/n_tty.c
25+++ b/drivers/char/n_tty.c
26@@ -272,7 +272,8 @@ static inline int is_continuation(unsigned char c, struct tty_struct *tty)
27 *
28 * This is a helper function that handles one output character
29 * (including special characters like TAB, CR, LF, etc.),
30- * putting the results in the tty driver's write buffer.
31+ * doing OPOST processing and putting the results in the
32+ * tty driver's write buffer.
33 *
34 * Note that Linux currently ignores TABDLY, CRDLY, VTDLY, FFDLY
35 * and NLDLY. They simply aren't relevant in the world today.
36@@ -350,8 +351,9 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
37 * @c: character (or partial unicode symbol)
38 * @tty: terminal device
39 *
40- * Perform OPOST processing. Returns -1 when the output device is
41- * full and the character must be retried.
42+ * Output one character with OPOST processing.
43+ * Returns -1 when the output device is full and the character
44+ * must be retried.
45 *
46 * Locking: output_lock to protect column state and space left
47 * (also, this is called from n_tty_write under the
48@@ -377,8 +379,11 @@ static int process_output(unsigned char c, struct tty_struct *tty)
49 /**
50 * process_output_block - block post processor
51 * @tty: terminal device
52- * @inbuf: user buffer
53- * @nr: number of bytes
54+ * @buf: character buffer
55+ * @nr: number of bytes to output
56+ *
57+ * Output a block of characters with OPOST processing.
58+ * Returns the number of characters output.
59 *
60 * This path is used to speed up block console writes, among other
61 * things when processing blocks of output data. It handles only
62@@ -605,12 +610,18 @@ static void process_echoes(struct tty_struct *tty)
63 if (no_space_left)
64 break;
65 } else {
66- int retval;
67-
68- retval = do_output_char(c, tty, space);
69- if (retval < 0)
70- break;
71- space -= retval;
72+ if (O_OPOST(tty) &&
73+ !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
74+ int retval = do_output_char(c, tty, space);
75+ if (retval < 0)
76+ break;
77+ space -= retval;
78+ } else {
79+ if (!space)
80+ break;
81+ tty_put_char(tty, c);
82+ space -= 1;
83+ }
84 cp += 1;
85 nr -= 1;
86 }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-move-echoctl-check-and-clean-up-logic.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-move-echoctl-check-and-clean-up-logic.patch
new file mode 100644
index 0000000000..3a7e0fd942
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-n_tty-move-echoctl-check-and-clean-up-logic.patch
@@ -0,0 +1,91 @@
1commit 62b263585bb5005d44a764c90d80f9c4bb8188c1
2Author: Joe Peterson <joe@skyrush.com>
3Date: Wed Sep 9 15:03:47 2009 -0600
4
5 n_tty: move echoctl check and clean up logic
6
7 Check L_ECHOCTL before insertting a character in the echo buffer
8 (rather than as the buffer is processed), to be more consistent with
9 when all other L_ flags are checked. Also cleaned up the related logic.
10
11 Note that this and the previous patch ("n_tty: honor opost flag for echoes")
12 were verified together by the reporters of the bug that patch addresses
13 (http://bugs.linuxbase.org/show_bug.cgi?id=2692), and the test now passes.
14
15 Signed-off-by: Joe Peterson <joe@skyrush.com>
16 Cc: Linus Torvalds <torvalds@linux-foundation.org>
17 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
18
19diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
20index e6eeeb2..2e50f4d 100644
21--- a/drivers/char/n_tty.c
22+++ b/drivers/char/n_tty.c
23@@ -576,33 +576,23 @@ static void process_echoes(struct tty_struct *tty)
24 break;
25
26 default:
27- if (iscntrl(op)) {
28- if (L_ECHOCTL(tty)) {
29- /*
30- * Ensure there is enough space
31- * for the whole ctrl pair.
32- */
33- if (space < 2) {
34- no_space_left = 1;
35- break;
36- }
37- tty_put_char(tty, '^');
38- tty_put_char(tty, op ^ 0100);
39- tty->column += 2;
40- space -= 2;
41- } else {
42- if (!space) {
43- no_space_left = 1;
44- break;
45- }
46- tty_put_char(tty, op);
47- space--;
48- }
49- }
50 /*
51- * If above falls through, this was an
52- * undefined op.
53+ * If the op is not a special byte code,
54+ * it is a ctrl char tagged to be echoed
55+ * as "^X" (where X is the letter
56+ * representing the control char).
57+ * Note that we must ensure there is
58+ * enough space for the whole ctrl pair.
59+ *
60 */
61+ if (space < 2) {
62+ no_space_left = 1;
63+ break;
64+ }
65+ tty_put_char(tty, '^');
66+ tty_put_char(tty, op ^ 0100);
67+ tty->column += 2;
68+ space -= 2;
69 cp += 2;
70 nr -= 2;
71 }
72@@ -809,8 +799,8 @@ static void echo_char_raw(unsigned char c, struct tty_struct *tty)
73 * Echo user input back onto the screen. This must be called only when
74 * L_ECHO(tty) is true. Called from the driver receive_buf path.
75 *
76- * This variant tags control characters to be possibly echoed as
77- * as "^X" (where X is the letter representing the control char).
78+ * This variant tags control characters to be echoed as "^X"
79+ * (where X is the letter representing the control char).
80 *
81 * Locking: echo_lock to protect the echo buffer
82 */
83@@ -823,7 +813,7 @@ static void echo_char(unsigned char c, struct tty_struct *tty)
84 add_echo_byte(ECHO_OP_START, tty);
85 add_echo_byte(ECHO_OP_START, tty);
86 } else {
87- if (iscntrl(c) && c != '\t')
88+ if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t')
89 add_echo_byte(ECHO_OP_START, tty);
90 add_echo_byte(c, tty);
91 }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-timer-fix.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-timer-fix.patch
new file mode 100644
index 0000000000..a6f5079fc3
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.32-timer-fix.patch
@@ -0,0 +1,64 @@
1From 33725d4939f457b12d7bc1bcbcc0dfb8b2f5bd48 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Thu, 24 Sep 2009 13:24:16 +0200
4Subject: [PATCH] x86, timers: check for pending timers after (device) interrupts
5
6Now that range timers and deferred timers are common, I found a
7problem with these using the "perf timechart" tool.
8
9It turns out that on x86, these two 'opportunistic' timers only
10get checked when another "real" timer happens.
11These opportunistic timers have the objective to save power by
12hitchhiking on other wakeups, as to avoid CPU wakeups by themselves
13as much as possible.
14
15The change in this patch runs this check not only at timer interrupts,
16but at all (device) interrupts. The effect is that
171) the deferred timers/range timers get delayed less
182) the range timers cause less wakeups by themselves because
19 the percentage of hitchhiking on existing wakeup events goes up.
20
21I've verified the working of the patch using "perf timechart",
22the original exposed bug is gone with this patch.
23
24Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
25---
26 arch/x86/kernel/irq.c | 2 ++
27 arch/x86/kernel/smp.c | 1 +
28 2 files changed, 3 insertions(+), 0 deletions(-)
29
30diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
31index 74656d1..3912061 100644
32--- a/arch/x86/kernel/irq.c
33+++ b/arch/x86/kernel/irq.c
34@@ -244,6 +244,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
35 __func__, smp_processor_id(), vector, irq);
36 }
37
38+ run_local_timers();
39 irq_exit();
40
41 set_irq_regs(old_regs);
42@@ -268,6 +269,7 @@ void smp_generic_interrupt(struct pt_regs *regs)
43 if (generic_interrupt_extension)
44 generic_interrupt_extension();
45
46+ run_local_timers();
47 irq_exit();
48
49 set_irq_regs(old_regs);
50diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
51index ec1de97..d915d95 100644
52--- a/arch/x86/kernel/smp.c
53+++ b/arch/x86/kernel/smp.c
54@@ -198,6 +198,7 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
55 {
56 ack_APIC_irq();
57 inc_irq_stat(irq_resched_count);
58+ run_local_timers();
59 /*
60 * KVM uses this interrupt to force a cpu out of guest mode
61 */
62--
631.6.0.6
64
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-copy-checks.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-copy-checks.patch
new file mode 100644
index 0000000000..720fda24e8
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-copy-checks.patch
@@ -0,0 +1,275 @@
1From 524a1da3c45683cec77480acc6cab1d33ae8d5cb Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sat, 26 Sep 2009 12:36:21 +0200
4Subject: [PATCH] x86: Use __builtin_object_size to validate the buffer size for copy_from_user
5
6gcc (4.x) supports the __builtin_object_size() builtin, which reports the
7size of an object that a pointer point to, when known at compile time.
8If the buffer size is not known at compile time, a constant -1 is returned.
9
10This patch uses this feature to add a sanity check to copy_from_user();
11if the target buffer is known to be smaller than the copy size, the copy
12is aborted and a WARNing is emitted in memory debug mode.
13
14These extra checks compile away when the object size is not known,
15or if both the buffer size and the copy length are constants.
16
17Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
18Reviewed-by: Ingo Molnar <mingo@elte.hu>
19---
20 arch/x86/include/asm/uaccess_32.h | 19 ++++++++++++++++++-
21 arch/x86/include/asm/uaccess_64.h | 19 ++++++++++++++++++-
22 arch/x86/kernel/x8664_ksyms_64.c | 2 +-
23 arch/x86/lib/copy_user_64.S | 4 ++--
24 arch/x86/lib/usercopy_32.c | 4 ++--
25 include/linux/compiler-gcc4.h | 2 ++
26 include/linux/compiler.h | 4 ++++
27 7 files changed, 47 insertions(+), 7 deletions(-)
28
29diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
30index 632fb44..582d6ae 100644
31--- a/arch/x86/include/asm/uaccess_32.h
32+++ b/arch/x86/include/asm/uaccess_32.h
33@@ -187,9 +187,26 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
34
35 unsigned long __must_check copy_to_user(void __user *to,
36 const void *from, unsigned long n);
37-unsigned long __must_check copy_from_user(void *to,
38+unsigned long __must_check _copy_from_user(void *to,
39 const void __user *from,
40 unsigned long n);
41+
42+static inline unsigned long __must_check copy_from_user(void *to,
43+ const void __user *from,
44+ unsigned long n)
45+{
46+ int sz = __compiletime_object_size(to);
47+ int ret = -EFAULT;
48+
49+ if (likely(sz == -1 || sz >= n))
50+ ret = _copy_from_user(to, from, n);
51+#ifdef CONFIG_DEBUG_VM
52+ else
53+ WARN(1, "Buffer overflow detected!\n");
54+#endif
55+ return ret;
56+}
57+
58 long __must_check strncpy_from_user(char *dst, const char __user *src,
59 long count);
60 long __must_check __strncpy_from_user(char *dst,
61diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
62index db24b21..ce6fec7 100644
63--- a/arch/x86/include/asm/uaccess_64.h
64+++ b/arch/x86/include/asm/uaccess_64.h
65@@ -21,10 +21,27 @@ copy_user_generic(void *to, const void *from, unsigned len);
66 __must_check unsigned long
67 copy_to_user(void __user *to, const void *from, unsigned len);
68 __must_check unsigned long
69-copy_from_user(void *to, const void __user *from, unsigned len);
70+_copy_from_user(void *to, const void __user *from, unsigned len);
71 __must_check unsigned long
72 copy_in_user(void __user *to, const void __user *from, unsigned len);
73
74+static inline unsigned long __must_check copy_from_user(void *to,
75+ const void __user *from,
76+ unsigned long n)
77+{
78+ int sz = __compiletime_object_size(to);
79+ int ret = -EFAULT;
80+
81+ if (likely(sz == -1 || sz >= n))
82+ ret = _copy_from_user(to, from, n);
83+#ifdef CONFIG_DEBUG_VM
84+ else
85+ WARN(1, "Buffer overflow detected!\n");
86+#endif
87+ return ret;
88+}
89+
90+
91 static __always_inline __must_check
92 int __copy_from_user(void *dst, const void __user *src, unsigned size)
93 {
94diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
95index 3909e3b..a0cdd8c 100644
96--- a/arch/x86/kernel/x8664_ksyms_64.c
97+++ b/arch/x86/kernel/x8664_ksyms_64.c
98@@ -30,7 +30,7 @@ EXPORT_SYMBOL(__put_user_8);
99
100 EXPORT_SYMBOL(copy_user_generic);
101 EXPORT_SYMBOL(__copy_user_nocache);
102-EXPORT_SYMBOL(copy_from_user);
103+EXPORT_SYMBOL(_copy_from_user);
104 EXPORT_SYMBOL(copy_to_user);
105 EXPORT_SYMBOL(__copy_from_user_inatomic);
106
107diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
108index 6ba0f7b..4be3c41 100644
109--- a/arch/x86/lib/copy_user_64.S
110+++ b/arch/x86/lib/copy_user_64.S
111@@ -78,7 +78,7 @@ ENTRY(copy_to_user)
112 ENDPROC(copy_to_user)
113
114 /* Standard copy_from_user with segment limit checking */
115-ENTRY(copy_from_user)
116+ENTRY(_copy_from_user)
117 CFI_STARTPROC
118 GET_THREAD_INFO(%rax)
119 movq %rsi,%rcx
120@@ -88,7 +88,7 @@ ENTRY(copy_from_user)
121 jae bad_from_user
122 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
123 CFI_ENDPROC
124-ENDPROC(copy_from_user)
125+ENDPROC(_copy_from_user)
126
127 ENTRY(copy_user_generic)
128 CFI_STARTPROC
129diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
130index 1f118d4..8498684 100644
131--- a/arch/x86/lib/usercopy_32.c
132+++ b/arch/x86/lib/usercopy_32.c
133@@ -874,7 +874,7 @@ EXPORT_SYMBOL(copy_to_user);
134 * data to the requested size using zero bytes.
135 */
136 unsigned long
137-copy_from_user(void *to, const void __user *from, unsigned long n)
138+_copy_from_user(void *to, const void __user *from, unsigned long n)
139 {
140 if (access_ok(VERIFY_READ, from, n))
141 n = __copy_from_user(to, from, n);
142@@ -882,4 +882,4 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
143 memset(to, 0, n);
144 return n;
145 }
146-EXPORT_SYMBOL(copy_from_user);
147+EXPORT_SYMBOL(_copy_from_user);
148diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
149index 450fa59..a3aef5d 100644
150--- a/include/linux/compiler-gcc4.h
151+++ b/include/linux/compiler-gcc4.h
152@@ -37,3 +37,5 @@
153 #define __cold __attribute__((__cold__))
154
155 #endif
156+
157+#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
158diff --git a/include/linux/compiler.h b/include/linux/compiler.h
159index 9d4c4b0..9c42853 100644
160--- a/include/linux/compiler.h
161+++ b/include/linux/compiler.h
162@@ -185,6 +185,10 @@ extern void __chk_io_ptr(const volatile void __iomem *);
163 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
164 #endif
165
166+/* Compile time object size, -1 for unknown */
167+#ifndef __compiletime_object_size
168+# define __compiletime_object_size(obj) -1
169+#endif
170 /*
171 * Prevent the compiler from merging or refetching accesses. The compiler
172 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
173--
1741.6.0.6
175
176From 350cf3cd513e6759ae6852946532a47249f25600 Mon Sep 17 00:00:00 2001
177From: Arjan van de Ven <arjan@linux.intel.com>
178Date: Wed, 30 Sep 2009 12:57:46 +0200
179Subject: [PATCH] x86: Turn the copy_from_user check into an (optional) compile time warning
180
181A previous patch added the buffer size check to copy_from_user().
182
183One of the things learned from analyzing the result of the previous patch
184is that in general, gcc is really good at proving that the code contains
185sufficient security checks to not need to do a runtime check. But that
186for those cases where gcc could not prove this, there was a relatively
187high percentage of real security issues.
188
189This patch turns the case of "gcc cannot prove" into a compile time
190warning, as long as a sufficiently new gcc is in use.
191The objective is that these warnings will trigger developers checking
192new cases out before a security hole enters a linux kernel release.
193
194Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
195---
196 arch/x86/include/asm/uaccess_32.h | 12 +++++++++---
197 arch/x86/lib/usercopy_32.c | 6 ++++++
198 include/linux/compiler-gcc4.h | 3 +++
199 include/linux/compiler.h | 4 ++++
200 4 files changed, 22 insertions(+), 3 deletions(-)
201
202diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
203index 582d6ae..7826639 100644
204--- a/arch/x86/include/asm/uaccess_32.h
205+++ b/arch/x86/include/asm/uaccess_32.h
206@@ -191,6 +191,13 @@ unsigned long __must_check _copy_from_user(void *to,
207 const void __user *from,
208 unsigned long n);
209
210+
211+extern void copy_from_user_overflow(void)
212+#ifdef CONFIG_DEBUG_STACKOVERFLOW
213+ __compiletime_warning("copy_from_user buffer size is not provably correct")
214+#endif
215+;
216+
217 static inline unsigned long __must_check copy_from_user(void *to,
218 const void __user *from,
219 unsigned long n)
220@@ -200,10 +207,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
221
222 if (likely(sz == -1 || sz >= n))
223 ret = _copy_from_user(to, from, n);
224-#ifdef CONFIG_DEBUG_VM
225 else
226- WARN(1, "Buffer overflow detected!\n");
227-#endif
228+ copy_from_user_overflow();
229+
230 return ret;
231 }
232
233diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
234index 8498684..e218d5d 100644
235--- a/arch/x86/lib/usercopy_32.c
236+++ b/arch/x86/lib/usercopy_32.c
237@@ -883,3 +883,9 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
238 return n;
239 }
240 EXPORT_SYMBOL(_copy_from_user);
241+
242+void copy_from_user_overflow(void)
243+{
244+ WARN(1, "Buffer overflow detected!\n");
245+}
246+EXPORT_SYMBOL(copy_from_user_overflow);
247diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
248index a3aef5d..f1709c1 100644
249--- a/include/linux/compiler-gcc4.h
250+++ b/include/linux/compiler-gcc4.h
251@@ -39,3 +39,6 @@
252 #endif
253
254 #define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
255+#if __GNUC_MINOR__ >= 4
256+#define __compiletime_warning(message) __attribute__((warning(message)))
257+#endif
258diff --git a/include/linux/compiler.h b/include/linux/compiler.h
259index 9c42853..241dfd8 100644
260--- a/include/linux/compiler.h
261+++ b/include/linux/compiler.h
262@@ -189,6 +189,10 @@ extern void __chk_io_ptr(const volatile void __iomem *);
263 #ifndef __compiletime_object_size
264 # define __compiletime_object_size(obj) -1
265 #endif
266+#ifndef __compiletime_warning
267+# define __compiletime_warning(message)
268+#endif
269+
270 /*
271 * Prevent the compiler from merging or refetching accesses. The compiler
272 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
273--
2741.6.2.5
275
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-pit-fix.patch b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-pit-fix.patch
new file mode 100644
index 0000000000..78a297400f
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.31.5/linux-2.6.33-pit-fix.patch
@@ -0,0 +1,95 @@
1From 42cb68d81a218b0fd7c053356d379a93270b40ea Mon Sep 17 00:00:00 2001
2From: Yong Wang <yong.y.wang@intel.com>
3Date: Fri, 30 Oct 2009 10:33:20 +0800
4Subject: [PATCH] x86: Do not unregister PIT clocksource on PIT oneshot setup/shutdown
5
6Backported from upstream commit 8cab02dc3c58a12235c6d463ce684dded9696848
7and this fixes bug #7377 "system can not resume from S3". Further information
8can be found at http://bugzilla.kernel.org/show_bug.cgi?id=14222.
9
10Signed-off-by: Yong Wang <yong.y.wang@intel.com>
11---
12 arch/x86/kernel/i8253.c | 36 ++----------------------------------
13 1 files changed, 2 insertions(+), 34 deletions(-)
14
15diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
16index da890f0..23c1679 100644
17--- a/arch/x86/kernel/i8253.c
18+++ b/arch/x86/kernel/i8253.c
19@@ -19,14 +19,6 @@
20 DEFINE_SPINLOCK(i8253_lock);
21 EXPORT_SYMBOL(i8253_lock);
22
23-#ifdef CONFIG_X86_32
24-static void pit_disable_clocksource(void);
25-static void pit_enable_clocksource(void);
26-#else
27-static inline void pit_disable_clocksource(void) { }
28-static inline void pit_enable_clocksource(void) { }
29-#endif
30-
31 /*
32 * HPET replaces the PIT, when enabled. So we need to know, which of
33 * the two timers is used
34@@ -59,17 +51,15 @@ static void init_pit_timer(enum clock_event_mode mode,
35 outb_pit(0, PIT_CH0);
36 outb_pit(0, PIT_CH0);
37 }
38- pit_disable_clocksource();
39 break;
40
41 case CLOCK_EVT_MODE_ONESHOT:
42 /* One shot setup */
43- pit_disable_clocksource();
44 outb_pit(0x38, PIT_MODE);
45 break;
46
47 case CLOCK_EVT_MODE_RESUME:
48- pit_enable_clocksource();
49+ /* Nothing to do here */
50 break;
51 }
52 spin_unlock(&i8253_lock);
53@@ -202,27 +192,8 @@ static struct clocksource pit_cs = {
54 .shift = 20,
55 };
56
57-int pit_cs_registered;
58-static void pit_disable_clocksource(void)
59-{
60- if (pit_cs_registered) {
61- clocksource_unregister(&pit_cs);
62- pit_cs_registered = 0;
63- }
64-}
65-
66-static void pit_enable_clocksource(void)
67-{
68- if (!pit_cs_registered && !clocksource_register(&pit_cs)) {
69- pit_cs_registered = 1;
70- }
71-}
72-
73-
74-
75 static int __init init_pit_clocksource(void)
76 {
77- int ret;
78 /*
79 * Several reasons not to register PIT as a clocksource:
80 *
81@@ -236,10 +207,7 @@ static int __init init_pit_clocksource(void)
82
83 pit_cs.mult = clocksource_hz2mult(CLOCK_TICK_RATE, pit_cs.shift);
84
85- ret = clocksource_register(&pit_cs);
86- if (!ret)
87- pit_cs_registered = 1;
88- return ret;
89+ return clocksource_register(&pit_cs);
90 }
91 arch_initcall(init_pit_clocksource);
92
93--
941.5.5.1
95
diff --git a/meta-moblin/packages/linux/linux-moblin_2.6.31.5.bb b/meta-moblin/packages/linux/linux-moblin_2.6.31.5.bb
new file mode 100644
index 0000000000..409242de31
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin_2.6.31.5.bb
@@ -0,0 +1,47 @@
1require linux-moblin.inc
2
3PR = "r0"
4
5DEFAULT_PREFERENCE = "-1"
6DEFAULT_PREFERENCE_netbook = "1"
7#DEFAULT_PREFERENCE_menlow = "1"
8
9SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-2.6.31.5.tar.bz2 \
10 file://linux-2.6-build-nonintconfig.patch;patch=1 \
11 file://linux-2.6.32-cpuidle.patch;patch=1 \
12 file://linux-2.6.32-n_tty-honor-opost-flag-for-echoes.patch;patch=1 \
13 file://linux-2.6.32-n_tty-move-echoctl-check-and-clean-up-logic.patch;patch=1 \
14 file://linux-2.6.33-pit-fix.patch;patch=1 \
15 file://linux-2.6.29-dont-wait-for-mouse.patch;patch=1 \
16 file://linux-2.6.29-sreadahead.patch;patch=1 \
17 file://linux-2.6.29-kms-edid-cache.patch;patch=1 \
18 file://linux-2.6.29-kms-run-async.patch;patch=1 \
19 file://linux-2.6.29-kms-dont-blank-display.patch;patch=1 \
20 file://linux-2.6.29-kms-after-sata.patch;patch=1 \
21 file://linux-2.6.30-non-root-X.patch;patch=1 \
22 file://linux-2.6.31-drm-kms-flip.patch;patch=1 \
23 file://linux-2.6.31-drm-mem-info.patch;patch=1 \
24 file://linux-2.6.31-drm-i915-fix.patch;patch=1 \
25 file://linux-2.6.31-drm-i915-opregion.patch;patch=1 \
26 file://linux-2.6.31-drm-i915-vblank-fix.patch;patch=1 \
27 file://linux-2.6.29-silence-acer-message.patch;patch=1 \
28 file://linux-2.6.31-silence-wacom.patch;patch=1 \
29 file://linux-2.6.29-jbd-longer-commit-interval.patch;patch=1 \
30 file://linux-2.6.29-touchkit.patch;patch=1 \
31 file://linux-2.6.31-1-2-timberdale.patch;patch=1 \
32 file://linux-2.6.31-2-2-timberdale.patch;patch=1 \
33 file://linux-2.6-driver-level-usb-autosuspend.patch;patch=1 \
34 file://linux-2.6.31-bluetooth-suspend.patch;patch=1 \
35 file://linux-2.6-usb-uvc-autosuspend.patch;patch=1 \
36 file://linux-2.6.31-samsung.patch;patch=1 \
37 file://MRST-GFX-driver-consolidated.patch;patch=1 \
38 file://linux-2.6.31-iegd.patch;patch=1 \
39 file://linux-2.6.32-acpi-cstate-fixup.patch;patch=1 \
40 file://linux-2.6.32-timer-fix.patch;patch=1 \
41 file://linux-2.6.33-copy-checks.patch;patch=1 \
42 file://close_debug_info_of_rt2860.patch;patch=1 \
43# file://i915_split.patch.patch;patch=1 \
44# file://defconfig-menlow \
45 file://defconfig-netbook"
46
47S = "${WORKDIR}/linux-2.6.31.5"